From f2f6ecabebddcbc54485cf3fca544e985457c0f4 Mon Sep 17 00:00:00 2001 From: Michal Kazior Date: Tue, 1 Mar 2016 11:32:46 +0100 Subject: [PATCH 0001/1649] ath10k: refactor tx code This prepares the code for future reuse with ieee80211_txq and wake_tx_queue() in mind. Signed-off-by: Michal Kazior Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/mac.c | 151 ++++++++++++++++---------- 1 file changed, 96 insertions(+), 55 deletions(-) diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index 78999c9de23b..b3a790addb0a 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -3271,6 +3271,26 @@ static void ath10k_tx_h_add_p2p_noa_ie(struct ath10k *ar, } } +static void ath10k_mac_tx_h_fill_cb(struct ath10k *ar, + struct ieee80211_vif *vif, + struct sk_buff *skb) +{ + struct ieee80211_hdr *hdr = (void *)skb->data; + struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb); + + cb->flags = 0; + if (!ath10k_tx_h_use_hwcrypto(vif, skb)) + cb->flags |= ATH10K_SKB_F_NO_HWCRYPT; + + if (ieee80211_is_mgmt(hdr->frame_control)) + cb->flags |= ATH10K_SKB_F_MGMT; + + if (ieee80211_is_data_qos(hdr->frame_control)) + cb->flags |= ATH10K_SKB_F_QOS; + + cb->vif = vif; +} + bool ath10k_mac_tx_frm_has_freq(struct ath10k *ar) { /* FIXME: Not really sure since when the behaviour changed. At some @@ -3306,8 +3326,9 @@ unlock: return ret; } -static void ath10k_mac_tx(struct ath10k *ar, enum ath10k_hw_txrx_mode txmode, - struct sk_buff *skb) +static int ath10k_mac_tx_submit(struct ath10k *ar, + enum ath10k_hw_txrx_mode txmode, + struct sk_buff *skb) { struct ath10k_htt *htt = &ar->htt; int ret = 0; @@ -3334,6 +3355,63 @@ static void ath10k_mac_tx(struct ath10k *ar, enum ath10k_hw_txrx_mode txmode, ret); ieee80211_free_txskb(ar->hw, skb); } + + return ret; +} + +/* This function consumes the sk_buff regardless of return value as far as + * caller is concerned so no freeing is necessary afterwards. 
+ */ +static int ath10k_mac_tx(struct ath10k *ar, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + enum ath10k_hw_txrx_mode txmode, + struct sk_buff *skb) +{ + struct ieee80211_hw *hw = ar->hw; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + int ret; + + /* We should disable CCK RATE due to P2P */ + if (info->flags & IEEE80211_TX_CTL_NO_CCK_RATE) + ath10k_dbg(ar, ATH10K_DBG_MAC, "IEEE80211_TX_CTL_NO_CCK_RATE\n"); + + switch (txmode) { + case ATH10K_HW_TXRX_MGMT: + case ATH10K_HW_TXRX_NATIVE_WIFI: + ath10k_tx_h_nwifi(hw, skb); + ath10k_tx_h_add_p2p_noa_ie(ar, vif, skb); + ath10k_tx_h_seq_no(vif, skb); + break; + case ATH10K_HW_TXRX_ETHERNET: + ath10k_tx_h_8023(skb); + break; + case ATH10K_HW_TXRX_RAW: + if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) { + WARN_ON_ONCE(1); + ieee80211_free_txskb(hw, skb); + return -ENOTSUPP; + } + } + + if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) { + if (!ath10k_mac_tx_frm_has_freq(ar)) { + ath10k_dbg(ar, ATH10K_DBG_MAC, "queued offchannel skb %p\n", + skb); + + skb_queue_tail(&ar->offchan_tx_queue, skb); + ieee80211_queue_work(hw, &ar->offchan_tx_work); + return 0; + } + } + + ret = ath10k_mac_tx_submit(ar, txmode, skb); + if (ret) { + ath10k_warn(ar, "failed to submit frame: %d\n", ret); + return ret; + } + + return 0; } void ath10k_offchan_tx_purge(struct ath10k *ar) @@ -3354,12 +3432,12 @@ void ath10k_offchan_tx_work(struct work_struct *work) struct ath10k *ar = container_of(work, struct ath10k, offchan_tx_work); struct ath10k_peer *peer; struct ath10k_vif *arvif; + enum ath10k_hw_txrx_mode txmode; struct ieee80211_hdr *hdr; struct ieee80211_vif *vif; struct ieee80211_sta *sta; struct sk_buff *skb; const u8 *peer_addr; - enum ath10k_hw_txrx_mode txmode; int vdev_id; int ret; unsigned long time_left; @@ -3424,7 +3502,12 @@ void ath10k_offchan_tx_work(struct work_struct *work) txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb); - ath10k_mac_tx(ar, txmode, skb); + ret = ath10k_mac_tx(ar, vif, sta, txmode, skb); + if (ret) { + ath10k_warn(ar, "failed to transmit offchannel frame: %d\n", + ret); + /* not serious */ + } time_left = wait_for_completion_timeout(&ar->offchan_tx_completed, 3 * HZ); @@ -3638,66 +3721,24 @@ static int ath10k_start_scan(struct ath10k *ar, /* mac80211 callbacks */ /**********************/ -static void ath10k_tx(struct ieee80211_hw *hw, - struct ieee80211_tx_control *control, - struct sk_buff *skb) +static void ath10k_mac_op_tx(struct ieee80211_hw *hw, + struct ieee80211_tx_control *control, + struct sk_buff *skb) { struct ath10k *ar = hw->priv; - struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb); struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_vif *vif = info->control.vif; struct ieee80211_sta *sta = control->sta; - struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; enum ath10k_hw_txrx_mode txmode; + int ret; - /* We should disable CCK RATE due to P2P */ - if (info->flags & IEEE80211_TX_CTL_NO_CCK_RATE) - ath10k_dbg(ar, ATH10K_DBG_MAC, "IEEE80211_TX_CTL_NO_CCK_RATE\n"); + ath10k_mac_tx_h_fill_cb(ar, vif, skb); txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb); - skb_cb->flags = 0; - if (!ath10k_tx_h_use_hwcrypto(vif, skb)) - skb_cb->flags |= ATH10K_SKB_F_NO_HWCRYPT; - - if (ieee80211_is_mgmt(hdr->frame_control)) - skb_cb->flags |= ATH10K_SKB_F_MGMT; - - if (ieee80211_is_data_qos(hdr->frame_control)) - skb_cb->flags |= ATH10K_SKB_F_QOS; - - skb_cb->vif = vif; - - switch (txmode) { - case ATH10K_HW_TXRX_MGMT: - case ATH10K_HW_TXRX_NATIVE_WIFI: - 
ath10k_tx_h_nwifi(hw, skb); - ath10k_tx_h_add_p2p_noa_ie(ar, vif, skb); - ath10k_tx_h_seq_no(vif, skb); - break; - case ATH10K_HW_TXRX_ETHERNET: - ath10k_tx_h_8023(skb); - break; - case ATH10K_HW_TXRX_RAW: - if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) { - WARN_ON_ONCE(1); - ieee80211_free_txskb(hw, skb); - return; - } - } - - if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) { - if (!ath10k_mac_tx_frm_has_freq(ar)) { - ath10k_dbg(ar, ATH10K_DBG_MAC, "queued offchannel skb %p\n", - skb); - - skb_queue_tail(&ar->offchan_tx_queue, skb); - ieee80211_queue_work(hw, &ar->offchan_tx_work); - return; - } - } - - ath10k_mac_tx(ar, txmode, skb); + ret = ath10k_mac_tx(ar, vif, sta, txmode, skb); + if (ret) + ath10k_warn(ar, "failed to transmit frame: %d\n", ret); } /* Must not be called with conf_mutex held as workers can use that also. */ @@ -6807,7 +6848,7 @@ ath10k_mac_op_switch_vif_chanctx(struct ieee80211_hw *hw, } static const struct ieee80211_ops ath10k_ops = { - .tx = ath10k_tx, + .tx = ath10k_mac_op_tx, .start = ath10k_start, .stop = ath10k_stop, .config = ath10k_config, From a30c7d009ed56df43f09ab9af11e0bdd3a3f2a3f Mon Sep 17 00:00:00 2001 From: Michal Kazior Date: Sun, 6 Mar 2016 16:14:23 +0200 Subject: [PATCH 0002/1649] ath10k: unify txpath decision Some future changes will need to determine final tx method early on. Prepare the code. Signed-off-by: Michal Kazior Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/mac.c | 58 +++++++++++++++++++++------ 1 file changed, 45 insertions(+), 13 deletions(-) diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index b3a790addb0a..67a50a61afb4 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -2994,6 +2994,13 @@ static void ath10k_reg_notifier(struct wiphy *wiphy, /* TX handlers */ /***************/ +enum ath10k_mac_tx_path { + ATH10K_MAC_TX_HTT, + ATH10K_MAC_TX_HTT_MGMT, + ATH10K_MAC_TX_WMI_MGMT, + ATH10K_MAC_TX_UNKNOWN, +}; + void ath10k_mac_tx_lock(struct ath10k *ar, int reason) { lockdep_assert_held(&ar->htt.tx_lock); @@ -3326,27 +3333,52 @@ unlock: return ret; } +static enum ath10k_mac_tx_path +ath10k_mac_tx_h_get_txpath(struct ath10k *ar, + struct sk_buff *skb, + enum ath10k_hw_txrx_mode txmode) +{ + switch (txmode) { + case ATH10K_HW_TXRX_RAW: + case ATH10K_HW_TXRX_NATIVE_WIFI: + case ATH10K_HW_TXRX_ETHERNET: + return ATH10K_MAC_TX_HTT; + case ATH10K_HW_TXRX_MGMT: + if (test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX, + ar->fw_features)) + return ATH10K_MAC_TX_WMI_MGMT; + else if (ar->htt.target_version_major >= 3) + return ATH10K_MAC_TX_HTT; + else + return ATH10K_MAC_TX_HTT_MGMT; + } + + return ATH10K_MAC_TX_UNKNOWN; +} + static int ath10k_mac_tx_submit(struct ath10k *ar, enum ath10k_hw_txrx_mode txmode, struct sk_buff *skb) { struct ath10k_htt *htt = &ar->htt; - int ret = 0; + enum ath10k_mac_tx_path txpath; + int ret; - switch (txmode) { - case ATH10K_HW_TXRX_RAW: - case ATH10K_HW_TXRX_NATIVE_WIFI: - case ATH10K_HW_TXRX_ETHERNET: + txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode); + + switch (txpath) { + case ATH10K_MAC_TX_HTT: ret = ath10k_htt_tx(htt, txmode, skb); break; - case ATH10K_HW_TXRX_MGMT: - if (test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX, - ar->fw_features)) - ret = ath10k_mac_tx_wmi_mgmt(ar, skb); - else if (ar->htt.target_version_major >= 3) - ret = ath10k_htt_tx(htt, txmode, skb); - else - ret = ath10k_htt_mgmt_tx(htt, skb); + case ATH10K_MAC_TX_HTT_MGMT: + ret = ath10k_htt_mgmt_tx(htt, skb); + break; + case 
ATH10K_MAC_TX_WMI_MGMT: + ret = ath10k_mac_tx_wmi_mgmt(ar, skb); + break; + case ATH10K_MAC_TX_UNKNOWN: + WARN_ON_ONCE(1); + ret = -EINVAL; break; } From 6421969f248fdf9d8cd38353a617ed7cc5ddab94 Mon Sep 17 00:00:00 2001 From: Michal Kazior Date: Sun, 6 Mar 2016 16:14:25 +0200 Subject: [PATCH 0003/1649] ath10k: refactor tx pending management Tx pending counter logic assumed that the sk_buff is already known and hence was performed in HTT functions themselves. However, for the sake of future wake_tx_queue() usage the driver must be able to tell whether it can submit more frames to firmware before it dequeues frame from ieee80211_txq (and thus long before HTT Tx functions are called) because once a frame is dequeued it cannot be requeud back to mac80211. This prepares the driver for future changes. Signed-off-by: Michal Kazior Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/htt.h | 7 +- drivers/net/wireless/ath/ath10k/htt_tx.c | 85 ++++++------------------ drivers/net/wireless/ath/ath10k/mac.c | 51 +++++++++++--- drivers/net/wireless/ath/ath10k/txrx.c | 2 +- 4 files changed, 71 insertions(+), 74 deletions(-) diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h index 13391ea4422d..cb6d4fd687da 100644 --- a/drivers/net/wireless/ath/ath10k/htt.h +++ b/drivers/net/wireless/ath/ath10k/htt.h @@ -1753,7 +1753,12 @@ int ath10k_htt_h2t_aggr_cfg_msg(struct ath10k_htt *htt, u8 max_subfrms_amsdu); void ath10k_htt_hif_tx_complete(struct ath10k *ar, struct sk_buff *skb); -void __ath10k_htt_tx_dec_pending(struct ath10k_htt *htt, bool limit_mgmt_desc); +void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt, + bool is_mgmt); +int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt, + bool is_mgmt, + bool is_presp); + int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb); void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id); int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *); diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c index 95acb727c068..860661d3812f 100644 --- a/drivers/net/wireless/ath/ath10k/htt_tx.c +++ b/drivers/net/wireless/ath/ath10k/htt_tx.c @@ -22,9 +22,12 @@ #include "txrx.h" #include "debug.h" -void __ath10k_htt_tx_dec_pending(struct ath10k_htt *htt, bool limit_mgmt_desc) +void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt, + bool is_mgmt) { - if (limit_mgmt_desc) + lockdep_assert_held(&htt->tx_lock); + + if (is_mgmt) htt->num_pending_mgmt_tx--; htt->num_pending_tx--; @@ -32,43 +35,31 @@ void __ath10k_htt_tx_dec_pending(struct ath10k_htt *htt, bool limit_mgmt_desc) ath10k_mac_tx_unlock(htt->ar, ATH10K_TX_PAUSE_Q_FULL); } -static void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt, - bool limit_mgmt_desc) -{ - spin_lock_bh(&htt->tx_lock); - __ath10k_htt_tx_dec_pending(htt, limit_mgmt_desc); - spin_unlock_bh(&htt->tx_lock); -} - -static int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt, - bool limit_mgmt_desc, bool is_probe_resp) +int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt, + bool is_mgmt, + bool is_presp) { struct ath10k *ar = htt->ar; - int ret = 0; - spin_lock_bh(&htt->tx_lock); + lockdep_assert_held(&htt->tx_lock); - if (htt->num_pending_tx >= htt->max_num_pending_tx) { - ret = -EBUSY; - goto exit; - } + if (htt->num_pending_tx >= htt->max_num_pending_tx) + return -EBUSY; - if (limit_mgmt_desc) { - if (is_probe_resp && (htt->num_pending_mgmt_tx > - ar->hw_params.max_probe_resp_desc_thres)) { - ret = -EBUSY; - goto exit; - } + 
if (is_mgmt && + is_presp && + ar->hw_params.max_probe_resp_desc_thres && + ar->hw_params.max_probe_resp_desc_thres < htt->num_pending_mgmt_tx) + return -EBUSY; + + if (is_mgmt) htt->num_pending_mgmt_tx++; - } htt->num_pending_tx++; if (htt->num_pending_tx == htt->max_num_pending_tx) ath10k_mac_tx_lock(htt->ar, ATH10K_TX_PAUSE_Q_FULL); -exit: - spin_unlock_bh(&htt->tx_lock); - return ret; + return 0; } int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb) @@ -576,20 +567,6 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu) int msdu_id = -1; int res; struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data; - bool limit_mgmt_desc = false; - bool is_probe_resp = false; - - if (ar->hw_params.max_probe_resp_desc_thres) { - limit_mgmt_desc = true; - - if (ieee80211_is_probe_resp(hdr->frame_control)) - is_probe_resp = true; - } - - res = ath10k_htt_tx_inc_pending(htt, limit_mgmt_desc, is_probe_resp); - - if (res) - goto err; len += sizeof(cmd->hdr); len += sizeof(cmd->mgmt_tx); @@ -598,7 +575,7 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu) res = ath10k_htt_tx_alloc_msdu_id(htt, msdu); spin_unlock_bh(&htt->tx_lock); if (res < 0) - goto err_tx_dec; + goto err; msdu_id = res; @@ -649,8 +626,6 @@ err_free_msdu_id: spin_lock_bh(&htt->tx_lock); ath10k_htt_tx_free_msdu_id(htt, msdu_id); spin_unlock_bh(&htt->tx_lock); -err_tx_dec: - ath10k_htt_tx_dec_pending(htt, limit_mgmt_desc); err: return res; } @@ -677,26 +652,12 @@ int ath10k_htt_tx(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txmode, u32 frags_paddr = 0; u32 txbuf_paddr; struct htt_msdu_ext_desc *ext_desc = NULL; - bool limit_mgmt_desc = false; - bool is_probe_resp = false; - - if (unlikely(ieee80211_is_mgmt(hdr->frame_control)) && - ar->hw_params.max_probe_resp_desc_thres) { - limit_mgmt_desc = true; - - if (ieee80211_is_probe_resp(hdr->frame_control)) - is_probe_resp = true; - } - - res = ath10k_htt_tx_inc_pending(htt, limit_mgmt_desc, is_probe_resp); - if (res) - goto err; spin_lock_bh(&htt->tx_lock); res = ath10k_htt_tx_alloc_msdu_id(htt, msdu); spin_unlock_bh(&htt->tx_lock); if (res < 0) - goto err_tx_dec; + goto err; msdu_id = res; @@ -862,11 +823,7 @@ int ath10k_htt_tx(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txmode, err_unmap_msdu: dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE); err_free_msdu_id: - spin_lock_bh(&htt->tx_lock); ath10k_htt_tx_free_msdu_id(htt, msdu_id); - spin_unlock_bh(&htt->tx_lock); -err_tx_dec: - ath10k_htt_tx_dec_pending(htt, limit_mgmt_desc); err: return res; } diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index 67a50a61afb4..140ad250ea69 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -3358,13 +3358,11 @@ ath10k_mac_tx_h_get_txpath(struct ath10k *ar, static int ath10k_mac_tx_submit(struct ath10k *ar, enum ath10k_hw_txrx_mode txmode, + enum ath10k_mac_tx_path txpath, struct sk_buff *skb) { struct ath10k_htt *htt = &ar->htt; - enum ath10k_mac_tx_path txpath; - int ret; - - txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode); + int ret = -EINVAL; switch (txpath) { case ATH10K_MAC_TX_HTT: @@ -3398,6 +3396,7 @@ static int ath10k_mac_tx(struct ath10k *ar, struct ieee80211_vif *vif, struct ieee80211_sta *sta, enum ath10k_hw_txrx_mode txmode, + enum ath10k_mac_tx_path txpath, struct sk_buff *skb) { struct ieee80211_hw *hw = ar->hw; @@ -3437,7 +3436,7 @@ static int ath10k_mac_tx(struct ath10k *ar, } } - ret = 
ath10k_mac_tx_submit(ar, txmode, skb); + ret = ath10k_mac_tx_submit(ar, txmode, txpath, skb); if (ret) { ath10k_warn(ar, "failed to submit frame: %d\n", ret); return ret; @@ -3465,6 +3464,7 @@ void ath10k_offchan_tx_work(struct work_struct *work) struct ath10k_peer *peer; struct ath10k_vif *arvif; enum ath10k_hw_txrx_mode txmode; + enum ath10k_mac_tx_path txpath; struct ieee80211_hdr *hdr; struct ieee80211_vif *vif; struct ieee80211_sta *sta; @@ -3533,8 +3533,9 @@ void ath10k_offchan_tx_work(struct work_struct *work) } txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb); + txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode); - ret = ath10k_mac_tx(ar, vif, sta, txmode, skb); + ret = ath10k_mac_tx(ar, vif, sta, txmode, txpath, skb); if (ret) { ath10k_warn(ar, "failed to transmit offchannel frame: %d\n", ret); @@ -3758,19 +3759,53 @@ static void ath10k_mac_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb) { struct ath10k *ar = hw->priv; + struct ath10k_htt *htt = &ar->htt; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_vif *vif = info->control.vif; struct ieee80211_sta *sta = control->sta; + struct ieee80211_hdr *hdr = (void *)skb->data; enum ath10k_hw_txrx_mode txmode; + enum ath10k_mac_tx_path txpath; + bool is_htt; + bool is_mgmt; + bool is_presp; int ret; ath10k_mac_tx_h_fill_cb(ar, vif, skb); txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb); + txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode); + is_htt = (txpath == ATH10K_MAC_TX_HTT || + txpath == ATH10K_MAC_TX_HTT_MGMT); - ret = ath10k_mac_tx(ar, vif, sta, txmode, skb); - if (ret) + if (is_htt) { + spin_lock_bh(&ar->htt.tx_lock); + + is_mgmt = ieee80211_is_mgmt(hdr->frame_control); + is_presp = ieee80211_is_probe_resp(hdr->frame_control); + + ret = ath10k_htt_tx_inc_pending(htt, is_mgmt, is_presp); + if (ret) { + ath10k_warn(ar, "failed to increase tx pending count: %d, dropping\n", + ret); + spin_unlock_bh(&ar->htt.tx_lock); + ieee80211_free_txskb(ar->hw, skb); + return; + } + + spin_unlock_bh(&ar->htt.tx_lock); + } + + ret = ath10k_mac_tx(ar, vif, sta, txmode, txpath, skb); + if (ret) { ath10k_warn(ar, "failed to transmit frame: %d\n", ret); + if (is_htt) { + spin_lock_bh(&ar->htt.tx_lock); + ath10k_htt_tx_dec_pending(htt, is_mgmt); + spin_unlock_bh(&ar->htt.tx_lock); + } + return; + } } /* Must not be called with conf_mutex held as workers can use that also. */ diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c index fbfb608e48ab..118586ece20e 100644 --- a/drivers/net/wireless/ath/ath10k/txrx.c +++ b/drivers/net/wireless/ath/ath10k/txrx.c @@ -86,7 +86,7 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt, limit_mgmt_desc = true; ath10k_htt_tx_free_msdu_id(htt, tx_done->msdu_id); - __ath10k_htt_tx_dec_pending(htt, limit_mgmt_desc); + ath10k_htt_tx_dec_pending(htt, limit_mgmt_desc); if (htt->num_pending_tx == 0) wake_up(&htt->empty_tx_wq); spin_unlock_bh(&htt->tx_lock); From bb8f0c6af83f2217aebbe45540e81d31b754b805 Mon Sep 17 00:00:00 2001 From: Michal Kazior Date: Sun, 6 Mar 2016 16:14:27 +0200 Subject: [PATCH 0004/1649] ath10k: maintain peer_id for each sta and vif The 10.4.3 firmware with congestion control guarantees that each peer has only a single peer_id mapping. The 1:1 mapping isn't the case for older firmwares (e.g. 10.4.1, 10.2, 10.1) but it should not matter. This 1:1 mapping is going to be only used by future code which inherently (flow-wise) is for 10.4.3. 
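For reference, the cached mapping is resolved once, right after peer creation, along these lines (condensed from the mac.c hunks below, error handling omitted; the sta case is identical with sta->addr and arsta->peer_id):

    spin_lock_bh(&ar->data_lock);

    peer = ath10k_peer_find(ar, arvif->vdev_id, vif->addr);
    if (peer)
            arvif->peer_id = find_first_bit(peer->peer_ids,
                                            ATH10K_MAX_NUM_PEER_IDS);

    spin_unlock_bh(&ar->data_lock);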
Signed-off-by: Michal Kazior Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/core.h | 2 ++ drivers/net/wireless/ath/ath10k/mac.c | 38 ++++++++++++++++++++++++++ 2 files changed, 40 insertions(+) diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h index a62b62a62266..523585e85f35 100644 --- a/drivers/net/wireless/ath/ath10k/core.h +++ b/drivers/net/wireless/ath/ath10k/core.h @@ -313,6 +313,7 @@ struct ath10k_sta { u32 bw; u32 nss; u32 smps; + u16 peer_id; struct work_struct update_wk; @@ -335,6 +336,7 @@ struct ath10k_vif { struct list_head list; u32 vdev_id; + u16 peer_id; enum wmi_vdev_type vdev_type; enum wmi_vdev_subtype vdev_subtype; u32 beacon_interval; diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index 140ad250ea69..72b8f177445d 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -4421,6 +4421,7 @@ static int ath10k_add_interface(struct ieee80211_hw *hw, { struct ath10k *ar = hw->priv; struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); + struct ath10k_peer *peer; enum wmi_sta_powersave_param param; int ret = 0; u32 value; @@ -4620,6 +4621,24 @@ static int ath10k_add_interface(struct ieee80211_hw *hw, arvif->vdev_id, ret); goto err_vdev_delete; } + + spin_lock_bh(&ar->data_lock); + + peer = ath10k_peer_find(ar, arvif->vdev_id, vif->addr); + if (!peer) { + ath10k_warn(ar, "failed to lookup peer %pM on vdev %i\n", + vif->addr, arvif->vdev_id); + spin_unlock_bh(&ar->data_lock); + ret = -ENOENT; + goto err_peer_delete; + } + + arvif->peer_id = find_first_bit(peer->peer_ids, + ATH10K_MAX_NUM_PEER_IDS); + + spin_unlock_bh(&ar->data_lock); + } else { + arvif->peer_id = HTT_INVALID_PEERID; } if (arvif->vdev_type == WMI_VDEV_TYPE_AP) { @@ -5501,6 +5520,7 @@ static int ath10k_sta_state(struct ieee80211_hw *hw, struct ath10k *ar = hw->priv; struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv; + struct ath10k_peer *peer; int ret = 0; if (old_state == IEEE80211_STA_NOTEXIST && @@ -5551,6 +5571,24 @@ static int ath10k_sta_state(struct ieee80211_hw *hw, goto exit; } + spin_lock_bh(&ar->data_lock); + + peer = ath10k_peer_find(ar, arvif->vdev_id, sta->addr); + if (!peer) { + ath10k_warn(ar, "failed to lookup peer %pM on vdev %i\n", + vif->addr, arvif->vdev_id); + spin_unlock_bh(&ar->data_lock); + ath10k_peer_delete(ar, arvif->vdev_id, sta->addr); + ath10k_mac_dec_num_stations(arvif, sta); + ret = -ENOENT; + goto exit; + } + + arsta->peer_id = find_first_bit(peer->peer_ids, + ATH10K_MAX_NUM_PEER_IDS); + + spin_unlock_bh(&ar->data_lock); + if (!sta->tdls) goto exit; From 6942726f7f7bfc3c197795befe84c8e3c57435a0 Mon Sep 17 00:00:00 2001 From: Michal Kazior Date: Sun, 6 Mar 2016 16:14:30 +0200 Subject: [PATCH 0005/1649] ath10k: add fast peer_map lookup The pull-push functionality of 10.4 will be based on peer_id and tid. These will need to be mapped, eventually, to ieee80211_txq to be used with ieee80211_tx_dequeue(). Iterating over existing stations every time peer_id needs to be mapped to a station would be inefficient wrt CPU time. The new firmware, which will be the only user of the code flow-wise, will guarantee to use low peer_ids first so despite peer_map's apparent huge size d-cache thrashing should not be a problem. Older firmware hot paths will effectively not use peer_map. 
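With the map in place, resolving a peer_id back to its peer (and from there to the vif/sta pointers added here) becomes an O(1) array access instead of a list walk, conceptually (illustrative helper only, not added by this patch; the caller must hold ar->data_lock, which also protects the map updates in ath10k_peer_map_event()/ath10k_peer_unmap_event()):

    static struct ath10k_peer *
    ath10k_peer_find_by_id_sketch(struct ath10k *ar, u16 peer_id)
    {
            if (peer_id >= ATH10K_MAX_NUM_PEER_IDS)
                    return NULL;

            return ar->peer_map[peer_id];
    }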
Signed-off-by: Michal Kazior Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/core.h | 4 ++ drivers/net/wireless/ath/ath10k/mac.c | 71 +++++++++++++++++++++++--- drivers/net/wireless/ath/ath10k/txrx.c | 2 + 3 files changed, 71 insertions(+), 6 deletions(-) diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h index 523585e85f35..5f447444fe97 100644 --- a/drivers/net/wireless/ath/ath10k/core.h +++ b/drivers/net/wireless/ath/ath10k/core.h @@ -297,6 +297,9 @@ struct ath10k_dfs_stats { struct ath10k_peer { struct list_head list; + struct ieee80211_vif *vif; + struct ieee80211_sta *sta; + int vdev_id; u8 addr[ETH_ALEN]; DECLARE_BITMAP(peer_ids, ATH10K_MAX_NUM_PEER_IDS); @@ -791,6 +794,7 @@ struct ath10k { struct list_head arvifs; struct list_head peers; + struct ath10k_peer *peer_map[ATH10K_MAX_NUM_PEER_IDS]; wait_queue_head_t peer_mapping_wq; /* protected by conf_mutex */ diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index 72b8f177445d..4b69a373382b 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -618,10 +618,15 @@ ath10k_mac_get_any_chandef_iter(struct ieee80211_hw *hw, *def = &conf->def; } -static int ath10k_peer_create(struct ath10k *ar, u32 vdev_id, const u8 *addr, +static int ath10k_peer_create(struct ath10k *ar, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + u32 vdev_id, + const u8 *addr, enum wmi_peer_type peer_type) { struct ath10k_vif *arvif; + struct ath10k_peer *peer; int num_peers = 0; int ret; @@ -650,6 +655,22 @@ static int ath10k_peer_create(struct ath10k *ar, u32 vdev_id, const u8 *addr, return ret; } + spin_lock_bh(&ar->data_lock); + + peer = ath10k_peer_find(ar, vdev_id, addr); + if (!peer) { + ath10k_warn(ar, "failed to find peer %pM on vdev %i after creation\n", + addr, vdev_id); + ath10k_wmi_peer_delete(ar, vdev_id, addr); + spin_unlock_bh(&ar->data_lock); + return -ENOENT; + } + + peer->vif = vif; + peer->sta = sta; + + spin_unlock_bh(&ar->data_lock); + ar->num_peers++; return 0; @@ -731,6 +752,7 @@ static int ath10k_peer_delete(struct ath10k *ar, u32 vdev_id, const u8 *addr) static void ath10k_peer_cleanup(struct ath10k *ar, u32 vdev_id) { struct ath10k_peer *peer, *tmp; + int peer_id; lockdep_assert_held(&ar->conf_mutex); @@ -742,6 +764,11 @@ static void ath10k_peer_cleanup(struct ath10k *ar, u32 vdev_id) ath10k_warn(ar, "removing stale peer %pM from vdev_id %d\n", peer->addr, vdev_id); + for_each_set_bit(peer_id, peer->peer_ids, + ATH10K_MAX_NUM_PEER_IDS) { + ar->peer_map[peer_id] = NULL; + } + list_del(&peer->list); kfree(peer); ar->num_peers--; @@ -3506,7 +3533,8 @@ void ath10k_offchan_tx_work(struct work_struct *work) peer_addr, vdev_id); if (!peer) { - ret = ath10k_peer_create(ar, vdev_id, peer_addr, + ret = ath10k_peer_create(ar, NULL, NULL, vdev_id, + peer_addr, WMI_PEER_TYPE_DEFAULT); if (ret) ath10k_warn(ar, "failed to create peer %pM on vdev %d: %d\n", @@ -4614,8 +4642,8 @@ static int ath10k_add_interface(struct ieee80211_hw *hw, if (arvif->vdev_type == WMI_VDEV_TYPE_AP || arvif->vdev_type == WMI_VDEV_TYPE_IBSS) { - ret = ath10k_peer_create(ar, arvif->vdev_id, vif->addr, - WMI_PEER_TYPE_DEFAULT); + ret = ath10k_peer_create(ar, vif, NULL, arvif->vdev_id, + vif->addr, WMI_PEER_TYPE_DEFAULT); if (ret) { ath10k_warn(ar, "failed to create vdev %i peer for AP/IBSS: %d\n", arvif->vdev_id, ret); @@ -4749,7 +4777,9 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw, { struct ath10k *ar = hw->priv; 
struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); + struct ath10k_peer *peer; int ret; + int i; cancel_work_sync(&arvif->ap_csa_work); cancel_delayed_work_sync(&arvif->connection_loss_work); @@ -4803,6 +4833,20 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw, spin_unlock_bh(&ar->data_lock); } + spin_lock_bh(&ar->data_lock); + for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++) { + peer = ar->peer_map[i]; + if (!peer) + continue; + + if (peer->vif == vif) { + ath10k_warn(ar, "found vif peer %pM entry on vdev %i after it was supposedly removed\n", + vif->addr, arvif->vdev_id); + peer->vif = NULL; + } + } + spin_unlock_bh(&ar->data_lock); + ath10k_peer_cleanup(ar, arvif->vdev_id); if (vif->type == NL80211_IFTYPE_MONITOR) { @@ -5522,6 +5566,7 @@ static int ath10k_sta_state(struct ieee80211_hw *hw, struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv; struct ath10k_peer *peer; int ret = 0; + int i; if (old_state == IEEE80211_STA_NOTEXIST && new_state == IEEE80211_STA_NONE) { @@ -5562,8 +5607,8 @@ static int ath10k_sta_state(struct ieee80211_hw *hw, if (sta->tdls) peer_type = WMI_PEER_TYPE_TDLS; - ret = ath10k_peer_create(ar, arvif->vdev_id, sta->addr, - peer_type); + ret = ath10k_peer_create(ar, vif, sta, arvif->vdev_id, + sta->addr, peer_type); if (ret) { ath10k_warn(ar, "failed to add peer %pM for vdev %d when adding a new sta: %i\n", sta->addr, arvif->vdev_id, ret); @@ -5651,6 +5696,20 @@ static int ath10k_sta_state(struct ieee80211_hw *hw, ath10k_mac_dec_num_stations(arvif, sta); + spin_lock_bh(&ar->data_lock); + for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++) { + peer = ar->peer_map[i]; + if (!peer) + continue; + + if (peer->sta == sta) { + ath10k_warn(ar, "found sta peer %pM entry on vdev %i after it was supposedly removed\n", + sta->addr, arvif->vdev_id); + peer->sta = NULL; + } + } + spin_unlock_bh(&ar->data_lock); + if (!sta->tdls) goto exit; diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c index 118586ece20e..202e5192235b 100644 --- a/drivers/net/wireless/ath/ath10k/txrx.c +++ b/drivers/net/wireless/ath/ath10k/txrx.c @@ -203,6 +203,7 @@ void ath10k_peer_map_event(struct ath10k_htt *htt, ath10k_dbg(ar, ATH10K_DBG_HTT, "htt peer map vdev %d peer %pM id %d\n", ev->vdev_id, ev->addr, ev->peer_id); + ar->peer_map[ev->peer_id] = peer; set_bit(ev->peer_id, peer->peer_ids); exit: spin_unlock_bh(&ar->data_lock); @@ -225,6 +226,7 @@ void ath10k_peer_unmap_event(struct ath10k_htt *htt, ath10k_dbg(ar, ATH10K_DBG_HTT, "htt peer unmap vdev %d peer %pM id %d\n", peer->vdev_id, peer->addr, ev->peer_id); + ar->peer_map[ev->peer_id] = NULL; clear_bit(ev->peer_id, peer->peer_ids); if (bitmap_empty(peer->peer_ids, ATH10K_MAX_NUM_PEER_IDS)) { From 839ae6371e56594f06ef05a64fc90cd156007232 Mon Sep 17 00:00:00 2001 From: Michal Kazior Date: Sun, 6 Mar 2016 16:14:32 +0200 Subject: [PATCH 0006/1649] ath10k: add new htt message generation/parsing logic This merely adds some parsing, generation and sanity checks with placeholders for real code/functionality to be added later. 
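All three new parsers (tx_fetch_ind, tx_fetch_confirm, tx_mode_switch_ind) validate the variable-length events the same way before touching any records, roughly (condensed from the tx_fetch_ind handler below):

    len = sizeof(resp->hdr) + sizeof(resp->tx_fetch_ind);
    if (unlikely(skb->len < len))
            return;         /* fixed header truncated */

    num_records = le16_to_cpu(resp->tx_fetch_ind.num_records);
    len += sizeof(resp->tx_fetch_ind.records[0]) * num_records;
    if (unlikely(skb->len < len))
            return;         /* records would overflow the buffer */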
Signed-off-by: Michal Kazior Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/htt.h | 5 + drivers/net/wireless/ath/ath10k/htt_rx.c | 198 ++++++++++++++++++++++- drivers/net/wireless/ath/ath10k/htt_tx.c | 53 ++++++ 3 files changed, 255 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h index cb6d4fd687da..65dcd22f31df 100644 --- a/drivers/net/wireless/ath/ath10k/htt.h +++ b/drivers/net/wireless/ath/ath10k/htt.h @@ -1752,6 +1752,11 @@ int ath10k_htt_h2t_aggr_cfg_msg(struct ath10k_htt *htt, u8 max_subfrms_ampdu, u8 max_subfrms_amsdu); void ath10k_htt_hif_tx_complete(struct ath10k *ar, struct sk_buff *skb); +int ath10k_htt_tx_fetch_resp(struct ath10k *ar, + __le32 token, + __le16 fetch_seq_num, + struct htt_tx_fetch_record *records, + size_t num_records); void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt, bool is_mgmt); diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c index ae9b686a4e91..8b367e98f929 100644 --- a/drivers/net/wireless/ath/ath10k/htt_rx.c +++ b/drivers/net/wireless/ath/ath10k/htt_rx.c @@ -1982,6 +1982,198 @@ static void ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb) tasklet_schedule(&htt->rx_replenish_task); } +static void ath10k_htt_rx_tx_fetch_resp_id_confirm(struct ath10k *ar, + const __le32 *resp_ids, + int num_resp_ids) +{ + int i; + u32 resp_id; + + ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm num_resp_ids %d\n", + num_resp_ids); + + for (i = 0; i < num_resp_ids; i++) { + resp_id = le32_to_cpu(resp_ids[i]); + + ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm resp_id %u\n", + resp_id); + + /* TODO: free resp_id */ + } +} + +static void ath10k_htt_rx_tx_fetch_ind(struct ath10k *ar, struct sk_buff *skb) +{ + struct htt_resp *resp = (struct htt_resp *)skb->data; + struct htt_tx_fetch_record *record; + size_t len; + size_t max_num_bytes; + size_t max_num_msdus; + const __le32 *resp_ids; + u16 num_records; + u16 num_resp_ids; + u16 peer_id; + u8 tid; + int i; + + ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch ind\n"); + + len = sizeof(resp->hdr) + sizeof(resp->tx_fetch_ind); + if (unlikely(skb->len < len)) { + ath10k_warn(ar, "received corrupted tx_fetch_ind event: buffer too short\n"); + return; + } + + num_records = le16_to_cpu(resp->tx_fetch_ind.num_records); + num_resp_ids = le16_to_cpu(resp->tx_fetch_ind.num_resp_ids); + + len += sizeof(resp->tx_fetch_ind.records[0]) * num_records; + len += sizeof(resp->tx_fetch_ind.resp_ids[0]) * num_resp_ids; + + if (unlikely(skb->len < len)) { + ath10k_warn(ar, "received corrupted tx_fetch_ind event: too many records/resp_ids\n"); + return; + } + + ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch ind num records %hu num resps %hu seq %hu\n", + num_records, num_resp_ids, + le16_to_cpu(resp->tx_fetch_ind.fetch_seq_num)); + + /* TODO: runtime sanity checks */ + + for (i = 0; i < num_records; i++) { + record = &resp->tx_fetch_ind.records[i]; + peer_id = MS(le16_to_cpu(record->info), + HTT_TX_FETCH_RECORD_INFO_PEER_ID); + tid = MS(le16_to_cpu(record->info), + HTT_TX_FETCH_RECORD_INFO_TID); + max_num_msdus = le16_to_cpu(record->num_msdus); + max_num_bytes = le32_to_cpu(record->num_bytes); + + ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch record %i peer_id %hu tid %hhu msdus %zu bytes %zu\n", + i, peer_id, tid, max_num_msdus, max_num_bytes); + + if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) || + unlikely(tid >= ar->htt.tx_q_state.num_tids)) { + ath10k_warn(ar, 
"received out of range peer_id %hu tid %hhu\n", + peer_id, tid); + continue; + } + + /* TODO: dequeue and submit tx to device */ + } + + resp_ids = ath10k_htt_get_tx_fetch_ind_resp_ids(&resp->tx_fetch_ind); + ath10k_htt_rx_tx_fetch_resp_id_confirm(ar, resp_ids, num_resp_ids); + + /* TODO: generate and send fetch response to device */ +} + +static void ath10k_htt_rx_tx_fetch_confirm(struct ath10k *ar, + struct sk_buff *skb) +{ + const struct htt_resp *resp = (void *)skb->data; + size_t len; + int num_resp_ids; + + ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm\n"); + + len = sizeof(resp->hdr) + sizeof(resp->tx_fetch_confirm); + if (unlikely(skb->len < len)) { + ath10k_warn(ar, "received corrupted tx_fetch_confirm event: buffer too short\n"); + return; + } + + num_resp_ids = le16_to_cpu(resp->tx_fetch_confirm.num_resp_ids); + len += sizeof(resp->tx_fetch_confirm.resp_ids[0]) * num_resp_ids; + + if (unlikely(skb->len < len)) { + ath10k_warn(ar, "received corrupted tx_fetch_confirm event: resp_ids buffer overflow\n"); + return; + } + + ath10k_htt_rx_tx_fetch_resp_id_confirm(ar, + resp->tx_fetch_confirm.resp_ids, + num_resp_ids); +} + +static void ath10k_htt_rx_tx_mode_switch_ind(struct ath10k *ar, + struct sk_buff *skb) +{ + const struct htt_resp *resp = (void *)skb->data; + const struct htt_tx_mode_switch_record *record; + size_t len; + size_t num_records; + enum htt_tx_mode_switch_mode mode; + bool enable; + u16 info0; + u16 info1; + u16 threshold; + u16 peer_id; + u8 tid; + int i; + + ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx mode switch ind\n"); + + len = sizeof(resp->hdr) + sizeof(resp->tx_mode_switch_ind); + if (unlikely(skb->len < len)) { + ath10k_warn(ar, "received corrupted tx_mode_switch_ind event: buffer too short\n"); + return; + } + + info0 = le16_to_cpu(resp->tx_mode_switch_ind.info0); + info1 = le16_to_cpu(resp->tx_mode_switch_ind.info1); + + enable = !!(info0 & HTT_TX_MODE_SWITCH_IND_INFO0_ENABLE); + num_records = MS(info0, HTT_TX_MODE_SWITCH_IND_INFO1_THRESHOLD); + mode = MS(info1, HTT_TX_MODE_SWITCH_IND_INFO1_MODE); + threshold = MS(info1, HTT_TX_MODE_SWITCH_IND_INFO1_THRESHOLD); + + ath10k_dbg(ar, ATH10K_DBG_HTT, + "htt rx tx mode switch ind info0 0x%04hx info1 0x%04hx enable %d num records %zd mode %d threshold %hu\n", + info0, info1, enable, num_records, mode, threshold); + + len += sizeof(resp->tx_mode_switch_ind.records[0]) * num_records; + + if (unlikely(skb->len < len)) { + ath10k_warn(ar, "received corrupted tx_mode_switch_mode_ind event: too many records\n"); + return; + } + + switch (mode) { + case HTT_TX_MODE_SWITCH_PUSH: + case HTT_TX_MODE_SWITCH_PUSH_PULL: + break; + default: + ath10k_warn(ar, "received invalid tx_mode_switch_mode_ind mode %d, ignoring\n", + mode); + return; + } + + if (!enable) + return; + + /* TODO: apply configuration */ + + for (i = 0; i < num_records; i++) { + record = &resp->tx_mode_switch_ind.records[i]; + info0 = le16_to_cpu(record->info0); + peer_id = MS(info0, HTT_TX_MODE_SWITCH_RECORD_INFO0_PEER_ID); + tid = MS(info0, HTT_TX_MODE_SWITCH_RECORD_INFO0_TID); + + if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) || + unlikely(tid >= ar->htt.tx_q_state.num_tids)) { + ath10k_warn(ar, "received out of range peer_id %hu tid %hhu\n", + peer_id, tid); + continue; + } + + /* TODO: apply configuration */ + } + + /* TODO: apply configuration */ +} + void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb) { struct ath10k_htt *htt = &ar->htt; @@ -2120,9 +2312,13 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct 
sk_buff *skb) case HTT_T2H_MSG_TYPE_AGGR_CONF: break; case HTT_T2H_MSG_TYPE_TX_FETCH_IND: + ath10k_htt_rx_tx_fetch_ind(ar, skb); + break; case HTT_T2H_MSG_TYPE_TX_FETCH_CONFIRM: + ath10k_htt_rx_tx_fetch_confirm(ar, skb); + break; case HTT_T2H_MSG_TYPE_TX_MODE_SWITCH_IND: - /* TODO: Implement pull-push logic */ + ath10k_htt_rx_tx_mode_switch_ind(ar, skb); break; case HTT_T2H_MSG_TYPE_EN_STATS: default: diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c index 860661d3812f..225f0561b3fd 100644 --- a/drivers/net/wireless/ath/ath10k/htt_tx.c +++ b/drivers/net/wireless/ath/ath10k/htt_tx.c @@ -526,6 +526,59 @@ int ath10k_htt_h2t_aggr_cfg_msg(struct ath10k_htt *htt, return 0; } +int ath10k_htt_tx_fetch_resp(struct ath10k *ar, + __le32 token, + __le16 fetch_seq_num, + struct htt_tx_fetch_record *records, + size_t num_records) +{ + struct sk_buff *skb; + struct htt_cmd *cmd; + u16 resp_id; + int len = 0; + int ret; + + len += sizeof(cmd->hdr); + len += sizeof(cmd->tx_fetch_resp); + len += sizeof(cmd->tx_fetch_resp.records[0]) * num_records; + + skb = ath10k_htc_alloc_skb(ar, len); + if (!skb) + return -ENOMEM; + + resp_id = 0; /* TODO: allocate resp_id */ + ret = 0; + if (ret) + goto err_free_skb; + + skb_put(skb, len); + cmd = (struct htt_cmd *)skb->data; + cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FETCH_RESP; + cmd->tx_fetch_resp.resp_id = cpu_to_le16(resp_id); + cmd->tx_fetch_resp.fetch_seq_num = fetch_seq_num; + cmd->tx_fetch_resp.num_records = cpu_to_le16(num_records); + cmd->tx_fetch_resp.token = token; + + memcpy(cmd->tx_fetch_resp.records, records, + sizeof(records[0]) * num_records); + + ret = ath10k_htc_send(&ar->htc, ar->htt.eid, skb); + if (ret) { + ath10k_warn(ar, "failed to submit htc command: %d\n", ret); + goto err_free_resp_id; + } + + return 0; + +err_free_resp_id: + (void)resp_id; /* TODO: free resp_id */ + +err_free_skb: + dev_kfree_skb_any(skb); + + return ret; +} + static u8 ath10k_htt_tx_get_vdev_id(struct ath10k *ar, struct sk_buff *skb) { struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); From 299468782d94331f99a7eeb6e0d56598863be9fe Mon Sep 17 00:00:00 2001 From: Michal Kazior Date: Sun, 6 Mar 2016 16:14:34 +0200 Subject: [PATCH 0007/1649] ath10k: implement wake_tx_queue This implements very basic support for software queueing. It also contains some knobs that will be patched later. 
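The resulting flow, stripped of locking and error handling (see the mac.c hunks below for the real thing):

    mac80211 .wake_tx_queue(hw, txq)
        -> add the txq's ath10k_txq to ar->txqs (under ar->txqs_lock)
        -> tasklet_schedule(&ar->htt.txrx_compl_task)

    htt tx completion / txrx tasklet
        -> ath10k_mac_tx_push_pending(ar)
            -> ath10k_mac_tx_push_txq(hw, txq), up to 16 frames per txq:
                -> ath10k_htt_tx_inc_pending()
                -> ieee80211_tx_dequeue(hw, txq)
                -> ath10k_mac_tx(ar, vif, sta, txmode, txpath, skb)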
Signed-off-by: Michal Kazior Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/core.c | 2 + drivers/net/wireless/ath/ath10k/core.h | 7 ++ drivers/net/wireless/ath/ath10k/htt_rx.c | 3 + drivers/net/wireless/ath/ath10k/mac.c | 144 +++++++++++++++++++++++ drivers/net/wireless/ath/ath10k/mac.h | 1 + 5 files changed, 157 insertions(+) diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c index c84c2d30ef1f..6f606f8fce25 100644 --- a/drivers/net/wireless/ath/ath10k/core.c +++ b/drivers/net/wireless/ath/ath10k/core.c @@ -2048,7 +2048,9 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev, mutex_init(&ar->conf_mutex); spin_lock_init(&ar->data_lock); + spin_lock_init(&ar->txqs_lock); + INIT_LIST_HEAD(&ar->txqs); INIT_LIST_HEAD(&ar->peers); init_waitqueue_head(&ar->peer_mapping_wq); init_waitqueue_head(&ar->htt.empty_tx_wq); diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h index 5f447444fe97..7df35628ca35 100644 --- a/drivers/net/wireless/ath/ath10k/core.h +++ b/drivers/net/wireless/ath/ath10k/core.h @@ -308,6 +308,10 @@ struct ath10k_peer { struct ieee80211_key_conf *keys[WMI_MAX_KEY_INDEX + 1]; }; +struct ath10k_txq { + struct list_head list; +}; + struct ath10k_sta { struct ath10k_vif *arvif; @@ -791,7 +795,10 @@ struct ath10k { /* protects shared structure data */ spinlock_t data_lock; + /* protects: ar->txqs, artxq->list */ + spinlock_t txqs_lock; + struct list_head txqs; struct list_head arvifs; struct list_head peers; struct ath10k_peer *peer_map[ATH10K_MAX_NUM_PEER_IDS]; diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c index 8b367e98f929..4a25a1d63843 100644 --- a/drivers/net/wireless/ath/ath10k/htt_rx.c +++ b/drivers/net/wireless/ath/ath10k/htt_rx.c @@ -2242,6 +2242,7 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb) } ath10k_txrx_tx_unref(htt, &tx_done); + ath10k_mac_tx_push_pending(ar); break; } case HTT_T2H_MSG_TYPE_TX_COMPL_IND: @@ -2374,6 +2375,8 @@ static void ath10k_htt_txrx_compl_task(unsigned long ptr) dev_kfree_skb_any(skb); } + ath10k_mac_tx_push_pending(ar); + while ((skb = __skb_dequeue(&rx_q))) { resp = (struct htt_resp *)skb->data; spin_lock_bh(&htt->rx_ring.lock); diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index 4b69a373382b..74a42e3465e4 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -3620,6 +3620,123 @@ void ath10k_mgmt_over_wmi_tx_work(struct work_struct *work) } } +static void ath10k_mac_txq_init(struct ieee80211_txq *txq) +{ + struct ath10k_txq *artxq = (void *)txq->drv_priv; + + if (!txq) + return; + + INIT_LIST_HEAD(&artxq->list); +} + +static void ath10k_mac_txq_unref(struct ath10k *ar, struct ieee80211_txq *txq) +{ + struct ath10k_txq *artxq = (void *)txq->drv_priv; + + if (!txq) + return; + + spin_lock_bh(&ar->txqs_lock); + if (!list_empty(&artxq->list)) + list_del_init(&artxq->list); + spin_unlock_bh(&ar->txqs_lock); +} + +static bool ath10k_mac_tx_can_push(struct ieee80211_hw *hw, + struct ieee80211_txq *txq) +{ + return 1; /* TBD */ +} + +static int ath10k_mac_tx_push_txq(struct ieee80211_hw *hw, + struct ieee80211_txq *txq) +{ + const bool is_mgmt = false; + const bool is_presp = false; + struct ath10k *ar = hw->priv; + struct ath10k_htt *htt = &ar->htt; + struct ieee80211_vif *vif = txq->vif; + struct ieee80211_sta *sta = txq->sta; + enum ath10k_hw_txrx_mode txmode; + enum 
ath10k_mac_tx_path txpath; + struct sk_buff *skb; + int ret; + + spin_lock_bh(&ar->htt.tx_lock); + ret = ath10k_htt_tx_inc_pending(htt, is_mgmt, is_presp); + spin_unlock_bh(&ar->htt.tx_lock); + + if (ret) + return ret; + + skb = ieee80211_tx_dequeue(hw, txq); + if (!skb) { + spin_lock_bh(&ar->htt.tx_lock); + ath10k_htt_tx_dec_pending(htt, is_mgmt); + spin_unlock_bh(&ar->htt.tx_lock); + + return -ENOENT; + } + + ath10k_mac_tx_h_fill_cb(ar, vif, skb); + + txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb); + txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode); + + ret = ath10k_mac_tx(ar, vif, sta, txmode, txpath, skb); + if (unlikely(ret)) { + ath10k_warn(ar, "failed to push frame: %d\n", ret); + + spin_lock_bh(&ar->htt.tx_lock); + ath10k_htt_tx_dec_pending(htt, is_mgmt); + spin_unlock_bh(&ar->htt.tx_lock); + + return ret; + } + + return 0; +} + +void ath10k_mac_tx_push_pending(struct ath10k *ar) +{ + struct ieee80211_hw *hw = ar->hw; + struct ieee80211_txq *txq; + struct ath10k_txq *artxq; + struct ath10k_txq *last; + int ret; + int max; + + spin_lock_bh(&ar->txqs_lock); + rcu_read_lock(); + + last = list_last_entry(&ar->txqs, struct ath10k_txq, list); + while (!list_empty(&ar->txqs)) { + artxq = list_first_entry(&ar->txqs, struct ath10k_txq, list); + txq = container_of((void *)artxq, struct ieee80211_txq, + drv_priv); + + /* Prevent aggressive sta/tid taking over tx queue */ + max = 16; + while (max--) { + ret = ath10k_mac_tx_push_txq(hw, txq); + if (ret < 0) + break; + } + + list_del_init(&artxq->list); + + if (artxq == last || (ret < 0 && ret != -ENOENT)) { + if (ret != -ENOENT) + list_add_tail(&artxq->list, &ar->txqs); + break; + } + } + + rcu_read_unlock(); + spin_unlock_bh(&ar->txqs_lock); +} + /************/ /* Scanning */ /************/ @@ -3836,6 +3953,22 @@ static void ath10k_mac_op_tx(struct ieee80211_hw *hw, } } +static void ath10k_mac_op_wake_tx_queue(struct ieee80211_hw *hw, + struct ieee80211_txq *txq) +{ + struct ath10k *ar = hw->priv; + struct ath10k_txq *artxq = (void *)txq->drv_priv; + + if (ath10k_mac_tx_can_push(hw, txq)) { + spin_lock_bh(&ar->txqs_lock); + if (list_empty(&artxq->list)) + list_add_tail(&artxq->list, &ar->txqs); + spin_unlock_bh(&ar->txqs_lock); + + tasklet_schedule(&ar->htt.txrx_compl_task); + } +} + /* Must not be called with conf_mutex held as workers can use that also. 
*/ void ath10k_drain_tx(struct ath10k *ar) { @@ -4462,6 +4595,7 @@ static int ath10k_add_interface(struct ieee80211_hw *hw, mutex_lock(&ar->conf_mutex); memset(arvif, 0, sizeof(*arvif)); + ath10k_mac_txq_init(vif->txq); arvif->ar = ar; arvif->vif = vif; @@ -4860,6 +4994,8 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw, ath10k_mac_vif_tx_unlock_all(arvif); spin_unlock_bh(&ar->htt.tx_lock); + ath10k_mac_txq_unref(ar, vif->txq); + mutex_unlock(&ar->conf_mutex); } @@ -5573,6 +5709,9 @@ static int ath10k_sta_state(struct ieee80211_hw *hw, memset(arsta, 0, sizeof(*arsta)); arsta->arvif = arvif; INIT_WORK(&arsta->update_wk, ath10k_sta_rc_update_wk); + + for (i = 0; i < ARRAY_SIZE(sta->txq); i++) + ath10k_mac_txq_init(sta->txq[i]); } /* cancel must be done outside the mutex to avoid deadlock */ @@ -5710,6 +5849,9 @@ static int ath10k_sta_state(struct ieee80211_hw *hw, } spin_unlock_bh(&ar->data_lock); + for (i = 0; i < ARRAY_SIZE(sta->txq); i++) + ath10k_mac_txq_unref(ar, sta->txq[i]); + if (!sta->tdls) goto exit; @@ -7013,6 +7155,7 @@ ath10k_mac_op_switch_vif_chanctx(struct ieee80211_hw *hw, static const struct ieee80211_ops ath10k_ops = { .tx = ath10k_mac_op_tx, + .wake_tx_queue = ath10k_mac_op_wake_tx_queue, .start = ath10k_start, .stop = ath10k_stop, .config = ath10k_config, @@ -7467,6 +7610,7 @@ int ath10k_mac_register(struct ath10k *ar) ar->hw->vif_data_size = sizeof(struct ath10k_vif); ar->hw->sta_data_size = sizeof(struct ath10k_sta); + ar->hw->txq_data_size = sizeof(struct ath10k_txq); ar->hw->max_listen_interval = ATH10K_MAX_HW_LISTEN_INTERVAL; diff --git a/drivers/net/wireless/ath/ath10k/mac.h b/drivers/net/wireless/ath/ath10k/mac.h index 53091588090d..453f606a250e 100644 --- a/drivers/net/wireless/ath/ath10k/mac.h +++ b/drivers/net/wireless/ath/ath10k/mac.h @@ -75,6 +75,7 @@ void ath10k_mac_tx_unlock(struct ath10k *ar, int reason); void ath10k_mac_vif_tx_lock(struct ath10k_vif *arvif, int reason); void ath10k_mac_vif_tx_unlock(struct ath10k_vif *arvif, int reason); bool ath10k_mac_tx_frm_has_freq(struct ath10k *ar); +void ath10k_mac_tx_push_pending(struct ath10k *ar); static inline struct ath10k_vif *ath10k_vif_to_arvif(struct ieee80211_vif *vif) { From c1a43d9720d8dcde1eb735f6cbdba181e564ec20 Mon Sep 17 00:00:00 2001 From: Michal Kazior Date: Sun, 6 Mar 2016 16:14:36 +0200 Subject: [PATCH 0008/1649] ath10k: implement updating shared htt txq state Firmware 10.4.3 onwards can support a pull-push Tx model where it shares a Tx queue state with the host. The host updates the DMA region it pointed to during HTT setup whenever number of software queued from (on host) changes. Based on this information firmware issues fetch requests to the host telling the host how many frames from a list of given stations/tids should be submitted to the firmware. The code won't be called because not all appropriate HTT events are processed yet. 
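The per-queue byte count is compressed into a single (exp, factor) byte meaning roughly factor << (7 + 3 * exp). Worked example, following ath10k_htt_tx_txq_calc_size() below:

    byte_cnt = 20000
    factor   = 20000 >> 7 = 156   (>= 64, so factor >>= 3 -> 19, exp -> 1)
    encoded (exp = 1, factor = 19) stands for ~19 << 10 = 19456 bytes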
Signed-off-by: Michal Kazior Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/htt.h | 3 + drivers/net/wireless/ath/ath10k/htt_tx.c | 104 +++++++++++++++++++++++ drivers/net/wireless/ath/ath10k/mac.c | 3 + 3 files changed, 110 insertions(+) diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h index 65dcd22f31df..b1e40f44e76b 100644 --- a/drivers/net/wireless/ath/ath10k/htt.h +++ b/drivers/net/wireless/ath/ath10k/htt.h @@ -1667,6 +1667,7 @@ struct ath10k_htt { } txbuf; struct { + bool enabled; struct htt_q_state *vaddr; dma_addr_t paddr; u16 num_peers; @@ -1758,6 +1759,8 @@ int ath10k_htt_tx_fetch_resp(struct ath10k *ar, struct htt_tx_fetch_record *records, size_t num_records); +void ath10k_htt_tx_txq_update(struct ieee80211_hw *hw, + struct ieee80211_txq *txq); void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt, bool is_mgmt); int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt, diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c index 225f0561b3fd..6643be8692b5 100644 --- a/drivers/net/wireless/ath/ath10k/htt_tx.c +++ b/drivers/net/wireless/ath/ath10k/htt_tx.c @@ -22,6 +22,110 @@ #include "txrx.h" #include "debug.h" +static u8 ath10k_htt_tx_txq_calc_size(size_t count) +{ + int exp; + int factor; + + exp = 0; + factor = count >> 7; + + while (factor >= 64 && exp < 4) { + factor >>= 3; + exp++; + } + + if (exp == 4) + return 0xff; + + if (count > 0) + factor = max(1, factor); + + return SM(exp, HTT_TX_Q_STATE_ENTRY_EXP) | + SM(factor, HTT_TX_Q_STATE_ENTRY_FACTOR); +} + +static void __ath10k_htt_tx_txq_recalc(struct ieee80211_hw *hw, + struct ieee80211_txq *txq) +{ + struct ath10k *ar = hw->priv; + struct ath10k_sta *arsta = (void *)txq->sta->drv_priv; + struct ath10k_vif *arvif = (void *)txq->vif->drv_priv; + unsigned long frame_cnt; + unsigned long byte_cnt; + int idx; + u32 bit; + u16 peer_id; + u8 tid; + u8 count; + + lockdep_assert_held(&ar->htt.tx_lock); + + if (!ar->htt.tx_q_state.enabled) + return; + + if (txq->sta) + peer_id = arsta->peer_id; + else + peer_id = arvif->peer_id; + + tid = txq->tid; + bit = BIT(peer_id % 32); + idx = peer_id / 32; + + ieee80211_txq_get_depth(txq, &frame_cnt, &byte_cnt); + count = ath10k_htt_tx_txq_calc_size(byte_cnt); + + if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) || + unlikely(tid >= ar->htt.tx_q_state.num_tids)) { + ath10k_warn(ar, "refusing to update txq for peer_id %hu tid %hhu due to out of bounds\n", + peer_id, tid); + return; + } + + ar->htt.tx_q_state.vaddr->count[tid][peer_id] = count; + ar->htt.tx_q_state.vaddr->map[tid][idx] &= ~bit; + ar->htt.tx_q_state.vaddr->map[tid][idx] |= count ? 
bit : 0; + + ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx txq state update peer_id %hu tid %hhu count %hhu\n", + peer_id, tid, count); +} + +static void __ath10k_htt_tx_txq_sync(struct ath10k *ar) +{ + u32 seq; + size_t size; + + lockdep_assert_held(&ar->htt.tx_lock); + + if (!ar->htt.tx_q_state.enabled) + return; + + seq = le32_to_cpu(ar->htt.tx_q_state.vaddr->seq); + seq++; + ar->htt.tx_q_state.vaddr->seq = cpu_to_le32(seq); + + ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx txq state update commit seq %u\n", + seq); + + size = sizeof(*ar->htt.tx_q_state.vaddr); + dma_sync_single_for_device(ar->dev, + ar->htt.tx_q_state.paddr, + size, + DMA_TO_DEVICE); +} + +void ath10k_htt_tx_txq_update(struct ieee80211_hw *hw, + struct ieee80211_txq *txq) +{ + struct ath10k *ar = hw->priv; + + spin_lock_bh(&ar->htt.tx_lock); + __ath10k_htt_tx_txq_recalc(hw, txq); + __ath10k_htt_tx_txq_sync(ar); + spin_unlock_bh(&ar->htt.tx_lock); +} + void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt, bool is_mgmt) { diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index 74a42e3465e4..900c64b65b43 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -3725,6 +3725,7 @@ void ath10k_mac_tx_push_pending(struct ath10k *ar) } list_del_init(&artxq->list); + ath10k_htt_tx_txq_update(hw, txq); if (artxq == last || (ret < 0 && ret != -ENOENT)) { if (ret != -ENOENT) @@ -3967,6 +3968,8 @@ static void ath10k_mac_op_wake_tx_queue(struct ieee80211_hw *hw, tasklet_schedule(&ar->htt.txrx_compl_task); } + + ath10k_htt_tx_txq_update(hw, txq); } /* Must not be called with conf_mutex held as workers can use that also. */ From dd4717b6f45e70b609d4282667eb0a89f9660268 Mon Sep 17 00:00:00 2001 From: Michal Kazior Date: Sun, 6 Mar 2016 16:14:39 +0200 Subject: [PATCH 0009/1649] ath10k: store txq in skb_cb This will be necessary for later. 
Signed-off-by: Michal Kazior Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/core.h | 1 + drivers/net/wireless/ath/ath10k/mac.c | 19 +++++++++++++++++-- 2 files changed, 18 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h index 7df35628ca35..89f789f3e5b4 100644 --- a/drivers/net/wireless/ath/ath10k/core.h +++ b/drivers/net/wireless/ath/ath10k/core.h @@ -98,6 +98,7 @@ struct ath10k_skb_cb { u8 eid; u16 msdu_id; struct ieee80211_vif *vif; + struct ieee80211_txq *txq; } __packed; struct ath10k_skb_rxcb { diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index 900c64b65b43..8d02d53fdc2c 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -3307,6 +3307,7 @@ static void ath10k_tx_h_add_p2p_noa_ie(struct ath10k *ar, static void ath10k_mac_tx_h_fill_cb(struct ath10k *ar, struct ieee80211_vif *vif, + struct ieee80211_txq *txq, struct sk_buff *skb) { struct ieee80211_hdr *hdr = (void *)skb->data; @@ -3323,6 +3324,7 @@ static void ath10k_mac_tx_h_fill_cb(struct ath10k *ar, cb->flags |= ATH10K_SKB_F_QOS; cb->vif = vif; + cb->txq = txq; } bool ath10k_mac_tx_frm_has_freq(struct ath10k *ar) @@ -3633,6 +3635,9 @@ static void ath10k_mac_txq_init(struct ieee80211_txq *txq) static void ath10k_mac_txq_unref(struct ath10k *ar, struct ieee80211_txq *txq) { struct ath10k_txq *artxq = (void *)txq->drv_priv; + struct ath10k_skb_cb *cb; + struct sk_buff *msdu; + int msdu_id; if (!txq) return; @@ -3641,6 +3646,14 @@ static void ath10k_mac_txq_unref(struct ath10k *ar, struct ieee80211_txq *txq) if (!list_empty(&artxq->list)) list_del_init(&artxq->list); spin_unlock_bh(&ar->txqs_lock); + + spin_lock_bh(&ar->htt.tx_lock); + idr_for_each_entry(&ar->htt.pending_tx, msdu, msdu_id) { + cb = ATH10K_SKB_CB(msdu); + if (cb->txq == txq) + cb->txq = NULL; + } + spin_unlock_bh(&ar->htt.tx_lock); } static bool ath10k_mac_tx_can_push(struct ieee80211_hw *hw, @@ -3679,7 +3692,7 @@ static int ath10k_mac_tx_push_txq(struct ieee80211_hw *hw, return -ENOENT; } - ath10k_mac_tx_h_fill_cb(ar, vif, skb); + ath10k_mac_tx_h_fill_cb(ar, vif, txq, skb); txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb); txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode); @@ -3909,6 +3922,7 @@ static void ath10k_mac_op_tx(struct ieee80211_hw *hw, struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_vif *vif = info->control.vif; struct ieee80211_sta *sta = control->sta; + struct ieee80211_txq *txq = NULL; struct ieee80211_hdr *hdr = (void *)skb->data; enum ath10k_hw_txrx_mode txmode; enum ath10k_mac_tx_path txpath; @@ -3917,7 +3931,7 @@ static void ath10k_mac_op_tx(struct ieee80211_hw *hw, bool is_presp; int ret; - ath10k_mac_tx_h_fill_cb(ar, vif, skb); + ath10k_mac_tx_h_fill_cb(ar, vif, txq, skb); txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb); txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode); @@ -4985,6 +4999,7 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw, spin_unlock_bh(&ar->data_lock); ath10k_peer_cleanup(ar, arvif->vdev_id); + ath10k_mac_txq_unref(ar, vif->txq); if (vif->type == NL80211_IFTYPE_MONITOR) { ar->monitor_arvif = NULL; From 3cc0fef6170dce8e7d4ec29afb4f34267fb9bf14 Mon Sep 17 00:00:00 2001 From: Michal Kazior Date: Sun, 6 Mar 2016 16:14:41 +0200 Subject: [PATCH 0010/1649] ath10k: keep track of queue depth per txq This will be necessary for later. 
Signed-off-by: Michal Kazior Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/core.h | 1 + drivers/net/wireless/ath/ath10k/mac.c | 5 +++++ drivers/net/wireless/ath/ath10k/txrx.c | 7 +++++++ 3 files changed, 13 insertions(+) diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h index 89f789f3e5b4..926ecb2244a5 100644 --- a/drivers/net/wireless/ath/ath10k/core.h +++ b/drivers/net/wireless/ath/ath10k/core.h @@ -311,6 +311,7 @@ struct ath10k_peer { struct ath10k_txq { struct list_head list; + unsigned long num_fw_queued; }; struct ath10k_sta { diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index 8d02d53fdc2c..5bf614f1f75a 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -3669,6 +3669,7 @@ static int ath10k_mac_tx_push_txq(struct ieee80211_hw *hw, const bool is_presp = false; struct ath10k *ar = hw->priv; struct ath10k_htt *htt = &ar->htt; + struct ath10k_txq *artxq = (void *)txq->drv_priv; struct ieee80211_vif *vif = txq->vif; struct ieee80211_sta *sta = txq->sta; enum ath10k_hw_txrx_mode txmode; @@ -3708,6 +3709,10 @@ static int ath10k_mac_tx_push_txq(struct ieee80211_hw *hw, return ret; } + spin_lock_bh(&ar->htt.tx_lock); + artxq->num_fw_queued++; + spin_unlock_bh(&ar->htt.tx_lock); + return 0; } diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c index 202e5192235b..ea4d3000c8c3 100644 --- a/drivers/net/wireless/ath/ath10k/txrx.c +++ b/drivers/net/wireless/ath/ath10k/txrx.c @@ -55,7 +55,9 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt, struct ath10k *ar = htt->ar; struct device *dev = ar->dev; struct ieee80211_tx_info *info; + struct ieee80211_txq *txq; struct ath10k_skb_cb *skb_cb; + struct ath10k_txq *artxq; struct sk_buff *msdu; bool limit_mgmt_desc = false; @@ -80,11 +82,16 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt, } skb_cb = ATH10K_SKB_CB(msdu); + txq = skb_cb->txq; + artxq = (void *)txq->drv_priv; if (unlikely(skb_cb->flags & ATH10K_SKB_F_MGMT) && ar->hw_params.max_probe_resp_desc_thres) limit_mgmt_desc = true; + if (txq) + artxq->num_fw_queued--; + ath10k_htt_tx_free_msdu_id(htt, tx_done->msdu_id); ath10k_htt_tx_dec_pending(htt, limit_mgmt_desc); if (htt->num_pending_tx == 0) From 426e10eaf76d7229ed6c2978f0d473d04ba0b377 Mon Sep 17 00:00:00 2001 From: Michal Kazior Date: Sun, 6 Mar 2016 16:14:43 +0200 Subject: [PATCH 0011/1649] ath10k: implement push-pull tx The current/old tx path design was that host, at its own leisure, pushed tx frames to the device. For HTT there was ~1000-1400 msdu queue depth. After reaching that limit the driver would request mac80211 to stop queues. There was little control over what packets got in there as far as DA/RA was considered so it was rather easy to starve per-station traffic flows. With MU-MIMO this became a significant problem because the queue depth was insufficient to buffer frames from multiple clients (which could have different signal quality and capabilities) in an efficient fashion. Hence the new tx path in 10.4 was introduced: a pull-push mode. Firmware and host can share tx queue state via DMA. The state is logically a 2 dimensional array addressed via peer_id+tid pair. Each entry is a counter (either number of bytes or packets. Host keeps it updated and firmware uses it for scheduling Tx pull requests to host. This allows MU-MIMO to become a lot more effective with 10+ clients. 
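Gist of the shared state as used by the txq update helpers added earlier (the exact struct htt_q_state field types and limits live in htt.h and are not reproduced here):

    count[tid][peer_id]      encoded queue depth for that peer/tid
    map[tid][peer_id / 32]   bitmap of queues with pending traffic
    seq                      bumped by the host after every update

Firmware reads these counters and answers with HTT_T2H_MSG_TYPE_TX_FETCH_IND records telling the host how many msdus/bytes it may push for which peer_id/tid; the host replies with HTT_H2T_MSG_TYPE_TX_FETCH_RESP carrying the numbers it actually managed to dequeue.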
Signed-off-by: Michal Kazior Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/core.h | 1 + drivers/net/wireless/ath/ath10k/htt.h | 6 ++ drivers/net/wireless/ath/ath10k/htt_rx.c | 113 +++++++++++++++++++++-- drivers/net/wireless/ath/ath10k/htt_tx.c | 39 ++++++-- drivers/net/wireless/ath/ath10k/mac.c | 44 ++++++++- drivers/net/wireless/ath/ath10k/mac.h | 5 + 6 files changed, 186 insertions(+), 22 deletions(-) diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h index 926ecb2244a5..3050e497fd22 100644 --- a/drivers/net/wireless/ath/ath10k/core.h +++ b/drivers/net/wireless/ath/ath10k/core.h @@ -312,6 +312,7 @@ struct ath10k_peer { struct ath10k_txq { struct list_head list; unsigned long num_fw_queued; + unsigned long num_push_allowed; }; struct ath10k_sta { diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h index b1e40f44e76b..02cf55d306e8 100644 --- a/drivers/net/wireless/ath/ath10k/htt.h +++ b/drivers/net/wireless/ath/ath10k/htt.h @@ -1652,6 +1652,7 @@ struct ath10k_htt { struct sk_buff_head tx_compl_q; struct sk_buff_head rx_compl_q; struct sk_buff_head rx_in_ord_compl_q; + struct sk_buff_head tx_fetch_ind_q; /* rx_status template */ struct ieee80211_rx_status rx_status; @@ -1670,8 +1671,10 @@ struct ath10k_htt { bool enabled; struct htt_q_state *vaddr; dma_addr_t paddr; + u16 num_push_allowed; u16 num_peers; u16 num_tids; + enum htt_tx_mode_switch_mode mode; enum htt_q_depth_type type; } tx_q_state; }; @@ -1761,6 +1764,9 @@ int ath10k_htt_tx_fetch_resp(struct ath10k *ar, void ath10k_htt_tx_txq_update(struct ieee80211_hw *hw, struct ieee80211_txq *txq); +void ath10k_htt_tx_txq_recalc(struct ieee80211_hw *hw, + struct ieee80211_txq *txq); +void ath10k_htt_tx_txq_sync(struct ath10k *ar); void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt, bool is_mgmt); int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt, diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c index 4a25a1d63843..40f969c72de8 100644 --- a/drivers/net/wireless/ath/ath10k/htt_rx.c +++ b/drivers/net/wireless/ath/ath10k/htt_rx.c @@ -229,6 +229,7 @@ void ath10k_htt_rx_free(struct ath10k_htt *htt) skb_queue_purge(&htt->tx_compl_q); skb_queue_purge(&htt->rx_compl_q); skb_queue_purge(&htt->rx_in_ord_compl_q); + skb_queue_purge(&htt->tx_fetch_ind_q); ath10k_htt_rx_ring_free(htt); @@ -569,6 +570,7 @@ int ath10k_htt_rx_alloc(struct ath10k_htt *htt) skb_queue_head_init(&htt->tx_compl_q); skb_queue_head_init(&htt->rx_compl_q); skb_queue_head_init(&htt->rx_in_ord_compl_q); + skb_queue_head_init(&htt->tx_fetch_ind_q); tasklet_init(&htt->txrx_compl_task, ath10k_htt_txrx_compl_task, (unsigned long)htt); @@ -2004,16 +2006,21 @@ static void ath10k_htt_rx_tx_fetch_resp_id_confirm(struct ath10k *ar, static void ath10k_htt_rx_tx_fetch_ind(struct ath10k *ar, struct sk_buff *skb) { + struct ieee80211_hw *hw = ar->hw; + struct ieee80211_txq *txq; struct htt_resp *resp = (struct htt_resp *)skb->data; struct htt_tx_fetch_record *record; size_t len; size_t max_num_bytes; size_t max_num_msdus; + size_t num_bytes; + size_t num_msdus; const __le32 *resp_ids; u16 num_records; u16 num_resp_ids; u16 peer_id; u8 tid; + int ret; int i; ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch ind\n"); @@ -2039,7 +2046,17 @@ static void ath10k_htt_rx_tx_fetch_ind(struct ath10k *ar, struct sk_buff *skb) num_records, num_resp_ids, le16_to_cpu(resp->tx_fetch_ind.fetch_seq_num)); - /* TODO: runtime sanity checks */ + if 
(!ar->htt.tx_q_state.enabled) { + ath10k_warn(ar, "received unexpected tx_fetch_ind event: not enabled\n"); + return; + } + + if (ar->htt.tx_q_state.mode == HTT_TX_MODE_SWITCH_PUSH) { + ath10k_warn(ar, "received unexpected tx_fetch_ind event: in push mode\n"); + return; + } + + rcu_read_lock(); for (i = 0; i < num_records; i++) { record = &resp->tx_fetch_ind.records[i]; @@ -2060,13 +2077,56 @@ static void ath10k_htt_rx_tx_fetch_ind(struct ath10k *ar, struct sk_buff *skb) continue; } - /* TODO: dequeue and submit tx to device */ + spin_lock_bh(&ar->data_lock); + txq = ath10k_mac_txq_lookup(ar, peer_id, tid); + spin_unlock_bh(&ar->data_lock); + + /* It is okay to release the lock and use txq because RCU read + * lock is held. + */ + + if (unlikely(!txq)) { + ath10k_warn(ar, "failed to lookup txq for peer_id %hu tid %hhu\n", + peer_id, tid); + continue; + } + + num_msdus = 0; + num_bytes = 0; + + while (num_msdus < max_num_msdus && + num_bytes < max_num_bytes) { + ret = ath10k_mac_tx_push_txq(hw, txq); + if (ret < 0) + break; + + num_msdus++; + num_bytes += ret; + } + + record->num_msdus = cpu_to_le16(num_msdus); + record->num_bytes = cpu_to_le32(num_bytes); + + ath10k_htt_tx_txq_recalc(hw, txq); } + rcu_read_unlock(); + resp_ids = ath10k_htt_get_tx_fetch_ind_resp_ids(&resp->tx_fetch_ind); ath10k_htt_rx_tx_fetch_resp_id_confirm(ar, resp_ids, num_resp_ids); - /* TODO: generate and send fetch response to device */ + ret = ath10k_htt_tx_fetch_resp(ar, + resp->tx_fetch_ind.token, + resp->tx_fetch_ind.fetch_seq_num, + resp->tx_fetch_ind.records, + num_records); + if (unlikely(ret)) { + ath10k_warn(ar, "failed to submit tx fetch resp for token 0x%08x: %d\n", + le32_to_cpu(resp->tx_fetch_ind.token), ret); + /* FIXME: request fw restart */ + } + + ath10k_htt_tx_txq_sync(ar); } static void ath10k_htt_rx_tx_fetch_confirm(struct ath10k *ar, @@ -2102,6 +2162,8 @@ static void ath10k_htt_rx_tx_mode_switch_ind(struct ath10k *ar, { const struct htt_resp *resp = (void *)skb->data; const struct htt_tx_mode_switch_record *record; + struct ieee80211_txq *txq; + struct ath10k_txq *artxq; size_t len; size_t num_records; enum htt_tx_mode_switch_mode mode; @@ -2153,7 +2215,11 @@ static void ath10k_htt_rx_tx_mode_switch_ind(struct ath10k *ar, if (!enable) return; - /* TODO: apply configuration */ + ar->htt.tx_q_state.enabled = enable; + ar->htt.tx_q_state.mode = mode; + ar->htt.tx_q_state.num_push_allowed = threshold; + + rcu_read_lock(); for (i = 0; i < num_records; i++) { record = &resp->tx_mode_switch_ind.records[i]; @@ -2168,10 +2234,29 @@ static void ath10k_htt_rx_tx_mode_switch_ind(struct ath10k *ar, continue; } - /* TODO: apply configuration */ + spin_lock_bh(&ar->data_lock); + txq = ath10k_mac_txq_lookup(ar, peer_id, tid); + spin_unlock_bh(&ar->data_lock); + + /* It is okay to release the lock and use txq because RCU read + * lock is held. 
+ */ + + if (unlikely(!txq)) { + ath10k_warn(ar, "failed to lookup txq for peer_id %hu tid %hhu\n", + peer_id, tid); + continue; + } + + spin_lock_bh(&ar->htt.tx_lock); + artxq = (void *)txq->drv_priv; + artxq->num_push_allowed = le16_to_cpu(record->num_max_msdus); + spin_unlock_bh(&ar->htt.tx_lock); } - /* TODO: apply configuration */ + rcu_read_unlock(); + + ath10k_mac_tx_push_pending(ar); } void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb) @@ -2313,8 +2398,9 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb) case HTT_T2H_MSG_TYPE_AGGR_CONF: break; case HTT_T2H_MSG_TYPE_TX_FETCH_IND: - ath10k_htt_rx_tx_fetch_ind(ar, skb); - break; + skb_queue_tail(&htt->tx_fetch_ind_q, skb); + tasklet_schedule(&htt->txrx_compl_task); + return; case HTT_T2H_MSG_TYPE_TX_FETCH_CONFIRM: ath10k_htt_rx_tx_fetch_confirm(ar, skb); break; @@ -2350,6 +2436,7 @@ static void ath10k_htt_txrx_compl_task(unsigned long ptr) struct sk_buff_head tx_q; struct sk_buff_head rx_q; struct sk_buff_head rx_ind_q; + struct sk_buff_head tx_ind_q; struct htt_resp *resp; struct sk_buff *skb; unsigned long flags; @@ -2357,6 +2444,7 @@ static void ath10k_htt_txrx_compl_task(unsigned long ptr) __skb_queue_head_init(&tx_q); __skb_queue_head_init(&rx_q); __skb_queue_head_init(&rx_ind_q); + __skb_queue_head_init(&tx_ind_q); spin_lock_irqsave(&htt->tx_compl_q.lock, flags); skb_queue_splice_init(&htt->tx_compl_q, &tx_q); @@ -2370,11 +2458,20 @@ static void ath10k_htt_txrx_compl_task(unsigned long ptr) skb_queue_splice_init(&htt->rx_in_ord_compl_q, &rx_ind_q); spin_unlock_irqrestore(&htt->rx_in_ord_compl_q.lock, flags); + spin_lock_irqsave(&htt->tx_fetch_ind_q.lock, flags); + skb_queue_splice_init(&htt->tx_fetch_ind_q, &tx_ind_q); + spin_unlock_irqrestore(&htt->tx_fetch_ind_q.lock, flags); + while ((skb = __skb_dequeue(&tx_q))) { ath10k_htt_rx_frm_tx_compl(htt->ar, skb); dev_kfree_skb_any(skb); } + while ((skb = __skb_dequeue(&tx_ind_q))) { + ath10k_htt_rx_tx_fetch_ind(ar, skb); + dev_kfree_skb_any(skb); + } + ath10k_mac_tx_push_pending(ar); while ((skb = __skb_dequeue(&rx_q))) { diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c index 6643be8692b5..a30c34eae0a7 100644 --- a/drivers/net/wireless/ath/ath10k/htt_tx.c +++ b/drivers/net/wireless/ath/ath10k/htt_tx.c @@ -64,6 +64,9 @@ static void __ath10k_htt_tx_txq_recalc(struct ieee80211_hw *hw, if (!ar->htt.tx_q_state.enabled) return; + if (ar->htt.tx_q_state.mode != HTT_TX_MODE_SWITCH_PUSH_PULL) + return; + if (txq->sta) peer_id = arsta->peer_id; else @@ -101,6 +104,9 @@ static void __ath10k_htt_tx_txq_sync(struct ath10k *ar) if (!ar->htt.tx_q_state.enabled) return; + if (ar->htt.tx_q_state.mode != HTT_TX_MODE_SWITCH_PUSH_PULL) + return; + seq = le32_to_cpu(ar->htt.tx_q_state.vaddr->seq); seq++; ar->htt.tx_q_state.vaddr->seq = cpu_to_le32(seq); @@ -115,6 +121,23 @@ static void __ath10k_htt_tx_txq_sync(struct ath10k *ar) DMA_TO_DEVICE); } +void ath10k_htt_tx_txq_recalc(struct ieee80211_hw *hw, + struct ieee80211_txq *txq) +{ + struct ath10k *ar = hw->priv; + + spin_lock_bh(&ar->htt.tx_lock); + __ath10k_htt_tx_txq_recalc(hw, txq); + spin_unlock_bh(&ar->htt.tx_lock); +} + +void ath10k_htt_tx_txq_sync(struct ath10k *ar) +{ + spin_lock_bh(&ar->htt.tx_lock); + __ath10k_htt_tx_txq_sync(ar); + spin_unlock_bh(&ar->htt.tx_lock); +} + void ath10k_htt_tx_txq_update(struct ieee80211_hw *hw, struct ieee80211_txq *txq) { @@ -638,10 +661,14 @@ int ath10k_htt_tx_fetch_resp(struct ath10k *ar, { struct sk_buff *skb; 
struct htt_cmd *cmd; - u16 resp_id; + const u16 resp_id = 0; int len = 0; int ret; + /* Response IDs are echo-ed back only for host driver convienence + * purposes. They aren't used for anything in the driver yet so use 0. + */ + len += sizeof(cmd->hdr); len += sizeof(cmd->tx_fetch_resp); len += sizeof(cmd->tx_fetch_resp.records[0]) * num_records; @@ -650,11 +677,6 @@ int ath10k_htt_tx_fetch_resp(struct ath10k *ar, if (!skb) return -ENOMEM; - resp_id = 0; /* TODO: allocate resp_id */ - ret = 0; - if (ret) - goto err_free_skb; - skb_put(skb, len); cmd = (struct htt_cmd *)skb->data; cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FETCH_RESP; @@ -669,14 +691,11 @@ int ath10k_htt_tx_fetch_resp(struct ath10k *ar, ret = ath10k_htc_send(&ar->htc, ar->htt.eid, skb); if (ret) { ath10k_warn(ar, "failed to submit htc command: %d\n", ret); - goto err_free_resp_id; + goto err_free_skb; } return 0; -err_free_resp_id: - (void)resp_id; /* TODO: free resp_id */ - err_free_skb: dev_kfree_skb_any(skb); diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index 5bf614f1f75a..4a27f27a8e8b 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -3656,14 +3656,48 @@ static void ath10k_mac_txq_unref(struct ath10k *ar, struct ieee80211_txq *txq) spin_unlock_bh(&ar->htt.tx_lock); } +struct ieee80211_txq *ath10k_mac_txq_lookup(struct ath10k *ar, + u16 peer_id, + u8 tid) +{ + struct ath10k_peer *peer; + + lockdep_assert_held(&ar->data_lock); + + peer = ar->peer_map[peer_id]; + if (!peer) + return NULL; + + if (peer->sta) + return peer->sta->txq[tid]; + else if (peer->vif) + return peer->vif->txq; + else + return NULL; +} + static bool ath10k_mac_tx_can_push(struct ieee80211_hw *hw, struct ieee80211_txq *txq) { - return 1; /* TBD */ + struct ath10k *ar = hw->priv; + struct ath10k_txq *artxq = (void *)txq->drv_priv; + + /* No need to get locks */ + + if (ar->htt.tx_q_state.mode == HTT_TX_MODE_SWITCH_PUSH) + return true; + + if (ar->htt.num_pending_tx < ar->htt.tx_q_state.num_push_allowed) + return true; + + if (artxq->num_fw_queued < artxq->num_push_allowed) + return true; + + return false; } -static int ath10k_mac_tx_push_txq(struct ieee80211_hw *hw, - struct ieee80211_txq *txq) +int ath10k_mac_tx_push_txq(struct ieee80211_hw *hw, + struct ieee80211_txq *txq) { const bool is_mgmt = false; const bool is_presp = false; @@ -3675,6 +3709,7 @@ static int ath10k_mac_tx_push_txq(struct ieee80211_hw *hw, enum ath10k_hw_txrx_mode txmode; enum ath10k_mac_tx_path txpath; struct sk_buff *skb; + size_t skb_len; int ret; spin_lock_bh(&ar->htt.tx_lock); @@ -3695,6 +3730,7 @@ static int ath10k_mac_tx_push_txq(struct ieee80211_hw *hw, ath10k_mac_tx_h_fill_cb(ar, vif, txq, skb); + skb_len = skb->len; txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb); txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode); @@ -3713,7 +3749,7 @@ static int ath10k_mac_tx_push_txq(struct ieee80211_hw *hw, artxq->num_fw_queued++; spin_unlock_bh(&ar->htt.tx_lock); - return 0; + return skb_len; } void ath10k_mac_tx_push_pending(struct ath10k *ar) diff --git a/drivers/net/wireless/ath/ath10k/mac.h b/drivers/net/wireless/ath/ath10k/mac.h index 453f606a250e..2c3327beb445 100644 --- a/drivers/net/wireless/ath/ath10k/mac.h +++ b/drivers/net/wireless/ath/ath10k/mac.h @@ -76,6 +76,11 @@ void ath10k_mac_vif_tx_lock(struct ath10k_vif *arvif, int reason); void ath10k_mac_vif_tx_unlock(struct ath10k_vif *arvif, int reason); bool ath10k_mac_tx_frm_has_freq(struct ath10k *ar); void 
ath10k_mac_tx_push_pending(struct ath10k *ar); +int ath10k_mac_tx_push_txq(struct ieee80211_hw *hw, + struct ieee80211_txq *txq); +struct ieee80211_txq *ath10k_mac_txq_lookup(struct ath10k *ar, + u16 peer_id, + u8 tid); static inline struct ath10k_vif *ath10k_vif_to_arvif(struct ieee80211_vif *vif) { From 43c9e3846ba30ca3d657bd82f1005d1573bb3a6d Mon Sep 17 00:00:00 2001 From: Michal Kazior Date: Tue, 1 Mar 2016 13:16:10 +0100 Subject: [PATCH 0012/1649] ath10k: fix HTT Tx CE ring size QCA4019 can queue up to 2500 frames at a time. This means it requires roughly 5000 entires on the ring to work properly. Otherwise random tx failure may occur. Signed-off-by: Michal Kazior Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/ce.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/ath/ath10k/ce.h b/drivers/net/wireless/ath/ath10k/ce.h index 47b734ce7ecf..dac676817532 100644 --- a/drivers/net/wireless/ath/ath10k/ce.h +++ b/drivers/net/wireless/ath/ath10k/ce.h @@ -22,7 +22,7 @@ /* Maximum number of Copy Engine's supported */ #define CE_COUNT_MAX 12 -#define CE_HTT_H2T_MSG_SRC_NENTRIES 4096 +#define CE_HTT_H2T_MSG_SRC_NENTRIES 8192 /* Descriptor rings must be aligned to this boundary */ #define CE_DESC_RING_ALIGN 8 From 99ad1cba313fc86797bca55d64e7c6c809098511 Mon Sep 17 00:00:00 2001 From: Michal Kazior Date: Tue, 1 Mar 2016 13:16:11 +0100 Subject: [PATCH 0013/1649] ath10k: change htt tx desc/qcache peer limit config The number of HTT Tx descriptors and qcache peer limit aren't hw-specific. In fact they are firmware specific and should not be placed in hw_params. The QCA4019 limits were submitted with the peer flow control firmware only and to my understanding there's no non-peer-flow-ctrl QCA4019 firmware. However QCA99X0 is planned to run firmware supporting the feature as well. Therefore this patch enables QCA99X0 to use 2500 tx descriptors whenever possible instead of just 1424. 
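Concretely, the init-time choice becomes the snippet below (constants as defined in hw.h by this patch). The numbers also line up with the CE ring bump in the previous patch, assuming the HTT tx path consumes roughly two CE source-ring entries per queued MSDU (that factor is an assumption): 2500 * 2 is about 5000, rounded up to the power-of-two ring size 8192.

	if (test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL, ar->fw_features))
		ar->htt.max_num_pending_tx = TARGET_10_4_NUM_MSDU_DESC_PFC;	/* 2500 */
	else
		ar->htt.max_num_pending_tx = TARGET_10_4_NUM_MSDU_DESC;	/* 1024 + 400 */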
Signed-off-by: Michal Kazior Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/core.c | 11 ++++++----- drivers/net/wireless/ath/ath10k/core.h | 2 -- drivers/net/wireless/ath/ath10k/hw.h | 4 ++++ drivers/net/wireless/ath/ath10k/wmi.c | 10 ++++++++-- 4 files changed, 18 insertions(+), 9 deletions(-) diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c index 6f606f8fce25..2389c0713c13 100644 --- a/drivers/net/wireless/ath/ath10k/core.c +++ b/drivers/net/wireless/ath/ath10k/core.c @@ -156,8 +156,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .channel_counters_freq_hz = 150000, .max_probe_resp_desc_thres = 24, .hw_4addr_pad = ATH10K_HW_4ADDR_PAD_BEFORE, - .num_msdu_desc = 1424, - .qcache_active_peers = 50, .tx_chain_mask = 0xf, .rx_chain_mask = 0xf, .max_spatial_stream = 4, @@ -217,8 +215,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .channel_counters_freq_hz = 125000, .max_probe_resp_desc_thres = 24, .hw_4addr_pad = ATH10K_HW_4ADDR_PAD_BEFORE, - .num_msdu_desc = 2500, - .qcache_active_peers = 35, .tx_chain_mask = 0x3, .rx_chain_mask = 0x3, .max_spatial_stream = 2, @@ -1538,9 +1534,14 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar) ar->num_active_peers = TARGET_10_4_ACTIVE_PEERS; ar->max_num_vdevs = TARGET_10_4_NUM_VDEVS; ar->num_tids = TARGET_10_4_TGT_NUM_TIDS; - ar->htt.max_num_pending_tx = ar->hw_params.num_msdu_desc; ar->fw_stats_req_mask = WMI_STAT_PEER; ar->max_spatial_stream = ar->hw_params.max_spatial_stream; + + if (test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL, + ar->fw_features)) + ar->htt.max_num_pending_tx = TARGET_10_4_NUM_MSDU_DESC_PFC; + else + ar->htt.max_num_pending_tx = TARGET_10_4_NUM_MSDU_DESC; break; case ATH10K_FW_WMI_OP_VERSION_UNSET: case ATH10K_FW_WMI_OP_VERSION_MAX: diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h index 3050e497fd22..23ba03fb7a5f 100644 --- a/drivers/net/wireless/ath/ath10k/core.h +++ b/drivers/net/wireless/ath/ath10k/core.h @@ -692,8 +692,6 @@ struct ath10k { /* The padding bytes's location is different on various chips */ enum ath10k_hw_4addr_pad hw_4addr_pad; - u32 num_msdu_desc; - u32 qcache_active_peers; u32 tx_chain_mask; u32 rx_chain_mask; u32 max_spatial_stream; diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h index f0cfbc745c97..1ff617b05010 100644 --- a/drivers/net/wireless/ath/ath10k/hw.h +++ b/drivers/net/wireless/ath/ath10k/hw.h @@ -431,10 +431,14 @@ enum ath10k_hw_4addr_pad { #define TARGET_10_4_ACTIVE_PEERS 0 #define TARGET_10_4_NUM_QCACHE_PEERS_MAX 512 +#define TARGET_10_4_QCACHE_ACTIVE_PEERS 50 +#define TARGET_10_4_QCACHE_ACTIVE_PEERS_PFC 35 #define TARGET_10_4_NUM_OFFLOAD_PEERS 0 #define TARGET_10_4_NUM_OFFLOAD_REORDER_BUFFS 0 #define TARGET_10_4_NUM_PEER_KEYS 2 #define TARGET_10_4_TGT_NUM_TIDS ((TARGET_10_4_NUM_PEERS) * 2) +#define TARGET_10_4_NUM_MSDU_DESC (1024 + 400) +#define TARGET_10_4_NUM_MSDU_DESC_PFC 2500 #define TARGET_10_4_AST_SKID_LIMIT 32 /* 100 ms for video, best-effort, and background */ diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c index 70261387d1a5..c31b4878cdc6 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.c +++ b/drivers/net/wireless/ath/ath10k/wmi.c @@ -4617,10 +4617,16 @@ static void ath10k_wmi_event_service_ready_work(struct work_struct *work) } if (test_bit(WMI_SERVICE_PEER_CACHING, ar->wmi.svc_map)) { + if (test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL, + 
ar->fw_features)) + ar->num_active_peers = TARGET_10_4_QCACHE_ACTIVE_PEERS_PFC + + ar->max_num_vdevs; + else + ar->num_active_peers = TARGET_10_4_QCACHE_ACTIVE_PEERS + + ar->max_num_vdevs; + ar->max_num_peers = TARGET_10_4_NUM_QCACHE_PEERS_MAX + ar->max_num_vdevs; - ar->num_active_peers = ar->hw_params.qcache_active_peers + - ar->max_num_vdevs; ar->num_tids = ar->num_active_peers * 2; ar->max_num_stations = TARGET_10_4_NUM_QCACHE_PEERS_MAX; } From 8a75fc54745fd3ce9062ab1cc6429a9da9ac2a68 Mon Sep 17 00:00:00 2001 From: Rajkumar Manoharan Date: Wed, 2 Mar 2016 20:13:52 +0530 Subject: [PATCH 0014/1649] ath10k: fix firmware assert in monitor mode commit 166de3f1895d ("ath10k: remove supported chain mask") had revealed an issue on monitor mode. Configuring NSS upon monitor interface creation is causing target assert in all qca9888x and qca6174 firmware. Firmware assert issue can be reproduced by below sequence even after reverting commit 166de3f1895d ("ath10k: remove supported chain mask"). ip link set wlan0 down iw wlan0 set type monitor iw phy0 set antenna 7 ip link set wlan0 up This issue is originally reported on qca9888 with 10.1 firmware. Fixes: 5572a95b4b ("ath10k: apply chainmask settings to vdev on creation") Cc: stable@vger.kernel.org Reported-by: Janusz Dziedzic Signed-off-by: Rajkumar Manoharan Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/mac.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index 4a27f27a8e8b..ebff9c0a0784 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -4818,7 +4818,10 @@ static int ath10k_add_interface(struct ieee80211_hw *hw, goto err_vdev_delete; } - if (ar->cfg_tx_chainmask) { + /* Configuring number of spatial stream for monitor interface is causing + * target assert in qca9888 and qca6174. + */ + if (ar->cfg_tx_chainmask && (vif->type != NL80211_IFTYPE_MONITOR)) { u16 nss = get_nss_from_chainmask(ar->cfg_tx_chainmask); vdev_param = ar->wmi.vdev_param->nss; From 361486b27c7e57dab657dbffd1e17818c7911c72 Mon Sep 17 00:00:00 2001 From: Maya Erez Date: Tue, 1 Mar 2016 19:18:04 +0200 Subject: [PATCH 0015/1649] wil6210: remove BACK RX and TX workers WMI synchronous handling has changed and WMI calls that provide a buffer for the reply are completed in the WMI interrupt context. This allows sending the RX and TX BACK commands from the WMI event handler without the need for the worker thread. This is a better approach as it can decrease the handshake time in the connect flow and prevent race conditions in case of fast disconnects. An example for such a race is handling of wil_back_rx_handle during a disconnect event, as wil_back_rx_handle is not protected by the wil mutex and a disconnect can be handled after sta->status is verified as connected. 
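In sketch form, the control-flow change looks like this; the WMI event handler that now calls the request directly is not part of this diff, and the evt-> field names below are assumed for illustration only:

	/* before: evt handler -> kzalloc(wil_back_rx) -> list_add_tail()
	 *         -> queue_work(back_rx_worker) -> wil_back_rx_handle()
	 *
	 * after: evt handler calls the request synchronously and sees the result
	 */
	rc = wil_addba_rx_request(wil, evt->cidxtid, evt->dialog_token,
				  evt->ba_param_set, evt->ba_timeout,
				  evt->ba_seq_ctrl);
	if (rc)
		wil_err(wil, "ADDBA rx request failed: %d\n", rc);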
Signed-off-by: Maya Erez Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wil6210/main.c | 10 - drivers/net/wireless/ath/wil6210/rx_reorder.c | 206 ++++-------------- drivers/net/wireless/ath/wil6210/wil6210.h | 29 --- 3 files changed, 42 insertions(+), 203 deletions(-) diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c index 78ba6e04c944..35db9940c519 100644 --- a/drivers/net/wireless/ath/wil6210/main.c +++ b/drivers/net/wireless/ath/wil6210/main.c @@ -440,8 +440,6 @@ int wil_priv_init(struct wil6210_priv *wil) mutex_init(&wil->mutex); mutex_init(&wil->wmi_mutex); - mutex_init(&wil->back_rx_mutex); - mutex_init(&wil->back_tx_mutex); mutex_init(&wil->probe_client_mutex); init_completion(&wil->wmi_ready); @@ -454,13 +452,9 @@ int wil_priv_init(struct wil6210_priv *wil) INIT_WORK(&wil->disconnect_worker, wil_disconnect_worker); INIT_WORK(&wil->wmi_event_worker, wmi_event_worker); INIT_WORK(&wil->fw_error_worker, wil_fw_error_worker); - INIT_WORK(&wil->back_rx_worker, wil_back_rx_worker); - INIT_WORK(&wil->back_tx_worker, wil_back_tx_worker); INIT_WORK(&wil->probe_client_worker, wil_probe_client_worker); INIT_LIST_HEAD(&wil->pending_wmi_ev); - INIT_LIST_HEAD(&wil->back_rx_pending); - INIT_LIST_HEAD(&wil->back_tx_pending); INIT_LIST_HEAD(&wil->probe_client_pending); spin_lock_init(&wil->wmi_ev_lock); init_waitqueue_head(&wil->wq); @@ -520,10 +514,6 @@ void wil_priv_deinit(struct wil6210_priv *wil) wil6210_disconnect(wil, NULL, WLAN_REASON_DEAUTH_LEAVING, false); mutex_unlock(&wil->mutex); wmi_event_flush(wil); - wil_back_rx_flush(wil); - cancel_work_sync(&wil->back_rx_worker); - wil_back_tx_flush(wil); - cancel_work_sync(&wil->back_tx_worker); wil_probe_client_flush(wil); cancel_work_sync(&wil->probe_client_worker); destroy_workqueue(wil->wq_service); diff --git a/drivers/net/wireless/ath/wil6210/rx_reorder.c b/drivers/net/wireless/ath/wil6210/rx_reorder.c index 32031e7a11d5..19ed127d4d05 100644 --- a/drivers/net/wireless/ath/wil6210/rx_reorder.c +++ b/drivers/net/wireless/ath/wil6210/rx_reorder.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014-2015 Qualcomm Atheros, Inc. + * Copyright (c) 2014-2016 Qualcomm Atheros, Inc. 
* * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -291,35 +291,15 @@ static u16 wil_agg_size(struct wil6210_priv *wil, u16 req_agg_wsize) return min(max_agg_size, req_agg_wsize); } -/* Block Ack - Rx side (recipient */ +/* Block Ack - Rx side (recipient) */ int wil_addba_rx_request(struct wil6210_priv *wil, u8 cidxtid, u8 dialog_token, __le16 ba_param_set, __le16 ba_timeout, __le16 ba_seq_ctrl) -{ - struct wil_back_rx *req = kzalloc(sizeof(*req), GFP_KERNEL); - - if (!req) - return -ENOMEM; - - req->cidxtid = cidxtid; - req->dialog_token = dialog_token; - req->ba_param_set = le16_to_cpu(ba_param_set); - req->ba_timeout = le16_to_cpu(ba_timeout); - req->ba_seq_ctrl = le16_to_cpu(ba_seq_ctrl); - - mutex_lock(&wil->back_rx_mutex); - list_add_tail(&req->list, &wil->back_rx_pending); - mutex_unlock(&wil->back_rx_mutex); - - queue_work(wil->wq_service, &wil->back_rx_worker); - - return 0; -} - -static void wil_back_rx_handle(struct wil6210_priv *wil, - struct wil_back_rx *req) __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock) { + u16 param_set = le16_to_cpu(ba_param_set); + u16 agg_timeout = le16_to_cpu(ba_timeout); + u16 seq_ctrl = le16_to_cpu(ba_seq_ctrl); struct wil_sta_info *sta; u8 cid, tid; u16 agg_wsize = 0; @@ -328,34 +308,35 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock) * bits 2..5: TID * bits 6..15: buffer size */ - u16 req_agg_wsize = WIL_GET_BITS(req->ba_param_set, 6, 15); - bool agg_amsdu = !!(req->ba_param_set & BIT(0)); - int ba_policy = req->ba_param_set & BIT(1); - u16 agg_timeout = req->ba_timeout; + u16 req_agg_wsize = WIL_GET_BITS(param_set, 6, 15); + bool agg_amsdu = !!(param_set & BIT(0)); + int ba_policy = param_set & BIT(1); u16 status = WLAN_STATUS_SUCCESS; - u16 ssn = req->ba_seq_ctrl >> 4; + u16 ssn = seq_ctrl >> 4; struct wil_tid_ampdu_rx *r; - int rc; + int rc = 0; might_sleep(); - parse_cidxtid(req->cidxtid, &cid, &tid); + parse_cidxtid(cidxtid, &cid, &tid); /* sanity checks */ if (cid >= WIL6210_MAX_CID) { wil_err(wil, "BACK: invalid CID %d\n", cid); - return; + rc = -EINVAL; + goto out; } sta = &wil->sta[cid]; if (sta->status != wil_sta_connected) { wil_err(wil, "BACK: CID %d not connected\n", cid); - return; + rc = -EINVAL; + goto out; } wil_dbg_wmi(wil, "ADDBA request for CID %d %pM TID %d size %d timeout %d AMSDU%s policy %d token %d SSN 0x%03x\n", - cid, sta->addr, tid, req_agg_wsize, req->ba_timeout, - agg_amsdu ? "+" : "-", !!ba_policy, req->dialog_token, ssn); + cid, sta->addr, tid, req_agg_wsize, agg_timeout, + agg_amsdu ? 
"+" : "-", !!ba_policy, dialog_token, ssn); /* apply policies */ if (ba_policy) { @@ -365,10 +346,13 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock) if (status == WLAN_STATUS_SUCCESS) agg_wsize = wil_agg_size(wil, req_agg_wsize); - rc = wmi_addba_rx_resp(wil, cid, tid, req->dialog_token, status, + rc = wmi_addba_rx_resp(wil, cid, tid, dialog_token, status, agg_amsdu, agg_wsize, agg_timeout); - if (rc || (status != WLAN_STATUS_SUCCESS)) - return; + if (rc || (status != WLAN_STATUS_SUCCESS)) { + wil_err(wil, "%s: do not apply ba, rc(%d), status(%d)\n", + __func__, rc, status); + goto out; + } /* apply */ r = wil_tid_ampdu_rx_alloc(wil, agg_wsize, ssn); @@ -376,143 +360,37 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock) wil_tid_ampdu_rx_free(wil, sta->tid_rx[tid]); sta->tid_rx[tid] = r; spin_unlock_bh(&sta->tid_rx_lock); + +out: + return rc; } -void wil_back_rx_flush(struct wil6210_priv *wil) +/* BACK - Tx side (originator) */ +int wil_addba_tx_request(struct wil6210_priv *wil, u8 ringid, u16 wsize) { - struct wil_back_rx *evt, *t; - - wil_dbg_misc(wil, "%s()\n", __func__); - - mutex_lock(&wil->back_rx_mutex); - - list_for_each_entry_safe(evt, t, &wil->back_rx_pending, list) { - list_del(&evt->list); - kfree(evt); - } - - mutex_unlock(&wil->back_rx_mutex); -} - -/* Retrieve next ADDBA request from the pending list */ -static struct list_head *next_back_rx(struct wil6210_priv *wil) -{ - struct list_head *ret = NULL; - - mutex_lock(&wil->back_rx_mutex); - - if (!list_empty(&wil->back_rx_pending)) { - ret = wil->back_rx_pending.next; - list_del(ret); - } - - mutex_unlock(&wil->back_rx_mutex); - - return ret; -} - -void wil_back_rx_worker(struct work_struct *work) -{ - struct wil6210_priv *wil = container_of(work, struct wil6210_priv, - back_rx_worker); - struct wil_back_rx *evt; - struct list_head *lh; - - while ((lh = next_back_rx(wil)) != NULL) { - evt = list_entry(lh, struct wil_back_rx, list); - - wil_back_rx_handle(wil, evt); - kfree(evt); - } -} - -/* BACK - Tx (originator) side */ -static void wil_back_tx_handle(struct wil6210_priv *wil, - struct wil_back_tx *req) -{ - struct vring_tx_data *txdata = &wil->vring_tx_data[req->ringid]; - int rc; + u8 agg_wsize = wil_agg_size(wil, wsize); + u16 agg_timeout = 0; + struct vring_tx_data *txdata = &wil->vring_tx_data[ringid]; + int rc = 0; if (txdata->addba_in_progress) { wil_dbg_misc(wil, "ADDBA for vring[%d] already in progress\n", - req->ringid); - return; + ringid); + goto out; } if (txdata->agg_wsize) { wil_dbg_misc(wil, - "ADDBA for vring[%d] already established wsize %d\n", - req->ringid, txdata->agg_wsize); - return; + "ADDBA for vring[%d] already done for wsize %d\n", + ringid, txdata->agg_wsize); + goto out; } txdata->addba_in_progress = true; - rc = wmi_addba(wil, req->ringid, req->agg_wsize, req->agg_timeout); - if (rc) + rc = wmi_addba(wil, ringid, agg_wsize, agg_timeout); + if (rc) { + wil_err(wil, "%s: wmi_addba failed, rc (%d)", __func__, rc); txdata->addba_in_progress = false; -} - -static struct list_head *next_back_tx(struct wil6210_priv *wil) -{ - struct list_head *ret = NULL; - - mutex_lock(&wil->back_tx_mutex); - - if (!list_empty(&wil->back_tx_pending)) { - ret = wil->back_tx_pending.next; - list_del(ret); } - mutex_unlock(&wil->back_tx_mutex); - - return ret; -} - -void wil_back_tx_worker(struct work_struct *work) -{ - struct wil6210_priv *wil = container_of(work, struct wil6210_priv, - back_tx_worker); - struct wil_back_tx *evt; - struct list_head *lh; - - while ((lh = next_back_tx(wil)) != 
NULL) { - evt = list_entry(lh, struct wil_back_tx, list); - - wil_back_tx_handle(wil, evt); - kfree(evt); - } -} - -void wil_back_tx_flush(struct wil6210_priv *wil) -{ - struct wil_back_tx *evt, *t; - - wil_dbg_misc(wil, "%s()\n", __func__); - - mutex_lock(&wil->back_tx_mutex); - - list_for_each_entry_safe(evt, t, &wil->back_tx_pending, list) { - list_del(&evt->list); - kfree(evt); - } - - mutex_unlock(&wil->back_tx_mutex); -} - -int wil_addba_tx_request(struct wil6210_priv *wil, u8 ringid, u16 wsize) -{ - struct wil_back_tx *req = kzalloc(sizeof(*req), GFP_KERNEL); - - if (!req) - return -ENOMEM; - - req->ringid = ringid; - req->agg_wsize = wil_agg_size(wil, wsize); - req->agg_timeout = 0; - - mutex_lock(&wil->back_tx_mutex); - list_add_tail(&req->list, &wil->back_tx_pending); - mutex_unlock(&wil->back_tx_mutex); - - queue_work(wil->wq_service, &wil->back_tx_worker); - - return 0; +out: + return rc; } diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h index 8427d68b6fa8..d59c3f29941e 100644 --- a/drivers/net/wireless/ath/wil6210/wil6210.h +++ b/drivers/net/wireless/ath/wil6210/wil6210.h @@ -507,24 +507,6 @@ enum { hw_capability_last }; -struct wil_back_rx { - struct list_head list; - /* request params, converted to CPU byte order - what we asked for */ - u8 cidxtid; - u8 dialog_token; - u16 ba_param_set; - u16 ba_timeout; - u16 ba_seq_ctrl; -}; - -struct wil_back_tx { - struct list_head list; - /* request params, converted to CPU byte order - what we asked for */ - u8 ringid; - u8 agg_wsize; - u16 agg_timeout; -}; - struct wil_probe_client_req { struct list_head list; u64 cookie; @@ -595,13 +577,6 @@ struct wil6210_priv { spinlock_t wmi_ev_lock; struct napi_struct napi_rx; struct napi_struct napi_tx; - /* BACK */ - struct list_head back_rx_pending; - struct mutex back_rx_mutex; /* protect @back_rx_pending */ - struct work_struct back_rx_worker; - struct list_head back_tx_pending; - struct mutex back_tx_mutex; /* protect @back_tx_pending */ - struct work_struct back_tx_worker; /* keep alive */ struct list_head probe_client_pending; struct mutex probe_client_mutex; /* protect @probe_client_pending */ @@ -765,11 +740,7 @@ int wmi_addba_rx_resp(struct wil6210_priv *wil, u8 cid, u8 tid, u8 token, int wil_addba_rx_request(struct wil6210_priv *wil, u8 cidxtid, u8 dialog_token, __le16 ba_param_set, __le16 ba_timeout, __le16 ba_seq_ctrl); -void wil_back_rx_worker(struct work_struct *work); -void wil_back_rx_flush(struct wil6210_priv *wil); int wil_addba_tx_request(struct wil6210_priv *wil, u8 ringid, u16 wsize); -void wil_back_tx_worker(struct work_struct *work); -void wil_back_tx_flush(struct wil6210_priv *wil); void wil6210_clear_irq(struct wil6210_priv *wil); int wil6210_init_irq(struct wil6210_priv *wil, int irq, bool use_msi); From 3d287fb398c03189a1394778162f6404e4d44ad2 Mon Sep 17 00:00:00 2001 From: Maya Erez Date: Tue, 1 Mar 2016 19:18:05 +0200 Subject: [PATCH 0016/1649] wil6210: AP: prevent connecting to already connected station wmi_evt_connect doesn't check if the connect event is received for an already connected station. This can lead to memory leak as a new vring is allocated without freeing the previously allocated vring and to unexpected behavior of nl80211 layer due to unexpected notification of a new station. Add a check in wmi_evt_connect in AP mode to verify that the requested CID is not associated to an already connected station. 
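In simplified form the guard added to wmi_evt_connect() is the check below: anything other than wil_sta_unused for the reported CID means a station record (and possibly a vring) already exists, so the duplicate connect event is dropped instead of being applied a second time.

	if (wdev->iftype == NL80211_IFTYPE_AP ||
	    wdev->iftype == NL80211_IFTYPE_P2P_GO) {
		if (wil->sta[evt->cid].status != wil_sta_unused) {
			/* duplicate connect: drop to avoid leaking a vring */
			mutex_unlock(&wil->mutex);
			return;
		}
	}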
Signed-off-by: Maya Erez Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wil6210/wmi.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c index 493e721c4fa7..fb090350df6d 100644 --- a/drivers/net/wireless/ath/wil6210/wmi.c +++ b/drivers/net/wireless/ath/wil6210/wmi.c @@ -487,6 +487,14 @@ static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len) return; } del_timer_sync(&wil->connect_timer); + } else if ((wdev->iftype == NL80211_IFTYPE_AP) || + (wdev->iftype == NL80211_IFTYPE_P2P_GO)) { + if (wil->sta[evt->cid].status != wil_sta_unused) { + wil_err(wil, "%s: AP: Invalid status %d for CID %d\n", + __func__, wil->sta[evt->cid].status, evt->cid); + mutex_unlock(&wil->mutex); + return; + } } /* FIXME FW can transmit only ucast frames to peer */ From b42f11963f7bd8c54d0a28d679c13d9e83b85357 Mon Sep 17 00:00:00 2001 From: Hamad Kadmany Date: Tue, 1 Mar 2016 19:18:06 +0200 Subject: [PATCH 0017/1649] wil6210: Set permanent MAC address to wiphy MAC address of wil6210 was not set in wiphy Signed-off-by: Hamad Kadmany Signed-off-by: Maya Erez Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wil6210/main.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c index 35db9940c519..997a740e0a4b 100644 --- a/drivers/net/wireless/ath/wil6210/main.c +++ b/drivers/net/wireless/ath/wil6210/main.c @@ -627,6 +627,7 @@ void wil_mbox_ring_le2cpus(struct wil6210_mbox_ring *r) static int wil_get_bl_info(struct wil6210_priv *wil) { struct net_device *ndev = wil_to_ndev(wil); + struct wiphy *wiphy = wil_to_wiphy(wil); union { struct bl_dedicated_registers_v0 bl0; struct bl_dedicated_registers_v1 bl1; @@ -671,6 +672,7 @@ static int wil_get_bl_info(struct wil6210_priv *wil) } ether_addr_copy(ndev->perm_addr, mac); + ether_addr_copy(wiphy->perm_addr, mac); if (!is_valid_ether_addr(ndev->dev_addr)) ether_addr_copy(ndev->dev_addr, mac); From 58527421489dcc1110f6bcfd3b50d479199af4e0 Mon Sep 17 00:00:00 2001 From: Vladimir Kondratiev Date: Tue, 1 Mar 2016 19:18:07 +0200 Subject: [PATCH 0018/1649] wil6210: replay attack detection Check PN for encrypted frames. Maintain PN data for Rx keys, pairwise per TID and group. 
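The replay rule is simply that the 6-byte GCMP PN must be strictly increasing per key. Since the Rx descriptor stores the PN least-significant byte first (that ordering is my reading of the pn_15_0 field), the byte comparison has to run from the last byte down, which is what reverse_memcmp() does. A worked example, with drop_and_count_replay() as a hypothetical placeholder for the drop path:

	u8 last[IEEE80211_GCMP_PN_LEN] = { 0x05, 0, 0, 0, 0, 0 };	/* PN = 5 */
	u8 rx[IEEE80211_GCMP_PN_LEN]   = { 0x03, 0, 0, 0, 0, 0 };	/* PN = 3 */

	if (reverse_memcmp(rx, last, IEEE80211_GCMP_PN_LEN) <= 0)
		drop_and_count_replay();	/* 3 <= 5: replayed or reused PN */
	else
		memcpy(last, rx, IEEE80211_GCMP_PN_LEN);	/* accept, remember new PN */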
Print PN's in the debugfs "stations" entry, like: [0] 04:ce:14:0a:3c:3d connected [ 0] ([32] 0 TU) 0x0fe [____________________________|___] total 252 drop 0 (dup 0 + old 0) last 0x000 [ 0] PN [0+]000000000000 [1-]000000000000 [2-]000000000000 [3-]000000000000 [GR] PN [0-]000000000000 [1+]000000000000 [2+]000000000000 [3-]000000000000 Rx invalid frame: non-data 0, short 0, large 0, replay 0 Rx/MCS: 0 110 65 65 65 0 12 0 0 0 0 0 0 [1] 00:00:00:00:00:00 unused [2] 00:00:00:00:00:00 unused [3] 00:00:00:00:00:00 unused [4] 00:00:00:00:00:00 unused [5] 00:00:00:00:00:00 unused [6] 00:00:00:00:00:00 unused [7] 00:00:00:00:00:00 unused Signed-off-by: Vladimir Kondratiev Signed-off-by: Hamad Kadmany Signed-off-by: Maya Erez Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wil6210/cfg80211.c | 100 +++++++++++++++++--- drivers/net/wireless/ath/wil6210/debugfs.c | 41 +++++++- drivers/net/wireless/ath/wil6210/main.c | 9 +- drivers/net/wireless/ath/wil6210/txrx.c | 63 ++++++++++++ drivers/net/wireless/ath/wil6210/txrx.h | 12 ++- drivers/net/wireless/ath/wil6210/wil6210.h | 18 ++++ 6 files changed, 226 insertions(+), 17 deletions(-) diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c index 11f1bb8dfebe..ddadda90cfa0 100644 --- a/drivers/net/wireless/ath/wil6210/cfg80211.c +++ b/drivers/net/wireless/ath/wil6210/cfg80211.c @@ -82,6 +82,12 @@ static const u32 wil_cipher_suites[] = { WLAN_CIPHER_SUITE_GCMP, }; +static const char * const key_usage_str[] = { + [WMI_KEY_USE_PAIRWISE] = "PTK", + [WMI_KEY_USE_RX_GROUP] = "RX_GTK", + [WMI_KEY_USE_TX_GROUP] = "TX_GTK", +}; + int wil_iftype_nl2wmi(enum nl80211_iftype type) { static const struct { @@ -610,11 +616,6 @@ static enum wmi_key_usage wil_detect_key_usage(struct wil6210_priv *wil, { struct wireless_dev *wdev = wil->wdev; enum wmi_key_usage rc; - static const char * const key_usage_str[] = { - [WMI_KEY_USE_PAIRWISE] = "WMI_KEY_USE_PAIRWISE", - [WMI_KEY_USE_RX_GROUP] = "WMI_KEY_USE_RX_GROUP", - [WMI_KEY_USE_TX_GROUP] = "WMI_KEY_USE_TX_GROUP", - }; if (pairwise) { rc = WMI_KEY_USE_PAIRWISE; @@ -638,20 +639,86 @@ static enum wmi_key_usage wil_detect_key_usage(struct wil6210_priv *wil, return rc; } +static struct wil_tid_crypto_rx_single * +wil_find_crypto_ctx(struct wil6210_priv *wil, u8 key_index, + enum wmi_key_usage key_usage, const u8 *mac_addr) +{ + int cid = -EINVAL; + int tid = 0; + struct wil_sta_info *s; + struct wil_tid_crypto_rx *c; + + if (key_usage == WMI_KEY_USE_TX_GROUP) + return NULL; /* not needed */ + + /* supplicant provides Rx group key in STA mode with NULL MAC address */ + if (mac_addr) + cid = wil_find_cid(wil, mac_addr); + else if (key_usage == WMI_KEY_USE_RX_GROUP) + cid = wil_find_cid_by_idx(wil, 0); + if (cid < 0) { + wil_err(wil, "No CID for %pM %s[%d]\n", mac_addr, + key_usage_str[key_usage], key_index); + return ERR_PTR(cid); + } + + s = &wil->sta[cid]; + if (key_usage == WMI_KEY_USE_PAIRWISE) + c = &s->tid_crypto_rx[tid]; + else + c = &s->group_crypto_rx; + + return &c->key_id[key_index]; +} + static int wil_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev, u8 key_index, bool pairwise, const u8 *mac_addr, struct key_params *params) { + int rc; struct wil6210_priv *wil = wiphy_to_wil(wiphy); enum wmi_key_usage key_usage = wil_detect_key_usage(wil, pairwise); + struct wil_tid_crypto_rx_single *cc = wil_find_crypto_ctx(wil, + key_index, + key_usage, + mac_addr); - wil_dbg_misc(wil, "%s(%pM[%d] %s)\n", __func__, mac_addr, key_index, - pairwise ? 
"PTK" : "GTK"); + wil_dbg_misc(wil, "%s(%pM %s[%d] PN %*phN)\n", __func__, + mac_addr, key_usage_str[key_usage], key_index, + params->seq_len, params->seq); - return wmi_add_cipher_key(wil, key_index, mac_addr, params->key_len, - params->key, key_usage); + if (IS_ERR(cc)) { + wil_err(wil, "Not connected, %s(%pM %s[%d] PN %*phN)\n", + __func__, mac_addr, key_usage_str[key_usage], key_index, + params->seq_len, params->seq); + return -EINVAL; + } + + if (cc) + cc->key_set = false; + + if (params->seq && params->seq_len != IEEE80211_GCMP_PN_LEN) { + wil_err(wil, + "Wrong PN len %d, %s(%pM %s[%d] PN %*phN)\n", + params->seq_len, __func__, mac_addr, + key_usage_str[key_usage], key_index, + params->seq_len, params->seq); + return -EINVAL; + } + + rc = wmi_add_cipher_key(wil, key_index, mac_addr, params->key_len, + params->key, key_usage); + if ((rc == 0) && cc) { + if (params->seq) + memcpy(cc->pn, params->seq, IEEE80211_GCMP_PN_LEN); + else + memset(cc->pn, 0, IEEE80211_GCMP_PN_LEN); + cc->key_set = true; + } + + return rc; } static int wil_cfg80211_del_key(struct wiphy *wiphy, @@ -661,9 +728,20 @@ static int wil_cfg80211_del_key(struct wiphy *wiphy, { struct wil6210_priv *wil = wiphy_to_wil(wiphy); enum wmi_key_usage key_usage = wil_detect_key_usage(wil, pairwise); + struct wil_tid_crypto_rx_single *cc = wil_find_crypto_ctx(wil, + key_index, + key_usage, + mac_addr); - wil_dbg_misc(wil, "%s(%pM[%d] %s)\n", __func__, mac_addr, key_index, - pairwise ? "PTK" : "GTK"); + wil_dbg_misc(wil, "%s(%pM %s[%d])\n", __func__, mac_addr, + key_usage_str[key_usage], key_index); + + if (IS_ERR(cc)) + wil_info(wil, "Not connected, %s(%pM %s[%d])\n", __func__, + mac_addr, key_usage_str[key_usage], key_index); + + if (!IS_ERR_OR_NULL(cc)) + cc->key_set = false; return wmi_del_cipher_key(wil, key_index, mac_addr, key_usage); } diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c index 3bbe73b6d05a..d80bb75c6576 100644 --- a/drivers/net/wireless/ath/wil6210/debugfs.c +++ b/drivers/net/wireless/ath/wil6210/debugfs.c @@ -1333,6 +1333,34 @@ static void wil_print_rxtid(struct seq_file *s, struct wil_tid_ampdu_rx *r) r->ssn_last_drop); } +static void wil_print_rxtid_crypto(struct seq_file *s, int tid, + struct wil_tid_crypto_rx *c) +{ + int i; + + for (i = 0; i < 4; i++) { + struct wil_tid_crypto_rx_single *cc = &c->key_id[i]; + + if (cc->key_set) + goto has_keys; + } + return; + +has_keys: + if (tid < WIL_STA_TID_NUM) + seq_printf(s, " [%2d] PN", tid); + else + seq_puts(s, " [GR] PN"); + + for (i = 0; i < 4; i++) { + struct wil_tid_crypto_rx_single *cc = &c->key_id[i]; + + seq_printf(s, " [%i%s]%6phN", i, cc->key_set ? 
"+" : "-", + cc->pn); + } + seq_puts(s, "\n"); +} + static int wil_sta_debugfs_show(struct seq_file *s, void *data) __acquires(&p->tid_rx_lock) __releases(&p->tid_rx_lock) { @@ -1360,18 +1388,25 @@ __acquires(&p->tid_rx_lock) __releases(&p->tid_rx_lock) spin_lock_bh(&p->tid_rx_lock); for (tid = 0; tid < WIL_STA_TID_NUM; tid++) { struct wil_tid_ampdu_rx *r = p->tid_rx[tid]; + struct wil_tid_crypto_rx *c = + &p->tid_crypto_rx[tid]; if (r) { - seq_printf(s, "[%2d] ", tid); + seq_printf(s, " [%2d] ", tid); wil_print_rxtid(s, r); } + + wil_print_rxtid_crypto(s, tid, c); } + wil_print_rxtid_crypto(s, WIL_STA_TID_NUM, + &p->group_crypto_rx); spin_unlock_bh(&p->tid_rx_lock); seq_printf(s, - "Rx invalid frame: non-data %lu, short %lu, large %lu\n", + "Rx invalid frame: non-data %lu, short %lu, large %lu, replay %lu\n", p->stats.rx_non_data_frame, p->stats.rx_short_frame, - p->stats.rx_large_frame); + p->stats.rx_large_frame, + p->stats.rx_replay); seq_puts(s, "Rx/MCS:"); for (mcs = 0; mcs < ARRAY_SIZE(p->stats.rx_per_mcs); diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c index 997a740e0a4b..1fa215d0eeed 100644 --- a/drivers/net/wireless/ath/wil6210/main.c +++ b/drivers/net/wireless/ath/wil6210/main.c @@ -149,7 +149,7 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock) might_sleep(); wil_dbg_misc(wil, "%s(CID %d, status %d)\n", __func__, cid, sta->status); - + /* inform upper/lower layers */ if (sta->status != wil_sta_unused) { if (!from_event) wmi_disconnect_sta(wil, sta->addr, reason_code, true); @@ -165,7 +165,7 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock) } sta->status = wil_sta_unused; } - + /* reorder buffers */ for (i = 0; i < WIL_STA_TID_NUM; i++) { struct wil_tid_ampdu_rx *r; @@ -177,10 +177,15 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock) spin_unlock_bh(&sta->tid_rx_lock); } + /* crypto context */ + memset(sta->tid_crypto_rx, 0, sizeof(sta->tid_crypto_rx)); + memset(&sta->group_crypto_rx, 0, sizeof(sta->group_crypto_rx)); + /* release vrings */ for (i = 0; i < ARRAY_SIZE(wil->vring_tx); i++) { if (wil->vring2cid_tid[i][0] == cid) wil_vring_fini_tx(wil, i); } + /* statistics */ memset(&sta->stats, 0, sizeof(sta->stats)); } diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c index 6af20903cf89..f383001b86aa 100644 --- a/drivers/net/wireless/ath/wil6210/txrx.c +++ b/drivers/net/wireless/ath/wil6210/txrx.c @@ -549,6 +549,60 @@ static int wil_rx_refill(struct wil6210_priv *wil, int count) return rc; } +/** + * reverse_memcmp - Compare two areas of memory, in reverse order + * @cs: One area of memory + * @ct: Another area of memory + * @count: The size of the area. + * + * Cut'n'paste from original memcmp (see lib/string.c) + * with minimal modifications + */ +static int reverse_memcmp(const void *cs, const void *ct, size_t count) +{ + const unsigned char *su1, *su2; + int res = 0; + + for (su1 = cs + count - 1, su2 = ct + count - 1; count > 0; + --su1, --su2, count--) { + res = *su1 - *su2; + if (res) + break; + } + return res; +} + +static int wil_rx_crypto_check(struct wil6210_priv *wil, struct sk_buff *skb) +{ + struct vring_rx_desc *d = wil_skb_rxdesc(skb); + int cid = wil_rxdesc_cid(d); + int tid = wil_rxdesc_tid(d); + int key_id = wil_rxdesc_key_id(d); + int mc = wil_rxdesc_mcast(d); + struct wil_sta_info *s = &wil->sta[cid]; + struct wil_tid_crypto_rx *c = mc ? 
&s->group_crypto_rx : + &s->tid_crypto_rx[tid]; + struct wil_tid_crypto_rx_single *cc = &c->key_id[key_id]; + const u8 *pn = (u8 *)&d->mac.pn_15_0; + + if (!cc->key_set) { + wil_err_ratelimited(wil, + "Key missing. CID %d TID %d MCast %d KEY_ID %d\n", + cid, tid, mc, key_id); + return -EINVAL; + } + + if (reverse_memcmp(pn, cc->pn, IEEE80211_GCMP_PN_LEN) <= 0) { + wil_err_ratelimited(wil, + "Replay attack. CID %d TID %d MCast %d KEY_ID %d PN %6phN last %6phN\n", + cid, tid, mc, key_id, pn, cc->pn); + return -EINVAL; + } + memcpy(cc->pn, pn, IEEE80211_GCMP_PN_LEN); + + return 0; +} + /* * Pass Rx packet to the netif. Update statistics. * Called in softirq context (NAPI poll). @@ -561,6 +615,7 @@ void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev) unsigned int len = skb->len; struct vring_rx_desc *d = wil_skb_rxdesc(skb); int cid = wil_rxdesc_cid(d); /* always 0..7, no need to check */ + int security = wil_rxdesc_security(d); struct ethhdr *eth = (void *)skb->data; /* here looking for DA, not A1, thus Rxdesc's 'mcast' indication * is not suitable, need to look at data @@ -586,6 +641,13 @@ void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev) skb_orphan(skb); + if (security && (wil_rx_crypto_check(wil, skb) != 0)) { + rc = GRO_DROP; + dev_kfree_skb(skb); + stats->rx_replay++; + goto stats; + } + if (wdev->iftype == NL80211_IFTYPE_AP && !wil->ap_isolate) { if (mcast) { /* send multicast frames both to higher layers in @@ -627,6 +689,7 @@ void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev) wil_dbg_txrx(wil, "Rx complete %d bytes => %s\n", len, gro_res_str[rc]); } +stats: /* statistics. rc set to GRO_NORMAL for AP bridging */ if (unlikely(rc == GRO_DROP)) { ndev->stats.rx_dropped++; diff --git a/drivers/net/wireless/ath/wil6210/txrx.h b/drivers/net/wireless/ath/wil6210/txrx.h index ee7c7b4b9a17..fcdffaa8251b 100644 --- a/drivers/net/wireless/ath/wil6210/txrx.h +++ b/drivers/net/wireless/ath/wil6210/txrx.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012-2014 Qualcomm Atheros, Inc. + * Copyright (c) 2012-2016 Qualcomm Atheros, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -480,6 +480,16 @@ static inline int wil_rxdesc_ext_subtype(struct vring_rx_desc *d) return WIL_GET_BITS(d->mac.d0, 28, 31); } +static inline int wil_rxdesc_key_id(struct vring_rx_desc *d) +{ + return WIL_GET_BITS(d->mac.d1, 4, 5); +} + +static inline int wil_rxdesc_security(struct vring_rx_desc *d) +{ + return WIL_GET_BITS(d->mac.d1, 7, 7); +} + static inline int wil_rxdesc_ds_bits(struct vring_rx_desc *d) { return WIL_GET_BITS(d->mac.d1, 8, 9); diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h index d59c3f29941e..44ff040e2fea 100644 --- a/drivers/net/wireless/ath/wil6210/wil6210.h +++ b/drivers/net/wireless/ath/wil6210/wil6210.h @@ -455,6 +455,21 @@ struct wil_tid_ampdu_rx { bool first_time; /* is it 1-st time this buffer used? */ }; +/** + * struct wil_tid_crypto_rx_single - TID crypto information (Rx). 
+ * + * @pn: GCMP PN for the session + * @key_set: valid key present + */ +struct wil_tid_crypto_rx_single { + u8 pn[IEEE80211_GCMP_PN_LEN]; + bool key_set; +}; + +struct wil_tid_crypto_rx { + struct wil_tid_crypto_rx_single key_id[4]; +}; + enum wil_sta_status { wil_sta_unused = 0, wil_sta_conn_pending = 1, @@ -474,6 +489,7 @@ struct wil_net_stats { unsigned long rx_non_data_frame; unsigned long rx_short_frame; unsigned long rx_large_frame; + unsigned long rx_replay; u16 last_mcs_rx; u64 rx_per_mcs[WIL_MCS_MAX + 1]; }; @@ -495,6 +511,8 @@ struct wil_sta_info { spinlock_t tid_rx_lock; /* guarding tid_rx array */ unsigned long tid_rx_timer_expired[BITS_TO_LONGS(WIL_STA_TID_NUM)]; unsigned long tid_rx_stop_requested[BITS_TO_LONGS(WIL_STA_TID_NUM)]; + struct wil_tid_crypto_rx tid_crypto_rx[WIL_STA_TID_NUM]; + struct wil_tid_crypto_rx group_crypto_rx; }; enum { From 74997a53d257e327699e359b78b3ecfd33f80cab Mon Sep 17 00:00:00 2001 From: Lior David Date: Tue, 1 Mar 2016 19:18:08 +0200 Subject: [PATCH 0019/1649] wil6210: add support for discovery mode during scan Add support for discovery mode during scan. When discovery mode is active, station transmits special beacons while scanning. This can optimize the scan mainly when there is only one AP/PCP around. Discovery mode is implicitly used by firmware during P2P search. Since there is currently no use case where user space has a reason to directly control discovery mode, we expose it only through a debugfs flag. Also fix name confusion in the wmi_scan_type enumeration. The type previously called WMI_LONG_SCAN is actually WMI_ACTIVE_SCAN. Signed-off-by: Lior David Signed-off-by: Maya Erez Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wil6210/cfg80211.c | 6 ++++++ drivers/net/wireless/ath/wil6210/debugfs.c | 6 ++++++ drivers/net/wireless/ath/wil6210/wil6210.h | 1 + drivers/net/wireless/ath/wil6210/wmi.h | 7 ++++--- 4 files changed, 17 insertions(+), 3 deletions(-) diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c index ddadda90cfa0..1ccf136b34e7 100644 --- a/drivers/net/wireless/ath/wil6210/cfg80211.c +++ b/drivers/net/wireless/ath/wil6210/cfg80211.c @@ -319,6 +319,7 @@ static int wil_cfg80211_scan(struct wiphy *wiphy, mod_timer(&wil->scan_timer, jiffies + WIL6210_SCAN_TO); memset(&cmd, 0, sizeof(cmd)); + cmd.cmd.scan_type = WMI_ACTIVE_SCAN; cmd.cmd.num_channels = 0; n = min(request->n_channels, 4U); for (i = 0; i < n; i++) { @@ -346,6 +347,11 @@ static int wil_cfg80211_scan(struct wiphy *wiphy, if (rc) goto out; + if (wil->discovery_mode && cmd.cmd.scan_type == WMI_ACTIVE_SCAN) { + cmd.cmd.discovery_mode = 1; + wil_dbg_misc(wil, "active scan with discovery_mode=1\n"); + } + rc = wmi_send(wil, WMI_START_SCAN_CMDID, &cmd, sizeof(cmd.cmd) + cmd.cmd.num_channels * sizeof(cmd.cmd.channel_list[0])); diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c index d80bb75c6576..8b7e1fddb5bd 100644 --- a/drivers/net/wireless/ath/wil6210/debugfs.c +++ b/drivers/net/wireless/ath/wil6210/debugfs.c @@ -37,6 +37,7 @@ enum dbg_off_type { doff_x32 = 1, doff_ulong = 2, doff_io32 = 3, + doff_u8 = 4 }; /* offset to "wil" */ @@ -346,6 +347,10 @@ static void wil6210_debugfs_init_offset(struct wil6210_priv *wil, tbl[i].mode, dbg, base + tbl[i].off); break; + case doff_u8: + f = debugfs_create_u8(tbl[i].name, tbl[i].mode, dbg, + base + tbl[i].off); + break; default: f = ERR_PTR(-EINVAL); } @@ -1522,6 +1527,7 @@ static const struct dbg_off dbg_wil_off[] = { 
WIL_FIELD(hw_version, S_IRUGO, doff_x32), WIL_FIELD(recovery_count, S_IRUGO, doff_u32), WIL_FIELD(ap_isolate, S_IRUGO, doff_u32), + WIL_FIELD(discovery_mode, S_IRUGO | S_IWUSR, doff_u8), {}, }; diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h index 44ff040e2fea..f662f7676a31 100644 --- a/drivers/net/wireless/ath/wil6210/wil6210.h +++ b/drivers/net/wireless/ath/wil6210/wil6210.h @@ -615,6 +615,7 @@ struct wil6210_priv { /* debugfs */ struct dentry *debug; struct debugfs_blob_wrapper blobs[ARRAY_SIZE(fw_mapping)]; + u8 discovery_mode; void *platform_handle; struct wil_platform_ops platform_ops; diff --git a/drivers/net/wireless/ath/wil6210/wmi.h b/drivers/net/wireless/ath/wil6210/wmi.h index 6e90e78f1554..430a4c09db59 100644 --- a/drivers/net/wireless/ath/wil6210/wmi.h +++ b/drivers/net/wireless/ath/wil6210/wmi.h @@ -286,16 +286,17 @@ struct wmi_delete_cipher_key_cmd { * - WMI_SCAN_COMPLETE_EVENTID */ enum wmi_scan_type { - WMI_LONG_SCAN = 0, + WMI_ACTIVE_SCAN = 0, WMI_SHORT_SCAN = 1, WMI_PBC_SCAN = 2, WMI_DIRECT_SCAN = 3, - WMI_ACTIVE_SCAN = 4, + WMI_LONG_SCAN = 4, }; struct wmi_start_scan_cmd { u8 direct_scan_mac_addr[6]; - u8 reserved[2]; + u8 discovery_mode; + u8 reserved; __le32 home_dwell_time; /* Max duration in the home channel(ms) */ __le32 force_scan_interval; /* Time interval between scans (ms)*/ u8 scan_type; /* wmi_scan_type */ From b874ddecae0a087aee024ef808c63060434a2d50 Mon Sep 17 00:00:00 2001 From: Lior David Date: Tue, 1 Mar 2016 19:18:09 +0200 Subject: [PATCH 0020/1649] wil6210: switch to generated wmi.h Switch to auto-generated version of wmi.h which is maintained by FW team. This will allow better sync between teams in the future and avoid bugs because of unexpected API changes. The wmi.h will have many differences but most are cosmetic. It also includes these real differences: 1. is_go parameter added to BCON_CTRL and START_PCP commands. 2. max_rx_pl_per_desc added to CFG_RX_CHAIN command. 3. various small API updates that are not currently used by driver. 
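Most of the mechanical churn below is the switch from the locally defined WMI header to the generated one. The layout is unchanged on the wire; only the field names move. The new struct shown here is inferred from how the converted code accesses it:

	/* old wil6210_mbox_hdr_wmi  ->  generated wmi_cmd_hdr
	 *	id		     ->	 command_id
	 *	timestamp	     ->	 fw_timestamp
	 */
	struct wmi_cmd_hdr {
		u8 mid;
		u8 reserved;
		__le16 command_id;
		__le32 fw_timestamp;
	} __packed;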
Signed-off-by: Lior David Signed-off-by: Maya Erez Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wil6210/cfg80211.c | 4 +- drivers/net/wireless/ath/wil6210/debugfs.c | 8 +- drivers/net/wireless/ath/wil6210/trace.h | 19 +- drivers/net/wireless/ath/wil6210/txrx.c | 4 +- drivers/net/wireless/ath/wil6210/wil6210.h | 21 +- drivers/net/wireless/ath/wil6210/wmi.c | 36 +- drivers/net/wireless/ath/wil6210/wmi.h | 1271 +++++++++---------- 7 files changed, 657 insertions(+), 706 deletions(-) diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c index 1ccf136b34e7..0c25e8beec3c 100644 --- a/drivers/net/wireless/ath/wil6210/cfg80211.c +++ b/drivers/net/wireless/ath/wil6210/cfg80211.c @@ -119,7 +119,7 @@ int wil_cid_fill_sinfo(struct wil6210_priv *wil, int cid, .interval_usec = 0, }; struct { - struct wil6210_mbox_hdr_wmi wmi; + struct wmi_cmd_hdr wmi; struct wmi_notify_req_done_event evt; } __packed reply; struct wil_net_stats *stats = &wil->sta[cid].stats; @@ -580,7 +580,7 @@ int wil_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev, struct ieee80211_mgmt *mgmt_frame = (void *)buf; struct wmi_sw_tx_req_cmd *cmd; struct { - struct wil6210_mbox_hdr_wmi wmi; + struct wmi_cmd_hdr wmi; struct wmi_sw_tx_complete_event evt; } __packed evt; diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c index 8b7e1fddb5bd..a4d3f70c3d29 100644 --- a/drivers/net/wireless/ath/wil6210/debugfs.c +++ b/drivers/net/wireless/ath/wil6210/debugfs.c @@ -826,9 +826,9 @@ static ssize_t wil_write_file_wmi(struct file *file, const char __user *buf, size_t len, loff_t *ppos) { struct wil6210_priv *wil = file->private_data; - struct wil6210_mbox_hdr_wmi *wmi; + struct wmi_cmd_hdr *wmi; void *cmd; - int cmdlen = len - sizeof(struct wil6210_mbox_hdr_wmi); + int cmdlen = len - sizeof(struct wmi_cmd_hdr); u16 cmdid; int rc, rc1; @@ -846,7 +846,7 @@ static ssize_t wil_write_file_wmi(struct file *file, const char __user *buf, } cmd = &wmi[1]; - cmdid = le16_to_cpu(wmi->id); + cmdid = le16_to_cpu(wmi->command_id); rc1 = wmi_send(wil, cmdid, cmd, cmdlen); kfree(wmi); @@ -990,7 +990,7 @@ static int wil_bf_debugfs_show(struct seq_file *s, void *data) .interval_usec = 0, }; struct { - struct wil6210_mbox_hdr_wmi wmi; + struct wmi_cmd_hdr wmi; struct wmi_notify_req_done_event evt; } __packed reply; diff --git a/drivers/net/wireless/ath/wil6210/trace.h b/drivers/net/wireless/ath/wil6210/trace.h index e59239d22b94..c4db2a9d9f7f 100644 --- a/drivers/net/wireless/ath/wil6210/trace.h +++ b/drivers/net/wireless/ath/wil6210/trace.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013 Qualcomm Atheros, Inc. + * Copyright (c) 2013-2016 Qualcomm Atheros, Inc. 
* * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -37,39 +37,40 @@ static inline void trace_ ## name(proto) {} #endif /* !CONFIG_WIL6210_TRACING || defined(__CHECKER__) */ DECLARE_EVENT_CLASS(wil6210_wmi, - TP_PROTO(struct wil6210_mbox_hdr_wmi *wmi, void *buf, u16 buf_len), + TP_PROTO(struct wmi_cmd_hdr *wmi, void *buf, u16 buf_len), TP_ARGS(wmi, buf, buf_len), TP_STRUCT__entry( __field(u8, mid) - __field(u16, id) - __field(u32, timestamp) + __field(u16, command_id) + __field(u32, fw_timestamp) __field(u16, buf_len) __dynamic_array(u8, buf, buf_len) ), TP_fast_assign( __entry->mid = wmi->mid; - __entry->id = le16_to_cpu(wmi->id); - __entry->timestamp = le32_to_cpu(wmi->timestamp); + __entry->command_id = le16_to_cpu(wmi->command_id); + __entry->fw_timestamp = le32_to_cpu(wmi->fw_timestamp); __entry->buf_len = buf_len; memcpy(__get_dynamic_array(buf), buf, buf_len); ), TP_printk( "MID %d id 0x%04x len %d timestamp %d", - __entry->mid, __entry->id, __entry->buf_len, __entry->timestamp + __entry->mid, __entry->command_id, __entry->buf_len, + __entry->fw_timestamp ) ); DEFINE_EVENT(wil6210_wmi, wil6210_wmi_cmd, - TP_PROTO(struct wil6210_mbox_hdr_wmi *wmi, void *buf, u16 buf_len), + TP_PROTO(struct wmi_cmd_hdr *wmi, void *buf, u16 buf_len), TP_ARGS(wmi, buf, buf_len) ); DEFINE_EVENT(wil6210_wmi, wil6210_wmi_event, - TP_PROTO(struct wil6210_mbox_hdr_wmi *wmi, void *buf, u16 buf_len), + TP_PROTO(struct wmi_cmd_hdr *wmi, void *buf, u16 buf_len), TP_ARGS(wmi, buf, buf_len) ); diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c index f383001b86aa..f260b232fd57 100644 --- a/drivers/net/wireless/ath/wil6210/txrx.c +++ b/drivers/net/wireless/ath/wil6210/txrx.c @@ -820,7 +820,7 @@ int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size, }, }; struct { - struct wil6210_mbox_hdr_wmi wmi; + struct wmi_cmd_hdr wmi; struct wmi_vring_cfg_done_event cmd; } __packed reply; struct vring *vring = &wil->vring_tx[id]; @@ -897,7 +897,7 @@ int wil_vring_init_bcast(struct wil6210_priv *wil, int id, int size) }, }; struct { - struct wil6210_mbox_hdr_wmi wmi; + struct wmi_cmd_hdr wmi; struct wmi_vring_cfg_done_event cmd; } __packed reply; struct vring *vring = &wil->vring_tx[id]; diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h index f662f7676a31..e69df0c1a125 100644 --- a/drivers/net/wireless/ath/wil6210/wil6210.h +++ b/drivers/net/wireless/ath/wil6210/wil6210.h @@ -22,6 +22,7 @@ #include #include #include +#include "wmi.h" #include "wil_platform.h" extern bool no_fw_recovery; @@ -334,29 +335,11 @@ struct wil6210_mbox_hdr { /* max. 
value for wil6210_mbox_hdr.len */ #define MAX_MBOXITEM_SIZE (240) -/** - * struct wil6210_mbox_hdr_wmi - WMI header - * - * @mid: MAC ID - * 00 - default, created by FW - * 01..0f - WiFi ports, driver to create - * 10..fe - debug - * ff - broadcast - * @id: command/event ID - * @timestamp: FW fills for events, free-running msec timer - */ -struct wil6210_mbox_hdr_wmi { - u8 mid; - u8 reserved; - __le16 id; - __le32 timestamp; -} __packed; - struct pending_wmi_event { struct list_head list; struct { struct wil6210_mbox_hdr hdr; - struct wil6210_mbox_hdr_wmi wmi; + struct wmi_cmd_hdr wmi; u8 data[0]; } __packed event; }; diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c index fb090350df6d..db7d2b602d1a 100644 --- a/drivers/net/wireless/ath/wil6210/wmi.c +++ b/drivers/net/wireless/ath/wil6210/wmi.c @@ -176,7 +176,7 @@ static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len) { struct { struct wil6210_mbox_hdr hdr; - struct wil6210_mbox_hdr_wmi wmi; + struct wmi_cmd_hdr wmi; } __packed cmd = { .hdr = { .type = WIL_MBOX_HDR_TYPE_WMI, @@ -185,7 +185,7 @@ static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len) }, .wmi = { .mid = 0, - .id = cpu_to_le16(cmdid), + .command_id = cpu_to_le16(cmdid), }, }; struct wil6210_mbox_ring *r = &wil->mbox_ctl.tx; @@ -656,7 +656,7 @@ static void wmi_evt_vring_en(struct wil6210_priv *wil, int id, void *d, int len) static void wmi_evt_ba_status(struct wil6210_priv *wil, int id, void *d, int len) { - struct wmi_vring_ba_status_event *evt = d; + struct wmi_ba_status_event *evt = d; struct vring_tx_data *txdata; wil_dbg_wmi(wil, "BACK[%d] %s {%d} timeout %d AMSDU%s\n", @@ -842,10 +842,10 @@ void wmi_recv_cmd(struct wil6210_priv *wil) offsetof(struct wil6210_mbox_ring_desc, sync), 0); /* indicate */ if ((hdr.type == WIL_MBOX_HDR_TYPE_WMI) && - (len >= sizeof(struct wil6210_mbox_hdr_wmi))) { - struct wil6210_mbox_hdr_wmi *wmi = &evt->event.wmi; - u16 id = le16_to_cpu(wmi->id); - u32 tstamp = le32_to_cpu(wmi->timestamp); + (len >= sizeof(struct wmi_cmd_hdr))) { + struct wmi_cmd_hdr *wmi = &evt->event.wmi; + u16 id = le16_to_cpu(wmi->command_id); + u32 tstamp = le32_to_cpu(wmi->fw_timestamp); spin_lock_irqsave(&wil->wmi_ev_lock, flags); if (wil->reply_id && wil->reply_id == id) { if (wil->reply_buf) { @@ -968,7 +968,7 @@ int wmi_pcp_start(struct wil6210_priv *wil, int bi, u8 wmi_nettype, .hidden_ssid = hidden_ssid, }; struct { - struct wil6210_mbox_hdr_wmi wmi; + struct wmi_cmd_hdr wmi; struct wmi_pcp_started_event evt; } __packed reply; @@ -1022,7 +1022,7 @@ int wmi_get_ssid(struct wil6210_priv *wil, u8 *ssid_len, void *ssid) { int rc; struct { - struct wil6210_mbox_hdr_wmi wmi; + struct wmi_cmd_hdr wmi; struct wmi_set_ssid_cmd cmd; } __packed reply; int len; /* reply.cmd.ssid_len in CPU order */ @@ -1055,7 +1055,7 @@ int wmi_get_channel(struct wil6210_priv *wil, int *channel) { int rc; struct { - struct wil6210_mbox_hdr_wmi wmi; + struct wmi_cmd_hdr wmi; struct wmi_set_pcp_channel_cmd cmd; } __packed reply; @@ -1163,7 +1163,7 @@ int wmi_rxon(struct wil6210_priv *wil, bool on) { int rc; struct { - struct wil6210_mbox_hdr_wmi wmi; + struct wmi_cmd_hdr wmi; struct wmi_listen_started_event evt; } __packed reply; @@ -1200,7 +1200,7 @@ int wmi_rx_chain_add(struct wil6210_priv *wil, struct vring *vring) .host_thrsh = cpu_to_le16(rx_ring_overflow_thrsh), }; struct { - struct wil6210_mbox_hdr_wmi wmi; + struct wmi_cmd_hdr wmi; struct wmi_cfg_rx_chain_done_event evt; } __packed evt; int rc; @@ 
-1254,7 +1254,7 @@ int wmi_get_temperature(struct wil6210_priv *wil, u32 *t_bb, u32 *t_rf) .measure_mode = cpu_to_le32(TEMPERATURE_MEASURE_NOW), }; struct { - struct wil6210_mbox_hdr_wmi wmi; + struct wmi_cmd_hdr wmi; struct wmi_temp_sense_done_event evt; } __packed reply; @@ -1280,7 +1280,7 @@ int wmi_disconnect_sta(struct wil6210_priv *wil, const u8 *mac, u16 reason, .disconnect_reason = cpu_to_le16(reason), }; struct { - struct wil6210_mbox_hdr_wmi wmi; + struct wmi_cmd_hdr wmi; struct wmi_disconnect_event evt; } __packed reply; @@ -1372,7 +1372,7 @@ int wmi_addba_rx_resp(struct wil6210_priv *wil, u8 cid, u8 tid, u8 token, .ba_timeout = cpu_to_le16(timeout), }; struct { - struct wil6210_mbox_hdr_wmi wmi; + struct wmi_cmd_hdr wmi; struct wmi_rcp_addba_resp_sent_event evt; } __packed reply; @@ -1428,10 +1428,10 @@ static void wmi_event_handle(struct wil6210_priv *wil, u16 len = le16_to_cpu(hdr->len); if ((hdr->type == WIL_MBOX_HDR_TYPE_WMI) && - (len >= sizeof(struct wil6210_mbox_hdr_wmi))) { - struct wil6210_mbox_hdr_wmi *wmi = (void *)(&hdr[1]); + (len >= sizeof(struct wmi_cmd_hdr))) { + struct wmi_cmd_hdr *wmi = (void *)(&hdr[1]); void *evt_data = (void *)(&wmi[1]); - u16 id = le16_to_cpu(wmi->id); + u16 id = le16_to_cpu(wmi->command_id); wil_dbg_wmi(wil, "Handle WMI 0x%04x (reply_id 0x%04x)\n", id, wil->reply_id); diff --git a/drivers/net/wireless/ath/wil6210/wmi.h b/drivers/net/wireless/ath/wil6210/wmi.h index 430a4c09db59..29865e0b5203 100644 --- a/drivers/net/wireless/ath/wil6210/wmi.h +++ b/drivers/net/wireless/ath/wil6210/wmi.h @@ -1,6 +1,6 @@ /* - * Copyright (c) 2012-2015 Qualcomm Atheros, Inc. - * Copyright (c) 2006-2012 Wilocity . + * Copyright (c) 2012-2016 Qualcomm Atheros, Inc. + * Copyright (c) 2006-2012 Wilocity * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -17,187 +17,197 @@ /* * This file contains the definitions of the WMI protocol specified in the - * Wireless Module Interface (WMI) for the Wilocity - * MARLON 60 Gigabit wireless solution. + * Wireless Module Interface (WMI) for the Qualcomm + * 60 GHz wireless solution. * It includes definitions of all the commands and events. * Commands are messages from the host to the WM. * Events are messages from the WM to the host. + * + * This is an automatically generated file. 
*/ #ifndef __WILOCITY_WMI_H__ #define __WILOCITY_WMI_H__ /* General */ -#define WILOCITY_MAX_ASSOC_STA (8) -#define WILOCITY_DEFAULT_ASSOC_STA (1) -#define WMI_MAC_LEN (6) -#define WMI_PROX_RANGE_NUM (3) -#define WMI_MAX_LOSS_DMG_BEACONS (32) +#define WMI_MAX_ASSOC_STA (8) +#define WMI_DEFAULT_ASSOC_STA (1) +#define WMI_MAC_LEN (6) +#define WMI_PROX_RANGE_NUM (3) +#define WMI_MAX_LOSS_DMG_BEACONS (20) + +/* Mailbox interface + * used for commands and events + */ +enum wmi_mid { + MID_DEFAULT = 0x00, + FIRST_DBG_MID_ID = 0x10, + LAST_DBG_MID_ID = 0xFE, + MID_BROADCAST = 0xFF, +}; + +/* WMI_CMD_HDR */ +struct wmi_cmd_hdr { + u8 mid; + u8 reserved; + __le16 command_id; + __le32 fw_timestamp; +} __packed; /* List of Commands */ enum wmi_command_id { - WMI_CONNECT_CMDID = 0x0001, - WMI_DISCONNECT_CMDID = 0x0003, - WMI_DISCONNECT_STA_CMDID = 0x0004, - WMI_START_SCAN_CMDID = 0x0007, - WMI_SET_BSS_FILTER_CMDID = 0x0009, - WMI_SET_PROBED_SSID_CMDID = 0x000a, - WMI_SET_LISTEN_INT_CMDID = 0x000b, - WMI_BCON_CTRL_CMDID = 0x000f, - WMI_ADD_CIPHER_KEY_CMDID = 0x0016, - WMI_DELETE_CIPHER_KEY_CMDID = 0x0017, - WMI_SET_APPIE_CMDID = 0x003f, - WMI_SET_WSC_STATUS_CMDID = 0x0041, - WMI_PXMT_RANGE_CFG_CMDID = 0x0042, - WMI_PXMT_SNR2_RANGE_CFG_CMDID = 0x0043, -/* WMI_FAST_MEM_ACC_MODE_CMDID = 0x0300, */ - WMI_MEM_READ_CMDID = 0x0800, - WMI_MEM_WR_CMDID = 0x0801, - WMI_ECHO_CMDID = 0x0803, - WMI_DEEP_ECHO_CMDID = 0x0804, - WMI_CONFIG_MAC_CMDID = 0x0805, - WMI_CONFIG_PHY_DEBUG_CMDID = 0x0806, - WMI_ADD_DEBUG_TX_PCKT_CMDID = 0x0808, - WMI_PHY_GET_STATISTICS_CMDID = 0x0809, - WMI_FS_TUNE_CMDID = 0x080a, - WMI_CORR_MEASURE_CMDID = 0x080b, - WMI_READ_RSSI_CMDID = 0x080c, - WMI_TEMP_SENSE_CMDID = 0x080e, - WMI_DC_CALIB_CMDID = 0x080f, - WMI_SEND_TONE_CMDID = 0x0810, - WMI_IQ_TX_CALIB_CMDID = 0x0811, - WMI_IQ_RX_CALIB_CMDID = 0x0812, - WMI_SET_UCODE_IDLE_CMDID = 0x0813, - WMI_SET_WORK_MODE_CMDID = 0x0815, - WMI_LO_LEAKAGE_CALIB_CMDID = 0x0816, - WMI_MARLON_R_READ_CMDID = 0x0818, - WMI_MARLON_R_WRITE_CMDID = 0x0819, - WMI_MARLON_R_TXRX_SEL_CMDID = 0x081a, - MAC_IO_STATIC_PARAMS_CMDID = 0x081b, - MAC_IO_DYNAMIC_PARAMS_CMDID = 0x081c, - WMI_SILENT_RSSI_CALIB_CMDID = 0x081d, - WMI_RF_RX_TEST_CMDID = 0x081e, - WMI_CFG_RX_CHAIN_CMDID = 0x0820, - WMI_VRING_CFG_CMDID = 0x0821, - WMI_BCAST_VRING_CFG_CMDID = 0x0822, - WMI_VRING_BA_EN_CMDID = 0x0823, - WMI_VRING_BA_DIS_CMDID = 0x0824, - WMI_RCP_ADDBA_RESP_CMDID = 0x0825, - WMI_RCP_DELBA_CMDID = 0x0826, - WMI_SET_SSID_CMDID = 0x0827, - WMI_GET_SSID_CMDID = 0x0828, - WMI_SET_PCP_CHANNEL_CMDID = 0x0829, - WMI_GET_PCP_CHANNEL_CMDID = 0x082a, - WMI_SW_TX_REQ_CMDID = 0x082b, - WMI_READ_MAC_RXQ_CMDID = 0x0830, - WMI_READ_MAC_TXQ_CMDID = 0x0831, - WMI_WRITE_MAC_RXQ_CMDID = 0x0832, - WMI_WRITE_MAC_TXQ_CMDID = 0x0833, - WMI_WRITE_MAC_XQ_FIELD_CMDID = 0x0834, - WMI_MLME_PUSH_CMDID = 0x0835, - WMI_BEAMFORMING_MGMT_CMDID = 0x0836, - WMI_BF_TXSS_MGMT_CMDID = 0x0837, - WMI_BF_SM_MGMT_CMDID = 0x0838, - WMI_BF_RXSS_MGMT_CMDID = 0x0839, - WMI_BF_TRIG_CMDID = 0x083A, - WMI_SET_SECTORS_CMDID = 0x0849, - WMI_MAINTAIN_PAUSE_CMDID = 0x0850, - WMI_MAINTAIN_RESUME_CMDID = 0x0851, - WMI_RS_MGMT_CMDID = 0x0852, - WMI_RF_MGMT_CMDID = 0x0853, - WMI_THERMAL_THROTTLING_CTRL_CMDID = 0x0854, - WMI_THERMAL_THROTTLING_GET_STATUS_CMDID = 0x0855, + WMI_CONNECT_CMDID = 0x01, + WMI_DISCONNECT_CMDID = 0x03, + WMI_DISCONNECT_STA_CMDID = 0x04, + WMI_START_SCAN_CMDID = 0x07, + WMI_SET_BSS_FILTER_CMDID = 0x09, + WMI_SET_PROBED_SSID_CMDID = 0x0A, + WMI_SET_LISTEN_INT_CMDID = 0x0B, + WMI_BCON_CTRL_CMDID = 0x0F, + 
WMI_ADD_CIPHER_KEY_CMDID = 0x16, + WMI_DELETE_CIPHER_KEY_CMDID = 0x17, + WMI_PCP_CONF_CMDID = 0x18, + WMI_SET_APPIE_CMDID = 0x3F, + WMI_SET_WSC_STATUS_CMDID = 0x41, + WMI_PXMT_RANGE_CFG_CMDID = 0x42, + WMI_PXMT_SNR2_RANGE_CFG_CMDID = 0x43, + WMI_MEM_READ_CMDID = 0x800, + WMI_MEM_WR_CMDID = 0x801, + WMI_ECHO_CMDID = 0x803, + WMI_DEEP_ECHO_CMDID = 0x804, + WMI_CONFIG_MAC_CMDID = 0x805, + WMI_CONFIG_PHY_DEBUG_CMDID = 0x806, + WMI_ADD_DEBUG_TX_PCKT_CMDID = 0x808, + WMI_PHY_GET_STATISTICS_CMDID = 0x809, + WMI_FS_TUNE_CMDID = 0x80A, + WMI_CORR_MEASURE_CMDID = 0x80B, + WMI_READ_RSSI_CMDID = 0x80C, + WMI_TEMP_SENSE_CMDID = 0x80E, + WMI_DC_CALIB_CMDID = 0x80F, + WMI_SEND_TONE_CMDID = 0x810, + WMI_IQ_TX_CALIB_CMDID = 0x811, + WMI_IQ_RX_CALIB_CMDID = 0x812, + WMI_SET_UCODE_IDLE_CMDID = 0x813, + WMI_SET_WORK_MODE_CMDID = 0x815, + WMI_LO_LEAKAGE_CALIB_CMDID = 0x816, + WMI_MARLON_R_READ_CMDID = 0x818, + WMI_MARLON_R_WRITE_CMDID = 0x819, + WMI_MARLON_R_TXRX_SEL_CMDID = 0x81A, + MAC_IO_STATIC_PARAMS_CMDID = 0x81B, + MAC_IO_DYNAMIC_PARAMS_CMDID = 0x81C, + WMI_SILENT_RSSI_CALIB_CMDID = 0x81D, + WMI_RF_RX_TEST_CMDID = 0x81E, + WMI_CFG_RX_CHAIN_CMDID = 0x820, + WMI_VRING_CFG_CMDID = 0x821, + WMI_BCAST_VRING_CFG_CMDID = 0x822, + WMI_VRING_BA_EN_CMDID = 0x823, + WMI_VRING_BA_DIS_CMDID = 0x824, + WMI_RCP_ADDBA_RESP_CMDID = 0x825, + WMI_RCP_DELBA_CMDID = 0x826, + WMI_SET_SSID_CMDID = 0x827, + WMI_GET_SSID_CMDID = 0x828, + WMI_SET_PCP_CHANNEL_CMDID = 0x829, + WMI_GET_PCP_CHANNEL_CMDID = 0x82A, + WMI_SW_TX_REQ_CMDID = 0x82B, + WMI_READ_MAC_RXQ_CMDID = 0x830, + WMI_READ_MAC_TXQ_CMDID = 0x831, + WMI_WRITE_MAC_RXQ_CMDID = 0x832, + WMI_WRITE_MAC_TXQ_CMDID = 0x833, + WMI_WRITE_MAC_XQ_FIELD_CMDID = 0x834, + WMI_MLME_PUSH_CMDID = 0x835, + WMI_BEAMFORMING_MGMT_CMDID = 0x836, + WMI_BF_TXSS_MGMT_CMDID = 0x837, + WMI_BF_SM_MGMT_CMDID = 0x838, + WMI_BF_RXSS_MGMT_CMDID = 0x839, + WMI_BF_TRIG_CMDID = 0x83A, + WMI_SET_SECTORS_CMDID = 0x849, + WMI_MAINTAIN_PAUSE_CMDID = 0x850, + WMI_MAINTAIN_RESUME_CMDID = 0x851, + WMI_RS_MGMT_CMDID = 0x852, + WMI_RF_MGMT_CMDID = 0x853, + WMI_THERMAL_THROTTLING_CTRL_CMDID = 0x854, + WMI_THERMAL_THROTTLING_GET_STATUS_CMDID = 0x855, + WMI_OTP_READ_CMDID = 0x856, + WMI_OTP_WRITE_CMDID = 0x857, /* Performance monitoring commands */ - WMI_BF_CTRL_CMDID = 0x0862, - WMI_NOTIFY_REQ_CMDID = 0x0863, - WMI_GET_STATUS_CMDID = 0x0864, - WMI_UNIT_TEST_CMDID = 0x0900, - WMI_HICCUP_CMDID = 0x0901, - WMI_FLASH_READ_CMDID = 0x0902, - WMI_FLASH_WRITE_CMDID = 0x0903, - WMI_SECURITY_UNIT_TEST_CMDID = 0x0904, - /*P2P*/ - WMI_P2P_CFG_CMDID = 0x0910, - WMI_PORT_ALLOCATE_CMDID = 0x0911, - WMI_PORT_DELETE_CMDID = 0x0912, - WMI_POWER_MGMT_CFG_CMDID = 0x0913, - WMI_START_LISTEN_CMDID = 0x0914, - WMI_START_SEARCH_CMDID = 0x0915, - WMI_DISCOVERY_START_CMDID = 0x0916, - WMI_DISCOVERY_STOP_CMDID = 0x0917, - WMI_PCP_START_CMDID = 0x0918, - WMI_PCP_STOP_CMDID = 0x0919, - WMI_GET_PCP_FACTOR_CMDID = 0x091b, - - WMI_SET_MAC_ADDRESS_CMDID = 0xf003, - WMI_ABORT_SCAN_CMDID = 0xf007, - WMI_SET_PMK_CMDID = 0xf028, - - WMI_SET_PROMISCUOUS_MODE_CMDID = 0xf041, - WMI_GET_PMK_CMDID = 0xf048, - WMI_SET_PASSPHRASE_CMDID = 0xf049, - WMI_SEND_ASSOC_RES_CMDID = 0xf04a, - WMI_SET_ASSOC_REQ_RELAY_CMDID = 0xf04b, - WMI_EAPOL_TX_CMDID = 0xf04c, - WMI_MAC_ADDR_REQ_CMDID = 0xf04d, - WMI_FW_VER_CMDID = 0xf04e, - WMI_PMC_CMDID = 0xf04f, + WMI_BF_CTRL_CMDID = 0x862, + WMI_NOTIFY_REQ_CMDID = 0x863, + WMI_GET_STATUS_CMDID = 0x864, + WMI_UNIT_TEST_CMDID = 0x900, + WMI_HICCUP_CMDID = 0x901, + WMI_FLASH_READ_CMDID = 0x902, + WMI_FLASH_WRITE_CMDID = 0x903, + /* 
P2P */ + WMI_P2P_CFG_CMDID = 0x910, + WMI_PORT_ALLOCATE_CMDID = 0x911, + WMI_PORT_DELETE_CMDID = 0x912, + WMI_POWER_MGMT_CFG_CMDID = 0x913, + WMI_START_LISTEN_CMDID = 0x914, + WMI_START_SEARCH_CMDID = 0x915, + WMI_DISCOVERY_START_CMDID = 0x916, + WMI_DISCOVERY_STOP_CMDID = 0x917, + WMI_PCP_START_CMDID = 0x918, + WMI_PCP_STOP_CMDID = 0x919, + WMI_GET_PCP_FACTOR_CMDID = 0x91B, + WMI_SET_MAC_ADDRESS_CMDID = 0xF003, + WMI_ABORT_SCAN_CMDID = 0xF007, + WMI_SET_PROMISCUOUS_MODE_CMDID = 0xF041, + WMI_GET_PMK_CMDID = 0xF048, + WMI_SET_PASSPHRASE_CMDID = 0xF049, + WMI_SEND_ASSOC_RES_CMDID = 0xF04A, + WMI_SET_ASSOC_REQ_RELAY_CMDID = 0xF04B, + WMI_MAC_ADDR_REQ_CMDID = 0xF04D, + WMI_FW_VER_CMDID = 0xF04E, + WMI_PMC_CMDID = 0xF04F, }; -/* - * Commands data structures - */ - -/* - * WMI_CONNECT_CMDID - */ +/* WMI_CONNECT_CMDID */ enum wmi_network_type { WMI_NETTYPE_INFRA = 0x01, WMI_NETTYPE_ADHOC = 0x02, WMI_NETTYPE_ADHOC_CREATOR = 0x04, WMI_NETTYPE_AP = 0x10, WMI_NETTYPE_P2P = 0x20, - WMI_NETTYPE_WBE = 0x40, /* PCIE over 60g */ + /* PCIE over 60g */ + WMI_NETTYPE_WBE = 0x40, }; enum wmi_dot11_auth_mode { - WMI_AUTH11_OPEN = 0x01, - WMI_AUTH11_SHARED = 0x02, - WMI_AUTH11_LEAP = 0x04, - WMI_AUTH11_WSC = 0x08, + WMI_AUTH11_OPEN = 0x01, + WMI_AUTH11_SHARED = 0x02, + WMI_AUTH11_LEAP = 0x04, + WMI_AUTH11_WSC = 0x08, }; enum wmi_auth_mode { - WMI_AUTH_NONE = 0x01, - WMI_AUTH_WPA = 0x02, - WMI_AUTH_WPA2 = 0x04, - WMI_AUTH_WPA_PSK = 0x08, - WMI_AUTH_WPA2_PSK = 0x10, - WMI_AUTH_WPA_CCKM = 0x20, - WMI_AUTH_WPA2_CCKM = 0x40, + WMI_AUTH_NONE = 0x01, + WMI_AUTH_WPA = 0x02, + WMI_AUTH_WPA2 = 0x04, + WMI_AUTH_WPA_PSK = 0x08, + WMI_AUTH_WPA2_PSK = 0x10, + WMI_AUTH_WPA_CCKM = 0x20, + WMI_AUTH_WPA2_CCKM = 0x40, }; enum wmi_crypto_type { - WMI_CRYPT_NONE = 0x01, - WMI_CRYPT_WEP = 0x02, - WMI_CRYPT_TKIP = 0x04, - WMI_CRYPT_AES = 0x08, - WMI_CRYPT_AES_GCMP = 0x20, + WMI_CRYPT_NONE = 0x01, + WMI_CRYPT_AES_GCMP = 0x20, }; enum wmi_connect_ctrl_flag_bits { - WMI_CONNECT_ASSOC_POLICY_USER = 0x0001, - WMI_CONNECT_SEND_REASSOC = 0x0002, - WMI_CONNECT_IGNORE_WPA_GROUP_CIPHER = 0x0004, - WMI_CONNECT_PROFILE_MATCH_DONE = 0x0008, - WMI_CONNECT_IGNORE_AAC_BEACON = 0x0010, - WMI_CONNECT_CSA_FOLLOW_BSS = 0x0020, - WMI_CONNECT_DO_WPA_OFFLOAD = 0x0040, - WMI_CONNECT_DO_NOT_DEAUTH = 0x0080, + WMI_CONNECT_ASSOC_POLICY_USER = 0x01, + WMI_CONNECT_SEND_REASSOC = 0x02, + WMI_CONNECT_IGNORE_WPA_GROUP_CIPHER = 0x04, + WMI_CONNECT_PROFILE_MATCH_DONE = 0x08, + WMI_CONNECT_IGNORE_AAC_BEACON = 0x10, + WMI_CONNECT_CSA_FOLLOW_BSS = 0x20, + WMI_CONNECT_DO_WPA_OFFLOAD = 0x40, + WMI_CONNECT_DO_NOT_DEAUTH = 0x80, }; -#define WMI_MAX_SSID_LEN (32) +#define WMI_MAX_SSID_LEN (32) +/* WMI_CONNECT_CMDID */ struct wmi_connect_cmd { u8 network_type; u8 dot11_auth_mode; @@ -216,31 +226,17 @@ struct wmi_connect_cmd { u8 reserved1[2]; } __packed; -/* - * WMI_DISCONNECT_STA_CMDID - */ +/* WMI_DISCONNECT_STA_CMDID */ struct wmi_disconnect_sta_cmd { u8 dst_mac[WMI_MAC_LEN]; __le16 disconnect_reason; } __packed; -/* - * WMI_SET_PMK_CMDID - */ - -#define WMI_MIN_KEY_INDEX (0) #define WMI_MAX_KEY_INDEX (3) #define WMI_MAX_KEY_LEN (32) #define WMI_PASSPHRASE_LEN (64) -#define WMI_PMK_LEN (32) -struct wmi_set_pmk_cmd { - u8 pmk[WMI_PMK_LEN]; -} __packed; - -/* - * WMI_SET_PASSPHRASE_CMDID - */ +/* WMI_SET_PASSPHRASE_CMDID */ struct wmi_set_passphrase_cmd { u8 ssid[WMI_MAX_SSID_LEN]; u8 passphrase[WMI_PASSPHRASE_LEN]; @@ -248,36 +244,34 @@ struct wmi_set_passphrase_cmd { u8 passphrase_len; } __packed; -/* - * WMI_ADD_CIPHER_KEY_CMDID - */ +/* WMI_ADD_CIPHER_KEY_CMDID */ enum 
wmi_key_usage { - WMI_KEY_USE_PAIRWISE = 0, - WMI_KEY_USE_RX_GROUP = 1, - WMI_KEY_USE_TX_GROUP = 2, + WMI_KEY_USE_PAIRWISE = 0x00, + WMI_KEY_USE_RX_GROUP = 0x01, + WMI_KEY_USE_TX_GROUP = 0x02, }; struct wmi_add_cipher_key_cmd { u8 key_index; u8 key_type; - u8 key_usage; /* enum wmi_key_usage */ + /* enum wmi_key_usage */ + u8 key_usage; u8 key_len; - u8 key_rsc[8]; /* key replay sequence counter */ + /* key replay sequence counter */ + u8 key_rsc[8]; u8 key[WMI_MAX_KEY_LEN]; - u8 key_op_ctrl; /* Additional Key Control information */ + /* Additional Key Control information */ + u8 key_op_ctrl; u8 mac[WMI_MAC_LEN]; } __packed; -/* - * WMI_DELETE_CIPHER_KEY_CMDID - */ +/* WMI_DELETE_CIPHER_KEY_CMDID */ struct wmi_delete_cipher_key_cmd { u8 key_index; u8 mac[WMI_MAC_LEN]; } __packed; -/* - * WMI_START_SCAN_CMDID +/* WMI_START_SCAN_CMDID * * Start L1 scan operation * @@ -286,147 +280,142 @@ struct wmi_delete_cipher_key_cmd { * - WMI_SCAN_COMPLETE_EVENTID */ enum wmi_scan_type { - WMI_ACTIVE_SCAN = 0, - WMI_SHORT_SCAN = 1, - WMI_PBC_SCAN = 2, - WMI_DIRECT_SCAN = 3, - WMI_LONG_SCAN = 4, + WMI_ACTIVE_SCAN = 0x00, + WMI_SHORT_SCAN = 0x01, + WMI_PASSIVE_SCAN = 0x02, + WMI_DIRECT_SCAN = 0x03, + WMI_LONG_SCAN = 0x04, }; +/* WMI_START_SCAN_CMDID */ struct wmi_start_scan_cmd { - u8 direct_scan_mac_addr[6]; + u8 direct_scan_mac_addr[WMI_MAC_LEN]; + /* DMG Beacon frame is transmitted during active scanning */ u8 discovery_mode; + /* reserved */ u8 reserved; - __le32 home_dwell_time; /* Max duration in the home channel(ms) */ - __le32 force_scan_interval; /* Time interval between scans (ms)*/ - u8 scan_type; /* wmi_scan_type */ - u8 num_channels; /* how many channels follow */ + /* Max duration in the home channel(ms) */ + __le32 dwell_time; + /* Time interval between scans (ms) */ + __le32 force_scan_interval; + /* enum wmi_scan_type */ + u8 scan_type; + /* how many channels follow */ + u8 num_channels; + /* channels ID's: + * 0 - 58320 MHz + * 1 - 60480 MHz + * 2 - 62640 MHz + */ struct { u8 channel; u8 reserved; - } channel_list[0]; /* channels ID's */ - /* 0 - 58320 MHz */ - /* 1 - 60480 MHz */ - /* 2 - 62640 MHz */ + } channel_list[0]; } __packed; -/* - * WMI_SET_PROBED_SSID_CMDID - */ +/* WMI_SET_PROBED_SSID_CMDID */ #define MAX_PROBED_SSID_INDEX (3) enum wmi_ssid_flag { - WMI_SSID_FLAG_DISABLE = 0, /* disables entry */ - WMI_SSID_FLAG_SPECIFIC = 1, /* probes specified ssid */ - WMI_SSID_FLAG_ANY = 2, /* probes for any ssid */ + /* disables entry */ + WMI_SSID_FLAG_DISABLE = 0x00, + /* probes specified ssid */ + WMI_SSID_FLAG_SPECIFIC = 0x01, + /* probes for any ssid */ + WMI_SSID_FLAG_ANY = 0x02, }; struct wmi_probed_ssid_cmd { - u8 entry_index; /* 0 to MAX_PROBED_SSID_INDEX */ - u8 flag; /* enum wmi_ssid_flag */ + /* 0 to MAX_PROBED_SSID_INDEX */ + u8 entry_index; + /* enum wmi_ssid_flag */ + u8 flag; u8 ssid_len; u8 ssid[WMI_MAX_SSID_LEN]; } __packed; -/* - * WMI_SET_APPIE_CMDID +/* WMI_SET_APPIE_CMDID * Add Application specified IE to a management frame */ -#define WMI_MAX_IE_LEN (1024) +#define WMI_MAX_IE_LEN (1024) -/* - * Frame Types - */ +/* Frame Types */ enum wmi_mgmt_frame_type { - WMI_FRAME_BEACON = 0, - WMI_FRAME_PROBE_REQ = 1, - WMI_FRAME_PROBE_RESP = 2, - WMI_FRAME_ASSOC_REQ = 3, - WMI_FRAME_ASSOC_RESP = 4, - WMI_NUM_MGMT_FRAME, + WMI_FRAME_BEACON = 0x00, + WMI_FRAME_PROBE_REQ = 0x01, + WMI_FRAME_PROBE_RESP = 0x02, + WMI_FRAME_ASSOC_REQ = 0x03, + WMI_FRAME_ASSOC_RESP = 0x04, + WMI_NUM_MGMT_FRAME = 0x05, }; struct wmi_set_appie_cmd { - u8 mgmt_frm_type; /* enum wmi_mgmt_frame_type */ + 
/* enum wmi_mgmt_frame_type */ + u8 mgmt_frm_type; u8 reserved; - __le16 ie_len; /* Length of the IE to be added to MGMT frame */ + /* Length of the IE to be added to MGMT frame */ + __le16 ie_len; u8 ie_info[0]; } __packed; -/* - * WMI_PXMT_RANGE_CFG_CMDID - */ +/* WMI_PXMT_RANGE_CFG_CMDID */ struct wmi_pxmt_range_cfg_cmd { u8 dst_mac[WMI_MAC_LEN]; __le16 range; } __packed; -/* - * WMI_PXMT_SNR2_RANGE_CFG_CMDID - */ +/* WMI_PXMT_SNR2_RANGE_CFG_CMDID */ struct wmi_pxmt_snr2_range_cfg_cmd { - s8 snr2range_arr[WMI_PROX_RANGE_NUM-1]; + s8 snr2range_arr[2]; } __packed; -/* - * WMI_RF_MGMT_CMDID - */ +/* WMI_RF_MGMT_CMDID */ enum wmi_rf_mgmt_type { - WMI_RF_MGMT_W_DISABLE = 0, - WMI_RF_MGMT_W_ENABLE = 1, - WMI_RF_MGMT_GET_STATUS = 2, + WMI_RF_MGMT_W_DISABLE = 0x00, + WMI_RF_MGMT_W_ENABLE = 0x01, + WMI_RF_MGMT_GET_STATUS = 0x02, }; +/* WMI_RF_MGMT_CMDID */ struct wmi_rf_mgmt_cmd { __le32 rf_mgmt_type; } __packed; -/* - * WMI_THERMAL_THROTTLING_CTRL_CMDID - */ +/* WMI_THERMAL_THROTTLING_CTRL_CMDID */ #define THERMAL_THROTTLING_USE_DEFAULT_MAX_TXOP_LENGTH (0xFFFFFFFF) +/* WMI_THERMAL_THROTTLING_CTRL_CMDID */ struct wmi_thermal_throttling_ctrl_cmd { __le32 time_on_usec; __le32 time_off_usec; __le32 max_txop_length_usec; } __packed; -/* - * WMI_RF_RX_TEST_CMDID - */ +/* WMI_RF_RX_TEST_CMDID */ struct wmi_rf_rx_test_cmd { __le32 sector; } __packed; -/* - * WMI_CORR_MEASURE_CMDID - */ +/* WMI_CORR_MEASURE_CMDID */ struct wmi_corr_measure_cmd { - s32 freq_mhz; + __le32 freq_mhz; __le32 length_samples; __le32 iterations; } __packed; -/* - * WMI_SET_SSID_CMDID - */ +/* WMI_SET_SSID_CMDID */ struct wmi_set_ssid_cmd { __le32 ssid_len; u8 ssid[WMI_MAX_SSID_LEN]; } __packed; -/* - * WMI_SET_PCP_CHANNEL_CMDID - */ +/* WMI_SET_PCP_CHANNEL_CMDID */ struct wmi_set_pcp_channel_cmd { u8 channel; u8 reserved[3]; } __packed; -/* - * WMI_BCON_CTRL_CMDID - */ +/* WMI_BCON_CTRL_CMDID */ struct wmi_bcon_ctrl_cmd { __le16 bcon_interval; __le16 frag_num; @@ -435,214 +424,192 @@ struct wmi_bcon_ctrl_cmd { u8 pcp_max_assoc_sta; u8 disable_sec_offload; u8 disable_sec; + u8 hidden_ssid; + u8 is_go; + u8 reserved[2]; } __packed; -/******* P2P ***********/ - -/* - * WMI_PORT_ALLOCATE_CMDID - */ +/* WMI_PORT_ALLOCATE_CMDID */ enum wmi_port_role { - WMI_PORT_STA = 0, - WMI_PORT_PCP = 1, - WMI_PORT_AP = 2, - WMI_PORT_P2P_DEV = 3, - WMI_PORT_P2P_CLIENT = 4, - WMI_PORT_P2P_GO = 5, + WMI_PORT_STA = 0x00, + WMI_PORT_PCP = 0x01, + WMI_PORT_AP = 0x02, + WMI_PORT_P2P_DEV = 0x03, + WMI_PORT_P2P_CLIENT = 0x04, + WMI_PORT_P2P_GO = 0x05, }; +/* WMI_PORT_ALLOCATE_CMDID */ struct wmi_port_allocate_cmd { u8 mac[WMI_MAC_LEN]; u8 port_role; u8 mid; } __packed; -/* - * WMI_PORT_DELETE_CMDID - */ -struct wmi_delete_port_cmd { +/* WMI_PORT_DELETE_CMDID */ +struct wmi_port_delete_cmd { u8 mid; u8 reserved[3]; } __packed; -/* - * WMI_P2P_CFG_CMDID - */ +/* WMI_P2P_CFG_CMDID */ enum wmi_discovery_mode { - WMI_DISCOVERY_MODE_NON_OFFLOAD = 0, - WMI_DISCOVERY_MODE_OFFLOAD = 1, - WMI_DISCOVERY_MODE_PEER2PEER = 2, + WMI_DISCOVERY_MODE_NON_OFFLOAD = 0x00, + WMI_DISCOVERY_MODE_OFFLOAD = 0x01, + WMI_DISCOVERY_MODE_PEER2PEER = 0x02, }; struct wmi_p2p_cfg_cmd { - u8 discovery_mode; /* wmi_discovery_mode */ + /* enum wmi_discovery_mode */ + u8 discovery_mode; u8 channel; - __le16 bcon_interval; /* base to listen/search duration calculation */ + /* base to listen/search duration calculation */ + __le16 bcon_interval; } __packed; -/* - * WMI_POWER_MGMT_CFG_CMDID - */ +/* WMI_POWER_MGMT_CFG_CMDID */ enum wmi_power_source_type { - WMI_POWER_SOURCE_BATTERY = 0, - 
WMI_POWER_SOURCE_OTHER = 1, + WMI_POWER_SOURCE_BATTERY = 0x00, + WMI_POWER_SOURCE_OTHER = 0x01, }; struct wmi_power_mgmt_cfg_cmd { - u8 power_source; /* wmi_power_source_type */ + /* enum wmi_power_source_type */ + u8 power_source; u8 reserved[3]; } __packed; -/* - * WMI_PCP_START_CMDID - */ - -enum wmi_hidden_ssid { - WMI_HIDDEN_SSID_DISABLED = 0, - WMI_HIDDEN_SSID_SEND_EMPTY = 1, - WMI_HIDDEN_SSID_CLEAR = 2, -}; - +/* WMI_PCP_START_CMDID */ struct wmi_pcp_start_cmd { __le16 bcon_interval; u8 pcp_max_assoc_sta; u8 hidden_ssid; - u8 reserved0[8]; + u8 is_go; + u8 reserved0[7]; u8 network_type; u8 channel; u8 disable_sec_offload; u8 disable_sec; } __packed; -/* - * WMI_SW_TX_REQ_CMDID - */ +/* WMI_SW_TX_REQ_CMDID */ struct wmi_sw_tx_req_cmd { u8 dst_mac[WMI_MAC_LEN]; __le16 len; u8 payload[0]; } __packed; -/* - * WMI_VRING_CFG_CMDID - */ - struct wmi_sw_ring_cfg { __le64 ring_mem_base; __le16 ring_size; __le16 max_mpdu_size; } __packed; +/* wmi_vring_cfg_schd */ struct wmi_vring_cfg_schd { __le16 priority; __le16 timeslot_us; } __packed; enum wmi_vring_cfg_encap_trans_type { - WMI_VRING_ENC_TYPE_802_3 = 0, - WMI_VRING_ENC_TYPE_NATIVE_WIFI = 1, + WMI_VRING_ENC_TYPE_802_3 = 0x00, + WMI_VRING_ENC_TYPE_NATIVE_WIFI = 0x01, }; enum wmi_vring_cfg_ds_cfg { - WMI_VRING_DS_PBSS = 0, - WMI_VRING_DS_STATION = 1, - WMI_VRING_DS_AP = 2, - WMI_VRING_DS_ADDR4 = 3, + WMI_VRING_DS_PBSS = 0x00, + WMI_VRING_DS_STATION = 0x01, + WMI_VRING_DS_AP = 0x02, + WMI_VRING_DS_ADDR4 = 0x03, }; enum wmi_vring_cfg_nwifi_ds_trans_type { - WMI_NWIFI_TX_TRANS_MODE_NO = 0, - WMI_NWIFI_TX_TRANS_MODE_AP2PBSS = 1, - WMI_NWIFI_TX_TRANS_MODE_STA2PBSS = 2, + WMI_NWIFI_TX_TRANS_MODE_NO = 0x00, + WMI_NWIFI_TX_TRANS_MODE_AP2PBSS = 0x01, + WMI_NWIFI_TX_TRANS_MODE_STA2PBSS = 0x02, }; enum wmi_vring_cfg_schd_params_priority { - WMI_SCH_PRIO_REGULAR = 0, - WMI_SCH_PRIO_HIGH = 1, + WMI_SCH_PRIO_REGULAR = 0x00, + WMI_SCH_PRIO_HIGH = 0x01, }; -#define CIDXTID_CID_POS (0) -#define CIDXTID_CID_LEN (4) -#define CIDXTID_CID_MSK (0xF) -#define CIDXTID_TID_POS (4) -#define CIDXTID_TID_LEN (4) -#define CIDXTID_TID_MSK (0xF0) +#define CIDXTID_CID_POS (0) +#define CIDXTID_CID_LEN (4) +#define CIDXTID_CID_MSK (0xF) +#define CIDXTID_TID_POS (4) +#define CIDXTID_TID_LEN (4) +#define CIDXTID_TID_MSK (0xF0) +#define VRING_CFG_MAC_CTRL_LIFETIME_EN_POS (0) +#define VRING_CFG_MAC_CTRL_LIFETIME_EN_LEN (1) +#define VRING_CFG_MAC_CTRL_LIFETIME_EN_MSK (0x1) +#define VRING_CFG_MAC_CTRL_AGGR_EN_POS (1) +#define VRING_CFG_MAC_CTRL_AGGR_EN_LEN (1) +#define VRING_CFG_MAC_CTRL_AGGR_EN_MSK (0x2) +#define VRING_CFG_TO_RESOLUTION_VALUE_POS (0) +#define VRING_CFG_TO_RESOLUTION_VALUE_LEN (6) +#define VRING_CFG_TO_RESOLUTION_VALUE_MSK (0x3F) struct wmi_vring_cfg { struct wmi_sw_ring_cfg tx_sw_ring; - u8 ringid; /* 0-23 vrings */ - + /* 0-23 vrings */ + u8 ringid; u8 cidxtid; - u8 encap_trans_type; - u8 ds_cfg; /* 802.3 DS cfg */ + /* 802.3 DS cfg */ + u8 ds_cfg; u8 nwifi_ds_trans_type; - - #define VRING_CFG_MAC_CTRL_LIFETIME_EN_POS (0) - #define VRING_CFG_MAC_CTRL_LIFETIME_EN_LEN (1) - #define VRING_CFG_MAC_CTRL_LIFETIME_EN_MSK (0x1) - #define VRING_CFG_MAC_CTRL_AGGR_EN_POS (1) - #define VRING_CFG_MAC_CTRL_AGGR_EN_LEN (1) - #define VRING_CFG_MAC_CTRL_AGGR_EN_MSK (0x2) u8 mac_ctrl; - - #define VRING_CFG_TO_RESOLUTION_VALUE_POS (0) - #define VRING_CFG_TO_RESOLUTION_VALUE_LEN (6) - #define VRING_CFG_TO_RESOLUTION_VALUE_MSK (0x3F) u8 to_resolution; u8 agg_max_wsize; struct wmi_vring_cfg_schd schd_params; } __packed; enum wmi_vring_cfg_cmd_action { - WMI_VRING_CMD_ADD = 0, - 
WMI_VRING_CMD_MODIFY = 1, - WMI_VRING_CMD_DELETE = 2, + WMI_VRING_CMD_ADD = 0x00, + WMI_VRING_CMD_MODIFY = 0x01, + WMI_VRING_CMD_DELETE = 0x02, }; +/* WMI_VRING_CFG_CMDID */ struct wmi_vring_cfg_cmd { __le32 action; struct wmi_vring_cfg vring_cfg; } __packed; -/* - * WMI_BCAST_VRING_CFG_CMDID - */ struct wmi_bcast_vring_cfg { struct wmi_sw_ring_cfg tx_sw_ring; - u8 ringid; /* 0-23 vrings */ + /* 0-23 vrings */ + u8 ringid; u8 encap_trans_type; - u8 ds_cfg; /* 802.3 DS cfg */ + /* 802.3 DS cfg */ + u8 ds_cfg; u8 nwifi_ds_trans_type; } __packed; +/* WMI_BCAST_VRING_CFG_CMDID */ struct wmi_bcast_vring_cfg_cmd { __le32 action; struct wmi_bcast_vring_cfg vring_cfg; } __packed; -/* - * WMI_VRING_BA_EN_CMDID - */ +/* WMI_VRING_BA_EN_CMDID */ struct wmi_vring_ba_en_cmd { u8 ringid; u8 agg_max_wsize; __le16 ba_timeout; u8 amsdu; + u8 reserved[3]; } __packed; -/* - * WMI_VRING_BA_DIS_CMDID - */ +/* WMI_VRING_BA_DIS_CMDID */ struct wmi_vring_ba_dis_cmd { u8 ringid; u8 reserved; __le16 reason; } __packed; -/* - * WMI_NOTIFY_REQ_CMDID - */ +/* WMI_NOTIFY_REQ_CMDID */ struct wmi_notify_req_cmd { u8 cid; u8 year; @@ -655,102 +622,100 @@ struct wmi_notify_req_cmd { u8 miliseconds; } __packed; -/* - * WMI_CFG_RX_CHAIN_CMDID - */ +/* WMI_CFG_RX_CHAIN_CMDID */ enum wmi_sniffer_cfg_mode { - WMI_SNIFFER_OFF = 0, - WMI_SNIFFER_ON = 1, + WMI_SNIFFER_OFF = 0x00, + WMI_SNIFFER_ON = 0x01, }; enum wmi_sniffer_cfg_phy_info_mode { - WMI_SNIFFER_PHY_INFO_DISABLED = 0, - WMI_SNIFFER_PHY_INFO_ENABLED = 1, + WMI_SNIFFER_PHY_INFO_DISABLED = 0x00, + WMI_SNIFFER_PHY_INFO_ENABLED = 0x01, }; enum wmi_sniffer_cfg_phy_support { - WMI_SNIFFER_CP = 0, - WMI_SNIFFER_DP = 1, - WMI_SNIFFER_BOTH_PHYS = 2, + WMI_SNIFFER_CP = 0x00, + WMI_SNIFFER_DP = 0x01, + WMI_SNIFFER_BOTH_PHYS = 0x02, }; +/* wmi_sniffer_cfg */ struct wmi_sniffer_cfg { - __le32 mode; /* enum wmi_sniffer_cfg_mode */ - __le32 phy_info_mode; /* enum wmi_sniffer_cfg_phy_info_mode */ - __le32 phy_support; /* enum wmi_sniffer_cfg_phy_support */ + /* enum wmi_sniffer_cfg_mode */ + __le32 mode; + /* enum wmi_sniffer_cfg_phy_info_mode */ + __le32 phy_info_mode; + /* enum wmi_sniffer_cfg_phy_support */ + __le32 phy_support; u8 channel; u8 reserved[3]; } __packed; enum wmi_cfg_rx_chain_cmd_action { - WMI_RX_CHAIN_ADD = 0, - WMI_RX_CHAIN_DEL = 1, + WMI_RX_CHAIN_ADD = 0x00, + WMI_RX_CHAIN_DEL = 0x01, }; enum wmi_cfg_rx_chain_cmd_decap_trans_type { - WMI_DECAP_TYPE_802_3 = 0, - WMI_DECAP_TYPE_NATIVE_WIFI = 1, - WMI_DECAP_TYPE_NONE = 2, + WMI_DECAP_TYPE_802_3 = 0x00, + WMI_DECAP_TYPE_NATIVE_WIFI = 0x01, + WMI_DECAP_TYPE_NONE = 0x02, }; enum wmi_cfg_rx_chain_cmd_nwifi_ds_trans_type { - WMI_NWIFI_RX_TRANS_MODE_NO = 0, - WMI_NWIFI_RX_TRANS_MODE_PBSS2AP = 1, - WMI_NWIFI_RX_TRANS_MODE_PBSS2STA = 2, + WMI_NWIFI_RX_TRANS_MODE_NO = 0x00, + WMI_NWIFI_RX_TRANS_MODE_PBSS2AP = 0x01, + WMI_NWIFI_RX_TRANS_MODE_PBSS2STA = 0x02, }; enum wmi_cfg_rx_chain_cmd_reorder_type { - WMI_RX_HW_REORDER = 0, - WMI_RX_SW_REORDER = 1, + WMI_RX_HW_REORDER = 0x00, + WMI_RX_SW_REORDER = 0x01, }; +#define L2_802_3_OFFLOAD_CTRL_VLAN_TAG_INSERTION_POS (0) +#define L2_802_3_OFFLOAD_CTRL_VLAN_TAG_INSERTION_LEN (1) +#define L2_802_3_OFFLOAD_CTRL_VLAN_TAG_INSERTION_MSK (0x1) +#define L2_802_3_OFFLOAD_CTRL_SNAP_KEEP_POS (1) +#define L2_802_3_OFFLOAD_CTRL_SNAP_KEEP_LEN (1) +#define L2_802_3_OFFLOAD_CTRL_SNAP_KEEP_MSK (0x2) +#define L2_NWIFI_OFFLOAD_CTRL_REMOVE_QOS_POS (0) +#define L2_NWIFI_OFFLOAD_CTRL_REMOVE_QOS_LEN (1) +#define L2_NWIFI_OFFLOAD_CTRL_REMOVE_QOS_MSK (0x1) +#define L2_NWIFI_OFFLOAD_CTRL_REMOVE_PN_POS (1) 
+#define L2_NWIFI_OFFLOAD_CTRL_REMOVE_PN_LEN (1) +#define L2_NWIFI_OFFLOAD_CTRL_REMOVE_PN_MSK (0x2) +#define L3_L4_CTRL_IPV4_CHECKSUM_EN_POS (0) +#define L3_L4_CTRL_IPV4_CHECKSUM_EN_LEN (1) +#define L3_L4_CTRL_IPV4_CHECKSUM_EN_MSK (0x1) +#define L3_L4_CTRL_TCPIP_CHECKSUM_EN_POS (1) +#define L3_L4_CTRL_TCPIP_CHECKSUM_EN_LEN (1) +#define L3_L4_CTRL_TCPIP_CHECKSUM_EN_MSK (0x2) +#define RING_CTRL_OVERRIDE_PREFETCH_THRSH_POS (0) +#define RING_CTRL_OVERRIDE_PREFETCH_THRSH_LEN (1) +#define RING_CTRL_OVERRIDE_PREFETCH_THRSH_MSK (0x1) +#define RING_CTRL_OVERRIDE_WB_THRSH_POS (1) +#define RING_CTRL_OVERRIDE_WB_THRSH_LEN (1) +#define RING_CTRL_OVERRIDE_WB_THRSH_MSK (0x2) +#define RING_CTRL_OVERRIDE_ITR_THRSH_POS (2) +#define RING_CTRL_OVERRIDE_ITR_THRSH_LEN (1) +#define RING_CTRL_OVERRIDE_ITR_THRSH_MSK (0x4) +#define RING_CTRL_OVERRIDE_HOST_THRSH_POS (3) +#define RING_CTRL_OVERRIDE_HOST_THRSH_LEN (1) +#define RING_CTRL_OVERRIDE_HOST_THRSH_MSK (0x8) + +/* WMI_CFG_RX_CHAIN_CMDID */ struct wmi_cfg_rx_chain_cmd { __le32 action; struct wmi_sw_ring_cfg rx_sw_ring; u8 mid; u8 decap_trans_type; - - #define L2_802_3_OFFLOAD_CTRL_VLAN_TAG_INSERTION_POS (0) - #define L2_802_3_OFFLOAD_CTRL_VLAN_TAG_INSERTION_LEN (1) - #define L2_802_3_OFFLOAD_CTRL_VLAN_TAG_INSERTION_MSK (0x1) - #define L2_802_3_OFFLOAD_CTRL_SNAP_KEEP_POS (1) - #define L2_802_3_OFFLOAD_CTRL_SNAP_KEEP_LEN (1) - #define L2_802_3_OFFLOAD_CTRL_SNAP_KEEP_MSK (0x2) u8 l2_802_3_offload_ctrl; - - #define L2_NWIFI_OFFLOAD_CTRL_REMOVE_QOS_POS (0) - #define L2_NWIFI_OFFLOAD_CTRL_REMOVE_QOS_LEN (1) - #define L2_NWIFI_OFFLOAD_CTRL_REMOVE_QOS_MSK (0x1) - #define L2_NWIFI_OFFLOAD_CTRL_REMOVE_PN_POS (1) - #define L2_NWIFI_OFFLOAD_CTRL_REMOVE_PN_LEN (1) - #define L2_NWIFI_OFFLOAD_CTRL_REMOVE_PN_MSK (0x2) u8 l2_nwifi_offload_ctrl; - u8 vlan_id; u8 nwifi_ds_trans_type; - - #define L3_L4_CTRL_IPV4_CHECKSUM_EN_POS (0) - #define L3_L4_CTRL_IPV4_CHECKSUM_EN_LEN (1) - #define L3_L4_CTRL_IPV4_CHECKSUM_EN_MSK (0x1) - #define L3_L4_CTRL_TCPIP_CHECKSUM_EN_POS (1) - #define L3_L4_CTRL_TCPIP_CHECKSUM_EN_LEN (1) - #define L3_L4_CTRL_TCPIP_CHECKSUM_EN_MSK (0x2) u8 l3_l4_ctrl; - - #define RING_CTRL_OVERRIDE_PREFETCH_THRSH_POS (0) - #define RING_CTRL_OVERRIDE_PREFETCH_THRSH_LEN (1) - #define RING_CTRL_OVERRIDE_PREFETCH_THRSH_MSK (0x1) - #define RING_CTRL_OVERRIDE_WB_THRSH_POS (1) - #define RING_CTRL_OVERRIDE_WB_THRSH_LEN (1) - #define RING_CTRL_OVERRIDE_WB_THRSH_MSK (0x2) - #define RING_CTRL_OVERRIDE_ITR_THRSH_POS (2) - #define RING_CTRL_OVERRIDE_ITR_THRSH_LEN (1) - #define RING_CTRL_OVERRIDE_ITR_THRSH_MSK (0x4) - #define RING_CTRL_OVERRIDE_HOST_THRSH_POS (3) - #define RING_CTRL_OVERRIDE_HOST_THRSH_LEN (1) - #define RING_CTRL_OVERRIDE_HOST_THRSH_MSK (0x8) u8 ring_ctrl; - __le16 prefetch_thrsh; __le16 wb_thrsh; __le32 itr_value; @@ -758,31 +723,27 @@ struct wmi_cfg_rx_chain_cmd { u8 reorder_type; u8 reserved; struct wmi_sniffer_cfg sniffer_cfg; + __le16 max_rx_pl_per_desc; } __packed; -/* - * WMI_RCP_ADDBA_RESP_CMDID - */ +/* WMI_RCP_ADDBA_RESP_CMDID */ struct wmi_rcp_addba_resp_cmd { u8 cidxtid; u8 dialog_token; __le16 status_code; - __le16 ba_param_set; /* ieee80211_ba_parameterset field to send */ + /* ieee80211_ba_parameterset field to send */ + __le16 ba_param_set; __le16 ba_timeout; } __packed; -/* - * WMI_RCP_DELBA_CMDID - */ +/* WMI_RCP_DELBA_CMDID */ struct wmi_rcp_delba_cmd { u8 cidxtid; u8 reserved; __le16 reason; } __packed; -/* - * WMI_RCP_ADDBA_REQ_CMDID - */ +/* WMI_RCP_ADDBA_REQ_CMDID */ struct wmi_rcp_addba_req_cmd { u8 cidxtid; u8 dialog_token; @@ -793,32 +754,16 
@@ struct wmi_rcp_addba_req_cmd { __le16 ba_seq_ctrl; } __packed; -/* - * WMI_SET_MAC_ADDRESS_CMDID - */ +/* WMI_SET_MAC_ADDRESS_CMDID */ struct wmi_set_mac_address_cmd { u8 mac[WMI_MAC_LEN]; u8 reserved[2]; } __packed; -/* -* WMI_EAPOL_TX_CMDID -*/ -struct wmi_eapol_tx_cmd { - u8 dst_mac[WMI_MAC_LEN]; - __le16 eapol_len; - u8 eapol[0]; -} __packed; - -/* - * WMI_ECHO_CMDID - * +/* WMI_ECHO_CMDID * Check FW is alive - * * WMI_DEEP_ECHO_CMDID - * * Check FW and ucode are alive - * * Returned event: WMI_ECHO_RSP_EVENTID * same event for both commands */ @@ -826,70 +771,79 @@ struct wmi_echo_cmd { __le32 value; } __packed; -/* - * WMI_TEMP_SENSE_CMDID +/* WMI_OTP_READ_CMDID */ +struct wmi_otp_read_cmd { + __le32 addr; + __le32 size; + __le32 values; +} __packed; + +/* WMI_OTP_WRITE_CMDID */ +struct wmi_otp_write_cmd { + __le32 addr; + __le32 size; + __le32 values; +} __packed; + +/* WMI_TEMP_SENSE_CMDID * * Measure MAC and radio temperatures + * + * Possible modes for temperature measurement */ - -/* Possible modes for temperature measurement */ enum wmi_temperature_measure_mode { - TEMPERATURE_USE_OLD_VALUE = 0x1, - TEMPERATURE_MEASURE_NOW = 0x2, + TEMPERATURE_USE_OLD_VALUE = 0x01, + TEMPERATURE_MEASURE_NOW = 0x02, }; +/* WMI_TEMP_SENSE_CMDID */ struct wmi_temp_sense_cmd { __le32 measure_baseband_en; __le32 measure_rf_en; __le32 measure_mode; } __packed; -/* - * WMI_PMC_CMDID - */ -enum wmi_pmc_op_e { - WMI_PMC_ALLOCATE = 0, - WMI_PMC_RELEASE = 1, +enum wmi_pmc_op { + WMI_PMC_ALLOCATE = 0x00, + WMI_PMC_RELEASE = 0x01, }; +/* WMI_PMC_CMDID */ struct wmi_pmc_cmd { - u8 op; /* enum wmi_pmc_cmd_op_type */ + /* enum wmi_pmc_cmd_op_type */ + u8 op; u8 reserved; __le16 ring_size; __le64 mem_base; } __packed; -/* - * WMI Events - */ - -/* +/* WMI Events * List of Events (target to host) */ enum wmi_event_id { WMI_READY_EVENTID = 0x1001, WMI_CONNECT_EVENTID = 0x1002, WMI_DISCONNECT_EVENTID = 0x1003, - WMI_SCAN_COMPLETE_EVENTID = 0x100a, - WMI_REPORT_STATISTICS_EVENTID = 0x100b, + WMI_SCAN_COMPLETE_EVENTID = 0x100A, + WMI_REPORT_STATISTICS_EVENTID = 0x100B, WMI_RD_MEM_RSP_EVENTID = 0x1800, WMI_FW_READY_EVENTID = 0x1801, - WMI_EXIT_FAST_MEM_ACC_MODE_EVENTID = 0x0200, + WMI_EXIT_FAST_MEM_ACC_MODE_EVENTID = 0x200, WMI_ECHO_RSP_EVENTID = 0x1803, - WMI_FS_TUNE_DONE_EVENTID = 0x180a, - WMI_CORR_MEASURE_EVENTID = 0x180b, - WMI_READ_RSSI_EVENTID = 0x180c, - WMI_TEMP_SENSE_DONE_EVENTID = 0x180e, - WMI_DC_CALIB_DONE_EVENTID = 0x180f, + WMI_FS_TUNE_DONE_EVENTID = 0x180A, + WMI_CORR_MEASURE_EVENTID = 0x180B, + WMI_READ_RSSI_EVENTID = 0x180C, + WMI_TEMP_SENSE_DONE_EVENTID = 0x180E, + WMI_DC_CALIB_DONE_EVENTID = 0x180F, WMI_IQ_TX_CALIB_DONE_EVENTID = 0x1811, WMI_IQ_RX_CALIB_DONE_EVENTID = 0x1812, WMI_SET_WORK_MODE_DONE_EVENTID = 0x1815, WMI_LO_LEAKAGE_CALIB_DONE_EVENTID = 0x1816, WMI_MARLON_R_READ_DONE_EVENTID = 0x1818, WMI_MARLON_R_WRITE_DONE_EVENTID = 0x1819, - WMI_MARLON_R_TXRX_SEL_DONE_EVENTID = 0x181a, - WMI_SILENT_RSSI_CALIB_DONE_EVENTID = 0x181d, - WMI_RF_RX_TEST_DONE_EVENTID = 0x181e, + WMI_MARLON_R_TXRX_SEL_DONE_EVENTID = 0x181A, + WMI_SILENT_RSSI_CALIB_DONE_EVENTID = 0x181D, + WMI_RF_RX_TEST_DONE_EVENTID = 0x181E, WMI_CFG_RX_CHAIN_DONE_EVENTID = 0x1820, WMI_VRING_CFG_DONE_EVENTID = 0x1821, WMI_BA_STATUS_EVENTID = 0x1823, @@ -897,15 +851,13 @@ enum wmi_event_id { WMI_RCP_ADDBA_RESP_SENT_EVENTID = 0x1825, WMI_DELBA_EVENTID = 0x1826, WMI_GET_SSID_EVENTID = 0x1828, - WMI_GET_PCP_CHANNEL_EVENTID = 0x182a, - WMI_SW_TX_COMPLETE_EVENTID = 0x182b, - + WMI_GET_PCP_CHANNEL_EVENTID = 0x182A, + 
WMI_SW_TX_COMPLETE_EVENTID = 0x182B, WMI_READ_MAC_RXQ_EVENTID = 0x1830, WMI_READ_MAC_TXQ_EVENTID = 0x1831, WMI_WRITE_MAC_RXQ_EVENTID = 0x1832, WMI_WRITE_MAC_TXQ_EVENTID = 0x1833, WMI_WRITE_MAC_XQ_FIELD_EVENTID = 0x1834, - WMI_BEAMFORMING_MGMT_DONE_EVENTID = 0x1836, WMI_BF_TXSS_MGMT_DONE_EVENTID = 0x1837, WMI_BF_RXSS_MGMT_DONE_EVENTID = 0x1839, @@ -915,20 +867,18 @@ enum wmi_event_id { WMI_BF_SM_MGMT_DONE_EVENTID = 0x1838, WMI_RX_MGMT_PACKET_EVENTID = 0x1840, WMI_TX_MGMT_PACKET_EVENTID = 0x1841, - + WMI_OTP_READ_RESULT_EVENTID = 0x1856, /* Performance monitoring events */ WMI_DATA_PORT_OPEN_EVENTID = 0x1860, WMI_WBE_LINK_DOWN_EVENTID = 0x1861, - WMI_BF_CTRL_DONE_EVENTID = 0x1862, WMI_NOTIFY_REQ_DONE_EVENTID = 0x1863, WMI_GET_STATUS_DONE_EVENTID = 0x1864, WMI_VRING_EN_EVENTID = 0x1865, - WMI_UNIT_TEST_EVENTID = 0x1900, WMI_FLASH_READ_DONE_EVENTID = 0x1902, WMI_FLASH_WRITE_DONE_EVENTID = 0x1903, - /*P2P*/ + /* P2P */ WMI_P2P_CFG_DONE_EVENTID = 0x1910, WMI_PORT_ALLOCATED_EVENTID = 0x1911, WMI_PORT_DELETED_EVENTID = 0x1912, @@ -938,49 +888,42 @@ enum wmi_event_id { WMI_DISCOVERY_STOPPED_EVENTID = 0x1917, WMI_PCP_STARTED_EVENTID = 0x1918, WMI_PCP_STOPPED_EVENTID = 0x1919, - WMI_PCP_FACTOR_EVENTID = 0x191a, + WMI_PCP_FACTOR_EVENTID = 0x191A, WMI_SET_CHANNEL_EVENTID = 0x9000, WMI_ASSOC_REQ_EVENTID = 0x9001, WMI_EAPOL_RX_EVENTID = 0x9002, WMI_MAC_ADDR_RESP_EVENTID = 0x9003, WMI_FW_VER_EVENTID = 0x9004, + WMI_ACS_PASSIVE_SCAN_COMPLETE_EVENTID = 0x9005, }; -/* - * Events data structures - */ - +/* Events data structures */ enum wmi_fw_status { - WMI_FW_STATUS_SUCCESS, - WMI_FW_STATUS_FAILURE, + WMI_FW_STATUS_SUCCESS = 0x00, + WMI_FW_STATUS_FAILURE = 0x01, }; -/* - * WMI_RF_MGMT_STATUS_EVENTID - */ +/* WMI_RF_MGMT_STATUS_EVENTID */ enum wmi_rf_status { - WMI_RF_ENABLED = 0, - WMI_RF_DISABLED_HW = 1, - WMI_RF_DISABLED_SW = 2, - WMI_RF_DISABLED_HW_SW = 3, + WMI_RF_ENABLED = 0x00, + WMI_RF_DISABLED_HW = 0x01, + WMI_RF_DISABLED_SW = 0x02, + WMI_RF_DISABLED_HW_SW = 0x03, }; +/* WMI_RF_MGMT_STATUS_EVENTID */ struct wmi_rf_mgmt_status_event { __le32 rf_status; } __packed; -/* - * WMI_THERMAL_THROTTLING_STATUS_EVENTID - */ +/* WMI_THERMAL_THROTTLING_STATUS_EVENTID */ struct wmi_thermal_throttling_status_event { __le32 time_on_usec; __le32 time_off_usec; __le32 max_txop_length_usec; } __packed; -/* - * WMI_GET_STATUS_DONE_EVENTID - */ +/* WMI_GET_STATUS_DONE_EVENTID */ struct wmi_get_status_done_event { __le32 is_associated; u8 cid; @@ -996,9 +939,7 @@ struct wmi_get_status_done_event { __le32 is_secured; } __packed; -/* - * WMI_FW_VER_EVENTID - */ +/* WMI_FW_VER_EVENTID */ struct wmi_fw_ver_event { u8 major; u8 minor; @@ -1006,9 +947,7 @@ struct wmi_fw_ver_event { __le16 build; } __packed; -/* -* WMI_MAC_ADDR_RESP_EVENTID -*/ +/* WMI_MAC_ADDR_RESP_EVENTID */ struct wmi_mac_addr_resp_event { u8 mac[WMI_MAC_LEN]; u8 auth_mode; @@ -1016,42 +955,38 @@ struct wmi_mac_addr_resp_event { __le32 offload_mode; } __packed; -/* -* WMI_EAPOL_RX_EVENTID -*/ +/* WMI_EAPOL_RX_EVENTID */ struct wmi_eapol_rx_event { u8 src_mac[WMI_MAC_LEN]; __le16 eapol_len; u8 eapol[0]; } __packed; -/* -* WMI_READY_EVENTID -*/ +/* WMI_READY_EVENTID */ enum wmi_phy_capability { - WMI_11A_CAPABILITY = 1, - WMI_11G_CAPABILITY = 2, - WMI_11AG_CAPABILITY = 3, - WMI_11NA_CAPABILITY = 4, - WMI_11NG_CAPABILITY = 5, - WMI_11NAG_CAPABILITY = 6, - WMI_11AD_CAPABILITY = 7, - WMI_11N_CAPABILITY_OFFSET = WMI_11NA_CAPABILITY - WMI_11A_CAPABILITY, + WMI_11A_CAPABILITY = 0x01, + WMI_11G_CAPABILITY = 0x02, + WMI_11AG_CAPABILITY = 0x03, + WMI_11NA_CAPABILITY = 
0x04, + WMI_11NG_CAPABILITY = 0x05, + WMI_11NAG_CAPABILITY = 0x06, + WMI_11AD_CAPABILITY = 0x07, + WMI_11N_CAPABILITY_OFFSET = 0x03, }; struct wmi_ready_event { __le32 sw_version; __le32 abi_version; u8 mac[WMI_MAC_LEN]; - u8 phy_capability; /* enum wmi_phy_capability */ + /* enum wmi_phy_capability */ + u8 phy_capability; u8 numof_additional_mids; } __packed; -/* - * WMI_NOTIFY_REQ_DONE_EVENTID - */ +/* WMI_NOTIFY_REQ_DONE_EVENTID */ struct wmi_notify_req_done_event { - __le32 status; /* beamforming status, 0: fail; 1: OK; 2: retrying */ + /* beamforming status, 0: fail; 1: OK; 2: retrying */ + __le32 status; __le64 tsf; __le32 snr_val; __le32 tx_tpt; @@ -1067,9 +1002,7 @@ struct wmi_notify_req_done_event { u8 reserved[3]; } __packed; -/* - * WMI_CONNECT_EVENTID - */ +/* WMI_CONNECT_EVENTID */ struct wmi_connect_event { u8 channel; u8 reserved0; @@ -1083,68 +1016,103 @@ struct wmi_connect_event { u8 assoc_resp_len; u8 cid; u8 reserved2[3]; + /* not in use */ u8 assoc_info[0]; } __packed; -/* - * WMI_DISCONNECT_EVENTID - */ +/* WMI_DISCONNECT_EVENTID */ enum wmi_disconnect_reason { - WMI_DIS_REASON_NO_NETWORK_AVAIL = 1, - WMI_DIS_REASON_LOST_LINK = 2, /* bmiss */ - WMI_DIS_REASON_DISCONNECT_CMD = 3, - WMI_DIS_REASON_BSS_DISCONNECTED = 4, - WMI_DIS_REASON_AUTH_FAILED = 5, - WMI_DIS_REASON_ASSOC_FAILED = 6, - WMI_DIS_REASON_NO_RESOURCES_AVAIL = 7, - WMI_DIS_REASON_CSERV_DISCONNECT = 8, - WMI_DIS_REASON_INVALID_PROFILE = 10, - WMI_DIS_REASON_DOT11H_CHANNEL_SWITCH = 11, - WMI_DIS_REASON_PROFILE_MISMATCH = 12, - WMI_DIS_REASON_CONNECTION_EVICTED = 13, - WMI_DIS_REASON_IBSS_MERGE = 14, + WMI_DIS_REASON_NO_NETWORK_AVAIL = 0x01, + /* bmiss */ + WMI_DIS_REASON_LOST_LINK = 0x02, + WMI_DIS_REASON_DISCONNECT_CMD = 0x03, + WMI_DIS_REASON_BSS_DISCONNECTED = 0x04, + WMI_DIS_REASON_AUTH_FAILED = 0x05, + WMI_DIS_REASON_ASSOC_FAILED = 0x06, + WMI_DIS_REASON_NO_RESOURCES_AVAIL = 0x07, + WMI_DIS_REASON_CSERV_DISCONNECT = 0x08, + WMI_DIS_REASON_INVALID_PROFILE = 0x0A, + WMI_DIS_REASON_DOT11H_CHANNEL_SWITCH = 0x0B, + WMI_DIS_REASON_PROFILE_MISMATCH = 0x0C, + WMI_DIS_REASON_CONNECTION_EVICTED = 0x0D, + WMI_DIS_REASON_IBSS_MERGE = 0x0E, }; struct wmi_disconnect_event { - __le16 protocol_reason_status; /* reason code, see 802.11 spec. */ - u8 bssid[WMI_MAC_LEN]; /* set if known */ - u8 disconnect_reason; /* see wmi_disconnect_reason */ - u8 assoc_resp_len; /* not used */ - u8 assoc_info[0]; /* not used */ + /* reason code, see 802.11 spec. 
*/ + __le16 protocol_reason_status; + /* set if known */ + u8 bssid[WMI_MAC_LEN]; + /* see enum wmi_disconnect_reason */ + u8 disconnect_reason; + /* last assoc req may passed to host - not in used */ + u8 assoc_resp_len; + /* last assoc req may passed to host - not in used */ + u8 assoc_info[0]; } __packed; -/* - * WMI_SCAN_COMPLETE_EVENTID - */ +/* WMI_SCAN_COMPLETE_EVENTID */ enum scan_status { - WMI_SCAN_SUCCESS = 0, - WMI_SCAN_FAILED = 1, - WMI_SCAN_ABORTED = 2, - WMI_SCAN_REJECTED = 3, + WMI_SCAN_SUCCESS = 0x00, + WMI_SCAN_FAILED = 0x01, + WMI_SCAN_ABORTED = 0x02, + WMI_SCAN_REJECTED = 0x03, + WMI_SCAN_ABORT_REJECTED = 0x04, }; struct wmi_scan_complete_event { - __le32 status; /* scan_status */ + /* enum scan_status */ + __le32 status; } __packed; -/* - * WMI_BA_STATUS_EVENTID - */ -enum wmi_vring_ba_status { - WMI_BA_AGREED = 0, - WMI_BA_NON_AGREED = 1, - /* BA_EN in middle of teardown flow */ - WMI_BA_TD_WIP = 2, - /* BA_DIS or BA_EN in middle of BA SETUP flow */ - WMI_BA_SETUP_WIP = 3, - /* BA_EN when the BA session is already active */ - WMI_BA_SESSION_ACTIVE = 4, - /* BA_DIS when the BA session is not active */ - WMI_BA_SESSION_NOT_ACTIVE = 5, +/* WMI_ACS_PASSIVE_SCAN_COMPLETE_EVENT */ +enum wmi_acs_info_bitmask { + WMI_ACS_INFO_BITMASK_BEACON_FOUND = 0x01, + WMI_ACS_INFO_BITMASK_BUSY_TIME = 0x02, + WMI_ACS_INFO_BITMASK_TX_TIME = 0x04, + WMI_ACS_INFO_BITMASK_RX_TIME = 0x08, + WMI_ACS_INFO_BITMASK_NOISE = 0x10, }; -struct wmi_vring_ba_status_event { - __le16 status; /* enum wmi_vring_ba_status */ +struct scan_acs_info { + u8 channel; + u8 beacon_found; + /* msec */ + __le16 busy_time; + __le16 tx_time; + __le16 rx_time; + u8 noise; + u8 reserved[3]; +} __packed; + +struct wmi_acs_passive_scan_complete_event { + __le32 dwell_time; + /* valid fields within channel info according to + * their appearance in struct order + */ + __le16 filled; + u8 num_scanned_channels; + u8 reserved; + struct scan_acs_info scan_info_list[0]; +} __packed; + +/* WMI_BA_STATUS_EVENTID */ +enum wmi_vring_ba_status { + WMI_BA_AGREED = 0x00, + WMI_BA_NON_AGREED = 0x01, + /* BA_EN in middle of teardown flow */ + WMI_BA_TD_WIP = 0x02, + /* BA_DIS or BA_EN in middle of BA SETUP flow */ + WMI_BA_SETUP_WIP = 0x03, + /* BA_EN when the BA session is already active */ + WMI_BA_SESSION_ACTIVE = 0x04, + /* BA_DIS when the BA session is not active */ + WMI_BA_SESSION_NOT_ACTIVE = 0x05, +}; + +struct wmi_ba_status_event { + /* enum wmi_vring_ba_status */ + __le16 status; u8 reserved[2]; u8 ringid; u8 agg_wsize; @@ -1152,18 +1120,14 @@ struct wmi_vring_ba_status_event { u8 amsdu; } __packed; -/* - * WMI_DELBA_EVENTID - */ +/* WMI_DELBA_EVENTID */ struct wmi_delba_event { u8 cidxtid; u8 from_initiator; __le16 reason; } __packed; -/* - * WMI_VRING_CFG_DONE_EVENTID - */ +/* WMI_VRING_CFG_DONE_EVENTID */ struct wmi_vring_cfg_done_event { u8 ringid; u8 status; @@ -1171,174 +1135,151 @@ struct wmi_vring_cfg_done_event { __le32 tx_vring_tail_ptr; } __packed; -/* - * WMI_RCP_ADDBA_RESP_SENT_EVENTID - */ +/* WMI_RCP_ADDBA_RESP_SENT_EVENTID */ struct wmi_rcp_addba_resp_sent_event { u8 cidxtid; u8 reserved; __le16 status; } __packed; -/* - * WMI_RCP_ADDBA_REQ_EVENTID - */ +/* WMI_RCP_ADDBA_REQ_EVENTID */ struct wmi_rcp_addba_req_event { u8 cidxtid; u8 dialog_token; - __le16 ba_param_set; /* ieee80211_ba_parameterset as it received */ + /* ieee80211_ba_parameterset as it received */ + __le16 ba_param_set; __le16 ba_timeout; - __le16 ba_seq_ctrl; /* ieee80211_ba_seqstrl field as it received */ + /* ieee80211_ba_seqstrl field as it 
received */ + __le16 ba_seq_ctrl; } __packed; -/* - * WMI_CFG_RX_CHAIN_DONE_EVENTID - */ +/* WMI_CFG_RX_CHAIN_DONE_EVENTID */ enum wmi_cfg_rx_chain_done_event_status { - WMI_CFG_RX_CHAIN_SUCCESS = 1, + WMI_CFG_RX_CHAIN_SUCCESS = 0x01, }; struct wmi_cfg_rx_chain_done_event { - __le32 rx_ring_tail_ptr; /* Rx V-Ring Tail pointer */ + /* V-Ring Tail pointer */ + __le32 rx_ring_tail_ptr; __le32 status; } __packed; -/* - * WMI_WBE_LINK_DOWN_EVENTID - */ +/* WMI_WBE_LINK_DOWN_EVENTID */ enum wmi_wbe_link_down_event_reason { - WMI_WBE_REASON_USER_REQUEST = 0, - WMI_WBE_REASON_RX_DISASSOC = 1, - WMI_WBE_REASON_BAD_PHY_LINK = 2, + WMI_WBE_REASON_USER_REQUEST = 0x00, + WMI_WBE_REASON_RX_DISASSOC = 0x01, + WMI_WBE_REASON_BAD_PHY_LINK = 0x02, }; +/* WMI_WBE_LINK_DOWN_EVENTID */ struct wmi_wbe_link_down_event { u8 cid; u8 reserved[3]; __le32 reason; } __packed; -/* - * WMI_DATA_PORT_OPEN_EVENTID - */ +/* WMI_DATA_PORT_OPEN_EVENTID */ struct wmi_data_port_open_event { u8 cid; u8 reserved[3]; } __packed; -/* - * WMI_VRING_EN_EVENTID - */ +/* WMI_VRING_EN_EVENTID */ struct wmi_vring_en_event { u8 vring_index; u8 reserved[3]; } __packed; -/* - * WMI_GET_PCP_CHANNEL_EVENTID - */ +/* WMI_GET_PCP_CHANNEL_EVENTID */ struct wmi_get_pcp_channel_event { u8 channel; u8 reserved[3]; } __packed; -/* - * WMI_P2P_CFG_DONE_EVENTID - */ +/* WMI_P2P_CFG_DONE_EVENTID */ struct wmi_p2p_cfg_done_event { - u8 status; /* wmi_fw_status */ + /* wmi_fw_status */ + u8 status; u8 reserved[3]; } __packed; -/* -* WMI_PORT_ALLOCATED_EVENTID -*/ +/* WMI_PORT_ALLOCATED_EVENTID */ struct wmi_port_allocated_event { - u8 status; /* wmi_fw_status */ + /* wmi_fw_status */ + u8 status; u8 reserved[3]; } __packed; -/* -* WMI_PORT_DELETED_EVENTID -*/ +/* WMI_PORT_DELETED_EVENTID */ struct wmi_port_deleted_event { - u8 status; /* wmi_fw_status */ + /* wmi_fw_status */ + u8 status; u8 reserved[3]; } __packed; -/* - * WMI_LISTEN_STARTED_EVENTID - */ +/* WMI_LISTEN_STARTED_EVENTID */ struct wmi_listen_started_event { - u8 status; /* wmi_fw_status */ + /* wmi_fw_status */ + u8 status; u8 reserved[3]; } __packed; -/* - * WMI_SEARCH_STARTED_EVENTID - */ +/* WMI_SEARCH_STARTED_EVENTID */ struct wmi_search_started_event { - u8 status; /* wmi_fw_status */ + /* wmi_fw_status */ + u8 status; u8 reserved[3]; } __packed; -/* - * WMI_PCP_STARTED_EVENTID - */ +/* WMI_PCP_STARTED_EVENTID */ struct wmi_pcp_started_event { - u8 status; /* wmi_fw_status */ + /* wmi_fw_status */ + u8 status; u8 reserved[3]; } __packed; -/* - * WMI_PCP_FACTOR_EVENTID - */ +/* WMI_PCP_FACTOR_EVENTID */ struct wmi_pcp_factor_event { __le32 pcp_factor; } __packed; -/* - * WMI_SW_TX_COMPLETE_EVENTID - */ enum wmi_sw_tx_status { - WMI_TX_SW_STATUS_SUCCESS = 0, - WMI_TX_SW_STATUS_FAILED_NO_RESOURCES = 1, - WMI_TX_SW_STATUS_FAILED_TX = 2, + WMI_TX_SW_STATUS_SUCCESS = 0x00, + WMI_TX_SW_STATUS_FAILED_NO_RESOURCES = 0x01, + WMI_TX_SW_STATUS_FAILED_TX = 0x02, }; +/* WMI_SW_TX_COMPLETE_EVENTID */ struct wmi_sw_tx_complete_event { - u8 status; /* enum wmi_sw_tx_status */ + /* enum wmi_sw_tx_status */ + u8 status; u8 reserved[3]; } __packed; -/* - * WMI_CORR_MEASURE_EVENTID - */ +/* WMI_CORR_MEASURE_EVENTID */ struct wmi_corr_measure_event { - s32 i; - s32 q; - s32 image_i; - s32 image_q; + /* signed */ + __le32 i; + /* signed */ + __le32 q; + /* signed */ + __le32 image_i; + /* signed */ + __le32 image_q; } __packed; -/* - * WMI_READ_RSSI_EVENTID - */ +/* WMI_READ_RSSI_EVENTID */ struct wmi_read_rssi_event { __le32 ina_rssi_adc_dbm; } __packed; -/* - * WMI_GET_SSID_EVENTID - */ +/* 
WMI_GET_SSID_EVENTID */ struct wmi_get_ssid_event { __le32 ssid_len; u8 ssid[WMI_MAX_SSID_LEN]; } __packed; -/* - * WMI_RX_MGMT_PACKET_EVENTID - */ +/* wmi_rx_mgmt_info */ struct wmi_rx_mgmt_info { u8 mcs; s8 snr; @@ -1347,39 +1288,65 @@ struct wmi_rx_mgmt_info { __le16 stype; __le16 status; __le32 len; + /* Not resolved when == 0xFFFFFFFF ==> Broadcast to all MIDS */ u8 qid; + /* Not resolved when == 0xFFFFFFFF ==> Broadcast to all MIDS */ u8 mid; u8 cid; - u8 channel; /* From Radio MNGR */ + /* From Radio MNGR */ + u8 channel; } __packed; -/* - * WMI_TX_MGMT_PACKET_EVENTID - */ +/* wmi_otp_read_write_cmd */ +struct wmi_otp_read_write_cmd { + __le32 addr; + __le32 size; + u8 values[0]; +} __packed; + +/* WMI_OTP_READ_RESULT_EVENTID */ +struct wmi_otp_read_result_event { + u8 payload[0]; +} __packed; + +/* WMI_TX_MGMT_PACKET_EVENTID */ struct wmi_tx_mgmt_packet_event { u8 payload[0]; } __packed; +/* WMI_RX_MGMT_PACKET_EVENTID */ struct wmi_rx_mgmt_packet_event { struct wmi_rx_mgmt_info info; u8 payload[0]; } __packed; -/* - * WMI_ECHO_RSP_EVENTID - */ -struct wmi_echo_event { +/* WMI_ECHO_RSP_EVENTID */ +struct wmi_echo_rsp_event { __le32 echoed_value; } __packed; -/* - * WMI_TEMP_SENSE_DONE_EVENTID +/* WMI_TEMP_SENSE_DONE_EVENTID * * Measure MAC and radio temperatures */ struct wmi_temp_sense_done_event { + /* Temperature times 1000 (actual temperature will be achieved by + * dividing the value by 1000) + */ __le32 baseband_t1000; + /* Temperature times 1000 (actual temperature will be achieved by + * dividing the value by 1000) + */ __le32 rf_t1000; } __packed; +#define WMI_SCAN_DWELL_TIME_MS (100) +#define WMI_SURVEY_TIMEOUT_MS (10000) + +enum wmi_hidden_ssid { + WMI_HIDDEN_SSID_DISABLED = 0x00, + WMI_HIDDEN_SSID_SEND_EMPTY = 0x10, + WMI_HIDDEN_SSID_CLEAR = 0xFE, +}; + #endif /* __WILOCITY_WMI_H__ */ From eabb03b4a37cc7945ca62453402c74a0622e5a05 Mon Sep 17 00:00:00 2001 From: Lior David Date: Tue, 1 Mar 2016 19:18:10 +0200 Subject: [PATCH 0021/1649] wil6210: basic PBSS/PCP support PBSS (Personal Basic Service Set) is a new BSS type for DMG networks. It is similar to infrastructure BSS, having an AP-like entity called PCP (PBSS Control Point), but it has few differences. For example, stations inside a PBSS can communicate directly, and the PCP role can be transferred between stations. This change adds PBSS support, and has 2 main parts: 1. When starting an AP, add an option to start as a PCP instead. This is implemented by a new PBSS flag which is passed as part of the cfg80211_ap_settings structure. 2. When connecting to a BSS, add an option to connect to a PCP instead of an AP. This is again implemented by a new PBSS flag, added to the cfg80211_connect_params structure. Signed-off-by: Lior David Signed-off-by: Maya Erez Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wil6210/cfg80211.c | 27 +++++++++++---------- drivers/net/wireless/ath/wil6210/wil6210.h | 2 ++ 2 files changed, 16 insertions(+), 13 deletions(-) diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c index 0c25e8beec3c..4a95e1c8bc22 100644 --- a/drivers/net/wireless/ath/wil6210/cfg80211.c +++ b/drivers/net/wireless/ath/wil6210/cfg80211.c @@ -402,6 +402,7 @@ static void wil_print_connect_params(struct wil6210_priv *wil, print_hex_dump(KERN_INFO, " SSID: ", DUMP_PREFIX_OFFSET, 16, 1, sme->ssid, sme->ssid_len, true); wil_info(wil, " Privacy: %s\n", sme->privacy ? 
"secure" : "open"); + wil_info(wil, " PBSS: %d\n", sme->pbss); wil_print_crypto(wil, &sme->crypto); } @@ -416,6 +417,7 @@ static int wil_cfg80211_connect(struct wiphy *wiphy, const u8 *rsn_eid; int ch; int rc = 0; + enum ieee80211_bss_type bss_type = IEEE80211_BSS_TYPE_ESS; wil_print_connect_params(wil, sme); @@ -434,14 +436,12 @@ static int wil_cfg80211_connect(struct wiphy *wiphy, if (sme->privacy && !rsn_eid) wil_info(wil, "WSC connection\n"); - if (sme->pbss) { - wil_err(wil, "connect - PBSS not yet supported\n"); - return -EOPNOTSUPP; - } + if (sme->pbss) + bss_type = IEEE80211_BSS_TYPE_PBSS; bss = cfg80211_get_bss(wiphy, sme->channel, sme->bssid, sme->ssid, sme->ssid_len, - IEEE80211_BSS_TYPE_ESS, IEEE80211_PRIVACY_ANY); + bss_type, IEEE80211_PRIVACY_ANY); if (!bss) { wil_err(wil, "Unable to find BSS\n"); return -ENOENT; @@ -936,13 +936,16 @@ static int _wil_cfg80211_start_ap(struct wiphy *wiphy, const u8 *ssid, size_t ssid_len, u32 privacy, int bi, u8 chan, struct cfg80211_beacon_data *bcon, - u8 hidden_ssid) + u8 hidden_ssid, u32 pbss) { struct wil6210_priv *wil = wiphy_to_wil(wiphy); int rc; struct wireless_dev *wdev = ndev->ieee80211_ptr; u8 wmi_nettype = wil_iftype_nl2wmi(wdev->iftype); + if (pbss) + wmi_nettype = WMI_NETTYPE_P2P; + wil_set_recovery_state(wil, fw_recovery_idle); mutex_lock(&wil->mutex); @@ -963,6 +966,7 @@ static int _wil_cfg80211_start_ap(struct wiphy *wiphy, wil->privacy = privacy; wil->channel = chan; wil->hidden_ssid = hidden_ssid; + wil->pbss = pbss; netif_carrier_on(ndev); @@ -1012,7 +1016,8 @@ static int wil_cfg80211_change_beacon(struct wiphy *wiphy, wdev->ssid_len, privacy, wdev->beacon_interval, wil->channel, bcon, - wil->hidden_ssid); + wil->hidden_ssid, + wil->pbss); } else { rc = _wil_cfg80211_set_ies(wiphy, bcon); } @@ -1038,11 +1043,6 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy, return -EINVAL; } - if (info->pbss) { - wil_err(wil, "AP: PBSS not yet supported\n"); - return -EOPNOTSUPP; - } - switch (info->hidden_ssid) { case NL80211_HIDDEN_SSID_NOT_IN_USE: hidden_ssid = WMI_HIDDEN_SSID_DISABLED; @@ -1068,6 +1068,7 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy, info->hidden_ssid); wil_dbg_misc(wil, "BI %d DTIM %d\n", info->beacon_interval, info->dtim_period); + wil_dbg_misc(wil, "PBSS %d\n", info->pbss); print_hex_dump_bytes("SSID ", DUMP_PREFIX_OFFSET, info->ssid, info->ssid_len); wil_print_bcon_data(bcon); @@ -1076,7 +1077,7 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy, rc = _wil_cfg80211_start_ap(wiphy, ndev, info->ssid, info->ssid_len, info->privacy, info->beacon_interval, channel->hw_value, - bcon, hidden_ssid); + bcon, hidden_ssid, info->pbss); return rc; } diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h index e69df0c1a125..69d970a74aae 100644 --- a/drivers/net/wireless/ath/wil6210/wil6210.h +++ b/drivers/net/wireless/ath/wil6210/wil6210.h @@ -604,6 +604,8 @@ struct wil6210_priv { struct wil_platform_ops platform_ops; struct pmc_ctx pmc; + + bool pbss; }; #define wil_to_wiphy(i) (i->wdev->wiphy) From 5f0823ef8b76f446ab8b187fabfb4e7560bc33a1 Mon Sep 17 00:00:00 2001 From: Maya Erez Date: Tue, 1 Mar 2016 19:18:11 +0200 Subject: [PATCH 0022/1649] wil6210: add support for platform specific notification events Add the ability to notify the platform driver on different events, such as FW crash, pre reset and FW ready. 
Signed-off-by: Maya Erez Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wil6210/interrupt.c | 5 +-- drivers/net/wireless/ath/wil6210/main.c | 32 +++++++++++++++++-- .../net/wireless/ath/wil6210/wil_platform.h | 8 ++++- 3 files changed, 40 insertions(+), 5 deletions(-) diff --git a/drivers/net/wireless/ath/wil6210/interrupt.c b/drivers/net/wireless/ath/wil6210/interrupt.c index 4f2ffa5c6e17..ae902958bf55 100644 --- a/drivers/net/wireless/ath/wil6210/interrupt.c +++ b/drivers/net/wireless/ath/wil6210/interrupt.c @@ -394,9 +394,10 @@ static irqreturn_t wil6210_irq_misc_thread(int irq, void *cookie) wil_fw_core_dump(wil); wil_notify_fw_error(wil); isr &= ~ISR_MISC_FW_ERROR; - if (wil->platform_ops.notify_crash) { + if (wil->platform_ops.notify) { wil_err(wil, "notify platform driver about FW crash"); - wil->platform_ops.notify_crash(wil->platform_handle); + wil->platform_ops.notify(wil->platform_handle, + WIL_PLATFORM_EVT_FW_CRASH); } else { wil_fw_error_recovery(wil); } diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c index 1fa215d0eeed..1472978dd3f0 100644 --- a/drivers/net/wireless/ath/wil6210/main.c +++ b/drivers/net/wireless/ath/wil6210/main.c @@ -764,6 +764,15 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw) if (wil->hw_version == HW_VER_UNKNOWN) return -ENODEV; + if (wil->platform_ops.notify) { + rc = wil->platform_ops.notify(wil->platform_handle, + WIL_PLATFORM_EVT_PRE_RESET); + if (rc) + wil_err(wil, + "%s: PRE_RESET platform notify failed, rc %d\n", + __func__, rc); + } + set_bit(wil_status_resetting, wil->status); cancel_work_sync(&wil->disconnect_worker); @@ -843,8 +852,27 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw) /* we just started MAC, wait for FW ready */ rc = wil_wait_for_fw_ready(wil); - if (rc == 0) /* check FW is responsive */ - rc = wmi_echo(wil); + if (rc) + return rc; + + /* check FW is responsive */ + rc = wmi_echo(wil); + if (rc) { + wil_err(wil, "%s: wmi_echo failed, rc %d\n", + __func__, rc); + return rc; + } + + if (wil->platform_ops.notify) { + rc = wil->platform_ops.notify(wil->platform_handle, + WIL_PLATFORM_EVT_FW_RDY); + if (rc) { + wil_err(wil, + "%s: FW_RDY notify failed, rc %d\n", + __func__, rc); + rc = 0; + } + } } return rc; diff --git a/drivers/net/wireless/ath/wil6210/wil_platform.h b/drivers/net/wireless/ath/wil6210/wil_platform.h index 9a949d910343..33d4a34b3b1c 100644 --- a/drivers/net/wireless/ath/wil6210/wil_platform.h +++ b/drivers/net/wireless/ath/wil6210/wil_platform.h @@ -19,6 +19,12 @@ struct device; +enum wil_platform_event { + WIL_PLATFORM_EVT_FW_CRASH = 0, + WIL_PLATFORM_EVT_PRE_RESET = 1, + WIL_PLATFORM_EVT_FW_RDY = 2, +}; + /** * struct wil_platform_ops - wil platform module calls from this * driver to platform driver @@ -28,7 +34,7 @@ struct wil_platform_ops { int (*suspend)(void *handle); int (*resume)(void *handle); void (*uninit)(void *handle); - int (*notify_crash)(void *handle); + int (*notify)(void *handle, enum wil_platform_event evt); }; /** From e6d68341e7286386451adf14cebb635a52b0effe Mon Sep 17 00:00:00 2001 From: Dedy Lansky Date: Tue, 1 Mar 2016 19:18:12 +0200 Subject: [PATCH 0023/1649] wil6210: p2p initial support supporting p2p_find, p2p_listen and p2p_connect Use updated cfg80211_get_bss API (additional argument) Signed-off-by: Dedy Lansky Signed-off-by: Lior David Signed-off-by: Maya Erez Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wil6210/Makefile | 1 + drivers/net/wireless/ath/wil6210/cfg80211.c | 67 ++++-- 
drivers/net/wireless/ath/wil6210/main.c | 6 + drivers/net/wireless/ath/wil6210/p2p.c | 220 ++++++++++++++++++++ drivers/net/wireless/ath/wil6210/wil6210.h | 31 ++- drivers/net/wireless/ath/wil6210/wmi.c | 80 ++++++- 6 files changed, 389 insertions(+), 16 deletions(-) create mode 100644 drivers/net/wireless/ath/wil6210/p2p.c diff --git a/drivers/net/wireless/ath/wil6210/Makefile b/drivers/net/wireless/ath/wil6210/Makefile index fdf63d5fe82b..11b544b26c74 100644 --- a/drivers/net/wireless/ath/wil6210/Makefile +++ b/drivers/net/wireless/ath/wil6210/Makefile @@ -18,6 +18,7 @@ wil6210-$(CONFIG_WIL6210_TRACING) += trace.o wil6210-y += wil_platform.o wil6210-y += ethtool.o wil6210-y += wil_crash_dump.o +wil6210-y += p2p.o # for tracing framework to find trace.h CFLAGS_trace.o := -I$(src) diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c index 4a95e1c8bc22..80e1482f480d 100644 --- a/drivers/net/wireless/ath/wil6210/cfg80211.c +++ b/drivers/net/wireless/ath/wil6210/cfg80211.c @@ -18,6 +18,8 @@ #include "wil6210.h" #include "wmi.h" +#define WIL_MAX_ROC_DURATION_MS 5000 + #define CHAN60G(_channel, _flags) { \ .band = IEEE80211_BAND_60GHZ, \ .center_freq = 56160 + (2160 * (_channel)), \ @@ -239,6 +241,20 @@ static int wil_cfg80211_change_iface(struct wiphy *wiphy, { struct wil6210_priv *wil = wiphy_to_wil(wiphy); struct wireless_dev *wdev = wil->wdev; + int rc; + + wil_dbg_misc(wil, "%s() type=%d\n", __func__, type); + + if (netif_running(wil_to_ndev(wil))) { + wil_dbg_misc(wil, "interface is up. resetting...\n"); + mutex_lock(&wil->mutex); + __wil_down(wil); + rc = __wil_up(wil); + mutex_unlock(&wil->mutex); + + if (rc) + return rc; + } switch (type) { case NL80211_IFTYPE_STATION: @@ -274,6 +290,8 @@ static int wil_cfg80211_scan(struct wiphy *wiphy, uint i, n; int rc; + wil_dbg_misc(wil, "%s()\n", __func__); + if (wil->scan_request) { wil_err(wil, "Already scanning\n"); return -EAGAIN; @@ -294,6 +312,14 @@ static int wil_cfg80211_scan(struct wiphy *wiphy, return -EBUSY; } + /* check if scan request is a P2P search request */ + if (wil_scan_is_p2p_search(wil, request)) { + wil->scan_request = request; + return wil_p2p_search(wil, request); + } + + wil_p2p_stop_discovery(wil); + wil_dbg_misc(wil, "Start scan_request 0x%p\n", request); wil_dbg_misc(wil, "SSID count: %d", request->n_ssids); @@ -419,6 +445,7 @@ static int wil_cfg80211_connect(struct wiphy *wiphy, int rc = 0; enum ieee80211_bss_type bss_type = IEEE80211_BSS_TYPE_ESS; + wil_dbg_misc(wil, "%s()\n", __func__); wil_print_connect_params(wil, sme); if (test_bit(wil_status_fwconnecting, wil->status) || @@ -584,6 +611,16 @@ int wil_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev, struct wmi_sw_tx_complete_event evt; } __packed evt; + /* Note, currently we do not support the "wait" parameter, user-space + * must call remain_on_channel before mgmt_tx or listen on a channel + * another way (AP/PCP or connected station) + * in addition we need to check if specified "chan" argument is + * different from currently "listened" channel and fail if it is. 
+ */ + + wil_dbg_misc(wil, "%s()\n", __func__); + print_hex_dump_bytes("mgmt tx frame ", DUMP_PREFIX_OFFSET, buf, len); + cmd = kmalloc(sizeof(*cmd) + len, GFP_KERNEL); if (!cmd) { rc = -ENOMEM; @@ -628,9 +665,11 @@ static enum wmi_key_usage wil_detect_key_usage(struct wil6210_priv *wil, } else { switch (wdev->iftype) { case NL80211_IFTYPE_STATION: + case NL80211_IFTYPE_P2P_CLIENT: rc = WMI_KEY_USE_RX_GROUP; break; case NL80211_IFTYPE_AP: + case NL80211_IFTYPE_P2P_GO: rc = WMI_KEY_USE_TX_GROUP; break; default: @@ -770,16 +809,17 @@ static int wil_remain_on_channel(struct wiphy *wiphy, struct wil6210_priv *wil = wiphy_to_wil(wiphy); int rc; - /* TODO: handle duration */ - wil_info(wil, "%s(%d, %d ms)\n", __func__, chan->center_freq, duration); + wil_dbg_misc(wil, "%s() center_freq=%d, duration=%d\n", __func__, + chan->center_freq, duration); - rc = wmi_set_channel(wil, chan->hw_value); + rc = wil_p2p_listen(wil, duration, chan, cookie); if (rc) return rc; - rc = wmi_rxon(wil, true); + cfg80211_ready_on_channel(wil->wdev, *cookie, chan, duration, + GFP_KERNEL); - return rc; + return 0; } static int wil_cancel_remain_on_channel(struct wiphy *wiphy, @@ -787,13 +827,12 @@ static int wil_cancel_remain_on_channel(struct wiphy *wiphy, u64 cookie) { struct wil6210_priv *wil = wiphy_to_wil(wiphy); - int rc; - wil_info(wil, "%s()\n", __func__); + wil_dbg_misc(wil, "%s()\n", __func__); - rc = wmi_rxon(wil, false); + wil_p2p_cancel_listen(wil, cookie); - return rc; + return 0; } /** @@ -1251,14 +1290,18 @@ static void wil_wiphy_init(struct wiphy *wiphy) { wiphy->max_scan_ssids = 1; wiphy->max_scan_ie_len = WMI_MAX_IE_LEN; + wiphy->max_remain_on_channel_duration = WIL_MAX_ROC_DURATION_MS; wiphy->max_num_pmkids = 0 /* TODO: */; wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_AP) | + BIT(NL80211_IFTYPE_P2P_CLIENT) | + BIT(NL80211_IFTYPE_P2P_GO) | + /* enable this when supporting multi vif + * BIT(NL80211_IFTYPE_P2P_DEVICE) | + */ BIT(NL80211_IFTYPE_MONITOR); - /* TODO: enable P2P when integrated with supplicant: - * BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO) - */ wiphy->flags |= WIPHY_FLAG_HAVE_AP_SME | + WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL | WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD; dev_dbg(wiphy_dev(wiphy), "%s : flags = 0x%08x\n", __func__, wiphy->flags); diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c index 1472978dd3f0..025751e223c2 100644 --- a/drivers/net/wireless/ath/wil6210/main.c +++ b/drivers/net/wireless/ath/wil6210/main.c @@ -453,6 +453,8 @@ int wil_priv_init(struct wil6210_priv *wil) wil->bcast_vring = -1; setup_timer(&wil->connect_timer, wil_connect_timer_fn, (ulong)wil); setup_timer(&wil->scan_timer, wil_scan_timer_fn, (ulong)wil); + setup_timer(&wil->p2p.discovery_timer, wil_p2p_discovery_timer_fn, + (ulong)wil); INIT_WORK(&wil->disconnect_worker, wil_disconnect_worker); INIT_WORK(&wil->wmi_event_worker, wmi_event_worker); @@ -513,8 +515,10 @@ void wil_priv_deinit(struct wil6210_priv *wil) wil_set_recovery_state(wil, fw_recovery_idle); del_timer_sync(&wil->scan_timer); + del_timer_sync(&wil->p2p.discovery_timer); cancel_work_sync(&wil->disconnect_worker); cancel_work_sync(&wil->fw_error_worker); + cancel_work_sync(&wil->p2p.discovery_expired_work); mutex_lock(&wil->mutex); wil6210_disconnect(wil, NULL, WLAN_REASON_DEAUTH_LEAVING, false); mutex_unlock(&wil->mutex); @@ -979,6 +983,8 @@ int __wil_down(struct wil6210_priv *wil) } wil_enable_irq(wil); + wil_p2p_stop_discovery(wil); + if (wil->scan_request) { 
wil_dbg_misc(wil, "Abort scan_request 0x%p\n", wil->scan_request); diff --git a/drivers/net/wireless/ath/wil6210/p2p.c b/drivers/net/wireless/ath/wil6210/p2p.c new file mode 100644 index 000000000000..974bf84dbf52 --- /dev/null +++ b/drivers/net/wireless/ath/wil6210/p2p.c @@ -0,0 +1,220 @@ +/* + * Copyright (c) 2014-2016 Qualcomm Atheros, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include "wil6210.h" +#include "wmi.h" + +#define P2P_WILDCARD_SSID "DIRECT-" +#define P2P_DMG_SOCIAL_CHANNEL 2 +#define P2P_SEARCH_DURATION_MS 500 +#define P2P_DEFAULT_BI 100 + +int wil_scan_is_p2p_search(struct wil6210_priv *wil, + struct cfg80211_scan_request *request) +{ + /* need P2P_DEVICE changes to make this work */ + return 0; +} + +void wil_p2p_discovery_timer_fn(ulong x) +{ + struct wil6210_priv *wil = (void *)x; + + wil_dbg_misc(wil, "%s\n", __func__); + + schedule_work(&wil->p2p.discovery_expired_work); +} + +int wil_p2p_search(struct wil6210_priv *wil, + struct cfg80211_scan_request *request) +{ + int rc; + struct wil_p2p_info *p2p = &wil->p2p; + + wil_dbg_misc(wil, "%s: channel %d\n", + __func__, P2P_DMG_SOCIAL_CHANNEL); + + mutex_lock(&wil->mutex); + + if (p2p->discovery_started) { + wil_err(wil, "%s: search failed. discovery already ongoing\n", + __func__); + rc = -EBUSY; + goto out; + } + + rc = wmi_p2p_cfg(wil, P2P_DMG_SOCIAL_CHANNEL, P2P_DEFAULT_BI); + if (rc) { + wil_err(wil, "%s: wmi_p2p_cfg failed\n", __func__); + goto out; + } + + rc = wmi_set_ssid(wil, strlen(P2P_WILDCARD_SSID), P2P_WILDCARD_SSID); + if (rc) { + wil_err(wil, "%s: wmi_set_ssid failed\n", __func__); + goto out_stop; + } + + /* Set application IE to probe request and probe response */ + rc = wmi_set_ie(wil, WMI_FRAME_PROBE_REQ, + request->ie_len, request->ie); + if (rc) { + wil_err(wil, "%s: wmi_set_ie(WMI_FRAME_PROBE_REQ) failed\n", + __func__); + goto out_stop; + } + + /* supplicant doesn't provide Probe Response IEs. 
As a workaround - + * re-use Probe Request IEs + */ + rc = wmi_set_ie(wil, WMI_FRAME_PROBE_RESP, + request->ie_len, request->ie); + if (rc) { + wil_err(wil, "%s: wmi_set_ie(WMI_FRAME_PROBE_RESP) failed\n", + __func__); + goto out_stop; + } + + rc = wmi_start_search(wil); + if (rc) { + wil_err(wil, "%s: wmi_start_search failed\n", __func__); + goto out_stop; + } + + p2p->discovery_started = 1; + INIT_WORK(&p2p->discovery_expired_work, wil_p2p_search_expired); + mod_timer(&p2p->discovery_timer, + jiffies + msecs_to_jiffies(P2P_SEARCH_DURATION_MS)); + +out_stop: + if (rc) + wmi_stop_discovery(wil); + +out: + mutex_unlock(&wil->mutex); + return rc; +} + +int wil_p2p_listen(struct wil6210_priv *wil, unsigned int duration, + struct ieee80211_channel *chan, u64 *cookie) +{ + struct wil_p2p_info *p2p = &wil->p2p; + u8 channel = P2P_DMG_SOCIAL_CHANNEL; + int rc; + + if (chan) + channel = chan->hw_value; + + wil_dbg_misc(wil, "%s: duration %d\n", __func__, duration); + + mutex_lock(&wil->mutex); + + if (p2p->discovery_started) { + wil_err(wil, "%s: discovery already ongoing\n", __func__); + rc = -EBUSY; + goto out; + } + + rc = wmi_p2p_cfg(wil, channel, P2P_DEFAULT_BI); + if (rc) { + wil_err(wil, "%s: wmi_p2p_cfg failed\n", __func__); + goto out; + } + + rc = wmi_set_ssid(wil, strlen(P2P_WILDCARD_SSID), P2P_WILDCARD_SSID); + if (rc) { + wil_err(wil, "%s: wmi_set_ssid failed\n", __func__); + goto out_stop; + } + + rc = wmi_start_listen(wil); + if (rc) { + wil_err(wil, "%s: wmi_start_listen failed\n", __func__); + goto out_stop; + } + + memcpy(&p2p->listen_chan, chan, sizeof(*chan)); + *cookie = ++p2p->cookie; + + p2p->discovery_started = 1; + INIT_WORK(&p2p->discovery_expired_work, wil_p2p_listen_expired); + mod_timer(&p2p->discovery_timer, + jiffies + msecs_to_jiffies(duration)); + +out_stop: + if (rc) + wmi_stop_discovery(wil); + +out: + mutex_unlock(&wil->mutex); + return rc; +} + +void wil_p2p_stop_discovery(struct wil6210_priv *wil) +{ + struct wil_p2p_info *p2p = &wil->p2p; + + if (p2p->discovery_started) { + del_timer_sync(&p2p->discovery_timer); + p2p->discovery_started = 0; + wmi_stop_discovery(wil); + } +} + +void wil_p2p_cancel_listen(struct wil6210_priv *wil, u64 cookie) +{ + struct wil_p2p_info *p2p = &wil->p2p; + + if (cookie != p2p->cookie) + wil_info(wil, "%s: Cookie mismatch: 0x%016llx vs. 
0x%016llx\n", + __func__, p2p->cookie, cookie); + + wil_p2p_stop_discovery(wil); + cfg80211_remain_on_channel_expired(wil->wdev, + p2p->cookie, + &p2p->listen_chan, + GFP_KERNEL); +} + +void wil_p2p_listen_expired(struct work_struct *work) +{ + struct wil_p2p_info *p2p = container_of(work, + struct wil_p2p_info, discovery_expired_work); + struct wil6210_priv *wil = container_of(p2p, + struct wil6210_priv, p2p); + + wil_dbg_misc(wil, "%s()\n", __func__); + + wil_p2p_stop_discovery(wil); + cfg80211_remain_on_channel_expired(wil->wdev, + p2p->cookie, + &p2p->listen_chan, + GFP_KERNEL); +} + +void wil_p2p_search_expired(struct work_struct *work) +{ + struct wil_p2p_info *p2p = container_of(work, + struct wil_p2p_info, discovery_expired_work); + struct wil6210_priv *wil = container_of(p2p, + struct wil6210_priv, p2p); + + wil_dbg_misc(wil, "%s()\n", __func__); + + wil_p2p_stop_discovery(wil); + cfg80211_scan_done(wil->scan_request, 0); + wil->scan_request = NULL; +} diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h index 69d970a74aae..0aba86c6b05e 100644 --- a/drivers/net/wireless/ath/wil6210/wil6210.h +++ b/drivers/net/wireless/ath/wil6210/wil6210.h @@ -453,6 +453,14 @@ struct wil_tid_crypto_rx { struct wil_tid_crypto_rx_single key_id[4]; }; +struct wil_p2p_info { + struct ieee80211_channel listen_chan; + u8 discovery_started; + u64 cookie; + struct timer_list discovery_timer; /* listen/search duration */ + struct work_struct discovery_expired_work; /* listen/search expire */ +}; + enum wil_sta_status { wil_sta_unused = 0, wil_sta_conn_pending = 1, @@ -606,6 +614,8 @@ struct wil6210_priv { struct pmc_ctx pmc; bool pbss; + + struct wil_p2p_info p2p; }; #define wil_to_wiphy(i) (i->wdev->wiphy) @@ -731,7 +741,6 @@ int wmi_add_cipher_key(struct wil6210_priv *wil, u8 key_index, int wmi_echo(struct wil6210_priv *wil); int wmi_set_ie(struct wil6210_priv *wil, u8 type, u16 ie_len, const void *ie); int wmi_rx_chain_add(struct wil6210_priv *wil, struct vring *vring); -int wmi_p2p_cfg(struct wil6210_priv *wil, int channel); int wmi_rxon(struct wil6210_priv *wil, bool on); int wmi_get_temperature(struct wil6210_priv *wil, u32 *t_m, u32 *t_r); int wmi_disconnect_sta(struct wil6210_priv *wil, const u8 *mac, u16 reason, @@ -754,6 +763,26 @@ void wil_unmask_irq(struct wil6210_priv *wil); void wil_configure_interrupt_moderation(struct wil6210_priv *wil); void wil_disable_irq(struct wil6210_priv *wil); void wil_enable_irq(struct wil6210_priv *wil); + +/* P2P */ +int wil_scan_is_p2p_search(struct wil6210_priv *wil, + struct cfg80211_scan_request *request); +void wil_p2p_discovery_timer_fn(ulong x); +int wil_p2p_search(struct wil6210_priv *wil, + struct cfg80211_scan_request *request); +int wil_p2p_listen(struct wil6210_priv *wil, unsigned int duration, + struct ieee80211_channel *chan, u64 *cookie); +void wil_p2p_stop_discovery(struct wil6210_priv *wil); +void wil_p2p_cancel_listen(struct wil6210_priv *wil, u64 cookie); +void wil_p2p_listen_expired(struct work_struct *work); +void wil_p2p_search_expired(struct work_struct *work); + +/* WMI for P2P */ +int wmi_p2p_cfg(struct wil6210_priv *wil, int channel, int bi); +int wmi_start_listen(struct wil6210_priv *wil); +int wmi_start_search(struct wil6210_priv *wil); +int wmi_stop_discovery(struct wil6210_priv *wil); + int wil_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev, struct cfg80211_mgmt_tx_params *params, u64 *cookie); diff --git a/drivers/net/wireless/ath/wil6210/wmi.c 
b/drivers/net/wireless/ath/wil6210/wmi.c index db7d2b602d1a..4a1cdd256ef2 100644 --- a/drivers/net/wireless/ath/wil6210/wmi.c +++ b/drivers/net/wireless/ath/wil6210/wmi.c @@ -368,6 +368,8 @@ static void wmi_evt_rx_mgmt(struct wil6210_priv *wil, int id, void *d, int len) wil_hex_dump_wmi("IE ", DUMP_PREFIX_OFFSET, 16, 1, ie_buf, ie_len, true); + wil_dbg_wmi(wil, "Capability info : 0x%04x\n", cap); + bss = cfg80211_inform_bss_frame(wiphy, channel, rx_mgmt_frame, d_len, signal, GFP_KERNEL); if (bss) { @@ -1072,14 +1074,86 @@ int wmi_get_channel(struct wil6210_priv *wil, int *channel) return 0; } -int wmi_p2p_cfg(struct wil6210_priv *wil, int channel) +int wmi_p2p_cfg(struct wil6210_priv *wil, int channel, int bi) { + int rc; struct wmi_p2p_cfg_cmd cmd = { - .discovery_mode = WMI_DISCOVERY_MODE_NON_OFFLOAD, + .discovery_mode = WMI_DISCOVERY_MODE_PEER2PEER, + .bcon_interval = cpu_to_le16(bi), .channel = channel - 1, }; + struct { + struct wmi_cmd_hdr wmi; + struct wmi_p2p_cfg_done_event evt; + } __packed reply; - return wmi_send(wil, WMI_P2P_CFG_CMDID, &cmd, sizeof(cmd)); + wil_dbg_wmi(wil, "sending WMI_P2P_CFG_CMDID\n"); + + rc = wmi_call(wil, WMI_P2P_CFG_CMDID, &cmd, sizeof(cmd), + WMI_P2P_CFG_DONE_EVENTID, &reply, sizeof(reply), 300); + if (!rc && reply.evt.status != WMI_FW_STATUS_SUCCESS) { + wil_err(wil, "P2P_CFG failed. status %d\n", reply.evt.status); + rc = -EINVAL; + } + + return rc; +} + +int wmi_start_listen(struct wil6210_priv *wil) +{ + int rc; + struct { + struct wmi_cmd_hdr wmi; + struct wmi_listen_started_event evt; + } __packed reply; + + wil_dbg_wmi(wil, "sending WMI_START_LISTEN_CMDID\n"); + + rc = wmi_call(wil, WMI_START_LISTEN_CMDID, NULL, 0, + WMI_LISTEN_STARTED_EVENTID, &reply, sizeof(reply), 300); + if (!rc && reply.evt.status != WMI_FW_STATUS_SUCCESS) { + wil_err(wil, "device failed to start listen. status %d\n", + reply.evt.status); + rc = -EINVAL; + } + + return rc; +} + +int wmi_start_search(struct wil6210_priv *wil) +{ + int rc; + struct { + struct wmi_cmd_hdr wmi; + struct wmi_search_started_event evt; + } __packed reply; + + wil_dbg_wmi(wil, "sending WMI_START_SEARCH_CMDID\n"); + + rc = wmi_call(wil, WMI_START_SEARCH_CMDID, NULL, 0, + WMI_SEARCH_STARTED_EVENTID, &reply, sizeof(reply), 300); + if (!rc && reply.evt.status != WMI_FW_STATUS_SUCCESS) { + wil_err(wil, "device failed to start search. status %d\n", + reply.evt.status); + rc = -EINVAL; + } + + return rc; +} + +int wmi_stop_discovery(struct wil6210_priv *wil) +{ + int rc; + + wil_dbg_wmi(wil, "sending WMI_DISCOVERY_STOP_CMDID\n"); + + rc = wmi_call(wil, WMI_DISCOVERY_STOP_CMDID, NULL, 0, + WMI_DISCOVERY_STOPPED_EVENTID, NULL, 0, 100); + + if (rc) + wil_err(wil, "Failed to stop discovery\n"); + + return rc; } int wmi_del_cipher_key(struct wil6210_priv *wil, u8 key_index, From 4332cac17b5c0cb80d8b99fda33a0faad3238b0e Mon Sep 17 00:00:00 2001 From: Lior David Date: Tue, 1 Mar 2016 19:18:13 +0200 Subject: [PATCH 0024/1649] wil6210: P2P_DEVICE virtual interface support Added support for the P2P_DEVICE virtual interface. This interface is intended for P2P management operations such as discovery and GO negotiation. Normally it is implemented by drivers to allow a separate interface for P2P management with its own MAC address, but for 11ad drivers it is needed to support P2P search, since it cannot otherwise be separated from normal scan. Since we only support a single interface/MAC address, we can't easily separate between primary and P2P_DEVICE interfaces. 
For example when a management packet arrives we can't tell for which interface it is intended. To work around this, we store a pointer to the interface where the last "radio operation" was triggered such as scan or remain on channel, and we forward management packets and scan results to this interface. Signed-off-by: Lior David Signed-off-by: Maya Erez Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wil6210/cfg80211.c | 136 ++++++++++++++++++-- drivers/net/wireless/ath/wil6210/main.c | 1 + drivers/net/wireless/ath/wil6210/netdev.c | 1 + drivers/net/wireless/ath/wil6210/p2p.c | 24 ++-- drivers/net/wireless/ath/wil6210/pcie_bus.c | 1 + drivers/net/wireless/ath/wil6210/wil6210.h | 8 +- drivers/net/wireless/ath/wil6210/wmi.c | 7 +- 7 files changed, 152 insertions(+), 26 deletions(-) diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c index 80e1482f480d..24f9829c8222 100644 --- a/drivers/net/wireless/ath/wil6210/cfg80211.c +++ b/drivers/net/wireless/ath/wil6210/cfg80211.c @@ -78,6 +78,12 @@ wil_mgmt_stypes[NUM_NL80211_IFTYPES] = { .rx = BIT(IEEE80211_STYPE_ACTION >> 4) | BIT(IEEE80211_STYPE_PROBE_REQ >> 4) }, + [NL80211_IFTYPE_P2P_DEVICE] = { + .tx = BIT(IEEE80211_STYPE_ACTION >> 4) | + BIT(IEEE80211_STYPE_PROBE_RESP >> 4), + .rx = BIT(IEEE80211_STYPE_ACTION >> 4) | + BIT(IEEE80211_STYPE_PROBE_REQ >> 4) + }, }; static const u32 wil_cipher_suites[] = { @@ -234,13 +240,68 @@ static int wil_cfg80211_dump_station(struct wiphy *wiphy, return rc; } +static struct wireless_dev * +wil_cfg80211_add_iface(struct wiphy *wiphy, const char *name, + unsigned char name_assign_type, + enum nl80211_iftype type, + u32 *flags, struct vif_params *params) +{ + struct wil6210_priv *wil = wiphy_to_wil(wiphy); + struct net_device *ndev = wil_to_ndev(wil); + struct wireless_dev *p2p_wdev; + + wil_dbg_misc(wil, "%s()\n", __func__); + + if (type != NL80211_IFTYPE_P2P_DEVICE) { + wil_err(wil, "%s: unsupported iftype %d\n", __func__, type); + return ERR_PTR(-EINVAL); + } + + if (wil->p2p_wdev) { + wil_err(wil, "%s: P2P_DEVICE interface already created\n", + __func__); + return ERR_PTR(-EINVAL); + } + + p2p_wdev = kzalloc(sizeof(*p2p_wdev), GFP_KERNEL); + if (!p2p_wdev) + return ERR_PTR(-ENOMEM); + + p2p_wdev->iftype = type; + p2p_wdev->wiphy = wiphy; + /* use our primary ethernet address */ + ether_addr_copy(p2p_wdev->address, ndev->perm_addr); + + wil->p2p_wdev = p2p_wdev; + + return p2p_wdev; +} + +static int wil_cfg80211_del_iface(struct wiphy *wiphy, + struct wireless_dev *wdev) +{ + struct wil6210_priv *wil = wiphy_to_wil(wiphy); + + wil_dbg_misc(wil, "%s()\n", __func__); + + if (wdev != wil->p2p_wdev) { + wil_err(wil, "%s: delete of incorrect interface 0x%p\n", + __func__, wdev); + return -EINVAL; + } + + wil_p2p_wdev_free(wil); + + return 0; +} + static int wil_cfg80211_change_iface(struct wiphy *wiphy, struct net_device *ndev, enum nl80211_iftype type, u32 *flags, struct vif_params *params) { struct wil6210_priv *wil = wiphy_to_wil(wiphy); - struct wireless_dev *wdev = wil->wdev; + struct wireless_dev *wdev = wil_to_wdev(wil); int rc; wil_dbg_misc(wil, "%s() type=%d\n", __func__, type); @@ -282,7 +343,7 @@ static int wil_cfg80211_scan(struct wiphy *wiphy, struct cfg80211_scan_request *request) { struct wil6210_priv *wil = wiphy_to_wil(wiphy); - struct wireless_dev *wdev = wil->wdev; + struct wireless_dev *wdev = request->wdev; struct { struct wmi_start_scan_cmd cmd; u16 chnl[4]; @@ -290,7 +351,8 @@ static int wil_cfg80211_scan(struct wiphy *wiphy, uint i, n; 
int rc; - wil_dbg_misc(wil, "%s()\n", __func__); + wil_dbg_misc(wil, "%s(), wdev=0x%p iftype=%d\n", + __func__, wdev, wdev->iftype); if (wil->scan_request) { wil_err(wil, "Already scanning\n"); @@ -301,6 +363,7 @@ static int wil_cfg80211_scan(struct wiphy *wiphy, switch (wdev->iftype) { case NL80211_IFTYPE_STATION: case NL80211_IFTYPE_P2P_CLIENT: + case NL80211_IFTYPE_P2P_DEVICE: break; default: return -EOPNOTSUPP; @@ -312,10 +375,16 @@ static int wil_cfg80211_scan(struct wiphy *wiphy, return -EBUSY; } - /* check if scan request is a P2P search request */ - if (wil_scan_is_p2p_search(wil, request)) { + /* scan on P2P_DEVICE is handled as p2p search */ + if (wdev->iftype == NL80211_IFTYPE_P2P_DEVICE) { wil->scan_request = request; - return wil_p2p_search(wil, request); + wil->radio_wdev = wdev; + rc = wil_p2p_search(wil, request); + if (rc) { + wil->radio_wdev = wil_to_wdev(wil); + wil->scan_request = NULL; + } + return rc; } wil_p2p_stop_discovery(wil); @@ -378,12 +447,14 @@ static int wil_cfg80211_scan(struct wiphy *wiphy, wil_dbg_misc(wil, "active scan with discovery_mode=1\n"); } + wil->radio_wdev = wdev; rc = wmi_send(wil, WMI_START_SCAN_CMDID, &cmd, sizeof(cmd.cmd) + cmd.cmd.num_channels * sizeof(cmd.cmd.channel_list[0])); out: if (rc) { del_timer_sync(&wil->scan_timer); + wil->radio_wdev = wil_to_wdev(wil); wil->scan_request = NULL; } @@ -647,7 +718,7 @@ static int wil_cfg80211_set_channel(struct wiphy *wiphy, struct cfg80211_chan_def *chandef) { struct wil6210_priv *wil = wiphy_to_wil(wiphy); - struct wireless_dev *wdev = wil->wdev; + struct wireless_dev *wdev = wil_to_wdev(wil); wdev->preset_chandef = *chandef; @@ -657,7 +728,7 @@ static int wil_cfg80211_set_channel(struct wiphy *wiphy, static enum wmi_key_usage wil_detect_key_usage(struct wil6210_priv *wil, bool pairwise) { - struct wireless_dev *wdev = wil->wdev; + struct wireless_dev *wdev = wil_to_wdev(wil); enum wmi_key_usage rc; if (pairwise) { @@ -809,14 +880,16 @@ static int wil_remain_on_channel(struct wiphy *wiphy, struct wil6210_priv *wil = wiphy_to_wil(wiphy); int rc; - wil_dbg_misc(wil, "%s() center_freq=%d, duration=%d\n", __func__, - chan->center_freq, duration); + wil_dbg_misc(wil, "%s() center_freq=%d, duration=%d iftype=%d\n", + __func__, chan->center_freq, duration, wdev->iftype); rc = wil_p2p_listen(wil, duration, chan, cookie); if (rc) return rc; - cfg80211_ready_on_channel(wil->wdev, *cookie, chan, duration, + wil->radio_wdev = wdev; + + cfg80211_ready_on_channel(wdev, *cookie, chan, duration, GFP_KERNEL); return 0; @@ -1263,7 +1336,26 @@ static int wil_cfg80211_change_bss(struct wiphy *wiphy, return 0; } +static int wil_cfg80211_start_p2p_device(struct wiphy *wiphy, + struct wireless_dev *wdev) +{ + struct wil6210_priv *wil = wiphy_to_wil(wiphy); + + wil_dbg_misc(wil, "%s: entered\n", __func__); + return 0; +} + +static void wil_cfg80211_stop_p2p_device(struct wiphy *wiphy, + struct wireless_dev *wdev) +{ + struct wil6210_priv *wil = wiphy_to_wil(wiphy); + + wil_dbg_misc(wil, "%s: entered\n", __func__); +} + static struct cfg80211_ops wil_cfg80211_ops = { + .add_virtual_intf = wil_cfg80211_add_iface, + .del_virtual_intf = wil_cfg80211_del_iface, .scan = wil_cfg80211_scan, .connect = wil_cfg80211_connect, .disconnect = wil_cfg80211_disconnect, @@ -1284,6 +1376,9 @@ static struct cfg80211_ops wil_cfg80211_ops = { .del_station = wil_cfg80211_del_station, .probe_client = wil_cfg80211_probe_client, .change_bss = wil_cfg80211_change_bss, + /* P2P device */ + .start_p2p_device = wil_cfg80211_start_p2p_device, + 
.stop_p2p_device = wil_cfg80211_stop_p2p_device, }; static void wil_wiphy_init(struct wiphy *wiphy) @@ -1296,9 +1391,7 @@ static void wil_wiphy_init(struct wiphy *wiphy) BIT(NL80211_IFTYPE_AP) | BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO) | - /* enable this when supporting multi vif - * BIT(NL80211_IFTYPE_P2P_DEVICE) | - */ + BIT(NL80211_IFTYPE_P2P_DEVICE) | BIT(NL80211_IFTYPE_MONITOR); wiphy->flags |= WIPHY_FLAG_HAVE_AP_SME | WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL | @@ -1369,3 +1462,18 @@ void wil_wdev_free(struct wil6210_priv *wil) wiphy_free(wdev->wiphy); kfree(wdev); } + +void wil_p2p_wdev_free(struct wil6210_priv *wil) +{ + struct wireless_dev *p2p_wdev; + + mutex_lock(&wil->p2p_wdev_mutex); + p2p_wdev = wil->p2p_wdev; + if (p2p_wdev) { + wil->p2p_wdev = NULL; + wil->radio_wdev = wil_to_wdev(wil); + cfg80211_unregister_wdev(p2p_wdev); + kfree(p2p_wdev); + } + mutex_unlock(&wil->p2p_wdev_mutex); +} diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c index 025751e223c2..e09e9bb28e39 100644 --- a/drivers/net/wireless/ath/wil6210/main.c +++ b/drivers/net/wireless/ath/wil6210/main.c @@ -446,6 +446,7 @@ int wil_priv_init(struct wil6210_priv *wil) mutex_init(&wil->mutex); mutex_init(&wil->wmi_mutex); mutex_init(&wil->probe_client_mutex); + mutex_init(&wil->p2p_wdev_mutex); init_completion(&wil->wmi_ready); init_completion(&wil->wmi_call); diff --git a/drivers/net/wireless/ath/wil6210/netdev.c b/drivers/net/wireless/ath/wil6210/netdev.c index ecc3c1bdae4b..d4ec5b278eb3 100644 --- a/drivers/net/wireless/ath/wil6210/netdev.c +++ b/drivers/net/wireless/ath/wil6210/netdev.c @@ -149,6 +149,7 @@ void *wil_if_alloc(struct device *dev) wil = wdev_to_wil(wdev); wil->wdev = wdev; + wil->radio_wdev = wdev; wil_dbg_misc(wil, "%s()\n", __func__); diff --git a/drivers/net/wireless/ath/wil6210/p2p.c b/drivers/net/wireless/ath/wil6210/p2p.c index 974bf84dbf52..d223648076a0 100644 --- a/drivers/net/wireless/ath/wil6210/p2p.c +++ b/drivers/net/wireless/ath/wil6210/p2p.c @@ -22,13 +22,6 @@ #define P2P_SEARCH_DURATION_MS 500 #define P2P_DEFAULT_BI 100 -int wil_scan_is_p2p_search(struct wil6210_priv *wil, - struct cfg80211_scan_request *request) -{ - /* need P2P_DEVICE changes to make this work */ - return 0; -} - void wil_p2p_discovery_timer_fn(ulong x) { struct wil6210_priv *wil = (void *)x; @@ -183,10 +176,14 @@ void wil_p2p_cancel_listen(struct wil6210_priv *wil, u64 cookie) __func__, p2p->cookie, cookie); wil_p2p_stop_discovery(wil); - cfg80211_remain_on_channel_expired(wil->wdev, + + mutex_lock(&wil->p2p_wdev_mutex); + cfg80211_remain_on_channel_expired(wil->radio_wdev, p2p->cookie, &p2p->listen_chan, GFP_KERNEL); + wil->radio_wdev = wil->wdev; + mutex_unlock(&wil->p2p_wdev_mutex); } void wil_p2p_listen_expired(struct work_struct *work) @@ -199,10 +196,15 @@ void wil_p2p_listen_expired(struct work_struct *work) wil_dbg_misc(wil, "%s()\n", __func__); wil_p2p_stop_discovery(wil); - cfg80211_remain_on_channel_expired(wil->wdev, + + mutex_lock(&wil->p2p_wdev_mutex); + cfg80211_remain_on_channel_expired(wil->radio_wdev, p2p->cookie, &p2p->listen_chan, GFP_KERNEL); + wil->radio_wdev = wil->wdev; + mutex_unlock(&wil->p2p_wdev_mutex); + } void wil_p2p_search_expired(struct work_struct *work) @@ -215,6 +217,10 @@ void wil_p2p_search_expired(struct work_struct *work) wil_dbg_misc(wil, "%s()\n", __func__); wil_p2p_stop_discovery(wil); + + mutex_lock(&wil->p2p_wdev_mutex); cfg80211_scan_done(wil->scan_request, 0); wil->scan_request = NULL; + wil->radio_wdev = 
wil->wdev; + mutex_unlock(&wil->p2p_wdev_mutex); } diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c index e36f2a0c8cb6..aeb72c438e44 100644 --- a/drivers/net/wireless/ath/wil6210/pcie_bus.c +++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c @@ -275,6 +275,7 @@ static void wil_pcie_remove(struct pci_dev *pdev) pci_disable_device(pdev); if (wil->platform_ops.uninit) wil->platform_ops.uninit(wil->platform_handle); + wil_p2p_wdev_free(wil); wil_if_free(wil); } diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h index 0aba86c6b05e..9b77a0844a83 100644 --- a/drivers/net/wireless/ath/wil6210/wil6210.h +++ b/drivers/net/wireless/ath/wil6210/wil6210.h @@ -616,6 +616,11 @@ struct wil6210_priv { bool pbss; struct wil_p2p_info p2p; + + /* P2P_DEVICE vif */ + struct wireless_dev *p2p_wdev; + struct mutex p2p_wdev_mutex; /* protect @p2p_wdev */ + struct wireless_dev *radio_wdev; }; #define wil_to_wiphy(i) (i->wdev->wiphy) @@ -765,8 +770,6 @@ void wil_disable_irq(struct wil6210_priv *wil); void wil_enable_irq(struct wil6210_priv *wil); /* P2P */ -int wil_scan_is_p2p_search(struct wil6210_priv *wil, - struct cfg80211_scan_request *request); void wil_p2p_discovery_timer_fn(ulong x); int wil_p2p_search(struct wil6210_priv *wil, struct cfg80211_scan_request *request); @@ -794,6 +797,7 @@ int wil_cid_fill_sinfo(struct wil6210_priv *wil, int cid, struct wireless_dev *wil_cfg80211_init(struct device *dev); void wil_wdev_free(struct wil6210_priv *wil); +void wil_p2p_wdev_free(struct wil6210_priv *wil); int wmi_set_mac_address(struct wil6210_priv *wil, void *addr); int wmi_pcp_start(struct wil6210_priv *wil, int bi, u8 wmi_nettype, diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c index 4a1cdd256ef2..f0761758fac7 100644 --- a/drivers/net/wireless/ath/wil6210/wmi.c +++ b/drivers/net/wireless/ath/wil6210/wmi.c @@ -380,8 +380,10 @@ static void wmi_evt_rx_mgmt(struct wil6210_priv *wil, int id, void *d, int len) wil_err(wil, "cfg80211_inform_bss_frame() failed\n"); } } else { - cfg80211_rx_mgmt(wil->wdev, freq, signal, + mutex_lock(&wil->p2p_wdev_mutex); + cfg80211_rx_mgmt(wil->radio_wdev, freq, signal, (void *)rx_mgmt_frame, d_len, 0); + mutex_unlock(&wil->p2p_wdev_mutex); } } @@ -408,7 +410,10 @@ static void wmi_evt_scan_complete(struct wil6210_priv *wil, int id, wil->scan_request, aborted); del_timer_sync(&wil->scan_timer); + mutex_lock(&wil->p2p_wdev_mutex); cfg80211_scan_done(wil->scan_request, aborted); + wil->radio_wdev = wil->wdev; + mutex_unlock(&wil->p2p_wdev_mutex); wil->scan_request = NULL; } else { wil_err(wil, "SCAN_COMPLETE while not scanning\n"); From 280ab987ef21d1c196acb3af4663a99f94d9da00 Mon Sep 17 00:00:00 2001 From: Lior David Date: Tue, 1 Mar 2016 19:18:14 +0200 Subject: [PATCH 0025/1649] wil6210: fix race conditions in p2p listen and search Fix 2 race conditions found during test runs of P2P discovery: 1. Because wil_p2p_cancel_listen was not protected, user space could start a new P2P listen/search before wmi_stop_discovery completed. This caused a crash in the firmware. 2. In P2P listen, when listen timer expires and user space calls cancel_remain_on_channel at the same time, code could send the cfg80211_remain_on_channel_expired notification twice. Added protections with wil->mutex to several places that call wmi_stop_discovery. 
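The shape of the fix, condensed from the listen-expiry path below (not a complete function): stop discovery under wil->mutex and remember whether it was actually running, then send the one-shot cfg80211 notification only if it was, so the cancel and timer-expiry paths cannot both report it:

	mutex_lock(&wil->mutex);
	started = wil_p2p_stop_discovery(wil);	/* returns old discovery_started */
	mutex_unlock(&wil->mutex);

	if (started) {
		mutex_lock(&wil->p2p_wdev_mutex);
		cfg80211_remain_on_channel_expired(wil->radio_wdev, p2p->cookie,
						   &p2p->listen_chan, GFP_KERNEL);
		wil->radio_wdev = wil->wdev;
		mutex_unlock(&wil->p2p_wdev_mutex);
	}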
Signed-off-by: Lior David Signed-off-by: Maya Erez Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wil6210/cfg80211.c | 9 +-- drivers/net/wireless/ath/wil6210/main.c | 2 +- drivers/net/wireless/ath/wil6210/p2p.c | 63 +++++++++++++++------ drivers/net/wireless/ath/wil6210/wil6210.h | 4 +- 4 files changed, 53 insertions(+), 25 deletions(-) diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c index 24f9829c8222..e867c76a4197 100644 --- a/drivers/net/wireless/ath/wil6210/cfg80211.c +++ b/drivers/net/wireless/ath/wil6210/cfg80211.c @@ -387,7 +387,7 @@ static int wil_cfg80211_scan(struct wiphy *wiphy, return rc; } - wil_p2p_stop_discovery(wil); + (void)wil_p2p_stop_discovery(wil); wil_dbg_misc(wil, "Start scan_request 0x%p\n", request); wil_dbg_misc(wil, "SSID count: %d", request->n_ssids); @@ -868,6 +868,9 @@ static int wil_cfg80211_set_default_key(struct wiphy *wiphy, u8 key_index, bool unicast, bool multicast) { + struct wil6210_priv *wil = wiphy_to_wil(wiphy); + + wil_dbg_misc(wil, "%s: entered\n", __func__); return 0; } @@ -903,9 +906,7 @@ static int wil_cancel_remain_on_channel(struct wiphy *wiphy, wil_dbg_misc(wil, "%s()\n", __func__); - wil_p2p_cancel_listen(wil, cookie); - - return 0; + return wil_p2p_cancel_listen(wil, cookie); } /** diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c index e09e9bb28e39..05a4ae7a7765 100644 --- a/drivers/net/wireless/ath/wil6210/main.c +++ b/drivers/net/wireless/ath/wil6210/main.c @@ -984,7 +984,7 @@ int __wil_down(struct wil6210_priv *wil) } wil_enable_irq(wil); - wil_p2p_stop_discovery(wil); + (void)wil_p2p_stop_discovery(wil); if (wil->scan_request) { wil_dbg_misc(wil, "Abort scan_request 0x%p\n", diff --git a/drivers/net/wireless/ath/wil6210/p2p.c b/drivers/net/wireless/ath/wil6210/p2p.c index d223648076a0..2c1b8958180e 100644 --- a/drivers/net/wireless/ath/wil6210/p2p.c +++ b/drivers/net/wireless/ath/wil6210/p2p.c @@ -156,26 +156,42 @@ out: return rc; } -void wil_p2p_stop_discovery(struct wil6210_priv *wil) +u8 wil_p2p_stop_discovery(struct wil6210_priv *wil) { struct wil_p2p_info *p2p = &wil->p2p; + u8 started = p2p->discovery_started; if (p2p->discovery_started) { del_timer_sync(&p2p->discovery_timer); p2p->discovery_started = 0; wmi_stop_discovery(wil); } + + return started; } -void wil_p2p_cancel_listen(struct wil6210_priv *wil, u64 cookie) +int wil_p2p_cancel_listen(struct wil6210_priv *wil, u64 cookie) { struct wil_p2p_info *p2p = &wil->p2p; + u8 started; - if (cookie != p2p->cookie) + mutex_lock(&wil->mutex); + + if (cookie != p2p->cookie) { wil_info(wil, "%s: Cookie mismatch: 0x%016llx vs. 
0x%016llx\n", __func__, p2p->cookie, cookie); + mutex_unlock(&wil->mutex); + return -ENOENT; + } - wil_p2p_stop_discovery(wil); + started = wil_p2p_stop_discovery(wil); + + mutex_unlock(&wil->mutex); + + if (!started) { + wil_err(wil, "%s: listen not started\n", __func__); + return -ENOENT; + } mutex_lock(&wil->p2p_wdev_mutex); cfg80211_remain_on_channel_expired(wil->radio_wdev, @@ -184,6 +200,7 @@ void wil_p2p_cancel_listen(struct wil6210_priv *wil, u64 cookie) GFP_KERNEL); wil->radio_wdev = wil->wdev; mutex_unlock(&wil->p2p_wdev_mutex); + return 0; } void wil_p2p_listen_expired(struct work_struct *work) @@ -192,18 +209,23 @@ void wil_p2p_listen_expired(struct work_struct *work) struct wil_p2p_info, discovery_expired_work); struct wil6210_priv *wil = container_of(p2p, struct wil6210_priv, p2p); + u8 started; wil_dbg_misc(wil, "%s()\n", __func__); - wil_p2p_stop_discovery(wil); + mutex_lock(&wil->mutex); + started = wil_p2p_stop_discovery(wil); + mutex_unlock(&wil->mutex); - mutex_lock(&wil->p2p_wdev_mutex); - cfg80211_remain_on_channel_expired(wil->radio_wdev, - p2p->cookie, - &p2p->listen_chan, - GFP_KERNEL); - wil->radio_wdev = wil->wdev; - mutex_unlock(&wil->p2p_wdev_mutex); + if (started) { + mutex_lock(&wil->p2p_wdev_mutex); + cfg80211_remain_on_channel_expired(wil->radio_wdev, + p2p->cookie, + &p2p->listen_chan, + GFP_KERNEL); + wil->radio_wdev = wil->wdev; + mutex_unlock(&wil->p2p_wdev_mutex); + } } @@ -213,14 +235,19 @@ void wil_p2p_search_expired(struct work_struct *work) struct wil_p2p_info, discovery_expired_work); struct wil6210_priv *wil = container_of(p2p, struct wil6210_priv, p2p); + u8 started; wil_dbg_misc(wil, "%s()\n", __func__); - wil_p2p_stop_discovery(wil); + mutex_lock(&wil->mutex); + started = wil_p2p_stop_discovery(wil); + mutex_unlock(&wil->mutex); - mutex_lock(&wil->p2p_wdev_mutex); - cfg80211_scan_done(wil->scan_request, 0); - wil->scan_request = NULL; - wil->radio_wdev = wil->wdev; - mutex_unlock(&wil->p2p_wdev_mutex); + if (started) { + mutex_lock(&wil->p2p_wdev_mutex); + cfg80211_scan_done(wil->scan_request, 0); + wil->scan_request = NULL; + wil->radio_wdev = wil->wdev; + mutex_unlock(&wil->p2p_wdev_mutex); + } } diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h index 9b77a0844a83..68f60eadde13 100644 --- a/drivers/net/wireless/ath/wil6210/wil6210.h +++ b/drivers/net/wireless/ath/wil6210/wil6210.h @@ -775,8 +775,8 @@ int wil_p2p_search(struct wil6210_priv *wil, struct cfg80211_scan_request *request); int wil_p2p_listen(struct wil6210_priv *wil, unsigned int duration, struct ieee80211_channel *chan, u64 *cookie); -void wil_p2p_stop_discovery(struct wil6210_priv *wil); -void wil_p2p_cancel_listen(struct wil6210_priv *wil, u64 cookie); +u8 wil_p2p_stop_discovery(struct wil6210_priv *wil); +int wil_p2p_cancel_listen(struct wil6210_priv *wil, u64 cookie); void wil_p2p_listen_expired(struct work_struct *work); void wil_p2p_search_expired(struct work_struct *work); From 6777e71ca91ea488488362a919900488e0ade3f2 Mon Sep 17 00:00:00 2001 From: Lior David Date: Tue, 1 Mar 2016 19:18:15 +0200 Subject: [PATCH 0026/1649] wil6210: clean ioctl debug message Fix a debug message related to IOCTL that was incorrectly logged with the MISC category, and move it inside wil_ioctl so it will always be logged even if we call wil_ioctl from other places. 
Signed-off-by: Lior David Signed-off-by: Maya Erez Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wil6210/ioctl.c | 11 +++++++++-- drivers/net/wireless/ath/wil6210/netdev.c | 6 +----- 2 files changed, 10 insertions(+), 7 deletions(-) diff --git a/drivers/net/wireless/ath/wil6210/ioctl.c b/drivers/net/wireless/ath/wil6210/ioctl.c index f7f948621951..630380078236 100644 --- a/drivers/net/wireless/ath/wil6210/ioctl.c +++ b/drivers/net/wireless/ath/wil6210/ioctl.c @@ -161,13 +161,20 @@ out_free: int wil_ioctl(struct wil6210_priv *wil, void __user *data, int cmd) { + int ret; + switch (cmd) { case WIL_IOCTL_MEMIO: - return wil_ioc_memio_dword(wil, data); + ret = wil_ioc_memio_dword(wil, data); + break; case WIL_IOCTL_MEMIO_BLOCK: - return wil_ioc_memio_block(wil, data); + ret = wil_ioc_memio_block(wil, data); + break; default: wil_dbg_ioctl(wil, "Unsupported IOCTL 0x%04x\n", cmd); return -ENOIOCTLCMD; } + + wil_dbg_ioctl(wil, "ioctl(0x%04x) -> %d\n", cmd, ret); + return ret; } diff --git a/drivers/net/wireless/ath/wil6210/netdev.c b/drivers/net/wireless/ath/wil6210/netdev.c index d4ec5b278eb3..3bc0e2634db0 100644 --- a/drivers/net/wireless/ath/wil6210/netdev.c +++ b/drivers/net/wireless/ath/wil6210/netdev.c @@ -60,11 +60,7 @@ static int wil_do_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd) { struct wil6210_priv *wil = ndev_to_wil(ndev); - int ret = wil_ioctl(wil, ifr->ifr_data, cmd); - - wil_dbg_misc(wil, "ioctl(0x%04x) -> %d\n", cmd, ret); - - return ret; + return wil_ioctl(wil, ifr->ifr_data, cmd); } static const struct net_device_ops wil_netdev_ops = { From 375a173fc1524eb569c7e8f9cf331126a9d29033 Mon Sep 17 00:00:00 2001 From: Lior David Date: Tue, 1 Mar 2016 19:18:16 +0200 Subject: [PATCH 0027/1649] wil6210: fix no_fw_recovery mode with change_virtual_intf When FW crashed with no_fw_recovery mode enabled, user space could still call wil_cfg80211_change_iface quickly to change interface type, and this would cause recovery to proceed and FW crash logs may be lost. Fix this problem by not resetting the FW in case no_fw_recovery is enabled. Signed-off-by: Lior David Signed-off-by: Maya Erez Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wil6210/cfg80211.c | 2 +- drivers/net/wireless/ath/wil6210/interrupt.c | 1 + drivers/net/wireless/ath/wil6210/main.c | 5 +++++ drivers/net/wireless/ath/wil6210/wil6210.h | 1 + 4 files changed, 8 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c index e867c76a4197..33e54519602f 100644 --- a/drivers/net/wireless/ath/wil6210/cfg80211.c +++ b/drivers/net/wireless/ath/wil6210/cfg80211.c @@ -306,7 +306,7 @@ static int wil_cfg80211_change_iface(struct wiphy *wiphy, wil_dbg_misc(wil, "%s() type=%d\n", __func__, type); - if (netif_running(wil_to_ndev(wil))) { + if (netif_running(wil_to_ndev(wil)) && !wil_is_recovery_blocked(wil)) { wil_dbg_misc(wil, "interface is up. 
resetting...\n"); mutex_lock(&wil->mutex); __wil_down(wil); diff --git a/drivers/net/wireless/ath/wil6210/interrupt.c b/drivers/net/wireless/ath/wil6210/interrupt.c index ae902958bf55..fe66b2b646f0 100644 --- a/drivers/net/wireless/ath/wil6210/interrupt.c +++ b/drivers/net/wireless/ath/wil6210/interrupt.c @@ -391,6 +391,7 @@ static irqreturn_t wil6210_irq_misc_thread(int irq, void *cookie) wil_dbg_irq(wil, "Thread ISR MISC 0x%08x\n", isr); if (isr & ISR_MISC_FW_ERROR) { + wil->recovery_state = fw_recovery_pending; wil_fw_core_dump(wil); wil_notify_fw_error(wil); isr &= ~ISR_MISC_FW_ERROR; diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c index 05a4ae7a7765..c2a0a6625252 100644 --- a/drivers/net/wireless/ath/wil6210/main.c +++ b/drivers/net/wireless/ath/wil6210/main.c @@ -305,6 +305,11 @@ void wil_set_recovery_state(struct wil6210_priv *wil, int state) wake_up_interruptible(&wil->wq); } +bool wil_is_recovery_blocked(struct wil6210_priv *wil) +{ + return no_fw_recovery && (wil->recovery_state == fw_recovery_pending); +} + static void wil_fw_error_worker(struct work_struct *work) { struct wil6210_priv *wil = container_of(work, struct wil6210_priv, diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h index 68f60eadde13..a23dcee886a0 100644 --- a/drivers/net/wireless/ath/wil6210/wil6210.h +++ b/drivers/net/wireless/ath/wil6210/wil6210.h @@ -716,6 +716,7 @@ void wil_priv_deinit(struct wil6210_priv *wil); int wil_reset(struct wil6210_priv *wil, bool no_fw); void wil_fw_error_recovery(struct wil6210_priv *wil); void wil_set_recovery_state(struct wil6210_priv *wil, int state); +bool wil_is_recovery_blocked(struct wil6210_priv *wil); int wil_up(struct wil6210_priv *wil); int __wil_up(struct wil6210_priv *wil); int wil_down(struct wil6210_priv *wil); From b4944f2c081ea0e2fa7bc8bb510e1e6e5667f30b Mon Sep 17 00:00:00 2001 From: Lior David Date: Tue, 1 Mar 2016 19:18:17 +0200 Subject: [PATCH 0028/1649] wil6210: pass is_go flag to firmware When starting a PCP, pass the is_go flag to firmware in wmi_pcp_start. This flag indicates whether we started a PCP which is also a GO(P2P group owner) or just a regular PCP. 
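For reference, the combinations now handled by _wil_cfg80211_start_ap() (a summary of the logic below, not additional behaviour):

/*
 *   wdev->iftype            pbss   wmi_nettype        is_go
 *   NL80211_IFTYPE_AP        0     from iftype          0    regular AP
 *   NL80211_IFTYPE_AP        1     WMI_NETTYPE_P2P      0    plain PCP
 *   NL80211_IFTYPE_P2P_GO    1     WMI_NETTYPE_P2P      1    P2P GO
 *   NL80211_IFTYPE_P2P_GO    0     rejected with -ENOTSUPP (GO must be in PBSS)
 */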
Signed-off-by: Lior David Signed-off-by: Maya Erez Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wil6210/cfg80211.c | 9 ++++++++- drivers/net/wireless/ath/wil6210/wil6210.h | 2 +- drivers/net/wireless/ath/wil6210/wmi.c | 3 ++- 3 files changed, 11 insertions(+), 3 deletions(-) diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c index 33e54519602f..12cae3c005fb 100644 --- a/drivers/net/wireless/ath/wil6210/cfg80211.c +++ b/drivers/net/wireless/ath/wil6210/cfg80211.c @@ -1055,10 +1055,17 @@ static int _wil_cfg80211_start_ap(struct wiphy *wiphy, int rc; struct wireless_dev *wdev = ndev->ieee80211_ptr; u8 wmi_nettype = wil_iftype_nl2wmi(wdev->iftype); + u8 is_go = (wdev->iftype == NL80211_IFTYPE_P2P_GO); if (pbss) wmi_nettype = WMI_NETTYPE_P2P; + wil_dbg_misc(wil, "%s: is_go=%d\n", __func__, is_go); + if (is_go && !pbss) { + wil_err(wil, "%s: P2P GO must be in PBSS\n", __func__); + return -ENOTSUPP; + } + wil_set_recovery_state(wil, fw_recovery_idle); mutex_lock(&wil->mutex); @@ -1083,7 +1090,7 @@ static int _wil_cfg80211_start_ap(struct wiphy *wiphy, netif_carrier_on(ndev); - rc = wmi_pcp_start(wil, bi, wmi_nettype, chan, hidden_ssid); + rc = wmi_pcp_start(wil, bi, wmi_nettype, chan, hidden_ssid, is_go); if (rc) goto err_pcp_start; diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h index a23dcee886a0..d18c448c4a32 100644 --- a/drivers/net/wireless/ath/wil6210/wil6210.h +++ b/drivers/net/wireless/ath/wil6210/wil6210.h @@ -802,7 +802,7 @@ void wil_p2p_wdev_free(struct wil6210_priv *wil); int wmi_set_mac_address(struct wil6210_priv *wil, void *addr); int wmi_pcp_start(struct wil6210_priv *wil, int bi, u8 wmi_nettype, - u8 chan, u8 hidden_ssid); + u8 chan, u8 hidden_ssid, u8 is_go); int wmi_pcp_stop(struct wil6210_priv *wil); void wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid, u16 reason_code, bool from_event); diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c index f0761758fac7..3cc4462aec1a 100644 --- a/drivers/net/wireless/ath/wil6210/wmi.c +++ b/drivers/net/wireless/ath/wil6210/wmi.c @@ -962,7 +962,7 @@ int wmi_set_mac_address(struct wil6210_priv *wil, void *addr) } int wmi_pcp_start(struct wil6210_priv *wil, int bi, u8 wmi_nettype, - u8 chan, u8 hidden_ssid) + u8 chan, u8 hidden_ssid, u8 is_go) { int rc; @@ -973,6 +973,7 @@ int wmi_pcp_start(struct wil6210_priv *wil, int bi, u8 wmi_nettype, .channel = chan - 1, .pcp_max_assoc_sta = max_assoc_sta, .hidden_ssid = hidden_ssid, + .is_go = is_go, }; struct { struct wmi_cmd_hdr wmi; From 1f1a361abf73edfb94ca010c51587de378bc7c68 Mon Sep 17 00:00:00 2001 From: Lior David Date: Tue, 1 Mar 2016 19:18:18 +0200 Subject: [PATCH 0029/1649] wil6210: add oob_mode module parameter Add module parameter oob_mode. Takes effect the next time the interface is brought up and FW is loaded. Puts the FW in special "out of the box" (OOB) mode which is used for diagnostics and certification. 
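For reference, the parameter is read once at module load, e.g. "modprobe wil6210 oob_mode=1"; since it is declared with S_IRUGO the current value can be read back from /sys/module/wil6210/parameters/oob_mode. wil_set_oob_mode() then applies it on every wil_reset() by setting or clearing BIT_USER_OOB_MODE (bit 31 of RGF_USER_USAGE_6) before the firmware is loaded.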
Signed-off-by: Lior David Signed-off-by: Maya Erez Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wil6210/main.c | 16 ++++++++++++++++ drivers/net/wireless/ath/wil6210/wil6210.h | 1 + 2 files changed, 17 insertions(+) diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c index c2a0a6625252..8d4e8843004e 100644 --- a/drivers/net/wireless/ath/wil6210/main.c +++ b/drivers/net/wireless/ath/wil6210/main.c @@ -27,6 +27,11 @@ bool debug_fw; /* = false; */ module_param(debug_fw, bool, S_IRUGO); MODULE_PARM_DESC(debug_fw, " do not perform card reset. For FW debug"); +static bool oob_mode; +module_param(oob_mode, bool, S_IRUGO); +MODULE_PARM_DESC(oob_mode, + " enable out of the box (OOB) mode in FW, for diagnostics and certification"); + bool no_fw_recovery; module_param(no_fw_recovery, bool, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(no_fw_recovery, " disable automatic FW error recovery"); @@ -547,6 +552,16 @@ static inline void wil_release_cpu(struct wil6210_priv *wil) wil_w(wil, RGF_USER_USER_CPU_0, 1); } +static void wil_set_oob_mode(struct wil6210_priv *wil, bool enable) +{ + wil_info(wil, "%s: enable=%d\n", __func__, enable); + if (enable) { + wil_s(wil, RGF_USER_USAGE_6, BIT_USER_OOB_MODE); + } else { + wil_c(wil, RGF_USER_USAGE_6, BIT_USER_OOB_MODE); + } +} + static int wil_target_reset(struct wil6210_priv *wil) { int delay = 0; @@ -823,6 +838,7 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw) if (rc) return rc; + wil_set_oob_mode(wil, oob_mode); if (load_fw) { wil_info(wil, "Use firmware <%s> + board <%s>\n", WIL_FW_NAME, WIL_FW2_NAME); diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h index d18c448c4a32..4d699ea46373 100644 --- a/drivers/net/wireless/ath/wil6210/wil6210.h +++ b/drivers/net/wireless/ath/wil6210/wil6210.h @@ -132,6 +132,7 @@ struct RGF_ICR { /* registers - FW addresses */ #define RGF_USER_USAGE_1 (0x880004) #define RGF_USER_USAGE_6 (0x880018) + #define BIT_USER_OOB_MODE BIT(31) #define RGF_USER_HW_MACHINE_STATE (0x8801dc) #define HW_MACHINE_BOOT_DONE (0x3fffffd) #define RGF_USER_USER_CPU_0 (0x8801e0) From 60549cab2ea5ffa100076cb06d49579e05edd966 Mon Sep 17 00:00:00 2001 From: Grzegorz Bajorski Date: Mon, 30 Nov 2015 13:56:59 +0100 Subject: [PATCH 0030/1649] ath10k: deliver mgmt frames from htt to monitor vifs only Until now only WMI originating mgmt frames were reported to mac80211. Management frames on HTT were basically dropped (except frames which looked like management but had FCS error). To allow sniffing all frames (including offloaded frames) without interfering with mac80211 operation and states a new rx_flag was introduced and is not being used to distinguish frames and classify them for mac80211. 
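In short, management rx reporting is now split by delivery path (a summary of the changes below):

/*
 *   WMI mgmt rx  ->  status->flag |= RX_FLAG_SKIP_MONITOR;
 *                    feeds mac80211 MLME, not delivered to monitor vifs
 *   HTT mgmt rx  ->  status->flag |= RX_FLAG_ONLY_MONITOR;
 *                    delivered to monitor vifs only, ignored by MLME
 */

Monitor interfaces therefore see the HTT copies (including offloaded frames), while mac80211 state machines keep using the WMI copies, which carry channel information, and duplicate reporting to sniffers is avoided.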
Signed-off-by: Grzegorz Bajorski Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/htt_rx.c | 70 ++++++++++++------------ drivers/net/wireless/ath/ath10k/wmi.c | 6 ++ 2 files changed, 40 insertions(+), 36 deletions(-) diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c index 40f969c72de8..84b060efa1b5 100644 --- a/drivers/net/wireless/ath/ath10k/htt_rx.c +++ b/drivers/net/wireless/ath/ath10k/htt_rx.c @@ -1078,20 +1078,25 @@ static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar, hdr = (void *)msdu->data; /* Tail */ - skb_trim(msdu, msdu->len - ath10k_htt_rx_crypto_tail_len(ar, enctype)); + if (status->flag & RX_FLAG_IV_STRIPPED) + skb_trim(msdu, msdu->len - + ath10k_htt_rx_crypto_tail_len(ar, enctype)); /* MMIC */ - if (!ieee80211_has_morefrags(hdr->frame_control) && + if ((status->flag & RX_FLAG_MMIC_STRIPPED) && + !ieee80211_has_morefrags(hdr->frame_control) && enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA) skb_trim(msdu, msdu->len - 8); /* Head */ - hdr_len = ieee80211_hdrlen(hdr->frame_control); - crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype); + if (status->flag & RX_FLAG_IV_STRIPPED) { + hdr_len = ieee80211_hdrlen(hdr->frame_control); + crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype); - memmove((void *)msdu->data + crypto_len, - (void *)msdu->data, hdr_len); - skb_pull(msdu, crypto_len); + memmove((void *)msdu->data + crypto_len, + (void *)msdu->data, hdr_len); + skb_pull(msdu, crypto_len); + } } static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar, @@ -1345,6 +1350,7 @@ static void ath10k_htt_rx_h_mpdu(struct ath10k *ar, bool has_tkip_err; bool has_peer_idx_invalid; bool is_decrypted; + bool is_mgmt; u32 attention; if (skb_queue_empty(amsdu)) @@ -1353,6 +1359,9 @@ static void ath10k_htt_rx_h_mpdu(struct ath10k *ar, first = skb_peek(amsdu); rxd = (void *)first->data - sizeof(*rxd); + is_mgmt = !!(rxd->attention.flags & + __cpu_to_le32(RX_ATTENTION_FLAGS_MGMT_TYPE)); + enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0), RX_MPDU_START_INFO0_ENCRYPT_TYPE); @@ -1394,6 +1403,7 @@ static void ath10k_htt_rx_h_mpdu(struct ath10k *ar, RX_FLAG_MMIC_ERROR | RX_FLAG_DECRYPTED | RX_FLAG_IV_STRIPPED | + RX_FLAG_ONLY_MONITOR | RX_FLAG_MMIC_STRIPPED); if (has_fcs_err) @@ -1402,10 +1412,21 @@ static void ath10k_htt_rx_h_mpdu(struct ath10k *ar, if (has_tkip_err) status->flag |= RX_FLAG_MMIC_ERROR; - if (is_decrypted) - status->flag |= RX_FLAG_DECRYPTED | - RX_FLAG_IV_STRIPPED | - RX_FLAG_MMIC_STRIPPED; + /* Firmware reports all necessary management frames via WMI already. + * They are not reported to monitor interfaces at all so pass the ones + * coming via HTT to monitor interfaces instead. This simplifies + * matters a lot. 
+ */ + if (is_mgmt) + status->flag |= RX_FLAG_ONLY_MONITOR; + + if (is_decrypted) { + status->flag |= RX_FLAG_DECRYPTED; + + if (likely(!is_mgmt)) + status->flag |= RX_FLAG_IV_STRIPPED | + RX_FLAG_MMIC_STRIPPED; +} skb_queue_walk(amsdu, msdu) { ath10k_htt_rx_h_csum_offload(msdu); @@ -1418,6 +1439,8 @@ static void ath10k_htt_rx_h_mpdu(struct ath10k *ar, */ if (!is_decrypted) continue; + if (is_mgmt) + continue; hdr = (void *)msdu->data; hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED); @@ -1518,14 +1541,6 @@ static bool ath10k_htt_rx_amsdu_allowed(struct ath10k *ar, struct sk_buff_head *amsdu, struct ieee80211_rx_status *rx_status) { - struct sk_buff *msdu; - struct htt_rx_desc *rxd; - bool is_mgmt; - bool has_fcs_err; - - msdu = skb_peek(amsdu); - rxd = (void *)msdu->data - sizeof(*rxd); - /* FIXME: It might be a good idea to do some fuzzy-testing to drop * invalid/dangerous frames. */ @@ -1535,23 +1550,6 @@ static bool ath10k_htt_rx_amsdu_allowed(struct ath10k *ar, return false; } - is_mgmt = !!(rxd->attention.flags & - __cpu_to_le32(RX_ATTENTION_FLAGS_MGMT_TYPE)); - has_fcs_err = !!(rxd->attention.flags & - __cpu_to_le32(RX_ATTENTION_FLAGS_FCS_ERR)); - - /* Management frames are handled via WMI events. The pros of such - * approach is that channel is explicitly provided in WMI events - * whereas HTT doesn't provide channel information for Rxed frames. - * - * However some firmware revisions don't report corrupted frames via - * WMI so don't drop them. - */ - if (is_mgmt && !has_fcs_err) { - ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx mgmt ctrl\n"); - return false; - } - if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) { ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx cac running\n"); return false; diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c index c31b4878cdc6..651220e98c72 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.c +++ b/drivers/net/wireless/ath/ath10k/wmi.c @@ -2310,6 +2310,12 @@ int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb) hdr = (struct ieee80211_hdr *)skb->data; fc = le16_to_cpu(hdr->frame_control); + /* Firmware is guaranteed to report all essential management frames via + * WMI while it can deliver some extra via HTT. Since there can be + * duplicates split the reporting wrt monitor/sniffing. + */ + status->flag |= RX_FLAG_SKIP_MONITOR; + ath10k_wmi_handle_wep_reauth(ar, skb, status); /* FW delivers WEP Shared Auth frame with Protected Bit set and From 8d130963d38a5677dfd30a2fda83e5cd0b9f4f69 Mon Sep 17 00:00:00 2001 From: Peter Oh Date: Tue, 1 Mar 2016 09:52:49 -0800 Subject: [PATCH 0031/1649] ath10k: set MAC timestamp in management Rx frame Check and set Rx MAC timestamp when firmware indicates it. Firmware adds it in Rx beacon frame only at this moment. Driver and mac80211 may utilize it to detect such clockdrift or beacon collision and use the result for beacon collision avoidance. 
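A minimal stand-alone sketch of the layout described above: when the status word carries WMI_RX_STATUS_EXT_INFO, the extension block sits after the 4-byte-aligned management payload. The struct and helper below are simplified stand-ins (the real field is little-endian on the wire and the driver converts it); only the flag name and value are taken from the patch.

/* Illustrative only: locate the optional ext info that follows the
 * 4-byte-aligned payload. wmi_rx_ext_info_example and find_ext_info()
 * are simplified stand-ins, not the driver's types.
 */
#include <stdint.h>
#include <stddef.h>

#define WMI_RX_STATUS_EXT_INFO  0x40
#define ALIGN4(x)               (((x) + 3) & ~(uint32_t)3)

struct wmi_rx_ext_info_example {
    uint64_t rx_mac_timestamp;  /* MAC time of the received beacon */
};

/* Returns a pointer to the ext info block, or NULL if it is not present. */
static const struct wmi_rx_ext_info_example *
find_ext_info(const uint8_t *payload, uint32_t buf_len, uint32_t status)
{
    if (!(status & WMI_RX_STATUS_EXT_INFO))
        return NULL;

    return (const struct wmi_rx_ext_info_example *)(payload + ALIGN4(buf_len));
}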
Signed-off-by: Peter Oh Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/wmi.c | 13 +++++++++++++ drivers/net/wireless/ath/ath10k/wmi.h | 7 +++++++ 2 files changed, 20 insertions(+) diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c index 651220e98c72..c2608946f773 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.c +++ b/drivers/net/wireless/ath/ath10k/wmi.c @@ -2167,8 +2167,10 @@ static int ath10k_wmi_op_pull_mgmt_rx_ev(struct ath10k *ar, struct sk_buff *skb, struct wmi_mgmt_rx_event_v1 *ev_v1; struct wmi_mgmt_rx_event_v2 *ev_v2; struct wmi_mgmt_rx_hdr_v1 *ev_hdr; + struct wmi_mgmt_rx_ext_info *ext_info; size_t pull_len; u32 msdu_len; + u32 len; if (test_bit(ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX, ar->fw_features)) { ev_v2 = (struct wmi_mgmt_rx_event_v2 *)skb->data; @@ -2195,6 +2197,12 @@ static int ath10k_wmi_op_pull_mgmt_rx_ev(struct ath10k *ar, struct sk_buff *skb, if (skb->len < msdu_len) return -EPROTO; + if (le32_to_cpu(arg->status) & WMI_RX_STATUS_EXT_INFO) { + len = ALIGN(le32_to_cpu(arg->buf_len), 4); + ext_info = (struct wmi_mgmt_rx_ext_info *)(skb->data + len); + memcpy(&arg->ext_info, ext_info, + sizeof(struct wmi_mgmt_rx_ext_info)); + } /* the WMI buffer might've ended up being padded to 4 bytes due to HTC * trailer with credit update. Trim the excess garbage. */ @@ -2281,6 +2289,11 @@ int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb) if (rx_status & WMI_RX_STATUS_ERR_MIC) status->flag |= RX_FLAG_MMIC_ERROR; + if (rx_status & WMI_RX_STATUS_EXT_INFO) { + status->mactime = + __le64_to_cpu(arg.ext_info.rx_mac_timestamp); + status->flag |= RX_FLAG_MACTIME_END; + } /* Hardware can Rx CCK rates on 5GHz. In that case phy_mode is set to * MODE_11B. This means phy_mode is not a reliable source for the band * of mgmt rx. diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h index 4d3cbc44fcd2..bb42f7a6ba23 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.h +++ b/drivers/net/wireless/ath/ath10k/wmi.h @@ -3037,11 +3037,17 @@ struct wmi_10_4_mgmt_rx_event { u8 buf[0]; } __packed; +struct wmi_mgmt_rx_ext_info { + __le64 rx_mac_timestamp; +} __packed __aligned(4); + #define WMI_RX_STATUS_OK 0x00 #define WMI_RX_STATUS_ERR_CRC 0x01 #define WMI_RX_STATUS_ERR_DECRYPT 0x08 #define WMI_RX_STATUS_ERR_MIC 0x10 #define WMI_RX_STATUS_ERR_KEY_CACHE_MISS 0x20 +/* Extension data at the end of mgmt frame */ +#define WMI_RX_STATUS_EXT_INFO 0x40 #define PHY_ERROR_GEN_SPECTRAL_SCAN 0x26 #define PHY_ERROR_GEN_FALSE_RADAR_EXT 0x24 @@ -6116,6 +6122,7 @@ struct wmi_mgmt_rx_ev_arg { __le32 phy_mode; __le32 buf_len; __le32 status; /* %WMI_RX_STATUS_ */ + struct wmi_mgmt_rx_ext_info ext_info; }; struct wmi_ch_info_ev_arg { From 33ea008db7569686310268d60232f284ad213982 Mon Sep 17 00:00:00 2001 From: Miaoqing Pan Date: Fri, 26 Feb 2016 16:08:31 +0800 Subject: [PATCH 0032/1649] ath9k: Update QCA953x initvals commit 14c5932805eb ("ath9k: Update QCA953x initvals") disabled HW peak detect calibartion on QCA953x 1.0, which should also be applied on 2.0. 
Signed-off-by: Miaoqing Pan Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath9k/ar953x_initvals.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/ath/ath9k/ar953x_initvals.h b/drivers/net/wireless/ath/ath9k/ar953x_initvals.h index c0b90daa3e3d..924ae6bde7f1 100644 --- a/drivers/net/wireless/ath/ath9k/ar953x_initvals.h +++ b/drivers/net/wireless/ath/ath9k/ar953x_initvals.h @@ -988,7 +988,7 @@ static const u32 qca953x_2p0_baseband_postamble[][5] = { {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c}, {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce}, {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021}, - {0x00009e3c, 0xcfa10820, 0xcfa10820, 0xcf946222, 0xcf946222}, + {0x00009e3c, 0xcfa10820, 0xcfa10820, 0xcf946220, 0xcf946220}, {0x00009e44, 0xfe321e27, 0xfe321e27, 0xfe291e27, 0xfe291e27}, {0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012}, {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000}, @@ -1008,7 +1008,7 @@ static const u32 qca953x_2p0_baseband_postamble[][5] = { {0x0000a284, 0x00000000, 0x00000000, 0x00000010, 0x00000010}, {0x0000a288, 0x00000110, 0x00000110, 0x00000110, 0x00000110}, {0x0000a28c, 0x00022222, 0x00022222, 0x00022222, 0x00022222}, - {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18}, + {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00058d18, 0x00058d18}, {0x0000a2cc, 0x18c50033, 0x18c43433, 0x18c41033, 0x18c44c33}, {0x0000a2d0, 0x00041982, 0x00041982, 0x00041982, 0x00041982}, {0x0000a2d8, 0x7999a83b, 0x7999a83b, 0x7999a83b, 0x7999a83b}, From 137ef139b523a4e5f2582be0f58adbcce9996fe3 Mon Sep 17 00:00:00 2001 From: Miaoqing Pan Date: Fri, 26 Feb 2016 16:08:32 +0800 Subject: [PATCH 0033/1649] ath9k: Update AR9003 2.2 initvals HW peak detect calibration would fail for AR9300 chips and we went for implementing the SW way of doing it instead of HW doing the peak detect calibration. 
Signed-off-by: Miaoqing Pan Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h index c38399bc9aa9..c07866a2fdf9 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h +++ b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h @@ -331,7 +331,7 @@ static const u32 ar9300_2p2_baseband_postamble[][5] = { {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c}, {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce}, {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021}, - {0x00009e3c, 0xcf946220, 0xcf946220, 0xcf946222, 0xcf946222}, + {0x00009e3c, 0xcf946220, 0xcf946220, 0xcf946220, 0xcf946220}, {0x00009e44, 0x02321e27, 0x02321e27, 0x02291e27, 0x02291e27}, {0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012}, {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000}, @@ -351,7 +351,7 @@ static const u32 ar9300_2p2_baseband_postamble[][5] = { {0x0000a284, 0x00000000, 0x00000000, 0x00000150, 0x00000150}, {0x0000a288, 0x00000110, 0x00000110, 0x00000110, 0x00000110}, {0x0000a28c, 0x00022222, 0x00022222, 0x00022222, 0x00022222}, - {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18}, + {0x0000a2c4, 0x00058d18, 0x00058d18, 0x00058d18, 0x00058d18}, {0x0000a2d0, 0x00041983, 0x00041983, 0x00041981, 0x00041982}, {0x0000a2d8, 0x7999a83b, 0x7999a83b, 0x7999a83b, 0x7999a83b}, {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, From 7da1ddddd55fdf478f712643e145e5d343f4ba46 Mon Sep 17 00:00:00 2001 From: Miaoqing Pan Date: Fri, 26 Feb 2016 16:08:33 +0800 Subject: [PATCH 0034/1649] ath9k: Update AR933x initvals HW peak detect calibration would fail for AR9300 chips and we went for implementing the SW way of doing it instead of HW doing the peak detect calibration. 
Signed-off-by: Miaoqing Pan Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h | 4 ++-- drivers/net/wireless/ath/ath9k/ar9330_1p2_initvals.h | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h b/drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h index 2c42ff05efa3..29479afbc4f1 100644 --- a/drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h +++ b/drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h @@ -40,7 +40,7 @@ static const u32 ar9331_1p1_baseband_postamble[][5] = { {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c}, {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce}, {0x00009e2c, 0x0000001c, 0x0000001c, 0x00003221, 0x00003221}, - {0x00009e3c, 0xcf946222, 0xcf946222, 0xcf946222, 0xcf946222}, + {0x00009e3c, 0xcf946220, 0xcf946220, 0xcf946220, 0xcf946220}, {0x00009e44, 0x02321e27, 0x02321e27, 0x02282324, 0x02282324}, {0x00009e48, 0x5030201a, 0x5030201a, 0x50302010, 0x50302010}, {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000}, @@ -59,7 +59,7 @@ static const u32 ar9331_1p1_baseband_postamble[][5] = { {0x0000a284, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, {0x0000a288, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, {0x0000a28c, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, - {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00058d18, 0x00058d18}, + {0x0000a2c4, 0x00058d18, 0x00058d18, 0x00058d18, 0x00058d18}, {0x0000a2d0, 0x00071982, 0x00071982, 0x00071982, 0x00071982}, {0x0000a2d8, 0xf999a83a, 0xf999a83a, 0xf999a83a, 0xf999a83a}, {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, diff --git a/drivers/net/wireless/ath/ath9k/ar9330_1p2_initvals.h b/drivers/net/wireless/ath/ath9k/ar9330_1p2_initvals.h index 2154efcd3900..c4a6ffa55e8c 100644 --- a/drivers/net/wireless/ath/ath9k/ar9330_1p2_initvals.h +++ b/drivers/net/wireless/ath/ath9k/ar9330_1p2_initvals.h @@ -345,7 +345,7 @@ static const u32 ar9331_1p2_baseband_postamble[][5] = { {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c}, {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce}, {0x00009e2c, 0x0000001c, 0x0000001c, 0x00003221, 0x00003221}, - {0x00009e3c, 0xcf946222, 0xcf946222, 0xcf946222, 0xcf946222}, + {0x00009e3c, 0xcf946220, 0xcf946220, 0xcf946220, 0xcf946220}, {0x00009e44, 0x02321e27, 0x02321e27, 0x02282324, 0x02282324}, {0x00009e48, 0x5030201a, 0x5030201a, 0x50302010, 0x50302010}, {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000}, @@ -364,7 +364,7 @@ static const u32 ar9331_1p2_baseband_postamble[][5] = { {0x0000a284, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, {0x0000a288, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, {0x0000a28c, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, - {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18}, + {0x0000a2c4, 0x00058d18, 0x00058d18, 0x00058d18, 0x00058d18}, {0x0000a2d0, 0x00071981, 0x00071981, 0x00071981, 0x00071981}, {0x0000a2d8, 0xf999a83a, 0xf999a83a, 0xf999a83a, 0xf999a83a}, {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, From 7b5c904ddc777f704d794c1e28942b0f35e7db32 Mon Sep 17 00:00:00 2001 From: Miaoqing Pan Date: Fri, 26 Feb 2016 16:08:34 +0800 Subject: [PATCH 0035/1649] ath9k: Update AR9340 initvals HW peak detect calibration would fail for AR9300 chips and we went for implementing the SW way of doing it instead of HW doing the peak detect calibration. 
Signed-off-by: Miaoqing Pan Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath9k/ar9340_initvals.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/ath/ath9k/ar9340_initvals.h b/drivers/net/wireless/ath/ath9k/ar9340_initvals.h index b995ffe88b33..2eb163fc1c18 100644 --- a/drivers/net/wireless/ath/ath9k/ar9340_initvals.h +++ b/drivers/net/wireless/ath/ath9k/ar9340_initvals.h @@ -245,7 +245,7 @@ static const u32 ar9340_1p0_baseband_postamble[][5] = { {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c}, {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce}, {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021}, - {0x00009e3c, 0xcf946220, 0xcf946220, 0xcf946222, 0xcf946222}, + {0x00009e3c, 0xcf946220, 0xcf946220, 0xcf946220, 0xcf946220}, {0x00009e44, 0x02321e27, 0x02321e27, 0x02291e27, 0x02291e27}, {0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012}, {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000}, @@ -265,7 +265,7 @@ static const u32 ar9340_1p0_baseband_postamble[][5] = { {0x0000a284, 0x00000000, 0x00000000, 0x00000150, 0x00000150}, {0x0000a288, 0x00000220, 0x00000220, 0x00000110, 0x00000110}, {0x0000a28c, 0x00011111, 0x00011111, 0x00022222, 0x00022222}, - {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18}, + {0x0000a2c4, 0x00058d18, 0x00058d18, 0x00058d18, 0x00058d18}, {0x0000a2d0, 0x00041983, 0x00041983, 0x00041982, 0x00041982}, {0x0000a2d8, 0x7999a83a, 0x7999a83a, 0x7999a83a, 0x7999a83a}, {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, From 63a0bc0e6f16e1f25eeb2f730de76ef1b072b876 Mon Sep 17 00:00:00 2001 From: Miaoqing Pan Date: Fri, 26 Feb 2016 16:08:35 +0800 Subject: [PATCH 0036/1649] ath9k: Update AR9462 initvals HW peak detect calibration would fail for AR9300 chips and we went for implementing the SW way of doing it instead of HW doing the peak detect calibration. 
Signed-off-by: Miaoqing Pan Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h | 4 ++-- drivers/net/wireless/ath/ath9k/ar9462_2p1_initvals.h | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h index 1b6b4d0cfa97..b00dd649453d 100644 --- a/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h +++ b/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h @@ -59,7 +59,7 @@ static const u32 ar9462_2p0_baseband_postamble[][5] = { {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c}, {0x00009e20, 0x000003a5, 0x000003a5, 0x000003a5, 0x000003a5}, {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021}, - {0x00009e3c, 0xcf946220, 0xcf946220, 0xcfd5c782, 0xcfd5c282}, + {0x00009e3c, 0xcf946220, 0xcf946220, 0xcfd5c780, 0xcfd5c280}, {0x00009e44, 0x62321e27, 0x62321e27, 0xfe291e27, 0xfe291e27}, {0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012}, {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000}, @@ -79,7 +79,7 @@ static const u32 ar9462_2p0_baseband_postamble[][5] = { {0x0000a284, 0x00000000, 0x00000000, 0x00000150, 0x00000150}, {0x0000a288, 0x00000110, 0x00000110, 0x00000110, 0x00000110}, {0x0000a28c, 0x00022222, 0x00022222, 0x00022222, 0x00022222}, - {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18}, + {0x0000a2c4, 0x00058d18, 0x00058d18, 0x00058d18, 0x00058d18}, {0x0000a2d0, 0x00041981, 0x00041981, 0x00041981, 0x00041982}, {0x0000a2d8, 0x7999a83b, 0x7999a83b, 0x7999a83b, 0x7999a83b}, {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, diff --git a/drivers/net/wireless/ath/ath9k/ar9462_2p1_initvals.h b/drivers/net/wireless/ath/ath9k/ar9462_2p1_initvals.h index dc3adda46e8b..0f8745ec73b1 100644 --- a/drivers/net/wireless/ath/ath9k/ar9462_2p1_initvals.h +++ b/drivers/net/wireless/ath/ath9k/ar9462_2p1_initvals.h @@ -239,7 +239,7 @@ static const u32 ar9462_2p1_baseband_postamble[][5] = { {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c}, {0x00009e20, 0x000003a5, 0x000003a5, 0x000003a5, 0x000003a5}, {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021}, - {0x00009e3c, 0xcf946220, 0xcf946220, 0xcfd5c782, 0xcfd5c282}, + {0x00009e3c, 0xcf946220, 0xcf946220, 0xcfd5c780, 0xcfd5c280}, {0x00009e44, 0x62321e27, 0x62321e27, 0xfe291e27, 0xfe291e27}, {0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012}, {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000}, @@ -259,7 +259,7 @@ static const u32 ar9462_2p1_baseband_postamble[][5] = { {0x0000a284, 0x00000000, 0x00000000, 0x00000150, 0x00000150}, {0x0000a288, 0x00000110, 0x00000110, 0x00000110, 0x00000110}, {0x0000a28c, 0x00022222, 0x00022222, 0x00022222, 0x00022222}, - {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18}, + {0x0000a2c4, 0x00058d18, 0x00058d18, 0x00058d18, 0x00058d18}, {0x0000a2d0, 0x00041981, 0x00041981, 0x00041981, 0x00041982}, {0x0000a2d8, 0x7999a83b, 0x7999a83b, 0x7999a83b, 0x7999a83b}, {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, From 93edb3addad8d1648c66b84673a10eb4905a1fb7 Mon Sep 17 00:00:00 2001 From: Miaoqing Pan Date: Fri, 26 Feb 2016 16:08:36 +0800 Subject: [PATCH 0037/1649] ath9k: Update AR9485 initvals HW peak detect calibration would fail for AR9300 chips and we went for implementing the SW way of doing it instead of HW doing the peak detect calibration. 
Signed-off-by: Miaoqing Pan Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath9k/ar9485_initvals.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/ath/ath9k/ar9485_initvals.h b/drivers/net/wireless/ath/ath9k/ar9485_initvals.h index ce83ce47a1ca..bdf6f107f6f1 100644 --- a/drivers/net/wireless/ath/ath9k/ar9485_initvals.h +++ b/drivers/net/wireless/ath/ath9k/ar9485_initvals.h @@ -1026,7 +1026,7 @@ static const u32 ar9485_1_1_baseband_postamble[][5] = { {0x00009e10, 0x7ec88d2e, 0x7ec88d2e, 0x7ec80d2e, 0x7ec80d2e}, {0x00009e14, 0x31395d53, 0x31396053, 0x312e6053, 0x312e5d53}, {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c}, - {0x00009e3c, 0xcf946220, 0xcf946220, 0xcf946222, 0xcf946222}, + {0x00009e3c, 0xcf946220, 0xcf946220, 0xcf946220, 0xcf946220}, {0x00009e48, 0x5030201a, 0x5030201a, 0x50302010, 0x50302010}, {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000}, {0x0000a204, 0x01303fc0, 0x01303fc4, 0x01303fc4, 0x01303fc0}, @@ -1044,7 +1044,7 @@ static const u32 ar9485_1_1_baseband_postamble[][5] = { {0x0000a284, 0x00000000, 0x00000000, 0x000002a0, 0x000002a0}, {0x0000a288, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, {0x0000a28c, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, - {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18}, + {0x0000a2c4, 0x00058d18, 0x00058d18, 0x00058d18, 0x00058d18}, {0x0000a2d0, 0x00071981, 0x00071981, 0x00071982, 0x00071982}, {0x0000a2d8, 0xf999a83a, 0xf999a83a, 0xf999a83a, 0xf999a83a}, {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, From 836ff650eb4a1505a65bfc0d22ae51d478415e4f Mon Sep 17 00:00:00 2001 From: Miaoqing Pan Date: Fri, 26 Feb 2016 16:08:37 +0800 Subject: [PATCH 0038/1649] ath9k: Update AR955x initvals HW peak detect calibration would fail for AR9300 chips and we went for implementing the SW way of doing it instead of HW doing the peak detect calibration. Signed-off-by: Miaoqing Pan Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath9k/ar955x_1p0_initvals.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/ath/ath9k/ar955x_1p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar955x_1p0_initvals.h index 148562addd38..67edf344b427 100644 --- a/drivers/net/wireless/ath/ath9k/ar955x_1p0_initvals.h +++ b/drivers/net/wireless/ath/ath9k/ar955x_1p0_initvals.h @@ -83,7 +83,7 @@ static const u32 ar955x_1p0_baseband_postamble[][5] = { {0x0000a284, 0x00000000, 0x00000000, 0x00000010, 0x00000010}, {0x0000a288, 0x00000110, 0x00000110, 0x00000110, 0x00000110}, {0x0000a28c, 0x00022222, 0x00022222, 0x00022222, 0x00022222}, - {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00058d18, 0x00058d18}, + {0x0000a2c4, 0x00058d18, 0x00058d18, 0x00058d18, 0x00058d18}, {0x0000a2cc, 0x18c50033, 0x18c43433, 0x18c41033, 0x18c44c33}, {0x0000a2d0, 0x00041982, 0x00041982, 0x00041982, 0x00041982}, {0x0000a2d8, 0x7999a83b, 0x7999a83b, 0x7999a83b, 0x7999a83b}, From f294b096c6cfb89b2d1baac8314a7dc49daab995 Mon Sep 17 00:00:00 2001 From: Miaoqing Pan Date: Fri, 26 Feb 2016 16:08:38 +0800 Subject: [PATCH 0039/1649] ath9k: Update AR9565 initvals HW peak detect calibration would fail for AR9300 chips and we went for implementing the SW way of doing it instead of HW doing the peak detect calibration. 
Signed-off-by: Miaoqing Pan Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h index 10d4a6cb1c3b..35c1bbb2fa8a 100644 --- a/drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h +++ b/drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h @@ -347,7 +347,7 @@ static const u32 ar9565_1p0_baseband_postamble[][5] = { {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c}, {0x00009e20, 0x000003b5, 0x000003b5, 0x000003a4, 0x000003a4}, {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021}, - {0x00009e3c, 0xcf946222, 0xcf946222, 0xcf946220, 0xcf946220}, + {0x00009e3c, 0xcf946220, 0xcf946220, 0xcf946220, 0xcf946220}, {0x00009e44, 0xfe321e27, 0xfe321e27, 0xfe291e27, 0xfe291e27}, {0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012}, {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000}, From 628bb7056bfb9156e53e1bcb5486cb15623ee43a Mon Sep 17 00:00:00 2001 From: Miaoqing Pan Date: Fri, 26 Feb 2016 16:08:39 +0800 Subject: [PATCH 0040/1649] ath9k: Update QCA956x initvals HW peak detect calibration would fail for AR9300 chips and we went for implementing the SW way of doing it instead of HW doing the peak detect calibration. Signed-off-by: Miaoqing Pan Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath9k/ar956x_initvals.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/ath/ath9k/ar956x_initvals.h b/drivers/net/wireless/ath/ath9k/ar956x_initvals.h index c3a47eaaf0c0..db051071c676 100644 --- a/drivers/net/wireless/ath/ath9k/ar956x_initvals.h +++ b/drivers/net/wireless/ath/ath9k/ar956x_initvals.h @@ -220,7 +220,7 @@ static const u32 qca956x_1p0_baseband_postamble[][5] = { {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c}, {0x00009e20, 0x000003b5, 0x000003b5, 0x000003a6, 0x000003a6}, {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021}, - {0x00009e3c, 0xcfa10820, 0xcfa10820, 0xcf946222, 0xcf946222}, + {0x00009e3c, 0xcfa10820, 0xcfa10820, 0xcf946220, 0xcf946220}, {0x00009e44, 0xfe321e27, 0xfe321e27, 0xfe291e27, 0xfe291e27}, {0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012}, {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000}, From fcf5dfda6e520813323559eeaa66ea9f31b10736 Mon Sep 17 00:00:00 2001 From: Miaoqing Pan Date: Fri, 26 Feb 2016 16:08:40 +0800 Subject: [PATCH 0041/1649] ath9k: Update AR9580 initvals HW peak detect calibration would fail for AR9300 chips and we went for implementing the SW way of doing it instead of HW doing the peak detect calibration. 
Signed-off-by: Miaoqing Pan Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h index 5d4629f96c15..f4c9befb3949 100644 --- a/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h +++ b/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h @@ -1290,7 +1290,7 @@ static const u32 ar9580_1p0_baseband_postamble[][5] = { {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c}, {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce}, {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021}, - {0x00009e3c, 0xcf946220, 0xcf946220, 0xcf946222, 0xcf946222}, + {0x00009e3c, 0xcf946220, 0xcf946220, 0xcf946220, 0xcf946220}, {0x00009e44, 0x02321e27, 0x02321e27, 0x02291e27, 0x02291e27}, {0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012}, {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000}, @@ -1310,7 +1310,7 @@ static const u32 ar9580_1p0_baseband_postamble[][5] = { {0x0000a284, 0x00000000, 0x00000000, 0x00000150, 0x00000150}, {0x0000a288, 0x00000110, 0x00000110, 0x00000110, 0x00000110}, {0x0000a28c, 0x00022222, 0x00022222, 0x00022222, 0x00022222}, - {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18}, + {0x0000a2c4, 0x00058d18, 0x00058d18, 0x00058d18, 0x00058d18}, {0x0000a2d0, 0x00041983, 0x00041983, 0x00041981, 0x00041982}, {0x0000a2d8, 0x7999a83b, 0x7999a83b, 0x7999a83b, 0x7999a83b}, {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, From 27ae9cd258a84ce7259afbee38dbe7841e723a68 Mon Sep 17 00:00:00 2001 From: Miaoqing Pan Date: Fri, 26 Feb 2016 16:08:41 +0800 Subject: [PATCH 0042/1649] ath9k: enable manual peak cal for all ar9300 chips HW peak detect calibration would fail, enable all ar9300 chips manual peak calibration instead. Signed-off-by: Miaoqing Pan Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath9k/ar9003_calib.c | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/drivers/net/wireless/ath/ath9k/ar9003_calib.c index 0c391997a2f7..99bc1a6393c6 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_calib.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_calib.c @@ -1641,14 +1641,12 @@ static bool ar9003_hw_init_cal_soc(struct ath_hw *ah, skip_tx_iqcal: if (run_agc_cal || !(ah->ah_flags & AH_FASTCC)) { - if (AR_SREV_9330_11(ah) || AR_SREV_9531(ah) || AR_SREV_9550(ah) || - AR_SREV_9561(ah)) { - for (i = 0; i < AR9300_MAX_CHAINS; i++) { - if (!(ah->rxchainmask & (1 << i))) - continue; - ar9003_hw_manual_peak_cal(ah, i, - IS_CHAN_2GHZ(chan)); - } + for (i = 0; i < AR9300_MAX_CHAINS; i++) { + if (!(ah->rxchainmask & (1 << i))) + continue; + + ar9003_hw_manual_peak_cal(ah, i, + IS_CHAN_2GHZ(chan)); } /* From 9c8ec9951d1e30f1339bcde8d324996412a3586b Mon Sep 17 00:00:00 2001 From: Miaoqing Pan Date: Fri, 26 Feb 2016 16:08:42 +0800 Subject: [PATCH 0043/1649] ath9k: use AR_SREV_9003_PCOEM to identify PCOEM chips commit f49c90db4d23 ("ath9k: Add a macro to identify PCOEM chips") defined AR_SREV_9003_PCOEM macro, its more clear to use the macro instead of checking one by one. Also removed PCOEM chips checking in the callback of ar9003_hw_do_pcoem_manual_peak_cal() which only for PCOEM chips. 
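For reference, the open-coded check removed in the diff that follows (AR_SREV_9462 || AR_SREV_9485 || AR_SREV_9565) suggests the macro expands to exactly that disjunction. The presumed definition below lives in reg.h and is not part of this diff, so treat it as an assumption rather than quoted source.

/* Presumed definition of the PCOEM family check (not shown in this diff). */
#define AR_SREV_9003_PCOEM(_ah) \
    (AR_SREV_9462(_ah) || AR_SREV_9485(_ah) || AR_SREV_9565(_ah))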
Signed-off-by: Miaoqing Pan Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath9k/ar9003_calib.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/drivers/net/wireless/ath/ath9k/ar9003_calib.c index 99bc1a6393c6..e1573ab6a609 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_calib.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_calib.c @@ -1311,9 +1311,6 @@ static void ar9003_hw_do_pcoem_manual_peak_cal(struct ath_hw *ah, struct ath9k_hw_cal_data *caldata = ah->caldata; int i; - if (!AR_SREV_9462(ah) && !AR_SREV_9565(ah) && !AR_SREV_9485(ah)) - return; - if ((ah->caps.hw_caps & ATH9K_HW_CAP_RTT) && !run_rtt_cal) return; @@ -1707,7 +1704,7 @@ void ar9003_hw_attach_calib_ops(struct ath_hw *ah) struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah); struct ath_hw_ops *ops = ath9k_hw_ops(ah); - if (AR_SREV_9485(ah) || AR_SREV_9462(ah) || AR_SREV_9565(ah)) + if (AR_SREV_9003_PCOEM(ah)) priv_ops->init_cal = ar9003_hw_init_cal_pcoem; else priv_ops->init_cal = ar9003_hw_init_cal_soc; From 1f64252d0b731d55f262a80f8eef914240334d17 Mon Sep 17 00:00:00 2001 From: Miaoqing Pan Date: Fri, 26 Feb 2016 16:08:43 +0800 Subject: [PATCH 0044/1649] ath9k: set correct peak detect threshold Set QCA9561 peak detect threshold to 11. Signed-off-by: Miaoqing Pan Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath9k/ar9003_calib.c | 25 ++++++++----------- 1 file changed, 11 insertions(+), 14 deletions(-) diff --git a/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/drivers/net/wireless/ath/ath9k/ar9003_calib.c index e1573ab6a609..518e649ecff3 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_calib.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_calib.c @@ -1203,12 +1203,12 @@ static void ar9003_hw_tx_iq_cal_reload(struct ath_hw *ah) static void ar9003_hw_manual_peak_cal(struct ath_hw *ah, u8 chain, bool is_2g) { int offset[8] = {0}, total = 0, test; - int agc_out, i, peak_detect_threshold; + int agc_out, i, peak_detect_threshold = 0; if (AR_SREV_9550(ah) || AR_SREV_9531(ah)) peak_detect_threshold = 8; - else - peak_detect_threshold = 0; + else if (AR_SREV_9561(ah)) + peak_detect_threshold = 11; /* * Turn off LNA/SW. 
@@ -1249,17 +1249,14 @@ static void ar9003_hw_manual_peak_cal(struct ath_hw *ah, u8 chain, bool is_2g) REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain), AR_PHY_65NM_RXRF_AGC_AGC2G_CALDAC_OVR, 0x0); - if (AR_SREV_9003_PCOEM(ah) || AR_SREV_9550(ah) || AR_SREV_9531(ah) || - AR_SREV_9561(ah)) { - if (is_2g) - REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain), - AR_PHY_65NM_RXRF_AGC_AGC2G_DBDAC_OVR, - peak_detect_threshold); - else - REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain), - AR_PHY_65NM_RXRF_AGC_AGC5G_DBDAC_OVR, - peak_detect_threshold); - } + if (is_2g) + REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain), + AR_PHY_65NM_RXRF_AGC_AGC2G_DBDAC_OVR, + peak_detect_threshold); + else + REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain), + AR_PHY_65NM_RXRF_AGC_AGC5G_DBDAC_OVR, + peak_detect_threshold); for (i = 6; i > 0; i--) { offset[i] = BIT(i - 1); From 0eb69ef355c3b77f8ce8f54b61759909ad3abcf8 Mon Sep 17 00:00:00 2001 From: Bob Copeland Date: Sun, 28 Feb 2016 20:07:55 -0500 Subject: [PATCH 0045/1649] ath5k: fix incorrect indentation smatch said: drivers/net/wireless/ath/ath5k/phy.c:1449 ath5k_hw_channel() warn: inconsistent indenting drivers/net/wireless/ath/ath5k/reset.c:637 ath5k_hw_on_hold() warn: inconsistent indenting drivers/net/wireless/ath/ath5k/reset.c:702 ath5k_hw_nic_wakeup() warn: inconsistent indenting All of these lines were indented a tabstop too far. Signed-off-by: Bob Copeland Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath5k/phy.c | 2 +- drivers/net/wireless/ath/ath5k/reset.c | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/net/wireless/ath/ath5k/phy.c b/drivers/net/wireless/ath/ath5k/phy.c index 0fce1c76638e..98ee85456321 100644 --- a/drivers/net/wireless/ath/ath5k/phy.c +++ b/drivers/net/wireless/ath/ath5k/phy.c @@ -1446,7 +1446,7 @@ ath5k_hw_channel(struct ath5k_hw *ah, "channel frequency (%u MHz) out of supported " "band range\n", channel->center_freq); - return -EINVAL; + return -EINVAL; } /* diff --git a/drivers/net/wireless/ath/ath5k/reset.c b/drivers/net/wireless/ath/ath5k/reset.c index 99e62f99a182..4b1c87fa15ac 100644 --- a/drivers/net/wireless/ath/ath5k/reset.c +++ b/drivers/net/wireless/ath/ath5k/reset.c @@ -634,7 +634,7 @@ ath5k_hw_on_hold(struct ath5k_hw *ah) ret = ath5k_hw_nic_reset(ah, AR5K_RESET_CTL_PCU | AR5K_RESET_CTL_MAC | AR5K_RESET_CTL_DMA | AR5K_RESET_CTL_PHY | AR5K_RESET_CTL_PCI); - usleep_range(2000, 2500); + usleep_range(2000, 2500); } else { ret = ath5k_hw_nic_reset(ah, AR5K_RESET_CTL_PCU | AR5K_RESET_CTL_BASEBAND | bus_flags); @@ -699,7 +699,7 @@ ath5k_hw_nic_wakeup(struct ath5k_hw *ah, struct ieee80211_channel *channel) ret = ath5k_hw_nic_reset(ah, AR5K_RESET_CTL_PCU | AR5K_RESET_CTL_MAC | AR5K_RESET_CTL_DMA | AR5K_RESET_CTL_PHY | AR5K_RESET_CTL_PCI); - usleep_range(2000, 2500); + usleep_range(2000, 2500); } else { if (ath5k_get_bus_type(ah) == ATH_AHB) ret = ath5k_hw_wisoc_reset(ah, AR5K_RESET_CTL_PCU | From 1451a3634ff5a443e256eb693627ffb1e34cd337 Mon Sep 17 00:00:00 2001 From: Bob Copeland Date: Sun, 28 Feb 2016 20:07:56 -0500 Subject: [PATCH 0046/1649] ath9k: fix a misleading indentation These lines belong inside the if-statement above, not in the main body of the switch. Found by smatch. 
Signed-off-by: Bob Copeland Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath9k/ar9003_phy.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c index 06c1ca6e8290..be14a8e01916 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c @@ -1337,11 +1337,11 @@ skip_ws_det: chan->channel, aniState->mrcCCK ? "on" : "off", is_on ? "on" : "off"); - if (is_on) - ah->stats.ast_ani_ccklow++; - else - ah->stats.ast_ani_cckhigh++; - aniState->mrcCCK = is_on; + if (is_on) + ah->stats.ast_ani_ccklow++; + else + ah->stats.ast_ani_cckhigh++; + aniState->mrcCCK = is_on; } break; } From c8c91b02a8ddb802259b712245ee97f9c3067a7f Mon Sep 17 00:00:00 2001 From: Bob Copeland Date: Sun, 28 Feb 2016 20:07:57 -0500 Subject: [PATCH 0047/1649] ath9k_htc: fix up indents with spaces Use tabs here. Found by smatch. Signed-off-by: Bob Copeland Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath9k/htc_drv_init.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c index 8647ab77c019..c2249ad54085 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c @@ -262,11 +262,11 @@ static void ath9k_multi_regread(void *hw_priv, u32 *addr, __be32 tmpval[8]; int i, ret; - for (i = 0; i < count; i++) { - tmpaddr[i] = cpu_to_be32(addr[i]); - } + for (i = 0; i < count; i++) { + tmpaddr[i] = cpu_to_be32(addr[i]); + } - ret = ath9k_wmi_cmd(priv->wmi, WMI_REG_READ_CMDID, + ret = ath9k_wmi_cmd(priv->wmi, WMI_REG_READ_CMDID, (u8 *)tmpaddr , sizeof(u32) * count, (u8 *)tmpval, sizeof(u32) * count, 100); @@ -275,9 +275,9 @@ static void ath9k_multi_regread(void *hw_priv, u32 *addr, "Multiple REGISTER READ FAILED (count: %d)\n", count); } - for (i = 0; i < count; i++) { - val[i] = be32_to_cpu(tmpval[i]); - } + for (i = 0; i < count; i++) { + val[i] = be32_to_cpu(tmpval[i]); + } } static void ath9k_regwrite_multi(struct ath_common *common) From a01ab81b09c55025365c1de1345b941a18e05529 Mon Sep 17 00:00:00 2001 From: Miaoqing Pan Date: Mon, 7 Mar 2016 10:38:14 +0800 Subject: [PATCH 0048/1649] ath9k: define correct GPIO numbers and bits mask Define correct GPIO numbers and MASK bits to indicate the WMAC GPIO resource. Allow SOC chips(AR9340, AR9531, AR9550, AR9561) to access all GPIOs which rely on gpiolib framework. But restrict SOC AR9330 only to access WMAC GPIO which has the same design with the old chips. 
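A small illustrative helper, not taken from the patch, showing how a per-chip num_gpio_pins/gpio_mask pair of this kind partitions pins between the WMAC register path and the SOC gpiolib path; the struct and function names here are hypothetical.

/* Illustrative only: which access path a GPIO takes, given the capability
 * fields the patch introduces. gpio_caps_example and the helpers are
 * hypothetical, not driver code.
 */
#include <stdbool.h>
#include <stdint.h>

struct gpio_caps_example {
    uint32_t num_gpio_pins; /* e.g. 23 on AR9340 */
    uint32_t gpio_mask;     /* e.g. 0x0000000F on AR9340 */
    bool is_soc_chip;       /* AR9340/AR9531/AR9550/AR9561 */
};

/* GPIOs inside the mask are driven through WMAC registers. */
static bool gpio_uses_wmac(const struct gpio_caps_example *caps,
                           unsigned int gpio)
{
    return gpio < caps->num_gpio_pins && (caps->gpio_mask & (1u << gpio));
}

/* On SOC chips, GPIOs outside the mask fall back to the gpiolib framework. */
static bool gpio_uses_gpiolib(const struct gpio_caps_example *caps,
                              unsigned int gpio)
{
    return gpio < caps->num_gpio_pins && caps->is_soc_chip &&
           !(caps->gpio_mask & (1u << gpio));
}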
Signed-off-by: Miaoqing Pan Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath9k/hw.c | 70 ++++++++++++++++++++++------ drivers/net/wireless/ath/ath9k/hw.h | 1 + drivers/net/wireless/ath/ath9k/reg.h | 42 +++++++++++++++-- 3 files changed, 96 insertions(+), 17 deletions(-) diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c index e7a31016f370..f14242b3213e 100644 --- a/drivers/net/wireless/ath/ath9k/hw.c +++ b/drivers/net/wireless/ath/ath9k/hw.c @@ -2385,6 +2385,61 @@ static bool ath9k_hw_dfs_tested(struct ath_hw *ah) } } +static void ath9k_gpio_cap_init(struct ath_hw *ah) +{ + struct ath9k_hw_capabilities *pCap = &ah->caps; + + if (AR_SREV_9271(ah)) { + pCap->num_gpio_pins = AR9271_NUM_GPIO; + pCap->gpio_mask = AR9271_GPIO_MASK; + } else if (AR_DEVID_7010(ah)) { + pCap->num_gpio_pins = AR7010_NUM_GPIO; + pCap->gpio_mask = AR7010_GPIO_MASK; + } else if (AR_SREV_9287(ah)) { + pCap->num_gpio_pins = AR9287_NUM_GPIO; + pCap->gpio_mask = AR9287_GPIO_MASK; + } else if (AR_SREV_9285(ah)) { + pCap->num_gpio_pins = AR9285_NUM_GPIO; + pCap->gpio_mask = AR9285_GPIO_MASK; + } else if (AR_SREV_9280(ah)) { + pCap->num_gpio_pins = AR9280_NUM_GPIO; + pCap->gpio_mask = AR9280_GPIO_MASK; + } else if (AR_SREV_9300(ah)) { + pCap->num_gpio_pins = AR9300_NUM_GPIO; + pCap->gpio_mask = AR9300_GPIO_MASK; + } else if (AR_SREV_9330(ah)) { + pCap->num_gpio_pins = AR9330_NUM_GPIO; + pCap->gpio_mask = AR9330_GPIO_MASK; + } else if (AR_SREV_9340(ah)) { + pCap->num_gpio_pins = AR9340_NUM_GPIO; + pCap->gpio_mask = AR9340_GPIO_MASK; + } else if (AR_SREV_9462(ah)) { + pCap->num_gpio_pins = AR9462_NUM_GPIO; + pCap->gpio_mask = AR9462_GPIO_MASK; + } else if (AR_SREV_9485(ah)) { + pCap->num_gpio_pins = AR9485_NUM_GPIO; + pCap->gpio_mask = AR9485_GPIO_MASK; + } else if (AR_SREV_9531(ah)) { + pCap->num_gpio_pins = AR9531_NUM_GPIO; + pCap->gpio_mask = AR9531_GPIO_MASK; + } else if (AR_SREV_9550(ah)) { + pCap->num_gpio_pins = AR9550_NUM_GPIO; + pCap->gpio_mask = AR9550_GPIO_MASK; + } else if (AR_SREV_9561(ah)) { + pCap->num_gpio_pins = AR9561_NUM_GPIO; + pCap->gpio_mask = AR9561_GPIO_MASK; + } else if (AR_SREV_9565(ah)) { + pCap->num_gpio_pins = AR9565_NUM_GPIO; + pCap->gpio_mask = AR9565_GPIO_MASK; + } else if (AR_SREV_9580(ah)) { + pCap->num_gpio_pins = AR9580_NUM_GPIO; + pCap->gpio_mask = AR9580_GPIO_MASK; + } else { + pCap->num_gpio_pins = AR_NUM_GPIO; + pCap->gpio_mask = AR_GPIO_MASK; + } +} + int ath9k_hw_fill_cap_info(struct ath_hw *ah) { struct ath9k_hw_capabilities *pCap = &ah->caps; @@ -2478,20 +2533,7 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah) else pCap->hw_caps &= ~ATH9K_HW_CAP_HT; - if (AR_SREV_9271(ah)) - pCap->num_gpio_pins = AR9271_NUM_GPIO; - else if (AR_DEVID_7010(ah)) - pCap->num_gpio_pins = AR7010_NUM_GPIO; - else if (AR_SREV_9300_20_OR_LATER(ah)) - pCap->num_gpio_pins = AR9300_NUM_GPIO; - else if (AR_SREV_9287_11_OR_LATER(ah)) - pCap->num_gpio_pins = AR9287_NUM_GPIO; - else if (AR_SREV_9285_12_OR_LATER(ah)) - pCap->num_gpio_pins = AR9285_NUM_GPIO; - else if (AR_SREV_9280_20_OR_LATER(ah)) - pCap->num_gpio_pins = AR928X_NUM_GPIO; - else - pCap->num_gpio_pins = AR_NUM_GPIO; + ath9k_gpio_cap_init(ah); if (AR_SREV_9160_10_OR_LATER(ah) || AR_SREV_9100(ah)) pCap->rts_aggr_limit = ATH_AMPDU_LIMIT_MAX; diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h index 831a54415a25..c0740d6b3e97 100644 --- a/drivers/net/wireless/ath/ath9k/hw.h +++ b/drivers/net/wireless/ath/ath9k/hw.h @@ -301,6 +301,7 @@ struct ath9k_hw_capabilities { u8 
max_txchains; u8 max_rxchains; u8 num_gpio_pins; + u32 gpio_mask; u8 rx_hp_qdepth; u8 rx_lp_qdepth; u8 rx_status_len; diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h index c8d35febaf0f..c06fdb955787 100644 --- a/drivers/net/wireless/ath/ath9k/reg.h +++ b/drivers/net/wireless/ath/ath9k/reg.h @@ -985,6 +985,10 @@ #define AR_SREV_9561(_ah) \ (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9561)) +#define AR_SREV_SOC(_ah) \ + (AR_SREV_9340(_ah) || AR_SREV_9531(_ah) || AR_SREV_9550(ah) || \ + AR_SREV_9561(ah)) + /* NOTE: When adding chips newer than Peacock, add chip check here */ #define AR_SREV_9580_10_OR_LATER(_ah) \ (AR_SREV_9580(_ah)) @@ -1104,14 +1108,46 @@ enum { #define AR_PCIE_PHY_REG3 0x18c08 +/* Define correct GPIO numbers and MASK bits to indicate the WMAC + * GPIO resource. + * Allow SOC chips(AR9340, AR9531, AR9550, AR9561) to access all GPIOs + * which rely on gpiolib framework. But restrict SOC AR9330 only to + * access WMAC GPIO which has the same design with the old chips. + */ #define AR_NUM_GPIO 14 -#define AR928X_NUM_GPIO 10 +#define AR9280_NUM_GPIO 10 #define AR9285_NUM_GPIO 12 -#define AR9287_NUM_GPIO 11 +#define AR9287_NUM_GPIO 10 #define AR9271_NUM_GPIO 16 -#define AR9300_NUM_GPIO 17 +#define AR9300_NUM_GPIO 16 +#define AR9330_NUM_GPIO 16 +#define AR9340_NUM_GPIO 23 +#define AR9462_NUM_GPIO 10 +#define AR9485_NUM_GPIO 12 +#define AR9531_NUM_GPIO 18 +#define AR9550_NUM_GPIO 24 +#define AR9561_NUM_GPIO 23 +#define AR9565_NUM_GPIO 12 +#define AR9580_NUM_GPIO 16 #define AR7010_NUM_GPIO 16 +#define AR_GPIO_MASK 0x00003FFF +#define AR9271_GPIO_MASK 0x0000FFFF +#define AR9280_GPIO_MASK 0x000003FF +#define AR9285_GPIO_MASK 0x00000FFF +#define AR9287_GPIO_MASK 0x000003FF +#define AR9300_GPIO_MASK 0x0000F4FF +#define AR9330_GPIO_MASK 0x0000F4FF +#define AR9340_GPIO_MASK 0x0000000F +#define AR9462_GPIO_MASK 0x000003FF +#define AR9485_GPIO_MASK 0x00000FFF +#define AR9531_GPIO_MASK 0x0000000F +#define AR9550_GPIO_MASK 0x0000000F +#define AR9561_GPIO_MASK 0x0000000F +#define AR9565_GPIO_MASK 0x00000FFF +#define AR9580_GPIO_MASK 0x0000F4FF +#define AR7010_GPIO_MASK 0x0000FFFF + #define AR_GPIO_IN_OUT (AR_SREV_9340(ah) ? 0x4028 : 0x4048) #define AR_GPIO_IN_VAL 0x0FFFC000 #define AR_GPIO_IN_VAL_S 14 From b2d70d4944c1789bc64376ad97a811f37e230c87 Mon Sep 17 00:00:00 2001 From: Miaoqing Pan Date: Mon, 7 Mar 2016 10:38:15 +0800 Subject: [PATCH 0049/1649] ath9k: make GPIO API to support both of WMAC and SOC commit 61b559dea40e ("ath9k: add extra GPIO led support") added ath9k to support access SOC's GPIOs, but implemented in a separated API: ath9k_hw_request_gpio(). So this patch make the APIs more common, to support both of WMAC and SOC GPIOs. The new APIs as below, void ath9k_hw_gpio_request_in(); void ath9k_hw_gpio_request_out(); void ath9k_hw_gpio_free(); NOTE, the BSP of the SOC chips(AR9340, AR9531, AR9550, AR9561) should set the corresponding MUX registers correctly. 
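A brief usage sketch of the API listed above, assuming the declarations this patch adds to hw.h; the GPIO number and label are arbitrary examples, and the active-high comment is an assumption about the wiring rather than anything the patch states.

/* Usage sketch only: drive an LED from a spare GPIO with the new API.
 * Pin number 10 and the label are made-up examples.
 */
static void example_drive_led(struct ath_hw *ah)
{
    ath9k_hw_gpio_request_out(ah, 10, "ath9k-example-led",
                              AR_GPIO_OUTPUT_MUX_AS_OUTPUT);

    ath9k_hw_set_gpio(ah, 10, 1);   /* on, assuming active-high wiring */
    ath9k_hw_set_gpio(ah, 10, 0);   /* off */

    /* Only releases gpiolib-backed SOC pins; harmless on other chips. */
    ath9k_hw_gpio_free(ah, 10);
}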
Signed-off-by: Miaoqing Pan Signed-off-by: Kalle Valo --- .../net/wireless/ath/ath9k/ar9003_eeprom.c | 4 +- drivers/net/wireless/ath/ath9k/ar9003_mci.c | 39 ++-- drivers/net/wireless/ath/ath9k/btcoex.c | 27 +-- drivers/net/wireless/ath/ath9k/gpio.c | 6 +- drivers/net/wireless/ath/ath9k/htc_drv_gpio.c | 6 +- drivers/net/wireless/ath/ath9k/hw.c | 208 +++++++++++------- drivers/net/wireless/ath/ath9k/hw.h | 10 +- drivers/net/wireless/ath/ath9k/main.c | 6 +- 8 files changed, 180 insertions(+), 126 deletions(-) diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c index 54ed2f72d35e..36b602577905 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c @@ -3590,8 +3590,8 @@ static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz) else gpio = AR9300_EXT_LNA_CTL_GPIO_AR9485; - ath9k_hw_cfg_output(ah, gpio, - AR_GPIO_OUTPUT_MUX_AS_PCIE_ATTENTION_LED); + ath9k_hw_gpio_request_out(ah, gpio, NULL, + AR_GPIO_OUTPUT_MUX_AS_PCIE_ATTENTION_LED); } value = ar9003_hw_ant_ctrl_common_get(ah, is2ghz); diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mci.c b/drivers/net/wireless/ath/ath9k/ar9003_mci.c index af5ee416a560..0fe9c8378249 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_mci.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_mci.c @@ -427,21 +427,34 @@ static void ar9003_mci_observation_set_up(struct ath_hw *ah) struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci; if (mci->config & ATH_MCI_CONFIG_MCI_OBS_MCI) { - ath9k_hw_cfg_output(ah, 3, AR_GPIO_OUTPUT_MUX_AS_MCI_WLAN_DATA); - ath9k_hw_cfg_output(ah, 2, AR_GPIO_OUTPUT_MUX_AS_MCI_WLAN_CLK); - ath9k_hw_cfg_output(ah, 1, AR_GPIO_OUTPUT_MUX_AS_MCI_BT_DATA); - ath9k_hw_cfg_output(ah, 0, AR_GPIO_OUTPUT_MUX_AS_MCI_BT_CLK); + ath9k_hw_gpio_request_out(ah, 3, NULL, + AR_GPIO_OUTPUT_MUX_AS_MCI_WLAN_DATA); + ath9k_hw_gpio_request_out(ah, 2, NULL, + AR_GPIO_OUTPUT_MUX_AS_MCI_WLAN_CLK); + ath9k_hw_gpio_request_out(ah, 1, NULL, + AR_GPIO_OUTPUT_MUX_AS_MCI_BT_DATA); + ath9k_hw_gpio_request_out(ah, 0, NULL, + AR_GPIO_OUTPUT_MUX_AS_MCI_BT_CLK); } else if (mci->config & ATH_MCI_CONFIG_MCI_OBS_TXRX) { - ath9k_hw_cfg_output(ah, 3, AR_GPIO_OUTPUT_MUX_AS_WL_IN_TX); - ath9k_hw_cfg_output(ah, 2, AR_GPIO_OUTPUT_MUX_AS_WL_IN_RX); - ath9k_hw_cfg_output(ah, 1, AR_GPIO_OUTPUT_MUX_AS_BT_IN_TX); - ath9k_hw_cfg_output(ah, 0, AR_GPIO_OUTPUT_MUX_AS_BT_IN_RX); - ath9k_hw_cfg_output(ah, 5, AR_GPIO_OUTPUT_MUX_AS_OUTPUT); + ath9k_hw_gpio_request_out(ah, 3, NULL, + AR_GPIO_OUTPUT_MUX_AS_WL_IN_TX); + ath9k_hw_gpio_request_out(ah, 2, NULL, + AR_GPIO_OUTPUT_MUX_AS_WL_IN_RX); + ath9k_hw_gpio_request_out(ah, 1, NULL, + AR_GPIO_OUTPUT_MUX_AS_BT_IN_TX); + ath9k_hw_gpio_request_out(ah, 0, NULL, + AR_GPIO_OUTPUT_MUX_AS_BT_IN_RX); + ath9k_hw_gpio_request_out(ah, 5, NULL, + AR_GPIO_OUTPUT_MUX_AS_OUTPUT); } else if (mci->config & ATH_MCI_CONFIG_MCI_OBS_BT) { - ath9k_hw_cfg_output(ah, 3, AR_GPIO_OUTPUT_MUX_AS_BT_IN_TX); - ath9k_hw_cfg_output(ah, 2, AR_GPIO_OUTPUT_MUX_AS_BT_IN_RX); - ath9k_hw_cfg_output(ah, 1, AR_GPIO_OUTPUT_MUX_AS_MCI_BT_DATA); - ath9k_hw_cfg_output(ah, 0, AR_GPIO_OUTPUT_MUX_AS_MCI_BT_CLK); + ath9k_hw_gpio_request_out(ah, 3, NULL, + AR_GPIO_OUTPUT_MUX_AS_BT_IN_TX); + ath9k_hw_gpio_request_out(ah, 2, NULL, + AR_GPIO_OUTPUT_MUX_AS_BT_IN_RX); + ath9k_hw_gpio_request_out(ah, 1, NULL, + AR_GPIO_OUTPUT_MUX_AS_MCI_BT_DATA); + ath9k_hw_gpio_request_out(ah, 0, NULL, + AR_GPIO_OUTPUT_MUX_AS_MCI_BT_CLK); } else return; diff --git a/drivers/net/wireless/ath/ath9k/btcoex.c 
b/drivers/net/wireless/ath/ath9k/btcoex.c index 5a084d94ed90..7719cb1d8b68 100644 --- a/drivers/net/wireless/ath/ath9k/btcoex.c +++ b/drivers/net/wireless/ath/ath9k/btcoex.c @@ -142,7 +142,8 @@ void ath9k_hw_btcoex_init_2wire(struct ath_hw *ah) btcoex_hw->btactive_gpio); /* Configure the desired gpio port for input */ - ath9k_hw_cfg_gpio_input(ah, btcoex_hw->btactive_gpio); + ath9k_hw_gpio_request_in(ah, btcoex_hw->btactive_gpio, + "ath9k-btactive"); } EXPORT_SYMBOL(ath9k_hw_btcoex_init_2wire); @@ -166,9 +167,10 @@ void ath9k_hw_btcoex_init_3wire(struct ath_hw *ah) btcoex_hw->btpriority_gpio); /* Configure the desired GPIO ports for input */ - - ath9k_hw_cfg_gpio_input(ah, btcoex_hw->btactive_gpio); - ath9k_hw_cfg_gpio_input(ah, btcoex_hw->btpriority_gpio); + ath9k_hw_gpio_request_in(ah, btcoex_hw->btactive_gpio, + "ath9k-btactive"); + ath9k_hw_gpio_request_in(ah, btcoex_hw->btpriority_gpio, + "ath9k-btpriority"); } EXPORT_SYMBOL(ath9k_hw_btcoex_init_3wire); @@ -201,8 +203,9 @@ static void ath9k_hw_btcoex_enable_2wire(struct ath_hw *ah) struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw; /* Configure the desired GPIO port for TX_FRAME output */ - ath9k_hw_cfg_output(ah, btcoex_hw->wlanactive_gpio, - AR_GPIO_OUTPUT_MUX_AS_TX_FRAME); + ath9k_hw_gpio_request_out(ah, btcoex_hw->wlanactive_gpio, + "ath9k-wlanactive", + AR_GPIO_OUTPUT_MUX_AS_TX_FRAME); } /* @@ -271,7 +274,6 @@ static void ath9k_hw_btcoex_enable_3wire(struct ath_hw *ah) REG_WRITE(ah, AR_BT_COEX_MODE, btcoex->bt_coex_mode); REG_WRITE(ah, AR_BT_COEX_MODE2, btcoex->bt_coex_mode2); - if (AR_SREV_9300_20_OR_LATER(ah)) { REG_WRITE(ah, AR_BT_COEX_WL_WEIGHTS0, btcoex->wlan_weight[0]); REG_WRITE(ah, AR_BT_COEX_WL_WEIGHTS1, btcoex->wlan_weight[1]); @@ -281,8 +283,6 @@ static void ath9k_hw_btcoex_enable_3wire(struct ath_hw *ah) } else REG_WRITE(ah, AR_BT_COEX_WEIGHT, btcoex->bt_coex_weights); - - if (AR_SREV_9271(ah)) { val = REG_READ(ah, 0x50040); val &= 0xFFFFFEFF; @@ -292,8 +292,9 @@ static void ath9k_hw_btcoex_enable_3wire(struct ath_hw *ah) REG_RMW_FIELD(ah, AR_QUIET1, AR_QUIET1_QUIET_ACK_CTS_ENABLE, 1); REG_RMW_FIELD(ah, AR_PCU_MISC, AR_PCU_BT_ANT_PREVENT_RX, 0); - ath9k_hw_cfg_output(ah, btcoex->wlanactive_gpio, - AR_GPIO_OUTPUT_MUX_AS_RX_CLEAR_EXTERNAL); + ath9k_hw_gpio_request_out(ah, btcoex->wlanactive_gpio, + "ath9k-wlanactive", + AR_GPIO_OUTPUT_MUX_AS_RX_CLEAR_EXTERNAL); } static void ath9k_hw_btcoex_enable_mci(struct ath_hw *ah) @@ -364,8 +365,8 @@ void ath9k_hw_btcoex_disable(struct ath_hw *ah) if (!AR_SREV_9300_20_OR_LATER(ah)) ath9k_hw_set_gpio(ah, btcoex_hw->wlanactive_gpio, 0); - ath9k_hw_cfg_output(ah, btcoex_hw->wlanactive_gpio, - AR_GPIO_OUTPUT_MUX_AS_OUTPUT); + ath9k_hw_gpio_request_out(ah, btcoex_hw->wlanactive_gpio, + NULL, AR_GPIO_OUTPUT_MUX_AS_OUTPUT); if (btcoex_hw->scheme == ATH_BTCOEX_CFG_3WIRE) { REG_WRITE(ah, AR_BT_COEX_MODE, AR_BT_QUIET | AR_BT_MODE); diff --git a/drivers/net/wireless/ath/ath9k/gpio.c b/drivers/net/wireless/ath/ath9k/gpio.c index 284706798c71..b41dfb7c2784 100644 --- a/drivers/net/wireless/ath/ath9k/gpio.c +++ b/drivers/net/wireless/ath/ath9k/gpio.c @@ -74,7 +74,8 @@ void ath_fill_led_pin(struct ath_softc *sc) if (ah->led_pin >= 0) { if (!((1 << ah->led_pin) & AR_GPIO_OE_OUT_MASK)) - ath9k_hw_request_gpio(ah, ah->led_pin, "ath9k-led"); + ath9k_hw_gpio_request_out(ah, ah->led_pin, "ath9k-led", + AR_GPIO_OUTPUT_MUX_AS_OUTPUT); return; } @@ -90,7 +91,8 @@ void ath_fill_led_pin(struct ath_softc *sc) ah->led_pin = ATH_LED_PIN_DEF; /* Configure gpio 1 for output */ - ath9k_hw_cfg_output(ah, 
ah->led_pin, AR_GPIO_OUTPUT_MUX_AS_OUTPUT); + ath9k_hw_gpio_request_out(ah, ah->led_pin, "ath9k-led", + AR_GPIO_OUTPUT_MUX_AS_OUTPUT); /* LED off, active low */ ath9k_hw_set_gpio(ah, ah->led_pin, (ah->config.led_active_high) ? 0 : 1); diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c b/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c index 2aabcbdaba4e..d9b640a2488c 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c @@ -259,11 +259,11 @@ void ath9k_deinit_leds(struct ath9k_htc_priv *priv) void ath9k_configure_leds(struct ath9k_htc_priv *priv) { /* Configure gpio 1 for output */ - ath9k_hw_cfg_output(priv->ah, priv->ah->led_pin, - AR_GPIO_OUTPUT_MUX_AS_OUTPUT); + ath9k_hw_gpio_request_out(priv->ah, priv->ah->led_pin, + "ath9k-led", + AR_GPIO_OUTPUT_MUX_AS_OUTPUT); /* LED off, active low */ ath9k_hw_set_gpio(priv->ah, priv->ah->led_pin, 1); - } void ath9k_init_leds(struct ath9k_htc_priv *priv) diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c index f14242b3213e..7f39b13a4ca0 100644 --- a/drivers/net/wireless/ath/ath9k/hw.c +++ b/drivers/net/wireless/ath/ath9k/hw.c @@ -1582,7 +1582,8 @@ static void ath9k_hw_apply_gpio_override(struct ath_hw *ah) if (!(gpio_mask & 1)) continue; - ath9k_hw_cfg_output(ah, i, AR_GPIO_OUTPUT_MUX_AS_OUTPUT); + ath9k_hw_gpio_request_out(ah, i, NULL, + AR_GPIO_OUTPUT_MUX_AS_OUTPUT); ath9k_hw_set_gpio(ah, i, !!(ah->gpio_val & BIT(i))); } } @@ -1958,7 +1959,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan, ath9k_hw_init_qos(ah); if (ah->caps.hw_caps & ATH9K_HW_CAP_RFSILENT) - ath9k_hw_cfg_gpio_input(ah, ah->rfkill_gpio); + ath9k_hw_gpio_request_in(ah, ah->rfkill_gpio, "ath9k-rfkill"); ath9k_hw_init_global_settings(ah); @@ -2654,8 +2655,7 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah) /* GPIO / RFKILL / Antennae */ /****************************/ -static void ath9k_hw_gpio_cfg_output_mux(struct ath_hw *ah, - u32 gpio, u32 type) +static void ath9k_hw_gpio_cfg_output_mux(struct ath_hw *ah, u32 gpio, u32 type) { int addr; u32 gpio_shift, tmp; @@ -2669,8 +2669,8 @@ static void ath9k_hw_gpio_cfg_output_mux(struct ath_hw *ah, gpio_shift = (gpio % 6) * 5; - if (AR_SREV_9280_20_OR_LATER(ah) - || (addr != AR_GPIO_OUTPUT_MUX1)) { + if (AR_SREV_9280_20_OR_LATER(ah) || + (addr != AR_GPIO_OUTPUT_MUX1)) { REG_RMW(ah, addr, (type << gpio_shift), (0x1f << gpio_shift)); } else { @@ -2682,107 +2682,145 @@ static void ath9k_hw_gpio_cfg_output_mux(struct ath_hw *ah, } } -void ath9k_hw_cfg_gpio_input(struct ath_hw *ah, u32 gpio) +/* BSP should set the corresponding MUX register correctly. + */ +static void ath9k_hw_gpio_cfg_soc(struct ath_hw *ah, u32 gpio, bool out, + const char *label) { - u32 gpio_shift; + if (ah->caps.gpio_requested & BIT(gpio)) + return; - BUG_ON(gpio >= ah->caps.num_gpio_pins); + /* may be requested by BSP, free anyway */ + gpio_free(gpio); + + if (gpio_request_one(gpio, out ? GPIOF_OUT_INIT_LOW : GPIOF_IN, label)) + return; + + ah->caps.gpio_requested |= BIT(gpio); +} + +static void ath9k_hw_gpio_cfg_wmac(struct ath_hw *ah, u32 gpio, bool out, + u32 ah_signal_type) +{ + u32 gpio_set, gpio_shift = gpio; if (AR_DEVID_7010(ah)) { - gpio_shift = gpio; - REG_RMW(ah, AR7010_GPIO_OE, - (AR7010_GPIO_OE_AS_INPUT << gpio_shift), - (AR7010_GPIO_OE_MASK << gpio_shift)); - return; - } + gpio_set = out ? 
+ AR7010_GPIO_OE_AS_OUTPUT : AR7010_GPIO_OE_AS_INPUT; + REG_RMW(ah, AR7010_GPIO_OE, gpio_set << gpio_shift, + AR7010_GPIO_OE_MASK << gpio_shift); + } else if (AR_SREV_SOC(ah)) { + gpio_set = out ? 1 : 0; + REG_RMW(ah, AR_GPIO_OE_OUT, gpio_set << gpio_shift, + gpio_set << gpio_shift); + } else { + gpio_shift = gpio << 1; + gpio_set = out ? + AR_GPIO_OE_OUT_DRV_ALL : AR_GPIO_OE_OUT_DRV_NO; + REG_RMW(ah, AR_GPIO_OE_OUT, gpio_set << gpio_shift, + AR_GPIO_OE_OUT_DRV << gpio_shift); - gpio_shift = gpio << 1; - REG_RMW(ah, - AR_GPIO_OE_OUT, - (AR_GPIO_OE_OUT_DRV_NO << gpio_shift), - (AR_GPIO_OE_OUT_DRV << gpio_shift)); + if (out) + ath9k_hw_gpio_cfg_output_mux(ah, gpio, ah_signal_type); + } } -EXPORT_SYMBOL(ath9k_hw_cfg_gpio_input); + +static void ath9k_hw_gpio_request(struct ath_hw *ah, u32 gpio, bool out, + const char *label, u32 ah_signal_type) +{ + WARN_ON(gpio >= ah->caps.num_gpio_pins); + + if (BIT(gpio) & ah->caps.gpio_mask) + ath9k_hw_gpio_cfg_wmac(ah, gpio, out, ah_signal_type); + else if (AR_SREV_SOC(ah)) + ath9k_hw_gpio_cfg_soc(ah, gpio, out, label); + else + WARN_ON(1); +} + +void ath9k_hw_gpio_request_in(struct ath_hw *ah, u32 gpio, const char *label) +{ + ath9k_hw_gpio_request(ah, gpio, false, label, 0); +} +EXPORT_SYMBOL(ath9k_hw_gpio_request_in); + +void ath9k_hw_gpio_request_out(struct ath_hw *ah, u32 gpio, const char *label, + u32 ah_signal_type) +{ + ath9k_hw_gpio_request(ah, gpio, true, label, ah_signal_type); +} +EXPORT_SYMBOL(ath9k_hw_gpio_request_out); + +void ath9k_hw_gpio_free(struct ath_hw *ah, u32 gpio) +{ + if (!AR_SREV_SOC(ah)) + return; + + WARN_ON(gpio >= ah->caps.num_gpio_pins); + + if (ah->caps.gpio_requested & BIT(gpio)) { + gpio_free(gpio); + ah->caps.gpio_requested &= ~BIT(gpio); + } +} +EXPORT_SYMBOL(ath9k_hw_gpio_free); u32 ath9k_hw_gpio_get(struct ath_hw *ah, u32 gpio) { + u32 val = 0xffffffff; + #define MS_REG_READ(x, y) \ - (MS(REG_READ(ah, AR_GPIO_IN_OUT), x##_GPIO_IN_VAL) & (AR_GPIO_BIT(y))) + (MS(REG_READ(ah, AR_GPIO_IN_OUT), x##_GPIO_IN_VAL) & BIT(y)) - if (gpio >= ah->caps.num_gpio_pins) - return 0xffffffff; + WARN_ON(gpio >= ah->caps.num_gpio_pins); - if (AR_DEVID_7010(ah)) { - u32 val; - val = REG_READ(ah, AR7010_GPIO_IN); - return (MS(val, AR7010_GPIO_IN_VAL) & AR_GPIO_BIT(gpio)) == 0; - } else if (AR_SREV_9300_20_OR_LATER(ah)) - return (MS(REG_READ(ah, AR_GPIO_IN), AR9300_GPIO_IN_VAL) & - AR_GPIO_BIT(gpio)) != 0; - else if (AR_SREV_9271(ah)) - return MS_REG_READ(AR9271, gpio) != 0; - else if (AR_SREV_9287_11_OR_LATER(ah)) - return MS_REG_READ(AR9287, gpio) != 0; - else if (AR_SREV_9285_12_OR_LATER(ah)) - return MS_REG_READ(AR9285, gpio) != 0; - else if (AR_SREV_9280_20_OR_LATER(ah)) - return MS_REG_READ(AR928X, gpio) != 0; - else - return MS_REG_READ(AR, gpio) != 0; + if (BIT(gpio) & ah->caps.gpio_mask) { + if (AR_SREV_9271(ah)) + val = MS_REG_READ(AR9271, gpio); + else if (AR_SREV_9287(ah)) + val = MS_REG_READ(AR9287, gpio); + else if (AR_SREV_9285(ah)) + val = MS_REG_READ(AR9285, gpio); + else if (AR_SREV_9280(ah)) + val = MS_REG_READ(AR928X, gpio); + else if (AR_DEVID_7010(ah)) + val = REG_READ(ah, AR7010_GPIO_IN) & BIT(gpio); + else if (AR_SREV_9300_20_OR_LATER(ah)) + val = REG_READ(ah, AR_GPIO_IN) & BIT(gpio); + else + val = MS_REG_READ(AR, gpio); + } else if (BIT(gpio) & ah->caps.gpio_requested) { + val = gpio_get_value(gpio) & BIT(gpio); + } else { + WARN_ON(1); + } + + return val; } EXPORT_SYMBOL(ath9k_hw_gpio_get); -void ath9k_hw_cfg_output(struct ath_hw *ah, u32 gpio, - u32 ah_signal_type) -{ - u32 gpio_shift; - - if (AR_DEVID_7010(ah)) 
{ - gpio_shift = gpio; - REG_RMW(ah, AR7010_GPIO_OE, - (AR7010_GPIO_OE_AS_OUTPUT << gpio_shift), - (AR7010_GPIO_OE_MASK << gpio_shift)); - return; - } - - ath9k_hw_gpio_cfg_output_mux(ah, gpio, ah_signal_type); - gpio_shift = 2 * gpio; - REG_RMW(ah, - AR_GPIO_OE_OUT, - (AR_GPIO_OE_OUT_DRV_ALL << gpio_shift), - (AR_GPIO_OE_OUT_DRV << gpio_shift)); -} -EXPORT_SYMBOL(ath9k_hw_cfg_output); - void ath9k_hw_set_gpio(struct ath_hw *ah, u32 gpio, u32 val) { - if (AR_DEVID_7010(ah)) { - val = val ? 0 : 1; - REG_RMW(ah, AR7010_GPIO_OUT, ((val&1) << gpio), - AR_GPIO_BIT(gpio)); - return; - } + WARN_ON(gpio >= ah->caps.num_gpio_pins); - if (AR_SREV_9271(ah)) - val = ~val; - - if ((1 << gpio) & AR_GPIO_OE_OUT_MASK) - REG_RMW(ah, AR_GPIO_IN_OUT, ((val & 1) << gpio), - AR_GPIO_BIT(gpio)); + if (AR_DEVID_7010(ah) || AR_SREV_9271(ah)) + val = !val; else - gpio_set_value(gpio, val & 1); + val = !!val; + + if (BIT(gpio) & ah->caps.gpio_mask) { + u32 out_addr = AR_DEVID_7010(ah) ? + AR7010_GPIO_OUT : AR_GPIO_IN_OUT; + + REG_RMW(ah, out_addr, val << gpio, BIT(gpio)); + } else if (BIT(gpio) & ah->caps.gpio_requested) { + gpio_set_value(gpio, val); + } else { + WARN_ON(1); + } } EXPORT_SYMBOL(ath9k_hw_set_gpio); -void ath9k_hw_request_gpio(struct ath_hw *ah, u32 gpio, const char *label) -{ - if (gpio >= ah->caps.num_gpio_pins) - return; - - gpio_request_one(gpio, GPIOF_DIR_OUT | GPIOF_INIT_LOW, label); -} -EXPORT_SYMBOL(ath9k_hw_request_gpio); - void ath9k_hw_setantenna(struct ath_hw *ah, u32 antenna) { REG_WRITE(ah, AR_DEF_ANTENNA, (antenna & 0x7)); diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h index c0740d6b3e97..9cbca1229bac 100644 --- a/drivers/net/wireless/ath/ath9k/hw.h +++ b/drivers/net/wireless/ath/ath9k/hw.h @@ -160,7 +160,6 @@ #define AR_GPIO_OUTPUT_MUX_AS_RUCKUS_DATA 0x1e #define AR_GPIOD_MASK 0x00001FFF -#define AR_GPIO_BIT(_gpio) (1 << (_gpio)) #define BASE_ACTIVATE_DELAY 100 #define RTC_PLL_SETTLE_DELAY (AR_SREV_9340(ah) ? 1000 : 100) @@ -302,6 +301,7 @@ struct ath9k_hw_capabilities { u8 max_rxchains; u8 num_gpio_pins; u32 gpio_mask; + u32 gpio_requested; u8 rx_hp_qdepth; u8 rx_lp_qdepth; u8 rx_status_len; @@ -1020,12 +1020,12 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah); u32 ath9k_regd_get_ctl(struct ath_regulatory *reg, struct ath9k_channel *chan); /* GPIO / RFKILL / Antennae */ -void ath9k_hw_cfg_gpio_input(struct ath_hw *ah, u32 gpio); +void ath9k_hw_gpio_request_in(struct ath_hw *ah, u32 gpio, const char *label); +void ath9k_hw_gpio_request_out(struct ath_hw *ah, u32 gpio, const char *label, + u32 ah_signal_type); +void ath9k_hw_gpio_free(struct ath_hw *ah, u32 gpio); u32 ath9k_hw_gpio_get(struct ath_hw *ah, u32 gpio); -void ath9k_hw_cfg_output(struct ath_hw *ah, u32 gpio, - u32 ah_signal_type); void ath9k_hw_set_gpio(struct ath_hw *ah, u32 gpio, u32 val); -void ath9k_hw_request_gpio(struct ath_hw *ah, u32 gpio, const char *label); void ath9k_hw_setantenna(struct ath_hw *ah, u32 antenna); /* General Operation */ diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index 3aed43a63f94..a8fcadb2fa84 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c @@ -719,10 +719,10 @@ static int ath9k_start(struct ieee80211_hw *hw) ah->reset_power_on = false; if (ah->led_pin >= 0) { - ath9k_hw_cfg_output(ah, ah->led_pin, - AR_GPIO_OUTPUT_MUX_AS_OUTPUT); ath9k_hw_set_gpio(ah, ah->led_pin, (ah->config.led_active_high) ? 
1 : 0); + ath9k_hw_gpio_request_out(ah, ah->led_pin, NULL, + AR_GPIO_OUTPUT_MUX_AS_OUTPUT); } /* @@ -870,7 +870,7 @@ static void ath9k_stop(struct ieee80211_hw *hw) if (ah->led_pin >= 0) { ath9k_hw_set_gpio(ah, ah->led_pin, (ah->config.led_active_high) ? 0 : 1); - ath9k_hw_cfg_gpio_input(ah, ah->led_pin); + ath9k_hw_gpio_request_in(ah, ah->led_pin, NULL); } ath_prepare_reset(sc); From db2221901fbded787daed153281ed875de489692 Mon Sep 17 00:00:00 2001 From: Miaoqing Pan Date: Mon, 7 Mar 2016 10:38:16 +0800 Subject: [PATCH 0050/1649] ath9k: free GPIO resource for SOC GPIOs For SOC GPIOs, should call ath9k_hw_gpio_free() to release the GPIO resource. Signed-off-by: Miaoqing Pan Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath9k/btcoex.c | 10 ++++++++++ drivers/net/wireless/ath/ath9k/btcoex.h | 1 + drivers/net/wireless/ath/ath9k/gpio.c | 9 +++++++++ drivers/net/wireless/ath/ath9k/htc_drv_gpio.c | 2 ++ drivers/net/wireless/ath/ath9k/hw.c | 1 + 5 files changed, 23 insertions(+) diff --git a/drivers/net/wireless/ath/ath9k/btcoex.c b/drivers/net/wireless/ath/ath9k/btcoex.c index 7719cb1d8b68..4737aa947f99 100644 --- a/drivers/net/wireless/ath/ath9k/btcoex.c +++ b/drivers/net/wireless/ath/ath9k/btcoex.c @@ -174,6 +174,16 @@ void ath9k_hw_btcoex_init_3wire(struct ath_hw *ah) } EXPORT_SYMBOL(ath9k_hw_btcoex_init_3wire); +void ath9k_hw_btcoex_deinit(struct ath_hw *ah) +{ + struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw; + + ath9k_hw_gpio_free(ah, btcoex_hw->btactive_gpio); + ath9k_hw_gpio_free(ah, btcoex_hw->btpriority_gpio); + ath9k_hw_gpio_free(ah, btcoex_hw->wlanactive_gpio); +} +EXPORT_SYMBOL(ath9k_hw_btcoex_deinit); + void ath9k_hw_btcoex_init_mci(struct ath_hw *ah) { ah->btcoex_hw.mci.ready = false; diff --git a/drivers/net/wireless/ath/ath9k/btcoex.h b/drivers/net/wireless/ath/ath9k/btcoex.h index cd2f0a2373cb..0f7c4e61ac13 100644 --- a/drivers/net/wireless/ath/ath9k/btcoex.h +++ b/drivers/net/wireless/ath/ath9k/btcoex.h @@ -123,6 +123,7 @@ struct ath_btcoex_hw { void ath9k_hw_btcoex_init_scheme(struct ath_hw *ah); void ath9k_hw_btcoex_init_2wire(struct ath_hw *ah); void ath9k_hw_btcoex_init_3wire(struct ath_hw *ah); +void ath9k_hw_btcoex_deinit(struct ath_hw *ah); void ath9k_hw_btcoex_init_mci(struct ath_hw *ah); void ath9k_hw_init_btcoex_hw(struct ath_hw *ah, int qnum); void ath9k_hw_btcoex_set_weight(struct ath_hw *ah, diff --git a/drivers/net/wireless/ath/ath9k/gpio.c b/drivers/net/wireless/ath/ath9k/gpio.c index b41dfb7c2784..4964bb36dad2 100644 --- a/drivers/net/wireless/ath/ath9k/gpio.c +++ b/drivers/net/wireless/ath/ath9k/gpio.c @@ -40,6 +40,8 @@ void ath_deinit_leds(struct ath_softc *sc) ath_led_brightness(&sc->led_cdev, LED_OFF); led_classdev_unregister(&sc->led_cdev); + + ath9k_hw_gpio_free(sc->sc_ah, sc->sc_ah->led_pin); } void ath_init_leds(struct ath_softc *sc) @@ -404,6 +406,13 @@ void ath9k_deinit_btcoex(struct ath_softc *sc) if (ath9k_hw_mci_is_enabled(ah)) ath_mci_cleanup(sc); + else { + enum ath_btcoex_scheme scheme = ath9k_hw_get_btcoex_scheme(ah); + + if (scheme == ATH_BTCOEX_CFG_2WIRE || + scheme == ATH_BTCOEX_CFG_3WIRE) + ath9k_hw_btcoex_deinit(sc->sc_ah); + } } int ath9k_init_btcoex(struct ath_softc *sc) diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c b/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c index d9b640a2488c..ecb848b60725 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c @@ -253,6 +253,8 @@ void ath9k_deinit_leds(struct ath9k_htc_priv *priv) ath9k_led_brightness(&priv->led_cdev, 
LED_OFF); led_classdev_unregister(&priv->led_cdev); cancel_work_sync(&priv->led_work); + + ath9k_hw_gpio_free(priv->ah, priv->ah->led_pin); } diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c index 7f39b13a4ca0..42009065e234 100644 --- a/drivers/net/wireless/ath/ath9k/hw.c +++ b/drivers/net/wireless/ath/ath9k/hw.c @@ -1585,6 +1585,7 @@ static void ath9k_hw_apply_gpio_override(struct ath_hw *ah) ath9k_hw_gpio_request_out(ah, i, NULL, AR_GPIO_OUTPUT_MUX_AS_OUTPUT); ath9k_hw_set_gpio(ah, i, !!(ah->gpio_val & BIT(i))); + ath9k_hw_gpio_free(ah, i); } } From 79d4db1214a0c7b1818aaf64d0606b17ff1acea7 Mon Sep 17 00:00:00 2001 From: Miaoqing Pan Date: Mon, 7 Mar 2016 10:38:17 +0800 Subject: [PATCH 0051/1649] ath9k: cleanup led_pin initial Make ath_init_leds() and ath_deinit_leds() pairs as the only API to set leds, also removed direction configuration from ath9k_start() and ath9k_stop(). So the initial is more clear now. Signed-off-by: Miaoqing Pan Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath9k/ath9k.h | 4 -- drivers/net/wireless/ath/ath9k/gpio.c | 62 ++++++++++++-------------- drivers/net/wireless/ath/ath9k/init.c | 1 - drivers/net/wireless/ath/ath9k/main.c | 9 +--- drivers/net/wireless/ath/ath9k/reg.h | 2 - 5 files changed, 31 insertions(+), 47 deletions(-) diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h index 5294595da5a7..93b3793cce2f 100644 --- a/drivers/net/wireless/ath/ath9k/ath9k.h +++ b/drivers/net/wireless/ath/ath9k/ath9k.h @@ -813,7 +813,6 @@ static inline int ath9k_dump_btcoex(struct ath_softc *sc, u8 *buf, u32 size) #ifdef CONFIG_MAC80211_LEDS void ath_init_leds(struct ath_softc *sc); void ath_deinit_leds(struct ath_softc *sc); -void ath_fill_led_pin(struct ath_softc *sc); #else static inline void ath_init_leds(struct ath_softc *sc) { @@ -822,9 +821,6 @@ static inline void ath_init_leds(struct ath_softc *sc) static inline void ath_deinit_leds(struct ath_softc *sc) { } -static inline void ath_fill_led_pin(struct ath_softc *sc) -{ -} #endif /************************/ diff --git a/drivers/net/wireless/ath/ath9k/gpio.c b/drivers/net/wireless/ath/ath9k/gpio.c index 4964bb36dad2..490f74d9ddf0 100644 --- a/drivers/net/wireless/ath/ath9k/gpio.c +++ b/drivers/net/wireless/ath/ath9k/gpio.c @@ -21,6 +21,33 @@ /********************************/ #ifdef CONFIG_MAC80211_LEDS + +void ath_fill_led_pin(struct ath_softc *sc) +{ + struct ath_hw *ah = sc->sc_ah; + + /* Set default led pin if invalid */ + if (ah->led_pin < 0) { + if (AR_SREV_9287(ah)) + ah->led_pin = ATH_LED_PIN_9287; + else if (AR_SREV_9485(ah)) + ah->led_pin = ATH_LED_PIN_9485; + else if (AR_SREV_9300(ah)) + ah->led_pin = ATH_LED_PIN_9300; + else if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) + ah->led_pin = ATH_LED_PIN_9462; + else + ah->led_pin = ATH_LED_PIN_DEF; + } + + /* Configure gpio for output */ + ath9k_hw_gpio_request_out(ah, ah->led_pin, "ath9k-led", + AR_GPIO_OUTPUT_MUX_AS_OUTPUT); + + /* LED off, active low */ + ath9k_hw_set_gpio(ah, ah->led_pin, ah->config.led_active_high ? 
0 : 1); +} + static void ath_led_brightness(struct led_classdev *led_cdev, enum led_brightness brightness) { @@ -51,6 +78,8 @@ void ath_init_leds(struct ath_softc *sc) if (AR_SREV_9100(sc->sc_ah)) return; + ath_fill_led_pin(sc); + if (!ath9k_led_blink) sc->led_cdev.default_trigger = ieee80211_get_radio_led_name(sc->hw); @@ -66,39 +95,6 @@ void ath_init_leds(struct ath_softc *sc) sc->led_registered = true; } - -void ath_fill_led_pin(struct ath_softc *sc) -{ - struct ath_hw *ah = sc->sc_ah; - - if (AR_SREV_9100(ah)) - return; - - if (ah->led_pin >= 0) { - if (!((1 << ah->led_pin) & AR_GPIO_OE_OUT_MASK)) - ath9k_hw_gpio_request_out(ah, ah->led_pin, "ath9k-led", - AR_GPIO_OUTPUT_MUX_AS_OUTPUT); - return; - } - - if (AR_SREV_9287(ah)) - ah->led_pin = ATH_LED_PIN_9287; - else if (AR_SREV_9485(sc->sc_ah)) - ah->led_pin = ATH_LED_PIN_9485; - else if (AR_SREV_9300(sc->sc_ah)) - ah->led_pin = ATH_LED_PIN_9300; - else if (AR_SREV_9462(sc->sc_ah) || AR_SREV_9565(sc->sc_ah)) - ah->led_pin = ATH_LED_PIN_9462; - else - ah->led_pin = ATH_LED_PIN_DEF; - - /* Configure gpio 1 for output */ - ath9k_hw_gpio_request_out(ah, ah->led_pin, "ath9k-led", - AR_GPIO_OUTPUT_MUX_AS_OUTPUT); - - /* LED off, active low */ - ath9k_hw_set_gpio(ah, ah->led_pin, (ah->config.led_active_high) ? 0 : 1); -} #endif /*******************/ diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c index d4e0ac946c3a..d986687870af 100644 --- a/drivers/net/wireless/ath/ath9k/init.c +++ b/drivers/net/wireless/ath/ath9k/init.c @@ -660,7 +660,6 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc, ath9k_cmn_init_crypto(sc->sc_ah); ath9k_init_misc(sc); - ath_fill_led_pin(sc); ath_chanctx_init(sc); ath9k_offchannel_init(sc); diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index a8fcadb2fa84..50ec4c9a9da7 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c @@ -718,12 +718,9 @@ static int ath9k_start(struct ieee80211_hw *hw) if (!ath_complete_reset(sc, false)) ah->reset_power_on = false; - if (ah->led_pin >= 0) { + if (ah->led_pin >= 0) ath9k_hw_set_gpio(ah, ah->led_pin, (ah->config.led_active_high) ? 1 : 0); - ath9k_hw_gpio_request_out(ah, ah->led_pin, NULL, - AR_GPIO_OUTPUT_MUX_AS_OUTPUT); - } /* * Reset key cache to sane defaults (all entries cleared) instead of @@ -867,11 +864,9 @@ static void ath9k_stop(struct ieee80211_hw *hw) spin_lock_bh(&sc->sc_pcu_lock); - if (ah->led_pin >= 0) { + if (ah->led_pin >= 0) ath9k_hw_set_gpio(ah, ah->led_pin, (ah->config.led_active_high) ? 0 : 1); - ath9k_hw_gpio_request_in(ah, ah->led_pin, NULL); - } ath_prepare_reset(sc); diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h index c06fdb955787..909c6b5f4f7b 100644 --- a/drivers/net/wireless/ath/ath9k/reg.h +++ b/drivers/net/wireless/ath/ath9k/reg.h @@ -1168,8 +1168,6 @@ enum { #define AR_GPIO_OE_OUT (AR_SREV_9340(ah) ? 0x4030 : \ (AR_SREV_9300_20_OR_LATER(ah) ? 0x4050 : 0x404c)) -#define AR_GPIO_OE_OUT_MASK (AR_SREV_9550_OR_LATER(ah) ? \ - 0x0000000F : 0xFFFFFFFF) #define AR_GPIO_OE_OUT_DRV 0x3 #define AR_GPIO_OE_OUT_DRV_NO 0x0 #define AR_GPIO_OE_OUT_DRV_LOW 0x1 From c8770bcf5cefa8cbfae21c07c4fe3428f5a9d42a Mon Sep 17 00:00:00 2001 From: Miaoqing Pan Date: Mon, 7 Mar 2016 10:38:18 +0800 Subject: [PATCH 0052/1649] ath9k: Allow platform override BTCoex pin Add new platform data to allow override BTCoex default pin. 
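For illustration only (this sketch is not part of the patch, and the GPIO numbers are invented), a board file could hand the override pins to the driver through the new ath9k_platform_data fields roughly like this:

    #include <linux/ath9k_platform.h>

    /* hypothetical board support code; pin numbers are examples only */
    static struct ath9k_platform_data board_wmac_data = {
            /* any non-zero pin makes ath9k_hw_btcoex_pin_init() prefer
             * these over the per-chip defaults */
            .bt_active_pin   = 4,
            .bt_priority_pin = 8,
            .wlan_active_pin = 5,
    };

When all three fields are left at zero the driver keeps the chip defaults, so existing boards are unaffected.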
Signed-off-by: Miaoqing Pan Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath9k/btcoex.c | 45 +++++++++++++++++++------ include/linux/ath9k_platform.h | 4 +++ 2 files changed, 39 insertions(+), 10 deletions(-) diff --git a/drivers/net/wireless/ath/ath9k/btcoex.c b/drivers/net/wireless/ath/ath9k/btcoex.c index 4737aa947f99..95a810ba98ac 100644 --- a/drivers/net/wireless/ath/ath9k/btcoex.c +++ b/drivers/net/wireless/ath/ath9k/btcoex.c @@ -15,6 +15,8 @@ */ #include +#include +#include #include "hw.h" enum ath_bt_mode { @@ -90,6 +92,29 @@ void ath9k_hw_init_btcoex_hw(struct ath_hw *ah, int qnum) } EXPORT_SYMBOL(ath9k_hw_init_btcoex_hw); +static void ath9k_hw_btcoex_pin_init(struct ath_hw *ah, u8 wlanactive_gpio, + u8 btactive_gpio, u8 btpriority_gpio) +{ + struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw; + struct ath9k_platform_data *pdata = ah->dev->platform_data; + + if (btcoex_hw->scheme != ATH_BTCOEX_CFG_2WIRE && + btcoex_hw->scheme != ATH_BTCOEX_CFG_3WIRE) + return; + + /* bt priority GPIO will be ignored by 2 wire scheme */ + if (pdata && (pdata->bt_active_pin || pdata->bt_priority_pin || + pdata->wlan_active_pin)) { + btcoex_hw->btactive_gpio = pdata->bt_active_pin; + btcoex_hw->wlanactive_gpio = pdata->wlan_active_pin; + btcoex_hw->btpriority_gpio = pdata->bt_priority_pin; + } else { + btcoex_hw->btactive_gpio = btactive_gpio; + btcoex_hw->wlanactive_gpio = wlanactive_gpio; + btcoex_hw->btpriority_gpio = btpriority_gpio; + } +} + void ath9k_hw_btcoex_init_scheme(struct ath_hw *ah) { struct ath_common *common = ath9k_hw_common(ah); @@ -107,19 +132,19 @@ void ath9k_hw_btcoex_init_scheme(struct ath_hw *ah) btcoex_hw->scheme = ATH_BTCOEX_CFG_MCI; } else if (AR_SREV_9300_20_OR_LATER(ah)) { btcoex_hw->scheme = ATH_BTCOEX_CFG_3WIRE; - btcoex_hw->btactive_gpio = ATH_BTACTIVE_GPIO_9300; - btcoex_hw->wlanactive_gpio = ATH_WLANACTIVE_GPIO_9300; - btcoex_hw->btpriority_gpio = ATH_BTPRIORITY_GPIO_9300; - } else if (AR_SREV_9280_20_OR_LATER(ah)) { - btcoex_hw->btactive_gpio = ATH_BTACTIVE_GPIO_9280; - btcoex_hw->wlanactive_gpio = ATH_WLANACTIVE_GPIO_9280; - if (AR_SREV_9285(ah)) { + ath9k_hw_btcoex_pin_init(ah, ATH_WLANACTIVE_GPIO_9300, + ATH_BTACTIVE_GPIO_9300, + ATH_BTPRIORITY_GPIO_9300); + } else if (AR_SREV_9280_20_OR_LATER(ah)) { + if (AR_SREV_9285(ah)) btcoex_hw->scheme = ATH_BTCOEX_CFG_3WIRE; - btcoex_hw->btpriority_gpio = ATH_BTPRIORITY_GPIO_9285; - } else { + else btcoex_hw->scheme = ATH_BTCOEX_CFG_2WIRE; - } + + ath9k_hw_btcoex_pin_init(ah, ATH_WLANACTIVE_GPIO_9280, + ATH_BTACTIVE_GPIO_9280, + ATH_BTPRIORITY_GPIO_9285); } } EXPORT_SYMBOL(ath9k_hw_btcoex_init_scheme); diff --git a/include/linux/ath9k_platform.h b/include/linux/ath9k_platform.h index 33eb274cd0e6..e66153d60bd5 100644 --- a/include/linux/ath9k_platform.h +++ b/include/linux/ath9k_platform.h @@ -31,6 +31,10 @@ struct ath9k_platform_data { u32 gpio_mask; u32 gpio_val; + u32 bt_active_pin; + u32 bt_priority_pin; + u32 wlan_active_pin; + bool endian_check; bool is_clk_25mhz; bool tx_gain_buffalo; From 668ae0a3e48ac6811f431915b466514bf167e2f4 Mon Sep 17 00:00:00 2001 From: Miaoqing Pan Date: Mon, 7 Mar 2016 10:38:19 +0800 Subject: [PATCH 0053/1649] ath9k: add bits definition of BTCoex MODE2/3 for SOC chips Add bits definition for AR_BT_COEX_MODE2 and AR_BT_COEX_MODE3, which needed by SOC chips (AR9340, AR9531, AR9550, AR9561). 
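The _S defines above are the shift companions of the masks and are meant to be consumed through the driver's SM()/MS() pack/unpack helpers. A small stand-alone sketch (plain user-space C; the helper macros are re-declared here on the assumption that they mirror the ath9k ones) shows the arithmetic:

    #include <stdio.h>
    #include <stdint.h>

    #define AR_BT_WL_ACTIVE_MODE   0x00c00000   /* 2-bit field */
    #define AR_BT_WL_ACTIVE_MODE_S 22

    /* pack a value into a field / pull it back out */
    #define SM(v, f) (((uint32_t)(v) << f##_S) & f)
    #define MS(v, f) (((uint32_t)(v) & f) >> f##_S)

    int main(void)
    {
            uint32_t mode2 = SM(2, AR_BT_WL_ACTIVE_MODE);

            printf("mode2 = 0x%08x\n", (unsigned)mode2);                       /* 0x00800000 */
            printf("field = %u\n", (unsigned)MS(mode2, AR_BT_WL_ACTIVE_MODE)); /* 2 */
            return 0;
    }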
Signed-off-by: Miaoqing Pan Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath9k/reg.h | 46 ++++++++++++++++++++++------ 1 file changed, 37 insertions(+), 9 deletions(-) diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h index 909c6b5f4f7b..9272ca90632b 100644 --- a/drivers/net/wireless/ath/ath9k/reg.h +++ b/drivers/net/wireless/ath/ath9k/reg.h @@ -1892,15 +1892,33 @@ enum { #define AR9300_BT_WGHT 0xcccc4444 -#define AR_BT_COEX_MODE2 0x817c -#define AR_BT_BCN_MISS_THRESH 0x000000ff -#define AR_BT_BCN_MISS_THRESH_S 0 -#define AR_BT_BCN_MISS_CNT 0x0000ff00 -#define AR_BT_BCN_MISS_CNT_S 8 -#define AR_BT_HOLD_RX_CLEAR 0x00010000 -#define AR_BT_HOLD_RX_CLEAR_S 16 -#define AR_BT_DISABLE_BT_ANT 0x00100000 -#define AR_BT_DISABLE_BT_ANT_S 20 +#define AR_BT_COEX_MODE2 0x817c +#define AR_BT_BCN_MISS_THRESH 0x000000ff +#define AR_BT_BCN_MISS_THRESH_S 0 +#define AR_BT_BCN_MISS_CNT 0x0000ff00 +#define AR_BT_BCN_MISS_CNT_S 8 +#define AR_BT_HOLD_RX_CLEAR 0x00010000 +#define AR_BT_HOLD_RX_CLEAR_S 16 +#define AR_BT_PROTECT_BT_AFTER_WAKEUP 0x00080000 +#define AR_BT_PROTECT_BT_AFTER_WAKEUP_S 19 +#define AR_BT_DISABLE_BT_ANT 0x00100000 +#define AR_BT_DISABLE_BT_ANT_S 20 +#define AR_BT_QUIET_2_WIRE 0x00200000 +#define AR_BT_QUIET_2_WIRE_S 21 +#define AR_BT_WL_ACTIVE_MODE 0x00c00000 +#define AR_BT_WL_ACTIVE_MODE_S 22 +#define AR_BT_WL_TXRX_SEPARATE 0x01000000 +#define AR_BT_WL_TXRX_SEPARATE_S 24 +#define AR_BT_RS_DISCARD_EXTEND 0x02000000 +#define AR_BT_RS_DISCARD_EXTEND_S 25 +#define AR_BT_TSF_BT_ACTIVE_CTRL 0x0c000000 +#define AR_BT_TSF_BT_ACTIVE_CTRL_S 26 +#define AR_BT_TSF_BT_PRIORITY_CTRL 0x30000000 +#define AR_BT_TSF_BT_PRIORITY_CTRL_S 28 +#define AR_BT_INTERRUPT_ENABLE 0x40000000 +#define AR_BT_INTERRUPT_ENABLE_S 30 +#define AR_BT_PHY_ERR_BT_COLL_ENABLE 0x80000000 +#define AR_BT_PHY_ERR_BT_COLL_ENABLE_S 31 #define AR_TXSIFS 0x81d0 #define AR_TXSIFS_TIME 0x000000FF @@ -1909,6 +1927,16 @@ enum { #define AR_TXSIFS_ACK_SHIFT 0x00007000 #define AR_TXSIFS_ACK_SHIFT_S 12 +#define AR_BT_COEX_MODE3 0x81d4 +#define AR_BT_WL_ACTIVE_TIME 0x000000ff +#define AR_BT_WL_ACTIVE_TIME_S 0 +#define AR_BT_WL_QC_TIME 0x0000ff00 +#define AR_BT_WL_QC_TIME_S 8 +#define AR_BT_ALLOW_CONCURRENT_ACCESS 0x000f0000 +#define AR_BT_ALLOW_CONCURRENT_ACCESS_S 16 +#define AR_BT_AGC_SATURATION_CNT_ENABLE 0x00100000 +#define AR_BT_AGC_SATURATION_CNT_ENABLE_S 20 + #define AR_TXOP_X 0x81ec #define AR_TXOP_X_VAL 0x000000FF From dfcf02cd2998e2240b2bc7b4f4412578b8070bdb Mon Sep 17 00:00:00 2001 From: Miaoqing Pan Date: Mon, 7 Mar 2016 10:38:20 +0800 Subject: [PATCH 0054/1649] ath9k: fix BTCoex access invalid registers for SOC chips The registers of AR_GPIO_INPUT_MUX1 and AR_GPIO_PDPU were removed from SOC chips, fix invalid accessing Signed-off-by: Miaoqing Pan Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath9k/btcoex.c | 27 ++++++++++++++----------- 1 file changed, 15 insertions(+), 12 deletions(-) diff --git a/drivers/net/wireless/ath/ath9k/btcoex.c b/drivers/net/wireless/ath/ath9k/btcoex.c index 95a810ba98ac..d46cd319d524 100644 --- a/drivers/net/wireless/ath/ath9k/btcoex.c +++ b/drivers/net/wireless/ath/ath9k/btcoex.c @@ -162,9 +162,10 @@ void ath9k_hw_btcoex_init_2wire(struct ath_hw *ah) AR_GPIO_INPUT_EN_VAL_BT_ACTIVE_BB); /* Set input mux for bt_active to gpio pin */ - REG_RMW_FIELD(ah, AR_GPIO_INPUT_MUX1, - AR_GPIO_INPUT_MUX1_BT_ACTIVE, - btcoex_hw->btactive_gpio); + if (!AR_SREV_SOC(ah)) + REG_RMW_FIELD(ah, AR_GPIO_INPUT_MUX1, + AR_GPIO_INPUT_MUX1_BT_ACTIVE, + 
btcoex_hw->btactive_gpio); /* Configure the desired gpio port for input */ ath9k_hw_gpio_request_in(ah, btcoex_hw->btactive_gpio, @@ -183,13 +184,14 @@ void ath9k_hw_btcoex_init_3wire(struct ath_hw *ah) /* Set input mux for bt_prority_async and * bt_active_async to GPIO pins */ - REG_RMW_FIELD(ah, AR_GPIO_INPUT_MUX1, - AR_GPIO_INPUT_MUX1_BT_ACTIVE, - btcoex_hw->btactive_gpio); - - REG_RMW_FIELD(ah, AR_GPIO_INPUT_MUX1, - AR_GPIO_INPUT_MUX1_BT_PRIORITY, - btcoex_hw->btpriority_gpio); + if (!AR_SREV_SOC(ah)) { + REG_RMW_FIELD(ah, AR_GPIO_INPUT_MUX1, + AR_GPIO_INPUT_MUX1_BT_ACTIVE, + btcoex_hw->btactive_gpio); + REG_RMW_FIELD(ah, AR_GPIO_INPUT_MUX1, + AR_GPIO_INPUT_MUX1_BT_PRIORITY, + btcoex_hw->btpriority_gpio); + } /* Configure the desired GPIO ports for input */ ath9k_hw_gpio_request_in(ah, btcoex_hw->btactive_gpio, @@ -285,13 +287,13 @@ void ath9k_hw_btcoex_set_weight(struct ath_hw *ah, txprio_shift[i-1]); } } + /* Last WLAN weight has to be adjusted wrt tx priority */ if (concur_tx) { btcoex_hw->wlan_weight[i-1] &= ~(0xff << txprio_shift[i-1]); btcoex_hw->wlan_weight[i-1] |= (btcoex_hw->tx_prio[stomp_type] << txprio_shift[i-1]); } - } EXPORT_SYMBOL(ath9k_hw_btcoex_set_weight); @@ -375,7 +377,8 @@ void ath9k_hw_btcoex_enable(struct ath_hw *ah) break; } - if (ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_MCI) { + if (ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_MCI && + !AR_SREV_SOC(ah)) { REG_RMW(ah, AR_GPIO_PDPU, (0x2 << (btcoex_hw->btactive_gpio * 2)), (0x3 << (btcoex_hw->btactive_gpio * 2))); From c7212b7136ba69efb9785df68b669381cb893920 Mon Sep 17 00:00:00 2001 From: Miaoqing Pan Date: Mon, 7 Mar 2016 10:38:21 +0800 Subject: [PATCH 0055/1649] ath9k: fix BTCoex configuration for SOC chips Allow to set wl_active_time and wl_qc_time for SOC chips, also adjust bt_time_extend and bt_first_slot_time. 
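As a concrete example of what this programs (derived only from the values above): with wl_active_time = 0x20, wl_qc_time = 0x20 and the AR_BT_WL_ACTIVE_TIME (mask 0x000000ff, shift 0) and AR_BT_WL_QC_TIME (mask 0x0000ff00, shift 8) fields from the previous patch, the register image is

    bt_coex_mode3 = (0x20 << 0) | (0x20 << 8) = 0x00002020

which is the value written to AR_BT_COEX_MODE3 on SOC chips when 3-wire coex is enabled.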
Signed-off-by: Miaoqing Pan Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath9k/btcoex.c | 31 ++++++++++++++++++++++--- drivers/net/wireless/ath/ath9k/btcoex.h | 1 + 2 files changed, 29 insertions(+), 3 deletions(-) diff --git a/drivers/net/wireless/ath/ath9k/btcoex.c b/drivers/net/wireless/ath/ath9k/btcoex.c index d46cd319d524..618c9df35fc1 100644 --- a/drivers/net/wireless/ath/ath9k/btcoex.c +++ b/drivers/net/wireless/ath/ath9k/btcoex.c @@ -36,6 +36,8 @@ struct ath_btcoex_config { u8 bt_priority_time; u8 bt_first_slot_time; bool bt_hold_rx_clear; + u8 wl_active_time; + u8 wl_qc_time; }; static const u32 ar9003_wlan_weights[ATH_BTCOEX_STOMP_MAX] @@ -67,25 +69,42 @@ void ath9k_hw_init_btcoex_hw(struct ath_hw *ah, int qnum) .bt_priority_time = 2, .bt_first_slot_time = 5, .bt_hold_rx_clear = true, + .wl_active_time = 0x20, + .wl_qc_time = 0x20, }; bool rxclear_polarity = ath_bt_config.bt_rxclear_polarity; + u8 time_extend = ath_bt_config.bt_time_extend; + u8 first_slot_time = ath_bt_config.bt_first_slot_time; if (AR_SREV_9300_20_OR_LATER(ah)) rxclear_polarity = !ath_bt_config.bt_rxclear_polarity; + if (AR_SREV_SOC(ah)) { + first_slot_time = 0x1d; + time_extend = 0xa; + + btcoex_hw->bt_coex_mode3 = + SM(ath_bt_config.wl_active_time, AR_BT_WL_ACTIVE_TIME) | + SM(ath_bt_config.wl_qc_time, AR_BT_WL_QC_TIME); + + btcoex_hw->bt_coex_mode2 = + AR_BT_PROTECT_BT_AFTER_WAKEUP | + AR_BT_PHY_ERR_BT_COLL_ENABLE; + } + btcoex_hw->bt_coex_mode = (btcoex_hw->bt_coex_mode & AR_BT_QCU_THRESH) | - SM(ath_bt_config.bt_time_extend, AR_BT_TIME_EXTEND) | + SM(time_extend, AR_BT_TIME_EXTEND) | SM(ath_bt_config.bt_txstate_extend, AR_BT_TXSTATE_EXTEND) | SM(ath_bt_config.bt_txframe_extend, AR_BT_TX_FRAME_EXTEND) | SM(ath_bt_config.bt_mode, AR_BT_MODE) | SM(ath_bt_config.bt_quiet_collision, AR_BT_QUIET) | SM(rxclear_polarity, AR_BT_RX_CLEAR_POLARITY) | SM(ath_bt_config.bt_priority_time, AR_BT_PRIORITY_TIME) | - SM(ath_bt_config.bt_first_slot_time, AR_BT_FIRST_SLOT_TIME) | + SM(first_slot_time, AR_BT_FIRST_SLOT_TIME) | SM(qnum, AR_BT_QCU_THRESH); - btcoex_hw->bt_coex_mode2 = + btcoex_hw->bt_coex_mode2 |= SM(ath_bt_config.bt_hold_rx_clear, AR_BT_HOLD_RX_CLEAR) | SM(ATH_BTCOEX_BMISS_THRESH, AR_BT_BCN_MISS_THRESH) | AR_BT_DISABLE_BT_ANT; @@ -308,9 +327,15 @@ static void ath9k_hw_btcoex_enable_3wire(struct ath_hw *ah) * Program coex mode and weight registers to * enable coex 3-wire */ + if (AR_SREV_SOC(ah)) + REG_CLR_BIT(ah, AR_BT_COEX_MODE2, AR_BT_PHY_ERR_BT_COLL_ENABLE); + REG_WRITE(ah, AR_BT_COEX_MODE, btcoex->bt_coex_mode); REG_WRITE(ah, AR_BT_COEX_MODE2, btcoex->bt_coex_mode2); + if (AR_SREV_SOC(ah)) + REG_WRITE(ah, AR_BT_COEX_MODE3, btcoex->bt_coex_mode3); + if (AR_SREV_9300_20_OR_LATER(ah)) { REG_WRITE(ah, AR_BT_COEX_WL_WEIGHTS0, btcoex->wlan_weight[0]); REG_WRITE(ah, AR_BT_COEX_WL_WEIGHTS1, btcoex->wlan_weight[1]); diff --git a/drivers/net/wireless/ath/ath9k/btcoex.h b/drivers/net/wireless/ath/ath9k/btcoex.h index 0f7c4e61ac13..1bdfa8465b92 100644 --- a/drivers/net/wireless/ath/ath9k/btcoex.h +++ b/drivers/net/wireless/ath/ath9k/btcoex.h @@ -115,6 +115,7 @@ struct ath_btcoex_hw { u32 bt_coex_mode; /* Register setting for AR_BT_COEX_MODE */ u32 bt_coex_weights; /* Register setting for AR_BT_COEX_WEIGHT */ u32 bt_coex_mode2; /* Register setting for AR_BT_COEX_MODE2 */ + u32 bt_coex_mode3; /* Register setting for AR_BT_COEX_MODE3 */ u32 bt_weight[AR9300_NUM_BT_WEIGHTS]; u32 wlan_weight[AR9300_NUM_WLAN_WEIGHTS]; u8 tx_prio[ATH_BTCOEX_STOMP_MAX]; From c9b260a684d0493238433e08fc2ac7865a89aece Mon Sep 17 00:00:00 
2001 From: Steve deRosier Date: Mon, 7 Mar 2016 16:58:50 -0800 Subject: [PATCH 0056/1649] ath6kl: ignore WMI_TXE_NOTIFY_EVENTID based on fw capability flags Certain 6004 firmware releases redefine the WMI_TXE_NOTIFY_EVENTID event number and sends the new event frequently. However it doesn't have the tx-err-notify feature and thus this firmware capability flag isn't set on the firmware package. By guarding the processing of this event by the same method we guard the sending of the WMI_SET_TXE_NOTIFY_CMDID command, we can ignore the spurious event that we don't know how to process. Without this change we call cfg80211_cqm_txe_notify() with possibly bad data. Signed-off-by: Steve deRosier Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath6kl/wmi.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/net/wireless/ath/ath6kl/wmi.c b/drivers/net/wireless/ath/ath6kl/wmi.c index a5e1de75a4a3..0b3e9c0293e0 100644 --- a/drivers/net/wireless/ath/ath6kl/wmi.c +++ b/drivers/net/wireless/ath/ath6kl/wmi.c @@ -1584,6 +1584,11 @@ static int ath6kl_wmi_txe_notify_event_rx(struct wmi *wmi, u8 *datap, int len, if (len < sizeof(*ev)) return -EINVAL; + if (vif->nw_type != INFRA_NETWORK || + !test_bit(ATH6KL_FW_CAPABILITY_TX_ERR_NOTIFY, + vif->ar->fw_capabilities)) + return -EOPNOTSUPP; + if (vif->sme_state != SME_CONNECTED) return -ENOTCONN; From 181c007dedacefaaf634b72b1f52c3b0415f87c1 Mon Sep 17 00:00:00 2001 From: Miaoqing Pan Date: Tue, 8 Mar 2016 11:19:37 +0800 Subject: [PATCH 0057/1649] ath9k: fix reg dump data bus error Changes: - restrict only dump MAC registers - skip the register memory holes Data bus error, epc == 831d4040, ra == 831d403c Oops[#1]: CPU: 0 PID: 1536 Comm: cat Not tainted 3.14.0 #3 task: 82f87840 ti: 82f88000 task.ti: 82f88000 $ 0 : 00000000 00000001 deadc0de 1000fc03 $ 4 : b8100200 00000200 831e0000 80218788 $ 8 : 00000030 00000003 00000001 09524547 $12 : 00000000 810594f4 00000000 3a206d61 $16 : 831dd3c0 00000081 00000a00 c05ff000 $20 : 00005af6 00000200 00071b39 00071139 $24 : 00000001 80217760 $28 : 82f88000 82f89c60 c05ffa00 831d403c Hi : 00000000 Lo : 453c0000 epc : 831d4040 ath_ahb_exit+0x2198/0x2904 [ath9k] Not tainted ra : 831d403c ath_ahb_exit+0x2194/0x2904 [ath9k] Status: 1000fc03 KERNEL EXL IE Cause : 4080801c PrId : 00019374 (MIPS 24Kc) Stack : 00000001 00000000 0000000e 80475c60 0000000e 800a8ebc 00000000 00000000 00000001 00000007 00000000 800a9678 00000000 00000004 00000002 00000010 00000000 00000000 00000000 00000000 80475c60 0000000e 000009ec c05ff000 831dd3c0 00000080 00000a00 c05ff000 00005af6 00000200 00071b39 0007114d c05ff9ec 800a9904 831dd3c0 82f89d10 00000001 81082194 831d8f0c 82f89d14 ... 
Call Trace: [<831d4040>] ath_ahb_exit+0x2198/0x2904 [ath9k] [<831d403c>] ath_ahb_exit+0x2194/0x2904 [ath9k] Signed-off-by: Miaoqing Pan Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath9k/debug.c | 24 +++++++++++++++++++++--- 1 file changed, 21 insertions(+), 3 deletions(-) diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c index 6de64cface3c..c56e40ff35e5 100644 --- a/drivers/net/wireless/ath/ath9k/debug.c +++ b/drivers/net/wireless/ath/ath9k/debug.c @@ -916,10 +916,21 @@ static int open_file_regdump(struct inode *inode, struct file *file) struct ath_softc *sc = inode->i_private; unsigned int len = 0; u8 *buf; - int i; + int i, j = 0; unsigned long num_regs, regdump_len, max_reg_offset; + const struct reg_hole { + u32 start; + u32 end; + } reg_hole_list[] = { + {0x0200, 0x07fc}, + {0x0c00, 0x0ffc}, + {0x2000, 0x3ffc}, + {0x4100, 0x6ffc}, + {0x705c, 0x7ffc}, + {0x0000, 0x0000} + }; - max_reg_offset = AR_SREV_9300_20_OR_LATER(sc->sc_ah) ? 0x16bd4 : 0xb500; + max_reg_offset = AR_SREV_9300_20_OR_LATER(sc->sc_ah) ? 0x8800 : 0xb500; num_regs = max_reg_offset / 4 + 1; regdump_len = num_regs * REGDUMP_LINE_SIZE + 1; buf = vmalloc(regdump_len); @@ -927,9 +938,16 @@ static int open_file_regdump(struct inode *inode, struct file *file) return -ENOMEM; ath9k_ps_wakeup(sc); - for (i = 0; i < num_regs; i++) + for (i = 0; i < num_regs; i++) { + if (reg_hole_list[j].start == i << 2) { + i = reg_hole_list[j].end >> 2; + j++; + continue; + } + len += scnprintf(buf + len, regdump_len - len, "0x%06x 0x%08x\n", i << 2, REG_READ(sc->sc_ah, i << 2)); + } ath9k_ps_restore(sc); file->private_data = buf; From 2ce9b25cefa64f11bcb21b21cf4a5e8c58c6d0af Mon Sep 17 00:00:00 2001 From: Rajkumar Manoharan Date: Tue, 8 Mar 2016 22:57:23 +0530 Subject: [PATCH 0058/1649] ath10k: handle channel change htt event Whenever firmware is configuring operating channel during scan or home channel, channel change event will be indicated to host. In some cases (device probe/ last vdev down), target will be configured to default channel whereas host is unaware of target's operating channel. This leads to packet drop due to unknown channel and kernel log will be filled up with "no channel configured; ignoring frame(s)!". Fix that by handling HTT_T2H_MSG_TYPE_CHAN_CHANGE event. 
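To make the effect concrete (an illustrative walk-through, not new code): a chan_change event carrying freq = 5180 and phymode = MODE_11AC_VHT80 (a 5 GHz mode in the phy_mode_to_band() mapping moved below) resolves through __ieee80211_get_channel() to the 5 GHz channel 36 ieee80211_channel, which is cached in ar->tgt_oper_chan; rx frames that match neither a peer nor a vdev channel then fall back to that cached channel instead of being dropped with the "no channel configured" warning.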
Signed-off-by: Rajkumar Manoharan Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/core.h | 3 ++ drivers/net/wireless/ath/ath10k/htt.h | 9 ++++++ drivers/net/wireless/ath/ath10k/htt_rx.c | 41 +++++++++++++++++++++++- drivers/net/wireless/ath/ath10k/wmi.c | 28 ---------------- 4 files changed, 52 insertions(+), 29 deletions(-) diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h index 23ba03fb7a5f..bb5f7e22fc1e 100644 --- a/drivers/net/wireless/ath/ath10k/core.h +++ b/drivers/net/wireless/ath/ath10k/core.h @@ -766,6 +766,9 @@ struct ath10k { /* current operating channel definition */ struct cfg80211_chan_def chandef; + /* currently configured operating channel in firmware */ + struct ieee80211_channel *tgt_oper_chan; + unsigned long long free_vdev_map; struct ath10k_vif *monitor_arvif; bool monitor; diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h index 02cf55d306e8..3583fd99df48 100644 --- a/drivers/net/wireless/ath/ath10k/htt.h +++ b/drivers/net/wireless/ath/ath10k/htt.h @@ -1461,6 +1461,14 @@ struct htt_tx_mode_switch_ind { struct htt_tx_mode_switch_record records[0]; } __packed; +struct htt_channel_change { + u8 pad[3]; + __le32 freq; + __le32 center_freq1; + __le32 center_freq2; + __le32 phymode; +} __packed; + union htt_rx_pn_t { /* WEP: 24-bit PN */ u32 pn24; @@ -1511,6 +1519,7 @@ struct htt_resp { struct htt_tx_fetch_ind tx_fetch_ind; struct htt_tx_fetch_confirm tx_fetch_confirm; struct htt_tx_mode_switch_ind tx_mode_switch_ind; + struct htt_channel_change chan_change; }; } __packed; diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c index 84b060efa1b5..24fe3b6b5eb6 100644 --- a/drivers/net/wireless/ath/ath10k/htt_rx.c +++ b/drivers/net/wireless/ath/ath10k/htt_rx.c @@ -862,6 +862,8 @@ static bool ath10k_htt_rx_h_channel(struct ath10k *ar, ch = ath10k_htt_rx_h_vdev_channel(ar, vdev_id); if (!ch) ch = ath10k_htt_rx_h_any_channel(ar); + if (!ch) + ch = ar->tgt_oper_chan; spin_unlock_bh(&ar->data_lock); if (!ch) @@ -2257,6 +2259,34 @@ static void ath10k_htt_rx_tx_mode_switch_ind(struct ath10k *ar, ath10k_mac_tx_push_pending(ar); } +static inline enum ieee80211_band phy_mode_to_band(u32 phy_mode) +{ + enum ieee80211_band band; + + switch (phy_mode) { + case MODE_11A: + case MODE_11NA_HT20: + case MODE_11NA_HT40: + case MODE_11AC_VHT20: + case MODE_11AC_VHT40: + case MODE_11AC_VHT80: + band = IEEE80211_BAND_5GHZ; + break; + case MODE_11G: + case MODE_11B: + case MODE_11GONLY: + case MODE_11NG_HT20: + case MODE_11NG_HT40: + case MODE_11AC_VHT20_2G: + case MODE_11AC_VHT40_2G: + case MODE_11AC_VHT80_2G: + default: + band = IEEE80211_BAND_2GHZ; + } + + return band; +} + void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb) { struct ath10k_htt *htt = &ar->htt; @@ -2391,8 +2421,17 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb) } case HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND: break; - case HTT_T2H_MSG_TYPE_CHAN_CHANGE: + case HTT_T2H_MSG_TYPE_CHAN_CHANGE: { + u32 phymode = __le32_to_cpu(resp->chan_change.phymode); + u32 freq = __le32_to_cpu(resp->chan_change.freq); + + ar->tgt_oper_chan = + __ieee80211_get_channel(ar->hw->wiphy, freq); + ath10k_dbg(ar, ATH10K_DBG_HTT, + "htt chan change freq %u phymode %s\n", + freq, ath10k_wmi_phymode_str(phymode)); break; + } case HTT_T2H_MSG_TYPE_AGGR_CONF: break; case HTT_T2H_MSG_TYPE_TX_FETCH_IND: diff --git a/drivers/net/wireless/ath/ath10k/wmi.c 
b/drivers/net/wireless/ath/ath10k/wmi.c index c2608946f773..91375664dc35 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.c +++ b/drivers/net/wireless/ath/ath10k/wmi.c @@ -2099,34 +2099,6 @@ int ath10k_wmi_event_scan(struct ath10k *ar, struct sk_buff *skb) return 0; } -static inline enum ieee80211_band phy_mode_to_band(u32 phy_mode) -{ - enum ieee80211_band band; - - switch (phy_mode) { - case MODE_11A: - case MODE_11NA_HT20: - case MODE_11NA_HT40: - case MODE_11AC_VHT20: - case MODE_11AC_VHT40: - case MODE_11AC_VHT80: - band = IEEE80211_BAND_5GHZ; - break; - case MODE_11G: - case MODE_11B: - case MODE_11GONLY: - case MODE_11NG_HT20: - case MODE_11NG_HT40: - case MODE_11AC_VHT20_2G: - case MODE_11AC_VHT40_2G: - case MODE_11AC_VHT80_2G: - default: - band = IEEE80211_BAND_2GHZ; - } - - return band; -} - /* If keys are configured, HW decrypts all frames * with protected bit set. Mark such frames as decrypted. */ From cac085524cf16434ac1d42427a8644cf532d3e87 Mon Sep 17 00:00:00 2001 From: Rajkumar Manoharan Date: Wed, 9 Mar 2016 20:25:46 +0530 Subject: [PATCH 0059/1649] ath10k: move mgmt descriptor limit handle under mgmt_tx Frames that are transmitted via MGMT_TX are using reserved descriptor slots in firmware. This limitation is for the htt_mgmt_tx path itself, not for mgmt frames per se. In a 16-MBSSID scenario, these reserved slots will be easily exhausted due to frequent probe responses. So for 10.4 based solutions, probe responses are limited by a threshold (24). The management tx path is separate for all except TLV based solutions. Since TLV solutions (qca6174 & qca9377) do not support 16 AP interfaces, it is safe to move the management descriptor limitation check into the mgmt_tx function. Though the CPU improvement is negligible, unlikely or never-hit conditions in the hot path can be avoided on data transmission. 
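The resulting split in the accounting can be sketched as follows (a simplified view of the new ordering on the htt tx path, not the literal driver code; "drop" stands for freeing the skb and returning):

    /* under htt->tx_lock */
    ret = ath10k_htt_tx_inc_pending(htt);           /* global tx slots */
    if (ret)
            goto drop;

    ret = ath10k_htt_tx_mgmt_inc_pending(htt, is_mgmt, is_presp);
    if (ret) {                                      /* probe-resp budget */
            ath10k_htt_tx_dec_pending(htt);         /* unwind first count */
            goto drop;
    }

    /* on completion or submit failure the counters are released the same
     * way: dec_pending(), plus mgmt_dec_pending() only for frames that
     * really went out via the htt mgmt path */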
Signed-off-by: Rajkumar Manoharan Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/htt.h | 10 ++--- drivers/net/wireless/ath/ath10k/htt_rx.c | 7 +++- drivers/net/wireless/ath/ath10k/htt_tx.c | 50 +++++++++++++++--------- drivers/net/wireless/ath/ath10k/mac.c | 26 +++++++----- drivers/net/wireless/ath/ath10k/txrx.c | 18 ++++----- drivers/net/wireless/ath/ath10k/txrx.h | 4 +- 6 files changed, 68 insertions(+), 47 deletions(-) diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h index 3583fd99df48..d196bcc50e50 100644 --- a/drivers/net/wireless/ath/ath10k/htt.h +++ b/drivers/net/wireless/ath/ath10k/htt.h @@ -1776,11 +1776,11 @@ void ath10k_htt_tx_txq_update(struct ieee80211_hw *hw, void ath10k_htt_tx_txq_recalc(struct ieee80211_hw *hw, struct ieee80211_txq *txq); void ath10k_htt_tx_txq_sync(struct ath10k *ar); -void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt, - bool is_mgmt); -int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt, - bool is_mgmt, - bool is_presp); +void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt); +int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt); +void ath10k_htt_tx_mgmt_dec_pending(struct ath10k_htt *htt); +int ath10k_htt_tx_mgmt_inc_pending(struct ath10k_htt *htt, bool is_mgmt, + bool is_presp); int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb); void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id); diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c index 24fe3b6b5eb6..06975bf49351 100644 --- a/drivers/net/wireless/ath/ath10k/htt_rx.c +++ b/drivers/net/wireless/ath/ath10k/htt_rx.c @@ -2354,7 +2354,12 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb) break; } - ath10k_txrx_tx_unref(htt, &tx_done); + status = ath10k_txrx_tx_unref(htt, &tx_done); + if (!status) { + spin_lock_bh(&htt->tx_lock); + ath10k_htt_tx_mgmt_dec_pending(htt); + spin_unlock_bh(&htt->tx_lock); + } ath10k_mac_tx_push_pending(ar); break; } diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c index a30c34eae0a7..b2ae122381ca 100644 --- a/drivers/net/wireless/ath/ath10k/htt_tx.c +++ b/drivers/net/wireless/ath/ath10k/htt_tx.c @@ -149,39 +149,22 @@ void ath10k_htt_tx_txq_update(struct ieee80211_hw *hw, spin_unlock_bh(&ar->htt.tx_lock); } -void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt, - bool is_mgmt) +void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt) { lockdep_assert_held(&htt->tx_lock); - if (is_mgmt) - htt->num_pending_mgmt_tx--; - htt->num_pending_tx--; if (htt->num_pending_tx == htt->max_num_pending_tx - 1) ath10k_mac_tx_unlock(htt->ar, ATH10K_TX_PAUSE_Q_FULL); } -int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt, - bool is_mgmt, - bool is_presp) +int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt) { - struct ath10k *ar = htt->ar; - lockdep_assert_held(&htt->tx_lock); if (htt->num_pending_tx >= htt->max_num_pending_tx) return -EBUSY; - if (is_mgmt && - is_presp && - ar->hw_params.max_probe_resp_desc_thres && - ar->hw_params.max_probe_resp_desc_thres < htt->num_pending_mgmt_tx) - return -EBUSY; - - if (is_mgmt) - htt->num_pending_mgmt_tx++; - htt->num_pending_tx++; if (htt->num_pending_tx == htt->max_num_pending_tx) ath10k_mac_tx_lock(htt->ar, ATH10K_TX_PAUSE_Q_FULL); @@ -189,6 +172,35 @@ int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt, return 0; } +int ath10k_htt_tx_mgmt_inc_pending(struct ath10k_htt *htt, bool is_mgmt, + bool is_presp) +{ + struct ath10k *ar = 
htt->ar; + + lockdep_assert_held(&htt->tx_lock); + + if (!is_mgmt || !ar->hw_params.max_probe_resp_desc_thres) + return 0; + + if (is_presp && + ar->hw_params.max_probe_resp_desc_thres < htt->num_pending_mgmt_tx) + return -EBUSY; + + htt->num_pending_mgmt_tx++; + + return 0; +} + +void ath10k_htt_tx_mgmt_dec_pending(struct ath10k_htt *htt) +{ + lockdep_assert_held(&htt->tx_lock); + + if (!htt->ar->hw_params.max_probe_resp_desc_thres) + return; + + htt->num_pending_mgmt_tx--; +} + int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb) { struct ath10k *ar = htt->ar; diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index ebff9c0a0784..209c13d113a7 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -3699,8 +3699,6 @@ static bool ath10k_mac_tx_can_push(struct ieee80211_hw *hw, int ath10k_mac_tx_push_txq(struct ieee80211_hw *hw, struct ieee80211_txq *txq) { - const bool is_mgmt = false; - const bool is_presp = false; struct ath10k *ar = hw->priv; struct ath10k_htt *htt = &ar->htt; struct ath10k_txq *artxq = (void *)txq->drv_priv; @@ -3713,7 +3711,7 @@ int ath10k_mac_tx_push_txq(struct ieee80211_hw *hw, int ret; spin_lock_bh(&ar->htt.tx_lock); - ret = ath10k_htt_tx_inc_pending(htt, is_mgmt, is_presp); + ret = ath10k_htt_tx_inc_pending(htt); spin_unlock_bh(&ar->htt.tx_lock); if (ret) @@ -3722,7 +3720,7 @@ int ath10k_mac_tx_push_txq(struct ieee80211_hw *hw, skb = ieee80211_tx_dequeue(hw, txq); if (!skb) { spin_lock_bh(&ar->htt.tx_lock); - ath10k_htt_tx_dec_pending(htt, is_mgmt); + ath10k_htt_tx_dec_pending(htt); spin_unlock_bh(&ar->htt.tx_lock); return -ENOENT; @@ -3739,7 +3737,7 @@ int ath10k_mac_tx_push_txq(struct ieee80211_hw *hw, ath10k_warn(ar, "failed to push frame: %d\n", ret); spin_lock_bh(&ar->htt.tx_lock); - ath10k_htt_tx_dec_pending(htt, is_mgmt); + ath10k_htt_tx_dec_pending(htt); spin_unlock_bh(&ar->htt.tx_lock); return ret; @@ -3978,14 +3976,13 @@ static void ath10k_mac_op_tx(struct ieee80211_hw *hw, txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode); is_htt = (txpath == ATH10K_MAC_TX_HTT || txpath == ATH10K_MAC_TX_HTT_MGMT); + is_mgmt = (txpath == ATH10K_MAC_TX_HTT_MGMT); if (is_htt) { spin_lock_bh(&ar->htt.tx_lock); - - is_mgmt = ieee80211_is_mgmt(hdr->frame_control); is_presp = ieee80211_is_probe_resp(hdr->frame_control); - ret = ath10k_htt_tx_inc_pending(htt, is_mgmt, is_presp); + ret = ath10k_htt_tx_inc_pending(htt); if (ret) { ath10k_warn(ar, "failed to increase tx pending count: %d, dropping\n", ret); @@ -3994,6 +3991,15 @@ static void ath10k_mac_op_tx(struct ieee80211_hw *hw, return; } + ret = ath10k_htt_tx_mgmt_inc_pending(htt, is_mgmt, is_presp); + if (ret) { + ath10k_warn(ar, "failed to increase tx mgmt pending count: %d, dropping\n", + ret); + ath10k_htt_tx_dec_pending(htt); + spin_unlock_bh(&ar->htt.tx_lock); + ieee80211_free_txskb(ar->hw, skb); + return; + } spin_unlock_bh(&ar->htt.tx_lock); } @@ -4002,7 +4008,9 @@ static void ath10k_mac_op_tx(struct ieee80211_hw *hw, ath10k_warn(ar, "failed to transmit frame: %d\n", ret); if (is_htt) { spin_lock_bh(&ar->htt.tx_lock); - ath10k_htt_tx_dec_pending(htt, is_mgmt); + ath10k_htt_tx_dec_pending(htt); + if (is_mgmt) + ath10k_htt_tx_mgmt_dec_pending(htt); spin_unlock_bh(&ar->htt.tx_lock); } return; diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c index ea4d3000c8c3..48e26cdfe9a5 100644 --- a/drivers/net/wireless/ath/ath10k/txrx.c +++ b/drivers/net/wireless/ath/ath10k/txrx.c @@ 
-49,8 +49,8 @@ out: spin_unlock_bh(&ar->data_lock); } -void ath10k_txrx_tx_unref(struct ath10k_htt *htt, - const struct htt_tx_done *tx_done) +int ath10k_txrx_tx_unref(struct ath10k_htt *htt, + const struct htt_tx_done *tx_done) { struct ath10k *ar = htt->ar; struct device *dev = ar->dev; @@ -59,7 +59,6 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt, struct ath10k_skb_cb *skb_cb; struct ath10k_txq *artxq; struct sk_buff *msdu; - bool limit_mgmt_desc = false; ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx completion msdu_id %u discard %d no_ack %d success %d\n", @@ -69,7 +68,7 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt, if (tx_done->msdu_id >= htt->max_num_pending_tx) { ath10k_warn(ar, "warning: msdu_id %d too big, ignoring\n", tx_done->msdu_id); - return; + return -EINVAL; } spin_lock_bh(&htt->tx_lock); @@ -78,22 +77,18 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt, ath10k_warn(ar, "received tx completion for invalid msdu_id: %d\n", tx_done->msdu_id); spin_unlock_bh(&htt->tx_lock); - return; + return -ENOENT; } skb_cb = ATH10K_SKB_CB(msdu); txq = skb_cb->txq; artxq = (void *)txq->drv_priv; - if (unlikely(skb_cb->flags & ATH10K_SKB_F_MGMT) && - ar->hw_params.max_probe_resp_desc_thres) - limit_mgmt_desc = true; - if (txq) artxq->num_fw_queued--; ath10k_htt_tx_free_msdu_id(htt, tx_done->msdu_id); - ath10k_htt_tx_dec_pending(htt, limit_mgmt_desc); + ath10k_htt_tx_dec_pending(htt); if (htt->num_pending_tx == 0) wake_up(&htt->empty_tx_wq); spin_unlock_bh(&htt->tx_lock); @@ -108,7 +103,7 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt, if (tx_done->discard) { ieee80211_free_txskb(htt->ar->hw, msdu); - return; + return 0; } if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) @@ -122,6 +117,7 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt, ieee80211_tx_status(htt->ar->hw, msdu); /* we do not own the msdu anymore */ + return 0; } struct ath10k_peer *ath10k_peer_find(struct ath10k *ar, int vdev_id, diff --git a/drivers/net/wireless/ath/ath10k/txrx.h b/drivers/net/wireless/ath/ath10k/txrx.h index a90e09f5c7f2..e7ea1ae1c438 100644 --- a/drivers/net/wireless/ath/ath10k/txrx.h +++ b/drivers/net/wireless/ath/ath10k/txrx.h @@ -19,8 +19,8 @@ #include "htt.h" -void ath10k_txrx_tx_unref(struct ath10k_htt *htt, - const struct htt_tx_done *tx_done); +int ath10k_txrx_tx_unref(struct ath10k_htt *htt, + const struct htt_tx_done *tx_done); struct ath10k_peer *ath10k_peer_find(struct ath10k *ar, int vdev_id, const u8 *addr); From b9c191be3fbdd9d78be11160dd7a3ddb9fdc6d42 Mon Sep 17 00:00:00 2001 From: Raja Mani Date: Thu, 10 Mar 2016 10:25:07 +0530 Subject: [PATCH 0060/1649] ath10k: free cached fw bin contents when get board id fails ath10k_core_probe_fw() simply returns error without freeing cached firmware file content when get board id operation fails. Free cached fw bin data in failure case to avoid memory leak. 
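The shape of the fix is the usual goto-unwind idiom; a tiny self-contained illustration (generic C, not the ath10k code, with a stub standing in for the board-id query):

    #include <stdlib.h>

    static int get_board_id(void) { return -1; }    /* stub: pretend it fails */

    static int probe(void)
    {
            char *fw = malloc(1024);        /* stands in for the cached fw image */
            int ret;

            if (!fw)
                    return -1;

            ret = get_board_id();
            if (ret)
                    goto err_free_fw;       /* a bare "return ret" here would leak fw */

            free(fw);                       /* toy only; the driver keeps it longer */
            return 0;

    err_free_fw:
            free(fw);
            return ret;
    }

    int main(void) { return probe() ? 1 : 0; }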
Fixes: db0984e51a18 ("ath10k: select board data based on BMI chip id and board id") Signed-off-by: Raja Mani Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c index 2389c0713c13..d5d0b88aa5fe 100644 --- a/drivers/net/wireless/ath/ath10k/core.c +++ b/drivers/net/wireless/ath/ath10k/core.c @@ -1839,7 +1839,7 @@ static int ath10k_core_probe_fw(struct ath10k *ar) if (ret && ret != -EOPNOTSUPP) { ath10k_err(ar, "failed to get board id from otp: %d\n", ret); - return ret; + goto err_free_firmware_files; } ret = ath10k_core_fetch_board_file(ar); From 9ddc486aa09a3413a6c492fcf160ce61bfccb7b1 Mon Sep 17 00:00:00 2001 From: Anilkumar Kolli Date: Fri, 11 Mar 2016 11:46:39 +0530 Subject: [PATCH 0061/1649] ath10k: fix debugfs pktlog_filter write It is observed that, we are disabling the packet log if we write same value to the pktlog_filter for the second time. Always enable pktlogs on non zero filter. Fixes: 90174455ae05 ("ath10k: add support to configure pktlog filter") Cc: stable@vger.kernel.org Signed-off-by: Anilkumar Kolli Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/debug.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c index 076d29b53ddf..0f834646e6a7 100644 --- a/drivers/net/wireless/ath/ath10k/debug.c +++ b/drivers/net/wireless/ath/ath10k/debug.c @@ -2019,7 +2019,12 @@ static ssize_t ath10k_write_pktlog_filter(struct file *file, goto out; } - if (filter && (filter != ar->debug.pktlog_filter)) { + if (filter == ar->debug.pktlog_filter) { + ret = count; + goto out; + } + + if (filter) { ret = ath10k_wmi_pdev_pktlog_enable(ar, filter); if (ret) { ath10k_warn(ar, "failed to enable pktlog filter %x: %d\n", From bf031bc4d6a168029b8a640373fd00837746b47d Mon Sep 17 00:00:00 2001 From: Vasanthakumar Thiagarajan Date: Tue, 15 Mar 2016 15:25:53 +0530 Subject: [PATCH 0062/1649] ath10k: advertise force AP scan feature Results obtained from scan can be used for spectrum management by doing something like building information of preferred channel lists and sharing them with stations around. It is to be noted that traffic to the connected stations would be affected during the scan. Signed-off-by: Vasanthakumar Thiagarajan Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/mac.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index 209c13d113a7..eedfe821700b 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -7705,7 +7705,8 @@ int ath10k_mac_register(struct ath10k *ar) ar->hw->wiphy->max_remain_on_channel_duration = 5000; ar->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD; - ar->hw->wiphy->features |= NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE; + ar->hw->wiphy->features |= NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE | + NL80211_FEATURE_AP_SCAN; ar->hw->wiphy->max_ap_assoc_sta = ar->max_num_stations; From 01d11cd12d85e88518eb884891e115cb4bf696a2 Mon Sep 17 00:00:00 2001 From: Sara Sharon Date: Wed, 9 Mar 2016 17:38:47 +0200 Subject: [PATCH 0063/1649] iwlwifi: pcie: clear trans reference on queue stop Currently when stop flow is performed, there might be transport TX RTPM references that are not freed in case we unmap a queue that still has packets not reclaimed. Fix that. 
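Roughly speaking, a TX queue that still holds un-reclaimed frames keeps a runtime-PM reference on the transport that is normally dropped when the last frame is reclaimed; the fix below makes the unmap path drop it as well. A condensed sketch of the new tail of iwl_pcie_txq_unmap() (reg_lock handling omitted for brevity):

    while (q->read_ptr != q->write_ptr) {
            iwl_pcie_txq_free_tfd(trans, txq);
            q->read_ptr = iwl_queue_inc_wrap(q->read_ptr);

            if (q->read_ptr == q->write_ptr) {      /* queue just drained */
                    if (txq_id != trans_pcie->cmd_queue)
                            iwl_trans_pcie_unref(trans);
                    else
                            iwl_pcie_clear_cmd_in_flight(trans);
            }
    }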
Signed-off-by: Sara Sharon Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/pcie/tx.c | 59 ++++++++++++-------- 1 file changed, 36 insertions(+), 23 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c index 16ad820ca824..cc6fa00d350b 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c @@ -596,6 +596,28 @@ static void iwl_pcie_free_tso_page(struct sk_buff *skb) } } +static void iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + + lockdep_assert_held(&trans_pcie->reg_lock); + + if (trans_pcie->ref_cmd_in_flight) { + trans_pcie->ref_cmd_in_flight = false; + IWL_DEBUG_RPM(trans, "clear ref_cmd_in_flight - unref\n"); + iwl_trans_pcie_unref(trans); + } + + if (!trans->cfg->base_params->apmg_wake_up_wa) + return; + if (WARN_ON(!trans_pcie->cmd_hold_nic_awake)) + return; + + trans_pcie->cmd_hold_nic_awake = false; + __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); +} + /* * iwl_pcie_txq_unmap - Unmap any remaining DMA mappings and free skb's */ @@ -620,6 +642,20 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id) } iwl_pcie_txq_free_tfd(trans, txq); q->read_ptr = iwl_queue_inc_wrap(q->read_ptr); + + if (q->read_ptr == q->write_ptr) { + unsigned long flags; + + spin_lock_irqsave(&trans_pcie->reg_lock, flags); + if (txq_id != trans_pcie->cmd_queue) { + IWL_DEBUG_RPM(trans, "Q %d - last tx freed\n", + q->id); + iwl_trans_pcie_unref(trans); + } else { + iwl_pcie_clear_cmd_in_flight(trans); + } + spin_unlock_irqrestore(&trans_pcie->reg_lock, flags); + } } txq->active = false; @@ -1148,29 +1184,6 @@ static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans, return 0; } -static int iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans) -{ - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - - lockdep_assert_held(&trans_pcie->reg_lock); - - if (trans_pcie->ref_cmd_in_flight) { - trans_pcie->ref_cmd_in_flight = false; - IWL_DEBUG_RPM(trans, "clear ref_cmd_in_flight - unref\n"); - iwl_trans_pcie_unref(trans); - } - - if (trans->cfg->base_params->apmg_wake_up_wa) { - if (WARN_ON(!trans_pcie->cmd_hold_nic_awake)) - return 0; - - trans_pcie->cmd_hold_nic_awake = false; - __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, - CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); - } - return 0; -} - /* * iwl_pcie_cmdq_reclaim - Reclaim TX command queue entries already Tx'd * From c94d7996db64582770103b178ea4060516d7e646 Mon Sep 17 00:00:00 2001 From: Matti Gottlieb Date: Wed, 9 Mar 2016 10:54:10 +0200 Subject: [PATCH 0064/1649] iwlwifi: mvm: Decrease size of the paging download buffer Currently the driver has 2 buffers for paging: 1. paging db - this contains all of the pages that were in the FW image, that the driver stores for the FW. This is allocated for each block separately (not contiguous). 2. download buffer - we need to provide this empty buffer for the iwl_sdio_load_fw_chunk function to copy the requested pages to the shared memory. This is one big buffer of contiguous memory whose size is the size of all the blocks that the fw paging section can contain. This download buffer size is too big, and causes the allocation to fail sometimes. 
Since the driver allocates memory for each block separately, it is not possible for the FW to request all of the pages in one request (the FW gives an address and size, so blocks need to be contiguous for this to happen), therefore the FW is limited to request only one block. Decrease the size of the paging download buffer to be the size of a paging block. Signed-off-by: Matti Gottlieb Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/mvm/fw.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c index 594cd0dc7df9..ebbadcf41ea0 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c @@ -410,7 +410,9 @@ static int iwl_trans_get_paging_item(struct iwl_mvm *mvm) goto exit; } - mvm->trans->paging_download_buf = kzalloc(MAX_PAGING_IMAGE_SIZE, + /* Add an extra page for headers */ + mvm->trans->paging_download_buf = kzalloc(PAGING_BLOCK_SIZE + + FW_PAGING_SIZE, GFP_KERNEL); if (!mvm->trans->paging_download_buf) { ret = -ENOMEM; From cd47a3d3c72825b5feeb31c1974b0dc875692481 Mon Sep 17 00:00:00 2001 From: Matti Gottlieb Date: Thu, 10 Mar 2016 16:18:26 +0200 Subject: [PATCH 0065/1649] iwlwifi: mvm: make sure FW contains the right amount of paging sections Paging contains 3 sections in the fw: the first for the paging separator, the second for the CSS block, and the third for the paging data. Currently, if the driver finds the paging separator and there is only one section left (CSS), then once it has read the CSS section it will attempt to read the paging data and will go out of the array's bounds. Make sure that the FW image contains the right number of sections for paging. Signed-off-by: Matti Gottlieb Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/mvm/fw.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c index ebbadcf41ea0..766c262500d2 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c @@ -174,8 +174,12 @@ static int iwl_fill_paging_mem(struct iwl_mvm *mvm, const struct fw_img *image) } } - if (sec_idx >= IWL_UCODE_SECTION_MAX) { - IWL_ERR(mvm, "driver didn't find paging image\n"); + /* + * If paging is enabled there should be at least 2 more sections left + * (one for CSS and one for Paging data) + */ + if (sec_idx >= ARRAY_SIZE(image->sec) - 1) { + IWL_ERR(mvm, "Paging: Missing CSS and/or paging sections\n"); iwl_free_fw_paging(mvm); return -EINVAL; } From 5b086414293f906d8c5692cbbfa3500458982e5d Mon Sep 17 00:00:00 2001 From: Golan Ben-Ami Date: Tue, 9 Feb 2016 12:57:16 +0200 Subject: [PATCH 0066/1649] iwlwifi: mvm: support dumping UMAC internal txfifos In case of FW error, support dumping the UMAC internal txfifos. To do so, support version 2 of the shared memory cfg command, which contains the sizes of the internal txfifos, and move the command to the system group. 
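For orientation, two points that only restate what the patch below does: the V2 layout is used only when the firmware advertises IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG, and it appends eight extra little-endian words after page_buff_size (rxfifo_addr, internal_txfifo_addr and TX_FIFO_INTERNAL_MAX_NUM, i.e. 6, internal_txfifo_size entries), 32 bytes in total. The gating can be condensed to:

    if (fw_has_capa(&mvm->fw->ucode_capa,
                    IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG))
            cmd.id = iwl_cmd_id(SHARED_MEM_CFG_CMD, SYSTEM_GROUP, 0);
    else
            cmd.id = SHARED_MEM_CFG;        /* legacy V1 command id */

    /* only a V2 response carries the extra fields that the fw-dbg code
     * later uses to size the internal TXF dumps */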
Signed-off-by: Golan Ben-Ami Signed-off-by: Emmanuel Grumbach --- .../intel/iwlwifi/iwl-fw-error-dump.h | 1 + .../net/wireless/intel/iwlwifi/iwl-fw-file.h | 3 + drivers/net/wireless/intel/iwlwifi/iwl-prph.h | 12 +++ .../net/wireless/intel/iwlwifi/mvm/fw-api.h | 17 +++- .../net/wireless/intel/iwlwifi/mvm/fw-dbg.c | 79 ++++++++++++++++++- drivers/net/wireless/intel/iwlwifi/mvm/fw.c | 28 ++++++- drivers/net/wireless/intel/iwlwifi/mvm/mvm.h | 3 + drivers/net/wireless/intel/iwlwifi/mvm/ops.c | 8 ++ 8 files changed, 146 insertions(+), 5 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fw-error-dump.h b/drivers/net/wireless/intel/iwlwifi/iwl-fw-error-dump.h index 8425e1a587d9..09b7ea28f4a0 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-fw-error-dump.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-fw-error-dump.h @@ -105,6 +105,7 @@ enum iwl_fw_error_dump_type { IWL_FW_ERROR_DUMP_RB = 11, IWL_FW_ERROR_DUMP_PAGING = 12, IWL_FW_ERROR_DUMP_RADIO_REG = 13, + IWL_FW_ERROR_DUMP_INTERNAL_TXF = 14, IWL_FW_ERROR_DUMP_MAX, }; diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h b/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h index 15ec4e2907d8..3a72b9715930 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h @@ -324,6 +324,8 @@ typedef unsigned int __bitwise__ iwl_ucode_tlv_capa_t; * @IWL_UCODE_TLV_CAPA_CTDP_SUPPORT: supports cTDP command * @IWL_UCODE_TLV_CAPA_USNIFFER_UNIFIED: supports usniffer enabled in * regular image. + * @IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG: support getting more shared + * memory addresses from the firmware. * * @NUM_IWL_UCODE_TLV_CAPA: number of bits used */ @@ -361,6 +363,7 @@ enum iwl_ucode_tlv_capa { IWL_UCODE_TLV_CAPA_TEMP_THS_REPORT_SUPPORT = (__force iwl_ucode_tlv_capa_t)75, IWL_UCODE_TLV_CAPA_CTDP_SUPPORT = (__force iwl_ucode_tlv_capa_t)76, IWL_UCODE_TLV_CAPA_USNIFFER_UNIFIED = (__force iwl_ucode_tlv_capa_t)77, + IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG = (__force iwl_ucode_tlv_capa_t)80, NUM_IWL_UCODE_TLV_CAPA #ifdef __CHECKER__ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h index c46e596e12b1..6c1d20ded04b 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h @@ -7,6 +7,7 @@ * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2016 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -33,6 +34,7 @@ * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2016 Intel Deutschland GmbH * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -345,6 +347,16 @@ enum secure_load_status_reg { #define TXF_READ_MODIFY_DATA (0xa00448) #define TXF_READ_MODIFY_ADDR (0xa0044c) +/* UMAC Internal Tx Fifo */ +#define TXF_CPU2_FIFO_ITEM_CNT (0xA00538) +#define TXF_CPU2_WR_PTR (0xA00514) +#define TXF_CPU2_RD_PTR (0xA00510) +#define TXF_CPU2_FENCE_PTR (0xA00518) +#define TXF_CPU2_LOCK_FENCE (0xA00524) +#define TXF_CPU2_NUM (0xA0053C) +#define TXF_CPU2_READ_MODIFY_DATA (0xA00548) +#define TXF_CPU2_READ_MODIFY_ADDR (0xA0054C) + /* Radio registers access */ #define RSP_RADIO_CMD (0xa02804) #define RSP_RADIO_RDDAT (0xa02814) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h index 4a0fc47c81f2..61711b10ff82 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h @@ -287,6 +287,10 @@ enum iwl_phy_ops_subcmd_ids { DTS_MEASUREMENT_NOTIF_WIDE = 0xFF, }; +enum iwl_system_subcmd_ids { + SHARED_MEM_CFG_CMD = 0x0, +}; + enum iwl_data_path_subcmd_ids { UPDATE_MU_GROUPS_CMD = 0x1, TRIGGER_RX_QUEUES_NOTIF_CMD = 0x2, @@ -302,6 +306,7 @@ enum iwl_prot_offload_subcmd_ids { enum { LEGACY_GROUP = 0x0, LONG_GROUP = 0x1, + SYSTEM_GROUP = 0x2, PHY_OPS_GROUP = 0x4, DATA_PATH_GROUP = 0x5, PROT_OFFLOAD_GROUP = 0xb, @@ -1923,6 +1928,7 @@ struct iwl_tdls_config_res { #define TX_FIFO_MAX_NUM 8 #define RX_FIFO_MAX_NUM 2 +#define TX_FIFO_INTERNAL_MAX_NUM 6 /** * Shared memory configuration information from the FW @@ -1940,6 +1946,12 @@ struct iwl_tdls_config_res { * @page_buff_addr: used by UMAC and performance debug (page miss analysis), * when paging is not supported this should be 0 * @page_buff_size: size of %page_buff_addr + * @rxfifo_addr: Start address of rxFifo + * @internal_txfifo_addr: start address of internalFifo + * @internal_txfifo_size: internal fifos' size + * + * NOTE: on firmware that don't have IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG + * set, the last 3 members don't exist. */ struct iwl_shared_mem_cfg { __le32 shared_mem_addr; @@ -1951,7 +1963,10 @@ struct iwl_shared_mem_cfg { __le32 rxfifo_size[RX_FIFO_MAX_NUM]; __le32 page_buff_addr; __le32 page_buff_size; -} __packed; /* SHARED_MEM_ALLOC_API_S_VER_1 */ + __le32 rxfifo_addr; + __le32 internal_txfifo_addr; + __le32 internal_txfifo_size[TX_FIFO_INTERNAL_MAX_NUM]; +} __packed; /* SHARED_MEM_ALLOC_API_S_VER_2 */ /** * VHT MU-MIMO group configuration diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c index 4856eac120f6..6ef706c13cda 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c @@ -7,7 +7,7 @@ * * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2015 Intel Deutschland GmbH + * Copyright(c) 2015 - 2016 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -32,7 +32,7 @@ * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2015 Intel Deutschland GmbH + * Copyright(c) 2015 - 2016 Intel Deutschland GmbH * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -265,6 +265,65 @@ static void iwl_mvm_dump_fifos(struct iwl_mvm *mvm, *dump_data = iwl_fw_error_next_data(*dump_data); } + if (fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) { + /* Pull UMAC internal TXF data from all TXFs */ + for (i = 0; + i < ARRAY_SIZE(mvm->shared_mem_cfg.internal_txfifo_size); + i++) { + /* Mark the number of TXF we're pulling now */ + iwl_trans_write_prph(mvm->trans, TXF_CPU2_NUM, i); + + fifo_hdr = (void *)(*dump_data)->data; + fifo_data = (void *)fifo_hdr->data; + fifo_len = mvm->shared_mem_cfg.internal_txfifo_size[i]; + + /* No need to try to read the data if the length is 0 */ + if (fifo_len == 0) + continue; + + /* Add a TLV for the internal FIFOs */ + (*dump_data)->type = + cpu_to_le32(IWL_FW_ERROR_DUMP_INTERNAL_TXF); + (*dump_data)->len = + cpu_to_le32(fifo_len + sizeof(*fifo_hdr)); + + fifo_hdr->fifo_num = cpu_to_le32(i); + fifo_hdr->available_bytes = + cpu_to_le32(iwl_trans_read_prph(mvm->trans, + TXF_CPU2_FIFO_ITEM_CNT)); + fifo_hdr->wr_ptr = + cpu_to_le32(iwl_trans_read_prph(mvm->trans, + TXF_CPU2_WR_PTR)); + fifo_hdr->rd_ptr = + cpu_to_le32(iwl_trans_read_prph(mvm->trans, + TXF_CPU2_RD_PTR)); + fifo_hdr->fence_ptr = + cpu_to_le32(iwl_trans_read_prph(mvm->trans, + TXF_CPU2_FENCE_PTR)); + fifo_hdr->fence_mode = + cpu_to_le32(iwl_trans_read_prph(mvm->trans, + TXF_CPU2_LOCK_FENCE)); + + /* Set TXF_CPU2_READ_MODIFY_ADDR to TXF_CPU2_WR_PTR */ + iwl_trans_write_prph(mvm->trans, + TXF_CPU2_READ_MODIFY_ADDR, + TXF_CPU2_WR_PTR); + + /* Dummy-read to advance the read pointer to head */ + iwl_trans_read_prph(mvm->trans, + TXF_CPU2_READ_MODIFY_DATA); + + /* Read FIFO */ + fifo_len /= sizeof(u32); /* Size in DWORDS */ + for (j = 0; j < fifo_len; j++) + fifo_data[j] = + iwl_trans_read_prph(mvm->trans, + TXF_CPU2_READ_MODIFY_DATA); + *dump_data = iwl_fw_error_next_data(*dump_data); + } + } + iwl_trans_release_nic_access(mvm->trans, &flags); } @@ -494,6 +553,22 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) sizeof(struct iwl_fw_error_dump_fifo); } + if (fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) { + for (i = 0; + i < ARRAY_SIZE(mem_cfg->internal_txfifo_size); + i++) { + if (!mem_cfg->internal_txfifo_size[i]) + continue; + + /* Add header info */ + fifo_data_len += + mem_cfg->internal_txfifo_size[i] + + sizeof(*dump_data) + + sizeof(struct iwl_fw_error_dump_fifo); + } + } + /* Make room for PRPH registers */ for (i = 0; i < ARRAY_SIZE(iwl_prph_dump_addr); i++) { /* The range includes both boundaries */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c index 766c262500d2..f375275ee98e 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c @@ -794,17 +794,22 @@ out: static void iwl_mvm_get_shared_mem_conf(struct iwl_mvm *mvm) { struct iwl_host_cmd cmd = { - .id = SHARED_MEM_CFG, .flags = CMD_WANT_SKB, .data = { NULL, }, .len = { 0, }, }; - struct iwl_rx_packet *pkt; struct iwl_shared_mem_cfg *mem_cfg; + struct iwl_rx_packet *pkt; u32 i; lockdep_assert_held(&mvm->mutex); + if (fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) + cmd.id = iwl_cmd_id(SHARED_MEM_CFG_CMD, SYSTEM_GROUP, 0); + else + cmd.id = SHARED_MEM_CFG; + if (WARN_ON(iwl_mvm_send_cmd(mvm, &cmd))) return; @@ -830,6 +835,25 @@ static void iwl_mvm_get_shared_mem_conf(struct iwl_mvm *mvm) le32_to_cpu(mem_cfg->page_buff_addr); 
mvm->shared_mem_cfg.page_buff_size = le32_to_cpu(mem_cfg->page_buff_size); + + /* new API has more data */ + if (fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) { + mvm->shared_mem_cfg.rxfifo_addr = + le32_to_cpu(mem_cfg->rxfifo_addr); + mvm->shared_mem_cfg.internal_txfifo_addr = + le32_to_cpu(mem_cfg->internal_txfifo_addr); + + BUILD_BUG_ON(sizeof(mvm->shared_mem_cfg.internal_txfifo_size) != + sizeof(mem_cfg->internal_txfifo_size)); + + for (i = 0; + i < ARRAY_SIZE(mvm->shared_mem_cfg.internal_txfifo_size); + i++) + mvm->shared_mem_cfg.internal_txfifo_size[i] = + le32_to_cpu(mem_cfg->internal_txfifo_size[i]); + } + IWL_DEBUG_INFO(mvm, "SHARED MEM CFG: got memory offsets/sizes\n"); iwl_free_resp(&cmd); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index 9abbc93e3c06..6c67c0f631c5 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -602,6 +602,9 @@ struct iwl_mvm_shared_mem_cfg { u32 rxfifo_size[RX_FIFO_MAX_NUM]; u32 page_buff_addr; u32 page_buff_size; + u32 rxfifo_addr; + u32 internal_txfifo_addr; + u32 internal_txfifo_size[TX_FIFO_INTERNAL_MAX_NUM]; }; struct iwl_mvm { diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index 5e8ab796d5bc..ccf6ecd21b18 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c @@ -418,6 +418,13 @@ static const struct iwl_hcmd_names iwl_mvm_legacy_names[] = { HCMD_NAME(REPLY_DEBUG_CMD), }; +/* Please keep this array *SORTED* by hex value. + * Access is done through binary search + */ +static const struct iwl_hcmd_names iwl_mvm_system_names[] = { + HCMD_NAME(SHARED_MEM_CFG_CMD), +}; + /* Please keep this array *SORTED* by hex value. * Access is done through binary search */ @@ -449,6 +456,7 @@ static const struct iwl_hcmd_names iwl_mvm_prot_offload_names[] = { static const struct iwl_hcmd_arr iwl_mvm_groups[] = { [LEGACY_GROUP] = HCMD_ARR(iwl_mvm_legacy_names), [LONG_GROUP] = HCMD_ARR(iwl_mvm_legacy_names), + [SYSTEM_GROUP] = HCMD_ARR(iwl_mvm_system_names), [PHY_OPS_GROUP] = HCMD_ARR(iwl_mvm_phy_names), [DATA_PATH_GROUP] = HCMD_ARR(iwl_mvm_data_path_names), [PROT_OFFLOAD_GROUP] = HCMD_ARR(iwl_mvm_prot_offload_names), From 9d71d47eed20f34620e54e29bcc90f959d5873b8 Mon Sep 17 00:00:00 2001 From: Michal Kazior Date: Thu, 17 Mar 2016 10:51:04 +0100 Subject: [PATCH 0067/1649] ath10k: fix tx hang The wake_tx_queue/push_pending logic had a bug which could stop queues indefinitely effectivelly breaking traffic. 
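For clarity, a minimal sketch of the corrected loop body in ath10k_mac_tx_push_pending(), condensed from the diff below (locking, RCU and the per-queue push budget are elided; "last" marks the txq the rotation started from):

	list_del_init(&artxq->list);

	/* Rotate a still-populated txq back to the tail *before* deciding
	 * whether to stop, so it can never silently fall out of ar->txqs.
	 */
	if (ret != -ENOENT)
		list_add_tail(&artxq->list, &ar->txqs);

	ath10k_htt_tx_txq_update(hw, txq);

	if (artxq == last || (ret < 0 && ret != -ENOENT))
		break;

Previously the re-queue only happened on the break path, so a txq that still had frames pending once its push budget ran out was removed from the rotation and never serviced again.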
Fixes: 299468782d94 ("ath10k: implement wake_tx_queue") Signed-off-by: Michal Kazior Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/mac.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index eedfe821700b..1bc4bf1916a6 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -3777,13 +3777,13 @@ void ath10k_mac_tx_push_pending(struct ath10k *ar) } list_del_init(&artxq->list); + if (ret != -ENOENT) + list_add_tail(&artxq->list, &ar->txqs); + ath10k_htt_tx_txq_update(hw, txq); - if (artxq == last || (ret < 0 && ret != -ENOENT)) { - if (ret != -ENOENT) - list_add_tail(&artxq->list, &ar->txqs); + if (artxq == last || (ret < 0 && ret != -ENOENT)) break; - } } rcu_read_unlock(); From 750eeed89cf3c466df302e4707491b015531e26c Mon Sep 17 00:00:00 2001 From: Michal Kazior Date: Thu, 17 Mar 2016 10:51:05 +0100 Subject: [PATCH 0068/1649] ath10k: fix pull-push tx threshold handling This prevents tx hangs or hiccups if pull-push supporting firmware defines per-txq thresholds or switches modes dynamically. Fixes: 299468782d94 ("ath10k: implement wake_tx_queue") Signed-off-by: Michal Kazior Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/mac.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index 1bc4bf1916a6..ed00853ea9cc 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -3770,7 +3770,8 @@ void ath10k_mac_tx_push_pending(struct ath10k *ar) /* Prevent aggressive sta/tid taking over tx queue */ max = 16; - while (max--) { + ret = 0; + while (ath10k_mac_tx_can_push(hw, txq) && max--) { ret = ath10k_mac_tx_push_txq(hw, txq); if (ret < 0) break; @@ -4023,14 +4024,13 @@ static void ath10k_mac_op_wake_tx_queue(struct ieee80211_hw *hw, struct ath10k *ar = hw->priv; struct ath10k_txq *artxq = (void *)txq->drv_priv; - if (ath10k_mac_tx_can_push(hw, txq)) { - spin_lock_bh(&ar->txqs_lock); - if (list_empty(&artxq->list)) - list_add_tail(&artxq->list, &ar->txqs); - spin_unlock_bh(&ar->txqs_lock); + spin_lock_bh(&ar->txqs_lock); + if (list_empty(&artxq->list)) + list_add_tail(&artxq->list, &ar->txqs); + spin_unlock_bh(&ar->txqs_lock); + if (ath10k_mac_tx_can_push(hw, txq)) tasklet_schedule(&ar->htt.txrx_compl_task); - } ath10k_htt_tx_txq_update(hw, txq); } From 8866c727440d5b059637cb97927e383548099e8c Mon Sep 17 00:00:00 2001 From: Michal Kazior Date: Thu, 17 Mar 2016 10:52:08 +0100 Subject: [PATCH 0069/1649] ath10k: fix null deref if device crashes early If device failed to init during early probing (which is quite rare) it triggered driver to compute crc before ar->firmware was ready causing an oops. 
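The fix is simply to guard the checksum computation; a condensed view of the debug.c hunk that follows:

	u32 crc = 0;

	/* Only hash the image once the firmware blob has actually been
	 * fetched; report a zero checksum otherwise instead of
	 * dereferencing a NULL pointer.
	 */
	if (ar->firmware)
		crc = crc32_le(0, ar->firmware->data, ar->firmware->size);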
Fixes: 3e58044b61a9 ("ath10k: print crc32 checksums for firmware and board files") Signed-off-by: Michal Kazior Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/debug.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c index 0f834646e6a7..2cf1b350ac73 100644 --- a/drivers/net/wireless/ath/ath10k/debug.c +++ b/drivers/net/wireless/ath/ath10k/debug.c @@ -127,6 +127,7 @@ EXPORT_SYMBOL(ath10k_info); void ath10k_debug_print_hwfw_info(struct ath10k *ar) { char fw_features[128] = {}; + u32 crc = 0; ath10k_core_get_fw_features_str(ar, fw_features, sizeof(fw_features)); @@ -143,11 +144,14 @@ void ath10k_debug_print_hwfw_info(struct ath10k *ar) config_enabled(CONFIG_ATH10K_DFS_CERTIFIED), config_enabled(CONFIG_NL80211_TESTMODE)); + if (ar->firmware) + crc = crc32_le(0, ar->firmware->data, ar->firmware->size); + ath10k_info(ar, "firmware ver %s api %d features %s crc32 %08x\n", ar->hw->wiphy->fw_version, ar->fw_api, fw_features, - crc32_le(0, ar->firmware->data, ar->firmware->size)); + crc); } void ath10k_debug_print_board_info(struct ath10k *ar) From a47aaa69de88913d1640c4bd28c67fad142c61a3 Mon Sep 17 00:00:00 2001 From: Raja Mani Date: Fri, 18 Mar 2016 11:44:21 +0200 Subject: [PATCH 0070/1649] dt: bindings: add new dt entry for pre calibration in qcom, ath10k.txt There two things done in this patch, 1) Existing device tree entry 'qcom,ath10k-calibration-data' carries not only calibration data, it carries board specific data too. So, make appropriate update in doc. 2) ipq4019 wifi needs new devie tree entry to carry calibration data alone (called pre cal data, it doesn't include any other info). Using 'qcom,ath10k-calibration-data' for ipq4019 would alter the purpose of it. Hence, add new device tree entry called 'qcom,ath10k-pre-calibration-data' to carry only pre calibration data. Signed-off-by: Raja Mani Acked-by: Rob Herring Signed-off-by: Kalle Valo --- .../bindings/net/wireless/qcom,ath10k.txt | 23 +++++++++++++------ 1 file changed, 16 insertions(+), 7 deletions(-) diff --git a/Documentation/devicetree/bindings/net/wireless/qcom,ath10k.txt b/Documentation/devicetree/bindings/net/wireless/qcom,ath10k.txt index 96aae6b4f736..74d7f0af209c 100644 --- a/Documentation/devicetree/bindings/net/wireless/qcom,ath10k.txt +++ b/Documentation/devicetree/bindings/net/wireless/qcom,ath10k.txt @@ -5,12 +5,18 @@ Required properties: * "qcom,ath10k" * "qcom,ipq4019-wifi" -PCI based devices uses compatible string "qcom,ath10k" and takes only -calibration data via "qcom,ath10k-calibration-data". Rest of the properties -are not applicable for PCI based devices. +PCI based devices uses compatible string "qcom,ath10k" and takes calibration +data along with board specific data via "qcom,ath10k-calibration-data". +Rest of the properties are not applicable for PCI based devices. AHB based devices (i.e. ipq4019) uses compatible string "qcom,ipq4019-wifi" -and also uses most of the properties defined in this doc. +and also uses most of the properties defined in this doc (except +"qcom,ath10k-calibration-data"). It uses "qcom,ath10k-pre-calibration-data" +to carry pre calibration data. + +In general, entry "qcom,ath10k-pre-calibration-data" and +"qcom,ath10k-calibration-data" conflict with each other and only one +can be provided per device. Optional properties: - reg: Address and length of the register set for the device. @@ -35,8 +41,11 @@ Optional properties: - qcom,msi_addr: MSI interrupt address. 
- qcom,msi_base: Base value to add before writing MSI data into MSI address register. -- qcom,ath10k-calibration-data : calibration data as an array, the - length can vary between hw versions +- qcom,ath10k-calibration-data : calibration data + board specific data + as an array, the length can vary between + hw versions. +- qcom,ath10k-pre-calibration-data : pre calibration data as an array, + the length can vary between hw versions. Example (to supply the calibration data alone): @@ -105,5 +114,5 @@ wifi0: wifi@a000000 { "legacy"; qcom,msi_addr = <0x0b006040>; qcom,msi_base = <0x40>; - qcom,ath10k-calibration-data = [ 01 02 03 ... ]; + qcom,ath10k-pre-calibration-data = [ 01 02 03 ... ]; }; From f454add47adb4133f297e1b7af07bf07b3983044 Mon Sep 17 00:00:00 2001 From: Raja Mani Date: Fri, 18 Mar 2016 11:44:21 +0200 Subject: [PATCH 0071/1649] ath10k: pass cal data location as an argument to ath10k_download_cal_{file|dt} Both ath10k_download_cal_file() and ath10k_download_cal_dt() uses hard coded file pointer (ar->cal_file) and device tree entry (qcom,ath10k-calibration-data) respectively to get calibration data content. There is a need to use those two functions in qca4019 calibration download sequence with different file pointer and device tree entry name. Modify those two functions to take cal data location as an argument. So that it can serve the purpose for other file pointer and device tree entry. This is just preparation before adding actual qca4019 calibration download sequence. No functional changes. Signed-off-by: Raja Mani Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/core.c | 24 +++++++++++------------- 1 file changed, 11 insertions(+), 13 deletions(-) diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c index d5d0b88aa5fe..7c4a9c99b268 100644 --- a/drivers/net/wireless/ath/ath10k/core.c +++ b/drivers/net/wireless/ath/ath10k/core.c @@ -462,18 +462,18 @@ exit: return ret; } -static int ath10k_download_cal_file(struct ath10k *ar) +static int ath10k_download_cal_file(struct ath10k *ar, + const struct firmware *file) { int ret; - if (!ar->cal_file) + if (!file) return -ENOENT; - if (IS_ERR(ar->cal_file)) - return PTR_ERR(ar->cal_file); + if (IS_ERR(file)) + return PTR_ERR(file); - ret = ath10k_download_board_data(ar, ar->cal_file->data, - ar->cal_file->size); + ret = ath10k_download_board_data(ar, file->data, file->size); if (ret) { ath10k_err(ar, "failed to download cal_file data: %d\n", ret); return ret; @@ -484,7 +484,7 @@ static int ath10k_download_cal_file(struct ath10k *ar) return 0; } -static int ath10k_download_cal_dt(struct ath10k *ar) +static int ath10k_download_cal_dt(struct ath10k *ar, const char *dt_name) { struct device_node *node; int data_len; @@ -498,8 +498,7 @@ static int ath10k_download_cal_dt(struct ath10k *ar) */ return -ENOENT; - if (!of_get_property(node, "qcom,ath10k-calibration-data", - &data_len)) { + if (!of_get_property(node, dt_name, &data_len)) { /* The calibration data node is optional */ return -ENOENT; } @@ -517,8 +516,7 @@ static int ath10k_download_cal_dt(struct ath10k *ar) goto out; } - ret = of_property_read_u8_array(node, "qcom,ath10k-calibration-data", - data, data_len); + ret = of_property_read_u8_array(node, dt_name, data, data_len); if (ret) { ath10k_warn(ar, "failed to read calibration data from DT: %d\n", ret); @@ -1258,7 +1256,7 @@ static int ath10k_download_cal_data(struct ath10k *ar) { int ret; - ret = ath10k_download_cal_file(ar); + ret = ath10k_download_cal_file(ar, ar->cal_file); 
if (ret == 0) { ar->cal_mode = ATH10K_CAL_MODE_FILE; goto done; @@ -1268,7 +1266,7 @@ static int ath10k_download_cal_data(struct ath10k *ar) "boot did not find a calibration file, try DT next: %d\n", ret); - ret = ath10k_download_cal_dt(ar); + ret = ath10k_download_cal_dt(ar, "qcom,ath10k-calibration-data"); if (ret == 0) { ar->cal_mode = ATH10K_CAL_MODE_DT; goto done; From 0b8e3c4ca29fe2c0efd3d41a76e34a657b9f17a4 Mon Sep 17 00:00:00 2001 From: Raja Mani Date: Fri, 18 Mar 2016 11:44:22 +0200 Subject: [PATCH 0072/1649] ath10k: move cal data len to hw_params ath10k_download_cal_dt() compares obtained cal data content length against QCA988X_CAL_DATA_LEN (2116 bytes). It was written by keeping qca988x in mind. In fact, cal data length is more chip specific. To make ath10k_download_cal_dt() more generic and reusable for other chipsets (like qca4019), cal data length is moved to hw_params. Signed-off-by: Raja Mani Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/core.c | 11 ++++++++++- drivers/net/wireless/ath/ath10k/core.h | 1 + drivers/net/wireless/ath/ath10k/debug.c | 7 ++++--- drivers/net/wireless/ath/ath10k/hw.h | 2 -- 4 files changed, 15 insertions(+), 6 deletions(-) diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c index 7c4a9c99b268..c33fad96a1e8 100644 --- a/drivers/net/wireless/ath/ath10k/core.c +++ b/drivers/net/wireless/ath/ath10k/core.c @@ -60,6 +60,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .channel_counters_freq_hz = 88000, .max_probe_resp_desc_thres = 0, .hw_4addr_pad = ATH10K_HW_4ADDR_PAD_AFTER, + .cal_data_len = 2116, .fw = { .dir = QCA988X_HW_2_0_FW_DIR, .fw = QCA988X_HW_2_0_FW_FILE, @@ -78,6 +79,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .otp_exe_param = 0, .channel_counters_freq_hz = 88000, .max_probe_resp_desc_thres = 0, + .cal_data_len = 8124, .fw = { .dir = QCA6174_HW_2_1_FW_DIR, .fw = QCA6174_HW_2_1_FW_FILE, @@ -97,6 +99,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .channel_counters_freq_hz = 88000, .max_probe_resp_desc_thres = 0, .hw_4addr_pad = ATH10K_HW_4ADDR_PAD_AFTER, + .cal_data_len = 8124, .fw = { .dir = QCA6174_HW_2_1_FW_DIR, .fw = QCA6174_HW_2_1_FW_FILE, @@ -116,6 +119,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .channel_counters_freq_hz = 88000, .max_probe_resp_desc_thres = 0, .hw_4addr_pad = ATH10K_HW_4ADDR_PAD_AFTER, + .cal_data_len = 8124, .fw = { .dir = QCA6174_HW_3_0_FW_DIR, .fw = QCA6174_HW_3_0_FW_FILE, @@ -135,6 +139,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .channel_counters_freq_hz = 88000, .max_probe_resp_desc_thres = 0, .hw_4addr_pad = ATH10K_HW_4ADDR_PAD_AFTER, + .cal_data_len = 8124, .fw = { /* uses same binaries as hw3.0 */ .dir = QCA6174_HW_3_0_FW_DIR, @@ -159,6 +164,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .tx_chain_mask = 0xf, .rx_chain_mask = 0xf, .max_spatial_stream = 4, + .cal_data_len = 12064, .fw = { .dir = QCA99X0_HW_2_0_FW_DIR, .fw = QCA99X0_HW_2_0_FW_FILE, @@ -177,6 +183,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .otp_exe_param = 0, .channel_counters_freq_hz = 88000, .max_probe_resp_desc_thres = 0, + .cal_data_len = 8124, .fw = { .dir = QCA9377_HW_1_0_FW_DIR, .fw = QCA9377_HW_1_0_FW_FILE, @@ -195,6 +202,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .otp_exe_param = 0, .channel_counters_freq_hz = 88000, .max_probe_resp_desc_thres = 0, + .cal_data_len = 8124, .fw = { .dir = 
QCA9377_HW_1_0_FW_DIR, .fw = QCA9377_HW_1_0_FW_FILE, @@ -218,6 +226,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .tx_chain_mask = 0x3, .rx_chain_mask = 0x3, .max_spatial_stream = 2, + .cal_data_len = 12064, .fw = { .dir = QCA4019_HW_1_0_FW_DIR, .fw = QCA4019_HW_1_0_FW_FILE, @@ -503,7 +512,7 @@ static int ath10k_download_cal_dt(struct ath10k *ar, const char *dt_name) return -ENOENT; } - if (data_len != QCA988X_CAL_DATA_LEN) { + if (data_len != ar->hw_params.cal_data_len) { ath10k_warn(ar, "invalid calibration data length in DT: %d\n", data_len); ret = -EMSGSIZE; diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h index bb5f7e22fc1e..73edd69ed7e7 100644 --- a/drivers/net/wireless/ath/ath10k/core.h +++ b/drivers/net/wireless/ath/ath10k/core.h @@ -695,6 +695,7 @@ struct ath10k { u32 tx_chain_mask; u32 rx_chain_mask; u32 max_spatial_stream; + u32 cal_data_len; struct ath10k_hw_params_fw { const char *dir; diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c index 2cf1b350ac73..dec7e054b4b6 100644 --- a/drivers/net/wireless/ath/ath10k/debug.c +++ b/drivers/net/wireless/ath/ath10k/debug.c @@ -1451,7 +1451,7 @@ static int ath10k_debug_cal_data_open(struct inode *inode, struct file *file) goto err; } - buf = vmalloc(QCA988X_CAL_DATA_LEN); + buf = vmalloc(ar->hw_params.cal_data_len); if (!buf) { ret = -ENOMEM; goto err; @@ -1466,7 +1466,7 @@ static int ath10k_debug_cal_data_open(struct inode *inode, struct file *file) } ret = ath10k_hif_diag_read(ar, le32_to_cpu(addr), buf, - QCA988X_CAL_DATA_LEN); + ar->hw_params.cal_data_len); if (ret) { ath10k_warn(ar, "failed to read calibration data: %d\n", ret); goto err_vfree; @@ -1491,10 +1491,11 @@ static ssize_t ath10k_debug_cal_data_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { + struct ath10k *ar = file->private_data; void *buf = file->private_data; return simple_read_from_buffer(user_buf, count, ppos, - buf, QCA988X_CAL_DATA_LEN); + buf, ar->hw_params.cal_data_len); } static int ath10k_debug_cal_data_release(struct inode *inode, diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h index 1ff617b05010..c0179bc4af29 100644 --- a/drivers/net/wireless/ath/ath10k/hw.h +++ b/drivers/net/wireless/ath/ath10k/hw.h @@ -134,8 +134,6 @@ enum qca9377_chip_id_rev { #define REG_DUMP_COUNT_QCA988X 60 -#define QCA988X_CAL_DATA_LEN 2116 - struct ath10k_fw_ie { __le32 id; __le32 len; From 3d9195ea19e4854d7daa11688b01905e244aead9 Mon Sep 17 00:00:00 2001 From: Raja Mani Date: Fri, 18 Mar 2016 11:44:22 +0200 Subject: [PATCH 0073/1649] ath10k: incorporate qca4019 cal data download sequence qca4019 calibration data is stored in the host memory and it's mandatory to download it even before reading board id and chip id from the target. Also, there is a need to execute otp (download and run) twice, one after cal data download and another one after board data download. Existing cal data file name 'cal--.bin' and device tree entry 'qcom,ath10k-calibration-data' used in ath10k has assumption that it carries other data (like board data) also along with the calibration data. But, qca4019 cal data contains pure calibration data (doesn't include any other info). So, using existing same cal file name and DT entry in qca4019 case would alter the purpose of it. To avoid this, new cal file name 'pre-cal--.bin' and new device tree entry name 'qcom,ath10k-pre-calibration-data are introduced. 
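For illustration only, the calibration-source fallback this series ends up with can be condensed as in the hypothetical snippet below, built from the download helpers reworked earlier in this series (error handling, OTP execution and debug output are elided; the complete qca4019 flow is spelled out in the sequence that follows):

	ret = ath10k_download_cal_file(ar, ar->pre_cal_file);
	if (ret)	/* no pre-cal-<bus>-<dev>.bin */
		ret = ath10k_download_cal_dt(ar, "qcom,ath10k-pre-calibration-data");
	if (ret)	/* no pre-cal DT entry either, fall back to legacy sources */
		ret = ath10k_download_cal_file(ar, ar->cal_file);
	if (ret)
		ret = ath10k_download_cal_dt(ar, "qcom,ath10k-calibration-data");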
Overall qca4019's firmware download sequence would look like, 1) Download cal data (either from a file or device tree entry) at the address specified by target in the host interest area member "hi_board_data". 2) Download otp and run with 0x10 (PARAM_GET_EEPROM_BOARD_ID) as a argument. At this point, otp will take back up of downloaded cal data content in another location in the target and return valid board id and chip id to the host. 3) Download board data at the address specified by target in host interest area member "hi_board_data". 4) Download otp and run with 0x10000 (PARAM_FLASH_SECTION_ALL) as a argument. Now otp will apply cal data content from it's backup on top of board data download in step 3 and prepare final data base. 5) Download code swap and athwlan binary content. Above sequences are implemented (step 1 to step 4) in the name of pre calibration configuration. Signed-off-by: Raja Mani Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/core.c | 85 +++++++++++++++++++++++++- drivers/net/wireless/ath/ath10k/core.h | 11 +++- 2 files changed, 94 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c index c33fad96a1e8..8b35e3adcee9 100644 --- a/drivers/net/wireless/ath/ath10k/core.c +++ b/drivers/net/wireless/ath/ath10k/core.c @@ -729,6 +729,14 @@ static int ath10k_fetch_cal_file(struct ath10k *ar) { char filename[100]; + /* pre-cal--.bin */ + scnprintf(filename, sizeof(filename), "pre-cal-%s-%s.bin", + ath10k_bus_str(ar->hif.bus), dev_name(ar->dev)); + + ar->pre_cal_file = ath10k_fetch_fw_file(ar, ATH10K_FW_DIR, filename); + if (!IS_ERR(ar->pre_cal_file)) + goto success; + /* cal--.bin */ scnprintf(filename, sizeof(filename), "cal-%s-%s.bin", ath10k_bus_str(ar->hif.bus), dev_name(ar->dev)); @@ -737,7 +745,7 @@ static int ath10k_fetch_cal_file(struct ath10k *ar) if (IS_ERR(ar->cal_file)) /* calibration file is optional, don't print any warnings */ return PTR_ERR(ar->cal_file); - +success: ath10k_dbg(ar, ATH10K_DBG_BOOT, "found calibration file %s/%s\n", ATH10K_FW_DIR, filename); @@ -1261,10 +1269,76 @@ success: return 0; } +static int ath10k_core_pre_cal_download(struct ath10k *ar) +{ + int ret; + + ret = ath10k_download_cal_file(ar, ar->pre_cal_file); + if (ret == 0) { + ar->cal_mode = ATH10K_PRE_CAL_MODE_FILE; + goto success; + } + + ath10k_dbg(ar, ATH10K_DBG_BOOT, + "boot did not find a pre calibration file, try DT next: %d\n", + ret); + + ret = ath10k_download_cal_dt(ar, "qcom,ath10k-pre-calibration-data"); + if (ret) { + ath10k_dbg(ar, ATH10K_DBG_BOOT, + "unable to load pre cal data from DT: %d\n", ret); + return ret; + } + ar->cal_mode = ATH10K_PRE_CAL_MODE_DT; + +success: + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot using calibration mode %s\n", + ath10k_cal_mode_str(ar->cal_mode)); + + return 0; +} + +static int ath10k_core_pre_cal_config(struct ath10k *ar) +{ + int ret; + + ret = ath10k_core_pre_cal_download(ar); + if (ret) { + ath10k_dbg(ar, ATH10K_DBG_BOOT, + "failed to load pre cal data: %d\n", ret); + return ret; + } + + ret = ath10k_core_get_board_id_from_otp(ar); + if (ret) { + ath10k_err(ar, "failed to get board id: %d\n", ret); + return ret; + } + + ret = ath10k_download_and_run_otp(ar); + if (ret) { + ath10k_err(ar, "failed to run otp: %d\n", ret); + return ret; + } + + ath10k_dbg(ar, ATH10K_DBG_BOOT, + "pre cal configuration done successfully\n"); + + return 0; +} + static int ath10k_download_cal_data(struct ath10k *ar) { int ret; + ret = ath10k_core_pre_cal_config(ar); + if (ret == 0) 
+ return 0; + + ath10k_dbg(ar, ATH10K_DBG_BOOT, + "pre cal download procedure failed, try cal file: %d\n", + ret); + ret = ath10k_download_cal_file(ar, ar->cal_file); if (ret == 0) { ar->cal_mode = ATH10K_CAL_MODE_FILE; @@ -1842,6 +1916,15 @@ static int ath10k_core_probe_fw(struct ath10k *ar) ath10k_debug_print_hwfw_info(ar); + ret = ath10k_core_pre_cal_download(ar); + if (ret) { + /* pre calibration data download is not necessary + * for all the chipsets. Ignore failures and continue. + */ + ath10k_dbg(ar, ATH10K_DBG_BOOT, + "could not load pre cal data: %d\n", ret); + } + ret = ath10k_core_get_board_id_from_otp(ar); if (ret && ret != -EOPNOTSUPP) { ath10k_err(ar, "failed to get board id from otp: %d\n", diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h index 73edd69ed7e7..b6c157ef705a 100644 --- a/drivers/net/wireless/ath/ath10k/core.h +++ b/drivers/net/wireless/ath/ath10k/core.h @@ -567,6 +567,8 @@ enum ath10k_cal_mode { ATH10K_CAL_MODE_FILE, ATH10K_CAL_MODE_OTP, ATH10K_CAL_MODE_DT, + ATH10K_PRE_CAL_MODE_FILE, + ATH10K_PRE_CAL_MODE_DT, }; enum ath10k_crypt_mode { @@ -585,6 +587,10 @@ static inline const char *ath10k_cal_mode_str(enum ath10k_cal_mode mode) return "otp"; case ATH10K_CAL_MODE_DT: return "dt"; + case ATH10K_PRE_CAL_MODE_FILE: + return "pre-cal-file"; + case ATH10K_PRE_CAL_MODE_DT: + return "pre-cal-dt"; } return "unknown"; @@ -719,7 +725,10 @@ struct ath10k { const void *firmware_data; size_t firmware_len; - const struct firmware *cal_file; + union { + const struct firmware *pre_cal_file; + const struct firmware *cal_file; + }; struct { const void *firmware_codeswap_data; From cc61a1bbbc0ebbda3cc155bcbe164f4609fd62f6 Mon Sep 17 00:00:00 2001 From: Mohammed Shafi Shajakhan Date: Wed, 16 Mar 2016 18:13:32 +0530 Subject: [PATCH 0074/1649] ath10k: enable debugfs provision to enable Peer Stats feature Provide a debugfs entry to enable/ disable Peer Stats feature. Peer Stats feature is for developers/users who are more interested in studying in Rx/Tx stats with multiple clients connected, hence disable this by default. Enabling this feature by default results in unneccessary processing of Peer Stats event for every 500ms and updating peer_stats list (allocating memory) and cleaning it up ifexceeds the higher limit and this can be an unnecessary overhead during long run stress testing. 
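The run-time gate this adds boils down to the helper below (condensed from the core.h hunk): peer statistics are processed only when the user has opted in through the new debugfs file and the firmware actually advertises the peer-stats WMI service. Writing the debugfs file queues a firmware restart so the WMI resource configuration (peer and TID counts) can be resized to match.

	static inline bool ath10k_peer_stats_enabled(struct ath10k *ar)
	{
		return test_bit(ATH10K_FLAG_PEER_STATS, &ar->dev_flags) &&
		       test_bit(WMI_SERVICE_PEER_STATS, ar->wmi.svc_map);
	}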
Signed-off-by: Mohammed Shafi Shajakhan Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/core.c | 2 +- drivers/net/wireless/ath/ath10k/core.h | 12 ++++ drivers/net/wireless/ath/ath10k/debug.c | 80 +++++++++++++++++++++++-- drivers/net/wireless/ath/ath10k/mac.c | 2 +- drivers/net/wireless/ath/ath10k/wmi.c | 12 ++-- 5 files changed, 94 insertions(+), 14 deletions(-) diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c index 8b35e3adcee9..7a714d971615 100644 --- a/drivers/net/wireless/ath/ath10k/core.c +++ b/drivers/net/wireless/ath/ath10k/core.c @@ -1586,7 +1586,7 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar) case ATH10K_FW_WMI_OP_VERSION_10_1: case ATH10K_FW_WMI_OP_VERSION_10_2: case ATH10K_FW_WMI_OP_VERSION_10_2_4: - if (test_bit(WMI_SERVICE_PEER_STATS, ar->wmi.svc_map)) { + if (ath10k_peer_stats_enabled(ar)) { ar->max_num_peers = TARGET_10X_TX_STATS_NUM_PEERS; ar->max_num_stations = TARGET_10X_TX_STATS_NUM_STATIONS; } else { diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h index b6c157ef705a..c23c37312ef7 100644 --- a/drivers/net/wireless/ath/ath10k/core.h +++ b/drivers/net/wireless/ath/ath10k/core.h @@ -561,6 +561,9 @@ enum ath10k_dev_flags { /* Bluetooth coexistance enabled */ ATH10K_FLAG_BTCOEX, + + /* Per Station statistics service */ + ATH10K_FLAG_PEER_STATS, }; enum ath10k_cal_mode { @@ -903,6 +906,15 @@ struct ath10k { u8 drv_priv[0] __aligned(sizeof(void *)); }; +static inline bool ath10k_peer_stats_enabled(struct ath10k *ar) +{ + if (test_bit(ATH10K_FLAG_PEER_STATS, &ar->dev_flags) && + test_bit(WMI_SERVICE_PEER_STATS, ar->wmi.svc_map)) + return true; + + return false; +} + struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev, enum ath10k_bus bus, enum ath10k_hw_rev hw_rev, diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c index dec7e054b4b6..76bbe17b25b6 100644 --- a/drivers/net/wireless/ath/ath10k/debug.c +++ b/drivers/net/wireless/ath/ath10k/debug.c @@ -323,7 +323,7 @@ static void ath10k_debug_fw_stats_reset(struct ath10k *ar) void ath10k_debug_fw_stats_process(struct ath10k *ar, struct sk_buff *skb) { struct ath10k_fw_stats stats = {}; - bool is_start, is_started, is_end, peer_stats_svc; + bool is_start, is_started, is_end; size_t num_peers; size_t num_vdevs; int ret; @@ -350,13 +350,11 @@ void ath10k_debug_fw_stats_process(struct ath10k *ar, struct sk_buff *skb) * b) consume stat update events until another one with pdev stats is * delivered which is treated as end-of-data and is itself discarded */ - - peer_stats_svc = test_bit(WMI_SERVICE_PEER_STATS, ar->wmi.svc_map); - if (peer_stats_svc) + if (ath10k_peer_stats_enabled(ar)) ath10k_sta_update_rx_duration(ar, &stats.peers); if (ar->debug.fw_stats_done) { - if (!peer_stats_svc) + if (!ath10k_peer_stats_enabled(ar)) ath10k_warn(ar, "received unsolicited stats update event\n"); goto free; @@ -2184,6 +2182,73 @@ static const struct file_operations fops_btcoex = { .open = simple_open }; +static ssize_t ath10k_write_peer_stats(struct file *file, + const char __user *ubuf, + size_t count, loff_t *ppos) +{ + struct ath10k *ar = file->private_data; + char buf[32]; + size_t buf_size; + int ret = 0; + bool val; + + buf_size = min(count, (sizeof(buf) - 1)); + if (copy_from_user(buf, ubuf, buf_size)) + return -EFAULT; + + buf[buf_size] = '\0'; + + if (strtobool(buf, &val) != 0) + return -EINVAL; + + mutex_lock(&ar->conf_mutex); + + if (ar->state != 
ATH10K_STATE_ON && + ar->state != ATH10K_STATE_RESTARTED) { + ret = -ENETDOWN; + goto exit; + } + + if (!(test_bit(ATH10K_FLAG_PEER_STATS, &ar->dev_flags) ^ val)) + goto exit; + + if (val) + set_bit(ATH10K_FLAG_PEER_STATS, &ar->dev_flags); + else + clear_bit(ATH10K_FLAG_PEER_STATS, &ar->dev_flags); + + ath10k_info(ar, "restarting firmware due to Peer stats change"); + + queue_work(ar->workqueue, &ar->restart_work); + ret = count; + +exit: + mutex_unlock(&ar->conf_mutex); + return ret; +} + +static ssize_t ath10k_read_peer_stats(struct file *file, char __user *ubuf, + size_t count, loff_t *ppos) + +{ + char buf[32]; + struct ath10k *ar = file->private_data; + int len = 0; + + mutex_lock(&ar->conf_mutex); + len = scnprintf(buf, sizeof(buf) - len, "%d\n", + test_bit(ATH10K_FLAG_PEER_STATS, &ar->dev_flags)); + mutex_unlock(&ar->conf_mutex); + + return simple_read_from_buffer(ubuf, count, ppos, buf, len); +} + +static const struct file_operations fops_peer_stats = { + .read = ath10k_read_peer_stats, + .write = ath10k_write_peer_stats, + .open = simple_open +}; + static ssize_t ath10k_debug_fw_checksums_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) @@ -2347,6 +2412,11 @@ int ath10k_debug_register(struct ath10k *ar) debugfs_create_file("btcoex", S_IRUGO | S_IWUSR, ar->debug.debugfs_phy, ar, &fops_btcoex); + if (test_bit(WMI_SERVICE_PEER_STATS, ar->wmi.svc_map)) + debugfs_create_file("peer_stats", S_IRUGO | S_IWUSR, + ar->debug.debugfs_phy, ar, + &fops_peer_stats); + debugfs_create_file("fw_checksums", S_IRUSR, ar->debug.debugfs_phy, ar, &fops_fw_checksums); diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index ed00853ea9cc..20d72e29dfa1 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -4435,7 +4435,7 @@ static int ath10k_start(struct ieee80211_hw *hw) ar->ani_enabled = true; - if (test_bit(WMI_SERVICE_PEER_STATS, ar->wmi.svc_map)) { + if (ath10k_peer_stats_enabled(ar)) { param = ar->wmi.pdev_param->peer_stats_update_period; ret = ath10k_wmi_pdev_set_param(ar, param, PEER_DEFAULT_STATS_UPDATE_PERIOD); diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c index 91375664dc35..afed9dab74f4 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.c +++ b/drivers/net/wireless/ath/ath10k/wmi.c @@ -2856,11 +2856,8 @@ static int ath10k_wmi_10_2_4_op_pull_fw_stats(struct ath10k *ar, const struct wmi_10_2_4_ext_peer_stats *src; struct ath10k_fw_stats_peer *dst; int stats_len; - bool ext_peer_stats_support; - ext_peer_stats_support = test_bit(WMI_SERVICE_PEER_STATS, - ar->wmi.svc_map); - if (ext_peer_stats_support) + if (test_bit(WMI_SERVICE_PEER_STATS, ar->wmi.svc_map)) stats_len = sizeof(struct wmi_10_2_4_ext_peer_stats); else stats_len = sizeof(struct wmi_10_2_4_peer_stats); @@ -2877,7 +2874,7 @@ static int ath10k_wmi_10_2_4_op_pull_fw_stats(struct ath10k *ar, dst->peer_rx_rate = __le32_to_cpu(src->common.peer_rx_rate); - if (ext_peer_stats_support) + if (ath10k_peer_stats_enabled(ar)) dst->rx_duration = __le32_to_cpu(src->rx_duration); /* FIXME: expose 10.2 specific values */ @@ -5514,7 +5511,8 @@ static struct sk_buff *ath10k_wmi_10_2_op_gen_init(struct ath10k *ar) config.num_vdevs = __cpu_to_le32(TARGET_10X_NUM_VDEVS); config.num_peer_keys = __cpu_to_le32(TARGET_10X_NUM_PEER_KEYS); - if (test_bit(WMI_SERVICE_PEER_STATS, ar->wmi.svc_map)) { + + if (ath10k_peer_stats_enabled(ar)) { config.num_peers = __cpu_to_le32(TARGET_10X_TX_STATS_NUM_PEERS); 
config.num_tids = __cpu_to_le32(TARGET_10X_TX_STATS_NUM_TIDS); } else { @@ -5576,7 +5574,7 @@ static struct sk_buff *ath10k_wmi_10_2_op_gen_init(struct ath10k *ar) test_bit(WMI_SERVICE_COEX_GPIO, ar->wmi.svc_map)) features |= WMI_10_2_COEX_GPIO; - if (test_bit(WMI_SERVICE_PEER_STATS, ar->wmi.svc_map)) + if (ath10k_peer_stats_enabled(ar)) features |= WMI_10_2_PEER_STATS; cmd->resource_config.feature_mask = __cpu_to_le32(features); From ccd63c20fe834a3f98f46f0447e5f106c4ffa2a4 Mon Sep 17 00:00:00 2001 From: Weongyo Jeong Date: Tue, 15 Mar 2016 10:57:44 -0700 Subject: [PATCH 0075/1649] netfilter: nf_conntrack: Uses pr_fmt() for logging. Uses pr_fmt() macro for debugging messages of nf_conntrack module. Signed-off-by: Weongyo Jeong Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nf_conntrack_core.c | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index afde5f5e728a..2fd607408998 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -12,6 +12,8 @@ * published by the Free Software Foundation. */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include #include #include @@ -966,7 +968,7 @@ init_conntrack(struct net *net, struct nf_conn *tmpl, if (!l4proto->new(ct, skb, dataoff, timeouts)) { nf_conntrack_free(ct); - pr_debug("init conntrack: can't track with proto module\n"); + pr_debug("can't track with proto module\n"); return NULL; } @@ -988,7 +990,7 @@ init_conntrack(struct net *net, struct nf_conn *tmpl, spin_lock(&nf_conntrack_expect_lock); exp = nf_ct_find_expectation(net, zone, tuple); if (exp) { - pr_debug("conntrack: expectation arrives ct=%p exp=%p\n", + pr_debug("expectation arrives ct=%p exp=%p\n", ct, exp); /* Welcome, Mr. Bond. We've been expecting you... */ __set_bit(IPS_EXPECTED_BIT, &ct->status); @@ -1053,7 +1055,7 @@ resolve_normal_ct(struct net *net, struct nf_conn *tmpl, if (!nf_ct_get_tuple(skb, skb_network_offset(skb), dataoff, l3num, protonum, net, &tuple, l3proto, l4proto)) { - pr_debug("resolve_normal_ct: Can't get tuple\n"); + pr_debug("Can't get tuple\n"); return NULL; } @@ -1079,14 +1081,13 @@ resolve_normal_ct(struct net *net, struct nf_conn *tmpl, } else { /* Once we've had two way comms, always ESTABLISHED. */ if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) { - pr_debug("nf_conntrack_in: normal packet for %p\n", ct); + pr_debug("normal packet for %p\n", ct); *ctinfo = IP_CT_ESTABLISHED; } else if (test_bit(IPS_EXPECTED_BIT, &ct->status)) { - pr_debug("nf_conntrack_in: related packet for %p\n", - ct); + pr_debug("related packet for %p\n", ct); *ctinfo = IP_CT_RELATED; } else { - pr_debug("nf_conntrack_in: new packet for %p\n", ct); + pr_debug("new packet for %p\n", ct); *ctinfo = IP_CT_NEW; } *set_reply = 0; From 2da62906b1e298695e1bb725927041cd59942c98 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sat, 14 Mar 2015 21:13:46 -0400 Subject: [PATCH 0076/1649] [net] drop 'size' argument of sock_recvmsg() all callers have it equal to msg_data_left(msg). 
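With the size argument gone, the amount to receive is whatever is left in the iterator attached to the msghdr. A minimal caller sketch (buffer, socket and length names are illustrative):

	struct msghdr msg = { };
	struct kvec vec = { .iov_base = buf, .iov_len = buflen };
	int ret;

	iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, &vec, 1, buflen);

	while (msg_data_left(&msg)) {
		ret = sock_recvmsg(sock, &msg, MSG_WAITALL);
		if (ret <= 0)
			break;
	}

sock_recvmsg() now derives the length internally from msg_data_left(), so a caller that previously tracked "received vs. requested" by hand can simply loop until the iterator is drained, as the iscsi_target change below does.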
Signed-off-by: Al Viro --- drivers/target/iscsi/iscsi_target_util.c | 5 ++--- include/linux/net.h | 3 +-- net/socket.c | 23 ++++++++++------------- 3 files changed, 13 insertions(+), 18 deletions(-) diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c index 428b0d9e3dba..57720385a751 100644 --- a/drivers/target/iscsi/iscsi_target_util.c +++ b/drivers/target/iscsi/iscsi_target_util.c @@ -1283,9 +1283,8 @@ static int iscsit_do_rx_data( iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, count->iov, count->iov_count, data); - while (total_rx < data) { - rx_loop = sock_recvmsg(conn->sock, &msg, - (data - total_rx), MSG_WAITALL); + while (msg_data_left(&msg)) { + rx_loop = sock_recvmsg(conn->sock, &msg, MSG_WAITALL); if (rx_loop <= 0) { pr_debug("rx_loop: %d total_rx: %d\n", rx_loop, total_rx); diff --git a/include/linux/net.h b/include/linux/net.h index 49175e4ced11..72c1e0622ce2 100644 --- a/include/linux/net.h +++ b/include/linux/net.h @@ -218,8 +218,7 @@ int sock_create_lite(int family, int type, int proto, struct socket **res); struct socket *sock_alloc(void); void sock_release(struct socket *sock); int sock_sendmsg(struct socket *sock, struct msghdr *msg); -int sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, - int flags); +int sock_recvmsg(struct socket *sock, struct msghdr *msg, int flags); struct file *sock_alloc_file(struct socket *sock, int flags, const char *dname); struct socket *sockfd_lookup(int fd, int *err); struct socket *sock_from_file(struct file *file, int *err); diff --git a/net/socket.c b/net/socket.c index 5f77a8e93830..956426e347af 100644 --- a/net/socket.c +++ b/net/socket.c @@ -709,17 +709,16 @@ void __sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk, EXPORT_SYMBOL_GPL(__sock_recv_ts_and_drops); static inline int sock_recvmsg_nosec(struct socket *sock, struct msghdr *msg, - size_t size, int flags) + int flags) { - return sock->ops->recvmsg(sock, msg, size, flags); + return sock->ops->recvmsg(sock, msg, msg_data_left(msg), flags); } -int sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, - int flags) +int sock_recvmsg(struct socket *sock, struct msghdr *msg, int flags) { - int err = security_socket_recvmsg(sock, msg, size, flags); + int err = security_socket_recvmsg(sock, msg, msg_data_left(msg), flags); - return err ?: sock_recvmsg_nosec(sock, msg, size, flags); + return err ?: sock_recvmsg_nosec(sock, msg, flags); } EXPORT_SYMBOL(sock_recvmsg); @@ -746,7 +745,7 @@ int kernel_recvmsg(struct socket *sock, struct msghdr *msg, iov_iter_kvec(&msg->msg_iter, READ | ITER_KVEC, vec, num, size); set_fs(KERNEL_DS); - result = sock_recvmsg(sock, msg, size, flags); + result = sock_recvmsg(sock, msg, flags); set_fs(oldfs); return result; } @@ -796,7 +795,7 @@ static ssize_t sock_read_iter(struct kiocb *iocb, struct iov_iter *to) if (!iov_iter_count(to)) /* Match SYS5 behaviour */ return 0; - res = sock_recvmsg(sock, &msg, iov_iter_count(to), msg.msg_flags); + res = sock_recvmsg(sock, &msg, msg.msg_flags); *to = msg.msg_iter; return res; } @@ -1696,7 +1695,7 @@ SYSCALL_DEFINE6(recvfrom, int, fd, void __user *, ubuf, size_t, size, msg.msg_iocb = NULL; if (sock->file->f_flags & O_NONBLOCK) flags |= MSG_DONTWAIT; - err = sock_recvmsg(sock, &msg, iov_iter_count(&msg.msg_iter), flags); + err = sock_recvmsg(sock, &msg, flags); if (err >= 0 && addr != NULL) { err2 = move_addr_to_user(&address, @@ -2073,7 +2072,7 @@ static int ___sys_recvmsg(struct socket *sock, struct user_msghdr __user *msg, struct iovec 
iovstack[UIO_FASTIOV]; struct iovec *iov = iovstack; unsigned long cmsg_ptr; - int total_len, len; + int len; ssize_t err; /* kernel mode address */ @@ -2091,7 +2090,6 @@ static int ___sys_recvmsg(struct socket *sock, struct user_msghdr __user *msg, err = copy_msghdr_from_user(msg_sys, msg, &uaddr, &iov); if (err < 0) return err; - total_len = iov_iter_count(&msg_sys->msg_iter); cmsg_ptr = (unsigned long)msg_sys->msg_control; msg_sys->msg_flags = flags & (MSG_CMSG_CLOEXEC|MSG_CMSG_COMPAT); @@ -2101,8 +2099,7 @@ static int ___sys_recvmsg(struct socket *sock, struct user_msghdr __user *msg, if (sock->file->f_flags & O_NONBLOCK) flags |= MSG_DONTWAIT; - err = (nosec ? sock_recvmsg_nosec : sock_recvmsg)(sock, msg_sys, - total_len, flags); + err = (nosec ? sock_recvmsg_nosec : sock_recvmsg)(sock, msg_sys, flags); if (err < 0) goto out_freeiov; len = err; From ac28634456867b23b95faccba7997a62ec430603 Mon Sep 17 00:00:00 2001 From: Stephane Bryant Date: Sat, 26 Mar 2016 08:42:10 +0100 Subject: [PATCH 0077/1649] netfilter: bridge: add nf_afinfo to enable queuing to userspace This just adds and registers a nf_afinfo for the ethernet bridge, which enables queuing to userspace for the AF_BRIDGE family. No checksum computation is done. Signed-off-by: Stephane Bryant Signed-off-by: Pablo Neira Ayuso --- net/bridge/netfilter/nf_tables_bridge.c | 47 +++++++++++++++++++++++-- 1 file changed, 45 insertions(+), 2 deletions(-) diff --git a/net/bridge/netfilter/nf_tables_bridge.c b/net/bridge/netfilter/nf_tables_bridge.c index 7fcdd7261d88..a78c4e2826e5 100644 --- a/net/bridge/netfilter/nf_tables_bridge.c +++ b/net/bridge/netfilter/nf_tables_bridge.c @@ -162,15 +162,57 @@ static const struct nf_chain_type filter_bridge = { (1 << NF_BR_POST_ROUTING), }; +static void nf_br_saveroute(const struct sk_buff *skb, + struct nf_queue_entry *entry) +{ +} + +static int nf_br_reroute(struct net *net, struct sk_buff *skb, + const struct nf_queue_entry *entry) +{ + return 0; +} + +static __sum16 nf_br_checksum(struct sk_buff *skb, unsigned int hook, + unsigned int dataoff, u_int8_t protocol) +{ + return 0; +} + +static __sum16 nf_br_checksum_partial(struct sk_buff *skb, unsigned int hook, + unsigned int dataoff, unsigned int len, + u_int8_t protocol) +{ + return 0; +} + +static int nf_br_route(struct net *net, struct dst_entry **dst, + struct flowi *fl, bool strict __always_unused) +{ + return 0; +} + +static const struct nf_afinfo nf_br_afinfo = { + .family = AF_BRIDGE, + .checksum = nf_br_checksum, + .checksum_partial = nf_br_checksum_partial, + .route = nf_br_route, + .saveroute = nf_br_saveroute, + .reroute = nf_br_reroute, + .route_key_size = 0, +}; + static int __init nf_tables_bridge_init(void) { int ret; + nf_register_afinfo(&nf_br_afinfo); nft_register_chain_type(&filter_bridge); ret = register_pernet_subsys(&nf_tables_bridge_net_ops); - if (ret < 0) + if (ret < 0) { nft_unregister_chain_type(&filter_bridge); - + nf_unregister_afinfo(&nf_br_afinfo); + } return ret; } @@ -178,6 +220,7 @@ static void __exit nf_tables_bridge_exit(void) { unregister_pernet_subsys(&nf_tables_bridge_net_ops); nft_unregister_chain_type(&filter_bridge); + nf_unregister_afinfo(&nf_br_afinfo); } module_init(nf_tables_bridge_init); From 15824ab29f364abd3299ecd17ea48473d971aa79 Mon Sep 17 00:00:00 2001 From: Stephane Bryant Date: Sat, 26 Mar 2016 08:42:11 +0100 Subject: [PATCH 0078/1649] netfilter: bridge: pass L2 header and VLAN as netlink attributes in queues to userspace - This creates 2 netlink attribute NFQA_VLAN and NFQA_L2HDR. 
- These are filled up for the PF_BRIDGE family on the way to userspace. - NFQA_VLAN is a nested attribute, with the NFQA_VLAN_PROTO and the NFQA_VLAN_TCI carrying the corresponding vlan_proto and vlan_tci fields from the skb using big endian ordering (and using the CFI bit as the VLAN_TAG_PRESENT flag in vlan_tci as in the skb) Signed-off-by: Stephane Bryant Signed-off-by: Pablo Neira Ayuso --- .../uapi/linux/netfilter/nfnetlink_queue.h | 10 ++++ net/netfilter/nfnetlink_queue.c | 58 +++++++++++++++++++ 2 files changed, 68 insertions(+) diff --git a/include/uapi/linux/netfilter/nfnetlink_queue.h b/include/uapi/linux/netfilter/nfnetlink_queue.h index b67a853638ff..ae30841ff94e 100644 --- a/include/uapi/linux/netfilter/nfnetlink_queue.h +++ b/include/uapi/linux/netfilter/nfnetlink_queue.h @@ -30,6 +30,14 @@ struct nfqnl_msg_packet_timestamp { __aligned_be64 usec; }; +enum nfqnl_vlan_attr { + NFQA_VLAN_UNSPEC, + NFQA_VLAN_PROTO, /* __be16 skb vlan_proto */ + NFQA_VLAN_TCI, /* __be16 skb htons(vlan_tci) */ + __NFQA_VLAN_MAX, +}; +#define NFQA_VLAN_MAX (__NFQA_VLAN_MAX + 1) + enum nfqnl_attr_type { NFQA_UNSPEC, NFQA_PACKET_HDR, @@ -50,6 +58,8 @@ enum nfqnl_attr_type { NFQA_UID, /* __u32 sk uid */ NFQA_GID, /* __u32 sk gid */ NFQA_SECCTX, /* security context string */ + NFQA_VLAN, /* nested attribute: packet vlan info */ + NFQA_L2HDR, /* full L2 header */ __NFQA_MAX }; diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c index 75429997ed41..6889c7c855d1 100644 --- a/net/netfilter/nfnetlink_queue.c +++ b/net/netfilter/nfnetlink_queue.c @@ -295,6 +295,59 @@ static u32 nfqnl_get_sk_secctx(struct sk_buff *skb, char **secdata) return seclen; } +static u32 nfqnl_get_bridge_size(struct nf_queue_entry *entry) +{ + struct sk_buff *entskb = entry->skb; + u32 nlalen = 0; + + if (entry->state.pf != PF_BRIDGE || !skb_mac_header_was_set(entskb)) + return 0; + + if (skb_vlan_tag_present(entskb)) + nlalen += nla_total_size(nla_total_size(sizeof(__be16)) + + nla_total_size(sizeof(__be16))); + + if (entskb->network_header > entskb->mac_header) + nlalen += nla_total_size((entskb->network_header - + entskb->mac_header)); + + return nlalen; +} + +static int nfqnl_put_bridge(struct nf_queue_entry *entry, struct sk_buff *skb) +{ + struct sk_buff *entskb = entry->skb; + + if (entry->state.pf != PF_BRIDGE || !skb_mac_header_was_set(entskb)) + return 0; + + if (skb_vlan_tag_present(entskb)) { + struct nlattr *nest; + + nest = nla_nest_start(skb, NFQA_VLAN | NLA_F_NESTED); + if (!nest) + goto nla_put_failure; + + if (nla_put_be16(skb, NFQA_VLAN_TCI, htons(entskb->vlan_tci)) || + nla_put_be16(skb, NFQA_VLAN_PROTO, entskb->vlan_proto)) + goto nla_put_failure; + + nla_nest_end(skb, nest); + } + + if (entskb->mac_header < entskb->network_header) { + int len = (int)(entskb->network_header - entskb->mac_header); + + if (nla_put(skb, NFQA_L2HDR, len, skb_mac_header(entskb))) + goto nla_put_failure; + } + + return 0; + +nla_put_failure: + return -1; +} + static struct sk_buff * nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue, struct nf_queue_entry *entry, @@ -334,6 +387,8 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue, if (entskb->tstamp.tv64) size += nla_total_size(sizeof(struct nfqnl_msg_packet_timestamp)); + size += nfqnl_get_bridge_size(entry); + if (entry->state.hook <= NF_INET_FORWARD || (entry->state.hook == NF_INET_POST_ROUTING && entskb->sk == NULL)) csum_verify = !skb_csum_unnecessary(entskb); @@ -497,6 +552,9 @@ 
nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue, } } + if (nfqnl_put_bridge(entry, skb) < 0) + goto nla_put_failure; + if (entskb->tstamp.tv64) { struct nfqnl_msg_packet_timestamp ts; struct timespec64 kts = ktime_to_timespec64(skb->tstamp); From 8d45ff22f1b43249f0cf1baafe0262ca10d1666e Mon Sep 17 00:00:00 2001 From: Stephane Bryant Date: Sat, 26 Mar 2016 08:42:12 +0100 Subject: [PATCH 0079/1649] netfilter: bridge: nf queue verdict to use NFQA_VLAN and NFQA_L2HDR This makes nf queues use NFQA_VLAN and NFQA_L2HDR in verdict to modify the original skb Signed-off-by: Stephane Bryant Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nfnetlink_queue.c | 47 +++++++++++++++++++++++++++++++++ 1 file changed, 47 insertions(+) diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c index 6889c7c855d1..5bebe78b9bbd 100644 --- a/net/netfilter/nfnetlink_queue.c +++ b/net/netfilter/nfnetlink_queue.c @@ -964,12 +964,18 @@ static struct notifier_block nfqnl_rtnl_notifier = { .notifier_call = nfqnl_rcv_nl_event, }; +static const struct nla_policy nfqa_vlan_policy[NFQA_VLAN_MAX + 1] = { + [NFQA_VLAN_TCI] = { .type = NLA_U16}, + [NFQA_VLAN_PROTO] = { .type = NLA_U16}, +}; + static const struct nla_policy nfqa_verdict_policy[NFQA_MAX+1] = { [NFQA_VERDICT_HDR] = { .len = sizeof(struct nfqnl_msg_verdict_hdr) }, [NFQA_MARK] = { .type = NLA_U32 }, [NFQA_PAYLOAD] = { .type = NLA_UNSPEC }, [NFQA_CT] = { .type = NLA_UNSPEC }, [NFQA_EXP] = { .type = NLA_UNSPEC }, + [NFQA_VLAN] = { .type = NLA_NESTED }, }; static const struct nla_policy nfqa_verdict_batch_policy[NFQA_MAX+1] = { @@ -1083,6 +1089,40 @@ static struct nf_conn *nfqnl_ct_parse(struct nfnl_ct_hook *nfnl_ct, return ct; } +static int nfqa_parse_bridge(struct nf_queue_entry *entry, + const struct nlattr * const nfqa[]) +{ + if (nfqa[NFQA_VLAN]) { + struct nlattr *tb[NFQA_VLAN_MAX + 1]; + int err; + + err = nla_parse_nested(tb, NFQA_VLAN_MAX, nfqa[NFQA_VLAN], + nfqa_vlan_policy); + if (err < 0) + return err; + + if (!tb[NFQA_VLAN_TCI] || !tb[NFQA_VLAN_PROTO]) + return -EINVAL; + + entry->skb->vlan_tci = ntohs(nla_get_be16(tb[NFQA_VLAN_TCI])); + entry->skb->vlan_proto = nla_get_be16(tb[NFQA_VLAN_PROTO]); + } + + if (nfqa[NFQA_L2HDR]) { + int mac_header_len = entry->skb->network_header - + entry->skb->mac_header; + + if (mac_header_len != nla_len(nfqa[NFQA_L2HDR])) + return -EINVAL; + else if (mac_header_len > 0) + memcpy(skb_mac_header(entry->skb), + nla_data(nfqa[NFQA_L2HDR]), + mac_header_len); + } + + return 0; +} + static int nfqnl_recv_verdict(struct net *net, struct sock *ctnl, struct sk_buff *skb, const struct nlmsghdr *nlh, @@ -1098,6 +1138,7 @@ static int nfqnl_recv_verdict(struct net *net, struct sock *ctnl, struct nfnl_ct_hook *nfnl_ct; struct nf_conn *ct = NULL; struct nfnl_queue_net *q = nfnl_queue_pernet(net); + int err; queue = instance_lookup(q, queue_num); if (!queue) @@ -1124,6 +1165,12 @@ static int nfqnl_recv_verdict(struct net *net, struct sock *ctnl, ct = nfqnl_ct_parse(nfnl_ct, nlh, nfqa, entry, &ctinfo); } + if (entry->state.pf == PF_BRIDGE) { + err = nfqa_parse_bridge(entry, nfqa); + if (err < 0) + return err; + } + if (nfqa[NFQA_PAYLOAD]) { u16 payload_len = nla_len(nfqa[NFQA_PAYLOAD]); int diff = payload_len - entry->skb->len; From 8fef24ca90fb79de8454e26e9f3eae6cc610de1a Mon Sep 17 00:00:00 2001 From: Liping Zhang Date: Mon, 28 Mar 2016 22:27:27 +0800 Subject: [PATCH 0080/1649] netfilter: ip6t_SYNPROXY: remove magic number for hop_limit Replace '64' with the per-net ipv6_devconf_all's 
hop_limit when building the ipv6 header. Signed-off-by: Liping Zhang Signed-off-by: Pablo Neira Ayuso --- net/ipv6/netfilter/ip6t_SYNPROXY.c | 56 ++++++++++++++++-------------- 1 file changed, 30 insertions(+), 26 deletions(-) diff --git a/net/ipv6/netfilter/ip6t_SYNPROXY.c b/net/ipv6/netfilter/ip6t_SYNPROXY.c index 3deed5860a42..5d778dd11f66 100644 --- a/net/ipv6/netfilter/ip6t_SYNPROXY.c +++ b/net/ipv6/netfilter/ip6t_SYNPROXY.c @@ -20,15 +20,16 @@ #include static struct ipv6hdr * -synproxy_build_ip(struct sk_buff *skb, const struct in6_addr *saddr, - const struct in6_addr *daddr) +synproxy_build_ip(struct net *net, struct sk_buff *skb, + const struct in6_addr *saddr, + const struct in6_addr *daddr) { struct ipv6hdr *iph; skb_reset_network_header(skb); iph = (struct ipv6hdr *)skb_put(skb, sizeof(*iph)); ip6_flow_hdr(iph, 0, 0); - iph->hop_limit = 64; //XXX + iph->hop_limit = net->ipv6.devconf_all->hop_limit; iph->nexthdr = IPPROTO_TCP; iph->saddr = *saddr; iph->daddr = *daddr; @@ -37,13 +38,12 @@ synproxy_build_ip(struct sk_buff *skb, const struct in6_addr *saddr, } static void -synproxy_send_tcp(const struct synproxy_net *snet, +synproxy_send_tcp(struct net *net, const struct sk_buff *skb, struct sk_buff *nskb, struct nf_conntrack *nfct, enum ip_conntrack_info ctinfo, struct ipv6hdr *niph, struct tcphdr *nth, unsigned int tcp_hdr_size) { - struct net *net = nf_ct_net(snet->tmpl); struct dst_entry *dst; struct flowi6 fl6; @@ -84,7 +84,7 @@ free_nskb: } static void -synproxy_send_client_synack(const struct synproxy_net *snet, +synproxy_send_client_synack(struct net *net, const struct sk_buff *skb, const struct tcphdr *th, const struct synproxy_options *opts) { @@ -103,7 +103,7 @@ synproxy_send_client_synack(const struct synproxy_net *snet, return; skb_reserve(nskb, MAX_TCP_HEADER); - niph = synproxy_build_ip(nskb, &iph->daddr, &iph->saddr); + niph = synproxy_build_ip(net, nskb, &iph->daddr, &iph->saddr); skb_reset_transport_header(nskb); nth = (struct tcphdr *)skb_put(nskb, tcp_hdr_size); @@ -121,15 +121,16 @@ synproxy_send_client_synack(const struct synproxy_net *snet, synproxy_build_options(nth, opts); - synproxy_send_tcp(snet, skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY, + synproxy_send_tcp(net, skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY, niph, nth, tcp_hdr_size); } static void -synproxy_send_server_syn(const struct synproxy_net *snet, +synproxy_send_server_syn(struct net *net, const struct sk_buff *skb, const struct tcphdr *th, const struct synproxy_options *opts, u32 recv_seq) { + struct synproxy_net *snet = synproxy_pernet(net); struct sk_buff *nskb; struct ipv6hdr *iph, *niph; struct tcphdr *nth; @@ -144,7 +145,7 @@ synproxy_send_server_syn(const struct synproxy_net *snet, return; skb_reserve(nskb, MAX_TCP_HEADER); - niph = synproxy_build_ip(nskb, &iph->saddr, &iph->daddr); + niph = synproxy_build_ip(net, nskb, &iph->saddr, &iph->daddr); skb_reset_transport_header(nskb); nth = (struct tcphdr *)skb_put(nskb, tcp_hdr_size); @@ -165,12 +166,12 @@ synproxy_send_server_syn(const struct synproxy_net *snet, synproxy_build_options(nth, opts); - synproxy_send_tcp(snet, skb, nskb, &snet->tmpl->ct_general, IP_CT_NEW, + synproxy_send_tcp(net, skb, nskb, &snet->tmpl->ct_general, IP_CT_NEW, niph, nth, tcp_hdr_size); } static void -synproxy_send_server_ack(const struct synproxy_net *snet, +synproxy_send_server_ack(struct net *net, const struct ip_ct_tcp *state, const struct sk_buff *skb, const struct tcphdr *th, const struct synproxy_options *opts) @@ -189,7 +190,7 @@ 
synproxy_send_server_ack(const struct synproxy_net *snet, return; skb_reserve(nskb, MAX_TCP_HEADER); - niph = synproxy_build_ip(nskb, &iph->daddr, &iph->saddr); + niph = synproxy_build_ip(net, nskb, &iph->daddr, &iph->saddr); skb_reset_transport_header(nskb); nth = (struct tcphdr *)skb_put(nskb, tcp_hdr_size); @@ -205,11 +206,11 @@ synproxy_send_server_ack(const struct synproxy_net *snet, synproxy_build_options(nth, opts); - synproxy_send_tcp(snet, skb, nskb, NULL, 0, niph, nth, tcp_hdr_size); + synproxy_send_tcp(net, skb, nskb, NULL, 0, niph, nth, tcp_hdr_size); } static void -synproxy_send_client_ack(const struct synproxy_net *snet, +synproxy_send_client_ack(struct net *net, const struct sk_buff *skb, const struct tcphdr *th, const struct synproxy_options *opts) { @@ -227,7 +228,7 @@ synproxy_send_client_ack(const struct synproxy_net *snet, return; skb_reserve(nskb, MAX_TCP_HEADER); - niph = synproxy_build_ip(nskb, &iph->saddr, &iph->daddr); + niph = synproxy_build_ip(net, nskb, &iph->saddr, &iph->daddr); skb_reset_transport_header(nskb); nth = (struct tcphdr *)skb_put(nskb, tcp_hdr_size); @@ -243,15 +244,16 @@ synproxy_send_client_ack(const struct synproxy_net *snet, synproxy_build_options(nth, opts); - synproxy_send_tcp(snet, skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY, + synproxy_send_tcp(net, skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY, niph, nth, tcp_hdr_size); } static bool -synproxy_recv_client_ack(const struct synproxy_net *snet, +synproxy_recv_client_ack(struct net *net, const struct sk_buff *skb, const struct tcphdr *th, struct synproxy_options *opts, u32 recv_seq) { + struct synproxy_net *snet = synproxy_pernet(net); int mss; mss = __cookie_v6_check(ipv6_hdr(skb), th, ntohl(th->ack_seq) - 1); @@ -267,7 +269,7 @@ synproxy_recv_client_ack(const struct synproxy_net *snet, if (opts->options & XT_SYNPROXY_OPT_TIMESTAMP) synproxy_check_timestamp_cookie(opts); - synproxy_send_server_syn(snet, skb, th, opts, recv_seq); + synproxy_send_server_syn(net, skb, th, opts, recv_seq); return true; } @@ -275,7 +277,8 @@ static unsigned int synproxy_tg6(struct sk_buff *skb, const struct xt_action_param *par) { const struct xt_synproxy_info *info = par->targinfo; - struct synproxy_net *snet = synproxy_pernet(par->net); + struct net *net = par->net; + struct synproxy_net *snet = synproxy_pernet(net); struct synproxy_options opts = {}; struct tcphdr *th, _th; @@ -304,12 +307,12 @@ synproxy_tg6(struct sk_buff *skb, const struct xt_action_param *par) XT_SYNPROXY_OPT_SACK_PERM | XT_SYNPROXY_OPT_ECN); - synproxy_send_client_synack(snet, skb, th, &opts); + synproxy_send_client_synack(net, skb, th, &opts); return NF_DROP; } else if (th->ack && !(th->fin || th->rst || th->syn)) { /* ACK from client */ - synproxy_recv_client_ack(snet, skb, th, &opts, ntohl(th->seq)); + synproxy_recv_client_ack(net, skb, th, &opts, ntohl(th->seq)); return NF_DROP; } @@ -320,7 +323,8 @@ static unsigned int ipv6_synproxy_hook(void *priv, struct sk_buff *skb, const struct nf_hook_state *nhs) { - struct synproxy_net *snet = synproxy_pernet(nhs->net); + struct net *net = nhs->net; + struct synproxy_net *snet = synproxy_pernet(net); enum ip_conntrack_info ctinfo; struct nf_conn *ct; struct nf_conn_synproxy *synproxy; @@ -384,7 +388,7 @@ static unsigned int ipv6_synproxy_hook(void *priv, * therefore we need to add 1 to make the SYN sequence * number match the one of first SYN. 
*/ - if (synproxy_recv_client_ack(snet, skb, th, &opts, + if (synproxy_recv_client_ack(net, skb, th, &opts, ntohl(th->seq) + 1)) this_cpu_inc(snet->stats->cookie_retrans); @@ -410,12 +414,12 @@ static unsigned int ipv6_synproxy_hook(void *priv, XT_SYNPROXY_OPT_SACK_PERM); swap(opts.tsval, opts.tsecr); - synproxy_send_server_ack(snet, state, skb, th, &opts); + synproxy_send_server_ack(net, state, skb, th, &opts); nf_ct_seqadj_init(ct, ctinfo, synproxy->isn - ntohl(th->seq)); swap(opts.tsval, opts.tsecr); - synproxy_send_client_ack(snet, skb, th, &opts); + synproxy_send_client_ack(net, skb, th, &opts); consume_skb(skb); return NF_STOLEN; From 03098268a30d75188f15dd8fda8f0c896d2846e5 Mon Sep 17 00:00:00 2001 From: Aviya Erenfeld Date: Thu, 18 Feb 2016 14:09:33 +0200 Subject: [PATCH 0081/1649] iwlwifi: mvm: add LQM vendor command and notification LQM stands for Link Quality Measurement. The firmware will collect a defined set of statitics (see the notification for details) that allow to know how busy the medium is. The driver issues a request to the firmware that includes the duration of the measurement (the firmware needs to be on channel for that amount of time) and the timeout (in case the firmware has a lot of offchannel activities). If the timeout elapses, the firmware will send partial results which are still valuable. In case of disassociation / channel switch and alike, the driver is in charge of stopping the measurements and the firmware will reply with partial results. The user space API for now is debugfs only and will be implmemented in an upcoming patch. Signed-off-by: Aviya Erenfeld Signed-off-by: Emmanuel Grumbach --- .../net/wireless/intel/iwlwifi/iwl-fw-file.h | 2 + .../net/wireless/intel/iwlwifi/mvm/fw-api.h | 62 ++++++++++++++++ .../net/wireless/intel/iwlwifi/mvm/mac80211.c | 10 +++ drivers/net/wireless/intel/iwlwifi/mvm/mvm.h | 12 ++++ drivers/net/wireless/intel/iwlwifi/mvm/ops.c | 9 +++ .../net/wireless/intel/iwlwifi/mvm/utils.c | 71 +++++++++++++++++++ 6 files changed, 166 insertions(+) diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h b/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h index 3a72b9715930..c82b94167ac6 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h @@ -326,6 +326,7 @@ typedef unsigned int __bitwise__ iwl_ucode_tlv_capa_t; * regular image. * @IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG: support getting more shared * memory addresses from the firmware. + * @IWL_UCODE_TLV_CAPA_LQM_SUPPORT: supports Link Quality Measurement * * @NUM_IWL_UCODE_TLV_CAPA: number of bits used */ @@ -364,6 +365,7 @@ enum iwl_ucode_tlv_capa { IWL_UCODE_TLV_CAPA_CTDP_SUPPORT = (__force iwl_ucode_tlv_capa_t)76, IWL_UCODE_TLV_CAPA_USNIFFER_UNIFIED = (__force iwl_ucode_tlv_capa_t)77, IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG = (__force iwl_ucode_tlv_capa_t)80, + IWL_UCODE_TLV_CAPA_LQM_SUPPORT = (__force iwl_ucode_tlv_capa_t)81, NUM_IWL_UCODE_TLV_CAPA #ifdef __CHECKER__ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h index 61711b10ff82..e6bd0c8d4cc0 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h @@ -279,6 +279,11 @@ enum { /* Please keep this enum *SORTED* by hex value. * Needed for binary search, otherwise a warning will be triggered. 
*/ +enum iwl_mac_conf_subcmd_ids { + LINK_QUALITY_MEASUREMENT_CMD = 0x1, + LINK_QUALITY_MEASUREMENT_COMPLETE_NOTIF = 0xFE, +}; + enum iwl_phy_ops_subcmd_ids { CMD_DTS_MEASUREMENT_TRIGGER_WIDE = 0x0, CTDP_CONFIG_CMD = 0x03, @@ -307,6 +312,7 @@ enum { LEGACY_GROUP = 0x0, LONG_GROUP = 0x1, SYSTEM_GROUP = 0x2, + MAC_CONF_GROUP = 0x3, PHY_OPS_GROUP = 0x4, DATA_PATH_GROUP = 0x5, PROT_OFFLOAD_GROUP = 0xb, @@ -2017,4 +2023,60 @@ struct iwl_stored_beacon_notif { u8 data[MAX_STORED_BEACON_SIZE]; } __packed; /* WOWLAN_STROED_BEACON_INFO_S_VER_1 */ +#define LQM_NUMBER_OF_STATIONS_IN_REPORT 16 + +enum iwl_lqm_cmd_operatrions { + LQM_CMD_OPERATION_START_MEASUREMENT = 0x01, + LQM_CMD_OPERATION_STOP_MEASUREMENT = 0x02, +}; + +enum iwl_lqm_status { + LQM_STATUS_SUCCESS = 0, + LQM_STATUS_TIMEOUT = 1, + LQM_STATUS_ABORT = 2, +}; + +/** + * Link Quality Measurement command + * @cmd_operatrion: command operation to be performed (start or stop) + * as defined above. + * @mac_id: MAC ID the measurement applies to. + * @measurement_time: time of the total measurement to be performed, in uSec. + * @timeout: maximum time allowed until a response is sent, in uSec. + */ +struct iwl_link_qual_msrmnt_cmd { + __le32 cmd_operation; + __le32 mac_id; + __le32 measurement_time; + __le32 timeout; +} __packed /* LQM_CMD_API_S_VER_1 */; + +/** + * Link Quality Measurement notification + * + * @frequent_stations_air_time: an array containing the total air time + * (in uSec) used by the most frequently transmitting stations. + * @number_of_stations: the number of uniqe stations included in the array + * (a number between 0 to 16) + * @total_air_time_other_stations: the total air time (uSec) used by all the + * stations which are not included in the above report. + * @time_in_measurement_window: the total time in uSec in which a measurement + * took place. + * @tx_frame_dropped: the number of TX frames dropped due to retry limit during + * measurement + * @mac_id: MAC ID the measurement applies to. + * @status: return status. may be one of the LQM_STATUS_* defined above. + * @reserved: reserved. + */ +struct iwl_link_qual_msrmnt_notif { + __le32 frequent_stations_air_time[LQM_NUMBER_OF_STATIONS_IN_REPORT]; + __le32 number_of_stations; + __le32 total_air_time_other_stations; + __le32 time_in_measurement_window; + __le32 tx_frame_dropped; + __le32 mac_id; + __le32 status; + __le32 reserved[3]; +} __packed; /* LQM_MEASUREMENT_COMPLETE_NTF_API_S_VER1 */ + #endif /* __fw_api_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index 76e649c680a1..cff9c16e4920 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -1821,6 +1821,11 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, if (changes & BSS_CHANGED_ASSOC && bss_conf->assoc) iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif); + if (changes & BSS_CHANGED_ASSOC && !bss_conf->assoc && + mvmvif->lqm_active) + iwl_mvm_send_lqm_cmd(vif, LQM_CMD_OPERATION_STOP_MEASUREMENT, + 0, 0); + /* * If we're not associated yet, take the (new) BSSID before associating * so the firmware knows. 
If we're already associated, then use the old @@ -3628,6 +3633,11 @@ static int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw, break; case NL80211_IFTYPE_STATION: + if (mvmvif->lqm_active) + iwl_mvm_send_lqm_cmd(vif, + LQM_CMD_OPERATION_STOP_MEASUREMENT, + 0, 0); + /* Schedule the time event to a bit before beacon 1, * to make sure we're in the new channel when the * GO/AP arrives. diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index 6c67c0f631c5..0668601f377c 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -453,6 +453,12 @@ struct iwl_mvm_vif { /* TCP Checksum Offload */ netdev_features_t features; + + /* + * link quality measurement - used to check whether this interface + * is in the middle of a link quality measurement + */ + bool lqm_active; }; static inline struct iwl_mvm_vif * @@ -1637,4 +1643,10 @@ unsigned int iwl_mvm_get_wd_timeout(struct iwl_mvm *mvm, void iwl_mvm_connection_loss(struct iwl_mvm *mvm, struct ieee80211_vif *vif, const char *errmsg); +/* Link Quality Measurement */ +int iwl_mvm_send_lqm_cmd(struct ieee80211_vif *vif, + enum iwl_lqm_cmd_operatrions operation, + u32 duration, u32 timeout); +bool iwl_mvm_lqm_active(struct iwl_mvm *mvm); + #endif /* __IWL_MVM_H__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index ccf6ecd21b18..46a22fd9f0d6 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c @@ -425,6 +425,14 @@ static const struct iwl_hcmd_names iwl_mvm_system_names[] = { HCMD_NAME(SHARED_MEM_CFG_CMD), }; +/* Please keep this array *SORTED* by hex value. + * Access is done through binary search + */ +static const struct iwl_hcmd_names iwl_mvm_mac_conf_names[] = { + HCMD_NAME(LINK_QUALITY_MEASUREMENT_CMD), + HCMD_NAME(LINK_QUALITY_MEASUREMENT_COMPLETE_NOTIF), +}; + /* Please keep this array *SORTED* by hex value. 
* Access is done through binary search */ @@ -457,6 +465,7 @@ static const struct iwl_hcmd_arr iwl_mvm_groups[] = { [LEGACY_GROUP] = HCMD_ARR(iwl_mvm_legacy_names), [LONG_GROUP] = HCMD_ARR(iwl_mvm_legacy_names), [SYSTEM_GROUP] = HCMD_ARR(iwl_mvm_system_names), + [MAC_CONF_GROUP] = HCMD_ARR(iwl_mvm_mac_conf_names), [PHY_OPS_GROUP] = HCMD_ARR(iwl_mvm_phy_names), [DATA_PATH_GROUP] = HCMD_ARR(iwl_mvm_data_path_names), [PROT_OFFLOAD_GROUP] = HCMD_ARR(iwl_mvm_prot_offload_names), diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c index 53cdc5760f68..2440248c8e69 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c @@ -1079,3 +1079,74 @@ void iwl_mvm_connection_loss(struct iwl_mvm *mvm, struct ieee80211_vif *vif, out: ieee80211_connection_loss(vif); } + +int iwl_mvm_send_lqm_cmd(struct ieee80211_vif *vif, + enum iwl_lqm_cmd_operatrions operation, + u32 duration, u32 timeout) +{ + struct iwl_mvm_vif *mvm_vif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_link_qual_msrmnt_cmd cmd = { + .cmd_operation = cpu_to_le32(operation), + .mac_id = cpu_to_le32(mvm_vif->id), + .measurement_time = cpu_to_le32(duration), + .timeout = cpu_to_le32(timeout), + }; + u32 cmdid = + iwl_cmd_id(LINK_QUALITY_MEASUREMENT_CMD, MAC_CONF_GROUP, 0); + int ret; + + if (!fw_has_capa(&mvm_vif->mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_LQM_SUPPORT)) + return -EOPNOTSUPP; + + if (vif->type != NL80211_IFTYPE_STATION || vif->p2p) + return -EINVAL; + + switch (operation) { + case LQM_CMD_OPERATION_START_MEASUREMENT: + if (iwl_mvm_lqm_active(mvm_vif->mvm)) + return -EBUSY; + if (!vif->bss_conf.assoc) + return -EINVAL; + mvm_vif->lqm_active = true; + break; + case LQM_CMD_OPERATION_STOP_MEASUREMENT: + if (!iwl_mvm_lqm_active(mvm_vif->mvm)) + return -EINVAL; + break; + default: + return -EINVAL; + } + + ret = iwl_mvm_send_cmd_pdu(mvm_vif->mvm, cmdid, 0, sizeof(cmd), + &cmd); + + /* command failed - roll back lqm_active state */ + if (ret) { + mvm_vif->lqm_active = + operation == LQM_CMD_OPERATION_STOP_MEASUREMENT; + } + + return ret; +} + +static void iwl_mvm_lqm_active_iterator(void *_data, u8 *mac, + struct ieee80211_vif *vif) +{ + struct iwl_mvm_vif *mvm_vif = iwl_mvm_vif_from_mac80211(vif); + bool *lqm_active = _data; + + *lqm_active = *lqm_active || mvm_vif->lqm_active; +} + +bool iwl_mvm_lqm_active(struct iwl_mvm *mvm) +{ + bool ret = false; + + lockdep_assert_held(&mvm->mutex); + ieee80211_iterate_active_interfaces_atomic( + mvm->hw, IEEE80211_IFACE_ITER_NORMAL, + iwl_mvm_lqm_active_iterator, &ret); + + return ret; +} From dedfc0f3dba0713734d42efb8300760b27c5a54a Mon Sep 17 00:00:00 2001 From: Aviya Erenfeld Date: Sun, 13 Mar 2016 15:58:59 +0200 Subject: [PATCH 0082/1649] iwlwifi: add a debugfs hook for LQM Add debugfs entry named lqm_send_cmd for kicking a measurement. This hook takes the duration and the timeout as parameter. 
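For readers who want to try the new hook, here is a minimal userspace sketch of how a measurement could be kicked off. The "duration,timeout" format matches the sscanf("%d,%d", ...) parsing in the patch below; the debugfs path is an assumption and depends on how the per-vif iwlmvm directory is laid out on a given system.

/* Illustrative userspace sketch, not part of the patch: trigger an LQM
 * measurement through the lqm_send_cmd debugfs hook.  The path below is a
 * guess; adjust it to the actual per-netdev iwlmvm debugfs directory.
 */
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char **argv)
{
	/* hypothetical location of the per-vif iwlmvm debugfs dir */
	const char *path =
		"/sys/kernel/debug/ieee80211/phy0/netdev:wlan0/iwlmvm/lqm_send_cmd";
	unsigned int duration_us = 100000;	/* time on channel, in usec */
	unsigned int timeout_us = 500000;	/* firmware response timeout, in usec */
	FILE *f;

	if (argc == 3) {
		duration_us = (unsigned int)strtoul(argv[1], NULL, 0);
		timeout_us = (unsigned int)strtoul(argv[2], NULL, 0);
	}

	f = fopen(path, "w");
	if (!f) {
		perror("open lqm_send_cmd");
		return 1;
	}

	/* the hook expects "duration,timeout" */
	fprintf(f, "%u,%u", duration_us, timeout_us);
	fclose(f);
	return 0;
}

The report is then printed by the driver via IWL_INFO once the completion notification arrives (see the notif-wait handler in the diff).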
Signed-off-by: Aviya Erenfeld Signed-off-by: Emmanuel Grumbach --- .../wireless/intel/iwlwifi/mvm/debugfs-vif.c | 85 +++++++++++++++++++ 1 file changed, 85 insertions(+) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c index 14004456bf55..3a279d3403ef 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c @@ -1425,6 +1425,89 @@ static ssize_t iwl_dbgfs_quota_min_read(struct file *file, return simple_read_from_buffer(user_buf, count, ppos, buf, len); } +static const char * const chanwidths[] = { + [NL80211_CHAN_WIDTH_20_NOHT] = "noht", + [NL80211_CHAN_WIDTH_20] = "ht20", + [NL80211_CHAN_WIDTH_40] = "ht40", + [NL80211_CHAN_WIDTH_80] = "vht80", + [NL80211_CHAN_WIDTH_80P80] = "vht80p80", + [NL80211_CHAN_WIDTH_160] = "vht160", +}; + +static bool iwl_mvm_lqm_notif_wait(struct iwl_notif_wait_data *notif_wait, + struct iwl_rx_packet *pkt, void *data) +{ + struct ieee80211_vif *vif = data; + struct iwl_mvm *mvm = + container_of(notif_wait, struct iwl_mvm, notif_wait); + struct iwl_link_qual_msrmnt_notif *report = (void *)pkt->data; + u32 num_of_stations = le32_to_cpu(report->number_of_stations); + int i; + + IWL_INFO(mvm, "LQM report:\n"); + IWL_INFO(mvm, "\tstatus: %d\n", report->status); + IWL_INFO(mvm, "\tmacID: %d\n", le32_to_cpu(report->mac_id)); + IWL_INFO(mvm, "\ttx_frame_dropped: %d\n", + le32_to_cpu(report->tx_frame_dropped)); + IWL_INFO(mvm, "\ttime_in_measurement_window: %d us\n", + le32_to_cpu(report->time_in_measurement_window)); + IWL_INFO(mvm, "\ttotal_air_time_other_stations: %d\n", + le32_to_cpu(report->total_air_time_other_stations)); + IWL_INFO(mvm, "\tchannel_freq: %d\n", + vif->bss_conf.chandef.center_freq1); + IWL_INFO(mvm, "\tchannel_width: %s\n", + chanwidths[vif->bss_conf.chandef.width]); + IWL_INFO(mvm, "\tnumber_of_stations: %d\n", num_of_stations); + for (i = 0; i < num_of_stations; i++) + IWL_INFO(mvm, "\t\tsta[%d]: %d\n", i, + report->frequent_stations_air_time[i]); + + return true; +} + +static ssize_t iwl_dbgfs_lqm_send_cmd_write(struct ieee80211_vif *vif, + char *buf, size_t count, + loff_t *ppos) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm *mvm = mvmvif->mvm; + struct iwl_notification_wait wait_lqm_notif; + static u16 lqm_notif[] = { + WIDE_ID(MAC_CONF_GROUP, + LINK_QUALITY_MEASUREMENT_COMPLETE_NOTIF) + }; + int err; + u32 duration; + u32 timeout; + + if (sscanf(buf, "%d,%d", &duration, &timeout) != 2) + return -EINVAL; + + iwl_init_notification_wait(&mvm->notif_wait, &wait_lqm_notif, + lqm_notif, ARRAY_SIZE(lqm_notif), + iwl_mvm_lqm_notif_wait, vif); + mutex_lock(&mvm->mutex); + err = iwl_mvm_send_lqm_cmd(vif, LQM_CMD_OPERATION_START_MEASUREMENT, + duration, timeout); + mutex_unlock(&mvm->mutex); + + if (err) { + IWL_ERR(mvm, "Failed to send lqm cmdf(err=%d)\n", err); + iwl_remove_notification(&mvm->notif_wait, &wait_lqm_notif); + return err; + } + + /* wait for 2 * timeout (safety guard) and convert to jiffies*/ + timeout = msecs_to_jiffies((timeout * 2) / 1000); + + err = iwl_wait_notification(&mvm->notif_wait, &wait_lqm_notif, + timeout); + if (err) + IWL_ERR(mvm, "Getting lqm notif timed out\n"); + + return count; +} + #define MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz) \ _MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct ieee80211_vif) #define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \ @@ -1449,6 +1532,7 @@ MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_range_abort, 32); 
MVM_DEBUGFS_READ_FILE_OPS(tof_range_response); MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_responder_params, 32); MVM_DEBUGFS_READ_WRITE_FILE_OPS(quota_min, 32); +MVM_DEBUGFS_WRITE_FILE_OPS(lqm_send_cmd, 64); void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { @@ -1488,6 +1572,7 @@ void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif) S_IRUSR | S_IWUSR); MVM_DEBUGFS_ADD_FILE_VIF(quota_min, mvmvif->dbgfs_dir, S_IRUSR | S_IWUSR); + MVM_DEBUGFS_ADD_FILE_VIF(lqm_send_cmd, mvmvif->dbgfs_dir, S_IWUSR); if (vif->type == NL80211_IFTYPE_STATION && !vif->p2p && mvmvif == mvm->bf_allowed_vif) From 431469259df6ebc8e022b268bbb2d9bc5562f920 Mon Sep 17 00:00:00 2001 From: Sara Sharon Date: Mon, 14 Mar 2016 13:11:47 +0200 Subject: [PATCH 0083/1649] iwlwifi: pcie: fix global table size My patch resized the pool size, but neglected to resize the global table, which is obviously wrong since the global table maps the pool's rxb to vid one to one. This results in a panic in 9000 devices. Add a build bug to avoid such a case in the future. Fixes: 7b5424361ec9 ("iwlwifi: pcie: fine tune number of rxbs") Reported-by: Haim Dreyfuss Signed-off-by: Sara Sharon Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/pcie/internal.h | 2 +- drivers/net/wireless/intel/iwlwifi/pcie/rx.c | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h index dadafbdef9d9..34bf7cede7f4 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h +++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h @@ -348,7 +348,7 @@ struct iwl_tso_hdr_page { struct iwl_trans_pcie { struct iwl_rxq *rxq; struct iwl_rx_mem_buffer rx_pool[RX_POOL_SIZE]; - struct iwl_rx_mem_buffer *global_table[MQ_RX_TABLE_SIZE]; + struct iwl_rx_mem_buffer *global_table[RX_POOL_SIZE]; struct iwl_rb_allocator rba; struct iwl_trans *trans; struct iwl_drv *drv; diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c index 4be3c35afd19..e379dbab685a 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c @@ -908,6 +908,8 @@ int iwl_pcie_rx_init(struct iwl_trans *trans) allocator_pool_size = trans->num_rx_queues * (RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC); num_alloc = queue_size + allocator_pool_size; + BUILD_BUG_ON(ARRAY_SIZE(trans_pcie->global_table) != + ARRAY_SIZE(trans_pcie->rx_pool)); for (i = 0; i < num_alloc; i++) { struct iwl_rx_mem_buffer *rxb = &trans_pcie->rx_pool[i]; From 18dcb9a90cd5c49ec23130d64dd7921998068002 Mon Sep 17 00:00:00 2001 From: Sara Sharon Date: Sun, 13 Mar 2016 21:48:35 +0200 Subject: [PATCH 0084/1649] iwlwifi: pcie: enable interrupts explicitly on resume When entering suspend the driver calls iwl_disable_interrupts() and then iwl_pcie_disable_ict(). On resume the driver calls only iwl_pcie_reset_ict() without calling explicitly to iwl_enable_interrupts(). This mostly works since iwl_pcie_reset_ict is calling to iwl_enable_interrupts, but it doesn't work when there is no ict_table in MSIx mode. The result is that driver tries to resume but fails since it doesn't get the RX interrupt from FW indicating that d0i3 exit was completed. Fix it by adding an explicit call to enable interrupts. 
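The ordering problem described above is easy to model outside the driver. The toy program below is only a userspace illustration with made-up names; it shows why resuming without the explicit enable leaves an MSI-X configuration (no ICT table) with interrupts still off.

/* Toy model, not driver code: reset_ict() only re-enables interrupts when an
 * ICT table exists, so an MSI-X setup resumed without the explicit enable
 * call comes back with interrupts disabled and never sees the d0i3-exit RX.
 */
#include <stdbool.h>
#include <stdio.h>

struct toy_trans {
	bool has_ict_table;	/* false in MSI-X mode */
	bool irq_enabled;
};

static void toy_reset_ict(struct toy_trans *t)
{
	if (!t->has_ict_table)
		return;		/* nothing to reset, and no enable either */
	t->irq_enabled = true;	/* legacy ICT path enabled IRQs as a side effect */
}

static void toy_d3_resume(struct toy_trans *t, bool with_fix)
{
	t->irq_enabled = false;		/* as left by suspend */
	toy_reset_ict(t);
	if (with_fix)
		t->irq_enabled = true;	/* the explicit iwl_enable_interrupts() */
}

int main(void)
{
	struct toy_trans msix = { .has_ict_table = false };

	toy_d3_resume(&msix, false);
	printf("MSI-X resume without fix: irq_enabled=%d\n", msix.irq_enabled);
	toy_d3_resume(&msix, true);
	printf("MSI-X resume with fix:    irq_enabled=%d\n", msix.irq_enabled);
	return 0;
}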
Signed-off-by: Sara Sharon Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/pcie/trans.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index eb39c7e09781..d4306e23e286 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c @@ -1321,6 +1321,7 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans, * after this call. */ iwl_pcie_reset_ict(trans); + iwl_enable_interrupts(trans); iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE); From 5d93f3a278b387e3a2ec568c1f03d236bfdbef81 Mon Sep 17 00:00:00 2001 From: Luca Coelho Date: Fri, 4 Mar 2016 15:25:47 +0200 Subject: [PATCH 0085/1649] iwlwifi: pcie: refcounting is not necessary anymore We don't use the refcount value anymore, all the refcounting is done in the runtime PM usage_count value. Remove it. Signed-off-by: Luca Coelho Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/pcie/drv.c | 4 +-- .../wireless/intel/iwlwifi/pcie/internal.h | 4 --- .../net/wireless/intel/iwlwifi/pcie/trans.c | 25 +++++++------------ 3 files changed, 10 insertions(+), 23 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c index 05b968506836..34566691f90e 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c @@ -651,10 +651,8 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) /* The PCI device starts with a reference taken and we are * supposed to release it here. But to simplify the * interaction with the opmode, we don't do it now, but let - * the opmode release it when it's ready. To account for this - * reference, we start with ref_count set to 1. + * the opmode release it when it's ready. 
*/ - trans_pcie->ref_count = 1; return 0; diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h index 34bf7cede7f4..9ce4ec6cab2f 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h +++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h @@ -403,10 +403,6 @@ struct iwl_trans_pcie { bool cmd_hold_nic_awake; bool ref_cmd_in_flight; - /* protect ref counter */ - spinlock_t ref_lock; - u32 ref_count; - dma_addr_t fw_mon_phys; struct page *fw_mon_page; u32 fw_mon_size; diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index d4306e23e286..007bcb594946 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c @@ -2015,38 +2015,32 @@ static void iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, u32 reg, void iwl_trans_pcie_ref(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - unsigned long flags; if (iwlwifi_mod_params.d0i3_disable) return; - spin_lock_irqsave(&trans_pcie->ref_lock, flags); - IWL_DEBUG_RPM(trans, "ref_counter: %d\n", trans_pcie->ref_count); - trans_pcie->ref_count++; pm_runtime_get(&trans_pcie->pci_dev->dev); - spin_unlock_irqrestore(&trans_pcie->ref_lock, flags); + +#ifdef CONFIG_PM + IWL_DEBUG_RPM(trans, "runtime usage count: %d\n", + atomic_read(&trans_pcie->pci_dev->dev.power.usage_count)); +#endif /* CONFIG_PM */ } void iwl_trans_pcie_unref(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - unsigned long flags; if (iwlwifi_mod_params.d0i3_disable) return; - spin_lock_irqsave(&trans_pcie->ref_lock, flags); - IWL_DEBUG_RPM(trans, "ref_counter: %d\n", trans_pcie->ref_count); - if (WARN_ON_ONCE(trans_pcie->ref_count == 0)) { - spin_unlock_irqrestore(&trans_pcie->ref_lock, flags); - return; - } - trans_pcie->ref_count--; - pm_runtime_mark_last_busy(&trans_pcie->pci_dev->dev); pm_runtime_put_autosuspend(&trans_pcie->pci_dev->dev); - spin_unlock_irqrestore(&trans_pcie->ref_lock, flags); +#ifdef CONFIG_PM + IWL_DEBUG_RPM(trans, "runtime usage count: %d\n", + atomic_read(&trans_pcie->pci_dev->dev.power.usage_count)); +#endif /* CONFIG_PM */ } static const char *get_csr_string(int cmd) @@ -2794,7 +2788,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, trans_pcie->trans = trans; spin_lock_init(&trans_pcie->irq_lock); spin_lock_init(&trans_pcie->reg_lock); - spin_lock_init(&trans_pcie->ref_lock); mutex_init(&trans_pcie->mutex); init_waitqueue_head(&trans_pcie->ucode_write_waitq); trans_pcie->tso_hdr_page = alloc_percpu(struct iwl_tso_hdr_page); From ec77a33ee59723613d1e3ed6f02e5f9a1c898ce1 Mon Sep 17 00:00:00 2001 From: Chaya Rachel Ivgi Date: Sun, 13 Mar 2016 11:39:53 +0200 Subject: [PATCH 0086/1649] iwlwifi: mvm: handle async temperature notification with unlocked mutex Use RX_HANDLER_ASYNC_UNLOCKED instead of unlock and re-lock the mutex independently. 
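The deadlock shape being avoided here is generic: a handler that already holds a lock calls into a framework that calls back into code needing the same lock. The sketch below is a plain pthreads illustration (build with -pthread), not the driver's locking; trylock stands in for the re-entrant mutex_lock that would hang.

/* Toy illustration of why the handler must run without the big lock held. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;

/* stands in for ->get_temp(), which needs the lock to send a sync command */
static void callback_needing_lock(void)
{
	if (pthread_mutex_trylock(&big_lock) != 0) {
		printf("callback: lock already held -> would deadlock\n");
		return;
	}
	printf("callback: got the lock, doing the sync command\n");
	pthread_mutex_unlock(&big_lock);
}

static void handle_notification(int handler_holds_lock)
{
	if (handler_holds_lock)
		pthread_mutex_lock(&big_lock);

	callback_needing_lock();	/* thermal_notify_framework() stand-in */

	if (handler_holds_lock)
		pthread_mutex_unlock(&big_lock);
}

int main(void)
{
	handle_notification(1);	/* old ASYNC_LOCKED behaviour, needed the unlock hack */
	handle_notification(0);	/* new ASYNC_UNLOCKED behaviour */
	return 0;
}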
Signed-off-by: Chaya Rachel Ivgi Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/mvm/ops.c | 2 +- drivers/net/wireless/intel/iwlwifi/mvm/tt.c | 9 --------- 2 files changed, 1 insertion(+), 10 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index 46a22fd9f0d6..6153c8e86c53 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c @@ -292,7 +292,7 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = { RX_HANDLER(DTS_MEASUREMENT_NOTIFICATION, iwl_mvm_temp_notif, RX_HANDLER_ASYNC_LOCKED), RX_HANDLER_GRP(PHY_OPS_GROUP, DTS_MEASUREMENT_NOTIF_WIDE, - iwl_mvm_temp_notif, RX_HANDLER_ASYNC_LOCKED), + iwl_mvm_temp_notif, RX_HANDLER_ASYNC_UNLOCKED), RX_HANDLER_GRP(PHY_OPS_GROUP, CT_KILL_NOTIFICATION, iwl_mvm_ct_kill_notif, RX_HANDLER_SYNC), diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c index f1f28255a3a6..8d27137a9284 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c @@ -204,20 +204,11 @@ void iwl_mvm_temp_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) if (WARN_ON(ths_crossed >= IWL_MAX_DTS_TRIPS)) return; - /* - * We are now handling a temperature notification from the firmware - * in ASYNC and hold the mutex. thermal_notify_framework will call - * us back through get_temp() which ought to send a SYNC command to - * the firmware and hence to take the mutex. - * Avoid the deadlock by unlocking the mutex here. - */ if (mvm->tz_device.tzone) { struct iwl_mvm_thermal_device *tz_dev = &mvm->tz_device; - mutex_unlock(&mvm->mutex); thermal_notify_framework(tz_dev->tzone, tz_dev->fw_trips_index[ths_crossed]); - mutex_lock(&mvm->mutex); } #endif /* CONFIG_THERMAL */ } From 6ed5e4d64a5020ac7535762bdb1c840baeb5b5ff Mon Sep 17 00:00:00 2001 From: Emmanuel Grumbach Date: Mon, 14 Mar 2016 19:53:57 +0200 Subject: [PATCH 0087/1649] iwlwifi: pcie: print error value as signed int Bjorn pointed out that printing an error value as an hexadecimal isn't very convenient. Change that. Reported-by: Bjorn Helgaas Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/pcie/trans.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index 007bcb594946..28fe22097d52 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c @@ -1466,7 +1466,7 @@ static void iwl_pcie_set_interrupt_capa(struct pci_dev *pdev, ret = pci_enable_msi(pdev); if (ret) { - dev_err(&pdev->dev, "pci_enable_msi failed(0X%x)\n", ret); + dev_err(&pdev->dev, "pci_enable_msi failed - %d\n", ret); /* enable rfkill interrupt: hw bug w/a */ pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd); if (pci_cmd & PCI_COMMAND_INTX_DISABLE) { From 6e2611f324a51dc63a8afa9ced58723e498bbf16 Mon Sep 17 00:00:00 2001 From: Emmanuel Grumbach Date: Tue, 15 Mar 2016 11:12:20 +0200 Subject: [PATCH 0088/1649] iwlwifi: mvm: modify the max SP to infinite This makes u-APSD work with more peers. 
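For context, the "max SP" is the 2-bit Max SP Length subfield of the WMM QoS Info octet, which caps how many buffered frames the AP may deliver per U-APSD service period. The values in the sketch below mirror the IEEE80211_WMM_IE_STA_QOSINFO_SP_* definitions as understood here (0 = no cap, 1 = 2 frames, 2 = 4, 3 = 6); treat them as illustrative rather than a copy of the kernel header.

/* Standalone sketch: building the QoS Info octet with a given max SP length. */
#include <stdio.h>

#define QOSINFO_SP_ALL		0	/* AP may deliver all buffered frames per SP */
#define QOSINFO_SP_2		1	/* at most 2 frames per service period */
#define QOSINFO_SP_4		2
#define QOSINFO_SP_6		3
#define QOSINFO_SP_SHIFT	5	/* bits 5-6 of the QoS Info octet */
#define QOSINFO_SP_MASK		0x3

static unsigned char qos_info_with_max_sp(unsigned char qos_info, int sp)
{
	qos_info &= ~(QOSINFO_SP_MASK << QOSINFO_SP_SHIFT);
	return qos_info | ((sp & QOSINFO_SP_MASK) << QOSINFO_SP_SHIFT);
}

int main(void)
{
	/* 0x0f: U-APSD enabled on all four ACs */
	printf("qos_info with SP_2:   0x%02x\n",
	       qos_info_with_max_sp(0x0f, QOSINFO_SP_2));
	printf("qos_info with SP_ALL: 0x%02x\n",
	       qos_info_with_max_sp(0x0f, QOSINFO_SP_ALL));
	return 0;
}

Moving from SP_2 to SP_ALL (and honouring mac80211's uapsd_max_sp_len) removes the per-service-period cap, which is what lets U-APSD interoperate with more peers.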
Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/mvm/mvm.h | 2 +- drivers/net/wireless/intel/iwlwifi/mvm/power.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index 0668601f377c..2e0a8824aaba 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -208,7 +208,7 @@ enum iwl_power_scheme { }; #define IWL_CONN_MAX_LISTEN_INTERVAL 10 -#define IWL_UAPSD_MAX_SP IEEE80211_WMM_IE_STA_QOSINFO_SP_2 +#define IWL_UAPSD_MAX_SP IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL #ifdef CONFIG_IWLWIFI_DEBUGFS enum iwl_dbgfs_pm_mask { diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/power.c b/drivers/net/wireless/intel/iwlwifi/mvm/power.c index f313910cd026..7b1f6ad6062b 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/power.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/power.c @@ -227,7 +227,7 @@ static void iwl_mvm_power_configure_uapsd(struct iwl_mvm *mvm, cpu_to_le16(IWL_MVM_PS_SNOOZE_WINDOW); } - cmd->uapsd_max_sp = IWL_UAPSD_MAX_SP; + cmd->uapsd_max_sp = mvm->hw->uapsd_max_sp_len; if (mvm->cur_ucode == IWL_UCODE_WOWLAN || cmd->flags & cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK)) { From c772a3d3fa01048ae7992663e877e0a5f05a6d36 Mon Sep 17 00:00:00 2001 From: Sara Sharon Date: Sun, 13 Mar 2016 17:19:38 +0200 Subject: [PATCH 0089/1649] iwlwifi: pcie: do not pad QoS AMSDU We insert padding if the MAC header's size is not a multiple of 4 to ensure that the SNAP header is DWORD aligned. When we do so, we let the firmware know by setting a bit in Tx command (TX_CMD_FLG_MH_PAD) which will instruct the firmware to drop those 2 bytes before sending the frame. However, this is not needed for AMSDU as the sub frame header (14B) complements the MAC header (26B) so that the SNAP header is DWORD aligned without adding any pad. Until 9000, the firmware didn't check the TX_CMD_FLG_MH_PAD bit but rather checked the length of the MAC header itself and assumed the entity that enqueued the frame (driver or internal firmware code) added the pad. Since the driver inserted the pad even for AMSDU this logic applied. Note that the padding is a DMA optimization but it's not strictly needed, so we could pad even if it was not needed. However, the CSUM hardware introduced for the 9000 devices requires to not pad AMSDU as it is not needed, and will fail if such a pad exists. Due to older FW not checking the padding bit but checking the mac header size itself - we cannot do this adjustments for older generations. Do not align the size if it is an AMSDU and HW checksum is enabled - which will only happen on 9000 devices and on. 
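The alignment argument in the message above is just arithmetic, and a quick standalone check (illustrative only) makes it concrete: a 3-address QoS data MAC header is 26 bytes, so 2 bytes of pad are normally needed before the SNAP header, while the 14-byte A-MSDU subframe header already brings the total to a 4-byte boundary.

/* Quick arithmetic check of the A-MSDU padding claim. */
#include <stdio.h>

#define ALIGN4(x)	(((x) + 3) & ~3u)

int main(void)
{
	unsigned int qos_hdr = 26;	/* 3-address QoS data MAC header */
	unsigned int subframe_hdr = 14;	/* A-MSDU subframe header: DA + SA + length */

	printf("non-A-MSDU: hdr %u -> pad %u bytes\n",
	       qos_hdr, ALIGN4(qos_hdr) - qos_hdr);
	printf("A-MSDU:     hdr %u + subframe %u = %u -> pad %u bytes\n",
	       qos_hdr, subframe_hdr, qos_hdr + subframe_hdr,
	       ALIGN4(qos_hdr + subframe_hdr) - (qos_hdr + subframe_hdr));
	return 0;
}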
Signed-off-by: Sara Sharon Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/pcie/tx.c | 21 +++++++++++++------- 1 file changed, 14 insertions(+), 7 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c index cc6fa00d350b..e1f7a3febb50 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c @@ -2210,6 +2210,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, __le16 fc; u8 hdr_len; u16 wifi_seq; + bool amsdu; txq = &trans_pcie->txq[txq_id]; q = &txq->q; @@ -2301,11 +2302,18 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, */ len = sizeof(struct iwl_tx_cmd) + sizeof(struct iwl_cmd_header) + hdr_len - IWL_HCMD_SCRATCHBUF_SIZE; - tb1_len = ALIGN(len, 4); - - /* Tell NIC about any 2-byte padding after MAC header */ - if (tb1_len != len) - tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK; + /* do not align A-MSDU to dword as the subframe header aligns it */ + amsdu = ieee80211_is_data_qos(fc) && + (*ieee80211_get_qos_ctl(hdr) & + IEEE80211_QOS_CTL_A_MSDU_PRESENT); + if (trans_pcie->sw_csum_tx || !amsdu) { + tb1_len = ALIGN(len, 4); + /* Tell NIC about any 2-byte padding after MAC header */ + if (tb1_len != len) + tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK; + } else { + tb1_len = len; + } /* The first TB points to the scratchbuf data - min_copy bytes */ memcpy(&txq->scratchbufs[q->write_ptr], &dev_cmd->hdr, @@ -2323,8 +2331,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, goto out_err; iwl_pcie_txq_build_tfd(trans, txq, tb1_phys, tb1_len, false); - if (ieee80211_is_data_qos(fc) && - (*ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_A_MSDU_PRESENT)) { + if (amsdu) { if (unlikely(iwl_fill_data_tbs_amsdu(trans, skb, txq, hdr_len, out_meta, dev_cmd, tb1_len))) From d8fe484470dd72638613c42df3008ec118f24de9 Mon Sep 17 00:00:00 2001 From: Sara Sharon Date: Wed, 9 Mar 2016 10:12:45 +0200 Subject: [PATCH 0090/1649] iwlwifi: mvm: add support for new TX CMD API TX CMD API has changed to support offload assist. Currently we do not enable checksum yet, but must set the padding indication, to avoid FW errors. Set other amsdu flag as well. The rest of the flags will be configured only if HW csum is enabled and will be set in future patches. This change is backward compatible. Signed-off-by: Sara Sharon Signed-off-by: Emmanuel Grumbach --- .../wireless/intel/iwlwifi/mvm/fw-api-tx.h | 35 +++++++++++++++++-- drivers/net/wireless/intel/iwlwifi/mvm/tx.c | 9 ++++- 2 files changed, 41 insertions(+), 3 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h index ba3f0bbddde8..dadcccd88255 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2016 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -193,11 +194,41 @@ enum iwl_tx_pm_timeouts { #define IWL_BAR_DFAULT_RETRY_LIMIT 60 #define IWL_LOW_RETRY_LIMIT 7 +/** + * enum iwl_tx_offload_assist_flags_pos - set %iwl_tx_cmd offload_assist values + * @TX_CMD_OFFLD_IP_HDR_OFFSET: offset to start of IP header (in words) + * from mac header end. For normal case it is 4 words for SNAP. 
+ * note: tx_cmd, mac header and pad are not counted in the offset. + * This is used to help the offload in case there is tunneling such as + * IPv6 in IPv4, in such case the ip header offset should point to the + * inner ip header and IPv4 checksum of the external header should be + * calculated by driver. + * @TX_CMD_OFFLD_L4_EN: enable TCP/UDP checksum + * @TX_CMD_OFFLD_L3_EN: enable IP header checksum + * @TX_CMD_OFFLD_MH_SIZE: size of the mac header in words. Includes the IV + * field. Doesn't include the pad. + * @TX_CMD_OFFLD_PAD: mark 2-byte pad was inserted after the mac header for + * alignment + * @TX_CMD_OFFLD_AMSDU: mark TX command is A-MSDU + */ +enum iwl_tx_offload_assist_flags_pos { + TX_CMD_OFFLD_IP_HDR = 0, + TX_CMD_OFFLD_L4_EN = 6, + TX_CMD_OFFLD_L3_EN = 7, + TX_CMD_OFFLD_MH_SIZE = 8, + TX_CMD_OFFLD_PAD = 13, + TX_CMD_OFFLD_AMSDU = 14, +}; + +#define IWL_TX_CMD_OFFLD_MH_MASK 0x1f +#define IWL_TX_CMD_OFFLD_IP_HDR_MASK 0x3f + /* TODO: complete documentation for try_cnt and btkill_cnt */ /** * struct iwl_tx_cmd - TX command struct to FW * ( TX_CMD = 0x1c ) * @len: in bytes of the payload, see below for details + * @offload_assist: TX offload configuration * @tx_flags: combination of TX_CMD_FLG_* * @rate_n_flags: rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is * cleared. Combination of RATE_MCS_* @@ -231,7 +262,7 @@ enum iwl_tx_pm_timeouts { */ struct iwl_tx_cmd { __le16 len; - __le16 next_frame_len; + __le16 offload_assist; __le32 tx_flags; struct { u8 try_cnt; @@ -255,7 +286,7 @@ struct iwl_tx_cmd { __le16 reserved4; u8 payload[0]; struct ieee80211_hdr hdr[0]; -} __packed; /* TX_CMD_API_S_VER_3 */ +} __packed; /* TX_CMD_API_S_VER_6 */ /* * TX response related data diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c index 75870e68a7c3..138d64d2fc7a 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c @@ -126,6 +126,9 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb, u8 *qc = ieee80211_get_qos_ctl(hdr); tx_cmd->tid_tspec = qc[0] & 0xf; tx_flags &= ~TX_CMD_FLG_SEQ_CTL; + if (*qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT) + tx_cmd->offload_assist |= + cpu_to_le16(BIT(TX_CMD_OFFLD_AMSDU)); } else if (ieee80211_is_back_req(fc)) { struct ieee80211_bar *bar = (void *)skb->data; u16 control = le16_to_cpu(bar->control); @@ -186,9 +189,13 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb, /* Total # bytes to be transmitted */ tx_cmd->len = cpu_to_le16((u16)skb->len + (uintptr_t)info->driver_data[0]); - tx_cmd->next_frame_len = 0; tx_cmd->life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE); tx_cmd->sta_id = sta_id; + + /* padding is inserted later in transport */ + if (ieee80211_hdrlen(fc) % 4 && + !(tx_cmd->offload_assist & cpu_to_le16(BIT(TX_CMD_OFFLD_AMSDU)))) + tx_cmd->offload_assist |= cpu_to_le16(BIT(TX_CMD_OFFLD_PAD)); } /* From a2a57a3548b94222e36a01db893b8f4788501150 Mon Sep 17 00:00:00 2001 From: Emmanuel Grumbach Date: Tue, 15 Mar 2016 15:36:36 +0200 Subject: [PATCH 0091/1649] iwlwifi: add missing mutex_destroy statements iwlwifi / iwlmvm didn't destroy their mutexes. Fix that. 
Signed-off-by: Emmanuel Grumbach Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/mvm/ops.c | 3 +++ drivers/net/wireless/intel/iwlwifi/pcie/trans.c | 1 + 2 files changed, 4 insertions(+) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index 6153c8e86c53..d4b71a7d0645 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c @@ -782,6 +782,9 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode) iwl_mvm_tof_clean(mvm); + mutex_destroy(&mvm->mutex); + mutex_destroy(&mvm->d0i3_suspend_mutex); + ieee80211_free_hw(mvm->hw); } diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index 28fe22097d52..0c40209bd718 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c @@ -1695,6 +1695,7 @@ void iwl_trans_pcie_free(struct iwl_trans *trans) } free_percpu(trans_pcie->tso_hdr_page); + mutex_destroy(&trans_pcie->mutex); iwl_trans_free(trans); } From 11dee0b4946bc8b0b4adc804f2110361fed81f08 Mon Sep 17 00:00:00 2001 From: Emmanuel Grumbach Date: Tue, 15 Mar 2016 11:04:29 +0200 Subject: [PATCH 0092/1649] iwlwifi: make uapsd_disable module param a bitmap This allows to disable uapsd for BSS only, or P2P client separately. Remove the now unneeded IWL_MVM_P2P_UAPSD_STANDALONE constant. Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/iwl-drv.c | 10 ++++++---- drivers/net/wireless/intel/iwlwifi/iwl-modparams.h | 10 ++++++++-- drivers/net/wireless/intel/iwlwifi/mvm/constants.h | 1 - drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c | 3 ++- drivers/net/wireless/intel/iwlwifi/mvm/mvm.h | 3 ++- 5 files changed, 18 insertions(+), 9 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c index f899666acb41..2cd9c3139a1c 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c @@ -1561,7 +1561,7 @@ struct iwl_mod_params iwlwifi_mod_params = { .d0i3_disable = true, .d0i3_entry_delay = 1000, #ifndef CONFIG_IWLWIFI_UAPSD - .uapsd_disable = true, + .uapsd_disable = IWL_DISABLE_UAPSD_BSS | IWL_DISABLE_UAPSD_P2P_CLIENT, #endif /* CONFIG_IWLWIFI_UAPSD */ /* the rest are 0 by default */ }; @@ -1681,11 +1681,13 @@ module_param_named(lar_disable, iwlwifi_mod_params.lar_disable, MODULE_PARM_DESC(lar_disable, "disable LAR functionality (default: N)"); module_param_named(uapsd_disable, iwlwifi_mod_params.uapsd_disable, - bool, S_IRUGO | S_IWUSR); + uint, S_IRUGO | S_IWUSR); #ifdef CONFIG_IWLWIFI_UAPSD -MODULE_PARM_DESC(uapsd_disable, "disable U-APSD functionality (default: N)"); +MODULE_PARM_DESC(uapsd_disable, + "disable U-APSD functionality bitmap 1: BSS 2: P2P Client (default: 0)"); #else -MODULE_PARM_DESC(uapsd_disable, "disable U-APSD functionality (default: Y)"); +MODULE_PARM_DESC(uapsd_disable, + "disable U-APSD functionality bitmap 1: BSS 2: P2P Client (default: 3)"); #endif /* diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h b/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h index d1a5dd1602f5..6c5c2f9f73a2 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h @@ -92,6 +92,11 @@ enum iwl_amsdu_size { IWL_AMSDU_12K = 2, }; +enum iwl_uapsd_disable { + IWL_DISABLE_UAPSD_BSS = BIT(0), + IWL_DISABLE_UAPSD_P2P_CLIENT = BIT(1), +}; + /** * struct 
iwl_mod_params * @@ -109,7 +114,8 @@ enum iwl_amsdu_size { * @debug_level: levels are IWL_DL_* * @ant_coupling: antenna coupling in dB, default = 0 * @nvm_file: specifies a external NVM file - * @uapsd_disable: disable U-APSD, default = 1 + * @uapsd_disable: disable U-APSD, see %enum iwl_uapsd_disable, default = + * IWL_DISABLE_UAPSD_BSS | IWL_DISABLE_UAPSD_P2P_CLIENT * @d0i3_disable: disable d0i3, default = 1, * @d0i3_entry_delay: time to wait after no refs are taken before * entering D0i3 (in msecs) @@ -131,7 +137,7 @@ struct iwl_mod_params { #endif int ant_coupling; char *nvm_file; - bool uapsd_disable; + u32 uapsd_disable; bool d0i3_disable; unsigned int d0i3_entry_delay; bool lar_disable; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h index 4b560e4417ee..b96b1c6a97fa 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h @@ -75,7 +75,6 @@ #define IWL_MVM_WOWLAN_PS_RX_DATA_TIMEOUT (10 * USEC_PER_MSEC) #define IWL_MVM_SHORT_PS_TX_DATA_TIMEOUT (2 * 1024) /* defined in TU */ #define IWL_MVM_SHORT_PS_RX_DATA_TIMEOUT (40 * 1024) /* defined in TU */ -#define IWL_MVM_P2P_UAPSD_STANDALONE 0 #define IWL_MVM_P2P_LOWLATENCY_PS_ENABLE 0 #define IWL_MVM_UAPSD_RX_DATA_TIMEOUT (50 * USEC_PER_MSEC) #define IWL_MVM_UAPSD_TX_DATA_TIMEOUT (50 * USEC_PER_MSEC) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index cff9c16e4920..1a3481ba1446 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -2345,7 +2345,8 @@ static void iwl_mvm_check_uapsd(struct iwl_mvm *mvm, struct ieee80211_vif *vif, return; } - if (iwlwifi_mod_params.uapsd_disable) { + if (!vif->p2p && + (iwlwifi_mod_params.uapsd_disable & IWL_DISABLE_UAPSD_BSS)) { vif->driver_flags &= ~IEEE80211_VIF_SUPPORTS_UAPSD; return; } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index 2e0a8824aaba..02ef1d91478c 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -1072,7 +1072,8 @@ bool iwl_mvm_is_p2p_standalone_uapsd_supported(struct iwl_mvm *mvm) { return fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_P2P_STANDALONE_UAPSD) && - IWL_MVM_P2P_UAPSD_STANDALONE; + !(iwlwifi_mod_params.uapsd_disable & + IWL_DISABLE_UAPSD_P2P_CLIENT); } static inline bool iwl_mvm_has_new_rx_api(struct iwl_mvm *mvm) From 2d3d31b562dd060cbc7a163fd824421d0ebeaadf Mon Sep 17 00:00:00 2001 From: Haim Dreyfuss Date: Tue, 15 Mar 2016 10:51:40 +0200 Subject: [PATCH 0093/1649] iwlwifi: 9000: update device id and FW serial number Update device id and FW serial number for 2X2 antenna devices in 9000 generation product. These will not be available on the market in the coming year. 
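As a rough illustration of what an id-table update like this means in practice, the userspace toy below (not the PCI core, names made up) shows how entries such as {IWL_PCI_DEVICE(0x2526, 0x0410, iwl9560_2ac_cfg)} are consumed: probing walks the table and binds the config of the first entry whose device id and subsystem device id both match the discovered function, so moving an entry from 0x9DF0 to 0x2526 changes which hardware picks up the 9560 config.

/* Toy id-table matcher; real matching is done by the PCI core. */
#include <stdint.h>
#include <stdio.h>

struct toy_id {
	uint16_t device;	/* PCI device id, e.g. 0x2526 */
	uint16_t subdevice;	/* PCI subsystem device id, e.g. 0x0410 */
	const char *cfg;	/* stands in for the struct iwl_cfg pointer */
};

static const struct toy_id toy_ids[] = {
	{ 0x9DF0, 0x0310, "iwl5165_2ac_cfg" },
	{ 0x2526, 0x0210, "iwl9560_2ac_cfg" },
	{ 0x2526, 0x0410, "iwl9560_2ac_cfg" },
};

static const char *toy_probe(uint16_t device, uint16_t subdevice)
{
	size_t i;

	for (i = 0; i < sizeof(toy_ids) / sizeof(toy_ids[0]); i++)
		if (toy_ids[i].device == device &&
		    toy_ids[i].subdevice == subdevice)
			return toy_ids[i].cfg;
	return NULL;
}

int main(void)
{
	const char *hit = toy_probe(0x2526, 0x0410);
	const char *miss = toy_probe(0x9DF0, 0x0410);

	printf("0x2526/0x0410 -> %s\n", hit ? hit : "no match");
	printf("0x9DF0/0x0410 -> %s\n", miss ? miss : "no match");
	return 0;
}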
Signed-off-by: Haim Dreyfuss Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/iwl-9000.c | 8 ++++---- drivers/net/wireless/intel/iwlwifi/iwl-config.h | 2 +- drivers/net/wireless/intel/iwlwifi/pcie/drv.c | 10 +++++----- 3 files changed, 10 insertions(+), 10 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-9000.c b/drivers/net/wireless/intel/iwlwifi/iwl-9000.c index 318b1dc171f2..642fc92d7788 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-9000.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-9000.c @@ -5,7 +5,7 @@ * * GPL LICENSE SUMMARY * - * Copyright(c) 2015 Intel Deutschland GmbH + * Copyright(c) 2015-2016 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -18,7 +18,7 @@ * * BSD LICENSE * - * Copyright(c) 2015 Intel Deutschland GmbH + * Copyright(c) 2015-2016 Intel Deutschland GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -143,8 +143,8 @@ static const struct iwl_tt_params iwl9000_tt_params = { .vht_mu_mimo_supported = true, \ .mac_addr_from_csr = true -const struct iwl_cfg iwl9260_2ac_cfg = { - .name = "Intel(R) Dual Band Wireless AC 9260", +const struct iwl_cfg iwl9560_2ac_cfg = { + .name = "Intel(R) Dual Band Wireless AC 9560", .fw_name_pre = IWL9000_FW_PRE, IWL_DEVICE_9000, .ht_params = &iwl9000_ht_params, diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h index 3e4d346be350..8cbd24875ffc 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h @@ -439,7 +439,7 @@ extern const struct iwl_cfg iwl8265_2ac_cfg; extern const struct iwl_cfg iwl4165_2ac_cfg; extern const struct iwl_cfg iwl8260_2ac_sdio_cfg; extern const struct iwl_cfg iwl4165_2ac_sdio_cfg; -extern const struct iwl_cfg iwl9260_2ac_cfg; +extern const struct iwl_cfg iwl9560_2ac_cfg; extern const struct iwl_cfg iwl5165_2ac_cfg; #endif /* CONFIG_IWLMVM */ diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c index 34566691f90e..fb8b5ecd6abb 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c @@ -485,15 +485,15 @@ static const struct pci_device_id iwl_hw_card_ids[] = { /* 9000 Series */ {IWL_PCI_DEVICE(0x9DF0, 0x2A10, iwl5165_2ac_cfg)}, {IWL_PCI_DEVICE(0x9DF0, 0x2010, iwl5165_2ac_cfg)}, - {IWL_PCI_DEVICE(0x9DF0, 0x0A10, iwl9260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x9DF0, 0x0010, iwl9260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2526, 0x0A10, iwl9560_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2526, 0x0010, iwl9560_2ac_cfg)}, {IWL_PCI_DEVICE(0x9DF0, 0x0000, iwl5165_2ac_cfg)}, {IWL_PCI_DEVICE(0x9DF0, 0x0310, iwl5165_2ac_cfg)}, {IWL_PCI_DEVICE(0x9DF0, 0x0510, iwl5165_2ac_cfg)}, {IWL_PCI_DEVICE(0x9DF0, 0x0710, iwl5165_2ac_cfg)}, - {IWL_PCI_DEVICE(0x9DF0, 0x0210, iwl9260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x9DF0, 0x0410, iwl9260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x9DF0, 0x0610, iwl9260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2526, 0x0210, iwl9560_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2526, 0x0410, iwl9560_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2526, 0x0610, iwl9560_2ac_cfg)}, #endif /* CONFIG_IWLMVM */ {0} From 7ec54716e71a846dddf6aa1e33a12e1dcca6d276 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Wed, 16 Mar 2016 09:29:48 +0100 Subject: [PATCH 0094/1649] iwlwifi: mvm: remove is_data_qos variable in TX "is_data_qos == true" is equivalent to "tid < 
IWL_MAX_TID_COUNT" since tid is only assigned (and range-checked) in that case. This removes a (harmless) smatch warning that occurs because it can't seem to follow the above logic from the code. Signed-off-by: Johannes Berg Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/mvm/tx.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c index 138d64d2fc7a..c7c3d7bd38ba 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c @@ -654,7 +654,7 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb, u16 seq_number = 0; u8 tid = IWL_MAX_TID_COUNT; u8 txq_id = info->hw_queue; - bool is_data_qos = false, is_ampdu = false; + bool is_ampdu = false; int hdrlen; mvmsta = iwl_mvm_sta_from_mac80211(sta); @@ -694,7 +694,6 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb, seq_number &= IEEE80211_SCTL_SEQ; hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG); hdr->seq_ctrl |= cpu_to_le16(seq_number); - is_data_qos = true; is_ampdu = info->flags & IEEE80211_TX_CTL_AMPDU; } @@ -722,7 +721,7 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb, if (iwl_trans_tx(mvm->trans, skb, dev_cmd, txq_id)) goto drop_unlock_sta; - if (is_data_qos && !ieee80211_has_morefrags(fc)) + if (tid < IWL_MAX_TID_COUNT && !ieee80211_has_morefrags(fc)) mvmsta->tid_data[tid].seq_number = seq_number + 0x10; spin_unlock(&mvmsta->lock); From 24afba7690e49714795a1e8ee25e617ea0fb566b Mon Sep 17 00:00:00 2001 From: Liad Kaufman Date: Tue, 28 Jul 2015 18:56:08 +0300 Subject: [PATCH 0095/1649] iwlwifi: mvm: support bss dynamic alloc/dealloc of queues "DQA" is shorthand for "dynamic queue allocation". This enables on-demand allocation of queues per RA/TID rather than statically allocating per vif, thus allowing a potential benefit of various factors. Please refer to the DOC section this patch adds to sta.h to see a more in-depth explanation of this feature. There are many things to take into consideration when working in DQA mode, and this patch is only one in a series. Note that default operation mode is non-DQA mode, unless the FW indicates that it supports DQA mode. This patch enables support of DQA for a station connected to an AP, and works in a non-aggregated mode. When a frame for an unused RA/TID arrives at the driver, it isn't TXed immediately, but deferred first until a suitable queue is first allocated for it, and then TXed by a worker that both allocates the queues and TXes deferred traffic. When a STA is removed, its queues goes back into the queue pools for reuse as needed. 
Signed-off-by: Liad Kaufman Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/mvm/d3.c | 2 +- .../net/wireless/intel/iwlwifi/mvm/fw-api.h | 22 +- .../net/wireless/intel/iwlwifi/mvm/mac-ctxt.c | 21 +- .../net/wireless/intel/iwlwifi/mvm/mac80211.c | 49 ++++ drivers/net/wireless/intel/iwlwifi/mvm/mvm.h | 7 + drivers/net/wireless/intel/iwlwifi/mvm/ops.c | 1 + drivers/net/wireless/intel/iwlwifi/mvm/sta.c | 254 +++++++++++++++++- drivers/net/wireless/intel/iwlwifi/mvm/sta.h | 87 +++++- drivers/net/wireless/intel/iwlwifi/mvm/tx.c | 54 ++++ 9 files changed, 481 insertions(+), 16 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c index c1a313149eed..e3561bbc2468 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c @@ -723,7 +723,7 @@ static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif, return -EIO; } - ret = iwl_mvm_sta_send_to_fw(mvm, ap_sta, false); + ret = iwl_mvm_sta_send_to_fw(mvm, ap_sta, false, 0); if (ret) return ret; rcu_assign_pointer(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id], ap_sta); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h index e6bd0c8d4cc0..8217eb25b090 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h @@ -80,12 +80,32 @@ #include "fw-api-stats.h" #include "fw-api-tof.h" -/* Tx queue numbers */ +/* Tx queue numbers for non-DQA mode */ enum { IWL_MVM_OFFCHANNEL_QUEUE = 8, IWL_MVM_CMD_QUEUE = 9, }; +/* + * DQA queue numbers + * + * @IWL_MVM_DQA_MIN_MGMT_QUEUE: first TXQ in pool for MGMT and non-QOS frames. + * Each MGMT queue is mapped to a single STA + * MGMT frames are frames that return true on ieee80211_is_mgmt() + * @IWL_MVM_DQA_MAX_MGMT_QUEUE: last TXQ in pool for MGMT frames + * @IWL_MVM_DQA_MIN_DATA_QUEUE: first TXQ in pool for DATA frames. + * DATA frames are intended for !ieee80211_is_mgmt() frames, but if + * the MGMT TXQ pool is exhausted, mgmt frames can be sent on DATA queues + * as well + * @IWL_MVM_DQA_MAX_DATA_QUEUE: last TXQ in pool for DATA frames + */ +enum iwl_mvm_dqa_txq { + IWL_MVM_DQA_MIN_MGMT_QUEUE = 5, + IWL_MVM_DQA_MAX_MGMT_QUEUE = 8, + IWL_MVM_DQA_MIN_DATA_QUEUE = 10, + IWL_MVM_DQA_MAX_DATA_QUEUE = 31, +}; + enum iwl_mvm_tx_fifo { IWL_MVM_TX_FIFO_BK = 0, IWL_MVM_TX_FIFO_BE, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c index e885db3464b0..c02c1055d534 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c @@ -425,12 +425,17 @@ static int iwl_mvm_mac_ctxt_allocate_resources(struct iwl_mvm *mvm, return 0; } - /* Find available queues, and allocate them to the ACs */ + /* + * Find available queues, and allocate them to the ACs. 
When in + * DQA-mode they aren't really used, and this is done only so the + * mac80211 ieee80211_check_queues() function won't fail + */ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { u8 queue = find_first_zero_bit(&used_hw_queues, mvm->first_agg_queue); - if (queue >= mvm->first_agg_queue) { + if (!iwl_mvm_is_dqa_supported(mvm) && + queue >= mvm->first_agg_queue) { IWL_ERR(mvm, "Failed to allocate queue\n"); ret = -EIO; goto exit_fail; @@ -495,6 +500,10 @@ int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif) IWL_MVM_TX_FIFO_MCAST, 0, wdg_timeout); /* fall through */ default: + /* If DQA is supported - queues will be enabled when needed */ + if (iwl_mvm_is_dqa_supported(mvm)) + break; + for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) iwl_mvm_enable_ac_txq(mvm, vif->hw_queue[ac], vif->hw_queue[ac], @@ -523,6 +532,14 @@ void iwl_mvm_mac_ctxt_release(struct iwl_mvm *mvm, struct ieee80211_vif *vif) IWL_MAX_TID_COUNT, 0); /* fall through */ default: + /* + * If DQA is supported - queues were already disabled, since in + * DQA-mode the queues are a property of the STA and not of the + * vif, and at this point the STA was already deleted + */ + if (iwl_mvm_is_dqa_supported(mvm)) + break; + for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) iwl_mvm_disable_txq(mvm, vif->hw_queue[ac], vif->hw_queue[ac], diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index 1a3481ba1446..115d7aa5e720 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -992,6 +992,7 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm) iwl_mvm_reset_phy_ctxts(mvm); memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table)); memset(mvm->sta_drained, 0, sizeof(mvm->sta_drained)); + memset(mvm->sta_deferred_frames, 0, sizeof(mvm->sta_deferred_frames)); memset(mvm->tfd_drained, 0, sizeof(mvm->tfd_drained)); memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif)); memset(&mvm->last_bt_notif_old, 0, sizeof(mvm->last_bt_notif_old)); @@ -1178,6 +1179,7 @@ static void iwl_mvm_mac_stop(struct ieee80211_hw *hw) flush_work(&mvm->d0i3_exit_work); flush_work(&mvm->async_handlers_wk); + flush_work(&mvm->add_stream_wk); cancel_delayed_work_sync(&mvm->fw_dump_wk); iwl_mvm_free_fw_dump_desc(mvm); @@ -2382,6 +2384,22 @@ iwl_mvm_tdls_check_trigger(struct iwl_mvm *mvm, peer_addr, action); } +static void iwl_mvm_purge_deferred_tx_frames(struct iwl_mvm *mvm, + struct iwl_mvm_sta *mvm_sta) +{ + struct iwl_mvm_tid_data *tid_data; + struct sk_buff *skb; + int i; + + spin_lock_bh(&mvm_sta->lock); + for (i = 0; i <= IWL_MAX_TID_COUNT; i++) { + tid_data = &mvm_sta->tid_data[i]; + while ((skb = __skb_dequeue(&tid_data->deferred_tx_frames))) + ieee80211_free_txskb(mvm->hw, skb); + } + spin_unlock_bh(&mvm_sta->lock); +} + static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, @@ -2402,6 +2420,33 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw, /* if a STA is being removed, reuse its ID */ flush_work(&mvm->sta_drained_wk); + /* + * If we are in a STA removal flow and in DQA mode: + * + * This is after the sync_rcu part, so the queues have already been + * flushed. No more TXs on their way in mac80211's path, and no more in + * the queues. + * Also, we won't be getting any new TX frames for this station. + * What we might have are deferred TX frames that need to be taken care + * of. 
+ * + * Drop any still-queued deferred-frame before removing the STA, and + * make sure the worker is no longer handling frames for this STA. + */ + if (old_state == IEEE80211_STA_NONE && + new_state == IEEE80211_STA_NOTEXIST && + iwl_mvm_is_dqa_supported(mvm)) { + struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); + + iwl_mvm_purge_deferred_tx_frames(mvm, mvm_sta); + flush_work(&mvm->add_stream_wk); + + /* + * No need to make sure deferred TX indication is off since the + * worker will already remove it if it was on + */ + } + mutex_lock(&mvm->mutex); if (old_state == IEEE80211_STA_NOTEXIST && new_state == IEEE80211_STA_NONE) { @@ -3738,6 +3783,10 @@ static void iwl_mvm_mac_flush(struct ieee80211_hw *hw, if (!vif || vif->type != NL80211_IFTYPE_STATION) return; + /* Make sure we're done with the deferred traffic before flushing */ + if (iwl_mvm_is_dqa_supported(mvm)) + flush_work(&mvm->add_stream_wk); + mutex_lock(&mvm->mutex); mvmvif = iwl_mvm_vif_from_mac80211(vif); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index 02ef1d91478c..f9430ee8f96b 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -665,10 +665,16 @@ struct iwl_mvm { /* Map to HW queue */ u32 hw_queue_to_mac80211; u8 hw_queue_refcount; + /* + * This is to mark that queue is reserved for a STA but not yet + * allocated. This is needed to make sure we have at least one + * available queue to use when adding a new STA + */ bool setup_reserved; u16 tid_bitmap; /* Bitmap of the TIDs mapped to this queue */ } queue_info[IWL_MAX_HW_QUEUES]; spinlock_t queue_info_lock; /* For syncing queue mgmt operations */ + struct work_struct add_stream_wk; /* To add streams to queues */ atomic_t mac80211_queue_stop_count[IEEE80211_MAX_QUEUES]; const char *nvm_file_name; @@ -688,6 +694,7 @@ struct iwl_mvm { struct iwl_rx_phy_info last_phy_info; struct ieee80211_sta __rcu *fw_id_to_mac_id[IWL_MVM_STATION_COUNT]; struct work_struct sta_drained_wk; + unsigned long sta_deferred_frames[BITS_TO_LONGS(IWL_MVM_STATION_COUNT)]; unsigned long sta_drained[BITS_TO_LONGS(IWL_MVM_STATION_COUNT)]; atomic_t pending_frames[IWL_MVM_STATION_COUNT]; u32 tfd_drained[IWL_MVM_STATION_COUNT]; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index d4b71a7d0645..9fc705ca5841 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c @@ -579,6 +579,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, INIT_WORK(&mvm->d0i3_exit_work, iwl_mvm_d0i3_exit_work); INIT_DELAYED_WORK(&mvm->fw_dump_wk, iwl_mvm_fw_error_dump_wk); INIT_DELAYED_WORK(&mvm->tdls_cs.dwork, iwl_mvm_tdls_ch_switch_work); + INIT_WORK(&mvm->add_stream_wk, iwl_mvm_add_new_dqa_stream_wk); spin_lock_init(&mvm->d0i3_tx_lock); spin_lock_init(&mvm->refs_lock); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c index ef99942d7169..3f36a661ec96 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c @@ -111,7 +111,7 @@ static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm, /* send station add/update command to firmware */ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta, - bool update) + bool update, unsigned int flags) { struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); struct iwl_mvm_add_sta_cmd add_sta_cmd = { @@ 
-126,9 +126,12 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta, u32 status; u32 agg_size = 0, mpdu_dens = 0; - if (!update) { + if (!update || (flags & STA_MODIFY_QUEUES)) { add_sta_cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk); memcpy(&add_sta_cmd.addr, sta->addr, ETH_ALEN); + + if (flags & STA_MODIFY_QUEUES) + add_sta_cmd.modify_mask |= STA_MODIFY_QUEUES; } switch (sta->bandwidth) { @@ -274,6 +277,204 @@ static void iwl_mvm_tdls_sta_deinit(struct iwl_mvm *mvm, iwl_mvm_disable_txq(mvm, i, i, IWL_MAX_TID_COUNT, 0); } +static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm, + struct ieee80211_sta *sta, u8 ac, int tid, + struct ieee80211_hdr *hdr) +{ + struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); + struct iwl_trans_txq_scd_cfg cfg = { + .fifo = iwl_mvm_ac_to_tx_fifo[ac], + .sta_id = mvmsta->sta_id, + .tid = tid, + .frame_limit = IWL_FRAME_LIMIT, + }; + unsigned int wdg_timeout = + iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false); + u8 mac_queue = mvmsta->vif->hw_queue[ac]; + int queue = -1; + int ssn; + + lockdep_assert_held(&mvm->mutex); + + spin_lock(&mvm->queue_info_lock); + + /* + * Non-QoS, QoS NDP and MGMT frames should go to a MGMT queue, if one + * exists + */ + if (!ieee80211_is_data_qos(hdr->frame_control) || + ieee80211_is_qos_nullfunc(hdr->frame_control)) { + queue = iwl_mvm_find_free_queue(mvm, IWL_MVM_DQA_MIN_MGMT_QUEUE, + IWL_MVM_DQA_MAX_MGMT_QUEUE); + if (queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE) + IWL_DEBUG_TX_QUEUES(mvm, "Found free MGMT queue #%d\n", + queue); + + /* If no such queue is found, we'll use a DATA queue instead */ + } + + if (queue < 0 && mvmsta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) { + queue = mvmsta->reserved_queue; + IWL_DEBUG_TX_QUEUES(mvm, "Using reserved queue #%d\n", queue); + } + + if (queue < 0) + queue = iwl_mvm_find_free_queue(mvm, IWL_MVM_DQA_MIN_DATA_QUEUE, + IWL_MVM_DQA_MAX_DATA_QUEUE); + if (queue >= 0) + mvm->queue_info[queue].setup_reserved = false; + + spin_unlock(&mvm->queue_info_lock); + + /* TODO: support shared queues for same RA */ + if (queue < 0) + return -ENOSPC; + + /* + * Actual en/disablement of aggregations is through the ADD_STA HCMD, + * but for configuring the SCD to send A-MPDUs we need to mark the queue + * as aggregatable. 
+ * Mark all DATA queues as allowing to be aggregated at some point + */ + cfg.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE); + + IWL_DEBUG_TX_QUEUES(mvm, "Allocating queue #%d to sta %d on tid %d\n", + queue, mvmsta->sta_id, tid); + + ssn = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl)); + iwl_mvm_enable_txq(mvm, queue, mac_queue, ssn, &cfg, + wdg_timeout); + + spin_lock_bh(&mvmsta->lock); + mvmsta->tid_data[tid].txq_id = queue; + mvmsta->tfd_queue_msk |= BIT(queue); + + if (mvmsta->reserved_queue == queue) + mvmsta->reserved_queue = IEEE80211_INVAL_HW_QUEUE; + spin_unlock_bh(&mvmsta->lock); + + return iwl_mvm_sta_send_to_fw(mvm, sta, true, STA_MODIFY_QUEUES); +} + +static inline u8 iwl_mvm_tid_to_ac_queue(int tid) +{ + if (tid == IWL_MAX_TID_COUNT) + return IEEE80211_AC_VO; /* MGMT */ + + return tid_to_mac80211_ac[tid]; +} + +static void iwl_mvm_tx_deferred_stream(struct iwl_mvm *mvm, + struct ieee80211_sta *sta, int tid) +{ + struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); + struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid]; + struct sk_buff *skb; + struct ieee80211_hdr *hdr; + struct sk_buff_head deferred_tx; + u8 mac_queue; + bool no_queue = false; /* Marks if there is a problem with the queue */ + u8 ac; + + lockdep_assert_held(&mvm->mutex); + + skb = skb_peek(&tid_data->deferred_tx_frames); + if (!skb) + return; + hdr = (void *)skb->data; + + ac = iwl_mvm_tid_to_ac_queue(tid); + mac_queue = IEEE80211_SKB_CB(skb)->hw_queue; + + if (tid_data->txq_id == IEEE80211_INVAL_HW_QUEUE && + iwl_mvm_sta_alloc_queue(mvm, sta, ac, tid, hdr)) { + IWL_ERR(mvm, + "Can't alloc TXQ for sta %d tid %d - dropping frame\n", + mvmsta->sta_id, tid); + + /* + * Mark queue as problematic so later the deferred traffic is + * freed, as we can do nothing with it + */ + no_queue = true; + } + + __skb_queue_head_init(&deferred_tx); + + spin_lock(&mvmsta->lock); + skb_queue_splice_init(&tid_data->deferred_tx_frames, &deferred_tx); + spin_unlock(&mvmsta->lock); + + /* Disable bottom-halves when entering TX path */ + local_bh_disable(); + while ((skb = __skb_dequeue(&deferred_tx))) + if (no_queue || iwl_mvm_tx_skb(mvm, skb, sta)) + ieee80211_free_txskb(mvm->hw, skb); + local_bh_enable(); + + /* Wake queue */ + iwl_mvm_start_mac_queues(mvm, BIT(mac_queue)); +} + +void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk) +{ + struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, + add_stream_wk); + struct ieee80211_sta *sta; + struct iwl_mvm_sta *mvmsta; + unsigned long deferred_tid_traffic; + int sta_id, tid; + + mutex_lock(&mvm->mutex); + + /* Go over all stations with deferred traffic */ + for_each_set_bit(sta_id, mvm->sta_deferred_frames, + IWL_MVM_STATION_COUNT) { + clear_bit(sta_id, mvm->sta_deferred_frames); + sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id], + lockdep_is_held(&mvm->mutex)); + if (IS_ERR_OR_NULL(sta)) + continue; + + mvmsta = iwl_mvm_sta_from_mac80211(sta); + deferred_tid_traffic = mvmsta->deferred_traffic_tid_map; + + for_each_set_bit(tid, &deferred_tid_traffic, + IWL_MAX_TID_COUNT + 1) + iwl_mvm_tx_deferred_stream(mvm, sta, tid); + } + + mutex_unlock(&mvm->mutex); +} + +static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm, + struct ieee80211_sta *sta) +{ + struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); + int queue; + + spin_lock_bh(&mvm->queue_info_lock); + + /* Make sure we have free resources for this STA */ + queue = iwl_mvm_find_free_queue(mvm, IWL_MVM_DQA_MIN_DATA_QUEUE, + IWL_MVM_DQA_MAX_DATA_QUEUE); + if (queue < 0) { + 
spin_unlock_bh(&mvm->queue_info_lock); + IWL_ERR(mvm, "No available queues for new station\n"); + return -ENOSPC; + } + mvm->queue_info[queue].setup_reserved = true; + + spin_unlock_bh(&mvm->queue_info_lock); + + mvmsta->reserved_queue = queue; + + IWL_DEBUG_TX_QUEUES(mvm, "Reserving data queue #%d for sta_id %d\n", + queue, mvmsta->sta_id); + + return 0; +} + int iwl_mvm_add_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta) @@ -314,18 +515,29 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm, ret = iwl_mvm_tdls_sta_init(mvm, sta); if (ret) return ret; - } else { + } else if (!iwl_mvm_is_dqa_supported(mvm)) { for (i = 0; i < IEEE80211_NUM_ACS; i++) if (vif->hw_queue[i] != IEEE80211_INVAL_HW_QUEUE) mvm_sta->tfd_queue_msk |= BIT(vif->hw_queue[i]); } /* for HW restart - reset everything but the sequence number */ - for (i = 0; i < IWL_MAX_TID_COUNT; i++) { + for (i = 0; i <= IWL_MAX_TID_COUNT; i++) { u16 seq = mvm_sta->tid_data[i].seq_number; memset(&mvm_sta->tid_data[i], 0, sizeof(mvm_sta->tid_data[i])); mvm_sta->tid_data[i].seq_number = seq; + + if (!iwl_mvm_is_dqa_supported(mvm)) + continue; + + /* + * Mark all queues for this STA as unallocated and defer TX + * frames until the queue is allocated + */ + mvm_sta->tid_data[i].txq_id = IEEE80211_INVAL_HW_QUEUE; + skb_queue_head_init(&mvm_sta->tid_data[i].deferred_tx_frames); } + mvm_sta->deferred_traffic_tid_map = 0; mvm_sta->agg_tids = 0; if (iwl_mvm_has_new_rx_api(mvm) && @@ -338,7 +550,13 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm, mvm_sta->dup_data = dup_data; } - ret = iwl_mvm_sta_send_to_fw(mvm, sta, false); + if (iwl_mvm_is_dqa_supported(mvm)) { + ret = iwl_mvm_reserve_sta_stream(mvm, sta); + if (ret) + goto err; + } + + ret = iwl_mvm_sta_send_to_fw(mvm, sta, false, 0); if (ret) goto err; @@ -364,7 +582,7 @@ int iwl_mvm_update_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta) { - return iwl_mvm_sta_send_to_fw(mvm, sta, true); + return iwl_mvm_sta_send_to_fw(mvm, sta, true, 0); } int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta, @@ -509,6 +727,26 @@ void iwl_mvm_sta_drained_wk(struct work_struct *wk) mutex_unlock(&mvm->mutex); } +static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct iwl_mvm_sta *mvm_sta) +{ + int ac; + int i; + + lockdep_assert_held(&mvm->mutex); + + for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) { + if (mvm_sta->tid_data[i].txq_id == IEEE80211_INVAL_HW_QUEUE) + continue; + + ac = iwl_mvm_tid_to_ac_queue(i); + iwl_mvm_disable_txq(mvm, mvm_sta->tid_data[i].txq_id, + vif->hw_queue[ac], i, 0); + mvm_sta->tid_data[i].txq_id = IEEE80211_INVAL_HW_QUEUE; + } +} + int iwl_mvm_rm_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta) @@ -537,6 +775,10 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm, return ret; ret = iwl_mvm_drain_sta(mvm, mvm_sta, false); + /* If DQA is supported - the queues can be disabled now */ + if (iwl_mvm_is_dqa_supported(mvm)) + iwl_mvm_disable_sta_queues(mvm, vif, mvm_sta); + /* if we are associated - we can't remove the AP STA now */ if (vif->bss_conf.assoc) return ret; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h index 1a8f69a41405..e3efdcd900f0 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h @@ -7,7 +7,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * Copyright(c) 2015 Intel Deutschland GmbH + * Copyright(c) 2015 - 2016 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -34,7 +34,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * Copyright(c) 2015 Intel Deutschland GmbH + * Copyright(c) 2015 - 2016 Intel Deutschland GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -79,6 +79,60 @@ struct iwl_mvm; struct iwl_mvm_vif; +/** + * DOC: DQA - Dynamic Queue Allocation -introduction + * + * Dynamic Queue Allocation (AKA "DQA") is a feature implemented in iwlwifi + * driver to allow dynamic allocation of queues on-demand, rather than allocate + * them statically ahead of time. Ideally, we would like to allocate one queue + * per RA/TID, thus allowing an AP - for example - to send BE traffic to STA2 + * even if it also needs to send traffic to a sleeping STA1, without being + * blocked by the sleeping station. + * + * Although the queues in DQA mode are dynamically allocated, there are still + * some queues that are statically allocated: + * TXQ #0 - command queue + * TXQ #1 - aux frames + * TXQ #2 - P2P device frames + * TXQ #3 - P2P GO/SoftAP GCAST/BCAST frames + * TXQ #4 - BSS DATA frames queue + * TXQ #5-8 - Non-QoS and MGMT frames queue pool + * TXQ #9 - P2P GO/SoftAP probe responses + * TXQ #10-31 - DATA frames queue pool + * The queues are dynamically taken from either the MGMT frames queue pool or + * the DATA frames one. See the %iwl_mvm_dqa_txq for more information on every + * queue. + * + * When a frame for a previously unseen RA/TID comes in, it needs to be deferred + * until a queue is allocated for it, and only then can be TXed. Therefore, it + * is placed into %iwl_mvm_tid_data.deferred_tx_frames, and a worker called + * %mvm->add_stream_wk later allocates the queues and TXes the deferred frames. + * + * For convenience, MGMT is considered as if it has TID=8, and go to the MGMT + * queues in the pool. If there is no longer a free MGMT queue to allocate, a + * queue will be allocated from the DATA pool instead. Since QoS NDPs can create + * a problem for aggregations, they too will use a MGMT queue. + * + * When adding a STA, a DATA queue is reserved for it so that it can TX from + * it. If no such free queue exists for reserving, the STA addition will fail. + * + * If the DATA queue pool gets exhausted, no new STA will be accepted, and if a + * new RA/TID comes in for an existing STA, one of the STA's queues will become + * shared and will serve more than the single TID (but always for the same RA!). + * + * When a RA/TID needs to become aggregated, no new queue is required to be + * allocated, only mark the queue as aggregated via the ADD_STA command. Note, + * however, that a shared queue cannot be aggregated, and only after the other + * TIDs become inactive and are removed - only then can the queue be + * reconfigured and become aggregated. + * + * When removing a station, its queues are returned to the pool for reuse. 
Here + * we also need to make sure that we are synced with the worker thread that TXes + * the deferred frames so we don't get into a situation where the queues are + * removed and then the worker puts deferred frames onto the released queues or + * tries to allocate new queues for a STA we don't need anymore. + */ + /** * DOC: station table - introduction * @@ -253,6 +307,7 @@ enum iwl_mvm_agg_state { /** * struct iwl_mvm_tid_data - holds the states for each RA / TID + * @deferred_tx_frames: deferred TX frames for this RA/TID * @seq_number: the next WiFi sequence number to use * @next_reclaimed: the WiFi sequence number of the next packet to be acked. * This is basically (last acked packet++). @@ -260,7 +315,7 @@ enum iwl_mvm_agg_state { * Tx response (TX_CMD), and the block ack notification (COMPRESSED_BA). * @amsdu_in_ampdu_allowed: true if A-MSDU in A-MPDU is allowed. * @state: state of the BA agreement establishment / tear down. - * @txq_id: Tx queue used by the BA session + * @txq_id: Tx queue used by the BA session / DQA * @ssn: the first packet to be sent in AGG HW queue in Tx AGG start flow, or * the first packet to be sent in legacy HW queue in Tx AGG stop flow. * Basically when next_reclaimed reaches ssn, we can tell mac80211 that @@ -268,6 +323,7 @@ enum iwl_mvm_agg_state { * @tx_time: medium time consumed by this A-MPDU */ struct iwl_mvm_tid_data { + struct sk_buff_head deferred_tx_frames; u16 seq_number; u16 next_reclaimed; /* The rest is Tx AGG related */ @@ -316,7 +372,10 @@ struct iwl_mvm_rxq_dup_data { * we need to signal the EOSP * @lock: lock to protect the whole struct. Since %tid_data is access from Tx * and from Tx response flow, it needs a spinlock. - * @tid_data: per tid data. Look at %iwl_mvm_tid_data. + * @tid_data: per tid data + mgmt. Look at %iwl_mvm_tid_data. + * @reserved_queue: the queue reserved for this STA for DQA purposes + * Every STA has is given one reserved queue to allow it to operate. If no + * such queue can be guaranteed, the STA addition will fail. * @tx_protection: reference counter for controlling the Tx protection. * @tt_tx_protection: is thermal throttling enable Tx protection? * @disable_tx: is tx to this STA disabled? @@ -329,6 +388,7 @@ struct iwl_mvm_rxq_dup_data { * the BA window. To be used for UAPSD only. * @ptk_pn: per-queue PTK PN data structures * @dup_data: per queue duplicate packet detection data + * @deferred_traffic_tid_map: indication bitmap of deferred traffic per-TID * * When mac80211 creates a station it reserves some space (hw->sta_data_size) * in the structure for use by driver. This structure is placed in that @@ -345,12 +405,16 @@ struct iwl_mvm_sta { bool bt_reduced_txpower; bool next_status_eosp; spinlock_t lock; - struct iwl_mvm_tid_data tid_data[IWL_MAX_TID_COUNT]; + struct iwl_mvm_tid_data tid_data[IWL_MAX_TID_COUNT + 1]; struct iwl_lq_sta lq_sta; struct ieee80211_vif *vif; struct iwl_mvm_key_pn __rcu *ptk_pn[4]; struct iwl_mvm_rxq_dup_data *dup_data; + u16 deferred_traffic_tid_map; + + u8 reserved_queue; + /* Temporary, until the new TLC will control the Tx protection */ s8 tx_protection; bool tt_tx_protection; @@ -378,8 +442,18 @@ struct iwl_mvm_int_sta { u32 tfd_queue_msk; }; +/** + * Send the STA info to the FW. + * + * @mvm: the iwl_mvm* to use + * @sta: the STA + * @update: this is true if the FW is being updated about a STA it already knows + * about. Otherwise (if this is a new STA), this should be false. 
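For readers following the DQA description above, here is a deliberately simplified, stand-alone sketch of the deferred-TX pattern it describes: frames for a RA/TID are parked until a queue is allocated, then a worker allocates the queue and drains the backlog. The names, the queue range and the absence of locking are illustrative assumptions only, not the driver's code.

/* Minimal sketch of the DQA deferred-TX idea (hypothetical, user-space). */
#include <stdbool.h>
#include <stdio.h>

#define NO_QUEUE (-1)

struct tid_state {
	int txq_id;     /* allocated HW queue, or NO_QUEUE */
	int deferred;   /* frames parked until a queue exists */
};

/* Pretend allocator: hand out queues 10..31 (the DATA pool). */
static int alloc_data_queue(void)
{
	static int next = 10;

	return next <= 31 ? next++ : NO_QUEUE;
}

/* TX path: if the RA/TID has no queue yet, defer instead of sending. */
static void tx_frame(struct tid_state *tid)
{
	if (tid->txq_id == NO_QUEUE) {
		tid->deferred++;   /* the worker will flush this later */
		return;
	}
	printf("tx on queue %d\n", tid->txq_id);
}

/* Worker: allocate the queue, then drain whatever was deferred. */
static void add_stream_worker(struct tid_state *tid)
{
	if (tid->txq_id == NO_QUEUE)
		tid->txq_id = alloc_data_queue();

	while (tid->deferred > 0 && tid->txq_id != NO_QUEUE) {
		tid->deferred--;
		printf("flushing deferred frame on queue %d\n", tid->txq_id);
	}
}

int main(void)
{
	struct tid_state tid = { .txq_id = NO_QUEUE, .deferred = 0 };

	tx_frame(&tid);          /* deferred: no queue yet */
	add_stream_worker(&tid); /* allocates a queue, flushes the frame */
	tx_frame(&tid);          /* now goes straight out */
	return 0;
}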
+ * @flags: if update==true, this marks what is being changed via ORs of values + * from enum iwl_sta_modify_flag. Otherwise, this is ignored. + */ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta, - bool update); + bool update, unsigned int flags); int iwl_mvm_add_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta); @@ -459,5 +533,6 @@ void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm, struct iwl_mvm_vif *mvmvif, bool disable); void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif); +void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk); #endif /* __sta_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c index c7c3d7bd38ba..24cff98ecca0 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c @@ -639,6 +639,35 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb, } #endif +static void iwl_mvm_tx_add_stream(struct iwl_mvm *mvm, + struct iwl_mvm_sta *mvm_sta, u8 tid, + struct sk_buff *skb) +{ + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + u8 mac_queue = info->hw_queue; + struct sk_buff_head *deferred_tx_frames; + + lockdep_assert_held(&mvm_sta->lock); + + mvm_sta->deferred_traffic_tid_map |= BIT(tid); + set_bit(mvm_sta->sta_id, mvm->sta_deferred_frames); + + deferred_tx_frames = &mvm_sta->tid_data[tid].deferred_tx_frames; + + skb_queue_tail(deferred_tx_frames, skb); + + /* + * The first deferred frame should've stopped the MAC queues, so we + * should never get a second deferred frame for the RA/TID. + */ + if (!WARN(skb_queue_len(deferred_tx_frames) != 1, + "RATID %d/%d has %d deferred frames\n", mvm_sta->sta_id, tid, + skb_queue_len(deferred_tx_frames))) { + iwl_mvm_stop_mac_queues(mvm, BIT(mac_queue)); + schedule_work(&mvm->add_stream_wk); + } +} + /* * Sets the fields in the Tx cmd that are crypto related */ @@ -695,6 +724,14 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb, hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG); hdr->seq_ctrl |= cpu_to_le16(seq_number); is_ampdu = info->flags & IEEE80211_TX_CTL_AMPDU; + } else if (iwl_mvm_is_dqa_supported(mvm) && + (ieee80211_is_qos_nullfunc(fc) || + ieee80211_is_nullfunc(fc))) { + /* + * nullfunc frames should go to the MGMT queue regardless of QOS + */ + tid = IWL_MAX_TID_COUNT; + txq_id = mvmsta->tid_data[tid].txq_id; } /* Copy MAC header from skb into command buffer */ @@ -715,6 +752,23 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb, txq_id = mvmsta->tid_data[tid].txq_id; } + if (iwl_mvm_is_dqa_supported(mvm)) { + if (unlikely(mvmsta->tid_data[tid].txq_id == + IEEE80211_INVAL_HW_QUEUE)) { + iwl_mvm_tx_add_stream(mvm, mvmsta, tid, skb); + + /* + * The frame is now deferred, and the worker scheduled + * will re-allocate it, so we can free it for now. + */ + iwl_trans_free_tx_cmd(mvm->trans, dev_cmd); + spin_unlock(&mvmsta->lock); + return 0; + } + + txq_id = mvmsta->tid_data[tid].txq_id; + } + IWL_DEBUG_TX(mvm, "TX to [%d|%d] Q:%d - seq: 0x%x\n", mvmsta->sta_id, tid, txq_id, IEEE80211_SEQ_TO_SN(seq_number)); From 0f851bbc28c3752440b9db334d65511909a4d427 Mon Sep 17 00:00:00 2001 From: Sara Sharon Date: Wed, 16 Mar 2016 16:28:42 +0200 Subject: [PATCH 0096/1649] iwlwifi: pcie: write to legacy register also in MQ Due to hardware bug, upon any shadow free-queue register write access, a legacy RBD shadow register must be written as well. 
This is required in order to trigger a copy of the shadow registers values after MAC exits sleep state. Specifically, the driver has to write (any value) to the legacy RBD register each time FRBDCB is accessed. Signed-off-by: Sara Sharon Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/pcie/rx.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c index e379dbab685a..59a7e45b12df 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c @@ -210,8 +210,12 @@ static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans, if (trans->cfg->mq_rx_supported) iwl_write_prph(trans, RFH_Q_FRBDCB_WIDX(rxq->id), rxq->write_actual); - else - iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual); + /* + * write to FH_RSCSR_CHNL0_WPTR register even in MQ as a W/A to + * hardware shadow registers bug - writing to RFH_Q_FRBDCB_WIDX will + * not wake the NIC. + */ + iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual); } static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans) From 3a171386f9f1bdbe0d9835c4e68dcaadefdc872a Mon Sep 17 00:00:00 2001 From: Emmanuel Grumbach Date: Mon, 14 Mar 2016 15:21:06 +0200 Subject: [PATCH 0097/1649] iwlwifi: remove IWLWIFI_UAPSD Kconfig We have a module parameter, this is enough. per platform customizations will be done through the init script of the platform. Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/Kconfig | 10 ---------- drivers/net/wireless/intel/iwlwifi/iwl-drv.c | 7 ------- 2 files changed, 17 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/Kconfig b/drivers/net/wireless/intel/iwlwifi/Kconfig index 16c4f383488f..492035f406e9 100644 --- a/drivers/net/wireless/intel/iwlwifi/Kconfig +++ b/drivers/net/wireless/intel/iwlwifi/Kconfig @@ -88,16 +88,6 @@ config IWLWIFI_BCAST_FILTERING If unsure, don't enable this option, as some programs might expect incoming broadcasts for their normal operations. -config IWLWIFI_UAPSD - bool "enable U-APSD by default" - depends on IWLMVM - help - Say Y here to enable U-APSD by default. This may cause - interoperability problems with some APs, manifesting in lower than - expected throughput due to those APs not enabling aggregation - - If unsure, say N. 
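As an illustration of the commit message's point that per-platform defaults now go through the module parameter rather than Kconfig, a platform could ship a modprobe snippet such as the following (the file name and chosen value are hypothetical, not part of this patch):

# /etc/modprobe.d/iwlwifi-uapsd.conf (hypothetical example)
# uapsd_disable is a bitmap: 1 disables U-APSD on BSS, 2 on P2P Client.
# The built-in default after this patch is 3 (both disabled); 0 re-enables both.
options iwlwifi uapsd_disable=0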
- config IWLWIFI_PCIE_RTPM bool "Enable runtime power management mode for PCIe devices" depends on IWLMVM && PM diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c index 2cd9c3139a1c..9c2a2fd0f40c 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c @@ -1560,9 +1560,7 @@ struct iwl_mod_params iwlwifi_mod_params = { .power_level = IWL_POWER_INDEX_1, .d0i3_disable = true, .d0i3_entry_delay = 1000, -#ifndef CONFIG_IWLWIFI_UAPSD .uapsd_disable = IWL_DISABLE_UAPSD_BSS | IWL_DISABLE_UAPSD_P2P_CLIENT, -#endif /* CONFIG_IWLWIFI_UAPSD */ /* the rest are 0 by default */ }; IWL_EXPORT_SYMBOL(iwlwifi_mod_params); @@ -1682,13 +1680,8 @@ MODULE_PARM_DESC(lar_disable, "disable LAR functionality (default: N)"); module_param_named(uapsd_disable, iwlwifi_mod_params.uapsd_disable, uint, S_IRUGO | S_IWUSR); -#ifdef CONFIG_IWLWIFI_UAPSD -MODULE_PARM_DESC(uapsd_disable, - "disable U-APSD functionality bitmap 1: BSS 2: P2P Client (default: 0)"); -#else MODULE_PARM_DESC(uapsd_disable, "disable U-APSD functionality bitmap 1: BSS 2: P2P Client (default: 3)"); -#endif /* * set bt_coex_active to true, uCode will do kill/defer From a0b09f13036cedfd67c9cb4b9d05138e7022723d Mon Sep 17 00:00:00 2001 From: Ayala Beker Date: Wed, 3 Feb 2016 15:36:52 +0200 Subject: [PATCH 0098/1649] iwlwifi: mvm: update GSCAN capabilities Gscan capabilities were updated with new capabilities supported by the device. Update GSCAN capabilities TLV. Signed-off-by: Ayala Beker Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/iwl-drv.c | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c index 9c2a2fd0f40c..7cd17f0e45e8 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c @@ -1060,11 +1060,18 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv, return -EINVAL; } - if (WARN(fw_has_capa(capa, IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT) && - !gscan_capa, - "GSCAN is supported but capabilities TLV is unavailable\n")) + /* + * If ucode advertises that it supports GSCAN but GSCAN + * capabilities TLV is not present, or if it has an old format, + * warn and continue without GSCAN. + */ + if (fw_has_capa(capa, IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT) && + !gscan_capa) { + IWL_DEBUG_INFO(drv, + "GSCAN is supported but capabilities TLV is unavailable\n"); __clear_bit((__force long)IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT, capa->_capa); + } return 0; From 97f95c93c8ed5177371e75275f236513152fa308 Mon Sep 17 00:00:00 2001 From: Sara Sharon Date: Mon, 7 Mar 2016 16:55:20 +0200 Subject: [PATCH 0099/1649] iwlwifi: remove support for fw older than -16.ucode API version lower than 16 is not supported anymore - don't load older ucode. Remove code handling older versions. 
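To illustrate the gating that remains once the pre-16 code paths are gone, here is a minimal stand-alone sketch (hypothetical names, not the driver's code): the API version is taken directly from the ucode version and checked against a single minimum, with no legacy decoding path left.

/* Hypothetical sketch of strict minimum-API gating. */
#include <stdbool.h>
#include <stdio.h>

#define UCODE_API_MIN 16u

static bool ucode_supported(unsigned int ucode_api_ver)
{
	/* Anything older than the minimum is simply refused. */
	return ucode_api_ver >= UCODE_API_MIN;
}

int main(void)
{
	unsigned int vers[] = { 13, 16, 21 };
	unsigned int i;

	for (i = 0; i < sizeof(vers) / sizeof(vers[0]); i++)
		printf("API %u: %s\n", vers[i],
		       ucode_supported(vers[i]) ? "load" : "reject");
	return 0;
}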
Signed-off-by: Sara Sharon Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/iwl-7000.c | 12 +- drivers/net/wireless/intel/iwlwifi/iwl-8000.c | 4 +- drivers/net/wireless/intel/iwlwifi/iwl-9000.c | 4 +- drivers/net/wireless/intel/iwlwifi/iwl-drv.c | 5 +- .../net/wireless/intel/iwlwifi/iwl-fw-file.h | 4 - .../net/wireless/intel/iwlwifi/mvm/Makefile | 2 +- drivers/net/wireless/intel/iwlwifi/mvm/coex.c | 42 - .../wireless/intel/iwlwifi/mvm/coex_legacy.c | 1315 ----------------- .../net/wireless/intel/iwlwifi/mvm/debugfs.c | 162 +- drivers/net/wireless/intel/iwlwifi/mvm/mvm.h | 16 - drivers/net/wireless/intel/iwlwifi/mvm/sf.c | 8 +- .../net/wireless/intel/iwlwifi/mvm/utils.c | 86 -- 12 files changed, 40 insertions(+), 1620 deletions(-) delete mode 100644 drivers/net/wireless/intel/iwlwifi/mvm/coex_legacy.c diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-7000.c b/drivers/net/wireless/intel/iwlwifi/iwl-7000.c index fc475ce59b47..f4012a3f4d06 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-7000.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-7000.c @@ -77,15 +77,15 @@ #define IWL3168_UCODE_API_MAX 21 /* Oldest version we won't warn about */ -#define IWL7260_UCODE_API_OK 13 -#define IWL7265_UCODE_API_OK 13 -#define IWL7265D_UCODE_API_OK 13 +#define IWL7260_UCODE_API_OK 16 +#define IWL7265_UCODE_API_OK 16 +#define IWL7265D_UCODE_API_OK 16 #define IWL3168_UCODE_API_OK 20 /* Lowest firmware API version supported */ -#define IWL7260_UCODE_API_MIN 13 -#define IWL7265_UCODE_API_MIN 13 -#define IWL7265D_UCODE_API_MIN 13 +#define IWL7260_UCODE_API_MIN 16 +#define IWL7265_UCODE_API_MIN 16 +#define IWL7265D_UCODE_API_MIN 16 #define IWL3168_UCODE_API_MIN 20 /* NVM versions */ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-8000.c b/drivers/net/wireless/intel/iwlwifi/iwl-8000.c index 97be104d1203..49bb2a5f9dcf 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-8000.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-8000.c @@ -74,11 +74,11 @@ #define IWL8265_UCODE_API_MAX 21 /* Oldest version we won't warn about */ -#define IWL8000_UCODE_API_OK 13 +#define IWL8000_UCODE_API_OK 16 #define IWL8265_UCODE_API_OK 20 /* Lowest firmware API version supported */ -#define IWL8000_UCODE_API_MIN 13 +#define IWL8000_UCODE_API_MIN 16 #define IWL8265_UCODE_API_MIN 20 /* NVM versions */ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-9000.c b/drivers/net/wireless/intel/iwlwifi/iwl-9000.c index 642fc92d7788..277396a30713 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-9000.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-9000.c @@ -58,10 +58,10 @@ #define IWL9000_UCODE_API_MAX 21 /* Oldest version we won't warn about */ -#define IWL9000_UCODE_API_OK 13 +#define IWL9000_UCODE_API_OK 16 /* Lowest firmware API version supported */ -#define IWL9000_UCODE_API_MIN 13 +#define IWL9000_UCODE_API_MIN 16 /* NVM versions */ #define IWL9000_NVM_VERSION 0x0a1d diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c index 7cd17f0e45e8..9a680ac48820 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c @@ -1255,10 +1255,7 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context) if (err) goto try_again; - if (fw_has_api(&drv->fw.ucode_capa, IWL_UCODE_TLV_API_NEW_VERSION)) - api_ver = drv->fw.ucode_ver; - else - api_ver = IWL_UCODE_API(drv->fw.ucode_ver); + api_ver = drv->fw.ucode_ver; /* * api_ver should match the api version forming part of the diff --git 
a/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h b/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h index c82b94167ac6..a6e8826c61db 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h @@ -245,13 +245,11 @@ typedef unsigned int __bitwise__ iwl_ucode_tlv_api_t; /** * enum iwl_ucode_tlv_api - ucode api - * @IWL_UCODE_TLV_API_BT_COEX_SPLIT: new API for BT Coex * @IWL_UCODE_TLV_API_FRAGMENTED_SCAN: This ucode supports active dwell time * longer than the passive one, which is essential for fragmented scan. * @IWL_UCODE_TLV_API_WIFI_MCC_UPDATE: ucode supports MCC updates with source. * @IWL_UCODE_TLV_API_WIDE_CMD_HDR: ucode supports wide command header * @IWL_UCODE_TLV_API_LQ_SS_PARAMS: Configure STBC/BFER via LQ CMD ss_params - * @IWL_UCODE_TLV_API_NEW_VERSION: new versioning format * @IWL_UCODE_TLV_API_EXT_SCAN_PRIORITY: scan APIs use 8-level priority * instead of 3. * @IWL_UCODE_TLV_API_TX_POWER_CHAIN: TX power API has larger command size @@ -260,12 +258,10 @@ typedef unsigned int __bitwise__ iwl_ucode_tlv_api_t; * @NUM_IWL_UCODE_TLV_API: number of bits used */ enum iwl_ucode_tlv_api { - IWL_UCODE_TLV_API_BT_COEX_SPLIT = (__force iwl_ucode_tlv_api_t)3, IWL_UCODE_TLV_API_FRAGMENTED_SCAN = (__force iwl_ucode_tlv_api_t)8, IWL_UCODE_TLV_API_WIFI_MCC_UPDATE = (__force iwl_ucode_tlv_api_t)9, IWL_UCODE_TLV_API_WIDE_CMD_HDR = (__force iwl_ucode_tlv_api_t)14, IWL_UCODE_TLV_API_LQ_SS_PARAMS = (__force iwl_ucode_tlv_api_t)18, - IWL_UCODE_TLV_API_NEW_VERSION = (__force iwl_ucode_tlv_api_t)20, IWL_UCODE_TLV_API_EXT_SCAN_PRIORITY = (__force iwl_ucode_tlv_api_t)24, IWL_UCODE_TLV_API_TX_POWER_CHAIN = (__force iwl_ucode_tlv_api_t)27, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/Makefile b/drivers/net/wireless/intel/iwlwifi/mvm/Makefile index 23e7e2937566..2e06dfc1c477 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/Makefile +++ b/drivers/net/wireless/intel/iwlwifi/mvm/Makefile @@ -2,7 +2,7 @@ obj-$(CONFIG_IWLMVM) += iwlmvm.o iwlmvm-y += fw.o mac80211.o nvm.o ops.o phy-ctxt.o mac-ctxt.o iwlmvm-y += utils.o rx.o rxmq.o tx.o binding.o quota.o sta.o sf.o iwlmvm-y += scan.o time-event.o rs.o -iwlmvm-y += power.o coex.o coex_legacy.o +iwlmvm-y += power.o coex.o iwlmvm-y += tt.o offloading.o tdls.o iwlmvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o debugfs-vif.o iwlmvm-$(CONFIG_IWLWIFI_LEDS) += led.o diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c index 2e098f8e0f83..35cdeca3d61e 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c @@ -411,9 +411,6 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm) struct iwl_bt_coex_cmd bt_cmd = {}; u32 mode; - if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT)) - return iwl_send_bt_init_conf_old(mvm); - lockdep_assert_held(&mvm->mutex); if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS)) { @@ -728,12 +725,6 @@ void iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_bt_coex_profile_notif *notif = (void *)pkt->data; - if (!fw_has_api(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_API_BT_COEX_SPLIT)) { - iwl_mvm_rx_bt_coex_notif_old(mvm, rxb); - return; - } - IWL_DEBUG_COEX(mvm, "BT Coex Notification received\n"); IWL_DEBUG_COEX(mvm, "\tBT ci compliance %d\n", notif->bt_ci_compliance); IWL_DEBUG_COEX(mvm, "\tBT primary_ch_lut %d\n", @@ -755,12 +746,6 @@ void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct 
iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); int ret; - if (!fw_has_api(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_API_BT_COEX_SPLIT)) { - iwl_mvm_bt_rssi_event_old(mvm, vif, rssi_event); - return; - } - lockdep_assert_held(&mvm->mutex); /* Ignore updates if we are in force mode */ @@ -807,9 +792,6 @@ u16 iwl_mvm_coex_agg_time_limit(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *phy_ctxt = mvmvif->phy_ctxt; enum iwl_bt_coex_lut_type lut_type; - if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT)) - return iwl_mvm_coex_agg_time_limit_old(mvm, sta); - if (IWL_COEX_IS_TTC_ON(mvm->last_bt_notif.ttc_rrc_status, phy_ctxt->id)) return LINK_QUAL_AGG_TIME_LIMIT_DEF; @@ -834,9 +816,6 @@ bool iwl_mvm_bt_coex_is_mimo_allowed(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *phy_ctxt = mvmvif->phy_ctxt; enum iwl_bt_coex_lut_type lut_type; - if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT)) - return iwl_mvm_bt_coex_is_mimo_allowed_old(mvm, sta); - if (IWL_COEX_IS_TTC_ON(mvm->last_bt_notif.ttc_rrc_status, phy_ctxt->id)) return true; @@ -864,9 +843,6 @@ bool iwl_mvm_bt_coex_is_ant_avail(struct iwl_mvm *mvm, u8 ant) if (ant & mvm->cfg->non_shared_ant) return true; - if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT)) - return iwl_mvm_bt_coex_is_shared_ant_avail_old(mvm); - return le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) < BT_HIGH_TRAFFIC; } @@ -877,9 +853,6 @@ bool iwl_mvm_bt_coex_is_shared_ant_avail(struct iwl_mvm *mvm) if (mvm->cfg->bt_shared_single_ant) return true; - if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT)) - return iwl_mvm_bt_coex_is_shared_ant_avail_old(mvm); - return le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) < BT_HIGH_TRAFFIC; } @@ -888,9 +861,6 @@ bool iwl_mvm_bt_coex_is_tpc_allowed(struct iwl_mvm *mvm, { u32 bt_activity = le32_to_cpu(mvm->last_bt_notif.bt_activity_grading); - if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT)) - return iwl_mvm_bt_coex_is_tpc_allowed_old(mvm, band); - if (band != IEEE80211_BAND_2GHZ) return false; @@ -937,12 +907,6 @@ u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr, void iwl_mvm_bt_coex_vif_change(struct iwl_mvm *mvm) { - if (!fw_has_api(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_API_BT_COEX_SPLIT)) { - iwl_mvm_bt_coex_vif_change_old(mvm); - return; - } - iwl_mvm_bt_coex_notif_handle(mvm); } @@ -955,12 +919,6 @@ void iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm, u8 __maybe_unused lower_bound, upper_bound; u8 lut; - if (!fw_has_api(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_API_BT_COEX_SPLIT)) { - iwl_mvm_rx_ant_coupling_notif_old(mvm, rxb); - return; - } - if (!iwl_mvm_bt_is_plcr_supported(mvm)) return; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/coex_legacy.c b/drivers/net/wireless/intel/iwlwifi/mvm/coex_legacy.c deleted file mode 100644 index 015045733444..000000000000 --- a/drivers/net/wireless/intel/iwlwifi/mvm/coex_legacy.c +++ /dev/null @@ -1,1315 +0,0 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved. 
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- * - *****************************************************************************/ - -#include -#include -#include - -#include "fw-api-coex.h" -#include "iwl-modparams.h" -#include "mvm.h" -#include "iwl-debug.h" - -#define EVENT_PRIO_ANT(_evt, _prio, _shrd_ant) \ - [(_evt)] = (((_prio) << BT_COEX_PRIO_TBL_PRIO_POS) | \ - ((_shrd_ant) << BT_COEX_PRIO_TBL_SHRD_ANT_POS)) - -static const u8 iwl_bt_prio_tbl[BT_COEX_PRIO_TBL_EVT_MAX] = { - EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_INIT_CALIB1, - BT_COEX_PRIO_TBL_PRIO_BYPASS, 0), - EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_INIT_CALIB2, - BT_COEX_PRIO_TBL_PRIO_BYPASS, 1), - EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_LOW1, - BT_COEX_PRIO_TBL_PRIO_LOW, 0), - EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_LOW2, - BT_COEX_PRIO_TBL_PRIO_LOW, 1), - EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_HIGH1, - BT_COEX_PRIO_TBL_PRIO_HIGH, 0), - EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_HIGH2, - BT_COEX_PRIO_TBL_PRIO_HIGH, 1), - EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_DTIM, - BT_COEX_PRIO_TBL_DISABLED, 0), - EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_SCAN52, - BT_COEX_PRIO_TBL_PRIO_COEX_OFF, 0), - EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_SCAN24, - BT_COEX_PRIO_TBL_PRIO_COEX_ON, 0), - EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_IDLE, - BT_COEX_PRIO_TBL_PRIO_COEX_IDLE, 0), - 0, 0, 0, 0, 0, 0, -}; - -#undef EVENT_PRIO_ANT - -static int iwl_send_bt_prio_tbl(struct iwl_mvm *mvm) -{ - if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS)) - return 0; - - return iwl_mvm_send_cmd_pdu(mvm, BT_COEX_PRIO_TABLE, 0, - sizeof(struct iwl_bt_coex_prio_tbl_cmd), - &iwl_bt_prio_tbl); -} - -static const __le32 iwl_bt_prio_boost[BT_COEX_BOOST_SIZE] = { - cpu_to_le32(0xf0f0f0f0), /* 50% */ - cpu_to_le32(0xc0c0c0c0), /* 25% */ - cpu_to_le32(0xfcfcfcfc), /* 75% */ - cpu_to_le32(0xfefefefe), /* 87.5% */ -}; - -static const __le32 iwl_single_shared_ant[BT_COEX_MAX_LUT][BT_COEX_LUT_SIZE] = { - { - cpu_to_le32(0x40000000), - cpu_to_le32(0x00000000), - cpu_to_le32(0x44000000), - cpu_to_le32(0x00000000), - cpu_to_le32(0x40000000), - cpu_to_le32(0x00000000), - cpu_to_le32(0x44000000), - cpu_to_le32(0x00000000), - cpu_to_le32(0xc0004000), - cpu_to_le32(0xf0005000), - cpu_to_le32(0xc0004000), - cpu_to_le32(0xf0005000), - }, - { - cpu_to_le32(0x40000000), - cpu_to_le32(0x00000000), - cpu_to_le32(0x44000000), - cpu_to_le32(0x00000000), - cpu_to_le32(0x40000000), - cpu_to_le32(0x00000000), - cpu_to_le32(0x44000000), - cpu_to_le32(0x00000000), - cpu_to_le32(0xc0004000), - cpu_to_le32(0xf0005000), - cpu_to_le32(0xc0004000), - cpu_to_le32(0xf0005000), - }, - { - cpu_to_le32(0x40000000), - cpu_to_le32(0x00000000), - cpu_to_le32(0x44000000), - cpu_to_le32(0x00000000), - cpu_to_le32(0x40000000), - cpu_to_le32(0x00000000), - cpu_to_le32(0x44000000), - cpu_to_le32(0x00000000), - cpu_to_le32(0xc0004000), - cpu_to_le32(0xf0005000), - cpu_to_le32(0xc0004000), - cpu_to_le32(0xf0005000), - }, -}; - -static const __le32 iwl_combined_lookup[BT_COEX_MAX_LUT][BT_COEX_LUT_SIZE] = { - { - /* Tight */ - cpu_to_le32(0xaaaaaaaa), - cpu_to_le32(0xaaaaaaaa), - cpu_to_le32(0xaeaaaaaa), - cpu_to_le32(0xaaaaaaaa), - cpu_to_le32(0xcc00ff28), - cpu_to_le32(0x0000aaaa), - cpu_to_le32(0xcc00aaaa), - cpu_to_le32(0x0000aaaa), - cpu_to_le32(0xc0004000), - cpu_to_le32(0x00004000), - cpu_to_le32(0xf0005000), - cpu_to_le32(0xf0005000), - }, - { - /* Loose */ - cpu_to_le32(0xaaaaaaaa), - cpu_to_le32(0xaaaaaaaa), - cpu_to_le32(0xaaaaaaaa), - cpu_to_le32(0xaaaaaaaa), - cpu_to_le32(0xcc00ff28), - cpu_to_le32(0x0000aaaa), - 
cpu_to_le32(0xcc00aaaa), - cpu_to_le32(0x0000aaaa), - cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), - cpu_to_le32(0xf0005000), - cpu_to_le32(0xf0005000), - }, - { - /* Tx Tx disabled */ - cpu_to_le32(0xaaaaaaaa), - cpu_to_le32(0xaaaaaaaa), - cpu_to_le32(0xeeaaaaaa), - cpu_to_le32(0xaaaaaaaa), - cpu_to_le32(0xcc00ff28), - cpu_to_le32(0x0000aaaa), - cpu_to_le32(0xcc00aaaa), - cpu_to_le32(0x0000aaaa), - cpu_to_le32(0xc0004000), - cpu_to_le32(0xc0004000), - cpu_to_le32(0xf0005000), - cpu_to_le32(0xf0005000), - }, -}; - -/* 20MHz / 40MHz below / 40Mhz above*/ -static const __le64 iwl_ci_mask[][3] = { - /* dummy entry for channel 0 */ - {cpu_to_le64(0), cpu_to_le64(0), cpu_to_le64(0)}, - { - cpu_to_le64(0x0000001FFFULL), - cpu_to_le64(0x0ULL), - cpu_to_le64(0x00007FFFFFULL), - }, - { - cpu_to_le64(0x000000FFFFULL), - cpu_to_le64(0x0ULL), - cpu_to_le64(0x0003FFFFFFULL), - }, - { - cpu_to_le64(0x000003FFFCULL), - cpu_to_le64(0x0ULL), - cpu_to_le64(0x000FFFFFFCULL), - }, - { - cpu_to_le64(0x00001FFFE0ULL), - cpu_to_le64(0x0ULL), - cpu_to_le64(0x007FFFFFE0ULL), - }, - { - cpu_to_le64(0x00007FFF80ULL), - cpu_to_le64(0x00007FFFFFULL), - cpu_to_le64(0x01FFFFFF80ULL), - }, - { - cpu_to_le64(0x0003FFFC00ULL), - cpu_to_le64(0x0003FFFFFFULL), - cpu_to_le64(0x0FFFFFFC00ULL), - }, - { - cpu_to_le64(0x000FFFF000ULL), - cpu_to_le64(0x000FFFFFFCULL), - cpu_to_le64(0x3FFFFFF000ULL), - }, - { - cpu_to_le64(0x007FFF8000ULL), - cpu_to_le64(0x007FFFFFE0ULL), - cpu_to_le64(0xFFFFFF8000ULL), - }, - { - cpu_to_le64(0x01FFFE0000ULL), - cpu_to_le64(0x01FFFFFF80ULL), - cpu_to_le64(0xFFFFFE0000ULL), - }, - { - cpu_to_le64(0x0FFFF00000ULL), - cpu_to_le64(0x0FFFFFFC00ULL), - cpu_to_le64(0x0ULL), - }, - { - cpu_to_le64(0x3FFFC00000ULL), - cpu_to_le64(0x3FFFFFF000ULL), - cpu_to_le64(0x0) - }, - { - cpu_to_le64(0xFFFE000000ULL), - cpu_to_le64(0xFFFFFF8000ULL), - cpu_to_le64(0x0) - }, - { - cpu_to_le64(0xFFF8000000ULL), - cpu_to_le64(0xFFFFFE0000ULL), - cpu_to_le64(0x0) - }, - { - cpu_to_le64(0xFFC0000000ULL), - cpu_to_le64(0x0ULL), - cpu_to_le64(0x0ULL) - }, -}; - -enum iwl_bt_kill_msk { - BT_KILL_MSK_DEFAULT, - BT_KILL_MSK_NEVER, - BT_KILL_MSK_ALWAYS, - BT_KILL_MSK_MAX, -}; - -static const u32 iwl_bt_ctl_kill_msk[BT_KILL_MSK_MAX] = { - [BT_KILL_MSK_DEFAULT] = 0xfffffc00, - [BT_KILL_MSK_NEVER] = 0xffffffff, - [BT_KILL_MSK_ALWAYS] = 0, -}; - -static const u8 iwl_bt_cts_kill_msk[BT_MAX_AG][BT_COEX_MAX_LUT] = { - { - BT_KILL_MSK_ALWAYS, - BT_KILL_MSK_ALWAYS, - BT_KILL_MSK_ALWAYS, - }, - { - BT_KILL_MSK_NEVER, - BT_KILL_MSK_NEVER, - BT_KILL_MSK_NEVER, - }, - { - BT_KILL_MSK_NEVER, - BT_KILL_MSK_NEVER, - BT_KILL_MSK_NEVER, - }, - { - BT_KILL_MSK_DEFAULT, - BT_KILL_MSK_NEVER, - BT_KILL_MSK_DEFAULT, - }, -}; - -static const u8 iwl_bt_ack_kill_msk[BT_MAX_AG][BT_COEX_MAX_LUT] = { - { - BT_KILL_MSK_ALWAYS, - BT_KILL_MSK_ALWAYS, - BT_KILL_MSK_ALWAYS, - }, - { - BT_KILL_MSK_ALWAYS, - BT_KILL_MSK_ALWAYS, - BT_KILL_MSK_ALWAYS, - }, - { - BT_KILL_MSK_ALWAYS, - BT_KILL_MSK_ALWAYS, - BT_KILL_MSK_ALWAYS, - }, - { - BT_KILL_MSK_DEFAULT, - BT_KILL_MSK_ALWAYS, - BT_KILL_MSK_DEFAULT, - }, -}; - -struct corunning_block_luts { - u8 range; - __le32 lut20[BT_COEX_CORUN_LUT_SIZE]; -}; - -/* - * Ranges for the antenna coupling calibration / co-running block LUT: - * LUT0: [ 0, 12[ - * LUT1: [12, 20[ - * LUT2: [20, 21[ - * LUT3: [21, 23[ - * LUT4: [23, 27[ - * LUT5: [27, 30[ - * LUT6: [30, 32[ - * LUT7: [32, 33[ - * LUT8: [33, - [ - */ -static const struct corunning_block_luts antenna_coupling_ranges[] = { - { - .range = 0, - .lut20 = { - 
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - }, - }, - { - .range = 12, - .lut20 = { - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - }, - }, - { - .range = 20, - .lut20 = { - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - }, - }, - { - .range = 21, - .lut20 = { - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - }, - }, - { - .range = 23, - .lut20 = { - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - 
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - }, - }, - { - .range = 27, - .lut20 = { - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - }, - }, - { - .range = 30, - .lut20 = { - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - }, - }, - { - .range = 32, - .lut20 = { - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - }, - }, - { - .range = 33, - .lut20 = { - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - 
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - }, - }, -}; - -static enum iwl_bt_coex_lut_type -iwl_get_coex_type(struct iwl_mvm *mvm, const struct ieee80211_vif *vif) -{ - struct ieee80211_chanctx_conf *chanctx_conf; - enum iwl_bt_coex_lut_type ret; - u16 phy_ctx_id; - - /* - * Checking that we hold mvm->mutex is a good idea, but the rate - * control can't acquire the mutex since it runs in Tx path. - * So this is racy in that case, but in the worst case, the AMPDU - * size limit will be wrong for a short time which is not a big - * issue. - */ - - rcu_read_lock(); - - chanctx_conf = rcu_dereference(vif->chanctx_conf); - - if (!chanctx_conf || - chanctx_conf->def.chan->band != IEEE80211_BAND_2GHZ) { - rcu_read_unlock(); - return BT_COEX_INVALID_LUT; - } - - ret = BT_COEX_TX_DIS_LUT; - - if (mvm->cfg->bt_shared_single_ant) { - rcu_read_unlock(); - return ret; - } - - phy_ctx_id = *((u16 *)chanctx_conf->drv_priv); - - if (mvm->last_bt_ci_cmd_old.primary_ch_phy_id == phy_ctx_id) - ret = le32_to_cpu(mvm->last_bt_notif_old.primary_ch_lut); - else if (mvm->last_bt_ci_cmd_old.secondary_ch_phy_id == phy_ctx_id) - ret = le32_to_cpu(mvm->last_bt_notif_old.secondary_ch_lut); - /* else - default = TX TX disallowed */ - - rcu_read_unlock(); - - return ret; -} - -int iwl_send_bt_init_conf_old(struct iwl_mvm *mvm) -{ - struct iwl_bt_coex_cmd_old *bt_cmd; - struct iwl_host_cmd cmd = { - .id = BT_CONFIG, - .len = { sizeof(*bt_cmd), }, - .dataflags = { IWL_HCMD_DFL_NOCOPY, }, - }; - int ret; - u32 flags; - - ret = iwl_send_bt_prio_tbl(mvm); - if (ret) - return ret; - - bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_KERNEL); - if (!bt_cmd) - return -ENOMEM; - cmd.data[0] = bt_cmd; - - lockdep_assert_held(&mvm->mutex); - - if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS)) { - switch (mvm->bt_force_ant_mode) { - case BT_FORCE_ANT_AUTO: - flags = BT_COEX_AUTO_OLD; - break; - case BT_FORCE_ANT_BT: - flags = BT_COEX_BT_OLD; - break; - case BT_FORCE_ANT_WIFI: - flags = BT_COEX_WIFI_OLD; - break; - default: - WARN_ON(1); - flags = 0; - } - - bt_cmd->flags = cpu_to_le32(flags); - bt_cmd->valid_bit_msk = cpu_to_le32(BT_VALID_ENABLE); - goto send_cmd; - } - - bt_cmd->max_kill = 5; - bt_cmd->bt4_antenna_isolation_thr = - IWL_MVM_BT_COEX_ANTENNA_COUPLING_THRS; - bt_cmd->bt4_antenna_isolation = iwlwifi_mod_params.ant_coupling; - bt_cmd->bt4_tx_tx_delta_freq_thr = 15; - bt_cmd->bt4_tx_rx_max_freq0 = 15; - bt_cmd->override_primary_lut = BT_COEX_INVALID_LUT; - bt_cmd->override_secondary_lut = BT_COEX_INVALID_LUT; - - flags = iwlwifi_mod_params.bt_coex_active ? 
- BT_COEX_NW_OLD : BT_COEX_DISABLE_OLD; - bt_cmd->flags = cpu_to_le32(flags); - - bt_cmd->valid_bit_msk = cpu_to_le32(BT_VALID_ENABLE | - BT_VALID_BT_PRIO_BOOST | - BT_VALID_MAX_KILL | - BT_VALID_3W_TMRS | - BT_VALID_KILL_ACK | - BT_VALID_KILL_CTS | - BT_VALID_REDUCED_TX_POWER | - BT_VALID_LUT | - BT_VALID_WIFI_RX_SW_PRIO_BOOST | - BT_VALID_WIFI_TX_SW_PRIO_BOOST | - BT_VALID_ANT_ISOLATION | - BT_VALID_ANT_ISOLATION_THRS | - BT_VALID_TXTX_DELTA_FREQ_THRS | - BT_VALID_TXRX_MAX_FREQ_0 | - BT_VALID_SYNC_TO_SCO | - BT_VALID_TTC | - BT_VALID_RRC); - - if (IWL_MVM_BT_COEX_SYNC2SCO) - bt_cmd->flags |= cpu_to_le32(BT_COEX_SYNC2SCO); - - if (iwl_mvm_bt_is_plcr_supported(mvm)) { - bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_CORUN_LUT_20 | - BT_VALID_CORUN_LUT_40); - bt_cmd->flags |= cpu_to_le32(BT_COEX_CORUNNING); - } - - if (IWL_MVM_BT_COEX_MPLUT) { - bt_cmd->flags |= cpu_to_le32(BT_COEX_MPLUT); - bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_MULTI_PRIO_LUT); - } - - if (IWL_MVM_BT_COEX_TTC) - bt_cmd->flags |= cpu_to_le32(BT_COEX_TTC); - - if (iwl_mvm_bt_is_rrc_supported(mvm)) - bt_cmd->flags |= cpu_to_le32(BT_COEX_RRC); - - if (mvm->cfg->bt_shared_single_ant) - memcpy(&bt_cmd->decision_lut, iwl_single_shared_ant, - sizeof(iwl_single_shared_ant)); - else - memcpy(&bt_cmd->decision_lut, iwl_combined_lookup, - sizeof(iwl_combined_lookup)); - - /* Take first Co-running block LUT to get started */ - memcpy(bt_cmd->bt4_corun_lut20, antenna_coupling_ranges[0].lut20, - sizeof(bt_cmd->bt4_corun_lut20)); - memcpy(bt_cmd->bt4_corun_lut40, antenna_coupling_ranges[0].lut20, - sizeof(bt_cmd->bt4_corun_lut40)); - - memcpy(&bt_cmd->bt_prio_boost, iwl_bt_prio_boost, - sizeof(iwl_bt_prio_boost)); - bt_cmd->bt4_multiprio_lut[0] = cpu_to_le32(IWL_MVM_BT_COEX_MPLUT_REG0); - bt_cmd->bt4_multiprio_lut[1] = cpu_to_le32(IWL_MVM_BT_COEX_MPLUT_REG1); - -send_cmd: - memset(&mvm->last_bt_notif_old, 0, sizeof(mvm->last_bt_notif_old)); - memset(&mvm->last_bt_ci_cmd_old, 0, sizeof(mvm->last_bt_ci_cmd_old)); - - ret = iwl_mvm_send_cmd(mvm, &cmd); - - kfree(bt_cmd); - return ret; -} - -static int iwl_mvm_bt_udpate_ctrl_kill_msk(struct iwl_mvm *mvm) -{ - struct iwl_bt_coex_profile_notif_old *notif = &mvm->last_bt_notif_old; - u32 primary_lut = le32_to_cpu(notif->primary_ch_lut); - u32 ag = le32_to_cpu(notif->bt_activity_grading); - struct iwl_bt_coex_cmd_old *bt_cmd; - u8 ack_kill_msk, cts_kill_msk; - struct iwl_host_cmd cmd = { - .id = BT_CONFIG, - .data[0] = &bt_cmd, - .len = { sizeof(*bt_cmd), }, - .dataflags = { IWL_HCMD_DFL_NOCOPY, }, - }; - int ret = 0; - - lockdep_assert_held(&mvm->mutex); - - ack_kill_msk = iwl_bt_ack_kill_msk[ag][primary_lut]; - cts_kill_msk = iwl_bt_cts_kill_msk[ag][primary_lut]; - - if (mvm->bt_ack_kill_msk[0] == ack_kill_msk && - mvm->bt_cts_kill_msk[0] == cts_kill_msk) - return 0; - - mvm->bt_ack_kill_msk[0] = ack_kill_msk; - mvm->bt_cts_kill_msk[0] = cts_kill_msk; - - bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_KERNEL); - if (!bt_cmd) - return -ENOMEM; - cmd.data[0] = bt_cmd; - bt_cmd->flags = cpu_to_le32(BT_COEX_NW_OLD); - - bt_cmd->kill_ack_msk = cpu_to_le32(iwl_bt_ctl_kill_msk[ack_kill_msk]); - bt_cmd->kill_cts_msk = cpu_to_le32(iwl_bt_ctl_kill_msk[cts_kill_msk]); - bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_ENABLE | - BT_VALID_KILL_ACK | - BT_VALID_KILL_CTS); - - ret = iwl_mvm_send_cmd(mvm, &cmd); - - kfree(bt_cmd); - return ret; -} - -static int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id, - bool enable) -{ - struct iwl_bt_coex_cmd_old *bt_cmd; - /* Send ASYNC since this can be sent 
from an atomic context */ - struct iwl_host_cmd cmd = { - .id = BT_CONFIG, - .len = { sizeof(*bt_cmd), }, - .dataflags = { IWL_HCMD_DFL_DUP, }, - .flags = CMD_ASYNC, - }; - struct iwl_mvm_sta *mvmsta; - int ret; - - mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id); - if (!mvmsta) - return 0; - - /* nothing to do */ - if (mvmsta->bt_reduced_txpower == enable) - return 0; - - bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_ATOMIC); - if (!bt_cmd) - return -ENOMEM; - cmd.data[0] = bt_cmd; - bt_cmd->flags = cpu_to_le32(BT_COEX_NW_OLD); - - bt_cmd->valid_bit_msk = - cpu_to_le32(BT_VALID_ENABLE | BT_VALID_REDUCED_TX_POWER); - bt_cmd->bt_reduced_tx_power = sta_id; - - if (enable) - bt_cmd->bt_reduced_tx_power |= BT_REDUCED_TX_POWER_BIT; - - IWL_DEBUG_COEX(mvm, "%sable reduced Tx Power for sta %d\n", - enable ? "en" : "dis", sta_id); - - mvmsta->bt_reduced_txpower = enable; - - ret = iwl_mvm_send_cmd(mvm, &cmd); - - kfree(bt_cmd); - return ret; -} - -struct iwl_bt_iterator_data { - struct iwl_bt_coex_profile_notif_old *notif; - struct iwl_mvm *mvm; - struct ieee80211_chanctx_conf *primary; - struct ieee80211_chanctx_conf *secondary; - bool primary_ll; -}; - -static inline -void iwl_mvm_bt_coex_enable_rssi_event(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - bool enable, int rssi) -{ - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - - mvmvif->bf_data.last_bt_coex_event = rssi; - mvmvif->bf_data.bt_coex_max_thold = - enable ? -IWL_MVM_BT_COEX_EN_RED_TXP_THRESH : 0; - mvmvif->bf_data.bt_coex_min_thold = - enable ? -IWL_MVM_BT_COEX_DIS_RED_TXP_THRESH : 0; -} - -/* must be called under rcu_read_lock */ -static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac, - struct ieee80211_vif *vif) -{ - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct iwl_bt_iterator_data *data = _data; - struct iwl_mvm *mvm = data->mvm; - struct ieee80211_chanctx_conf *chanctx_conf; - enum ieee80211_smps_mode smps_mode; - u32 bt_activity_grading; - int ave_rssi; - - lockdep_assert_held(&mvm->mutex); - - switch (vif->type) { - case NL80211_IFTYPE_STATION: - /* default smps_mode for BSS / P2P client is AUTOMATIC */ - smps_mode = IEEE80211_SMPS_AUTOMATIC; - break; - case NL80211_IFTYPE_AP: - if (!mvmvif->ap_ibss_active) - return; - break; - default: - return; - } - - chanctx_conf = rcu_dereference(vif->chanctx_conf); - - /* If channel context is invalid or not on 2.4GHz .. */ - if ((!chanctx_conf || - chanctx_conf->def.chan->band != IEEE80211_BAND_2GHZ)) { - if (vif->type == NL80211_IFTYPE_STATION) { - /* ... relax constraints and disable rssi events */ - iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX, - smps_mode); - iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, - false); - iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, false, 0); - } - return; - } - - bt_activity_grading = le32_to_cpu(data->notif->bt_activity_grading); - if (bt_activity_grading >= BT_HIGH_TRAFFIC) - smps_mode = IEEE80211_SMPS_STATIC; - else if (bt_activity_grading >= BT_LOW_TRAFFIC) - smps_mode = vif->type == NL80211_IFTYPE_AP ? 
- IEEE80211_SMPS_OFF : - IEEE80211_SMPS_DYNAMIC; - - /* relax SMPS contraints for next association */ - if (!vif->bss_conf.assoc) - smps_mode = IEEE80211_SMPS_AUTOMATIC; - - if (mvmvif->phy_ctxt && - data->notif->rrc_enabled & BIT(mvmvif->phy_ctxt->id)) - smps_mode = IEEE80211_SMPS_AUTOMATIC; - - IWL_DEBUG_COEX(data->mvm, - "mac %d: bt_status %d bt_activity_grading %d smps_req %d\n", - mvmvif->id, data->notif->bt_status, bt_activity_grading, - smps_mode); - - if (vif->type == NL80211_IFTYPE_STATION) - iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX, - smps_mode); - - /* low latency is always primary */ - if (iwl_mvm_vif_low_latency(mvmvif)) { - data->primary_ll = true; - - data->secondary = data->primary; - data->primary = chanctx_conf; - } - - if (vif->type == NL80211_IFTYPE_AP) { - if (!mvmvif->ap_ibss_active) - return; - - if (chanctx_conf == data->primary) - return; - - if (!data->primary_ll) { - /* - * downgrade the current primary no matter what its - * type is. - */ - data->secondary = data->primary; - data->primary = chanctx_conf; - } else { - /* there is low latency vif - we will be secondary */ - data->secondary = chanctx_conf; - } - return; - } - - /* - * STA / P2P Client, try to be primary if first vif. If we are in low - * latency mode, we are already in primary and just don't do much - */ - if (!data->primary || data->primary == chanctx_conf) - data->primary = chanctx_conf; - else if (!data->secondary) - /* if secondary is not NULL, it might be a GO */ - data->secondary = chanctx_conf; - - /* - * don't reduce the Tx power if one of these is true: - * we are in LOOSE - * single share antenna product - * BT is active - * we are associated - */ - if (iwl_get_coex_type(mvm, vif) == BT_COEX_LOOSE_LUT || - mvm->cfg->bt_shared_single_ant || !vif->bss_conf.assoc || - !data->notif->bt_status) { - iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, false); - iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, false, 0); - return; - } - - /* try to get the avg rssi from fw */ - ave_rssi = mvmvif->bf_data.ave_beacon_signal; - - /* if the RSSI isn't valid, fake it is very low */ - if (!ave_rssi) - ave_rssi = -100; - if (ave_rssi > -IWL_MVM_BT_COEX_EN_RED_TXP_THRESH) { - if (iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, true)) - IWL_ERR(mvm, "Couldn't send BT_CONFIG cmd\n"); - } else if (ave_rssi < -IWL_MVM_BT_COEX_DIS_RED_TXP_THRESH) { - if (iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, false)) - IWL_ERR(mvm, "Couldn't send BT_CONFIG cmd\n"); - } - - /* Begin to monitor the RSSI: it may influence the reduced Tx power */ - iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, true, ave_rssi); -} - -static void iwl_mvm_bt_coex_notif_handle(struct iwl_mvm *mvm) -{ - struct iwl_bt_iterator_data data = { - .mvm = mvm, - .notif = &mvm->last_bt_notif_old, - }; - struct iwl_bt_coex_ci_cmd_old cmd = {}; - u8 ci_bw_idx; - - /* Ignore updates if we are in force mode */ - if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS)) - return; - - rcu_read_lock(); - ieee80211_iterate_active_interfaces_atomic( - mvm->hw, IEEE80211_IFACE_ITER_NORMAL, - iwl_mvm_bt_notif_iterator, &data); - - if (data.primary) { - struct ieee80211_chanctx_conf *chan = data.primary; - - if (WARN_ON(!chan->def.chan)) { - rcu_read_unlock(); - return; - } - - if (chan->def.width < NL80211_CHAN_WIDTH_40) { - ci_bw_idx = 0; - cmd.co_run_bw_primary = 0; - } else { - cmd.co_run_bw_primary = 1; - if (chan->def.center_freq1 > - chan->def.chan->center_freq) - ci_bw_idx = 2; - else - ci_bw_idx = 1; - } - - cmd.bt_primary_ci = - 
iwl_ci_mask[chan->def.chan->hw_value][ci_bw_idx]; - cmd.primary_ch_phy_id = *((u16 *)data.primary->drv_priv); - } - - if (data.secondary) { - struct ieee80211_chanctx_conf *chan = data.secondary; - - if (WARN_ON(!data.secondary->def.chan)) { - rcu_read_unlock(); - return; - } - - if (chan->def.width < NL80211_CHAN_WIDTH_40) { - ci_bw_idx = 0; - cmd.co_run_bw_secondary = 0; - } else { - cmd.co_run_bw_secondary = 1; - if (chan->def.center_freq1 > - chan->def.chan->center_freq) - ci_bw_idx = 2; - else - ci_bw_idx = 1; - } - - cmd.bt_secondary_ci = - iwl_ci_mask[chan->def.chan->hw_value][ci_bw_idx]; - cmd.secondary_ch_phy_id = *((u16 *)data.secondary->drv_priv); - } - - rcu_read_unlock(); - - /* Don't spam the fw with the same command over and over */ - if (memcmp(&cmd, &mvm->last_bt_ci_cmd_old, sizeof(cmd))) { - if (iwl_mvm_send_cmd_pdu(mvm, BT_COEX_CI, 0, - sizeof(cmd), &cmd)) - IWL_ERR(mvm, "Failed to send BT_CI cmd\n"); - memcpy(&mvm->last_bt_ci_cmd_old, &cmd, sizeof(cmd)); - } - - if (iwl_mvm_bt_udpate_ctrl_kill_msk(mvm)) - IWL_ERR(mvm, "Failed to update the ctrl_kill_msk\n"); -} - -void iwl_mvm_rx_bt_coex_notif_old(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb) -{ - struct iwl_rx_packet *pkt = rxb_addr(rxb); - struct iwl_bt_coex_profile_notif_old *notif = (void *)pkt->data; - - IWL_DEBUG_COEX(mvm, "BT Coex Notification received\n"); - IWL_DEBUG_COEX(mvm, "\tBT status: %s\n", - notif->bt_status ? "ON" : "OFF"); - IWL_DEBUG_COEX(mvm, "\tBT open conn %d\n", notif->bt_open_conn); - IWL_DEBUG_COEX(mvm, "\tBT ci compliance %d\n", notif->bt_ci_compliance); - IWL_DEBUG_COEX(mvm, "\tBT primary_ch_lut %d\n", - le32_to_cpu(notif->primary_ch_lut)); - IWL_DEBUG_COEX(mvm, "\tBT secondary_ch_lut %d\n", - le32_to_cpu(notif->secondary_ch_lut)); - IWL_DEBUG_COEX(mvm, "\tBT activity grading %d\n", - le32_to_cpu(notif->bt_activity_grading)); - IWL_DEBUG_COEX(mvm, "\tBT agg traffic load %d\n", - notif->bt_agg_traffic_load); - - /* remember this notification for future use: rssi fluctuations */ - memcpy(&mvm->last_bt_notif_old, notif, sizeof(mvm->last_bt_notif_old)); - - iwl_mvm_bt_coex_notif_handle(mvm); -} - -static void iwl_mvm_bt_rssi_iterator(void *_data, u8 *mac, - struct ieee80211_vif *vif) -{ - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct iwl_bt_iterator_data *data = _data; - struct iwl_mvm *mvm = data->mvm; - - struct ieee80211_sta *sta; - struct iwl_mvm_sta *mvmsta; - - struct ieee80211_chanctx_conf *chanctx_conf; - - rcu_read_lock(); - chanctx_conf = rcu_dereference(vif->chanctx_conf); - /* If channel context is invalid or not on 2.4GHz - don't count it */ - if (!chanctx_conf || - chanctx_conf->def.chan->band != IEEE80211_BAND_2GHZ) { - rcu_read_unlock(); - return; - } - rcu_read_unlock(); - - if (vif->type != NL80211_IFTYPE_STATION || - mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT) - return; - - sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id], - lockdep_is_held(&mvm->mutex)); - - /* This can happen if the station has been removed right now */ - if (IS_ERR_OR_NULL(sta)) - return; - - mvmsta = iwl_mvm_sta_from_mac80211(sta); -} - -void iwl_mvm_bt_rssi_event_old(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - enum ieee80211_rssi_event_data rssi_event) -{ - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct iwl_bt_iterator_data data = { - .mvm = mvm, - }; - int ret; - - lockdep_assert_held(&mvm->mutex); - - /* Ignore updates if we are in force mode */ - if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS)) - return; - - /* 
- * Rssi update while not associated - can happen since the statistics - * are handled asynchronously - */ - if (mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT) - return; - - /* No BT - reports should be disabled */ - if (!mvm->last_bt_notif_old.bt_status) - return; - - IWL_DEBUG_COEX(mvm, "RSSI for %pM is now %s\n", vif->bss_conf.bssid, - rssi_event == RSSI_EVENT_HIGH ? "HIGH" : "LOW"); - - /* - * Check if rssi is good enough for reduced Tx power, but not in loose - * scheme. - */ - if (rssi_event == RSSI_EVENT_LOW || mvm->cfg->bt_shared_single_ant || - iwl_get_coex_type(mvm, vif) == BT_COEX_LOOSE_LUT) - ret = iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, - false); - else - ret = iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, true); - - if (ret) - IWL_ERR(mvm, "couldn't send BT_CONFIG HCMD upon RSSI event\n"); - - ieee80211_iterate_active_interfaces_atomic( - mvm->hw, IEEE80211_IFACE_ITER_NORMAL, - iwl_mvm_bt_rssi_iterator, &data); - - if (iwl_mvm_bt_udpate_ctrl_kill_msk(mvm)) - IWL_ERR(mvm, "Failed to update the ctrl_kill_msk\n"); -} - -#define LINK_QUAL_AGG_TIME_LIMIT_DEF (4000) -#define LINK_QUAL_AGG_TIME_LIMIT_BT_ACT (1200) - -u16 iwl_mvm_coex_agg_time_limit_old(struct iwl_mvm *mvm, - struct ieee80211_sta *sta) -{ - struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); - enum iwl_bt_coex_lut_type lut_type; - - if (le32_to_cpu(mvm->last_bt_notif_old.bt_activity_grading) < - BT_HIGH_TRAFFIC) - return LINK_QUAL_AGG_TIME_LIMIT_DEF; - - if (mvm->last_bt_notif_old.ttc_enabled) - return LINK_QUAL_AGG_TIME_LIMIT_DEF; - - lut_type = iwl_get_coex_type(mvm, mvmsta->vif); - - if (lut_type == BT_COEX_LOOSE_LUT || lut_type == BT_COEX_INVALID_LUT) - return LINK_QUAL_AGG_TIME_LIMIT_DEF; - - /* tight coex, high bt traffic, reduce AGG time limit */ - return LINK_QUAL_AGG_TIME_LIMIT_BT_ACT; -} - -bool iwl_mvm_bt_coex_is_mimo_allowed_old(struct iwl_mvm *mvm, - struct ieee80211_sta *sta) -{ - struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); - enum iwl_bt_coex_lut_type lut_type; - - if (mvm->last_bt_notif_old.ttc_enabled) - return true; - - if (le32_to_cpu(mvm->last_bt_notif_old.bt_activity_grading) < - BT_HIGH_TRAFFIC) - return true; - - /* - * In Tight / TxTxDis, BT can't Rx while we Tx, so use both antennas - * since BT is already killed. - * In Loose, BT can Rx while we Tx, so forbid MIMO to let BT Rx while - * we Tx. - * When we are in 5GHz, we'll get BT_COEX_INVALID_LUT allowing MIMO. 
- */ - lut_type = iwl_get_coex_type(mvm, mvmsta->vif); - return lut_type != BT_COEX_LOOSE_LUT; -} - -bool iwl_mvm_bt_coex_is_shared_ant_avail_old(struct iwl_mvm *mvm) -{ - u32 ag = le32_to_cpu(mvm->last_bt_notif_old.bt_activity_grading); - return ag < BT_HIGH_TRAFFIC; -} - -bool iwl_mvm_bt_coex_is_tpc_allowed_old(struct iwl_mvm *mvm, - enum ieee80211_band band) -{ - u32 bt_activity = - le32_to_cpu(mvm->last_bt_notif_old.bt_activity_grading); - - if (band != IEEE80211_BAND_2GHZ) - return false; - - return bt_activity >= BT_LOW_TRAFFIC; -} - -void iwl_mvm_bt_coex_vif_change_old(struct iwl_mvm *mvm) -{ - iwl_mvm_bt_coex_notif_handle(mvm); -} - -void iwl_mvm_rx_ant_coupling_notif_old(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb) -{ - struct iwl_rx_packet *pkt = rxb_addr(rxb); - u32 ant_isolation = le32_to_cpup((void *)pkt->data); - u8 __maybe_unused lower_bound, upper_bound; - u8 lut; - - struct iwl_bt_coex_cmd_old *bt_cmd; - struct iwl_host_cmd cmd = { - .id = BT_CONFIG, - .len = { sizeof(*bt_cmd), }, - .dataflags = { IWL_HCMD_DFL_NOCOPY, }, - }; - - if (!iwl_mvm_bt_is_plcr_supported(mvm)) - return; - - lockdep_assert_held(&mvm->mutex); - - /* Ignore updates if we are in force mode */ - if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS)) - return; - - if (ant_isolation == mvm->last_ant_isol) - return; - - for (lut = 0; lut < ARRAY_SIZE(antenna_coupling_ranges) - 1; lut++) - if (ant_isolation < antenna_coupling_ranges[lut + 1].range) - break; - - lower_bound = antenna_coupling_ranges[lut].range; - - if (lut < ARRAY_SIZE(antenna_coupling_ranges) - 1) - upper_bound = antenna_coupling_ranges[lut + 1].range; - else - upper_bound = antenna_coupling_ranges[lut].range; - - IWL_DEBUG_COEX(mvm, "Antenna isolation=%d in range [%d,%d[, lut=%d\n", - ant_isolation, lower_bound, upper_bound, lut); - - mvm->last_ant_isol = ant_isolation; - - if (mvm->last_corun_lut == lut) - return; - - mvm->last_corun_lut = lut; - - bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_KERNEL); - if (!bt_cmd) - return; - cmd.data[0] = bt_cmd; - - bt_cmd->flags = cpu_to_le32(BT_COEX_NW_OLD); - bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_ENABLE | - BT_VALID_CORUN_LUT_20 | - BT_VALID_CORUN_LUT_40); - - /* For the moment, use the same LUT for 20GHz and 40GHz */ - memcpy(bt_cmd->bt4_corun_lut20, antenna_coupling_ranges[lut].lut20, - sizeof(bt_cmd->bt4_corun_lut20)); - - memcpy(bt_cmd->bt4_corun_lut40, antenna_coupling_ranges[lut].lut20, - sizeof(bt_cmd->bt4_corun_lut40)); - - if (iwl_mvm_send_cmd(mvm, &cmd)) - IWL_ERR(mvm, "failed to send BT_CONFIG command\n"); - - kfree(bt_cmd); -} diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c index a43b3921c4c1..abc16f73f07b 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c @@ -463,69 +463,11 @@ int iwl_mvm_coex_dump_mbox(struct iwl_bt_coex_profile_notif *notif, char *buf, return pos; } -static -int iwl_mvm_coex_dump_mbox_old(struct iwl_bt_coex_profile_notif_old *notif, - char *buf, int pos, int bufsz) -{ - pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw0:\n"); - - BT_MBOX_PRINT(0, LE_SLAVE_LAT, false); - BT_MBOX_PRINT(0, LE_PROF1, false); - BT_MBOX_PRINT(0, LE_PROF2, false); - BT_MBOX_PRINT(0, LE_PROF_OTHER, false); - BT_MBOX_PRINT(0, CHL_SEQ_N, false); - BT_MBOX_PRINT(0, INBAND_S, false); - BT_MBOX_PRINT(0, LE_MIN_RSSI, false); - BT_MBOX_PRINT(0, LE_SCAN, false); - BT_MBOX_PRINT(0, LE_ADV, false); - BT_MBOX_PRINT(0, LE_MAX_TX_POWER, false); - BT_MBOX_PRINT(0, 
OPEN_CON_1, true); - - pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw1:\n"); - - BT_MBOX_PRINT(1, BR_MAX_TX_POWER, false); - BT_MBOX_PRINT(1, IP_SR, false); - BT_MBOX_PRINT(1, LE_MSTR, false); - BT_MBOX_PRINT(1, AGGR_TRFC_LD, false); - BT_MBOX_PRINT(1, MSG_TYPE, false); - BT_MBOX_PRINT(1, SSN, true); - - pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw2:\n"); - - BT_MBOX_PRINT(2, SNIFF_ACT, false); - BT_MBOX_PRINT(2, PAG, false); - BT_MBOX_PRINT(2, INQUIRY, false); - BT_MBOX_PRINT(2, CONN, false); - BT_MBOX_PRINT(2, SNIFF_INTERVAL, false); - BT_MBOX_PRINT(2, DISC, false); - BT_MBOX_PRINT(2, SCO_TX_ACT, false); - BT_MBOX_PRINT(2, SCO_RX_ACT, false); - BT_MBOX_PRINT(2, ESCO_RE_TX, false); - BT_MBOX_PRINT(2, SCO_DURATION, true); - - pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw3:\n"); - - BT_MBOX_PRINT(3, SCO_STATE, false); - BT_MBOX_PRINT(3, SNIFF_STATE, false); - BT_MBOX_PRINT(3, A2DP_STATE, false); - BT_MBOX_PRINT(3, ACL_STATE, false); - BT_MBOX_PRINT(3, MSTR_STATE, false); - BT_MBOX_PRINT(3, OBX_STATE, false); - BT_MBOX_PRINT(3, OPEN_CON_2, false); - BT_MBOX_PRINT(3, TRAFFIC_LOAD, false); - BT_MBOX_PRINT(3, CHL_SEQN_LSB, false); - BT_MBOX_PRINT(3, INBAND_P, false); - BT_MBOX_PRINT(3, MSG_TYPE_2, false); - BT_MBOX_PRINT(3, SSN_2, false); - BT_MBOX_PRINT(3, UPDATE_REQUEST, true); - - return pos; -} - static ssize_t iwl_dbgfs_bt_notif_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_mvm *mvm = file->private_data; + struct iwl_bt_coex_profile_notif *notif = &mvm->last_bt_notif; char *buf; int ret, pos = 0, bufsz = sizeof(char) * 1024; @@ -535,52 +477,24 @@ static ssize_t iwl_dbgfs_bt_notif_read(struct file *file, char __user *user_buf, mutex_lock(&mvm->mutex); - if (!fw_has_api(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_API_BT_COEX_SPLIT)) { - struct iwl_bt_coex_profile_notif_old *notif = - &mvm->last_bt_notif_old; + pos += iwl_mvm_coex_dump_mbox(notif, buf, pos, bufsz); - pos += iwl_mvm_coex_dump_mbox_old(notif, buf, pos, bufsz); - - pos += scnprintf(buf+pos, bufsz-pos, "bt_ci_compliance = %d\n", - notif->bt_ci_compliance); - pos += scnprintf(buf+pos, bufsz-pos, "primary_ch_lut = %d\n", - le32_to_cpu(notif->primary_ch_lut)); - pos += scnprintf(buf+pos, bufsz-pos, "secondary_ch_lut = %d\n", - le32_to_cpu(notif->secondary_ch_lut)); - pos += scnprintf(buf+pos, - bufsz-pos, "bt_activity_grading = %d\n", - le32_to_cpu(notif->bt_activity_grading)); - pos += scnprintf(buf+pos, bufsz-pos, - "antenna isolation = %d CORUN LUT index = %d\n", - mvm->last_ant_isol, mvm->last_corun_lut); - pos += scnprintf(buf + pos, bufsz - pos, "bt_rrc = %d\n", - notif->rrc_enabled); - pos += scnprintf(buf + pos, bufsz - pos, "bt_ttc = %d\n", - notif->ttc_enabled); - } else { - struct iwl_bt_coex_profile_notif *notif = - &mvm->last_bt_notif; - - pos += iwl_mvm_coex_dump_mbox(notif, buf, pos, bufsz); - - pos += scnprintf(buf+pos, bufsz-pos, "bt_ci_compliance = %d\n", - notif->bt_ci_compliance); - pos += scnprintf(buf+pos, bufsz-pos, "primary_ch_lut = %d\n", - le32_to_cpu(notif->primary_ch_lut)); - pos += scnprintf(buf+pos, bufsz-pos, "secondary_ch_lut = %d\n", - le32_to_cpu(notif->secondary_ch_lut)); - pos += scnprintf(buf+pos, - bufsz-pos, "bt_activity_grading = %d\n", - le32_to_cpu(notif->bt_activity_grading)); - pos += scnprintf(buf+pos, bufsz-pos, - "antenna isolation = %d CORUN LUT index = %d\n", - mvm->last_ant_isol, mvm->last_corun_lut); - pos += scnprintf(buf + pos, bufsz - pos, "bt_rrc = %d\n", - (notif->ttc_rrc_status >> 4) & 0xF); - pos += scnprintf(buf + pos, bufsz - pos, "bt_ttc 
= %d\n", - notif->ttc_rrc_status & 0xF); - } + pos += scnprintf(buf + pos, bufsz - pos, "bt_ci_compliance = %d\n", + notif->bt_ci_compliance); + pos += scnprintf(buf + pos, bufsz - pos, "primary_ch_lut = %d\n", + le32_to_cpu(notif->primary_ch_lut)); + pos += scnprintf(buf + pos, bufsz - pos, "secondary_ch_lut = %d\n", + le32_to_cpu(notif->secondary_ch_lut)); + pos += scnprintf(buf + pos, + bufsz - pos, "bt_activity_grading = %d\n", + le32_to_cpu(notif->bt_activity_grading)); + pos += scnprintf(buf + pos, bufsz - pos, + "antenna isolation = %d CORUN LUT index = %d\n", + mvm->last_ant_isol, mvm->last_corun_lut); + pos += scnprintf(buf + pos, bufsz - pos, "bt_rrc = %d\n", + (notif->ttc_rrc_status >> 4) & 0xF); + pos += scnprintf(buf + pos, bufsz - pos, "bt_ttc = %d\n", + notif->ttc_rrc_status & 0xF); pos += scnprintf(buf + pos, bufsz - pos, "sync_sco = %d\n", IWL_MVM_BT_COEX_SYNC2SCO); @@ -602,44 +516,20 @@ static ssize_t iwl_dbgfs_bt_cmd_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_mvm *mvm = file->private_data; + struct iwl_bt_coex_ci_cmd *cmd = &mvm->last_bt_ci_cmd; char buf[256]; int bufsz = sizeof(buf); int pos = 0; mutex_lock(&mvm->mutex); - if (!fw_has_api(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_API_BT_COEX_SPLIT)) { - struct iwl_bt_coex_ci_cmd_old *cmd = &mvm->last_bt_ci_cmd_old; - - pos += scnprintf(buf+pos, bufsz-pos, - "Channel inhibition CMD\n"); - pos += scnprintf(buf+pos, bufsz-pos, - "\tPrimary Channel Bitmap 0x%016llx\n", - le64_to_cpu(cmd->bt_primary_ci)); - pos += scnprintf(buf+pos, bufsz-pos, - "\tSecondary Channel Bitmap 0x%016llx\n", - le64_to_cpu(cmd->bt_secondary_ci)); - - pos += scnprintf(buf+pos, bufsz-pos, - "BT Configuration CMD - 0=default, 1=never, 2=always\n"); - pos += scnprintf(buf+pos, bufsz-pos, "\tACK Kill msk idx %d\n", - mvm->bt_ack_kill_msk[0]); - pos += scnprintf(buf+pos, bufsz-pos, "\tCTS Kill msk idx %d\n", - mvm->bt_cts_kill_msk[0]); - - } else { - struct iwl_bt_coex_ci_cmd *cmd = &mvm->last_bt_ci_cmd; - - pos += scnprintf(buf+pos, bufsz-pos, - "Channel inhibition CMD\n"); - pos += scnprintf(buf+pos, bufsz-pos, - "\tPrimary Channel Bitmap 0x%016llx\n", - le64_to_cpu(cmd->bt_primary_ci)); - pos += scnprintf(buf+pos, bufsz-pos, - "\tSecondary Channel Bitmap 0x%016llx\n", - le64_to_cpu(cmd->bt_secondary_ci)); - } + pos += scnprintf(buf + pos, bufsz - pos, "Channel inhibition CMD\n"); + pos += scnprintf(buf + pos, bufsz - pos, + "\tPrimary Channel Bitmap 0x%016llx\n", + le64_to_cpu(cmd->bt_primary_ci)); + pos += scnprintf(buf + pos, bufsz - pos, + "\tSecondary Channel Bitmap 0x%016llx\n", + le64_to_cpu(cmd->bt_secondary_ci)); mutex_unlock(&mvm->mutex); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index f9430ee8f96b..f0e25971424e 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -1470,22 +1470,6 @@ bool iwl_mvm_bt_coex_is_tpc_allowed(struct iwl_mvm *mvm, u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr, struct ieee80211_tx_info *info, u8 ac); -bool iwl_mvm_bt_coex_is_shared_ant_avail_old(struct iwl_mvm *mvm); -void iwl_mvm_bt_coex_vif_change_old(struct iwl_mvm *mvm); -int iwl_send_bt_init_conf_old(struct iwl_mvm *mvm); -void iwl_mvm_rx_bt_coex_notif_old(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb); -void iwl_mvm_bt_rssi_event_old(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - enum ieee80211_rssi_event_data); -u16 iwl_mvm_coex_agg_time_limit_old(struct iwl_mvm 
*mvm, - struct ieee80211_sta *sta); -bool iwl_mvm_bt_coex_is_mimo_allowed_old(struct iwl_mvm *mvm, - struct ieee80211_sta *sta); -bool iwl_mvm_bt_coex_is_tpc_allowed_old(struct iwl_mvm *mvm, - enum ieee80211_band band); -void iwl_mvm_rx_ant_coupling_notif_old(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb); - /* beacon filtering */ #ifdef CONFIG_IWLWIFI_DEBUGFS void diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sf.c b/drivers/net/wireless/intel/iwlwifi/mvm/sf.c index c2def1232a8c..443a42855c9e 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sf.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sf.c @@ -193,7 +193,7 @@ static void iwl_mvm_fill_sf_command(struct iwl_mvm *mvm, } } - if (sta || IWL_UCODE_API(mvm->fw->ucode_ver) < 13) { + if (sta) { BUILD_BUG_ON(sizeof(sf_full_timeout) != sizeof(__le32) * SF_NUM_SCENARIO * SF_NUM_TIMEOUT_TYPES); @@ -220,9 +220,6 @@ static int iwl_mvm_sf_config(struct iwl_mvm *mvm, u8 sta_id, struct ieee80211_sta *sta; int ret = 0; - if (IWL_UCODE_API(mvm->fw->ucode_ver) < 13) - sf_cmd.state = cpu_to_le32(new_state); - if (mvm->cfg->disable_dummy_notification) sf_cmd.state |= cpu_to_le32(SF_CFG_DUMMY_NOTIF_OFF); @@ -235,8 +232,7 @@ static int iwl_mvm_sf_config(struct iwl_mvm *mvm, u8 sta_id, switch (new_state) { case SF_UNINIT: - if (IWL_UCODE_API(mvm->fw->ucode_ver) >= 13) - iwl_mvm_fill_sf_command(mvm, &sf_cmd, NULL); + iwl_mvm_fill_sf_command(mvm, &sf_cmd, NULL); break; case SF_FULL_ON: if (sta_id == IWL_MVM_STATION_COUNT) { diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c index 2440248c8e69..76866b9e5686 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c @@ -491,98 +491,12 @@ static void iwl_mvm_dump_umac_error_log(struct iwl_mvm *mvm) IWL_ERR(mvm, "0x%08X | isr status reg\n", table.nic_isr_pref); } -static void iwl_mvm_dump_nic_error_log_old(struct iwl_mvm *mvm) -{ - struct iwl_trans *trans = mvm->trans; - struct iwl_error_event_table_v1 table; - u32 base; - - base = mvm->error_event_table; - if (mvm->cur_ucode == IWL_UCODE_INIT) { - if (!base) - base = mvm->fw->init_errlog_ptr; - } else { - if (!base) - base = mvm->fw->inst_errlog_ptr; - } - - if (base < 0x800000) { - IWL_ERR(mvm, - "Not valid error log pointer 0x%08X for %s uCode\n", - base, - (mvm->cur_ucode == IWL_UCODE_INIT) - ? 
"Init" : "RT"); - return; - } - - iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table)); - - if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) { - IWL_ERR(trans, "Start IWL Error Log Dump:\n"); - IWL_ERR(trans, "Status: 0x%08lX, count: %d\n", - mvm->status, table.valid); - } - - /* Do not change this output - scripts rely on it */ - - IWL_ERR(mvm, "Loaded firmware version: %s\n", mvm->fw->fw_version); - - trace_iwlwifi_dev_ucode_error(trans->dev, table.error_id, table.tsf_low, - table.data1, table.data2, table.data3, - table.blink2, table.ilink1, table.ilink2, - table.bcon_time, table.gp1, table.gp2, - table.gp3, table.ucode_ver, 0, - table.hw_ver, table.brd_ver); - IWL_ERR(mvm, "0x%08X | %-28s\n", table.error_id, - desc_lookup(table.error_id)); - IWL_ERR(mvm, "0x%08X | uPc\n", table.pc); - IWL_ERR(mvm, "0x%08X | branchlink1\n", table.blink1); - IWL_ERR(mvm, "0x%08X | branchlink2\n", table.blink2); - IWL_ERR(mvm, "0x%08X | interruptlink1\n", table.ilink1); - IWL_ERR(mvm, "0x%08X | interruptlink2\n", table.ilink2); - IWL_ERR(mvm, "0x%08X | data1\n", table.data1); - IWL_ERR(mvm, "0x%08X | data2\n", table.data2); - IWL_ERR(mvm, "0x%08X | data3\n", table.data3); - IWL_ERR(mvm, "0x%08X | beacon time\n", table.bcon_time); - IWL_ERR(mvm, "0x%08X | tsf low\n", table.tsf_low); - IWL_ERR(mvm, "0x%08X | tsf hi\n", table.tsf_hi); - IWL_ERR(mvm, "0x%08X | time gp1\n", table.gp1); - IWL_ERR(mvm, "0x%08X | time gp2\n", table.gp2); - IWL_ERR(mvm, "0x%08X | time gp3\n", table.gp3); - IWL_ERR(mvm, "0x%08X | uCode version\n", table.ucode_ver); - IWL_ERR(mvm, "0x%08X | hw version\n", table.hw_ver); - IWL_ERR(mvm, "0x%08X | board version\n", table.brd_ver); - IWL_ERR(mvm, "0x%08X | hcmd\n", table.hcmd); - IWL_ERR(mvm, "0x%08X | isr0\n", table.isr0); - IWL_ERR(mvm, "0x%08X | isr1\n", table.isr1); - IWL_ERR(mvm, "0x%08X | isr2\n", table.isr2); - IWL_ERR(mvm, "0x%08X | isr3\n", table.isr3); - IWL_ERR(mvm, "0x%08X | isr4\n", table.isr4); - IWL_ERR(mvm, "0x%08X | isr_pref\n", table.isr_pref); - IWL_ERR(mvm, "0x%08X | wait_event\n", table.wait_event); - IWL_ERR(mvm, "0x%08X | l2p_control\n", table.l2p_control); - IWL_ERR(mvm, "0x%08X | l2p_duration\n", table.l2p_duration); - IWL_ERR(mvm, "0x%08X | l2p_mhvalid\n", table.l2p_mhvalid); - IWL_ERR(mvm, "0x%08X | l2p_addr_match\n", table.l2p_addr_match); - IWL_ERR(mvm, "0x%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel); - IWL_ERR(mvm, "0x%08X | timestamp\n", table.u_timestamp); - IWL_ERR(mvm, "0x%08X | flow_handler\n", table.flow_handler); - - if (mvm->support_umac_log) - iwl_mvm_dump_umac_error_log(mvm); -} - void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm) { struct iwl_trans *trans = mvm->trans; struct iwl_error_event_table table; u32 base; - if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_NEW_VERSION)) { - iwl_mvm_dump_nic_error_log_old(mvm); - return; - } - base = mvm->error_event_table; if (mvm->cur_ucode == IWL_UCODE_INIT) { if (!base) From b429a773c193ee7cb752144e590181a1b8cc8fb5 Mon Sep 17 00:00:00 2001 From: Eva Rachel Retuya Date: Sat, 19 Mar 2016 05:15:47 +0000 Subject: [PATCH 0100/1649] iwlwifi: dvm: use alloc_ordered_workqueue() Use alloc_ordered_workqueue() to allocate the workqueue instead of create_singlethread_workqueue() since the latter is deprecated and is scheduled for removal. There are work items doing related operations that shouldn't be swapped when queued in a certain order hence preserve the strict execution ordering of a single threaded (ST) workqueue by switching to alloc_ordered_workqueue(). 
WQ_MEM_RECLAIM flag is not needed since the worker is not depended during memory reclaim. Signed-off-by: Eva Rachel Retuya Acked-by: Tejun Heo Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/dvm/main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/main.c b/drivers/net/wireless/intel/iwlwifi/dvm/main.c index 85628127947f..614716251c39 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/main.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/main.c @@ -1071,7 +1071,7 @@ static void iwl_bg_restart(struct work_struct *data) static void iwl_setup_deferred_work(struct iwl_priv *priv) { - priv->workqueue = create_singlethread_workqueue(DRV_NAME); + priv->workqueue = alloc_ordered_workqueue(DRV_NAME, 0); INIT_WORK(&priv->restart, iwl_bg_restart); INIT_WORK(&priv->beacon_update, iwl_bg_beacon_update); From b238be07375e1d3aa976564397109fe9898d6123 Mon Sep 17 00:00:00 2001 From: Sara Sharon Date: Wed, 16 Mar 2016 13:57:50 +0200 Subject: [PATCH 0101/1649] iwlwifi: mvm: report checksum is done also for IPv6 packets Currently the code checks if hardware reported both L4 and L3 checksums as valid, and only then reports it as validated to the stack. However, IPv6 does not have checksum at all and the L3 checksum valid bit is always off for IPv6 packets, with the result of the stack re-validating L4 checksum. Fix code to set CHECKSUM_UNNECESSARY also for IPv6 packets whose TCP/UDP checksum was verified. Signed-off-by: Sara Sharon Signed-off-by: Emmanuel Grumbach --- .../net/wireless/intel/iwlwifi/mvm/fw-api-rx.h | 15 ++++++++++++++- drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c | 9 +++++++-- 2 files changed, 21 insertions(+), 3 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rx.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rx.h index 7a16e55df012..4c086d048097 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rx.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rx.h @@ -268,12 +268,25 @@ enum iwl_rx_mpdu_amsdu_info { IWL_RX_MPDU_AMSDU_LAST_SUBFRAME = 0x80, }; +enum iwl_rx_l3_proto_values { + IWL_RX_L3_TYPE_NONE, + IWL_RX_L3_TYPE_IPV4, + IWL_RX_L3_TYPE_IPV4_FRAG, + IWL_RX_L3_TYPE_IPV6_FRAG, + IWL_RX_L3_TYPE_IPV6, + IWL_RX_L3_TYPE_IPV6_IN_IPV4, + IWL_RX_L3_TYPE_ARP, + IWL_RX_L3_TYPE_EAPOL, +}; + +#define IWL_RX_L3_PROTO_POS 4 + enum iwl_rx_l3l4_flags { IWL_RX_L3L4_IP_HDR_CSUM_OK = BIT(0), IWL_RX_L3L4_TCP_UDP_CSUM_OK = BIT(1), IWL_RX_L3L4_TCP_FIN_SYN_RST_PSH = BIT(2), IWL_RX_L3L4_TCP_ACK = BIT(3), - IWL_RX_L3L4_L3_PROTO_MASK = 0xf << 4, + IWL_RX_L3L4_L3_PROTO_MASK = 0xf << IWL_RX_L3_PROTO_POS, IWL_RX_L3L4_L4_PROTO_MASK = 0xf << 8, IWL_RX_L3L4_RSS_HASH_MASK = 0xf << 12, }; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c index 9a54f2d2a66b..b2bc3d96a13f 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c @@ -294,10 +294,15 @@ static void iwl_mvm_rx_csum(struct ieee80211_sta *sta, { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif); + u16 flags = le16_to_cpu(desc->l3l4_flags); + u8 l3_prot = (u8)((flags & IWL_RX_L3L4_L3_PROTO_MASK) >> + IWL_RX_L3_PROTO_POS); if (mvmvif->features & NETIF_F_RXCSUM && - desc->l3l4_flags & cpu_to_le16(IWL_RX_L3L4_IP_HDR_CSUM_OK) && - desc->l3l4_flags & cpu_to_le16(IWL_RX_L3L4_TCP_UDP_CSUM_OK)) + flags & IWL_RX_L3L4_TCP_UDP_CSUM_OK && + (flags & IWL_RX_L3L4_IP_HDR_CSUM_OK 
|| + l3_prot == IWL_RX_L3_TYPE_IPV6 || + l3_prot == IWL_RX_L3_TYPE_IPV6_FRAG)) skb->ip_summed = CHECKSUM_UNNECESSARY; } From 5db81fd401bd8bba4bcc4a615c60961a792d4df9 Mon Sep 17 00:00:00 2001 From: David Spinadel Date: Sun, 20 Mar 2016 10:35:10 +0200 Subject: [PATCH 0102/1649] iwlwifi: mvm: set aux STA ID in scan config Auxilary station ID in flag in scan config command wasn't set although we set the station ID. Add the flag. Signed-off-by: David Spinadel Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/mvm/scan.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c index 09eb72c4ae43..25b007cf7db7 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c @@ -961,6 +961,7 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm) SCAN_CONFIG_FLAG_ALLOW_CHUB_REQS | SCAN_CONFIG_FLAG_SET_TX_CHAINS | SCAN_CONFIG_FLAG_SET_RX_CHAINS | + SCAN_CONFIG_FLAG_SET_AUX_STA_ID | SCAN_CONFIG_FLAG_SET_ALL_TIMES | SCAN_CONFIG_FLAG_SET_LEGACY_RATES | SCAN_CONFIG_FLAG_SET_MAC_ADDR | From 2a2e9d100739d79531d1109d7b768b3aaf681c06 Mon Sep 17 00:00:00 2001 From: Liad Kaufman Date: Thu, 17 Mar 2016 10:13:57 +0200 Subject: [PATCH 0103/1649] iwlwifi: trans: fix iwl_trans_txq_scd_cfg.sta_id sign For some reason, this was defined as a signed variable. Make it unsigned. Signed-off-by: Liad Kaufman Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/iwl-trans.h | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h index 91d74b3f666b..fa4ab4b9436f 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h @@ -7,6 +7,7 @@ * * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -33,6 +34,7 @@ * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 Intel Deutschland GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -519,7 +521,7 @@ struct iwl_trans; struct iwl_trans_txq_scd_cfg { u8 fifo; - s8 sta_id; + u8 sta_id; u8 tid; bool aggregate; int frame_limit; From 0df1391feee699a79b36f284fa6e19ab26344d25 Mon Sep 17 00:00:00 2001 From: Chaya Rachel Ivgi Date: Thu, 17 Mar 2016 13:01:37 +0200 Subject: [PATCH 0104/1649] iwlwifi: mvm: remove uneeded D0I3 checking The driver can read the current state during D0I3, therefore there is no reason not to do it. 
Signed-off-by: Chaya Rachel Ivgi Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/mvm/tt.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c index 8d27137a9284..3f5df76f65a4 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c @@ -787,9 +787,6 @@ static int iwl_mvm_tcool_get_cur_state(struct thermal_cooling_device *cdev, { struct iwl_mvm *mvm = (struct iwl_mvm *)(cdev->devdata); - if (test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status)) - return -EBUSY; - *state = mvm->cooling_dev.cur_state; return 0; From 013a67ea69d7caac094e6d144507246f10f24d9a Mon Sep 17 00:00:00 2001 From: Sara Sharon Date: Tue, 22 Mar 2016 16:04:53 +0200 Subject: [PATCH 0105/1649] iwlwifi: pcie: request one more interrupt vector We want to request an interrupt vector for RSS queue per CPU, one vector for fallback queue, and one for non-rx interrupts. Future patch will make sure that no RSS traffic is directed to fallback queue. This will enable us to enable fast path on traffic that otherwise would have been received on the fallback queue. Signed-off-by: Sara Sharon Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/pcie/trans.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index 0c40209bd718..f1a506b609d7 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c @@ -1435,7 +1435,7 @@ static void iwl_pcie_set_interrupt_capa(struct pci_dev *pdev, int ret, i; if (trans->cfg->mq_rx_supported) { - max_vector = min_t(u32, (num_possible_cpus() + 1), + max_vector = min_t(u32, (num_possible_cpus() + 2), IWL_MAX_RX_HW_QUEUES); for (i = 0; i < max_vector; i++) trans_pcie->msix_entries[i].entry = i; From 854d773e4ab5869200004af4ca5d851730849903 Mon Sep 17 00:00:00 2001 From: Sara Sharon Date: Tue, 22 Mar 2016 15:55:58 +0200 Subject: [PATCH 0106/1649] iwlwifi: mvm: improve RSS configuration Improve current RSS configuration: * Use netdev_rss_key instead of keeping a local copy. * Configure also UDP hashing to have UDP traffic spread across queues. * Do not direct RSS traffic to our fallback queue. 
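To see what the indirection scheme in the hunk below produces, here is a small standalone sketch; the table size and num_rx_queues are made-up example values (the driver takes both from the transport layer), and only the wrap-around arithmetic is taken from this patch:

/*
 * Illustrative only: shows why "1 + (i % (num_rx_queues - 1))" never
 * selects queue 0, the fallback queue. Example values, not driver state.
 */
#include <stdio.h>

int main(void)
{
	const unsigned int num_rx_queues = 4;	/* assumed for this sketch */
	unsigned char indirection_table[16];
	unsigned int i;

	for (i = 0; i < sizeof(indirection_table); i++)
		indirection_table[i] = 1 + (i % (num_rx_queues - 1));

	/* prints "1 2 3 1 2 3 ..." - queue 0 never appears */
	for (i = 0; i < sizeof(indirection_table); i++)
		printf("%u ", indirection_table[i]);
	printf("\n");

	return 0;
}
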
Signed-off-by: Sara Sharon Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c | 5 ++++- drivers/net/wireless/intel/iwlwifi/mvm/fw.c | 9 +++++++-- drivers/net/wireless/intel/iwlwifi/mvm/mvm.h | 1 - drivers/net/wireless/intel/iwlwifi/mvm/ops.c | 3 --- 4 files changed, 11 insertions(+), 7 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c index abc16f73f07b..362a54601a80 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c @@ -65,6 +65,7 @@ *****************************************************************************/ #include #include +#include #include "mvm.h" #include "fw-dbg.h" @@ -880,8 +881,10 @@ static ssize_t iwl_dbgfs_indirection_tbl_write(struct iwl_mvm *mvm, struct iwl_rss_config_cmd cmd = { .flags = cpu_to_le32(IWL_RSS_ENABLE), .hash_mask = IWL_RSS_HASH_TYPE_IPV4_TCP | + IWL_RSS_HASH_TYPE_IPV4_UDP | IWL_RSS_HASH_TYPE_IPV4_PAYLOAD | IWL_RSS_HASH_TYPE_IPV6_TCP | + IWL_RSS_HASH_TYPE_IPV6_UDP | IWL_RSS_HASH_TYPE_IPV6_PAYLOAD, }; int ret, i, num_repeats, nbytes = count / 2; @@ -905,7 +908,7 @@ static ssize_t iwl_dbgfs_indirection_tbl_write(struct iwl_mvm *mvm, memcpy(&cmd.indirection_table[i * nbytes], cmd.indirection_table, ARRAY_SIZE(cmd.indirection_table) % nbytes); - memcpy(cmd.secret_key, mvm->secret_key, sizeof(cmd.secret_key)); + netdev_rss_key_fill(cmd.secret_key, sizeof(cmd.secret_key)); mutex_lock(&mvm->mutex); ret = iwl_mvm_send_cmd_pdu(mvm, RSS_CONFIG_CMD, 0, sizeof(cmd), &cmd); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c index f375275ee98e..2dc97a19246a 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c @@ -64,6 +64,7 @@ * *****************************************************************************/ #include +#include #include "iwl-trans.h" #include "iwl-op-mode.h" @@ -114,14 +115,18 @@ static int iwl_send_rss_cfg_cmd(struct iwl_mvm *mvm) struct iwl_rss_config_cmd cmd = { .flags = cpu_to_le32(IWL_RSS_ENABLE), .hash_mask = IWL_RSS_HASH_TYPE_IPV4_TCP | + IWL_RSS_HASH_TYPE_IPV4_UDP | IWL_RSS_HASH_TYPE_IPV4_PAYLOAD | IWL_RSS_HASH_TYPE_IPV6_TCP | + IWL_RSS_HASH_TYPE_IPV6_UDP | IWL_RSS_HASH_TYPE_IPV6_PAYLOAD, }; + /* Do not direct RSS traffic to Q 0 which is our fallback queue */ for (i = 0; i < ARRAY_SIZE(cmd.indirection_table); i++) - cmd.indirection_table[i] = i % mvm->trans->num_rx_queues; - memcpy(cmd.secret_key, mvm->secret_key, sizeof(cmd.secret_key)); + cmd.indirection_table[i] = + 1 + (i % (mvm->trans->num_rx_queues - 1)); + netdev_rss_key_fill(cmd.secret_key, sizeof(cmd.secret_key)); return iwl_mvm_send_cmd_pdu(mvm, RSS_CONFIG_CMD, 0, sizeof(cmd), &cmd); } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index f0e25971424e..a9de2ad642bc 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -699,7 +699,6 @@ struct iwl_mvm { atomic_t pending_frames[IWL_MVM_STATION_COUNT]; u32 tfd_drained[IWL_MVM_STATION_COUNT]; u8 rx_ba_sessions; - u32 secret_key[IWL_RSS_HASH_KEY_CNT]; /* configured by mac80211 */ u32 rts_threshold; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index 9fc705ca5841..e36bcade69d1 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c @@ -725,9 +725,6 
@@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, iwl_mvm_tof_init(mvm); - /* init RSS hash key */ - get_random_bytes(mvm->secret_key, sizeof(mvm->secret_key)); - return op_mode; out_unregister: From 0e32d5904ccad13a8fb6a5b0519ae43eef0e0a75 Mon Sep 17 00:00:00 2001 From: Oren Givon Date: Thu, 24 Mar 2016 10:20:28 +0200 Subject: [PATCH 0107/1649] iwlwifi: edit the 9000 series PCI IDs Edit some of the 9560 series and 5165 series PCI IDs. These devices do not exist yet. Signed-off-by: Oren Givon Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/pcie/drv.c | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c index fb8b5ecd6abb..41c6dd5b9ccc 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c @@ -483,17 +483,19 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x24FD, 0x0810, iwl8265_2ac_cfg)}, /* 9000 Series */ + {IWL_PCI_DEVICE(0x9DF0, 0x0A10, iwl9560_2ac_cfg)}, + {IWL_PCI_DEVICE(0x9DF0, 0x0010, iwl9560_2ac_cfg)}, {IWL_PCI_DEVICE(0x9DF0, 0x2A10, iwl5165_2ac_cfg)}, {IWL_PCI_DEVICE(0x9DF0, 0x2010, iwl5165_2ac_cfg)}, - {IWL_PCI_DEVICE(0x2526, 0x0A10, iwl9560_2ac_cfg)}, - {IWL_PCI_DEVICE(0x2526, 0x0010, iwl9560_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2526, 0x1420, iwl5165_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2526, 0x0010, iwl5165_2ac_cfg)}, {IWL_PCI_DEVICE(0x9DF0, 0x0000, iwl5165_2ac_cfg)}, {IWL_PCI_DEVICE(0x9DF0, 0x0310, iwl5165_2ac_cfg)}, {IWL_PCI_DEVICE(0x9DF0, 0x0510, iwl5165_2ac_cfg)}, {IWL_PCI_DEVICE(0x9DF0, 0x0710, iwl5165_2ac_cfg)}, - {IWL_PCI_DEVICE(0x2526, 0x0210, iwl9560_2ac_cfg)}, - {IWL_PCI_DEVICE(0x2526, 0x0410, iwl9560_2ac_cfg)}, - {IWL_PCI_DEVICE(0x2526, 0x0610, iwl9560_2ac_cfg)}, + {IWL_PCI_DEVICE(0x9DF0, 0x0210, iwl9560_2ac_cfg)}, + {IWL_PCI_DEVICE(0x9DF0, 0x0410, iwl9560_2ac_cfg)}, + {IWL_PCI_DEVICE(0x9DF0, 0x0610, iwl9560_2ac_cfg)}, #endif /* CONFIG_IWLMVM */ {0} From d5216a28936add0a9c34bdc7d4f03c2e0a2261c2 Mon Sep 17 00:00:00 2001 From: Liad Kaufman Date: Sun, 9 Aug 2015 15:50:51 +0300 Subject: [PATCH 0108/1649] iwlwifi: mvm: use bss client queue for bss station Use the reserved BSS Client queue when connecting to an AP in DQA mode. Signed-off-by: Liad Kaufman Signed-off-by: Emmanuel Grumbach --- .../net/wireless/intel/iwlwifi/mvm/fw-api.h | 3 +++ drivers/net/wireless/intel/iwlwifi/mvm/sta.c | 18 +++++++++++++----- 2 files changed, 16 insertions(+), 5 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h index 8217eb25b090..965268766ac2 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h @@ -89,6 +89,8 @@ enum { /* * DQA queue numbers * + * @IWL_MVM_DQA_BSS_CLIENT_QUEUE: a queue reserved for BSS activity, to ensure + * that we are never left without the possibility to connect to an AP. * @IWL_MVM_DQA_MIN_MGMT_QUEUE: first TXQ in pool for MGMT and non-QOS frames. 
* Each MGMT queue is mapped to a single STA * MGMT frames are frames that return true on ieee80211_is_mgmt() @@ -100,6 +102,7 @@ enum { * @IWL_MVM_DQA_MAX_DATA_QUEUE: last TXQ in pool for DATA frames */ enum iwl_mvm_dqa_txq { + IWL_MVM_DQA_BSS_CLIENT_QUEUE = 4, IWL_MVM_DQA_MIN_MGMT_QUEUE = 5, IWL_MVM_DQA_MAX_MGMT_QUEUE = 8, IWL_MVM_DQA_MIN_DATA_QUEUE = 10, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c index 3f36a661ec96..e157bd5a2204 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c @@ -336,7 +336,8 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm, * as aggregatable. * Mark all DATA queues as allowing to be aggregated at some point */ - cfg.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE); + cfg.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE || + queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE); IWL_DEBUG_TX_QUEUES(mvm, "Allocating queue #%d to sta %d on tid %d\n", queue, mvmsta->sta_id, tid); @@ -448,7 +449,8 @@ void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk) } static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm, - struct ieee80211_sta *sta) + struct ieee80211_sta *sta, + enum nl80211_iftype vif_type) { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); int queue; @@ -456,8 +458,13 @@ static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm, spin_lock_bh(&mvm->queue_info_lock); /* Make sure we have free resources for this STA */ - queue = iwl_mvm_find_free_queue(mvm, IWL_MVM_DQA_MIN_DATA_QUEUE, - IWL_MVM_DQA_MAX_DATA_QUEUE); + if (vif_type == NL80211_IFTYPE_STATION && !sta->tdls && + !mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].hw_queue_refcount && + !mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].setup_reserved) + queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE; + else + queue = iwl_mvm_find_free_queue(mvm, IWL_MVM_DQA_MIN_DATA_QUEUE, + IWL_MVM_DQA_MAX_DATA_QUEUE); if (queue < 0) { spin_unlock_bh(&mvm->queue_info_lock); IWL_ERR(mvm, "No available queues for new station\n"); @@ -551,7 +558,8 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm, } if (iwl_mvm_is_dqa_supported(mvm)) { - ret = iwl_mvm_reserve_sta_stream(mvm, sta); + ret = iwl_mvm_reserve_sta_stream(mvm, sta, + ieee80211_vif_type_p2p(vif)); if (ret) goto err; } From f02669be45b44ffbb70d2f721f47544629f7a9a4 Mon Sep 17 00:00:00 2001 From: Liad Kaufman Date: Sun, 28 Feb 2016 16:15:07 +0200 Subject: [PATCH 0109/1649] iwlwifi: mvm: set sta_id in SCD_QUEUE_CONFIG cmd Set the correct sta_id in the SCD_QUEUE_CONFIG command sent to the FW when enabling/disabling queues. This is needed in DQA-mode to allow the FW to associate between queue and STA. Signed-off-by: Liad Kaufman Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/mvm/mvm.h | 1 + drivers/net/wireless/intel/iwlwifi/mvm/utils.c | 4 ++++ 2 files changed, 5 insertions(+) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index a9de2ad642bc..cd5f16e9cab4 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -665,6 +665,7 @@ struct iwl_mvm { /* Map to HW queue */ u32 hw_queue_to_mac80211; u8 hw_queue_refcount; + u8 ra_sta_id; /* The RA this queue is mapped to, if exists */ /* * This is to mark that queue is reserved for a STA but not yet * allocated. 
This is needed to make sure we have at least one diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c index 76866b9e5686..486c98541afc 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c @@ -608,6 +608,8 @@ void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, mvm->queue_info[queue].hw_queue_refcount++; if (mvm->queue_info[queue].hw_queue_refcount > 1) enable_queue = false; + else + mvm->queue_info[queue].ra_sta_id = cfg->sta_id; mvm->queue_info[queue].tid_bitmap |= BIT(cfg->tid); IWL_DEBUG_TX_QUEUES(mvm, @@ -693,6 +695,8 @@ void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, return; } + cmd.sta_id = mvm->queue_info[queue].ra_sta_id; + /* Make sure queue info is correct even though we overwrite it */ WARN(mvm->queue_info[queue].hw_queue_refcount || mvm->queue_info[queue].tid_bitmap || From 0e0e44205c14b557606b498ff0fcad53c7c2430a Mon Sep 17 00:00:00 2001 From: Liad Kaufman Date: Tue, 4 Aug 2015 15:13:38 +0300 Subject: [PATCH 0110/1649] iwlwifi: mvm: allocate dedicated queue for cab in dqa mode In DQA mode, allocate a dedicated queue (#3) for content after beacon (AKA "CaB"). Signed-off-by: Liad Kaufman Signed-off-by: Emmanuel Grumbach --- .../net/wireless/intel/iwlwifi/mvm/fw-api.h | 2 ++ .../net/wireless/intel/iwlwifi/mvm/mac-ctxt.c | 18 ++++++++++++------ 2 files changed, 14 insertions(+), 6 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h index 965268766ac2..b38cb03ec086 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h @@ -89,6 +89,7 @@ enum { /* * DQA queue numbers * + * @IWL_MVM_DQA_GCAST_QUEUE: a queue reserved for P2P GO/SoftAP GCAST frames * @IWL_MVM_DQA_BSS_CLIENT_QUEUE: a queue reserved for BSS activity, to ensure * that we are never left without the possibility to connect to an AP. * @IWL_MVM_DQA_MIN_MGMT_QUEUE: first TXQ in pool for MGMT and non-QOS frames. 
@@ -102,6 +103,7 @@ enum { * @IWL_MVM_DQA_MAX_DATA_QUEUE: last TXQ in pool for DATA frames */ enum iwl_mvm_dqa_txq { + IWL_MVM_DQA_GCAST_QUEUE = 3, IWL_MVM_DQA_BSS_CLIENT_QUEUE = 4, IWL_MVM_DQA_MIN_MGMT_QUEUE = 5, IWL_MVM_DQA_MAX_MGMT_QUEUE = 8, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c index c02c1055d534..43fd85725d45 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c @@ -447,13 +447,19 @@ static int iwl_mvm_mac_ctxt_allocate_resources(struct iwl_mvm *mvm, /* Allocate the CAB queue for softAP and GO interfaces */ if (vif->type == NL80211_IFTYPE_AP) { - u8 queue = find_first_zero_bit(&used_hw_queues, - mvm->first_agg_queue); + u8 queue; - if (queue >= mvm->first_agg_queue) { - IWL_ERR(mvm, "Failed to allocate cab queue\n"); - ret = -EIO; - goto exit_fail; + if (!iwl_mvm_is_dqa_supported(mvm)) { + queue = find_first_zero_bit(&used_hw_queues, + mvm->first_agg_queue); + + if (queue >= mvm->first_agg_queue) { + IWL_ERR(mvm, "Failed to allocate cab queue\n"); + ret = -EIO; + goto exit_fail; + } + } else { + queue = IWL_MVM_DQA_GCAST_QUEUE; } vif->cab_queue = queue; From 097129c9e62540122b63cba79c1843a2602bec37 Mon Sep 17 00:00:00 2001 From: Liad Kaufman Date: Sun, 9 Aug 2015 18:28:43 +0300 Subject: [PATCH 0111/1649] iwlwifi: mvm: move cmd queue to be #0 in dqa mode Change the CMD queue to be queue #0 (rather than queue #9) when working in DQA mode. Signed-off-by: Liad Kaufman Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h | 2 ++ drivers/net/wireless/intel/iwlwifi/mvm/fw.c | 5 ++++- drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c | 8 ++++++-- drivers/net/wireless/intel/iwlwifi/mvm/ops.c | 5 ++++- 4 files changed, 16 insertions(+), 4 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h index b38cb03ec086..60eed8485aba 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h @@ -89,6 +89,7 @@ enum { /* * DQA queue numbers * + * @IWL_MVM_DQA_CMD_QUEUE: a queue reserved for sending HCMDs to the FW * @IWL_MVM_DQA_GCAST_QUEUE: a queue reserved for P2P GO/SoftAP GCAST frames * @IWL_MVM_DQA_BSS_CLIENT_QUEUE: a queue reserved for BSS activity, to ensure * that we are never left without the possibility to connect to an AP. 
@@ -103,6 +104,7 @@ enum { * @IWL_MVM_DQA_MAX_DATA_QUEUE: last TXQ in pool for DATA frames */ enum iwl_mvm_dqa_txq { + IWL_MVM_DQA_CMD_QUEUE = 0, IWL_MVM_DQA_GCAST_QUEUE = 3, IWL_MVM_DQA_BSS_CLIENT_QUEUE = 4, IWL_MVM_DQA_MIN_MGMT_QUEUE = 5, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c index 2dc97a19246a..6ad5c602e84c 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c @@ -652,7 +652,10 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm, */ memset(&mvm->queue_info, 0, sizeof(mvm->queue_info)); - mvm->queue_info[IWL_MVM_CMD_QUEUE].hw_queue_refcount = 1; + if (iwl_mvm_is_dqa_supported(mvm)) + mvm->queue_info[IWL_MVM_DQA_CMD_QUEUE].hw_queue_refcount = 1; + else + mvm->queue_info[IWL_MVM_CMD_QUEUE].hw_queue_refcount = 1; for (i = 0; i < IEEE80211_MAX_QUEUES; i++) atomic_set(&mvm->mac80211_queue_stop_count[i], 0); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c index 43fd85725d45..5f950568e92c 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c @@ -252,10 +252,14 @@ unsigned long iwl_mvm_get_used_hw_queues(struct iwl_mvm *mvm, .exclude_vif = exclude_vif, .used_hw_queues = BIT(IWL_MVM_OFFCHANNEL_QUEUE) | - BIT(mvm->aux_queue) | - BIT(IWL_MVM_CMD_QUEUE), + BIT(mvm->aux_queue), }; + if (iwl_mvm_is_dqa_supported(mvm)) + data.used_hw_queues |= BIT(IWL_MVM_DQA_CMD_QUEUE); + else + data.used_hw_queues |= BIT(IWL_MVM_CMD_QUEUE); + lockdep_assert_held(&mvm->mutex); /* mark all VIF used hw queues */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index e36bcade69d1..cb0092609595 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c @@ -619,7 +619,10 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, trans_cfg.command_groups = iwl_mvm_groups; trans_cfg.command_groups_size = ARRAY_SIZE(iwl_mvm_groups); - trans_cfg.cmd_queue = IWL_MVM_CMD_QUEUE; + if (iwl_mvm_is_dqa_supported(mvm)) + trans_cfg.cmd_queue = IWL_MVM_DQA_CMD_QUEUE; + else + trans_cfg.cmd_queue = IWL_MVM_CMD_QUEUE; trans_cfg.cmd_fifo = IWL_MVM_TX_FIFO_CMD; trans_cfg.scd_set_active = true; From 728e825f81b1fe29eb177148fcabfa55a7f4c1bb Mon Sep 17 00:00:00 2001 From: Luca Coelho Date: Fri, 11 Mar 2016 09:20:37 +0200 Subject: [PATCH 0112/1649] iwlwifi: mvm: add a scan timeout for regular scans If something goes wrong with the firmware and we never get a scan complete notification, we stay stuck forever. In order to avoid this situation, add a timeout and trigger an NMI if it expires before receiving the notification., so we can clean things up. 
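Condensed from the hunks below (scheduled-scan paths and error handling elided), the watchdog is a plain kernel timer: armed when a regular scan is started, cancelled when the completion notification arrives, and forcing an NMI if it fires first:

#define SCAN_TIMEOUT (16 * HZ)

void iwl_mvm_scan_timeout(unsigned long data)
{
	struct iwl_mvm *mvm = (struct iwl_mvm *)data;

	IWL_ERR(mvm, "regular scan timed out\n");

	del_timer(&mvm->scan_timer);
	iwl_force_nmi(mvm->trans);	/* recover the stuck firmware */
}

/* op-mode start: setup_timer(&mvm->scan_timer, iwl_mvm_scan_timeout,
 *                            (unsigned long)mvm);
 * scan start:    mod_timer(&mvm->scan_timer, jiffies + SCAN_TIMEOUT);
 * scan complete: del_timer(&mvm->scan_timer);
 * op-mode stop:  del_timer_sync(&mvm->scan_timer);
 */
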
Signed-off-by: Luca Coelho Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/mvm/mvm.h | 2 ++ drivers/net/wireless/intel/iwlwifi/mvm/ops.c | 5 +++++ drivers/net/wireless/intel/iwlwifi/mvm/scan.c | 21 +++++++++++++++++++ 3 files changed, 28 insertions(+) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index cd5f16e9cab4..2d685e02d488 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -710,6 +710,7 @@ struct iwl_mvm { struct iwl_mcast_filter_cmd *mcast_filter_cmd; enum iwl_mvm_scan_type scan_type; enum iwl_mvm_sched_scan_pass_all_states sched_scan_pass_all; + struct timer_list scan_timer; /* max number of simultaneous scans the FW supports */ unsigned int max_scans; @@ -1314,6 +1315,7 @@ int iwl_mvm_scan_size(struct iwl_mvm *mvm); int iwl_mvm_scan_stop(struct iwl_mvm *mvm, int type, bool notify); int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm); void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm); +void iwl_mvm_scan_timeout(unsigned long data); /* Scheduled scan */ void iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index cb0092609595..656541c5360a 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c @@ -728,6 +728,9 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, iwl_mvm_tof_init(mvm); + setup_timer(&mvm->scan_timer, iwl_mvm_scan_timeout, + (unsigned long)mvm); + return op_mode; out_unregister: @@ -783,6 +786,8 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode) iwl_mvm_tof_clean(mvm); + del_timer_sync(&mvm->scan_timer); + mutex_destroy(&mvm->mutex); mutex_destroy(&mvm->d0i3_suspend_mutex); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c index 25b007cf7db7..c1d1be9c5d01 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c @@ -70,6 +70,7 @@ #include "mvm.h" #include "fw-api-scan.h" +#include "iwl-io.h" #define IWL_DENSE_EBS_SCAN_RATIO 5 #define IWL_SPARSE_EBS_SCAN_RATIO 1 @@ -398,6 +399,10 @@ void iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm, ieee80211_scan_completed(mvm->hw, scan_notif->status == IWL_SCAN_OFFLOAD_ABORTED); iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN); + del_timer(&mvm->scan_timer); + } else { + IWL_ERR(mvm, + "got scan complete notification but no scan is running\n"); } mvm->last_ebs_successful = @@ -1217,6 +1222,18 @@ static int iwl_mvm_check_running_scans(struct iwl_mvm *mvm, int type) return -EIO; } +#define SCAN_TIMEOUT (16 * HZ) + +void iwl_mvm_scan_timeout(unsigned long data) +{ + struct iwl_mvm *mvm = (struct iwl_mvm *)data; + + IWL_ERR(mvm, "regular scan timed out\n"); + + del_timer(&mvm->scan_timer); + iwl_force_nmi(mvm->trans); +} + int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct cfg80211_scan_request *req, struct ieee80211_scan_ies *ies) @@ -1296,6 +1313,8 @@ int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, mvm->scan_status |= IWL_MVM_SCAN_REGULAR; iwl_mvm_ref(mvm, IWL_MVM_REF_SCAN); + mod_timer(&mvm->scan_timer, jiffies + SCAN_TIMEOUT); + return 0; } @@ -1413,6 +1432,7 @@ void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm, if (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_REGULAR) { ieee80211_scan_completed(mvm->hw, aborted); 
iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN); + del_timer(&mvm->scan_timer); } else if (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_SCHED) { ieee80211_sched_scan_stopped(mvm->hw); mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED; @@ -1608,6 +1628,7 @@ out: * to release the scan reference here. */ iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN); + del_timer(&mvm->scan_timer); if (notify) ieee80211_scan_completed(mvm->hw, true); } else if (notify) { From 5e6a98dc4863e50a010ebdf09fa63c1e11929a85 Mon Sep 17 00:00:00 2001 From: Sara Sharon Date: Thu, 10 Mar 2016 17:40:56 +0200 Subject: [PATCH 0113/1649] iwlwifi: mvm: enable TCP/UDP checksum support for 9000 family Declare and enable support of RX and TX checksum for 9000 family. Configure offload_assist in the TX cmd accordingly to support TX csum. Signed-off-by: Sara Sharon Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/iwl-9000.c | 1 + .../net/wireless/intel/iwlwifi/iwl-config.h | 2 + .../net/wireless/intel/iwlwifi/mvm/mac80211.c | 13 +- drivers/net/wireless/intel/iwlwifi/mvm/tx.c | 124 +++++++++++++++++- 4 files changed, 133 insertions(+), 7 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-9000.c b/drivers/net/wireless/intel/iwlwifi/iwl-9000.c index 277396a30713..1f25ba69516f 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-9000.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-9000.c @@ -137,6 +137,7 @@ static const struct iwl_tt_params iwl9000_tt_params = { .dccm2_len = IWL9000_DCCM2_LEN, \ .smem_offset = IWL9000_SMEM_OFFSET, \ .smem_len = IWL9000_SMEM_LEN, \ + .features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM, \ .thermal_params = &iwl9000_tt_params, \ .apmg_not_supported = true, \ .mq_rx_supported = true, \ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h index 8cbd24875ffc..b0025570c7bb 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h @@ -131,6 +131,8 @@ enum iwl_led_mode { #define IWL_MAX_WD_TIMEOUT 120000 #define IWL_DEFAULT_MAX_TX_POWER 22 +#define IWL_TX_CSUM_NETIF_FLAGS (NETIF_F_IPV6_CSUM | NETIF_F_IP_CSUM |\ + NETIF_F_TSO | NETIF_F_TSO6) /* Antenna presence definitions */ #define ANT_NONE 0x0 diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index 115d7aa5e720..4f5ec495b460 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -665,12 +665,13 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) } hw->netdev_features |= mvm->cfg->features; - if (!iwl_mvm_is_csum_supported(mvm)) - hw->netdev_features &= ~NETIF_F_RXCSUM; - - if (IWL_MVM_SW_TX_CSUM_OFFLOAD) - hw->netdev_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | - NETIF_F_TSO | NETIF_F_TSO6; + if (!iwl_mvm_is_csum_supported(mvm)) { + hw->netdev_features &= ~(IWL_TX_CSUM_NETIF_FLAGS | + NETIF_F_RXCSUM); + /* We may support SW TX CSUM */ + if (IWL_MVM_SW_TX_CSUM_OFFLOAD) + hw->netdev_features |= IWL_TX_CSUM_NETIF_FLAGS; + } ret = ieee80211_register_hw(mvm->hw); if (ret) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c index 24cff98ecca0..efb9b98c4c98 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c @@ -67,6 +67,7 @@ #include #include #include +#include #include "iwl-trans.h" #include "iwl-eeprom-parse.h" @@ -98,6 +99,111 @@ iwl_mvm_bar_check_trigger(struct iwl_mvm *mvm, const u8 
*addr, addr, tid, ssn); } +#define OPT_HDR(type, skb, off) \ + (type *)(skb_network_header(skb) + (off)) + +static void iwl_mvm_tx_csum(struct iwl_mvm *mvm, struct sk_buff *skb, + struct ieee80211_hdr *hdr, + struct ieee80211_tx_info *info, + struct iwl_tx_cmd *tx_cmd) +{ +#if IS_ENABLED(CONFIG_INET) + u16 mh_len = ieee80211_hdrlen(hdr->frame_control); + u16 offload_assist = le16_to_cpu(tx_cmd->offload_assist); + u8 protocol = 0; + + /* + * Do not compute checksum if already computed or if transport will + * compute it + */ + if (skb->ip_summed != CHECKSUM_PARTIAL || IWL_MVM_SW_TX_CSUM_OFFLOAD) + return; + + /* We do not expect to be requested to csum stuff we do not support */ + if (WARN_ONCE(!(mvm->hw->netdev_features & IWL_TX_CSUM_NETIF_FLAGS) || + (skb->protocol != htons(ETH_P_IP) && + skb->protocol != htons(ETH_P_IPV6)), + "No support for requested checksum\n")) { + skb_checksum_help(skb); + return; + } + + if (skb->protocol == htons(ETH_P_IP)) { + protocol = ip_hdr(skb)->protocol; + } else { +#if IS_ENABLED(CONFIG_IPV6) + struct ipv6hdr *ipv6h = + (struct ipv6hdr *)skb_network_header(skb); + unsigned int off = sizeof(*ipv6h); + + protocol = ipv6h->nexthdr; + while (protocol != NEXTHDR_NONE && ipv6_ext_hdr(protocol)) { + /* only supported extension headers */ + if (protocol != NEXTHDR_ROUTING && + protocol != NEXTHDR_HOP && + protocol != NEXTHDR_DEST && + protocol != NEXTHDR_FRAGMENT) { + skb_checksum_help(skb); + return; + } + + if (protocol == NEXTHDR_FRAGMENT) { + struct frag_hdr *hp = + OPT_HDR(struct frag_hdr, skb, off); + + protocol = hp->nexthdr; + off += sizeof(struct frag_hdr); + } else { + struct ipv6_opt_hdr *hp = + OPT_HDR(struct ipv6_opt_hdr, skb, off); + + protocol = hp->nexthdr; + off += ipv6_optlen(hp); + } + } + /* if we get here - protocol now should be TCP/UDP */ +#endif + } + + if (protocol != IPPROTO_TCP && protocol != IPPROTO_UDP) { + WARN_ON_ONCE(1); + skb_checksum_help(skb); + return; + } + + /* enable L4 csum */ + offload_assist |= BIT(TX_CMD_OFFLD_L4_EN); + + /* + * Set offset to IP header (snap). + * We don't support tunneling so no need to take care of inner header. + * Size is in words. + */ + offload_assist |= (4 << TX_CMD_OFFLD_IP_HDR); + + /* Do IPv4 csum for AMSDU only (no IP csum for Ipv6) */ + if (skb->protocol == htons(ETH_P_IP) && + (offload_assist & BIT(TX_CMD_OFFLD_AMSDU))) { + ip_hdr(skb)->check = 0; + offload_assist |= BIT(TX_CMD_OFFLD_L3_EN); + } + + /* reset UDP/TCP header csum */ + if (protocol == IPPROTO_TCP) + tcp_hdr(skb)->check = 0; + else + udp_hdr(skb)->check = 0; + + /* mac header len should include IV, size is in words */ + if (info->control.hw_key) + mh_len += info->control.hw_key->iv_len; + mh_len /= 2; + offload_assist |= mh_len << TX_CMD_OFFLD_MH_SIZE; + + tx_cmd->offload_assist = cpu_to_le16(offload_assist); +#endif +} + /* * Sets most of the Tx cmd's fields */ @@ -196,6 +302,8 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb, if (ieee80211_hdrlen(fc) % 4 && !(tx_cmd->offload_assist & cpu_to_le16(BIT(TX_CMD_OFFLD_AMSDU)))) tx_cmd->offload_assist |= cpu_to_le16(BIT(TX_CMD_OFFLD_PAD)); + + iwl_mvm_tx_csum(mvm, skb, hdr, info, tx_cmd); } /* @@ -466,6 +574,7 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb, u16 ip_base_id = ipv4 ? 
ntohs(ip_hdr(skb)->id) : 0; u16 amsdu_add, snap_ip_tcp, pad, i = 0; unsigned int dbg_max_amsdu_len; + netdev_features_t netdev_features = NETIF_F_CSUM_MASK | NETIF_F_SG; u8 *qc, tid, txf; snap_ip_tcp = 8 + skb_transport_header(skb) - skb_network_header(skb) + @@ -484,6 +593,19 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb, goto segment; } + /* + * Do not build AMSDU for IPv6 with extension headers. + * ask stack to segment and checkum the generated MPDUs for us. + */ + if (skb->protocol == htons(ETH_P_IPV6) && + ((struct ipv6hdr *)skb_network_header(skb))->nexthdr != + IPPROTO_TCP) { + num_subframes = 1; + pad = 0; + netdev_features &= ~NETIF_F_CSUM_MASK; + goto segment; + } + /* * No need to lock amsdu_in_ampdu_allowed since it can't be modified * during an BA session. @@ -577,7 +699,7 @@ segment: skb_shinfo(skb)->gso_size = num_subframes * mss; memcpy(cb, skb->cb, sizeof(cb)); - next = skb_gso_segment(skb, NETIF_F_CSUM_MASK | NETIF_F_SG); + next = skb_gso_segment(skb, netdev_features); skb_shinfo(skb)->gso_size = mss; if (WARN_ON_ONCE(IS_ERR(next))) return -EINVAL; From 9d9b21d1b61647d5a37241571c0e3eb7cc04b348 Mon Sep 17 00:00:00 2001 From: Emmanuel Grumbach Date: Thu, 24 Mar 2016 08:44:57 +0200 Subject: [PATCH 0114/1649] iwlwifi: remove IWL_*_UCODE_API_OK _UCODE_API_OK was a intermediate version between MIN and MAX. If a user had a firmware below _OK but above _MIN, the driver would work but the user would get a warning in the kernel log telling him to update his firmware. This is not needed since most users won't look for these messages in the kernel log if their wifi is working. Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/iwl-1000.c | 10 ++------ drivers/net/wireless/intel/iwlwifi/iwl-2000.c | 18 ++++---------- drivers/net/wireless/intel/iwlwifi/iwl-5000.c | 11 ++------- drivers/net/wireless/intel/iwlwifi/iwl-6000.c | 20 ++++------------ drivers/net/wireless/intel/iwlwifi/iwl-7000.c | 20 ++++------------ drivers/net/wireless/intel/iwlwifi/iwl-8000.c | 11 ++------- drivers/net/wireless/intel/iwlwifi/iwl-9000.c | 6 +---- .../net/wireless/intel/iwlwifi/iwl-config.h | 3 --- drivers/net/wireless/intel/iwlwifi/iwl-drv.c | 24 +------------------ 9 files changed, 21 insertions(+), 102 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-1000.c b/drivers/net/wireless/intel/iwlwifi/iwl-1000.c index a90dbab6bbbe..ef22c3d168fc 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-1000.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-1000.c @@ -34,10 +34,6 @@ #define IWL1000_UCODE_API_MAX 5 #define IWL100_UCODE_API_MAX 5 -/* Oldest version we won't warn about */ -#define IWL1000_UCODE_API_OK 5 -#define IWL100_UCODE_API_OK 5 - /* Lowest firmware API version supported */ #define IWL1000_UCODE_API_MIN 1 #define IWL100_UCODE_API_MIN 5 @@ -86,7 +82,6 @@ static const struct iwl_eeprom_params iwl1000_eeprom_params = { #define IWL_DEVICE_1000 \ .fw_name_pre = IWL1000_FW_PRE, \ .ucode_api_max = IWL1000_UCODE_API_MAX, \ - .ucode_api_ok = IWL1000_UCODE_API_OK, \ .ucode_api_min = IWL1000_UCODE_API_MIN, \ .device_family = IWL_DEVICE_FAMILY_1000, \ .max_inst_size = IWLAGN_RTC_INST_SIZE, \ @@ -112,7 +107,6 @@ const struct iwl_cfg iwl1000_bg_cfg = { #define IWL_DEVICE_100 \ .fw_name_pre = IWL100_FW_PRE, \ .ucode_api_max = IWL100_UCODE_API_MAX, \ - .ucode_api_ok = IWL100_UCODE_API_OK, \ .ucode_api_min = IWL100_UCODE_API_MIN, \ .device_family = IWL_DEVICE_FAMILY_100, \ .max_inst_size = IWLAGN_RTC_INST_SIZE, \ @@ -136,5 +130,5 @@ const struct iwl_cfg 
iwl100_bg_cfg = { IWL_DEVICE_100, }; -MODULE_FIRMWARE(IWL1000_MODULE_FIRMWARE(IWL1000_UCODE_API_OK)); -MODULE_FIRMWARE(IWL100_MODULE_FIRMWARE(IWL100_UCODE_API_OK)); +MODULE_FIRMWARE(IWL1000_MODULE_FIRMWARE(IWL1000_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL100_MODULE_FIRMWARE(IWL100_UCODE_API_MAX)); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-2000.c b/drivers/net/wireless/intel/iwlwifi/iwl-2000.c index a6da9594c4a5..dc246c997084 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-2000.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-2000.c @@ -36,12 +36,6 @@ #define IWL105_UCODE_API_MAX 6 #define IWL135_UCODE_API_MAX 6 -/* Oldest version we won't warn about */ -#define IWL2030_UCODE_API_OK 6 -#define IWL2000_UCODE_API_OK 6 -#define IWL105_UCODE_API_OK 6 -#define IWL135_UCODE_API_OK 6 - /* Lowest firmware API version supported */ #define IWL2030_UCODE_API_MIN 5 #define IWL2000_UCODE_API_MIN 5 @@ -114,7 +108,6 @@ static const struct iwl_eeprom_params iwl20x0_eeprom_params = { #define IWL_DEVICE_2000 \ .fw_name_pre = IWL2000_FW_PRE, \ .ucode_api_max = IWL2000_UCODE_API_MAX, \ - .ucode_api_ok = IWL2000_UCODE_API_OK, \ .ucode_api_min = IWL2000_UCODE_API_MIN, \ .device_family = IWL_DEVICE_FAMILY_2000, \ .max_inst_size = IWL60_RTC_INST_SIZE, \ @@ -142,7 +135,6 @@ const struct iwl_cfg iwl2000_2bgn_d_cfg = { #define IWL_DEVICE_2030 \ .fw_name_pre = IWL2030_FW_PRE, \ .ucode_api_max = IWL2030_UCODE_API_MAX, \ - .ucode_api_ok = IWL2030_UCODE_API_OK, \ .ucode_api_min = IWL2030_UCODE_API_MIN, \ .device_family = IWL_DEVICE_FAMILY_2030, \ .max_inst_size = IWL60_RTC_INST_SIZE, \ @@ -163,7 +155,6 @@ const struct iwl_cfg iwl2030_2bgn_cfg = { #define IWL_DEVICE_105 \ .fw_name_pre = IWL105_FW_PRE, \ .ucode_api_max = IWL105_UCODE_API_MAX, \ - .ucode_api_ok = IWL105_UCODE_API_OK, \ .ucode_api_min = IWL105_UCODE_API_MIN, \ .device_family = IWL_DEVICE_FAMILY_105, \ .max_inst_size = IWL60_RTC_INST_SIZE, \ @@ -191,7 +182,6 @@ const struct iwl_cfg iwl105_bgn_d_cfg = { #define IWL_DEVICE_135 \ .fw_name_pre = IWL135_FW_PRE, \ .ucode_api_max = IWL135_UCODE_API_MAX, \ - .ucode_api_ok = IWL135_UCODE_API_OK, \ .ucode_api_min = IWL135_UCODE_API_MIN, \ .device_family = IWL_DEVICE_FAMILY_135, \ .max_inst_size = IWL60_RTC_INST_SIZE, \ @@ -210,7 +200,7 @@ const struct iwl_cfg iwl135_bgn_cfg = { .ht_params = &iwl2000_ht_params, }; -MODULE_FIRMWARE(IWL2000_MODULE_FIRMWARE(IWL2000_UCODE_API_OK)); -MODULE_FIRMWARE(IWL2030_MODULE_FIRMWARE(IWL2030_UCODE_API_OK)); -MODULE_FIRMWARE(IWL105_MODULE_FIRMWARE(IWL105_UCODE_API_OK)); -MODULE_FIRMWARE(IWL135_MODULE_FIRMWARE(IWL135_UCODE_API_OK)); +MODULE_FIRMWARE(IWL2000_MODULE_FIRMWARE(IWL2000_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL2030_MODULE_FIRMWARE(IWL2030_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL105_MODULE_FIRMWARE(IWL105_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL135_MODULE_FIRMWARE(IWL135_UCODE_API_MAX)); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-5000.c b/drivers/net/wireless/intel/iwlwifi/iwl-5000.c index 8b5afdef2d83..4dcdab6781cc 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-5000.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-5000.c @@ -34,10 +34,6 @@ #define IWL5000_UCODE_API_MAX 5 #define IWL5150_UCODE_API_MAX 2 -/* Oldest version we won't warn about */ -#define IWL5000_UCODE_API_OK 5 -#define IWL5150_UCODE_API_OK 2 - /* Lowest firmware API version supported */ #define IWL5000_UCODE_API_MIN 1 #define IWL5150_UCODE_API_MIN 1 @@ -84,7 +80,6 @@ static const struct iwl_eeprom_params iwl5000_eeprom_params = { #define IWL_DEVICE_5000 \ .fw_name_pre = IWL5000_FW_PRE, \ .ucode_api_max 
= IWL5000_UCODE_API_MAX, \ - .ucode_api_ok = IWL5000_UCODE_API_OK, \ .ucode_api_min = IWL5000_UCODE_API_MIN, \ .device_family = IWL_DEVICE_FAMILY_5000, \ .max_inst_size = IWLAGN_RTC_INST_SIZE, \ @@ -132,7 +127,6 @@ const struct iwl_cfg iwl5350_agn_cfg = { .name = "Intel(R) WiMAX/WiFi Link 5350 AGN", .fw_name_pre = IWL5000_FW_PRE, .ucode_api_max = IWL5000_UCODE_API_MAX, - .ucode_api_ok = IWL5000_UCODE_API_OK, .ucode_api_min = IWL5000_UCODE_API_MIN, .device_family = IWL_DEVICE_FAMILY_5000, .max_inst_size = IWLAGN_RTC_INST_SIZE, @@ -149,7 +143,6 @@ const struct iwl_cfg iwl5350_agn_cfg = { #define IWL_DEVICE_5150 \ .fw_name_pre = IWL5150_FW_PRE, \ .ucode_api_max = IWL5150_UCODE_API_MAX, \ - .ucode_api_ok = IWL5150_UCODE_API_OK, \ .ucode_api_min = IWL5150_UCODE_API_MIN, \ .device_family = IWL_DEVICE_FAMILY_5150, \ .max_inst_size = IWLAGN_RTC_INST_SIZE, \ @@ -174,5 +167,5 @@ const struct iwl_cfg iwl5150_abg_cfg = { IWL_DEVICE_5150, }; -MODULE_FIRMWARE(IWL5000_MODULE_FIRMWARE(IWL5000_UCODE_API_OK)); -MODULE_FIRMWARE(IWL5150_MODULE_FIRMWARE(IWL5150_UCODE_API_OK)); +MODULE_FIRMWARE(IWL5000_MODULE_FIRMWARE(IWL5000_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL5150_MODULE_FIRMWARE(IWL5150_UCODE_API_MAX)); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-6000.c b/drivers/net/wireless/intel/iwlwifi/iwl-6000.c index 0b4ba781b631..9938f5340ac0 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-6000.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-6000.c @@ -36,13 +36,6 @@ #define IWL6000G2_UCODE_API_MAX 6 #define IWL6035_UCODE_API_MAX 6 -/* Oldest version we won't warn about */ -#define IWL6000_UCODE_API_OK 4 -#define IWL6000G2_UCODE_API_OK 5 -#define IWL6050_UCODE_API_OK 5 -#define IWL6000G2B_UCODE_API_OK 6 -#define IWL6035_UCODE_API_OK 6 - /* Lowest firmware API version supported */ #define IWL6000_UCODE_API_MIN 4 #define IWL6050_UCODE_API_MIN 4 @@ -136,7 +129,6 @@ static const struct iwl_eeprom_params iwl6000_eeprom_params = { #define IWL_DEVICE_6005 \ .fw_name_pre = IWL6005_FW_PRE, \ .ucode_api_max = IWL6000G2_UCODE_API_MAX, \ - .ucode_api_ok = IWL6000G2_UCODE_API_OK, \ .ucode_api_min = IWL6000G2_UCODE_API_MIN, \ .device_family = IWL_DEVICE_FAMILY_6005, \ .max_inst_size = IWL60_RTC_INST_SIZE, \ @@ -191,7 +183,6 @@ const struct iwl_cfg iwl6005_2agn_mow2_cfg = { #define IWL_DEVICE_6030 \ .fw_name_pre = IWL6030_FW_PRE, \ .ucode_api_max = IWL6000G2_UCODE_API_MAX, \ - .ucode_api_ok = IWL6000G2B_UCODE_API_OK, \ .ucode_api_min = IWL6000G2_UCODE_API_MIN, \ .device_family = IWL_DEVICE_FAMILY_6030, \ .max_inst_size = IWL60_RTC_INST_SIZE, \ @@ -228,7 +219,6 @@ const struct iwl_cfg iwl6030_2bg_cfg = { #define IWL_DEVICE_6035 \ .fw_name_pre = IWL6030_FW_PRE, \ .ucode_api_max = IWL6035_UCODE_API_MAX, \ - .ucode_api_ok = IWL6035_UCODE_API_OK, \ .ucode_api_min = IWL6035_UCODE_API_MIN, \ .device_family = IWL_DEVICE_FAMILY_6030, \ .max_inst_size = IWL60_RTC_INST_SIZE, \ @@ -282,7 +272,6 @@ const struct iwl_cfg iwl130_bg_cfg = { #define IWL_DEVICE_6000i \ .fw_name_pre = IWL6000_FW_PRE, \ .ucode_api_max = IWL6000_UCODE_API_MAX, \ - .ucode_api_ok = IWL6000_UCODE_API_OK, \ .ucode_api_min = IWL6000_UCODE_API_MIN, \ .device_family = IWL_DEVICE_FAMILY_6000i, \ .max_inst_size = IWL60_RTC_INST_SIZE, \ @@ -370,7 +359,6 @@ const struct iwl_cfg iwl6000_3agn_cfg = { .name = "Intel(R) Centrino(R) Ultimate-N 6300 AGN", .fw_name_pre = IWL6000_FW_PRE, .ucode_api_max = IWL6000_UCODE_API_MAX, - .ucode_api_ok = IWL6000_UCODE_API_OK, .ucode_api_min = IWL6000_UCODE_API_MIN, .device_family = IWL_DEVICE_FAMILY_6000, .max_inst_size = 
IWL60_RTC_INST_SIZE, @@ -383,7 +371,7 @@ const struct iwl_cfg iwl6000_3agn_cfg = { .led_mode = IWL_LED_BLINK, }; -MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_OK)); -MODULE_FIRMWARE(IWL6050_MODULE_FIRMWARE(IWL6050_UCODE_API_OK)); -MODULE_FIRMWARE(IWL6005_MODULE_FIRMWARE(IWL6000G2_UCODE_API_OK)); -MODULE_FIRMWARE(IWL6030_MODULE_FIRMWARE(IWL6000G2B_UCODE_API_OK)); +MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL6050_MODULE_FIRMWARE(IWL6050_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL6005_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL6030_MODULE_FIRMWARE(IWL6000G2B_UCODE_API_MAX)); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-7000.c b/drivers/net/wireless/intel/iwlwifi/iwl-7000.c index f4012a3f4d06..b6283c881d42 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-7000.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-7000.c @@ -76,12 +76,6 @@ #define IWL7265D_UCODE_API_MAX 21 #define IWL3168_UCODE_API_MAX 21 -/* Oldest version we won't warn about */ -#define IWL7260_UCODE_API_OK 16 -#define IWL7265_UCODE_API_OK 16 -#define IWL7265D_UCODE_API_OK 16 -#define IWL3168_UCODE_API_OK 20 - /* Lowest firmware API version supported */ #define IWL7260_UCODE_API_MIN 16 #define IWL7265_UCODE_API_MIN 16 @@ -179,25 +173,21 @@ static const struct iwl_ht_params iwl7000_ht_params = { #define IWL_DEVICE_7000 \ IWL_DEVICE_7000_COMMON, \ .ucode_api_max = IWL7260_UCODE_API_MAX, \ - .ucode_api_ok = IWL7260_UCODE_API_OK, \ .ucode_api_min = IWL7260_UCODE_API_MIN #define IWL_DEVICE_7005 \ IWL_DEVICE_7000_COMMON, \ .ucode_api_max = IWL7265_UCODE_API_MAX, \ - .ucode_api_ok = IWL7265_UCODE_API_OK, \ .ucode_api_min = IWL7265_UCODE_API_MIN #define IWL_DEVICE_3008 \ IWL_DEVICE_7000_COMMON, \ .ucode_api_max = IWL3168_UCODE_API_MAX, \ - .ucode_api_ok = IWL3168_UCODE_API_OK, \ .ucode_api_min = IWL3168_UCODE_API_MIN #define IWL_DEVICE_7005D \ IWL_DEVICE_7000_COMMON, \ .ucode_api_max = IWL7265D_UCODE_API_MAX, \ - .ucode_api_ok = IWL7265D_UCODE_API_OK, \ .ucode_api_min = IWL7265D_UCODE_API_MIN const struct iwl_cfg iwl7260_2ac_cfg = { @@ -388,8 +378,8 @@ const struct iwl_cfg iwl7265d_n_cfg = { .dccm_len = IWL7265_DCCM_LEN, }; -MODULE_FIRMWARE(IWL7260_MODULE_FIRMWARE(IWL7260_UCODE_API_OK)); -MODULE_FIRMWARE(IWL3160_MODULE_FIRMWARE(IWL7260_UCODE_API_OK)); -MODULE_FIRMWARE(IWL3168_MODULE_FIRMWARE(IWL3168_UCODE_API_OK)); -MODULE_FIRMWARE(IWL7265_MODULE_FIRMWARE(IWL7265_UCODE_API_OK)); -MODULE_FIRMWARE(IWL7265D_MODULE_FIRMWARE(IWL7265D_UCODE_API_OK)); +MODULE_FIRMWARE(IWL7260_MODULE_FIRMWARE(IWL7260_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL3160_MODULE_FIRMWARE(IWL7260_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL3168_MODULE_FIRMWARE(IWL3168_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL7265_MODULE_FIRMWARE(IWL7265_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL7265D_MODULE_FIRMWARE(IWL7265D_UCODE_API_MAX)); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-8000.c b/drivers/net/wireless/intel/iwlwifi/iwl-8000.c index 49bb2a5f9dcf..0728a288aa3d 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-8000.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-8000.c @@ -73,10 +73,6 @@ #define IWL8000_UCODE_API_MAX 21 #define IWL8265_UCODE_API_MAX 21 -/* Oldest version we won't warn about */ -#define IWL8000_UCODE_API_OK 16 -#define IWL8265_UCODE_API_OK 20 - /* Lowest firmware API version supported */ #define IWL8000_UCODE_API_MIN 16 #define IWL8265_UCODE_API_MIN 20 @@ -175,19 +171,16 @@ static const struct iwl_tt_params iwl8000_tt_params = { #define IWL_DEVICE_8000 \ IWL_DEVICE_8000_COMMON, \ .ucode_api_max = 
IWL8000_UCODE_API_MAX, \ - .ucode_api_ok = IWL8000_UCODE_API_OK, \ .ucode_api_min = IWL8000_UCODE_API_MIN \ #define IWL_DEVICE_8260 \ IWL_DEVICE_8000_COMMON, \ .ucode_api_max = IWL8000_UCODE_API_MAX, \ - .ucode_api_ok = IWL8000_UCODE_API_OK, \ .ucode_api_min = IWL8000_UCODE_API_MIN \ #define IWL_DEVICE_8265 \ IWL_DEVICE_8000_COMMON, \ .ucode_api_max = IWL8265_UCODE_API_MAX, \ - .ucode_api_ok = IWL8265_UCODE_API_OK, \ .ucode_api_min = IWL8265_UCODE_API_MIN \ const struct iwl_cfg iwl8260_2n_cfg = { @@ -259,5 +252,5 @@ const struct iwl_cfg iwl4165_2ac_sdio_cfg = { .max_vht_ampdu_exponent = MAX_VHT_AMPDU_EXPONENT_8260_SDIO, }; -MODULE_FIRMWARE(IWL8000_MODULE_FIRMWARE(IWL8000_UCODE_API_OK)); -MODULE_FIRMWARE(IWL8265_MODULE_FIRMWARE(IWL8265_UCODE_API_OK)); +MODULE_FIRMWARE(IWL8000_MODULE_FIRMWARE(IWL8000_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL8265_MODULE_FIRMWARE(IWL8265_UCODE_API_MAX)); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-9000.c b/drivers/net/wireless/intel/iwlwifi/iwl-9000.c index 1f25ba69516f..a3d35aa291a9 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-9000.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-9000.c @@ -57,9 +57,6 @@ /* Highest firmware API version supported */ #define IWL9000_UCODE_API_MAX 21 -/* Oldest version we won't warn about */ -#define IWL9000_UCODE_API_OK 16 - /* Lowest firmware API version supported */ #define IWL9000_UCODE_API_MIN 16 @@ -122,7 +119,6 @@ static const struct iwl_tt_params iwl9000_tt_params = { #define IWL_DEVICE_9000 \ .ucode_api_max = IWL9000_UCODE_API_MAX, \ - .ucode_api_ok = IWL9000_UCODE_API_OK, \ .ucode_api_min = IWL9000_UCODE_API_MIN, \ .device_family = IWL_DEVICE_FAMILY_8000, \ .max_inst_size = IWL60_RTC_INST_SIZE, \ @@ -164,4 +160,4 @@ const struct iwl_cfg iwl5165_2ac_cfg = { .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, }; -MODULE_FIRMWARE(IWL9000_MODULE_FIRMWARE(IWL9000_UCODE_API_OK)); +MODULE_FIRMWARE(IWL9000_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX)); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h index b0025570c7bb..08bb4f4e424a 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h @@ -279,8 +279,6 @@ struct iwl_pwr_tx_backoff { * (.ucode) will be added to filename before loading from disk. The * filename is constructed as fw_name_pre.ucode. * @ucode_api_max: Highest version of uCode API supported by driver. - * @ucode_api_ok: oldest version of the uCode API that is OK to load - * without a warning, for use in transitions * @ucode_api_min: Lowest version of uCode API supported by driver. 
* @max_inst_size: The maximal length of the fw inst section * @max_data_size: The maximal length of the fw data section @@ -326,7 +324,6 @@ struct iwl_cfg { const char *name; const char *fw_name_pre; const unsigned int ucode_api_max; - const unsigned int ucode_api_ok; const unsigned int ucode_api_min; const enum iwl_device_family device_family; const u32 max_data_size; diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c index 9a680ac48820..605910f71037 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c @@ -1206,7 +1206,6 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context) int err; struct iwl_firmware_pieces *pieces; const unsigned int api_max = drv->cfg->ucode_api_max; - unsigned int api_ok = drv->cfg->ucode_api_ok; const unsigned int api_min = drv->cfg->ucode_api_min; size_t trigger_tlv_sz[FW_DBG_TRIGGER_MAX]; u32 api_ver; @@ -1219,20 +1218,12 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context) IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE; fw->ucode_capa.n_scan_channels = IWL_DEFAULT_SCAN_CHANNELS; - if (!api_ok) - api_ok = api_max; - pieces = kzalloc(sizeof(*pieces), GFP_KERNEL); if (!pieces) return; - if (!ucode_raw) { - if (drv->fw_index <= api_ok) - IWL_ERR(drv, - "request for firmware file '%s' failed.\n", - drv->firmware_name); + if (!ucode_raw) goto try_again; - } IWL_DEBUG_INFO(drv, "Loaded firmware file '%s' (%zd bytes).\n", drv->firmware_name, ucode_raw->size); @@ -1271,19 +1262,6 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context) api_max, api_ver); goto try_again; } - - if (api_ver < api_ok) { - if (api_ok != api_max) - IWL_ERR(drv, "Firmware has old API version, " - "expected v%u through v%u, got v%u.\n", - api_ok, api_max, api_ver); - else - IWL_ERR(drv, "Firmware has old API version, " - "expected v%u, got v%u.\n", - api_max, api_ver); - IWL_ERR(drv, "New firmware can be obtained from " - "http://www.intellinuxwireless.org/.\n"); - } } /* From 8d80717a12c138f3d765d91feab0c08190a21d85 Mon Sep 17 00:00:00 2001 From: Haim Dreyfuss Date: Sun, 27 Mar 2016 12:56:13 +0300 Subject: [PATCH 0115/1649] iwlwifi: pcie: Fix index iteration on free_irq in MSIX mode In MSIX mode we iterate over the allocated interrupt vectors and register each of them to a handler. In case of registration failure, we free all the allocated IRQs, but we mistakenly use the outer index instead of the inner one.
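The fix restores the usual partial-unwind pattern for MSI-X registration: when request_irq() fails for vector i, only vectors 0..i-1, which were actually requested, are freed, indexed by the inner loop variable. The sketch below is illustrative only; the function name and the "my-msix" label are made up, and plain request_irq() stands in for the threaded variant the driver really uses.

#include <linux/interrupt.h>
#include <linux/pci.h>

static int my_request_msix_vectors(struct pci_dev *pdev,
				   struct msix_entry *entries, int nvec,
				   irq_handler_t handler)
{
	int i, j, ret;

	for (i = 0; i < nvec; i++) {
		ret = request_irq(entries[i].vector, handler, 0,
				  "my-msix", &entries[i]);
		if (ret) {
			/* unwind what was requested so far: index with j, not i */
			for (j = 0; j < i; j++)
				free_irq(entries[j].vector, &entries[j]);
			pci_disable_msix(pdev);
			return ret;
		}
	}

	return 0;
}

Indexing the unwind with j also keeps the error path correct when the very first request fails, since nothing gets freed in that case.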
Signed-off-by: Haim Dreyfuss Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/pcie/trans.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index f1a506b609d7..5e1a13e82d60 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c @@ -1500,8 +1500,8 @@ static int iwl_pcie_init_msix_handler(struct pci_dev *pdev, IWL_ERR(trans_pcie->trans, "Error allocating IRQ %d\n", i); for (j = 0; j < i; j++) - free_irq(trans_pcie->msix_entries[i].vector, - &trans_pcie->msix_entries[i]); + free_irq(trans_pcie->msix_entries[j].vector, + &trans_pcie->msix_entries[j]); pci_disable_msix(pdev); return ret; } From a6017b9030f280ced61b825757b26f042e0785da Mon Sep 17 00:00:00 2001 From: Golan Ben-Ami Date: Mon, 14 Mar 2016 12:24:20 +0200 Subject: [PATCH 0116/1649] iwlwifi: store fw memory segments length and addresses in run-time Currently reading the fw memory segments is done according to addresses and data length that are hard-coded. Lately a new tlv was appended to the ucode, that contains the data type, length and address. Parse this tlv, and in run-time store the memory segments length and addresses that would be dumped upon a fw error. Signed-off-by: Golan Ben-Ami Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/iwl-drv.c | 45 ++++++++++++++ .../net/wireless/intel/iwlwifi/iwl-fw-file.h | 32 ++++++++++ drivers/net/wireless/intel/iwlwifi/iwl-fw.h | 2 + .../net/wireless/intel/iwlwifi/mvm/fw-dbg.c | 61 ++++++++++++++----- 4 files changed, 125 insertions(+), 15 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c index 605910f71037..48e873732d4e 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c @@ -179,6 +179,8 @@ static void iwl_dealloc_ucode(struct iwl_drv *drv) kfree(drv->fw.dbg_conf_tlv[i]); for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_trigger_tlv); i++) kfree(drv->fw.dbg_trigger_tlv[i]); + for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_mem_tlv); i++) + kfree(drv->fw.dbg_mem_tlv[i]); for (i = 0; i < IWL_UCODE_TYPE_MAX; i++) iwl_free_fw_img(drv, drv->fw.img + i); @@ -297,6 +299,7 @@ struct iwl_firmware_pieces { size_t dbg_conf_tlv_len[FW_DBG_CONF_MAX]; struct iwl_fw_dbg_trigger_tlv *dbg_trigger_tlv[FW_DBG_TRIGGER_MAX]; size_t dbg_trigger_tlv_len[FW_DBG_TRIGGER_MAX]; + struct iwl_fw_dbg_mem_seg_tlv *dbg_mem_tlv[FW_DBG_MEM_MAX]; }; /* @@ -1041,6 +1044,37 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv, iwl_store_gscan_capa(&drv->fw, tlv_data, tlv_len); gscan_capa = true; break; + case IWL_UCODE_TLV_FW_MEM_SEG: { + struct iwl_fw_dbg_mem_seg_tlv *dbg_mem = + (void *)tlv_data; + u32 type; + + if (tlv_len != (sizeof(*dbg_mem))) + goto invalid_tlv_len; + + type = le32_to_cpu(dbg_mem->data_type); + drv->fw.dbg_dynamic_mem = true; + + if (type >= ARRAY_SIZE(drv->fw.dbg_mem_tlv)) { + IWL_ERR(drv, + "Skip unknown dbg mem segment: %u\n", + dbg_mem->data_type); + break; + } + + if (pieces->dbg_mem_tlv[type]) { + IWL_ERR(drv, + "Ignore duplicate mem segment: %u\n", + dbg_mem->data_type); + break; + } + + IWL_DEBUG_INFO(drv, "Found debug memory segment: %u\n", + dbg_mem->data_type); + + pieces->dbg_mem_tlv[type] = dbg_mem; + break; + } default: IWL_DEBUG_INFO(drv, "unknown TLV: %d\n", tlv_type); break; @@ -1350,6 +1384,17 @@ static void iwl_req_fw_callback(const struct firmware 
*ucode_raw, void *context) } } + for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_mem_tlv); i++) { + if (pieces->dbg_mem_tlv[i]) { + drv->fw.dbg_mem_tlv[i] = + kmemdup(pieces->dbg_mem_tlv[i], + sizeof(*drv->fw.dbg_mem_tlv[i]), + GFP_KERNEL); + if (!drv->fw.dbg_mem_tlv[i]) + goto out_free_fw; + } + } + /* Now that we can no longer fail, copy information */ /* diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h b/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h index a6e8826c61db..843232bd8bbe 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h @@ -142,6 +142,7 @@ enum iwl_ucode_tlv_type { IWL_UCODE_TLV_FW_DBG_CONF = 39, IWL_UCODE_TLV_FW_DBG_TRIGGER = 40, IWL_UCODE_TLV_FW_GSCAN_CAPA = 50, + IWL_UCODE_TLV_FW_MEM_SEG = 51, }; struct iwl_ucode_tlv { @@ -491,6 +492,37 @@ enum iwl_fw_dbg_monitor_mode { MIPI_MODE = 3, }; +/** + * enum iwl_fw_mem_seg_type - data types for dumping on error + * + * @FW_DBG_MEM_SMEM: the data type is SMEM + * @FW_DBG_MEM_DCCM_LMAC: the data type is DCCM_LMAC + * @FW_DBG_MEM_DCCM_UMAC: the data type is DCCM_UMAC + */ +enum iwl_fw_dbg_mem_seg_type { + FW_DBG_MEM_DCCM_LMAC = 0, + FW_DBG_MEM_DCCM_UMAC, + FW_DBG_MEM_SMEM, + + /* Must be last */ + FW_DBG_MEM_MAX, +}; + +/** + * struct iwl_fw_dbg_mem_seg_tlv - configures the debug data memory segments + * + * @data_type: enum %iwl_fw_mem_seg_type + * @ofs: the memory segment offset + * @len: the memory segment length, in bytes + * + * This parses IWL_UCODE_TLV_FW_MEM_SEG + */ +struct iwl_fw_dbg_mem_seg_tlv { + __le32 data_type; + __le32 ofs; + __le32 len; +} __packed; + /** * struct iwl_fw_dbg_dest_tlv - configures the destination of the debug data * diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fw.h b/drivers/net/wireless/intel/iwlwifi/iwl-fw.h index 2942571c613f..e461d631893a 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-fw.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-fw.h @@ -286,6 +286,8 @@ struct iwl_fw { struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_CONF_MAX]; size_t dbg_conf_tlv_len[FW_DBG_CONF_MAX]; struct iwl_fw_dbg_trigger_tlv *dbg_trigger_tlv[FW_DBG_TRIGGER_MAX]; + struct iwl_fw_dbg_mem_seg_tlv *dbg_mem_tlv[FW_DBG_MEM_MAX]; + bool dbg_dynamic_mem; size_t dbg_trigger_tlv_len[FW_DBG_TRIGGER_MAX]; u8 dbg_dest_reg_num; struct iwl_gscan_capabilities gscan_capa; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c index 6ef706c13cda..cbb5947b3fab 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c @@ -488,9 +488,11 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) struct iwl_fw_error_dump_trigger_desc *dump_trig; struct iwl_mvm_dump_ptrs *fw_error_dump; u32 sram_len, sram_ofs; + struct iwl_fw_dbg_mem_seg_tlv * const *fw_dbg_mem = + mvm->fw->dbg_mem_tlv; u32 file_len, fifo_data_len = 0, prph_len = 0, radio_len = 0; - u32 smem_len = mvm->cfg->smem_len; - u32 sram2_len = mvm->cfg->dccm2_len; + u32 smem_len = mvm->fw->dbg_dynamic_mem ? 0 : mvm->cfg->smem_len; + u32 sram2_len = mvm->fw->dbg_dynamic_mem ? 
0 : mvm->cfg->dccm2_len; bool monitor_dump_only = false; int i; @@ -586,7 +588,6 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) file_len = sizeof(*dump_file) + sizeof(*dump_data) * 2 + - sram_len + sizeof(*dump_mem) + fifo_data_len + prph_len + radio_len + @@ -600,6 +601,13 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) if (sram2_len) file_len += sizeof(*dump_data) + sizeof(*dump_mem) + sram2_len; + /* Make room for MEM segments */ + for (i = 0; i < ARRAY_SIZE(mvm->fw->dbg_mem_tlv); i++) { + if (fw_dbg_mem[i]) + file_len += sizeof(*dump_data) + sizeof(*dump_mem) + + le32_to_cpu(fw_dbg_mem[i]->len); + } + /* Make room for fw's virtual image pages, if it exists */ if (mvm->fw->img[mvm->cur_ucode].paging_mem_size) file_len += mvm->num_of_paging_blk * @@ -625,6 +633,9 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) file_len += sizeof(*dump_data) + sizeof(*dump_trig) + mvm->fw_dump_desc->len; + if (!mvm->fw->dbg_dynamic_mem) + file_len += sram_len + sizeof(*dump_mem); + dump_file = vzalloc(file_len); if (!dump_file) { kfree(fw_error_dump); @@ -674,16 +685,36 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) if (monitor_dump_only) goto dump_trans_data; - dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM); - dump_data->len = cpu_to_le32(sram_len + sizeof(*dump_mem)); - dump_mem = (void *)dump_data->data; - dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SRAM); - dump_mem->offset = cpu_to_le32(sram_ofs); - iwl_trans_read_mem_bytes(mvm->trans, sram_ofs, dump_mem->data, - sram_len); + if (!mvm->fw->dbg_dynamic_mem) { + dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM); + dump_data->len = cpu_to_le32(sram_len + sizeof(*dump_mem)); + dump_mem = (void *)dump_data->data; + dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SRAM); + dump_mem->offset = cpu_to_le32(sram_ofs); + iwl_trans_read_mem_bytes(mvm->trans, sram_ofs, dump_mem->data, + sram_len); + dump_data = iwl_fw_error_next_data(dump_data); + } + + for (i = 0; i < ARRAY_SIZE(mvm->fw->dbg_mem_tlv); i++) { + if (fw_dbg_mem[i]) { + u32 len = le32_to_cpu(fw_dbg_mem[i]->len); + u32 ofs = le32_to_cpu(fw_dbg_mem[i]->ofs); + + dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM); + dump_data->len = cpu_to_le32(len + + sizeof(*dump_mem)); + dump_mem = (void *)dump_data->data; + dump_mem->type = fw_dbg_mem[i]->data_type; + dump_mem->offset = cpu_to_le32(ofs); + iwl_trans_read_mem_bytes(mvm->trans, ofs, + dump_mem->data, + len); + dump_data = iwl_fw_error_next_data(dump_data); + } + } if (smem_len) { - dump_data = iwl_fw_error_next_data(dump_data); dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM); dump_data->len = cpu_to_le32(smem_len + sizeof(*dump_mem)); dump_mem = (void *)dump_data->data; @@ -691,10 +722,10 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) dump_mem->offset = cpu_to_le32(mvm->cfg->smem_offset); iwl_trans_read_mem_bytes(mvm->trans, mvm->cfg->smem_offset, dump_mem->data, smem_len); + dump_data = iwl_fw_error_next_data(dump_data); } if (sram2_len) { - dump_data = iwl_fw_error_next_data(dump_data); dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM); dump_data->len = cpu_to_le32(sram2_len + sizeof(*dump_mem)); dump_mem = (void *)dump_data->data; @@ -702,11 +733,11 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) dump_mem->offset = cpu_to_le32(mvm->cfg->dccm2_offset); iwl_trans_read_mem_bytes(mvm->trans, mvm->cfg->dccm2_offset, dump_mem->data, sram2_len); + dump_data = iwl_fw_error_next_data(dump_data); } if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_8000 && CSR_HW_REV_STEP(mvm->trans->hw_rev) == SILICON_B_STEP) 
{ - dump_data = iwl_fw_error_next_data(dump_data); dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM); dump_data->len = cpu_to_le32(IWL8260_ICCM_LEN + sizeof(*dump_mem)); @@ -715,6 +746,7 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) dump_mem->offset = cpu_to_le32(IWL8260_ICCM_OFFSET); iwl_trans_read_mem_bytes(mvm->trans, IWL8260_ICCM_OFFSET, dump_mem->data, IWL8260_ICCM_LEN); + dump_data = iwl_fw_error_next_data(dump_data); } /* Dump fw's virtual image */ @@ -724,7 +756,6 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) struct page *pages = mvm->fw_paging_db[i].fw_paging_block; - dump_data = iwl_fw_error_next_data(dump_data); dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PAGING); dump_data->len = cpu_to_le32(sizeof(*paging) + PAGING_BLOCK_SIZE); @@ -732,10 +763,10 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) paging->index = cpu_to_le32(i); memcpy(paging->data, page_address(pages), PAGING_BLOCK_SIZE); + dump_data = iwl_fw_error_next_data(dump_data); } } - dump_data = iwl_fw_error_next_data(dump_data); if (prph_len) iwl_dump_prph(mvm->trans, &dump_data); From d2515a99b2da2bf08d5a1decb7b365e25adbccea Mon Sep 17 00:00:00 2001 From: Liad Kaufman Date: Wed, 23 Mar 2016 16:31:08 +0200 Subject: [PATCH 0117/1649] iwlwifi: mvm: fix inconsistent lock in dqa mode When working in DQA mode, there is a lockdep log warning about an inconsistent state of the mvmsta->lock and the mvm->queue_info_lock. Fix this. This mode is not activated for now. Signed-off-by: Liad Kaufman Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/mvm/sta.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c index e157bd5a2204..12614b7b7fe7 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c @@ -296,7 +296,7 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm, lockdep_assert_held(&mvm->mutex); - spin_lock(&mvm->queue_info_lock); + spin_lock_bh(&mvm->queue_info_lock); /* * Non-QoS, QoS NDP and MGMT frames should go to a MGMT queue, if one @@ -324,7 +324,7 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm, if (queue >= 0) mvm->queue_info[queue].setup_reserved = false; - spin_unlock(&mvm->queue_info_lock); + spin_unlock_bh(&mvm->queue_info_lock); /* TODO: support shared queues for same RA */ if (queue < 0) @@ -402,12 +402,12 @@ static void iwl_mvm_tx_deferred_stream(struct iwl_mvm *mvm, __skb_queue_head_init(&deferred_tx); + /* Disable bottom-halves when entering TX path */ + local_bh_disable(); spin_lock(&mvmsta->lock); skb_queue_splice_init(&tid_data->deferred_tx_frames, &deferred_tx); spin_unlock(&mvmsta->lock); - /* Disable bottom-halves when entering TX path */ - local_bh_disable(); while ((skb = __skb_dequeue(&deferred_tx))) if (no_queue || iwl_mvm_tx_skb(mvm, skb, sta)) ieee80211_free_txskb(mvm->hw, skb); From 489c546dcecbddbadcbef25472d8fb4d693850e2 Mon Sep 17 00:00:00 2001 From: Luca Coelho Date: Thu, 24 Mar 2016 11:10:12 +0200 Subject: [PATCH 0118/1649] iwlwifi: mvm: allow setting the thermal state in D0i3 We were not allowing the thermal state to be set when we were in D0i3 mode. It was not very clearly specified how it should work, but now a decision was made to allow the state to be set in D0i3 (which will cause a brief wake up). Remove the check in the set_cur_state operation. 
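For reference, this is roughly the shape of a set_cur_state handler once such an early return is dropped: validate the requested state under the driver lock and program it, accepting that doing so may briefly wake the device. The context structure, MY_NUM_BUDGETS and the field names below are assumptions for illustration only, not the driver's actual code.

#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/thermal.h>

#define MY_NUM_BUDGETS 16 /* hypothetical number of cooling states */

struct my_thermal_ctx {
	struct mutex lock;
	unsigned long cur_state;
};

static int my_tcool_set_cur_state(struct thermal_cooling_device *cdev,
				  unsigned long new_state)
{
	struct my_thermal_ctx *ctx = cdev->devdata;
	int ret = 0;

	/* no early -EBUSY return for low-power states any more */
	mutex_lock(&ctx->lock);

	if (new_state >= MY_NUM_BUDGETS) {
		ret = -EINVAL;
	} else {
		ctx->cur_state = new_state;
		/* ...push the new budget to the firmware here... */
	}

	mutex_unlock(&ctx->lock);
	return ret;
}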
Signed-off-by: Luca Coelho Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/mvm/tt.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c index 3f5df76f65a4..eb3f460ce1b6 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c @@ -801,9 +801,6 @@ static int iwl_mvm_tcool_set_cur_state(struct thermal_cooling_device *cdev, if (!mvm->ucode_loaded || !(mvm->cur_ucode == IWL_UCODE_REGULAR)) return -EIO; - if (test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status)) - return -EBUSY; - mutex_lock(&mvm->mutex); if (new_state >= ARRAY_SIZE(iwl_mvm_cdev_budgets)) { From 46167a8fd4248533ad15867e6988ff20e76de641 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Mon, 28 Mar 2016 12:33:44 +0100 Subject: [PATCH 0119/1649] iwlwifi: pcie: remove duplicate assignment of variable isr_stats isr_stats is written twice with the same value; remove one of the redundant assignments to isr_stats. Signed-off-by: Colin Ian King Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/pcie/rx.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c index 59a7e45b12df..7f8a2322cda2 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c @@ -1811,7 +1811,7 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id) struct msix_entry *entry = dev_id; struct iwl_trans_pcie *trans_pcie = iwl_pcie_get_trans_pcie(entry); struct iwl_trans *trans = trans_pcie->trans; - struct isr_statistics *isr_stats = isr_stats = &trans_pcie->isr_stats; + struct isr_statistics *isr_stats = &trans_pcie->isr_stats; u32 inta_fh, inta_hw; lock_map_acquire(&trans->sync_cmd_lockdep_map); From 5482cefaca89995c75f2ad507dd7810b0b1079c5 Mon Sep 17 00:00:00 2001 From: Srinivas Kandagatla Date: Thu, 21 Jan 2016 18:35:13 +0000 Subject: [PATCH 0120/1649] MAINTAINERS: add qcom i2c and spi drivers to list This patch adds the i2c-qup and spi-qup drivers to the Qualcomm maintainer list, so that the get_maintainer script can find the correct people to send patches to. Signed-off-by: Srinivas Kandagatla Acked-by: Ivan T. Ivanov Signed-off-by: Andy Gross --- MAINTAINERS | 2 ++ 1 file changed, 2 insertions(+) diff --git a/MAINTAINERS b/MAINTAINERS index 03e00c7c88eb..fcbd452ab62b 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -1470,7 +1470,9 @@ F: arch/arm/boot/dts/qcom-*.dts F: arch/arm/boot/dts/qcom-*.dtsi F: arch/arm/mach-qcom/ F: arch/arm64/boot/dts/qcom/* +F: drivers/i2c/busses/i2c-qup.c F: drivers/soc/qcom/ +F: drivers/spi/spi-qup.c F: drivers/tty/serial/msm_serial.h F: drivers/tty/serial/msm_serial.c F: drivers/*/pm8???-* From 39a3366a31386eb58f6e5857cd49cebad3253ab8 Mon Sep 17 00:00:00 2001 From: Srinivas Kandagatla Date: Mon, 22 Feb 2016 11:55:26 +0000 Subject: [PATCH 0121/1649] MAINTAINERS: add qcom clocks to the maintainers list This patch adds qcom clock drivers to the QCOM/MSM support list so that get_maintainer.pl can pick up the correct cc list.
Signed-off-by: Srinivas Kandagatla Signed-off-by: Andy Gross --- MAINTAINERS | 1 + 1 file changed, 1 insertion(+) diff --git a/MAINTAINERS b/MAINTAINERS index fcbd452ab62b..ce1769341475 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -1471,6 +1471,7 @@ F: arch/arm/boot/dts/qcom-*.dtsi F: arch/arm/mach-qcom/ F: arch/arm64/boot/dts/qcom/* F: drivers/i2c/busses/i2c-qup.c +F: drivers/clk/qcom/ F: drivers/soc/qcom/ F: drivers/spi/spi-qup.c F: drivers/tty/serial/msm_serial.h From e8b123e6008480b2b8d80dae060315d84b79f4bb Mon Sep 17 00:00:00 2001 From: Bjorn Andersson Date: Thu, 24 Dec 2015 00:28:38 -0800 Subject: [PATCH 0122/1649] soc: qcom: smem_state: Add stubs for disabled smem_state Signed-off-by: Bjorn Andersson Signed-off-by: Andy Gross --- include/linux/soc/qcom/smem_state.h | 35 +++++++++++++++++++++++++++++ 1 file changed, 35 insertions(+) diff --git a/include/linux/soc/qcom/smem_state.h b/include/linux/soc/qcom/smem_state.h index f35e1512fcaa..7b88697929e9 100644 --- a/include/linux/soc/qcom/smem_state.h +++ b/include/linux/soc/qcom/smem_state.h @@ -1,12 +1,17 @@ #ifndef __QCOM_SMEM_STATE__ #define __QCOM_SMEM_STATE__ +#include + +struct device_node; struct qcom_smem_state; struct qcom_smem_state_ops { int (*update_bits)(void *, u32, u32); }; +#ifdef CONFIG_QCOM_SMEM_STATE + struct qcom_smem_state *qcom_smem_state_get(struct device *dev, const char *con_id, unsigned *bit); void qcom_smem_state_put(struct qcom_smem_state *); @@ -15,4 +20,34 @@ int qcom_smem_state_update_bits(struct qcom_smem_state *state, u32 mask, u32 val struct qcom_smem_state *qcom_smem_state_register(struct device_node *of_node, const struct qcom_smem_state_ops *ops, void *data); void qcom_smem_state_unregister(struct qcom_smem_state *state); +#else + +static inline struct qcom_smem_state *qcom_smem_state_get(struct device *dev, + const char *con_id, unsigned *bit) +{ + return ERR_PTR(-EINVAL); +} + +static inline void qcom_smem_state_put(struct qcom_smem_state *state) +{ +} + +static inline int qcom_smem_state_update_bits(struct qcom_smem_state *state, + u32 mask, u32 value) +{ + return -EINVAL; +} + +static inline struct qcom_smem_state *qcom_smem_state_register(struct device_node *of_node, + const struct qcom_smem_state_ops *ops, void *data) +{ + return ERR_PTR(-EINVAL); +} + +static inline void qcom_smem_state_unregister(struct qcom_smem_state *state) +{ +} + +#endif + #endif From 3b904b046c7adfbadb124851c7a23276f7187ddb Mon Sep 17 00:00:00 2001 From: Lina Iyer Date: Mon, 4 Jan 2016 10:58:28 -0700 Subject: [PATCH 0123/1649] drivers: qcom: spm: avoid module usage in non-modular SPM driver SPM driver provides cpuidle support on some QC SoC's. The functionality is non-modular and there is no need for module support. Convert module platform init to builtin platform driver init. The driver functionality is not affected by this change. Cc: Paul Gortmaker Signed-off-by: Lina Iyer Acked-by: Daniel Lezcano Signed-off-by: Andy Gross --- drivers/soc/qcom/spm.c | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/drivers/soc/qcom/spm.c b/drivers/soc/qcom/spm.c index 5548a31e1a39..f324451e0940 100644 --- a/drivers/soc/qcom/spm.c +++ b/drivers/soc/qcom/spm.c @@ -2,6 +2,8 @@ * Copyright (c) 2011-2014, The Linux Foundation. All rights reserved. * Copyright (c) 2014,2015, Linaro Ltd. 
* + * SAW power controller driver + * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. @@ -12,7 +14,6 @@ * GNU General Public License for more details. */ -#include #include #include #include @@ -378,8 +379,5 @@ static struct platform_driver spm_driver = { .of_match_table = spm_match_table, }, }; -module_platform_driver(spm_driver); -MODULE_LICENSE("GPL v2"); -MODULE_DESCRIPTION("SAW power controller driver"); -MODULE_ALIAS("platform:saw"); +builtin_platform_driver(spm_driver); From 39f0db298e7c02a29371fb39cabdd5d76e6b726c Mon Sep 17 00:00:00 2001 From: Bjorn Andersson Date: Wed, 17 Feb 2016 22:39:02 -0800 Subject: [PATCH 0124/1649] soc: qcom: smd: Introduce callback setter Introduce a setter for the callback function pointer to clarify the locking around the operation and to reduce some duplication. Signed-off-by: Bjorn Andersson Signed-off-by: Bjorn Andersson Signed-off-by: Andy Gross --- drivers/soc/qcom/smd.c | 25 +++++++++++++++++-------- include/linux/soc/qcom/smd.h | 4 +++- 2 files changed, 20 insertions(+), 9 deletions(-) diff --git a/drivers/soc/qcom/smd.c b/drivers/soc/qcom/smd.c index 498fd0581a45..c357842b92e1 100644 --- a/drivers/soc/qcom/smd.c +++ b/drivers/soc/qcom/smd.c @@ -186,7 +186,7 @@ struct qcom_smd_channel { int fifo_size; void *bounce_buffer; - int (*cb)(struct qcom_smd_device *, const void *, size_t); + qcom_smd_cb_t cb; spinlock_t recv_lock; @@ -377,6 +377,19 @@ static void qcom_smd_channel_reset(struct qcom_smd_channel *channel) channel->pkt_size = 0; } +/* + * Set the callback for a channel, with appropriate locking + */ +static void qcom_smd_channel_set_callback(struct qcom_smd_channel *channel, + qcom_smd_cb_t cb) +{ + unsigned long flags; + + spin_lock_irqsave(&channel->recv_lock, flags); + channel->cb = cb; + spin_unlock_irqrestore(&channel->recv_lock, flags); +}; + /* * Calculate the amount of data available in the rx fifo */ @@ -814,8 +827,7 @@ static int qcom_smd_dev_probe(struct device *dev) if (!channel->bounce_buffer) return -ENOMEM; - channel->cb = qsdrv->callback; - + qcom_smd_channel_set_callback(channel, qsdrv->callback); qcom_smd_channel_set_state(channel, SMD_CHANNEL_OPENING); qcom_smd_channel_set_state(channel, SMD_CHANNEL_OPENED); @@ -831,7 +843,7 @@ static int qcom_smd_dev_probe(struct device *dev) err: dev_err(&qsdev->dev, "probe failed\n"); - channel->cb = NULL; + qcom_smd_channel_set_callback(channel, NULL); kfree(channel->bounce_buffer); channel->bounce_buffer = NULL; @@ -850,16 +862,13 @@ static int qcom_smd_dev_remove(struct device *dev) struct qcom_smd_device *qsdev = to_smd_device(dev); struct qcom_smd_driver *qsdrv = to_smd_driver(dev); struct qcom_smd_channel *channel = qsdev->channel; - unsigned long flags; qcom_smd_channel_set_state(channel, SMD_CHANNEL_CLOSING); /* * Make sure we don't race with the code receiving data. 
*/ - spin_lock_irqsave(&channel->recv_lock, flags); - channel->cb = NULL; - spin_unlock_irqrestore(&channel->recv_lock, flags); + qcom_smd_channel_set_callback(channel, NULL); /* Wake up any sleepers in qcom_smd_send() */ wake_up_interruptible(&channel->fblockread_event); diff --git a/include/linux/soc/qcom/smd.h b/include/linux/soc/qcom/smd.h index d0cb6d189a0a..65a64fcdb1aa 100644 --- a/include/linux/soc/qcom/smd.h +++ b/include/linux/soc/qcom/smd.h @@ -26,6 +26,8 @@ struct qcom_smd_device { struct qcom_smd_channel *channel; }; +typedef int (*qcom_smd_cb_t)(struct qcom_smd_device *, const void *, size_t); + /** * struct qcom_smd_driver - smd driver struct * @driver: underlying device driver @@ -42,7 +44,7 @@ struct qcom_smd_driver { int (*probe)(struct qcom_smd_device *dev); void (*remove)(struct qcom_smd_device *dev); - int (*callback)(struct qcom_smd_device *, const void *, size_t); + qcom_smd_cb_t callback; }; int qcom_smd_driver_register(struct qcom_smd_driver *drv); From 995b170aeaef4afe0c3469d14b9c80ff2e8a98d7 Mon Sep 17 00:00:00 2001 From: Bjorn Andersson Date: Wed, 17 Feb 2016 22:39:03 -0800 Subject: [PATCH 0125/1649] soc: qcom: smd: Split discovery and state change work Split the two steps of channel discovery and state change handling into two different workers. This allows for new channels to be found while we're are probing, which is required as we introduce multi-channel support. Signed-off-by: Bjorn Andersson Signed-off-by: Bjorn Andersson Signed-off-by: Andy Gross --- drivers/soc/qcom/smd.c | 58 ++++++++++++++++++++++-------------------- 1 file changed, 31 insertions(+), 27 deletions(-) diff --git a/drivers/soc/qcom/smd.c b/drivers/soc/qcom/smd.c index c357842b92e1..e8972ddfee85 100644 --- a/drivers/soc/qcom/smd.c +++ b/drivers/soc/qcom/smd.c @@ -106,9 +106,9 @@ static const struct { * @channels: list of all channels detected on this edge * @channels_lock: guard for modifications of @channels * @allocated: array of bitmaps representing already allocated channels - * @need_rescan: flag that the @work needs to scan smem for new channels * @smem_available: last available amount of smem triggering a channel scan - * @work: work item for edge house keeping + * @scan_work: work item for discovering new channels + * @state_work: work item for edge state changes */ struct qcom_smd_edge { struct qcom_smd *smd; @@ -127,10 +127,10 @@ struct qcom_smd_edge { DECLARE_BITMAP(allocated[SMD_ALLOC_TBL_COUNT], SMD_ALLOC_TBL_SIZE); - bool need_rescan; unsigned smem_available; - struct work_struct work; + struct work_struct scan_work; + struct work_struct state_work; }; /* @@ -614,7 +614,8 @@ static irqreturn_t qcom_smd_edge_intr(int irq, void *data) struct qcom_smd_edge *edge = data; struct qcom_smd_channel *channel; unsigned available; - bool kick_worker = false; + bool kick_scanner = false; + bool kick_state = false; /* * Handle state changes or data on each of the channels on this edge @@ -622,7 +623,7 @@ static irqreturn_t qcom_smd_edge_intr(int irq, void *data) spin_lock(&edge->channels_lock); list_for_each_entry(channel, &edge->channels, list) { spin_lock(&channel->recv_lock); - kick_worker |= qcom_smd_channel_intr(channel); + kick_state |= qcom_smd_channel_intr(channel); spin_unlock(&channel->recv_lock); } spin_unlock(&edge->channels_lock); @@ -635,12 +636,13 @@ static irqreturn_t qcom_smd_edge_intr(int irq, void *data) available = qcom_smem_get_free_space(edge->remote_pid); if (available != edge->smem_available) { edge->smem_available = available; - edge->need_rescan = true; - 
kick_worker = true; + kick_scanner = true; } - if (kick_worker) - schedule_work(&edge->work); + if (kick_scanner) + schedule_work(&edge->scan_work); + if (kick_state) + schedule_work(&edge->state_work); return IRQ_HANDLED; } @@ -1098,8 +1100,9 @@ free_name_and_channel: * qcom_smd_create_channel() to create representations of these and add * them to the edge's list of channels. */ -static void qcom_discover_channels(struct qcom_smd_edge *edge) +static void qcom_channel_scan_worker(struct work_struct *work) { + struct qcom_smd_edge *edge = container_of(work, struct qcom_smd_edge, scan_work); struct qcom_smd_alloc_entry *alloc_tbl; struct qcom_smd_alloc_entry *entry; struct qcom_smd_channel *channel; @@ -1152,7 +1155,7 @@ static void qcom_discover_channels(struct qcom_smd_edge *edge) } } - schedule_work(&edge->work); + schedule_work(&edge->state_work); } /* @@ -1160,29 +1163,23 @@ static void qcom_discover_channels(struct qcom_smd_edge *edge) * then scans all registered channels for state changes that should be handled * by creating or destroying smd client devices for the registered channels. * - * LOCKING: edge->channels_lock is not needed to be held during the traversal - * of the channels list as it's done synchronously with the only writer. + * LOCKING: edge->channels_lock only needs to cover the list operations, as the + * worker is killed before any channels are deallocated */ static void qcom_channel_state_worker(struct work_struct *work) { struct qcom_smd_channel *channel; struct qcom_smd_edge *edge = container_of(work, struct qcom_smd_edge, - work); + state_work); unsigned remote_state; - - /* - * Rescan smem if we have reason to belive that there are new channels. - */ - if (edge->need_rescan) { - edge->need_rescan = false; - qcom_discover_channels(edge); - } + unsigned long flags; /* * Register a device for any closed channel where the remote processor * is showing interest in opening the channel. 
*/ + spin_lock_irqsave(&edge->channels_lock, flags); list_for_each_entry(channel, &edge->channels, list) { if (channel->state != SMD_CHANNEL_CLOSED) continue; @@ -1192,7 +1189,9 @@ static void qcom_channel_state_worker(struct work_struct *work) remote_state != SMD_CHANNEL_OPENED) continue; + spin_unlock_irqrestore(&edge->channels_lock, flags); qcom_smd_create_device(channel); + spin_lock_irqsave(&edge->channels_lock, flags); } /* @@ -1209,8 +1208,11 @@ static void qcom_channel_state_worker(struct work_struct *work) remote_state == SMD_CHANNEL_OPENED) continue; + spin_unlock_irqrestore(&edge->channels_lock, flags); qcom_smd_destroy_device(channel); + spin_lock_irqsave(&edge->channels_lock, flags); } + spin_unlock_irqrestore(&edge->channels_lock, flags); } /* @@ -1228,7 +1230,8 @@ static int qcom_smd_parse_edge(struct device *dev, INIT_LIST_HEAD(&edge->channels); spin_lock_init(&edge->channels_lock); - INIT_WORK(&edge->work, qcom_channel_state_worker); + INIT_WORK(&edge->scan_work, qcom_channel_scan_worker); + INIT_WORK(&edge->state_work, qcom_channel_state_worker); edge->of_node = of_node_get(node); @@ -1317,8 +1320,7 @@ static int qcom_smd_probe(struct platform_device *pdev) if (ret) continue; - edge->need_rescan = true; - schedule_work(&edge->work); + schedule_work(&edge->scan_work); } platform_set_drvdata(pdev, smd); @@ -1341,8 +1343,10 @@ static int qcom_smd_remove(struct platform_device *pdev) edge = &smd->edges[i]; disable_irq(edge->irq); - cancel_work_sync(&edge->work); + cancel_work_sync(&edge->scan_work); + cancel_work_sync(&edge->state_work); + /* No need to lock here, because the writer is gone */ list_for_each_entry(channel, &edge->channels, list) { if (!channel->qsdev) continue; From 3fd3f2fd86478614fecbe261b201779b4fc6abd2 Mon Sep 17 00:00:00 2001 From: Bjorn Andersson Date: Wed, 17 Feb 2016 22:39:04 -0800 Subject: [PATCH 0126/1649] soc: qcom: smd: Refactor channel open and close handling Refactor opening and closing of channels into two separate functions instead of open coding this in the various places. Signed-off-by: Bjorn Andersson Signed-off-by: Bjorn Andersson Signed-off-by: Andy Gross --- drivers/soc/qcom/smd.c | 66 +++++++++++++++++++++++++++--------------- 1 file changed, 42 insertions(+), 24 deletions(-) diff --git a/drivers/soc/qcom/smd.c b/drivers/soc/qcom/smd.c index e8972ddfee85..d253e5cc233f 100644 --- a/drivers/soc/qcom/smd.c +++ b/drivers/soc/qcom/smd.c @@ -807,6 +807,43 @@ static int qcom_smd_dev_match(struct device *dev, struct device_driver *drv) return of_driver_match_device(dev, drv); } +/* + * Helper for opening a channel + */ +static int qcom_smd_channel_open(struct qcom_smd_channel *channel, + qcom_smd_cb_t cb) +{ + size_t bb_size; + + /* + * Packets are maximum 4k, but reduce if the fifo is smaller + */ + bb_size = min(channel->fifo_size, SZ_4K); + channel->bounce_buffer = kmalloc(bb_size, GFP_KERNEL); + if (!channel->bounce_buffer) + return -ENOMEM; + + qcom_smd_channel_set_callback(channel, cb); + qcom_smd_channel_set_state(channel, SMD_CHANNEL_OPENING); + qcom_smd_channel_set_state(channel, SMD_CHANNEL_OPENED); + + return 0; +} + +/* + * Helper for closing and resetting a channel + */ +static void qcom_smd_channel_close(struct qcom_smd_channel *channel) +{ + qcom_smd_channel_set_callback(channel, NULL); + + kfree(channel->bounce_buffer); + channel->bounce_buffer = NULL; + + qcom_smd_channel_set_state(channel, SMD_CHANNEL_CLOSED); + qcom_smd_channel_reset(channel); +} + /* * Probe the smd client. 
* @@ -818,21 +855,11 @@ static int qcom_smd_dev_probe(struct device *dev) struct qcom_smd_device *qsdev = to_smd_device(dev); struct qcom_smd_driver *qsdrv = to_smd_driver(dev); struct qcom_smd_channel *channel = qsdev->channel; - size_t bb_size; int ret; - /* - * Packets are maximum 4k, but reduce if the fifo is smaller - */ - bb_size = min(channel->fifo_size, SZ_4K); - channel->bounce_buffer = kmalloc(bb_size, GFP_KERNEL); - if (!channel->bounce_buffer) - return -ENOMEM; - - qcom_smd_channel_set_callback(channel, qsdrv->callback); - qcom_smd_channel_set_state(channel, SMD_CHANNEL_OPENING); - - qcom_smd_channel_set_state(channel, SMD_CHANNEL_OPENED); + ret = qcom_smd_channel_open(channel, qsdrv->callback); + if (ret) + return ret; ret = qsdrv->probe(qsdev); if (ret) @@ -845,11 +872,7 @@ static int qcom_smd_dev_probe(struct device *dev) err: dev_err(&qsdev->dev, "probe failed\n"); - qcom_smd_channel_set_callback(channel, NULL); - kfree(channel->bounce_buffer); - channel->bounce_buffer = NULL; - - qcom_smd_channel_set_state(channel, SMD_CHANNEL_CLOSED); + qcom_smd_channel_close(channel); return ret; } @@ -886,12 +909,7 @@ static int qcom_smd_dev_remove(struct device *dev) * The client is now gone, cleanup and reset the channel state. */ channel->qsdev = NULL; - kfree(channel->bounce_buffer); - channel->bounce_buffer = NULL; - - qcom_smd_channel_set_state(channel, SMD_CHANNEL_CLOSED); - - qcom_smd_channel_reset(channel); + qcom_smd_channel_close(channel); return 0; } From d5933855c0eb0a4103cf5db784cfdd4d7a85cd56 Mon Sep 17 00:00:00 2001 From: Bjorn Andersson Date: Wed, 17 Feb 2016 22:39:05 -0800 Subject: [PATCH 0127/1649] soc: qcom: smd: Support multiple channels per sdev This patch allows chaining additional channels to a SMD device, enabling implementation of multi-channel SMD devies - like Bluetooth. Signed-off-by: Bjorn Andersson Signed-off-by: Bjorn Andersson Signed-off-by: Andy Gross --- drivers/soc/qcom/smd.c | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/drivers/soc/qcom/smd.c b/drivers/soc/qcom/smd.c index d253e5cc233f..c3fa0fd724f7 100644 --- a/drivers/soc/qcom/smd.c +++ b/drivers/soc/qcom/smd.c @@ -193,6 +193,7 @@ struct qcom_smd_channel { int pkt_size; struct list_head list; + struct list_head dev_list; }; /** @@ -887,6 +888,8 @@ static int qcom_smd_dev_remove(struct device *dev) struct qcom_smd_device *qsdev = to_smd_device(dev); struct qcom_smd_driver *qsdrv = to_smd_driver(dev); struct qcom_smd_channel *channel = qsdev->channel; + struct qcom_smd_channel *tmp; + struct qcom_smd_channel *ch; qcom_smd_channel_set_state(channel, SMD_CHANNEL_CLOSING); @@ -906,10 +909,14 @@ static int qcom_smd_dev_remove(struct device *dev) qsdrv->remove(qsdev); /* - * The client is now gone, cleanup and reset the channel state. 
+ * The client is now gone, close and release all channels associated + * with this sdev */ - channel->qsdev = NULL; - qcom_smd_channel_close(channel); + list_for_each_entry_safe(ch, tmp, &channel->dev_list, dev_list) { + qcom_smd_channel_close(ch); + list_del(&ch->dev_list); + ch->qsdev = NULL; + } return 0; } @@ -1056,6 +1063,7 @@ static struct qcom_smd_channel *qcom_smd_create_channel(struct qcom_smd_edge *ed if (!channel) return ERR_PTR(-ENOMEM); + INIT_LIST_HEAD(&channel->dev_list); channel->edge = edge; channel->name = devm_kstrdup(smd->dev, name, GFP_KERNEL); if (!channel->name) From 028021d29ea069390e1f60c6aa5b3511d218454b Mon Sep 17 00:00:00 2001 From: Bjorn Andersson Date: Wed, 17 Feb 2016 22:39:06 -0800 Subject: [PATCH 0128/1649] soc: qcom: smd: Support opening additional channels With the qcom_smd_open_channel() API we allow SMD devices to open additional SMD channels, to allow implementation of multi-channel SMD devices - like Bluetooth. Channels are opened from the same edge as the calling SMD device is tied to. Signed-off-by: Bjorn Andersson Signed-off-by: Bjorn Andersson Signed-off-by: Andy Gross --- drivers/soc/qcom/smd.c | 76 ++++++++++++++++++++++++++++++++++++ include/linux/soc/qcom/smd.h | 4 ++ 2 files changed, 80 insertions(+) diff --git a/drivers/soc/qcom/smd.c b/drivers/soc/qcom/smd.c index c3fa0fd724f7..b6434c4be86a 100644 --- a/drivers/soc/qcom/smd.c +++ b/drivers/soc/qcom/smd.c @@ -129,6 +129,8 @@ struct qcom_smd_edge { unsigned smem_available; + wait_queue_head_t new_channel_event; + struct work_struct scan_work; struct work_struct state_work; }; @@ -1042,6 +1044,77 @@ void qcom_smd_driver_unregister(struct qcom_smd_driver *qsdrv) } EXPORT_SYMBOL(qcom_smd_driver_unregister); +static struct qcom_smd_channel * +qcom_smd_find_channel(struct qcom_smd_edge *edge, const char *name) +{ + struct qcom_smd_channel *channel; + struct qcom_smd_channel *ret = NULL; + unsigned long flags; + unsigned state; + + spin_lock_irqsave(&edge->channels_lock, flags); + list_for_each_entry(channel, &edge->channels, list) { + if (strcmp(channel->name, name)) + continue; + + state = GET_RX_CHANNEL_INFO(channel, state); + if (state != SMD_CHANNEL_OPENING && + state != SMD_CHANNEL_OPENED) + continue; + + ret = channel; + break; + } + spin_unlock_irqrestore(&edge->channels_lock, flags); + + return ret; +} + +/** + * qcom_smd_open_channel() - claim additional channels on the same edge + * @sdev: smd_device handle + * @name: channel name + * @cb: callback method to use for incoming data + * + * Returns a channel handle on success, or -EPROBE_DEFER if the channel isn't + * ready. 
+ */ +struct qcom_smd_channel *qcom_smd_open_channel(struct qcom_smd_device *sdev, + const char *name, + qcom_smd_cb_t cb) +{ + struct qcom_smd_channel *channel; + struct qcom_smd_edge *edge = sdev->channel->edge; + int ret; + + /* Wait up to HZ for the channel to appear */ + ret = wait_event_interruptible_timeout(edge->new_channel_event, + (channel = qcom_smd_find_channel(edge, name)) != NULL, + HZ); + if (!ret) + return ERR_PTR(-ETIMEDOUT); + + if (channel->state != SMD_CHANNEL_CLOSED) { + dev_err(&sdev->dev, "channel %s is busy\n", channel->name); + return ERR_PTR(-EBUSY); + } + + channel->qsdev = sdev; + ret = qcom_smd_channel_open(channel, cb); + if (ret) { + channel->qsdev = NULL; + return ERR_PTR(ret); + } + + /* + * Append the list of channel to the channels associated with the sdev + */ + list_add_tail(&channel->dev_list, &sdev->channel->dev_list); + + return channel; +} +EXPORT_SYMBOL(qcom_smd_open_channel); + /* * Allocate the qcom_smd_channel object for a newly found smd channel, * retrieving and validating the smem items involved. @@ -1178,6 +1251,8 @@ static void qcom_channel_scan_worker(struct work_struct *work) dev_dbg(smd->dev, "new channel found: '%s'\n", channel->name); set_bit(i, edge->allocated[tbl]); + + wake_up_interruptible(&edge->new_channel_event); } } @@ -1341,6 +1416,7 @@ static int qcom_smd_probe(struct platform_device *pdev) for_each_available_child_of_node(pdev->dev.of_node, node) { edge = &smd->edges[i++]; edge->smd = smd; + init_waitqueue_head(&edge->new_channel_event); ret = qcom_smd_parse_edge(&pdev->dev, node, edge); if (ret) diff --git a/include/linux/soc/qcom/smd.h b/include/linux/soc/qcom/smd.h index 65a64fcdb1aa..bd51c8a9d807 100644 --- a/include/linux/soc/qcom/smd.h +++ b/include/linux/soc/qcom/smd.h @@ -56,4 +56,8 @@ void qcom_smd_driver_unregister(struct qcom_smd_driver *drv); int qcom_smd_send(struct qcom_smd_channel *channel, const void *data, int len); +struct qcom_smd_channel *qcom_smd_open_channel(struct qcom_smd_device *sdev, + const char *name, + qcom_smd_cb_t cb); + #endif From 2349262397b89a421adfd142aad2a7dd33710f26 Mon Sep 17 00:00:00 2001 From: Yuchung Cheng Date: Wed, 30 Mar 2016 14:54:20 -0700 Subject: [PATCH 0129/1649] tcp: remove cwnd moderation after recovery For non-SACK connections, cwnd is lowered to inflight plus 3 packets when the recovery ends. This is an optional feature in the NewReno RFC 2582 to reduce the potential burst when cwnd is "re-opened" after recovery and inflight is low. This feature is questionably effective because of PRR: when the recovery ends (i.e., snd_una == high_seq) NewReno holds the CA_Recovery state for another round trip to prevent false fast retransmits. But if the inflight is low, PRR will later overwrite the moderated cwnd in tcp_cwnd_reduction() regardless. So if a receiver responds with bogus ACKs (i.e., acking future data) to speed up the transfer after recovery, it can only induce a burst of up to a window's worth of data packets by acking up to SND.NXT. A restart from (short) idle or receiving stretched ACKs can cause such bursts as well. On the other hand, if the recovery ends because the sender detects that the losses were spurious (e.g., due to reordering), this feature unconditionally lowers a reverted cwnd even though nothing was lost. On principle, the loss recovery module should not update cwnd. Furthermore, pacing is much more effective at reducing bursts. Hence this patch removes the cwnd moderation feature.
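For context, the PRR behaviour referred to above can be sketched as follows. This is a simplified, packet-counting illustration of the RFC 6937 algorithm that tcp_cwnd_reduction() implements; the function and parameter names below are illustrative only and are not the kernel's:

/* Rough sketch of PRR (RFC 6937): cwnd is recomputed from the amount of
 * data in flight on every ACK during recovery, so a cwnd that was
 * "moderated" when recovery ended is overwritten anyway. Units are packets.
 */
static unsigned int prr_cwnd_sketch(unsigned int prior_cwnd,    /* cwnd when recovery started */
                                    unsigned int ssthresh,      /* reduction target */
                                    unsigned int prr_delivered, /* delivered during recovery */
                                    unsigned int prr_out,       /* sent during recovery */
                                    unsigned int newly_acked,   /* delivered by this ACK */
                                    unsigned int in_flight)     /* packets currently in flight */
{
        unsigned int sndcnt;

        if (in_flight > ssthresh) {
                /* Proportional phase: spread the reduction across the recovery */
                sndcnt = (prr_delivered * ssthresh + prior_cwnd - 1) / prior_cwnd;
                sndcnt = sndcnt > prr_out ? sndcnt - prr_out : 0;
        } else {
                /* Slow-start back toward ssthresh, bounded by recent deliveries */
                unsigned int can_send = prr_delivered > prr_out ?
                                        prr_delivered - prr_out : 0;
                unsigned int limit = (can_send > newly_acked ? can_send : newly_acked) + 1;
                unsigned int room = ssthresh - in_flight;

                sndcnt = limit < room ? limit : room;
        }

        /* cwnd is rebuilt from the in-flight estimate on every ACK */
        return in_flight + sndcnt;
}

Because cwnd is rederived from the in-flight estimate on each incoming ACK, clamping it once when recovery ends has no lasting effect, which is the observation this patch relies on.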
v2 changes: revised commit message on bogus ACKs and burst, and missing signature Signed-off-by: Matt Mathis Signed-off-by: Neal Cardwell Signed-off-by: Soheil Hassas Yeganeh Signed-off-by: Yuchung Cheng Signed-off-by: David S. Miller --- include/net/tcp.h | 11 ----------- net/ipv4/tcp_input.c | 11 ----------- 2 files changed, 22 deletions(-) diff --git a/include/net/tcp.h b/include/net/tcp.h index b91370f61be6..f8bb4a4ed3d1 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -1039,17 +1039,6 @@ static inline __u32 tcp_max_tso_deferred_mss(const struct tcp_sock *tp) return 3; } -/* Slow start with delack produces 3 packets of burst, so that - * it is safe "de facto". This will be the default - same as - * the default reordering threshold - but if reordering increases, - * we must be able to allow cwnd to burst at least this much in order - * to not pull it back when holes are filled. - */ -static __inline__ __u32 tcp_max_burst(const struct tcp_sock *tp) -{ - return tp->reordering; -} - /* Returns end sequence number of the receiver's advertised window */ static inline u32 tcp_wnd_end(const struct tcp_sock *tp) { diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index e6e65f79ade8..f87b84a75691 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -2252,16 +2252,6 @@ static void tcp_update_scoreboard(struct sock *sk, int fast_rexmit) } } -/* CWND moderation, preventing bursts due to too big ACKs - * in dubious situations. - */ -static inline void tcp_moderate_cwnd(struct tcp_sock *tp) -{ - tp->snd_cwnd = min(tp->snd_cwnd, - tcp_packets_in_flight(tp) + tcp_max_burst(tp)); - tp->snd_cwnd_stamp = tcp_time_stamp; -} - static bool tcp_tsopt_ecr_before(const struct tcp_sock *tp, u32 when) { return tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr && @@ -2410,7 +2400,6 @@ static bool tcp_try_undo_recovery(struct sock *sk) /* Hold old state until something *above* high_seq * is ACKed. For Reno it is MUST to prevent false * fast retransmits (RFC2582). SACK TCP is safe. */ - tcp_moderate_cwnd(tp); if (!tcp_any_retrans_done(sk)) tp->retrans_stamp = 0; return true; From 7822ce73e659ab0c5dd8289f077efbcc4cd03164 Mon Sep 17 00:00:00 2001 From: Haishuang Yan Date: Thu, 31 Mar 2016 18:21:38 +0800 Subject: [PATCH 0130/1649] netlink: use nla_get_in_addr and nla_put_in_addr for ipv4 address Since nla_get_in_addr and nla_put_in_addr were implemented, so use them appropriately. Signed-off-by: Haishuang Yan Signed-off-by: David S. 
Miller --- net/ipv4/ip_tunnel_core.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c index 6165f30c4d72..b3ab1205dfdf 100644 --- a/net/ipv4/ip_tunnel_core.c +++ b/net/ipv4/ip_tunnel_core.c @@ -247,10 +247,10 @@ static int ip_tun_build_state(struct net_device *dev, struct nlattr *attr, tun_info->key.tun_id = nla_get_be64(tb[LWTUNNEL_IP_ID]); if (tb[LWTUNNEL_IP_DST]) - tun_info->key.u.ipv4.dst = nla_get_be32(tb[LWTUNNEL_IP_DST]); + tun_info->key.u.ipv4.dst = nla_get_in_addr(tb[LWTUNNEL_IP_DST]); if (tb[LWTUNNEL_IP_SRC]) - tun_info->key.u.ipv4.src = nla_get_be32(tb[LWTUNNEL_IP_SRC]); + tun_info->key.u.ipv4.src = nla_get_in_addr(tb[LWTUNNEL_IP_SRC]); if (tb[LWTUNNEL_IP_TTL]) tun_info->key.ttl = nla_get_u8(tb[LWTUNNEL_IP_TTL]); @@ -275,8 +275,8 @@ static int ip_tun_fill_encap_info(struct sk_buff *skb, struct ip_tunnel_info *tun_info = lwt_tun_info(lwtstate); if (nla_put_be64(skb, LWTUNNEL_IP_ID, tun_info->key.tun_id) || - nla_put_be32(skb, LWTUNNEL_IP_DST, tun_info->key.u.ipv4.dst) || - nla_put_be32(skb, LWTUNNEL_IP_SRC, tun_info->key.u.ipv4.src) || + nla_put_in_addr(skb, LWTUNNEL_IP_DST, tun_info->key.u.ipv4.dst) || + nla_put_in_addr(skb, LWTUNNEL_IP_SRC, tun_info->key.u.ipv4.src) || nla_put_u8(skb, LWTUNNEL_IP_TOS, tun_info->key.tos) || nla_put_u8(skb, LWTUNNEL_IP_TTL, tun_info->key.ttl) || nla_put_be16(skb, LWTUNNEL_IP_FLAGS, tun_info->key.tun_flags)) From 5ada37b53ea2b310df143b2c7d6c48fbf14d5cb8 Mon Sep 17 00:00:00 2001 From: Lisheng Date: Thu, 31 Mar 2016 21:00:09 +0800 Subject: [PATCH 0131/1649] net: hns: add support of pause frame ctrl for HNS V2 The patch adds support of pause ctrl for HNS V2, and this feature is lost by HNS V1: 1) service ports can disable rx pause frame, 2) debug ports can open tx/rx pause frame. And this patch updates the REGs about the pause ctrl when updated status function called by upper layer routine. Signed-off-by: Lisheng Signed-off-by: Yisen Zhuang Reviewed-by: Andy Shevchenko Signed-off-by: David S. 
Miller --- .../net/ethernet/hisilicon/hns/hns_ae_adapt.c | 20 ++++- .../net/ethernet/hisilicon/hns/hns_dsaf_mac.c | 30 ++------ .../ethernet/hisilicon/hns/hns_dsaf_main.c | 75 ++++++++++++++++--- .../ethernet/hisilicon/hns/hns_dsaf_main.h | 5 ++ .../net/ethernet/hisilicon/hns/hns_dsaf_ppe.c | 6 +- .../net/ethernet/hisilicon/hns/hns_dsaf_reg.h | 6 ++ 6 files changed, 104 insertions(+), 38 deletions(-) diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c index a1cb461ac45f..159142272afb 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c @@ -399,11 +399,16 @@ static void hns_ae_get_ring_bdnum_limit(struct hnae_queue *queue, static void hns_ae_get_pauseparam(struct hnae_handle *handle, u32 *auto_neg, u32 *rx_en, u32 *tx_en) { - assert(handle); + struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle); + struct dsaf_device *dsaf_dev = mac_cb->dsaf_dev; - hns_mac_get_autoneg(hns_get_mac_cb(handle), auto_neg); + hns_mac_get_autoneg(mac_cb, auto_neg); - hns_mac_get_pauseparam(hns_get_mac_cb(handle), rx_en, tx_en); + hns_mac_get_pauseparam(mac_cb, rx_en, tx_en); + + /* Service port's pause feature is provided by DSAF, not mac */ + if (handle->port_type == HNAE_PORT_SERVICE) + hns_dsaf_get_rx_mac_pause_en(dsaf_dev, mac_cb->mac_id, rx_en); } static int hns_ae_set_autoneg(struct hnae_handle *handle, u8 enable) @@ -436,12 +441,21 @@ static int hns_ae_set_pauseparam(struct hnae_handle *handle, u32 autoneg, u32 rx_en, u32 tx_en) { struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle); + struct dsaf_device *dsaf_dev = mac_cb->dsaf_dev; int ret; ret = hns_mac_set_autoneg(mac_cb, autoneg); if (ret) return ret; + /* Service port's pause feature is provided by DSAF, not mac */ + if (handle->port_type == HNAE_PORT_SERVICE) { + ret = hns_dsaf_set_rx_mac_pause_en(dsaf_dev, + mac_cb->mac_id, rx_en); + if (ret) + return ret; + rx_en = 0; + } return hns_mac_set_pauseparam(mac_cb, rx_en, tx_en); } diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c index a38084a22bf2..10c367d20955 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c @@ -439,9 +439,8 @@ int hns_mac_vm_config_bc_en(struct hns_mac_cb *mac_cb, u32 vmid, bool enable) void hns_mac_reset(struct hns_mac_cb *mac_cb) { - struct mac_driver *drv; - - drv = hns_mac_get_drv(mac_cb); + struct mac_driver *drv = hns_mac_get_drv(mac_cb); + bool is_ver1 = AE_IS_VER1(mac_cb->dsaf_dev->dsaf_ver); drv->mac_init(drv); @@ -456,7 +455,7 @@ void hns_mac_reset(struct hns_mac_cb *mac_cb) if (drv->mac_pausefrm_cfg) { if (mac_cb->mac_type == HNAE_PORT_DEBUG) - drv->mac_pausefrm_cfg(drv, 0, 0); + drv->mac_pausefrm_cfg(drv, !is_ver1, !is_ver1); else /* mac rx must disable, dsaf pfc close instead of it*/ drv->mac_pausefrm_cfg(drv, 0, 1); } @@ -561,14 +560,6 @@ void hns_mac_get_pauseparam(struct hns_mac_cb *mac_cb, u32 *rx_en, u32 *tx_en) *rx_en = 0; *tx_en = 0; } - - /* Due to the chip defect, the service mac's rx pause CAN'T be enabled. - * We set the rx pause frm always be true (1), because DSAF deals with - * the rx pause frm instead of service mac. After all, we still support - * rx pause frm. 
- */ - if (mac_cb->mac_type == HNAE_PORT_SERVICE) - *rx_en = 1; } /** @@ -602,20 +593,13 @@ int hns_mac_set_autoneg(struct hns_mac_cb *mac_cb, u8 enable) int hns_mac_set_pauseparam(struct hns_mac_cb *mac_cb, u32 rx_en, u32 tx_en) { struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb); + bool is_ver1 = AE_IS_VER1(mac_cb->dsaf_dev->dsaf_ver); - if (mac_cb->mac_type == HNAE_PORT_SERVICE) { - if (!rx_en) { - dev_err(mac_cb->dev, "disable rx_pause is not allowed!"); + if (mac_cb->mac_type == HNAE_PORT_DEBUG) { + if (is_ver1 && (tx_en || rx_en)) { + dev_err(mac_cb->dev, "macv1 cann't enable tx/rx_pause!"); return -EINVAL; } - } else if (mac_cb->mac_type == HNAE_PORT_DEBUG) { - if (tx_en || rx_en) { - dev_err(mac_cb->dev, "enable tx_pause or enable rx_pause are not allowed!"); - return -EINVAL; - } - } else { - dev_err(mac_cb->dev, "Unsupport this operation!"); - return -EINVAL; } if (mac_ctrl_drv->mac_pausefrm_cfg) diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c index 5978a5c8ef35..8439f6d8e360 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c @@ -1022,12 +1022,52 @@ static void hns_dsaf_tbl_tcam_init(struct dsaf_device *dsaf_dev) * @mac_cb: mac contrl block */ static void hns_dsaf_pfc_en_cfg(struct dsaf_device *dsaf_dev, - int mac_id, int en) + int mac_id, int tc_en) { - if (!en) - dsaf_write_dev(dsaf_dev, DSAF_PFC_EN_0_REG + mac_id * 4, 0); + dsaf_write_dev(dsaf_dev, DSAF_PFC_EN_0_REG + mac_id * 4, tc_en); +} + +static void hns_dsaf_set_pfc_pause(struct dsaf_device *dsaf_dev, + int mac_id, int tx_en, int rx_en) +{ + if (AE_IS_VER1(dsaf_dev->dsaf_ver)) { + if (!tx_en || !rx_en) + dev_err(dsaf_dev->dev, "dsaf v1 can not close pfc!\n"); + + return; + } + + dsaf_set_dev_bit(dsaf_dev, DSAF_PAUSE_CFG_REG + mac_id * 4, + DSAF_PFC_PAUSE_RX_EN_B, !!rx_en); + dsaf_set_dev_bit(dsaf_dev, DSAF_PAUSE_CFG_REG + mac_id * 4, + DSAF_PFC_PAUSE_TX_EN_B, !!tx_en); +} + +int hns_dsaf_set_rx_mac_pause_en(struct dsaf_device *dsaf_dev, int mac_id, + u32 en) +{ + if (AE_IS_VER1(dsaf_dev->dsaf_ver)) { + if (!en) + dev_err(dsaf_dev->dev, "dsafv1 can't close rx_pause!\n"); + + return -EINVAL; + } + + dsaf_set_dev_bit(dsaf_dev, DSAF_PAUSE_CFG_REG + mac_id * 4, + DSAF_MAC_PAUSE_RX_EN_B, !!en); + + return 0; +} + +void hns_dsaf_get_rx_mac_pause_en(struct dsaf_device *dsaf_dev, int mac_id, + u32 *en) +{ + if (AE_IS_VER1(dsaf_dev->dsaf_ver)) + *en = 1; else - dsaf_write_dev(dsaf_dev, DSAF_PFC_EN_0_REG + mac_id * 4, 0xff); + *en = dsaf_get_dev_bit(dsaf_dev, + DSAF_PAUSE_CFG_REG + mac_id * 4, + DSAF_MAC_PAUSE_RX_EN_B); } /** @@ -1039,6 +1079,7 @@ static void hns_dsaf_comm_init(struct dsaf_device *dsaf_dev) { u32 i; u32 o_dsaf_cfg; + bool is_ver1 = AE_IS_VER1(dsaf_dev->dsaf_ver); o_dsaf_cfg = dsaf_read_dev(dsaf_dev, DSAF_CFG_0_REG); dsaf_set_bit(o_dsaf_cfg, DSAF_CFG_EN_S, dsaf_dev->dsaf_en); @@ -1064,8 +1105,10 @@ static void hns_dsaf_comm_init(struct dsaf_device *dsaf_dev) hns_dsaf_sw_port_type_cfg(dsaf_dev, DSAF_SW_PORT_TYPE_NON_VLAN); /*set dsaf pfc to 0 for parseing rx pause*/ - for (i = 0; i < DSAF_COMM_CHN; i++) + for (i = 0; i < DSAF_COMM_CHN; i++) { hns_dsaf_pfc_en_cfg(dsaf_dev, i, 0); + hns_dsaf_set_pfc_pause(dsaf_dev, i, is_ver1, is_ver1); + } /*msk and clr exception irqs */ for (i = 0; i < DSAF_COMM_CHN; i++) { @@ -2013,6 +2056,8 @@ void hns_dsaf_update_stats(struct dsaf_device *dsaf_dev, u32 node_num) { struct dsaf_hw_stats *hw_stats = &dsaf_dev->hw_stats[node_num]; + bool 
is_ver1 = AE_IS_VER1(dsaf_dev->dsaf_ver); + u32 reg_tmp; hw_stats->pad_drop += dsaf_read_dev(dsaf_dev, DSAF_INODE_PAD_DISCARD_NUM_0_REG + 0x80 * (u64)node_num); @@ -2022,8 +2067,12 @@ void hns_dsaf_update_stats(struct dsaf_device *dsaf_dev, u32 node_num) DSAF_INODE_FINAL_IN_PKT_NUM_0_REG + 0x80 * (u64)node_num); hw_stats->rx_pkt_id += dsaf_read_dev(dsaf_dev, DSAF_INODE_SBM_PID_NUM_0_REG + 0x80 * (u64)node_num); - hw_stats->rx_pause_frame += dsaf_read_dev(dsaf_dev, - DSAF_INODE_FINAL_IN_PAUSE_NUM_0_REG + 0x80 * (u64)node_num); + + reg_tmp = is_ver1 ? DSAF_INODE_FINAL_IN_PAUSE_NUM_0_REG : + DSAFV2_INODE_FINAL_IN_PAUSE_NUM_0_REG; + hw_stats->rx_pause_frame += + dsaf_read_dev(dsaf_dev, reg_tmp + 0x80 * (u64)node_num); + hw_stats->release_buf_num += dsaf_read_dev(dsaf_dev, DSAF_INODE_SBM_RELS_NUM_0_REG + 0x80 * (u64)node_num); hw_stats->sbm_drop += dsaf_read_dev(dsaf_dev, @@ -2056,6 +2105,8 @@ void hns_dsaf_get_regs(struct dsaf_device *ddev, u32 port, void *data) u32 i = 0; u32 j; u32 *p = data; + u32 reg_tmp; + bool is_ver1 = AE_IS_VER1(ddev->dsaf_ver); /* dsaf common registers */ p[0] = dsaf_read_dev(ddev, DSAF_SRAM_INIT_OVER_0_REG); @@ -2120,8 +2171,9 @@ void hns_dsaf_get_regs(struct dsaf_device *ddev, u32 port, void *data) DSAF_INODE_FINAL_IN_PKT_NUM_0_REG + j * 0x80); p[190 + i] = dsaf_read_dev(ddev, DSAF_INODE_SBM_PID_NUM_0_REG + j * 0x80); - p[193 + i] = dsaf_read_dev(ddev, - DSAF_INODE_FINAL_IN_PAUSE_NUM_0_REG + j * 0x80); + reg_tmp = is_ver1 ? DSAF_INODE_FINAL_IN_PAUSE_NUM_0_REG : + DSAFV2_INODE_FINAL_IN_PAUSE_NUM_0_REG; + p[193 + i] = dsaf_read_dev(ddev, reg_tmp + j * 0x80); p[196 + i] = dsaf_read_dev(ddev, DSAF_INODE_SBM_RELS_NUM_0_REG + j * 0x80); p[199 + i] = dsaf_read_dev(ddev, @@ -2368,8 +2420,11 @@ void hns_dsaf_get_regs(struct dsaf_device *ddev, u32 port, void *data) p[496] = dsaf_read_dev(ddev, DSAF_NETPORT_CTRL_SIG_0_REG + port * 0x4); p[497] = dsaf_read_dev(ddev, DSAF_XGE_CTRL_SIG_CFG_0_REG + port * 0x4); + if (!is_ver1) + p[498] = dsaf_read_dev(ddev, DSAF_PAUSE_CFG_REG + port * 0x4); + /* mark end of dsaf regs */ - for (i = 498; i < 504; i++) + for (i = 499; i < 504; i++) p[i] = 0xdddddddd; } diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h index 5fea226efaf3..e8eedc571296 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h @@ -417,6 +417,11 @@ void hns_dsaf_get_strings(int stringset, u8 *data, int port); void hns_dsaf_get_regs(struct dsaf_device *ddev, u32 port, void *data); int hns_dsaf_get_regs_count(void); void hns_dsaf_set_promisc_mode(struct dsaf_device *dsaf_dev, u32 en); + +void hns_dsaf_get_rx_mac_pause_en(struct dsaf_device *dsaf_dev, int mac_id, + u32 *en); +int hns_dsaf_set_rx_mac_pause_en(struct dsaf_device *dsaf_dev, int mac_id, + u32 en); void hns_dsaf_set_inner_lb(struct dsaf_device *dsaf_dev, u32 mac_id, u32 en); #endif /* __HNS_DSAF_MAIN_H__ */ diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c index 5b7ae5ff43e8..ab27b3b14ca3 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c @@ -332,10 +332,12 @@ static void hns_ppe_init_hw(struct hns_ppe_cb *ppe_cb) /* clr and msk except irq*/ hns_ppe_exc_irq_en(ppe_cb, 0); - if (ppe_common_cb->ppe_mode == PPE_COMMON_MODE_DEBUG) + if (ppe_common_cb->ppe_mode == PPE_COMMON_MODE_DEBUG) { hns_ppe_set_port_mode(ppe_cb, PPE_MODE_GE); - else + 
dsaf_write_dev(ppe_cb, PPE_CFG_PAUSE_IDLE_CNT_REG, 0); + } else { hns_ppe_set_port_mode(ppe_cb, PPE_MODE_XGE); + } hns_ppe_checksum_hw(ppe_cb, 0xffffffff); hns_ppe_cnt_clr_ce(ppe_cb); diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h index 7d7204f45e78..7ff195e60b02 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h @@ -137,6 +137,7 @@ #define DSAF_PPE_INT_STS_0_REG 0x1E0 #define DSAF_ROCEE_INT_STS_0_REG 0x200 #define DSAFV2_SERDES_LBK_0_REG 0x220 +#define DSAF_PAUSE_CFG_REG 0x240 #define DSAF_PPE_QID_CFG_0_REG 0x300 #define DSAF_SW_PORT_TYPE_0_REG 0x320 #define DSAF_STP_PORT_TYPE_0_REG 0x340 @@ -155,6 +156,7 @@ #define DSAF_INODE_FINAL_IN_PKT_NUM_0_REG 0x1030 #define DSAF_INODE_SBM_PID_NUM_0_REG 0x1038 #define DSAF_INODE_FINAL_IN_PAUSE_NUM_0_REG 0x103C +#define DSAFV2_INODE_FINAL_IN_PAUSE_NUM_0_REG 0x1024 #define DSAF_INODE_SBM_RELS_NUM_0_REG 0x104C #define DSAF_INODE_SBM_DROP_NUM_0_REG 0x1050 #define DSAF_INODE_CRC_FALSE_NUM_0_REG 0x1054 @@ -711,6 +713,10 @@ #define DSAF_PFC_UNINT_CNT_M ((1ULL << 9) - 1) #define DSAF_PFC_UNINT_CNT_S 0 +#define DSAF_MAC_PAUSE_RX_EN_B 2 +#define DSAF_PFC_PAUSE_RX_EN_B 1 +#define DSAF_PFC_PAUSE_TX_EN_B 0 + #define DSAF_PPE_QID_CFG_M 0xFF #define DSAF_PPE_QID_CFG_S 0 From f10a6a3541b4f79f6a4d9f0d4f8f16b92f8f1cfc Mon Sep 17 00:00:00 2001 From: Alexandre TORGUE Date: Fri, 1 Apr 2016 11:37:25 +0200 Subject: [PATCH 0132/1649] stmmac: rework get_hw_feature function On next GMAC IP generation (4.xx), the way to get hw feature is not the same than on previous 3.xx. As it is hardware dependent, the way to get hw capabilities should be defined in dma ops of each MAC IP. It will avoid also a huge computation of hw capabilities in stmmac_main. Signed-off-by: Alexandre TORGUE Signed-off-by: Giuseppe Cavallaro Signed-off-by: David S. 
Miller --- drivers/net/ethernet/stmicro/stmmac/common.h | 3 +- .../ethernet/stmicro/stmmac/dwmac1000_dma.c | 35 +++++++++++++- .../net/ethernet/stmicro/stmmac/stmmac_main.c | 46 ++----------------- 3 files changed, 40 insertions(+), 44 deletions(-) diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h index f96d257308b0..797a913ef618 100644 --- a/drivers/net/ethernet/stmicro/stmmac/common.h +++ b/drivers/net/ethernet/stmicro/stmmac/common.h @@ -412,7 +412,8 @@ struct stmmac_dma_ops { int (*dma_interrupt) (void __iomem *ioaddr, struct stmmac_extra_stats *x); /* If supported then get the optional core features */ - unsigned int (*get_hw_feature) (void __iomem *ioaddr); + void (*get_hw_feature)(void __iomem *ioaddr, + struct dma_features *dma_cap); /* Program the HW RX Watchdog */ void (*rx_watchdog) (void __iomem *ioaddr, u32 riwt); }; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c index da32d6037e3e..990746955216 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c @@ -215,9 +215,40 @@ static void dwmac1000_dump_dma_regs(void __iomem *ioaddr) } } -static unsigned int dwmac1000_get_hw_feature(void __iomem *ioaddr) +static void dwmac1000_get_hw_feature(void __iomem *ioaddr, + struct dma_features *dma_cap) { - return readl(ioaddr + DMA_HW_FEATURE); + u32 hw_cap = readl(ioaddr + DMA_HW_FEATURE); + + dma_cap->mbps_10_100 = (hw_cap & DMA_HW_FEAT_MIISEL); + dma_cap->mbps_1000 = (hw_cap & DMA_HW_FEAT_GMIISEL) >> 1; + dma_cap->half_duplex = (hw_cap & DMA_HW_FEAT_HDSEL) >> 2; + dma_cap->hash_filter = (hw_cap & DMA_HW_FEAT_HASHSEL) >> 4; + dma_cap->multi_addr = (hw_cap & DMA_HW_FEAT_ADDMAC) >> 5; + dma_cap->pcs = (hw_cap & DMA_HW_FEAT_PCSSEL) >> 6; + dma_cap->sma_mdio = (hw_cap & DMA_HW_FEAT_SMASEL) >> 8; + dma_cap->pmt_remote_wake_up = (hw_cap & DMA_HW_FEAT_RWKSEL) >> 9; + dma_cap->pmt_magic_frame = (hw_cap & DMA_HW_FEAT_MGKSEL) >> 10; + /* MMC */ + dma_cap->rmon = (hw_cap & DMA_HW_FEAT_MMCSEL) >> 11; + /* IEEE 1588-2002 */ + dma_cap->time_stamp = + (hw_cap & DMA_HW_FEAT_TSVER1SEL) >> 12; + /* IEEE 1588-2008 */ + dma_cap->atime_stamp = (hw_cap & DMA_HW_FEAT_TSVER2SEL) >> 13; + /* 802.3az - Energy-Efficient Ethernet (EEE) */ + dma_cap->eee = (hw_cap & DMA_HW_FEAT_EEESEL) >> 14; + dma_cap->av = (hw_cap & DMA_HW_FEAT_AVSEL) >> 15; + /* TX and RX csum */ + dma_cap->tx_coe = (hw_cap & DMA_HW_FEAT_TXCOESEL) >> 16; + dma_cap->rx_coe_type1 = (hw_cap & DMA_HW_FEAT_RXTYP1COE) >> 17; + dma_cap->rx_coe_type2 = (hw_cap & DMA_HW_FEAT_RXTYP2COE) >> 18; + dma_cap->rxfifo_over_2048 = (hw_cap & DMA_HW_FEAT_RXFIFOSIZE) >> 19; + /* TX and RX number of channels */ + dma_cap->number_rx_channel = (hw_cap & DMA_HW_FEAT_RXCHCNT) >> 20; + dma_cap->number_tx_channel = (hw_cap & DMA_HW_FEAT_TXCHCNT) >> 22; + /* Alternate (enhanced) DESC mode */ + dma_cap->enh_desc = (hw_cap & DMA_HW_FEAT_ENHDESSEL) >> 24; } static void dwmac1000_rx_watchdog(void __iomem *ioaddr, u32 riwt) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 78464fa7fe1f..b5db7513f36f 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -1552,51 +1552,15 @@ static void stmmac_selec_desc_mode(struct stmmac_priv *priv) */ static int stmmac_get_hw_features(struct stmmac_priv *priv) { - u32 hw_cap = 0; + u32 ret = 0; if 
(priv->hw->dma->get_hw_feature) { - hw_cap = priv->hw->dma->get_hw_feature(priv->ioaddr); - - priv->dma_cap.mbps_10_100 = (hw_cap & DMA_HW_FEAT_MIISEL); - priv->dma_cap.mbps_1000 = (hw_cap & DMA_HW_FEAT_GMIISEL) >> 1; - priv->dma_cap.half_duplex = (hw_cap & DMA_HW_FEAT_HDSEL) >> 2; - priv->dma_cap.hash_filter = (hw_cap & DMA_HW_FEAT_HASHSEL) >> 4; - priv->dma_cap.multi_addr = (hw_cap & DMA_HW_FEAT_ADDMAC) >> 5; - priv->dma_cap.pcs = (hw_cap & DMA_HW_FEAT_PCSSEL) >> 6; - priv->dma_cap.sma_mdio = (hw_cap & DMA_HW_FEAT_SMASEL) >> 8; - priv->dma_cap.pmt_remote_wake_up = - (hw_cap & DMA_HW_FEAT_RWKSEL) >> 9; - priv->dma_cap.pmt_magic_frame = - (hw_cap & DMA_HW_FEAT_MGKSEL) >> 10; - /* MMC */ - priv->dma_cap.rmon = (hw_cap & DMA_HW_FEAT_MMCSEL) >> 11; - /* IEEE 1588-2002 */ - priv->dma_cap.time_stamp = - (hw_cap & DMA_HW_FEAT_TSVER1SEL) >> 12; - /* IEEE 1588-2008 */ - priv->dma_cap.atime_stamp = - (hw_cap & DMA_HW_FEAT_TSVER2SEL) >> 13; - /* 802.3az - Energy-Efficient Ethernet (EEE) */ - priv->dma_cap.eee = (hw_cap & DMA_HW_FEAT_EEESEL) >> 14; - priv->dma_cap.av = (hw_cap & DMA_HW_FEAT_AVSEL) >> 15; - /* TX and RX csum */ - priv->dma_cap.tx_coe = (hw_cap & DMA_HW_FEAT_TXCOESEL) >> 16; - priv->dma_cap.rx_coe_type1 = - (hw_cap & DMA_HW_FEAT_RXTYP1COE) >> 17; - priv->dma_cap.rx_coe_type2 = - (hw_cap & DMA_HW_FEAT_RXTYP2COE) >> 18; - priv->dma_cap.rxfifo_over_2048 = - (hw_cap & DMA_HW_FEAT_RXFIFOSIZE) >> 19; - /* TX and RX number of channels */ - priv->dma_cap.number_rx_channel = - (hw_cap & DMA_HW_FEAT_RXCHCNT) >> 20; - priv->dma_cap.number_tx_channel = - (hw_cap & DMA_HW_FEAT_TXCHCNT) >> 22; - /* Alternate (enhanced) DESC mode */ - priv->dma_cap.enh_desc = (hw_cap & DMA_HW_FEAT_ENHDESSEL) >> 24; + priv->hw->dma->get_hw_feature(priv->ioaddr, + &priv->dma_cap); + ret = 1; } - return hw_cap; + return ret; } /** From d0225e7de6229068df99ba8dacebc826d27e1cd5 Mon Sep 17 00:00:00 2001 From: Alexandre TORGUE Date: Fri, 1 Apr 2016 11:37:26 +0200 Subject: [PATCH 0133/1649] stmmac: rework the routines to show the ring status To avoid lot of check in stmmac_main for display ring management and support the GMAC4 chip, the display_ring function is moved into dedicated descriptor file. Signed-off-by: Alexandre TORGUE Signed-off-by: Giuseppe Cavallaro Signed-off-by: David S. 
Miller --- drivers/net/ethernet/stmicro/stmmac/common.h | 2 + .../net/ethernet/stmicro/stmmac/enh_desc.c | 21 ++++++ .../net/ethernet/stmicro/stmmac/norm_desc.c | 21 ++++++ .../net/ethernet/stmicro/stmmac/stmmac_main.c | 73 ++++++------------- 4 files changed, 67 insertions(+), 50 deletions(-) diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h index 797a913ef618..6cea61bc5de9 100644 --- a/drivers/net/ethernet/stmicro/stmmac/common.h +++ b/drivers/net/ethernet/stmicro/stmmac/common.h @@ -380,6 +380,8 @@ struct stmmac_desc_ops { u64(*get_timestamp) (void *desc, u32 ats); /* get rx timestamp status */ int (*get_rx_timestamp_status) (void *desc, u32 ats); + /* Display ring */ + void (*display_ring)(void *head, unsigned int size, bool rx); }; extern const struct stmmac_desc_ops enh_desc_ops; diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c index cfb018c7c5eb..38f19c99cf59 100644 --- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c +++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c @@ -411,6 +411,26 @@ static int enh_desc_get_rx_timestamp_status(void *desc, u32 ats) } } +static void enh_desc_display_ring(void *head, unsigned int size, bool rx) +{ + struct dma_extended_desc *ep = (struct dma_extended_desc *)head; + int i; + + pr_info("Extended %s descriptor ring:\n", rx ? "RX" : "TX"); + + for (i = 0; i < size; i++) { + u64 x; + + x = *(u64 *)ep; + pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n", + i, (unsigned int)virt_to_phys(ep), + (unsigned int)x, (unsigned int)(x >> 32), + ep->basic.des2, ep->basic.des3); + ep++; + } + pr_info("\n"); +} + const struct stmmac_desc_ops enh_desc_ops = { .tx_status = enh_desc_get_tx_status, .rx_status = enh_desc_get_rx_status, @@ -430,4 +450,5 @@ const struct stmmac_desc_ops enh_desc_ops = { .get_tx_timestamp_status = enh_desc_get_tx_timestamp_status, .get_timestamp = enh_desc_get_timestamp, .get_rx_timestamp_status = enh_desc_get_rx_timestamp_status, + .display_ring = enh_desc_display_ring, }; diff --git a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c index 011386f6f24d..2beacd0d3043 100644 --- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c +++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c @@ -279,6 +279,26 @@ static int ndesc_get_rx_timestamp_status(void *desc, u32 ats) return 1; } +static void ndesc_display_ring(void *head, unsigned int size, bool rx) +{ + struct dma_desc *p = (struct dma_desc *)head; + int i; + + pr_info("%s descriptor ring:\n", rx ? 
"RX" : "TX"); + + for (i = 0; i < size; i++) { + u64 x; + + x = *(u64 *)p; + pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x", + i, (unsigned int)virt_to_phys(p), + (unsigned int)x, (unsigned int)(x >> 32), + p->des2, p->des3); + p++; + } + pr_info("\n"); +} + const struct stmmac_desc_ops ndesc_ops = { .tx_status = ndesc_get_tx_status, .rx_status = ndesc_get_rx_status, @@ -297,4 +317,5 @@ const struct stmmac_desc_ops ndesc_ops = { .get_tx_timestamp_status = ndesc_get_tx_timestamp_status, .get_timestamp = ndesc_get_timestamp, .get_rx_timestamp_status = ndesc_get_rx_timestamp_status, + .display_ring = ndesc_display_ring, }; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index b5db7513f36f..0c9a2b9450d3 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -877,53 +877,22 @@ static int stmmac_init_phy(struct net_device *dev) return 0; } -/** - * stmmac_display_ring - display ring - * @head: pointer to the head of the ring passed. - * @size: size of the ring. - * @extend_desc: to verify if extended descriptors are used. - * Description: display the control/status and buffer descriptors. - */ -static void stmmac_display_ring(void *head, int size, int extend_desc) -{ - int i; - struct dma_extended_desc *ep = (struct dma_extended_desc *)head; - struct dma_desc *p = (struct dma_desc *)head; - - for (i = 0; i < size; i++) { - u64 x; - if (extend_desc) { - x = *(u64 *) ep; - pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n", - i, (unsigned int)virt_to_phys(ep), - (unsigned int)x, (unsigned int)(x >> 32), - ep->basic.des2, ep->basic.des3); - ep++; - } else { - x = *(u64 *) p; - pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x", - i, (unsigned int)virt_to_phys(p), - (unsigned int)x, (unsigned int)(x >> 32), - p->des2, p->des3); - p++; - } - pr_info("\n"); - } -} - static void stmmac_display_rings(struct stmmac_priv *priv) { + void *head_rx, *head_tx; + if (priv->extend_desc) { - pr_info("Extended RX descriptor ring:\n"); - stmmac_display_ring((void *)priv->dma_erx, DMA_RX_SIZE, 1); - pr_info("Extended TX descriptor ring:\n"); - stmmac_display_ring((void *)priv->dma_etx, DMA_TX_SIZE, 1); + head_rx = (void *)priv->dma_erx; + head_tx = (void *)priv->dma_etx; } else { - pr_info("RX descriptor ring:\n"); - stmmac_display_ring((void *)priv->dma_rx, DMA_RX_SIZE, 0); - pr_info("TX descriptor ring:\n"); - stmmac_display_ring((void *)priv->dma_tx, DMA_TX_SIZE, 0); + head_rx = (void *)priv->dma_rx; + head_tx = (void *)priv->dma_tx; } + + /* Display Rx ring */ + priv->hw->desc->display_ring(head_rx, DMA_RX_SIZE, true); + /* Display Tx ring */ + priv->hw->desc->display_ring(head_tx, DMA_TX_SIZE, false); } static int stmmac_set_bfsize(int mtu, int bufsize) @@ -1990,16 +1959,18 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) priv->cur_tx = entry; if (netif_msg_pktdata(priv)) { + void *tx_head; + pr_debug("%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d", __func__, priv->cur_tx, priv->dirty_tx, first_entry, entry, first, nfrags); if (priv->extend_desc) - stmmac_display_ring((void *)priv->dma_etx, - DMA_TX_SIZE, 1); + tx_head = (void *)priv->dma_etx; else - stmmac_display_ring((void *)priv->dma_tx, - DMA_TX_SIZE, 0); + tx_head = (void *)priv->dma_tx; + + priv->hw->desc->display_ring(tx_head, DMA_TX_SIZE, false); pr_debug(">>> frame to be transmitted: "); print_pkt(skb->data, skb->len); @@ -2184,13 +2155,15 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit) int 
coe = priv->hw->rx_csum; if (netif_msg_rx_status(priv)) { + void *rx_head; + pr_debug("%s: descriptor ring:\n", __func__); if (priv->extend_desc) - stmmac_display_ring((void *)priv->dma_erx, - DMA_RX_SIZE, 1); + rx_head = (void *)priv->dma_erx; else - stmmac_display_ring((void *)priv->dma_rx, - DMA_RX_SIZE, 0); + rx_head = (void *)priv->dma_rx; + + priv->hw->desc->display_ring(rx_head, DMA_RX_SIZE, true); } while (count < limit) { int status; From c623d149b18cbdb7e9f782ced0c859b1836ef3cd Mon Sep 17 00:00:00 2001 From: Alexandre TORGUE Date: Fri, 1 Apr 2016 11:37:27 +0200 Subject: [PATCH 0134/1649] stmmac: rework synopsys id read, moved to dwmac setup synopsys_uid is only used once after setup, to get synopsys_id by using shitf/mask operation. It's no longer used then. So, remove this temporary variable and directly compute synopsys_id from setup routine. Acked-by: Giuseppe Cavallaro Signed-off-by: Fabrice Gasnier Signed-off-by: Alexandre TORGUE Signed-off-by: David S. Miller --- drivers/net/ethernet/stmicro/stmmac/common.h | 27 ++++++++++++++-- .../ethernet/stmicro/stmmac/dwmac1000_core.c | 7 +++-- .../ethernet/stmicro/stmmac/dwmac100_core.c | 5 +-- .../net/ethernet/stmicro/stmmac/stmmac_main.c | 31 ++----------------- 4 files changed, 35 insertions(+), 35 deletions(-) diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h index 6cea61bc5de9..66e132f84aa8 100644 --- a/drivers/net/ethernet/stmicro/stmmac/common.h +++ b/drivers/net/ethernet/stmicro/stmmac/common.h @@ -498,7 +498,6 @@ struct mac_device_info { const struct stmmac_hwtimestamp *ptp; struct mii_regs mii; /* MII register Addresses */ struct mac_link link; - unsigned int synopsys_uid; void __iomem *pcsr; /* vpointer to device CSRs */ int multicast_filter_bins; int unicast_filter_entries; @@ -507,8 +506,10 @@ struct mac_device_info { }; struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr, int mcbins, - int perfect_uc_entries); -struct mac_device_info *dwmac100_setup(void __iomem *ioaddr); + int perfect_uc_entries, + int *synopsys_id); +struct mac_device_info *dwmac100_setup(void __iomem *ioaddr, int *synopsys_id); + void stmmac_set_mac_addr(void __iomem *ioaddr, u8 addr[6], unsigned int high, unsigned int low); @@ -521,4 +522,24 @@ void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr); extern const struct stmmac_mode_ops ring_mode_ops; extern const struct stmmac_mode_ops chain_mode_ops; +/** + * stmmac_get_synopsys_id - return the SYINID. + * @priv: driver private structure + * Description: this simple function is to decode and return the SYINID + * starting from the HW core register. 
+ */ +static inline u32 stmmac_get_synopsys_id(u32 hwid) +{ + /* Check Synopsys Id (not available on old chips) */ + if (likely(hwid)) { + u32 uid = ((hwid & 0x0000ff00) >> 8); + u32 synid = (hwid & 0x000000ff); + + pr_info("stmmac - user ID: 0x%x, Synopsys ID: 0x%x\n", + uid, synid); + + return synid; + } + return 0; +} #endif /* __COMMON_H__ */ diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c index c2941172f6d1..fb1eb578e34e 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c @@ -491,7 +491,8 @@ static const struct stmmac_ops dwmac1000_ops = { }; struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr, int mcbins, - int perfect_uc_entries) + int perfect_uc_entries, + int *synopsys_id) { struct mac_device_info *mac; u32 hwid = readl(ioaddr + GMAC_VERSION); @@ -516,7 +517,9 @@ struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr, int mcbins, mac->link.speed = GMAC_CONTROL_FES; mac->mii.addr = GMAC_MII_ADDR; mac->mii.data = GMAC_MII_DATA; - mac->synopsys_uid = hwid; + + /* Get and dump the chip ID */ + *synopsys_id = stmmac_get_synopsys_id(hwid); return mac; } diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c index f8dd773f246c..6418b2e07619 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c @@ -173,7 +173,7 @@ static const struct stmmac_ops dwmac100_ops = { .get_umac_addr = dwmac100_get_umac_addr, }; -struct mac_device_info *dwmac100_setup(void __iomem *ioaddr) +struct mac_device_info *dwmac100_setup(void __iomem *ioaddr, int *synopsys_id) { struct mac_device_info *mac; @@ -192,7 +192,8 @@ struct mac_device_info *dwmac100_setup(void __iomem *ioaddr) mac->link.speed = 0; mac->mii.addr = MAC_MII_ADDR; mac->mii.data = MAC_MII_DATA; - mac->synopsys_uid = 0; + /* Synopsys Id is not available on old chips */ + *synopsys_id = 0; return mac; } diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 0c9a2b9450d3..1186ac902bec 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -1461,29 +1461,6 @@ static void stmmac_mmc_setup(struct stmmac_priv *priv) pr_info(" No MAC Management Counters available\n"); } -/** - * stmmac_get_synopsys_id - return the SYINID. - * @priv: driver private structure - * Description: this simple function is to decode and return the SYINID - * starting from the HW core register. 
- */ -static u32 stmmac_get_synopsys_id(struct stmmac_priv *priv) -{ - u32 hwid = priv->hw->synopsys_uid; - - /* Check Synopsys Id (not available on old chips) */ - if (likely(hwid)) { - u32 uid = ((hwid & 0x0000ff00) >> 8); - u32 synid = (hwid & 0x000000ff); - - pr_info("stmmac - user ID: 0x%x, Synopsys ID: 0x%x\n", - uid, synid); - - return synid; - } - return 0; -} - /** * stmmac_selec_desc_mode - to select among: normal/alternate/extend descriptors * @priv: driver private structure @@ -2757,18 +2734,16 @@ static int stmmac_hw_init(struct stmmac_priv *priv) priv->dev->priv_flags |= IFF_UNICAST_FLT; mac = dwmac1000_setup(priv->ioaddr, priv->plat->multicast_filter_bins, - priv->plat->unicast_filter_entries); + priv->plat->unicast_filter_entries, + &priv->synopsys_id); } else { - mac = dwmac100_setup(priv->ioaddr); + mac = dwmac100_setup(priv->ioaddr, &priv->synopsys_id); } if (!mac) return -ENOMEM; priv->hw = mac; - /* Get and dump the chip ID */ - priv->synopsys_id = stmmac_get_synopsys_id(priv); - /* To use the chained or ring mode */ if (chain_mode) { priv->hw->mode = &chain_mode_ops; From 753a71090f3325b4c34622daccbb71ed574cca73 Mon Sep 17 00:00:00 2001 From: Alexandre TORGUE Date: Fri, 1 Apr 2016 11:37:28 +0200 Subject: [PATCH 0135/1649] stmmac: add descriptors function for GMAC 4.xx One of main changes of GMAC 4.xx IP is descriptors management. -descriptors are only used in ring mode. -A descriptor is composed of 4 32bits registers (no more extended descriptors) -descriptor mechanism (Tx for example, but it is exactly the same for RX): -useful registers: -DMA_CH#_TxDesc_Ring_Len: length of transmit descriptor ring -DMA_CH#_TxDesc_List_Address: start address of the ring -DMA_CH#_TxDesc_Tail_Pointer: address of the last descriptor to send + 1. -DMA_CH#_TxDesc_Current_App_TxDesc: address of the current descriptor -The descriptor Tail Pointer register contains the pointer to the descriptor address (N). The base address and the current descriptor decide the address of the current descriptor that the DMA can process. The descriptors up to one location less than the one indicated by the descriptor tail pointer (N-1) are owned by the DMA. The DMA continues to process the descriptors until the following condition occurs: "current descriptor pointer == Descriptor Tail pointer" Then the DMA goes into suspend mode. The application must perform a write to descriptor tail pointer register and update the tail pointer to have the following condition and to start a new transfer: "current descriptor pointer < Descriptor tail pointer" The DMA automatically wraps around the base address when the end of ring is reached. -New features are available on IP: -TSO (TCP Segmentation Offload) for TX only -Split header: to have header and payload in 2 different buffers Signed-off-by: Alexandre TORGUE Signed-off-by: Giuseppe Cavallaro Signed-off-by: David S. 
Miller --- drivers/net/ethernet/stmicro/stmmac/Makefile | 3 +- drivers/net/ethernet/stmicro/stmmac/common.h | 7 + .../ethernet/stmicro/stmmac/dwmac4_descs.c | 396 ++++++++++++++++++ .../ethernet/stmicro/stmmac/dwmac4_descs.h | 129 ++++++ 4 files changed, 534 insertions(+), 1 deletion(-) create mode 100644 drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c create mode 100644 drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile index b3901616f4f6..fa000fd36efc 100644 --- a/drivers/net/ethernet/stmicro/stmmac/Makefile +++ b/drivers/net/ethernet/stmicro/stmmac/Makefile @@ -2,7 +2,8 @@ obj-$(CONFIG_STMMAC_ETH) += stmmac.o stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o ring_mode.o \ chain_mode.o dwmac_lib.o dwmac1000_core.o dwmac1000_dma.o \ dwmac100_core.o dwmac100_dma.o enh_desc.o norm_desc.o \ - mmc_core.o stmmac_hwtstamp.o stmmac_ptp.o $(stmmac-y) + mmc_core.o stmmac_hwtstamp.o stmmac_ptp.o dwmac4_descs.o \ + $(stmmac-y) # Ordering matters. Generic driver must be last. obj-$(CONFIG_STMMAC_PLATFORM) += stmmac-platform.o diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h index 66e132f84aa8..ea7eb0d5ce98 100644 --- a/drivers/net/ethernet/stmicro/stmmac/common.h +++ b/drivers/net/ethernet/stmicro/stmmac/common.h @@ -243,6 +243,7 @@ enum rx_frame_status { csum_none = 0x2, llc_snap = 0x4, dma_own = 0x8, + rx_not_ls = 0x10, }; /* Tx status */ @@ -348,6 +349,10 @@ struct stmmac_desc_ops { void (*prepare_tx_desc) (struct dma_desc *p, int is_fs, int len, bool csum_flag, int mode, bool tx_own, bool ls); + void (*prepare_tso_tx_desc)(struct dma_desc *p, int is_fs, int len1, + int len2, bool tx_own, bool ls, + unsigned int tcphdrlen, + unsigned int tcppayloadlen); /* Set/get the owner of the descriptor */ void (*set_tx_owner) (struct dma_desc *p); int (*get_tx_owner) (struct dma_desc *p); @@ -382,6 +387,8 @@ struct stmmac_desc_ops { int (*get_rx_timestamp_status) (void *desc, u32 ats); /* Display ring */ void (*display_ring)(void *head, unsigned int size, bool rx); + /* set MSS via context descriptor */ + void (*set_mss)(struct dma_desc *p, unsigned int mss); }; extern const struct stmmac_desc_ops enh_desc_ops; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c new file mode 100644 index 000000000000..d4952c7a836d --- /dev/null +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c @@ -0,0 +1,396 @@ +/* + * This contains the functions to handle the descriptors for DesignWare databook + * 4.xx. + * + * Copyright (C) 2015 STMicroelectronics Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * Author: Alexandre Torgue + */ + +#include +#include "common.h" +#include "dwmac4_descs.h" + +static int dwmac4_wrback_get_tx_status(void *data, struct stmmac_extra_stats *x, + struct dma_desc *p, + void __iomem *ioaddr) +{ + struct net_device_stats *stats = (struct net_device_stats *)data; + unsigned int tdes3; + int ret = tx_done; + + tdes3 = p->des3; + + /* Get tx owner first */ + if (unlikely(tdes3 & TDES3_OWN)) + return tx_dma_own; + + /* Verify tx error by looking at the last segment. 
*/ + if (likely(!(tdes3 & TDES3_LAST_DESCRIPTOR))) + return tx_not_ls; + + if (unlikely(tdes3 & TDES3_ERROR_SUMMARY)) { + if (unlikely(tdes3 & TDES3_JABBER_TIMEOUT)) + x->tx_jabber++; + if (unlikely(tdes3 & TDES3_PACKET_FLUSHED)) + x->tx_frame_flushed++; + if (unlikely(tdes3 & TDES3_LOSS_CARRIER)) { + x->tx_losscarrier++; + stats->tx_carrier_errors++; + } + if (unlikely(tdes3 & TDES3_NO_CARRIER)) { + x->tx_carrier++; + stats->tx_carrier_errors++; + } + if (unlikely((tdes3 & TDES3_LATE_COLLISION) || + (tdes3 & TDES3_EXCESSIVE_COLLISION))) + stats->collisions += + (tdes3 & TDES3_COLLISION_COUNT_MASK) + >> TDES3_COLLISION_COUNT_SHIFT; + + if (unlikely(tdes3 & TDES3_EXCESSIVE_DEFERRAL)) + x->tx_deferred++; + + if (unlikely(tdes3 & TDES3_UNDERFLOW_ERROR)) + x->tx_underflow++; + + if (unlikely(tdes3 & TDES3_IP_HDR_ERROR)) + x->tx_ip_header_error++; + + if (unlikely(tdes3 & TDES3_PAYLOAD_ERROR)) + x->tx_payload_error++; + + ret = tx_err; + } + + if (unlikely(tdes3 & TDES3_DEFERRED)) + x->tx_deferred++; + + return ret; +} + +static int dwmac4_wrback_get_rx_status(void *data, struct stmmac_extra_stats *x, + struct dma_desc *p) +{ + struct net_device_stats *stats = (struct net_device_stats *)data; + unsigned int rdes1 = p->des1; + unsigned int rdes2 = p->des2; + unsigned int rdes3 = p->des3; + int message_type; + int ret = good_frame; + + if (unlikely(rdes3 & RDES3_OWN)) + return dma_own; + + /* Verify rx error by looking at the last segment. */ + if (likely(!(rdes3 & RDES3_LAST_DESCRIPTOR))) + return discard_frame; + + if (unlikely(rdes3 & RDES3_ERROR_SUMMARY)) { + if (unlikely(rdes3 & RDES3_GIANT_PACKET)) + stats->rx_length_errors++; + if (unlikely(rdes3 & RDES3_OVERFLOW_ERROR)) + x->rx_gmac_overflow++; + + if (unlikely(rdes3 & RDES3_RECEIVE_WATCHDOG)) + x->rx_watchdog++; + + if (unlikely(rdes3 & RDES3_RECEIVE_ERROR)) + x->rx_mii++; + + if (unlikely(rdes3 & RDES3_CRC_ERROR)) { + x->rx_crc++; + stats->rx_crc_errors++; + } + + if (unlikely(rdes3 & RDES3_DRIBBLE_ERROR)) + x->dribbling_bit++; + + ret = discard_frame; + } + + message_type = (rdes1 & ERDES4_MSG_TYPE_MASK) >> 8; + + if (rdes1 & RDES1_IP_HDR_ERROR) + x->ip_hdr_err++; + if (rdes1 & RDES1_IP_CSUM_BYPASSED) + x->ip_csum_bypassed++; + if (rdes1 & RDES1_IPV4_HEADER) + x->ipv4_pkt_rcvd++; + if (rdes1 & RDES1_IPV6_HEADER) + x->ipv6_pkt_rcvd++; + if (message_type == RDES_EXT_SYNC) + x->rx_msg_type_sync++; + else if (message_type == RDES_EXT_FOLLOW_UP) + x->rx_msg_type_follow_up++; + else if (message_type == RDES_EXT_DELAY_REQ) + x->rx_msg_type_delay_req++; + else if (message_type == RDES_EXT_DELAY_RESP) + x->rx_msg_type_delay_resp++; + else if (message_type == RDES_EXT_PDELAY_REQ) + x->rx_msg_type_pdelay_req++; + else if (message_type == RDES_EXT_PDELAY_RESP) + x->rx_msg_type_pdelay_resp++; + else if (message_type == RDES_EXT_PDELAY_FOLLOW_UP) + x->rx_msg_type_pdelay_follow_up++; + else + x->rx_msg_type_ext_no_ptp++; + + if (rdes1 & RDES1_PTP_PACKET_TYPE) + x->ptp_frame_type++; + if (rdes1 & RDES1_PTP_VER) + x->ptp_ver++; + if (rdes1 & RDES1_TIMESTAMP_DROPPED) + x->timestamp_dropped++; + + if (unlikely(rdes2 & RDES2_SA_FILTER_FAIL)) { + x->sa_rx_filter_fail++; + ret = discard_frame; + } + if (unlikely(rdes2 & RDES2_DA_FILTER_FAIL)) { + x->da_rx_filter_fail++; + ret = discard_frame; + } + + if (rdes2 & RDES2_L3_FILTER_MATCH) + x->l3_filter_match++; + if (rdes2 & RDES2_L4_FILTER_MATCH) + x->l4_filter_match++; + if ((rdes2 & RDES2_L3_L4_FILT_NB_MATCH_MASK) + >> RDES2_L3_L4_FILT_NB_MATCH_SHIFT) + x->l3_l4_filter_no_match++; + + return ret; +} + 
+static int dwmac4_rd_get_tx_len(struct dma_desc *p) +{ + return (p->des2 & TDES2_BUFFER1_SIZE_MASK); +} + +static int dwmac4_get_tx_owner(struct dma_desc *p) +{ + return (p->des3 & TDES3_OWN) >> TDES3_OWN_SHIFT; +} + +static void dwmac4_set_tx_owner(struct dma_desc *p) +{ + p->des3 |= TDES3_OWN; +} + +static void dwmac4_set_rx_owner(struct dma_desc *p) +{ + p->des3 |= RDES3_OWN; +} + +static int dwmac4_get_tx_ls(struct dma_desc *p) +{ + return (p->des3 & TDES3_LAST_DESCRIPTOR) >> TDES3_LAST_DESCRIPTOR_SHIFT; +} + +static int dwmac4_wrback_get_rx_frame_len(struct dma_desc *p, int rx_coe) +{ + return (p->des3 & RDES3_PACKET_SIZE_MASK); +} + +static void dwmac4_rd_enable_tx_timestamp(struct dma_desc *p) +{ + p->des2 |= TDES2_TIMESTAMP_ENABLE; +} + +static int dwmac4_wrback_get_tx_timestamp_status(struct dma_desc *p) +{ + return (p->des3 & TDES3_TIMESTAMP_STATUS) + >> TDES3_TIMESTAMP_STATUS_SHIFT; +} + +/* NOTE: For RX CTX bit has to be checked before + * HAVE a specific function for TX and another one for RX + */ +static u64 dwmac4_wrback_get_timestamp(void *desc, u32 ats) +{ + struct dma_desc *p = (struct dma_desc *)desc; + u64 ns; + + ns = p->des0; + /* convert high/sec time stamp value to nanosecond */ + ns += p->des1 * 1000000000ULL; + + return ns; +} + +static int dwmac4_context_get_rx_timestamp_status(void *desc, u32 ats) +{ + struct dma_desc *p = (struct dma_desc *)desc; + + return (p->des1 & RDES1_TIMESTAMP_AVAILABLE) + >> RDES1_TIMESTAMP_AVAILABLE_SHIFT; +} + +static void dwmac4_rd_init_rx_desc(struct dma_desc *p, int disable_rx_ic, + int mode, int end) +{ + p->des3 = RDES3_OWN | RDES3_BUFFER1_VALID_ADDR; + + if (!disable_rx_ic) + p->des3 |= RDES3_INT_ON_COMPLETION_EN; +} + +static void dwmac4_rd_init_tx_desc(struct dma_desc *p, int mode, int end) +{ + p->des0 = 0; + p->des1 = 0; + p->des2 = 0; + p->des3 = 0; +} + +static void dwmac4_rd_prepare_tx_desc(struct dma_desc *p, int is_fs, int len, + bool csum_flag, int mode, bool tx_own, + bool ls) +{ + unsigned int tdes3 = p->des3; + + if (unlikely(len > BUF_SIZE_16KiB)) { + p->des2 |= (((len - BUF_SIZE_16KiB) << + TDES2_BUFFER2_SIZE_MASK_SHIFT) + & TDES2_BUFFER2_SIZE_MASK) + | (BUF_SIZE_16KiB & TDES2_BUFFER1_SIZE_MASK); + } else { + p->des2 |= (len & TDES2_BUFFER1_SIZE_MASK); + } + + if (is_fs) + tdes3 |= TDES3_FIRST_DESCRIPTOR; + else + tdes3 &= ~TDES3_FIRST_DESCRIPTOR; + + if (likely(csum_flag)) + tdes3 |= (TX_CIC_FULL << TDES3_CHECKSUM_INSERTION_SHIFT); + else + tdes3 &= ~(TX_CIC_FULL << TDES3_CHECKSUM_INSERTION_SHIFT); + + if (ls) + tdes3 |= TDES3_LAST_DESCRIPTOR; + else + tdes3 &= ~TDES3_LAST_DESCRIPTOR; + + /* Finally set the OWN bit. Later the DMA will start! */ + if (tx_own) + tdes3 |= TDES3_OWN; + + if (is_fs & tx_own) + /* When the own bit, for the first frame, has to be set, all + * descriptors for the same frame has to be set before, to + * avoid race condition. 
+ */ + wmb(); + + p->des3 = tdes3; +} + +static void dwmac4_rd_prepare_tso_tx_desc(struct dma_desc *p, int is_fs, + int len1, int len2, bool tx_own, + bool ls, unsigned int tcphdrlen, + unsigned int tcppayloadlen) +{ + unsigned int tdes3 = p->des3; + + if (len1) + p->des2 |= (len1 & TDES2_BUFFER1_SIZE_MASK); + + if (len2) + p->des2 |= (len2 << TDES2_BUFFER2_SIZE_MASK_SHIFT) + & TDES2_BUFFER2_SIZE_MASK; + + if (is_fs) { + tdes3 |= TDES3_FIRST_DESCRIPTOR | + TDES3_TCP_SEGMENTATION_ENABLE | + ((tcphdrlen << TDES3_HDR_LEN_SHIFT) & + TDES3_SLOT_NUMBER_MASK) | + ((tcppayloadlen & TDES3_TCP_PKT_PAYLOAD_MASK)); + } else { + tdes3 &= ~TDES3_FIRST_DESCRIPTOR; + } + + if (ls) + tdes3 |= TDES3_LAST_DESCRIPTOR; + else + tdes3 &= ~TDES3_LAST_DESCRIPTOR; + + /* Finally set the OWN bit. Later the DMA will start! */ + if (tx_own) + tdes3 |= TDES3_OWN; + + if (is_fs & tx_own) + /* When the own bit, for the first frame, has to be set, all + * descriptors for the same frame has to be set before, to + * avoid race condition. + */ + wmb(); + + p->des3 = tdes3; +} + +static void dwmac4_release_tx_desc(struct dma_desc *p, int mode) +{ + p->des2 = 0; + p->des3 = 0; +} + +static void dwmac4_rd_set_tx_ic(struct dma_desc *p) +{ + p->des2 |= TDES2_INTERRUPT_ON_COMPLETION; +} + +static void dwmac4_display_ring(void *head, unsigned int size, bool rx) +{ + struct dma_desc *p = (struct dma_desc *)head; + int i; + + pr_info("%s descriptor ring:\n", rx ? "RX" : "TX"); + + for (i = 0; i < size; i++) { + if (p->des0) + pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n", + i, (unsigned int)virt_to_phys(p), + p->des0, p->des1, p->des2, p->des3); + p++; + } +} + +static void dwmac4_set_mss_ctxt(struct dma_desc *p, unsigned int mss) +{ + p->des0 = 0; + p->des1 = 0; + p->des2 = mss; + p->des3 = TDES3_CONTEXT_TYPE | TDES3_CTXT_TCMSSV; +} + +const struct stmmac_desc_ops dwmac4_desc_ops = { + .tx_status = dwmac4_wrback_get_tx_status, + .rx_status = dwmac4_wrback_get_rx_status, + .get_tx_len = dwmac4_rd_get_tx_len, + .get_tx_owner = dwmac4_get_tx_owner, + .set_tx_owner = dwmac4_set_tx_owner, + .set_rx_owner = dwmac4_set_rx_owner, + .get_tx_ls = dwmac4_get_tx_ls, + .get_rx_frame_len = dwmac4_wrback_get_rx_frame_len, + .enable_tx_timestamp = dwmac4_rd_enable_tx_timestamp, + .get_tx_timestamp_status = dwmac4_wrback_get_tx_timestamp_status, + .get_timestamp = dwmac4_wrback_get_timestamp, + .get_rx_timestamp_status = dwmac4_context_get_rx_timestamp_status, + .set_tx_ic = dwmac4_rd_set_tx_ic, + .prepare_tx_desc = dwmac4_rd_prepare_tx_desc, + .prepare_tso_tx_desc = dwmac4_rd_prepare_tso_tx_desc, + .release_tx_desc = dwmac4_release_tx_desc, + .init_rx_desc = dwmac4_rd_init_rx_desc, + .init_tx_desc = dwmac4_rd_init_tx_desc, + .display_ring = dwmac4_display_ring, + .set_mss = dwmac4_set_mss_ctxt, +}; + +const struct stmmac_mode_ops dwmac4_ring_mode_ops = { }; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h new file mode 100644 index 000000000000..0902a2edeaa9 --- /dev/null +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h @@ -0,0 +1,129 @@ +/* + * Header File to describe the DMA descriptors and related definitions specific + * for DesignWare databook 4.xx. + * + * Copyright (C) 2015 STMicroelectronics Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * Author: Alexandre Torgue + */ + +#ifndef __DWMAC4_DESCS_H__ +#define __DWMAC4_DESCS_H__ + +#include + +/* Normal transmit descriptor defines (without split feature) */ + +/* TDES2 (read format) */ +#define TDES2_BUFFER1_SIZE_MASK GENMASK(13, 0) +#define TDES2_VLAN_TAG_MASK GENMASK(15, 14) +#define TDES2_BUFFER2_SIZE_MASK GENMASK(29, 16) +#define TDES2_BUFFER2_SIZE_MASK_SHIFT 16 +#define TDES2_TIMESTAMP_ENABLE BIT(30) +#define TDES2_INTERRUPT_ON_COMPLETION BIT(31) + +/* TDES3 (read format) */ +#define TDES3_PACKET_SIZE_MASK GENMASK(14, 0) +#define TDES3_CHECKSUM_INSERTION_MASK GENMASK(17, 16) +#define TDES3_CHECKSUM_INSERTION_SHIFT 16 +#define TDES3_TCP_PKT_PAYLOAD_MASK GENMASK(17, 0) +#define TDES3_TCP_SEGMENTATION_ENABLE BIT(18) +#define TDES3_HDR_LEN_SHIFT 19 +#define TDES3_SLOT_NUMBER_MASK GENMASK(22, 19) +#define TDES3_SA_INSERT_CTRL_MASK GENMASK(25, 23) +#define TDES3_CRC_PAD_CTRL_MASK GENMASK(27, 26) + +/* TDES3 (write back format) */ +#define TDES3_IP_HDR_ERROR BIT(0) +#define TDES3_DEFERRED BIT(1) +#define TDES3_UNDERFLOW_ERROR BIT(2) +#define TDES3_EXCESSIVE_DEFERRAL BIT(3) +#define TDES3_COLLISION_COUNT_MASK GENMASK(7, 4) +#define TDES3_COLLISION_COUNT_SHIFT 4 +#define TDES3_EXCESSIVE_COLLISION BIT(8) +#define TDES3_LATE_COLLISION BIT(9) +#define TDES3_NO_CARRIER BIT(10) +#define TDES3_LOSS_CARRIER BIT(11) +#define TDES3_PAYLOAD_ERROR BIT(12) +#define TDES3_PACKET_FLUSHED BIT(13) +#define TDES3_JABBER_TIMEOUT BIT(14) +#define TDES3_ERROR_SUMMARY BIT(15) +#define TDES3_TIMESTAMP_STATUS BIT(17) +#define TDES3_TIMESTAMP_STATUS_SHIFT 17 + +/* TDES3 context */ +#define TDES3_CTXT_TCMSSV BIT(26) + +/* TDES3 Common */ +#define TDES3_LAST_DESCRIPTOR BIT(28) +#define TDES3_LAST_DESCRIPTOR_SHIFT 28 +#define TDES3_FIRST_DESCRIPTOR BIT(29) +#define TDES3_CONTEXT_TYPE BIT(30) + +/* TDS3 use for both format (read and write back) */ +#define TDES3_OWN BIT(31) +#define TDES3_OWN_SHIFT 31 + +/* Normal receive descriptor defines (without split feature) */ + +/* RDES0 (write back format) */ +#define RDES0_VLAN_TAG_MASK GENMASK(15, 0) + +/* RDES1 (write back format) */ +#define RDES1_IP_PAYLOAD_TYPE_MASK GENMASK(2, 0) +#define RDES1_IP_HDR_ERROR BIT(3) +#define RDES1_IPV4_HEADER BIT(4) +#define RDES1_IPV6_HEADER BIT(5) +#define RDES1_IP_CSUM_BYPASSED BIT(6) +#define RDES1_IP_CSUM_ERROR BIT(7) +#define RDES1_PTP_MSG_TYPE_MASK GENMASK(11, 8) +#define RDES1_PTP_PACKET_TYPE BIT(12) +#define RDES1_PTP_VER BIT(13) +#define RDES1_TIMESTAMP_AVAILABLE BIT(14) +#define RDES1_TIMESTAMP_AVAILABLE_SHIFT 14 +#define RDES1_TIMESTAMP_DROPPED BIT(15) +#define RDES1_IP_TYPE1_CSUM_MASK GENMASK(31, 16) + +/* RDES2 (write back format) */ +#define RDES2_L3_L4_HEADER_SIZE_MASK GENMASK(9, 0) +#define RDES2_VLAN_FILTER_STATUS BIT(15) +#define RDES2_SA_FILTER_FAIL BIT(16) +#define RDES2_DA_FILTER_FAIL BIT(17) +#define RDES2_HASH_FILTER_STATUS BIT(18) +#define RDES2_MAC_ADDR_MATCH_MASK GENMASK(26, 19) +#define RDES2_HASH_VALUE_MATCH_MASK GENMASK(26, 19) +#define RDES2_L3_FILTER_MATCH BIT(27) +#define RDES2_L4_FILTER_MATCH BIT(28) +#define RDES2_L3_L4_FILT_NB_MATCH_MASK GENMASK(27, 26) +#define RDES2_L3_L4_FILT_NB_MATCH_SHIFT 26 + +/* RDES3 (write back format) */ +#define RDES3_PACKET_SIZE_MASK GENMASK(14, 0) +#define RDES3_ERROR_SUMMARY BIT(15) +#define RDES3_PACKET_LEN_TYPE_MASK GENMASK(18, 16) +#define RDES3_DRIBBLE_ERROR BIT(19) +#define RDES3_RECEIVE_ERROR BIT(20) +#define RDES3_OVERFLOW_ERROR BIT(21) +#define RDES3_RECEIVE_WATCHDOG BIT(22) +#define RDES3_GIANT_PACKET BIT(23) +#define RDES3_CRC_ERROR BIT(24) 
+#define RDES3_RDES0_VALID BIT(25) +#define RDES3_RDES1_VALID BIT(26) +#define RDES3_RDES2_VALID BIT(27) +#define RDES3_LAST_DESCRIPTOR BIT(28) +#define RDES3_FIRST_DESCRIPTOR BIT(29) +#define RDES3_CONTEXT_DESCRIPTOR BIT(30) + +/* RDES3 (read format) */ +#define RDES3_BUFFER1_VALID_ADDR BIT(24) +#define RDES3_BUFFER2_VALID_ADDR BIT(25) +#define RDES3_INT_ON_COMPLETION_EN BIT(30) + +/* TDS3 use for both format (read and write back) */ +#define RDES3_OWN BIT(31) + +#endif /* __DWMAC4_DESCS_H__ */ From 35f74c0c5dce138bd9000d98abf4959af782a96d Mon Sep 17 00:00:00 2001 From: Alexandre TORGUE Date: Fri, 1 Apr 2016 11:37:29 +0200 Subject: [PATCH 0136/1649] stmmac: add GMAC4 DMA/CORE Header File This is the main header file to define all the macro used for GMAC4 DMA and CORE parts. Signed-off-by: Alexandre TORGUE Signed-off-by: Giuseppe Cavallaro Signed-off-by: David S. Miller --- drivers/net/ethernet/stmicro/stmmac/dwmac4.h | 224 +++++++++++++++++++ 1 file changed, 224 insertions(+) create mode 100644 drivers/net/ethernet/stmicro/stmmac/dwmac4.h diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h new file mode 100644 index 000000000000..c12f15c9b351 --- /dev/null +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h @@ -0,0 +1,224 @@ +/* + * DWMAC4 Header file. + * + * Copyright (C) 2015 STMicroelectronics Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * Author: Alexandre Torgue + */ + +#ifndef __DWMAC4_H__ +#define __DWMAC4_H__ + +#include "common.h" + +/* MAC registers */ +#define GMAC_CONFIG 0x00000000 +#define GMAC_PACKET_FILTER 0x00000008 +#define GMAC_HASH_TAB_0_31 0x00000010 +#define GMAC_HASH_TAB_32_63 0x00000014 +#define GMAC_RX_FLOW_CTRL 0x00000090 +#define GMAC_QX_TX_FLOW_CTRL(x) (0x70 + x * 4) +#define GMAC_INT_STATUS 0x000000b0 +#define GMAC_INT_EN 0x000000b4 +#define GMAC_AN_CTRL 0x000000e0 +#define GMAC_AN_STATUS 0x000000e4 +#define GMAC_AN_ADV 0x000000e8 +#define GMAC_AN_LPA 0x000000ec +#define GMAC_PMT 0x000000c0 +#define GMAC_VERSION 0x00000110 +#define GMAC_DEBUG 0x00000114 +#define GMAC_HW_FEATURE0 0x0000011c +#define GMAC_HW_FEATURE1 0x00000120 +#define GMAC_HW_FEATURE2 0x00000124 +#define GMAC_MDIO_ADDR 0x00000200 +#define GMAC_MDIO_DATA 0x00000204 +#define GMAC_ADDR_HIGH(reg) (0x300 + reg * 8) +#define GMAC_ADDR_LOW(reg) (0x304 + reg * 8) + +/* MAC Packet Filtering */ +#define GMAC_PACKET_FILTER_PR BIT(0) +#define GMAC_PACKET_FILTER_HMC BIT(2) +#define GMAC_PACKET_FILTER_PM BIT(4) + +#define GMAC_MAX_PERFECT_ADDRESSES 128 + +/* MAC Flow Control RX */ +#define GMAC_RX_FLOW_CTRL_RFE BIT(0) + +/* MAC Flow Control TX */ +#define GMAC_TX_FLOW_CTRL_TFE BIT(1) +#define GMAC_TX_FLOW_CTRL_PT_SHIFT 16 + +/* MAC Interrupt bitmap*/ +#define GMAC_INT_PMT_EN BIT(4) +#define GMAC_INT_LPI_EN BIT(5) + +enum dwmac4_irq_status { + time_stamp_irq = 0x00001000, + mmc_rx_csum_offload_irq = 0x00000800, + mmc_tx_irq = 0x00000400, + mmc_rx_irq = 0x00000200, + mmc_irq = 0x00000100, + pmt_irq = 0x00000010, + pcs_ane_irq = 0x00000004, + pcs_link_irq = 0x00000002, +}; + +/* MAC Auto-Neg bitmap*/ +#define GMAC_AN_CTRL_RAN BIT(9) +#define GMAC_AN_CTRL_ANE BIT(12) +#define GMAC_AN_CTRL_ELE BIT(14) +#define GMAC_AN_FD BIT(5) +#define GMAC_AN_HD BIT(6) +#define GMAC_AN_PSE_MASK GENMASK(8, 7) +#define GMAC_AN_PSE_SHIFT 7 + +/* MAC PMT bitmap */ +enum power_event { + pointer_reset = 
0x80000000, + global_unicast = 0x00000200, + wake_up_rx_frame = 0x00000040, + magic_frame = 0x00000020, + wake_up_frame_en = 0x00000004, + magic_pkt_en = 0x00000002, + power_down = 0x00000001, +}; + +/* MAC Debug bitmap */ +#define GMAC_DEBUG_TFCSTS_MASK GENMASK(18, 17) +#define GMAC_DEBUG_TFCSTS_SHIFT 17 +#define GMAC_DEBUG_TFCSTS_IDLE 0 +#define GMAC_DEBUG_TFCSTS_WAIT 1 +#define GMAC_DEBUG_TFCSTS_GEN_PAUSE 2 +#define GMAC_DEBUG_TFCSTS_XFER 3 +#define GMAC_DEBUG_TPESTS BIT(16) +#define GMAC_DEBUG_RFCFCSTS_MASK GENMASK(2, 1) +#define GMAC_DEBUG_RFCFCSTS_SHIFT 1 +#define GMAC_DEBUG_RPESTS BIT(0) + +/* MAC config */ +#define GMAC_CONFIG_IPC BIT(27) +#define GMAC_CONFIG_2K BIT(22) +#define GMAC_CONFIG_ACS BIT(20) +#define GMAC_CONFIG_BE BIT(18) +#define GMAC_CONFIG_JD BIT(17) +#define GMAC_CONFIG_JE BIT(16) +#define GMAC_CONFIG_PS BIT(15) +#define GMAC_CONFIG_FES BIT(14) +#define GMAC_CONFIG_DM BIT(13) +#define GMAC_CONFIG_DCRS BIT(9) +#define GMAC_CONFIG_TE BIT(1) +#define GMAC_CONFIG_RE BIT(0) + +/* MAC HW features0 bitmap */ +#define GMAC_HW_FEAT_ADDMAC BIT(18) +#define GMAC_HW_FEAT_RXCOESEL BIT(16) +#define GMAC_HW_FEAT_TXCOSEL BIT(14) +#define GMAC_HW_FEAT_EEESEL BIT(13) +#define GMAC_HW_FEAT_TSSEL BIT(12) +#define GMAC_HW_FEAT_MMCSEL BIT(8) +#define GMAC_HW_FEAT_MGKSEL BIT(7) +#define GMAC_HW_FEAT_RWKSEL BIT(6) +#define GMAC_HW_FEAT_SMASEL BIT(5) +#define GMAC_HW_FEAT_VLHASH BIT(4) +#define GMAC_HW_FEAT_PCSSEL BIT(3) +#define GMAC_HW_FEAT_HDSEL BIT(2) +#define GMAC_HW_FEAT_GMIISEL BIT(1) +#define GMAC_HW_FEAT_MIISEL BIT(0) + +/* MAC HW features1 bitmap */ +#define GMAC_HW_FEAT_AVSEL BIT(20) +#define GMAC_HW_TSOEN BIT(18) + +/* MAC HW features2 bitmap */ +#define GMAC_HW_FEAT_TXCHCNT GENMASK(21, 18) +#define GMAC_HW_FEAT_RXCHCNT GENMASK(15, 12) + +/* MAC HW ADDR regs */ +#define GMAC_HI_DCS GENMASK(18, 16) +#define GMAC_HI_DCS_SHIFT 16 +#define GMAC_HI_REG_AE BIT(31) + +/* MTL registers */ +#define MTL_INT_STATUS 0x00000c20 +#define MTL_INT_Q0 BIT(0) + +#define MTL_CHAN_BASE_ADDR 0x00000d00 +#define MTL_CHAN_BASE_OFFSET 0x40 +#define MTL_CHANX_BASE_ADDR(x) (MTL_CHAN_BASE_ADDR + \ + (x * MTL_CHAN_BASE_OFFSET)) + +#define MTL_CHAN_TX_OP_MODE(x) MTL_CHANX_BASE_ADDR(x) +#define MTL_CHAN_TX_DEBUG(x) (MTL_CHANX_BASE_ADDR(x) + 0x8) +#define MTL_CHAN_INT_CTRL(x) (MTL_CHANX_BASE_ADDR(x) + 0x2c) +#define MTL_CHAN_RX_OP_MODE(x) (MTL_CHANX_BASE_ADDR(x) + 0x30) +#define MTL_CHAN_RX_DEBUG(x) (MTL_CHANX_BASE_ADDR(x) + 0x38) + +#define MTL_OP_MODE_RSF BIT(5) +#define MTL_OP_MODE_TSF BIT(1) + +#define MTL_OP_MODE_TTC_MASK 0x70 +#define MTL_OP_MODE_TTC_SHIFT 4 + +#define MTL_OP_MODE_TTC_32 0 +#define MTL_OP_MODE_TTC_64 (1 << MTL_OP_MODE_TTC_SHIFT) +#define MTL_OP_MODE_TTC_96 (2 << MTL_OP_MODE_TTC_SHIFT) +#define MTL_OP_MODE_TTC_128 (3 << MTL_OP_MODE_TTC_SHIFT) +#define MTL_OP_MODE_TTC_192 (4 << MTL_OP_MODE_TTC_SHIFT) +#define MTL_OP_MODE_TTC_256 (5 << MTL_OP_MODE_TTC_SHIFT) +#define MTL_OP_MODE_TTC_384 (6 << MTL_OP_MODE_TTC_SHIFT) +#define MTL_OP_MODE_TTC_512 (7 << MTL_OP_MODE_TTC_SHIFT) + +#define MTL_OP_MODE_RTC_MASK 0x18 +#define MTL_OP_MODE_RTC_SHIFT 3 + +#define MTL_OP_MODE_RTC_32 (1 << MTL_OP_MODE_RTC_SHIFT) +#define MTL_OP_MODE_RTC_64 0 +#define MTL_OP_MODE_RTC_96 (2 << MTL_OP_MODE_RTC_SHIFT) +#define MTL_OP_MODE_RTC_128 (3 << MTL_OP_MODE_RTC_SHIFT) + +/* MTL debug */ +#define MTL_DEBUG_TXSTSFSTS BIT(5) +#define MTL_DEBUG_TXFSTS BIT(4) +#define MTL_DEBUG_TWCSTS BIT(3) + +/* MTL debug: Tx FIFO Read Controller Status */ +#define MTL_DEBUG_TRCSTS_MASK GENMASK(2, 1) +#define MTL_DEBUG_TRCSTS_SHIFT 1 
+#define MTL_DEBUG_TRCSTS_IDLE 0
+#define MTL_DEBUG_TRCSTS_READ 1
+#define MTL_DEBUG_TRCSTS_TXW 2
+#define MTL_DEBUG_TRCSTS_WRITE 3
+#define MTL_DEBUG_TXPAUSED BIT(0)
+
+/* MAC debug: GMII or MII Transmit Protocol Engine Status */
+#define MTL_DEBUG_RXFSTS_MASK GENMASK(5, 4)
+#define MTL_DEBUG_RXFSTS_SHIFT 4
+#define MTL_DEBUG_RXFSTS_EMPTY 0
+#define MTL_DEBUG_RXFSTS_BT 1
+#define MTL_DEBUG_RXFSTS_AT 2
+#define MTL_DEBUG_RXFSTS_FULL 3
+#define MTL_DEBUG_RRCSTS_MASK GENMASK(2, 1)
+#define MTL_DEBUG_RRCSTS_SHIFT 1
+#define MTL_DEBUG_RRCSTS_IDLE 0
+#define MTL_DEBUG_RRCSTS_RDATA 1
+#define MTL_DEBUG_RRCSTS_RSTAT 2
+#define MTL_DEBUG_RRCSTS_FLUSH 3
+#define MTL_DEBUG_RWCSTS BIT(0)
+
+/* MTL interrupt */
+#define MTL_RX_OVERFLOW_INT_EN BIT(24)
+#define MTL_RX_OVERFLOW_INT BIT(16)
+
+/* Default operating mode of the MAC */
+#define GMAC_CORE_INIT (GMAC_CONFIG_JD | GMAC_CONFIG_PS | GMAC_CONFIG_ACS | \
+			GMAC_CONFIG_BE | GMAC_CONFIG_DCRS)
+
+/* To dump the core regs excluding the Address Registers */
+#define GMAC_REG_NUM 132
+
+#endif /* __DWMAC4_H__ */

From 48863ce5940fa5420096c8beba44e5e1bc0c8ca1 Mon Sep 17 00:00:00 2001
From: Alexandre TORGUE
Date: Fri, 1 Apr 2016 11:37:30 +0200
Subject: [PATCH 0137/1649] stmmac: add DMA support for GMAC 4.xx

DMA behavior is linked to descriptor management:

- Descriptor mechanism (Tx as an example; it is exactly the same for Rx):
  - Useful registers:
    - DMA_CH#_TxDesc_Ring_Len: length of the transmit descriptor ring
    - DMA_CH#_TxDesc_List_Address: start address of the ring
    - DMA_CH#_TxDesc_Tail_Pointer: address of the last descriptor to send + 1
    - DMA_CH#_TxDesc_Current_App_TxDesc: address of the current descriptor

  - The descriptor Tail Pointer register contains the pointer to the
    descriptor address (N). The base address and the current descriptor
    decide the address of the current descriptor that the DMA can process.
    The descriptors up to one location less than the one indicated by the
    descriptor tail pointer (N-1) are owned by the DMA. The DMA continues
    to process the descriptors until the following condition occurs:

    "current descriptor pointer == Descriptor Tail pointer"

    Then the DMA goes into suspend mode. To start a new transfer, the
    application must write the descriptor tail pointer register and update
    the tail pointer so that:

    "current descriptor pointer < Descriptor tail pointer"

    The DMA automatically wraps around the base address when the end of
    the ring is reached.

Up to 8 DMA channels can be used, but currently only one (channel 0) is used.

Signed-off-by: Alexandre TORGUE
Signed-off-by: Giuseppe Cavallaro
Signed-off-by: David S.
Miller --- drivers/net/ethernet/stmicro/stmmac/Makefile | 2 +- drivers/net/ethernet/stmicro/stmmac/common.h | 11 + .../net/ethernet/stmicro/stmmac/dwmac4_dma.c | 354 ++++++++++++++++++ .../net/ethernet/stmicro/stmmac/dwmac4_dma.h | 202 ++++++++++ .../net/ethernet/stmicro/stmmac/dwmac4_lib.c | 225 +++++++++++ 5 files changed, 793 insertions(+), 1 deletion(-) create mode 100644 drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c create mode 100644 drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h create mode 100644 drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile index fa000fd36efc..9398acef0125 100644 --- a/drivers/net/ethernet/stmicro/stmmac/Makefile +++ b/drivers/net/ethernet/stmicro/stmmac/Makefile @@ -3,7 +3,7 @@ stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o ring_mode.o \ chain_mode.o dwmac_lib.o dwmac1000_core.o dwmac1000_dma.o \ dwmac100_core.o dwmac100_dma.o enh_desc.o norm_desc.o \ mmc_core.o stmmac_hwtstamp.o stmmac_ptp.o dwmac4_descs.o \ - $(stmmac-y) + dwmac4_dma.o dwmac4_lib.o $(stmmac-y) # Ordering matters. Generic driver must be last. obj-$(CONFIG_STMMAC_PLATFORM) += stmmac-platform.o diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h index ea7eb0d5ce98..2a5126e3d3df 100644 --- a/drivers/net/ethernet/stmicro/stmmac/common.h +++ b/drivers/net/ethernet/stmicro/stmmac/common.h @@ -41,6 +41,8 @@ /* Synopsys Core versions */ #define DWMAC_CORE_3_40 0x34 #define DWMAC_CORE_3_50 0x35 +#define DWMAC_CORE_4_00 0x40 +#define STMMAC_CHAN0 0 /* Always supported and default for all chips */ #define DMA_TX_SIZE 512 #define DMA_RX_SIZE 512 @@ -270,6 +272,7 @@ enum dma_irq_status { #define CORE_PCS_ANE_COMPLETE (1 << 5) #define CORE_PCS_LINK_STATUS (1 << 6) #define CORE_RGMII_IRQ (1 << 7) +#define CORE_IRQ_MTL_RX_OVERFLOW BIT(8) /* Physical Coding Sublayer */ struct rgmii_adv { @@ -301,8 +304,10 @@ struct dma_features { /* 802.3az - Energy-Efficient Ethernet (EEE) */ unsigned int eee; unsigned int av; + unsigned int tsoen; /* TX and RX csum */ unsigned int tx_coe; + unsigned int rx_coe; unsigned int rx_coe_type1; unsigned int rx_coe_type2; unsigned int rxfifo_over_2048; @@ -425,6 +430,11 @@ struct stmmac_dma_ops { struct dma_features *dma_cap); /* Program the HW RX Watchdog */ void (*rx_watchdog) (void __iomem *ioaddr, u32 riwt); + void (*set_tx_ring_len)(void __iomem *ioaddr, u32 len); + void (*set_rx_ring_len)(void __iomem *ioaddr, u32 len); + void (*set_rx_tail_ptr)(void __iomem *ioaddr, u32 tail_ptr, u32 chan); + void (*set_tx_tail_ptr)(void __iomem *ioaddr, u32 tail_ptr, u32 chan); + void (*enable_tso)(void __iomem *ioaddr, bool en, u32 chan); }; struct mac_device_info; @@ -473,6 +483,7 @@ struct stmmac_hwtimestamp { }; extern const struct stmmac_hwtimestamp stmmac_ptp; +extern const struct stmmac_mode_ops dwmac4_ring_mode_ops; struct mac_link { int port; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c new file mode 100644 index 000000000000..116151cd6a95 --- /dev/null +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c @@ -0,0 +1,354 @@ +/* + * This is the driver for the GMAC on-chip Ethernet controller for ST SoCs. + * DWC Ether MAC version 4.xx has been used for developing this code. + * + * This contains the functions to handle the dma. 
+ * + * Copyright (C) 2015 STMicroelectronics Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * Author: Alexandre Torgue + */ + +#include +#include "dwmac4.h" +#include "dwmac4_dma.h" + +static void dwmac4_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi) +{ + u32 value = readl(ioaddr + DMA_SYS_BUS_MODE); + int i; + + pr_info("dwmac4: Master AXI performs %s burst length\n", + (value & DMA_SYS_BUS_FB) ? "fixed" : "any"); + + if (axi->axi_lpi_en) + value |= DMA_AXI_EN_LPI; + if (axi->axi_xit_frm) + value |= DMA_AXI_LPI_XIT_FRM; + + value |= (axi->axi_wr_osr_lmt & DMA_AXI_OSR_MAX) << + DMA_AXI_WR_OSR_LMT_SHIFT; + + value |= (axi->axi_rd_osr_lmt & DMA_AXI_OSR_MAX) << + DMA_AXI_RD_OSR_LMT_SHIFT; + + /* Depending on the UNDEF bit the Master AXI will perform any burst + * length according to the BLEN programmed (by default all BLEN are + * set). + */ + for (i = 0; i < AXI_BLEN; i++) { + switch (axi->axi_blen[i]) { + case 256: + value |= DMA_AXI_BLEN256; + break; + case 128: + value |= DMA_AXI_BLEN128; + break; + case 64: + value |= DMA_AXI_BLEN64; + break; + case 32: + value |= DMA_AXI_BLEN32; + break; + case 16: + value |= DMA_AXI_BLEN16; + break; + case 8: + value |= DMA_AXI_BLEN8; + break; + case 4: + value |= DMA_AXI_BLEN4; + break; + } + } + + writel(value, ioaddr + DMA_SYS_BUS_MODE); +} + +static void dwmac4_dma_init_channel(void __iomem *ioaddr, int pbl, + u32 dma_tx_phy, u32 dma_rx_phy, + u32 channel) +{ + u32 value; + + /* set PBL for each channels. Currently we affect same configuration + * on each channel + */ + value = readl(ioaddr + DMA_CHAN_CONTROL(channel)); + value = value | DMA_BUS_MODE_PBL; + writel(value, ioaddr + DMA_CHAN_CONTROL(channel)); + + value = readl(ioaddr + DMA_CHAN_TX_CONTROL(channel)); + value = value | (pbl << DMA_BUS_MODE_PBL_SHIFT); + writel(value, ioaddr + DMA_CHAN_TX_CONTROL(channel)); + + value = readl(ioaddr + DMA_CHAN_RX_CONTROL(channel)); + value = value | (pbl << DMA_BUS_MODE_RPBL_SHIFT); + writel(value, ioaddr + DMA_CHAN_RX_CONTROL(channel)); + + /* Mask interrupts by writing to CSR7 */ + writel(DMA_CHAN_INTR_DEFAULT_MASK, ioaddr + DMA_CHAN_INTR_ENA(channel)); + + writel(dma_tx_phy, ioaddr + DMA_CHAN_TX_BASE_ADDR(channel)); + writel(dma_rx_phy, ioaddr + DMA_CHAN_RX_BASE_ADDR(channel)); +} + +static void dwmac4_dma_init(void __iomem *ioaddr, int pbl, int fb, int mb, + int aal, u32 dma_tx, u32 dma_rx, int atds) +{ + u32 value = readl(ioaddr + DMA_SYS_BUS_MODE); + int i; + + /* Set the Fixed burst mode */ + if (fb) + value |= DMA_SYS_BUS_FB; + + /* Mixed Burst has no effect when fb is set */ + if (mb) + value |= DMA_SYS_BUS_MB; + + if (aal) + value |= DMA_SYS_BUS_AAL; + + writel(value, ioaddr + DMA_SYS_BUS_MODE); + + for (i = 0; i < DMA_CHANNEL_NB_MAX; i++) + dwmac4_dma_init_channel(ioaddr, pbl, dma_tx, dma_rx, i); +} + +static void _dwmac4_dump_dma_regs(void __iomem *ioaddr, u32 channel) +{ + pr_debug(" Channel %d\n", channel); + pr_debug("\tDMA_CHAN_CONTROL, offset: 0x%x, val: 0x%x\n", 0, + readl(ioaddr + DMA_CHAN_CONTROL(channel))); + pr_debug("\tDMA_CHAN_TX_CONTROL, offset: 0x%x, val: 0x%x\n", 0x4, + readl(ioaddr + DMA_CHAN_TX_CONTROL(channel))); + pr_debug("\tDMA_CHAN_RX_CONTROL, offset: 0x%x, val: 0x%x\n", 0x8, + readl(ioaddr + DMA_CHAN_RX_CONTROL(channel))); + pr_debug("\tDMA_CHAN_TX_BASE_ADDR, offset: 0x%x, val: 0x%x\n", 0x14, + readl(ioaddr + 
DMA_CHAN_TX_BASE_ADDR(channel))); + pr_debug("\tDMA_CHAN_RX_BASE_ADDR, offset: 0x%x, val: 0x%x\n", 0x1c, + readl(ioaddr + DMA_CHAN_RX_BASE_ADDR(channel))); + pr_debug("\tDMA_CHAN_TX_END_ADDR, offset: 0x%x, val: 0x%x\n", 0x20, + readl(ioaddr + DMA_CHAN_TX_END_ADDR(channel))); + pr_debug("\tDMA_CHAN_RX_END_ADDR, offset: 0x%x, val: 0x%x\n", 0x28, + readl(ioaddr + DMA_CHAN_RX_END_ADDR(channel))); + pr_debug("\tDMA_CHAN_TX_RING_LEN, offset: 0x%x, val: 0x%x\n", 0x2c, + readl(ioaddr + DMA_CHAN_TX_RING_LEN(channel))); + pr_debug("\tDMA_CHAN_RX_RING_LEN, offset: 0x%x, val: 0x%x\n", 0x30, + readl(ioaddr + DMA_CHAN_RX_RING_LEN(channel))); + pr_debug("\tDMA_CHAN_INTR_ENA, offset: 0x%x, val: 0x%x\n", 0x34, + readl(ioaddr + DMA_CHAN_INTR_ENA(channel))); + pr_debug("\tDMA_CHAN_RX_WATCHDOG, offset: 0x%x, val: 0x%x\n", 0x38, + readl(ioaddr + DMA_CHAN_RX_WATCHDOG(channel))); + pr_debug("\tDMA_CHAN_SLOT_CTRL_STATUS, offset: 0x%x, val: 0x%x\n", 0x3c, + readl(ioaddr + DMA_CHAN_SLOT_CTRL_STATUS(channel))); + pr_debug("\tDMA_CHAN_CUR_TX_DESC, offset: 0x%x, val: 0x%x\n", 0x44, + readl(ioaddr + DMA_CHAN_CUR_TX_DESC(channel))); + pr_debug("\tDMA_CHAN_CUR_RX_DESC, offset: 0x%x, val: 0x%x\n", 0x4c, + readl(ioaddr + DMA_CHAN_CUR_RX_DESC(channel))); + pr_debug("\tDMA_CHAN_CUR_TX_BUF_ADDR, offset: 0x%x, val: 0x%x\n", 0x54, + readl(ioaddr + DMA_CHAN_CUR_TX_BUF_ADDR(channel))); + pr_debug("\tDMA_CHAN_CUR_RX_BUF_ADDR, offset: 0x%x, val: 0x%x\n", 0x5c, + readl(ioaddr + DMA_CHAN_CUR_RX_BUF_ADDR(channel))); + pr_debug("\tDMA_CHAN_STATUS, offset: 0x%x, val: 0x%x\n", 0x60, + readl(ioaddr + DMA_CHAN_STATUS(channel))); +} + +static void dwmac4_dump_dma_regs(void __iomem *ioaddr) +{ + int i; + + pr_debug(" GMAC4 DMA registers\n"); + + for (i = 0; i < DMA_CHANNEL_NB_MAX; i++) + _dwmac4_dump_dma_regs(ioaddr, i); +} + +static void dwmac4_rx_watchdog(void __iomem *ioaddr, u32 riwt) +{ + int i; + + for (i = 0; i < DMA_CHANNEL_NB_MAX; i++) + writel(riwt, ioaddr + DMA_CHAN_RX_WATCHDOG(i)); +} + +static void dwmac4_dma_chan_op_mode(void __iomem *ioaddr, int txmode, + int rxmode, u32 channel) +{ + u32 mtl_tx_op, mtl_rx_op, mtl_rx_int; + + /* Following code only done for channel 0, other channels not yet + * supported. + */ + mtl_tx_op = readl(ioaddr + MTL_CHAN_TX_OP_MODE(channel)); + + if (txmode == SF_DMA_MODE) { + pr_debug("GMAC: enable TX store and forward mode\n"); + /* Transmit COE type 2 cannot be done in cut-through mode. 
*/ + mtl_tx_op |= MTL_OP_MODE_TSF; + } else { + pr_debug("GMAC: disabling TX SF (threshold %d)\n", txmode); + mtl_tx_op &= ~MTL_OP_MODE_TSF; + mtl_tx_op &= MTL_OP_MODE_TTC_MASK; + /* Set the transmit threshold */ + if (txmode <= 32) + mtl_tx_op |= MTL_OP_MODE_TTC_32; + else if (txmode <= 64) + mtl_tx_op |= MTL_OP_MODE_TTC_64; + else if (txmode <= 96) + mtl_tx_op |= MTL_OP_MODE_TTC_96; + else if (txmode <= 128) + mtl_tx_op |= MTL_OP_MODE_TTC_128; + else if (txmode <= 192) + mtl_tx_op |= MTL_OP_MODE_TTC_192; + else if (txmode <= 256) + mtl_tx_op |= MTL_OP_MODE_TTC_256; + else if (txmode <= 384) + mtl_tx_op |= MTL_OP_MODE_TTC_384; + else + mtl_tx_op |= MTL_OP_MODE_TTC_512; + } + + writel(mtl_tx_op, ioaddr + MTL_CHAN_TX_OP_MODE(channel)); + + mtl_rx_op = readl(ioaddr + MTL_CHAN_RX_OP_MODE(channel)); + + if (rxmode == SF_DMA_MODE) { + pr_debug("GMAC: enable RX store and forward mode\n"); + mtl_rx_op |= MTL_OP_MODE_RSF; + } else { + pr_debug("GMAC: disable RX SF mode (threshold %d)\n", rxmode); + mtl_rx_op &= ~MTL_OP_MODE_RSF; + mtl_rx_op &= MTL_OP_MODE_RTC_MASK; + if (rxmode <= 32) + mtl_rx_op |= MTL_OP_MODE_RTC_32; + else if (rxmode <= 64) + mtl_rx_op |= MTL_OP_MODE_RTC_64; + else if (rxmode <= 96) + mtl_rx_op |= MTL_OP_MODE_RTC_96; + else + mtl_rx_op |= MTL_OP_MODE_RTC_128; + } + + writel(mtl_rx_op, ioaddr + MTL_CHAN_RX_OP_MODE(channel)); + + /* Enable MTL RX overflow */ + mtl_rx_int = readl(ioaddr + MTL_CHAN_INT_CTRL(channel)); + writel(mtl_rx_int | MTL_RX_OVERFLOW_INT_EN, + ioaddr + MTL_CHAN_INT_CTRL(channel)); +} + +static void dwmac4_dma_operation_mode(void __iomem *ioaddr, int txmode, + int rxmode, int rxfifosz) +{ + /* Only Channel 0 is actually configured and used */ + dwmac4_dma_chan_op_mode(ioaddr, txmode, rxmode, 0); +} + +static void dwmac4_get_hw_feature(void __iomem *ioaddr, + struct dma_features *dma_cap) +{ + u32 hw_cap = readl(ioaddr + GMAC_HW_FEATURE0); + + /* MAC HW feature0 */ + dma_cap->mbps_10_100 = (hw_cap & GMAC_HW_FEAT_MIISEL); + dma_cap->mbps_1000 = (hw_cap & GMAC_HW_FEAT_GMIISEL) >> 1; + dma_cap->half_duplex = (hw_cap & GMAC_HW_FEAT_HDSEL) >> 2; + dma_cap->hash_filter = (hw_cap & GMAC_HW_FEAT_VLHASH) >> 4; + dma_cap->multi_addr = (hw_cap & GMAC_HW_FEAT_ADDMAC) >> 18; + dma_cap->pcs = (hw_cap & GMAC_HW_FEAT_PCSSEL) >> 3; + dma_cap->sma_mdio = (hw_cap & GMAC_HW_FEAT_SMASEL) >> 5; + dma_cap->pmt_remote_wake_up = (hw_cap & GMAC_HW_FEAT_RWKSEL) >> 6; + dma_cap->pmt_magic_frame = (hw_cap & GMAC_HW_FEAT_MGKSEL) >> 7; + /* MMC */ + dma_cap->rmon = (hw_cap & GMAC_HW_FEAT_MMCSEL) >> 8; + /* IEEE 1588-2008 */ + dma_cap->atime_stamp = (hw_cap & GMAC_HW_FEAT_TSSEL) >> 12; + /* 802.3az - Energy-Efficient Ethernet (EEE) */ + dma_cap->eee = (hw_cap & GMAC_HW_FEAT_EEESEL) >> 13; + /* TX and RX csum */ + dma_cap->tx_coe = (hw_cap & GMAC_HW_FEAT_TXCOSEL) >> 14; + dma_cap->rx_coe = (hw_cap & GMAC_HW_FEAT_RXCOESEL) >> 16; + + /* MAC HW feature1 */ + hw_cap = readl(ioaddr + GMAC_HW_FEATURE1); + dma_cap->av = (hw_cap & GMAC_HW_FEAT_AVSEL) >> 20; + dma_cap->tsoen = (hw_cap & GMAC_HW_TSOEN) >> 18; + /* MAC HW feature2 */ + hw_cap = readl(ioaddr + GMAC_HW_FEATURE2); + /* TX and RX number of channels */ + dma_cap->number_rx_channel = + ((hw_cap & GMAC_HW_FEAT_RXCHCNT) >> 12) + 1; + dma_cap->number_tx_channel = + ((hw_cap & GMAC_HW_FEAT_TXCHCNT) >> 18) + 1; + + /* IEEE 1588-2002 */ + dma_cap->time_stamp = 0; +} + +/* Enable/disable TSO feature and set MSS */ +static void dwmac4_enable_tso(void __iomem *ioaddr, bool en, u32 chan) +{ + u32 value; + + if (en) { + /* enable TSO */ + value = 
readl(ioaddr + DMA_CHAN_TX_CONTROL(chan)); + writel(value | DMA_CONTROL_TSE, + ioaddr + DMA_CHAN_TX_CONTROL(chan)); + } else { + /* enable TSO */ + value = readl(ioaddr + DMA_CHAN_TX_CONTROL(chan)); + writel(value & ~DMA_CONTROL_TSE, + ioaddr + DMA_CHAN_TX_CONTROL(chan)); + } +} + +const struct stmmac_dma_ops dwmac4_dma_ops = { + .reset = dwmac4_dma_reset, + .init = dwmac4_dma_init, + .axi = dwmac4_dma_axi, + .dump_regs = dwmac4_dump_dma_regs, + .dma_mode = dwmac4_dma_operation_mode, + .enable_dma_irq = dwmac4_enable_dma_irq, + .disable_dma_irq = dwmac4_disable_dma_irq, + .start_tx = dwmac4_dma_start_tx, + .stop_tx = dwmac4_dma_stop_tx, + .start_rx = dwmac4_dma_start_rx, + .stop_rx = dwmac4_dma_stop_rx, + .dma_interrupt = dwmac4_dma_interrupt, + .get_hw_feature = dwmac4_get_hw_feature, + .rx_watchdog = dwmac4_rx_watchdog, + .set_rx_ring_len = dwmac4_set_rx_ring_len, + .set_tx_ring_len = dwmac4_set_tx_ring_len, + .set_rx_tail_ptr = dwmac4_set_rx_tail_ptr, + .set_tx_tail_ptr = dwmac4_set_tx_tail_ptr, + .enable_tso = dwmac4_enable_tso, +}; + +const struct stmmac_dma_ops dwmac410_dma_ops = { + .reset = dwmac4_dma_reset, + .init = dwmac4_dma_init, + .axi = dwmac4_dma_axi, + .dump_regs = dwmac4_dump_dma_regs, + .dma_mode = dwmac4_dma_operation_mode, + .enable_dma_irq = dwmac410_enable_dma_irq, + .disable_dma_irq = dwmac4_disable_dma_irq, + .start_tx = dwmac4_dma_start_tx, + .stop_tx = dwmac4_dma_stop_tx, + .start_rx = dwmac4_dma_start_rx, + .stop_rx = dwmac4_dma_stop_rx, + .dma_interrupt = dwmac4_dma_interrupt, + .get_hw_feature = dwmac4_get_hw_feature, + .rx_watchdog = dwmac4_rx_watchdog, + .set_rx_ring_len = dwmac4_set_rx_ring_len, + .set_tx_ring_len = dwmac4_set_tx_ring_len, + .set_rx_tail_ptr = dwmac4_set_rx_tail_ptr, + .set_tx_tail_ptr = dwmac4_set_tx_tail_ptr, + .enable_tso = dwmac4_enable_tso, +}; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h new file mode 100644 index 000000000000..1b06df749e2b --- /dev/null +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h @@ -0,0 +1,202 @@ +/* + * DWMAC4 DMA Header file. + * + * + * Copyright (C) 2007-2015 STMicroelectronics Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * Author: Alexandre Torgue + */ + +#ifndef __DWMAC4_DMA_H__ +#define __DWMAC4_DMA_H__ + +/* Define the max channel number used for tx (also rx). 
+ * dwmac4 accepts up to 8 channels for TX (and also 8 channels for RX + */ +#define DMA_CHANNEL_NB_MAX 1 + +#define DMA_BUS_MODE 0x00001000 +#define DMA_SYS_BUS_MODE 0x00001004 +#define DMA_STATUS 0x00001008 +#define DMA_DEBUG_STATUS_0 0x0000100c +#define DMA_DEBUG_STATUS_1 0x00001010 +#define DMA_DEBUG_STATUS_2 0x00001014 +#define DMA_AXI_BUS_MODE 0x00001028 + +/* DMA Bus Mode bitmap */ +#define DMA_BUS_MODE_SFT_RESET BIT(0) + +/* DMA SYS Bus Mode bitmap */ +#define DMA_BUS_MODE_SPH BIT(24) +#define DMA_BUS_MODE_PBL BIT(16) +#define DMA_BUS_MODE_PBL_SHIFT 16 +#define DMA_BUS_MODE_RPBL_SHIFT 16 +#define DMA_BUS_MODE_MB BIT(14) +#define DMA_BUS_MODE_FB BIT(0) + +/* DMA Interrupt top status */ +#define DMA_STATUS_MAC BIT(17) +#define DMA_STATUS_MTL BIT(16) +#define DMA_STATUS_CHAN7 BIT(7) +#define DMA_STATUS_CHAN6 BIT(6) +#define DMA_STATUS_CHAN5 BIT(5) +#define DMA_STATUS_CHAN4 BIT(4) +#define DMA_STATUS_CHAN3 BIT(3) +#define DMA_STATUS_CHAN2 BIT(2) +#define DMA_STATUS_CHAN1 BIT(1) +#define DMA_STATUS_CHAN0 BIT(0) + +/* DMA debug status bitmap */ +#define DMA_DEBUG_STATUS_TS_MASK 0xf +#define DMA_DEBUG_STATUS_RS_MASK 0xf + +/* DMA AXI bitmap */ +#define DMA_AXI_EN_LPI BIT(31) +#define DMA_AXI_LPI_XIT_FRM BIT(30) +#define DMA_AXI_WR_OSR_LMT GENMASK(27, 24) +#define DMA_AXI_WR_OSR_LMT_SHIFT 24 +#define DMA_AXI_RD_OSR_LMT GENMASK(19, 16) +#define DMA_AXI_RD_OSR_LMT_SHIFT 16 + +#define DMA_AXI_OSR_MAX 0xf +#define DMA_AXI_MAX_OSR_LIMIT ((DMA_AXI_OSR_MAX << DMA_AXI_WR_OSR_LMT_SHIFT) | \ + (DMA_AXI_OSR_MAX << DMA_AXI_RD_OSR_LMT_SHIFT)) + +#define DMA_SYS_BUS_MB BIT(14) +#define DMA_AXI_1KBBE BIT(13) +#define DMA_SYS_BUS_AAL BIT(12) +#define DMA_AXI_BLEN256 BIT(7) +#define DMA_AXI_BLEN128 BIT(6) +#define DMA_AXI_BLEN64 BIT(5) +#define DMA_AXI_BLEN32 BIT(4) +#define DMA_AXI_BLEN16 BIT(3) +#define DMA_AXI_BLEN8 BIT(2) +#define DMA_AXI_BLEN4 BIT(1) +#define DMA_SYS_BUS_FB BIT(0) + +#define DMA_BURST_LEN_DEFAULT (DMA_AXI_BLEN256 | DMA_AXI_BLEN128 | \ + DMA_AXI_BLEN64 | DMA_AXI_BLEN32 | \ + DMA_AXI_BLEN16 | DMA_AXI_BLEN8 | \ + DMA_AXI_BLEN4) + +#define DMA_AXI_BURST_LEN_MASK 0x000000FE + +/* Following DMA defines are chanels oriented */ +#define DMA_CHAN_BASE_ADDR 0x00001100 +#define DMA_CHAN_BASE_OFFSET 0x80 +#define DMA_CHANX_BASE_ADDR(x) (DMA_CHAN_BASE_ADDR + \ + (x * DMA_CHAN_BASE_OFFSET)) +#define DMA_CHAN_REG_NUMBER 17 + +#define DMA_CHAN_CONTROL(x) DMA_CHANX_BASE_ADDR(x) +#define DMA_CHAN_TX_CONTROL(x) (DMA_CHANX_BASE_ADDR(x) + 0x4) +#define DMA_CHAN_RX_CONTROL(x) (DMA_CHANX_BASE_ADDR(x) + 0x8) +#define DMA_CHAN_TX_BASE_ADDR(x) (DMA_CHANX_BASE_ADDR(x) + 0x14) +#define DMA_CHAN_RX_BASE_ADDR(x) (DMA_CHANX_BASE_ADDR(x) + 0x1c) +#define DMA_CHAN_TX_END_ADDR(x) (DMA_CHANX_BASE_ADDR(x) + 0x20) +#define DMA_CHAN_RX_END_ADDR(x) (DMA_CHANX_BASE_ADDR(x) + 0x28) +#define DMA_CHAN_TX_RING_LEN(x) (DMA_CHANX_BASE_ADDR(x) + 0x2c) +#define DMA_CHAN_RX_RING_LEN(x) (DMA_CHANX_BASE_ADDR(x) + 0x30) +#define DMA_CHAN_INTR_ENA(x) (DMA_CHANX_BASE_ADDR(x) + 0x34) +#define DMA_CHAN_RX_WATCHDOG(x) (DMA_CHANX_BASE_ADDR(x) + 0x38) +#define DMA_CHAN_SLOT_CTRL_STATUS(x) (DMA_CHANX_BASE_ADDR(x) + 0x3c) +#define DMA_CHAN_CUR_TX_DESC(x) (DMA_CHANX_BASE_ADDR(x) + 0x44) +#define DMA_CHAN_CUR_RX_DESC(x) (DMA_CHANX_BASE_ADDR(x) + 0x4c) +#define DMA_CHAN_CUR_TX_BUF_ADDR(x) (DMA_CHANX_BASE_ADDR(x) + 0x54) +#define DMA_CHAN_CUR_RX_BUF_ADDR(x) (DMA_CHANX_BASE_ADDR(x) + 0x5c) +#define DMA_CHAN_STATUS(x) (DMA_CHANX_BASE_ADDR(x) + 0x60) + +/* DMA Control X */ +#define DMA_CONTROL_MSS_MASK GENMASK(13, 0) + +/* DMA Tx Channel X Control 
register defines */ +#define DMA_CONTROL_TSE BIT(12) +#define DMA_CONTROL_OSP BIT(4) +#define DMA_CONTROL_ST BIT(0) + +/* DMA Rx Channel X Control register defines */ +#define DMA_CONTROL_SR BIT(0) + +/* Interrupt status per channel */ +#define DMA_CHAN_STATUS_REB GENMASK(21, 19) +#define DMA_CHAN_STATUS_REB_SHIFT 19 +#define DMA_CHAN_STATUS_TEB GENMASK(18, 16) +#define DMA_CHAN_STATUS_TEB_SHIFT 16 +#define DMA_CHAN_STATUS_NIS BIT(15) +#define DMA_CHAN_STATUS_AIS BIT(14) +#define DMA_CHAN_STATUS_CDE BIT(13) +#define DMA_CHAN_STATUS_FBE BIT(12) +#define DMA_CHAN_STATUS_ERI BIT(11) +#define DMA_CHAN_STATUS_ETI BIT(10) +#define DMA_CHAN_STATUS_RWT BIT(9) +#define DMA_CHAN_STATUS_RPS BIT(8) +#define DMA_CHAN_STATUS_RBU BIT(7) +#define DMA_CHAN_STATUS_RI BIT(6) +#define DMA_CHAN_STATUS_TBU BIT(2) +#define DMA_CHAN_STATUS_TPS BIT(1) +#define DMA_CHAN_STATUS_TI BIT(0) + +/* Interrupt enable bits per channel */ +#define DMA_CHAN_INTR_ENA_NIE BIT(16) +#define DMA_CHAN_INTR_ENA_AIE BIT(15) +#define DMA_CHAN_INTR_ENA_NIE_4_10 BIT(15) +#define DMA_CHAN_INTR_ENA_AIE_4_10 BIT(14) +#define DMA_CHAN_INTR_ENA_CDE BIT(13) +#define DMA_CHAN_INTR_ENA_FBE BIT(12) +#define DMA_CHAN_INTR_ENA_ERE BIT(11) +#define DMA_CHAN_INTR_ENA_ETE BIT(10) +#define DMA_CHAN_INTR_ENA_RWE BIT(9) +#define DMA_CHAN_INTR_ENA_RSE BIT(8) +#define DMA_CHAN_INTR_ENA_RBUE BIT(7) +#define DMA_CHAN_INTR_ENA_RIE BIT(6) +#define DMA_CHAN_INTR_ENA_TBUE BIT(2) +#define DMA_CHAN_INTR_ENA_TSE BIT(1) +#define DMA_CHAN_INTR_ENA_TIE BIT(0) + +#define DMA_CHAN_INTR_NORMAL (DMA_CHAN_INTR_ENA_NIE | \ + DMA_CHAN_INTR_ENA_RIE | \ + DMA_CHAN_INTR_ENA_TIE) + +#define DMA_CHAN_INTR_ABNORMAL (DMA_CHAN_INTR_ENA_AIE | \ + DMA_CHAN_INTR_ENA_FBE) +/* DMA default interrupt mask for 4.00 */ +#define DMA_CHAN_INTR_DEFAULT_MASK (DMA_CHAN_INTR_NORMAL | \ + DMA_CHAN_INTR_ABNORMAL) + +#define DMA_CHAN_INTR_NORMAL_4_10 (DMA_CHAN_INTR_ENA_NIE_4_10 | \ + DMA_CHAN_INTR_ENA_RIE | \ + DMA_CHAN_INTR_ENA_TIE) + +#define DMA_CHAN_INTR_ABNORMAL_4_10 (DMA_CHAN_INTR_ENA_AIE_4_10 | \ + DMA_CHAN_INTR_ENA_FBE) +/* DMA default interrupt mask for 4.10a */ +#define DMA_CHAN_INTR_DEFAULT_MASK_4_10 (DMA_CHAN_INTR_NORMAL_4_10 | \ + DMA_CHAN_INTR_ABNORMAL_4_10) + +/* channel 0 specific fields */ +#define DMA_CHAN0_DBG_STAT_TPS GENMASK(15, 12) +#define DMA_CHAN0_DBG_STAT_TPS_SHIFT 12 +#define DMA_CHAN0_DBG_STAT_RPS GENMASK(11, 8) +#define DMA_CHAN0_DBG_STAT_RPS_SHIFT 8 + +int dwmac4_dma_reset(void __iomem *ioaddr); +void dwmac4_enable_dma_transmission(void __iomem *ioaddr, u32 tail_ptr); +void dwmac4_enable_dma_irq(void __iomem *ioaddr); +void dwmac410_enable_dma_irq(void __iomem *ioaddr); +void dwmac4_disable_dma_irq(void __iomem *ioaddr); +void dwmac4_dma_start_tx(void __iomem *ioaddr); +void dwmac4_dma_stop_tx(void __iomem *ioaddr); +void dwmac4_dma_start_rx(void __iomem *ioaddr); +void dwmac4_dma_stop_rx(void __iomem *ioaddr); +int dwmac4_dma_interrupt(void __iomem *ioaddr, + struct stmmac_extra_stats *x); +void dwmac4_set_rx_ring_len(void __iomem *ioaddr, u32 len); +void dwmac4_set_tx_ring_len(void __iomem *ioaddr, u32 len); +void dwmac4_set_rx_tail_ptr(void __iomem *ioaddr, u32 tail_ptr, u32 chan); +void dwmac4_set_tx_tail_ptr(void __iomem *ioaddr, u32 tail_ptr, u32 chan); + +#endif /* __DWMAC4_DMA_H__ */ diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c new file mode 100644 index 000000000000..c7326d5b2f43 --- /dev/null +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c @@ -0,0 +1,225 @@ +/* + * Copyright (C) 
2007-2015 STMicroelectronics Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * Author: Alexandre Torgue + */ + +#include +#include +#include "common.h" +#include "dwmac4_dma.h" +#include "dwmac4.h" + +int dwmac4_dma_reset(void __iomem *ioaddr) +{ + u32 value = readl(ioaddr + DMA_BUS_MODE); + int limit; + + /* DMA SW reset */ + value |= DMA_BUS_MODE_SFT_RESET; + writel(value, ioaddr + DMA_BUS_MODE); + limit = 10; + while (limit--) { + if (!(readl(ioaddr + DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET)) + break; + mdelay(10); + } + + if (limit < 0) + return -EBUSY; + + return 0; +} + +void dwmac4_set_rx_tail_ptr(void __iomem *ioaddr, u32 tail_ptr, u32 chan) +{ + writel(tail_ptr, ioaddr + DMA_CHAN_RX_END_ADDR(0)); +} + +void dwmac4_set_tx_tail_ptr(void __iomem *ioaddr, u32 tail_ptr, u32 chan) +{ + writel(tail_ptr, ioaddr + DMA_CHAN_TX_END_ADDR(0)); +} + +void dwmac4_dma_start_tx(void __iomem *ioaddr) +{ + u32 value = readl(ioaddr + DMA_CHAN_TX_CONTROL(STMMAC_CHAN0)); + + value |= DMA_CONTROL_ST; + writel(value, ioaddr + DMA_CHAN_TX_CONTROL(STMMAC_CHAN0)); + + value = readl(ioaddr + GMAC_CONFIG); + value |= GMAC_CONFIG_TE; + writel(value, ioaddr + GMAC_CONFIG); +} + +void dwmac4_dma_stop_tx(void __iomem *ioaddr) +{ + u32 value = readl(ioaddr + DMA_CHAN_TX_CONTROL(STMMAC_CHAN0)); + + value &= ~DMA_CONTROL_ST; + writel(value, ioaddr + DMA_CHAN_TX_CONTROL(STMMAC_CHAN0)); + + value = readl(ioaddr + GMAC_CONFIG); + value &= ~GMAC_CONFIG_TE; + writel(value, ioaddr + GMAC_CONFIG); +} + +void dwmac4_dma_start_rx(void __iomem *ioaddr) +{ + u32 value = readl(ioaddr + DMA_CHAN_RX_CONTROL(STMMAC_CHAN0)); + + value |= DMA_CONTROL_SR; + + writel(value, ioaddr + DMA_CHAN_RX_CONTROL(STMMAC_CHAN0)); + + value = readl(ioaddr + GMAC_CONFIG); + value |= GMAC_CONFIG_RE; + writel(value, ioaddr + GMAC_CONFIG); +} + +void dwmac4_dma_stop_rx(void __iomem *ioaddr) +{ + u32 value = readl(ioaddr + DMA_CHAN_RX_CONTROL(STMMAC_CHAN0)); + + value &= ~DMA_CONTROL_SR; + writel(value, ioaddr + DMA_CHAN_RX_CONTROL(STMMAC_CHAN0)); + + value = readl(ioaddr + GMAC_CONFIG); + value &= ~GMAC_CONFIG_RE; + writel(value, ioaddr + GMAC_CONFIG); +} + +void dwmac4_set_tx_ring_len(void __iomem *ioaddr, u32 len) +{ + writel(len, ioaddr + DMA_CHAN_TX_RING_LEN(STMMAC_CHAN0)); +} + +void dwmac4_set_rx_ring_len(void __iomem *ioaddr, u32 len) +{ + writel(len, ioaddr + DMA_CHAN_RX_RING_LEN(STMMAC_CHAN0)); +} + +void dwmac4_enable_dma_irq(void __iomem *ioaddr) +{ + writel(DMA_CHAN_INTR_DEFAULT_MASK, ioaddr + + DMA_CHAN_INTR_ENA(STMMAC_CHAN0)); +} + +void dwmac410_enable_dma_irq(void __iomem *ioaddr) +{ + writel(DMA_CHAN_INTR_DEFAULT_MASK_4_10, + ioaddr + DMA_CHAN_INTR_ENA(STMMAC_CHAN0)); +} + +void dwmac4_disable_dma_irq(void __iomem *ioaddr) +{ + writel(0, ioaddr + DMA_CHAN_INTR_ENA(STMMAC_CHAN0)); +} + +int dwmac4_dma_interrupt(void __iomem *ioaddr, + struct stmmac_extra_stats *x) +{ + int ret = 0; + + u32 intr_status = readl(ioaddr + DMA_CHAN_STATUS(0)); + + /* ABNORMAL interrupts */ + if (unlikely(intr_status & DMA_CHAN_STATUS_AIS)) { + if (unlikely(intr_status & DMA_CHAN_STATUS_RBU)) + x->rx_buf_unav_irq++; + if (unlikely(intr_status & DMA_CHAN_STATUS_RPS)) + x->rx_process_stopped_irq++; + if (unlikely(intr_status & DMA_CHAN_STATUS_RWT)) + x->rx_watchdog_irq++; + if (unlikely(intr_status & DMA_CHAN_STATUS_ETI)) + x->tx_early_irq++; + if (unlikely(intr_status & 
DMA_CHAN_STATUS_TPS)) { + x->tx_process_stopped_irq++; + ret = tx_hard_error; + } + if (unlikely(intr_status & DMA_CHAN_STATUS_FBE)) { + x->fatal_bus_error_irq++; + ret = tx_hard_error; + } + } + /* TX/RX NORMAL interrupts */ + if (likely(intr_status & DMA_CHAN_STATUS_NIS)) { + x->normal_irq_n++; + if (likely(intr_status & DMA_CHAN_STATUS_RI)) { + u32 value; + + value = readl(ioaddr + DMA_CHAN_INTR_ENA(STMMAC_CHAN0)); + /* to schedule NAPI on real RIE event. */ + if (likely(value & DMA_CHAN_INTR_ENA_RIE)) { + x->rx_normal_irq_n++; + ret |= handle_rx; + } + } + if (likely(intr_status & DMA_CHAN_STATUS_TI)) { + x->tx_normal_irq_n++; + ret |= handle_tx; + } + if (unlikely(intr_status & DMA_CHAN_STATUS_ERI)) + x->rx_early_irq++; + } + + /* Clear the interrupt by writing a logic 1 to the chanX interrupt + * status [21-0] expect reserved bits [5-3] + */ + writel((intr_status & 0x3fffc7), + ioaddr + DMA_CHAN_STATUS(STMMAC_CHAN0)); + + return ret; +} + +void stmmac_dwmac4_set_mac_addr(void __iomem *ioaddr, u8 addr[6], + unsigned int high, unsigned int low) +{ + unsigned long data; + + data = (addr[5] << 8) | addr[4]; + /* For MAC Addr registers se have to set the Address Enable (AE) + * bit that has no effect on the High Reg 0 where the bit 31 (MO) + * is RO. + */ + data |= (STMMAC_CHAN0 << GMAC_HI_DCS_SHIFT); + writel(data | GMAC_HI_REG_AE, ioaddr + high); + data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0]; + writel(data, ioaddr + low); +} + +/* Enable disable MAC RX/TX */ +void stmmac_dwmac4_set_mac(void __iomem *ioaddr, bool enable) +{ + u32 value = readl(ioaddr + GMAC_CONFIG); + + if (enable) + value |= GMAC_CONFIG_RE | GMAC_CONFIG_TE; + else + value &= ~(GMAC_CONFIG_TE | GMAC_CONFIG_RE); + + writel(value, ioaddr + GMAC_CONFIG); +} + +void stmmac_dwmac4_get_mac_addr(void __iomem *ioaddr, unsigned char *addr, + unsigned int high, unsigned int low) +{ + unsigned int hi_addr, lo_addr; + + /* Read the MAC address from the hardware */ + hi_addr = readl(ioaddr + high); + lo_addr = readl(ioaddr + low); + + /* Extract the MAC address from the high and low words */ + addr[0] = lo_addr & 0xff; + addr[1] = (lo_addr >> 8) & 0xff; + addr[2] = (lo_addr >> 16) & 0xff; + addr[3] = (lo_addr >> 24) & 0xff; + addr[4] = hi_addr & 0xff; + addr[5] = (hi_addr >> 8) & 0xff; +} From 477286b53f5576ddec0a4df0f3d0c4bd7a0ed165 Mon Sep 17 00:00:00 2001 From: Alexandre TORGUE Date: Fri, 1 Apr 2016 11:37:31 +0200 Subject: [PATCH 0138/1649] stmmac: add GMAC4 core support This is the initial support for GMAC4 that includes the main callbacks to setup the core module: including Csum, basic filtering, mac address and interrupt (MMC, MTL, PMT) No LPI added. Signed-off-by: Alexandre TORGUE Signed-off-by: Giuseppe Cavallaro Signed-off-by: David S. 
Miller --- drivers/net/ethernet/stmicro/stmmac/Makefile | 2 +- drivers/net/ethernet/stmicro/stmmac/common.h | 10 +- drivers/net/ethernet/stmicro/stmmac/dwmac4.h | 31 ++ .../net/ethernet/stmicro/stmmac/dwmac4_core.c | 407 ++++++++++++++++++ 4 files changed, 447 insertions(+), 3 deletions(-) create mode 100644 drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile index 9398acef0125..0fb362d5a722 100644 --- a/drivers/net/ethernet/stmicro/stmmac/Makefile +++ b/drivers/net/ethernet/stmicro/stmmac/Makefile @@ -3,7 +3,7 @@ stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o ring_mode.o \ chain_mode.o dwmac_lib.o dwmac1000_core.o dwmac1000_dma.o \ dwmac100_core.o dwmac100_dma.o enh_desc.o norm_desc.o \ mmc_core.o stmmac_hwtstamp.o stmmac_ptp.o dwmac4_descs.o \ - dwmac4_dma.o dwmac4_lib.o $(stmmac-y) + dwmac4_dma.o dwmac4_lib.o dwmac4_core.o $(stmmac-y) # Ordering matters. Generic driver must be last. obj-$(CONFIG_STMMAC_PLATFORM) += stmmac-platform.o diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h index 2a5126e3d3df..eabe86bd8f56 100644 --- a/drivers/net/ethernet/stmicro/stmmac/common.h +++ b/drivers/net/ethernet/stmicro/stmmac/common.h @@ -527,15 +527,21 @@ struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr, int mcbins, int perfect_uc_entries, int *synopsys_id); struct mac_device_info *dwmac100_setup(void __iomem *ioaddr, int *synopsys_id); - +struct mac_device_info *dwmac4_setup(void __iomem *ioaddr, int mcbins, + int perfect_uc_entries, int *synopsys_id); void stmmac_set_mac_addr(void __iomem *ioaddr, u8 addr[6], unsigned int high, unsigned int low); void stmmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr, unsigned int high, unsigned int low); - void stmmac_set_mac(void __iomem *ioaddr, bool enable); +void stmmac_dwmac4_set_mac_addr(void __iomem *ioaddr, u8 addr[6], + unsigned int high, unsigned int low); +void stmmac_dwmac4_get_mac_addr(void __iomem *ioaddr, unsigned char *addr, + unsigned int high, unsigned int low); +void stmmac_dwmac4_set_mac(void __iomem *ioaddr, bool enable); + void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr); extern const struct stmmac_mode_ops ring_mode_ops; extern const struct stmmac_mode_ops chain_mode_ops; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h index c12f15c9b351..bc50952a18e7 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h @@ -221,4 +221,35 @@ enum power_event { /* To dump the core regs excluding the Address Registers */ #define GMAC_REG_NUM 132 +/* MTL debug */ +#define MTL_DEBUG_TXSTSFSTS BIT(5) +#define MTL_DEBUG_TXFSTS BIT(4) +#define MTL_DEBUG_TWCSTS BIT(3) + +/* MTL debug: Tx FIFO Read Controller Status */ +#define MTL_DEBUG_TRCSTS_MASK GENMASK(2, 1) +#define MTL_DEBUG_TRCSTS_SHIFT 1 +#define MTL_DEBUG_TRCSTS_IDLE 0 +#define MTL_DEBUG_TRCSTS_READ 1 +#define MTL_DEBUG_TRCSTS_TXW 2 +#define MTL_DEBUG_TRCSTS_WRITE 3 +#define MTL_DEBUG_TXPAUSED BIT(0) + +/* MAC debug: GMII or MII Transmit Protocol Engine Status */ +#define MTL_DEBUG_RXFSTS_MASK GENMASK(5, 4) +#define MTL_DEBUG_RXFSTS_SHIFT 4 +#define MTL_DEBUG_RXFSTS_EMPTY 0 +#define MTL_DEBUG_RXFSTS_BT 1 +#define MTL_DEBUG_RXFSTS_AT 2 +#define MTL_DEBUG_RXFSTS_FULL 3 +#define MTL_DEBUG_RRCSTS_MASK GENMASK(2, 1) +#define MTL_DEBUG_RRCSTS_SHIFT 1 +#define MTL_DEBUG_RRCSTS_IDLE 0 +#define 
MTL_DEBUG_RRCSTS_RDATA 1 +#define MTL_DEBUG_RRCSTS_RSTAT 2 +#define MTL_DEBUG_RRCSTS_FLUSH 3 +#define MTL_DEBUG_RWCSTS BIT(0) + +extern const struct stmmac_dma_ops dwmac4_dma_ops; +extern const struct stmmac_dma_ops dwmac410_dma_ops; #endif /* __DWMAC4_H__ */ diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c new file mode 100644 index 000000000000..4f7283d05588 --- /dev/null +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c @@ -0,0 +1,407 @@ +/* + * This is the driver for the GMAC on-chip Ethernet controller for ST SoCs. + * DWC Ether MAC version 4.00 has been used for developing this code. + * + * This only implements the mac core functions for this chip. + * + * Copyright (C) 2015 STMicroelectronics Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * Author: Alexandre Torgue + */ + +#include +#include +#include +#include +#include "dwmac4.h" + +static void dwmac4_core_init(struct mac_device_info *hw, int mtu) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value = readl(ioaddr + GMAC_CONFIG); + + value |= GMAC_CORE_INIT; + + if (mtu > 1500) + value |= GMAC_CONFIG_2K; + if (mtu > 2000) + value |= GMAC_CONFIG_JE; + + writel(value, ioaddr + GMAC_CONFIG); + + /* Mask GMAC interrupts */ + writel(GMAC_INT_PMT_EN, ioaddr + GMAC_INT_EN); +} + +static void dwmac4_dump_regs(struct mac_device_info *hw) +{ + void __iomem *ioaddr = hw->pcsr; + int i; + + pr_debug("\tDWMAC4 regs (base addr = 0x%p)\n", ioaddr); + + for (i = 0; i < GMAC_REG_NUM; i++) { + int offset = i * 4; + + pr_debug("\tReg No. %d (offset 0x%x): 0x%08x\n", i, + offset, readl(ioaddr + offset)); + } +} + +static int dwmac4_rx_ipc_enable(struct mac_device_info *hw) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value = readl(ioaddr + GMAC_CONFIG); + + if (hw->rx_csum) + value |= GMAC_CONFIG_IPC; + else + value &= ~GMAC_CONFIG_IPC; + + writel(value, ioaddr + GMAC_CONFIG); + + value = readl(ioaddr + GMAC_CONFIG); + + return !!(value & GMAC_CONFIG_IPC); +} + +static void dwmac4_pmt(struct mac_device_info *hw, unsigned long mode) +{ + void __iomem *ioaddr = hw->pcsr; + unsigned int pmt = 0; + + if (mode & WAKE_MAGIC) { + pr_debug("GMAC: WOL Magic frame\n"); + pmt |= power_down | magic_pkt_en; + } + if (mode & WAKE_UCAST) { + pr_debug("GMAC: WOL on global unicast\n"); + pmt |= global_unicast; + } + + writel(pmt, ioaddr + GMAC_PMT); +} + +static void dwmac4_set_umac_addr(struct mac_device_info *hw, + unsigned char *addr, unsigned int reg_n) +{ + void __iomem *ioaddr = hw->pcsr; + + stmmac_dwmac4_set_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n), + GMAC_ADDR_LOW(reg_n)); +} + +static void dwmac4_get_umac_addr(struct mac_device_info *hw, + unsigned char *addr, unsigned int reg_n) +{ + void __iomem *ioaddr = hw->pcsr; + + stmmac_dwmac4_get_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n), + GMAC_ADDR_LOW(reg_n)); +} + +static void dwmac4_set_filter(struct mac_device_info *hw, + struct net_device *dev) +{ + void __iomem *ioaddr = (void __iomem *)dev->base_addr; + unsigned int value = 0; + + if (dev->flags & IFF_PROMISC) { + value = GMAC_PACKET_FILTER_PR; + } else if ((dev->flags & IFF_ALLMULTI) || + (netdev_mc_count(dev) > HASH_TABLE_SIZE)) { + /* Pass all multi */ + value = GMAC_PACKET_FILTER_PM; + /* Set the 64 bits of the HASH tab. 
To be updated if taller + * hash table is used + */ + writel(0xffffffff, ioaddr + GMAC_HASH_TAB_0_31); + writel(0xffffffff, ioaddr + GMAC_HASH_TAB_32_63); + } else if (!netdev_mc_empty(dev)) { + u32 mc_filter[2]; + struct netdev_hw_addr *ha; + + /* Hash filter for multicast */ + value = GMAC_PACKET_FILTER_HMC; + + memset(mc_filter, 0, sizeof(mc_filter)); + netdev_for_each_mc_addr(ha, dev) { + /* The upper 6 bits of the calculated CRC are used to + * index the content of the Hash Table Reg 0 and 1. + */ + int bit_nr = + (bitrev32(~crc32_le(~0, ha->addr, 6)) >> 26); + /* The most significant bit determines the register + * to use while the other 5 bits determines the bit + * within the selected register + */ + mc_filter[bit_nr >> 5] |= (1 << (bit_nr & 0x1F)); + } + writel(mc_filter[0], ioaddr + GMAC_HASH_TAB_0_31); + writel(mc_filter[1], ioaddr + GMAC_HASH_TAB_32_63); + } + + /* Handle multiple unicast addresses */ + if (netdev_uc_count(dev) > GMAC_MAX_PERFECT_ADDRESSES) { + /* Switch to promiscuous mode if more than 128 addrs + * are required + */ + value |= GMAC_PACKET_FILTER_PR; + } else if (!netdev_uc_empty(dev)) { + int reg = 1; + struct netdev_hw_addr *ha; + + netdev_for_each_uc_addr(ha, dev) { + dwmac4_set_umac_addr(ioaddr, ha->addr, reg); + reg++; + } + } + + writel(value, ioaddr + GMAC_PACKET_FILTER); +} + +static void dwmac4_flow_ctrl(struct mac_device_info *hw, unsigned int duplex, + unsigned int fc, unsigned int pause_time) +{ + void __iomem *ioaddr = hw->pcsr; + u32 channel = STMMAC_CHAN0; /* FIXME */ + unsigned int flow = 0; + + pr_debug("GMAC Flow-Control:\n"); + if (fc & FLOW_RX) { + pr_debug("\tReceive Flow-Control ON\n"); + flow |= GMAC_RX_FLOW_CTRL_RFE; + writel(flow, ioaddr + GMAC_RX_FLOW_CTRL); + } + if (fc & FLOW_TX) { + pr_debug("\tTransmit Flow-Control ON\n"); + flow |= GMAC_TX_FLOW_CTRL_TFE; + writel(flow, ioaddr + GMAC_QX_TX_FLOW_CTRL(channel)); + + if (duplex) { + pr_debug("\tduplex mode: PAUSE %d\n", pause_time); + flow |= (pause_time << GMAC_TX_FLOW_CTRL_PT_SHIFT); + writel(flow, ioaddr + GMAC_QX_TX_FLOW_CTRL(channel)); + } + } +} + +static void dwmac4_ctrl_ane(struct mac_device_info *hw, bool restart) +{ + void __iomem *ioaddr = hw->pcsr; + + /* auto negotiation enable and External Loopback enable */ + u32 value = GMAC_AN_CTRL_ANE | GMAC_AN_CTRL_ELE; + + if (restart) + value |= GMAC_AN_CTRL_RAN; + + writel(value, ioaddr + GMAC_AN_CTRL); +} + +static void dwmac4_get_adv(struct mac_device_info *hw, struct rgmii_adv *adv) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value = readl(ioaddr + GMAC_AN_ADV); + + if (value & GMAC_AN_FD) + adv->duplex = DUPLEX_FULL; + if (value & GMAC_AN_HD) + adv->duplex |= DUPLEX_HALF; + + adv->pause = (value & GMAC_AN_PSE_MASK) >> GMAC_AN_PSE_SHIFT; + + value = readl(ioaddr + GMAC_AN_LPA); + + if (value & GMAC_AN_FD) + adv->lp_duplex = DUPLEX_FULL; + if (value & GMAC_AN_HD) + adv->lp_duplex = DUPLEX_HALF; + + adv->lp_pause = (value & GMAC_AN_PSE_MASK) >> GMAC_AN_PSE_SHIFT; +} + +static int dwmac4_irq_status(struct mac_device_info *hw, + struct stmmac_extra_stats *x) +{ + void __iomem *ioaddr = hw->pcsr; + u32 mtl_int_qx_status; + u32 intr_status; + int ret = 0; + + intr_status = readl(ioaddr + GMAC_INT_STATUS); + + /* Not used events (e.g. MMC interrupts) are not handled. 
*/ + if ((intr_status & mmc_tx_irq)) + x->mmc_tx_irq_n++; + if (unlikely(intr_status & mmc_rx_irq)) + x->mmc_rx_irq_n++; + if (unlikely(intr_status & mmc_rx_csum_offload_irq)) + x->mmc_rx_csum_offload_irq_n++; + /* Clear the PMT bits 5 and 6 by reading the PMT status reg */ + if (unlikely(intr_status & pmt_irq)) { + readl(ioaddr + GMAC_PMT); + x->irq_receive_pmt_irq_n++; + } + + if ((intr_status & pcs_ane_irq) || (intr_status & pcs_link_irq)) { + readl(ioaddr + GMAC_AN_STATUS); + x->irq_pcs_ane_n++; + } + + mtl_int_qx_status = readl(ioaddr + MTL_INT_STATUS); + /* Check MTL Interrupt: Currently only one queue is used: Q0. */ + if (mtl_int_qx_status & MTL_INT_Q0) { + /* read Queue 0 Interrupt status */ + u32 status = readl(ioaddr + MTL_CHAN_INT_CTRL(STMMAC_CHAN0)); + + if (status & MTL_RX_OVERFLOW_INT) { + /* clear Interrupt */ + writel(status | MTL_RX_OVERFLOW_INT, + ioaddr + MTL_CHAN_INT_CTRL(STMMAC_CHAN0)); + ret = CORE_IRQ_MTL_RX_OVERFLOW; + } + } + + return ret; +} + +static void dwmac4_debug(void __iomem *ioaddr, struct stmmac_extra_stats *x) +{ + u32 value; + + /* Currently only channel 0 is supported */ + value = readl(ioaddr + MTL_CHAN_TX_DEBUG(STMMAC_CHAN0)); + + if (value & MTL_DEBUG_TXSTSFSTS) + x->mtl_tx_status_fifo_full++; + if (value & MTL_DEBUG_TXFSTS) + x->mtl_tx_fifo_not_empty++; + if (value & MTL_DEBUG_TWCSTS) + x->mmtl_fifo_ctrl++; + if (value & MTL_DEBUG_TRCSTS_MASK) { + u32 trcsts = (value & MTL_DEBUG_TRCSTS_MASK) + >> MTL_DEBUG_TRCSTS_SHIFT; + if (trcsts == MTL_DEBUG_TRCSTS_WRITE) + x->mtl_tx_fifo_read_ctrl_write++; + else if (trcsts == MTL_DEBUG_TRCSTS_TXW) + x->mtl_tx_fifo_read_ctrl_wait++; + else if (trcsts == MTL_DEBUG_TRCSTS_READ) + x->mtl_tx_fifo_read_ctrl_read++; + else + x->mtl_tx_fifo_read_ctrl_idle++; + } + if (value & MTL_DEBUG_TXPAUSED) + x->mac_tx_in_pause++; + + value = readl(ioaddr + MTL_CHAN_RX_DEBUG(STMMAC_CHAN0)); + + if (value & MTL_DEBUG_RXFSTS_MASK) { + u32 rxfsts = (value & MTL_DEBUG_RXFSTS_MASK) + >> MTL_DEBUG_RRCSTS_SHIFT; + + if (rxfsts == MTL_DEBUG_RXFSTS_FULL) + x->mtl_rx_fifo_fill_level_full++; + else if (rxfsts == MTL_DEBUG_RXFSTS_AT) + x->mtl_rx_fifo_fill_above_thresh++; + else if (rxfsts == MTL_DEBUG_RXFSTS_BT) + x->mtl_rx_fifo_fill_below_thresh++; + else + x->mtl_rx_fifo_fill_level_empty++; + } + if (value & MTL_DEBUG_RRCSTS_MASK) { + u32 rrcsts = (value & MTL_DEBUG_RRCSTS_MASK) >> + MTL_DEBUG_RRCSTS_SHIFT; + + if (rrcsts == MTL_DEBUG_RRCSTS_FLUSH) + x->mtl_rx_fifo_read_ctrl_flush++; + else if (rrcsts == MTL_DEBUG_RRCSTS_RSTAT) + x->mtl_rx_fifo_read_ctrl_read_data++; + else if (rrcsts == MTL_DEBUG_RRCSTS_RDATA) + x->mtl_rx_fifo_read_ctrl_status++; + else + x->mtl_rx_fifo_read_ctrl_idle++; + } + if (value & MTL_DEBUG_RWCSTS) + x->mtl_rx_fifo_ctrl_active++; + + /* GMAC debug */ + value = readl(ioaddr + GMAC_DEBUG); + + if (value & GMAC_DEBUG_TFCSTS_MASK) { + u32 tfcsts = (value & GMAC_DEBUG_TFCSTS_MASK) + >> GMAC_DEBUG_TFCSTS_SHIFT; + + if (tfcsts == GMAC_DEBUG_TFCSTS_XFER) + x->mac_tx_frame_ctrl_xfer++; + else if (tfcsts == GMAC_DEBUG_TFCSTS_GEN_PAUSE) + x->mac_tx_frame_ctrl_pause++; + else if (tfcsts == GMAC_DEBUG_TFCSTS_WAIT) + x->mac_tx_frame_ctrl_wait++; + else + x->mac_tx_frame_ctrl_idle++; + } + if (value & GMAC_DEBUG_TPESTS) + x->mac_gmii_tx_proto_engine++; + if (value & GMAC_DEBUG_RFCFCSTS_MASK) + x->mac_rx_frame_ctrl_fifo = (value & GMAC_DEBUG_RFCFCSTS_MASK) + >> GMAC_DEBUG_RFCFCSTS_SHIFT; + if (value & GMAC_DEBUG_RPESTS) + x->mac_gmii_rx_proto_engine++; +} + +static const struct stmmac_ops dwmac4_ops = { + .core_init = 
dwmac4_core_init, + .rx_ipc = dwmac4_rx_ipc_enable, + .dump_regs = dwmac4_dump_regs, + .host_irq_status = dwmac4_irq_status, + .flow_ctrl = dwmac4_flow_ctrl, + .pmt = dwmac4_pmt, + .set_umac_addr = dwmac4_set_umac_addr, + .get_umac_addr = dwmac4_get_umac_addr, + .ctrl_ane = dwmac4_ctrl_ane, + .get_adv = dwmac4_get_adv, + .debug = dwmac4_debug, + .set_filter = dwmac4_set_filter, +}; + +struct mac_device_info *dwmac4_setup(void __iomem *ioaddr, int mcbins, + int perfect_uc_entries, int *synopsys_id) +{ + struct mac_device_info *mac; + u32 hwid = readl(ioaddr + GMAC_VERSION); + + mac = kzalloc(sizeof(const struct mac_device_info), GFP_KERNEL); + if (!mac) + return NULL; + + mac->pcsr = ioaddr; + mac->multicast_filter_bins = mcbins; + mac->unicast_filter_entries = perfect_uc_entries; + mac->mcast_bits_log2 = 0; + + if (mac->multicast_filter_bins) + mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins); + + mac->mac = &dwmac4_ops; + + mac->link.port = GMAC_CONFIG_PS; + mac->link.duplex = GMAC_CONFIG_DM; + mac->link.speed = GMAC_CONFIG_FES; + mac->mii.addr = GMAC_MDIO_ADDR; + mac->mii.data = GMAC_MDIO_DATA; + + /* Get and dump the chip ID */ + *synopsys_id = stmmac_get_synopsys_id(hwid); + + if (*synopsys_id > DWMAC_CORE_4_00) + mac->dma = &dwmac410_dma_ops; + else + mac->dma = &dwmac4_dma_ops; + + return mac; +} From 36ff7c1e94a5d43a0ea2d386b211087f77669017 Mon Sep 17 00:00:00 2001 From: Alexandre TORGUE Date: Fri, 1 Apr 2016 11:37:32 +0200 Subject: [PATCH 0139/1649] stmmac: enhance mmc counter management For gmac3, the MMC addr map is: 0x100 - 0x2fc For gmac4, the MMC addr map is: 0x700 - 0x8fc So instead of adding 0x600 to the IO address when setup the mmc, the RMON base address is saved inside the private structure and then used to manage the counters. Signed-off-by: Giuseppe Cavallaro Signed-off-by: Alexandre TORGUE Signed-off-by: David S. 
Miller --- drivers/net/ethernet/stmicro/stmmac/mmc.h | 4 + .../net/ethernet/stmicro/stmmac/mmc_core.c | 349 +++++++++--------- drivers/net/ethernet/stmicro/stmmac/stmmac.h | 1 + .../ethernet/stmicro/stmmac/stmmac_ethtool.c | 2 +- .../net/ethernet/stmicro/stmmac/stmmac_main.c | 8 +- 5 files changed, 188 insertions(+), 176 deletions(-) diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc.h b/drivers/net/ethernet/stmicro/stmmac/mmc.h index 192c2491330b..38a1a5603293 100644 --- a/drivers/net/ethernet/stmicro/stmmac/mmc.h +++ b/drivers/net/ethernet/stmicro/stmmac/mmc.h @@ -35,6 +35,10 @@ * current value.*/ #define MMC_CNTRL_PRESET 0x10 #define MMC_CNTRL_FULL_HALF_PRESET 0x20 + +#define MMC_GMAC4_OFFSET 0x700 +#define MMC_GMAC3_X_OFFSET 0x100 + struct stmmac_counters { unsigned int mmc_tx_octetcount_gb; unsigned int mmc_tx_framecount_gb; diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c index 3f20bb1fe570..ce9aa792857b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c @@ -28,12 +28,12 @@ /* MAC Management Counters register offset */ -#define MMC_CNTRL 0x00000100 /* MMC Control */ -#define MMC_RX_INTR 0x00000104 /* MMC RX Interrupt */ -#define MMC_TX_INTR 0x00000108 /* MMC TX Interrupt */ -#define MMC_RX_INTR_MASK 0x0000010c /* MMC Interrupt Mask */ -#define MMC_TX_INTR_MASK 0x00000110 /* MMC Interrupt Mask */ -#define MMC_DEFAULT_MASK 0xffffffff +#define MMC_CNTRL 0x00 /* MMC Control */ +#define MMC_RX_INTR 0x04 /* MMC RX Interrupt */ +#define MMC_TX_INTR 0x08 /* MMC TX Interrupt */ +#define MMC_RX_INTR_MASK 0x0c /* MMC Interrupt Mask */ +#define MMC_TX_INTR_MASK 0x10 /* MMC Interrupt Mask */ +#define MMC_DEFAULT_MASK 0xffffffff /* MMC TX counter registers */ @@ -41,115 +41,115 @@ * _GB register stands for good and bad frames * _G is for good only. 
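 * Note: the counter offsets below are relative to the RMON base address
 * (MMC_GMAC3_X_OFFSET or MMC_GMAC4_OFFSET) passed in as mmcaddr, not to
 * the MAC register base.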
*/ -#define MMC_TX_OCTETCOUNT_GB 0x00000114 -#define MMC_TX_FRAMECOUNT_GB 0x00000118 -#define MMC_TX_BROADCASTFRAME_G 0x0000011c -#define MMC_TX_MULTICASTFRAME_G 0x00000120 -#define MMC_TX_64_OCTETS_GB 0x00000124 -#define MMC_TX_65_TO_127_OCTETS_GB 0x00000128 -#define MMC_TX_128_TO_255_OCTETS_GB 0x0000012c -#define MMC_TX_256_TO_511_OCTETS_GB 0x00000130 -#define MMC_TX_512_TO_1023_OCTETS_GB 0x00000134 -#define MMC_TX_1024_TO_MAX_OCTETS_GB 0x00000138 -#define MMC_TX_UNICAST_GB 0x0000013c -#define MMC_TX_MULTICAST_GB 0x00000140 -#define MMC_TX_BROADCAST_GB 0x00000144 -#define MMC_TX_UNDERFLOW_ERROR 0x00000148 -#define MMC_TX_SINGLECOL_G 0x0000014c -#define MMC_TX_MULTICOL_G 0x00000150 -#define MMC_TX_DEFERRED 0x00000154 -#define MMC_TX_LATECOL 0x00000158 -#define MMC_TX_EXESSCOL 0x0000015c -#define MMC_TX_CARRIER_ERROR 0x00000160 -#define MMC_TX_OCTETCOUNT_G 0x00000164 -#define MMC_TX_FRAMECOUNT_G 0x00000168 -#define MMC_TX_EXCESSDEF 0x0000016c -#define MMC_TX_PAUSE_FRAME 0x00000170 -#define MMC_TX_VLAN_FRAME_G 0x00000174 +#define MMC_TX_OCTETCOUNT_GB 0x14 +#define MMC_TX_FRAMECOUNT_GB 0x18 +#define MMC_TX_BROADCASTFRAME_G 0x1c +#define MMC_TX_MULTICASTFRAME_G 0x20 +#define MMC_TX_64_OCTETS_GB 0x24 +#define MMC_TX_65_TO_127_OCTETS_GB 0x28 +#define MMC_TX_128_TO_255_OCTETS_GB 0x2c +#define MMC_TX_256_TO_511_OCTETS_GB 0x30 +#define MMC_TX_512_TO_1023_OCTETS_GB 0x34 +#define MMC_TX_1024_TO_MAX_OCTETS_GB 0x38 +#define MMC_TX_UNICAST_GB 0x3c +#define MMC_TX_MULTICAST_GB 0x40 +#define MMC_TX_BROADCAST_GB 0x44 +#define MMC_TX_UNDERFLOW_ERROR 0x48 +#define MMC_TX_SINGLECOL_G 0x4c +#define MMC_TX_MULTICOL_G 0x50 +#define MMC_TX_DEFERRED 0x54 +#define MMC_TX_LATECOL 0x58 +#define MMC_TX_EXESSCOL 0x5c +#define MMC_TX_CARRIER_ERROR 0x60 +#define MMC_TX_OCTETCOUNT_G 0x64 +#define MMC_TX_FRAMECOUNT_G 0x68 +#define MMC_TX_EXCESSDEF 0x6c +#define MMC_TX_PAUSE_FRAME 0x70 +#define MMC_TX_VLAN_FRAME_G 0x74 /* MMC RX counter registers */ -#define MMC_RX_FRAMECOUNT_GB 0x00000180 -#define MMC_RX_OCTETCOUNT_GB 0x00000184 -#define MMC_RX_OCTETCOUNT_G 0x00000188 -#define MMC_RX_BROADCASTFRAME_G 0x0000018c -#define MMC_RX_MULTICASTFRAME_G 0x00000190 -#define MMC_RX_CRC_ERROR 0x00000194 -#define MMC_RX_ALIGN_ERROR 0x00000198 -#define MMC_RX_RUN_ERROR 0x0000019C -#define MMC_RX_JABBER_ERROR 0x000001A0 -#define MMC_RX_UNDERSIZE_G 0x000001A4 -#define MMC_RX_OVERSIZE_G 0x000001A8 -#define MMC_RX_64_OCTETS_GB 0x000001AC -#define MMC_RX_65_TO_127_OCTETS_GB 0x000001b0 -#define MMC_RX_128_TO_255_OCTETS_GB 0x000001b4 -#define MMC_RX_256_TO_511_OCTETS_GB 0x000001b8 -#define MMC_RX_512_TO_1023_OCTETS_GB 0x000001bc -#define MMC_RX_1024_TO_MAX_OCTETS_GB 0x000001c0 -#define MMC_RX_UNICAST_G 0x000001c4 -#define MMC_RX_LENGTH_ERROR 0x000001c8 -#define MMC_RX_AUTOFRANGETYPE 0x000001cc -#define MMC_RX_PAUSE_FRAMES 0x000001d0 -#define MMC_RX_FIFO_OVERFLOW 0x000001d4 -#define MMC_RX_VLAN_FRAMES_GB 0x000001d8 -#define MMC_RX_WATCHDOG_ERROR 0x000001dc +#define MMC_RX_FRAMECOUNT_GB 0x80 +#define MMC_RX_OCTETCOUNT_GB 0x84 +#define MMC_RX_OCTETCOUNT_G 0x88 +#define MMC_RX_BROADCASTFRAME_G 0x8c +#define MMC_RX_MULTICASTFRAME_G 0x90 +#define MMC_RX_CRC_ERROR 0x94 +#define MMC_RX_ALIGN_ERROR 0x98 +#define MMC_RX_RUN_ERROR 0x9C +#define MMC_RX_JABBER_ERROR 0xA0 +#define MMC_RX_UNDERSIZE_G 0xA4 +#define MMC_RX_OVERSIZE_G 0xA8 +#define MMC_RX_64_OCTETS_GB 0xAC +#define MMC_RX_65_TO_127_OCTETS_GB 0xb0 +#define MMC_RX_128_TO_255_OCTETS_GB 0xb4 +#define MMC_RX_256_TO_511_OCTETS_GB 0xb8 +#define MMC_RX_512_TO_1023_OCTETS_GB 0xbc +#define 
MMC_RX_1024_TO_MAX_OCTETS_GB 0xc0 +#define MMC_RX_UNICAST_G 0xc4 +#define MMC_RX_LENGTH_ERROR 0xc8 +#define MMC_RX_AUTOFRANGETYPE 0xcc +#define MMC_RX_PAUSE_FRAMES 0xd0 +#define MMC_RX_FIFO_OVERFLOW 0xd4 +#define MMC_RX_VLAN_FRAMES_GB 0xd8 +#define MMC_RX_WATCHDOG_ERROR 0xdc /* IPC*/ -#define MMC_RX_IPC_INTR_MASK 0x00000200 -#define MMC_RX_IPC_INTR 0x00000208 +#define MMC_RX_IPC_INTR_MASK 0x100 +#define MMC_RX_IPC_INTR 0x108 /* IPv4*/ -#define MMC_RX_IPV4_GD 0x00000210 -#define MMC_RX_IPV4_HDERR 0x00000214 -#define MMC_RX_IPV4_NOPAY 0x00000218 -#define MMC_RX_IPV4_FRAG 0x0000021C -#define MMC_RX_IPV4_UDSBL 0x00000220 +#define MMC_RX_IPV4_GD 0x110 +#define MMC_RX_IPV4_HDERR 0x114 +#define MMC_RX_IPV4_NOPAY 0x118 +#define MMC_RX_IPV4_FRAG 0x11C +#define MMC_RX_IPV4_UDSBL 0x120 -#define MMC_RX_IPV4_GD_OCTETS 0x00000250 -#define MMC_RX_IPV4_HDERR_OCTETS 0x00000254 -#define MMC_RX_IPV4_NOPAY_OCTETS 0x00000258 -#define MMC_RX_IPV4_FRAG_OCTETS 0x0000025c -#define MMC_RX_IPV4_UDSBL_OCTETS 0x00000260 +#define MMC_RX_IPV4_GD_OCTETS 0x150 +#define MMC_RX_IPV4_HDERR_OCTETS 0x154 +#define MMC_RX_IPV4_NOPAY_OCTETS 0x158 +#define MMC_RX_IPV4_FRAG_OCTETS 0x15c +#define MMC_RX_IPV4_UDSBL_OCTETS 0x160 /* IPV6*/ -#define MMC_RX_IPV6_GD_OCTETS 0x00000264 -#define MMC_RX_IPV6_HDERR_OCTETS 0x00000268 -#define MMC_RX_IPV6_NOPAY_OCTETS 0x0000026c +#define MMC_RX_IPV6_GD_OCTETS 0x164 +#define MMC_RX_IPV6_HDERR_OCTETS 0x168 +#define MMC_RX_IPV6_NOPAY_OCTETS 0x16c -#define MMC_RX_IPV6_GD 0x00000224 -#define MMC_RX_IPV6_HDERR 0x00000228 -#define MMC_RX_IPV6_NOPAY 0x0000022c +#define MMC_RX_IPV6_GD 0x124 +#define MMC_RX_IPV6_HDERR 0x128 +#define MMC_RX_IPV6_NOPAY 0x12c /* Protocols*/ -#define MMC_RX_UDP_GD 0x00000230 -#define MMC_RX_UDP_ERR 0x00000234 -#define MMC_RX_TCP_GD 0x00000238 -#define MMC_RX_TCP_ERR 0x0000023c -#define MMC_RX_ICMP_GD 0x00000240 -#define MMC_RX_ICMP_ERR 0x00000244 +#define MMC_RX_UDP_GD 0x130 +#define MMC_RX_UDP_ERR 0x134 +#define MMC_RX_TCP_GD 0x138 +#define MMC_RX_TCP_ERR 0x13c +#define MMC_RX_ICMP_GD 0x140 +#define MMC_RX_ICMP_ERR 0x144 -#define MMC_RX_UDP_GD_OCTETS 0x00000270 -#define MMC_RX_UDP_ERR_OCTETS 0x00000274 -#define MMC_RX_TCP_GD_OCTETS 0x00000278 -#define MMC_RX_TCP_ERR_OCTETS 0x0000027c -#define MMC_RX_ICMP_GD_OCTETS 0x00000280 -#define MMC_RX_ICMP_ERR_OCTETS 0x00000284 +#define MMC_RX_UDP_GD_OCTETS 0x170 +#define MMC_RX_UDP_ERR_OCTETS 0x174 +#define MMC_RX_TCP_GD_OCTETS 0x178 +#define MMC_RX_TCP_ERR_OCTETS 0x17c +#define MMC_RX_ICMP_GD_OCTETS 0x180 +#define MMC_RX_ICMP_ERR_OCTETS 0x184 -void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode) +void dwmac_mmc_ctrl(void __iomem *mmcaddr, unsigned int mode) { - u32 value = readl(ioaddr + MMC_CNTRL); + u32 value = readl(mmcaddr + MMC_CNTRL); value |= (mode & 0x3F); - writel(value, ioaddr + MMC_CNTRL); + writel(value, mmcaddr + MMC_CNTRL); pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n", MMC_CNTRL, value); } /* To mask all all interrupts.*/ -void dwmac_mmc_intr_all_mask(void __iomem *ioaddr) +void dwmac_mmc_intr_all_mask(void __iomem *mmcaddr) { - writel(MMC_DEFAULT_MASK, ioaddr + MMC_RX_INTR_MASK); - writel(MMC_DEFAULT_MASK, ioaddr + MMC_TX_INTR_MASK); - writel(MMC_DEFAULT_MASK, ioaddr + MMC_RX_IPC_INTR_MASK); + writel(MMC_DEFAULT_MASK, mmcaddr + MMC_RX_INTR_MASK); + writel(MMC_DEFAULT_MASK, mmcaddr + MMC_TX_INTR_MASK); + writel(MMC_DEFAULT_MASK, mmcaddr + MMC_RX_IPC_INTR_MASK); } /* This reads the MAC core counters (if actaully supported). 
@@ -157,111 +157,116 @@ void dwmac_mmc_intr_all_mask(void __iomem *ioaddr) * counter after a read. So all the field of the mmc struct * have to be incremented. */ -void dwmac_mmc_read(void __iomem *ioaddr, struct stmmac_counters *mmc) +void dwmac_mmc_read(void __iomem *mmcaddr, struct stmmac_counters *mmc) { - mmc->mmc_tx_octetcount_gb += readl(ioaddr + MMC_TX_OCTETCOUNT_GB); - mmc->mmc_tx_framecount_gb += readl(ioaddr + MMC_TX_FRAMECOUNT_GB); - mmc->mmc_tx_broadcastframe_g += readl(ioaddr + MMC_TX_BROADCASTFRAME_G); - mmc->mmc_tx_multicastframe_g += readl(ioaddr + MMC_TX_MULTICASTFRAME_G); - mmc->mmc_tx_64_octets_gb += readl(ioaddr + MMC_TX_64_OCTETS_GB); + mmc->mmc_tx_octetcount_gb += readl(mmcaddr + MMC_TX_OCTETCOUNT_GB); + mmc->mmc_tx_framecount_gb += readl(mmcaddr + MMC_TX_FRAMECOUNT_GB); + mmc->mmc_tx_broadcastframe_g += readl(mmcaddr + + MMC_TX_BROADCASTFRAME_G); + mmc->mmc_tx_multicastframe_g += readl(mmcaddr + + MMC_TX_MULTICASTFRAME_G); + mmc->mmc_tx_64_octets_gb += readl(mmcaddr + MMC_TX_64_OCTETS_GB); mmc->mmc_tx_65_to_127_octets_gb += - readl(ioaddr + MMC_TX_65_TO_127_OCTETS_GB); + readl(mmcaddr + MMC_TX_65_TO_127_OCTETS_GB); mmc->mmc_tx_128_to_255_octets_gb += - readl(ioaddr + MMC_TX_128_TO_255_OCTETS_GB); + readl(mmcaddr + MMC_TX_128_TO_255_OCTETS_GB); mmc->mmc_tx_256_to_511_octets_gb += - readl(ioaddr + MMC_TX_256_TO_511_OCTETS_GB); + readl(mmcaddr + MMC_TX_256_TO_511_OCTETS_GB); mmc->mmc_tx_512_to_1023_octets_gb += - readl(ioaddr + MMC_TX_512_TO_1023_OCTETS_GB); + readl(mmcaddr + MMC_TX_512_TO_1023_OCTETS_GB); mmc->mmc_tx_1024_to_max_octets_gb += - readl(ioaddr + MMC_TX_1024_TO_MAX_OCTETS_GB); - mmc->mmc_tx_unicast_gb += readl(ioaddr + MMC_TX_UNICAST_GB); - mmc->mmc_tx_multicast_gb += readl(ioaddr + MMC_TX_MULTICAST_GB); - mmc->mmc_tx_broadcast_gb += readl(ioaddr + MMC_TX_BROADCAST_GB); - mmc->mmc_tx_underflow_error += readl(ioaddr + MMC_TX_UNDERFLOW_ERROR); - mmc->mmc_tx_singlecol_g += readl(ioaddr + MMC_TX_SINGLECOL_G); - mmc->mmc_tx_multicol_g += readl(ioaddr + MMC_TX_MULTICOL_G); - mmc->mmc_tx_deferred += readl(ioaddr + MMC_TX_DEFERRED); - mmc->mmc_tx_latecol += readl(ioaddr + MMC_TX_LATECOL); - mmc->mmc_tx_exesscol += readl(ioaddr + MMC_TX_EXESSCOL); - mmc->mmc_tx_carrier_error += readl(ioaddr + MMC_TX_CARRIER_ERROR); - mmc->mmc_tx_octetcount_g += readl(ioaddr + MMC_TX_OCTETCOUNT_G); - mmc->mmc_tx_framecount_g += readl(ioaddr + MMC_TX_FRAMECOUNT_G); - mmc->mmc_tx_excessdef += readl(ioaddr + MMC_TX_EXCESSDEF); - mmc->mmc_tx_pause_frame += readl(ioaddr + MMC_TX_PAUSE_FRAME); - mmc->mmc_tx_vlan_frame_g += readl(ioaddr + MMC_TX_VLAN_FRAME_G); + readl(mmcaddr + MMC_TX_1024_TO_MAX_OCTETS_GB); + mmc->mmc_tx_unicast_gb += readl(mmcaddr + MMC_TX_UNICAST_GB); + mmc->mmc_tx_multicast_gb += readl(mmcaddr + MMC_TX_MULTICAST_GB); + mmc->mmc_tx_broadcast_gb += readl(mmcaddr + MMC_TX_BROADCAST_GB); + mmc->mmc_tx_underflow_error += readl(mmcaddr + MMC_TX_UNDERFLOW_ERROR); + mmc->mmc_tx_singlecol_g += readl(mmcaddr + MMC_TX_SINGLECOL_G); + mmc->mmc_tx_multicol_g += readl(mmcaddr + MMC_TX_MULTICOL_G); + mmc->mmc_tx_deferred += readl(mmcaddr + MMC_TX_DEFERRED); + mmc->mmc_tx_latecol += readl(mmcaddr + MMC_TX_LATECOL); + mmc->mmc_tx_exesscol += readl(mmcaddr + MMC_TX_EXESSCOL); + mmc->mmc_tx_carrier_error += readl(mmcaddr + MMC_TX_CARRIER_ERROR); + mmc->mmc_tx_octetcount_g += readl(mmcaddr + MMC_TX_OCTETCOUNT_G); + mmc->mmc_tx_framecount_g += readl(mmcaddr + MMC_TX_FRAMECOUNT_G); + mmc->mmc_tx_excessdef += readl(mmcaddr + MMC_TX_EXCESSDEF); + mmc->mmc_tx_pause_frame += readl(mmcaddr + 
MMC_TX_PAUSE_FRAME); + mmc->mmc_tx_vlan_frame_g += readl(mmcaddr + MMC_TX_VLAN_FRAME_G); /* MMC RX counter registers */ - mmc->mmc_rx_framecount_gb += readl(ioaddr + MMC_RX_FRAMECOUNT_GB); - mmc->mmc_rx_octetcount_gb += readl(ioaddr + MMC_RX_OCTETCOUNT_GB); - mmc->mmc_rx_octetcount_g += readl(ioaddr + MMC_RX_OCTETCOUNT_G); - mmc->mmc_rx_broadcastframe_g += readl(ioaddr + MMC_RX_BROADCASTFRAME_G); - mmc->mmc_rx_multicastframe_g += readl(ioaddr + MMC_RX_MULTICASTFRAME_G); - mmc->mmc_rx_crc_error += readl(ioaddr + MMC_RX_CRC_ERROR); - mmc->mmc_rx_align_error += readl(ioaddr + MMC_RX_ALIGN_ERROR); - mmc->mmc_rx_run_error += readl(ioaddr + MMC_RX_RUN_ERROR); - mmc->mmc_rx_jabber_error += readl(ioaddr + MMC_RX_JABBER_ERROR); - mmc->mmc_rx_undersize_g += readl(ioaddr + MMC_RX_UNDERSIZE_G); - mmc->mmc_rx_oversize_g += readl(ioaddr + MMC_RX_OVERSIZE_G); - mmc->mmc_rx_64_octets_gb += readl(ioaddr + MMC_RX_64_OCTETS_GB); + mmc->mmc_rx_framecount_gb += readl(mmcaddr + MMC_RX_FRAMECOUNT_GB); + mmc->mmc_rx_octetcount_gb += readl(mmcaddr + MMC_RX_OCTETCOUNT_GB); + mmc->mmc_rx_octetcount_g += readl(mmcaddr + MMC_RX_OCTETCOUNT_G); + mmc->mmc_rx_broadcastframe_g += readl(mmcaddr + + MMC_RX_BROADCASTFRAME_G); + mmc->mmc_rx_multicastframe_g += readl(mmcaddr + + MMC_RX_MULTICASTFRAME_G); + mmc->mmc_rx_crc_error += readl(mmcaddr + MMC_RX_CRC_ERROR); + mmc->mmc_rx_align_error += readl(mmcaddr + MMC_RX_ALIGN_ERROR); + mmc->mmc_rx_run_error += readl(mmcaddr + MMC_RX_RUN_ERROR); + mmc->mmc_rx_jabber_error += readl(mmcaddr + MMC_RX_JABBER_ERROR); + mmc->mmc_rx_undersize_g += readl(mmcaddr + MMC_RX_UNDERSIZE_G); + mmc->mmc_rx_oversize_g += readl(mmcaddr + MMC_RX_OVERSIZE_G); + mmc->mmc_rx_64_octets_gb += readl(mmcaddr + MMC_RX_64_OCTETS_GB); mmc->mmc_rx_65_to_127_octets_gb += - readl(ioaddr + MMC_RX_65_TO_127_OCTETS_GB); + readl(mmcaddr + MMC_RX_65_TO_127_OCTETS_GB); mmc->mmc_rx_128_to_255_octets_gb += - readl(ioaddr + MMC_RX_128_TO_255_OCTETS_GB); + readl(mmcaddr + MMC_RX_128_TO_255_OCTETS_GB); mmc->mmc_rx_256_to_511_octets_gb += - readl(ioaddr + MMC_RX_256_TO_511_OCTETS_GB); + readl(mmcaddr + MMC_RX_256_TO_511_OCTETS_GB); mmc->mmc_rx_512_to_1023_octets_gb += - readl(ioaddr + MMC_RX_512_TO_1023_OCTETS_GB); + readl(mmcaddr + MMC_RX_512_TO_1023_OCTETS_GB); mmc->mmc_rx_1024_to_max_octets_gb += - readl(ioaddr + MMC_RX_1024_TO_MAX_OCTETS_GB); - mmc->mmc_rx_unicast_g += readl(ioaddr + MMC_RX_UNICAST_G); - mmc->mmc_rx_length_error += readl(ioaddr + MMC_RX_LENGTH_ERROR); - mmc->mmc_rx_autofrangetype += readl(ioaddr + MMC_RX_AUTOFRANGETYPE); - mmc->mmc_rx_pause_frames += readl(ioaddr + MMC_RX_PAUSE_FRAMES); - mmc->mmc_rx_fifo_overflow += readl(ioaddr + MMC_RX_FIFO_OVERFLOW); - mmc->mmc_rx_vlan_frames_gb += readl(ioaddr + MMC_RX_VLAN_FRAMES_GB); - mmc->mmc_rx_watchdog_error += readl(ioaddr + MMC_RX_WATCHDOG_ERROR); + readl(mmcaddr + MMC_RX_1024_TO_MAX_OCTETS_GB); + mmc->mmc_rx_unicast_g += readl(mmcaddr + MMC_RX_UNICAST_G); + mmc->mmc_rx_length_error += readl(mmcaddr + MMC_RX_LENGTH_ERROR); + mmc->mmc_rx_autofrangetype += readl(mmcaddr + MMC_RX_AUTOFRANGETYPE); + mmc->mmc_rx_pause_frames += readl(mmcaddr + MMC_RX_PAUSE_FRAMES); + mmc->mmc_rx_fifo_overflow += readl(mmcaddr + MMC_RX_FIFO_OVERFLOW); + mmc->mmc_rx_vlan_frames_gb += readl(mmcaddr + MMC_RX_VLAN_FRAMES_GB); + mmc->mmc_rx_watchdog_error += readl(mmcaddr + MMC_RX_WATCHDOG_ERROR); /* IPC */ - mmc->mmc_rx_ipc_intr_mask += readl(ioaddr + MMC_RX_IPC_INTR_MASK); - mmc->mmc_rx_ipc_intr += readl(ioaddr + MMC_RX_IPC_INTR); + mmc->mmc_rx_ipc_intr_mask += readl(mmcaddr + 
MMC_RX_IPC_INTR_MASK); + mmc->mmc_rx_ipc_intr += readl(mmcaddr + MMC_RX_IPC_INTR); /* IPv4 */ - mmc->mmc_rx_ipv4_gd += readl(ioaddr + MMC_RX_IPV4_GD); - mmc->mmc_rx_ipv4_hderr += readl(ioaddr + MMC_RX_IPV4_HDERR); - mmc->mmc_rx_ipv4_nopay += readl(ioaddr + MMC_RX_IPV4_NOPAY); - mmc->mmc_rx_ipv4_frag += readl(ioaddr + MMC_RX_IPV4_FRAG); - mmc->mmc_rx_ipv4_udsbl += readl(ioaddr + MMC_RX_IPV4_UDSBL); + mmc->mmc_rx_ipv4_gd += readl(mmcaddr + MMC_RX_IPV4_GD); + mmc->mmc_rx_ipv4_hderr += readl(mmcaddr + MMC_RX_IPV4_HDERR); + mmc->mmc_rx_ipv4_nopay += readl(mmcaddr + MMC_RX_IPV4_NOPAY); + mmc->mmc_rx_ipv4_frag += readl(mmcaddr + MMC_RX_IPV4_FRAG); + mmc->mmc_rx_ipv4_udsbl += readl(mmcaddr + MMC_RX_IPV4_UDSBL); - mmc->mmc_rx_ipv4_gd_octets += readl(ioaddr + MMC_RX_IPV4_GD_OCTETS); + mmc->mmc_rx_ipv4_gd_octets += readl(mmcaddr + MMC_RX_IPV4_GD_OCTETS); mmc->mmc_rx_ipv4_hderr_octets += - readl(ioaddr + MMC_RX_IPV4_HDERR_OCTETS); + readl(mmcaddr + MMC_RX_IPV4_HDERR_OCTETS); mmc->mmc_rx_ipv4_nopay_octets += - readl(ioaddr + MMC_RX_IPV4_NOPAY_OCTETS); - mmc->mmc_rx_ipv4_frag_octets += readl(ioaddr + MMC_RX_IPV4_FRAG_OCTETS); + readl(mmcaddr + MMC_RX_IPV4_NOPAY_OCTETS); + mmc->mmc_rx_ipv4_frag_octets += readl(mmcaddr + + MMC_RX_IPV4_FRAG_OCTETS); mmc->mmc_rx_ipv4_udsbl_octets += - readl(ioaddr + MMC_RX_IPV4_UDSBL_OCTETS); + readl(mmcaddr + MMC_RX_IPV4_UDSBL_OCTETS); /* IPV6 */ - mmc->mmc_rx_ipv6_gd_octets += readl(ioaddr + MMC_RX_IPV6_GD_OCTETS); + mmc->mmc_rx_ipv6_gd_octets += readl(mmcaddr + MMC_RX_IPV6_GD_OCTETS); mmc->mmc_rx_ipv6_hderr_octets += - readl(ioaddr + MMC_RX_IPV6_HDERR_OCTETS); + readl(mmcaddr + MMC_RX_IPV6_HDERR_OCTETS); mmc->mmc_rx_ipv6_nopay_octets += - readl(ioaddr + MMC_RX_IPV6_NOPAY_OCTETS); + readl(mmcaddr + MMC_RX_IPV6_NOPAY_OCTETS); - mmc->mmc_rx_ipv6_gd += readl(ioaddr + MMC_RX_IPV6_GD); - mmc->mmc_rx_ipv6_hderr += readl(ioaddr + MMC_RX_IPV6_HDERR); - mmc->mmc_rx_ipv6_nopay += readl(ioaddr + MMC_RX_IPV6_NOPAY); + mmc->mmc_rx_ipv6_gd += readl(mmcaddr + MMC_RX_IPV6_GD); + mmc->mmc_rx_ipv6_hderr += readl(mmcaddr + MMC_RX_IPV6_HDERR); + mmc->mmc_rx_ipv6_nopay += readl(mmcaddr + MMC_RX_IPV6_NOPAY); /* Protocols */ - mmc->mmc_rx_udp_gd += readl(ioaddr + MMC_RX_UDP_GD); - mmc->mmc_rx_udp_err += readl(ioaddr + MMC_RX_UDP_ERR); - mmc->mmc_rx_tcp_gd += readl(ioaddr + MMC_RX_TCP_GD); - mmc->mmc_rx_tcp_err += readl(ioaddr + MMC_RX_TCP_ERR); - mmc->mmc_rx_icmp_gd += readl(ioaddr + MMC_RX_ICMP_GD); - mmc->mmc_rx_icmp_err += readl(ioaddr + MMC_RX_ICMP_ERR); + mmc->mmc_rx_udp_gd += readl(mmcaddr + MMC_RX_UDP_GD); + mmc->mmc_rx_udp_err += readl(mmcaddr + MMC_RX_UDP_ERR); + mmc->mmc_rx_tcp_gd += readl(mmcaddr + MMC_RX_TCP_GD); + mmc->mmc_rx_tcp_err += readl(mmcaddr + MMC_RX_TCP_ERR); + mmc->mmc_rx_icmp_gd += readl(mmcaddr + MMC_RX_ICMP_GD); + mmc->mmc_rx_icmp_err += readl(mmcaddr + MMC_RX_ICMP_ERR); - mmc->mmc_rx_udp_gd_octets += readl(ioaddr + MMC_RX_UDP_GD_OCTETS); - mmc->mmc_rx_udp_err_octets += readl(ioaddr + MMC_RX_UDP_ERR_OCTETS); - mmc->mmc_rx_tcp_gd_octets += readl(ioaddr + MMC_RX_TCP_GD_OCTETS); - mmc->mmc_rx_tcp_err_octets += readl(ioaddr + MMC_RX_TCP_ERR_OCTETS); - mmc->mmc_rx_icmp_gd_octets += readl(ioaddr + MMC_RX_ICMP_GD_OCTETS); - mmc->mmc_rx_icmp_err_octets += readl(ioaddr + MMC_RX_ICMP_ERR_OCTETS); + mmc->mmc_rx_udp_gd_octets += readl(mmcaddr + MMC_RX_UDP_GD_OCTETS); + mmc->mmc_rx_udp_err_octets += readl(mmcaddr + MMC_RX_UDP_ERR_OCTETS); + mmc->mmc_rx_tcp_gd_octets += readl(mmcaddr + MMC_RX_TCP_GD_OCTETS); + mmc->mmc_rx_tcp_err_octets += readl(mmcaddr + MMC_RX_TCP_ERR_OCTETS); + 
mmc->mmc_rx_icmp_gd_octets += readl(mmcaddr + MMC_RX_ICMP_GD_OCTETS); + mmc->mmc_rx_icmp_err_octets += readl(mmcaddr + MMC_RX_ICMP_ERR_OCTETS); } diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h index 8bbab97895fe..26fb85531a61 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h @@ -128,6 +128,7 @@ struct stmmac_priv { int use_riwt; int irq_wake; spinlock_t ptp_lock; + void __iomem *mmcaddr; #ifdef CONFIG_DEBUG_FS struct dentry *dbgfs_dir; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c index 3c7928edfebb..fb2e7fc85ca7 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c @@ -506,7 +506,7 @@ static void stmmac_get_ethtool_stats(struct net_device *dev, else { /* If supported, for new GMAC chips expose the MMC counters */ if (priv->dma_cap.rmon) { - dwmac_mmc_read(priv->ioaddr, &priv->mmc); + dwmac_mmc_read(priv->mmcaddr, &priv->mmc); for (i = 0; i < STMMAC_MMC_STATS_LEN; i++) { char *p; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 1186ac902bec..00e508498a81 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -1450,12 +1450,14 @@ static void stmmac_dma_interrupt(struct stmmac_priv *priv) static void stmmac_mmc_setup(struct stmmac_priv *priv) { unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET | - MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET; + MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET; - dwmac_mmc_intr_all_mask(priv->ioaddr); + priv->mmcaddr = priv->ioaddr + MMC_GMAC3_X_OFFSET; + + dwmac_mmc_intr_all_mask(priv->mmcaddr); if (priv->dma_cap.rmon) { - dwmac_mmc_ctrl(priv->ioaddr, mode); + dwmac_mmc_ctrl(priv->mmcaddr, mode); memset(&priv->mmc, 0, sizeof(struct stmmac_counters)); } else pr_info(" No MAC Management Counters available\n"); From ee2ae1ed46251dcbdcc2c59b5e30f664ddfbacb1 Mon Sep 17 00:00:00 2001 From: Alexandre TORGUE Date: Fri, 1 Apr 2016 11:37:33 +0200 Subject: [PATCH 0140/1649] stmmac: add new DT platform entries for GMAC4 This is to support the snps,dwmac-4.00 and snps,dwmac-4.10a and related features on the platform driver. See binding doc for further details. Signed-off-by: Giuseppe Cavallaro Signed-off-by: Alexandre TORGUE Signed-off-by: David S. Miller --- Documentation/devicetree/bindings/net/stmmac.txt | 2 ++ drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | 7 +++++++ include/linux/stmmac.h | 2 ++ 3 files changed, 11 insertions(+) diff --git a/Documentation/devicetree/bindings/net/stmmac.txt b/Documentation/devicetree/bindings/net/stmmac.txt index 6605d19601c2..4d302db657c0 100644 --- a/Documentation/devicetree/bindings/net/stmmac.txt +++ b/Documentation/devicetree/bindings/net/stmmac.txt @@ -59,6 +59,8 @@ Optional properties: - snps,fb: fixed-burst - snps,mb: mixed-burst - snps,rb: rebuild INCRx Burst + - snps,tso: this enables the TSO feature otherwise it will be managed by + MAC HW capability register. - mdio: with compatible = "snps,dwmac-mdio", create and register mdio bus. 
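A purely illustrative sketch (unit address, reg range and interrupt values
are placeholders, not taken from this series) of a GMAC4 node requesting TSO:

	ethernet@40028000 {
		compatible = "snps,dwmac-4.10a";
		reg = <0x40028000 0x8000>;
		interrupts = <0 61 0>;
		interrupt-names = "macirq";
		snps,tso;
	};

The complete node layout is covered under Examples below.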
Examples: diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index cf37ea558ecc..effaa4ff5ab7 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -284,6 +284,13 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac) plat->pmt = 1; } + if (of_device_is_compatible(np, "snps,dwmac-4.00") || + of_device_is_compatible(np, "snps,dwmac-4.10a")) { + plat->has_gmac4 = 1; + plat->pmt = 1; + plat->tso_en = of_property_read_bool(np, "snps,tso"); + } + if (of_device_is_compatible(np, "snps,dwmac-3.610") || of_device_is_compatible(np, "snps,dwmac-3.710")) { plat->enh_desc = 1; diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h index e6bc30a42a74..ffdaca9c01af 100644 --- a/include/linux/stmmac.h +++ b/include/linux/stmmac.h @@ -137,5 +137,7 @@ struct plat_stmmacenet_data { void (*exit)(struct platform_device *pdev, void *priv); void *bsp_priv; struct stmmac_axi *axi; + int has_gmac4; + bool tso_en; }; #endif From f748be531d7012c456b97f66091d86b3675c5fef Mon Sep 17 00:00:00 2001 From: Alexandre TORGUE Date: Fri, 1 Apr 2016 11:37:34 +0200 Subject: [PATCH 0141/1649] stmmac: support new GMAC4 This patch adds the whole GMAC4 support inside the stmmac d.d. now able to use the new HW and some new features i.e.: TSO. It is missing the multi-queue and split Header support at this stage. This patch also updates the driver version and the stmmac.txt. Signed-off-by: Alexandre TORGUE Signed-off-by: Giuseppe Cavallaro Signed-off-by: David S. Miller --- drivers/net/ethernet/stmicro/stmmac/common.h | 4 + drivers/net/ethernet/stmicro/stmmac/stmmac.h | 6 +- .../ethernet/stmicro/stmmac/stmmac_ethtool.c | 5 +- .../net/ethernet/stmicro/stmmac/stmmac_main.c | 483 ++++++++++++++++-- 4 files changed, 444 insertions(+), 54 deletions(-) diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h index eabe86bd8f56..fc60368df2e7 100644 --- a/drivers/net/ethernet/stmicro/stmmac/common.h +++ b/drivers/net/ethernet/stmicro/stmmac/common.h @@ -169,6 +169,9 @@ struct stmmac_extra_stats { unsigned long mtl_rx_fifo_ctrl_active; unsigned long mac_rx_frame_ctrl_fifo; unsigned long mac_gmii_rx_proto_engine; + /* TSO */ + unsigned long tx_tso_frames; + unsigned long tx_tso_nfrags; }; /* CSR Frequency Access Defines*/ @@ -545,6 +548,7 @@ void stmmac_dwmac4_set_mac(void __iomem *ioaddr, bool enable); void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr); extern const struct stmmac_mode_ops ring_mode_ops; extern const struct stmmac_mode_ops chain_mode_ops; +extern const struct stmmac_desc_ops dwmac4_desc_ops; /** * stmmac_get_synopsys_id - return the SYINID. 
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h index 26fb85531a61..317ce3580e13 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h @@ -24,7 +24,7 @@ #define __STMMAC_H__ #define STMMAC_RESOURCE_NAME "stmmaceth" -#define DRV_MODULE_VERSION "Oct_2015" +#define DRV_MODULE_VERSION "Dec_2015" #include #include @@ -67,6 +67,7 @@ struct stmmac_priv { spinlock_t tx_lock; bool tx_path_in_lpi_mode; struct timer_list txtimer; + bool tso; struct dma_desc *dma_rx ____cacheline_aligned_in_smp; struct dma_extended_desc *dma_erx; @@ -129,6 +130,9 @@ struct stmmac_priv { int irq_wake; spinlock_t ptp_lock; void __iomem *mmcaddr; + u32 rx_tail_addr; + u32 tx_tail_addr; + u32 mss; #ifdef CONFIG_DEBUG_FS struct dentry *dbgfs_dir; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c index fb2e7fc85ca7..e2b98b01647e 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c @@ -161,6 +161,9 @@ static const struct stmmac_stats stmmac_gstrings_stats[] = { STMMAC_STAT(mtl_rx_fifo_ctrl_active), STMMAC_STAT(mac_rx_frame_ctrl_fifo), STMMAC_STAT(mac_gmii_rx_proto_engine), + /* TSO */ + STMMAC_STAT(tx_tso_frames), + STMMAC_STAT(tx_tso_nfrags), }; #define STMMAC_STATS_LEN ARRAY_SIZE(stmmac_gstrings_stats) @@ -499,7 +502,7 @@ static void stmmac_get_ethtool_stats(struct net_device *dev, int i, j = 0; /* Update the DMA HW counters for dwmac10/100 */ - if (!priv->plat->has_gmac) + if (priv->hw->dma->dma_diagnostic_fr) priv->hw->dma->dma_diagnostic_fr(&dev->stats, (void *) &priv->xstats, priv->ioaddr); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 00e508498a81..3a13ddd3aac1 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -56,6 +56,7 @@ #include "dwmac1000.h" #define STMMAC_ALIGN(x) L1_CACHE_ALIGN(x) +#define TSO_MAX_BUFF_SIZE (SZ_16K - 1) /* Module parameters */ #define TX_TIMEO 5000 @@ -725,13 +726,15 @@ static void stmmac_adjust_link(struct net_device *dev) new_state = 1; switch (phydev->speed) { case 1000: - if (likely(priv->plat->has_gmac)) + if (likely((priv->plat->has_gmac) || + (priv->plat->has_gmac4))) ctrl &= ~priv->hw->link.port; stmmac_hw_fix_mac_speed(priv); break; case 100: case 10: - if (priv->plat->has_gmac) { + if (likely((priv->plat->has_gmac) || + (priv->plat->has_gmac4))) { ctrl |= priv->hw->link.port; if (phydev->speed == SPEED_100) { ctrl |= priv->hw->link.speed; @@ -971,7 +974,10 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p, return -EINVAL; } - p->des2 = priv->rx_skbuff_dma[i]; + if (priv->synopsys_id >= DWMAC_CORE_4_00) + p->des0 = priv->rx_skbuff_dma[i]; + else + p->des2 = priv->rx_skbuff_dma[i]; if ((priv->hw->mode->init_desc3) && (priv->dma_buf_sz == BUF_SIZE_16KiB)) @@ -1062,7 +1068,16 @@ static int init_dma_desc_rings(struct net_device *dev, gfp_t flags) p = &((priv->dma_etx + i)->basic); else p = priv->dma_tx + i; - p->des2 = 0; + + if (priv->synopsys_id >= DWMAC_CORE_4_00) { + p->des0 = 0; + p->des1 = 0; + p->des2 = 0; + p->des3 = 0; + } else { + p->des2 = 0; + } + priv->tx_skbuff_dma[i].buf = 0; priv->tx_skbuff_dma[i].map_as_page = false; priv->tx_skbuff_dma[i].len = 0; @@ -1325,9 +1340,13 @@ static void stmmac_tx_clean(struct stmmac_priv *priv) 
priv->tx_skbuff_dma[entry].len, DMA_TO_DEVICE); priv->tx_skbuff_dma[entry].buf = 0; + priv->tx_skbuff_dma[entry].len = 0; priv->tx_skbuff_dma[entry].map_as_page = false; } - priv->hw->mode->clean_desc3(priv, p); + + if (priv->hw->mode->clean_desc3) + priv->hw->mode->clean_desc3(priv, p); + priv->tx_skbuff_dma[entry].last_segment = false; priv->tx_skbuff_dma[entry].is_jumbo = false; @@ -1452,7 +1471,10 @@ static void stmmac_mmc_setup(struct stmmac_priv *priv) unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET | MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET; - priv->mmcaddr = priv->ioaddr + MMC_GMAC3_X_OFFSET; + if (priv->synopsys_id >= DWMAC_CORE_4_00) + priv->mmcaddr = priv->ioaddr + MMC_GMAC4_OFFSET; + else + priv->mmcaddr = priv->ioaddr + MMC_GMAC3_X_OFFSET; dwmac_mmc_intr_all_mask(priv->mmcaddr); @@ -1564,8 +1586,19 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv) priv->hw->dma->init(priv->ioaddr, pbl, fixed_burst, mixed_burst, aal, priv->dma_tx_phy, priv->dma_rx_phy, atds); - if ((priv->synopsys_id >= DWMAC_CORE_3_50) && - (priv->plat->axi && priv->hw->dma->axi)) + if (priv->synopsys_id >= DWMAC_CORE_4_00) { + priv->rx_tail_addr = priv->dma_rx_phy + + (DMA_RX_SIZE * sizeof(struct dma_desc)); + priv->hw->dma->set_rx_tail_ptr(priv->ioaddr, priv->rx_tail_addr, + STMMAC_CHAN0); + + priv->tx_tail_addr = priv->dma_tx_phy + + (DMA_TX_SIZE * sizeof(struct dma_desc)); + priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, priv->tx_tail_addr, + STMMAC_CHAN0); + } + + if (priv->plat->axi && priv->hw->dma->axi) priv->hw->dma->axi(priv->ioaddr, priv->plat->axi); return ret; @@ -1645,7 +1678,10 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp) } /* Enable the MAC Rx/Tx */ - stmmac_set_mac(priv->ioaddr, true); + if (priv->synopsys_id >= DWMAC_CORE_4_00) + stmmac_dwmac4_set_mac(priv->ioaddr, true); + else + stmmac_set_mac(priv->ioaddr, true); /* Set the HW DMA mode and the COE */ stmmac_dma_operation_mode(priv); @@ -1683,6 +1719,18 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp) if (priv->pcs && priv->hw->mac->ctrl_ane) priv->hw->mac->ctrl_ane(priv->hw, 0); + /* set TX ring length */ + if (priv->hw->dma->set_tx_ring_len) + priv->hw->dma->set_tx_ring_len(priv->ioaddr, + (DMA_TX_SIZE - 1)); + /* set RX ring length */ + if (priv->hw->dma->set_rx_ring_len) + priv->hw->dma->set_rx_ring_len(priv->ioaddr, + (DMA_RX_SIZE - 1)); + /* Enable TSO */ + if (priv->tso) + priv->hw->dma->enable_tso(priv->ioaddr, 1, STMMAC_CHAN0); + return 0; } @@ -1847,6 +1895,239 @@ static int stmmac_release(struct net_device *dev) return 0; } +/** + * stmmac_tso_allocator - close entry point of the driver + * @priv: driver private structure + * @des: buffer start address + * @total_len: total length to fill in descriptors + * @last_segmant: condition for the last descriptor + * Description: + * This function fills descriptor and request new descriptors according to + * buffer length to fill + */ +static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des, + int total_len, bool last_segment) +{ + struct dma_desc *desc; + int tmp_len; + u32 buff_size; + + tmp_len = total_len; + + while (tmp_len > 0) { + priv->cur_tx = STMMAC_GET_ENTRY(priv->cur_tx, DMA_TX_SIZE); + desc = priv->dma_tx + priv->cur_tx; + + desc->des0 = des + (total_len - tmp_len); + buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ? 
+ TSO_MAX_BUFF_SIZE : tmp_len; + + priv->hw->desc->prepare_tso_tx_desc(desc, 0, buff_size, + 0, 1, + (last_segment) && (buff_size < TSO_MAX_BUFF_SIZE), + 0, 0); + + tmp_len -= TSO_MAX_BUFF_SIZE; + } +} + +/** + * stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO) + * @skb : the socket buffer + * @dev : device pointer + * Description: this is the transmit function that is called on TSO frames + * (support available on GMAC4 and newer chips). + * Diagram below show the ring programming in case of TSO frames: + * + * First Descriptor + * -------- + * | DES0 |---> buffer1 = L2/L3/L4 header + * | DES1 |---> TCP Payload (can continue on next descr...) + * | DES2 |---> buffer 1 and 2 len + * | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0] + * -------- + * | + * ... + * | + * -------- + * | DES0 | --| Split TCP Payload on Buffers 1 and 2 + * | DES1 | --| + * | DES2 | --> buffer 1 and 2 len + * | DES3 | + * -------- + * + * mss is fixed when enable tso, so w/o programming the TDES3 ctx field. + */ +static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) +{ + u32 pay_len, mss; + int tmp_pay_len = 0; + struct stmmac_priv *priv = netdev_priv(dev); + int nfrags = skb_shinfo(skb)->nr_frags; + unsigned int first_entry, des; + struct dma_desc *desc, *first, *mss_desc = NULL; + u8 proto_hdr_len; + int i; + + spin_lock(&priv->tx_lock); + + /* Compute header lengths */ + proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); + + /* Desc availability based on threshold should be enough safe */ + if (unlikely(stmmac_tx_avail(priv) < + (((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) { + if (!netif_queue_stopped(dev)) { + netif_stop_queue(dev); + /* This is a hard error, log it. */ + pr_err("%s: Tx Ring full when queue awake\n", __func__); + } + spin_unlock(&priv->tx_lock); + return NETDEV_TX_BUSY; + } + + pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */ + + mss = skb_shinfo(skb)->gso_size; + + /* set new MSS value if needed */ + if (mss != priv->mss) { + mss_desc = priv->dma_tx + priv->cur_tx; + priv->hw->desc->set_mss(mss_desc, mss); + priv->mss = mss; + priv->cur_tx = STMMAC_GET_ENTRY(priv->cur_tx, DMA_TX_SIZE); + } + + if (netif_msg_tx_queued(priv)) { + pr_info("%s: tcphdrlen %d, hdr_len %d, pay_len %d, mss %d\n", + __func__, tcp_hdrlen(skb), proto_hdr_len, pay_len, mss); + pr_info("\tskb->len %d, skb->data_len %d\n", skb->len, + skb->data_len); + } + + first_entry = priv->cur_tx; + + desc = priv->dma_tx + first_entry; + first = desc; + + /* first descriptor: fill Headers on Buf1 */ + des = dma_map_single(priv->device, skb->data, skb_headlen(skb), + DMA_TO_DEVICE); + if (dma_mapping_error(priv->device, des)) + goto dma_map_err; + + priv->tx_skbuff_dma[first_entry].buf = des; + priv->tx_skbuff_dma[first_entry].len = skb_headlen(skb); + priv->tx_skbuff[first_entry] = skb; + + first->des0 = des; + + /* Fill start of payload in buff2 of first descriptor */ + if (pay_len) + first->des1 = des + proto_hdr_len; + + /* If needed take extra descriptors to fill the remaining payload */ + tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE; + + stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0)); + + /* Prepare fragments */ + for (i = 0; i < nfrags; i++) { + const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + + des = skb_frag_dma_map(priv->device, frag, 0, + skb_frag_size(frag), + DMA_TO_DEVICE); + + stmmac_tso_allocator(priv, des, skb_frag_size(frag), + (i == nfrags - 1)); + + priv->tx_skbuff_dma[priv->cur_tx].buf = des; + 
priv->tx_skbuff_dma[priv->cur_tx].len = skb_frag_size(frag); + priv->tx_skbuff[priv->cur_tx] = NULL; + priv->tx_skbuff_dma[priv->cur_tx].map_as_page = true; + } + + priv->tx_skbuff_dma[priv->cur_tx].last_segment = true; + + priv->cur_tx = STMMAC_GET_ENTRY(priv->cur_tx, DMA_TX_SIZE); + + if (unlikely(stmmac_tx_avail(priv) <= (MAX_SKB_FRAGS + 1))) { + if (netif_msg_hw(priv)) + pr_debug("%s: stop transmitted packets\n", __func__); + netif_stop_queue(dev); + } + + dev->stats.tx_bytes += skb->len; + priv->xstats.tx_tso_frames++; + priv->xstats.tx_tso_nfrags += nfrags; + + /* Manage tx mitigation */ + priv->tx_count_frames += nfrags + 1; + if (likely(priv->tx_coal_frames > priv->tx_count_frames)) { + mod_timer(&priv->txtimer, + STMMAC_COAL_TIMER(priv->tx_coal_timer)); + } else { + priv->tx_count_frames = 0; + priv->hw->desc->set_tx_ic(desc); + priv->xstats.tx_set_ic_bit++; + } + + if (!priv->hwts_tx_en) + skb_tx_timestamp(skb); + + if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && + priv->hwts_tx_en)) { + /* declare that device is doing timestamping */ + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + priv->hw->desc->enable_tx_timestamp(first); + } + + /* Complete the first descriptor before granting the DMA */ + priv->hw->desc->prepare_tso_tx_desc(first, 1, + proto_hdr_len, + pay_len, + 1, priv->tx_skbuff_dma[first_entry].last_segment, + tcp_hdrlen(skb) / 4, (skb->len - proto_hdr_len)); + + /* If context desc is used to change MSS */ + if (mss_desc) + priv->hw->desc->set_tx_owner(mss_desc); + + /* The own bit must be the latest setting done when prepare the + * descriptor and then barrier is needed to make sure that + * all is coherent before granting the DMA engine. + */ + smp_wmb(); + + if (netif_msg_pktdata(priv)) { + pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n", + __func__, priv->cur_tx, priv->dirty_tx, first_entry, + priv->cur_tx, first, nfrags); + + priv->hw->desc->display_ring((void *)priv->dma_tx, DMA_TX_SIZE, + 0); + + pr_info(">>> frame to be transmitted: "); + print_pkt(skb->data, skb_headlen(skb)); + } + + netdev_sent_queue(dev, skb->len); + + priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, priv->tx_tail_addr, + STMMAC_CHAN0); + + spin_unlock(&priv->tx_lock); + return NETDEV_TX_OK; + +dma_map_err: + spin_unlock(&priv->tx_lock); + dev_err(priv->device, "Tx dma map failed\n"); + dev_kfree_skb(skb); + priv->dev->stats.tx_dropped++; + return NETDEV_TX_OK; +} + /** * stmmac_xmit - Tx entry point of the driver * @skb : the socket buffer @@ -1864,6 +2145,13 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) unsigned int entry, first_entry; struct dma_desc *desc, *first; unsigned int enh_desc; + unsigned int des; + + /* Manage oversized TCP frames for GMAC4 device */ + if (skb_is_gso(skb) && priv->tso) { + if (ip_hdr(skb)->protocol == IPPROTO_TCP) + return stmmac_tso_xmit(skb, dev); + } spin_lock(&priv->tx_lock); @@ -1899,7 +2187,8 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) if (enh_desc) is_jumbo = priv->hw->mode->is_jumbo_frm(skb->len, enh_desc); - if (unlikely(is_jumbo)) { + if (unlikely(is_jumbo) && likely(priv->synopsys_id < + DWMAC_CORE_4_00)) { entry = priv->hw->mode->jumbo_frm(priv, skb, csum_insertion); if (unlikely(entry < 0)) goto dma_map_err; @@ -1917,13 +2206,21 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) else desc = priv->dma_tx + entry; - desc->des2 = skb_frag_dma_map(priv->device, frag, 0, len, - DMA_TO_DEVICE); - if (dma_mapping_error(priv->device, 
desc->des2)) + des = skb_frag_dma_map(priv->device, frag, 0, len, + DMA_TO_DEVICE); + if (dma_mapping_error(priv->device, des)) goto dma_map_err; /* should reuse desc w/o issues */ priv->tx_skbuff[entry] = NULL; - priv->tx_skbuff_dma[entry].buf = desc->des2; + + if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) { + desc->des0 = des; + priv->tx_skbuff_dma[entry].buf = desc->des0; + } else { + desc->des2 = des; + priv->tx_skbuff_dma[entry].buf = desc->des2; + } + priv->tx_skbuff_dma[entry].map_as_page = true; priv->tx_skbuff_dma[entry].len = len; priv->tx_skbuff_dma[entry].last_segment = last_segment; @@ -1988,12 +2285,19 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) if (likely(!is_jumbo)) { bool last_segment = (nfrags == 0); - first->des2 = dma_map_single(priv->device, skb->data, - nopaged_len, DMA_TO_DEVICE); - if (dma_mapping_error(priv->device, first->des2)) + des = dma_map_single(priv->device, skb->data, + nopaged_len, DMA_TO_DEVICE); + if (dma_mapping_error(priv->device, des)) goto dma_map_err; - priv->tx_skbuff_dma[first_entry].buf = first->des2; + if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) { + first->des0 = des; + priv->tx_skbuff_dma[first_entry].buf = first->des0; + } else { + first->des2 = des; + priv->tx_skbuff_dma[first_entry].buf = first->des2; + } + priv->tx_skbuff_dma[first_entry].len = nopaged_len; priv->tx_skbuff_dma[first_entry].last_segment = last_segment; @@ -2017,7 +2321,12 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) } netdev_sent_queue(dev, skb->len); - priv->hw->dma->enable_dma_transmission(priv->ioaddr); + + if (priv->synopsys_id < DWMAC_CORE_4_00) + priv->hw->dma->enable_dma_transmission(priv->ioaddr); + else + priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, priv->tx_tail_addr, + STMMAC_CHAN0); spin_unlock(&priv->tx_lock); return NETDEV_TX_OK; @@ -2099,9 +2408,15 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv) dev_kfree_skb(skb); break; } - p->des2 = priv->rx_skbuff_dma[entry]; - priv->hw->mode->refill_desc3(priv, p); + if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) { + p->des0 = priv->rx_skbuff_dma[entry]; + p->des1 = 0; + } else { + p->des2 = priv->rx_skbuff_dma[entry]; + } + if (priv->hw->mode->refill_desc3) + priv->hw->mode->refill_desc3(priv, p); if (priv->rx_zeroc_thresh > 0) priv->rx_zeroc_thresh--; @@ -2109,9 +2424,13 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv) if (netif_msg_rx_status(priv)) pr_debug("\trefill entry #%d\n", entry); } - wmb(); - priv->hw->desc->set_rx_owner(p); + + if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) + priv->hw->desc->init_rx_desc(p, priv->use_riwt, 0, 0); + else + priv->hw->desc->set_rx_owner(p); + wmb(); entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE); @@ -2192,11 +2511,23 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit) } else { struct sk_buff *skb; int frame_len; + unsigned int des; + + if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) + des = p->des0; + else + des = p->des2; frame_len = priv->hw->desc->get_rx_frame_len(p, coe); - /* check if frame_len fits the preallocated memory */ + /* If frame length is greather than skb buffer size + * (preallocated during init) then the packet is + * ignored + */ if (frame_len > priv->dma_buf_sz) { + pr_err("%s: len %d larger than size (%d)\n", + priv->dev->name, frame_len, + priv->dma_buf_sz); priv->dev->stats.rx_length_errors++; break; } @@ -2209,14 +2540,19 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit) if (netif_msg_rx_status(priv)) { 
pr_debug("\tdesc: %p [entry %d] buff=0x%x\n", - p, entry, p->des2); + p, entry, des); if (frame_len > ETH_FRAME_LEN) pr_debug("\tframe size %d, COE: %d\n", frame_len, status); } - if (unlikely((frame_len < priv->rx_copybreak) || - stmmac_rx_threshold_count(priv))) { + /* The zero-copy is always used for all the sizes + * in case of GMAC4 because it needs + * to refill the used descriptors, always. + */ + if (unlikely(!priv->plat->has_gmac4 && + ((frame_len < priv->rx_copybreak) || + stmmac_rx_threshold_count(priv)))) { skb = netdev_alloc_skb_ip_align(priv->dev, frame_len); if (unlikely(!skb)) { @@ -2368,7 +2704,7 @@ static int stmmac_change_mtu(struct net_device *dev, int new_mtu) return -EBUSY; } - if (priv->plat->enh_desc) + if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00)) max_mtu = JUMBO_LEN; else max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN); @@ -2382,6 +2718,7 @@ static int stmmac_change_mtu(struct net_device *dev, int new_mtu) } dev->mtu = new_mtu; + netdev_update_features(dev); return 0; @@ -2406,6 +2743,14 @@ static netdev_features_t stmmac_fix_features(struct net_device *dev, if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN)) features &= ~NETIF_F_CSUM_MASK; + /* Disable tso if asked by ethtool */ + if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) { + if (features & NETIF_F_TSO) + priv->tso = true; + else + priv->tso = false; + } + return features; } @@ -2452,7 +2797,7 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id) } /* To handle GMAC own interrupts */ - if (priv->plat->has_gmac) { + if ((priv->plat->has_gmac) || (priv->plat->has_gmac4)) { int status = priv->hw->mac->host_irq_status(priv->hw, &priv->xstats); if (unlikely(status)) { @@ -2461,6 +2806,10 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id) priv->tx_path_in_lpi_mode = true; if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE) priv->tx_path_in_lpi_mode = false; + if (status & CORE_IRQ_MTL_RX_OVERFLOW) + priv->hw->dma->set_rx_tail_ptr(priv->ioaddr, + priv->rx_tail_addr, + STMMAC_CHAN0); } } @@ -2533,15 +2882,14 @@ static void sysfs_display_ring(void *head, int size, int extend_desc, x = *(u64 *) ep; seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n", i, (unsigned int)virt_to_phys(ep), - (unsigned int)x, (unsigned int)(x >> 32), + ep->basic.des0, ep->basic.des1, ep->basic.des2, ep->basic.des3); ep++; } else { x = *(u64 *) p; seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n", i, (unsigned int)virt_to_phys(ep), - (unsigned int)x, (unsigned int)(x >> 32), - p->des2, p->des3); + p->des0, p->des1, p->des2, p->des3); p++; } seq_printf(seq, "\n"); @@ -2624,10 +2972,15 @@ static int stmmac_sysfs_dma_cap_read(struct seq_file *seq, void *v) seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N"); seq_printf(seq, "\tChecksum Offload in TX: %s\n", (priv->dma_cap.tx_coe) ? "Y" : "N"); - seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n", - (priv->dma_cap.rx_coe_type1) ? "Y" : "N"); - seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n", - (priv->dma_cap.rx_coe_type2) ? "Y" : "N"); + if (priv->synopsys_id >= DWMAC_CORE_4_00) { + seq_printf(seq, "\tIP Checksum Offload in RX: %s\n", + (priv->dma_cap.rx_coe) ? "Y" : "N"); + } else { + seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n", + (priv->dma_cap.rx_coe_type1) ? "Y" : "N"); + seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n", + (priv->dma_cap.rx_coe_type2) ? "Y" : "N"); + } seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n", (priv->dma_cap.rxfifo_over_2048) ? 
"Y" : "N"); seq_printf(seq, "\tNumber of Additional RX channel: %d\n", @@ -2738,6 +3091,12 @@ static int stmmac_hw_init(struct stmmac_priv *priv) priv->plat->multicast_filter_bins, priv->plat->unicast_filter_entries, &priv->synopsys_id); + } else if (priv->plat->has_gmac4) { + priv->dev->priv_flags |= IFF_UNICAST_FLT; + mac = dwmac4_setup(priv->ioaddr, + priv->plat->multicast_filter_bins, + priv->plat->unicast_filter_entries, + &priv->synopsys_id); } else { mac = dwmac100_setup(priv->ioaddr, &priv->synopsys_id); } @@ -2747,14 +3106,18 @@ static int stmmac_hw_init(struct stmmac_priv *priv) priv->hw = mac; /* To use the chained or ring mode */ - if (chain_mode) { - priv->hw->mode = &chain_mode_ops; - pr_info(" Chain mode enabled\n"); - priv->mode = STMMAC_CHAIN_MODE; + if (priv->synopsys_id >= DWMAC_CORE_4_00) { + priv->hw->mode = &dwmac4_ring_mode_ops; } else { - priv->hw->mode = &ring_mode_ops; - pr_info(" Ring mode enabled\n"); - priv->mode = STMMAC_RING_MODE; + if (chain_mode) { + priv->hw->mode = &chain_mode_ops; + pr_info(" Chain mode enabled\n"); + priv->mode = STMMAC_CHAIN_MODE; + } else { + priv->hw->mode = &ring_mode_ops; + pr_info(" Ring mode enabled\n"); + priv->mode = STMMAC_RING_MODE; + } } /* Get the HW capability (new GMAC newer than 3.50a) */ @@ -2770,11 +3133,9 @@ static int stmmac_hw_init(struct stmmac_priv *priv) priv->plat->enh_desc = priv->dma_cap.enh_desc; priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up; - /* TXCOE doesn't work in thresh DMA mode */ - if (priv->plat->force_thresh_dma_mode) - priv->plat->tx_coe = 0; - else - priv->plat->tx_coe = priv->dma_cap.tx_coe; + priv->plat->tx_coe = priv->dma_cap.tx_coe; + /* In case of GMAC4 rx_coe is from HW cap register. */ + priv->plat->rx_coe = priv->dma_cap.rx_coe; if (priv->dma_cap.rx_coe_type2) priv->plat->rx_coe = STMMAC_RX_COE_TYPE2; @@ -2784,13 +3145,17 @@ static int stmmac_hw_init(struct stmmac_priv *priv) } else pr_info(" No HW DMA feature register supported"); - /* To use alternate (extended) or normal descriptor structures */ - stmmac_selec_desc_mode(priv); + /* To use alternate (extended), normal or GMAC4 descriptor structures */ + if (priv->synopsys_id >= DWMAC_CORE_4_00) + priv->hw->desc = &dwmac4_desc_ops; + else + stmmac_selec_desc_mode(priv); if (priv->plat->rx_coe) { priv->hw->rx_csum = priv->plat->rx_coe; - pr_info(" RX Checksum Offload Engine supported (type %d)\n", - priv->plat->rx_coe); + pr_info(" RX Checksum Offload Engine supported\n"); + if (priv->synopsys_id < DWMAC_CORE_4_00) + pr_info("\tCOE Type %d\n", priv->hw->rx_csum); } if (priv->plat->tx_coe) pr_info(" TX Checksum insertion supported\n"); @@ -2800,6 +3165,9 @@ static int stmmac_hw_init(struct stmmac_priv *priv) device_set_wakeup_capable(priv->device, 1); } + if (priv->dma_cap.tsoen) + pr_info(" TSO supported\n"); + return 0; } @@ -2903,6 +3271,12 @@ int stmmac_dvr_probe(struct device *device, ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM; + + if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) { + ndev->hw_features |= NETIF_F_TSO; + priv->tso = true; + pr_info(" TSO feature enabled\n"); + } ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA; ndev->watchdog_timeo = msecs_to_jiffies(watchdog); #ifdef STMMAC_VLAN_TAG_USED @@ -3097,6 +3471,11 @@ int stmmac_resume(struct net_device *ndev) priv->dirty_rx = 0; priv->dirty_tx = 0; priv->cur_tx = 0; + /* reset private mss value to force mss context settings at + * next tso xmit (only used for gmac4). 
+ */ + priv->mss = 0; + stmmac_clear_descriptors(priv); stmmac_hw_setup(ndev, false); From 0b7a43d37633614113ac54af73c193862dff4e50 Mon Sep 17 00:00:00 2001 From: Alexandre TORGUE Date: Fri, 1 Apr 2016 11:37:35 +0200 Subject: [PATCH 0142/1649] Documentation: networking: update stmmac Update stmmac driver documentation according to new GMAC 4.x family. Signed-off-by: Alexandre TORGUE Signed-off-by: David S. Miller --- Documentation/networking/stmmac.txt | 44 +++++++++++++++++++++++++---- 1 file changed, 38 insertions(+), 6 deletions(-) diff --git a/Documentation/networking/stmmac.txt b/Documentation/networking/stmmac.txt index d64a14714236..671fe3dd56d3 100644 --- a/Documentation/networking/stmmac.txt +++ b/Documentation/networking/stmmac.txt @@ -1,6 +1,6 @@ STMicroelectronics 10/100/1000 Synopsys Ethernet driver -Copyright (C) 2007-2014 STMicroelectronics Ltd +Copyright (C) 2007-2015 STMicroelectronics Ltd Author: Giuseppe Cavallaro This is the driver for the MAC 10/100/1000 on-chip Ethernet controllers @@ -138,6 +138,8 @@ struct plat_stmmacenet_data { int (*init)(struct platform_device *pdev, void *priv); void (*exit)(struct platform_device *pdev, void *priv); void *bsp_priv; + int has_gmac4; + bool tso_en; }; Where: @@ -181,6 +183,8 @@ Where: registers. init/exit callbacks should not use or modify platform data. o bsp_priv: another private pointer. + o has_gmac4: uses GMAC4 core. + o tso_en: Enables TSO (TCP Segmentation Offload) feature. For MDIO bus The we have: @@ -278,6 +282,13 @@ Please see the following document: o stmmac_ethtool.c: to implement the ethtool support; o stmmac.h: private driver structure; o common.h: common definitions and VFTs; + o mmc_core.c/mmc.h: Management MAC Counters; + o stmmac_hwtstamp.c: HW timestamp support for PTP; + o stmmac_ptp.c: PTP 1588 clock; + o dwmac-.c: these are for the platform glue-logic file; e.g. dwmac-sti.c + for STMicroelectronics SoCs. + +- GMAC 3.x o descs.h: descriptor structure definitions; o dwmac1000_core.c: dwmac GiGa core functions; o dwmac1000_dma.c: dma functions for the GMAC chip; @@ -289,11 +300,32 @@ Please see the following document: o enh_desc.c: functions for handling enhanced descriptors; o norm_desc.c: functions for handling normal descriptors; o chain_mode.c/ring_mode.c:: functions to manage RING/CHAINED modes; - o mmc_core.c/mmc.h: Management MAC Counters; - o stmmac_hwtstamp.c: HW timestamp support for PTP; - o stmmac_ptp.c: PTP 1588 clock; - o dwmac-.c: these are for the platform glue-logic file; e.g. dwmac-sti.c - for STMicroelectronics SoCs. + +- GMAC4.x generation + o dwmac4_core.c: dwmac GMAC4.x core functions; + o dwmac4_desc.c: functions for handling GMAC4.x descriptors; + o dwmac4_descs.h: descriptor definitions; + o dwmac4_dma.c: dma functions for the GMAC4.x chip; + o dwmac4_dma.h: dma definitions for the GMAC4.x chip; + o dwmac4.h: core definitions for the GMAC4.x chip; + o dwmac4_lib.c: generic GMAC4.x functions; + +4.12) TSO support (GMAC4.x) + +TSO (Tcp Segmentation Offload) feature is supported by GMAC 4.x chip family. +When a packet is sent through TCP protocol, the TCP stack ensures that +the SKB provided to the low level driver (stmmac in our case) matches with +the maximum frame len (IP header + TCP header + payload <= 1500 bytes (for +MTU set to 1500)). It means that if an application using TCP want to send a +packet which will have a length (after adding headers) > 1514 the packet +will be split in several TCP packets: The data payload is split and headers +(TCP/IP ..) are added. 
It is done by software. + +When TSO is enabled, the TCP stack doesn't care about the maximum frame +length and provide SKB packet to stmmac as it is. The GMAC IP will have to +perform the segmentation by it self to match with maximum frame length. + +This feature can be enabled in device tree through "snps,tso" entry. 5) Debug Information From 06bce7dd1507c9b943fb20c845e02c6f4c172a55 Mon Sep 17 00:00:00 2001 From: Alexandre TORGUE Date: Fri, 1 Apr 2016 11:37:36 +0200 Subject: [PATCH 0143/1649] stmmac: update version to Jan_2016 This patch just updates the driver to the version fully tested on STi platforms. This version is Jan_2016. Signed-off-by: Alexandre TORGUE Signed-off-by: David S. Miller --- drivers/net/ethernet/stmicro/stmmac/stmmac.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h index 317ce3580e13..ff6750621ff7 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h @@ -24,7 +24,7 @@ #define __STMMAC_H__ #define STMMAC_RESOURCE_NAME "stmmaceth" -#define DRV_MODULE_VERSION "Dec_2015" +#define DRV_MODULE_VERSION "Jan_2016" #include #include From 91979b9db86340d7cd49392a498663fb1ac74639 Mon Sep 17 00:00:00 2001 From: Alexandre TORGUE Date: Fri, 1 Apr 2016 11:37:37 +0200 Subject: [PATCH 0144/1649] stmmac: update MAINTAINERS Signed-off-by: Alexandre TORGUE Signed-off-by: David S. Miller --- MAINTAINERS | 1 + 1 file changed, 1 insertion(+) diff --git a/MAINTAINERS b/MAINTAINERS index 7ba7bc485d74..67d99dd0e2e5 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -3348,6 +3348,7 @@ F: Documentation/powerpc/cxlflash.txt STMMAC ETHERNET DRIVER M: Giuseppe Cavallaro +M: Alexandre Torgue L: netdev@vger.kernel.org W: http://www.stlinux.com S: Supported From 47771902a9beb23859805721f1d98d03dee5da7c Mon Sep 17 00:00:00 2001 From: Raja Mani Date: Wed, 16 Mar 2016 18:13:33 +0530 Subject: [PATCH 0145/1649] ath10k: introduce Extended Resource Config support for 10.4 Add API support for Extended Resource Configuration for 10.4. This is useful to enable new features like Peer Stats, LTEU etc if the firmware advertises support for the service. This is also done to provide backward compatibility with older firmware. 
Also for clarity send default host platform type as 'WMI_HOST_PLATFORM_HIGH_PERF', though this should not make any difference in functionality Signed-off-by: Raja Mani Signed-off-by: Mohammed Shafi Shajakhan Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/wmi-ops.h | 23 +++++++++++++++++ drivers/net/wireless/ath/ath10k/wmi.c | 24 ++++++++++++++++++ drivers/net/wireless/ath/ath10k/wmi.h | 31 +++++++++++++++++++++++ 3 files changed, 78 insertions(+) diff --git a/drivers/net/wireless/ath/ath10k/wmi-ops.h b/drivers/net/wireless/ath/ath10k/wmi-ops.h index 32ab34edceb5..7fb00dcc03b8 100644 --- a/drivers/net/wireless/ath/ath10k/wmi-ops.h +++ b/drivers/net/wireless/ath/ath10k/wmi-ops.h @@ -186,6 +186,9 @@ struct wmi_ops { u8 enable, u32 detect_level, u32 detect_margin); + struct sk_buff *(*ext_resource_config)(struct ath10k *ar, + enum wmi_host_platform_type type, + u32 fw_feature_bitmap); int (*get_vdev_subtype)(struct ath10k *ar, enum wmi_vdev_subtype subtype); }; @@ -1329,6 +1332,26 @@ ath10k_wmi_pdev_enable_adaptive_cca(struct ath10k *ar, u8 enable, ar->wmi.cmd->pdev_enable_adaptive_cca_cmdid); } +static inline int +ath10k_wmi_ext_resource_config(struct ath10k *ar, + enum wmi_host_platform_type type, + u32 fw_feature_bitmap) +{ + struct sk_buff *skb; + + if (!ar->wmi.ops->ext_resource_config) + return -EOPNOTSUPP; + + skb = ar->wmi.ops->ext_resource_config(ar, type, + fw_feature_bitmap); + + if (IS_ERR(skb)) + return PTR_ERR(skb); + + return ath10k_wmi_cmd_send(ar, skb, + ar->wmi.cmd->ext_resource_cfg_cmdid); +} + static inline int ath10k_wmi_get_vdev_subtype(struct ath10k *ar, enum wmi_vdev_subtype subtype) { diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c index afed9dab74f4..3af1af74e84d 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.c +++ b/drivers/net/wireless/ath/ath10k/wmi.c @@ -705,6 +705,7 @@ static struct wmi_cmd_map wmi_10_4_cmd_map = { .set_cca_params_cmdid = WMI_10_4_SET_CCA_PARAMS_CMDID, .pdev_bss_chan_info_request_cmdid = WMI_10_4_PDEV_BSS_CHAN_INFO_REQUEST_CMDID, + .ext_resource_cfg_cmdid = WMI_10_4_EXT_RESOURCE_CFG_CMDID, }; /* MAIN WMI VDEV param map */ @@ -7479,6 +7480,28 @@ static int ath10k_wmi_10_4_op_get_vdev_subtype(struct ath10k *ar, return -ENOTSUPP; } +static struct sk_buff * +ath10k_wmi_10_4_ext_resource_config(struct ath10k *ar, + enum wmi_host_platform_type type, + u32 fw_feature_bitmap) +{ + struct wmi_ext_resource_config_10_4_cmd *cmd; + struct sk_buff *skb; + + skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); + if (!skb) + return ERR_PTR(-ENOMEM); + + cmd = (struct wmi_ext_resource_config_10_4_cmd *)skb->data; + cmd->host_platform_config = __cpu_to_le32(type); + cmd->fw_feature_bitmap = __cpu_to_le32(fw_feature_bitmap); + + ath10k_dbg(ar, ATH10K_DBG_WMI, + "wmi ext resource config host type %d firmware feature bitmap %08x\n", + type, fw_feature_bitmap); + return skb; +} + static const struct wmi_ops wmi_ops = { .rx = ath10k_wmi_op_rx, .map_svc = wmi_main_svc_map, @@ -7805,6 +7828,7 @@ static const struct wmi_ops wmi_10_4_ops = { .gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp, .gen_delba_send = ath10k_wmi_op_gen_delba_send, .fw_stats_fill = ath10k_wmi_10_4_op_fw_stats_fill, + .ext_resource_config = ath10k_wmi_10_4_ext_resource_config, /* shared with 10.2 */ .gen_request_stats = ath10k_wmi_op_gen_request_stats, diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h index bb42f7a6ba23..bd29f271524d 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.h +++ 
b/drivers/net/wireless/ath/ath10k/wmi.h @@ -816,6 +816,7 @@ struct wmi_cmd_map { u32 set_cca_params_cmdid; u32 pdev_bss_chan_info_request_cmdid; u32 pdev_enable_adaptive_cca_cmdid; + u32 ext_resource_cfg_cmdid; }; /* @@ -2667,6 +2668,31 @@ struct wmi_resource_config_10_4 { __le32 qwrap_config; } __packed; +/** + * enum wmi_10_4_feature_mask - WMI 10.4 feature enable/disable flags + * @WMI_10_4_LTEU_SUPPORT: LTEU config + * @WMI_10_4_COEX_GPIO_SUPPORT: COEX GPIO config + * @WMI_10_4_AUX_RADIO_SPECTRAL_INTF: AUX Radio Enhancement for spectral scan + * @WMI_10_4_AUX_RADIO_CHAN_LOAD_INTF: AUX Radio Enhancement for chan load scan + * @WMI_10_4_BSS_CHANNEL_INFO_64: BSS channel info stats + * @WMI_10_4_PEER_STATS: Per station stats + */ +enum wmi_10_4_feature_mask { + WMI_10_4_LTEU_SUPPORT = BIT(0), + WMI_10_4_COEX_GPIO_SUPPORT = BIT(1), + WMI_10_4_AUX_RADIO_SPECTRAL_INTF = BIT(2), + WMI_10_4_AUX_RADIO_CHAN_LOAD_INTF = BIT(3), + WMI_10_4_BSS_CHANNEL_INFO_64 = BIT(4), + WMI_10_4_PEER_STATS = BIT(5), +}; + +struct wmi_ext_resource_config_10_4_cmd { + /* contains enum wmi_host_platform_type */ + __le32 host_platform_config; + /* see enum wmi_10_4_feature_mask */ + __le32 fw_feature_bitmap; +}; + /* strucutre describing host memory chunk. */ struct host_memory_chunk { /* id of the request that is passed up in service ready */ @@ -6408,6 +6434,11 @@ struct wmi_pdev_set_adaptive_cca_params { __le32 cca_detect_margin; } __packed; +enum wmi_host_platform_type { + WMI_HOST_PLATFORM_HIGH_PERF, + WMI_HOST_PLATFORM_LOW_PERF, +}; + struct ath10k; struct ath10k_vif; struct ath10k_fw_stats_pdev; From f9575793d44ce68b574d9d8ffb9813eb05c3fd2b Mon Sep 17 00:00:00 2001 From: Mohammed Shafi Shajakhan Date: Wed, 16 Mar 2016 18:13:34 +0530 Subject: [PATCH 0146/1649] ath10k: enable parsing per station rx duration for 10.4 Rx duration support for per station is part of extended peer stats, enable provision to parse the same and provide backward compatibility based on the 'stats_id' event Signed-off-by: Mohammed Shafi Shajakhan Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/core.c | 19 +++++++++++++++- drivers/net/wireless/ath/ath10k/wmi.c | 30 ++++++++++++++++++++------ drivers/net/wireless/ath/ath10k/wmi.h | 16 ++++++++++++++ 3 files changed, 58 insertions(+), 7 deletions(-) diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c index 7a714d971615..b2c7fe3d30a4 100644 --- a/drivers/net/wireless/ath/ath10k/core.c +++ b/drivers/net/wireless/ath/ath10k/core.c @@ -1615,7 +1615,8 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar) ar->num_active_peers = TARGET_10_4_ACTIVE_PEERS; ar->max_num_vdevs = TARGET_10_4_NUM_VDEVS; ar->num_tids = TARGET_10_4_TGT_NUM_TIDS; - ar->fw_stats_req_mask = WMI_STAT_PEER; + ar->fw_stats_req_mask = WMI_10_4_STAT_PEER | + WMI_10_4_STAT_PEER_EXTD; ar->max_spatial_stream = ar->hw_params.max_spatial_stream; if (test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL, @@ -1660,6 +1661,7 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar) int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode) { int status; + u32 val; lockdep_assert_held(&ar->conf_mutex); @@ -1780,6 +1782,21 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode) ath10k_dbg(ar, ATH10K_DBG_BOOT, "firmware %s booted\n", ar->hw->wiphy->fw_version); + if (test_bit(WMI_SERVICE_EXT_RES_CFG_SUPPORT, ar->wmi.svc_map)) { + val = 0; + if (ath10k_peer_stats_enabled(ar)) + val = WMI_10_4_PEER_STATS; + + status = 
ath10k_wmi_ext_resource_config(ar, + WMI_HOST_PLATFORM_HIGH_PERF, val); + if (status) { + ath10k_err(ar, + "failed to send ext resource cfg command : %d\n", + status); + goto err_hif_stop; + } + } + status = ath10k_wmi_cmd_init(ar); if (status) { ath10k_err(ar, "could not send WMI init command (%d)\n", diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c index 3af1af74e84d..ac8622718f58 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.c +++ b/drivers/net/wireless/ath/ath10k/wmi.c @@ -2604,6 +2604,16 @@ void ath10k_wmi_pull_peer_stats(const struct wmi_peer_stats *src, dst->peer_tx_rate = __le32_to_cpu(src->peer_tx_rate); } +static void +ath10k_wmi_10_4_pull_peer_stats(const struct wmi_10_4_peer_stats *src, + struct ath10k_fw_stats_peer *dst) +{ + ether_addr_copy(dst->peer_macaddr, src->peer_macaddr.addr); + dst->peer_rssi = __le32_to_cpu(src->peer_rssi); + dst->peer_tx_rate = __le32_to_cpu(src->peer_tx_rate); + dst->peer_rx_rate = __le32_to_cpu(src->peer_rx_rate); +} + static int ath10k_wmi_main_op_pull_fw_stats(struct ath10k *ar, struct sk_buff *skb, struct ath10k_fw_stats *stats) @@ -2894,6 +2904,7 @@ static int ath10k_wmi_10_4_op_pull_fw_stats(struct ath10k *ar, u32 num_pdev_ext_stats; u32 num_vdev_stats; u32 num_peer_stats; + u32 stats_id; int i; if (!skb_pull(skb, sizeof(*ev))) @@ -2903,6 +2914,7 @@ static int ath10k_wmi_10_4_op_pull_fw_stats(struct ath10k *ar, num_pdev_ext_stats = __le32_to_cpu(ev->num_pdev_ext_stats); num_vdev_stats = __le32_to_cpu(ev->num_vdev_stats); num_peer_stats = __le32_to_cpu(ev->num_peer_stats); + stats_id = __le32_to_cpu(ev->stats_id); for (i = 0; i < num_pdev_stats; i++) { const struct wmi_10_4_pdev_stats *src; @@ -2942,22 +2954,28 @@ static int ath10k_wmi_10_4_op_pull_fw_stats(struct ath10k *ar, /* fw doesn't implement vdev stats */ for (i = 0; i < num_peer_stats; i++) { - const struct wmi_10_4_peer_stats *src; + const struct wmi_10_4_peer_extd_stats *src; struct ath10k_fw_stats_peer *dst; + int stats_len; + bool extd_peer_stats = !!(stats_id & WMI_10_4_STAT_PEER_EXTD); + + if (extd_peer_stats) + stats_len = sizeof(struct wmi_10_4_peer_extd_stats); + else + stats_len = sizeof(struct wmi_10_4_peer_stats); src = (void *)skb->data; - if (!skb_pull(skb, sizeof(*src))) + if (!skb_pull(skb, stats_len)) return -EPROTO; dst = kzalloc(sizeof(*dst), GFP_ATOMIC); if (!dst) continue; - ether_addr_copy(dst->peer_macaddr, src->peer_macaddr.addr); - dst->peer_rssi = __le32_to_cpu(src->peer_rssi); - dst->peer_tx_rate = __le32_to_cpu(src->peer_tx_rate); - dst->peer_rx_rate = __le32_to_cpu(src->peer_rx_rate); + ath10k_wmi_10_4_pull_peer_stats(&src->common, dst); /* FIXME: expose 10.4 specific values */ + if (extd_peer_stats) + dst->rx_duration = __le32_to_cpu(src->rx_duration); list_add_tail(&dst->list, &stats->peers); } diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h index bd29f271524d..feebd19ff08c 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.h +++ b/drivers/net/wireless/ath/ath10k/wmi.h @@ -4104,6 +4104,13 @@ enum wmi_stats_id { WMI_STAT_VDEV_RATE = BIT(5), }; +enum wmi_10_4_stats_id { + WMI_10_4_STAT_PEER = BIT(0), + WMI_10_4_STAT_AP = BIT(1), + WMI_10_4_STAT_INST = BIT(2), + WMI_10_4_STAT_PEER_EXTD = BIT(3), +}; + struct wlan_inst_rssi_args { __le16 cfg_retry_count; __le16 retry_count; @@ -4303,6 +4310,15 @@ struct wmi_10_4_peer_stats { __le32 peer_rssi_changed; } __packed; +struct wmi_10_4_peer_extd_stats { + struct wmi_10_4_peer_stats common; + struct wmi_mac_addr peer_macaddr; + 
__le32 inactive_time; + __le32 peer_chain_rssi; + __le32 rx_duration; + __le32 reserved[10]; +} __packed; + struct wmi_10_2_pdev_ext_stats { __le32 rx_rssi_comb; __le32 rx_rssi[4]; From 59465fe46ef1c2caf2c1beca828c4f29d28b98ca Mon Sep 17 00:00:00 2001 From: Rajkumar Manoharan Date: Tue, 22 Mar 2016 17:22:11 +0530 Subject: [PATCH 0147/1649] ath10k: speedup htt rx descriptor processing for tx completion To optimize CPU usage htt rx descriptors will be reused instead of refilling it for htt rx copy engine (CE5). To support that all htt rx indications should be processed at same context. FIFO queue is used to maintain tx completion status for each msdu. This helps to retain the order of tx completion. Signed-off-by: Rajkumar Manoharan Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/htt.h | 18 ++++++-- drivers/net/wireless/ath/ath10k/htt_rx.c | 58 ++++++++++++++---------- drivers/net/wireless/ath/ath10k/htt_tx.c | 14 +++++- drivers/net/wireless/ath/ath10k/txrx.c | 12 ++--- 4 files changed, 65 insertions(+), 37 deletions(-) diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h index d196bcc50e50..76c4bae0b434 100644 --- a/drivers/net/wireless/ath/ath10k/htt.h +++ b/drivers/net/wireless/ath/ath10k/htt.h @@ -22,6 +22,7 @@ #include #include #include +#include #include #include "htc.h" @@ -1526,10 +1527,15 @@ struct htt_resp { /*** host side structures follow ***/ struct htt_tx_done { - u32 msdu_id; - bool discard; - bool no_ack; - bool success; + u16 msdu_id; + u16 status; +}; + +enum htt_tx_compl_state { + HTT_TX_COMPL_STATE_NONE, + HTT_TX_COMPL_STATE_ACK, + HTT_TX_COMPL_STATE_NOACK, + HTT_TX_COMPL_STATE_DISCARD, }; struct htt_peer_map_event { @@ -1650,6 +1656,9 @@ struct ath10k_htt { struct idr pending_tx; wait_queue_head_t empty_tx_wq; + /* FIFO for storing tx done status {ack, no-ack, discard} and msdu id */ + DECLARE_KFIFO_PTR(txdone_fifo, struct htt_tx_done); + /* set if host-fw communication goes haywire * used to avoid further failures */ bool rx_confused; @@ -1658,7 +1667,6 @@ struct ath10k_htt { /* This is used to group tx/rx completions separately and process them * in batches to reduce cache stalls */ struct tasklet_struct txrx_compl_task; - struct sk_buff_head tx_compl_q; struct sk_buff_head rx_compl_q; struct sk_buff_head rx_in_ord_compl_q; struct sk_buff_head tx_fetch_ind_q; diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c index 06975bf49351..ea73a233ecd7 100644 --- a/drivers/net/wireless/ath/ath10k/htt_rx.c +++ b/drivers/net/wireless/ath/ath10k/htt_rx.c @@ -226,7 +226,6 @@ void ath10k_htt_rx_free(struct ath10k_htt *htt) tasklet_kill(&htt->rx_replenish_task); tasklet_kill(&htt->txrx_compl_task); - skb_queue_purge(&htt->tx_compl_q); skb_queue_purge(&htt->rx_compl_q); skb_queue_purge(&htt->rx_in_ord_compl_q); skb_queue_purge(&htt->tx_fetch_ind_q); @@ -567,7 +566,6 @@ int ath10k_htt_rx_alloc(struct ath10k_htt *htt) tasklet_init(&htt->rx_replenish_task, ath10k_htt_rx_replenish_task, (unsigned long)htt); - skb_queue_head_init(&htt->tx_compl_q); skb_queue_head_init(&htt->rx_compl_q); skb_queue_head_init(&htt->rx_in_ord_compl_q); skb_queue_head_init(&htt->tx_fetch_ind_q); @@ -1678,7 +1676,7 @@ static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt, } } -static void ath10k_htt_rx_frm_tx_compl(struct ath10k *ar, +static void ath10k_htt_rx_tx_compl_ind(struct ath10k *ar, struct sk_buff *skb) { struct ath10k_htt *htt = &ar->htt; @@ -1690,19 +1688,19 @@ static void 
ath10k_htt_rx_frm_tx_compl(struct ath10k *ar, switch (status) { case HTT_DATA_TX_STATUS_NO_ACK: - tx_done.no_ack = true; + tx_done.status = HTT_TX_COMPL_STATE_NOACK; break; case HTT_DATA_TX_STATUS_OK: - tx_done.success = true; + tx_done.status = HTT_TX_COMPL_STATE_ACK; break; case HTT_DATA_TX_STATUS_DISCARD: case HTT_DATA_TX_STATUS_POSTPONE: case HTT_DATA_TX_STATUS_DOWNLOAD_FAIL: - tx_done.discard = true; + tx_done.status = HTT_TX_COMPL_STATE_DISCARD; break; default: ath10k_warn(ar, "unhandled tx completion status %d\n", status); - tx_done.discard = true; + tx_done.status = HTT_TX_COMPL_STATE_DISCARD; break; } @@ -1712,7 +1710,20 @@ static void ath10k_htt_rx_frm_tx_compl(struct ath10k *ar, for (i = 0; i < resp->data_tx_completion.num_msdus; i++) { msdu_id = resp->data_tx_completion.msdus[i]; tx_done.msdu_id = __le16_to_cpu(msdu_id); - ath10k_txrx_tx_unref(htt, &tx_done); + + /* kfifo_put: In practice firmware shouldn't fire off per-CE + * interrupt and main interrupt (MSI/-X range case) for the same + * HTC service so it should be safe to use kfifo_put w/o lock. + * + * From kfifo_put() documentation: + * Note that with only one concurrent reader and one concurrent + * writer, you don't need extra locking to use these macro. + */ + if (!kfifo_put(&htt->txdone_fifo, tx_done)) { + ath10k_warn(ar, "txdone fifo overrun, msdu_id %d status %d\n", + tx_done.msdu_id, tx_done.status); + ath10k_txrx_tx_unref(htt, &tx_done); + } } } @@ -2339,18 +2350,17 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb) struct htt_tx_done tx_done = {}; int status = __le32_to_cpu(resp->mgmt_tx_completion.status); - tx_done.msdu_id = - __le32_to_cpu(resp->mgmt_tx_completion.desc_id); + tx_done.msdu_id = __le32_to_cpu(resp->mgmt_tx_completion.desc_id); switch (status) { case HTT_MGMT_TX_STATUS_OK: - tx_done.success = true; + tx_done.status = HTT_TX_COMPL_STATE_ACK; break; case HTT_MGMT_TX_STATUS_RETRY: - tx_done.no_ack = true; + tx_done.status = HTT_TX_COMPL_STATE_NOACK; break; case HTT_MGMT_TX_STATUS_DROP: - tx_done.discard = true; + tx_done.status = HTT_TX_COMPL_STATE_DISCARD; break; } @@ -2364,9 +2374,9 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb) break; } case HTT_T2H_MSG_TYPE_TX_COMPL_IND: - skb_queue_tail(&htt->tx_compl_q, skb); + ath10k_htt_rx_tx_compl_ind(htt->ar, skb); tasklet_schedule(&htt->txrx_compl_task); - return; + break; case HTT_T2H_MSG_TYPE_SEC_IND: { struct ath10k *ar = htt->ar; struct htt_security_indication *ev = &resp->security_indication; @@ -2475,7 +2485,7 @@ static void ath10k_htt_txrx_compl_task(unsigned long ptr) { struct ath10k_htt *htt = (struct ath10k_htt *)ptr; struct ath10k *ar = htt->ar; - struct sk_buff_head tx_q; + struct htt_tx_done tx_done = {}; struct sk_buff_head rx_q; struct sk_buff_head rx_ind_q; struct sk_buff_head tx_ind_q; @@ -2483,15 +2493,10 @@ static void ath10k_htt_txrx_compl_task(unsigned long ptr) struct sk_buff *skb; unsigned long flags; - __skb_queue_head_init(&tx_q); __skb_queue_head_init(&rx_q); __skb_queue_head_init(&rx_ind_q); __skb_queue_head_init(&tx_ind_q); - spin_lock_irqsave(&htt->tx_compl_q.lock, flags); - skb_queue_splice_init(&htt->tx_compl_q, &tx_q); - spin_unlock_irqrestore(&htt->tx_compl_q.lock, flags); - spin_lock_irqsave(&htt->rx_compl_q.lock, flags); skb_queue_splice_init(&htt->rx_compl_q, &rx_q); spin_unlock_irqrestore(&htt->rx_compl_q.lock, flags); @@ -2504,10 +2509,13 @@ static void ath10k_htt_txrx_compl_task(unsigned long ptr) skb_queue_splice_init(&htt->tx_fetch_ind_q, &tx_ind_q); 
spin_unlock_irqrestore(&htt->tx_fetch_ind_q.lock, flags); - while ((skb = __skb_dequeue(&tx_q))) { - ath10k_htt_rx_frm_tx_compl(htt->ar, skb); - dev_kfree_skb_any(skb); - } + /* kfifo_get: called only within txrx_tasklet so it's neatly serialized. + * From kfifo_get() documentation: + * Note that with only one concurrent reader and one concurrent writer, + * you don't need extra locking to use these macro. + */ + while (kfifo_get(&htt->txdone_fifo, &tx_done)) + ath10k_txrx_tx_unref(htt, &tx_done); while ((skb = __skb_dequeue(&tx_ind_q))) { ath10k_htt_rx_tx_fetch_ind(ar, skb); diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c index b2ae122381ca..9baa2e677f8a 100644 --- a/drivers/net/wireless/ath/ath10k/htt_tx.c +++ b/drivers/net/wireless/ath/ath10k/htt_tx.c @@ -339,8 +339,18 @@ int ath10k_htt_tx_alloc(struct ath10k_htt *htt) goto free_frag_desc; } + size = roundup_pow_of_two(htt->max_num_pending_tx); + ret = kfifo_alloc(&htt->txdone_fifo, size, GFP_KERNEL); + if (ret) { + ath10k_err(ar, "failed to alloc txdone fifo: %d\n", ret); + goto free_txq; + } + return 0; +free_txq: + ath10k_htt_tx_free_txq(htt); + free_frag_desc: ath10k_htt_tx_free_cont_frag_desc(htt); @@ -364,8 +374,8 @@ static int ath10k_htt_tx_clean_up_pending(int msdu_id, void *skb, void *ctx) ath10k_dbg(ar, ATH10K_DBG_HTT, "force cleanup msdu_id %hu\n", msdu_id); - tx_done.discard = 1; tx_done.msdu_id = msdu_id; + tx_done.status = HTT_TX_COMPL_STATE_DISCARD; ath10k_txrx_tx_unref(htt, &tx_done); @@ -388,6 +398,8 @@ void ath10k_htt_tx_free(struct ath10k_htt *htt) ath10k_htt_tx_free_txq(htt); ath10k_htt_tx_free_cont_frag_desc(htt); + WARN_ON(!kfifo_is_empty(&htt->txdone_fifo)); + kfifo_free(&htt->txdone_fifo); } void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb) diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c index 48e26cdfe9a5..9369411a9ac0 100644 --- a/drivers/net/wireless/ath/ath10k/txrx.c +++ b/drivers/net/wireless/ath/ath10k/txrx.c @@ -61,9 +61,8 @@ int ath10k_txrx_tx_unref(struct ath10k_htt *htt, struct sk_buff *msdu; ath10k_dbg(ar, ATH10K_DBG_HTT, - "htt tx completion msdu_id %u discard %d no_ack %d success %d\n", - tx_done->msdu_id, !!tx_done->discard, - !!tx_done->no_ack, !!tx_done->success); + "htt tx completion msdu_id %u status %d\n", + tx_done->msdu_id, tx_done->status); if (tx_done->msdu_id >= htt->max_num_pending_tx) { ath10k_warn(ar, "warning: msdu_id %d too big, ignoring\n", @@ -101,7 +100,7 @@ int ath10k_txrx_tx_unref(struct ath10k_htt *htt, memset(&info->status, 0, sizeof(info->status)); trace_ath10k_txrx_tx_unref(ar, tx_done->msdu_id); - if (tx_done->discard) { + if (tx_done->status == HTT_TX_COMPL_STATE_DISCARD) { ieee80211_free_txskb(htt->ar->hw, msdu); return 0; } @@ -109,10 +108,11 @@ int ath10k_txrx_tx_unref(struct ath10k_htt *htt, if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) info->flags |= IEEE80211_TX_STAT_ACK; - if (tx_done->no_ack) + if (tx_done->status == HTT_TX_COMPL_STATE_NOACK) info->flags &= ~IEEE80211_TX_STAT_ACK; - if (tx_done->success && (info->flags & IEEE80211_TX_CTL_NO_ACK)) + if ((tx_done->status == HTT_TX_COMPL_STATE_ACK) && + (info->flags & IEEE80211_TX_CTL_NO_ACK)) info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED; ieee80211_tx_status(htt->ar->hw, msdu); From b2fdbccd15a27d1115a780dcbdcc874e0c9f4abe Mon Sep 17 00:00:00 2001 From: Rajkumar Manoharan Date: Tue, 22 Mar 2016 17:22:12 +0530 Subject: [PATCH 0148/1649] ath10k: copy tx fetch indication message To optmize CPU usage 
htt rx descriptors will be reused instead of refilling it for htt rx copy engine (CE5). To support that all htt rx indications should be proecssed at same context. Instead of queueing actual indication message, queue copied message for txrx processing. Signed-off-by: Rajkumar Manoharan Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/htt_rx.c | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c index ea73a233ecd7..552e8d1a3371 100644 --- a/drivers/net/wireless/ath/ath10k/htt_rx.c +++ b/drivers/net/wireless/ath/ath10k/htt_rx.c @@ -2449,10 +2449,17 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb) } case HTT_T2H_MSG_TYPE_AGGR_CONF: break; - case HTT_T2H_MSG_TYPE_TX_FETCH_IND: - skb_queue_tail(&htt->tx_fetch_ind_q, skb); + case HTT_T2H_MSG_TYPE_TX_FETCH_IND: { + struct sk_buff *tx_fetch_ind = skb_copy(skb, GFP_ATOMIC); + + if (!tx_fetch_ind) { + ath10k_warn(ar, "failed to copy htt tx fetch ind\n"); + break; + } + skb_queue_tail(&htt->tx_fetch_ind_q, tx_fetch_ind); tasklet_schedule(&htt->txrx_compl_task); - return; + break; + } case HTT_T2H_MSG_TYPE_TX_FETCH_CONFIRM: ath10k_htt_rx_tx_fetch_confirm(ar, skb); break; From 6b61d6632a358bc72e14de03ba491907d871c94e Mon Sep 17 00:00:00 2001 From: Rajkumar Manoharan Date: Tue, 22 Mar 2016 17:22:13 +0530 Subject: [PATCH 0149/1649] ath10k: remove unused fw_desc processing The fw descriptor was never used and probably never will be. It makes little sense to maintain support for it. Remove it and simplify rx processing. This will make it easier to optimize rx processing later as well. Signed-off-by: Rajkumar Manoharan Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/htt_rx.c | 65 +----------------------- 1 file changed, 2 insertions(+), 63 deletions(-) diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c index 552e8d1a3371..6a2cbd1a34d3 100644 --- a/drivers/net/wireless/ath/ath10k/htt_rx.c +++ b/drivers/net/wireless/ath/ath10k/htt_rx.c @@ -281,7 +281,6 @@ static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt) /* return: < 0 fatal error, 0 - non chained msdu, 1 chained msdu */ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt, - u8 **fw_desc, int *fw_desc_len, struct sk_buff_head *amsdu) { struct ath10k *ar = htt->ar; @@ -323,48 +322,6 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt, return -EIO; } - /* - * Copy the FW rx descriptor for this MSDU from the rx - * indication message into the MSDU's netbuf. HL uses the - * same rx indication message definition as LL, and simply - * appends new info (fields from the HW rx desc, and the - * MSDU payload itself). So, the offset into the rx - * indication message only has to account for the standard - * offset of the per-MSDU FW rx desc info within the - * message, and how many bytes of the per-MSDU FW rx desc - * info have already been consumed. (And the endianness of - * the host, since for a big-endian host, the rx ind - * message contents, including the per-MSDU rx desc bytes, - * were byteswapped during upload.) - */ - if (*fw_desc_len > 0) { - rx_desc->fw_desc.info0 = **fw_desc; - /* - * The target is expected to only provide the basic - * per-MSDU rx descriptors. Just to be sure, verify - * that the target has not attached extension data - * (e.g. LRO flow ID). 
- */ - - /* or more, if there's extension data */ - (*fw_desc)++; - (*fw_desc_len)--; - } else { - /* - * When an oversized AMSDU happened, FW will lost - * some of MSDU status - in this case, the FW - * descriptors provided will be less than the - * actual MSDUs inside this MPDU. Mark the FW - * descriptors so that it will still deliver to - * upper stack, if no CRC error for this MPDU. - * - * FIX THIS - the FW descriptors are actually for - * MSDUs in the end of this A-MSDU instead of the - * beginning. - */ - rx_desc->fw_desc.info0 = 0; - } - msdu_len_invalid = !!(__le32_to_cpu(rx_desc->attention.flags) & (RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR | RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR)); @@ -1579,8 +1536,6 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt, struct htt_rx_indication_mpdu_range *mpdu_ranges; struct sk_buff_head amsdu; int num_mpdu_ranges; - int fw_desc_len; - u8 *fw_desc; int i, ret, mpdu_count = 0; lockdep_assert_held(&htt->rx_ring.lock); @@ -1588,9 +1543,6 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt, if (htt->rx_confused) return; - fw_desc_len = __le16_to_cpu(rx->prefix.fw_rx_desc_bytes); - fw_desc = (u8 *)&rx->fw_desc; - num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1), HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES); mpdu_ranges = htt_rx_ind_get_mpdu_ranges(rx); @@ -1605,8 +1557,7 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt, while (mpdu_count--) { __skb_queue_head_init(&amsdu); - ret = ath10k_htt_rx_amsdu_pop(htt, &fw_desc, - &fw_desc_len, &amsdu); + ret = ath10k_htt_rx_amsdu_pop(htt, &amsdu); if (ret < 0) { ath10k_warn(ar, "rx ring became corrupted: %d\n", ret); __skb_queue_purge(&amsdu); @@ -1634,17 +1585,11 @@ static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt, struct ieee80211_rx_status *rx_status = &htt->rx_status; struct sk_buff_head amsdu; int ret; - u8 *fw_desc; - int fw_desc_len; - - fw_desc_len = __le16_to_cpu(frag->fw_rx_desc_bytes); - fw_desc = (u8 *)frag->fw_msdu_rx_desc; __skb_queue_head_init(&amsdu); spin_lock_bh(&htt->rx_ring.lock); - ret = ath10k_htt_rx_amsdu_pop(htt, &fw_desc, &fw_desc_len, - &amsdu); + ret = ath10k_htt_rx_amsdu_pop(htt, &amsdu); spin_unlock_bh(&htt->rx_ring.lock); tasklet_schedule(&htt->rx_replenish_task); @@ -1668,12 +1613,6 @@ static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt, ath10k_htt_rx_h_filter(ar, &amsdu, rx_status); ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status); ath10k_htt_rx_h_deliver(ar, &amsdu, rx_status); - - if (fw_desc_len > 0) { - ath10k_dbg(ar, ATH10K_DBG_HTT, - "expecting more fragmented rx in one indication %d\n", - fw_desc_len); - } } static void ath10k_htt_rx_tx_compl_ind(struct ath10k *ar, From 18235664e7f9a5664cbef25d23b222ff2faf55bc Mon Sep 17 00:00:00 2001 From: Rajkumar Manoharan Date: Tue, 22 Mar 2016 17:22:14 +0530 Subject: [PATCH 0150/1649] ath10k: cleanup amsdu processing for rx indication Make amsdu handlers (i.e amsdu_pop and rx_h_handler) common to both rx_ind and frag_ind htt events. It is sufficient to hold rx_ring lock for amsdu_pop alone and no need to hold it until the packets are delivered to mac80211. This helps to reduce rx_lock contention as well. 
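
As a rough illustration of the narrowed locking described above (not part of the patch itself), the small user-space sketch below mimics the pattern: the ring lock is held only while a buffer is popped, and all further processing/delivery runs with the lock dropped. All names (ring_pop, handle_amsdu, RING_SIZE) are illustrative stand-ins, not the driver's real symbols.

/* Simplified user-space analogue of the locking pattern: lock only the pop,
 * process outside the lock. Assumed/illustrative names throughout.
 */
#include <pthread.h>
#include <stdio.h>

#define RING_SIZE 8

static pthread_mutex_t ring_lock = PTHREAD_MUTEX_INITIALIZER;
static int ring[RING_SIZE];
static int ring_head, ring_tail;

/* Pop one entry while holding the lock; returns 0 on success. */
static int ring_pop(int *out)
{
	int ret = -1;

	pthread_mutex_lock(&ring_lock);
	if (ring_head != ring_tail) {
		*out = ring[ring_head];
		ring_head = (ring_head + 1) % RING_SIZE;
		ret = 0;
	}
	pthread_mutex_unlock(&ring_lock);
	return ret;
}

static void handle_amsdu(void)
{
	int buf;

	/* The lock is needed only for the pop itself ... */
	if (ring_pop(&buf))
		return;

	/* ... decap/filter/deliver then run without the ring lock held. */
	printf("deliver buffer %d\n", buf);
}

int main(void)
{
	ring[ring_tail] = 42;
	ring_tail = (ring_tail + 1) % RING_SIZE;
	handle_amsdu();
	return 0;
}
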
Signed-off-by: Rajkumar Manoharan Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/htt_rx.c | 100 ++++++++++------------- 1 file changed, 41 insertions(+), 59 deletions(-) diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c index 6a2cbd1a34d3..cf44e18313f6 100644 --- a/drivers/net/wireless/ath/ath10k/htt_rx.c +++ b/drivers/net/wireless/ath/ath10k/htt_rx.c @@ -1528,20 +1528,49 @@ static void ath10k_htt_rx_h_filter(struct ath10k *ar, __skb_queue_purge(amsdu); } +static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt) +{ + struct ath10k *ar = htt->ar; + static struct ieee80211_rx_status rx_status; + struct sk_buff_head amsdu; + int ret; + + __skb_queue_head_init(&amsdu); + + spin_lock_bh(&htt->rx_ring.lock); + if (htt->rx_confused) { + spin_unlock_bh(&htt->rx_ring.lock); + return -EIO; + } + ret = ath10k_htt_rx_amsdu_pop(htt, &amsdu); + spin_unlock_bh(&htt->rx_ring.lock); + + if (ret < 0) { + ath10k_warn(ar, "rx ring became corrupted: %d\n", ret); + __skb_queue_purge(&amsdu); + /* FIXME: It's probably a good idea to reboot the + * device instead of leaving it inoperable. + */ + htt->rx_confused = true; + return ret; + } + + ath10k_htt_rx_h_ppdu(ar, &amsdu, &rx_status, 0xffff); + ath10k_htt_rx_h_unchain(ar, &amsdu, ret > 0); + ath10k_htt_rx_h_filter(ar, &amsdu, &rx_status); + ath10k_htt_rx_h_mpdu(ar, &amsdu, &rx_status); + ath10k_htt_rx_h_deliver(ar, &amsdu, &rx_status); + + return 0; +} + static void ath10k_htt_rx_handler(struct ath10k_htt *htt, struct htt_rx_indication *rx) { struct ath10k *ar = htt->ar; - struct ieee80211_rx_status *rx_status = &htt->rx_status; struct htt_rx_indication_mpdu_range *mpdu_ranges; - struct sk_buff_head amsdu; int num_mpdu_ranges; - int i, ret, mpdu_count = 0; - - lockdep_assert_held(&htt->rx_ring.lock); - - if (htt->rx_confused) - return; + int i, mpdu_count = 0; num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1), HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES); @@ -1556,63 +1585,18 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt, mpdu_count += mpdu_ranges[i].mpdu_count; while (mpdu_count--) { - __skb_queue_head_init(&amsdu); - ret = ath10k_htt_rx_amsdu_pop(htt, &amsdu); - if (ret < 0) { - ath10k_warn(ar, "rx ring became corrupted: %d\n", ret); - __skb_queue_purge(&amsdu); - /* FIXME: It's probably a good idea to reboot the - * device instead of leaving it inoperable. 
- */ - htt->rx_confused = true; + if (ath10k_htt_rx_handle_amsdu(htt) < 0) break; - } - - ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff); - ath10k_htt_rx_h_unchain(ar, &amsdu, ret > 0); - ath10k_htt_rx_h_filter(ar, &amsdu, rx_status); - ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status); - ath10k_htt_rx_h_deliver(ar, &amsdu, rx_status); } tasklet_schedule(&htt->rx_replenish_task); } -static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt, - struct htt_rx_fragment_indication *frag) +static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt) { - struct ath10k *ar = htt->ar; - struct ieee80211_rx_status *rx_status = &htt->rx_status; - struct sk_buff_head amsdu; - int ret; - - __skb_queue_head_init(&amsdu); - - spin_lock_bh(&htt->rx_ring.lock); - ret = ath10k_htt_rx_amsdu_pop(htt, &amsdu); - spin_unlock_bh(&htt->rx_ring.lock); + ath10k_htt_rx_handle_amsdu(htt); tasklet_schedule(&htt->rx_replenish_task); - - ath10k_dbg(ar, ATH10K_DBG_HTT_DUMP, "htt rx frag ahead\n"); - - if (ret) { - ath10k_warn(ar, "failed to pop amsdu from httr rx ring for fragmented rx %d\n", - ret); - __skb_queue_purge(&amsdu); - return; - } - - if (skb_queue_len(&amsdu) != 1) { - ath10k_warn(ar, "failed to pop frag amsdu: too many msdus\n"); - __skb_queue_purge(&amsdu); - return; - } - - ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff); - ath10k_htt_rx_h_filter(ar, &amsdu, rx_status); - ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status); - ath10k_htt_rx_h_deliver(ar, &amsdu, rx_status); } static void ath10k_htt_rx_tx_compl_ind(struct ath10k *ar, @@ -2331,7 +2315,7 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb) case HTT_T2H_MSG_TYPE_RX_FRAG_IND: { ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ", skb->data, skb->len); - ath10k_htt_rx_frag_handler(htt, &resp->rx_frag_ind); + ath10k_htt_rx_frag_handler(htt); break; } case HTT_T2H_MSG_TYPE_TEST: @@ -2472,9 +2456,7 @@ static void ath10k_htt_txrx_compl_task(unsigned long ptr) while ((skb = __skb_dequeue(&rx_q))) { resp = (struct htt_resp *)skb->data; - spin_lock_bh(&htt->rx_ring.lock); ath10k_htt_rx_handler(htt, &resp->rx_ind); - spin_unlock_bh(&htt->rx_ring.lock); dev_kfree_skb_any(skb); } From 3128b3d8a2b97ba8fe38b21c3ed70c2c66cc7a9e Mon Sep 17 00:00:00 2001 From: Rajkumar Manoharan Date: Tue, 22 Mar 2016 17:22:15 +0530 Subject: [PATCH 0151/1649] ath10k: speedup htt rx descriptor processing for rx_ind In follow up patch, htt rx descriptors will be reused instead of dealloc and refill. To achieve that htt rx indication messages should not be deferred and should be processed in pci tasklet itself. Also from rx indication message, mpdu_count alone is used. So it is maintained as atomic variable and all rx amsdu handlers are done processed from txrx tasklet. This change get rid of rx_compl_q usage. 
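
The counter-based handoff this commit introduces can be pictured with the minimal user-space C11 sketch below (not driver code): the indication path only accumulates how many MPDUs are ready, and the deferred worker consumes that count and walks the rx ring directly. Function names here are illustrative only.

/* Minimal sketch of an atomic "MPDUs ready" counter shared between a
 * producer (rx indication) and a consumer (deferred tasklet-like context).
 */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int num_mpdus_ready;

/* Called from the rx indication path: just account and defer. */
static void rx_ind(int mpdu_count)
{
	atomic_fetch_add(&num_mpdus_ready, mpdu_count);
	/* in the driver, tasklet_schedule() would follow here */
}

/* Called from the deferred context. */
static void txrx_task(void)
{
	int num = atomic_load(&num_mpdus_ready);

	atomic_fetch_sub(&num_mpdus_ready, num);
	while (num--)
		printf("handle one amsdu from the rx ring\n");
}

int main(void)
{
	rx_ind(3);
	txrx_task();
	return 0;
}
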
Signed-off-by: Rajkumar Manoharan Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/htt.h | 1 + drivers/net/wireless/ath/ath10k/htt_rx.c | 39 +++++++++++------------- 2 files changed, 19 insertions(+), 21 deletions(-) diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h index 76c4bae0b434..27a65ecba7b0 100644 --- a/drivers/net/wireless/ath/ath10k/htt.h +++ b/drivers/net/wireless/ath/ath10k/htt.h @@ -1663,6 +1663,7 @@ struct ath10k_htt { * used to avoid further failures */ bool rx_confused; struct tasklet_struct rx_replenish_task; + atomic_t num_mpdus_ready; /* This is used to group tx/rx completions separately and process them * in batches to reduce cache stalls */ diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c index cf44e18313f6..6c616a36c4c4 100644 --- a/drivers/net/wireless/ath/ath10k/htt_rx.c +++ b/drivers/net/wireless/ath/ath10k/htt_rx.c @@ -526,6 +526,7 @@ int ath10k_htt_rx_alloc(struct ath10k_htt *htt) skb_queue_head_init(&htt->rx_compl_q); skb_queue_head_init(&htt->rx_in_ord_compl_q); skb_queue_head_init(&htt->tx_fetch_ind_q); + atomic_set(&htt->num_mpdus_ready, 0); tasklet_init(&htt->txrx_compl_task, ath10k_htt_txrx_compl_task, (unsigned long)htt); @@ -1564,8 +1565,8 @@ static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt) return 0; } -static void ath10k_htt_rx_handler(struct ath10k_htt *htt, - struct htt_rx_indication *rx) +static void ath10k_htt_rx_proc_rx_ind(struct ath10k_htt *htt, + struct htt_rx_indication *rx) { struct ath10k *ar = htt->ar; struct htt_rx_indication_mpdu_range *mpdu_ranges; @@ -1584,19 +1585,16 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt, for (i = 0; i < num_mpdu_ranges; i++) mpdu_count += mpdu_ranges[i].mpdu_count; - while (mpdu_count--) { - if (ath10k_htt_rx_handle_amsdu(htt) < 0) - break; - } + atomic_add(mpdu_count, &htt->num_mpdus_ready); - tasklet_schedule(&htt->rx_replenish_task); + tasklet_schedule(&htt->txrx_compl_task); } static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt) { - ath10k_htt_rx_handle_amsdu(htt); + atomic_inc(&htt->num_mpdus_ready); - tasklet_schedule(&htt->rx_replenish_task); + tasklet_schedule(&htt->txrx_compl_task); } static void ath10k_htt_rx_tx_compl_ind(struct ath10k *ar, @@ -2250,9 +2248,8 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb) break; } case HTT_T2H_MSG_TYPE_RX_IND: - skb_queue_tail(&htt->rx_compl_q, skb); - tasklet_schedule(&htt->txrx_compl_task); - return; + ath10k_htt_rx_proc_rx_ind(htt, &resp->rx_ind); + break; case HTT_T2H_MSG_TYPE_PEER_MAP: { struct htt_peer_map_event ev = { .vdev_id = resp->peer_map.vdev_id, @@ -2419,18 +2416,14 @@ static void ath10k_htt_txrx_compl_task(unsigned long ptr) struct sk_buff_head rx_q; struct sk_buff_head rx_ind_q; struct sk_buff_head tx_ind_q; - struct htt_resp *resp; struct sk_buff *skb; unsigned long flags; + int num_mpdus; __skb_queue_head_init(&rx_q); __skb_queue_head_init(&rx_ind_q); __skb_queue_head_init(&tx_ind_q); - spin_lock_irqsave(&htt->rx_compl_q.lock, flags); - skb_queue_splice_init(&htt->rx_compl_q, &rx_q); - spin_unlock_irqrestore(&htt->rx_compl_q.lock, flags); - spin_lock_irqsave(&htt->rx_in_ord_compl_q.lock, flags); skb_queue_splice_init(&htt->rx_in_ord_compl_q, &rx_ind_q); spin_unlock_irqrestore(&htt->rx_in_ord_compl_q.lock, flags); @@ -2454,10 +2447,12 @@ static void ath10k_htt_txrx_compl_task(unsigned long ptr) ath10k_mac_tx_push_pending(ar); - while ((skb = __skb_dequeue(&rx_q))) { - resp = (struct 
htt_resp *)skb->data; - ath10k_htt_rx_handler(htt, &resp->rx_ind); - dev_kfree_skb_any(skb); + num_mpdus = atomic_read(&htt->num_mpdus_ready); + atomic_sub(num_mpdus, &htt->num_mpdus_ready); + + while (num_mpdus--) { + if (ath10k_htt_rx_handle_amsdu(htt)) + break; } while ((skb = __skb_dequeue(&rx_ind_q))) { @@ -2466,4 +2461,6 @@ static void ath10k_htt_txrx_compl_task(unsigned long ptr) spin_unlock_bh(&htt->rx_ring.lock); dev_kfree_skb_any(skb); } + + tasklet_schedule(&htt->rx_replenish_task); } From e3a91f877c60ce8b29d8cd180c23f3de33a7d7e1 Mon Sep 17 00:00:00 2001 From: Rajkumar Manoharan Date: Tue, 22 Mar 2016 17:22:16 +0530 Subject: [PATCH 0152/1649] ath10k: register ath10k_htt_htc_t2h_msg_handler Except qca61x4 family chips (qca6164, qca6174), copy engine 5 is used for receiving target to host htt messages. In follow up patch, CE5 descriptors will be reused. In such case, same API can not be used as htc layer callback where the response messages will be freed at the end. Hence register new API for HTC layer that free up received message and keep the message handler common for both HTC and HIF layers. Signed-off-by: Rajkumar Manoharan Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/htt.c | 2 +- drivers/net/wireless/ath/ath10k/htt.h | 3 ++- drivers/net/wireless/ath/ath10k/htt_rx.c | 22 +++++++++++++++------- 3 files changed, 18 insertions(+), 9 deletions(-) diff --git a/drivers/net/wireless/ath/ath10k/htt.c b/drivers/net/wireless/ath/ath10k/htt.c index 7561f22f10f9..17a3008d9ab1 100644 --- a/drivers/net/wireless/ath/ath10k/htt.c +++ b/drivers/net/wireless/ath/ath10k/htt.c @@ -149,7 +149,7 @@ int ath10k_htt_connect(struct ath10k_htt *htt) memset(&conn_resp, 0, sizeof(conn_resp)); conn_req.ep_ops.ep_tx_complete = ath10k_htt_htc_tx_complete; - conn_req.ep_ops.ep_rx_complete = ath10k_htt_t2h_msg_handler; + conn_req.ep_ops.ep_rx_complete = ath10k_htt_htc_t2h_msg_handler; /* connect to control service */ conn_req.service_id = ATH10K_HTC_SVC_ID_HTT_DATA_MSG; diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h index 27a65ecba7b0..1adcae4faccf 100644 --- a/drivers/net/wireless/ath/ath10k/htt.h +++ b/drivers/net/wireless/ath/ath10k/htt.h @@ -1765,7 +1765,8 @@ int ath10k_htt_rx_ring_refill(struct ath10k *ar); void ath10k_htt_rx_free(struct ath10k_htt *htt); void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb); -void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb); +void ath10k_htt_htc_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb); +bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb); int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt); int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie); int ath10k_htt_send_frag_desc_bank_cfg(struct ath10k_htt *htt); diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c index 6c616a36c4c4..ac16ce746afb 100644 --- a/drivers/net/wireless/ath/ath10k/htt_rx.c +++ b/drivers/net/wireless/ath/ath10k/htt_rx.c @@ -2219,7 +2219,18 @@ static inline enum ieee80211_band phy_mode_to_band(u32 phy_mode) return band; } -void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb) +void ath10k_htt_htc_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb) +{ + bool release; + + release = ath10k_htt_t2h_msg_handler(ar, skb); + + /* Free the indication buffer */ + if (release) + dev_kfree_skb_any(skb); +} + +bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb) { struct 
ath10k_htt *htt = &ar->htt; struct htt_resp *resp = (struct htt_resp *)skb->data; @@ -2235,8 +2246,7 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb) if (resp->hdr.msg_type >= ar->htt.t2h_msg_types_max) { ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, unsupported msg_type: 0x%0X\n max: 0x%0X", resp->hdr.msg_type, ar->htt.t2h_msg_types_max); - dev_kfree_skb_any(skb); - return; + return true; } type = ar->htt.t2h_msg_types[resp->hdr.msg_type]; @@ -2352,7 +2362,7 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb) case HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND: { skb_queue_tail(&htt->rx_in_ord_compl_q, skb); tasklet_schedule(&htt->txrx_compl_task); - return; + return false; } case HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND: break; @@ -2394,9 +2404,7 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb) skb->data, skb->len); break; }; - - /* Free the indication buffer */ - dev_kfree_skb_any(skb); + return true; } EXPORT_SYMBOL(ath10k_htt_t2h_msg_handler); From 24d9ef5eff5057bb6339ed1cf852a2b2a7be324d Mon Sep 17 00:00:00 2001 From: Rajkumar Manoharan Date: Tue, 22 Mar 2016 17:22:17 +0530 Subject: [PATCH 0153/1649] ath10k: cleanup copy engine receive next completion The physical address necessary to unmap DMA ('bufferp') is stored in ath10k_skb_cb as 'paddr'. For diag register read and write operations, 'paddr' is stored in transfer context. ath10k doesn't rely on the meta/transfer_id. So the unused output arguments {bufferp, nbytesp and transfer_idp} are removed from CE recv_next completion. Signed-off-by: Rajkumar Manoharan Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/ce.c | 21 ++----------- drivers/net/wireless/ath/ath10k/ce.h | 10 ++----- drivers/net/wireless/ath/ath10k/pci.c | 43 +++++++++++---------------- 3 files changed, 22 insertions(+), 52 deletions(-) diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c index edf3629288bc..d6da404c9fa7 100644 --- a/drivers/net/wireless/ath/ath10k/ce.c +++ b/drivers/net/wireless/ath/ath10k/ce.c @@ -444,14 +444,10 @@ int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr) */ int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state, void **per_transfer_contextp, - u32 *bufferp, - unsigned int *nbytesp, - unsigned int *transfer_idp, - unsigned int *flagsp) + unsigned int *nbytesp) { struct ath10k_ce_ring *dest_ring = ce_state->dest_ring; unsigned int nentries_mask = dest_ring->nentries_mask; - struct ath10k *ar = ce_state->ar; unsigned int sw_index = dest_ring->sw_index; struct ce_desc *base = dest_ring->base_addr_owner_space; @@ -476,14 +472,7 @@ int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state, desc->nbytes = 0; /* Return data from completed destination descriptor */ - *bufferp = __le32_to_cpu(sdesc.addr); *nbytesp = nbytes; - *transfer_idp = MS(__le16_to_cpu(sdesc.flags), CE_DESC_FLAGS_META_DATA); - - if (__le16_to_cpu(sdesc.flags) & CE_DESC_FLAGS_BYTE_SWAP) - *flagsp = CE_RECV_FLAG_SWAPPED; - else - *flagsp = 0; if (per_transfer_contextp) *per_transfer_contextp = @@ -501,10 +490,7 @@ int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state, int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state, void **per_transfer_contextp, - u32 *bufferp, - unsigned int *nbytesp, - unsigned int *transfer_idp, - unsigned int *flagsp) + unsigned int *nbytesp) { struct ath10k *ar = ce_state->ar; struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); @@ -513,8 +499,7 @@ int 
ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state, spin_lock_bh(&ar_pci->ce_lock); ret = ath10k_ce_completed_recv_next_nolock(ce_state, per_transfer_contextp, - bufferp, nbytesp, - transfer_idp, flagsp); + nbytesp); spin_unlock_bh(&ar_pci->ce_lock); return ret; diff --git a/drivers/net/wireless/ath/ath10k/ce.h b/drivers/net/wireless/ath/ath10k/ce.h index dac676817532..68717e5b9d89 100644 --- a/drivers/net/wireless/ath/ath10k/ce.h +++ b/drivers/net/wireless/ath/ath10k/ce.h @@ -177,10 +177,7 @@ int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr); */ int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state, void **per_transfer_contextp, - u32 *bufferp, - unsigned int *nbytesp, - unsigned int *transfer_idp, - unsigned int *flagsp); + unsigned int *nbytesp); /* * Supply data for the next completed unprocessed send descriptor. * Pops 1 completed send buffer from Source ring. @@ -212,10 +209,7 @@ int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state, int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state, void **per_transfer_contextp, - u32 *bufferp, - unsigned int *nbytesp, - unsigned int *transfer_idp, - unsigned int *flagsp); + unsigned int *nbytesp); /* * Support clean shutdown by allowing the caller to cancel diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c index b3cff1d3364a..290a61afde1a 100644 --- a/drivers/net/wireless/ath/ath10k/pci.c +++ b/drivers/net/wireless/ath/ath10k/pci.c @@ -870,10 +870,8 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data, { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); int ret = 0; - u32 buf; + u32 *buf; unsigned int completed_nbytes, orig_nbytes, remaining_bytes; - unsigned int id; - unsigned int flags; struct ath10k_ce_pipe *ce_diag; /* Host buffer address in CE space */ u32 ce_data; @@ -909,7 +907,7 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data, nbytes = min_t(unsigned int, remaining_bytes, DIAG_TRANSFER_LIMIT); - ret = __ath10k_ce_rx_post_buf(ce_diag, NULL, ce_data); + ret = __ath10k_ce_rx_post_buf(ce_diag, &ce_data, ce_data); if (ret != 0) goto done; @@ -940,9 +938,10 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data, } i = 0; - while (ath10k_ce_completed_recv_next_nolock(ce_diag, NULL, &buf, - &completed_nbytes, - &id, &flags) != 0) { + while (ath10k_ce_completed_recv_next_nolock(ce_diag, + (void **)&buf, + &completed_nbytes) + != 0) { mdelay(1); if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) { @@ -956,7 +955,7 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data, goto done; } - if (buf != ce_data) { + if (*buf != ce_data) { ret = -EIO; goto done; } @@ -1026,10 +1025,8 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address, { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); int ret = 0; - u32 buf; + u32 *buf; unsigned int completed_nbytes, orig_nbytes, remaining_bytes; - unsigned int id; - unsigned int flags; struct ath10k_ce_pipe *ce_diag; void *data_buf = NULL; u32 ce_data; /* Host buffer address in CE space */ @@ -1078,7 +1075,7 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address, nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT); /* Set up to receive directly into Target(!) 
address */ - ret = __ath10k_ce_rx_post_buf(ce_diag, NULL, address); + ret = __ath10k_ce_rx_post_buf(ce_diag, &address, address); if (ret != 0) goto done; @@ -1103,9 +1100,10 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address, } i = 0; - while (ath10k_ce_completed_recv_next_nolock(ce_diag, NULL, &buf, - &completed_nbytes, - &id, &flags) != 0) { + while (ath10k_ce_completed_recv_next_nolock(ce_diag, + (void **)&buf, + &completed_nbytes) + != 0) { mdelay(1); if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) { @@ -1119,7 +1117,7 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address, goto done; } - if (buf != address) { + if (*buf != address) { ret = -EIO; goto done; } @@ -1181,15 +1179,11 @@ static void ath10k_pci_process_rx_cb(struct ath10k_ce_pipe *ce_state, struct sk_buff *skb; struct sk_buff_head list; void *transfer_context; - u32 ce_data; unsigned int nbytes, max_nbytes; - unsigned int transfer_id; - unsigned int flags; __skb_queue_head_init(&list); while (ath10k_ce_completed_recv_next(ce_state, &transfer_context, - &ce_data, &nbytes, &transfer_id, - &flags) == 0) { + &nbytes) == 0) { skb = transfer_context; max_nbytes = skb->len + skb_tailroom(skb); dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr, @@ -1835,13 +1829,10 @@ static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state) { struct ath10k *ar = ce_state->ar; struct bmi_xfer *xfer; - u32 ce_data; unsigned int nbytes; - unsigned int transfer_id; - unsigned int flags; - if (ath10k_ce_completed_recv_next(ce_state, (void **)&xfer, &ce_data, - &nbytes, &transfer_id, &flags)) + if (ath10k_ce_completed_recv_next(ce_state, (void **)&xfer, + &nbytes)) return; if (WARN_ON_ONCE(!xfer)) From 128abd09134a5b415fef4373841ea6d3fb7b680f Mon Sep 17 00:00:00 2001 From: Rajkumar Manoharan Date: Tue, 22 Mar 2016 17:22:18 +0530 Subject: [PATCH 0154/1649] ath10k: reuse copy engine 5 (htt rx) descriptors Whenever htt rx indication i.e target to host messages are received on rx copy engine (CE5), the message will be freed after processing the response. Then CE 5 will be refilled with new descriptors at post rx processing. This memory alloc and free operations can be avoided by reusing the same descriptors. During CE pipe allocation, full ring is not initialized i.e n-1 entries are filled up. So for CE 5 full ring should be filled up to reuse descriptors. Moreover CE 5 write index will be updated in single shot instead of incremental access. This could avoid multiple pci_write and ce_ring access. From experiments, It improves CPU usage by ~3% in IPQ4019 platform. 
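
The "single shot" write-index update mentioned above amounts to advancing a power-of-two ring index once per batch instead of once per buffer. The sketch below is only an illustration of that arithmetic (the macro mirrors the CE_RING_IDX_ADD idea from the diff); the "register write" is a stub, not the real CE access.

/* Batched ring write-index update: N recycled buffers, one masked add,
 * one register write. NENTRIES must be a power of two.
 */
#include <stdio.h>

#define NENTRIES      512
#define NENTRIES_MASK (NENTRIES - 1)
#define RING_IDX_ADD(mask, idx, num) (((idx) + (num)) & (mask))

static unsigned int hw_write_index;	/* stand-in for the CE register */

static void ring_write_index_set(unsigned int idx)
{
	hw_write_index = idx;		/* one "register" write per batch */
}

int main(void)
{
	unsigned int write_index = 510;
	unsigned int recycled = 5;	/* buffers processed in this batch */

	write_index = RING_IDX_ADD(NENTRIES_MASK, write_index, recycled);
	ring_write_index_set(write_index);
	printf("new write index: %u\n", hw_write_index);	/* wraps to 3 */
	return 0;
}
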
Signed-off-by: Rajkumar Manoharan Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/ce.c | 23 ++++++++-- drivers/net/wireless/ath/ath10k/ce.h | 3 ++ drivers/net/wireless/ath/ath10k/pci.c | 63 ++++++++++++++++++++++++++- 3 files changed, 84 insertions(+), 5 deletions(-) diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c index d6da404c9fa7..7212802eb327 100644 --- a/drivers/net/wireless/ath/ath10k/ce.c +++ b/drivers/net/wireless/ath/ath10k/ce.c @@ -411,7 +411,8 @@ int __ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr) lockdep_assert_held(&ar_pci->ce_lock); - if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) == 0) + if ((pipe->id != 5) && + CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) == 0) return -ENOSPC; desc->addr = __cpu_to_le32(paddr); @@ -425,6 +426,19 @@ int __ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr) return 0; } +void ath10k_ce_rx_update_write_idx(struct ath10k_ce_pipe *pipe, u32 nentries) +{ + struct ath10k *ar = pipe->ar; + struct ath10k_ce_ring *dest_ring = pipe->dest_ring; + unsigned int nentries_mask = dest_ring->nentries_mask; + unsigned int write_index = dest_ring->write_index; + u32 ctrl_addr = pipe->ctrl_addr; + + write_index = CE_RING_IDX_ADD(nentries_mask, write_index, nentries); + ath10k_ce_dest_ring_write_index_set(ar, ctrl_addr, write_index); + dest_ring->write_index = write_index; +} + int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr) { struct ath10k *ar = pipe->ar; @@ -478,8 +492,11 @@ int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state, *per_transfer_contextp = dest_ring->per_transfer_context[sw_index]; - /* sanity */ - dest_ring->per_transfer_context[sw_index] = NULL; + /* Copy engine 5 (HTT Rx) will reuse the same transfer context. + * So update transfer context all CEs except CE5. 
+ */ + if (ce_state->id != 5) + dest_ring->per_transfer_context[sw_index] = NULL; /* Update sw_index */ sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index); diff --git a/drivers/net/wireless/ath/ath10k/ce.h b/drivers/net/wireless/ath/ath10k/ce.h index 68717e5b9d89..25cafcfd6b12 100644 --- a/drivers/net/wireless/ath/ath10k/ce.h +++ b/drivers/net/wireless/ath/ath10k/ce.h @@ -166,6 +166,7 @@ int ath10k_ce_num_free_src_entries(struct ath10k_ce_pipe *pipe); int __ath10k_ce_rx_num_free_bufs(struct ath10k_ce_pipe *pipe); int __ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr); int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr); +void ath10k_ce_rx_update_write_idx(struct ath10k_ce_pipe *pipe, u32 nentries); /* recv flags */ /* Data is byte-swapped */ @@ -410,6 +411,8 @@ static inline u32 ath10k_ce_base_address(struct ath10k *ar, unsigned int ce_id) (((int)(toidx)-(int)(fromidx)) & (nentries_mask)) #define CE_RING_IDX_INCR(nentries_mask, idx) (((idx) + 1) & (nentries_mask)) +#define CE_RING_IDX_ADD(nentries_mask, idx, num) \ + (((idx) + (num)) & (nentries_mask)) #define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB \ ar->regs->ce_wrap_intr_sum_host_msi_lsb diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c index 290a61afde1a..0b305efe6c94 100644 --- a/drivers/net/wireless/ath/ath10k/pci.c +++ b/drivers/net/wireless/ath/ath10k/pci.c @@ -809,7 +809,8 @@ static void ath10k_pci_rx_post_pipe(struct ath10k_pci_pipe *pipe) spin_lock_bh(&ar_pci->ce_lock); num = __ath10k_ce_rx_num_free_bufs(ce_pipe); spin_unlock_bh(&ar_pci->ce_lock); - while (num--) { + + while (num >= 0) { ret = __ath10k_pci_rx_post_buf(pipe); if (ret) { if (ret == -ENOSPC) @@ -819,6 +820,7 @@ static void ath10k_pci_rx_post_pipe(struct ath10k_pci_pipe *pipe) ATH10K_PCI_RX_POST_RETRY_MS); break; } + num--; } } @@ -1212,6 +1214,63 @@ static void ath10k_pci_process_rx_cb(struct ath10k_ce_pipe *ce_state, ath10k_pci_rx_post_pipe(pipe_info); } +static void ath10k_pci_process_htt_rx_cb(struct ath10k_ce_pipe *ce_state, + void (*callback)(struct ath10k *ar, + struct sk_buff *skb)) +{ + struct ath10k *ar = ce_state->ar; + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id]; + struct ath10k_ce_pipe *ce_pipe = pipe_info->ce_hdl; + struct sk_buff *skb; + struct sk_buff_head list; + void *transfer_context; + unsigned int nbytes, max_nbytes, nentries; + int orig_len; + + /* No need to aquire ce_lock for CE5, since this is the only place CE5 + * is processed other than init and deinit. Before releasing CE5 + * buffers, interrupts are disabled. Thus CE5 access is serialized. 
+ */ + __skb_queue_head_init(&list); + while (ath10k_ce_completed_recv_next_nolock(ce_state, &transfer_context, + &nbytes) == 0) { + skb = transfer_context; + max_nbytes = skb->len + skb_tailroom(skb); + + if (unlikely(max_nbytes < nbytes)) { + ath10k_warn(ar, "rxed more than expected (nbytes %d, max %d)", + nbytes, max_nbytes); + continue; + } + + dma_sync_single_for_cpu(ar->dev, ATH10K_SKB_RXCB(skb)->paddr, + max_nbytes, DMA_FROM_DEVICE); + skb_put(skb, nbytes); + __skb_queue_tail(&list, skb); + } + + nentries = skb_queue_len(&list); + while ((skb = __skb_dequeue(&list))) { + ath10k_dbg(ar, ATH10K_DBG_PCI, "pci rx ce pipe %d len %d\n", + ce_state->id, skb->len); + ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci rx: ", + skb->data, skb->len); + + orig_len = skb->len; + callback(ar, skb); + skb_push(skb, orig_len - skb->len); + skb_reset_tail_pointer(skb); + skb_trim(skb, 0); + + /*let device gain the buffer again*/ + dma_sync_single_for_device(ar->dev, ATH10K_SKB_RXCB(skb)->paddr, + skb->len + skb_tailroom(skb), + DMA_FROM_DEVICE); + } + ath10k_ce_rx_update_write_idx(ce_pipe, nentries); +} + /* Called by lower (CE) layer when data is received from the Target. */ static void ath10k_pci_htc_rx_cb(struct ath10k_ce_pipe *ce_state) { @@ -1268,7 +1327,7 @@ static void ath10k_pci_htt_rx_cb(struct ath10k_ce_pipe *ce_state) */ ath10k_ce_per_engine_service(ce_state->ar, 4); - ath10k_pci_process_rx_cb(ce_state, ath10k_pci_htt_rx_deliver); + ath10k_pci_process_htt_rx_cb(ce_state, ath10k_pci_htt_rx_deliver); } int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id, From 5c86d97bcc1d42ce7f75685a61be4dad34ee8183 Mon Sep 17 00:00:00 2001 From: Rajkumar Manoharan Date: Tue, 22 Mar 2016 17:22:19 +0530 Subject: [PATCH 0155/1649] ath10k: combine txrx and replenish task Since tx completion and rx indication processing are moved out of txrx tasklet and rx ring lock contention also removed from txrx for rx_ind messages, it would be efficient to combine both replenish and txrx tasks. Refill threshold is adjusted for both AP135 and AP148 (low and high end systems). With this adjustment in AP135, TCP DL is improved from 603 Mbps to 620 Mbps and UDP DL is improved from 758 Mbps to 803 Mbps. Also no watchdog are observed on UDP BiDi. Signed-off-by: Rajkumar Manoharan Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/htt.h | 3 +-- drivers/net/wireless/ath/ath10k/htt_rx.c | 21 ++++++--------------- 2 files changed, 7 insertions(+), 17 deletions(-) diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h index 1adcae4faccf..60bd9fe4b2d9 100644 --- a/drivers/net/wireless/ath/ath10k/htt.h +++ b/drivers/net/wireless/ath/ath10k/htt.h @@ -1662,7 +1662,6 @@ struct ath10k_htt { /* set if host-fw communication goes haywire * used to avoid further failures */ bool rx_confused; - struct tasklet_struct rx_replenish_task; atomic_t num_mpdus_ready; /* This is used to group tx/rx completions separately and process them @@ -1737,7 +1736,7 @@ struct htt_rx_desc { /* Refill a bunch of RX buffers for each refill round so that FW/HW can handle * aggregated traffic more nicely. */ -#define ATH10K_HTT_MAX_NUM_REFILL 16 +#define ATH10K_HTT_MAX_NUM_REFILL 100 /* * DMA_MAP expects the buffer to be an integral number of cache lines. 
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c index ac16ce746afb..592421ec5635 100644 --- a/drivers/net/wireless/ath/ath10k/htt_rx.c +++ b/drivers/net/wireless/ath/ath10k/htt_rx.c @@ -31,6 +31,8 @@ /* when under memory pressure rx ring refill may fail and needs a retry */ #define HTT_RX_RING_REFILL_RETRY_MS 50 +#define HTT_RX_RING_REFILL_RESCHED_MS 5 + static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb); static void ath10k_htt_txrx_compl_task(unsigned long ptr); @@ -192,7 +194,8 @@ static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt) mod_timer(&htt->rx_ring.refill_retry_timer, jiffies + msecs_to_jiffies(HTT_RX_RING_REFILL_RETRY_MS)); } else if (num_deficit > 0) { - tasklet_schedule(&htt->rx_replenish_task); + mod_timer(&htt->rx_ring.refill_retry_timer, jiffies + + msecs_to_jiffies(HTT_RX_RING_REFILL_RESCHED_MS)); } spin_unlock_bh(&htt->rx_ring.lock); } @@ -223,7 +226,6 @@ int ath10k_htt_rx_ring_refill(struct ath10k *ar) void ath10k_htt_rx_free(struct ath10k_htt *htt) { del_timer_sync(&htt->rx_ring.refill_retry_timer); - tasklet_kill(&htt->rx_replenish_task); tasklet_kill(&htt->txrx_compl_task); skb_queue_purge(&htt->rx_compl_q); @@ -380,13 +382,6 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt, return msdu_chaining; } -static void ath10k_htt_rx_replenish_task(unsigned long ptr) -{ - struct ath10k_htt *htt = (struct ath10k_htt *)ptr; - - ath10k_htt_rx_msdu_buff_replenish(htt); -} - static struct sk_buff *ath10k_htt_rx_pop_paddr(struct ath10k_htt *htt, u32 paddr) { @@ -520,9 +515,6 @@ int ath10k_htt_rx_alloc(struct ath10k_htt *htt) htt->rx_ring.sw_rd_idx.msdu_payld = 0; hash_init(htt->rx_ring.skb_table); - tasklet_init(&htt->rx_replenish_task, ath10k_htt_rx_replenish_task, - (unsigned long)htt); - skb_queue_head_init(&htt->rx_compl_q); skb_queue_head_init(&htt->rx_in_ord_compl_q); skb_queue_head_init(&htt->tx_fetch_ind_q); @@ -1912,8 +1904,7 @@ static void ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb) return; } } - - tasklet_schedule(&htt->rx_replenish_task); + ath10k_htt_rx_msdu_buff_replenish(htt); } static void ath10k_htt_rx_tx_fetch_resp_id_confirm(struct ath10k *ar, @@ -2470,5 +2461,5 @@ static void ath10k_htt_txrx_compl_task(unsigned long ptr) dev_kfree_skb_any(skb); } - tasklet_schedule(&htt->rx_replenish_task); + ath10k_htt_rx_msdu_buff_replenish(htt); } From 7d5efd0888331a0f7ed2e1d5a6977a32b7e9bfb7 Mon Sep 17 00:00:00 2001 From: Peter Oh Date: Tue, 22 Mar 2016 15:44:53 -0700 Subject: [PATCH 0156/1649] ath10k: parse Rx MAC timestamp in mgmt frame for FW 10.4 Check and parse Rx MAC timestamp when firmware sets its flag to status variable. 10.4 firmware adds it in Rx beacon frame only at this moment. Drivers and mac80211 may utilize it to detect such clockdrift or beacon collision and use the result for beacon collision avoidance. 
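For illustration, a minimal sketch of the buffer layout the new 10.4 parsing relies on: when the firmware sets the extension flag in the rx status word, the extension info carrying the MAC timestamp sits immediately after the 4-byte-aligned frame body. The struct and flag value below are stand-ins for this sketch, not the driver's definitions.

    #include <stdint.h>
    #include <stddef.h>

    #define ALIGN4(x)               (((x) + 3u) & ~3u)
    #define RX_STATUS_EXT_INFO_BIT  0x40u   /* placeholder bit, illustration only */

    struct ext_info_stub {                  /* hypothetical stand-in for wmi_mgmt_rx_ext_info */
        uint64_t rx_mac_timestamp;          /* MAC timestamp reported by firmware */
    };

    /* Return the extension info that follows the frame body, or NULL if absent. */
    const struct ext_info_stub *ext_info_ptr(const uint8_t *body, uint32_t buf_len,
                                             uint32_t status)
    {
        if (!(status & RX_STATUS_EXT_INFO_BIT))
            return NULL;

        /* the extension is appended after the 4-byte-aligned frame body */
        return (const struct ext_info_stub *)(body + ALIGN4(buf_len));
    }
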
Signed-off-by: Peter Oh Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/wmi.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c index ac8622718f58..f7ec65f263a0 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.c +++ b/drivers/net/wireless/ath/ath10k/wmi.c @@ -2192,6 +2192,8 @@ static int ath10k_wmi_10_4_op_pull_mgmt_rx_ev(struct ath10k *ar, struct wmi_10_4_mgmt_rx_hdr *ev_hdr; size_t pull_len; u32 msdu_len; + struct wmi_mgmt_rx_ext_info *ext_info; + u32 len; ev = (struct wmi_10_4_mgmt_rx_event *)skb->data; ev_hdr = &ev->hdr; @@ -2212,6 +2214,13 @@ static int ath10k_wmi_10_4_op_pull_mgmt_rx_ev(struct ath10k *ar, if (skb->len < msdu_len) return -EPROTO; + if (le32_to_cpu(arg->status) & WMI_RX_STATUS_EXT_INFO) { + len = ALIGN(le32_to_cpu(arg->buf_len), 4); + ext_info = (struct wmi_mgmt_rx_ext_info *)(skb->data + len); + memcpy(&arg->ext_info, ext_info, + sizeof(struct wmi_mgmt_rx_ext_info)); + } + /* Make sure bytes added for padding are removed. */ skb_trim(skb, msdu_len); From 7b9bc799a445aea95f64f15e0083cb19b5789abe Mon Sep 17 00:00:00 2001 From: Joseph Salisbury Date: Mon, 14 Mar 2016 14:51:48 -0400 Subject: [PATCH 0157/1649] ath5k: Change led pin configuration for compaq c700 laptop BugLink: http://bugs.launchpad.net/bugs/972604 Commit 09c9bae26b0d3c9472cb6ae45010460a2cee8b8d ("ath5k: add led pin configuration for compaq c700 laptop") added a pin configuration for the Compaq c700 laptop. However, the polarity of the led pin is reversed. It should be red for wifi off and blue for wifi on, but it is the opposite. This bug was reported in the following bug report: http://pad.lv/972604 Fixes: 09c9bae26b0d3c9472cb6ae45010460a2cee8b8d ("ath5k: add led pin configuration for compaq c700 laptop") Signed-off-by: Joseph Salisbury Cc: stable@vger.kernel.org Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath5k/led.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/ath/ath5k/led.c b/drivers/net/wireless/ath/ath5k/led.c index 803030fd17d3..6a2a16856763 100644 --- a/drivers/net/wireless/ath/ath5k/led.c +++ b/drivers/net/wireless/ath/ath5k/led.c @@ -77,7 +77,7 @@ static const struct pci_device_id ath5k_led_devices[] = { /* HP Compaq CQ60-206US (ddreggors@jumptv.com) */ { ATH_SDEVICE(PCI_VENDOR_ID_HP, 0x0137a), ATH_LED(3, 1) }, /* HP Compaq C700 (nitrousnrg@gmail.com) */ - { ATH_SDEVICE(PCI_VENDOR_ID_HP, 0x0137b), ATH_LED(3, 1) }, + { ATH_SDEVICE(PCI_VENDOR_ID_HP, 0x0137b), ATH_LED(3, 0) }, /* LiteOn AR5BXB63 (magooz@salug.it) */ { ATH_SDEVICE(PCI_VENDOR_ID_ATHEROS, 0x3067), ATH_LED(3, 0) }, /* IBM-specific AR5212 (all others) */ From 69218a48005d0c93b8e9ec483f42ead481a43034 Mon Sep 17 00:00:00 2001 From: Lior David Date: Mon, 21 Mar 2016 22:01:11 +0200 Subject: [PATCH 0158/1649] wil6210: allow empty WMI commands in debugfs wmi_send There are many valid WMI commands with only header without any additional payload. Such WMI commands could not be sent using the debugfs wmi_send facility. Fix the code to allow sending of such commands. 
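As a hedged usage sketch, a header-only command can now be pushed through the debugfs file from user space. Both the header layout and the debugfs path below are assumptions made for illustration; the authoritative layout is the driver's struct wmi_cmd_hdr, and the real path depends on the wiphy name on the running system.

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <endian.h>

    struct wmi_hdr_stub {              /* hypothetical stand-in for the driver's header */
        uint8_t  mid;
        uint8_t  reserved;
        uint16_t command_id;           /* little-endian on the wire */
        uint32_t fw_timestamp;
    };

    int main(void)
    {
        /* assumed example path; adjust to the wiphy on the target system */
        const char *path = "/sys/kernel/debug/ieee80211/phy0/wil6210/wmi_send";
        struct wmi_hdr_stub hdr;
        int fd;

        memset(&hdr, 0, sizeof(hdr));
        hdr.command_id = htole16(0x0807);  /* arbitrary example command id */

        fd = open(path, O_WRONLY);
        if (fd < 0) {
            perror("open");
            return 1;
        }

        /* writing only the header, with no payload, is what this change permits */
        if (write(fd, &hdr, sizeof(hdr)) != (ssize_t)sizeof(hdr))
            perror("write");

        close(fd);
        return 0;
    }
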
Signed-off-by: Lior David Signed-off-by: Maya Erez Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wil6210/debugfs.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c index a4d3f70c3d29..b338a09175ad 100644 --- a/drivers/net/wireless/ath/wil6210/debugfs.c +++ b/drivers/net/wireless/ath/wil6210/debugfs.c @@ -832,7 +832,7 @@ static ssize_t wil_write_file_wmi(struct file *file, const char __user *buf, u16 cmdid; int rc, rc1; - if (cmdlen <= 0) + if (cmdlen < 0) return -EINVAL; wmi = kmalloc(len, GFP_KERNEL); @@ -845,7 +845,7 @@ static ssize_t wil_write_file_wmi(struct file *file, const char __user *buf, return rc; } - cmd = &wmi[1]; + cmd = (cmdlen > 0) ? &wmi[1] : NULL; cmdid = le16_to_cpu(wmi->command_id); rc1 = wmi_send(wil, cmdid, cmd, cmdlen); From a2cb3d5f043e4cc646c396a3e188a171b38c15a0 Mon Sep 17 00:00:00 2001 From: Miaoqing Pan Date: Fri, 18 Mar 2016 17:54:56 +0800 Subject: [PATCH 0159/1649] ath9k: fix rng high cpu load If no valid ADC randomness output, ath9k rng will continuously reading ADC, which will cause high CPU load. So increase the delay to wait for ADC ready. Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=114261 Signed-off-by: Miaoqing Pan Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath9k/rng.c | 20 +++++++++++++++++++- 1 file changed, 19 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/ath/ath9k/rng.c b/drivers/net/wireless/ath/ath9k/rng.c index c9cb2aad7b6f..d38e50f96db7 100644 --- a/drivers/net/wireless/ath/ath9k/rng.c +++ b/drivers/net/wireless/ath/ath9k/rng.c @@ -55,11 +55,26 @@ static int ath9k_rng_data_read(struct ath_softc *sc, u32 *buf, u32 buf_size) return j << 2; } +static u32 ath9k_rng_delay_get(u32 fail_stats) +{ + u32 delay; + + if (fail_stats < 100) + delay = 10; + else if (fail_stats < 105) + delay = 1000; + else + delay = 10000; + + return delay; +} + static int ath9k_rng_kthread(void *data) { int bytes_read; struct ath_softc *sc = data; u32 *rng_buf; + u32 delay, fail_stats = 0; rng_buf = kmalloc_array(ATH9K_RNG_BUF_SIZE, sizeof(u32), GFP_KERNEL); if (!rng_buf) @@ -69,10 +84,13 @@ static int ath9k_rng_kthread(void *data) bytes_read = ath9k_rng_data_read(sc, rng_buf, ATH9K_RNG_BUF_SIZE); if (unlikely(!bytes_read)) { - msleep_interruptible(10); + delay = ath9k_rng_delay_get(++fail_stats); + msleep_interruptible(delay); continue; } + fail_stats = 0; + /* sleep until entropy bits under write_wakeup_threshold */ add_hwgenerator_randomness((void *)rng_buf, bytes_read, ATH9K_RNG_ENTROPY(bytes_read)); From 75b6462e965dc76d16254b5fcb3f41ca97f6fef0 Mon Sep 17 00:00:00 2001 From: Pavel Tikhomirov Date: Fri, 11 Dec 2015 17:05:14 +0300 Subject: [PATCH 0160/1649] ixgbe: on recv increment rx.ring->stats.yields It seem to be non intentionally changed to Tx in commit adc810900a70 ("ixgbe: Refactor busy poll socket code to address multiple issues") Lock is taken from ixgbe_low_latency_recv, and there under this lock we use ixgbe_clean_rx_irq so it looks wrong for me to increment Tx counter. 
Yield stats can be shown through ethtool: ethtool -S enp129s0 | grep yield Signed-off-by: Pavel Tikhomirov Tested-by: Phil Schmitt Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index e4949af7dd6b..9f64354c9c9e 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -456,7 +456,7 @@ static inline bool ixgbe_qv_lock_poll(struct ixgbe_q_vector *q_vector) IXGBE_QV_STATE_POLL); #ifdef BP_EXTENDED_STATS if (rc != IXGBE_QV_STATE_IDLE) - q_vector->tx.ring->stats.yields++; + q_vector->rx.ring->stats.yields++; #endif return rc == IXGBE_QV_STATE_IDLE; } From 39771b127b412377d6354893c7d43ee8f2edecfd Mon Sep 17 00:00:00 2001 From: Willem de Bruijn Date: Sat, 2 Apr 2016 23:08:06 -0400 Subject: [PATCH 0161/1649] sock: break up sock_cmsg_snd into __sock_cmsg_snd and loop To process cmsg's of the SOL_SOCKET level in addition to cmsgs of another level, protocols can call sock_cmsg_send(). This causes a double walk on the cmsghdr list, one for SOL_SOCKET and one for the other level. Extract the inner demultiplex logic from the loop that walks the list, to allow having this called directly from a walker in the protocol specific code. Signed-off-by: Willem de Bruijn Signed-off-by: Soheil Hassas Yeganeh Signed-off-by: David S. Miller --- include/net/sock.h | 2 ++ net/core/sock.c | 33 ++++++++++++++++++++++----------- 2 files changed, 24 insertions(+), 11 deletions(-) diff --git a/include/net/sock.h b/include/net/sock.h index 255d3e03727b..03772d4b06e6 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -1420,6 +1420,8 @@ struct sockcm_cookie { u32 mark; }; +int __sock_cmsg_send(struct sock *sk, struct msghdr *msg, struct cmsghdr *cmsg, + struct sockcm_cookie *sockc); int sock_cmsg_send(struct sock *sk, struct msghdr *msg, struct sockcm_cookie *sockc); diff --git a/net/core/sock.c b/net/core/sock.c index b67b9aedb230..66976f88566b 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -1866,27 +1866,38 @@ struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size, } EXPORT_SYMBOL(sock_alloc_send_skb); +int __sock_cmsg_send(struct sock *sk, struct msghdr *msg, struct cmsghdr *cmsg, + struct sockcm_cookie *sockc) +{ + switch (cmsg->cmsg_type) { + case SO_MARK: + if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) + return -EPERM; + if (cmsg->cmsg_len != CMSG_LEN(sizeof(u32))) + return -EINVAL; + sockc->mark = *(u32 *)CMSG_DATA(cmsg); + break; + default: + return -EINVAL; + } + return 0; +} +EXPORT_SYMBOL(__sock_cmsg_send); + int sock_cmsg_send(struct sock *sk, struct msghdr *msg, struct sockcm_cookie *sockc) { struct cmsghdr *cmsg; + int ret; for_each_cmsghdr(cmsg, msg) { if (!CMSG_OK(msg, cmsg)) return -EINVAL; if (cmsg->cmsg_level != SOL_SOCKET) continue; - switch (cmsg->cmsg_type) { - case SO_MARK: - if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) - return -EPERM; - if (cmsg->cmsg_len != CMSG_LEN(sizeof(u32))) - return -EINVAL; - sockc->mark = *(u32 *)CMSG_DATA(cmsg); - break; - default: - return -EINVAL; - } + ret = __sock_cmsg_send(sk, msg, cmsg, sockc); + if (ret) + return ret; } return 0; } From 6db8b963a7a31047573f229492ff6fc0f51cc377 Mon Sep 17 00:00:00 2001 From: Soheil Hassas Yeganeh Date: Sat, 2 Apr 2016 23:08:07 -0400 Subject: [PATCH 0162/1649] tcp: accept SOF_TIMESTAMPING_OPT_ID for passive TFO SOF_TIMESTAMPING_OPT_ID is set to get data-independent 
IDs to associate timestamps with send calls. For TCP connections, tp->snd_una is used as the starting point to calculate relative IDs. This socket option will fail if set before the handshake on a passive TCP fast open connection with data in SYN or SYN/ACK, since setsockopt requires the connection to be in the ESTABLISHED state. To address these, instead of limiting the option to the ESTABLISHED state, accept the SOF_TIMESTAMPING_OPT_ID option as long as the connection is not in LISTEN or CLOSE states. Signed-off-by: Soheil Hassas Yeganeh Acked-by: Willem de Bruijn Acked-by: Yuchung Cheng Acked-by: Eric Dumazet Signed-off-by: David S. Miller --- net/core/sock.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/net/core/sock.c b/net/core/sock.c index 66976f88566b..0a64fe20ce5a 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -832,7 +832,8 @@ set_rcvbuf: !(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)) { if (sk->sk_protocol == IPPROTO_TCP && sk->sk_type == SOCK_STREAM) { - if (sk->sk_state != TCP_ESTABLISHED) { + if ((1 << sk->sk_state) & + (TCPF_CLOSE | TCPF_LISTEN)) { ret = -EINVAL; break; } From 6b084928baac562ed61866f540a96120e9c9ddb7 Mon Sep 17 00:00:00 2001 From: Soheil Hassas Yeganeh Date: Sat, 2 Apr 2016 23:08:08 -0400 Subject: [PATCH 0163/1649] tcp: use one bit in TCP_SKB_CB to mark ACK timestamps Currently, to avoid a cache line miss for accessing skb_shinfo, tcp_ack_tstamp skips socket that do not have SOF_TIMESTAMPING_TX_ACK bit set in sk_tsflags. This is implemented based on an implicit assumption that the SOF_TIMESTAMPING_TX_ACK is set via socket options for the duration that ACK timestamps are needed. To implement per-write timestamps, this check should be removed and replaced with a per-packet alternative that quickly skips packets missing ACK timestamps marks without a cache-line miss. To enable per-packet marking without a cache line miss, use one bit in TCP_SKB_CB to mark a whether a SKB might need a ack tx timestamp or not. Further checks in tcp_ack_tstamp are not modified and work as before. Signed-off-by: Soheil Hassas Yeganeh Acked-by: Willem de Bruijn Acked-by: Eric Dumazet Signed-off-by: David S. Miller --- include/net/tcp.h | 3 ++- net/ipv4/tcp.c | 2 ++ net/ipv4/tcp_input.c | 2 +- 3 files changed, 5 insertions(+), 2 deletions(-) diff --git a/include/net/tcp.h b/include/net/tcp.h index f8bb4a4ed3d1..a23282996ca9 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -754,7 +754,8 @@ struct tcp_skb_cb { TCPCB_REPAIRED) __u8 ip_dsfield; /* IPv4 tos or IPv6 dsfield */ - /* 1 byte hole */ + __u8 txstamp_ack:1, /* Record TX timestamp for ack? 
*/ + unused:7; __u32 ack_seq; /* Sequence number ACK'd */ union { struct inet_skb_parm h4; diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 08b8b960a8ed..ce3c9eb901a0 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -432,10 +432,12 @@ static void tcp_tx_timestamp(struct sock *sk, struct sk_buff *skb) { if (sk->sk_tsflags) { struct skb_shared_info *shinfo = skb_shinfo(skb); + struct tcp_skb_cb *tcb = TCP_SKB_CB(skb); sock_tx_timestamp(sk, &shinfo->tx_flags); if (shinfo->tx_flags & SKBTX_ANY_TSTAMP) shinfo->tskey = TCP_SKB_CB(skb)->seq + skb->len - 1; + tcb->txstamp_ack = !!(shinfo->tx_flags & SKBTX_ACK_TSTAMP); } } diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index f87b84a75691..a26e2d262358 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -3082,7 +3082,7 @@ static void tcp_ack_tstamp(struct sock *sk, struct sk_buff *skb, const struct skb_shared_info *shinfo; /* Avoid cache line misses to get skb_shinfo() and shinfo->tx_flags */ - if (likely(!(sk->sk_tsflags & SOF_TIMESTAMPING_TX_ACK))) + if (likely(!TCP_SKB_CB(skb)->txstamp_ack)) return; shinfo = skb_shinfo(skb); From 3dd17e63f5131bf2528f34aa5e3e57758175af92 Mon Sep 17 00:00:00 2001 From: Soheil Hassas Yeganeh Date: Sat, 2 Apr 2016 23:08:09 -0400 Subject: [PATCH 0164/1649] sock: accept SO_TIMESTAMPING flags in socket cmsg Accept SO_TIMESTAMPING in control messages of the SOL_SOCKET level as a basis to accept timestamping requests per write. This implementation only accepts TX recording flags (i.e., SOF_TIMESTAMPING_TX_HARDWARE, SOF_TIMESTAMPING_TX_SOFTWARE, SOF_TIMESTAMPING_TX_SCHED, and SOF_TIMESTAMPING_TX_ACK) in control messages. Users need to set reporting flags (e.g., SOF_TIMESTAMPING_OPT_ID) per socket via socket options. This commit adds a tsflags field in sockcm_cookie which is set in __sock_cmsg_send. It only override the SOF_TIMESTAMPING_TX_* bits in sockcm_cookie.tsflags allowing the control message to override the recording behavior per write, yet maintaining the value of other flags. This patch implements validating the control message and setting tsflags in struct sockcm_cookie. Next commits in this series will actually implement timestamping per write for different protocols. Signed-off-by: Soheil Hassas Yeganeh Acked-by: Willem de Bruijn Signed-off-by: David S. Miller --- include/net/sock.h | 1 + include/uapi/linux/net_tstamp.h | 10 ++++++++++ net/core/sock.c | 13 +++++++++++++ 3 files changed, 24 insertions(+) diff --git a/include/net/sock.h b/include/net/sock.h index 03772d4b06e6..af012da5e608 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -1418,6 +1418,7 @@ void sk_send_sigurg(struct sock *sk); struct sockcm_cookie { u32 mark; + u16 tsflags; }; int __sock_cmsg_send(struct sock *sk, struct msghdr *msg, struct cmsghdr *cmsg, diff --git a/include/uapi/linux/net_tstamp.h b/include/uapi/linux/net_tstamp.h index 6d1abea9746e..264e515de16f 100644 --- a/include/uapi/linux/net_tstamp.h +++ b/include/uapi/linux/net_tstamp.h @@ -31,6 +31,16 @@ enum { SOF_TIMESTAMPING_LAST }; +/* + * SO_TIMESTAMPING flags are either for recording a packet timestamp or for + * reporting the timestamp to user space. + * Recording flags can be set both via socket options and control messages. 
+ */ +#define SOF_TIMESTAMPING_TX_RECORD_MASK (SOF_TIMESTAMPING_TX_HARDWARE | \ + SOF_TIMESTAMPING_TX_SOFTWARE | \ + SOF_TIMESTAMPING_TX_SCHED | \ + SOF_TIMESTAMPING_TX_ACK) + /** * struct hwtstamp_config - %SIOCGHWTSTAMP and %SIOCSHWTSTAMP parameter * diff --git a/net/core/sock.c b/net/core/sock.c index 0a64fe20ce5a..315f5e57fffe 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -1870,6 +1870,8 @@ EXPORT_SYMBOL(sock_alloc_send_skb); int __sock_cmsg_send(struct sock *sk, struct msghdr *msg, struct cmsghdr *cmsg, struct sockcm_cookie *sockc) { + u32 tsflags; + switch (cmsg->cmsg_type) { case SO_MARK: if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) @@ -1878,6 +1880,17 @@ int __sock_cmsg_send(struct sock *sk, struct msghdr *msg, struct cmsghdr *cmsg, return -EINVAL; sockc->mark = *(u32 *)CMSG_DATA(cmsg); break; + case SO_TIMESTAMPING: + if (cmsg->cmsg_len != CMSG_LEN(sizeof(u32))) + return -EINVAL; + + tsflags = *(u32 *)CMSG_DATA(cmsg); + if (tsflags & ~SOF_TIMESTAMPING_TX_RECORD_MASK) + return -EINVAL; + + sockc->tsflags &= ~SOF_TIMESTAMPING_TX_RECORD_MASK; + sockc->tsflags |= tsflags; + break; default: return -EINVAL; } From 24025c465f77c3585f73450bab19501b2edd6fba Mon Sep 17 00:00:00 2001 From: Soheil Hassas Yeganeh Date: Sat, 2 Apr 2016 23:08:10 -0400 Subject: [PATCH 0165/1649] ipv4: process socket-level control messages in IPv4 Process socket-level control messages by invoking __sock_cmsg_send in ip_cmsg_send for control messages on the SOL_SOCKET layer. This makes sure whenever ip_cmsg_send is called in udp, icmp, and raw, we also process socket-level control messages. Note that this commit interprets new control messages that were ignored before. As such, this commit does not change the behavior of IPv4 control messages. Signed-off-by: Soheil Hassas Yeganeh Acked-by: Willem de Bruijn Signed-off-by: David S. 
Miller --- include/net/ip.h | 3 ++- net/ipv4/ip_sockglue.c | 9 ++++++++- net/ipv4/ping.c | 2 +- net/ipv4/raw.c | 2 +- net/ipv4/udp.c | 3 +-- 5 files changed, 13 insertions(+), 6 deletions(-) diff --git a/include/net/ip.h b/include/net/ip.h index fad74d323bd6..93725e546758 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -56,6 +56,7 @@ static inline unsigned int ip_hdrlen(const struct sk_buff *skb) } struct ipcm_cookie { + struct sockcm_cookie sockc; __be32 addr; int oif; struct ip_options_rcu *opt; @@ -550,7 +551,7 @@ int ip_options_rcv_srr(struct sk_buff *skb); void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb); void ip_cmsg_recv_offset(struct msghdr *msg, struct sk_buff *skb, int offset); -int ip_cmsg_send(struct net *net, struct msghdr *msg, +int ip_cmsg_send(struct sock *sk, struct msghdr *msg, struct ipcm_cookie *ipc, bool allow_ipv6); int ip_setsockopt(struct sock *sk, int level, int optname, char __user *optval, unsigned int optlen); diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index 035ad645a8d9..1b7c0776c805 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c @@ -219,11 +219,12 @@ void ip_cmsg_recv_offset(struct msghdr *msg, struct sk_buff *skb, } EXPORT_SYMBOL(ip_cmsg_recv_offset); -int ip_cmsg_send(struct net *net, struct msghdr *msg, struct ipcm_cookie *ipc, +int ip_cmsg_send(struct sock *sk, struct msghdr *msg, struct ipcm_cookie *ipc, bool allow_ipv6) { int err, val; struct cmsghdr *cmsg; + struct net *net = sock_net(sk); for_each_cmsghdr(cmsg, msg) { if (!CMSG_OK(msg, cmsg)) @@ -244,6 +245,12 @@ int ip_cmsg_send(struct net *net, struct msghdr *msg, struct ipcm_cookie *ipc, continue; } #endif + if (cmsg->cmsg_level == SOL_SOCKET) { + if (__sock_cmsg_send(sk, msg, cmsg, &ipc->sockc)) + return -EINVAL; + continue; + } + if (cmsg->cmsg_level != SOL_IP) continue; switch (cmsg->cmsg_type) { diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c index cf9700b1a106..670639bf9f7e 100644 --- a/net/ipv4/ping.c +++ b/net/ipv4/ping.c @@ -747,7 +747,7 @@ static int ping_v4_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) sock_tx_timestamp(sk, &ipc.tx_flags); if (msg->msg_controllen) { - err = ip_cmsg_send(sock_net(sk), msg, &ipc, false); + err = ip_cmsg_send(sk, msg, &ipc, false); if (unlikely(err)) { kfree(ipc.opt); return err; diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c index 8d22de74080c..088ce665fc7b 100644 --- a/net/ipv4/raw.c +++ b/net/ipv4/raw.c @@ -548,7 +548,7 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) ipc.oif = sk->sk_bound_dev_if; if (msg->msg_controllen) { - err = ip_cmsg_send(net, msg, &ipc, false); + err = ip_cmsg_send(sk, msg, &ipc, false); if (unlikely(err)) { kfree(ipc.opt); goto out; diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 08eed5e16df0..bccb4e11047a 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -1034,8 +1034,7 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) sock_tx_timestamp(sk, &ipc.tx_flags); if (msg->msg_controllen) { - err = ip_cmsg_send(sock_net(sk), msg, &ipc, - sk->sk_family == AF_INET6); + err = ip_cmsg_send(sk, msg, &ipc, sk->sk_family == AF_INET6); if (unlikely(err)) { kfree(ipc.opt); return err; From ad1e46a837163a3e7160a1250825bcfafd2e714b Mon Sep 17 00:00:00 2001 From: Soheil Hassas Yeganeh Date: Sat, 2 Apr 2016 23:08:11 -0400 Subject: [PATCH 0166/1649] ipv6: process socket-level control messages in IPv6 Process socket-level control messages by invoking __sock_cmsg_send in ip6_datagram_send_ctl for control messages on the SOL_SOCKET 
layer. This makes sure whenever ip6_datagram_send_ctl is called for udp and raw, we also process socket-level control messages. This is a bit uglier than IPv4, since IPv6 does not have something like ipcm_cookie. Perhaps we can later create a control message cookie for IPv6? Note that this commit interprets new control messages that were ignored before. As such, this commit does not change the behavior of IPv6 control messages. Signed-off-by: Soheil Hassas Yeganeh Acked-by: Willem de Bruijn Signed-off-by: David S. Miller --- include/net/transp_v6.h | 3 ++- net/ipv6/datagram.c | 9 ++++++++- net/ipv6/ip6_flowlabel.c | 3 ++- net/ipv6/ipv6_sockglue.c | 3 ++- net/ipv6/raw.c | 6 +++++- net/ipv6/udp.c | 5 ++++- net/l2tp/l2tp_ip6.c | 8 +++++--- 7 files changed, 28 insertions(+), 9 deletions(-) diff --git a/include/net/transp_v6.h b/include/net/transp_v6.h index b927413dde86..2b1c3450ab20 100644 --- a/include/net/transp_v6.h +++ b/include/net/transp_v6.h @@ -42,7 +42,8 @@ void ip6_datagram_recv_specific_ctl(struct sock *sk, struct msghdr *msg, int ip6_datagram_send_ctl(struct net *net, struct sock *sk, struct msghdr *msg, struct flowi6 *fl6, struct ipv6_txoptions *opt, - int *hlimit, int *tclass, int *dontfrag); + int *hlimit, int *tclass, int *dontfrag, + struct sockcm_cookie *sockc); void ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp, __u16 srcp, __u16 destp, int bucket); diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c index 428162155280..a73d70119fcd 100644 --- a/net/ipv6/datagram.c +++ b/net/ipv6/datagram.c @@ -685,7 +685,8 @@ EXPORT_SYMBOL_GPL(ip6_datagram_recv_ctl); int ip6_datagram_send_ctl(struct net *net, struct sock *sk, struct msghdr *msg, struct flowi6 *fl6, struct ipv6_txoptions *opt, - int *hlimit, int *tclass, int *dontfrag) + int *hlimit, int *tclass, int *dontfrag, + struct sockcm_cookie *sockc) { struct in6_pktinfo *src_info; struct cmsghdr *cmsg; @@ -702,6 +703,12 @@ int ip6_datagram_send_ctl(struct net *net, struct sock *sk, goto exit_f; } + if (cmsg->cmsg_level == SOL_SOCKET) { + if (__sock_cmsg_send(sk, msg, cmsg, sockc)) + return -EINVAL; + continue; + } + if (cmsg->cmsg_level != SOL_IPV6) continue; diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c index dc2db4f7b182..35d3ddc328f8 100644 --- a/net/ipv6/ip6_flowlabel.c +++ b/net/ipv6/ip6_flowlabel.c @@ -372,6 +372,7 @@ fl_create(struct net *net, struct sock *sk, struct in6_flowlabel_req *freq, if (olen > 0) { struct msghdr msg; struct flowi6 flowi6; + struct sockcm_cookie sockc_junk; int junk; err = -ENOMEM; @@ -390,7 +391,7 @@ fl_create(struct net *net, struct sock *sk, struct in6_flowlabel_req *freq, memset(&flowi6, 0, sizeof(flowi6)); err = ip6_datagram_send_ctl(net, sk, &msg, &flowi6, fl->opt, - &junk, &junk, &junk); + &junk, &junk, &junk, &sockc_junk); if (err) goto done; err = -EINVAL; diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c index 4449ad1f8114..a5557d22f89e 100644 --- a/net/ipv6/ipv6_sockglue.c +++ b/net/ipv6/ipv6_sockglue.c @@ -471,6 +471,7 @@ sticky_done: struct ipv6_txoptions *opt = NULL; struct msghdr msg; struct flowi6 fl6; + struct sockcm_cookie sockc_junk; int junk; memset(&fl6, 0, sizeof(fl6)); @@ -503,7 +504,7 @@ sticky_done: msg.msg_control = (void *)(opt+1); retv = ip6_datagram_send_ctl(net, sk, &msg, &fl6, opt, &junk, - &junk, &junk); + &junk, &junk, &sockc_junk); if (retv) goto done; update: diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c index fa59dd7a427e..f175ec0a97ce 100644 --- a/net/ipv6/raw.c +++ b/net/ipv6/raw.c @@ -745,6 +745,7 @@ static int 
rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) struct dst_entry *dst = NULL; struct raw6_frag_vec rfv; struct flowi6 fl6; + struct sockcm_cookie sockc; int addr_len = msg->msg_namelen; int hlimit = -1; int tclass = -1; @@ -821,13 +822,16 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) if (fl6.flowi6_oif == 0) fl6.flowi6_oif = sk->sk_bound_dev_if; + sockc.tsflags = 0; + if (msg->msg_controllen) { opt = &opt_space; memset(opt, 0, sizeof(struct ipv6_txoptions)); opt->tot_len = sizeof(struct ipv6_txoptions); err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt, - &hlimit, &tclass, &dontfrag); + &hlimit, &tclass, &dontfrag, + &sockc); if (err < 0) { fl6_sock_release(flowlabel); return err; diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 8125931106be..2a787af42163 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -1128,6 +1128,7 @@ int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) int connected = 0; int is_udplite = IS_UDPLITE(sk); int (*getfrag)(void *, char *, int, int, int, struct sk_buff *); + struct sockcm_cookie sockc; /* destination address check */ if (sin6) { @@ -1247,6 +1248,7 @@ do_udp_sendmsg: fl6.flowi6_oif = np->sticky_pktinfo.ipi6_ifindex; fl6.flowi6_mark = sk->sk_mark; + sockc.tsflags = 0; if (msg->msg_controllen) { opt = &opt_space; @@ -1254,7 +1256,8 @@ do_udp_sendmsg: opt->tot_len = sizeof(*opt); err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt, - &hlimit, &tclass, &dontfrag); + &hlimit, &tclass, &dontfrag, + &sockc); if (err < 0) { fl6_sock_release(flowlabel); return err; diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c index 6b54ff3ff4cb..4f29a4a0f360 100644 --- a/net/l2tp/l2tp_ip6.c +++ b/net/l2tp/l2tp_ip6.c @@ -492,6 +492,7 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) struct ip6_flowlabel *flowlabel = NULL; struct dst_entry *dst = NULL; struct flowi6 fl6; + struct sockcm_cookie sockc_unused = {0}; int addr_len = msg->msg_namelen; int hlimit = -1; int tclass = -1; @@ -562,9 +563,10 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) memset(opt, 0, sizeof(struct ipv6_txoptions)); opt->tot_len = sizeof(struct ipv6_txoptions); - err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt, - &hlimit, &tclass, &dontfrag); - if (err < 0) { + err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt, + &hlimit, &tclass, &dontfrag, + &sockc_unused); + if (err < 0) { fl6_sock_release(flowlabel); return err; } From c14ac9451c34832554db33386a4393be8bba3a7b Mon Sep 17 00:00:00 2001 From: Soheil Hassas Yeganeh Date: Sat, 2 Apr 2016 23:08:12 -0400 Subject: [PATCH 0167/1649] sock: enable timestamping using control messages Currently, SOL_TIMESTAMPING can only be enabled using setsockopt. This is very costly when users want to sample writes to gather tx timestamps. Add support for enabling SO_TIMESTAMPING via control messages by using tsflags added in `struct sockcm_cookie` (added in the previous patches in this series) to set the tx_flags of the last skb created in a sendmsg. With this patch, the timestamp recording bits in tx_flags of the skbuff is overridden if SO_TIMESTAMPING is passed in a cmsg. Please note that this is only effective for overriding the recording timestamps flags. Users should enable timestamp reporting (e.g., SOF_TIMESTAMPING_SOFTWARE | SOF_TIMESTAMPING_OPT_ID) using socket options and then should ask for SOF_TIMESTAMPING_TX_* using control messages per sendmsg to sample timestamps for each write. 
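To make the resulting interface concrete, here is a minimal userspace sketch (UDP over loopback, error handling trimmed; the destination and the particular flags are arbitrary examples). Reporting flags remain a per-socket setsockopt, while the recording flags travel in a SOL_SOCKET/SO_TIMESTAMPING control message on each sendmsg, as the documentation added later in this series describes. The generated timestamps are then read back from the socket error queue via MSG_ERRQUEUE, exactly as with the setsockopt-only interface.

    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/socket.h>
    #include <netinet/in.h>
    #include <arpa/inet.h>
    #include <linux/net_tstamp.h>

    #ifndef SO_TIMESTAMPING
    #define SO_TIMESTAMPING 37         /* value from asm-generic/socket.h */
    #endif

    int main(void)
    {
        int fd = socket(AF_INET, SOCK_DGRAM, 0);
        struct sockaddr_in dst = {
            .sin_family = AF_INET,
            .sin_port   = htons(9),    /* discard port, example only */
        };
        inet_pton(AF_INET, "127.0.0.1", &dst.sin_addr);

        /* reporting flags: enabled once for the socket */
        unsigned int val = SOF_TIMESTAMPING_SOFTWARE | SOF_TIMESTAMPING_OPT_ID;
        setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, &val, sizeof(val));

        /* recording flags: requested for this write only, via cmsg */
        char payload[] = "sampled write";
        union {
            char buf[CMSG_SPACE(sizeof(unsigned int))];
            struct cmsghdr align;
        } control;
        struct iovec iov = { .iov_base = payload, .iov_len = sizeof(payload) };
        struct msghdr msg = {
            .msg_name       = &dst,
            .msg_namelen    = sizeof(dst),
            .msg_iov        = &iov,
            .msg_iovlen     = 1,
            .msg_control    = control.buf,
            .msg_controllen = sizeof(control.buf),
        };
        struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
        unsigned int tx = SOF_TIMESTAMPING_TX_SCHED | SOF_TIMESTAMPING_TX_SOFTWARE;

        cmsg->cmsg_level = SOL_SOCKET;
        cmsg->cmsg_type  = SO_TIMESTAMPING;
        cmsg->cmsg_len   = CMSG_LEN(sizeof(unsigned int));
        memcpy(CMSG_DATA(cmsg), &tx, sizeof(tx));

        if (sendmsg(fd, &msg, 0) < 0)
            perror("sendmsg");

        close(fd);
        return 0;
    }
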
Signed-off-by: Soheil Hassas Yeganeh Acked-by: Willem de Bruijn Signed-off-by: David S. Miller --- drivers/net/tun.c | 3 ++- include/net/ipv6.h | 6 ++++-- include/net/sock.h | 10 ++++++---- net/can/raw.c | 2 +- net/ipv4/ping.c | 5 +++-- net/ipv4/raw.c | 11 ++++++----- net/ipv4/tcp.c | 20 +++++++++++++++----- net/ipv4/udp.c | 7 ++++--- net/ipv6/icmp.c | 6 ++++-- net/ipv6/ip6_output.c | 15 +++++++++------ net/ipv6/ping.c | 3 ++- net/ipv6/raw.c | 5 ++--- net/ipv6/udp.c | 7 ++++--- net/l2tp/l2tp_ip6.c | 2 +- net/packet/af_packet.c | 30 +++++++++++++++++++++++++----- net/socket.c | 10 +++++----- 16 files changed, 93 insertions(+), 49 deletions(-) diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 510e90a6bb26..9abc36bf77ea 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -861,7 +861,8 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev) goto drop; if (skb->sk && sk_fullsock(skb->sk)) { - sock_tx_timestamp(skb->sk, &skb_shinfo(skb)->tx_flags); + sock_tx_timestamp(skb->sk, skb->sk->sk_tsflags, + &skb_shinfo(skb)->tx_flags); sw_tx_timestamp(skb); } diff --git a/include/net/ipv6.h b/include/net/ipv6.h index d0aeb97aec5d..55ee1eb7d026 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h @@ -867,7 +867,8 @@ int ip6_append_data(struct sock *sk, int odd, struct sk_buff *skb), void *from, int length, int transhdrlen, int hlimit, int tclass, struct ipv6_txoptions *opt, struct flowi6 *fl6, - struct rt6_info *rt, unsigned int flags, int dontfrag); + struct rt6_info *rt, unsigned int flags, int dontfrag, + const struct sockcm_cookie *sockc); int ip6_push_pending_frames(struct sock *sk); @@ -884,7 +885,8 @@ struct sk_buff *ip6_make_skb(struct sock *sk, void *from, int length, int transhdrlen, int hlimit, int tclass, struct ipv6_txoptions *opt, struct flowi6 *fl6, struct rt6_info *rt, - unsigned int flags, int dontfrag); + unsigned int flags, int dontfrag, + const struct sockcm_cookie *sockc); static inline struct sk_buff *ip6_finish_skb(struct sock *sk) { diff --git a/include/net/sock.h b/include/net/sock.h index af012da5e608..e91b87f54f99 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -2057,19 +2057,21 @@ static inline void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk, sk->sk_stamp = skb->tstamp; } -void __sock_tx_timestamp(const struct sock *sk, __u8 *tx_flags); +void __sock_tx_timestamp(__u16 tsflags, __u8 *tx_flags); /** * sock_tx_timestamp - checks whether the outgoing packet is to be time stamped * @sk: socket sending this packet + * @tsflags: timestamping flags to use * @tx_flags: completed with instructions for time stamping * * Note : callers should take care of initial *tx_flags value (usually 0) */ -static inline void sock_tx_timestamp(const struct sock *sk, __u8 *tx_flags) +static inline void sock_tx_timestamp(const struct sock *sk, __u16 tsflags, + __u8 *tx_flags) { - if (unlikely(sk->sk_tsflags)) - __sock_tx_timestamp(sk, tx_flags); + if (unlikely(tsflags)) + __sock_tx_timestamp(tsflags, tx_flags); if (unlikely(sock_flag(sk, SOCK_WIFI_STATUS))) *tx_flags |= SKBTX_WIFI_STATUS; } diff --git a/net/can/raw.c b/net/can/raw.c index 2e67b1423cd3..972c187d40ab 100644 --- a/net/can/raw.c +++ b/net/can/raw.c @@ -755,7 +755,7 @@ static int raw_sendmsg(struct socket *sock, struct msghdr *msg, size_t size) if (err < 0) goto free_skb; - sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags); + sock_tx_timestamp(sk, sk->sk_tsflags, &skb_shinfo(skb)->tx_flags); skb->dev = dev; skb->sk = sk; diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c index 
670639bf9f7e..66ddcb60519a 100644 --- a/net/ipv4/ping.c +++ b/net/ipv4/ping.c @@ -737,6 +737,7 @@ static int ping_v4_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) /* no remote port */ } + ipc.sockc.tsflags = sk->sk_tsflags; ipc.addr = inet->inet_saddr; ipc.opt = NULL; ipc.oif = sk->sk_bound_dev_if; @@ -744,8 +745,6 @@ static int ping_v4_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) ipc.ttl = 0; ipc.tos = -1; - sock_tx_timestamp(sk, &ipc.tx_flags); - if (msg->msg_controllen) { err = ip_cmsg_send(sk, msg, &ipc, false); if (unlikely(err)) { @@ -768,6 +767,8 @@ static int ping_v4_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) rcu_read_unlock(); } + sock_tx_timestamp(sk, ipc.sockc.tsflags, &ipc.tx_flags); + saddr = ipc.addr; ipc.addr = faddr = daddr; diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c index 088ce665fc7b..438f50c1a676 100644 --- a/net/ipv4/raw.c +++ b/net/ipv4/raw.c @@ -339,8 +339,8 @@ int raw_rcv(struct sock *sk, struct sk_buff *skb) static int raw_send_hdrinc(struct sock *sk, struct flowi4 *fl4, struct msghdr *msg, size_t length, - struct rtable **rtp, - unsigned int flags) + struct rtable **rtp, unsigned int flags, + const struct sockcm_cookie *sockc) { struct inet_sock *inet = inet_sk(sk); struct net *net = sock_net(sk); @@ -379,7 +379,7 @@ static int raw_send_hdrinc(struct sock *sk, struct flowi4 *fl4, skb->ip_summed = CHECKSUM_NONE; - sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags); + sock_tx_timestamp(sk, sockc->tsflags, &skb_shinfo(skb)->tx_flags); skb->transport_header = skb->network_header; err = -EFAULT; @@ -540,6 +540,7 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) daddr = inet->inet_daddr; } + ipc.sockc.tsflags = sk->sk_tsflags; ipc.addr = inet->inet_saddr; ipc.opt = NULL; ipc.tx_flags = 0; @@ -638,10 +639,10 @@ back_from_confirm: if (inet->hdrincl) err = raw_send_hdrinc(sk, &fl4, msg, len, - &rt, msg->msg_flags); + &rt, msg->msg_flags, &ipc.sockc); else { - sock_tx_timestamp(sk, &ipc.tx_flags); + sock_tx_timestamp(sk, ipc.sockc.tsflags, &ipc.tx_flags); if (!ipc.addr) ipc.addr = fl4.daddr; diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index ce3c9eb901a0..4d73858991af 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -428,13 +428,13 @@ void tcp_init_sock(struct sock *sk) } EXPORT_SYMBOL(tcp_init_sock); -static void tcp_tx_timestamp(struct sock *sk, struct sk_buff *skb) +static void tcp_tx_timestamp(struct sock *sk, u16 tsflags, struct sk_buff *skb) { - if (sk->sk_tsflags) { + if (sk->sk_tsflags || tsflags) { struct skb_shared_info *shinfo = skb_shinfo(skb); struct tcp_skb_cb *tcb = TCP_SKB_CB(skb); - sock_tx_timestamp(sk, &shinfo->tx_flags); + sock_tx_timestamp(sk, tsflags, &shinfo->tx_flags); if (shinfo->tx_flags & SKBTX_ANY_TSTAMP) shinfo->tskey = TCP_SKB_CB(skb)->seq + skb->len - 1; tcb->txstamp_ack = !!(shinfo->tx_flags & SKBTX_ACK_TSTAMP); @@ -959,7 +959,7 @@ new_segment: offset += copy; size -= copy; if (!size) { - tcp_tx_timestamp(sk, skb); + tcp_tx_timestamp(sk, sk->sk_tsflags, skb); goto out; } @@ -1079,6 +1079,7 @@ int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) { struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *skb; + struct sockcm_cookie sockc; int flags, err, copied = 0; int mss_now = 0, size_goal, copied_syn = 0; bool sg; @@ -1121,6 +1122,15 @@ int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) /* 'common' sending to sendq */ } + sockc.tsflags = sk->sk_tsflags; + if (msg->msg_controllen) { + err = sock_cmsg_send(sk, msg, &sockc); + if (unlikely(err)) { + err = 
-EINVAL; + goto out_err; + } + } + /* This should be in poll */ sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk); @@ -1239,7 +1249,7 @@ new_segment: copied += copy; if (!msg_data_left(msg)) { - tcp_tx_timestamp(sk, skb); + tcp_tx_timestamp(sk, sockc.tsflags, skb); goto out; } diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index bccb4e11047a..45ff590661f4 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -1027,12 +1027,11 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) */ connected = 1; } + + ipc.sockc.tsflags = sk->sk_tsflags; ipc.addr = inet->inet_saddr; - ipc.oif = sk->sk_bound_dev_if; - sock_tx_timestamp(sk, &ipc.tx_flags); - if (msg->msg_controllen) { err = ip_cmsg_send(sk, msg, &ipc, sk->sk_family == AF_INET6); if (unlikely(err)) { @@ -1059,6 +1058,8 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) saddr = ipc.addr; ipc.addr = faddr = daddr; + sock_tx_timestamp(sk, ipc.sockc.tsflags, &ipc.tx_flags); + if (ipc.opt && ipc.opt->opt.srr) { if (!daddr) return -EINVAL; diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c index 0a37ddc7af51..6b573ebe49de 100644 --- a/net/ipv6/icmp.c +++ b/net/ipv6/icmp.c @@ -400,6 +400,7 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info) struct icmp6hdr tmp_hdr; struct flowi6 fl6; struct icmpv6_msg msg; + struct sockcm_cookie sockc_unused = {0}; int iif = 0; int addr_type = 0; int len; @@ -527,7 +528,7 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info) len + sizeof(struct icmp6hdr), sizeof(struct icmp6hdr), hlimit, np->tclass, NULL, &fl6, (struct rt6_info *)dst, - MSG_DONTWAIT, np->dontfrag); + MSG_DONTWAIT, np->dontfrag, &sockc_unused); if (err) { ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTERRORS); ip6_flush_pending_frames(sk); @@ -566,6 +567,7 @@ static void icmpv6_echo_reply(struct sk_buff *skb) int hlimit; u8 tclass; u32 mark = IP6_REPLY_MARK(net, skb->mark); + struct sockcm_cookie sockc_unused = {0}; saddr = &ipv6_hdr(skb)->daddr; @@ -617,7 +619,7 @@ static void icmpv6_echo_reply(struct sk_buff *skb) err = ip6_append_data(sk, icmpv6_getfrag, &msg, skb->len + sizeof(struct icmp6hdr), sizeof(struct icmp6hdr), hlimit, tclass, NULL, &fl6, (struct rt6_info *)dst, MSG_DONTWAIT, - np->dontfrag); + np->dontfrag, &sockc_unused); if (err) { ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTERRORS); diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 9428345d3a07..612f3d138bf0 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -1258,7 +1258,8 @@ static int __ip6_append_data(struct sock *sk, int getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb), void *from, int length, int transhdrlen, - unsigned int flags, int dontfrag) + unsigned int flags, int dontfrag, + const struct sockcm_cookie *sockc) { struct sk_buff *skb, *skb_prev = NULL; unsigned int maxfraglen, fragheaderlen, mtu, orig_mtu; @@ -1329,7 +1330,7 @@ emsgsize: csummode = CHECKSUM_PARTIAL; if (sk->sk_type == SOCK_DGRAM || sk->sk_type == SOCK_RAW) { - sock_tx_timestamp(sk, &tx_flags); + sock_tx_timestamp(sk, sockc->tsflags, &tx_flags); if (tx_flags & SKBTX_ANY_SW_TSTAMP && sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID) tskey = sk->sk_tskey++; @@ -1565,7 +1566,8 @@ int ip6_append_data(struct sock *sk, int odd, struct sk_buff *skb), void *from, int length, int transhdrlen, int hlimit, int tclass, struct ipv6_txoptions *opt, struct flowi6 *fl6, - struct rt6_info *rt, unsigned int flags, int dontfrag) + struct rt6_info *rt, unsigned int flags, int dontfrag, + const struct sockcm_cookie *sockc) { 
struct inet_sock *inet = inet_sk(sk); struct ipv6_pinfo *np = inet6_sk(sk); @@ -1593,7 +1595,8 @@ int ip6_append_data(struct sock *sk, return __ip6_append_data(sk, fl6, &sk->sk_write_queue, &inet->cork.base, &np->cork, sk_page_frag(sk), getfrag, - from, length, transhdrlen, flags, dontfrag); + from, length, transhdrlen, flags, dontfrag, + sockc); } EXPORT_SYMBOL_GPL(ip6_append_data); @@ -1752,7 +1755,7 @@ struct sk_buff *ip6_make_skb(struct sock *sk, int hlimit, int tclass, struct ipv6_txoptions *opt, struct flowi6 *fl6, struct rt6_info *rt, unsigned int flags, - int dontfrag) + int dontfrag, const struct sockcm_cookie *sockc) { struct inet_cork_full cork; struct inet6_cork v6_cork; @@ -1779,7 +1782,7 @@ struct sk_buff *ip6_make_skb(struct sock *sk, err = __ip6_append_data(sk, fl6, &queue, &cork.base, &v6_cork, ¤t->task_frag, getfrag, from, length + exthdrlen, transhdrlen + exthdrlen, - flags, dontfrag); + flags, dontfrag, sockc); if (err) { __ip6_flush_pending_frames(sk, &queue, &cork, &v6_cork); return ERR_PTR(err); diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c index c382db7a2e73..da1cff79e447 100644 --- a/net/ipv6/ping.c +++ b/net/ipv6/ping.c @@ -62,6 +62,7 @@ static int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) struct dst_entry *dst; struct rt6_info *rt; struct pingfakehdr pfh; + struct sockcm_cookie junk = {0}; pr_debug("ping_v6_sendmsg(sk=%p,sk->num=%u)\n", inet, inet->inet_num); @@ -144,7 +145,7 @@ static int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) err = ip6_append_data(sk, ping_getfrag, &pfh, len, 0, hlimit, np->tclass, NULL, &fl6, rt, - MSG_DONTWAIT, np->dontfrag); + MSG_DONTWAIT, np->dontfrag, &junk); if (err) { ICMP6_INC_STATS(sock_net(sk), rt->rt6i_idev, diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c index f175ec0a97ce..b07ce21983aa 100644 --- a/net/ipv6/raw.c +++ b/net/ipv6/raw.c @@ -822,8 +822,7 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) if (fl6.flowi6_oif == 0) fl6.flowi6_oif = sk->sk_bound_dev_if; - sockc.tsflags = 0; - + sockc.tsflags = sk->sk_tsflags; if (msg->msg_controllen) { opt = &opt_space; memset(opt, 0, sizeof(struct ipv6_txoptions)); @@ -901,7 +900,7 @@ back_from_confirm: lock_sock(sk); err = ip6_append_data(sk, raw6_getfrag, &rfv, len, 0, hlimit, tclass, opt, &fl6, (struct rt6_info *)dst, - msg->msg_flags, dontfrag); + msg->msg_flags, dontfrag, &sockc); if (err) ip6_flush_pending_frames(sk); diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 2a787af42163..b772a7641fbd 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -1248,7 +1248,7 @@ do_udp_sendmsg: fl6.flowi6_oif = np->sticky_pktinfo.ipi6_ifindex; fl6.flowi6_mark = sk->sk_mark; - sockc.tsflags = 0; + sockc.tsflags = sk->sk_tsflags; if (msg->msg_controllen) { opt = &opt_space; @@ -1324,7 +1324,7 @@ back_from_confirm: skb = ip6_make_skb(sk, getfrag, msg, ulen, sizeof(struct udphdr), hlimit, tclass, opt, &fl6, (struct rt6_info *)dst, - msg->msg_flags, dontfrag); + msg->msg_flags, dontfrag, &sockc); err = PTR_ERR(skb); if (!IS_ERR_OR_NULL(skb)) err = udp_v6_send_skb(skb, &fl6); @@ -1351,7 +1351,8 @@ do_append_data: err = ip6_append_data(sk, getfrag, msg, ulen, sizeof(struct udphdr), hlimit, tclass, opt, &fl6, (struct rt6_info *)dst, - corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags, dontfrag); + corkreq ? 
msg->msg_flags|MSG_MORE : msg->msg_flags, dontfrag, + &sockc); if (err) udp_v6_flush_pending_frames(sk); else if (!corkreq) diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c index 4f29a4a0f360..1a38f20b1ca6 100644 --- a/net/l2tp/l2tp_ip6.c +++ b/net/l2tp/l2tp_ip6.c @@ -627,7 +627,7 @@ back_from_confirm: err = ip6_append_data(sk, ip_generic_getfrag, msg, ulen, transhdrlen, hlimit, tclass, opt, &fl6, (struct rt6_info *)dst, - msg->msg_flags, dontfrag); + msg->msg_flags, dontfrag, &sockc_unused); if (err) ip6_flush_pending_frames(sk); else if (!(msg->msg_flags & MSG_MORE)) diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 1ecfa710ca98..0007e23202e4 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -1837,6 +1837,7 @@ static int packet_sendmsg_spkt(struct socket *sock, struct msghdr *msg, DECLARE_SOCKADDR(struct sockaddr_pkt *, saddr, msg->msg_name); struct sk_buff *skb = NULL; struct net_device *dev; + struct sockcm_cookie sockc; __be16 proto = 0; int err; int extra_len = 0; @@ -1925,12 +1926,21 @@ retry: goto out_unlock; } + sockc.tsflags = 0; + if (msg->msg_controllen) { + err = sock_cmsg_send(sk, msg, &sockc); + if (unlikely(err)) { + err = -EINVAL; + goto out_unlock; + } + } + skb->protocol = proto; skb->dev = dev; skb->priority = sk->sk_priority; skb->mark = sk->sk_mark; - sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags); + sock_tx_timestamp(sk, sockc.tsflags, &skb_shinfo(skb)->tx_flags); if (unlikely(extra_len == 4)) skb->no_fcs = 1; @@ -2486,7 +2496,8 @@ static int packet_snd_vnet_gso(struct sk_buff *skb, static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb, void *frame, struct net_device *dev, void *data, int tp_len, - __be16 proto, unsigned char *addr, int hlen, int copylen) + __be16 proto, unsigned char *addr, int hlen, int copylen, + const struct sockcm_cookie *sockc) { union tpacket_uhdr ph; int to_write, offset, len, nr_frags, len_max; @@ -2500,7 +2511,7 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb, skb->dev = dev; skb->priority = po->sk.sk_priority; skb->mark = po->sk.sk_mark; - sock_tx_timestamp(&po->sk, &skb_shinfo(skb)->tx_flags); + sock_tx_timestamp(&po->sk, sockc->tsflags, &skb_shinfo(skb)->tx_flags); skb_shinfo(skb)->destructor_arg = ph.raw; skb_reserve(skb, hlen); @@ -2624,6 +2635,7 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg) struct sk_buff *skb; struct net_device *dev; struct virtio_net_hdr *vnet_hdr = NULL; + struct sockcm_cookie sockc; __be16 proto; int err, reserve = 0; void *ph; @@ -2655,6 +2667,13 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg) dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex); } + sockc.tsflags = 0; + if (msg->msg_controllen) { + err = sock_cmsg_send(&po->sk, msg, &sockc); + if (unlikely(err)) + goto out; + } + err = -ENXIO; if (unlikely(dev == NULL)) goto out; @@ -2712,7 +2731,7 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg) goto out_status; } tp_len = tpacket_fill_skb(po, skb, ph, dev, data, tp_len, proto, - addr, hlen, copylen); + addr, hlen, copylen, &sockc); if (likely(tp_len >= 0) && tp_len > dev->mtu + reserve && !po->has_vnet_hdr && @@ -2851,6 +2870,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len) if (unlikely(!(dev->flags & IFF_UP))) goto out_unlock; + sockc.tsflags = 0; sockc.mark = sk->sk_mark; if (msg->msg_controllen) { err = sock_cmsg_send(sk, msg, &sockc); @@ -2908,7 +2928,7 @@ static int packet_snd(struct socket *sock, struct msghdr 
*msg, size_t len) goto out_free; } - sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags); + sock_tx_timestamp(sk, sockc.tsflags, &skb_shinfo(skb)->tx_flags); if (!vnet_hdr.gso_type && (len > dev->mtu + reserve + extra_len) && !packet_extra_vlan_len_allowed(dev, skb)) { diff --git a/net/socket.c b/net/socket.c index 5f77a8e93830..979d3146b081 100644 --- a/net/socket.c +++ b/net/socket.c @@ -587,20 +587,20 @@ void sock_release(struct socket *sock) } EXPORT_SYMBOL(sock_release); -void __sock_tx_timestamp(const struct sock *sk, __u8 *tx_flags) +void __sock_tx_timestamp(__u16 tsflags, __u8 *tx_flags) { u8 flags = *tx_flags; - if (sk->sk_tsflags & SOF_TIMESTAMPING_TX_HARDWARE) + if (tsflags & SOF_TIMESTAMPING_TX_HARDWARE) flags |= SKBTX_HW_TSTAMP; - if (sk->sk_tsflags & SOF_TIMESTAMPING_TX_SOFTWARE) + if (tsflags & SOF_TIMESTAMPING_TX_SOFTWARE) flags |= SKBTX_SW_TSTAMP; - if (sk->sk_tsflags & SOF_TIMESTAMPING_TX_SCHED) + if (tsflags & SOF_TIMESTAMPING_TX_SCHED) flags |= SKBTX_SCHED_TSTAMP; - if (sk->sk_tsflags & SOF_TIMESTAMPING_TX_ACK) + if (tsflags & SOF_TIMESTAMPING_TX_ACK) flags |= SKBTX_ACK_TSTAMP; *tx_flags = flags; From fd91e12f594b40fdb2dad530e8b895cc5c07db21 Mon Sep 17 00:00:00 2001 From: Soheil Hassas Yeganeh Date: Sat, 2 Apr 2016 23:08:13 -0400 Subject: [PATCH 0168/1649] sock: document timestamping via cmsg in Documentation Update docs and add code snippet for using cmsg for timestamping. Signed-off-by: Soheil Hassas Yeganeh Acked-by: Willem de Bruijn Signed-off-by: David S. Miller --- Documentation/networking/timestamping.txt | 48 +++++++++++++++++++++-- 1 file changed, 45 insertions(+), 3 deletions(-) diff --git a/Documentation/networking/timestamping.txt b/Documentation/networking/timestamping.txt index a977339fbe0a..671cccf0dcd2 100644 --- a/Documentation/networking/timestamping.txt +++ b/Documentation/networking/timestamping.txt @@ -44,11 +44,17 @@ timeval of SO_TIMESTAMP (ms). Supports multiple types of timestamp requests. As a result, this socket option takes a bitmap of flags, not a boolean. In - err = setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, (void *) val, &val); + err = setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, (void *) val, + sizeof(val)); val is an integer with any of the following bits set. Setting other bit returns EINVAL and does not change the current state. +The socket option configures timestamp generation for individual +sk_buffs (1.3.1), timestamp reporting to the socket's error +queue (1.3.2) and options (1.3.3). Timestamp generation can also +be enabled for individual sendmsg calls using cmsg (1.3.4). + 1.3.1 Timestamp Generation @@ -71,13 +77,16 @@ SOF_TIMESTAMPING_RX_SOFTWARE: kernel receive stack. SOF_TIMESTAMPING_TX_HARDWARE: - Request tx timestamps generated by the network adapter. + Request tx timestamps generated by the network adapter. This flag + can be enabled via both socket options and control messages. SOF_TIMESTAMPING_TX_SOFTWARE: Request tx timestamps when data leaves the kernel. These timestamps are generated in the device driver as close as possible, but always prior to, passing the packet to the network interface. Hence, they require driver support and may not be available for all devices. + This flag can be enabled via both socket options and control messages. + SOF_TIMESTAMPING_TX_SCHED: Request tx timestamps prior to entering the packet scheduler. 
Kernel @@ -90,7 +99,8 @@ SOF_TIMESTAMPING_TX_SCHED: machines with virtual devices where a transmitted packet travels through multiple devices and, hence, multiple packet schedulers, a timestamp is generated at each layer. This allows for fine - grained measurement of queuing delay. + grained measurement of queuing delay. This flag can be enabled + via both socket options and control messages. SOF_TIMESTAMPING_TX_ACK: Request tx timestamps when all data in the send buffer has been @@ -99,6 +109,7 @@ SOF_TIMESTAMPING_TX_ACK: over-report measurement, because the timestamp is generated when all data up to and including the buffer at send() was acknowledged: the cumulative acknowledgment. The mechanism ignores SACK and FACK. + This flag can be enabled via both socket options and control messages. 1.3.2 Timestamp Reporting @@ -183,6 +194,37 @@ having access to the contents of the original packet, so cannot be combined with SOF_TIMESTAMPING_OPT_TSONLY. +1.3.4. Enabling timestamps via control messages + +In addition to socket options, timestamp generation can be requested +per write via cmsg, only for SOF_TIMESTAMPING_TX_* (see Section 1.3.1). +Using this feature, applications can sample timestamps per sendmsg() +without paying the overhead of enabling and disabling timestamps via +setsockopt: + + struct msghdr *msg; + ... + cmsg = CMSG_FIRSTHDR(msg); + cmsg->cmsg_level = SOL_SOCKET; + cmsg->cmsg_type = SO_TIMESTAMPING; + cmsg->cmsg_len = CMSG_LEN(sizeof(__u32)); + *((__u32 *) CMSG_DATA(cmsg)) = SOF_TIMESTAMPING_TX_SCHED | + SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_TX_ACK; + err = sendmsg(fd, msg, 0); + +The SOF_TIMESTAMPING_TX_* flags set via cmsg will override +the SOF_TIMESTAMPING_TX_* flags set via setsockopt. + +Moreover, applications must still enable timestamp reporting via +setsockopt to receive timestamps: + + __u32 val = SOF_TIMESTAMPING_SOFTWARE | + SOF_TIMESTAMPING_OPT_ID /* or any other flag */; + err = setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, (void *) val, + sizeof(val)); + + 1.4 Bytestream Timestamps The SO_TIMESTAMPING interface supports timestamping of bytes in a From d5dd7c3fa4dbff70fc25acf54acb63cf971fd6e9 Mon Sep 17 00:00:00 2001 From: Emil Tantilov Date: Thu, 17 Dec 2015 17:32:55 -0800 Subject: [PATCH 0169/1649] ixgbevf: use bit operations for setting and checking resets Move the reset flags to adapter->state in order to make use of bit operations. This is an alternative patch to the one previously submitted by John Greene. Suggested-by: Alexander Duyck Reported-by: Scott Otto Reported-by: John Greene Signed-off-by: Emil Tantilov Tested-by: Phil Schmitt Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbevf/ixgbevf.h | 9 ++------- drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 15 ++++++--------- 2 files changed, 8 insertions(+), 16 deletions(-) diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h index 991eeae81473..5ac60eefb0cd 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h @@ -403,13 +403,6 @@ struct ixgbevf_adapter { u32 alloc_rx_page_failed; u32 alloc_rx_buff_failed; - /* Some features need tri-state capability, - * thus the additional *_CAPABLE flags. 
- */ - u32 flags; -#define IXGBEVF_FLAG_RESET_REQUESTED (u32)(1) -#define IXGBEVF_FLAG_QUEUE_RESET_REQUESTED (u32)(1 << 2) - struct msix_entry *msix_entries; /* OS defined structs */ @@ -461,6 +454,8 @@ enum ixbgevf_state_t { __IXGBEVF_REMOVING, __IXGBEVF_SERVICE_SCHED, __IXGBEVF_SERVICE_INITED, + __IXGBEVF_RESET_REQUESTED, + __IXGBEVF_QUEUE_RESET_REQUESTED, }; enum ixgbevf_boards { diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index b0edae94d73d..9a2eed0f5245 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -268,7 +268,7 @@ static void ixgbevf_tx_timeout_reset(struct ixgbevf_adapter *adapter) { /* Do the reset outside of interrupt context */ if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) { - adapter->flags |= IXGBEVF_FLAG_RESET_REQUESTED; + set_bit(__IXGBEVF_RESET_REQUESTED, &adapter->state); ixgbevf_service_event_schedule(adapter); } } @@ -1984,7 +1984,7 @@ static int ixgbevf_configure_dcb(struct ixgbevf_adapter *adapter) hw->mbx.timeout = 0; /* wait for watchdog to come around and bail us out */ - adapter->flags |= IXGBEVF_FLAG_QUEUE_RESET_REQUESTED; + set_bit(__IXGBEVF_QUEUE_RESET_REQUESTED, &adapter->state); } return 0; @@ -2749,11 +2749,9 @@ static void ixgbevf_service_timer(unsigned long data) static void ixgbevf_reset_subtask(struct ixgbevf_adapter *adapter) { - if (!(adapter->flags & IXGBEVF_FLAG_RESET_REQUESTED)) + if (!test_and_clear_bit(__IXGBEVF_RESET_REQUESTED, &adapter->state)) return; - adapter->flags &= ~IXGBEVF_FLAG_RESET_REQUESTED; - /* If we're already down or resetting, just bail */ if (test_bit(__IXGBEVF_DOWN, &adapter->state) || test_bit(__IXGBEVF_RESETTING, &adapter->state)) @@ -2821,7 +2819,7 @@ static void ixgbevf_watchdog_update_link(struct ixgbevf_adapter *adapter) /* if check for link returns error we will need to reset */ if (err && time_after(jiffies, adapter->last_reset + (10 * HZ))) { - adapter->flags |= IXGBEVF_FLAG_RESET_REQUESTED; + set_bit(__IXGBEVF_RESET_REQUESTED, &adapter->state); link_up = false; } @@ -3222,11 +3220,10 @@ static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter) { struct net_device *dev = adapter->netdev; - if (!(adapter->flags & IXGBEVF_FLAG_QUEUE_RESET_REQUESTED)) + if (!test_and_clear_bit(__IXGBEVF_QUEUE_RESET_REQUESTED, + &adapter->state)) return; - adapter->flags &= ~IXGBEVF_FLAG_QUEUE_RESET_REQUESTED; - /* if interface is down do nothing */ if (test_bit(__IXGBEVF_DOWN, &adapter->state) || test_bit(__IXGBEVF_RESETTING, &adapter->state)) From 1d96cf9822bf801b1a93a0817e45dd02af5ac0e6 Mon Sep 17 00:00:00 2001 From: chas williams <3chas3@gmail.com> Date: Tue, 5 Jan 2016 17:30:39 -0500 Subject: [PATCH 0170/1649] ixgbe: Extend trust to allow guest to set unicast address When running certain routing protocols like VRRP, VF guests need the ability to set the unicast address of the interface. Extend the new ndo trust feature to let the hypervisor trust a guest to set/update its own unicast address. 
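For context, a hedged sketch of the guest-side operation this unblocks: once the host marks the VF as trusted (with iproute2 this is typically "ip link set <pf> vf <N> trust on"), the guest can replace the unicast MAC of its own interface with a plain SIOCSIFHWADDR ioctl. The interface name and address below are examples only.

    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <net/if.h>
    #include <net/if_arp.h>

    int main(void)
    {
        /* locally administered example address; "eth0" is an assumed VF netdev name */
        const unsigned char mac[6] = { 0x02, 0x00, 0x00, 0x12, 0x34, 0x56 };
        struct ifreq ifr;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        memset(&ifr, 0, sizeof(ifr));
        snprintf(ifr.ifr_name, IFNAMSIZ, "eth0");
        ifr.ifr_hwaddr.sa_family = ARPHRD_ETHER;
        memcpy(ifr.ifr_hwaddr.sa_data, mac, sizeof(mac));

        /* rejected by the PF when it has administratively set the MAC,
         * unless the VF has been marked trusted
         */
        if (ioctl(fd, SIOCSIFHWADDR, &ifr) < 0)
            perror("SIOCSIFHWADDR");

        close(fd);
        return 0;
    }
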
Signed-off-by: Chas Williams <3chas3@gmail.com> Tested-by: Phil Schmitt Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c index 8025a3f93598..80e47dbc530b 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c @@ -887,7 +887,7 @@ static int ixgbe_set_vf_mac_addr(struct ixgbe_adapter *adapter, return -1; } - if (adapter->vfinfo[vf].pf_set_mac && + if (adapter->vfinfo[vf].pf_set_mac && !adapter->vfinfo[vf].trusted && !ether_addr_equal(adapter->vfinfo[vf].vf_mac_addresses, new_mac)) { e_warn(drv, "VF %d attempted to override administratively set MAC address\n" From 18be4fce00fef206dc6f104a6a258b193e9871cf Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Wed, 6 Jan 2016 22:48:44 -0800 Subject: [PATCH 0171/1649] ixgbe: Do not allow PF to add VLVF entry unless it actually needs it While doing the work on igb I realized there were a few cases where we were still adding VLANs to the VLVF entries for the PF when they were not needed. This patch cleans that up so that the only time we add a PF entry to the VLVF is either for VLAN 0 or if the PF has requested a VLAN that a VF is already using. Signed-off-by: Alexander Duyck Tested-by: Phil Schmitt Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 8 ++--- .../net/ethernet/intel/ixgbe/ixgbe_sriov.c | 31 ++++++++++--------- 2 files changed, 21 insertions(+), 18 deletions(-) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 7df3fe29b210..a01a7f251e03 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -3908,7 +3908,9 @@ static int ixgbe_vlan_rx_add_vid(struct net_device *netdev, struct ixgbe_hw *hw = &adapter->hw; /* add VID to filter table */ - hw->mac.ops.set_vfta(&adapter->hw, vid, VMDQ_P(0), true, true); + if (!vid || !(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC)) + hw->mac.ops.set_vfta(&adapter->hw, vid, VMDQ_P(0), true, !!vid); + set_bit(vid, adapter->active_vlans); return 0; @@ -3965,9 +3967,7 @@ static int ixgbe_vlan_rx_kill_vid(struct net_device *netdev, struct ixgbe_hw *hw = &adapter->hw; /* remove VID from filter table */ - if (adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC) - ixgbe_update_pf_promisc_vlvf(adapter, vid); - else + if (vid && !(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC)) hw->mac.ops.set_vfta(hw, vid, VMDQ_P(0), false, true); clear_bit(vid, adapter->active_vlans); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c index 80e47dbc530b..4bc249632ec2 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c @@ -589,40 +589,40 @@ static void ixgbe_clear_vmvir(struct ixgbe_adapter *adapter, u32 vf) static void ixgbe_clear_vf_vlans(struct ixgbe_adapter *adapter, u32 vf) { struct ixgbe_hw *hw = &adapter->hw; - u32 i; + u32 vlvfb_mask, pool_mask, i; + + /* create mask for VF and other pools */ + pool_mask = ~(1 << (VMDQ_P(0) % 32)); + vlvfb_mask = 1 << (vf % 32); /* post increment loop, covers VLVF_ENTRIES - 1 to 0 */ for (i = IXGBE_VLVF_ENTRIES; i--;) { u32 bits[2], vlvfb, vid, vfta, vlvf; u32 word = i * 2 + vf / 32; - u32 mask = 1 << (vf % 32); + u32 mask; vlvfb = IXGBE_READ_REG(hw, IXGBE_VLVFB(word)); /* if our bit isn't set we 
can skip it */ - if (!(vlvfb & mask)) + if (!(vlvfb & vlvfb_mask)) continue; /* clear our bit from vlvfb */ - vlvfb ^= mask; + vlvfb ^= vlvfb_mask; /* create 64b mask to chedk to see if we should clear VLVF */ bits[word % 2] = vlvfb; bits[~word % 2] = IXGBE_READ_REG(hw, IXGBE_VLVFB(word ^ 1)); - /* if promisc is enabled, PF will be present, leave VFTA */ - if (adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC) { - bits[VMDQ_P(0) / 32] &= ~(1 << (VMDQ_P(0) % 32)); - - if (bits[0] || bits[1]) - goto update_vlvfb; - goto update_vlvf; - } - /* if other pools are present, just remove ourselves */ - if (bits[0] || bits[1]) + if (bits[(VMDQ_P(0) / 32) ^ 1] || + (bits[VMDQ_P(0) / 32] & pool_mask)) goto update_vlvfb; + /* if PF is present, leave VFTA */ + if (bits[0] || bits[1]) + goto update_vlvf; + /* if we cannot determine VLAN just remove ourselves */ vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(i)); if (!vlvf) @@ -638,6 +638,9 @@ static void ixgbe_clear_vf_vlans(struct ixgbe_adapter *adapter, u32 vf) update_vlvf: /* clear POOL selection enable */ IXGBE_WRITE_REG(hw, IXGBE_VLVF(i), 0); + + if (!(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC)) + vlvfb = 0; update_vlvfb: /* clear pool bits */ IXGBE_WRITE_REG(hw, IXGBE_VLVFB(word), vlvfb); From f51bdc236b6c5835fa5e0df772acc234288b8af2 Mon Sep 17 00:00:00 2001 From: Kazuya Mizuguchi Date: Sun, 3 Apr 2016 23:54:38 +0900 Subject: [PATCH 0172/1649] ravb: Add dma queue interrupt support This patch supports the following interrupts. - One interrupt for multiple (timestamp, error, gPTP) - One interrupt for emac - Four interrupts for dma queue (best effort rx/tx, network control rx/tx) This patch improve efficiency of the interrupt handler by adding the interrupt handler corresponding to each interrupt source described above. Additionally, it reduces the number of times of the access to EthernetAVB IF. Also this patch prevent this driver depends on the whim of a boot loader. [ykaneko0929@gmail.com: define bit names of registers] [ykaneko0929@gmail.com: add comment for gen3 only registers] [ykaneko0929@gmail.com: fix coding style] [ykaneko0929@gmail.com: update changelog] [ykaneko0929@gmail.com: gen3: fix initialization of interrupts] [ykaneko0929@gmail.com: gen3: fix clearing interrupts] [ykaneko0929@gmail.com: gen3: add helper function for request_irq()] [ykaneko0929@gmail.com: gen3: remove IRQF_SHARED flag for request_irq()] [ykaneko0929@gmail.com: revert ravb_close() and ravb_ptp_stop()] [ykaneko0929@gmail.com: avoid calling free_irq() to non-hooked interrupts] [ykaneko0929@gmail.com: make NC/BE interrupt handler a function] [ykaneko0929@gmail.com: make timestamp interrupt handler a function] [ykaneko0929@gmail.com: timestamp interrupt is handled in multiple interrupt handler instead of dma queue interrupt handler] Signed-off-by: Kazuya Mizuguchi Signed-off-by: Yoshihiro Kaneko Acked-by: Sergei Shtylyov Signed-off-by: David S. 
Miller --- drivers/net/ethernet/renesas/ravb.h | 204 +++++++++++++++++ drivers/net/ethernet/renesas/ravb_main.c | 266 +++++++++++++++++++---- drivers/net/ethernet/renesas/ravb_ptp.c | 17 +- 3 files changed, 439 insertions(+), 48 deletions(-) diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h index b2160d1b9c71..5c1624147778 100644 --- a/drivers/net/ethernet/renesas/ravb.h +++ b/drivers/net/ethernet/renesas/ravb.h @@ -157,6 +157,7 @@ enum ravb_reg { TIC = 0x0378, TIS = 0x037C, ISS = 0x0380, + CIE = 0x0384, /* R-Car Gen3 only */ GCCR = 0x0390, GMTT = 0x0394, GPTC = 0x0398, @@ -170,6 +171,15 @@ enum ravb_reg { GCT0 = 0x03B8, GCT1 = 0x03BC, GCT2 = 0x03C0, + GIE = 0x03CC, /* R-Car Gen3 only */ + GID = 0x03D0, /* R-Car Gen3 only */ + DIL = 0x0440, /* R-Car Gen3 only */ + RIE0 = 0x0460, /* R-Car Gen3 only */ + RID0 = 0x0464, /* R-Car Gen3 only */ + RIE2 = 0x0470, /* R-Car Gen3 only */ + RID2 = 0x0474, /* R-Car Gen3 only */ + TIE = 0x0478, /* R-Car Gen3 only */ + TID = 0x047c, /* R-Car Gen3 only */ /* E-MAC registers */ ECMR = 0x0500, @@ -556,6 +566,16 @@ enum ISS_BIT { ISS_DPS15 = 0x80000000, }; +/* CIE (R-Car Gen3 only) */ +enum CIE_BIT { + CIE_CRIE = 0x00000001, + CIE_CTIE = 0x00000100, + CIE_RQFM = 0x00010000, + CIE_CL0M = 0x00020000, + CIE_RFWL = 0x00040000, + CIE_RFFL = 0x00080000, +}; + /* GCCR */ enum GCCR_BIT { GCCR_TCR = 0x00000003, @@ -592,6 +612,188 @@ enum GIS_BIT { GIS_PTMF = 0x00000004, }; +/* GIE (R-Car Gen3 only) */ +enum GIE_BIT { + GIE_PTCS = 0x00000001, + GIE_PTOS = 0x00000002, + GIE_PTMS0 = 0x00000004, + GIE_PTMS1 = 0x00000008, + GIE_PTMS2 = 0x00000010, + GIE_PTMS3 = 0x00000020, + GIE_PTMS4 = 0x00000040, + GIE_PTMS5 = 0x00000080, + GIE_PTMS6 = 0x00000100, + GIE_PTMS7 = 0x00000200, + GIE_ATCS0 = 0x00010000, + GIE_ATCS1 = 0x00020000, + GIE_ATCS2 = 0x00040000, + GIE_ATCS3 = 0x00080000, + GIE_ATCS4 = 0x00100000, + GIE_ATCS5 = 0x00200000, + GIE_ATCS6 = 0x00400000, + GIE_ATCS7 = 0x00800000, + GIE_ATCS8 = 0x01000000, + GIE_ATCS9 = 0x02000000, + GIE_ATCS10 = 0x04000000, + GIE_ATCS11 = 0x08000000, + GIE_ATCS12 = 0x10000000, + GIE_ATCS13 = 0x20000000, + GIE_ATCS14 = 0x40000000, + GIE_ATCS15 = 0x80000000, +}; + +/* GID (R-Car Gen3 only) */ +enum GID_BIT { + GID_PTCD = 0x00000001, + GID_PTOD = 0x00000002, + GID_PTMD0 = 0x00000004, + GID_PTMD1 = 0x00000008, + GID_PTMD2 = 0x00000010, + GID_PTMD3 = 0x00000020, + GID_PTMD4 = 0x00000040, + GID_PTMD5 = 0x00000080, + GID_PTMD6 = 0x00000100, + GID_PTMD7 = 0x00000200, + GID_ATCD0 = 0x00010000, + GID_ATCD1 = 0x00020000, + GID_ATCD2 = 0x00040000, + GID_ATCD3 = 0x00080000, + GID_ATCD4 = 0x00100000, + GID_ATCD5 = 0x00200000, + GID_ATCD6 = 0x00400000, + GID_ATCD7 = 0x00800000, + GID_ATCD8 = 0x01000000, + GID_ATCD9 = 0x02000000, + GID_ATCD10 = 0x04000000, + GID_ATCD11 = 0x08000000, + GID_ATCD12 = 0x10000000, + GID_ATCD13 = 0x20000000, + GID_ATCD14 = 0x40000000, + GID_ATCD15 = 0x80000000, +}; + +/* RIE0 (R-Car Gen3 only) */ +enum RIE0_BIT { + RIE0_FRS0 = 0x00000001, + RIE0_FRS1 = 0x00000002, + RIE0_FRS2 = 0x00000004, + RIE0_FRS3 = 0x00000008, + RIE0_FRS4 = 0x00000010, + RIE0_FRS5 = 0x00000020, + RIE0_FRS6 = 0x00000040, + RIE0_FRS7 = 0x00000080, + RIE0_FRS8 = 0x00000100, + RIE0_FRS9 = 0x00000200, + RIE0_FRS10 = 0x00000400, + RIE0_FRS11 = 0x00000800, + RIE0_FRS12 = 0x00001000, + RIE0_FRS13 = 0x00002000, + RIE0_FRS14 = 0x00004000, + RIE0_FRS15 = 0x00008000, + RIE0_FRS16 = 0x00010000, + RIE0_FRS17 = 0x00020000, +}; + +/* RID0 (R-Car Gen3 only) */ +enum RID0_BIT { + RID0_FRD0 = 0x00000001, + RID0_FRD1 = 0x00000002, + 
RID0_FRD2 = 0x00000004, + RID0_FRD3 = 0x00000008, + RID0_FRD4 = 0x00000010, + RID0_FRD5 = 0x00000020, + RID0_FRD6 = 0x00000040, + RID0_FRD7 = 0x00000080, + RID0_FRD8 = 0x00000100, + RID0_FRD9 = 0x00000200, + RID0_FRD10 = 0x00000400, + RID0_FRD11 = 0x00000800, + RID0_FRD12 = 0x00001000, + RID0_FRD13 = 0x00002000, + RID0_FRD14 = 0x00004000, + RID0_FRD15 = 0x00008000, + RID0_FRD16 = 0x00010000, + RID0_FRD17 = 0x00020000, +}; + +/* RIE2 (R-Car Gen3 only) */ +enum RIE2_BIT { + RIE2_QFS0 = 0x00000001, + RIE2_QFS1 = 0x00000002, + RIE2_QFS2 = 0x00000004, + RIE2_QFS3 = 0x00000008, + RIE2_QFS4 = 0x00000010, + RIE2_QFS5 = 0x00000020, + RIE2_QFS6 = 0x00000040, + RIE2_QFS7 = 0x00000080, + RIE2_QFS8 = 0x00000100, + RIE2_QFS9 = 0x00000200, + RIE2_QFS10 = 0x00000400, + RIE2_QFS11 = 0x00000800, + RIE2_QFS12 = 0x00001000, + RIE2_QFS13 = 0x00002000, + RIE2_QFS14 = 0x00004000, + RIE2_QFS15 = 0x00008000, + RIE2_QFS16 = 0x00010000, + RIE2_QFS17 = 0x00020000, + RIE2_RFFS = 0x80000000, +}; + +/* RID2 (R-Car Gen3 only) */ +enum RID2_BIT { + RID2_QFD0 = 0x00000001, + RID2_QFD1 = 0x00000002, + RID2_QFD2 = 0x00000004, + RID2_QFD3 = 0x00000008, + RID2_QFD4 = 0x00000010, + RID2_QFD5 = 0x00000020, + RID2_QFD6 = 0x00000040, + RID2_QFD7 = 0x00000080, + RID2_QFD8 = 0x00000100, + RID2_QFD9 = 0x00000200, + RID2_QFD10 = 0x00000400, + RID2_QFD11 = 0x00000800, + RID2_QFD12 = 0x00001000, + RID2_QFD13 = 0x00002000, + RID2_QFD14 = 0x00004000, + RID2_QFD15 = 0x00008000, + RID2_QFD16 = 0x00010000, + RID2_QFD17 = 0x00020000, + RID2_RFFD = 0x80000000, +}; + +/* TIE (R-Car Gen3 only) */ +enum TIE_BIT { + TIE_FTS0 = 0x00000001, + TIE_FTS1 = 0x00000002, + TIE_FTS2 = 0x00000004, + TIE_FTS3 = 0x00000008, + TIE_TFUS = 0x00000100, + TIE_TFWS = 0x00000200, + TIE_MFUS = 0x00000400, + TIE_MFWS = 0x00000800, + TIE_TDPS0 = 0x00010000, + TIE_TDPS1 = 0x00020000, + TIE_TDPS2 = 0x00040000, + TIE_TDPS3 = 0x00080000, +}; + +/* TID (R-Car Gen3 only) */ +enum TID_BIT { + TID_FTD0 = 0x00000001, + TID_FTD1 = 0x00000002, + TID_FTD2 = 0x00000004, + TID_FTD3 = 0x00000008, + TID_TFUD = 0x00000100, + TID_TFWD = 0x00000200, + TID_MFUD = 0x00000400, + TID_MFWD = 0x00000800, + TID_TDPD0 = 0x00010000, + TID_TDPD1 = 0x00020000, + TID_TDPD2 = 0x00040000, + TID_TDPD3 = 0x00080000, +}; + /* ECMR */ enum ECMR_BIT { ECMR_PRM = 0x00000001, @@ -817,6 +1019,8 @@ struct ravb_private { int duplex; int emac_irq; enum ravb_chip_id chip_id; + int rx_irqs[NUM_RX_QUEUE]; + int tx_irqs[NUM_TX_QUEUE]; unsigned no_avb_link:1; unsigned avb_link_active_low:1; diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index 087e14a3fba7..4b71951e185d 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -42,6 +42,16 @@ NETIF_MSG_RX_ERR | \ NETIF_MSG_TX_ERR) +static const char *ravb_rx_irqs[NUM_RX_QUEUE] = { + "ch0", /* RAVB_BE */ + "ch1", /* RAVB_NC */ +}; + +static const char *ravb_tx_irqs[NUM_TX_QUEUE] = { + "ch18", /* RAVB_BE */ + "ch19", /* RAVB_NC */ +}; + void ravb_modify(struct net_device *ndev, enum ravb_reg reg, u32 clear, u32 set) { @@ -365,6 +375,7 @@ static void ravb_emac_init(struct net_device *ndev) /* Device init function for Ethernet AVB */ static int ravb_dmac_init(struct net_device *ndev) { + struct ravb_private *priv = netdev_priv(ndev); int error; /* Set CONFIG mode */ @@ -401,6 +412,12 @@ static int ravb_dmac_init(struct net_device *ndev) ravb_write(ndev, TCCR_TFEN, TCCR); /* Interrupt init: */ + if (priv->chip_id == RCAR_GEN3) { + /* Clear DIL.DPLx */ + ravb_write(ndev, 0, DIL); 
+ /* Set queue specific interrupt */ + ravb_write(ndev, CIE_CRIE | CIE_CTIE | CIE_CL0M, CIE); + } /* Frame receive */ ravb_write(ndev, RIC0_FRE0 | RIC0_FRE1, RIC0); /* Disable FIFO full warning */ @@ -643,7 +660,7 @@ static int ravb_stop_dma(struct net_device *ndev) } /* E-MAC interrupt handler */ -static void ravb_emac_interrupt(struct net_device *ndev) +static void ravb_emac_interrupt_unlocked(struct net_device *ndev) { struct ravb_private *priv = netdev_priv(ndev); u32 ecsr, psr; @@ -669,6 +686,18 @@ static void ravb_emac_interrupt(struct net_device *ndev) } } +static irqreturn_t ravb_emac_interrupt(int irq, void *dev_id) +{ + struct net_device *ndev = dev_id; + struct ravb_private *priv = netdev_priv(ndev); + + spin_lock(&priv->lock); + ravb_emac_interrupt_unlocked(ndev); + mmiowb(); + spin_unlock(&priv->lock); + return IRQ_HANDLED; +} + /* Error interrupt handler */ static void ravb_error_interrupt(struct net_device *ndev) { @@ -695,6 +724,50 @@ static void ravb_error_interrupt(struct net_device *ndev) } } +static bool ravb_queue_interrupt(struct net_device *ndev, int q) +{ + struct ravb_private *priv = netdev_priv(ndev); + u32 ris0 = ravb_read(ndev, RIS0); + u32 ric0 = ravb_read(ndev, RIC0); + u32 tis = ravb_read(ndev, TIS); + u32 tic = ravb_read(ndev, TIC); + + if (((ris0 & ric0) & BIT(q)) || ((tis & tic) & BIT(q))) { + if (napi_schedule_prep(&priv->napi[q])) { + /* Mask RX and TX interrupts */ + if (priv->chip_id == RCAR_GEN2) { + ravb_write(ndev, ric0 & ~BIT(q), RIC0); + ravb_write(ndev, tic & ~BIT(q), TIC); + } else { + ravb_write(ndev, BIT(q), RID0); + ravb_write(ndev, BIT(q), TID); + } + __napi_schedule(&priv->napi[q]); + } else { + netdev_warn(ndev, + "ignoring interrupt, rx status 0x%08x, rx mask 0x%08x,\n", + ris0, ric0); + netdev_warn(ndev, + " tx status 0x%08x, tx mask 0x%08x.\n", + tis, tic); + } + return true; + } + return false; +} + +static bool ravb_timestamp_interrupt(struct net_device *ndev) +{ + u32 tis = ravb_read(ndev, TIS); + + if (tis & TIS_TFUF) { + ravb_write(ndev, ~TIS_TFUF, TIS); + ravb_get_tx_tstamp(ndev); + return true; + } + return false; +} + static irqreturn_t ravb_interrupt(int irq, void *dev_id) { struct net_device *ndev = dev_id; @@ -708,46 +781,22 @@ static irqreturn_t ravb_interrupt(int irq, void *dev_id) /* Received and transmitted interrupts */ if (iss & (ISS_FRS | ISS_FTS | ISS_TFUS)) { - u32 ris0 = ravb_read(ndev, RIS0); - u32 ric0 = ravb_read(ndev, RIC0); - u32 tis = ravb_read(ndev, TIS); - u32 tic = ravb_read(ndev, TIC); int q; /* Timestamp updated */ - if (tis & TIS_TFUF) { - ravb_write(ndev, ~TIS_TFUF, TIS); - ravb_get_tx_tstamp(ndev); + if (ravb_timestamp_interrupt(ndev)) result = IRQ_HANDLED; - } /* Network control and best effort queue RX/TX */ for (q = RAVB_NC; q >= RAVB_BE; q--) { - if (((ris0 & ric0) & BIT(q)) || - ((tis & tic) & BIT(q))) { - if (napi_schedule_prep(&priv->napi[q])) { - /* Mask RX and TX interrupts */ - ric0 &= ~BIT(q); - tic &= ~BIT(q); - ravb_write(ndev, ric0, RIC0); - ravb_write(ndev, tic, TIC); - __napi_schedule(&priv->napi[q]); - } else { - netdev_warn(ndev, - "ignoring interrupt, rx status 0x%08x, rx mask 0x%08x,\n", - ris0, ric0); - netdev_warn(ndev, - " tx status 0x%08x, tx mask 0x%08x.\n", - tis, tic); - } + if (ravb_queue_interrupt(ndev, q)) result = IRQ_HANDLED; - } } } /* E-MAC status summary */ if (iss & ISS_MS) { - ravb_emac_interrupt(ndev); + ravb_emac_interrupt_unlocked(ndev); result = IRQ_HANDLED; } @@ -757,6 +806,7 @@ static irqreturn_t ravb_interrupt(int irq, void *dev_id) result = IRQ_HANDLED; } + 
/* gPTP interrupt status summary */ if ((iss & ISS_CGIS) && ravb_ptp_interrupt(ndev) == IRQ_HANDLED) result = IRQ_HANDLED; @@ -765,6 +815,64 @@ static irqreturn_t ravb_interrupt(int irq, void *dev_id) return result; } +/* Timestamp/Error/gPTP interrupt handler */ +static irqreturn_t ravb_multi_interrupt(int irq, void *dev_id) +{ + struct net_device *ndev = dev_id; + struct ravb_private *priv = netdev_priv(ndev); + irqreturn_t result = IRQ_NONE; + u32 iss; + + spin_lock(&priv->lock); + /* Get interrupt status */ + iss = ravb_read(ndev, ISS); + + /* Timestamp updated */ + if ((iss & ISS_TFUS) && ravb_timestamp_interrupt(ndev)) + result = IRQ_HANDLED; + + /* Error status summary */ + if (iss & ISS_ES) { + ravb_error_interrupt(ndev); + result = IRQ_HANDLED; + } + + /* gPTP interrupt status summary */ + if ((iss & ISS_CGIS) && ravb_ptp_interrupt(ndev) == IRQ_HANDLED) + result = IRQ_HANDLED; + + mmiowb(); + spin_unlock(&priv->lock); + return result; +} + +static irqreturn_t ravb_dma_interrupt(int irq, void *dev_id, int q) +{ + struct net_device *ndev = dev_id; + struct ravb_private *priv = netdev_priv(ndev); + irqreturn_t result = IRQ_NONE; + + spin_lock(&priv->lock); + + /* Network control/Best effort queue RX/TX */ + if (ravb_queue_interrupt(ndev, q)) + result = IRQ_HANDLED; + + mmiowb(); + spin_unlock(&priv->lock); + return result; +} + +static irqreturn_t ravb_be_interrupt(int irq, void *dev_id) +{ + return ravb_dma_interrupt(irq, dev_id, RAVB_BE); +} + +static irqreturn_t ravb_nc_interrupt(int irq, void *dev_id) +{ + return ravb_dma_interrupt(irq, dev_id, RAVB_NC); +} + static int ravb_poll(struct napi_struct *napi, int budget) { struct net_device *ndev = napi->dev; @@ -804,8 +912,13 @@ static int ravb_poll(struct napi_struct *napi, int budget) /* Re-enable RX/TX interrupts */ spin_lock_irqsave(&priv->lock, flags); - ravb_modify(ndev, RIC0, mask, mask); - ravb_modify(ndev, TIC, mask, mask); + if (priv->chip_id == RCAR_GEN2) { + ravb_modify(ndev, RIC0, mask, mask); + ravb_modify(ndev, TIC, mask, mask); + } else { + ravb_write(ndev, mask, RIE0); + ravb_write(ndev, mask, TIE); + } mmiowb(); spin_unlock_irqrestore(&priv->lock, flags); @@ -1208,35 +1321,72 @@ static const struct ethtool_ops ravb_ethtool_ops = { .get_ts_info = ravb_get_ts_info, }; +static inline int ravb_hook_irq(unsigned int irq, irq_handler_t handler, + struct net_device *ndev, struct device *dev, + const char *ch) +{ + char *name; + int error; + + name = devm_kasprintf(dev, GFP_KERNEL, "%s:%s", ndev->name, ch); + if (!name) + return -ENOMEM; + error = request_irq(irq, handler, 0, name, ndev); + if (error) + netdev_err(ndev, "cannot request IRQ %s\n", name); + + return error; +} + /* Network device open function for Ethernet AVB */ static int ravb_open(struct net_device *ndev) { struct ravb_private *priv = netdev_priv(ndev); + struct platform_device *pdev = priv->pdev; + struct device *dev = &pdev->dev; int error; napi_enable(&priv->napi[RAVB_BE]); napi_enable(&priv->napi[RAVB_NC]); - error = request_irq(ndev->irq, ravb_interrupt, IRQF_SHARED, ndev->name, - ndev); - if (error) { - netdev_err(ndev, "cannot request IRQ\n"); - goto out_napi_off; - } - - if (priv->chip_id == RCAR_GEN3) { - error = request_irq(priv->emac_irq, ravb_interrupt, - IRQF_SHARED, ndev->name, ndev); + if (priv->chip_id == RCAR_GEN2) { + error = request_irq(ndev->irq, ravb_interrupt, IRQF_SHARED, + ndev->name, ndev); if (error) { netdev_err(ndev, "cannot request IRQ\n"); - goto out_free_irq; + goto out_napi_off; } + } else { + error = 
ravb_hook_irq(ndev->irq, ravb_multi_interrupt, ndev, + dev, "ch22:multi"); + if (error) + goto out_napi_off; + error = ravb_hook_irq(priv->emac_irq, ravb_emac_interrupt, ndev, + dev, "ch24:emac"); + if (error) + goto out_free_irq; + error = ravb_hook_irq(priv->rx_irqs[RAVB_BE], ravb_be_interrupt, + ndev, dev, "ch0:rx_be"); + if (error) + goto out_free_irq_emac; + error = ravb_hook_irq(priv->tx_irqs[RAVB_BE], ravb_be_interrupt, + ndev, dev, "ch18:tx_be"); + if (error) + goto out_free_irq_be_rx; + error = ravb_hook_irq(priv->rx_irqs[RAVB_NC], ravb_nc_interrupt, + ndev, dev, "ch1:rx_nc"); + if (error) + goto out_free_irq_be_tx; + error = ravb_hook_irq(priv->tx_irqs[RAVB_NC], ravb_nc_interrupt, + ndev, dev, "ch19:tx_nc"); + if (error) + goto out_free_irq_nc_rx; } /* Device init */ error = ravb_dmac_init(ndev); if (error) - goto out_free_irq2; + goto out_free_irq_nc_tx; ravb_emac_init(ndev); /* Initialise PTP Clock driver */ @@ -1256,9 +1406,18 @@ out_ptp_stop: /* Stop PTP Clock driver */ if (priv->chip_id == RCAR_GEN2) ravb_ptp_stop(ndev); -out_free_irq2: - if (priv->chip_id == RCAR_GEN3) - free_irq(priv->emac_irq, ndev); +out_free_irq_nc_tx: + if (priv->chip_id == RCAR_GEN2) + goto out_free_irq; + free_irq(priv->tx_irqs[RAVB_NC], ndev); +out_free_irq_nc_rx: + free_irq(priv->rx_irqs[RAVB_NC], ndev); +out_free_irq_be_tx: + free_irq(priv->tx_irqs[RAVB_BE], ndev); +out_free_irq_be_rx: + free_irq(priv->rx_irqs[RAVB_BE], ndev); +out_free_irq_emac: + free_irq(priv->emac_irq, ndev); out_free_irq: free_irq(ndev->irq, ndev); out_napi_off: @@ -1713,6 +1872,7 @@ static int ravb_probe(struct platform_device *pdev) struct net_device *ndev; int error, irq, q; struct resource *res; + int i; if (!np) { dev_err(&pdev->dev, @@ -1782,6 +1942,22 @@ static int ravb_probe(struct platform_device *pdev) goto out_release; } priv->emac_irq = irq; + for (i = 0; i < NUM_RX_QUEUE; i++) { + irq = platform_get_irq_byname(pdev, ravb_rx_irqs[i]); + if (irq < 0) { + error = irq; + goto out_release; + } + priv->rx_irqs[i] = irq; + } + for (i = 0; i < NUM_TX_QUEUE; i++) { + irq = platform_get_irq_byname(pdev, ravb_tx_irqs[i]); + if (irq < 0) { + error = irq; + goto out_release; + } + priv->tx_irqs[i] = irq; + } } priv->chip_id = chip_id; diff --git a/drivers/net/ethernet/renesas/ravb_ptp.c b/drivers/net/ethernet/renesas/ravb_ptp.c index 57992ccc4657..f1b2cbb336e8 100644 --- a/drivers/net/ethernet/renesas/ravb_ptp.c +++ b/drivers/net/ethernet/renesas/ravb_ptp.c @@ -194,7 +194,12 @@ static int ravb_ptp_extts(struct ptp_clock_info *ptp, priv->ptp.extts[req->index] = on; spin_lock_irqsave(&priv->lock, flags); - ravb_modify(ndev, GIC, GIC_PTCE, on ? GIC_PTCE : 0); + if (priv->chip_id == RCAR_GEN2) + ravb_modify(ndev, GIC, GIC_PTCE, on ? 
GIC_PTCE : 0); + else if (on) + ravb_write(ndev, GIE_PTCS, GIE); + else + ravb_write(ndev, GID_PTCD, GID); mmiowb(); spin_unlock_irqrestore(&priv->lock, flags); @@ -241,7 +246,10 @@ static int ravb_ptp_perout(struct ptp_clock_info *ptp, error = ravb_ptp_update_compare(priv, (u32)start_ns); if (!error) { /* Unmask interrupt */ - ravb_modify(ndev, GIC, GIC_PTME, GIC_PTME); + if (priv->chip_id == RCAR_GEN2) + ravb_modify(ndev, GIC, GIC_PTME, GIC_PTME); + else + ravb_write(ndev, GIE_PTMS0, GIE); } } else { spin_lock_irqsave(&priv->lock, flags); @@ -250,7 +258,10 @@ static int ravb_ptp_perout(struct ptp_clock_info *ptp, perout->period = 0; /* Mask interrupt */ - ravb_modify(ndev, GIC, GIC_PTME, 0); + if (priv->chip_id == RCAR_GEN2) + ravb_modify(ndev, GIC, GIC_PTME, 0); + else + ravb_write(ndev, GID_PTMD0, GID); } mmiowb(); spin_unlock_irqrestore(&priv->lock, flags); From 96ec6310908a5f02450b18206d85e531f08cfa97 Mon Sep 17 00:00:00 2001 From: Moritz Fischer Date: Tue, 29 Mar 2016 19:11:11 -0700 Subject: [PATCH 0173/1649] net: macb: Fix coding style error message checkpatch.pl gave the following error: ERROR: space required before the open parenthesis '(' + for(; p < end; p++, offset += 4) Acked-by: Nicolas Ferre Acked-by: Michal Simek Signed-off-by: Moritz Fischer Signed-off-by: David S. Miller --- drivers/net/ethernet/cadence/macb.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c index 48a7d7dee846..b5aa96e8f1b5 100644 --- a/drivers/net/ethernet/cadence/macb.c +++ b/drivers/net/ethernet/cadence/macb.c @@ -499,7 +499,7 @@ static void macb_update_stats(struct macb *bp) WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4); - for(; p < end; p++, offset += 4) + for (; p < end; p++, offset += 4) *p += bp->macb_reg_readl(bp, offset); } From 64ec42fe272322c73ee159c68f7fb31896f65d1a Mon Sep 17 00:00:00 2001 From: Moritz Fischer Date: Tue, 29 Mar 2016 19:11:12 -0700 Subject: [PATCH 0174/1649] net: macb: Fix coding style warnings This commit takes care of the coding style warnings that are mostly due to a different comment style and lines over 80 chars, as well as a dangling else. Acked-by: Nicolas Ferre Signed-off-by: Moritz Fischer Signed-off-by: David S. Miller --- drivers/net/ethernet/cadence/macb.c | 100 ++++++++++++---------------- 1 file changed, 42 insertions(+), 58 deletions(-) diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c index b5aa96e8f1b5..f25681ba6418 100644 --- a/drivers/net/ethernet/cadence/macb.c +++ b/drivers/net/ethernet/cadence/macb.c @@ -61,8 +61,7 @@ #define MACB_WOL_HAS_MAGIC_PACKET (0x1 << 0) #define MACB_WOL_ENABLED (0x1 << 1) -/* - * Graceful stop timeouts in us. We should allow up to +/* Graceful stop timeouts in us. We should allow up to * 1 frame time (10 Mbits/s, full-duplex, ignoring collisions) */ #define MACB_HALT_TIMEOUT 1230 @@ -130,8 +129,7 @@ static void hw_writel(struct macb *bp, int offset, u32 value) writel_relaxed(value, bp->regs + offset); } -/* - * Find the CPU endianness by using the loopback bit of NCR register. When the +/* Find the CPU endianness by using the loopback bit of NCR register. When the * CPU is in big endian we need to program swaped mode for management * descriptor access. 
*/ @@ -386,7 +384,8 @@ static int macb_mii_probe(struct net_device *dev) pdata = dev_get_platdata(&bp->pdev->dev); if (pdata && gpio_is_valid(pdata->phy_irq_pin)) { - ret = devm_gpio_request(&bp->pdev->dev, pdata->phy_irq_pin, "phy int"); + ret = devm_gpio_request(&bp->pdev->dev, pdata->phy_irq_pin, + "phy int"); if (!ret) { phy_irq = gpio_to_irq(pdata->phy_irq_pin); phydev->irq = (phy_irq < 0) ? PHY_POLL : phy_irq; @@ -452,7 +451,8 @@ static int macb_mii_init(struct macb *bp) err = of_mdiobus_register(bp->mii_bus, np); /* fallback to standard phy registration if no phy were - found during dt phy registration */ + * found during dt phy registration + */ if (!err && !phy_find_first(bp->mii_bus)) { for (i = 0; i < PHY_MAX_ADDR; i++) { struct phy_device *phydev; @@ -567,8 +567,7 @@ static void macb_tx_error_task(struct work_struct *work) /* Make sure nobody is trying to queue up new packets */ netif_tx_stop_all_queues(bp->dev); - /* - * Stop transmission now + /* Stop transmission now * (in case we have just queued new packets) * macb/gem must be halted to write TBQP register */ @@ -576,8 +575,7 @@ static void macb_tx_error_task(struct work_struct *work) /* Just complain for now, reinitializing TX path can be good */ netdev_err(bp->dev, "BUG: halt tx timed out\n"); - /* - * Treat frames in TX queue including the ones that caused the error. + /* Treat frames in TX queue including the ones that caused the error. * Free transmit buffers in upper layer. */ for (tail = queue->tx_tail; tail != queue->tx_head; tail++) { @@ -607,10 +605,9 @@ static void macb_tx_error_task(struct work_struct *work) bp->stats.tx_bytes += skb->len; } } else { - /* - * "Buffers exhausted mid-frame" errors may only happen - * if the driver is buggy, so complain loudly about those. - * Statistics are updated by hardware. + /* "Buffers exhausted mid-frame" errors may only happen + * if the driver is buggy, so complain loudly about + * those. Statistics are updated by hardware. */ if (ctrl & MACB_BIT(TX_BUF_EXHAUSTED)) netdev_err(bp->dev, @@ -722,7 +719,8 @@ static void gem_rx_refill(struct macb *bp) struct sk_buff *skb; dma_addr_t paddr; - while (CIRC_SPACE(bp->rx_prepared_head, bp->rx_tail, RX_RING_SIZE) > 0) { + while (CIRC_SPACE(bp->rx_prepared_head, bp->rx_tail, + RX_RING_SIZE) > 0) { entry = macb_rx_ring_wrap(bp->rx_prepared_head); /* Make hw descriptor updates visible to CPU */ @@ -741,7 +739,8 @@ static void gem_rx_refill(struct macb *bp) /* now fill corresponding descriptor entry */ paddr = dma_map_single(&bp->pdev->dev, skb->data, - bp->rx_buffer_size, DMA_FROM_DEVICE); + bp->rx_buffer_size, + DMA_FROM_DEVICE); if (dma_mapping_error(&bp->pdev->dev, paddr)) { dev_kfree_skb(skb); break; @@ -777,14 +776,14 @@ static void discard_partial_frame(struct macb *bp, unsigned int begin, for (frag = begin; frag != end; frag++) { struct macb_dma_desc *desc = macb_rx_desc(bp, frag); + desc->addr &= ~MACB_BIT(RX_USED); } /* Make descriptor updates visible to hardware */ wmb(); - /* - * When this happens, the hardware stats registers for + /* When this happens, the hardware stats registers for * whatever caused this is updated, so we don't have to record * anything. */ @@ -883,8 +882,7 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag, macb_rx_ring_wrap(first_frag), macb_rx_ring_wrap(last_frag), len); - /* - * The ethernet header starts NET_IP_ALIGN bytes into the + /* The ethernet header starts NET_IP_ALIGN bytes into the * first buffer. Since the header is 14 bytes, this makes the * payload word-aligned. 
* @@ -1099,8 +1097,7 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id) (unsigned long)status); if (status & MACB_RX_INT_FLAGS) { - /* - * There's no point taking any more interrupts + /* There's no point taking any more interrupts * until we have processed the buffers. The * scheduling call may fail if the poll routine * is already scheduled, so disable interrupts @@ -1129,8 +1126,7 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id) if (status & MACB_BIT(TCOMP)) macb_tx_interrupt(queue); - /* - * Link change detection isn't possible with RMII, so we'll + /* Link change detection isn't possible with RMII, so we'll * add that if/when we get our hands on a full-blown MII PHY. */ @@ -1161,8 +1157,7 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id) } if (status & MACB_BIT(HRESP)) { - /* - * TODO: Reset the hardware, and maybe move the + /* TODO: Reset the hardware, and maybe move the * netdev_err to a lower-priority context as well * (work queue?) */ @@ -1181,8 +1176,7 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id) } #ifdef CONFIG_NET_POLL_CONTROLLER -/* - * Polling receive - used by netconsole and other diagnostic tools +/* Polling receive - used by netconsole and other diagnostic tools * to allow network i/o with interrupts disabled. */ static void macb_poll_controller(struct net_device *dev) @@ -1478,10 +1472,10 @@ static int gem_alloc_rx_buffers(struct macb *bp) bp->rx_skbuff = kzalloc(size, GFP_KERNEL); if (!bp->rx_skbuff) return -ENOMEM; - else - netdev_dbg(bp->dev, - "Allocated %d RX struct sk_buff entries at %p\n", - RX_RING_SIZE, bp->rx_skbuff); + + netdev_dbg(bp->dev, + "Allocated %d RX struct sk_buff entries at %p\n", + RX_RING_SIZE, bp->rx_skbuff); return 0; } @@ -1494,10 +1488,10 @@ static int macb_alloc_rx_buffers(struct macb *bp) &bp->rx_buffers_dma, GFP_KERNEL); if (!bp->rx_buffers) return -ENOMEM; - else - netdev_dbg(bp->dev, - "Allocated RX buffers of %d bytes at %08lx (mapped %p)\n", - size, (unsigned long)bp->rx_buffers_dma, bp->rx_buffers); + + netdev_dbg(bp->dev, + "Allocated RX buffers of %d bytes at %08lx (mapped %p)\n", + size, (unsigned long)bp->rx_buffers_dma, bp->rx_buffers); return 0; } @@ -1588,8 +1582,7 @@ static void macb_reset_hw(struct macb *bp) struct macb_queue *queue; unsigned int q; - /* - * Disable RX and TX (XXX: Should we halt the transmission + /* Disable RX and TX (XXX: Should we halt the transmission * more gracefully?) */ macb_writel(bp, NCR, 0); @@ -1652,8 +1645,7 @@ static u32 macb_mdc_clk_div(struct macb *bp) return config; } -/* - * Get the DMA bus width field of the network configuration register that we +/* Get the DMA bus width field of the network configuration register that we * should program. We find the width from decoding the design configuration * register to find the maximum supported data bus width. */ @@ -1673,8 +1665,7 @@ static u32 macb_dbw(struct macb *bp) } } -/* - * Configure the receive DMA engine +/* Configure the receive DMA engine * - use the correct receive buffer size * - set best burst length for DMA operations * (if not supported by FIFO, it will fallback to default) @@ -1762,8 +1753,7 @@ static void macb_init_hw(struct macb *bp) macb_writel(bp, NCR, MACB_BIT(RE) | MACB_BIT(TE) | MACB_BIT(MPE)); } -/* - * The hash address register is 64 bits long and takes up two +/* The hash address register is 64 bits long and takes up two * locations in the memory map. The least significant bits are stored * in EMAC_HSL and the most significant bits in EMAC_HSH. 
* @@ -1803,9 +1793,7 @@ static inline int hash_bit_value(int bitnr, __u8 *addr) return 0; } -/* - * Return the hash index value for the specified address. - */ +/* Return the hash index value for the specified address. */ static int hash_get_index(__u8 *addr) { int i, j, bitval; @@ -1821,9 +1809,7 @@ static int hash_get_index(__u8 *addr) return hash_index; } -/* - * Add multicast addresses to the internal multicast-hash table. - */ +/* Add multicast addresses to the internal multicast-hash table. */ static void macb_sethashtable(struct net_device *dev) { struct netdev_hw_addr *ha; @@ -1842,9 +1828,7 @@ static void macb_sethashtable(struct net_device *dev) macb_or_gem_writel(bp, HRT, mc_filter[1]); } -/* - * Enable/Disable promiscuous and multicast modes. - */ +/* Enable/Disable promiscuous and multicast modes. */ static void macb_set_rx_mode(struct net_device *dev) { unsigned long cfg; @@ -2161,9 +2145,8 @@ static void macb_get_regs(struct net_device *dev, struct ethtool_regs *regs, if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) regs_buff[12] = macb_or_gem_readl(bp, USRIO); - if (macb_is_gem(bp)) { + if (macb_is_gem(bp)) regs_buff[13] = gem_readl(bp, DMACFG); - } } static void macb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) @@ -2286,11 +2269,11 @@ static const struct net_device_ops macb_netdev_ops = { .ndo_set_features = macb_set_features, }; -/* - * Configure peripheral capabilities according to device tree +/* Configure peripheral capabilities according to device tree * and integration options used */ -static void macb_configure_caps(struct macb *bp, const struct macb_config *dt_conf) +static void macb_configure_caps(struct macb *bp, + const struct macb_config *dt_conf) { u32 dcfg; @@ -2996,6 +2979,7 @@ static int macb_probe(struct platform_device *pdev) phy_node = of_get_next_available_child(np, NULL); if (phy_node) { int gpio = of_get_named_gpio(phy_node, "reset-gpios", 0); + if (gpio_is_valid(gpio)) { bp->reset_gpio = gpio_to_desc(gpio); gpiod_direction_output(bp->reset_gpio, 1); From aa50b55262d459f69c7b32eb7ce38cde84cc089b Mon Sep 17 00:00:00 2001 From: Moritz Fischer Date: Tue, 29 Mar 2016 19:11:13 -0700 Subject: [PATCH 0175/1649] net: macb: Fix coding style suggestions This commit deals with a bunch of checkpatch suggestions that without changing behavior make checkpatch happier. Acked-by: Michal Simek Acked-by: Nicolas Ferre Signed-off-by: Moritz Fischer Signed-off-by: David S. 
Miller --- drivers/net/ethernet/cadence/macb.c | 46 +++++++++++++++-------------- 1 file changed, 24 insertions(+), 22 deletions(-) diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c index f25681ba6418..2ba0934e52dc 100644 --- a/drivers/net/ethernet/cadence/macb.c +++ b/drivers/net/ethernet/cadence/macb.c @@ -187,7 +187,7 @@ static void macb_get_hwaddr(struct macb *bp) pdata = dev_get_platdata(&bp->pdev->dev); - /* Check all 4 address register for vaild address */ + /* Check all 4 address register for valid address */ for (i = 0; i < 4; i++) { bottom = macb_or_gem_readl(bp, SA1B + i * 8); top = macb_or_gem_readl(bp, SA1T + i * 8); @@ -295,7 +295,7 @@ static void macb_set_tx_clk(struct clk *clk, int speed, struct net_device *dev) ferr = DIV_ROUND_UP(ferr, rate / 100000); if (ferr > 5) netdev_warn(dev, "unable to generate target frequency: %ld Hz\n", - rate); + rate); if (clk_set_rate(clk, rate_rounded)) netdev_err(dev, "adjusting tx_clk failed.\n"); @@ -429,7 +429,7 @@ static int macb_mii_init(struct macb *bp) macb_writel(bp, NCR, MACB_BIT(MPE)); bp->mii_bus = mdiobus_alloc(); - if (bp->mii_bus == NULL) { + if (!bp->mii_bus) { err = -ENOMEM; goto err_out; } @@ -438,7 +438,7 @@ static int macb_mii_init(struct macb *bp) bp->mii_bus->read = &macb_mdio_read; bp->mii_bus->write = &macb_mdio_write; snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", - bp->pdev->name, bp->pdev->id); + bp->pdev->name, bp->pdev->id); bp->mii_bus->priv = bp; bp->mii_bus->parent = &bp->dev->dev; pdata = dev_get_platdata(&bp->pdev->dev); @@ -659,7 +659,7 @@ static void macb_tx_interrupt(struct macb_queue *queue) queue_writel(queue, ISR, MACB_BIT(TCOMP)); netdev_vdbg(bp->dev, "macb_tx_interrupt status = 0x%03lx\n", - (unsigned long)status); + (unsigned long)status); head = queue->tx_head; for (tail = queue->tx_tail; tail != head; tail++) { @@ -728,10 +728,10 @@ static void gem_rx_refill(struct macb *bp) bp->rx_prepared_head++; - if (bp->rx_skbuff[entry] == NULL) { + if (!bp->rx_skbuff[entry]) { /* allocate sk_buff for this free entry in ring */ skb = netdev_alloc_skb(bp->dev, bp->rx_buffer_size); - if (unlikely(skb == NULL)) { + if (unlikely(!skb)) { netdev_err(bp->dev, "Unable to allocate sk_buff\n"); break; @@ -765,7 +765,7 @@ static void gem_rx_refill(struct macb *bp) wmb(); netdev_vdbg(bp->dev, "rx ring: prepared head %d, tail %d\n", - bp->rx_prepared_head, bp->rx_tail); + bp->rx_prepared_head, bp->rx_tail); } /* Mark DMA descriptors from begin up to and not including end as unused */ @@ -879,8 +879,8 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag, len = desc->ctrl & bp->rx_frm_len_mask; netdev_vdbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n", - macb_rx_ring_wrap(first_frag), - macb_rx_ring_wrap(last_frag), len); + macb_rx_ring_wrap(first_frag), + macb_rx_ring_wrap(last_frag), len); /* The ethernet header starts NET_IP_ALIGN bytes into the * first buffer. 
Since the header is 14 bytes, this makes the @@ -922,7 +922,8 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag, frag_len = len - offset; } skb_copy_to_linear_data_offset(skb, offset, - macb_rx_buffer(bp, frag), frag_len); + macb_rx_buffer(bp, frag), + frag_len); offset += bp->rx_buffer_size; desc = macb_rx_desc(bp, frag); desc->addr &= ~MACB_BIT(RX_USED); @@ -940,7 +941,7 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag, bp->stats.rx_packets++; bp->stats.rx_bytes += skb->len; netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n", - skb->len, skb->csum); + skb->len, skb->csum); netif_receive_skb(skb); return 0; @@ -1047,7 +1048,7 @@ static int macb_poll(struct napi_struct *napi, int budget) work_done = 0; netdev_vdbg(bp->dev, "poll: status = %08lx, budget = %d\n", - (unsigned long)status, budget); + (unsigned long)status, budget); work_done = bp->macbgem_ops.mog_rx(bp, budget); if (work_done < budget) { @@ -1262,7 +1263,7 @@ static unsigned int macb_tx_map(struct macb *bp, } /* Should never happen */ - if (unlikely(tx_skb == NULL)) { + if (unlikely(!tx_skb)) { netdev_err(bp->dev, "BUG! empty skb!\n"); return 0; } @@ -1332,16 +1333,16 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev) #if defined(DEBUG) && defined(VERBOSE_DEBUG) netdev_vdbg(bp->dev, - "start_xmit: queue %hu len %u head %p data %p tail %p end %p\n", - queue_index, skb->len, skb->head, skb->data, - skb_tail_pointer(skb), skb_end_pointer(skb)); + "start_xmit: queue %hu len %u head %p data %p tail %p end %p\n", + queue_index, skb->len, skb->head, skb->data, + skb_tail_pointer(skb), skb_end_pointer(skb)); print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_OFFSET, 16, 1, skb->data, 16, true); #endif /* Count how many TX buffer descriptors are needed to send this * socket buffer: skb fragments of jumbo frames may need to be - * splitted into many buffer descriptors. + * split into many buffer descriptors. */ count = DIV_ROUND_UP(skb_headlen(skb), bp->max_tx_length); nr_frags = skb_shinfo(skb)->nr_frags; @@ -1392,8 +1393,8 @@ static void macb_init_rx_buffer_size(struct macb *bp, size_t size) if (bp->rx_buffer_size % RX_BUFFER_MULTIPLE) { netdev_dbg(bp->dev, - "RX buffer must be multiple of %d bytes, expanding\n", - RX_BUFFER_MULTIPLE); + "RX buffer must be multiple of %d bytes, expanding\n", + RX_BUFFER_MULTIPLE); bp->rx_buffer_size = roundup(bp->rx_buffer_size, RX_BUFFER_MULTIPLE); } @@ -1416,7 +1417,7 @@ static void gem_free_rx_buffers(struct macb *bp) for (i = 0; i < RX_RING_SIZE; i++) { skb = bp->rx_skbuff[i]; - if (skb == NULL) + if (!skb) continue; desc = &bp->rx_ring[i]; @@ -1817,7 +1818,8 @@ static void macb_sethashtable(struct net_device *dev) unsigned int bitnr; struct macb *bp = netdev_priv(dev); - mc_filter[0] = mc_filter[1] = 0; + mc_filter[0] = 0; + mc_filter[1] = 0; netdev_for_each_mc_addr(ha, dev) { bitnr = hash_get_index(ha->addr); From eefb52d1ec8eb1354ff1bf55811a0da74bffccb8 Mon Sep 17 00:00:00 2001 From: Moritz Fischer Date: Tue, 29 Mar 2016 19:11:14 -0700 Subject: [PATCH 0176/1649] net: macb: Use ether_addr_copy over memcpy Checkpatch suggests using ether_addr_copy over memcpy to copy the mac address. Acked-by: Michal Simek Acked-by: Nicolas Ferre Signed-off-by: Moritz Fischer Signed-off-by: David S. 
Miller --- drivers/net/ethernet/cadence/macb.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c index 2ba0934e52dc..01a8ffbfc464 100644 --- a/drivers/net/ethernet/cadence/macb.c +++ b/drivers/net/ethernet/cadence/macb.c @@ -2973,7 +2973,7 @@ static int macb_probe(struct platform_device *pdev) mac = of_get_mac_address(np); if (mac) - memcpy(bp->dev->dev_addr, mac, ETH_ALEN); + ether_addr_copy(bp->dev->dev_addr, mac); else macb_get_hwaddr(bp); From 88023beb2a467dcfd9aa958138f0f3b5e1c432e0 Mon Sep 17 00:00:00 2001 From: Moritz Fischer Date: Tue, 29 Mar 2016 19:11:15 -0700 Subject: [PATCH 0177/1649] net: macb: Fix simple typo Acked-by: Michal Simek Acked-by: Nicolas Ferre Signed-off-by: Moritz Fischer Signed-off-by: David S. Miller --- drivers/net/ethernet/cadence/macb.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c index 01a8ffbfc464..eec3200ade4a 100644 --- a/drivers/net/ethernet/cadence/macb.c +++ b/drivers/net/ethernet/cadence/macb.c @@ -130,7 +130,7 @@ static void hw_writel(struct macb *bp, int offset, u32 value) } /* Find the CPU endianness by using the loopback bit of NCR register. When the - * CPU is in big endian we need to program swaped mode for management + * CPU is in big endian we need to program swapped mode for management * descriptor access. */ static bool hw_is_native_io(void __iomem *addr) From 9ef280c6c28f0c01aa9d909263ad47c796713a8e Mon Sep 17 00:00:00 2001 From: Simon Horman Date: Mon, 4 Apr 2016 10:44:39 +0900 Subject: [PATCH 0178/1649] irda: sh_irda: remove driver Remove the sh-irda driver as it appears to be unused since c0bb9b302769 ("ARCH: ARM: shmobile: Remove ag5evm board support"). Signed-off-by: Simon Horman Signed-off-by: David S. Miller --- drivers/net/irda/Kconfig | 7 - drivers/net/irda/Makefile | 1 - drivers/net/irda/sh_irda.c | 875 ------------------------------------- 3 files changed, 883 deletions(-) delete mode 100644 drivers/net/irda/sh_irda.c diff --git a/drivers/net/irda/Kconfig b/drivers/net/irda/Kconfig index a2c227bfb687..e070e1222733 100644 --- a/drivers/net/irda/Kconfig +++ b/drivers/net/irda/Kconfig @@ -394,12 +394,5 @@ config MCS_FIR To compile it as a module, choose M here: the module will be called mcs7780. -config SH_IRDA - tristate "SuperH IrDA driver" - depends on IRDA - depends on (ARCH_SHMOBILE || COMPILE_TEST) && HAS_IOMEM - help - Say Y here if your want to enable SuperH IrDA devices. - endmenu diff --git a/drivers/net/irda/Makefile b/drivers/net/irda/Makefile index be8ab5b9a4a2..4c344433dae5 100644 --- a/drivers/net/irda/Makefile +++ b/drivers/net/irda/Makefile @@ -19,7 +19,6 @@ obj-$(CONFIG_VIA_FIR) += via-ircc.o obj-$(CONFIG_PXA_FICP) += pxaficp_ir.o obj-$(CONFIG_MCS_FIR) += mcs7780.o obj-$(CONFIG_AU1000_FIR) += au1k_ir.o -obj-$(CONFIG_SH_IRDA) += sh_irda.o # SIR drivers obj-$(CONFIG_IRTTY_SIR) += irtty-sir.o sir-dev.o obj-$(CONFIG_BFIN_SIR) += bfin_sir.o diff --git a/drivers/net/irda/sh_irda.c b/drivers/net/irda/sh_irda.c deleted file mode 100644 index c96b46b2c3a8..000000000000 --- a/drivers/net/irda/sh_irda.c +++ /dev/null @@ -1,875 +0,0 @@ -/* - * SuperH IrDA Driver - * - * Copyright (C) 2010 Renesas Solutions Corp. - * Kuninori Morimoto - * - * Based on sh_sir.c - * Copyright (C) 2009 Renesas Solutions Corp. - * Copyright 2006-2009 Analog Devices Inc. 
- * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -/* - * CAUTION - * - * This driver is very simple. - * So, it doesn't have below support now - * - MIR/FIR support - * - DMA transfer support - * - FIFO mode support - */ -#include -#include -#include -#include -#include -#include -#include -#include - -#define DRIVER_NAME "sh_irda" - -#define __IRDARAM_LEN 0x1039 - -#define IRTMR 0x1F00 /* Transfer mode */ -#define IRCFR 0x1F02 /* Configuration */ -#define IRCTR 0x1F04 /* IR control */ -#define IRTFLR 0x1F20 /* Transmit frame length */ -#define IRTCTR 0x1F22 /* Transmit control */ -#define IRRFLR 0x1F40 /* Receive frame length */ -#define IRRCTR 0x1F42 /* Receive control */ -#define SIRISR 0x1F60 /* SIR-UART mode interrupt source */ -#define SIRIMR 0x1F62 /* SIR-UART mode interrupt mask */ -#define SIRICR 0x1F64 /* SIR-UART mode interrupt clear */ -#define SIRBCR 0x1F68 /* SIR-UART mode baud rate count */ -#define MFIRISR 0x1F70 /* MIR/FIR mode interrupt source */ -#define MFIRIMR 0x1F72 /* MIR/FIR mode interrupt mask */ -#define MFIRICR 0x1F74 /* MIR/FIR mode interrupt clear */ -#define CRCCTR 0x1F80 /* CRC engine control */ -#define CRCIR 0x1F86 /* CRC engine input data */ -#define CRCCR 0x1F8A /* CRC engine calculation */ -#define CRCOR 0x1F8E /* CRC engine output data */ -#define FIFOCP 0x1FC0 /* FIFO current pointer */ -#define FIFOFP 0x1FC2 /* FIFO follow pointer */ -#define FIFORSMSK 0x1FC4 /* FIFO receive status mask */ -#define FIFORSOR 0x1FC6 /* FIFO receive status OR */ -#define FIFOSEL 0x1FC8 /* FIFO select */ -#define FIFORS 0x1FCA /* FIFO receive status */ -#define FIFORFL 0x1FCC /* FIFO receive frame length */ -#define FIFORAMCP 0x1FCE /* FIFO RAM current pointer */ -#define FIFORAMFP 0x1FD0 /* FIFO RAM follow pointer */ -#define BIFCTL 0x1FD2 /* BUS interface control */ -#define IRDARAM 0x0000 /* IrDA buffer RAM */ -#define IRDARAM_LEN __IRDARAM_LEN /* - 8/16/32 (read-only for 32) */ - -/* IRTMR */ -#define TMD_MASK (0x3 << 14) /* Transfer Mode */ -#define TMD_SIR (0x0 << 14) -#define TMD_MIR (0x3 << 14) -#define TMD_FIR (0x2 << 14) - -#define FIFORIM (1 << 8) /* FIFO receive interrupt mask */ -#define MIM (1 << 4) /* MIR/FIR Interrupt Mask */ -#define SIM (1 << 0) /* SIR Interrupt Mask */ -#define xIM_MASK (FIFORIM | MIM | SIM) - -/* IRCFR */ -#define RTO_SHIFT 8 /* shift for Receive Timeout */ -#define RTO (0x3 << RTO_SHIFT) - -/* IRTCTR */ -#define ARMOD (1 << 15) /* Auto-Receive Mode */ -#define TE (1 << 0) /* Transmit Enable */ - -/* IRRFLR */ -#define RFL_MASK (0x1FFF) /* mask for Receive Frame Length */ - -/* IRRCTR */ -#define RE (1 << 0) /* Receive Enable */ - -/* - * SIRISR, SIRIMR, SIRICR, - * MFIRISR, MFIRIMR, MFIRICR - */ -#define FRE (1 << 15) /* Frame Receive End */ -#define TROV (1 << 11) /* Transfer Area Overflow */ -#define xIR_9 (1 << 9) -#define TOT xIR_9 /* for SIR Timeout */ -#define ABTD xIR_9 /* for MIR/FIR Abort Detection */ -#define xIR_8 (1 << 8) -#define FER xIR_8 /* for SIR Framing Error */ -#define CRCER xIR_8 /* for MIR/FIR CRC error */ -#define FTE (1 << 7) /* Frame Transmit End */ -#define xIR_MASK (FRE | TROV | xIR_9 | xIR_8 | FTE) - -/* SIRBCR */ -#define BRC_MASK (0x3F) /* mask for Baud Rate Count */ - -/* CRCCTR */ -#define CRC_RST (1 << 15) /* CRC Engine Reset */ -#define CRC_CT_MASK 0x0FFF /* mask for CRC Engine Input Data Count */ - -/* CRCIR */ -#define CRC_IN_MASK 0x0FFF /* mask for 
CRC Engine Input Data */ - -/************************************************************************ - - - enum / structure - - -************************************************************************/ -enum sh_irda_mode { - SH_IRDA_NONE = 0, - SH_IRDA_SIR, - SH_IRDA_MIR, - SH_IRDA_FIR, -}; - -struct sh_irda_self; -struct sh_irda_xir_func { - int (*xir_fre) (struct sh_irda_self *self); - int (*xir_trov) (struct sh_irda_self *self); - int (*xir_9) (struct sh_irda_self *self); - int (*xir_8) (struct sh_irda_self *self); - int (*xir_fte) (struct sh_irda_self *self); -}; - -struct sh_irda_self { - void __iomem *membase; - unsigned int irq; - struct platform_device *pdev; - - struct net_device *ndev; - - struct irlap_cb *irlap; - struct qos_info qos; - - iobuff_t tx_buff; - iobuff_t rx_buff; - - enum sh_irda_mode mode; - spinlock_t lock; - - struct sh_irda_xir_func *xir_func; -}; - -/************************************************************************ - - - common function - - -************************************************************************/ -static void sh_irda_write(struct sh_irda_self *self, u32 offset, u16 data) -{ - unsigned long flags; - - spin_lock_irqsave(&self->lock, flags); - iowrite16(data, self->membase + offset); - spin_unlock_irqrestore(&self->lock, flags); -} - -static u16 sh_irda_read(struct sh_irda_self *self, u32 offset) -{ - unsigned long flags; - u16 ret; - - spin_lock_irqsave(&self->lock, flags); - ret = ioread16(self->membase + offset); - spin_unlock_irqrestore(&self->lock, flags); - - return ret; -} - -static void sh_irda_update_bits(struct sh_irda_self *self, u32 offset, - u16 mask, u16 data) -{ - unsigned long flags; - u16 old, new; - - spin_lock_irqsave(&self->lock, flags); - old = ioread16(self->membase + offset); - new = (old & ~mask) | data; - if (old != new) - iowrite16(data, self->membase + offset); - spin_unlock_irqrestore(&self->lock, flags); -} - -/************************************************************************ - - - mode function - - -************************************************************************/ -/*===================================== - * - * common - * - *=====================================*/ -static void sh_irda_rcv_ctrl(struct sh_irda_self *self, int enable) -{ - struct device *dev = &self->ndev->dev; - - sh_irda_update_bits(self, IRRCTR, RE, enable ? RE : 0); - dev_dbg(dev, "recv %s\n", enable ? 
"enable" : "disable"); -} - -static int sh_irda_set_timeout(struct sh_irda_self *self, int interval) -{ - struct device *dev = &self->ndev->dev; - - if (SH_IRDA_SIR != self->mode) - interval = 0; - - if (interval < 0 || interval > 2) { - dev_err(dev, "unsupported timeout interval\n"); - return -EINVAL; - } - - sh_irda_update_bits(self, IRCFR, RTO, interval << RTO_SHIFT); - return 0; -} - -static int sh_irda_set_baudrate(struct sh_irda_self *self, int baudrate) -{ - struct device *dev = &self->ndev->dev; - u16 val; - - if (baudrate < 0) - return 0; - - if (SH_IRDA_SIR != self->mode) { - dev_err(dev, "it is not SIR mode\n"); - return -EINVAL; - } - - /* - * Baud rate (bits/s) = - * (48 MHz / 26) / (baud rate counter value + 1) x 16 - */ - val = (48000000 / 26 / 16 / baudrate) - 1; - dev_dbg(dev, "baudrate = %d, val = 0x%02x\n", baudrate, val); - - sh_irda_update_bits(self, SIRBCR, BRC_MASK, val); - - return 0; -} - -static int sh_irda_get_rcv_length(struct sh_irda_self *self) -{ - return RFL_MASK & sh_irda_read(self, IRRFLR); -} - -/*===================================== - * - * NONE MODE - * - *=====================================*/ -static int sh_irda_xir_fre(struct sh_irda_self *self) -{ - struct device *dev = &self->ndev->dev; - dev_err(dev, "none mode: frame recv\n"); - return 0; -} - -static int sh_irda_xir_trov(struct sh_irda_self *self) -{ - struct device *dev = &self->ndev->dev; - dev_err(dev, "none mode: buffer ram over\n"); - return 0; -} - -static int sh_irda_xir_9(struct sh_irda_self *self) -{ - struct device *dev = &self->ndev->dev; - dev_err(dev, "none mode: time over\n"); - return 0; -} - -static int sh_irda_xir_8(struct sh_irda_self *self) -{ - struct device *dev = &self->ndev->dev; - dev_err(dev, "none mode: framing error\n"); - return 0; -} - -static int sh_irda_xir_fte(struct sh_irda_self *self) -{ - struct device *dev = &self->ndev->dev; - dev_err(dev, "none mode: frame transmit end\n"); - return 0; -} - -static struct sh_irda_xir_func sh_irda_xir_func = { - .xir_fre = sh_irda_xir_fre, - .xir_trov = sh_irda_xir_trov, - .xir_9 = sh_irda_xir_9, - .xir_8 = sh_irda_xir_8, - .xir_fte = sh_irda_xir_fte, -}; - -/*===================================== - * - * MIR/FIR MODE - * - * MIR/FIR are not supported now - *=====================================*/ -static struct sh_irda_xir_func sh_irda_mfir_func = { - .xir_fre = sh_irda_xir_fre, - .xir_trov = sh_irda_xir_trov, - .xir_9 = sh_irda_xir_9, - .xir_8 = sh_irda_xir_8, - .xir_fte = sh_irda_xir_fte, -}; - -/*===================================== - * - * SIR MODE - * - *=====================================*/ -static int sh_irda_sir_fre(struct sh_irda_self *self) -{ - struct device *dev = &self->ndev->dev; - u16 data16; - u8 *data = (u8 *)&data16; - int len = sh_irda_get_rcv_length(self); - int i, j; - - if (len > IRDARAM_LEN) - len = IRDARAM_LEN; - - dev_dbg(dev, "frame recv length = %d\n", len); - - for (i = 0; i < len; i++) { - j = i % 2; - if (!j) - data16 = sh_irda_read(self, IRDARAM + i); - - async_unwrap_char(self->ndev, &self->ndev->stats, - &self->rx_buff, data[j]); - } - self->ndev->last_rx = jiffies; - - sh_irda_rcv_ctrl(self, 1); - - return 0; -} - -static int sh_irda_sir_trov(struct sh_irda_self *self) -{ - struct device *dev = &self->ndev->dev; - - dev_err(dev, "buffer ram over\n"); - sh_irda_rcv_ctrl(self, 1); - return 0; -} - -static int sh_irda_sir_tot(struct sh_irda_self *self) -{ - struct device *dev = &self->ndev->dev; - - dev_err(dev, "time over\n"); - sh_irda_set_baudrate(self, 9600); - sh_irda_rcv_ctrl(self, 
1); - return 0; -} - -static int sh_irda_sir_fer(struct sh_irda_self *self) -{ - struct device *dev = &self->ndev->dev; - - dev_err(dev, "framing error\n"); - sh_irda_rcv_ctrl(self, 1); - return 0; -} - -static int sh_irda_sir_fte(struct sh_irda_self *self) -{ - struct device *dev = &self->ndev->dev; - - dev_dbg(dev, "frame transmit end\n"); - netif_wake_queue(self->ndev); - - return 0; -} - -static struct sh_irda_xir_func sh_irda_sir_func = { - .xir_fre = sh_irda_sir_fre, - .xir_trov = sh_irda_sir_trov, - .xir_9 = sh_irda_sir_tot, - .xir_8 = sh_irda_sir_fer, - .xir_fte = sh_irda_sir_fte, -}; - -static void sh_irda_set_mode(struct sh_irda_self *self, enum sh_irda_mode mode) -{ - struct device *dev = &self->ndev->dev; - struct sh_irda_xir_func *func; - const char *name; - u16 data; - - switch (mode) { - case SH_IRDA_SIR: - name = "SIR"; - data = TMD_SIR; - func = &sh_irda_sir_func; - break; - case SH_IRDA_MIR: - name = "MIR"; - data = TMD_MIR; - func = &sh_irda_mfir_func; - break; - case SH_IRDA_FIR: - name = "FIR"; - data = TMD_FIR; - func = &sh_irda_mfir_func; - break; - default: - name = "NONE"; - data = 0; - func = &sh_irda_xir_func; - break; - } - - self->mode = mode; - self->xir_func = func; - sh_irda_update_bits(self, IRTMR, TMD_MASK, data); - - dev_dbg(dev, "switch to %s mode", name); -} - -/************************************************************************ - - - irq function - - -************************************************************************/ -static void sh_irda_set_irq_mask(struct sh_irda_self *self) -{ - u16 tmr_hole; - u16 xir_reg; - - /* set all mask */ - sh_irda_update_bits(self, IRTMR, xIM_MASK, xIM_MASK); - sh_irda_update_bits(self, SIRIMR, xIR_MASK, xIR_MASK); - sh_irda_update_bits(self, MFIRIMR, xIR_MASK, xIR_MASK); - - /* clear irq */ - sh_irda_update_bits(self, SIRICR, xIR_MASK, xIR_MASK); - sh_irda_update_bits(self, MFIRICR, xIR_MASK, xIR_MASK); - - switch (self->mode) { - case SH_IRDA_SIR: - tmr_hole = SIM; - xir_reg = SIRIMR; - break; - case SH_IRDA_MIR: - case SH_IRDA_FIR: - tmr_hole = MIM; - xir_reg = MFIRIMR; - break; - default: - tmr_hole = 0; - xir_reg = 0; - break; - } - - /* open mask */ - if (xir_reg) { - sh_irda_update_bits(self, IRTMR, tmr_hole, 0); - sh_irda_update_bits(self, xir_reg, xIR_MASK, 0); - } -} - -static irqreturn_t sh_irda_irq(int irq, void *dev_id) -{ - struct sh_irda_self *self = dev_id; - struct sh_irda_xir_func *func = self->xir_func; - u16 isr = sh_irda_read(self, SIRISR); - - /* clear irq */ - sh_irda_write(self, SIRICR, isr); - - if (isr & FRE) - func->xir_fre(self); - if (isr & TROV) - func->xir_trov(self); - if (isr & xIR_9) - func->xir_9(self); - if (isr & xIR_8) - func->xir_8(self); - if (isr & FTE) - func->xir_fte(self); - - return IRQ_HANDLED; -} - -/************************************************************************ - - - CRC function - - -************************************************************************/ -static void sh_irda_crc_reset(struct sh_irda_self *self) -{ - sh_irda_write(self, CRCCTR, CRC_RST); -} - -static void sh_irda_crc_add(struct sh_irda_self *self, u16 data) -{ - sh_irda_write(self, CRCIR, data & CRC_IN_MASK); -} - -static u16 sh_irda_crc_cnt(struct sh_irda_self *self) -{ - return CRC_CT_MASK & sh_irda_read(self, CRCCTR); -} - -static u16 sh_irda_crc_out(struct sh_irda_self *self) -{ - return sh_irda_read(self, CRCOR); -} - -static int sh_irda_crc_init(struct sh_irda_self *self) -{ - struct device *dev = &self->ndev->dev; - int ret = -EIO; - u16 val; - - sh_irda_crc_reset(self); - - 
sh_irda_crc_add(self, 0xCC); - sh_irda_crc_add(self, 0xF5); - sh_irda_crc_add(self, 0xF1); - sh_irda_crc_add(self, 0xA7); - - val = sh_irda_crc_cnt(self); - if (4 != val) { - dev_err(dev, "CRC count error %x\n", val); - goto crc_init_out; - } - - val = sh_irda_crc_out(self); - if (0x51DF != val) { - dev_err(dev, "CRC result error%x\n", val); - goto crc_init_out; - } - - ret = 0; - -crc_init_out: - - sh_irda_crc_reset(self); - return ret; -} - -/************************************************************************ - - - iobuf function - - -************************************************************************/ -static void sh_irda_remove_iobuf(struct sh_irda_self *self) -{ - kfree(self->rx_buff.head); - - self->tx_buff.head = NULL; - self->tx_buff.data = NULL; - self->rx_buff.head = NULL; - self->rx_buff.data = NULL; -} - -static int sh_irda_init_iobuf(struct sh_irda_self *self, int rxsize, int txsize) -{ - if (self->rx_buff.head || - self->tx_buff.head) { - dev_err(&self->ndev->dev, "iobuff has already existed."); - return -EINVAL; - } - - /* rx_buff */ - self->rx_buff.head = kmalloc(rxsize, GFP_KERNEL); - if (!self->rx_buff.head) - return -ENOMEM; - - self->rx_buff.truesize = rxsize; - self->rx_buff.in_frame = FALSE; - self->rx_buff.state = OUTSIDE_FRAME; - self->rx_buff.data = self->rx_buff.head; - - /* tx_buff */ - self->tx_buff.head = self->membase + IRDARAM; - self->tx_buff.truesize = IRDARAM_LEN; - - return 0; -} - -/************************************************************************ - - - net_device_ops function - - -************************************************************************/ -static int sh_irda_hard_xmit(struct sk_buff *skb, struct net_device *ndev) -{ - struct sh_irda_self *self = netdev_priv(ndev); - struct device *dev = &self->ndev->dev; - int speed = irda_get_next_speed(skb); - int ret; - - dev_dbg(dev, "hard xmit\n"); - - netif_stop_queue(ndev); - sh_irda_rcv_ctrl(self, 0); - - ret = sh_irda_set_baudrate(self, speed); - if (ret < 0) - goto sh_irda_hard_xmit_end; - - self->tx_buff.len = 0; - if (skb->len) { - unsigned long flags; - - spin_lock_irqsave(&self->lock, flags); - self->tx_buff.len = async_wrap_skb(skb, - self->tx_buff.head, - self->tx_buff.truesize); - spin_unlock_irqrestore(&self->lock, flags); - - if (self->tx_buff.len > self->tx_buff.truesize) - self->tx_buff.len = self->tx_buff.truesize; - - sh_irda_write(self, IRTFLR, self->tx_buff.len); - sh_irda_write(self, IRTCTR, ARMOD | TE); - } else - goto sh_irda_hard_xmit_end; - - dev_kfree_skb(skb); - - return 0; - -sh_irda_hard_xmit_end: - sh_irda_set_baudrate(self, 9600); - netif_wake_queue(self->ndev); - sh_irda_rcv_ctrl(self, 1); - dev_kfree_skb(skb); - - return ret; - -} - -static int sh_irda_ioctl(struct net_device *ndev, struct ifreq *ifreq, int cmd) -{ - /* - * FIXME - * - * This function is needed for irda framework. 
- * But nothing to do now - */ - return 0; -} - -static struct net_device_stats *sh_irda_stats(struct net_device *ndev) -{ - struct sh_irda_self *self = netdev_priv(ndev); - - return &self->ndev->stats; -} - -static int sh_irda_open(struct net_device *ndev) -{ - struct sh_irda_self *self = netdev_priv(ndev); - int err; - - pm_runtime_get_sync(&self->pdev->dev); - err = sh_irda_crc_init(self); - if (err) - goto open_err; - - sh_irda_set_mode(self, SH_IRDA_SIR); - sh_irda_set_timeout(self, 2); - sh_irda_set_baudrate(self, 9600); - - self->irlap = irlap_open(ndev, &self->qos, DRIVER_NAME); - if (!self->irlap) { - err = -ENODEV; - goto open_err; - } - - netif_start_queue(ndev); - sh_irda_rcv_ctrl(self, 1); - sh_irda_set_irq_mask(self); - - dev_info(&ndev->dev, "opened\n"); - - return 0; - -open_err: - pm_runtime_put_sync(&self->pdev->dev); - - return err; -} - -static int sh_irda_stop(struct net_device *ndev) -{ - struct sh_irda_self *self = netdev_priv(ndev); - - /* Stop IrLAP */ - if (self->irlap) { - irlap_close(self->irlap); - self->irlap = NULL; - } - - netif_stop_queue(ndev); - pm_runtime_put_sync(&self->pdev->dev); - - dev_info(&ndev->dev, "stopped\n"); - - return 0; -} - -static const struct net_device_ops sh_irda_ndo = { - .ndo_open = sh_irda_open, - .ndo_stop = sh_irda_stop, - .ndo_start_xmit = sh_irda_hard_xmit, - .ndo_do_ioctl = sh_irda_ioctl, - .ndo_get_stats = sh_irda_stats, -}; - -/************************************************************************ - - - platform_driver function - - -************************************************************************/ -static int sh_irda_probe(struct platform_device *pdev) -{ - struct net_device *ndev; - struct sh_irda_self *self; - struct resource *res; - int irq; - int err = -ENOMEM; - - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - irq = platform_get_irq(pdev, 0); - if (!res || irq < 0) { - dev_err(&pdev->dev, "Not enough platform resources.\n"); - goto exit; - } - - ndev = alloc_irdadev(sizeof(*self)); - if (!ndev) - goto exit; - - self = netdev_priv(ndev); - self->membase = ioremap_nocache(res->start, resource_size(res)); - if (!self->membase) { - err = -ENXIO; - dev_err(&pdev->dev, "Unable to ioremap.\n"); - goto err_mem_1; - } - - err = sh_irda_init_iobuf(self, IRDA_SKB_MAX_MTU, IRDA_SIR_MAX_FRAME); - if (err) - goto err_mem_2; - - self->pdev = pdev; - pm_runtime_enable(&pdev->dev); - - irda_init_max_qos_capabilies(&self->qos); - - ndev->netdev_ops = &sh_irda_ndo; - ndev->irq = irq; - - self->ndev = ndev; - self->qos.baud_rate.bits &= IR_9600; /* FIXME */ - self->qos.min_turn_time.bits = 1; /* 10 ms or more */ - spin_lock_init(&self->lock); - - irda_qos_bits_to_value(&self->qos); - - err = register_netdev(ndev); - if (err) - goto err_mem_4; - - platform_set_drvdata(pdev, ndev); - err = devm_request_irq(&pdev->dev, irq, sh_irda_irq, 0, "sh_irda", self); - if (err) { - dev_warn(&pdev->dev, "Unable to attach sh_irda interrupt\n"); - goto err_mem_4; - } - - dev_info(&pdev->dev, "SuperH IrDA probed\n"); - - goto exit; - -err_mem_4: - pm_runtime_disable(&pdev->dev); - sh_irda_remove_iobuf(self); -err_mem_2: - iounmap(self->membase); -err_mem_1: - free_netdev(ndev); -exit: - return err; -} - -static int sh_irda_remove(struct platform_device *pdev) -{ - struct net_device *ndev = platform_get_drvdata(pdev); - struct sh_irda_self *self = netdev_priv(ndev); - - if (!self) - return 0; - - unregister_netdev(ndev); - pm_runtime_disable(&pdev->dev); - sh_irda_remove_iobuf(self); - iounmap(self->membase); - free_netdev(ndev); - - 
return 0; -} - -static int sh_irda_runtime_nop(struct device *dev) -{ - /* Runtime PM callback shared between ->runtime_suspend() - * and ->runtime_resume(). Simply returns success. - * - * This driver re-initializes all registers after - * pm_runtime_get_sync() anyway so there is no need - * to save and restore registers here. - */ - return 0; -} - -static const struct dev_pm_ops sh_irda_pm_ops = { - .runtime_suspend = sh_irda_runtime_nop, - .runtime_resume = sh_irda_runtime_nop, -}; - -static struct platform_driver sh_irda_driver = { - .probe = sh_irda_probe, - .remove = sh_irda_remove, - .driver = { - .name = DRIVER_NAME, - .pm = &sh_irda_pm_ops, - }, -}; - -module_platform_driver(sh_irda_driver); - -MODULE_AUTHOR("Kuninori Morimoto "); -MODULE_DESCRIPTION("SuperH IrDA driver"); -MODULE_LICENSE("GPL"); From 06bb1c39d8be0b2ee60b5bc9384fdac6e19bc270 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Wed, 6 Jan 2016 22:48:50 -0800 Subject: [PATCH 0179/1649] ixgbe: Avoid adding VLAN 0 twice to VLVF and VFTA We were adding VLAN 0 twice each time we restored the VLAN configuration. Instead of doing it twice we can just start working through the active VLANs from ID 1 on and skip the double write. Signed-off-by: Alexander Duyck Tested-by: Phil Schmitt Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index a01a7f251e03..297e7d32adfd 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -4172,11 +4172,11 @@ static void ixgbe_vlan_promisc_disable(struct ixgbe_adapter *adapter) static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter) { - u16 vid; + u16 vid = 1; ixgbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0); - for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) + for_each_set_bit_from(vid, adapter->active_vlans, VLAN_N_VID) ixgbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid); } From 37689010da28c6dfd9f59e60d7f42c47b775171c Mon Sep 17 00:00:00 2001 From: Mark Rustad Date: Thu, 7 Jan 2016 10:13:03 -0800 Subject: [PATCH 0180/1649] ixgbe: Make all unchanging ops structures const The source for the ops structure contents are const, so make them so. Copy them in place with structure assignments instead of memcpys. Make the mbx_ops accessed by reference instead of making a copy of the source structure. Update copyright date on the touched files. 
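For illustration, the two idioms this change contrasts can be sketched in isolation as follows (a minimal standalone example with made-up type and field names, not code taken from the driver):

	/* Minimal sketch: a per-device ops table filled by structure
	 * assignment from a const template (type-checked, no size argument
	 * to get wrong), and a shared table that is only pointed to.
	 * All names here are invented for illustration.
	 */
	struct demo_ops {
		int (*start)(void);
	};

	static const struct demo_ops demo_ops_generic = {
		.start = NULL,
	};

	struct demo_hw {
		struct demo_ops ops;		/* private, writable copy */
		const struct demo_ops *mbx_ops;	/* shared, accessed by reference */
	};

	static void demo_hw_init(struct demo_hw *hw)
	{
		hw->ops = demo_ops_generic;		/* replaces memcpy(&hw->ops, ...) */
		hw->mbx_ops = &demo_ops_generic;	/* no copy at all */
	}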
Reported-by: Julia Lawall Signed-off-by: Mark Rustad Acked-by: Julia Lawall Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe.h | 12 +++--- .../net/ethernet/intel/ixgbe/ixgbe_82598.c | 10 ++--- .../net/ethernet/intel/ixgbe/ixgbe_82599.c | 10 ++--- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 10 ++--- drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c | 40 +++++++++---------- drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h | 4 +- drivers/net/ethernet/intel/ixgbe/ixgbe_type.h | 12 +++--- drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c | 10 ++--- drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c | 18 ++++----- 9 files changed, 63 insertions(+), 63 deletions(-) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index 9f64354c9c9e..4590fabdedf0 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2013 Intel Corporation. + Copyright(c) 1999 - 2016 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -862,11 +862,11 @@ enum ixgbe_boards { board_X550EM_x, }; -extern struct ixgbe_info ixgbe_82598_info; -extern struct ixgbe_info ixgbe_82599_info; -extern struct ixgbe_info ixgbe_X540_info; -extern struct ixgbe_info ixgbe_X550_info; -extern struct ixgbe_info ixgbe_X550EM_x_info; +extern const struct ixgbe_info ixgbe_82598_info; +extern const struct ixgbe_info ixgbe_82599_info; +extern const struct ixgbe_info ixgbe_X540_info; +extern const struct ixgbe_info ixgbe_X550_info; +extern const struct ixgbe_info ixgbe_X550EM_x_info; #ifdef CONFIG_IXGBE_DCB extern const struct dcbnl_rtnl_ops dcbnl_ops; #endif diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c index d8a9fb8a59e2..9790d0274f7f 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2015 Intel Corporation. + Copyright(c) 1999 - 2016 Intel Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -1160,7 +1160,7 @@ static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb, IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), IXGBE_TXPBSIZE_40KB); } -static struct ixgbe_mac_operations mac_ops_82598 = { +static const struct ixgbe_mac_operations mac_ops_82598 = { .init_hw = &ixgbe_init_hw_generic, .reset_hw = &ixgbe_reset_hw_82598, .start_hw = &ixgbe_start_hw_82598, @@ -1203,7 +1203,7 @@ static struct ixgbe_mac_operations mac_ops_82598 = { .disable_rx = &ixgbe_disable_rx_generic, }; -static struct ixgbe_eeprom_operations eeprom_ops_82598 = { +static const struct ixgbe_eeprom_operations eeprom_ops_82598 = { .init_params = &ixgbe_init_eeprom_params_generic, .read = &ixgbe_read_eerd_generic, .write = &ixgbe_write_eeprom_generic, @@ -1214,7 +1214,7 @@ static struct ixgbe_eeprom_operations eeprom_ops_82598 = { .update_checksum = &ixgbe_update_eeprom_checksum_generic, }; -static struct ixgbe_phy_operations phy_ops_82598 = { +static const struct ixgbe_phy_operations phy_ops_82598 = { .identify = &ixgbe_identify_phy_generic, .identify_sfp = &ixgbe_identify_module_generic, .init = &ixgbe_init_phy_ops_82598, @@ -1230,7 +1230,7 @@ static struct ixgbe_phy_operations phy_ops_82598 = { .check_overtemp = &ixgbe_tn_check_overtemp, }; -struct ixgbe_info ixgbe_82598_info = { +const struct ixgbe_info ixgbe_82598_info = { .mac = ixgbe_mac_82598EB, .get_invariants = &ixgbe_get_invariants_82598, .mac_ops = &mac_ops_82598, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c index fa8d4f40ac2a..b276fe0bc665 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2015 Intel Corporation. + Copyright(c) 1999 - 2016 Intel Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -2181,7 +2181,7 @@ release_i2c_access: return status; } -static struct ixgbe_mac_operations mac_ops_82599 = { +static const struct ixgbe_mac_operations mac_ops_82599 = { .init_hw = &ixgbe_init_hw_generic, .reset_hw = &ixgbe_reset_hw_82599, .start_hw = &ixgbe_start_hw_82599, @@ -2235,7 +2235,7 @@ static struct ixgbe_mac_operations mac_ops_82599 = { .disable_rx = &ixgbe_disable_rx_generic, }; -static struct ixgbe_eeprom_operations eeprom_ops_82599 = { +static const struct ixgbe_eeprom_operations eeprom_ops_82599 = { .init_params = &ixgbe_init_eeprom_params_generic, .read = &ixgbe_read_eeprom_82599, .read_buffer = &ixgbe_read_eeprom_buffer_82599, @@ -2246,7 +2246,7 @@ static struct ixgbe_eeprom_operations eeprom_ops_82599 = { .update_checksum = &ixgbe_update_eeprom_checksum_generic, }; -static struct ixgbe_phy_operations phy_ops_82599 = { +static const struct ixgbe_phy_operations phy_ops_82599 = { .identify = &ixgbe_identify_phy_82599, .identify_sfp = &ixgbe_identify_module_generic, .init = &ixgbe_init_phy_ops_82599, @@ -2263,7 +2263,7 @@ static struct ixgbe_phy_operations phy_ops_82599 = { .check_overtemp = &ixgbe_tn_check_overtemp, }; -struct ixgbe_info ixgbe_82599_info = { +const struct ixgbe_info ixgbe_82599_info = { .mac = ixgbe_mac_82599EB, .get_invariants = &ixgbe_get_invariants_82599, .mac_ops = &mac_ops_82599, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 297e7d32adfd..5f4ecf50eedd 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2015 Intel Corporation. + Copyright(c) 1999 - 2016 Intel Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -9136,12 +9136,12 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) strlcpy(netdev->name, pci_name(pdev), sizeof(netdev->name)); /* Setup hw api */ - memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops)); + hw->mac.ops = *ii->mac_ops; hw->mac.type = ii->mac; hw->mvals = ii->mvals; /* EEPROM */ - memcpy(&hw->eeprom.ops, ii->eeprom_ops, sizeof(hw->eeprom.ops)); + hw->eeprom.ops = *ii->eeprom_ops; eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw)); if (ixgbe_removed(hw->hw_addr)) { err = -EIO; @@ -9152,7 +9152,7 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) hw->eeprom.ops.read = &ixgbe_read_eeprom_bit_bang_generic; /* PHY */ - memcpy(&hw->phy.ops, ii->phy_ops, sizeof(hw->phy.ops)); + hw->phy.ops = *ii->phy_ops; hw->phy.sfp_type = ixgbe_sfp_type_unknown; /* ixgbe_identify_phy_generic will set prtad and mmds properly */ hw->phy.mdio.prtad = MDIO_PRTAD_NONE; @@ -9215,7 +9215,7 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto skip_sriov; /* Mailbox */ ixgbe_init_mbx_params_pf(hw); - memcpy(&hw->mbx.ops, ii->mbx_ops, sizeof(hw->mbx.ops)); + hw->mbx.ops = ii->mbx_ops; pci_sriov_set_totalvfs(pdev, IXGBE_MAX_VFS_DRV_LIMIT); ixgbe_enable_sriov(adapter); skip_sriov: diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c index 9993a471d668..2837c94d6e35 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2014 Intel Corporation. + Copyright(c) 1999 - 2016 Intel Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -48,10 +48,10 @@ s32 ixgbe_read_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id) if (size > mbx->size) size = mbx->size; - if (!mbx->ops.read) + if (!mbx->ops) return IXGBE_ERR_MBX; - return mbx->ops.read(hw, msg, size, mbx_id); + return mbx->ops->read(hw, msg, size, mbx_id); } /** @@ -70,10 +70,10 @@ s32 ixgbe_write_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id) if (size > mbx->size) return IXGBE_ERR_MBX; - if (!mbx->ops.write) + if (!mbx->ops) return IXGBE_ERR_MBX; - return mbx->ops.write(hw, msg, size, mbx_id); + return mbx->ops->write(hw, msg, size, mbx_id); } /** @@ -87,10 +87,10 @@ s32 ixgbe_check_for_msg(struct ixgbe_hw *hw, u16 mbx_id) { struct ixgbe_mbx_info *mbx = &hw->mbx; - if (!mbx->ops.check_for_msg) + if (!mbx->ops) return IXGBE_ERR_MBX; - return mbx->ops.check_for_msg(hw, mbx_id); + return mbx->ops->check_for_msg(hw, mbx_id); } /** @@ -104,10 +104,10 @@ s32 ixgbe_check_for_ack(struct ixgbe_hw *hw, u16 mbx_id) { struct ixgbe_mbx_info *mbx = &hw->mbx; - if (!mbx->ops.check_for_ack) + if (!mbx->ops) return IXGBE_ERR_MBX; - return mbx->ops.check_for_ack(hw, mbx_id); + return mbx->ops->check_for_ack(hw, mbx_id); } /** @@ -121,10 +121,10 @@ s32 ixgbe_check_for_rst(struct ixgbe_hw *hw, u16 mbx_id) { struct ixgbe_mbx_info *mbx = &hw->mbx; - if (!mbx->ops.check_for_rst) + if (!mbx->ops) return IXGBE_ERR_MBX; - return mbx->ops.check_for_rst(hw, mbx_id); + return mbx->ops->check_for_rst(hw, mbx_id); } /** @@ -139,10 +139,10 @@ static s32 ixgbe_poll_for_msg(struct ixgbe_hw *hw, u16 mbx_id) struct ixgbe_mbx_info *mbx = &hw->mbx; int countdown = mbx->timeout; - if (!countdown || !mbx->ops.check_for_msg) + if (!countdown || !mbx->ops) return IXGBE_ERR_MBX; - while (mbx->ops.check_for_msg(hw, mbx_id)) { + while (mbx->ops->check_for_msg(hw, mbx_id)) { countdown--; if (!countdown) return IXGBE_ERR_MBX; @@ -164,10 +164,10 @@ static s32 ixgbe_poll_for_ack(struct ixgbe_hw *hw, u16 mbx_id) struct ixgbe_mbx_info *mbx = &hw->mbx; int countdown = mbx->timeout; - if (!countdown || !mbx->ops.check_for_ack) + if (!countdown || !mbx->ops) return IXGBE_ERR_MBX; - while (mbx->ops.check_for_ack(hw, mbx_id)) { + while (mbx->ops->check_for_ack(hw, mbx_id)) { countdown--; if (!countdown) return IXGBE_ERR_MBX; @@ -193,7 +193,7 @@ static s32 ixgbe_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, struct ixgbe_mbx_info *mbx = &hw->mbx; s32 ret_val; - if (!mbx->ops.read) + if (!mbx->ops) return IXGBE_ERR_MBX; ret_val = ixgbe_poll_for_msg(hw, mbx_id); @@ -201,7 +201,7 @@ static s32 ixgbe_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, return ret_val; /* if ack received read message */ - return mbx->ops.read(hw, msg, size, mbx_id); + return mbx->ops->read(hw, msg, size, mbx_id); } /** @@ -221,11 +221,11 @@ static s32 ixgbe_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, s32 ret_val; /* exit if either we can't write or there isn't a defined timeout */ - if (!mbx->ops.write || !mbx->timeout) + if (!mbx->ops || !mbx->timeout) return IXGBE_ERR_MBX; /* send msg */ - ret_val = mbx->ops.write(hw, msg, size, mbx_id); + ret_val = mbx->ops->write(hw, msg, size, mbx_id); if (ret_val) return ret_val; @@ -446,7 +446,7 @@ void ixgbe_init_mbx_params_pf(struct ixgbe_hw *hw) } #endif /* CONFIG_PCI_IOV */ -struct ixgbe_mbx_operations mbx_ops_generic = { +const struct ixgbe_mbx_operations mbx_ops_generic = { .read = ixgbe_read_mbx_pf, .write = 
ixgbe_write_mbx_pf, .read_posted = ixgbe_read_posted_mbx, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h index 8daa95f74548..01c2667c0f92 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2013 Intel Corporation. + Copyright(c) 1999 - 2016 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -123,6 +123,6 @@ s32 ixgbe_check_for_rst(struct ixgbe_hw *, u16); void ixgbe_init_mbx_params_pf(struct ixgbe_hw *); #endif /* CONFIG_PCI_IOV */ -extern struct ixgbe_mbx_operations mbx_ops_generic; +extern const struct ixgbe_mbx_operations mbx_ops_generic; #endif /* _IXGBE_MBX_H_ */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h index bf7367a08716..29e0b0e1cb67 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2015 Intel Corporation. + Copyright(c) 1999 - 2016 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -3442,7 +3442,7 @@ struct ixgbe_mbx_stats { }; struct ixgbe_mbx_info { - struct ixgbe_mbx_operations ops; + const struct ixgbe_mbx_operations *ops; struct ixgbe_mbx_stats stats; u32 timeout; u32 usec_delay; @@ -3475,10 +3475,10 @@ struct ixgbe_hw { struct ixgbe_info { enum ixgbe_mac_type mac; s32 (*get_invariants)(struct ixgbe_hw *); - struct ixgbe_mac_operations *mac_ops; - struct ixgbe_eeprom_operations *eeprom_ops; - struct ixgbe_phy_operations *phy_ops; - struct ixgbe_mbx_operations *mbx_ops; + const struct ixgbe_mac_operations *mac_ops; + const struct ixgbe_eeprom_operations *eeprom_ops; + const struct ixgbe_phy_operations *phy_ops; + const struct ixgbe_mbx_operations *mbx_ops; const u32 *mvals; }; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c index 2358c1b7d586..0d69564f3a1b 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2014 Intel Corporation. + Copyright(c) 1999 - 2016 Intel Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -810,7 +810,7 @@ s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index) return 0; } -static struct ixgbe_mac_operations mac_ops_X540 = { +static const struct ixgbe_mac_operations mac_ops_X540 = { .init_hw = &ixgbe_init_hw_generic, .reset_hw = &ixgbe_reset_hw_X540, .start_hw = &ixgbe_start_hw_X540, @@ -863,7 +863,7 @@ static struct ixgbe_mac_operations mac_ops_X540 = { .disable_rx = &ixgbe_disable_rx_generic, }; -static struct ixgbe_eeprom_operations eeprom_ops_X540 = { +static const struct ixgbe_eeprom_operations eeprom_ops_X540 = { .init_params = &ixgbe_init_eeprom_params_X540, .read = &ixgbe_read_eerd_X540, .read_buffer = &ixgbe_read_eerd_buffer_X540, @@ -874,7 +874,7 @@ static struct ixgbe_eeprom_operations eeprom_ops_X540 = { .update_checksum = &ixgbe_update_eeprom_checksum_X540, }; -static struct ixgbe_phy_operations phy_ops_X540 = { +static const struct ixgbe_phy_operations phy_ops_X540 = { .identify = &ixgbe_identify_phy_generic, .identify_sfp = &ixgbe_identify_sfp_module_generic, .init = NULL, @@ -897,7 +897,7 @@ static const u32 ixgbe_mvals_X540[IXGBE_MVALS_IDX_LIMIT] = { IXGBE_MVALS_INIT(X540) }; -struct ixgbe_info ixgbe_X540_info = { +const struct ixgbe_info ixgbe_X540_info = { .mac = ixgbe_mac_X540, .get_invariants = &ixgbe_get_invariants_X540, .mac_ops = &mac_ops_X540, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c index 68a9c646498e..26e0b8df2afe 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel 10 Gigabit PCI Express Linux driver - * Copyright(c) 1999 - 2015 Intel Corporation. + * Copyright(c) 1999 - 2016 Intel Corporation. 
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -2342,7 +2342,7 @@ static void ixgbe_release_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask) .enable_rx = &ixgbe_enable_rx_generic, \ .disable_rx = &ixgbe_disable_rx_x550, \ -static struct ixgbe_mac_operations mac_ops_X550 = { +static const struct ixgbe_mac_operations mac_ops_X550 = { X550_COMMON_MAC .reset_hw = &ixgbe_reset_hw_X540, .get_media_type = &ixgbe_get_media_type_X540, @@ -2356,7 +2356,7 @@ static struct ixgbe_mac_operations mac_ops_X550 = { .release_swfw_sync = &ixgbe_release_swfw_sync_X540, }; -static struct ixgbe_mac_operations mac_ops_X550EM_x = { +static const struct ixgbe_mac_operations mac_ops_X550EM_x = { X550_COMMON_MAC .reset_hw = &ixgbe_reset_hw_X550em, .get_media_type = &ixgbe_get_media_type_X550em, @@ -2379,12 +2379,12 @@ static struct ixgbe_mac_operations mac_ops_X550EM_x = { .update_checksum = &ixgbe_update_eeprom_checksum_X550, \ .calc_checksum = &ixgbe_calc_eeprom_checksum_X550, \ -static struct ixgbe_eeprom_operations eeprom_ops_X550 = { +static const struct ixgbe_eeprom_operations eeprom_ops_X550 = { X550_COMMON_EEP .init_params = &ixgbe_init_eeprom_params_X550, }; -static struct ixgbe_eeprom_operations eeprom_ops_X550EM_x = { +static const struct ixgbe_eeprom_operations eeprom_ops_X550EM_x = { X550_COMMON_EEP .init_params = &ixgbe_init_eeprom_params_X540, }; @@ -2405,13 +2405,13 @@ static struct ixgbe_eeprom_operations eeprom_ops_X550EM_x = { .check_overtemp = &ixgbe_tn_check_overtemp, \ .get_firmware_version = &ixgbe_get_phy_firmware_version_generic, -static struct ixgbe_phy_operations phy_ops_X550 = { +static const struct ixgbe_phy_operations phy_ops_X550 = { X550_COMMON_PHY .init = NULL, .identify = &ixgbe_identify_phy_generic, }; -static struct ixgbe_phy_operations phy_ops_X550EM_x = { +static const struct ixgbe_phy_operations phy_ops_X550EM_x = { X550_COMMON_PHY .init = &ixgbe_init_phy_ops_X550em, .identify = &ixgbe_identify_phy_x550em, @@ -2430,7 +2430,7 @@ static const u32 ixgbe_mvals_X550EM_x[IXGBE_MVALS_IDX_LIMIT] = { IXGBE_MVALS_INIT(X550EM_x) }; -struct ixgbe_info ixgbe_X550_info = { +const struct ixgbe_info ixgbe_X550_info = { .mac = ixgbe_mac_X550, .get_invariants = &ixgbe_get_invariants_X540, .mac_ops = &mac_ops_X550, @@ -2440,7 +2440,7 @@ struct ixgbe_info ixgbe_X550_info = { .mvals = ixgbe_mvals_X550, }; -struct ixgbe_info ixgbe_X550EM_x_info = { +const struct ixgbe_info ixgbe_X550EM_x_info = { .mac = ixgbe_mac_X550EM_x, .get_invariants = &ixgbe_get_invariants_X550_x, .mac_ops = &mac_ops_X550EM_x, From c7374b5a767cb6c7d9acbfc82656dc89afeae257 Mon Sep 17 00:00:00 2001 From: Sowmini Varadhan Date: Tue, 12 Jan 2016 19:32:30 -0800 Subject: [PATCH 0181/1649] ixgbe: use eth_platform_get_mac_address() This commit converts commit c762dff24c06 ("ixgbe: Look up MAC address in Open Firmware or IDPROM") to use eth_platform_get_mac_address() added by commit c7f5d105495a ("net: Add eth_platform_get_mac_address() helper.") Signed-off-by: Sowmini Varadhan Tested-by: Phil Schmitt Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 35 ++----------------- 1 file changed, 2 insertions(+), 33 deletions(-) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 5f4ecf50eedd..bce5737b1a96 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -54,15 +54,6 @@ #include #include 
-#ifdef CONFIG_OF -#include -#endif - -#ifdef CONFIG_SPARC -#include -#include -#endif - #include "ixgbe.h" #include "ixgbe_common.h" #include "ixgbe_dcb_82599.h" @@ -9010,29 +9001,6 @@ int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id, return is_wol_supported; } -/** - * ixgbe_get_platform_mac_addr - Look up MAC address in Open Firmware / IDPROM - * @adapter: Pointer to adapter struct - */ -static void ixgbe_get_platform_mac_addr(struct ixgbe_adapter *adapter) -{ -#ifdef CONFIG_OF - struct device_node *dp = pci_device_to_OF_node(adapter->pdev); - struct ixgbe_hw *hw = &adapter->hw; - const unsigned char *addr; - - addr = of_get_mac_address(dp); - if (addr) { - ether_addr_copy(hw->mac.perm_addr, addr); - return; - } -#endif /* CONFIG_OF */ - -#ifdef CONFIG_SPARC - ether_addr_copy(hw->mac.perm_addr, idprom->id_ethaddr); -#endif /* CONFIG_SPARC */ -} - /** * ixgbe_probe - Device Initialization Routine * @pdev: PCI device information struct @@ -9304,7 +9272,8 @@ skip_sriov: goto err_sw_init; } - ixgbe_get_platform_mac_addr(adapter); + eth_platform_get_mac_address(&adapter->pdev->dev, + adapter->hw.mac.perm_addr); memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len); From 49763de0425560eed50a186428010189eae69372 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Wed, 13 Jan 2016 07:31:11 -0800 Subject: [PATCH 0182/1649] ixgbe: Add support for generic Tx checksums This patch adds support for generic Tx checksums to the ixgbe driver. It turns out this is actually pretty easy after going over the datasheet as we were doing a number of steps we didn't need to. In order to perform a Tx checksum for an L4 header we need to fill in the following fields in the Tx descriptor: MACLEN (maximum of 127), retrieved from: skb_network_offset() IPLEN (maximum of 511), retrieved from: skb_checksum_start_offset() - skb_network_offset() TUCMD.L4T indicates offset and if checksum or crc32c, based on: skb->csum_offset The added advantage to doing this is that we can support inner checksum offloads for tunnels and MPLS while still being able to transparently insert VLAN tags. I also took the opportunity to clean-up many of the feature flag configuration bits to make them a bit more consistent between drivers. 
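As a rough sketch of how the three descriptor inputs named above relate to the skb (illustrative only; the helper and the shift value below are placeholders, not the driver's register definitions):

	/* Hypothetical helper showing where MACLEN, IPLEN and the L4 type
	 * hint come from. DEMO_MACLEN_SHIFT is a placeholder, not a real
	 * hardware definition.
	 */
	#include <linux/skbuff.h>
	#include <linux/tcp.h>

	#define DEMO_MACLEN_SHIFT	9	/* placeholder shift */

	static u32 demo_csum_lens(const struct sk_buff *skb)
	{
		u32 maclen = skb_network_offset(skb);		/* MACLEN, <= 127 */
		u32 iplen = skb_checksum_start_offset(skb) -
			    skb_network_offset(skb);		/* IPLEN, <= 511 */

		/* TUCMD.L4T is chosen elsewhere from skb->csum_offset, e.g.
		 * offsetof(struct tcphdr, check) indicates a TCP checksum.
		 */
		return (maclen << DEMO_MACLEN_SHIFT) | iplen;
	}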
Signed-off-by: Alexander Duyck Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 170 +++++++----------- 1 file changed, 62 insertions(+), 108 deletions(-) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index bce5737b1a96..0f007d9a2a13 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -7202,103 +7202,61 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring, return 1; } +static inline bool ixgbe_ipv6_csum_is_sctp(struct sk_buff *skb) +{ + unsigned int offset = 0; + + ipv6_find_hdr(skb, &offset, IPPROTO_SCTP, NULL, NULL); + + return offset == skb_checksum_start_offset(skb); +} + static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring, struct ixgbe_tx_buffer *first) { struct sk_buff *skb = first->skb; u32 vlan_macip_lens = 0; - u32 mss_l4len_idx = 0; u32 type_tucmd = 0; if (skb->ip_summed != CHECKSUM_PARTIAL) { - if (!(first->tx_flags & IXGBE_TX_FLAGS_HW_VLAN) && - !(first->tx_flags & IXGBE_TX_FLAGS_CC)) +csum_failed: + if (!(first->tx_flags & (IXGBE_TX_FLAGS_HW_VLAN | + IXGBE_TX_FLAGS_CC))) return; - vlan_macip_lens = skb_network_offset(skb) << - IXGBE_ADVTXD_MACLEN_SHIFT; - } else { - u8 l4_hdr = 0; - union { - struct iphdr *ipv4; - struct ipv6hdr *ipv6; - u8 *raw; - } network_hdr; - union { - struct tcphdr *tcphdr; - u8 *raw; - } transport_hdr; - __be16 frag_off; - - if (skb->encapsulation) { - network_hdr.raw = skb_inner_network_header(skb); - transport_hdr.raw = skb_inner_transport_header(skb); - vlan_macip_lens = skb_inner_network_offset(skb) << - IXGBE_ADVTXD_MACLEN_SHIFT; - } else { - network_hdr.raw = skb_network_header(skb); - transport_hdr.raw = skb_transport_header(skb); - vlan_macip_lens = skb_network_offset(skb) << - IXGBE_ADVTXD_MACLEN_SHIFT; - } - - /* use first 4 bits to determine IP version */ - switch (network_hdr.ipv4->version) { - case IPVERSION: - vlan_macip_lens |= transport_hdr.raw - network_hdr.raw; - type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4; - l4_hdr = network_hdr.ipv4->protocol; - break; - case 6: - vlan_macip_lens |= transport_hdr.raw - network_hdr.raw; - l4_hdr = network_hdr.ipv6->nexthdr; - if (likely((transport_hdr.raw - network_hdr.raw) == - sizeof(struct ipv6hdr))) - break; - ipv6_skip_exthdr(skb, network_hdr.raw - skb->data + - sizeof(struct ipv6hdr), - &l4_hdr, &frag_off); - if (unlikely(frag_off)) - l4_hdr = NEXTHDR_FRAGMENT; - break; - default: - break; - } - - switch (l4_hdr) { - case IPPROTO_TCP: - type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_TCP; - mss_l4len_idx = (transport_hdr.tcphdr->doff * 4) << - IXGBE_ADVTXD_L4LEN_SHIFT; - break; - case IPPROTO_SCTP: - type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_SCTP; - mss_l4len_idx = sizeof(struct sctphdr) << - IXGBE_ADVTXD_L4LEN_SHIFT; - break; - case IPPROTO_UDP: - mss_l4len_idx = sizeof(struct udphdr) << - IXGBE_ADVTXD_L4LEN_SHIFT; - break; - default: - if (unlikely(net_ratelimit())) { - dev_warn(tx_ring->dev, - "partial checksum, version=%d, l4 proto=%x\n", - network_hdr.ipv4->version, l4_hdr); - } - skb_checksum_help(skb); - goto no_csum; - } - - /* update TX checksum flag */ - first->tx_flags |= IXGBE_TX_FLAGS_CSUM; + goto no_csum; } + switch (skb->csum_offset) { + case offsetof(struct tcphdr, check): + type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP; + /* fall through */ + case offsetof(struct udphdr, check): + break; + case offsetof(struct sctphdr, checksum): + /* validate that this is actually an SCTP request */ + if (((first->protocol == htons(ETH_P_IP)) && + 
(ip_hdr(skb)->protocol == IPPROTO_SCTP)) || + ((first->protocol == htons(ETH_P_IPV6)) && + ixgbe_ipv6_csum_is_sctp(skb))) { + type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_SCTP; + break; + } + /* fall through */ + default: + skb_checksum_help(skb); + goto csum_failed; + } + + /* update TX checksum flag */ + first->tx_flags |= IXGBE_TX_FLAGS_CSUM; + vlan_macip_lens = skb_checksum_start_offset(skb) - + skb_network_offset(skb); no_csum: /* vlan_macip_lens: MACLEN, VLAN tag */ + vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT; vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; - ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0, - type_tucmd, mss_l4len_idx); + ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0, type_tucmd, 0); } #define IXGBE_SET_FLAG(_input, _flag, _result) \ @@ -9190,41 +9148,37 @@ skip_sriov: #endif netdev->features = NETIF_F_SG | - NETIF_F_IP_CSUM | - NETIF_F_IPV6_CSUM | - NETIF_F_HW_VLAN_CTAG_TX | - NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_RXHASH | - NETIF_F_RXCSUM; + NETIF_F_RXCSUM | + NETIF_F_HW_CSUM | + NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX; - netdev->hw_features = netdev->features | NETIF_F_HW_L2FW_DOFFLOAD; - - switch (adapter->hw.mac.type) { - case ixgbe_mac_82599EB: - case ixgbe_mac_X540: - case ixgbe_mac_X550: - case ixgbe_mac_X550EM_x: + if (hw->mac.type >= ixgbe_mac_82599EB) netdev->features |= NETIF_F_SCTP_CRC; - netdev->hw_features |= NETIF_F_SCTP_CRC | - NETIF_F_NTUPLE | - NETIF_F_HW_TC; - break; - default: - break; - } - netdev->hw_features |= NETIF_F_RXALL; + /* copy netdev features into list of user selectable features */ + netdev->hw_features |= netdev->features; + netdev->hw_features |= NETIF_F_RXALL | + NETIF_F_HW_L2FW_DOFFLOAD; + + if (hw->mac.type >= ixgbe_mac_82599EB) + netdev->hw_features |= NETIF_F_NTUPLE | + NETIF_F_HW_TC; + + /* set this bit last since it cannot be part of hw_features */ netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; - netdev->vlan_features |= NETIF_F_TSO; - netdev->vlan_features |= NETIF_F_TSO6; - netdev->vlan_features |= NETIF_F_IP_CSUM; - netdev->vlan_features |= NETIF_F_IPV6_CSUM; - netdev->vlan_features |= NETIF_F_SG; + netdev->vlan_features |= NETIF_F_SG | + NETIF_F_TSO | + NETIF_F_TSO6 | + NETIF_F_HW_CSUM | + NETIF_F_SCTP_CRC; - netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; + netdev->mpls_features |= NETIF_F_HW_CSUM; + netdev->hw_enc_features |= NETIF_F_HW_CSUM; netdev->priv_flags |= IFF_UNICAST_FLT; netdev->priv_flags |= IFF_SUPP_NOFCS; From cb2b3edbece804d9836647c1ca51282ad384d425 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Wed, 13 Jan 2016 07:31:17 -0800 Subject: [PATCH 0183/1649] ixgbevf: Add support for generic Tx checksums This patch adds support for generic Tx checksums to the ixgbevf driver. It turns out this is actually pretty easy after going over the datasheet as we were doing a number of steps we didn't need to. In order to perform a Tx checksum for an L4 header we need to fill in the following fields in the Tx descriptor: MACLEN (maximum of 127), retrieved from: skb_network_offset() IPLEN (maximum of 511), retrieved from: skb_checksum_start_offset() - skb_network_offset() TUCMD.L4T indicates offset and if checksum or crc32c, based on: skb->csum_offset The added advantage to doing this is that we can support inner checksum offloads for tunnels and MPLS while still being able to transparently insert VLAN tags. 
I also took the opportunity to clean-up many of the feature flag configuration bits to make them a bit more consistent between drivers. In the case of the VF drivers this meant adding support for SCTP CRCs, and inner checksum offloads for MPLS and various tunnel types. Signed-off-by: Alexander Duyck Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- .../net/ethernet/intel/ixgbevf/ixgbevf_main.c | 104 ++++++++---------- 1 file changed, 43 insertions(+), 61 deletions(-) diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 9a2eed0f5245..50b6bfffaf32 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -3334,76 +3334,55 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring, return 1; } +static inline bool ixgbevf_ipv6_csum_is_sctp(struct sk_buff *skb) +{ + unsigned int offset = 0; + + ipv6_find_hdr(skb, &offset, IPPROTO_SCTP, NULL, NULL); + + return offset == skb_checksum_start_offset(skb); +} + static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring, struct ixgbevf_tx_buffer *first) { struct sk_buff *skb = first->skb; u32 vlan_macip_lens = 0; - u32 mss_l4len_idx = 0; u32 type_tucmd = 0; - if (skb->ip_summed == CHECKSUM_PARTIAL) { - u8 l4_hdr = 0; - __be16 frag_off; + if (skb->ip_summed != CHECKSUM_PARTIAL) + goto no_csum; - switch (first->protocol) { - case htons(ETH_P_IP): - vlan_macip_lens |= skb_network_header_len(skb); - type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4; - l4_hdr = ip_hdr(skb)->protocol; - break; - case htons(ETH_P_IPV6): - vlan_macip_lens |= skb_network_header_len(skb); - l4_hdr = ipv6_hdr(skb)->nexthdr; - if (likely(skb_network_header_len(skb) == - sizeof(struct ipv6hdr))) - break; - ipv6_skip_exthdr(skb, skb_network_offset(skb) + - sizeof(struct ipv6hdr), - &l4_hdr, &frag_off); - if (unlikely(frag_off)) - l4_hdr = NEXTHDR_FRAGMENT; - break; - default: + switch (skb->csum_offset) { + case offsetof(struct tcphdr, check): + type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP; + /* fall through */ + case offsetof(struct udphdr, check): + break; + case offsetof(struct sctphdr, checksum): + /* validate that this is actually an SCTP request */ + if (((first->protocol == htons(ETH_P_IP)) && + (ip_hdr(skb)->protocol == IPPROTO_SCTP)) || + ((first->protocol == htons(ETH_P_IPV6)) && + ixgbevf_ipv6_csum_is_sctp(skb))) { + type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_SCTP; break; } - - switch (l4_hdr) { - case IPPROTO_TCP: - type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_TCP; - mss_l4len_idx = tcp_hdrlen(skb) << - IXGBE_ADVTXD_L4LEN_SHIFT; - break; - case IPPROTO_SCTP: - type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_SCTP; - mss_l4len_idx = sizeof(struct sctphdr) << - IXGBE_ADVTXD_L4LEN_SHIFT; - break; - case IPPROTO_UDP: - mss_l4len_idx = sizeof(struct udphdr) << - IXGBE_ADVTXD_L4LEN_SHIFT; - break; - default: - if (unlikely(net_ratelimit())) { - dev_warn(tx_ring->dev, - "partial checksum, l3 proto=%x, l4 proto=%x\n", - first->protocol, l4_hdr); - } - skb_checksum_help(skb); - goto no_csum; - } - - /* update TX checksum flag */ - first->tx_flags |= IXGBE_TX_FLAGS_CSUM; + /* fall through */ + default: + skb_checksum_help(skb); + goto no_csum; } - + /* update TX checksum flag */ + first->tx_flags |= IXGBE_TX_FLAGS_CSUM; + vlan_macip_lens = skb_checksum_start_offset(skb) - + skb_network_offset(skb); no_csum: /* vlan_macip_lens: MACLEN, VLAN tag */ vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT; vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; - 
ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens, - type_tucmd, mss_l4len_idx); + ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, 0); } static __le32 ixgbevf_tx_cmd_type(u32 tx_flags) @@ -4010,22 +3989,25 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) } netdev->hw_features = NETIF_F_SG | - NETIF_F_IP_CSUM | - NETIF_F_IPV6_CSUM | NETIF_F_TSO | NETIF_F_TSO6 | - NETIF_F_RXCSUM; + NETIF_F_RXCSUM | + NETIF_F_HW_CSUM | + NETIF_F_SCTP_CRC; netdev->features = netdev->hw_features | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER; - netdev->vlan_features |= NETIF_F_TSO | + netdev->vlan_features |= NETIF_F_SG | + NETIF_F_TSO | NETIF_F_TSO6 | - NETIF_F_IP_CSUM | - NETIF_F_IPV6_CSUM | - NETIF_F_SG; + NETIF_F_HW_CSUM | + NETIF_F_SCTP_CRC; + + netdev->mpls_features |= NETIF_F_HW_CSUM; + netdev->hw_enc_features |= NETIF_F_HW_CSUM; if (pci_using_dac) netdev->features |= NETIF_F_HIGHDMA; From afdc71e4d6dc46d0f5bea7461ce356e6056f5ba8 Mon Sep 17 00:00:00 2001 From: Mark Rustad Date: Mon, 25 Jan 2016 16:32:10 -0800 Subject: [PATCH 0184/1649] ixgbe: Fix flow control for Xeon D KR backplane Xeon D KR backplane is different from other backplanes, in that we can't use auto-negotiation to determine the mode. Instead, use whatever the user configured. Signed-off-by: Mark Rustad Tested-by: Phil Schmitt Signed-off-by: Jeff Kirsher --- .../net/ethernet/intel/ixgbe/ixgbe_82598.c | 1 + .../net/ethernet/intel/ixgbe/ixgbe_82599.c | 1 + .../net/ethernet/intel/ixgbe/ixgbe_common.c | 8 +- .../net/ethernet/intel/ixgbe/ixgbe_common.h | 3 +- drivers/net/ethernet/intel/ixgbe/ixgbe_type.h | 5 ++ drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c | 1 + drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c | 86 ++++++++++++++++++- 7 files changed, 98 insertions(+), 7 deletions(-) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c index 9790d0274f7f..f47eb12a9c50 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c @@ -1192,6 +1192,7 @@ static const struct ixgbe_mac_operations mac_ops_82598 = { .clear_vfta = &ixgbe_clear_vfta_82598, .set_vfta = &ixgbe_set_vfta_82598, .fc_enable = &ixgbe_fc_enable_82598, + .setup_fc = ixgbe_setup_fc_generic, .set_fw_drv_ver = NULL, .acquire_swfw_sync = &ixgbe_acquire_swfw_sync, .release_swfw_sync = &ixgbe_release_swfw_sync, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c index b276fe0bc665..c3ae5a701d43 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c @@ -2220,6 +2220,7 @@ static const struct ixgbe_mac_operations mac_ops_82599 = { .clear_vfta = &ixgbe_clear_vfta_generic, .set_vfta = &ixgbe_set_vfta_generic, .fc_enable = &ixgbe_fc_enable_generic, + .setup_fc = ixgbe_setup_fc_generic, .set_fw_drv_ver = &ixgbe_set_fw_drv_ver_generic, .init_uta_tables = &ixgbe_init_uta_tables_generic, .setup_sfp = &ixgbe_setup_sfp_modules_82599, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c index 64045053e874..8c7e78b21c4e 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2015 Intel Corporation. 
+ Copyright(c) 1999 - 2016 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -111,12 +111,12 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw) } /** - * ixgbe_setup_fc - Set up flow control + * ixgbe_setup_fc_generic - Set up flow control * @hw: pointer to hardware structure * * Called at init time to set up flow control. **/ -static s32 ixgbe_setup_fc(struct ixgbe_hw *hw) +s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw) { s32 ret_val = 0; u32 reg = 0, reg_bp = 0; @@ -296,7 +296,7 @@ s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw) IXGBE_WRITE_FLUSH(hw); /* Setup flow control */ - ret_val = ixgbe_setup_fc(hw); + ret_val = hw->mac.ops.setup_fc(hw); if (ret_val) return ret_val; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h index 2b9563137fd8..2e290150ab54 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2014 Intel Corporation. + Copyright(c) 1999 - 2016 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -81,6 +81,7 @@ s32 ixgbe_disable_rx_buff_generic(struct ixgbe_hw *hw); s32 ixgbe_enable_rx_buff_generic(struct ixgbe_hw *hw); s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval); s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw); +s32 ixgbe_setup_fc_generic(struct ixgbe_hw *); bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw); void ixgbe_fc_autoneg(struct ixgbe_hw *hw); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h index 29e0b0e1cb67..787d2b21465e 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h @@ -3308,6 +3308,7 @@ struct ixgbe_mac_operations { /* Flow Control */ s32 (*fc_enable)(struct ixgbe_hw *); + s32 (*setup_fc)(struct ixgbe_hw *); /* Manageability interface */ s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8); @@ -3525,6 +3526,7 @@ struct ixgbe_info { #define IXGBE_KRM_PORT_CAR_GEN_CTRL(P) ((P) ? 0x8010 : 0x4010) #define IXGBE_KRM_LINK_CTRL_1(P) ((P) ? 0x820C : 0x420C) +#define IXGBE_KRM_AN_CNTL_1(P) ((P) ? 0x822C : 0x422C) #define IXGBE_KRM_DSP_TXFFE_STATE_4(P) ((P) ? 0x8634 : 0x4634) #define IXGBE_KRM_DSP_TXFFE_STATE_5(P) ((P) ? 0x8638 : 0x4638) #define IXGBE_KRM_RX_TRN_LINKUP_CTRL(P) ((P) ? 
0x8B00 : 0x4B00) @@ -3547,6 +3549,9 @@ struct ixgbe_info { #define IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE (1 << 29) #define IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART (1 << 31) +#define IXGBE_KRM_AN_CNTL_1_SYM_PAUSE (1 << 28) +#define IXGBE_KRM_AN_CNTL_1_ASM_PAUSE (1 << 29) + #define IXGBE_KRM_DSP_TXFFE_STATE_C0_EN (1 << 6) #define IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN (1 << 15) #define IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN (1 << 16) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c index 0d69564f3a1b..c00b67b4c1dc 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c @@ -846,6 +846,7 @@ static const struct ixgbe_mac_operations mac_ops_X540 = { .clear_vfta = &ixgbe_clear_vfta_generic, .set_vfta = &ixgbe_set_vfta_generic, .fc_enable = &ixgbe_fc_enable_generic, + .setup_fc = ixgbe_setup_fc_generic, .set_fw_drv_ver = &ixgbe_set_fw_drv_ver_generic, .init_uta_tables = &ixgbe_init_uta_tables_generic, .setup_sfp = NULL, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c index 26e0b8df2afe..972c9aa17503 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c @@ -27,6 +27,7 @@ #include "ixgbe_phy.h" static s32 ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *, ixgbe_link_speed); +static s32 ixgbe_setup_fc_x550em(struct ixgbe_hw *); static s32 ixgbe_get_invariants_X550_x(struct ixgbe_hw *hw) { @@ -1342,15 +1343,18 @@ static void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw) mac->ops.enable_tx_laser = NULL; mac->ops.flap_tx_laser = NULL; mac->ops.setup_link = ixgbe_setup_mac_link_multispeed_fiber; + mac->ops.setup_fc = ixgbe_setup_fc_x550em; mac->ops.setup_mac_link = ixgbe_setup_mac_link_sfp_x550em; mac->ops.set_rate_select_speed = ixgbe_set_soft_rate_select_speed; break; case ixgbe_media_type_copper: mac->ops.setup_link = ixgbe_setup_mac_link_t_X550em; + mac->ops.setup_fc = ixgbe_setup_fc_generic; mac->ops.check_link = ixgbe_check_link_t_X550em; break; default: + mac->ops.setup_fc = ixgbe_setup_fc_x550em; break; } } @@ -1842,6 +1846,82 @@ static s32 ixgbe_get_lcd_t_x550em(struct ixgbe_hw *hw, return status; } +/** + * ixgbe_setup_fc_x550em - Set up flow control + * @hw: pointer to hardware structure + */ +static s32 ixgbe_setup_fc_x550em(struct ixgbe_hw *hw) +{ + bool pause, asm_dir; + u32 reg_val; + s32 rc; + + /* Validate the requested mode */ + if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) { + hw_err(hw, "ixgbe_fc_rx_pause not valid in strict IEEE mode\n"); + return IXGBE_ERR_INVALID_LINK_SETTINGS; + } + + /* 10gig parts do not have a word in the EEPROM to determine the + * default flow control setting, so we explicitly set it to full. + */ + if (hw->fc.requested_mode == ixgbe_fc_default) + hw->fc.requested_mode = ixgbe_fc_full; + + /* Determine PAUSE and ASM_DIR bits. */ + switch (hw->fc.requested_mode) { + case ixgbe_fc_none: + pause = false; + asm_dir = false; + break; + case ixgbe_fc_tx_pause: + pause = false; + asm_dir = true; + break; + case ixgbe_fc_rx_pause: + /* Rx Flow control is enabled and Tx Flow control is + * disabled by software override. Since there really + * isn't a way to advertise that we are capable of RX + * Pause ONLY, we will advertise that we support both + * symmetric and asymmetric Rx PAUSE, as such we fall + * through to the fc_full statement. Later, we will + * disable the adapter's ability to send PAUSE frames. 
+ */ + /* Fallthrough */ + case ixgbe_fc_full: + pause = true; + asm_dir = true; + break; + default: + hw_err(hw, "Flow control param set incorrectly\n"); + return IXGBE_ERR_CONFIG; + } + + if (hw->device_id != IXGBE_DEV_ID_X550EM_X_KR) + return 0; + + rc = ixgbe_read_iosf_sb_reg_x550(hw, + IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val); + if (rc) + return rc; + + reg_val &= ~(IXGBE_KRM_AN_CNTL_1_SYM_PAUSE | + IXGBE_KRM_AN_CNTL_1_ASM_PAUSE); + if (pause) + reg_val |= IXGBE_KRM_AN_CNTL_1_SYM_PAUSE; + if (asm_dir) + reg_val |= IXGBE_KRM_AN_CNTL_1_ASM_PAUSE; + rc = ixgbe_write_iosf_sb_reg_x550(hw, + IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); + + /* This device does not fully support AN. */ + hw->fc.disable_fc_autoneg = true; + + return rc; +} + /** ixgbe_enter_lplu_x550em - Transition to low power states * @hw: pointer to hardware structure * @@ -2337,8 +2417,6 @@ static void ixgbe_release_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask) .enable_rx_buff = &ixgbe_enable_rx_buff_generic, \ .get_thermal_sensor_data = NULL, \ .init_thermal_sensor_thresh = NULL, \ - .prot_autoc_read = &prot_autoc_read_generic, \ - .prot_autoc_write = &prot_autoc_write_generic, \ .enable_rx = &ixgbe_enable_rx_generic, \ .disable_rx = &ixgbe_disable_rx_x550, \ @@ -2354,6 +2432,9 @@ static const struct ixgbe_mac_operations mac_ops_X550 = { .setup_sfp = NULL, .acquire_swfw_sync = &ixgbe_acquire_swfw_sync_X540, .release_swfw_sync = &ixgbe_release_swfw_sync_X540, + .prot_autoc_read = prot_autoc_read_generic, + .prot_autoc_write = prot_autoc_write_generic, + .setup_fc = ixgbe_setup_fc_generic, }; static const struct ixgbe_mac_operations mac_ops_X550EM_x = { @@ -2368,6 +2449,7 @@ static const struct ixgbe_mac_operations mac_ops_X550EM_x = { .setup_sfp = ixgbe_setup_sfp_modules_X550em, .acquire_swfw_sync = &ixgbe_acquire_swfw_sync_X550em, .release_swfw_sync = &ixgbe_release_swfw_sync_X550em, + .setup_fc = NULL, /* defined later */ }; #define X550_COMMON_EEP \ From c04f90e592431489df114971ff025265d429e48f Mon Sep 17 00:00:00 2001 From: Rostislav Pehlivanov Date: Wed, 27 Jan 2016 18:33:30 +0000 Subject: [PATCH 0185/1649] ixgbe: add a callback to set the maximum transmit bitrate This commit adds a callback which allows to adjust the maximum transmit bitrate the card can output. This makes it possible to get a smooth traffic instead of the default burst-y behaviour when trying to output e.g. a video stream. Much of the logic needed to get a correct bcnrc_val was taken from the ixgbe_set_vf_rate_limit() function. 
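The arithmetic can be sketched as follows (a simplified standalone example mirroring the calculation visible in the diff below; the helper name and the 14-bit fractional shift are assumptions for illustration):

	/* Simplified sketch of the rate-factor calculation: the link speed
	 * is treated as a fixed-point value and divided by the requested
	 * per-queue maxrate. DEMO_RF_INT_SHIFT is an assumed fraction width.
	 */
	#include <linux/types.h>

	#define DEMO_RF_INT_SHIFT	14

	static u32 demo_rate_factor(u32 link_mbps, u32 maxrate_mbps)
	{
		u32 bcnrc_val = link_mbps << DEMO_RF_INT_SHIFT;

		/* e.g. a 10000 Mb/s link capped at 2500 Mb/s yields a factor
		 * of 4, i.e. the queue is slowed to a quarter of line rate.
		 */
		bcnrc_val /= maxrate_mbps;
		return bcnrc_val;
	}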
Signed-off-by: Rostislav Pehlivanov Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 31 +++++++++++++++++++ .../net/ethernet/intel/ixgbe/ixgbe_sriov.c | 2 +- .../net/ethernet/intel/ixgbe/ixgbe_sriov.h | 1 + 3 files changed, 33 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 0f007d9a2a13..115656c690bf 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -1077,6 +1077,36 @@ static void ixgbe_tx_timeout_reset(struct ixgbe_adapter *adapter) } } +/** + * ixgbe_tx_maxrate - callback to set the maximum per-queue bitrate + **/ +static int ixgbe_tx_maxrate(struct net_device *netdev, + int queue_index, u32 maxrate) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + u32 bcnrc_val = ixgbe_link_mbps(adapter); + + if (!maxrate) + return 0; + + /* Calculate the rate factor values to set */ + bcnrc_val <<= IXGBE_RTTBCNRC_RF_INT_SHIFT; + bcnrc_val /= maxrate; + + /* clear everything but the rate factor */ + bcnrc_val &= IXGBE_RTTBCNRC_RF_INT_MASK | + IXGBE_RTTBCNRC_RF_DEC_MASK; + + /* enable the rate scheduler */ + bcnrc_val |= IXGBE_RTTBCNRC_RS_ENA; + + IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, queue_index); + IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val); + + return 0; +} + /** * ixgbe_clean_tx_irq - Reclaim resources after transmit completes * @q_vector: structure containing interrupt and ring information @@ -8807,6 +8837,7 @@ static const struct net_device_ops ixgbe_netdev_ops = { .ndo_set_mac_address = ixgbe_set_mac, .ndo_change_mtu = ixgbe_change_mtu, .ndo_tx_timeout = ixgbe_tx_timeout, + .ndo_set_tx_maxrate = ixgbe_tx_maxrate, .ndo_vlan_rx_add_vid = ixgbe_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = ixgbe_vlan_rx_kill_vid, .ndo_do_ioctl = ixgbe_ioctl, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c index 4bc249632ec2..adcf00002483 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c @@ -1398,7 +1398,7 @@ out: return err; } -static int ixgbe_link_mbps(struct ixgbe_adapter *adapter) +int ixgbe_link_mbps(struct ixgbe_adapter *adapter) { switch (adapter->link_speed) { case IXGBE_LINK_SPEED_100_FULL: diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h index dad925706f4c..47e65e2f886a 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h @@ -44,6 +44,7 @@ void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter); int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int queue, u8 *mac); int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int queue, u16 vlan, u8 qos); +int ixgbe_link_mbps(struct ixgbe_adapter *adapter); int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int min_tx_rate, int max_tx_rate); int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting); From dbd15b8f9cc3f0f8d665d048a31c0f4b5c9150a5 Mon Sep 17 00:00:00 2001 From: Don Skidmore Date: Wed, 9 Mar 2016 16:45:00 -0500 Subject: [PATCH 0186/1649] ixgbe: Place SWFW semaphore in known valid state at probe It is possible on some HW that a system reset could occur when we are holding the SWFW semaphore lock. So next time the driver was loaded we would see it incorrectly as locked. 
This patch will recover from that state by: Attempting to acquire the semaphore and then regardless of whether or not it was acquire we immediately release it. This will force us into a known good state. Signed-off-by: Don Skidmore Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- .../net/ethernet/intel/ixgbe/ixgbe_82598.c | 1 + .../net/ethernet/intel/ixgbe/ixgbe_82599.c | 1 + drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 4 ++++ drivers/net/ethernet/intel/ixgbe/ixgbe_type.h | 1 + drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c | 20 +++++++++++++++++++ drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h | 1 + drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c | 2 ++ 7 files changed, 30 insertions(+) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c index f47eb12a9c50..6ecd598c6ef5 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c @@ -1196,6 +1196,7 @@ static const struct ixgbe_mac_operations mac_ops_82598 = { .set_fw_drv_ver = NULL, .acquire_swfw_sync = &ixgbe_acquire_swfw_sync, .release_swfw_sync = &ixgbe_release_swfw_sync, + .init_swfw_sync = NULL, .get_thermal_sensor_data = NULL, .init_thermal_sensor_thresh = NULL, .prot_autoc_read = &prot_autoc_read_generic, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c index c3ae5a701d43..4bb6b685263b 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c @@ -2228,6 +2228,7 @@ static const struct ixgbe_mac_operations mac_ops_82599 = { .set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing, .acquire_swfw_sync = &ixgbe_acquire_swfw_sync, .release_swfw_sync = &ixgbe_release_swfw_sync, + .init_swfw_sync = NULL, .get_thermal_sensor_data = &ixgbe_get_thermal_sensor_data_generic, .init_thermal_sensor_thresh = &ixgbe_init_thermal_sensor_thresh_generic, .prot_autoc_read = &prot_autoc_read_82599, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 115656c690bf..77c1c85a957c 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -9126,6 +9126,10 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (err) goto err_sw_init; + /* Make sure the SWFW semaphore is in a valid state */ + if (hw->mac.ops.init_swfw_sync) + hw->mac.ops.init_swfw_sync(hw); + /* Make it possible the adapter to be woken up via WOL */ switch (adapter->hw.mac.type) { case ixgbe_mac_82599EB: diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h index 787d2b21465e..bc012ab48475 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h @@ -3266,6 +3266,7 @@ struct ixgbe_mac_operations { s32 (*enable_rx_dma)(struct ixgbe_hw *, u32); s32 (*acquire_swfw_sync)(struct ixgbe_hw *, u32); void (*release_swfw_sync)(struct ixgbe_hw *, u32); + void (*init_swfw_sync)(struct ixgbe_hw *); s32 (*prot_autoc_read)(struct ixgbe_hw *, bool *, u32 *); s32 (*prot_autoc_write)(struct ixgbe_hw *, u32, bool); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c index c00b67b4c1dc..40824d85d807 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c @@ -746,6 +746,25 @@ static void ixgbe_release_swfw_sync_semaphore(struct 
ixgbe_hw *hw) IXGBE_WRITE_FLUSH(hw); } +/** + * ixgbe_init_swfw_sync_X540 - Release hardware semaphore + * @hw: pointer to hardware structure + * + * This function reset hardware semaphore bits for a semaphore that may + * have be left locked due to a catastrophic failure. + **/ +void ixgbe_init_swfw_sync_X540(struct ixgbe_hw *hw) +{ + /* First try to grab the semaphore but we don't need to bother + * looking to see whether we got the lock or not since we do + * the same thing regardless of whether we got the lock or not. + * We got the lock - we release it. + * We timeout trying to get the lock - we force its release. + */ + ixgbe_get_swfw_sync_semaphore(hw); + ixgbe_release_swfw_sync_semaphore(hw); +} + /** * ixgbe_blink_led_start_X540 - Blink LED based on index. * @hw: pointer to hardware structure @@ -854,6 +873,7 @@ static const struct ixgbe_mac_operations mac_ops_X540 = { .set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing, .acquire_swfw_sync = &ixgbe_acquire_swfw_sync_X540, .release_swfw_sync = &ixgbe_release_swfw_sync_X540, + .init_swfw_sync = &ixgbe_init_swfw_sync_X540, .disable_rx_buff = &ixgbe_disable_rx_buff_generic, .enable_rx_buff = &ixgbe_enable_rx_buff_generic, .get_thermal_sensor_data = NULL, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h index a1468b1f4d8a..e21cd48491d3 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h @@ -36,4 +36,5 @@ s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index); s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index); s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask); void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask); +void ixgbe_init_swfw_sync_X540(struct ixgbe_hw *hw); s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c index 972c9aa17503..9d3f765638cc 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c @@ -2432,6 +2432,7 @@ static const struct ixgbe_mac_operations mac_ops_X550 = { .setup_sfp = NULL, .acquire_swfw_sync = &ixgbe_acquire_swfw_sync_X540, .release_swfw_sync = &ixgbe_release_swfw_sync_X540, + .init_swfw_sync = &ixgbe_init_swfw_sync_X540, .prot_autoc_read = prot_autoc_read_generic, .prot_autoc_write = prot_autoc_write_generic, .setup_fc = ixgbe_setup_fc_generic, @@ -2449,6 +2450,7 @@ static const struct ixgbe_mac_operations mac_ops_X550EM_x = { .setup_sfp = ixgbe_setup_sfp_modules_X550em, .acquire_swfw_sync = &ixgbe_acquire_swfw_sync_X550em, .release_swfw_sync = &ixgbe_release_swfw_sync_X550em, + .init_swfw_sync = &ixgbe_init_swfw_sync_X540, .setup_fc = NULL, /* defined later */ }; From 4ae7834221679bff2d7f75ba80a20673cecb38ad Mon Sep 17 00:00:00 2001 From: Amritha Nambiar Date: Wed, 9 Mar 2016 18:32:16 -0500 Subject: [PATCH 0187/1649] ixgbe: Extend cls_u32 offload to support UDP headers Added support to match on UDP fields in the transport layer. Extended core logic to support multiple headers. 
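With the extended logic, ixgbe_configure_clsu32() walks the ixgbe_ipv4_jumps[] table instead of assuming a single TCP entry: a link filter is accepted when its offset/shift/mask and its single protocol key (0x600/0xff00 for TCP, 0x1100/0xff00 for UDP) match one of the jump entries, and the matching per-protocol field table is then stored in adapter->jump_tables[] under the u32 hash-table id, so later keys added to that table are parsed with the right header layout. The filters below exercise both the TCP and the new UDP path.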
Verified with the following filters : handle 1: u32 divisor 1 u32 ht 800: order 1 link 1: \ offset at 0 mask 0f00 shift 6 plus 0 eat match ip protocol 6 ff u32 ht 1: order 2 \ match tcp src 1024 ffff match tcp dst 23 ffff action drop handle 2: u32 divisor 1 u32 ht 800: order 3 link 2: \ offset at 0 mask 0f00 shift 6 plus 0 eat match ip protocol 17 ff u32 ht 2: order 4 \ match udp src 1025 ffff match udp dst 24 ffff action drop Signed-off-by: Amritha Nambiar Acked-by: John Fastabend Acked-by: Sridhar Samudrala Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 19 ++++++++++--------- .../net/ethernet/intel/ixgbe/ixgbe_model.h | 8 ++++++++ 2 files changed, 18 insertions(+), 9 deletions(-) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 77c1c85a957c..359869cf5b09 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -8257,19 +8257,20 @@ static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter, return -EINVAL; for (i = 0; nexthdr[i].jump; i++) { - if (nexthdr->o != cls->knode.sel->offoff || - nexthdr->s != cls->knode.sel->offshift || - nexthdr->m != cls->knode.sel->offmask || + if (nexthdr[i].o != cls->knode.sel->offoff || + nexthdr[i].s != cls->knode.sel->offshift || + nexthdr[i].m != cls->knode.sel->offmask || /* do not support multiple key jumps its just mad */ cls->knode.sel->nkeys > 1) return -EINVAL; - if (nexthdr->off != cls->knode.sel->keys[0].off || - nexthdr->val != cls->knode.sel->keys[0].val || - nexthdr->mask != cls->knode.sel->keys[0].mask) - return -EINVAL; - - adapter->jump_tables[link_uhtid] = nexthdr->jump; + if (nexthdr[i].off == cls->knode.sel->keys[0].off && + nexthdr[i].val == cls->knode.sel->keys[0].val && + nexthdr[i].mask == cls->knode.sel->keys[0].mask) { + adapter->jump_tables[link_uhtid] = + nexthdr[i].jump; + break; + } } return 0; } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h index 74c53ad9d268..60adde55a8c3 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h @@ -82,6 +82,12 @@ static struct ixgbe_mat_field ixgbe_tcp_fields[] = { { .val = NULL } /* terminal node */ }; +static struct ixgbe_mat_field ixgbe_udp_fields[] = { + {.off = 0, .val = ixgbe_mat_prgm_ports, + .type = IXGBE_ATR_FLOW_TYPE_UDPV4}, + { .val = NULL } /* terminal node */ +}; + struct ixgbe_nexthdr { /* offset, shift, and mask of position to next header */ unsigned int o; @@ -98,6 +104,8 @@ struct ixgbe_nexthdr { static struct ixgbe_nexthdr ixgbe_ipv4_jumps[] = { { .o = 0, .s = 6, .m = 0xf, .off = 8, .val = 0x600, .mask = 0xff00, .jump = ixgbe_tcp_fields}, + { .o = 0, .s = 6, .m = 0xf, + .off = 8, .val = 0x1100, .mask = 0xff00, .jump = ixgbe_udp_fields}, { .jump = NULL } /* terminal node */ }; #endif /* _IXGBE_MODEL_H_ */ From 0c5a616650a08b766e529511348274c1914ef4bf Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Thu, 10 Mar 2016 10:01:10 -0800 Subject: [PATCH 0188/1649] ixgbe: Add support for toggling VLAN filtering flag via ethtool This change makes it so that we can use the ethtool rx-vlan-filter flag to toggle Rx VLAN filtering on and off. This is basically just an extension of the existing VLAN promisc work in that it just adds support for the additional ethtool flag. 
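Once applied, the filter can be flipped at runtime through ethtool's feature interface (the interface name below is only an example):

  ethtool -k eth0 | grep rx-vlan-filter   # show current state
  ethtool -K eth0 rx-vlan-filter off      # enter VLAN promiscuous mode
  ethtool -K eth0 rx-vlan-filter on       # restore hardware VLAN filtering

Clearing the flag routes ixgbe_set_rx_mode() through ixgbe_vlan_promisc_enable(); setting it again goes back through ixgbe_vlan_promisc_disable() and restores normal hardware VLAN filtering.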
Signed-off-by: Alexander Duyck Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 27 ++++++++++--------- 1 file changed, 14 insertions(+), 13 deletions(-) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 359869cf5b09..19bf3860d3d8 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -4447,6 +4447,7 @@ void ixgbe_set_rx_mode(struct net_device *netdev) struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_hw *hw = &adapter->hw; u32 fctrl, vmolr = IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE; + netdev_features_t features = netdev->features; int count; /* Check for Promiscuous and All Multicast modes */ @@ -4464,14 +4465,13 @@ void ixgbe_set_rx_mode(struct net_device *netdev) hw->addr_ctrl.user_set_promisc = true; fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); vmolr |= IXGBE_VMOLR_MPE; - ixgbe_vlan_promisc_enable(adapter); + features &= ~NETIF_F_HW_VLAN_CTAG_FILTER; } else { if (netdev->flags & IFF_ALLMULTI) { fctrl |= IXGBE_FCTRL_MPE; vmolr |= IXGBE_VMOLR_MPE; } hw->addr_ctrl.user_set_promisc = false; - ixgbe_vlan_promisc_disable(adapter); } /* @@ -4504,7 +4504,7 @@ void ixgbe_set_rx_mode(struct net_device *netdev) } /* This is useful for sniffing bad packets. */ - if (adapter->netdev->features & NETIF_F_RXALL) { + if (features & NETIF_F_RXALL) { /* UPE and MPE will be handled by normal PROMISC logic * in e1000e_set_rx_mode */ fctrl |= (IXGBE_FCTRL_SBP | /* Receive bad packets */ @@ -4517,10 +4517,15 @@ void ixgbe_set_rx_mode(struct net_device *netdev) IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); - if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) + if (features & NETIF_F_HW_VLAN_CTAG_RX) ixgbe_vlan_strip_enable(adapter); else ixgbe_vlan_strip_disable(adapter); + + if (features & NETIF_F_HW_VLAN_CTAG_FILTER) + ixgbe_vlan_promisc_disable(adapter); + else + ixgbe_vlan_promisc_enable(adapter); } static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter) @@ -8495,11 +8500,6 @@ static int ixgbe_set_features(struct net_device *netdev, adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; } - if (features & NETIF_F_HW_VLAN_CTAG_RX) - ixgbe_vlan_strip_enable(adapter); - else - ixgbe_vlan_strip_disable(adapter); - if (changed & NETIF_F_RXALL) need_reset = true; @@ -8516,6 +8516,9 @@ static int ixgbe_set_features(struct net_device *netdev, if (need_reset) ixgbe_do_reset(netdev); + else if (changed & (NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_FILTER)) + ixgbe_set_rx_mode(netdev); return 0; } @@ -9190,7 +9193,8 @@ skip_sriov: NETIF_F_RXCSUM | NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_CTAG_TX | - NETIF_F_HW_VLAN_CTAG_RX; + NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_FILTER; if (hw->mac.type >= ixgbe_mac_82599EB) netdev->features |= NETIF_F_SCTP_CRC; @@ -9204,9 +9208,6 @@ skip_sriov: netdev->hw_features |= NETIF_F_NTUPLE | NETIF_F_HW_TC; - /* set this bit last since it cannot be part of hw_features */ - netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; - netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 | From 2e7bd5ef98030999eaf4bf101707e595432fc8c2 Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Thu, 31 Mar 2016 16:53:41 -0400 Subject: [PATCH 0189/1649] net: dsa: mv88e6xxx: protect SID register access Introduce a mv88e6xxx_has_stu() helper to protect the access to the GLOBAL_VTU_SID register, instead of checking switch families. Signed-off-by: Vivien Didelot Signed-off-by: David S. 
Miller --- drivers/net/dsa/mv88e6xxx.c | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c index 50454be86570..29b24446bd2b 100644 --- a/drivers/net/dsa/mv88e6xxx.c +++ b/drivers/net/dsa/mv88e6xxx.c @@ -482,6 +482,16 @@ static bool mv88e6xxx_6352_family(struct dsa_switch *ds) return false; } +static bool mv88e6xxx_has_stu(struct dsa_switch *ds) +{ + /* Does the device have STU and dedicated SID registers for VTU ops? */ + if (mv88e6xxx_6097_family(ds) || mv88e6xxx_6165_family(ds) || + mv88e6xxx_6351_family(ds) || mv88e6xxx_6352_family(ds)) + return true; + + return false; +} + /* We expect the switch to perform auto negotiation if there is a real * phy. However, in the case of a fixed link phy, we force the port * settings from the fixed link settings. @@ -1329,7 +1339,9 @@ static int _mv88e6xxx_vtu_getnext(struct dsa_switch *ds, return ret; next.fid = ret & GLOBAL_VTU_FID_MASK; + } + if (mv88e6xxx_has_stu(ds)) { ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_VTU_SID); if (ret < 0) @@ -1412,8 +1424,7 @@ static int _mv88e6xxx_vtu_loadpurge(struct dsa_switch *ds, if (ret < 0) return ret; - if (mv88e6xxx_6097_family(ds) || mv88e6xxx_6165_family(ds) || - mv88e6xxx_6351_family(ds) || mv88e6xxx_6352_family(ds)) { + if (mv88e6xxx_has_stu(ds)) { reg = entry->sid & GLOBAL_VTU_SID_MASK; ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_SID, reg); if (ret < 0) From b426e5f7fe4cd7041b3d8bed58f1e60bba1d11bf Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Thu, 31 Mar 2016 16:53:42 -0400 Subject: [PATCH 0190/1649] net: dsa: mv88e6xxx: protect FID registers access Only switch families with 4096 address databases have dedicated FID registers for ATU and VTU operations. Factorize the access to the GLOBAL_ATU_FID register and introduce a mv88e6xxx_has_fid_reg() helper function to protect the access to GLOBAL_ATU_FID and GLOBAL_VTU_FID. Signed-off-by: Vivien Didelot Signed-off-by: David S. Miller --- drivers/net/dsa/mv88e6xxx.c | 42 ++++++++++++++++++++----------------- 1 file changed, 23 insertions(+), 19 deletions(-) diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c index 29b24446bd2b..ebb9ae927426 100644 --- a/drivers/net/dsa/mv88e6xxx.c +++ b/drivers/net/dsa/mv88e6xxx.c @@ -482,6 +482,16 @@ static bool mv88e6xxx_6352_family(struct dsa_switch *ds) return false; } +static bool mv88e6xxx_has_fid_reg(struct dsa_switch *ds) +{ + /* Does the device have dedicated FID registers for ATU and VTU ops? */ + if (mv88e6xxx_6097_family(ds) || mv88e6xxx_6165_family(ds) || + mv88e6xxx_6351_family(ds) || mv88e6xxx_6352_family(ds)) + return true; + + return false; +} + static bool mv88e6xxx_has_stu(struct dsa_switch *ds) { /* Does the device have STU and dedicated SID registers for VTU ops? */ @@ -961,10 +971,16 @@ out: return ret; } -static int _mv88e6xxx_atu_cmd(struct dsa_switch *ds, u16 cmd) +static int _mv88e6xxx_atu_cmd(struct dsa_switch *ds, u16 fid, u16 cmd) { int ret; + if (mv88e6xxx_has_fid_reg(ds)) { + ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_FID, fid); + if (ret < 0) + return ret; + } + ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_OP, cmd); if (ret < 0) return ret; @@ -1011,11 +1027,6 @@ static int _mv88e6xxx_atu_flush_move(struct dsa_switch *ds, return err; if (entry->fid) { - err = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_FID, - entry->fid); - if (err) - return err; - op = static_too ? 
GLOBAL_ATU_OP_FLUSH_MOVE_ALL_DB : GLOBAL_ATU_OP_FLUSH_MOVE_NON_STATIC_DB; } else { @@ -1023,7 +1034,7 @@ static int _mv88e6xxx_atu_flush_move(struct dsa_switch *ds, GLOBAL_ATU_OP_FLUSH_MOVE_NON_STATIC; } - return _mv88e6xxx_atu_cmd(ds, op); + return _mv88e6xxx_atu_cmd(ds, entry->fid, op); } static int _mv88e6xxx_atu_flush(struct dsa_switch *ds, u16 fid, bool static_too) @@ -1331,8 +1342,7 @@ static int _mv88e6xxx_vtu_getnext(struct dsa_switch *ds, if (ret < 0) return ret; - if (mv88e6xxx_6097_family(ds) || mv88e6xxx_6165_family(ds) || - mv88e6xxx_6351_family(ds) || mv88e6xxx_6352_family(ds)) { + if (mv88e6xxx_has_fid_reg(ds)) { ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_VTU_FID); if (ret < 0) @@ -1429,7 +1439,9 @@ static int _mv88e6xxx_vtu_loadpurge(struct dsa_switch *ds, ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_SID, reg); if (ret < 0) return ret; + } + if (mv88e6xxx_has_fid_reg(ds)) { reg = entry->fid & GLOBAL_VTU_FID_MASK; ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_FID, reg); if (ret < 0) @@ -1976,11 +1988,7 @@ static int _mv88e6xxx_atu_load(struct dsa_switch *ds, if (ret < 0) return ret; - ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_FID, entry->fid); - if (ret < 0) - return ret; - - return _mv88e6xxx_atu_cmd(ds, GLOBAL_ATU_OP_LOAD_DB); + return _mv88e6xxx_atu_cmd(ds, entry->fid, GLOBAL_ATU_OP_LOAD_DB); } static int _mv88e6xxx_port_fdb_load(struct dsa_switch *ds, int port, @@ -2063,11 +2071,7 @@ static int _mv88e6xxx_atu_getnext(struct dsa_switch *ds, u16 fid, if (ret < 0) return ret; - ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_FID, fid); - if (ret < 0) - return ret; - - ret = _mv88e6xxx_atu_cmd(ds, GLOBAL_ATU_OP_GET_NEXT_DB); + ret = _mv88e6xxx_atu_cmd(ds, fid, GLOBAL_ATU_OP_GET_NEXT_DB); if (ret < 0) return ret; From f74df0be82d7d29747bfd68d955f4f573f9e5691 Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Thu, 31 Mar 2016 16:53:43 -0400 Subject: [PATCH 0191/1649] net: dsa: mv88e6xxx: variable number of databases Marvell switch chips have different number of address databases. The code currently only supports models with 4096 databases. Such switch has dedicated FID registers for ATU and VTU operations. Models with fewer databases have their FID split in several registers. List them all but only support models with 4096 databases at the moment. Signed-off-by: Vivien Didelot Signed-off-by: David S. 
Miller --- drivers/net/dsa/mv88e6xxx.c | 38 +++++++++++++++++++++++++++++++++---- 1 file changed, 34 insertions(+), 4 deletions(-) diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c index ebb9ae927426..f103319dbe14 100644 --- a/drivers/net/dsa/mv88e6xxx.c +++ b/drivers/net/dsa/mv88e6xxx.c @@ -482,6 +482,30 @@ static bool mv88e6xxx_6352_family(struct dsa_switch *ds) return false; } +static unsigned int mv88e6xxx_num_databases(struct dsa_switch *ds) +{ + struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); + + /* The following devices have 4-bit identifiers for 16 databases */ + if (ps->id == PORT_SWITCH_ID_6061) + return 16; + + /* The following devices have 6-bit identifiers for 64 databases */ + if (ps->id == PORT_SWITCH_ID_6065) + return 64; + + /* The following devices have 8-bit identifiers for 256 databases */ + if (mv88e6xxx_6095_family(ds) || mv88e6xxx_6185_family(ds)) + return 256; + + /* The following devices have 12-bit identifiers for 4096 databases */ + if (mv88e6xxx_6097_family(ds) || mv88e6xxx_6165_family(ds) || + mv88e6xxx_6351_family(ds) || mv88e6xxx_6352_family(ds)) + return 4096; + + return 0; +} + static bool mv88e6xxx_has_fid_reg(struct dsa_switch *ds) { /* Does the device have dedicated FID registers for ATU and VTU ops? */ @@ -1534,9 +1558,15 @@ loadpurge: static int _mv88e6xxx_port_fid(struct dsa_switch *ds, int port, u16 *new, u16 *old) { + u16 upper_mask; u16 fid; int ret; + if (mv88e6xxx_num_databases(ds) == 4096) + upper_mask = 0xff; + else + return -EOPNOTSUPP; + /* Port's default FID bits 3:0 are located in reg 0x06, offset 12 */ ret = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_BASE_VLAN); if (ret < 0) @@ -1559,11 +1589,11 @@ static int _mv88e6xxx_port_fid(struct dsa_switch *ds, int port, u16 *new, if (ret < 0) return ret; - fid |= (ret & PORT_CONTROL_1_FID_11_4_MASK) << 4; + fid |= (ret & upper_mask) << 4; if (new) { - ret &= ~PORT_CONTROL_1_FID_11_4_MASK; - ret |= (*new >> 4) & PORT_CONTROL_1_FID_11_4_MASK; + ret &= ~upper_mask; + ret |= (*new >> 4) & upper_mask; ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_CONTROL_1, ret); @@ -1627,7 +1657,7 @@ static int _mv88e6xxx_fid_new(struct dsa_switch *ds, u16 *fid) * databases are not needed. Return the next positive available. */ *fid = find_next_zero_bit(fid_bitmap, MV88E6XXX_N_FID, 1); - if (unlikely(*fid == MV88E6XXX_N_FID)) + if (unlikely(*fid >= mv88e6xxx_num_databases(ds))) return -ENOSPC; /* Clear the database */ From 11ea809f1a74b006f08e7dad0b257e06c817f313 Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Thu, 31 Mar 2016 16:53:44 -0400 Subject: [PATCH 0192/1649] net: dsa: mv88e6xxx: support 256 databases The 6185 family of devices has only 256 address databases. Their 8-bit FID for ATU and VTU operations are split into ATU Control and ATU/VTU Operation registers. Signed-off-by: Vivien Didelot Signed-off-by: David S. 
Miller --- drivers/net/dsa/mv88e6xxx.c | 36 +++++++++++++++++++++++++++++++++++- 1 file changed, 35 insertions(+), 1 deletion(-) diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c index f103319dbe14..75a4abc595b1 100644 --- a/drivers/net/dsa/mv88e6xxx.c +++ b/drivers/net/dsa/mv88e6xxx.c @@ -1003,6 +1003,20 @@ static int _mv88e6xxx_atu_cmd(struct dsa_switch *ds, u16 fid, u16 cmd) ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_FID, fid); if (ret < 0) return ret; + } else if (mv88e6xxx_num_databases(ds) == 256) { + /* ATU DBNum[7:4] are located in ATU Control 15:12 */ + ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_ATU_CONTROL); + if (ret < 0) + return ret; + + ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_CONTROL, + (ret & 0xfff) | + ((fid << 8) & 0xf000)); + if (ret < 0) + return ret; + + /* ATU DBNum[3:0] are located in ATU Operation 3:0 */ + cmd |= fid & 0xf; } ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_OP, cmd); @@ -1373,6 +1387,17 @@ static int _mv88e6xxx_vtu_getnext(struct dsa_switch *ds, return ret; next.fid = ret & GLOBAL_VTU_FID_MASK; + } else if (mv88e6xxx_num_databases(ds) == 256) { + /* VTU DBNum[7:4] are located in VTU Operation 11:8, and + * VTU DBNum[3:0] are located in VTU Operation 3:0 + */ + ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, + GLOBAL_VTU_OP); + if (ret < 0) + return ret; + + next.fid = (ret & 0xf00) >> 4; + next.fid |= ret & 0xf; } if (mv88e6xxx_has_stu(ds)) { @@ -1443,6 +1468,7 @@ unlock: static int _mv88e6xxx_vtu_loadpurge(struct dsa_switch *ds, struct mv88e6xxx_vtu_stu_entry *entry) { + u16 op = GLOBAL_VTU_OP_VTU_LOAD_PURGE; u16 reg = 0; int ret; @@ -1470,6 +1496,12 @@ static int _mv88e6xxx_vtu_loadpurge(struct dsa_switch *ds, ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_FID, reg); if (ret < 0) return ret; + } else if (mv88e6xxx_num_databases(ds) == 256) { + /* VTU DBNum[7:4] are located in VTU Operation 11:8, and + * VTU DBNum[3:0] are located in VTU Operation 3:0 + */ + op |= (entry->fid & 0xf0) << 8; + op |= entry->fid & 0xf; } reg = GLOBAL_VTU_VID_VALID; @@ -1479,7 +1511,7 @@ loadpurge: if (ret < 0) return ret; - return _mv88e6xxx_vtu_cmd(ds, GLOBAL_VTU_OP_VTU_LOAD_PURGE); + return _mv88e6xxx_vtu_cmd(ds, op); } static int _mv88e6xxx_stu_getnext(struct dsa_switch *ds, u8 sid, @@ -1564,6 +1596,8 @@ static int _mv88e6xxx_port_fid(struct dsa_switch *ds, int port, u16 *new, if (mv88e6xxx_num_databases(ds) == 4096) upper_mask = 0xff; + else if (mv88e6xxx_num_databases(ds) == 256) + upper_mask = 0xf; else return -EOPNOTSUPP; From f93dd042de08633ff481d092245f2d9b4a3cdb6a Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Thu, 31 Mar 2016 16:53:45 -0400 Subject: [PATCH 0193/1649] net: dsa: mv88e6xxx: map destination addresses for 6185 The 88E6185 switch also has a MapDA bit in its Port Control 2 register. When this bit is cleared, all frames are sent out to the CPU port. Set this bit to rely on address databases (ATU) hits and direct frames out of the correct ports, and thus allow hardware bridging. Signed-off-by: Vivien Didelot Signed-off-by: David S. 
Miller --- drivers/net/dsa/mv88e6xxx.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c index 75a4abc595b1..0dda2817d0ec 100644 --- a/drivers/net/dsa/mv88e6xxx.c +++ b/drivers/net/dsa/mv88e6xxx.c @@ -2523,7 +2523,8 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port) reg = 0; if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) || mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) || - mv88e6xxx_6095_family(ds) || mv88e6xxx_6320_family(ds)) + mv88e6xxx_6095_family(ds) || mv88e6xxx_6320_family(ds) || + mv88e6xxx_6185_family(ds)) reg = PORT_CONTROL_2_MAP_DA; if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) || From 26892ffc80b4276f6f0d61232a769100023b38ab Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Thu, 31 Mar 2016 16:53:46 -0400 Subject: [PATCH 0194/1649] net: dsa: mv88e6131: enable hardware bridging By adding support for bridge operations, FDB operations, and optionally VLAN operations (for 802.1Q and VLAN filtering aware systems), the switch bridges ports correctly, the CPU is able to populate the hardware address databases, and thus hardware bridging becomes functional within the 88E6185 family of switches. Signed-off-by: Vivien Didelot Signed-off-by: David S. Miller --- drivers/net/dsa/mv88e6131.c | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/drivers/net/dsa/mv88e6131.c b/drivers/net/dsa/mv88e6131.c index a92ca651c399..24070287c2bc 100644 --- a/drivers/net/dsa/mv88e6131.c +++ b/drivers/net/dsa/mv88e6131.c @@ -169,6 +169,17 @@ struct dsa_switch_driver mv88e6131_switch_driver = { .get_ethtool_stats = mv88e6xxx_get_ethtool_stats, .get_sset_count = mv88e6xxx_get_sset_count, .adjust_link = mv88e6xxx_adjust_link, + .port_bridge_join = mv88e6xxx_port_bridge_join, + .port_bridge_leave = mv88e6xxx_port_bridge_leave, + .port_vlan_filtering = mv88e6xxx_port_vlan_filtering, + .port_vlan_prepare = mv88e6xxx_port_vlan_prepare, + .port_vlan_add = mv88e6xxx_port_vlan_add, + .port_vlan_del = mv88e6xxx_port_vlan_del, + .port_vlan_dump = mv88e6xxx_port_vlan_dump, + .port_fdb_prepare = mv88e6xxx_port_fdb_prepare, + .port_fdb_add = mv88e6xxx_port_fdb_add, + .port_fdb_del = mv88e6xxx_port_fdb_del, + .port_fdb_dump = mv88e6xxx_port_fdb_dump, }; MODULE_ALIAS("platform:mv88e6085"); From a4298e4522d687a79af8f8fbb7eca68399ab2d81 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 1 Apr 2016 08:52:12 -0700 Subject: [PATCH 0195/1649] net: add SOCK_RCU_FREE socket flag We want a generic way to insert an RCU grace period before socket freeing for cases where RCU_SLAB_DESTROY_BY_RCU is adding too much overhead. SLAB_DESTROY_BY_RCU strict rules force us to take a reference on the socket sk_refcnt, and it is a performance problem for UDP encapsulation, or TCP synflood behavior, as many CPUs might attempt the atomic operations on a shared sk_refcnt UDP sockets and TCP listeners can set SOCK_RCU_FREE so that their lookup can use traditional RCU rules, without refcount changes. They can set the flag only once hashed and visible by other cpus. Signed-off-by: Eric Dumazet Cc: Tom Herbert Tested-by: Tom Herbert Signed-off-by: David S. 
Miller --- include/net/sock.h | 2 ++ net/core/sock.c | 14 +++++++++++++- 2 files changed, 15 insertions(+), 1 deletion(-) diff --git a/include/net/sock.h b/include/net/sock.h index e91b87f54f99..9e77353a92ae 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -438,6 +438,7 @@ struct sock { struct sk_buff *skb); void (*sk_destruct)(struct sock *sk); struct sock_reuseport __rcu *sk_reuseport_cb; + struct rcu_head sk_rcu; }; #define __sk_user_data(sk) ((*((void __rcu **)&(sk)->sk_user_data))) @@ -720,6 +721,7 @@ enum sock_flags { */ SOCK_FILTER_LOCKED, /* Filter cannot be changed anymore */ SOCK_SELECT_ERR_QUEUE, /* Wake select on error queue */ + SOCK_RCU_FREE, /* wait rcu grace period in sk_destruct() */ }; #define SK_FLAGS_TIMESTAMP ((1UL << SOCK_TIMESTAMP) | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE)) diff --git a/net/core/sock.c b/net/core/sock.c index 315f5e57fffe..7a6a063b28b3 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -1419,8 +1419,12 @@ struct sock *sk_alloc(struct net *net, int family, gfp_t priority, } EXPORT_SYMBOL(sk_alloc); -void sk_destruct(struct sock *sk) +/* Sockets having SOCK_RCU_FREE will call this function after one RCU + * grace period. This is the case for UDP sockets and TCP listeners. + */ +static void __sk_destruct(struct rcu_head *head) { + struct sock *sk = container_of(head, struct sock, sk_rcu); struct sk_filter *filter; if (sk->sk_destruct) @@ -1449,6 +1453,14 @@ void sk_destruct(struct sock *sk) sk_prot_free(sk->sk_prot_creator, sk); } +void sk_destruct(struct sock *sk) +{ + if (sock_flag(sk, SOCK_RCU_FREE)) + call_rcu(&sk->sk_rcu, __sk_destruct); + else + __sk_destruct(&sk->sk_rcu); +} + static void __sk_free(struct sock *sk) { if (unlikely(sock_diag_has_destroy_listeners(sk) && sk->sk_net_refcnt)) From ca065d0cf80fa547724440a8bf37f1e674d917c0 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 1 Apr 2016 08:52:13 -0700 Subject: [PATCH 0196/1649] udp: no longer use SLAB_DESTROY_BY_RCU Tom Herbert would like not touching UDP socket refcnt for encapsulated traffic. For this to happen, we need to use normal RCU rules, with a grace period before freeing a socket. UDP sockets are not short lived in the high usage case, so the added cost of call_rcu() should not be a concern. This actually removes a lot of complexity in UDP stack. Multicast receives no longer need to hold a bucket spinlock. Note that ip early demux still needs to take a reference on the socket. Same remark for functions used by xt_socket and xt_PROXY netfilter modules, but this might be changed later. Performance for a single UDP socket receiving flood traffic from many RX queues/cpus. Simple udp_rx using simple recvfrom() loop : 438 kpps instead of 374 kpps : 17 % increase of the peak rate. v2: Addressed Willem de Bruijn feedback in multicast handling - keep early demux break in __udp4_lib_demux_lookup() Signed-off-by: Eric Dumazet Cc: Tom Herbert Cc: Willem de Bruijn Tested-by: Tom Herbert Signed-off-by: David S. 
Miller --- include/linux/udp.h | 8 +- include/net/sock.h | 12 +- include/net/udp.h | 2 +- net/ipv4/udp.c | 293 +++++++++++++------------------------------- net/ipv4/udp_diag.c | 18 +-- net/ipv6/udp.c | 198 ++++++++++-------------------- 6 files changed, 172 insertions(+), 359 deletions(-) diff --git a/include/linux/udp.h b/include/linux/udp.h index 87c094961bd5..32342754643a 100644 --- a/include/linux/udp.h +++ b/include/linux/udp.h @@ -98,11 +98,11 @@ static inline bool udp_get_no_check6_rx(struct sock *sk) return udp_sk(sk)->no_check6_rx; } -#define udp_portaddr_for_each_entry(__sk, node, list) \ - hlist_nulls_for_each_entry(__sk, node, list, __sk_common.skc_portaddr_node) +#define udp_portaddr_for_each_entry(__sk, list) \ + hlist_for_each_entry(__sk, list, __sk_common.skc_portaddr_node) -#define udp_portaddr_for_each_entry_rcu(__sk, node, list) \ - hlist_nulls_for_each_entry_rcu(__sk, node, list, __sk_common.skc_portaddr_node) +#define udp_portaddr_for_each_entry_rcu(__sk, list) \ + hlist_for_each_entry_rcu(__sk, list, __sk_common.skc_portaddr_node) #define IS_UDPLITE(__sk) (udp_sk(__sk)->pcflag) diff --git a/include/net/sock.h b/include/net/sock.h index 9e77353a92ae..7ad73db9dde2 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -178,7 +178,7 @@ struct sock_common { int skc_bound_dev_if; union { struct hlist_node skc_bind_node; - struct hlist_nulls_node skc_portaddr_node; + struct hlist_node skc_portaddr_node; }; struct proto *skc_prot; possible_net_t skc_net; @@ -670,18 +670,18 @@ static inline void sk_add_bind_node(struct sock *sk, hlist_for_each_entry(__sk, list, sk_bind_node) /** - * sk_nulls_for_each_entry_offset - iterate over a list at a given struct offset + * sk_for_each_entry_offset_rcu - iterate over a list at a given struct offset * @tpos: the type * to use as a loop cursor. * @pos: the &struct hlist_node to use as a loop cursor. * @head: the head for your list. * @offset: offset of hlist_node within the struct. 
* */ -#define sk_nulls_for_each_entry_offset(tpos, pos, head, offset) \ - for (pos = (head)->first; \ - (!is_a_nulls(pos)) && \ +#define sk_for_each_entry_offset_rcu(tpos, pos, head, offset) \ + for (pos = rcu_dereference((head)->first); \ + pos != NULL && \ ({ tpos = (typeof(*tpos) *)((void *)pos - offset); 1;}); \ - pos = pos->next) + pos = rcu_dereference(pos->next)) static inline struct user_namespace *sk_user_ns(struct sock *sk) { diff --git a/include/net/udp.h b/include/net/udp.h index 92927f729ac8..d870ec1611c4 100644 --- a/include/net/udp.h +++ b/include/net/udp.h @@ -59,7 +59,7 @@ struct udp_skb_cb { * @lock: spinlock protecting changes to head/count */ struct udp_hslot { - struct hlist_nulls_head head; + struct hlist_head head; int count; spinlock_t lock; } __attribute__((aligned(2 * sizeof(long)))); diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 45ff590661f4..355bdb221057 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -143,10 +143,9 @@ static int udp_lib_lport_inuse(struct net *net, __u16 num, unsigned int log) { struct sock *sk2; - struct hlist_nulls_node *node; kuid_t uid = sock_i_uid(sk); - sk_nulls_for_each(sk2, node, &hslot->head) { + sk_for_each(sk2, &hslot->head) { if (net_eq(sock_net(sk2), net) && sk2 != sk && (bitmap || udp_sk(sk2)->udp_port_hash == num) && @@ -177,12 +176,11 @@ static int udp_lib_lport_inuse2(struct net *net, __u16 num, bool match_wildcard)) { struct sock *sk2; - struct hlist_nulls_node *node; kuid_t uid = sock_i_uid(sk); int res = 0; spin_lock(&hslot2->lock); - udp_portaddr_for_each_entry(sk2, node, &hslot2->head) { + udp_portaddr_for_each_entry(sk2, &hslot2->head) { if (net_eq(sock_net(sk2), net) && sk2 != sk && (udp_sk(sk2)->udp_port_hash == num) && @@ -207,11 +205,10 @@ static int udp_reuseport_add_sock(struct sock *sk, struct udp_hslot *hslot, bool match_wildcard)) { struct net *net = sock_net(sk); - struct hlist_nulls_node *node; kuid_t uid = sock_i_uid(sk); struct sock *sk2; - sk_nulls_for_each(sk2, node, &hslot->head) { + sk_for_each(sk2, &hslot->head) { if (net_eq(sock_net(sk2), net) && sk2 != sk && sk2->sk_family == sk->sk_family && @@ -333,17 +330,18 @@ found: goto fail_unlock; } - sk_nulls_add_node_rcu(sk, &hslot->head); + sk_add_node_rcu(sk, &hslot->head); hslot->count++; sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash); spin_lock(&hslot2->lock); - hlist_nulls_add_head_rcu(&udp_sk(sk)->udp_portaddr_node, + hlist_add_head_rcu(&udp_sk(sk)->udp_portaddr_node, &hslot2->head); hslot2->count++; spin_unlock(&hslot2->lock); } + sock_set_flag(sk, SOCK_RCU_FREE); error = 0; fail_unlock: spin_unlock_bh(&hslot->lock); @@ -497,37 +495,27 @@ static struct sock *udp4_lib_lookup2(struct net *net, struct sk_buff *skb) { struct sock *sk, *result; - struct hlist_nulls_node *node; int score, badness, matches = 0, reuseport = 0; - bool select_ok = true; u32 hash = 0; -begin: result = NULL; badness = 0; - udp_portaddr_for_each_entry_rcu(sk, node, &hslot2->head) { + udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) { score = compute_score2(sk, net, saddr, sport, daddr, hnum, dif); if (score > badness) { - result = sk; - badness = score; reuseport = sk->sk_reuseport; if (reuseport) { hash = udp_ehashfn(net, daddr, hnum, saddr, sport); - if (select_ok) { - struct sock *sk2; - - sk2 = reuseport_select_sock(sk, hash, skb, + result = reuseport_select_sock(sk, hash, skb, sizeof(struct udphdr)); - if (sk2) { - result = sk2; - select_ok = false; - goto found; - } - } + if (result) + return 
result; matches = 1; } + badness = score; + result = sk; } else if (score == badness && reuseport) { matches++; if (reciprocal_scale(hash, matches) == 0) @@ -535,23 +523,6 @@ begin: hash = next_pseudo_random32(hash); } } - /* - * if the nulls value we got at the end of this lookup is - * not the expected one, we must restart lookup. - * We probably met an item that was moved to another chain. - */ - if (get_nulls_value(node) != slot2) - goto begin; - if (result) { -found: - if (unlikely(!atomic_inc_not_zero_hint(&result->sk_refcnt, 2))) - result = NULL; - else if (unlikely(compute_score2(result, net, saddr, sport, - daddr, hnum, dif) < badness)) { - sock_put(result); - goto begin; - } - } return result; } @@ -563,15 +534,12 @@ struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr, int dif, struct udp_table *udptable, struct sk_buff *skb) { struct sock *sk, *result; - struct hlist_nulls_node *node; unsigned short hnum = ntohs(dport); unsigned int hash2, slot2, slot = udp_hashfn(net, hnum, udptable->mask); struct udp_hslot *hslot2, *hslot = &udptable->hash[slot]; int score, badness, matches = 0, reuseport = 0; - bool select_ok = true; u32 hash = 0; - rcu_read_lock(); if (hslot->count > 10) { hash2 = udp4_portaddr_hash(net, daddr, hnum); slot2 = hash2 & udptable->mask; @@ -593,35 +561,27 @@ struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr, htonl(INADDR_ANY), hnum, dif, hslot2, slot2, skb); } - rcu_read_unlock(); return result; } begin: result = NULL; badness = 0; - sk_nulls_for_each_rcu(sk, node, &hslot->head) { + sk_for_each_rcu(sk, &hslot->head) { score = compute_score(sk, net, saddr, hnum, sport, daddr, dport, dif); if (score > badness) { - result = sk; - badness = score; reuseport = sk->sk_reuseport; if (reuseport) { hash = udp_ehashfn(net, daddr, hnum, saddr, sport); - if (select_ok) { - struct sock *sk2; - - sk2 = reuseport_select_sock(sk, hash, skb, + result = reuseport_select_sock(sk, hash, skb, sizeof(struct udphdr)); - if (sk2) { - result = sk2; - select_ok = false; - goto found; - } - } + if (result) + return result; matches = 1; } + result = sk; + badness = score; } else if (score == badness && reuseport) { matches++; if (reciprocal_scale(hash, matches) == 0) @@ -629,25 +589,6 @@ begin: hash = next_pseudo_random32(hash); } } - /* - * if the nulls value we got at the end of this lookup is - * not the expected one, we must restart lookup. - * We probably met an item that was moved to another chain. - */ - if (get_nulls_value(node) != slot) - goto begin; - - if (result) { -found: - if (unlikely(!atomic_inc_not_zero_hint(&result->sk_refcnt, 2))) - result = NULL; - else if (unlikely(compute_score(result, net, saddr, hnum, sport, - daddr, dport, dif) < badness)) { - sock_put(result); - goto begin; - } - } - rcu_read_unlock(); return result; } EXPORT_SYMBOL_GPL(__udp4_lib_lookup); @@ -663,13 +604,24 @@ static inline struct sock *__udp4_lib_lookup_skb(struct sk_buff *skb, udptable, skb); } +/* Must be called under rcu_read_lock(). + * Does increment socket refcount. 
+ */ +#if IS_ENABLED(CONFIG_NETFILTER_XT_MATCH_SOCKET) || \ + IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TPROXY) struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport, __be32 daddr, __be16 dport, int dif) { - return __udp4_lib_lookup(net, saddr, sport, daddr, dport, dif, - &udp_table, NULL); + struct sock *sk; + + sk = __udp4_lib_lookup(net, saddr, sport, daddr, dport, + dif, &udp_table, NULL); + if (sk && !atomic_inc_not_zero(&sk->sk_refcnt)) + sk = NULL; + return sk; } EXPORT_SYMBOL_GPL(udp4_lib_lookup); +#endif static inline bool __udp_is_mcast_sock(struct net *net, struct sock *sk, __be16 loc_port, __be32 loc_addr, @@ -771,7 +723,7 @@ void __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable) sk->sk_err = err; sk->sk_error_report(sk); out: - sock_put(sk); + return; } void udp_err(struct sk_buff *skb, u32 info) @@ -1474,13 +1426,13 @@ void udp_lib_unhash(struct sock *sk) spin_lock_bh(&hslot->lock); if (rcu_access_pointer(sk->sk_reuseport_cb)) reuseport_detach_sock(sk); - if (sk_nulls_del_node_init_rcu(sk)) { + if (sk_del_node_init_rcu(sk)) { hslot->count--; inet_sk(sk)->inet_num = 0; sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1); spin_lock(&hslot2->lock); - hlist_nulls_del_init_rcu(&udp_sk(sk)->udp_portaddr_node); + hlist_del_init_rcu(&udp_sk(sk)->udp_portaddr_node); hslot2->count--; spin_unlock(&hslot2->lock); } @@ -1513,12 +1465,12 @@ void udp_lib_rehash(struct sock *sk, u16 newhash) if (hslot2 != nhslot2) { spin_lock(&hslot2->lock); - hlist_nulls_del_init_rcu(&udp_sk(sk)->udp_portaddr_node); + hlist_del_init_rcu(&udp_sk(sk)->udp_portaddr_node); hslot2->count--; spin_unlock(&hslot2->lock); spin_lock(&nhslot2->lock); - hlist_nulls_add_head_rcu(&udp_sk(sk)->udp_portaddr_node, + hlist_add_head_rcu(&udp_sk(sk)->udp_portaddr_node, &nhslot2->head); nhslot2->count++; spin_unlock(&nhslot2->lock); @@ -1697,35 +1649,6 @@ drop: return -1; } -static void flush_stack(struct sock **stack, unsigned int count, - struct sk_buff *skb, unsigned int final) -{ - unsigned int i; - struct sk_buff *skb1 = NULL; - struct sock *sk; - - for (i = 0; i < count; i++) { - sk = stack[i]; - if (likely(!skb1)) - skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC); - - if (!skb1) { - atomic_inc(&sk->sk_drops); - UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS, - IS_UDPLITE(sk)); - UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, - IS_UDPLITE(sk)); - } - - if (skb1 && udp_queue_rcv_skb(sk, skb1) <= 0) - skb1 = NULL; - - sock_put(sk); - } - if (unlikely(skb1)) - kfree_skb(skb1); -} - /* For TCP sockets, sk_rx_dst is protected by socket lock * For UDP, we use xchg() to guard against concurrent changes. 
*/ @@ -1749,14 +1672,14 @@ static int __udp4_lib_mcast_deliver(struct net *net, struct sk_buff *skb, struct udp_table *udptable, int proto) { - struct sock *sk, *stack[256 / sizeof(struct sock *)]; - struct hlist_nulls_node *node; + struct sock *sk, *first = NULL; unsigned short hnum = ntohs(uh->dest); struct udp_hslot *hslot = udp_hashslot(udptable, net, hnum); - int dif = skb->dev->ifindex; - unsigned int count = 0, offset = offsetof(typeof(*sk), sk_nulls_node); unsigned int hash2 = 0, hash2_any = 0, use_hash2 = (hslot->count > 10); - bool inner_flushed = false; + unsigned int offset = offsetof(typeof(*sk), sk_node); + int dif = skb->dev->ifindex; + struct hlist_node *node; + struct sk_buff *nskb; if (use_hash2) { hash2_any = udp4_portaddr_hash(net, htonl(INADDR_ANY), hnum) & @@ -1767,23 +1690,28 @@ start_lookup: offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node); } - spin_lock(&hslot->lock); - sk_nulls_for_each_entry_offset(sk, node, &hslot->head, offset) { - if (__udp_is_mcast_sock(net, sk, - uh->dest, daddr, - uh->source, saddr, - dif, hnum)) { - if (unlikely(count == ARRAY_SIZE(stack))) { - flush_stack(stack, count, skb, ~0); - inner_flushed = true; - count = 0; - } - stack[count++] = sk; - sock_hold(sk); - } - } + sk_for_each_entry_offset_rcu(sk, node, &hslot->head, offset) { + if (!__udp_is_mcast_sock(net, sk, uh->dest, daddr, + uh->source, saddr, dif, hnum)) + continue; - spin_unlock(&hslot->lock); + if (!first) { + first = sk; + continue; + } + nskb = skb_clone(skb, GFP_ATOMIC); + + if (unlikely(!nskb)) { + atomic_inc(&sk->sk_drops); + UDP_INC_STATS_BH(net, UDP_MIB_RCVBUFERRORS, + IS_UDPLITE(sk)); + UDP_INC_STATS_BH(net, UDP_MIB_INERRORS, + IS_UDPLITE(sk)); + continue; + } + if (udp_queue_rcv_skb(sk, nskb) > 0) + consume_skb(nskb); + } /* Also lookup *:port if we are using hash2 and haven't done so yet. 
*/ if (use_hash2 && hash2 != hash2_any) { @@ -1791,16 +1719,13 @@ start_lookup: goto start_lookup; } - /* - * do the slow work with no lock held - */ - if (count) { - flush_stack(stack, count, skb, count - 1); + if (first) { + if (udp_queue_rcv_skb(first, skb) > 0) + consume_skb(skb); } else { - if (!inner_flushed) - UDP_INC_STATS_BH(net, UDP_MIB_IGNOREDMULTI, - proto == IPPROTO_UDPLITE); - consume_skb(skb); + kfree_skb(skb); + UDP_INC_STATS_BH(net, UDP_MIB_IGNOREDMULTI, + proto == IPPROTO_UDPLITE); } return 0; } @@ -1897,7 +1822,6 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, inet_compute_pseudo); ret = udp_queue_rcv_skb(sk, skb); - sock_put(sk); /* a return value > 0 means to resubmit the input, but * it wants the return to be -protocol, or 0 @@ -1958,49 +1882,24 @@ static struct sock *__udp4_lib_mcast_demux_lookup(struct net *net, int dif) { struct sock *sk, *result; - struct hlist_nulls_node *node; unsigned short hnum = ntohs(loc_port); - unsigned int count, slot = udp_hashfn(net, hnum, udp_table.mask); + unsigned int slot = udp_hashfn(net, hnum, udp_table.mask); struct udp_hslot *hslot = &udp_table.hash[slot]; /* Do not bother scanning a too big list */ if (hslot->count > 10) return NULL; - rcu_read_lock(); -begin: - count = 0; result = NULL; - sk_nulls_for_each_rcu(sk, node, &hslot->head) { - if (__udp_is_mcast_sock(net, sk, - loc_port, loc_addr, - rmt_port, rmt_addr, - dif, hnum)) { + sk_for_each_rcu(sk, &hslot->head) { + if (__udp_is_mcast_sock(net, sk, loc_port, loc_addr, + rmt_port, rmt_addr, dif, hnum)) { + if (result) + return NULL; result = sk; - ++count; } } - /* - * if the nulls value we got at the end of this lookup is - * not the expected one, we must restart lookup. - * We probably met an item that was moved to another chain. 
- */ - if (get_nulls_value(node) != slot) - goto begin; - if (result) { - if (count != 1 || - unlikely(!atomic_inc_not_zero_hint(&result->sk_refcnt, 2))) - result = NULL; - else if (unlikely(!__udp_is_mcast_sock(net, result, - loc_port, loc_addr, - rmt_port, rmt_addr, - dif, hnum))) { - sock_put(result); - result = NULL; - } - } - rcu_read_unlock(); return result; } @@ -2013,37 +1912,22 @@ static struct sock *__udp4_lib_demux_lookup(struct net *net, __be16 rmt_port, __be32 rmt_addr, int dif) { - struct sock *sk, *result; - struct hlist_nulls_node *node; unsigned short hnum = ntohs(loc_port); unsigned int hash2 = udp4_portaddr_hash(net, loc_addr, hnum); unsigned int slot2 = hash2 & udp_table.mask; struct udp_hslot *hslot2 = &udp_table.hash2[slot2]; INET_ADDR_COOKIE(acookie, rmt_addr, loc_addr); const __portpair ports = INET_COMBINED_PORTS(rmt_port, hnum); + struct sock *sk; - rcu_read_lock(); - result = NULL; - udp_portaddr_for_each_entry_rcu(sk, node, &hslot2->head) { - if (INET_MATCH(sk, net, acookie, - rmt_addr, loc_addr, ports, dif)) - result = sk; + udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) { + if (INET_MATCH(sk, net, acookie, rmt_addr, + loc_addr, ports, dif)) + return sk; /* Only check first socket in chain */ break; } - - if (result) { - if (unlikely(!atomic_inc_not_zero_hint(&result->sk_refcnt, 2))) - result = NULL; - else if (unlikely(!INET_MATCH(sk, net, acookie, - rmt_addr, loc_addr, - ports, dif))) { - sock_put(result); - result = NULL; - } - } - rcu_read_unlock(); - return result; + return NULL; } void udp_v4_early_demux(struct sk_buff *skb) @@ -2051,7 +1935,7 @@ void udp_v4_early_demux(struct sk_buff *skb) struct net *net = dev_net(skb->dev); const struct iphdr *iph; const struct udphdr *uh; - struct sock *sk; + struct sock *sk = NULL; struct dst_entry *dst; int dif = skb->dev->ifindex; int ours; @@ -2083,11 +1967,9 @@ void udp_v4_early_demux(struct sk_buff *skb) } else if (skb->pkt_type == PACKET_HOST) { sk = __udp4_lib_demux_lookup(net, uh->dest, iph->daddr, uh->source, iph->saddr, dif); - } else { - return; } - if (!sk) + if (!sk || !atomic_inc_not_zero_hint(&sk->sk_refcnt, 2)) return; skb->sk = sk; @@ -2387,14 +2269,13 @@ static struct sock *udp_get_first(struct seq_file *seq, int start) for (state->bucket = start; state->bucket <= state->udp_table->mask; ++state->bucket) { - struct hlist_nulls_node *node; struct udp_hslot *hslot = &state->udp_table->hash[state->bucket]; - if (hlist_nulls_empty(&hslot->head)) + if (hlist_empty(&hslot->head)) continue; spin_lock_bh(&hslot->lock); - sk_nulls_for_each(sk, node, &hslot->head) { + sk_for_each(sk, &hslot->head) { if (!net_eq(sock_net(sk), net)) continue; if (sk->sk_family == state->family) @@ -2413,7 +2294,7 @@ static struct sock *udp_get_next(struct seq_file *seq, struct sock *sk) struct net *net = seq_file_net(seq); do { - sk = sk_nulls_next(sk); + sk = sk_next(sk); } while (sk && (!net_eq(sock_net(sk), net) || sk->sk_family != state->family)); if (!sk) { @@ -2622,12 +2503,12 @@ void __init udp_table_init(struct udp_table *table, const char *name) table->hash2 = table->hash + (table->mask + 1); for (i = 0; i <= table->mask; i++) { - INIT_HLIST_NULLS_HEAD(&table->hash[i].head, i); + INIT_HLIST_HEAD(&table->hash[i].head); table->hash[i].count = 0; spin_lock_init(&table->hash[i].lock); } for (i = 0; i <= table->mask; i++) { - INIT_HLIST_NULLS_HEAD(&table->hash2[i].head, i); + INIT_HLIST_HEAD(&table->hash2[i].head); table->hash2[i].count = 0; spin_lock_init(&table->hash2[i].lock); } diff --git a/net/ipv4/udp_diag.c 
b/net/ipv4/udp_diag.c index df1966f3b6ec..3d5ccf4b1412 100644 --- a/net/ipv4/udp_diag.c +++ b/net/ipv4/udp_diag.c @@ -36,10 +36,11 @@ static int udp_dump_one(struct udp_table *tbl, struct sk_buff *in_skb, const struct inet_diag_req_v2 *req) { int err = -EINVAL; - struct sock *sk; + struct sock *sk = NULL; struct sk_buff *rep; struct net *net = sock_net(in_skb->sk); + rcu_read_lock(); if (req->sdiag_family == AF_INET) sk = __udp4_lib_lookup(net, req->id.idiag_src[0], req->id.idiag_sport, @@ -54,9 +55,9 @@ static int udp_dump_one(struct udp_table *tbl, struct sk_buff *in_skb, req->id.idiag_dport, req->id.idiag_if, tbl, NULL); #endif - else - goto out_nosk; - + if (sk && !atomic_inc_not_zero(&sk->sk_refcnt)) + sk = NULL; + rcu_read_unlock(); err = -ENOENT; if (!sk) goto out_nosk; @@ -96,24 +97,23 @@ static void udp_dump(struct udp_table *table, struct sk_buff *skb, struct netlink_callback *cb, const struct inet_diag_req_v2 *r, struct nlattr *bc) { - int num, s_num, slot, s_slot; struct net *net = sock_net(skb->sk); + int num, s_num, slot, s_slot; s_slot = cb->args[0]; num = s_num = cb->args[1]; for (slot = s_slot; slot <= table->mask; s_num = 0, slot++) { - struct sock *sk; - struct hlist_nulls_node *node; struct udp_hslot *hslot = &table->hash[slot]; + struct sock *sk; num = 0; - if (hlist_nulls_empty(&hslot->head)) + if (hlist_empty(&hslot->head)) continue; spin_lock_bh(&hslot->lock); - sk_nulls_for_each(sk, node, &hslot->head) { + sk_for_each(sk, &hslot->head) { struct inet_sock *inet = inet_sk(sk); if (!net_eq(sock_net(sk), net)) diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index b772a7641fbd..78a7dfd12707 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -213,37 +213,28 @@ static struct sock *udp6_lib_lookup2(struct net *net, struct sk_buff *skb) { struct sock *sk, *result; - struct hlist_nulls_node *node; int score, badness, matches = 0, reuseport = 0; - bool select_ok = true; u32 hash = 0; -begin: result = NULL; badness = -1; - udp_portaddr_for_each_entry_rcu(sk, node, &hslot2->head) { + udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) { score = compute_score2(sk, net, saddr, sport, daddr, hnum, dif); if (score > badness) { - result = sk; - badness = score; reuseport = sk->sk_reuseport; if (reuseport) { hash = udp6_ehashfn(net, daddr, hnum, saddr, sport); - if (select_ok) { - struct sock *sk2; - sk2 = reuseport_select_sock(sk, hash, skb, + result = reuseport_select_sock(sk, hash, skb, sizeof(struct udphdr)); - if (sk2) { - result = sk2; - select_ok = false; - goto found; - } - } + if (result) + return result; matches = 1; } + result = sk; + badness = score; } else if (score == badness && reuseport) { matches++; if (reciprocal_scale(hash, matches) == 0) @@ -251,27 +242,10 @@ begin: hash = next_pseudo_random32(hash); } } - /* - * if the nulls value we got at the end of this lookup is - * not the expected one, we must restart lookup. - * We probably met an item that was moved to another chain. 
- */ - if (get_nulls_value(node) != slot2) - goto begin; - - if (result) { -found: - if (unlikely(!atomic_inc_not_zero_hint(&result->sk_refcnt, 2))) - result = NULL; - else if (unlikely(compute_score2(result, net, saddr, sport, - daddr, hnum, dif) < badness)) { - sock_put(result); - goto begin; - } - } return result; } +/* rcu_read_lock() must be held */ struct sock *__udp6_lib_lookup(struct net *net, const struct in6_addr *saddr, __be16 sport, const struct in6_addr *daddr, __be16 dport, @@ -279,15 +253,12 @@ struct sock *__udp6_lib_lookup(struct net *net, struct sk_buff *skb) { struct sock *sk, *result; - struct hlist_nulls_node *node; unsigned short hnum = ntohs(dport); unsigned int hash2, slot2, slot = udp_hashfn(net, hnum, udptable->mask); struct udp_hslot *hslot2, *hslot = &udptable->hash[slot]; int score, badness, matches = 0, reuseport = 0; - bool select_ok = true; u32 hash = 0; - rcu_read_lock(); if (hslot->count > 10) { hash2 = udp6_portaddr_hash(net, daddr, hnum); slot2 = hash2 & udptable->mask; @@ -309,34 +280,26 @@ struct sock *__udp6_lib_lookup(struct net *net, &in6addr_any, hnum, dif, hslot2, slot2, skb); } - rcu_read_unlock(); return result; } begin: result = NULL; badness = -1; - sk_nulls_for_each_rcu(sk, node, &hslot->head) { + sk_for_each_rcu(sk, &hslot->head) { score = compute_score(sk, net, hnum, saddr, sport, daddr, dport, dif); if (score > badness) { - result = sk; - badness = score; reuseport = sk->sk_reuseport; if (reuseport) { hash = udp6_ehashfn(net, daddr, hnum, saddr, sport); - if (select_ok) { - struct sock *sk2; - - sk2 = reuseport_select_sock(sk, hash, skb, + result = reuseport_select_sock(sk, hash, skb, sizeof(struct udphdr)); - if (sk2) { - result = sk2; - select_ok = false; - goto found; - } - } + if (result) + return result; matches = 1; } + result = sk; + badness = score; } else if (score == badness && reuseport) { matches++; if (reciprocal_scale(hash, matches) == 0) @@ -344,25 +307,6 @@ begin: hash = next_pseudo_random32(hash); } } - /* - * if the nulls value we got at the end of this lookup is - * not the expected one, we must restart lookup. - * We probably met an item that was moved to another chain. - */ - if (get_nulls_value(node) != slot) - goto begin; - - if (result) { -found: - if (unlikely(!atomic_inc_not_zero_hint(&result->sk_refcnt, 2))) - result = NULL; - else if (unlikely(compute_score(result, net, hnum, saddr, sport, - daddr, dport, dif) < badness)) { - sock_put(result); - goto begin; - } - } - rcu_read_unlock(); return result; } EXPORT_SYMBOL_GPL(__udp6_lib_lookup); @@ -382,12 +326,24 @@ static struct sock *__udp6_lib_lookup_skb(struct sk_buff *skb, udptable, skb); } +/* Must be called under rcu_read_lock(). + * Does increment socket refcount. 
+ */ +#if IS_ENABLED(CONFIG_NETFILTER_XT_MATCH_SOCKET) || \ + IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TPROXY) struct sock *udp6_lib_lookup(struct net *net, const struct in6_addr *saddr, __be16 sport, const struct in6_addr *daddr, __be16 dport, int dif) { - return __udp6_lib_lookup(net, saddr, sport, daddr, dport, dif, &udp_table, NULL); + struct sock *sk; + + sk = __udp6_lib_lookup(net, saddr, sport, daddr, dport, + dif, &udp_table, NULL); + if (sk && !atomic_inc_not_zero(&sk->sk_refcnt)) + sk = NULL; + return sk; } EXPORT_SYMBOL_GPL(udp6_lib_lookup); +#endif /* * This should be easy, if there is something there we @@ -585,7 +541,7 @@ void __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt, sk->sk_err = err; sk->sk_error_report(sk); out: - sock_put(sk); + return; } static int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) @@ -747,33 +703,6 @@ static bool __udp_v6_is_mcast_sock(struct net *net, struct sock *sk, return true; } -static void flush_stack(struct sock **stack, unsigned int count, - struct sk_buff *skb, unsigned int final) -{ - struct sk_buff *skb1 = NULL; - struct sock *sk; - unsigned int i; - - for (i = 0; i < count; i++) { - sk = stack[i]; - if (likely(!skb1)) - skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC); - if (!skb1) { - atomic_inc(&sk->sk_drops); - UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS, - IS_UDPLITE(sk)); - UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, - IS_UDPLITE(sk)); - } - - if (skb1 && udpv6_queue_rcv_skb(sk, skb1) <= 0) - skb1 = NULL; - sock_put(sk); - } - if (unlikely(skb1)) - kfree_skb(skb1); -} - static void udp6_csum_zero_error(struct sk_buff *skb) { /* RFC 2460 section 8.1 says that we SHOULD log @@ -792,15 +721,15 @@ static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb, const struct in6_addr *saddr, const struct in6_addr *daddr, struct udp_table *udptable, int proto) { - struct sock *sk, *stack[256 / sizeof(struct sock *)]; + struct sock *sk, *first = NULL; const struct udphdr *uh = udp_hdr(skb); - struct hlist_nulls_node *node; unsigned short hnum = ntohs(uh->dest); struct udp_hslot *hslot = udp_hashslot(udptable, net, hnum); - int dif = inet6_iif(skb); - unsigned int count = 0, offset = offsetof(typeof(*sk), sk_nulls_node); + unsigned int offset = offsetof(typeof(*sk), sk_node); unsigned int hash2 = 0, hash2_any = 0, use_hash2 = (hslot->count > 10); - bool inner_flushed = false; + int dif = inet6_iif(skb); + struct hlist_node *node; + struct sk_buff *nskb; if (use_hash2) { hash2_any = udp6_portaddr_hash(net, &in6addr_any, hnum) & @@ -811,27 +740,32 @@ start_lookup: offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node); } - spin_lock(&hslot->lock); - sk_nulls_for_each_entry_offset(sk, node, &hslot->head, offset) { - if (__udp_v6_is_mcast_sock(net, sk, - uh->dest, daddr, - uh->source, saddr, - dif, hnum) && - /* If zero checksum and no_check is not on for - * the socket then skip it. - */ - (uh->check || udp_sk(sk)->no_check6_rx)) { - if (unlikely(count == ARRAY_SIZE(stack))) { - flush_stack(stack, count, skb, ~0); - inner_flushed = true; - count = 0; - } - stack[count++] = sk; - sock_hold(sk); + sk_for_each_entry_offset_rcu(sk, node, &hslot->head, offset) { + if (!__udp_v6_is_mcast_sock(net, sk, uh->dest, daddr, + uh->source, saddr, dif, hnum)) + continue; + /* If zero checksum and no_check is not on for + * the socket then skip it. 
+ */ + if (!uh->check && !udp_sk(sk)->no_check6_rx) + continue; + if (!first) { + first = sk; + continue; + } + nskb = skb_clone(skb, GFP_ATOMIC); + if (unlikely(!nskb)) { + atomic_inc(&sk->sk_drops); + UDP6_INC_STATS_BH(net, UDP_MIB_RCVBUFERRORS, + IS_UDPLITE(sk)); + UDP6_INC_STATS_BH(net, UDP_MIB_INERRORS, + IS_UDPLITE(sk)); + continue; } - } - spin_unlock(&hslot->lock); + if (udpv6_queue_rcv_skb(sk, nskb) > 0) + consume_skb(nskb); + } /* Also lookup *:port if we are using hash2 and haven't done so yet. */ if (use_hash2 && hash2 != hash2_any) { @@ -839,13 +773,13 @@ start_lookup: goto start_lookup; } - if (count) { - flush_stack(stack, count, skb, count - 1); + if (first) { + if (udpv6_queue_rcv_skb(first, skb) > 0) + consume_skb(skb); } else { - if (!inner_flushed) - UDP6_INC_STATS_BH(net, UDP_MIB_IGNOREDMULTI, - proto == IPPROTO_UDPLITE); - consume_skb(skb); + kfree_skb(skb); + UDP6_INC_STATS_BH(net, UDP_MIB_IGNOREDMULTI, + proto == IPPROTO_UDPLITE); } return 0; } @@ -853,10 +787,10 @@ start_lookup: int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, int proto) { - struct net *net = dev_net(skb->dev); - struct sock *sk; - struct udphdr *uh; const struct in6_addr *saddr, *daddr; + struct net *net = dev_net(skb->dev); + struct udphdr *uh; + struct sock *sk; u32 ulen = 0; if (!pskb_may_pull(skb, sizeof(struct udphdr))) @@ -910,7 +844,6 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, int ret; if (!uh->check && !udp_sk(sk)->no_check6_rx) { - sock_put(sk); udp6_csum_zero_error(skb); goto csum_error; } @@ -920,7 +853,6 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, ip6_compute_pseudo); ret = udpv6_queue_rcv_skb(sk, skb); - sock_put(sk); /* a return value > 0 means to resubmit the input */ if (ret > 0) From ee3cf32a4a5e6cf5ccc0f0de9865fda3ebc46436 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 1 Apr 2016 08:52:14 -0700 Subject: [PATCH 0197/1649] tcp/dccp: remove BH disable/enable in lookup Since linux 2.6.29, lookups only use rcu locking. Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- include/net/inet_hashtables.h | 7 +------ net/ipv6/inet6_hashtables.c | 2 -- 2 files changed, 1 insertion(+), 8 deletions(-) diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h index 50f635c2c536..a77acee93aaf 100644 --- a/include/net/inet_hashtables.h +++ b/include/net/inet_hashtables.h @@ -280,11 +280,8 @@ static inline struct sock *inet_lookup_listener(struct net *net, net_eq(sock_net(__sk), (__net))) #endif /* 64-bit arch */ -/* - * Sockets in TCP_CLOSE state are _always_ taken out of the hash, so we need +/* Sockets in TCP_CLOSE state are _always_ taken out of the hash, so we need * not check it for lookups anymore, thanks Alexey. -DaveM - * - * Local BH must be disabled here. 
*/ struct sock *__inet_lookup_established(struct net *net, struct inet_hashinfo *hashinfo, @@ -326,10 +323,8 @@ static inline struct sock *inet_lookup(struct net *net, { struct sock *sk; - local_bh_disable(); sk = __inet_lookup(net, hashinfo, skb, doff, saddr, sport, daddr, dport, dif); - local_bh_enable(); return sk; } diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c index 70f2628be6fa..d253f32874c9 100644 --- a/net/ipv6/inet6_hashtables.c +++ b/net/ipv6/inet6_hashtables.c @@ -200,10 +200,8 @@ struct sock *inet6_lookup(struct net *net, struct inet_hashinfo *hashinfo, { struct sock *sk; - local_bh_disable(); sk = __inet6_lookup(net, hashinfo, skb, doff, saddr, sport, daddr, ntohs(dport), dif); - local_bh_enable(); return sk; } From 2d331915a04144dad738e725769d8fac06ef6155 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 1 Apr 2016 08:52:15 -0700 Subject: [PATCH 0198/1649] tcp/dccp: use rcu locking in inet_diag_find_one_icsk() RX packet processing holds rcu_read_lock(), so we can remove pairs of rcu_read_lock()/rcu_read_unlock() in lookup functions if inet_diag also holds rcu before calling them. This is needed anyway as __inet_lookup_listener() and inet6_lookup_listener() will soon no longer increment refcount on the found listener. Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- net/ipv4/inet_diag.c | 7 +++++-- net/ipv4/inet_hashtables.c | 4 ---- net/ipv6/inet6_hashtables.c | 4 ---- 3 files changed, 5 insertions(+), 10 deletions(-) diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c index 5fdb02f5598e..ea8df527b279 100644 --- a/net/ipv4/inet_diag.c +++ b/net/ipv4/inet_diag.c @@ -356,6 +356,7 @@ struct sock *inet_diag_find_one_icsk(struct net *net, { struct sock *sk; + rcu_read_lock(); if (req->sdiag_family == AF_INET) sk = inet_lookup(net, hashinfo, NULL, 0, req->id.idiag_dst[0], req->id.idiag_dport, req->id.idiag_src[0], @@ -376,9 +377,11 @@ struct sock *inet_diag_find_one_icsk(struct net *net, req->id.idiag_if); } #endif - else + else { + rcu_read_unlock(); return ERR_PTR(-EINVAL); - + } + rcu_read_unlock(); if (!sk) return ERR_PTR(-ENOENT); diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c index bc68eced0105..387338d71dcd 100644 --- a/net/ipv4/inet_hashtables.c +++ b/net/ipv4/inet_hashtables.c @@ -220,7 +220,6 @@ struct sock *__inet_lookup_listener(struct net *net, bool select_ok = true; u32 phash = 0; - rcu_read_lock(); begin: result = NULL; hiscore = 0; @@ -269,7 +268,6 @@ found: goto begin; } } - rcu_read_unlock(); return result; } EXPORT_SYMBOL_GPL(__inet_lookup_listener); @@ -312,7 +310,6 @@ struct sock *__inet_lookup_established(struct net *net, unsigned int slot = hash & hashinfo->ehash_mask; struct inet_ehash_bucket *head = &hashinfo->ehash[slot]; - rcu_read_lock(); begin: sk_nulls_for_each_rcu(sk, node, &head->chain) { if (sk->sk_hash != hash) @@ -339,7 +336,6 @@ begin: out: sk = NULL; found: - rcu_read_unlock(); return sk; } EXPORT_SYMBOL_GPL(__inet_lookup_established); diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c index d253f32874c9..e6ef6ce1ed74 100644 --- a/net/ipv6/inet6_hashtables.c +++ b/net/ipv6/inet6_hashtables.c @@ -69,7 +69,6 @@ struct sock *__inet6_lookup_established(struct net *net, struct inet_ehash_bucket *head = &hashinfo->ehash[slot]; - rcu_read_lock(); begin: sk_nulls_for_each_rcu(sk, node, &head->chain) { if (sk->sk_hash != hash) @@ -90,7 +89,6 @@ begin: out: sk = NULL; found: - rcu_read_unlock(); return sk; } EXPORT_SYMBOL(__inet6_lookup_established); @@ -138,7 
+136,6 @@ struct sock *inet6_lookup_listener(struct net *net, unsigned int hash = inet_lhashfn(net, hnum); struct inet_listen_hashbucket *ilb = &hashinfo->listening_hash[hash]; - rcu_read_lock(); begin: result = NULL; hiscore = 0; @@ -187,7 +184,6 @@ found: goto begin; } } - rcu_read_unlock(); return result; } EXPORT_SYMBOL_GPL(inet6_lookup_listener); From 3a5d1c0e7cb5ba91aabbd7e28626e3cc925f8093 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 1 Apr 2016 08:52:16 -0700 Subject: [PATCH 0199/1649] inet: reqsk_alloc() needs to take care of dead listeners We'll soon no longer take a refcount on listeners, so reqsk_alloc() can not assume a listener refcount is not zero. We need to use atomic_inc_not_zero() Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- include/net/request_sock.h | 31 +++++++++++++++---------------- 1 file changed, 15 insertions(+), 16 deletions(-) diff --git a/include/net/request_sock.h b/include/net/request_sock.h index f49759decb28..6ebe13eb1c4c 100644 --- a/include/net/request_sock.h +++ b/include/net/request_sock.h @@ -85,24 +85,23 @@ reqsk_alloc(const struct request_sock_ops *ops, struct sock *sk_listener, struct request_sock *req; req = kmem_cache_alloc(ops->slab, GFP_ATOMIC | __GFP_NOWARN); - - if (req) { - req->rsk_ops = ops; - if (attach_listener) { - sock_hold(sk_listener); - req->rsk_listener = sk_listener; - } else { - req->rsk_listener = NULL; + if (!req) + return NULL; + req->rsk_listener = NULL; + if (attach_listener) { + if (unlikely(!atomic_inc_not_zero(&sk_listener->sk_refcnt))) { + kmem_cache_free(ops->slab, req); + return NULL; } - req_to_sk(req)->sk_prot = sk_listener->sk_prot; - sk_node_init(&req_to_sk(req)->sk_node); - sk_tx_queue_clear(req_to_sk(req)); - req->saved_syn = NULL; - /* Following is temporary. It is coupled with debugging - * helpers in reqsk_put() & reqsk_free() - */ - atomic_set(&req->rsk_refcnt, 0); + req->rsk_listener = sk_listener; } + req->rsk_ops = ops; + req_to_sk(req)->sk_prot = sk_listener->sk_prot; + sk_node_init(&req_to_sk(req)->sk_node); + sk_tx_queue_clear(req_to_sk(req)); + req->saved_syn = NULL; + atomic_set(&req->rsk_refcnt, 0); + return req; } From 3b24d854cb35383c30642116e5992fd619bdc9bc Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 1 Apr 2016 08:52:17 -0700 Subject: [PATCH 0200/1649] tcp/dccp: do not touch listener sk_refcnt under synflood When a SYNFLOOD targets a non SO_REUSEPORT listener, multiple cpus contend on sk->sk_refcnt and sk->sk_wmem_alloc changes. By letting listeners use SOCK_RCU_FREE infrastructure, we can relax TCP_LISTEN lookup rules and avoid touching sk_refcnt Note that we still use SLAB_DESTROY_BY_RCU rules for other sockets, only listeners are impacted by this change. Peak performance under SYNFLOOD is increased by ~33% : On my test machine, I could process 3.2 Mpps instead of 2.4 Mpps Most consuming functions are now skb_set_owner_w() and sock_wfree() contending on sk->sk_wmem_alloc when cooking SYNACK and freeing them. Signed-off-by: Eric Dumazet Signed-off-by: David S. 
Miller --- include/net/inet6_hashtables.h | 12 ++++-- include/net/inet_hashtables.h | 40 +++++++++++-------- net/dccp/ipv4.c | 7 +++- net/dccp/ipv6.c | 7 +++- net/ipv4/inet_diag.c | 3 +- net/ipv4/inet_hashtables.c | 73 ++++++++++++---------------------- net/ipv4/tcp_ipv4.c | 66 +++++++++++++++--------------- net/ipv6/inet6_hashtables.c | 56 +++++++------------------- net/ipv6/tcp_ipv6.c | 27 +++++++------ net/netfilter/xt_socket.c | 6 +-- 10 files changed, 134 insertions(+), 163 deletions(-) diff --git a/include/net/inet6_hashtables.h b/include/net/inet6_hashtables.h index 28332bdac333..b87becacd9d3 100644 --- a/include/net/inet6_hashtables.h +++ b/include/net/inet6_hashtables.h @@ -66,13 +66,15 @@ static inline struct sock *__inet6_lookup(struct net *net, const __be16 sport, const struct in6_addr *daddr, const u16 hnum, - const int dif) + const int dif, + bool *refcounted) { struct sock *sk = __inet6_lookup_established(net, hashinfo, saddr, sport, daddr, hnum, dif); + *refcounted = true; if (sk) return sk; - + *refcounted = false; return inet6_lookup_listener(net, hashinfo, skb, doff, saddr, sport, daddr, hnum, dif); } @@ -81,17 +83,19 @@ static inline struct sock *__inet6_lookup_skb(struct inet_hashinfo *hashinfo, struct sk_buff *skb, int doff, const __be16 sport, const __be16 dport, - int iif) + int iif, + bool *refcounted) { struct sock *sk = skb_steal_sock(skb); + *refcounted = true; if (sk) return sk; return __inet6_lookup(dev_net(skb_dst(skb)->dev), hashinfo, skb, doff, &ipv6_hdr(skb)->saddr, sport, &ipv6_hdr(skb)->daddr, ntohs(dport), - iif); + iif, refcounted); } struct sock *inet6_lookup(struct net *net, struct inet_hashinfo *hashinfo, diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h index a77acee93aaf..0574493e3899 100644 --- a/include/net/inet_hashtables.h +++ b/include/net/inet_hashtables.h @@ -100,14 +100,10 @@ struct inet_bind_hashbucket { /* * Sockets can be hashed in established or listening table - * We must use different 'nulls' end-of-chain value for listening - * hash table, or we might find a socket that was closed and - * reallocated/inserted into established hash table */ -#define LISTENING_NULLS_BASE (1U << 29) struct inet_listen_hashbucket { spinlock_t lock; - struct hlist_nulls_head head; + struct hlist_head head; }; /* This is for listening sockets, thus all sockets which possess wildcards. */ @@ -304,14 +300,20 @@ static inline struct sock *__inet_lookup(struct net *net, struct sk_buff *skb, int doff, const __be32 saddr, const __be16 sport, const __be32 daddr, const __be16 dport, - const int dif) + const int dif, + bool *refcounted) { u16 hnum = ntohs(dport); - struct sock *sk = __inet_lookup_established(net, hashinfo, - saddr, sport, daddr, hnum, dif); + struct sock *sk; - return sk ? 
: __inet_lookup_listener(net, hashinfo, skb, doff, saddr, - sport, daddr, hnum, dif); + sk = __inet_lookup_established(net, hashinfo, saddr, sport, + daddr, hnum, dif); + *refcounted = true; + if (sk) + return sk; + *refcounted = false; + return __inet_lookup_listener(net, hashinfo, skb, doff, saddr, + sport, daddr, hnum, dif); } static inline struct sock *inet_lookup(struct net *net, @@ -322,10 +324,13 @@ static inline struct sock *inet_lookup(struct net *net, const int dif) { struct sock *sk; + bool refcounted; sk = __inet_lookup(net, hashinfo, skb, doff, saddr, sport, daddr, - dport, dif); + dport, dif, &refcounted); + if (sk && !refcounted && !atomic_inc_not_zero(&sk->sk_refcnt)) + sk = NULL; return sk; } @@ -333,17 +338,20 @@ static inline struct sock *__inet_lookup_skb(struct inet_hashinfo *hashinfo, struct sk_buff *skb, int doff, const __be16 sport, - const __be16 dport) + const __be16 dport, + bool *refcounted) { struct sock *sk = skb_steal_sock(skb); const struct iphdr *iph = ip_hdr(skb); + *refcounted = true; if (sk) return sk; - else - return __inet_lookup(dev_net(skb_dst(skb)->dev), hashinfo, skb, - doff, iph->saddr, sport, - iph->daddr, dport, inet_iif(skb)); + + return __inet_lookup(dev_net(skb_dst(skb)->dev), hashinfo, skb, + doff, iph->saddr, sport, + iph->daddr, dport, inet_iif(skb), + refcounted); } u32 sk_ehashfn(const struct sock *sk); diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c index 9c67a961ba53..6438c5a7efc4 100644 --- a/net/dccp/ipv4.c +++ b/net/dccp/ipv4.c @@ -764,6 +764,7 @@ static int dccp_v4_rcv(struct sk_buff *skb) { const struct dccp_hdr *dh; const struct iphdr *iph; + bool refcounted; struct sock *sk; int min_cov; @@ -801,7 +802,7 @@ static int dccp_v4_rcv(struct sk_buff *skb) lookup: sk = __inet_lookup_skb(&dccp_hashinfo, skb, __dccp_hdr_len(dh), - dh->dccph_sport, dh->dccph_dport); + dh->dccph_sport, dh->dccph_dport, &refcounted); if (!sk) { dccp_pr_debug("failed to look up flow ID in table and " "get corresponding socket\n"); @@ -830,6 +831,7 @@ lookup: goto lookup; } sock_hold(sk); + refcounted = true; nsk = dccp_check_req(sk, skb, req); if (!nsk) { reqsk_put(req); @@ -886,7 +888,8 @@ discard_it: return 0; discard_and_relse: - sock_put(sk); + if (refcounted) + sock_put(sk); goto discard_it; } diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c index 4663a01d5039..71bf1deba4c5 100644 --- a/net/dccp/ipv6.c +++ b/net/dccp/ipv6.c @@ -642,6 +642,7 @@ discard: static int dccp_v6_rcv(struct sk_buff *skb) { const struct dccp_hdr *dh; + bool refcounted; struct sock *sk; int min_cov; @@ -670,7 +671,7 @@ static int dccp_v6_rcv(struct sk_buff *skb) lookup: sk = __inet6_lookup_skb(&dccp_hashinfo, skb, __dccp_hdr_len(dh), dh->dccph_sport, dh->dccph_dport, - inet6_iif(skb)); + inet6_iif(skb), &refcounted); if (!sk) { dccp_pr_debug("failed to look up flow ID in table and " "get corresponding socket\n"); @@ -699,6 +700,7 @@ lookup: goto lookup; } sock_hold(sk); + refcounted = true; nsk = dccp_check_req(sk, skb, req); if (!nsk) { reqsk_put(req); @@ -752,7 +754,8 @@ discard_it: return 0; discard_and_relse: - sock_put(sk); + if (refcounted) + sock_put(sk); goto discard_it; } diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c index ea8df527b279..bd591eb81ec9 100644 --- a/net/ipv4/inet_diag.c +++ b/net/ipv4/inet_diag.c @@ -775,13 +775,12 @@ void inet_diag_dump_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *skb, for (i = s_i; i < INET_LHTABLE_SIZE; i++) { struct inet_listen_hashbucket *ilb; - struct hlist_nulls_node *node; struct sock *sk; num = 0; ilb = 
&hashinfo->listening_hash[i]; spin_lock_bh(&ilb->lock); - sk_nulls_for_each(sk, node, &ilb->head) { + sk_for_each(sk, &ilb->head) { struct inet_sock *inet = inet_sk(sk); if (!net_eq(sock_net(sk), net)) diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c index 387338d71dcd..98ba03b6f87d 100644 --- a/net/ipv4/inet_hashtables.c +++ b/net/ipv4/inet_hashtables.c @@ -198,13 +198,13 @@ static inline int compute_score(struct sock *sk, struct net *net, } /* - * Don't inline this cruft. Here are some nice properties to exploit here. The - * BSD API does not allow a listening sock to specify the remote port nor the + * Here are some nice properties to exploit here. The BSD API + * does not allow a listening sock to specify the remote port nor the * remote address for the connection. So always assume those are both * wildcarded during the search since they can never be otherwise. */ - +/* called with rcu_read_lock() : No refcount taken on the socket */ struct sock *__inet_lookup_listener(struct net *net, struct inet_hashinfo *hashinfo, struct sk_buff *skb, int doff, @@ -212,37 +212,27 @@ struct sock *__inet_lookup_listener(struct net *net, const __be32 daddr, const unsigned short hnum, const int dif) { - struct sock *sk, *result; - struct hlist_nulls_node *node; unsigned int hash = inet_lhashfn(net, hnum); struct inet_listen_hashbucket *ilb = &hashinfo->listening_hash[hash]; - int score, hiscore, matches = 0, reuseport = 0; - bool select_ok = true; + int score, hiscore = 0, matches = 0, reuseport = 0; + struct sock *sk, *result = NULL; u32 phash = 0; -begin: - result = NULL; - hiscore = 0; - sk_nulls_for_each_rcu(sk, node, &ilb->head) { + sk_for_each_rcu(sk, &ilb->head) { score = compute_score(sk, net, hnum, daddr, dif); if (score > hiscore) { - result = sk; - hiscore = score; reuseport = sk->sk_reuseport; if (reuseport) { phash = inet_ehashfn(net, daddr, hnum, saddr, sport); - if (select_ok) { - struct sock *sk2; - sk2 = reuseport_select_sock(sk, phash, - skb, doff); - if (sk2) { - result = sk2; - goto found; - } - } + result = reuseport_select_sock(sk, phash, + skb, doff); + if (result) + return result; matches = 1; } + result = sk; + hiscore = score; } else if (score == hiscore && reuseport) { matches++; if (reciprocal_scale(phash, matches) == 0) @@ -250,24 +240,6 @@ begin: phash = next_pseudo_random32(phash); } } - /* - * if the nulls value we got at the end of this lookup is - * not the expected one, we must restart lookup. - * We probably met an item that was moved to another chain. 
- */ - if (get_nulls_value(node) != hash + LISTENING_NULLS_BASE) - goto begin; - if (result) { -found: - if (unlikely(!atomic_inc_not_zero(&result->sk_refcnt))) - result = NULL; - else if (unlikely(compute_score(result, net, hnum, daddr, - dif) < hiscore)) { - sock_put(result); - select_ok = false; - goto begin; - } - } return result; } EXPORT_SYMBOL_GPL(__inet_lookup_listener); @@ -508,7 +480,8 @@ int __inet_hash(struct sock *sk, struct sock *osk, if (err) goto unlock; } - __sk_nulls_add_node_rcu(sk, &ilb->head); + hlist_add_head_rcu(&sk->sk_node, &ilb->head); + sock_set_flag(sk, SOCK_RCU_FREE); sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); unlock: spin_unlock(&ilb->lock); @@ -535,20 +508,25 @@ void inet_unhash(struct sock *sk) { struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo; spinlock_t *lock; + bool listener = false; int done; if (sk_unhashed(sk)) return; - if (sk->sk_state == TCP_LISTEN) + if (sk->sk_state == TCP_LISTEN) { lock = &hashinfo->listening_hash[inet_sk_listen_hashfn(sk)].lock; - else + listener = true; + } else { lock = inet_ehash_lockp(hashinfo, sk->sk_hash); - + } spin_lock_bh(lock); if (rcu_access_pointer(sk->sk_reuseport_cb)) reuseport_detach_sock(sk); - done = __sk_nulls_del_node_init_rcu(sk); + if (listener) + done = __sk_del_node_init(sk); + else + done = __sk_nulls_del_node_init_rcu(sk); if (done) sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1); spin_unlock_bh(lock); @@ -684,9 +662,8 @@ void inet_hashinfo_init(struct inet_hashinfo *h) for (i = 0; i < INET_LHTABLE_SIZE; i++) { spin_lock_init(&h->listening_hash[i].lock); - INIT_HLIST_NULLS_HEAD(&h->listening_hash[i].head, - i + LISTENING_NULLS_BASE); - } + INIT_HLIST_HEAD(&h->listening_hash[i].head); + } } EXPORT_SYMBOL_GPL(inet_hashinfo_init); diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index ad450509029b..e5f924b29946 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -628,6 +628,7 @@ static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb) net = sk ? 
sock_net(sk) : dev_net(skb_dst(skb)->dev); #ifdef CONFIG_TCP_MD5SIG + rcu_read_lock(); hash_location = tcp_parse_md5sig_option(th); if (sk && sk_fullsock(sk)) { key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *) @@ -646,16 +647,18 @@ static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb) ntohs(th->source), inet_iif(skb)); /* don't send rst if it can't find key */ if (!sk1) - return; - rcu_read_lock(); + goto out; + key = tcp_md5_do_lookup(sk1, (union tcp_md5_addr *) &ip_hdr(skb)->saddr, AF_INET); if (!key) - goto release_sk1; + goto out; + genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, skb); if (genhash || memcmp(hash_location, newhash, 16) != 0) - goto release_sk1; + goto out; + } if (key) { @@ -698,11 +701,8 @@ static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb) TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS); #ifdef CONFIG_TCP_MD5SIG -release_sk1: - if (sk1) { - rcu_read_unlock(); - sock_put(sk1); - } +out: + rcu_read_unlock(); #endif } @@ -1538,11 +1538,12 @@ EXPORT_SYMBOL(tcp_prequeue); int tcp_v4_rcv(struct sk_buff *skb) { + struct net *net = dev_net(skb->dev); const struct iphdr *iph; const struct tcphdr *th; + bool refcounted; struct sock *sk; int ret; - struct net *net = dev_net(skb->dev); if (skb->pkt_type != PACKET_HOST) goto discard_it; @@ -1588,7 +1589,7 @@ int tcp_v4_rcv(struct sk_buff *skb) lookup: sk = __inet_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th), th->source, - th->dest); + th->dest, &refcounted); if (!sk) goto no_tcp_socket; @@ -1609,7 +1610,11 @@ process: inet_csk_reqsk_queue_drop_and_put(sk, req); goto lookup; } + /* We own a reference on the listener, increase it again + * as we might lose it too soon. + */ sock_hold(sk); + refcounted = true; nsk = tcp_check_req(sk, skb, req, false); if (!nsk) { reqsk_put(req); @@ -1665,7 +1670,8 @@ process: bh_unlock_sock(sk); put_and_return: - sock_put(sk); + if (refcounted) + sock_put(sk); return ret; @@ -1688,7 +1694,8 @@ discard_it: return 0; discard_and_relse: - sock_put(sk); + if (refcounted) + sock_put(sk); goto discard_it; do_time_wait: @@ -1712,6 +1719,7 @@ do_time_wait: if (sk2) { inet_twsk_deschedule_put(inet_twsk(sk)); sk = sk2; + refcounted = false; goto process; } /* Fall through to ACK */ @@ -1845,17 +1853,17 @@ EXPORT_SYMBOL(tcp_v4_destroy_sock); */ static void *listening_get_next(struct seq_file *seq, void *cur) { - struct inet_connection_sock *icsk; - struct hlist_nulls_node *node; - struct sock *sk = cur; - struct inet_listen_hashbucket *ilb; struct tcp_iter_state *st = seq->private; struct net *net = seq_file_net(seq); + struct inet_listen_hashbucket *ilb; + struct inet_connection_sock *icsk; + struct sock *sk = cur; if (!sk) { +get_head: ilb = &tcp_hashinfo.listening_hash[st->bucket]; spin_lock_bh(&ilb->lock); - sk = sk_nulls_head(&ilb->head); + sk = sk_head(&ilb->head); st->offset = 0; goto get_sk; } @@ -1863,28 +1871,20 @@ static void *listening_get_next(struct seq_file *seq, void *cur) ++st->num; ++st->offset; - sk = sk_nulls_next(sk); + sk = sk_next(sk); get_sk: - sk_nulls_for_each_from(sk, node) { + sk_for_each_from(sk) { if (!net_eq(sock_net(sk), net)) continue; - if (sk->sk_family == st->family) { - cur = sk; - goto out; - } + if (sk->sk_family == st->family) + return sk; icsk = inet_csk(sk); } spin_unlock_bh(&ilb->lock); st->offset = 0; - if (++st->bucket < INET_LHTABLE_SIZE) { - ilb = &tcp_hashinfo.listening_hash[st->bucket]; - spin_lock_bh(&ilb->lock); - sk = sk_nulls_head(&ilb->head); - goto get_sk; - } - cur = NULL; -out: - return cur; + if (++st->bucket < 
INET_LHTABLE_SIZE) + goto get_head; + return NULL; } static void *listening_get_idx(struct seq_file *seq, loff_t *pos) diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c index e6ef6ce1ed74..607da088344d 100644 --- a/net/ipv6/inet6_hashtables.c +++ b/net/ipv6/inet6_hashtables.c @@ -120,6 +120,7 @@ static inline int compute_score(struct sock *sk, struct net *net, return score; } +/* called with rcu_read_lock() */ struct sock *inet6_lookup_listener(struct net *net, struct inet_hashinfo *hashinfo, struct sk_buff *skb, int doff, @@ -127,38 +128,27 @@ struct sock *inet6_lookup_listener(struct net *net, const __be16 sport, const struct in6_addr *daddr, const unsigned short hnum, const int dif) { - struct sock *sk; - const struct hlist_nulls_node *node; - struct sock *result; - int score, hiscore, matches = 0, reuseport = 0; - bool select_ok = true; - u32 phash = 0; unsigned int hash = inet_lhashfn(net, hnum); struct inet_listen_hashbucket *ilb = &hashinfo->listening_hash[hash]; + int score, hiscore = 0, matches = 0, reuseport = 0; + struct sock *sk, *result = NULL; + u32 phash = 0; -begin: - result = NULL; - hiscore = 0; - sk_nulls_for_each(sk, node, &ilb->head) { + sk_for_each(sk, &ilb->head) { score = compute_score(sk, net, hnum, daddr, dif); if (score > hiscore) { hiscore = score; - result = sk; - reuseport = sk->sk_reuseport; if (reuseport) { phash = inet6_ehashfn(net, daddr, hnum, saddr, sport); - if (select_ok) { - struct sock *sk2; - sk2 = reuseport_select_sock(sk, phash, - skb, doff); - if (sk2) { - result = sk2; - goto found; - } - } + result = reuseport_select_sock(sk, phash, + skb, doff); + if (result) + return result; matches = 1; } + result = sk; + reuseport = sk->sk_reuseport; } else if (score == hiscore && reuseport) { matches++; if (reciprocal_scale(phash, matches) == 0) @@ -166,24 +156,6 @@ begin: phash = next_pseudo_random32(phash); } } - /* - * if the nulls value we got at the end of this lookup is - * not the expected one, we must restart lookup. - * We probably met an item that was moved to another chain. 
- */ - if (get_nulls_value(node) != hash + LISTENING_NULLS_BASE) - goto begin; - if (result) { -found: - if (unlikely(!atomic_inc_not_zero(&result->sk_refcnt))) - result = NULL; - else if (unlikely(compute_score(result, net, hnum, daddr, - dif) < hiscore)) { - sock_put(result); - select_ok = false; - goto begin; - } - } return result; } EXPORT_SYMBOL_GPL(inet6_lookup_listener); @@ -195,10 +167,12 @@ struct sock *inet6_lookup(struct net *net, struct inet_hashinfo *hashinfo, const int dif) { struct sock *sk; + bool refcounted; sk = __inet6_lookup(net, hashinfo, skb, doff, saddr, sport, daddr, - ntohs(dport), dif); - + ntohs(dport), dif, &refcounted); + if (sk && !refcounted && !atomic_inc_not_zero(&sk->sk_refcnt)) + sk = NULL; return sk; } EXPORT_SYMBOL_GPL(inet6_lookup); diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 711d209f9124..f0422e782731 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -858,6 +858,7 @@ static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb) return; #ifdef CONFIG_TCP_MD5SIG + rcu_read_lock(); hash_location = tcp_parse_md5sig_option(th); if (sk && sk_fullsock(sk)) { key = tcp_v6_md5_do_lookup(sk, &ipv6h->saddr); @@ -875,16 +876,15 @@ static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb) th->source, &ipv6h->daddr, ntohs(th->source), tcp_v6_iif(skb)); if (!sk1) - return; + goto out; - rcu_read_lock(); key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr); if (!key) - goto release_sk1; + goto out; genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, skb); if (genhash || memcmp(hash_location, newhash, 16) != 0) - goto release_sk1; + goto out; } #endif @@ -898,11 +898,8 @@ static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb) tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0); #ifdef CONFIG_TCP_MD5SIG -release_sk1: - if (sk1) { - rcu_read_unlock(); - sock_put(sk1); - } +out: + rcu_read_unlock(); #endif } @@ -1351,6 +1348,7 @@ static int tcp_v6_rcv(struct sk_buff *skb) { const struct tcphdr *th; const struct ipv6hdr *hdr; + bool refcounted; struct sock *sk; int ret; struct net *net = dev_net(skb->dev); @@ -1381,7 +1379,8 @@ static int tcp_v6_rcv(struct sk_buff *skb) lookup: sk = __inet6_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th), - th->source, th->dest, inet6_iif(skb)); + th->source, th->dest, inet6_iif(skb), + &refcounted); if (!sk) goto no_tcp_socket; @@ -1404,6 +1403,7 @@ process: goto lookup; } sock_hold(sk); + refcounted = true; nsk = tcp_check_req(sk, skb, req, false); if (!nsk) { reqsk_put(req); @@ -1460,7 +1460,8 @@ process: bh_unlock_sock(sk); put_and_return: - sock_put(sk); + if (refcounted) + sock_put(sk); return ret ? 
-1 : 0; no_tcp_socket: @@ -1483,7 +1484,8 @@ discard_it: return 0; discard_and_relse: - sock_put(sk); + if (refcounted) + sock_put(sk); goto discard_it; do_time_wait: @@ -1514,6 +1516,7 @@ do_time_wait: inet_twsk_deschedule_put(tw); sk = sk2; tcp_v6_restore_cb(skb); + refcounted = false; goto process; } /* Fall through to ACK */ diff --git a/net/netfilter/xt_socket.c b/net/netfilter/xt_socket.c index 49d14ecad444..b10ade272b50 100644 --- a/net/netfilter/xt_socket.c +++ b/net/netfilter/xt_socket.c @@ -120,9 +120,9 @@ xt_socket_get_sock_v4(struct net *net, struct sk_buff *skb, const int doff, { switch (protocol) { case IPPROTO_TCP: - return __inet_lookup(net, &tcp_hashinfo, skb, doff, - saddr, sport, daddr, dport, - in->ifindex); + return inet_lookup(net, &tcp_hashinfo, skb, doff, + saddr, sport, daddr, dport, + in->ifindex); case IPPROTO_UDP: return udp4_lib_lookup(net, saddr, sport, daddr, dport, in->ifindex); From 15239302edd46b184e758048253541fb211e315e Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 1 Apr 2016 08:52:18 -0700 Subject: [PATCH 0201/1649] sock_diag: add SK_MEMINFO_DROPS Reporting sk_drops to user space was available for UDP sockets using /proc interface. Add this to sock_diag, so that we can have the same information available to ss users, and we'll be able to add sk_drops indications for TCP sockets as well. Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- include/uapi/linux/sock_diag.h | 1 + net/core/sock_diag.c | 1 + 2 files changed, 2 insertions(+) diff --git a/include/uapi/linux/sock_diag.h b/include/uapi/linux/sock_diag.h index bae2d80034d4..7ff505d8a47b 100644 --- a/include/uapi/linux/sock_diag.h +++ b/include/uapi/linux/sock_diag.h @@ -20,6 +20,7 @@ enum { SK_MEMINFO_WMEM_QUEUED, SK_MEMINFO_OPTMEM, SK_MEMINFO_BACKLOG, + SK_MEMINFO_DROPS, SK_MEMINFO_VARS, }; diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c index a996ce8c8fb2..ca9e35bbe13c 100644 --- a/net/core/sock_diag.c +++ b/net/core/sock_diag.c @@ -67,6 +67,7 @@ int sock_diag_put_meminfo(struct sock *sk, struct sk_buff *skb, int attrtype) mem[SK_MEMINFO_WMEM_QUEUED] = sk->sk_wmem_queued; mem[SK_MEMINFO_OPTMEM] = atomic_read(&sk->sk_omem_alloc); mem[SK_MEMINFO_BACKLOG] = sk->sk_backlog.len; + mem[SK_MEMINFO_DROPS] = atomic_read(&sk->sk_drops); return nla_put(skb, attrtype, sizeof(mem), &mem); } From 532182cd610782db8c18230c2747626562032205 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 1 Apr 2016 08:52:19 -0700 Subject: [PATCH 0202/1649] tcp: increment sk_drops for dropped rx packets Now ss can report sk_drops, we can instruct TCP to increment this per socket counter when it drops an incoming frame, to refine monitoring and debugging. Following patch takes care of listeners drops. Signed-off-by: Eric Dumazet Signed-off-by: David S. 
Miller --- include/net/sock.h | 7 +++++++ net/ipv4/tcp_input.c | 33 ++++++++++++++++++++------------- net/ipv4/tcp_ipv4.c | 1 + net/ipv6/tcp_ipv6.c | 1 + 4 files changed, 29 insertions(+), 13 deletions(-) diff --git a/include/net/sock.h b/include/net/sock.h index 7ad73db9dde2..310c4367ea83 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -2012,6 +2012,13 @@ sock_skb_set_dropcount(const struct sock *sk, struct sk_buff *skb) SOCK_SKB_CB(skb)->dropcount = atomic_read(&sk->sk_drops); } +static inline void sk_drops_add(struct sock *sk, const struct sk_buff *skb) +{ + int segs = max_t(u16, 1, skb_shinfo(skb)->gso_segs); + + atomic_add(segs, &sk->sk_drops); +} + void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb); void __sock_recv_wifi_status(struct msghdr *msg, struct sock *sk, diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index a26e2d262358..0ffcd07e3409 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -4307,6 +4307,12 @@ static bool tcp_try_coalesce(struct sock *sk, return true; } +static void tcp_drop(struct sock *sk, struct sk_buff *skb) +{ + sk_drops_add(sk, skb); + __kfree_skb(skb); +} + /* This one checks to see if we can put data from the * out_of_order queue into the receive_queue. */ @@ -4331,7 +4337,7 @@ static void tcp_ofo_queue(struct sock *sk) __skb_unlink(skb, &tp->out_of_order_queue); if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) { SOCK_DEBUG(sk, "ofo packet was already received\n"); - __kfree_skb(skb); + tcp_drop(sk, skb); continue; } SOCK_DEBUG(sk, "ofo requeuing : rcv_next %X seq %X - %X\n", @@ -4383,7 +4389,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb) if (unlikely(tcp_try_rmem_schedule(sk, skb, skb->truesize))) { NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFODROP); - __kfree_skb(skb); + tcp_drop(sk, skb); return; } @@ -4447,7 +4453,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb) if (!after(end_seq, TCP_SKB_CB(skb1)->end_seq)) { /* All the bits are present. Drop. 
*/ NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFOMERGE); - __kfree_skb(skb); + tcp_drop(sk, skb); skb = NULL; tcp_dsack_set(sk, seq, end_seq); goto add_sack; @@ -4486,7 +4492,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb) tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq, TCP_SKB_CB(skb1)->end_seq); NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFOMERGE); - __kfree_skb(skb1); + tcp_drop(sk, skb1); } add_sack: @@ -4569,12 +4575,13 @@ err: static void tcp_data_queue(struct sock *sk, struct sk_buff *skb) { struct tcp_sock *tp = tcp_sk(sk); - int eaten = -1; bool fragstolen = false; + int eaten = -1; - if (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq) - goto drop; - + if (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq) { + __kfree_skb(skb); + return; + } skb_dst_drop(skb); __skb_pull(skb, tcp_hdr(skb)->doff * 4); @@ -4656,7 +4663,7 @@ out_of_window: tcp_enter_quickack_mode(sk); inet_csk_schedule_ack(sk); drop: - __kfree_skb(skb); + tcp_drop(sk, skb); return; } @@ -5233,7 +5240,7 @@ syn_challenge: return true; discard: - __kfree_skb(skb); + tcp_drop(sk, skb); return false; } @@ -5451,7 +5458,7 @@ csum_error: TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS); discard: - __kfree_skb(skb); + tcp_drop(sk, skb); } EXPORT_SYMBOL(tcp_rcv_established); @@ -5682,7 +5689,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb, TCP_DELACK_MAX, TCP_RTO_MAX); discard: - __kfree_skb(skb); + tcp_drop(sk, skb); return 0; } else { tcp_send_ack(sk); @@ -6043,7 +6050,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb) if (!queued) { discard: - __kfree_skb(skb); + tcp_drop(sk, skb); } return 0; } diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index e5f924b29946..059a98f5e7e1 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -1694,6 +1694,7 @@ discard_it: return 0; discard_and_relse: + sk_drops_add(sk, skb); if (refcounted) sock_put(sk); goto discard_it; diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index f0422e782731..5fa8fea394c9 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -1484,6 +1484,7 @@ discard_it: return 0; discard_and_relse: + sk_drops_add(sk, skb); if (refcounted) sock_put(sk); goto discard_it; From 9caad864151e525929d323de96cad382da49c3b2 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 1 Apr 2016 08:52:20 -0700 Subject: [PATCH 0203/1649] tcp: increment sk_drops for listeners Goal: packets dropped by a listener are accounted for. This adds tcp_listendrop() helper, and clears sk_drops in sk_clone_lock() so that children do not inherit their parent drop count. Note that we no longer increment LINUX_MIB_LISTENDROPS counter when sending a SYNCOOKIE, since the SYN packet generated a SYNACK. We already have a separate LINUX_MIB_SYNCOOKIESSENT Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- include/net/tcp.h | 13 +++++++++++++ net/core/sock.c | 1 + net/ipv4/tcp_input.c | 8 +++++--- net/ipv4/tcp_ipv4.c | 6 +++--- net/ipv6/tcp_ipv6.c | 4 ++-- 5 files changed, 24 insertions(+), 8 deletions(-) diff --git a/include/net/tcp.h b/include/net/tcp.h index a23282996ca9..74d3ed5eb219 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -1836,4 +1836,17 @@ static inline void tcp_segs_in(struct tcp_sock *tp, const struct sk_buff *skb) tp->data_segs_in += segs_in; } +/* + * TCP listen path runs lockless. + * We forced "struct sock" to be const qualified to make sure + * we don't modify one of its field by mistake. 
+ * Here, we increment sk_drops which is an atomic_t, so we can safely + * make sock writable again. + */ +static inline void tcp_listendrop(const struct sock *sk) +{ + atomic_inc(&((struct sock *)sk)->sk_drops); + NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS); +} + #endif /* _TCP_H */ diff --git a/net/core/sock.c b/net/core/sock.c index 7a6a063b28b3..2f517ea56786 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -1525,6 +1525,7 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority) newsk->sk_dst_cache = NULL; newsk->sk_wmem_queued = 0; newsk->sk_forward_alloc = 0; + atomic_set(&newsk->sk_drops, 0); newsk->sk_send_head = NULL; newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK; diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 0ffcd07e3409..983f04c11177 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -6339,8 +6339,10 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops, inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT); af_ops->send_synack(sk, dst, &fl, req, &foc, !want_cookie); - if (want_cookie) - goto drop_and_free; + if (want_cookie) { + reqsk_free(req); + return 0; + } } reqsk_put(req); return 0; @@ -6350,7 +6352,7 @@ drop_and_release: drop_and_free: reqsk_free(req); drop: - NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS); + tcp_listendrop(sk); return 0; } EXPORT_SYMBOL(tcp_conn_request); diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 059a98f5e7e1..f3ce0afe70aa 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -329,7 +329,7 @@ void tcp_req_err(struct sock *sk, u32 seq, bool abort) * errors returned from accept(). */ inet_csk_reqsk_queue_drop(req->rsk_listener, req); - NET_INC_STATS_BH(net, LINUX_MIB_LISTENDROPS); + tcp_listendrop(req->rsk_listener); } reqsk_put(req); } @@ -1246,7 +1246,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) &tcp_request_sock_ipv4_ops, sk, skb); drop: - NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS); + tcp_listendrop(sk); return 0; } EXPORT_SYMBOL(tcp_v4_conn_request); @@ -1348,7 +1348,7 @@ exit_overflow: exit_nonewsk: dst_release(dst); exit: - NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS); + tcp_listendrop(sk); return NULL; put_and_exit: inet_csk_prepare_forced_close(newsk); diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 5fa8fea394c9..7cde1b6fdda3 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -964,7 +964,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb) &tcp_request_sock_ipv6_ops, sk, skb); drop: - NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS); + tcp_listendrop(sk); return 0; /* don't send reset */ } @@ -1169,7 +1169,7 @@ out_overflow: out_nonewsk: dst_release(dst); out: - NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS); + tcp_listendrop(sk); return NULL; } From a9d6532b567489196dac4ce60c62343e43228759 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 1 Apr 2016 08:52:21 -0700 Subject: [PATCH 0204/1649] ipv4: tcp: set SOCK_USE_WRITE_QUEUE for ip_send_unicast_reply() TCP uses per cpu 'sockets' to send some packets : - RST packets ( tcp_v4_send_reset()) ) - ACK packets for SYN_RECV and TIMEWAIT sockets By setting SOCK_USE_WRITE_QUEUE flag, we tell sock_wfree() to not call sk_write_space() since these internal sockets do not care. This gives a small performance improvement, merely by allowing cpu to properly predict the sock_wfree() conditional branch, and avoiding one atomic operation. Signed-off-by: Eric Dumazet Signed-off-by: David S. 
Miller --- net/ipv4/tcp_ipv4.c | 1 + 1 file changed, 1 insertion(+) diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index f3ce0afe70aa..456ff3d6a132 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -2384,6 +2384,7 @@ static int __net_init tcp_sk_init(struct net *net) IPPROTO_TCP, net); if (res) goto fail; + sock_set_flag(sk, SOCK_USE_WRITE_QUEUE); *per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk; } From 4ce7e93cb3fe87db5b700050172dc41def9834b3 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 1 Apr 2016 08:52:22 -0700 Subject: [PATCH 0205/1649] tcp: rate limit ACK sent by SYN_RECV request sockets MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Attackers like to use SYNFLOOD targeting one 5-tuple, as they hit a single RX queue (and cpu) on the victim. If they use random sequence numbers in their SYN, we detect they do not match the expected window and send back an ACK. This patch adds a rate limitation, so that the effect of such attacks is limited to ingress only. We roughly double our ability to absorb such attacks. Signed-off-by: Eric Dumazet Cc: Willem de Bruijn Cc: Neal Cardwell Cc: Maciej Żenczykowski Acked-by: Neal Cardwell Signed-off-by: David S. Miller --- net/ipv4/tcp_minisocks.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c index acb366dd61e6..4c53e7c86586 100644 --- a/net/ipv4/tcp_minisocks.c +++ b/net/ipv4/tcp_minisocks.c @@ -704,7 +704,10 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb, if (paws_reject || !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq, tcp_rsk(req)->rcv_nxt, tcp_rsk(req)->rcv_nxt + req->rsk_rcv_wnd)) { /* Out of window: send ACK and drop. */ - if (!(flg & TCP_FLAG_RST)) + if (!(flg & TCP_FLAG_RST) && + !tcp_oow_rate_limited(sock_net(sk), skb, + LINUX_MIB_TCPACKSKIPPEDSYNRECV, + &tcp_rsk(req)->last_oow_ack_time)) req->rsk_ops->send_ack(sk, skb, req); if (paws_reject) NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED); From 08ca38742b63ae3825096e943de371a3b372c4a0 Mon Sep 17 00:00:00 2001 From: Stefan Assmann Date: Wed, 3 Feb 2016 09:20:47 +0100 Subject: [PATCH 0206/1649] i40e: call ndo_stop() instead of dev_close() when running offline selftest Calling dev_close() causes IFF_UP to be cleared which will remove the interfaces routes and some addresses. That's probably not what the user intended when running the offline selftest. Besides this does not happen if the interface is brought down before the test, so the current behaviour is inconsistent. Instead call the net_device_ops ndo_stop function directly and avoid touching IFF_UP at all. 
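To make the distinction concrete, here is an editorial sketch (not part of the patch) of what the two call paths imply for the offline self-test; i40e_close()/i40e_open() are the driver's own ndo_stop/ndo_open handlers, which the diff below exports for this purpose:

	/* Hedged illustration only -- the real change is the diff that follows.
	 *
	 * dev_close(netdev)  : clears IFF_UP and fires the NETDEV_GOING_DOWN /
	 *                      NETDEV_DOWN notifiers around the driver's
	 *                      .ndo_stop, which is what purges routes and some
	 *                      (IPv6) addresses on the interface.
	 * i40e_close(netdev) : only the driver-level .ndo_stop work; core
	 *                      netdev state (IFF_UP, routes, addresses) is
	 *                      left untouched.
	 */
	if (if_running)
		i40e_close(netdev);	/* previously dev_close(netdev) */

	/* ... run the offline diagnostics and reset the function ... */

	if (if_running)
		i40e_open(netdev);	/* previously dev_open(netdev) */
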
Signed-off-by: Stefan Assmann Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e.h | 2 +- drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 4 ++-- drivers/net/ethernet/intel/i40e/i40e_main.c | 4 ---- 3 files changed, 3 insertions(+), 7 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index 1ce6e9c0427d..f208570cfdbf 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -811,6 +811,7 @@ int i40e_vlan_rx_kill_vid(struct net_device *netdev, __always_unused __be16 proto, u16 vid); #endif int i40e_open(struct net_device *netdev); +int i40e_close(struct net_device *netdev); int i40e_vsi_open(struct i40e_vsi *vsi); void i40e_vlan_stripping_disable(struct i40e_vsi *vsi); int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid); @@ -823,7 +824,6 @@ bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi); struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, u8 *macaddr, bool is_vf, bool is_netdev); #ifdef I40E_FCOE -int i40e_close(struct net_device *netdev); int __i40e_setup_tc(struct net_device *netdev, u32 handle, __be16 proto, struct tc_to_netdev *tc); void i40e_netpoll(struct net_device *netdev); diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 784b1659457a..410d237f9137 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -1714,7 +1714,7 @@ static void i40e_diag_test(struct net_device *netdev, /* If the device is online then take it offline */ if (if_running) /* indicate we're in test mode */ - dev_close(netdev); + i40e_close(netdev); else /* This reset does not affect link - if it is * changed to a type of reset that does affect @@ -1743,7 +1743,7 @@ static void i40e_diag_test(struct net_device *netdev, i40e_do_reset(pf, BIT(__I40E_PF_RESET_REQUESTED)); if (if_running) - dev_open(netdev); + i40e_open(netdev); } else { /* Online tests */ netif_info(pf, drv, netdev, "online testing starting\n"); diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 67006431726a..650336e50255 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -5509,11 +5509,7 @@ static void i40e_fdir_filter_exit(struct i40e_pf *pf) * * Returns 0, this is not allowed to fail **/ -#ifdef I40E_FCOE int i40e_close(struct net_device *netdev) -#else -static int i40e_close(struct net_device *netdev) -#endif { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; From 5c4654daf2e2f25dfbd7fa572c59937ea6d4198b Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Fri, 19 Feb 2016 12:17:08 -0800 Subject: [PATCH 0207/1649] i40e/i40evf: Allow up to 12K bytes of data per Tx descriptor instead of 8K From what I can tell the practical limitation on the size of the Tx data buffer is the fact that the Tx descriptor is limited to 14 bits. As such we cannot use 16K as is typically used on the other Intel drivers. However artificially limiting ourselves to 8K can be expensive as this means that we will consume up to 10 descriptors (1 context, 1 for header, and 9 for payload, non-8K aligned) in a single send. I propose that we can reduce this by increasing the maximum data for a 4K aligned block to 12K. We can reduce the descriptors used for a 32K aligned block by 1 by increasing the size like this. 
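Before the commit message continues, a brief editorial aside to make the 12K arithmetic concrete: the i40e_txd_use_count() helper added below approximates DIV_ROUND_UP(size, 12K) with a reciprocal multiply so the hot path avoids a division. A standalone userspace sanity check of that equivalence (not part of the patch, but using the same constants the patch introduces) could look like this:

	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	#define MAX_READ_REQ_SIZE	4096u
	#define MAX_DATA_PER_TXD	(16u * 1024u - 1u)	/* 14-bit size field */
	#define MAX_DATA_ALIGNED	(MAX_DATA_PER_TXD & ~(MAX_READ_REQ_SIZE - 1u))	/* 12K */

	/* Same arithmetic as the i40e_txd_use_count() helper in the patch */
	static unsigned int txd_use_count(unsigned int size)
	{
		const unsigned int max = MAX_DATA_ALIGNED;
		const unsigned int reciprocal =
			(unsigned int)((((1ull << 32) - 1) + (max / 2)) / max);
		unsigned int adjust = ~(uint32_t)0;

		/* if we rounded up on the reciprocal pull down the adjustment */
		if ((max * reciprocal) > adjust)
			adjust = ~(uint32_t)(reciprocal - 1);

		return (unsigned int)((((uint64_t)size * reciprocal) + adjust) >> 32);
	}

	int main(void)
	{
		unsigned int size;

		/* The approximation must never under-count descriptors */
		for (size = 0; size <= 64u * 1024u; size++) {
			unsigned int exact =
				(size + MAX_DATA_ALIGNED - 1) / MAX_DATA_ALIGNED;

			assert(txd_use_count(size) >= exact);
		}

		/* A 32K block needs 3 data descriptors; an 8K cap needed 4 */
		printf("32K -> %u descriptors\n", txd_use_count(32u * 1024u));
		return 0;
	}
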
In addition we still have the 4K - 1 of space that is still unused. We can use this as a bit of extra padding when dealing with data that is not aligned to 4K. By aligning the descriptors after the first to 4K we can improve the efficiency of PCIe accesses as we can avoid using byte enables and can fetch full TLP transactions after the first fetch of the buffer. This helps to improve PCIe efficiency. Below is the results of testing before and after with this patch: Recv Send Send Utilization Service Demand Socket Socket Message Elapsed Send Recv Send Recv Size Size Size Time Throughput local remote local remote bytes bytes bytes secs. 10^6bits/s % S % U us/KB us/KB Before: 87380 16384 16384 10.00 33682.24 20.27 -1.00 0.592 -1.00 After: 87380 16384 16384 10.00 34204.08 20.54 -1.00 0.590 -1.00 So the net result of this patch is that we have a small gain in throughput due to a reduction in overhead for putting together the frame. Signed-off-by: Alexander Duyck Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_fcoe.c | 2 +- drivers/net/ethernet/intel/i40e/i40e_txrx.c | 13 ++++--- drivers/net/ethernet/intel/i40e/i40e_txrx.h | 35 +++++++++++++++++-- drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 13 ++++--- drivers/net/ethernet/intel/i40evf/i40e_txrx.h | 35 +++++++++++++++++-- 5 files changed, 83 insertions(+), 15 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c index 8ad162c16f61..92d2208d13c7 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c +++ b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c @@ -1371,7 +1371,7 @@ static netdev_tx_t i40e_fcoe_xmit_frame(struct sk_buff *skb, if (i40e_chk_linearize(skb, count)) { if (__skb_linearize(skb)) goto out_drop; - count = TXD_USE_COUNT(skb->len); + count = i40e_txd_use_count(skb->len); tx_ring->tx_stats.tx_linearize++; } diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 084d0ab316b7..9af1411bd423 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -2717,6 +2717,8 @@ static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, tx_bi = first; for (frag = &skb_shinfo(skb)->frags[0];; frag++) { + unsigned int max_data = I40E_MAX_DATA_PER_TXD_ALIGNED; + if (dma_mapping_error(tx_ring->dev, dma)) goto dma_error; @@ -2724,12 +2726,14 @@ static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, dma_unmap_len_set(tx_bi, len, size); dma_unmap_addr_set(tx_bi, dma, dma); + /* align size to end of page */ + max_data += -dma & (I40E_MAX_READ_REQ_SIZE - 1); tx_desc->buffer_addr = cpu_to_le64(dma); while (unlikely(size > I40E_MAX_DATA_PER_TXD)) { tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset, - I40E_MAX_DATA_PER_TXD, td_tag); + max_data, td_tag); tx_desc++; i++; @@ -2740,9 +2744,10 @@ static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, i = 0; } - dma += I40E_MAX_DATA_PER_TXD; - size -= I40E_MAX_DATA_PER_TXD; + dma += max_data; + size -= max_data; + max_data = I40E_MAX_DATA_PER_TXD_ALIGNED; tx_desc->buffer_addr = cpu_to_le64(dma); } @@ -2892,7 +2897,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, if (i40e_chk_linearize(skb, count)) { if (__skb_linearize(skb)) goto out_drop; - count = TXD_USE_COUNT(skb->len); + count = i40e_txd_use_count(skb->len); tx_ring->tx_stats.tx_linearize++; } diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h 
b/drivers/net/ethernet/intel/i40e/i40e_txrx.h index cdd5dc00aec5..9e654e611642 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h @@ -146,10 +146,39 @@ enum i40e_dyn_idx_t { #define I40E_MAX_BUFFER_TXD 8 #define I40E_MIN_TX_LEN 17 -#define I40E_MAX_DATA_PER_TXD 8192 + +/* The size limit for a transmit buffer in a descriptor is (16K - 1). + * In order to align with the read requests we will align the value to + * the nearest 4K which represents our maximum read request size. + */ +#define I40E_MAX_READ_REQ_SIZE 4096 +#define I40E_MAX_DATA_PER_TXD (16 * 1024 - 1) +#define I40E_MAX_DATA_PER_TXD_ALIGNED \ + (I40E_MAX_DATA_PER_TXD & ~(I40E_MAX_READ_REQ_SIZE - 1)) + +/* This ugly bit of math is equivalent to DIV_ROUNDUP(size, X) where X is + * the value I40E_MAX_DATA_PER_TXD_ALIGNED. It is needed due to the fact + * that 12K is not a power of 2 and division is expensive. It is used to + * approximate the number of descriptors used per linear buffer. Note + * that this will overestimate in some cases as it doesn't account for the + * fact that we will add up to 4K - 1 in aligning the 12K buffer, however + * the error should not impact things much as large buffers usually mean + * we will use fewer descriptors then there are frags in an skb. + */ +static inline unsigned int i40e_txd_use_count(unsigned int size) +{ + const unsigned int max = I40E_MAX_DATA_PER_TXD_ALIGNED; + const unsigned int reciprocal = ((1ull << 32) - 1 + (max / 2)) / max; + unsigned int adjust = ~(u32)0; + + /* if we rounded up on the reciprocal pull down the adjustment */ + if ((max * reciprocal) > adjust) + adjust = ~(u32)(reciprocal - 1); + + return (u32)((((u64)size * reciprocal) + adjust) >> 32); +} /* Tx Descriptors needed, worst case */ -#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), I40E_MAX_DATA_PER_TXD) #define DESC_NEEDED (MAX_SKB_FRAGS + 4) #define I40E_MIN_DESC_PENDING 4 @@ -377,7 +406,7 @@ static inline int i40e_xmit_descriptor_count(struct sk_buff *skb) int count = 0, size = skb_headlen(skb); for (;;) { - count += TXD_USE_COUNT(size); + count += i40e_txd_use_count(size); if (!nr_frags--) break; diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c index ebcc25c05796..5f9c1bbab1fa 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c @@ -1936,6 +1936,8 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, tx_bi = first; for (frag = &skb_shinfo(skb)->frags[0];; frag++) { + unsigned int max_data = I40E_MAX_DATA_PER_TXD_ALIGNED; + if (dma_mapping_error(tx_ring->dev, dma)) goto dma_error; @@ -1943,12 +1945,14 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, dma_unmap_len_set(tx_bi, len, size); dma_unmap_addr_set(tx_bi, dma, dma); + /* align size to end of page */ + max_data += -dma & (I40E_MAX_READ_REQ_SIZE - 1); tx_desc->buffer_addr = cpu_to_le64(dma); while (unlikely(size > I40E_MAX_DATA_PER_TXD)) { tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset, - I40E_MAX_DATA_PER_TXD, td_tag); + max_data, td_tag); tx_desc++; i++; @@ -1959,9 +1963,10 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, i = 0; } - dma += I40E_MAX_DATA_PER_TXD; - size -= I40E_MAX_DATA_PER_TXD; + dma += max_data; + size -= max_data; + max_data = I40E_MAX_DATA_PER_TXD_ALIGNED; tx_desc->buffer_addr = cpu_to_le64(dma); } @@ -2110,7 +2115,7 @@ static netdev_tx_t 
i40e_xmit_frame_ring(struct sk_buff *skb, if (i40e_chk_linearize(skb, count)) { if (__skb_linearize(skb)) goto out_drop; - count = TXD_USE_COUNT(skb->len); + count = i40e_txd_use_count(skb->len); tx_ring->tx_stats.tx_linearize++; } diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h index c1dd8c5c9666..3ec0ea5ea3db 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h @@ -146,10 +146,39 @@ enum i40e_dyn_idx_t { #define I40E_MAX_BUFFER_TXD 8 #define I40E_MIN_TX_LEN 17 -#define I40E_MAX_DATA_PER_TXD 8192 + +/* The size limit for a transmit buffer in a descriptor is (16K - 1). + * In order to align with the read requests we will align the value to + * the nearest 4K which represents our maximum read request size. + */ +#define I40E_MAX_READ_REQ_SIZE 4096 +#define I40E_MAX_DATA_PER_TXD (16 * 1024 - 1) +#define I40E_MAX_DATA_PER_TXD_ALIGNED \ + (I40E_MAX_DATA_PER_TXD & ~(I40E_MAX_READ_REQ_SIZE - 1)) + +/* This ugly bit of math is equivalent to DIV_ROUNDUP(size, X) where X is + * the value I40E_MAX_DATA_PER_TXD_ALIGNED. It is needed due to the fact + * that 12K is not a power of 2 and division is expensive. It is used to + * approximate the number of descriptors used per linear buffer. Note + * that this will overestimate in some cases as it doesn't account for the + * fact that we will add up to 4K - 1 in aligning the 12K buffer, however + * the error should not impact things much as large buffers usually mean + * we will use fewer descriptors then there are frags in an skb. + */ +static inline unsigned int i40e_txd_use_count(unsigned int size) +{ + const unsigned int max = I40E_MAX_DATA_PER_TXD_ALIGNED; + const unsigned int reciprocal = ((1ull << 32) - 1 + (max / 2)) / max; + unsigned int adjust = ~(u32)0; + + /* if we rounded up on the reciprocal pull down the adjustment */ + if ((max * reciprocal) > adjust) + adjust = ~(u32)(reciprocal - 1); + + return (u32)((((u64)size * reciprocal) + adjust) >> 32); +} /* Tx Descriptors needed, worst case */ -#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), I40E_MAX_DATA_PER_TXD) #define DESC_NEEDED (MAX_SKB_FRAGS + 4) #define I40E_MIN_DESC_PENDING 4 @@ -359,7 +388,7 @@ static inline int i40e_xmit_descriptor_count(struct sk_buff *skb) int count = 0, size = skb_headlen(skb); for (;;) { - count += TXD_USE_COUNT(size); + count += i40e_txd_use_count(size); if (!nr_frags--) break; From 311f23e9a4314f62fed6c13e112c998b07e37e63 Mon Sep 17 00:00:00 2001 From: Alan Cox Date: Tue, 1 Mar 2016 16:02:15 -0800 Subject: [PATCH 0208/1649] i40evf: remove dead code The only error case is when the malloc fails, in which case the clean up loop does nothing at all, so remove it Signed-off-by: Alan Cox Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40evf/i40evf_main.c | 11 +---------- 1 file changed, 1 insertion(+), 10 deletions(-) diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c index 4b70aae2fa84..820ad94c932b 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c @@ -1507,7 +1507,7 @@ static int i40evf_alloc_q_vectors(struct i40evf_adapter *adapter) adapter->q_vectors = kcalloc(num_q_vectors, sizeof(*q_vector), GFP_KERNEL); if (!adapter->q_vectors) - goto err_out; + return -ENOMEM; for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { q_vector = &adapter->q_vectors[q_idx]; @@ -1519,15 +1519,6 @@ static int 
i40evf_alloc_q_vectors(struct i40evf_adapter *adapter) } return 0; - -err_out: - while (q_idx) { - q_idx--; - q_vector = &adapter->q_vectors[q_idx]; - netif_napi_del(&q_vector->napi); - } - kfree(adapter->q_vectors); - return -ENOMEM; } /** From 98bd147d7903580ca5d5dfa0bc39c2d16714d84e Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Wed, 16 Mar 2016 14:29:08 +0100 Subject: [PATCH 0209/1649] wext: unregister_pernet_subsys() on notifier registration failure If register_netdevice_notifier() fails (which in practice it can't right now), we should call unregister_pernet_subsys(). Do that. Reported-by: Ben Hutchings Signed-off-by: Johannes Berg --- net/wireless/wext-core.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c index b50ee5d622e1..6250b1cfcde5 100644 --- a/net/wireless/wext-core.c +++ b/net/wireless/wext-core.c @@ -399,7 +399,10 @@ static int __init wireless_nlevent_init(void) if (err) return err; - return register_netdevice_notifier(&wext_netdev_notifier); + err = register_netdevice_notifier(&wext_netdev_notifier); + if (err) + unregister_pernet_subsys(&wext_pernet_ops); + return err; } subsys_initcall(wireless_nlevent_init); From 1948b2a2ec132115b422ae1feba1a3f5598f4acd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Paulo=20Rechi=20Vita?= Date: Mon, 22 Feb 2016 11:36:39 -0500 Subject: [PATCH 0210/1649] rfkill: Use switch to demux userspace operations MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Using a switch to handle different ev.op values in rfkill_fop_write() makes the code easier to extend, as out-of-range values can always be handled by the default case. Signed-off-by: João Paulo Rechi Vita [roll in fix for RFKILL_OP_CHANGE from Jouni] Signed-off-by: Johannes Berg --- net/rfkill/core.c | 36 ++++++++++++++++++++++-------------- 1 file changed, 22 insertions(+), 14 deletions(-) diff --git a/net/rfkill/core.c b/net/rfkill/core.c index 03f26e3a6f48..884027f62783 100644 --- a/net/rfkill/core.c +++ b/net/rfkill/core.c @@ -1141,6 +1141,7 @@ static ssize_t rfkill_fop_write(struct file *file, const char __user *buf, { struct rfkill *rfkill; struct rfkill_event ev; + int ret; /* we don't need the 'hard' variable but accept it */ if (count < RFKILL_EVENT_SIZE_V1 - 1) @@ -1155,29 +1156,36 @@ static ssize_t rfkill_fop_write(struct file *file, const char __user *buf, if (copy_from_user(&ev, buf, count)) return -EFAULT; - if (ev.op != RFKILL_OP_CHANGE && ev.op != RFKILL_OP_CHANGE_ALL) - return -EINVAL; - if (ev.type >= NUM_RFKILL_TYPES) return -EINVAL; mutex_lock(&rfkill_global_mutex); - if (ev.op == RFKILL_OP_CHANGE_ALL) + switch (ev.op) { + case RFKILL_OP_CHANGE_ALL: rfkill_update_global_state(ev.type, ev.soft); - - list_for_each_entry(rfkill, &rfkill_list, node) { - if (rfkill->idx != ev.idx && ev.op != RFKILL_OP_CHANGE_ALL) - continue; - - if (rfkill->type != ev.type && ev.type != RFKILL_TYPE_ALL) - continue; - - rfkill_set_block(rfkill, ev.soft); + list_for_each_entry(rfkill, &rfkill_list, node) + if (rfkill->type == ev.type || + ev.type == RFKILL_TYPE_ALL) + rfkill_set_block(rfkill, ev.soft); + ret = 0; + break; + case RFKILL_OP_CHANGE: + list_for_each_entry(rfkill, &rfkill_list, node) + if (rfkill->idx == ev.idx && + (rfkill->type == ev.type || + ev.type == RFKILL_TYPE_ALL)) + rfkill_set_block(rfkill, ev.soft); + ret = 0; + break; + default: + ret = -EINVAL; + break; } + mutex_unlock(&rfkill_global_mutex); - return count; + return ret ?: count; } static int 
rfkill_fop_release(struct inode *inode, struct file *file) From 646e76bb5daf4ca38438c69ffb72cccb605f3466 Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Tue, 23 Feb 2016 15:43:35 +0100 Subject: [PATCH 0211/1649] mac80211: parse VHT info in injected frames Add VHT radiotap parsing support to ieee80211_parse_tx_radiotap(). That capability has been tested using a d-link dir-860l rev b1 running OpenWrt trunk and mt76 driver Signed-off-by: Lorenzo Bianconi Signed-off-by: Johannes Berg --- .../networking/mac80211-injection.txt | 13 ++++++++ net/mac80211/tx.c | 31 +++++++++++++++++++ 2 files changed, 44 insertions(+) diff --git a/Documentation/networking/mac80211-injection.txt b/Documentation/networking/mac80211-injection.txt index ec8f934c2eb2..e0efcaf5b0ee 100644 --- a/Documentation/networking/mac80211-injection.txt +++ b/Documentation/networking/mac80211-injection.txt @@ -45,6 +45,19 @@ radiotap headers and used to control injection: number of retries when either IEEE80211_RADIOTAP_RATE or IEEE80211_RADIOTAP_MCS was used + * IEEE80211_RADIOTAP_VHT + + VHT mcs and number of streams used in the transmission (only for devices + without own rate control). Also other fields are parsed + + flags field + IEEE80211_RADIOTAP_VHT_FLAG_SGI: use short guard interval + + bandwidth field + 1: send using 40MHz channel width + 4: send using 80MHz channel width + 11: send using 160MHz channel width + The injection code can also skip all other currently defined radiotap fields facilitating replay of captured radiotap headers directly. diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 62ad5321257d..c485fc26fa0c 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -1692,6 +1692,8 @@ static bool ieee80211_parse_tx_radiotap(struct ieee80211_local *local, u8 rate_retries = 0; u16 rate_flags = 0; u8 mcs_known, mcs_flags; + u16 vht_known; + u8 vht_mcs = 0, vht_nss = 0; int i; info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT | @@ -1772,6 +1774,32 @@ static bool ieee80211_parse_tx_radiotap(struct ieee80211_local *local, rate_flags |= IEEE80211_TX_RC_40_MHZ_WIDTH; break; + case IEEE80211_RADIOTAP_VHT: + vht_known = get_unaligned_le16(iterator.this_arg); + rate_found = true; + + rate_flags = IEEE80211_TX_RC_VHT_MCS; + if ((vht_known & IEEE80211_RADIOTAP_VHT_KNOWN_GI) && + (iterator.this_arg[2] & + IEEE80211_RADIOTAP_VHT_FLAG_SGI)) + rate_flags |= IEEE80211_TX_RC_SHORT_GI; + if (vht_known & + IEEE80211_RADIOTAP_VHT_KNOWN_BANDWIDTH) { + if (iterator.this_arg[3] == 1) + rate_flags |= + IEEE80211_TX_RC_40_MHZ_WIDTH; + else if (iterator.this_arg[3] == 4) + rate_flags |= + IEEE80211_TX_RC_80_MHZ_WIDTH; + else if (iterator.this_arg[3] == 11) + rate_flags |= + IEEE80211_TX_RC_160_MHZ_WIDTH; + } + + vht_mcs = iterator.this_arg[4] >> 4; + vht_nss = iterator.this_arg[4] & 0xF; + break; + /* * Please update the file * Documentation/networking/mac80211-injection.txt @@ -1797,6 +1825,9 @@ static bool ieee80211_parse_tx_radiotap(struct ieee80211_local *local, if (rate_flags & IEEE80211_TX_RC_MCS) { info->control.rates[0].idx = rate; + } else if (rate_flags & IEEE80211_TX_RC_VHT_MCS) { + ieee80211_rate_set_vht(info->control.rates, vht_mcs, + vht_nss); } else { for (i = 0; i < sband->n_bitrates; i++) { if (rate * 5 != sband->bitrates[i].bitrate) From 162dd6a7253ab009c6335c21ce6b80cf227ddda4 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Tue, 23 Feb 2016 23:05:06 +0200 Subject: [PATCH 0212/1649] mac80211: allow drivers to report CLOCK_BOOTTIME for scan results This was requested by Android, and the appropriate cfg80211 
API had been added by Dmitry. Support it in mac80211, allowing drivers to provide the timestamp. Signed-off-by: Johannes Berg --- include/net/mac80211.h | 3 +++ net/mac80211/scan.c | 4 +++- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/include/net/mac80211.h b/include/net/mac80211.h index 0c09da34b67a..1b9f729bb074 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -1120,6 +1120,8 @@ enum mac80211_rx_vht_flags { * * @mactime: value in microseconds of the 64-bit Time Synchronization Function * (TSF) timer when the first data symbol (MPDU) arrived at the hardware. + * @boottime_ns: CLOCK_BOOTTIME timestamp the frame was received at, this is + * needed only for beacons and probe responses that update the scan cache. * @device_timestamp: arbitrary timestamp for the device, mac80211 doesn't use * it but can store it and pass it back to the driver for synchronisation * @band: the active band when this frame was received @@ -1146,6 +1148,7 @@ enum mac80211_rx_vht_flags { */ struct ieee80211_rx_status { u64 mactime; + u64 boottime_ns; u32 device_timestamp; u32 ampdu_reference; u32 flag; diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c index ae980ce8daff..a3fea1f35ef9 100644 --- a/net/mac80211/scan.c +++ b/net/mac80211/scan.c @@ -66,7 +66,9 @@ ieee80211_bss_info_update(struct ieee80211_local *local, struct cfg80211_bss *cbss; struct ieee80211_bss *bss; int clen, srlen; - struct cfg80211_inform_bss bss_meta = {}; + struct cfg80211_inform_bss bss_meta = { + .boottime_ns = rx_status->boottime_ns, + }; bool signal_valid; if (ieee80211_hw_check(&local->hw, SIGNAL_DBM)) From f980ebc058c2fa2a552e495db1de0b330082ab70 Mon Sep 17 00:00:00 2001 From: Sara Sharon Date: Wed, 24 Feb 2016 11:49:45 +0200 Subject: [PATCH 0213/1649] mac80211: allow not sending MIC up from driver for HW crypto When HW crypto is used, there's no need for the CCMP/GCMP MIC to be available to mac80211, and the hardware might have removed it already after checking. The MIC is also useless to have when the frame is already decrypted, so allow indicating that it's not present. Since we are running out of bits in mac80211_rx_flags, make the flags field a u64. 
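As a rough illustration (not part of this patch), a driver whose hardware decrypts frames and strips the CCMP/GCMP MIC after checking it would be expected to report both facts in the rx status before handing the frame to mac80211. The helper below is hypothetical and omits the other rx-status fields (band, freq, signal) a real driver has to fill in:

#include <net/mac80211.h>

/* Hypothetical driver receive path: the hardware already decrypted the
 * frame and removed the MIC, so mac80211 must not look for it.  Note
 * that status->flag is a u64 after this change.
 */
static void example_deliver_rx(struct ieee80211_hw *hw, struct sk_buff *skb)
{
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);

	memset(status, 0, sizeof(*status));
	status->flag |= RX_FLAG_DECRYPTED |	/* payload is plaintext */
			RX_FLAG_MIC_STRIPPED;	/* MIC already removed */

	ieee80211_rx(hw, skb);
}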
Signed-off-by: Sara Sharon Signed-off-by: Emmanuel Grumbach Signed-off-by: Johannes Berg --- drivers/net/wireless/ath/ath10k/htt_rx.c | 2 +- drivers/net/wireless/ath/wcn36xx/txrx.c | 2 +- include/net/mac80211.h | 5 ++++- net/mac80211/util.c | 5 +++-- net/mac80211/wpa.c | 26 +++++++++++++----------- 5 files changed, 23 insertions(+), 17 deletions(-) diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c index ae9b686a4e91..feab80a5b6eb 100644 --- a/drivers/net/wireless/ath/ath10k/htt_rx.c +++ b/drivers/net/wireless/ath/ath10k/htt_rx.c @@ -979,7 +979,7 @@ static void ath10k_process_rx(struct ath10k *ar, *status = *rx_status; ath10k_dbg(ar, ATH10K_DBG_DATA, - "rx skb %p len %u peer %pM %s %s sn %u %s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n", + "rx skb %p len %u peer %pM %s %s sn %u %s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%llx fcs-err %i mic-err %i amsdu-more %i\n", skb, skb->len, ieee80211_get_SA(hdr), diff --git a/drivers/net/wireless/ath/wcn36xx/txrx.c b/drivers/net/wireless/ath/wcn36xx/txrx.c index 9bec8237231d..99c21aac68bd 100644 --- a/drivers/net/wireless/ath/wcn36xx/txrx.c +++ b/drivers/net/wireless/ath/wcn36xx/txrx.c @@ -57,7 +57,7 @@ int wcn36xx_rx_skb(struct wcn36xx *wcn, struct sk_buff *skb) RX_FLAG_MMIC_STRIPPED | RX_FLAG_DECRYPTED; - wcn36xx_dbg(WCN36XX_DBG_RX, "status.flags=%x\n", status.flag); + wcn36xx_dbg(WCN36XX_DBG_RX, "status.flags=%llx\n", status.flag); memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status)); diff --git a/include/net/mac80211.h b/include/net/mac80211.h index 1b9f729bb074..7cb791f21722 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -1034,6 +1034,8 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info) * on this subframe * @RX_FLAG_AMPDU_DELIM_CRC_KNOWN: The delimiter CRC field is known (the CRC * is stored in the @ampdu_delimiter_crc field) + * @RX_FLAG_MIC_STRIPPED: The mic was stripped of this packet. Decryption was + * done by the hardware * @RX_FLAG_LDPC: LDPC was used * @RX_FLAG_ONLY_MONITOR: Report frame only to monitor interfaces without * processing it in any regular way. 
@@ -1091,6 +1093,7 @@ enum mac80211_rx_flags { RX_FLAG_5MHZ = BIT(29), RX_FLAG_AMSDU_MORE = BIT(30), RX_FLAG_RADIOTAP_VENDOR_DATA = BIT(31), + RX_FLAG_MIC_STRIPPED = BIT_ULL(32), }; #define RX_FLAG_STBC_SHIFT 26 @@ -1151,7 +1154,7 @@ struct ieee80211_rx_status { u64 boottime_ns; u32 device_timestamp; u32 ampdu_reference; - u32 flag; + u64 flag; u16 freq; u8 vht_flag; u8 rate_idx; diff --git a/net/mac80211/util.c b/net/mac80211/util.c index 7390de4946a9..0319d6d4f863 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -2724,8 +2724,9 @@ u64 ieee80211_calculate_rx_timestamp(struct ieee80211_local *local, rate = cfg80211_calculate_bitrate(&ri); if (WARN_ONCE(!rate, - "Invalid bitrate: flags=0x%x, idx=%d, vht_nss=%d\n", - status->flag, status->rate_idx, status->vht_nss)) + "Invalid bitrate: flags=0x%llx, idx=%d, vht_nss=%d\n", + (unsigned long long)status->flag, status->rate_idx, + status->vht_nss)) return 0; /* rewind from end of MPDU */ diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c index 18848258adde..7e4f2652bca7 100644 --- a/net/mac80211/wpa.c +++ b/net/mac80211/wpa.c @@ -504,18 +504,20 @@ ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx, !ieee80211_is_robust_mgmt_frame(skb)) return RX_CONTINUE; - data_len = skb->len - hdrlen - IEEE80211_CCMP_HDR_LEN - mic_len; - if (!rx->sta || data_len < 0) - return RX_DROP_UNUSABLE; - if (status->flag & RX_FLAG_DECRYPTED) { if (!pskb_may_pull(rx->skb, hdrlen + IEEE80211_CCMP_HDR_LEN)) return RX_DROP_UNUSABLE; + if (status->flag & RX_FLAG_MIC_STRIPPED) + mic_len = 0; } else { if (skb_linearize(rx->skb)) return RX_DROP_UNUSABLE; } + data_len = skb->len - hdrlen - IEEE80211_CCMP_HDR_LEN - mic_len; + if (!rx->sta || data_len < 0) + return RX_DROP_UNUSABLE; + if (!(status->flag & RX_FLAG_PN_VALIDATED)) { ccmp_hdr2pn(pn, skb->data + hdrlen); @@ -720,8 +722,7 @@ ieee80211_crypto_gcmp_decrypt(struct ieee80211_rx_data *rx) struct sk_buff *skb = rx->skb; struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); u8 pn[IEEE80211_GCMP_PN_LEN]; - int data_len; - int queue; + int data_len, queue, mic_len = IEEE80211_GCMP_MIC_LEN; hdrlen = ieee80211_hdrlen(hdr->frame_control); @@ -729,19 +730,20 @@ ieee80211_crypto_gcmp_decrypt(struct ieee80211_rx_data *rx) !ieee80211_is_robust_mgmt_frame(skb)) return RX_CONTINUE; - data_len = skb->len - hdrlen - IEEE80211_GCMP_HDR_LEN - - IEEE80211_GCMP_MIC_LEN; - if (!rx->sta || data_len < 0) - return RX_DROP_UNUSABLE; - if (status->flag & RX_FLAG_DECRYPTED) { if (!pskb_may_pull(rx->skb, hdrlen + IEEE80211_GCMP_HDR_LEN)) return RX_DROP_UNUSABLE; + if (status->flag & RX_FLAG_MIC_STRIPPED) + mic_len = 0; } else { if (skb_linearize(rx->skb)) return RX_DROP_UNUSABLE; } + data_len = skb->len - hdrlen - IEEE80211_GCMP_HDR_LEN - mic_len; + if (!rx->sta || data_len < 0) + return RX_DROP_UNUSABLE; + if (!(status->flag & RX_FLAG_PN_VALIDATED)) { gcmp_hdr2pn(pn, skb->data + hdrlen); @@ -772,7 +774,7 @@ ieee80211_crypto_gcmp_decrypt(struct ieee80211_rx_data *rx) } /* Remove GCMP header and MIC */ - if (pskb_trim(skb, skb->len - IEEE80211_GCMP_MIC_LEN)) + if (pskb_trim(skb, skb->len - mic_len)) return RX_DROP_UNUSABLE; memmove(skb->data + IEEE80211_GCMP_HDR_LEN, skb->data, hdrlen); skb_pull(skb, IEEE80211_GCMP_HDR_LEN); From 5c05803a3e2054257d7e8e737a6efaf2c7f6b725 Mon Sep 17 00:00:00 2001 From: Sven Eckelmann Date: Wed, 24 Feb 2016 16:25:48 +0100 Subject: [PATCH 0214/1649] mac80211: document only injected *_RADIOTAP_* flags Not the internal flags but the radiotap flags are parsed when the monitor injected frames 
are prepared for transmission. Thus the documentation should only document these. Reported-by: Lorenzo Bianconi Reported-by: Johannes Berg Fixes: dfdfc2beb0dd ("mac80211: Parse legacy and HT rate in injected frames") Signed-off-by: Sven Eckelmann Signed-off-by: Johannes Berg --- Documentation/networking/mac80211-injection.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Documentation/networking/mac80211-injection.txt b/Documentation/networking/mac80211-injection.txt index e0efcaf5b0ee..d58d78df9ca2 100644 --- a/Documentation/networking/mac80211-injection.txt +++ b/Documentation/networking/mac80211-injection.txt @@ -37,8 +37,8 @@ radiotap headers and used to control injection: HT rate for the transmission (only for devices without own rate control). Also some flags are parsed - IEEE80211_TX_RC_SHORT_GI: use short guard interval - IEEE80211_TX_RC_40_MHZ_WIDTH: send in HT40 mode + IEEE80211_RADIOTAP_MCS_SGI: use short guard interval + IEEE80211_RADIOTAP_MCS_BW_40: send in HT40 mode * IEEE80211_RADIOTAP_DATA_RETRIES From f2edaaaa392bc21c24f532ea9bcc952a54a22367 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Mon, 7 Mar 2016 09:29:57 -0800 Subject: [PATCH 0215/1649] i40e/i40evf: Fix handling of boolean logic in polling routines In the polling routines for i40e and i40evf we were using bitwise operators to avoid the side effects of the logical operators, specifically the fact that if the first case is true with "||" we skip the second case, or if it is false with "&&" we skip the second case. This fixes an earlier patch that converted the bitwise operators over to the logical operators and instead replaces the entire thing with just an if statement since it should be more readable what we are trying to do this way. Fixes: 1a36d7fadd14 ("i40e/i40evf: use logical operators, not bitwise") Signed-off-by: Alexander Duyck Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_txrx.c | 13 ++++++++----- drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 13 ++++++++----- 2 files changed, 16 insertions(+), 10 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 9af1411bd423..8fb2a966d70e 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -1975,9 +1975,11 @@ int i40e_napi_poll(struct napi_struct *napi, int budget) * budget and be more aggressive about cleaning up the Tx descriptors. 
*/ i40e_for_each_ring(ring, q_vector->tx) { - clean_complete = clean_complete && - i40e_clean_tx_irq(ring, vsi->work_limit); - arm_wb = arm_wb || ring->arm_wb; + if (!i40e_clean_tx_irq(ring, vsi->work_limit)) { + clean_complete = false; + continue; + } + arm_wb |= ring->arm_wb; ring->arm_wb = false; } @@ -1999,8 +2001,9 @@ int i40e_napi_poll(struct napi_struct *napi, int budget) cleaned = i40e_clean_rx_irq_1buf(ring, budget_per_ring); work_done += cleaned; - /* if we didn't clean as many as budgeted, we must be done */ - clean_complete = clean_complete && (budget_per_ring > cleaned); + /* if we clean as many as budgeted, we must not be done */ + if (cleaned >= budget_per_ring) + clean_complete = false; } /* If work not completed, return budget and polling will return */ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c index 5f9c1bbab1fa..839a6df62f72 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c @@ -1411,9 +1411,11 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget) * budget and be more aggressive about cleaning up the Tx descriptors. */ i40e_for_each_ring(ring, q_vector->tx) { - clean_complete = clean_complete && - i40e_clean_tx_irq(ring, vsi->work_limit); - arm_wb = arm_wb || ring->arm_wb; + if (!i40e_clean_tx_irq(ring, vsi->work_limit)) { + clean_complete = false; + continue; + } + arm_wb |= ring->arm_wb; ring->arm_wb = false; } @@ -1435,8 +1437,9 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget) cleaned = i40e_clean_rx_irq_1buf(ring, budget_per_ring); work_done += cleaned; - /* if we didn't clean as many as budgeted, we must be done */ - clean_complete = clean_complete && (budget_per_ring > cleaned); + /* if we clean as many as budgeted, we must not be done */ + if (cleaned >= budget_per_ring) + clean_complete = false; } /* If work not completed, return budget and polling will return */ From 818965d3917774955fad52f87b59d690d8be9e8b Mon Sep 17 00:00:00 2001 From: Jouni Malinen Date: Fri, 26 Feb 2016 22:12:47 +0200 Subject: [PATCH 0216/1649] cfg80211: Allow a scan request for a specific BSSID This allows scans for a specific BSSID to be optimized by the user space application by requesting the driver to set the Probe Request frame BSSID field (Address 3) to the specified BSSID instead of the wildcard BSSID. This prevents other APs from replying which reduces airtime need and latency in getting the response from the target AP through. This is an optimization and as such, it is acceptable for some of the drivers not to support the mechanism. If not supported, the wildcard BSSID will be used and more responses may be received. 
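As a rough sketch (not part of this patch), a full-MAC driver that builds its own Probe Request frames could honour the new field as shown below. The helper is hypothetical; only struct cfg80211_scan_request and its bssid member come from this change, and the field holds the broadcast address unless userspace supplied a specific BSSID:

#include <linux/ieee80211.h>
#include <linux/string.h>
#include <net/cfg80211.h>

/* Hypothetical frame construction: put the requested BSSID into both the
 * destination address (Address 1) and the BSSID field (Address 3) of the
 * Probe Request instead of the wildcard address.
 */
static void example_fill_probe_req(struct ieee80211_mgmt *mgmt,
				   const struct cfg80211_scan_request *req,
				   const u8 *own_addr)
{
	mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
					  IEEE80211_STYPE_PROBE_REQ);
	memcpy(mgmt->da, req->bssid, ETH_ALEN);		/* Address 1 */
	memcpy(mgmt->sa, own_addr, ETH_ALEN);		/* Address 2 */
	memcpy(mgmt->bssid, req->bssid, ETH_ALEN);	/* Address 3 */
}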
Signed-off-by: Jouni Malinen Signed-off-by: Johannes Berg --- include/net/cfg80211.h | 2 ++ include/uapi/linux/nl80211.h | 4 +++- net/wireless/nl80211.c | 6 ++++++ net/wireless/scan.c | 2 ++ net/wireless/sme.c | 2 ++ 5 files changed, 15 insertions(+), 1 deletion(-) diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 9e1b24c29f0c..14c0c437d973 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -1455,6 +1455,7 @@ struct cfg80211_ssid { * @mac_addr_mask: MAC address mask used with randomisation, bits that * are 0 in the mask should be randomised, bits that are 1 should * be taken from the @mac_addr + * @bssid: BSSID to scan for (most commonly, the wildcard BSSID) */ struct cfg80211_scan_request { struct cfg80211_ssid *ssids; @@ -1471,6 +1472,7 @@ struct cfg80211_scan_request { u8 mac_addr[ETH_ALEN] __aligned(2); u8 mac_addr_mask[ETH_ALEN] __aligned(2); + u8 bssid[ETH_ALEN] __aligned(2); /* internal */ struct wiphy *wiphy; diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index 5a30a7563633..23bf0667540c 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -322,7 +322,9 @@ * @NL80211_CMD_GET_SCAN: get scan results * @NL80211_CMD_TRIGGER_SCAN: trigger a new scan with the given parameters * %NL80211_ATTR_TX_NO_CCK_RATE is used to decide whether to send the - * probe requests at CCK rate or not. + * probe requests at CCK rate or not. %NL80211_ATTR_MAC can be used to + * specify a BSSID to scan for; if not included, the wildcard BSSID will + * be used. * @NL80211_CMD_NEW_SCAN_RESULTS: scan notification (as a reply to * NL80211_CMD_GET_SCAN and on the "scan" multicast group) * @NL80211_CMD_SCAN_ABORTED: scan was aborted, for unspecified reasons, diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 98c924260b3d..1b43f7839eeb 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -5996,6 +5996,12 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info) request->no_cck = nla_get_flag(info->attrs[NL80211_ATTR_TX_NO_CCK_RATE]); + if (info->attrs[NL80211_ATTR_MAC]) + memcpy(request->bssid, nla_data(info->attrs[NL80211_ATTR_MAC]), + ETH_ALEN); + else + eth_broadcast_addr(request->bssid); + request->wdev = wdev; request->wiphy = &rdev->wiphy; request->scan_start = jiffies; diff --git a/net/wireless/scan.c b/net/wireless/scan.c index 14d5369eb778..50ea8e3fcbeb 100644 --- a/net/wireless/scan.c +++ b/net/wireless/scan.c @@ -1293,6 +1293,8 @@ int cfg80211_wext_siwscan(struct net_device *dev, if (wiphy->bands[i]) creq->rates[i] = (1 << wiphy->bands[i]->n_bitrates) - 1; + eth_broadcast_addr(creq->bssid); + rdev->scan_req = creq; err = rdev_scan(rdev, creq); if (err) { diff --git a/net/wireless/sme.c b/net/wireless/sme.c index 544558171787..65882d2777c0 100644 --- a/net/wireless/sme.c +++ b/net/wireless/sme.c @@ -119,6 +119,8 @@ static int cfg80211_conn_scan(struct wireless_dev *wdev) wdev->conn->params.ssid_len); request->ssids[0].ssid_len = wdev->conn->params.ssid_len; + eth_broadcast_addr(request->bssid); + request->wdev = wdev; request->wiphy = &rdev->wiphy; request->scan_start = jiffies; From e345f44f2b7c6a77c1c0677b7c8606a0bb1c5c5c Mon Sep 17 00:00:00 2001 From: Jouni Malinen Date: Fri, 26 Feb 2016 22:12:48 +0200 Subject: [PATCH 0217/1649] mac80211: Support a scan request for a specific BSSID If the cfg80211 scan trigger operation specifies a single BSSID, use that value instead of the wildcard BSSID in the Probe Request frames. 
Signed-off-by: Jouni Malinen Signed-off-by: Johannes Berg --- net/mac80211/scan.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c index a3fea1f35ef9..41aa728e5468 100644 --- a/net/mac80211/scan.c +++ b/net/mac80211/scan.c @@ -305,6 +305,7 @@ static bool ieee80211_prep_hw_scan(struct ieee80211_local *local) ether_addr_copy(local->hw_scan_req->req.mac_addr, req->mac_addr); ether_addr_copy(local->hw_scan_req->req.mac_addr_mask, req->mac_addr_mask); + ether_addr_copy(local->hw_scan_req->req.bssid, req->bssid); return true; } @@ -499,7 +500,7 @@ static void ieee80211_scan_state_send_probe(struct ieee80211_local *local, for (i = 0; i < scan_req->n_ssids; i++) ieee80211_send_probe_req( - sdata, local->scan_addr, NULL, + sdata, local->scan_addr, scan_req->bssid, scan_req->ssids[i].ssid, scan_req->ssids[i].ssid_len, scan_req->ie, scan_req->ie_len, scan_req->rates[band], false, @@ -564,6 +565,7 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata, req->n_channels * sizeof(req->channels[0]); local->hw_scan_req->req.ie = ies; local->hw_scan_req->req.flags = req->flags; + eth_broadcast_addr(local->hw_scan_req->req.bssid); local->hw_scan_band = 0; From 12880d169471fb14c46d6f323f31127702a6d5e6 Mon Sep 17 00:00:00 2001 From: Jouni Malinen Date: Fri, 26 Feb 2016 22:12:49 +0200 Subject: [PATCH 0218/1649] mac80211_hwsim: Support a hw scan request for a specific BSSID If the hw scan request specifies a single BSSID, use that value instead of the wildcard BSSID in the Probe Request frames. Signed-off-by: Jouni Malinen Signed-off-by: Johannes Berg --- drivers/net/wireless/mac80211_hwsim.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index e85e0737771c..2b185feb1aa0 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c @@ -1909,6 +1909,7 @@ static void hw_scan_work(struct work_struct *work) /* send probes */ for (i = 0; i < req->n_ssids; i++) { struct sk_buff *probe; + struct ieee80211_mgmt *mgmt; probe = ieee80211_probereq_get(hwsim->hw, hwsim->scan_addr, @@ -1918,6 +1919,10 @@ static void hw_scan_work(struct work_struct *work) if (!probe) continue; + mgmt = (struct ieee80211_mgmt *) probe->data; + memcpy(mgmt->da, req->bssid, ETH_ALEN); + memcpy(mgmt->bssid, req->bssid, ETH_ALEN); + if (req->ie_len) memcpy(skb_put(probe, req->ie_len), req->ie, req->ie_len); From 2bdaf386f99c4a82788812e583ff59c6714ae4d6 Mon Sep 17 00:00:00 2001 From: Bob Copeland Date: Sun, 28 Feb 2016 20:03:56 -0500 Subject: [PATCH 0219/1649] mac80211: mesh: move path tables into if_mesh The mesh path and mesh gate hashtables are global, containing all of the mpaths for every mesh interface, but the paths are all tied logically to a single interface. The common case is just a single mesh interface, so optimize for that by moving the global hashtable into the per-interface struct. Doing so allows us to drop sdata pointer comparisons inside the lookups and also saves a few bytes of BSS and data. 
Signed-off-by: Bob Copeland Signed-off-by: Johannes Berg --- net/mac80211/cfg.c | 4 +- net/mac80211/ieee80211_i.h | 12 +++ net/mac80211/mesh.c | 10 +- net/mac80211/mesh.h | 10 +- net/mac80211/mesh_pathtbl.c | 181 ++++++++++++++++-------------------- net/mac80211/tx.c | 2 +- 6 files changed, 104 insertions(+), 115 deletions(-) diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index fe1704c4e8fb..b37adb60c9cb 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -1499,7 +1499,7 @@ static void mpath_set_pinfo(struct mesh_path *mpath, u8 *next_hop, memset(pinfo, 0, sizeof(*pinfo)); - pinfo->generation = mesh_paths_generation; + pinfo->generation = mpath->sdata->u.mesh.mesh_paths_generation; pinfo->filled = MPATH_INFO_FRAME_QLEN | MPATH_INFO_SN | @@ -1577,7 +1577,7 @@ static void mpp_set_pinfo(struct mesh_path *mpath, u8 *mpp, memset(pinfo, 0, sizeof(*pinfo)); memcpy(mpp, mpath->mpp, ETH_ALEN); - pinfo->generation = mpp_paths_generation; + pinfo->generation = mpath->sdata->u.mesh.mpp_paths_generation; } static int ieee80211_get_mpp(struct wiphy *wiphy, struct net_device *dev, diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 804575ff7af5..db7f0dbebc4b 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -696,6 +696,18 @@ struct ieee80211_if_mesh { /* offset from skb->data while building IE */ int meshconf_offset; + + struct mesh_table __rcu *mesh_paths; + struct mesh_table __rcu *mpp_paths; /* Store paths for MPP&MAP */ + int mesh_paths_generation; + int mpp_paths_generation; + + /* Protects assignment of the mesh_paths/mpp_paths table + * pointer for resize against reading it for add/delete + * of individual paths. Pure readers (lookups) just use + * RCU. + */ + rwlock_t pathtbl_resize_lock; }; #ifdef CONFIG_MAC80211_MESH diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c index d32cefcb63b0..c92af2a7714d 100644 --- a/net/mac80211/mesh.c +++ b/net/mac80211/mesh.c @@ -25,7 +25,6 @@ bool mesh_action_is_path_sel(struct ieee80211_mgmt *mgmt) void ieee80211s_init(void) { - mesh_pathtbl_init(); mesh_allocated = 1; rm_cache = kmem_cache_create("mesh_rmc", sizeof(struct rmc_entry), 0, 0, NULL); @@ -35,7 +34,6 @@ void ieee80211s_stop(void) { if (!mesh_allocated) return; - mesh_pathtbl_unregister(); kmem_cache_destroy(rm_cache); } @@ -902,6 +900,7 @@ void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata) /* flush STAs and mpaths on this iface */ sta_info_flush(sdata); mesh_path_flush_by_iface(sdata); + mesh_pathtbl_unregister(sdata); /* free all potentially still buffered group-addressed frames */ local->total_ps_buffered -= skb_queue_len(&ifmsh->ps.bc_buf); @@ -1349,10 +1348,10 @@ void ieee80211_mesh_work(struct ieee80211_sub_if_data *sdata) mesh_path_start_discovery(sdata); if (test_and_clear_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags)) - mesh_mpath_table_grow(); + mesh_mpath_table_grow(sdata); if (test_and_clear_bit(MESH_WORK_GROW_MPP_TABLE, &ifmsh->wrkq_flags)) - mesh_mpp_table_grow(); + mesh_mpp_table_grow(sdata); if (test_and_clear_bit(MESH_WORK_HOUSEKEEPING, &ifmsh->wrkq_flags)) ieee80211_mesh_housekeeping(sdata); @@ -1388,6 +1387,9 @@ void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata) /* Allocate all mesh structures when creating the first mesh interface. 
*/ if (!mesh_allocated) ieee80211s_init(); + + mesh_pathtbl_init(sdata); + setup_timer(&ifmsh->mesh_path_timer, ieee80211_mesh_path_timer, (unsigned long) sdata); diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h index 87c017a3b1ce..601992b6cd8a 100644 --- a/net/mac80211/mesh.h +++ b/net/mac80211/mesh.h @@ -300,8 +300,8 @@ void mesh_sta_cleanup(struct sta_info *sta); /* Private interfaces */ /* Mesh tables */ -void mesh_mpath_table_grow(void); -void mesh_mpp_table_grow(void); +void mesh_mpath_table_grow(struct ieee80211_sub_if_data *sdata); +void mesh_mpp_table_grow(struct ieee80211_sub_if_data *sdata); /* Mesh paths */ int mesh_path_error_tx(struct ieee80211_sub_if_data *sdata, u8 ttl, const u8 *target, u32 target_sn, @@ -309,8 +309,8 @@ int mesh_path_error_tx(struct ieee80211_sub_if_data *sdata, void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta); void mesh_path_flush_pending(struct mesh_path *mpath); void mesh_path_tx_pending(struct mesh_path *mpath); -int mesh_pathtbl_init(void); -void mesh_pathtbl_unregister(void); +int mesh_pathtbl_init(struct ieee80211_sub_if_data *sdata); +void mesh_pathtbl_unregister(struct ieee80211_sub_if_data *sdata); int mesh_path_del(struct ieee80211_sub_if_data *sdata, const u8 *addr); void mesh_path_timer(unsigned long data); void mesh_path_flush_by_nexthop(struct sta_info *sta); @@ -319,8 +319,6 @@ void mesh_path_discard_frame(struct ieee80211_sub_if_data *sdata, void mesh_path_tx_root_frame(struct ieee80211_sub_if_data *sdata); bool mesh_action_is_path_sel(struct ieee80211_mgmt *mgmt); -extern int mesh_paths_generation; -extern int mpp_paths_generation; #ifdef CONFIG_MAC80211_MESH static inline diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c index 2ba7aa56b11c..0508b37b0471 100644 --- a/net/mac80211/mesh_pathtbl.c +++ b/net/mac80211/mesh_pathtbl.c @@ -40,36 +40,24 @@ struct mpath_node { struct mesh_path *mpath; }; -static struct mesh_table __rcu *mesh_paths; -static struct mesh_table __rcu *mpp_paths; /* Store paths for MPP&MAP */ - -int mesh_paths_generation; -int mpp_paths_generation; - -/* This lock will have the grow table function as writer and add / delete nodes - * as readers. RCU provides sufficient protection only when reading the table - * (i.e. doing lookups). Adding or adding or removing nodes requires we take - * the read lock or we risk operating on an old table. The write lock is only - * needed when modifying the number of buckets a table. 
- */ -static DEFINE_RWLOCK(pathtbl_resize_lock); - - static inline struct mesh_table *resize_dereference_paths( + struct ieee80211_sub_if_data *sdata, struct mesh_table __rcu *table) { return rcu_dereference_protected(table, - lockdep_is_held(&pathtbl_resize_lock)); + lockdep_is_held(&sdata->u.mesh.pathtbl_resize_lock)); } -static inline struct mesh_table *resize_dereference_mesh_paths(void) +static inline struct mesh_table *resize_dereference_mesh_paths( + struct ieee80211_sub_if_data *sdata) { - return resize_dereference_paths(mesh_paths); + return resize_dereference_paths(sdata, sdata->u.mesh.mesh_paths); } -static inline struct mesh_table *resize_dereference_mpp_paths(void) +static inline struct mesh_table *resize_dereference_mpp_paths( + struct ieee80211_sub_if_data *sdata) { - return resize_dereference_paths(mpp_paths); + return resize_dereference_paths(sdata, sdata->u.mesh.mpp_paths); } /* @@ -346,8 +334,7 @@ static struct mesh_path *mpath_lookup(struct mesh_table *tbl, const u8 *dst, bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)]; hlist_for_each_entry_rcu(node, bucket, list) { mpath = node->mpath; - if (mpath->sdata == sdata && - ether_addr_equal(dst, mpath->dst)) { + if (ether_addr_equal(dst, mpath->dst)) { if (mpath_expired(mpath)) { spin_lock_bh(&mpath->state_lock); mpath->flags &= ~MESH_PATH_ACTIVE; @@ -371,13 +358,15 @@ static struct mesh_path *mpath_lookup(struct mesh_table *tbl, const u8 *dst, struct mesh_path * mesh_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst) { - return mpath_lookup(rcu_dereference(mesh_paths), dst, sdata); + return mpath_lookup(rcu_dereference(sdata->u.mesh.mesh_paths), dst, + sdata); } struct mesh_path * mpp_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst) { - return mpath_lookup(rcu_dereference(mpp_paths), dst, sdata); + return mpath_lookup(rcu_dereference(sdata->u.mesh.mpp_paths), dst, + sdata); } @@ -393,14 +382,12 @@ mpp_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst) struct mesh_path * mesh_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx) { - struct mesh_table *tbl = rcu_dereference(mesh_paths); + struct mesh_table *tbl = rcu_dereference(sdata->u.mesh.mesh_paths); struct mpath_node *node; int i; int j = 0; for_each_mesh_entry(tbl, node, i) { - if (sdata && node->mpath->sdata != sdata) - continue; if (j++ == idx) { if (mpath_expired(node->mpath)) { spin_lock_bh(&node->mpath->state_lock); @@ -426,14 +413,12 @@ mesh_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx) struct mesh_path * mpp_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx) { - struct mesh_table *tbl = rcu_dereference(mpp_paths); + struct mesh_table *tbl = rcu_dereference(sdata->u.mesh.mpp_paths); struct mpath_node *node; int i; int j = 0; for_each_mesh_entry(tbl, node, i) { - if (sdata && node->mpath->sdata != sdata) - continue; if (j++ == idx) return node->mpath; } @@ -452,7 +437,7 @@ int mesh_path_add_gate(struct mesh_path *mpath) int err; rcu_read_lock(); - tbl = rcu_dereference(mesh_paths); + tbl = rcu_dereference(mpath->sdata->u.mesh.mesh_paths); hlist_for_each_entry_rcu(gate, tbl->known_gates, list) if (gate->mpath == mpath) { @@ -550,8 +535,8 @@ struct mesh_path *mesh_path_add(struct ieee80211_sub_if_data *sdata, if (atomic_add_unless(&sdata->u.mesh.mpaths, 1, MESH_MAX_MPATHS) == 0) return ERR_PTR(-ENOSPC); - read_lock_bh(&pathtbl_resize_lock); - tbl = resize_dereference_mesh_paths(); + read_lock_bh(&sdata->u.mesh.pathtbl_resize_lock); + tbl = 
resize_dereference_mesh_paths(sdata); hash_idx = mesh_table_hash(dst, sdata, tbl); bucket = &tbl->hash_buckets[hash_idx]; @@ -560,8 +545,7 @@ struct mesh_path *mesh_path_add(struct ieee80211_sub_if_data *sdata, hlist_for_each_entry(node, bucket, list) { mpath = node->mpath; - if (mpath->sdata == sdata && - ether_addr_equal(dst, mpath->dst)) + if (ether_addr_equal(dst, mpath->dst)) goto found; } @@ -592,7 +576,7 @@ struct mesh_path *mesh_path_add(struct ieee80211_sub_if_data *sdata, MEAN_CHAIN_LEN * (tbl->hash_mask + 1)) grow = 1; - mesh_paths_generation++; + sdata->u.mesh.mesh_paths_generation++; if (grow) { set_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags); @@ -601,7 +585,7 @@ struct mesh_path *mesh_path_add(struct ieee80211_sub_if_data *sdata, mpath = new_mpath; found: spin_unlock(&tbl->hashwlock[hash_idx]); - read_unlock_bh(&pathtbl_resize_lock); + read_unlock_bh(&sdata->u.mesh.pathtbl_resize_lock); return mpath; err_node_alloc: @@ -609,7 +593,7 @@ err_node_alloc: err_path_alloc: atomic_dec(&sdata->u.mesh.mpaths); spin_unlock(&tbl->hashwlock[hash_idx]); - read_unlock_bh(&pathtbl_resize_lock); + read_unlock_bh(&sdata->u.mesh.pathtbl_resize_lock); return ERR_PTR(err); } @@ -620,12 +604,12 @@ static void mesh_table_free_rcu(struct rcu_head *rcu) mesh_table_free(tbl, false); } -void mesh_mpath_table_grow(void) +void mesh_mpath_table_grow(struct ieee80211_sub_if_data *sdata) { struct mesh_table *oldtbl, *newtbl; - write_lock_bh(&pathtbl_resize_lock); - oldtbl = resize_dereference_mesh_paths(); + write_lock_bh(&sdata->u.mesh.pathtbl_resize_lock); + oldtbl = resize_dereference_mesh_paths(sdata); newtbl = mesh_table_alloc(oldtbl->size_order + 1); if (!newtbl) goto out; @@ -633,20 +617,20 @@ void mesh_mpath_table_grow(void) __mesh_table_free(newtbl); goto out; } - rcu_assign_pointer(mesh_paths, newtbl); + rcu_assign_pointer(sdata->u.mesh.mesh_paths, newtbl); call_rcu(&oldtbl->rcu_head, mesh_table_free_rcu); out: - write_unlock_bh(&pathtbl_resize_lock); + write_unlock_bh(&sdata->u.mesh.pathtbl_resize_lock); } -void mesh_mpp_table_grow(void) +void mesh_mpp_table_grow(struct ieee80211_sub_if_data *sdata) { struct mesh_table *oldtbl, *newtbl; - write_lock_bh(&pathtbl_resize_lock); - oldtbl = resize_dereference_mpp_paths(); + write_lock_bh(&sdata->u.mesh.pathtbl_resize_lock); + oldtbl = resize_dereference_mpp_paths(sdata); newtbl = mesh_table_alloc(oldtbl->size_order + 1); if (!newtbl) goto out; @@ -654,11 +638,11 @@ void mesh_mpp_table_grow(void) __mesh_table_free(newtbl); goto out; } - rcu_assign_pointer(mpp_paths, newtbl); + rcu_assign_pointer(sdata->u.mesh.mpp_paths, newtbl); call_rcu(&oldtbl->rcu_head, mesh_table_free_rcu); out: - write_unlock_bh(&pathtbl_resize_lock); + write_unlock_bh(&sdata->u.mesh.pathtbl_resize_lock); } int mpp_path_add(struct ieee80211_sub_if_data *sdata, @@ -690,7 +674,7 @@ int mpp_path_add(struct ieee80211_sub_if_data *sdata, if (!new_node) goto err_node_alloc; - read_lock_bh(&pathtbl_resize_lock); + read_lock_bh(&sdata->u.mesh.pathtbl_resize_lock); memcpy(new_mpath->dst, dst, ETH_ALEN); memcpy(new_mpath->mpp, mpp, ETH_ALEN); new_mpath->sdata = sdata; @@ -701,7 +685,7 @@ int mpp_path_add(struct ieee80211_sub_if_data *sdata, new_mpath->exp_time = jiffies; spin_lock_init(&new_mpath->state_lock); - tbl = resize_dereference_mpp_paths(); + tbl = resize_dereference_mpp_paths(sdata); hash_idx = mesh_table_hash(dst, sdata, tbl); bucket = &tbl->hash_buckets[hash_idx]; @@ -711,8 +695,7 @@ int mpp_path_add(struct ieee80211_sub_if_data *sdata, err = -EEXIST; 
hlist_for_each_entry(node, bucket, list) { mpath = node->mpath; - if (mpath->sdata == sdata && - ether_addr_equal(dst, mpath->dst)) + if (ether_addr_equal(dst, mpath->dst)) goto err_exists; } @@ -722,9 +705,9 @@ int mpp_path_add(struct ieee80211_sub_if_data *sdata, grow = 1; spin_unlock(&tbl->hashwlock[hash_idx]); - read_unlock_bh(&pathtbl_resize_lock); + read_unlock_bh(&sdata->u.mesh.pathtbl_resize_lock); - mpp_paths_generation++; + sdata->u.mesh.mpp_paths_generation++; if (grow) { set_bit(MESH_WORK_GROW_MPP_TABLE, &ifmsh->wrkq_flags); @@ -734,7 +717,7 @@ int mpp_path_add(struct ieee80211_sub_if_data *sdata, err_exists: spin_unlock(&tbl->hashwlock[hash_idx]); - read_unlock_bh(&pathtbl_resize_lock); + read_unlock_bh(&sdata->u.mesh.pathtbl_resize_lock); kfree(new_node); err_node_alloc: kfree(new_mpath); @@ -761,7 +744,7 @@ void mesh_plink_broken(struct sta_info *sta) int i; rcu_read_lock(); - tbl = rcu_dereference(mesh_paths); + tbl = rcu_dereference(sdata->u.mesh.mesh_paths); for_each_mesh_entry(tbl, node, i) { mpath = node->mpath; if (rcu_access_pointer(mpath->next_hop) == sta && @@ -819,14 +802,15 @@ static void __mesh_path_del(struct mesh_table *tbl, struct mpath_node *node) */ void mesh_path_flush_by_nexthop(struct sta_info *sta) { + struct ieee80211_sub_if_data *sdata = sta->sdata; struct mesh_table *tbl; struct mesh_path *mpath; struct mpath_node *node; int i; rcu_read_lock(); - read_lock_bh(&pathtbl_resize_lock); - tbl = resize_dereference_mesh_paths(); + read_lock_bh(&sdata->u.mesh.pathtbl_resize_lock); + tbl = resize_dereference_mesh_paths(sdata); for_each_mesh_entry(tbl, node, i) { mpath = node->mpath; if (rcu_access_pointer(mpath->next_hop) == sta) { @@ -835,7 +819,7 @@ void mesh_path_flush_by_nexthop(struct sta_info *sta) spin_unlock(&tbl->hashwlock[i]); } } - read_unlock_bh(&pathtbl_resize_lock); + read_unlock_bh(&sdata->u.mesh.pathtbl_resize_lock); rcu_read_unlock(); } @@ -848,8 +832,8 @@ static void mpp_flush_by_proxy(struct ieee80211_sub_if_data *sdata, int i; rcu_read_lock(); - read_lock_bh(&pathtbl_resize_lock); - tbl = resize_dereference_mpp_paths(); + read_lock_bh(&sdata->u.mesh.pathtbl_resize_lock); + tbl = resize_dereference_mpp_paths(sdata); for_each_mesh_entry(tbl, node, i) { mpp = node->mpath; if (ether_addr_equal(mpp->mpp, proxy)) { @@ -858,7 +842,7 @@ static void mpp_flush_by_proxy(struct ieee80211_sub_if_data *sdata, spin_unlock(&tbl->hashwlock[i]); } } - read_unlock_bh(&pathtbl_resize_lock); + read_unlock_bh(&sdata->u.mesh.pathtbl_resize_lock); rcu_read_unlock(); } @@ -872,8 +856,6 @@ static void table_flush_by_iface(struct mesh_table *tbl, WARN_ON(!rcu_read_lock_held()); for_each_mesh_entry(tbl, node, i) { mpath = node->mpath; - if (mpath->sdata != sdata) - continue; spin_lock_bh(&tbl->hashwlock[i]); __mesh_path_del(tbl, node); spin_unlock_bh(&tbl->hashwlock[i]); @@ -893,12 +875,12 @@ void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata) struct mesh_table *tbl; rcu_read_lock(); - read_lock_bh(&pathtbl_resize_lock); - tbl = resize_dereference_mesh_paths(); + read_lock_bh(&sdata->u.mesh.pathtbl_resize_lock); + tbl = resize_dereference_mesh_paths(sdata); table_flush_by_iface(tbl, sdata); - tbl = resize_dereference_mpp_paths(); + tbl = resize_dereference_mpp_paths(sdata); table_flush_by_iface(tbl, sdata); - read_unlock_bh(&pathtbl_resize_lock); + read_unlock_bh(&sdata->u.mesh.pathtbl_resize_lock); rcu_read_unlock(); } @@ -922,15 +904,14 @@ static int table_path_del(struct mesh_table __rcu *rcu_tbl, int hash_idx; int err = 0; - tbl = 
resize_dereference_paths(rcu_tbl); + tbl = resize_dereference_paths(sdata, rcu_tbl); hash_idx = mesh_table_hash(addr, sdata, tbl); bucket = &tbl->hash_buckets[hash_idx]; spin_lock(&tbl->hashwlock[hash_idx]); hlist_for_each_entry(node, bucket, list) { mpath = node->mpath; - if (mpath->sdata == sdata && - ether_addr_equal(addr, mpath->dst)) { + if (ether_addr_equal(addr, mpath->dst)) { __mesh_path_del(tbl, node); goto enddel; } @@ -957,10 +938,10 @@ int mesh_path_del(struct ieee80211_sub_if_data *sdata, const u8 *addr) /* flush relevant mpp entries first */ mpp_flush_by_proxy(sdata, addr); - read_lock_bh(&pathtbl_resize_lock); - err = table_path_del(mesh_paths, sdata, addr); - mesh_paths_generation++; - read_unlock_bh(&pathtbl_resize_lock); + read_lock_bh(&sdata->u.mesh.pathtbl_resize_lock); + err = table_path_del(sdata->u.mesh.mesh_paths, sdata, addr); + sdata->u.mesh.mesh_paths_generation++; + read_unlock_bh(&sdata->u.mesh.pathtbl_resize_lock); return err; } @@ -977,10 +958,10 @@ static int mpp_path_del(struct ieee80211_sub_if_data *sdata, const u8 *addr) { int err = 0; - read_lock_bh(&pathtbl_resize_lock); - err = table_path_del(mpp_paths, sdata, addr); - mpp_paths_generation++; - read_unlock_bh(&pathtbl_resize_lock); + read_lock_bh(&sdata->u.mesh.pathtbl_resize_lock); + err = table_path_del(sdata->u.mesh.mpp_paths, sdata, addr); + sdata->u.mesh.mpp_paths_generation++; + read_unlock_bh(&sdata->u.mesh.pathtbl_resize_lock); return err; } @@ -1020,7 +1001,7 @@ int mesh_path_send_to_gates(struct mesh_path *mpath) struct hlist_head *known_gates; rcu_read_lock(); - tbl = rcu_dereference(mesh_paths); + tbl = rcu_dereference(sdata->u.mesh.mesh_paths); known_gates = tbl->known_gates; rcu_read_unlock(); @@ -1028,9 +1009,6 @@ int mesh_path_send_to_gates(struct mesh_path *mpath) return -EHOSTUNREACH; hlist_for_each_entry_rcu(gate, known_gates, list) { - if (gate->mpath->sdata != sdata) - continue; - if (gate->mpath->flags & MESH_PATH_ACTIVE) { mpath_dbg(sdata, "Forwarding to %pM\n", gate->mpath->dst); mesh_path_move_to_queue(gate->mpath, from_mpath, copy); @@ -1043,11 +1021,10 @@ int mesh_path_send_to_gates(struct mesh_path *mpath) } } - hlist_for_each_entry_rcu(gate, known_gates, list) - if (gate->mpath->sdata == sdata) { - mpath_dbg(sdata, "Sending to %pM\n", gate->mpath->dst); - mesh_path_tx_pending(gate->mpath); - } + hlist_for_each_entry_rcu(gate, known_gates, list) { + mpath_dbg(sdata, "Sending to %pM\n", gate->mpath->dst); + mesh_path_tx_pending(gate->mpath); + } return (from_mpath == mpath) ? 
-EHOSTUNREACH : 0; } @@ -1136,7 +1113,7 @@ static int mesh_path_node_copy(struct hlist_node *p, struct mesh_table *newtbl) return 0; } -int mesh_pathtbl_init(void) +int mesh_pathtbl_init(struct ieee80211_sub_if_data *sdata) { struct mesh_table *tbl_path, *tbl_mpp; int ret; @@ -1168,9 +1145,11 @@ int mesh_pathtbl_init(void) } INIT_HLIST_HEAD(tbl_mpp->known_gates); + rwlock_init(&sdata->u.mesh.pathtbl_resize_lock); + /* Need no locking since this is during init */ - RCU_INIT_POINTER(mesh_paths, tbl_path); - RCU_INIT_POINTER(mpp_paths, tbl_mpp); + RCU_INIT_POINTER(sdata->u.mesh.mesh_paths, tbl_path); + RCU_INIT_POINTER(sdata->u.mesh.mpp_paths, tbl_mpp); return 0; @@ -1189,33 +1168,31 @@ void mesh_path_expire(struct ieee80211_sub_if_data *sdata) int i; rcu_read_lock(); - tbl = rcu_dereference(mesh_paths); + tbl = rcu_dereference(sdata->u.mesh.mesh_paths); for_each_mesh_entry(tbl, node, i) { - if (node->mpath->sdata != sdata) - continue; mpath = node->mpath; if ((!(mpath->flags & MESH_PATH_RESOLVING)) && (!(mpath->flags & MESH_PATH_FIXED)) && time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE)) - mesh_path_del(mpath->sdata, mpath->dst); + mesh_path_del(sdata, mpath->dst); } - tbl = rcu_dereference(mpp_paths); + tbl = rcu_dereference(sdata->u.mesh.mpp_paths); for_each_mesh_entry(tbl, node, i) { - if (node->mpath->sdata != sdata) - continue; mpath = node->mpath; if ((!(mpath->flags & MESH_PATH_FIXED)) && time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE)) - mpp_path_del(mpath->sdata, mpath->dst); + mpp_path_del(sdata, mpath->dst); } rcu_read_unlock(); } -void mesh_pathtbl_unregister(void) +void mesh_pathtbl_unregister(struct ieee80211_sub_if_data *sdata) { /* no need for locking during exit path */ - mesh_table_free(rcu_dereference_protected(mesh_paths, 1), true); - mesh_table_free(rcu_dereference_protected(mpp_paths, 1), true); + mesh_table_free(rcu_dereference_protected(sdata->u.mesh.mesh_paths, 1), + true); + mesh_table_free(rcu_dereference_protected(sdata->u.mesh.mpp_paths, 1), + true); } diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index c485fc26fa0c..b3196b1e15c2 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -2212,7 +2212,7 @@ static struct sk_buff *ieee80211_build_hdr(struct ieee80211_sub_if_data *sdata, } if (mppath && mpath) - mesh_path_del(mpath->sdata, mpath->dst); + mesh_path_del(sdata, mpath->dst); } /* From 443954815b63b36f09623d74170520e6554f5fac Mon Sep 17 00:00:00 2001 From: Bob Copeland Date: Sun, 28 Feb 2016 20:03:57 -0500 Subject: [PATCH 0220/1649] mac80211: mesh: don't hash sdata in mpath tables Now that the sdata pointer is the same for all entries of a path table, hashing it is pointless, so hash only the address. 
Signed-off-by: Bob Copeland Signed-off-by: Johannes Berg --- net/mac80211/mesh_pathtbl.c | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c index 0508b37b0471..fc3cc350df8c 100644 --- a/net/mac80211/mesh_pathtbl.c +++ b/net/mac80211/mesh_pathtbl.c @@ -177,12 +177,10 @@ errcopy: return -ENOMEM; } -static u32 mesh_table_hash(const u8 *addr, struct ieee80211_sub_if_data *sdata, - struct mesh_table *tbl) +static u32 mesh_table_hash(const u8 *addr, struct mesh_table *tbl) { - /* Use last four bytes of hw addr and interface index as hash index */ - return jhash_2words(*(u32 *)(addr+2), sdata->dev->ifindex, - tbl->hash_rnd) & tbl->hash_mask; + /* Use last four bytes of hw addr as hash index */ + return jhash_1word(*(u32 *)(addr+2), tbl->hash_rnd) & tbl->hash_mask; } @@ -331,7 +329,7 @@ static struct mesh_path *mpath_lookup(struct mesh_table *tbl, const u8 *dst, struct hlist_head *bucket; struct mpath_node *node; - bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)]; + bucket = &tbl->hash_buckets[mesh_table_hash(dst, tbl)]; hlist_for_each_entry_rcu(node, bucket, list) { mpath = node->mpath; if (ether_addr_equal(dst, mpath->dst)) { @@ -538,7 +536,7 @@ struct mesh_path *mesh_path_add(struct ieee80211_sub_if_data *sdata, read_lock_bh(&sdata->u.mesh.pathtbl_resize_lock); tbl = resize_dereference_mesh_paths(sdata); - hash_idx = mesh_table_hash(dst, sdata, tbl); + hash_idx = mesh_table_hash(dst, tbl); bucket = &tbl->hash_buckets[hash_idx]; spin_lock(&tbl->hashwlock[hash_idx]); @@ -687,7 +685,7 @@ int mpp_path_add(struct ieee80211_sub_if_data *sdata, tbl = resize_dereference_mpp_paths(sdata); - hash_idx = mesh_table_hash(dst, sdata, tbl); + hash_idx = mesh_table_hash(dst, tbl); bucket = &tbl->hash_buckets[hash_idx]; spin_lock(&tbl->hashwlock[hash_idx]); @@ -905,7 +903,7 @@ static int table_path_del(struct mesh_table __rcu *rcu_tbl, int err = 0; tbl = resize_dereference_paths(sdata, rcu_tbl); - hash_idx = mesh_table_hash(addr, sdata, tbl); + hash_idx = mesh_table_hash(addr, tbl); bucket = &tbl->hash_buckets[hash_idx]; spin_lock(&tbl->hashwlock[hash_idx]); @@ -1107,7 +1105,7 @@ static int mesh_path_node_copy(struct hlist_node *p, struct mesh_table *newtbl) node = hlist_entry(p, struct mpath_node, list); mpath = node->mpath; new_node->mpath = mpath; - hash_idx = mesh_table_hash(mpath->dst, mpath->sdata, newtbl); + hash_idx = mesh_table_hash(mpath->dst, newtbl); hlist_add_head(&new_node->list, &newtbl->hash_buckets[hash_idx]); return 0; From b15dc38b9817729d3d4962f8c84bbda0eccb3532 Mon Sep 17 00:00:00 2001 From: Bob Copeland Date: Sun, 28 Feb 2016 20:03:58 -0500 Subject: [PATCH 0221/1649] mac80211: mesh: factor out common mesh path allocation code Remove duplicate code to allocate and initialize a mesh path or mesh proxy path. 
Signed-off-by: Bob Copeland Signed-off-by: Johannes Berg --- net/mac80211/mesh_pathtbl.c | 51 ++++++++++++++++++++----------------- 1 file changed, 28 insertions(+), 23 deletions(-) diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c index fc3cc350df8c..4794240e8f94 100644 --- a/net/mac80211/mesh_pathtbl.c +++ b/net/mac80211/mesh_pathtbl.c @@ -501,6 +501,31 @@ int mesh_gate_num(struct ieee80211_sub_if_data *sdata) return sdata->u.mesh.num_gates; } +static +struct mesh_path *mesh_path_new(struct ieee80211_sub_if_data *sdata, + const u8 *dst, gfp_t gfp_flags) +{ + struct mesh_path *new_mpath; + + new_mpath = kzalloc(sizeof(struct mesh_path), gfp_flags); + if (!new_mpath) + return NULL; + + memcpy(new_mpath->dst, dst, ETH_ALEN); + eth_broadcast_addr(new_mpath->rann_snd_addr); + new_mpath->is_root = false; + new_mpath->sdata = sdata; + new_mpath->flags = 0; + skb_queue_head_init(&new_mpath->frame_queue); + new_mpath->timer.data = (unsigned long) new_mpath; + new_mpath->timer.function = mesh_path_timer; + new_mpath->exp_time = jiffies; + spin_lock_init(&new_mpath->state_lock); + init_timer(&new_mpath->timer); + + return new_mpath; +} + /** * mesh_path_add - allocate and add a new path to the mesh path table * @dst: destination address of the path (ETH_ALEN length) @@ -548,7 +573,7 @@ struct mesh_path *mesh_path_add(struct ieee80211_sub_if_data *sdata, } err = -ENOMEM; - new_mpath = kzalloc(sizeof(struct mesh_path), GFP_ATOMIC); + new_mpath = mesh_path_new(sdata, dst, GFP_ATOMIC); if (!new_mpath) goto err_path_alloc; @@ -556,19 +581,7 @@ struct mesh_path *mesh_path_add(struct ieee80211_sub_if_data *sdata, if (!new_node) goto err_node_alloc; - memcpy(new_mpath->dst, dst, ETH_ALEN); - eth_broadcast_addr(new_mpath->rann_snd_addr); - new_mpath->is_root = false; - new_mpath->sdata = sdata; - new_mpath->flags = 0; - skb_queue_head_init(&new_mpath->frame_queue); new_node->mpath = new_mpath; - new_mpath->timer.data = (unsigned long) new_mpath; - new_mpath->timer.function = mesh_path_timer; - new_mpath->exp_time = jiffies; - spin_lock_init(&new_mpath->state_lock); - init_timer(&new_mpath->timer); - hlist_add_head_rcu(&new_node->list, bucket); if (atomic_inc_return(&tbl->entries) >= MEAN_CHAIN_LEN * (tbl->hash_mask + 1)) @@ -664,7 +677,7 @@ int mpp_path_add(struct ieee80211_sub_if_data *sdata, return -ENOTSUPP; err = -ENOMEM; - new_mpath = kzalloc(sizeof(struct mesh_path), GFP_ATOMIC); + new_mpath = mesh_path_new(sdata, dst, GFP_ATOMIC); if (!new_mpath) goto err_path_alloc; @@ -672,17 +685,9 @@ int mpp_path_add(struct ieee80211_sub_if_data *sdata, if (!new_node) goto err_node_alloc; - read_lock_bh(&sdata->u.mesh.pathtbl_resize_lock); - memcpy(new_mpath->dst, dst, ETH_ALEN); memcpy(new_mpath->mpp, mpp, ETH_ALEN); - new_mpath->sdata = sdata; - new_mpath->flags = 0; - skb_queue_head_init(&new_mpath->frame_queue); new_node->mpath = new_mpath; - init_timer(&new_mpath->timer); - new_mpath->exp_time = jiffies; - spin_lock_init(&new_mpath->state_lock); - + read_lock_bh(&sdata->u.mesh.pathtbl_resize_lock); tbl = resize_dereference_mpp_paths(sdata); hash_idx = mesh_table_hash(dst, tbl); From 947c2a0eccec29fcd30e717787e65792b1e607ed Mon Sep 17 00:00:00 2001 From: Bob Copeland Date: Sun, 28 Feb 2016 20:03:59 -0500 Subject: [PATCH 0222/1649] mac80211: mesh: embed known gates list in struct mesh_path The mesh path table uses a struct mesh_node in its hlists in order to support a resizable hash table: the mesh_node provides an indirection to the actual mesh path so that two different bucket lists can 
point to the same path entry. However, for the known gates list, we don't need this indirection because there is ever only one list. So we can just embed the hlist_node in the mesh path itself, which simplifies things a bit and saves a linear search whenever we need to find an item in the list. Signed-off-by: Bob Copeland Signed-off-by: Johannes Berg --- net/mac80211/mesh.h | 1 + net/mac80211/mesh_pathtbl.c | 100 ++++++++++++++++-------------------- 2 files changed, 45 insertions(+), 56 deletions(-) diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h index 601992b6cd8a..f3cc3917e048 100644 --- a/net/mac80211/mesh.h +++ b/net/mac80211/mesh.h @@ -105,6 +105,7 @@ enum mesh_deferred_task_flags { struct mesh_path { u8 dst[ETH_ALEN]; u8 mpp[ETH_ALEN]; /* used for MPP or MAP */ + struct hlist_node gate_list; struct ieee80211_sub_if_data *sdata; struct sta_info __rcu *next_hop; struct timer_list timer; diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c index 4794240e8f94..e4daf4b94eaf 100644 --- a/net/mac80211/mesh_pathtbl.c +++ b/net/mac80211/mesh_pathtbl.c @@ -119,10 +119,18 @@ static void mesh_table_free(struct mesh_table *tbl, bool free_leafs) { struct hlist_head *mesh_hash; struct hlist_node *p, *q; - struct mpath_node *gate; + struct mesh_path *gate; int i; mesh_hash = tbl->hash_buckets; + if (free_leafs) { + spin_lock_bh(&tbl->gates_lock); + hlist_for_each_entry_safe(gate, q, + tbl->known_gates, gate_list) + hlist_del(&gate->gate_list); + kfree(tbl->known_gates); + spin_unlock_bh(&tbl->gates_lock); + } for (i = 0; i <= tbl->hash_mask; i++) { spin_lock_bh(&tbl->hashwlock[i]); hlist_for_each_safe(p, q, &mesh_hash[i]) { @@ -131,16 +139,6 @@ static void mesh_table_free(struct mesh_table *tbl, bool free_leafs) } spin_unlock_bh(&tbl->hashwlock[i]); } - if (free_leafs) { - spin_lock_bh(&tbl->gates_lock); - hlist_for_each_entry_safe(gate, q, - tbl->known_gates, list) { - hlist_del(&gate->list); - kfree(gate); - } - kfree(tbl->known_gates); - spin_unlock_bh(&tbl->gates_lock); - } __mesh_table_free(tbl); } @@ -431,30 +429,26 @@ mpp_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx) int mesh_path_add_gate(struct mesh_path *mpath) { struct mesh_table *tbl; - struct mpath_node *gate, *new_gate; int err; rcu_read_lock(); tbl = rcu_dereference(mpath->sdata->u.mesh.mesh_paths); - hlist_for_each_entry_rcu(gate, tbl->known_gates, list) - if (gate->mpath == mpath) { - err = -EEXIST; - goto err_rcu; - } - - new_gate = kzalloc(sizeof(struct mpath_node), GFP_ATOMIC); - if (!new_gate) { - err = -ENOMEM; + spin_lock_bh(&mpath->state_lock); + if (mpath->is_gate) { + err = -EEXIST; + spin_unlock_bh(&mpath->state_lock); goto err_rcu; } - mpath->is_gate = true; mpath->sdata->u.mesh.num_gates++; - new_gate->mpath = mpath; - spin_lock_bh(&tbl->gates_lock); - hlist_add_head_rcu(&new_gate->list, tbl->known_gates); - spin_unlock_bh(&tbl->gates_lock); + + spin_lock(&tbl->gates_lock); + hlist_add_head_rcu(&mpath->gate_list, tbl->known_gates); + spin_unlock(&tbl->gates_lock); + + spin_unlock_bh(&mpath->state_lock); + mpath_dbg(mpath->sdata, "Mesh path: Recorded new gate: %pM. 
%d known gates\n", mpath->dst, mpath->sdata->u.mesh.num_gates); @@ -468,28 +462,22 @@ err_rcu: * mesh_gate_del - remove a mesh gate from the list of known gates * @tbl: table which holds our list of known gates * @mpath: gate mpath - * - * Locking: must be called inside rcu_read_lock() section */ static void mesh_gate_del(struct mesh_table *tbl, struct mesh_path *mpath) { - struct mpath_node *gate; - struct hlist_node *q; + lockdep_assert_held(&mpath->state_lock); + if (!mpath->is_gate) + return; - hlist_for_each_entry_safe(gate, q, tbl->known_gates, list) { - if (gate->mpath != mpath) - continue; - spin_lock_bh(&tbl->gates_lock); - hlist_del_rcu(&gate->list); - kfree_rcu(gate, rcu); - spin_unlock_bh(&tbl->gates_lock); - mpath->sdata->u.mesh.num_gates--; - mpath->is_gate = false; - mpath_dbg(mpath->sdata, - "Mesh path: Deleted gate: %pM. %d known gates\n", - mpath->dst, mpath->sdata->u.mesh.num_gates); - break; - } + mpath->is_gate = false; + spin_lock_bh(&tbl->gates_lock); + hlist_del_rcu(&mpath->gate_list); + mpath->sdata->u.mesh.num_gates--; + spin_unlock_bh(&tbl->gates_lock); + + mpath_dbg(mpath->sdata, + "Mesh path: Deleted gate: %pM. %d known gates\n", + mpath->dst, mpath->sdata->u.mesh.num_gates); } /** @@ -781,13 +769,13 @@ static void __mesh_path_del(struct mesh_table *tbl, struct mpath_node *node) struct mesh_path *mpath = node->mpath; struct ieee80211_sub_if_data *sdata = node->mpath->sdata; - spin_lock(&mpath->state_lock); + spin_lock_bh(&mpath->state_lock); mpath->flags |= MESH_PATH_RESOLVING; if (mpath->is_gate) mesh_gate_del(tbl, mpath); hlist_del_rcu(&node->list); call_rcu(&node->rcu, mesh_path_node_reclaim); - spin_unlock(&mpath->state_lock); + spin_unlock_bh(&mpath->state_lock); atomic_dec(&sdata->u.mesh.mpaths); atomic_dec(&tbl->entries); } @@ -999,7 +987,7 @@ int mesh_path_send_to_gates(struct mesh_path *mpath) struct ieee80211_sub_if_data *sdata = mpath->sdata; struct mesh_table *tbl; struct mesh_path *from_mpath = mpath; - struct mpath_node *gate = NULL; + struct mesh_path *gate = NULL; bool copy = false; struct hlist_head *known_gates; @@ -1011,22 +999,22 @@ int mesh_path_send_to_gates(struct mesh_path *mpath) if (!known_gates) return -EHOSTUNREACH; - hlist_for_each_entry_rcu(gate, known_gates, list) { - if (gate->mpath->flags & MESH_PATH_ACTIVE) { - mpath_dbg(sdata, "Forwarding to %pM\n", gate->mpath->dst); - mesh_path_move_to_queue(gate->mpath, from_mpath, copy); - from_mpath = gate->mpath; + hlist_for_each_entry_rcu(gate, known_gates, gate_list) { + if (gate->flags & MESH_PATH_ACTIVE) { + mpath_dbg(sdata, "Forwarding to %pM\n", gate->dst); + mesh_path_move_to_queue(gate, from_mpath, copy); + from_mpath = gate; copy = true; } else { mpath_dbg(sdata, "Not forwarding to %pM (flags %#x)\n", - gate->mpath->dst, gate->mpath->flags); + gate->dst, gate->flags); } } - hlist_for_each_entry_rcu(gate, known_gates, list) { - mpath_dbg(sdata, "Sending to %pM\n", gate->mpath->dst); - mesh_path_tx_pending(gate->mpath); + hlist_for_each_entry_rcu(gate, known_gates, gate_list) { + mpath_dbg(sdata, "Sending to %pM\n", gate->dst); + mesh_path_tx_pending(gate); } return (from_mpath == mpath) ? 
-EHOSTUNREACH : 0; From 8f6fd83c6c5ec66a4a70c728535ddcdfef4f3697 Mon Sep 17 00:00:00 2001 From: Bob Copeland Date: Wed, 2 Mar 2016 10:09:19 -0500 Subject: [PATCH 0223/1649] rhashtable: accept GFP flags in rhashtable_walk_init In certain cases, the 802.11 mesh pathtable code wants to iterate over all of the entries in the forwarding table from the receive path, which is inside an RCU read-side critical section. Enable walks inside atomic sections by allowing GFP_ATOMIC allocations for the walker state. Change all existing callsites to pass in GFP_KERNEL. Acked-by: Thomas Graf Signed-off-by: Bob Copeland [also adjust gfs2/glock.c and rhashtable tests] Signed-off-by: Johannes Berg --- fs/gfs2/glock.c | 4 ++-- include/linux/rhashtable.h | 3 ++- lib/rhashtable.c | 6 ++++-- lib/test_rhashtable.c | 2 +- net/ipv6/ila/ila_xlat.c | 3 ++- net/netfilter/nft_hash.c | 4 ++-- net/netlink/af_netlink.c | 3 ++- net/sctp/proc.c | 3 ++- 8 files changed, 17 insertions(+), 11 deletions(-) diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c index 6539131c52a2..4b73bd101bdc 100644 --- a/fs/gfs2/glock.c +++ b/fs/gfs2/glock.c @@ -1913,7 +1913,7 @@ static int gfs2_glocks_open(struct inode *inode, struct file *file) if (seq->buf) seq->size = GFS2_SEQ_GOODSIZE; gi->gl = NULL; - ret = rhashtable_walk_init(&gl_hash_table, &gi->hti); + ret = rhashtable_walk_init(&gl_hash_table, &gi->hti, GFP_KERNEL); } return ret; } @@ -1941,7 +1941,7 @@ static int gfs2_glstats_open(struct inode *inode, struct file *file) if (seq->buf) seq->size = GFS2_SEQ_GOODSIZE; gi->gl = NULL; - ret = rhashtable_walk_init(&gl_hash_table, &gi->hti); + ret = rhashtable_walk_init(&gl_hash_table, &gi->hti, GFP_KERNEL); } return ret; } diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h index 63bd7601b6de..3eef0802a0cd 100644 --- a/include/linux/rhashtable.h +++ b/include/linux/rhashtable.h @@ -346,7 +346,8 @@ struct bucket_table *rhashtable_insert_slow(struct rhashtable *ht, struct bucket_table *old_tbl); int rhashtable_insert_rehash(struct rhashtable *ht, struct bucket_table *tbl); -int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter); +int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter, + gfp_t gfp); void rhashtable_walk_exit(struct rhashtable_iter *iter); int rhashtable_walk_start(struct rhashtable_iter *iter) __acquires(RCU); void *rhashtable_walk_next(struct rhashtable_iter *iter); diff --git a/lib/rhashtable.c b/lib/rhashtable.c index cc808707d1cf..5d845ffd7982 100644 --- a/lib/rhashtable.c +++ b/lib/rhashtable.c @@ -487,6 +487,7 @@ EXPORT_SYMBOL_GPL(rhashtable_insert_slow); * rhashtable_walk_init - Initialise an iterator * @ht: Table to walk over * @iter: Hash table Iterator + * @gfp: GFP flags for allocations * * This function prepares a hash table walk. * @@ -504,14 +505,15 @@ EXPORT_SYMBOL_GPL(rhashtable_insert_slow); * You must call rhashtable_walk_exit if this function returns * successfully. 
*/ -int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter) +int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter, + gfp_t gfp) { iter->ht = ht; iter->p = NULL; iter->slot = 0; iter->skip = 0; - iter->walker = kmalloc(sizeof(*iter->walker), GFP_KERNEL); + iter->walker = kmalloc(sizeof(*iter->walker), gfp); if (!iter->walker) return -ENOMEM; diff --git a/lib/test_rhashtable.c b/lib/test_rhashtable.c index 270bf7289b1e..297fdb5e74bd 100644 --- a/lib/test_rhashtable.c +++ b/lib/test_rhashtable.c @@ -143,7 +143,7 @@ static void test_bucket_stats(struct rhashtable *ht) struct rhashtable_iter hti; struct rhash_head *pos; - err = rhashtable_walk_init(ht, &hti); + err = rhashtable_walk_init(ht, &hti, GFP_KERNEL); if (err) { pr_warn("Test failed: allocation error"); return; diff --git a/net/ipv6/ila/ila_xlat.c b/net/ipv6/ila/ila_xlat.c index 295ca29a23c3..0b03533453e4 100644 --- a/net/ipv6/ila/ila_xlat.c +++ b/net/ipv6/ila/ila_xlat.c @@ -501,7 +501,8 @@ static int ila_nl_dump_start(struct netlink_callback *cb) struct ila_net *ilan = net_generic(net, ila_net_id); struct ila_dump_iter *iter = (struct ila_dump_iter *)cb->args; - return rhashtable_walk_init(&ilan->rhash_table, &iter->rhiter); + return rhashtable_walk_init(&ilan->rhash_table, &iter->rhiter, + GFP_KERNEL); } static int ila_nl_dump_done(struct netlink_callback *cb) diff --git a/net/netfilter/nft_hash.c b/net/netfilter/nft_hash.c index 3f9d45d3d9b7..6fa016564f90 100644 --- a/net/netfilter/nft_hash.c +++ b/net/netfilter/nft_hash.c @@ -192,7 +192,7 @@ static void nft_hash_walk(const struct nft_ctx *ctx, const struct nft_set *set, u8 genmask = nft_genmask_cur(read_pnet(&set->pnet)); int err; - err = rhashtable_walk_init(&priv->ht, &hti); + err = rhashtable_walk_init(&priv->ht, &hti, GFP_KERNEL); iter->err = err; if (err) return; @@ -248,7 +248,7 @@ static void nft_hash_gc(struct work_struct *work) priv = container_of(work, struct nft_hash, gc_work.work); set = nft_set_container_of(priv); - err = rhashtable_walk_init(&priv->ht, &hti); + err = rhashtable_walk_init(&priv->ht, &hti, GFP_KERNEL); if (err) goto schedule; diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 215fc08c02ab..0f16bf635480 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -2343,7 +2343,8 @@ static int netlink_walk_start(struct nl_seq_iter *iter) { int err; - err = rhashtable_walk_init(&nl_table[iter->link].hash, &iter->hti); + err = rhashtable_walk_init(&nl_table[iter->link].hash, &iter->hti, + GFP_KERNEL); if (err) { iter->link = MAX_LINKS; return err; diff --git a/net/sctp/proc.c b/net/sctp/proc.c index 5cfac8d5d3b3..6d45d53321e6 100644 --- a/net/sctp/proc.c +++ b/net/sctp/proc.c @@ -319,7 +319,8 @@ static int sctp_transport_walk_start(struct seq_file *seq) struct sctp_ht_iter *iter = seq->private; int err; - err = rhashtable_walk_init(&sctp_transport_hashtable, &iter->hti); + err = rhashtable_walk_init(&sctp_transport_hashtable, &iter->hti, + GFP_KERNEL); if (err) return err; From 60854fd94573f0d3b80b55b40cf0140a0430f3ab Mon Sep 17 00:00:00 2001 From: Bob Copeland Date: Wed, 2 Mar 2016 10:09:20 -0500 Subject: [PATCH 0224/1649] mac80211: mesh: convert path table to rhashtable In the time since the mesh path table was implemented as an RCU-traversable, dynamically growing hash table, a generic RCU hashtable implementation was added to the kernel. Switch the mesh path table over to rhashtable to remove some code and also gain some features like automatic shrinking. 
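For readers unfamiliar with the rhashtable API this conversion relies on, the pattern boils down to the following minimal sketch (the example_* names are illustrative only and not part of this patch): a struct with an embedded rhash_head, keyed on a MAC address, inserted and looked up through a shared rhashtable_params.

#include <linux/rhashtable.h>
#include <linux/etherdevice.h>
#include <linux/slab.h>

struct example_node {
	u8 dst[ETH_ALEN];		/* lookup key */
	struct rhash_head rhash;	/* table linkage, like mesh_path->rhash */
};

static const struct rhashtable_params example_rht_params = {
	.key_len	= ETH_ALEN,
	.key_offset	= offsetof(struct example_node, dst),
	.head_offset	= offsetof(struct example_node, rhash),
	.automatic_shrinking = true,
};

/* rhashtable_init(ht, &example_rht_params) must have been called once */
static int example_add(struct rhashtable *ht, const u8 *dst, gfp_t gfp)
{
	struct example_node *n = kzalloc(sizeof(*n), gfp);
	int ret;

	if (!n)
		return -ENOMEM;
	memcpy(n->dst, dst, ETH_ALEN);
	ret = rhashtable_lookup_insert_fast(ht, &n->rhash, example_rht_params);
	if (ret)	/* -EEXIST if the key is already present */
		kfree(n);
	return ret;
}

static struct example_node *example_lookup(struct rhashtable *ht, const u8 *dst)
{
	/* caller must hold rcu_read_lock() */
	return rhashtable_lookup_fast(ht, dst, example_rht_params);
}

The mesh_rht_params introduced below additionally supplies a custom hashfn (jhash over the last four bytes of the address), but the shape of the conversion is the same.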
Cc: Thomas Graf Cc: netdev@vger.kernel.org Signed-off-by: Bob Copeland Signed-off-by: Johannes Berg --- net/mac80211/ieee80211_i.h | 11 +- net/mac80211/mesh.c | 6 - net/mac80211/mesh.h | 31 +- net/mac80211/mesh_pathtbl.c | 798 ++++++++++++------------------------ 4 files changed, 265 insertions(+), 581 deletions(-) diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index db7f0dbebc4b..c8945e2d8a86 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -697,17 +697,10 @@ struct ieee80211_if_mesh { /* offset from skb->data while building IE */ int meshconf_offset; - struct mesh_table __rcu *mesh_paths; - struct mesh_table __rcu *mpp_paths; /* Store paths for MPP&MAP */ + struct mesh_table *mesh_paths; + struct mesh_table *mpp_paths; /* Store paths for MPP&MAP */ int mesh_paths_generation; int mpp_paths_generation; - - /* Protects assignment of the mesh_paths/mpp_paths table - * pointer for resize against reading it for add/delete - * of individual paths. Pure readers (lookups) just use - * RCU. - */ - rwlock_t pathtbl_resize_lock; }; #ifdef CONFIG_MAC80211_MESH diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c index c92af2a7714d..a216c439b6f2 100644 --- a/net/mac80211/mesh.c +++ b/net/mac80211/mesh.c @@ -1347,12 +1347,6 @@ void ieee80211_mesh_work(struct ieee80211_sub_if_data *sdata) ifmsh->last_preq + msecs_to_jiffies(ifmsh->mshcfg.dot11MeshHWMPpreqMinInterval))) mesh_path_start_discovery(sdata); - if (test_and_clear_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags)) - mesh_mpath_table_grow(sdata); - - if (test_and_clear_bit(MESH_WORK_GROW_MPP_TABLE, &ifmsh->wrkq_flags)) - mesh_mpp_table_grow(sdata); - if (test_and_clear_bit(MESH_WORK_HOUSEKEEPING, &ifmsh->wrkq_flags)) ieee80211_mesh_housekeeping(sdata); diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h index f3cc3917e048..cc6854db156e 100644 --- a/net/mac80211/mesh.h +++ b/net/mac80211/mesh.h @@ -51,10 +51,6 @@ enum mesh_path_flags { * * * @MESH_WORK_HOUSEKEEPING: run the periodic mesh housekeeping tasks - * @MESH_WORK_GROW_MPATH_TABLE: the mesh path table is full and needs - * to grow. - * @MESH_WORK_GROW_MPP_TABLE: the mesh portals table is full and needs to - * grow * @MESH_WORK_ROOT: the mesh root station needs to send a frame * @MESH_WORK_DRIFT_ADJUST: time to compensate for clock drift relative to other * mesh nodes @@ -62,8 +58,6 @@ enum mesh_path_flags { */ enum mesh_deferred_task_flags { MESH_WORK_HOUSEKEEPING, - MESH_WORK_GROW_MPATH_TABLE, - MESH_WORK_GROW_MPP_TABLE, MESH_WORK_ROOT, MESH_WORK_DRIFT_ADJUST, MESH_WORK_MBSS_CHANGED, @@ -105,6 +99,7 @@ enum mesh_deferred_task_flags { struct mesh_path { u8 dst[ETH_ALEN]; u8 mpp[ETH_ALEN]; /* used for MPP or MAP */ + struct rhash_head rhash; struct hlist_node gate_list; struct ieee80211_sub_if_data *sdata; struct sta_info __rcu *next_hop; @@ -129,34 +124,17 @@ struct mesh_path { /** * struct mesh_table * - * @hash_buckets: array of hash buckets of the table - * @hashwlock: array of locks to protect write operations, one per bucket - * @hash_mask: 2^size_order - 1, used to compute hash idx - * @hash_rnd: random value used for hash computations * @entries: number of entries in the table - * @free_node: function to free nodes of the table - * @copy_node: function to copy nodes of the table - * @size_order: determines size of the table, there will be 2^size_order hash - * buckets * @known_gates: list of known mesh gates and their mpaths by the station. The * gate's mpath may or may not be resolved and active. 
- * - * rcu_head: RCU head to free the table + * @rhash: the rhashtable containing struct mesh_paths, keyed by dest addr */ struct mesh_table { - /* Number of buckets will be 2^N */ - struct hlist_head *hash_buckets; - spinlock_t *hashwlock; /* One per bucket, for add/del */ - unsigned int hash_mask; /* (2^size_order) - 1 */ - __u32 hash_rnd; /* Used for hash generation */ atomic_t entries; /* Up to MAX_MESH_NEIGHBOURS */ - void (*free_node) (struct hlist_node *p, bool free_leafs); - int (*copy_node) (struct hlist_node *p, struct mesh_table *newtbl); - int size_order; struct hlist_head *known_gates; spinlock_t gates_lock; - struct rcu_head rcu_head; + struct rhashtable rhead; }; /* Recent multicast cache */ @@ -300,9 +278,6 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, void mesh_sta_cleanup(struct sta_info *sta); /* Private interfaces */ -/* Mesh tables */ -void mesh_mpath_table_grow(struct ieee80211_sub_if_data *sdata); -void mesh_mpp_table_grow(struct ieee80211_sub_if_data *sdata); /* Mesh paths */ int mesh_path_error_tx(struct ieee80211_sub_if_data *sdata, u8 ttl, const u8 *target, u32 target_sn, diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c index e4daf4b94eaf..7455397f8c3b 100644 --- a/net/mac80211/mesh_pathtbl.c +++ b/net/mac80211/mesh_pathtbl.c @@ -18,11 +18,20 @@ #include "ieee80211_i.h" #include "mesh.h" -/* There will be initially 2^INIT_PATHS_SIZE_ORDER buckets */ -#define INIT_PATHS_SIZE_ORDER 2 +static u32 mesh_table_hash(const void *addr, u32 len, u32 seed) +{ + /* Use last four bytes of hw addr as hash index */ + return jhash_1word(*(u32 *)(addr+2), seed); +} -/* Keep the mean chain length below this constant */ -#define MEAN_CHAIN_LEN 2 +static const struct rhashtable_params mesh_rht_params = { + .nelem_hint = 2, + .automatic_shrinking = true, + .key_len = ETH_ALEN, + .key_offset = offsetof(struct mesh_path, dst), + .head_offset = offsetof(struct mesh_path, rhash), + .hashfn = mesh_table_hash, +}; static inline bool mpath_expired(struct mesh_path *mpath) { @@ -31,157 +40,47 @@ static inline bool mpath_expired(struct mesh_path *mpath) !(mpath->flags & MESH_PATH_FIXED); } -struct mpath_node { - struct hlist_node list; - struct rcu_head rcu; - /* This indirection allows two different tables to point to the same - * mesh_path structure, useful when resizing - */ - struct mesh_path *mpath; -}; - -static inline struct mesh_table *resize_dereference_paths( - struct ieee80211_sub_if_data *sdata, - struct mesh_table __rcu *table) +static void mesh_path_reclaim(struct rcu_head *rp) { - return rcu_dereference_protected(table, - lockdep_is_held(&sdata->u.mesh.pathtbl_resize_lock)); + struct mesh_path *mpath = container_of(rp, struct mesh_path, rcu); + + del_timer_sync(&mpath->timer); + kfree(mpath); } -static inline struct mesh_table *resize_dereference_mesh_paths( - struct ieee80211_sub_if_data *sdata) +static void mesh_path_rht_free(void *ptr, void *unused_arg) { - return resize_dereference_paths(sdata, sdata->u.mesh.mesh_paths); + struct mesh_path *mpath = ptr; + call_rcu(&mpath->rcu, mesh_path_reclaim); } -static inline struct mesh_table *resize_dereference_mpp_paths( - struct ieee80211_sub_if_data *sdata) +static struct mesh_table *mesh_table_alloc(void) { - return resize_dereference_paths(sdata, sdata->u.mesh.mpp_paths); -} - -/* - * CAREFUL -- "tbl" must not be an expression, - * in particular not an rcu_dereference(), since - * it's used twice. So it is illegal to do - * for_each_mesh_entry(rcu_dereference(...), ...) 
- */ -#define for_each_mesh_entry(tbl, node, i) \ - for (i = 0; i <= tbl->hash_mask; i++) \ - hlist_for_each_entry_rcu(node, &tbl->hash_buckets[i], list) - - -static struct mesh_table *mesh_table_alloc(int size_order) -{ - int i; struct mesh_table *newtbl; newtbl = kmalloc(sizeof(struct mesh_table), GFP_ATOMIC); if (!newtbl) return NULL; - newtbl->hash_buckets = kzalloc(sizeof(struct hlist_head) * - (1 << size_order), GFP_ATOMIC); - - if (!newtbl->hash_buckets) { + newtbl->known_gates = kzalloc(sizeof(struct hlist_head), GFP_ATOMIC); + if (!newtbl->known_gates) { kfree(newtbl); return NULL; } - - newtbl->hashwlock = kmalloc(sizeof(spinlock_t) * - (1 << size_order), GFP_ATOMIC); - if (!newtbl->hashwlock) { - kfree(newtbl->hash_buckets); - kfree(newtbl); - return NULL; - } - - newtbl->size_order = size_order; - newtbl->hash_mask = (1 << size_order) - 1; + INIT_HLIST_HEAD(newtbl->known_gates); atomic_set(&newtbl->entries, 0); - get_random_bytes(&newtbl->hash_rnd, - sizeof(newtbl->hash_rnd)); - for (i = 0; i <= newtbl->hash_mask; i++) - spin_lock_init(&newtbl->hashwlock[i]); spin_lock_init(&newtbl->gates_lock); return newtbl; } -static void __mesh_table_free(struct mesh_table *tbl) +static void mesh_table_free(struct mesh_table *tbl) { - kfree(tbl->hash_buckets); - kfree(tbl->hashwlock); + rhashtable_free_and_destroy(&tbl->rhead, + mesh_path_rht_free, NULL); kfree(tbl); } -static void mesh_table_free(struct mesh_table *tbl, bool free_leafs) -{ - struct hlist_head *mesh_hash; - struct hlist_node *p, *q; - struct mesh_path *gate; - int i; - - mesh_hash = tbl->hash_buckets; - if (free_leafs) { - spin_lock_bh(&tbl->gates_lock); - hlist_for_each_entry_safe(gate, q, - tbl->known_gates, gate_list) - hlist_del(&gate->gate_list); - kfree(tbl->known_gates); - spin_unlock_bh(&tbl->gates_lock); - } - for (i = 0; i <= tbl->hash_mask; i++) { - spin_lock_bh(&tbl->hashwlock[i]); - hlist_for_each_safe(p, q, &mesh_hash[i]) { - tbl->free_node(p, free_leafs); - atomic_dec(&tbl->entries); - } - spin_unlock_bh(&tbl->hashwlock[i]); - } - - __mesh_table_free(tbl); -} - -static int mesh_table_grow(struct mesh_table *oldtbl, - struct mesh_table *newtbl) -{ - struct hlist_head *oldhash; - struct hlist_node *p, *q; - int i; - - if (atomic_read(&oldtbl->entries) - < MEAN_CHAIN_LEN * (oldtbl->hash_mask + 1)) - return -EAGAIN; - - newtbl->free_node = oldtbl->free_node; - newtbl->copy_node = oldtbl->copy_node; - newtbl->known_gates = oldtbl->known_gates; - atomic_set(&newtbl->entries, atomic_read(&oldtbl->entries)); - - oldhash = oldtbl->hash_buckets; - for (i = 0; i <= oldtbl->hash_mask; i++) - hlist_for_each(p, &oldhash[i]) - if (oldtbl->copy_node(p, newtbl) < 0) - goto errcopy; - - return 0; - -errcopy: - for (i = 0; i <= newtbl->hash_mask; i++) { - hlist_for_each_safe(p, q, &newtbl->hash_buckets[i]) - oldtbl->free_node(p, 0); - } - return -ENOMEM; -} - -static u32 mesh_table_hash(const u8 *addr, struct mesh_table *tbl) -{ - /* Use last four bytes of hw addr as hash index */ - return jhash_1word(*(u32 *)(addr+2), tbl->hash_rnd) & tbl->hash_mask; -} - - /** * * mesh_path_assign_nexthop - update mesh path next hop @@ -324,22 +223,15 @@ static struct mesh_path *mpath_lookup(struct mesh_table *tbl, const u8 *dst, struct ieee80211_sub_if_data *sdata) { struct mesh_path *mpath; - struct hlist_head *bucket; - struct mpath_node *node; - bucket = &tbl->hash_buckets[mesh_table_hash(dst, tbl)]; - hlist_for_each_entry_rcu(node, bucket, list) { - mpath = node->mpath; - if (ether_addr_equal(dst, mpath->dst)) { - if 
(mpath_expired(mpath)) { - spin_lock_bh(&mpath->state_lock); - mpath->flags &= ~MESH_PATH_ACTIVE; - spin_unlock_bh(&mpath->state_lock); - } - return mpath; - } + mpath = rhashtable_lookup_fast(&tbl->rhead, dst, mesh_rht_params); + + if (mpath && mpath_expired(mpath)) { + spin_lock_bh(&mpath->state_lock); + mpath->flags &= ~MESH_PATH_ACTIVE; + spin_unlock_bh(&mpath->state_lock); } - return NULL; + return mpath; } /** @@ -354,17 +246,52 @@ static struct mesh_path *mpath_lookup(struct mesh_table *tbl, const u8 *dst, struct mesh_path * mesh_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst) { - return mpath_lookup(rcu_dereference(sdata->u.mesh.mesh_paths), dst, - sdata); + return mpath_lookup(sdata->u.mesh.mesh_paths, dst, sdata); } struct mesh_path * mpp_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst) { - return mpath_lookup(rcu_dereference(sdata->u.mesh.mpp_paths), dst, - sdata); + return mpath_lookup(sdata->u.mesh.mpp_paths, dst, sdata); } +static struct mesh_path * +__mesh_path_lookup_by_idx(struct mesh_table *tbl, int idx) +{ + int i = 0, ret; + struct mesh_path *mpath = NULL; + struct rhashtable_iter iter; + + ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC); + if (ret) + return NULL; + + ret = rhashtable_walk_start(&iter); + if (ret && ret != -EAGAIN) + goto err; + + while ((mpath = rhashtable_walk_next(&iter))) { + if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN) + continue; + if (IS_ERR(mpath)) + break; + if (i++ == idx) + break; + } +err: + rhashtable_walk_stop(&iter); + rhashtable_walk_exit(&iter); + + if (IS_ERR(mpath) || !mpath) + return NULL; + + if (mpath_expired(mpath)) { + spin_lock_bh(&mpath->state_lock); + mpath->flags &= ~MESH_PATH_ACTIVE; + spin_unlock_bh(&mpath->state_lock); + } + return mpath; +} /** * mesh_path_lookup_by_idx - look up a path in the mesh path table by its index @@ -378,23 +305,7 @@ mpp_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst) struct mesh_path * mesh_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx) { - struct mesh_table *tbl = rcu_dereference(sdata->u.mesh.mesh_paths); - struct mpath_node *node; - int i; - int j = 0; - - for_each_mesh_entry(tbl, node, i) { - if (j++ == idx) { - if (mpath_expired(node->mpath)) { - spin_lock_bh(&node->mpath->state_lock); - node->mpath->flags &= ~MESH_PATH_ACTIVE; - spin_unlock_bh(&node->mpath->state_lock); - } - return node->mpath; - } - } - - return NULL; + return __mesh_path_lookup_by_idx(sdata->u.mesh.mesh_paths, idx); } /** @@ -409,17 +320,7 @@ mesh_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx) struct mesh_path * mpp_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx) { - struct mesh_table *tbl = rcu_dereference(sdata->u.mesh.mpp_paths); - struct mpath_node *node; - int i; - int j = 0; - - for_each_mesh_entry(tbl, node, i) { - if (j++ == idx) - return node->mpath; - } - - return NULL; + return __mesh_path_lookup_by_idx(sdata->u.mesh.mpp_paths, idx); } /** @@ -432,7 +333,7 @@ int mesh_path_add_gate(struct mesh_path *mpath) int err; rcu_read_lock(); - tbl = rcu_dereference(mpath->sdata->u.mesh.mesh_paths); + tbl = mpath->sdata->u.mesh.mesh_paths; spin_lock_bh(&mpath->state_lock); if (mpath->is_gate) { @@ -526,15 +427,9 @@ struct mesh_path *mesh_path_new(struct ieee80211_sub_if_data *sdata, struct mesh_path *mesh_path_add(struct ieee80211_sub_if_data *sdata, const u8 *dst) { - struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; - struct ieee80211_local *local = sdata->local; struct mesh_table *tbl; struct mesh_path 
*mpath, *new_mpath; - struct mpath_node *node, *new_node; - struct hlist_head *bucket; - int grow = 0; - int err; - u32 hash_idx; + int ret; if (ether_addr_equal(dst, sdata->vif.addr)) /* never add ourselves as neighbours */ @@ -546,116 +441,44 @@ struct mesh_path *mesh_path_add(struct ieee80211_sub_if_data *sdata, if (atomic_add_unless(&sdata->u.mesh.mpaths, 1, MESH_MAX_MPATHS) == 0) return ERR_PTR(-ENOSPC); - read_lock_bh(&sdata->u.mesh.pathtbl_resize_lock); - tbl = resize_dereference_mesh_paths(sdata); - - hash_idx = mesh_table_hash(dst, tbl); - bucket = &tbl->hash_buckets[hash_idx]; - - spin_lock(&tbl->hashwlock[hash_idx]); - - hlist_for_each_entry(node, bucket, list) { - mpath = node->mpath; - if (ether_addr_equal(dst, mpath->dst)) - goto found; - } - - err = -ENOMEM; new_mpath = mesh_path_new(sdata, dst, GFP_ATOMIC); if (!new_mpath) - goto err_path_alloc; + return ERR_PTR(-ENOMEM); - new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC); - if (!new_node) - goto err_node_alloc; + tbl = sdata->u.mesh.mesh_paths; + do { + ret = rhashtable_lookup_insert_fast(&tbl->rhead, + &new_mpath->rhash, + mesh_rht_params); - new_node->mpath = new_mpath; - hlist_add_head_rcu(&new_node->list, bucket); - if (atomic_inc_return(&tbl->entries) >= - MEAN_CHAIN_LEN * (tbl->hash_mask + 1)) - grow = 1; + if (ret == -EEXIST) + mpath = rhashtable_lookup_fast(&tbl->rhead, + dst, + mesh_rht_params); + } while (unlikely(ret == -EEXIST && !mpath)); + + if (ret && ret != -EEXIST) + return ERR_PTR(ret); + + /* At this point either new_mpath was added, or we found a + * matching entry already in the table; in the latter case + * free the unnecessary new entry. + */ + if (ret == -EEXIST) { + kfree(new_mpath); + new_mpath = mpath; + } sdata->u.mesh.mesh_paths_generation++; - - if (grow) { - set_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags); - ieee80211_queue_work(&local->hw, &sdata->work); - } - mpath = new_mpath; -found: - spin_unlock(&tbl->hashwlock[hash_idx]); - read_unlock_bh(&sdata->u.mesh.pathtbl_resize_lock); - return mpath; - -err_node_alloc: - kfree(new_mpath); -err_path_alloc: - atomic_dec(&sdata->u.mesh.mpaths); - spin_unlock(&tbl->hashwlock[hash_idx]); - read_unlock_bh(&sdata->u.mesh.pathtbl_resize_lock); - return ERR_PTR(err); -} - -static void mesh_table_free_rcu(struct rcu_head *rcu) -{ - struct mesh_table *tbl = container_of(rcu, struct mesh_table, rcu_head); - - mesh_table_free(tbl, false); -} - -void mesh_mpath_table_grow(struct ieee80211_sub_if_data *sdata) -{ - struct mesh_table *oldtbl, *newtbl; - - write_lock_bh(&sdata->u.mesh.pathtbl_resize_lock); - oldtbl = resize_dereference_mesh_paths(sdata); - newtbl = mesh_table_alloc(oldtbl->size_order + 1); - if (!newtbl) - goto out; - if (mesh_table_grow(oldtbl, newtbl) < 0) { - __mesh_table_free(newtbl); - goto out; - } - rcu_assign_pointer(sdata->u.mesh.mesh_paths, newtbl); - - call_rcu(&oldtbl->rcu_head, mesh_table_free_rcu); - - out: - write_unlock_bh(&sdata->u.mesh.pathtbl_resize_lock); -} - -void mesh_mpp_table_grow(struct ieee80211_sub_if_data *sdata) -{ - struct mesh_table *oldtbl, *newtbl; - - write_lock_bh(&sdata->u.mesh.pathtbl_resize_lock); - oldtbl = resize_dereference_mpp_paths(sdata); - newtbl = mesh_table_alloc(oldtbl->size_order + 1); - if (!newtbl) - goto out; - if (mesh_table_grow(oldtbl, newtbl) < 0) { - __mesh_table_free(newtbl); - goto out; - } - rcu_assign_pointer(sdata->u.mesh.mpp_paths, newtbl); - call_rcu(&oldtbl->rcu_head, mesh_table_free_rcu); - - out: - write_unlock_bh(&sdata->u.mesh.pathtbl_resize_lock); + return 
new_mpath; } int mpp_path_add(struct ieee80211_sub_if_data *sdata, const u8 *dst, const u8 *mpp) { - struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; - struct ieee80211_local *local = sdata->local; struct mesh_table *tbl; - struct mesh_path *mpath, *new_mpath; - struct mpath_node *node, *new_node; - struct hlist_head *bucket; - int grow = 0; - int err = 0; - u32 hash_idx; + struct mesh_path *new_mpath; + int ret; if (ether_addr_equal(dst, sdata->vif.addr)) /* never add ourselves as neighbours */ @@ -664,56 +487,19 @@ int mpp_path_add(struct ieee80211_sub_if_data *sdata, if (is_multicast_ether_addr(dst)) return -ENOTSUPP; - err = -ENOMEM; new_mpath = mesh_path_new(sdata, dst, GFP_ATOMIC); - if (!new_mpath) - goto err_path_alloc; - new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC); - if (!new_node) - goto err_node_alloc; + if (!new_mpath) + return -ENOMEM; memcpy(new_mpath->mpp, mpp, ETH_ALEN); - new_node->mpath = new_mpath; - read_lock_bh(&sdata->u.mesh.pathtbl_resize_lock); - tbl = resize_dereference_mpp_paths(sdata); - - hash_idx = mesh_table_hash(dst, tbl); - bucket = &tbl->hash_buckets[hash_idx]; - - spin_lock(&tbl->hashwlock[hash_idx]); - - err = -EEXIST; - hlist_for_each_entry(node, bucket, list) { - mpath = node->mpath; - if (ether_addr_equal(dst, mpath->dst)) - goto err_exists; - } - - hlist_add_head_rcu(&new_node->list, bucket); - if (atomic_inc_return(&tbl->entries) >= - MEAN_CHAIN_LEN * (tbl->hash_mask + 1)) - grow = 1; - - spin_unlock(&tbl->hashwlock[hash_idx]); - read_unlock_bh(&sdata->u.mesh.pathtbl_resize_lock); + tbl = sdata->u.mesh.mpp_paths; + ret = rhashtable_lookup_insert_fast(&tbl->rhead, + &new_mpath->rhash, + mesh_rht_params); sdata->u.mesh.mpp_paths_generation++; - - if (grow) { - set_bit(MESH_WORK_GROW_MPP_TABLE, &ifmsh->wrkq_flags); - ieee80211_queue_work(&local->hw, &sdata->work); - } - return 0; - -err_exists: - spin_unlock(&tbl->hashwlock[hash_idx]); - read_unlock_bh(&sdata->u.mesh.pathtbl_resize_lock); - kfree(new_node); -err_node_alloc: - kfree(new_mpath); -err_path_alloc: - return err; + return ret; } @@ -727,17 +513,26 @@ err_path_alloc: */ void mesh_plink_broken(struct sta_info *sta) { - struct mesh_table *tbl; + struct ieee80211_sub_if_data *sdata = sta->sdata; + struct mesh_table *tbl = sdata->u.mesh.mesh_paths; static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; struct mesh_path *mpath; - struct mpath_node *node; - struct ieee80211_sub_if_data *sdata = sta->sdata; - int i; + struct rhashtable_iter iter; + int ret; - rcu_read_lock(); - tbl = rcu_dereference(sdata->u.mesh.mesh_paths); - for_each_mesh_entry(tbl, node, i) { - mpath = node->mpath; + ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC); + if (ret) + return; + + ret = rhashtable_walk_start(&iter); + if (ret && ret != -EAGAIN) + goto out; + + while ((mpath = rhashtable_walk_next(&iter))) { + if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN) + continue; + if (IS_ERR(mpath)) + break; if (rcu_access_pointer(mpath->next_hop) == sta && mpath->flags & MESH_PATH_ACTIVE && !(mpath->flags & MESH_PATH_FIXED)) { @@ -751,30 +546,20 @@ void mesh_plink_broken(struct sta_info *sta) WLAN_REASON_MESH_PATH_DEST_UNREACHABLE, bcast); } } - rcu_read_unlock(); +out: + rhashtable_walk_stop(&iter); + rhashtable_walk_exit(&iter); } -static void mesh_path_node_reclaim(struct rcu_head *rp) +static void __mesh_path_del(struct mesh_table *tbl, struct mesh_path *mpath) { - struct mpath_node *node = container_of(rp, struct mpath_node, rcu); - - del_timer_sync(&node->mpath->timer); - 
kfree(node->mpath); - kfree(node); -} - -/* needs to be called with the corresponding hashwlock taken */ -static void __mesh_path_del(struct mesh_table *tbl, struct mpath_node *node) -{ - struct mesh_path *mpath = node->mpath; - struct ieee80211_sub_if_data *sdata = node->mpath->sdata; + struct ieee80211_sub_if_data *sdata = mpath->sdata; + rhashtable_remove_fast(&tbl->rhead, &mpath->rhash, mesh_rht_params); spin_lock_bh(&mpath->state_lock); mpath->flags |= MESH_PATH_RESOLVING; - if (mpath->is_gate) - mesh_gate_del(tbl, mpath); - hlist_del_rcu(&node->list); - call_rcu(&node->rcu, mesh_path_node_reclaim); + mesh_gate_del(tbl, mpath); + call_rcu(&mpath->rcu, mesh_path_reclaim); spin_unlock_bh(&mpath->state_lock); atomic_dec(&sdata->u.mesh.mpaths); atomic_dec(&tbl->entries); @@ -794,63 +579,87 @@ static void __mesh_path_del(struct mesh_table *tbl, struct mpath_node *node) void mesh_path_flush_by_nexthop(struct sta_info *sta) { struct ieee80211_sub_if_data *sdata = sta->sdata; - struct mesh_table *tbl; + struct mesh_table *tbl = sdata->u.mesh.mesh_paths; struct mesh_path *mpath; - struct mpath_node *node; - int i; + struct rhashtable_iter iter; + int ret; - rcu_read_lock(); - read_lock_bh(&sdata->u.mesh.pathtbl_resize_lock); - tbl = resize_dereference_mesh_paths(sdata); - for_each_mesh_entry(tbl, node, i) { - mpath = node->mpath; - if (rcu_access_pointer(mpath->next_hop) == sta) { - spin_lock(&tbl->hashwlock[i]); - __mesh_path_del(tbl, node); - spin_unlock(&tbl->hashwlock[i]); - } + ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC); + if (ret) + return; + + ret = rhashtable_walk_start(&iter); + if (ret && ret != -EAGAIN) + goto out; + + while ((mpath = rhashtable_walk_next(&iter))) { + if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN) + continue; + if (IS_ERR(mpath)) + break; + + if (rcu_access_pointer(mpath->next_hop) == sta) + __mesh_path_del(tbl, mpath); } - read_unlock_bh(&sdata->u.mesh.pathtbl_resize_lock); - rcu_read_unlock(); +out: + rhashtable_walk_stop(&iter); + rhashtable_walk_exit(&iter); } static void mpp_flush_by_proxy(struct ieee80211_sub_if_data *sdata, const u8 *proxy) { - struct mesh_table *tbl; - struct mesh_path *mpp; - struct mpath_node *node; - int i; + struct mesh_table *tbl = sdata->u.mesh.mpp_paths; + struct mesh_path *mpath; + struct rhashtable_iter iter; + int ret; - rcu_read_lock(); - read_lock_bh(&sdata->u.mesh.pathtbl_resize_lock); - tbl = resize_dereference_mpp_paths(sdata); - for_each_mesh_entry(tbl, node, i) { - mpp = node->mpath; - if (ether_addr_equal(mpp->mpp, proxy)) { - spin_lock(&tbl->hashwlock[i]); - __mesh_path_del(tbl, node); - spin_unlock(&tbl->hashwlock[i]); - } + ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC); + if (ret) + return; + + ret = rhashtable_walk_start(&iter); + if (ret && ret != -EAGAIN) + goto out; + + while ((mpath = rhashtable_walk_next(&iter))) { + if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN) + continue; + if (IS_ERR(mpath)) + break; + + if (ether_addr_equal(mpath->mpp, proxy)) + __mesh_path_del(tbl, mpath); } - read_unlock_bh(&sdata->u.mesh.pathtbl_resize_lock); - rcu_read_unlock(); +out: + rhashtable_walk_stop(&iter); + rhashtable_walk_exit(&iter); } -static void table_flush_by_iface(struct mesh_table *tbl, - struct ieee80211_sub_if_data *sdata) +static void table_flush_by_iface(struct mesh_table *tbl) { struct mesh_path *mpath; - struct mpath_node *node; - int i; + struct rhashtable_iter iter; + int ret; - WARN_ON(!rcu_read_lock_held()); - for_each_mesh_entry(tbl, node, i) { - mpath = node->mpath; - 
spin_lock_bh(&tbl->hashwlock[i]); - __mesh_path_del(tbl, node); - spin_unlock_bh(&tbl->hashwlock[i]); + ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC); + if (ret) + return; + + ret = rhashtable_walk_start(&iter); + if (ret && ret != -EAGAIN) + goto out; + + while ((mpath = rhashtable_walk_next(&iter))) { + if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN) + continue; + if (IS_ERR(mpath)) + break; + __mesh_path_del(tbl, mpath); } +out: + rhashtable_walk_stop(&iter); + rhashtable_walk_exit(&iter); } /** @@ -863,16 +672,8 @@ static void table_flush_by_iface(struct mesh_table *tbl, */ void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata) { - struct mesh_table *tbl; - - rcu_read_lock(); - read_lock_bh(&sdata->u.mesh.pathtbl_resize_lock); - tbl = resize_dereference_mesh_paths(sdata); - table_flush_by_iface(tbl, sdata); - tbl = resize_dereference_mpp_paths(sdata); - table_flush_by_iface(tbl, sdata); - read_unlock_bh(&sdata->u.mesh.pathtbl_resize_lock); - rcu_read_unlock(); + table_flush_by_iface(sdata->u.mesh.mesh_paths); + table_flush_by_iface(sdata->u.mesh.mpp_paths); } /** @@ -884,36 +685,25 @@ void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata) * * Returns: 0 if successful */ -static int table_path_del(struct mesh_table __rcu *rcu_tbl, +static int table_path_del(struct mesh_table *tbl, struct ieee80211_sub_if_data *sdata, const u8 *addr) { - struct mesh_table *tbl; struct mesh_path *mpath; - struct mpath_node *node; - struct hlist_head *bucket; - int hash_idx; - int err = 0; - tbl = resize_dereference_paths(sdata, rcu_tbl); - hash_idx = mesh_table_hash(addr, tbl); - bucket = &tbl->hash_buckets[hash_idx]; - - spin_lock(&tbl->hashwlock[hash_idx]); - hlist_for_each_entry(node, bucket, list) { - mpath = node->mpath; - if (ether_addr_equal(addr, mpath->dst)) { - __mesh_path_del(tbl, node); - goto enddel; - } + rcu_read_lock(); + mpath = rhashtable_lookup_fast(&tbl->rhead, addr, mesh_rht_params); + if (!mpath) { + rcu_read_unlock(); + return -ENXIO; } - err = -ENXIO; -enddel: - spin_unlock(&tbl->hashwlock[hash_idx]); - return err; + __mesh_path_del(tbl, mpath); + rcu_read_unlock(); + return 0; } + /** * mesh_path_del - delete a mesh path from the table * @@ -924,36 +714,13 @@ enddel: */ int mesh_path_del(struct ieee80211_sub_if_data *sdata, const u8 *addr) { - int err = 0; + int err; /* flush relevant mpp entries first */ mpp_flush_by_proxy(sdata, addr); - read_lock_bh(&sdata->u.mesh.pathtbl_resize_lock); err = table_path_del(sdata->u.mesh.mesh_paths, sdata, addr); sdata->u.mesh.mesh_paths_generation++; - read_unlock_bh(&sdata->u.mesh.pathtbl_resize_lock); - - return err; -} - -/** - * mpp_path_del - delete a mesh proxy path from the table - * - * @addr: addr address (ETH_ALEN length) - * @sdata: local subif - * - * Returns: 0 if successful - */ -static int mpp_path_del(struct ieee80211_sub_if_data *sdata, const u8 *addr) -{ - int err = 0; - - read_lock_bh(&sdata->u.mesh.pathtbl_resize_lock); - err = table_path_del(sdata->u.mesh.mpp_paths, sdata, addr); - sdata->u.mesh.mpp_paths_generation++; - read_unlock_bh(&sdata->u.mesh.pathtbl_resize_lock); - return err; } @@ -987,18 +754,17 @@ int mesh_path_send_to_gates(struct mesh_path *mpath) struct ieee80211_sub_if_data *sdata = mpath->sdata; struct mesh_table *tbl; struct mesh_path *from_mpath = mpath; - struct mesh_path *gate = NULL; + struct mesh_path *gate; bool copy = false; struct hlist_head *known_gates; - rcu_read_lock(); - tbl = rcu_dereference(sdata->u.mesh.mesh_paths); + tbl = sdata->u.mesh.mesh_paths; 
known_gates = tbl->known_gates; - rcu_read_unlock(); if (!known_gates) return -EHOSTUNREACH; + rcu_read_lock(); hlist_for_each_entry_rcu(gate, known_gates, gate_list) { if (gate->flags & MESH_PATH_ACTIVE) { mpath_dbg(sdata, "Forwarding to %pM\n", gate->dst); @@ -1016,6 +782,7 @@ int mesh_path_send_to_gates(struct mesh_path *mpath) mpath_dbg(sdata, "Sending to %pM\n", gate->dst); mesh_path_tx_pending(gate); } + rcu_read_unlock(); return (from_mpath == mpath) ? -EHOSTUNREACH : 0; } @@ -1072,118 +839,73 @@ void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop) mesh_path_tx_pending(mpath); } -static void mesh_path_node_free(struct hlist_node *p, bool free_leafs) -{ - struct mesh_path *mpath; - struct mpath_node *node = hlist_entry(p, struct mpath_node, list); - mpath = node->mpath; - hlist_del_rcu(p); - if (free_leafs) { - del_timer_sync(&mpath->timer); - kfree(mpath); - } - kfree(node); -} - -static int mesh_path_node_copy(struct hlist_node *p, struct mesh_table *newtbl) -{ - struct mesh_path *mpath; - struct mpath_node *node, *new_node; - u32 hash_idx; - - new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC); - if (new_node == NULL) - return -ENOMEM; - - node = hlist_entry(p, struct mpath_node, list); - mpath = node->mpath; - new_node->mpath = mpath; - hash_idx = mesh_table_hash(mpath->dst, newtbl); - hlist_add_head(&new_node->list, - &newtbl->hash_buckets[hash_idx]); - return 0; -} - int mesh_pathtbl_init(struct ieee80211_sub_if_data *sdata) { struct mesh_table *tbl_path, *tbl_mpp; int ret; - tbl_path = mesh_table_alloc(INIT_PATHS_SIZE_ORDER); + tbl_path = mesh_table_alloc(); if (!tbl_path) return -ENOMEM; - tbl_path->free_node = &mesh_path_node_free; - tbl_path->copy_node = &mesh_path_node_copy; - tbl_path->known_gates = kzalloc(sizeof(struct hlist_head), GFP_ATOMIC); - if (!tbl_path->known_gates) { - ret = -ENOMEM; - goto free_path; - } - INIT_HLIST_HEAD(tbl_path->known_gates); - - tbl_mpp = mesh_table_alloc(INIT_PATHS_SIZE_ORDER); + tbl_mpp = mesh_table_alloc(); if (!tbl_mpp) { ret = -ENOMEM; goto free_path; } - tbl_mpp->free_node = &mesh_path_node_free; - tbl_mpp->copy_node = &mesh_path_node_copy; - tbl_mpp->known_gates = kzalloc(sizeof(struct hlist_head), GFP_ATOMIC); - if (!tbl_mpp->known_gates) { - ret = -ENOMEM; - goto free_mpp; - } - INIT_HLIST_HEAD(tbl_mpp->known_gates); - rwlock_init(&sdata->u.mesh.pathtbl_resize_lock); + rhashtable_init(&tbl_path->rhead, &mesh_rht_params); + rhashtable_init(&tbl_mpp->rhead, &mesh_rht_params); - /* Need no locking since this is during init */ - RCU_INIT_POINTER(sdata->u.mesh.mesh_paths, tbl_path); - RCU_INIT_POINTER(sdata->u.mesh.mpp_paths, tbl_mpp); + sdata->u.mesh.mesh_paths = tbl_path; + sdata->u.mesh.mpp_paths = tbl_mpp; return 0; -free_mpp: - mesh_table_free(tbl_mpp, true); free_path: - mesh_table_free(tbl_path, true); + mesh_table_free(tbl_path); return ret; } +static +void mesh_path_tbl_expire(struct ieee80211_sub_if_data *sdata, + struct mesh_table *tbl) +{ + struct mesh_path *mpath; + struct rhashtable_iter iter; + int ret; + + ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_KERNEL); + if (ret) + return; + + ret = rhashtable_walk_start(&iter); + if (ret && ret != -EAGAIN) + goto out; + + while ((mpath = rhashtable_walk_next(&iter))) { + if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN) + continue; + if (IS_ERR(mpath)) + break; + if ((!(mpath->flags & MESH_PATH_RESOLVING)) && + (!(mpath->flags & MESH_PATH_FIXED)) && + time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE)) + __mesh_path_del(tbl, mpath); + } 
+out: + rhashtable_walk_stop(&iter); + rhashtable_walk_exit(&iter); +} + void mesh_path_expire(struct ieee80211_sub_if_data *sdata) { - struct mesh_table *tbl; - struct mesh_path *mpath; - struct mpath_node *node; - int i; - - rcu_read_lock(); - tbl = rcu_dereference(sdata->u.mesh.mesh_paths); - for_each_mesh_entry(tbl, node, i) { - mpath = node->mpath; - if ((!(mpath->flags & MESH_PATH_RESOLVING)) && - (!(mpath->flags & MESH_PATH_FIXED)) && - time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE)) - mesh_path_del(sdata, mpath->dst); - } - - tbl = rcu_dereference(sdata->u.mesh.mpp_paths); - for_each_mesh_entry(tbl, node, i) { - mpath = node->mpath; - if ((!(mpath->flags & MESH_PATH_FIXED)) && - time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE)) - mpp_path_del(sdata, mpath->dst); - } - - rcu_read_unlock(); + mesh_path_tbl_expire(sdata, sdata->u.mesh.mesh_paths); + mesh_path_tbl_expire(sdata, sdata->u.mesh.mpp_paths); } void mesh_pathtbl_unregister(struct ieee80211_sub_if_data *sdata) { - /* no need for locking during exit path */ - mesh_table_free(rcu_dereference_protected(sdata->u.mesh.mesh_paths, 1), - true); - mesh_table_free(rcu_dereference_protected(sdata->u.mesh.mpp_paths, 1), - true); + mesh_table_free(sdata->u.mesh.mesh_paths); + mesh_table_free(sdata->u.mesh.mpp_paths); } From f59374eb427fb1377fdb7b8b3691c48e0c77a3c4 Mon Sep 17 00:00:00 2001 From: Sara Sharon Date: Wed, 2 Mar 2016 23:46:14 +0200 Subject: [PATCH 0225/1649] mac80211: synchronize driver rx queues before removing a station Some devices, like iwlwifi, have RSS queues. This may cause a situation where a disassociation is handled in control path and results in station removal while there are prior RX frames that were still not processed in other queues. When they will be processed the station will be gone, and the frames will be dropped. Add a synchronization interface to avoid that. When driver returns from the synchronization mac80211 may remove the station. Signed-off-by: Sara Sharon Signed-off-by: Emmanuel Grumbach Signed-off-by: Johannes Berg --- include/net/mac80211.h | 5 +++++ net/mac80211/driver-ops.h | 15 +++++++++++++++ net/mac80211/sta_info.c | 9 ++++++++- net/mac80211/trace.h | 12 ++++++++++++ 4 files changed, 40 insertions(+), 1 deletion(-) diff --git a/include/net/mac80211.h b/include/net/mac80211.h index 7cb791f21722..a53333cb1528 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -3354,6 +3354,10 @@ enum ieee80211_reconfig_type { * the function call. * * @wake_tx_queue: Called when new packets have been added to the queue. + * @sync_rx_queues: Process all pending frames in RSS queues. This is a + * synchronization which is needed in case driver has in its RSS queues + * pending frames that were received prior to the control path action + * currently taken (e.g. disassociation) but are not processed yet. 
*/ struct ieee80211_ops { void (*tx)(struct ieee80211_hw *hw, @@ -3591,6 +3595,7 @@ struct ieee80211_ops { void (*wake_tx_queue)(struct ieee80211_hw *hw, struct ieee80211_txq *txq); + void (*sync_rx_queues)(struct ieee80211_hw *hw); }; /** diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h index 18b0d65baff0..184473c257eb 100644 --- a/net/mac80211/driver-ops.h +++ b/net/mac80211/driver-ops.h @@ -1,3 +1,8 @@ +/* +* Portions of this file +* Copyright(c) 2016 Intel Deutschland GmbH +*/ + #ifndef __MAC80211_DRIVER_OPS #define __MAC80211_DRIVER_OPS @@ -29,6 +34,16 @@ static inline void drv_tx(struct ieee80211_local *local, local->ops->tx(&local->hw, control, skb); } +static inline void drv_sync_rx_queues(struct ieee80211_local *local, + struct sta_info *sta) +{ + if (local->ops->sync_rx_queues) { + trace_drv_sync_rx_queues(local, sta->sdata, &sta->sta); + local->ops->sync_rx_queues(&local->hw); + trace_drv_return_void(local); + } +} + static inline void drv_get_et_strings(struct ieee80211_sub_if_data *sdata, u32 sset, u8 *data) { diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index d20bab5c146c..00c82fb152c0 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c @@ -2,7 +2,7 @@ * Copyright 2002-2005, Instant802 Networks, Inc. * Copyright 2006-2007 Jiri Benc * Copyright 2013-2014 Intel Mobile Communications GmbH - * Copyright (C) 2015 Intel Deutschland GmbH + * Copyright (C) 2015 - 2016 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -875,6 +875,13 @@ static int __must_check __sta_info_destroy_part1(struct sta_info *sta) set_sta_flag(sta, WLAN_STA_BLOCK_BA); ieee80211_sta_tear_down_BA_sessions(sta, AGG_STOP_DESTROY_STA); + /* + * Before removing the station from the driver there might be pending + * rx frames on RSS queues sent prior to the disassociation - wait for + * all such frames to be processed. + */ + drv_sync_rx_queues(local, sta); + ret = sta_info_hash_del(local, sta); if (WARN_ON(ret)) return ret; diff --git a/net/mac80211/trace.h b/net/mac80211/trace.h index 2b0a17ee907a..8c3b7ae103bc 100644 --- a/net/mac80211/trace.h +++ b/net/mac80211/trace.h @@ -1,3 +1,8 @@ +/* +* Portions of this file +* Copyright(c) 2016 Intel Deutschland GmbH +*/ + #if !defined(__MAC80211_DRIVER_TRACE) || defined(TRACE_HEADER_MULTI_READ) #define __MAC80211_DRIVER_TRACE @@ -899,6 +904,13 @@ DEFINE_EVENT(sta_event, drv_sta_pre_rcu_remove, TP_ARGS(local, sdata, sta) ); +DEFINE_EVENT(sta_event, drv_sync_rx_queues, + TP_PROTO(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct ieee80211_sta *sta), + TP_ARGS(local, sdata, sta) +); + DEFINE_EVENT(sta_event, drv_sta_rate_tbl_update, TP_PROTO(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, From 38de03d2a28925b489c11546804e2f5418cc17a4 Mon Sep 17 00:00:00 2001 From: Arend van Spriel Date: Wed, 2 Mar 2016 20:37:18 +0100 Subject: [PATCH 0226/1649] nl80211: add feature for BSS selection support Introducing a new feature that the driver can use to indicate the driver/firmware supports configuration of BSS selection criteria upon CONNECT command. This can be useful when multiple BSS-es are found belonging to the same ESS, ie. Infra-BSS with same SSID. The criteria can then be used to offload selection of a preferred BSS. 
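As a rough illustration of how a driver might consume this feature (the my_drv_* function names are hypothetical and not part of this patch), the wiphy advertises the supported behaviours as a bitmask of nl80211_bss_select_attr values, and the .connect() callback then acts on the parsed cfg80211_bss_selection:

#include <net/cfg80211.h>

static void my_drv_setup_wiphy(struct wiphy *wiphy)
{
	/* behaviours this driver/firmware can honour in .connect() */
	wiphy->bss_select_support = BIT(NL80211_BSS_SELECT_ATTR_RSSI) |
				    BIT(NL80211_BSS_SELECT_ATTR_BAND_PREF);
}

static int my_drv_connect(struct wiphy *wiphy, struct net_device *dev,
			  struct cfg80211_connect_params *sme)
{
	switch (sme->bss_select.behaviour) {
	case NL80211_BSS_SELECT_ATTR_BAND_PREF:
		/* prefer BSS-es on sme->bss_select.param.band_pref,
		 * falling back to RSSI within that band
		 */
		break;
	case NL80211_BSS_SELECT_ATTR_RSSI:
	default:
		/* plain RSSI-based selection (or no criteria given) */
		break;
	}
	/* ... hand the connect request to firmware ... */
	return 0;
}

nl80211 guarantees that at most one behaviour attribute is passed and that it is one the wiphy flagged in bss_select_support, per parse_bss_select() above.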
Reviewed-by: Hante Meuleman Reviewed-by: Franky (Zhenhui) Lin Reviewed-by: Pieter-Paul Giesberts Reviewed-by: Lei Zhang Signed-off-by: Arend van Spriel [move wiphy support check into parse_bss_select()] Signed-off-by: Johannes Berg --- include/net/cfg80211.h | 34 +++++++++++ include/uapi/linux/nl80211.h | 52 ++++++++++++++++ net/wireless/core.c | 7 +++ net/wireless/nl80211.c | 111 +++++++++++++++++++++++++++++++++++ 4 files changed, 204 insertions(+) diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 14c0c437d973..0bbfbf3cbca8 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -1858,6 +1858,33 @@ struct cfg80211_ibss_params { struct ieee80211_ht_cap ht_capa_mask; }; +/** + * struct cfg80211_bss_select_adjust - BSS selection with RSSI adjustment. + * + * @band: band of BSS which should match for RSSI level adjustment. + * @delta: value of RSSI level adjustment. + */ +struct cfg80211_bss_select_adjust { + enum ieee80211_band band; + s8 delta; +}; + +/** + * struct cfg80211_bss_selection - connection parameters for BSS selection. + * + * @behaviour: requested BSS selection behaviour. + * @param: parameters for requestion behaviour. + * @band_pref: preferred band for %NL80211_BSS_SELECT_ATTR_BAND_PREF. + * @adjust: parameters for %NL80211_BSS_SELECT_ATTR_RSSI_ADJUST. + */ +struct cfg80211_bss_selection { + enum nl80211_bss_select_attr behaviour; + union { + enum ieee80211_band band_pref; + struct cfg80211_bss_select_adjust adjust; + } param; +}; + /** * struct cfg80211_connect_params - Connection parameters * @@ -1895,6 +1922,7 @@ struct cfg80211_ibss_params { * @vht_capa_mask: The bits of vht_capa which are to be used. * @pbss: if set, connect to a PCP instead of AP. Valid for DMG * networks. + * @bss_select: criteria to be used for BSS selection. */ struct cfg80211_connect_params { struct ieee80211_channel *channel; @@ -1918,6 +1946,7 @@ struct cfg80211_connect_params { struct ieee80211_vht_cap vht_capa; struct ieee80211_vht_cap vht_capa_mask; bool pbss; + struct cfg80211_bss_selection bss_select; }; /** @@ -3186,6 +3215,9 @@ struct wiphy_vendor_command { * low rssi when a frame is heard on different channel, then it should set * this variable to the maximal offset for which it can compensate. * This value should be set in MHz. + * @bss_select_support: bitmask indicating the BSS selection criteria supported + * by the driver in the .connect() callback. The bit position maps to the + * attribute indices defined in &enum nl80211_bss_select_attr. */ struct wiphy { /* assign these fields before you register the wiphy */ @@ -3308,6 +3340,8 @@ struct wiphy { u8 max_num_csa_counters; u8 max_adj_channel_rssi_comp; + u32 bss_select_support; + char priv[0] __aligned(NETDEV_ALIGN); }; diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index 23bf0667540c..b2a8d8c84a57 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -1797,6 +1797,12 @@ enum nl80211_commands { * in a PBSS. Specified in %NL80211_CMD_CONNECT to request * connecting to a PCP, and in %NL80211_CMD_START_AP to start * a PCP instead of AP. Relevant for DMG networks only. + * @NL80211_ATTR_BSS_SELECT: nested attribute for driver supporting the + * BSS selection feature. When used with %NL80211_CMD_GET_WIPHY it contains + * attributes according &enum nl80211_bss_select_attr to indicate what + * BSS selection behaviours are supported. 
When used with %NL80211_CMD_CONNECT + * it contains the behaviour-specific attribute containing the parameters for + * BSS selection to be done by driver and/or firmware. * * @NUM_NL80211_ATTR: total number of nl80211_attrs available * @NL80211_ATTR_MAX: highest attribute number currently defined @@ -2174,6 +2180,8 @@ enum nl80211_attrs { NL80211_ATTR_PBSS, + NL80211_ATTR_BSS_SELECT, + /* add attributes here, update the policy in nl80211.c */ __NL80211_ATTR_AFTER_LAST, @@ -4667,4 +4675,48 @@ enum nl80211_sched_scan_plan { __NL80211_SCHED_SCAN_PLAN_AFTER_LAST - 1 }; +/** + * struct nl80211_bss_select_rssi_adjust - RSSI adjustment parameters. + * + * @band: band of BSS that must match for RSSI value adjustment. + * @delta: value used to adjust the RSSI value of matching BSS. + */ +struct nl80211_bss_select_rssi_adjust { + __u8 band; + __s8 delta; +} __attribute__((packed)); + +/** + * enum nl80211_bss_select_attr - attributes for bss selection. + * + * @__NL80211_BSS_SELECT_ATTR_INVALID: reserved. + * @NL80211_BSS_SELECT_ATTR_RSSI: Flag indicating only RSSI-based BSS selection + * is requested. + * @NL80211_BSS_SELECT_ATTR_BAND_PREF: attribute indicating BSS + * selection should be done such that the specified band is preferred. + * When there are multiple BSS-es in the preferred band, the driver + * shall use RSSI-based BSS selection as a second step. The value of + * this attribute is according to &enum nl80211_band (u32). + * @NL80211_BSS_SELECT_ATTR_RSSI_ADJUST: When present the RSSI level for + * BSS-es in the specified band is to be adjusted before doing + * RSSI-based BSS selection. The attribute value is a packed structure + * value as specified by &struct nl80211_bss_select_rssi_adjust. + * @NL80211_BSS_SELECT_ATTR_MAX: highest bss select attribute number. + * @__NL80211_BSS_SELECT_ATTR_AFTER_LAST: internal use. + * + * One and only one of these attributes are found within %NL80211_ATTR_BSS_SELECT + * for %NL80211_CMD_CONNECT. It specifies the required BSS selection behaviour + * which the driver shall use. + */ +enum nl80211_bss_select_attr { + __NL80211_BSS_SELECT_ATTR_INVALID, + NL80211_BSS_SELECT_ATTR_RSSI, + NL80211_BSS_SELECT_ATTR_BAND_PREF, + NL80211_BSS_SELECT_ATTR_RSSI_ADJUST, + + /* keep last */ + __NL80211_BSS_SELECT_ATTR_AFTER_LAST, + NL80211_BSS_SELECT_ATTR_MAX = __NL80211_BSS_SELECT_ATTR_AFTER_LAST - 1 +}; + #endif /* __LINUX_NL80211_H */ diff --git a/net/wireless/core.c b/net/wireless/core.c index 9f1c4aa851ef..5327e4b974fa 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c @@ -626,6 +626,13 @@ int wiphy_register(struct wiphy *wiphy) !rdev->ops->set_mac_acl))) return -EINVAL; + /* assure only valid behaviours are flagged by driver + * hence subtract 2 as bit 0 is invalid. 
+ */ + if (WARN_ON(wiphy->bss_select_support && + (wiphy->bss_select_support & ~(BIT(__NL80211_BSS_SELECT_ATTR_AFTER_LAST) - 2)))) + return -EINVAL; + if (wiphy->addresses) memcpy(wiphy->perm_addr, wiphy->addresses[0].addr, ETH_ALEN); diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 1b43f7839eeb..d6c6449c0389 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -402,6 +402,7 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = { [NL80211_ATTR_SCHED_SCAN_DELAY] = { .type = NLA_U32 }, [NL80211_ATTR_REG_INDOOR] = { .type = NLA_FLAG }, [NL80211_ATTR_PBSS] = { .type = NLA_FLAG }, + [NL80211_ATTR_BSS_SELECT] = { .type = NLA_NESTED }, }; /* policy for the key attributes */ @@ -486,6 +487,15 @@ nl80211_plan_policy[NL80211_SCHED_SCAN_PLAN_MAX + 1] = { [NL80211_SCHED_SCAN_PLAN_ITERATIONS] = { .type = NLA_U32 }, }; +static const struct nla_policy +nl80211_bss_select_policy[NL80211_BSS_SELECT_ATTR_MAX + 1] = { + [NL80211_BSS_SELECT_ATTR_RSSI] = { .type = NLA_FLAG }, + [NL80211_BSS_SELECT_ATTR_BAND_PREF] = { .type = NLA_U32 }, + [NL80211_BSS_SELECT_ATTR_RSSI_ADJUST] = { + .len = sizeof(struct nl80211_bss_select_rssi_adjust) + }, +}; + static int nl80211_prepare_wdev_dump(struct sk_buff *skb, struct netlink_callback *cb, struct cfg80211_registered_device **rdev, @@ -1731,6 +1741,25 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *rdev, rdev->wiphy.ext_features)) goto nla_put_failure; + if (rdev->wiphy.bss_select_support) { + struct nlattr *nested; + u32 bss_select_support = rdev->wiphy.bss_select_support; + + nested = nla_nest_start(msg, NL80211_ATTR_BSS_SELECT); + if (!nested) + goto nla_put_failure; + + i = 0; + while (bss_select_support) { + if ((bss_select_support & 1) && + nla_put_flag(msg, i)) + goto nla_put_failure; + i++; + bss_select_support >>= 1; + } + nla_nest_end(msg, nested); + } + /* done */ state->split_start = 0; break; @@ -5758,6 +5787,73 @@ static int validate_scan_freqs(struct nlattr *freqs) return n_channels; } +static bool is_band_valid(struct wiphy *wiphy, enum ieee80211_band b) +{ + return b < IEEE80211_NUM_BANDS && wiphy->bands[b]; +} + +static int parse_bss_select(struct nlattr *nla, struct wiphy *wiphy, + struct cfg80211_bss_selection *bss_select) +{ + struct nlattr *attr[NL80211_BSS_SELECT_ATTR_MAX + 1]; + struct nlattr *nest; + int err; + bool found = false; + int i; + + /* only process one nested attribute */ + nest = nla_data(nla); + if (!nla_ok(nest, nla_len(nest))) + return -EINVAL; + + err = nla_parse(attr, NL80211_BSS_SELECT_ATTR_MAX, nla_data(nest), + nla_len(nest), nl80211_bss_select_policy); + if (err) + return err; + + /* only one attribute may be given */ + for (i = 0; i <= NL80211_BSS_SELECT_ATTR_MAX; i++) { + if (attr[i]) { + if (found) + return -EINVAL; + found = true; + } + } + + bss_select->behaviour = __NL80211_BSS_SELECT_ATTR_INVALID; + + if (attr[NL80211_BSS_SELECT_ATTR_RSSI]) + bss_select->behaviour = NL80211_BSS_SELECT_ATTR_RSSI; + + if (attr[NL80211_BSS_SELECT_ATTR_BAND_PREF]) { + bss_select->behaviour = NL80211_BSS_SELECT_ATTR_BAND_PREF; + bss_select->param.band_pref = + nla_get_u32(attr[NL80211_BSS_SELECT_ATTR_BAND_PREF]); + if (!is_band_valid(wiphy, bss_select->param.band_pref)) + return -EINVAL; + } + + if (attr[NL80211_BSS_SELECT_ATTR_RSSI_ADJUST]) { + struct nl80211_bss_select_rssi_adjust *adj_param; + + adj_param = nla_data(attr[NL80211_BSS_SELECT_ATTR_RSSI_ADJUST]); + bss_select->behaviour = NL80211_BSS_SELECT_ATTR_RSSI_ADJUST; + bss_select->param.adjust.band = adj_param->band; 
+ bss_select->param.adjust.delta = adj_param->delta; + if (!is_band_valid(wiphy, bss_select->param.adjust.band)) + return -EINVAL; + } + + /* user-space did not provide behaviour attribute */ + if (bss_select->behaviour == __NL80211_BSS_SELECT_ATTR_INVALID) + return -EINVAL; + + if (!(wiphy->bss_select_support & BIT(bss_select->behaviour))) + return -EINVAL; + + return 0; +} + static int nl80211_parse_random_mac(struct nlattr **attrs, u8 *mac_addr, u8 *mac_addr_mask) { @@ -8001,6 +8097,21 @@ static int nl80211_connect(struct sk_buff *skb, struct genl_info *info) return -EOPNOTSUPP; } + if (info->attrs[NL80211_ATTR_BSS_SELECT]) { + /* bss selection makes no sense if bssid is set */ + if (connect.bssid) { + kzfree(connkeys); + return -EINVAL; + } + + err = parse_bss_select(info->attrs[NL80211_ATTR_BSS_SELECT], + wiphy, &connect.bss_select); + if (err) { + kzfree(connkeys); + return err; + } + } + wdev_lock(dev->ieee80211_ptr); err = cfg80211_connect(rdev, dev, &connect, connkeys, NULL); wdev_unlock(dev->ieee80211_ptr); From f66b60f6524c970d43af7a68dd50dcce289887e7 Mon Sep 17 00:00:00 2001 From: Sven Eckelmann Date: Wed, 24 Feb 2016 16:25:49 +0100 Subject: [PATCH 0227/1649] mac80211: fix parsing of 40Mhz in injected radiotap header The MCS bandwidth part of the radiotap header is 2 bits wide. The full 2 bit have to compared against IEEE80211_RADIOTAP_MCS_BW_40 and not only if the first bit is set. Otherwise IEEE80211_RADIOTAP_MCS_BW_40 can be confused with IEEE80211_RADIOTAP_MCS_BW_20U. Fixes: dfdfc2beb0dd ("mac80211: Parse legacy and HT rate in injected frames") Signed-off-by: Sven Eckelmann Signed-off-by: Johannes Berg --- net/mac80211/tx.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index b3196b1e15c2..51e225e4b450 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -1691,7 +1691,7 @@ static bool ieee80211_parse_tx_radiotap(struct ieee80211_local *local, bool rate_found = false; u8 rate_retries = 0; u16 rate_flags = 0; - u8 mcs_known, mcs_flags; + u8 mcs_known, mcs_flags, mcs_bw; u16 vht_known; u8 vht_mcs = 0, vht_nss = 0; int i; @@ -1769,8 +1769,9 @@ static bool ieee80211_parse_tx_radiotap(struct ieee80211_local *local, mcs_flags & IEEE80211_RADIOTAP_MCS_SGI) rate_flags |= IEEE80211_TX_RC_SHORT_GI; + mcs_bw = mcs_flags & IEEE80211_RADIOTAP_MCS_BW_MASK; if (mcs_known & IEEE80211_RADIOTAP_MCS_HAVE_BW && - mcs_flags & IEEE80211_RADIOTAP_MCS_BW_40) + mcs_bw == IEEE80211_RADIOTAP_MCS_BW_40) rate_flags |= IEEE80211_TX_RC_40_MHZ_WIDTH; break; From 07310a63147164eaf44a68932fbe9dbbde0fa82b Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Wed, 2 Mar 2016 15:54:51 +0100 Subject: [PATCH 0228/1649] mac80211: do not pass injected frames without a valid rate to the driver Fall back to rate control if the requested bitrate was not found. 
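As a rough standalone illustration of the two radiotap fixes just described, the sketch below uses made-up constants and flag names rather than the real radiotap/mac80211 definitions: the two-bit bandwidth field is masked before it is compared against the 40 MHz value, and a rate lookup that came up empty clears the injection flag so the frame falls back to rate control.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* assumed two-bit layout, mirroring (but not taken from) radiotap */
    #define MCS_BW_MASK      0x3
    #define MCS_BW_40        1
    #define MCS_BW_20U       3

    #define CTRL_RATE_INJECT 0x1   /* hypothetical control flag */

    static bool want_40mhz(uint8_t mcs_flags)
    {
        /* compare the whole field; a single-bit test would also match
         * MCS_BW_20U, which has the low bit set in this layout */
        return (mcs_flags & MCS_BW_MASK) == MCS_BW_40;
    }

    static void finish_parse(int8_t rate_idx, uint32_t *ctrl_flags)
    {
        /* no usable bitrate was found: let rate control pick one instead */
        if (rate_idx < 0)
            *ctrl_flags &= ~CTRL_RATE_INJECT;
    }

    int main(void)
    {
        uint32_t ctrl = CTRL_RATE_INJECT;

        printf("BW_20U parsed as 40 MHz? %d\n", want_40mhz(MCS_BW_20U));
        finish_parse(-1, &ctrl);
        printf("inject flag after failed rate lookup: %u\n", ctrl);
        return 0;
    }
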
Fixes: dfdfc2beb0dd ("mac80211: Parse legacy and HT rate in injected frames") Signed-off-by: Felix Fietkau Signed-off-by: Johannes Berg --- net/mac80211/tx.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 51e225e4b450..597c8fe672a3 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -1839,6 +1839,9 @@ static bool ieee80211_parse_tx_radiotap(struct ieee80211_local *local, } } + if (info->control.rates[0].idx < 0) + info->control.flags &= ~IEEE80211_TX_CTRL_RATE_INJECT; + info->control.rates[0].flags = rate_flags; info->control.rates[0].count = min_t(u8, rate_retries + 1, local->hw.max_rate_tries); From a619afe814453300684f1d5a6478d67f791bc723 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Mon, 7 Mar 2016 09:30:03 -0800 Subject: [PATCH 0229/1649] i40e/i40evf: Add support for bulk free in Tx cleanup This patch enables bulk Tx clean for skbs. In order to enable it we need to pass the napi_budget value as that is used to determine if we are truly running in NAPI mode or if we are simply calling the routine from netpoll with a budget of 0. In order to avoid adding too many more variables I thought it best to pass the VSI directly in a fashion similar to what we do on igb and ixgbe with the q_vector. Signed-off-by: Alexander Duyck Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_txrx.c | 20 ++++++++++--------- drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 20 ++++++++++--------- 2 files changed, 22 insertions(+), 18 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 8fb2a966d70e..01cff073f8db 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -636,19 +636,21 @@ u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw) /** * i40e_clean_tx_irq - Reclaim resources after transmit completes - * @tx_ring: tx ring to clean - * @budget: how many cleans we're allowed + * @vsi: the VSI we care about + * @tx_ring: Tx ring to clean + * @napi_budget: Used to determine if we are in netpoll * * Returns true if there's any budget left (e.g. 
the clean is finished) **/ -static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget) +static bool i40e_clean_tx_irq(struct i40e_vsi *vsi, + struct i40e_ring *tx_ring, int napi_budget) { u16 i = tx_ring->next_to_clean; struct i40e_tx_buffer *tx_buf; struct i40e_tx_desc *tx_head; struct i40e_tx_desc *tx_desc; - unsigned int total_packets = 0; - unsigned int total_bytes = 0; + unsigned int total_bytes = 0, total_packets = 0; + unsigned int budget = vsi->work_limit; tx_buf = &tx_ring->tx_bi[i]; tx_desc = I40E_TX_DESC(tx_ring, i); @@ -678,7 +680,7 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget) total_packets += tx_buf->gso_segs; /* free the skb */ - dev_consume_skb_any(tx_buf->skb); + napi_consume_skb(tx_buf->skb, napi_budget); /* unmap skb header data */ dma_unmap_single(tx_ring->dev, @@ -749,7 +751,7 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget) if (budget && ((j / (WB_STRIDE + 1)) == 0) && (j != 0) && - !test_bit(__I40E_DOWN, &tx_ring->vsi->state) && + !test_bit(__I40E_DOWN, &vsi->state) && (I40E_DESC_UNUSED(tx_ring) != tx_ring->count)) tx_ring->arm_wb = true; } @@ -767,7 +769,7 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget) smp_mb(); if (__netif_subqueue_stopped(tx_ring->netdev, tx_ring->queue_index) && - !test_bit(__I40E_DOWN, &tx_ring->vsi->state)) { + !test_bit(__I40E_DOWN, &vsi->state)) { netif_wake_subqueue(tx_ring->netdev, tx_ring->queue_index); ++tx_ring->tx_stats.restart_queue; @@ -1975,7 +1977,7 @@ int i40e_napi_poll(struct napi_struct *napi, int budget) * budget and be more aggressive about cleaning up the Tx descriptors. */ i40e_for_each_ring(ring, q_vector->tx) { - if (!i40e_clean_tx_irq(ring, vsi->work_limit)) { + if (!i40e_clean_tx_irq(vsi, ring, budget)) { clean_complete = false; continue; } diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c index 839a6df62f72..9e911363c11b 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c @@ -155,19 +155,21 @@ u32 i40evf_get_tx_pending(struct i40e_ring *ring, bool in_sw) /** * i40e_clean_tx_irq - Reclaim resources after transmit completes - * @tx_ring: tx ring to clean - * @budget: how many cleans we're allowed + * @vsi: the VSI we care about + * @tx_ring: Tx ring to clean + * @napi_budget: Used to determine if we are in netpoll * * Returns true if there's any budget left (e.g. 
the clean is finished) **/ -static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget) +static bool i40e_clean_tx_irq(struct i40e_vsi *vsi, + struct i40e_ring *tx_ring, int napi_budget) { u16 i = tx_ring->next_to_clean; struct i40e_tx_buffer *tx_buf; struct i40e_tx_desc *tx_head; struct i40e_tx_desc *tx_desc; - unsigned int total_packets = 0; - unsigned int total_bytes = 0; + unsigned int total_bytes = 0, total_packets = 0; + unsigned int budget = vsi->work_limit; tx_buf = &tx_ring->tx_bi[i]; tx_desc = I40E_TX_DESC(tx_ring, i); @@ -197,7 +199,7 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget) total_packets += tx_buf->gso_segs; /* free the skb */ - dev_kfree_skb_any(tx_buf->skb); + napi_consume_skb(tx_buf->skb, napi_budget); /* unmap skb header data */ dma_unmap_single(tx_ring->dev, @@ -267,7 +269,7 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget) if (budget && ((j / (WB_STRIDE + 1)) == 0) && (j > 0) && - !test_bit(__I40E_DOWN, &tx_ring->vsi->state) && + !test_bit(__I40E_DOWN, &vsi->state) && (I40E_DESC_UNUSED(tx_ring) != tx_ring->count)) tx_ring->arm_wb = true; } @@ -285,7 +287,7 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget) smp_mb(); if (__netif_subqueue_stopped(tx_ring->netdev, tx_ring->queue_index) && - !test_bit(__I40E_DOWN, &tx_ring->vsi->state)) { + !test_bit(__I40E_DOWN, &vsi->state)) { netif_wake_subqueue(tx_ring->netdev, tx_ring->queue_index); ++tx_ring->tx_stats.restart_queue; @@ -1411,7 +1413,7 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget) * budget and be more aggressive about cleaning up the Tx descriptors. */ i40e_for_each_ring(ring, q_vector->tx) { - if (!i40e_clean_tx_irq(ring, vsi->work_limit)) { + if (!i40e_clean_tx_irq(vsi, ring, budget)) { clean_complete = false; continue; } From 4ea623922d1d73c162da53e02cce1d0d3fd55893 Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Thu, 10 Mar 2016 14:59:39 -0800 Subject: [PATCH 0230/1649] i40e/i40evf: Fix casting in transmit code Simple cast to fix a sparse warning. 
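For readers unfamiliar with the warning being silenced here, the following is a simplified stand-in for the kernel's sparse endian annotations; the __CHECKER__/__bitwise/__force definitions are approximations for illustration, not the real headers. Under sparse, a __bitwise typedef such as __be16 is a distinct type, so passing a plain integer constant where one is expected triggers a type warning unless the conversion is explicitly marked intentional with __force, which is what the added casts do.

    #include <stdio.h>

    #ifdef __CHECKER__                       /* defined when sparse runs */
    # define __bitwise __attribute__((bitwise))
    # define __force   __attribute__((force))
    #else                                    /* ordinary compilers see nothing */
    # define __bitwise
    # define __force
    #endif

    typedef unsigned short u16;
    typedef u16 __bitwise be16;              /* stand-in for __be16 */

    static u16 example_ntohs(be16 v)         /* toy byte swap, not the real one */
    {
        return (u16)(((__force u16)v >> 8) | ((__force u16)v << 8));
    }

    static u16 scale_by_one(void)
    {
        /* without the __force cast, sparse would flag passing the plain
         * constant 1 where a be16 argument is expected */
        return example_ntohs((__force be16)1);
    }

    int main(void)
    {
        printf("%u\n", (unsigned)scale_by_one());   /* 1 byte-swapped -> 256 */
        return 0;
    }
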
Fixes: commit 5453205cd097 ("i40e/i40evf: Enable support for SKB_GSO_UDP_TUNNEL_CSUM") Signed-off-by: Jesse Brandeburg Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_txrx.c | 5 +++-- drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 5 +++-- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 01cff073f8db..5bef5b0f00d9 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -2305,7 +2305,8 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb, /* remove payload length from outer checksum */ paylen = (__force u16)l4.udp->check; - paylen += ntohs(1) * (u16)~(skb->len - l4_offset); + paylen += ntohs((__force __be16)1) * + (u16)~(skb->len - l4_offset); l4.udp->check = ~csum_fold((__force __wsum)paylen); } @@ -2327,7 +2328,7 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb, /* remove payload length from inner checksum */ paylen = (__force u16)l4.tcp->check; - paylen += ntohs(1) * (u16)~(skb->len - l4_offset); + paylen += ntohs((__force __be16)1) * (u16)~(skb->len - l4_offset); l4.tcp->check = ~csum_fold((__force __wsum)paylen); /* compute length of segmentation header */ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c index 9e911363c11b..570348d93e5d 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c @@ -1572,7 +1572,8 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb, /* remove payload length from outer checksum */ paylen = (__force u16)l4.udp->check; - paylen += ntohs(1) * (u16)~(skb->len - l4_offset); + paylen += ntohs((__force __be16)1) * + (u16)~(skb->len - l4_offset); l4.udp->check = ~csum_fold((__force __wsum)paylen); } @@ -1594,7 +1595,7 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb, /* remove payload length from inner checksum */ paylen = (__force u16)l4.tcp->check; - paylen += ntohs(1) * (u16)~(skb->len - l4_offset); + paylen += ntohs((__force __be16)1) * (u16)~(skb->len - l4_offset); l4.tcp->check = ~csum_fold((__force __wsum)paylen); /* compute length of segmentation header */ From b7c359376429953dc1672224dbc9845eadf2a29c Mon Sep 17 00:00:00 2001 From: Catherine Sullivan Date: Thu, 10 Mar 2016 14:59:40 -0800 Subject: [PATCH 0231/1649] i40e/i40evf: Remove I40E_MAX_USER_PRIORITY define This patch removes the duplicate definition of I40E_MAX_USER_PRIORITY in i40e.h that is not needed. 
Signed-off-by: Catherine Sullivan Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e.h | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index f208570cfdbf..d25b3be5ba89 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -244,7 +244,6 @@ struct i40e_fdir_filter { #define I40E_DCB_PRIO_TYPE_STRICT 0 #define I40E_DCB_PRIO_TYPE_ETS 1 #define I40E_DCB_STRICT_PRIO_CREDITS 127 -#define I40E_MAX_USER_PRIORITY 8 /* DCB per TC information data structure */ struct i40e_tc_info { u16 qoffset; /* Queue offset from base queue */ From 06171e9c0bd54bd7dde67dfb2fa4cced23cff880 Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Thu, 3 Mar 2016 23:25:42 +0100 Subject: [PATCH 0232/1649] mac80211: minstrel_ht: improve sample rate skip logic There were a few issues that were slowing down the process of finding the optimal rate, especially on devices with multi-rate retry limitations: When max_tp_rate[0] was slower than max_tp_rate[1], the code did not sample max_tp_rate[1], which would often allow it to switch places with max_tp_rate[0] (e.g. if only the first sampling attempts were bad, but the rate is otherwise good). Also, sample attempts of rates between max_tp_rate[0] and [1] were being ignored in this case, because the code only checked if the rate was slower than [1]. Fix this by checking against the fastest / second fastest max_tp_rate instead of assuming a specific order between the two. In my tests this patch significantly reduces the time until minstrel_ht finds the optimal rate right after assoc Signed-off-by: Felix Fietkau Signed-off-by: Johannes Berg --- net/mac80211/rc80211_minstrel_ht.c | 23 ++++++++++++++++------- 1 file changed, 16 insertions(+), 7 deletions(-) diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c index 370d677b547b..46ce08ed70b5 100644 --- a/net/mac80211/rc80211_minstrel_ht.c +++ b/net/mac80211/rc80211_minstrel_ht.c @@ -924,6 +924,7 @@ minstrel_get_sample_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi) struct minstrel_rate_stats *mrs; struct minstrel_mcs_group_data *mg; unsigned int sample_dur, sample_group, cur_max_tp_streams; + int tp_rate1, tp_rate2; int sample_idx = 0; if (mi->sample_wait > 0) { @@ -945,14 +946,22 @@ minstrel_get_sample_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi) mrs = &mg->rates[sample_idx]; sample_idx += sample_group * MCS_GROUP_RATES; + /* Set tp_rate1, tp_rate2 to the highest / second highest max_tp_rate */ + if (minstrel_get_duration(mi->max_tp_rate[0]) > + minstrel_get_duration(mi->max_tp_rate[1])) { + tp_rate1 = mi->max_tp_rate[1]; + tp_rate2 = mi->max_tp_rate[0]; + } else { + tp_rate1 = mi->max_tp_rate[0]; + tp_rate2 = mi->max_tp_rate[1]; + } + /* * Sampling might add some overhead (RTS, no aggregation) - * to the frame. Hence, don't use sampling for the currently - * used rates. + * to the frame. Hence, don't use sampling for the highest currently + * used highest throughput or probability rate. */ - if (sample_idx == mi->max_tp_rate[0] || - sample_idx == mi->max_tp_rate[1] || - sample_idx == mi->max_prob_rate) + if (sample_idx == mi->max_tp_rate[0] || sample_idx == mi->max_prob_rate) return -1; /* @@ -967,10 +976,10 @@ minstrel_get_sample_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi) * if the link is working perfectly. 
*/ - cur_max_tp_streams = minstrel_mcs_groups[mi->max_tp_rate[0] / + cur_max_tp_streams = minstrel_mcs_groups[tp_rate1 / MCS_GROUP_RATES].streams; sample_dur = minstrel_get_duration(sample_idx); - if (sample_dur >= minstrel_get_duration(mi->max_tp_rate[1]) && + if (sample_dur >= minstrel_get_duration(tp_rate2) && (cur_max_tp_streams - 1 < minstrel_mcs_groups[sample_group].streams || sample_dur >= minstrel_get_duration(mi->max_prob_rate))) { From 2aa4d45635ad09fbd7ff6b6155d2d50b2b31cf90 Mon Sep 17 00:00:00 2001 From: Akira Moroo Date: Tue, 8 Mar 2016 23:17:42 +0900 Subject: [PATCH 0233/1649] cfg80211: fix kernel-doc struct name This patch fix a structure name mismatch in cfg80211.h. Signed-off-by: Moroo Akira Reviewed-by: Julian Calaby Signed-off-by: Johannes Berg --- include/net/cfg80211.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 0bbfbf3cbca8..4ece4f961f40 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -1619,7 +1619,7 @@ struct cfg80211_inform_bss { }; /** - * struct cfg80211_bss_ie_data - BSS entry IE data + * struct cfg80211_bss_ies - BSS entry IE data * @tsf: TSF contained in the frame that carried these IEs * @rcu_head: internal use, for freeing * @len: length of the IEs From f278ce4ffaaa2ee3adb957add3df7b41e6ecc1b3 Mon Sep 17 00:00:00 2001 From: Emmanuel Grumbach Date: Wed, 9 Mar 2016 10:08:27 +0200 Subject: [PATCH 0234/1649] mac80211: Set global RRM capability Allow publishing RRM capabilities for features that are not HW dependent. Signed-off-by: Emmanuel Grumbach Signed-off-by: Johannes Berg --- net/mac80211/main.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/net/mac80211/main.c b/net/mac80211/main.c index 8190bf27ebff..815596140057 100644 --- a/net/mac80211/main.c +++ b/net/mac80211/main.c @@ -558,6 +558,8 @@ struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len, if (!ops->set_key) wiphy->flags |= WIPHY_FLAG_IBSS_RSN; + wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_RRM); + wiphy->bss_priv_size = sizeof(struct ieee80211_bss); local = wiphy_priv(wiphy); From e03521232d2f808b1b593f44565665efb7b242b1 Mon Sep 17 00:00:00 2001 From: Sara Sharon Date: Wed, 9 Mar 2016 13:27:08 +0200 Subject: [PATCH 0235/1649] mac80211: add NETIF_F_RXCSUM to features white list NETIF_F_RXCSUM is not in the white list, though some drivers may want to set it in order to enable seeing the actual RX checksum status in ethtool. Signed-off-by: Sara Sharon Signed-off-by: Emmanuel Grumbach Signed-off-by: Johannes Berg --- net/mac80211/main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/mac80211/main.c b/net/mac80211/main.c index 815596140057..33c80de61eca 100644 --- a/net/mac80211/main.c +++ b/net/mac80211/main.c @@ -856,7 +856,7 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) /* Only HW csum features are currently compatible with mac80211 */ feature_whitelist = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_HW_CSUM | NETIF_F_SG | NETIF_F_HIGHDMA | - NETIF_F_GSO_SOFTWARE; + NETIF_F_GSO_SOFTWARE | NETIF_F_RXCSUM; if (WARN_ON(hw->netdev_features & ~feature_whitelist)) return -EINVAL; From 1e0bbebaae660f27c24cbd9c3e693420234115ff Mon Sep 17 00:00:00 2001 From: Sara Sharon Date: Wed, 9 Mar 2016 13:27:09 +0200 Subject: [PATCH 0236/1649] mac80211: enable starting BA session with custom timeout Currently the debugfs entry for starting aggregation session starts it with timeout of 5 seconds. Allow opening a session with a custom timeout (according to spec 0 is no timeout). 
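A simplified userspace re-creation of the resulting parsing flow (ordinary strsep/sscanf on a local string, with none of the debugfs plumbing), showing how the space-separated command is taken apart:

    #define _DEFAULT_SOURCE          /* for strsep() */
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* parse commands of the form "tx|rx start|stop [timeout=<ms>] <tid>" */
    static int parse_agg_cmd(char *buf)
    {
        char *pos = buf, *tok;
        int tx, start, timeout = 5000;   /* assumed default, as before */
        unsigned long tid;

        tok = strsep(&pos, " ");
        if (!tok) return -1;
        tx = !strcmp(tok, "tx");
        if (!tx && strcmp(tok, "rx")) return -1;

        tok = strsep(&pos, " ");
        if (!tok) return -1;
        start = !strcmp(tok, "start");
        if (!start && strcmp(tok, "stop")) return -1;
        if (start && !tx) return -1;     /* only tx sessions can be started */

        tok = strsep(&pos, " ");
        if (!tok) return -1;
        if (sscanf(tok, "timeout=%d", &timeout) == 1) {
            tok = strsep(&pos, " ");     /* timeout only valid for "tx start" */
            if (!tok || !tx || !start) return -1;
        }

        tid = strtoul(tok, NULL, 0);
        printf("%s %s tid=%lu timeout=%d\n", tx ? "tx" : "rx",
               start ? "start" : "stop", tid, timeout);
        return 0;
    }

    int main(void)
    {
        char cmd[] = "tx start timeout=1000 3";
        return parse_agg_cmd(cmd);
    }

With this in place, a write such as "tx start timeout=0 5" should request a session on TID 5 with no timeout, while omitting the token keeps the previous 5000 ms default.
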
while at it, refactor the function and remove the magic numbers. Signed-off-by: Sara Sharon Signed-off-by: Emmanuel Grumbach Signed-off-by: Johannes Berg --- net/mac80211/debugfs_sta.c | 53 ++++++++++++++++++++++++-------------- 1 file changed, 33 insertions(+), 20 deletions(-) diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c index a39512f09f9e..fbbd66c9ca51 100644 --- a/net/mac80211/debugfs_sta.c +++ b/net/mac80211/debugfs_sta.c @@ -3,6 +3,7 @@ * Copyright (c) 2006 Jiri Benc * Copyright 2007 Johannes Berg * Copyright 2013-2014 Intel Mobile Communications GmbH + * Copyright(c) 2016 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -151,11 +152,12 @@ static ssize_t sta_agg_status_read(struct file *file, char __user *userbuf, static ssize_t sta_agg_status_write(struct file *file, const char __user *userbuf, size_t count, loff_t *ppos) { - char _buf[12] = {}, *buf = _buf; + char _buf[25] = {}, *buf = _buf; struct sta_info *sta = file->private_data; bool start, tx; unsigned long tid; - int ret; + char *pos; + int ret, timeout = 5000; if (count > sizeof(_buf)) return -EINVAL; @@ -164,37 +166,48 @@ static ssize_t sta_agg_status_write(struct file *file, const char __user *userbu return -EFAULT; buf[sizeof(_buf) - 1] = '\0'; - - if (strncmp(buf, "tx ", 3) == 0) { - buf += 3; - tx = true; - } else if (strncmp(buf, "rx ", 3) == 0) { - buf += 3; - tx = false; - } else + pos = buf; + buf = strsep(&pos, " "); + if (!buf) return -EINVAL; - if (strncmp(buf, "start ", 6) == 0) { - buf += 6; + if (!strcmp(buf, "tx")) + tx = true; + else if (!strcmp(buf, "rx")) + tx = false; + else + return -EINVAL; + + buf = strsep(&pos, " "); + if (!buf) + return -EINVAL; + if (!strcmp(buf, "start")) { start = true; if (!tx) return -EINVAL; - } else if (strncmp(buf, "stop ", 5) == 0) { - buf += 5; + } else if (!strcmp(buf, "stop")) { start = false; - } else + } else { return -EINVAL; + } + + buf = strsep(&pos, " "); + if (!buf) + return -EINVAL; + if (sscanf(buf, "timeout=%d", &timeout) == 1) { + buf = strsep(&pos, " "); + if (!buf || !tx || !start) + return -EINVAL; + } ret = kstrtoul(buf, 0, &tid); - if (ret) - return ret; - - if (tid >= IEEE80211_NUM_TIDS) + if (ret || tid >= IEEE80211_NUM_TIDS) return -EINVAL; if (tx) { if (start) - ret = ieee80211_start_tx_ba_session(&sta->sta, tid, 5000); + ret = ieee80211_start_tx_ba_session(&sta->sta, tid, + timeout); else ret = ieee80211_stop_tx_ba_session(&sta->sta, tid); } else { From b6caccaccf749dddd296f3056111d6c4b94500c1 Mon Sep 17 00:00:00 2001 From: Kevin Scott Date: Thu, 10 Mar 2016 14:59:41 -0800 Subject: [PATCH 0237/1649] i40e: Save off VSI resource count when updating VSI When updating a VSI, save off the number of allocated and unallocated VSIs as we do when adding a VSI. 
Signed-off-by: Kevin Scott Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_common.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c index 4596294c2ab1..b0fd6844bcd7 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c @@ -2157,6 +2157,9 @@ i40e_status i40e_aq_update_vsi_params(struct i40e_hw *hw, struct i40e_aq_desc desc; struct i40e_aqc_add_get_update_vsi *cmd = (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw; + struct i40e_aqc_add_get_update_vsi_completion *resp = + (struct i40e_aqc_add_get_update_vsi_completion *) + &desc.params.raw; i40e_status status; i40e_fill_default_direct_cmd_desc(&desc, @@ -2168,6 +2171,9 @@ i40e_status i40e_aq_update_vsi_params(struct i40e_hw *hw, status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info, sizeof(vsi_ctx->info), cmd_details); + vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used); + vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free); + return status; } From 618290262e960469758d4ab67457fcb2ea356d51 Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Thu, 10 Mar 2016 14:59:42 -0800 Subject: [PATCH 0238/1649] i40e: Fix up return code The i40e_common.c typically uses i40e_status as a return code, but got missed this one case. Signed-off-by: Jesse Brandeburg Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_common.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c index b0fd6844bcd7..8276a1393e6d 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c @@ -1901,13 +1901,13 @@ i40e_status i40e_aq_set_phy_int_mask(struct i40e_hw *hw, * * Reset the external PHY. **/ -enum i40e_status_code i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags, - struct i40e_asq_cmd_details *cmd_details) +i40e_status i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags, + struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_set_phy_debug *cmd = (struct i40e_aqc_set_phy_debug *)&desc.params.raw; - enum i40e_status_code status; + i40e_status status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_phy_debug); From 692783980ad6163e8586baa36c066cd0d22fc7ce Mon Sep 17 00:00:00 2001 From: Shannon Nelson Date: Thu, 10 Mar 2016 14:59:43 -0800 Subject: [PATCH 0239/1649] i40e: Remove MSIx only if created When cleaning up the interrupt handling, clean up the IRQs only if we actually got them set up. There are a couple of error recovery paths that were violating this and causing the kernel a bit of indigestion. 
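The pattern at work, sketched in plain C with invented names (the driver itself checks its MSIX flag and the msix_entries pointer before calling free_irq): record what was actually set up and make the cleanup routine a no-op for anything that never got that far, so error paths can call it unconditionally.

    #include <stdio.h>
    #include <stdlib.h>

    #define FLAG_MSIX_ENABLED 0x1   /* invented flag bit */

    struct pf_state {
        unsigned int flags;
        int *msix_entries;          /* NULL until allocation succeeded */
    };

    static void clear_interrupts(struct pf_state *pf)
    {
        /* only tear down what was actually set up */
        if ((pf->flags & FLAG_MSIX_ENABLED) && pf->msix_entries) {
            free(pf->msix_entries);          /* stand-in for free_irq() etc. */
            pf->msix_entries = NULL;
        }
    }

    int main(void)
    {
        struct pf_state pf = { .flags = FLAG_MSIX_ENABLED, .msix_entries = NULL };

        clear_interrupts(&pf);   /* safe even though allocation never happened */
        printf("cleanup ok\n");
        return 0;
    }
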
Signed-off-by: Shannon Nelson Reviewed-by: Williams, Mitch A Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 650336e50255..2464dca88f79 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -4164,7 +4164,7 @@ static void i40e_clear_interrupt_scheme(struct i40e_pf *pf) int i; i40e_stop_misc_vector(pf); - if (pf->flags & I40E_FLAG_MSIX_ENABLED) { + if (pf->flags & I40E_FLAG_MSIX_ENABLED && pf->msix_entries) { synchronize_irq(pf->msix_entries[0].vector); free_irq(pf->msix_entries[0].vector, pf); } From 96f321c9d42a61aa1e2760a47a574f286b028be2 Mon Sep 17 00:00:00 2001 From: Mohammed Shafi Shajakhan Date: Sat, 19 Mar 2016 19:59:43 +0530 Subject: [PATCH 0240/1649] mac80211: Remove unused variable in per STA debugfs struct Remove unused variable in per STA debugfs structure, 'commit 34e895075e21 ("mac80211: allow station add/remove to sleep")' removed the only user of 'add_has_run'. Signed-off-by: Mohammed Shafi Shajakhan Signed-off-by: Johannes Berg --- net/mac80211/debugfs_sta.c | 2 -- net/mac80211/sta_info.h | 1 - 2 files changed, 3 deletions(-) diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c index fbbd66c9ca51..051b22505720 100644 --- a/net/mac80211/debugfs_sta.c +++ b/net/mac80211/debugfs_sta.c @@ -352,8 +352,6 @@ void ieee80211_sta_debugfs_add(struct sta_info *sta) struct dentry *stations_dir = sta->sdata->debugfs.subdir_stations; u8 mac[3*ETH_ALEN]; - sta->debugfs.add_has_run = true; - if (!stations_dir) return; diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h index 053f5c4fa495..276056e99862 100644 --- a/net/mac80211/sta_info.h +++ b/net/mac80211/sta_info.h @@ -488,7 +488,6 @@ struct sta_info { #ifdef CONFIG_MAC80211_DEBUGFS struct sta_info_debugfsdentries { struct dentry *dir; - bool add_has_run; } debugfs; #endif From fc4a25c5b741ecb4ef4d0f1802775e8a88d7e0a7 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Tue, 5 Apr 2016 11:59:05 +0200 Subject: [PATCH 0241/1649] mac80211: remove sta_info debugfs sub-struct Since the previous patch, the struct only has a single member, so remove the struct and leave just the single member. Signed-off-by: Johannes Berg --- net/mac80211/debugfs_sta.c | 22 +++++++++++----------- net/mac80211/rate.h | 4 ++-- net/mac80211/sta_info.h | 6 ++---- 3 files changed, 15 insertions(+), 17 deletions(-) diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c index 051b22505720..af034912abbe 100644 --- a/net/mac80211/debugfs_sta.c +++ b/net/mac80211/debugfs_sta.c @@ -335,14 +335,14 @@ STA_OPS(vht_capa); #define DEBUGFS_ADD(name) \ debugfs_create_file(#name, 0400, \ - sta->debugfs.dir, sta, &sta_ ##name## _ops); + sta->debugfs_dir, sta, &sta_ ##name## _ops); #define DEBUGFS_ADD_COUNTER(name, field) \ if (sizeof(sta->field) == sizeof(u32)) \ - debugfs_create_u32(#name, 0400, sta->debugfs.dir, \ + debugfs_create_u32(#name, 0400, sta->debugfs_dir, \ (u32 *) &sta->field); \ else \ - debugfs_create_u64(#name, 0400, sta->debugfs.dir, \ + debugfs_create_u64(#name, 0400, sta->debugfs_dir, \ (u64 *) &sta->field); void ieee80211_sta_debugfs_add(struct sta_info *sta) @@ -366,8 +366,8 @@ void ieee80211_sta_debugfs_add(struct sta_info *sta) * destroyed quickly enough the old station's debugfs * dir might still be around. 
*/ - sta->debugfs.dir = debugfs_create_dir(mac, stations_dir); - if (!sta->debugfs.dir) + sta->debugfs_dir = debugfs_create_dir(mac, stations_dir); + if (!sta->debugfs_dir) return; DEBUGFS_ADD(flags); @@ -383,14 +383,14 @@ void ieee80211_sta_debugfs_add(struct sta_info *sta) if (sizeof(sta->driver_buffered_tids) == sizeof(u32)) debugfs_create_x32("driver_buffered_tids", 0400, - sta->debugfs.dir, + sta->debugfs_dir, (u32 *)&sta->driver_buffered_tids); else debugfs_create_x64("driver_buffered_tids", 0400, - sta->debugfs.dir, + sta->debugfs_dir, (u64 *)&sta->driver_buffered_tids); - drv_sta_add_debugfs(local, sdata, &sta->sta, sta->debugfs.dir); + drv_sta_add_debugfs(local, sdata, &sta->sta, sta->debugfs_dir); } void ieee80211_sta_debugfs_remove(struct sta_info *sta) @@ -398,7 +398,7 @@ void ieee80211_sta_debugfs_remove(struct sta_info *sta) struct ieee80211_local *local = sta->local; struct ieee80211_sub_if_data *sdata = sta->sdata; - drv_sta_remove_debugfs(local, sdata, &sta->sta, sta->debugfs.dir); - debugfs_remove_recursive(sta->debugfs.dir); - sta->debugfs.dir = NULL; + drv_sta_remove_debugfs(local, sdata, &sta->sta, sta->debugfs_dir); + debugfs_remove_recursive(sta->debugfs_dir); + sta->debugfs_dir = NULL; } diff --git a/net/mac80211/rate.h b/net/mac80211/rate.h index 624fe5b81615..8d3260785b94 100644 --- a/net/mac80211/rate.h +++ b/net/mac80211/rate.h @@ -96,9 +96,9 @@ static inline void rate_control_add_sta_debugfs(struct sta_info *sta) { #ifdef CONFIG_MAC80211_DEBUGFS struct rate_control_ref *ref = sta->rate_ctrl; - if (ref && sta->debugfs.dir && ref->ops->add_sta_debugfs) + if (ref && sta->debugfs_dir && ref->ops->add_sta_debugfs) ref->ops->add_sta_debugfs(ref->priv, sta->rate_ctrl_priv, - sta->debugfs.dir); + sta->debugfs_dir); #endif } diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h index 276056e99862..3b2105562d8b 100644 --- a/net/mac80211/sta_info.h +++ b/net/mac80211/sta_info.h @@ -371,7 +371,7 @@ DECLARE_EWMA(signal, 1024, 8) * @ampdu_mlme: A-MPDU state machine state * @timer_to_tid: identity mapping to ID timers * @mesh: mesh STA information - * @debugfs: debug filesystem info + * @debugfs_dir: debug filesystem directory dentry * @dead: set to true when sta is unlinked * @removed: set to true when sta is being removed from sta_list * @uploaded: set to true when sta is uploaded to the driver @@ -486,9 +486,7 @@ struct sta_info { u8 timer_to_tid[IEEE80211_NUM_TIDS]; #ifdef CONFIG_MAC80211_DEBUGFS - struct sta_info_debugfsdentries { - struct dentry *dir; - } debugfs; + struct dentry *debugfs_dir; #endif enum ieee80211_sta_rx_bandwidth cur_max_bandwidth; From de03d2b0ef6520cf9da2e429cd7afb534782b737 Mon Sep 17 00:00:00 2001 From: Shannon Nelson Date: Thu, 10 Mar 2016 14:59:44 -0800 Subject: [PATCH 0242/1649] i40e: Assure that adminq is alive in debug mode When dropping into debug mode in a failed probe, make sure that the AdminQ is left alive for possible hand debug of driver and firmware states. Move the mutex_init calls earlier in probe so that if init fails, the admin queue interface is still available for debugging purposes. 
Signed-off-by: Shannon Nelson Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_main.c | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 2464dca88f79..56d4416c9a11 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -10822,6 +10822,12 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) hw->bus.func = PCI_FUNC(pdev->devfn); pf->instance = pfs_found; + /* set up the locks for the AQ, do this only once in probe + * and destroy them only once in remove + */ + mutex_init(&hw->aq.asq_mutex); + mutex_init(&hw->aq.arq_mutex); + if (debug != -1) { pf->msg_enable = pf->hw.debug_mask; pf->msg_enable = debug; @@ -10867,12 +10873,6 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) /* set up a default setting for link flow control */ pf->hw.fc.requested_mode = I40E_FC_NONE; - /* set up the locks for the AQ, do this only once in probe - * and destroy them only once in remove - */ - mutex_init(&hw->aq.asq_mutex); - mutex_init(&hw->aq.arq_mutex); - err = i40e_init_adminq(hw); if (err) { if (err == I40E_ERR_FIRMWARE_API_VERSION) @@ -11265,7 +11265,6 @@ err_init_lan_hmc: kfree(pf->qp_pile); err_sw_init: err_adminq_setup: - (void)i40e_shutdown_adminq(hw); err_pf_reset: iounmap(hw->hw_addr); err_ioremap: From 602fae425cf3ade41a4787f8ddf850af418faa3b Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Thu, 17 Mar 2016 15:02:52 +0200 Subject: [PATCH 0243/1649] mac80211: don't start dynamic PS timer if not needed If the device implements dynamic PS itself, there's no need to ever start the dynamic powersave timer on RX. While at it, fix up some indentation in this code. 
Signed-off-by: Johannes Berg --- net/mac80211/rx.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index dc27becb9b71..36214e332225 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -2474,14 +2474,14 @@ ieee80211_rx_h_data(struct ieee80211_rx_data *rx) rx->skb->dev = dev; - if (local->ps_sdata && local->hw.conf.dynamic_ps_timeout > 0 && + if (!ieee80211_hw_check(&local->hw, SUPPORTS_DYNAMIC_PS) && + local->ps_sdata && local->hw.conf.dynamic_ps_timeout > 0 && !is_multicast_ether_addr( ((struct ethhdr *)rx->skb->data)->h_dest) && (!local->scanning && - !test_bit(SDATA_STATE_OFFCHANNEL, &sdata->state))) { - mod_timer(&local->dynamic_ps_timer, jiffies + - msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout)); - } + !test_bit(SDATA_STATE_OFFCHANNEL, &sdata->state))) + mod_timer(&local->dynamic_ps_timer, jiffies + + msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout)); ieee80211_deliver_skb(rx); From 3c5bcb2e1930bdbccd14a49660895f014349b51d Mon Sep 17 00:00:00 2001 From: Avraham Stern Date: Thu, 17 Mar 2016 15:02:53 +0200 Subject: [PATCH 0244/1649] ieee80211: support parsing Fine Timing Measurement action frame Add definition for Fine Timing Measurement (FTM) frame format as defined in IEEE802.11-REVmcD5.0 section 9.6.8.33 Signed-off-by: Avraham Stern Signed-off-by: Emmanuel Grumbach Signed-off-by: Johannes Berg --- include/linux/ieee80211.h | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index 3b1f6cef9513..bf9706c5b0bd 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -7,6 +7,7 @@ * Copyright (c) 2005, Devicescape Software, Inc. * Copyright (c) 2006, Michael Wu * Copyright (c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright (c) 2016 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -1011,6 +1012,16 @@ struct ieee80211_mgmt { u8 tpc_elem_length; struct ieee80211_tpc_report_ie tpc; } __packed tpc_report; + struct { + u8 action_code; + u8 dialog_token; + u8 follow_up; + u8 tod[6]; + u8 toa[6]; + __le16 tod_error; + __le16 toa_error; + u8 variable[0]; + } __packed ftm; } u; } __packed action; } u; From c84387d2f2c83d1d49a8dfefed13a8b39f017230 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Thu, 17 Mar 2016 15:02:54 +0200 Subject: [PATCH 0245/1649] mac80211: clean up station flags debugfs Avoid the really strange %s%s%s expression, use an array of flag names and check that all flags are present. 
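A minimal userspace analogue of that technique, with an invented flag enum and C11 _Static_assert standing in for BUILD_BUG_ON: the name table is indexed by the enum through designated initializers, and the compile-time check keeps table and enum in sync when a new flag is added.

    #include <stdio.h>

    enum sta_flag {           /* invented flags, for illustration only */
        STA_AUTH,
        STA_ASSOC,
        STA_AUTHORIZED,
        NUM_STA_FLAGS
    };

    static const char * const sta_flag_names[] = {
    #define FLAG(F) [STA_##F] = #F
        FLAG(AUTH),
        FLAG(ASSOC),
        FLAG(AUTHORIZED),
    #undef FLAG
    };

    /* fails to build if a flag is added without a matching name */
    _Static_assert(sizeof(sta_flag_names) / sizeof(sta_flag_names[0]) ==
                   NUM_STA_FLAGS, "flag name table out of sync");

    int main(void)
    {
        unsigned int set = (1u << STA_AUTH) | (1u << STA_AUTHORIZED);

        for (unsigned int f = 0; f < NUM_STA_FLAGS; f++)
            if (set & (1u << f))
                printf("%s\n", sta_flag_names[f]);
        return 0;
    }
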
Signed-off-by: Johannes Berg --- net/mac80211/debugfs_sta.c | 61 ++++++++++++++++++++++++++------------ net/mac80211/sta_info.h | 4 +++ 2 files changed, 46 insertions(+), 19 deletions(-) diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c index af034912abbe..33dfcbc2bf9c 100644 --- a/net/mac80211/debugfs_sta.c +++ b/net/mac80211/debugfs_sta.c @@ -52,31 +52,54 @@ static const struct file_operations sta_ ##name## _ops = { \ STA_FILE(aid, sta.aid, D); +static const char * const sta_flag_names[] = { +#define FLAG(F) [WLAN_STA_##F] = #F + FLAG(AUTH), + FLAG(ASSOC), + FLAG(PS_STA), + FLAG(AUTHORIZED), + FLAG(SHORT_PREAMBLE), + FLAG(WDS), + FLAG(CLEAR_PS_FILT), + FLAG(MFP), + FLAG(BLOCK_BA), + FLAG(PS_DRIVER), + FLAG(PSPOLL), + FLAG(TDLS_PEER), + FLAG(TDLS_PEER_AUTH), + FLAG(TDLS_INITIATOR), + FLAG(TDLS_CHAN_SWITCH), + FLAG(TDLS_OFF_CHANNEL), + FLAG(TDLS_WIDER_BW), + FLAG(UAPSD), + FLAG(SP), + FLAG(4ADDR_EVENT), + FLAG(INSERTED), + FLAG(RATE_CONTROL), + FLAG(TOFFSET_KNOWN), + FLAG(MPSP_OWNER), + FLAG(MPSP_RECIPIENT), + FLAG(PS_DELIVER), +#undef FLAG +}; + static ssize_t sta_flags_read(struct file *file, char __user *userbuf, size_t count, loff_t *ppos) { - char buf[121]; + char buf[16 * NUM_WLAN_STA_FLAGS], *pos = buf; + char *end = buf + sizeof(buf) - 1; struct sta_info *sta = file->private_data; + unsigned int flg; -#define TEST(flg) \ - test_sta_flag(sta, WLAN_STA_##flg) ? #flg "\n" : "" + BUILD_BUG_ON(ARRAY_SIZE(sta_flag_names) != NUM_WLAN_STA_FLAGS); - int res = scnprintf(buf, sizeof(buf), - "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s", - TEST(AUTH), TEST(ASSOC), TEST(PS_STA), - TEST(PS_DRIVER), TEST(AUTHORIZED), - TEST(SHORT_PREAMBLE), - sta->sta.wme ? "WME\n" : "", - TEST(WDS), TEST(CLEAR_PS_FILT), - TEST(MFP), TEST(BLOCK_BA), TEST(PSPOLL), - TEST(UAPSD), TEST(SP), TEST(TDLS_PEER), - TEST(TDLS_PEER_AUTH), TEST(TDLS_INITIATOR), - TEST(TDLS_CHAN_SWITCH), TEST(TDLS_OFF_CHANNEL), - TEST(4ADDR_EVENT), TEST(INSERTED), - TEST(RATE_CONTROL), TEST(TOFFSET_KNOWN), - TEST(MPSP_OWNER), TEST(MPSP_RECIPIENT)); -#undef TEST - return simple_read_from_buffer(userbuf, count, ppos, buf, res); + for (flg = 0; flg < NUM_WLAN_STA_FLAGS; flg++) { + if (test_sta_flag(sta, flg)) + pos += scnprintf(pos, end - pos, "%s\n", + sta_flag_names[flg]); + } + + return simple_read_from_buffer(userbuf, count, ppos, buf, strlen(buf)); } STA_OPS(flags); diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h index 3b2105562d8b..4e1ed6f26484 100644 --- a/net/mac80211/sta_info.h +++ b/net/mac80211/sta_info.h @@ -69,6 +69,8 @@ * @WLAN_STA_MPSP_RECIPIENT: local STA is recipient of a MPSP. * @WLAN_STA_PS_DELIVER: station woke up, but we're still blocking TX * until pending frames are delivered + * + * @NUM_WLAN_STA_FLAGS: number of defined flags */ enum ieee80211_sta_info_flags { WLAN_STA_AUTH, @@ -97,6 +99,8 @@ enum ieee80211_sta_info_flags { WLAN_STA_MPSP_OWNER, WLAN_STA_MPSP_RECIPIENT, WLAN_STA_PS_DELIVER, + + NUM_WLAN_STA_FLAGS, }; #define ADDBA_RESP_INTERVAL HZ From 2c61cf9c56cbc4e0a4475232659ac30bb4c28674 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Thu, 17 Mar 2016 15:02:55 +0200 Subject: [PATCH 0246/1649] mac80211: fix cipher scheme function name The code is only used with iwlwifi, but still should have proper mac80211 naming scheme; fix that. 
Signed-off-by: Johannes Berg --- net/mac80211/rx.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 36214e332225..009bb90d7f5a 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -722,8 +722,8 @@ static int ieee80211_get_mmie_keyidx(struct sk_buff *skb) return -1; } -static int iwl80211_get_cs_keyid(const struct ieee80211_cipher_scheme *cs, - struct sk_buff *skb) +static int ieee80211_get_cs_keyid(const struct ieee80211_cipher_scheme *cs, + struct sk_buff *skb) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; __le16 fc; @@ -1586,7 +1586,7 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx) if (ieee80211_has_protected(fc) && rx->sta->cipher_scheme) { cs = rx->sta->cipher_scheme; - keyid = iwl80211_get_cs_keyid(cs, rx->skb); + keyid = ieee80211_get_cs_keyid(cs, rx->skb); if (unlikely(keyid < 0)) return RX_DROP_UNUSABLE; } @@ -1670,7 +1670,7 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx) hdrlen = ieee80211_hdrlen(fc); if (cs) { - keyidx = iwl80211_get_cs_keyid(cs, rx->skb); + keyidx = ieee80211_get_cs_keyid(cs, rx->skb); if (unlikely(keyidx < 0)) return RX_DROP_UNUSABLE; From b555cf4a50c17a9714715a2d7c8574dca1a7b356 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Tue, 5 Apr 2016 10:20:02 +0200 Subject: [PATCH 0247/1649] mlxsw: spectrum: Reduce number of supported 802.1D bridges Resources allocated for these bridges at init time cannot be later used for other purposes. While current number is supported by the device, it's mostly theoretical with regards to any real use case, which leads to poor utilization of device's resources. Solve that by reducing the number. The long term plan is to make this value (along with others) user configurable via devlink and write it to NVRAM, so that it can be used during the next init. Until then we must hardcode such values. Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index 4b8abaf06321..d58ab0cd9507 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -50,7 +50,7 @@ #define MLXSW_SP_VFID_BASE VLAN_N_VID #define MLXSW_SP_VFID_PORT_MAX 512 /* Non-bridged VLAN interfaces */ -#define MLXSW_SP_VFID_BR_MAX 8192 /* Bridged VLAN interfaces */ +#define MLXSW_SP_VFID_BR_MAX 6144 /* Bridged VLAN interfaces */ #define MLXSW_SP_VFID_MAX (MLXSW_SP_VFID_PORT_MAX + MLXSW_SP_VFID_BR_MAX) #define MLXSW_SP_LAG_MAX 64 From 75f3a1018f0103025558caa60e24132a4cc9ce8f Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Tue, 5 Apr 2016 10:20:03 +0200 Subject: [PATCH 0248/1649] switchdev: Use switch ID in suggested udev rule Since there can be multiple switch ASICs on the same system we should use the switch ID in order to differentiate between them and set the switch name (e.g. swX) accordingly. Also, replace the order of the "Switch ID" and "Port Netdev Naming" sections following the above change. Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- Documentation/networking/switchdev.txt | 28 +++++++++++++------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/Documentation/networking/switchdev.txt b/Documentation/networking/switchdev.txt index 2f659129694b..31c39115834d 100644 --- a/Documentation/networking/switchdev.txt +++ b/Documentation/networking/switchdev.txt @@ -89,6 +89,18 @@ Typically, the management port is not participating in offloaded data plane and is loaded with a different driver, such as a NIC driver, on the management port device. +Switch ID +^^^^^^^^^ + +The switchdev driver must implement the switchdev op switchdev_port_attr_get +for SWITCHDEV_ATTR_ID_PORT_PARENT_ID for each port netdev, returning the same +physical ID for each port of a switch. The ID must be unique between switches +on the same system. The ID does not need to be unique between switches on +different systems. + +The switch ID is used to locate ports on a switch and to know if aggregated +ports belong to the same switch. + Port Netdev Naming ^^^^^^^^^^^^^^^^^^ @@ -104,25 +116,13 @@ external configuration. For example, if a physical 40G port is split logically into 4 10G ports, resulting in 4 port netdevs, the device can give a unique name for each port using port PHYS name. The udev rule would be: -SUBSYSTEM=="net", ACTION=="add", DRIVER="", ATTR{phys_port_name}!="", \ - NAME="$attr{phys_port_name}" +SUBSYSTEM=="net", ACTION=="add", ATTR{phys_switch_id}=="", \ + ATTR{phys_port_name}!="", NAME="swX$attr{phys_port_name}" Suggested naming convention is "swXpYsZ", where X is the switch name or ID, Y is the port name or ID, and Z is the sub-port name or ID. For example, sw1p1s0 would be sub-port 0 on port 1 on switch 1. -Switch ID -^^^^^^^^^ - -The switchdev driver must implement the switchdev op switchdev_port_attr_get -for SWITCHDEV_ATTR_ID_PORT_PARENT_ID for each port netdev, returning the same -physical ID for each port of a switch. The ID must be unique between switches -on the same system. The ID does not need to be unique between switches on -different systems. - -The switch ID is used to locate ports on a switch and to know if aggregated -ports belong to the same switch. - Port Features ^^^^^^^^^^^^^ From 2bf9a58675c5d6d37cdcb4301e6320009d299080 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Tue, 5 Apr 2016 10:20:04 +0200 Subject: [PATCH 0249/1649] mlxsw: spectrum: Add support for physical port names Export to userspace the front panel name of the port, so that udev can rename the ports accordingly. The convention suggested by switchdev documentation is used: 1) Non-split: pX 2) Split: pXsY Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- .../net/ethernet/mellanox/mlxsw/spectrum.c | 45 +++++++++++++++++-- 1 file changed, 42 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 4afbc3e9e381..cb5f36e497e9 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -305,9 +305,9 @@ mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port) return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl); } -static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, - u8 local_port, u8 *p_module, - u8 *p_width) +static int __mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, + u8 local_port, u8 *p_module, + u8 *p_width, u8 *p_lane) { char pmlp_pl[MLXSW_REG_PMLP_LEN]; int err; @@ -318,9 +318,20 @@ static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, return err; *p_module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0); *p_width = mlxsw_reg_pmlp_width_get(pmlp_pl); + *p_lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0); return 0; } +static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, + u8 local_port, u8 *p_module, + u8 *p_width) +{ + u8 lane; + + return __mlxsw_sp_port_module_info_get(mlxsw_sp, local_port, p_module, + p_width, &lane); +} + static int mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u8 local_port, u8 module, u8 width, u8 lane) { @@ -861,6 +872,33 @@ int mlxsw_sp_port_kill_vid(struct net_device *dev, return 0; } +static int mlxsw_sp_port_get_phys_port_name(struct net_device *dev, char *name, + size_t len) +{ + struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); + u8 module, width, lane; + int err; + + err = __mlxsw_sp_port_module_info_get(mlxsw_sp_port->mlxsw_sp, + mlxsw_sp_port->local_port, + &module, &width, &lane); + if (err) { + netdev_err(dev, "Failed to retrieve module information\n"); + return err; + } + + if (!mlxsw_sp_port->split) + err = snprintf(name, len, "p%d", module + 1); + else + err = snprintf(name, len, "p%ds%d", module + 1, + lane / width); + + if (err >= len) + return -EINVAL; + + return 0; +} + static const struct net_device_ops mlxsw_sp_port_netdev_ops = { .ndo_open = mlxsw_sp_port_open, .ndo_stop = mlxsw_sp_port_stop, @@ -877,6 +915,7 @@ static const struct net_device_ops mlxsw_sp_port_netdev_ops = { .ndo_bridge_setlink = switchdev_port_bridge_setlink, .ndo_bridge_getlink = switchdev_port_bridge_getlink, .ndo_bridge_dellink = switchdev_port_bridge_dellink, + .ndo_get_phys_port_name = mlxsw_sp_port_get_phys_port_name, }; static void mlxsw_sp_port_get_drvinfo(struct net_device *dev, From c99abb4cb8227bf8172c085213c91bf155c6618a Mon Sep 17 00:00:00 2001 From: Shannon Nelson Date: Thu, 10 Mar 2016 14:59:45 -0800 Subject: [PATCH 0250/1649] i40e: Remove timer and task only if created In some error scenarios, we may find ourselves trying to remove a non-existent timer or worktask. This causes the kernel some bit of consternation, so don't do it. 
Signed-off-by: Shannon Nelson Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_main.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 56d4416c9a11..e615f66f576f 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -11306,8 +11306,10 @@ static void i40e_remove(struct pci_dev *pdev) /* no more scheduling of any task */ set_bit(__I40E_SUSPENDED, &pf->state); set_bit(__I40E_DOWN, &pf->state); - del_timer_sync(&pf->service_timer); - cancel_work_sync(&pf->service_task); + if (pf->service_timer.data) + del_timer_sync(&pf->service_timer); + if (pf->service_task.func) + cancel_work_sync(&pf->service_task); if (pf->flags & I40E_FLAG_SRIOV_ENABLED) { i40e_free_vfs(pf); From d3ce57344100023faa8f514eb66dfb110b53629c Mon Sep 17 00:00:00 2001 From: Mitch Williams Date: Thu, 10 Mar 2016 14:59:46 -0800 Subject: [PATCH 0251/1649] i40e: Notify VFs of all resets Notify VFs in the reset interrupt handler, instead of the actual reset initiation code. This allows the VFs to get properly notified for all resets, including resets initiated by different PFs on the same physical device. Signed-off-by: Mitch Williams Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_main.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index e615f66f576f..98bc749ce9f0 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -5534,8 +5534,6 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags) WARN_ON(in_interrupt()); - if (i40e_check_asq_alive(&pf->hw)) - i40e_vc_notify_reset(pf); /* do the biggest reset indicated */ if (reset_flags & BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED)) { @@ -6738,6 +6736,8 @@ static void i40e_prep_for_reset(struct i40e_pf *pf) clear_bit(__I40E_RESET_INTR_RECEIVED, &pf->state); if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) return; + if (i40e_check_asq_alive(&pf->hw)) + i40e_vc_notify_reset(pf); dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n"); From 7e5a313ed9b18ba9f35df2523eb9e386a195a2c4 Mon Sep 17 00:00:00 2001 From: Mitch Williams Date: Thu, 10 Mar 2016 14:59:47 -0800 Subject: [PATCH 0252/1649] i40e: Added code to prevent double resets Clear the VFLR bit after reset processing, instead of before. This prevents double resets on VF init. 
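For reference, the register/bit arithmetic used on this path spreads the absolute VF id across 32-bit VFLRSTAT registers; a small standalone sketch of that split (the base id below is a placeholder, not a real device value):

    #include <stdio.h>

    int main(void)
    {
        unsigned int vf_base_id = 32;        /* placeholder value */
        unsigned int vf_id = 8;

        unsigned int abs_id  = vf_base_id + vf_id;
        unsigned int reg_idx = abs_id / 32;  /* which VFLRSTAT register */
        unsigned int bit_idx = abs_id % 32;  /* which bit within it */

        /* e.g. abs_id 40 -> register 1, bit 8; writing that bit clears the event */
        printf("GLGEN_VFLRSTAT(%u), bit %u, mask 0x%08x\n",
               reg_idx, bit_idx, 1u << bit_idx);
        return 0;
    }
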
Signed-off-by: Mitch Williams Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 816c6bbf7093..291d6282f95b 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -917,9 +917,9 @@ void i40e_reset_vf(struct i40e_vf *vf, bool flr) { struct i40e_pf *pf = vf->pf; struct i40e_hw *hw = &pf->hw; + u32 reg, reg_idx, bit_idx; bool rsd = false; int i; - u32 reg; if (test_and_set_bit(__I40E_VF_DISABLE, &pf->state)) return; @@ -988,6 +988,11 @@ complete_reset: } /* tell the VF the reset is done */ wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_VFACTIVE); + + /* clear the VFLR bit in GLGEN_VFLRSTAT */ + reg_idx = (hw->func_caps.vf_base_id + vf->vf_id) / 32; + bit_idx = (hw->func_caps.vf_base_id + vf->vf_id) % 32; + wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx)); i40e_flush(hw); clear_bit(__I40E_VF_DISABLE, &pf->state); } @@ -2293,9 +2298,7 @@ int i40e_vc_process_vflr_event(struct i40e_pf *pf) vf = &pf->vf[vf_id]; reg = rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx)); if (reg & BIT(bit_idx)) { - /* clear the bit in GLGEN_VFLRSTAT */ - wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx)); - + /* i40e_reset_vf will clear the bit in GLGEN_VFLRSTAT */ if (!test_bit(__I40E_DOWN, &pf->state)) i40e_reset_vf(vf, true); } From 56e5ca688f3d334ddc2acab27cb7efa83b238557 Mon Sep 17 00:00:00 2001 From: Shannon Nelson Date: Thu, 10 Mar 2016 14:59:48 -0800 Subject: [PATCH 0253/1649] i40e: Change unknown event error msg to ignore message There's no real error in an unknown event from the Firmware, we're just posting a useful FYI notice, so this patch simply removes the "Error" word. Signed-off-by: Shannon Nelson Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 98bc749ce9f0..38410050401c 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -6371,7 +6371,7 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf) break; default: dev_info(&pf->pdev->dev, - "ARQ Error: Unknown event 0x%04x received\n", + "ARQ: Unknown event 0x%04x ignored\n", opcode); break; } From 19b73d8efaa459a66665b5e0a3e7acedd05f4901 Mon Sep 17 00:00:00 2001 From: Mitch Williams Date: Thu, 10 Mar 2016 14:59:49 -0800 Subject: [PATCH 0254/1649] i40evf: Add additional check for reset If the driver happens to read a register during the time in which the device is undergoing reset, it will receive a value of 0xdeadbeef instead of a valid value. Unfortunately, the driver may misinterpret this as a valid value, especially if it's just looking for individual bits. Add an explicit check for this value when we are looking for admin queue errors, and trigger reset recovery if we find it. 
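A compact sketch of the kind of guard being added, with a fictional read helper in place of the real register access: a recognizable junk pattern returned while the device is in reset has to be filtered out before any individual error bits are interpreted.

    #include <stdint.h>
    #include <stdio.h>

    #define RESET_PATTERN 0xdeadbeef   /* value observed while in reset */

    static uint32_t read_arq_len(void)     /* stand-in for rd32(hw, ...) */
    {
        return RESET_PATTERN;              /* pretend the device is resetting */
    }

    int main(void)
    {
        uint32_t val = read_arq_len();

        if (val == RESET_PATTERN) {
            /* don't decode error bits from garbage; schedule recovery instead */
            printf("device in reset, skipping ARQ error checks\n");
            return 0;
        }
        /* ...normal per-bit error handling would follow here... */
        return 0;
    }
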
Signed-off-by: Mitch Williams Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40evf/i40evf_main.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c index 820ad94c932b..d783c1b19a16 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c @@ -1994,6 +1994,8 @@ static void i40evf_adminq_task(struct work_struct *work) /* check for error indications */ val = rd32(hw, hw->aq.arq.len); + if (val == 0xdeadbeef) /* indicates device in reset */ + goto freedom; oldval = val; if (val & I40E_VF_ARQLEN1_ARQVFE_MASK) { dev_info(&adapter->pdev->dev, "ARQ VF Error detected\n"); From 55f7d7233bd15c8a3fcf7051c681b05de5980a18 Mon Sep 17 00:00:00 2001 From: Mitch Williams Date: Thu, 10 Mar 2016 14:59:50 -0800 Subject: [PATCH 0255/1649] i40e: Change comment to reflect correct function name Minor correction in the comment to reflect the correct function name Signed-off-by: Mitch Williams Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 291d6282f95b..47b9e62473c4 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -63,7 +63,7 @@ static void i40e_vc_vf_broadcast(struct i40e_pf *pf, } /** - * i40e_vc_notify_link_state + * i40e_vc_notify_vf_link_state * @vf: pointer to the VF structure * * send a link status message to a single VF From 50f26a507664499ccef017607a29cc1456695343 Mon Sep 17 00:00:00 2001 From: Catherine Sullivan Date: Thu, 10 Mar 2016 14:59:51 -0800 Subject: [PATCH 0256/1649] i40e/i40evf: Bump patch from 1.4.25 to 1.5.1 Signed-off-by: Catherine Sullivan Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_main.c | 4 ++-- drivers/net/ethernet/intel/i40evf/i40evf_main.c | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 38410050401c..297fd39ba255 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -45,8 +45,8 @@ static const char i40e_driver_string[] = #define DRV_KERN "-k" #define DRV_VERSION_MAJOR 1 -#define DRV_VERSION_MINOR 4 -#define DRV_VERSION_BUILD 25 +#define DRV_VERSION_MINOR 5 +#define DRV_VERSION_BUILD 1 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ __stringify(DRV_VERSION_MINOR) "." \ __stringify(DRV_VERSION_BUILD) DRV_KERN diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c index d783c1b19a16..e3973684746b 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c @@ -37,8 +37,8 @@ static const char i40evf_driver_string[] = #define DRV_KERN "-k" #define DRV_VERSION_MAJOR 1 -#define DRV_VERSION_MINOR 4 -#define DRV_VERSION_BUILD 15 +#define DRV_VERSION_MINOR 5 +#define DRV_VERSION_BUILD 1 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ __stringify(DRV_VERSION_MINOR) "." 
\ __stringify(DRV_VERSION_BUILD) \ From b100e5d622aa8719cc2e776c397817afe24b1f3b Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Thu, 17 Mar 2016 15:41:37 +0200 Subject: [PATCH 0257/1649] mac80211: avoid useless memory write on each frame RX In the likely case that probe_count is 0, don't write to the memory there. Also use ifmgd consistently in the function, instead of using sdata->u.mgd as well. Signed-off-by: Johannes Berg Signed-off-by: Emmanuel Grumbach Signed-off-by: Johannes Berg --- net/mac80211/mlme.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 281b8d6e5109..2112df4ffb7b 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -122,15 +122,16 @@ void ieee80211_sta_reset_conn_monitor(struct ieee80211_sub_if_data *sdata) { struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; - if (unlikely(!sdata->u.mgd.associated)) + if (unlikely(!ifmgd->associated)) return; - ifmgd->probe_send_count = 0; + if (ifmgd->probe_send_count) + ifmgd->probe_send_count = 0; if (ieee80211_hw_check(&sdata->local->hw, CONNECTION_MONITOR)) return; - mod_timer(&sdata->u.mgd.conn_mon_timer, + mod_timer(&ifmgd->conn_mon_timer, round_jiffies_up(jiffies + IEEE80211_CONNECTION_IDLE_TIME)); } From 17b942478643c5a90c06d978479bd326040bfa19 Mon Sep 17 00:00:00 2001 From: Ayala Beker Date: Thu, 17 Mar 2016 15:41:38 +0200 Subject: [PATCH 0258/1649] cfg80211: allow userspace to specify client P2P PS support Legacy clients don't support P2P power save mechanisms, and thus if a P2P GO has a legacy client connected to it, it has to make some changes in the PS behavior. To handle this, add an attribute to specify whether a station supports P2P PS or not. If the attribute was not specified cfg80211 will assume that station supports it for P2P GO interface, and does NOT support it for AP interface, matching the current assumptions in the code. Signed-off-by: Ayala Beker Signed-off-by: Emmanuel Grumbach Signed-off-by: Johannes Berg --- include/net/cfg80211.h | 2 ++ include/uapi/linux/nl80211.h | 19 +++++++++++++++++++ net/wireless/nl80211.c | 34 ++++++++++++++++++++++++++++++++++ 3 files changed, 55 insertions(+) diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 4ece4f961f40..568c10f6d564 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -816,6 +816,7 @@ enum station_parameters_apply_mask { * @supported_oper_classes_len: number of supported operating classes * @opmode_notif: operating mode field from Operating Mode Notification * @opmode_notif_used: information if operating mode field is used + * @support_p2p_ps: information if station supports P2P PS mechanism */ struct station_parameters { const u8 *supported_rates; @@ -841,6 +842,7 @@ struct station_parameters { u8 supported_oper_classes_len; u8 opmode_notif; bool opmode_notif_used; + int support_p2p_ps; }; /** diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index b2a8d8c84a57..6da52d7b48c4 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -1804,6 +1804,9 @@ enum nl80211_commands { * it contains the behaviour-specific attribute containing the parameters for * BSS selection to be done by driver and/or firmware. * + * @NL80211_ATTR_STA_SUPPORT_P2P_PS: whether P2P PS mechanism supported + * or not. 
u8, one of the values of &enum nl80211_sta_p2p_ps_status + * * @NUM_NL80211_ATTR: total number of nl80211_attrs available * @NL80211_ATTR_MAX: highest attribute number currently defined * @__NL80211_ATTR_AFTER_LAST: internal use @@ -2182,6 +2185,8 @@ enum nl80211_attrs { NL80211_ATTR_BSS_SELECT, + NL80211_ATTR_STA_SUPPORT_P2P_PS, + /* add attributes here, update the policy in nl80211.c */ __NL80211_ATTR_AFTER_LAST, @@ -2325,6 +2330,20 @@ enum nl80211_sta_flags { NL80211_STA_FLAG_MAX = __NL80211_STA_FLAG_AFTER_LAST - 1 }; +/** + * enum nl80211_sta_p2p_ps_status - station support of P2P PS + * + * @NL80211_P2P_PS_UNSUPPORTED: station doesn't support P2P PS mechanism + * @@NL80211_P2P_PS_SUPPORTED: station supports P2P PS mechanism + * @NUM_NL80211_P2P_PS_STATUS: number of values + */ +enum nl80211_sta_p2p_ps_status { + NL80211_P2P_PS_UNSUPPORTED = 0, + NL80211_P2P_PS_SUPPORTED, + + NUM_NL80211_P2P_PS_STATUS, +}; + #define NL80211_STA_FLAG_MAX_OLD_API NL80211_STA_FLAG_TDLS_PEER /** diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index d6c6449c0389..824569b1c5a1 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -403,6 +403,7 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = { [NL80211_ATTR_REG_INDOOR] = { .type = NLA_FLAG }, [NL80211_ATTR_PBSS] = { .type = NLA_FLAG }, [NL80211_ATTR_BSS_SELECT] = { .type = NLA_NESTED }, + [NL80211_ATTR_STA_SUPPORT_P2P_PS] = { .type = NLA_U8 }, }; /* policy for the key attributes */ @@ -4006,6 +4007,10 @@ int cfg80211_check_station_change(struct wiphy *wiphy, statype != CFG80211_STA_AP_CLIENT_UNASSOC) return -EINVAL; + if (params->support_p2p_ps != -1 && + statype != CFG80211_STA_AP_CLIENT_UNASSOC) + return -EINVAL; + if (params->aid && !(params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER)) && statype != CFG80211_STA_AP_CLIENT_UNASSOC) @@ -4299,6 +4304,18 @@ static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info) else params.listen_interval = -1; + if (info->attrs[NL80211_ATTR_STA_SUPPORT_P2P_PS]) { + u8 tmp; + + tmp = nla_get_u8(info->attrs[NL80211_ATTR_STA_SUPPORT_P2P_PS]); + if (tmp >= NUM_NL80211_P2P_PS_STATUS) + return -EINVAL; + + params.support_p2p_ps = tmp; + } else { + params.support_p2p_ps = -1; + } + if (!info->attrs[NL80211_ATTR_MAC]) return -EINVAL; @@ -4422,6 +4439,23 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info) params.listen_interval = nla_get_u16(info->attrs[NL80211_ATTR_STA_LISTEN_INTERVAL]); + if (info->attrs[NL80211_ATTR_STA_SUPPORT_P2P_PS]) { + u8 tmp; + + tmp = nla_get_u8(info->attrs[NL80211_ATTR_STA_SUPPORT_P2P_PS]); + if (tmp >= NUM_NL80211_P2P_PS_STATUS) + return -EINVAL; + + params.support_p2p_ps = tmp; + } else { + /* + * if not specified, assume it's supported for P2P GO interface, + * and is NOT supported for AP interface + */ + params.support_p2p_ps = + dev->ieee80211_ptr->iftype == NL80211_IFTYPE_P2P_GO; + } + if (info->attrs[NL80211_ATTR_PEER_AID]) params.aid = nla_get_u16(info->attrs[NL80211_ATTR_PEER_AID]); else From 52cfa1d6146c5aa48360b02533fc7e039a66086e Mon Sep 17 00:00:00 2001 From: Ayala Beker Date: Thu, 17 Mar 2016 15:41:39 +0200 Subject: [PATCH 0259/1649] mac80211: track and tell driver about GO client P2P PS abilities Legacy clients don't support P2P power save mechanism, and thus if a P2P GO has a legacy client connected to it, it should disable P2P PS mechanisms. Let the driver know about this with a new bss_conf parameter. 
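The recalculation this patch introduces reduces to an AND across all associated clients; a stand-alone sketch of that rule (simplified user-space C with hypothetical types; the real code walks the station list under RCU and only notifies the driver when the resulting value changes):

#include <stdbool.h>
#include <stddef.h>

struct client {
	bool associated;
	bool supports_p2p_ps;
};

/* P2P PS stays allowed only while every associated client supports it;
 * a single legacy client turns it off for the whole GO. */
static bool p2p_go_ps_allowed(const struct client *clients, size_t n)
{
	size_t i;

	for (i = 0; i < n; i++) {
		if (clients[i].associated && !clients[i].supports_p2p_ps)
			return false;
	}
	return true;
}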
Signed-off-by: Ayala Beker Signed-off-by: Emmanuel Grumbach Signed-off-by: Johannes Berg --- include/net/mac80211.h | 8 +++++++- net/mac80211/cfg.c | 4 ++++ net/mac80211/sta_info.c | 29 +++++++++++++++++++++++++++++ 3 files changed, 40 insertions(+), 1 deletion(-) diff --git a/include/net/mac80211.h b/include/net/mac80211.h index a53333cb1528..6e346750cb29 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -291,7 +291,7 @@ struct ieee80211_vif_chanctx_switch { * @BSS_CHANGED_PS: PS changed for this BSS (STA mode) * @BSS_CHANGED_TXPOWER: TX power setting changed for this interface * @BSS_CHANGED_P2P_PS: P2P powersave settings (CTWindow, opportunistic PS) - * changed (currently only in P2P client mode, GO mode will be later) + * changed * @BSS_CHANGED_BEACON_INFO: Data from the AP's beacon became available: * currently dtim_period only is under consideration. * @BSS_CHANGED_BANDWIDTH: The bandwidth used by this interface changed, @@ -526,6 +526,9 @@ struct ieee80211_mu_group_data { * userspace), whereas TPC is disabled if %txpower_type is set to * NL80211_TX_POWER_FIXED (use value configured from userspace) * @p2p_noa_attr: P2P NoA attribute for P2P powersave + * @allow_p2p_go_ps: indication for AP or P2P GO interface, whether it's allowed + * to use P2P PS mechanism or not. AP/P2P GO is not allowed to use P2P PS + * if it has associated clients without P2P PS support. */ struct ieee80211_bss_conf { const u8 *bssid; @@ -563,6 +566,7 @@ struct ieee80211_bss_conf { int txpower; enum nl80211_tx_power_setting txpower_type; struct ieee80211_p2p_noa_attr p2p_noa_attr; + bool allow_p2p_go_ps; }; /** @@ -1741,6 +1745,7 @@ struct ieee80211_sta_rates { * size is min(max_amsdu_len, 7935) bytes. * Both additional HT limits must be enforced by the low level driver. * This is defined by the spec (IEEE 802.11-2012 section 8.3.2.2 NOTE 2). + * @support_p2p_ps: indicates whether the STA supports P2P PS mechanism or not. 
* @txq: per-TID data TX queues (if driver uses the TXQ abstraction) */ struct ieee80211_sta { @@ -1761,6 +1766,7 @@ struct ieee80211_sta { bool mfp; u8 max_amsdu_subframes; u16 max_amsdu_len; + bool support_p2p_ps; struct ieee80211_txq *txq[IEEE80211_NUM_TIDS]; diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index b37adb60c9cb..62a90f270f03 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -732,6 +732,7 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev, sdata->vif.bss_conf.beacon_int = params->beacon_interval; sdata->vif.bss_conf.dtim_period = params->dtim_period; sdata->vif.bss_conf.enable_beacon = true; + sdata->vif.bss_conf.allow_p2p_go_ps = sdata->vif.p2p; sdata->vif.bss_conf.ssid_len = params->ssid_len; if (params->ssid_len) @@ -1202,6 +1203,9 @@ static int sta_apply_parameters(struct ieee80211_local *local, params->opmode_notif, band); } + if (params->support_p2p_ps >= 0) + sta->sta.support_p2p_ps = params->support_p2p_ps; + if (ieee80211_vif_is_mesh(&sdata->vif)) sta_apply_mesh_params(local, sta, params); diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index 00c82fb152c0..01e070c6e713 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c @@ -1767,6 +1767,31 @@ void ieee80211_sta_set_buffered(struct ieee80211_sta *pubsta, } EXPORT_SYMBOL(ieee80211_sta_set_buffered); +static void +ieee80211_recalc_p2p_go_ps_allowed(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_local *local = sdata->local; + bool allow_p2p_go_ps = sdata->vif.p2p; + struct sta_info *sta; + + rcu_read_lock(); + list_for_each_entry_rcu(sta, &local->sta_list, list) { + if (sdata != sta->sdata || + !test_sta_flag(sta, WLAN_STA_ASSOC)) + continue; + if (!sta->sta.support_p2p_ps) { + allow_p2p_go_ps = false; + break; + } + } + rcu_read_unlock(); + + if (allow_p2p_go_ps != sdata->vif.bss_conf.allow_p2p_go_ps) { + sdata->vif.bss_conf.allow_p2p_go_ps = allow_p2p_go_ps; + ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_P2P_PS); + } +} + int sta_info_move_state(struct sta_info *sta, enum ieee80211_sta_state new_state) { @@ -1828,12 +1853,16 @@ int sta_info_move_state(struct sta_info *sta, } else if (sta->sta_state == IEEE80211_STA_ASSOC) { clear_bit(WLAN_STA_ASSOC, &sta->_flags); ieee80211_recalc_min_chandef(sta->sdata); + if (!sta->sta.support_p2p_ps) + ieee80211_recalc_p2p_go_ps_allowed(sta->sdata); } break; case IEEE80211_STA_ASSOC: if (sta->sta_state == IEEE80211_STA_AUTH) { set_bit(WLAN_STA_ASSOC, &sta->_flags); ieee80211_recalc_min_chandef(sta->sdata); + if (!sta->sta.support_p2p_ps) + ieee80211_recalc_p2p_go_ps_allowed(sta->sdata); } else if (sta->sta_state == IEEE80211_STA_AUTHORIZED) { if (sta->sdata->vif.type == NL80211_IFTYPE_AP || (sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN && From 749329594b5e0fb612b2de642a692323ddf661dd Mon Sep 17 00:00:00 2001 From: Bob Copeland Date: Fri, 18 Mar 2016 22:03:24 -0400 Subject: [PATCH 0260/1649] mac80211: mesh: fix crash in mesh_path_timer The mesh_path_reclaim() function, called from an rcu callback, cancels the mesh_path_timer associated with a mesh path. Unfortunately, this call can happen much later, perhaps after the hash table itself is destroyed. 
Such a situation led to the following crash in mesh_path_send_to_gates() when dereferencing the tbl pointer: [ 23.901661] BUG: unable to handle kernel NULL pointer dereference at 0000000000000008 [ 23.905516] IP: [] mesh_path_send_to_gates+0x2b/0x740 [ 23.908757] PGD 99ca067 PUD 99c4067 PMD 0 [ 23.910789] Oops: 0000 [#1] PREEMPT SMP DEBUG_PAGEALLOC [ 23.913485] CPU: 0 PID: 0 Comm: swapper/0 Not tainted 4.5.0-rc6-wt+ #43 [ 23.916675] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS Debian-1.8.2-1 04/01/2014 [ 23.920471] task: ffffffff81685500 ti: ffffffff81678000 task.ti: ffffffff81678000 [ 23.922619] RIP: 0010:[] [] mesh_path_send_to_gates+0x2b/0x740 [ 23.925237] RSP: 0018:ffff88000b403d30 EFLAGS: 00010286 [ 23.926739] RAX: 0000000000000000 RBX: ffff880009bc0d20 RCX: 0000000000000102 [ 23.928796] RDX: 000000000000002e RSI: 0000000000000001 RDI: ffff880009bc0d20 [ 23.930895] RBP: ffff88000b403e18 R08: 0000000000000001 R09: 0000000000000001 [ 23.932917] R10: 0000000000000000 R11: 0000000000000001 R12: ffff880009c20940 [ 23.936370] R13: ffff880009bc0e70 R14: ffff880009c21c40 R15: ffff880009bc0d20 [ 23.939823] FS: 0000000000000000(0000) GS:ffff88000b400000(0000) knlGS:0000000000000000 [ 23.943688] CS: 0010 DS: 0000 ES: 0000 CR0: 000000008005003b [ 23.946429] CR2: 0000000000000008 CR3: 00000000099c5000 CR4: 00000000000006b0 [ 23.949861] Stack: [ 23.950840] 000000000000002e ffff880009c20940 ffff88000b403da8 ffffffff8109e551 [ 23.954467] ffffffff82711be2 000000000000002e 0000000000000000 ffffffff8166a5f5 [ 23.958141] 0000000000685ce8 0000000000000246 ffff880009bc0d20 ffff880009c20940 [ 23.961801] Call Trace: [ 23.962987] [ 23.963963] [] ? vprintk_emit+0x351/0x5e0 [ 23.966782] [] ? vprintk_default+0x1f/0x30 [ 23.969529] [] ? printk+0x48/0x50 [ 23.971956] [] mesh_path_timer+0x133/0x160 [ 23.974707] [] ? mesh_nexthop_resolve+0x230/0x230 [ 23.977775] [] call_timer_fn+0xce/0x330 [ 23.980448] [] ? call_timer_fn+0x5/0x330 [ 23.983126] [] ? mesh_nexthop_resolve+0x230/0x230 [ 23.986091] [] run_timer_softirq+0x22c/0x390 Instead of cancelling in the RCU callback, set a new flag to prevent the timer from being rearmed, and then cancel the timer synchronously when freeing the mesh path. This leaves mesh_path_reclaim() doing nothing but kfree, so switch to kfree_rcu(). Fixes: 3b302ada7f0a ("mac80211: mesh: move path tables into if_mesh") Signed-off-by: Bob Copeland Signed-off-by: Johannes Berg --- net/mac80211/mesh.h | 3 +++ net/mac80211/mesh_hwmp.c | 4 ++++ net/mac80211/mesh_pathtbl.c | 33 ++++++++++++++++++--------------- 3 files changed, 25 insertions(+), 15 deletions(-) diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h index cc6854db156e..e1415c952e9c 100644 --- a/net/mac80211/mesh.h +++ b/net/mac80211/mesh.h @@ -32,6 +32,8 @@ * @MESH_PATH_RESOLVED: the mesh path can has been resolved * @MESH_PATH_REQ_QUEUED: there is an unsent path request for this destination * already queued up, waiting for the discovery process to start. + * @MESH_PATH_DELETED: the mesh path has been deleted and should no longer + * be used * * MESH_PATH_RESOLVED is used by the mesh path timer to * decide when to stop or cancel the mesh path discovery. 
@@ -43,6 +45,7 @@ enum mesh_path_flags { MESH_PATH_FIXED = BIT(3), MESH_PATH_RESOLVED = BIT(4), MESH_PATH_REQ_QUEUED = BIT(5), + MESH_PATH_DELETED = BIT(6), }; /** diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c index 5b6aec1a0630..2748cf627ee3 100644 --- a/net/mac80211/mesh_hwmp.c +++ b/net/mac80211/mesh_hwmp.c @@ -1012,6 +1012,10 @@ void mesh_path_start_discovery(struct ieee80211_sub_if_data *sdata) goto enddiscovery; spin_lock_bh(&mpath->state_lock); + if (mpath->flags & MESH_PATH_DELETED) { + spin_unlock_bh(&mpath->state_lock); + goto enddiscovery; + } mpath->flags &= ~MESH_PATH_REQ_QUEUED; if (preq_node->flags & PREQ_Q_F_START) { if (mpath->flags & MESH_PATH_RESOLVING) { diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c index 7455397f8c3b..1c9412a29ca3 100644 --- a/net/mac80211/mesh_pathtbl.c +++ b/net/mac80211/mesh_pathtbl.c @@ -18,6 +18,8 @@ #include "ieee80211_i.h" #include "mesh.h" +static void mesh_path_free_rcu(struct mesh_table *tbl, struct mesh_path *mpath); + static u32 mesh_table_hash(const void *addr, u32 len, u32 seed) { /* Use last four bytes of hw addr as hash index */ @@ -40,18 +42,12 @@ static inline bool mpath_expired(struct mesh_path *mpath) !(mpath->flags & MESH_PATH_FIXED); } -static void mesh_path_reclaim(struct rcu_head *rp) -{ - struct mesh_path *mpath = container_of(rp, struct mesh_path, rcu); - - del_timer_sync(&mpath->timer); - kfree(mpath); -} - -static void mesh_path_rht_free(void *ptr, void *unused_arg) +static void mesh_path_rht_free(void *ptr, void *tblptr) { struct mesh_path *mpath = ptr; - call_rcu(&mpath->rcu, mesh_path_reclaim); + struct mesh_table *tbl = tblptr; + + mesh_path_free_rcu(tbl, mpath); } static struct mesh_table *mesh_table_alloc(void) @@ -77,7 +73,7 @@ static struct mesh_table *mesh_table_alloc(void) static void mesh_table_free(struct mesh_table *tbl) { rhashtable_free_and_destroy(&tbl->rhead, - mesh_path_rht_free, NULL); + mesh_path_rht_free, tbl); kfree(tbl); } @@ -551,18 +547,25 @@ out: rhashtable_walk_exit(&iter); } -static void __mesh_path_del(struct mesh_table *tbl, struct mesh_path *mpath) +static void mesh_path_free_rcu(struct mesh_table *tbl, + struct mesh_path *mpath) { struct ieee80211_sub_if_data *sdata = mpath->sdata; - rhashtable_remove_fast(&tbl->rhead, &mpath->rhash, mesh_rht_params); spin_lock_bh(&mpath->state_lock); - mpath->flags |= MESH_PATH_RESOLVING; + mpath->flags |= MESH_PATH_RESOLVING | MESH_PATH_DELETED; mesh_gate_del(tbl, mpath); - call_rcu(&mpath->rcu, mesh_path_reclaim); spin_unlock_bh(&mpath->state_lock); + del_timer_sync(&mpath->timer); atomic_dec(&sdata->u.mesh.mpaths); atomic_dec(&tbl->entries); + kfree_rcu(mpath, rcu); +} + +static void __mesh_path_del(struct mesh_table *tbl, struct mesh_path *mpath) +{ + rhashtable_remove_fast(&tbl->rhead, &mpath->rhash, mesh_rht_params); + mesh_path_free_rcu(tbl, mpath); } /** From 0aa7fabbd5d9da1f8a8fdc3e2837c532bcfa5664 Mon Sep 17 00:00:00 2001 From: Bob Copeland Date: Fri, 18 Mar 2016 22:11:28 -0400 Subject: [PATCH 0261/1649] mac80211: mesh: handle failed alloc for rmc cache In the unlikely case that mesh_rmc_init() fails with -ENOMEM, the rmc pointer will be left as NULL but the interface is still operational because ieee80211_mesh_init_sdata() is not allowed to fail. If this happens, we would blindly dereference rmc when checking whether a multicast frame is in the cache. Instead just drop the frames in the forwarding path. 
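A condensed sketch of the fail-closed behaviour (plain C, hypothetical names; in mac80211 the added check simply returns -1 from mesh_rmc_check() so the forwarding path drops the frame instead of dereferencing a NULL cache):

#include <stdbool.h>
#include <stddef.h>

/* Hypothetical stand-in for the per-interface duplicate cache. */
struct dup_cache {
	unsigned int last_seqnum;
	bool valid;
};

static bool cache_lookup(struct dup_cache *c, unsigned int seqnum)
{
	bool dup = c->valid && c->last_seqnum == seqnum;

	c->last_seqnum = seqnum;
	c->valid = true;
	return dup;
}

/* Fail closed: with no cache there is no way to tell duplicates apart,
 * so report "duplicate" and let the caller drop the frame. */
static bool frame_is_duplicate(struct dup_cache *cache, unsigned int seqnum)
{
	if (!cache)
		return true;
	return cache_lookup(cache, seqnum);
}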
Signed-off-by: Bob Copeland Signed-off-by: Johannes Berg --- net/mac80211/mesh.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c index a216c439b6f2..d0d8eeaa8129 100644 --- a/net/mac80211/mesh.c +++ b/net/mac80211/mesh.c @@ -220,6 +220,9 @@ int mesh_rmc_check(struct ieee80211_sub_if_data *sdata, u8 idx; struct rmc_entry *p, *n; + if (!rmc) + return -1; + /* Don't care about endianness since only match matters */ memcpy(&seqnum, &mesh_hdr->seqnum, sizeof(mesh_hdr->seqnum)); idx = le32_to_cpu(mesh_hdr->seqnum) & rmc->idx_mask; From 47a0489ce1e518f4936c7fedb93b3d2abd7ccd2e Mon Sep 17 00:00:00 2001 From: Bob Copeland Date: Fri, 18 Mar 2016 22:11:29 -0400 Subject: [PATCH 0262/1649] mac80211: mesh: use hlist for rmc cache The RMC cache has 256 list heads plus a u32, which puts it at the unfortunate size of 4104 bytes with padding. kmalloc() will then round this up to the next power-of-two, so we wind up actually using two pages here where most of the second is wasted. Switch to hlist heads here to reduce the structure size down to fit within a page. Signed-off-by: Bob Copeland Signed-off-by: Johannes Berg --- net/mac80211/mesh.c | 18 ++++++++++-------- net/mac80211/mesh.h | 4 ++-- 2 files changed, 12 insertions(+), 10 deletions(-) diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c index d0d8eeaa8129..1a2aaf461e98 100644 --- a/net/mac80211/mesh.c +++ b/net/mac80211/mesh.c @@ -174,22 +174,23 @@ int mesh_rmc_init(struct ieee80211_sub_if_data *sdata) return -ENOMEM; sdata->u.mesh.rmc->idx_mask = RMC_BUCKETS - 1; for (i = 0; i < RMC_BUCKETS; i++) - INIT_LIST_HEAD(&sdata->u.mesh.rmc->bucket[i]); + INIT_HLIST_HEAD(&sdata->u.mesh.rmc->bucket[i]); return 0; } void mesh_rmc_free(struct ieee80211_sub_if_data *sdata) { struct mesh_rmc *rmc = sdata->u.mesh.rmc; - struct rmc_entry *p, *n; + struct rmc_entry *p; + struct hlist_node *n; int i; if (!sdata->u.mesh.rmc) return; for (i = 0; i < RMC_BUCKETS; i++) { - list_for_each_entry_safe(p, n, &rmc->bucket[i], list) { - list_del(&p->list); + hlist_for_each_entry_safe(p, n, &rmc->bucket[i], list) { + hlist_del(&p->list); kmem_cache_free(rm_cache, p); } } @@ -218,7 +219,8 @@ int mesh_rmc_check(struct ieee80211_sub_if_data *sdata, u32 seqnum = 0; int entries = 0; u8 idx; - struct rmc_entry *p, *n; + struct rmc_entry *p; + struct hlist_node *n; if (!rmc) return -1; @@ -226,11 +228,11 @@ int mesh_rmc_check(struct ieee80211_sub_if_data *sdata, /* Don't care about endianness since only match matters */ memcpy(&seqnum, &mesh_hdr->seqnum, sizeof(mesh_hdr->seqnum)); idx = le32_to_cpu(mesh_hdr->seqnum) & rmc->idx_mask; - list_for_each_entry_safe(p, n, &rmc->bucket[idx], list) { + hlist_for_each_entry_safe(p, n, &rmc->bucket[idx], list) { ++entries; if (time_after(jiffies, p->exp_time) || entries == RMC_QUEUE_MAX_LEN) { - list_del(&p->list); + hlist_del(&p->list); kmem_cache_free(rm_cache, p); --entries; } else if ((seqnum == p->seqnum) && ether_addr_equal(sa, p->sa)) @@ -244,7 +246,7 @@ int mesh_rmc_check(struct ieee80211_sub_if_data *sdata, p->seqnum = seqnum; p->exp_time = jiffies + RMC_TIMEOUT; memcpy(p->sa, sa, ETH_ALEN); - list_add(&p->list, &rmc->bucket[idx]); + hlist_add_head(&p->list, &rmc->bucket[idx]); return 0; } diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h index e1415c952e9c..bc3f9a32b5a4 100644 --- a/net/mac80211/mesh.h +++ b/net/mac80211/mesh.h @@ -158,14 +158,14 @@ struct mesh_table { * that are found in the cache. 
*/ struct rmc_entry { - struct list_head list; + struct hlist_node list; u32 seqnum; unsigned long exp_time; u8 sa[ETH_ALEN]; }; struct mesh_rmc { - struct list_head bucket[RMC_BUCKETS]; + struct hlist_head bucket[RMC_BUCKETS]; u32 idx_mask; }; From 18b27ff7d2e232b0f07f2f51aa8052ff2a617908 Mon Sep 17 00:00:00 2001 From: Bob Copeland Date: Fri, 18 Mar 2016 22:11:30 -0400 Subject: [PATCH 0263/1649] mac80211: mesh: embed gates hlist head directly Since we have converted the mesh path tables to rhashtable, we are no longer swapping out the entire mesh_pathtbl pointer with RCU. As a result, we no longer need indirection to the hlist head for the gates list and can simply embed it, saving a pair of pointer-sized allocations. Signed-off-by: Bob Copeland Signed-off-by: Johannes Berg --- net/mac80211/mesh.h | 2 +- net/mac80211/mesh_pathtbl.c | 18 ++++-------------- 2 files changed, 5 insertions(+), 15 deletions(-) diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h index bc3f9a32b5a4..46b540a25d9d 100644 --- a/net/mac80211/mesh.h +++ b/net/mac80211/mesh.h @@ -134,7 +134,7 @@ struct mesh_path { */ struct mesh_table { atomic_t entries; /* Up to MAX_MESH_NEIGHBOURS */ - struct hlist_head *known_gates; + struct hlist_head known_gates; spinlock_t gates_lock; struct rhashtable rhead; diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c index 1c9412a29ca3..6db2ddfa0695 100644 --- a/net/mac80211/mesh_pathtbl.c +++ b/net/mac80211/mesh_pathtbl.c @@ -58,12 +58,7 @@ static struct mesh_table *mesh_table_alloc(void) if (!newtbl) return NULL; - newtbl->known_gates = kzalloc(sizeof(struct hlist_head), GFP_ATOMIC); - if (!newtbl->known_gates) { - kfree(newtbl); - return NULL; - } - INIT_HLIST_HEAD(newtbl->known_gates); + INIT_HLIST_HEAD(&newtbl->known_gates); atomic_set(&newtbl->entries, 0); spin_lock_init(&newtbl->gates_lock); @@ -341,7 +336,7 @@ int mesh_path_add_gate(struct mesh_path *mpath) mpath->sdata->u.mesh.num_gates++; spin_lock(&tbl->gates_lock); - hlist_add_head_rcu(&mpath->gate_list, tbl->known_gates); + hlist_add_head_rcu(&mpath->gate_list, &tbl->known_gates); spin_unlock(&tbl->gates_lock); spin_unlock_bh(&mpath->state_lock); @@ -759,16 +754,11 @@ int mesh_path_send_to_gates(struct mesh_path *mpath) struct mesh_path *from_mpath = mpath; struct mesh_path *gate; bool copy = false; - struct hlist_head *known_gates; tbl = sdata->u.mesh.mesh_paths; - known_gates = tbl->known_gates; - - if (!known_gates) - return -EHOSTUNREACH; rcu_read_lock(); - hlist_for_each_entry_rcu(gate, known_gates, gate_list) { + hlist_for_each_entry_rcu(gate, &tbl->known_gates, gate_list) { if (gate->flags & MESH_PATH_ACTIVE) { mpath_dbg(sdata, "Forwarding to %pM\n", gate->dst); mesh_path_move_to_queue(gate, from_mpath, copy); @@ -781,7 +771,7 @@ int mesh_path_send_to_gates(struct mesh_path *mpath) } } - hlist_for_each_entry_rcu(gate, known_gates, gate_list) { + hlist_for_each_entry_rcu(gate, &tbl->known_gates, gate_list) { mpath_dbg(sdata, "Sending to %pM\n", gate->dst); mesh_path_tx_pending(gate); } From 3257523bed496316dad95d5a341bfd49ac16624b Mon Sep 17 00:00:00 2001 From: Bob Copeland Date: Fri, 18 Mar 2016 22:11:31 -0400 Subject: [PATCH 0264/1649] mac80211: mesh: reorder structure members Reduce padding waste in struct mesh_table and struct rmc_entry by moving the smaller fields to the end. 
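The saving comes purely from the compiler's alignment rules; a generic example (not the mesh structures themselves) that can be compiled on a typical LP64 system to see the effect:

#include <stdio.h>

struct mixed_order {		/* 1 + 7 pad + 8 + 1 + 7 pad + 8 = 32 bytes */
	unsigned char flag;
	unsigned long a;
	unsigned char tag;
	unsigned long b;
};

struct large_first {		/* 8 + 8 + 1 + 1 + 6 tail pad = 24 bytes */
	unsigned long a;
	unsigned long b;
	unsigned char flag;
	unsigned char tag;
};

int main(void)
{
	printf("mixed_order: %zu, large_first: %zu\n",
	       sizeof(struct mixed_order), sizeof(struct large_first));
	return 0;
}

The sizes in the comments assume 8-byte unsigned long and 8-byte alignment; the exact numbers differ on other ABIs, but the larger-members-first layout here is never bigger.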
Signed-off-by: Bob Copeland Signed-off-by: Johannes Berg --- net/mac80211/mesh.h | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h index 46b540a25d9d..4a59c034cc6d 100644 --- a/net/mac80211/mesh.h +++ b/net/mac80211/mesh.h @@ -133,11 +133,10 @@ struct mesh_path { * @rhash: the rhashtable containing struct mesh_paths, keyed by dest addr */ struct mesh_table { - atomic_t entries; /* Up to MAX_MESH_NEIGHBOURS */ struct hlist_head known_gates; spinlock_t gates_lock; - struct rhashtable rhead; + atomic_t entries; /* Up to MAX_MESH_NEIGHBOURS */ }; /* Recent multicast cache */ @@ -159,8 +158,8 @@ struct mesh_table { */ struct rmc_entry { struct hlist_node list; - u32 seqnum; unsigned long exp_time; + u32 seqnum; u8 sa[ETH_ALEN]; }; From 68bb54b47ea1130e57049d86d172d0e098edb3f4 Mon Sep 17 00:00:00 2001 From: Bob Copeland Date: Fri, 18 Mar 2016 22:11:32 -0400 Subject: [PATCH 0265/1649] mac80211: mesh: fix mesh path kerneldoc Several of the mesh path fields are undocumented and some of the documentation is no longer correct or relevant after the switch to rhashtable. Clean up the kernel doc accordingly and reorder some fields to match the structure layout. Signed-off-by: Bob Copeland Signed-off-by: Johannes Berg --- net/mac80211/mesh.h | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h index 4a59c034cc6d..f298987228c9 100644 --- a/net/mac80211/mesh.h +++ b/net/mac80211/mesh.h @@ -21,8 +21,6 @@ /** * enum mesh_path_flags - mac80211 mesh path flags * - * - * * @MESH_PATH_ACTIVE: the mesh path can be used for forwarding * @MESH_PATH_RESOLVING: the discovery process is running for this mesh path * @MESH_PATH_SN_VALID: the mesh path contains a valid destination sequence @@ -70,12 +68,16 @@ enum mesh_deferred_task_flags { * struct mesh_path - mac80211 mesh path structure * * @dst: mesh path destination mac address + * @mpp: mesh proxy mac address + * @rhash: rhashtable list pointer + * @gate_list: list pointer for known gates list * @sdata: mesh subif * @next_hop: mesh neighbor to which frames for this destination will be * forwarded * @timer: mesh path discovery timer * @frame_queue: pending queue for frames sent to this destination while the * path is unresolved + * @rcu: rcu head for freeing mesh path * @sn: target sequence number * @metric: current metric to this destination * @hop_count: hops to destination @@ -94,10 +96,10 @@ enum mesh_deferred_task_flags { * @is_gate: the destination station of this path is a mesh gate * * - * The combination of dst and sdata is unique in the mesh path table. Since the - * next_hop STA is only protected by RCU as well, deleting the STA must also - * remove/substitute the mesh_path structure and wait until that is no longer - * reachable before destroying the STA completely. + * The dst address is unique in the mesh path table. Since the mesh_path is + * protected by RCU, deleting the next_hop STA must remove / substitute the + * mesh_path structure and wait until that is no longer reachable before + * destroying the STA completely. */ struct mesh_path { u8 dst[ETH_ALEN]; @@ -127,10 +129,11 @@ struct mesh_path { /** * struct mesh_table * - * @entries: number of entries in the table * @known_gates: list of known mesh gates and their mpaths by the station. The * gate's mpath may or may not be resolved and active. 
- * @rhash: the rhashtable containing struct mesh_paths, keyed by dest addr + * @gates_lock: protects updates to known_gates + * @rhead: the rhashtable containing struct mesh_paths, keyed by dest addr + * @entries: number of entries in the table */ struct mesh_table { struct hlist_head known_gates; @@ -151,6 +154,7 @@ struct mesh_table { * @seqnum: mesh sequence number of the frame * @exp_time: expiration time of the entry, in jiffies * @sa: source address of the frame + * @list: hashtable list pointer * * The Recent Multicast Cache keeps track of the latest multicast frames that * have been received by a mesh interface and discards received multicast frames From 0371a08fbb3e557f19db41e47a199ad8300c9c97 Mon Sep 17 00:00:00 2001 From: Bob Copeland Date: Sat, 26 Mar 2016 11:27:18 -0400 Subject: [PATCH 0266/1649] mac80211: mesh: fix cleanup for mesh pathtable The mesh path table needs to be around for the entire time the interface is in mesh mode, as users can perform an mpath dump at any time. The existing path table lifetime is instead tied to the mesh BSS which can cause crashes when different MBSSes are joined in the context of a single interface, or when the path table is dumped when no MBSS is joined. Introduce a new function to perform the final teardown of the interface and perform path table cleanup there. We already free the individual path elements when the leaving the mesh so no additional cleanup is needed there. This fixes the following crash: [ 47.753026] BUG: unable to handle kernel paging request at fffffff0 [ 47.753026] IP: [] kthread_data+0xa/0xe [ 47.753026] *pde = 00741067 *pte = 00000000 [ 47.753026] Oops: 0000 [#4] PREEMPT [ 47.753026] Modules linked in: ppp_generic slhc 8021q garp mrp sch_fq_codel iptable_mangle ipt_MASQUERADE nf_nat_masquerade_ipv4 iptable_nat nf_conntrack_ipv4 nf_defrag_ipv4 nf_nat_ipv4 nf_nat ip_tables ath9k_htc ath5k 8139too ath10k_pci ath10k_core arc4 ath9k ath9k_common ath9k_hw mac80211 ath cfg80211 cpufreq_powersave br_netfilter bridge stp llc ipw usb_wwan sierra_net usbnet af_alg natsemi via_rhine mii iTCO_wdt iTCO_vendor_support gpio_ich sierra coretemp pcspkr i2c_i801 lpc_ich ata_generic ata_piix libata ide_pci_generic piix e1000e igb i2c_algo_bit ptp pps_core [last unloaded: 8139too] [ 47.753026] CPU: 0 PID: 12 Comm: kworker/u2:1 Tainted: G D W 4.5.0-wt-V3 #6 [ 47.753026] Hardware name: To Be Filled By O.E.M./To be filled by O.E.M., BIOS 080016 11/07/2014 [ 47.753026] task: f645a0c0 ti: f6462000 task.ti: f6462000 [ 47.753026] EIP: 0060:[] EFLAGS: 00010002 CPU: 0 [ 47.753026] EIP is at kthread_data+0xa/0xe [ 47.753026] EAX: 00000000 EBX: 00000000 ECX: 00000000 EDX: 00000000 [ 47.753026] ESI: f645a0c0 EDI: f645a2fc EBP: f6463a80 ESP: f6463a78 [ 47.753026] DS: 007b ES: 007b FS: 0000 GS: 0000 SS: 0068 [ 47.753026] CR0: 8005003b CR2: 00000014 CR3: 353e5000 CR4: 00000690 [ 47.753026] Stack: [ 47.753026] c0236866 00000000 f6463aac c05768b4 00000009 f6463ba8 f6463ab0 c0247010 [ 47.753026] 00000000 f645a0c0 f6464000 00000009 f6463ba8 f6463ab8 c0576eb2 f645a0c0 [ 47.753026] f6463aec c0228be4 c06335a4 f6463adc f6463ad0 c06c06d4 f6463ae4 c02471b0 [ 47.753026] Call Trace: [ 47.753026] [] ? wq_worker_sleeping+0xb/0x78 [ 47.753026] [] __schedule+0xda/0x587 [ 47.753026] [] ? vprintk_default+0x12/0x14 [ 47.753026] [] schedule+0x72/0x89 [ 47.753026] [] do_exit+0xb8/0x71d [ 47.753026] [] ? kmsg_dump+0xa9/0xae [ 47.753026] [] oops_end+0x69/0x70 [ 47.753026] [] no_context+0x1bb/0x1c5 [ 47.753026] [] __bad_area_nosemaphore+0x136/0x140 [ 47.753026] [] ? 
vmalloc_sync_all+0x19a/0x19a [ 47.753026] [] bad_area_nosemaphore+0xd/0x10 [ 47.753026] [] __do_page_fault+0x26c/0x320 [ 47.753026] [] ? vmalloc_sync_all+0x19a/0x19a [ 47.753026] [] do_page_fault+0xb/0xd [ 47.753026] [] error_code+0x58/0x60 [ 47.753026] [] ? vmalloc_sync_all+0x19a/0x19a [ 47.753026] [] ? kthread_data+0xa/0xe [ 47.753026] [] ? wq_worker_sleeping+0xb/0x78 [ 47.753026] [] __schedule+0xda/0x587 [ 47.753026] [] ? vprintk_default+0x12/0x14 [ 47.753026] [] schedule+0x72/0x89 [ 47.753026] [] do_exit+0xb8/0x71d [ 47.753026] [] ? kmsg_dump+0xa9/0xae [ 47.753026] [] oops_end+0x69/0x70 [ 47.753026] [] no_context+0x1bb/0x1c5 [ 47.753026] [] __bad_area_nosemaphore+0x136/0x140 [ 47.753026] [] ? vmalloc_sync_all+0x19a/0x19a [ 47.753026] [] bad_area_nosemaphore+0xd/0x10 [ 47.753026] [] __do_page_fault+0x26c/0x320 [ 47.753026] [] ? vmalloc_sync_all+0x19a/0x19a [ 47.753026] [] do_page_fault+0xb/0xd [ 47.753026] [] error_code+0x58/0x60 [ 47.753026] [] ? vmalloc_sync_all+0x19a/0x19a [ 47.753026] [] ? kthread_data+0xa/0xe [ 47.753026] [] ? wq_worker_sleeping+0xb/0x78 [ 47.753026] [] __schedule+0xda/0x587 [ 47.753026] [] ? put_io_context_active+0x6d/0x95 [ 47.753026] [] schedule+0x72/0x89 [ 47.753026] [] do_exit+0x6cc/0x71d [ 47.753026] [] oops_end+0x69/0x70 [ 47.753026] [] no_context+0x1bb/0x1c5 [ 47.753026] [] __bad_area_nosemaphore+0x136/0x140 [ 47.753026] [] ? vmalloc_sync_all+0x19a/0x19a [ 47.753026] [] bad_area_nosemaphore+0xd/0x10 [ 47.753026] [] __do_page_fault+0x26c/0x320 [ 47.753026] [] ? debug_smp_processor_id+0x12/0x16 [ 47.753026] [] ? __switch_to+0x24/0x40e [ 47.753026] [] ? vmalloc_sync_all+0x19a/0x19a [ 47.753026] [] do_page_fault+0xb/0xd [ 47.753026] [] error_code+0x58/0x60 [ 47.753026] [] ? vmalloc_sync_all+0x19a/0x19a [ 47.753026] [] ? rhashtable_walk_init+0x5c/0x93 [ 47.753026] [] mesh_path_tbl_expire.isra.24+0x19/0x82 [mac80211] [ 47.753026] [] mesh_path_expire+0x11/0x1f [mac80211] [ 47.753026] [] ieee80211_mesh_work+0x73/0x1a9 [mac80211] [ 47.753026] [] ieee80211_iface_work+0x2ff/0x311 [mac80211] [ 47.753026] [] process_one_work+0x14b/0x24e [ 47.753026] [] worker_thread+0x249/0x343 [ 47.753026] [] ? process_scheduled_works+0x24/0x24 [ 47.753026] [] kthread+0x9e/0xa3 [ 47.753026] [] ret_from_kernel_thread+0x20/0x40 [ 47.753026] [] ? 
kthread_parkme+0x18/0x18 [ 47.753026] Code: 6b c0 85 c0 75 05 e8 fb 74 fc ff 89 f8 84 c0 75 08 8d 45 e8 e8 34 dd 33 00 83 c4 28 5b 5e 5f 5d c3 55 8b 80 10 02 00 00 89 e5 5d <8b> 40 f0 c3 55 b9 04 00 00 00 89 e5 52 8b 90 10 02 00 00 8d 45 [ 47.753026] EIP: [] kthread_data+0xa/0xe SS:ESP 0068:f6463a78 [ 47.753026] CR2: 00000000fffffff0 [ 47.753026] ---[ end trace 867ca0bdd0767790 ]--- Fixes: 3b302ada7f0a ("mac80211: mesh: move path tables into if_mesh") Reported-by: Fred Veldini Signed-off-by: Bob Copeland Signed-off-by: Johannes Berg --- net/mac80211/iface.c | 2 +- net/mac80211/mesh.c | 7 ++++++- net/mac80211/mesh.h | 1 + 3 files changed, 8 insertions(+), 2 deletions(-) diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index 453b4e741780..097ece8b5c02 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c @@ -1093,7 +1093,7 @@ static void ieee80211_teardown_sdata(struct ieee80211_sub_if_data *sdata) sdata->fragment_next = 0; if (ieee80211_vif_is_mesh(&sdata->vif)) - mesh_rmc_free(sdata); + ieee80211_mesh_teardown_sdata(sdata); } static void ieee80211_uninit(struct net_device *dev) diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c index 1a2aaf461e98..dcc1facc807c 100644 --- a/net/mac80211/mesh.c +++ b/net/mac80211/mesh.c @@ -905,7 +905,6 @@ void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata) /* flush STAs and mpaths on this iface */ sta_info_flush(sdata); mesh_path_flush_by_iface(sdata); - mesh_pathtbl_unregister(sdata); /* free all potentially still buffered group-addressed frames */ local->total_ps_buffered -= skb_queue_len(&ifmsh->ps.bc_buf); @@ -1403,3 +1402,9 @@ void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata) sdata->vif.bss_conf.bssid = zero_addr; } + +void ieee80211_mesh_teardown_sdata(struct ieee80211_sub_if_data *sdata) +{ + mesh_rmc_free(sdata); + mesh_pathtbl_unregister(sdata); +} diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h index f298987228c9..26b9ccbe1fce 100644 --- a/net/mac80211/mesh.h +++ b/net/mac80211/mesh.h @@ -219,6 +219,7 @@ void ieee80211s_init(void); void ieee80211s_update_metric(struct ieee80211_local *local, struct sta_info *sta, struct sk_buff *skb); void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata); +void ieee80211_mesh_teardown_sdata(struct ieee80211_sub_if_data *sdata); int ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata); void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata); void ieee80211_mesh_root_setup(struct ieee80211_if_mesh *ifmsh); From e596af827960c41a6051d4e719bafcfb7da11b64 Mon Sep 17 00:00:00 2001 From: Bob Copeland Date: Sat, 26 Mar 2016 11:27:19 -0400 Subject: [PATCH 0267/1649] mac80211: mesh: flush paths outside of plink lock Lockdep warned of a lock dependency between the mesh_plink lock and the internal lock for the rhashtable. The problem is that the rhashtable code uses a spin lock with softirqs enabled, while mesh_plink_timer executes a walk (to flush paths on a state change) inside a softirq with the plink lock held. This leads to the following deadlock if the timer fires while rht lock is held on this CPU, and plink lock is held on another CPU: CPU0 CPU1 ---- ---- lock(&(&ht->lock)->rlock); local_irq_disable(); lock(&(&sta->mesh->plink_lock)->rlock); lock(&(&ht->lock)->rlock); lock(&(&sta->mesh->plink_lock)->rlock); *** DEADLOCK *** Fix by waiting until we drop the plink lock to flush paths. 
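A reduced sketch of the fix pattern (pthread mutexes used purely for illustration; the real locks are the plink spinlock and the rhashtable's internal lock). The idea is to record the pending work under the first lock and only do the part that takes the second lock after the first has been dropped:

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;

static void flush_paths(void)
{
	/* Takes the path table's own lock internally. */
}

static void deactivate_peer(bool *needs_flush)
{
	pthread_mutex_lock(&state_lock);
	/* ... update peer state ... */
	*needs_flush = true;		/* remember the work, do not do it here */
	pthread_mutex_unlock(&state_lock);
}

static void peer_event(void)
{
	bool flush = false;

	deactivate_peer(&flush);
	if (flush)
		flush_paths();		/* second lock taken only after the first is dropped */
}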
Fixes: d48a1b7cd439 ("mac80211: mesh: convert path table to rhashtable") Signed-off-by: Bob Copeland Signed-off-by: Johannes Berg --- net/mac80211/mesh_plink.c | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c index a07e93c21c9e..ecfba8ad29e4 100644 --- a/net/mac80211/mesh_plink.c +++ b/net/mac80211/mesh_plink.c @@ -331,7 +331,9 @@ free: * * @sta: mesh peer link to deactivate * - * All mesh paths with this peer as next hop will be flushed + * Mesh paths with this peer as next hop should be flushed + * by the caller outside of plink_lock. + * * Returns beacon changed flag if the beacon content changed. * * Locking: the caller must hold sta->mesh->plink_lock @@ -346,7 +348,6 @@ static u32 __mesh_plink_deactivate(struct sta_info *sta) if (sta->mesh->plink_state == NL80211_PLINK_ESTAB) changed = mesh_plink_dec_estab_count(sdata); sta->mesh->plink_state = NL80211_PLINK_BLOCKED; - mesh_path_flush_by_nexthop(sta); ieee80211_mps_sta_status_update(sta); changed |= ieee80211_mps_set_sta_local_pm(sta, @@ -374,6 +375,7 @@ u32 mesh_plink_deactivate(struct sta_info *sta) sta->sta.addr, sta->mesh->llid, sta->mesh->plid, sta->mesh->reason); spin_unlock_bh(&sta->mesh->plink_lock); + mesh_path_flush_by_nexthop(sta); return changed; } @@ -748,6 +750,7 @@ u32 mesh_plink_block(struct sta_info *sta) changed = __mesh_plink_deactivate(sta); sta->mesh->plink_state = NL80211_PLINK_BLOCKED; spin_unlock_bh(&sta->mesh->plink_lock); + mesh_path_flush_by_nexthop(sta); return changed; } @@ -797,6 +800,7 @@ static u32 mesh_plink_fsm(struct ieee80211_sub_if_data *sdata, struct mesh_config *mshcfg = &sdata->u.mesh.mshcfg; enum ieee80211_self_protected_actioncode action = 0; u32 changed = 0; + bool flush = false; mpl_dbg(sdata, "peer %pM in state %s got event %s\n", sta->sta.addr, mplstates[sta->mesh->plink_state], mplevents[event]); @@ -885,6 +889,7 @@ static u32 mesh_plink_fsm(struct ieee80211_sub_if_data *sdata, changed |= mesh_set_short_slot_time(sdata); mesh_plink_close(sdata, sta, event); action = WLAN_SP_MESH_PEERING_CLOSE; + flush = true; break; case OPN_ACPT: action = WLAN_SP_MESH_PEERING_CONFIRM; @@ -916,6 +921,8 @@ static u32 mesh_plink_fsm(struct ieee80211_sub_if_data *sdata, break; } spin_unlock_bh(&sta->mesh->plink_lock); + if (flush) + mesh_path_flush_by_nexthop(sta); if (action) { mesh_plink_frame_tx(sdata, sta, action, sta->sta.addr, sta->mesh->llid, sta->mesh->plid, From 1aab144c507a9849d5b4557d6d78db185ceaef37 Mon Sep 17 00:00:00 2001 From: Bruce Allan Date: Tue, 22 Dec 2015 13:43:44 -0800 Subject: [PATCH 0268/1649] fm10k: Move constants to the right of binary operators The semantic patch that makes this change is available in scripts/coccinelle/misc/compare_const_fl.cocci. 
More information about semantic patching is available at http://coccinelle.lip6.fr/ Signed-off-by: Bruce Allan Tested-by: Krishneil Singh Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/fm10k/fm10k_main.c | 2 +- drivers/net/ethernet/intel/fm10k/fm10k_pci.c | 16 ++++++++-------- drivers/net/ethernet/intel/fm10k/fm10k_pf.c | 6 +++--- 3 files changed, 12 insertions(+), 12 deletions(-) diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c index 4de17db3808c..d411aa506661 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c @@ -420,7 +420,7 @@ static inline void fm10k_rx_hash(struct fm10k_ring *ring, return; skb_set_hash(skb, le32_to_cpu(rx_desc->d.rss), - (FM10K_RSS_L4_TYPES_MASK & (1ul << rss_type)) ? + ((1ul << rss_type) & FM10K_RSS_L4_TYPES_MASK) ? PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3); } diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c index 4eb7a6fa6b0d..86700a45fe13 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c @@ -579,7 +579,7 @@ static void fm10k_configure_tx_ring(struct fm10k_intfc *interface, u64 tdba = ring->dma; u32 size = ring->count * sizeof(struct fm10k_tx_desc); u32 txint = FM10K_INT_MAP_DISABLE; - u32 txdctl = FM10K_TXDCTL_ENABLE | (1 << FM10K_TXDCTL_MAX_TIME_SHIFT); + u32 txdctl = (1 << FM10K_TXDCTL_MAX_TIME_SHIFT) | FM10K_TXDCTL_ENABLE; u8 reg_idx = ring->reg_idx; /* disable queue to avoid issues while updating state */ @@ -903,8 +903,8 @@ static irqreturn_t fm10k_msix_mbx_vf(int __always_unused irq, void *data) /* re-enable mailbox interrupt and indicate 20us delay */ fm10k_write_reg(hw, FM10K_VFITR(FM10K_MBX_VECTOR), - FM10K_ITR_ENABLE | (FM10K_MBX_INT_DELAY >> - hw->mac.itr_scale)); + (FM10K_MBX_INT_DELAY >> hw->mac.itr_scale) | + FM10K_ITR_ENABLE); /* service upstream mailbox */ if (fm10k_mbx_trylock(interface)) { @@ -1135,8 +1135,8 @@ static irqreturn_t fm10k_msix_mbx_pf(int __always_unused irq, void *data) /* re-enable mailbox interrupt and indicate 20us delay */ fm10k_write_reg(hw, FM10K_ITR(FM10K_MBX_VECTOR), - FM10K_ITR_ENABLE | (FM10K_MBX_INT_DELAY >> - hw->mac.itr_scale)); + (FM10K_MBX_INT_DELAY >> hw->mac.itr_scale) | + FM10K_ITR_ENABLE); return IRQ_HANDLED; } @@ -1253,7 +1253,7 @@ static int fm10k_mbx_request_irq_vf(struct fm10k_intfc *interface) int err; /* Use timer0 for interrupt moderation on the mailbox */ - u32 itr = FM10K_INT_MAP_TIMER0 | entry->entry; + u32 itr = entry->entry | FM10K_INT_MAP_TIMER0; /* register mailbox handlers */ err = hw->mbx.ops.register_handlers(&hw->mbx, vf_mbx_data); @@ -1420,8 +1420,8 @@ static int fm10k_mbx_request_irq_pf(struct fm10k_intfc *interface) int err; /* Use timer0 for interrupt moderation on the mailbox */ - u32 mbx_itr = FM10K_INT_MAP_TIMER0 | entry->entry; - u32 other_itr = FM10K_INT_MAP_IMMEDIATE | entry->entry; + u32 mbx_itr = entry->entry | FM10K_INT_MAP_TIMER0; + u32 other_itr = entry->entry | FM10K_INT_MAP_IMMEDIATE; /* register mailbox handlers */ err = hw->mbx.ops.register_handlers(&hw->mbx, pf_mbx_data); diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c index 62ccebc5f728..34a0b035887d 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c @@ -711,8 +711,8 @@ static s32 fm10k_iov_assign_resources_pf(struct fm10k_hw *hw, u16 num_vfs, FM10K_RXDCTL_WRITE_BACK_MIN_DELAY | 
FM10K_RXDCTL_DROP_ON_EMPTY); fm10k_write_reg(hw, FM10K_RXQCTL(vf_q_idx), - FM10K_RXQCTL_VF | - (i << FM10K_RXQCTL_VF_SHIFT)); + (i << FM10K_RXQCTL_VF_SHIFT) | + FM10K_RXQCTL_VF); /* map queue pair to VF */ fm10k_write_reg(hw, FM10K_TQMAP(qmap_idx), vf_q_idx); @@ -987,7 +987,7 @@ static s32 fm10k_iov_reset_resources_pf(struct fm10k_hw *hw, txqctl = ((u32)vf_vid << FM10K_TXQCTL_VID_SHIFT) | (vf_idx << FM10K_TXQCTL_TC_SHIFT) | FM10K_TXQCTL_VF | vf_idx; - rxqctl = FM10K_RXQCTL_VF | (vf_idx << FM10K_RXQCTL_VF_SHIFT); + rxqctl = (vf_idx << FM10K_RXQCTL_VF_SHIFT) | FM10K_RXQCTL_VF; /* stop further DMA and reset queue ownership back to VF */ for (i = vf_q_idx; i < (queues_per_pool + vf_q_idx); i++) { From fcdb0a9951d8a5edfc47e89a7fe62457c25e18c4 Mon Sep 17 00:00:00 2001 From: Bruce Allan Date: Tue, 22 Dec 2015 13:43:49 -0800 Subject: [PATCH 0269/1649] fm10k: cleanup remaining right-bit-shifted 1 Use BIT() macro instead. Signed-off-by: Bruce Allan Tested-by: Krishneil Singh Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/fm10k/fm10k.h | 12 +++++----- .../net/ethernet/intel/fm10k/fm10k_ethtool.c | 20 +++++++--------- drivers/net/ethernet/intel/fm10k/fm10k_main.c | 20 ++++++++-------- .../net/ethernet/intel/fm10k/fm10k_netdev.c | 2 +- drivers/net/ethernet/intel/fm10k/fm10k_pci.c | 8 +++---- drivers/net/ethernet/intel/fm10k/fm10k_pf.c | 12 +++++----- drivers/net/ethernet/intel/fm10k/fm10k_tlv.c | 24 +++++++++---------- drivers/net/ethernet/intel/fm10k/fm10k_type.h | 8 +++---- 8 files changed, 52 insertions(+), 54 deletions(-) diff --git a/drivers/net/ethernet/intel/fm10k/fm10k.h b/drivers/net/ethernet/intel/fm10k/fm10k.h index b34bb008b104..83f386714e87 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k.h +++ b/drivers/net/ethernet/intel/fm10k/fm10k.h @@ -262,12 +262,12 @@ struct fm10k_intfc { unsigned long state; u32 flags; -#define FM10K_FLAG_RESET_REQUESTED (u32)(1 << 0) -#define FM10K_FLAG_RSS_FIELD_IPV4_UDP (u32)(1 << 1) -#define FM10K_FLAG_RSS_FIELD_IPV6_UDP (u32)(1 << 2) -#define FM10K_FLAG_RX_TS_ENABLED (u32)(1 << 3) -#define FM10K_FLAG_SWPRI_CONFIG (u32)(1 << 4) -#define FM10K_FLAG_DEBUG_STATS (u32)(1 << 5) +#define FM10K_FLAG_RESET_REQUESTED (u32)(BIT(0)) +#define FM10K_FLAG_RSS_FIELD_IPV4_UDP (u32)(BIT(1)) +#define FM10K_FLAG_RSS_FIELD_IPV6_UDP (u32)(BIT(2)) +#define FM10K_FLAG_RX_TS_ENABLED (u32)(BIT(3)) +#define FM10K_FLAG_SWPRI_CONFIG (u32)(BIT(4)) +#define FM10K_FLAG_DEBUG_STATS (u32)(BIT(5)) int xcast_mode; /* Tx fast path data */ diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c index 2f6a05b57228..28837ae099df 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c @@ -425,7 +425,7 @@ static void fm10k_get_regs(struct net_device *netdev, u32 *buff = p; u16 i; - regs->version = (1 << 24) | (hw->revision_id << 16) | hw->device_id; + regs->version = BIT(24) | (hw->revision_id << 16) | hw->device_id; switch (hw->mac.type) { case fm10k_mac_pf: @@ -942,8 +942,8 @@ static int fm10k_mbx_test(struct fm10k_intfc *interface, u64 *data) return 0; /* loop through both nested and unnested attribute types */ - for (attr_flag = (1 << FM10K_TEST_MSG_UNSET); - attr_flag < (1 << (2 * FM10K_TEST_MSG_NESTED)); + for (attr_flag = BIT(FM10K_TEST_MSG_UNSET); + attr_flag < BIT(2 * FM10K_TEST_MSG_NESTED); attr_flag += attr_flag) { /* generate message to be tested */ fm10k_tlv_msg_test_create(test_msg, attr_flag); @@ -1005,7 +1005,7 @@ static u32 fm10k_get_priv_flags(struct 
net_device *netdev) u32 priv_flags = 0; if (interface->flags & FM10K_FLAG_DEBUG_STATS) - priv_flags |= 1 << FM10K_PRV_FLAG_DEBUG_STATS; + priv_flags |= BIT(FM10K_PRV_FLAG_DEBUG_STATS); return priv_flags; } @@ -1014,10 +1014,10 @@ static int fm10k_set_priv_flags(struct net_device *netdev, u32 priv_flags) { struct fm10k_intfc *interface = netdev_priv(netdev); - if (priv_flags >= (1 << FM10K_PRV_FLAG_LEN)) + if (priv_flags >= BIT(FM10K_PRV_FLAG_LEN)) return -EINVAL; - if (priv_flags & (1 << FM10K_PRV_FLAG_DEBUG_STATS)) + if (priv_flags & BIT(FM10K_PRV_FLAG_DEBUG_STATS)) interface->flags |= FM10K_FLAG_DEBUG_STATS; else interface->flags &= ~FM10K_FLAG_DEBUG_STATS; @@ -1145,7 +1145,7 @@ static unsigned int fm10k_max_channels(struct net_device *dev) /* For QoS report channels per traffic class */ if (tcs > 1) - max_combined = 1 << (fls(max_combined / tcs) - 1); + max_combined = BIT((fls(max_combined / tcs) - 1)); return max_combined; } @@ -1210,11 +1210,9 @@ static int fm10k_get_ts_info(struct net_device *dev, else info->phc_index = -1; - info->tx_types = (1 << HWTSTAMP_TX_OFF) | - (1 << HWTSTAMP_TX_ON); + info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON); - info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) | - (1 << HWTSTAMP_FILTER_ALL); + info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | BIT(HWTSTAMP_FILTER_ALL); return 0; } diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c index d411aa506661..db4353ba0932 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c @@ -401,10 +401,10 @@ static inline void fm10k_rx_checksum(struct fm10k_ring *ring, } #define FM10K_RSS_L4_TYPES_MASK \ - ((1ul << FM10K_RSSTYPE_IPV4_TCP) | \ - (1ul << FM10K_RSSTYPE_IPV4_UDP) | \ - (1ul << FM10K_RSSTYPE_IPV6_TCP) | \ - (1ul << FM10K_RSSTYPE_IPV6_UDP)) + (BIT(FM10K_RSSTYPE_IPV4_TCP) | \ + BIT(FM10K_RSSTYPE_IPV4_UDP) | \ + BIT(FM10K_RSSTYPE_IPV6_TCP) | \ + BIT(FM10K_RSSTYPE_IPV6_UDP)) static inline void fm10k_rx_hash(struct fm10k_ring *ring, union fm10k_rx_desc *rx_desc, @@ -420,7 +420,7 @@ static inline void fm10k_rx_hash(struct fm10k_ring *ring, return; skb_set_hash(skb, le32_to_cpu(rx_desc->d.rss), - ((1ul << rss_type) & FM10K_RSS_L4_TYPES_MASK) ? + (BIT(rss_type) & FM10K_RSS_L4_TYPES_MASK) ? PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3); } @@ -1409,7 +1409,7 @@ static void fm10k_update_itr(struct fm10k_ring_container *ring_container) * accounts for changes in the ITR due to PCIe link speed. 
*/ itr_round = ACCESS_ONCE(ring_container->itr_scale) + 8; - avg_wire_size += (1 << itr_round) - 1; + avg_wire_size += BIT(itr_round) - 1; avg_wire_size >>= itr_round; /* write back value and retain adaptive flag */ @@ -1511,17 +1511,17 @@ static bool fm10k_set_qos_queues(struct fm10k_intfc *interface) /* set QoS mask and indices */ f = &interface->ring_feature[RING_F_QOS]; f->indices = pcs; - f->mask = (1 << fls(pcs - 1)) - 1; + f->mask = BIT(fls(pcs - 1)) - 1; /* determine the upper limit for our current DCB mode */ rss_i = interface->hw.mac.max_queues / pcs; - rss_i = 1 << (fls(rss_i) - 1); + rss_i = BIT(fls(rss_i) - 1); /* set RSS mask and indices */ f = &interface->ring_feature[RING_F_RSS]; rss_i = min_t(u16, rss_i, f->limit); f->indices = rss_i; - f->mask = (1 << fls(rss_i - 1)) - 1; + f->mask = BIT(fls(rss_i - 1)) - 1; /* configure pause class to queue mapping */ for (i = 0; i < pcs; i++) @@ -1551,7 +1551,7 @@ static bool fm10k_set_rss_queues(struct fm10k_intfc *interface) /* record indices and power of 2 mask for RSS */ f->indices = rss_i; - f->mask = (1 << fls(rss_i - 1)) - 1; + f->mask = BIT(fls(rss_i - 1)) - 1; interface->num_rx_queues = rss_i; interface->num_tx_queues = rss_i; diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c index d09a8dd71fc2..0ff68747c6c4 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c @@ -1429,7 +1429,7 @@ struct net_device *fm10k_alloc_netdev(const struct fm10k_info *info) /* configure default debug level */ interface = netdev_priv(dev); - interface->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1; + interface->msg_enable = BIT(DEFAULT_DEBUG_LEVEL_SHIFT) - 1; /* configure default features */ dev->features |= NETIF_F_IP_CSUM | diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c index 86700a45fe13..c9324c79c879 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c @@ -579,7 +579,7 @@ static void fm10k_configure_tx_ring(struct fm10k_intfc *interface, u64 tdba = ring->dma; u32 size = ring->count * sizeof(struct fm10k_tx_desc); u32 txint = FM10K_INT_MAP_DISABLE; - u32 txdctl = (1 << FM10K_TXDCTL_MAX_TIME_SHIFT) | FM10K_TXDCTL_ENABLE; + u32 txdctl = BIT(FM10K_TXDCTL_MAX_TIME_SHIFT) | FM10K_TXDCTL_ENABLE; u8 reg_idx = ring->reg_idx; /* disable queue to avoid issues while updating state */ @@ -730,7 +730,7 @@ static void fm10k_configure_rx_ring(struct fm10k_intfc *interface, if (interface->pfc_en) rx_pause = interface->pfc_en; #endif - if (!(rx_pause & (1 << ring->qos_pc))) + if (!(rx_pause & BIT(ring->qos_pc))) rxdctl |= FM10K_RXDCTL_DROP_ON_EMPTY; fm10k_write_reg(hw, FM10K_RXDCTL(reg_idx), rxdctl); @@ -779,7 +779,7 @@ void fm10k_update_rx_drop_en(struct fm10k_intfc *interface) u32 rxdctl = FM10K_RXDCTL_WRITE_BACK_MIN_DELAY; u8 reg_idx = ring->reg_idx; - if (!(rx_pause & (1 << ring->qos_pc))) + if (!(rx_pause & BIT(ring->qos_pc))) rxdctl |= FM10K_RXDCTL_DROP_ON_EMPTY; fm10k_write_reg(hw, FM10K_RXDCTL(reg_idx), rxdctl); @@ -1065,7 +1065,7 @@ static void fm10k_reset_drop_on_empty(struct fm10k_intfc *interface, u32 eicr) if (maxholdq) fm10k_write_reg(hw, FM10K_MAXHOLDQ(7), maxholdq); for (q = 255;;) { - if (maxholdq & (1 << 31)) { + if (maxholdq & BIT(31)) { if (q < FM10K_MAX_QUEUES_PF) { interface->rx_overrun_pf++; fm10k_write_reg(hw, FM10K_RXDCTL(q), rxdctl); diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c 
b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c index 34a0b035887d..23de956d1acc 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c @@ -527,8 +527,8 @@ static s32 fm10k_configure_dglort_map_pf(struct fm10k_hw *hw, return FM10K_ERR_PARAM; /* determine count of VSIs and queues */ - queue_count = 1 << (dglort->rss_l + dglort->pc_l); - vsi_count = 1 << (dglort->vsi_l + dglort->queue_l); + queue_count = BIT(dglort->rss_l + dglort->pc_l); + vsi_count = BIT(dglort->vsi_l + dglort->queue_l); glort = dglort->glort; q_idx = dglort->queue_b; @@ -544,8 +544,8 @@ static s32 fm10k_configure_dglort_map_pf(struct fm10k_hw *hw, } /* determine count of PCs and queues */ - queue_count = 1 << (dglort->queue_l + dglort->rss_l + dglort->vsi_l); - pc_count = 1 << dglort->pc_l; + queue_count = BIT(dglort->queue_l + dglort->rss_l + dglort->vsi_l); + pc_count = BIT(dglort->pc_l); /* configure PC for Tx queues */ for (pc = 0; pc < pc_count; pc++) { @@ -952,7 +952,7 @@ static s32 fm10k_iov_reset_resources_pf(struct fm10k_hw *hw, return FM10K_ERR_PARAM; /* clear event notification of VF FLR */ - fm10k_write_reg(hw, FM10K_PFVFLREC(vf_idx / 32), 1 << (vf_idx % 32)); + fm10k_write_reg(hw, FM10K_PFVFLREC(vf_idx / 32), BIT(vf_idx % 32)); /* force timeout and then disconnect the mailbox */ vf_info->mbx.timeout = 0; @@ -1370,7 +1370,7 @@ s32 fm10k_iov_msg_lport_state_pf(struct fm10k_hw *hw, u32 **results, mode = fm10k_iov_supported_xcast_mode_pf(vf_info, mode); /* if mode is not currently enabled, enable it */ - if (!(FM10K_VF_FLAG_ENABLED(vf_info) & (1 << mode))) + if (!(FM10K_VF_FLAG_ENABLED(vf_info) & BIT(mode))) fm10k_update_xcast_mode_pf(hw, vf_info->glort, mode); /* swap mode back to a bit flag */ diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c index ab01bb30752f..b999897e50d8 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c @@ -222,7 +222,7 @@ s32 fm10k_tlv_attr_put_value(u32 *msg, u16 attr_id, s64 value, u32 len) attr = &msg[FM10K_TLV_DWORD_LEN(*msg)]; if (len < 4) { - attr[1] = (u32)value & ((0x1ul << (8 * len)) - 1); + attr[1] = (u32)value & (BIT(8 * len) - 1); } else { attr[1] = (u32)value; if (len > 4) @@ -652,29 +652,29 @@ const struct fm10k_tlv_attr fm10k_tlv_msg_test_attr[] = { **/ static void fm10k_tlv_msg_test_generate_data(u32 *msg, u32 attr_flags) { - if (attr_flags & (1 << FM10K_TEST_MSG_STRING)) + if (attr_flags & BIT(FM10K_TEST_MSG_STRING)) fm10k_tlv_attr_put_null_string(msg, FM10K_TEST_MSG_STRING, test_str); - if (attr_flags & (1 << FM10K_TEST_MSG_MAC_ADDR)) + if (attr_flags & BIT(FM10K_TEST_MSG_MAC_ADDR)) fm10k_tlv_attr_put_mac_vlan(msg, FM10K_TEST_MSG_MAC_ADDR, test_mac, test_vlan); - if (attr_flags & (1 << FM10K_TEST_MSG_U8)) + if (attr_flags & BIT(FM10K_TEST_MSG_U8)) fm10k_tlv_attr_put_u8(msg, FM10K_TEST_MSG_U8, test_u8); - if (attr_flags & (1 << FM10K_TEST_MSG_U16)) + if (attr_flags & BIT(FM10K_TEST_MSG_U16)) fm10k_tlv_attr_put_u16(msg, FM10K_TEST_MSG_U16, test_u16); - if (attr_flags & (1 << FM10K_TEST_MSG_U32)) + if (attr_flags & BIT(FM10K_TEST_MSG_U32)) fm10k_tlv_attr_put_u32(msg, FM10K_TEST_MSG_U32, test_u32); - if (attr_flags & (1 << FM10K_TEST_MSG_U64)) + if (attr_flags & BIT(FM10K_TEST_MSG_U64)) fm10k_tlv_attr_put_u64(msg, FM10K_TEST_MSG_U64, test_u64); - if (attr_flags & (1 << FM10K_TEST_MSG_S8)) + if (attr_flags & BIT(FM10K_TEST_MSG_S8)) fm10k_tlv_attr_put_s8(msg, FM10K_TEST_MSG_S8, test_s8); - if (attr_flags & (1 << 
FM10K_TEST_MSG_S16)) + if (attr_flags & BIT(FM10K_TEST_MSG_S16)) fm10k_tlv_attr_put_s16(msg, FM10K_TEST_MSG_S16, test_s16); - if (attr_flags & (1 << FM10K_TEST_MSG_S32)) + if (attr_flags & BIT(FM10K_TEST_MSG_S32)) fm10k_tlv_attr_put_s32(msg, FM10K_TEST_MSG_S32, test_s32); - if (attr_flags & (1 << FM10K_TEST_MSG_S64)) + if (attr_flags & BIT(FM10K_TEST_MSG_S64)) fm10k_tlv_attr_put_s64(msg, FM10K_TEST_MSG_S64, test_s64); - if (attr_flags & (1 << FM10K_TEST_MSG_LE_STRUCT)) + if (attr_flags & BIT(FM10K_TEST_MSG_LE_STRUCT)) fm10k_tlv_attr_put_le_struct(msg, FM10K_TEST_MSG_LE_STRUCT, test_le, 8); } diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_type.h b/drivers/net/ethernet/intel/fm10k/fm10k_type.h index 854ebb1906bf..5c0533054c5f 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_type.h +++ b/drivers/net/ethernet/intel/fm10k/fm10k_type.h @@ -617,10 +617,10 @@ struct fm10k_vf_info { */ }; -#define FM10K_VF_FLAG_ALLMULTI_CAPABLE ((u8)1 << FM10K_XCAST_MODE_ALLMULTI) -#define FM10K_VF_FLAG_MULTI_CAPABLE ((u8)1 << FM10K_XCAST_MODE_MULTI) -#define FM10K_VF_FLAG_PROMISC_CAPABLE ((u8)1 << FM10K_XCAST_MODE_PROMISC) -#define FM10K_VF_FLAG_NONE_CAPABLE ((u8)1 << FM10K_XCAST_MODE_NONE) +#define FM10K_VF_FLAG_ALLMULTI_CAPABLE (u8)(BIT(FM10K_XCAST_MODE_ALLMULTI)) +#define FM10K_VF_FLAG_MULTI_CAPABLE (u8)(BIT(FM10K_XCAST_MODE_MULTI)) +#define FM10K_VF_FLAG_PROMISC_CAPABLE (u8)(BIT(FM10K_XCAST_MODE_PROMISC)) +#define FM10K_VF_FLAG_NONE_CAPABLE (u8)(BIT(FM10K_XCAST_MODE_NONE)) #define FM10K_VF_FLAG_CAPABLE(vf_info) ((vf_info)->vf_flags & (u8)0xF) #define FM10K_VF_FLAG_ENABLED(vf_info) ((vf_info)->vf_flags >> 4) #define FM10K_VF_FLAG_SET_MODE(mode) ((u8)0x10 << (mode)) From 838e6102920a288a88f5bba10784ab10b2f2eb3e Mon Sep 17 00:00:00 2001 From: Bruce Allan Date: Tue, 22 Dec 2015 14:55:20 -0800 Subject: [PATCH 0270/1649] fm10k: demote BUG_ON() to WARN_ON() where appropriate We don't need to crash the kernel in this instance so just warn about the condition and play on. 
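
For a sense of the trade-off, here is a minimal, hypothetical sketch (names invented for illustration; the actual one-line change to fm10k_service_event_complete() follows in the diff):

#include <linux/bitops.h>
#include <linux/bug.h>

#define __EXAMPLE_SCHED	0	/* hypothetical state bit, for illustration only */

static void example_event_complete(unsigned long *state)
{
	/* BUG_ON() panics the whole machine when the condition is true;
	 * WARN_ON() prints a backtrace and lets execution continue. A
	 * missing SCHED bit here is a driver bug worth logging, not a
	 * reason to take the system down.
	 */
	WARN_ON(!test_bit(__EXAMPLE_SCHED, state));

	clear_bit(__EXAMPLE_SCHED, state);
}
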
Signed-off-by: Bruce Allan Tested-by: Krishneil Singh Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/fm10k/fm10k_pci.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c index c9324c79c879..60a70e9730a0 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c @@ -99,7 +99,7 @@ void fm10k_service_event_schedule(struct fm10k_intfc *interface) static void fm10k_service_event_complete(struct fm10k_intfc *interface) { - BUG_ON(!test_bit(__FM10K_SERVICE_SCHED, &interface->state)); + WARN_ON(!test_bit(__FM10K_SERVICE_SCHED, &interface->state)); /* flush memory to make sure state is correct before next watchog */ smp_mb__before_atomic(); From 1905add427cdee1b52380241574ab1339b1df413 Mon Sep 17 00:00:00 2001 From: Bruce Allan Date: Tue, 22 Dec 2015 14:55:26 -0800 Subject: [PATCH 0271/1649] fm10k: cleanup SPACE_BEFORE_TAB checkpatch warning Signed-off-by: Bruce Allan Tested-by: Krishneil Singh Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/fm10k/fm10k_ptp.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ptp.c b/drivers/net/ethernet/intel/fm10k/fm10k_ptp.c index b4945e8abe03..1c1ccade6538 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_ptp.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_ptp.c @@ -416,7 +416,7 @@ void fm10k_ptp_register(struct fm10k_intfc *interface) /* This math is simply the inverse of the math in * fm10k_adjust_systime_pf applied to an adjustment value * of 2^30 - 1 which is the maximum value of the register: - * max_ppb == ((2^30 - 1) * 5^9) / 2^31 + * max_ppb == ((2^30 - 1) * 5^9) / 2^31 */ ptp_caps->max_adj = 976562; ptp_caps->adjfreq = fm10k_ptp_adjfreq; From 11c49f79b294081010f7e13a95c6b40c4d36b1de Mon Sep 17 00:00:00 2001 From: Bruce Allan Date: Mon, 28 Dec 2015 18:00:30 -0800 Subject: [PATCH 0272/1649] fm10k: use ether_addr_copy to copy MAC address Cleanup the remaining instances of using memcpy() instead of the preferred ether_addr_copy(). Signed-off-by: Bruce Allan Tested-by: Krishneil Singh Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/fm10k/fm10k_pci.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c index 60a70e9730a0..6190a81b7c32 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c @@ -1776,8 +1776,8 @@ static int fm10k_sw_init(struct fm10k_intfc *interface, netdev->addr_assign_type |= NET_ADDR_RANDOM; } - memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len); - memcpy(netdev->perm_addr, hw->mac.addr, netdev->addr_len); + ether_addr_copy(netdev->dev_addr, hw->mac.addr); + ether_addr_copy(netdev->perm_addr, hw->mac.addr); if (!is_valid_ether_addr(netdev->perm_addr)) { dev_err(&pdev->dev, "Invalid MAC Address\n"); From de66c610a6adade32bf955f67b4f4f4aaeeeff85 Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Thu, 4 Feb 2016 10:47:54 -0800 Subject: [PATCH 0273/1649] fm10k: prevent null pointer dereference of msix_entries table According to the C standard dereferencing a variable before it is checked invokes undefined behavior, and thus compilers are free to assume the check for NULL isn't necessary. Prevent this by re-ordering the NULL check of msix_entries in fm10k_free_mbx_irq. 
Signed-off-by: Jacob Keller Tested-by: Krishneil Singh Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/fm10k/fm10k_pci.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c index 6190a81b7c32..8c23fb3df572 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c @@ -1143,14 +1143,16 @@ static irqreturn_t fm10k_msix_mbx_pf(int __always_unused irq, void *data) void fm10k_mbx_free_irq(struct fm10k_intfc *interface) { - struct msix_entry *entry = &interface->msix_entries[FM10K_MBX_VECTOR]; struct fm10k_hw *hw = &interface->hw; + struct msix_entry *entry; int itr_reg; /* no mailbox IRQ to free if MSI-X is not enabled */ if (!interface->msix_entries) return; + entry = &interface->msix_entries[FM10K_MBX_VECTOR]; + /* disconnect the mailbox */ hw->mbx.ops.disconnect(hw, &hw->mbx); From e72319bba814b115c47785b3b88f7263d0b8a1b8 Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Thu, 4 Feb 2016 10:47:55 -0800 Subject: [PATCH 0274/1649] fm10k: don't initialize service task until later in probe Delay initialization of the service timer and service task until late probe. If we don't wait, failures in probe do not properly cleanup the service timer or service task items, which results in the kernel panic below, potentially freezing the whole system. In addition, ensure that the SERVICE_DISABLE bit is set before we request the MBX IRQ since the MBX interrupt attempts to schedule the service task otherwise. This prevents a similar trace from occurring after this change. We didn't notice this issue before because probe almost always completes successfully. I discovered it due to a mis-ordered mailbox handler array, which resulted in the following failure when requesting mailbox interrupt. [ 555.325619] ------------[ cut here ]------------ [ 555.325628] WARNING: CPU: 0 PID: 4941 at lib/list_debug.c:33 __list_add+0xa0/0xd0() [ 555.325631] list_add corruption. prev->next should be next (ffffffff81f46648), but was (null). (prev=ffff8807fad5d0e8). [ 555.325722] CPU: 0 PID: 4941 Comm: insmod Tainted: G OE 4.0.4-303.fc22.x86_64 #1 [ 555.325725] Hardware name: Intel Corporation S2600CO/S2600CO, BIOS SE5C600.86B.02.03.8x23.060520140825 06/05/2014 [ 555.325727] 0000000000000000 00000000b4f161b3 ffff88081a21f8e8 ffffffff81783124 [ 555.325734] 0000000000000000 ffff88081a21f940 ffff88081a21f928 ffffffff8109c66a [ 555.325740] 0000000064000000 ffff8807fad5d0e8 ffff8807fad5d0e8 ffffffff81f46648 [ 555.325746] Call Trace: [ 555.325752] [] dump_stack+0x45/0x57 [ 555.325757] [] warn_slowpath_common+0x8a/0xc0 [ 555.325759] [] warn_slowpath_fmt+0x55/0x70 [ 555.325763] [] __list_add+0xa0/0xd0 [ 555.325768] [] __internal_add_timer+0x9d/0x110 [ 555.325771] [] internal_add_timer+0x2f/0xc0 [ 555.325774] [] mod_timer+0x12a/0x230 [ 555.325782] [] fm10k_probe+0x69a/0xc80 [fm10k] [ 555.325787] [] local_pci_probe+0x45/0xa0 [ 555.325791] [] ? sysfs_do_create_link_sd.isra.2+0x72/0xc0 [ 555.325794] [] pci_device_probe+0xf9/0x150 [ 555.325799] [] driver_probe_device+0xa3/0x400 [ 555.325802] [] __driver_attach+0x9b/0xa0 [ 555.325805] [] ? __device_attach+0x40/0x40 [ 555.325808] [] bus_for_each_dev+0x73/0xc0 [ 555.325811] [] driver_attach+0x1e/0x20 [ 555.325815] [] bus_add_driver+0x180/0x250 [ 555.325819] [] ? 
0xffffffffa03b2000 [ 555.325823] [] driver_register+0x64/0xf0 [ 555.325826] [] __pci_register_driver+0x4c/0x50 [ 555.325832] [] fm10k_register_pci_driver+0x23/0x30 [fm10k] [ 555.325838] [] fm10k_init_module+0x80/0x1000 [fm10k] [ 555.325843] [] do_one_initcall+0xb8/0x200 [ 555.325848] [] ? __vunmap+0xa2/0x100 [ 555.325852] [] ? kmem_cache_alloc_trace+0x1b9/0x240 [ 555.325855] [] ? do_init_module+0x28/0x1cb [ 555.325858] [] do_init_module+0x60/0x1cb [ 555.325862] [] load_module+0x205e/0x26b0 [ 555.325866] [] ? store_uevent+0x70/0x70 [ 555.325870] [] ? kernel_read+0x50/0x80 [ 555.325873] [] SyS_finit_module+0xbe/0xf0 [ 555.325878] [] system_call_fastpath+0x12/0x17 [ 555.325880] ---[ end trace 9e0f58d071eafd2a ]--- Signed-off-by: Jacob Keller Tested-by: Krishneil Singh Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/fm10k/fm10k_pci.c | 25 +++++++++++++------- 1 file changed, 16 insertions(+), 9 deletions(-) diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c index 8c23fb3df572..ed1f8cf39508 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c @@ -1795,15 +1795,6 @@ static int fm10k_sw_init(struct fm10k_intfc *interface, /* initialize DCBNL interface */ fm10k_dcbnl_set_ops(netdev); - /* Initialize service timer and service task */ - set_bit(__FM10K_SERVICE_DISABLE, &interface->state); - setup_timer(&interface->service_timer, &fm10k_service_timer, - (unsigned long)interface); - INIT_WORK(&interface->service_task, fm10k_service_task); - - /* kick off service timer now, even when interface is down */ - mod_timer(&interface->service_timer, (HZ * 2) + jiffies); - /* Intitialize timestamp data */ fm10k_ts_init(interface); @@ -1989,6 +1980,12 @@ static int fm10k_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (err) goto err_sw_init; + /* the mbx interrupt might attempt to schedule the service task, so we + * must ensure it is disabled since we haven't yet requested the timer + * or work item. + */ + set_bit(__FM10K_SERVICE_DISABLE, &interface->state); + err = fm10k_mbx_request_irq(interface); if (err) goto err_mbx_interrupt; @@ -2008,6 +2005,16 @@ static int fm10k_probe(struct pci_dev *pdev, const struct pci_device_id *ent) /* stop all the transmit queues from transmitting until link is up */ netif_tx_stop_all_queues(netdev); + /* Initialize service timer and service task late in order to avoid + * cleanup issues. + */ + setup_timer(&interface->service_timer, &fm10k_service_timer, + (unsigned long)interface); + INIT_WORK(&interface->service_task, fm10k_service_task); + + /* kick off service timer now, even when interface is down */ + mod_timer(&interface->service_timer, (HZ * 2) + jiffies); + /* Register PTP interface */ fm10k_ptp_register(interface); From b3525696adba1ecddff3d667680461cc533e63a4 Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Thu, 4 Feb 2016 10:47:56 -0800 Subject: [PATCH 0275/1649] fm10k: base queue scheme covered by RSS In fm10k_set_num_queues, we previously assigned the base template. This would always be overwritten by either fm10k_set_qos_queues or fm10k_set_rss_queues. In either case, we don't need the base values, so we can just remove them. 
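
A minimal sketch of why the base assignments are dead stores (hypothetical helpers, not the driver's own; the actual change to fm10k_set_num_queues() follows in the diff):

/* Every path assigns the queue counts, so defaults written before the
 * branch are never read and can be dropped.
 */
static int sketch_set_qos(unsigned int *rx, unsigned int *tx)
{
	*rx = 4;
	*tx = 4;
	return 1;	/* pretend QoS queues were configured */
}

static void sketch_set_rss(unsigned int *rx, unsigned int *tx)
{
	*rx = 8;
	*tx = 8;
}

static void sketch_set_num_queues(unsigned int *rx, unsigned int *tx)
{
	/* no "*rx = 1; *tx = 1;" base case needed here */
	if (sketch_set_qos(rx, tx))
		return;

	sketch_set_rss(rx, tx);
}
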
Signed-off-by: Jacob Keller Tested-by: Krishneil Singh Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/fm10k/fm10k_main.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c index db4353ba0932..b87401c38571 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c @@ -1572,13 +1572,11 @@ static bool fm10k_set_rss_queues(struct fm10k_intfc *interface) **/ static void fm10k_set_num_queues(struct fm10k_intfc *interface) { - /* Start with base case */ - interface->num_rx_queues = 1; - interface->num_tx_queues = 1; - + /* Attempt to setup QoS and RSS first */ if (fm10k_set_qos_queues(interface)) return; + /* If we don't have QoS, just fallback to only RSS. */ fm10k_set_rss_queues(interface); } From 61e0217e83353cf895f8b2d0a187804171d119ca Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Thu, 4 Feb 2016 10:47:57 -0800 Subject: [PATCH 0276/1649] fm10k: print error message when stop_hw fails fm10k_stop_hw_generic calls fm10k_disable_queues_generic, which may return an error code indicating that the queues were not stopped within the time limit. Notify the user by displaying a message in the kernel message ring, in a similar way to how we notify the user when reset_hw fails. There isn't much we can do to recover from this error, so currently nothing else is done. Signed-off-by: Jacob Keller Tested-by: Krishneil Singh Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/fm10k/fm10k_pci.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c index ed1f8cf39508..3c7c819ac8d9 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c @@ -1656,6 +1656,7 @@ void fm10k_down(struct fm10k_intfc *interface) { struct net_device *netdev = interface->netdev; struct fm10k_hw *hw = &interface->hw; + int err; /* signal that we are down to the interrupt handler and service task */ set_bit(__FM10K_DOWN, &interface->state); @@ -1680,7 +1681,9 @@ void fm10k_down(struct fm10k_intfc *interface) fm10k_update_stats(interface); /* Disable DMA engine for Tx/Rx */ - hw->mac.ops.stop_hw(hw); + err = hw->mac.ops.stop_hw(hw); + if (err) + dev_err(&interface->pdev->dev, "stop_hw failed: %d\n", err); /* free any buffers still on the rings */ fm10k_clean_all_tx_rings(interface); From c8ed563bebeabbf0b1085b52916dd2fb6e219276 Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Thu, 4 Feb 2016 10:47:58 -0800 Subject: [PATCH 0277/1649] fm10k: free MBX IRQ before clearing interrupt scheme During fm10k_io_error_detected we were clearing the interrupt scheme before we freed the MBX IRQ. This causes a kernel panic because the MBX IRQ are assigned after MSI-X initialization. Clearing the interrupt scheme results in removing the MSI-X entry table. Fix this by freeing the MBX IRQ before we clear the interrupt scheme, as we do elsewhere in the driver. 
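
As a rough ordering sketch with hypothetical helpers (the real fix to fm10k_io_error_detected() follows in the diff):

#include <linux/slab.h>

struct sketch_msix_entry {
	int vector;
};

struct sketch_ctx {
	struct sketch_msix_entry *msix_entries;	/* owned by the interrupt scheme */
};

static void sketch_mbx_free_irq(struct sketch_ctx *ctx)
{
	if (!ctx->msix_entries)
		return;
	/* releasing the mailbox IRQ looks up its entry in the MSI-X
	 * table, so the table must still be allocated at this point
	 */
}

static void sketch_clear_interrupt_scheme(struct sketch_ctx *ctx)
{
	kfree(ctx->msix_entries);	/* table is gone after this */
	ctx->msix_entries = NULL;
}

/* Free users of the MSI-X table before tearing down the table itself. */
static void sketch_error_teardown(struct sketch_ctx *ctx)
{
	sketch_mbx_free_irq(ctx);
	sketch_clear_interrupt_scheme(ctx);
}
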
Signed-off-by: Jacob Keller Tested-by: Krishneil Singh Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/fm10k/fm10k_pci.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c index 3c7c819ac8d9..da38af052519 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c @@ -2274,11 +2274,11 @@ static pci_ers_result_t fm10k_io_error_detected(struct pci_dev *pdev, if (netif_running(netdev)) fm10k_close(netdev); + fm10k_mbx_free_irq(interface); + /* free interrupts */ fm10k_clear_queueing_scheme(interface); - fm10k_mbx_free_irq(interface); - pci_disable_device(pdev); /* Request a slot reset. */ From d2e0721b18f320232dc36a0e4cc7beb620e8c9bd Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Fri, 5 Feb 2016 10:43:08 -0800 Subject: [PATCH 0278/1649] fm10k: add helper functions to set strings and data for ethtool stats Reduce duplicate code and the amount of indentation by adding fm10k_add_stat_strings and fm10k_add_ethtool_stats functions which help add fm10k_stat structures to the ethtool stats callbacks. This helps increase ease of use for future stat additions, and increases code readability. Skip handling of the per-queue stats as these will be reworked in a following patch. Signed-off-by: Jacob Keller Tested-by: Krishneil Singh Signed-off-by: Jeff Kirsher --- .../net/ethernet/intel/fm10k/fm10k_ethtool.c | 164 +++++++++--------- 1 file changed, 83 insertions(+), 81 deletions(-) diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c index 28837ae099df..c67121cc7b23 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c @@ -153,57 +153,51 @@ static const char fm10k_prv_flags[FM10K_PRV_FLAG_LEN][ETH_GSTRING_LEN] = { "debug-statistics", }; +static void fm10k_add_stat_strings(char **p, const char *prefix, + const struct fm10k_stats stats[], + const unsigned int size) +{ + unsigned int i; + + for (i = 0; i < size; i++) { + snprintf(*p, ETH_GSTRING_LEN, "%s%s", + prefix, stats[i].stat_string); + *p += ETH_GSTRING_LEN; + } +} + static void fm10k_get_stat_strings(struct net_device *dev, u8 *data) { struct fm10k_intfc *interface = netdev_priv(dev); struct fm10k_iov_data *iov_data = interface->iov_data; char *p = (char *)data; unsigned int i; - unsigned int j; - for (i = 0; i < FM10K_NETDEV_STATS_LEN; i++) { - memcpy(p, fm10k_gstrings_net_stats[i].stat_string, - ETH_GSTRING_LEN); - p += ETH_GSTRING_LEN; - } + fm10k_add_stat_strings(&p, "", fm10k_gstrings_net_stats, + FM10K_NETDEV_STATS_LEN); - for (i = 0; i < FM10K_GLOBAL_STATS_LEN; i++) { - memcpy(p, fm10k_gstrings_global_stats[i].stat_string, - ETH_GSTRING_LEN); - p += ETH_GSTRING_LEN; - } + fm10k_add_stat_strings(&p, "", fm10k_gstrings_global_stats, + FM10K_GLOBAL_STATS_LEN); - if (interface->flags & FM10K_FLAG_DEBUG_STATS) { - for (i = 0; i < FM10K_DEBUG_STATS_LEN; i++) { - memcpy(p, fm10k_gstrings_debug_stats[i].stat_string, - ETH_GSTRING_LEN); - p += ETH_GSTRING_LEN; - } - } + if (interface->flags & FM10K_FLAG_DEBUG_STATS) + fm10k_add_stat_strings(&p, "", fm10k_gstrings_debug_stats, + FM10K_DEBUG_STATS_LEN); - for (i = 0; i < FM10K_MBX_STATS_LEN; i++) { - memcpy(p, fm10k_gstrings_mbx_stats[i].stat_string, - ETH_GSTRING_LEN); - p += ETH_GSTRING_LEN; - } + fm10k_add_stat_strings(&p, "", fm10k_gstrings_mbx_stats, + FM10K_MBX_STATS_LEN); - if (interface->hw.mac.type != 
fm10k_mac_vf) { - for (i = 0; i < FM10K_PF_STATS_LEN; i++) { - memcpy(p, fm10k_gstrings_pf_stats[i].stat_string, - ETH_GSTRING_LEN); - p += ETH_GSTRING_LEN; - } - } + if (interface->hw.mac.type != fm10k_mac_vf) + fm10k_add_stat_strings(&p, "", fm10k_gstrings_pf_stats, + FM10K_PF_STATS_LEN); if ((interface->flags & FM10K_FLAG_DEBUG_STATS) && iov_data) { for (i = 0; i < iov_data->num_vfs; i++) { - for (j = 0; j < FM10K_MBX_STATS_LEN; j++) { - snprintf(p, - ETH_GSTRING_LEN, - "vf_%u_%s", i, - fm10k_gstrings_mbx_stats[j].stat_string); - p += ETH_GSTRING_LEN; - } + char prefix[ETH_GSTRING_LEN]; + + snprintf(prefix, ETH_GSTRING_LEN, "vf_%u_", i); + fm10k_add_stat_strings(&p, prefix, + fm10k_gstrings_mbx_stats, + FM10K_MBX_STATS_LEN); } } @@ -271,6 +265,41 @@ static int fm10k_get_sset_count(struct net_device *dev, int sset) } } +static void fm10k_add_ethtool_stats(u64 **data, void *pointer, + const struct fm10k_stats stats[], + const unsigned int size) +{ + unsigned int i; + char *p; + + /* simply skip forward if we were not given a valid pointer */ + if (!pointer) { + *data += size; + return; + } + + for (i = 0; i < size; i++) { + p = (char *)pointer + stats[i].stat_offset; + + switch (stats[i].sizeof_stat) { + case sizeof(u64): + *((*data)++) = *(u64 *)p; + break; + case sizeof(u32): + *((*data)++) = *(u32 *)p; + break; + case sizeof(u16): + *((*data)++) = *(u16 *)p; + break; + case sizeof(u8): + *((*data)++) = *(u8 *)p; + break; + default: + *((*data)++) = 0; + } + } +} + static void fm10k_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats __always_unused *stats, u64 *data) @@ -279,47 +308,29 @@ static void fm10k_get_ethtool_stats(struct net_device *netdev, struct fm10k_intfc *interface = netdev_priv(netdev); struct fm10k_iov_data *iov_data = interface->iov_data; struct net_device_stats *net_stats = &netdev->stats; - char *p; int i, j; fm10k_update_stats(interface); - for (i = 0; i < FM10K_NETDEV_STATS_LEN; i++) { - p = (char *)net_stats + fm10k_gstrings_net_stats[i].stat_offset; - *(data++) = (fm10k_gstrings_net_stats[i].sizeof_stat == - sizeof(u64)) ? *(u64 *)p : *(u32 *)p; - } + fm10k_add_ethtool_stats(&data, net_stats, fm10k_gstrings_net_stats, + FM10K_NETDEV_STATS_LEN); - for (i = 0; i < FM10K_GLOBAL_STATS_LEN; i++) { - p = (char *)interface + - fm10k_gstrings_global_stats[i].stat_offset; - *(data++) = (fm10k_gstrings_global_stats[i].sizeof_stat == - sizeof(u64)) ? *(u64 *)p : *(u32 *)p; - } + fm10k_add_ethtool_stats(&data, interface, fm10k_gstrings_global_stats, + FM10K_GLOBAL_STATS_LEN); - if (interface->flags & FM10K_FLAG_DEBUG_STATS) { - for (i = 0; i < FM10K_DEBUG_STATS_LEN; i++) { - p = (char *)interface + - fm10k_gstrings_debug_stats[i].stat_offset; - *(data++) = (fm10k_gstrings_debug_stats[i].sizeof_stat == - sizeof(u64)) ? *(u64 *)p : *(u32 *)p; - } - } + if (interface->flags & FM10K_FLAG_DEBUG_STATS) + fm10k_add_ethtool_stats(&data, interface, + fm10k_gstrings_debug_stats, + FM10K_DEBUG_STATS_LEN); - for (i = 0; i < FM10K_MBX_STATS_LEN; i++) { - p = (char *)&interface->hw.mbx + - fm10k_gstrings_mbx_stats[i].stat_offset; - *(data++) = (fm10k_gstrings_mbx_stats[i].sizeof_stat == - sizeof(u64)) ? *(u64 *)p : *(u32 *)p; - } + fm10k_add_ethtool_stats(&data, &interface->hw.mbx, + fm10k_gstrings_mbx_stats, + FM10K_MBX_STATS_LEN); if (interface->hw.mac.type != fm10k_mac_vf) { - for (i = 0; i < FM10K_PF_STATS_LEN; i++) { - p = (char *)interface + - fm10k_gstrings_pf_stats[i].stat_offset; - *(data++) = (fm10k_gstrings_pf_stats[i].sizeof_stat == - sizeof(u64)) ? 
*(u64 *)p : *(u32 *)p; - } + fm10k_add_ethtool_stats(&data, interface, + fm10k_gstrings_pf_stats, + FM10K_PF_STATS_LEN); } if ((interface->flags & FM10K_FLAG_DEBUG_STATS) && iov_data) { @@ -328,18 +339,9 @@ static void fm10k_get_ethtool_stats(struct net_device *netdev, vf_info = &iov_data->vf_info[i]; - /* skip stats if we don't have a vf info */ - if (!vf_info) { - data += FM10K_MBX_STATS_LEN; - continue; - } - - for (j = 0; j < FM10K_MBX_STATS_LEN; j++) { - p = (char *)&vf_info->mbx + - fm10k_gstrings_mbx_stats[j].stat_offset; - *(data++) = (fm10k_gstrings_mbx_stats[j].sizeof_stat == - sizeof(u64)) ? *(u64 *)p : *(u32 *)p; - } + fm10k_add_ethtool_stats(&data, &vf_info->mbx, + fm10k_gstrings_mbx_stats, + FM10K_MBX_STATS_LEN); } } From c4114e3db6429c665adc3db871685c474a467efe Mon Sep 17 00:00:00 2001 From: Bruce Allan Date: Wed, 10 Feb 2016 14:45:47 -0800 Subject: [PATCH 0279/1649] fm10k: prevent possibly uninitialized variable If 'attr_flag < (1 << (2 * FM10K_TEST_MSG_NESTED))' is ever false, err will be used uninitialized. Signed-off-by: Bruce Allan Tested-by: Krishneil Singh Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c index c67121cc7b23..2e4ea8861852 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c @@ -937,7 +937,7 @@ static int fm10k_mbx_test(struct fm10k_intfc *interface, u64 *data) struct fm10k_mbx_info *mbx = &hw->mbx; u32 attr_flag, test_msg[6]; unsigned long timeout; - int err; + int err = -EINVAL; /* For now this is a VF only feature */ if (hw->mac.type != fm10k_mac_vf) From 4be37c42a40cec94b0381b42e5796d3316f96c32 Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Wed, 10 Feb 2016 14:45:50 -0800 Subject: [PATCH 0280/1649] fm10k: correctly clean up when init_queueing_scheme fails Fix a kernel panic that occurs during surprise removal. Clear the interface queue counts upon fm10k_init_msix_capability failure. This prevents further code (fm10k_update_stats etc.) from attempting to access unallocated queue vector or ring memory. 
[ 628.692648] BUG: unable to handle kernel NULL pointer dereference at 0000000000000068 [ 628.692805] IP: [] fm10k_update_stats+0x7f/0x2c0 [fm10k] [ 628.693173] PGD 0 [ 628.693759] Oops: 0000 [#1] SMP [ 628.699321] CPU: 10 PID: 8164 Comm: kworker/10:0 Tainted: G OE ------------ 3.10.0-327.el7.x86_64 #1 [ 628.700096] Hardware name: Supermicro X9DAi/X9DAi, BIOS 3.2 05/09/2015 [ 628.700894] Workqueue: pciehp-1 pciehp_power_thread [ 628.701686] task: ffff88086559c500 ti: ffff8808593c0000 task.ti: ffff8808593c0000 [ 628.702493] RIP: 0010:[] [] fm10k_update_stats+0x7f/0x2c0 [fm10k] [ 628.703310] RSP: 0018:ffff8808593c3b00 EFLAGS: 00010282 [ 628.704132] RAX: 0000000000000000 RBX: ffff880860760000 RCX: 0000000000000000 [ 628.704963] RDX: ffff880860760b08 RSI: 0000000000000000 RDI: 0000000000000000 [ 628.705794] RBP: ffff8808593c3b40 R08: 0000000000000000 R09: 0000000000000000 [ 628.706604] R10: 0000000000000000 R11: ffff880860760c40 R12: 0000000000000080 [ 628.707420] R13: ffff8808607608c0 R14: ffff880860779ec0 R15: ffff880860779f40 [ 628.708238] FS: 0000000000000000(0000) GS:ffff88086f000000(0000) knlGS:0000000000000000 [ 628.709071] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [ 628.709923] CR2: 0000000000000068 CR3: 000000000194a000 CR4: 00000000001407e0 [ 628.710752] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 [ 628.711596] DR3: 0000000000000000 DR6: 00000000ffff0ff0 DR7: 0000000000000400 [ 628.712438] Stack: [ 628.713255] ffff880860764458 ffff8808607608c0 ffff880860760000 ffff880860760000 [ 628.714088] 0000000000000080 ffff8808607608c0 ffff880860779ec0 ffff880860779f40 [ 628.714925] ffff8808593c3b88 ffffffffa04780c5 ffff880860764458 0000000a8163cb5b [ 628.715752] Call Trace: [ 628.716560] [] fm10k_down+0x155/0x1f0 [fm10k] [ 628.717367] [] fm10k_close+0x28/0xd0 [fm10k] [ 628.718184] [] __dev_close_many+0x85/0xd0 [ 628.718986] [] dev_close_many+0x98/0x120 [ 628.719764] [] rollback_registered_many+0xa8/0x230 [ 628.720527] [] rollback_registered+0x40/0x70 [ 628.721294] [] unregister_netdevice_queue+0x48/0x80 [ 628.722052] [] unregister_netdev+0x1c/0x30 [ 628.722816] [] fm10k_remove+0xd8/0xe0 [fm10k] [ 628.723581] [] pci_device_remove+0x3b/0xb0 [ 628.724340] [] __device_release_driver+0x7f/0xf0 [ 628.725088] [] device_release_driver+0x23/0x30 [ 628.725814] [] pci_stop_bus_device+0x94/0xa0 [ 628.726535] [] pci_stop_and_remove_bus_device+0x12/0x20 [ 628.727249] [] pciehp_unconfigure_device+0xb0/0x1b0 [ 628.727964] [] pciehp_disable_slot+0x52/0xd0 [ 628.728664] [] pciehp_power_thread+0xea/0x150 [ 628.729358] [] process_one_work+0x17b/0x470 [ 628.730036] [] worker_thread+0x11b/0x400 [ 628.730730] [] ? rescuer_thread+0x400/0x400 [ 628.731385] [] kthread+0xcf/0xe0 [ 628.732036] [] ? kthread_create_on_node+0x140/0x140 [ 628.732674] [] ret_from_fork+0x58/0x90 [ 628.733289] [] ? 
kthread_create_on_node+0x140/0x140 [ 628.733883] Code: 83 e8 01 48 8d 97 40 02 00 00 45 31 c0 4c 8d 9c c7 48 02 0 [ 628.735202] RIP [] fm10k_update_stats+0x7f/0x2c0 [fm10k] [ 628.735732] RSP [ 628.736285] CR2: 0000000000000068 [ 628.736846] ---[ end trace 9156088b311aff42 ]--- Signed-off-by: Jacob Keller Tested-by: Krishneil Singh Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/fm10k/fm10k_main.c | 35 ++++++++++++++----- 1 file changed, 26 insertions(+), 9 deletions(-) diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c index b87401c38571..31179afb8468 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c @@ -1580,6 +1580,20 @@ static void fm10k_set_num_queues(struct fm10k_intfc *interface) fm10k_set_rss_queues(interface); } +/** + * fm10k_reset_num_queues - Reset the number of queues to zero + * @interface: board private structure + * + * This function should be called whenever we need to reset the number of + * queues after an error condition. + */ +static void fm10k_reset_num_queues(struct fm10k_intfc *interface) +{ + interface->num_tx_queues = 0; + interface->num_rx_queues = 0; + interface->num_q_vectors = 0; +} + /** * fm10k_alloc_q_vector - Allocate memory for a single interrupt vector * @interface: board private structure to initialize @@ -1763,9 +1777,7 @@ static int fm10k_alloc_q_vectors(struct fm10k_intfc *interface) return 0; err_out: - interface->num_tx_queues = 0; - interface->num_rx_queues = 0; - interface->num_q_vectors = 0; + fm10k_reset_num_queues(interface); while (v_idx--) fm10k_free_q_vector(interface, v_idx); @@ -1785,9 +1797,7 @@ static void fm10k_free_q_vectors(struct fm10k_intfc *interface) { int v_idx = interface->num_q_vectors; - interface->num_tx_queues = 0; - interface->num_rx_queues = 0; - interface->num_q_vectors = 0; + fm10k_reset_num_queues(interface); while (v_idx--) fm10k_free_q_vector(interface, v_idx); @@ -1995,14 +2005,15 @@ int fm10k_init_queueing_scheme(struct fm10k_intfc *interface) if (err) { dev_err(&interface->pdev->dev, "Unable to initialize MSI-X capability\n"); - return err; + goto err_init_msix; } /* Allocate memory for queues */ err = fm10k_alloc_q_vectors(interface); if (err) { - fm10k_reset_msix_capability(interface); - return err; + dev_err(&interface->pdev->dev, + "Unable to allocate queue vectors\n"); + goto err_alloc_q_vectors; } /* Map rings to devices, and map devices to physical queues */ @@ -2012,6 +2023,12 @@ int fm10k_init_queueing_scheme(struct fm10k_intfc *interface) fm10k_init_reta(interface); return 0; + +err_alloc_q_vectors: + fm10k_reset_msix_capability(interface); +err_init_msix: + fm10k_reset_num_queues(interface); + return err; } /** From d8ec92f2cdcc7f2d06dd0a40b600b6da7d9d1070 Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Wed, 10 Feb 2016 14:45:51 -0800 Subject: [PATCH 0281/1649] fm10k: fix a minor typo in some comments s/funciton/function to resolve a typo, and cleanup grammar on a few comments regarding processing the VF mailboxes. 
Signed-off-by: Jacob Keller Tested-by: Krishneil Singh Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/fm10k/fm10k_iov.c | 4 ++-- drivers/net/ethernet/intel/fm10k/fm10k_netdev.c | 4 ++-- drivers/net/ethernet/intel/fm10k/fm10k_pci.c | 6 +++--- drivers/net/ethernet/intel/fm10k/fm10k_pf.c | 2 +- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c index acfb8b1f88a7..bbf7c4bac303 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c @@ -50,7 +50,7 @@ s32 fm10k_iov_event(struct fm10k_intfc *interface) s64 vflre; int i; - /* if there is no iov_data then there is no mailboxes to process */ + /* if there is no iov_data then there is no mailbox to process */ if (!ACCESS_ONCE(interface->iov_data)) return 0; @@ -98,7 +98,7 @@ s32 fm10k_iov_mbx(struct fm10k_intfc *interface) struct fm10k_iov_data *iov_data; int i; - /* if there is no iov_data then there is no mailboxes to process */ + /* if there is no iov_data then there is no mailbox to process */ if (!ACCESS_ONCE(interface->iov_data)) return 0; diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c index 0ff68747c6c4..1d0f0583222c 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c @@ -440,7 +440,7 @@ static void fm10k_restore_vxlan_port(struct fm10k_intfc *interface) * @sa_family: Address family of new port * @port: port number used for VXLAN * - * This funciton is called when a new VXLAN interface has added a new port + * This function is called when a new VXLAN interface has added a new port * number to the range that is currently in use for VXLAN. The new port * number is always added to the tail so that the port number list should * match the order in which the ports were allocated. The head of the list @@ -484,7 +484,7 @@ insert_tail: * @sa_family: Address family of freed port * @port: port number used for VXLAN * - * This funciton is called when a new VXLAN interface has freed a port + * This function is called when a new VXLAN interface has freed a port * number from the range that is currently in use for VXLAN. The freed * port is removed from the list and the new head is used to determine * the port number for offloads. diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c index da38af052519..f0992950e228 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c @@ -1379,7 +1379,7 @@ static s32 fm10k_1588_msg_pf(struct fm10k_hw *hw, u32 **results, return 0; } - /* if there is no iov_data then there is no mailboxes to process */ + /* if there is no iov_data then there is no mailbox to process */ if (!ACCESS_ONCE(interface->iov_data)) return FM10K_ERR_PARAM; @@ -2394,7 +2394,7 @@ static struct pci_driver fm10k_driver = { /** * fm10k_register_pci_driver - register driver interface * - * This funciton is called on module load in order to register the driver. + * This function is called on module load in order to register the driver. **/ int fm10k_register_pci_driver(void) { @@ -2404,7 +2404,7 @@ int fm10k_register_pci_driver(void) /** * fm10k_unregister_pci_driver - unregister driver interface * - * This funciton is called on module unload in order to remove the driver. + * This function is called on module unload in order to remove the driver. 
**/ void fm10k_unregister_pci_driver(void) { diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c index 23de956d1acc..ecc99f9d2cce 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c @@ -1604,7 +1604,7 @@ static s32 fm10k_request_lport_map_pf(struct fm10k_hw *hw) * @hw: pointer to hardware structure * @switch_ready: pointer to boolean value that will record switch state * - * This funciton will check the DMA_CTRL2 register and mailbox in order + * This function will check the DMA_CTRL2 register and mailbox in order * to determine if the switch is ready for the PF to begin requesting * addresses and mapping traffic to the local interface. **/ From 0ea7fae44094b4ca06ea68105457a7dc64041bd3 Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Tue, 16 Feb 2016 16:19:24 -0800 Subject: [PATCH 0282/1649] fm10k: use ethtool_rxfh_indir_default for default redirection table The fm10k driver used its own code for generating a default indirection table on device load, which was not the same as the default generated by ethtool when indir_size of 0 is passed to SRXFH. Take advantage of ethtool_rxfh_indir_default() and simplify code to write the redirection table to reduce some code duplication. Signed-off-by: Jacob Keller Tested-by: Krishneil Singh Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/fm10k/fm10k.h | 2 + .../net/ethernet/intel/fm10k/fm10k_ethtool.c | 37 +++++++++++-------- drivers/net/ethernet/intel/fm10k/fm10k_main.c | 24 +++++------- 3 files changed, 34 insertions(+), 29 deletions(-) diff --git a/drivers/net/ethernet/intel/fm10k/fm10k.h b/drivers/net/ethernet/intel/fm10k/fm10k.h index 83f386714e87..9c7fafef7cf6 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k.h +++ b/drivers/net/ethernet/intel/fm10k/fm10k.h @@ -510,6 +510,8 @@ int fm10k_close(struct net_device *netdev); /* Ethtool */ void fm10k_set_ethtool_ops(struct net_device *dev); +u32 fm10k_get_reta_size(struct net_device *netdev); +void fm10k_write_reta(struct fm10k_intfc *interface, const u32 *indir); /* IOV */ s32 fm10k_iov_event(struct fm10k_intfc *interface); diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c index 2e4ea8861852..a23748777b1b 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c @@ -1027,11 +1027,31 @@ static int fm10k_set_priv_flags(struct net_device *netdev, u32 priv_flags) return 0; } -static u32 fm10k_get_reta_size(struct net_device __always_unused *netdev) +u32 fm10k_get_reta_size(struct net_device __always_unused *netdev) { return FM10K_RETA_SIZE * FM10K_RETA_ENTRIES_PER_REG; } +void fm10k_write_reta(struct fm10k_intfc *interface, const u32 *indir) +{ + struct fm10k_hw *hw = &interface->hw; + int i; + + /* record entries to reta table */ + for (i = 0; i < FM10K_RETA_SIZE; i++, indir += 4) { + u32 reta = indir[0] | + (indir[1] << 8) | + (indir[2] << 16) | + (indir[3] << 24); + + if (interface->reta[i] == reta) + continue; + + interface->reta[i] = reta; + fm10k_write_reg(hw, FM10K_RETA(0, i), reta); + } +} + static int fm10k_get_reta(struct net_device *netdev, u32 *indir) { struct fm10k_intfc *interface = netdev_priv(netdev); @@ -1055,7 +1075,6 @@ static int fm10k_get_reta(struct net_device *netdev, u32 *indir) static int fm10k_set_reta(struct net_device *netdev, const u32 *indir) { struct fm10k_intfc *interface = netdev_priv(netdev); - struct fm10k_hw *hw = &interface->hw; 
int i; u16 rss_i; @@ -1070,19 +1089,7 @@ static int fm10k_set_reta(struct net_device *netdev, const u32 *indir) return -EINVAL; } - /* record entries to reta table */ - for (i = 0; i < FM10K_RETA_SIZE; i++, indir += 4) { - u32 reta = indir[0] | - (indir[1] << 8) | - (indir[2] << 16) | - (indir[3] << 24); - - if (interface->reta[i] == reta) - continue; - - interface->reta[i] = reta; - fm10k_write_reg(hw, FM10K_RETA(0, i), reta); - } + fm10k_write_reta(interface, indir); return 0; } diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c index 31179afb8468..0b465394f88a 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c @@ -1943,7 +1943,8 @@ static void fm10k_assign_rings(struct fm10k_intfc *interface) static void fm10k_init_reta(struct fm10k_intfc *interface) { u16 i, rss_i = interface->ring_feature[RING_F_RSS].indices; - u32 reta, base; + struct net_device *netdev = interface->netdev; + u32 reta, *indir; /* If the Rx flow indirection table has been configured manually, we * need to maintain it when possible. @@ -1968,21 +1969,16 @@ static void fm10k_init_reta(struct fm10k_intfc *interface) } repopulate_reta: - /* Populate the redirection table 4 entries at a time. To do this - * we are generating the results for n and n+2 and then interleaving - * those with the results with n+1 and n+3. - */ - for (i = FM10K_RETA_SIZE; i--;) { - /* first pass generates n and n+2 */ - base = ((i * 0x00040004) + 0x00020000) * rss_i; - reta = (base & 0x3F803F80) >> 7; + indir = kcalloc(fm10k_get_reta_size(netdev), + sizeof(indir[0]), GFP_KERNEL); - /* second pass generates n+1 and n+3 */ - base += 0x00010001 * rss_i; - reta |= (base & 0x3F803F80) << 1; + /* generate redirection table using the default kernel policy */ + for (i = 0; i < fm10k_get_reta_size(netdev); i++) + indir[i] = ethtool_rxfh_indir_default(i, rss_i); - interface->reta[i] = reta; - } + fm10k_write_reta(interface, indir); + + kfree(indir); } /** From 11f15ed394782dd018d60a0bb550616a8571b43c Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Tue, 5 Apr 2016 14:08:55 -0400 Subject: [PATCH 0283/1649] bnxt_en: Update to Firmware 1.2.2 spec. Use new field names in API structs and stop using deprecated fields auto_link_speed and auto_duplex in phy_cfg/phy_qcfg structs. Update copyright year to 2016. Signed-off-by: Michael Chan Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 20 +- drivers/net/ethernet/broadcom/bnxt/bnxt.h | 8 +- .../net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 4 +- .../net/ethernet/broadcom/bnxt/bnxt_ethtool.h | 2 +- .../net/ethernet/broadcom/bnxt/bnxt_fw_hdr.h | 2 +- drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h | 433 ++++++++++++++++-- .../ethernet/broadcom/bnxt/bnxt_nvm_defs.h | 2 +- .../net/ethernet/broadcom/bnxt/bnxt_sriov.c | 14 +- .../net/ethernet/broadcom/bnxt/bnxt_sriov.h | 2 +- 9 files changed, 431 insertions(+), 56 deletions(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 12a009d720cd..bfe98cbcefca 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -1,6 +1,6 @@ /* Broadcom NetXtreme-C/E network driver. 
* - * Copyright (c) 2014-2015 Broadcom Corporation + * Copyright (c) 2014-2016 Broadcom Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -2763,7 +2763,7 @@ static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp) * only checks if it is non-zero to enable async event forwarding */ req.async_event_fwd[0] |= cpu_to_le32(1); - req.os_type = cpu_to_le16(1); + req.os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX); req.ver_maj = DRV_VER_MAJ; req.ver_min = DRV_VER_MIN; req.ver_upd = DRV_VER_UPD; @@ -3726,7 +3726,7 @@ int bnxt_hwrm_func_qcaps(struct bnxt *bp) pf->fw_fid = le16_to_cpu(resp->fid); pf->port_id = le16_to_cpu(resp->port_id); - memcpy(pf->mac_addr, resp->perm_mac_address, ETH_ALEN); + memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN); memcpy(bp->dev->dev_addr, pf->mac_addr, ETH_ALEN); pf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx); pf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings); @@ -3751,7 +3751,7 @@ int bnxt_hwrm_func_qcaps(struct bnxt *bp) struct bnxt_vf_info *vf = &bp->vf; vf->fw_fid = le16_to_cpu(resp->fid); - memcpy(vf->mac_addr, resp->perm_mac_address, ETH_ALEN); + memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN); if (is_valid_ether_addr(vf->mac_addr)) /* overwrite netdev dev_adr with admin VF MAC */ memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN); @@ -3842,6 +3842,8 @@ static int bnxt_hwrm_ver_get(struct bnxt *bp) memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output)); + bp->hwrm_spec_code = resp->hwrm_intf_maj << 16 | + resp->hwrm_intf_min << 8 | resp->hwrm_intf_upd; if (resp->hwrm_intf_maj < 1) { netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n", resp->hwrm_intf_maj, resp->hwrm_intf_min, @@ -4523,7 +4525,6 @@ static int bnxt_update_link(struct bnxt *bp, bool chng_link_state) else link_info->link_speed = 0; link_info->force_link_speed = le16_to_cpu(resp->force_link_speed); - link_info->auto_link_speed = le16_to_cpu(resp->auto_link_speed); link_info->support_speeds = le16_to_cpu(resp->support_speeds); link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask); link_info->lp_auto_link_speeds = @@ -4533,8 +4534,8 @@ static int bnxt_update_link(struct bnxt *bp, bool chng_link_state) link_info->phy_ver[1] = resp->phy_min; link_info->phy_ver[2] = resp->phy_bld; link_info->media_type = resp->media_type; - link_info->transceiver = resp->transceiver_type; - link_info->phy_addr = resp->phy_addr; + link_info->transceiver = resp->xcvr_pkg_type; + link_info->phy_addr = resp->eee_config_phy_addr; /* TODO: need to add more logic to report VF link */ if (chng_link_state) { @@ -4581,7 +4582,7 @@ static void bnxt_hwrm_set_link_common(struct bnxt *bp, if (autoneg & BNXT_AUTONEG_SPEED) { req->auto_mode |= - PORT_PHY_CFG_REQ_AUTO_MODE_MASK; + PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK; req->enables |= cpu_to_le32( PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK); @@ -4595,9 +4596,6 @@ static void bnxt_hwrm_set_link_common(struct bnxt *bp, req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE); } - /* currently don't support half duplex */ - req->auto_duplex = PORT_PHY_CFG_REQ_AUTO_DUPLEX_FULL; - req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_DUPLEX); /* tell chimp that the setting takes effect immediately */ req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY); } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 709b95b8fcba..e98c37ae81f2 100644 --- 
a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -1,6 +1,6 @@ /* Broadcom NetXtreme-C/E network driver. * - * Copyright (c) 2014-2015 Broadcom Corporation + * Copyright (c) 2014-2016 Broadcom Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -11,7 +11,7 @@ #define BNXT_H #define DRV_MODULE_NAME "bnxt_en" -#define DRV_MODULE_VERSION "1.0.0" +#define DRV_MODULE_VERSION "1.2.0" #define DRV_VER_MAJ 1 #define DRV_VER_MIN 0 @@ -788,7 +788,7 @@ struct bnxt_link_info { #define BNXT_LINK_AUTO_ALLSPDS PORT_PHY_QCFG_RESP_AUTO_MODE_ALL_SPEEDS #define BNXT_LINK_AUTO_ONESPD PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_SPEED #define BNXT_LINK_AUTO_ONEORBELOW PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_OR_BELOW -#define BNXT_LINK_AUTO_MSK PORT_PHY_QCFG_RESP_AUTO_MODE_MASK +#define BNXT_LINK_AUTO_MSK PORT_PHY_QCFG_RESP_AUTO_MODE_SPEED_MASK #define PHY_VER_LEN 3 u8 phy_ver[PHY_VER_LEN]; u16 link_speed; @@ -813,7 +813,6 @@ struct bnxt_link_info { #define BNXT_LINK_SPEED_MSK_40GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_40GB #define BNXT_LINK_SPEED_MSK_50GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_50GB u16 lp_auto_link_speeds; - u16 auto_link_speed; u16 force_link_speed; u32 preemphasis; @@ -940,6 +939,7 @@ struct bnxt { u32 msg_enable; + u32 hwrm_spec_code; u16 hwrm_cmd_seq; u32 hwrm_intr_seq_id; void *hwrm_cmd_resp_addr; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index 2e472f6dbf2d..f103f9b06e6d 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -1,6 +1,6 @@ /* Broadcom NetXtreme-C/E network driver. * - * Copyright (c) 2014-2015 Broadcom Corporation + * Copyright (c) 2014-2016 Broadcom Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -728,7 +728,7 @@ static int bnxt_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) ethtool_speed = bnxt_fw_to_ethtool_speed(link_info->link_speed); ethtool_cmd_speed_set(cmd, ethtool_speed); if (link_info->transceiver == - PORT_PHY_QCFG_RESP_TRANSCEIVER_TYPE_XCVR_INTERNAL) + PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_INTERNAL) cmd->transceiver = XCVR_INTERNAL; else cmd->transceiver = XCVR_EXTERNAL; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h index 98fa81e08b58..b2d8bd3a37fb 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h @@ -1,6 +1,6 @@ /* Broadcom NetXtreme-C/E network driver. * - * Copyright (c) 2014-2015 Broadcom Corporation + * Copyright (c) 2014-2016 Broadcom Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_fw_hdr.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_fw_hdr.h index e0aac65c6d82..461675caaacd 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_fw_hdr.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_fw_hdr.h @@ -1,6 +1,6 @@ /* Broadcom NetXtreme-C/E network driver. 
* - * Copyright (c) 2014-2015 Broadcom Corporation + * Copyright (c) 2014-2016 Broadcom Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h index 4badbedcb421..80f95560086d 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h @@ -1,6 +1,6 @@ /* Broadcom NetXtreme-C/E network driver. * - * Copyright (c) 2014-2015 Broadcom Corporation + * Copyright (c) 2014-2016 Broadcom Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -104,6 +104,7 @@ struct hwrm_async_event_cmpl { #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE (0x3UL << 0) #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED (0x4UL << 0) #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED (0x5UL << 0) + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE (0x6UL << 0) #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_UNLOAD (0x10UL << 0) #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_LOAD (0x11UL << 0) #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD (0x20UL << 0) @@ -111,6 +112,7 @@ struct hwrm_async_event_cmpl { #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_FLR (0x30UL << 0) #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_MAC_ADDR_CHANGE (0x31UL << 0) #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_VF_COMM_STATUS_CHANGE (0x32UL << 0) + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE (0x33UL << 0) #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR (0xffUL << 0) __le32 event_data2; u8 opaque_v; @@ -141,6 +143,7 @@ struct hwrm_async_event_cmpl_link_status_change { #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE 0x1UL #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_DOWN (0x0UL << 0) #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_UP (0x1UL << 0) + #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_LAST HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_UP #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_MASK 0xeUL #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_SFT 1 #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffff0UL @@ -195,6 +198,9 @@ struct hwrm_async_event_cmpl_link_speed_change { #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_25GB (0xfaUL << 1) #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_40GB (0x190UL << 1) #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_50GB (0x1f4UL << 1) + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_100GB (0x3e8UL << 1) + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_10MB (0xffffUL << 1) + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_LAST HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_10MB #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffff0000UL #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_PORT_ID_SFT 16 }; @@ -237,6 +243,55 @@ struct hwrm_async_event_cmpl_port_conn_not_allowed { __le32 event_data1; #define 
HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK 0xffffUL #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_SFT 0 + #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_MASK 0xff0000UL + #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_SFT 16 + #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_NONE (0x0UL << 16) + #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_DISABLETX (0x1UL << 16) + #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_WARNINGMSG (0x2UL << 16) + #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_PWRDOWN (0x3UL << 16) + #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_LAST HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_PWRDOWN +}; + +/* HWRM Asynchronous Event Completion Record for link speed config not allowed (16 bytes) */ +struct hwrm_async_event_cmpl_link_speed_cfg_not_allowed { + __le16 type; + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_MASK 0x3fUL + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_SFT 0 + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0) + __le16 event_id; + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED (0x5UL << 0) + __le32 event_data2; + u8 opaque_v; + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_V 0x1UL + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_OPAQUE_MASK 0xfeUL + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK 0xffffUL + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_EVENT_DATA1_PORT_ID_SFT 0 +}; + +/* HWRM Asynchronous Event Completion Record for link speed configuration change (16 bytes) */ +struct hwrm_async_event_cmpl_link_speed_cfg_change { + __le16 type; + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_MASK 0x3fUL + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_SFT 0 + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0) + __le16 event_id; + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_ID_LINK_SPEED_CFG_CHANGE (0x6UL << 0) + __le32 event_data2; + u8 opaque_v; + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_V 0x1UL + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_OPAQUE_MASK 0xfeUL + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffffUL + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_PORT_ID_SFT 0 + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_SUPPORTED_LINK_SPEEDS_CHANGE 0x10000UL + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_ILLEGAL_LINK_SPEED_CFG 0x20000UL }; /* HWRM Asynchronous Event Completion Record for Function Driver Unload (16 bytes) */ @@ -363,6 +418,47 @@ struct hwrm_async_event_cmpl_vf_mac_addr_change { #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_DATA1_VF_ID_SFT 0 }; +/* HWRM Asynchronous Event Completion Record for PF-VF communication status change (16 bytes) */ +struct 
hwrm_async_event_cmpl_pf_vf_comm_status_change { + __le16 type; + #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_MASK 0x3fUL + #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_SFT 0 + #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0) + __le16 event_id; + #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_EVENT_ID_PF_VF_COMM_STATUS_CHANGE (0x32UL << 0) + __le32 event_data2; + u8 opaque_v; + #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_V 0x1UL + #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_OPAQUE_MASK 0xfeUL + #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_EVENT_DATA1_COMM_ESTABLISHED 0x1UL +}; + +/* HWRM Asynchronous Event Completion Record for VF configuration change (16 bytes) */ +struct hwrm_async_event_cmpl_vf_cfg_change { + __le16 type; + #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_MASK 0x3fUL + #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_SFT 0 + #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0) + __le16 event_id; + #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_ID_VF_CFG_CHANGE (0x33UL << 0) + __le32 event_data2; + u8 opaque_v; + #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_V 0x1UL + #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_OPAQUE_MASK 0xfeUL + #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_MTU_CHANGE 0x1UL + #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_MRU_CHANGE 0x2UL + #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_MAC_ADDR_CHANGE 0x4UL + #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_VLAN_CHANGE 0x8UL +}; + /* HWRM Asynchronous Event Completion Record for HWRM Error (16 bytes) */ struct hwrm_async_event_cmpl_hwrm_error { __le16 type; @@ -377,6 +473,7 @@ struct hwrm_async_event_cmpl_hwrm_error { #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_WARNING (0x0UL << 0) #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_NONFATAL (0x1UL << 0) #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_FATAL (0x2UL << 0) + #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_LAST HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_FATAL u8 opaque_v; #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_V 0x1UL #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_OPAQUE_MASK 0xfeUL @@ -387,12 +484,12 @@ struct hwrm_async_event_cmpl_hwrm_error { #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA1_TIMESTAMP 0x1UL }; -/* HW Resource Manager Specification 1.0.0 */ +/* HW Resource Manager Specification 1.2.2 */ #define HWRM_VERSION_MAJOR 1 -#define HWRM_VERSION_MINOR 0 -#define HWRM_VERSION_UPDATE 0 +#define HWRM_VERSION_MINOR 2 +#define HWRM_VERSION_UPDATE 2 -#define HWRM_VERSION_STR "1.0.0" +#define HWRM_VERSION_STR "1.2.2" /* * Following is the signature for HWRM message field that indicates not * applicable (All F's). Need to cast it the size of the field if needed. 
@@ -444,7 +541,7 @@ struct cmd_nums { #define HWRM_FUNC_BUF_RGTR (0x1fUL) #define HWRM_PORT_PHY_CFG (0x20UL) #define HWRM_PORT_MAC_CFG (0x21UL) - #define RESERVED2 (0x22UL) + #define HWRM_PORT_TS_QUERY (0x22UL) #define HWRM_PORT_QSTATS (0x23UL) #define HWRM_PORT_LPBK_QSTATS (0x24UL) #define HWRM_PORT_CLR_STATS (0x25UL) @@ -452,6 +549,9 @@ struct cmd_nums { #define HWRM_PORT_PHY_QCFG (0x27UL) #define HWRM_PORT_MAC_QCFG (0x28UL) #define HWRM_PORT_BLINK_LED (0x29UL) + #define HWRM_PORT_PHY_QCAPS (0x2aUL) + #define HWRM_PORT_PHY_I2C_WRITE (0x2bUL) + #define HWRM_PORT_PHY_I2C_READ (0x2cUL) #define HWRM_QUEUE_QPORTCFG (0x30UL) #define HWRM_QUEUE_QCFG (0x31UL) #define HWRM_QUEUE_CFG (0x32UL) @@ -531,6 +631,7 @@ struct cmd_nums { __le16 unused_0[3]; }; +/* Return Codes (8 bytes) */ struct ret_codes { __le16 error_code; #define HWRM_ERR_CODE_SUCCESS (0x0UL) @@ -875,10 +976,11 @@ struct hwrm_func_vf_cfg_input { #define FUNC_VF_CFG_REQ_ENABLES_MTU 0x1UL #define FUNC_VF_CFG_REQ_ENABLES_GUEST_VLAN 0x2UL #define FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR 0x4UL + #define FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR 0x8UL __le16 mtu; __le16 guest_vlan; __le16 async_event_cr; - __le16 unused_0[3]; + u8 dflt_mac_addr[6]; }; /* Output (16 bytes) */ @@ -917,7 +1019,8 @@ struct hwrm_func_qcaps_output { __le32 flags; #define FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED 0x1UL #define FUNC_QCAPS_RESP_FLAGS_GLOBAL_MSIX_AUTOMASKING 0x2UL - u8 perm_mac_address[6]; + #define FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED 0x4UL + u8 mac_address[6]; __le16 max_rsscos_ctx; __le16 max_cmpl_rings; __le16 max_tx_rings; @@ -942,6 +1045,67 @@ struct hwrm_func_qcaps_output { u8 valid; }; +/* hwrm_func_qcfg */ +/* Input (24 bytes) */ +struct hwrm_func_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 fid; + __le16 unused_0[3]; +}; + +/* Output (72 bytes) */ +struct hwrm_func_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 fid; + __le16 port_id; + __le16 vlan; + u8 unused_0; + u8 unused_1; + u8 mac_address[6]; + __le16 pci_id; + __le16 alloc_rsscos_ctx; + __le16 alloc_cmpl_rings; + __le16 alloc_tx_rings; + __le16 alloc_rx_rings; + __le16 alloc_l2_ctx; + __le16 alloc_vnics; + __le16 mtu; + __le16 mru; + __le16 stat_ctx_id; + u8 port_partition_type; + #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_SPF (0x0UL << 0) + #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_MPFS (0x1UL << 0) + #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0 (0x2UL << 0) + #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5 (0x3UL << 0) + #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0 (0x4UL << 0) + #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_UNKNOWN (0xffUL << 0) + u8 unused_2; + __le16 dflt_vnic_id; + u8 unused_3; + u8 unused_4; + __le32 min_bw; + __le32 max_bw; + u8 evb_mode; + #define FUNC_QCFG_RESP_EVB_MODE_NO_EVB (0x0UL << 0) + #define FUNC_QCFG_RESP_EVB_MODE_VEB (0x1UL << 0) + #define FUNC_QCFG_RESP_EVB_MODE_VEPA (0x2UL << 0) + u8 unused_5; + __le16 unused_6; + __le32 alloc_mcast_filters; + __le32 alloc_hw_ring_grps; + u8 unused_7; + u8 unused_8; + u8 unused_9; + u8 valid; +}; + /* hwrm_func_cfg */ /* Input (88 bytes) */ struct hwrm_func_cfg_input { @@ -1171,6 +1335,7 @@ struct hwrm_func_drv_rgtr_input { #define FUNC_DRV_RGTR_REQ_OS_TYPE_UNKNOWN (0x0UL << 0) #define FUNC_DRV_RGTR_REQ_OS_TYPE_OTHER (0x1UL << 0) #define FUNC_DRV_RGTR_REQ_OS_TYPE_MSDOS (0xeUL << 0) + #define FUNC_DRV_RGTR_REQ_OS_TYPE_WINDOWS (0x12UL << 0) #define FUNC_DRV_RGTR_REQ_OS_TYPE_SOLARIS 
(0x1dUL << 0) #define FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX (0x24UL << 0) #define FUNC_DRV_RGTR_REQ_OS_TYPE_FREEBSD (0x2aUL << 0) @@ -1302,6 +1467,7 @@ struct hwrm_func_drv_qver_output { #define FUNC_DRV_QVER_RESP_OS_TYPE_UNKNOWN (0x0UL << 0) #define FUNC_DRV_QVER_RESP_OS_TYPE_OTHER (0x1UL << 0) #define FUNC_DRV_QVER_RESP_OS_TYPE_MSDOS (0xeUL << 0) + #define FUNC_DRV_QVER_RESP_OS_TYPE_WINDOWS (0x12UL << 0) #define FUNC_DRV_QVER_RESP_OS_TYPE_SOLARIS (0x1dUL << 0) #define FUNC_DRV_QVER_RESP_OS_TYPE_LINUX (0x24UL << 0) #define FUNC_DRV_QVER_RESP_OS_TYPE_FREEBSD (0x2aUL << 0) @@ -1317,7 +1483,7 @@ struct hwrm_func_drv_qver_output { }; /* hwrm_port_phy_cfg */ -/* Input (48 bytes) */ +/* Input (56 bytes) */ struct hwrm_port_phy_cfg_input { __le16 req_type; __le16 cmpl_ring; @@ -1329,6 +1495,10 @@ struct hwrm_port_phy_cfg_input { #define PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DOWN 0x2UL #define PORT_PHY_CFG_REQ_FLAGS_FORCE 0x4UL #define PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG 0x8UL + #define PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE 0x10UL + #define PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE 0x20UL + #define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE 0x40UL + #define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE 0x80UL __le32 enables; #define PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE 0x1UL #define PORT_PHY_CFG_REQ_ENABLES_AUTO_DUPLEX 0x2UL @@ -1339,6 +1509,8 @@ struct hwrm_port_phy_cfg_input { #define PORT_PHY_CFG_REQ_ENABLES_LPBK 0x40UL #define PORT_PHY_CFG_REQ_ENABLES_PREEMPHASIS 0x80UL #define PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE 0x100UL + #define PORT_PHY_CFG_REQ_ENABLES_EEE_LINK_SPEED_MASK 0x200UL + #define PORT_PHY_CFG_REQ_ENABLES_TX_LPI_TIMER 0x400UL __le16 port_id; __le16 force_link_speed; #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100MB (0x1UL << 0) @@ -1350,12 +1522,14 @@ struct hwrm_port_phy_cfg_input { #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB (0xfaUL << 0) #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB (0x190UL << 0) #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB (0x1f4UL << 0) + #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100GB (0x3e8UL << 0) + #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10MB (0xffffUL << 0) u8 auto_mode; #define PORT_PHY_CFG_REQ_AUTO_MODE_NONE (0x0UL << 0) #define PORT_PHY_CFG_REQ_AUTO_MODE_ALL_SPEEDS (0x1UL << 0) #define PORT_PHY_CFG_REQ_AUTO_MODE_ONE_SPEED (0x2UL << 0) #define PORT_PHY_CFG_REQ_AUTO_MODE_ONE_OR_BELOW (0x3UL << 0) - #define PORT_PHY_CFG_REQ_AUTO_MODE_MASK (0x4UL << 0) + #define PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK (0x4UL << 0) u8 auto_duplex; #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_HALF (0x0UL << 0) #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_FULL (0x1UL << 0) @@ -1363,6 +1537,7 @@ struct hwrm_port_phy_cfg_input { u8 auto_pause; #define PORT_PHY_CFG_REQ_AUTO_PAUSE_TX 0x1UL #define PORT_PHY_CFG_REQ_AUTO_PAUSE_RX 0x2UL + #define PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE 0x4UL u8 unused_0; __le16 auto_link_speed; #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100MB (0x1UL << 0) @@ -1374,6 +1549,8 @@ struct hwrm_port_phy_cfg_input { #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_25GB (0xfaUL << 0) #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_40GB (0x190UL << 0) #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_50GB (0x1f4UL << 0) + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100GB (0x3e8UL << 0) + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10MB (0xffffUL << 0) __le16 auto_link_speed_mask; #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100MBHD 0x1UL #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100MB 0x2UL @@ -1386,6 +1563,9 @@ struct hwrm_port_phy_cfg_input { #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_25GB 0x100UL #define 
PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_40GB 0x200UL #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_50GB 0x400UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100GB 0x800UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10MBHD 0x1000UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10MB 0x2000UL u8 wirespeed; #define PORT_PHY_CFG_REQ_WIRESPEED_OFF (0x0UL << 0) #define PORT_PHY_CFG_REQ_WIRESPEED_ON (0x1UL << 0) @@ -1398,7 +1578,20 @@ struct hwrm_port_phy_cfg_input { #define PORT_PHY_CFG_REQ_FORCE_PAUSE_RX 0x2UL u8 unused_1; __le32 preemphasis; - __le32 unused_2; + __le16 eee_link_speed_mask; + #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD1 0x1UL + #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_100MB 0x2UL + #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD2 0x4UL + #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_1GB 0x8UL + #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD3 0x10UL + #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD4 0x20UL + #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_10GB 0x40UL + u8 unused_2; + u8 unused_3; + __le32 tx_lpi_timer; + __le32 unused_4; + #define PORT_PHY_CFG_REQ_TX_LPI_TIMER_MASK 0xffffffUL + #define PORT_PHY_CFG_REQ_TX_LPI_TIMER_SFT 0 }; /* Output (16 bytes) */ @@ -1426,7 +1619,7 @@ struct hwrm_port_phy_qcfg_input { __le16 unused_0[3]; }; -/* Output (48 bytes) */ +/* Output (96 bytes) */ struct hwrm_port_phy_qcfg_output { __le16 error_code; __le16 req_type; @@ -1447,6 +1640,8 @@ struct hwrm_port_phy_qcfg_output { #define PORT_PHY_QCFG_RESP_LINK_SPEED_25GB (0xfaUL << 0) #define PORT_PHY_QCFG_RESP_LINK_SPEED_40GB (0x190UL << 0) #define PORT_PHY_QCFG_RESP_LINK_SPEED_50GB (0x1f4UL << 0) + #define PORT_PHY_QCFG_RESP_LINK_SPEED_100GB (0x3e8UL << 0) + #define PORT_PHY_QCFG_RESP_LINK_SPEED_10MB (0xffffUL << 0) u8 duplex; #define PORT_PHY_QCFG_RESP_DUPLEX_HALF (0x0UL << 0) #define PORT_PHY_QCFG_RESP_DUPLEX_FULL (0x1UL << 0) @@ -1465,6 +1660,9 @@ struct hwrm_port_phy_qcfg_output { #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_25GB 0x100UL #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_40GB 0x200UL #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_50GB 0x400UL + #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100GB 0x800UL + #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10MBHD 0x1000UL + #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10MB 0x2000UL __le16 force_link_speed; #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100MB (0x1UL << 0) #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_1GB (0xaUL << 0) @@ -1475,15 +1673,18 @@ struct hwrm_port_phy_qcfg_output { #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_25GB (0xfaUL << 0) #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_40GB (0x190UL << 0) #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_50GB (0x1f4UL << 0) + #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100GB (0x3e8UL << 0) + #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10MB (0xffffUL << 0) u8 auto_mode; #define PORT_PHY_QCFG_RESP_AUTO_MODE_NONE (0x0UL << 0) #define PORT_PHY_QCFG_RESP_AUTO_MODE_ALL_SPEEDS (0x1UL << 0) #define PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_SPEED (0x2UL << 0) #define PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_OR_BELOW (0x3UL << 0) - #define PORT_PHY_QCFG_RESP_AUTO_MODE_MASK (0x4UL << 0) + #define PORT_PHY_QCFG_RESP_AUTO_MODE_SPEED_MASK (0x4UL << 0) u8 auto_pause; #define PORT_PHY_QCFG_RESP_AUTO_PAUSE_TX 0x1UL #define PORT_PHY_QCFG_RESP_AUTO_PAUSE_RX 0x2UL + #define PORT_PHY_QCFG_RESP_AUTO_PAUSE_AUTONEG_PAUSE 0x4UL __le16 auto_link_speed; #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_100MB (0x1UL << 0) #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_1GB (0xaUL << 0) @@ -1494,6 +1695,8 @@ struct hwrm_port_phy_qcfg_output 
{ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_25GB (0xfaUL << 0) #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_40GB (0x190UL << 0) #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_50GB (0x1f4UL << 0) + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_100GB (0x3e8UL << 0) + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10MB (0xffffUL << 0) __le16 auto_link_speed_mask; #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100MBHD 0x1UL #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100MB 0x2UL @@ -1506,6 +1709,9 @@ struct hwrm_port_phy_qcfg_output { #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_25GB 0x100UL #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_40GB 0x200UL #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_50GB 0x400UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100GB 0x800UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10MBHD 0x1000UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10MB 0x2000UL u8 wirespeed; #define PORT_PHY_QCFG_RESP_WIRESPEED_OFF (0x0UL << 0) #define PORT_PHY_QCFG_RESP_WIRESPEED_ON (0x1UL << 0) @@ -1516,31 +1722,49 @@ struct hwrm_port_phy_qcfg_output { u8 force_pause; #define PORT_PHY_QCFG_RESP_FORCE_PAUSE_TX 0x1UL #define PORT_PHY_QCFG_RESP_FORCE_PAUSE_RX 0x2UL - u8 reserved1; + u8 module_status; + #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NONE (0x0UL << 0) + #define PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX (0x1UL << 0) + #define PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG (0x2UL << 0) + #define PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN (0x3UL << 0) + #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTINSERTED (0x4UL << 0) + #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTAPPLICABLE (0xffUL << 0) __le32 preemphasis; u8 phy_maj; u8 phy_min; u8 phy_bld; u8 phy_type; - #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASECR4 (0x1UL << 0) + #define PORT_PHY_QCFG_RESP_PHY_TYPE_UNKNOWN (0x0UL << 0) + #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASECR (0x1UL << 0) #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR4 (0x2UL << 0) - #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASELR4 (0x3UL << 0) - #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASESR4 (0x4UL << 0) + #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASELR (0x3UL << 0) + #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASESR (0x4UL << 0) #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR2 (0x5UL << 0) - #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKX4 (0x6UL << 0) + #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKX (0x6UL << 0) #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR (0x7UL << 0) #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASET (0x8UL << 0) + #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASETE (0x9UL << 0) + #define PORT_PHY_QCFG_RESP_PHY_TYPE_SGMIIEXTPHY (0xaUL << 0) u8 media_type; + #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_UNKNOWN (0x0UL << 0) #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP (0x1UL << 0) #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC (0x2UL << 0) #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_FIBRE (0x3UL << 0) - u8 transceiver_type; - #define PORT_PHY_QCFG_RESP_TRANSCEIVER_TYPE_XCVR_INTERNAL (0x1UL << 0) - #define PORT_PHY_QCFG_RESP_TRANSCEIVER_TYPE_XCVR_EXTERNAL (0x2UL << 0) - u8 phy_addr; + u8 xcvr_pkg_type; + #define PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_INTERNAL (0x1UL << 0) + #define PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_EXTERNAL (0x2UL << 0) + u8 eee_config_phy_addr; #define PORT_PHY_QCFG_RESP_PHY_ADDR_MASK 0x1fUL #define PORT_PHY_QCFG_RESP_PHY_ADDR_SFT 0 - u8 unused_2; + #define PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED 0x20UL + #define PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE 0x40UL + #define PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI 0x80UL + #define PORT_PHY_QCFG_RESP_EEE_CONFIG_MASK 0xe0UL + #define 
PORT_PHY_QCFG_RESP_EEE_CONFIG_SFT 5 + u8 parallel_detect; + #define PORT_PHY_QCFG_RESP_PARALLEL_DETECT 0x1UL + #define PORT_PHY_QCFG_RESP_RESERVED_MASK 0xfeUL + #define PORT_PHY_QCFG_RESP_RESERVED_SFT 1 __le16 link_partner_adv_speeds; #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100MBHD 0x1UL #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100MB 0x2UL @@ -1553,15 +1777,48 @@ struct hwrm_port_phy_qcfg_output { #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_25GB 0x100UL #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_40GB 0x200UL #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_50GB 0x400UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100GB 0x800UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_10MBHD 0x1000UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_10MB 0x2000UL u8 link_partner_adv_auto_mode; #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_NONE (0x0UL << 0) #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ALL_SPEEDS (0x1UL << 0) #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ONE_SPEED (0x2UL << 0) #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ONE_OR_BELOW (0x3UL << 0) - #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_MASK (0x4UL << 0) + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_SPEED_MASK (0x4UL << 0) u8 link_partner_adv_pause; #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_PAUSE_TX 0x1UL #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_PAUSE_RX 0x2UL + __le16 adv_eee_link_speed_mask; + #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD1 0x1UL + #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_100MB 0x2UL + #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD2 0x4UL + #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_1GB 0x8UL + #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD3 0x10UL + #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD4 0x20UL + #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_10GB 0x40UL + __le16 link_partner_adv_eee_link_speed_mask; + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD1 0x1UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_100MB 0x2UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD2 0x4UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_1GB 0x8UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD3 0x10UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD4 0x20UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_10GB 0x40UL + __le32 xcvr_identifier_type_tx_lpi_timer; + #define PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK 0xffffffUL + #define PORT_PHY_QCFG_RESP_TX_LPI_TIMER_SFT 0 + #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_MASK 0xff000000UL + #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_SFT 24 + #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_UNKNOWN (0x0UL << 24) + #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_SFP (0x3UL << 24) + #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP (0xcUL << 24) + #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFPPLUS (0xdUL << 24) + #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP28 (0x11UL << 24) + __le32 unused_1; + char phy_vendor_name[16]; + char phy_vendor_partnumber[16]; + __le32 unused_2; u8 unused_3; u8 unused_4; u8 unused_5; @@ -1569,7 +1826,7 @@ struct hwrm_port_phy_qcfg_output { }; /* hwrm_port_mac_cfg */ -/* Input (32 bytes) */ +/* Input (40 bytes) */ struct hwrm_port_mac_cfg_input { __le16 req_type; __le16 cmpl_ring; @@ -1581,6 +1838,10 @@ struct 
hwrm_port_mac_cfg_input { #define PORT_MAC_CFG_REQ_FLAGS_COS_ASSIGNMENT_ENABLE 0x2UL #define PORT_MAC_CFG_REQ_FLAGS_TUNNEL_PRI2COS_ENABLE 0x4UL #define PORT_MAC_CFG_REQ_FLAGS_IP_DSCP2COS_ENABLE 0x8UL + #define PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_ENABLE 0x10UL + #define PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_DISABLE 0x20UL + #define PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_ENABLE 0x40UL + #define PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_DISABLE 0x80UL __le32 enables; #define PORT_MAC_CFG_REQ_ENABLES_IPG 0x1UL #define PORT_MAC_CFG_REQ_ENABLES_LPBK 0x2UL @@ -1588,6 +1849,8 @@ struct hwrm_port_mac_cfg_input { #define PORT_MAC_CFG_REQ_ENABLES_LCOS_MAP_PRI 0x8UL #define PORT_MAC_CFG_REQ_ENABLES_TUNNEL_PRI2COS_MAP_PRI 0x10UL #define PORT_MAC_CFG_REQ_ENABLES_DSCP2COS_MAP_PRI 0x20UL + #define PORT_MAC_CFG_REQ_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE 0x40UL + #define PORT_MAC_CFG_REQ_ENABLES_TX_TS_CAPTURE_PTP_MSG_TYPE 0x80UL __le16 port_id; u8 ipg; u8 lpbk; @@ -1598,6 +1861,9 @@ struct hwrm_port_mac_cfg_input { u8 lcos_map_pri; u8 tunnel_pri2cos_map_pri; u8 dscp2pri_map_pri; + __le16 rx_ts_capture_ptp_msg_type; + __le16 tx_ts_capture_ptp_msg_type; + __le32 unused_0; }; /* Output (16 bytes) */ @@ -1754,7 +2020,79 @@ struct hwrm_port_blink_led_output { u8 valid; }; -/* hwrm_queue_qportcfg */ +/* hwrm_port_phy_qcaps */ +/* Input (24 bytes) */ +struct hwrm_port_phy_qcaps_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 port_id; + __le16 unused_0[3]; +}; + +/* Output (24 bytes) */ +struct hwrm_port_phy_qcaps_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 eee_supported; + #define PORT_PHY_QCAPS_RESP_EEE_SUPPORTED 0x1UL + #define PORT_PHY_QCAPS_RESP_RSVD1_MASK 0xfeUL + #define PORT_PHY_QCAPS_RESP_RSVD1_SFT 1 + u8 unused_0; + __le16 supported_speeds_force_mode; + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100MBHD 0x1UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100MB 0x2UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_1GBHD 0x4UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_1GB 0x8UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_2GB 0x10UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_2_5GB 0x20UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10GB 0x40UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_20GB 0x80UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_25GB 0x100UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_40GB 0x200UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_50GB 0x400UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100GB 0x800UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10MBHD 0x1000UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10MB 0x2000UL + __le16 supported_speeds_auto_mode; + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100MBHD 0x1UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100MB 0x2UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_1GBHD 0x4UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_1GB 0x8UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_2GB 0x10UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_2_5GB 0x20UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10GB 0x40UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_20GB 0x80UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_25GB 0x100UL + #define 
PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_40GB 0x200UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_50GB 0x400UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100GB 0x800UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10MBHD 0x1000UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10MB 0x2000UL + __le16 supported_speeds_eee_mode; + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD1 0x1UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_100MB 0x2UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD2 0x4UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_1GB 0x8UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD3 0x10UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD4 0x20UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_10GB 0x40UL + __le32 tx_lpi_timer_low; + #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK 0xffffffUL + #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_SFT 0 + #define PORT_PHY_QCAPS_RESP_RSVD2_MASK 0xff000000UL + #define PORT_PHY_QCAPS_RESP_RSVD2_SFT 24 + __le32 valid_tx_lpi_timer_high; + #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK 0xffffffUL + #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_SFT 0 + #define PORT_PHY_QCAPS_RESP_VALID_MASK 0xff000000UL + #define PORT_PHY_QCAPS_RESP_VALID_SFT 24 +}; + /* Input (24 bytes) */ struct hwrm_queue_qportcfg_input { __le16 req_type; @@ -1766,6 +2104,7 @@ struct hwrm_queue_qportcfg_input { #define QUEUE_QPORTCFG_REQ_FLAGS_PATH 0x1UL #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_TX (0x0UL << 0) #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_RX (0x1UL << 0) + #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_LAST QUEUE_QPORTCFG_REQ_FLAGS_PATH_RX __le16 port_id; __le16 unused_0; }; @@ -1838,6 +2177,7 @@ struct hwrm_queue_cfg_input { #define QUEUE_CFG_REQ_FLAGS_PATH 0x1UL #define QUEUE_CFG_REQ_FLAGS_PATH_TX (0x0UL << 0) #define QUEUE_CFG_REQ_FLAGS_PATH_RX (0x1UL << 0) + #define QUEUE_CFG_REQ_FLAGS_PATH_LAST QUEUE_CFG_REQ_FLAGS_PATH_RX __le32 enables; #define QUEUE_CFG_REQ_ENABLES_DFLT_LEN 0x1UL #define QUEUE_CFG_REQ_ENABLES_SERVICE_PROFILE 0x2UL @@ -1875,6 +2215,7 @@ struct hwrm_queue_buffers_cfg_input { #define QUEUE_BUFFERS_CFG_REQ_FLAGS_PATH 0x1UL #define QUEUE_BUFFERS_CFG_REQ_FLAGS_PATH_TX (0x0UL << 0) #define QUEUE_BUFFERS_CFG_REQ_FLAGS_PATH_RX (0x1UL << 0) + #define QUEUE_BUFFERS_CFG_REQ_FLAGS_PATH_LAST QUEUE_BUFFERS_CFG_REQ_FLAGS_PATH_RX __le32 enables; #define QUEUE_BUFFERS_CFG_REQ_ENABLES_RESERVED 0x1UL #define QUEUE_BUFFERS_CFG_REQ_ENABLES_SHARED 0x2UL @@ -1952,6 +2293,7 @@ struct hwrm_queue_pri2cos_cfg_input { #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH 0x1UL #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_TX (0x0UL << 0) #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_RX (0x1UL << 0) + #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_LAST QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_RX #define QUEUE_PRI2COS_CFG_REQ_FLAGS_IVLAN 0x2UL __le32 enables; u8 port_id; @@ -2158,6 +2500,8 @@ struct hwrm_vnic_cfg_input { #define VNIC_CFG_REQ_FLAGS_DEFAULT 0x1UL #define VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE 0x2UL #define VNIC_CFG_REQ_FLAGS_BD_STALL_MODE 0x4UL + #define VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE 0x8UL + #define VNIC_CFG_REQ_FLAGS_ROCE_ONLY_VNIC_MODE 0x10UL __le32 enables; #define VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP 0x1UL #define VNIC_CFG_REQ_ENABLES_RSS_RULE 0x2UL @@ -2622,6 +2966,7 @@ struct hwrm_cfa_l2_filter_alloc_input { #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH 0x1UL #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_TX (0x0UL << 0) #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX 
(0x1UL << 0) + #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_LAST CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x2UL #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_DROP 0x4UL #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST 0x8UL @@ -2747,6 +3092,7 @@ struct hwrm_cfa_l2_filter_cfg_input { #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH 0x1UL #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_TX (0x0UL << 0) #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX (0x1UL << 0) + #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX #define CFA_L2_FILTER_CFG_REQ_FLAGS_DROP 0x2UL __le32 enables; #define CFA_L2_FILTER_CFG_REQ_ENABLES_DST_ID 0x1UL @@ -3337,6 +3683,41 @@ struct hwrm_fw_reset_output { u8 valid; }; +/* hwrm_fw_qstatus */ +/* Input (24 bytes) */ +struct hwrm_fw_qstatus_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 embedded_proc_type; + #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_BOOT (0x0UL << 0) + #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_MGMT (0x1UL << 0) + #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_NETCTRL (0x2UL << 0) + #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_ROCE (0x3UL << 0) + #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_RSVD (0x4UL << 0) + u8 unused_0[7]; +}; + +/* Output (16 bytes) */ +struct hwrm_fw_qstatus_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 selfrst_status; + #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTNONE (0x0UL << 0) + #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTASAP (0x1UL << 0) + #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTPCIERST (0x2UL << 0) + u8 unused_0; + __le16 unused_1; + u8 unused_2; + u8 unused_3; + u8 unused_4; + u8 valid; +}; + /* hwrm_exec_fwd_resp */ /* Input (128 bytes) */ struct hwrm_exec_fwd_resp_input { diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h index 43ef392c8588..40a7b0e09612 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h @@ -1,6 +1,6 @@ /* Broadcom NetXtreme-C/E network driver. * - * Copyright (c) 2014-2015 Broadcom Corporation + * Copyright (c) 2014-2016 Broadcom Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c index 0c5f510492f1..8457850b0bdd 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c @@ -1,6 +1,6 @@ /* Broadcom NetXtreme-C/E network driver. 
* - * Copyright (c) 2014-2015 Broadcom Corporation + * Copyright (c) 2014-2016 Broadcom Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -771,12 +771,8 @@ static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf) PORT_PHY_QCFG_RESP_LINK_NO_LINK) { phy_qcfg_resp.link = PORT_PHY_QCFG_RESP_LINK_LINK; - if (phy_qcfg_resp.auto_link_speed) - phy_qcfg_resp.link_speed = - phy_qcfg_resp.auto_link_speed; - else - phy_qcfg_resp.link_speed = - phy_qcfg_resp.force_link_speed; + phy_qcfg_resp.link_speed = cpu_to_le16( + PORT_PHY_QCFG_RESP_LINK_SPEED_10GB); phy_qcfg_resp.duplex = PORT_PHY_QCFG_RESP_DUPLEX_FULL; phy_qcfg_resp.pause = @@ -859,8 +855,8 @@ void bnxt_update_vf_mac(struct bnxt *bp) * default but the stored zero MAC will allow the VF user to change * the random MAC address using ndo_set_mac_address() if he wants. */ - if (!ether_addr_equal(resp->perm_mac_address, bp->vf.mac_addr)) - memcpy(bp->vf.mac_addr, resp->perm_mac_address, ETH_ALEN); + if (!ether_addr_equal(resp->mac_address, bp->vf.mac_addr)) + memcpy(bp->vf.mac_addr, resp->mac_address, ETH_ALEN); /* overwrite netdev dev_addr with admin VF MAC */ if (is_valid_ether_addr(bp->vf.mac_addr)) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h index c151280e3980..3f08354a247e 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h @@ -1,6 +1,6 @@ /* Broadcom NetXtreme-C/E network driver. * - * Copyright (c) 2014-2015 Broadcom Corporation + * Copyright (c) 2014-2016 Broadcom Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by From c9ee9516c161da2d072e035907aa35a35dfa68a8 Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Tue, 5 Apr 2016 14:08:56 -0400 Subject: [PATCH 0284/1649] bnxt_en: Improve flow control autoneg with Firmware 1.2.1 interface. Make use of the new AUTONEG_PAUSE bit in the new interface to better control autoneg flow control settings, independent of RX and TX advertisement settings. Signed-off-by: Michael Chan Signed-off-by: David S. 
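A minimal userspace sketch (illustrative only, not part of this patch) of the knob being improved here: pause autonegotiation driven through the generic ethtool ioctl, which is the path that reaches the driver's set_pauseparam handler. Only uapi definitions are used; the interface name "eth0" is a placeholder.

/* Illustrative only: query and then request autonegotiated flow control. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
        struct ethtool_pauseparam pp = { .cmd = ETHTOOL_GPAUSEPARAM };
        struct ifreq ifr;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        if (fd < 0)
                return 1;

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
        ifr.ifr_data = (void *)&pp;

        /* Read the current pause configuration. */
        if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
                printf("autoneg=%u rx=%u tx=%u\n",
                       pp.autoneg, pp.rx_pause, pp.tx_pause);

        /* Ask for autonegotiated flow control in both directions; with the
         * 1.2.1 firmware interface the driver can express "autoneg pause"
         * independently of the rx/tx advertisement bits.
         */
        pp.cmd = ETHTOOL_SPAUSEPARAM;
        pp.autoneg = 1;
        pp.rx_pause = 1;
        pp.tx_pause = 1;
        if (ioctl(fd, SIOCETHTOOL, &ifr) != 0)
                perror("ETHTOOL_SPAUSEPARAM");

        close(fd);
        return 0;
}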
Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 28 +++++++++++++++---- .../net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 10 +++---- 2 files changed, 27 insertions(+), 11 deletions(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index bfe98cbcefca..2b5a54162f80 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -4557,6 +4557,9 @@ static void bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req) { if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) { + if (bp->hwrm_spec_code >= 0x10201) + req->auto_pause = + PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE; if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX) req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX; if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX) @@ -4570,6 +4573,11 @@ bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req) req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX; req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE); + if (bp->hwrm_spec_code >= 0x10201) { + req->auto_pause = req->force_pause; + req->enables |= cpu_to_le32( + PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE); + } } } @@ -4656,7 +4664,8 @@ static int bnxt_update_phy_setting(struct bnxt *bp) return rc; } if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) && - link_info->auto_pause_setting != link_info->req_flow_ctrl) + (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) != + link_info->req_flow_ctrl) update_pause = true; if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) && link_info->force_pause_setting != link_info->req_flow_ctrl) @@ -5825,15 +5834,24 @@ static int bnxt_probe_phy(struct bnxt *bp) /*initialize the ethool setting copy with NVM settings */ if (BNXT_AUTO_MODE(link_info->auto_mode)) { - link_info->autoneg = BNXT_AUTONEG_SPEED | - BNXT_AUTONEG_FLOW_CTRL; + link_info->autoneg = BNXT_AUTONEG_SPEED; + if (bp->hwrm_spec_code >= 0x10201) { + if (link_info->auto_pause_setting & + PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE) + link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL; + } else { + link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL; + } link_info->advertising = link_info->auto_link_speeds; - link_info->req_flow_ctrl = link_info->auto_pause_setting; } else { link_info->req_link_speed = link_info->force_link_speed; link_info->req_duplex = link_info->duplex_setting; - link_info->req_flow_ctrl = link_info->force_pause_setting; } + if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) + link_info->req_flow_ctrl = + link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH; + else + link_info->req_flow_ctrl = link_info->force_pause_setting; return rc; } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index f103f9b06e6d..99b1740781d5 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -874,7 +874,9 @@ static int bnxt_set_pauseparam(struct net_device *dev, return -EINVAL; link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL; - link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_BOTH; + if (bp->hwrm_spec_code >= 0x10201) + link_info->req_flow_ctrl = + PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE; } else { /* when transition from auto pause to force pause, * force a link change @@ -882,17 +884,13 @@ static int bnxt_set_pauseparam(struct net_device *dev, if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) link_info->force_link_chng = true; link_info->autoneg &= ~BNXT_AUTONEG_FLOW_CTRL; - 
link_info->req_flow_ctrl &= ~BNXT_LINK_PAUSE_BOTH; + link_info->req_flow_ctrl = 0; } if (epause->rx_pause) link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_RX; - else - link_info->req_flow_ctrl &= ~BNXT_LINK_PAUSE_RX; if (epause->tx_pause) link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_TX; - else - link_info->req_flow_ctrl &= ~BNXT_LINK_PAUSE_TX; if (netif_running(dev)) rc = bnxt_hwrm_set_pause(bp); From 170ce01301a2a1a87808765531d938fa0b023641 Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Tue, 5 Apr 2016 14:08:57 -0400 Subject: [PATCH 0285/1649] bnxt_en: Add basic EEE support. Get EEE capability and the initial EEE settings from firmware. Add "EEE is active | not active" to link up dmesg. Signed-off-by: Michael Chan Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 82 ++++++++++++++++++- drivers/net/ethernet/broadcom/bnxt/bnxt.h | 4 + .../net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 2 +- .../net/ethernet/broadcom/bnxt/bnxt_ethtool.h | 1 + 4 files changed, 87 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 2b5a54162f80..7442e206760f 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -4488,12 +4488,49 @@ static void bnxt_report_link(struct bnxt *bp) speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed); netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n", speed, duplex, flow_ctrl); + if (bp->flags & BNXT_FLAG_EEE_CAP) + netdev_info(bp->dev, "EEE is %s\n", + bp->eee.eee_active ? "active" : + "not active"); } else { netif_carrier_off(bp->dev); netdev_err(bp->dev, "NIC Link is Down\n"); } } +static int bnxt_hwrm_phy_qcaps(struct bnxt *bp) +{ + int rc = 0; + struct hwrm_port_phy_qcaps_input req = {0}; + struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr; + + if (bp->hwrm_spec_code < 0x10201) + return 0; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCAPS, -1, -1); + + mutex_lock(&bp->hwrm_cmd_lock); + rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (rc) + goto hwrm_phy_qcaps_exit; + + if (resp->eee_supported & PORT_PHY_QCAPS_RESP_EEE_SUPPORTED) { + struct ethtool_eee *eee = &bp->eee; + u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode); + + bp->flags |= BNXT_FLAG_EEE_CAP; + eee->supported = _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0); + bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) & + PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK; + bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) & + PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK; + } + +hwrm_phy_qcaps_exit: + mutex_unlock(&bp->hwrm_cmd_lock); + return rc; +} + static int bnxt_update_link(struct bnxt *bp, bool chng_link_state) { int rc = 0; @@ -4535,8 +4572,44 @@ static int bnxt_update_link(struct bnxt *bp, bool chng_link_state) link_info->phy_ver[2] = resp->phy_bld; link_info->media_type = resp->media_type; link_info->transceiver = resp->xcvr_pkg_type; - link_info->phy_addr = resp->eee_config_phy_addr; + link_info->phy_addr = resp->eee_config_phy_addr & + PORT_PHY_QCFG_RESP_PHY_ADDR_MASK; + if (bp->flags & BNXT_FLAG_EEE_CAP) { + struct ethtool_eee *eee = &bp->eee; + u16 fw_speeds; + + eee->eee_active = 0; + if (resp->eee_config_phy_addr & + PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE) { + eee->eee_active = 1; + fw_speeds = le16_to_cpu( + resp->link_partner_adv_eee_link_speed_mask); + eee->lp_advertised = + _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0); + } + + /* Pull initial EEE config */ + if 
(!chng_link_state) { + if (resp->eee_config_phy_addr & + PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED) + eee->eee_enabled = 1; + + fw_speeds = le16_to_cpu(resp->adv_eee_link_speed_mask); + eee->advertised = + _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0); + + if (resp->eee_config_phy_addr & + PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI) { + __le32 tmr; + + eee->tx_lpi_enabled = 1; + tmr = resp->xcvr_identifier_type_tx_lpi_timer; + eee->tx_lpi_timer = le32_to_cpu(tmr) & + PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK; + } + } + } /* TODO: need to add more logic to report VF link */ if (chng_link_state) { if (link_info->phy_link_status == BNXT_LINK_LINK) @@ -5825,6 +5898,13 @@ static int bnxt_probe_phy(struct bnxt *bp) int rc = 0; struct bnxt_link_info *link_info = &bp->link_info; + rc = bnxt_hwrm_phy_qcaps(bp); + if (rc) { + netdev_err(bp->dev, "Probe phy can't get phy capabilities (rc: %x)\n", + rc); + return rc; + } + rc = bnxt_update_link(bp, false); if (rc) { netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n", diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index e98c37ae81f2..5e8340523cf6 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -874,6 +874,7 @@ struct bnxt { #define BNXT_FLAG_RFS 0x100 #define BNXT_FLAG_SHARED_RINGS 0x200 #define BNXT_FLAG_PORT_STATS 0x400 + #define BNXT_FLAG_EEE_CAP 0x1000 #define BNXT_FLAG_ALL_CONFIG_FEATS (BNXT_FLAG_TPA | \ BNXT_FLAG_RFS | \ @@ -1011,6 +1012,9 @@ struct bnxt { int ntp_fltr_count; struct bnxt_link_info link_info; + struct ethtool_eee eee; + u32 lpi_tmr_lo; + u32 lpi_tmr_hi; }; #ifdef CONFIG_NET_RX_BUSY_POLL diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index 99b1740781d5..bdc62209a9c1 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -597,7 +597,7 @@ static void bnxt_get_drvinfo(struct net_device *dev, kfree(pkglog); } -static u32 _bnxt_fw_to_ethtool_adv_spds(u16 fw_speeds, u8 fw_pause) +u32 _bnxt_fw_to_ethtool_adv_spds(u16 fw_speeds, u8 fw_pause) { u32 speed_mask = 0; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h index b2d8bd3a37fb..e061f8f7a3ae 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h @@ -12,6 +12,7 @@ extern const struct ethtool_ops bnxt_ethtool_ops; +u32 _bnxt_fw_to_ethtool_adv_spds(u16, u8); u32 bnxt_fw_to_ethtool_speed(u16); #endif From 939f7f0ca442187db2a4ec7a40979c711b0c939e Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Tue, 5 Apr 2016 14:08:58 -0400 Subject: [PATCH 0286/1649] bnxt_en: Add EEE setup code. 1. Add bnxt_hwrm_set_eee() function to setup EEE firmware parameters based on the bp->eee settings. 2. The new function bnxt_eee_config_ok() will check if EEE parameters need to be modified due to autoneg changes. 3. bnxt_hwrm_set_link() has added a new parameter to update EEE. If the parameter is set, it will call bnxt_hwrm_set_eee(). Signed-off-by: Michael Chan Signed-off-by: David S. 
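A stand-alone sketch of the consistency rule in point 2 above (illustrative only; the field names are generic placeholders, not the bnxt/HWRM ones): EEE can only be advertised when speed autoneg is on, and the EEE advertisement must stay a subset of the autonegotiated speeds. When the check fails, the corrected values would be sent down with the link configuration, which is what the new update-EEE parameter enables.

/* Illustrative only: generic version of the EEE sanity check. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct eee_cfg {
        bool     enabled;
        uint32_t advertised;     /* EEE speeds requested by the user */
};

/* Returns true when nothing had to be fixed up; otherwise clamps the EEE
 * request so a later "set link" call carries values the firmware accepts.
 */
bool eee_config_ok(struct eee_cfg *eee, bool autoneg_on,
                   uint32_t autoneg_advertised, uint32_t eee_supported)
{
        if (!eee->enabled)
                return true;

        if (!autoneg_on) {
                /* EEE requires speed autonegotiation: drop the request. */
                eee->enabled = false;
                return false;
        }

        if (eee->advertised & ~autoneg_advertised) {
                /* EEE advertisement must be a subset of the autoneg
                 * advertisement (and of what the PHY supports).
                 */
                eee->advertised = autoneg_advertised & eee_supported;
                return false;
        }

        return true;
}

int main(void)
{
        struct eee_cfg eee = { .enabled = true, .advertised = 0x48 };

        /* Autoneg is on, but only speed bit 0x40 is being advertised. */
        if (!eee_config_ok(&eee, true, 0x40, 0x48))
                printf("EEE advertisement clamped to 0x%x\n", eee.advertised);
        return 0;
}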
Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 58 ++++++++++++++++++- drivers/net/ethernet/broadcom/bnxt/bnxt.h | 2 +- .../net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 4 +- .../net/ethernet/broadcom/bnxt/bnxt_ethtool.h | 1 + 4 files changed, 60 insertions(+), 5 deletions(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 7442e206760f..2c3c7950bfea 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -4711,7 +4711,30 @@ int bnxt_hwrm_set_pause(struct bnxt *bp) return rc; } -int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause) +static void bnxt_hwrm_set_eee(struct bnxt *bp, + struct hwrm_port_phy_cfg_input *req) +{ + struct ethtool_eee *eee = &bp->eee; + + if (eee->eee_enabled) { + u16 eee_speeds; + u32 flags = PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE; + + if (eee->tx_lpi_enabled) + flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE; + else + flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE; + + req->flags |= cpu_to_le32(flags); + eee_speeds = bnxt_get_fw_auto_link_speeds(eee->advertised); + req->eee_link_speed_mask = cpu_to_le16(eee_speeds); + req->tx_lpi_timer = cpu_to_le32(eee->tx_lpi_timer); + } else { + req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE); + } +} + +int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause, bool set_eee) { struct hwrm_port_phy_cfg_input req = {0}; @@ -4720,14 +4743,42 @@ int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause) bnxt_hwrm_set_pause_common(bp, &req); bnxt_hwrm_set_link_common(bp, &req); + + if (set_eee) + bnxt_hwrm_set_eee(bp, &req); return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); } +static bool bnxt_eee_config_ok(struct bnxt *bp) +{ + struct ethtool_eee *eee = &bp->eee; + struct bnxt_link_info *link_info = &bp->link_info; + + if (!(bp->flags & BNXT_FLAG_EEE_CAP)) + return true; + + if (eee->eee_enabled) { + u32 advertising = + _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0); + + if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) { + eee->eee_enabled = 0; + return false; + } + if (eee->advertised & ~advertising) { + eee->advertised = advertising & eee->supported; + return false; + } + } + return true; +} + static int bnxt_update_phy_setting(struct bnxt *bp) { int rc; bool update_link = false; bool update_pause = false; + bool update_eee = false; struct bnxt_link_info *link_info = &bp->link_info; rc = bnxt_update_link(bp, true); @@ -4757,8 +4808,11 @@ static int bnxt_update_phy_setting(struct bnxt *bp) update_link = true; } + if (!bnxt_eee_config_ok(bp)) + update_eee = true; + if (update_link) - rc = bnxt_hwrm_set_link_setting(bp, update_pause); + rc = bnxt_hwrm_set_link_setting(bp, update_pause, update_eee); else if (update_pause) rc = bnxt_hwrm_set_pause(bp); if (rc) { diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 5e8340523cf6..a981e2c17107 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -1112,7 +1112,7 @@ int hwrm_send_message_silent(struct bnxt *, void *, u32, int); int bnxt_hwrm_set_coal(struct bnxt *); int bnxt_hwrm_func_qcaps(struct bnxt *); int bnxt_hwrm_set_pause(struct bnxt *); -int bnxt_hwrm_set_link_setting(struct bnxt *, bool); +int bnxt_hwrm_set_link_setting(struct bnxt *, bool, bool); int bnxt_open_nic(struct bnxt *, bool, bool); int bnxt_close_nic(struct bnxt *, bool, bool); int bnxt_get_max_rings(struct bnxt *, int *, int *, bool); diff --git 
a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index bdc62209a9c1..14f0520c5668 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -763,7 +763,7 @@ static u32 bnxt_get_fw_speed(struct net_device *dev, u16 ethtool_speed) return 0; } -static u16 bnxt_get_fw_auto_link_speeds(u32 advertising) +u16 bnxt_get_fw_auto_link_speeds(u32 advertising) { u16 fw_speed_mask = 0; @@ -840,7 +840,7 @@ static int bnxt_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) } if (netif_running(dev)) - rc = bnxt_hwrm_set_link_setting(bp, set_pause); + rc = bnxt_hwrm_set_link_setting(bp, set_pause, false); set_setting_exit: return rc; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h index e061f8f7a3ae..3abc03b60dbc 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h @@ -14,5 +14,6 @@ extern const struct ethtool_ops bnxt_ethtool_ops; u32 _bnxt_fw_to_ethtool_adv_spds(u16, u8); u32 bnxt_fw_to_ethtool_speed(u16); +u16 bnxt_get_fw_auto_link_speeds(u32); #endif From 72b34f04e0b00956dd679ae18bf2163669df8b56 Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Tue, 5 Apr 2016 14:08:59 -0400 Subject: [PATCH 0287/1649] bnxt_en: Add get_eee() and set_eee() ethtool support. Allow users to get|set EEE parameters. v2: Added comment for preserving the tx_lpi_timer value in get_eee. Signed-off-by: Michael Chan Signed-off-by: David S. Miller --- .../net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 76 +++++++++++++++++++ 1 file changed, 76 insertions(+) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index 14f0520c5668..47e08a8b1563 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -1379,6 +1379,80 @@ static int bnxt_set_eeprom(struct net_device *dev, eeprom->len); } +static int bnxt_set_eee(struct net_device *dev, struct ethtool_eee *edata) +{ + struct bnxt *bp = netdev_priv(dev); + struct ethtool_eee *eee = &bp->eee; + struct bnxt_link_info *link_info = &bp->link_info; + u32 advertising = + _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0); + int rc = 0; + + if (BNXT_VF(bp)) + return 0; + + if (!(bp->flags & BNXT_FLAG_EEE_CAP)) + return -EOPNOTSUPP; + + if (!edata->eee_enabled) + goto eee_ok; + + if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) { + netdev_warn(dev, "EEE requires autoneg\n"); + return -EINVAL; + } + if (edata->tx_lpi_enabled) { + if (bp->lpi_tmr_hi && (edata->tx_lpi_timer > bp->lpi_tmr_hi || + edata->tx_lpi_timer < bp->lpi_tmr_lo)) { + netdev_warn(dev, "Valid LPI timer range is %d and %d microsecs\n", + bp->lpi_tmr_lo, bp->lpi_tmr_hi); + return -EINVAL; + } else if (!bp->lpi_tmr_hi) { + edata->tx_lpi_timer = eee->tx_lpi_timer; + } + } + if (!edata->advertised) { + edata->advertised = advertising & eee->supported; + } else if (edata->advertised & ~advertising) { + netdev_warn(dev, "EEE advertised %x must be a subset of autoneg advertised speeds %x\n", + edata->advertised, advertising); + return -EINVAL; + } + + eee->advertised = edata->advertised; + eee->tx_lpi_enabled = edata->tx_lpi_enabled; + eee->tx_lpi_timer = edata->tx_lpi_timer; +eee_ok: + eee->eee_enabled = edata->eee_enabled; + + if (netif_running(dev)) + rc = bnxt_hwrm_set_link_setting(bp, false, true); + + return rc; +} + +static int bnxt_get_eee(struct 
net_device *dev, struct ethtool_eee *edata) +{ + struct bnxt *bp = netdev_priv(dev); + + if (!(bp->flags & BNXT_FLAG_EEE_CAP)) + return -EOPNOTSUPP; + + *edata = bp->eee; + if (!bp->eee.eee_enabled) { + /* Preserve tx_lpi_timer so that the last value will be used + * by default when it is re-enabled. + */ + edata->advertised = 0; + edata->tx_lpi_enabled = 0; + } + + if (!bp->eee.eee_active) + edata->lp_advertised = 0; + + return 0; +} + const struct ethtool_ops bnxt_ethtool_ops = { .get_settings = bnxt_get_settings, .set_settings = bnxt_set_settings, @@ -1407,4 +1481,6 @@ const struct ethtool_ops bnxt_ethtool_ops = { .get_eeprom = bnxt_get_eeprom, .set_eeprom = bnxt_set_eeprom, .get_link = bnxt_get_link, + .get_eee = bnxt_get_eee, + .set_eee = bnxt_set_eee, }; From 25be862370031056989ee76e3c48c3ac8ff67fd4 Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Tue, 5 Apr 2016 14:09:00 -0400 Subject: [PATCH 0288/1649] bnxt_en: Set async event bits when registering with the firmware. Currently, the driver only sets bit 0 of the async_event_fwd fields. To be compatible with the latest spec, we need to set the appropriate event bits handled by the driver. We should be handling link change and PF driver unload events, so these 2 bits should be set. Signed-off-by: Michael Chan Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 2c3c7950bfea..dd0b32c58a24 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -118,6 +118,11 @@ static const u16 bnxt_vf_req_snif[] = { HWRM_CFA_L2_FILTER_ALLOC, }; +static const u16 bnxt_async_events_arr[] = { + HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE, + HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD, +}; + static bool bnxt_vf_pciid(enum board_idx idx) { return (idx == BCM57304_VF || idx == BCM57404_VF); @@ -2751,6 +2756,8 @@ static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp) { struct hwrm_func_drv_rgtr_input req = {0}; int i; + DECLARE_BITMAP(async_events_bmap, 256); + u32 *events = (u32 *)async_events_bmap; bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1); @@ -2759,10 +2766,13 @@ static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp) FUNC_DRV_RGTR_REQ_ENABLES_VER | FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD); - /* TODO: current async event fwd bits are not defined and the firmware - * only checks if it is non-zero to enable async event forwarding - */ - req.async_event_fwd[0] |= cpu_to_le32(1); + memset(async_events_bmap, 0, sizeof(async_events_bmap)); + for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++) + __set_bit(bnxt_async_events_arr[i], async_events_bmap); + + for (i = 0; i < 8; i++) + req.async_event_fwd[i] |= cpu_to_le32(events[i]); + req.os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX); req.ver_maj = DRV_VER_MAJ; req.ver_min = DRV_VER_MIN; From 4bb13abf208cb484a9b9d1af9233b0ef850c2fe7 Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Tue, 5 Apr 2016 14:09:01 -0400 Subject: [PATCH 0289/1649] bnxt_en: Add unsupported SFP+ module warnings. Add the PORT_CONN_NOT_ALLOWED async event handling logic. The driver will print an appropriate warning to reflect the SFP+ module enforcement policy done in the firmware. Signed-off-by: Michael Chan Signed-off-by: David S. 
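A stand-alone sketch of how the event_data1 word of this async event breaks down (the mask and shift values mirror the HWRM definitions added earlier in this series; the policy strings are illustrative, not the driver's exact wording):

/* Illustrative only: decode port id and enforcement policy from data1. */
#include <stdio.h>
#include <stdint.h>

#define PORT_ID_MASK    0x0000ffffu
#define POLICY_MASK     0x00ff0000u
#define POLICY_SFT      16

static const char * const policy_msg[] = {
        [0] = "none",
        [1] = "TX disabled",
        [2] = "warning only",
        [3] = "module powered down",
};

int main(void)
{
        uint32_t data1 = 0x00030002;    /* sample event: policy 3, port 2 */
        uint32_t port = data1 & PORT_ID_MASK;
        uint32_t policy = (data1 & POLICY_MASK) >> POLICY_SFT;

        printf("unqualified module on port %u, enforcement: %s\n",
               port, policy < 4 ? policy_msg[policy] : "unknown");
        return 0;
}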
Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 55 +++++++++++++++++++++++ drivers/net/ethernet/broadcom/bnxt/bnxt.h | 3 ++ 2 files changed, 58 insertions(+) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index dd0b32c58a24..597e4724a474 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -121,6 +121,7 @@ static const u16 bnxt_vf_req_snif[] = { static const u16 bnxt_async_events_arr[] = { HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE, HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD, + HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED, }; static bool bnxt_vf_pciid(enum board_idx idx) @@ -1236,6 +1237,19 @@ next_rx_no_prod: return rc; } +#define BNXT_GET_EVENT_PORT(data) \ + ((data) & \ + HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK) + +#define BNXT_EVENT_POLICY_MASK \ + HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_MASK + +#define BNXT_EVENT_POLICY_SFT \ + HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_SFT + +#define BNXT_GET_EVENT_POLICY(data) \ + (((data) & BNXT_EVENT_POLICY_MASK) >> BNXT_EVENT_POLICY_SFT) + static int bnxt_async_event_process(struct bnxt *bp, struct hwrm_async_event_cmpl *cmpl) { @@ -1249,6 +1263,22 @@ static int bnxt_async_event_process(struct bnxt *bp, case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD: set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event); break; + case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: { + u32 data1 = le32_to_cpu(cmpl->event_data1); + u16 port_id = BNXT_GET_EVENT_PORT(data1); + + if (BNXT_VF(bp)) + break; + + if (bp->pf.port_id != port_id) + break; + + bp->link_info.last_port_module_event = + BNXT_GET_EVENT_POLICY(data1); + + set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event); + break; + } default: netdev_err(bp->dev, "unhandled ASYNC event (id 0x%x)\n", event_id); @@ -5447,6 +5477,28 @@ bnxt_restart_timer: mod_timer(&bp->timer, jiffies + bp->current_interval); } +static void bnxt_port_module_event(struct bnxt *bp) +{ + struct bnxt_link_info *link_info = &bp->link_info; + struct hwrm_port_phy_qcfg_output *resp = &link_info->phy_qcfg_resp; + + if (bnxt_update_link(bp, true)) + return; + + if (link_info->last_port_module_event != 0) { + netdev_warn(bp->dev, "Unqualified SFP+ module detected on port %d\n", + bp->pf.port_id); + if (bp->hwrm_spec_code >= 0x10201) { + netdev_warn(bp->dev, "Module part number %s\n", + resp->phy_vendor_partnumber); + } + } + if (link_info->last_port_module_event == 1) + netdev_warn(bp->dev, "TX is disabled\n"); + if (link_info->last_port_module_event == 3) + netdev_warn(bp->dev, "Shutdown SFP+ module\n"); +} + static void bnxt_cfg_ntp_filters(struct bnxt *); static void bnxt_sp_task(struct work_struct *work) @@ -5494,6 +5546,9 @@ static void bnxt_sp_task(struct work_struct *work) rtnl_unlock(); } + if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) + bnxt_port_module_event(bp); + if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) bnxt_hwrm_port_qstats(bp); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index a981e2c17107..cc8e38a9f684 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -825,6 +825,8 @@ struct bnxt_link_info { u16 req_link_speed; u32 advertising; bool force_link_chng; + + u8 last_port_module_event; /* a copy of phy_qcfg output used to report link * 
info to VF */ @@ -992,6 +994,7 @@ struct bnxt { #define BNXT_RST_RING_SP_EVENT 7 #define BNXT_HWRM_PF_UNLOAD_SP_EVENT 8 #define BNXT_PERIODIC_STATS_SP_EVENT 9 +#define BNXT_HWRM_PORT_MODULE_SP_EVENT 10 struct bnxt_pf_info pf; #ifdef CONFIG_BNXT_SRIOV From 9d9cee08fc9f5c4df84ef314158fd19c013bcec6 Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Tue, 5 Apr 2016 14:09:02 -0400 Subject: [PATCH 0290/1649] bnxt_en: Check for valid forced speed during ethtool -s. Check that the forced speed is a valid speed supported by firmware. If not supported, return -EINVAL. Signed-off-by: Michael Chan Signed-off-by: David S. Miller --- .../net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 48 +++++++++++++++---- 1 file changed, 38 insertions(+), 10 deletions(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index 47e08a8b1563..952b5ba1c4da 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -739,28 +739,49 @@ static int bnxt_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) static u32 bnxt_get_fw_speed(struct net_device *dev, u16 ethtool_speed) { + struct bnxt *bp = netdev_priv(dev); + struct bnxt_link_info *link_info = &bp->link_info; + u16 support_spds = link_info->support_speeds; + u32 fw_speed = 0; + switch (ethtool_speed) { case SPEED_100: - return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100MB; + if (support_spds & BNXT_LINK_SPEED_MSK_100MB) + fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100MB; + break; case SPEED_1000: - return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_1GB; + if (support_spds & BNXT_LINK_SPEED_MSK_1GB) + fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_1GB; + break; case SPEED_2500: - return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2_5GB; + if (support_spds & BNXT_LINK_SPEED_MSK_2_5GB) + fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2_5GB; + break; case SPEED_10000: - return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10GB; + if (support_spds & BNXT_LINK_SPEED_MSK_10GB) + fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10GB; + break; case SPEED_20000: - return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_20GB; + if (support_spds & BNXT_LINK_SPEED_MSK_20GB) + fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_20GB; + break; case SPEED_25000: - return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_25GB; + if (support_spds & BNXT_LINK_SPEED_MSK_25GB) + fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_25GB; + break; case SPEED_40000: - return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_40GB; + if (support_spds & BNXT_LINK_SPEED_MSK_40GB) + fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_40GB; + break; case SPEED_50000: - return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_50GB; + if (support_spds & BNXT_LINK_SPEED_MSK_50GB) + fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_50GB; + break; default: netdev_err(dev, "unsupported speed!\n"); break; } - return 0; + return fw_speed; } u16 bnxt_get_fw_auto_link_speeds(u32 advertising) @@ -823,6 +844,8 @@ static int bnxt_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) */ set_pause = true; } else { + u16 fw_speed; + /* TODO: currently don't support half duplex */ if (cmd->duplex == DUPLEX_HALF) { netdev_err(dev, "HALF DUPLEX is not supported!\n"); @@ -833,7 +856,12 @@ static int bnxt_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) if (cmd->duplex == DUPLEX_UNKNOWN) cmd->duplex = DUPLEX_FULL; speed = ethtool_cmd_speed(cmd); - link_info->req_link_speed = bnxt_get_fw_speed(dev, speed); + fw_speed = bnxt_get_fw_speed(dev, speed); + if (!fw_speed) { + rc = -EINVAL; + goto set_setting_exit; + } + 
link_info->req_link_speed = fw_speed; link_info->req_duplex = BNXT_LINK_DUPLEX_FULL; link_info->autoneg = 0; link_info->advertising = 0; From 29c262fed4067c52977ba279cf71520f9991a050 Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Tue, 5 Apr 2016 14:09:03 -0400 Subject: [PATCH 0291/1649] bnxt_en: Improve ethtool .get_settings(). If autoneg is off, we should always report the speed and duplex settings even if it is link down so the user knows the current settings. The unknown speed and duplex should only be used for autoneg when link is down. Signed-off-by: Michael Chan Signed-off-by: David S. Miller --- .../net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 21 ++++++++++++------- 1 file changed, 13 insertions(+), 8 deletions(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index 952b5ba1c4da..a2e93241b06b 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -698,10 +698,23 @@ static int bnxt_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) if (link_info->phy_link_status == BNXT_LINK_LINK) cmd->lp_advertising = bnxt_fw_to_ethtool_lp_adv(link_info); + ethtool_speed = bnxt_fw_to_ethtool_speed(link_info->link_speed); + if (!netif_carrier_ok(dev)) + cmd->duplex = DUPLEX_UNKNOWN; + else if (link_info->duplex & BNXT_LINK_DUPLEX_FULL) + cmd->duplex = DUPLEX_FULL; + else + cmd->duplex = DUPLEX_HALF; } else { cmd->autoneg = AUTONEG_DISABLE; cmd->advertising = 0; + ethtool_speed = + bnxt_fw_to_ethtool_speed(link_info->req_link_speed); + cmd->duplex = DUPLEX_HALF; + if (link_info->req_duplex == BNXT_LINK_DUPLEX_FULL) + cmd->duplex = DUPLEX_FULL; } + ethtool_cmd_speed_set(cmd, ethtool_speed); cmd->port = PORT_NONE; if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP) { @@ -719,14 +732,6 @@ static int bnxt_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) cmd->port = PORT_FIBRE; } - if (link_info->phy_link_status == BNXT_LINK_LINK) { - if (link_info->duplex & BNXT_LINK_DUPLEX_FULL) - cmd->duplex = DUPLEX_FULL; - } else { - cmd->duplex = DUPLEX_UNKNOWN; - } - ethtool_speed = bnxt_fw_to_ethtool_speed(link_info->link_speed); - ethtool_cmd_speed_set(cmd, ethtool_speed); if (link_info->transceiver == PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_INTERNAL) cmd->transceiver = XCVR_INTERNAL; From b9bb53f3836f4eb2bdeb3447be11042bd29c2408 Mon Sep 17 00:00:00 2001 From: Willem de Bruijn Date: Tue, 5 Apr 2016 12:41:14 -0400 Subject: [PATCH 0292/1649] sock: convert sk_peek_offset functions to WRITE_ONCE Make the peek offset interface safe to use in lockless environments. Use READ_ONCE and WRITE_ONCE to avoid race conditions between testing and updating the peek offset. Suggested-by: Eric Dumazet Signed-off-by: Willem de Bruijn Signed-off-by: David S. 
Miller --- include/net/sock.h | 24 +++++++++++++----------- 1 file changed, 13 insertions(+), 11 deletions(-) diff --git a/include/net/sock.h b/include/net/sock.h index 310c4367ea83..09aec75eb184 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -459,26 +459,28 @@ struct sock { static inline int sk_peek_offset(struct sock *sk, int flags) { - if ((flags & MSG_PEEK) && (sk->sk_peek_off >= 0)) - return sk->sk_peek_off; - else - return 0; + if (unlikely(flags & MSG_PEEK)) { + s32 off = READ_ONCE(sk->sk_peek_off); + if (off >= 0) + return off; + } + + return 0; } static inline void sk_peek_offset_bwd(struct sock *sk, int val) { - if (sk->sk_peek_off >= 0) { - if (sk->sk_peek_off >= val) - sk->sk_peek_off -= val; - else - sk->sk_peek_off = 0; + s32 off = READ_ONCE(sk->sk_peek_off); + + if (unlikely(off >= 0)) { + off = max_t(s32, off - val, 0); + WRITE_ONCE(sk->sk_peek_off, off); } } static inline void sk_peek_offset_fwd(struct sock *sk, int val) { - if (sk->sk_peek_off >= 0) - sk->sk_peek_off += val; + sk_peek_offset_bwd(sk, -val); } /* From e6afc8ace6dd5cef5e812f26c72579da8806f5ac Mon Sep 17 00:00:00 2001 From: samanthakumar Date: Tue, 5 Apr 2016 12:41:15 -0400 Subject: [PATCH 0293/1649] udp: remove headers from UDP packets before queueing Remove UDP transport headers before queueing packets for reception. This change simplifies a follow-up patch to add MSG_PEEK support. Signed-off-by: Sam Kumar Signed-off-by: Willem de Bruijn Signed-off-by: David S. Miller --- include/net/sock.h | 1 + include/net/udp.h | 9 +++++++++ net/core/sock.c | 19 +++++++++++++------ net/ipv4/udp.c | 20 +++++++++++--------- net/ipv6/udp.c | 12 +++++++----- 5 files changed, 41 insertions(+), 20 deletions(-) diff --git a/include/net/sock.h b/include/net/sock.h index 09aec75eb184..b75998952482 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -1864,6 +1864,7 @@ void sk_reset_timer(struct sock *sk, struct timer_list *timer, void sk_stop_timer(struct sock *sk, struct timer_list *timer); +int __sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb); int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb); int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb); diff --git a/include/net/udp.h b/include/net/udp.h index d870ec1611c4..a0b0da97164c 100644 --- a/include/net/udp.h +++ b/include/net/udp.h @@ -158,6 +158,15 @@ static inline __sum16 udp_v4_check(int len, __be32 saddr, void udp_set_csum(bool nocheck, struct sk_buff *skb, __be32 saddr, __be32 daddr, int len); +static inline void udp_csum_pull_header(struct sk_buff *skb) +{ + if (skb->ip_summed == CHECKSUM_NONE) + skb->csum = csum_partial(udp_hdr(skb), sizeof(struct udphdr), + skb->csum); + skb_pull_rcsum(skb, sizeof(struct udphdr)); + UDP_SKB_CB(skb)->cscov -= sizeof(struct udphdr); +} + struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *skb, struct udphdr *uh); int udp_gro_complete(struct sk_buff *skb, int nhoff); diff --git a/net/core/sock.c b/net/core/sock.c index 2f517ea56786..e12197b359fd 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -402,9 +402,8 @@ static void sock_disable_timestamp(struct sock *sk, unsigned long flags) } -int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) +int __sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) { - int err; unsigned long flags; struct sk_buff_head *list = &sk->sk_receive_queue; @@ -414,10 +413,6 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) return -ENOMEM; } - err = sk_filter(sk, skb); - if (err) - return err; - if 
(!sk_rmem_schedule(sk, skb, skb->truesize)) { atomic_inc(&sk->sk_drops); return -ENOBUFS; @@ -440,6 +435,18 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) sk->sk_data_ready(sk); return 0; } +EXPORT_SYMBOL(__sock_queue_rcv_skb); + +int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) +{ + int err; + + err = sk_filter(sk, skb); + if (err) + return err; + + return __sock_queue_rcv_skb(sk, skb); +} EXPORT_SYMBOL(sock_queue_rcv_skb); int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested) diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 355bdb221057..cf747e86ce52 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -1309,7 +1309,7 @@ try_again: if (!skb) goto out; - ulen = skb->len - sizeof(struct udphdr); + ulen = skb->len; copied = len; if (copied > ulen) copied = ulen; @@ -1329,11 +1329,9 @@ try_again: } if (checksum_valid || skb_csum_unnecessary(skb)) - err = skb_copy_datagram_msg(skb, sizeof(struct udphdr), - msg, copied); + err = skb_copy_datagram_msg(skb, 0, msg, copied); else { - err = skb_copy_and_csum_datagram_msg(skb, sizeof(struct udphdr), - msg); + err = skb_copy_and_csum_datagram_msg(skb, 0, msg); if (err == -EINVAL) goto csum_copy_err; @@ -1500,7 +1498,7 @@ static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) sk_incoming_cpu_update(sk); } - rc = sock_queue_rcv_skb(sk, skb); + rc = __sock_queue_rcv_skb(sk, skb); if (rc < 0) { int is_udplite = IS_UDPLITE(sk); @@ -1616,10 +1614,14 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) } } - if (rcu_access_pointer(sk->sk_filter) && - udp_lib_checksum_complete(skb)) - goto csum_error; + if (rcu_access_pointer(sk->sk_filter)) { + if (udp_lib_checksum_complete(skb)) + goto csum_error; + if (sk_filter(sk, skb)) + goto drop; + } + udp_csum_pull_header(skb); if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) { UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS, is_udplite); diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 78a7dfd12707..84c8d7b66820 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -376,7 +376,7 @@ try_again: if (!skb) goto out; - ulen = skb->len - sizeof(struct udphdr); + ulen = skb->len; copied = len; if (copied > ulen) copied = ulen; @@ -398,10 +398,9 @@ try_again: } if (checksum_valid || skb_csum_unnecessary(skb)) - err = skb_copy_datagram_msg(skb, sizeof(struct udphdr), - msg, copied); + err = skb_copy_datagram_msg(skb, 0, msg, copied); else { - err = skb_copy_and_csum_datagram_msg(skb, sizeof(struct udphdr), msg); + err = skb_copy_and_csum_datagram_msg(skb, 0, msg); if (err == -EINVAL) goto csum_copy_err; } @@ -554,7 +553,7 @@ static int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) sk_incoming_cpu_update(sk); } - rc = sock_queue_rcv_skb(sk, skb); + rc = __sock_queue_rcv_skb(sk, skb); if (rc < 0) { int is_udplite = IS_UDPLITE(sk); @@ -648,8 +647,11 @@ int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) if (rcu_access_pointer(sk->sk_filter)) { if (udp_lib_checksum_complete(skb)) goto csum_error; + if (sk_filter(sk, skb)) + goto drop; } + udp_csum_pull_header(skb); if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) { UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS, is_udplite); From 627d2d6b550094d88f9e518e15967e7bf906ebbf Mon Sep 17 00:00:00 2001 From: samanthakumar Date: Tue, 5 Apr 2016 12:41:16 -0400 Subject: [PATCH 0294/1649] udp: enable MSG_PEEK at non-zero offset Enable peeking at UDP datagrams at the offset specified with socket option SOL_SOCKET/SO_PEEK_OFF. Peek at any datagram in the queue, up to the end of the given datagram. 
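As an illustration of the semantics described here (a minimal userspace sketch, not part of this patch; error handling omitted), a receiver can enable a peek offset and then peek the same datagram in pieces before consuming it:

	#include <sys/socket.h>

	static void peek_then_read(int fd)
	{
		char buf[64];
		int off = 0;

		/* enable peeking at an offset; starts at byte 0 */
		setsockopt(fd, SOL_SOCKET, SO_PEEK_OFF, &off, sizeof(off));

		/* each MSG_PEEK returns data at the current offset and
		 * advances it by the number of bytes returned, without
		 * crossing the end of the datagram */
		recv(fd, buf, sizeof(buf), MSG_PEEK);
		recv(fd, buf, sizeof(buf), MSG_PEEK);

		/* a normal read consumes the data and moves the peek
		 * offset back accordingly */
		recv(fd, buf, sizeof(buf), 0);
	}
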
Implement the SO_PEEK_OFF semantics introduced in commit ef64a54f6e55 ("sock: Introduce the SO_PEEK_OFF sock option"). Increase the offset on peek, decrease it on regular reads. When peeking, always checksum the packet immediately, to avoid recomputation on subsequent peeks and final read. The socket lock is not held for the duration of udp_recvmsg, so peek and read operations can run concurrently. Only the last store to sk_peek_off is preserved. Signed-off-by: Sam Kumar Signed-off-by: Willem de Bruijn Signed-off-by: David S. Miller --- include/linux/skbuff.h | 7 ++++++- include/net/sock.h | 2 ++ net/core/datagram.c | 9 ++++++--- net/core/sock.c | 9 +++++++++ net/ipv4/af_inet.c | 1 + net/ipv4/udp.c | 22 +++++++++++----------- net/ipv6/af_inet6.c | 1 + net/ipv6/udp.c | 22 +++++++++++----------- 8 files changed, 47 insertions(+), 26 deletions(-) diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 15d0df943466..007381270ff8 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -2949,7 +2949,12 @@ int skb_copy_datagram_from_iter(struct sk_buff *skb, int offset, struct iov_iter *from, int len); int zerocopy_sg_from_iter(struct sk_buff *skb, struct iov_iter *frm); void skb_free_datagram(struct sock *sk, struct sk_buff *skb); -void skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb); +void __skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb, int len); +static inline void skb_free_datagram_locked(struct sock *sk, + struct sk_buff *skb) +{ + __skb_free_datagram_locked(sk, skb, 0); +} int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags); int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len); int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len); diff --git a/include/net/sock.h b/include/net/sock.h index b75998952482..1decb7a22261 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -457,6 +457,8 @@ struct sock { #define SK_CAN_REUSE 1 #define SK_FORCE_REUSE 2 +int sk_set_peek_off(struct sock *sk, int val); + static inline int sk_peek_offset(struct sock *sk, int flags) { if (unlikely(flags & MSG_PEEK)) { diff --git a/net/core/datagram.c b/net/core/datagram.c index fa9dc6450b08..b7de71f8d5d3 100644 --- a/net/core/datagram.c +++ b/net/core/datagram.c @@ -301,16 +301,19 @@ void skb_free_datagram(struct sock *sk, struct sk_buff *skb) } EXPORT_SYMBOL(skb_free_datagram); -void skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb) +void __skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb, int len) { bool slow; if (likely(atomic_read(&skb->users) == 1)) smp_rmb(); - else if (likely(!atomic_dec_and_test(&skb->users))) + else if (likely(!atomic_dec_and_test(&skb->users))) { + sk_peek_offset_bwd(sk, len); return; + } slow = lock_sock_fast(sk); + sk_peek_offset_bwd(sk, len); skb_orphan(skb); sk_mem_reclaim_partial(sk); unlock_sock_fast(sk, slow); @@ -318,7 +321,7 @@ void skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb) /* skb is now orphaned, can be freed outside of locked section */ __kfree_skb(skb); } -EXPORT_SYMBOL(skb_free_datagram_locked); +EXPORT_SYMBOL(__skb_free_datagram_locked); /** * skb_kill_datagram - Free a datagram skbuff forcibly diff --git a/net/core/sock.c b/net/core/sock.c index e12197b359fd..2ce76e82857f 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -2187,6 +2187,15 @@ void __sk_mem_reclaim(struct sock *sk, int amount) } EXPORT_SYMBOL(__sk_mem_reclaim); +int sk_set_peek_off(struct sock *sk, int val) +{ + if (val < 0) + 
return -EINVAL; + + sk->sk_peek_off = val; + return 0; +} +EXPORT_SYMBOL_GPL(sk_set_peek_off); /* * Set of default routines for initialising struct proto_ops when diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index 9e481992dbae..a38b9910af60 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -948,6 +948,7 @@ const struct proto_ops inet_dgram_ops = { .recvmsg = inet_recvmsg, .mmap = sock_no_mmap, .sendpage = inet_sendpage, + .set_peek_off = sk_set_peek_off, #ifdef CONFIG_COMPAT .compat_setsockopt = compat_sock_common_setsockopt, .compat_getsockopt = compat_sock_common_getsockopt, diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index cf747e86ce52..d80312ddbb8a 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -1294,7 +1294,7 @@ int udp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock, DECLARE_SOCKADDR(struct sockaddr_in *, sin, msg->msg_name); struct sk_buff *skb; unsigned int ulen, copied; - int peeked, off = 0; + int peeked, peeking, off; int err; int is_udplite = IS_UDPLITE(sk); bool checksum_valid = false; @@ -1304,15 +1304,16 @@ int udp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock, return ip_recv_error(sk, msg, len, addr_len); try_again: + peeking = off = sk_peek_offset(sk, flags); skb = __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0), &peeked, &off, &err); if (!skb) - goto out; + return err; ulen = skb->len; copied = len; - if (copied > ulen) - copied = ulen; + if (copied > ulen - off) + copied = ulen - off; else if (copied < ulen) msg->msg_flags |= MSG_TRUNC; @@ -1322,16 +1323,16 @@ try_again: * coverage checksum (UDP-Lite), do it before the copy. */ - if (copied < ulen || UDP_SKB_CB(skb)->partial_cov) { + if (copied < ulen || UDP_SKB_CB(skb)->partial_cov || peeking) { checksum_valid = !udp_lib_checksum_complete(skb); if (!checksum_valid) goto csum_copy_err; } if (checksum_valid || skb_csum_unnecessary(skb)) - err = skb_copy_datagram_msg(skb, 0, msg, copied); + err = skb_copy_datagram_msg(skb, off, msg, copied); else { - err = skb_copy_and_csum_datagram_msg(skb, 0, msg); + err = skb_copy_and_csum_datagram_msg(skb, off, msg); if (err == -EINVAL) goto csum_copy_err; @@ -1344,7 +1345,8 @@ try_again: UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_INERRORS, is_udplite); } - goto out_free; + skb_free_datagram_locked(sk, skb); + return err; } if (!peeked) @@ -1368,9 +1370,7 @@ try_again: if (flags & MSG_TRUNC) err = ulen; -out_free: - skb_free_datagram_locked(sk, skb); -out: + __skb_free_datagram_locked(sk, skb, peeking ? 
-err : err); return err; csum_copy_err: diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index b11c37cfd67c..2b78aad0d52f 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c @@ -561,6 +561,7 @@ const struct proto_ops inet6_dgram_ops = { .recvmsg = inet_recvmsg, /* ok */ .mmap = sock_no_mmap, .sendpage = sock_no_sendpage, + .set_peek_off = sk_set_peek_off, #ifdef CONFIG_COMPAT .compat_setsockopt = compat_sock_common_setsockopt, .compat_getsockopt = compat_sock_common_getsockopt, diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 84c8d7b66820..87bd7aff88b4 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -357,7 +357,7 @@ int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, struct inet_sock *inet = inet_sk(sk); struct sk_buff *skb; unsigned int ulen, copied; - int peeked, off = 0; + int peeked, peeking, off; int err; int is_udplite = IS_UDPLITE(sk); bool checksum_valid = false; @@ -371,15 +371,16 @@ int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, return ipv6_recv_rxpmtu(sk, msg, len, addr_len); try_again: + peeking = off = sk_peek_offset(sk, flags); skb = __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0), &peeked, &off, &err); if (!skb) - goto out; + return err; ulen = skb->len; copied = len; - if (copied > ulen) - copied = ulen; + if (copied > ulen - off) + copied = ulen - off; else if (copied < ulen) msg->msg_flags |= MSG_TRUNC; @@ -391,16 +392,16 @@ try_again: * coverage checksum (UDP-Lite), do it before the copy. */ - if (copied < ulen || UDP_SKB_CB(skb)->partial_cov) { + if (copied < ulen || UDP_SKB_CB(skb)->partial_cov || peeking) { checksum_valid = !udp_lib_checksum_complete(skb); if (!checksum_valid) goto csum_copy_err; } if (checksum_valid || skb_csum_unnecessary(skb)) - err = skb_copy_datagram_msg(skb, 0, msg, copied); + err = skb_copy_datagram_msg(skb, off, msg, copied); else { - err = skb_copy_and_csum_datagram_msg(skb, 0, msg); + err = skb_copy_and_csum_datagram_msg(skb, off, msg); if (err == -EINVAL) goto csum_copy_err; } @@ -417,7 +418,8 @@ try_again: UDP_MIB_INERRORS, is_udplite); } - goto out_free; + skb_free_datagram_locked(sk, skb); + return err; } if (!peeked) { if (is_udp4) @@ -465,9 +467,7 @@ try_again: if (flags & MSG_TRUNC) err = ulen; -out_free: - skb_free_datagram_locked(sk, skb); -out: + __skb_free_datagram_locked(sk, skb, peeking ? -err : err); return err; csum_copy_err: From f5a9ec20b3b2adf03a0b01902cda913b05abd382 Mon Sep 17 00:00:00 2001 From: Petri Gynther Date: Tue, 5 Apr 2016 13:59:59 -0700 Subject: [PATCH 0295/1649] net: bcmgenet: cleanup for bcmgenet_xmit() 1. Readability: Move nr_frags assignment a few lines down in order to bundle index -> ring -> txq calculations together. 2. Readability: Add parentheses around nr_frags + 1. 3. Minor fix: Stop the Tx queue and throw the error message only if the Tx queue hasn't already been stopped. Signed-off-by: Petri Gynther Acked-by: Florian Fainelli Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/genet/bcmgenet.c | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index cf6445d148ca..7f85a8494eee 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -1447,15 +1447,19 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev) else index -= 1; - nr_frags = skb_shinfo(skb)->nr_frags; ring = &priv->tx_rings[index]; txq = netdev_get_tx_queue(dev, ring->queue); + nr_frags = skb_shinfo(skb)->nr_frags; + spin_lock_irqsave(&ring->lock, flags); - if (ring->free_bds <= nr_frags + 1) { - netif_tx_stop_queue(txq); - netdev_err(dev, "%s: tx ring %d full when queue %d awake\n", - __func__, index, ring->queue); + if (ring->free_bds <= (nr_frags + 1)) { + if (!netif_tx_queue_stopped(txq)) { + netif_tx_stop_queue(txq); + netdev_err(dev, + "%s: tx ring %d full when queue %d awake\n", + __func__, index, ring->queue); + } ret = NETDEV_TX_BUSY; goto out; } From 824ba603573d910e32df75fe6a5e7d7ec2a0a6a7 Mon Sep 17 00:00:00 2001 From: Petri Gynther Date: Tue, 5 Apr 2016 14:00:00 -0700 Subject: [PATCH 0296/1649] net: bcmgenet: cleanup for bcmgenet_xmit_frag() Add frag_size = skb_frag_size(frag) and use it when needed. Signed-off-by: Petri Gynther Acked-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/genet/bcmgenet.c | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index 7f85a8494eee..d77cd6dee092 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -1331,6 +1331,7 @@ static int bcmgenet_xmit_frag(struct net_device *dev, struct bcmgenet_priv *priv = netdev_priv(dev); struct device *kdev = &priv->pdev->dev; struct enet_cb *tx_cb_ptr; + unsigned int frag_size; dma_addr_t mapping; int ret; @@ -1338,10 +1339,12 @@ static int bcmgenet_xmit_frag(struct net_device *dev, if (unlikely(!tx_cb_ptr)) BUG(); + tx_cb_ptr->skb = NULL; - mapping = skb_frag_dma_map(kdev, frag, 0, - skb_frag_size(frag), DMA_TO_DEVICE); + frag_size = skb_frag_size(frag); + + mapping = skb_frag_dma_map(kdev, frag, 0, frag_size, DMA_TO_DEVICE); ret = dma_mapping_error(kdev, mapping); if (ret) { priv->mib.tx_dma_failed++; @@ -1351,10 +1354,10 @@ static int bcmgenet_xmit_frag(struct net_device *dev, } dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping); - dma_unmap_len_set(tx_cb_ptr, dma_len, frag->size); + dma_unmap_len_set(tx_cb_ptr, dma_len, frag_size); dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping, - (frag->size << DMA_BUFLENGTH_SHIFT) | dma_desc_flags | + (frag_size << DMA_BUFLENGTH_SHIFT) | dma_desc_flags | (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT)); return 0; From 7ee4062562bae8f248f0aa72dfb3023ddea65942 Mon Sep 17 00:00:00 2001 From: Petri Gynther Date: Tue, 5 Apr 2016 14:00:01 -0700 Subject: [PATCH 0297/1649] net: bcmgenet: cleanup for dmadesc_set() dmadesc_set() is used for setting the Tx buffer DMA address, length, and status bits on a Tx ring descriptor when a frame is being Tx'ed. Always set the Tx buffer DMA address first, before updating the length and status bits, i.e. giving the Tx descriptor to the hardware. 
The reason this is a cleanup rather than a fix is that the hardware won't transmit anything from a Tx ring until the TDMA producer index has been incremented. As long as the dmadesc_set() writes complete before the TDMA producer index write, life is good. Signed-off-by: Petri Gynther Acked-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/genet/bcmgenet.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index d77cd6dee092..f7b42b9fc979 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -104,8 +104,8 @@ static inline void dmadesc_set_addr(struct bcmgenet_priv *priv, static inline void dmadesc_set(struct bcmgenet_priv *priv, void __iomem *d, dma_addr_t addr, u32 val) { - dmadesc_set_length_status(priv, d, val); dmadesc_set_addr(priv, d, addr); + dmadesc_set_length_status(priv, d, val); } static inline dma_addr_t dmadesc_get_addr(struct bcmgenet_priv *priv, From ad7775dc7b8b0b5585ff114b04d5ad50737c423e Mon Sep 17 00:00:00 2001 From: Thomas Falcon Date: Fri, 1 Apr 2016 17:20:34 -0500 Subject: [PATCH 0298/1649] ibmvnic: map L2/L3/L4 header descriptors to firmware Allow the VNIC driver to provide descriptors containing L2/L3/L4 headers to firmware. This feature is needed for greater hardware compatibility and enablement of checksum and TCP offloading features. A new function is included for the hypervisor call, H_SEND_SUBCRQ_INDIRECT, allowing a DMA-mapped array of SCRQ descriptor elements to be sent to the VNIC server. These additions will help fully enable checksum offloading as well as other features as they are included later. Signed-off-by: Thomas Falcon Cc: John Allen Signed-off-by: David S. Miller --- drivers/net/ethernet/ibm/ibmvnic.c | 195 ++++++++++++++++++++++++++++- drivers/net/ethernet/ibm/ibmvnic.h | 3 + 2 files changed, 194 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 6e9e16eee5d0..4e97e762e297 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -61,6 +61,7 @@ #include #include #include +#include #include #include #include @@ -94,6 +95,7 @@ static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *); static int ibmvnic_send_crq(struct ibmvnic_adapter *, union ibmvnic_crq *); static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle, union sub_crq *sub_crq); +static int send_subcrq_indirect(struct ibmvnic_adapter *, u64, u64, u64); static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance); static int enable_scrq_irq(struct ibmvnic_adapter *, struct ibmvnic_sub_crq_queue *); @@ -561,10 +563,141 @@ static int ibmvnic_close(struct net_device *netdev) return 0; } +/** + * build_hdr_data - creates L2/L3/L4 header data buffer + * @hdr_field - bitfield determining needed headers + * @skb - socket buffer + * @hdr_len - array of header lengths + * @tot_len - total length of data + * + * Reads hdr_field to determine which headers are needed by firmware. + * Builds a buffer containing these headers. Saves individual header + * lengths and total buffer length to be used to build descriptors. 
+ */ +static int build_hdr_data(u8 hdr_field, struct sk_buff *skb, + int *hdr_len, u8 *hdr_data) +{ + int len = 0; + u8 *hdr; + + hdr_len[0] = sizeof(struct ethhdr); + + if (skb->protocol == htons(ETH_P_IP)) { + hdr_len[1] = ip_hdr(skb)->ihl * 4; + if (ip_hdr(skb)->protocol == IPPROTO_TCP) + hdr_len[2] = tcp_hdrlen(skb); + else if (ip_hdr(skb)->protocol == IPPROTO_UDP) + hdr_len[2] = sizeof(struct udphdr); + } else if (skb->protocol == htons(ETH_P_IPV6)) { + hdr_len[1] = sizeof(struct ipv6hdr); + if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) + hdr_len[2] = tcp_hdrlen(skb); + else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP) + hdr_len[2] = sizeof(struct udphdr); + } + + memset(hdr_data, 0, 120); + if ((hdr_field >> 6) & 1) { + hdr = skb_mac_header(skb); + memcpy(hdr_data, hdr, hdr_len[0]); + len += hdr_len[0]; + } + + if ((hdr_field >> 5) & 1) { + hdr = skb_network_header(skb); + memcpy(hdr_data + len, hdr, hdr_len[1]); + len += hdr_len[1]; + } + + if ((hdr_field >> 4) & 1) { + hdr = skb_transport_header(skb); + memcpy(hdr_data + len, hdr, hdr_len[2]); + len += hdr_len[2]; + } + return len; +} + +/** + * create_hdr_descs - create header and header extension descriptors + * @hdr_field - bitfield determining needed headers + * @data - buffer containing header data + * @len - length of data buffer + * @hdr_len - array of individual header lengths + * @scrq_arr - descriptor array + * + * Creates header and, if needed, header extension descriptors and + * places them in a descriptor array, scrq_arr + */ + +static void create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len, + union sub_crq *scrq_arr) +{ + union sub_crq hdr_desc; + int tmp_len = len; + u8 *data, *cur; + int tmp; + + while (tmp_len > 0) { + cur = hdr_data + len - tmp_len; + + memset(&hdr_desc, 0, sizeof(hdr_desc)); + if (cur != hdr_data) { + data = hdr_desc.hdr_ext.data; + tmp = tmp_len > 29 ? 29 : tmp_len; + hdr_desc.hdr_ext.first = IBMVNIC_CRQ_CMD; + hdr_desc.hdr_ext.type = IBMVNIC_HDR_EXT_DESC; + hdr_desc.hdr_ext.len = tmp; + } else { + data = hdr_desc.hdr.data; + tmp = tmp_len > 24 ? 24 : tmp_len; + hdr_desc.hdr.first = IBMVNIC_CRQ_CMD; + hdr_desc.hdr.type = IBMVNIC_HDR_DESC; + hdr_desc.hdr.len = tmp; + hdr_desc.hdr.l2_len = (u8)hdr_len[0]; + hdr_desc.hdr.l3_len = cpu_to_be16((u16)hdr_len[1]); + hdr_desc.hdr.l4_len = (u8)hdr_len[2]; + hdr_desc.hdr.flag = hdr_field << 1; + } + memcpy(data, cur, tmp); + tmp_len -= tmp; + *scrq_arr = hdr_desc; + scrq_arr++; + } +} + +/** + * build_hdr_descs_arr - build a header descriptor array + * @skb - socket buffer + * @num_entries - number of descriptors to be sent + * @subcrq - first TX descriptor + * @hdr_field - bit field determining which headers will be sent + * + * This function will build a TX descriptor array with applicable + * L2/L3/L4 packet header descriptors to be sent by send_subcrq_indirect. + */ + +static void build_hdr_descs_arr(struct ibmvnic_tx_buff *txbuff, + int *num_entries, u8 hdr_field) +{ + int hdr_len[3] = {0, 0, 0}; + int tot_len, len; + u8 *hdr_data = txbuff->hdr_data; + + tot_len = build_hdr_data(hdr_field, txbuff->skb, hdr_len, + txbuff->hdr_data); + len = tot_len; + len -= 24; + if (len > 0) + num_entries += len % 29 ? 
len / 29 + 1 : len / 29; + create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len, + txbuff->indir_arr + 1); +} + static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) { struct ibmvnic_adapter *adapter = netdev_priv(netdev); int queue_num = skb_get_queue_mapping(skb); + u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req; struct device *dev = &adapter->vdev->dev; struct ibmvnic_tx_buff *tx_buff = NULL; struct ibmvnic_tx_pool *tx_pool; @@ -579,6 +712,7 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) unsigned long lpar_rc; union sub_crq tx_crq; unsigned int offset; + int num_entries = 1; unsigned char *dst; u64 *handle_array; int index = 0; @@ -644,11 +778,34 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP; } - if (skb->ip_summed == CHECKSUM_PARTIAL) + if (skb->ip_summed == CHECKSUM_PARTIAL) { tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD; - - lpar_rc = send_subcrq(adapter, handle_array[0], &tx_crq); - + hdrs += 2; + } + /* determine if l2/3/4 headers are sent to firmware */ + if ((*hdrs >> 7) & 1 && + (skb->protocol == htons(ETH_P_IP) || + skb->protocol == htons(ETH_P_IPV6))) { + build_hdr_descs_arr(tx_buff, &num_entries, *hdrs); + tx_crq.v1.n_crq_elem = num_entries; + tx_buff->indir_arr[0] = tx_crq; + tx_buff->indir_dma = dma_map_single(dev, tx_buff->indir_arr, + sizeof(tx_buff->indir_arr), + DMA_TO_DEVICE); + if (dma_mapping_error(dev, tx_buff->indir_dma)) { + if (!firmware_has_feature(FW_FEATURE_CMO)) + dev_err(dev, "tx: unable to map descriptor array\n"); + tx_map_failed++; + tx_dropped++; + ret = NETDEV_TX_BUSY; + goto out; + } + lpar_rc = send_subcrq_indirect(adapter, handle_array[0], + (u64)tx_buff->indir_dma, + (u64)num_entries); + } else { + lpar_rc = send_subcrq(adapter, handle_array[0], &tx_crq); + } if (lpar_rc != H_SUCCESS) { dev_err(dev, "tx failed with code %ld\n", lpar_rc); @@ -1159,6 +1316,7 @@ static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter, union sub_crq *next; int index; int i, j; + u8 first; restart_loop: while (pending_scrq(adapter, scrq)) { @@ -1181,6 +1339,13 @@ restart_loop: txbuff->data_dma[j] = 0; txbuff->used_bounce = false; } + /* if sub_crq was sent indirectly */ + first = txbuff->indir_arr[0].generic.first; + if (first == IBMVNIC_CRQ_CMD) { + dma_unmap_single(dev, txbuff->indir_dma, + sizeof(txbuff->indir_arr), + DMA_TO_DEVICE); + } if (txbuff->last_frag) dev_kfree_skb_any(txbuff->skb); @@ -1494,6 +1659,28 @@ static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle, return rc; } +static int send_subcrq_indirect(struct ibmvnic_adapter *adapter, + u64 remote_handle, u64 ioba, u64 num_entries) +{ + unsigned int ua = adapter->vdev->unit_address; + struct device *dev = &adapter->vdev->dev; + int rc; + + /* Make sure the hypervisor sees the complete request */ + mb(); + rc = plpar_hcall_norets(H_SEND_SUB_CRQ_INDIRECT, ua, + cpu_to_be64(remote_handle), + ioba, num_entries); + + if (rc) { + if (rc == H_CLOSED) + dev_warn(dev, "CRQ Queue closed\n"); + dev_err(dev, "Send (indirect) error (rc=%d)\n", rc); + } + + return rc; +} + static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter, union ibmvnic_crq *crq) { diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h index 1a9993cc79b5..5af8a796e523 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.h +++ b/drivers/net/ethernet/ibm/ibmvnic.h @@ -879,6 +879,9 @@ struct ibmvnic_tx_buff { int pool_index; bool last_frag; bool used_bounce; + union sub_crq indir_arr[6]; 
+ u8 hdr_data[140]; + dma_addr_t indir_dma; }; struct ibmvnic_tx_pool { From 9be02cdfa601776f9e65013d9f1b949d5024f457 Mon Sep 17 00:00:00 2001 From: Thomas Falcon Date: Fri, 1 Apr 2016 17:20:35 -0500 Subject: [PATCH 0299/1649] ibmvnic: enable RX checksum offload Enable RX Checksum offload feature in the ibmvnic driver. Signed-off-by: Thomas Falcon Cc: John Allen Signed-off-by: David S. Miller --- drivers/net/ethernet/ibm/ibmvnic.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 4e97e762e297..21bccf6eb919 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -2105,6 +2105,10 @@ static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter) if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum) adapter->netdev->features |= NETIF_F_IPV6_CSUM; + if ((adapter->netdev->features & + (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))) + adapter->netdev->features |= NETIF_F_RXCSUM; + memset(&crq, 0, sizeof(crq)); crq.control_ip_offload.first = IBMVNIC_CRQ_CMD; crq.control_ip_offload.cmd = CONTROL_IP_OFFLOAD; From 4da46cebbd3b4dc445195a9672c99c1353af5695 Mon Sep 17 00:00:00 2001 From: Aaron Conole Date: Sat, 2 Apr 2016 15:26:43 -0400 Subject: [PATCH 0300/1649] net/core/dev: Warn on a too-short GRO frame When signaling that a GRO frame is ready to be processed, the network stack correctly checks length and aborts processing when a frame is less than 14 bytes. However, such a condition is really indicative of a broken driver, and should be loudly signaled, rather than silently dropped as the case is today. Convert the condition to use net_warn_ratelimited() to ensure the stack loudly complains about such broken drivers. Signed-off-by: Aaron Conole Signed-off-by: David S. Miller --- net/core/dev.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/net/core/dev.c b/net/core/dev.c index b9bcbe77d913..273f10d1e306 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -4663,6 +4663,8 @@ static struct sk_buff *napi_frags_skb(struct napi_struct *napi) if (unlikely(skb_gro_header_hard(skb, hlen))) { eth = skb_gro_header_slow(skb, hlen, 0); if (unlikely(!eth)) { + net_warn_ratelimited("%s: dropping impossible skb from %s\n", + __func__, napi->dev->name); napi_reuse_skb(napi, skb); return NULL; } From afb8ece4326f2151771f4c40b8d9f799cee5ae6e Mon Sep 17 00:00:00 2001 From: Colin King Date: Sat, 13 Feb 2016 23:57:16 +0000 Subject: [PATCH 0301/1649] i40e: remove redundant check on vsi->active_vlans active_vlans is an unsigned long array, hence a null check on this array is superfluous and can be removed. Detected with static analysis by smatch: drivers/net/ethernet/intel/i40e/i40e_debugfs.c:386 i40e_dbg_dump_vsi_seid() warn: this array is probably non-NULL. 
'vsi->active_vlans' Signed-off-by: Colin Ian King Acked-by: Shannon Nelson Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_debugfs.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c index 0c97733d253c..83dccf1792e7 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c +++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c @@ -147,9 +147,8 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid) dev_info(&pf->pdev->dev, " vlan_features = 0x%08lx\n", (unsigned long int)nd->vlan_features); } - if (vsi->active_vlans) - dev_info(&pf->pdev->dev, - " vlgrp: & = %p\n", vsi->active_vlans); + dev_info(&pf->pdev->dev, + " vlgrp: & = %p\n", vsi->active_vlans); dev_info(&pf->pdev->dev, " state = %li flags = 0x%08lx, netdev_registered = %i, current_netdev_flags = 0x%04x\n", vsi->state, vsi->flags, From 3845ccea34df30680b5be7ec119f5c74ab57fdc0 Mon Sep 17 00:00:00 2001 From: Anjali Singhai Jain Date: Fri, 18 Mar 2016 12:18:05 -0700 Subject: [PATCH 0302/1649] i40e: Enable Geneve offload for FW API ver > 1.4 for XL710/X710 devices This patch enables the Capability for XL710/X710 devices with FW API version higher than 1.4 to do geneve Rx offload. Change-ID: I9a8f87772c48d7d67dc85e3701d2e0b845034c0b Signed-off-by: Anjali Singhai Jain Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_main.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 297fd39ba255..fdcb50ac5028 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -9158,6 +9158,12 @@ static int i40e_config_netdev(struct i40e_vsi *vsi) I40E_VLAN_ANY, false, true); spin_unlock_bh(&vsi->mac_filter_list_lock); } + } else if ((pf->hw.aq.api_maj_ver > 1) || + ((pf->hw.aq.api_maj_ver == 1) && + (pf->hw.aq.api_min_ver > 4))) { + /* Supported in FW API version higher than 1.4 */ + pf->flags |= I40E_FLAG_GENEVE_OFFLOAD_CAPABLE; + pf->auto_disable_flags = I40E_FLAG_HW_ATR_EVICT_CAPABLE; } else { /* relate the VSI_VMDQ name to the VSI_MAIN name */ snprintf(netdev->name, IFNAMSIZ, "%sv%%d", From 442b25e455f5e693c23f9d3a32b208ca9ab25cf0 Mon Sep 17 00:00:00 2001 From: Mitch Williams Date: Fri, 18 Mar 2016 12:18:06 -0700 Subject: [PATCH 0303/1649] i40e: Remove unused variable This variable is vestigial, a remnant of the primordial code from which this driver spawned. We can safely remove it. 
Change-ID: I24e0fe338e7c7c50d27dc5515564f33caefbb93a Signed-off-by: Mitch Williams Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 47b9e62473c4..150002ed3ad6 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -1311,8 +1311,8 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg) struct i40e_pf *pf = vf->pf; i40e_status aq_ret = 0; struct i40e_vsi *vsi; - int i = 0, len = 0; int num_vsis = 1; + int len = 0; int ret; if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) { @@ -1374,15 +1374,14 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg) vfres->num_queue_pairs = vf->num_queue_pairs; vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf; if (vf->lan_vsi_idx) { - vfres->vsi_res[i].vsi_id = vf->lan_vsi_id; - vfres->vsi_res[i].vsi_type = I40E_VSI_SRIOV; - vfres->vsi_res[i].num_queue_pairs = vsi->alloc_queue_pairs; + vfres->vsi_res[0].vsi_id = vf->lan_vsi_id; + vfres->vsi_res[0].vsi_type = I40E_VSI_SRIOV; + vfres->vsi_res[0].num_queue_pairs = vsi->alloc_queue_pairs; /* VFs only use TC 0 */ - vfres->vsi_res[i].qset_handle + vfres->vsi_res[0].qset_handle = le16_to_cpu(vsi->info.qs_handle[0]); - ether_addr_copy(vfres->vsi_res[i].default_mac_addr, + ether_addr_copy(vfres->vsi_res[0].default_mac_addr, vf->default_lan_addr.addr); - i++; } set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states); From c4445aedfe092907c2e792ff76ed4338d9a1cd52 Mon Sep 17 00:00:00 2001 From: Mitch Williams Date: Fri, 18 Mar 2016 12:18:07 -0700 Subject: [PATCH 0304/1649] i40evf: Fix VLAN features Users of ethtool were being given the mistaken impression that this driver was able to change its VLAN tagging features, and were disappointed that this was not actually the case. Implement ndo_fix_features method so that we can adjust these flags as needed to avoid false impressions. 
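For context, a rough sketch of the generic flow (not taken from this patch): feature changes requested via ethtool funnel through the core's __netdev_update_features(), which gives the driver's .ndo_fix_features callback a chance to adjust the requested bits before they are committed, roughly:

	#include <linux/netdevice.h>

	/* hypothetical driver callback; drv_hw_supports_vlan_offload() is a
	 * made-up helper, the real i40evf implementation is in the diff below */
	static netdev_features_t drv_fix_features(struct net_device *netdev,
						  netdev_features_t features)
	{
		/* strip bits the hardware cannot actually toggle at runtime */
		if (!drv_hw_supports_vlan_offload(netdev))
			features &= ~(NETIF_F_HW_VLAN_CTAG_TX |
				      NETIF_F_HW_VLAN_CTAG_RX);
		return features;
	}

With such a callback in place, ethtool -K reports only what the device will actually do.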
Change-ID: I08584f103a4fa73d6a4128d472e4ef44dcfda57f Signed-off-by: Mitch Williams Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- .../net/ethernet/intel/i40evf/i40evf_main.c | 23 +++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c index e3973684746b..2d018b4d1afc 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c @@ -2252,6 +2252,28 @@ static int i40evf_change_mtu(struct net_device *netdev, int new_mtu) return 0; } +#define I40EVF_VLAN_FEATURES (NETIF_F_HW_VLAN_CTAG_TX |\ + NETIF_F_HW_VLAN_CTAG_RX |\ + NETIF_F_HW_VLAN_CTAG_FILTER) + +/** + * i40evf_fix_features - fix up the netdev feature bits + * @netdev: our net device + * @features: desired feature bits + * + * Returns fixed-up features bits + **/ +static netdev_features_t i40evf_fix_features(struct net_device *netdev, + netdev_features_t features) +{ + struct i40evf_adapter *adapter = netdev_priv(netdev); + + features &= ~I40EVF_VLAN_FEATURES; + if (adapter->vf_res->vf_offload_flags & I40E_VIRTCHNL_VF_OFFLOAD_VLAN) + features |= I40EVF_VLAN_FEATURES; + return features; +} + static const struct net_device_ops i40evf_netdev_ops = { .ndo_open = i40evf_open, .ndo_stop = i40evf_close, @@ -2264,6 +2286,7 @@ static const struct net_device_ops i40evf_netdev_ops = { .ndo_tx_timeout = i40evf_tx_timeout, .ndo_vlan_rx_add_vid = i40evf_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = i40evf_vlan_rx_kill_vid, + .ndo_fix_features = i40evf_fix_features, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = i40evf_netpoll, #endif From d6bf58c2e88f9e0cfc029c158e1182eb1f07d7eb Mon Sep 17 00:00:00 2001 From: Catherine Sullivan Date: Fri, 18 Mar 2016 12:18:08 -0700 Subject: [PATCH 0305/1649] i40e: Add new device ID for X722 The new device ID is 0x37D3 and it should follow the same flows and branding string as for 0x37D0. 
Change-ID: Ia5ad4a1910268c4666a3fd46a7afffbec55b4fc2 Signed-off-by: Catherine Sullivan Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_common.c | 1 + drivers/net/ethernet/intel/i40e/i40e_devids.h | 1 + drivers/net/ethernet/intel/i40e/i40e_main.c | 1 + drivers/net/ethernet/intel/i40evf/i40e_common.c | 1 + drivers/net/ethernet/intel/i40evf/i40e_devids.h | 1 + 5 files changed, 5 insertions(+) diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c index 8276a1393e6d..ebcc0d3ecbfb 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c @@ -60,6 +60,7 @@ static i40e_status i40e_set_mac_type(struct i40e_hw *hw) case I40E_DEV_ID_SFP_X722: case I40E_DEV_ID_1G_BASE_T_X722: case I40E_DEV_ID_10G_BASE_T_X722: + case I40E_DEV_ID_SFP_I_X722: hw->mac.type = I40E_MAC_X722; break; default: diff --git a/drivers/net/ethernet/intel/i40e/i40e_devids.h b/drivers/net/ethernet/intel/i40e/i40e_devids.h index 99257fcd1ef4..dd4457d29e98 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_devids.h +++ b/drivers/net/ethernet/intel/i40e/i40e_devids.h @@ -44,6 +44,7 @@ #define I40E_DEV_ID_SFP_X722 0x37D0 #define I40E_DEV_ID_1G_BASE_T_X722 0x37D1 #define I40E_DEV_ID_10G_BASE_T_X722 0x37D2 +#define I40E_DEV_ID_SFP_I_X722 0x37D3 #define i40e_is_40G_device(d) ((d) == I40E_DEV_ID_QSFP_A || \ (d) == I40E_DEV_ID_QSFP_B || \ diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index fdcb50ac5028..73d4bea4c574 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -90,6 +90,7 @@ static const struct pci_device_id i40e_pci_tbl[] = { {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_X722), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_X722), 0}, + {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_I_X722), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2_A), 0}, /* required last entry */ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_common.c b/drivers/net/ethernet/intel/i40evf/i40e_common.c index 771ac6ad8cda..4db0c0326185 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_common.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_common.c @@ -58,6 +58,7 @@ i40e_status i40e_set_mac_type(struct i40e_hw *hw) case I40E_DEV_ID_SFP_X722: case I40E_DEV_ID_1G_BASE_T_X722: case I40E_DEV_ID_10G_BASE_T_X722: + case I40E_DEV_ID_SFP_I_X722: hw->mac.type = I40E_MAC_X722; break; case I40E_DEV_ID_X722_VF: diff --git a/drivers/net/ethernet/intel/i40evf/i40e_devids.h b/drivers/net/ethernet/intel/i40evf/i40e_devids.h index ca8b58c3d1f5..70235706915e 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_devids.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_devids.h @@ -44,6 +44,7 @@ #define I40E_DEV_ID_SFP_X722 0x37D0 #define I40E_DEV_ID_1G_BASE_T_X722 0x37D1 #define I40E_DEV_ID_10G_BASE_T_X722 0x37D2 +#define I40E_DEV_ID_SFP_I_X722 0x37D3 #define I40E_DEV_ID_X722_VF 0x37CD #define I40E_DEV_ID_X722_VF_HV 0x37D9 From 7369ca8745499d001663e1dccf15064a3eb34b4d Mon Sep 17 00:00:00 2001 From: Mitch Williams Date: Fri, 18 Mar 2016 12:18:09 -0700 Subject: [PATCH 0306/1649] i40e: Make VF resets more reliable Clear the VFLR bit immediately after triggering a reset instead of waiting until after cleanup is complete. Make sure to trigger a reset every time, not just if the PF is up. 
These changes fix a problem where VF resets would get lost by the PF, preventing the VF driver from initializing. Change-ID: I5945cf2884095b7b0554867c64df8617e71d9d29 Signed-off-by: Mitch Williams Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 150002ed3ad6..169c256fd6ba 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -937,6 +937,10 @@ void i40e_reset_vf(struct i40e_vf *vf, bool flr) wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg); i40e_flush(hw); } + /* clear the VFLR bit in GLGEN_VFLRSTAT */ + reg_idx = (hw->func_caps.vf_base_id + vf->vf_id) / 32; + bit_idx = (hw->func_caps.vf_base_id + vf->vf_id) % 32; + wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx)); if (i40e_quiesce_vf_pci(vf)) dev_err(&pf->pdev->dev, "VF %d PCI transactions stuck\n", @@ -989,10 +993,6 @@ complete_reset: /* tell the VF the reset is done */ wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_VFACTIVE); - /* clear the VFLR bit in GLGEN_VFLRSTAT */ - reg_idx = (hw->func_caps.vf_base_id + vf->vf_id) / 32; - bit_idx = (hw->func_caps.vf_base_id + vf->vf_id) % 32; - wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx)); i40e_flush(hw); clear_bit(__I40E_VF_DISABLE, &pf->state); } @@ -2296,11 +2296,9 @@ int i40e_vc_process_vflr_event(struct i40e_pf *pf) /* read GLGEN_VFLRSTAT register to find out the flr VFs */ vf = &pf->vf[vf_id]; reg = rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx)); - if (reg & BIT(bit_idx)) { + if (reg & BIT(bit_idx)) /* i40e_reset_vf will clear the bit in GLGEN_VFLRSTAT */ - if (!test_bit(__I40E_DOWN, &pf->state)) - i40e_reset_vf(vf, true); - } + i40e_reset_vf(vf, true); } return 0; From 22ead37f8af83b4fa32c15cc21d3541e74661339 Mon Sep 17 00:00:00 2001 From: Mitch Williams Date: Fri, 18 Mar 2016 12:18:10 -0700 Subject: [PATCH 0307/1649] i40evf: Add longer wait after remove module Upon module remove, wait a little longer after requesting a reset before checking to see if the firmware responded. This change prevents double resets when the firmware is busy. Change-ID: Ieedc988ee82fac1f32a074bf4d9e4dba426bfa58 Signed-off-by: Mitch Williams Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40evf/i40evf_main.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c index 2d018b4d1afc..6561a33d84aa 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c @@ -2854,11 +2854,11 @@ static void i40evf_remove(struct pci_dev *pdev) adapter->state = __I40EVF_REMOVE; adapter->aq_required = 0; i40evf_request_reset(adapter); - msleep(20); + msleep(50); /* If the FW isn't responding, kick it once, but only once. */ if (!i40evf_asq_done(hw)) { i40evf_request_reset(adapter); - msleep(20); + msleep(50); } if (adapter->msix_entries) { From 8c806b676d21a49628250731f4e30a8a071d080c Mon Sep 17 00:00:00 2001 From: Shannon Nelson Date: Fri, 18 Mar 2016 12:18:11 -0700 Subject: [PATCH 0308/1649] i40e: Disable link polling Periodic link polling was added when the link events were found not to be trustworthy. This was the case early on, but was likely because the link event mask was being used incorrectly. 
As this has been fixed in recent code, we can disable the link polling to lessen the AQ traffic. Change-ID: Id890b5ee3c2d04381fc76ffa434777644f5d8eb0 Signed-off-by: Shannon Nelson Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_main.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 73d4bea4c574..184f3f965a01 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -8448,7 +8448,6 @@ static int i40e_sw_init(struct i40e_pf *pf) /* Set default capability flags */ pf->flags = I40E_FLAG_RX_CSUM_ENABLED | I40E_FLAG_MSI_ENABLED | - I40E_FLAG_LINK_POLLING_ENABLED | I40E_FLAG_MSIX_ENABLED; if (iommu_present(&pci_bus_type)) From 539a379c50220d6ac19c7300671fe25819bd3f1b Mon Sep 17 00:00:00 2001 From: Catherine Sullivan Date: Fri, 18 Mar 2016 12:18:12 -0700 Subject: [PATCH 0309/1649] i40evf: Fix get_rss_aq We were passing in the seed where we should just be passing false because we want the VSI table not the pf table. Change-ID: I9b633ab06eb59468087f0c0af8539857e99f9495 Signed-off-by: Catherine Sullivan Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40evf/i40evf_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c index 6561a33d84aa..2d1fe560cf7d 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c @@ -1341,7 +1341,7 @@ static int i40evf_get_rss_aq(struct i40e_vsi *vsi, const u8 *seed, } if (lut) { - ret = i40evf_aq_get_rss_lut(hw, vsi->id, seed, lut, lut_size); + ret = i40evf_aq_get_rss_lut(hw, vsi->id, false, lut, lut_size); if (ret) { dev_err(&adapter->pdev->dev, "Cannot get RSS lut, err %s aq_err %s\n", From 16badc34695ff489a39e450b4e4e5a241ac85a31 Mon Sep 17 00:00:00 2001 From: Avinash Dayanand Date: Fri, 18 Mar 2016 12:18:13 -0700 Subject: [PATCH 0310/1649] i40e: Fix for supported link modes in 10GBaseT PHY's 100baseT/Full is now listed and supported link mode for 10GBaseT PHY. This is a fix to list all the supported link modes of 10GBaseT PHY. 
Change-ID: If2be3212ef0fef85fd5d6e4550c7783de2f915e9 Signed-off-by: Avinash Dayanand Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 410d237f9137..8a83d4514812 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -313,6 +313,13 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw, ecmd->advertising |= ADVERTISED_10000baseT_Full; if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB) ecmd->advertising |= ADVERTISED_1000baseT_Full; + /* adding 100baseT support for 10GBASET_PHY */ + if (pf->flags & I40E_FLAG_HAVE_10GBASET_PHY) { + ecmd->supported |= SUPPORTED_100baseT_Full; + ecmd->advertising |= ADVERTISED_100baseT_Full | + ADVERTISED_1000baseT_Full | + ADVERTISED_10000baseT_Full; + } break; case I40E_PHY_TYPE_1000BASE_T_OPTICAL: ecmd->supported = SUPPORTED_Autoneg | @@ -325,6 +332,15 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw, SUPPORTED_100baseT_Full; if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB) ecmd->advertising |= ADVERTISED_100baseT_Full; + /* firmware detects 10G phy as 100M phy at 100M speed */ + if (pf->flags & I40E_FLAG_HAVE_10GBASET_PHY) { + ecmd->supported |= SUPPORTED_10000baseT_Full | + SUPPORTED_1000baseT_Full; + ecmd->advertising |= ADVERTISED_Autoneg | + ADVERTISED_100baseT_Full | + ADVERTISED_1000baseT_Full | + ADVERTISED_10000baseT_Full; + } break; case I40E_PHY_TYPE_10GBASE_CR1_CU: case I40E_PHY_TYPE_10GBASE_CR1: From 18b7af57d9c1165c2b8f13ec4668d6d7f51708cf Mon Sep 17 00:00:00 2001 From: Mitch Williams Date: Fri, 18 Mar 2016 12:18:14 -0700 Subject: [PATCH 0311/1649] i40e: Lower some message levels These conditions can happen any time VFs are enabled or disabled and are not really indicative of fatal problems unless they happen continuously. Lower the log level so that people don't get scared. 
Change-ID: I1ceb4adbd10d03cbeed54d1f5b7f20d60328351d Signed-off-by: Mitch Williams Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 169c256fd6ba..9924503c88f5 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -1232,8 +1232,8 @@ static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode, /* single place to detect unsuccessful return values */ if (v_retval) { vf->num_invalid_msgs++; - dev_err(&pf->pdev->dev, "VF %d failed opcode %d, error: %d\n", - vf->vf_id, v_opcode, v_retval); + dev_info(&pf->pdev->dev, "VF %d failed opcode %d, retval: %d\n", + vf->vf_id, v_opcode, v_retval); if (vf->num_invalid_msgs > I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED) { dev_err(&pf->pdev->dev, @@ -1251,9 +1251,9 @@ static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode, aq_ret = i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval, msg, msglen, NULL); if (aq_ret) { - dev_err(&pf->pdev->dev, - "Unable to send the message to VF %d aq_err %d\n", - vf->vf_id, pf->hw.aq.asq_last_status); + dev_info(&pf->pdev->dev, + "Unable to send the message to VF %d aq_err %d\n", + vf->vf_id, pf->hw.aq.asq_last_status); return -EIO; } From 867a79e37ed9a3a5a2051cc11df21a57a8a00bfe Mon Sep 17 00:00:00 2001 From: Shannon Nelson Date: Fri, 18 Mar 2016 12:18:15 -0700 Subject: [PATCH 0312/1649] i40e: Request PHY media event at reset time Add the Media Not Available flag to the link event mask. It seems that event comes first if you have a DA cable pulled out, but there's no follow-up event for Link Down; if you're not looking for MEDIA_NA you will get no event, even though there's now no Link. 
Change-ID: cb3340a2849805bb881f64f6f2ae810eef46eba7 Signed-off-by: Shannon Nelson Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_main.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 184f3f965a01..d2c0106fe71e 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -6859,6 +6859,7 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit) */ ret = i40e_aq_set_phy_int_mask(&pf->hw, ~(I40E_AQ_EVENT_LINK_UPDOWN | + I40E_AQ_EVENT_MEDIA_NA | I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL); if (ret) dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n", @@ -11070,6 +11071,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) */ err = i40e_aq_set_phy_int_mask(&pf->hw, ~(I40E_AQ_EVENT_LINK_UPDOWN | + I40E_AQ_EVENT_MEDIA_NA | I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL); if (err) dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n", From 066439ce791b5d8533556a89836c0849589c2b41 Mon Sep 17 00:00:00 2001 From: Avinash Dayanand Date: Fri, 18 Mar 2016 12:18:16 -0700 Subject: [PATCH 0313/1649] i40e/i40evf: Bump patch from 1.5.1 to 1.5.2 Signed-off-by: Avinash Dayanand Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_main.c | 2 +- drivers/net/ethernet/intel/i40evf/i40evf_main.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index d2c0106fe71e..d6147f899062 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -46,7 +46,7 @@ static const char i40e_driver_string[] = #define DRV_VERSION_MAJOR 1 #define DRV_VERSION_MINOR 5 -#define DRV_VERSION_BUILD 1 +#define DRV_VERSION_BUILD 2 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ __stringify(DRV_VERSION_MINOR) "." \ __stringify(DRV_VERSION_BUILD) DRV_KERN diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c index 2d1fe560cf7d..f4dada02bbcf 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c @@ -38,7 +38,7 @@ static const char i40evf_driver_string[] = #define DRV_VERSION_MAJOR 1 #define DRV_VERSION_MINOR 5 -#define DRV_VERSION_BUILD 1 +#define DRV_VERSION_BUILD 2 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ __stringify(DRV_VERSION_MINOR) "." \ __stringify(DRV_VERSION_BUILD) \ From 24d41e5e2c9afe99b0584832206ba8779dfb783e Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Fri, 18 Mar 2016 16:06:47 -0700 Subject: [PATCH 0314/1649] i40e/i40evf: Fix TSO checksum pseudo-header adjustment With IPv4 and IPv6 now using the same format for checksums based on the length of the frame we need to update the i40e and i40evf drivers so that they correctly account for lengths greater than or equal to 64K. With this patch the driver should now correctly update checksums for frames up to 16776960 in length which should be more than large enough for all possible TSO frames in the near future. 
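To see why folding the length as a 32-bit quantity matters, here is a self-contained userspace sketch of the one's-complement arithmetic that csum_replace_by_diff() performs on the pseudo-header checksum (illustrative only; it mirrors the math, not the kernel helpers):

	#include <assert.h>
	#include <stdint.h>

	/* fold a 32-bit one's-complement accumulator down to 16 bits */
	static uint16_t fold32(uint32_t v)
	{
		v = (v & 0xffff) + (v >> 16);
		v = (v & 0xffff) + (v >> 16);
		return (uint16_t)v;
	}

	/* 16-bit one's-complement add with end-around carry */
	static uint16_t add1c(uint16_t a, uint16_t b)
	{
		return fold32((uint32_t)a + b);
	}

	/* remove a payload length (possibly >= 64K) from a checksum:
	 * subtracting x in one's complement is adding ~x, but the length
	 * must be folded from 32 bits first -- truncating it to 16 bits,
	 * as the old code did, loses the high bits for frames of 64K+ */
	static uint16_t sub_len(uint16_t sum, uint32_t len)
	{
		return add1c(sum, (uint16_t)~fold32(len));
	}

	int main(void)
	{
		uint16_t hdr = 0x1234;	/* pseudo-header sum without length */
		uint32_t len = 70000;	/* > 64K, the case being fixed */
		uint16_t sum = add1c(hdr, fold32(len));

		assert(sub_len(sum, len) == hdr);
		return 0;
	}
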
Signed-off-by: Alexander Duyck Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_txrx.c | 11 ++++------- drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 11 ++++------- 2 files changed, 8 insertions(+), 14 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 5bef5b0f00d9..5d5fa5359a1d 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -2304,10 +2304,8 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb, l4_offset = l4.hdr - skb->data; /* remove payload length from outer checksum */ - paylen = (__force u16)l4.udp->check; - paylen += ntohs((__force __be16)1) * - (u16)~(skb->len - l4_offset); - l4.udp->check = ~csum_fold((__force __wsum)paylen); + paylen = skb->len - l4_offset; + csum_replace_by_diff(&l4.udp->check, htonl(paylen)); } /* reset pointers to inner headers */ @@ -2327,9 +2325,8 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb, l4_offset = l4.hdr - skb->data; /* remove payload length from inner checksum */ - paylen = (__force u16)l4.tcp->check; - paylen += ntohs((__force __be16)1) * (u16)~(skb->len - l4_offset); - l4.tcp->check = ~csum_fold((__force __wsum)paylen); + paylen = skb->len - l4_offset; + csum_replace_by_diff(&l4.tcp->check, htonl(paylen)); /* compute length of segmentation header */ *hdr_len = (l4.tcp->doff * 4) + l4_offset; diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c index 570348d93e5d..04aabc52ba0d 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c @@ -1571,10 +1571,8 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb, l4_offset = l4.hdr - skb->data; /* remove payload length from outer checksum */ - paylen = (__force u16)l4.udp->check; - paylen += ntohs((__force __be16)1) * - (u16)~(skb->len - l4_offset); - l4.udp->check = ~csum_fold((__force __wsum)paylen); + paylen = skb->len - l4_offset; + csum_replace_by_diff(&l4.udp->check, htonl(paylen)); } /* reset pointers to inner headers */ @@ -1594,9 +1592,8 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb, l4_offset = l4.hdr - skb->data; /* remove payload length from inner checksum */ - paylen = (__force u16)l4.tcp->check; - paylen += ntohs((__force __be16)1) * (u16)~(skb->len - l4_offset); - l4.tcp->check = ~csum_fold((__force __wsum)paylen); + paylen = skb->len - l4_offset; + csum_replace_by_diff(&l4.tcp->check, htonl(paylen)); /* compute length of segmentation header */ *hdr_len = (l4.tcp->doff * 4) + l4_offset; From d63b548fffdbd239a5e65bb89424be19229048ba Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Thu, 31 Mar 2016 20:02:02 +0300 Subject: [PATCH 0315/1649] mac80211: allow passing transmitter station on RX Sometimes drivers already looked up, or know out-of-band from their device, which station transmitted a given RX frame. Allow them to pass the station pointer to mac80211 to save the extra lookup. 
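From a driver's perspective the API change is just the extra argument: a driver that has already resolved the transmitting station (for example from a station index in its RX descriptor) passes it in, everyone else passes NULL and keeps the old behaviour, as the iwlwifi hunks below do. A minimal hypothetical call site - the descriptor layout and lookup helper are invented for illustration:

static void my_driver_handle_rx(struct ieee80211_hw *hw,
				struct my_rx_desc *desc,	/* hypothetical */
				struct sk_buff *skb,
				struct napi_struct *napi)
{
	/* NULL is fine when the transmitter is unknown */
	struct ieee80211_sta *sta = my_lookup_sta(hw, desc->sta_id);

	ieee80211_rx_napi(hw, sta, skb, napi);
}
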
Signed-off-by: Johannes Berg --- drivers/net/wireless/intel/iwlwifi/dvm/rx.c | 2 +- .../net/wireless/intel/iwlwifi/mvm/mac-ctxt.c | 2 +- drivers/net/wireless/intel/iwlwifi/mvm/rx.c | 2 +- drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c | 2 +- include/net/mac80211.h | 7 ++++--- net/mac80211/rx.c | 18 +++++++++++++----- 6 files changed, 21 insertions(+), 12 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rx.c b/drivers/net/wireless/intel/iwlwifi/dvm/rx.c index 52ab1e012e8f..27ea61e3a390 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/rx.c @@ -686,7 +686,7 @@ static void iwlagn_pass_packet_to_mac80211(struct iwl_priv *priv, memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats)); - ieee80211_rx_napi(priv->hw, skb, priv->napi); + ieee80211_rx_napi(priv->hw, NULL, skb, priv->napi); } static u32 iwlagn_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c index e885db3464b0..fba1cd2ce1ec 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c @@ -1499,5 +1499,5 @@ void iwl_mvm_rx_stored_beacon_notif(struct iwl_mvm *mvm, memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status)); /* pass it as regular rx to mac80211 */ - ieee80211_rx_napi(mvm->hw, skb, NULL); + ieee80211_rx_napi(mvm->hw, NULL, skb, NULL); } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c index 485cfc1a4daa..d8cadf2fe098 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c @@ -131,7 +131,7 @@ static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm, fraglen, rxb->truesize); } - ieee80211_rx_napi(mvm->hw, skb, napi); + ieee80211_rx_napi(mvm->hw, NULL, skb, napi); } /* diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c index 9a54f2d2a66b..38e7fa9bd675 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c @@ -210,7 +210,7 @@ static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm, if (iwl_mvm_check_pn(mvm, skb, queue, sta)) kfree_skb(skb); else - ieee80211_rx_napi(mvm->hw, skb, napi); + ieee80211_rx_napi(mvm->hw, NULL, skb, napi); } static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm, diff --git a/include/net/mac80211.h b/include/net/mac80211.h index 6e346750cb29..fd5ec446a7a9 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -3855,11 +3855,12 @@ void ieee80211_restart_hw(struct ieee80211_hw *hw); * This function must be called with BHs disabled. 
* * @hw: the hardware this frame came in on + * @sta: the station the frame was received from, or %NULL * @skb: the buffer to receive, owned by mac80211 after this call * @napi: the NAPI context */ -void ieee80211_rx_napi(struct ieee80211_hw *hw, struct sk_buff *skb, - struct napi_struct *napi); +void ieee80211_rx_napi(struct ieee80211_hw *hw, struct ieee80211_sta *sta, + struct sk_buff *skb, struct napi_struct *napi); /** * ieee80211_rx - receive frame @@ -3883,7 +3884,7 @@ void ieee80211_rx_napi(struct ieee80211_hw *hw, struct sk_buff *skb, */ static inline void ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb) { - ieee80211_rx_napi(hw, skb, NULL); + ieee80211_rx_napi(hw, NULL, skb, NULL); } /** diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 009bb90d7f5a..212b9993c8dc 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -3552,6 +3552,7 @@ static bool ieee80211_prepare_and_rx_handle(struct ieee80211_rx_data *rx, * be called with rcu_read_lock protection. */ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw, + struct ieee80211_sta *pubsta, struct sk_buff *skb, struct napi_struct *napi) { @@ -3561,7 +3562,6 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw, __le16 fc; struct ieee80211_rx_data rx; struct ieee80211_sub_if_data *prev; - struct sta_info *sta, *prev_sta; struct rhash_head *tmp; int err = 0; @@ -3597,7 +3597,14 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw, ieee80211_is_beacon(hdr->frame_control))) ieee80211_scan_rx(local, skb); - if (ieee80211_is_data(fc)) { + if (pubsta) { + rx.sta = container_of(pubsta, struct sta_info, sta); + rx.sdata = rx.sta->sdata; + if (ieee80211_prepare_and_rx_handle(&rx, skb, true)) + return; + goto out; + } else if (ieee80211_is_data(fc)) { + struct sta_info *sta, *prev_sta; const struct bucket_table *tbl; prev_sta = NULL; @@ -3671,8 +3678,8 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw, * This is the receive path handler. It is called by a low level driver when an * 802.11 MPDU is received from the hardware. */ -void ieee80211_rx_napi(struct ieee80211_hw *hw, struct sk_buff *skb, - struct napi_struct *napi) +void ieee80211_rx_napi(struct ieee80211_hw *hw, struct ieee80211_sta *pubsta, + struct sk_buff *skb, struct napi_struct *napi) { struct ieee80211_local *local = hw_to_local(hw); struct ieee80211_rate *rate = NULL; @@ -3771,7 +3778,8 @@ void ieee80211_rx_napi(struct ieee80211_hw *hw, struct sk_buff *skb, ieee80211_tpt_led_trig_rx(local, ((struct ieee80211_hdr *)skb->data)->frame_control, skb->len); - __ieee80211_rx_handle_packet(hw, skb, napi); + + __ieee80211_rx_handle_packet(hw, pubsta, skb, napi); rcu_read_unlock(); From de8f18d3a80bee94ee8a2d3c511707390dad88d6 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Thu, 31 Mar 2016 20:02:03 +0300 Subject: [PATCH 0316/1649] mac80211: count MSDUs in A-MSDU properly For the RX MSDU statistics, we need to count the number of MSDUs created and accepted from an A-MSDU. Right now, all frames in any A-MSDUs were completely ignored. Fix this by moving the RX MSDU statistics accounting into the deliver function. 
Signed-off-by: Johannes Berg --- net/mac80211/rx.c | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 212b9993c8dc..a94d314d0055 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -2129,6 +2129,15 @@ ieee80211_deliver_skb(struct ieee80211_rx_data *rx) ieee80211_rx_stats(dev, skb->len); + if (rx->sta) { + /* The seqno index has the same property as needed + * for the rx_msdu field, i.e. it is IEEE80211_NUM_TIDS + * for non-QoS-data frames. Here we know it's a data + * frame, so count MSDUs. + */ + rx->sta->rx_stats.msdu[rx->seqno_idx]++; + } + if ((sdata->vif.type == NL80211_IFTYPE_AP || sdata->vif.type == NL80211_IFTYPE_AP_VLAN) && !(sdata->flags & IEEE80211_SDATA_DONT_BRIDGE_PACKETS) && @@ -2415,15 +2424,6 @@ ieee80211_rx_h_data(struct ieee80211_rx_data *rx) if (unlikely(!ieee80211_is_data_present(hdr->frame_control))) return RX_DROP_MONITOR; - if (rx->sta) { - /* The seqno index has the same property as needed - * for the rx_msdu field, i.e. it is IEEE80211_NUM_TIDS - * for non-QoS-data frames. Here we know it's a data - * frame, so count MSDUs. - */ - rx->sta->rx_stats.msdu[rx->seqno_idx]++; - } - /* * Send unexpected-4addr-frame event to hostapd. For older versions, * also drop the frame to cooked monitor interfaces. From 8ebaa5b0a791631dddbb3a215b342fabb2a5307b Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Thu, 31 Mar 2016 20:02:04 +0300 Subject: [PATCH 0317/1649] mac80211: move semicolon out of CALL_RXH macro Move the semicolon, people typically assume that and once line already put a semicolon behind the "call". Signed-off-by: Johannes Berg --- net/mac80211/rx.c | 34 +++++++++++++++++----------------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index a94d314d0055..570ae3d03ae1 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -3201,7 +3201,7 @@ static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx, res = rxh(rx); \ if (res != RX_CONTINUE) \ goto rxh_next; \ - } while (0); + } while (0) /* Lock here to avoid hitting all of the data used in the RX * path (e.g. key data, station data, ...) 
concurrently when @@ -3219,30 +3219,30 @@ static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx, */ rx->skb = skb; - CALL_RXH(ieee80211_rx_h_check_more_data) - CALL_RXH(ieee80211_rx_h_uapsd_and_pspoll) - CALL_RXH(ieee80211_rx_h_sta_process) - CALL_RXH(ieee80211_rx_h_decrypt) - CALL_RXH(ieee80211_rx_h_defragment) - CALL_RXH(ieee80211_rx_h_michael_mic_verify) + CALL_RXH(ieee80211_rx_h_check_more_data); + CALL_RXH(ieee80211_rx_h_uapsd_and_pspoll); + CALL_RXH(ieee80211_rx_h_sta_process); + CALL_RXH(ieee80211_rx_h_decrypt); + CALL_RXH(ieee80211_rx_h_defragment); + CALL_RXH(ieee80211_rx_h_michael_mic_verify); /* must be after MMIC verify so header is counted in MPDU mic */ #ifdef CONFIG_MAC80211_MESH if (ieee80211_vif_is_mesh(&rx->sdata->vif)) CALL_RXH(ieee80211_rx_h_mesh_fwding); #endif - CALL_RXH(ieee80211_rx_h_amsdu) - CALL_RXH(ieee80211_rx_h_data) + CALL_RXH(ieee80211_rx_h_amsdu); + CALL_RXH(ieee80211_rx_h_data); /* special treatment -- needs the queue */ res = ieee80211_rx_h_ctrl(rx, frames); if (res != RX_CONTINUE) goto rxh_next; - CALL_RXH(ieee80211_rx_h_mgmt_check) - CALL_RXH(ieee80211_rx_h_action) - CALL_RXH(ieee80211_rx_h_userspace_mgmt) - CALL_RXH(ieee80211_rx_h_action_return) - CALL_RXH(ieee80211_rx_h_mgmt) + CALL_RXH(ieee80211_rx_h_mgmt_check); + CALL_RXH(ieee80211_rx_h_action); + CALL_RXH(ieee80211_rx_h_userspace_mgmt); + CALL_RXH(ieee80211_rx_h_action_return); + CALL_RXH(ieee80211_rx_h_mgmt); rxh_next: ieee80211_rx_handlers_result(rx, res); @@ -3265,10 +3265,10 @@ static void ieee80211_invoke_rx_handlers(struct ieee80211_rx_data *rx) res = rxh(rx); \ if (res != RX_CONTINUE) \ goto rxh_next; \ - } while (0); + } while (0) - CALL_RXH(ieee80211_rx_h_check_dup) - CALL_RXH(ieee80211_rx_h_check) + CALL_RXH(ieee80211_rx_h_check_dup); + CALL_RXH(ieee80211_rx_h_check); ieee80211_rx_reorder_ampdu(rx, &reorder_release); From 0be6ed133835b1a5e492f86099ce372b5a2e2296 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Thu, 31 Mar 2016 20:02:05 +0300 Subject: [PATCH 0318/1649] mac80211: move averaged values out of rx_stats Move the averaged values out of rx_stats and into rx_stats_avg, to cleanly split them out. The averaged ones cannot be supported for parallel RX in a per-CPU fashion, while the other values can be collected per CPU and then combined/selected when needed. 
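The reason the averages must stay in one shared struct is that an exponentially weighted moving average is a single accumulated value per signal; unlike plain counters, per-CPU copies could not simply be summed when read. A generic sketch of the update rule behind the ewma_signal_*() helpers (illustrative only - the kernel uses the fixed-point DECLARE_EWMA() machinery):

/* avg_new = avg_old * (weight - 1) / weight + sample / weight;
 * each new sample pulls the average towards itself by 1/weight.
 */
static unsigned long ewma_update(unsigned long avg, unsigned long sample,
				 unsigned int weight)
{
	if (!avg)				/* simplistic seeding */
		return sample;
	return (avg * (weight - 1) + sample) / weight;
}
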
Signed-off-by: Johannes Berg --- net/mac80211/mesh_plink.c | 2 +- net/mac80211/rx.c | 4 ++-- net/mac80211/sta_info.c | 10 +++++----- net/mac80211/sta_info.h | 6 ++++-- 4 files changed, 12 insertions(+), 10 deletions(-) diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c index ecfba8ad29e4..563bea050383 100644 --- a/net/mac80211/mesh_plink.c +++ b/net/mac80211/mesh_plink.c @@ -61,7 +61,7 @@ static bool rssi_threshold_check(struct ieee80211_sub_if_data *sdata, s32 rssi_threshold = sdata->u.mesh.mshcfg.rssi_threshold; return rssi_threshold == 0 || (sta && - (s8)-ewma_signal_read(&sta->rx_stats.avg_signal) > + (s8)-ewma_signal_read(&sta->rx_stats_avg.signal) > rssi_threshold); } diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 570ae3d03ae1..d14c66df9e86 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -1455,7 +1455,7 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx) sta->rx_stats.bytes += rx->skb->len; if (!(status->flag & RX_FLAG_NO_SIGNAL_VAL)) { sta->rx_stats.last_signal = status->signal; - ewma_signal_add(&sta->rx_stats.avg_signal, -status->signal); + ewma_signal_add(&sta->rx_stats_avg.signal, -status->signal); } if (status->chains) { @@ -1467,7 +1467,7 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx) continue; sta->rx_stats.chain_signal_last[i] = signal; - ewma_signal_add(&sta->rx_stats.chain_signal_avg[i], + ewma_signal_add(&sta->rx_stats_avg.chain_signal[i], -signal); } } diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index 01e070c6e713..4f19505f3757 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c @@ -341,9 +341,9 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata, sta->reserved_tid = IEEE80211_TID_UNRESERVED; sta->last_connected = ktime_get_seconds(); - ewma_signal_init(&sta->rx_stats.avg_signal); - for (i = 0; i < ARRAY_SIZE(sta->rx_stats.chain_signal_avg); i++) - ewma_signal_init(&sta->rx_stats.chain_signal_avg[i]); + ewma_signal_init(&sta->rx_stats_avg.signal); + for (i = 0; i < ARRAY_SIZE(sta->rx_stats_avg.chain_signal); i++) + ewma_signal_init(&sta->rx_stats_avg.chain_signal[i]); if (local->ops->wake_tx_queue) { void *txq_data; @@ -2056,7 +2056,7 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo) if (!(sinfo->filled & BIT(NL80211_STA_INFO_SIGNAL_AVG))) { sinfo->signal_avg = - -ewma_signal_read(&sta->rx_stats.avg_signal); + -ewma_signal_read(&sta->rx_stats_avg.signal); sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL_AVG); } } @@ -2072,7 +2072,7 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo) sinfo->chain_signal[i] = sta->rx_stats.chain_signal_last[i]; sinfo->chain_signal_avg[i] = - -ewma_signal_read(&sta->rx_stats.chain_signal_avg[i]); + -ewma_signal_read(&sta->rx_stats_avg.chain_signal[i]); } } diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h index 4e1ed6f26484..93dc567e6100 100644 --- a/net/mac80211/sta_info.h +++ b/net/mac80211/sta_info.h @@ -450,16 +450,18 @@ struct sta_info { unsigned long fragments; unsigned long dropped; int last_signal; - struct ewma_signal avg_signal; u8 chains; s8 chain_signal_last[IEEE80211_MAX_CHAINS]; - struct ewma_signal chain_signal_avg[IEEE80211_MAX_CHAINS]; int last_rate_idx; u32 last_rate_flag; u32 last_rate_vht_flag; u8 last_rate_vht_nss; u64 msdu[IEEE80211_NUM_TIDS + 1]; } rx_stats; + struct { + struct ewma_signal signal; + struct ewma_signal chain_signal[IEEE80211_MAX_CHAINS]; + } rx_stats_avg; /* Plus 1 for non-QoS frames */ __le16 last_seq_ctrl[IEEE80211_NUM_TIDS + 1]; From 
2df8bfd7240117b91241a01e3f50f2e83827ccab Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Thu, 31 Mar 2016 20:02:06 +0300 Subject: [PATCH 0319/1649] mac80211: remove rx_stats.last_rx update after sta alloc There's no need to update rx_stats.last_rx after allocating a station since it's already updated during allocation. Signed-off-by: Johannes Berg --- net/mac80211/ibss.c | 4 ---- net/mac80211/ocb.c | 2 -- 2 files changed, 6 deletions(-) diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c index fc3238376b39..b3407dbe4b7d 100644 --- a/net/mac80211/ibss.c +++ b/net/mac80211/ibss.c @@ -649,8 +649,6 @@ ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata, const u8 *bssid, return NULL; } - sta->rx_stats.last_rx = jiffies; - /* make sure mandatory rates are always added */ sband = local->hw.wiphy->bands[band]; sta->sta.supp_rates[band] = supp_rates | @@ -1236,8 +1234,6 @@ void ieee80211_ibss_rx_no_sta(struct ieee80211_sub_if_data *sdata, if (!sta) return; - sta->rx_stats.last_rx = jiffies; - /* make sure mandatory rates are always added */ sband = local->hw.wiphy->bands[band]; sta->sta.supp_rates[band] = supp_rates | diff --git a/net/mac80211/ocb.c b/net/mac80211/ocb.c index 0be0aadfc559..88e6ebbbe24f 100644 --- a/net/mac80211/ocb.c +++ b/net/mac80211/ocb.c @@ -75,8 +75,6 @@ void ieee80211_ocb_rx_no_sta(struct ieee80211_sub_if_data *sdata, if (!sta) return; - sta->rx_stats.last_rx = jiffies; - /* Add only mandatory rates for now */ sband = local->hw.wiphy->bands[band]; sta->sta.supp_rates[band] = From b8da6b6a99b4b0d8d464b621ba7dcbcb08172b7d Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Thu, 31 Mar 2016 20:02:07 +0300 Subject: [PATCH 0320/1649] mac80211: add separate last_ack variable Instead of touching the rx_stats.last_rx from the status path, introduce and use a status_stats.last_ack variable. This will make rx_stats.last_rx indicate when the last frame was received, making it available for real "last_rx" and statistics gathering; statistics, when done per-CPU, will need to figure out which place was updated last for those items where the "last" value is exposed. 
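The helper added below simply reports the later of the two timestamps, and the callers switch from open-coded time_after(jiffies, ...) comparisons to the equivalent time_is_before_jiffies()/time_is_after_jiffies() forms. A sketch of the resulting expiry idiom (expire_station() is a placeholder):

	unsigned long last_active = ieee80211_sta_last_active(sta);

	/* time_is_before_jiffies(t) is true once t lies in the past, i.e. it
	 * is shorthand for time_before(t, jiffies), wraparound-safe.
	 */
	if (time_is_before_jiffies(last_active + exp_time))
		expire_station(sta);		/* placeholder */
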
Signed-off-by: Johannes Berg --- net/mac80211/ibss.c | 13 ++++++++----- net/mac80211/sta_info.c | 13 +++++++++++-- net/mac80211/sta_info.h | 3 +++ net/mac80211/status.c | 4 ++-- 4 files changed, 24 insertions(+), 9 deletions(-) diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c index b3407dbe4b7d..c6d4b75eb60b 100644 --- a/net/mac80211/ibss.c +++ b/net/mac80211/ibss.c @@ -668,10 +668,11 @@ static int ieee80211_sta_active_ibss(struct ieee80211_sub_if_data *sdata) rcu_read_lock(); list_for_each_entry_rcu(sta, &local->sta_list, list) { + unsigned long last_active = ieee80211_sta_last_active(sta); + if (sta->sdata == sdata && - time_after(sta->rx_stats.last_rx + - IEEE80211_IBSS_MERGE_INTERVAL, - jiffies)) { + time_is_after_jiffies(last_active + + IEEE80211_IBSS_MERGE_INTERVAL)) { active++; break; } @@ -1255,11 +1256,13 @@ static void ieee80211_ibss_sta_expire(struct ieee80211_sub_if_data *sdata) mutex_lock(&local->sta_mtx); list_for_each_entry_safe(sta, tmp, &local->sta_list, list) { + unsigned long last_active = ieee80211_sta_last_active(sta); + if (sdata != sta->sdata) continue; - if (time_after(jiffies, sta->rx_stats.last_rx + exp_time) || - (time_after(jiffies, sta->rx_stats.last_rx + exp_rsn) && + if (time_is_before_jiffies(last_active + exp_time) || + (time_is_before_jiffies(last_active + exp_rsn) && sta->sta_state != IEEE80211_STA_AUTHORIZED)) { sta_dbg(sta->sdata, "expiring inactive %sSTA %pM\n", sta->sta_state != IEEE80211_STA_AUTHORIZED ? diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index 4f19505f3757..ac73b9c7e8d8 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c @@ -1094,10 +1094,12 @@ void ieee80211_sta_expire(struct ieee80211_sub_if_data *sdata, mutex_lock(&local->sta_mtx); list_for_each_entry_safe(sta, tmp, &local->sta_list, list) { + unsigned long last_active = ieee80211_sta_last_active(sta); + if (sdata != sta->sdata) continue; - if (time_after(jiffies, sta->rx_stats.last_rx + exp_time)) { + if (time_is_before_jiffies(last_active + exp_time)) { sta_dbg(sta->sdata, "expiring inactive STA %pM\n", sta->sta.addr); @@ -2000,7 +2002,7 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo) sinfo->connected_time = ktime_get_seconds() - sta->last_connected; sinfo->inactive_time = - jiffies_to_msecs(jiffies - sta->rx_stats.last_rx); + jiffies_to_msecs(jiffies - ieee80211_sta_last_active(sta)); if (!(sinfo->filled & (BIT(NL80211_STA_INFO_TX_BYTES64) | BIT(NL80211_STA_INFO_TX_BYTES)))) { @@ -2186,3 +2188,10 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo) sinfo->expected_throughput = thr; } } + +unsigned long ieee80211_sta_last_active(struct sta_info *sta) +{ + if (time_after(sta->rx_stats.last_rx, sta->status_stats.last_ack)) + return sta->rx_stats.last_rx; + return sta->status_stats.last_ack; +} diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h index 93dc567e6100..8a8e84bfe3d2 100644 --- a/net/mac80211/sta_info.h +++ b/net/mac80211/sta_info.h @@ -474,6 +474,7 @@ struct sta_info { unsigned long last_tdls_pkt_time; u64 msdu_retries[IEEE80211_NUM_TIDS + 1]; u64 msdu_failed[IEEE80211_NUM_TIDS + 1]; + unsigned long last_ack; } status_stats; /* Updated from TX path only, no locking requirements */ @@ -680,4 +681,6 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta); void ieee80211_sta_ps_deliver_poll_response(struct sta_info *sta); void ieee80211_sta_ps_deliver_uapsd(struct sta_info *sta); +unsigned long ieee80211_sta_last_active(struct sta_info *sta); + #endif /* STA_INFO_H */ diff --git 
a/net/mac80211/status.c b/net/mac80211/status.c index 8b1b2ea03eb5..c6d5c724e032 100644 --- a/net/mac80211/status.c +++ b/net/mac80211/status.c @@ -188,7 +188,7 @@ static void ieee80211_frame_acked(struct sta_info *sta, struct sk_buff *skb) struct ieee80211_sub_if_data *sdata = sta->sdata; if (ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS)) - sta->rx_stats.last_rx = jiffies; + sta->status_stats.last_ack = jiffies; if (ieee80211_is_data_qos(mgmt->frame_control)) { struct ieee80211_hdr *hdr = (void *) skb->data; @@ -647,7 +647,7 @@ void ieee80211_tx_status_noskb(struct ieee80211_hw *hw, sta->status_stats.retry_count += retry_count; if (acked) { - sta->rx_stats.last_rx = jiffies; + sta->status_stats.last_ack = jiffies; if (sta->status_stats.lost_packets) sta->status_stats.lost_packets = 0; From 4f6b1b3daaf167bf927174224e07efd17ed95984 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Thu, 31 Mar 2016 20:02:08 +0300 Subject: [PATCH 0321/1649] mac80211: fix last RX rate data consistency When storing the last_rate_* values in the RX code, there's nothing to guarantee consistency, so a concurrent reader could see, e.g. last_rate_idx on the new value, but last_rate_flag still on the old, getting completely bogus values in the end. To fix this, I lifted the sta_stats_encode_rate() function from my old rate statistics code, which encodes the entire rate data into a single 16-bit value, avoiding the consistency issue. Signed-off-by: Johannes Berg --- net/mac80211/rx.c | 21 ++++------------ net/mac80211/sta_info.c | 56 ++++++++++++++++++++++------------------- net/mac80211/sta_info.h | 45 +++++++++++++++++++++++++++++---- 3 files changed, 75 insertions(+), 47 deletions(-) diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index d14c66df9e86..5a6c36c3aed6 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -1421,16 +1421,9 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx) test_sta_flag(sta, WLAN_STA_AUTHORIZED)) { sta->rx_stats.last_rx = jiffies; if (ieee80211_is_data(hdr->frame_control) && - !is_multicast_ether_addr(hdr->addr1)) { - sta->rx_stats.last_rate_idx = - status->rate_idx; - sta->rx_stats.last_rate_flag = - status->flag; - sta->rx_stats.last_rate_vht_flag = - status->vht_flag; - sta->rx_stats.last_rate_vht_nss = - status->vht_nss; - } + !is_multicast_ether_addr(hdr->addr1)) + sta->rx_stats.last_rate = + sta_stats_encode_rate(status); } } else if (rx->sdata->vif.type == NL80211_IFTYPE_OCB) { sta->rx_stats.last_rx = jiffies; @@ -1440,12 +1433,8 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx) * match the current local configuration when processed. 
*/ sta->rx_stats.last_rx = jiffies; - if (ieee80211_is_data(hdr->frame_control)) { - sta->rx_stats.last_rate_idx = status->rate_idx; - sta->rx_stats.last_rate_flag = status->flag; - sta->rx_stats.last_rate_vht_flag = status->vht_flag; - sta->rx_stats.last_rate_vht_nss = status->vht_nss; - } + if (ieee80211_is_data(hdr->frame_control)) + sta->rx_stats.last_rate = sta_stats_encode_rate(status); } if (rx->sdata->vif.type == NL80211_IFTYPE_STATION) diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index ac73b9c7e8d8..0b50ae3f0b05 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c @@ -1928,43 +1928,47 @@ u8 sta_info_tx_streams(struct sta_info *sta) >> IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT) + 1; } -static void sta_set_rate_info_rx(struct sta_info *sta, struct rate_info *rinfo) +static void sta_stats_decode_rate(struct ieee80211_local *local, u16 rate, + struct rate_info *rinfo) { - rinfo->flags = 0; + rinfo->bw = (rate & STA_STATS_RATE_BW_MASK) >> + STA_STATS_RATE_BW_SHIFT; - if (sta->rx_stats.last_rate_flag & RX_FLAG_HT) { - rinfo->flags |= RATE_INFO_FLAGS_MCS; - rinfo->mcs = sta->rx_stats.last_rate_idx; - } else if (sta->rx_stats.last_rate_flag & RX_FLAG_VHT) { - rinfo->flags |= RATE_INFO_FLAGS_VHT_MCS; - rinfo->nss = sta->rx_stats.last_rate_vht_nss; - rinfo->mcs = sta->rx_stats.last_rate_idx; - } else { + if (rate & STA_STATS_RATE_VHT) { + rinfo->flags = RATE_INFO_FLAGS_VHT_MCS; + rinfo->mcs = rate & 0xf; + rinfo->nss = (rate & 0xf0) >> 4; + } else if (rate & STA_STATS_RATE_HT) { + rinfo->flags = RATE_INFO_FLAGS_MCS; + rinfo->mcs = rate & 0xff; + } else if (rate & STA_STATS_RATE_LEGACY) { struct ieee80211_supported_band *sband; - int shift = ieee80211_vif_get_shift(&sta->sdata->vif); u16 brate; + unsigned int shift; - sband = sta->local->hw.wiphy->bands[ - ieee80211_get_sdata_band(sta->sdata)]; - brate = sband->bitrates[sta->rx_stats.last_rate_idx].bitrate; + sband = local->hw.wiphy->bands[(rate >> 4) & 0xf]; + brate = sband->bitrates[rate & 0xf].bitrate; + if (rinfo->bw == RATE_INFO_BW_5) + shift = 2; + else if (rinfo->bw == RATE_INFO_BW_10) + shift = 1; + else + shift = 0; rinfo->legacy = DIV_ROUND_UP(brate, 1 << shift); } - if (sta->rx_stats.last_rate_flag & RX_FLAG_SHORT_GI) + if (rate & STA_STATS_RATE_SGI) rinfo->flags |= RATE_INFO_FLAGS_SHORT_GI; +} - if (sta->rx_stats.last_rate_flag & RX_FLAG_5MHZ) - rinfo->bw = RATE_INFO_BW_5; - else if (sta->rx_stats.last_rate_flag & RX_FLAG_10MHZ) - rinfo->bw = RATE_INFO_BW_10; - else if (sta->rx_stats.last_rate_flag & RX_FLAG_40MHZ) - rinfo->bw = RATE_INFO_BW_40; - else if (sta->rx_stats.last_rate_vht_flag & RX_VHT_FLAG_80MHZ) - rinfo->bw = RATE_INFO_BW_80; - else if (sta->rx_stats.last_rate_vht_flag & RX_VHT_FLAG_160MHZ) - rinfo->bw = RATE_INFO_BW_160; +static void sta_set_rate_info_rx(struct sta_info *sta, struct rate_info *rinfo) +{ + u16 rate = ACCESS_ONCE(sta->rx_stats.last_rate); + + if (rate == STA_STATS_RATE_INVALID) + rinfo->flags = 0; else - rinfo->bw = RATE_INFO_BW_20; + sta_stats_decode_rate(sta->local, rate, rinfo); } void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo) diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h index 8a8e84bfe3d2..5549ceb9cbb3 100644 --- a/net/mac80211/sta_info.h +++ b/net/mac80211/sta_info.h @@ -1,7 +1,7 @@ /* * Copyright 2002-2005, Devicescape Software, Inc. 
* Copyright 2013-2014 Intel Mobile Communications GmbH - * Copyright(c) 2015 Intel Deutschland GmbH + * Copyright(c) 2015-2016 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -452,10 +452,7 @@ struct sta_info { int last_signal; u8 chains; s8 chain_signal_last[IEEE80211_MAX_CHAINS]; - int last_rate_idx; - u32 last_rate_flag; - u32 last_rate_vht_flag; - u8 last_rate_vht_nss; + u16 last_rate; u64 msdu[IEEE80211_NUM_TIDS + 1]; } rx_stats; struct { @@ -683,4 +680,42 @@ void ieee80211_sta_ps_deliver_uapsd(struct sta_info *sta); unsigned long ieee80211_sta_last_active(struct sta_info *sta); +#define STA_STATS_RATE_INVALID 0 +#define STA_STATS_RATE_VHT 0x8000 +#define STA_STATS_RATE_HT 0x4000 +#define STA_STATS_RATE_LEGACY 0x2000 +#define STA_STATS_RATE_SGI 0x1000 +#define STA_STATS_RATE_BW_SHIFT 9 +#define STA_STATS_RATE_BW_MASK (0x7 << STA_STATS_RATE_BW_SHIFT) + +static inline u16 sta_stats_encode_rate(struct ieee80211_rx_status *s) +{ + u16 r = s->rate_idx; + + if (s->vht_flag & RX_VHT_FLAG_80MHZ) + r |= RATE_INFO_BW_80 << STA_STATS_RATE_BW_SHIFT; + else if (s->vht_flag & RX_VHT_FLAG_160MHZ) + r |= RATE_INFO_BW_160 << STA_STATS_RATE_BW_SHIFT; + else if (s->flag & RX_FLAG_40MHZ) + r |= RATE_INFO_BW_40 << STA_STATS_RATE_BW_SHIFT; + else if (s->flag & RX_FLAG_10MHZ) + r |= RATE_INFO_BW_10 << STA_STATS_RATE_BW_SHIFT; + else if (s->flag & RX_FLAG_5MHZ) + r |= RATE_INFO_BW_5 << STA_STATS_RATE_BW_SHIFT; + else + r |= RATE_INFO_BW_20 << STA_STATS_RATE_BW_SHIFT; + + if (s->flag & RX_FLAG_SHORT_GI) + r |= STA_STATS_RATE_SGI; + + if (s->flag & RX_FLAG_VHT) + r |= STA_STATS_RATE_VHT | (s->vht_nss << 4); + else if (s->flag & RX_FLAG_HT) + r |= STA_STATS_RATE_HT; + else + r |= STA_STATS_RATE_LEGACY | (s->band << 4); + + return r; +} + #endif /* STA_INFO_H */ From 0f9c5a61d4b2330b12c59126aa5a9108dbfce555 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Thu, 31 Mar 2016 20:02:09 +0300 Subject: [PATCH 0322/1649] mac80211: fix RX u64 stats consistency on 32-bit platforms On 32-bit platforms, the 64-bit counters we keep need to be protected to be consistently read. Use the u64_stats_sync mechanism to do that. In order to not end up with overly long lines, refactor the tidstats assignments a bit. Signed-off-by: Johannes Berg --- net/mac80211/rx.c | 6 ++++ net/mac80211/sta_info.c | 72 +++++++++++++++++++++++++---------------- net/mac80211/sta_info.h | 5 ++- 3 files changed, 54 insertions(+), 29 deletions(-) diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 5a6c36c3aed6..2863832b0db4 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -1441,7 +1441,11 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx) ieee80211_sta_rx_notify(rx->sdata, hdr); sta->rx_stats.fragments++; + + u64_stats_update_begin(&rx->sta->rx_stats.syncp); sta->rx_stats.bytes += rx->skb->len; + u64_stats_update_end(&rx->sta->rx_stats.syncp); + if (!(status->flag & RX_FLAG_NO_SIGNAL_VAL)) { sta->rx_stats.last_signal = status->signal; ewma_signal_add(&sta->rx_stats_avg.signal, -status->signal); @@ -2124,7 +2128,9 @@ ieee80211_deliver_skb(struct ieee80211_rx_data *rx) * for non-QoS-data frames. Here we know it's a data * frame, so count MSDUs. 
*/ + u64_stats_update_begin(&rx->sta->rx_stats.syncp); rx->sta->rx_stats.msdu[rx->seqno_idx]++; + u64_stats_update_end(&rx->sta->rx_stats.syncp); } if ((sdata->vif.type == NL80211_IFTYPE_AP || diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index 0b50ae3f0b05..bdd303e8b577 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c @@ -335,6 +335,8 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata, sta->sdata = sdata; sta->rx_stats.last_rx = jiffies; + u64_stats_init(&sta->rx_stats.syncp); + sta->sta_state = IEEE80211_STA_NONE; /* Mark TID as unreserved */ @@ -1971,6 +1973,41 @@ static void sta_set_rate_info_rx(struct sta_info *sta, struct rate_info *rinfo) sta_stats_decode_rate(sta->local, rate, rinfo); } +static void sta_set_tidstats(struct sta_info *sta, + struct cfg80211_tid_stats *tidstats, + int tid) +{ + struct ieee80211_local *local = sta->local; + + if (!(tidstats->filled & BIT(NL80211_TID_STATS_RX_MSDU))) { + unsigned int start; + + do { + start = u64_stats_fetch_begin(&sta->rx_stats.syncp); + tidstats->rx_msdu = sta->rx_stats.msdu[tid]; + } while (u64_stats_fetch_retry(&sta->rx_stats.syncp, start)); + + tidstats->filled |= BIT(NL80211_TID_STATS_RX_MSDU); + } + + if (!(tidstats->filled & BIT(NL80211_TID_STATS_TX_MSDU))) { + tidstats->filled |= BIT(NL80211_TID_STATS_TX_MSDU); + tidstats->tx_msdu = sta->tx_stats.msdu[tid]; + } + + if (!(tidstats->filled & BIT(NL80211_TID_STATS_TX_MSDU_RETRIES)) && + ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS)) { + tidstats->filled |= BIT(NL80211_TID_STATS_TX_MSDU_RETRIES); + tidstats->tx_msdu_retries = sta->status_stats.msdu_retries[tid]; + } + + if (!(tidstats->filled & BIT(NL80211_TID_STATS_TX_MSDU_FAILED)) && + ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS)) { + tidstats->filled |= BIT(NL80211_TID_STATS_TX_MSDU_FAILED); + tidstats->tx_msdu_failed = sta->status_stats.msdu_failed[tid]; + } +} + void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo) { struct ieee80211_sub_if_data *sdata = sta->sdata; @@ -2025,7 +2062,12 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo) if (!(sinfo->filled & (BIT(NL80211_STA_INFO_RX_BYTES64) | BIT(NL80211_STA_INFO_RX_BYTES)))) { - sinfo->rx_bytes = sta->rx_stats.bytes; + unsigned int start; + + do { + start = u64_stats_fetch_begin(&sta->rx_stats.syncp); + sinfo->rx_bytes = sta->rx_stats.bytes; + } while (u64_stats_fetch_retry(&sta->rx_stats.syncp, start)); sinfo->filled |= BIT(NL80211_STA_INFO_RX_BYTES64); } @@ -2097,33 +2139,7 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo) for (i = 0; i < IEEE80211_NUM_TIDS + 1; i++) { struct cfg80211_tid_stats *tidstats = &sinfo->pertid[i]; - if (!(tidstats->filled & BIT(NL80211_TID_STATS_RX_MSDU))) { - tidstats->filled |= BIT(NL80211_TID_STATS_RX_MSDU); - tidstats->rx_msdu = sta->rx_stats.msdu[i]; - } - - if (!(tidstats->filled & BIT(NL80211_TID_STATS_TX_MSDU))) { - tidstats->filled |= BIT(NL80211_TID_STATS_TX_MSDU); - tidstats->tx_msdu = sta->tx_stats.msdu[i]; - } - - if (!(tidstats->filled & - BIT(NL80211_TID_STATS_TX_MSDU_RETRIES)) && - ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS)) { - tidstats->filled |= - BIT(NL80211_TID_STATS_TX_MSDU_RETRIES); - tidstats->tx_msdu_retries = - sta->status_stats.msdu_retries[i]; - } - - if (!(tidstats->filled & - BIT(NL80211_TID_STATS_TX_MSDU_FAILED)) && - ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS)) { - tidstats->filled |= - BIT(NL80211_TID_STATS_TX_MSDU_FAILED); - tidstats->tx_msdu_failed = - 
sta->status_stats.msdu_failed[i]; - } + sta_set_tidstats(sta, tidstats, i); } if (ieee80211_vif_is_mesh(&sdata->vif)) { diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h index 5549ceb9cbb3..7c23b575672e 100644 --- a/net/mac80211/sta_info.h +++ b/net/mac80211/sta_info.h @@ -18,6 +18,7 @@ #include #include #include +#include #include "key.h" /** @@ -444,7 +445,6 @@ struct sta_info { /* Updated from RX path only, no locking requirements */ struct { unsigned long packets; - u64 bytes; unsigned long last_rx; unsigned long num_duplicates; unsigned long fragments; @@ -453,6 +453,9 @@ struct sta_info { u8 chains; s8 chain_signal_last[IEEE80211_MAX_CHAINS]; u16 last_rate; + + struct u64_stats_sync syncp; + u64 bytes; u64 msdu[IEEE80211_NUM_TIDS + 1]; } rx_stats; struct { From 49ddf8e6e2347cffdcf83d1ca2d04ff929820178 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Thu, 31 Mar 2016 20:02:10 +0300 Subject: [PATCH 0323/1649] mac80211: add fast-rx path The regular RX path has a lot of code, but with a few assumptions on the hardware it's possible to reduce the amount of code significantly. Currently the assumptions on the driver are the following: * hardware/driver reordering buffer (if supporting aggregation) * hardware/driver decryption & PN checking (if using encryption) * hardware/driver did de-duplication * hardware/driver did A-MSDU deaggregation * AP_LINK_PS is used (in AP mode) * no client powersave handling in mac80211 (in client mode) of which some are actually checked per packet: * de-duplication * PN checking * decryption and additionally packets must * not be A-MSDU (have been deaggregated by driver/device) * be data packets * not be fragmented * be unicast * have RFC 1042 header Additionally dynamically we assume: * no encryption or CCMP/GCMP, TKIP/WEP/other not allowed * station must be authorized * 4-addr format not enabled Some data needed for the RX path is cached in a new per-station "fast_rx" structure, so that we only need to look at this and the packet, no other memory when processing packets on the fast RX path. After doing the above per-packet checks, the data path collapses down to a pretty simple conversion function taking advantage of the data cached in the small fast_rx struct. This should speed up the RX processing, and will make it easier to reason about parallelizing RX (for which statistics will need to be per-CPU still.) 
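Once the per-packet checks pass, the heart of the fast path is an in-place 802.11-to-802.3 header conversion using the offsets precomputed in struct ieee80211_fast_rx. A condensed sketch of just that step, extracted from ieee80211_invoke_fast_rx() below (key handling, statistics and the internal-forwarding branch are omitted):

	struct {
		u8 da[ETH_ALEN];
		u8 sa[ETH_ALEN];
	} addrs __aligned(2);

	/* grab DA/SA at the precomputed offsets before the pull moves them */
	ether_addr_copy(addrs.da, skb->data + fast_rx->da_offs);
	ether_addr_copy(addrs.sa, skb->data + fast_rx->sa_offs);
	/* drop the 802.11 + SNAP headers but keep the ethertype ... */
	skb_pull(skb, snap_offs + sizeof(rfc1042_header));
	/* ... and put DA, SA back in front, forming an Ethernet-style header */
	memcpy(skb_push(skb, sizeof(addrs)), &addrs, sizeof(addrs));
	skb->protocol = eth_type_trans(skb, fast_rx->dev);
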
Signed-off-by: Johannes Berg --- include/linux/ieee80211.h | 10 ++ net/mac80211/cfg.c | 10 +- net/mac80211/ieee80211_i.h | 5 + net/mac80211/key.c | 1 + net/mac80211/mlme.c | 9 + net/mac80211/rx.c | 351 +++++++++++++++++++++++++++++++++++++ net/mac80211/sta_info.c | 2 + net/mac80211/sta_info.h | 34 ++++ 8 files changed, 419 insertions(+), 3 deletions(-) diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index bf9706c5b0bd..113bfc468a4d 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -638,6 +638,16 @@ static inline bool ieee80211_is_first_frag(__le16 seq_ctrl) return (seq_ctrl & cpu_to_le16(IEEE80211_SCTL_FRAG)) == 0; } +/** + * ieee80211_is_frag - check if a frame is a fragment + * @hdr: 802.11 header of the frame + */ +static inline bool ieee80211_is_frag(struct ieee80211_hdr *hdr) +{ + return ieee80211_has_morefrags(hdr->frame_control) || + hdr->seq_ctrl & cpu_to_le16(IEEE80211_SCTL_FRAG); +} + struct ieee80211s_hdr { u8 flags; u8 ttl; diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index 62a90f270f03..fc4730b938d0 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -65,11 +65,13 @@ static int ieee80211_change_iface(struct wiphy *wiphy, return ret; if (type == NL80211_IFTYPE_AP_VLAN && - params && params->use_4addr == 0) + params && params->use_4addr == 0) { RCU_INIT_POINTER(sdata->u.vlan.sta, NULL); - else if (type == NL80211_IFTYPE_STATION && - params && params->use_4addr >= 0) + ieee80211_check_fast_rx_iface(sdata); + } else if (type == NL80211_IFTYPE_STATION && + params && params->use_4addr >= 0) { sdata->u.mgd.use_4addr = params->use_4addr; + } if (sdata->vif.type == NL80211_IFTYPE_MONITOR && flags) { struct ieee80211_local *local = sdata->local; @@ -1367,6 +1369,7 @@ static int ieee80211_change_station(struct wiphy *wiphy, rcu_assign_pointer(vlansdata->u.vlan.sta, sta); new_4addr = true; + __ieee80211_check_fast_rx_iface(vlansdata); } if (sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN && @@ -1889,6 +1892,7 @@ static int ieee80211_change_bss(struct wiphy *wiphy, sdata->flags |= IEEE80211_SDATA_DONT_BRIDGE_PACKETS; else sdata->flags &= ~IEEE80211_SDATA_DONT_BRIDGE_PACKETS; + ieee80211_check_fast_rx_iface(sdata); } if (params->ht_opmode >= 0) { diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index c8945e2d8a86..6243109979ed 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -1494,6 +1494,11 @@ u64 ieee80211_mgmt_tx_cookie(struct ieee80211_local *local); int ieee80211_attach_ack_skb(struct ieee80211_local *local, struct sk_buff *skb, u64 *cookie, gfp_t gfp); +void ieee80211_check_fast_rx(struct sta_info *sta); +void __ieee80211_check_fast_rx_iface(struct ieee80211_sub_if_data *sdata); +void ieee80211_check_fast_rx_iface(struct ieee80211_sub_if_data *sdata); +void ieee80211_clear_fast_rx(struct sta_info *sta); + /* STA code */ void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata); int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata, diff --git a/net/mac80211/key.c b/net/mac80211/key.c index 3df7b0392d30..edd6f2945f69 100644 --- a/net/mac80211/key.c +++ b/net/mac80211/key.c @@ -338,6 +338,7 @@ static void ieee80211_key_replace(struct ieee80211_sub_if_data *sdata, } else { rcu_assign_pointer(sta->gtk[idx], new); } + ieee80211_check_fast_rx(sta); } else { defunikey = old && old == key_mtx_dereference(sdata->local, diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 2112df4ffb7b..d3c75ac8a029 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -2217,6 
+2217,7 @@ static void ieee80211_mgd_probe_ap_send(struct ieee80211_sub_if_data *sdata) const u8 *ssid; u8 *dst = ifmgd->associated->bssid; u8 unicast_limit = max(1, max_probe_tries - 3); + struct sta_info *sta; /* * Try sending broadcast probe requests for the last three @@ -2235,6 +2236,14 @@ static void ieee80211_mgd_probe_ap_send(struct ieee80211_sub_if_data *sdata) */ ifmgd->probe_send_count++; + if (dst) { + mutex_lock(&sdata->local->sta_mtx); + sta = sta_info_get(sdata, dst); + if (!WARN_ON(!sta)) + ieee80211_check_fast_rx(sta); + mutex_unlock(&sdata->local->sta_mtx); + } + if (ieee80211_hw_check(&sdata->local->hw, REPORTS_TX_ACK_STATUS)) { ifmgd->nullfunc_failed = false; ieee80211_send_nullfunc(sdata->local, sdata, false); diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 2863832b0db4..96f8bbf21649 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -3508,6 +3508,342 @@ static bool ieee80211_accept_frame(struct ieee80211_rx_data *rx) return false; } +void ieee80211_check_fast_rx(struct sta_info *sta) +{ + struct ieee80211_sub_if_data *sdata = sta->sdata; + struct ieee80211_local *local = sdata->local; + struct ieee80211_key *key; + struct ieee80211_fast_rx fastrx = { + .dev = sdata->dev, + .vif_type = sdata->vif.type, + .control_port_protocol = sdata->control_port_protocol, + }, *old, *new = NULL; + bool assign = false; + + /* use sparse to check that we don't return without updating */ + __acquire(check_fast_rx); + + BUILD_BUG_ON(sizeof(fastrx.rfc1042_hdr) != sizeof(rfc1042_header)); + BUILD_BUG_ON(sizeof(fastrx.rfc1042_hdr) != ETH_ALEN); + ether_addr_copy(fastrx.rfc1042_hdr, rfc1042_header); + ether_addr_copy(fastrx.vif_addr, sdata->vif.addr); + + /* fast-rx doesn't do reordering */ + if (ieee80211_hw_check(&local->hw, AMPDU_AGGREGATION) && + !ieee80211_hw_check(&local->hw, SUPPORTS_REORDERING_BUFFER)) + goto clear; + + switch (sdata->vif.type) { + case NL80211_IFTYPE_STATION: + /* 4-addr is harder to deal with, later maybe */ + if (sdata->u.mgd.use_4addr) + goto clear; + /* software powersave is a huge mess, avoid all of it */ + if (ieee80211_hw_check(&local->hw, PS_NULLFUNC_STACK)) + goto clear; + if (ieee80211_hw_check(&local->hw, SUPPORTS_PS) && + !ieee80211_hw_check(&local->hw, SUPPORTS_DYNAMIC_PS)) + goto clear; + if (sta->sta.tdls) { + fastrx.da_offs = offsetof(struct ieee80211_hdr, addr1); + fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr2); + fastrx.expected_ds_bits = 0; + } else { + fastrx.sta_notify = sdata->u.mgd.probe_send_count > 0; + fastrx.da_offs = offsetof(struct ieee80211_hdr, addr1); + fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr3); + fastrx.expected_ds_bits = + cpu_to_le16(IEEE80211_FCTL_FROMDS); + } + break; + case NL80211_IFTYPE_AP_VLAN: + case NL80211_IFTYPE_AP: + /* parallel-rx requires this, at least with calls to + * ieee80211_sta_ps_transition() + */ + if (!ieee80211_hw_check(&local->hw, AP_LINK_PS)) + goto clear; + fastrx.da_offs = offsetof(struct ieee80211_hdr, addr3); + fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr2); + fastrx.expected_ds_bits = cpu_to_le16(IEEE80211_FCTL_TODS); + + fastrx.internal_forward = + !(sdata->flags & IEEE80211_SDATA_DONT_BRIDGE_PACKETS) && + (sdata->vif.type != NL80211_IFTYPE_AP_VLAN || + !sdata->u.vlan.sta); + break; + default: + goto clear; + } + + if (!test_sta_flag(sta, WLAN_STA_AUTHORIZED)) + goto clear; + + rcu_read_lock(); + key = rcu_dereference(sta->ptk[sta->ptk_idx]); + if (key) { + switch (key->conf.cipher) { + case WLAN_CIPHER_SUITE_TKIP: + /* we don't want to deal with MMIC in 
fast-rx */ + goto clear_rcu; + case WLAN_CIPHER_SUITE_CCMP: + case WLAN_CIPHER_SUITE_CCMP_256: + case WLAN_CIPHER_SUITE_GCMP: + case WLAN_CIPHER_SUITE_GCMP_256: + break; + default: + /* we also don't want to deal with WEP or cipher scheme + * since those require looking up the key idx in the + * frame, rather than assuming the PTK is used + * (we need to revisit this once we implement the real + * PTK index, which is now valid in the spec, but we + * haven't implemented that part yet) + */ + goto clear_rcu; + } + + fastrx.key = true; + fastrx.icv_len = key->conf.icv_len; + } + + assign = true; + clear_rcu: + rcu_read_unlock(); + clear: + __release(check_fast_rx); + + if (assign) + new = kmemdup(&fastrx, sizeof(fastrx), GFP_KERNEL); + + spin_lock_bh(&sta->lock); + old = rcu_dereference_protected(sta->fast_rx, true); + rcu_assign_pointer(sta->fast_rx, new); + spin_unlock_bh(&sta->lock); + + if (old) + kfree_rcu(old, rcu_head); +} + +void ieee80211_clear_fast_rx(struct sta_info *sta) +{ + struct ieee80211_fast_rx *old; + + spin_lock_bh(&sta->lock); + old = rcu_dereference_protected(sta->fast_rx, true); + RCU_INIT_POINTER(sta->fast_rx, NULL); + spin_unlock_bh(&sta->lock); + + if (old) + kfree_rcu(old, rcu_head); +} + +void __ieee80211_check_fast_rx_iface(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_local *local = sdata->local; + struct sta_info *sta; + + lockdep_assert_held(&local->sta_mtx); + + list_for_each_entry_rcu(sta, &local->sta_list, list) { + if (sdata != sta->sdata && + (!sta->sdata->bss || sta->sdata->bss != sdata->bss)) + continue; + ieee80211_check_fast_rx(sta); + } +} + +void ieee80211_check_fast_rx_iface(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_local *local = sdata->local; + + mutex_lock(&local->sta_mtx); + __ieee80211_check_fast_rx_iface(sdata); + mutex_unlock(&local->sta_mtx); +} + +static bool ieee80211_invoke_fast_rx(struct ieee80211_rx_data *rx, + struct ieee80211_fast_rx *fast_rx) +{ + struct sk_buff *skb = rx->skb; + struct ieee80211_hdr *hdr = (void *)skb->data; + struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); + struct sta_info *sta = rx->sta; + int orig_len = skb->len; + int snap_offs = ieee80211_hdrlen(hdr->frame_control); + struct { + u8 snap[sizeof(rfc1042_header)]; + __be16 proto; + } *payload __aligned(2); + struct { + u8 da[ETH_ALEN]; + u8 sa[ETH_ALEN]; + } addrs __aligned(2); + + /* for parallel-rx, we need to have DUP_VALIDATED, otherwise we write + * to a common data structure; drivers can implement that per queue + * but we don't have that information in mac80211 + */ + if (!(status->flag & RX_FLAG_DUP_VALIDATED)) + return false; + +#define FAST_RX_CRYPT_FLAGS (RX_FLAG_PN_VALIDATED | RX_FLAG_DECRYPTED) + + /* If using encryption, we also need to have: + * - PN_VALIDATED: similar, but the implementation is tricky + * - DECRYPTED: necessary for PN_VALIDATED + */ + if (fast_rx->key && + (status->flag & FAST_RX_CRYPT_FLAGS) != FAST_RX_CRYPT_FLAGS) + return false; + + /* we don't deal with A-MSDU deaggregation here */ + if (status->rx_flags & IEEE80211_RX_AMSDU) + return false; + + if (unlikely(!ieee80211_is_data_present(hdr->frame_control))) + return false; + + if (unlikely(ieee80211_is_frag(hdr))) + return false; + + /* Since our interface address cannot be multicast, this + * implicitly also rejects multicast frames without the + * explicit check. 
+ * + * We shouldn't get any *data* frames not addressed to us + * (AP mode will accept multicast *management* frames), but + * punting here will make it go through the full checks in + * ieee80211_accept_frame(). + */ + if (!ether_addr_equal(fast_rx->vif_addr, hdr->addr1)) + return false; + + if ((hdr->frame_control & cpu_to_le16(IEEE80211_FCTL_FROMDS | + IEEE80211_FCTL_TODS)) != + fast_rx->expected_ds_bits) + goto drop; + + /* assign the key to drop unencrypted frames (later) + * and strip the IV/MIC if necessary + */ + if (fast_rx->key && !(status->flag & RX_FLAG_IV_STRIPPED)) { + /* GCMP header length is the same */ + snap_offs += IEEE80211_CCMP_HDR_LEN; + } + + if (!pskb_may_pull(skb, snap_offs + sizeof(*payload))) + goto drop; + payload = (void *)(skb->data + snap_offs); + + if (!ether_addr_equal(payload->snap, fast_rx->rfc1042_hdr)) + return false; + + /* Don't handle these here since they require special code. + * Accept AARP and IPX even though they should come with a + * bridge-tunnel header - but if we get them this way then + * there's little point in discarding them. + */ + if (unlikely(payload->proto == cpu_to_be16(ETH_P_TDLS) || + payload->proto == fast_rx->control_port_protocol)) + return false; + + /* after this point, don't punt to the slowpath! */ + + if (rx->key && !(status->flag & RX_FLAG_MIC_STRIPPED) && + pskb_trim(skb, skb->len - fast_rx->icv_len)) + goto drop; + + if (unlikely(fast_rx->sta_notify)) { + ieee80211_sta_rx_notify(rx->sdata, hdr); + fast_rx->sta_notify = false; + } + + /* statistics part of ieee80211_rx_h_sta_process() */ + sta->rx_stats.last_rx = jiffies; + sta->rx_stats.last_rate = sta_stats_encode_rate(status); + + sta->rx_stats.fragments++; + + if (!(status->flag & RX_FLAG_NO_SIGNAL_VAL)) { + sta->rx_stats.last_signal = status->signal; + ewma_signal_add(&sta->rx_stats_avg.signal, -status->signal); + } + + if (status->chains) { + int i; + + sta->rx_stats.chains = status->chains; + for (i = 0; i < ARRAY_SIZE(status->chain_signal); i++) { + int signal = status->chain_signal[i]; + + if (!(status->chains & BIT(i))) + continue; + + sta->rx_stats.chain_signal_last[i] = signal; + ewma_signal_add(&sta->rx_stats_avg.chain_signal[i], + -signal); + } + } + /* end of statistics */ + + if (rx->key && !ieee80211_has_protected(hdr->frame_control)) + goto drop; + + /* do the header conversion - first grab the addresses */ + ether_addr_copy(addrs.da, skb->data + fast_rx->da_offs); + ether_addr_copy(addrs.sa, skb->data + fast_rx->sa_offs); + /* remove the SNAP but leave the ethertype */ + skb_pull(skb, snap_offs + sizeof(rfc1042_header)); + /* push the addresses in front */ + memcpy(skb_push(skb, sizeof(addrs)), &addrs, sizeof(addrs)); + + skb->dev = fast_rx->dev; + + ieee80211_rx_stats(fast_rx->dev, skb->len); + + /* The seqno index has the same property as needed + * for the rx_msdu field, i.e. it is IEEE80211_NUM_TIDS + * for non-QoS-data frames. Here we know it's a data + * frame, so count MSDUs. + */ + u64_stats_update_begin(&sta->rx_stats.syncp); + sta->rx_stats.msdu[rx->seqno_idx]++; + sta->rx_stats.bytes += orig_len; + u64_stats_update_end(&sta->rx_stats.syncp); + + if (fast_rx->internal_forward) { + struct sta_info *dsta = sta_info_get(rx->sdata, skb->data); + + if (dsta) { + /* + * Send to wireless media and increase priority by 256 + * to keep the received priority instead of + * reclassifying the frame (see cfg80211_classify8021d). 
+ */ + skb->priority += 256; + skb->protocol = htons(ETH_P_802_3); + skb_reset_network_header(skb); + skb_reset_mac_header(skb); + dev_queue_xmit(skb); + return true; + } + } + + /* deliver to local stack */ + skb->protocol = eth_type_trans(skb, fast_rx->dev); + memset(skb->cb, 0, sizeof(skb->cb)); + if (rx->napi) + napi_gro_receive(rx->napi, skb); + else + netif_receive_skb(skb); + + return true; + drop: + dev_kfree_skb(skb); + sta->rx_stats.dropped++; + return true; +} + /* * This function returns whether or not the SKB * was destined for RX processing or not, which, @@ -3522,6 +3858,21 @@ static bool ieee80211_prepare_and_rx_handle(struct ieee80211_rx_data *rx, rx->skb = skb; + /* See if we can do fast-rx; if we have to copy we already lost, + * so punt in that case. We should never have to deliver a data + * frame to multiple interfaces anyway. + * + * We skip the ieee80211_accept_frame() call and do the necessary + * checking inside ieee80211_invoke_fast_rx(). + */ + if (consume && rx->sta) { + struct ieee80211_fast_rx *fast_rx; + + fast_rx = rcu_dereference(rx->sta->fast_rx); + if (fast_rx && ieee80211_invoke_fast_rx(rx, fast_rx)) + return true; + } + if (!ieee80211_accept_frame(rx)) return false; diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index bdd303e8b577..a0ce7e40f420 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c @@ -1874,6 +1874,7 @@ int sta_info_move_state(struct sta_info *sta, atomic_dec(&sta->sdata->bss->num_mcast_sta); clear_bit(WLAN_STA_AUTHORIZED, &sta->_flags); ieee80211_clear_fast_xmit(sta); + ieee80211_clear_fast_rx(sta); } break; case IEEE80211_STA_AUTHORIZED: @@ -1884,6 +1885,7 @@ int sta_info_move_state(struct sta_info *sta, atomic_inc(&sta->sdata->bss->num_mcast_sta); set_bit(WLAN_STA_AUTHORIZED, &sta->_flags); ieee80211_check_fast_xmit(sta); + ieee80211_check_fast_rx(sta); } break; default: diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h index 7c23b575672e..a0a06609338d 100644 --- a/net/mac80211/sta_info.h +++ b/net/mac80211/sta_info.h @@ -285,6 +285,38 @@ struct ieee80211_fast_tx { struct rcu_head rcu_head; }; +/** + * struct ieee80211_fast_rx - RX fastpath information + * @dev: netdevice for reporting the SKB + * @vif_type: (P2P-less) interface type of the original sdata (sdata->vif.type) + * @vif_addr: interface address + * @rfc1042_hdr: copy of the RFC 1042 SNAP header (to have in cache) + * @control_port_protocol: control port protocol copied from sdata + * @expected_ds_bits: from/to DS bits expected + * @icv_len: length of the MIC if present + * @key: bool indicating encryption is expected (key is set) + * @sta_notify: notify the MLME code (once) + * @internal_forward: forward froms internally on AP/VLAN type interfaces + * @da_offs: offset of the DA in the header (for header conversion) + * @sa_offs: offset of the SA in the header (for header conversion) + * @rcu_head: RCU head for freeing this structure + */ +struct ieee80211_fast_rx { + struct net_device *dev; + enum nl80211_iftype vif_type; + u8 vif_addr[ETH_ALEN] __aligned(2); + u8 rfc1042_hdr[6] __aligned(2); + __be16 control_port_protocol; + __le16 expected_ds_bits; + u8 icv_len; + u8 key:1, + sta_notify:1, + internal_forward:1; + u8 da_offs, sa_offs; + + struct rcu_head rcu_head; +}; + /** * struct mesh_sta - mesh STA information * @plink_lock: serialize access to plink fields @@ -391,6 +423,7 @@ DECLARE_EWMA(signal, 1024, 8) * @cipher_scheme: optional cipher scheme for this station * @reserved_tid: reserved TID (if any, otherwise 
IEEE80211_TID_UNRESERVED) * @fast_tx: TX fastpath information + * @fast_rx: RX fastpath information * @tdls_chandef: a TDLS peer can have a wider chandef that is compatible to * the BSS one. * @tx_stats: TX statistics @@ -414,6 +447,7 @@ struct sta_info { spinlock_t lock; struct ieee80211_fast_tx __rcu *fast_tx; + struct ieee80211_fast_rx __rcu *fast_rx; #ifdef CONFIG_MAC80211_MESH struct mesh_sta *mesh; From c9c5962b56c10c34d8fedc20cd6d6ebdaa2383c6 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Thu, 31 Mar 2016 20:02:11 +0300 Subject: [PATCH 0324/1649] mac80211: enable collecting station statistics per-CPU If the driver advertises the new HW flag USE_RSS, make the station statistics on the fast-rx path per-CPU. This will enable calling the RX in parallel, only hitting locking or shared cachelines when the fast-RX path isn't available. Signed-off-by: Johannes Berg --- include/net/mac80211.h | 4 ++ net/mac80211/debugfs.c | 1 + net/mac80211/rx.c | 37 ++++++++------ net/mac80211/sta_info.c | 108 ++++++++++++++++++++++++++++++++++------ net/mac80211/sta_info.h | 38 ++++++++------ 5 files changed, 142 insertions(+), 46 deletions(-) diff --git a/include/net/mac80211.h b/include/net/mac80211.h index fd5ec446a7a9..5f4b4c773a92 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -1980,6 +1980,9 @@ struct ieee80211_txq { * order and does not need to manage its own reorder buffer or BA session * timeout. * + * @IEEE80211_HW_USES_RSS: The device uses RSS and thus requires parallel RX, + * which implies using per-CPU station statistics. + * * @NUM_IEEE80211_HW_FLAGS: number of hardware flags, used for sizing arrays */ enum ieee80211_hw_flags { @@ -2017,6 +2020,7 @@ enum ieee80211_hw_flags { IEEE80211_HW_BEACON_TX_STATUS, IEEE80211_HW_NEEDS_UNIQUE_STA_ADDR, IEEE80211_HW_SUPPORTS_REORDERING_BUFFER, + IEEE80211_HW_USES_RSS, /* keep last, obviously */ NUM_IEEE80211_HW_FLAGS diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c index 4ab5c522ceee..52ed2afc408d 100644 --- a/net/mac80211/debugfs.c +++ b/net/mac80211/debugfs.c @@ -127,6 +127,7 @@ static const char *hw_flag_names[] = { FLAG(BEACON_TX_STATUS), FLAG(NEEDS_UNIQUE_STA_ADDR), FLAG(SUPPORTS_REORDERING_BUFFER), + FLAG(USES_RSS), #undef FLAG }; diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 96f8bbf21649..c2b659e9a9f9 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -3528,6 +3528,8 @@ void ieee80211_check_fast_rx(struct sta_info *sta) ether_addr_copy(fastrx.rfc1042_hdr, rfc1042_header); ether_addr_copy(fastrx.vif_addr, sdata->vif.addr); + fastrx.uses_rss = ieee80211_hw_check(&local->hw, USES_RSS); + /* fast-rx doesn't do reordering */ if (ieee80211_hw_check(&local->hw, AMPDU_AGGREGATION) && !ieee80211_hw_check(&local->hw, SUPPORTS_REORDERING_BUFFER)) @@ -3678,6 +3680,10 @@ static bool ieee80211_invoke_fast_rx(struct ieee80211_rx_data *rx, u8 da[ETH_ALEN]; u8 sa[ETH_ALEN]; } addrs __aligned(2); + struct ieee80211_sta_rx_stats *stats = &sta->rx_stats; + + if (fast_rx->uses_rss) + stats = this_cpu_ptr(sta->pcpu_rx_stats); /* for parallel-rx, we need to have DUP_VALIDATED, otherwise we write * to a common data structure; drivers can implement that per queue @@ -3759,29 +3765,32 @@ static bool ieee80211_invoke_fast_rx(struct ieee80211_rx_data *rx, } /* statistics part of ieee80211_rx_h_sta_process() */ - sta->rx_stats.last_rx = jiffies; - sta->rx_stats.last_rate = sta_stats_encode_rate(status); + stats->last_rx = jiffies; + stats->last_rate = sta_stats_encode_rate(status); - sta->rx_stats.fragments++; + 
stats->fragments++; if (!(status->flag & RX_FLAG_NO_SIGNAL_VAL)) { - sta->rx_stats.last_signal = status->signal; - ewma_signal_add(&sta->rx_stats_avg.signal, -status->signal); + stats->last_signal = status->signal; + if (!fast_rx->uses_rss) + ewma_signal_add(&sta->rx_stats_avg.signal, + -status->signal); } if (status->chains) { int i; - sta->rx_stats.chains = status->chains; + stats->chains = status->chains; for (i = 0; i < ARRAY_SIZE(status->chain_signal); i++) { int signal = status->chain_signal[i]; if (!(status->chains & BIT(i))) continue; - sta->rx_stats.chain_signal_last[i] = signal; - ewma_signal_add(&sta->rx_stats_avg.chain_signal[i], - -signal); + stats->chain_signal_last[i] = signal; + if (!fast_rx->uses_rss) + ewma_signal_add(&sta->rx_stats_avg.chain_signal[i], + -signal); } } /* end of statistics */ @@ -3806,10 +3815,10 @@ static bool ieee80211_invoke_fast_rx(struct ieee80211_rx_data *rx, * for non-QoS-data frames. Here we know it's a data * frame, so count MSDUs. */ - u64_stats_update_begin(&sta->rx_stats.syncp); - sta->rx_stats.msdu[rx->seqno_idx]++; - sta->rx_stats.bytes += orig_len; - u64_stats_update_end(&sta->rx_stats.syncp); + u64_stats_update_begin(&stats->syncp); + stats->msdu[rx->seqno_idx]++; + stats->bytes += orig_len; + u64_stats_update_end(&stats->syncp); if (fast_rx->internal_forward) { struct sta_info *dsta = sta_info_get(rx->sdata, skb->data); @@ -3840,7 +3849,7 @@ static bool ieee80211_invoke_fast_rx(struct ieee80211_rx_data *rx, return true; drop: dev_kfree_skb(skb); - sta->rx_stats.dropped++; + stats->dropped++; return true; } diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index a0ce7e40f420..cf2aca0cc200 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c @@ -254,6 +254,7 @@ void sta_info_free(struct ieee80211_local *local, struct sta_info *sta) #ifdef CONFIG_MAC80211_MESH kfree(sta->mesh); #endif + free_percpu(sta->pcpu_rx_stats); kfree(sta); } @@ -311,6 +312,13 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata, if (!sta) return NULL; + if (ieee80211_hw_check(hw, USES_RSS)) { + sta->pcpu_rx_stats = + alloc_percpu(struct ieee80211_sta_rx_stats); + if (!sta->pcpu_rx_stats) + goto free; + } + spin_lock_init(&sta->lock); spin_lock_init(&sta->ps_lock); INIT_WORK(&sta->drv_deliver_wk, sta_deliver_ps_frames); @@ -1932,6 +1940,28 @@ u8 sta_info_tx_streams(struct sta_info *sta) >> IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT) + 1; } +static struct ieee80211_sta_rx_stats * +sta_get_last_rx_stats(struct sta_info *sta) +{ + struct ieee80211_sta_rx_stats *stats = &sta->rx_stats; + struct ieee80211_local *local = sta->local; + int cpu; + + if (!ieee80211_hw_check(&local->hw, USES_RSS)) + return stats; + + for_each_possible_cpu(cpu) { + struct ieee80211_sta_rx_stats *cpustats; + + cpustats = per_cpu_ptr(sta->pcpu_rx_stats, cpu); + + if (time_after(cpustats->last_rx, stats->last_rx)) + stats = cpustats; + } + + return stats; +} + static void sta_stats_decode_rate(struct ieee80211_local *local, u16 rate, struct rate_info *rinfo) { @@ -1967,7 +1997,7 @@ static void sta_stats_decode_rate(struct ieee80211_local *local, u16 rate, static void sta_set_rate_info_rx(struct sta_info *sta, struct rate_info *rinfo) { - u16 rate = ACCESS_ONCE(sta->rx_stats.last_rate); + u16 rate = ACCESS_ONCE(sta_get_last_rx_stats(sta)->last_rate); if (rate == STA_STATS_RATE_INVALID) rinfo->flags = 0; @@ -2010,13 +2040,29 @@ static void sta_set_tidstats(struct sta_info *sta, } } +static inline u64 sta_get_stats_bytes(struct ieee80211_sta_rx_stats *rxstats) 
+{ + unsigned int start; + u64 value; + + do { + start = u64_stats_fetch_begin(&rxstats->syncp); + value = rxstats->bytes; + } while (u64_stats_fetch_retry(&rxstats->syncp, start)); + + return value; +} + void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo) { struct ieee80211_sub_if_data *sdata = sta->sdata; struct ieee80211_local *local = sdata->local; struct rate_control_ref *ref = NULL; u32 thr = 0; - int i, ac; + int i, ac, cpu; + struct ieee80211_sta_rx_stats *last_rxstats; + + last_rxstats = sta_get_last_rx_stats(sta); if (test_sta_flag(sta, WLAN_STA_RATE_CONTROL)) ref = local->rate_ctrl; @@ -2064,17 +2110,30 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo) if (!(sinfo->filled & (BIT(NL80211_STA_INFO_RX_BYTES64) | BIT(NL80211_STA_INFO_RX_BYTES)))) { - unsigned int start; + sinfo->rx_bytes += sta_get_stats_bytes(&sta->rx_stats); + + if (sta->pcpu_rx_stats) { + for_each_possible_cpu(cpu) { + struct ieee80211_sta_rx_stats *cpurxs; + + cpurxs = per_cpu_ptr(sta->pcpu_rx_stats, cpu); + sinfo->rx_bytes += sta_get_stats_bytes(cpurxs); + } + } - do { - start = u64_stats_fetch_begin(&sta->rx_stats.syncp); - sinfo->rx_bytes = sta->rx_stats.bytes; - } while (u64_stats_fetch_retry(&sta->rx_stats.syncp, start)); sinfo->filled |= BIT(NL80211_STA_INFO_RX_BYTES64); } if (!(sinfo->filled & BIT(NL80211_STA_INFO_RX_PACKETS))) { sinfo->rx_packets = sta->rx_stats.packets; + if (sta->pcpu_rx_stats) { + for_each_possible_cpu(cpu) { + struct ieee80211_sta_rx_stats *cpurxs; + + cpurxs = per_cpu_ptr(sta->pcpu_rx_stats, cpu); + sinfo->rx_packets += cpurxs->packets; + } + } sinfo->filled |= BIT(NL80211_STA_INFO_RX_PACKETS); } @@ -2089,6 +2148,14 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo) } sinfo->rx_dropped_misc = sta->rx_stats.dropped; + if (sta->pcpu_rx_stats) { + for_each_possible_cpu(cpu) { + struct ieee80211_sta_rx_stats *cpurxs; + + cpurxs = per_cpu_ptr(sta->pcpu_rx_stats, cpu); + sinfo->rx_packets += cpurxs->dropped; + } + } if (sdata->vif.type == NL80211_IFTYPE_STATION && !(sdata->vif.driver_flags & IEEE80211_VIF_BEACON_FILTER)) { @@ -2100,27 +2167,34 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo) if (ieee80211_hw_check(&sta->local->hw, SIGNAL_DBM) || ieee80211_hw_check(&sta->local->hw, SIGNAL_UNSPEC)) { if (!(sinfo->filled & BIT(NL80211_STA_INFO_SIGNAL))) { - sinfo->signal = (s8)sta->rx_stats.last_signal; + sinfo->signal = (s8)last_rxstats->last_signal; sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL); } - if (!(sinfo->filled & BIT(NL80211_STA_INFO_SIGNAL_AVG))) { + if (!sta->pcpu_rx_stats && + !(sinfo->filled & BIT(NL80211_STA_INFO_SIGNAL_AVG))) { sinfo->signal_avg = -ewma_signal_read(&sta->rx_stats_avg.signal); sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL_AVG); } } - if (sta->rx_stats.chains && + /* for the average - if pcpu_rx_stats isn't set - rxstats must point to + * the sta->rx_stats struct, so the check here is fine with and without + * pcpu statistics + */ + if (last_rxstats->chains && !(sinfo->filled & (BIT(NL80211_STA_INFO_CHAIN_SIGNAL) | BIT(NL80211_STA_INFO_CHAIN_SIGNAL_AVG)))) { - sinfo->filled |= BIT(NL80211_STA_INFO_CHAIN_SIGNAL) | - BIT(NL80211_STA_INFO_CHAIN_SIGNAL_AVG); + sinfo->filled |= BIT(NL80211_STA_INFO_CHAIN_SIGNAL); + if (!sta->pcpu_rx_stats) + sinfo->filled |= BIT(NL80211_STA_INFO_CHAIN_SIGNAL_AVG); + + sinfo->chains = last_rxstats->chains; - sinfo->chains = sta->rx_stats.chains; for (i = 0; i < ARRAY_SIZE(sinfo->chain_signal); i++) { sinfo->chain_signal[i] = - 
sta->rx_stats.chain_signal_last[i]; + last_rxstats->chain_signal_last[i]; sinfo->chain_signal_avg[i] = -ewma_signal_read(&sta->rx_stats_avg.chain_signal[i]); } @@ -2213,7 +2287,9 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo) unsigned long ieee80211_sta_last_active(struct sta_info *sta) { - if (time_after(sta->rx_stats.last_rx, sta->status_stats.last_ack)) - return sta->rx_stats.last_rx; + struct ieee80211_sta_rx_stats *stats = sta_get_last_rx_stats(sta); + + if (time_after(stats->last_rx, sta->status_stats.last_ack)) + return stats->last_rx; return sta->status_stats.last_ack; } diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h index a0a06609338d..dd6c6d400208 100644 --- a/net/mac80211/sta_info.h +++ b/net/mac80211/sta_info.h @@ -297,6 +297,7 @@ struct ieee80211_fast_tx { * @key: bool indicating encryption is expected (key is set) * @sta_notify: notify the MLME code (once) * @internal_forward: forward froms internally on AP/VLAN type interfaces + * @uses_rss: copy of USES_RSS hw flag * @da_offs: offset of the DA in the header (for header conversion) * @sa_offs: offset of the SA in the header (for header conversion) * @rcu_head: RCU head for freeing this structure @@ -311,7 +312,8 @@ struct ieee80211_fast_rx { u8 icv_len; u8 key:1, sta_notify:1, - internal_forward:1; + internal_forward:1, + uses_rss:1; u8 da_offs, sa_offs; struct rcu_head rcu_head; @@ -367,6 +369,21 @@ struct mesh_sta { DECLARE_EWMA(signal, 1024, 8) +struct ieee80211_sta_rx_stats { + unsigned long packets; + unsigned long last_rx; + unsigned long num_duplicates; + unsigned long fragments; + unsigned long dropped; + int last_signal; + u8 chains; + s8 chain_signal_last[IEEE80211_MAX_CHAINS]; + u16 last_rate; + struct u64_stats_sync syncp; + u64 bytes; + u64 msdu[IEEE80211_NUM_TIDS + 1]; +}; + /** * struct sta_info - STA information * @@ -428,6 +445,8 @@ DECLARE_EWMA(signal, 1024, 8) * the BSS one. * @tx_stats: TX statistics * @rx_stats: RX statistics + * @pcpu_rx_stats: per-CPU RX statistics, assigned only if the driver needs + * this (by advertising the USES_RSS hw flag) * @status_stats: TX status statistics */ struct sta_info { @@ -448,6 +467,7 @@ struct sta_info { struct ieee80211_fast_tx __rcu *fast_tx; struct ieee80211_fast_rx __rcu *fast_rx; + struct ieee80211_sta_rx_stats __percpu *pcpu_rx_stats; #ifdef CONFIG_MAC80211_MESH struct mesh_sta *mesh; @@ -477,21 +497,7 @@ struct sta_info { long last_connected; /* Updated from RX path only, no locking requirements */ - struct { - unsigned long packets; - unsigned long last_rx; - unsigned long num_duplicates; - unsigned long fragments; - unsigned long dropped; - int last_signal; - u8 chains; - s8 chain_signal_last[IEEE80211_MAX_CHAINS]; - u16 last_rate; - - struct u64_stats_sync syncp; - u64 bytes; - u64 msdu[IEEE80211_NUM_TIDS + 1]; - } rx_stats; + struct ieee80211_sta_rx_stats rx_stats; struct { struct ewma_signal signal; struct ewma_signal chain_signal[IEEE80211_MAX_CHAINS]; From 6e0456b5454561c4e9fa9e8a4acea405e6d56c80 Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Thu, 3 Mar 2016 22:59:00 +0100 Subject: [PATCH 0325/1649] mac80211: add A-MSDU tx support Requires software tx queueing and fast-xmit support. For good performance, drivers need frag_list support as well. This avoids the need for copying data of aggregated frames. Running without it is only supported for debugging purposes. 
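From the driver side this is opt-in. As a rough sketch (not taken from any in-tree driver; the numeric limit is only a placeholder and must reflect the device's real DMA constraints), opting in during hardware registration would look roughly like this, using the flags and the max_tx_fragments field introduced below:

#include <net/mac80211.h>

/* hypothetical driver setup, for illustration only */
static void example_enable_tx_amsdu(struct ieee80211_hw *hw)
{
        /* the driver must already use the TXQ (wake_tx_queue) API and fast-xmit */
        ieee80211_hw_set(hw, TX_AMSDU);
        /* advertise only if the hardware can transmit a frag_list chain of skbs */
        ieee80211_hw_set(hw, TX_FRAG_LIST);
        /* example value: tx buffers available per (A-)MSDU */
        hw->max_tx_fragments = 16;
}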
To avoid performance and packet size issues, the rate control module or driver needs to limit the maximum A-MSDU size by setting max_rc_amsdu_len in struct ieee80211_sta. Signed-off-by: Felix Fietkau [fix locking issue] Signed-off-by: Johannes Berg --- include/linux/ieee80211.h | 3 + include/net/mac80211.h | 19 +++++ net/mac80211/agg-tx.c | 5 ++ net/mac80211/debugfs.c | 2 + net/mac80211/ieee80211_i.h | 1 + net/mac80211/sta_info.c | 2 + net/mac80211/tx.c | 156 +++++++++++++++++++++++++++++++++++++ 7 files changed, 188 insertions(+) diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index 113bfc468a4d..b118744d3382 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -164,6 +164,9 @@ static inline u16 ieee80211_sn_sub(u16 sn1, u16 sn2) /* 30 byte 4 addr hdr, 2 byte QoS, 2304 byte MSDU, 12 byte crypt, 4 byte FCS */ #define IEEE80211_MAX_FRAME_LEN 2352 +/* Maximal size of an A-MSDU that can be transported in a HT BA session */ +#define IEEE80211_MAX_MPDU_LEN_HT_BA 4095 + /* Maximal size of an A-MSDU */ #define IEEE80211_MAX_MPDU_LEN_HT_3839 3839 #define IEEE80211_MAX_MPDU_LEN_HT_7935 7935 diff --git a/include/net/mac80211.h b/include/net/mac80211.h index 5f4b4c773a92..a3ee76559791 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -713,6 +713,7 @@ enum mac80211_tx_info_flags { * @IEEE80211_TX_CTRL_PS_RESPONSE: This frame is a response to a poll * frame (PS-Poll or uAPSD). * @IEEE80211_TX_CTRL_RATE_INJECT: This frame is injected with rate information + * @IEEE80211_TX_CTRL_AMSDU: This frame is an A-MSDU frame * * These flags are used in tx_info->control.flags. */ @@ -720,6 +721,7 @@ enum mac80211_tx_control_flags { IEEE80211_TX_CTRL_PORT_CTRL_PROTO = BIT(0), IEEE80211_TX_CTRL_PS_RESPONSE = BIT(1), IEEE80211_TX_CTRL_RATE_INJECT = BIT(2), + IEEE80211_TX_CTRL_AMSDU = BIT(3), }; /* @@ -1746,6 +1748,7 @@ struct ieee80211_sta_rates { * Both additional HT limits must be enforced by the low level driver. * This is defined by the spec (IEEE 802.11-2012 section 8.3.2.2 NOTE 2). * @support_p2p_ps: indicates whether the STA supports P2P PS mechanism or not. + * @max_rc_amsdu_len: Maximum A-MSDU size in bytes recommended by rate control. * @txq: per-TID data TX queues (if driver uses the TXQ abstraction) */ struct ieee80211_sta { @@ -1767,6 +1770,7 @@ struct ieee80211_sta { u8 max_amsdu_subframes; u16 max_amsdu_len; bool support_p2p_ps; + u16 max_rc_amsdu_len; struct ieee80211_txq *txq[IEEE80211_NUM_TIDS]; @@ -1983,6 +1987,15 @@ struct ieee80211_txq { * @IEEE80211_HW_USES_RSS: The device uses RSS and thus requires parallel RX, * which implies using per-CPU station statistics. * + * @IEEE80211_HW_TX_AMSDU: Hardware (or driver) supports software aggregated + * A-MSDU frames. Requires software tx queueing and fast-xmit support. + * When not using minstrel/minstrel_ht rate control, the driver must + * limit the maximum A-MSDU size based on the current tx rate by setting + * max_rc_amsdu_len in struct ieee80211_sta. + * + * @IEEE80211_HW_TX_FRAG_LIST: Hardware (or driver) supports sending frag_list + * skbs, needed for zero-copy software A-MSDU. 
+ * * @NUM_IEEE80211_HW_FLAGS: number of hardware flags, used for sizing arrays */ enum ieee80211_hw_flags { @@ -2021,6 +2034,8 @@ enum ieee80211_hw_flags { IEEE80211_HW_NEEDS_UNIQUE_STA_ADDR, IEEE80211_HW_SUPPORTS_REORDERING_BUFFER, IEEE80211_HW_USES_RSS, + IEEE80211_HW_TX_AMSDU, + IEEE80211_HW_TX_FRAG_LIST, /* keep last, obviously */ NUM_IEEE80211_HW_FLAGS @@ -2093,6 +2108,9 @@ enum ieee80211_hw_flags { * size is smaller (an example is LinkSys WRT120N with FW v1.0.07 * build 002 Jun 18 2012). * + * @max_tx_fragments: maximum number of tx buffers per (A)-MSDU, sum + * of 1 + skb_shinfo(skb)->nr_frags for each skb in the frag_list. + * * @offchannel_tx_hw_queue: HW queue ID to use for offchannel TX * (if %IEEE80211_HW_QUEUE_CONTROL is set) * @@ -2147,6 +2165,7 @@ struct ieee80211_hw { u8 max_rate_tries; u8 max_rx_aggregation_subframes; u8 max_tx_aggregation_subframes; + u8 max_tx_fragments; u8 offchannel_tx_hw_queue; u8 radiotap_mcs_details; u16 radiotap_vht_details; diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c index 4932e9f243a2..42fa81031dfa 100644 --- a/net/mac80211/agg-tx.c +++ b/net/mac80211/agg-tx.c @@ -935,6 +935,7 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local, size_t len) { struct tid_ampdu_tx *tid_tx; + struct ieee80211_txq *txq; u16 capab, tid; u8 buf_size; bool amsdu; @@ -945,6 +946,10 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local, buf_size = (capab & IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK) >> 6; buf_size = min(buf_size, local->hw.max_tx_aggregation_subframes); + txq = sta->sta.txq[tid]; + if (!amsdu && txq) + set_bit(IEEE80211_TXQ_NO_AMSDU, &to_txq_info(txq)->flags); + mutex_lock(&sta->ampdu_mlme.mtx); tid_tx = rcu_dereference_protected_tid_tx(sta, tid); diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c index 52ed2afc408d..b251b2f7f8dd 100644 --- a/net/mac80211/debugfs.c +++ b/net/mac80211/debugfs.c @@ -128,6 +128,8 @@ static const char *hw_flag_names[] = { FLAG(NEEDS_UNIQUE_STA_ADDR), FLAG(SUPPORTS_REORDERING_BUFFER), FLAG(USES_RSS), + FLAG(TX_AMSDU), + FLAG(TX_FRAG_LIST), #undef FLAG }; diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 6243109979ed..40c1d343992c 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -802,6 +802,7 @@ struct mac80211_qos_map { enum txq_info_flags { IEEE80211_TXQ_STOP, IEEE80211_TXQ_AMPDU, + IEEE80211_TXQ_NO_AMSDU, }; struct txq_info { diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index cf2aca0cc200..960e13d8ed30 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c @@ -416,6 +416,8 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata, } } + sta->sta.max_rc_amsdu_len = IEEE80211_MAX_MPDU_LEN_HT_BA; + sta_dbg(sdata, "Allocated STA %pM\n", sta->sta.addr); return sta; diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 597c8fe672a3..4fa2842ddb25 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -1324,6 +1324,10 @@ struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw, out: spin_unlock_bh(&txqi->queue.lock); + if (skb && skb_has_frag_list(skb) && + !ieee80211_hw_check(&local->hw, TX_FRAG_LIST)) + skb_linearize(skb); + return skb; } EXPORT_SYMBOL(ieee80211_tx_dequeue); @@ -2802,6 +2806,154 @@ void ieee80211_clear_fast_xmit(struct sta_info *sta) kfree_rcu(fast_tx, rcu_head); } +static bool ieee80211_amsdu_realloc_pad(struct ieee80211_local *local, + struct sk_buff *skb, int headroom, + int *subframe_len) +{ + int amsdu_len = *subframe_len + sizeof(struct ethhdr); + int padding 
= (4 - amsdu_len) & 3; + + if (skb_headroom(skb) < headroom || skb_tailroom(skb) < padding) { + I802_DEBUG_INC(local->tx_expand_skb_head); + + if (pskb_expand_head(skb, headroom, padding, GFP_ATOMIC)) { + wiphy_debug(local->hw.wiphy, + "failed to reallocate TX buffer\n"); + return false; + } + } + + if (padding) { + *subframe_len += padding; + memset(skb_put(skb, padding), 0, padding); + } + + return true; +} + +static bool ieee80211_amsdu_prepare_head(struct ieee80211_sub_if_data *sdata, + struct ieee80211_fast_tx *fast_tx, + struct sk_buff *skb) +{ + struct ieee80211_local *local = sdata->local; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct ieee80211_hdr *hdr; + struct ethhdr amsdu_hdr; + int hdr_len = fast_tx->hdr_len - sizeof(rfc1042_header); + int subframe_len = skb->len - hdr_len; + void *data; + u8 *qc; + + if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) + return false; + + if (info->control.flags & IEEE80211_TX_CTRL_AMSDU) + return true; + + if (!ieee80211_amsdu_realloc_pad(local, skb, sizeof(amsdu_hdr), + &subframe_len)) + return false; + + amsdu_hdr.h_proto = cpu_to_be16(subframe_len); + memcpy(amsdu_hdr.h_source, skb->data + fast_tx->sa_offs, ETH_ALEN); + memcpy(amsdu_hdr.h_dest, skb->data + fast_tx->da_offs, ETH_ALEN); + + data = skb_push(skb, sizeof(amsdu_hdr)); + memmove(data, data + sizeof(amsdu_hdr), hdr_len); + memcpy(data + hdr_len, &amsdu_hdr, sizeof(amsdu_hdr)); + + hdr = data; + qc = ieee80211_get_qos_ctl(hdr); + *qc |= IEEE80211_QOS_CTL_A_MSDU_PRESENT; + + info->control.flags |= IEEE80211_TX_CTRL_AMSDU; + + return true; +} + +static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata, + struct sta_info *sta, + struct ieee80211_fast_tx *fast_tx, + struct sk_buff *skb) +{ + struct ieee80211_local *local = sdata->local; + u8 tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK; + struct ieee80211_txq *txq = sta->sta.txq[tid]; + struct txq_info *txqi; + struct sk_buff **frag_tail, *head; + int subframe_len = skb->len - ETH_ALEN; + u8 max_subframes = sta->sta.max_amsdu_subframes; + int max_frags = local->hw.max_tx_fragments; + int max_amsdu_len = sta->sta.max_amsdu_len; + __be16 len; + void *data; + bool ret = false; + int n = 1, nfrags; + + if (!ieee80211_hw_check(&local->hw, TX_AMSDU)) + return false; + + if (!txq) + return false; + + txqi = to_txq_info(txq); + if (test_bit(IEEE80211_TXQ_NO_AMSDU, &txqi->flags)) + return false; + + if (sta->sta.max_rc_amsdu_len) + max_amsdu_len = min_t(int, max_amsdu_len, + sta->sta.max_rc_amsdu_len); + + spin_lock_bh(&txqi->queue.lock); + + head = skb_peek_tail(&txqi->queue); + if (!head) + goto out; + + if (skb->len + head->len > max_amsdu_len) + goto out; + + if (!ieee80211_amsdu_prepare_head(sdata, fast_tx, head)) + goto out; + + nfrags = 1 + skb_shinfo(skb)->nr_frags; + nfrags += 1 + skb_shinfo(head)->nr_frags; + frag_tail = &skb_shinfo(head)->frag_list; + while (*frag_tail) { + nfrags += 1 + skb_shinfo(*frag_tail)->nr_frags; + frag_tail = &(*frag_tail)->next; + n++; + } + + if (max_subframes && n > max_subframes) + goto out; + + if (max_frags && nfrags > max_frags) + goto out; + + if (!ieee80211_amsdu_realloc_pad(local, skb, sizeof(rfc1042_header) + 2, + &subframe_len)) + goto out; + + ret = true; + data = skb_push(skb, ETH_ALEN + 2); + memmove(data, data + ETH_ALEN + 2, 2 * ETH_ALEN); + + data += 2 * ETH_ALEN; + len = cpu_to_be16(subframe_len); + memcpy(data, &len, 2); + memcpy(data + 2, rfc1042_header, sizeof(rfc1042_header)); + + head->len += skb->len; + head->data_len += skb->len; + 
*frag_tail = skb; + +out: + spin_unlock_bh(&txqi->queue.lock); + + return ret; +} + static bool ieee80211_xmit_fast(struct ieee80211_sub_if_data *sdata, struct net_device *dev, struct sta_info *sta, struct ieee80211_fast_tx *fast_tx, @@ -2856,6 +3008,10 @@ static bool ieee80211_xmit_fast(struct ieee80211_sub_if_data *sdata, ieee80211_tx_stats(dev, skb->len + extra_head); + if ((hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_QOS_DATA)) && + ieee80211_amsdu_aggregate(sdata, sta, fast_tx, skb)) + return true; + /* will not be crypto-handled beyond what we do here, so use false * as the may-encrypt argument for the resize to not account for * more room than we already have in 'extra_head' From 918fe04b288b3784f4ca90d3dff12fc23dc2751f Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Thu, 3 Mar 2016 22:59:01 +0100 Subject: [PATCH 0326/1649] mac80211: minstrel_ht: set A-MSDU tx limits based on selected max_prob_rate Prevents excessive A-MSDU aggregation at low data rates or bad conditions. Signed-off-by: Felix Fietkau Signed-off-by: Johannes Berg --- net/mac80211/rc80211_minstrel_ht.c | 54 ++++++++++++++++++++++++++++++ 1 file changed, 54 insertions(+) diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c index 46ce08ed70b5..d77a9a842338 100644 --- a/net/mac80211/rc80211_minstrel_ht.c +++ b/net/mac80211/rc80211_minstrel_ht.c @@ -883,6 +883,59 @@ minstrel_ht_set_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi, ratetbl->rate[offset].flags = flags; } +static inline int +minstrel_ht_get_prob_ewma(struct minstrel_ht_sta *mi, int rate) +{ + int group = rate / MCS_GROUP_RATES; + rate %= MCS_GROUP_RATES; + return mi->groups[group].rates[rate].prob_ewma; +} + +static int +minstrel_ht_get_max_amsdu_len(struct minstrel_ht_sta *mi) +{ + int group = mi->max_prob_rate / MCS_GROUP_RATES; + const struct mcs_group *g = &minstrel_mcs_groups[group]; + int rate = mi->max_prob_rate % MCS_GROUP_RATES; + + /* Disable A-MSDU if max_prob_rate is bad */ + if (mi->groups[group].rates[rate].prob_ewma < MINSTREL_FRAC(50, 100)) + return 1; + + /* If the rate is slower than single-stream MCS1, make A-MSDU limit small */ + if (g->duration[rate] > MCS_DURATION(1, 0, 52)) + return 500; + + /* + * If the rate is slower than single-stream MCS4, limit A-MSDU to usual + * data packet size + */ + if (g->duration[rate] > MCS_DURATION(1, 0, 104)) + return 1600; + + /* + * If the rate is slower than single-stream MCS7, or if the max throughput + * rate success probability is less than 75%, limit A-MSDU to twice the usual + * data packet size + */ + if (g->duration[rate] > MCS_DURATION(1, 0, 260) || + (minstrel_ht_get_prob_ewma(mi, mi->max_tp_rate[0]) < + MINSTREL_FRAC(75, 100))) + return 3200; + + /* + * HT A-MPDU limits maximum MPDU size under BA agreement to 4095 bytes. + * Since aggregation sessions are started/stopped without txq flush, use + * the limit here to avoid the complexity of having to de-aggregate + * packets in the queue. 
+ */ + if (!mi->sta->vht_cap.vht_supported) + return IEEE80211_MAX_MPDU_LEN_HT_BA; + + /* unlimited */ + return 0; +} + static void minstrel_ht_update_rates(struct minstrel_priv *mp, struct minstrel_ht_sta *mi) { @@ -907,6 +960,7 @@ minstrel_ht_update_rates(struct minstrel_priv *mp, struct minstrel_ht_sta *mi) minstrel_ht_set_rate(mp, mi, rates, i++, mi->max_prob_rate); } + mi->sta->max_rc_amsdu_len = minstrel_ht_get_max_amsdu_len(mi); rates->rate[i].idx = -1; rate_control_set_rates(mp->hw, mi->sta, rates); } From ba6fbacf9c073effaedf0c52fe7e52e2baf67725 Mon Sep 17 00:00:00 2001 From: Jouni Malinen Date: Tue, 29 Mar 2016 13:53:27 +0300 Subject: [PATCH 0327/1649] cfg80211: Add option to specify previous BSSID for Connect command This extends NL80211_CMD_CONNECT to allow the NL80211_ATTR_PREV_BSSID attribute to be used similarly to way this was already allowed with NL80211_CMD_ASSOCIATE. This allows user space to request reassociation (instead of association) when already connected to an AP. This provides an option to reassociate within an ESS without having to disconnect and associate with the AP. Signed-off-by: Jouni Malinen Signed-off-by: Johannes Berg --- include/net/cfg80211.h | 2 ++ net/wireless/nl80211.c | 4 ++++ net/wireless/trace.h | 6 ++++-- 3 files changed, 10 insertions(+), 2 deletions(-) diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 568c10f6d564..b39277eb251f 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -1925,6 +1925,7 @@ struct cfg80211_bss_selection { * @pbss: if set, connect to a PCP instead of AP. Valid for DMG * networks. * @bss_select: criteria to be used for BSS selection. + * @prev_bssid: previous BSSID, if not %NULL use reassociate frame */ struct cfg80211_connect_params { struct ieee80211_channel *channel; @@ -1949,6 +1950,7 @@ struct cfg80211_connect_params { struct ieee80211_vht_cap vht_capa_mask; bool pbss; struct cfg80211_bss_selection bss_select; + const u8 *prev_bssid; }; /** diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 824569b1c5a1..4f89e2dbb70e 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -8058,6 +8058,10 @@ static int nl80211_connect(struct sk_buff *skb, struct genl_info *info) connect.mfp = NL80211_MFP_NO; } + if (info->attrs[NL80211_ATTR_PREV_BSSID]) + connect.prev_bssid = + nla_data(info->attrs[NL80211_ATTR_PREV_BSSID]); + if (info->attrs[NL80211_ATTR_WIPHY_FREQ]) { connect.channel = nl80211_get_valid_chan( wiphy, info->attrs[NL80211_ATTR_WIPHY_FREQ]); diff --git a/net/wireless/trace.h b/net/wireless/trace.h index 09b242b09bed..8da1fae23cfb 100644 --- a/net/wireless/trace.h +++ b/net/wireless/trace.h @@ -1259,6 +1259,7 @@ TRACE_EVENT(rdev_connect, __field(bool, privacy) __field(u32, wpa_versions) __field(u32, flags) + MAC_ENTRY(prev_bssid) ), TP_fast_assign( WIPHY_ASSIGN; @@ -1270,13 +1271,14 @@ TRACE_EVENT(rdev_connect, __entry->privacy = sme->privacy; __entry->wpa_versions = sme->crypto.wpa_versions; __entry->flags = sme->flags; + MAC_ASSIGN(prev_bssid, sme->prev_bssid); ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", bssid: " MAC_PR_FMT ", ssid: %s, auth type: %d, privacy: %s, wpa versions: %u, " - "flags: %u", + "flags: %u, previous bssid: " MAC_PR_FMT, WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(bssid), __entry->ssid, __entry->auth_type, BOOL_TO_STR(__entry->privacy), - __entry->wpa_versions, __entry->flags) + __entry->wpa_versions, __entry->flags, MAC_PR_ARG(prev_bssid)) ); TRACE_EVENT(rdev_set_cqm_rssi_config, From 4ce2bd9c4c1dfb416206ff1ad5283f6d24af4031 Mon Sep 17 
00:00:00 2001 From: Jouni Malinen Date: Tue, 29 Mar 2016 13:53:28 +0300 Subject: [PATCH 0328/1649] cfg80211: Allow reassociation to be requested with internal SME If the user space issues a NL80211_CMD_CONNECT with NL80211_ATTR_PREV_BSSID when there is already a connection, allow this to proceed as a reassociation instead of rejecting the new connect command with EALREADY. Signed-off-by: Jouni Malinen [validate prev_bssid] Signed-off-by: Johannes Berg --- net/wireless/nl80211.c | 3 ++- net/wireless/sme.c | 14 ++++++++++++-- 2 files changed, 14 insertions(+), 3 deletions(-) diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 4f89e2dbb70e..4f45a2913104 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -8151,7 +8151,8 @@ static int nl80211_connect(struct sk_buff *skb, struct genl_info *info) } wdev_lock(dev->ieee80211_ptr); - err = cfg80211_connect(rdev, dev, &connect, connkeys, NULL); + err = cfg80211_connect(rdev, dev, &connect, connkeys, + connect.prev_bssid); wdev_unlock(dev->ieee80211_ptr); if (err) kzfree(connkeys); diff --git a/net/wireless/sme.c b/net/wireless/sme.c index 65882d2777c0..1fba41676428 100644 --- a/net/wireless/sme.c +++ b/net/wireless/sme.c @@ -492,8 +492,18 @@ static int cfg80211_sme_connect(struct wireless_dev *wdev, if (!rdev->ops->auth || !rdev->ops->assoc) return -EOPNOTSUPP; - if (wdev->current_bss) - return -EALREADY; + if (wdev->current_bss) { + if (!prev_bssid) + return -EALREADY; + if (prev_bssid && + !ether_addr_equal(prev_bssid, wdev->current_bss->pub.bssid)) + return -ENOTCONN; + cfg80211_unhold_bss(wdev->current_bss); + cfg80211_put_bss(wdev->wiphy, &wdev->current_bss->pub); + wdev->current_bss = NULL; + + cfg80211_sme_free(wdev); + } if (WARN_ON(wdev->conn)) return -EINPROGRESS; From f8df33da2c9bbea3a72dff5326bb5de2ef8392d6 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Mon, 14 Mar 2016 16:31:10 +0100 Subject: [PATCH 0329/1649] mwifiex: Spelling s/minmum/minimum/, s/bandwidth/bandwith/ Signed-off-by: Geert Uytterhoeven Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/tdls.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/marvell/mwifiex/tdls.c b/drivers/net/wireless/marvell/mwifiex/tdls.c index 150649602e98..df9704de0715 100644 --- a/drivers/net/wireless/marvell/mwifiex/tdls.c +++ b/drivers/net/wireless/marvell/mwifiex/tdls.c @@ -285,7 +285,7 @@ static int mwifiex_tdls_add_vht_oper(struct mwifiex_private *priv, else usr_vht_cap_info = adapter->usr_dot_11ac_dev_cap_bg; - /* find the minmum bandwith between AP/TDLS peers */ + /* find the minimum bandwidth between AP/TDLS peers */ vht_cap = &sta_ptr->tdls_cap.vhtcap; supp_chwd_set = GET_VHTCAP_CHWDSET(usr_vht_cap_info); peer_supp_chwd_set = From 977bc523000d51693c4b083463dc93bbb692a662 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Tue, 15 Mar 2016 10:06:10 +0300 Subject: [PATCH 0330/1649] brcmfmac: uninitialized "ret" variable There is an error path where "ret" isn't initialized. 
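Reduced to its essentials the problem has the shape sketched below (hypothetical helper, not the literal driver code): only the recognized register sizes assign ret, so the default branch used to return whatever happened to be on the stack, and pre-initializing ret gives that branch a well-defined error instead:

#include <linux/errno.h>

static int example_request_data(unsigned int regsz)
{
        int ret = -EINVAL;      /* the fix: a defined result for the error path */

        switch (regsz) {
        case 1:
        case 2:
        case 4:
                ret = 0;        /* stands in for the actual byte/word transfer */
                break;
        default:
                break;          /* previously fell through with ret unset */
        }

        return ret;
}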
Signed-off-by: Dan Carpenter Signed-off-by: Kalle Valo --- drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c index da0cdd313880..2fc0597f2cd0 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c @@ -250,7 +250,7 @@ static int brcmf_sdiod_request_data(struct brcmf_sdio_dev *sdiodev, u8 fn, u32 addr, u8 regsz, void *data, bool write) { struct sdio_func *func; - int ret; + int ret = -EINVAL; brcmf_dbg(SDIO, "rw=%d, func=%d, addr=0x%05x, nbytes=%d\n", write, fn, addr, regsz); From 0026b32d723e958bac8f335ba3f47825b11b7287 Mon Sep 17 00:00:00 2001 From: Amitkumar Karwar Date: Wed, 16 Mar 2016 07:46:16 -0700 Subject: [PATCH 0331/1649] mwifiex: fix Tx timeout issue during suspend test Call netif_carrier_off/on while stoping/starting netdev queues. This fixes netdev watchdog warning and ->ndo_tx_timeout() invocation during suspend resume stress test. Signed-off-by: Amitkumar Karwar Fixes: 54f008497b9f09f ('mwifiex: Empty Tx queue during suspend') Tested-by: Wei-Ning Huang Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/cfg80211.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c index bb7235e1b9d1..b0663bdea5b5 100644 --- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c +++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c @@ -3272,8 +3272,11 @@ static int mwifiex_cfg80211_suspend(struct wiphy *wiphy, for (i = 0; i < adapter->priv_num; i++) { priv = adapter->priv[i]; - if (priv && priv->netdev) + if (priv && priv->netdev) { mwifiex_stop_net_dev_queue(priv->netdev, adapter); + if (netif_carrier_ok(priv->netdev)) + netif_carrier_off(priv->netdev); + } } for (i = 0; i < retry_num; i++) { @@ -3344,8 +3347,11 @@ static int mwifiex_cfg80211_resume(struct wiphy *wiphy) for (i = 0; i < adapter->priv_num; i++) { priv = adapter->priv[i]; - if (priv && priv->netdev) + if (priv && priv->netdev) { + if (!netif_carrier_ok(priv->netdev)) + netif_carrier_on(priv->netdev); mwifiex_wake_up_net_dev_queue(priv->netdev, adapter); + } } priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA); From c18d8f5095715c56bb3cd9cba64242542632054b Mon Sep 17 00:00:00 2001 From: Larry Finger Date: Wed, 16 Mar 2016 13:33:34 -0500 Subject: [PATCH 0332/1649] rtlwifi: rtl8723be: Add antenna select module parameter A number of new laptops have been delivered with only a single antenna. In principle, this is OK; however, a problem arises when the on-board EEPROM is programmed to use the other antenna connection. The option of opening the computer and moving the connector is not always possible as it will void the warranty in some cases. In addition, this solution breaks the Windows driver when the box dual boots Linux and Windows. A fix involving a new module parameter has been developed. This commit adds the new parameter and implements the changes needed for the driver. 
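In practice the workaround is applied at module load time: setting the new option to 1 or 2 forces the antenna selection (which value is correct depends on which connector is actually wired, so both may need to be tried), for example by adding "options rtl8723be ant_sel=2" to a file under /etc/modprobe.d/. Leaving the option at its default of 0 preserves the existing EEPROM-based behaviour.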
Signed-off-by: Larry Finger Cc: Stable [V4.0+] Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c | 5 +++++ drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c | 3 +++ drivers/net/wireless/realtek/rtlwifi/wifi.h | 3 +++ 3 files changed, 11 insertions(+) diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c index c983d2fe147f..5a3df9198ddf 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c @@ -2684,6 +2684,7 @@ void rtl8723be_read_bt_coexist_info_from_hwpg(struct ieee80211_hw *hw, bool auto_load_fail, u8 *hwinfo) { struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_mod_params *mod_params = rtlpriv->cfg->mod_params; u8 value; u32 tmpu_32; @@ -2702,6 +2703,10 @@ void rtl8723be_read_bt_coexist_info_from_hwpg(struct ieee80211_hw *hw, rtlpriv->btcoexist.btc_info.ant_num = ANT_X2; } + /* override ant_num / ant_path */ + if (mod_params->ant_sel) + rtlpriv->btcoexist.btc_info.ant_num = + (mod_params->ant_sel == 1 ? ANT_X2 : ANT_X1); } void rtl8723be_bt_reg_init(struct ieee80211_hw *hw) diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c index a78eaeda0008..2101793438ed 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c @@ -273,6 +273,7 @@ static struct rtl_mod_params rtl8723be_mod_params = { .msi_support = false, .disable_watchdog = false, .debug = DBG_EMERG, + .ant_sel = 0, }; static struct rtl_hal_cfg rtl8723be_hal_cfg = { @@ -394,6 +395,7 @@ module_param_named(fwlps, rtl8723be_mod_params.fwctrl_lps, bool, 0444); module_param_named(msi, rtl8723be_mod_params.msi_support, bool, 0444); module_param_named(disable_watchdog, rtl8723be_mod_params.disable_watchdog, bool, 0444); +module_param_named(ant_sel, rtl8723be_mod_params.ant_sel, int, 0444); MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n"); MODULE_PARM_DESC(ips, "Set to 0 to not use link power save (default 1)\n"); MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 0)\n"); @@ -402,6 +404,7 @@ MODULE_PARM_DESC(msi, "Set to 1 to use MSI interrupts mode (default 0)\n"); MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)"); MODULE_PARM_DESC(disable_watchdog, "Set to 1 to disable the watchdog (default 0)\n"); +MODULE_PARM_DESC(ant_sel, "Set to 1 or 2 to force antenna number (default 0)\n"); static SIMPLE_DEV_PM_OPS(rtlwifi_pm_ops, rtl_pci_suspend, rtl_pci_resume); diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h index 554d81420f19..93bd7fcd2b61 100644 --- a/drivers/net/wireless/realtek/rtlwifi/wifi.h +++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h @@ -2246,6 +2246,9 @@ struct rtl_mod_params { /* default 0: 1 means do not disable interrupts */ bool int_clear; + + /* select antenna */ + int ant_sel; }; struct rtl_hal_usbint_cfg { From baa1702290953295e421f0f433e2b1ff4815827c Mon Sep 17 00:00:00 2001 From: Larry Finger Date: Wed, 16 Mar 2016 13:33:35 -0500 Subject: [PATCH 0333/1649] rtlwifi: btcoexist: Implement antenna selection The previous patch added an option to rtl8723be to manually select the antenna for those cases when only a single antenna is present, and the on-board EEPROM is incorrectly programmed. This patch implements the necessary changes in the Bluetooth coexistence driver. 
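Concretely (see the hunks below), exhalbtc_set_ant_num() now takes the ant_sel override into account when recording whether the antenna sits on the main or the aux port, and the 8723B 2-antenna setup code programs register 0x948 according to that recorded position instead of always assuming the main port.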
Signed-off-by: Larry Finger Cc: Stable [V4.0+] Signed-off-by: Kalle Valo --- .../rtlwifi/btcoexist/halbtc8723b2ant.c | 9 +++++-- .../realtek/rtlwifi/btcoexist/halbtcoutsrc.c | 27 ++++++++++++++++++- .../realtek/rtlwifi/btcoexist/halbtcoutsrc.h | 2 +- .../realtek/rtlwifi/btcoexist/rtl_btc.c | 5 +++- 4 files changed, 38 insertions(+), 5 deletions(-) diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c index c43ab59a690a..77cbd10e807d 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c @@ -1203,7 +1203,6 @@ static void btc8723b2ant_set_ant_path(struct btc_coexist *btcoexist, /* Force GNT_BT to low */ btcoexist->btc_write_1byte_bitmask(btcoexist, 0x765, 0x18, 0x0); - btcoexist->btc_write_2byte(btcoexist, 0x948, 0x0); if (board_info->btdm_ant_pos == BTC_ANTENNA_AT_MAIN_PORT) { /* tell firmware "no antenna inverse" */ @@ -1211,19 +1210,25 @@ static void btc8723b2ant_set_ant_path(struct btc_coexist *btcoexist, h2c_parameter[1] = 1; /* ext switch type */ btcoexist->btc_fill_h2c(btcoexist, 0x65, 2, h2c_parameter); + btcoexist->btc_write_2byte(btcoexist, 0x948, 0x0); } else { /* tell firmware "antenna inverse" */ h2c_parameter[0] = 1; h2c_parameter[1] = 1; /* ext switch type */ btcoexist->btc_fill_h2c(btcoexist, 0x65, 2, h2c_parameter); + btcoexist->btc_write_2byte(btcoexist, 0x948, 0x280); } } /* ext switch setting */ if (use_ext_switch) { /* fixed internal switch S1->WiFi, S0->BT */ - btcoexist->btc_write_2byte(btcoexist, 0x948, 0x0); + if (board_info->btdm_ant_pos == BTC_ANTENNA_AT_MAIN_PORT) + btcoexist->btc_write_2byte(btcoexist, 0x948, 0x0); + else + btcoexist->btc_write_2byte(btcoexist, 0x948, 0x280); + switch (antpos_type) { case BTC_ANT_WIFI_AT_MAIN: /* ext switch main at wifi */ diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c index b2791c893417..babd1490f20c 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c @@ -965,13 +965,38 @@ void exhalbtc_set_chip_type(u8 chip_type) } } -void exhalbtc_set_ant_num(u8 type, u8 ant_num) +void exhalbtc_set_ant_num(struct rtl_priv *rtlpriv, u8 type, u8 ant_num) { if (BT_COEX_ANT_TYPE_PG == type) { gl_bt_coexist.board_info.pg_ant_num = ant_num; gl_bt_coexist.board_info.btdm_ant_num = ant_num; + /* The antenna position: + * Main (default) or Aux for pgAntNum=2 && btdmAntNum =1. + * The antenna position should be determined by + * auto-detect mechanism. 
+ * The following is assumed to main, + * and those must be modified + * if y auto-detect mechanism is ready + */ + if ((gl_bt_coexist.board_info.pg_ant_num == 2) && + (gl_bt_coexist.board_info.btdm_ant_num == 1)) + gl_bt_coexist.board_info.btdm_ant_pos = + BTC_ANTENNA_AT_MAIN_PORT; + else + gl_bt_coexist.board_info.btdm_ant_pos = + BTC_ANTENNA_AT_MAIN_PORT; } else if (BT_COEX_ANT_TYPE_ANTDIV == type) { gl_bt_coexist.board_info.btdm_ant_num = ant_num; + gl_bt_coexist.board_info.btdm_ant_pos = + BTC_ANTENNA_AT_MAIN_PORT; + } else if (type == BT_COEX_ANT_TYPE_DETECTED) { + gl_bt_coexist.board_info.btdm_ant_num = ant_num; + if (rtlpriv->cfg->mod_params->ant_sel == 1) + gl_bt_coexist.board_info.btdm_ant_pos = + BTC_ANTENNA_AT_AUX_PORT; + else + gl_bt_coexist.board_info.btdm_ant_pos = + BTC_ANTENNA_AT_MAIN_PORT; } } diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h index 0a903ea179ef..f41ca57dd8a7 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h @@ -535,7 +535,7 @@ void exhalbtc_set_bt_patch_version(u16 bt_hci_version, u16 bt_patch_version); void exhalbtc_update_min_bt_rssi(char bt_rssi); void exhalbtc_set_bt_exist(bool bt_exist); void exhalbtc_set_chip_type(u8 chip_type); -void exhalbtc_set_ant_num(u8 type, u8 ant_num); +void exhalbtc_set_ant_num(struct rtl_priv *rtlpriv, u8 type, u8 ant_num); void exhalbtc_display_bt_coex_info(struct btc_coexist *btcoexist); void exhalbtc_signal_compensation(struct btc_coexist *btcoexist, u8 *rssi_wifi, u8 *rssi_bt); diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.c index b9b0cb7af8ea..d3fd9211b3a4 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.c +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.c @@ -72,7 +72,10 @@ void rtl_btc_init_hal_vars(struct rtl_priv *rtlpriv) __func__, bt_type); exhalbtc_set_chip_type(bt_type); - exhalbtc_set_ant_num(BT_COEX_ANT_TYPE_PG, ant_num); + if (rtlpriv->cfg->mod_params->ant_sel == 1) + exhalbtc_set_ant_num(rtlpriv, BT_COEX_ANT_TYPE_DETECTED, 1); + else + exhalbtc_set_ant_num(rtlpriv, BT_COEX_ANT_TYPE_PG, ant_num); } void rtl_btc_init_hw_config(struct rtl_priv *rtlpriv) From 37c52934c66810205707ddeb42eb08b06a5af4c4 Mon Sep 17 00:00:00 2001 From: Larry Finger Date: Thu, 17 Mar 2016 13:40:56 -0500 Subject: [PATCH 0334/1649] rtlwifi: Fix Smatch warnings Smatch reports the following: CHECK drivers/net/wireless/realtek/rtlwifi/pci.c drivers/net/wireless/realtek/rtlwifi/pci.c:366 rtl_pci_check_buddy_priv() error: we previously assumed 'tpriv' could be null (see line 368) drivers/net/wireless/realtek/rtlwifi/pci.c:1216 _rtl_pci_init_struct() warn: inconsistent indenting Signed-off-by: Larry Finger Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtlwifi/pci.c | 37 +++++++++++----------- 1 file changed, 18 insertions(+), 19 deletions(-) diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c index 283d608b9973..1ac41b8bd19a 100644 --- a/drivers/net/wireless/realtek/rtlwifi/pci.c +++ b/drivers/net/wireless/realtek/rtlwifi/pci.c @@ -359,30 +359,28 @@ static bool rtl_pci_check_buddy_priv(struct ieee80211_hw *hw, struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); bool find_buddy_priv = false; - struct rtl_priv *tpriv = NULL; + struct rtl_priv *tpriv; struct 
rtl_pci_priv *tpcipriv = NULL; if (!list_empty(&rtlpriv->glb_var->glb_priv_list)) { list_for_each_entry(tpriv, &rtlpriv->glb_var->glb_priv_list, list) { - if (tpriv) { - tpcipriv = (struct rtl_pci_priv *)tpriv->priv; - RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, - "pcipriv->ndis_adapter.funcnumber %x\n", - pcipriv->ndis_adapter.funcnumber); - RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, - "tpcipriv->ndis_adapter.funcnumber %x\n", - tpcipriv->ndis_adapter.funcnumber); + tpcipriv = (struct rtl_pci_priv *)tpriv->priv; + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "pcipriv->ndis_adapter.funcnumber %x\n", + pcipriv->ndis_adapter.funcnumber); + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "tpcipriv->ndis_adapter.funcnumber %x\n", + tpcipriv->ndis_adapter.funcnumber); - if ((pcipriv->ndis_adapter.busnumber == - tpcipriv->ndis_adapter.busnumber) && - (pcipriv->ndis_adapter.devnumber == - tpcipriv->ndis_adapter.devnumber) && - (pcipriv->ndis_adapter.funcnumber != - tpcipriv->ndis_adapter.funcnumber)) { - find_buddy_priv = true; - break; - } + if ((pcipriv->ndis_adapter.busnumber == + tpcipriv->ndis_adapter.busnumber) && + (pcipriv->ndis_adapter.devnumber == + tpcipriv->ndis_adapter.devnumber) && + (pcipriv->ndis_adapter.funcnumber != + tpcipriv->ndis_adapter.funcnumber)) { + find_buddy_priv = true; + break; } } } @@ -1213,7 +1211,8 @@ static void _rtl_pci_init_struct(struct ieee80211_hw *hw, /*Tx/Rx related var */ _rtl_pci_init_trx_var(hw); - /*IBSS*/ mac->beacon_interval = 100; + /*IBSS*/ + mac->beacon_interval = 100; /*AMPDU*/ mac->min_space_cfg = 0; From 2e074fab347e1231bc1da156a12b37b6f746712c Mon Sep 17 00:00:00 2001 From: Larry Finger Date: Thu, 17 Mar 2016 13:40:57 -0500 Subject: [PATCH 0335/1649] rtlwifi: btcoexist: Fix Smatch warning Smatch reports the following: CHECK drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c:3078 btc8723b2ant_run_coexist_mechanism() warn: inconsistent indenting Signed-off-by: Larry Finger Signed-off-by: Kalle Valo --- .../net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c index 77cbd10e807d..205f78b3ab23 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c @@ -3075,7 +3075,7 @@ static void btc8723b2ant_run_coexist_mechanism(struct btc_coexist *btcoexist) "[BTCoex], Action 2-Ant, " "algorithm = HS mode.\n"); btc8723b2ant_action_pan_hs(btcoexist); - break; + break; case BT_8723B_2ANT_COEX_ALGO_PANEDR_A2DP: BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, "[BTCoex], Action 2-Ant, " From 844026f609fc35918228cbb9ff2fd48e373504f7 Mon Sep 17 00:00:00 2001 From: Larry Finger Date: Thu, 17 Mar 2016 13:40:58 -0500 Subject: [PATCH 0336/1649] rtlwifi: rtl8188ee: Fix Smatch warnings Smatch reports the following: CHECK drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c:1140 rtl88e_dm_check_txpower_tracking() warn: inconsistent indenting CHECK drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c:1906 _rtl88e_phy_lc_calibrate() warn: inconsistent indenting Signed-off-by: Larry Finger Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c | 2 +- drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c | 3 
+-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c index ce4da9d79fbd..db9a7829d568 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c @@ -1137,7 +1137,7 @@ void rtl88e_dm_check_txpower_tracking(struct ieee80211_hw *hw) } else { RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, "Schedule TxPowerTracking !!\n"); - dm_txpower_track_cb_therm(hw); + dm_txpower_track_cb_therm(hw); rtlpriv->dm.tm_trigger = 0; } } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c index a2bb02c7b837..416a9ba6382e 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c @@ -1903,8 +1903,7 @@ static void _rtl88e_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t) } else { rtl_write_byte(rtlpriv, REG_TXPAUSE, 0x00); } -RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "\n"); - + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "\n"); } static void _rtl88e_phy_set_rfpath_switch(struct ieee80211_hw *hw, From de8a9a6eeb572f7f0e8a87df9f29264c04503af4 Mon Sep 17 00:00:00 2001 From: Larry Finger Date: Thu, 17 Mar 2016 13:40:59 -0500 Subject: [PATCH 0337/1649] rtlwifi: rtl8192c-common: Fix Smatch warning Smatch lists the following: CHECK drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c:243 rtl92c_dm_false_alarm_counter_statistics() warn: inconsistent indenting Signed-off-by: Larry Finger Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c index 03cbe4cf110b..316be5ff69ca 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c @@ -240,7 +240,7 @@ static void rtl92c_dm_false_alarm_counter_statistics(struct ieee80211_hw *hw) ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER3, MASKDWORD); falsealm_cnt->cnt_mcs_fail = (ret_value & 0xffff); - ret_value = rtl_get_bbreg(hw, ROFDM0_FRAMESYNC, MASKDWORD); + ret_value = rtl_get_bbreg(hw, ROFDM0_FRAMESYNC, MASKDWORD); falsealm_cnt->cnt_fast_fsync_fail = (ret_value & 0xffff); falsealm_cnt->cnt_sb_search_fail = ((ret_value & 0xffff0000) >> 16); From 05d9e1bba43b3b9e722ca06fc45b79d93374be18 Mon Sep 17 00:00:00 2001 From: Larry Finger Date: Thu, 17 Mar 2016 13:41:00 -0500 Subject: [PATCH 0338/1649] rtlwifi: rtl8192ee: Fix Smatch warning Smatch lists the following: CHECK drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c:371 rtl92ee_rx_query_desc() warn: inconsistent indenting Signed-off-by: Larry Finger Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c index 24eff8ea4c2e..35e6bf7e233d 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c @@ -368,7 +368,7 @@ bool rtl92ee_rx_query_desc(struct ieee80211_hw *hw, status->decrypted = !GET_RX_DESC_SWDEC(pdesc); status->rate = (u8)GET_RX_DESC_RXMCS(pdesc); 
status->isampdu = (bool)(GET_RX_DESC_PAGGR(pdesc) == 1); - status->timestamp_low = GET_RX_DESC_TSFL(pdesc); + status->timestamp_low = GET_RX_DESC_TSFL(pdesc); status->is_cck = RTL92EE_RX_HAL_IS_CCK_RATE(status->rate); status->macid = GET_RX_DESC_MACID(pdesc); From c42ceccec17056940d0c97da79ff14d71062cc28 Mon Sep 17 00:00:00 2001 From: Larry Finger Date: Thu, 17 Mar 2016 13:41:01 -0500 Subject: [PATCH 0339/1649] rtlwifi: rtl8192se: Fix Smatch warning Smatch lists the following: CHECK drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c:648 rtl92s_phy_set_rf_power_state() warn: inconsistent indenting Signed-off-by: Larry Finger Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c index 4b4612fe2fdb..881821f4e243 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c @@ -645,7 +645,7 @@ bool rtl92s_phy_set_rf_power_state(struct ieee80211_hw *hw, rtlpriv->psc.state_inap); ppsc->last_sleep_jiffies = jiffies; _rtl92se_phy_set_rf_sleep(hw); - break; + break; default: RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "switch case not processed\n"); From 154fb486df3d8e2fb346dfb9777abe20b23e1d6f Mon Sep 17 00:00:00 2001 From: Larry Finger Date: Thu, 17 Mar 2016 13:41:02 -0500 Subject: [PATCH 0340/1649] rtlwifi: rtl8723ae: Fix Smatch warning Smatch reports the following: CHECK drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_btc.c drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_btc.c:137 rtl8723e_dm_bt_need_to_dec_bt_pwr() warn: inconsistent indenting Signed-off-by: Larry Finger Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_btc.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_btc.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_btc.c index 00a0531cc5f4..44de695dc999 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_btc.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_btc.c @@ -134,9 +134,9 @@ static bool rtl8723e_dm_bt_need_to_dec_bt_pwr(struct ieee80211_hw *hw) if (mgnt_link_status_query(hw) == RT_MEDIA_CONNECT) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG, "Need to decrease bt power\n"); - rtlpriv->btcoexist.cstate |= - BT_COEX_STATE_DEC_BT_POWER; - return true; + rtlpriv->btcoexist.cstate |= + BT_COEX_STATE_DEC_BT_POWER; + return true; } rtlpriv->btcoexist.cstate &= ~BT_COEX_STATE_DEC_BT_POWER; From b3c4201bce5e32a353e68e1daf2aed213b8495e7 Mon Sep 17 00:00:00 2001 From: Larry Finger Date: Thu, 17 Mar 2016 13:41:03 -0500 Subject: [PATCH 0341/1649] rtlwifi: rtl8723be: Fix Smatch warnings Smatch reports the following: CHECK drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c:1726 _rtl8723be_phy_path_a_rx_iqk() warn: inconsistent indenting drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c:2304 _rtl8723be_phy_lc_calibrate() warn: inconsistent indenting drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c:2609 _rtl8723be_phy_set_rf_power_state() warn: inconsistent indenting CHECK drivers/net/wireless/realtek/rtlwifi/rtl8723be/rf.c drivers/net/wireless/realtek/rtlwifi/rtl8723be/rf.c:306 _rtl8723be_get_txpower_writeval_by_regulatory() warn: inconsistent indenting Signed-off-by: Larry Finger 
Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c | 10 ++++------ drivers/net/wireless/realtek/rtlwifi/rtl8723be/rf.c | 4 ++-- 2 files changed, 6 insertions(+), 8 deletions(-) diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c index b7b73cbe346d..445f681d08c0 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c @@ -1723,8 +1723,8 @@ static u8 _rtl8723be_phy_path_a_rx_iqk(struct ieee80211_hw *hw) /* Allen 20131125 */ tmp = (reg_eac & 0x03FF0000) >> 16; - if ((tmp & 0x200) > 0) - tmp = 0x400 - tmp; + if ((tmp & 0x200) > 0) + tmp = 0x400 - tmp; /* if Tx is OK, check whether Rx is OK */ if (!(reg_eac & BIT(27)) && (((reg_ea4 & 0x03FF0000) >> 16) != 0x132) && @@ -2301,8 +2301,7 @@ static void _rtl8723be_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t) } else { rtl_write_byte(rtlpriv, REG_TXPAUSE, 0x00); } -RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "\n"); - + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "\n"); } static void _rtl8723be_phy_set_rfpath_switch(struct ieee80211_hw *hw, @@ -2606,8 +2605,7 @@ static bool _rtl8723be_phy_set_rf_power_state(struct ieee80211_hw *hw, "IPS Set eRf nic enable\n"); rtstatus = rtl_ps_enable_nic(hw); } while (!rtstatus && (initializecount < 10)); - RT_CLEAR_PS_LEVEL(ppsc, - RT_RF_OFF_LEVL_HALT_NIC); + RT_CLEAR_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC); } else { RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG, "Set ERFON sleeped:%d ms\n", diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/rf.c index 5ed4492d3c80..97f5a0377e7a 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/rf.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/rf.c @@ -303,8 +303,8 @@ static void _rtl8723be_get_txpower_writeval_by_regulatory( [chnlgroup][index + (rf ? 
8 : 0)] & (0x7f << (i * 8))) >> (i * 8)); - if (pwr_diff_limit[i] > pwr_diff) - pwr_diff_limit[i] = pwr_diff; + if (pwr_diff_limit[i] > pwr_diff) + pwr_diff_limit[i] = pwr_diff; } customer_limit = (pwr_diff_limit[3] << 24) | From 1e812458206e3b787951868d6f8acaae5e3f4aca Mon Sep 17 00:00:00 2001 From: Larry Finger Date: Thu, 17 Mar 2016 13:41:04 -0500 Subject: [PATCH 0342/1649] rtlwifi: rtl8821ae: Fix Smatch warnings Smatch reports the following: CHECK drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c:1960 rtl8812ae_dm_txpower_tracking_callback_thermalmeter() warn: inconsistent indenting CHECK drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c:455 phy_get_tx_swing_8812A() warn: inconsistent indenting drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c:517 phy_get_tx_swing_8812A() warn: inconsistent indenting Signed-off-by: Larry Finger Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c | 6 +++--- drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c index 95dcbff4673b..e346cb86cb08 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c @@ -1957,9 +1957,9 @@ void rtl8812ae_dm_txpower_tracking_callback_thermalmeter( rtldm->swing_idx_ofdm_base[p] = rtldm->swing_idx_ofdm[p]; - RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, - "pDM_Odm->RFCalibrateInfo.ThermalValue =%d ThermalValue= %d\n", - rtldm->thermalvalue, thermal_value); + RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, + "pDM_Odm->RFCalibrateInfo.ThermalValue =%d ThermalValue= %d\n", + rtldm->thermalvalue, thermal_value); /*Record last Power Tracking Thermal Value*/ rtldm->thermalvalue = thermal_value; } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c index 74165b3eb362..ddf74d527017 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c @@ -418,9 +418,9 @@ u32 phy_get_tx_swing_8812A(struct ieee80211_hw *hw, u8 band, out = 0x16A; /* -3 dB */ } } else { - u32 swing = 0, swing_a = 0, swing_b = 0; + u32 swing = 0, swing_a = 0, swing_b = 0; - if (band == BAND_ON_2_4G) { + if (band == BAND_ON_2_4G) { if (reg_swing_2g == auto_temp) { efuse_shadow_read(hw, 1, 0xC6, (u32 *)&swing); swing = (swing == 0xFF) ? 0x00 : swing; @@ -514,7 +514,7 @@ u32 phy_get_tx_swing_8812A(struct ieee80211_hw *hw, u8 band, RT_TRACE(rtlpriv, COMP_SCAN, DBG_LOUD, "<=== PHY_GetTxBBSwing_8812A, out = 0x%X\n", out); - return out; + return out; } void rtl8821ae_phy_switch_wirelessband(struct ieee80211_hw *hw, u8 band) From 466414a084a90a15b81601bdde0c5803dc061ecd Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Thu, 17 Mar 2016 17:00:21 -0700 Subject: [PATCH 0343/1649] rtlwifi: btcoexist: Convert BTC_PRINTK to btc__dbg Use a more common logging style. 
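The conversion itself is mechanical: a call such as BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, "[BTCoex], ...\n") becomes btc_alg_dbg(ALGO_TRACE, "[BTCoex], ...\n"), and the BTC_MSG_INTERFACE messages are converted to an equivalent interface-specific helper.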
Miscellanea: o Add specific logging macros for ALGORITHM and INTERFACE types o Output the messages at KERN_DEBUG o Coalesce formats o Align arguments o Whitespace style adjustments for only these changes Signed-off-by: Joe Perches Signed-off-by: Kalle Valo --- .../rtlwifi/btcoexist/halbtc8192e2ant.c | 847 +++++++++-------- .../rtlwifi/btcoexist/halbtc8723b1ant.c | 611 ++++++------- .../rtlwifi/btcoexist/halbtc8723b2ant.c | 854 ++++++++---------- .../rtlwifi/btcoexist/halbtc8821a1ant.c | 652 +++++++------ .../rtlwifi/btcoexist/halbtc8821a2ant.c | 851 +++++++++-------- .../realtek/rtlwifi/btcoexist/halbtcoutsrc.c | 4 +- .../realtek/rtlwifi/btcoexist/halbtcoutsrc.h | 17 +- 7 files changed, 1851 insertions(+), 1985 deletions(-) diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c index 451456835f87..a30af6cc21f3 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c @@ -70,83 +70,83 @@ static u8 halbtc8192e2ant_btrssi_state(u8 level_num, u8 rssi_thresh, if (level_num == 2) { if ((coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_LOW) || (coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_STAY_LOW)) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, - "BT Rssi pre state = LOW\n"); + btc_alg_dbg(ALGO_BT_RSSI_STATE, + "BT Rssi pre state = LOW\n"); if (btrssi >= (rssi_thresh + BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT)) { btrssi_state = BTC_RSSI_STATE_HIGH; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, - "BT Rssi state switch to High\n"); + btc_alg_dbg(ALGO_BT_RSSI_STATE, + "BT Rssi state switch to High\n"); } else { btrssi_state = BTC_RSSI_STATE_STAY_LOW; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, - "BT Rssi state stay at Low\n"); + btc_alg_dbg(ALGO_BT_RSSI_STATE, + "BT Rssi state stay at Low\n"); } } else { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, - "BT Rssi pre state = HIGH\n"); + btc_alg_dbg(ALGO_BT_RSSI_STATE, + "BT Rssi pre state = HIGH\n"); if (btrssi < rssi_thresh) { btrssi_state = BTC_RSSI_STATE_LOW; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, - "BT Rssi state switch to Low\n"); + btc_alg_dbg(ALGO_BT_RSSI_STATE, + "BT Rssi state switch to Low\n"); } else { btrssi_state = BTC_RSSI_STATE_STAY_HIGH; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, - "BT Rssi state stay at High\n"); + btc_alg_dbg(ALGO_BT_RSSI_STATE, + "BT Rssi state stay at High\n"); } } } else if (level_num == 3) { if (rssi_thresh > rssi_thresh1) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, - "BT Rssi thresh error!!\n"); + btc_alg_dbg(ALGO_BT_RSSI_STATE, + "BT Rssi thresh error!!\n"); return coex_sta->pre_bt_rssi_state; } if ((coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_LOW) || (coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_STAY_LOW)) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, - "BT Rssi pre state = LOW\n"); + btc_alg_dbg(ALGO_BT_RSSI_STATE, + "BT Rssi pre state = LOW\n"); if (btrssi >= (rssi_thresh + BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT)) { btrssi_state = BTC_RSSI_STATE_MEDIUM; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, - "BT Rssi state switch to Medium\n"); + btc_alg_dbg(ALGO_BT_RSSI_STATE, + "BT Rssi state switch to Medium\n"); } else { btrssi_state = BTC_RSSI_STATE_STAY_LOW; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, - "BT Rssi state stay at Low\n"); + btc_alg_dbg(ALGO_BT_RSSI_STATE, + "BT Rssi state stay at Low\n"); } } else if ((coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_MEDIUM) 
|| (coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_STAY_MEDIUM)) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, - "[BTCoex], BT Rssi pre state = MEDIUM\n"); + btc_alg_dbg(ALGO_BT_RSSI_STATE, + "[BTCoex], BT Rssi pre state = MEDIUM\n"); if (btrssi >= (rssi_thresh1 + BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT)) { btrssi_state = BTC_RSSI_STATE_HIGH; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, - "BT Rssi state switch to High\n"); + btc_alg_dbg(ALGO_BT_RSSI_STATE, + "BT Rssi state switch to High\n"); } else if (btrssi < rssi_thresh) { btrssi_state = BTC_RSSI_STATE_LOW; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, - "BT Rssi state switch to Low\n"); + btc_alg_dbg(ALGO_BT_RSSI_STATE, + "BT Rssi state switch to Low\n"); } else { btrssi_state = BTC_RSSI_STATE_STAY_MEDIUM; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, - "BT Rssi state stay at Medium\n"); + btc_alg_dbg(ALGO_BT_RSSI_STATE, + "BT Rssi state stay at Medium\n"); } } else { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, - "BT Rssi pre state = HIGH\n"); + btc_alg_dbg(ALGO_BT_RSSI_STATE, + "BT Rssi pre state = HIGH\n"); if (btrssi < rssi_thresh1) { btrssi_state = BTC_RSSI_STATE_MEDIUM; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, - "BT Rssi state switch to Medium\n"); + btc_alg_dbg(ALGO_BT_RSSI_STATE, + "BT Rssi state switch to Medium\n"); } else { btrssi_state = BTC_RSSI_STATE_STAY_HIGH; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, - "BT Rssi state stay at High\n"); + btc_alg_dbg(ALGO_BT_RSSI_STATE, + "BT Rssi state stay at High\n"); } } } @@ -173,32 +173,28 @@ static u8 halbtc8192e2ant_wifirssi_state(struct btc_coexist *btcoexist, if (wifirssi >= (rssi_thresh + BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT)) { wifirssi_state = BTC_RSSI_STATE_HIGH; - BTC_PRINT(BTC_MSG_ALGORITHM, - ALGO_WIFI_RSSI_STATE, - "wifi RSSI state switch to High\n"); + btc_alg_dbg(ALGO_WIFI_RSSI_STATE, + "wifi RSSI state switch to High\n"); } else { wifirssi_state = BTC_RSSI_STATE_STAY_LOW; - BTC_PRINT(BTC_MSG_ALGORITHM, - ALGO_WIFI_RSSI_STATE, - "wifi RSSI state stay at Low\n"); + btc_alg_dbg(ALGO_WIFI_RSSI_STATE, + "wifi RSSI state stay at Low\n"); } } else { if (wifirssi < rssi_thresh) { wifirssi_state = BTC_RSSI_STATE_LOW; - BTC_PRINT(BTC_MSG_ALGORITHM, - ALGO_WIFI_RSSI_STATE, - "wifi RSSI state switch to Low\n"); + btc_alg_dbg(ALGO_WIFI_RSSI_STATE, + "wifi RSSI state switch to Low\n"); } else { wifirssi_state = BTC_RSSI_STATE_STAY_HIGH; - BTC_PRINT(BTC_MSG_ALGORITHM, - ALGO_WIFI_RSSI_STATE, - "wifi RSSI state stay at High\n"); + btc_alg_dbg(ALGO_WIFI_RSSI_STATE, + "wifi RSSI state stay at High\n"); } } } else if (level_num == 3) { if (rssi_thresh > rssi_thresh1) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, - "wifi RSSI thresh error!!\n"); + btc_alg_dbg(ALGO_WIFI_RSSI_STATE, + "wifi RSSI thresh error!!\n"); return coex_sta->pre_wifi_rssi_state[index]; } @@ -209,14 +205,12 @@ static u8 halbtc8192e2ant_wifirssi_state(struct btc_coexist *btcoexist, if (wifirssi >= (rssi_thresh + BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT)) { wifirssi_state = BTC_RSSI_STATE_MEDIUM; - BTC_PRINT(BTC_MSG_ALGORITHM, - ALGO_WIFI_RSSI_STATE, - "wifi RSSI state switch to Medium\n"); + btc_alg_dbg(ALGO_WIFI_RSSI_STATE, + "wifi RSSI state switch to Medium\n"); } else { wifirssi_state = BTC_RSSI_STATE_STAY_LOW; - BTC_PRINT(BTC_MSG_ALGORITHM, - ALGO_WIFI_RSSI_STATE, - "wifi RSSI state stay at Low\n"); + btc_alg_dbg(ALGO_WIFI_RSSI_STATE, + "wifi RSSI state stay at Low\n"); } } else if ((coex_sta->pre_wifi_rssi_state[index] == BTC_RSSI_STATE_MEDIUM) || @@ -225,31 +219,26 
@@ static u8 halbtc8192e2ant_wifirssi_state(struct btc_coexist *btcoexist, if (wifirssi >= (rssi_thresh1 + BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT)) { wifirssi_state = BTC_RSSI_STATE_HIGH; - BTC_PRINT(BTC_MSG_ALGORITHM, - ALGO_WIFI_RSSI_STATE, - "wifi RSSI state switch to High\n"); + btc_alg_dbg(ALGO_WIFI_RSSI_STATE, + "wifi RSSI state switch to High\n"); } else if (wifirssi < rssi_thresh) { wifirssi_state = BTC_RSSI_STATE_LOW; - BTC_PRINT(BTC_MSG_ALGORITHM, - ALGO_WIFI_RSSI_STATE, - "wifi RSSI state switch to Low\n"); + btc_alg_dbg(ALGO_WIFI_RSSI_STATE, + "wifi RSSI state switch to Low\n"); } else { wifirssi_state = BTC_RSSI_STATE_STAY_MEDIUM; - BTC_PRINT(BTC_MSG_ALGORITHM, - ALGO_WIFI_RSSI_STATE, - "wifi RSSI state stay at Medium\n"); + btc_alg_dbg(ALGO_WIFI_RSSI_STATE, + "wifi RSSI state stay at Medium\n"); } } else { if (wifirssi < rssi_thresh1) { wifirssi_state = BTC_RSSI_STATE_MEDIUM; - BTC_PRINT(BTC_MSG_ALGORITHM, - ALGO_WIFI_RSSI_STATE, - "wifi RSSI state switch to Medium\n"); + btc_alg_dbg(ALGO_WIFI_RSSI_STATE, + "wifi RSSI state switch to Medium\n"); } else { wifirssi_state = BTC_RSSI_STATE_STAY_HIGH; - BTC_PRINT(BTC_MSG_ALGORITHM, - ALGO_WIFI_RSSI_STATE, - "wifi RSSI state stay at High\n"); + btc_alg_dbg(ALGO_WIFI_RSSI_STATE, + "wifi RSSI state stay at High\n"); } } } @@ -284,26 +273,26 @@ static void btc8192e2ant_monitor_bt_enable_dis(struct btc_coexist *btcoexist) bt_disabled = false; btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_DISABLE, &bt_disabled); - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, - "[BTCoex], BT is enabled !!\n"); + btc_alg_dbg(ALGO_BT_MONITOR, + "[BTCoex], BT is enabled !!\n"); } else { bt_disable_cnt++; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, - "[BTCoex], bt all counters = 0, %d times!!\n", - bt_disable_cnt); + btc_alg_dbg(ALGO_BT_MONITOR, + "[BTCoex], bt all counters = 0, %d times!!\n", + bt_disable_cnt); if (bt_disable_cnt >= 2) { bt_disabled = true; btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_DISABLE, &bt_disabled); - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, - "[BTCoex], BT is disabled !!\n"); + btc_alg_dbg(ALGO_BT_MONITOR, + "[BTCoex], BT is disabled !!\n"); } } if (pre_bt_disabled != bt_disabled) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, - "[BTCoex], BT is from %s to %s!!\n", - (pre_bt_disabled ? "disabled" : "enabled"), - (bt_disabled ? "disabled" : "enabled")); + btc_alg_dbg(ALGO_BT_MONITOR, + "[BTCoex], BT is from %s to %s!!\n", + (pre_bt_disabled ? "disabled" : "enabled"), + (bt_disabled ? 
"disabled" : "enabled")); pre_bt_disabled = bt_disabled; } } @@ -499,12 +488,12 @@ static void halbtc8192e2ant_monitor_bt_ctr(struct btc_coexist *btcoexist) coex_sta->low_priority_tx = reg_lp_tx; coex_sta->low_priority_rx = reg_lp_rx; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, - "[BTCoex] High Priority Tx/Rx (reg 0x%x) = 0x%x(%d)/0x%x(%d)\n", - reg_hp_txrx, reg_hp_tx, reg_hp_tx, reg_hp_rx, reg_hp_rx); - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, - "[BTCoex] Low Priority Tx/Rx (reg 0x%x) = 0x%x(%d)/0x%x(%d)\n", - reg_lp_txrx, reg_lp_tx, reg_lp_tx, reg_lp_rx, reg_lp_rx); + btc_alg_dbg(ALGO_BT_MONITOR, + "[BTCoex] High Priority Tx/Rx (reg 0x%x) = 0x%x(%d)/0x%x(%d)\n", + reg_hp_txrx, reg_hp_tx, reg_hp_tx, reg_hp_rx, reg_hp_rx); + btc_alg_dbg(ALGO_BT_MONITOR, + "[BTCoex] Low Priority Tx/Rx (reg 0x%x) = 0x%x(%d)/0x%x(%d)\n", + reg_lp_txrx, reg_lp_tx, reg_lp_tx, reg_lp_rx, reg_lp_rx); /* reset counter */ btcoexist->btc_write_1byte(btcoexist, 0x76e, 0xc); @@ -518,9 +507,9 @@ static void halbtc8192e2ant_querybt_info(struct btc_coexist *btcoexist) h2c_parameter[0] |= BIT0; /* trigger */ - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, - "[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n", - h2c_parameter[0]); + btc_alg_dbg(ALGO_TRACE_FW_EXEC, + "[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n", + h2c_parameter[0]); btcoexist->btc_fill_h2c(btcoexist, 0x61, 1, h2c_parameter); } @@ -592,8 +581,8 @@ static u8 halbtc8192e2ant_action_algorithm(struct btc_coexist *btcoexist) btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hson); if (!bt_link_info->bt_link_exist) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "No BT link exists!!!\n"); + btc_alg_dbg(ALGO_TRACE, + "No BT link exists!!!\n"); return algorithm; } @@ -608,27 +597,27 @@ static u8 halbtc8192e2ant_action_algorithm(struct btc_coexist *btcoexist) if (numdiffprofile == 1) { if (bt_link_info->sco_exist) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "SCO only\n"); + btc_alg_dbg(ALGO_TRACE, + "SCO only\n"); algorithm = BT_8192E_2ANT_COEX_ALGO_SCO; } else { if (bt_link_info->hid_exist) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "HID only\n"); + btc_alg_dbg(ALGO_TRACE, + "HID only\n"); algorithm = BT_8192E_2ANT_COEX_ALGO_HID; } else if (bt_link_info->a2dp_exist) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "A2DP only\n"); + btc_alg_dbg(ALGO_TRACE, + "A2DP only\n"); algorithm = BT_8192E_2ANT_COEX_ALGO_A2DP; } else if (bt_link_info->pan_exist) { if (bt_hson) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "PAN(HS) only\n"); + btc_alg_dbg(ALGO_TRACE, + "PAN(HS) only\n"); algorithm = BT_8192E_2ANT_COEX_ALGO_PANHS; } else { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "PAN(EDR) only\n"); + btc_alg_dbg(ALGO_TRACE, + "PAN(EDR) only\n"); algorithm = BT_8192E_2ANT_COEX_ALGO_PANEDR; } @@ -637,21 +626,21 @@ static u8 halbtc8192e2ant_action_algorithm(struct btc_coexist *btcoexist) } else if (numdiffprofile == 2) { if (bt_link_info->sco_exist) { if (bt_link_info->hid_exist) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "SCO + HID\n"); + btc_alg_dbg(ALGO_TRACE, + "SCO + HID\n"); algorithm = BT_8192E_2ANT_COEX_ALGO_SCO; } else if (bt_link_info->a2dp_exist) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "SCO + A2DP ==> SCO\n"); + btc_alg_dbg(ALGO_TRACE, + "SCO + A2DP ==> SCO\n"); algorithm = BT_8192E_2ANT_COEX_ALGO_PANEDR_HID; } else if (bt_link_info->pan_exist) { if (bt_hson) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "SCO + PAN(HS)\n"); + btc_alg_dbg(ALGO_TRACE, + "SCO + PAN(HS)\n"); algorithm = BT_8192E_2ANT_COEX_ALGO_SCO; } else { - 
BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "SCO + PAN(EDR)\n"); + btc_alg_dbg(ALGO_TRACE, + "SCO + PAN(EDR)\n"); algorithm = BT_8192E_2ANT_COEX_ALGO_SCO_PAN; } @@ -660,38 +649,38 @@ static u8 halbtc8192e2ant_action_algorithm(struct btc_coexist *btcoexist) if (bt_link_info->hid_exist && bt_link_info->a2dp_exist) { if (stack_info->num_of_hid >= 2) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "HID*2 + A2DP\n"); + btc_alg_dbg(ALGO_TRACE, + "HID*2 + A2DP\n"); algorithm = BT_8192E_2ANT_COEX_ALGO_HID_A2DP_PANEDR; } else { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "HID + A2DP\n"); + btc_alg_dbg(ALGO_TRACE, + "HID + A2DP\n"); algorithm = BT_8192E_2ANT_COEX_ALGO_HID_A2DP; } } else if (bt_link_info->hid_exist && bt_link_info->pan_exist) { if (bt_hson) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "HID + PAN(HS)\n"); + btc_alg_dbg(ALGO_TRACE, + "HID + PAN(HS)\n"); algorithm = BT_8192E_2ANT_COEX_ALGO_HID; } else { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "HID + PAN(EDR)\n"); + btc_alg_dbg(ALGO_TRACE, + "HID + PAN(EDR)\n"); algorithm = BT_8192E_2ANT_COEX_ALGO_PANEDR_HID; } } else if (bt_link_info->pan_exist && bt_link_info->a2dp_exist) { if (bt_hson) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "A2DP + PAN(HS)\n"); + btc_alg_dbg(ALGO_TRACE, + "A2DP + PAN(HS)\n"); algorithm = BT_8192E_2ANT_COEX_ALGO_A2DP_PANHS; } else { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "A2DP + PAN(EDR)\n"); + btc_alg_dbg(ALGO_TRACE, + "A2DP + PAN(EDR)\n"); algorithm = BT_8192E_2ANT_COEX_ALGO_PANEDR_A2DP; } @@ -701,30 +690,30 @@ static u8 halbtc8192e2ant_action_algorithm(struct btc_coexist *btcoexist) if (bt_link_info->sco_exist) { if (bt_link_info->hid_exist && bt_link_info->a2dp_exist) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "SCO + HID + A2DP ==> HID\n"); + btc_alg_dbg(ALGO_TRACE, + "SCO + HID + A2DP ==> HID\n"); algorithm = BT_8192E_2ANT_COEX_ALGO_PANEDR_HID; } else if (bt_link_info->hid_exist && bt_link_info->pan_exist) { if (bt_hson) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "SCO + HID + PAN(HS)\n"); + btc_alg_dbg(ALGO_TRACE, + "SCO + HID + PAN(HS)\n"); algorithm = BT_8192E_2ANT_COEX_ALGO_SCO; } else { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "SCO + HID + PAN(EDR)\n"); + btc_alg_dbg(ALGO_TRACE, + "SCO + HID + PAN(EDR)\n"); algorithm = BT_8192E_2ANT_COEX_ALGO_SCO_PAN; } } else if (bt_link_info->pan_exist && bt_link_info->a2dp_exist) { if (bt_hson) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "SCO + A2DP + PAN(HS)\n"); + btc_alg_dbg(ALGO_TRACE, + "SCO + A2DP + PAN(HS)\n"); algorithm = BT_8192E_2ANT_COEX_ALGO_SCO; } else { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "SCO + A2DP + PAN(EDR)\n"); + btc_alg_dbg(ALGO_TRACE, + "SCO + A2DP + PAN(EDR)\n"); algorithm = BT_8192E_2ANT_COEX_ALGO_PANEDR_HID; } @@ -734,13 +723,13 @@ static u8 halbtc8192e2ant_action_algorithm(struct btc_coexist *btcoexist) bt_link_info->pan_exist && bt_link_info->a2dp_exist) { if (bt_hson) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "HID + A2DP + PAN(HS)\n"); + btc_alg_dbg(ALGO_TRACE, + "HID + A2DP + PAN(HS)\n"); algorithm = BT_8192E_2ANT_COEX_ALGO_HID_A2DP; } else { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "HID + A2DP + PAN(EDR)\n"); + btc_alg_dbg(ALGO_TRACE, + "HID + A2DP + PAN(EDR)\n"); algorithm = BT_8192E_2ANT_COEX_ALGO_HID_A2DP_PANEDR; } @@ -752,12 +741,12 @@ static u8 halbtc8192e2ant_action_algorithm(struct btc_coexist *btcoexist) bt_link_info->pan_exist && bt_link_info->a2dp_exist) { if (bt_hson) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "ErrorSCO+HID+A2DP+PAN(HS)\n"); + btc_alg_dbg(ALGO_TRACE, + 
"ErrorSCO+HID+A2DP+PAN(HS)\n"); } else { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "SCO+HID+A2DP+PAN(EDR)\n"); + btc_alg_dbg(ALGO_TRACE, + "SCO+HID+A2DP+PAN(EDR)\n"); algorithm = BT_8192E_2ANT_COEX_ALGO_PANEDR_HID; } @@ -778,10 +767,10 @@ static void halbtc8192e2ant_setfw_dac_swinglevel(struct btc_coexist *btcoexist, */ h2c_parameter[0] = dac_swinglvl; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, - "[BTCoex], Set Dac Swing Level = 0x%x\n", dac_swinglvl); - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, - "[BTCoex], FW write 0x64 = 0x%x\n", h2c_parameter[0]); + btc_alg_dbg(ALGO_TRACE_FW_EXEC, + "[BTCoex], Set Dac Swing Level = 0x%x\n", dac_swinglvl); + btc_alg_dbg(ALGO_TRACE_FW_EXEC, + "[BTCoex], FW write 0x64 = 0x%x\n", h2c_parameter[0]); btcoexist->btc_fill_h2c(btcoexist, 0x64, 1, h2c_parameter); } @@ -793,9 +782,9 @@ static void halbtc8192e2ant_set_fwdec_btpwr(struct btc_coexist *btcoexist, h2c_parameter[0] = dec_btpwr_lvl; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, - "[BTCoex] decrease Bt Power level = %d, FW write 0x62 = 0x%x\n", - dec_btpwr_lvl, h2c_parameter[0]); + btc_alg_dbg(ALGO_TRACE_FW_EXEC, + "[BTCoex] decrease Bt Power level = %d, FW write 0x62 = 0x%x\n", + dec_btpwr_lvl, h2c_parameter[0]); btcoexist->btc_fill_h2c(btcoexist, 0x62, 1, h2c_parameter); } @@ -803,15 +792,15 @@ static void halbtc8192e2ant_set_fwdec_btpwr(struct btc_coexist *btcoexist, static void halbtc8192e2ant_dec_btpwr(struct btc_coexist *btcoexist, bool force_exec, u8 dec_btpwr_lvl) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, - "[BTCoex], %s Dec BT power level = %d\n", - (force_exec ? "force to" : ""), dec_btpwr_lvl); + btc_alg_dbg(ALGO_TRACE_FW, + "[BTCoex], %s Dec BT power level = %d\n", + (force_exec ? "force to" : ""), dec_btpwr_lvl); coex_dm->cur_dec_bt_pwr = dec_btpwr_lvl; if (!force_exec) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, - "[BTCoex], preBtDecPwrLvl=%d, curBtDecPwrLvl=%d\n", - coex_dm->pre_dec_bt_pwr, coex_dm->cur_dec_bt_pwr); + btc_alg_dbg(ALGO_TRACE_FW_DETAIL, + "[BTCoex], preBtDecPwrLvl=%d, curBtDecPwrLvl=%d\n", + coex_dm->pre_dec_bt_pwr, coex_dm->cur_dec_bt_pwr); } halbtc8192e2ant_set_fwdec_btpwr(btcoexist, coex_dm->cur_dec_bt_pwr); @@ -828,10 +817,10 @@ static void halbtc8192e2ant_set_bt_autoreport(struct btc_coexist *btcoexist, if (enable_autoreport) h2c_parameter[0] |= BIT0; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, - "[BTCoex], BT FW auto report : %s, FW write 0x68 = 0x%x\n", - (enable_autoreport ? "Enabled!!" : "Disabled!!"), - h2c_parameter[0]); + btc_alg_dbg(ALGO_TRACE_FW_EXEC, + "[BTCoex], BT FW auto report : %s, FW write 0x68 = 0x%x\n", + (enable_autoreport ? "Enabled!!" : "Disabled!!"), + h2c_parameter[0]); btcoexist->btc_fill_h2c(btcoexist, 0x68, 1, h2c_parameter); } @@ -840,17 +829,17 @@ static void halbtc8192e2ant_bt_autoreport(struct btc_coexist *btcoexist, bool force_exec, bool enable_autoreport) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, - "[BTCoex], %s BT Auto report = %s\n", - (force_exec ? "force to" : ""), - ((enable_autoreport) ? "Enabled" : "Disabled")); + btc_alg_dbg(ALGO_TRACE_FW, + "[BTCoex], %s BT Auto report = %s\n", + (force_exec ? "force to" : ""), + ((enable_autoreport) ? 
"Enabled" : "Disabled")); coex_dm->cur_bt_auto_report = enable_autoreport; if (!force_exec) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, - "[BTCoex] bPreBtAutoReport=%d, bCurBtAutoReport=%d\n", - coex_dm->pre_bt_auto_report, - coex_dm->cur_bt_auto_report); + btc_alg_dbg(ALGO_TRACE_FW_DETAIL, + "[BTCoex] bPreBtAutoReport=%d, bCurBtAutoReport=%d\n", + coex_dm->pre_bt_auto_report, + coex_dm->cur_bt_auto_report); if (coex_dm->pre_bt_auto_report == coex_dm->cur_bt_auto_report) return; @@ -864,16 +853,16 @@ static void halbtc8192e2ant_bt_autoreport(struct btc_coexist *btcoexist, static void halbtc8192e2ant_fw_dac_swinglvl(struct btc_coexist *btcoexist, bool force_exec, u8 fw_dac_swinglvl) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, - "[BTCoex], %s set FW Dac Swing level = %d\n", - (force_exec ? "force to" : ""), fw_dac_swinglvl); + btc_alg_dbg(ALGO_TRACE_FW, + "[BTCoex], %s set FW Dac Swing level = %d\n", + (force_exec ? "force to" : ""), fw_dac_swinglvl); coex_dm->cur_fw_dac_swing_lvl = fw_dac_swinglvl; if (!force_exec) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, - "[BTCoex] preFwDacSwingLvl=%d, curFwDacSwingLvl=%d\n", - coex_dm->pre_fw_dac_swing_lvl, - coex_dm->cur_fw_dac_swing_lvl); + btc_alg_dbg(ALGO_TRACE_FW_DETAIL, + "[BTCoex] preFwDacSwingLvl=%d, curFwDacSwingLvl=%d\n", + coex_dm->pre_fw_dac_swing_lvl, + coex_dm->cur_fw_dac_swing_lvl); if (coex_dm->pre_fw_dac_swing_lvl == coex_dm->cur_fw_dac_swing_lvl) @@ -891,8 +880,8 @@ static void btc8192e2ant_set_sw_rf_rx_lpf_corner(struct btc_coexist *btcoexist, { if (rx_rf_shrink_on) { /* Shrink RF Rx LPF corner */ - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, - "[BTCoex], Shrink RF Rx LPF corner!!\n"); + btc_alg_dbg(ALGO_TRACE_SW_EXEC, + "[BTCoex], Shrink RF Rx LPF corner!!\n"); btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1e, 0xfffff, 0xffffc); } else { @@ -900,8 +889,8 @@ static void btc8192e2ant_set_sw_rf_rx_lpf_corner(struct btc_coexist *btcoexist, * After initialized, we can use coex_dm->btRf0x1eBackup */ if (btcoexist->initilized) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, - "[BTCoex], Resume RF Rx LPF corner!!\n"); + btc_alg_dbg(ALGO_TRACE_SW_EXEC, + "[BTCoex], Resume RF Rx LPF corner!!\n"); btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1e, 0xfffff, coex_dm->bt_rf0x1e_backup); @@ -912,17 +901,17 @@ static void btc8192e2ant_set_sw_rf_rx_lpf_corner(struct btc_coexist *btcoexist, static void halbtc8192e2ant_rf_shrink(struct btc_coexist *btcoexist, bool force_exec, bool rx_rf_shrink_on) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, - "[BTCoex], %s turn Rx RF Shrink = %s\n", - (force_exec ? "force to" : ""), - ((rx_rf_shrink_on) ? "ON" : "OFF")); + btc_alg_dbg(ALGO_TRACE_SW, + "[BTCoex], %s turn Rx RF Shrink = %s\n", + (force_exec ? "force to" : ""), + ((rx_rf_shrink_on) ? 
"ON" : "OFF")); coex_dm->cur_rf_rx_lpf_shrink = rx_rf_shrink_on; if (!force_exec) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, - "[BTCoex]bPreRfRxLpfShrink=%d,bCurRfRxLpfShrink=%d\n", - coex_dm->pre_rf_rx_lpf_shrink, - coex_dm->cur_rf_rx_lpf_shrink); + btc_alg_dbg(ALGO_TRACE_SW_DETAIL, + "[BTCoex]bPreRfRxLpfShrink=%d,bCurRfRxLpfShrink=%d\n", + coex_dm->pre_rf_rx_lpf_shrink, + coex_dm->cur_rf_rx_lpf_shrink); if (coex_dm->pre_rf_rx_lpf_shrink == coex_dm->cur_rf_rx_lpf_shrink) @@ -939,8 +928,8 @@ static void halbtc8192e2ant_set_dac_swingreg(struct btc_coexist *btcoexist, { u8 val = (u8)level; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, - "[BTCoex], Write SwDacSwing = 0x%x\n", level); + btc_alg_dbg(ALGO_TRACE_SW_EXEC, + "[BTCoex], Write SwDacSwing = 0x%x\n", level); btcoexist->btc_write_1byte_bitmask(btcoexist, 0x883, 0x3e, val); } @@ -958,22 +947,22 @@ static void halbtc8192e2ant_DacSwing(struct btc_coexist *btcoexist, bool force_exec, bool dac_swingon, u32 dac_swinglvl) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, - "[BTCoex], %s turn DacSwing=%s, dac_swinglvl = 0x%x\n", - (force_exec ? "force to" : ""), - ((dac_swingon) ? "ON" : "OFF"), dac_swinglvl); + btc_alg_dbg(ALGO_TRACE_SW, + "[BTCoex], %s turn DacSwing=%s, dac_swinglvl = 0x%x\n", + (force_exec ? "force to" : ""), + ((dac_swingon) ? "ON" : "OFF"), dac_swinglvl); coex_dm->cur_dac_swing_on = dac_swingon; coex_dm->cur_dac_swing_lvl = dac_swinglvl; if (!force_exec) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, - "[BTCoex], bPreDacSwingOn=%d, preDacSwingLvl = 0x%x, ", - coex_dm->pre_dac_swing_on, - coex_dm->pre_dac_swing_lvl); - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, - "bCurDacSwingOn=%d, curDacSwingLvl = 0x%x\n", - coex_dm->cur_dac_swing_on, - coex_dm->cur_dac_swing_lvl); + btc_alg_dbg(ALGO_TRACE_SW_DETAIL, + "[BTCoex], bPreDacSwingOn=%d, preDacSwingLvl = 0x%x, ", + coex_dm->pre_dac_swing_on, + coex_dm->pre_dac_swing_lvl); + btc_alg_dbg(ALGO_TRACE_SW_DETAIL, + "bCurDacSwingOn=%d, curDacSwingLvl = 0x%x\n", + coex_dm->cur_dac_swing_on, + coex_dm->cur_dac_swing_lvl); if ((coex_dm->pre_dac_swing_on == coex_dm->cur_dac_swing_on) && (coex_dm->pre_dac_swing_lvl == coex_dm->cur_dac_swing_lvl)) @@ -991,8 +980,8 @@ static void halbtc8192e2ant_set_agc_table(struct btc_coexist *btcoexist, { /* BB AGC Gain Table */ if (agc_table_en) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, - "[BTCoex], BB Agc Table On!\n"); + btc_alg_dbg(ALGO_TRACE_SW_EXEC, + "[BTCoex], BB Agc Table On!\n"); btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x0a1A0001); btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x091B0001); btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x081C0001); @@ -1000,8 +989,8 @@ static void halbtc8192e2ant_set_agc_table(struct btc_coexist *btcoexist, btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x061E0001); btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x051F0001); } else { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, - "[BTCoex], BB Agc Table Off!\n"); + btc_alg_dbg(ALGO_TRACE_SW_EXEC, + "[BTCoex], BB Agc Table Off!\n"); btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xaa1A0001); btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa91B0001); btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa81C0001); @@ -1014,16 +1003,17 @@ static void halbtc8192e2ant_set_agc_table(struct btc_coexist *btcoexist, static void halbtc8192e2ant_AgcTable(struct btc_coexist *btcoexist, bool force_exec, bool agc_table_en) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, - "[BTCoex], %s %s Agc Table\n", - (force_exec ? 
"force to" : ""), - ((agc_table_en) ? "Enable" : "Disable")); + btc_alg_dbg(ALGO_TRACE_SW, + "[BTCoex], %s %s Agc Table\n", + (force_exec ? "force to" : ""), + ((agc_table_en) ? "Enable" : "Disable")); coex_dm->cur_agc_table_en = agc_table_en; if (!force_exec) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, - "[BTCoex], bPreAgcTableEn=%d, bCurAgcTableEn=%d\n", - coex_dm->pre_agc_table_en, coex_dm->cur_agc_table_en); + btc_alg_dbg(ALGO_TRACE_SW_DETAIL, + "[BTCoex], bPreAgcTableEn=%d, bCurAgcTableEn=%d\n", + coex_dm->pre_agc_table_en, + coex_dm->cur_agc_table_en); if (coex_dm->pre_agc_table_en == coex_dm->cur_agc_table_en) return; @@ -1037,20 +1027,20 @@ static void halbtc8192e2ant_set_coex_table(struct btc_coexist *btcoexist, u32 val0x6c0, u32 val0x6c4, u32 val0x6c8, u8 val0x6cc) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, - "[BTCoex], set coex table, set 0x6c0 = 0x%x\n", val0x6c0); + btc_alg_dbg(ALGO_TRACE_SW_EXEC, + "[BTCoex], set coex table, set 0x6c0 = 0x%x\n", val0x6c0); btcoexist->btc_write_4byte(btcoexist, 0x6c0, val0x6c0); - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, - "[BTCoex], set coex table, set 0x6c4 = 0x%x\n", val0x6c4); + btc_alg_dbg(ALGO_TRACE_SW_EXEC, + "[BTCoex], set coex table, set 0x6c4 = 0x%x\n", val0x6c4); btcoexist->btc_write_4byte(btcoexist, 0x6c4, val0x6c4); - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, - "[BTCoex], set coex table, set 0x6c8 = 0x%x\n", val0x6c8); + btc_alg_dbg(ALGO_TRACE_SW_EXEC, + "[BTCoex], set coex table, set 0x6c8 = 0x%x\n", val0x6c8); btcoexist->btc_write_4byte(btcoexist, 0x6c8, val0x6c8); - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, - "[BTCoex], set coex table, set 0x6cc = 0x%x\n", val0x6cc); + btc_alg_dbg(ALGO_TRACE_SW_EXEC, + "[BTCoex], set coex table, set 0x6cc = 0x%x\n", val0x6cc); btcoexist->btc_write_1byte(btcoexist, 0x6cc, val0x6cc); } @@ -1059,30 +1049,30 @@ static void halbtc8192e2ant_coex_table(struct btc_coexist *btcoexist, u32 val0x6c0, u32 val0x6c4, u32 val0x6c8, u8 val0x6cc) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, - "[BTCoex], %s write Coex Table 0x6c0 = 0x%x, ", - (force_exec ? "force to" : ""), val0x6c0); - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, - "0x6c4 = 0x%x, 0x6c8 = 0x%x, 0x6cc = 0x%x\n", - val0x6c4, val0x6c8, val0x6cc); + btc_alg_dbg(ALGO_TRACE_SW, + "[BTCoex], %s write Coex Table 0x6c0 = 0x%x, ", + (force_exec ? 
"force to" : ""), val0x6c0); + btc_alg_dbg(ALGO_TRACE_SW, + "0x6c4 = 0x%x, 0x6c8 = 0x%x, 0x6cc = 0x%x\n", + val0x6c4, val0x6c8, val0x6cc); coex_dm->cur_val0x6c0 = val0x6c0; coex_dm->cur_val0x6c4 = val0x6c4; coex_dm->cur_val0x6c8 = val0x6c8; coex_dm->cur_val0x6cc = val0x6cc; if (!force_exec) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, - "[BTCoex], preVal0x6c0 = 0x%x, preVal0x6c4 = 0x%x, ", - coex_dm->pre_val0x6c0, coex_dm->pre_val0x6c4); - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, - "preVal0x6c8 = 0x%x, preVal0x6cc = 0x%x !!\n", - coex_dm->pre_val0x6c8, coex_dm->pre_val0x6cc); - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, - "[BTCoex], curVal0x6c0 = 0x%x, curVal0x6c4 = 0x%x,\n", - coex_dm->cur_val0x6c0, coex_dm->cur_val0x6c4); - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, - "curVal0x6c8 = 0x%x, curVal0x6cc = 0x%x !!\n", - coex_dm->cur_val0x6c8, coex_dm->cur_val0x6cc); + btc_alg_dbg(ALGO_TRACE_SW_DETAIL, + "[BTCoex], preVal0x6c0 = 0x%x, preVal0x6c4 = 0x%x, ", + coex_dm->pre_val0x6c0, coex_dm->pre_val0x6c4); + btc_alg_dbg(ALGO_TRACE_SW_DETAIL, + "preVal0x6c8 = 0x%x, preVal0x6cc = 0x%x !!\n", + coex_dm->pre_val0x6c8, coex_dm->pre_val0x6cc); + btc_alg_dbg(ALGO_TRACE_SW_DETAIL, + "[BTCoex], curVal0x6c0 = 0x%x, curVal0x6c4 = 0x%x\n", + coex_dm->cur_val0x6c0, coex_dm->cur_val0x6c4); + btc_alg_dbg(ALGO_TRACE_SW_DETAIL, + "curVal0x6c8 = 0x%x, curVal0x6cc = 0x%x !!\n", + coex_dm->cur_val0x6c8, coex_dm->cur_val0x6cc); if ((coex_dm->pre_val0x6c0 == coex_dm->cur_val0x6c0) && (coex_dm->pre_val0x6c4 == coex_dm->cur_val0x6c4) && @@ -1136,9 +1126,9 @@ static void halbtc8192e2ant_set_fw_ignore_wlanact(struct btc_coexist *btcoexist, if (enable) h2c_parameter[0] |= BIT0; /* function enable */ - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, - "[BTCoex]set FW for BT Ignore Wlan_Act, FW write 0x63 = 0x%x\n", - h2c_parameter[0]); + btc_alg_dbg(ALGO_TRACE_FW_EXEC, + "[BTCoex]set FW for BT Ignore Wlan_Act, FW write 0x63 = 0x%x\n", + h2c_parameter[0]); btcoexist->btc_fill_h2c(btcoexist, 0x63, 1, h2c_parameter); } @@ -1146,18 +1136,18 @@ static void halbtc8192e2ant_set_fw_ignore_wlanact(struct btc_coexist *btcoexist, static void halbtc8192e2ant_IgnoreWlanAct(struct btc_coexist *btcoexist, bool force_exec, bool enable) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, - "[BTCoex], %s turn Ignore WlanAct %s\n", - (force_exec ? "force to" : ""), (enable ? "ON" : "OFF")); + btc_alg_dbg(ALGO_TRACE_FW, + "[BTCoex], %s turn Ignore WlanAct %s\n", + (force_exec ? "force to" : ""), (enable ? 
"ON" : "OFF")); coex_dm->cur_ignore_wlan_act = enable; if (!force_exec) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, - "[BTCoex], bPreIgnoreWlanAct = %d ", - coex_dm->pre_ignore_wlan_act); - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, - "bCurIgnoreWlanAct = %d!!\n", - coex_dm->cur_ignore_wlan_act); + btc_alg_dbg(ALGO_TRACE_FW_DETAIL, + "[BTCoex], bPreIgnoreWlanAct = %d ", + coex_dm->pre_ignore_wlan_act); + btc_alg_dbg(ALGO_TRACE_FW_DETAIL, + "bCurIgnoreWlanAct = %d!!\n", + coex_dm->cur_ignore_wlan_act); if (coex_dm->pre_ignore_wlan_act == coex_dm->cur_ignore_wlan_act) @@ -1185,11 +1175,11 @@ static void halbtc8192e2ant_SetFwPstdma(struct btc_coexist *btcoexist, u8 byte1, coex_dm->ps_tdma_para[3] = byte4; coex_dm->ps_tdma_para[4] = byte5; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, - "[BTCoex], FW write 0x60(5bytes) = 0x%x%08x\n", - h2c_parameter[0], - h2c_parameter[1] << 24 | h2c_parameter[2] << 16 | - h2c_parameter[3] << 8 | h2c_parameter[4]); + btc_alg_dbg(ALGO_TRACE_FW_EXEC, + "[BTCoex], FW write 0x60(5bytes) = 0x%x%08x\n", + h2c_parameter[0], + h2c_parameter[1] << 24 | h2c_parameter[2] << 16 | + h2c_parameter[3] << 8 | h2c_parameter[4]); btcoexist->btc_fill_h2c(btcoexist, 0x60, 5, h2c_parameter); } @@ -1213,20 +1203,20 @@ static void btc8192e2ant_sw_mec2(struct btc_coexist *btcoexist, static void halbtc8192e2ant_ps_tdma(struct btc_coexist *btcoexist, bool force_exec, bool turn_on, u8 type) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, - "[BTCoex], %s turn %s PS TDMA, type=%d\n", - (force_exec ? "force to" : ""), - (turn_on ? "ON" : "OFF"), type); + btc_alg_dbg(ALGO_TRACE_FW, + "[BTCoex], %s turn %s PS TDMA, type=%d\n", + (force_exec ? "force to" : ""), + (turn_on ? "ON" : "OFF"), type); coex_dm->cur_ps_tdma_on = turn_on; coex_dm->cur_ps_tdma = type; if (!force_exec) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, - "[BTCoex], bPrePsTdmaOn = %d, bCurPsTdmaOn = %d!!\n", - coex_dm->pre_ps_tdma_on, coex_dm->cur_ps_tdma_on); - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, - "[BTCoex], prePsTdma = %d, curPsTdma = %d!!\n", - coex_dm->pre_ps_tdma, coex_dm->cur_ps_tdma); + btc_alg_dbg(ALGO_TRACE_FW_DETAIL, + "[BTCoex], bPrePsTdmaOn = %d, bCurPsTdmaOn = %d!!\n", + coex_dm->pre_ps_tdma_on, coex_dm->cur_ps_tdma_on); + btc_alg_dbg(ALGO_TRACE_FW_DETAIL, + "[BTCoex], prePsTdma = %d, curPsTdma = %d!!\n", + coex_dm->pre_ps_tdma, coex_dm->cur_ps_tdma); if ((coex_dm->pre_ps_tdma_on == coex_dm->cur_ps_tdma_on) && (coex_dm->pre_ps_tdma == coex_dm->cur_ps_tdma)) @@ -1353,8 +1343,8 @@ static void halbtc8192e2ant_set_switch_sstype(struct btc_coexist *btcoexist, u8 mimops = BTC_MIMO_PS_DYNAMIC; u32 disra_mask = 0x0; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], REAL set SS Type = %d\n", sstype); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], REAL set SS Type = %d\n", sstype); disra_mask = halbtc8192e2ant_decidera_mask(btcoexist, sstype, coex_dm->curra_masktype); @@ -1386,9 +1376,9 @@ static void halbtc8192e2ant_set_switch_sstype(struct btc_coexist *btcoexist, static void halbtc8192e2ant_switch_sstype(struct btc_coexist *btcoexist, bool force_exec, u8 new_sstype) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], %s Switch SS Type = %d\n", - (force_exec ? "force to" : ""), new_sstype); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], %s Switch SS Type = %d\n", + (force_exec ? 
"force to" : ""), new_sstype); coex_dm->cur_sstype = new_sstype; if (!force_exec) { @@ -1469,8 +1459,8 @@ static bool halbtc8192e2ant_is_common_action(struct btc_coexist *btcoexist) btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER, &low_pwr_disable); - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], Wifi non-connected idle!!\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], Wifi non-connected idle!!\n"); if ((BT_8192E_2ANT_BT_STATUS_NON_CONNECTED_IDLE == coex_dm->bt_status) || @@ -1506,8 +1496,8 @@ static bool halbtc8192e2ant_is_common_action(struct btc_coexist *btcoexist) BTC_SET_ACT_DISABLE_LOW_POWER, &low_pwr_disable); - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "Wifi connected + BT non connected-idle!!\n"); + btc_alg_dbg(ALGO_TRACE, + "Wifi connected + BT non connected-idle!!\n"); halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 2); @@ -1534,8 +1524,8 @@ static bool halbtc8192e2ant_is_common_action(struct btc_coexist *btcoexist) if (bt_hson) return false; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "Wifi connected + BT connected-idle!!\n"); + btc_alg_dbg(ALGO_TRACE, + "Wifi connected + BT connected-idle!!\n"); halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 2); @@ -1560,12 +1550,12 @@ static bool halbtc8192e2ant_is_common_action(struct btc_coexist *btcoexist) &low_pwr_disable); if (wifi_busy) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "Wifi Connected-Busy + BT Busy!!\n"); + btc_alg_dbg(ALGO_TRACE, + "Wifi Connected-Busy + BT Busy!!\n"); common = false; } else { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "Wifi Connected-Idle + BT Busy!!\n"); + btc_alg_dbg(ALGO_TRACE, + "Wifi Connected-Idle + BT Busy!!\n"); halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 1); @@ -1592,9 +1582,8 @@ static void btc8192e_int1(struct btc_coexist *btcoexist, bool tx_pause, int result) { if (tx_pause) { - BTC_PRINT(BTC_MSG_ALGORITHM, - ALGO_TRACE_FW_DETAIL, - "[BTCoex], TxPause = 1\n"); + btc_alg_dbg(ALGO_TRACE_FW_DETAIL, + "[BTCoex], TxPause = 1\n"); if (coex_dm->cur_ps_tdma == 71) { halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, @@ -1689,9 +1678,8 @@ static void btc8192e_int1(struct btc_coexist *btcoexist, bool tx_pause, } } } else { - BTC_PRINT(BTC_MSG_ALGORITHM, - ALGO_TRACE_FW_DETAIL, - "[BTCoex], TxPause = 0\n"); + btc_alg_dbg(ALGO_TRACE_FW_DETAIL, + "[BTCoex], TxPause = 0\n"); if (coex_dm->cur_ps_tdma == 5) { halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 71); @@ -1795,9 +1783,8 @@ static void btc8192e_int2(struct btc_coexist *btcoexist, bool tx_pause, int result) { if (tx_pause) { - BTC_PRINT(BTC_MSG_ALGORITHM, - ALGO_TRACE_FW_DETAIL, - "[BTCoex], TxPause = 1\n"); + btc_alg_dbg(ALGO_TRACE_FW_DETAIL, + "[BTCoex], TxPause = 1\n"); if (coex_dm->cur_ps_tdma == 1) { halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 6); @@ -1886,9 +1873,8 @@ static void btc8192e_int2(struct btc_coexist *btcoexist, bool tx_pause, } } } else { - BTC_PRINT(BTC_MSG_ALGORITHM, - ALGO_TRACE_FW_DETAIL, - "[BTCoex], TxPause = 0\n"); + btc_alg_dbg(ALGO_TRACE_FW_DETAIL, + "[BTCoex], TxPause = 0\n"); if (coex_dm->cur_ps_tdma == 5) { halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 2); @@ -1983,9 +1969,8 @@ static void btc8192e_int3(struct btc_coexist *btcoexist, bool tx_pause, int result) { if (tx_pause) { - BTC_PRINT(BTC_MSG_ALGORITHM, - ALGO_TRACE_FW_DETAIL, - "[BTCoex], TxPause = 1\n"); + btc_alg_dbg(ALGO_TRACE_FW_DETAIL, + "[BTCoex], TxPause = 1\n"); if (coex_dm->cur_ps_tdma == 1) { halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 7); @@ -2074,9 +2059,8 @@ static void 
btc8192e_int3(struct btc_coexist *btcoexist, bool tx_pause, } } } else { - BTC_PRINT(BTC_MSG_ALGORITHM, - ALGO_TRACE_FW_DETAIL, - "[BTCoex], TxPause = 0\n"); + btc_alg_dbg(ALGO_TRACE_FW_DETAIL, + "[BTCoex], TxPause = 0\n"); if (coex_dm->cur_ps_tdma == 5) { halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 3); @@ -2178,13 +2162,13 @@ static void halbtc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist, int result; u8 retry_cnt = 0; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, - "[BTCoex], TdmaDurationAdjust()\n"); + btc_alg_dbg(ALGO_TRACE_FW, + "[BTCoex], TdmaDurationAdjust()\n"); if (!coex_dm->auto_tdma_adjust) { coex_dm->auto_tdma_adjust = true; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, - "[BTCoex], first run TdmaDurationAdjust()!!\n"); + btc_alg_dbg(ALGO_TRACE_FW_DETAIL, + "[BTCoex], first run TdmaDurationAdjust()!!\n"); if (sco_hid) { if (tx_pause) { if (max_interval == 1) { @@ -2288,11 +2272,11 @@ static void halbtc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist, } else { /* accquire the BT TRx retry count from BT_Info byte2 */ retry_cnt = coex_sta->bt_retry_cnt; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, - "[BTCoex], retry_cnt = %d\n", retry_cnt); - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, - "[BTCoex], up=%d, dn=%d, m=%d, n=%d, wait_cnt=%d\n", - up, dn, m, n, wait_cnt); + btc_alg_dbg(ALGO_TRACE_FW_DETAIL, + "[BTCoex], retry_cnt = %d\n", retry_cnt); + btc_alg_dbg(ALGO_TRACE_FW_DETAIL, + "[BTCoex], up=%d, dn=%d, m=%d, n=%d, wait_cnt=%d\n", + up, dn, m, n, wait_cnt); result = 0; wait_cnt++; /* no retry in the last 2-second duration */ @@ -2309,9 +2293,8 @@ static void halbtc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist, up = 0; dn = 0; result = 1; - BTC_PRINT(BTC_MSG_ALGORITHM, - ALGO_TRACE_FW_DETAIL, - "[BTCoex]Increase wifi duration!!\n"); + btc_alg_dbg(ALGO_TRACE_FW_DETAIL, + "[BTCoex]Increase wifi duration!!\n"); } } else if (retry_cnt <= 3) { up--; @@ -2334,9 +2317,8 @@ static void halbtc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist, dn = 0; wait_cnt = 0; result = -1; - BTC_PRINT(BTC_MSG_ALGORITHM, - ALGO_TRACE_FW_DETAIL, - "Reduce wifi duration for retry<3\n"); + btc_alg_dbg(ALGO_TRACE_FW_DETAIL, + "Reduce wifi duration for retry<3\n"); } } else { if (wait_cnt == 1) @@ -2352,12 +2334,12 @@ static void halbtc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist, dn = 0; wait_cnt = 0; result = -1; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, - "Decrease wifi duration for retryCounter>3!!\n"); + btc_alg_dbg(ALGO_TRACE_FW_DETAIL, + "Decrease wifi duration for retryCounter>3!!\n"); } - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, - "[BTCoex], max Interval = %d\n", max_interval); + btc_alg_dbg(ALGO_TRACE_FW_DETAIL, + "[BTCoex], max Interval = %d\n", max_interval); if (max_interval == 1) btc8192e_int1(btcoexist, tx_pause, result); else if (max_interval == 2) @@ -2373,11 +2355,11 @@ static void halbtc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist, if (coex_dm->cur_ps_tdma != coex_dm->tdma_adj_type) { bool scan = false, link = false, roam = false; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, - "[BTCoex], PsTdma type dismatch!!!, "); - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, - "curPsTdma=%d, recordPsTdma=%d\n", - coex_dm->cur_ps_tdma, coex_dm->tdma_adj_type); + btc_alg_dbg(ALGO_TRACE_FW_DETAIL, + "[BTCoex], PsTdma type dismatch!!!, "); + btc_alg_dbg(ALGO_TRACE_FW_DETAIL, + "curPsTdma=%d, recordPsTdma=%d\n", + coex_dm->cur_ps_tdma, 
coex_dm->tdma_adj_type); btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan); btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link); @@ -2388,9 +2370,8 @@ static void halbtc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist, true, coex_dm->tdma_adj_type); else - BTC_PRINT(BTC_MSG_ALGORITHM, - ALGO_TRACE_FW_DETAIL, - "[BTCoex], roaming/link/scan is under progress, will adjust next time!!!\n"); + btc_alg_dbg(ALGO_TRACE_FW_DETAIL, + "[BTCoex], roaming/link/scan is under progress, will adjust next time!!!\n"); } } @@ -2594,8 +2575,8 @@ static void halbtc8192e2ant_action_a2dp(struct btc_coexist *btcoexist) btrssi_state == BTC_RSSI_STATE_STAY_LOW) && (wifirssi_state == BTC_RSSI_STATE_LOW || wifirssi_state == BTC_RSSI_STATE_STAY_LOW)) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], A2dp, wifi/bt rssi both LOW!!\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], A2dp, wifi/bt rssi both LOW!!\n"); long_dist = true; } if (long_dist) { @@ -3100,105 +3081,105 @@ static void halbtc8192e2ant_run_coexist_mechanism(struct btc_coexist *btcoexist) { u8 algorithm = 0; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], RunCoexistMechanism()===>\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], RunCoexistMechanism()===>\n"); if (btcoexist->manual_control) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], return for Manual CTRL <===\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], return for Manual CTRL <===\n"); return; } if (coex_sta->under_ips) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], wifi is under IPS !!!\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], wifi is under IPS !!!\n"); return; } algorithm = halbtc8192e2ant_action_algorithm(btcoexist); if (coex_sta->c2h_bt_inquiry_page && (BT_8192E_2ANT_COEX_ALGO_PANHS != algorithm)) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], BT is under inquiry/page scan !!\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], BT is under inquiry/page scan !!\n"); halbtc8192e2ant_action_bt_inquiry(btcoexist); return; } coex_dm->cur_algorithm = algorithm; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], Algorithm = %d\n", coex_dm->cur_algorithm); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], Algorithm = %d\n", coex_dm->cur_algorithm); if (halbtc8192e2ant_is_common_action(btcoexist)) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], Action 2-Ant common.\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], Action 2-Ant common\n"); coex_dm->auto_tdma_adjust = false; } else { if (coex_dm->cur_algorithm != coex_dm->pre_algorithm) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex] preAlgorithm=%d, curAlgorithm=%d\n", - coex_dm->pre_algorithm, - coex_dm->cur_algorithm); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex] preAlgorithm=%d, curAlgorithm=%d\n", + coex_dm->pre_algorithm, + coex_dm->cur_algorithm); coex_dm->auto_tdma_adjust = false; } switch (coex_dm->cur_algorithm) { case BT_8192E_2ANT_COEX_ALGO_SCO: - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "Action 2-Ant, algorithm = SCO.\n"); + btc_alg_dbg(ALGO_TRACE, + "Action 2-Ant, algorithm = SCO\n"); halbtc8192e2ant_action_sco(btcoexist); break; case BT_8192E_2ANT_COEX_ALGO_SCO_PAN: - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "Action 2-Ant, algorithm = SCO+PAN(EDR).\n"); + btc_alg_dbg(ALGO_TRACE, + "Action 2-Ant, algorithm = SCO+PAN(EDR)\n"); halbtc8192e2ant_action_sco_pan(btcoexist); break; case BT_8192E_2ANT_COEX_ALGO_HID: - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "Action 2-Ant, algorithm = HID.\n"); + btc_alg_dbg(ALGO_TRACE, + "Action 2-Ant, algorithm = HID\n"); 
halbtc8192e2ant_action_hid(btcoexist); break; case BT_8192E_2ANT_COEX_ALGO_A2DP: - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "Action 2-Ant, algorithm = A2DP.\n"); + btc_alg_dbg(ALGO_TRACE, + "Action 2-Ant, algorithm = A2DP\n"); halbtc8192e2ant_action_a2dp(btcoexist); break; case BT_8192E_2ANT_COEX_ALGO_A2DP_PANHS: - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "Action 2-Ant, algorithm = A2DP+PAN(HS).\n"); + btc_alg_dbg(ALGO_TRACE, + "Action 2-Ant, algorithm = A2DP+PAN(HS)\n"); halbtc8192e2ant_action_a2dp_pan_hs(btcoexist); break; case BT_8192E_2ANT_COEX_ALGO_PANEDR: - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "Action 2-Ant, algorithm = PAN(EDR).\n"); + btc_alg_dbg(ALGO_TRACE, + "Action 2-Ant, algorithm = PAN(EDR)\n"); halbtc8192e2ant_action_pan_edr(btcoexist); break; case BT_8192E_2ANT_COEX_ALGO_PANHS: - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "Action 2-Ant, algorithm = HS mode.\n"); + btc_alg_dbg(ALGO_TRACE, + "Action 2-Ant, algorithm = HS mode\n"); halbtc8192e2ant_action_pan_hs(btcoexist); break; case BT_8192E_2ANT_COEX_ALGO_PANEDR_A2DP: - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "Action 2-Ant, algorithm = PAN+A2DP.\n"); + btc_alg_dbg(ALGO_TRACE, + "Action 2-Ant, algorithm = PAN+A2DP\n"); halbtc8192e2ant_action_pan_edr_a2dp(btcoexist); break; case BT_8192E_2ANT_COEX_ALGO_PANEDR_HID: - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "Action 2-Ant, algorithm = PAN(EDR)+HID.\n"); + btc_alg_dbg(ALGO_TRACE, + "Action 2-Ant, algorithm = PAN(EDR)+HID\n"); halbtc8192e2ant_action_pan_edr_hid(btcoexist); break; case BT_8192E_2ANT_COEX_ALGO_HID_A2DP_PANEDR: - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "Action 2-Ant, algorithm = HID+A2DP+PAN.\n"); + btc_alg_dbg(ALGO_TRACE, + "Action 2-Ant, algorithm = HID+A2DP+PAN\n"); btc8192e2ant_action_hid_a2dp_pan_edr(btcoexist); break; case BT_8192E_2ANT_COEX_ALGO_HID_A2DP: - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "Action 2-Ant, algorithm = HID+A2DP.\n"); + btc_alg_dbg(ALGO_TRACE, + "Action 2-Ant, algorithm = HID+A2DP\n"); halbtc8192e2ant_action_hid_a2dp(btcoexist); break; default: - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "Action 2-Ant, algorithm = unknown!!\n"); + btc_alg_dbg(ALGO_TRACE, + "Action 2-Ant, algorithm = unknown!!\n"); /* halbtc8192e2ant_coex_alloff(btcoexist); */ break; } @@ -3212,8 +3193,8 @@ static void halbtc8192e2ant_init_hwconfig(struct btc_coexist *btcoexist, u16 u16tmp = 0; u8 u8tmp = 0; - BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, - "[BTCoex], 2Ant Init HW Config!!\n"); + btc_iface_dbg(INTF_INIT, + "[BTCoex], 2Ant Init HW Config!!\n"); if (backup) { /* backup rf 0x1e value */ @@ -3296,8 +3277,8 @@ void ex_halbtc8192e2ant_init_hwconfig(struct btc_coexist *btcoexist) void ex_halbtc8192e2ant_init_coex_dm(struct btc_coexist *btcoexist) { - BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, - "[BTCoex], Coex Mechanism Init!!\n"); + btc_iface_dbg(INTF_INIT, + "[BTCoex], Coex Mechanism Init!!\n"); halbtc8192e2ant_init_coex_dm(btcoexist); } @@ -3525,13 +3506,13 @@ void ex_halbtc8192e2ant_display_coex_info(struct btc_coexist *btcoexist) void ex_halbtc8192e2ant_ips_notify(struct btc_coexist *btcoexist, u8 type) { if (BTC_IPS_ENTER == type) { - BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, - "[BTCoex], IPS ENTER notify\n"); + btc_iface_dbg(INTF_NOTIFY, + "[BTCoex], IPS ENTER notify\n"); coex_sta->under_ips = true; halbtc8192e2ant_coex_alloff(btcoexist); } else if (BTC_IPS_LEAVE == type) { - BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, - "[BTCoex], IPS LEAVE notify\n"); + btc_iface_dbg(INTF_NOTIFY, + "[BTCoex], IPS LEAVE notify\n"); coex_sta->under_ips = false; } 
} @@ -3539,12 +3520,12 @@ void ex_halbtc8192e2ant_ips_notify(struct btc_coexist *btcoexist, u8 type) void ex_halbtc8192e2ant_lps_notify(struct btc_coexist *btcoexist, u8 type) { if (BTC_LPS_ENABLE == type) { - BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, - "[BTCoex], LPS ENABLE notify\n"); + btc_iface_dbg(INTF_NOTIFY, + "[BTCoex], LPS ENABLE notify\n"); coex_sta->under_lps = true; } else if (BTC_LPS_DISABLE == type) { - BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, - "[BTCoex], LPS DISABLE notify\n"); + btc_iface_dbg(INTF_NOTIFY, + "[BTCoex], LPS DISABLE notify\n"); coex_sta->under_lps = false; } } @@ -3552,21 +3533,21 @@ void ex_halbtc8192e2ant_lps_notify(struct btc_coexist *btcoexist, u8 type) void ex_halbtc8192e2ant_scan_notify(struct btc_coexist *btcoexist, u8 type) { if (BTC_SCAN_START == type) - BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, - "[BTCoex], SCAN START notify\n"); + btc_iface_dbg(INTF_NOTIFY, + "[BTCoex], SCAN START notify\n"); else if (BTC_SCAN_FINISH == type) - BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, - "[BTCoex], SCAN FINISH notify\n"); + btc_iface_dbg(INTF_NOTIFY, + "[BTCoex], SCAN FINISH notify\n"); } void ex_halbtc8192e2ant_connect_notify(struct btc_coexist *btcoexist, u8 type) { if (BTC_ASSOCIATE_START == type) - BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, - "[BTCoex], CONNECT START notify\n"); + btc_iface_dbg(INTF_NOTIFY, + "[BTCoex], CONNECT START notify\n"); else if (BTC_ASSOCIATE_FINISH == type) - BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, - "[BTCoex], CONNECT FINISH notify\n"); + btc_iface_dbg(INTF_NOTIFY, + "[BTCoex], CONNECT FINISH notify\n"); } void ex_halbtc8192e2ant_media_status_notify(struct btc_coexist *btcoexist, @@ -3582,11 +3563,11 @@ void ex_halbtc8192e2ant_media_status_notify(struct btc_coexist *btcoexist, return; if (BTC_MEDIA_CONNECT == type) - BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, - "[BTCoex], MEDIA connect notify\n"); + btc_iface_dbg(INTF_NOTIFY, + "[BTCoex], MEDIA connect notify\n"); else - BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, - "[BTCoex], MEDIA disconnect notify\n"); + btc_iface_dbg(INTF_NOTIFY, + "[BTCoex], MEDIA disconnect notify\n"); /* only 2.4G we need to inform bt the chnl mask */ btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_CENTRAL_CHNL, @@ -3606,10 +3587,10 @@ void ex_halbtc8192e2ant_media_status_notify(struct btc_coexist *btcoexist, coex_dm->wifi_chnl_info[1] = h2c_parameter[1]; coex_dm->wifi_chnl_info[2] = h2c_parameter[2]; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, - "[BTCoex], FW write 0x66 = 0x%x\n", - h2c_parameter[0] << 16 | h2c_parameter[1] << 8 | - h2c_parameter[2]); + btc_alg_dbg(ALGO_TRACE_FW_EXEC, + "[BTCoex], FW write 0x66 = 0x%x\n", + h2c_parameter[0] << 16 | h2c_parameter[1] << 8 | + h2c_parameter[2]); btcoexist->btc_fill_h2c(btcoexist, 0x66, 3, h2c_parameter); } @@ -3618,8 +3599,8 @@ void ex_halbtc8192e2ant_special_packet_notify(struct btc_coexist *btcoexist, u8 type) { if (type == BTC_PACKET_DHCP) - BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, - "[BTCoex], DHCP Packet notify\n"); + btc_iface_dbg(INTF_NOTIFY, + "[BTCoex], DHCP Packet notify\n"); } void ex_halbtc8192e2ant_bt_info_notify(struct btc_coexist *btcoexist, @@ -3637,19 +3618,19 @@ void ex_halbtc8192e2ant_bt_info_notify(struct btc_coexist *btcoexist, rsp_source = BT_INFO_SRC_8192E_2ANT_WIFI_FW; coex_sta->bt_info_c2h_cnt[rsp_source]++; - BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, - "[BTCoex], Bt info[%d], length=%d, hex data = [", - rsp_source, length); + btc_iface_dbg(INTF_NOTIFY, + "[BTCoex], Bt info[%d], length=%d, hex data = [", + rsp_source, length); for 
(i = 0; i < length; i++) { coex_sta->bt_info_c2h[rsp_source][i] = tmp_buf[i]; if (i == 1) bt_info = tmp_buf[i]; if (i == length-1) - BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, - "0x%02x]\n", tmp_buf[i]); + btc_iface_dbg(INTF_NOTIFY, + "0x%02x]\n", tmp_buf[i]); else - BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, - "0x%02x, ", tmp_buf[i]); + btc_iface_dbg(INTF_NOTIFY, + "0x%02x, ", tmp_buf[i]); } if (BT_INFO_SRC_8192E_2ANT_WIFI_FW != rsp_source) { @@ -3666,8 +3647,8 @@ void ex_halbtc8192e2ant_bt_info_notify(struct btc_coexist *btcoexist, * because bt is reset and loss of the info. */ if ((coex_sta->bt_info_ext & BIT1)) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "bit1, send wifi BW&Chnl to BT!!\n"); + btc_alg_dbg(ALGO_TRACE, + "bit1, send wifi BW&Chnl to BT!!\n"); btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED, &wifi_connected); if (wifi_connected) @@ -3683,8 +3664,8 @@ void ex_halbtc8192e2ant_bt_info_notify(struct btc_coexist *btcoexist, if ((coex_sta->bt_info_ext & BIT3)) { if (!btcoexist->manual_control && !btcoexist->stop_coex_dm) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "bit3, BT NOT ignore Wlan active!\n"); + btc_alg_dbg(ALGO_TRACE, + "bit3, BT NOT ignore Wlan active!\n"); halbtc8192e2ant_IgnoreWlanAct(btcoexist, FORCE_EXEC, false); @@ -3742,25 +3723,25 @@ void ex_halbtc8192e2ant_bt_info_notify(struct btc_coexist *btcoexist, if (!(bt_info&BT_INFO_8192E_2ANT_B_CONNECTION)) { coex_dm->bt_status = BT_8192E_2ANT_BT_STATUS_NON_CONNECTED_IDLE; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], BT Non-Connected idle!!!\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], BT Non-Connected idle!!!\n"); } else if (bt_info == BT_INFO_8192E_2ANT_B_CONNECTION) { coex_dm->bt_status = BT_8192E_2ANT_BT_STATUS_CONNECTED_IDLE; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], bt_infoNotify(), BT Connected-idle!!!\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], bt_infoNotify(), BT Connected-idle!!!\n"); } else if ((bt_info&BT_INFO_8192E_2ANT_B_SCO_ESCO) || (bt_info&BT_INFO_8192E_2ANT_B_SCO_BUSY)) { coex_dm->bt_status = BT_8192E_2ANT_BT_STATUS_SCO_BUSY; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], bt_infoNotify(), BT SCO busy!!!\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], bt_infoNotify(), BT SCO busy!!!\n"); } else if (bt_info&BT_INFO_8192E_2ANT_B_ACL_BUSY) { coex_dm->bt_status = BT_8192E_2ANT_BT_STATUS_ACL_BUSY; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], bt_infoNotify(), BT ACL busy!!!\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], bt_infoNotify(), BT ACL busy!!!\n"); } else { coex_dm->bt_status = BT_8192E_2ANT_BT_STATUS_MAX; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex]bt_infoNotify(), BT Non-Defined state!!!\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex]bt_infoNotify(), BT Non-Defined state!!!\n"); } if ((BT_8192E_2ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) || @@ -3788,7 +3769,7 @@ void ex_halbtc8192e2ant_stack_operation_notify(struct btc_coexist *btcoexist, void ex_halbtc8192e2ant_halt_notify(struct btc_coexist *btcoexist) { - BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, "[BTCoex], Halt notify\n"); + btc_iface_dbg(INTF_NOTIFY, "[BTCoex], Halt notify\n"); halbtc8192e2ant_IgnoreWlanAct(btcoexist, FORCE_EXEC, true); ex_halbtc8192e2ant_media_status_notify(btcoexist, BTC_MEDIA_DISCONNECT); @@ -3801,29 +3782,29 @@ void ex_halbtc8192e2ant_periodical(struct btc_coexist *btcoexist) struct btc_board_info *board_info = &btcoexist->board_info; struct btc_stack_info *stack_info = &btcoexist->stack_info; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - 
"=======================Periodical=======================\n"); + btc_alg_dbg(ALGO_TRACE, + "=======================Periodical=======================\n"); if (dis_ver_info_cnt <= 5) { dis_ver_info_cnt += 1; - BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, - "************************************************\n"); - BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, - "Ant PG Num/ Ant Mech/ Ant Pos = %d/ %d/ %d\n", - board_info->pg_ant_num, board_info->btdm_ant_num, - board_info->btdm_ant_pos); - BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, - "BT stack/ hci ext ver = %s / %d\n", - ((stack_info->profile_notified) ? "Yes" : "No"), - stack_info->hci_version); + btc_iface_dbg(INTF_INIT, + "************************************************\n"); + btc_iface_dbg(INTF_INIT, + "Ant PG Num/ Ant Mech/ Ant Pos = %d/ %d/ %d\n", + board_info->pg_ant_num, board_info->btdm_ant_num, + board_info->btdm_ant_pos); + btc_iface_dbg(INTF_INIT, + "BT stack/ hci ext ver = %s / %d\n", + ((stack_info->profile_notified) ? "Yes" : "No"), + stack_info->hci_version); btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER, &bt_patch_ver); btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver); - BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, - "CoexVer/ FwVer/ PatchVer = %d_%x/ 0x%x/ 0x%x(%d)\n", - glcoex_ver_date_8192e_2ant, glcoex_ver_8192e_2ant, - fw_ver, bt_patch_ver, bt_patch_ver); - BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, - "************************************************\n"); + btc_iface_dbg(INTF_INIT, + "CoexVer/ FwVer/ PatchVer = %d_%x/ 0x%x/ 0x%x(%d)\n", + glcoex_ver_date_8192e_2ant, glcoex_ver_8192e_2ant, + fw_ver, bt_patch_ver, bt_patch_ver); + btc_iface_dbg(INTF_INIT, + "************************************************\n"); } #if (BT_AUTO_REPORT_ONLY_8192E_2ANT == 0) diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c index 7e239d3cea26..16add42a62af 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c @@ -74,28 +74,28 @@ static u8 halbtc8723b1ant_bt_rssi_state(u8 level_num, u8 rssi_thresh, if (bt_rssi >= rssi_thresh + BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT) { bt_rssi_state = BTC_RSSI_STATE_HIGH; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, - "[BTCoex], BT Rssi state switch to High\n"); + btc_alg_dbg(ALGO_BT_RSSI_STATE, + "[BTCoex], BT Rssi state switch to High\n"); } else { bt_rssi_state = BTC_RSSI_STATE_STAY_LOW; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, - "[BTCoex], BT Rssi state stay at Low\n"); + btc_alg_dbg(ALGO_BT_RSSI_STATE, + "[BTCoex], BT Rssi state stay at Low\n"); } } else { if (bt_rssi < rssi_thresh) { bt_rssi_state = BTC_RSSI_STATE_LOW; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, - "[BTCoex], BT Rssi state switch to Low\n"); + btc_alg_dbg(ALGO_BT_RSSI_STATE, + "[BTCoex], BT Rssi state switch to Low\n"); } else { bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, - "[BTCoex], BT Rssi state stay at High\n"); + btc_alg_dbg(ALGO_BT_RSSI_STATE, + "[BTCoex], BT Rssi state stay at High\n"); } } } else if (level_num == 3) { if (rssi_thresh > rssi_thresh1) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, - "[BTCoex], BT Rssi thresh error!!\n"); + btc_alg_dbg(ALGO_BT_RSSI_STATE, + "[BTCoex], BT Rssi thresh error!!\n"); return coex_sta->pre_bt_rssi_state; } @@ -104,12 +104,12 @@ static u8 halbtc8723b1ant_bt_rssi_state(u8 level_num, u8 rssi_thresh, if (bt_rssi >= rssi_thresh + 
BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT) { bt_rssi_state = BTC_RSSI_STATE_MEDIUM; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, - "[BTCoex], BT Rssi state switch to Medium\n"); + btc_alg_dbg(ALGO_BT_RSSI_STATE, + "[BTCoex], BT Rssi state switch to Medium\n"); } else { bt_rssi_state = BTC_RSSI_STATE_STAY_LOW; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, - "[BTCoex], BT Rssi state stay at Low\n"); + btc_alg_dbg(ALGO_BT_RSSI_STATE, + "[BTCoex], BT Rssi state stay at Low\n"); } } else if ((coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_MEDIUM) || @@ -118,26 +118,26 @@ static u8 halbtc8723b1ant_bt_rssi_state(u8 level_num, u8 rssi_thresh, if (bt_rssi >= rssi_thresh1 + BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT) { bt_rssi_state = BTC_RSSI_STATE_HIGH; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, - "[BTCoex], BT Rssi state switch to High\n"); + btc_alg_dbg(ALGO_BT_RSSI_STATE, + "[BTCoex], BT Rssi state switch to High\n"); } else if (bt_rssi < rssi_thresh) { bt_rssi_state = BTC_RSSI_STATE_LOW; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, - "[BTCoex], BT Rssi state switch to Low\n"); + btc_alg_dbg(ALGO_BT_RSSI_STATE, + "[BTCoex], BT Rssi state switch to Low\n"); } else { bt_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, - "[BTCoex], BT Rssi state stay at Medium\n"); + btc_alg_dbg(ALGO_BT_RSSI_STATE, + "[BTCoex], BT Rssi state stay at Medium\n"); } } else { if (bt_rssi < rssi_thresh1) { bt_rssi_state = BTC_RSSI_STATE_MEDIUM; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, - "[BTCoex], BT Rssi state switch to Medium\n"); + btc_alg_dbg(ALGO_BT_RSSI_STATE, + "[BTCoex], BT Rssi state switch to Medium\n"); } else { bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, - "[BTCoex], BT Rssi state stay at High\n"); + btc_alg_dbg(ALGO_BT_RSSI_STATE, + "[BTCoex], BT Rssi state stay at High\n"); } } } @@ -165,32 +165,28 @@ static u8 halbtc8723b1ant_wifi_rssi_state(struct btc_coexist *btcoexist, if (wifi_rssi >= rssi_thresh + BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT) { wifi_rssi_state = BTC_RSSI_STATE_HIGH; - BTC_PRINT(BTC_MSG_ALGORITHM, - ALGO_WIFI_RSSI_STATE, - "[BTCoex], wifi RSSI state switch to High\n"); + btc_alg_dbg(ALGO_WIFI_RSSI_STATE, + "[BTCoex], wifi RSSI state switch to High\n"); } else { wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW; - BTC_PRINT(BTC_MSG_ALGORITHM, - ALGO_WIFI_RSSI_STATE, - "[BTCoex], wifi RSSI state stay at Low\n"); + btc_alg_dbg(ALGO_WIFI_RSSI_STATE, + "[BTCoex], wifi RSSI state stay at Low\n"); } } else { if (wifi_rssi < rssi_thresh) { wifi_rssi_state = BTC_RSSI_STATE_LOW; - BTC_PRINT(BTC_MSG_ALGORITHM, - ALGO_WIFI_RSSI_STATE, - "[BTCoex], wifi RSSI state switch to Low\n"); + btc_alg_dbg(ALGO_WIFI_RSSI_STATE, + "[BTCoex], wifi RSSI state switch to Low\n"); } else { wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH; - BTC_PRINT(BTC_MSG_ALGORITHM, - ALGO_WIFI_RSSI_STATE, - "[BTCoex], wifi RSSI state stay at High\n"); + btc_alg_dbg(ALGO_WIFI_RSSI_STATE, + "[BTCoex], wifi RSSI state stay at High\n"); } } } else if (level_num == 3) { if (rssi_thresh > rssi_thresh1) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, - "[BTCoex], wifi RSSI thresh error!!\n"); + btc_alg_dbg(ALGO_WIFI_RSSI_STATE, + "[BTCoex], wifi RSSI thresh error!!\n"); return coex_sta->pre_wifi_rssi_state[index]; } @@ -201,14 +197,12 @@ static u8 halbtc8723b1ant_wifi_rssi_state(struct btc_coexist *btcoexist, if (wifi_rssi >= rssi_thresh + BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT) { wifi_rssi_state = BTC_RSSI_STATE_MEDIUM; - 
BTC_PRINT(BTC_MSG_ALGORITHM, - ALGO_WIFI_RSSI_STATE, - "[BTCoex], wifi RSSI state switch to Medium\n"); + btc_alg_dbg(ALGO_WIFI_RSSI_STATE, + "[BTCoex], wifi RSSI state switch to Medium\n"); } else { wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW; - BTC_PRINT(BTC_MSG_ALGORITHM, - ALGO_WIFI_RSSI_STATE, - "[BTCoex], wifi RSSI state stay at Low\n"); + btc_alg_dbg(ALGO_WIFI_RSSI_STATE, + "[BTCoex], wifi RSSI state stay at Low\n"); } } else if ((coex_sta->pre_wifi_rssi_state[index] == BTC_RSSI_STATE_MEDIUM) || @@ -217,31 +211,26 @@ static u8 halbtc8723b1ant_wifi_rssi_state(struct btc_coexist *btcoexist, if (wifi_rssi >= rssi_thresh1 + BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT) { wifi_rssi_state = BTC_RSSI_STATE_HIGH; - BTC_PRINT(BTC_MSG_ALGORITHM, - ALGO_WIFI_RSSI_STATE, - "[BTCoex], wifi RSSI state switch to High\n"); + btc_alg_dbg(ALGO_WIFI_RSSI_STATE, + "[BTCoex], wifi RSSI state switch to High\n"); } else if (wifi_rssi < rssi_thresh) { wifi_rssi_state = BTC_RSSI_STATE_LOW; - BTC_PRINT(BTC_MSG_ALGORITHM, - ALGO_WIFI_RSSI_STATE, - "[BTCoex], wifi RSSI state switch to Low\n"); + btc_alg_dbg(ALGO_WIFI_RSSI_STATE, + "[BTCoex], wifi RSSI state switch to Low\n"); } else { wifi_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM; - BTC_PRINT(BTC_MSG_ALGORITHM, - ALGO_WIFI_RSSI_STATE, - "[BTCoex], wifi RSSI state stay at Medium\n"); + btc_alg_dbg(ALGO_WIFI_RSSI_STATE, + "[BTCoex], wifi RSSI state stay at Medium\n"); } } else { if (wifi_rssi < rssi_thresh1) { wifi_rssi_state = BTC_RSSI_STATE_MEDIUM; - BTC_PRINT(BTC_MSG_ALGORITHM, - ALGO_WIFI_RSSI_STATE, - "[BTCoex], wifi RSSI state switch to Medium\n"); + btc_alg_dbg(ALGO_WIFI_RSSI_STATE, + "[BTCoex], wifi RSSI state switch to Medium\n"); } else { wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH; - BTC_PRINT(BTC_MSG_ALGORITHM, - ALGO_WIFI_RSSI_STATE, - "[BTCoex], wifi RSSI state stay at High\n"); + btc_alg_dbg(ALGO_WIFI_RSSI_STATE, + "[BTCoex], wifi RSSI state stay at High\n"); } } } @@ -435,9 +424,9 @@ static void halbtc8723b1ant_query_bt_info(struct btc_coexist *btcoexist) h2c_parameter[0] |= BIT0; /* trigger*/ - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, - "[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n", - h2c_parameter[0]); + btc_alg_dbg(ALGO_TRACE_FW_EXEC, + "[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n", + h2c_parameter[0]); btcoexist->btc_fill_h2c(btcoexist, 0x61, 1, h2c_parameter); } @@ -532,8 +521,8 @@ static u8 halbtc8723b1ant_action_algorithm(struct btc_coexist *btcoexist) btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on); if (!bt_link_info->bt_link_exist) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], No BT link exists!!!\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], No BT link exists!!!\n"); return algorithm; } @@ -548,27 +537,27 @@ static u8 halbtc8723b1ant_action_algorithm(struct btc_coexist *btcoexist) if (numdiffprofile == 1) { if (bt_link_info->sco_exist) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], BT Profile = SCO only\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], BT Profile = SCO only\n"); algorithm = BT_8723B_1ANT_COEX_ALGO_SCO; } else { if (bt_link_info->hid_exist) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], BT Profile = HID only\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], BT Profile = HID only\n"); algorithm = BT_8723B_1ANT_COEX_ALGO_HID; } else if (bt_link_info->a2dp_exist) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], BT Profile = A2DP only\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], BT Profile = A2DP only\n"); algorithm = BT_8723B_1ANT_COEX_ALGO_A2DP; } else if 
(bt_link_info->pan_exist) { if (bt_hs_on) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], BT Profile = PAN(HS) only\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], BT Profile = PAN(HS) only\n"); algorithm = BT_8723B_1ANT_COEX_ALGO_PANHS; } else { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], BT Profile = PAN(EDR) only\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], BT Profile = PAN(EDR) only\n"); algorithm = BT_8723B_1ANT_COEX_ALGO_PANEDR; } @@ -577,21 +566,21 @@ static u8 halbtc8723b1ant_action_algorithm(struct btc_coexist *btcoexist) } else if (numdiffprofile == 2) { if (bt_link_info->sco_exist) { if (bt_link_info->hid_exist) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], BT Profile = SCO + HID\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], BT Profile = SCO + HID\n"); algorithm = BT_8723B_1ANT_COEX_ALGO_HID; } else if (bt_link_info->a2dp_exist) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], BT Profile = SCO + A2DP ==> SCO\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], BT Profile = SCO + A2DP ==> SCO\n"); algorithm = BT_8723B_1ANT_COEX_ALGO_SCO; } else if (bt_link_info->pan_exist) { if (bt_hs_on) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], BT Profile = SCO + PAN(HS)\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], BT Profile = SCO + PAN(HS)\n"); algorithm = BT_8723B_1ANT_COEX_ALGO_SCO; } else { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], BT Profile = SCO + PAN(EDR)\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], BT Profile = SCO + PAN(EDR)\n"); algorithm = BT_8723B_1ANT_COEX_ALGO_PANEDR_HID; } @@ -599,32 +588,32 @@ static u8 halbtc8723b1ant_action_algorithm(struct btc_coexist *btcoexist) } else { if (bt_link_info->hid_exist && bt_link_info->a2dp_exist) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], BT Profile = HID + A2DP\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], BT Profile = HID + A2DP\n"); algorithm = BT_8723B_1ANT_COEX_ALGO_HID_A2DP; } else if (bt_link_info->hid_exist && bt_link_info->pan_exist) { if (bt_hs_on) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], BT Profile = HID + PAN(HS)\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], BT Profile = HID + PAN(HS)\n"); algorithm = BT_8723B_1ANT_COEX_ALGO_HID_A2DP; } else { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], BT Profile = HID + PAN(EDR)\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], BT Profile = HID + PAN(EDR)\n"); algorithm = BT_8723B_1ANT_COEX_ALGO_PANEDR_HID; } } else if (bt_link_info->pan_exist && bt_link_info->a2dp_exist) { if (bt_hs_on) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], BT Profile = A2DP + PAN(HS)\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], BT Profile = A2DP + PAN(HS)\n"); algorithm = BT_8723B_1ANT_COEX_ALGO_A2DP_PANHS; } else { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], BT Profile = A2DP + PAN(EDR)\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], BT Profile = A2DP + PAN(EDR)\n"); algorithm = BT_8723B_1ANT_COEX_ALGO_PANEDR_A2DP; } @@ -634,31 +623,31 @@ static u8 halbtc8723b1ant_action_algorithm(struct btc_coexist *btcoexist) if (bt_link_info->sco_exist) { if (bt_link_info->hid_exist && bt_link_info->a2dp_exist) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], BT Profile = SCO + HID + A2DP ==> HID\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], BT Profile = SCO + HID + A2DP ==> HID\n"); algorithm = BT_8723B_1ANT_COEX_ALGO_HID; } else if (bt_link_info->hid_exist && bt_link_info->pan_exist) { if (bt_hs_on) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], BT Profile = SCO + HID + PAN(HS)\n"); + 
btc_alg_dbg(ALGO_TRACE, + "[BTCoex], BT Profile = SCO + HID + PAN(HS)\n"); algorithm = BT_8723B_1ANT_COEX_ALGO_HID_A2DP; } else { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], BT Profile = SCO + HID + PAN(EDR)\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], BT Profile = SCO + HID + PAN(EDR)\n"); algorithm = BT_8723B_1ANT_COEX_ALGO_PANEDR_HID; } } else if (bt_link_info->pan_exist && bt_link_info->a2dp_exist) { if (bt_hs_on) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], BT Profile = SCO + A2DP + PAN(HS)\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], BT Profile = SCO + A2DP + PAN(HS)\n"); algorithm = BT_8723B_1ANT_COEX_ALGO_SCO; } else { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], BT Profile = SCO + A2DP + PAN(EDR) ==> HID\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], BT Profile = SCO + A2DP + PAN(EDR) ==> HID\n"); algorithm = BT_8723B_1ANT_COEX_ALGO_PANEDR_HID; } @@ -668,13 +657,13 @@ static u8 halbtc8723b1ant_action_algorithm(struct btc_coexist *btcoexist) bt_link_info->pan_exist && bt_link_info->a2dp_exist) { if (bt_hs_on) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], BT Profile = HID + A2DP + PAN(HS)\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], BT Profile = HID + A2DP + PAN(HS)\n"); algorithm = BT_8723B_1ANT_COEX_ALGO_HID_A2DP; } else { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], BT Profile = HID + A2DP + PAN(EDR)\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], BT Profile = HID + A2DP + PAN(EDR)\n"); algorithm = BT_8723B_1ANT_COEX_ALGO_HID_A2DP_PANEDR; } @@ -686,11 +675,11 @@ static u8 halbtc8723b1ant_action_algorithm(struct btc_coexist *btcoexist) bt_link_info->pan_exist && bt_link_info->a2dp_exist) { if (bt_hs_on) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], Error!!! BT Profile = SCO + HID + A2DP + PAN(HS)\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], Error!!! BT Profile = SCO + HID + A2DP + PAN(HS)\n"); } else { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], BT Profile = SCO + HID + A2DP + PAN(EDR)==>PAN(EDR)+HID\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], BT Profile = SCO + HID + A2DP + PAN(EDR)==>PAN(EDR)+HID\n"); algorithm = BT_8723B_1ANT_COEX_ALGO_PANEDR_HID; } @@ -717,9 +706,9 @@ static void btc8723b1ant_set_sw_pen_tx_rate_adapt(struct btc_coexist *btcoexist, h2c_parameter[5] = 0xf9; /*MCS5 or OFDM36 */ } - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, - "[BTCoex], set WiFi Low-Penalty Retry: %s", - (low_penalty_ra ? "ON!!" : "OFF!!")); + btc_alg_dbg(ALGO_TRACE_FW_EXEC, + "[BTCoex], set WiFi Low-Penalty Retry: %s", + (low_penalty_ra ? "ON!!" 
: "OFF!!")); btcoexist->btc_fill_h2c(btcoexist, 0x69, 6, h2c_parameter); } @@ -743,20 +732,20 @@ static void halbtc8723b1ant_set_coex_table(struct btc_coexist *btcoexist, u32 val0x6c0, u32 val0x6c4, u32 val0x6c8, u8 val0x6cc) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, - "[BTCoex], set coex table, set 0x6c0 = 0x%x\n", val0x6c0); + btc_alg_dbg(ALGO_TRACE_SW_EXEC, + "[BTCoex], set coex table, set 0x6c0 = 0x%x\n", val0x6c0); btcoexist->btc_write_4byte(btcoexist, 0x6c0, val0x6c0); - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, - "[BTCoex], set coex table, set 0x6c4 = 0x%x\n", val0x6c4); + btc_alg_dbg(ALGO_TRACE_SW_EXEC, + "[BTCoex], set coex table, set 0x6c4 = 0x%x\n", val0x6c4); btcoexist->btc_write_4byte(btcoexist, 0x6c4, val0x6c4); - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, - "[BTCoex], set coex table, set 0x6c8 = 0x%x\n", val0x6c8); + btc_alg_dbg(ALGO_TRACE_SW_EXEC, + "[BTCoex], set coex table, set 0x6c8 = 0x%x\n", val0x6c8); btcoexist->btc_write_4byte(btcoexist, 0x6c8, val0x6c8); - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, - "[BTCoex], set coex table, set 0x6cc = 0x%x\n", val0x6cc); + btc_alg_dbg(ALGO_TRACE_SW_EXEC, + "[BTCoex], set coex table, set 0x6cc = 0x%x\n", val0x6cc); btcoexist->btc_write_1byte(btcoexist, 0x6cc, val0x6cc); } @@ -765,10 +754,10 @@ static void halbtc8723b1ant_coex_table(struct btc_coexist *btcoexist, u32 val0x6c4, u32 val0x6c8, u8 val0x6cc) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, - "[BTCoex], %s write Coex Table 0x6c0 = 0x%x, 0x6c4 = 0x%x, 0x6cc = 0x%x\n", - (force_exec ? "force to" : ""), - val0x6c0, val0x6c4, val0x6cc); + btc_alg_dbg(ALGO_TRACE_SW, + "[BTCoex], %s write Coex Table 0x6c0 = 0x%x, 0x6c4 = 0x%x, 0x6cc = 0x%x\n", + (force_exec ? "force to" : ""), + val0x6c0, val0x6c4, val0x6cc); coex_dm->cur_val0x6c0 = val0x6c0; coex_dm->cur_val0x6c4 = val0x6c4; coex_dm->cur_val0x6c8 = val0x6c8; @@ -839,9 +828,9 @@ static void halbtc8723b1ant_SetFwIgnoreWlanAct(struct btc_coexist *btcoexist, if (enable) h2c_parameter[0] |= BIT0; /* function enable */ - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, - "[BTCoex], set FW for BT Ignore Wlan_Act, FW write 0x63 = 0x%x\n", - h2c_parameter[0]); + btc_alg_dbg(ALGO_TRACE_FW_EXEC, + "[BTCoex], set FW for BT Ignore Wlan_Act, FW write 0x63 = 0x%x\n", + h2c_parameter[0]); btcoexist->btc_fill_h2c(btcoexist, 0x63, 1, h2c_parameter); } @@ -849,16 +838,16 @@ static void halbtc8723b1ant_SetFwIgnoreWlanAct(struct btc_coexist *btcoexist, static void halbtc8723b1ant_ignore_wlan_act(struct btc_coexist *btcoexist, bool force_exec, bool enable) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, - "[BTCoex], %s turn Ignore WlanAct %s\n", - (force_exec ? "force to" : ""), (enable ? "ON" : "OFF")); + btc_alg_dbg(ALGO_TRACE_FW, + "[BTCoex], %s turn Ignore WlanAct %s\n", + (force_exec ? "force to" : ""), (enable ? 
"ON" : "OFF")); coex_dm->cur_ignore_wlan_act = enable; if (!force_exec) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, - "[BTCoex], bPreIgnoreWlanAct = %d, bCurIgnoreWlanAct = %d!!\n", - coex_dm->pre_ignore_wlan_act, - coex_dm->cur_ignore_wlan_act); + btc_alg_dbg(ALGO_TRACE_FW_DETAIL, + "[BTCoex], bPreIgnoreWlanAct = %d, bCurIgnoreWlanAct = %d!!\n", + coex_dm->pre_ignore_wlan_act, + coex_dm->cur_ignore_wlan_act); if (coex_dm->pre_ignore_wlan_act == coex_dm->cur_ignore_wlan_act) @@ -882,8 +871,8 @@ static void halbtc8723b1ant_set_fw_ps_tdma(struct btc_coexist *btcoexist, if (ap_enable) { if ((byte1 & BIT4) && !(byte1 & BIT5)) { - BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, - "[BTCoex], FW for 1Ant AP mode\n"); + btc_iface_dbg(INTF_NOTIFY, + "[BTCoex], FW for 1Ant AP mode\n"); real_byte1 &= ~BIT4; real_byte1 |= BIT5; @@ -904,13 +893,13 @@ static void halbtc8723b1ant_set_fw_ps_tdma(struct btc_coexist *btcoexist, coex_dm->ps_tdma_para[3] = byte4; coex_dm->ps_tdma_para[4] = real_byte5; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, - "[BTCoex], PS-TDMA H2C cmd =0x%x%08x\n", - h2c_parameter[0], - h2c_parameter[1] << 24 | - h2c_parameter[2] << 16 | - h2c_parameter[3] << 8 | - h2c_parameter[4]); + btc_alg_dbg(ALGO_TRACE_FW_EXEC, + "[BTCoex], PS-TDMA H2C cmd =0x%x%08x\n", + h2c_parameter[0], + h2c_parameter[1] << 24 | + h2c_parameter[2] << 16 | + h2c_parameter[3] << 8 | + h2c_parameter[4]); btcoexist->btc_fill_h2c(btcoexist, 0x60, 5, h2c_parameter); } @@ -929,22 +918,22 @@ static void halbtc8723b1ant_LpsRpwm(struct btc_coexist *btcoexist, bool force_exec, u8 lps_val, u8 rpwm_val) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, - "[BTCoex], %s set lps/rpwm = 0x%x/0x%x\n", - (force_exec ? "force to" : ""), lps_val, rpwm_val); + btc_alg_dbg(ALGO_TRACE_FW, + "[BTCoex], %s set lps/rpwm = 0x%x/0x%x\n", + (force_exec ? 
"force to" : ""), lps_val, rpwm_val); coex_dm->cur_lps = lps_val; coex_dm->cur_rpwm = rpwm_val; if (!force_exec) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, - "[BTCoex], LPS-RxBeaconMode = 0x%x , LPS-RPWM = 0x%x!!\n", - coex_dm->cur_lps, coex_dm->cur_rpwm); + btc_alg_dbg(ALGO_TRACE_FW_DETAIL, + "[BTCoex], LPS-RxBeaconMode = 0x%x , LPS-RPWM = 0x%x!!\n", + coex_dm->cur_lps, coex_dm->cur_rpwm); if ((coex_dm->pre_lps == coex_dm->cur_lps) && (coex_dm->pre_rpwm == coex_dm->cur_rpwm)) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, - "[BTCoex], LPS-RPWM_Last = 0x%x , LPS-RPWM_Now = 0x%x!!\n", - coex_dm->pre_rpwm, coex_dm->cur_rpwm); + btc_alg_dbg(ALGO_TRACE_FW_DETAIL, + "[BTCoex], LPS-RPWM_Last = 0x%x , LPS-RPWM_Now = 0x%x!!\n", + coex_dm->pre_rpwm, coex_dm->cur_rpwm); return; } @@ -958,8 +947,8 @@ static void halbtc8723b1ant_LpsRpwm(struct btc_coexist *btcoexist, static void halbtc8723b1ant_sw_mechanism(struct btc_coexist *btcoexist, bool low_penalty_ra) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, - "[BTCoex], SM[LpRA] = %d\n", low_penalty_ra); + btc_alg_dbg(ALGO_BT_MONITOR, + "[BTCoex], SM[LpRA] = %d\n", low_penalty_ra); halbtc8723b1ant_low_penalty_ra(btcoexist, NORMAL_EXEC, low_penalty_ra); } @@ -1174,13 +1163,13 @@ static void halbtc8723b1ant_ps_tdma(struct btc_coexist *btcoexist, if (!force_exec) { if (coex_dm->cur_ps_tdma_on) - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, - "[BTCoex], ******** TDMA(on, %d) *********\n", - coex_dm->cur_ps_tdma); + btc_alg_dbg(ALGO_TRACE_FW_DETAIL, + "[BTCoex], ******** TDMA(on, %d) *********\n", + coex_dm->cur_ps_tdma); else - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, - "[BTCoex], ******** TDMA(off, %d) ********\n", - coex_dm->cur_ps_tdma); + btc_alg_dbg(ALGO_TRACE_FW_DETAIL, + "[BTCoex], ******** TDMA(off, %d) ********\n", + coex_dm->cur_ps_tdma); if ((coex_dm->pre_ps_tdma_on == coex_dm->cur_ps_tdma_on) && (coex_dm->pre_ps_tdma == coex_dm->cur_ps_tdma)) @@ -1394,45 +1383,45 @@ static bool halbtc8723b1ant_is_common_action(struct btc_coexist *btcoexist) if (!wifi_connected && BT_8723B_1ANT_BT_STATUS_NON_CONNECTED_IDLE == coex_dm->bt_status) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], Wifi non connected-idle + BT non connected-idle!!\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], Wifi non connected-idle + BT non connected-idle!!\n"); halbtc8723b1ant_sw_mechanism(btcoexist, false); commom = true; } else if (wifi_connected && (BT_8723B_1ANT_BT_STATUS_NON_CONNECTED_IDLE == coex_dm->bt_status)) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], Wifi connected + BT non connected-idle!!\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], Wifi connected + BT non connected-idle!!\n"); halbtc8723b1ant_sw_mechanism(btcoexist, false); commom = true; } else if (!wifi_connected && (BT_8723B_1ANT_BT_STATUS_CONNECTED_IDLE == coex_dm->bt_status)) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], Wifi non connected-idle + BT connected-idle!!\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], Wifi non connected-idle + BT connected-idle!!\n"); halbtc8723b1ant_sw_mechanism(btcoexist, false); commom = true; } else if (wifi_connected && (BT_8723B_1ANT_BT_STATUS_CONNECTED_IDLE == coex_dm->bt_status)) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], Wifi connected + BT connected-idle!!\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], Wifi connected + BT connected-idle!!\n"); halbtc8723b1ant_sw_mechanism(btcoexist, false); commom = true; } else if (!wifi_connected && (BT_8723B_1ANT_BT_STATUS_CONNECTED_IDLE != coex_dm->bt_status)) { 
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - ("[BTCoex], Wifi non connected-idle + BT Busy!!\n")); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], Wifi non connected-idle + BT Busy!!\n"); halbtc8723b1ant_sw_mechanism(btcoexist, false); commom = true; } else { if (wifi_busy) - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], Wifi Connected-Busy + BT Busy!!\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], Wifi Connected-Busy + BT Busy!!\n"); else - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], Wifi Connected-Idle + BT Busy!!\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], Wifi Connected-Idle + BT Busy!!\n"); commom = false; } @@ -1451,8 +1440,8 @@ static void btc8723b1ant_tdma_dur_adj_for_acl(struct btc_coexist *btcoexist, u8 retry_count = 0, bt_info_ext; bool wifi_busy = false; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, - "[BTCoex], TdmaDurationAdjustForAcl()\n"); + btc_alg_dbg(ALGO_TRACE_FW, + "[BTCoex], TdmaDurationAdjustForAcl()\n"); if (BT_8723B_1ANT_WIFI_STATUS_CONNECTED_BUSY == wifi_status) wifi_busy = true; @@ -1481,8 +1470,8 @@ static void btc8723b1ant_tdma_dur_adj_for_acl(struct btc_coexist *btcoexist, if (!coex_dm->auto_tdma_adjust) { coex_dm->auto_tdma_adjust = true; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, - "[BTCoex], first run TdmaDurationAdjust()!!\n"); + btc_alg_dbg(ALGO_TRACE_FW_DETAIL, + "[BTCoex], first run TdmaDurationAdjust()!!\n"); halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 2); coex_dm->tdma_adj_type = 2; @@ -1513,9 +1502,8 @@ static void btc8723b1ant_tdma_dur_adj_for_acl(struct btc_coexist *btcoexist, up = 0; dn = 0; result = 1; - BTC_PRINT(BTC_MSG_ALGORITHM, - ALGO_TRACE_FW_DETAIL, - "[BTCoex], Increase wifi duration!!\n"); + btc_alg_dbg(ALGO_TRACE_FW_DETAIL, + "[BTCoex], Increase wifi duration!!\n"); } } else if (retry_count <= 3) { up--; @@ -1538,9 +1526,8 @@ static void btc8723b1ant_tdma_dur_adj_for_acl(struct btc_coexist *btcoexist, dn = 0; wait_count = 0; result = -1; - BTC_PRINT(BTC_MSG_ALGORITHM, - ALGO_TRACE_FW_DETAIL, - "[BTCoex], Decrease wifi duration for retryCounter<3!!\n"); + btc_alg_dbg(ALGO_TRACE_FW_DETAIL, + "[BTCoex], Decrease wifi duration for retryCounter<3!!\n"); } } else { if (wait_count == 1) @@ -1556,8 +1543,8 @@ static void btc8723b1ant_tdma_dur_adj_for_acl(struct btc_coexist *btcoexist, dn = 0; wait_count = 0; result = -1; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, - "[BTCoex], Decrease wifi duration for retryCounter>3!!\n"); + btc_alg_dbg(ALGO_TRACE_FW_DETAIL, + "[BTCoex], Decrease wifi duration for retryCounter>3!!\n"); } if (result == -1) { @@ -1602,9 +1589,9 @@ static void btc8723b1ant_tdma_dur_adj_for_acl(struct btc_coexist *btcoexist, } } else { /*no change */ /*if busy / idle change */ - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, - "[BTCoex],********* TDMA(on, %d) ********\n", - coex_dm->cur_ps_tdma); + btc_alg_dbg(ALGO_TRACE_FW_DETAIL, + "[BTCoex],********* TDMA(on, %d) ********\n", + coex_dm->cur_ps_tdma); } if (coex_dm->cur_ps_tdma != 1 && coex_dm->cur_ps_tdma != 2 && @@ -2010,15 +1997,15 @@ static void halbtc8723b1ant_action_wifi_connected(struct btc_coexist *btcoexist) bool scan = false, link = false, roam = false; bool under_4way = false, ap_enable = false; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], CoexForWifiConnect()===>\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], CoexForWifiConnect()===>\n"); btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_4_WAY_PROGRESS, &under_4way); if (under_4way) { halbtc8723b1ant_action_wifi_connected_special_packet(btcoexist); - 
BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], CoexForWifiConnect(), return for wifi is under 4way<===\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], CoexForWifiConnect(), return for wifi is under 4way<===\n"); return; } @@ -2032,8 +2019,8 @@ static void halbtc8723b1ant_action_wifi_connected(struct btc_coexist *btcoexist) else halbtc8723b1ant_action_wifi_connected_special_packet( btcoexist); - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], CoexForWifiConnect(), return for wifi is under scan<===\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], CoexForWifiConnect(), return for wifi is under scan<===\n"); return; } @@ -2102,58 +2089,58 @@ static void btc8723b1ant_run_sw_coex_mech(struct btc_coexist *btcoexist) if (!halbtc8723b1ant_is_common_action(btcoexist)) { switch (coex_dm->cur_algorithm) { case BT_8723B_1ANT_COEX_ALGO_SCO: - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], Action algorithm = SCO.\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], Action algorithm = SCO\n"); halbtc8723b1ant_action_sco(btcoexist); break; case BT_8723B_1ANT_COEX_ALGO_HID: - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], Action algorithm = HID.\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], Action algorithm = HID\n"); halbtc8723b1ant_action_hid(btcoexist); break; case BT_8723B_1ANT_COEX_ALGO_A2DP: - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], Action algorithm = A2DP.\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], Action algorithm = A2DP\n"); halbtc8723b1ant_action_a2dp(btcoexist); break; case BT_8723B_1ANT_COEX_ALGO_A2DP_PANHS: - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], Action algorithm = A2DP+PAN(HS).\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], Action algorithm = A2DP+PAN(HS)\n"); halbtc8723b1ant_action_a2dp_pan_hs(btcoexist); break; case BT_8723B_1ANT_COEX_ALGO_PANEDR: - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], Action algorithm = PAN(EDR).\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], Action algorithm = PAN(EDR)\n"); halbtc8723b1ant_action_pan_edr(btcoexist); break; case BT_8723B_1ANT_COEX_ALGO_PANHS: - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], Action algorithm = HS mode.\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], Action algorithm = HS mode\n"); halbtc8723b1ant_action_pan_hs(btcoexist); break; case BT_8723B_1ANT_COEX_ALGO_PANEDR_A2DP: - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], Action algorithm = PAN+A2DP.\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], Action algorithm = PAN+A2DP\n"); halbtc8723b1ant_action_pan_edr_a2dp(btcoexist); break; case BT_8723B_1ANT_COEX_ALGO_PANEDR_HID: - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], Action algorithm = PAN(EDR)+HID.\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], Action algorithm = PAN(EDR)+HID\n"); halbtc8723b1ant_action_pan_edr_hid(btcoexist); break; case BT_8723B_1ANT_COEX_ALGO_HID_A2DP_PANEDR: - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], Action algorithm = HID+A2DP+PAN.\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], Action algorithm = HID+A2DP+PAN\n"); btc8723b1ant_action_hid_a2dp_pan_edr(btcoexist); break; case BT_8723B_1ANT_COEX_ALGO_HID_A2DP: - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], Action algorithm = HID+A2DP.\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], Action algorithm = HID+A2DP\n"); halbtc8723b1ant_action_hid_a2dp(btcoexist); break; default: - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], Action algorithm = coexist All Off!!\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], Action algorithm = coexist All Off!!\n"); break; } coex_dm->pre_algorithm = 
coex_dm->cur_algorithm; @@ -2171,24 +2158,24 @@ static void halbtc8723b1ant_run_coexist_mechanism(struct btc_coexist *btcoexist) u32 wifi_link_status = 0; u32 num_of_wifi_link = 0; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], RunCoexistMechanism()===>\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], RunCoexistMechanism()===>\n"); if (btcoexist->manual_control) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], RunCoexistMechanism(), return for Manual CTRL <===\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], RunCoexistMechanism(), return for Manual CTRL <===\n"); return; } if (btcoexist->stop_coex_dm) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], RunCoexistMechanism(), return for Stop Coex DM <===\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], RunCoexistMechanism(), return for Stop Coex DM <===\n"); return; } if (coex_sta->under_ips) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], wifi is under IPS !!!\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], wifi is under IPS !!!\n"); return; } @@ -2267,8 +2254,8 @@ static void halbtc8723b1ant_run_coexist_mechanism(struct btc_coexist *btcoexist) if (!wifi_connected) { bool scan = false, link = false, roam = false; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], wifi is non connected-idle !!!\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], wifi is non connected-idle !!!\n"); btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan); btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link); @@ -2305,8 +2292,8 @@ static void halbtc8723b1ant_init_hw_config(struct btc_coexist *btcoexist, u8 u8tmp = 0; u32 cnt_bt_cal_chk = 0; - BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, - "[BTCoex], 1Ant Init HW Config!!\n"); + btc_iface_dbg(INTF_INIT, + "[BTCoex], 1Ant Init HW Config!!\n"); if (backup) {/* backup rf 0x1e value */ coex_dm->backup_arfr_cnt1 = @@ -2333,14 +2320,14 @@ static void halbtc8723b1ant_init_hw_config(struct btc_coexist *btcoexist, u32tmp = btcoexist->btc_read_4byte(btcoexist, 0x49d); cnt_bt_cal_chk++; if (u32tmp & BIT0) { - BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, - "[BTCoex], ########### BT calibration(cnt=%d) ###########\n", - cnt_bt_cal_chk); + btc_iface_dbg(INTF_INIT, + "[BTCoex], ########### BT calibration(cnt=%d) ###########\n", + cnt_bt_cal_chk); mdelay(50); } else { - BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, - "[BTCoex], ********** BT NOT calibration (cnt=%d)**********\n", - cnt_bt_cal_chk); + btc_iface_dbg(INTF_INIT, + "[BTCoex], ********** BT NOT calibration (cnt=%d)**********\n", + cnt_bt_cal_chk); break; } } @@ -2383,8 +2370,8 @@ void ex_halbtc8723b1ant_init_hwconfig(struct btc_coexist *btcoexist) void ex_halbtc8723b1ant_init_coex_dm(struct btc_coexist *btcoexist) { - BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, - "[BTCoex], Coex Mechanism Init!!\n"); + btc_iface_dbg(INTF_INIT, + "[BTCoex], Coex Mechanism Init!!\n"); btcoexist->stop_coex_dm = false; @@ -2677,8 +2664,8 @@ void ex_halbtc8723b1ant_ips_notify(struct btc_coexist *btcoexist, u8 type) return; if (BTC_IPS_ENTER == type) { - BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, - "[BTCoex], IPS ENTER notify\n"); + btc_iface_dbg(INTF_NOTIFY, + "[BTCoex], IPS ENTER notify\n"); coex_sta->under_ips = true; halbtc8723b1ant_SetAntPath(btcoexist, BTC_ANT_PATH_BT, @@ -2689,8 +2676,8 @@ void ex_halbtc8723b1ant_ips_notify(struct btc_coexist *btcoexist, u8 type) NORMAL_EXEC, 0); halbtc8723b1ant_wifi_off_hw_cfg(btcoexist); } else if (BTC_IPS_LEAVE == type) { - BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, - "[BTCoex], IPS LEAVE notify\n"); + btc_iface_dbg(INTF_NOTIFY, + "[BTCoex], IPS 
LEAVE notify\n"); coex_sta->under_ips = false; halbtc8723b1ant_init_hw_config(btcoexist, false); @@ -2705,12 +2692,12 @@ void ex_halbtc8723b1ant_lps_notify(struct btc_coexist *btcoexist, u8 type) return; if (BTC_LPS_ENABLE == type) { - BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, - "[BTCoex], LPS ENABLE notify\n"); + btc_iface_dbg(INTF_NOTIFY, + "[BTCoex], LPS ENABLE notify\n"); coex_sta->under_lps = true; } else if (BTC_LPS_DISABLE == type) { - BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, - "[BTCoex], LPS DISABLE notify\n"); + btc_iface_dbg(INTF_NOTIFY, + "[BTCoex], LPS DISABLE notify\n"); coex_sta->under_lps = false; } } @@ -2753,15 +2740,15 @@ void ex_halbtc8723b1ant_scan_notify(struct btc_coexist *btcoexist, u8 type) } if (BTC_SCAN_START == type) { - BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, - "[BTCoex], SCAN START notify\n"); + btc_iface_dbg(INTF_NOTIFY, + "[BTCoex], SCAN START notify\n"); if (!wifi_connected) /* non-connected scan */ btc8723b1ant_action_wifi_not_conn_scan(btcoexist); else /* wifi is connected */ btc8723b1ant_action_wifi_conn_scan(btcoexist); } else if (BTC_SCAN_FINISH == type) { - BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, - "[BTCoex], SCAN FINISH notify\n"); + btc_iface_dbg(INTF_NOTIFY, + "[BTCoex], SCAN FINISH notify\n"); if (!wifi_connected) /* non-connected scan */ btc8723b1ant_action_wifi_not_conn(btcoexist); else @@ -2802,12 +2789,12 @@ void ex_halbtc8723b1ant_connect_notify(struct btc_coexist *btcoexist, u8 type) } if (BTC_ASSOCIATE_START == type) { - BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, - "[BTCoex], CONNECT START notify\n"); + btc_iface_dbg(INTF_NOTIFY, + "[BTCoex], CONNECT START notify\n"); btc8723b1ant_act_wifi_not_conn_asso_auth(btcoexist); } else if (BTC_ASSOCIATE_FINISH == type) { - BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, - "[BTCoex], CONNECT FINISH notify\n"); + btc_iface_dbg(INTF_NOTIFY, + "[BTCoex], CONNECT FINISH notify\n"); btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED, &wifi_connected); @@ -2830,11 +2817,11 @@ void ex_halbtc8723b1ant_media_status_notify(struct btc_coexist *btcoexist, return; if (BTC_MEDIA_CONNECT == type) - BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, - "[BTCoex], MEDIA connect notify\n"); + btc_iface_dbg(INTF_NOTIFY, + "[BTCoex], MEDIA connect notify\n"); else - BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, - "[BTCoex], MEDIA disconnect notify\n"); + btc_iface_dbg(INTF_NOTIFY, + "[BTCoex], MEDIA disconnect notify\n"); /* only 2.4G we need to inform bt the chnl mask */ btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_CENTRAL_CHNL, @@ -2855,10 +2842,10 @@ void ex_halbtc8723b1ant_media_status_notify(struct btc_coexist *btcoexist, coex_dm->wifi_chnl_info[1] = h2c_parameter[1]; coex_dm->wifi_chnl_info[2] = h2c_parameter[2]; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, - "[BTCoex], FW write 0x66 = 0x%x\n", - h2c_parameter[0] << 16 | h2c_parameter[1] << 8 | - h2c_parameter[2]); + btc_alg_dbg(ALGO_TRACE_FW_EXEC, + "[BTCoex], FW write 0x66 = 0x%x\n", + h2c_parameter[0] << 16 | h2c_parameter[1] << 8 | + h2c_parameter[2]); btcoexist->btc_fill_h2c(btcoexist, 0x66, 3, h2c_parameter); } @@ -2900,8 +2887,8 @@ void ex_halbtc8723b1ant_special_packet_notify(struct btc_coexist *btcoexist, if (BTC_PACKET_DHCP == type || BTC_PACKET_EAPOL == type) { - BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, - "[BTCoex], special Packet(%d) notify\n", type); + btc_iface_dbg(INTF_NOTIFY, + "[BTCoex], special Packet(%d) notify\n", type); halbtc8723b1ant_action_wifi_connected_special_packet(btcoexist); } } @@ -2921,19 +2908,19 @@ void 
ex_halbtc8723b1ant_bt_info_notify(struct btc_coexist *btcoexist, rsp_source = BT_INFO_SRC_8723B_1ANT_WIFI_FW; coex_sta->bt_info_c2h_cnt[rsp_source]++; - BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, - "[BTCoex], Bt info[%d], length=%d, hex data = [", - rsp_source, length); + btc_iface_dbg(INTF_NOTIFY, + "[BTCoex], Bt info[%d], length=%d, hex data = [", + rsp_source, length); for (i = 0; i < length; i++) { coex_sta->bt_info_c2h[rsp_source][i] = tmp_buf[i]; if (i == 1) bt_info = tmp_buf[i]; if (i == length - 1) - BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, - "0x%02x]\n", tmp_buf[i]); + btc_iface_dbg(INTF_NOTIFY, + "0x%02x]\n", tmp_buf[i]); else - BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, - "0x%02x, ", tmp_buf[i]); + btc_iface_dbg(INTF_NOTIFY, + "0x%02x, ", tmp_buf[i]); } if (BT_INFO_SRC_8723B_1ANT_WIFI_FW != rsp_source) { @@ -2950,8 +2937,8 @@ void ex_halbtc8723b1ant_bt_info_notify(struct btc_coexist *btcoexist, * because bt is reset and loss of the info. */ if (coex_sta->bt_info_ext & BIT1) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], BT ext info bit1 check, send wifi BW&Chnl to BT!!\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], BT ext info bit1 check, send wifi BW&Chnl to BT!!\n"); btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED, &wifi_connected); if (wifi_connected) @@ -2965,8 +2952,8 @@ void ex_halbtc8723b1ant_bt_info_notify(struct btc_coexist *btcoexist, if (coex_sta->bt_info_ext & BIT3) { if (!btcoexist->manual_control && !btcoexist->stop_coex_dm) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], BT ext info bit3 check, set BT NOT ignore Wlan active!!\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], BT ext info bit3 check, set BT NOT ignore Wlan active!!\n"); halbtc8723b1ant_ignore_wlan_act(btcoexist, FORCE_EXEC, false); @@ -3021,30 +3008,30 @@ void ex_halbtc8723b1ant_bt_info_notify(struct btc_coexist *btcoexist, if (!(bt_info&BT_INFO_8723B_1ANT_B_CONNECTION)) { coex_dm->bt_status = BT_8723B_1ANT_BT_STATUS_NON_CONNECTED_IDLE; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], BtInfoNotify(), BT Non-Connected idle!\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], BtInfoNotify(), BT Non-Connected idle!\n"); /* connection exists but no busy */ } else if (bt_info == BT_INFO_8723B_1ANT_B_CONNECTION) { coex_dm->bt_status = BT_8723B_1ANT_BT_STATUS_CONNECTED_IDLE; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], BtInfoNotify(), BT Connected-idle!!!\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], BtInfoNotify(), BT Connected-idle!!!\n"); } else if ((bt_info & BT_INFO_8723B_1ANT_B_SCO_ESCO) || (bt_info & BT_INFO_8723B_1ANT_B_SCO_BUSY)) { coex_dm->bt_status = BT_8723B_1ANT_BT_STATUS_SCO_BUSY; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], BtInfoNotify(), BT SCO busy!!!\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], BtInfoNotify(), BT SCO busy!!!\n"); } else if (bt_info & BT_INFO_8723B_1ANT_B_ACL_BUSY) { if (BT_8723B_1ANT_BT_STATUS_ACL_BUSY != coex_dm->bt_status) coex_dm->auto_tdma_adjust = false; coex_dm->bt_status = BT_8723B_1ANT_BT_STATUS_ACL_BUSY; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], BtInfoNotify(), BT ACL busy!!!\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], BtInfoNotify(), BT ACL busy!!!\n"); } else { coex_dm->bt_status = BT_8723B_1ANT_BT_STATUS_MAX; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], BtInfoNotify(), BT Non-Defined state!!\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], BtInfoNotify(), BT Non-Defined state!!\n"); } if ((BT_8723B_1ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) || @@ -3060,7 +3047,7 @@ void 
ex_halbtc8723b1ant_bt_info_notify(struct btc_coexist *btcoexist, void ex_halbtc8723b1ant_halt_notify(struct btc_coexist *btcoexist) { - BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, "[BTCoex], Halt notify\n"); + btc_iface_dbg(INTF_NOTIFY, "[BTCoex], Halt notify\n"); btcoexist->stop_coex_dm = true; @@ -3078,11 +3065,11 @@ void ex_halbtc8723b1ant_halt_notify(struct btc_coexist *btcoexist) void ex_halbtc8723b1ant_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state) { - BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, "[BTCoex], Pnp notify\n"); + btc_iface_dbg(INTF_NOTIFY, "[BTCoex], Pnp notify\n"); if (BTC_WIFI_PNP_SLEEP == pnp_state) { - BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, - "[BTCoex], Pnp notify to SLEEP\n"); + btc_iface_dbg(INTF_NOTIFY, + "[BTCoex], Pnp notify to SLEEP\n"); btcoexist->stop_coex_dm = true; halbtc8723b1ant_SetAntPath(btcoexist, BTC_ANT_PATH_BT, false, true); @@ -3092,8 +3079,8 @@ void ex_halbtc8723b1ant_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state) halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 2); halbtc8723b1ant_wifi_off_hw_cfg(btcoexist); } else if (BTC_WIFI_PNP_WAKE_UP == pnp_state) { - BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, - "[BTCoex], Pnp notify to WAKE UP\n"); + btc_iface_dbg(INTF_NOTIFY, + "[BTCoex], Pnp notify to WAKE UP\n"); btcoexist->stop_coex_dm = false; halbtc8723b1ant_init_hw_config(btcoexist, false); halbtc8723b1ant_init_coex_dm(btcoexist); @@ -3103,8 +3090,8 @@ void ex_halbtc8723b1ant_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state) void ex_halbtc8723b1ant_coex_dm_reset(struct btc_coexist *btcoexist) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], *****************Coex DM Reset****************\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], *****************Coex DM Reset****************\n"); halbtc8723b1ant_init_hw_config(btcoexist, false); btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0); @@ -3119,31 +3106,31 @@ void ex_halbtc8723b1ant_periodical(struct btc_coexist *btcoexist) static u8 dis_ver_info_cnt; u32 fw_ver = 0, bt_patch_ver = 0; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], ==========================Periodical===========================\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], ==========================Periodical===========================\n"); if (dis_ver_info_cnt <= 5) { dis_ver_info_cnt += 1; - BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, - "[BTCoex], ****************************************************************\n"); - BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, - "[BTCoex], Ant PG Num/ Ant Mech/ Ant Pos = %d/ %d/ %d\n", - board_info->pg_ant_num, board_info->btdm_ant_num, - board_info->btdm_ant_pos); - BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, - "[BTCoex], BT stack/ hci ext ver = %s / %d\n", - ((stack_info->profile_notified) ? "Yes" : "No"), - stack_info->hci_version); + btc_iface_dbg(INTF_INIT, + "[BTCoex], ****************************************************************\n"); + btc_iface_dbg(INTF_INIT, + "[BTCoex], Ant PG Num/ Ant Mech/ Ant Pos = %d/ %d/ %d\n", + board_info->pg_ant_num, board_info->btdm_ant_num, + board_info->btdm_ant_pos); + btc_iface_dbg(INTF_INIT, + "[BTCoex], BT stack/ hci ext ver = %s / %d\n", + stack_info->profile_notified ? 
"Yes" : "No", + stack_info->hci_version); btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER, &bt_patch_ver); btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver); - BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, - "[BTCoex], CoexVer/ FwVer/ PatchVer = %d_%x/ 0x%x/ 0x%x(%d)\n", - glcoex_ver_date_8723b_1ant, - glcoex_ver_8723b_1ant, fw_ver, - bt_patch_ver, bt_patch_ver); - BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, - "[BTCoex], ****************************************************************\n"); + btc_iface_dbg(INTF_INIT, + "[BTCoex], CoexVer/ FwVer/ PatchVer = %d_%x/ 0x%x/ 0x%x(%d)\n", + glcoex_ver_date_8723b_1ant, + glcoex_ver_8723b_1ant, fw_ver, + bt_patch_ver, bt_patch_ver); + btc_iface_dbg(INTF_INIT, + "[BTCoex], ****************************************************************\n"); } #if (BT_AUTO_REPORT_ONLY_8723B_1ANT == 0) diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c index 205f78b3ab23..5f488ecaef70 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c @@ -72,32 +72,28 @@ static u8 btc8723b2ant_bt_rssi_state(u8 level_num, u8 rssi_thresh, if (bt_rssi >= rssi_thresh + BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT) { bt_rssi_state = BTC_RSSI_STATE_HIGH; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, - "[BTCoex], BT Rssi state " - "switch to High\n"); + btc_alg_dbg(ALGO_BT_RSSI_STATE, + "[BTCoex], BT Rssi state switch to High\n"); } else { bt_rssi_state = BTC_RSSI_STATE_STAY_LOW; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, - "[BTCoex], BT Rssi state " - "stay at Low\n"); + btc_alg_dbg(ALGO_BT_RSSI_STATE, + "[BTCoex], BT Rssi state stay at Low\n"); } } else { if (bt_rssi < rssi_thresh) { bt_rssi_state = BTC_RSSI_STATE_LOW; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, - "[BTCoex], BT Rssi state " - "switch to Low\n"); + btc_alg_dbg(ALGO_BT_RSSI_STATE, + "[BTCoex], BT Rssi state switch to Low\n"); } else { bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, - "[BTCoex], BT Rssi state " - "stay at High\n"); + btc_alg_dbg(ALGO_BT_RSSI_STATE, + "[BTCoex], BT Rssi state stay at High\n"); } } } else if (level_num == 3) { if (rssi_thresh > rssi_thresh1) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, - "[BTCoex], BT Rssi thresh error!!\n"); + btc_alg_dbg(ALGO_BT_RSSI_STATE, + "[BTCoex], BT Rssi thresh error!!\n"); return coex_sta->pre_bt_rssi_state; } @@ -106,14 +102,12 @@ static u8 btc8723b2ant_bt_rssi_state(u8 level_num, u8 rssi_thresh, if (bt_rssi >= rssi_thresh + BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT) { bt_rssi_state = BTC_RSSI_STATE_MEDIUM; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, - "[BTCoex], BT Rssi state " - "switch to Medium\n"); + btc_alg_dbg(ALGO_BT_RSSI_STATE, + "[BTCoex], BT Rssi state switch to Medium\n"); } else { bt_rssi_state = BTC_RSSI_STATE_STAY_LOW; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, - "[BTCoex], BT Rssi state " - "stay at Low\n"); + btc_alg_dbg(ALGO_BT_RSSI_STATE, + "[BTCoex], BT Rssi state stay at Low\n"); } } else if ((coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_MEDIUM) || @@ -122,31 +116,26 @@ static u8 btc8723b2ant_bt_rssi_state(u8 level_num, u8 rssi_thresh, if (bt_rssi >= rssi_thresh1 + BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT) { bt_rssi_state = BTC_RSSI_STATE_HIGH; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, - "[BTCoex], BT Rssi state " - "switch to High\n"); + btc_alg_dbg(ALGO_BT_RSSI_STATE, + 
"[BTCoex], BT Rssi state switch to High\n"); } else if (bt_rssi < rssi_thresh) { bt_rssi_state = BTC_RSSI_STATE_LOW; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, - "[BTCoex], BT Rssi state " - "switch to Low\n"); + btc_alg_dbg(ALGO_BT_RSSI_STATE, + "[BTCoex], BT Rssi state switch to Low\n"); } else { bt_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, - "[BTCoex], BT Rssi state " - "stay at Medium\n"); + btc_alg_dbg(ALGO_BT_RSSI_STATE, + "[BTCoex], BT Rssi state stay at Medium\n"); } } else { if (bt_rssi < rssi_thresh1) { bt_rssi_state = BTC_RSSI_STATE_MEDIUM; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, - "[BTCoex], BT Rssi state " - "switch to Medium\n"); + btc_alg_dbg(ALGO_BT_RSSI_STATE, + "[BTCoex], BT Rssi state switch to Medium\n"); } else { bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, - "[BTCoex], BT Rssi state " - "stay at High\n"); + btc_alg_dbg(ALGO_BT_RSSI_STATE, + "[BTCoex], BT Rssi state stay at High\n"); } } } @@ -173,36 +162,28 @@ static u8 btc8723b2ant_wifi_rssi_state(struct btc_coexist *btcoexist, if (wifi_rssi >= rssi_thresh + BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT) { wifi_rssi_state = BTC_RSSI_STATE_HIGH; - BTC_PRINT(BTC_MSG_ALGORITHM, - ALGO_WIFI_RSSI_STATE, - "[BTCoex], wifi RSSI state " - "switch to High\n"); + btc_alg_dbg(ALGO_WIFI_RSSI_STATE, + "[BTCoex], wifi RSSI state switch to High\n"); } else { wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW; - BTC_PRINT(BTC_MSG_ALGORITHM, - ALGO_WIFI_RSSI_STATE, - "[BTCoex], wifi RSSI state " - "stay at Low\n"); + btc_alg_dbg(ALGO_WIFI_RSSI_STATE, + "[BTCoex], wifi RSSI state stay at Low\n"); } } else { if (wifi_rssi < rssi_thresh) { wifi_rssi_state = BTC_RSSI_STATE_LOW; - BTC_PRINT(BTC_MSG_ALGORITHM, - ALGO_WIFI_RSSI_STATE, - "[BTCoex], wifi RSSI state " - "switch to Low\n"); + btc_alg_dbg(ALGO_WIFI_RSSI_STATE, + "[BTCoex], wifi RSSI state switch to Low\n"); } else { wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH; - BTC_PRINT(BTC_MSG_ALGORITHM, - ALGO_WIFI_RSSI_STATE, - "[BTCoex], wifi RSSI state " - "stay at High\n"); + btc_alg_dbg(ALGO_WIFI_RSSI_STATE, + "[BTCoex], wifi RSSI state stay at High\n"); } } } else if (level_num == 3) { if (rssi_thresh > rssi_thresh1) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, - "[BTCoex], wifi RSSI thresh error!!\n"); + btc_alg_dbg(ALGO_WIFI_RSSI_STATE, + "[BTCoex], wifi RSSI thresh error!!\n"); return coex_sta->pre_wifi_rssi_state[index]; } @@ -213,16 +194,12 @@ static u8 btc8723b2ant_wifi_rssi_state(struct btc_coexist *btcoexist, if (wifi_rssi >= rssi_thresh + BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT) { wifi_rssi_state = BTC_RSSI_STATE_MEDIUM; - BTC_PRINT(BTC_MSG_ALGORITHM, - ALGO_WIFI_RSSI_STATE, - "[BTCoex], wifi RSSI state " - "switch to Medium\n"); + btc_alg_dbg(ALGO_WIFI_RSSI_STATE, + "[BTCoex], wifi RSSI state switch to Medium\n"); } else { wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW; - BTC_PRINT(BTC_MSG_ALGORITHM, - ALGO_WIFI_RSSI_STATE, - "[BTCoex], wifi RSSI state " - "stay at Low\n"); + btc_alg_dbg(ALGO_WIFI_RSSI_STATE, + "[BTCoex], wifi RSSI state stay at Low\n"); } } else if ((coex_sta->pre_wifi_rssi_state[index] == BTC_RSSI_STATE_MEDIUM) || @@ -231,36 +208,26 @@ static u8 btc8723b2ant_wifi_rssi_state(struct btc_coexist *btcoexist, if (wifi_rssi >= rssi_thresh1 + BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT) { wifi_rssi_state = BTC_RSSI_STATE_HIGH; - BTC_PRINT(BTC_MSG_ALGORITHM, - ALGO_WIFI_RSSI_STATE, - "[BTCoex], wifi RSSI state " - "switch to High\n"); + btc_alg_dbg(ALGO_WIFI_RSSI_STATE, + 
"[BTCoex], wifi RSSI state switch to High\n"); } else if (wifi_rssi < rssi_thresh) { wifi_rssi_state = BTC_RSSI_STATE_LOW; - BTC_PRINT(BTC_MSG_ALGORITHM, - ALGO_WIFI_RSSI_STATE, - "[BTCoex], wifi RSSI state " - "switch to Low\n"); + btc_alg_dbg(ALGO_WIFI_RSSI_STATE, + "[BTCoex], wifi RSSI state switch to Low\n"); } else { wifi_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM; - BTC_PRINT(BTC_MSG_ALGORITHM, - ALGO_WIFI_RSSI_STATE, - "[BTCoex], wifi RSSI state " - "stay at Medium\n"); + btc_alg_dbg(ALGO_WIFI_RSSI_STATE, + "[BTCoex], wifi RSSI state stay at Medium\n"); } } else { if (wifi_rssi < rssi_thresh1) { wifi_rssi_state = BTC_RSSI_STATE_MEDIUM; - BTC_PRINT(BTC_MSG_ALGORITHM, - ALGO_WIFI_RSSI_STATE, - "[BTCoex], wifi RSSI state " - "switch to Medium\n"); + btc_alg_dbg(ALGO_WIFI_RSSI_STATE, + "[BTCoex], wifi RSSI state switch to Medium\n"); } else { wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH; - BTC_PRINT(BTC_MSG_ALGORITHM, - ALGO_WIFI_RSSI_STATE, - "[BTCoex], wifi RSSI state " - "stay at High\n"); + btc_alg_dbg(ALGO_WIFI_RSSI_STATE, + "[BTCoex], wifi RSSI state stay at High\n"); } } } @@ -292,12 +259,12 @@ static void btc8723b2ant_monitor_bt_ctr(struct btc_coexist *btcoexist) coex_sta->low_priority_tx = reg_lp_tx; coex_sta->low_priority_rx = reg_lp_rx; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, - "[BTCoex], High Priority Tx/Rx(reg 0x%x)=0x%x(%d)/0x%x(%d)\n", - reg_hp_txrx, reg_hp_tx, reg_hp_tx, reg_hp_rx, reg_hp_rx); - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, - "[BTCoex], Low Priority Tx/Rx(reg 0x%x)=0x%x(%d)/0x%x(%d)\n", - reg_lp_txrx, reg_lp_tx, reg_lp_tx, reg_lp_rx, reg_lp_rx); + btc_alg_dbg(ALGO_BT_MONITOR, + "[BTCoex], High Priority Tx/Rx(reg 0x%x)=0x%x(%d)/0x%x(%d)\n", + reg_hp_txrx, reg_hp_tx, reg_hp_tx, reg_hp_rx, reg_hp_rx); + btc_alg_dbg(ALGO_BT_MONITOR, + "[BTCoex], Low Priority Tx/Rx(reg 0x%x)=0x%x(%d)/0x%x(%d)\n", + reg_lp_txrx, reg_lp_tx, reg_lp_tx, reg_lp_rx, reg_lp_rx); /* reset counter */ btcoexist->btc_write_1byte(btcoexist, 0x76e, 0xc); @@ -311,9 +278,9 @@ static void btc8723b2ant_query_bt_info(struct btc_coexist *btcoexist) h2c_parameter[0] |= BIT0; /* trigger */ - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, - "[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n", - h2c_parameter[0]); + btc_alg_dbg(ALGO_TRACE_FW_EXEC, + "[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n", + h2c_parameter[0]); btcoexist->btc_fill_h2c(btcoexist, 0x61, 1, h2c_parameter); } @@ -427,8 +394,8 @@ static u8 btc8723b2ant_action_algorithm(struct btc_coexist *btcoexist) btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on); if (!bt_link_info->bt_link_exist) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], No BT link exists!!!\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], No BT link exists!!!\n"); return algorithm; } @@ -443,27 +410,27 @@ static u8 btc8723b2ant_action_algorithm(struct btc_coexist *btcoexist) if (num_of_diff_profile == 1) { if (bt_link_info->sco_exist) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], SCO only\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], SCO only\n"); algorithm = BT_8723B_2ANT_COEX_ALGO_SCO; } else { if (bt_link_info->hid_exist) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], HID only\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], HID only\n"); algorithm = BT_8723B_2ANT_COEX_ALGO_HID; } else if (bt_link_info->a2dp_exist) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], A2DP only\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], A2DP only\n"); algorithm = BT_8723B_2ANT_COEX_ALGO_A2DP; } else if (bt_link_info->pan_exist) { if 
(bt_hs_on) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], PAN(HS) only\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], PAN(HS) only\n"); algorithm = BT_8723B_2ANT_COEX_ALGO_PANHS; } else { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], PAN(EDR) only\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], PAN(EDR) only\n"); algorithm = BT_8723B_2ANT_COEX_ALGO_PANEDR; } @@ -472,21 +439,21 @@ static u8 btc8723b2ant_action_algorithm(struct btc_coexist *btcoexist) } else if (num_of_diff_profile == 2) { if (bt_link_info->sco_exist) { if (bt_link_info->hid_exist) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], SCO + HID\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], SCO + HID\n"); algorithm = BT_8723B_2ANT_COEX_ALGO_PANEDR_HID; } else if (bt_link_info->a2dp_exist) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], SCO + A2DP ==> SCO\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], SCO + A2DP ==> SCO\n"); algorithm = BT_8723B_2ANT_COEX_ALGO_PANEDR_HID; } else if (bt_link_info->pan_exist) { if (bt_hs_on) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], SCO + PAN(HS)\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], SCO + PAN(HS)\n"); algorithm = BT_8723B_2ANT_COEX_ALGO_SCO; } else { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], SCO + PAN(EDR)\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], SCO + PAN(EDR)\n"); algorithm = BT_8723B_2ANT_COEX_ALGO_PANEDR_HID; } @@ -494,31 +461,31 @@ static u8 btc8723b2ant_action_algorithm(struct btc_coexist *btcoexist) } else { if (bt_link_info->hid_exist && bt_link_info->a2dp_exist) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], HID + A2DP\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], HID + A2DP\n"); algorithm = BT_8723B_2ANT_COEX_ALGO_HID_A2DP; } else if (bt_link_info->hid_exist && bt_link_info->pan_exist) { if (bt_hs_on) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], HID + PAN(HS)\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], HID + PAN(HS)\n"); algorithm = BT_8723B_2ANT_COEX_ALGO_HID; } else { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], HID + PAN(EDR)\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], HID + PAN(EDR)\n"); algorithm = BT_8723B_2ANT_COEX_ALGO_PANEDR_HID; } } else if (bt_link_info->pan_exist && bt_link_info->a2dp_exist) { if (bt_hs_on) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], A2DP + PAN(HS)\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], A2DP + PAN(HS)\n"); algorithm = BT_8723B_2ANT_COEX_ALGO_A2DP_PANHS; } else { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex],A2DP + PAN(EDR)\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex],A2DP + PAN(EDR)\n"); algorithm = BT_8723B_2ANT_COEX_ALGO_PANEDR_A2DP; } @@ -528,37 +495,32 @@ static u8 btc8723b2ant_action_algorithm(struct btc_coexist *btcoexist) if (bt_link_info->sco_exist) { if (bt_link_info->hid_exist && bt_link_info->a2dp_exist) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], SCO + HID + A2DP" - " ==> HID\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], SCO + HID + A2DP ==> HID\n"); algorithm = BT_8723B_2ANT_COEX_ALGO_PANEDR_HID; } else if (bt_link_info->hid_exist && bt_link_info->pan_exist) { if (bt_hs_on) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], SCO + HID + " - "PAN(HS)\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], SCO + HID + PAN(HS)\n"); algorithm = BT_8723B_2ANT_COEX_ALGO_PANEDR_HID; } else { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], SCO + HID + " - "PAN(EDR)\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], SCO + HID + PAN(EDR)\n"); algorithm = BT_8723B_2ANT_COEX_ALGO_PANEDR_HID; } } else if (bt_link_info->pan_exist 
&& bt_link_info->a2dp_exist) { if (bt_hs_on) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], SCO + A2DP + " - "PAN(HS)\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], SCO + A2DP + PAN(HS)\n"); algorithm = BT_8723B_2ANT_COEX_ALGO_PANEDR_HID; } else { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], SCO + A2DP + " - "PAN(EDR) ==> HID\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], SCO + A2DP + PAN(EDR) ==> HID\n"); algorithm = BT_8723B_2ANT_COEX_ALGO_PANEDR_HID; } @@ -568,15 +530,13 @@ static u8 btc8723b2ant_action_algorithm(struct btc_coexist *btcoexist) bt_link_info->pan_exist && bt_link_info->a2dp_exist) { if (bt_hs_on) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], HID + A2DP + " - "PAN(HS)\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], HID + A2DP + PAN(HS)\n"); algorithm = BT_8723B_2ANT_COEX_ALGO_HID_A2DP; } else { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], HID + A2DP + " - "PAN(EDR)\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], HID + A2DP + PAN(EDR)\n"); algorithm = BT_8723B_2ANT_COEX_ALGO_HID_A2DP_PANEDR; } @@ -588,13 +548,11 @@ static u8 btc8723b2ant_action_algorithm(struct btc_coexist *btcoexist) bt_link_info->pan_exist && bt_link_info->a2dp_exist) { if (bt_hs_on) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], Error!!! SCO + HID" - " + A2DP + PAN(HS)\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], Error!!! SCO + HID + A2DP + PAN(HS)\n"); } else { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], SCO + HID + A2DP +" - " PAN(EDR)==>PAN(EDR)+HID\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], SCO + HID + A2DP + PAN(EDR)==>PAN(EDR)+HID\n"); algorithm = BT_8723B_2ANT_COEX_ALGO_PANEDR_HID; } @@ -624,17 +582,15 @@ static bool btc8723b_need_dec_pwr(struct btc_coexist *btcoexist) if (wifi_connected) { if (bt_hs_on) { if (bt_hs_rssi > 37) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, - "[BTCoex], Need to decrease bt " - "power for HS mode!!\n"); + btc_alg_dbg(ALGO_TRACE_FW, + "[BTCoex], Need to decrease bt power for HS mode!!\n"); ret = true; } } else { if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) || (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, - "[BTCoex], Need to decrease bt " - "power for Wifi is connected!!\n"); + btc_alg_dbg(ALGO_TRACE_FW, + "[BTCoex], Need to decrease bt power for Wifi is connected!!\n"); ret = true; } } @@ -653,10 +609,10 @@ static void btc8723b2ant_set_fw_dac_swing_level(struct btc_coexist *btcoexist, */ h2c_parameter[0] = dac_swing_lvl; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, - "[BTCoex], Set Dac Swing Level=0x%x\n", dac_swing_lvl); - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, - "[BTCoex], FW write 0x64=0x%x\n", h2c_parameter[0]); + btc_alg_dbg(ALGO_TRACE_FW_EXEC, + "[BTCoex], Set Dac Swing Level=0x%x\n", dac_swing_lvl); + btc_alg_dbg(ALGO_TRACE_FW_EXEC, + "[BTCoex], FW write 0x64=0x%x\n", h2c_parameter[0]); btcoexist->btc_fill_h2c(btcoexist, 0x64, 1, h2c_parameter); } @@ -671,9 +627,9 @@ static void btc8723b2ant_set_fw_dec_bt_pwr(struct btc_coexist *btcoexist, if (dec_bt_pwr) h2c_parameter[0] |= BIT1; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, - "[BTCoex], decrease Bt Power : %s, FW write 0x62=0x%x\n", - (dec_bt_pwr ? "Yes!!" : "No!!"), h2c_parameter[0]); + btc_alg_dbg(ALGO_TRACE_FW_EXEC, + "[BTCoex], decrease Bt Power : %s, FW write 0x62=0x%x\n", + (dec_bt_pwr ? "Yes!!" 
: "No!!"), h2c_parameter[0]); btcoexist->btc_fill_h2c(btcoexist, 0x62, 1, h2c_parameter); } @@ -681,15 +637,15 @@ static void btc8723b2ant_set_fw_dec_bt_pwr(struct btc_coexist *btcoexist, static void btc8723b2ant_dec_bt_pwr(struct btc_coexist *btcoexist, bool force_exec, bool dec_bt_pwr) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, - "[BTCoex], %s Dec BT power = %s\n", - (force_exec ? "force to" : ""), (dec_bt_pwr ? "ON" : "OFF")); + btc_alg_dbg(ALGO_TRACE_FW, + "[BTCoex], %s Dec BT power = %s\n", + force_exec ? "force to" : "", dec_bt_pwr ? "ON" : "OFF"); coex_dm->cur_dec_bt_pwr = dec_bt_pwr; if (!force_exec) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, - "[BTCoex], bPreDecBtPwr=%d, bCurDecBtPwr=%d\n", - coex_dm->pre_dec_bt_pwr, coex_dm->cur_dec_bt_pwr); + btc_alg_dbg(ALGO_TRACE_FW_DETAIL, + "[BTCoex], bPreDecBtPwr=%d, bCurDecBtPwr=%d\n", + coex_dm->pre_dec_bt_pwr, coex_dm->cur_dec_bt_pwr); if (coex_dm->pre_dec_bt_pwr == coex_dm->cur_dec_bt_pwr) return; @@ -702,17 +658,16 @@ static void btc8723b2ant_dec_bt_pwr(struct btc_coexist *btcoexist, static void btc8723b2ant_fw_dac_swing_lvl(struct btc_coexist *btcoexist, bool force_exec, u8 fw_dac_swing_lvl) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, - "[BTCoex], %s set FW Dac Swing level = %d\n", - (force_exec ? "force to" : ""), fw_dac_swing_lvl); + btc_alg_dbg(ALGO_TRACE_FW, + "[BTCoex], %s set FW Dac Swing level = %d\n", + (force_exec ? "force to" : ""), fw_dac_swing_lvl); coex_dm->cur_fw_dac_swing_lvl = fw_dac_swing_lvl; if (!force_exec) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, - "[BTCoex], preFwDacSwingLvl=%d, " - "curFwDacSwingLvl=%d\n", - coex_dm->pre_fw_dac_swing_lvl, - coex_dm->cur_fw_dac_swing_lvl); + btc_alg_dbg(ALGO_TRACE_FW_DETAIL, + "[BTCoex], preFwDacSwingLvl=%d, curFwDacSwingLvl=%d\n", + coex_dm->pre_fw_dac_swing_lvl, + coex_dm->cur_fw_dac_swing_lvl); if (coex_dm->pre_fw_dac_swing_lvl == coex_dm->cur_fw_dac_swing_lvl) @@ -729,16 +684,16 @@ static void btc8723b2ant_set_sw_rf_rx_lpf_corner(struct btc_coexist *btcoexist, { if (rx_rf_shrink_on) { /* Shrink RF Rx LPF corner */ - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, - "[BTCoex], Shrink RF Rx LPF corner!!\n"); + btc_alg_dbg(ALGO_TRACE_SW_EXEC, + "[BTCoex], Shrink RF Rx LPF corner!!\n"); btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1e, 0xfffff, 0xffffc); } else { /* Resume RF Rx LPF corner */ /* After initialized, we can use coex_dm->btRf0x1eBackup */ if (btcoexist->initilized) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, - "[BTCoex], Resume RF Rx LPF corner!!\n"); + btc_alg_dbg(ALGO_TRACE_SW_EXEC, + "[BTCoex], Resume RF Rx LPF corner!!\n"); btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1e, 0xfffff, coex_dm->bt_rf0x1e_backup); @@ -749,18 +704,17 @@ static void btc8723b2ant_set_sw_rf_rx_lpf_corner(struct btc_coexist *btcoexist, static void btc8723b2ant_rf_shrink(struct btc_coexist *btcoexist, bool force_exec, bool rx_rf_shrink_on) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, - "[BTCoex], %s turn Rx RF Shrink = %s\n", - (force_exec ? "force to" : ""), (rx_rf_shrink_on ? - "ON" : "OFF")); + btc_alg_dbg(ALGO_TRACE_SW, + "[BTCoex], %s turn Rx RF Shrink = %s\n", + (force_exec ? "force to" : ""), (rx_rf_shrink_on ? 
+ "ON" : "OFF")); coex_dm->cur_rf_rx_lpf_shrink = rx_rf_shrink_on; if (!force_exec) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, - "[BTCoex], bPreRfRxLpfShrink=%d, " - "bCurRfRxLpfShrink=%d\n", - coex_dm->pre_rf_rx_lpf_shrink, - coex_dm->cur_rf_rx_lpf_shrink); + btc_alg_dbg(ALGO_TRACE_SW_DETAIL, + "[BTCoex], bPreRfRxLpfShrink=%d, bCurRfRxLpfShrink=%d\n", + coex_dm->pre_rf_rx_lpf_shrink, + coex_dm->cur_rf_rx_lpf_shrink); if (coex_dm->pre_rf_rx_lpf_shrink == coex_dm->cur_rf_rx_lpf_shrink) @@ -788,9 +742,9 @@ static void btc8723b_set_penalty_txrate(struct btc_coexist *btcoexist, h2c_parameter[5] = 0xf9; /*MCS5 or OFDM36*/ } - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, - "[BTCoex], set WiFi Low-Penalty Retry: %s", - (low_penalty_ra ? "ON!!" : "OFF!!")); + btc_alg_dbg(ALGO_TRACE_FW_EXEC, + "[BTCoex], set WiFi Low-Penalty Retry: %s", + (low_penalty_ra ? "ON!!" : "OFF!!")); btcoexist->btc_fill_h2c(btcoexist, 0x69, 6, h2c_parameter); } @@ -799,18 +753,17 @@ static void btc8723b2ant_low_penalty_ra(struct btc_coexist *btcoexist, bool force_exec, bool low_penalty_ra) { /*return; */ - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, - "[BTCoex], %s turn LowPenaltyRA = %s\n", - (force_exec ? "force to" : ""), (low_penalty_ra ? - "ON" : "OFF")); + btc_alg_dbg(ALGO_TRACE_SW, + "[BTCoex], %s turn LowPenaltyRA = %s\n", + (force_exec ? "force to" : ""), (low_penalty_ra ? + "ON" : "OFF")); coex_dm->cur_low_penalty_ra = low_penalty_ra; if (!force_exec) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, - "[BTCoex], bPreLowPenaltyRa=%d, " - "bCurLowPenaltyRa=%d\n", - coex_dm->pre_low_penalty_ra, - coex_dm->cur_low_penalty_ra); + btc_alg_dbg(ALGO_TRACE_SW_DETAIL, + "[BTCoex], bPreLowPenaltyRa=%d, bCurLowPenaltyRa=%d\n", + coex_dm->pre_low_penalty_ra, + coex_dm->cur_low_penalty_ra); if (coex_dm->pre_low_penalty_ra == coex_dm->cur_low_penalty_ra) return; @@ -824,8 +777,8 @@ static void btc8723b2ant_set_dac_swing_reg(struct btc_coexist *btcoexist, u32 level) { u8 val = (u8) level; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, - "[BTCoex], Write SwDacSwing = 0x%x\n", level); + btc_alg_dbg(ALGO_TRACE_SW_EXEC, + "[BTCoex], Write SwDacSwing = 0x%x\n", level); btcoexist->btc_write_1byte_bitmask(btcoexist, 0x883, 0x3e, val); } @@ -843,20 +796,20 @@ static void btc8723b2ant_dac_swing(struct btc_coexist *btcoexist, bool force_exec, bool dac_swing_on, u32 dac_swing_lvl) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, - "[BTCoex], %s turn DacSwing=%s, dac_swing_lvl=0x%x\n", - (force_exec ? "force to" : ""), - (dac_swing_on ? "ON" : "OFF"), dac_swing_lvl); + btc_alg_dbg(ALGO_TRACE_SW, + "[BTCoex], %s turn DacSwing=%s, dac_swing_lvl=0x%x\n", + (force_exec ? "force to" : ""), + (dac_swing_on ? 
"ON" : "OFF"), dac_swing_lvl); coex_dm->cur_dac_swing_on = dac_swing_on; coex_dm->cur_dac_swing_lvl = dac_swing_lvl; if (!force_exec) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, - "[BTCoex], bPreDacSwingOn=%d, preDacSwingLvl=0x%x," - " bCurDacSwingOn=%d, curDacSwingLvl=0x%x\n", - coex_dm->pre_dac_swing_on, coex_dm->pre_dac_swing_lvl, - coex_dm->cur_dac_swing_on, - coex_dm->cur_dac_swing_lvl); + btc_alg_dbg(ALGO_TRACE_SW_DETAIL, + "[BTCoex], bPreDacSwingOn=%d, preDacSwingLvl=0x%x, bCurDacSwingOn=%d, curDacSwingLvl=0x%x\n", + coex_dm->pre_dac_swing_on, + coex_dm->pre_dac_swing_lvl, + coex_dm->cur_dac_swing_on, + coex_dm->cur_dac_swing_lvl); if ((coex_dm->pre_dac_swing_on == coex_dm->cur_dac_swing_on) && (coex_dm->pre_dac_swing_lvl == coex_dm->cur_dac_swing_lvl)) @@ -877,8 +830,8 @@ static void btc8723b2ant_set_agc_table(struct btc_coexist *btcoexist, /* BB AGC Gain Table */ if (agc_table_en) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, - "[BTCoex], BB Agc Table On!\n"); + btc_alg_dbg(ALGO_TRACE_SW_EXEC, + "[BTCoex], BB Agc Table On!\n"); btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x6e1A0001); btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x6d1B0001); btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x6c1C0001); @@ -887,8 +840,8 @@ static void btc8723b2ant_set_agc_table(struct btc_coexist *btcoexist, btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x691F0001); btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x68200001); } else { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, - "[BTCoex], BB Agc Table Off!\n"); + btc_alg_dbg(ALGO_TRACE_SW_EXEC, + "[BTCoex], BB Agc Table Off!\n"); btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xaa1A0001); btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa91B0001); btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa81C0001); @@ -901,15 +854,15 @@ static void btc8723b2ant_set_agc_table(struct btc_coexist *btcoexist, /* RF Gain */ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0xef, 0xfffff, 0x02000); if (agc_table_en) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, - "[BTCoex], Agc Table On!\n"); + btc_alg_dbg(ALGO_TRACE_SW_EXEC, + "[BTCoex], Agc Table On!\n"); btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b, 0xfffff, 0x38fff); btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b, 0xfffff, 0x38ffe); } else { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, - "[BTCoex], Agc Table Off!\n"); + btc_alg_dbg(ALGO_TRACE_SW_EXEC, + "[BTCoex], Agc Table Off!\n"); btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b, 0xfffff, 0x380c3); btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b, @@ -920,15 +873,15 @@ static void btc8723b2ant_set_agc_table(struct btc_coexist *btcoexist, btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0xed, 0xfffff, 0x1); if (agc_table_en) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, - "[BTCoex], Agc Table On!\n"); + btc_alg_dbg(ALGO_TRACE_SW_EXEC, + "[BTCoex], Agc Table On!\n"); btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x40, 0xfffff, 0x38fff); btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x40, 0xfffff, 0x38ffe); } else { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, - "[BTCoex], Agc Table Off!\n"); + btc_alg_dbg(ALGO_TRACE_SW_EXEC, + "[BTCoex], Agc Table Off!\n"); btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x40, 0xfffff, 0x380c3); btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x40, @@ -946,16 +899,17 @@ static void btc8723b2ant_set_agc_table(struct btc_coexist *btcoexist, static void btc8723b2ant_agc_table(struct btc_coexist *btcoexist, bool force_exec, bool agc_table_en) { - BTC_PRINT(BTC_MSG_ALGORITHM, 
ALGO_TRACE_SW, - "[BTCoex], %s %s Agc Table\n", - (force_exec ? "force to" : ""), - (agc_table_en ? "Enable" : "Disable")); + btc_alg_dbg(ALGO_TRACE_SW, + "[BTCoex], %s %s Agc Table\n", + (force_exec ? "force to" : ""), + (agc_table_en ? "Enable" : "Disable")); coex_dm->cur_agc_table_en = agc_table_en; if (!force_exec) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, - "[BTCoex], bPreAgcTableEn=%d, bCurAgcTableEn=%d\n", - coex_dm->pre_agc_table_en, coex_dm->cur_agc_table_en); + btc_alg_dbg(ALGO_TRACE_SW_DETAIL, + "[BTCoex], bPreAgcTableEn=%d, bCurAgcTableEn=%d\n", + coex_dm->pre_agc_table_en, + coex_dm->cur_agc_table_en); if (coex_dm->pre_agc_table_en == coex_dm->cur_agc_table_en) return; @@ -969,20 +923,20 @@ static void btc8723b2ant_set_coex_table(struct btc_coexist *btcoexist, u32 val0x6c0, u32 val0x6c4, u32 val0x6c8, u8 val0x6cc) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, - "[BTCoex], set coex table, set 0x6c0=0x%x\n", val0x6c0); + btc_alg_dbg(ALGO_TRACE_SW_EXEC, + "[BTCoex], set coex table, set 0x6c0=0x%x\n", val0x6c0); btcoexist->btc_write_4byte(btcoexist, 0x6c0, val0x6c0); - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, - "[BTCoex], set coex table, set 0x6c4=0x%x\n", val0x6c4); + btc_alg_dbg(ALGO_TRACE_SW_EXEC, + "[BTCoex], set coex table, set 0x6c4=0x%x\n", val0x6c4); btcoexist->btc_write_4byte(btcoexist, 0x6c4, val0x6c4); - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, - "[BTCoex], set coex table, set 0x6c8=0x%x\n", val0x6c8); + btc_alg_dbg(ALGO_TRACE_SW_EXEC, + "[BTCoex], set coex table, set 0x6c8=0x%x\n", val0x6c8); btcoexist->btc_write_4byte(btcoexist, 0x6c8, val0x6c8); - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, - "[BTCoex], set coex table, set 0x6cc=0x%x\n", val0x6cc); + btc_alg_dbg(ALGO_TRACE_SW_EXEC, + "[BTCoex], set coex table, set 0x6cc=0x%x\n", val0x6cc); btcoexist->btc_write_1byte(btcoexist, 0x6cc, val0x6cc); } @@ -991,29 +945,24 @@ static void btc8723b2ant_coex_table(struct btc_coexist *btcoexist, u32 val0x6c4, u32 val0x6c8, u8 val0x6cc) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, - "[BTCoex], %s write Coex Table 0x6c0=0x%x," - " 0x6c4=0x%x, 0x6c8=0x%x, 0x6cc=0x%x\n", - (force_exec ? "force to" : ""), val0x6c0, - val0x6c4, val0x6c8, val0x6cc); + btc_alg_dbg(ALGO_TRACE_SW, + "[BTCoex], %s write Coex Table 0x6c0=0x%x, 0x6c4=0x%x, 0x6c8=0x%x, 0x6cc=0x%x\n", + force_exec ? 
"force to" : "", + val0x6c0, val0x6c4, val0x6c8, val0x6cc); coex_dm->cur_val0x6c0 = val0x6c0; coex_dm->cur_val0x6c4 = val0x6c4; coex_dm->cur_val0x6c8 = val0x6c8; coex_dm->cur_val0x6cc = val0x6cc; if (!force_exec) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, - "[BTCoex], preVal0x6c0=0x%x, " - "preVal0x6c4=0x%x, preVal0x6c8=0x%x, " - "preVal0x6cc=0x%x !!\n", - coex_dm->pre_val0x6c0, coex_dm->pre_val0x6c4, - coex_dm->pre_val0x6c8, coex_dm->pre_val0x6cc); - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, - "[BTCoex], curVal0x6c0=0x%x, " - "curVal0x6c4=0x%x, curVal0x6c8=0x%x, " - "curVal0x6cc=0x%x !!\n", - coex_dm->cur_val0x6c0, coex_dm->cur_val0x6c4, - coex_dm->cur_val0x6c8, coex_dm->cur_val0x6cc); + btc_alg_dbg(ALGO_TRACE_SW_DETAIL, + "[BTCoex], preVal0x6c0=0x%x, preVal0x6c4=0x%x, preVal0x6c8=0x%x, preVal0x6cc=0x%x !!\n", + coex_dm->pre_val0x6c0, coex_dm->pre_val0x6c4, + coex_dm->pre_val0x6c8, coex_dm->pre_val0x6cc); + btc_alg_dbg(ALGO_TRACE_SW_DETAIL, + "[BTCoex], curVal0x6c0=0x%x, curVal0x6c4=0x%x, curVal0x6c8=0x%x, curVal0x6cc=0x%x !!\n", + coex_dm->cur_val0x6c0, coex_dm->cur_val0x6c4, + coex_dm->cur_val0x6c8, coex_dm->cur_val0x6cc); if ((coex_dm->pre_val0x6c0 == coex_dm->cur_val0x6c0) && (coex_dm->pre_val0x6c4 == coex_dm->cur_val0x6c4) && @@ -1099,9 +1048,9 @@ static void btc8723b2ant_set_fw_ignore_wlan_act(struct btc_coexist *btcoexist, if (enable) h2c_parameter[0] |= BIT0;/* function enable*/ - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, - "[BTCoex], set FW for BT Ignore Wlan_Act, " - "FW write 0x63=0x%x\n", h2c_parameter[0]); + btc_alg_dbg(ALGO_TRACE_FW_EXEC, + "[BTCoex], set FW for BT Ignore Wlan_Act, FW write 0x63=0x%x\n", + h2c_parameter[0]); btcoexist->btc_fill_h2c(btcoexist, 0x63, 1, h2c_parameter); } @@ -1109,17 +1058,16 @@ static void btc8723b2ant_set_fw_ignore_wlan_act(struct btc_coexist *btcoexist, static void btc8723b2ant_ignore_wlan_act(struct btc_coexist *btcoexist, bool force_exec, bool enable) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, - "[BTCoex], %s turn Ignore WlanAct %s\n", - (force_exec ? "force to" : ""), (enable ? "ON" : "OFF")); + btc_alg_dbg(ALGO_TRACE_FW, + "[BTCoex], %s turn Ignore WlanAct %s\n", + (force_exec ? "force to" : ""), (enable ? 
"ON" : "OFF")); coex_dm->cur_ignore_wlan_act = enable; if (!force_exec) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, - "[BTCoex], bPreIgnoreWlanAct = %d, " - "bCurIgnoreWlanAct = %d!!\n", - coex_dm->pre_ignore_wlan_act, - coex_dm->cur_ignore_wlan_act); + btc_alg_dbg(ALGO_TRACE_FW_DETAIL, + "[BTCoex], bPreIgnoreWlanAct = %d, bCurIgnoreWlanAct = %d!!\n", + coex_dm->pre_ignore_wlan_act, + coex_dm->cur_ignore_wlan_act); if (coex_dm->pre_ignore_wlan_act == coex_dm->cur_ignore_wlan_act) @@ -1147,11 +1095,11 @@ static void btc8723b2ant_set_fw_ps_tdma(struct btc_coexist *btcoexist, u8 byte1, coex_dm->ps_tdma_para[3] = byte4; coex_dm->ps_tdma_para[4] = byte5; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, - "[BTCoex], FW write 0x60(5bytes)=0x%x%08x\n", - h2c_parameter[0], - h2c_parameter[1] << 24 | h2c_parameter[2] << 16 | - h2c_parameter[3] << 8 | h2c_parameter[4]); + btc_alg_dbg(ALGO_TRACE_FW_EXEC, + "[BTCoex], FW write 0x60(5bytes)=0x%x%08x\n", + h2c_parameter[0], + h2c_parameter[1] << 24 | h2c_parameter[2] << 16 | + h2c_parameter[3] << 8 | h2c_parameter[4]); btcoexist->btc_fill_h2c(btcoexist, 0x60, 5, h2c_parameter); } @@ -1260,20 +1208,20 @@ static void btc8723b2ant_set_ant_path(struct btc_coexist *btcoexist, static void btc8723b2ant_ps_tdma(struct btc_coexist *btcoexist, bool force_exec, bool turn_on, u8 type) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, - "[BTCoex], %s turn %s PS TDMA, type=%d\n", - (force_exec ? "force to" : ""), - (turn_on ? "ON" : "OFF"), type); + btc_alg_dbg(ALGO_TRACE_FW, + "[BTCoex], %s turn %s PS TDMA, type=%d\n", + (force_exec ? "force to" : ""), + (turn_on ? "ON" : "OFF"), type); coex_dm->cur_ps_tdma_on = turn_on; coex_dm->cur_ps_tdma = type; if (!force_exec) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, - "[BTCoex], bPrePsTdmaOn = %d, bCurPsTdmaOn = %d!!\n", - coex_dm->pre_ps_tdma_on, coex_dm->cur_ps_tdma_on); - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, - "[BTCoex], prePsTdma = %d, curPsTdma = %d!!\n", - coex_dm->pre_ps_tdma, coex_dm->cur_ps_tdma); + btc_alg_dbg(ALGO_TRACE_FW_DETAIL, + "[BTCoex], bPrePsTdmaOn = %d, bCurPsTdmaOn = %d!!\n", + coex_dm->pre_ps_tdma_on, coex_dm->cur_ps_tdma_on); + btc_alg_dbg(ALGO_TRACE_FW_DETAIL, + "[BTCoex], prePsTdma = %d, curPsTdma = %d!!\n", + coex_dm->pre_ps_tdma, coex_dm->cur_ps_tdma); if ((coex_dm->pre_ps_tdma_on == coex_dm->cur_ps_tdma_on) && (coex_dm->pre_ps_tdma == coex_dm->cur_ps_tdma)) @@ -1471,8 +1419,8 @@ static bool btc8723b2ant_is_common_action(struct btc_coexist *btcoexist) btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER, &low_pwr_disable); - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], Wifi non-connected idle!!\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], Wifi non-connected idle!!\n"); btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0); @@ -1495,9 +1443,8 @@ static bool btc8723b2ant_is_common_action(struct btc_coexist *btcoexist) BTC_SET_ACT_DISABLE_LOW_POWER, &low_pwr_disable); - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], Wifi connected + " - "BT non connected-idle!!\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], Wifi connected + BT non connected-idle!!\n"); btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0); @@ -1523,9 +1470,8 @@ static bool btc8723b2ant_is_common_action(struct btc_coexist *btcoexist) if (bt_hs_on) return false; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], Wifi connected + " - "BT connected-idle!!\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], Wifi connected + BT connected-idle!!\n"); 
btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0); @@ -1549,17 +1495,15 @@ static bool btc8723b2ant_is_common_action(struct btc_coexist *btcoexist) &low_pwr_disable); if (wifi_busy) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], Wifi Connected-Busy + " - "BT Busy!!\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], Wifi Connected-Busy + BT Busy!!\n"); common = false; } else { if (bt_hs_on) return false; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], Wifi Connected-Idle + " - "BT Busy!!\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], Wifi Connected-Idle + BT Busy!!\n"); btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0); @@ -1597,9 +1541,8 @@ static void set_tdma_int1(struct btc_coexist *btcoexist, bool tx_pause, { /* Set PS TDMA for max interval == 1 */ if (tx_pause) { - BTC_PRINT(BTC_MSG_ALGORITHM, - ALGO_TRACE_FW_DETAIL, - "[BTCoex], TxPause = 1\n"); + btc_alg_dbg(ALGO_TRACE_FW_DETAIL, + "[BTCoex], TxPause = 1\n"); if (coex_dm->cur_ps_tdma == 71) { btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, @@ -1695,9 +1638,8 @@ static void set_tdma_int1(struct btc_coexist *btcoexist, bool tx_pause, } } } else { - BTC_PRINT(BTC_MSG_ALGORITHM, - ALGO_TRACE_FW_DETAIL, - "[BTCoex], TxPause = 0\n"); + btc_alg_dbg(ALGO_TRACE_FW_DETAIL, + "[BTCoex], TxPause = 0\n"); if (coex_dm->cur_ps_tdma == 5) { btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 71); coex_dm->tdma_adj_type = 71; @@ -1795,9 +1737,8 @@ static void set_tdma_int2(struct btc_coexist *btcoexist, bool tx_pause, { /* Set PS TDMA for max interval == 2 */ if (tx_pause) { - BTC_PRINT(BTC_MSG_ALGORITHM, - ALGO_TRACE_FW_DETAIL, - "[BTCoex], TxPause = 1\n"); + btc_alg_dbg(ALGO_TRACE_FW_DETAIL, + "[BTCoex], TxPause = 1\n"); if (coex_dm->cur_ps_tdma == 1) { btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 6); coex_dm->tdma_adj_type = 6; @@ -1878,9 +1819,8 @@ static void set_tdma_int2(struct btc_coexist *btcoexist, bool tx_pause, } } } else { - BTC_PRINT(BTC_MSG_ALGORITHM, - ALGO_TRACE_FW_DETAIL, - "[BTCoex], TxPause = 0\n"); + btc_alg_dbg(ALGO_TRACE_FW_DETAIL, + "[BTCoex], TxPause = 0\n"); if (coex_dm->cur_ps_tdma == 5) { btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 2); coex_dm->tdma_adj_type = 2; @@ -1968,9 +1908,8 @@ static void set_tdma_int3(struct btc_coexist *btcoexist, bool tx_pause, { /* Set PS TDMA for max interval == 3 */ if (tx_pause) { - BTC_PRINT(BTC_MSG_ALGORITHM, - ALGO_TRACE_FW_DETAIL, - "[BTCoex], TxPause = 1\n"); + btc_alg_dbg(ALGO_TRACE_FW_DETAIL, + "[BTCoex], TxPause = 1\n"); if (coex_dm->cur_ps_tdma == 1) { btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 7); coex_dm->tdma_adj_type = 7; @@ -2051,9 +1990,8 @@ static void set_tdma_int3(struct btc_coexist *btcoexist, bool tx_pause, } } } else { - BTC_PRINT(BTC_MSG_ALGORITHM, - ALGO_TRACE_FW_DETAIL, - "[BTCoex], TxPause = 0\n"); + btc_alg_dbg(ALGO_TRACE_FW_DETAIL, + "[BTCoex], TxPause = 0\n"); if (coex_dm->cur_ps_tdma == 5) { btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 3); coex_dm->tdma_adj_type = 3; @@ -2145,13 +2083,13 @@ static void btc8723b2ant_tdma_duration_adjust(struct btc_coexist *btcoexist, s32 result; u8 retry_count = 0; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, - "[BTCoex], TdmaDurationAdjust()\n"); + btc_alg_dbg(ALGO_TRACE_FW, + "[BTCoex], TdmaDurationAdjust()\n"); if (!coex_dm->auto_tdma_adjust) { coex_dm->auto_tdma_adjust = true; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, - "[BTCoex], first run TdmaDurationAdjust()!!\n"); + btc_alg_dbg(ALGO_TRACE_FW_DETAIL, + "[BTCoex], first run 
TdmaDurationAdjust()!!\n"); if (sco_hid) { if (tx_pause) { if (max_interval == 1) { @@ -2255,11 +2193,11 @@ static void btc8723b2ant_tdma_duration_adjust(struct btc_coexist *btcoexist, } else { /*accquire the BT TRx retry count from BT_Info byte2*/ retry_count = coex_sta->bt_retry_cnt; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, - "[BTCoex], retry_count = %d\n", retry_count); - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, - "[BTCoex], up=%d, dn=%d, m=%d, n=%d, wait_count=%d\n", - up, dn, m, n, wait_count); + btc_alg_dbg(ALGO_TRACE_FW_DETAIL, + "[BTCoex], retry_count = %d\n", retry_count); + btc_alg_dbg(ALGO_TRACE_FW_DETAIL, + "[BTCoex], up=%d, dn=%d, m=%d, n=%d, wait_count=%d\n", + up, dn, m, n, wait_count); result = 0; wait_count++; /* no retry in the last 2-second duration*/ @@ -2276,10 +2214,8 @@ static void btc8723b2ant_tdma_duration_adjust(struct btc_coexist *btcoexist, up = 0; dn = 0; result = 1; - BTC_PRINT(BTC_MSG_ALGORITHM, - ALGO_TRACE_FW_DETAIL, - "[BTCoex], Increase wifi " - "duration!!\n"); + btc_alg_dbg(ALGO_TRACE_FW_DETAIL, + "[BTCoex], Increase wifi duration!!\n"); } /* <=3 retry in the last 2-second duration*/ } else if (retry_count <= 3) { up--; @@ -2302,10 +2238,8 @@ static void btc8723b2ant_tdma_duration_adjust(struct btc_coexist *btcoexist, dn = 0; wait_count = 0; result = -1; - BTC_PRINT(BTC_MSG_ALGORITHM, - ALGO_TRACE_FW_DETAIL, - "[BTCoex], Decrease wifi duration " - "for retry_counter<3!!\n"); + btc_alg_dbg(ALGO_TRACE_FW_DETAIL, + "[BTCoex], Decrease wifi duration for retry_counter<3!!\n"); } } else { if (wait_count == 1) @@ -2321,13 +2255,12 @@ static void btc8723b2ant_tdma_duration_adjust(struct btc_coexist *btcoexist, dn = 0; wait_count = 0; result = -1; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, - "[BTCoex], Decrease wifi duration " - "for retry_counter>3!!\n"); + btc_alg_dbg(ALGO_TRACE_FW_DETAIL, + "[BTCoex], Decrease wifi duration for retry_counter>3!!\n"); } - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, - "[BTCoex], max Interval = %d\n", max_interval); + btc_alg_dbg(ALGO_TRACE_FW_DETAIL, + "[BTCoex], max Interval = %d\n", max_interval); if (max_interval == 1) set_tdma_int1(btcoexist, tx_pause, result); else if (max_interval == 2) @@ -2341,10 +2274,9 @@ static void btc8723b2ant_tdma_duration_adjust(struct btc_coexist *btcoexist, */ if (coex_dm->cur_ps_tdma != coex_dm->tdma_adj_type) { bool scan = false, link = false, roam = false; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, - "[BTCoex], PsTdma type dismatch!!!, " - "curPsTdma=%d, recordPsTdma=%d\n", - coex_dm->cur_ps_tdma, coex_dm->tdma_adj_type); + btc_alg_dbg(ALGO_TRACE_FW_DETAIL, + "[BTCoex], PsTdma type dismatch!!!, curPsTdma=%d, recordPsTdma=%d\n", + coex_dm->cur_ps_tdma, coex_dm->tdma_adj_type); btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan); btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link); @@ -2354,9 +2286,8 @@ static void btc8723b2ant_tdma_duration_adjust(struct btc_coexist *btcoexist, btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, coex_dm->tdma_adj_type); else - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, - "[BTCoex], roaming/link/scan is under" - " progress, will adjust next time!!!\n"); + btc_alg_dbg(ALGO_TRACE_FW_DETAIL, + "[BTCoex], roaming/link/scan is under progress, will adjust next time!!!\n"); } } @@ -2994,27 +2925,26 @@ static void btc8723b2ant_run_coexist_mechanism(struct btc_coexist *btcoexist) { u8 algorithm = 0; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], RunCoexistMechanism()===>\n"); + 
btc_alg_dbg(ALGO_TRACE, + "[BTCoex], RunCoexistMechanism()===>\n"); if (btcoexist->manual_control) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], RunCoexistMechanism(), " - "return for Manual CTRL <===\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], RunCoexistMechanism(), return for Manual CTRL <===\n"); return; } if (coex_sta->under_ips) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], wifi is under IPS !!!\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], wifi is under IPS !!!\n"); return; } algorithm = btc8723b2ant_action_algorithm(btcoexist); if (coex_sta->c2h_bt_inquiry_page && (BT_8723B_2ANT_COEX_ALGO_PANHS != algorithm)) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], BT is under inquiry/page scan !!\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], BT is under inquiry/page scan !!\n"); btc8723b2ant_action_bt_inquiry(btcoexist); return; } else { @@ -3026,84 +2956,75 @@ static void btc8723b2ant_run_coexist_mechanism(struct btc_coexist *btcoexist) } coex_dm->cur_algorithm = algorithm; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, "[BTCoex], Algorithm = %d\n", - coex_dm->cur_algorithm); + btc_alg_dbg(ALGO_TRACE, "[BTCoex], Algorithm = %d\n", + coex_dm->cur_algorithm); if (btc8723b2ant_is_common_action(btcoexist)) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], Action 2-Ant common.\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], Action 2-Ant common\n"); coex_dm->auto_tdma_adjust = false; } else { if (coex_dm->cur_algorithm != coex_dm->pre_algorithm) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], preAlgorithm=%d, " - "curAlgorithm=%d\n", coex_dm->pre_algorithm, - coex_dm->cur_algorithm); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], preAlgorithm=%d, curAlgorithm=%d\n", + coex_dm->pre_algorithm, + coex_dm->cur_algorithm); coex_dm->auto_tdma_adjust = false; } switch (coex_dm->cur_algorithm) { case BT_8723B_2ANT_COEX_ALGO_SCO: - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], Action 2-Ant, algorithm = SCO.\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], Action 2-Ant, algorithm = SCO\n"); btc8723b2ant_action_sco(btcoexist); break; case BT_8723B_2ANT_COEX_ALGO_HID: - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], Action 2-Ant, algorithm = HID.\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], Action 2-Ant, algorithm = HID\n"); btc8723b2ant_action_hid(btcoexist); break; case BT_8723B_2ANT_COEX_ALGO_A2DP: - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], Action 2-Ant, " - "algorithm = A2DP.\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], Action 2-Ant, algorithm = A2DP\n"); btc8723b2ant_action_a2dp(btcoexist); break; case BT_8723B_2ANT_COEX_ALGO_A2DP_PANHS: - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], Action 2-Ant, " - "algorithm = A2DP+PAN(HS).\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], Action 2-Ant, algorithm = A2DP+PAN(HS)\n"); btc8723b2ant_action_a2dp_pan_hs(btcoexist); break; case BT_8723B_2ANT_COEX_ALGO_PANEDR: - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], Action 2-Ant, " - "algorithm = PAN(EDR).\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], Action 2-Ant, algorithm = PAN(EDR)\n"); btc8723b2ant_action_pan_edr(btcoexist); break; case BT_8723B_2ANT_COEX_ALGO_PANHS: - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], Action 2-Ant, " - "algorithm = HS mode.\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], Action 2-Ant, algorithm = HS mode\n"); btc8723b2ant_action_pan_hs(btcoexist); break; case BT_8723B_2ANT_COEX_ALGO_PANEDR_A2DP: - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], Action 2-Ant, " - "algorithm = PAN+A2DP.\n"); + 
btc_alg_dbg(ALGO_TRACE, + "[BTCoex], Action 2-Ant, algorithm = PAN+A2DP\n"); btc8723b2ant_action_pan_edr_a2dp(btcoexist); break; case BT_8723B_2ANT_COEX_ALGO_PANEDR_HID: - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], Action 2-Ant, " - "algorithm = PAN(EDR)+HID.\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], Action 2-Ant, algorithm = PAN(EDR)+HID\n"); btc8723b2ant_action_pan_edr_hid(btcoexist); break; case BT_8723B_2ANT_COEX_ALGO_HID_A2DP_PANEDR: - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], Action 2-Ant, " - "algorithm = HID+A2DP+PAN.\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], Action 2-Ant, algorithm = HID+A2DP+PAN\n"); btc8723b2ant_action_hid_a2dp_pan_edr(btcoexist); break; case BT_8723B_2ANT_COEX_ALGO_HID_A2DP: - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], Action 2-Ant, " - "algorithm = HID+A2DP.\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], Action 2-Ant, algorithm = HID+A2DP\n"); btc8723b2ant_action_hid_a2dp(btcoexist); break; default: - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], Action 2-Ant, " - "algorithm = coexist All Off!!\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], Action 2-Ant, algorithm = coexist All Off!!\n"); btc8723b2ant_coex_alloff(btcoexist); break; } @@ -3131,8 +3052,8 @@ void ex_btc8723b2ant_init_hwconfig(struct btc_coexist *btcoexist) { u8 u8tmp = 0; - BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, - "[BTCoex], 2Ant Init HW Config!!\n"); + btc_iface_dbg(INTF_INIT, + "[BTCoex], 2Ant Init HW Config!!\n"); coex_dm->bt_rf0x1e_backup = btcoexist->btc_get_rf_reg(btcoexist, BTC_RF_A, 0x1e, 0xfffff); @@ -3157,8 +3078,8 @@ void ex_btc8723b2ant_init_hwconfig(struct btc_coexist *btcoexist) void ex_btc8723b2ant_init_coex_dm(struct btc_coexist *btcoexist) { - BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, - "[BTCoex], Coex Mechanism Init!!\n"); + btc_iface_dbg(INTF_INIT, + "[BTCoex], Coex Mechanism Init!!\n"); btc8723b2ant_init_coex_dm(btcoexist); } @@ -3393,15 +3314,15 @@ void ex_btc8723b2ant_display_coex_info(struct btc_coexist *btcoexist) void ex_btc8723b2ant_ips_notify(struct btc_coexist *btcoexist, u8 type) { if (BTC_IPS_ENTER == type) { - BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, - "[BTCoex], IPS ENTER notify\n"); + btc_iface_dbg(INTF_NOTIFY, + "[BTCoex], IPS ENTER notify\n"); coex_sta->under_ips = true; btc8723b2ant_wifioff_hwcfg(btcoexist); btc8723b2ant_ignore_wlan_act(btcoexist, FORCE_EXEC, true); btc8723b2ant_coex_alloff(btcoexist); } else if (BTC_IPS_LEAVE == type) { - BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, - "[BTCoex], IPS LEAVE notify\n"); + btc_iface_dbg(INTF_NOTIFY, + "[BTCoex], IPS LEAVE notify\n"); coex_sta->under_ips = false; ex_btc8723b2ant_init_hwconfig(btcoexist); btc8723b2ant_init_coex_dm(btcoexist); @@ -3412,12 +3333,12 @@ void ex_btc8723b2ant_ips_notify(struct btc_coexist *btcoexist, u8 type) void ex_btc8723b2ant_lps_notify(struct btc_coexist *btcoexist, u8 type) { if (BTC_LPS_ENABLE == type) { - BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, - "[BTCoex], LPS ENABLE notify\n"); + btc_iface_dbg(INTF_NOTIFY, + "[BTCoex], LPS ENABLE notify\n"); coex_sta->under_lps = true; } else if (BTC_LPS_DISABLE == type) { - BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, - "[BTCoex], LPS DISABLE notify\n"); + btc_iface_dbg(INTF_NOTIFY, + "[BTCoex], LPS DISABLE notify\n"); coex_sta->under_lps = false; } } @@ -3425,21 +3346,21 @@ void ex_btc8723b2ant_lps_notify(struct btc_coexist *btcoexist, u8 type) void ex_btc8723b2ant_scan_notify(struct btc_coexist *btcoexist, u8 type) { if (BTC_SCAN_START == type) - BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, - "[BTCoex], SCAN 
START notify\n"); + btc_iface_dbg(INTF_NOTIFY, + "[BTCoex], SCAN START notify\n"); else if (BTC_SCAN_FINISH == type) - BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, - "[BTCoex], SCAN FINISH notify\n"); + btc_iface_dbg(INTF_NOTIFY, + "[BTCoex], SCAN FINISH notify\n"); } void ex_btc8723b2ant_connect_notify(struct btc_coexist *btcoexist, u8 type) { if (BTC_ASSOCIATE_START == type) - BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, - "[BTCoex], CONNECT START notify\n"); + btc_iface_dbg(INTF_NOTIFY, + "[BTCoex], CONNECT START notify\n"); else if (BTC_ASSOCIATE_FINISH == type) - BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, - "[BTCoex], CONNECT FINISH notify\n"); + btc_iface_dbg(INTF_NOTIFY, + "[BTCoex], CONNECT FINISH notify\n"); } void ex_btc8723b2ant_media_status_notify(struct btc_coexist *btcoexist, @@ -3450,11 +3371,11 @@ void ex_btc8723b2ant_media_status_notify(struct btc_coexist *btcoexist, u8 wifi_central_chnl; if (BTC_MEDIA_CONNECT == type) - BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, - "[BTCoex], MEDIA connect notify\n"); + btc_iface_dbg(INTF_NOTIFY, + "[BTCoex], MEDIA connect notify\n"); else - BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, - "[BTCoex], MEDIA disconnect notify\n"); + btc_iface_dbg(INTF_NOTIFY, + "[BTCoex], MEDIA disconnect notify\n"); /* only 2.4G we need to inform bt the chnl mask */ btcoexist->btc_get(btcoexist, @@ -3475,10 +3396,10 @@ void ex_btc8723b2ant_media_status_notify(struct btc_coexist *btcoexist, coex_dm->wifi_chnl_info[1] = h2c_parameter[1]; coex_dm->wifi_chnl_info[2] = h2c_parameter[2]; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, - "[BTCoex], FW write 0x66=0x%x\n", - h2c_parameter[0] << 16 | h2c_parameter[1] << 8 | - h2c_parameter[2]); + btc_alg_dbg(ALGO_TRACE_FW_EXEC, + "[BTCoex], FW write 0x66=0x%x\n", + h2c_parameter[0] << 16 | h2c_parameter[1] << 8 | + h2c_parameter[2]); btcoexist->btc_fill_h2c(btcoexist, 0x66, 3, h2c_parameter); } @@ -3487,8 +3408,8 @@ void ex_btc8723b2ant_special_packet_notify(struct btc_coexist *btcoexist, u8 type) { if (type == BTC_PACKET_DHCP) - BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, - "[BTCoex], DHCP Packet notify\n"); + btc_iface_dbg(INTF_NOTIFY, + "[BTCoex], DHCP Packet notify\n"); } void ex_btc8723b2ant_bt_info_notify(struct btc_coexist *btcoexist, @@ -3506,25 +3427,24 @@ void ex_btc8723b2ant_bt_info_notify(struct btc_coexist *btcoexist, rsp_source = BT_INFO_SRC_8723B_2ANT_WIFI_FW; coex_sta->bt_info_c2h_cnt[rsp_source]++; - BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, - "[BTCoex], Bt info[%d], length=%d, hex data=[", - rsp_source, length); + btc_iface_dbg(INTF_NOTIFY, + "[BTCoex], Bt info[%d], length=%d, hex data=[", + rsp_source, length); for (i = 0; i < length; i++) { coex_sta->bt_info_c2h[rsp_source][i] = tmpbuf[i]; if (i == 1) bt_info = tmpbuf[i]; if (i == length-1) - BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, - "0x%02x]\n", tmpbuf[i]); + btc_iface_dbg(INTF_NOTIFY, + "0x%02x]\n", tmpbuf[i]); else - BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, - "0x%02x, ", tmpbuf[i]); + btc_iface_dbg(INTF_NOTIFY, + "0x%02x, ", tmpbuf[i]); } if (btcoexist->manual_control) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], BtInfoNotify(), " - "return for Manual CTRL<===\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], BtInfoNotify(), return for Manual CTRL<===\n"); return; } @@ -3542,9 +3462,8 @@ void ex_btc8723b2ant_bt_info_notify(struct btc_coexist *btcoexist, because bt is reset and loss of the info. 
*/ if ((coex_sta->bt_info_ext & BIT1)) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], BT ext info bit1 check," - " send wifi BW&Chnl to BT!!\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], BT ext info bit1 check, send wifi BW&Chnl to BT!!\n"); btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED, &wifi_connected); if (wifi_connected) @@ -3558,9 +3477,8 @@ void ex_btc8723b2ant_bt_info_notify(struct btc_coexist *btcoexist, } if ((coex_sta->bt_info_ext & BIT3)) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], BT ext info bit3 check, " - "set BT NOT to ignore Wlan active!!\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], BT ext info bit3 check, set BT NOT to ignore Wlan active!!\n"); btc8723b2ant_ignore_wlan_act(btcoexist, FORCE_EXEC, false); } else { @@ -3613,28 +3531,26 @@ void ex_btc8723b2ant_bt_info_notify(struct btc_coexist *btcoexist, if (!(bt_info & BT_INFO_8723B_2ANT_B_CONNECTION)) { coex_dm->bt_status = BT_8723B_2ANT_BT_STATUS_NON_CONNECTED_IDLE; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], BtInfoNotify(), " - "BT Non-Connected idle!!!\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], BtInfoNotify(), BT Non-Connected idle!!!\n"); /* connection exists but no busy */ } else if (bt_info == BT_INFO_8723B_2ANT_B_CONNECTION) { coex_dm->bt_status = BT_8723B_2ANT_BT_STATUS_CONNECTED_IDLE; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], BtInfoNotify(), BT Connected-idle!!!\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], BtInfoNotify(), BT Connected-idle!!!\n"); } else if ((bt_info & BT_INFO_8723B_2ANT_B_SCO_ESCO) || (bt_info & BT_INFO_8723B_2ANT_B_SCO_BUSY)) { coex_dm->bt_status = BT_8723B_2ANT_BT_STATUS_SCO_BUSY; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], BtInfoNotify(), BT SCO busy!!!\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], BtInfoNotify(), BT SCO busy!!!\n"); } else if (bt_info&BT_INFO_8723B_2ANT_B_ACL_BUSY) { coex_dm->bt_status = BT_8723B_2ANT_BT_STATUS_ACL_BUSY; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], BtInfoNotify(), BT ACL busy!!!\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], BtInfoNotify(), BT ACL busy!!!\n"); } else { coex_dm->bt_status = BT_8723B_2ANT_BT_STATUS_MAX; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], BtInfoNotify(), " - "BT Non-Defined state!!!\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], BtInfoNotify(), BT Non-Defined state!!!\n"); } if ((BT_8723B_2ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) || @@ -3657,7 +3573,7 @@ void ex_btc8723b2ant_bt_info_notify(struct btc_coexist *btcoexist, void ex_btc8723b2ant_halt_notify(struct btc_coexist *btcoexist) { - BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, "[BTCoex], Halt notify\n"); + btc_iface_dbg(INTF_NOTIFY, "[BTCoex], Halt notify\n"); btc8723b2ant_wifioff_hwcfg(btcoexist); btc8723b2ant_ignore_wlan_act(btcoexist, FORCE_EXEC, true); @@ -3671,33 +3587,31 @@ void ex_btc8723b2ant_periodical(struct btc_coexist *btcoexist) static u8 dis_ver_info_cnt; u32 fw_ver = 0, bt_patch_ver = 0; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], ==========================" - "Periodical===========================\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], ==========================Periodical===========================\n"); if (dis_ver_info_cnt <= 5) { dis_ver_info_cnt += 1; - BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, - "[BTCoex], ****************************" - "************************************\n"); - BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, - "[BTCoex], Ant PG Num/ Ant Mech/ " - "Ant Pos = %d/ %d/ %d\n", board_info->pg_ant_num, - board_info->btdm_ant_num, board_info->btdm_ant_pos); 
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, - "[BTCoex], BT stack/ hci ext ver = %s / %d\n", - ((stack_info->profile_notified) ? "Yes" : "No"), - stack_info->hci_version); + btc_iface_dbg(INTF_INIT, + "[BTCoex], ****************************************************************\n"); + btc_iface_dbg(INTF_INIT, + "[BTCoex], Ant PG Num/ Ant Mech/ Ant Pos = %d/ %d/ %d\n", + board_info->pg_ant_num, + board_info->btdm_ant_num, + board_info->btdm_ant_pos); + btc_iface_dbg(INTF_INIT, + "[BTCoex], BT stack/ hci ext ver = %s / %d\n", + stack_info->profile_notified ? "Yes" : "No", + stack_info->hci_version); btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER, &bt_patch_ver); btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver); - BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, - "[BTCoex], CoexVer/ fw_ver/ PatchVer = %d_%x/ 0x%x/ 0x%x(%d)\n", - glcoex_ver_date_8723b_2ant, glcoex_ver_8723b_2ant, - fw_ver, bt_patch_ver, bt_patch_ver); - BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, - "[BTCoex], *****************************" - "***********************************\n"); + btc_iface_dbg(INTF_INIT, + "[BTCoex], CoexVer/ fw_ver/ PatchVer = %d_%x/ 0x%x/ 0x%x(%d)\n", + glcoex_ver_date_8723b_2ant, glcoex_ver_8723b_2ant, + fw_ver, bt_patch_ver, bt_patch_ver); + btc_iface_dbg(INTF_INIT, + "[BTCoex], ****************************************************************\n"); } #if (BT_AUTO_REPORT_ONLY_8723B_2ANT == 0) diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c index 9cecf174a37d..3ce47c70bfa4 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c @@ -76,28 +76,28 @@ static u8 halbtc8821a1ant_bt_rssi_state(u8 level_num, u8 rssi_thresh, if (bt_rssi >= (rssi_thresh + BTC_RSSI_COEX_THRESH_TOL_8821A_1ANT)) { bt_rssi_state = BTC_RSSI_STATE_HIGH; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, - "[BTCoex], BT Rssi state switch to High\n"); + btc_alg_dbg(ALGO_BT_RSSI_STATE, + "[BTCoex], BT Rssi state switch to High\n"); } else { bt_rssi_state = BTC_RSSI_STATE_STAY_LOW; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, - "[BTCoex], BT Rssi state stay at Low\n"); + btc_alg_dbg(ALGO_BT_RSSI_STATE, + "[BTCoex], BT Rssi state stay at Low\n"); } } else { if (bt_rssi < rssi_thresh) { bt_rssi_state = BTC_RSSI_STATE_LOW; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, - "[BTCoex], BT Rssi state switch to Low\n"); + btc_alg_dbg(ALGO_BT_RSSI_STATE, + "[BTCoex], BT Rssi state switch to Low\n"); } else { bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, - "[BTCoex], BT Rssi state stay at High\n"); + btc_alg_dbg(ALGO_BT_RSSI_STATE, + "[BTCoex], BT Rssi state stay at High\n"); } } } else if (level_num == 3) { if (rssi_thresh > rssi_thresh1) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, - "[BTCoex], BT Rssi thresh error!!\n"); + btc_alg_dbg(ALGO_BT_RSSI_STATE, + "[BTCoex], BT Rssi thresh error!!\n"); return coex_sta->pre_bt_rssi_state; } @@ -106,12 +106,12 @@ static u8 halbtc8821a1ant_bt_rssi_state(u8 level_num, u8 rssi_thresh, if (bt_rssi >= (rssi_thresh + BTC_RSSI_COEX_THRESH_TOL_8821A_1ANT)) { bt_rssi_state = BTC_RSSI_STATE_MEDIUM; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, - "[BTCoex], BT Rssi state switch to Medium\n"); + btc_alg_dbg(ALGO_BT_RSSI_STATE, + "[BTCoex], BT Rssi state switch to Medium\n"); } else { bt_rssi_state = BTC_RSSI_STATE_STAY_LOW; - BTC_PRINT(BTC_MSG_ALGORITHM, 
ALGO_BT_RSSI_STATE, - "[BTCoex], BT Rssi state stay at Low\n"); + btc_alg_dbg(ALGO_BT_RSSI_STATE, + "[BTCoex], BT Rssi state stay at Low\n"); } } else if ((coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_MEDIUM) || @@ -120,26 +120,26 @@ static u8 halbtc8821a1ant_bt_rssi_state(u8 level_num, u8 rssi_thresh, if (bt_rssi >= (rssi_thresh1 + BTC_RSSI_COEX_THRESH_TOL_8821A_1ANT)) { bt_rssi_state = BTC_RSSI_STATE_HIGH; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, - "[BTCoex], BT Rssi state switch to High\n"); + btc_alg_dbg(ALGO_BT_RSSI_STATE, + "[BTCoex], BT Rssi state switch to High\n"); } else if (bt_rssi < rssi_thresh) { bt_rssi_state = BTC_RSSI_STATE_LOW; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, - "[BTCoex], BT Rssi state switch to Low\n"); + btc_alg_dbg(ALGO_BT_RSSI_STATE, + "[BTCoex], BT Rssi state switch to Low\n"); } else { bt_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, - "[BTCoex], BT Rssi state stay at Medium\n"); + btc_alg_dbg(ALGO_BT_RSSI_STATE, + "[BTCoex], BT Rssi state stay at Medium\n"); } } else { if (bt_rssi < rssi_thresh1) { bt_rssi_state = BTC_RSSI_STATE_MEDIUM; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, - "[BTCoex], BT Rssi state switch to Medium\n"); + btc_alg_dbg(ALGO_BT_RSSI_STATE, + "[BTCoex], BT Rssi state switch to Medium\n"); } else { bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, - "[BTCoex], BT Rssi state stay at High\n"); + btc_alg_dbg(ALGO_BT_RSSI_STATE, + "[BTCoex], BT Rssi state stay at High\n"); } } } @@ -165,32 +165,28 @@ static u8 halbtc8821a1ant_WifiRssiState(struct btc_coexist *btcoexist, if (wifi_rssi >= (rssi_thresh+BTC_RSSI_COEX_THRESH_TOL_8821A_1ANT)) { wifi_rssi_state = BTC_RSSI_STATE_HIGH; - BTC_PRINT(BTC_MSG_ALGORITHM, - ALGO_WIFI_RSSI_STATE, - "[BTCoex], wifi RSSI state switch to High\n"); + btc_alg_dbg(ALGO_WIFI_RSSI_STATE, + "[BTCoex], wifi RSSI state switch to High\n"); } else { wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW; - BTC_PRINT(BTC_MSG_ALGORITHM, - ALGO_WIFI_RSSI_STATE, - "[BTCoex], wifi RSSI state stay at Low\n"); + btc_alg_dbg(ALGO_WIFI_RSSI_STATE, + "[BTCoex], wifi RSSI state stay at Low\n"); } } else { if (wifi_rssi < rssi_thresh) { wifi_rssi_state = BTC_RSSI_STATE_LOW; - BTC_PRINT(BTC_MSG_ALGORITHM, - ALGO_WIFI_RSSI_STATE, - "[BTCoex], wifi RSSI state switch to Low\n"); + btc_alg_dbg(ALGO_WIFI_RSSI_STATE, + "[BTCoex], wifi RSSI state switch to Low\n"); } else { wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH; - BTC_PRINT(BTC_MSG_ALGORITHM, - ALGO_WIFI_RSSI_STATE, - "[BTCoex], wifi RSSI state stay at High\n"); + btc_alg_dbg(ALGO_WIFI_RSSI_STATE, + "[BTCoex], wifi RSSI state stay at High\n"); } } } else if (level_num == 3) { if (rssi_thresh > rssi_thresh1) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, - "[BTCoex], wifi RSSI thresh error!!\n"); + btc_alg_dbg(ALGO_WIFI_RSSI_STATE, + "[BTCoex], wifi RSSI thresh error!!\n"); return coex_sta->pre_wifi_rssi_state[index]; } @@ -201,14 +197,12 @@ static u8 halbtc8821a1ant_WifiRssiState(struct btc_coexist *btcoexist, if (wifi_rssi >= (rssi_thresh+BTC_RSSI_COEX_THRESH_TOL_8821A_1ANT)) { wifi_rssi_state = BTC_RSSI_STATE_MEDIUM; - BTC_PRINT(BTC_MSG_ALGORITHM, - ALGO_WIFI_RSSI_STATE, - "[BTCoex], wifi RSSI state switch to Medium\n"); + btc_alg_dbg(ALGO_WIFI_RSSI_STATE, + "[BTCoex], wifi RSSI state switch to Medium\n"); } else { wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW; - BTC_PRINT(BTC_MSG_ALGORITHM, - ALGO_WIFI_RSSI_STATE, - "[BTCoex], wifi RSSI state stay at Low\n"); + 
btc_alg_dbg(ALGO_WIFI_RSSI_STATE, + "[BTCoex], wifi RSSI state stay at Low\n"); } } else if ((coex_sta->pre_wifi_rssi_state[index] == BTC_RSSI_STATE_MEDIUM) || @@ -218,31 +212,26 @@ static u8 halbtc8821a1ant_WifiRssiState(struct btc_coexist *btcoexist, (rssi_thresh1 + BTC_RSSI_COEX_THRESH_TOL_8821A_1ANT)) { wifi_rssi_state = BTC_RSSI_STATE_HIGH; - BTC_PRINT(BTC_MSG_ALGORITHM, - ALGO_WIFI_RSSI_STATE, - "[BTCoex], wifi RSSI state switch to High\n"); + btc_alg_dbg(ALGO_WIFI_RSSI_STATE, + "[BTCoex], wifi RSSI state switch to High\n"); } else if (wifi_rssi < rssi_thresh) { wifi_rssi_state = BTC_RSSI_STATE_LOW; - BTC_PRINT(BTC_MSG_ALGORITHM, - ALGO_WIFI_RSSI_STATE, - "[BTCoex], wifi RSSI state switch to Low\n"); + btc_alg_dbg(ALGO_WIFI_RSSI_STATE, + "[BTCoex], wifi RSSI state switch to Low\n"); } else { wifi_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM; - BTC_PRINT(BTC_MSG_ALGORITHM, - ALGO_WIFI_RSSI_STATE, - "[BTCoex], wifi RSSI state stay at Medium\n"); + btc_alg_dbg(ALGO_WIFI_RSSI_STATE, + "[BTCoex], wifi RSSI state stay at Medium\n"); } } else { if (wifi_rssi < rssi_thresh1) { wifi_rssi_state = BTC_RSSI_STATE_MEDIUM; - BTC_PRINT(BTC_MSG_ALGORITHM, - ALGO_WIFI_RSSI_STATE, - "[BTCoex], wifi RSSI state switch to Medium\n"); + btc_alg_dbg(ALGO_WIFI_RSSI_STATE, + "[BTCoex], wifi RSSI state switch to Medium\n"); } else { wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH; - BTC_PRINT(BTC_MSG_ALGORITHM, - ALGO_WIFI_RSSI_STATE, - "[BTCoex], wifi RSSI state stay at High\n"); + btc_alg_dbg(ALGO_WIFI_RSSI_STATE, + "[BTCoex], wifi RSSI state stay at High\n"); } } } @@ -431,9 +420,9 @@ static void halbtc8821a1ant_query_bt_info(struct btc_coexist *btcoexist) h2c_parameter[0] |= BIT0; /* trigger*/ - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, - "[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n", - h2c_parameter[0]); + btc_alg_dbg(ALGO_TRACE_FW_EXEC, + "[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n", + h2c_parameter[0]); btcoexist->btc_fill_h2c(btcoexist, 0x61, 1, h2c_parameter); } @@ -504,8 +493,8 @@ static u8 halbtc8821a1ant_action_algorithm(struct btc_coexist *btcoexist) btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on); if (!bt_link_info->bt_link_exist) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], No BT link exists!!!\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], No BT link exists!!!\n"); return algorithm; } @@ -520,26 +509,26 @@ static u8 halbtc8821a1ant_action_algorithm(struct btc_coexist *btcoexist) if (num_of_diff_profile == 1) { if (bt_link_info->sco_exist) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], BT Profile = SCO only\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], BT Profile = SCO only\n"); algorithm = BT_8821A_1ANT_COEX_ALGO_SCO; } else { if (bt_link_info->hid_exist) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], BT Profile = HID only\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], BT Profile = HID only\n"); algorithm = BT_8821A_1ANT_COEX_ALGO_HID; } else if (bt_link_info->a2dp_exist) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], BT Profile = A2DP only\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], BT Profile = A2DP only\n"); algorithm = BT_8821A_1ANT_COEX_ALGO_A2DP; } else if (bt_link_info->pan_exist) { if (bt_hs_on) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], BT Profile = PAN(HS) only\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], BT Profile = PAN(HS) only\n"); algorithm = BT_8821A_1ANT_COEX_ALGO_PANHS; } else { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], BT Profile = PAN(EDR) only\n"); + btc_alg_dbg(ALGO_TRACE, + 
"[BTCoex], BT Profile = PAN(EDR) only\n"); algorithm = BT_8821A_1ANT_COEX_ALGO_PANEDR; } } @@ -547,50 +536,50 @@ static u8 halbtc8821a1ant_action_algorithm(struct btc_coexist *btcoexist) } else if (num_of_diff_profile == 2) { if (bt_link_info->sco_exist) { if (bt_link_info->hid_exist) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], BT Profile = SCO + HID\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], BT Profile = SCO + HID\n"); algorithm = BT_8821A_1ANT_COEX_ALGO_HID; } else if (bt_link_info->a2dp_exist) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], BT Profile = SCO + A2DP ==> SCO\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], BT Profile = SCO + A2DP ==> SCO\n"); algorithm = BT_8821A_1ANT_COEX_ALGO_SCO; } else if (bt_link_info->pan_exist) { if (bt_hs_on) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], BT Profile = SCO + PAN(HS)\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], BT Profile = SCO + PAN(HS)\n"); algorithm = BT_8821A_1ANT_COEX_ALGO_SCO; } else { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], BT Profile = SCO + PAN(EDR)\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], BT Profile = SCO + PAN(EDR)\n"); algorithm = BT_8821A_1ANT_COEX_ALGO_PANEDR_HID; } } } else { if (bt_link_info->hid_exist && bt_link_info->a2dp_exist) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], BT Profile = HID + A2DP\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], BT Profile = HID + A2DP\n"); algorithm = BT_8821A_1ANT_COEX_ALGO_HID_A2DP; } else if (bt_link_info->hid_exist && bt_link_info->pan_exist) { if (bt_hs_on) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], BT Profile = HID + PAN(HS)\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], BT Profile = HID + PAN(HS)\n"); algorithm = BT_8821A_1ANT_COEX_ALGO_HID_A2DP; } else { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], BT Profile = HID + PAN(EDR)\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], BT Profile = HID + PAN(EDR)\n"); algorithm = BT_8821A_1ANT_COEX_ALGO_PANEDR_HID; } } else if (bt_link_info->pan_exist && bt_link_info->a2dp_exist) { if (bt_hs_on) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], BT Profile = A2DP + PAN(HS)\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], BT Profile = A2DP + PAN(HS)\n"); algorithm = BT_8821A_1ANT_COEX_ALGO_A2DP_PANHS; } else { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], BT Profile = A2DP + PAN(EDR)\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], BT Profile = A2DP + PAN(EDR)\n"); algorithm = BT_8821A_1ANT_COEX_ALGO_PANEDR_A2DP; } } @@ -599,29 +588,29 @@ static u8 halbtc8821a1ant_action_algorithm(struct btc_coexist *btcoexist) if (bt_link_info->sco_exist) { if (bt_link_info->hid_exist && bt_link_info->a2dp_exist) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], BT Profile = SCO + HID + A2DP ==> HID\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], BT Profile = SCO + HID + A2DP ==> HID\n"); algorithm = BT_8821A_1ANT_COEX_ALGO_HID; } else if (bt_link_info->hid_exist && bt_link_info->pan_exist) { if (bt_hs_on) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], BT Profile = SCO + HID + PAN(HS)\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], BT Profile = SCO + HID + PAN(HS)\n"); algorithm = BT_8821A_1ANT_COEX_ALGO_HID_A2DP; } else { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], BT Profile = SCO + HID + PAN(EDR)\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], BT Profile = SCO + HID + PAN(EDR)\n"); algorithm = BT_8821A_1ANT_COEX_ALGO_PANEDR_HID; } } else if (bt_link_info->pan_exist && bt_link_info->a2dp_exist) { if (bt_hs_on) { - BTC_PRINT(BTC_MSG_ALGORITHM, 
ALGO_TRACE, - "[BTCoex], BT Profile = SCO + A2DP + PAN(HS)\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], BT Profile = SCO + A2DP + PAN(HS)\n"); algorithm = BT_8821A_1ANT_COEX_ALGO_SCO; } else { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], BT Profile = SCO + A2DP + PAN(EDR) ==> HID\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], BT Profile = SCO + A2DP + PAN(EDR) ==> HID\n"); algorithm = BT_8821A_1ANT_COEX_ALGO_PANEDR_HID; } } @@ -630,12 +619,12 @@ static u8 halbtc8821a1ant_action_algorithm(struct btc_coexist *btcoexist) bt_link_info->pan_exist && bt_link_info->a2dp_exist) { if (bt_hs_on) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], BT Profile = HID + A2DP + PAN(HS)\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], BT Profile = HID + A2DP + PAN(HS)\n"); algorithm = BT_8821A_1ANT_COEX_ALGO_HID_A2DP; } else { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], BT Profile = HID + A2DP + PAN(EDR)\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], BT Profile = HID + A2DP + PAN(EDR)\n"); algorithm = BT_8821A_1ANT_COEX_ALGO_HID_A2DP_PANEDR; } } @@ -646,12 +635,12 @@ static u8 halbtc8821a1ant_action_algorithm(struct btc_coexist *btcoexist) bt_link_info->pan_exist && bt_link_info->a2dp_exist) { if (bt_hs_on) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], Error!!! BT Profile = SCO + HID + A2DP + PAN(HS)\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], Error!!! BT Profile = SCO + HID + A2DP + PAN(HS)\n"); } else { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], BT Profile = SCO + HID + A2DP + PAN(EDR)==>PAN(EDR)+HID\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], BT Profile = SCO + HID + A2DP + PAN(EDR)==>PAN(EDR)+HID\n"); algorithm = BT_8821A_1ANT_COEX_ALGO_PANEDR_HID; } } @@ -670,10 +659,10 @@ static void halbtc8821a1ant_set_bt_auto_report(struct btc_coexist *btcoexist, if (enable_auto_report) h2c_parameter[0] |= BIT0; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, - "[BTCoex], BT FW auto report : %s, FW write 0x68 = 0x%x\n", - (enable_auto_report ? "Enabled!!" : "Disabled!!"), - h2c_parameter[0]); + btc_alg_dbg(ALGO_TRACE_FW_EXEC, + "[BTCoex], BT FW auto report : %s, FW write 0x68 = 0x%x\n", + (enable_auto_report ? "Enabled!!" : "Disabled!!"), + h2c_parameter[0]); btcoexist->btc_fill_h2c(btcoexist, 0x68, 1, h2c_parameter); } @@ -682,17 +671,16 @@ static void halbtc8821a1ant_bt_auto_report(struct btc_coexist *btcoexist, bool force_exec, bool enable_auto_report) { - BTC_PRINT(BTC_MSG_ALGORITHM, - ALGO_TRACE_FW, "[BTCoex], %s BT Auto report = %s\n", - (force_exec ? "force to" : ""), ((enable_auto_report) ? - "Enabled" : "Disabled")); + btc_alg_dbg(ALGO_TRACE_FW, "[BTCoex], %s BT Auto report = %s\n", + (force_exec ? "force to" : ""), ((enable_auto_report) ? + "Enabled" : "Disabled")); coex_dm->cur_bt_auto_report = enable_auto_report; if (!force_exec) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, - "[BTCoex], pre_bt_auto_report = %d, cur_bt_auto_report = %d\n", - coex_dm->pre_bt_auto_report, - coex_dm->cur_bt_auto_report); + btc_alg_dbg(ALGO_TRACE_FW_DETAIL, + "[BTCoex], pre_bt_auto_report = %d, cur_bt_auto_report = %d\n", + coex_dm->pre_bt_auto_report, + coex_dm->cur_bt_auto_report); if (coex_dm->pre_bt_auto_report == coex_dm->cur_bt_auto_report) return; @@ -718,9 +706,9 @@ static void btc8821a1ant_set_sw_pen_tx_rate(struct btc_coexist *btcoexist, h2c_parameter[5] = 0xf9; /*MCS5 or OFDM36*/ } - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, - "[BTCoex], set WiFi Low-Penalty Retry: %s", - (low_penalty_ra ? "ON!!" 
: "OFF!!")); + btc_alg_dbg(ALGO_TRACE_FW_EXEC, + "[BTCoex], set WiFi Low-Penalty Retry: %s", + (low_penalty_ra ? "ON!!" : "OFF!!")); btcoexist->btc_fill_h2c(btcoexist, 0x69, 6, h2c_parameter); } @@ -743,20 +731,20 @@ static void halbtc8821a1ant_set_coex_table(struct btc_coexist *btcoexist, u32 val0x6c0, u32 val0x6c4, u32 val0x6c8, u8 val0x6cc) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, - "[BTCoex], set coex table, set 0x6c0 = 0x%x\n", val0x6c0); + btc_alg_dbg(ALGO_TRACE_SW_EXEC, + "[BTCoex], set coex table, set 0x6c0 = 0x%x\n", val0x6c0); btcoexist->btc_write_4byte(btcoexist, 0x6c0, val0x6c0); - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, - "[BTCoex], set coex table, set 0x6c4 = 0x%x\n", val0x6c4); + btc_alg_dbg(ALGO_TRACE_SW_EXEC, + "[BTCoex], set coex table, set 0x6c4 = 0x%x\n", val0x6c4); btcoexist->btc_write_4byte(btcoexist, 0x6c4, val0x6c4); - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, - "[BTCoex], set coex table, set 0x6c8 = 0x%x\n", val0x6c8); + btc_alg_dbg(ALGO_TRACE_SW_EXEC, + "[BTCoex], set coex table, set 0x6c8 = 0x%x\n", val0x6c8); btcoexist->btc_write_4byte(btcoexist, 0x6c8, val0x6c8); - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, - "[BTCoex], set coex table, set 0x6cc = 0x%x\n", val0x6cc); + btc_alg_dbg(ALGO_TRACE_SW_EXEC, + "[BTCoex], set coex table, set 0x6cc = 0x%x\n", val0x6cc); btcoexist->btc_write_1byte(btcoexist, 0x6cc, val0x6cc); } @@ -764,10 +752,10 @@ static void halbtc8821a1ant_coex_table(struct btc_coexist *btcoexist, bool force_exec, u32 val0x6c0, u32 val0x6c4, u32 val0x6c8, u8 val0x6cc) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, - "[BTCoex], %s write Coex Table 0x6c0 = 0x%x, 0x6c4 = 0x%x, 0x6c8 = 0x%x, 0x6cc = 0x%x\n", - (force_exec ? "force to" : ""), val0x6c0, val0x6c4, - val0x6c8, val0x6cc); + btc_alg_dbg(ALGO_TRACE_SW, + "[BTCoex], %s write Coex Table 0x6c0 = 0x%x, 0x6c4 = 0x%x, 0x6c8 = 0x%x, 0x6cc = 0x%x\n", + (force_exec ? "force to" : ""), val0x6c0, val0x6c4, + val0x6c8, val0x6cc); coex_dm->cur_val_0x6c0 = val0x6c0; coex_dm->cur_val_0x6c4 = val0x6c4; coex_dm->cur_val_0x6c8 = val0x6c8; @@ -839,9 +827,9 @@ static void btc8821a1ant_set_fw_ignore_wlan_act(struct btc_coexist *btcoexist, if (enable) h2c_parameter[0] |= BIT0; /* function enable*/ - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, - "[BTCoex], set FW for BT Ignore Wlan_Act, FW write 0x63 = 0x%x\n", - h2c_parameter[0]); + btc_alg_dbg(ALGO_TRACE_FW_EXEC, + "[BTCoex], set FW for BT Ignore Wlan_Act, FW write 0x63 = 0x%x\n", + h2c_parameter[0]); btcoexist->btc_fill_h2c(btcoexist, 0x63, 1, h2c_parameter); } @@ -849,16 +837,16 @@ static void btc8821a1ant_set_fw_ignore_wlan_act(struct btc_coexist *btcoexist, static void halbtc8821a1ant_ignore_wlan_act(struct btc_coexist *btcoexist, bool force_exec, bool enable) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, - "[BTCoex], %s turn Ignore WlanAct %s\n", - (force_exec ? "force to" : ""), (enable ? "ON" : "OFF")); + btc_alg_dbg(ALGO_TRACE_FW, + "[BTCoex], %s turn Ignore WlanAct %s\n", + (force_exec ? "force to" : ""), (enable ? 
"ON" : "OFF")); coex_dm->cur_ignore_wlan_act = enable; if (!force_exec) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, - "[BTCoex], pre_ignore_wlan_act = %d, cur_ignore_wlan_act = %d!!\n", - coex_dm->pre_ignore_wlan_act, - coex_dm->cur_ignore_wlan_act); + btc_alg_dbg(ALGO_TRACE_FW_DETAIL, + "[BTCoex], pre_ignore_wlan_act = %d, cur_ignore_wlan_act = %d!!\n", + coex_dm->pre_ignore_wlan_act, + coex_dm->cur_ignore_wlan_act); if (coex_dm->pre_ignore_wlan_act == coex_dm->cur_ignore_wlan_act) @@ -887,13 +875,13 @@ static void halbtc8821a1ant_set_fw_pstdma(struct btc_coexist *btcoexist, coex_dm->ps_tdma_para[3] = byte4; coex_dm->ps_tdma_para[4] = byte5; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, - "[BTCoex], PS-TDMA H2C cmd =0x%x%08x\n", - h2c_parameter[0], - h2c_parameter[1]<<24 | - h2c_parameter[2]<<16 | - h2c_parameter[3]<<8 | - h2c_parameter[4]); + btc_alg_dbg(ALGO_TRACE_FW_EXEC, + "[BTCoex], PS-TDMA H2C cmd =0x%x%08x\n", + h2c_parameter[0], + h2c_parameter[1] << 24 | + h2c_parameter[2] << 16 | + h2c_parameter[3] << 8 | + h2c_parameter[4]); btcoexist->btc_fill_h2c(btcoexist, 0x60, 5, h2c_parameter); } @@ -910,22 +898,22 @@ static void halbtc8821a1ant_set_lps_rpwm(struct btc_coexist *btcoexist, static void halbtc8821a1ant_lps_rpwm(struct btc_coexist *btcoexist, bool force_exec, u8 lps_val, u8 rpwm_val) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, - "[BTCoex], %s set lps/rpwm = 0x%x/0x%x\n", - (force_exec ? "force to" : ""), lps_val, rpwm_val); + btc_alg_dbg(ALGO_TRACE_FW, + "[BTCoex], %s set lps/rpwm = 0x%x/0x%x\n", + (force_exec ? "force to" : ""), lps_val, rpwm_val); coex_dm->cur_lps = lps_val; coex_dm->cur_rpwm = rpwm_val; if (!force_exec) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, - "[BTCoex], LPS-RxBeaconMode = 0x%x, LPS-RPWM = 0x%x!!\n", - coex_dm->cur_lps, coex_dm->cur_rpwm); + btc_alg_dbg(ALGO_TRACE_FW_DETAIL, + "[BTCoex], LPS-RxBeaconMode = 0x%x, LPS-RPWM = 0x%x!!\n", + coex_dm->cur_lps, coex_dm->cur_rpwm); if ((coex_dm->pre_lps == coex_dm->cur_lps) && (coex_dm->pre_rpwm == coex_dm->cur_rpwm)) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, - "[BTCoex], LPS-RPWM_Last = 0x%x, LPS-RPWM_Now = 0x%x!!\n", - coex_dm->pre_rpwm, coex_dm->cur_rpwm); + btc_alg_dbg(ALGO_TRACE_FW_DETAIL, + "[BTCoex], LPS-RPWM_Last = 0x%x, LPS-RPWM_Now = 0x%x!!\n", + coex_dm->pre_rpwm, coex_dm->cur_rpwm); return; } @@ -939,8 +927,8 @@ static void halbtc8821a1ant_lps_rpwm(struct btc_coexist *btcoexist, static void halbtc8821a1ant_sw_mechanism(struct btc_coexist *btcoexist, bool low_penalty_ra) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, - "[BTCoex], SM[LpRA] = %d\n", low_penalty_ra); + btc_alg_dbg(ALGO_BT_MONITOR, + "[BTCoex], SM[LpRA] = %d\n", low_penalty_ra); halbtc8821a1ant_low_penalty_ra(btcoexist, NORMAL_EXEC, low_penalty_ra); } @@ -1036,13 +1024,13 @@ static void halbtc8821a1ant_ps_tdma(struct btc_coexist *btcoexist, if (!force_exec) { if (coex_dm->cur_ps_tdma_on) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, - "[BTCoex], ********** TDMA(on, %d) **********\n", - coex_dm->cur_ps_tdma); + btc_alg_dbg(ALGO_TRACE_FW_DETAIL, + "[BTCoex], ********** TDMA(on, %d) **********\n", + coex_dm->cur_ps_tdma); } else { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, - "[BTCoex], ********** TDMA(off, %d) **********\n", - coex_dm->cur_ps_tdma); + btc_alg_dbg(ALGO_TRACE_FW_DETAIL, + "[BTCoex], ********** TDMA(off, %d) **********\n", + coex_dm->cur_ps_tdma); } if ((coex_dm->pre_ps_tdma_on == coex_dm->cur_ps_tdma_on) && (coex_dm->pre_ps_tdma == coex_dm->cur_ps_tdma)) @@ 
-1253,50 +1241,50 @@ static bool halbtc8821a1ant_is_common_action(struct btc_coexist *btcoexist) if (!wifi_connected && BT_8821A_1ANT_BT_STATUS_NON_CONNECTED_IDLE == coex_dm->bt_status) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], Wifi non connected-idle + BT non connected-idle!!\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], Wifi non connected-idle + BT non connected-idle!!\n"); halbtc8821a1ant_sw_mechanism(btcoexist, false); common = true; } else if (wifi_connected && (BT_8821A_1ANT_BT_STATUS_NON_CONNECTED_IDLE == coex_dm->bt_status)) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], Wifi connected + BT non connected-idle!!\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], Wifi connected + BT non connected-idle!!\n"); halbtc8821a1ant_sw_mechanism(btcoexist, false); common = true; } else if (!wifi_connected && (BT_8821A_1ANT_BT_STATUS_CONNECTED_IDLE == coex_dm->bt_status)) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], Wifi non connected-idle + BT connected-idle!!\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], Wifi non connected-idle + BT connected-idle!!\n"); halbtc8821a1ant_sw_mechanism(btcoexist, false); common = true; } else if (wifi_connected && (BT_8821A_1ANT_BT_STATUS_CONNECTED_IDLE == coex_dm->bt_status)) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], Wifi connected + BT connected-idle!!\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], Wifi connected + BT connected-idle!!\n"); halbtc8821a1ant_sw_mechanism(btcoexist, false); common = true; } else if (!wifi_connected && (BT_8821A_1ANT_BT_STATUS_CONNECTED_IDLE != coex_dm->bt_status)) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], Wifi non connected-idle + BT Busy!!\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], Wifi non connected-idle + BT Busy!!\n"); halbtc8821a1ant_sw_mechanism(btcoexist, false); common = true; } else { if (wifi_busy) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], Wifi Connected-Busy + BT Busy!!\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], Wifi Connected-Busy + BT Busy!!\n"); } else { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], Wifi Connected-Idle + BT Busy!!\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], Wifi Connected-Idle + BT Busy!!\n"); } common = false; @@ -1313,8 +1301,8 @@ static void btc8821a1ant_tdma_dur_adj(struct btc_coexist *btcoexist, long result; u8 retry_count = 0, bt_info_ext; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, - "[BTCoex], TdmaDurationAdjustForAcl()\n"); + btc_alg_dbg(ALGO_TRACE_FW, + "[BTCoex], TdmaDurationAdjustForAcl()\n"); if ((BT_8821A_1ANT_WIFI_STATUS_NON_CONNECTED_ASSO_AUTH_SCAN == wifi_status) || @@ -1342,8 +1330,8 @@ static void btc8821a1ant_tdma_dur_adj(struct btc_coexist *btcoexist, if (!coex_dm->auto_tdma_adjust) { coex_dm->auto_tdma_adjust = true; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, - "[BTCoex], first run TdmaDurationAdjust()!!\n"); + btc_alg_dbg(ALGO_TRACE_FW_DETAIL, + "[BTCoex], first run TdmaDurationAdjust()!!\n"); halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 2); coex_dm->tdma_adj_type = 2; @@ -1378,9 +1366,8 @@ static void btc8821a1ant_tdma_dur_adj(struct btc_coexist *btcoexist, up = 0; dn = 0; result = 1; - BTC_PRINT(BTC_MSG_ALGORITHM, - ALGO_TRACE_FW_DETAIL, - "[BTCoex], Increase wifi duration!!\n"); + btc_alg_dbg(ALGO_TRACE_FW_DETAIL, + "[BTCoex], Increase wifi duration!!\n"); } } else if (retry_count <= 3) { /* <=3 retry in the last 2-second duration*/ @@ -1410,9 +1397,8 @@ static void btc8821a1ant_tdma_dur_adj(struct btc_coexist *btcoexist, dn = 0; wait_count = 0; result = -1; - 
BTC_PRINT(BTC_MSG_ALGORITHM, - ALGO_TRACE_FW_DETAIL, - "[BTCoex], Decrease wifi duration for retryCounter<3!!\n"); + btc_alg_dbg(ALGO_TRACE_FW_DETAIL, + "[BTCoex], Decrease wifi duration for retryCounter<3!!\n"); } } else { /* retry count > 3, if retry count > 3 happens once, @@ -1433,8 +1419,8 @@ static void btc8821a1ant_tdma_dur_adj(struct btc_coexist *btcoexist, dn = 0; wait_count = 0; result = -1; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, - "[BTCoex], Decrease wifi duration for retryCounter>3!!\n"); + btc_alg_dbg(ALGO_TRACE_FW_DETAIL, + "[BTCoex], Decrease wifi duration for retryCounter>3!!\n"); } if (result == -1) { @@ -1479,9 +1465,9 @@ static void btc8821a1ant_tdma_dur_adj(struct btc_coexist *btcoexist, } } else { /*no change*/ - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, - "[BTCoex], ********** TDMA(on, %d) **********\n", - coex_dm->cur_ps_tdma); + btc_alg_dbg(ALGO_TRACE_FW_DETAIL, + "[BTCoex], ********** TDMA(on, %d) **********\n", + coex_dm->cur_ps_tdma); } if (coex_dm->cur_ps_tdma != 1 && @@ -1603,27 +1589,27 @@ static void btc8821a1ant_mon_bt_en_dis(struct btc_coexist *btcoexist) bt_disabled = false; btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_DISABLE, &bt_disabled); - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, - "[BTCoex], BT is enabled !!\n"); + btc_alg_dbg(ALGO_BT_MONITOR, + "[BTCoex], BT is enabled !!\n"); } else { bt_disable_cnt++; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, - "[BTCoex], bt all counters = 0, %d times!!\n", - bt_disable_cnt); + btc_alg_dbg(ALGO_BT_MONITOR, + "[BTCoex], bt all counters = 0, %d times!!\n", + bt_disable_cnt); if (bt_disable_cnt >= 2) { bt_disabled = true; btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_DISABLE, &bt_disabled); - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, - "[BTCoex], BT is disabled !!\n"); + btc_alg_dbg(ALGO_BT_MONITOR, + "[BTCoex], BT is disabled !!\n"); halbtc8821a1ant_action_wifi_only(btcoexist); } } if (pre_bt_disabled != bt_disabled) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, - "[BTCoex], BT is from %s to %s!!\n", - (pre_bt_disabled ? "disabled" : "enabled"), - (bt_disabled ? "disabled" : "enabled")); + btc_alg_dbg(ALGO_BT_MONITOR, + "[BTCoex], BT is from %s to %s!!\n", + (pre_bt_disabled ? "disabled" : "enabled"), + (bt_disabled ? 
"disabled" : "enabled")); pre_bt_disabled = bt_disabled; if (bt_disabled) { btcoexist->btc_set(btcoexist, BTC_SET_ACT_LEAVE_LPS, @@ -1897,15 +1883,15 @@ static void halbtc8821a1ant_action_wifi_connected(struct btc_coexist *btcoexist) bool scan = false, link = false, roam = false; bool under_4way = false; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], CoexForWifiConnect()===>\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], CoexForWifiConnect()===>\n"); btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_4_WAY_PROGRESS, &under_4way); if (under_4way) { btc8821a1ant_act_wifi_conn_sp_pkt(btcoexist); - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], CoexForWifiConnect(), return for wifi is under 4way<===\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], CoexForWifiConnect(), return for wifi is under 4way<===\n"); return; } @@ -1914,8 +1900,8 @@ static void halbtc8821a1ant_action_wifi_connected(struct btc_coexist *btcoexist) btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam); if (scan || link || roam) { halbtc8821a1ant_action_wifi_connected_scan(btcoexist); - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], CoexForWifiConnect(), return for wifi is under scan<===\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], CoexForWifiConnect(), return for wifi is under scan<===\n"); return; } @@ -1976,58 +1962,58 @@ static void btc8821a1ant_run_sw_coex_mech(struct btc_coexist *btcoexist) if (!halbtc8821a1ant_is_common_action(btcoexist)) { switch (coex_dm->cur_algorithm) { case BT_8821A_1ANT_COEX_ALGO_SCO: - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], Action algorithm = SCO.\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], Action algorithm = SCO\n"); halbtc8821a1ant_action_sco(btcoexist); break; case BT_8821A_1ANT_COEX_ALGO_HID: - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], Action algorithm = HID.\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], Action algorithm = HID\n"); halbtc8821a1ant_action_hid(btcoexist); break; case BT_8821A_1ANT_COEX_ALGO_A2DP: - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], Action algorithm = A2DP.\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], Action algorithm = A2DP\n"); halbtc8821a1ant_action_a2dp(btcoexist); break; case BT_8821A_1ANT_COEX_ALGO_A2DP_PANHS: - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], Action algorithm = A2DP+PAN(HS).\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], Action algorithm = A2DP+PAN(HS)\n"); halbtc8821a1ant_action_a2dp_pan_hs(btcoexist); break; case BT_8821A_1ANT_COEX_ALGO_PANEDR: - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], Action algorithm = PAN(EDR).\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], Action algorithm = PAN(EDR)\n"); halbtc8821a1ant_action_pan_edr(btcoexist); break; case BT_8821A_1ANT_COEX_ALGO_PANHS: - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], Action algorithm = HS mode.\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], Action algorithm = HS mode\n"); halbtc8821a1ant_action_pan_hs(btcoexist); break; case BT_8821A_1ANT_COEX_ALGO_PANEDR_A2DP: - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], Action algorithm = PAN+A2DP.\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], Action algorithm = PAN+A2DP\n"); halbtc8821a1ant_action_pan_edr_a2dp(btcoexist); break; case BT_8821A_1ANT_COEX_ALGO_PANEDR_HID: - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], Action algorithm = PAN(EDR)+HID.\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], Action algorithm = PAN(EDR)+HID\n"); halbtc8821a1ant_action_pan_edr_hid(btcoexist); break; case BT_8821A_1ANT_COEX_ALGO_HID_A2DP_PANEDR: - BTC_PRINT(BTC_MSG_ALGORITHM, 
ALGO_TRACE, - "[BTCoex], Action algorithm = HID+A2DP+PAN.\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], Action algorithm = HID+A2DP+PAN\n"); btc8821a1ant_action_hid_a2dp_pan_edr(btcoexist); break; case BT_8821A_1ANT_COEX_ALGO_HID_A2DP: - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], Action algorithm = HID+A2DP.\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], Action algorithm = HID+A2DP\n"); halbtc8821a1ant_action_hid_a2dp(btcoexist); break; default: - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], Action algorithm = coexist All Off!!\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], Action algorithm = coexist All Off!!\n"); /*halbtc8821a1ant_coex_all_off(btcoexist);*/ break; } @@ -2045,31 +2031,31 @@ static void halbtc8821a1ant_run_coexist_mechanism(struct btc_coexist *btcoexist) u8 wifi_rssi_state = BTC_RSSI_STATE_HIGH; bool wifi_under_5g = false; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], RunCoexistMechanism()===>\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], RunCoexistMechanism()===>\n"); if (btcoexist->manual_control) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], RunCoexistMechanism(), return for Manual CTRL <===\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], RunCoexistMechanism(), return for Manual CTRL <===\n"); return; } if (btcoexist->stop_coex_dm) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], RunCoexistMechanism(), return for Stop Coex DM <===\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], RunCoexistMechanism(), return for Stop Coex DM <===\n"); return; } if (coex_sta->under_ips) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], wifi is under IPS !!!\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], wifi is under IPS !!!\n"); return; } btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g); if (wifi_under_5g) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], RunCoexistMechanism(), return for 5G <===\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], RunCoexistMechanism(), return for 5G <===\n"); halbtc8821a1ant_coex_under_5g(btcoexist); return; } @@ -2135,8 +2121,8 @@ static void halbtc8821a1ant_run_coexist_mechanism(struct btc_coexist *btcoexist) if (!wifi_connected) { bool scan = false, link = false, roam = false; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], wifi is non connected-idle !!!\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], wifi is non connected-idle !!!\n"); btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan); btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link); @@ -2168,8 +2154,8 @@ static void halbtc8821a1ant_init_hw_config(struct btc_coexist *btcoexist, u8 u1_tmp = 0; bool wifi_under_5g = false; - BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, - "[BTCoex], 1Ant Init HW Config!!\n"); + btc_iface_dbg(INTF_INIT, + "[BTCoex], 1Ant Init HW Config!!\n"); if (back_up) { coex_dm->backup_arfr_cnt1 = btcoexist->btc_read_4byte(btcoexist, @@ -2220,8 +2206,8 @@ void ex_halbtc8821a1ant_init_hwconfig(struct btc_coexist *btcoexist) void ex_halbtc8821a1ant_init_coex_dm(struct btc_coexist *btcoexist) { - BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, - "[BTCoex], Coex Mechanism Init!!\n"); + btc_iface_dbg(INTF_INIT, + "[BTCoex], Coex Mechanism Init!!\n"); btcoexist->stop_coex_dm = false; @@ -2515,8 +2501,8 @@ void ex_halbtc8821a1ant_ips_notify(struct btc_coexist *btcoexist, u8 type) return; if (BTC_IPS_ENTER == type) { - BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, - "[BTCoex], IPS ENTER notify\n"); + btc_iface_dbg(INTF_NOTIFY, + "[BTCoex], IPS ENTER notify\n"); coex_sta->under_ips = true; 
halbtc8821a1ant_set_ant_path(btcoexist, BTC_ANT_PATH_BT, false, true); @@ -2525,8 +2511,8 @@ void ex_halbtc8821a1ant_ips_notify(struct btc_coexist *btcoexist, u8 type) halbtc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0); } else if (BTC_IPS_LEAVE == type) { - BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, - "[BTCoex], IPS LEAVE notify\n"); + btc_iface_dbg(INTF_NOTIFY, + "[BTCoex], IPS LEAVE notify\n"); coex_sta->under_ips = false; halbtc8821a1ant_run_coexist_mechanism(btcoexist); @@ -2539,12 +2525,12 @@ void ex_halbtc8821a1ant_lps_notify(struct btc_coexist *btcoexist, u8 type) return; if (BTC_LPS_ENABLE == type) { - BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, - "[BTCoex], LPS ENABLE notify\n"); + btc_iface_dbg(INTF_NOTIFY, + "[BTCoex], LPS ENABLE notify\n"); coex_sta->under_Lps = true; } else if (BTC_LPS_DISABLE == type) { - BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, - "[BTCoex], LPS DISABLE notify\n"); + btc_iface_dbg(INTF_NOTIFY, + "[BTCoex], LPS DISABLE notify\n"); coex_sta->under_Lps = false; } } @@ -2574,8 +2560,8 @@ void ex_halbtc8821a1ant_scan_notify(struct btc_coexist *btcoexist, u8 type) } if (BTC_SCAN_START == type) { - BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, - "[BTCoex], SCAN START notify\n"); + btc_iface_dbg(INTF_NOTIFY, + "[BTCoex], SCAN START notify\n"); if (!wifi_connected) { /* non-connected scan*/ btc8821a1ant_act_wifi_not_conn_scan(btcoexist); @@ -2584,8 +2570,8 @@ void ex_halbtc8821a1ant_scan_notify(struct btc_coexist *btcoexist, u8 type) halbtc8821a1ant_action_wifi_connected_scan(btcoexist); } } else if (BTC_SCAN_FINISH == type) { - BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, - "[BTCoex], SCAN FINISH notify\n"); + btc_iface_dbg(INTF_NOTIFY, + "[BTCoex], SCAN FINISH notify\n"); if (!wifi_connected) { /* non-connected scan*/ halbtc8821a1ant_action_wifi_not_connected(btcoexist); @@ -2614,12 +2600,12 @@ void ex_halbtc8821a1ant_connect_notify(struct btc_coexist *btcoexist, u8 type) } if (BTC_ASSOCIATE_START == type) { - BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, - "[BTCoex], CONNECT START notify\n"); + btc_iface_dbg(INTF_NOTIFY, + "[BTCoex], CONNECT START notify\n"); btc8821a1ant_act_wifi_not_conn_scan(btcoexist); } else if (BTC_ASSOCIATE_FINISH == type) { - BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, - "[BTCoex], CONNECT FINISH notify\n"); + btc_iface_dbg(INTF_NOTIFY, + "[BTCoex], CONNECT FINISH notify\n"); btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED, &wifi_connected); @@ -2645,11 +2631,11 @@ void ex_halbtc8821a1ant_media_status_notify(struct btc_coexist *btcoexist, return; if (BTC_MEDIA_CONNECT == type) { - BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, - "[BTCoex], MEDIA connect notify\n"); + btc_iface_dbg(INTF_NOTIFY, + "[BTCoex], MEDIA connect notify\n"); } else { - BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, - "[BTCoex], MEDIA disconnect notify\n"); + btc_iface_dbg(INTF_NOTIFY, + "[BTCoex], MEDIA disconnect notify\n"); } /* only 2.4G we need to inform bt the chnl mask*/ @@ -2672,9 +2658,11 @@ void ex_halbtc8821a1ant_media_status_notify(struct btc_coexist *btcoexist, coex_dm->wifi_chnl_info[1] = h2c_parameter[1]; coex_dm->wifi_chnl_info[2] = h2c_parameter[2]; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, - "[BTCoex], FW write 0x66 = 0x%x\n", - h2c_parameter[0]<<16|h2c_parameter[1]<<8|h2c_parameter[2]); + btc_alg_dbg(ALGO_TRACE_FW_EXEC, + "[BTCoex], FW write 0x66 = 0x%x\n", + h2c_parameter[0] << 16 | + h2c_parameter[1] << 8 | + h2c_parameter[2]); btcoexist->btc_fill_h2c(btcoexist, 0x66, 3, h2c_parameter); } @@ -2702,8 +2690,8 @@ void 
ex_halbtc8821a1ant_special_packet_notify(struct btc_coexist *btcoexist, if (BTC_PACKET_DHCP == type || BTC_PACKET_EAPOL == type) { - BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, - "[BTCoex], special Packet(%d) notify\n", type); + btc_iface_dbg(INTF_NOTIFY, + "[BTCoex], special Packet(%d) notify\n", type); btc8821a1ant_act_wifi_conn_sp_pkt(btcoexist); } } @@ -2727,19 +2715,19 @@ void ex_halbtc8821a1ant_bt_info_notify(struct btc_coexist *btcoexist, rsp_source = BT_INFO_SRC_8821A_1ANT_WIFI_FW; coex_sta->bt_info_c2h_cnt[rsp_source]++; - BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, - "[BTCoex], Bt info[%d], length = %d, hex data = [", - rsp_source, length); + btc_iface_dbg(INTF_NOTIFY, + "[BTCoex], Bt info[%d], length = %d, hex data = [", + rsp_source, length); for (i = 0; i < length; i++) { coex_sta->bt_info_c2h[rsp_source][i] = tmp_buf[i]; if (i == 1) bt_info = tmp_buf[i]; if (i == length-1) { - BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, - "0x%02x]\n", tmp_buf[i]); + btc_iface_dbg(INTF_NOTIFY, + "0x%02x]\n", tmp_buf[i]); } else { - BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, - "0x%02x, ", tmp_buf[i]); + btc_iface_dbg(INTF_NOTIFY, + "0x%02x, ", tmp_buf[i]); } } @@ -2756,8 +2744,8 @@ void ex_halbtc8821a1ant_bt_info_notify(struct btc_coexist *btcoexist, /* Here we need to resend some wifi info to BT*/ /* because bt is reset and loss of the info.*/ if (coex_sta->bt_info_ext & BIT1) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], BT ext info bit1 check, send wifi BW&Chnl to BT!!\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], BT ext info bit1 check, send wifi BW&Chnl to BT!!\n"); btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED, &wifi_connected); @@ -2773,8 +2761,8 @@ void ex_halbtc8821a1ant_bt_info_notify(struct btc_coexist *btcoexist, if ((coex_sta->bt_info_ext & BIT3) && !wifi_under_5g) { if (!btcoexist->manual_control && !btcoexist->stop_coex_dm) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], BT ext info bit3 check, set BT NOT to ignore Wlan active!!\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], BT ext info bit3 check, set BT NOT to ignore Wlan active!!\n"); halbtc8821a1ant_ignore_wlan_act(btcoexist, FORCE_EXEC, false); @@ -2782,8 +2770,8 @@ void ex_halbtc8821a1ant_bt_info_notify(struct btc_coexist *btcoexist, } #if (BT_AUTO_REPORT_ONLY_8821A_1ANT == 0) if (!(coex_sta->bt_info_ext & BIT4)) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], BT ext info bit4 check, set BT to enable Auto Report!!\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], BT ext info bit4 check, set BT to enable Auto Report!!\n"); halbtc8821a1ant_bt_auto_report(btcoexist, FORCE_EXEC, true); } @@ -2828,28 +2816,28 @@ void ex_halbtc8821a1ant_bt_info_notify(struct btc_coexist *btcoexist, if (!(bt_info&BT_INFO_8821A_1ANT_B_CONNECTION)) { coex_dm->bt_status = BT_8821A_1ANT_BT_STATUS_NON_CONNECTED_IDLE; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], BtInfoNotify(), BT Non-Connected idle!!!\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], BtInfoNotify(), BT Non-Connected idle!!!\n"); } else if (bt_info == BT_INFO_8821A_1ANT_B_CONNECTION) { /* connection exists but no busy*/ coex_dm->bt_status = BT_8821A_1ANT_BT_STATUS_CONNECTED_IDLE; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], BtInfoNotify(), BT Connected-idle!!!\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], BtInfoNotify(), BT Connected-idle!!!\n"); } else if ((bt_info&BT_INFO_8821A_1ANT_B_SCO_ESCO) || (bt_info&BT_INFO_8821A_1ANT_B_SCO_BUSY)) { coex_dm->bt_status = BT_8821A_1ANT_BT_STATUS_SCO_BUSY; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - 
"[BTCoex], BtInfoNotify(), BT SCO busy!!!\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], BtInfoNotify(), BT SCO busy!!!\n"); } else if (bt_info&BT_INFO_8821A_1ANT_B_ACL_BUSY) { if (BT_8821A_1ANT_BT_STATUS_ACL_BUSY != coex_dm->bt_status) coex_dm->auto_tdma_adjust = false; coex_dm->bt_status = BT_8821A_1ANT_BT_STATUS_ACL_BUSY; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], BtInfoNotify(), BT ACL busy!!!\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], BtInfoNotify(), BT ACL busy!!!\n"); } else { coex_dm->bt_status = BT_8821A_1ANT_BT_STATUS_MAX; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], BtInfoNotify(), BT Non-Defined state!!!\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], BtInfoNotify(), BT Non-Defined state!!!\n"); } if ((BT_8821A_1ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) || @@ -2866,8 +2854,8 @@ void ex_halbtc8821a1ant_bt_info_notify(struct btc_coexist *btcoexist, void ex_halbtc8821a1ant_halt_notify(struct btc_coexist *btcoexist) { - BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, - "[BTCoex], Halt notify\n"); + btc_iface_dbg(INTF_NOTIFY, + "[BTCoex], Halt notify\n"); btcoexist->stop_coex_dm = true; @@ -2885,20 +2873,20 @@ void ex_halbtc8821a1ant_halt_notify(struct btc_coexist *btcoexist) void ex_halbtc8821a1ant_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state) { - BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, - "[BTCoex], Pnp notify\n"); + btc_iface_dbg(INTF_NOTIFY, + "[BTCoex], Pnp notify\n"); if (BTC_WIFI_PNP_SLEEP == pnp_state) { - BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, - "[BTCoex], Pnp notify to SLEEP\n"); + btc_iface_dbg(INTF_NOTIFY, + "[BTCoex], Pnp notify to SLEEP\n"); btcoexist->stop_coex_dm = true; halbtc8821a1ant_ignore_wlan_act(btcoexist, FORCE_EXEC, true); halbtc8821a1ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0); halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 9); } else if (BTC_WIFI_PNP_WAKE_UP == pnp_state) { - BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, - "[BTCoex], Pnp notify to WAKE UP\n"); + btc_iface_dbg(INTF_NOTIFY, + "[BTCoex], Pnp notify to WAKE UP\n"); btcoexist->stop_coex_dm = false; halbtc8821a1ant_init_hw_config(btcoexist, false); halbtc8821a1ant_init_coex_dm(btcoexist); @@ -2914,33 +2902,33 @@ ex_halbtc8821a1ant_periodical( struct btc_board_info *board_info = &btcoexist->board_info; struct btc_stack_info *stack_info = &btcoexist->stack_info; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], ==========================Periodical===========================\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], ==========================Periodical===========================\n"); if (dis_ver_info_cnt <= 5) { dis_ver_info_cnt += 1; - BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, - "[BTCoex], ****************************************************************\n"); - BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, - "[BTCoex], Ant PG Num/ Ant Mech/ Ant Pos = %d/ %d/ %d\n", - board_info->pg_ant_num, - board_info->btdm_ant_num, - board_info->btdm_ant_pos); - BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, - "[BTCoex], BT stack/ hci ext ver = %s / %d\n", - ((stack_info->profile_notified) ? "Yes" : "No"), - stack_info->hci_version); + btc_iface_dbg(INTF_INIT, + "[BTCoex], ****************************************************************\n"); + btc_iface_dbg(INTF_INIT, + "[BTCoex], Ant PG Num/ Ant Mech/ Ant Pos = %d/ %d/ %d\n", + board_info->pg_ant_num, + board_info->btdm_ant_num, + board_info->btdm_ant_pos); + btc_iface_dbg(INTF_INIT, + "[BTCoex], BT stack/ hci ext ver = %s / %d\n", + stack_info->profile_notified ? 
"Yes" : "No", + stack_info->hci_version); btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER, &bt_patch_ver); btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver); - BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, - "[BTCoex], CoexVer/ FwVer/ PatchVer = %d_%x/ 0x%x/ 0x%x(%d)\n", - glcoex_ver_date_8821a_1ant, - glcoex_ver_8821a_1ant, - fw_ver, bt_patch_ver, - bt_patch_ver); - BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, - "[BTCoex], ****************************************************************\n"); + btc_iface_dbg(INTF_INIT, + "[BTCoex], CoexVer/ FwVer/ PatchVer = %d_%x/ 0x%x/ 0x%x(%d)\n", + glcoex_ver_date_8821a_1ant, + glcoex_ver_8821a_1ant, + fw_ver, bt_patch_ver, + bt_patch_ver); + btc_iface_dbg(INTF_INIT, + "[BTCoex], ****************************************************************\n"); } #if (BT_AUTO_REPORT_ONLY_8821A_1ANT == 0) diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c index 044d914291c0..81f843bba771 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c @@ -80,28 +80,28 @@ static u8 halbtc8821a2ant_bt_rssi_state(u8 level_num, u8 rssi_thresh, BTC_RSSI_COEX_THRESH_TOL_8821A_2ANT; if (bt_rssi >= tmp) { bt_rssi_state = BTC_RSSI_STATE_HIGH; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, - "[BTCoex], BT Rssi state switch to High\n"); + btc_alg_dbg(ALGO_BT_RSSI_STATE, + "[BTCoex], BT Rssi state switch to High\n"); } else { bt_rssi_state = BTC_RSSI_STATE_STAY_LOW; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, - "[BTCoex], BT Rssi state stay at Low\n"); + btc_alg_dbg(ALGO_BT_RSSI_STATE, + "[BTCoex], BT Rssi state stay at Low\n"); } } else { if (bt_rssi < rssi_thresh) { bt_rssi_state = BTC_RSSI_STATE_LOW; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, - "[BTCoex], BT Rssi state switch to Low\n"); + btc_alg_dbg(ALGO_BT_RSSI_STATE, + "[BTCoex], BT Rssi state switch to Low\n"); } else { bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, - "[BTCoex], BT Rssi state stay at High\n"); + btc_alg_dbg(ALGO_BT_RSSI_STATE, + "[BTCoex], BT Rssi state stay at High\n"); } } } else if (level_num == 3) { if (rssi_thresh > rssi_thresh1) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, - "[BTCoex], BT Rssi thresh error!!\n"); + btc_alg_dbg(ALGO_BT_RSSI_STATE, + "[BTCoex], BT Rssi thresh error!!\n"); return coex_sta->pre_bt_rssi_state; } @@ -110,12 +110,12 @@ static u8 halbtc8821a2ant_bt_rssi_state(u8 level_num, u8 rssi_thresh, if (bt_rssi >= (rssi_thresh+BTC_RSSI_COEX_THRESH_TOL_8821A_2ANT)) { bt_rssi_state = BTC_RSSI_STATE_MEDIUM; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, - "[BTCoex], BT Rssi state switch to Medium\n"); + btc_alg_dbg(ALGO_BT_RSSI_STATE, + "[BTCoex], BT Rssi state switch to Medium\n"); } else { bt_rssi_state = BTC_RSSI_STATE_STAY_LOW; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, - "[BTCoex], BT Rssi state stay at Low\n"); + btc_alg_dbg(ALGO_BT_RSSI_STATE, + "[BTCoex], BT Rssi state stay at Low\n"); } } else if ((coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_MEDIUM) || @@ -125,26 +125,26 @@ static u8 halbtc8821a2ant_bt_rssi_state(u8 level_num, u8 rssi_thresh, (rssi_thresh1 + BTC_RSSI_COEX_THRESH_TOL_8821A_2ANT)) { bt_rssi_state = BTC_RSSI_STATE_HIGH; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, - "[BTCoex], BT Rssi state switch to High\n"); + btc_alg_dbg(ALGO_BT_RSSI_STATE, + "[BTCoex], BT Rssi state switch to High\n"); 
} else if (bt_rssi < rssi_thresh) { bt_rssi_state = BTC_RSSI_STATE_LOW; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, - "[BTCoex], BT Rssi state switch to Low\n"); + btc_alg_dbg(ALGO_BT_RSSI_STATE, + "[BTCoex], BT Rssi state switch to Low\n"); } else { bt_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, - "[BTCoex], BT Rssi state stay at Medium\n"); + btc_alg_dbg(ALGO_BT_RSSI_STATE, + "[BTCoex], BT Rssi state stay at Medium\n"); } } else { if (bt_rssi < rssi_thresh1) { bt_rssi_state = BTC_RSSI_STATE_MEDIUM; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, - "[BTCoex], BT Rssi state switch to Medium\n"); + btc_alg_dbg(ALGO_BT_RSSI_STATE, + "[BTCoex], BT Rssi state switch to Medium\n"); } else { bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, - "[BTCoex], BT Rssi state stay at High\n"); + btc_alg_dbg(ALGO_BT_RSSI_STATE, + "[BTCoex], BT Rssi state stay at High\n"); } } } @@ -171,32 +171,28 @@ static u8 halbtc8821a2ant_wifi_rssi_state(struct btc_coexist *btcoexist, if (wifi_rssi >= (rssi_thresh+BTC_RSSI_COEX_THRESH_TOL_8821A_2ANT)) { wifi_rssi_state = BTC_RSSI_STATE_HIGH; - BTC_PRINT(BTC_MSG_ALGORITHM, - ALGO_WIFI_RSSI_STATE, - "[BTCoex], wifi RSSI state switch to High\n"); + btc_alg_dbg(ALGO_WIFI_RSSI_STATE, + "[BTCoex], wifi RSSI state switch to High\n"); } else { wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW; - BTC_PRINT(BTC_MSG_ALGORITHM, - ALGO_WIFI_RSSI_STATE, - "[BTCoex], wifi RSSI state stay at Low\n"); + btc_alg_dbg(ALGO_WIFI_RSSI_STATE, + "[BTCoex], wifi RSSI state stay at Low\n"); } } else { if (wifi_rssi < rssi_thresh) { wifi_rssi_state = BTC_RSSI_STATE_LOW; - BTC_PRINT(BTC_MSG_ALGORITHM, - ALGO_WIFI_RSSI_STATE, - "[BTCoex], wifi RSSI state switch to Low\n"); + btc_alg_dbg(ALGO_WIFI_RSSI_STATE, + "[BTCoex], wifi RSSI state switch to Low\n"); } else { wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH; - BTC_PRINT(BTC_MSG_ALGORITHM, - ALGO_WIFI_RSSI_STATE, - "[BTCoex], wifi RSSI state stay at High\n"); + btc_alg_dbg(ALGO_WIFI_RSSI_STATE, + "[BTCoex], wifi RSSI state stay at High\n"); } } } else if (level_num == 3) { if (rssi_thresh > rssi_thresh1) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, - "[BTCoex], wifi RSSI thresh error!!\n"); + btc_alg_dbg(ALGO_WIFI_RSSI_STATE, + "[BTCoex], wifi RSSI thresh error!!\n"); return coex_sta->pre_wifi_rssi_state[index]; } @@ -207,14 +203,12 @@ static u8 halbtc8821a2ant_wifi_rssi_state(struct btc_coexist *btcoexist, if (wifi_rssi >= (rssi_thresh+BTC_RSSI_COEX_THRESH_TOL_8821A_2ANT)) { wifi_rssi_state = BTC_RSSI_STATE_MEDIUM; - BTC_PRINT(BTC_MSG_ALGORITHM, - ALGO_WIFI_RSSI_STATE, - "[BTCoex], wifi RSSI state switch to Medium\n"); + btc_alg_dbg(ALGO_WIFI_RSSI_STATE, + "[BTCoex], wifi RSSI state switch to Medium\n"); } else { wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW; - BTC_PRINT(BTC_MSG_ALGORITHM, - ALGO_WIFI_RSSI_STATE, - "[BTCoex], wifi RSSI state stay at Low\n"); + btc_alg_dbg(ALGO_WIFI_RSSI_STATE, + "[BTCoex], wifi RSSI state stay at Low\n"); } } else if ((coex_sta->pre_wifi_rssi_state[index] == BTC_RSSI_STATE_MEDIUM) || @@ -223,31 +217,26 @@ static u8 halbtc8821a2ant_wifi_rssi_state(struct btc_coexist *btcoexist, if (wifi_rssi >= (rssi_thresh1 + BTC_RSSI_COEX_THRESH_TOL_8821A_2ANT)) { wifi_rssi_state = BTC_RSSI_STATE_HIGH; - BTC_PRINT(BTC_MSG_ALGORITHM, - ALGO_WIFI_RSSI_STATE, - "[BTCoex], wifi RSSI state switch to High\n"); + btc_alg_dbg(ALGO_WIFI_RSSI_STATE, + "[BTCoex], wifi RSSI state switch to High\n"); } else if (wifi_rssi < rssi_thresh) { 
wifi_rssi_state = BTC_RSSI_STATE_LOW; - BTC_PRINT(BTC_MSG_ALGORITHM, - ALGO_WIFI_RSSI_STATE, - "[BTCoex], wifi RSSI state switch to Low\n"); + btc_alg_dbg(ALGO_WIFI_RSSI_STATE, + "[BTCoex], wifi RSSI state switch to Low\n"); } else { wifi_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM; - BTC_PRINT(BTC_MSG_ALGORITHM, - ALGO_WIFI_RSSI_STATE, - "[BTCoex], wifi RSSI state stay at Medium\n"); + btc_alg_dbg(ALGO_WIFI_RSSI_STATE, + "[BTCoex], wifi RSSI state stay at Medium\n"); } } else { if (wifi_rssi < rssi_thresh1) { wifi_rssi_state = BTC_RSSI_STATE_MEDIUM; - BTC_PRINT(BTC_MSG_ALGORITHM, - ALGO_WIFI_RSSI_STATE, - "[BTCoex], wifi RSSI state switch to Medium\n"); + btc_alg_dbg(ALGO_WIFI_RSSI_STATE, + "[BTCoex], wifi RSSI state switch to Medium\n"); } else { wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH; - BTC_PRINT(BTC_MSG_ALGORITHM, - ALGO_WIFI_RSSI_STATE, - "[BTCoex], wifi RSSI state stay at High\n"); + btc_alg_dbg(ALGO_WIFI_RSSI_STATE, + "[BTCoex], wifi RSSI state stay at High\n"); } } } @@ -279,26 +268,26 @@ static void btc8821a2ant_mon_bt_en_dis(struct btc_coexist *btcoexist) bt_disabled = false; btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_DISABLE, &bt_disabled); - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, - "[BTCoex], BT is enabled !!\n"); + btc_alg_dbg(ALGO_BT_MONITOR, + "[BTCoex], BT is enabled !!\n"); } else { bt_disable_cnt++; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, - "[BTCoex], bt all counters = 0, %d times!!\n", - bt_disable_cnt); + btc_alg_dbg(ALGO_BT_MONITOR, + "[BTCoex], bt all counters = 0, %d times!!\n", + bt_disable_cnt); if (bt_disable_cnt >= 2) { bt_disabled = true; btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_DISABLE, &bt_disabled); - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, - "[BTCoex], BT is disabled !!\n"); + btc_alg_dbg(ALGO_BT_MONITOR, + "[BTCoex], BT is disabled !!\n"); } } if (pre_bt_disabled != bt_disabled) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, - "[BTCoex], BT is from %s to %s!!\n", - (pre_bt_disabled ? "disabled" : "enabled"), - (bt_disabled ? "disabled" : "enabled")); + btc_alg_dbg(ALGO_BT_MONITOR, + "[BTCoex], BT is from %s to %s!!\n", + (pre_bt_disabled ? "disabled" : "enabled"), + (bt_disabled ? 
"disabled" : "enabled")); pre_bt_disabled = bt_disabled; } } @@ -324,12 +313,12 @@ static void halbtc8821a2ant_monitor_bt_ctr(struct btc_coexist *btcoexist) coex_sta->low_priority_tx = reg_lp_tx; coex_sta->low_priority_rx = reg_lp_rx; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, - "[BTCoex], High Priority Tx/Rx (reg 0x%x) = 0x%x(%d)/0x%x(%d)\n", - reg_hp_txrx, reg_hp_tx, reg_hp_tx, reg_hp_rx, reg_hp_rx); - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, - "[BTCoex], Low Priority Tx/Rx (reg 0x%x) = 0x%x(%d)/0x%x(%d)\n", - reg_lp_txrx, reg_lp_tx, reg_lp_tx, reg_lp_rx, reg_lp_rx); + btc_alg_dbg(ALGO_BT_MONITOR, + "[BTCoex], High Priority Tx/Rx (reg 0x%x) = 0x%x(%d)/0x%x(%d)\n", + reg_hp_txrx, reg_hp_tx, reg_hp_tx, reg_hp_rx, reg_hp_rx); + btc_alg_dbg(ALGO_BT_MONITOR, + "[BTCoex], Low Priority Tx/Rx (reg 0x%x) = 0x%x(%d)/0x%x(%d)\n", + reg_lp_txrx, reg_lp_tx, reg_lp_tx, reg_lp_rx, reg_lp_rx); /* reset counter */ btcoexist->btc_write_1byte(btcoexist, 0x76e, 0xc); @@ -343,9 +332,9 @@ static void halbtc8821a2ant_query_bt_info(struct btc_coexist *btcoexist) h2c_parameter[0] |= BIT0; /* trigger */ - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, - "[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n", - h2c_parameter[0]); + btc_alg_dbg(ALGO_TRACE_FW_EXEC, + "[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n", + h2c_parameter[0]); btcoexist->btc_fill_h2c(btcoexist, 0x61, 1, h2c_parameter); } @@ -368,8 +357,8 @@ static u8 halbtc8821a2ant_action_algorithm(struct btc_coexist *btcoexist) stack_info->bt_link_exist = coex_sta->bt_link_exist; if (!coex_sta->bt_link_exist) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], No profile exists!!!\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], No profile exists!!!\n"); return algorithm; } @@ -384,26 +373,26 @@ static u8 halbtc8821a2ant_action_algorithm(struct btc_coexist *btcoexist) if (num_of_diff_profile == 1) { if (coex_sta->sco_exist) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], SCO only\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], SCO only\n"); algorithm = BT_8821A_2ANT_COEX_ALGO_SCO; } else { if (coex_sta->hid_exist) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], HID only\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], HID only\n"); algorithm = BT_8821A_2ANT_COEX_ALGO_HID; } else if (coex_sta->a2dp_exist) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], A2DP only\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], A2DP only\n"); algorithm = BT_8821A_2ANT_COEX_ALGO_A2DP; } else if (coex_sta->pan_exist) { if (bt_hs_on) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], PAN(HS) only\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], PAN(HS) only\n"); algorithm = BT_8821A_2ANT_COEX_ALGO_PANHS; } else { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], PAN(EDR) only\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], PAN(EDR) only\n"); algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR; } } @@ -411,50 +400,50 @@ static u8 halbtc8821a2ant_action_algorithm(struct btc_coexist *btcoexist) } else if (num_of_diff_profile == 2) { if (coex_sta->sco_exist) { if (coex_sta->hid_exist) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], SCO + HID\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], SCO + HID\n"); algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID; } else if (coex_sta->a2dp_exist) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], SCO + A2DP ==> SCO\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], SCO + A2DP ==> SCO\n"); algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID; } else if (coex_sta->pan_exist) { if (bt_hs_on) { - BTC_PRINT(BTC_MSG_ALGORITHM, 
ALGO_TRACE, - "[BTCoex], SCO + PAN(HS)\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], SCO + PAN(HS)\n"); algorithm = BT_8821A_2ANT_COEX_ALGO_SCO; } else { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], SCO + PAN(EDR)\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], SCO + PAN(EDR)\n"); algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID; } } } else { if (coex_sta->hid_exist && coex_sta->a2dp_exist) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], HID + A2DP\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], HID + A2DP\n"); algorithm = BT_8821A_2ANT_COEX_ALGO_HID_A2DP; } else if (coex_sta->hid_exist && coex_sta->pan_exist) { if (bt_hs_on) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], HID + PAN(HS)\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], HID + PAN(HS)\n"); algorithm = BT_8821A_2ANT_COEX_ALGO_HID; } else { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], HID + PAN(EDR)\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], HID + PAN(EDR)\n"); algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID; } } else if (coex_sta->pan_exist && coex_sta->a2dp_exist) { if (bt_hs_on) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], A2DP + PAN(HS)\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], A2DP + PAN(HS)\n"); algorithm = BT_8821A_2ANT_COEX_ALGO_A2DP_PANHS; } else { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], A2DP + PAN(EDR)\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], A2DP + PAN(EDR)\n"); algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_A2DP; } } @@ -463,29 +452,29 @@ static u8 halbtc8821a2ant_action_algorithm(struct btc_coexist *btcoexist) if (coex_sta->sco_exist) { if (coex_sta->hid_exist && coex_sta->a2dp_exist) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], SCO + HID + A2DP ==> HID\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], SCO + HID + A2DP ==> HID\n"); algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID; } else if (coex_sta->hid_exist && coex_sta->pan_exist) { if (bt_hs_on) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], SCO + HID + PAN(HS)\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], SCO + HID + PAN(HS)\n"); algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID; } else { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], SCO + HID + PAN(EDR)\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], SCO + HID + PAN(EDR)\n"); algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID; } } else if (coex_sta->pan_exist && coex_sta->a2dp_exist) { if (bt_hs_on) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], SCO + A2DP + PAN(HS)\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], SCO + A2DP + PAN(HS)\n"); algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID; } else { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], SCO + A2DP + PAN(EDR) ==> HID\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], SCO + A2DP + PAN(EDR) ==> HID\n"); algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID; } } @@ -494,12 +483,12 @@ static u8 halbtc8821a2ant_action_algorithm(struct btc_coexist *btcoexist) coex_sta->pan_exist && coex_sta->a2dp_exist) { if (bt_hs_on) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], HID + A2DP + PAN(HS)\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], HID + A2DP + PAN(HS)\n"); algorithm = BT_8821A_2ANT_COEX_ALGO_HID_A2DP; } else { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], HID + A2DP + PAN(EDR)\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], HID + A2DP + PAN(EDR)\n"); algorithm = BT_8821A_2ANT_COEX_ALGO_HID_A2DP_PANEDR; } } @@ -510,12 +499,12 @@ static u8 halbtc8821a2ant_action_algorithm(struct btc_coexist *btcoexist) coex_sta->pan_exist && coex_sta->a2dp_exist) { if (bt_hs_on) { - 
BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], Error!!! SCO + HID + A2DP + PAN(HS)\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], Error!!! SCO + HID + A2DP + PAN(HS)\n"); } else { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], SCO + HID + A2DP + PAN(EDR)==>PAN(EDR)+HID\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], SCO + HID + A2DP + PAN(EDR)==>PAN(EDR)+HID\n"); algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID; } } @@ -544,15 +533,15 @@ static bool halbtc8821a2ant_need_to_dec_bt_pwr(struct btc_coexist *btcoexist) if (wifi_connected) { if (bt_hs_on) { if (bt_hs_rssi > 37) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, - "[BTCoex], Need to decrease bt power for HS mode!!\n"); + btc_alg_dbg(ALGO_TRACE_FW, + "[BTCoex], Need to decrease bt power for HS mode!!\n"); ret = true; } } else { if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) || (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, - "[BTCoex], Need to decrease bt power for Wifi is connected!!\n"); + btc_alg_dbg(ALGO_TRACE_FW, + "[BTCoex], Need to decrease bt power for Wifi is connected!!\n"); ret = true; } } @@ -570,10 +559,10 @@ static void btc8821a2ant_set_fw_dac_swing_lev(struct btc_coexist *btcoexist, */ h2c_parameter[0] = dac_swing_lvl; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, - "[BTCoex], Set Dac Swing Level = 0x%x\n", dac_swing_lvl); - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, - "[BTCoex], FW write 0x64 = 0x%x\n", h2c_parameter[0]); + btc_alg_dbg(ALGO_TRACE_FW_EXEC, + "[BTCoex], Set Dac Swing Level = 0x%x\n", dac_swing_lvl); + btc_alg_dbg(ALGO_TRACE_FW_EXEC, + "[BTCoex], FW write 0x64 = 0x%x\n", h2c_parameter[0]); btcoexist->btc_fill_h2c(btcoexist, 0x64, 1, h2c_parameter); } @@ -588,9 +577,9 @@ static void halbtc8821a2ant_set_fw_dec_bt_pwr(struct btc_coexist *btcoexist, if (dec_bt_pwr) h2c_parameter[0] |= BIT1; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, - "[BTCoex], decrease Bt Power : %s, FW write 0x62 = 0x%x\n", - (dec_bt_pwr ? "Yes!!" : "No!!"), h2c_parameter[0]); + btc_alg_dbg(ALGO_TRACE_FW_EXEC, + "[BTCoex], decrease Bt Power : %s, FW write 0x62 = 0x%x\n", + (dec_bt_pwr ? "Yes!!" : "No!!"), h2c_parameter[0]); btcoexist->btc_fill_h2c(btcoexist, 0x62, 1, h2c_parameter); } @@ -598,16 +587,16 @@ static void halbtc8821a2ant_set_fw_dec_bt_pwr(struct btc_coexist *btcoexist, static void halbtc8821a2ant_dec_bt_pwr(struct btc_coexist *btcoexist, bool force_exec, bool dec_bt_pwr) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, - "[BTCoex], %s Dec BT power = %s\n", - (force_exec ? "force to" : ""), - ((dec_bt_pwr) ? "ON" : "OFF")); + btc_alg_dbg(ALGO_TRACE_FW, + "[BTCoex], %s Dec BT power = %s\n", + (force_exec ? "force to" : ""), + ((dec_bt_pwr) ? "ON" : "OFF")); coex_dm->cur_dec_bt_pwr = dec_bt_pwr; if (!force_exec) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, - "[BTCoex], pre_dec_bt_pwr = %d, cur_dec_bt_pwr = %d\n", - coex_dm->pre_dec_bt_pwr, coex_dm->cur_dec_bt_pwr); + btc_alg_dbg(ALGO_TRACE_FW_DETAIL, + "[BTCoex], pre_dec_bt_pwr = %d, cur_dec_bt_pwr = %d\n", + coex_dm->pre_dec_bt_pwr, coex_dm->cur_dec_bt_pwr); if (coex_dm->pre_dec_bt_pwr == coex_dm->cur_dec_bt_pwr) return; @@ -627,10 +616,10 @@ static void btc8821a2ant_set_fw_bt_lna_constr(struct btc_coexist *btcoexist, if (bt_lna_cons_on) h2c_parameter[1] |= BIT0; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, - "[BTCoex], set BT LNA Constrain: %s, FW write 0x69 = 0x%x\n", - (bt_lna_cons_on ? "ON!!" 
: "OFF!!"), - h2c_parameter[0]<<8|h2c_parameter[1]); + btc_alg_dbg(ALGO_TRACE_FW_EXEC, + "[BTCoex], set BT LNA Constrain: %s, FW write 0x69 = 0x%x\n", + bt_lna_cons_on ? "ON!!" : "OFF!!", + h2c_parameter[0] << 8 | h2c_parameter[1]); btcoexist->btc_fill_h2c(btcoexist, 0x69, 2, h2c_parameter); } @@ -638,17 +627,17 @@ static void btc8821a2ant_set_fw_bt_lna_constr(struct btc_coexist *btcoexist, static void btc8821a2_set_bt_lna_const(struct btc_coexist *btcoexist, bool force_exec, bool bt_lna_cons_on) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, - "[BTCoex], %s BT Constrain = %s\n", - (force_exec ? "force" : ""), - ((bt_lna_cons_on) ? "ON" : "OFF")); + btc_alg_dbg(ALGO_TRACE_FW, + "[BTCoex], %s BT Constrain = %s\n", + (force_exec ? "force" : ""), + ((bt_lna_cons_on) ? "ON" : "OFF")); coex_dm->cur_bt_lna_constrain = bt_lna_cons_on; if (!force_exec) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, - "[BTCoex], pre_bt_lna_constrain = %d,cur_bt_lna_constrain = %d\n", - coex_dm->pre_bt_lna_constrain, - coex_dm->cur_bt_lna_constrain); + btc_alg_dbg(ALGO_TRACE_FW_DETAIL, + "[BTCoex], pre_bt_lna_constrain = %d,cur_bt_lna_constrain = %d\n", + coex_dm->pre_bt_lna_constrain, + coex_dm->cur_bt_lna_constrain); if (coex_dm->pre_bt_lna_constrain == coex_dm->cur_bt_lna_constrain) @@ -669,10 +658,10 @@ static void halbtc8821a2ant_set_fw_bt_psd_mode(struct btc_coexist *btcoexist, h2c_parameter[1] = bt_psd_mode; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, - "[BTCoex], set BT PSD mode = 0x%x, FW write 0x69 = 0x%x\n", - h2c_parameter[1], - h2c_parameter[0]<<8|h2c_parameter[1]); + btc_alg_dbg(ALGO_TRACE_FW_EXEC, + "[BTCoex], set BT PSD mode = 0x%x, FW write 0x69 = 0x%x\n", + h2c_parameter[1], + h2c_parameter[0] << 8 | h2c_parameter[1]); btcoexist->btc_fill_h2c(btcoexist, 0x69, 2, h2c_parameter); } @@ -680,15 +669,15 @@ static void halbtc8821a2ant_set_fw_bt_psd_mode(struct btc_coexist *btcoexist, static void halbtc8821a2ant_set_bt_psd_mode(struct btc_coexist *btcoexist, bool force_exec, u8 bt_psd_mode) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, - "[BTCoex], %s BT PSD mode = 0x%x\n", - (force_exec ? "force" : ""), bt_psd_mode); + btc_alg_dbg(ALGO_TRACE_FW, + "[BTCoex], %s BT PSD mode = 0x%x\n", + (force_exec ? "force" : ""), bt_psd_mode); coex_dm->cur_bt_psd_mode = bt_psd_mode; if (!force_exec) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, - "[BTCoex], pre_bt_psd_mode = 0x%x, cur_bt_psd_mode = 0x%x\n", - coex_dm->pre_bt_psd_mode, coex_dm->cur_bt_psd_mode); + btc_alg_dbg(ALGO_TRACE_FW_DETAIL, + "[BTCoex], pre_bt_psd_mode = 0x%x, cur_bt_psd_mode = 0x%x\n", + coex_dm->pre_bt_psd_mode, coex_dm->cur_bt_psd_mode); if (coex_dm->pre_bt_psd_mode == coex_dm->cur_bt_psd_mode) return; @@ -709,10 +698,10 @@ static void halbtc8821a2ant_set_bt_auto_report(struct btc_coexist *btcoexist, if (enable_auto_report) h2c_parameter[0] |= BIT0; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, - "[BTCoex], BT FW auto report : %s, FW write 0x68 = 0x%x\n", - (enable_auto_report ? "Enabled!!" : "Disabled!!"), - h2c_parameter[0]); + btc_alg_dbg(ALGO_TRACE_FW_EXEC, + "[BTCoex], BT FW auto report : %s, FW write 0x68 = 0x%x\n", + (enable_auto_report ? "Enabled!!" : "Disabled!!"), + h2c_parameter[0]); btcoexist->btc_fill_h2c(btcoexist, 0x68, 1, h2c_parameter); } @@ -721,17 +710,17 @@ static void halbtc8821a2ant_bt_auto_report(struct btc_coexist *btcoexist, bool force_exec, bool enable_auto_report) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, - "[BTCoex], %s BT Auto report = %s\n", - (force_exec ? 
"force to" : ""), - ((enable_auto_report) ? "Enabled" : "Disabled")); + btc_alg_dbg(ALGO_TRACE_FW, + "[BTCoex], %s BT Auto report = %s\n", + (force_exec ? "force to" : ""), + ((enable_auto_report) ? "Enabled" : "Disabled")); coex_dm->cur_bt_auto_report = enable_auto_report; if (!force_exec) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, - "[BTCoex], pre_bt_auto_report = %d, cur_bt_auto_report = %d\n", - coex_dm->pre_bt_auto_report, - coex_dm->cur_bt_auto_report); + btc_alg_dbg(ALGO_TRACE_FW_DETAIL, + "[BTCoex], pre_bt_auto_report = %d, cur_bt_auto_report = %d\n", + coex_dm->pre_bt_auto_report, + coex_dm->cur_bt_auto_report); if (coex_dm->pre_bt_auto_report == coex_dm->cur_bt_auto_report) return; @@ -746,16 +735,16 @@ static void halbtc8821a2ant_fw_dac_swing_lvl(struct btc_coexist *btcoexist, bool force_exec, u8 fw_dac_swing_lvl) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, - "[BTCoex], %s set FW Dac Swing level = %d\n", - (force_exec ? "force to" : ""), fw_dac_swing_lvl); + btc_alg_dbg(ALGO_TRACE_FW, + "[BTCoex], %s set FW Dac Swing level = %d\n", + (force_exec ? "force to" : ""), fw_dac_swing_lvl); coex_dm->cur_fw_dac_swing_lvl = fw_dac_swing_lvl; if (!force_exec) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, - "[BTCoex], pre_fw_dac_swing_lvl = %d, cur_fw_dac_swing_lvl = %d\n", - coex_dm->pre_fw_dac_swing_lvl, - coex_dm->cur_fw_dac_swing_lvl); + btc_alg_dbg(ALGO_TRACE_FW_DETAIL, + "[BTCoex], pre_fw_dac_swing_lvl = %d, cur_fw_dac_swing_lvl = %d\n", + coex_dm->pre_fw_dac_swing_lvl, + coex_dm->cur_fw_dac_swing_lvl); if (coex_dm->pre_fw_dac_swing_lvl == coex_dm->cur_fw_dac_swing_lvl) @@ -773,8 +762,8 @@ static void btc8821a2ant_set_sw_rf_rx_lpf_corner(struct btc_coexist *btcoexist, { if (rx_rf_shrink_on) { /* Shrink RF Rx LPF corner */ - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, - "[BTCoex], Shrink RF Rx LPF corner!!\n"); + btc_alg_dbg(ALGO_TRACE_SW_EXEC, + "[BTCoex], Shrink RF Rx LPF corner!!\n"); btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1e, 0xfffff, 0xffffc); } else { @@ -782,8 +771,8 @@ static void btc8821a2ant_set_sw_rf_rx_lpf_corner(struct btc_coexist *btcoexist, * After initialized, we can use coex_dm->bt_rf0x1e_backup */ if (btcoexist->initilized) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, - "[BTCoex], Resume RF Rx LPF corner!!\n"); + btc_alg_dbg(ALGO_TRACE_SW_EXEC, + "[BTCoex], Resume RF Rx LPF corner!!\n"); btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1e, 0xfffff, coex_dm->bt_rf0x1e_backup); @@ -794,17 +783,17 @@ static void btc8821a2ant_set_sw_rf_rx_lpf_corner(struct btc_coexist *btcoexist, static void halbtc8821a2ant_RfShrink(struct btc_coexist *btcoexist, bool force_exec, bool rx_rf_shrink_on) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, - "[BTCoex], %s turn Rx RF Shrink = %s\n", - (force_exec ? "force to" : ""), - ((rx_rf_shrink_on) ? "ON" : "OFF")); + btc_alg_dbg(ALGO_TRACE_SW, + "[BTCoex], %s turn Rx RF Shrink = %s\n", + (force_exec ? "force to" : ""), + ((rx_rf_shrink_on) ? 
"ON" : "OFF")); coex_dm->cur_rf_rx_lpf_shrink = rx_rf_shrink_on; if (!force_exec) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, - "[BTCoex], pre_rf_rx_lpf_shrink = %d, cur_rf_rx_lpf_shrink = %d\n", - coex_dm->pre_rf_rx_lpf_shrink, - coex_dm->cur_rf_rx_lpf_shrink); + btc_alg_dbg(ALGO_TRACE_SW_DETAIL, + "[BTCoex], pre_rf_rx_lpf_shrink = %d, cur_rf_rx_lpf_shrink = %d\n", + coex_dm->pre_rf_rx_lpf_shrink, + coex_dm->cur_rf_rx_lpf_shrink); if (coex_dm->pre_rf_rx_lpf_shrink == coex_dm->cur_rf_rx_lpf_shrink) @@ -835,9 +824,9 @@ static void btc8821a2ant_SetSwPenTxRateAdapt(struct btc_coexist *btcoexist, h2c_parameter[5] = 0xf9; } - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, - "[BTCoex], set WiFi Low-Penalty Retry: %s", - (low_penalty_ra ? "ON!!" : "OFF!!")); + btc_alg_dbg(ALGO_TRACE_FW_EXEC, + "[BTCoex], set WiFi Low-Penalty Retry: %s", + (low_penalty_ra ? "ON!!" : "OFF!!")); btcoexist->btc_fill_h2c(btcoexist, 0x69, 6, h2c_parameter); } @@ -846,17 +835,17 @@ static void halbtc8821a2ant_low_penalty_ra(struct btc_coexist *btcoexist, bool force_exec, bool low_penalty_ra) { /*return;*/ - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, - "[BTCoex], %s turn LowPenaltyRA = %s\n", - (force_exec ? "force to" : ""), - ((low_penalty_ra) ? "ON" : "OFF")); + btc_alg_dbg(ALGO_TRACE_SW, + "[BTCoex], %s turn LowPenaltyRA = %s\n", + (force_exec ? "force to" : ""), + ((low_penalty_ra) ? "ON" : "OFF")); coex_dm->cur_low_penalty_ra = low_penalty_ra; if (!force_exec) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, - "[BTCoex], pre_low_penalty_ra = %d, cur_low_penalty_ra = %d\n", - coex_dm->pre_low_penalty_ra, - coex_dm->cur_low_penalty_ra); + btc_alg_dbg(ALGO_TRACE_SW_DETAIL, + "[BTCoex], pre_low_penalty_ra = %d, cur_low_penalty_ra = %d\n", + coex_dm->pre_low_penalty_ra, + coex_dm->cur_low_penalty_ra); if (coex_dm->pre_low_penalty_ra == coex_dm->cur_low_penalty_ra) return; @@ -872,8 +861,8 @@ static void halbtc8821a2ant_set_dac_swing_reg(struct btc_coexist *btcoexist, { u8 val = (u8)level; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, - "[BTCoex], Write SwDacSwing = 0x%x\n", level); + btc_alg_dbg(ALGO_TRACE_SW_EXEC, + "[BTCoex], Write SwDacSwing = 0x%x\n", level); btcoexist->btc_write_1byte_bitmask(btcoexist, 0xc5b, 0x3e, val); } @@ -891,21 +880,21 @@ static void halbtc8821a2ant_dac_swing(struct btc_coexist *btcoexist, bool force_exec, bool dac_swing_on, u32 dac_swing_lvl) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, - "[BTCoex], %s turn DacSwing = %s, dac_swing_lvl = 0x%x\n", - (force_exec ? "force to" : ""), - ((dac_swing_on) ? "ON" : "OFF"), - dac_swing_lvl); + btc_alg_dbg(ALGO_TRACE_SW, + "[BTCoex], %s turn DacSwing = %s, dac_swing_lvl = 0x%x\n", + (force_exec ? "force to" : ""), + ((dac_swing_on) ? 
"ON" : "OFF"), + dac_swing_lvl); coex_dm->cur_dac_swing_on = dac_swing_on; coex_dm->cur_dac_swing_lvl = dac_swing_lvl; if (!force_exec) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, - "[BTCoex], pre_dac_swing_on = %d, pre_dac_swing_lvl = 0x%x, cur_dac_swing_on = %d, cur_dac_swing_lvl = 0x%x\n", - coex_dm->pre_dac_swing_on, - coex_dm->pre_dac_swing_lvl, - coex_dm->cur_dac_swing_on, - coex_dm->cur_dac_swing_lvl); + btc_alg_dbg(ALGO_TRACE_SW_DETAIL, + "[BTCoex], pre_dac_swing_on = %d, pre_dac_swing_lvl = 0x%x, cur_dac_swing_on = %d, cur_dac_swing_lvl = 0x%x\n", + coex_dm->pre_dac_swing_on, + coex_dm->pre_dac_swing_lvl, + coex_dm->cur_dac_swing_on, + coex_dm->cur_dac_swing_lvl); if ((coex_dm->pre_dac_swing_on == coex_dm->cur_dac_swing_on) && (coex_dm->pre_dac_swing_lvl == @@ -924,12 +913,12 @@ static void halbtc8821a2ant_set_adc_back_off(struct btc_coexist *btcoexist, bool adc_back_off) { if (adc_back_off) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, - "[BTCoex], BB BackOff Level On!\n"); + btc_alg_dbg(ALGO_TRACE_SW_EXEC, + "[BTCoex], BB BackOff Level On!\n"); btcoexist->btc_write_1byte_bitmask(btcoexist, 0x8db, 0x60, 0x3); } else { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, - "[BTCoex], BB BackOff Level Off!\n"); + btc_alg_dbg(ALGO_TRACE_SW_EXEC, + "[BTCoex], BB BackOff Level Off!\n"); btcoexist->btc_write_1byte_bitmask(btcoexist, 0x8db, 0x60, 0x1); } } @@ -937,16 +926,17 @@ static void halbtc8821a2ant_set_adc_back_off(struct btc_coexist *btcoexist, static void halbtc8821a2ant_adc_back_off(struct btc_coexist *btcoexist, bool force_exec, bool adc_back_off) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, - "[BTCoex], %s turn AdcBackOff = %s\n", - (force_exec ? "force to" : ""), - ((adc_back_off) ? "ON" : "OFF")); + btc_alg_dbg(ALGO_TRACE_SW, + "[BTCoex], %s turn AdcBackOff = %s\n", + (force_exec ? "force to" : ""), + ((adc_back_off) ? 
"ON" : "OFF")); coex_dm->cur_adc_back_off = adc_back_off; if (!force_exec) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, - "[BTCoex], pre_adc_back_off = %d, cur_adc_back_off = %d\n", - coex_dm->pre_adc_back_off, coex_dm->cur_adc_back_off); + btc_alg_dbg(ALGO_TRACE_SW_DETAIL, + "[BTCoex], pre_adc_back_off = %d, cur_adc_back_off = %d\n", + coex_dm->pre_adc_back_off, + coex_dm->cur_adc_back_off); if (coex_dm->pre_adc_back_off == coex_dm->cur_adc_back_off) return; @@ -960,20 +950,20 @@ static void halbtc8821a2ant_set_coex_table(struct btc_coexist *btcoexist, u32 val0x6c0, u32 val0x6c4, u32 val0x6c8, u8 val0x6cc) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, - "[BTCoex], set coex table, set 0x6c0 = 0x%x\n", val0x6c0); + btc_alg_dbg(ALGO_TRACE_SW_EXEC, + "[BTCoex], set coex table, set 0x6c0 = 0x%x\n", val0x6c0); btcoexist->btc_write_4byte(btcoexist, 0x6c0, val0x6c0); - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, - "[BTCoex], set coex table, set 0x6c4 = 0x%x\n", val0x6c4); + btc_alg_dbg(ALGO_TRACE_SW_EXEC, + "[BTCoex], set coex table, set 0x6c4 = 0x%x\n", val0x6c4); btcoexist->btc_write_4byte(btcoexist, 0x6c4, val0x6c4); - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, - "[BTCoex], set coex table, set 0x6c8 = 0x%x\n", val0x6c8); + btc_alg_dbg(ALGO_TRACE_SW_EXEC, + "[BTCoex], set coex table, set 0x6c8 = 0x%x\n", val0x6c8); btcoexist->btc_write_4byte(btcoexist, 0x6c8, val0x6c8); - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, - "[BTCoex], set coex table, set 0x6cc = 0x%x\n", val0x6cc); + btc_alg_dbg(ALGO_TRACE_SW_EXEC, + "[BTCoex], set coex table, set 0x6cc = 0x%x\n", val0x6cc); btcoexist->btc_write_1byte(btcoexist, 0x6cc, val0x6cc); } @@ -981,28 +971,28 @@ static void halbtc8821a2ant_coex_table(struct btc_coexist *btcoexist, bool force_exec, u32 val0x6c0, u32 val0x6c4, u32 val0x6c8, u8 val0x6cc) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, - "[BTCoex], %s write Coex Table 0x6c0 = 0x%x, 0x6c4 = 0x%x, 0x6c8 = 0x%x, 0x6cc = 0x%x\n", - (force_exec ? "force to" : ""), - val0x6c0, val0x6c4, val0x6c8, val0x6cc); + btc_alg_dbg(ALGO_TRACE_SW, + "[BTCoex], %s write Coex Table 0x6c0 = 0x%x, 0x6c4 = 0x%x, 0x6c8 = 0x%x, 0x6cc = 0x%x\n", + (force_exec ? 
"force to" : ""), + val0x6c0, val0x6c4, val0x6c8, val0x6cc); coex_dm->cur_val0x6c0 = val0x6c0; coex_dm->cur_val0x6c4 = val0x6c4; coex_dm->cur_val0x6c8 = val0x6c8; coex_dm->cur_val0x6cc = val0x6cc; if (!force_exec) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, - "[BTCoex], pre_val0x6c0 = 0x%x, pre_val0x6c4 = 0x%x, pre_val0x6c8 = 0x%x, pre_val0x6cc = 0x%x !!\n", - coex_dm->pre_val0x6c0, - coex_dm->pre_val0x6c4, - coex_dm->pre_val0x6c8, - coex_dm->pre_val0x6cc); - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, - "[BTCoex], cur_val0x6c0 = 0x%x, cur_val0x6c4 = 0x%x, cur_val0x6c8 = 0x%x, cur_val0x6cc = 0x%x !!\n", - coex_dm->cur_val0x6c0, - coex_dm->cur_val0x6c4, - coex_dm->cur_val0x6c8, - coex_dm->cur_val0x6cc); + btc_alg_dbg(ALGO_TRACE_SW_DETAIL, + "[BTCoex], pre_val0x6c0 = 0x%x, pre_val0x6c4 = 0x%x, pre_val0x6c8 = 0x%x, pre_val0x6cc = 0x%x !!\n", + coex_dm->pre_val0x6c0, + coex_dm->pre_val0x6c4, + coex_dm->pre_val0x6c8, + coex_dm->pre_val0x6cc); + btc_alg_dbg(ALGO_TRACE_SW_DETAIL, + "[BTCoex], cur_val0x6c0 = 0x%x, cur_val0x6c4 = 0x%x, cur_val0x6c8 = 0x%x, cur_val0x6cc = 0x%x !!\n", + coex_dm->cur_val0x6c0, + coex_dm->cur_val0x6c4, + coex_dm->cur_val0x6c8, + coex_dm->cur_val0x6cc); if ((coex_dm->pre_val0x6c0 == coex_dm->cur_val0x6c0) && (coex_dm->pre_val0x6c4 == coex_dm->cur_val0x6c4) && @@ -1027,9 +1017,9 @@ static void halbtc8821a2ant_set_fw_ignore_wlan_act(struct btc_coexist *btcoex, if (enable) h2c_parameter[0] |= BIT0;/* function enable */ - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, - "[BTCoex], set FW for BT Ignore Wlan_Act, FW write 0x63 = 0x%x\n", - h2c_parameter[0]); + btc_alg_dbg(ALGO_TRACE_FW_EXEC, + "[BTCoex], set FW for BT Ignore Wlan_Act, FW write 0x63 = 0x%x\n", + h2c_parameter[0]); btcoex->btc_fill_h2c(btcoex, 0x63, 1, h2c_parameter); } @@ -1037,16 +1027,16 @@ static void halbtc8821a2ant_set_fw_ignore_wlan_act(struct btc_coexist *btcoex, static void halbtc8821a2ant_ignore_wlan_act(struct btc_coexist *btcoexist, bool force_exec, bool enable) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, - "[BTCoex], %s turn Ignore WlanAct %s\n", - (force_exec ? "force to" : ""), (enable ? "ON" : "OFF")); + btc_alg_dbg(ALGO_TRACE_FW, + "[BTCoex], %s turn Ignore WlanAct %s\n", + (force_exec ? "force to" : ""), (enable ? 
"ON" : "OFF")); coex_dm->cur_ignore_wlan_act = enable; if (!force_exec) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, - "[BTCoex], pre_ignore_wlan_act = %d, cur_ignore_wlan_act = %d!!\n", - coex_dm->pre_ignore_wlan_act, - coex_dm->cur_ignore_wlan_act); + btc_alg_dbg(ALGO_TRACE_FW_DETAIL, + "[BTCoex], pre_ignore_wlan_act = %d, cur_ignore_wlan_act = %d!!\n", + coex_dm->pre_ignore_wlan_act, + coex_dm->cur_ignore_wlan_act); if (coex_dm->pre_ignore_wlan_act == coex_dm->cur_ignore_wlan_act) @@ -1075,13 +1065,13 @@ static void halbtc8821a2ant_set_fw_pstdma(struct btc_coexist *btcoexist, coex_dm->ps_tdma_para[3] = byte4; coex_dm->ps_tdma_para[4] = byte5; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, - "[BTCoex], FW write 0x60(5bytes) = 0x%x%08x\n", - h2c_parameter[0], - h2c_parameter[1]<<24| - h2c_parameter[2]<<16| - h2c_parameter[3]<<8| - h2c_parameter[4]); + btc_alg_dbg(ALGO_TRACE_FW_EXEC, + "[BTCoex], FW write 0x60(5bytes) = 0x%x%08x\n", + h2c_parameter[0], + h2c_parameter[1] << 24 | + h2c_parameter[2] << 16 | + h2c_parameter[3] << 8 | + h2c_parameter[4]); btcoexist->btc_fill_h2c(btcoexist, 0x60, 5, h2c_parameter); } @@ -1175,20 +1165,20 @@ static void halbtc8821a2ant_set_ant_path(struct btc_coexist *btcoexist, static void halbtc8821a2ant_ps_tdma(struct btc_coexist *btcoexist, bool force_exec, bool turn_on, u8 type) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, - "[BTCoex], %s turn %s PS TDMA, type = %d\n", - (force_exec ? "force to" : ""), (turn_on ? "ON" : "OFF"), - type); + btc_alg_dbg(ALGO_TRACE_FW, + "[BTCoex], %s turn %s PS TDMA, type = %d\n", + (force_exec ? "force to" : ""), (turn_on ? "ON" : "OFF"), + type); coex_dm->cur_ps_tdma_on = turn_on; coex_dm->cur_ps_tdma = type; if (!force_exec) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, - "[BTCoex], pre_ps_tdma_on = %d, cur_ps_tdma_on = %d!!\n", - coex_dm->pre_ps_tdma_on, coex_dm->cur_ps_tdma_on); - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, - "[BTCoex], pre_ps_tdma = %d, cur_ps_tdma = %d!!\n", - coex_dm->pre_ps_tdma, coex_dm->cur_ps_tdma); + btc_alg_dbg(ALGO_TRACE_FW_DETAIL, + "[BTCoex], pre_ps_tdma_on = %d, cur_ps_tdma_on = %d!!\n", + coex_dm->pre_ps_tdma_on, coex_dm->cur_ps_tdma_on); + btc_alg_dbg(ALGO_TRACE_FW_DETAIL, + "[BTCoex], pre_ps_tdma = %d, cur_ps_tdma = %d!!\n", + coex_dm->pre_ps_tdma, coex_dm->cur_ps_tdma); if ((coex_dm->pre_ps_tdma_on == coex_dm->cur_ps_tdma_on) && (coex_dm->pre_ps_tdma == coex_dm->cur_ps_tdma)) @@ -1374,8 +1364,8 @@ static bool halbtc8821a2ant_is_common_action(struct btc_coexist *btcoexist) btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER, &low_pwr_disable); - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], Wifi IPS + BT IPS!!\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], Wifi IPS + BT IPS!!\n"); halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1); halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6); @@ -1392,13 +1382,13 @@ static bool halbtc8821a2ant_is_common_action(struct btc_coexist *btcoexist) &low_pwr_disable); if (wifi_busy) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], Wifi Busy + BT IPS!!\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], Wifi Busy + BT IPS!!\n"); halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1); } else { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], Wifi LPS + BT IPS!!\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], Wifi LPS + BT IPS!!\n"); halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1); } @@ -1416,8 +1406,8 @@ static bool halbtc8821a2ant_is_common_action(struct btc_coexist *btcoexist) 
btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER, &low_pwr_disable); - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], Wifi IPS + BT LPS!!\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], Wifi IPS + BT LPS!!\n"); halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1); halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6); @@ -1433,13 +1423,13 @@ static bool halbtc8821a2ant_is_common_action(struct btc_coexist *btcoexist) BTC_SET_ACT_DISABLE_LOW_POWER, &low_pwr_disable); if (wifi_busy) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], Wifi Busy + BT LPS!!\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], Wifi Busy + BT LPS!!\n"); halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1); } else { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], Wifi LPS + BT LPS!!\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], Wifi LPS + BT LPS!!\n"); halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1); } @@ -1458,8 +1448,8 @@ static bool halbtc8821a2ant_is_common_action(struct btc_coexist *btcoexist) btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER, &low_pwr_disable); - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], Wifi IPS + BT Busy!!\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], Wifi IPS + BT Busy!!\n"); halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1); halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6); @@ -1478,12 +1468,12 @@ static bool halbtc8821a2ant_is_common_action(struct btc_coexist *btcoexist) &low_pwr_disable); if (wifi_busy) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], Wifi Busy + BT Busy!!\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], Wifi Busy + BT Busy!!\n"); common = false; } else { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], Wifi LPS + BT Busy!!\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], Wifi LPS + BT Busy!!\n"); halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 21); @@ -1505,8 +1495,8 @@ static void btc8821a2_int1(struct btc_coexist *btcoexist, bool tx_pause, int result) { if (tx_pause) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, - "[BTCoex], TxPause = 1\n"); + btc_alg_dbg(ALGO_TRACE_FW_DETAIL, + "[BTCoex], TxPause = 1\n"); if (coex_dm->cur_ps_tdma == 71) { halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, @@ -1601,8 +1591,8 @@ static void btc8821a2_int1(struct btc_coexist *btcoexist, bool tx_pause, } } } else { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, - "[BTCoex], TxPause = 0\n"); + btc_alg_dbg(ALGO_TRACE_FW_DETAIL, + "[BTCoex], TxPause = 0\n"); if (coex_dm->cur_ps_tdma == 5) { halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 71); @@ -1706,8 +1696,8 @@ static void btc8821a2_int2(struct btc_coexist *btcoexist, bool tx_pause, int result) { if (tx_pause) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, - "[BTCoex], TxPause = 1\n"); + btc_alg_dbg(ALGO_TRACE_FW_DETAIL, + "[BTCoex], TxPause = 1\n"); if (coex_dm->cur_ps_tdma == 1) { halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 6); @@ -1796,8 +1786,8 @@ static void btc8821a2_int2(struct btc_coexist *btcoexist, bool tx_pause, } } } else { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, - "[BTCoex], TxPause = 0\n"); + btc_alg_dbg(ALGO_TRACE_FW_DETAIL, + "[BTCoex], TxPause = 0\n"); if (coex_dm->cur_ps_tdma == 5) { halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 2); @@ -1892,8 +1882,8 @@ static void btc8821a2_int3(struct btc_coexist *btcoexist, bool tx_pause, int result) { if (tx_pause) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, - "[BTCoex], TxPause = 1\n"); + 
btc_alg_dbg(ALGO_TRACE_FW_DETAIL, + "[BTCoex], TxPause = 1\n"); if (coex_dm->cur_ps_tdma == 1) { halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 7); @@ -1982,8 +1972,8 @@ static void btc8821a2_int3(struct btc_coexist *btcoexist, bool tx_pause, } } } else { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, - "[BTCoex], TxPause = 0\n"); + btc_alg_dbg(ALGO_TRACE_FW_DETAIL, + "[BTCoex], TxPause = 0\n"); if (coex_dm->cur_ps_tdma == 5) { halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 3); @@ -2085,13 +2075,13 @@ static void btc8821a2ant_tdma_dur_adj(struct btc_coexist *btcoexist, int result; u8 retry_count = 0; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, - "[BTCoex], TdmaDurationAdjust()\n"); + btc_alg_dbg(ALGO_TRACE_FW, + "[BTCoex], TdmaDurationAdjust()\n"); if (coex_dm->reset_tdma_adjust) { coex_dm->reset_tdma_adjust = false; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, - "[BTCoex], first run TdmaDurationAdjust()!!\n"); + btc_alg_dbg(ALGO_TRACE_FW_DETAIL, + "[BTCoex], first run TdmaDurationAdjust()!!\n"); if (sco_hid) { if (tx_pause) { if (max_interval == 1) { @@ -2195,11 +2185,11 @@ static void btc8821a2ant_tdma_dur_adj(struct btc_coexist *btcoexist, } else { /* accquire the BT TRx retry count from BT_Info byte2 */ retry_count = coex_sta->bt_retry_cnt; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, - "[BTCoex], retry_count = %d\n", retry_count); - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, - "[BTCoex], up = %d, dn = %d, m = %d, n = %d, wait_count = %d\n", - (int)up, (int)dn, (int)m, (int)n, (int)wait_count); + btc_alg_dbg(ALGO_TRACE_FW_DETAIL, + "[BTCoex], retry_count = %d\n", retry_count); + btc_alg_dbg(ALGO_TRACE_FW_DETAIL, + "[BTCoex], up = %d, dn = %d, m = %d, n = %d, wait_count = %d\n", + (int)up, (int)dn, (int)m, (int)n, (int)wait_count); result = 0; wait_count++; @@ -2220,9 +2210,8 @@ static void btc8821a2ant_tdma_dur_adj(struct btc_coexist *btcoexist, up = 0; dn = 0; result = 1; - BTC_PRINT(BTC_MSG_ALGORITHM, - ALGO_TRACE_FW_DETAIL, - "[BTCoex], Increase wifi duration!!\n"); + btc_alg_dbg(ALGO_TRACE_FW_DETAIL, + "[BTCoex], Increase wifi duration!!\n"); } } else if (retry_count <= 3) { /* <=3 retry in the last 2-second duration */ @@ -2251,9 +2240,8 @@ static void btc8821a2ant_tdma_dur_adj(struct btc_coexist *btcoexist, dn = 0; wait_count = 0; result = -1; - BTC_PRINT(BTC_MSG_ALGORITHM, - ALGO_TRACE_FW_DETAIL, - "[BTCoex], Decrease wifi duration for retryCounter<3!!\n"); + btc_alg_dbg(ALGO_TRACE_FW_DETAIL, + "[BTCoex], Decrease wifi duration for retryCounter<3!!\n"); } } else { /* retry count > 3, if retry count > 3 happens once, @@ -2274,12 +2262,12 @@ static void btc8821a2ant_tdma_dur_adj(struct btc_coexist *btcoexist, dn = 0; wait_count = 0; result = -1; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, - "[BTCoex], Decrease wifi duration for retryCounter>3!!\n"); + btc_alg_dbg(ALGO_TRACE_FW_DETAIL, + "[BTCoex], Decrease wifi duration for retryCounter>3!!\n"); } - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, - "[BTCoex], max Interval = %d\n", max_interval); + btc_alg_dbg(ALGO_TRACE_FW_DETAIL, + "[BTCoex], max Interval = %d\n", max_interval); if (max_interval == 1) btc8821a2_int1(btcoexist, tx_pause, result); else if (max_interval == 2) @@ -2295,9 +2283,9 @@ static void btc8821a2ant_tdma_dur_adj(struct btc_coexist *btcoexist, if (coex_dm->cur_ps_tdma != coex_dm->tdma_adj_type) { bool scan = false, link = false, roam = false; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, - "[BTCoex], PsTdma type dismatch!!!, cur_ps_tdma = %d, 
recordPsTdma = %d\n", - coex_dm->cur_ps_tdma, coex_dm->tdma_adj_type); + btc_alg_dbg(ALGO_TRACE_FW_DETAIL, + "[BTCoex], PsTdma type dismatch!!!, cur_ps_tdma = %d, recordPsTdma = %d\n", + coex_dm->cur_ps_tdma, coex_dm->tdma_adj_type); btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan); btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link); @@ -2307,8 +2295,8 @@ static void btc8821a2ant_tdma_dur_adj(struct btc_coexist *btcoexist, halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, coex_dm->tdma_adj_type); } else { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, - "[BTCoex], roaming/link/scan is under progress, will adjust next time!!!\n"); + btc_alg_dbg(ALGO_TRACE_FW_DETAIL, + "[BTCoex], roaming/link/scan is under progress, will adjust next time!!!\n"); } } @@ -3183,8 +3171,8 @@ static void halbtc8821a2ant_run_coexist_mechanism(struct btc_coexist *btcoexist) u8 algorithm = 0; if (btcoexist->manual_control) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], Manual control!!!\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], Manual control!!!\n"); return; } @@ -3192,8 +3180,8 @@ static void halbtc8821a2ant_run_coexist_mechanism(struct btc_coexist *btcoexist) BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g); if (wifi_under_5g) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], RunCoexistMechanism(), run 5G coex setting!!<===\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], RunCoexistMechanism(), run 5G coex setting!!<===\n"); halbtc8821a2ant_coex_under_5g(btcoexist); return; } @@ -3201,81 +3189,82 @@ static void halbtc8821a2ant_run_coexist_mechanism(struct btc_coexist *btcoexist) algorithm = halbtc8821a2ant_action_algorithm(btcoexist); if (coex_sta->c2h_bt_inquiry_page && (BT_8821A_2ANT_COEX_ALGO_PANHS != algorithm)) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], BT is under inquiry/page scan !!\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], BT is under inquiry/page scan !!\n"); halbtc8821a2ant_bt_inquiry_page(btcoexist); return; } coex_dm->cur_algorithm = algorithm; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], Algorithm = %d\n", coex_dm->cur_algorithm); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], Algorithm = %d\n", coex_dm->cur_algorithm); if (halbtc8821a2ant_is_common_action(btcoexist)) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], Action 2-Ant common.\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], Action 2-Ant common\n"); coex_dm->reset_tdma_adjust = true; } else { if (coex_dm->cur_algorithm != coex_dm->pre_algorithm) { - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], pre_algorithm = %d, cur_algorithm = %d\n", - coex_dm->pre_algorithm, coex_dm->cur_algorithm); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], pre_algorithm = %d, cur_algorithm = %d\n", + coex_dm->pre_algorithm, + coex_dm->cur_algorithm); coex_dm->reset_tdma_adjust = true; } switch (coex_dm->cur_algorithm) { case BT_8821A_2ANT_COEX_ALGO_SCO: - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], Action 2-Ant, algorithm = SCO.\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], Action 2-Ant, algorithm = SCO\n"); halbtc8821a2ant_action_sco(btcoexist); break; case BT_8821A_2ANT_COEX_ALGO_HID: - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], Action 2-Ant, algorithm = HID.\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], Action 2-Ant, algorithm = HID\n"); halbtc8821a2ant_action_hid(btcoexist); break; case BT_8821A_2ANT_COEX_ALGO_A2DP: - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], Action 2-Ant, algorithm = A2DP.\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], Action 2-Ant, algorithm = A2DP\n"); 
halbtc8821a2ant_action_a2dp(btcoexist); break; case BT_8821A_2ANT_COEX_ALGO_A2DP_PANHS: - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], Action 2-Ant, algorithm = A2DP+PAN(HS).\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], Action 2-Ant, algorithm = A2DP+PAN(HS)\n"); halbtc8821a2ant_action_a2dp_pan_hs(btcoexist); break; case BT_8821A_2ANT_COEX_ALGO_PANEDR: - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], Action 2-Ant, algorithm = PAN(EDR).\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], Action 2-Ant, algorithm = PAN(EDR)\n"); halbtc8821a2ant_action_pan_edr(btcoexist); break; case BT_8821A_2ANT_COEX_ALGO_PANHS: - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], Action 2-Ant, algorithm = HS mode.\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], Action 2-Ant, algorithm = HS mode\n"); halbtc8821a2ant_action_pan_hs(btcoexist); break; case BT_8821A_2ANT_COEX_ALGO_PANEDR_A2DP: - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], Action 2-Ant, algorithm = PAN+A2DP.\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], Action 2-Ant, algorithm = PAN+A2DP\n"); halbtc8821a2ant_action_pan_edr_a2dp(btcoexist); break; case BT_8821A_2ANT_COEX_ALGO_PANEDR_HID: - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], Action 2-Ant, algorithm = PAN(EDR)+HID.\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], Action 2-Ant, algorithm = PAN(EDR)+HID\n"); halbtc8821a2ant_action_pan_edr_hid(btcoexist); break; case BT_8821A_2ANT_COEX_ALGO_HID_A2DP_PANEDR: - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], Action 2-Ant, algorithm = HID+A2DP+PAN.\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], Action 2-Ant, algorithm = HID+A2DP+PAN\n"); btc8821a2ant_act_hid_a2dp_pan_edr(btcoexist); break; case BT_8821A_2ANT_COEX_ALGO_HID_A2DP: - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], Action 2-Ant, algorithm = HID+A2DP.\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], Action 2-Ant, algorithm = HID+A2DP\n"); halbtc8821a2ant_action_hid_a2dp(btcoexist); break; default: - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], Action 2-Ant, algorithm = coexist All Off!!\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], Action 2-Ant, algorithm = coexist All Off!!\n"); halbtc8821a2ant_coex_all_off(btcoexist); break; } @@ -3294,8 +3283,8 @@ void ex_halbtc8821a2ant_init_hwconfig(struct btc_coexist *btcoexist) { u8 u1tmp = 0; - BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, - "[BTCoex], 2Ant Init HW Config!!\n"); + btc_iface_dbg(INTF_INIT, + "[BTCoex], 2Ant Init HW Config!!\n"); /* backup rf 0x1e value */ coex_dm->bt_rf0x1e_backup = @@ -3328,8 +3317,8 @@ ex_halbtc8821a2ant_init_coex_dm( struct btc_coexist *btcoexist ) { - BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, - "[BTCoex], Coex Mechanism Init!!\n"); + btc_iface_dbg(INTF_INIT, + "[BTCoex], Coex Mechanism Init!!\n"); halbtc8821a2ant_init_coex_dm(btcoexist); } @@ -3574,13 +3563,13 @@ ex_halbtc8821a2ant_display_coex_info( void ex_halbtc8821a2ant_ips_notify(struct btc_coexist *btcoexist, u8 type) { if (BTC_IPS_ENTER == type) { - BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, - "[BTCoex], IPS ENTER notify\n"); + btc_iface_dbg(INTF_NOTIFY, + "[BTCoex], IPS ENTER notify\n"); coex_sta->under_ips = true; halbtc8821a2ant_coex_all_off(btcoexist); } else if (BTC_IPS_LEAVE == type) { - BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, - "[BTCoex], IPS LEAVE notify\n"); + btc_iface_dbg(INTF_NOTIFY, + "[BTCoex], IPS LEAVE notify\n"); coex_sta->under_ips = false; /*halbtc8821a2ant_init_coex_dm(btcoexist);*/ } @@ -3589,12 +3578,12 @@ void ex_halbtc8821a2ant_ips_notify(struct btc_coexist *btcoexist, u8 type) void 
ex_halbtc8821a2ant_lps_notify(struct btc_coexist *btcoexist, u8 type) { if (BTC_LPS_ENABLE == type) { - BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, - "[BTCoex], LPS ENABLE notify\n"); + btc_iface_dbg(INTF_NOTIFY, + "[BTCoex], LPS ENABLE notify\n"); coex_sta->under_lps = true; } else if (BTC_LPS_DISABLE == type) { - BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, - "[BTCoex], LPS DISABLE notify\n"); + btc_iface_dbg(INTF_NOTIFY, + "[BTCoex], LPS DISABLE notify\n"); coex_sta->under_lps = false; } } @@ -3602,22 +3591,22 @@ void ex_halbtc8821a2ant_lps_notify(struct btc_coexist *btcoexist, u8 type) void ex_halbtc8821a2ant_scan_notify(struct btc_coexist *btcoexist, u8 type) { if (BTC_SCAN_START == type) { - BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, - "[BTCoex], SCAN START notify\n"); + btc_iface_dbg(INTF_NOTIFY, + "[BTCoex], SCAN START notify\n"); } else if (BTC_SCAN_FINISH == type) { - BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, - "[BTCoex], SCAN FINISH notify\n"); + btc_iface_dbg(INTF_NOTIFY, + "[BTCoex], SCAN FINISH notify\n"); } } void ex_halbtc8821a2ant_connect_notify(struct btc_coexist *btcoexist, u8 type) { if (BTC_ASSOCIATE_START == type) { - BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, - "[BTCoex], CONNECT START notify\n"); + btc_iface_dbg(INTF_NOTIFY, + "[BTCoex], CONNECT START notify\n"); } else if (BTC_ASSOCIATE_FINISH == type) { - BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, - "[BTCoex], CONNECT FINISH notify\n"); + btc_iface_dbg(INTF_NOTIFY, + "[BTCoex], CONNECT FINISH notify\n"); } } @@ -3629,11 +3618,11 @@ void ex_halbtc8821a2ant_media_status_notify(struct btc_coexist *btcoexist, u8 wifi_central_chnl; if (BTC_MEDIA_CONNECT == type) { - BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, - "[BTCoex], MEDIA connect notify\n"); + btc_iface_dbg(INTF_NOTIFY, + "[BTCoex], MEDIA connect notify\n"); } else { - BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, - "[BTCoex], MEDIA disconnect notify\n"); + btc_iface_dbg(INTF_NOTIFY, + "[BTCoex], MEDIA disconnect notify\n"); } /* only 2.4G we need to inform bt the chnl mask*/ @@ -3654,9 +3643,11 @@ void ex_halbtc8821a2ant_media_status_notify(struct btc_coexist *btcoexist, coex_dm->wifi_chnl_info[1] = h2c_parameter[1]; coex_dm->wifi_chnl_info[2] = h2c_parameter[2]; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, - "[BTCoex], FW write 0x66 = 0x%x\n", - h2c_parameter[0]<<16|h2c_parameter[1]<<8|h2c_parameter[2]); + btc_alg_dbg(ALGO_TRACE_FW_EXEC, + "[BTCoex], FW write 0x66 = 0x%x\n", + h2c_parameter[0] << 16 | + h2c_parameter[1] << 8 | + h2c_parameter[2]); btcoexist->btc_fill_h2c(btcoexist, 0x66, 3, h2c_parameter); } @@ -3664,8 +3655,8 @@ void ex_halbtc8821a2ant_media_status_notify(struct btc_coexist *btcoexist, void ex_halbtc8821a2ant_special_packet_notify(struct btc_coexist *btcoexist, u8 type) { if (type == BTC_PACKET_DHCP) { - BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, - "[BTCoex], DHCP Packet notify\n"); + btc_iface_dbg(INTF_NOTIFY, + "[BTCoex], DHCP Packet notify\n"); } } @@ -3685,19 +3676,19 @@ void ex_halbtc8821a2ant_bt_info_notify(struct btc_coexist *btcoexist, rsp_source = BT_INFO_SRC_8821A_2ANT_WIFI_FW; coex_sta->bt_info_c2h_cnt[rsp_source]++; - BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, - "[BTCoex], Bt info[%d], length = %d, hex data = [", - rsp_source, length); + btc_iface_dbg(INTF_NOTIFY, + "[BTCoex], Bt info[%d], length = %d, hex data = [", + rsp_source, length); for (i = 0; i < length; i++) { coex_sta->bt_info_c2h[rsp_source][i] = tmp_buf[i]; if (i == 1) bt_info = tmp_buf[i]; if (i == length-1) { - BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, - "0x%02x]\n", 
tmp_buf[i]); + btc_iface_dbg(INTF_NOTIFY, + "0x%02x]\n", tmp_buf[i]); } else { - BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, - "0x%02x, ", tmp_buf[i]); + btc_iface_dbg(INTF_NOTIFY, + "0x%02x, ", tmp_buf[i]); } } @@ -3823,8 +3814,8 @@ void ex_halbtc8821a2ant_bt_info_notify(struct btc_coexist *btcoexist, void ex_halbtc8821a2ant_halt_notify(struct btc_coexist *btcoexist) { - BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, - "[BTCoex], Halt notify\n"); + btc_iface_dbg(INTF_NOTIFY, + "[BTCoex], Halt notify\n"); halbtc8821a2ant_ignore_wlan_act(btcoexist, FORCE_EXEC, true); ex_halbtc8821a2ant_media_status_notify(btcoexist, BTC_MEDIA_DISCONNECT); @@ -3837,31 +3828,31 @@ void ex_halbtc8821a2ant_periodical(struct btc_coexist *btcoexist) struct btc_board_info *board_info = &btcoexist->board_info; struct btc_stack_info *stack_info = &btcoexist->stack_info; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "[BTCoex], ==========================Periodical===========================\n"); + btc_alg_dbg(ALGO_TRACE, + "[BTCoex], ==========================Periodical===========================\n"); if (dis_ver_info_cnt <= 5) { dis_ver_info_cnt += 1; - BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, - "[BTCoex], ****************************************************************\n"); - BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, - "[BTCoex], Ant PG Num/ Ant Mech/ Ant Pos = %d/ %d/ %d\n", - board_info->pg_ant_num, - board_info->btdm_ant_num, - board_info->btdm_ant_pos); - BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, - "[BTCoex], BT stack/ hci ext ver = %s / %d\n", - ((stack_info->profile_notified) ? "Yes" : "No"), - stack_info->hci_version); + btc_iface_dbg(INTF_INIT, + "[BTCoex], ****************************************************************\n"); + btc_iface_dbg(INTF_INIT, + "[BTCoex], Ant PG Num/ Ant Mech/ Ant Pos = %d/ %d/ %d\n", + board_info->pg_ant_num, + board_info->btdm_ant_num, + board_info->btdm_ant_pos); + btc_iface_dbg(INTF_INIT, + "[BTCoex], BT stack/ hci ext ver = %s / %d\n", + stack_info->profile_notified ? 
"Yes" : "No", + stack_info->hci_version); btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER, &bt_patch_ver); btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver); - BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, - "[BTCoex], CoexVer/ FwVer/ PatchVer = %d_%x/ 0x%x/ 0x%x(%d)\n", - glcoex_ver_date_8821a_2ant, glcoex_ver_8821a_2ant, - fw_ver, bt_patch_ver, bt_patch_ver); - BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, - "[BTCoex], ****************************************************************\n"); + btc_iface_dbg(INTF_INIT, + "[BTCoex], CoexVer/ FwVer/ PatchVer = %d_%x/ 0x%x/ 0x%x(%d)\n", + glcoex_ver_date_8821a_2ant, glcoex_ver_8821a_2ant, + fw_ver, bt_patch_ver, bt_patch_ver); + btc_iface_dbg(INTF_INIT, + "[BTCoex], ****************************************************************\n"); } halbtc8821a2ant_query_bt_info(btcoexist); diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c index babd1490f20c..b660c214dc71 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c @@ -141,8 +141,8 @@ static u8 halbtc_get_wifi_central_chnl(struct btc_coexist *btcoexist) if (rtlphy->current_channel != 0) chnl = rtlphy->current_channel; - BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, - "static halbtc_get_wifi_central_chnl:%d\n", chnl); + btc_alg_dbg(ALGO_TRACE, + "static halbtc_get_wifi_central_chnl:%d\n", chnl); return chnl; } diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h index f41ca57dd8a7..3cbe34c535ec 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h @@ -116,12 +116,17 @@ extern u32 btc_dbg_type[]; #define WIFI_P2P_GO_CONNECTED BIT3 #define WIFI_P2P_GC_CONNECTED BIT4 -#define BTC_PRINT(dbgtype, dbgflag, printstr, ...) \ - do { \ - if (unlikely(btc_dbg_type[dbgtype] & dbgflag)) {\ - printk(printstr, ##__VA_ARGS__); \ - } \ - } while (0) +#define btc_alg_dbg(dbgflag, fmt, ...) \ +do { \ + if (unlikely(btc_dbg_type[BTC_MSG_ALGORITHM] & dbgflag)) \ + printk(KERN_DEBUG fmt, ##__VA_ARGS__); \ +} while (0) +#define btc_iface_dbg(dbgflag, fmt, ...) \ +do { \ + if (unlikely(btc_dbg_type[BTC_MSG_INTERFACE] & dbgflag)) \ + printk(KERN_DEBUG fmt, ##__VA_ARGS__); \ +} while (0) + #define BTC_RSSI_HIGH(_rssi_) \ ((_rssi_ == BTC_RSSI_STATE_HIGH || \ From e32993eb3aed598790ba0c581796c357898bd8f0 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Fri, 18 Mar 2016 16:20:48 +0000 Subject: [PATCH 0344/1649] wl12xx: remove redundant null check on wl->scan.ssid ssid is an array of u8, so it can never be null, so the null check on wl->scan.ssid is redundant and can be removed. 
Signed-off-by: Colin Ian King Signed-off-by: Kalle Valo --- drivers/net/wireless/ti/wl12xx/scan.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/ti/wl12xx/scan.c b/drivers/net/wireless/ti/wl12xx/scan.c index ebed13af9852..a0dfc59e9644 100644 --- a/drivers/net/wireless/ti/wl12xx/scan.c +++ b/drivers/net/wireless/ti/wl12xx/scan.c @@ -149,7 +149,7 @@ static int wl1271_scan_send(struct wl1271 *wl, struct wl12xx_vif *wlvif, else cmd->params.band = WL1271_SCAN_BAND_5_GHZ; - if (wl->scan.ssid_len && wl->scan.ssid) { + if (wl->scan.ssid_len) { cmd->params.ssid_len = wl->scan.ssid_len; memcpy(cmd->params.ssid, wl->scan.ssid, wl->scan.ssid_len); } From 8b4c0009313f3d42e2540e3e1f776097dd0db73d Mon Sep 17 00:00:00 2001 From: Vishal Thanki Date: Sat, 19 Mar 2016 11:41:01 +0100 Subject: [PATCH 0345/1649] rt2x00usb: Use usb anchor to manage URB With current driver, it is observed that a URB is not completed while the USB disconnect is initiated. Due to that, the URB completion handler is trying to access the resource which was freed as a part of USB disconnect. Managing the URBs with anchor will make sure that all the URBs are handled gracefully before device gets disconnected. Signed-off-by: Vishal Thanki Acked-by: Stanislaw Gruszka Signed-off-by: Kalle Valo --- drivers/net/wireless/ralink/rt2x00/rt2x00.h | 3 +++ .../net/wireless/ralink/rt2x00/rt2x00dev.c | 3 +++ .../net/wireless/ralink/rt2x00/rt2x00usb.c | 21 +++++++++++++++++-- 3 files changed, 25 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00.h b/drivers/net/wireless/ralink/rt2x00/rt2x00.h index 6418620f95ff..3dacede7da5e 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00.h +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00.h @@ -38,6 +38,7 @@ #include #include #include +#include #include @@ -1002,6 +1003,8 @@ struct rt2x00_dev { /* Extra TX headroom required for alignment purposes. 
*/ unsigned int extra_tx_headroom; + + struct usb_anchor *anchor; }; struct rt2x00_bar_list_entry { diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c index 5639ed816813..b2f7c586045d 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c @@ -1422,11 +1422,14 @@ void rt2x00lib_remove_dev(struct rt2x00_dev *rt2x00dev) cancel_work_sync(&rt2x00dev->intf_work); cancel_delayed_work_sync(&rt2x00dev->autowakeup_work); cancel_work_sync(&rt2x00dev->sleep_work); +#ifdef CONFIG_RT2X00_LIB_USB if (rt2x00_is_usb(rt2x00dev)) { + usb_kill_anchored_urbs(rt2x00dev->anchor); hrtimer_cancel(&rt2x00dev->txstatus_timer); cancel_work_sync(&rt2x00dev->rxdone_work); cancel_work_sync(&rt2x00dev->txdone_work); } +#endif if (rt2x00dev->workqueue) destroy_workqueue(rt2x00dev->workqueue); diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c b/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c index 7627af6098eb..7cf26c6124d1 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c @@ -171,8 +171,11 @@ static void rt2x00usb_register_read_async_cb(struct urb *urb) { struct rt2x00_async_read_data *rd = urb->context; if (rd->callback(rd->rt2x00dev, urb->status, le32_to_cpu(rd->reg))) { - if (usb_submit_urb(urb, GFP_ATOMIC) < 0) + usb_anchor_urb(urb, rd->rt2x00dev->anchor); + if (usb_submit_urb(urb, GFP_ATOMIC) < 0) { + usb_unanchor_urb(urb); kfree(rd); + } } else kfree(rd); } @@ -206,8 +209,11 @@ void rt2x00usb_register_read_async(struct rt2x00_dev *rt2x00dev, usb_fill_control_urb(urb, usb_dev, usb_rcvctrlpipe(usb_dev, 0), (unsigned char *)(&rd->cr), &rd->reg, sizeof(rd->reg), rt2x00usb_register_read_async_cb, rd); - if (usb_submit_urb(urb, GFP_ATOMIC) < 0) + usb_anchor_urb(urb, rt2x00dev->anchor); + if (usb_submit_urb(urb, GFP_ATOMIC) < 0) { + usb_unanchor_urb(urb); kfree(rd); + } usb_free_urb(urb); } EXPORT_SYMBOL_GPL(rt2x00usb_register_read_async); @@ -313,8 +319,10 @@ static bool rt2x00usb_kick_tx_entry(struct queue_entry *entry, void *data) entry->skb->data, length, rt2x00usb_interrupt_txdone, entry); + usb_anchor_urb(entry_priv->urb, rt2x00dev->anchor); status = usb_submit_urb(entry_priv->urb, GFP_ATOMIC); if (status) { + usb_unanchor_urb(entry_priv->urb); if (status == -ENODEV) clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags); set_bit(ENTRY_DATA_IO_FAILED, &entry->flags); @@ -402,8 +410,10 @@ static bool rt2x00usb_kick_rx_entry(struct queue_entry *entry, void *data) entry->skb->data, entry->skb->len, rt2x00usb_interrupt_rxdone, entry); + usb_anchor_urb(entry_priv->urb, rt2x00dev->anchor); status = usb_submit_urb(entry_priv->urb, GFP_ATOMIC); if (status) { + usb_unanchor_urb(entry_priv->urb); if (status == -ENODEV) clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags); set_bit(ENTRY_DATA_IO_FAILED, &entry->flags); @@ -818,6 +828,13 @@ int rt2x00usb_probe(struct usb_interface *usb_intf, if (retval) goto exit_free_reg; + rt2x00dev->anchor = devm_kmalloc(&usb_dev->dev, + sizeof(struct usb_anchor), + GFP_KERNEL); + if (!rt2x00dev->anchor) + goto exit_free_reg; + + init_usb_anchor(rt2x00dev->anchor); return 0; exit_free_reg: From 3d43e031840057f9609da3c546787f05cd6c5518 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Sun, 20 Mar 2016 17:34:52 +0000 Subject: [PATCH 0346/1649] brcmfmac: sdio: remove unused variable retry_limit retry_limit has never been used during the life of this driver, so we may as well remove it as it is redundant. 
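
The rt2x00usb change above is an instance of a general pattern: anchor each URB before submission, unanchor it if the submit fails, and kill the whole anchor on disconnect so no completion handler can run against freed driver state. A condensed sketch of that pattern (an illustrative driver skeleton under those assumptions, not the actual rt2x00 code; locking and error paths trimmed):

#include <linux/usb.h>

static struct usb_anchor *anchor;	/* normally lives in per-device state */

static int example_init(struct device *dev)
{
	anchor = devm_kmalloc(dev, sizeof(*anchor), GFP_KERNEL);
	if (!anchor)
		return -ENOMEM;
	init_usb_anchor(anchor);
	return 0;
}

static void example_submit(struct urb *urb)
{
	/* Anchor first so a concurrent disconnect can find the URB. */
	usb_anchor_urb(urb, anchor);
	if (usb_submit_urb(urb, GFP_ATOMIC) < 0)
		usb_unanchor_urb(urb);	/* submit failed, drop it again */
}

static void example_disconnect(void)
{
	/* Cancels and waits for every anchored URB before teardown. */
	usb_kill_anchored_urbs(anchor);
}
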
Signed-off-by: Colin Ian King Reviewed-by: Julian Calaby Signed-off-by: Kalle Valo --- drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c index 43fd3f402eba..cd92ba77ecfd 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c @@ -535,9 +535,6 @@ static int qcount[NUMPRIO]; #define RETRYCHAN(chan) ((chan) == SDPCM_EVENT_CHANNEL) -/* Retry count for register access failures */ -static const uint retry_limit = 2; - /* Limit on rounding up frames */ static const uint max_roundup = 512; From d9f5725fb00b87f99d6fc876d27f7d54ab351669 Mon Sep 17 00:00:00 2001 From: Amitkumar Karwar Date: Tue, 22 Mar 2016 12:09:56 +0800 Subject: [PATCH 0347/1649] mwifiex: advertise low priority scan feature Low priority scan handling code which delays or aborts scan operation based on Tx traffic is removed recently. The reason is firmware already takes care of it in our new feature scan channel gap. Hence we should advertise low priority scan support to cfg80211. This patch fixes a problem in which OBSS scan request from wpa_supplicant was being rejected by cfg80211. Signed-off-by: Amitkumar Karwar Signed-off-by: Wei-Ning Huang Tested-by: Wei-Ning Huang Acked-by: Amitkumar Karwar Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/cfg80211.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c index b0663bdea5b5..108e64137826 100644 --- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c +++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c @@ -4092,6 +4092,7 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter) wiphy->features |= NL80211_FEATURE_HT_IBSS | NL80211_FEATURE_INACTIVITY_TIMER | + NL80211_FEATURE_LOW_PRIORITY_SCAN | NL80211_FEATURE_NEED_OBSS_SCAN; if (ISSUPP_TDLS_ENABLED(adapter->fw_cap_info)) From 415cd2a645b2573f173cc52419049f9caacf9a47 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Fri, 18 Mar 2016 16:06:53 -0700 Subject: [PATCH 0348/1649] igb: Fix sparse warning about passing __beXX into leXX_to_cpup We were casting the addr as __beXX and then passing it into le32_to_cpu because the device expects the MAC address to be in network order even though the register set is little endian. Instead of casting it as __beXX we can just cast it as __leXX in order to maintain consistency since the region of memory is already in little endian order as far as we are concerned. Signed-off-by: Alexander Duyck Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/igb/igb_main.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 55a1405cb2a1..36814a2e326d 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -7845,11 +7845,13 @@ static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index, struct e1000_hw *hw = &adapter->hw; u32 rar_low, rar_high; - /* HW expects these in little endian so we reverse the byte order - * from network order (big endian) to CPU endian + /* HW expects these to be in network order when they are plugged + * into the registers which are little endian. 
In order to guarantee + * that ordering we need to do an leXX_to_cpup here in order to be + * ready for the byteswap that occurs with writel */ - rar_low = le32_to_cpup((__be32 *)(addr)); - rar_high = le16_to_cpup((__be16 *)(addr + 4)); + rar_low = le32_to_cpup((__le32 *)(addr)); + rar_high = le16_to_cpup((__le16 *)(addr + 4)); /* Indicate to hardware the Address is Valid. */ rar_high |= E1000_RAH_AV; From 7f0ba845607364c76009e396a31651fa3a24bd1c Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Mon, 7 Mar 2016 09:30:21 -0800 Subject: [PATCH 0349/1649] igb: Add support for bulk Tx cleanup & cleanup boolean logic This patch enables bulk free in Tx cleanup for igb and cleans up the boolean logic in the polling routines for igb in the hopes of avoiding any mix-ups similar to what occurred with i40e and i40evf. Signed-off-by: Alexander Duyck Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/igb/igb_main.c | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 36814a2e326d..e40983ca35b4 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -150,7 +150,7 @@ static void igb_update_dca(struct igb_q_vector *); static void igb_setup_dca(struct igb_adapter *); #endif /* CONFIG_IGB_DCA */ static int igb_poll(struct napi_struct *, int); -static bool igb_clean_tx_irq(struct igb_q_vector *); +static bool igb_clean_tx_irq(struct igb_q_vector *, int); static int igb_clean_rx_irq(struct igb_q_vector *, int); static int igb_ioctl(struct net_device *, struct ifreq *, int cmd); static void igb_tx_timeout(struct net_device *); @@ -6522,13 +6522,14 @@ static int igb_poll(struct napi_struct *napi, int budget) igb_update_dca(q_vector); #endif if (q_vector->tx.ring) - clean_complete = igb_clean_tx_irq(q_vector); + clean_complete = igb_clean_tx_irq(q_vector, budget); if (q_vector->rx.ring) { int cleaned = igb_clean_rx_irq(q_vector, budget); work_done += cleaned; - clean_complete &= (cleaned < budget); + if (cleaned >= budget) + clean_complete = false; } /* If all work not completed, return budget and keep polling */ @@ -6545,10 +6546,11 @@ static int igb_poll(struct napi_struct *napi, int budget) /** * igb_clean_tx_irq - Reclaim resources after transmit completes * @q_vector: pointer to q_vector containing needed info + * @napi_budget: Used to determine if we are in netpoll * * returns true if ring is completely cleaned **/ -static bool igb_clean_tx_irq(struct igb_q_vector *q_vector) +static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget) { struct igb_adapter *adapter = q_vector->adapter; struct igb_ring *tx_ring = q_vector->tx.ring; @@ -6587,7 +6589,7 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector) total_packets += tx_buffer->gso_segs; /* free the skb */ - dev_consume_skb_any(tx_buffer->skb); + napi_consume_skb(tx_buffer->skb, napi_budget); /* unmap skb header data */ dma_unmap_single(tx_ring->dev, From 806ffb1d504927d1449397377eac63bb63489266 Mon Sep 17 00:00:00 2001 From: John Holland Date: Thu, 18 Feb 2016 12:10:52 +0100 Subject: [PATCH 0350/1649] igb: allow setting MAC address on i211 using a device tree blob The Intel i211 LOM PCIe Ethernet controllers' iNVM operates as an OTP and has no external EEPROM interface [1]. The following allows the driver to pickup the MAC address from a device tree blob when CONFIG_OF has been enabled. 
[1] http://www.intel.com/content/www/us/en/embedded/products/networking/i211-ethernet-controller-datasheet.html Signed-off-by: John Holland Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/igb/igb_main.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index e40983ca35b4..ff0476c89438 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -50,6 +50,7 @@ #include #include #include +#include #ifdef CONFIG_IGB_DCA #include #endif @@ -2442,9 +2443,11 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) break; } - /* copy the MAC address out of the NVM */ - if (hw->mac.ops.read_mac_addr(hw)) - dev_err(&pdev->dev, "NVM Read Error\n"); + if (eth_platform_get_mac_address(&pdev->dev, hw->mac.addr)) { + /* copy the MAC address out of the NVM */ + if (hw->mac.ops.read_mac_addr(hw)) + dev_err(&pdev->dev, "NVM Read Error\n"); + } memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len); From 8a21ec4e0abb99884ef2da3e4f950025f3bf7fd3 Mon Sep 17 00:00:00 2001 From: Hariprasad Shenai Date: Tue, 5 Apr 2016 09:52:21 +0530 Subject: [PATCH 0351/1649] cxgb4/cxgb4vf: Deprecate module parameter dflt_msg_enable Message level can be set through ethtool, so deprecate module parameter which is used to set the same. Signed-off-by: Hariprasad Shenai Signed-off-by: David S. Miller --- drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 3 ++- drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index d1e3f0997d6b..a1e329ec24cd 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -168,7 +168,8 @@ MODULE_PARM_DESC(force_init, "Forcibly become Master PF and initialize adapter," static int dflt_msg_enable = DFLT_MSG_ENABLE; module_param(dflt_msg_enable, int, 0644); -MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T4 default message enable bitmap"); +MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T4 default message enable bitmap, " + "deprecated parameter"); /* * The driver uses the best interrupt scheme available on a platform in the diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c index 1cc8a7a69457..730fec73d5a6 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c @@ -74,7 +74,8 @@ static int dflt_msg_enable = DFLT_MSG_ENABLE; module_param(dflt_msg_enable, int, 0644); MODULE_PARM_DESC(dflt_msg_enable, - "default adapter ethtool message level bitmap"); + "default adapter ethtool message level bitmap, " + "deprecated parameter"); /* * The driver uses the best interrupt scheme available on a platform in the From efea95d45e6ab4a30df9801f8e9bf68007ee9b43 Mon Sep 17 00:00:00 2001 From: Doron Shikmoni Date: Wed, 17 Feb 2016 09:34:25 +0200 Subject: [PATCH 0352/1649] igb: Garbled output for "ethtool -m" Garbled output for "ethtool -m ethX", in igb-driven NICs with module / plugin EEPROM (i.e. SFP information). Each output data byte appears duplicated. In igb_ethtool.c, igb_get_module_eeprom() is reading the EEPROM via i2c; the eeprom offset for each word that's read via igb_read_phy_reg_i2c() was passed in #words, whereas it needs to be a byte offset. 
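
To see the duplication concretely: passing a word index where a byte offset is expected makes consecutive 16-bit reads overlap by one byte, so every EEPROM byte shows up in two adjacent words of the output. A small standalone illustration of the arithmetic (simulated EEPROM contents and a hypothetical helper name; the real driver reads each word over i2c):

#include <stdio.h>

/* Pretend EEPROM contents, byte-addressed like the SFP module EEPROM. */
static const unsigned char eeprom[] = { 0x03, 0x04, 0x07, 0x10, 0x00, 0x00 };

/* Hypothetical stand-in for "read one 16-bit word at a byte offset". */
static unsigned short read_word(unsigned int byte_off)
{
	return (unsigned short)(eeprom[byte_off] | (eeprom[byte_off + 1] << 8));
}

int main(void)
{
	unsigned int i;

	for (i = 0; i < 3; i++) {
		/* Buggy: word index passed as byte offset, reads overlap. */
		printf("buggy word %u = 0x%04x\n", i, read_word(i));
		/* Fixed: byte offset is word index * 2. */
		printf("fixed word %u = 0x%04x\n", i, read_word(i * 2));
	}
	return 0;
}
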
This patches fixes the bug. Signed-off-by: Doron Shikmoni Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/igb/igb_ethtool.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c index 7982243d1f9b..bb4d6cdcd0b8 100644 --- a/drivers/net/ethernet/intel/igb/igb_ethtool.c +++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c @@ -2831,7 +2831,8 @@ static int igb_get_module_eeprom(struct net_device *netdev, /* Read EEPROM block, SFF-8079/SFF-8472, word at a time */ for (i = 0; i < last_word - first_word + 1; i++) { - status = igb_read_phy_reg_i2c(hw, first_word + i, &dataword[i]); + status = igb_read_phy_reg_i2c(hw, (first_word + i) * 2, + &dataword[i]); if (status) { /* Error occurred while reading module */ kfree(dataword); From 0c867c9bf84ce2a998f83725bd363f66ce84d548 Mon Sep 17 00:00:00 2001 From: Jiri Benc Date: Tue, 5 Apr 2016 14:47:10 +0200 Subject: [PATCH 0353/1649] vxlan: move Ethernet initialization to a separate function This will allow to initialize vxlan in ARPHRD_NONE mode based on the passed rtnl attributes. v2: renamed "l2mode" to "ether". Signed-off-by: Jiri Benc Signed-off-by: David S. Miller --- drivers/net/vxlan.c | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 1c0fa364323e..6bd5b874ead7 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -2404,7 +2404,7 @@ static int vxlan_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb) return 0; } -static const struct net_device_ops vxlan_netdev_ops = { +static const struct net_device_ops vxlan_netdev_ether_ops = { .ndo_init = vxlan_init, .ndo_uninit = vxlan_uninit, .ndo_open = vxlan_open, @@ -2458,10 +2458,6 @@ static void vxlan_setup(struct net_device *dev) struct vxlan_dev *vxlan = netdev_priv(dev); unsigned int h; - eth_hw_addr_random(dev); - ether_setup(dev); - - dev->netdev_ops = &vxlan_netdev_ops; dev->destructor = free_netdev; SET_NETDEV_DEVTYPE(dev, &vxlan_type); @@ -2476,8 +2472,7 @@ static void vxlan_setup(struct net_device *dev) dev->hw_features |= NETIF_F_GSO_SOFTWARE; dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX; netif_keep_dst(dev); - dev->priv_flags &= ~IFF_TX_SKB_SHARING; - dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE; + dev->priv_flags |= IFF_NO_QUEUE; INIT_LIST_HEAD(&vxlan->next); spin_lock_init(&vxlan->hash_lock); @@ -2496,6 +2491,15 @@ static void vxlan_setup(struct net_device *dev) INIT_HLIST_HEAD(&vxlan->fdb_head[h]); } +static void vxlan_ether_setup(struct net_device *dev) +{ + eth_hw_addr_random(dev); + ether_setup(dev); + dev->priv_flags &= ~IFF_TX_SKB_SHARING; + dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; + dev->netdev_ops = &vxlan_netdev_ether_ops; +} + static const struct nla_policy vxlan_policy[IFLA_VXLAN_MAX + 1] = { [IFLA_VXLAN_ID] = { .type = NLA_U32 }, [IFLA_VXLAN_GROUP] = { .len = FIELD_SIZEOF(struct iphdr, daddr) }, @@ -2722,6 +2726,8 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev, __be16 default_port = vxlan->cfg.dst_port; struct net_device *lowerdev = NULL; + vxlan_ether_setup(dev); + vxlan->net = src_net; dst->remote_vni = conf->vni; From 47e5d1b06305e73afc917f47b65490adb06c7194 Mon Sep 17 00:00:00 2001 From: Jiri Benc Date: Tue, 5 Apr 2016 14:47:11 +0200 Subject: [PATCH 0354/1649] vxlan: move fdb code to common location in vxlan_xmit Handle VXLAN_F_COLLECT_METADATA before VXLAN_F_PROXY. 
The latter does not make sense with the former, as it needs populated fdb which does not happen in metadata mode. After this cleanup, the fdb code in vxlan_xmit is moved to a common location and can be later skipped for VXLAN-GPE which does not necessarily carry inner Ethernet header. v2: changed commit description to not reference L3 mode Signed-off-by: Jiri Benc Signed-off-by: David S. Miller --- drivers/net/vxlan.c | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 6bd5b874ead7..d62eebaa9720 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -2106,9 +2106,17 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev) info = skb_tunnel_info(skb); skb_reset_mac_header(skb); - eth = eth_hdr(skb); - if ((vxlan->flags & VXLAN_F_PROXY)) { + if (vxlan->flags & VXLAN_F_COLLECT_METADATA) { + if (info && info->mode & IP_TUNNEL_INFO_TX) + vxlan_xmit_one(skb, dev, NULL, false); + else + kfree_skb(skb); + return NETDEV_TX_OK; + } + + if (vxlan->flags & VXLAN_F_PROXY) { + eth = eth_hdr(skb); if (ntohs(eth->h_proto) == ETH_P_ARP) return arp_reduce(dev, skb); #if IS_ENABLED(CONFIG_IPV6) @@ -2123,18 +2131,10 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev) msg->icmph.icmp6_type == NDISC_NEIGHBOUR_SOLICITATION) return neigh_reduce(dev, skb); } - eth = eth_hdr(skb); #endif } - if (vxlan->flags & VXLAN_F_COLLECT_METADATA) { - if (info && info->mode & IP_TUNNEL_INFO_TX) - vxlan_xmit_one(skb, dev, NULL, false); - else - kfree_skb(skb); - return NETDEV_TX_OK; - } - + eth = eth_hdr(skb); f = vxlan_find_mac(vxlan, eth->h_dest); did_rsc = false; From a6d5bbf34efa8330af7b0b1dba0f38148516ed97 Mon Sep 17 00:00:00 2001 From: Jiri Benc Date: Tue, 5 Apr 2016 14:47:12 +0200 Subject: [PATCH 0355/1649] ip_tunnel: implement __iptunnel_pull_header Allow calling of iptunnel_pull_header without special casing ETH_P_TEB inner protocol. Signed-off-by: Jiri Benc Signed-off-by: David S. 
Miller --- include/net/ip_tunnels.h | 11 +++++++++-- net/ipv4/ip_tunnel_core.c | 8 ++++---- 2 files changed, 13 insertions(+), 6 deletions(-) diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h index 56050f913339..16435d8b1f93 100644 --- a/include/net/ip_tunnels.h +++ b/include/net/ip_tunnels.h @@ -295,8 +295,15 @@ static inline u8 ip_tunnel_ecn_encap(u8 tos, const struct iphdr *iph, return INET_ECN_encapsulate(tos, inner); } -int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto, - bool xnet); +int __iptunnel_pull_header(struct sk_buff *skb, int hdr_len, + __be16 inner_proto, bool raw_proto, bool xnet); + +static inline int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, + __be16 inner_proto, bool xnet) +{ + return __iptunnel_pull_header(skb, hdr_len, inner_proto, false, xnet); +} + void iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb, __be32 src, __be32 dst, u8 proto, u8 tos, u8 ttl, __be16 df, bool xnet); diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c index b3ab1205dfdf..43445df61efd 100644 --- a/net/ipv4/ip_tunnel_core.c +++ b/net/ipv4/ip_tunnel_core.c @@ -86,15 +86,15 @@ void iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb, } EXPORT_SYMBOL_GPL(iptunnel_xmit); -int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto, - bool xnet) +int __iptunnel_pull_header(struct sk_buff *skb, int hdr_len, + __be16 inner_proto, bool raw_proto, bool xnet) { if (unlikely(!pskb_may_pull(skb, hdr_len))) return -ENOMEM; skb_pull_rcsum(skb, hdr_len); - if (inner_proto == htons(ETH_P_TEB)) { + if (!raw_proto && inner_proto == htons(ETH_P_TEB)) { struct ethhdr *eh; if (unlikely(!pskb_may_pull(skb, ETH_HLEN))) @@ -117,7 +117,7 @@ int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto, return iptunnel_pull_offloads(skb); } -EXPORT_SYMBOL_GPL(iptunnel_pull_header); +EXPORT_SYMBOL_GPL(__iptunnel_pull_header); struct metadata_dst *iptunnel_metadata_reply(struct metadata_dst *md, gfp_t flags) From e1e5314de08ba6003b358125eafc9ad9e75a950c Mon Sep 17 00:00:00 2001 From: Jiri Benc Date: Tue, 5 Apr 2016 14:47:13 +0200 Subject: [PATCH 0356/1649] vxlan: implement GPE Implement VXLAN-GPE. Only COLLECT_METADATA is supported for now (it is possible to support static configuration, too, if there is demand for it). The GPE header parsing has to be moved before iptunnel_pull_header, as we need to know the protocol. v2: Removed what was called "L2 mode" in v1 of the patchset. Only "L3 mode" (now called "raw mode") is added by this patch. This mode does not allow Ethernet header to be encapsulated in VXLAN-GPE when using ip route to specify the encapsulation, IP header is encapsulated instead. The patch does support Ethernet to be encapsulated, though, using ETH_P_TEB in skb->protocol. This will be utilized by other COLLECT_METADATA users (openvswitch in particular). If there is ever demand for Ethernet encapsulation with VXLAN-GPE using ip route, it's easy to add a new flag switching the interface to "Ethernet mode" (called "L2 mode" in v1 of this patchset). For now, leave this out, it seems we don't need it. Disallowed more flag combinations, especially RCO with GPE. Added comment explaining that GBP and GPE cannot be set together. Signed-off-by: Jiri Benc Signed-off-by: David S. 
Miller --- drivers/net/vxlan.c | 170 +++++++++++++++++++++++++++++++---- include/net/vxlan.h | 68 ++++++++++++++ include/uapi/linux/if_link.h | 1 + 3 files changed, 222 insertions(+), 17 deletions(-) diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index d62eebaa9720..51cccddfe403 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -1192,6 +1192,45 @@ out: unparsed->vx_flags &= ~VXLAN_GBP_USED_BITS; } +static bool vxlan_parse_gpe_hdr(struct vxlanhdr *unparsed, + __be32 *protocol, + struct sk_buff *skb, u32 vxflags) +{ + struct vxlanhdr_gpe *gpe = (struct vxlanhdr_gpe *)unparsed; + + /* Need to have Next Protocol set for interfaces in GPE mode. */ + if (!gpe->np_applied) + return false; + /* "The initial version is 0. If a receiver does not support the + * version indicated it MUST drop the packet. + */ + if (gpe->version != 0) + return false; + /* "When the O bit is set to 1, the packet is an OAM packet and OAM + * processing MUST occur." However, we don't implement OAM + * processing, thus drop the packet. + */ + if (gpe->oam_flag) + return false; + + switch (gpe->next_protocol) { + case VXLAN_GPE_NP_IPV4: + *protocol = htons(ETH_P_IP); + break; + case VXLAN_GPE_NP_IPV6: + *protocol = htons(ETH_P_IPV6); + break; + case VXLAN_GPE_NP_ETHERNET: + *protocol = htons(ETH_P_TEB); + break; + default: + return false; + } + + unparsed->vx_flags &= ~VXLAN_GPE_USED_BITS; + return true; +} + static bool vxlan_set_mac(struct vxlan_dev *vxlan, struct vxlan_sock *vs, struct sk_buff *skb) @@ -1257,9 +1296,11 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb) struct vxlanhdr unparsed; struct vxlan_metadata _md; struct vxlan_metadata *md = &_md; + __be32 protocol = htons(ETH_P_TEB); + bool raw_proto = false; void *oiph; - /* Need Vxlan and inner Ethernet header to be present */ + /* Need UDP and VXLAN header to be present */ if (!pskb_may_pull(skb, VXLAN_HLEN)) return 1; @@ -1283,9 +1324,18 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb) if (!vxlan) goto drop; - if (iptunnel_pull_header(skb, VXLAN_HLEN, htons(ETH_P_TEB), - !net_eq(vxlan->net, dev_net(vxlan->dev)))) - goto drop; + /* For backwards compatibility, only allow reserved fields to be + * used by VXLAN extensions if explicitly requested. + */ + if (vs->flags & VXLAN_F_GPE) { + if (!vxlan_parse_gpe_hdr(&unparsed, &protocol, skb, vs->flags)) + goto drop; + raw_proto = true; + } + + if (__iptunnel_pull_header(skb, VXLAN_HLEN, protocol, raw_proto, + !net_eq(vxlan->net, dev_net(vxlan->dev)))) + goto drop; if (vxlan_collect_metadata(vs)) { __be32 vni = vxlan_vni(vxlan_hdr(skb)->vx_vni); @@ -1304,14 +1354,14 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb) memset(md, 0, sizeof(*md)); } - /* For backwards compatibility, only allow reserved fields to be - * used by VXLAN extensions if explicitly requested. - */ if (vs->flags & VXLAN_F_REMCSUM_RX) if (!vxlan_remcsum(&unparsed, skb, vs->flags)) goto drop; if (vs->flags & VXLAN_F_GBP) vxlan_parse_gbp_hdr(&unparsed, skb, vs->flags, md); + /* Note that GBP and GPE can never be active together. This is + * ensured in vxlan_dev_configure. 
+ */ if (unparsed.vx_flags || unparsed.vx_vni) { /* If there are any unprocessed flags remaining treat @@ -1325,8 +1375,13 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb) goto drop; } - if (!vxlan_set_mac(vxlan, vs, skb)) - goto drop; + if (!raw_proto) { + if (!vxlan_set_mac(vxlan, vs, skb)) + goto drop; + } else { + skb->dev = vxlan->dev; + skb->pkt_type = PACKET_HOST; + } oiph = skb_network_header(skb); skb_reset_network_header(skb); @@ -1685,6 +1740,27 @@ static void vxlan_build_gbp_hdr(struct vxlanhdr *vxh, u32 vxflags, gbp->policy_id = htons(md->gbp & VXLAN_GBP_ID_MASK); } +static int vxlan_build_gpe_hdr(struct vxlanhdr *vxh, u32 vxflags, + __be16 protocol) +{ + struct vxlanhdr_gpe *gpe = (struct vxlanhdr_gpe *)vxh; + + gpe->np_applied = 1; + + switch (protocol) { + case htons(ETH_P_IP): + gpe->next_protocol = VXLAN_GPE_NP_IPV4; + return 0; + case htons(ETH_P_IPV6): + gpe->next_protocol = VXLAN_GPE_NP_IPV6; + return 0; + case htons(ETH_P_TEB): + gpe->next_protocol = VXLAN_GPE_NP_ETHERNET; + return 0; + } + return -EPFNOSUPPORT; +} + static int vxlan_build_skb(struct sk_buff *skb, struct dst_entry *dst, int iphdr_len, __be32 vni, struct vxlan_metadata *md, u32 vxflags, @@ -1694,6 +1770,7 @@ static int vxlan_build_skb(struct sk_buff *skb, struct dst_entry *dst, int min_headroom; int err; int type = udp_sum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL; + __be16 inner_protocol = htons(ETH_P_TEB); if ((vxflags & VXLAN_F_REMCSUM_TX) && skb->ip_summed == CHECKSUM_PARTIAL) { @@ -1712,10 +1789,8 @@ static int vxlan_build_skb(struct sk_buff *skb, struct dst_entry *dst, /* Need space for new headers (invalidates iph ptr) */ err = skb_cow_head(skb, min_headroom); - if (unlikely(err)) { - kfree_skb(skb); - return err; - } + if (unlikely(err)) + goto out_free; skb = vlan_hwaccel_push_inside(skb); if (WARN_ON(!skb)) @@ -1744,9 +1819,19 @@ static int vxlan_build_skb(struct sk_buff *skb, struct dst_entry *dst, if (vxflags & VXLAN_F_GBP) vxlan_build_gbp_hdr(vxh, vxflags, md); + if (vxflags & VXLAN_F_GPE) { + err = vxlan_build_gpe_hdr(vxh, vxflags, skb->protocol); + if (err < 0) + goto out_free; + inner_protocol = skb->protocol; + } - skb_set_inner_protocol(skb, htons(ETH_P_TEB)); + skb_set_inner_protocol(skb, inner_protocol); return 0; + +out_free: + kfree_skb(skb); + return err; } static struct rtable *vxlan_get_route(struct vxlan_dev *vxlan, @@ -2421,6 +2506,17 @@ static const struct net_device_ops vxlan_netdev_ether_ops = { .ndo_fill_metadata_dst = vxlan_fill_metadata_dst, }; +static const struct net_device_ops vxlan_netdev_raw_ops = { + .ndo_init = vxlan_init, + .ndo_uninit = vxlan_uninit, + .ndo_open = vxlan_open, + .ndo_stop = vxlan_stop, + .ndo_start_xmit = vxlan_xmit, + .ndo_get_stats64 = ip_tunnel_get_stats64, + .ndo_change_mtu = vxlan_change_mtu, + .ndo_fill_metadata_dst = vxlan_fill_metadata_dst, +}; + /* Info for udev, that this is a virtual tunnel endpoint */ static struct device_type vxlan_type = { .name = "vxlan", @@ -2500,6 +2596,17 @@ static void vxlan_ether_setup(struct net_device *dev) dev->netdev_ops = &vxlan_netdev_ether_ops; } +static void vxlan_raw_setup(struct net_device *dev) +{ + dev->type = ARPHRD_NONE; + dev->hard_header_len = 0; + dev->addr_len = 0; + dev->mtu = ETH_DATA_LEN; + dev->tx_queue_len = 1000; + dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST; + dev->netdev_ops = &vxlan_netdev_raw_ops; +} + static const struct nla_policy vxlan_policy[IFLA_VXLAN_MAX + 1] = { [IFLA_VXLAN_ID] = { .type = NLA_U32 }, [IFLA_VXLAN_GROUP] = { .len = 
FIELD_SIZEOF(struct iphdr, daddr) }, @@ -2526,6 +2633,7 @@ static const struct nla_policy vxlan_policy[IFLA_VXLAN_MAX + 1] = { [IFLA_VXLAN_REMCSUM_TX] = { .type = NLA_U8 }, [IFLA_VXLAN_REMCSUM_RX] = { .type = NLA_U8 }, [IFLA_VXLAN_GBP] = { .type = NLA_FLAG, }, + [IFLA_VXLAN_GPE] = { .type = NLA_FLAG, }, [IFLA_VXLAN_REMCSUM_NOPARTIAL] = { .type = NLA_FLAG }, }; @@ -2726,7 +2834,20 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev, __be16 default_port = vxlan->cfg.dst_port; struct net_device *lowerdev = NULL; - vxlan_ether_setup(dev); + if (conf->flags & VXLAN_F_GPE) { + if (conf->flags & ~VXLAN_F_ALLOWED_GPE) + return -EINVAL; + /* For now, allow GPE only together with COLLECT_METADATA. + * This can be relaxed later; in such case, the other side + * of the PtP link will have to be provided. + */ + if (!(conf->flags & VXLAN_F_COLLECT_METADATA)) + return -EINVAL; + + vxlan_raw_setup(dev); + } else { + vxlan_ether_setup(dev); + } vxlan->net = src_net; @@ -2789,8 +2910,12 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev, dev->needed_headroom = needed_headroom; memcpy(&vxlan->cfg, conf, sizeof(*conf)); - if (!vxlan->cfg.dst_port) - vxlan->cfg.dst_port = default_port; + if (!vxlan->cfg.dst_port) { + if (conf->flags & VXLAN_F_GPE) + vxlan->cfg.dst_port = 4790; /* IANA assigned VXLAN-GPE port */ + else + vxlan->cfg.dst_port = default_port; + } vxlan->flags |= conf->flags; if (!vxlan->cfg.age_interval) @@ -2961,6 +3086,9 @@ static int vxlan_newlink(struct net *src_net, struct net_device *dev, if (data[IFLA_VXLAN_GBP]) conf.flags |= VXLAN_F_GBP; + if (data[IFLA_VXLAN_GPE]) + conf.flags |= VXLAN_F_GPE; + if (data[IFLA_VXLAN_REMCSUM_NOPARTIAL]) conf.flags |= VXLAN_F_REMCSUM_NOPARTIAL; @@ -2977,6 +3105,10 @@ static int vxlan_newlink(struct net *src_net, struct net_device *dev, case -EEXIST: pr_info("duplicate VNI %u\n", be32_to_cpu(conf.vni)); break; + + case -EINVAL: + pr_info("unsupported combination of extensions\n"); + break; } return err; @@ -3104,6 +3236,10 @@ static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev) nla_put_flag(skb, IFLA_VXLAN_GBP)) goto nla_put_failure; + if (vxlan->flags & VXLAN_F_GPE && + nla_put_flag(skb, IFLA_VXLAN_GPE)) + goto nla_put_failure; + if (vxlan->flags & VXLAN_F_REMCSUM_NOPARTIAL && nla_put_flag(skb, IFLA_VXLAN_REMCSUM_NOPARTIAL)) goto nla_put_failure; diff --git a/include/net/vxlan.h b/include/net/vxlan.h index 73ed2e951c02..dcc6f4057115 100644 --- a/include/net/vxlan.h +++ b/include/net/vxlan.h @@ -119,6 +119,64 @@ struct vxlanhdr_gbp { #define VXLAN_GBP_POLICY_APPLIED (BIT(3) << 16) #define VXLAN_GBP_ID_MASK (0xFFFF) +/* + * VXLAN Generic Protocol Extension (VXLAN_F_GPE): + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * |R|R|Ver|I|P|R|O| Reserved |Next Protocol | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | VXLAN Network Identifier (VNI) | Reserved | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * + * Ver = Version. Indicates VXLAN GPE protocol version. + * + * P = Next Protocol Bit. The P bit is set to indicate that the + * Next Protocol field is present. + * + * O = OAM Flag Bit. The O bit is set to indicate that the packet + * is an OAM packet. + * + * Next Protocol = This 8 bit field indicates the protocol header + * immediately following the VXLAN GPE header. 
+ * + * https://tools.ietf.org/html/draft-ietf-nvo3-vxlan-gpe-01 + */ + +struct vxlanhdr_gpe { +#if defined(__LITTLE_ENDIAN_BITFIELD) + u8 oam_flag:1, + reserved_flags1:1, + np_applied:1, + instance_applied:1, + version:2, +reserved_flags2:2; +#elif defined(__BIG_ENDIAN_BITFIELD) + u8 reserved_flags2:2, + version:2, + instance_applied:1, + np_applied:1, + reserved_flags1:1, + oam_flag:1; +#endif + u8 reserved_flags3; + u8 reserved_flags4; + u8 next_protocol; + __be32 vx_vni; +}; + +/* VXLAN-GPE header flags. */ +#define VXLAN_HF_VER cpu_to_be32(BIT(29) | BIT(28)) +#define VXLAN_HF_NP cpu_to_be32(BIT(26)) +#define VXLAN_HF_OAM cpu_to_be32(BIT(24)) + +#define VXLAN_GPE_USED_BITS (VXLAN_HF_VER | VXLAN_HF_NP | VXLAN_HF_OAM | \ + cpu_to_be32(0xff)) + +/* VXLAN-GPE header Next Protocol. */ +#define VXLAN_GPE_NP_IPV4 0x01 +#define VXLAN_GPE_NP_IPV6 0x02 +#define VXLAN_GPE_NP_ETHERNET 0x03 +#define VXLAN_GPE_NP_NSH 0x04 + struct vxlan_metadata { u32 gbp; }; @@ -206,16 +264,26 @@ struct vxlan_dev { #define VXLAN_F_GBP 0x800 #define VXLAN_F_REMCSUM_NOPARTIAL 0x1000 #define VXLAN_F_COLLECT_METADATA 0x2000 +#define VXLAN_F_GPE 0x4000 /* Flags that are used in the receive path. These flags must match in * order for a socket to be shareable */ #define VXLAN_F_RCV_FLAGS (VXLAN_F_GBP | \ + VXLAN_F_GPE | \ VXLAN_F_UDP_ZERO_CSUM6_RX | \ VXLAN_F_REMCSUM_RX | \ VXLAN_F_REMCSUM_NOPARTIAL | \ VXLAN_F_COLLECT_METADATA) +/* Flags that can be set together with VXLAN_F_GPE. */ +#define VXLAN_F_ALLOWED_GPE (VXLAN_F_GPE | \ + VXLAN_F_IPV6 | \ + VXLAN_F_UDP_ZERO_CSUM_TX | \ + VXLAN_F_UDP_ZERO_CSUM6_TX | \ + VXLAN_F_UDP_ZERO_CSUM6_RX | \ + VXLAN_F_COLLECT_METADATA) + struct net_device *vxlan_dev_create(struct net *net, const char *name, u8 name_assign_type, struct vxlan_config *conf); diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index c488066fb53a..9427f17d06d6 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h @@ -488,6 +488,7 @@ enum { IFLA_VXLAN_REMCSUM_NOPARTIAL, IFLA_VXLAN_COLLECT_METADATA, IFLA_VXLAN_LABEL, + IFLA_VXLAN_GPE, __IFLA_VXLAN_MAX }; #define IFLA_VXLAN_MAX (__IFLA_VXLAN_MAX - 1) From d5ea45da1f04a3443710306e16db3b3aeae92918 Mon Sep 17 00:00:00 2001 From: Stefan Assmann Date: Wed, 3 Feb 2016 09:20:52 +0100 Subject: [PATCH 0357/1649] e1000e: call ndo_stop() instead of dev_close() when running offline selftest Calling dev_close() causes IFF_UP to be cleared which will remove the interfaces routes and some addresses. That's probably not what the user intended when running the offline selftest. Besides this does not happen if the interface is brought down before the test, so the current behaviour is inconsistent. Instead call the net_device_ops ndo_stop function directly and avoid touching IFF_UP at all. 
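For illustration only (not part of the patch), the resulting pattern in the ethtool offline-test path looks roughly like the sketch below. It assumes the e1000e_open()/e1000e_close() helpers exported by this change plus the usual netif_running() check; the wrapper function name is hypothetical.

	/* Minimal sketch: quiesce the NIC through the driver's own
	 * ndo_open/ndo_stop handlers so IFF_UP, routes and addresses
	 * are left untouched during the offline selftest.
	 */
	static void example_diag_test_updown(struct net_device *netdev)
	{
		bool if_running = netif_running(netdev);

		if (if_running)
			/* was dev_close(): that clears IFF_UP and drops
			 * the interface's routes and addresses
			 */
			e1000e_close(netdev);

		/* ... run the offline register/interrupt/loopback tests ... */

		if (if_running)
			/* was dev_open(): reopen via the driver hook instead */
			e1000e_open(netdev);
	}

Either way the hardware is stopped for the duration of the test, but the interface's administrative state and routing configuration are preserved, matching what already happens when the interface is brought down before the test is started.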
Signed-off-by: Stefan Assmann Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/e1000e/e1000.h | 2 ++ drivers/net/ethernet/intel/e1000e/ethtool.c | 4 ++-- drivers/net/ethernet/intel/e1000e/netdev.c | 12 ++++++------ 3 files changed, 10 insertions(+), 8 deletions(-) diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h index 1dc293bad87b..52eb641fc9dc 100644 --- a/drivers/net/ethernet/intel/e1000e/e1000.h +++ b/drivers/net/ethernet/intel/e1000e/e1000.h @@ -480,6 +480,8 @@ extern const char e1000e_driver_version[]; void e1000e_check_options(struct e1000_adapter *adapter); void e1000e_set_ethtool_ops(struct net_device *netdev); +int e1000e_open(struct net_device *netdev); +int e1000e_close(struct net_device *netdev); void e1000e_up(struct e1000_adapter *adapter); void e1000e_down(struct e1000_adapter *adapter, bool reset); void e1000e_reinit_locked(struct e1000_adapter *adapter); diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c index 6cab1f30d41e..1e3973aa707c 100644 --- a/drivers/net/ethernet/intel/e1000e/ethtool.c +++ b/drivers/net/ethernet/intel/e1000e/ethtool.c @@ -1816,7 +1816,7 @@ static void e1000_diag_test(struct net_device *netdev, if (if_running) /* indicate we're in test mode */ - dev_close(netdev); + e1000e_close(netdev); if (e1000_reg_test(adapter, &data[0])) eth_test->flags |= ETH_TEST_FL_FAILED; @@ -1849,7 +1849,7 @@ static void e1000_diag_test(struct net_device *netdev, clear_bit(__E1000_TESTING, &adapter->state); if (if_running) - dev_open(netdev); + e1000e_open(netdev); } else { /* Online tests */ diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 9b4ec13d9161..a7f16c35ebcd 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -4495,7 +4495,7 @@ static int e1000_test_msi(struct e1000_adapter *adapter) } /** - * e1000_open - Called when a network interface is made active + * e1000e_open - Called when a network interface is made active * @netdev: network interface device structure * * Returns 0 on success, negative value on failure @@ -4506,7 +4506,7 @@ static int e1000_test_msi(struct e1000_adapter *adapter) * handler is registered with the OS, the watchdog timer is started, * and the stack is notified that the interface is ready. **/ -static int e1000_open(struct net_device *netdev) +int e1000e_open(struct net_device *netdev) { struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; @@ -4604,7 +4604,7 @@ err_setup_tx: } /** - * e1000_close - Disables a network interface + * e1000e_close - Disables a network interface * @netdev: network interface device structure * * Returns 0, this is not allowed to fail @@ -4614,7 +4614,7 @@ err_setup_tx: * needs to be disabled. A global MAC reset is issued to stop the * hardware, and all transmit and receive resources are freed. 
**/ -static int e1000_close(struct net_device *netdev) +int e1000e_close(struct net_device *netdev) { struct e1000_adapter *adapter = netdev_priv(netdev); struct pci_dev *pdev = adapter->pdev; @@ -6920,8 +6920,8 @@ static int e1000_set_features(struct net_device *netdev, } static const struct net_device_ops e1000e_netdev_ops = { - .ndo_open = e1000_open, - .ndo_stop = e1000_close, + .ndo_open = e1000e_open, + .ndo_stop = e1000e_close, .ndo_start_xmit = e1000_xmit_frame, .ndo_get_stats64 = e1000e_get_stats64, .ndo_set_rx_mode = e1000e_set_rx_mode, From b98ff151b659b08f71cc2e21ce7044da6662b314 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Wed, 6 Apr 2016 17:10:00 +0200 Subject: [PATCH 0358/1649] mlxsw: reg: Add Port Prio To Buffer register When packets ingress the switch they are assigned a switch priority number that dictates the packet's priority group (PG) buffer in the port's headroom buffer. Add the Port Prio To Buffer (PPTB) register, which configures the switch priority to PG mapping. Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/reg.h | 83 +++++++++++++++++++++++ 1 file changed, 83 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index ffe4c0305733..0995beee6c91 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -2340,6 +2340,87 @@ static inline void mlxsw_reg_ppcnt_pack(char *payload, u8 local_port) mlxsw_reg_ppcnt_prio_tc_set(payload, 0); } +/* PPTB - Port Prio To Buffer Register + * ----------------------------------- + * Configures the switch priority to buffer table. + */ +#define MLXSW_REG_PPTB_ID 0x500B +#define MLXSW_REG_PPTB_LEN 0x0C + +static const struct mlxsw_reg_info mlxsw_reg_pptb = { + .id = MLXSW_REG_PPTB_ID, + .len = MLXSW_REG_PPTB_LEN, +}; + +enum { + MLXSW_REG_PPTB_MM_UM, + MLXSW_REG_PPTB_MM_UNICAST, + MLXSW_REG_PPTB_MM_MULTICAST, +}; + +/* reg_pptb_mm + * Mapping mode. + * 0 - Map both unicast and multicast packets to the same buffer. + * 1 - Map only unicast packets. + * 2 - Map only multicast packets. + * Access: Index + * + * Note: SwitchX-2 only supports the first option. + */ +MLXSW_ITEM32(reg, pptb, mm, 0x00, 28, 2); + +/* reg_pptb_local_port + * Local port number. + * Access: Index + */ +MLXSW_ITEM32(reg, pptb, local_port, 0x00, 16, 8); + +/* reg_pptb_um + * Enables the update of the untagged_buf field. + * Access: RW + */ +MLXSW_ITEM32(reg, pptb, um, 0x00, 8, 1); + +/* reg_pptb_pm + * Enables the update of the prio_to_buff field. + * Bit is a flag for updating the mapping for switch priority . + * Access: RW + */ +MLXSW_ITEM32(reg, pptb, pm, 0x00, 0, 8); + +/* reg_pptb_prio_to_buff + * Mapping of switch priority to one of the allocated receive port + * buffers. + * Access: RW + */ +MLXSW_ITEM_BIT_ARRAY(reg, pptb, prio_to_buff, 0x04, 0x04, 4); + +/* reg_pptb_pm_msb + * Enables the update of the prio_to_buff field. + * Bit is a flag for updating the mapping for switch priority . + * Access: RW + */ +MLXSW_ITEM32(reg, pptb, pm_msb, 0x08, 24, 8); + +/* reg_pptb_untagged_buff + * Mapping of untagged frames to one of the allocated receive port buffers. + * Access: RW + * + * Note: In SwitchX-2 this field must be mapped to buffer 8. Reserved for + * Spectrum, as it maps untagged packets based on the default switch priority. 
+ */ +MLXSW_ITEM32(reg, pptb, untagged_buff, 0x08, 0, 4); + +#define MLXSW_REG_PPTB_ALL_PRIO 0xFF + +static inline void mlxsw_reg_pptb_pack(char *payload, u8 local_port) +{ + MLXSW_REG_ZERO(pptb, payload); + mlxsw_reg_pptb_mm_set(payload, MLXSW_REG_PPTB_MM_UM); + mlxsw_reg_pptb_local_port_set(payload, local_port); + mlxsw_reg_pptb_pm_set(payload, MLXSW_REG_PPTB_ALL_PRIO); +} + /* PBMC - Port Buffer Management Control Register * ---------------------------------------------- * The PBMC register configures and retrieves the port packet buffer @@ -3295,6 +3376,8 @@ static inline const char *mlxsw_reg_id_str(u16 reg_id) return "PAOS"; case MLXSW_REG_PPCNT_ID: return "PPCNT"; + case MLXSW_REG_PPTB_ID: + return "PPTB"; case MLXSW_REG_PBMC_ID: return "PBMC"; case MLXSW_REG_PSPA_ID: From dd6cb0f9fdb31c4bf89e482031cd098bf5f706d4 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Wed, 6 Apr 2016 17:10:01 +0200 Subject: [PATCH 0359/1649] mlxsw: spectrum: Map all switch priorities to priority group 0 During transmission, the skb's priority is used to map the skb to a traffic class, where the idea is to group priorities with similar characteristics (e.g. lossy, lossless) to the same traffic class. By default, all priorities are mapped to traffic class 0. In the device, we model the skb's priority as the switch priority, which is assigned to a packet according to its PCP value and ingress port (untagged packets are assigned the port's default switch priority - 0). At ingress, the packet is directed to a priority group (PG) buffer in the port's headroom buffer according to the packet's switch priority and switch priority to buffer mapping. While it's possible to configure the egress mapping between skb's priority (switch priority) and traffic class, there is no mechanism to configure the ingress mapping to a PG. In order to keep things simple and since grouping certain priorities into a traffic class at egress also implies they should be grouped the same at ingress, treat a PG as the ingress counterpart of an egress traffic class. Having established the above, during initialization map all the switch priorities to PG0 in accordance with the Linux defaults for traffic class mapping. Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- .../mellanox/mlxsw/spectrum_buffers.c | 25 ++++++++++++++++++- 1 file changed, 24 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c index d59195e3f7fb..c3a275bb46cf 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c @@ -34,6 +34,7 @@ #include #include +#include #include "spectrum.h" #include "core.h" @@ -82,6 +83,28 @@ static int mlxsw_sp_port_pb_init(struct mlxsw_sp_port *mlxsw_sp_port) MLXSW_REG(pbmc), pbmc_pl); } +static int mlxsw_sp_port_pb_prio_init(struct mlxsw_sp_port *mlxsw_sp_port) +{ + char pptb_pl[MLXSW_REG_PPTB_LEN]; + int i; + + mlxsw_reg_pptb_pack(pptb_pl, mlxsw_sp_port->local_port); + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) + mlxsw_reg_pptb_prio_to_buff_set(pptb_pl, i, 0); + return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pptb), + pptb_pl); +} + +static int mlxsw_sp_port_headroom_init(struct mlxsw_sp_port *mlxsw_sp_port) +{ + int err; + + err = mlxsw_sp_port_pb_init(mlxsw_sp_port); + if (err) + return err; + return mlxsw_sp_port_pb_prio_init(mlxsw_sp_port); +} + #define MLXSW_SP_SB_BYTES_PER_CELL 96 struct mlxsw_sp_sb_pool { @@ -410,7 +433,7 @@ int mlxsw_sp_port_buffers_init(struct mlxsw_sp_port *mlxsw_sp_port) { int err; - err = mlxsw_sp_port_pb_init(mlxsw_sp_port); + err = mlxsw_sp_port_headroom_init(mlxsw_sp_port); if (err) return err; err = mlxsw_sp_port_sb_cms_init(mlxsw_sp_port); From 1a1984490f5c123ee62394bc435a2c09db15cc18 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Wed, 6 Apr 2016 17:10:02 +0200 Subject: [PATCH 0360/1649] mlxsw: spectrum: Add bytes to cells helper Buffers in the switch store packets in units called buffer cells. Add a helper to convert from bytes to cells, so that the actual number of cells required (result is round up) is returned. Also, drop the SB (shared buffer) acronym from the BYTES_PER_CELL macro, as this unit is also used in the ports' buffers and not only the switch's shared buffer. Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- .../net/ethernet/mellanox/mlxsw/spectrum.h | 4 ++ .../mellanox/mlxsw/spectrum_buffers.c | 64 +++++++++---------- 2 files changed, 34 insertions(+), 34 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index d58ab0cd9507..84dddd47f1b0 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -62,6 +62,10 @@ #define MLXSW_SP_PORT_BASE_SPEED 25000 /* Mb/s */ +#define MLXSW_SP_BYTES_PER_CELL 96 + +#define MLXSW_SP_BYTES_TO_CELLS(b) DIV_ROUND_UP(b, MLXSW_SP_BYTES_PER_CELL) + struct mlxsw_sp_port; struct mlxsw_sp_upper { diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c index c3a275bb46cf..dadb6e1ccf82 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c @@ -105,8 +105,6 @@ static int mlxsw_sp_port_headroom_init(struct mlxsw_sp_port *mlxsw_sp_port) return mlxsw_sp_port_pb_prio_init(mlxsw_sp_port); } -#define MLXSW_SP_SB_BYTES_PER_CELL 96 - struct mlxsw_sp_sb_pool { u8 pool; enum mlxsw_reg_sbpr_dir dir; @@ -115,11 +113,9 @@ struct mlxsw_sp_sb_pool { }; #define MLXSW_SP_SB_POOL_INGRESS_SIZE \ - ((15000000 - (2 * 20000 * MLXSW_PORT_MAX_PORTS)) / \ - MLXSW_SP_SB_BYTES_PER_CELL) + (15000000 - (2 * 20000 * MLXSW_PORT_MAX_PORTS)) #define MLXSW_SP_SB_POOL_EGRESS_SIZE \ - ((14000000 - (8 * 1500 * MLXSW_PORT_MAX_PORTS)) / \ - MLXSW_SP_SB_BYTES_PER_CELL) + (14000000 - (8 * 1500 * MLXSW_PORT_MAX_PORTS)) #define MLXSW_SP_SB_POOL(_pool, _dir, _mode, _size) \ { \ @@ -138,14 +134,14 @@ struct mlxsw_sp_sb_pool { MLXSW_REG_SBPR_MODE_DYNAMIC, _size) static const struct mlxsw_sp_sb_pool mlxsw_sp_sb_pools[] = { - MLXSW_SP_SB_POOL_INGRESS(0, MLXSW_SP_SB_POOL_INGRESS_SIZE), + MLXSW_SP_SB_POOL_INGRESS(0, MLXSW_SP_BYTES_TO_CELLS(MLXSW_SP_SB_POOL_INGRESS_SIZE)), MLXSW_SP_SB_POOL_INGRESS(1, 0), MLXSW_SP_SB_POOL_INGRESS(2, 0), MLXSW_SP_SB_POOL_INGRESS(3, 0), - MLXSW_SP_SB_POOL_EGRESS(0, MLXSW_SP_SB_POOL_EGRESS_SIZE), + MLXSW_SP_SB_POOL_EGRESS(0, MLXSW_SP_BYTES_TO_CELLS(MLXSW_SP_SB_POOL_EGRESS_SIZE)), MLXSW_SP_SB_POOL_EGRESS(1, 0), MLXSW_SP_SB_POOL_EGRESS(2, 0), - MLXSW_SP_SB_POOL_EGRESS(2, MLXSW_SP_SB_POOL_EGRESS_SIZE), + MLXSW_SP_SB_POOL_EGRESS(2, MLXSW_SP_BYTES_TO_CELLS(MLXSW_SP_SB_POOL_EGRESS_SIZE)), }; #define MLXSW_SP_SB_POOLS_LEN ARRAY_SIZE(mlxsw_sp_sb_pools) @@ -201,7 +197,7 @@ struct mlxsw_sp_sb_cm { MLXSW_SP_SB_CM(_tc, MLXSW_REG_SBCM_DIR_EGRESS, 104, 2, 3) static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms[] = { - MLXSW_SP_SB_CM_INGRESS(0, 10000 / MLXSW_SP_SB_BYTES_PER_CELL, 8), + MLXSW_SP_SB_CM_INGRESS(0, MLXSW_SP_BYTES_TO_CELLS(10000), 8), MLXSW_SP_SB_CM_INGRESS(1, 0, 0), MLXSW_SP_SB_CM_INGRESS(2, 0, 0), MLXSW_SP_SB_CM_INGRESS(3, 0, 0), @@ -209,15 +205,15 @@ static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms[] = { MLXSW_SP_SB_CM_INGRESS(5, 0, 0), MLXSW_SP_SB_CM_INGRESS(6, 0, 0), MLXSW_SP_SB_CM_INGRESS(7, 0, 0), - MLXSW_SP_SB_CM_INGRESS(9, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff), - MLXSW_SP_SB_CM_EGRESS(0, 1500 / MLXSW_SP_SB_BYTES_PER_CELL, 9), - MLXSW_SP_SB_CM_EGRESS(1, 1500 / MLXSW_SP_SB_BYTES_PER_CELL, 9), - MLXSW_SP_SB_CM_EGRESS(2, 1500 / MLXSW_SP_SB_BYTES_PER_CELL, 9), - MLXSW_SP_SB_CM_EGRESS(3, 1500 / MLXSW_SP_SB_BYTES_PER_CELL, 9), - MLXSW_SP_SB_CM_EGRESS(4, 1500 / MLXSW_SP_SB_BYTES_PER_CELL, 9), - MLXSW_SP_SB_CM_EGRESS(5, 1500 / MLXSW_SP_SB_BYTES_PER_CELL, 9), - MLXSW_SP_SB_CM_EGRESS(6, 1500 / 
MLXSW_SP_SB_BYTES_PER_CELL, 9), - MLXSW_SP_SB_CM_EGRESS(7, 1500 / MLXSW_SP_SB_BYTES_PER_CELL, 9), + MLXSW_SP_SB_CM_INGRESS(9, MLXSW_SP_BYTES_TO_CELLS(20000), 0xff), + MLXSW_SP_SB_CM_EGRESS(0, MLXSW_SP_BYTES_TO_CELLS(1500), 9), + MLXSW_SP_SB_CM_EGRESS(1, MLXSW_SP_BYTES_TO_CELLS(1500), 9), + MLXSW_SP_SB_CM_EGRESS(2, MLXSW_SP_BYTES_TO_CELLS(1500), 9), + MLXSW_SP_SB_CM_EGRESS(3, MLXSW_SP_BYTES_TO_CELLS(1500), 9), + MLXSW_SP_SB_CM_EGRESS(4, MLXSW_SP_BYTES_TO_CELLS(1500), 9), + MLXSW_SP_SB_CM_EGRESS(5, MLXSW_SP_BYTES_TO_CELLS(1500), 9), + MLXSW_SP_SB_CM_EGRESS(6, MLXSW_SP_BYTES_TO_CELLS(1500), 9), + MLXSW_SP_SB_CM_EGRESS(7, MLXSW_SP_BYTES_TO_CELLS(1500), 9), MLXSW_SP_SB_CM_EGRESS(8, 0, 0), MLXSW_SP_SB_CM_EGRESS(9, 0, 0), MLXSW_SP_SB_CM_EGRESS(10, 0, 0), @@ -376,21 +372,21 @@ struct mlxsw_sp_sb_mm { } static const struct mlxsw_sp_sb_mm mlxsw_sp_sb_mms[] = { - MLXSW_SP_SB_MM(0, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0), - MLXSW_SP_SB_MM(1, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0), - MLXSW_SP_SB_MM(2, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0), - MLXSW_SP_SB_MM(3, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0), - MLXSW_SP_SB_MM(4, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0), - MLXSW_SP_SB_MM(5, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0), - MLXSW_SP_SB_MM(6, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0), - MLXSW_SP_SB_MM(7, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0), - MLXSW_SP_SB_MM(8, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0), - MLXSW_SP_SB_MM(9, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0), - MLXSW_SP_SB_MM(10, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0), - MLXSW_SP_SB_MM(11, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0), - MLXSW_SP_SB_MM(12, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0), - MLXSW_SP_SB_MM(13, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0), - MLXSW_SP_SB_MM(14, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0), + MLXSW_SP_SB_MM(0, MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0), + MLXSW_SP_SB_MM(1, MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0), + MLXSW_SP_SB_MM(2, MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0), + MLXSW_SP_SB_MM(3, MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0), + MLXSW_SP_SB_MM(4, MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0), + MLXSW_SP_SB_MM(5, MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0), + MLXSW_SP_SB_MM(6, MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0), + MLXSW_SP_SB_MM(7, MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0), + MLXSW_SP_SB_MM(8, MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0), + MLXSW_SP_SB_MM(9, MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0), + MLXSW_SP_SB_MM(10, MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0), + MLXSW_SP_SB_MM(11, MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0), + MLXSW_SP_SB_MM(12, MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0), + MLXSW_SP_SB_MM(13, MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0), + MLXSW_SP_SB_MM(14, MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0), }; #define MLXSW_SP_SB_MMS_LEN ARRAY_SIZE(mlxsw_sp_sb_mms) From ff6551ec0c2748a31087878f00bcaf6db2f82116 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Wed, 6 Apr 2016 17:10:03 +0200 Subject: [PATCH 0361/1649] mlxsw: spectrum: Correctly configure headroom size When packets ingress the switch they are assigned a switch priority and directed to the corresponding priority group (PG) buffer in the port's headroom buffer. Since we now map all switch priorities to priority group 0 (PG0) by default, there is no need to allocate the other priority groups during initialization. The only exception is PG9, which is used for control traffic. 
At minimum, the PG should be able to store the currently classified packet (pipeline latency isn't 0) and also the packets arriving during the classification time. However, an incoming packet will not be buffered if there is no available MTU-sized buffer space for storing it. The buffer needed to accommodate for pipeline latency is variable and needs to take into account both the current link speed and current latency of the pipeline, which is time-dependent. Testing showed that setting the PG's size to twice the current MTU is optimal. Since PG9 is used strictly for control packets and not subject to flow control, we are not going to resize it according to user configuration, so we simply set it according to worst case scenario, which is twice the maximum MTU. In any case, later patches in the series will allow a user to direct lossless flows to other PGs than PG0 and set their size to accommodate for round-trip propagation delay. The above change also requires us to resize the PG buffer whenever the port's MTU is changed. Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- .../net/ethernet/mellanox/mlxsw/spectrum.c | 25 ++++++++++++++++++- .../mellanox/mlxsw/spectrum_buffers.c | 19 +++++++------- 2 files changed, 34 insertions(+), 10 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index cb5f36e497e9..4576d59a98a2 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -449,16 +449,39 @@ static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p) return 0; } +static int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, + int mtu) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + u16 pg_size = 2 * MLXSW_SP_BYTES_TO_CELLS(mtu); + char pbmc_pl[MLXSW_REG_PBMC_LEN]; + int err; + + mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0, 0); + err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl); + if (err) + return err; + mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, 0, pg_size); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl); +} + static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu) { struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); int err; - err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu); + err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu); if (err) return err; + err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu); + if (err) + goto err_port_mtu_set; dev->mtu = mtu; return 0; + +err_port_mtu_set: + mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu); + return err; } static struct rtnl_link_stats64 * diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c index dadb6e1ccf82..e7a5b73188f1 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c @@ -35,6 +35,7 @@ #include #include #include +#include #include "spectrum.h" #include "core.h" @@ -53,15 +54,15 @@ struct mlxsw_sp_pb { } static const struct mlxsw_sp_pb mlxsw_sp_pbs[] = { - MLXSW_SP_PB(0, 208), - MLXSW_SP_PB(1, 208), - MLXSW_SP_PB(2, 208), - MLXSW_SP_PB(3, 208), - MLXSW_SP_PB(4, 208), - MLXSW_SP_PB(5, 208), - MLXSW_SP_PB(6, 208), - MLXSW_SP_PB(7, 208), - MLXSW_SP_PB(9, 208), + MLXSW_SP_PB(0, 2 * MLXSW_SP_BYTES_TO_CELLS(ETH_FRAME_LEN)), + MLXSW_SP_PB(1, 0), + MLXSW_SP_PB(2, 0), + MLXSW_SP_PB(3, 0), + MLXSW_SP_PB(4, 0), + MLXSW_SP_PB(5, 0), + 
MLXSW_SP_PB(6, 0), + MLXSW_SP_PB(7, 0), + MLXSW_SP_PB(9, 2 * MLXSW_SP_BYTES_TO_CELLS(MLXSW_PORT_MAX_MTU)), }; #define MLXSW_SP_PBS_LEN ARRAY_SIZE(mlxsw_sp_pbs) From 7ad7cd6113bacace67c55cadef6459eb0e74403d Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Wed, 6 Apr 2016 17:10:04 +0200 Subject: [PATCH 0362/1649] mlxsw: reg: Use correct PBMC register length The last field of the PBMC register is at offset 0x64 and its size is 0x8, so the correct register's length is 0x6C bytes. Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/reg.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index 0995beee6c91..f08a17fdd4c3 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -2427,7 +2427,7 @@ static inline void mlxsw_reg_pptb_pack(char *payload, u8 local_port) * allocation for different Prios, and the Pause threshold management. */ #define MLXSW_REG_PBMC_ID 0x500C -#define MLXSW_REG_PBMC_LEN 0x68 +#define MLXSW_REG_PBMC_LEN 0x6C static const struct mlxsw_reg_info mlxsw_reg_pbmc = { .id = MLXSW_REG_PBMC_ID, From d6b7c13b018f1785743150f079638bb3ed69fff1 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Wed, 6 Apr 2016 17:10:05 +0200 Subject: [PATCH 0363/1649] mlxsw: spectrum: Set port's shared buffer size to 0 In addition to the priority group (PG) buffers in the headroom, the device enables the allocation of headroom shared buffer, which can be shared between different PGs. However, we are not going to use the headroom shared buffer and instead allow the user to use its size for PGs or the switch's shared buffer. Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/reg.h | 2 ++ drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c | 2 ++ 2 files changed, 4 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index f08a17fdd4c3..370914e607e0 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -2455,6 +2455,8 @@ MLXSW_ITEM32(reg, pbmc, xoff_timer_value, 0x04, 16, 16); */ MLXSW_ITEM32(reg, pbmc, xoff_refresh, 0x04, 0, 16); +#define MLXSW_REG_PBMC_PORT_SHARED_BUF_IDX 11 + /* reg_pbmc_buf_lossy * The field indicates if the buffer is lossy. 
* 0 - Lossless diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c index e7a5b73188f1..97c8d537be5b 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c @@ -80,6 +80,8 @@ static int mlxsw_sp_port_pb_init(struct mlxsw_sp_port *mlxsw_sp_port) pb = &mlxsw_sp_pbs[i]; mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, pb->index, pb->size); } + mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, + MLXSW_REG_PBMC_PORT_SHARED_BUF_IDX, 0); return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl); } From b9b7cee405797cc395f699d8dee4747b96b1e0a8 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Wed, 6 Apr 2016 17:10:06 +0200 Subject: [PATCH 0364/1649] mlxsw: reg: Add QoS ETS Element Configuration register We are going to introduce support for DCB, so we need to be able to configure the traffic selection algorithm (TSA) used by each traffic class (TC), as well as the bandwidth percentage allocated to each TC in case of ETS. Add the QoS ETS Element Configuration register, which controls the above parameters. Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/reg.h | 127 ++++++++++++++++++++++ 1 file changed, 127 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index 370914e607e0..bc08f8bdca7a 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -1805,6 +1805,131 @@ static inline void mlxsw_reg_spvmlr_pack(char *payload, u8 local_port, } } +/* QEEC - QoS ETS Element Configuration Register + * --------------------------------------------- + * Configures the ETS elements. + */ +#define MLXSW_REG_QEEC_ID 0x400D +#define MLXSW_REG_QEEC_LEN 0x1C + +static const struct mlxsw_reg_info mlxsw_reg_qeec = { + .id = MLXSW_REG_QEEC_ID, + .len = MLXSW_REG_QEEC_LEN, +}; + +/* reg_qeec_local_port + * Local port number. + * Access: Index + * + * Note: CPU port is supported. + */ +MLXSW_ITEM32(reg, qeec, local_port, 0x00, 16, 8); + +enum mlxsw_reg_qeec_hr { + MLXSW_REG_QEEC_HIERARCY_PORT, + MLXSW_REG_QEEC_HIERARCY_GROUP, + MLXSW_REG_QEEC_HIERARCY_SUBGROUP, + MLXSW_REG_QEEC_HIERARCY_TC, +}; + +/* reg_qeec_element_hierarchy + * 0 - Port + * 1 - Group + * 2 - Subgroup + * 3 - Traffic Class + * Access: Index + */ +MLXSW_ITEM32(reg, qeec, element_hierarchy, 0x04, 16, 4); + +/* reg_qeec_element_index + * The index of the element in the hierarchy. + * Access: Index + */ +MLXSW_ITEM32(reg, qeec, element_index, 0x04, 0, 8); + +/* reg_qeec_next_element_index + * The index of the next (lower) element in the hierarchy. + * Access: RW + * + * Note: Reserved for element_hierarchy 0. + */ +MLXSW_ITEM32(reg, qeec, next_element_index, 0x08, 0, 8); + +enum { + MLXSW_REG_QEEC_BYTES_MODE, + MLXSW_REG_QEEC_PACKETS_MODE, +}; + +/* reg_qeec_pb + * Packets or bytes mode. + * 0 - Bytes mode + * 1 - Packets mode + * Access: RW + * + * Note: Used for max shaper configuration. For Spectrum, packets mode + * is supported only for traffic classes of CPU port. + */ +MLXSW_ITEM32(reg, qeec, pb, 0x0C, 28, 1); + +/* reg_qeec_mase + * Max shaper configuration enable. Enables configuration of the max + * shaper on this ETS element. + * 0 - Disable + * 1 - Enable + * Access: RW + */ +MLXSW_ITEM32(reg, qeec, mase, 0x10, 31, 1); + +/* A large max rate will disable the max shaper. 
*/ +#define MLXSW_REG_QEEC_MAS_DIS 200000000 /* Kbps */ + +/* reg_qeec_max_shaper_rate + * Max shaper information rate. + * For CPU port, can only be configured for port hierarchy. + * When in bytes mode, value is specified in units of 1000bps. + * Access: RW + */ +MLXSW_ITEM32(reg, qeec, max_shaper_rate, 0x10, 0, 28); + +/* reg_qeec_de + * DWRR configuration enable. Enables configuration of the dwrr and + * dwrr_weight. + * 0 - Disable + * 1 - Enable + * Access: RW + */ +MLXSW_ITEM32(reg, qeec, de, 0x18, 31, 1); + +/* reg_qeec_dwrr + * Transmission selection algorithm to use on the link going down from + * the ETS element. + * 0 - Strict priority + * 1 - DWRR + * Access: RW + */ +MLXSW_ITEM32(reg, qeec, dwrr, 0x18, 15, 1); + +/* reg_qeec_dwrr_weight + * DWRR weight on the link going down from the ETS element. The + * percentage of bandwidth guaranteed to an ETS element within + * its hierarchy. The sum of all weights across all ETS elements + * within one hierarchy should be equal to 100. Reserved when + * transmission selection algorithm is strict priority. + * Access: RW + */ +MLXSW_ITEM32(reg, qeec, dwrr_weight, 0x18, 0, 8); + +static inline void mlxsw_reg_qeec_pack(char *payload, u8 local_port, + enum mlxsw_reg_qeec_hr hr, u8 index, + u8 next_index) +{ + MLXSW_REG_ZERO(qeec, payload); + mlxsw_reg_qeec_local_port_set(payload, local_port); + mlxsw_reg_qeec_element_hierarchy_set(payload, hr); + mlxsw_reg_qeec_element_index_set(payload, index); + mlxsw_reg_qeec_next_element_index_set(payload, next_index); +} + /* PMLP - Ports Module to Local Port Register * ------------------------------------------ * Configures the assignment of modules to local ports. @@ -3366,6 +3491,8 @@ static inline const char *mlxsw_reg_id_str(u16 reg_id) return "SFMR"; case MLXSW_REG_SPVMLR_ID: return "SPVMLR"; + case MLXSW_REG_QEEC_ID: + return "QEEC"; case MLXSW_REG_PMLP_ID: return "PMLP"; case MLXSW_REG_PMTU_ID: From 2c63a555e8495f3d6db443ca73094a6a3508df4a Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Wed, 6 Apr 2016 17:10:07 +0200 Subject: [PATCH 0365/1649] mlxsw: reg: Add QoS Switch Traffic Class Table register As part of DCB ops we'll have to configure the priority to traffic class mapping of a port. Add the QoS Switch Traffic Class Table (QTCT) register, which configures the mapping between the packet switch priority and traffic class on the transmit port. Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/reg.h | 55 +++++++++++++++++++++++ 1 file changed, 55 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index bc08f8bdca7a..2e58c41e90d4 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -1805,6 +1805,59 @@ static inline void mlxsw_reg_spvmlr_pack(char *payload, u8 local_port, } } +/* QTCT - QoS Switch Traffic Class Table + * ------------------------------------- + * Configures the mapping between the packet switch priority and the + * traffic class on the transmit port. + */ +#define MLXSW_REG_QTCT_ID 0x400A +#define MLXSW_REG_QTCT_LEN 0x08 + +static const struct mlxsw_reg_info mlxsw_reg_qtct = { + .id = MLXSW_REG_QTCT_ID, + .len = MLXSW_REG_QTCT_LEN, +}; + +/* reg_qtct_local_port + * Local port number. + * Access: Index + * + * Note: CPU port is not supported. + */ +MLXSW_ITEM32(reg, qtct, local_port, 0x00, 16, 8); + +/* reg_qtct_sub_port + * Virtual port within the physical port. 
+ * Should be set to 0 when virtual ports are not enabled on the port. + * Access: Index + */ +MLXSW_ITEM32(reg, qtct, sub_port, 0x00, 8, 8); + +/* reg_qtct_switch_prio + * Switch priority. + * Access: Index + */ +MLXSW_ITEM32(reg, qtct, switch_prio, 0x00, 0, 4); + +/* reg_qtct_tclass + * Traffic class. + * Default values: + * switch_prio 0 : tclass 1 + * switch_prio 1 : tclass 0 + * switch_prio i : tclass i, for i > 1 + * Access: RW + */ +MLXSW_ITEM32(reg, qtct, tclass, 0x04, 0, 4); + +static inline void mlxsw_reg_qtct_pack(char *payload, u8 local_port, + u8 switch_prio, u8 tclass) +{ + MLXSW_REG_ZERO(qtct, payload); + mlxsw_reg_qtct_local_port_set(payload, local_port); + mlxsw_reg_qtct_switch_prio_set(payload, switch_prio); + mlxsw_reg_qtct_tclass_set(payload, tclass); +} + /* QEEC - QoS ETS Element Configuration Register * --------------------------------------------- * Configures the ETS elements. @@ -3491,6 +3544,8 @@ static inline const char *mlxsw_reg_id_str(u16 reg_id) return "SFMR"; case MLXSW_REG_SPVMLR_ID: return "SPVMLR"; + case MLXSW_REG_QTCT_ID: + return "QTCT"; case MLXSW_REG_QEEC_ID: return "QEEC"; case MLXSW_REG_PMLP_ID: From 90183b980d0af77df2369dee924fff13c792dcc5 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Wed, 6 Apr 2016 17:10:08 +0200 Subject: [PATCH 0366/1649] mlxsw: spectrum: Initialize egress scheduling Before introducing support for DCB ops we should first make sure we initialize the relevant parts in the device correctly. Specifically, the egress scheduling. The device supports a superset of the 802.1Qaz standard with 4 hierarchy levels that can be linked to each other in multiple ways and with different transmission selection algorithms (TSA) employed between them. However, since we only intend to support the 802.1Qaz standard we flatten the hierarchies and let the user configure via DCB ops the TSA and max rate shaper at the subgroup hierarchy (see figure below) and the mapping between switch priority to traffic class. By default, all switch priorities are mapped to traffic class 0, strict priority is employed and max shaper is disabled. Default configuration: switch priority 0 ... switch priority 7 + + | | +----------------------------------+ | +--v--+ +-----+ Traffic Class | | | | Hierarchy | TC0 | ... | TC7 | | | | | +--+--+ +--+--+ | | +--v--+ +--v--+ Subgroup | SG0 | | SG7 | Hierarchy | | | | +-----+ +-----+ | TSA | | TSA | +-----+ ... +-----+ | MAX | | MAX | +--+--+ +--+--+ | | +---------------+----------------+ | +--v--+ Group | | Hierarchy | GR0 | | | +--+--+ | +--v--+ Port | | Hierarchy | PR0 | | | +-----+ Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- .../net/ethernet/mellanox/mlxsw/spectrum.c | 111 ++++++++++++++++++ 1 file changed, 111 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 4576d59a98a2..1243c7404356 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -49,6 +49,7 @@ #include #include #include +#include #include #include #include @@ -1464,6 +1465,108 @@ mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 width) return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); } +static int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port, + enum mlxsw_reg_qeec_hr hr, u8 index, + u8 next_index, bool dwrr, u8 dwrr_weight) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + char qeec_pl[MLXSW_REG_QEEC_LEN]; + + mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index, + next_index); + mlxsw_reg_qeec_de_set(qeec_pl, true); + mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr); + mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl); +} + +static int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port, + enum mlxsw_reg_qeec_hr hr, u8 index, + u8 next_index, u32 maxrate) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + char qeec_pl[MLXSW_REG_QEEC_LEN]; + + mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index, + next_index); + mlxsw_reg_qeec_mase_set(qeec_pl, true); + mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl); +} + +static int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port, + u8 switch_prio, u8 tclass) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + char qtct_pl[MLXSW_REG_QTCT_LEN]; + + mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio, + tclass); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl); +} + +static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port) +{ + int err, i; + + /* Setup the elements hierarcy, so that each TC is linked to + * one subgroup, which are all member in the same group. + */ + err = mlxsw_sp_port_ets_set(mlxsw_sp_port, + MLXSW_REG_QEEC_HIERARCY_GROUP, 0, 0, false, + 0); + if (err) + return err; + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + err = mlxsw_sp_port_ets_set(mlxsw_sp_port, + MLXSW_REG_QEEC_HIERARCY_SUBGROUP, i, + 0, false, 0); + if (err) + return err; + } + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + err = mlxsw_sp_port_ets_set(mlxsw_sp_port, + MLXSW_REG_QEEC_HIERARCY_TC, i, i, + false, 0); + if (err) + return err; + } + + /* Make sure the max shaper is disabled in all hierarcies that + * support it. + */ + err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, + MLXSW_REG_QEEC_HIERARCY_PORT, 0, 0, + MLXSW_REG_QEEC_MAS_DIS); + if (err) + return err; + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, + MLXSW_REG_QEEC_HIERARCY_SUBGROUP, + i, 0, + MLXSW_REG_QEEC_MAS_DIS); + if (err) + return err; + } + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, + MLXSW_REG_QEEC_HIERARCY_TC, + i, i, + MLXSW_REG_QEEC_MAS_DIS); + if (err) + return err; + } + + /* Map all priorities to traffic class 0. 
*/ + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0); + if (err) + return err; + } + + return 0; +} + static int __mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, bool split, u8 module, u8 width) { @@ -1571,6 +1674,13 @@ static int __mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, goto err_port_buffers_init; } + err = mlxsw_sp_port_ets_init(mlxsw_sp_port); + if (err) { + dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n", + mlxsw_sp_port->local_port); + goto err_port_ets_init; + } + mlxsw_sp_port_switchdev_init(mlxsw_sp_port); err = register_netdev(dev); if (err) { @@ -1591,6 +1701,7 @@ static int __mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, err_port_vlan_init: unregister_netdev(dev); err_register_netdev: +err_port_ets_init: err_port_buffers_init: err_port_admin_status_set: err_port_mtu_set: From f00817df2b428ec13711bd27729f992b8c3af054 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Wed, 6 Apr 2016 17:10:09 +0200 Subject: [PATCH 0367/1649] mlxsw: spectrum: Introduce support for Data Center Bridging (DCB) Introduce basic infrastructure for DCB and add the missing ops in following patches. Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/Kconfig | 8 +++ drivers/net/ethernet/mellanox/mlxsw/Makefile | 1 + .../net/ethernet/mellanox/mlxsw/spectrum.c | 10 +++ .../net/ethernet/mellanox/mlxsw/spectrum.h | 17 +++++ .../ethernet/mellanox/mlxsw/spectrum_dcb.c | 65 +++++++++++++++++++ 5 files changed, 101 insertions(+) create mode 100644 drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c diff --git a/drivers/net/ethernet/mellanox/mlxsw/Kconfig b/drivers/net/ethernet/mellanox/mlxsw/Kconfig index 2ad7f67854d5..5989f7cb5462 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/Kconfig +++ b/drivers/net/ethernet/mellanox/mlxsw/Kconfig @@ -50,3 +50,11 @@ config MLXSW_SPECTRUM To compile this driver as a module, choose M here: the module will be called mlxsw_spectrum. + +config MLXSW_SPECTRUM_DCB + bool "Data Center Bridging (DCB) support" + depends on MLXSW_SPECTRUM && DCB + default y + ---help--- + Say Y here if you want to use Data Center Bridging (DCB) in the + driver. diff --git a/drivers/net/ethernet/mellanox/mlxsw/Makefile b/drivers/net/ethernet/mellanox/mlxsw/Makefile index 584cac444852..9b5ebf84c051 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/Makefile +++ b/drivers/net/ethernet/mellanox/mlxsw/Makefile @@ -8,3 +8,4 @@ mlxsw_switchx2-objs := switchx2.o obj-$(CONFIG_MLXSW_SPECTRUM) += mlxsw_spectrum.o mlxsw_spectrum-objs := spectrum.o spectrum_buffers.o \ spectrum_switchdev.o +mlxsw_spectrum-$(CONFIG_MLXSW_SPECTRUM_DCB) += spectrum_dcb.o diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 1243c7404356..baaa9ea52035 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -1681,6 +1681,14 @@ static int __mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, goto err_port_ets_init; } + /* ETS and buffers must be initialized before DCB. 
*/ + err = mlxsw_sp_port_dcb_init(mlxsw_sp_port); + if (err) { + dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n", + mlxsw_sp_port->local_port); + goto err_port_dcb_init; + } + mlxsw_sp_port_switchdev_init(mlxsw_sp_port); err = register_netdev(dev); if (err) { @@ -1701,6 +1709,7 @@ static int __mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, err_port_vlan_init: unregister_netdev(dev); err_register_netdev: +err_port_dcb_init: err_port_ets_init: err_port_buffers_init: err_port_admin_status_set: @@ -1771,6 +1780,7 @@ static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port) devlink_port = &mlxsw_sp_port->devlink_port; devlink_port_type_clear(devlink_port); unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */ + mlxsw_sp_port_dcb_fini(mlxsw_sp_port); devlink_port_unregister(devlink_port); mlxsw_sp_port_vports_fini(mlxsw_sp_port); mlxsw_sp_port_switchdev_fini(mlxsw_sp_port); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index 84dddd47f1b0..1f50af8e25c2 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -270,4 +270,21 @@ int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 vfid, void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port); int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid); +#ifdef CONFIG_MLXSW_SPECTRUM_DCB + +int mlxsw_sp_port_dcb_init(struct mlxsw_sp_port *mlxsw_sp_port); +void mlxsw_sp_port_dcb_fini(struct mlxsw_sp_port *mlxsw_sp_port); + +#else + +static inline int mlxsw_sp_port_dcb_init(struct mlxsw_sp_port *mlxsw_sp_port) +{ + return 0; +} + +static inline void mlxsw_sp_port_dcb_fini(struct mlxsw_sp_port *mlxsw_sp_port) +{} + +#endif + #endif diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c new file mode 100644 index 000000000000..631e9803978d --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c @@ -0,0 +1,65 @@ +/* + * drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c + * Copyright (c) 2016 Mellanox Technologies. All rights reserved. + * Copyright (c) 2016 Ido Schimmel + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include + +#include "spectrum.h" + +static u8 mlxsw_sp_dcbnl_getdcbx(struct net_device __always_unused *dev) +{ + return DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE; +} + +static u8 mlxsw_sp_dcbnl_setdcbx(struct net_device __always_unused *dev, + u8 mode) +{ + return (mode != (DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE)) ? 1 : 0; +} + +static const struct dcbnl_rtnl_ops mlxsw_sp_dcbnl_ops = { + .getdcbx = mlxsw_sp_dcbnl_getdcbx, + .setdcbx = mlxsw_sp_dcbnl_setdcbx, +}; + +int mlxsw_sp_port_dcb_init(struct mlxsw_sp_port *mlxsw_sp_port) +{ + mlxsw_sp_port->dev->dcbnl_ops = &mlxsw_sp_dcbnl_ops; + + return 0; +} + +void mlxsw_sp_port_dcb_fini(struct mlxsw_sp_port *mlxsw_sp_port) +{ +} From 8e8dfe9fdf063cd61f35ed82f5be463791a613a5 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Wed, 6 Apr 2016 17:10:10 +0200 Subject: [PATCH 0368/1649] mlxsw: spectrum: Add IEEE 802.1Qaz ETS support Implement the appropriate DCB ops and allow a user to configure: * Priority to traffic class (TC) mapping with a total of 8 supported TCs * Transmission selection algorithm (TSA) for each TC and the corresponding weights in case of weighted round robin (WRR) As previously explained, we treat the priority group (PG) buffer in the port's headroom as the ingress counterpart of the egress TC. Therefore, when a certain priority to TC mapping is configured, we also configure the port's headroom buffer. Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- .../net/ethernet/mellanox/mlxsw/spectrum.c | 53 +++- .../net/ethernet/mellanox/mlxsw/spectrum.h | 11 + .../ethernet/mellanox/mlxsw/spectrum_dcb.c | 229 ++++++++++++++++++ 3 files changed, 283 insertions(+), 10 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index baaa9ea52035..1498e6a25035 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -450,22 +450,55 @@ static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p) return 0; } -static int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, - int mtu) +static void mlxsw_sp_pg_buf_pack(char *pbmc_pl, int pg_index, int mtu) +{ + u16 pg_size = 2 * MLXSW_SP_BYTES_TO_CELLS(mtu); + + mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, pg_index, pg_size); +} + +int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu, + u8 *prio_tc) { struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; - u16 pg_size = 2 * MLXSW_SP_BYTES_TO_CELLS(mtu); char pbmc_pl[MLXSW_REG_PBMC_LEN]; - int err; + int i, j, err; mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0, 0); err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl); if (err) return err; - mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, 0, pg_size); + + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + bool configure = false; + + for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) { + if (prio_tc[j] == i) { + configure = true; + break; + } + } + + if (!configure) + continue; + mlxsw_sp_pg_buf_pack(pbmc_pl, i, mtu); + } + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl); } +static int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, + int mtu) +{ + u8 def_prio_tc[IEEE_8021QAZ_MAX_TCS] = {0}; + bool dcb_en = !!mlxsw_sp_port->dcb.ets; + u8 *prio_tc; + + prio_tc = dcb_en ? 
mlxsw_sp_port->dcb.ets->prio_tc : def_prio_tc; + + return __mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, prio_tc); +} + static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu) { struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); @@ -1465,9 +1498,9 @@ mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 width) return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); } -static int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port, - enum mlxsw_reg_qeec_hr hr, u8 index, - u8 next_index, bool dwrr, u8 dwrr_weight) +int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port, + enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index, + bool dwrr, u8 dwrr_weight) { struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; char qeec_pl[MLXSW_REG_QEEC_LEN]; @@ -1494,8 +1527,8 @@ static int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port, return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl); } -static int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port, - u8 switch_prio, u8 tclass) +int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port, + u8 switch_prio, u8 tclass) { struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; char qtct_pl[MLXSW_REG_QTCT_LEN]; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index 1f50af8e25c2..ef02081c4fbf 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -42,6 +42,7 @@ #include #include #include +#include #include #include @@ -170,6 +171,9 @@ struct mlxsw_sp_port { struct mlxsw_sp_vfid *vfid; u16 vid; } vport; + struct { + struct ieee_ets *ets; + } dcb; /* 802.1Q bridge VLANs */ unsigned long *active_vlans; unsigned long *untagged_vlans; @@ -269,6 +273,13 @@ int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 vfid, bool set, bool only_uc); void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port); int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid); +int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port, + enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index, + bool dwrr, u8 dwrr_weight); +int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port, + u8 switch_prio, u8 tclass); +int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu, + u8 *prio_tc); #ifdef CONFIG_MLXSW_SPECTRUM_DCB diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c index 631e9803978d..aa5b73a20685 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c @@ -33,9 +33,11 @@ */ #include +#include #include #include "spectrum.h" +#include "reg.h" static u8 mlxsw_sp_dcbnl_getdcbx(struct net_device __always_unused *dev) { @@ -48,13 +50,239 @@ static u8 mlxsw_sp_dcbnl_setdcbx(struct net_device __always_unused *dev, return (mode != (DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE)) ? 
1 : 0; } +static int mlxsw_sp_dcbnl_ieee_getets(struct net_device *dev, + struct ieee_ets *ets) +{ + struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); + + memcpy(ets, mlxsw_sp_port->dcb.ets, sizeof(*ets)); + + return 0; +} + +static int mlxsw_sp_port_ets_validate(struct mlxsw_sp_port *mlxsw_sp_port, + struct ieee_ets *ets) +{ + struct net_device *dev = mlxsw_sp_port->dev; + bool has_ets_tc = false; + int i, tx_bw_sum = 0; + + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + switch (ets->tc_tsa[i]) { + case IEEE_8021QAZ_TSA_STRICT: + break; + case IEEE_8021QAZ_TSA_ETS: + has_ets_tc = true; + tx_bw_sum += ets->tc_tx_bw[i]; + break; + default: + netdev_err(dev, "Only strict priority and ETS are supported\n"); + return -EINVAL; + } + + if (ets->prio_tc[i] >= IEEE_8021QAZ_MAX_TCS) { + netdev_err(dev, "Invalid TC\n"); + return -EINVAL; + } + } + + if (has_ets_tc && tx_bw_sum != 100) { + netdev_err(dev, "Total ETS bandwidth should equal 100\n"); + return -EINVAL; + } + + return 0; +} + +static int mlxsw_sp_port_pg_prio_map(struct mlxsw_sp_port *mlxsw_sp_port, + u8 *prio_tc) +{ + char pptb_pl[MLXSW_REG_PPTB_LEN]; + int i; + + mlxsw_reg_pptb_pack(pptb_pl, mlxsw_sp_port->local_port); + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) + mlxsw_reg_pptb_prio_to_buff_set(pptb_pl, i, prio_tc[i]); + return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pptb), + pptb_pl); +} + +static bool mlxsw_sp_ets_has_pg(u8 *prio_tc, u8 pg) +{ + int i; + + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) + if (prio_tc[i] == pg) + return true; + return false; +} + +static int mlxsw_sp_port_pg_destroy(struct mlxsw_sp_port *mlxsw_sp_port, + u8 *old_prio_tc, u8 *new_prio_tc) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + char pbmc_pl[MLXSW_REG_PBMC_LEN]; + int err, i; + + mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0, 0); + err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl); + if (err) + return err; + + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + u8 pg = old_prio_tc[i]; + + if (!mlxsw_sp_ets_has_pg(new_prio_tc, pg)) + mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, pg, 0); + } + + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl); +} + +static int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, + struct ieee_ets *ets) +{ + struct ieee_ets *my_ets = mlxsw_sp_port->dcb.ets; + struct net_device *dev = mlxsw_sp_port->dev; + int err; + + /* Create the required PGs, but don't destroy existing ones, as + * traffic is still directed to them. + */ + err = __mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, + ets->prio_tc); + if (err) { + netdev_err(dev, "Failed to configure port's headroom\n"); + return err; + } + + err = mlxsw_sp_port_pg_prio_map(mlxsw_sp_port, ets->prio_tc); + if (err) { + netdev_err(dev, "Failed to set PG-priority mapping\n"); + goto err_port_prio_pg_map; + } + + err = mlxsw_sp_port_pg_destroy(mlxsw_sp_port, my_ets->prio_tc, + ets->prio_tc); + if (err) + netdev_warn(dev, "Failed to remove ununsed PGs\n"); + + return 0; + +err_port_prio_pg_map: + mlxsw_sp_port_pg_destroy(mlxsw_sp_port, ets->prio_tc, my_ets->prio_tc); + return err; +} + +static int __mlxsw_sp_dcbnl_ieee_setets(struct mlxsw_sp_port *mlxsw_sp_port, + struct ieee_ets *ets) +{ + struct ieee_ets *my_ets = mlxsw_sp_port->dcb.ets; + struct net_device *dev = mlxsw_sp_port->dev; + int i, err; + + /* Egress configuration. 
*/ + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + bool dwrr = ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS; + u8 weight = ets->tc_tx_bw[i]; + + err = mlxsw_sp_port_ets_set(mlxsw_sp_port, + MLXSW_REG_QEEC_HIERARCY_SUBGROUP, i, + 0, dwrr, weight); + if (err) { + netdev_err(dev, "Failed to link subgroup ETS element %d to group\n", + i); + goto err_port_ets_set; + } + } + + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, + ets->prio_tc[i]); + if (err) { + netdev_err(dev, "Failed to map prio %d to TC %d\n", i, + ets->prio_tc[i]); + goto err_port_prio_tc_set; + } + } + + /* Ingress configuration. */ + err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, ets); + if (err) + goto err_port_headroom_set; + + return 0; + +err_port_headroom_set: + i = IEEE_8021QAZ_MAX_TCS; +err_port_prio_tc_set: + for (i--; i >= 0; i--) + mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, my_ets->prio_tc[i]); + i = IEEE_8021QAZ_MAX_TCS; +err_port_ets_set: + for (i--; i >= 0; i--) { + bool dwrr = my_ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS; + u8 weight = my_ets->tc_tx_bw[i]; + + err = mlxsw_sp_port_ets_set(mlxsw_sp_port, + MLXSW_REG_QEEC_HIERARCY_SUBGROUP, i, + 0, dwrr, weight); + } + return err; +} + +static int mlxsw_sp_dcbnl_ieee_setets(struct net_device *dev, + struct ieee_ets *ets) +{ + struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); + int err; + + err = mlxsw_sp_port_ets_validate(mlxsw_sp_port, ets); + if (err) + return err; + + err = __mlxsw_sp_dcbnl_ieee_setets(mlxsw_sp_port, ets); + if (err) + return err; + + memcpy(mlxsw_sp_port->dcb.ets, ets, sizeof(*ets)); + + return 0; +} + static const struct dcbnl_rtnl_ops mlxsw_sp_dcbnl_ops = { + .ieee_getets = mlxsw_sp_dcbnl_ieee_getets, + .ieee_setets = mlxsw_sp_dcbnl_ieee_setets, + .getdcbx = mlxsw_sp_dcbnl_getdcbx, .setdcbx = mlxsw_sp_dcbnl_setdcbx, }; +static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port) +{ + mlxsw_sp_port->dcb.ets = kzalloc(sizeof(*mlxsw_sp_port->dcb.ets), + GFP_KERNEL); + if (!mlxsw_sp_port->dcb.ets) + return -ENOMEM; + + mlxsw_sp_port->dcb.ets->ets_cap = IEEE_8021QAZ_MAX_TCS; + + return 0; +} + +static void mlxsw_sp_port_ets_fini(struct mlxsw_sp_port *mlxsw_sp_port) +{ + kfree(mlxsw_sp_port->dcb.ets); +} + int mlxsw_sp_port_dcb_init(struct mlxsw_sp_port *mlxsw_sp_port) { + int err; + + err = mlxsw_sp_port_ets_init(mlxsw_sp_port); + if (err) + return err; + mlxsw_sp_port->dev->dcbnl_ops = &mlxsw_sp_dcbnl_ops; return 0; @@ -62,4 +290,5 @@ int mlxsw_sp_port_dcb_init(struct mlxsw_sp_port *mlxsw_sp_port) void mlxsw_sp_port_dcb_fini(struct mlxsw_sp_port *mlxsw_sp_port) { + mlxsw_sp_port_ets_fini(mlxsw_sp_port); } From cc7cf5175807daa9cb51f6e0eb034f60ced6b251 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Wed, 6 Apr 2016 17:10:11 +0200 Subject: [PATCH 0369/1649] mlxsw: spectrum: Allow setting maximum rate for a TC Allow a user to set maximum rate for a particular TC using DCB ops. Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- .../net/ethernet/mellanox/mlxsw/spectrum.c | 6 +- .../net/ethernet/mellanox/mlxsw/spectrum.h | 4 ++ .../ethernet/mellanox/mlxsw/spectrum_dcb.c | 70 +++++++++++++++++++ 3 files changed, 77 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 1498e6a25035..5f4d44e14bf4 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -1513,9 +1513,9 @@ int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port, return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl); } -static int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port, - enum mlxsw_reg_qeec_hr hr, u8 index, - u8 next_index, u32 maxrate) +int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port, + enum mlxsw_reg_qeec_hr hr, u8 index, + u8 next_index, u32 maxrate) { struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; char qeec_pl[MLXSW_REG_QEEC_LEN]; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index ef02081c4fbf..9e1e4fe32d35 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -173,6 +173,7 @@ struct mlxsw_sp_port { } vport; struct { struct ieee_ets *ets; + struct ieee_maxrate *maxrate; } dcb; /* 802.1Q bridge VLANs */ unsigned long *active_vlans; @@ -280,6 +281,9 @@ int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 switch_prio, u8 tclass); int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu, u8 *prio_tc); +int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port, + enum mlxsw_reg_qeec_hr hr, u8 index, + u8 next_index, u32 maxrate); #ifdef CONFIG_MLXSW_SPECTRUM_DCB diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c index aa5b73a20685..257e2d427cab 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c @@ -250,9 +250,51 @@ static int mlxsw_sp_dcbnl_ieee_setets(struct net_device *dev, return 0; } +static int mlxsw_sp_dcbnl_ieee_getmaxrate(struct net_device *dev, + struct ieee_maxrate *maxrate) +{ + struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); + + memcpy(maxrate, mlxsw_sp_port->dcb.maxrate, sizeof(*maxrate)); + + return 0; +} + +static int mlxsw_sp_dcbnl_ieee_setmaxrate(struct net_device *dev, + struct ieee_maxrate *maxrate) +{ + struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); + struct ieee_maxrate *my_maxrate = mlxsw_sp_port->dcb.maxrate; + int err, i; + + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, + MLXSW_REG_QEEC_HIERARCY_SUBGROUP, + i, 0, + maxrate->tc_maxrate[i]); + if (err) { + netdev_err(dev, "Failed to set maxrate for TC %d\n", i); + goto err_port_ets_maxrate_set; + } + } + + memcpy(mlxsw_sp_port->dcb.maxrate, maxrate, sizeof(*maxrate)); + + return 0; + +err_port_ets_maxrate_set: + for (i--; i >= 0; i--) + mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, + MLXSW_REG_QEEC_HIERARCY_SUBGROUP, + i, 0, my_maxrate->tc_maxrate[i]); + return err; +} + static const struct dcbnl_rtnl_ops mlxsw_sp_dcbnl_ops = { .ieee_getets = mlxsw_sp_dcbnl_ieee_getets, .ieee_setets = mlxsw_sp_dcbnl_ieee_setets, + .ieee_getmaxrate = mlxsw_sp_dcbnl_ieee_getmaxrate, + .ieee_setmaxrate = mlxsw_sp_dcbnl_ieee_setmaxrate, .getdcbx = mlxsw_sp_dcbnl_getdcbx, .setdcbx = 
mlxsw_sp_dcbnl_setdcbx, @@ -275,6 +317,26 @@ static void mlxsw_sp_port_ets_fini(struct mlxsw_sp_port *mlxsw_sp_port) kfree(mlxsw_sp_port->dcb.ets); } +static int mlxsw_sp_port_maxrate_init(struct mlxsw_sp_port *mlxsw_sp_port) +{ + int i; + + mlxsw_sp_port->dcb.maxrate = kmalloc(sizeof(*mlxsw_sp_port->dcb.maxrate), + GFP_KERNEL); + if (!mlxsw_sp_port->dcb.maxrate) + return -ENOMEM; + + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) + mlxsw_sp_port->dcb.maxrate->tc_maxrate[i] = MLXSW_REG_QEEC_MAS_DIS; + + return 0; +} + +static void mlxsw_sp_port_maxrate_fini(struct mlxsw_sp_port *mlxsw_sp_port) +{ + kfree(mlxsw_sp_port->dcb.maxrate); +} + int mlxsw_sp_port_dcb_init(struct mlxsw_sp_port *mlxsw_sp_port) { int err; @@ -282,13 +344,21 @@ int mlxsw_sp_port_dcb_init(struct mlxsw_sp_port *mlxsw_sp_port) err = mlxsw_sp_port_ets_init(mlxsw_sp_port); if (err) return err; + err = mlxsw_sp_port_maxrate_init(mlxsw_sp_port); + if (err) + goto err_port_maxrate_init; mlxsw_sp_port->dev->dcbnl_ops = &mlxsw_sp_dcbnl_ops; return 0; + +err_port_maxrate_init: + mlxsw_sp_port_ets_fini(mlxsw_sp_port); + return err; } void mlxsw_sp_port_dcb_fini(struct mlxsw_sp_port *mlxsw_sp_port) { + mlxsw_sp_port_maxrate_fini(mlxsw_sp_port); mlxsw_sp_port_ets_fini(mlxsw_sp_port); } From 6f253d8381e9e7b8a254e7384b7d32ea5784e6e8 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Wed, 6 Apr 2016 17:10:12 +0200 Subject: [PATCH 0370/1649] mlxsw: reg: Add Port Flow Control Configuration register Add the Port Flow Control Configuration (PFCC) register, which configures both flow control and Priority-based Flow Control (PFC). Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/reg.h | 131 ++++++++++++++++++++++ 1 file changed, 131 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index 2e58c41e90d4..b83514aeeb0f 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -2319,6 +2319,135 @@ static inline void mlxsw_reg_paos_pack(char *payload, u8 local_port, mlxsw_reg_paos_e_set(payload, 1); } +/* PFCC - Ports Flow Control Configuration Register + * ------------------------------------------------ + * Configures and retrieves the per port flow control configuration. + */ +#define MLXSW_REG_PFCC_ID 0x5007 +#define MLXSW_REG_PFCC_LEN 0x20 + +static const struct mlxsw_reg_info mlxsw_reg_pfcc = { + .id = MLXSW_REG_PFCC_ID, + .len = MLXSW_REG_PFCC_LEN, +}; + +/* reg_pfcc_local_port + * Local port number. + * Access: Index + */ +MLXSW_ITEM32(reg, pfcc, local_port, 0x00, 16, 8); + +/* reg_pfcc_pnat + * Port number access type. Determines the way local_port is interpreted: + * 0 - Local port number. + * 1 - IB / label port number. + * Access: Index + */ +MLXSW_ITEM32(reg, pfcc, pnat, 0x00, 14, 2); + +/* reg_pfcc_shl_cap + * Send to higher layers capabilities: + * 0 - No capability of sending Pause and PFC frames to higher layers. + * 1 - Device has capability of sending Pause and PFC frames to higher + * layers. + * Access: RO + */ +MLXSW_ITEM32(reg, pfcc, shl_cap, 0x00, 1, 1); + +/* reg_pfcc_shl_opr + * Send to higher layers operation: + * 0 - Pause and PFC frames are handled by the port (default). + * 1 - Pause and PFC frames are handled by the port and also sent to + * higher layers. Only valid if shl_cap = 1. + * Access: RW + */ +MLXSW_ITEM32(reg, pfcc, shl_opr, 0x00, 0, 1); + +/* reg_pfcc_ppan + * Pause policy auto negotiation. + * 0 - Disabled. 
Generate / ignore Pause frames based on pptx / pprtx. + * 1 - Enabled. When auto-negotiation is performed, set the Pause policy + * based on the auto-negotiation resolution. + * Access: RW + * + * Note: The auto-negotiation advertisement is set according to pptx and + * pprtx. When PFC is set on Tx / Rx, ppan must be set to 0. + */ +MLXSW_ITEM32(reg, pfcc, ppan, 0x04, 28, 4); + +/* reg_pfcc_prio_mask_tx + * Bit per priority indicating if Tx flow control policy should be + * updated based on bit pfctx. + * Access: WO + */ +MLXSW_ITEM32(reg, pfcc, prio_mask_tx, 0x04, 16, 8); + +/* reg_pfcc_prio_mask_rx + * Bit per priority indicating if Rx flow control policy should be + * updated based on bit pfcrx. + * Access: WO + */ +MLXSW_ITEM32(reg, pfcc, prio_mask_rx, 0x04, 0, 8); + +/* reg_pfcc_pptx + * Admin Pause policy on Tx. + * 0 - Never generate Pause frames (default). + * 1 - Generate Pause frames according to Rx buffer threshold. + * Access: RW + */ +MLXSW_ITEM32(reg, pfcc, pptx, 0x08, 31, 1); + +/* reg_pfcc_aptx + * Active (operational) Pause policy on Tx. + * 0 - Never generate Pause frames. + * 1 - Generate Pause frames according to Rx buffer threshold. + * Access: RO + */ +MLXSW_ITEM32(reg, pfcc, aptx, 0x08, 30, 1); + +/* reg_pfcc_pfctx + * Priority based flow control policy on Tx[7:0]. Per-priority bit mask: + * 0 - Never generate priority Pause frames on the specified priority + * (default). + * 1 - Generate priority Pause frames according to Rx buffer threshold on + * the specified priority. + * Access: RW + * + * Note: pfctx and pptx must be mutually exclusive. + */ +MLXSW_ITEM32(reg, pfcc, pfctx, 0x08, 16, 8); + +/* reg_pfcc_pprx + * Admin Pause policy on Rx. + * 0 - Ignore received Pause frames (default). + * 1 - Respect received Pause frames. + * Access: RW + */ +MLXSW_ITEM32(reg, pfcc, pprx, 0x0C, 31, 1); + +/* reg_pfcc_aprx + * Active (operational) Pause policy on Rx. + * 0 - Ignore received Pause frames. + * 1 - Respect received Pause frames. + * Access: RO + */ +MLXSW_ITEM32(reg, pfcc, aprx, 0x0C, 30, 1); + +/* reg_pfcc_pfcrx + * Priority based flow control policy on Rx[7:0]. Per-priority bit mask: + * 0 - Ignore incoming priority Pause frames on the specified priority + * (default). + * 1 - Respect incoming priority Pause frames on the specified priority. + * Access: RW + */ +MLXSW_ITEM32(reg, pfcc, pfcrx, 0x0C, 16, 8); + +static inline void mlxsw_reg_pfcc_pack(char *payload, u8 local_port) +{ + MLXSW_REG_ZERO(pfcc, payload); + mlxsw_reg_pfcc_local_port_set(payload, local_port); +} + /* PPCNT - Ports Performance Counters Register * ------------------------------------------- * The PPCNT register retrieves per port performance counters. @@ -3558,6 +3687,8 @@ static inline const char *mlxsw_reg_id_str(u16 reg_id) return "PPAD"; case MLXSW_REG_PAOS_ID: return "PAOS"; + case MLXSW_REG_PFCC_ID: + return "PFCC"; case MLXSW_REG_PPCNT_ID: return "PPCNT"; case MLXSW_REG_PPTB_ID: From 155f9de2e09547ed510b86a4b463c2980e7df46a Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Wed, 6 Apr 2016 17:10:13 +0200 Subject: [PATCH 0371/1649] mlxsw: reg: Add lossless settings for PBMC register When configuring PAUSE frames and PFC we'll need to configure the Xon/Xoff threshold for the priority group (PG) buffers. Add the Xon/Xoff threshold fields to the PBMC register so that we can configure these when needed. Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/reg.h | 35 +++++++++++++++++++++++ 1 file changed, 35 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index b83514aeeb0f..bcd38ffd0d41 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -2788,6 +2788,30 @@ MLXSW_ITEM32_INDEXED(reg, pbmc, buf_epsb, 0x0C, 24, 1, 0x08, 0x00, false); */ MLXSW_ITEM32_INDEXED(reg, pbmc, buf_size, 0x0C, 0, 16, 0x08, 0x00, false); +/* reg_pbmc_buf_xoff_threshold + * Once the amount of data in the buffer goes above this value, device + * starts sending PFC frames for all priorities associated with the + * buffer. Units are represented in cells. Reserved in case of lossy + * buffer. + * Access: RW + * + * Note: In Spectrum, reserved for buffer[9]. + */ +MLXSW_ITEM32_INDEXED(reg, pbmc, buf_xoff_threshold, 0x0C, 16, 16, + 0x08, 0x04, false); + +/* reg_pbmc_buf_xon_threshold + * When the amount of data in the buffer goes below this value, device + * stops sending PFC frames for the priorities associated with the + * buffer. Units are represented in cells. Reserved in case of lossy + * buffer. + * Access: RW + * + * Note: In Spectrum, reserved for buffer[9]. + */ +MLXSW_ITEM32_INDEXED(reg, pbmc, buf_xon_threshold, 0x0C, 0, 16, + 0x08, 0x04, false); + static inline void mlxsw_reg_pbmc_pack(char *payload, u8 local_port, u16 xoff_timer_value, u16 xoff_refresh) { @@ -2806,6 +2830,17 @@ static inline void mlxsw_reg_pbmc_lossy_buffer_pack(char *payload, mlxsw_reg_pbmc_buf_size_set(payload, buf_index, size); } +static inline void mlxsw_reg_pbmc_lossless_buffer_pack(char *payload, + int buf_index, u16 size, + u16 threshold) +{ + mlxsw_reg_pbmc_buf_lossy_set(payload, buf_index, 0); + mlxsw_reg_pbmc_buf_epsb_set(payload, buf_index, 0); + mlxsw_reg_pbmc_buf_size_set(payload, buf_index, size); + mlxsw_reg_pbmc_buf_xoff_threshold_set(payload, buf_index, threshold); + mlxsw_reg_pbmc_buf_xon_threshold_set(payload, buf_index, threshold); +} + /* PSPA - Port Switch Partition Allocation * --------------------------------------- * Controls the association of a port with a switch partition and enables From 9f7ec052b75e1fd8a4cc876349a665f5b76669d5 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Wed, 6 Apr 2016 17:10:14 +0200 Subject: [PATCH 0372/1649] mlxsw: spectrum: Add support for PAUSE frames When a packet ingresses the switch, it's placed in its assigned priority group (PG) buffer in the port's headroom buffer while it goes through the switch's pipeline. After going through the pipeline - which determines its egress port(s) and traffic class - it's moved to the switch's shared buffer awaiting transmission. However, some packets are not eligible to enter the shared buffer due to exceeded quotas or insufficient space. Marking their associated PGs as lossless will cause the packets to accumulate in the PG buffer. Another reason for packet accumulation is a complicated pipeline (e.g. one involving a lot of ACLs). To prevent packets from being dropped, a user can enable PAUSE frames on the port. This will mark all the active PGs as lossless and set their size according to the maximum delay, as it's not configured by the user. [Buffer diagram: the headroom PG buffer has a Delay region above the Xon/Xoff threshold and 2 * MTU of space below it.] The delay (612 [Cells]) was calculated according to a worst-case scenario involving maximum MTU and 100m cables. 
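A minimal, self-contained sketch of the sizing rule described above: a PAUSE-enabled PG gets 2 * MTU worth of cells as its guaranteed space, the Xon/Xoff threshold sits at that 2 * MTU mark, and the 612-cell worst-case delay allowance is added on top. The cell size and MTU used here are assumptions made for the example, not values taken from the driver.

#include <stdio.h>

#define EXAMPLE_CELL_SIZE   96   /* bytes per buffer cell (assumed for the example) */
#define EXAMPLE_PAUSE_DELAY 612  /* worst-case delay allowance, in cells */

static unsigned int example_bytes_to_cells(unsigned int bytes)
{
	return (bytes + EXAMPLE_CELL_SIZE - 1) / EXAMPLE_CELL_SIZE;
}

int main(void)
{
	unsigned int mtu = 9216;                                  /* assumed jumbo MTU */
	unsigned int threshold = 2 * example_bytes_to_cells(mtu); /* Xon/Xoff point */
	unsigned int pg_size = threshold + EXAMPLE_PAUSE_DELAY;   /* lossless PG size */

	printf("PG size: %u cells, Xon/Xoff threshold: %u cells\n",
	       pg_size, threshold);
	return 0;
}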
After marking the PGs as lossless the device is configured to respect incoming PAUSE frames (Rx PAUSE) and generate PAUSE frames (Tx PAUSE) according to user's settings. Whenever the port's headroom configuration changes we take into account the PAUSE configuration, so that we correctly set the PG's type (lossy / lossless), size and threshold. This can happen when: a) The port's MTU changes, as it directly affects the PG's size. b) A PG is created following user configuration, by binding a priority to it. Note that the relevant SUPPORTED flags were already mistakenly set by the driver before this commit. Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- .../net/ethernet/mellanox/mlxsw/spectrum.c | 85 +++++++++++++++++-- .../net/ethernet/mellanox/mlxsw/spectrum.h | 17 +++- .../ethernet/mellanox/mlxsw/spectrum_dcb.c | 3 +- 3 files changed, 95 insertions(+), 10 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 5f4d44e14bf4..086682e51c24 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -450,15 +450,23 @@ static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p) return 0; } -static void mlxsw_sp_pg_buf_pack(char *pbmc_pl, int pg_index, int mtu) +static void mlxsw_sp_pg_buf_pack(char *pbmc_pl, int pg_index, int mtu, + bool pause_en) { u16 pg_size = 2 * MLXSW_SP_BYTES_TO_CELLS(mtu); - mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, pg_index, pg_size); + if (pause_en) { + u16 pg_pause_size = pg_size + MLXSW_SP_PAUSE_DELAY; + + mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, pg_index, + pg_pause_size, pg_size); + } else { + mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, pg_index, pg_size); + } } int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu, - u8 *prio_tc) + u8 *prio_tc, bool pause_en) { struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; char pbmc_pl[MLXSW_REG_PBMC_LEN]; @@ -481,14 +489,14 @@ int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu, if (!configure) continue; - mlxsw_sp_pg_buf_pack(pbmc_pl, i, mtu); + mlxsw_sp_pg_buf_pack(pbmc_pl, i, mtu, pause_en); } return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl); } static int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, - int mtu) + int mtu, bool pause_en) { u8 def_prio_tc[IEEE_8021QAZ_MAX_TCS] = {0}; bool dcb_en = !!mlxsw_sp_port->dcb.ets; @@ -496,15 +504,17 @@ static int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, prio_tc = dcb_en ? 
mlxsw_sp_port->dcb.ets->prio_tc : def_prio_tc; - return __mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, prio_tc); + return __mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, prio_tc, + pause_en); } static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu) { struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); + bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port); int err; - err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu); + err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, pause_en); if (err) return err; err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu); @@ -514,7 +524,7 @@ static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu) return 0; err_port_mtu_set: - mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu); + mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en); return err; } @@ -993,6 +1003,63 @@ static void mlxsw_sp_port_get_drvinfo(struct net_device *dev, sizeof(drvinfo->bus_info)); } +static void mlxsw_sp_port_get_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *pause) +{ + struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); + + pause->rx_pause = mlxsw_sp_port->link.rx_pause; + pause->tx_pause = mlxsw_sp_port->link.tx_pause; +} + +static int mlxsw_sp_port_pause_set(struct mlxsw_sp_port *mlxsw_sp_port, + struct ethtool_pauseparam *pause) +{ + char pfcc_pl[MLXSW_REG_PFCC_LEN]; + + mlxsw_reg_pfcc_pack(pfcc_pl, mlxsw_sp_port->local_port); + mlxsw_reg_pfcc_pprx_set(pfcc_pl, pause->rx_pause); + mlxsw_reg_pfcc_pptx_set(pfcc_pl, pause->tx_pause); + + return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc), + pfcc_pl); +} + +static int mlxsw_sp_port_set_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *pause) +{ + struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); + bool pause_en = pause->tx_pause || pause->rx_pause; + int err; + + if (pause->autoneg) { + netdev_err(dev, "PAUSE frames autonegotiation isn't supported\n"); + return -EINVAL; + } + + err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en); + if (err) { + netdev_err(dev, "Failed to configure port's headroom\n"); + return err; + } + + err = mlxsw_sp_port_pause_set(mlxsw_sp_port, pause); + if (err) { + netdev_err(dev, "Failed to set PAUSE parameters\n"); + goto err_port_pause_configure; + } + + mlxsw_sp_port->link.rx_pause = pause->rx_pause; + mlxsw_sp_port->link.tx_pause = pause->tx_pause; + + return 0; + +err_port_pause_configure: + pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port); + mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en); + return err; +} + struct mlxsw_sp_port_hw_stats { char str[ETH_GSTRING_LEN]; u64 (*getter)(char *payload); @@ -1476,6 +1543,8 @@ static int mlxsw_sp_port_set_settings(struct net_device *dev, static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = { .get_drvinfo = mlxsw_sp_port_get_drvinfo, .get_link = ethtool_op_get_link, + .get_pauseparam = mlxsw_sp_port_get_pauseparam, + .set_pauseparam = mlxsw_sp_port_set_pauseparam, .get_strings = mlxsw_sp_port_get_strings, .set_phys_id = mlxsw_sp_port_set_phys_id, .get_ethtool_stats = mlxsw_sp_port_get_stats, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index 9e1e4fe32d35..f4b53dd34f22 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -67,6 +67,11 @@ #define MLXSW_SP_BYTES_TO_CELLS(b) DIV_ROUND_UP(b, MLXSW_SP_BYTES_PER_CELL) +/* Maximum delay buffer needed in case of PAUSE frames, in cells. 
+ * Assumes 100m cable and maximum MTU. + */ +#define MLXSW_SP_PAUSE_DELAY 612 + struct mlxsw_sp_port; struct mlxsw_sp_upper { @@ -171,6 +176,10 @@ struct mlxsw_sp_port { struct mlxsw_sp_vfid *vfid; u16 vid; } vport; + struct { + u8 tx_pause:1, + rx_pause:1; + } link; struct { struct ieee_ets *ets; struct ieee_maxrate *maxrate; @@ -183,6 +192,12 @@ struct mlxsw_sp_port { struct devlink_port devlink_port; }; +static inline bool +mlxsw_sp_port_is_pause_en(const struct mlxsw_sp_port *mlxsw_sp_port) +{ + return mlxsw_sp_port->link.tx_pause || mlxsw_sp_port->link.rx_pause; +} + static inline struct mlxsw_sp_port * mlxsw_sp_port_lagged_get(struct mlxsw_sp *mlxsw_sp, u16 lag_id, u8 port_index) { @@ -280,7 +295,7 @@ int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port, int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 switch_prio, u8 tclass); int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu, - u8 *prio_tc); + u8 *prio_tc, bool pause_en); int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port, enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index, u32 maxrate); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c index 257e2d427cab..8786424f6191 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c @@ -142,6 +142,7 @@ static int mlxsw_sp_port_pg_destroy(struct mlxsw_sp_port *mlxsw_sp_port, static int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, struct ieee_ets *ets) { + bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port); struct ieee_ets *my_ets = mlxsw_sp_port->dcb.ets; struct net_device *dev = mlxsw_sp_port->dev; int err; @@ -150,7 +151,7 @@ static int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, * traffic is still directed to them. */ err = __mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, - ets->prio_tc); + ets->prio_tc, pause_en); if (err) { netdev_err(dev, "Failed to configure port's headroom\n"); return err; From 34dba0a59d072201171be1aeb9e52d1148d7c365 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Wed, 6 Apr 2016 17:10:15 +0200 Subject: [PATCH 0373/1649] mlxsw: reg: Introduce per priority counters We are going to add support for PFC as part of DCB ops, which requires us to report the number of PFC frames sent and received per priority. Add per priority counters in order to report number of PFC frames sent and received per priority. Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/reg.h | 62 ++++++++++++++++++- .../net/ethernet/mellanox/mlxsw/spectrum.c | 3 +- .../net/ethernet/mellanox/mlxsw/switchx2.c | 3 +- 3 files changed, 63 insertions(+), 5 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index bcd38ffd0d41..84aacb36c12a 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -2487,6 +2487,11 @@ MLXSW_ITEM32(reg, ppcnt, local_port, 0x00, 16, 8); */ MLXSW_ITEM32(reg, ppcnt, pnat, 0x00, 14, 2); +enum mlxsw_reg_ppcnt_grp { + MLXSW_REG_PPCNT_IEEE_8023_CNT = 0x0, + MLXSW_REG_PPCNT_PRIO_CNT = 0x10, +}; + /* reg_ppcnt_grp * Performance counter group. * Group 63 indicates all groups. 
Only valid on Set() operation with @@ -2522,6 +2527,8 @@ MLXSW_ITEM32(reg, ppcnt, clr, 0x04, 31, 1); */ MLXSW_ITEM32(reg, ppcnt, prio_tc, 0x04, 0, 5); +/* Ethernet IEEE 802.3 Counter Group */ + /* reg_ppcnt_a_frames_transmitted_ok * Access: RO */ @@ -2636,15 +2643,64 @@ MLXSW_ITEM64(reg, ppcnt, a_pause_mac_ctrl_frames_received, MLXSW_ITEM64(reg, ppcnt, a_pause_mac_ctrl_frames_transmitted, 0x08 + 0x90, 0, 64); -static inline void mlxsw_reg_ppcnt_pack(char *payload, u8 local_port) +/* Ethernet Per Priority Group Counters */ + +/* reg_ppcnt_rx_octets + * Access: RO + */ +MLXSW_ITEM64(reg, ppcnt, rx_octets, 0x08 + 0x00, 0, 64); + +/* reg_ppcnt_rx_frames + * Access: RO + */ +MLXSW_ITEM64(reg, ppcnt, rx_frames, 0x08 + 0x20, 0, 64); + +/* reg_ppcnt_tx_octets + * Access: RO + */ +MLXSW_ITEM64(reg, ppcnt, tx_octets, 0x08 + 0x28, 0, 64); + +/* reg_ppcnt_tx_frames + * Access: RO + */ +MLXSW_ITEM64(reg, ppcnt, tx_frames, 0x08 + 0x48, 0, 64); + +/* reg_ppcnt_rx_pause + * Access: RO + */ +MLXSW_ITEM64(reg, ppcnt, rx_pause, 0x08 + 0x50, 0, 64); + +/* reg_ppcnt_rx_pause_duration + * Access: RO + */ +MLXSW_ITEM64(reg, ppcnt, rx_pause_duration, 0x08 + 0x58, 0, 64); + +/* reg_ppcnt_tx_pause + * Access: RO + */ +MLXSW_ITEM64(reg, ppcnt, tx_pause, 0x08 + 0x60, 0, 64); + +/* reg_ppcnt_tx_pause_duration + * Access: RO + */ +MLXSW_ITEM64(reg, ppcnt, tx_pause_duration, 0x08 + 0x68, 0, 64); + +/* reg_ppcnt_rx_pause_transition + * Access: RO + */ +MLXSW_ITEM64(reg, ppcnt, tx_pause_transition, 0x08 + 0x70, 0, 64); + +static inline void mlxsw_reg_ppcnt_pack(char *payload, u8 local_port, + enum mlxsw_reg_ppcnt_grp grp, + u8 prio_tc) { MLXSW_REG_ZERO(ppcnt, payload); mlxsw_reg_ppcnt_swid_set(payload, 0); mlxsw_reg_ppcnt_local_port_set(payload, local_port); mlxsw_reg_ppcnt_pnat_set(payload, 0); - mlxsw_reg_ppcnt_grp_set(payload, 0); + mlxsw_reg_ppcnt_grp_set(payload, grp); mlxsw_reg_ppcnt_clr_set(payload, 0); - mlxsw_reg_ppcnt_prio_tc_set(payload, 0); + mlxsw_reg_ppcnt_prio_tc_set(payload, prio_tc); } /* PPTB - Port Prio To Buffer Register diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 086682e51c24..36a94a94a420 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -1195,7 +1195,8 @@ static void mlxsw_sp_port_get_stats(struct net_device *dev, int i; int err; - mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port); + mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, + MLXSW_REG_PPCNT_IEEE_8023_CNT, 0); err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl); for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++) data[i] = !err ? mlxsw_sp_port_hw_stats[i].getter(ppcnt_pl) : 0; diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c index 7a60a26759b6..c49447f31acc 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c +++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c @@ -518,7 +518,8 @@ static void mlxsw_sx_port_get_stats(struct net_device *dev, int i; int err; - mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sx_port->local_port); + mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sx_port->local_port, + MLXSW_REG_PPCNT_IEEE_8023_CNT, 0); err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(ppcnt), ppcnt_pl); for (i = 0; i < MLXSW_SX_PORT_HW_STATS_LEN; i++) data[i] = !err ? 
mlxsw_sx_port_hw_stats[i].getter(ppcnt_pl) : 0; From d81a6bdb87ce75337b453169ee39cdccb3286ddf Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Wed, 6 Apr 2016 17:10:16 +0200 Subject: [PATCH 0374/1649] mlxsw: spectrum: Add IEEE 802.1Qbb PFC support Implement the appropriate DCB ops and allow a user to configure certain traffic classes as lossless. The operation configures PFC for both the egress (respecting PFC frames) and ingress (sending PFC frames) parts of the port. At egress, when a PFC frame is received for a PFC enabled priority, then all the priorities mapped to the same TC are stopped. At ingress, the priority group (PG) buffers to which the enabled PFC priorities are mapped are configured to be lossless. PFC frames will be transmitted when the Xoff threshold is crossed. The user-supplied delay parameter is used to determine the PG's size according to the following formula: PG_SIZE = PG_SIZE_LOSSY + delay * CELL_FACTOR + MTU In the worst case scenario the delay will be made up of packets that are all of size CELL_SIZE + 1, which means each packet will require almost twice its true size when buffered in the switch. We therefore multiply this value by the "cell factor", which is close to 2. Another MTU is added in case the transmitting host already started transmitting a maximum length frame when the PFC packet was received. As with PAUSE enabled ports, when the port's MTU is changed both the PGs' size and threshold are adjusted accordingly. Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/reg.h | 10 ++ .../net/ethernet/mellanox/mlxsw/spectrum.c | 30 +++-- .../net/ethernet/mellanox/mlxsw/spectrum.h | 12 +- .../ethernet/mellanox/mlxsw/spectrum_dcb.c | 117 +++++++++++++++++- 4 files changed, 158 insertions(+), 11 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index 84aacb36c12a..28f5b99e585a 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -2442,6 +2442,16 @@ MLXSW_ITEM32(reg, pfcc, aprx, 0x0C, 30, 1); */ MLXSW_ITEM32(reg, pfcc, pfcrx, 0x0C, 16, 8); +#define MLXSW_REG_PFCC_ALL_PRIO 0xFF + +static inline void mlxsw_reg_pfcc_prio_pack(char *payload, u8 pfc_en) +{ + mlxsw_reg_pfcc_prio_mask_tx_set(payload, MLXSW_REG_PFCC_ALL_PRIO); + mlxsw_reg_pfcc_prio_mask_rx_set(payload, MLXSW_REG_PFCC_ALL_PRIO); + mlxsw_reg_pfcc_pfctx_set(payload, pfc_en); + mlxsw_reg_pfcc_pfcrx_set(payload, pfc_en); +} + static inline void mlxsw_reg_pfcc_pack(char *payload, u8 local_port) { MLXSW_REG_ZERO(pfcc, payload); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 36a94a94a420..507263a2d226 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -451,24 +451,27 @@ static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p) } static void mlxsw_sp_pg_buf_pack(char *pbmc_pl, int pg_index, int mtu, - bool pause_en) + bool pause_en, bool pfc_en, u16 delay) { u16 pg_size = 2 * MLXSW_SP_BYTES_TO_CELLS(mtu); - if (pause_en) { - u16 pg_pause_size = pg_size + MLXSW_SP_PAUSE_DELAY; + delay = pfc_en ? 
mlxsw_sp_pfc_delay_get(mtu, delay) : + MLXSW_SP_PAUSE_DELAY; + if (pause_en || pfc_en) mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, pg_index, - pg_pause_size, pg_size); - } else { + pg_size + delay, pg_size); + else mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, pg_index, pg_size); - } } int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu, - u8 *prio_tc, bool pause_en) + u8 *prio_tc, bool pause_en, + struct ieee_pfc *my_pfc) { struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + u8 pfc_en = !!my_pfc ? my_pfc->pfc_en : 0; + u16 delay = !!my_pfc ? my_pfc->delay : 0; char pbmc_pl[MLXSW_REG_PBMC_LEN]; int i, j, err; @@ -479,9 +482,11 @@ int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu, for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { bool configure = false; + bool pfc = false; for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) { if (prio_tc[j] == i) { + pfc = pfc_en & BIT(j); configure = true; break; } @@ -489,7 +494,7 @@ int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu, if (!configure) continue; - mlxsw_sp_pg_buf_pack(pbmc_pl, i, mtu, pause_en); + mlxsw_sp_pg_buf_pack(pbmc_pl, i, mtu, pause_en, pfc, delay); } return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl); @@ -500,12 +505,14 @@ static int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, { u8 def_prio_tc[IEEE_8021QAZ_MAX_TCS] = {0}; bool dcb_en = !!mlxsw_sp_port->dcb.ets; + struct ieee_pfc *my_pfc; u8 *prio_tc; prio_tc = dcb_en ? mlxsw_sp_port->dcb.ets->prio_tc : def_prio_tc; + my_pfc = dcb_en ? mlxsw_sp_port->dcb.pfc : NULL; return __mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, prio_tc, - pause_en); + pause_en, my_pfc); } static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu) @@ -1032,6 +1039,11 @@ static int mlxsw_sp_port_set_pauseparam(struct net_device *dev, bool pause_en = pause->tx_pause || pause->rx_pause; int err; + if (mlxsw_sp_port->dcb.pfc && mlxsw_sp_port->dcb.pfc->pfc_en) { + netdev_err(dev, "PFC already enabled on port\n"); + return -EINVAL; + } + if (pause->autoneg) { netdev_err(dev, "PAUSE frames autonegotiation isn't supported\n"); return -EINVAL; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index f4b53dd34f22..47610a5ccd78 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -72,6 +72,14 @@ */ #define MLXSW_SP_PAUSE_DELAY 612 +#define MLXSW_SP_CELL_FACTOR 2 /* 2 * cell_size / (IPG + cell_size + 1) */ + +static inline u16 mlxsw_sp_pfc_delay_get(int mtu, u16 delay) +{ + delay = MLXSW_SP_BYTES_TO_CELLS(DIV_ROUND_UP(delay, BITS_PER_BYTE)); + return MLXSW_SP_CELL_FACTOR * delay + MLXSW_SP_BYTES_TO_CELLS(mtu); +} + struct mlxsw_sp_port; struct mlxsw_sp_upper { @@ -183,6 +191,7 @@ struct mlxsw_sp_port { struct { struct ieee_ets *ets; struct ieee_maxrate *maxrate; + struct ieee_pfc *pfc; } dcb; /* 802.1Q bridge VLANs */ unsigned long *active_vlans; @@ -295,7 +304,8 @@ int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port, int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 switch_prio, u8 tclass); int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu, - u8 *prio_tc, bool pause_en); + u8 *prio_tc, bool pause_en, + struct ieee_pfc *my_pfc); int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port, enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index, u32 maxrate); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c 
b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c index 8786424f6191..0b323661c0b6 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c @@ -34,6 +34,7 @@ #include #include +#include #include #include "spectrum.h" @@ -151,7 +152,8 @@ static int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, * traffic is still directed to them. */ err = __mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, - ets->prio_tc, pause_en); + ets->prio_tc, pause_en, + mlxsw_sp_port->dcb.pfc); if (err) { netdev_err(dev, "Failed to configure port's headroom\n"); return err; @@ -291,11 +293,101 @@ err_port_ets_maxrate_set: return err; } +static int mlxsw_sp_port_pfc_cnt_get(struct mlxsw_sp_port *mlxsw_sp_port, + u8 prio) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + struct ieee_pfc *my_pfc = mlxsw_sp_port->dcb.pfc; + char ppcnt_pl[MLXSW_REG_PPCNT_LEN]; + int err; + + mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, + MLXSW_REG_PPCNT_PRIO_CNT, prio); + err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl); + if (err) + return err; + + my_pfc->requests[prio] = mlxsw_reg_ppcnt_tx_pause_get(ppcnt_pl); + my_pfc->indications[prio] = mlxsw_reg_ppcnt_rx_pause_get(ppcnt_pl); + + return 0; +} + +static int mlxsw_sp_dcbnl_ieee_getpfc(struct net_device *dev, + struct ieee_pfc *pfc) +{ + struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); + int err, i; + + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + err = mlxsw_sp_port_pfc_cnt_get(mlxsw_sp_port, i); + if (err) { + netdev_err(dev, "Failed to get PFC count for priority %d\n", + i); + return err; + } + } + + memcpy(pfc, mlxsw_sp_port->dcb.pfc, sizeof(*pfc)); + + return 0; +} + +static int mlxsw_sp_port_pfc_set(struct mlxsw_sp_port *mlxsw_sp_port, + struct ieee_pfc *pfc) +{ + char pfcc_pl[MLXSW_REG_PFCC_LEN]; + + mlxsw_reg_pfcc_pack(pfcc_pl, mlxsw_sp_port->local_port); + mlxsw_reg_pfcc_prio_pack(pfcc_pl, pfc->pfc_en); + + return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc), + pfcc_pl); +} + +static int mlxsw_sp_dcbnl_ieee_setpfc(struct net_device *dev, + struct ieee_pfc *pfc) +{ + struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); + int err; + + if (mlxsw_sp_port->link.tx_pause || mlxsw_sp_port->link.rx_pause) { + netdev_err(dev, "PAUSE frames already enabled on port\n"); + return -EINVAL; + } + + err = __mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, + mlxsw_sp_port->dcb.ets->prio_tc, + false, pfc); + if (err) { + netdev_err(dev, "Failed to configure port's headroom for PFC\n"); + return err; + } + + err = mlxsw_sp_port_pfc_set(mlxsw_sp_port, pfc); + if (err) { + netdev_err(dev, "Failed to configure PFC\n"); + goto err_port_pfc_set; + } + + memcpy(mlxsw_sp_port->dcb.pfc, pfc, sizeof(*pfc)); + + return 0; + +err_port_pfc_set: + __mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, + mlxsw_sp_port->dcb.ets->prio_tc, false, + mlxsw_sp_port->dcb.pfc); + return err; +} + static const struct dcbnl_rtnl_ops mlxsw_sp_dcbnl_ops = { .ieee_getets = mlxsw_sp_dcbnl_ieee_getets, .ieee_setets = mlxsw_sp_dcbnl_ieee_setets, .ieee_getmaxrate = mlxsw_sp_dcbnl_ieee_getmaxrate, .ieee_setmaxrate = mlxsw_sp_dcbnl_ieee_setmaxrate, + .ieee_getpfc = mlxsw_sp_dcbnl_ieee_getpfc, + .ieee_setpfc = mlxsw_sp_dcbnl_ieee_setpfc, .getdcbx = mlxsw_sp_dcbnl_getdcbx, .setdcbx = mlxsw_sp_dcbnl_setdcbx, @@ -338,6 +430,23 @@ static void mlxsw_sp_port_maxrate_fini(struct mlxsw_sp_port *mlxsw_sp_port) kfree(mlxsw_sp_port->dcb.maxrate); } +static int 
mlxsw_sp_port_pfc_init(struct mlxsw_sp_port *mlxsw_sp_port) +{ + mlxsw_sp_port->dcb.pfc = kzalloc(sizeof(*mlxsw_sp_port->dcb.pfc), + GFP_KERNEL); + if (!mlxsw_sp_port->dcb.pfc) + return -ENOMEM; + + mlxsw_sp_port->dcb.pfc->pfc_cap = IEEE_8021QAZ_MAX_TCS; + + return 0; +} + +static void mlxsw_sp_port_pfc_fini(struct mlxsw_sp_port *mlxsw_sp_port) +{ + kfree(mlxsw_sp_port->dcb.pfc); +} + int mlxsw_sp_port_dcb_init(struct mlxsw_sp_port *mlxsw_sp_port) { int err; @@ -348,11 +457,16 @@ int mlxsw_sp_port_dcb_init(struct mlxsw_sp_port *mlxsw_sp_port) err = mlxsw_sp_port_maxrate_init(mlxsw_sp_port); if (err) goto err_port_maxrate_init; + err = mlxsw_sp_port_pfc_init(mlxsw_sp_port); + if (err) + goto err_port_pfc_init; mlxsw_sp_port->dev->dcbnl_ops = &mlxsw_sp_dcbnl_ops; return 0; +err_port_pfc_init: + mlxsw_sp_port_maxrate_fini(mlxsw_sp_port); err_port_maxrate_init: mlxsw_sp_port_ets_fini(mlxsw_sp_port); return err; @@ -360,6 +474,7 @@ err_port_maxrate_init: void mlxsw_sp_port_dcb_fini(struct mlxsw_sp_port *mlxsw_sp_port) { + mlxsw_sp_port_pfc_fini(mlxsw_sp_port); mlxsw_sp_port_maxrate_fini(mlxsw_sp_port); mlxsw_sp_port_ets_fini(mlxsw_sp_port); } From 1f2f83f838489d386ecad9d0c77c3d6ec983102c Mon Sep 17 00:00:00 2001 From: Stefan Assmann Date: Wed, 3 Feb 2016 09:20:51 +0100 Subject: [PATCH 0375/1649] e1000: call ndo_stop() instead of dev_close() when running offline selftest Calling dev_close() causes IFF_UP to be cleared which will remove the interfaces routes and some addresses. That's probably not what the user intended when running the offline selftest. Besides this does not happen if the interface is brought down before the test, so the current behaviour is inconsistent. Instead call the net_device_ops ndo_stop function directly and avoid touching IFF_UP at all. 
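A toy user-space model (not kernel code; all names are invented for the illustration) of the behaviour difference this commit message describes: closing through the networking core clears the administratively-up flag and the routes tied to it, whereas calling the driver's own stop/open handlers only quiesces and restarts the hardware.

#include <stdbool.h>
#include <stdio.h>

struct toy_netdev {
	bool if_up;        /* models IFF_UP */
	bool hw_running;   /* models the driver's internal state */
	int routes;        /* routes/addresses tied to the interface */
};

static void toy_driver_stop(struct toy_netdev *dev) { dev->hw_running = false; }
static void toy_driver_open(struct toy_netdev *dev) { dev->hw_running = true; }

static void toy_dev_close(struct toy_netdev *dev)
{
	toy_driver_stop(dev);
	dev->if_up = false;   /* core clears the up flag... */
	dev->routes = 0;      /* ...and the routes/addresses go with it */
}

int main(void)
{
	struct toy_netdev dev = { .if_up = true, .hw_running = true, .routes = 3 };

	/* Offline self-test via the driver's own handlers: routes survive. */
	toy_driver_stop(&dev);
	toy_driver_open(&dev);
	printf("after stop/open via driver handlers: up=%d routes=%d\n",
	       dev.if_up, dev.routes);

	/* Old behaviour: closing through the core drops the up flag and routes. */
	toy_dev_close(&dev);
	printf("after close through the core:        up=%d routes=%d\n",
	       dev.if_up, dev.routes);
	return 0;
}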
Signed-off-by: Stefan Assmann Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/e1000/e1000.h | 2 ++ drivers/net/ethernet/intel/e1000/e1000_ethtool.c | 4 ++-- drivers/net/ethernet/intel/e1000/e1000_main.c | 8 ++++---- 3 files changed, 8 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/intel/e1000/e1000.h b/drivers/net/ethernet/intel/e1000/e1000.h index 98fe5a2cd6e3..d7bdea79e9fa 100644 --- a/drivers/net/ethernet/intel/e1000/e1000.h +++ b/drivers/net/ethernet/intel/e1000/e1000.h @@ -358,6 +358,8 @@ struct net_device *e1000_get_hw_dev(struct e1000_hw *hw); extern char e1000_driver_name[]; extern const char e1000_driver_version[]; +int e1000_open(struct net_device *netdev); +int e1000_close(struct net_device *netdev); int e1000_up(struct e1000_adapter *adapter); void e1000_down(struct e1000_adapter *adapter); void e1000_reinit_locked(struct e1000_adapter *adapter); diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c index 83e557c7f279..975eeb885ca2 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c +++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c @@ -1553,7 +1553,7 @@ static void e1000_diag_test(struct net_device *netdev, if (if_running) /* indicate we're in test mode */ - dev_close(netdev); + e1000_close(netdev); else e1000_reset(adapter); @@ -1582,7 +1582,7 @@ static void e1000_diag_test(struct net_device *netdev, e1000_reset(adapter); clear_bit(__E1000_TESTING, &adapter->flags); if (if_running) - dev_open(netdev); + e1000_open(netdev); } else { e_info(hw, "online testing starting\n"); /* Online tests */ diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c index 3fc7bde699ba..6de0c7df56fa 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_main.c +++ b/drivers/net/ethernet/intel/e1000/e1000_main.c @@ -114,8 +114,8 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent); static void e1000_remove(struct pci_dev *pdev); static int e1000_alloc_queues(struct e1000_adapter *adapter); static int e1000_sw_init(struct e1000_adapter *adapter); -static int e1000_open(struct net_device *netdev); -static int e1000_close(struct net_device *netdev); +int e1000_open(struct net_device *netdev); +int e1000_close(struct net_device *netdev); static void e1000_configure_tx(struct e1000_adapter *adapter); static void e1000_configure_rx(struct e1000_adapter *adapter); static void e1000_setup_rctl(struct e1000_adapter *adapter); @@ -1360,7 +1360,7 @@ static int e1000_alloc_queues(struct e1000_adapter *adapter) * handler is registered with the OS, the watchdog task is started, * and the stack is notified that the interface is ready. **/ -static int e1000_open(struct net_device *netdev) +int e1000_open(struct net_device *netdev) { struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; @@ -1437,7 +1437,7 @@ err_setup_tx: * needs to be disabled. A global MAC reset is issued to stop the * hardware, and all transmit and receive resources are freed. **/ -static int e1000_close(struct net_device *netdev) +int e1000_close(struct net_device *netdev) { struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; From 453e16e8e8b96821111e8d90252f4df8ec418eea Mon Sep 17 00:00:00 2001 From: Deepthi Kavalur Date: Fri, 1 Apr 2016 03:56:01 -0700 Subject: [PATCH 0376/1649] i40e: Inserting a HW capability display info Display MSIx vector count for HW capabilities. 
Change-ID: I4b41e9b50360cf660e7fbcb85b9390fedcf313b1 Signed-off-by: Deepthi Kavalur Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_common.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c index ebcc0d3ecbfb..f3c1d8890cbb 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c @@ -3080,6 +3080,9 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff, break; case I40E_AQ_CAP_ID_MSIX: p->num_msix_vectors = number; + i40e_debug(hw, I40E_DEBUG_INIT, + "HW Capability: MSIX vector count = %d\n", + p->num_msix_vectors); break; case I40E_AQ_CAP_ID_VF_MSIX: p->num_msix_vectors_vf = number; From 89dd05512b79ee9ba0950f1ba1fb8077ec898ea2 Mon Sep 17 00:00:00 2001 From: Shannon Nelson Date: Fri, 1 Apr 2016 03:56:02 -0700 Subject: [PATCH 0377/1649] i40e: Leave debug_mask cleared at init Don't set our internal debug_mask at startup unless we get specific signal to from the debug module parameter. This should take care of the issue with all the device capabilities getting printed even when we hadn't asked for the debug info. Change-ID: I7fbc6bd8b11ed9b0631ec018ff36015a04100b6c Signed-off-by: Shannon Nelson Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_main.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index d6147f899062..86abd086ccbd 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -8438,7 +8438,6 @@ static int i40e_sw_init(struct i40e_pf *pf) pf->msg_enable = netif_msg_init(I40E_DEFAULT_MSG_ENABLE, (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)); - pf->hw.debug_mask = pf->msg_enable | I40E_DEBUG_DIAG; if (debug != -1 && debug != I40E_DEFAULT_MSG_ENABLE) { if (I40E_DEBUG_USER & debug) pf->hw.debug_mask = debug; From 30728c5bdf2ac6618eebf6949a2e59b3c4cf640f Mon Sep 17 00:00:00 2001 From: Akeem G Abodunrin Date: Fri, 1 Apr 2016 03:56:03 -0700 Subject: [PATCH 0378/1649] i40e: Move HW flush This patch moves the HW flush routine to the end of the reset flow, after the completion of writing to the device VFLR registers- the benefit is to avoid problems in the passthrough routines. Change-ID: Ieb56866f21895e6c1fc514b7328c3df79807a57c Signed-off-by: Akeem G Abodunrin Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 9924503c88f5..f2a9c14829ca 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -941,6 +941,7 @@ void i40e_reset_vf(struct i40e_vf *vf, bool flr) reg_idx = (hw->func_caps.vf_base_id + vf->vf_id) / 32; bit_idx = (hw->func_caps.vf_base_id + vf->vf_id) % 32; wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx)); + i40e_flush(hw); if (i40e_quiesce_vf_pci(vf)) dev_err(&pf->pdev->dev, "VF %d PCI transactions stuck\n", From d1bd743b5b4d675e739b574284d1412ba996fe07 Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Fri, 1 Apr 2016 03:56:04 -0700 Subject: [PATCH 0379/1649] i40e/i40evf: Move stack var deeper A local variable could move down inside the context where it is used. 
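An illustrative stand-alone example of the same cleanup on a made-up function: the temporary is declared in the innermost block that uses it rather than at the top of the function, which makes its lifetime obvious and keeps the outer scope uncluttered.

#include <stdio.h>

static int sum_positive(const int *vals, int n)
{
	int total = 0;
	int i;

	for (i = 0; i < n; i++) {
		if (vals[i] > 0) {
			int v = vals[i];   /* declared in the block where it is used */

			total += v;
		}
	}
	return total;
}

int main(void)
{
	int vals[] = { 3, -1, 4 };

	printf("%d\n", sum_positive(vals, 3));
	return 0;
}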
Change-ID: I9caba9e1eacf921037077f2665cbce83fd8e95d6 Signed-off-by: Jesse Brandeburg Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_txrx.c | 3 ++- drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 5d5fa5359a1d..76a48e9c859b 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -2408,7 +2408,7 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, unsigned char *hdr; } l4; unsigned char *exthdr; - u32 offset, cmd = 0, tunnel = 0; + u32 offset, cmd = 0; __be16 frag_off; u8 l4_proto = 0; @@ -2422,6 +2422,7 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, offset = ((ip.hdr - skb->data) / 2) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT; if (skb->encapsulation) { + u32 tunnel = 0; /* define outer network header type */ if (*tx_flags & I40E_TX_FLAGS_IPV4) { tunnel |= (*tx_flags & I40E_TX_FLAGS_TSO) ? diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c index 04aabc52ba0d..d633dcf4a882 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c @@ -1633,7 +1633,7 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, unsigned char *hdr; } l4; unsigned char *exthdr; - u32 offset, cmd = 0, tunnel = 0; + u32 offset, cmd = 0; __be16 frag_off; u8 l4_proto = 0; @@ -1647,6 +1647,7 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, offset = ((ip.hdr - skb->data) / 2) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT; if (skb->encapsulation) { + u32 tunnel = 0; /* define outer network header type */ if (*tx_flags & I40E_TX_FLAGS_IPV4) { tunnel |= (*tx_flags & I40E_TX_FLAGS_TSO) ? From 84b079928a10559ebc6679e1e973a3ee5b20ba83 Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Fri, 1 Apr 2016 03:56:05 -0700 Subject: [PATCH 0380/1649] i40e/i40evf: Drop unused tx_ring argument Some of the tx_ring arguments can be deleted since they are not used. 
Change-ID: I99275b0f191d7f63ec2f05061919904940c36f31 Signed-off-by: Jesse Brandeburg Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_txrx.c | 6 ++---- drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 6 ++---- 2 files changed, 4 insertions(+), 8 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 76a48e9c859b..f4e4d3d098dc 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -2252,15 +2252,13 @@ out: /** * i40e_tso - set up the tso context descriptor - * @tx_ring: ptr to the ring to send * @skb: ptr to the skb we're sending * @hdr_len: ptr to the size of the packet header * @cd_type_cmd_tso_mss: Quad Word 1 * * Returns 0 if no TSO can happen, 1 if tso is going, or error **/ -static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb, - u8 *hdr_len, u64 *cd_type_cmd_tso_mss) +static int i40e_tso(struct sk_buff *skb, u8 *hdr_len, u64 *cd_type_cmd_tso_mss) { u64 cd_cmd, cd_tso_len, cd_mss; union { @@ -2932,7 +2930,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, else if (protocol == htons(ETH_P_IPV6)) tx_flags |= I40E_TX_FLAGS_IPV6; - tso = i40e_tso(tx_ring, skb, &hdr_len, &cd_type_cmd_tso_mss); + tso = i40e_tso(skb, &hdr_len, &cd_type_cmd_tso_mss); if (tso < 0) goto out_drop; diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c index d633dcf4a882..ec1f4479d72e 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c @@ -1519,15 +1519,13 @@ out: /** * i40e_tso - set up the tso context descriptor - * @tx_ring: ptr to the ring to send * @skb: ptr to the skb we're sending * @hdr_len: ptr to the size of the packet header * @cd_type_cmd_tso_mss: Quad Word 1 * * Returns 0 if no TSO can happen, 1 if tso is going, or error **/ -static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb, - u8 *hdr_len, u64 *cd_type_cmd_tso_mss) +static int i40e_tso(struct sk_buff *skb, u8 *hdr_len, u64 *cd_type_cmd_tso_mss) { u64 cd_cmd, cd_tso_len, cd_mss; union { @@ -2150,7 +2148,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, else if (protocol == htons(ETH_P_IPV6)) tx_flags |= I40E_TX_FLAGS_IPV6; - tso = i40e_tso(tx_ring, skb, &hdr_len, &cd_type_cmd_tso_mss); + tso = i40e_tso(skb, &hdr_len, &cd_type_cmd_tso_mss); if (tso < 0) goto out_drop; From 1f15d66712bb64e39fe2c23b1b32f68f9e1d4ee7 Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Fri, 1 Apr 2016 03:56:06 -0700 Subject: [PATCH 0381/1649] i40e/i40evf: Faster RX via avoiding FCoE As it turns out, calling into other files from hot path hurts performance a lot. In this case the majority of the time we call "check FCoE" and the packet is *not* FCoE, but this call was taking 5% of our total cycles spent on receive. 
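A minimal sketch of the pattern (function names and packet-type bounds here are made up, not the i40e values): a cheap inline range test runs for every packet, and the expensive handler in another file is only called, inside unlikely(), when the test says the packet can actually be FCoE.

    #define unlikely(x) __builtin_expect(!!(x), 0)    /* local copy for the sketch */

    static inline int ptype_is_fcoe(unsigned int ptype)
    {
        return ptype >= 100 && ptype <= 107;          /* illustrative bounds only */
    }

    int fcoe_handle_offload(void *desc);              /* out-of-line, other file */

    static int rx_one(unsigned int ptype, void *desc)
    {
        if (unlikely(ptype_is_fcoe(ptype) && !fcoe_handle_offload(desc)))
            return 1;                                 /* rare path: drop the frame */
        return 0;                                     /* common case: keep going */
    }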
Change-ID: I080552c26e7060bc7b78504dc2763f6f0b3d8c76 Signed-off-by: Jesse Brandeburg Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_fcoe.c | 12 +----------- drivers/net/ethernet/intel/i40e/i40e_txrx.c | 8 ++++++-- drivers/net/ethernet/intel/i40e/i40e_txrx.h | 10 ++++++++++ drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 4 +++- drivers/net/ethernet/intel/i40evf/i40e_txrx.h | 10 ++++++++++ 5 files changed, 30 insertions(+), 14 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c index 92d2208d13c7..58e6c1570335 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c +++ b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2015 Intel Corporation. + * Copyright(c) 2013 - 2016 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -37,16 +37,6 @@ #include "i40e.h" #include "i40e_fcoe.h" -/** - * i40e_rx_is_fcoe - returns true if the rx packet type is FCoE - * @ptype: the packet type field from rx descriptor write-back - **/ -static inline bool i40e_rx_is_fcoe(u16 ptype) -{ - return (ptype >= I40E_RX_PTYPE_L2_FCOE_PAY3) && - (ptype <= I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER); -} - /** * i40e_fcoe_sof_is_class2 - returns true if this is a FC Class 2 SOF * @sof: the FCoE start of frame delimiter diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index f4e4d3d098dc..29ffed27e5a9 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -1703,7 +1703,9 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, const int budget) ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0; #ifdef I40E_FCOE - if (!i40e_fcoe_handle_offload(rx_ring, rx_desc, skb)) { + if (unlikely( + i40e_rx_is_fcoe(rx_ptype) && + !i40e_fcoe_handle_offload(rx_ring, rx_desc, skb))) { dev_kfree_skb_any(skb); continue; } @@ -1834,7 +1836,9 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget) ? 
le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0; #ifdef I40E_FCOE - if (!i40e_fcoe_handle_offload(rx_ring, rx_desc, skb)) { + if (unlikely( + i40e_rx_is_fcoe(rx_ptype) && + !i40e_fcoe_handle_offload(rx_ring, rx_desc, skb))) { dev_kfree_skb_any(skb); continue; } diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h index 9e654e611642..77ccdde56c0c 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h @@ -448,4 +448,14 @@ static inline bool i40e_chk_linearize(struct sk_buff *skb, int count) return __i40e_chk_linearize(skb); } + +/** + * i40e_rx_is_fcoe - returns true if the Rx packet type is FCoE + * @ptype: the packet type field from Rx descriptor write-back + **/ +static inline bool i40e_rx_is_fcoe(u16 ptype) +{ + return (ptype >= I40E_RX_PTYPE_L2_FCOE_PAY3) && + (ptype <= I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER); +} #endif /* _I40E_TXRX_H_ */ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c index ec1f4479d72e..0c912a4999db 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c @@ -1160,7 +1160,9 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, const int budget) ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0; #ifdef I40E_FCOE - if (!i40e_fcoe_handle_offload(rx_ring, rx_desc, skb)) { + if (unlikely( + i40e_rx_is_fcoe(rx_ptype) && + !i40e_fcoe_handle_offload(rx_ring, rx_desc, skb))) { dev_kfree_skb_any(skb); continue; } diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h index 3ec0ea5ea3db..84c28aa64fdf 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h @@ -430,4 +430,14 @@ static inline bool i40e_chk_linearize(struct sk_buff *skb, int count) return __i40evf_chk_linearize(skb); } + +/** + * i40e_rx_is_fcoe - returns true if the Rx packet type is FCoE + * @ptype: the packet type field from Rx descriptor write-back + **/ +static inline bool i40e_rx_is_fcoe(u16 ptype) +{ + return (ptype >= I40E_RX_PTYPE_L2_FCOE_PAY3) && + (ptype <= I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER); +} #endif /* _I40E_TXRX_H_ */ From c3bbbd2002b9565475721bb17b17f48ef5927498 Mon Sep 17 00:00:00 2001 From: Anjali Singhai Jain Date: Fri, 1 Apr 2016 03:56:07 -0700 Subject: [PATCH 0382/1649] i40e: Patch to support trusted VF This patch adds hook to support changing a VF from not-trusted to trusted and vice-versa. Fixed the wrappers and function prototype. Changed the dmesg to reflex the current state better. This patch also disables turning on/off trusted VF in MFP mode. 
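Once the ndo hook is in place the flag is normally driven from userspace, for example with iproute2's "ip link set <pf> vf <n> trust on". The sketch below only restates the validation order the handler uses (range check, MFP check, no-op when unchanged, then reset), with simplified types rather than the real i40e structures.

    #include <errno.h>
    #include <stdbool.h>

    struct vf { bool trusted; };
    struct pf { int num_vfs; bool mfp_enabled; struct vf *vf; };

    void reset_vf(struct vf *vf);             /* stands in for the VF reset call */

    int set_vf_trust(struct pf *pf, int vf_id, bool setting)
    {
        struct vf *vf;

        if (vf_id >= pf->num_vfs)
            return -EINVAL;                   /* unknown VF id */
        if (pf->mfp_enabled)
            return -EINVAL;                   /* not supported in MFP mode */

        vf = &pf->vf[vf_id];
        if (vf->trusted == setting)
            return 0;                         /* nothing to change */

        vf->trusted = setting;
        reset_vf(vf);                         /* apply the new privilege level */
        return 0;
    }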
Change-ID: Ibcd910935c01f0be1f3fdd6d427230291ee92ebe Signed-off-by: Anjali Singhai Jain Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_main.c | 1 + .../ethernet/intel/i40e/i40e_virtchnl_pf.c | 42 +++++++++++++++++++ .../ethernet/intel/i40e/i40e_virtchnl_pf.h | 2 + 3 files changed, 45 insertions(+) diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 86abd086ccbd..627acf0c5fea 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -9069,6 +9069,7 @@ static const struct net_device_ops i40e_netdev_ops = { .ndo_get_vf_config = i40e_ndo_get_vf_config, .ndo_set_vf_link_state = i40e_ndo_set_vf_link_state, .ndo_set_vf_spoofchk = i40e_ndo_set_vf_spoofchk, + .ndo_set_vf_trust = i40e_ndo_set_vf_trust, #if IS_ENABLED(CONFIG_VXLAN) .ndo_add_vxlan_port = i40e_add_vxlan_port, .ndo_del_vxlan_port = i40e_del_vxlan_port, diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index f2a9c14829ca..b3539660f4f1 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -2763,3 +2763,45 @@ int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable) out: return ret; } + +/** + * i40e_ndo_set_vf_trust + * @netdev: network interface device structure of the pf + * @vf_id: VF identifier + * @setting: trust setting + * + * Enable or disable VF trust setting + **/ +int i40e_ndo_set_vf_trust(struct net_device *netdev, int vf_id, bool setting) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_pf *pf = np->vsi->back; + struct i40e_vf *vf; + int ret = 0; + + /* validate the request */ + if (vf_id >= pf->num_alloc_vfs) { + dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id); + return -EINVAL; + } + + if (pf->flags & I40E_FLAG_MFP_ENABLED) { + dev_err(&pf->pdev->dev, "Trusted VF not supported in MFP mode.\n"); + return -EINVAL; + } + + vf = &pf->vf[vf_id]; + + if (!vf) + return -EINVAL; + if (setting == vf->trusted) + goto out; + + vf->trusted = setting; + i40e_vc_notify_vf_reset(vf); + i40e_reset_vf(vf, false); + dev_info(&pf->pdev->dev, "VF %u is now %strusted\n", + vf_id, setting ? 
"" : "un"); +out: + return ret; +} diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h index e7b2fba0309e..838cbd2299a4 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h @@ -88,6 +88,7 @@ struct i40e_vf { struct i40e_virtchnl_ether_addr default_fcoe_addr; u16 port_vlan_id; bool pf_set_mac; /* The VMM admin set the VF MAC address */ + bool trusted; /* VSI indices - actual VSI pointers are maintained in the PF structure * When assigned, these will be non-zero, because VSI 0 is always @@ -127,6 +128,7 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos); int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate, int max_tx_rate); +int i40e_ndo_set_vf_trust(struct net_device *netdev, int vf_id, bool setting); int i40e_ndo_get_vf_config(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi); int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link); From 14c5f5d264c3ee28e8ec9fd4dffb29f5d1ea1d02 Mon Sep 17 00:00:00 2001 From: Shannon Nelson Date: Fri, 1 Apr 2016 03:56:08 -0700 Subject: [PATCH 0383/1649] i40e: Restrict VF poll mode to only single function mode devices The VFs can request their queues to be set up into polling mode, rather than interrupt mode, which works well for supporting things like DPDK, but this should not be available when working in an multi-function support device. Change-ID: Id36792e4e7422db8f2033336507211f68f14ff6f Signed-off-by: Shannon Nelson Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index b3539660f4f1..30f8cbe6b54b 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -1362,8 +1362,16 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg) I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2; } - if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING) + if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING) { + if (pf->flags & I40E_FLAG_MFP_ENABLED) { + dev_err(&pf->pdev->dev, + "VF %d requested polling mode: this feature is supported only when the device is running in single function per port (SFP) mode\n", + vf->vf_id); + ret = I40E_ERR_PARAM; + goto err; + } vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING; + } if (pf->flags & I40E_FLAG_WB_ON_ITR_CAPABLE) { if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) From 437f82a2290ed94f0d6a86b749101f1ad5ed6231 Mon Sep 17 00:00:00 2001 From: Shannon Nelson Date: Fri, 1 Apr 2016 03:56:09 -0700 Subject: [PATCH 0384/1649] i40e: Move NVM variable out of AQ struct The NVM update status info should stay collected together, not spread across different structs. 
Change-ID: Ic16f9e9fd79945d865bb7226184c889884585025 Signed-off-by: Shannon Nelson Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_adminq.c | 6 +++--- drivers/net/ethernet/intel/i40e/i40e_adminq.h | 1 - drivers/net/ethernet/intel/i40e/i40e_nvm.c | 12 ++++++------ drivers/net/ethernet/intel/i40e/i40e_type.h | 1 + drivers/net/ethernet/intel/i40evf/i40e_adminq.h | 1 - drivers/net/ethernet/intel/i40evf/i40e_type.h | 1 + 6 files changed, 11 insertions(+), 11 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c index df8e2fd6a649..e8278e1d6130 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c @@ -624,7 +624,7 @@ i40e_status i40e_init_adminq(struct i40e_hw *hw) /* pre-emptive resource lock release */ i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL); - hw->aq.nvm_release_on_done = false; + hw->nvm_release_on_done = false; hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; ret_code = i40e_aq_set_hmc_resource_profile(hw, @@ -1024,9 +1024,9 @@ i40e_status i40e_clean_arq_element(struct i40e_hw *hw, hw->aq.arq.next_to_use = ntu; if (i40e_is_nvm_update_op(&e->desc)) { - if (hw->aq.nvm_release_on_done) { + if (hw->nvm_release_on_done) { i40e_release_nvm(hw); - hw->aq.nvm_release_on_done = false; + hw->nvm_release_on_done = false; } switch (hw->nvmupd_state) { diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.h b/drivers/net/ethernet/intel/i40e/i40e_adminq.h index 12fbbddea299..d92aad38afdc 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq.h +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.h @@ -97,7 +97,6 @@ struct i40e_adminq_info { u32 fw_build; /* firmware build number */ u16 api_maj_ver; /* api major version */ u16 api_min_ver; /* api minor version */ - bool nvm_release_on_done; struct mutex asq_mutex; /* Send queue lock */ struct mutex arq_mutex; /* Receive queue lock */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c index 5730f8091e1b..1ae29ac8c310 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c +++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c @@ -696,7 +696,7 @@ i40e_status i40e_nvmupd_command(struct i40e_hw *hw, i40e_debug(hw, I40E_DEBUG_NVM, "%s state %d nvm_release_on_hold %d cmd 0x%08x config 0x%08x offset 0x%08x data_size 0x%08x\n", i40e_nvm_update_state_str[upd_cmd], hw->nvmupd_state, - hw->aq.nvm_release_on_done, + hw->nvm_release_on_done, cmd->command, cmd->config, cmd->offset, cmd->data_size); if (upd_cmd == I40E_NVMUPD_INVALID) { @@ -799,7 +799,7 @@ static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw, if (status) { i40e_release_nvm(hw); } else { - hw->aq.nvm_release_on_done = true; + hw->nvm_release_on_done = true; hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT; } } @@ -815,7 +815,7 @@ static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw, if (status) { i40e_release_nvm(hw); } else { - hw->aq.nvm_release_on_done = true; + hw->nvm_release_on_done = true; hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT; } } @@ -849,7 +849,7 @@ static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw, -EIO; i40e_release_nvm(hw); } else { - hw->aq.nvm_release_on_done = true; + hw->nvm_release_on_done = true; hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT; } } @@ -953,7 +953,7 @@ retry: -EIO; hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; } else { - hw->aq.nvm_release_on_done = true; + hw->nvm_release_on_done = true; hw->nvmupd_state = 
I40E_NVMUPD_STATE_INIT_WAIT; } break; @@ -980,7 +980,7 @@ retry: -EIO; hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; } else { - hw->aq.nvm_release_on_done = true; + hw->nvm_release_on_done = true; hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT; } break; diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h index 3335f9d13374..bf693580f9c4 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_type.h +++ b/drivers/net/ethernet/intel/i40e/i40e_type.h @@ -549,6 +549,7 @@ struct i40e_hw { enum i40e_nvmupd_state nvmupd_state; struct i40e_aq_desc nvm_wb_desc; struct i40e_virt_mem nvm_buff; + bool nvm_release_on_done; /* HMC info */ struct i40e_hmc_info hmc; /* HMC info struct */ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq.h index a3eae5d9a2bd..1f9b3b5d946d 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.h @@ -97,7 +97,6 @@ struct i40e_adminq_info { u32 fw_build; /* firmware build number */ u16 api_maj_ver; /* api major version */ u16 api_min_ver; /* api minor version */ - bool nvm_release_on_done; struct mutex asq_mutex; /* Send queue lock */ struct mutex arq_mutex; /* Receive queue lock */ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h index 301fe2b6dd03..d68e017079e3 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_type.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_type.h @@ -522,6 +522,7 @@ struct i40e_hw { enum i40e_nvmupd_state nvmupd_state; struct i40e_aq_desc nvm_wb_desc; struct i40e_virt_mem nvm_buff; + bool nvm_release_on_done; /* HMC info */ struct i40e_hmc_info hmc; /* HMC info struct */ From 585954f8b808def857771037392c1621f167fa92 Mon Sep 17 00:00:00 2001 From: Mitch Williams Date: Fri, 1 Apr 2016 03:56:10 -0700 Subject: [PATCH 0385/1649] i40e: Add RSS configuration to virtual channel Add opcodes and structures to support RSS configuration by PF driver on behalf of the VF drivers. This reduces complexity in the VF driver and allows us to support future hardware designs without modifying the VF driver. Change-ID: I8c75765c630eacb71f95967f1109a198542593ac Signed-off-by: Mitch Williams Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- .../net/ethernet/intel/i40e/i40e_virtchnl.h | 45 +++++++++++++++++-- .../net/ethernet/intel/i40evf/i40e_virtchnl.h | 45 +++++++++++++++++-- 2 files changed, 84 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h index ab866cf3dc18..c92a3bdee229 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h @@ -80,10 +80,15 @@ enum i40e_virtchnl_ops { I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE = 14, I40E_VIRTCHNL_OP_GET_STATS = 15, I40E_VIRTCHNL_OP_FCOE = 16, - I40E_VIRTCHNL_OP_EVENT = 17, + I40E_VIRTCHNL_OP_EVENT = 17, /* must ALWAYS be 17 */ I40E_VIRTCHNL_OP_IWARP = 20, I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP = 21, I40E_VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP = 22, + I40E_VIRTCHNL_OP_CONFIG_RSS_KEY = 23, + I40E_VIRTCHNL_OP_CONFIG_RSS_LUT = 24, + I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS = 25, + I40E_VIRTCHNL_OP_SET_RSS_HENA = 26, + }; /* Virtual channel message descriptor. 
This overlays the admin queue @@ -157,6 +162,7 @@ struct i40e_virtchnl_vsi_resource { #define I40E_VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000 #define I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000 #define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 0x00040000 +#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF 0X00080000 struct i40e_virtchnl_vf_resource { u16 num_vsis; @@ -165,8 +171,8 @@ struct i40e_virtchnl_vf_resource { u16 max_mtu; u32 vf_offload_flags; - u32 max_fcoe_contexts; - u32 max_fcoe_filters; + u32 rss_key_size; + u32 rss_lut_size; struct i40e_virtchnl_vsi_resource vsi_res[1]; }; @@ -325,6 +331,39 @@ struct i40e_virtchnl_promisc_info { * PF replies with struct i40e_eth_stats in an external buffer. */ +/* I40E_VIRTCHNL_OP_CONFIG_RSS_KEY + * I40E_VIRTCHNL_OP_CONFIG_RSS_LUT + * VF sends these messages to configure RSS. Only supported if both PF + * and VF drivers set the I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF bit during + * configuration negotiation. If this is the case, then the RSS fields in + * the VF resource struct are valid. + * Both the key and LUT are initialized to 0 by the PF, meaning that + * RSS is effectively disabled until set up by the VF. + */ +struct i40e_virtchnl_rss_key { + u16 vsi_id; + u16 key_len; + u8 key[1]; /* RSS hash key, packed bytes */ +}; + +struct i40e_virtchnl_rss_lut { + u16 vsi_id; + u16 lut_entries; + u8 lut[1]; /* RSS lookup table*/ +}; + +/* I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS + * I40E_VIRTCHNL_OP_SET_RSS_HENA + * VF sends these messages to get and set the hash filter enable bits for RSS. + * By default, the PF sets these to all possible traffic types that the + * hardware supports. The VF can query this value if it wants to change the + * traffic types that are hashed by the hardware. + * Traffic types are defined in the i40e_filter_pctype enum in i40e_type.h + */ +struct i40e_virtchnl_rss_hena { + u64 hena; +}; + /* I40E_VIRTCHNL_OP_EVENT * PF sends this message to inform the VF driver of events that may affect it. * No direct response is expected from the VF, though it may generate other diff --git a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h index 3b9d2037456c..f04ce6cb70dc 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h @@ -80,7 +80,12 @@ enum i40e_virtchnl_ops { I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE = 14, I40E_VIRTCHNL_OP_GET_STATS = 15, I40E_VIRTCHNL_OP_FCOE = 16, - I40E_VIRTCHNL_OP_EVENT = 17, + I40E_VIRTCHNL_OP_EVENT = 17, /* must ALWAYS be 17 */ + I40E_VIRTCHNL_OP_CONFIG_RSS_KEY = 23, + I40E_VIRTCHNL_OP_CONFIG_RSS_LUT = 24, + I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS = 25, + I40E_VIRTCHNL_OP_SET_RSS_HENA = 26, + }; /* Virtual channel message descriptor. This overlays the admin queue @@ -154,6 +159,7 @@ struct i40e_virtchnl_vsi_resource { #define I40E_VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000 #define I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000 #define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 0x00040000 +#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF 0X00080000 struct i40e_virtchnl_vf_resource { u16 num_vsis; @@ -162,8 +168,8 @@ struct i40e_virtchnl_vf_resource { u16 max_mtu; u32 vf_offload_flags; - u32 max_fcoe_contexts; - u32 max_fcoe_filters; + u32 rss_key_size; + u32 rss_lut_size; struct i40e_virtchnl_vsi_resource vsi_res[1]; }; @@ -322,6 +328,39 @@ struct i40e_virtchnl_promisc_info { * PF replies with struct i40e_eth_stats in an external buffer. 
*/ +/* I40E_VIRTCHNL_OP_CONFIG_RSS_KEY + * I40E_VIRTCHNL_OP_CONFIG_RSS_LUT + * VF sends these messages to configure RSS. Only supported if both PF + * and VF drivers set the I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF bit during + * configuration negotiation. If this is the case, then the RSS fields in + * the VF resource struct are valid. + * Both the key and LUT are initialized to 0 by the PF, meaning that + * RSS is effectively disabled until set up by the VF. + */ +struct i40e_virtchnl_rss_key { + u16 vsi_id; + u16 key_len; + u8 key[1]; /* RSS hash key, packed bytes */ +}; + +struct i40e_virtchnl_rss_lut { + u16 vsi_id; + u16 lut_entries; + u8 lut[1]; /* RSS lookup table*/ +}; + +/* I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS + * I40E_VIRTCHNL_OP_SET_RSS_HENA + * VF sends these messages to get and set the hash filter enable bits for RSS. + * By default, the PF sets these to all possible traffic types that the + * hardware supports. The VF can query this value if it wants to change the + * traffic types that are hashed by the hardware. + * Traffic types are defined in the i40e_filter_pctype enum in i40e_type.h + */ +struct i40e_virtchnl_rss_hena { + u64 hena; +}; + /* I40E_VIRTCHNL_OP_EVENT * PF sends this message to inform the VF driver of events that may affect it. * No direct response is expected from the VF, though it may generate other From bab2fb60dcdd0f9d8715749d056ddd6c465b1875 Mon Sep 17 00:00:00 2001 From: Shannon Nelson Date: Fri, 1 Apr 2016 03:56:11 -0700 Subject: [PATCH 0386/1649] i40e: Move NVM event wait check to NVM code The logic that checks AQ events for NVM done events is better kept in nvm.c with the rest of the nvmupdate handling code. Change-ID: I2ea58980df8ecaa3726b28a37bff3dfcb8df03dc Signed-off-by: Shannon Nelson Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_adminq.c | 31 +------------------ drivers/net/ethernet/intel/i40e/i40e_nvm.c | 31 +++++++++++++++++++ .../net/ethernet/intel/i40e/i40e_prototype.h | 1 + 3 files changed, 33 insertions(+), 30 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c index e8278e1d6130..43bb4139d896 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c @@ -32,16 +32,6 @@ static void i40e_resume_aq(struct i40e_hw *hw); -/** - * i40e_is_nvm_update_op - return true if this is an NVM update operation - * @desc: API request descriptor - **/ -static inline bool i40e_is_nvm_update_op(struct i40e_aq_desc *desc) -{ - return (desc->opcode == cpu_to_le16(i40e_aqc_opc_nvm_erase)) || - (desc->opcode == cpu_to_le16(i40e_aqc_opc_nvm_update)); -} - /** * i40e_adminq_init_regs - Initialize AdminQ registers * @hw: pointer to the hardware structure @@ -1023,26 +1013,7 @@ i40e_status i40e_clean_arq_element(struct i40e_hw *hw, hw->aq.arq.next_to_clean = ntc; hw->aq.arq.next_to_use = ntu; - if (i40e_is_nvm_update_op(&e->desc)) { - if (hw->nvm_release_on_done) { - i40e_release_nvm(hw); - hw->nvm_release_on_done = false; - } - - switch (hw->nvmupd_state) { - case I40E_NVMUPD_STATE_INIT_WAIT: - hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; - break; - - case I40E_NVMUPD_STATE_WRITE_WAIT: - hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING; - break; - - default: - break; - } - } - + i40e_nvmupd_check_wait_event(hw, le16_to_cpu(e->desc.opcode)); clean_arq_element_out: /* Set pending if needed, unlock and return */ if (pending) diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c 
b/drivers/net/ethernet/intel/i40e/i40e_nvm.c index 1ae29ac8c310..f2cea3d25de3 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c +++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c @@ -1029,6 +1029,37 @@ retry: return status; } +/** + * i40e_nvmupd_check_wait_event - handle NVM update operation events + * @hw: pointer to the hardware structure + * @opcode: the event that just happened + **/ +void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode) +{ + if (opcode == i40e_aqc_opc_nvm_erase || + opcode == i40e_aqc_opc_nvm_update) { + i40e_debug(hw, I40E_DEBUG_NVM, + "NVMUPD: clearing wait on opcode 0x%04x\n", opcode); + if (hw->nvm_release_on_done) { + i40e_release_nvm(hw); + hw->nvm_release_on_done = false; + } + + switch (hw->nvmupd_state) { + case I40E_NVMUPD_STATE_INIT_WAIT: + hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; + break; + + case I40E_NVMUPD_STATE_WRITE_WAIT: + hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING; + break; + + default: + break; + } + } +} + /** * i40e_nvmupd_validate_command - Validate given command * @hw: pointer to hardware structure diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h index d51eee5bf79a..134035f53f2c 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h +++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h @@ -308,6 +308,7 @@ i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw, i40e_status i40e_nvmupd_command(struct i40e_hw *hw, struct i40e_nvm_access *cmd, u8 *bytes, int *); +void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode); void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status); extern struct i40e_rx_ptype_decoded i40e_ptype_lookup[]; From 17a035be959ef0316ec86adb0c82ed3f057a853b Mon Sep 17 00:00:00 2001 From: Kiran Patil Date: Mon, 4 Apr 2016 07:01:10 -0700 Subject: [PATCH 0387/1649] i40e: Input set mask constants for RSS, flow director, and flex bytes Add defines for input set mask (RSS, flow director, flexible payload), including defines specific to IPv6. 
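As a self-contained illustration of how shift/mask pairs like these are typically consumed (the field name and bit position below are made up, not the hardware input-set layout): the mask isolates the bits of a 64-bit input-set word that describe one field, and the shift places a value into that slot.

    #include <stdint.h>
    #include <stdio.h>

    #define EX_L4_SRC_SHIFT 34                     /* example position only */
    #define EX_L4_SRC_MASK  (0x1ULL << EX_L4_SRC_SHIFT)

    int main(void)
    {
        uint64_t inset = 0;

        inset |= EX_L4_SRC_MASK;                   /* enable the L4 source field */
        printf("L4 src bit set: %d\n", (int)!!(inset & EX_L4_SRC_MASK));

        inset &= ~EX_L4_SRC_MASK;                  /* and mask it back out */
        return 0;
    }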
Change-ID: Ie95ef7d0916a4d6ca011c194283f959774c8dce9 Signed-off-by: Kiran Patil Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_type.h | 33 +++++++++++++++ drivers/net/ethernet/intel/i40evf/i40e_type.h | 42 +++++++++++++++++++ 2 files changed, 75 insertions(+) diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h index bf693580f9c4..793036b259e5 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_type.h +++ b/drivers/net/ethernet/intel/i40e/i40e_type.h @@ -1534,4 +1534,37 @@ struct i40e_lldp_variables { /* RSS Hash Table Size */ #define I40E_PFQF_CTL_0_HASHLUTSIZE_512 0x00010000 + +/* INPUT SET MASK for RSS, flow director, and flexible payload */ +#define I40E_L3_SRC_SHIFT 47 +#define I40E_L3_SRC_MASK (0x3ULL << I40E_L3_SRC_SHIFT) +#define I40E_L3_V6_SRC_SHIFT 43 +#define I40E_L3_V6_SRC_MASK (0xFFULL << I40E_L3_V6_SRC_SHIFT) +#define I40E_L3_DST_SHIFT 35 +#define I40E_L3_DST_MASK (0x3ULL << I40E_L3_DST_SHIFT) +#define I40E_L3_V6_DST_SHIFT 35 +#define I40E_L3_V6_DST_MASK (0xFFULL << I40E_L3_V6_DST_SHIFT) +#define I40E_L4_SRC_SHIFT 34 +#define I40E_L4_SRC_MASK (0x1ULL << I40E_L4_SRC_SHIFT) +#define I40E_L4_DST_SHIFT 33 +#define I40E_L4_DST_MASK (0x1ULL << I40E_L4_DST_SHIFT) +#define I40E_VERIFY_TAG_SHIFT 31 +#define I40E_VERIFY_TAG_MASK (0x3ULL << I40E_VERIFY_TAG_SHIFT) + +#define I40E_FLEX_50_SHIFT 13 +#define I40E_FLEX_50_MASK (0x1ULL << I40E_FLEX_50_SHIFT) +#define I40E_FLEX_51_SHIFT 12 +#define I40E_FLEX_51_MASK (0x1ULL << I40E_FLEX_51_SHIFT) +#define I40E_FLEX_52_SHIFT 11 +#define I40E_FLEX_52_MASK (0x1ULL << I40E_FLEX_52_SHIFT) +#define I40E_FLEX_53_SHIFT 10 +#define I40E_FLEX_53_MASK (0x1ULL << I40E_FLEX_53_SHIFT) +#define I40E_FLEX_54_SHIFT 9 +#define I40E_FLEX_54_MASK (0x1ULL << I40E_FLEX_54_SHIFT) +#define I40E_FLEX_55_SHIFT 8 +#define I40E_FLEX_55_MASK (0x1ULL << I40E_FLEX_55_SHIFT) +#define I40E_FLEX_56_SHIFT 7 +#define I40E_FLEX_56_MASK (0x1ULL << I40E_FLEX_56_SHIFT) +#define I40E_FLEX_57_SHIFT 6 +#define I40E_FLEX_57_MASK (0x1ULL << I40E_FLEX_57_SHIFT) #endif /* _I40E_TYPE_H_ */ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h index d68e017079e3..4a78c18e0b7b 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_type.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_type.h @@ -1330,4 +1330,46 @@ enum i40e_reset_type { /* RSS Hash Table Size */ #define I40E_PFQF_CTL_0_HASHLUTSIZE_512 0x00010000 + +/* INPUT SET MASK for RSS, flow director and flexible payload */ +#define I40E_FD_INSET_L3_SRC_SHIFT 47 +#define I40E_FD_INSET_L3_SRC_WORD_MASK (0x3ULL << \ + I40E_FD_INSET_L3_SRC_SHIFT) +#define I40E_FD_INSET_L3_DST_SHIFT 35 +#define I40E_FD_INSET_L3_DST_WORD_MASK (0x3ULL << \ + I40E_FD_INSET_L3_DST_SHIFT) +#define I40E_FD_INSET_L4_SRC_SHIFT 34 +#define I40E_FD_INSET_L4_SRC_WORD_MASK (0x1ULL << \ + I40E_FD_INSET_L4_SRC_SHIFT) +#define I40E_FD_INSET_L4_DST_SHIFT 33 +#define I40E_FD_INSET_L4_DST_WORD_MASK (0x1ULL << \ + I40E_FD_INSET_L4_DST_SHIFT) +#define I40E_FD_INSET_VERIFY_TAG_SHIFT 31 +#define I40E_FD_INSET_VERIFY_TAG_WORD_MASK (0x3ULL << \ + I40E_FD_INSET_VERIFY_TAG_SHIFT) + +#define I40E_FD_INSET_FLEX_WORD50_SHIFT 17 +#define I40E_FD_INSET_FLEX_WORD50_MASK (0x1ULL << \ + I40E_FD_INSET_FLEX_WORD50_SHIFT) +#define I40E_FD_INSET_FLEX_WORD51_SHIFT 16 +#define I40E_FD_INSET_FLEX_WORD51_MASK (0x1ULL << \ + I40E_FD_INSET_FLEX_WORD51_SHIFT) +#define I40E_FD_INSET_FLEX_WORD52_SHIFT 15 +#define I40E_FD_INSET_FLEX_WORD52_MASK (0x1ULL << \ + 
I40E_FD_INSET_FLEX_WORD52_SHIFT) +#define I40E_FD_INSET_FLEX_WORD53_SHIFT 14 +#define I40E_FD_INSET_FLEX_WORD53_MASK (0x1ULL << \ + I40E_FD_INSET_FLEX_WORD53_SHIFT) +#define I40E_FD_INSET_FLEX_WORD54_SHIFT 13 +#define I40E_FD_INSET_FLEX_WORD54_MASK (0x1ULL << \ + I40E_FD_INSET_FLEX_WORD54_SHIFT) +#define I40E_FD_INSET_FLEX_WORD55_SHIFT 12 +#define I40E_FD_INSET_FLEX_WORD55_MASK (0x1ULL << \ + I40E_FD_INSET_FLEX_WORD55_SHIFT) +#define I40E_FD_INSET_FLEX_WORD56_SHIFT 11 +#define I40E_FD_INSET_FLEX_WORD56_MASK (0x1ULL << \ + I40E_FD_INSET_FLEX_WORD56_SHIFT) +#define I40E_FD_INSET_FLEX_WORD57_SHIFT 10 +#define I40E_FD_INSET_FLEX_WORD57_MASK (0x1ULL << \ + I40E_FD_INSET_FLEX_WORD57_SHIFT) #endif /* _I40E_TYPE_H_ */ From 47c46778e1905721433a413b2522a8e2b3d6c354 Mon Sep 17 00:00:00 2001 From: Harshitha Ramamurthy Date: Fri, 1 Apr 2016 03:56:13 -0700 Subject: [PATCH 0388/1649] i40e/i40evf: Bump patch from 1.5.2 to 1.5.5 Signed-off-by: Harshitha Ramamurthy Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_main.c | 2 +- drivers/net/ethernet/intel/i40evf/i40evf_main.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 627acf0c5fea..dc3b3939dd0a 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -46,7 +46,7 @@ static const char i40e_driver_string[] = #define DRV_VERSION_MAJOR 1 #define DRV_VERSION_MINOR 5 -#define DRV_VERSION_BUILD 2 +#define DRV_VERSION_BUILD 5 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ __stringify(DRV_VERSION_MINOR) "." \ __stringify(DRV_VERSION_BUILD) DRV_KERN diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c index f4dada02bbcf..4659ac2cf035 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c @@ -38,7 +38,7 @@ static const char i40evf_driver_string[] = #define DRV_VERSION_MAJOR 1 #define DRV_VERSION_MINOR 5 -#define DRV_VERSION_BUILD 2 +#define DRV_VERSION_BUILD 5 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ __stringify(DRV_VERSION_MINOR) "." \ __stringify(DRV_VERSION_BUILD) \ From ba6cc7f6f194e3645368f87d951bedd7e3b75f39 Mon Sep 17 00:00:00 2001 From: Mitch Williams Date: Fri, 1 Apr 2016 13:34:31 -0700 Subject: [PATCH 0389/1649] i40evf: properly handle VLAN features Correctly set the VLAN feature flags after setting the rest of the netdev flags. And don't set them in hw_features, because these can't be controlled by the VF driver. 
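A reduced model of the ordering described above (the flag values are illustrative, not the real NETIF_F_* bits): clear the VLAN bits first, derive the user-selectable set from what remains, then add the VLAN offloads back only when the PF granted them, so they are active but never user-toggleable.

    #include <stdint.h>
    #include <stdbool.h>

    #define F_CSUM  0x1u                   /* illustrative feature bits */
    #define F_TSO   0x2u
    #define F_VLAN  0x4u                   /* stands in for the CTAG trio */

    void setup_features(uint32_t *features, uint32_t *hw_features,
                        bool pf_allows_vlan)
    {
        *features &= ~F_VLAN;              /* VLAN support can change on reset */
        *hw_features = *features;          /* user-changeable set, no VLAN bits */
        if (pf_allows_vlan)
            *features |= F_VLAN;           /* enabled, but not user-controlled */
    }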
Signed-off-by: Mitch Williams Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- .../net/ethernet/intel/i40evf/i40evf_main.c | 27 +++++++++---------- 1 file changed, 12 insertions(+), 15 deletions(-) diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c index 4659ac2cf035..9110319a8f00 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c @@ -2323,29 +2323,20 @@ static int i40evf_check_reset_complete(struct i40e_hw *hw) **/ int i40evf_process_config(struct i40evf_adapter *adapter) { + struct i40e_virtchnl_vf_resource *vfres = adapter->vf_res; struct net_device *netdev = adapter->netdev; int i; /* got VF config message back from PF, now we can parse it */ - for (i = 0; i < adapter->vf_res->num_vsis; i++) { - if (adapter->vf_res->vsi_res[i].vsi_type == I40E_VSI_SRIOV) - adapter->vsi_res = &adapter->vf_res->vsi_res[i]; + for (i = 0; i < vfres->num_vsis; i++) { + if (vfres->vsi_res[i].vsi_type == I40E_VSI_SRIOV) + adapter->vsi_res = &vfres->vsi_res[i]; } if (!adapter->vsi_res) { dev_err(&adapter->pdev->dev, "No LAN VSI found\n"); return -ENODEV; } - if (adapter->vf_res->vf_offload_flags - & I40E_VIRTCHNL_VF_OFFLOAD_VLAN) { - netdev->vlan_features = netdev->features & - ~(NETIF_F_HW_VLAN_CTAG_TX | - NETIF_F_HW_VLAN_CTAG_RX | - NETIF_F_HW_VLAN_CTAG_FILTER); - netdev->features |= NETIF_F_HW_VLAN_CTAG_TX | - NETIF_F_HW_VLAN_CTAG_RX | - NETIF_F_HW_VLAN_CTAG_FILTER; - } netdev->features |= NETIF_F_HIGHDMA | NETIF_F_SG | NETIF_F_IP_CSUM | @@ -2354,7 +2345,7 @@ int i40evf_process_config(struct i40evf_adapter *adapter) NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN | - NETIF_F_GSO_GRE | + NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL | NETIF_F_RXCSUM | NETIF_F_GRO; @@ -2371,9 +2362,15 @@ int i40evf_process_config(struct i40evf_adapter *adapter) if (adapter->flags & I40EVF_FLAG_OUTER_UDP_CSUM_CAPABLE) netdev->features |= NETIF_F_GSO_UDP_TUNNEL_CSUM; + /* always clear VLAN features because they can change at every reset */ + netdev->features &= ~(I40EVF_VLAN_FEATURES); /* copy netdev features into list of user selectable features */ netdev->hw_features |= netdev->features; - netdev->hw_features &= ~NETIF_F_RXCSUM; + + if (vfres->vf_offload_flags & I40E_VIRTCHNL_VF_OFFLOAD_VLAN) { + netdev->vlan_features = netdev->features; + netdev->features |= I40EVF_VLAN_FEATURES; + } adapter->vsi.id = adapter->vsi_res->vsi_id; From 5bd0c0202aca1003a244b13792c09b40f73eadc0 Mon Sep 17 00:00:00 2001 From: Jiri Benc Date: Tue, 5 Apr 2016 16:25:07 +0200 Subject: [PATCH 0390/1649] net: intel: remove dead links The Kconfig for Intel NICs references two different URLs for the "Adapter & Driver ID Guide". Neither of those two links works. The current URL seems to be http://www.intel.com/content/www/us/en/support/network-and-i-o/ethernet-products/000005584.html but given it's apparently constantly changing, there's no point in having it in the help text. Just keep a generic pointer to http://support.intel.com. Hopefully, this one will have a longer live. It still works, at least. Furthermore, remove a link to "the latest Intel PRO/100 network driver for Linux", this has no place in the mainline kernel and the latest Linux driver it offers is from 2006, anyway. 
Signed-off-by: Jiri Benc Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/Kconfig | 80 ++++++------------------------ 1 file changed, 14 insertions(+), 66 deletions(-) diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig index 3772f3ac956e..714bd1014ddb 100644 --- a/drivers/net/ethernet/intel/Kconfig +++ b/drivers/net/ethernet/intel/Kconfig @@ -25,16 +25,13 @@ config E100 on the adapter. Look for a label that has a barcode and a number in the format 123456-001 (six digits hyphen three digits). - Use the above information and the Adapter & Driver ID Guide at: + Use the above information and the Adapter & Driver ID Guide that + can be located at: - + to identify the adapter. - For the latest Intel PRO/100 network driver for Linux, see: - - - More specific information on configuring the driver is in . @@ -47,12 +44,7 @@ config E1000 ---help--- This driver supports Intel(R) PRO/1000 gigabit ethernet family of adapters. For more information on how to identify your adapter, go - to the Adapter & Driver ID Guide at: - - - - For general information and support, go to the Intel support - website at: + to the Adapter & Driver ID Guide that can be located at: @@ -71,12 +63,8 @@ config E1000E This driver supports the PCI-Express Intel(R) PRO/1000 gigabit ethernet family of adapters. For PCI or PCI-X e1000 adapters, use the regular e1000 driver For more information on how to - identify your adapter, go to the Adapter & Driver ID Guide at: - - - - For general information and support, go to the Intel support - website at: + identify your adapter, go to the Adapter & Driver ID Guide that + can be located at: @@ -101,12 +89,7 @@ config IGB ---help--- This driver supports Intel(R) 82575/82576 gigabit ethernet family of adapters. For more information on how to identify your adapter, go - to the Adapter & Driver ID Guide at: - - - - For general information and support, go to the Intel support - website at: + to the Adapter & Driver ID Guide that can be located at: @@ -142,12 +125,7 @@ config IGBVF ---help--- This driver supports Intel(R) 82576 virtual functions. For more information on how to identify your adapter, go to the Adapter & - Driver ID Guide at: - - - - For general information and support, go to the Intel support - website at: + Driver ID Guide that can be located at: @@ -164,12 +142,7 @@ config IXGB This driver supports Intel(R) PRO/10GbE family of adapters for PCI-X type cards. For PCI-E type cards, use the "ixgbe" driver instead. For more information on how to identify your adapter, go - to the Adapter & Driver ID Guide at: - - - - For general information and support, go to the Intel support - website at: + to the Adapter & Driver ID Guide that can be located at: @@ -187,12 +160,7 @@ config IXGBE ---help--- This driver supports Intel(R) 10GbE PCI Express family of adapters. For more information on how to identify your adapter, go - to the Adapter & Driver ID Guide at: - - - - For general information and support, go to the Intel support - website at: + to the Adapter & Driver ID Guide that can be located at: @@ -243,12 +211,7 @@ config IXGBEVF ---help--- This driver supports Intel(R) PCI Express virtual functions for the Intel(R) ixgbe driver. 
For more information on how to identify your - adapter, go to the Adapter & Driver ID Guide at: - - - - For general information and support, go to the Intel support - website at: + adapter, go to the Adapter & Driver ID Guide that can be located at: @@ -266,12 +229,7 @@ config I40E ---help--- This driver supports Intel(R) Ethernet Controller XL710 Family of devices. For more information on how to identify your adapter, go - to the Adapter & Driver ID Guide at: - - - - For general information and support, go to the Intel support - website at: + to the Adapter & Driver ID Guide that can be located at: @@ -326,12 +284,7 @@ config I40EVF ---help--- This driver supports Intel(R) XL710 and X710 virtual functions. For more information on how to identify your adapter, go to the - Adapter & Driver ID Guide at: - - - - For general information and support, go to the Intel support - website at: + Adapter & Driver ID Guide that can be located at: @@ -347,12 +300,7 @@ config FM10K ---help--- This driver supports Intel(R) FM10000 Ethernet Switch Host Interface. For more information on how to identify your adapter, - go to the Adapter & Driver ID Guide at: - - - - For general information and support, go to the Intel support - website at: + go to the Adapter & Driver ID Guide that can be located at: From d99e366fc90c9b6e6197584ecd3a185441452b0c Mon Sep 17 00:00:00 2001 From: Arika Chen Date: Wed, 6 Apr 2016 21:02:11 -0700 Subject: [PATCH 0391/1649] Revert "igb: Fix a deadlock in igb_sriov_reinit" This reverts commit 3eb14ea8d958 ("igb: Fix a deadlock in igb_sriov_reinit") It is the same as commit f468adc944ef ("igb: missing rtnl_unlock in igb_sriov_reinit()") There is no rtnl_lock() in igb_resume before, rtnl_unlock will cause a deadlock. Signed-off-by: Arika Chen Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/igb/igb_main.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index ff0476c89438..8e96c35307fb 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -7579,7 +7579,6 @@ static int igb_resume(struct device *dev) if (igb_init_interrupt_scheme(adapter, true)) { dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); - rtnl_unlock(); return -ENOMEM; } From b131129d96575479e2447d134cb1797cf430b3a4 Mon Sep 17 00:00:00 2001 From: Rajkumar Manoharan Date: Thu, 7 Apr 2016 12:07:29 +0530 Subject: [PATCH 0392/1649] ath10k: fix calibration init sequence of qca99x0 pre-calibration is meant for qca4019 which contains only caldata whereas calibration file is used by ar9888 and qca99x0 that contains both board data and caldata. So by definition both pre-cal-file and cal-file can not coexist. Keeping them in shared memory (union), is breaking boot sequence of qca99x0. Fix it by storing both binaries in separate memories. This issue is reported in ipq8064 platform which includes caldata in flash memory. 
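A standalone illustration of why the shared storage broke the boot sequence (simplified structs, not the ath10k types): members of a union overlap, so storing the second firmware pointer silently invalidates the first, while separate members let both stay valid at once.

    #include <stdio.h>

    struct fw_blob { const char *name; };

    /* before: overlapping storage */
    union cal_union {
        struct fw_blob *pre_cal_file;
        struct fw_blob *cal_file;
    };

    /* after: independent fields, both can be held at the same time */
    struct cal_fixed {
        struct fw_blob *pre_cal_file;
        struct fw_blob *cal_file;
    };

    int main(void)
    {
        struct fw_blob pre = { "pre-cal" }, cal = { "cal" };
        union cal_union u = { .pre_cal_file = &pre };

        u.cal_file = &cal;                 /* clobbers pre_cal_file */
        printf("union now sees: %s\n", u.cal_file->name);
        return 0;
    }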
Fixes: 3d9195ea19e4 ("ath10k: incorporate qca4019 cal data download sequence") Reported-by: Sebastian Gottschall Signed-off-by: Rajkumar Manoharan Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/core.h | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h index c23c37312ef7..d85b99164212 100644 --- a/drivers/net/wireless/ath/ath10k/core.h +++ b/drivers/net/wireless/ath/ath10k/core.h @@ -728,10 +728,8 @@ struct ath10k { const void *firmware_data; size_t firmware_len; - union { - const struct firmware *pre_cal_file; - const struct firmware *cal_file; - }; + const struct firmware *pre_cal_file; + const struct firmware *cal_file; struct { const void *firmware_codeswap_data; From dd7c280f9bf5ee6c7c46f03b2064f9f8fb617183 Mon Sep 17 00:00:00 2001 From: Rajkumar Manoharan Date: Thu, 7 Apr 2016 12:07:30 +0530 Subject: [PATCH 0393/1649] ath10k: remove unnecessary warning for probe response drops qca99x0 and qca4019 solutions limit probe responses transmissions. Logging warning message for each probe response drop is flooding kernel log unnecessary with " failed to increase tx mgmt pending count: -16, dropping". Hence reducing log level to debug. Reported-by: Sebastian Gottschall Signed-off-by: Rajkumar Manoharan Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/mac.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index 20d72e29dfa1..b0e613bc10a5 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -3994,8 +3994,8 @@ static void ath10k_mac_op_tx(struct ieee80211_hw *hw, ret = ath10k_htt_tx_mgmt_inc_pending(htt, is_mgmt, is_presp); if (ret) { - ath10k_warn(ar, "failed to increase tx mgmt pending count: %d, dropping\n", - ret); + ath10k_dbg(ar, ATH10K_DBG_MAC, "failed to increase tx mgmt pending count: %d, dropping\n", + ret); ath10k_htt_tx_dec_pending(htt); spin_unlock_bh(&ar->htt.tx_lock); ieee80211_free_txskb(ar->hw, skb); From 689de38e37179c6f524dd003e1dae92042f8f5cd Mon Sep 17 00:00:00 2001 From: Rajkumar Manoharan Date: Thu, 7 Apr 2016 12:07:31 +0530 Subject: [PATCH 0394/1649] ath10k: fix unconditional num_mpdus_ready subtraction Decrement num_mpdus_ready only when rx amsdu is processed successfully. Not doing so, will result in leak and impact stabilty under low memory cases. Also commit 3128b3d8a2b9 ("ath10k: speedup htt rx descriptor processing for rx_ind") missed to removed unused skb list rx_q. 
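A minimal model of the counting change (plain integers instead of the driver's atomic counter, and a made-up failure): subtracting the whole batch before the loop loses track of MPDUs that were never handled, while decrementing only after a successful iteration leaves the right amount pending when processing stops early.

    #include <stdio.h>

    static int handle_one(int i)           /* pretend entry 3 fails, e.g. no memory */
    {
        return i == 3 ? -1 : 0;
    }

    int main(void)
    {
        int ready = 5;
        int budget = ready;

        while (budget) {
            if (handle_one(budget))
                break;                     /* stop early, leave the rest counted */
            budget--;
            ready--;                       /* only consumed work is subtracted */
        }
        printf("still pending: %d\n", ready);
        return 0;
    }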
Fixes: 3128b3d8a2b9 ("ath10k: speedup htt rx descriptor processing for rx_ind") Signed-off-by: Rajkumar Manoharan Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/htt_rx.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c index 592421ec5635..6a2d2643de42 100644 --- a/drivers/net/wireless/ath/ath10k/htt_rx.c +++ b/drivers/net/wireless/ath/ath10k/htt_rx.c @@ -2412,14 +2412,12 @@ static void ath10k_htt_txrx_compl_task(unsigned long ptr) struct ath10k_htt *htt = (struct ath10k_htt *)ptr; struct ath10k *ar = htt->ar; struct htt_tx_done tx_done = {}; - struct sk_buff_head rx_q; struct sk_buff_head rx_ind_q; struct sk_buff_head tx_ind_q; struct sk_buff *skb; unsigned long flags; int num_mpdus; - __skb_queue_head_init(&rx_q); __skb_queue_head_init(&rx_ind_q); __skb_queue_head_init(&tx_ind_q); @@ -2447,11 +2445,13 @@ static void ath10k_htt_txrx_compl_task(unsigned long ptr) ath10k_mac_tx_push_pending(ar); num_mpdus = atomic_read(&htt->num_mpdus_ready); - atomic_sub(num_mpdus, &htt->num_mpdus_ready); - while (num_mpdus--) { + while (num_mpdus) { if (ath10k_htt_rx_handle_amsdu(htt)) break; + + num_mpdus--; + atomic_dec(&htt->num_mpdus_ready); } while ((skb = __skb_dequeue(&rx_ind_q))) { From 8501786929de4616b10b8059ad97abd304a7dddf Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Wed, 6 Apr 2016 22:07:34 -0700 Subject: [PATCH 0395/1649] tcp/dccp: fix inet_reuseport_add_sock() David Ahern reported panics in __inet_hash() caused by my recent commit. The reason is inet_reuseport_add_sock() was still using sk_nulls_for_each_rcu() instead of sk_for_each_rcu(). SO_REUSEPORT enabled listeners were causing an instant crash. While chasing this bug, I found that I forgot to clear SOCK_RCU_FREE flag, as it is inherited from the parent at clone time. Fixes: 3b24d854cb35 ("tcp/dccp: do not touch listener sk_refcnt under synflood") Signed-off-by: Eric Dumazet Reported-by: David Ahern Tested-by: David Ahern Signed-off-by: David S. Miller --- net/ipv4/inet_connection_sock.c | 3 +++ net/ipv4/inet_hashtables.c | 3 +-- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index bc5196ea1bdf..ab69da2d2a77 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c @@ -661,6 +661,9 @@ struct sock *inet_csk_clone_lock(const struct sock *sk, inet_sk(newsk)->inet_sport = htons(inet_rsk(req)->ir_num); newsk->sk_write_space = sk_stream_write_space; + /* listeners have SOCK_RCU_FREE, not the children */ + sock_reset_flag(newsk, SOCK_RCU_FREE); + newsk->sk_mark = inet_rsk(req)->ir_mark; atomic64_set(&newsk->sk_cookie, atomic64_read(&inet_rsk(req)->ir_cookie)); diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c index 98ba03b6f87d..fcadb670f50b 100644 --- a/net/ipv4/inet_hashtables.c +++ b/net/ipv4/inet_hashtables.c @@ -439,10 +439,9 @@ static int inet_reuseport_add_sock(struct sock *sk, bool match_wildcard)) { struct sock *sk2; - struct hlist_nulls_node *node; kuid_t uid = sock_i_uid(sk); - sk_nulls_for_each_rcu(sk2, node, &ilb->head) { + sk_for_each_rcu(sk2, &ilb->head) { if (sk2 != sk && sk2->sk_family == sk->sk_family && ipv6_only_sock(sk2) == ipv6_only_sock(sk) && From 0fef3c768037169f656fc4ae89cf88ff7175e586 Mon Sep 17 00:00:00 2001 From: Ivan Safonov Date: Fri, 18 Mar 2016 13:16:26 +1100 Subject: [PATCH 0396/1649] ath9k: Remove unnecessary ?: operator "(thermometer < 0) ? 
0 : (thermometer == X)" is equivalent to "thermometer == X" for X >= 0. Signed-off-by: Ivan Safonov [Updated commit message] Signed-off-by: Julian Calaby Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath9k/ar9003_eeprom.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c index 54ed2f72d35e..a049f8d34f99 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c @@ -4097,16 +4097,16 @@ static void ar9003_hw_thermometer_apply(struct ath_hw *ah) REG_RMW_FIELD(ah, AR_PHY_65NM_CH2_RXTX4, AR_PHY_65NM_CH0_RXTX4_THERM_ON_OVR, therm_on); - therm_on = (thermometer < 0) ? 0 : (thermometer == 0); + therm_on = thermometer == 0; REG_RMW_FIELD(ah, AR_PHY_65NM_CH0_RXTX4, AR_PHY_65NM_CH0_RXTX4_THERM_ON, therm_on); if (pCap->chip_chainmask & BIT(1)) { - therm_on = (thermometer < 0) ? 0 : (thermometer == 1); + therm_on = thermometer == 1; REG_RMW_FIELD(ah, AR_PHY_65NM_CH1_RXTX4, AR_PHY_65NM_CH0_RXTX4_THERM_ON, therm_on); } if (pCap->chip_chainmask & BIT(2)) { - therm_on = (thermometer < 0) ? 0 : (thermometer == 2); + therm_on = thermometer == 2; REG_RMW_FIELD(ah, AR_PHY_65NM_CH2_RXTX4, AR_PHY_65NM_CH0_RXTX4_THERM_ON, therm_on); } From ea544aab42dbf35c7b8e80f931db400f4b5add60 Mon Sep 17 00:00:00 2001 From: Geliang Tang Date: Fri, 18 Mar 2016 13:20:59 +1100 Subject: [PATCH 0397/1649] ipw2x00: use to_pci_dev() Use to_pci_dev() instead of open-coding it. Signed-off-by: Geliang Tang Signed-off-by: Julian Calaby Acked-by: Stanislav Yakovlev Signed-off-by: Kalle Valo --- drivers/net/wireless/intel/ipw2x00/ipw2100.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2100.c b/drivers/net/wireless/intel/ipw2x00/ipw2100.c index f93a7f71c047..717320b17622 100644 --- a/drivers/net/wireless/intel/ipw2x00/ipw2100.c +++ b/drivers/net/wireless/intel/ipw2x00/ipw2100.c @@ -3521,7 +3521,7 @@ static void ipw2100_msg_free(struct ipw2100_priv *priv) static ssize_t show_pci(struct device *d, struct device_attribute *attr, char *buf) { - struct pci_dev *pci_dev = container_of(d, struct pci_dev, dev); + struct pci_dev *pci_dev = to_pci_dev(d); char *out = buf; int i, j; u32 val; From 61383412f00d5917a28f388c59ebd78cf7c9d909 Mon Sep 17 00:00:00 2001 From: Geliang Tang Date: Fri, 18 Mar 2016 13:21:28 +1100 Subject: [PATCH 0398/1649] wlcore: use to_delayed_work() Use to_delayed_work() instead of open-coding it. 
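For reference, to_delayed_work() in the workqueue API is a small container_of() wrapper; the self-contained sketch below shows the same pointer arithmetic with simplified struct definitions (not the kernel headers).

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct work_struct { int pending; };
    struct delayed_work { struct work_struct work; long timer; };   /* simplified */

    /* replaces open-coded container_of(work, struct delayed_work, work) */
    static struct delayed_work *to_delayed_work_demo(struct work_struct *work)
    {
        return container_of(work, struct delayed_work, work);
    }

    int main(void)
    {
        struct delayed_work dwork = { .timer = 42 };

        printf("%ld\n", to_delayed_work_demo(&dwork.work)->timer);
        return 0;
    }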
Signed-off-by: Geliang Tang [Update commit message] Signed-off-by: Julian Calaby Signed-off-by: Kalle Valo --- drivers/net/wireless/ti/wlcore/main.c | 10 +++++----- drivers/net/wireless/ti/wlcore/ps.c | 2 +- drivers/net/wireless/ti/wlcore/scan.c | 2 +- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c index dde36203ca42..a872a07a484c 100644 --- a/drivers/net/wireless/ti/wlcore/main.c +++ b/drivers/net/wireless/ti/wlcore/main.c @@ -243,7 +243,7 @@ static void wl12xx_tx_watchdog_work(struct work_struct *work) struct delayed_work *dwork; struct wl1271 *wl; - dwork = container_of(work, struct delayed_work, work); + dwork = to_delayed_work(work); wl = container_of(dwork, struct wl1271, tx_watchdog_work); mutex_lock(&wl->mutex); @@ -2011,7 +2011,7 @@ static void wlcore_channel_switch_work(struct work_struct *work) struct wl12xx_vif *wlvif; int ret; - dwork = container_of(work, struct delayed_work, work); + dwork = to_delayed_work(work); wlvif = container_of(dwork, struct wl12xx_vif, channel_switch_work); wl = wlvif->wl; @@ -2047,7 +2047,7 @@ static void wlcore_connection_loss_work(struct work_struct *work) struct ieee80211_vif *vif; struct wl12xx_vif *wlvif; - dwork = container_of(work, struct delayed_work, work); + dwork = to_delayed_work(work); wlvif = container_of(dwork, struct wl12xx_vif, connection_loss_work); wl = wlvif->wl; @@ -2076,7 +2076,7 @@ static void wlcore_pending_auth_complete_work(struct work_struct *work) unsigned long time_spare; int ret; - dwork = container_of(work, struct delayed_work, work); + dwork = to_delayed_work(work); wlvif = container_of(dwork, struct wl12xx_vif, pending_auth_complete_work); wl = wlvif->wl; @@ -5588,7 +5588,7 @@ static void wlcore_roc_complete_work(struct work_struct *work) struct wl1271 *wl; int ret; - dwork = container_of(work, struct delayed_work, work); + dwork = to_delayed_work(work); wl = container_of(dwork, struct wl1271, roc_complete_work); ret = wlcore_roc_completed(wl); diff --git a/drivers/net/wireless/ti/wlcore/ps.c b/drivers/net/wireless/ti/wlcore/ps.c index 4cd316e61466..d4420da637d8 100644 --- a/drivers/net/wireless/ti/wlcore/ps.c +++ b/drivers/net/wireless/ti/wlcore/ps.c @@ -38,7 +38,7 @@ void wl1271_elp_work(struct work_struct *work) struct wl12xx_vif *wlvif; int ret; - dwork = container_of(work, struct delayed_work, work); + dwork = to_delayed_work(work); wl = container_of(dwork, struct wl1271, elp_work); wl1271_debug(DEBUG_PSM, "elp work"); diff --git a/drivers/net/wireless/ti/wlcore/scan.c b/drivers/net/wireless/ti/wlcore/scan.c index 1e3d51cd673a..a384f3f83099 100644 --- a/drivers/net/wireless/ti/wlcore/scan.c +++ b/drivers/net/wireless/ti/wlcore/scan.c @@ -38,7 +38,7 @@ void wl1271_scan_complete_work(struct work_struct *work) struct wl12xx_vif *wlvif; int ret; - dwork = container_of(work, struct delayed_work, work); + dwork = to_delayed_work(work); wl = container_of(dwork, struct wl1271, scan_complete_work); wl1271_debug(DEBUG_SCAN, "Scanning complete"); From d1162f0283f0b4421d1098dd048a5e3cf8b2abb6 Mon Sep 17 00:00:00 2001 From: Geliang Tang Date: Fri, 18 Mar 2016 13:22:03 +1100 Subject: [PATCH 0399/1649] wl1251: use to_delayed_work() Use to_delayed_work() instead of open-coding it. 
Signed-off-by: Geliang Tang [Update commit message] Signed-off-by: Julian Calaby Signed-off-by: Kalle Valo --- drivers/net/wireless/ti/wl1251/ps.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/ti/wl1251/ps.c b/drivers/net/wireless/ti/wl1251/ps.c index b9e27b98bbc9..fa01b0a0f312 100644 --- a/drivers/net/wireless/ti/wl1251/ps.c +++ b/drivers/net/wireless/ti/wl1251/ps.c @@ -32,7 +32,7 @@ void wl1251_elp_work(struct work_struct *work) struct delayed_work *dwork; struct wl1251 *wl; - dwork = container_of(work, struct delayed_work, work); + dwork = to_delayed_work(work); wl = container_of(dwork, struct wl1251, elp_work); wl1251_debug(DEBUG_PSM, "elp work"); From 4679f41322012cf69b1035cde3de81151d2aefec Mon Sep 17 00:00:00 2001 From: Geliang Tang Date: Fri, 18 Mar 2016 13:22:24 +1100 Subject: [PATCH 0400/1649] rtlwifi: use to_delayed_work() Use to_delayed_work() instead of open-coding it. Signed-off-by: Geliang Tang [Update commit message] Signed-off-by: Julian Calaby Acked-by: Larry Finger Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtlwifi/wifi.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h index 93bd7fcd2b61..389dc47776c0 100644 --- a/drivers/net/wireless/realtek/rtlwifi/wifi.h +++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h @@ -2870,7 +2870,7 @@ value to host byte ordering.*/ (ppsc->cur_ps_level |= _ps_flg) #define container_of_dwork_rtl(x, y, z) \ - container_of(container_of(x, struct delayed_work, work), y, z) + container_of(to_delayed_work(x), y, z) #define FILL_OCTET_STRING(_os, _octet, _len) \ (_os).octet = (u8 *)(_octet); \ From cfbfbd13695c8f9a93b1ad3edeeedacbb86dbe5c Mon Sep 17 00:00:00 2001 From: Markus Elfring Date: Fri, 18 Mar 2016 13:22:52 +1100 Subject: [PATCH 0401/1649] ath9k_htc: Delete unnecessary variable initialisation In ath9k_hif_usb_rx_stream(), i is initialised in the for loop it's used in. Signed-off-by: Markus Elfring Reviewed-by: Oleksij Rempel [Rewrote commit message] Signed-off-by: Julian Calaby Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath9k/hif_usb.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c index 8cbf4904db7b..e1c338cb9cb5 100644 --- a/drivers/net/wireless/ath/ath9k/hif_usb.c +++ b/drivers/net/wireless/ath/ath9k/hif_usb.c @@ -527,7 +527,7 @@ static void ath9k_hif_usb_rx_stream(struct hif_device_usb *hif_dev, struct sk_buff *skb) { struct sk_buff *nskb, *skb_pool[MAX_PKT_NUM_IN_TRANSFER]; - int index = 0, i = 0, len = skb->len; + int index = 0, i, len = skb->len; int rx_remain_len, rx_pkt_len; u16 pool_index = 0; u8 *ptr; From 9e12904a953c46abc87b0ea157be8de90205b70d Mon Sep 17 00:00:00 2001 From: Markus Elfring Date: Fri, 18 Mar 2016 13:23:24 +1100 Subject: [PATCH 0402/1649] brcmfmac: Delete unnecessary variable initialisation In brcmf_sdio_download_firmware(), bcmerror is set by the call to brcmf_sdio_download_code_file(), before it's checked in the following line. 
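Dropping the dead "= -EFAULT" store is worth doing beyond style: an unconditional initializer can hide a real uninitialized-use warning if a later refactor adds a path that skips the assignment. Reduced to its essence (illustrative only; the argument list of the download call is assumed here, not taken from the hunk):

    int bcmerror;                           /* no dead initializer */

    bcmerror = brcmf_sdio_download_code_file(bus, fw);
    if (bcmerror)
            goto err;                       /* bail out on download failure */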
Signed-off-by: Markus Elfring Acked-by: Arend van Spriel [Rewrote commit message] Signed-off-by: Julian Calaby Signed-off-by: Kalle Valo --- drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c index cd92ba77ecfd..48d7467d270e 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c @@ -3258,7 +3258,7 @@ static int brcmf_sdio_download_firmware(struct brcmf_sdio *bus, const struct firmware *fw, void *nvram, u32 nvlen) { - int bcmerror = -EFAULT; + int bcmerror; u32 rstvec; sdio_claim_host(bus->sdiodev->func[1]); From fb9693f04544068e6176051ce5b96e4574730107 Mon Sep 17 00:00:00 2001 From: Markus Elfring Date: Fri, 18 Mar 2016 13:23:46 +1100 Subject: [PATCH 0403/1649] iwlegacy: Return directly if allocation fails in il_eeprom_init() Also remove an unused label. Signed-off-by: Markus Elfring Acked-by: Stanislaw Gruszka [Rewrote commit message] Signed-off-by: Julian Calaby Signed-off-by: Kalle Valo --- drivers/net/wireless/intel/iwlegacy/common.c | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/drivers/net/wireless/intel/iwlegacy/common.c b/drivers/net/wireless/intel/iwlegacy/common.c index eb5cb603bc52..c3afaf71066e 100644 --- a/drivers/net/wireless/intel/iwlegacy/common.c +++ b/drivers/net/wireless/intel/iwlegacy/common.c @@ -723,10 +723,9 @@ il_eeprom_init(struct il_priv *il) sz = il->cfg->eeprom_size; D_EEPROM("NVM size = %d\n", sz); il->eeprom = kzalloc(sz, GFP_KERNEL); - if (!il->eeprom) { - ret = -ENOMEM; - goto alloc_err; - } + if (!il->eeprom) + return -ENOMEM; + e = (__le16 *) il->eeprom; il->ops->apm_init(il); @@ -778,7 +777,6 @@ err: il_eeprom_free(il); /* Reset chip to save power until we load uCode during "up". */ il_apm_stop(il); -alloc_err: return ret; } EXPORT_SYMBOL(il_eeprom_init); From fe9b47944edff9b6244c4f5e81bd7b50574dc22b Mon Sep 17 00:00:00 2001 From: Jia-Ju Bai Date: Fri, 18 Mar 2016 13:24:06 +1100 Subject: [PATCH 0404/1649] iwl4965: Fix a null pointer dereference in il_tx_queue_free and il_cmd_queue_free If "txq->cmd = kzalloc(...)" in il_tx_queue_init fails, "kfree(txq->cmd[i])" in il_tx_queue_free and il_cmd_queue_free in iwl4965_hw_txq_ctx_free will causes a null pointer dereference, because txq->cmd is NULL at that time. This patch fixes this problem by adding a if-check before kfree. To avoid double free in il_tx_queue_free and il_cmd_queue_free caused by the fixing, txq->meta and txq->cmd in error handling code of il_tx_queue_init are assigned null values. Otherwise, a double free will occur. This patch has been tested in real device, and it actually fixes the bug. Thanks Stanislaw for his suggestion. 
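The underlying pattern is to make teardown idempotent: only walk the command array when it was actually allocated, and clear txq->cmd / txq->meta once released so that a second cleanup pass (here, iwl4965_hw_txq_ctx_free running after a failed il_tx_queue_init) finds NULL and does nothing. A rough sketch, with "slots" standing in for TFD_TX_CMD_SLOTS or TFD_CMD_SLOTS (not the literal driver code):

    if (txq->cmd) {
            for (i = 0; i < slots; i++)
                    kfree(txq->cmd[i]);     /* kfree(NULL) is a no-op */
            kfree(txq->cmd);
            txq->cmd = NULL;                /* a later pass skips this block */
    }
    kfree(txq->meta);
    txq->meta = NULL;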
Signed-off-by: Jia-Ju Bai Acked-by: Stanislaw Gruszka Signed-off-by: Julian Calaby Signed-off-by: Kalle Valo --- drivers/net/wireless/intel/iwlegacy/common.c | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/drivers/net/wireless/intel/iwlegacy/common.c b/drivers/net/wireless/intel/iwlegacy/common.c index c3afaf71066e..2cc3d42bbab7 100644 --- a/drivers/net/wireless/intel/iwlegacy/common.c +++ b/drivers/net/wireless/intel/iwlegacy/common.c @@ -2792,8 +2792,10 @@ il_tx_queue_free(struct il_priv *il, int txq_id) il_tx_queue_unmap(il, txq_id); /* De-alloc array of command/tx buffers */ - for (i = 0; i < TFD_TX_CMD_SLOTS; i++) - kfree(txq->cmd[i]); + if (txq->cmd) { + for (i = 0; i < TFD_TX_CMD_SLOTS; i++) + kfree(txq->cmd[i]); + } /* De-alloc circular buffer of TFDs */ if (txq->q.n_bd) @@ -2871,8 +2873,10 @@ il_cmd_queue_free(struct il_priv *il) il_cmd_queue_unmap(il); /* De-alloc array of command/tx buffers */ - for (i = 0; i <= TFD_CMD_SLOTS; i++) - kfree(txq->cmd[i]); + if (txq->cmd) { + for (i = 0; i <= TFD_CMD_SLOTS; i++) + kfree(txq->cmd[i]); + } /* De-alloc circular buffer of TFDs */ if (txq->q.n_bd) @@ -3078,7 +3082,9 @@ err: kfree(txq->cmd[i]); out_free_arrays: kfree(txq->meta); + txq->meta = NULL; kfree(txq->cmd); + txq->cmd = NULL; return -ENOMEM; } From 96838d61102a0fca20a7bda7289e0492aaf11896 Mon Sep 17 00:00:00 2001 From: Jia-Ju Bai Date: Fri, 18 Mar 2016 13:24:28 +1100 Subject: [PATCH 0405/1649] b43: Fix memory leaks in b43_bus_dev_ssb_init and b43_bus_dev_bcma_init The memory allocated by kzalloc in b43_bus_dev_ssb_init and b43_bus_dev_bcma_init is not freed. This patch fixes the bug by adding kfree in b43_ssb_remove, b43_bcma_remove and error handling code of b43_bcma_probe. Thanks Michael for his suggestion. Signed-off-by: Jia-Ju Bai Tested-by: Sudip Mukherjee Signed-off-by: Julian Calaby Signed-off-by: Kalle Valo --- drivers/net/wireless/broadcom/b43/main.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/broadcom/b43/main.c b/drivers/net/wireless/broadcom/b43/main.c index 72380af9dc52..b0603e796ad8 100644 --- a/drivers/net/wireless/broadcom/b43/main.c +++ b/drivers/net/wireless/broadcom/b43/main.c @@ -5680,11 +5680,12 @@ static int b43_bcma_probe(struct bcma_device *core) INIT_WORK(&wl->firmware_load, b43_request_firmware); schedule_work(&wl->firmware_load); -bcma_out: return err; bcma_err_wireless_exit: ieee80211_free_hw(wl->hw); +bcma_out: + kfree(dev); return err; } @@ -5712,8 +5713,8 @@ static void b43_bcma_remove(struct bcma_device *core) b43_rng_exit(wl); b43_leds_unregister(wl); - ieee80211_free_hw(wl->hw); + kfree(wldev->dev); } static struct bcma_driver b43_bcma_driver = { @@ -5796,6 +5797,7 @@ static void b43_ssb_remove(struct ssb_device *sdev) b43_leds_unregister(wl); b43_wireless_exit(dev, wl); + kfree(dev); } static struct ssb_driver b43_ssb_driver = { From 1c76b4902c26c73283bb3578829dd0cfe53ce10a Mon Sep 17 00:00:00 2001 From: Jia-Ju Bai Date: Fri, 18 Mar 2016 13:24:51 +1100 Subject: [PATCH 0406/1649] rtl818x_pci: Disable pci device in error handling code When pci_request_regions in rtl8180_probe fails, pci_disable_device is not called to disable the device which is enabled by pci_enbale_device. This patch fixes the problem by adding a new lable in error handling code. 
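This follows the standard probe() unwind idiom: resources are acquired in order, each failure jumps to the label that releases everything acquired so far, and the labels run in reverse order of acquisition. A minimal sketch of the resulting shape (illustrative, not the full rtl8180_probe(); the region name string is a placeholder):

    err = pci_enable_device(pdev);
    if (err)
            return err;

    err = pci_request_regions(pdev, "rtl8180");
    if (err)
            goto err_disable_dev;

    /* ... further setup; later failures use goto err_free_reg ... */

    return 0;

 err_free_reg:
    pci_release_regions(pdev);
 err_disable_dev:
    pci_disable_device(pdev);
    return err;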
Signed-off-by: Jia-Ju Bai Acked-by: Andrea Merello Signed-off-by: Julian Calaby Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c b/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c index a43a16fde59d..c76af5d8b8e0 100644 --- a/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c +++ b/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c @@ -1736,7 +1736,7 @@ static int rtl8180_probe(struct pci_dev *pdev, if (err) { printk(KERN_ERR "%s (rtl8180): Cannot obtain PCI resources\n", pci_name(pdev)); - return err; + goto err_disable_dev; } io_addr = pci_resource_start(pdev, 0); @@ -1938,6 +1938,8 @@ static int rtl8180_probe(struct pci_dev *pdev, err_free_reg: pci_release_regions(pdev); + + err_disable_dev: pci_disable_device(pdev); return err; } From 8b28310efe241339248e875400c6da16f5d91c1f Mon Sep 17 00:00:00 2001 From: Markus Elfring Date: Fri, 18 Mar 2016 13:25:13 +1100 Subject: [PATCH 0407/1649] rsi: Delete unnecessary variable initialisation In rsi_send_mgmt_pkt(), the following variables are assigned to before they're used: * wh - Assigned on line 161, first used on line 180 * bss - Assigned on line 160, first used on line 196 * msg - Assigned on line 168, first used on line 175 * extnd_size - Assigned on line 139, first used on line 142 Signed-off-by: Markus Elfring [Rewrote commit message] Signed-off-by: Julian Calaby Signed-off-by: Kalle Valo --- drivers/net/wireless/rsi/rsi_91x_pkt.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/net/wireless/rsi/rsi_91x_pkt.c b/drivers/net/wireless/rsi/rsi_91x_pkt.c index 702593f19997..571eaba368dc 100644 --- a/drivers/net/wireless/rsi/rsi_91x_pkt.c +++ b/drivers/net/wireless/rsi/rsi_91x_pkt.c @@ -123,15 +123,15 @@ int rsi_send_mgmt_pkt(struct rsi_common *common, struct sk_buff *skb) { struct rsi_hw *adapter = common->priv; - struct ieee80211_hdr *wh = NULL; + struct ieee80211_hdr *wh; struct ieee80211_tx_info *info; - struct ieee80211_bss_conf *bss = NULL; + struct ieee80211_bss_conf *bss; struct ieee80211_hw *hw = adapter->hw; struct ieee80211_conf *conf = &hw->conf; struct skb_info *tx_params; int status = -E2BIG; - __le16 *msg = NULL; - u8 extnd_size = 0; + __le16 *msg; + u8 extnd_size; u8 vap_id = 0; info = IEEE80211_SKB_CB(skb); From ab2ef1d68f62d9e2ec6e494668f288fc000fe886 Mon Sep 17 00:00:00 2001 From: Markus Elfring Date: Fri, 18 Mar 2016 13:25:33 +1100 Subject: [PATCH 0408/1649] rsi: Delete unnecessary variable initialisation In rsi_send_data_pkt(), the following variables are assigned to before they're used: * tmp_hdr - Assigned on line 47, first used on line 48 * bss - Assigned on line 41, first used on line 44 * extnd_size - Assigned on line 50, first used on line 52 * seq_num - Assigned on line 48, first used on line 96 Signed-off-by: Markus Elfring [Rewrote commit message] Signed-off-by: Julian Calaby Signed-off-by: Kalle Valo --- drivers/net/wireless/rsi/rsi_91x_pkt.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/net/wireless/rsi/rsi_91x_pkt.c b/drivers/net/wireless/rsi/rsi_91x_pkt.c index 571eaba368dc..4322df1fefdc 100644 --- a/drivers/net/wireless/rsi/rsi_91x_pkt.c +++ b/drivers/net/wireless/rsi/rsi_91x_pkt.c @@ -27,15 +27,15 @@ int rsi_send_data_pkt(struct rsi_common *common, struct sk_buff *skb) { struct rsi_hw *adapter = common->priv; - struct ieee80211_hdr *tmp_hdr = NULL; + struct ieee80211_hdr *tmp_hdr; struct 
ieee80211_tx_info *info; struct skb_info *tx_params; - struct ieee80211_bss_conf *bss = NULL; + struct ieee80211_bss_conf *bss; int status = -EINVAL; u8 ieee80211_size = MIN_802_11_HDR_LEN; - u8 extnd_size = 0; + u8 extnd_size; __le16 *frame_desc; - u16 seq_num = 0; + u16 seq_num; info = IEEE80211_SKB_CB(skb); bss = &info->control.vif->bss_conf; From 37190b2694911552c09119e2b23e65049bf47a1e Mon Sep 17 00:00:00 2001 From: Markus Elfring Date: Fri, 18 Mar 2016 13:27:31 +1100 Subject: [PATCH 0409/1649] rsi: Move variable initialisation into error code In rsi_send_data_pkt(), it's a little more logical to assign 'status' in the actual error handling code as opposed to at the top of the functon. Signed-off-by: Markus Elfring [Deleted controversial bits, rewrote commit message] Signed-off-by: Julian Calaby Signed-off-by: Kalle Valo --- drivers/net/wireless/rsi/rsi_91x_pkt.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/rsi/rsi_91x_pkt.c b/drivers/net/wireless/rsi/rsi_91x_pkt.c index 4322df1fefdc..a0b31c0cf25b 100644 --- a/drivers/net/wireless/rsi/rsi_91x_pkt.c +++ b/drivers/net/wireless/rsi/rsi_91x_pkt.c @@ -31,7 +31,7 @@ int rsi_send_data_pkt(struct rsi_common *common, struct sk_buff *skb) struct ieee80211_tx_info *info; struct skb_info *tx_params; struct ieee80211_bss_conf *bss; - int status = -EINVAL; + int status; u8 ieee80211_size = MIN_802_11_HDR_LEN; u8 extnd_size; __le16 *frame_desc; @@ -41,8 +41,10 @@ int rsi_send_data_pkt(struct rsi_common *common, struct sk_buff *skb) bss = &info->control.vif->bss_conf; tx_params = (struct skb_info *)info->driver_data; - if (!bss->assoc) + if (!bss->assoc) { + status = -EINVAL; goto err; + } tmp_hdr = (struct ieee80211_hdr *)&skb->data[0]; seq_num = (le16_to_cpu(tmp_hdr->seq_ctrl) >> 4); From c2fd34469d1623111e3c3db65cde533f3bddc26e Mon Sep 17 00:00:00 2001 From: Jia-Ju Bai Date: Fri, 18 Mar 2016 13:28:33 +1100 Subject: [PATCH 0410/1649] iwl4965: Fix a memory leak in error handling code of __il4965_up When il4965_hw_nic_init in __il4965_up fails, the memory allocated by iwl4965_sta_alloc_lq in iwl4965_alloc_bcast_station is not freed. This patches adds il_dealloc_bcast_stations in the error handling code of __il4965_up to fix this problem. This patch has been tested in real device, and it actually fixes the bug. Signed-off-by: Jia-Ju Bai Acked-by: Stanislaw Gruszka Signed-off-by: Julian Calaby Signed-off-by: Kalle Valo --- drivers/net/wireless/intel/iwlegacy/4965-mac.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/wireless/intel/iwlegacy/4965-mac.c b/drivers/net/wireless/intel/iwlegacy/4965-mac.c index b75f4ef3cdc7..30d9dd3dda53 100644 --- a/drivers/net/wireless/intel/iwlegacy/4965-mac.c +++ b/drivers/net/wireless/intel/iwlegacy/4965-mac.c @@ -5577,6 +5577,7 @@ __il4965_up(struct il_priv *il) ret = il4965_hw_nic_init(il); if (ret) { IL_ERR("Unable to init nic\n"); + il_dealloc_bcast_stations(il); return ret; } From 84d17a2a5a0f9e19e25d0472f0528996d945826e Mon Sep 17 00:00:00 2001 From: Julian Calaby Date: Fri, 18 Mar 2016 13:29:11 +1100 Subject: [PATCH 0411/1649] iwl4965: Fix more memory leaks in __il4965_up() In some of the non-success return paths, the memory allocated by iwl4965_sta_alloc_lq() in iwl4965_alloc_bcast_station() is not freed. In particular: - if the card isn't ready after il4965_prepare_card_hw() - if the card is hardware-rfkilled In the hardware rfkilled path, the driver enables the rfkill interrupt. 
When the card is unrfkilled and this interrupt is raised we end up calling il4965_bg_restart() which calls __il4965_up() which calls iwl4965_alloc_bcast_station() again. Suggested-by: Jia-Ju Bai Signed-off-by: Julian Calaby Acked-by: Stanislaw Gruszka Signed-off-by: Kalle Valo --- drivers/net/wireless/intel/iwlegacy/4965-mac.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/net/wireless/intel/iwlegacy/4965-mac.c b/drivers/net/wireless/intel/iwlegacy/4965-mac.c index 30d9dd3dda53..f9ed48070e17 100644 --- a/drivers/net/wireless/intel/iwlegacy/4965-mac.c +++ b/drivers/net/wireless/intel/iwlegacy/4965-mac.c @@ -5553,6 +5553,7 @@ __il4965_up(struct il_priv *il) il4965_prepare_card_hw(il); if (!il->hw_ready) { + il_dealloc_bcast_stations(il); IL_ERR("HW not ready\n"); return -EIO; } @@ -5564,6 +5565,7 @@ __il4965_up(struct il_priv *il) set_bit(S_RFKILL, &il->status); wiphy_rfkill_set_hw_state(il->hw->wiphy, true); + il_dealloc_bcast_stations(il); il_enable_rfkill_int(il); IL_WARN("Radio disabled by HW RF Kill switch\n"); return 0; From 001351881da1822b06e5d92e1fa2bf4920318e8c Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Mon, 28 Mar 2016 16:53:33 +0100 Subject: [PATCH 0412/1649] mwifiex: ie_list is an array, so no need to check if NULL ap_ie->ie_list is an array of struct mwifiex_ie and can never be null, so the null check on this array is redundant and can be removed. Signed-off-by: Colin Ian King Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/uap_cmd.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/marvell/mwifiex/uap_cmd.c b/drivers/net/wireless/marvell/mwifiex/uap_cmd.c index 16d95b22fe5c..92ce32f5bb13 100644 --- a/drivers/net/wireless/marvell/mwifiex/uap_cmd.c +++ b/drivers/net/wireless/marvell/mwifiex/uap_cmd.c @@ -694,7 +694,7 @@ static int mwifiex_uap_custom_ie_prepare(u8 *tlv, void *cmd_buf, u16 *ie_size) struct mwifiex_ie_list *ap_ie = cmd_buf; struct mwifiex_ie_types_header *tlv_ie = (void *)tlv; - if (!ap_ie || !ap_ie->len || !ap_ie->ie_list) + if (!ap_ie || !ap_ie->len) return -1; *ie_size += le16_to_cpu(ap_ie->len) + From a5c92f0b6a88a8abe3840869425f1372591a762c Mon Sep 17 00:00:00 2001 From: Wei-Ning Huang Date: Wed, 30 Mar 2016 18:14:55 +0800 Subject: [PATCH 0413/1649] mwifiex: fix NULL pointer dereference error In mwifiex_enable_hs, we need to check if priv->wdev.wiphy->wowlan_config is NULL before accessing its member. This sometimes cause kernel panic when suspend/resume. 
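wiphy->wowlan_config is only set once userspace actually configures WoWLAN via nl80211, so it can legitimately be NULL on the suspend path and must not be dereferenced unconditionally. The essence of the guard added in the hunk below:

    if (priv->wdev.wiphy->wowlan_config &&
        !priv->wdev.wiphy->wowlan_config->nd_config) {
            mwifiex_dbg(adapter, CMD, "aborting bgscan!\n");
            mwifiex_stop_bg_scan(priv);
    }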
Signed-off-by: Wei-Ning Huang Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/sta_ioctl.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c index d5c56eb9e985..d8de432d46a2 100644 --- a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c +++ b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c @@ -509,7 +509,8 @@ int mwifiex_enable_hs(struct mwifiex_adapter *adapter) if (priv && priv->sched_scanning) { #ifdef CONFIG_PM - if (!priv->wdev.wiphy->wowlan_config->nd_config) { + if (priv->wdev.wiphy->wowlan_config && + !priv->wdev.wiphy->wowlan_config->nd_config) { #endif mwifiex_dbg(adapter, CMD, "aborting bgscan!\n"); mwifiex_stop_bg_scan(priv); From dbb2896b485e79be55bacd891db60c85f010045f Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Thu, 31 Mar 2016 17:08:33 -0400 Subject: [PATCH 0414/1649] rtl8xxxu: Change name of struct tx_desc to be more decriptive There are two major types of TX descriptor formats for the RTL parts, the old 32 byte descriptor, and the newer 40 byte descriptor used by the 8723bu, 8192eu, and 88xx series. Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- .../net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 20 +++++++++---------- .../net/wireless/realtek/rtl8xxxu/rtl8xxxu.h | 4 ++-- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index abdff458b80f..13feb18ffa8e 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -7014,7 +7014,7 @@ static u32 rtl8xxxu_queue_select(struct ieee80211_hw *hw, struct sk_buff *skb) * format. The descriptor checksum is still only calculated over the * initial 32 bytes of the descriptor! 
*/ -static void rtl8xxxu_calc_tx_desc_csum(struct rtl8723au_tx_desc *tx_desc) +static void rtl8xxxu_calc_tx_desc_csum(struct rtl8xxxu_txdesc32 *tx_desc) { __le16 *ptr = (__le16 *)tx_desc; u16 csum = 0; @@ -7026,7 +7026,7 @@ static void rtl8xxxu_calc_tx_desc_csum(struct rtl8723au_tx_desc *tx_desc) */ tx_desc->csum = cpu_to_le16(0); - for (i = 0; i < (sizeof(struct rtl8723au_tx_desc) / sizeof(u16)); i++) + for (i = 0; i < (sizeof(struct rtl8xxxu_txdesc32) / sizeof(u16)); i++) csum = csum ^ le16_to_cpu(ptr[i]); tx_desc->csum |= cpu_to_le16(csum); @@ -7164,8 +7164,8 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw, struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); struct ieee80211_rate *tx_rate = ieee80211_get_tx_rate(hw, tx_info); struct rtl8xxxu_priv *priv = hw->priv; - struct rtl8723au_tx_desc *tx_desc; - struct rtl8723bu_tx_desc *tx_desc40; + struct rtl8xxxu_txdesc32 *tx_desc; + struct rtl8xxxu_txdesc40 *tx_desc40; struct rtl8xxxu_tx_urb *tx_urb; struct ieee80211_sta *sta = NULL; struct ieee80211_vif *vif = tx_info->control.vif; @@ -7210,7 +7210,7 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw, if (control && control->sta) sta = control->sta; - tx_desc = (struct rtl8723au_tx_desc *)skb_push(skb, tx_desc_size); + tx_desc = (struct rtl8xxxu_txdesc32 *)skb_push(skb, tx_desc_size); memset(tx_desc, 0, tx_desc_size); tx_desc->pkt_size = cpu_to_le16(pktlen); @@ -7314,7 +7314,7 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw, cpu_to_le32(TXDESC_HW_RTS_ENABLE_8723A); } } else { - tx_desc40 = (struct rtl8723bu_tx_desc *)tx_desc; + tx_desc40 = (struct rtl8xxxu_txdesc40 *)tx_desc; tx_desc40->txdw4 = cpu_to_le32(rate); if (ieee80211_is_data(hdr->frame_control)) { @@ -8454,7 +8454,7 @@ static struct rtl8xxxu_fileops rtl8723au_fops = { .writeN_block_size = 1024, .mbox_ext_reg = REG_HMBOX_EXT_0, .mbox_ext_width = 2, - .tx_desc_size = sizeof(struct rtl8723au_tx_desc), + .tx_desc_size = sizeof(struct rtl8xxxu_txdesc32), .adda_1t_init = 0x0b1b25a0, .adda_1t_path_on = 0x0bdb25a0, .adda_2t_path_on_a = 0x04db25a4, @@ -8482,7 +8482,7 @@ static struct rtl8xxxu_fileops rtl8723bu_fops = { .writeN_block_size = 1024, .mbox_ext_reg = REG_HMBOX_EXT0_8723B, .mbox_ext_width = 4, - .tx_desc_size = sizeof(struct rtl8723bu_tx_desc), + .tx_desc_size = sizeof(struct rtl8xxxu_txdesc40), .has_s0s1 = 1, .adda_1t_init = 0x01c00014, .adda_1t_path_on = 0x01c00014, @@ -8510,7 +8510,7 @@ static struct rtl8xxxu_fileops rtl8192cu_fops = { .writeN_block_size = 128, .mbox_ext_reg = REG_HMBOX_EXT_0, .mbox_ext_width = 2, - .tx_desc_size = sizeof(struct rtl8723au_tx_desc), + .tx_desc_size = sizeof(struct rtl8xxxu_txdesc32), .adda_1t_init = 0x0b1b25a0, .adda_1t_path_on = 0x0bdb25a0, .adda_2t_path_on_a = 0x04db25a4, @@ -8537,7 +8537,7 @@ static struct rtl8xxxu_fileops rtl8192eu_fops = { .writeN_block_size = 128, .mbox_ext_reg = REG_HMBOX_EXT0_8723B, .mbox_ext_width = 4, - .tx_desc_size = sizeof(struct rtl8723au_tx_desc), + .tx_desc_size = sizeof(struct rtl8xxxu_txdesc32), .has_s0s1 = 1, .adda_1t_init = 0x0fc01616, .adda_1t_path_on = 0x0fc01616, diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h index 7b73654e1368..05579a038a8f 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h @@ -332,7 +332,7 @@ struct rtl8723bu_rx_desc { __le32 tsfl; }; -struct rtl8723au_tx_desc { +struct rtl8xxxu_txdesc32 { __le16 pkt_size; u8 pkt_offset; u8 txdw0; @@ -346,7 +346,7 @@ struct rtl8723au_tx_desc { __le16 txdw7; }; 
-struct rtl8723bu_tx_desc { +struct rtl8xxxu_txdesc40 { __le16 pkt_size; u8 pkt_offset; u8 txdw0; From 33f3724948422bc594ffd976ec4272b653562414 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Thu, 31 Mar 2016 17:08:34 -0400 Subject: [PATCH 0415/1649] rtl8xxxu: Rename TX descriptor bits to map them to 32/40 byte descriptors With the size based naming of TX descriptors. Change the bit definition namings to indicate which descriptor format they match, rather than having a device name in the bit name. Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- .../net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 51 +++++----- .../net/wireless/realtek/rtl8xxxu/rtl8xxxu.h | 98 +++++++++---------- 2 files changed, 71 insertions(+), 78 deletions(-) diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 13feb18ffa8e..d7363165547a 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -7267,31 +7267,29 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw, tx_desc->txdw5 |= cpu_to_le32(0x0001ff00); tx_desc->txdw3 = - cpu_to_le32((u32)seq_number << TXDESC_SEQ_SHIFT_8723A); + cpu_to_le32((u32)seq_number << TXDESC32_SEQ_SHIFT); if (ampdu_enable) - tx_desc->txdw1 |= cpu_to_le32(TXDESC_AGG_ENABLE_8723A); + tx_desc->txdw1 |= cpu_to_le32(TXDESC32_AGG_ENABLE); else - tx_desc->txdw1 |= cpu_to_le32(TXDESC_AGG_BREAK_8723A); + tx_desc->txdw1 |= cpu_to_le32(TXDESC32_AGG_BREAK); if (ieee80211_is_mgmt(hdr->frame_control)) { tx_desc->txdw5 = cpu_to_le32(tx_rate->hw_value); tx_desc->txdw4 |= - cpu_to_le32(TXDESC_USE_DRIVER_RATE_8723A); + cpu_to_le32(TXDESC32_USE_DRIVER_RATE); tx_desc->txdw5 |= - cpu_to_le32(6 << - TXDESC_RETRY_LIMIT_SHIFT_8723A); + cpu_to_le32(6 << TXDESC32_RETRY_LIMIT_SHIFT); tx_desc->txdw5 |= - cpu_to_le32(TXDESC_RETRY_LIMIT_ENABLE_8723A); + cpu_to_le32(TXDESC32_RETRY_LIMIT_ENABLE); } if (ieee80211_is_data_qos(hdr->frame_control)) - tx_desc->txdw4 |= cpu_to_le32(TXDESC_QOS_8723A); + tx_desc->txdw4 |= cpu_to_le32(TXDESC32_QOS); if (rate_flag & IEEE80211_TX_RC_USE_SHORT_PREAMBLE || (sta && vif && vif->bss_conf.use_short_preamble)) - tx_desc->txdw4 |= - cpu_to_le32(TXDESC_SHORT_PREAMBLE_8723A); + tx_desc->txdw4 |= cpu_to_le32(TXDESC32_SHORT_PREAMBLE); if (rate_flag & IEEE80211_TX_RC_SHORT_GI || (ieee80211_is_data_qos(hdr->frame_control) && @@ -7307,11 +7305,10 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw, */ tx_desc->txdw4 |= cpu_to_le32(DESC_RATE_24M << - TXDESC_RTS_RATE_SHIFT_8723A); + TXDESC32_RTS_RATE_SHIFT); tx_desc->txdw4 |= - cpu_to_le32(TXDESC_RTS_CTS_ENABLE_8723A); - tx_desc->txdw4 |= - cpu_to_le32(TXDESC_HW_RTS_ENABLE_8723A); + cpu_to_le32(TXDESC32_RTS_CTS_ENABLE); + tx_desc->txdw4 |= cpu_to_le32(TXDESC32_HW_RTS_ENABLE); } } else { tx_desc40 = (struct rtl8xxxu_txdesc40 *)tx_desc; @@ -7320,33 +7317,31 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw, if (ieee80211_is_data(hdr->frame_control)) { tx_desc->txdw4 |= cpu_to_le32(0x1f << - TXDESC_DATA_RATE_FB_SHIFT_8723B); + TXDESC40_DATA_RATE_FB_SHIFT); } tx_desc40->txdw9 = - cpu_to_le32((u32)seq_number << TXDESC_SEQ_SHIFT_8723B); + cpu_to_le32((u32)seq_number << TXDESC40_SEQ_SHIFT); if (ampdu_enable) - tx_desc40->txdw2 |= - cpu_to_le32(TXDESC_AGG_ENABLE_8723B); + tx_desc40->txdw2 |= cpu_to_le32(TXDESC40_AGG_ENABLE); else - tx_desc40->txdw2 |= cpu_to_le32(TXDESC_AGG_BREAK_8723B); + tx_desc40->txdw2 |= cpu_to_le32(TXDESC40_AGG_BREAK); if (ieee80211_is_mgmt(hdr->frame_control)) { tx_desc40->txdw4 = 
cpu_to_le32(tx_rate->hw_value); tx_desc40->txdw3 |= - cpu_to_le32(TXDESC_USE_DRIVER_RATE_8723B); + cpu_to_le32(TXDESC40_USE_DRIVER_RATE); tx_desc40->txdw4 |= - cpu_to_le32(6 << - TXDESC_RETRY_LIMIT_SHIFT_8723B); + cpu_to_le32(6 << TXDESC40_RETRY_LIMIT_SHIFT); tx_desc40->txdw4 |= - cpu_to_le32(TXDESC_RETRY_LIMIT_ENABLE_8723B); + cpu_to_le32(TXDESC40_RETRY_LIMIT_ENABLE); } if (rate_flag & IEEE80211_TX_RC_USE_SHORT_PREAMBLE || (sta && vif && vif->bss_conf.use_short_preamble)) tx_desc40->txdw5 |= - cpu_to_le32(TXDESC_SHORT_PREAMBLE_8723B); + cpu_to_le32(TXDESC40_SHORT_PREAMBLE); if (rate_flag & IEEE80211_TX_RC_USE_RTS_CTS) { /* @@ -7355,11 +7350,9 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw, */ tx_desc->txdw4 |= cpu_to_le32(DESC_RATE_24M << - TXDESC_RTS_RATE_SHIFT_8723B); - tx_desc->txdw3 |= - cpu_to_le32(TXDESC_RTS_CTS_ENABLE_8723B); - tx_desc->txdw3 |= - cpu_to_le32(TXDESC_HW_RTS_ENABLE_8723B); + TXDESC40_RTS_RATE_SHIFT); + tx_desc->txdw3 |= cpu_to_le32(TXDESC40_RTS_CTS_ENABLE); + tx_desc->txdw3 |= cpu_to_le32(TXDESC40_HW_RTS_ENABLE); } } diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h index 05579a038a8f..38d4f5686f67 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h @@ -422,10 +422,10 @@ struct rtl8xxxu_txdesc40 { * aggregation enable and break respectively. For 8723bu, bits 0-7 are macid. */ #define TXDESC_PKT_OFFSET_SZ 0 -#define TXDESC_AGG_ENABLE_8723A BIT(5) -#define TXDESC_AGG_BREAK_8723A BIT(6) -#define TXDESC_MACID_SHIFT_8723B 0 -#define TXDESC_MACID_MASK_8723B 0x00f0 +#define TXDESC32_AGG_ENABLE BIT(5) +#define TXDESC32_AGG_BREAK BIT(6) +#define TXDESC40_MACID_SHIFT 0 +#define TXDESC40_MACID_MASK 0x00f0 #define TXDESC_QUEUE_SHIFT 8 #define TXDESC_QUEUE_MASK 0x1f00 #define TXDESC_QUEUE_BK 0x2 @@ -437,9 +437,9 @@ struct rtl8xxxu_txdesc40 { #define TXDESC_QUEUE_MGNT 0x12 #define TXDESC_QUEUE_CMD 0x13 #define TXDESC_QUEUE_MAX (TXDESC_QUEUE_CMD + 1) -#define TXDESC_RDG_NAV_EXT_8723B BIT(13) -#define TXDESC_LSIG_TXOP_ENABLE_8723B BIT(14) -#define TXDESC_PIFS_8723B BIT(15) +#define TXDESC40_RDG_NAV_EXT BIT(13) +#define TXDESC40_LSIG_TXOP_ENABLE BIT(14) +#define TXDESC40_PIFS BIT(15) #define DESC_RATE_ID_SHIFT 16 #define DESC_RATE_ID_MASK 0xf @@ -451,71 +451,71 @@ struct rtl8xxxu_txdesc40 { #define TXDESC_HWPC BIT(31) /* Word 2 */ -#define TXDESC_PAID_SHIFT_8723B 0 -#define TXDESC_PAID_MASK_8723B 0x1ff -#define TXDESC_CCA_RTS_SHIFT_8723B 10 -#define TXDESC_CCA_RTS_MASK_8723B 0xc00 -#define TXDESC_AGG_ENABLE_8723B BIT(12) -#define TXDESC_RDG_ENABLE_8723B BIT(13) -#define TXDESC_AGG_BREAK_8723B BIT(16) -#define TXDESC_MORE_FRAG_8723B BIT(17) -#define TXDESC_RAW_8723B BIT(18) -#define TXDESC_ACK_REPORT_8723A BIT(19) -#define TXDESC_SPE_RPT_8723B BIT(19) +#define TXDESC40_PAID_SHIFT 0 +#define TXDESC40_PAID_MASK 0x1ff +#define TXDESC40_CCA_RTS_SHIFT 10 +#define TXDESC40_CCA_RTS_MASK 0xc00 +#define TXDESC40_AGG_ENABLE BIT(12) +#define TXDESC40_RDG_ENABLE BIT(13) +#define TXDESC40_AGG_BREAK BIT(16) +#define TXDESC40_MORE_FRAG BIT(17) +#define TXDESC40_RAW BIT(18) +#define TXDESC32_ACK_REPORT BIT(19) +#define TXDESC40_SPE_RPT BIT(19) #define TXDESC_AMPDU_DENSITY_SHIFT 20 -#define TXDESC_BT_INT_8723B BIT(23) -#define TXDESC_GID_8723B BIT(24) +#define TXDESC40_BT_INT BIT(23) +#define TXDESC40_GID BIT(24) /* Word 3 */ -#define TXDESC_USE_DRIVER_RATE_8723B BIT(8) -#define TXDESC_CTS_SELF_ENABLE_8723B BIT(11) -#define TXDESC_RTS_CTS_ENABLE_8723B BIT(12) -#define 
TXDESC_HW_RTS_ENABLE_8723B BIT(13) -#define TXDESC_SEQ_SHIFT_8723A 16 -#define TXDESC_SEQ_MASK_8723A 0x0fff0000 +#define TXDESC40_USE_DRIVER_RATE BIT(8) +#define TXDESC40_CTS_SELF_ENABLE BIT(11) +#define TXDESC40_RTS_CTS_ENABLE BIT(12) +#define TXDESC40_HW_RTS_ENABLE BIT(13) +#define TXDESC32_SEQ_SHIFT 16 +#define TXDESC32_SEQ_MASK 0x0fff0000 /* Word 4 */ -#define TXDESC_RTS_RATE_SHIFT_8723A 0 -#define TXDESC_RTS_RATE_MASK_8723A 0x3f -#define TXDESC_QOS_8723A BIT(6) -#define TXDESC_HW_SEQ_ENABLE_8723A BIT(7) -#define TXDESC_USE_DRIVER_RATE_8723A BIT(8) +#define TXDESC32_RTS_RATE_SHIFT 0 +#define TXDESC32_RTS_RATE_MASK 0x3f +#define TXDESC32_QOS BIT(6) +#define TXDESC32_HW_SEQ_ENABLE BIT(7) +#define TXDESC32_USE_DRIVER_RATE BIT(8) #define TXDESC_DISABLE_DATA_FB BIT(10) -#define TXDESC_CTS_SELF_ENABLE_8723A BIT(11) -#define TXDESC_RTS_CTS_ENABLE_8723A BIT(12) -#define TXDESC_HW_RTS_ENABLE_8723A BIT(13) +#define TXDESC32_CTS_SELF_ENABLE BIT(11) +#define TXDESC32_RTS_CTS_ENABLE BIT(12) +#define TXDESC32_HW_RTS_ENABLE BIT(13) #define TXDESC_PRIME_CH_OFF_LOWER BIT(20) #define TXDESC_PRIME_CH_OFF_UPPER BIT(21) -#define TXDESC_SHORT_PREAMBLE_8723A BIT(24) +#define TXDESC32_SHORT_PREAMBLE BIT(24) #define TXDESC_DATA_BW BIT(25) #define TXDESC_RTS_DATA_BW BIT(27) #define TXDESC_RTS_PRIME_CH_OFF_LOWER BIT(28) #define TXDESC_RTS_PRIME_CH_OFF_UPPER BIT(29) -#define TXDESC_DATA_RATE_FB_SHIFT_8723B 8 -#define TXDESC_DATA_RATE_FB_MASK_8723B 0x00001f00 -#define TXDESC_RETRY_LIMIT_ENABLE_8723B BIT(17) -#define TXDESC_RETRY_LIMIT_SHIFT_8723B 18 -#define TXDESC_RETRY_LIMIT_MASK_8723B 0x00fc0000 -#define TXDESC_RTS_RATE_SHIFT_8723B 24 -#define TXDESC_RTS_RATE_MASK_8723B 0x3f000000 +#define TXDESC40_DATA_RATE_FB_SHIFT 8 +#define TXDESC40_DATA_RATE_FB_MASK 0x00001f00 +#define TXDESC40_RETRY_LIMIT_ENABLE BIT(17) +#define TXDESC40_RETRY_LIMIT_SHIFT 18 +#define TXDESC40_RETRY_LIMIT_MASK 0x00fc0000 +#define TXDESC40_RTS_RATE_SHIFT 24 +#define TXDESC40_RTS_RATE_MASK 0x3f000000 /* Word 5 */ -#define TXDESC_SHORT_PREAMBLE_8723B BIT(4) +#define TXDESC40_SHORT_PREAMBLE BIT(4) #define TXDESC_SHORT_GI BIT(6) #define TXDESC_CCX_TAG BIT(7) -#define TXDESC_RETRY_LIMIT_ENABLE_8723A BIT(17) -#define TXDESC_RETRY_LIMIT_SHIFT_8723A 18 -#define TXDESC_RETRY_LIMIT_MASK_8723A 0x00fc0000 +#define TXDESC32_RETRY_LIMIT_ENABLE BIT(17) +#define TXDESC32_RETRY_LIMIT_SHIFT 18 +#define TXDESC32_RETRY_LIMIT_MASK 0x00fc0000 /* Word 6 */ #define TXDESC_MAX_AGG_SHIFT 11 /* Word 8 */ -#define TXDESC_HW_SEQ_ENABLE_8723B BIT(15) +#define TXDESC40_HW_SEQ_ENABLE BIT(15) /* Word 9 */ -#define TXDESC_SEQ_SHIFT_8723B 12 -#define TXDESC_SEQ_MASK_8723B 0x00fff000 +#define TXDESC40_SEQ_SHIFT 12 +#define TXDESC40_SEQ_MASK 0x00fff000 struct phy_rx_agc_info { #ifdef __LITTLE_ENDIAN From 169bc5cb0b8162d271c8fd38ff3d90b098241e16 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Thu, 31 Mar 2016 17:08:35 -0400 Subject: [PATCH 0416/1649] rtl8xxxu: Correct txdesc40 gid definition txdesc40 dword2 gid is a 6 bit field, not a single bit Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h index 38d4f5686f67..af1d50482e12 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h @@ -464,7 +464,7 @@ struct rtl8xxxu_txdesc40 { #define TXDESC40_SPE_RPT BIT(19) #define TXDESC_AMPDU_DENSITY_SHIFT 20 
#define TXDESC40_BT_INT BIT(23) -#define TXDESC40_GID BIT(24) +#define TXDESC40_GID_SHIFT 24 /* Word 3 */ #define TXDESC40_USE_DRIVER_RATE BIT(8) From 1df1de348572dff0fa7fb9c447d991c8dc1348f8 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Thu, 31 Mar 2016 17:08:36 -0400 Subject: [PATCH 0417/1649] rtl8xxxu: TXDESC_SHORT_GI is txdesc32 only This is no short GI bit in the txdesc40 format. Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 2 +- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index d7363165547a..484d08fe80cd 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -7295,7 +7295,7 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw, (ieee80211_is_data_qos(hdr->frame_control) && sta && sta->ht_cap.cap & (IEEE80211_HT_CAP_SGI_40 | IEEE80211_HT_CAP_SGI_20))) { - tx_desc->txdw5 |= cpu_to_le32(TXDESC_SHORT_GI); + tx_desc->txdw5 |= cpu_to_le32(TXDESC32_SHORT_GI); } if (rate_flag & IEEE80211_TX_RC_USE_RTS_CTS) { diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h index af1d50482e12..f211c5db753f 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h @@ -501,7 +501,7 @@ struct rtl8xxxu_txdesc40 { /* Word 5 */ #define TXDESC40_SHORT_PREAMBLE BIT(4) -#define TXDESC_SHORT_GI BIT(6) +#define TXDESC32_SHORT_GI BIT(6) #define TXDESC_CCX_TAG BIT(7) #define TXDESC32_RETRY_LIMIT_ENABLE BIT(17) #define TXDESC32_RETRY_LIMIT_SHIFT 18 From f3fc251162f9390baabcbf812766b074e404d29a Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Thu, 31 Mar 2016 17:08:37 -0400 Subject: [PATCH 0418/1649] rtl8xxxu: 8192eu uses txdesc40 8192eu uses the new TX descriptor format Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 484d08fe80cd..ad280cfae694 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -8530,7 +8530,7 @@ static struct rtl8xxxu_fileops rtl8192eu_fops = { .writeN_block_size = 128, .mbox_ext_reg = REG_HMBOX_EXT0_8723B, .mbox_ext_width = 4, - .tx_desc_size = sizeof(struct rtl8xxxu_txdesc32), + .tx_desc_size = sizeof(struct rtl8xxxu_txdesc40), .has_s0s1 = 1, .adda_1t_init = 0x0fc01616, .adda_1t_path_on = 0x0fc01616, From 931d9278259a91a601c93fe62979c7db53678abb Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Thu, 31 Mar 2016 17:08:38 -0400 Subject: [PATCH 0419/1649] rtl8xxxu: Update some register definitions Improve descriptive names of some registers and add some additional registers only found on nextgen chips. 
Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- .../wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h | 31 ++++++++++++++----- 1 file changed, 24 insertions(+), 7 deletions(-) diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h index e545e849f5a3..ade42fe7e742 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h @@ -417,13 +417,20 @@ /* spec version 11 */ /* 0x0400 ~ 0x047F Protocol Configuration */ -#define REG_VOQ_INFORMATION 0x0400 -#define REG_VIQ_INFORMATION 0x0404 -#define REG_BEQ_INFORMATION 0x0408 -#define REG_BKQ_INFORMATION 0x040c -#define REG_MGQ_INFORMATION 0x0410 -#define REG_HGQ_INFORMATION 0x0414 -#define REG_BCNQ_INFORMATION 0x0418 +/* 8192c, 8192d */ +#define REG_VOQ_INFO 0x0400 +#define REG_VIQ_INFO 0x0404 +#define REG_BEQ_INFO 0x0408 +#define REG_BKQ_INFO 0x040c +/* 8188e, 8723a, 8812a, 8821a, 8192e, 8723b */ +#define REG_Q0_INFO 0x400 +#define REG_Q1_INFO 0x404 +#define REG_Q2_INFO 0x408 +#define REG_Q3_INFO 0x40c + +#define REG_MGQ_INFO 0x0410 +#define REG_HGQ_INFO 0x0414 +#define REG_BCNQ_INFO 0x0418 #define REG_CPU_MGQ_INFORMATION 0x041c #define REG_FWHW_TXQ_CTRL 0x0420 @@ -494,6 +501,9 @@ #define REG_DATA_SUBCHANNEL 0x0483 /* 8723au */ #define REG_INIDATA_RATE_SEL 0x0484 +/* MACID_SLEEP_1/3 for 8723b, 8192e, 8812a, 8821a */ +#define REG_MACID_SLEEP_3_8732B 0x0484 +#define REG_MACID_SLEEP_1_8732B 0x0488 #define REG_POWER_STATUS 0x04a4 #define REG_POWER_STAGE1 0x04b4 @@ -508,6 +518,13 @@ #define REG_RTS_MAX_AGGR_NUM 0x04cb #define REG_BAR_MODE_CTRL 0x04cc #define REG_RA_TRY_RATE_AGG_LMT 0x04cf +/* MACID_DROP for 8723a */ +#define REG_MACID_DROP_8732A 0x04d0 +/* EARLY_MODE_CONTROL 8188e */ +#define REG_EARLY_MODE_CONTROL_8188E 0x04d0 +/* MACID_SLEEP_2 for 8723b, 8192e, 8812a, 8821a */ +#define REG_MACID_SLEEP_2_8732B 0x04d0 +#define REG_MACID_SLEEP 0x04d4 #define REG_NQOS_SEQ 0x04dc #define REG_QOS_SEQ 0x04de #define REG_NEED_CPU_HANDLE 0x04e0 From ba17d824783805235f317f79f2871b17bd679956 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Thu, 31 Mar 2016 17:08:39 -0400 Subject: [PATCH 0420/1649] rtl8xxxu: Use enums for chip version numbers With support for more chips being added, use an enum to specify the chip version. 
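Beyond readability, typing priv->rtl_chip as an enum lets the compiler help as chips are added: a switch over the enum with no default case will warn (-Wswitch, enabled by -Wall) about any enumerator that is not handled. Illustrative use only, mirroring the init-table selection in the hunk below:

    enum rtl8xxxu_rtl_chip chip = priv->rtl_chip;

    switch (chip) {
    case RTL8723A:
            rftable = rtl8723au_radioa_1t_init_table;
            break;
    case RTL8723B:
            rftable = rtl8723bu_radioa_1t_init_table;
            break;
    /* no default: -Wswitch flags any unhandled enum value */
    }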
Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- .../net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 84 +++++++++---------- .../net/wireless/realtek/rtl8xxxu/rtl8xxxu.h | 26 +++++- 2 files changed, 67 insertions(+), 43 deletions(-) diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index ad280cfae694..38576692f9b8 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -1574,7 +1574,7 @@ static void rtl8723a_enable_rf(struct rtl8xxxu_priv *priv) val32 &= ~OFDM_RF_PATH_TX_MASK; if (priv->tx_paths == 2) val32 |= OFDM_RF_PATH_TX_A | OFDM_RF_PATH_TX_B; - else if (priv->rtlchip == 0x8192c || priv->rtlchip == 0x8191c) + else if (priv->rtl_chip == RTL8192C || priv->rtl_chip == RTL8191C) val32 |= OFDM_RF_PATH_TX_B; else val32 |= OFDM_RF_PATH_TX_A; @@ -2199,11 +2199,11 @@ static int rtl8xxxu_identify_chip(struct rtl8xxxu_priv *priv) if (val32 & SYS_CFG_BT_FUNC) { if (priv->chip_cut >= 3) { sprintf(priv->chip_name, "8723BU"); - priv->rtlchip = 0x8723b; + priv->rtl_chip = RTL8723B; } else { sprintf(priv->chip_name, "8723AU"); priv->usb_interrupts = 1; - priv->rtlchip = 0x8723a; + priv->rtl_chip = RTL8723A; } priv->rf_paths = 1; @@ -2227,13 +2227,13 @@ static int rtl8xxxu_identify_chip(struct rtl8xxxu_priv *priv) priv->rf_paths = 2; priv->rx_paths = 2; priv->tx_paths = 1; - priv->rtlchip = 0x8191e; + priv->rtl_chip = RTL8191E; } else { sprintf(priv->chip_name, "8192EU"); priv->rf_paths = 2; priv->rx_paths = 2; priv->tx_paths = 2; - priv->rtlchip = 0x8192e; + priv->rtl_chip = RTL8192E; } } else if (bonding == HPON_FSM_BONDING_1T2R) { sprintf(priv->chip_name, "8191CU"); @@ -2241,14 +2241,14 @@ static int rtl8xxxu_identify_chip(struct rtl8xxxu_priv *priv) priv->rx_paths = 2; priv->tx_paths = 1; priv->usb_interrupts = 1; - priv->rtlchip = 0x8191c; + priv->rtl_chip = RTL8191C; } else { sprintf(priv->chip_name, "8192CU"); priv->rf_paths = 2; priv->rx_paths = 2; priv->tx_paths = 2; priv->usb_interrupts = 1; - priv->rtlchip = 0x8192c; + priv->rtl_chip = RTL8192C; } priv->has_wifi = 1; } else { @@ -2256,15 +2256,15 @@ static int rtl8xxxu_identify_chip(struct rtl8xxxu_priv *priv) priv->rf_paths = 1; priv->rx_paths = 1; priv->tx_paths = 1; - priv->rtlchip = 0x8188c; + priv->rtl_chip = RTL8188C; priv->usb_interrupts = 1; priv->has_wifi = 1; } - switch (priv->rtlchip) { - case 0x8188e: - case 0x8192e: - case 0x8723b: + switch (priv->rtl_chip) { + case RTL8188E: + case RTL8192E: + case RTL8723B: switch (val32 & SYS_CFG_VENDOR_EXT_MASK) { case SYS_CFG_VENDOR_ID_TSMC: sprintf(priv->chip_vendor, "TSMC"); @@ -2814,7 +2814,7 @@ static int rtl8xxxu_start_firmware(struct rtl8xxxu_priv *priv) /* * Init H2C command */ - if (priv->rtlchip == 0x8723b) + if (priv->rtl_chip == RTL8723B) rtl8xxxu_write8(priv, REG_HMTFR, 0x0f); exit: return ret; @@ -2997,7 +2997,7 @@ static int rtl8192cu_load_firmware(struct rtl8xxxu_priv *priv) if (!priv->vendor_umc) fw_name = "rtlwifi/rtl8192cufw_TMSC.bin"; - else if (priv->chip_cut || priv->rtlchip == 0x8192c) + else if (priv->chip_cut || priv->rtl_chip == RTL8192C) fw_name = "rtlwifi/rtl8192cufw_B.bin"; else fw_name = "rtlwifi/rtl8192cufw_A.bin"; @@ -3108,7 +3108,7 @@ rtl8xxxu_init_mac(struct rtl8xxxu_priv *priv, struct rtl8xxxu_reg8val *array) } } - if (priv->rtlchip != 0x8723b) + if (priv->rtl_chip != RTL8723B) rtl8xxxu_write8(priv, REG_MAX_AGGR_NUM, 0x0a); return 0; @@ -3154,7 +3154,7 @@ static int rtl8xxxu_init_phy_bb(struct rtl8xxxu_priv *priv) * addresses, 
which is initialized here. Do we need this? */ - if (priv->rtlchip == 0x8723b) { + if (priv->rtl_chip == RTL8723B) { val16 = rtl8xxxu_read16(priv, REG_SYS_FUNC); val16 |= SYS_FUNC_BB_GLB_RSTN | SYS_FUNC_BBRSTB | SYS_FUNC_DIO_RF; @@ -3176,7 +3176,7 @@ static int rtl8xxxu_init_phy_bb(struct rtl8xxxu_priv *priv) rtl8xxxu_write16(priv, REG_SYS_FUNC, val16); } - if (priv->rtlchip != 0x8723b) { + if (priv->rtl_chip != RTL8723B) { /* AFE_XTAL_RF_GATE (bit 14) if addressing as 32 bit register */ val32 = rtl8xxxu_read32(priv, REG_AFE_XTAL_CTRL); val32 &= ~AFE_XTAL_RF_GATE; @@ -3193,7 +3193,7 @@ static int rtl8xxxu_init_phy_bb(struct rtl8xxxu_priv *priv) rtl8xxxu_init_phy_regs(priv, rtl8188ru_phy_1t_highpa_table); else if (priv->tx_paths == 2) rtl8xxxu_init_phy_regs(priv, rtl8192cu_phy_2t_init_table); - else if (priv->rtlchip == 0x8723b) { + else if (priv->rtl_chip == RTL8723B) { /* * Why? */ @@ -3204,7 +3204,7 @@ static int rtl8xxxu_init_phy_bb(struct rtl8xxxu_priv *priv) rtl8xxxu_init_phy_regs(priv, rtl8723a_phy_1t_init_table); - if (priv->rtlchip == 0x8188c && priv->hi_pa && + if (priv->rtl_chip == RTL8188C && priv->hi_pa && priv->vendor_umc && priv->chip_cut == 1) rtl8xxxu_write8(priv, REG_OFDM0_AGC_PARM1 + 2, 0x50); @@ -3266,7 +3266,7 @@ static int rtl8xxxu_init_phy_bb(struct rtl8xxxu_priv *priv) rtl8xxxu_write32(priv, REG_TX_TO_TX, val32); } - if (priv->rtlchip == 0x8723b) + if (priv->rtl_chip == RTL8723B) rtl8xxxu_init_phy_regs(priv, rtl8xxx_agc_8723bu_table); else if (priv->hi_pa) rtl8xxxu_init_phy_regs(priv, rtl8xxx_agc_highpa_table); @@ -3283,7 +3283,7 @@ static int rtl8xxxu_init_phy_bb(struct rtl8xxxu_priv *priv) rtl8xxxu_write32(priv, REG_MAC_PHY_CTRL, val32); } - if (priv->rtlchip != 0x8723bu) { + if (priv->rtl_chip != RTL8723B) { ldoa15 = LDOA15_ENABLE | LDOA15_OBUF; ldov12d = LDOV12D_ENABLE | BIT(2) | (2 << LDOV12D_VADJ_SHIFT); ldohci12 = 0x57; @@ -5955,7 +5955,7 @@ static int rtl8192cu_power_on(struct rtl8xxxu_priv *priv) /* * Workaround for 8188RU LNA power leakage problem. */ - if (priv->rtlchip == 0x8188c && priv->hi_pa) { + if (priv->rtl_chip == RTL8188C && priv->hi_pa) { val32 = rtl8xxxu_read32(priv, REG_FPGA0_XCD_RF_PARM); val32 &= ~BIT(1); rtl8xxxu_write32(priv, REG_FPGA0_XCD_RF_PARM, val32); @@ -6020,7 +6020,7 @@ static void rtl8xxxu_power_off(struct rtl8xxxu_priv *priv) /* * Workaround for 8188RU LNA power leakage problem. 
*/ - if (priv->rtlchip == 0x8188c && priv->hi_pa) { + if (priv->rtl_chip == RTL8188C && priv->hi_pa) { val32 = rtl8xxxu_read32(priv, REG_FPGA0_XCD_RF_PARM); val32 |= BIT(1); rtl8xxxu_write32(priv, REG_FPGA0_XCD_RF_PARM, val32); @@ -6313,7 +6313,7 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw) * Presumably this is for 8188EU as well * Enable TX report and TX report timer */ - if (priv->rtlchip == 0x8723bu) { + if (priv->rtl_chip == RTL8723B) { val8 = rtl8xxxu_read8(priv, REG_TX_REPORT_CTRL); val8 |= TX_REPORT_CTRL_TIMER_ENABLE; rtl8xxxu_write8(priv, REG_TX_REPORT_CTRL, val8); @@ -6340,9 +6340,9 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw) /* Solve too many protocol error on USB bus */ /* Can't do this for 8188/8192 UMC A cut parts */ - if (priv->rtlchip == 0x8723a || - ((priv->rtlchip == 0x8192c || priv->rtlchip == 0x8191c || - priv->rtlchip == 0x8188c) && + if (priv->rtl_chip == RTL8723A || + ((priv->rtl_chip == RTL8192C || priv->rtl_chip == RTL8191C || + priv->rtl_chip == RTL8188C) && (priv->chip_cut || !priv->vendor_umc))) { rtl8xxxu_write8(priv, 0xfe40, 0xe6); rtl8xxxu_write8(priv, 0xfe41, 0x94); @@ -6361,7 +6361,7 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw) rtl8xxxu_write8(priv, 0xfe42, 0x80); } - if (priv->rtlchip == 0x8192e) { + if (priv->rtl_chip == RTL8192E) { rtl8xxxu_write32(priv, REG_HIMR0, 0x00); rtl8xxxu_write32(priv, REG_HIMR1, 0x00); } @@ -6369,7 +6369,7 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw) if (priv->fops->phy_init_antenna_selection) priv->fops->phy_init_antenna_selection(priv); - if (priv->rtlchip == 0x8723b) + if (priv->rtl_chip == RTL8723B) ret = rtl8xxxu_init_mac(priv, rtl8723b_mac_init_table); else ret = rtl8xxxu_init_mac(priv, rtl8723a_mac_init_table); @@ -6383,12 +6383,12 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw) if (ret) goto exit; - switch(priv->rtlchip) { - case 0x8723a: + switch(priv->rtl_chip) { + case RTL8723A: rftable = rtl8723au_radioa_1t_init_table; ret = rtl8xxxu_init_phy_rf(priv, rftable, RF_A); break; - case 0x8723b: + case RTL8723B: rftable = rtl8723bu_radioa_1t_init_table; ret = rtl8xxxu_init_phy_rf(priv, rftable, RF_A); /* @@ -6399,18 +6399,18 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw) msleep(200); rtl8xxxu_write_rfreg(priv, RF_A, 0xb0, 0xdffe0); break; - case 0x8188c: + case RTL8188C: if (priv->hi_pa) rftable = rtl8188ru_radioa_1t_highpa_table; else rftable = rtl8192cu_radioa_1t_init_table; ret = rtl8xxxu_init_phy_rf(priv, rftable, RF_A); break; - case 0x8191c: + case RTL8191C: rftable = rtl8192cu_radioa_1t_init_table; ret = rtl8xxxu_init_phy_rf(priv, rftable, RF_A); break; - case 0x8192c: + case RTL8192C: rftable = rtl8192cu_radioa_2t_init_table; ret = rtl8xxxu_init_phy_rf(priv, rftable, RF_A); if (ret) @@ -6428,7 +6428,7 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw) /* * Chip specific quirks */ - if (priv->rtlchip == 0x8723a) { + if (priv->rtl_chip == RTL8723A) { /* Fix USB interface interference issue */ rtl8xxxu_write8(priv, 0xfe40, 0xe0); rtl8xxxu_write8(priv, 0xfe41, 0x8d); @@ -6468,7 +6468,7 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw) */ val8 = TX_TOTAL_PAGE_NUM + 1; - if (priv->rtlchip == 0x8723b) + if (priv->rtl_chip == RTL8723B) val8 -= 1; rtl8xxxu_write8(priv, REG_TXPKTBUF_BCNQ_BDNY, val8); @@ -6484,7 +6484,7 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw) goto exit; /* RFSW Control - clear bit 14 ?? 
*/ - if (priv->rtlchip != 0x8723b) + if (priv->rtl_chip != RTL8723B) rtl8xxxu_write32(priv, REG_FPGA0_TX_INFO, 0x00000003); /* 0x07000760 */ val32 = FPGA0_RF_TRSW | FPGA0_RF_TRSWB | FPGA0_RF_ANTSW | @@ -6501,14 +6501,14 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw) /* * Set RX page boundary */ - if (priv->rtlchip == 0x8723b) + if (priv->rtl_chip == RTL8723B) rtl8xxxu_write16(priv, REG_TRXFF_BNDY + 2, 0x3f7f); else rtl8xxxu_write16(priv, REG_TRXFF_BNDY + 2, 0x27ff); /* * Transfer page size is always 128 */ - if (priv->rtlchip == 0x8723b) + if (priv->rtl_chip == RTL8723B) val8 = (PBP_PAGE_SIZE_256 << PBP_PAGE_SIZE_RX_SHIFT) | (PBP_PAGE_SIZE_256 << PBP_PAGE_SIZE_TX_SHIFT); else @@ -6600,7 +6600,7 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw) /* * Initialize burst parameters */ - if (priv->rtlchip == 0x8723b) { + if (priv->rtl_chip == RTL8723B) { /* * For USB high speed set 512B packets */ @@ -6682,7 +6682,7 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw) val8 = ((30000 + NAV_UPPER_UNIT - 1) / NAV_UPPER_UNIT); rtl8xxxu_write8(priv, REG_NAV_UPPER, val8); - if (priv->rtlchip == 0x8723a) { + if (priv->rtl_chip == RTL8723A) { /* * 2011/03/09 MH debug only, UMC-B cut pass 2500 S5 test, * but we need to find root cause. diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h index f211c5db753f..455e1122dbb5 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h @@ -65,6 +65,30 @@ #define EFUSE_BT_MAP_LEN_8723A 1024 #define EFUSE_MAX_WORD_UNIT 4 +enum rtl8xxxu_rtl_chip { + RTL8192S = 0x81920, + RTL8191S = 0x81910, + RTL8192C = 0x8192c, + RTL8191C = 0x8191c, + RTL8188C = 0x8188c, + RTL8188R = 0x81889, + RTL8192D = 0x8192d, + RTL8723A = 0x8723a, + RTL8188E = 0x8188e, + RTL8812 = 0x88120, + RTL8821 = 0x88210, + RTL8192E = 0x8192e, + RTL8191E = 0x8191e, + RTL8723B = 0x8723b, + RTL8814A = 0x8814a, + RTL8881A = 0x8881a, + RTL8821B = 0x8821b, + RTL8822B = 0x8822b, + RTL8703B = 0x8703b, + RTL8195A = 0x8195a, + RTL8188F = 0x8188f +}; + enum rtl8xxxu_rx_type { RX_TYPE_DATA_PKT = 0, RX_TYPE_C2H = 1, @@ -1236,7 +1260,7 @@ struct rtl8xxxu_priv { u32 mac_backup[RTL8XXXU_MAC_REGS]; u32 bb_backup[RTL8XXXU_BB_REGS]; u32 bb_recovery_backup[RTL8XXXU_BB_REGS]; - u32 rtlchip; + enum rtl8xxxu_rtl_chip rtl_chip; u8 pi_enabled:1; u8 int_buf[USB_INTR_CONTENT_LENGTH]; }; From af13faff851b49fb99c9b930c823a5362aeb80a1 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Thu, 31 Mar 2016 17:08:40 -0400 Subject: [PATCH 0421/1649] rtl8xxxu: Identify 8192eu rev A/B parts correctly 8192eu A/B cut parts were incorrectly identified as 8192cu devices. 
Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 38576692f9b8..201f6cfaccc2 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -2221,7 +2221,7 @@ static int rtl8xxxu_identify_chip(struct rtl8xxxu_priv *priv) } else if (val32 & SYS_CFG_TYPE_ID) { bonding = rtl8xxxu_read32(priv, REG_HPON_FSM); bonding &= HPON_FSM_BONDING_MASK; - if (priv->chip_cut >= 3) { + if (priv->fops->has_s0s1) { if (bonding == HPON_FSM_BONDING_1T2R) { sprintf(priv->chip_name, "8191EU"); priv->rf_paths = 2; From 91cbe4e73197859498fba9920890979296b842e6 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Thu, 31 Mar 2016 17:08:41 -0400 Subject: [PATCH 0422/1649] rtl8xxxu: Use correct H2C calls for 8192eu The 8192eu uses the same H2C API as the 8723bu. Call the correct functions for update_rate_mask() and report_connect(). Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 201f6cfaccc2..f2e32a78e43c 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -8525,8 +8525,8 @@ static struct rtl8xxxu_fileops rtl8192eu_fops = { .enable_rf = rtl8723b_enable_rf, .disable_rf = rtl8723b_disable_rf, .set_tx_power = rtl8723b_set_tx_power, - .update_rate_mask = rtl8723au_update_rate_mask, - .report_connect = rtl8723au_report_connect, + .update_rate_mask = rtl8723bu_update_rate_mask, + .report_connect = rtl8723bu_report_connect, .writeN_block_size = 128, .mbox_ext_reg = REG_HMBOX_EXT0_8723B, .mbox_ext_width = 4, From a069caa3c30fc9744a82a6b83503ed93e00e723c Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Thu, 31 Mar 2016 17:08:42 -0400 Subject: [PATCH 0423/1649] rtl8xxxu: Do not set LDOA15 / LDOV12 on 8192eu Per the vendor driver, it looks like the 8192eu doesn't have LDOA15 / LDOV12 registers. Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index f2e32a78e43c..333addd3d46a 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -3283,7 +3283,7 @@ static int rtl8xxxu_init_phy_bb(struct rtl8xxxu_priv *priv) rtl8xxxu_write32(priv, REG_MAC_PHY_CTRL, val32); } - if (priv->rtl_chip != RTL8723B) { + if (priv->rtl_chip != RTL8723B && priv->rtl_chip != RTL8192E) { ldoa15 = LDOA15_ENABLE | LDOA15_OBUF; ldov12d = LDOV12D_ENABLE | BIT(2) | (2 << LDOV12D_VADJ_SHIFT); ldohci12 = 0x57; From f6b1cbe029f6828bbdac8b54bdcbdc35420e842e Mon Sep 17 00:00:00 2001 From: Ganapathi Bhat Date: Tue, 5 Apr 2016 01:04:34 -0700 Subject: [PATCH 0424/1649] mwifiex: add support for GTK rekey offload Added driver functionality to offload GTK rekey to firmware. When AP sends new GTK, firmware will update it. 
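The one non-obvious detail in the new command handler is the replay counter: cfg80211 hands it over as an 8-byte big-endian blob in data->replay_ctr, while the firmware structure wants two little-endian 32-bit halves. The conversion used in the hunk below boils down to:

    u64 rekey_ctr = be64_to_cpup((__be64 *)data->replay_ctr);

    rekey->replay_ctr_low  = cpu_to_le32((u32)rekey_ctr);
    rekey->replay_ctr_high = cpu_to_le32((u32)(rekey_ctr >> 32));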
Signed-off-by: Ganapathi Bhat Signed-off-by: Amitkumar Karwar Signed-off-by: Kalle Valo --- .../net/wireless/marvell/mwifiex/cfg80211.c | 13 ++++++++- drivers/net/wireless/marvell/mwifiex/fw.h | 10 +++++++ .../net/wireless/marvell/mwifiex/sta_cmd.c | 28 +++++++++++++++++++ .../wireless/marvell/mwifiex/sta_cmdresp.c | 2 ++ .../net/wireless/marvell/mwifiex/sta_event.c | 3 ++ 5 files changed, 55 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c index 108e64137826..ca8cdd2ec409 100644 --- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c +++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c @@ -3416,6 +3416,16 @@ static void mwifiex_cfg80211_set_wakeup(struct wiphy *wiphy, device_set_wakeup_enable(adapter->dev, enabled); } + +static int mwifiex_set_rekey_data(struct wiphy *wiphy, struct net_device *dev, + struct cfg80211_gtk_rekey_data *data) +{ + struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); + + return mwifiex_send_cmd(priv, HostCmd_CMD_GTK_REKEY_OFFLOAD_CFG, + HostCmd_ACT_GEN_SET, 0, data, true); +} + #endif static int mwifiex_get_coalesce_pkt_type(u8 *byte_seq) @@ -3938,6 +3948,7 @@ static struct cfg80211_ops mwifiex_cfg80211_ops = { .suspend = mwifiex_cfg80211_suspend, .resume = mwifiex_cfg80211_resume, .set_wakeup = mwifiex_cfg80211_set_wakeup, + .set_rekey_data = mwifiex_set_rekey_data, #endif .set_coalesce = mwifiex_cfg80211_set_coalesce, .tdls_mgmt = mwifiex_cfg80211_tdls_mgmt, @@ -3954,7 +3965,7 @@ static struct cfg80211_ops mwifiex_cfg80211_ops = { #ifdef CONFIG_PM static const struct wiphy_wowlan_support mwifiex_wowlan_support = { .flags = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT | - WIPHY_WOWLAN_NET_DETECT, + WIPHY_WOWLAN_NET_DETECT | WIPHY_WOWLAN_SUPPORTS_GTK_REKEY, .n_patterns = MWIFIEX_MEF_MAX_FILTERS, .pattern_min_len = 1, .pattern_max_len = MWIFIEX_MAX_PATTERN_LEN, diff --git a/drivers/net/wireless/marvell/mwifiex/fw.h b/drivers/net/wireless/marvell/mwifiex/fw.h index c134cf865291..8703d24eaa9e 100644 --- a/drivers/net/wireless/marvell/mwifiex/fw.h +++ b/drivers/net/wireless/marvell/mwifiex/fw.h @@ -372,6 +372,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER { #define HostCmd_CMD_COALESCE_CFG 0x010a #define HostCmd_CMD_MGMT_FRAME_REG 0x010c #define HostCmd_CMD_REMAIN_ON_CHAN 0x010d +#define HostCmd_CMD_GTK_REKEY_OFFLOAD_CFG 0x010f #define HostCmd_CMD_11AC_CFG 0x0112 #define HostCmd_CMD_HS_WAKEUP_REASON 0x0116 #define HostCmd_CMD_TDLS_CONFIG 0x0100 @@ -2183,6 +2184,14 @@ struct host_cmd_ds_wakeup_reason { u16 wakeup_reason; } __packed; +struct host_cmd_ds_gtk_rekey_params { + __le16 action; + u8 kck[NL80211_KCK_LEN]; + u8 kek[NL80211_KEK_LEN]; + __le32 replay_ctr_low; + __le32 replay_ctr_high; +} __packed; + struct host_cmd_ds_command { __le16 command; __le16 size; @@ -2256,6 +2265,7 @@ struct host_cmd_ds_command { struct host_cmd_ds_multi_chan_policy mc_policy; struct host_cmd_ds_robust_coex coex; struct host_cmd_ds_wakeup_reason hs_wakeup_reason; + struct host_cmd_ds_gtk_rekey_params rekey; } params; } __packed; diff --git a/drivers/net/wireless/marvell/mwifiex/sta_cmd.c b/drivers/net/wireless/marvell/mwifiex/sta_cmd.c index 30f152601c57..8cb895b7f2ee 100644 --- a/drivers/net/wireless/marvell/mwifiex/sta_cmd.c +++ b/drivers/net/wireless/marvell/mwifiex/sta_cmd.c @@ -1558,6 +1558,30 @@ static int mwifiex_cmd_robust_coex(struct mwifiex_private *priv, return 0; } +static int mwifiex_cmd_gtk_rekey_offload(struct mwifiex_private *priv, + struct host_cmd_ds_command *cmd, + u16 
cmd_action, + struct cfg80211_gtk_rekey_data *data) +{ + struct host_cmd_ds_gtk_rekey_params *rekey = &cmd->params.rekey; + u64 rekey_ctr; + + cmd->command = cpu_to_le16(HostCmd_CMD_GTK_REKEY_OFFLOAD_CFG); + cmd->size = cpu_to_le16(sizeof(*rekey) + S_DS_GEN); + + rekey->action = cpu_to_le16(cmd_action); + if (cmd_action == HostCmd_ACT_GEN_SET) { + memcpy(rekey->kek, data->kek, NL80211_KEK_LEN); + memcpy(rekey->kck, data->kck, NL80211_KCK_LEN); + rekey_ctr = be64_to_cpup((__be64 *)data->replay_ctr); + rekey->replay_ctr_low = cpu_to_le32((u32)rekey_ctr); + rekey->replay_ctr_high = + cpu_to_le32((u32)((u64)rekey_ctr >> 32)); + } + + return 0; +} + static int mwifiex_cmd_coalesce_cfg(struct mwifiex_private *priv, struct host_cmd_ds_command *cmd, @@ -2094,6 +2118,10 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no, ret = mwifiex_cmd_robust_coex(priv, cmd_ptr, cmd_action, data_buf); break; + case HostCmd_CMD_GTK_REKEY_OFFLOAD_CFG: + ret = mwifiex_cmd_gtk_rekey_offload(priv, cmd_ptr, cmd_action, + data_buf); + break; default: mwifiex_dbg(priv->adapter, ERROR, "PREP_CMD: unknown cmd- %#x\n", cmd_no); diff --git a/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c b/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c index d96523e10eb4..434b9776db45 100644 --- a/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c +++ b/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c @@ -1244,6 +1244,8 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no, case HostCmd_CMD_ROBUST_COEX: ret = mwifiex_ret_robust_coex(priv, resp, data_buf); break; + case HostCmd_CMD_GTK_REKEY_OFFLOAD_CFG: + break; default: mwifiex_dbg(adapter, ERROR, "CMD_RESP: unknown cmd response %#x\n", diff --git a/drivers/net/wireless/marvell/mwifiex/sta_event.c b/drivers/net/wireless/marvell/mwifiex/sta_event.c index 070bce401151..0104108b4ea2 100644 --- a/drivers/net/wireless/marvell/mwifiex/sta_event.c +++ b/drivers/net/wireless/marvell/mwifiex/sta_event.c @@ -147,6 +147,9 @@ mwifiex_reset_connect_state(struct mwifiex_private *priv, u16 reason_code) mwifiex_stop_net_dev_queue(priv->netdev, adapter); if (netif_carrier_ok(priv->netdev)) netif_carrier_off(priv->netdev); + + mwifiex_send_cmd(priv, HostCmd_CMD_GTK_REKEY_OFFLOAD_CFG, + HostCmd_ACT_GEN_REMOVE, 0, NULL, false); } static int mwifiex_parse_tdls_event(struct mwifiex_private *priv, From 8fa0a0dc634ba1bcf7678db296902d9c4e5025e0 Mon Sep 17 00:00:00 2001 From: Ganapathi Bhat Date: Tue, 5 Apr 2016 01:04:35 -0700 Subject: [PATCH 0425/1649] mwifiex: add support for wakeup on GTK rekey failure User can configure wakeup on GTK rekey fail with wowlan. Added corresponding wakeup reason. 
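On resume the driver translates the firmware wakeup reason into a cfg80211 wakeup report, which is what the new GTK_REKEY_FAILURE case feeds. A rough sketch of that reporting pattern, using a hypothetical helper (the field name matches the diff below):

	static void demo_report_gtk_rekey_wakeup(struct wireless_dev *wdev)
	{
		struct cfg80211_wowlan_wakeup wakeup = {};

		wakeup.pattern_idx = -1;	/* no packet pattern matched */
		wakeup.gtk_rekey_failure = true;
		cfg80211_report_wowlan_wakeup(wdev, &wakeup, GFP_KERNEL);
	}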
Signed-off-by: Ganapathi Bhat Signed-off-by: Amitkumar Karwar Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/cfg80211.c | 7 ++++++- drivers/net/wireless/marvell/mwifiex/fw.h | 1 + 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c index ca8cdd2ec409..49661e087811 100644 --- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c +++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c @@ -3390,6 +3390,10 @@ static int mwifiex_cfg80211_resume(struct wiphy *wiphy) break; case MANAGEMENT_FRAME_MATCHED: break; + case GTK_REKEY_FAILURE: + if (wiphy->wowlan_config->gtk_rekey_failure) + wakeup_report.gtk_rekey_failure = true; + break; default: break; } @@ -3965,7 +3969,8 @@ static struct cfg80211_ops mwifiex_cfg80211_ops = { #ifdef CONFIG_PM static const struct wiphy_wowlan_support mwifiex_wowlan_support = { .flags = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT | - WIPHY_WOWLAN_NET_DETECT | WIPHY_WOWLAN_SUPPORTS_GTK_REKEY, + WIPHY_WOWLAN_NET_DETECT | WIPHY_WOWLAN_SUPPORTS_GTK_REKEY | + WIPHY_WOWLAN_GTK_REKEY_FAILURE, .n_patterns = MWIFIEX_MEF_MAX_FILTERS, .pattern_min_len = 1, .pattern_max_len = MWIFIEX_MAX_PATTERN_LEN, diff --git a/drivers/net/wireless/marvell/mwifiex/fw.h b/drivers/net/wireless/marvell/mwifiex/fw.h index 8703d24eaa9e..8e4145abdbfa 100644 --- a/drivers/net/wireless/marvell/mwifiex/fw.h +++ b/drivers/net/wireless/marvell/mwifiex/fw.h @@ -620,6 +620,7 @@ enum HS_WAKEUP_REASON { MAGIC_PATTERN_MATCHED, CONTROL_FRAME_MATCHED, MANAGEMENT_FRAME_MATCHED, + GTK_REKEY_FAILURE, RESERVED }; From a362e16b83e1823746874485710c7515eb5ee369 Mon Sep 17 00:00:00 2001 From: Shengzhen Li Date: Tue, 5 Apr 2016 01:04:36 -0700 Subject: [PATCH 0426/1649] mwifiex: check revision id while choosing PCIe firmware Some of the chipsets have two revisions. This patch selects appropriate firmware by checking revision id. 
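The revision check only decides which file name lands in adapter->fw_name; the download path is unchanged and simply hands that name to the kernel firmware loader. A minimal sketch of the consuming side, assuming the name has already been selected (illustrative helper, not the driver's download routine):

	#include <linux/firmware.h>

	static int demo_load_selected_fw(struct device *dev, const char *fw_name)
	{
		const struct firmware *fw;
		int ret;

		ret = request_firmware(&fw, fw_name, dev);
		if (ret)
			return ret;
		/* ... push fw->data / fw->size to the card ... */
		release_firmware(fw);
		return 0;
	}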
Signed-off-by: Shengzhen Li Signed-off-by: Amitkumar Karwar Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/pcie.c | 56 +++++++++++++++++++-- drivers/net/wireless/marvell/mwifiex/pcie.h | 16 +++--- 2 files changed, 61 insertions(+), 11 deletions(-) diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c index de364381fe7b..6a06ca5c3eb1 100644 --- a/drivers/net/wireless/marvell/mwifiex/pcie.c +++ b/drivers/net/wireless/marvell/mwifiex/pcie.c @@ -190,7 +190,6 @@ static int mwifiex_pcie_probe(struct pci_dev *pdev, if (ent->driver_data) { struct mwifiex_pcie_device *data = (void *)ent->driver_data; - card->pcie.firmware = data->firmware; card->pcie.reg = data->reg; card->pcie.blksz_fw_dl = data->blksz_fw_dl; card->pcie.tx_buf_size = data->tx_buf_size; @@ -269,6 +268,11 @@ static const struct pci_device_id mwifiex_ids[] = { PCI_ANY_ID, PCI_ANY_ID, 0, 0, .driver_data = (unsigned long)&mwifiex_pcie8997, }, + { + PCIE_VENDOR_ID_V2_MARVELL, PCIE_DEVICE_ID_MARVELL_88W8997, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + .driver_data = (unsigned long)&mwifiex_pcie8997, + }, {}, }; @@ -2758,6 +2762,51 @@ static int mwifiex_pcie_request_irq(struct mwifiex_adapter *adapter) return 0; } +/* + * This function gets the firmware name for downloading by revision id + * + * Read revision id register to get revision id + */ +static void mwifiex_pcie_get_fw_name(struct mwifiex_adapter *adapter) +{ + int revision_id = 0; + struct pcie_service_card *card = adapter->card; + + switch (card->dev->device) { + case PCIE_DEVICE_ID_MARVELL_88W8766P: + strcpy(adapter->fw_name, PCIE8766_DEFAULT_FW_NAME); + break; + case PCIE_DEVICE_ID_MARVELL_88W8897: + mwifiex_write_reg(adapter, 0x0c58, 0x80c00000); + mwifiex_read_reg(adapter, 0x0c58, &revision_id); + revision_id &= 0xff00; + switch (revision_id) { + case PCIE8897_A0: + strcpy(adapter->fw_name, PCIE8897_A0_FW_NAME); + break; + case PCIE8897_B0: + strcpy(adapter->fw_name, PCIE8897_B0_FW_NAME); + break; + default: + break; + } + break; + case PCIE_DEVICE_ID_MARVELL_88W8997: + mwifiex_read_reg(adapter, 0x0c48, &revision_id); + switch (revision_id) { + case PCIE8997_V2: + strcpy(adapter->fw_name, PCIE8997_FW_NAME_V2); + break; + case PCIE8997_Z: + strcpy(adapter->fw_name, PCIE8997_FW_NAME_Z); + break; + default: + break; + } + break; + default: + break; + } +} + /* * This function registers the PCIE device. 
* @@ -2778,8 +2827,8 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter) adapter->tx_buf_size = card->pcie.tx_buf_size; adapter->mem_type_mapping_tbl = card->pcie.mem_type_mapping_tbl; adapter->num_mem_types = card->pcie.num_mem_types; - strcpy(adapter->fw_name, card->pcie.firmware); adapter->ext_scan = card->pcie.can_ext_scan; + mwifiex_pcie_get_fw_name(adapter); return 0; } @@ -2907,6 +2956,3 @@ MODULE_AUTHOR("Marvell International Ltd."); MODULE_DESCRIPTION("Marvell WiFi-Ex PCI-Express Driver version " PCIE_VERSION); MODULE_VERSION(PCIE_VERSION); MODULE_LICENSE("GPL v2"); -MODULE_FIRMWARE(PCIE8766_DEFAULT_FW_NAME); -MODULE_FIRMWARE(PCIE8897_DEFAULT_FW_NAME); -MODULE_FIRMWARE(PCIE8997_DEFAULT_FW_NAME); diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.h b/drivers/net/wireless/marvell/mwifiex/pcie.h index 29e58ce877e3..4455d1905d94 100644 --- a/drivers/net/wireless/marvell/mwifiex/pcie.h +++ b/drivers/net/wireless/marvell/mwifiex/pcie.h @@ -30,14 +30,22 @@ #include "main.h" #define PCIE8766_DEFAULT_FW_NAME "mrvl/pcie8766_uapsta.bin" -#define PCIE8897_DEFAULT_FW_NAME "mrvl/pcie8897_uapsta.bin" -#define PCIE8997_DEFAULT_FW_NAME "mrvl/pcie8997_uapsta.bin" +#define PCIE8897_A0_FW_NAME "mrvl/pcie8897_uapsta_a0.bin" +#define PCIE8897_B0_FW_NAME "mrvl/pcie8897_uapsta.bin" +#define PCIE8997_FW_NAME_Z "mrvl/pcieusb8997_combo.bin" +#define PCIE8997_FW_NAME_V2 "mrvl/pcieusb8997_combo_v2.bin" #define PCIE_VENDOR_ID_MARVELL (0x11ab) +#define PCIE_VENDOR_ID_V2_MARVELL (0x1b4b) #define PCIE_DEVICE_ID_MARVELL_88W8766P (0x2b30) #define PCIE_DEVICE_ID_MARVELL_88W8897 (0x2b38) #define PCIE_DEVICE_ID_MARVELL_88W8997 (0x2b42) +#define PCIE8897_A0 0x1100 +#define PCIE8897_B0 0x1200 +#define PCIE8997_Z 0x0 +#define PCIE8997_V2 0x471 + /* Constants for Buffer Descriptor (BD) rings */ #define MWIFIEX_MAX_TXRX_BD 0x20 #define MWIFIEX_TXBD_MASK 0x3F @@ -263,7 +271,6 @@ static struct memory_type_mapping mem_type_mapping_tbl_w8997[] = { }; struct mwifiex_pcie_device { - const char *firmware; const struct mwifiex_pcie_card_reg *reg; u16 blksz_fw_dl; u16 tx_buf_size; @@ -274,7 +281,6 @@ struct mwifiex_pcie_device { }; static const struct mwifiex_pcie_device mwifiex_pcie8766 = { - .firmware = PCIE8766_DEFAULT_FW_NAME, .reg = &mwifiex_reg_8766, .blksz_fw_dl = MWIFIEX_PCIE_BLOCK_SIZE_FW_DNLD, .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K, @@ -283,7 +289,6 @@ static const struct mwifiex_pcie_device mwifiex_pcie8766 = { }; static const struct mwifiex_pcie_device mwifiex_pcie8897 = { - .firmware = PCIE8897_DEFAULT_FW_NAME, .reg = &mwifiex_reg_8897, .blksz_fw_dl = MWIFIEX_PCIE_BLOCK_SIZE_FW_DNLD, .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K, @@ -294,7 +299,6 @@ static const struct mwifiex_pcie_device mwifiex_pcie8897 = { }; static const struct mwifiex_pcie_device mwifiex_pcie8997 = { - .firmware = PCIE8997_DEFAULT_FW_NAME, .reg = &mwifiex_reg_8997, .blksz_fw_dl = MWIFIEX_PCIE_BLOCK_SIZE_FW_DNLD, .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K, From 00c5478049683b15599339e36cae7fffc1e62844 Mon Sep 17 00:00:00 2001 From: Xinming Hu Date: Tue, 5 Apr 2016 01:04:37 -0700 Subject: [PATCH 0427/1649] mwifiex: remove redundant GFP_DMA flag skb forwarded to TCP/IP stack doesn't need to allocate in DMA ZONE. This patch removes GFP_DMA flag in this case to save precious DMA memory. 
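The buffers touched here are only handed up to the host stack, so normal lowmem allocation is sufficient; GFP_DMA is warranted only for devices that cannot address memory outside the small legacy DMA zone. A minimal sketch of the intended allocation (illustrative helper, not the driver's mwifiex_alloc_dma_align_buf()):

	#include <linux/skbuff.h>

	static struct sk_buff *demo_alloc_rx_skb(unsigned int len)
	{
		/* normal zone allocation; ZONE_DMA is not needed here */
		return __dev_alloc_skb(len, GFP_KERNEL);
	}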
Signed-off-by: Xinming Hu Signed-off-by: Amitkumar Karwar Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/sdio.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c index b2c839ae2c3c..a0aec3e00457 100644 --- a/drivers/net/wireless/marvell/mwifiex/sdio.c +++ b/drivers/net/wireless/marvell/mwifiex/sdio.c @@ -1123,8 +1123,8 @@ static void mwifiex_deaggr_sdio_pkt(struct mwifiex_adapter *adapter, __func__, pkt_len, blk_size); break; } - skb_deaggr = mwifiex_alloc_dma_align_buf(pkt_len, - GFP_KERNEL | GFP_DMA); + + skb_deaggr = mwifiex_alloc_dma_align_buf(pkt_len, GFP_KERNEL); if (!skb_deaggr) break; skb_put(skb_deaggr, pkt_len); @@ -1373,8 +1373,7 @@ static int mwifiex_sdio_card_to_host_mp_aggr(struct mwifiex_adapter *adapter, /* copy pkt to deaggr buf */ skb_deaggr = mwifiex_alloc_dma_align_buf(len_arr[pind], - GFP_KERNEL | - GFP_DMA); + GFP_KERNEL); if (!skb_deaggr) { mwifiex_dbg(adapter, ERROR, "skb allocation failure\t" "drop pkt len=%d type=%d\n", From ad5ca845e3d194703be82ad4a2f3042f2e198e2b Mon Sep 17 00:00:00 2001 From: Xinming Hu Date: Tue, 5 Apr 2016 01:04:38 -0700 Subject: [PATCH 0428/1649] mwifiex: schedule main workqueue for transmitting bridge packets Bridge packets are enqueued to wmm tx queue, but will not be sent until main workqeue is scheduled for new interrupt or other reason. This adds unnecessary delay during traffic. We will schedule main workqueue when bridge packet is queued. Signed-off-by: Xinming Hu Signed-off-by: Amitkumar Karwar Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/uap_txrx.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/net/wireless/marvell/mwifiex/uap_txrx.c b/drivers/net/wireless/marvell/mwifiex/uap_txrx.c index 52f7981a8afc..ee7fe58dd266 100644 --- a/drivers/net/wireless/marvell/mwifiex/uap_txrx.c +++ b/drivers/net/wireless/marvell/mwifiex/uap_txrx.c @@ -212,6 +212,8 @@ static void mwifiex_uap_queue_bridged_pkt(struct mwifiex_private *priv, atomic_inc(&adapter->tx_pending); atomic_inc(&adapter->pending_bridged_pkts); + mwifiex_queue_main_work(priv->adapter); + return; } From bf00dc22bc7a72d58fd1945814321b30948dc83b Mon Sep 17 00:00:00 2001 From: Xinming Hu Date: Tue, 5 Apr 2016 01:04:39 -0700 Subject: [PATCH 0429/1649] mwifiex: AMSDU Rx frame handling in AP mode This patch processes sub AMSDU frame received in AP mode. If a packet is multicast/broadcast, it is sent to kernel/upper layer as well as queued back to AP TX queue so that it can be sent to other associated stations. If a packet is unicast and RA is present in associated station list, it is again requeued into AP TX queue. If a packet is unicast and RA is not in associated station list, packet is forwarded to kernel to handle routing logic. 
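The forwarding decision described above reduces to three cases. A rough sketch of just that decision logic (enum and helper are illustrative; the real lookup is mwifiex_get_sta_entry() in the diff below):

	#include <linux/etherdevice.h>

	enum demo_fwd {
		DEMO_FWD_STACK_AND_REQUEUE,	/* multicast/broadcast */
		DEMO_FWD_REQUEUE_ONLY,		/* unicast to an associated STA */
		DEMO_FWD_STACK_ONLY,		/* unknown RA: let kernel routing decide */
	};

	static enum demo_fwd demo_uap_rx_decision(const struct ethhdr *eth,
						  bool ra_is_associated_sta)
	{
		if (is_multicast_ether_addr(eth->h_dest))
			return DEMO_FWD_STACK_AND_REQUEUE;
		if (ra_is_associated_sta)
			return DEMO_FWD_REQUEUE_ONLY;
		return DEMO_FWD_STACK_ONLY;
	}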
Signed-off-by: Xinming Hu Signed-off-by: Cathy Luo Signed-off-by: Amitkumar Karwar Signed-off-by: Kalle Valo --- .../wireless/marvell/mwifiex/11n_rxreorder.c | 5 +- drivers/net/wireless/marvell/mwifiex/main.h | 2 + .../net/wireless/marvell/mwifiex/uap_txrx.c | 90 +++++++++++++++++++ 3 files changed, 96 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c index 09578c6cde59..a74cc43b1953 100644 --- a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c +++ b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c @@ -59,7 +59,10 @@ static int mwifiex_11n_dispatch_amsdu_pkt(struct mwifiex_private *priv, skb->len); } - ret = mwifiex_recv_packet(priv, rx_skb); + if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP) + ret = mwifiex_uap_recv_packet(priv, rx_skb); + else + ret = mwifiex_recv_packet(priv, rx_skb); if (ret == -1) mwifiex_dbg(priv->adapter, ERROR, "Rx of A-MSDU failed"); diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h index aafc4ab4e5ae..a159fbef20cd 100644 --- a/drivers/net/wireless/marvell/mwifiex/main.h +++ b/drivers/net/wireless/marvell/mwifiex/main.h @@ -1019,6 +1019,8 @@ int mwifiex_shutdown_fw_complete(struct mwifiex_adapter *adapter); int mwifiex_dnld_fw(struct mwifiex_adapter *, struct mwifiex_fw_image *); int mwifiex_recv_packet(struct mwifiex_private *priv, struct sk_buff *skb); +int mwifiex_uap_recv_packet(struct mwifiex_private *priv, + struct sk_buff *skb); int mwifiex_process_mgmt_packet(struct mwifiex_private *priv, struct sk_buff *skb); diff --git a/drivers/net/wireless/marvell/mwifiex/uap_txrx.c b/drivers/net/wireless/marvell/mwifiex/uap_txrx.c index ee7fe58dd266..c95b61dc87c2 100644 --- a/drivers/net/wireless/marvell/mwifiex/uap_txrx.c +++ b/drivers/net/wireless/marvell/mwifiex/uap_txrx.c @@ -265,6 +265,96 @@ int mwifiex_handle_uap_rx_forward(struct mwifiex_private *priv, return mwifiex_process_rx_packet(priv, skb); } +int mwifiex_uap_recv_packet(struct mwifiex_private *priv, + struct sk_buff *skb) +{ + struct mwifiex_adapter *adapter = priv->adapter; + struct mwifiex_sta_node *src_node; + struct ethhdr *p_ethhdr; + struct sk_buff *skb_uap; + struct mwifiex_txinfo *tx_info; + + if (!skb) + return -1; + + p_ethhdr = (void *)skb->data; + src_node = mwifiex_get_sta_entry(priv, p_ethhdr->h_source); + if (src_node) { + src_node->stats.last_rx = jiffies; + src_node->stats.rx_bytes += skb->len; + src_node->stats.rx_packets++; + } + + skb->dev = priv->netdev; + skb->protocol = eth_type_trans(skb, priv->netdev); + skb->ip_summed = CHECKSUM_NONE; + + /* This is required only in case of 11n and USB/PCIE as we alloc + * a buffer of 4K only if its 11N (to be able to receive 4K + * AMSDU packets). In case of SD we allocate buffers based + * on the size of packet and hence this is not needed. + * + * Modifying the truesize here as our allocation for each + * skb is 4K but we only receive 2K packets and this cause + * the kernel to start dropping packets in case where + * application has allocated buffer based on 2K size i.e. + * if there a 64K packet received (in IP fragments and + * application allocates 64K to receive this packet but + * this packet would almost double up because we allocate + * each 1.5K fragment in 4K and pass it up. As soon as the + * 64K limit hits kernel will start to drop rest of the + * fragments. 
Currently we fail the Filesndl-ht.scr script + * for UDP, hence this fix + */ + if ((adapter->iface_type == MWIFIEX_USB || + adapter->iface_type == MWIFIEX_PCIE) && + (skb->truesize > MWIFIEX_RX_DATA_BUF_SIZE)) + skb->truesize += (skb->len - MWIFIEX_RX_DATA_BUF_SIZE); + + if (is_multicast_ether_addr(p_ethhdr->h_dest) || + mwifiex_get_sta_entry(priv, p_ethhdr->h_dest)) { + if (skb_headroom(skb) < MWIFIEX_MIN_DATA_HEADER_LEN) + skb_uap = + skb_realloc_headroom(skb, MWIFIEX_MIN_DATA_HEADER_LEN); + else + skb_uap = skb_copy(skb, GFP_ATOMIC); + + if (likely(skb_uap)) { + tx_info = MWIFIEX_SKB_TXCB(skb_uap); + memset(tx_info, 0, sizeof(*tx_info)); + tx_info->bss_num = priv->bss_num; + tx_info->bss_type = priv->bss_type; + tx_info->flags |= MWIFIEX_BUF_FLAG_BRIDGED_PKT; + __net_timestamp(skb_uap); + mwifiex_wmm_add_buf_txqueue(priv, skb_uap); + atomic_inc(&adapter->tx_pending); + atomic_inc(&adapter->pending_bridged_pkts); + if ((atomic_read(&adapter->pending_bridged_pkts) >= + MWIFIEX_BRIDGED_PKTS_THR_HIGH)) { + mwifiex_dbg(adapter, ERROR, + "Tx: Bridge packet limit reached. Drop packet!\n"); + mwifiex_uap_cleanup_tx_queues(priv); + } + + } else { + mwifiex_dbg(adapter, ERROR, "failed to allocate skb_uap"); + } + + mwifiex_queue_main_work(adapter); + /* Don't forward Intra-BSS unicast packet to upper layer*/ + if (mwifiex_get_sta_entry(priv, p_ethhdr->h_dest)) + return 0; + } + + /* Forward multicast/broadcast packet to upper layer*/ + if (in_interrupt()) + netif_rx(skb); + else + netif_rx_ni(skb); + + return 0; +} + /* * This function processes the packet received on AP interface. * From 4646968b94bdf88ae3c507c347d03acd5798939d Mon Sep 17 00:00:00 2001 From: Xinming Hu Date: Tue, 5 Apr 2016 01:04:40 -0700 Subject: [PATCH 0430/1649] mwifiex: dump pcie scratch registers This patch prints pcie scratch registers during firmware dump. They will be useful for analysing firmware status. Signed-off-by: Xinming Hu Signed-off-by: Amitkumar Karwar Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/main.c | 8 ++-- drivers/net/wireless/marvell/mwifiex/pcie.c | 42 +++++++++++++++++++++ drivers/net/wireless/marvell/mwifiex/pcie.h | 2 + 3 files changed, 49 insertions(+), 3 deletions(-) diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c index 3cfa94677a8e..04b975cbb330 100644 --- a/drivers/net/wireless/marvell/mwifiex/main.c +++ b/drivers/net/wireless/marvell/mwifiex/main.c @@ -1074,12 +1074,14 @@ void mwifiex_drv_info_dump(struct mwifiex_adapter *adapter) priv->netdev->name, priv->num_tx_timeout); } - if (adapter->iface_type == MWIFIEX_SDIO) { - p += sprintf(p, "\n=== SDIO register dump===\n"); + if (adapter->iface_type == MWIFIEX_SDIO || + adapter->iface_type == MWIFIEX_PCIE) { + p += sprintf(p, "\n=== %s register dump===\n", + adapter->iface_type == MWIFIEX_SDIO ? 
+ "SDIO" : "PCIE"); if (adapter->if_ops.reg_dump) p += adapter->if_ops.reg_dump(adapter, p); } - p += sprintf(p, "\n=== more debug information\n"); debug_info = kzalloc(sizeof(*debug_info), GFP_KERNEL); if (debug_info) { diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c index 6a06ca5c3eb1..edf8b070f665 100644 --- a/drivers/net/wireless/marvell/mwifiex/pcie.c +++ b/drivers/net/wireless/marvell/mwifiex/pcie.c @@ -2355,6 +2355,47 @@ static int mwifiex_pcie_host_to_card(struct mwifiex_adapter *adapter, u8 type, return 0; } +/* Function to dump PCIE scratch registers in case of FW crash + */ +static int +mwifiex_pcie_reg_dump(struct mwifiex_adapter *adapter, char *drv_buf) +{ + char *p = drv_buf; + char buf[256], *ptr; + int i; + u32 value; + struct pcie_service_card *card = adapter->card; + const struct mwifiex_pcie_card_reg *reg = card->pcie.reg; + int pcie_scratch_reg[] = {PCIE_SCRATCH_12_REG, + PCIE_SCRATCH_13_REG, + PCIE_SCRATCH_14_REG}; + + if (!p) + return 0; + + mwifiex_dbg(adapter, MSG, "PCIE register dump start\n"); + + if (mwifiex_read_reg(adapter, reg->fw_status, &value)) { + mwifiex_dbg(adapter, ERROR, "failed to read firmware status"); + return 0; + } + + ptr = buf; + mwifiex_dbg(adapter, MSG, "pcie scratch register:"); + for (i = 0; i < ARRAY_SIZE(pcie_scratch_reg); i++) { + mwifiex_read_reg(adapter, pcie_scratch_reg[i], &value); + ptr += sprintf(ptr, "reg:0x%x, value=0x%x\n", + pcie_scratch_reg[i], value); + } + + mwifiex_dbg(adapter, MSG, "%s\n", buf); + p += sprintf(p, "%s\n", buf); + + mwifiex_dbg(adapter, MSG, "PCIE register dump end\n"); + + return p - drv_buf; +} + /* This function read/write firmware */ static enum rdwr_status mwifiex_pcie_rdwr_firmware(struct mwifiex_adapter *adapter, u8 doneflag) @@ -2899,6 +2940,7 @@ static struct mwifiex_if_ops pcie_ops = { .cleanup_mpa_buf = NULL, .init_fw_port = mwifiex_pcie_init_fw_port, .clean_pcie_ring = mwifiex_clean_pcie_ring_buf, + .reg_dump = mwifiex_pcie_reg_dump, .device_dump = mwifiex_pcie_device_dump, }; diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.h b/drivers/net/wireless/marvell/mwifiex/pcie.h index 4455d1905d94..cc7a5df903be 100644 --- a/drivers/net/wireless/marvell/mwifiex/pcie.h +++ b/drivers/net/wireless/marvell/mwifiex/pcie.h @@ -73,6 +73,8 @@ #define PCIE_SCRATCH_10_REG 0xCE8 #define PCIE_SCRATCH_11_REG 0xCEC #define PCIE_SCRATCH_12_REG 0xCF0 +#define PCIE_SCRATCH_13_REG 0xCF8 +#define PCIE_SCRATCH_14_REG 0xCFC #define PCIE_RD_DATA_PTR_Q0_Q1 0xC08C #define PCIE_WR_DATA_PTR_Q0_Q1 0xC05C From 85f1e7c29a46360b1b5f9cf87af6b27066c345fd Mon Sep 17 00:00:00 2001 From: Haishuang Yan Date: Sun, 3 Apr 2016 22:03:33 +0800 Subject: [PATCH 0431/1649] netfilter: ipv6: unnecessary to check whether ip6_route_output() returns NULL ip6_route_output() never returns NULL, so it is not appropriate to check if the return value is NULL. 
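Since ip6_route_output() always returns a dst entry and reports failure through dst->error, the NULL test is dead code; only the error field needs checking, and the reference still has to be dropped on failure. A minimal sketch of the resulting pattern:

	#include <net/dst.h>
	#include <net/ip6_route.h>

	static struct dst_entry *demo_route_output(struct net *net, struct flowi6 *fl6)
	{
		struct dst_entry *dst = ip6_route_output(net, NULL, fl6);

		if (dst->error) {
			dst_release(dst);	/* error routes still hold a reference */
			return NULL;
		}
		return dst;
	}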
Signed-off-by: Haishuang Yan Signed-off-by: Pablo Neira Ayuso --- net/ipv6/netfilter/nf_reject_ipv6.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/ipv6/netfilter/nf_reject_ipv6.c b/net/ipv6/netfilter/nf_reject_ipv6.c index 4709f657b7b6..a5400223fd74 100644 --- a/net/ipv6/netfilter/nf_reject_ipv6.c +++ b/net/ipv6/netfilter/nf_reject_ipv6.c @@ -158,7 +158,7 @@ void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook) fl6.fl6_dport = otcph->source; security_skb_classify_flow(oldskb, flowi6_to_flowi(&fl6)); dst = ip6_route_output(net, NULL, &fl6); - if (dst == NULL || dst->error) { + if (dst->error) { dst_release(dst); return; } From 61881cfb5ad80c1d0a46ca6d08b7e271892b2ff6 Mon Sep 17 00:00:00 2001 From: Hannes Frederic Sowa Date: Tue, 5 Apr 2016 17:10:14 +0200 Subject: [PATCH 0432/1649] sock: fix lockdep annotation in release_sock During release_sock we use callbacks to finish the processing of outstanding skbs on the socket. We actually are still locked, sk_locked.owned == 1, but we already told lockdep that the mutex is released. This could lead to false positives in lockdep for lockdep_sock_is_held (we don't hold the slock spinlock during processing the outstanding skbs). I took over this patch from Eric Dumazet and tested it. Signed-off-by: Eric Dumazet Signed-off-by: Hannes Frederic Sowa Signed-off-by: David S. Miller --- include/net/sock.h | 7 ++++++- net/core/sock.c | 5 ----- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/include/net/sock.h b/include/net/sock.h index 1decb7a22261..91cee51086dc 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -1333,7 +1333,12 @@ static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb) static inline void sock_release_ownership(struct sock *sk) { - sk->sk_lock.owned = 0; + if (sk->sk_lock.owned) { + sk->sk_lock.owned = 0; + + /* The sk_lock has mutex_unlock() semantics: */ + mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_); + } } /* diff --git a/net/core/sock.c b/net/core/sock.c index 2ce76e82857f..152274d188ef 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -2483,11 +2483,6 @@ EXPORT_SYMBOL(lock_sock_nested); void release_sock(struct sock *sk) { - /* - * The sk_lock has mutex_unlock() semantics: - */ - mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_); - spin_lock_bh(&sk->sk_lock.slock); if (sk->sk_backlog.tail) __release_sock(sk); From 1e1d04e678cf72442f57ce82803c7a407769135f Mon Sep 17 00:00:00 2001 From: Hannes Frederic Sowa Date: Tue, 5 Apr 2016 17:10:15 +0200 Subject: [PATCH 0433/1649] net: introduce lockdep_is_held and update various places to use it The socket is either locked if we hold the slock spin_lock for lock_sock_fast and unlock_sock_fast or we own the lock (sk_lock.owned != 0). Check for this and at the same time improve that the current thread/cpu is really holding the lock. Signed-off-by: Hannes Frederic Sowa Signed-off-by: David S. 
Miller --- include/net/sock.h | 12 ++++++++++-- net/dccp/ipv4.c | 2 +- net/dccp/ipv6.c | 2 +- net/ipv4/af_inet.c | 2 +- net/ipv4/cipso_ipv4.c | 3 ++- net/ipv4/ip_sockglue.c | 4 ++-- net/ipv4/tcp_ipv4.c | 8 +++----- net/ipv6/ipv6_sockglue.c | 6 ++++-- net/ipv6/tcp_ipv6.c | 2 +- net/socket.c | 2 +- 10 files changed, 26 insertions(+), 17 deletions(-) diff --git a/include/net/sock.h b/include/net/sock.h index 91cee51086dc..eb2d7c3e120b 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -1360,6 +1360,14 @@ do { \ lockdep_init_map(&(sk)->sk_lock.dep_map, (name), (key), 0); \ } while (0) +static bool lockdep_sock_is_held(const struct sock *csk) +{ + struct sock *sk = (struct sock *)csk; + + return lockdep_is_held(&sk->sk_lock) || + lockdep_is_held(&sk->sk_lock.slock); +} + void lock_sock_nested(struct sock *sk, int subclass); static inline void lock_sock(struct sock *sk) @@ -1598,8 +1606,8 @@ static inline void sk_rethink_txhash(struct sock *sk) static inline struct dst_entry * __sk_dst_get(struct sock *sk) { - return rcu_dereference_check(sk->sk_dst_cache, sock_owned_by_user(sk) || - lockdep_is_held(&sk->sk_lock.slock)); + return rcu_dereference_check(sk->sk_dst_cache, + lockdep_sock_is_held(sk)); } static inline struct dst_entry * diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c index 6438c5a7efc4..f6d183f8f332 100644 --- a/net/dccp/ipv4.c +++ b/net/dccp/ipv4.c @@ -62,7 +62,7 @@ int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) nexthop = daddr = usin->sin_addr.s_addr; inet_opt = rcu_dereference_protected(inet->inet_opt, - sock_owned_by_user(sk)); + lockdep_sock_is_held(sk)); if (inet_opt != NULL && inet_opt->opt.srr) { if (daddr == 0) return -EINVAL; diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c index 71bf1deba4c5..8ceb3cebcad4 100644 --- a/net/dccp/ipv6.c +++ b/net/dccp/ipv6.c @@ -868,7 +868,7 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr, fl6.fl6_sport = inet->inet_sport; security_sk_classify_flow(sk, flowi6_to_flowi(&fl6)); - opt = rcu_dereference_protected(np->opt, sock_owned_by_user(sk)); + opt = rcu_dereference_protected(np->opt, lockdep_sock_is_held(sk)); final_p = fl6_update_dst(&fl6, opt, &final); dst = ip6_dst_lookup_flow(sk, &fl6, final_p); diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index a38b9910af60..8217cd22f921 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -1107,7 +1107,7 @@ static int inet_sk_reselect_saddr(struct sock *sk) struct ip_options_rcu *inet_opt; inet_opt = rcu_dereference_protected(inet->inet_opt, - sock_owned_by_user(sk)); + lockdep_sock_is_held(sk)); if (inet_opt && inet_opt->opt.srr) daddr = inet_opt->opt.faddr; diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c index bdb2a07ec363..40d6b87713a1 100644 --- a/net/ipv4/cipso_ipv4.c +++ b/net/ipv4/cipso_ipv4.c @@ -1933,7 +1933,8 @@ int cipso_v4_sock_setattr(struct sock *sk, sk_inet = inet_sk(sk); - old = rcu_dereference_protected(sk_inet->inet_opt, sock_owned_by_user(sk)); + old = rcu_dereference_protected(sk_inet->inet_opt, + lockdep_sock_is_held(sk)); if (sk_inet->is_icsk) { sk_conn = inet_csk(sk); if (old) diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index 1b7c0776c805..89b5f3bd6694 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c @@ -642,7 +642,7 @@ static int do_ip_setsockopt(struct sock *sk, int level, if (err) break; old = rcu_dereference_protected(inet->inet_opt, - sock_owned_by_user(sk)); + lockdep_sock_is_held(sk)); if (inet->is_icsk) { struct inet_connection_sock *icsk = inet_csk(sk); #if 
IS_ENABLED(CONFIG_IPV6) @@ -1302,7 +1302,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname, struct ip_options_rcu *inet_opt; inet_opt = rcu_dereference_protected(inet->inet_opt, - sock_owned_by_user(sk)); + lockdep_sock_is_held(sk)); opt->optlen = 0; if (inet_opt) memcpy(optbuf, &inet_opt->opt, diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 456ff3d6a132..f4f2a0a3849d 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -157,7 +157,7 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) nexthop = daddr = usin->sin_addr.s_addr; inet_opt = rcu_dereference_protected(inet->inet_opt, - sock_owned_by_user(sk)); + lockdep_sock_is_held(sk)); if (inet_opt && inet_opt->opt.srr) { if (!daddr) return -EINVAL; @@ -882,8 +882,7 @@ struct tcp_md5sig_key *tcp_md5_do_lookup(const struct sock *sk, /* caller either holds rcu_read_lock() or socket lock */ md5sig = rcu_dereference_check(tp->md5sig_info, - sock_owned_by_user(sk) || - lockdep_is_held((spinlock_t *)&sk->sk_lock.slock)); + lockdep_sock_is_held(sk)); if (!md5sig) return NULL; #if IS_ENABLED(CONFIG_IPV6) @@ -928,8 +927,7 @@ int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr, } md5sig = rcu_dereference_protected(tp->md5sig_info, - sock_owned_by_user(sk) || - lockdep_is_held(&sk->sk_lock.slock)); + lockdep_sock_is_held(sk)); if (!md5sig) { md5sig = kmalloc(sizeof(*md5sig), gfp); if (!md5sig) diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c index a5557d22f89e..4ff4b29894eb 100644 --- a/net/ipv6/ipv6_sockglue.c +++ b/net/ipv6/ipv6_sockglue.c @@ -407,7 +407,8 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname, if (optname != IPV6_RTHDR && !ns_capable(net->user_ns, CAP_NET_RAW)) break; - opt = rcu_dereference_protected(np->opt, sock_owned_by_user(sk)); + opt = rcu_dereference_protected(np->opt, + lockdep_sock_is_held(sk)); opt = ipv6_renew_options(sk, opt, optname, (struct ipv6_opt_hdr __user *)optval, optlen); @@ -1124,7 +1125,8 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname, struct ipv6_txoptions *opt; lock_sock(sk); - opt = rcu_dereference_protected(np->opt, sock_owned_by_user(sk)); + opt = rcu_dereference_protected(np->opt, + lockdep_sock_is_held(sk)); len = ipv6_getsockopt_sticky(sk, opt, optname, optval, len); release_sock(sk); /* check if ipv6_getsockopt_sticky() returns err code */ diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 7cde1b6fdda3..0e621bc1ae11 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -234,7 +234,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr, fl6.fl6_dport = usin->sin6_port; fl6.fl6_sport = inet->inet_sport; - opt = rcu_dereference_protected(np->opt, sock_owned_by_user(sk)); + opt = rcu_dereference_protected(np->opt, lockdep_sock_is_held(sk)); final_p = fl6_update_dst(&fl6, opt, &final); security_sk_classify_flow(sk, flowi6_to_flowi(&fl6)); diff --git a/net/socket.c b/net/socket.c index 979d3146b081..afa3c3470717 100644 --- a/net/socket.c +++ b/net/socket.c @@ -1046,7 +1046,7 @@ static int sock_fasync(int fd, struct file *filp, int on) return -EINVAL; lock_sock(sk); - wq = rcu_dereference_protected(sock->wq, sock_owned_by_user(sk)); + wq = rcu_dereference_protected(sock->wq, lockdep_sock_is_held(sk)); fasync_helper(fd, filp, on, &wq->fasync_list); if (!wq->fasync_list) From 8ced425ee630c03beea06c1dfa35190bf8395d07 Mon Sep 17 00:00:00 2001 From: Hannes Frederic Sowa Date: Tue, 5 Apr 2016 17:10:16 +0200 Subject: [PATCH 0434/1649] tun: 
use socket locks for sk_{attach,detatch}_filter This reverts commit 5a5abb1fa3b05dd ("tun, bpf: fix suspicious RCU usage in tun_{attach, detach}_filter") and replaces it to use lock_sock around sk_{attach,detach}_filter. The checks inside filter.c are updated with lockdep_sock_is_held to check for proper socket locks. It keeps the code cleaner by ensuring that only one lock governs the socket filter instead of two independent locks. Cc: Daniel Borkmann Signed-off-by: Hannes Frederic Sowa Signed-off-by: David S. Miller --- drivers/net/tun.c | 14 +++++++++----- include/linux/filter.h | 4 ---- net/core/filter.c | 35 +++++++++++++---------------------- 3 files changed, 22 insertions(+), 31 deletions(-) diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 9abc36bf77ea..64bc143eddd9 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -622,8 +622,9 @@ static int tun_attach(struct tun_struct *tun, struct file *file, bool skip_filte /* Re-attach the filter to persist device */ if (!skip_filter && (tun->filter_attached == true)) { - err = __sk_attach_filter(&tun->fprog, tfile->socket.sk, - lockdep_rtnl_is_held()); + lock_sock(tfile->socket.sk); + err = sk_attach_filter(&tun->fprog, tfile->socket.sk); + release_sock(tfile->socket.sk); if (!err) goto out; } @@ -1824,7 +1825,9 @@ static void tun_detach_filter(struct tun_struct *tun, int n) for (i = 0; i < n; i++) { tfile = rtnl_dereference(tun->tfiles[i]); - __sk_detach_filter(tfile->socket.sk, lockdep_rtnl_is_held()); + lock_sock(tfile->socket.sk); + sk_detach_filter(tfile->socket.sk); + release_sock(tfile->socket.sk); } tun->filter_attached = false; @@ -1837,8 +1840,9 @@ static int tun_attach_filter(struct tun_struct *tun) for (i = 0; i < tun->numqueues; i++) { tfile = rtnl_dereference(tun->tfiles[i]); - ret = __sk_attach_filter(&tun->fprog, tfile->socket.sk, - lockdep_rtnl_is_held()); + lock_sock(tfile->socket.sk); + ret = sk_attach_filter(&tun->fprog, tfile->socket.sk); + release_sock(tfile->socket.sk); if (ret) { tun_detach_filter(tun, i); return ret; diff --git a/include/linux/filter.h b/include/linux/filter.h index a51a5361695f..43aa1f8855c7 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -465,14 +465,10 @@ int bpf_prog_create_from_user(struct bpf_prog **pfp, struct sock_fprog *fprog, void bpf_prog_destroy(struct bpf_prog *fp); int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk); -int __sk_attach_filter(struct sock_fprog *fprog, struct sock *sk, - bool locked); int sk_attach_bpf(u32 ufd, struct sock *sk); int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk); int sk_reuseport_attach_bpf(u32 ufd, struct sock *sk); int sk_detach_filter(struct sock *sk); -int __sk_detach_filter(struct sock *sk, bool locked); - int sk_get_filter(struct sock *sk, struct sock_filter __user *filter, unsigned int len); diff --git a/net/core/filter.c b/net/core/filter.c index ca7f832b2980..e8486ba601ea 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -1149,8 +1149,7 @@ void bpf_prog_destroy(struct bpf_prog *fp) } EXPORT_SYMBOL_GPL(bpf_prog_destroy); -static int __sk_attach_prog(struct bpf_prog *prog, struct sock *sk, - bool locked) +static int __sk_attach_prog(struct bpf_prog *prog, struct sock *sk) { struct sk_filter *fp, *old_fp; @@ -1166,8 +1165,10 @@ static int __sk_attach_prog(struct bpf_prog *prog, struct sock *sk, return -ENOMEM; } - old_fp = rcu_dereference_protected(sk->sk_filter, locked); + old_fp = rcu_dereference_protected(sk->sk_filter, + lockdep_sock_is_held(sk)); 
rcu_assign_pointer(sk->sk_filter, fp); + if (old_fp) sk_filter_uncharge(sk, old_fp); @@ -1246,8 +1247,7 @@ struct bpf_prog *__get_filter(struct sock_fprog *fprog, struct sock *sk) * occurs or there is insufficient memory for the filter a negative * errno code is returned. On success the return is zero. */ -int __sk_attach_filter(struct sock_fprog *fprog, struct sock *sk, - bool locked) +int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk) { struct bpf_prog *prog = __get_filter(fprog, sk); int err; @@ -1255,7 +1255,7 @@ int __sk_attach_filter(struct sock_fprog *fprog, struct sock *sk, if (IS_ERR(prog)) return PTR_ERR(prog); - err = __sk_attach_prog(prog, sk, locked); + err = __sk_attach_prog(prog, sk); if (err < 0) { __bpf_prog_release(prog); return err; @@ -1263,12 +1263,7 @@ int __sk_attach_filter(struct sock_fprog *fprog, struct sock *sk, return 0; } -EXPORT_SYMBOL_GPL(__sk_attach_filter); - -int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk) -{ - return __sk_attach_filter(fprog, sk, sock_owned_by_user(sk)); -} +EXPORT_SYMBOL_GPL(sk_attach_filter); int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk) { @@ -1314,7 +1309,7 @@ int sk_attach_bpf(u32 ufd, struct sock *sk) if (IS_ERR(prog)) return PTR_ERR(prog); - err = __sk_attach_prog(prog, sk, sock_owned_by_user(sk)); + err = __sk_attach_prog(prog, sk); if (err < 0) { bpf_prog_put(prog); return err; @@ -2255,7 +2250,7 @@ static int __init register_sk_filter_ops(void) } late_initcall(register_sk_filter_ops); -int __sk_detach_filter(struct sock *sk, bool locked) +int sk_detach_filter(struct sock *sk) { int ret = -ENOENT; struct sk_filter *filter; @@ -2263,7 +2258,8 @@ int __sk_detach_filter(struct sock *sk, bool locked) if (sock_flag(sk, SOCK_FILTER_LOCKED)) return -EPERM; - filter = rcu_dereference_protected(sk->sk_filter, locked); + filter = rcu_dereference_protected(sk->sk_filter, + lockdep_sock_is_held(sk)); if (filter) { RCU_INIT_POINTER(sk->sk_filter, NULL); sk_filter_uncharge(sk, filter); @@ -2272,12 +2268,7 @@ int __sk_detach_filter(struct sock *sk, bool locked) return ret; } -EXPORT_SYMBOL_GPL(__sk_detach_filter); - -int sk_detach_filter(struct sock *sk) -{ - return __sk_detach_filter(sk, sock_owned_by_user(sk)); -} +EXPORT_SYMBOL_GPL(sk_detach_filter); int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf, unsigned int len) @@ -2288,7 +2279,7 @@ int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf, lock_sock(sk); filter = rcu_dereference_protected(sk->sk_filter, - sock_owned_by_user(sk)); + lockdep_sock_is_held(sk)); if (!filter) goto out; From 0340d0b9e0e2dc340acb664f19d6550940b22cde Mon Sep 17 00:00:00 2001 From: Tom Herbert Date: Tue, 5 Apr 2016 08:22:49 -0700 Subject: [PATCH 0435/1649] net: Checks skb_dst to be NULL in inet_iif In inet_iif check if skb_rtable is NULL for the skb and return skb->skb_iif if it is. This change allows inet_iif to be called before the dst information has been set in the skb (e.g. when doing socket based UDP GRO). Signed-off-by: Tom Herbert Signed-off-by: David S. 
Miller --- include/net/route.h | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/include/net/route.h b/include/net/route.h index 9b0a523bb428..f4b11eee1754 100644 --- a/include/net/route.h +++ b/include/net/route.h @@ -322,10 +322,11 @@ static inline struct rtable *ip_route_newports(struct flowi4 *fl4, struct rtable static inline int inet_iif(const struct sk_buff *skb) { - int iif = skb_rtable(skb)->rt_iif; + struct rtable *rt = skb_rtable(skb); + + if (rt && rt->rt_iif) + return rt->rt_iif; - if (iif) - return iif; return skb->skb_iif; } From 63058308cd55182bbfd7a87970bd57883fcfbd2e Mon Sep 17 00:00:00 2001 From: Tom Herbert Date: Tue, 5 Apr 2016 08:22:50 -0700 Subject: [PATCH 0436/1649] udp: Add udp6_lib_lookup_skb and udp4_lib_lookup_skb Add externally visible functions to lookup a UDP socket by skb. This will be used for GRO in UDP sockets. These functions also check if skb->dst is set, and if it is not skb->dev is used to get dev_net. This allows calling lookup functions before dst has been set on the skbuff. Signed-off-by: Tom Herbert Signed-off-by: David S. Miller --- include/net/udp.h | 4 ++++ net/ipv4/udp.c | 13 +++++++++++++ net/ipv6/udp.c | 13 +++++++++++++ 3 files changed, 30 insertions(+) diff --git a/include/net/udp.h b/include/net/udp.h index a0b0da97164c..3aa0b3ec1fb0 100644 --- a/include/net/udp.h +++ b/include/net/udp.h @@ -269,6 +269,8 @@ struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport, struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport, __be32 daddr, __be16 dport, int dif, struct udp_table *tbl, struct sk_buff *skb); +struct sock *udp4_lib_lookup_skb(struct sk_buff *skb, + __be16 sport, __be16 dport); struct sock *udp6_lib_lookup(struct net *net, const struct in6_addr *saddr, __be16 sport, const struct in6_addr *daddr, __be16 dport, @@ -278,6 +280,8 @@ struct sock *__udp6_lib_lookup(struct net *net, const struct in6_addr *daddr, __be16 dport, int dif, struct udp_table *tbl, struct sk_buff *skb); +struct sock *udp6_lib_lookup_skb(struct sk_buff *skb, + __be16 sport, __be16 dport); /* * SNMP statistics for UDP and UDP-Lite diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index d80312ddbb8a..3563788d064f 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -604,6 +604,19 @@ static inline struct sock *__udp4_lib_lookup_skb(struct sk_buff *skb, udptable, skb); } +struct sock *udp4_lib_lookup_skb(struct sk_buff *skb, + __be16 sport, __be16 dport) +{ + const struct iphdr *iph = ip_hdr(skb); + const struct net_device *dev = + skb_dst(skb) ? skb_dst(skb)->dev : skb->dev; + + return __udp4_lib_lookup(dev_net(dev), iph->saddr, sport, + iph->daddr, dport, inet_iif(skb), + &udp_table, skb); +} +EXPORT_SYMBOL_GPL(udp4_lib_lookup_skb); + /* Must be called under rcu_read_lock(). * Does increment socket refcount. */ diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 87bd7aff88b4..a050b70b9101 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -326,6 +326,19 @@ static struct sock *__udp6_lib_lookup_skb(struct sk_buff *skb, udptable, skb); } +struct sock *udp6_lib_lookup_skb(struct sk_buff *skb, + __be16 sport, __be16 dport) +{ + const struct ipv6hdr *iph = ipv6_hdr(skb); + const struct net_device *dev = + skb_dst(skb) ? skb_dst(skb)->dev : skb->dev; + + return __udp6_lib_lookup(dev_net(dev), &iph->saddr, sport, + &iph->daddr, dport, inet6_iif(skb), + &udp_table, skb); +} +EXPORT_SYMBOL_GPL(udp6_lib_lookup_skb); + /* Must be called under rcu_read_lock(). * Does increment socket refcount. 
*/ From a6024562ffd7e0f31bc6671817840ad1e91de7b4 Mon Sep 17 00:00:00 2001 From: Tom Herbert Date: Tue, 5 Apr 2016 08:22:51 -0700 Subject: [PATCH 0437/1649] udp: Add GRO functions to UDP socket This patch adds GRO functions (gro_receive and gro_complete) to UDP sockets. udp_gro_receive is changed to perform socket lookup on a packet. If a socket is found the related GRO functions are called. This features obsoletes using UDP offload infrastructure for GRO (udp_offload). This has the advantage of not being limited to provide offload on a per port basis, GRO is now applied to whatever individual UDP sockets are bound to. This also allows the possbility of "application defined GRO"-- that is we can attach something like a BPF program to a UDP socket to perfrom GRO on an application layer protocol. Signed-off-by: Tom Herbert Signed-off-by: David S. Miller --- include/linux/udp.h | 8 +++++++ include/net/udp.h | 7 ++++-- net/ipv4/udp_offload.c | 52 ++++++++++++++++-------------------------- net/ipv6/Makefile | 5 ++-- net/ipv6/af_inet6.c | 8 +++++++ net/ipv6/ip6_offload.c | 2 -- net/ipv6/ip6_offload.h | 3 ++- net/ipv6/udp_offload.c | 11 ++++++--- 8 files changed, 54 insertions(+), 42 deletions(-) diff --git a/include/linux/udp.h b/include/linux/udp.h index 32342754643a..d1fd8cd39478 100644 --- a/include/linux/udp.h +++ b/include/linux/udp.h @@ -71,6 +71,14 @@ struct udp_sock { */ int (*encap_rcv)(struct sock *sk, struct sk_buff *skb); void (*encap_destroy)(struct sock *sk); + + /* GRO functions for UDP socket */ + struct sk_buff ** (*gro_receive)(struct sock *sk, + struct sk_buff **head, + struct sk_buff *skb); + int (*gro_complete)(struct sock *sk, + struct sk_buff *skb, + int nhoff); }; static inline struct udp_sock *udp_sk(const struct sock *sk) diff --git a/include/net/udp.h b/include/net/udp.h index 3aa0b3ec1fb0..3c5a65e0946d 100644 --- a/include/net/udp.h +++ b/include/net/udp.h @@ -167,9 +167,12 @@ static inline void udp_csum_pull_header(struct sk_buff *skb) UDP_SKB_CB(skb)->cscov -= sizeof(struct udphdr); } +typedef struct sock *(*udp_lookup_t)(struct sk_buff *skb, __be16 sport, + __be16 dport); + struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *skb, - struct udphdr *uh); -int udp_gro_complete(struct sk_buff *skb, int nhoff); + struct udphdr *uh, udp_lookup_t lookup); +int udp_gro_complete(struct sk_buff *skb, int nhoff, udp_lookup_t lookup); static inline struct udphdr *udp_gro_udphdr(struct sk_buff *skb) { diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c index 0ed2dafb7cc4..65c3fd34b363 100644 --- a/net/ipv4/udp_offload.c +++ b/net/ipv4/udp_offload.c @@ -179,6 +179,7 @@ out_unlock: return segs; } +EXPORT_SYMBOL(skb_udp_tunnel_segment); static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb, netdev_features_t features) @@ -304,13 +305,13 @@ unlock: EXPORT_SYMBOL(udp_del_offload); struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *skb, - struct udphdr *uh) + struct udphdr *uh, udp_lookup_t lookup) { - struct udp_offload_priv *uo_priv; struct sk_buff *p, **pp = NULL; struct udphdr *uh2; unsigned int off = skb_gro_offset(skb); int flush = 1; + struct sock *sk; if (NAPI_GRO_CB(skb)->encap_mark || (skb->ip_summed != CHECKSUM_PARTIAL && @@ -322,13 +323,11 @@ struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *skb, NAPI_GRO_CB(skb)->encap_mark = 1; rcu_read_lock(); - uo_priv = rcu_dereference(udp_offload_base); - for (; uo_priv != NULL; uo_priv = rcu_dereference(uo_priv->next)) { - if 
(net_eq(read_pnet(&uo_priv->net), dev_net(skb->dev)) && - uo_priv->offload->port == uh->dest && - uo_priv->offload->callbacks.gro_receive) - goto unflush; - } + sk = (*lookup)(skb, uh->source, uh->dest); + + if (sk && udp_sk(sk)->gro_receive) + goto unflush; + goto out_unlock; unflush: @@ -352,9 +351,7 @@ unflush: skb_gro_pull(skb, sizeof(struct udphdr)); /* pull encapsulating udp header */ skb_gro_postpull_rcsum(skb, uh, sizeof(struct udphdr)); - NAPI_GRO_CB(skb)->proto = uo_priv->offload->ipproto; - pp = uo_priv->offload->callbacks.gro_receive(head, skb, - uo_priv->offload); + pp = udp_sk(sk)->gro_receive(sk, head, skb); out_unlock: rcu_read_unlock(); @@ -362,6 +359,7 @@ out: NAPI_GRO_CB(skb)->flush |= flush; return pp; } +EXPORT_SYMBOL(udp_gro_receive); static struct sk_buff **udp4_gro_receive(struct sk_buff **head, struct sk_buff *skb) @@ -383,39 +381,28 @@ static struct sk_buff **udp4_gro_receive(struct sk_buff **head, inet_gro_compute_pseudo); skip: NAPI_GRO_CB(skb)->is_ipv6 = 0; - return udp_gro_receive(head, skb, uh); + return udp_gro_receive(head, skb, uh, udp4_lib_lookup_skb); flush: NAPI_GRO_CB(skb)->flush = 1; return NULL; } -int udp_gro_complete(struct sk_buff *skb, int nhoff) +int udp_gro_complete(struct sk_buff *skb, int nhoff, + udp_lookup_t lookup) { - struct udp_offload_priv *uo_priv; __be16 newlen = htons(skb->len - nhoff); struct udphdr *uh = (struct udphdr *)(skb->data + nhoff); int err = -ENOSYS; + struct sock *sk; uh->len = newlen; rcu_read_lock(); - - uo_priv = rcu_dereference(udp_offload_base); - for (; uo_priv != NULL; uo_priv = rcu_dereference(uo_priv->next)) { - if (net_eq(read_pnet(&uo_priv->net), dev_net(skb->dev)) && - uo_priv->offload->port == uh->dest && - uo_priv->offload->callbacks.gro_complete) - break; - } - - if (uo_priv) { - NAPI_GRO_CB(skb)->proto = uo_priv->offload->ipproto; - err = uo_priv->offload->callbacks.gro_complete(skb, - nhoff + sizeof(struct udphdr), - uo_priv->offload); - } - + sk = (*lookup)(skb, uh->source, uh->dest); + if (sk && udp_sk(sk)->gro_complete) + err = udp_sk(sk)->gro_complete(sk, skb, + nhoff + sizeof(struct udphdr)); rcu_read_unlock(); if (skb->remcsum_offload) @@ -426,6 +413,7 @@ int udp_gro_complete(struct sk_buff *skb, int nhoff) return err; } +EXPORT_SYMBOL(udp_gro_complete); static int udp4_gro_complete(struct sk_buff *skb, int nhoff) { @@ -440,7 +428,7 @@ static int udp4_gro_complete(struct sk_buff *skb, int nhoff) skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL; } - return udp_gro_complete(skb, nhoff); + return udp_gro_complete(skb, nhoff, udp4_lib_lookup_skb); } static const struct net_offload udpv4_offload = { diff --git a/net/ipv6/Makefile b/net/ipv6/Makefile index 2fbd90bf8d33..5e9d6bf4aaca 100644 --- a/net/ipv6/Makefile +++ b/net/ipv6/Makefile @@ -8,9 +8,10 @@ ipv6-objs := af_inet6.o anycast.o ip6_output.o ip6_input.o addrconf.o \ addrlabel.o \ route.o ip6_fib.o ipv6_sockglue.o ndisc.o udp.o udplite.o \ raw.o icmp.o mcast.o reassembly.o tcp_ipv6.o ping.o \ - exthdrs.o datagram.o ip6_flowlabel.o inet6_connection_sock.o + exthdrs.o datagram.o ip6_flowlabel.o inet6_connection_sock.o \ + udp_offload.o -ipv6-offload := ip6_offload.o tcpv6_offload.o udp_offload.o exthdrs_offload.o +ipv6-offload := ip6_offload.o tcpv6_offload.o exthdrs_offload.o ipv6-$(CONFIG_SYSCTL) = sysctl_net_ipv6.o ipv6-$(CONFIG_IPV6_MROUTE) += ip6mr.o diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index 2b78aad0d52f..bfa86f040c16 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c @@ -64,6 +64,8 @@ #include #include +#include 
"ip6_offload.h" + MODULE_AUTHOR("Cast of dozens"); MODULE_DESCRIPTION("IPv6 protocol stack for Linux"); MODULE_LICENSE("GPL"); @@ -959,6 +961,10 @@ static int __init inet6_init(void) if (err) goto udplitev6_fail; + err = udpv6_offload_init(); + if (err) + goto udpv6_offload_fail; + err = tcpv6_init(); if (err) goto tcpv6_fail; @@ -988,6 +994,8 @@ pingv6_fail: ipv6_packet_fail: tcpv6_exit(); tcpv6_fail: + udpv6_offload_exit(); +udpv6_offload_fail: udplitev6_exit(); udplitev6_fail: udpv6_exit(); diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c index 82e9f3076028..204af2219471 100644 --- a/net/ipv6/ip6_offload.c +++ b/net/ipv6/ip6_offload.c @@ -325,8 +325,6 @@ static int __init ipv6_offload_init(void) if (tcpv6_offload_init() < 0) pr_crit("%s: Cannot add TCP protocol offload\n", __func__); - if (udp_offload_init() < 0) - pr_crit("%s: Cannot add UDP protocol offload\n", __func__); if (ipv6_exthdrs_offload_init() < 0) pr_crit("%s: Cannot add EXTHDRS protocol offload\n", __func__); diff --git a/net/ipv6/ip6_offload.h b/net/ipv6/ip6_offload.h index 2e155c651b35..96b40e41ac53 100644 --- a/net/ipv6/ip6_offload.h +++ b/net/ipv6/ip6_offload.h @@ -12,7 +12,8 @@ #define __ip6_offload_h int ipv6_exthdrs_offload_init(void); -int udp_offload_init(void); +int udpv6_offload_init(void); +int udpv6_offload_exit(void); int tcpv6_offload_init(void); #endif diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c index 2b0fbe6929e8..5429f6bcf047 100644 --- a/net/ipv6/udp_offload.c +++ b/net/ipv6/udp_offload.c @@ -153,7 +153,7 @@ static struct sk_buff **udp6_gro_receive(struct sk_buff **head, skip: NAPI_GRO_CB(skb)->is_ipv6 = 1; - return udp_gro_receive(head, skb, uh); + return udp_gro_receive(head, skb, uh, udp6_lib_lookup_skb); flush: NAPI_GRO_CB(skb)->flush = 1; @@ -173,7 +173,7 @@ static int udp6_gro_complete(struct sk_buff *skb, int nhoff) skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL; } - return udp_gro_complete(skb, nhoff); + return udp_gro_complete(skb, nhoff, udp6_lib_lookup_skb); } static const struct net_offload udpv6_offload = { @@ -184,7 +184,12 @@ static const struct net_offload udpv6_offload = { }, }; -int __init udp_offload_init(void) +int udpv6_offload_init(void) { return inet6_add_offload(&udpv6_offload, IPPROTO_UDP); } + +int udpv6_offload_exit(void) +{ + return inet6_del_offload(&udpv6_offload, IPPROTO_UDP); +} From 38fd2af24fcfda93f9fea3e53f26e48775ae9e09 Mon Sep 17 00:00:00 2001 From: Tom Herbert Date: Tue, 5 Apr 2016 08:22:52 -0700 Subject: [PATCH 0438/1649] udp: Add socket based GRO and config Add gro_receive and gro_complete to struct udp_tunnel_sock_cfg. Signed-off-by: Tom Herbert Signed-off-by: David S. 
Miller --- include/net/udp_tunnel.h | 7 +++++++ net/ipv4/udp_tunnel.c | 2 ++ 2 files changed, 9 insertions(+) diff --git a/include/net/udp_tunnel.h b/include/net/udp_tunnel.h index b83114077cee..2dcf1de948ac 100644 --- a/include/net/udp_tunnel.h +++ b/include/net/udp_tunnel.h @@ -64,6 +64,11 @@ static inline int udp_sock_create(struct net *net, typedef int (*udp_tunnel_encap_rcv_t)(struct sock *sk, struct sk_buff *skb); typedef void (*udp_tunnel_encap_destroy_t)(struct sock *sk); +typedef struct sk_buff **(*udp_tunnel_gro_receive_t)(struct sock *sk, + struct sk_buff **head, + struct sk_buff *skb); +typedef int (*udp_tunnel_gro_complete_t)(struct sock *sk, struct sk_buff *skb, + int nhoff); struct udp_tunnel_sock_cfg { void *sk_user_data; /* user data used by encap_rcv call back */ @@ -71,6 +76,8 @@ struct udp_tunnel_sock_cfg { __u8 encap_type; udp_tunnel_encap_rcv_t encap_rcv; udp_tunnel_encap_destroy_t encap_destroy; + udp_tunnel_gro_receive_t gro_receive; + udp_tunnel_gro_complete_t gro_complete; }; /* Setup the given (UDP) sock to receive UDP encapsulated packets */ diff --git a/net/ipv4/udp_tunnel.c b/net/ipv4/udp_tunnel.c index 96599d1a1318..47f12c73d959 100644 --- a/net/ipv4/udp_tunnel.c +++ b/net/ipv4/udp_tunnel.c @@ -69,6 +69,8 @@ void setup_udp_tunnel_sock(struct net *net, struct socket *sock, udp_sk(sk)->encap_type = cfg->encap_type; udp_sk(sk)->encap_rcv = cfg->encap_rcv; udp_sk(sk)->encap_destroy = cfg->encap_destroy; + udp_sk(sk)->gro_receive = cfg->gro_receive; + udp_sk(sk)->gro_complete = cfg->gro_complete; udp_tunnel_encap_enable(sock); } From 5602c48cf87562c2f95b831d690631935e834295 Mon Sep 17 00:00:00 2001 From: Tom Herbert Date: Tue, 5 Apr 2016 08:22:53 -0700 Subject: [PATCH 0439/1649] vxlan: change vxlan to use UDP socket GRO Adapt vxlan_gro_receive, vxlan_gro_complete to take a socket argument. Set these functions in tunnel_config. Don't set udp_offloads any more. Signed-off-by: Tom Herbert Signed-off-by: David S. 
Miller --- drivers/net/vxlan.c | 30 ++++++++---------------------- include/net/vxlan.h | 1 - 2 files changed, 8 insertions(+), 23 deletions(-) diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 51cccddfe403..9f3634064c92 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -551,16 +551,15 @@ static struct vxlanhdr *vxlan_gro_remcsum(struct sk_buff *skb, return vh; } -static struct sk_buff **vxlan_gro_receive(struct sk_buff **head, - struct sk_buff *skb, - struct udp_offload *uoff) +static struct sk_buff **vxlan_gro_receive(struct sock *sk, + struct sk_buff **head, + struct sk_buff *skb) { struct sk_buff *p, **pp = NULL; struct vxlanhdr *vh, *vh2; unsigned int hlen, off_vx; int flush = 1; - struct vxlan_sock *vs = container_of(uoff, struct vxlan_sock, - udp_offloads); + struct vxlan_sock *vs = rcu_dereference_sk_user_data(sk); __be32 flags; struct gro_remcsum grc; @@ -613,8 +612,7 @@ out: return pp; } -static int vxlan_gro_complete(struct sk_buff *skb, int nhoff, - struct udp_offload *uoff) +static int vxlan_gro_complete(struct sock *sk, struct sk_buff *skb, int nhoff) { udp_tunnel_gro_complete(skb, nhoff); @@ -629,13 +627,6 @@ static void vxlan_notify_add_rx_port(struct vxlan_sock *vs) struct net *net = sock_net(sk); sa_family_t sa_family = vxlan_get_sk_family(vs); __be16 port = inet_sk(sk)->inet_sport; - int err; - - if (sa_family == AF_INET) { - err = udp_add_offload(net, &vs->udp_offloads); - if (err) - pr_warn("vxlan: udp_add_offload failed with status %d\n", err); - } rcu_read_lock(); for_each_netdev_rcu(net, dev) { @@ -662,9 +653,6 @@ static void vxlan_notify_del_rx_port(struct vxlan_sock *vs) port); } rcu_read_unlock(); - - if (sa_family == AF_INET) - udp_del_offload(&vs->udp_offloads); } /* Add new entry to forwarding table -- assumes lock held */ @@ -2752,21 +2740,19 @@ static struct vxlan_sock *vxlan_socket_create(struct net *net, bool ipv6, atomic_set(&vs->refcnt, 1); vs->flags = (flags & VXLAN_F_RCV_FLAGS); - /* Initialize the vxlan udp offloads structure */ - vs->udp_offloads.port = port; - vs->udp_offloads.callbacks.gro_receive = vxlan_gro_receive; - vs->udp_offloads.callbacks.gro_complete = vxlan_gro_complete; - spin_lock(&vn->sock_lock); hlist_add_head_rcu(&vs->hlist, vs_head(net, port)); vxlan_notify_add_rx_port(vs); spin_unlock(&vn->sock_lock); /* Mark socket as an encapsulation socket. */ + memset(&tunnel_cfg, 0, sizeof(tunnel_cfg)); tunnel_cfg.sk_user_data = vs; tunnel_cfg.encap_type = 1; tunnel_cfg.encap_rcv = vxlan_rcv; tunnel_cfg.encap_destroy = NULL; + tunnel_cfg.gro_receive = vxlan_gro_receive; + tunnel_cfg.gro_complete = vxlan_gro_complete; setup_udp_tunnel_sock(net, sock, &tunnel_cfg); diff --git a/include/net/vxlan.h b/include/net/vxlan.h index dcc6f4057115..2f168f0ea32c 100644 --- a/include/net/vxlan.h +++ b/include/net/vxlan.h @@ -189,7 +189,6 @@ struct vxlan_sock { struct rcu_head rcu; struct hlist_head vni_list[VNI_HASH_SIZE]; atomic_t refcnt; - struct udp_offload udp_offloads; u32 flags; }; From d92283e338f6d6503b7417536bf3478f466cbc01 Mon Sep 17 00:00:00 2001 From: Tom Herbert Date: Tue, 5 Apr 2016 08:22:54 -0700 Subject: [PATCH 0440/1649] fou: change to use UDP socket GRO Adapt gue_gro_receive, gue_gro_complete to take a socket argument. Don't set udp_offloads any more. Signed-off-by: Tom Herbert Signed-off-by: David S. 
Miller --- net/ipv4/fou.c | 48 +++++++++++++++++------------------------------- 1 file changed, 17 insertions(+), 31 deletions(-) diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c index 5a94aea280d3..5738b9771067 100644 --- a/net/ipv4/fou.c +++ b/net/ipv4/fou.c @@ -22,7 +22,6 @@ struct fou { u8 flags; __be16 port; u16 type; - struct udp_offload udp_offloads; struct list_head list; struct rcu_head rcu; }; @@ -186,13 +185,13 @@ drop: return 0; } -static struct sk_buff **fou_gro_receive(struct sk_buff **head, - struct sk_buff *skb, - struct udp_offload *uoff) +static struct sk_buff **fou_gro_receive(struct sock *sk, + struct sk_buff **head, + struct sk_buff *skb) { const struct net_offload *ops; struct sk_buff **pp = NULL; - u8 proto = NAPI_GRO_CB(skb)->proto; + u8 proto = fou_from_sock(sk)->protocol; const struct net_offload **offloads; /* We can clear the encap_mark for FOU as we are essentially doing @@ -217,11 +216,11 @@ out_unlock: return pp; } -static int fou_gro_complete(struct sk_buff *skb, int nhoff, - struct udp_offload *uoff) +static int fou_gro_complete(struct sock *sk, struct sk_buff *skb, + int nhoff) { const struct net_offload *ops; - u8 proto = NAPI_GRO_CB(skb)->proto; + u8 proto = fou_from_sock(sk)->protocol; int err = -ENOSYS; const struct net_offload **offloads; @@ -264,9 +263,9 @@ static struct guehdr *gue_gro_remcsum(struct sk_buff *skb, unsigned int off, return guehdr; } -static struct sk_buff **gue_gro_receive(struct sk_buff **head, - struct sk_buff *skb, - struct udp_offload *uoff) +static struct sk_buff **gue_gro_receive(struct sock *sk, + struct sk_buff **head, + struct sk_buff *skb) { const struct net_offload **offloads; const struct net_offload *ops; @@ -277,7 +276,7 @@ static struct sk_buff **gue_gro_receive(struct sk_buff **head, void *data; u16 doffset = 0; int flush = 1; - struct fou *fou = container_of(uoff, struct fou, udp_offloads); + struct fou *fou = fou_from_sock(sk); struct gro_remcsum grc; skb_gro_remcsum_init(&grc); @@ -386,8 +385,7 @@ out: return pp; } -static int gue_gro_complete(struct sk_buff *skb, int nhoff, - struct udp_offload *uoff) +static int gue_gro_complete(struct sock *sk, struct sk_buff *skb, int nhoff) { const struct net_offload **offloads; struct guehdr *guehdr = (struct guehdr *)(skb->data + nhoff); @@ -435,10 +433,7 @@ static int fou_add_to_port_list(struct net *net, struct fou *fou) static void fou_release(struct fou *fou) { struct socket *sock = fou->sock; - struct sock *sk = sock->sk; - if (sk->sk_family == AF_INET) - udp_del_offload(&fou->udp_offloads); list_del(&fou->list); udp_tunnel_sock_release(sock); @@ -448,11 +443,9 @@ static void fou_release(struct fou *fou) static int fou_encap_init(struct sock *sk, struct fou *fou, struct fou_cfg *cfg) { udp_sk(sk)->encap_rcv = fou_udp_recv; - fou->protocol = cfg->protocol; - fou->udp_offloads.callbacks.gro_receive = fou_gro_receive; - fou->udp_offloads.callbacks.gro_complete = fou_gro_complete; - fou->udp_offloads.port = cfg->udp_config.local_udp_port; - fou->udp_offloads.ipproto = cfg->protocol; + udp_sk(sk)->gro_receive = fou_gro_receive; + udp_sk(sk)->gro_complete = fou_gro_complete; + fou_from_sock(sk)->protocol = cfg->protocol; return 0; } @@ -460,9 +453,8 @@ static int fou_encap_init(struct sock *sk, struct fou *fou, struct fou_cfg *cfg) static int gue_encap_init(struct sock *sk, struct fou *fou, struct fou_cfg *cfg) { udp_sk(sk)->encap_rcv = gue_udp_recv; - fou->udp_offloads.callbacks.gro_receive = gue_gro_receive; - fou->udp_offloads.callbacks.gro_complete = gue_gro_complete; - 
fou->udp_offloads.port = cfg->udp_config.local_udp_port; + udp_sk(sk)->gro_receive = gue_gro_receive; + udp_sk(sk)->gro_complete = gue_gro_complete; return 0; } @@ -521,12 +513,6 @@ static int fou_create(struct net *net, struct fou_cfg *cfg, sk->sk_allocation = GFP_ATOMIC; - if (cfg->udp_config.family == AF_INET) { - err = udp_add_offload(net, &fou->udp_offloads); - if (err) - goto error; - } - err = fou_add_to_port_list(net, fou); if (err) goto error; From 4a0090a98e5f6e7813d807c883abf362df4b0507 Mon Sep 17 00:00:00 2001 From: Tom Herbert Date: Tue, 5 Apr 2016 08:22:55 -0700 Subject: [PATCH 0441/1649] geneve: change to use UDP socket GRO Adapt geneve_gro_receive, geneve_gro_complete to take a socket argument. Set these functions in tunnel_config. Don't set udp_offloads any more. Signed-off-by: Tom Herbert Signed-off-by: David S. Miller --- drivers/net/geneve.c | 28 ++++++++-------------------- 1 file changed, 8 insertions(+), 20 deletions(-) diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index bc168894bda3..a9fbf17eb256 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -87,7 +87,6 @@ struct geneve_sock { struct socket *sock; struct rcu_head rcu; int refcnt; - struct udp_offload udp_offloads; struct hlist_head vni_list[VNI_HASH_SIZE]; u32 flags; }; @@ -409,14 +408,6 @@ static void geneve_notify_add_rx_port(struct geneve_sock *gs) struct net *net = sock_net(sk); sa_family_t sa_family = geneve_get_sk_family(gs); __be16 port = inet_sk(sk)->inet_sport; - int err; - - if (sa_family == AF_INET) { - err = udp_add_offload(sock_net(sk), &gs->udp_offloads); - if (err) - pr_warn("geneve: udp_add_offload failed with status %d\n", - err); - } rcu_read_lock(); for_each_netdev_rcu(net, dev) { @@ -432,9 +423,9 @@ static int geneve_hlen(struct genevehdr *gh) return sizeof(*gh) + gh->opt_len * 4; } -static struct sk_buff **geneve_gro_receive(struct sk_buff **head, - struct sk_buff *skb, - struct udp_offload *uoff) +static struct sk_buff **geneve_gro_receive(struct sock *sk, + struct sk_buff **head, + struct sk_buff *skb) { struct sk_buff *p, **pp = NULL; struct genevehdr *gh, *gh2; @@ -495,8 +486,8 @@ out: return pp; } -static int geneve_gro_complete(struct sk_buff *skb, int nhoff, - struct udp_offload *uoff) +static int geneve_gro_complete(struct sock *sk, struct sk_buff *skb, + int nhoff) { struct genevehdr *gh; struct packet_offload *ptype; @@ -545,14 +536,14 @@ static struct geneve_sock *geneve_socket_create(struct net *net, __be16 port, INIT_HLIST_HEAD(&gs->vni_list[h]); /* Initialize the geneve udp offloads structure */ - gs->udp_offloads.port = port; - gs->udp_offloads.callbacks.gro_receive = geneve_gro_receive; - gs->udp_offloads.callbacks.gro_complete = geneve_gro_complete; geneve_notify_add_rx_port(gs); /* Mark socket as an encapsulation socket */ + memset(&tunnel_cfg, 0, sizeof(tunnel_cfg)); tunnel_cfg.sk_user_data = gs; tunnel_cfg.encap_type = 1; + tunnel_cfg.gro_receive = geneve_gro_receive; + tunnel_cfg.gro_complete = geneve_gro_complete; tunnel_cfg.encap_rcv = geneve_udp_encap_recv; tunnel_cfg.encap_destroy = NULL; setup_udp_tunnel_sock(net, sock, &tunnel_cfg); @@ -576,9 +567,6 @@ static void geneve_notify_del_rx_port(struct geneve_sock *gs) } rcu_read_unlock(); - - if (sa_family == AF_INET) - udp_del_offload(&gs->udp_offloads); } static void __geneve_sock_release(struct geneve_sock *gs) From 46aa2f30aa7fe03a4dcd732b009284c02ff4f093 Mon Sep 17 00:00:00 2001 From: Tom Herbert Date: Tue, 5 Apr 2016 08:22:56 -0700 Subject: [PATCH 0442/1649] udp: Remove udp_offloads Now 
that the UDP encapsulation GRO functions have been moved to the UDP socket we not longer need the udp_offload insfrastructure so removing it. Signed-off-by: Tom Herbert Signed-off-by: David S. Miller --- include/linux/netdevice.h | 17 ----------- include/net/protocol.h | 3 -- net/ipv4/udp_offload.c | 63 --------------------------------------- 3 files changed, 83 deletions(-) diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index cb0d5d09c2e4..cb4e508b3f38 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -2159,23 +2159,6 @@ struct packet_offload { struct list_head list; }; -struct udp_offload; - -struct udp_offload_callbacks { - struct sk_buff **(*gro_receive)(struct sk_buff **head, - struct sk_buff *skb, - struct udp_offload *uoff); - int (*gro_complete)(struct sk_buff *skb, - int nhoff, - struct udp_offload *uoff); -}; - -struct udp_offload { - __be16 port; - u8 ipproto; - struct udp_offload_callbacks callbacks; -}; - /* often modified stats are per-CPU, other are shared (netdev->stats) */ struct pcpu_sw_netstats { u64 rx_packets; diff --git a/include/net/protocol.h b/include/net/protocol.h index da689f5432de..bf36ca34af7a 100644 --- a/include/net/protocol.h +++ b/include/net/protocol.h @@ -107,9 +107,6 @@ int inet_del_offload(const struct net_offload *prot, unsigned char num); void inet_register_protosw(struct inet_protosw *p); void inet_unregister_protosw(struct inet_protosw *p); -int udp_add_offload(struct net *net, struct udp_offload *prot); -void udp_del_offload(struct udp_offload *prot); - #if IS_ENABLED(CONFIG_IPV6) int inet6_add_protocol(const struct inet6_protocol *prot, unsigned char num); int inet6_del_protocol(const struct inet6_protocol *prot, unsigned char num); diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c index 65c3fd34b363..6230cf4b0d2d 100644 --- a/net/ipv4/udp_offload.c +++ b/net/ipv4/udp_offload.c @@ -14,18 +14,6 @@ #include #include -static DEFINE_SPINLOCK(udp_offload_lock); -static struct udp_offload_priv __rcu *udp_offload_base __read_mostly; - -#define udp_deref_protected(X) rcu_dereference_protected(X, lockdep_is_held(&udp_offload_lock)) - -struct udp_offload_priv { - struct udp_offload *offload; - possible_net_t net; - struct rcu_head rcu; - struct udp_offload_priv __rcu *next; -}; - static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb, netdev_features_t features, struct sk_buff *(*gso_inner_segment)(struct sk_buff *skb, @@ -254,56 +242,6 @@ out: return segs; } -int udp_add_offload(struct net *net, struct udp_offload *uo) -{ - struct udp_offload_priv *new_offload = kzalloc(sizeof(*new_offload), GFP_ATOMIC); - - if (!new_offload) - return -ENOMEM; - - write_pnet(&new_offload->net, net); - new_offload->offload = uo; - - spin_lock(&udp_offload_lock); - new_offload->next = udp_offload_base; - rcu_assign_pointer(udp_offload_base, new_offload); - spin_unlock(&udp_offload_lock); - - return 0; -} -EXPORT_SYMBOL(udp_add_offload); - -static void udp_offload_free_routine(struct rcu_head *head) -{ - struct udp_offload_priv *ou_priv = container_of(head, struct udp_offload_priv, rcu); - kfree(ou_priv); -} - -void udp_del_offload(struct udp_offload *uo) -{ - struct udp_offload_priv __rcu **head = &udp_offload_base; - struct udp_offload_priv *uo_priv; - - spin_lock(&udp_offload_lock); - - uo_priv = udp_deref_protected(*head); - for (; uo_priv != NULL; - uo_priv = udp_deref_protected(*head)) { - if (uo_priv->offload == uo) { - rcu_assign_pointer(*head, - udp_deref_protected(uo_priv->next)); - goto unlock; - 
} - head = &uo_priv->next; - } - pr_warn("udp_del_offload: didn't find offload for port %d\n", ntohs(uo->port)); -unlock: - spin_unlock(&udp_offload_lock); - if (uo_priv) - call_rcu(&uo_priv->rcu, udp_offload_free_routine); -} -EXPORT_SYMBOL(udp_del_offload); - struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *skb, struct udphdr *uh, udp_lookup_t lookup) { @@ -327,7 +265,6 @@ struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *skb, if (sk && udp_sk(sk)->gro_receive) goto unflush; - goto out_unlock; unflush: From 4e801fa14f68223d36480bced975ebf0c5f9a284 Mon Sep 17 00:00:00 2001 From: Jon Paul Maloy Date: Thu, 7 Apr 2016 10:09:13 -0400 Subject: [PATCH 0443/1649] tipc: eliminate buffer leak in bearer layer When enabling a bearer we create a 'neigbor discoverer' instance by calling the function tipc_disc_create() before the bearer is actually registered in the list of enabled bearers. Because of this, the very first discovery broadcast message, created by the mentioned function, is lost, since it cannot find any valid bearer to use. Furthermore, the used send function, tipc_bearer_xmit_skb() does not free the given buffer when it cannot find a bearer, resulting in the leak of exactly one send buffer each time a bearer is enabled. This commit fixes this problem by introducing two changes: 1) Instead of attemting to send the discovery message directly, we let tipc_disc_create() return the discovery buffer to the calling function, tipc_enable_bearer(), so that the latter can send it when the enabling sequence is finished. 2) In tipc_bearer_xmit_skb(), as well as in the two other transmit functions at the bearer layer, we now free the indicated buffer or buffer chain when a valid bearer cannot be found. Acked-by: Ying Xue Signed-off-by: Jon Maloy Signed-off-by: David S. 
Miller --- net/tipc/bearer.c | 51 +++++++++++++++++++++++---------------------- net/tipc/discover.c | 7 ++----- net/tipc/discover.h | 2 +- 3 files changed, 29 insertions(+), 31 deletions(-) diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c index 27a5406213c6..20566e9a1369 100644 --- a/net/tipc/bearer.c +++ b/net/tipc/bearer.c @@ -205,6 +205,7 @@ static int tipc_enable_bearer(struct net *net, const char *name, struct tipc_bearer *b; struct tipc_media *m; struct tipc_bearer_names b_names; + struct sk_buff *skb; char addr_string[16]; u32 bearer_id; u32 with_this_prio; @@ -301,7 +302,7 @@ restart: b->net_plane = bearer_id + 'A'; b->priority = priority; - res = tipc_disc_create(net, b, &b->bcast_addr); + res = tipc_disc_create(net, b, &b->bcast_addr, &skb); if (res) { bearer_disable(net, b); pr_warn("Bearer <%s> rejected, discovery object creation failed\n", @@ -310,7 +311,8 @@ restart: } rcu_assign_pointer(tn->bearer_list[bearer_id], b); - + if (skb) + tipc_bearer_xmit_skb(net, bearer_id, skb, &b->bcast_addr); pr_info("Enabled bearer <%s>, discovery domain %s, priority %u\n", name, tipc_addr_string_fill(addr_string, disc_domain), priority); @@ -450,6 +452,8 @@ void tipc_bearer_xmit_skb(struct net *net, u32 bearer_id, b = rcu_dereference_rtnl(tn->bearer_list[bearer_id]); if (likely(b)) b->media->send_msg(net, skb, b, dest); + else + kfree_skb(skb); rcu_read_unlock(); } @@ -468,11 +472,11 @@ void tipc_bearer_xmit(struct net *net, u32 bearer_id, rcu_read_lock(); b = rcu_dereference_rtnl(tn->bearer_list[bearer_id]); - if (likely(b)) { - skb_queue_walk_safe(xmitq, skb, tmp) { - __skb_dequeue(xmitq); - b->media->send_msg(net, skb, b, dst); - } + if (unlikely(!b)) + __skb_queue_purge(xmitq); + skb_queue_walk_safe(xmitq, skb, tmp) { + __skb_dequeue(xmitq); + b->media->send_msg(net, skb, b, dst); } rcu_read_unlock(); } @@ -490,14 +494,14 @@ void tipc_bearer_bc_xmit(struct net *net, u32 bearer_id, rcu_read_lock(); b = rcu_dereference_rtnl(tn->bearer_list[bearer_id]); - if (likely(b)) { - skb_queue_walk_safe(xmitq, skb, tmp) { - hdr = buf_msg(skb); - msg_set_non_seq(hdr, 1); - msg_set_mc_netid(hdr, net_id); - __skb_dequeue(xmitq); - b->media->send_msg(net, skb, b, &b->bcast_addr); - } + if (unlikely(!b)) + __skb_queue_purge(xmitq); + skb_queue_walk_safe(xmitq, skb, tmp) { + hdr = buf_msg(skb); + msg_set_non_seq(hdr, 1); + msg_set_mc_netid(hdr, net_id); + __skb_dequeue(xmitq); + b->media->send_msg(net, skb, b, &b->bcast_addr); } rcu_read_unlock(); } @@ -513,24 +517,21 @@ void tipc_bearer_bc_xmit(struct net *net, u32 bearer_id, * ignores packets sent using interface multicast, and traffic sent to other * nodes (which can happen if interface is running in promiscuous mode). 
*/ -static int tipc_l2_rcv_msg(struct sk_buff *buf, struct net_device *dev, +static int tipc_l2_rcv_msg(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev) { struct tipc_bearer *b; rcu_read_lock(); b = rcu_dereference_rtnl(dev->tipc_ptr); - if (likely(b)) { - if (likely(buf->pkt_type <= PACKET_BROADCAST)) { - buf->next = NULL; - tipc_rcv(dev_net(dev), buf, b); - rcu_read_unlock(); - return NET_RX_SUCCESS; - } + if (likely(b && (skb->pkt_type <= PACKET_BROADCAST))) { + skb->next = NULL; + tipc_rcv(dev_net(dev), skb, b); + rcu_read_unlock(); + return NET_RX_SUCCESS; } rcu_read_unlock(); - - kfree_skb(buf); + kfree_skb(skb); return NET_RX_DROP; } diff --git a/net/tipc/discover.c b/net/tipc/discover.c index f1e738e80535..ad9d477cc242 100644 --- a/net/tipc/discover.c +++ b/net/tipc/discover.c @@ -268,10 +268,9 @@ exit: * Returns 0 if successful, otherwise -errno. */ int tipc_disc_create(struct net *net, struct tipc_bearer *b, - struct tipc_media_addr *dest) + struct tipc_media_addr *dest, struct sk_buff **skb) { struct tipc_link_req *req; - struct sk_buff *skb; req = kmalloc(sizeof(*req), GFP_ATOMIC); if (!req) @@ -293,9 +292,7 @@ int tipc_disc_create(struct net *net, struct tipc_bearer *b, setup_timer(&req->timer, disc_timeout, (unsigned long)req); mod_timer(&req->timer, jiffies + req->timer_intv); b->link_req = req; - skb = skb_clone(req->buf, GFP_ATOMIC); - if (skb) - tipc_bearer_xmit_skb(net, req->bearer_id, skb, &req->dest); + *skb = skb_clone(req->buf, GFP_ATOMIC); return 0; } diff --git a/net/tipc/discover.h b/net/tipc/discover.h index c9b12770c5ed..b80a335389c0 100644 --- a/net/tipc/discover.h +++ b/net/tipc/discover.h @@ -40,7 +40,7 @@ struct tipc_link_req; int tipc_disc_create(struct net *net, struct tipc_bearer *b_ptr, - struct tipc_media_addr *dest); + struct tipc_media_addr *dest, struct sk_buff **skb); void tipc_disc_delete(struct tipc_link_req *req); void tipc_disc_reset(struct net *net, struct tipc_bearer *b_ptr); void tipc_disc_add_dest(struct tipc_link_req *req); From 5b7066c3dd24c7d538e5ee402eb24bb182c16dab Mon Sep 17 00:00:00 2001 From: Jon Paul Maloy Date: Thu, 7 Apr 2016 10:09:14 -0400 Subject: [PATCH 0444/1649] tipc: stricter filtering of packets in bearer layer Resetting a bearer/interface, with the consequence of resetting all its pertaining links, is not an atomic action. This becomes particularly evident in very large clusters, where a lot of traffic may happen on the remaining links while we are busy shutting them down. In extreme cases, we may even see links being re-created and re-established before we are finished with the job. To solve this, we now introduce a solution where we temporarily detach the bearer from the interface when the bearer is reset. This inhibits all packet reception, while sending still is possible. For the latter, we use the fact that the device's user pointer now is zero to filter out which packets can be sent during this situation; i.e., outgoing RESET messages only. This filtering serves to speed up the neighbors' detection of the loss event, and saves us from unnecessary probing. Acked-by: Ying Xue Signed-off-by: Jon Maloy Signed-off-by: David S. 
Miller --- net/tipc/bearer.c | 50 +++++++++++++++++++++++++++++++---------------- net/tipc/msg.h | 5 +++++ 2 files changed, 38 insertions(+), 17 deletions(-) diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c index 20566e9a1369..6f11c62bc8f9 100644 --- a/net/tipc/bearer.c +++ b/net/tipc/bearer.c @@ -337,23 +337,16 @@ static int tipc_reset_bearer(struct net *net, struct tipc_bearer *b) */ static void bearer_disable(struct net *net, struct tipc_bearer *b) { - struct tipc_net *tn = net_generic(net, tipc_net_id); - u32 i; + struct tipc_net *tn = tipc_net(net); + int bearer_id = b->identity; pr_info("Disabling bearer <%s>\n", b->name); b->media->disable_media(b); - - tipc_node_delete_links(net, b->identity); + tipc_node_delete_links(net, bearer_id); RCU_INIT_POINTER(b->media_ptr, NULL); if (b->link_req) tipc_disc_delete(b->link_req); - - for (i = 0; i < MAX_BEARERS; i++) { - if (b == rtnl_dereference(tn->bearer_list[i])) { - RCU_INIT_POINTER(tn->bearer_list[i], NULL); - break; - } - } + RCU_INIT_POINTER(tn->bearer_list[bearer_id], NULL); kfree_rcu(b, rcu); } @@ -396,7 +389,7 @@ void tipc_disable_l2_media(struct tipc_bearer *b) /** * tipc_l2_send_msg - send a TIPC packet out over an L2 interface - * @buf: the packet to be sent + * @skb: the packet to be sent * @b: the bearer through which the packet is to be sent * @dest: peer destination address */ @@ -405,17 +398,21 @@ int tipc_l2_send_msg(struct net *net, struct sk_buff *skb, { struct net_device *dev; int delta; + void *tipc_ptr; dev = (struct net_device *)rcu_dereference_rtnl(b->media_ptr); if (!dev) return 0; + /* Send RESET message even if bearer is detached from device */ + tipc_ptr = rtnl_dereference(dev->tipc_ptr); + if (unlikely(!tipc_ptr && !msg_is_reset(buf_msg(skb)))) + goto drop; + delta = dev->hard_header_len - skb_headroom(skb); if ((delta > 0) && - pskb_expand_head(skb, SKB_DATA_ALIGN(delta), 0, GFP_ATOMIC)) { - kfree_skb(skb); - return 0; - } + pskb_expand_head(skb, SKB_DATA_ALIGN(delta), 0, GFP_ATOMIC)) + goto drop; skb_reset_network_header(skb); skb->dev = dev; @@ -424,6 +421,9 @@ int tipc_l2_send_msg(struct net *net, struct sk_buff *skb, dev->dev_addr, skb->len); dev_queue_xmit(skb); return 0; +drop: + kfree_skb(skb); + return 0; } int tipc_bearer_mtu(struct net *net, u32 bearer_id) @@ -549,9 +549,18 @@ static int tipc_l2_device_event(struct notifier_block *nb, unsigned long evt, { struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct net *net = dev_net(dev); + struct tipc_net *tn = tipc_net(net); struct tipc_bearer *b; + int i; b = rtnl_dereference(dev->tipc_ptr); + if (!b) { + for (i = 0; i < MAX_BEARERS; b = NULL, i++) { + b = rtnl_dereference(tn->bearer_list[i]); + if (b && (b->media_ptr == dev)) + break; + } + } if (!b) return NOTIFY_DONE; @@ -561,13 +570,20 @@ static int tipc_l2_device_event(struct notifier_block *nb, unsigned long evt, case NETDEV_CHANGE: if (netif_carrier_ok(dev)) break; + case NETDEV_UP: + rcu_assign_pointer(dev->tipc_ptr, b); + break; case NETDEV_GOING_DOWN: + RCU_INIT_POINTER(dev->tipc_ptr, NULL); + synchronize_net(); + tipc_reset_bearer(net, b); + break; case NETDEV_CHANGEMTU: tipc_reset_bearer(net, b); break; case NETDEV_CHANGEADDR: b->media->raw2addr(b, &b->addr, - (char *)dev->dev_addr); + (char *)dev->dev_addr); tipc_reset_bearer(net, b); break; case NETDEV_UNREGISTER: diff --git a/net/tipc/msg.h b/net/tipc/msg.h index 55778a0aebf3..f34f639df643 100644 --- a/net/tipc/msg.h +++ b/net/tipc/msg.h @@ -779,6 +779,11 @@ static inline bool msg_peer_node_is_up(struct tipc_msg *m) return 
msg_redundant_link(m); } +static inline bool msg_is_reset(struct tipc_msg *hdr) +{ + return (msg_user(hdr) == LINK_PROTOCOL) && (msg_type(hdr) == RESET_MSG); +} + struct sk_buff *tipc_buf_acquire(u32 size); bool tipc_msg_validate(struct sk_buff *skb); bool tipc_msg_reverse(u32 own_addr, struct sk_buff **skb, int err); From 03be98226c14d787939381b9f42d81764ea8eedc Mon Sep 17 00:00:00 2001 From: Hannes Frederic Sowa Date: Thu, 7 Apr 2016 23:53:35 +0200 Subject: [PATCH 0445/1649] sock: make lockdep_sock_is_held static inline I forgot to add inline to lockdep_sock_is_held, so it generated all kinds of build warnings if not build with lockdep support. Reported-by: kbuild test robot Signed-off-by: Hannes Frederic Sowa Signed-off-by: David S. Miller --- include/net/sock.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/net/sock.h b/include/net/sock.h index eb2d7c3e120b..46b29374df8e 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -1360,7 +1360,7 @@ do { \ lockdep_init_map(&(sk)->sk_lock.dep_map, (name), (key), 0); \ } while (0) -static bool lockdep_sock_is_held(const struct sock *csk) +static inline bool lockdep_sock_is_held(const struct sock *csk) { struct sock *sk = (struct sock *)csk; From 832ac592149f542052e387f17dfcfa7ebea50aaf Mon Sep 17 00:00:00 2001 From: Mark Rustad Date: Mon, 14 Mar 2016 11:05:40 -0700 Subject: [PATCH 0446/1649] ixgbe: Delete some unused register definitions I noticed the SRAMREL registers are not referenced for any device, so delete the definitions. Signed-off-by: Mark Rustad Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe_type.h | 8 -------- 1 file changed, 8 deletions(-) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h index bc012ab48475..d02a0a3fa1d8 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h @@ -143,13 +143,6 @@ #define IXGBE_GRC_X550EM_a 0x15F64 #define IXGBE_GRC(_hw) IXGBE_BY_MAC((_hw), GRC) -#define IXGBE_SRAMREL_8259X 0x10210 -#define IXGBE_SRAMREL_X540 IXGBE_SRAMREL_8259X -#define IXGBE_SRAMREL_X550 IXGBE_SRAMREL_8259X -#define IXGBE_SRAMREL_X550EM_x IXGBE_SRAMREL_8259X -#define IXGBE_SRAMREL_X550EM_a 0x15F6C -#define IXGBE_SRAMREL(_hw) IXGBE_BY_MAC((_hw), SRAMREL) - /* General Receive Control */ #define IXGBE_GRC_MNG 0x00000001 /* Manageability Enable */ #define IXGBE_GRC_APME 0x00000002 /* APM enabled in EEPROM */ @@ -2948,7 +2941,6 @@ union ixgbe_atr_hash_dword { IXGBE_CAT(EEC, m), \ IXGBE_CAT(FLA, m), \ IXGBE_CAT(GRC, m), \ - IXGBE_CAT(SRAMREL, m), \ IXGBE_CAT(FACTPS, m), \ IXGBE_CAT(SWSM, m), \ IXGBE_CAT(SWFW_SYNC, m), \ From 3775b814d5380a25ed89b881d845f79f81bc5547 Mon Sep 17 00:00:00 2001 From: Mark Rustad Date: Mon, 14 Mar 2016 11:05:46 -0700 Subject: [PATCH 0447/1649] ixgbe: Change the lan_id and func fields to a u8 to avoid casts Since the lan_id and func fields only ever hold small values, make them u8 to avoid casts used to silence warnings. 
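A minimal standalone sketch of the change described above (illustrative types only, not the driver's structures): when the bus fields are wider than the u8 command fields they feed, every assignment needs a cast just to silence conversion warnings, whereas storing them as u8 in the first place makes the assignments clean.

/* Standalone illustration, not kernel code. Struct and field names are
 * modelled loosely on the diff below; the values are made up.
 */
#include <stdint.h>
#include <stdio.h>

struct bus_info_old { uint16_t func; uint16_t lan_id; }; /* before the patch */
struct bus_info_new { uint8_t  func; uint8_t  lan_id; }; /* after the patch  */

struct fw_drv_cmd { uint8_t port_num; };

int main(void)
{
	struct bus_info_old old_bus = { .func = 1, .lan_id = 1 };
	struct bus_info_new new_bus = { .func = 1, .lan_id = 1 };
	struct fw_drv_cmd cmd;

	cmd.port_num = (uint8_t)old_bus.func; /* cast only exists to quiet warnings */
	cmd.port_num = new_bus.func;          /* types already match: no cast */

	printf("port_num = %u\n", cmd.port_num);
	return 0;
}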
Signed-off-by: Mark Rustad Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe_common.c | 2 +- drivers/net/ethernet/intel/ixgbe/ixgbe_type.h | 4 ++-- drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c index 8c7e78b21c4e..dfdb1149b6fd 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c @@ -3600,7 +3600,7 @@ s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min, fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO; fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN; fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED; - fw_cmd.port_num = (u8)hw->bus.func; + fw_cmd.port_num = hw->bus.func; fw_cmd.ver_maj = maj; fw_cmd.ver_min = min; fw_cmd.ver_build = build; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h index d02a0a3fa1d8..7ae4bbd26ad8 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h @@ -3122,8 +3122,8 @@ struct ixgbe_bus_info { enum ixgbe_bus_width width; enum ixgbe_bus_type type; - u16 func; - u16 lan_id; + u8 func; + u8 lan_id; }; /* Flow control parameters */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c index 9d3f765638cc..5affac123b75 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c @@ -862,7 +862,7 @@ static void ixgbe_disable_rx_x550(struct ixgbe_hw *hw) fw_cmd.hdr.cmd = FW_DISABLE_RXEN_CMD; fw_cmd.hdr.buf_len = FW_DISABLE_RXEN_LEN; fw_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM; - fw_cmd.port_number = (u8)hw->bus.lan_id; + fw_cmd.port_number = hw->bus.lan_id; status = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd, sizeof(struct ixgbe_hic_disable_rxen), From 73457165d71d5ce0e41c0adb7bfa484702c36248 Mon Sep 17 00:00:00 2001 From: Mark Rustad Date: Mon, 14 Mar 2016 11:05:51 -0700 Subject: [PATCH 0448/1649] ixgbe: Correct length check for round up The function ixgbe_host_interface_command actually uses a multiple of word sized buffer to do its business, but only checks against the actual length passed in. This means that on read operations it could be possible to modify locations beyond the length passed in. Change the check to round up in the same way, just to avoid any possible hazard. 
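A standalone sketch of the corrected check described above, assuming a local round_up() stand-in for the kernel macro: because the reply is copied back in whole DWORDs, the caller's buffer must cover buf_len rounded up to a 4-byte boundary plus the header, not just buf_len itself.

/* Standalone illustration, not driver code. */
#include <stdio.h>

#define round_up(x, y) ((((x) + (y) - 1) / (y)) * (y))

static int reply_fits(unsigned int length,   /* caller's buffer size          */
		      unsigned int buf_len,  /* payload length reported by fw */
		      unsigned int hdr_size) /* fixed header size             */
{
	/* old check: error only if length < buf_len + hdr_size,
	 * which misses the partial last DWORD of the copy-back loop
	 */
	return length >= round_up(buf_len, 4) + hdr_size;
}

int main(void)
{
	/* a 10-byte payload is copied back as 12 bytes (three whole DWORDs) */
	printf("len=18: %d\n", reply_fits(18, 10, 8)); /* old check accepted, new rejects */
	printf("len=20: %d\n", reply_fits(20, 10, 8)); /* large enough either way */
	return 0;
}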
Signed-off-by: Mark Rustad Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe_common.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c index dfdb1149b6fd..a2ca9ef0daab 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c @@ -3557,7 +3557,7 @@ s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer, if (buf_len == 0) return 0; - if (length < (buf_len + hdr_size)) { + if (length < round_up(buf_len, 4) + hdr_size) { hw_dbg(hw, "Buffer not large enough for reply message.\n"); return IXGBE_ERR_HOST_INTERFACE_COMMAND; } From 5cffde309cb3f6f7aaaa459abd3eba245a863f8a Mon Sep 17 00:00:00 2001 From: Mark Rustad Date: Mon, 14 Mar 2016 11:05:57 -0700 Subject: [PATCH 0449/1649] ixgbe: Clean up interface for firmware commands Clean up the interface for issuing firmware commands to use a void * instead of a u32 *. This eliminates a number of casts. Also clean up ixgbe_host_interface_command in a few other ways, eliminating comparisons with 0, redundant parens and minor formatting issues. Signed-off-by: Mark Rustad Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- .../net/ethernet/intel/ixgbe/ixgbe_common.c | 39 ++++++++++--------- .../net/ethernet/intel/ixgbe/ixgbe_common.h | 4 +- drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c | 13 +++---- 3 files changed, 28 insertions(+), 28 deletions(-) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c index a2ca9ef0daab..b8cdff7fe673 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c @@ -3483,15 +3483,19 @@ static u8 ixgbe_calculate_checksum(u8 *buffer, u32 length) * Communicates with the manageability block. On success return 0 * else return IXGBE_ERR_HOST_INTERFACE_COMMAND. **/ -s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer, +s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *buffer, u32 length, u32 timeout, bool return_data) { - u32 hicr, i, bi, fwsts; u32 hdr_size = sizeof(struct ixgbe_hic_hdr); + u32 hicr, i, bi, fwsts; u16 buf_len, dword_len; + union { + struct ixgbe_hic_hdr hdr; + u32 u32arr[1]; + } *bp = buffer; - if (length == 0 || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) { + if (!length || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) { hw_dbg(hw, "Buffer length failure buffersize-%d.\n", length); return IXGBE_ERR_HOST_INTERFACE_COMMAND; } @@ -3502,26 +3506,25 @@ s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer, /* Check that the host interface is enabled. */ hicr = IXGBE_READ_REG(hw, IXGBE_HICR); - if ((hicr & IXGBE_HICR_EN) == 0) { + if (!(hicr & IXGBE_HICR_EN)) { hw_dbg(hw, "IXGBE_HOST_EN bit disabled.\n"); return IXGBE_ERR_HOST_INTERFACE_COMMAND; } /* Calculate length in DWORDs. We must be DWORD aligned */ - if ((length % (sizeof(u32))) != 0) { + if (length % sizeof(u32)) { hw_dbg(hw, "Buffer length failure, not aligned to dword"); return IXGBE_ERR_INVALID_ARGUMENT; } dword_len = length >> 2; - /* - * The device driver writes the relevant command block + /* The device driver writes the relevant command block * into the ram area. */ for (i = 0; i < dword_len; i++) IXGBE_WRITE_REG_ARRAY(hw, IXGBE_FLEX_MNG, - i, cpu_to_le32(buffer[i])); + i, cpu_to_le32(bp->u32arr[i])); /* Setting this bit tells the ARC that a new command is pending. 
*/ IXGBE_WRITE_REG(hw, IXGBE_HICR, hicr | IXGBE_HICR_C); @@ -3534,8 +3537,8 @@ s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer, } /* Check command successful completion. */ - if ((timeout != 0 && i == timeout) || - (!(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV))) { + if ((timeout && i == timeout) || + !(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV)) { hw_dbg(hw, "Command has failed with no status valid.\n"); return IXGBE_ERR_HOST_INTERFACE_COMMAND; } @@ -3548,13 +3551,13 @@ s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer, /* first pull in the header so we know the buffer length */ for (bi = 0; bi < dword_len; bi++) { - buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi); - le32_to_cpus(&buffer[bi]); + bp->u32arr[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi); + le32_to_cpus(&bp->u32arr[bi]); } /* If there is any thing in data position pull it in */ - buf_len = ((struct ixgbe_hic_hdr *)buffer)->buf_len; - if (buf_len == 0) + buf_len = bp->hdr.buf_len; + if (!buf_len) return 0; if (length < round_up(buf_len, 4) + hdr_size) { @@ -3565,10 +3568,10 @@ s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer, /* Calculate length in DWORDs, add 3 for odd lengths */ dword_len = (buf_len + 3) >> 2; - /* Pull in the rest of the buffer (bi is where we left off)*/ + /* Pull in the rest of the buffer (bi is where we left off) */ for (; bi <= dword_len; bi++) { - buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi); - le32_to_cpus(&buffer[bi]); + bp->u32arr[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi); + le32_to_cpus(&bp->u32arr[bi]); } return 0; @@ -3612,7 +3615,7 @@ s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min, fw_cmd.pad2 = 0; for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) { - ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd, + ret_val = ixgbe_host_interface_command(hw, &fw_cmd, sizeof(fw_cmd), IXGBE_HI_COMMAND_TIMEOUT, true); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h index 2e290150ab54..6f8e6a56e242 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h @@ -111,8 +111,8 @@ void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf); s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps); s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min, u8 build, u8 ver); -s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer, - u32 length, u32 timeout, bool return_data); +s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *, u32 length, + u32 timeout, bool return_data); void ixgbe_clear_tx_pending(struct ixgbe_hw *hw); bool ixgbe_mng_present(struct ixgbe_hw *hw); bool ixgbe_mng_enabled(struct ixgbe_hw *hw); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c index 5affac123b75..65832fa30426 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c @@ -437,8 +437,7 @@ static s32 ixgbe_read_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset, /* one word */ buffer.length = cpu_to_be16(sizeof(u16)); - status = ixgbe_host_interface_command(hw, (u32 *)&buffer, - sizeof(buffer), + status = ixgbe_host_interface_command(hw, &buffer, sizeof(buffer), IXGBE_HI_COMMAND_TIMEOUT, false); if (status) return status; @@ -488,7 +487,7 @@ static s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw, buffer.address = 
cpu_to_be32((offset + current_word) * 2); buffer.length = cpu_to_be16(words_to_read * 2); - status = ixgbe_host_interface_command(hw, (u32 *)&buffer, + status = ixgbe_host_interface_command(hw, &buffer, sizeof(buffer), IXGBE_HI_COMMAND_TIMEOUT, false); @@ -771,8 +770,7 @@ static s32 ixgbe_write_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset, buffer.data = data; buffer.address = cpu_to_be32(offset * 2); - status = ixgbe_host_interface_command(hw, (u32 *)&buffer, - sizeof(buffer), + status = ixgbe_host_interface_command(hw, &buffer, sizeof(buffer), IXGBE_HI_COMMAND_TIMEOUT, false); return status; } @@ -814,8 +812,7 @@ static s32 ixgbe_update_flash_X550(struct ixgbe_hw *hw) buffer.req.buf_lenl = FW_SHADOW_RAM_DUMP_LEN; buffer.req.checksum = FW_DEFAULT_CHECKSUM; - status = ixgbe_host_interface_command(hw, (u32 *)&buffer, - sizeof(buffer), + status = ixgbe_host_interface_command(hw, &buffer, sizeof(buffer), IXGBE_HI_COMMAND_TIMEOUT, false); return status; } @@ -864,7 +861,7 @@ static void ixgbe_disable_rx_x550(struct ixgbe_hw *hw) fw_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM; fw_cmd.port_number = hw->bus.lan_id; - status = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd, + status = ixgbe_host_interface_command(hw, &fw_cmd, sizeof(struct ixgbe_hic_disable_rxen), IXGBE_HI_COMMAND_TIMEOUT, true); From af7419017626b93ccdf76b12c2b1dc8fe17da4ad Mon Sep 17 00:00:00 2001 From: Mark Rustad Date: Mon, 14 Mar 2016 11:06:02 -0700 Subject: [PATCH 0450/1649] ixgbe: Take manageability semaphore for firmware commands We need to take the manageability semaphore when issuing firmware commands to avoid problems. With this in place, the semaphore is no longer taken in the ixgbe_set_fw_drv_ver_generic function, since it will now always be taken by the ixgbe_host_interface_command function. Signed-off-by: Mark Rustad Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- .../net/ethernet/intel/ixgbe/ixgbe_common.c | 30 ++++++++++++------- 1 file changed, 19 insertions(+), 11 deletions(-) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c index b8cdff7fe673..ee43a383aa0a 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c @@ -3494,11 +3494,16 @@ s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *buffer, struct ixgbe_hic_hdr hdr; u32 u32arr[1]; } *bp = buffer; + s32 status; if (!length || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) { hw_dbg(hw, "Buffer length failure buffersize-%d.\n", length); return IXGBE_ERR_HOST_INTERFACE_COMMAND; } + /* Take management host interface semaphore */ + status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM); + if (status) + return status; /* Set bit 9 of FWSTS clearing FW reset indication */ fwsts = IXGBE_READ_REG(hw, IXGBE_FWSTS); @@ -3508,13 +3513,15 @@ s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *buffer, hicr = IXGBE_READ_REG(hw, IXGBE_HICR); if (!(hicr & IXGBE_HICR_EN)) { hw_dbg(hw, "IXGBE_HOST_EN bit disabled.\n"); - return IXGBE_ERR_HOST_INTERFACE_COMMAND; + status = IXGBE_ERR_HOST_INTERFACE_COMMAND; + goto rel_out; } /* Calculate length in DWORDs. 
We must be DWORD aligned */ if (length % sizeof(u32)) { hw_dbg(hw, "Buffer length failure, not aligned to dword"); - return IXGBE_ERR_INVALID_ARGUMENT; + status = IXGBE_ERR_INVALID_ARGUMENT; + goto rel_out; } dword_len = length >> 2; @@ -3540,11 +3547,12 @@ s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *buffer, if ((timeout && i == timeout) || !(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV)) { hw_dbg(hw, "Command has failed with no status valid.\n"); - return IXGBE_ERR_HOST_INTERFACE_COMMAND; + status = IXGBE_ERR_HOST_INTERFACE_COMMAND; + goto rel_out; } if (!return_data) - return 0; + goto rel_out; /* Calculate length in DWORDs */ dword_len = hdr_size >> 2; @@ -3558,11 +3566,12 @@ s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *buffer, /* If there is any thing in data position pull it in */ buf_len = bp->hdr.buf_len; if (!buf_len) - return 0; + goto rel_out; if (length < round_up(buf_len, 4) + hdr_size) { hw_dbg(hw, "Buffer not large enough for reply message.\n"); - return IXGBE_ERR_HOST_INTERFACE_COMMAND; + status = IXGBE_ERR_HOST_INTERFACE_COMMAND; + goto rel_out; } /* Calculate length in DWORDs, add 3 for odd lengths */ @@ -3574,7 +3583,10 @@ s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *buffer, le32_to_cpus(&bp->u32arr[bi]); } - return 0; +rel_out: + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM); + + return status; } /** @@ -3597,9 +3609,6 @@ s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min, int i; s32 ret_val; - if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM)) - return IXGBE_ERR_SWFW_SYNC; - fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO; fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN; fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED; @@ -3631,7 +3640,6 @@ s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min, break; } - hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM); return ret_val; } From 8220bbc12d39175964cb56e100fabcedd59c48da Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Mon, 7 Mar 2016 09:30:09 -0800 Subject: [PATCH 0451/1649] ixgbe/ixgbevf: Add support for bulk free in Tx cleanup & cleanup boolean logic This patch enables bulk free in Tx cleanup for ixgbevf and cleans up the boolean logic in the polling routines for ixgbe and ixgbevf in the hopes of avoiding any mix-ups similar to what occurred with i40e and i40evf. 
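A standalone sketch of the kind of mix-up the boolean cleanup guards against (a generic illustration, not the i40e bug itself): bitwise-accumulating completion state with &= misbehaves as soon as the right-hand side is an integer that is not 0 or 1, while the explicit if statement keeps both the intent and the result correct.

/* Standalone illustration, not driver code. */
#include <stdbool.h>
#include <stdio.h>

/* pretend cleanup helper: returns how many descriptors it reclaimed */
static int clean_ring(void)
{
	return 2; /* nonzero is meant to be "success" here */
}

int main(void)
{
	bool clean_complete = true;

	/* pitfall: 1 & 2 == 0, so a "successful" return flips the flag */
	clean_complete &= clean_ring();
	printf("with &=: %d\n", clean_complete);  /* prints 0 */

	/* explicit test, as used after the cleanup */
	clean_complete = true;
	if (!clean_ring())
		clean_complete = false;
	printf("with if: %d\n", clean_complete);  /* prints 1 */

	return 0;
}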
Signed-off-by: Alexander Duyck Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 10 +++++++--- drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 14 +++++++++----- 2 files changed, 16 insertions(+), 8 deletions(-) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 19bf3860d3d8..d5509cc30abd 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -1111,6 +1111,7 @@ static int ixgbe_tx_maxrate(struct net_device *netdev, * ixgbe_clean_tx_irq - Reclaim resources after transmit completes * @q_vector: structure containing interrupt and ring information * @tx_ring: tx ring to clean + * @napi_budget: Used to determine if we are in netpoll **/ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector, struct ixgbe_ring *tx_ring, int napi_budget) @@ -2807,8 +2808,10 @@ int ixgbe_poll(struct napi_struct *napi, int budget) ixgbe_update_dca(q_vector); #endif - ixgbe_for_each_ring(ring, q_vector->tx) - clean_complete &= !!ixgbe_clean_tx_irq(q_vector, ring, budget); + ixgbe_for_each_ring(ring, q_vector->tx) { + if (!ixgbe_clean_tx_irq(q_vector, ring, budget)) + clean_complete = false; + } /* Exit if we are called by netpoll or busy polling is active */ if ((budget <= 0) || !ixgbe_qv_lock_napi(q_vector)) @@ -2826,7 +2829,8 @@ int ixgbe_poll(struct napi_struct *napi, int budget) per_ring_budget); work_done += cleaned; - clean_complete &= (cleaned < per_ring_budget); + if (cleaned >= per_ring_budget) + clean_complete = false; } ixgbe_qv_unlock_napi(q_vector); diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 50b6bfffaf32..007cbe094990 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -288,9 +288,10 @@ static void ixgbevf_tx_timeout(struct net_device *netdev) * ixgbevf_clean_tx_irq - Reclaim resources after transmit completes * @q_vector: board private structure * @tx_ring: tx ring to clean + * @napi_budget: Used to determine if we are in netpoll **/ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector, - struct ixgbevf_ring *tx_ring) + struct ixgbevf_ring *tx_ring, int napi_budget) { struct ixgbevf_adapter *adapter = q_vector->adapter; struct ixgbevf_tx_buffer *tx_buffer; @@ -328,7 +329,7 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector, total_packets += tx_buffer->gso_segs; /* free the skb */ - dev_kfree_skb_any(tx_buffer->skb); + napi_consume_skb(tx_buffer->skb, napi_budget); /* unmap skb header data */ dma_unmap_single(tx_ring->dev, @@ -1013,8 +1014,10 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget) int per_ring_budget, work_done = 0; bool clean_complete = true; - ixgbevf_for_each_ring(ring, q_vector->tx) - clean_complete &= ixgbevf_clean_tx_irq(q_vector, ring); + ixgbevf_for_each_ring(ring, q_vector->tx) { + if (!ixgbevf_clean_tx_irq(q_vector, ring, budget)) + clean_complete = false; + } if (budget <= 0) return budget; @@ -1035,7 +1038,8 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget) int cleaned = ixgbevf_clean_rx_irq(q_vector, ring, per_ring_budget); work_done += cleaned; - clean_complete &= (cleaned < per_ring_budget); + if (cleaned >= per_ring_budget) + clean_complete = false; } #ifdef CONFIG_NET_RX_BUSY_POLL From a711ad89a887f7cb2ecbea591a58b6102ad9be7a Mon Sep 17 00:00:00 2001 From: Mark Rustad Date: Mon, 21 Mar 
2016 11:21:31 -0700 Subject: [PATCH 0452/1649] ixgbe: Add support for single-port X550 device Add support for a single-port X550 device. Signed-off-by: Mark Rustad Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe_common.c | 1 + drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 2 ++ drivers/net/ethernet/intel/ixgbe/ixgbe_type.h | 1 + 3 files changed, 4 insertions(+) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c index ee43a383aa0a..8c560da29d23 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c @@ -97,6 +97,7 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw) case IXGBE_DEV_ID_X540T: case IXGBE_DEV_ID_X540T1: case IXGBE_DEV_ID_X550T: + case IXGBE_DEV_ID_X550T1: case IXGBE_DEV_ID_X550EM_X_10G_T: supported = true; break; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index d5509cc30abd..9594438ffa07 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -125,6 +125,7 @@ static const struct pci_device_id ixgbe_pci_tbl[] = { {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF_QP), board_82599 }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T1), board_X540 }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550T), board_X550}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550T1), board_X550}, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KX4), board_X550EM_x}, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KR), board_X550EM_x}, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_10G_T), board_X550EM_x}, @@ -8983,6 +8984,7 @@ int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id, case IXGBE_DEV_ID_X540T: case IXGBE_DEV_ID_X540T1: case IXGBE_DEV_ID_X550T: + case IXGBE_DEV_ID_X550T1: case IXGBE_DEV_ID_X550EM_X_KX4: case IXGBE_DEV_ID_X550EM_X_KR: case IXGBE_DEV_ID_X550EM_X_10G_T: diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h index 7ae4bbd26ad8..bd95be2d7927 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h @@ -75,6 +75,7 @@ #define IXGBE_DEV_ID_X540T1 0x1560 #define IXGBE_DEV_ID_X550T 0x1563 +#define IXGBE_DEV_ID_X550T1 0x15D1 #define IXGBE_DEV_ID_X550EM_X_KX4 0x15AA #define IXGBE_DEV_ID_X550EM_X_KR 0x15AB #define IXGBE_DEV_ID_X550EM_X_SFP 0x15AC From 207969b94cf2736f4f2f51aec287a6a0ea7d5dbd Mon Sep 17 00:00:00 2001 From: Mark Rustad Date: Fri, 1 Apr 2016 12:17:59 -0700 Subject: [PATCH 0453/1649] ixgbe: Add definitions for x550em_a 10G MAC Add definitions for a x550em_a 10G MAC device with a native SFP interface. 
Signed-off-by: Mark Rustad Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe_type.h | 22 +++++++++++++++---- 1 file changed, 18 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h index bd95be2d7927..b505da3e9778 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h @@ -81,16 +81,18 @@ #define IXGBE_DEV_ID_X550EM_X_SFP 0x15AC #define IXGBE_DEV_ID_X550EM_X_10G_T 0x15AD #define IXGBE_DEV_ID_X550EM_X_1G_T 0x15AE +#define IXGBE_DEV_ID_X550EM_A_SFP_N 0x15C4 + +/* VF Device IDs */ #define IXGBE_DEV_ID_X550_VF_HV 0x1564 #define IXGBE_DEV_ID_X550_VF 0x1565 #define IXGBE_DEV_ID_X550EM_X_VF 0x15A8 #define IXGBE_DEV_ID_X550EM_X_VF_HV 0x15A9 - -/* VF Device IDs */ #define IXGBE_DEV_ID_82599_VF 0x10ED #define IXGBE_DEV_ID_X540_VF 0x1515 #define IXGBE_DEV_ID_X550_VF 0x1565 #define IXGBE_DEV_ID_X550EM_X_VF 0x15A8 +#define IXGBE_DEV_ID_X550EM_A_VF 0x15C5 #define IXGBE_CAT(r, m) IXGBE_##r##_##m @@ -129,7 +131,7 @@ #define IXGBE_FLA_X540 IXGBE_FLA_8259X #define IXGBE_FLA_X550 IXGBE_FLA_8259X #define IXGBE_FLA_X550EM_x IXGBE_FLA_8259X -#define IXGBE_FLA_X550EM_a 0x15F6C +#define IXGBE_FLA_X550EM_a 0x15F68 #define IXGBE_FLA(_hw) IXGBE_BY_MAC((_hw), FLA) #define IXGBE_EEMNGCTL 0x10110 #define IXGBE_EEMNGDATA 0x10114 @@ -369,6 +371,8 @@ struct ixgbe_thermal_sensor_data { #define IXGBE_MRCTL(_i) (0x0F600 + ((_i) * 4)) #define IXGBE_VMRVLAN(_i) (0x0F610 + ((_i) * 4)) #define IXGBE_VMRVM(_i) (0x0F630 + ((_i) * 4)) +#define IXGBE_WQBR_RX(_i) (0x2FB0 + ((_i) * 4)) /* 4 total */ +#define IXGBE_WQBR_TX(_i) (0x8130 + ((_i) * 4)) /* 4 total */ #define IXGBE_L34T_IMIR(_i) (0x0E800 + ((_i) * 4)) /*128 of these (0-127)*/ #define IXGBE_RXFECCERR0 0x051B8 #define IXGBE_LLITHRESH 0x0EC90 @@ -440,6 +444,8 @@ struct ixgbe_thermal_sensor_data { #define IXGBE_DMATXCTL_TE 0x1 /* Transmit Enable */ #define IXGBE_DMATXCTL_NS 0x2 /* No Snoop LSO hdr buffer */ #define IXGBE_DMATXCTL_GDV 0x8 /* Global Double VLAN */ +#define IXGBE_DMATXCTL_MDP_EN 0x20 /* Bit 5 */ +#define IXGBE_DMATXCTL_MBINTEN 0x40 /* Bit 6 */ #define IXGBE_DMATXCTL_VT_SHIFT 16 /* VLAN EtherType */ #define IXGBE_PFDTXGSWC_VT_LBEN 0x1 /* Local L2 VT switch enable */ @@ -548,7 +554,6 @@ struct ixgbe_thermal_sensor_data { #define IXGBE_TDPT2TCCR(_i) (0x0CD20 + ((_i) * 4)) /* 8 of these (0-7) */ #define IXGBE_TDPT2TCSR(_i) (0x0CD40 + ((_i) * 4)) /* 8 of these (0-7) */ - /* Security Control Registers */ #define IXGBE_SECTXCTRL 0x08800 #define IXGBE_SECTXSTAT 0x08804 @@ -1197,6 +1202,8 @@ struct ixgbe_thermal_sensor_data { #define IXGBE_RDRXCTL_RSCLLIDIS 0x00800000 /* Disable RSC compl on LLI */ #define IXGBE_RDRXCTL_RSCACKC 0x02000000 /* must set 1 when RSC enabled */ #define IXGBE_RDRXCTL_FCOE_WRFIX 0x04000000 /* must set 1 when RSC enabled */ +#define IXGBE_RDRXCTL_MBINTEN 0x10000000 +#define IXGBE_RDRXCTL_MDP_EN 0x20000000 /* RQTC Bit Masks and Shifts */ #define IXGBE_RQTC_SHIFT_TC(_i) ((_i) * 4) @@ -1951,7 +1958,9 @@ enum { #define IXGBE_GSSR_PHY1_SM 0x0004 #define IXGBE_GSSR_MAC_CSR_SM 0x0008 #define IXGBE_GSSR_FLASH_SM 0x0010 +#define IXGBE_GSSR_NVM_UPDATE_SM 0x0200 #define IXGBE_GSSR_SW_MNG_SM 0x0400 +#define IXGBE_GSSR_TOKEN_SM 0x40000000 /* SW bit for shared access */ #define IXGBE_GSSR_SHARED_I2C_SM 0x1806 /* Wait for both phys & I2Cs */ #define IXGBE_GSSR_I2C_MASK 0x1800 #define IXGBE_GSSR_NVM_PHY_MASK 0xF @@ -2524,6 +2533,10 @@ enum ixgbe_fdir_pballoc_type { #define 
IXGBE_FDIRCTRL_REPORT_STATUS_ALWAYS 0x00000080 #define IXGBE_FDIRCTRL_DROP_Q_SHIFT 8 #define IXGBE_FDIRCTRL_FLEX_SHIFT 16 +#define IXGBE_FDIRCTRL_DROP_NO_MATCH 0x00008000 +#define IXGBE_FDIRCTRL_FILTERMODE_SHIFT 21 +#define IXGBE_FDIRCTRL_FILTERMODE_MACVLAN 0x0001 /* bit 23:21, 001b */ +#define IXGBE_FDIRCTRL_FILTERMODE_CLOUD 0x0002 /* bit 23:21, 010b */ #define IXGBE_FDIRCTRL_SEARCHLIM 0x00800000 #define IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT 24 #define IXGBE_FDIRCTRL_FULL_THRESH_MASK 0xF0000000 @@ -2982,6 +2995,7 @@ enum ixgbe_mac_type { ixgbe_mac_X540, ixgbe_mac_X550, ixgbe_mac_X550EM_x, + ixgbe_mac_x550em_a, ixgbe_num_macs }; From 9a5c27e6ef9166612f95564bc2fc69506d1be2b3 Mon Sep 17 00:00:00 2001 From: Mark Rustad Date: Fri, 1 Apr 2016 12:18:04 -0700 Subject: [PATCH 0454/1649] ixgbe: Use method pointer to access IOSF devices Provide method pointers and use them to access IOSF-attached devices. A new MAC will introduce a new access method. Signed-off-by: Mark Rustad Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe_type.h | 2 ++ drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c | 32 +++++++++++-------- 2 files changed, 20 insertions(+), 14 deletions(-) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h index b505da3e9778..fef2264ff5f0 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h @@ -3332,6 +3332,8 @@ struct ixgbe_mac_operations { s32 (*dmac_config)(struct ixgbe_hw *hw); s32 (*dmac_update_tcs)(struct ixgbe_hw *hw); s32 (*dmac_config_tcs)(struct ixgbe_hw *hw); + s32 (*read_iosf_sb_reg)(struct ixgbe_hw *, u32, u32, u32 *); + s32 (*write_iosf_sb_reg)(struct ixgbe_hw *, u32, u32, u32); }; struct ixgbe_phy_operations { diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c index 65832fa30426..878ea1ed87b4 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c @@ -1615,7 +1615,7 @@ static s32 ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *hw, s32 status; u32 reg_val; - status = ixgbe_read_iosf_sb_reg_x550(hw, + status = hw->mac.ops.read_iosf_sb_reg(hw, IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val); if (status) @@ -1637,7 +1637,7 @@ static s32 ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *hw, /* Restart auto-negotiation. */ reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART; - status = ixgbe_write_iosf_sb_reg_x550(hw, + status = hw->mac.ops.write_iosf_sb_reg(hw, IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); @@ -1654,9 +1654,9 @@ static s32 ixgbe_setup_kx4_x550em(struct ixgbe_hw *hw) s32 status; u32 reg_val; - status = ixgbe_read_iosf_sb_reg_x550(hw, IXGBE_KX4_LINK_CNTL_1, - IXGBE_SB_IOSF_TARGET_KX4_PCS0 + - hw->bus.lan_id, ®_val); + status = hw->mac.ops.read_iosf_sb_reg(hw, IXGBE_KX4_LINK_CNTL_1, + IXGBE_SB_IOSF_TARGET_KX4_PCS0 + + hw->bus.lan_id, ®_val); if (status) return status; @@ -1675,9 +1675,9 @@ static s32 ixgbe_setup_kx4_x550em(struct ixgbe_hw *hw) /* Restart auto-negotiation. 
*/ reg_val |= IXGBE_KX4_LINK_CNTL_1_TETH_AN_RESTART; - status = ixgbe_write_iosf_sb_reg_x550(hw, IXGBE_KX4_LINK_CNTL_1, - IXGBE_SB_IOSF_TARGET_KX4_PCS0 + - hw->bus.lan_id, reg_val); + status = hw->mac.ops.write_iosf_sb_reg(hw, IXGBE_KX4_LINK_CNTL_1, + IXGBE_SB_IOSF_TARGET_KX4_PCS0 + + hw->bus.lan_id, reg_val); return status; } @@ -1897,9 +1897,10 @@ static s32 ixgbe_setup_fc_x550em(struct ixgbe_hw *hw) if (hw->device_id != IXGBE_DEV_ID_X550EM_X_KR) return 0; - rc = ixgbe_read_iosf_sb_reg_x550(hw, - IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val); + rc = hw->mac.ops.read_iosf_sb_reg(hw, + IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, + ®_val); if (rc) return rc; @@ -1909,9 +1910,10 @@ static s32 ixgbe_setup_fc_x550em(struct ixgbe_hw *hw) reg_val |= IXGBE_KRM_AN_CNTL_1_SYM_PAUSE; if (asm_dir) reg_val |= IXGBE_KRM_AN_CNTL_1_ASM_PAUSE; - rc = ixgbe_write_iosf_sb_reg_x550(hw, - IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); + rc = hw->mac.ops.write_iosf_sb_reg(hw, + IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, + reg_val); /* This device does not fully support AN. */ hw->fc.disable_fc_autoneg = true; @@ -2449,6 +2451,8 @@ static const struct ixgbe_mac_operations mac_ops_X550EM_x = { .release_swfw_sync = &ixgbe_release_swfw_sync_X550em, .init_swfw_sync = &ixgbe_init_swfw_sync_X540, .setup_fc = NULL, /* defined later */ + .read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550, + .write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550, }; #define X550_COMMON_EEP \ From 49425dfc74512bef9cf15eafb5de0fc98f024e20 Mon Sep 17 00:00:00 2001 From: Mark Rustad Date: Fri, 1 Apr 2016 12:18:09 -0700 Subject: [PATCH 0455/1649] ixgbe: Add support for x550em_a 10G MAC type Add support for x550em_a 10G MAC type to the ixgbe driver. The new MAC includes new firmware commands that need to be used to control PHY and IOSF access, so that support is also added. The interface supported is a native SFP+ interface. 
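A minimal standalone sketch of the indirection these patches rely on (the struct and backend names here are illustrative stand-ins, not the driver's): IOSF register access goes through function pointers in the MAC ops table, so the new MAC can plug in a firmware-command-based accessor without changing any caller.

/* Standalone illustration, not driver code. */
#include <stdint.h>
#include <stdio.h>

struct hw;

struct mac_ops {
	int (*read_iosf_sb_reg)(struct hw *hw, uint32_t reg, uint32_t *val);
	int (*write_iosf_sb_reg)(struct hw *hw, uint32_t reg, uint32_t val);
};

struct hw {
	struct mac_ops ops;
};

/* direct access backend (made-up behaviour) */
static int read_iosf_direct(struct hw *hw, uint32_t reg, uint32_t *val)
{
	(void)hw;
	*val = reg ^ 0xffffffffu;
	return 0;
}

/* backend that would broker the access through a firmware command (made-up) */
static int read_iosf_fw(struct hw *hw, uint32_t reg, uint32_t *val)
{
	(void)hw;
	*val = reg + 1;
	return 0;
}

static int write_iosf_noop(struct hw *hw, uint32_t reg, uint32_t val)
{
	(void)hw; (void)reg; (void)val;
	return 0;
}

/* the caller is identical for both MACs: it never names a backend */
static int setup_link(struct hw *hw, uint32_t reg)
{
	uint32_t val;
	int rc = hw->ops.read_iosf_sb_reg(hw, reg, &val);

	if (rc)
		return rc;
	return hw->ops.write_iosf_sb_reg(hw, reg, val | 0x1);
}

int main(void)
{
	struct hw a = { .ops = { read_iosf_direct, write_iosf_noop } };
	struct hw b = { .ops = { read_iosf_fw,     write_iosf_noop } };

	printf("direct: %d, fw: %d\n", setup_link(&a, 0x10), setup_link(&b, 0x10));
	return 0;
}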
Signed-off-by: Mark Rustad Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe.h | 3 + .../net/ethernet/intel/ixgbe/ixgbe_82599.c | 1 + .../net/ethernet/intel/ixgbe/ixgbe_common.c | 1 + drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c | 6 +- .../net/ethernet/intel/ixgbe/ixgbe_ethtool.c | 9 +- drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c | 3 +- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 43 +++- drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c | 2 + drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c | 6 +- drivers/net/ethernet/intel/ixgbe/ixgbe_type.h | 38 ++++ drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c | 208 +++++++++++++++++- 11 files changed, 311 insertions(+), 9 deletions(-) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index 4590fabdedf0..d10ed62993c1 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -817,6 +817,7 @@ static inline u8 ixgbe_max_rss_indices(struct ixgbe_adapter *adapter) return IXGBE_MAX_RSS_INDICES; case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: return IXGBE_MAX_RSS_INDICES_X550; default: return 0; @@ -860,6 +861,7 @@ enum ixgbe_boards { board_X540, board_X550, board_X550EM_x, + board_x550em_a, }; extern const struct ixgbe_info ixgbe_82598_info; @@ -867,6 +869,7 @@ extern const struct ixgbe_info ixgbe_82599_info; extern const struct ixgbe_info ixgbe_X540_info; extern const struct ixgbe_info ixgbe_X550_info; extern const struct ixgbe_info ixgbe_X550EM_x_info; +extern const struct ixgbe_info ixgbe_x550em_a_info; #ifdef CONFIG_IXGBE_DCB extern const struct dcbnl_rtnl_ops dcbnl_ops; #endif diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c index 4bb6b685263b..01519787324a 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c @@ -1633,6 +1633,7 @@ s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw, switch (hw->mac.type) { case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, ~fdirtcpm); break; default: diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c index 8c560da29d23..11450bd8ec9c 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c @@ -2855,6 +2855,7 @@ u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw) case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: pcie_offset = IXGBE_PCIE_MSIX_82599_CAPS; max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599; break; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c index 02c7333a9c83..f8fb2acc2632 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2014 Intel Corporation. + Copyright(c) 1999 - 2016 Intel Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -293,6 +293,7 @@ s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw, case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: return ixgbe_dcb_hw_config_82599(hw, pfc_en, refill, max, bwgid, ptype, prio_tc); default: @@ -311,6 +312,7 @@ s32 ixgbe_dcb_hw_pfc_config(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc) case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: return ixgbe_dcb_config_pfc_82599(hw, pfc_en, prio_tc); default: break; @@ -368,6 +370,7 @@ s32 ixgbe_dcb_hw_ets_config(struct ixgbe_hw *hw, case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id, prio_type, prio_tc); ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max, @@ -398,6 +401,7 @@ void ixgbe_dcb_read_rtrup2tc(struct ixgbe_hw *hw, u8 *map) case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: ixgbe_dcb_read_rtrup2tc_82599(hw, map); break; default: diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index b3530e1e3ce1..9f76be1431b1 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2014 Intel Corporation. + Copyright(c) 1999 - 2016 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -547,6 +547,7 @@ static void ixgbe_get_regs(struct net_device *netdev, case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL_82599(i)); regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i)); break; @@ -660,6 +661,7 @@ static void ixgbe_get_regs(struct net_device *netdev, case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_RTTDCS); regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RTRPCS); for (i = 0; i < 8; i++) @@ -1443,6 +1445,7 @@ static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data) case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: toggle = 0x7FFFF30F; test = reg_test_82599; break; @@ -1681,6 +1684,7 @@ static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter) case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: reg_ctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); reg_ctl &= ~IXGBE_DMATXCTL_TE; IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_ctl); @@ -1720,6 +1724,7 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter) case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_DMATXCTL); reg_data |= IXGBE_DMATXCTL_TE; IXGBE_WRITE_REG(&adapter->hw, IXGBE_DMATXCTL, reg_data); @@ -1780,6 +1785,7 @@ static int ixgbe_setup_loopback_test(struct ixgbe_adapter *adapter) case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: reg_data = IXGBE_READ_REG(hw, IXGBE_MACC); reg_data |= IXGBE_MACC_FLU; IXGBE_WRITE_REG(hw, IXGBE_MACC, 
reg_data); @@ -2991,6 +2997,7 @@ static int ixgbe_get_ts_info(struct net_device *dev, switch (adapter->hw.mac.type) { case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: case ixgbe_mac_X540: case ixgbe_mac_82599EB: info->so_timestamping = diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c index e771e764daa3..bcdc88444ceb 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2013 Intel Corporation. + Copyright(c) 1999 - 2016 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -128,6 +128,7 @@ static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc, case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: if (num_tcs > 4) { /* * TCs : TC0/1 TC2/3 TC4-7 diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 9594438ffa07..eb93319337a1 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -73,7 +73,7 @@ static char ixgbe_default_device_descr[] = #define DRV_VERSION "4.2.1-k" const char ixgbe_driver_version[] = DRV_VERSION; static const char ixgbe_copyright[] = - "Copyright (c) 1999-2015 Intel Corporation."; + "Copyright (c) 1999-2016 Intel Corporation."; static const char ixgbe_overheat_msg[] = "Network adapter has been stopped because it has over heated. Restart the computer. If the problem persists, power off the system and replace the adapter"; @@ -83,6 +83,7 @@ static const struct ixgbe_info *ixgbe_info_tbl[] = { [board_X540] = &ixgbe_X540_info, [board_X550] = &ixgbe_X550_info, [board_X550EM_x] = &ixgbe_X550EM_x_info, + [board_x550em_a] = &ixgbe_x550em_a_info, }; /* ixgbe_pci_tbl - PCI Device ID Table @@ -130,6 +131,7 @@ static const struct pci_device_id ixgbe_pci_tbl[] = { {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KR), board_X550EM_x}, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_10G_T), board_X550EM_x}, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_SFP), board_X550EM_x}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SFP_N), board_x550em_a }, /* required last entry */ {0, } }; @@ -861,6 +863,7 @@ static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction, case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: if (direction == -1) { /* other causes */ msix_vector |= IXGBE_IVAR_ALLOC_VAL; @@ -899,6 +902,7 @@ static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter, case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: mask = (qmask & 0xFFFFFFFF); IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask); mask = (qmask >> 32); @@ -2245,6 +2249,7 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter) case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: ixgbe_set_ivar(adapter, -1, 1, v_idx); break; default: @@ -2356,6 +2361,7 @@ void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector) case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: /* * set the WDIS bit to not clear the timer bits and cause an * immediate assertion of the interrupt @@ -2517,6 
+2523,7 @@ static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw) return false; case ixgbe_mac_82599EB: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: switch (hw->mac.ops.get_media_type(hw)) { case ixgbe_media_type_fiber: case ixgbe_media_type_fiber_qsfp: @@ -2591,6 +2598,7 @@ static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter, case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: mask = (qmask & 0xFFFFFFFF); if (mask) IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask); @@ -2619,6 +2627,7 @@ static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter, case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: mask = (qmask & 0xFFFFFFFF); if (mask) IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask); @@ -2654,6 +2663,7 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues, case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: mask |= IXGBE_EIMS_TS; break; default: @@ -2669,7 +2679,9 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues, case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: - if (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_X_SFP) + case ixgbe_mac_x550em_a: + if (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_X_SFP || + adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_SFP_N) mask |= IXGBE_EIMS_GPI_SDP0(&adapter->hw); if (adapter->hw.phy.type == ixgbe_phy_x550em_ext_t) mask |= IXGBE_EICR_GPI_SDP0_X540; @@ -2727,6 +2739,7 @@ static irqreturn_t ixgbe_msix_other(int irq, void *data) case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: if (hw->phy.type == ixgbe_phy_x550em_ext_t && (eicr & IXGBE_EICR_GPI_SDP0_X540)) { adapter->flags2 |= IXGBE_FLAG2_PHY_INTERRUPT; @@ -2963,6 +2976,7 @@ static irqreturn_t ixgbe_intr(int irq, void *data) case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: if (eicr & IXGBE_EICR_ECC) { e_info(link, "Received ECC Err, initiating reset\n"); adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED; @@ -3059,6 +3073,7 @@ static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter) case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000); IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0); IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0); @@ -3858,6 +3873,7 @@ static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter) break; case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: if (adapter->num_vfs) rdrxctl |= IXGBE_RDRXCTL_PSP; /* fall through for older HW */ @@ -4021,6 +4037,7 @@ static void ixgbe_vlan_strip_disable(struct ixgbe_adapter *adapter) case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: for (i = 0; i < adapter->num_rx_queues; i++) { struct ixgbe_ring *ring = adapter->rx_ring[i]; @@ -4057,6 +4074,7 @@ static void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter) case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: for (i = 0; i < adapter->num_rx_queues; i++) { struct ixgbe_ring *ring = adapter->rx_ring[i]; @@ -4083,6 +4101,7 @@ static void ixgbe_vlan_promisc_enable(struct ixgbe_adapter *adapter) case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: default: if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) break; @@ -4173,6 
+4192,7 @@ static void ixgbe_vlan_promisc_disable(struct ixgbe_adapter *adapter) case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: default: if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) break; @@ -4561,6 +4581,7 @@ static void ixgbe_clear_vxlan_port(struct ixgbe_adapter *adapter) switch (adapter->hw.mac.type) { case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: IXGBE_WRITE_REG(&adapter->hw, IXGBE_VXLANCTRL, 0); adapter->vxlan_port = 0; break; @@ -4661,6 +4682,7 @@ static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb) case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: dv_id = IXGBE_DV_X540(link, tc); break; default: @@ -4721,6 +4743,7 @@ static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter, int pb) case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: dv_id = IXGBE_LOW_DV_X540(tc); break; default: @@ -5137,6 +5160,7 @@ static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter) case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: default: IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF); IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF); @@ -5187,6 +5211,7 @@ static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter) gpie |= IXGBE_SDP1_GPIEN_8259X | IXGBE_SDP2_GPIEN_8259X; break; case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: gpie |= IXGBE_SDP0_GPIEN_X540; break; default: @@ -5498,6 +5523,7 @@ void ixgbe_down(struct ixgbe_adapter *adapter) case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) & ~IXGBE_DMATXCTL_TE)); @@ -5616,6 +5642,7 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter) adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE; break; case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: case ixgbe_mac_X550: #ifdef CONFIG_IXGBE_DCA adapter->flags &= ~IXGBE_FLAG_DCA_CAPABLE; @@ -5641,6 +5668,7 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter) case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: adapter->dcb_cfg.num_tcs.pg_tcs = X540_TRAFFIC_CLASS; adapter->dcb_cfg.num_tcs.pfc_tcs = X540_TRAFFIC_CLASS; break; @@ -6248,6 +6276,7 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake) case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: pci_wake_from_d3(pdev, !!wufc); break; default: @@ -6383,6 +6412,7 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter) case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: hwstats->pxonrxc[i] += IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i)); break; @@ -6398,7 +6428,8 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter) if ((hw->mac.type == ixgbe_mac_82599EB) || (hw->mac.type == ixgbe_mac_X540) || (hw->mac.type == ixgbe_mac_X550) || - (hw->mac.type == ixgbe_mac_X550EM_x)) { + (hw->mac.type == ixgbe_mac_X550EM_x) || + (hw->mac.type == ixgbe_mac_x550em_a)) { hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i)); IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)); /* to clear */ hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(i)); @@ -6423,6 +6454,7 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter) case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: /* OS2BMC stats are X540 and later */ hwstats->o2bgptc += IXGBE_READ_REG(hw, IXGBE_O2BGPTC); 
hwstats->o2bspc += IXGBE_READ_REG(hw, IXGBE_O2BSPC); @@ -6693,6 +6725,7 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter) case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: case ixgbe_mac_82599EB: { u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN); u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG); @@ -9146,6 +9179,7 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0); break; default: @@ -9578,6 +9612,9 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev, case ixgbe_mac_X550EM_x: device_id = IXGBE_DEV_ID_X550EM_X_VF; break; + case ixgbe_mac_x550em_a: + device_id = IXGBE_DEV_ID_X550EM_A_VF; + break; default: device_id = 0; break; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c index 2837c94d6e35..b2125e358f7b 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c @@ -307,6 +307,7 @@ static s32 ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number) case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: vflre = IXGBE_READ_REG(hw, IXGBE_VFLREC(reg_offset)); break; default: @@ -430,6 +431,7 @@ void ixgbe_init_mbx_params_pf(struct ixgbe_hw *hw) if (hw->mac.type != ixgbe_mac_82599EB && hw->mac.type != ixgbe_mac_X550 && hw->mac.type != ixgbe_mac_X550EM_x && + hw->mac.type != ixgbe_mac_x550em_a && hw->mac.type != ixgbe_mac_X540) return; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c index ef1504d41890..bdc8fdcc07a5 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2015 Intel Corporation. + Copyright(c) 1999 - 2016 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -333,6 +333,7 @@ static void ixgbe_ptp_convert_to_hwtstamp(struct ixgbe_adapter *adapter, */ case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: /* Upper 32 bits represent billions of cycles, lower 32 bits * represent cycles. However, we use timespec64_to_ns for the * correct math even though the units haven't been corrected @@ -921,6 +922,7 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter, switch (hw->mac.type) { case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: /* enable timestamping all packets only if at least some * packets were requested. 
Otherwise, play nice and disable * timestamping @@ -1083,6 +1085,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter) cc.shift = 2; } /* fallthrough */ + case ixgbe_mac_x550em_a: case ixgbe_mac_X550: cc.read = ixgbe_ptp_read_X550; @@ -1223,6 +1226,7 @@ static long ixgbe_ptp_create_clock(struct ixgbe_adapter *adapter) break; case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: snprintf(adapter->ptp_caps.name, 16, "%s", netdev->name); adapter->ptp_caps.owner = THIS_MODULE; adapter->ptp_caps.max_adj = 30000000; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h index fef2264ff5f0..ced38c19436c 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h @@ -2627,6 +2627,20 @@ enum ixgbe_fdir_pballoc_type { #define FW_MAX_READ_BUFFER_SIZE 1024 #define FW_DISABLE_RXEN_CMD 0xDE #define FW_DISABLE_RXEN_LEN 0x1 +#define FW_PHY_MGMT_REQ_CMD 0x20 +#define FW_PHY_TOKEN_REQ_CMD 0x0A +#define FW_PHY_TOKEN_REQ_LEN 2 +#define FW_PHY_TOKEN_REQ 0 +#define FW_PHY_TOKEN_REL 1 +#define FW_PHY_TOKEN_OK 1 +#define FW_PHY_TOKEN_RETRY 0x80 +#define FW_PHY_TOKEN_DELAY 5 /* milliseconds */ +#define FW_PHY_TOKEN_WAIT 5 /* seconds */ +#define FW_PHY_TOKEN_RETRIES ((FW_PHY_TOKEN_WAIT * 1000) / FW_PHY_TOKEN_DELAY) +#define FW_INT_PHY_REQ_CMD 0xB +#define FW_INT_PHY_REQ_LEN 10 +#define FW_INT_PHY_REQ_READ 0 +#define FW_INT_PHY_REQ_WRITE 1 /* Host Interface Command Structures */ struct ixgbe_hic_hdr { @@ -2695,6 +2709,28 @@ struct ixgbe_hic_disable_rxen { u16 pad3; }; +struct ixgbe_hic_phy_token_req { + struct ixgbe_hic_hdr hdr; + u8 port_number; + u8 command_type; + u16 pad; +}; + +struct ixgbe_hic_internal_phy_req { + struct ixgbe_hic_hdr hdr; + u8 port_number; + u8 command_type; + __be16 address; + u16 rsv1; + __be32 write_data; + u16 pad; +} __packed; + +struct ixgbe_hic_internal_phy_resp { + struct ixgbe_hic_hdr hdr; + __be32 read_data; +}; + /* Transmit Descriptor - Advanced */ union ixgbe_adv_tx_desc { struct { @@ -3528,6 +3564,8 @@ struct ixgbe_info { #define IXGBE_ERR_INVALID_ARGUMENT -32 #define IXGBE_ERR_HOST_INTERFACE_COMMAND -33 #define IXGBE_ERR_FDIR_CMD_INCOMPLETE -38 +#define IXGBE_ERR_FW_RESP_INVALID -39 +#define IXGBE_ERR_TOKEN_RETRY -40 #define IXGBE_NOT_IMPLEMENTED 0x7FFFFFFF #define IXGBE_FUSES0_GROUP(_i) (0x11158 + ((_i) * 4)) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c index 878ea1ed87b4..ba161b5077eb 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c @@ -278,6 +278,8 @@ static s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw) hw->phy.phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM; ixgbe_setup_mux_ctl(hw); ixgbe_check_cs4227(hw); + /* Fallthrough */ + case IXGBE_DEV_ID_X550EM_A_SFP_N: return ixgbe_identify_module_generic(hw); case IXGBE_DEV_ID_X550EM_X_KX4: hw->phy.type = ixgbe_phy_x550em_kx4; @@ -413,6 +415,121 @@ out: return ret; } +/** + * ixgbe_get_phy_token - Get the token for shared PHY access + * @hw: Pointer to hardware structure + */ +static s32 ixgbe_get_phy_token(struct ixgbe_hw *hw) +{ + struct ixgbe_hic_phy_token_req token_cmd; + s32 status; + + token_cmd.hdr.cmd = FW_PHY_TOKEN_REQ_CMD; + token_cmd.hdr.buf_len = FW_PHY_TOKEN_REQ_LEN; + token_cmd.hdr.cmd_or_resp.cmd_resv = 0; + token_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM; + token_cmd.port_number = hw->bus.lan_id; + token_cmd.command_type = FW_PHY_TOKEN_REQ; + token_cmd.pad = 
0; + status = ixgbe_host_interface_command(hw, &token_cmd, sizeof(token_cmd), + IXGBE_HI_COMMAND_TIMEOUT, + true); + if (status) + return status; + if (token_cmd.hdr.cmd_or_resp.ret_status == FW_PHY_TOKEN_OK) + return 0; + if (token_cmd.hdr.cmd_or_resp.ret_status != FW_PHY_TOKEN_RETRY) + return IXGBE_ERR_FW_RESP_INVALID; + + return IXGBE_ERR_TOKEN_RETRY; +} + +/** + * ixgbe_put_phy_token - Put the token for shared PHY access + * @hw: Pointer to hardware structure + */ +static s32 ixgbe_put_phy_token(struct ixgbe_hw *hw) +{ + struct ixgbe_hic_phy_token_req token_cmd; + s32 status; + + token_cmd.hdr.cmd = FW_PHY_TOKEN_REQ_CMD; + token_cmd.hdr.buf_len = FW_PHY_TOKEN_REQ_LEN; + token_cmd.hdr.cmd_or_resp.cmd_resv = 0; + token_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM; + token_cmd.port_number = hw->bus.lan_id; + token_cmd.command_type = FW_PHY_TOKEN_REL; + token_cmd.pad = 0; + status = ixgbe_host_interface_command(hw, &token_cmd, sizeof(token_cmd), + IXGBE_HI_COMMAND_TIMEOUT, + true); + if (status) + return status; + if (token_cmd.hdr.cmd_or_resp.ret_status == FW_PHY_TOKEN_OK) + return 0; + return IXGBE_ERR_FW_RESP_INVALID; +} + +/** + * ixgbe_write_iosf_sb_reg_x550a - Write to IOSF PHY register + * @hw: pointer to hardware structure + * @reg_addr: 32 bit PHY register to write + * @device_type: 3 bit device type + * @data: Data to write to the register + **/ +static s32 ixgbe_write_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, + __always_unused u32 device_type, + u32 data) +{ + struct ixgbe_hic_internal_phy_req write_cmd; + + memset(&write_cmd, 0, sizeof(write_cmd)); + write_cmd.hdr.cmd = FW_INT_PHY_REQ_CMD; + write_cmd.hdr.buf_len = FW_INT_PHY_REQ_LEN; + write_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM; + write_cmd.port_number = hw->bus.lan_id; + write_cmd.command_type = FW_INT_PHY_REQ_WRITE; + write_cmd.address = cpu_to_be16(reg_addr); + write_cmd.write_data = cpu_to_be32(data); + + return ixgbe_host_interface_command(hw, &write_cmd, sizeof(write_cmd), + IXGBE_HI_COMMAND_TIMEOUT, false); +} + +/** + * ixgbe_read_iosf_sb_reg_x550a - Read from IOSF PHY register + * @hw: pointer to hardware structure + * @reg_addr: 32 bit PHY register to write + * @device_type: 3 bit device type + * @data: Pointer to read data from the register + **/ +static s32 ixgbe_read_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, + __always_unused u32 device_type, + u32 *data) +{ + union { + struct ixgbe_hic_internal_phy_req cmd; + struct ixgbe_hic_internal_phy_resp rsp; + } hic; + s32 status; + + memset(&hic, 0, sizeof(hic)); + hic.cmd.hdr.cmd = FW_INT_PHY_REQ_CMD; + hic.cmd.hdr.buf_len = FW_INT_PHY_REQ_LEN; + hic.cmd.hdr.checksum = FW_DEFAULT_CHECKSUM; + hic.cmd.port_number = hw->bus.lan_id; + hic.cmd.command_type = FW_INT_PHY_REQ_READ; + hic.cmd.address = cpu_to_be16(reg_addr); + + status = ixgbe_host_interface_command(hw, &hic.cmd, sizeof(hic.cmd), + IXGBE_HI_COMMAND_TIMEOUT, true); + + /* Extract the register value from the response. */ + *data = be32_to_cpu(hic.rsp.read_data); + + return status; +} + /** ixgbe_read_ee_hostif_data_X550 - Read EEPROM word using a host interface * command assuming that the semaphore is already obtained. 
* @hw: pointer to hardware structure @@ -1339,9 +1456,9 @@ static void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw) mac->ops.disable_tx_laser = NULL; mac->ops.enable_tx_laser = NULL; mac->ops.flap_tx_laser = NULL; + mac->ops.setup_mac_link = ixgbe_setup_mac_link_sfp_x550em; mac->ops.setup_link = ixgbe_setup_mac_link_multispeed_fiber; mac->ops.setup_fc = ixgbe_setup_fc_x550em; - mac->ops.setup_mac_link = ixgbe_setup_mac_link_sfp_x550em; mac->ops.set_rate_select_speed = ixgbe_set_soft_rate_select_speed; break; @@ -1349,6 +1466,8 @@ static void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw) mac->ops.setup_link = ixgbe_setup_mac_link_t_X550em; mac->ops.setup_fc = ixgbe_setup_fc_generic; mac->ops.check_link = ixgbe_check_link_t_X550em; + return; + case ixgbe_media_type_backplane: break; default: mac->ops.setup_fc = ixgbe_setup_fc_x550em; @@ -2107,11 +2226,12 @@ static enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw) media_type = ixgbe_media_type_backplane; break; case IXGBE_DEV_ID_X550EM_X_SFP: + case IXGBE_DEV_ID_X550EM_A_SFP_N: media_type = ixgbe_media_type_fiber; break; case IXGBE_DEV_ID_X550EM_X_1G_T: case IXGBE_DEV_ID_X550EM_X_10G_T: - media_type = ixgbe_media_type_copper; + media_type = ixgbe_media_type_copper; break; default: media_type = ixgbe_media_type_unknown; @@ -2375,6 +2495,59 @@ static void ixgbe_release_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask) ixgbe_release_swfw_sync_X540(hw, mask); } +/** + * ixgbe_acquire_swfw_sync_x550em_a - Acquire SWFW semaphore + * @hw: pointer to hardware structure + * @mask: Mask to specify which semaphore to acquire + * + * Acquires the SWFW semaphore and get the shared PHY token as needed + */ +static s32 ixgbe_acquire_swfw_sync_x550em_a(struct ixgbe_hw *hw, u32 mask) +{ + u32 hmask = mask & ~IXGBE_GSSR_TOKEN_SM; + int retries = FW_PHY_TOKEN_RETRIES; + s32 status; + + while (--retries) { + status = 0; + if (hmask) + status = ixgbe_acquire_swfw_sync_X540(hw, hmask); + if (status) + return status; + if (!(mask & IXGBE_GSSR_TOKEN_SM)) + return 0; + + status = ixgbe_get_phy_token(hw); + if (!status) + return 0; + if (hmask) + ixgbe_release_swfw_sync_X540(hw, hmask); + if (status != IXGBE_ERR_TOKEN_RETRY) + return status; + udelay(FW_PHY_TOKEN_DELAY * 1000); + } + + return status; +} + +/** + * ixgbe_release_swfw_sync_x550em_a - Release SWFW semaphore + * @hw: pointer to hardware structure + * @mask: Mask to specify which semaphore to release + * + * Release the SWFW semaphore and puts the shared PHY token as needed + */ +static void ixgbe_release_swfw_sync_x550em_a(struct ixgbe_hw *hw, u32 mask) +{ + u32 hmask = mask & ~IXGBE_GSSR_TOKEN_SM; + + if (mask & IXGBE_GSSR_TOKEN_SM) + ixgbe_put_phy_token(hw); + + if (hmask) + ixgbe_release_swfw_sync_X540(hw, hmask); +} + #define X550_COMMON_MAC \ .init_hw = &ixgbe_init_hw_generic, \ .start_hw = &ixgbe_start_hw_X540, \ @@ -2455,6 +2628,23 @@ static const struct ixgbe_mac_operations mac_ops_X550EM_x = { .write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550, }; +static struct ixgbe_mac_operations mac_ops_x550em_a = { + X550_COMMON_MAC + .reset_hw = ixgbe_reset_hw_X550em, + .get_media_type = ixgbe_get_media_type_X550em, + .get_san_mac_addr = NULL, + .get_wwn_prefix = NULL, + .setup_link = NULL, /* defined later */ + .get_link_capabilities = ixgbe_get_link_capabilities_X550em, + .get_bus_info = ixgbe_get_bus_info_X550em, + .setup_sfp = ixgbe_setup_sfp_modules_X550em, + .acquire_swfw_sync = ixgbe_acquire_swfw_sync_x550em_a, + .release_swfw_sync = ixgbe_release_swfw_sync_x550em_a, + 
.setup_fc = ixgbe_setup_fc_generic, + .read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550a, + .write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550a, +}; + #define X550_COMMON_EEP \ .read = &ixgbe_read_ee_hostif_X550, \ .read_buffer = &ixgbe_read_ee_hostif_buffer_X550, \ @@ -2515,6 +2705,10 @@ static const u32 ixgbe_mvals_X550EM_x[IXGBE_MVALS_IDX_LIMIT] = { IXGBE_MVALS_INIT(X550EM_x) }; +static const u32 ixgbe_mvals_x550em_a[IXGBE_MVALS_IDX_LIMIT] = { + IXGBE_MVALS_INIT(X550EM_a) +}; + const struct ixgbe_info ixgbe_X550_info = { .mac = ixgbe_mac_X550, .get_invariants = &ixgbe_get_invariants_X540, @@ -2534,3 +2728,13 @@ const struct ixgbe_info ixgbe_X550EM_x_info = { .mbx_ops = &mbx_ops_generic, .mvals = ixgbe_mvals_X550EM_x, }; + +const struct ixgbe_info ixgbe_x550em_a_info = { + .mac = ixgbe_mac_x550em_a, + .get_invariants = &ixgbe_get_invariants_X550_x, + .mac_ops = &mac_ops_x550em_a, + .eeprom_ops = &eeprom_ops_X550EM_x, + .phy_ops = &phy_ops_X550EM_x, + .mbx_ops = &mbx_ops_generic, + .mvals = ixgbe_mvals_x550em_a, +}; From d31afc8f5ca11249a3b15dafa5972fc76e4099cf Mon Sep 17 00:00:00 2001 From: Mark Rustad Date: Fri, 1 Apr 2016 12:18:14 -0700 Subject: [PATCH 0456/1649] ixgbe: Use new methods for PHY access Now x550em_a devices will use a new method for PHY access that will get the firmware token for each access. Signed-off-by: Mark Rustad Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c | 67 ++++++++++++++++++- 1 file changed, 64 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c index ba161b5077eb..ef1dc3b5b4ed 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c @@ -2548,6 +2548,57 @@ static void ixgbe_release_swfw_sync_x550em_a(struct ixgbe_hw *hw, u32 mask) ixgbe_release_swfw_sync_X540(hw, hmask); } +/** + * ixgbe_read_phy_reg_x550a - Reads specified PHY register + * @hw: pointer to hardware structure + * @reg_addr: 32 bit address of PHY register to read + * @phy_data: Pointer to read data from PHY register + * + * Reads a value from a specified PHY register using the SWFW lock and PHY + * Token. The PHY Token is needed since the MDIO is shared between to MAC + * instances. + */ +static s32 ixgbe_read_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u16 *phy_data) +{ + u32 mask = hw->phy.phy_semaphore_mask | IXGBE_GSSR_TOKEN_SM; + s32 status; + + if (hw->mac.ops.acquire_swfw_sync(hw, mask)) + return IXGBE_ERR_SWFW_SYNC; + + status = hw->phy.ops.read_reg_mdi(hw, reg_addr, device_type, phy_data); + + hw->mac.ops.release_swfw_sync(hw, mask); + + return status; +} + +/** + * ixgbe_write_phy_reg_x550a - Writes specified PHY register + * @hw: pointer to hardware structure + * @reg_addr: 32 bit PHY register to write + * @device_type: 5 bit device type + * @phy_data: Data to write to the PHY register + * + * Writes a value to specified PHY register using the SWFW lock and PHY Token. + * The PHY Token is needed since the MDIO is shared between to MAC instances. 
+ */ +static s32 ixgbe_write_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u16 phy_data) +{ + u32 mask = hw->phy.phy_semaphore_mask | IXGBE_GSSR_TOKEN_SM; + s32 status; + + if (hw->mac.ops.acquire_swfw_sync(hw, mask)) + return IXGBE_ERR_SWFW_SYNC; + + status = ixgbe_write_phy_reg_mdi(hw, reg_addr, device_type, phy_data); + hw->mac.ops.release_swfw_sync(hw, mask); + + return status; +} + #define X550_COMMON_MAC \ .init_hw = &ixgbe_init_hw_generic, \ .start_hw = &ixgbe_start_hw_X540, \ @@ -2673,8 +2724,6 @@ static const struct ixgbe_eeprom_operations eeprom_ops_X550EM_x = { .read_i2c_sff8472 = &ixgbe_read_i2c_sff8472_generic, \ .read_i2c_eeprom = &ixgbe_read_i2c_eeprom_generic, \ .write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic, \ - .read_reg = &ixgbe_read_phy_reg_generic, \ - .write_reg = &ixgbe_write_phy_reg_generic, \ .setup_link = &ixgbe_setup_phy_link_generic, \ .set_phy_power = NULL, \ .check_overtemp = &ixgbe_tn_check_overtemp, \ @@ -2684,12 +2733,16 @@ static const struct ixgbe_phy_operations phy_ops_X550 = { X550_COMMON_PHY .init = NULL, .identify = &ixgbe_identify_phy_generic, + .read_reg = &ixgbe_read_phy_reg_generic, + .write_reg = &ixgbe_write_phy_reg_generic, }; static const struct ixgbe_phy_operations phy_ops_X550EM_x = { X550_COMMON_PHY .init = &ixgbe_init_phy_ops_X550em, .identify = &ixgbe_identify_phy_x550em, + .read_reg = &ixgbe_read_phy_reg_generic, + .write_reg = &ixgbe_write_phy_reg_generic, .read_i2c_combined = &ixgbe_read_i2c_combined_generic, .write_i2c_combined = &ixgbe_write_i2c_combined_generic, .read_i2c_combined_unlocked = &ixgbe_read_i2c_combined_generic_unlocked, @@ -2697,6 +2750,14 @@ static const struct ixgbe_phy_operations phy_ops_X550EM_x = { &ixgbe_write_i2c_combined_generic_unlocked, }; +static const struct ixgbe_phy_operations phy_ops_x550em_a = { + X550_COMMON_PHY + .init = &ixgbe_init_phy_ops_X550em, + .identify = &ixgbe_identify_phy_x550em, + .read_reg = &ixgbe_read_phy_reg_x550a, + .write_reg = &ixgbe_write_phy_reg_x550a, +}; + static const u32 ixgbe_mvals_X550[IXGBE_MVALS_IDX_LIMIT] = { IXGBE_MVALS_INIT(X550) }; @@ -2734,7 +2795,7 @@ const struct ixgbe_info ixgbe_x550em_a_info = { .get_invariants = &ixgbe_get_invariants_X550_x, .mac_ops = &mac_ops_x550em_a, .eeprom_ops = &eeprom_ops_X550EM_x, - .phy_ops = &phy_ops_X550EM_x, + .phy_ops = &phy_ops_x550em_a, .mbx_ops = &mbx_ops_generic, .mvals = ixgbe_mvals_x550em_a, }; From c898fe280457dcdf500fc1001ee73cb1adedc4d2 Mon Sep 17 00:00:00 2001 From: Mark Rustad Date: Fri, 1 Apr 2016 12:18:20 -0700 Subject: [PATCH 0457/1649] ixgbe: Read and set instance id Read the instance number from EEPROM and save it for later use. 
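The instance number is a one-bit field in EEPROM control word 4, pulled out with the usual mask-and-shift idiom shown in the hunk below. A minimal standalone illustration of that extraction, reusing the constants from the patch (everything else here is sketch scaffolding, not driver code):

#include <stdint.h>
#include <stdio.h>

/* Values copied from the hunk below. */
#define IXGBE_EE_CTRL_4_INST_ID       0x10
#define IXGBE_EE_CTRL_4_INST_ID_SHIFT 4

/* Extract the MAC instance bit from EEPROM control word 4. */
static unsigned int instance_from_ee_ctrl_4(uint16_t ee_ctrl_4)
{
	return (ee_ctrl_4 & IXGBE_EE_CTRL_4_INST_ID) >>
	       IXGBE_EE_CTRL_4_INST_ID_SHIFT;
}

int main(void)
{
	printf("instance = %u\n", instance_from_ee_ctrl_4(0x0003)); /* bit 4 clear -> 0 */
	printf("instance = %u\n", instance_from_ee_ctrl_4(0x0013)); /* bit 4 set   -> 1 */
	return 0;
}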
Signed-off-by: Mark Rustad Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe_common.c | 8 ++++++++ drivers/net/ethernet/intel/ixgbe/ixgbe_type.h | 5 +++++ 2 files changed, 13 insertions(+) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c index 11450bd8ec9c..737443a015d5 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c @@ -682,6 +682,7 @@ s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw) void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw) { struct ixgbe_bus_info *bus = &hw->bus; + u16 ee_ctrl_4; u32 reg; reg = IXGBE_READ_REG(hw, IXGBE_STATUS); @@ -692,6 +693,13 @@ void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw) reg = IXGBE_READ_REG(hw, IXGBE_FACTPS(hw)); if (reg & IXGBE_FACTPS_LFS) bus->func ^= 0x1; + + /* Get MAC instance from EEPROM for configuring CS4227 */ + if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP) { + hw->eeprom.ops.read(hw, IXGBE_EEPROM_CTRL_4, &ee_ctrl_4); + bus->instance_id = (ee_ctrl_4 & IXGBE_EE_CTRL_4_INST_ID) >> + IXGBE_EE_CTRL_4_INST_ID_SHIFT; + } } /** diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h index ced38c19436c..a5c789e30de3 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h @@ -82,6 +82,7 @@ #define IXGBE_DEV_ID_X550EM_X_10G_T 0x15AD #define IXGBE_DEV_ID_X550EM_X_1G_T 0x15AE #define IXGBE_DEV_ID_X550EM_A_SFP_N 0x15C4 +#define IXGBE_DEV_ID_X550EM_A_SFP 0x15CE /* VF Device IDs */ #define IXGBE_DEV_ID_X550_VF_HV 0x1564 @@ -2000,6 +2001,9 @@ enum { #define IXGBE_PBANUM_PTR_GUARD 0xFAFA #define IXGBE_EEPROM_CHECKSUM 0x3F #define IXGBE_EEPROM_SUM 0xBABA +#define IXGBE_EEPROM_CTRL_4 0x45 +#define IXGBE_EE_CTRL_4_INST_ID 0x10 +#define IXGBE_EE_CTRL_4_INST_ID_SHIFT 4 #define IXGBE_PCIE_ANALOG_PTR 0x03 #define IXGBE_ATLAS0_CONFIG_PTR 0x04 #define IXGBE_PHY_PTR 0x04 @@ -3175,6 +3179,7 @@ struct ixgbe_bus_info { u8 func; u8 lan_id; + u8 instance_id; }; /* Flow control parameters */ From 537cc5df4fcb82c0ee1f1dc4751357929a135bbc Mon Sep 17 00:00:00 2001 From: Mark Rustad Date: Fri, 1 Apr 2016 12:18:25 -0700 Subject: [PATCH 0458/1649] ixgbe: Read and parse NW_MNG_IF_SEL register Read the IXGBE_NW_MNG_IF_SEL register and use it to set interface attributes. 
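The parse added below boils down to: if NW_MNG_IF_SEL reads back non-zero, take the external-PHY MDIO address from bits 7:3, otherwise leave the PHY address alone. A simplified standalone version of that decision follows; the mask and shift are copied from the hunk, while the function and variable names are sketch-only and the example register value is made up:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Constants mirrored from the hunk below. */
#define NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT 3
#define NW_MNG_IF_SEL_MDIO_PHY_ADD \
	(0x1Fu << NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT)

/* Returns true and fills *prtad when the register carries a PHY address. */
static bool parse_nw_mng_if_sel(uint32_t reg, unsigned int *prtad)
{
	if (!reg)
		return false;	/* field not programmed by firmware */

	*prtad = (reg & NW_MNG_IF_SEL_MDIO_PHY_ADD) >>
		 NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT;
	return true;
}

int main(void)
{
	unsigned int prtad;

	if (parse_nw_mng_if_sel(0x01000020, &prtad))
		printf("external PHY at MDIO address %u\n", prtad);
	else
		printf("NW_MNG_IF_SEL not set\n");
	return 0;
}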
Signed-off-by: Mark Rustad Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe_type.h | 5 +++ drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c | 37 ++++++++++++++++--- 2 files changed, 37 insertions(+), 5 deletions(-) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h index a5c789e30de3..6b68e8ba1dce 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h @@ -3649,5 +3649,10 @@ struct ixgbe_info { #define IXGBE_SB_IOSF_TARGET_KX4_PCS1 3 #define IXGBE_NW_MNG_IF_SEL 0x00011178 +#define IXGBE_NW_MNG_IF_SEL_MDIO_ACT BIT(1) +#define IXGBE_NW_MNG_IF_SEL_ENABLE_10_100M BIT(23) #define IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE BIT(24) +#define IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT 3 +#define IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD \ + (0x1F << IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT) #endif /* _IXGBE_TYPE_H_ */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c index ef1dc3b5b4ed..3563b862d8ea 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c @@ -2137,6 +2137,36 @@ static s32 ixgbe_enter_lplu_t_x550em(struct ixgbe_hw *hw) return status; } +/** + * ixgbe_read_mng_if_sel_x550em - Read NW_MNG_IF_SEL register + * @hw: pointer to hardware structure + * + * Read NW_MNG_IF_SEL register and save field values. + */ +static void ixgbe_read_mng_if_sel_x550em(struct ixgbe_hw *hw) +{ + /* Save NW management interface connected on board. This is used + * to determine internal PHY mode. + */ + hw->phy.nw_mng_if_sel = IXGBE_READ_REG(hw, IXGBE_NW_MNG_IF_SEL); + + /* If X552 (X550EM_a) and MDIO is connected to external PHY, then set + * PHY address. This register field was has only been used for X552. + */ + if (!hw->phy.nw_mng_if_sel) { + if (hw->mac.type == ixgbe_mac_x550em_a) { + struct ixgbe_adapter *adapter = hw->back; + + e_warn(drv, "nw_mng_if_sel not set\n"); + } + return; + } + + hw->phy.mdio.prtad = (hw->phy.nw_mng_if_sel & + IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD) >> + IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT; +} + /** ixgbe_init_phy_ops_X550em - PHY/SFP specific init * @hw: pointer to hardware structure * @@ -2151,14 +2181,11 @@ static s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw) hw->mac.ops.set_lan_id(hw); + ixgbe_read_mng_if_sel_x550em(hw); + if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) { phy->phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM; ixgbe_setup_mux_ctl(hw); - - /* Save NW management interface connected on board. This is used - * to determine internal PHY mode. - */ - phy->nw_mng_if_sel = IXGBE_READ_REG(hw, IXGBE_NW_MNG_IF_SEL); } /* Identify the PHY or SFP module */ From e84db7272798ed8abb2760a3fcd9c6d89abf99a5 Mon Sep 17 00:00:00 2001 From: Mark Rustad Date: Fri, 1 Apr 2016 12:18:30 -0700 Subject: [PATCH 0459/1649] ixgbe: Introduce function to control MDIO speed Move code that controls MDIO speed into a new function because there will be more MACs that need the control. 
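The refactor below is a plain "hoist into a helper" move: the MDIO clock fixup used to be inlined in the reset path for a single device ID, and giving it its own switch means later patches only add a case. A compact standalone sketch of the pattern, with the register access faked by a global variable and the MDCSPD bit position chosen arbitrarily for the sketch (the second case is the one the following patch in this series adds):

#include <stdint.h>
#include <stdio.h>

#define HLREG0_MDCSPD (1u << 16)	/* bit position is illustrative only */

#define DEV_ID_X550EM_X_10G_T 0x15AD
#define DEV_ID_X550EM_A_SFP   0x15CE

static uint32_t hlreg0 = 0xFFFFFFFF;	/* fake register backing store */

/* One helper owns the MDIO clock-speed quirk; new MACs just add a case. */
static void set_mdio_speed(uint16_t device_id)
{
	switch (device_id) {
	case DEV_ID_X550EM_X_10G_T:
	case DEV_ID_X550EM_A_SFP:
		hlreg0 &= ~HLREG0_MDCSPD;	/* slow the MDC clock down */
		break;
	default:
		break;
	}
}

int main(void)
{
	set_mdio_speed(DEV_ID_X550EM_A_SFP);
	printf("HLREG0 = 0x%08x\n", (unsigned)hlreg0);
	return 0;
}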
Signed-off-by: Mark Rustad Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c | 27 ++++++++++++++----- 1 file changed, 21 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c index 3563b862d8ea..0d6cbb0af1a6 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c @@ -2306,6 +2306,26 @@ static s32 ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw) return status; } +/** + * ixgbe_set_mdio_speed - Set MDIO clock speed + * @hw: pointer to hardware structure + */ +static void ixgbe_set_mdio_speed(struct ixgbe_hw *hw) +{ + u32 hlreg0; + + switch (hw->device_id) { + case IXGBE_DEV_ID_X550EM_X_10G_T: + /* Config MDIO clock speed before the first MDIO PHY access */ + hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0); + hlreg0 &= ~IXGBE_HLREG0_MDCSPD; + IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0); + break; + default: + break; + } +} + /** ixgbe_reset_hw_X550em - Perform hardware reset ** @hw: pointer to hardware structure ** @@ -2319,7 +2339,6 @@ static s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw) s32 status; u32 ctrl = 0; u32 i; - u32 hlreg0; bool link_up = false; /* Call adapter stop to disable Tx/Rx and clear interrupts */ @@ -2405,11 +2424,7 @@ mac_reset_top: hw->mac.num_rar_entries = 128; hw->mac.ops.init_rx_addrs(hw); - if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) { - hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0); - hlreg0 &= ~IXGBE_HLREG0_MDCSPD; - IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0); - } + ixgbe_set_mdio_speed(hw); if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP) ixgbe_setup_mux_ctl(hw); From 2d40cd1720cb6eb4406b80866c08d97b92595dfe Mon Sep 17 00:00:00 2001 From: Mark Rustad Date: Fri, 1 Apr 2016 12:18:35 -0700 Subject: [PATCH 0460/1649] ixgbe: Add support for SFPs with retimer Add support for SFPs with an external retimer. 
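Part of the retimer support below is selecting the CS4227/CS4223 register "slice" for this port: the dual-port part uses only the LAN id, while the quad-port CS4223 also folds in the MAC instance read from EEPROM earlier in the series. A standalone sketch of that offset computation, with the PHY IDs and the shift mirrored from the hunks (the helper name and the example values are sketch-only):

#include <stdint.h>
#include <stdio.h>

#define CS4223_PHY_ID 0x7003	/* quad port */
#define CS4227_PHY_ID 0x3003	/* dual port */

/* Each slice of the retimer occupies a 0x1000-register window. */
static uint32_t retimer_slice_offset(uint16_t phy_id, unsigned int lan_id,
				     unsigned int instance_id)
{
	if (phy_id == CS4223_PHY_ID)
		return (uint32_t)(lan_id + (instance_id << 1)) << 12;

	return (uint32_t)lan_id << 12;
}

int main(void)
{
	/* Quad-port part, second MAC instance, LAN 1 -> 0x3000. */
	printf("slice offset = 0x%x\n",
	       (unsigned)retimer_slice_offset(CS4223_PHY_ID, 1, 1));
	/* Dual-port part, LAN 1 -> 0x1000. */
	printf("slice offset = 0x%x\n",
	       (unsigned)retimer_slice_offset(CS4227_PHY_ID, 1, 0));
	return 0;
}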
Signed-off-by: Mark Rustad Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 2 + drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h | 6 +- drivers/net/ethernet/intel/ixgbe/ixgbe_type.h | 5 + drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c | 133 +++++++++++++++++- 4 files changed, 144 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index eb93319337a1..93db4bf00dfe 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -132,6 +132,7 @@ static const struct pci_device_id ixgbe_pci_tbl[] = { {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_10G_T), board_X550EM_x}, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_SFP), board_X550EM_x}, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SFP_N), board_x550em_a }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SFP), board_x550em_a }, /* required last entry */ {0, } }; @@ -2681,6 +2682,7 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues, case ixgbe_mac_X550EM_x: case ixgbe_mac_x550em_a: if (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_X_SFP || + adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_SFP || adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_SFP_N) mask |= IXGBE_EIMS_GPI_SDP0(&adapter->hw); if (adapter->hw.phy.type == ixgbe_phy_x550em_ext_t) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h index 5abd66c84d00..cdf4c3800801 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2014 Intel Corporation. + Copyright(c) 1999 - 2016 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -81,7 +81,11 @@ #define IXGBE_I2C_EEPROM_STATUS_FAIL 0x2 #define IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS 0x3 #define IXGBE_CS4227 0xBE /* CS4227 address */ +#define IXGBE_CS4227_GLOBAL_ID_LSB 0 +#define IXGBE_CS4227_GLOBAL_ID_MSB 1 #define IXGBE_CS4227_SCRATCH 2 +#define IXGBE_CS4223_PHY_ID 0x7003 /* Quad port */ +#define IXGBE_CS4227_PHY_ID 0x3003 /* Dual port */ #define IXGBE_CS4227_RESET_PENDING 0x1357 #define IXGBE_CS4227_RESET_COMPLETE 0x5AA5 #define IXGBE_CS4227_RETRIES 15 diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h index 6b68e8ba1dce..fbbc13224657 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h @@ -1311,6 +1311,7 @@ struct ixgbe_thermal_sensor_data { /* MDIO definitions */ +#define IXGBE_MDIO_ZERO_DEV_TYPE 0x0 #define IXGBE_MDIO_PMA_PMD_DEV_TYPE 0x1 #define IXGBE_MDIO_PCS_DEV_TYPE 0x3 #define IXGBE_MDIO_PHY_XS_DEV_TYPE 0x4 @@ -3580,6 +3581,7 @@ struct ixgbe_info { #define IXGBE_KRM_PORT_CAR_GEN_CTRL(P) ((P) ? 0x8010 : 0x4010) #define IXGBE_KRM_LINK_CTRL_1(P) ((P) ? 0x820C : 0x420C) #define IXGBE_KRM_AN_CNTL_1(P) ((P) ? 0x822C : 0x422C) +#define IXGBE_KRM_AN_CNTL_8(P) ((P) ? 0x8248 : 0x4248) #define IXGBE_KRM_DSP_TXFFE_STATE_4(P) ((P) ? 0x8634 : 0x4634) #define IXGBE_KRM_DSP_TXFFE_STATE_5(P) ((P) ? 0x8638 : 0x4638) #define IXGBE_KRM_RX_TRN_LINKUP_CTRL(P) ((P) ? 
0x8B00 : 0x4B00) @@ -3605,6 +3607,9 @@ struct ixgbe_info { #define IXGBE_KRM_AN_CNTL_1_SYM_PAUSE (1 << 28) #define IXGBE_KRM_AN_CNTL_1_ASM_PAUSE (1 << 29) +#define IXGBE_KRM_AN_CNTL_8_LINEAR BIT(0) +#define IXGBE_KRM_AN_CNTL_8_LIMITING BIT(1) + #define IXGBE_KRM_DSP_TXFFE_STATE_C0_EN (1 << 6) #define IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN (1 << 15) #define IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN (1 << 16) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c index 0d6cbb0af1a6..a9d86b37872c 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c @@ -273,6 +273,12 @@ out: static s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw) { switch (hw->device_id) { + case IXGBE_DEV_ID_X550EM_A_SFP: + if (hw->bus.lan_id) + hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY1_SM; + else + hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY0_SM; + return ixgbe_identify_module_generic(hw); case IXGBE_DEV_ID_X550EM_X_SFP: /* set up for CS4227 usage */ hw->phy.phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM; @@ -1362,6 +1368,117 @@ i2c_err: return status; } +/** + * ixgbe_setup_mac_link_sfp_n - Setup internal PHY for native SFP + * @hw: pointer to hardware structure + * + * Configure the the integrated PHY for native SFP support. + */ +static s32 +ixgbe_setup_mac_link_sfp_n(struct ixgbe_hw *hw, ixgbe_link_speed speed, + __always_unused bool autoneg_wait_to_complete) +{ + bool setup_linear = false; + u32 reg_phy_int; + s32 rc; + + /* Check if SFP module is supported and linear */ + rc = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear); + + /* If no SFP module present, then return success. Return success since + * SFP not present error is not excepted in the setup MAC link flow. + */ + if (rc == IXGBE_ERR_SFP_NOT_PRESENT) + return 0; + + if (!rc) + return rc; + + /* Configure internal PHY for native SFI */ + rc = hw->mac.ops.read_iosf_sb_reg(hw, + IXGBE_KRM_AN_CNTL_8(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, + ®_phy_int); + if (rc) + return rc; + + if (setup_linear) { + reg_phy_int &= ~IXGBE_KRM_AN_CNTL_8_LIMITING; + reg_phy_int |= IXGBE_KRM_AN_CNTL_8_LINEAR; + } else { + reg_phy_int |= IXGBE_KRM_AN_CNTL_8_LIMITING; + reg_phy_int &= ~IXGBE_KRM_AN_CNTL_8_LINEAR; + } + + rc = hw->mac.ops.write_iosf_sb_reg(hw, + IXGBE_KRM_AN_CNTL_8(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, + reg_phy_int); + if (rc) + return rc; + + /* Setup XFI/SFI internal link */ + return ixgbe_setup_ixfi_x550em(hw, &speed); +} + +/** + * ixgbe_setup_mac_link_sfp_x550a - Setup internal PHY for SFP + * @hw: pointer to hardware structure + * + * Configure the the integrated PHY for SFP support. + */ +static s32 +ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw, ixgbe_link_speed speed, + __always_unused bool autoneg_wait_to_complete) +{ + u32 reg_slice, slice_offset; + bool setup_linear = false; + u16 reg_phy_ext; + s32 rc; + + /* Check if SFP module is supported and linear */ + rc = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear); + + /* If no SFP module present, then return success. Return success since + * SFP not present error is not excepted in the setup MAC link flow. + */ + if (rc == IXGBE_ERR_SFP_NOT_PRESENT) + return 0; + + if (!rc) + return rc; + + /* Configure internal PHY for KR/KX. 
*/ + ixgbe_setup_kr_speed_x550em(hw, speed); + + if (!hw->phy.mdio.prtad || hw->phy.mdio.prtad == 0xFFFF) + return IXGBE_ERR_PHY_ADDR_INVALID; + + /* Get external PHY device id */ + rc = hw->phy.ops.read_reg(hw, IXGBE_CS4227_GLOBAL_ID_MSB, + IXGBE_MDIO_ZERO_DEV_TYPE, ®_phy_ext); + if (rc) + return rc; + + /* When configuring quad port CS4223, the MAC instance is part + * of the slice offset. + */ + if (reg_phy_ext == IXGBE_CS4223_PHY_ID) + slice_offset = (hw->bus.lan_id + + (hw->bus.instance_id << 1)) << 12; + else + slice_offset = hw->bus.lan_id << 12; + + /* Configure CS4227/CS4223 LINE side to proper mode. */ + reg_slice = IXGBE_CS4227_LINE_SPARE24_LSB + slice_offset; + if (setup_linear) + reg_phy_ext = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 1; + else + reg_phy_ext = (IXGBE_CS4227_EDC_MODE_SR << 1) | 1; + return hw->phy.ops.write_reg(hw, reg_slice, IXGBE_MDIO_ZERO_DEV_TYPE, + reg_phy_ext); +} + /** * ixgbe_setup_mac_link_t_X550em - Sets the auto advertised link speed * @hw: pointer to hardware structure @@ -1456,9 +1573,21 @@ static void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw) mac->ops.disable_tx_laser = NULL; mac->ops.enable_tx_laser = NULL; mac->ops.flap_tx_laser = NULL; - mac->ops.setup_mac_link = ixgbe_setup_mac_link_sfp_x550em; mac->ops.setup_link = ixgbe_setup_mac_link_multispeed_fiber; mac->ops.setup_fc = ixgbe_setup_fc_x550em; + switch (hw->device_id) { + case IXGBE_DEV_ID_X550EM_A_SFP_N: + mac->ops.setup_mac_link = ixgbe_setup_mac_link_sfp_n; + break; + case IXGBE_DEV_ID_X550EM_A_SFP: + mac->ops.setup_mac_link = + ixgbe_setup_mac_link_sfp_x550a; + break; + default: + mac->ops.setup_mac_link = + ixgbe_setup_mac_link_sfp_x550em; + break; + } mac->ops.set_rate_select_speed = ixgbe_set_soft_rate_select_speed; break; @@ -2253,6 +2382,7 @@ static enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw) media_type = ixgbe_media_type_backplane; break; case IXGBE_DEV_ID_X550EM_X_SFP: + case IXGBE_DEV_ID_X550EM_A_SFP: case IXGBE_DEV_ID_X550EM_A_SFP_N: media_type = ixgbe_media_type_fiber; break; @@ -2316,6 +2446,7 @@ static void ixgbe_set_mdio_speed(struct ixgbe_hw *hw) switch (hw->device_id) { case IXGBE_DEV_ID_X550EM_X_10G_T: + case IXGBE_DEV_ID_X550EM_A_SFP: /* Config MDIO clock speed before the first MDIO PHY access */ hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0); hlreg0 &= ~IXGBE_HLREG0_MDCSPD; From 200157c2e31a5931d0d825e9fddb44d10888e6b3 Mon Sep 17 00:00:00 2001 From: Mark Rustad Date: Fri, 1 Apr 2016 12:18:40 -0700 Subject: [PATCH 0461/1649] ixgbe: Add support for SGMII backplane interface Add support for an SGMII backplane interface. 
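The SGMII bring-up added below is a pair of read-modify-write cycles on KRM registers: turn off KR autoneg, enable SGMII/Clause-37 autoneg forced to 1G in the link-control register, set the 10/100 force bits in the SGMII control register, then restart autoneg. A condensed standalone sketch of that sequence follows; the register accessors are stubs, the bit positions taken from the hunks are kept, and the AN_ENABLE/AN_RESTART positions are placeholders for the sketch only:

#include <stdint.h>
#include <stdio.h>

#define LINK_CTRL_1_TETH_AN_ENABLE        (1u << 29)	/* placeholder position */
#define LINK_CTRL_1_TETH_FORCE_SPEED_MASK (0x7u << 8)
#define LINK_CTRL_1_TETH_FORCE_SPEED_1G   (2u << 8)
#define LINK_CTRL_1_TETH_AN_SGMII_EN      (1u << 12)
#define LINK_CTRL_1_TETH_AN_CLAUSE_37_EN  (1u << 13)
#define LINK_CTRL_1_TETH_AN_RESTART       (1u << 31)	/* placeholder position */
#define SGMII_CTRL_MAC_TAR_FORCE_100_D    (1u << 12)
#define SGMII_CTRL_MAC_TAR_FORCE_10_D     (1u << 19)

static uint32_t link_ctrl_1, sgmii_ctrl;	/* fake KRM registers */

static int read_reg(const uint32_t *storage, uint32_t *val) { *val = *storage; return 0; }
static int write_reg(uint32_t *storage, uint32_t val)       { *storage = val;  return 0; }

static int setup_sgmii(void)
{
	uint32_t lval, sval;

	read_reg(&link_ctrl_1, &lval);
	lval &= ~(LINK_CTRL_1_TETH_AN_ENABLE | LINK_CTRL_1_TETH_FORCE_SPEED_MASK);
	lval |= LINK_CTRL_1_TETH_AN_SGMII_EN | LINK_CTRL_1_TETH_AN_CLAUSE_37_EN |
		LINK_CTRL_1_TETH_FORCE_SPEED_1G;
	write_reg(&link_ctrl_1, lval);

	read_reg(&sgmii_ctrl, &sval);
	sval |= SGMII_CTRL_MAC_TAR_FORCE_10_D | SGMII_CTRL_MAC_TAR_FORCE_100_D;
	write_reg(&sgmii_ctrl, sval);

	/* Finally kick autoneg. */
	return write_reg(&link_ctrl_1, lval | LINK_CTRL_1_TETH_AN_RESTART);
}

int main(void)
{
	setup_sgmii();
	printf("LINK_CTRL_1 = 0x%08x, SGMII_CTRL = 0x%08x\n",
	       (unsigned)link_ctrl_1, (unsigned)sgmii_ctrl);
	return 0;
}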
Signed-off-by: Mark Rustad Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 2 + drivers/net/ethernet/intel/ixgbe/ixgbe_type.h | 9 +++ drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c | 58 +++++++++++++++++++ 3 files changed, 69 insertions(+) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 93db4bf00dfe..c96af3fdd554 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -132,6 +132,8 @@ static const struct pci_device_id ixgbe_pci_tbl[] = { {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_10G_T), board_X550EM_x}, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_SFP), board_X550EM_x}, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SFP_N), board_x550em_a }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SGMII), board_x550em_a }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SGMII_L), board_x550em_a }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SFP), board_x550em_a }, /* required last entry */ {0, } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h index fbbc13224657..50e8bc0ef4e7 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h @@ -82,6 +82,8 @@ #define IXGBE_DEV_ID_X550EM_X_10G_T 0x15AD #define IXGBE_DEV_ID_X550EM_X_1G_T 0x15AE #define IXGBE_DEV_ID_X550EM_A_SFP_N 0x15C4 +#define IXGBE_DEV_ID_X550EM_A_SGMII 0x15C6 +#define IXGBE_DEV_ID_X550EM_A_SGMII_L 0x15C7 #define IXGBE_DEV_ID_X550EM_A_SFP 0x15CE /* VF Device IDs */ @@ -3065,6 +3067,7 @@ enum ixgbe_phy_type { ixgbe_phy_qsfp_intel, ixgbe_phy_qsfp_unknown, ixgbe_phy_sfp_unsupported, + ixgbe_phy_sgmii, ixgbe_phy_generic }; @@ -3582,6 +3585,7 @@ struct ixgbe_info { #define IXGBE_KRM_LINK_CTRL_1(P) ((P) ? 0x820C : 0x420C) #define IXGBE_KRM_AN_CNTL_1(P) ((P) ? 0x822C : 0x422C) #define IXGBE_KRM_AN_CNTL_8(P) ((P) ? 0x8248 : 0x4248) +#define IXGBE_KRM_SGMII_CTRL(P) ((P) ? 0x82A0 : 0x42A0) #define IXGBE_KRM_DSP_TXFFE_STATE_4(P) ((P) ? 0x8634 : 0x4634) #define IXGBE_KRM_DSP_TXFFE_STATE_5(P) ((P) ? 0x8638 : 0x4638) #define IXGBE_KRM_RX_TRN_LINKUP_CTRL(P) ((P) ? 
0x8B00 : 0x4B00) @@ -3595,6 +3599,8 @@ struct ixgbe_info { #define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK (0x7 << 8) #define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G (2 << 8) #define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G (4 << 8) +#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_SGMII_EN BIT(12) +#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CLAUSE_37_EN BIT(13) #define IXGBE_KRM_LINK_CTRL_1_TETH_AN_FEC_REQ (1 << 14) #define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_FEC (1 << 15) #define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX (1 << 16) @@ -3610,6 +3616,9 @@ struct ixgbe_info { #define IXGBE_KRM_AN_CNTL_8_LINEAR BIT(0) #define IXGBE_KRM_AN_CNTL_8_LIMITING BIT(1) +#define IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_100_D BIT(12) +#define IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_10_D BIT(19) + #define IXGBE_KRM_DSP_TXFFE_STATE_C0_EN (1 << 6) #define IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN (1 << 15) #define IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN (1 << 16) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c index a9d86b37872c..81e5d54476c7 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c @@ -1558,6 +1558,57 @@ static s32 ixgbe_check_link_t_X550em(struct ixgbe_hw *hw, return 0; } +/** + * ixgbe_setup_sgmii - Set up link for sgmii + * @hw: pointer to hardware structure + */ +static s32 +ixgbe_setup_sgmii(struct ixgbe_hw *hw, __always_unused ixgbe_link_speed speed, + __always_unused bool autoneg_wait_to_complete) +{ + struct ixgbe_mac_info *mac = &hw->mac; + u32 lval, sval; + s32 rc; + + rc = mac->ops.read_iosf_sb_reg(hw, + IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, &lval); + if (rc) + return rc; + + lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE; + lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK; + lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_SGMII_EN; + lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CLAUSE_37_EN; + lval |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G; + rc = mac->ops.write_iosf_sb_reg(hw, + IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, lval); + if (rc) + return rc; + + rc = mac->ops.read_iosf_sb_reg(hw, + IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, &sval); + if (rc) + return rc; + + sval |= IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_10_D; + sval |= IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_100_D; + rc = mac->ops.write_iosf_sb_reg(hw, + IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, sval); + if (rc) + return rc; + + lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART; + rc = mac->ops.write_iosf_sb_reg(hw, + IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, lval); + + return rc; +} + /** ixgbe_init_mac_link_ops_X550em - init mac link function pointers * @hw: pointer to hardware structure **/ @@ -1597,6 +1648,9 @@ static void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw) mac->ops.check_link = ixgbe_check_link_t_X550em; return; case ixgbe_media_type_backplane: + if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII || + hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII_L) + mac->ops.setup_link = ixgbe_setup_sgmii; break; default: mac->ops.setup_fc = ixgbe_setup_fc_x550em; @@ -2377,6 +2431,10 @@ static enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw) /* Detect if there is a copper PHY attached. 
*/ switch (hw->device_id) { + case IXGBE_DEV_ID_X550EM_A_SGMII: + case IXGBE_DEV_ID_X550EM_A_SGMII_L: + hw->phy.type = ixgbe_phy_sgmii; + /* Fallthrough */ case IXGBE_DEV_ID_X550EM_X_KR: case IXGBE_DEV_ID_X550EM_X_KX4: media_type = ixgbe_media_type_backplane; From f572b2c4c86dcebe6b8684cbab03d9b2ea0d2ad6 Mon Sep 17 00:00:00 2001 From: Mark Rustad Date: Fri, 1 Apr 2016 12:18:46 -0700 Subject: [PATCH 0462/1649] ixgbe: Add KR backplane support for x550em_a Add support for x550em_a-based KR backplane devices. Signed-off-by: Mark Rustad Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 2 ++ drivers/net/ethernet/intel/ixgbe/ixgbe_type.h | 2 ++ drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c | 18 ++++++++++++++---- 3 files changed, 18 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index c96af3fdd554..1a7bfcfc030e 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -131,6 +131,8 @@ static const struct pci_device_id ixgbe_pci_tbl[] = { {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KR), board_X550EM_x}, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_10G_T), board_X550EM_x}, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_SFP), board_X550EM_x}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_KR), board_x550em_a }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_KR_L), board_x550em_a }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SFP_N), board_x550em_a }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SGMII), board_x550em_a }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SGMII_L), board_x550em_a }, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h index 50e8bc0ef4e7..ba3b837c7e9d 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h @@ -81,6 +81,8 @@ #define IXGBE_DEV_ID_X550EM_X_SFP 0x15AC #define IXGBE_DEV_ID_X550EM_X_10G_T 0x15AD #define IXGBE_DEV_ID_X550EM_X_1G_T 0x15AE +#define IXGBE_DEV_ID_X550EM_A_KR 0x15C2 +#define IXGBE_DEV_ID_X550EM_A_KR_L 0x15C3 #define IXGBE_DEV_ID_X550EM_A_SFP_N 0x15C4 #define IXGBE_DEV_ID_X550EM_A_SGMII 0x15C6 #define IXGBE_DEV_ID_X550EM_A_SGMII_L 0x15C7 diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c index 81e5d54476c7..c71e93ed4451 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c @@ -291,6 +291,8 @@ static s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw) hw->phy.type = ixgbe_phy_x550em_kx4; break; case IXGBE_DEV_ID_X550EM_X_KR: + case IXGBE_DEV_ID_X550EM_A_KR: + case IXGBE_DEV_ID_X550EM_A_KR_L: hw->phy.type = ixgbe_phy_x550em_kr; break; case IXGBE_DEV_ID_X550EM_X_1G_T: @@ -1984,13 +1986,17 @@ static s32 ixgbe_setup_kx4_x550em(struct ixgbe_hw *hw) return status; } -/** ixgbe_setup_kr_x550em - Configure the KR PHY. - * @hw: pointer to hardware structure +/** + * ixgbe_setup_kr_x550em - Configure the KR PHY + * @hw: pointer to hardware structure * - * Configures the integrated KR PHY. + * Configures the integrated KR PHY for X550EM_x. 
**/ static s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw) { + if (hw->mac.type != ixgbe_mac_X550EM_x) + return 0; + return ixgbe_setup_kr_speed_x550em(hw, hw->phy.autoneg_advertised); } @@ -2196,7 +2202,9 @@ static s32 ixgbe_setup_fc_x550em(struct ixgbe_hw *hw) return IXGBE_ERR_CONFIG; } - if (hw->device_id != IXGBE_DEV_ID_X550EM_X_KR) + if (hw->device_id != IXGBE_DEV_ID_X550EM_X_KR && + hw->device_id != IXGBE_DEV_ID_X550EM_A_KR && + hw->device_id != IXGBE_DEV_ID_X550EM_A_KR_L) return 0; rc = hw->mac.ops.read_iosf_sb_reg(hw, @@ -2437,6 +2445,8 @@ static enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw) /* Fallthrough */ case IXGBE_DEV_ID_X550EM_X_KR: case IXGBE_DEV_ID_X550EM_X_KX4: + case IXGBE_DEV_ID_X550EM_A_KR: + case IXGBE_DEV_ID_X550EM_A_KR_L: media_type = ixgbe_media_type_backplane; break; case IXGBE_DEV_ID_X550EM_X_SFP: From 10ef00fe539a387ded9e0d710012500896589dbb Mon Sep 17 00:00:00 2001 From: Mark Rustad Date: Fri, 1 Apr 2016 12:18:51 -0700 Subject: [PATCH 0463/1649] ixgbe: Bump version number Update ixgbe version number. Signed-off-by: Mark Rustad Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 1a7bfcfc030e..2976df77bf14 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -70,7 +70,7 @@ char ixgbe_default_device_descr[] = static char ixgbe_default_device_descr[] = "Intel(R) 10 Gigabit Network Connection"; #endif -#define DRV_VERSION "4.2.1-k" +#define DRV_VERSION "4.4.0-k" const char ixgbe_driver_version[] = DRV_VERSION; static const char ixgbe_copyright[] = "Copyright (c) 1999-2016 Intel Corporation."; From b33b0a1bf69faff89693df49519fa7b459f5d807 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Thu, 7 Apr 2016 20:40:25 -0400 Subject: [PATCH 0464/1649] net: Fix build failure due to lockdep_sock_is_held(). Needs to be protected with CONFIG_LOCKDEP. Based upon a patch by Hannes Frederic Sowa. Signed-off-by: David S. Miller --- include/net/sock.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/include/net/sock.h b/include/net/sock.h index 46b29374df8e..81d6fecec0a2 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -1360,6 +1360,7 @@ do { \ lockdep_init_map(&(sk)->sk_lock.dep_map, (name), (key), 0); \ } while (0) +#ifdef CONFIG_LOCKDEP static inline bool lockdep_sock_is_held(const struct sock *csk) { struct sock *sk = (struct sock *)csk; @@ -1367,6 +1368,7 @@ static inline bool lockdep_sock_is_held(const struct sock *csk) return lockdep_is_held(&sk->sk_lock) || lockdep_is_held(&sk->sk_lock.slock); } +#endif void lock_sock_nested(struct sock *sk, int subclass); From ec5e099d6e941668d121ea9ca7057f4fa00830b0 Mon Sep 17 00:00:00 2001 From: Alexei Starovoitov Date: Wed, 6 Apr 2016 18:43:22 -0700 Subject: [PATCH 0465/1649] perf: optimize perf_fetch_caller_regs avoid memset in perf_fetch_caller_regs, since it's the critical path of all tracepoints. It's called from perf_sw_event_sched, perf_event_task_sched_in and all of perf_trace_##call with this_cpu_ptr(&__perf_regs[..]) which are zero initialized by perpcu init logic and subsequent call to perf_arch_fetch_caller_regs initializes the same fields on all archs, so we can safely drop memset from all of the above cases and move it into perf_ftrace_function_call that calls it with stack allocated pt_regs. 
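As a purely illustrative aside (plain user-space C, no kernel APIs; all names below are made up), the storage rule this change relies on looks like the following: statically allocated storage -- the analogue of the zero-initialized per-cpu regs -- needs no clearing, while a stack-allocated buffer must be cleared by the one caller that owns it.

#include <stdio.h>
#include <string.h>

/* static storage is zero-initialized, like the per-cpu __perf_regs[] */
static long static_regs[4];

/* fills only the fields it knows about, like perf_arch_fetch_caller_regs() */
static void fetch_regs(long *regs)
{
        regs[0] = 0x1234;
}

int main(void)
{
        long stack_regs[4];     /* stack storage: indeterminate until cleared */

        /* the caller owning stack storage clears it itself, mirroring the
         * memset() moved into perf_ftrace_function_call()
         */
        memset(stack_regs, 0, sizeof(stack_regs));

        fetch_regs(static_regs);
        fetch_regs(stack_regs);

        /* both untouched fields read back as zero */
        printf("%lx %lx\n", static_regs[1], stack_regs[1]);
        return 0;
}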
Acked-by: Peter Zijlstra (Intel) Signed-off-by: Alexei Starovoitov Signed-off-by: David S. Miller --- include/linux/perf_event.h | 2 -- kernel/trace/trace_event_perf.c | 1 + 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index f291275ffd71..e89f7199c223 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -882,8 +882,6 @@ static inline void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned lo */ static inline void perf_fetch_caller_regs(struct pt_regs *regs) { - memset(regs, 0, sizeof(*regs)); - perf_arch_fetch_caller_regs(regs, CALLER_ADDR0); } diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c index 00df25fd86ef..7a68afca8249 100644 --- a/kernel/trace/trace_event_perf.c +++ b/kernel/trace/trace_event_perf.c @@ -316,6 +316,7 @@ perf_ftrace_function_call(unsigned long ip, unsigned long parent_ip, BUILD_BUG_ON(ENTRY_SIZE > PERF_MAX_TRACE_SIZE); + memset(®s, 0, sizeof(regs)); perf_fetch_caller_regs(®s); entry = perf_trace_buf_prepare(ENTRY_SIZE, TRACE_FN, NULL, &rctx); From e93735be6a1898dd9f8de8f55254cc76309777ce Mon Sep 17 00:00:00 2001 From: Alexei Starovoitov Date: Wed, 6 Apr 2016 18:43:23 -0700 Subject: [PATCH 0466/1649] perf: remove unused __addr variable now all calls to perf_trace_buf_submit() pass 0 as 4th argument which will be repurposed in the next patch which will change the meaning of 1st arg of perf_tp_event() to event_type Signed-off-by: Alexei Starovoitov Acked-by: Peter Zijlstra (Intel) Signed-off-by: David S. Miller --- include/trace/perf.h | 7 ++----- include/trace/trace_events.h | 3 --- 2 files changed, 2 insertions(+), 8 deletions(-) diff --git a/include/trace/perf.h b/include/trace/perf.h index 26486fcd74ce..6f7e37869065 100644 --- a/include/trace/perf.h +++ b/include/trace/perf.h @@ -20,9 +20,6 @@ #undef __get_bitmask #define __get_bitmask(field) (char *)__get_dynamic_array(field) -#undef __perf_addr -#define __perf_addr(a) (__addr = (a)) - #undef __perf_count #define __perf_count(c) (__count = (c)) @@ -38,7 +35,7 @@ perf_trace_##call(void *__data, proto) \ struct trace_event_data_offsets_##call __maybe_unused __data_offsets;\ struct trace_event_raw_##call *entry; \ struct pt_regs *__regs; \ - u64 __addr = 0, __count = 1; \ + u64 __count = 1; \ struct task_struct *__task = NULL; \ struct hlist_head *head; \ int __entry_size; \ @@ -67,7 +64,7 @@ perf_trace_##call(void *__data, proto) \ \ { assign; } \ \ - perf_trace_buf_submit(entry, __entry_size, rctx, __addr, \ + perf_trace_buf_submit(entry, __entry_size, rctx, 0, \ __count, __regs, head, __task); \ } diff --git a/include/trace/trace_events.h b/include/trace/trace_events.h index 170c93bbdbb7..80679a9fae65 100644 --- a/include/trace/trace_events.h +++ b/include/trace/trace_events.h @@ -652,9 +652,6 @@ static inline notrace int trace_event_get_offsets_##call( \ #undef TP_fast_assign #define TP_fast_assign(args...) args -#undef __perf_addr -#define __perf_addr(a) (a) - #undef __perf_count #define __perf_count(c) (c) From 1e1dcd93b468901e114f279c94a0b356adc5e7cd Mon Sep 17 00:00:00 2001 From: Alexei Starovoitov Date: Wed, 6 Apr 2016 18:43:24 -0700 Subject: [PATCH 0467/1649] perf: split perf_trace_buf_prepare into alloc and update parts split allows to move expensive update of 'struct trace_entry' to later phase. Repurpose unused 1st argument of perf_tp_event() to indicate event type. 
While splitting use temp variable 'rctx' instead of '*rctx' to avoid unnecessary loads done by the compiler due to -fno-strict-aliasing Signed-off-by: Alexei Starovoitov Acked-by: Peter Zijlstra (Intel) Signed-off-by: David S. Miller --- include/linux/perf_event.h | 2 +- include/linux/trace_events.h | 8 +++---- include/trace/perf.h | 8 +++---- kernel/events/core.c | 6 +++-- kernel/trace/trace_event_perf.c | 39 +++++++++++++++++---------------- kernel/trace/trace_kprobe.c | 10 +++++---- kernel/trace/trace_syscalls.c | 13 ++++++----- kernel/trace/trace_uprobe.c | 5 +++-- 8 files changed, 49 insertions(+), 42 deletions(-) diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index e89f7199c223..eb41b535ef38 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -1016,7 +1016,7 @@ static inline bool perf_paranoid_kernel(void) } extern void perf_event_init(void); -extern void perf_tp_event(u64 addr, u64 count, void *record, +extern void perf_tp_event(u16 event_type, u64 count, void *record, int entry_size, struct pt_regs *regs, struct hlist_head *head, int rctx, struct task_struct *task); diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h index 0810f81b6db2..56f795e6a093 100644 --- a/include/linux/trace_events.h +++ b/include/linux/trace_events.h @@ -605,15 +605,15 @@ extern void perf_trace_del(struct perf_event *event, int flags); extern int ftrace_profile_set_filter(struct perf_event *event, int event_id, char *filter_str); extern void ftrace_profile_free_filter(struct perf_event *event); -extern void *perf_trace_buf_prepare(int size, unsigned short type, - struct pt_regs **regs, int *rctxp); +void perf_trace_buf_update(void *record, u16 type); +void *perf_trace_buf_alloc(int size, struct pt_regs **regs, int *rctxp); static inline void -perf_trace_buf_submit(void *raw_data, int size, int rctx, u64 addr, +perf_trace_buf_submit(void *raw_data, int size, int rctx, u16 type, u64 count, struct pt_regs *regs, void *head, struct task_struct *task) { - perf_tp_event(addr, count, raw_data, size, regs, head, rctx, task); + perf_tp_event(type, count, raw_data, size, regs, head, rctx, task); } #endif diff --git a/include/trace/perf.h b/include/trace/perf.h index 6f7e37869065..77cd9043b7e4 100644 --- a/include/trace/perf.h +++ b/include/trace/perf.h @@ -53,8 +53,7 @@ perf_trace_##call(void *__data, proto) \ sizeof(u64)); \ __entry_size -= sizeof(u32); \ \ - entry = perf_trace_buf_prepare(__entry_size, \ - event_call->event.type, &__regs, &rctx); \ + entry = perf_trace_buf_alloc(__entry_size, &__regs, &rctx); \ if (!entry) \ return; \ \ @@ -64,8 +63,9 @@ perf_trace_##call(void *__data, proto) \ \ { assign; } \ \ - perf_trace_buf_submit(entry, __entry_size, rctx, 0, \ - __count, __regs, head, __task); \ + perf_trace_buf_submit(entry, __entry_size, rctx, \ + event_call->event.type, __count, __regs, \ + head, __task); \ } /* diff --git a/kernel/events/core.c b/kernel/events/core.c index de24fbce5277..d8512883c0a0 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -6987,7 +6987,7 @@ static int perf_tp_event_match(struct perf_event *event, return 1; } -void perf_tp_event(u64 addr, u64 count, void *record, int entry_size, +void perf_tp_event(u16 event_type, u64 count, void *record, int entry_size, struct pt_regs *regs, struct hlist_head *head, int rctx, struct task_struct *task) { @@ -6999,9 +6999,11 @@ void perf_tp_event(u64 addr, u64 count, void *record, int entry_size, .data = record, }; - perf_sample_data_init(&data, addr, 0); + 
perf_sample_data_init(&data, 0, 0); data.raw = &raw; + perf_trace_buf_update(record, event_type); + hlist_for_each_entry_rcu(event, head, hlist_entry) { if (perf_tp_event_match(event, &data, regs)) perf_swevent_event(event, count, &data, regs); diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c index 7a68afca8249..5a927075977f 100644 --- a/kernel/trace/trace_event_perf.c +++ b/kernel/trace/trace_event_perf.c @@ -260,42 +260,43 @@ void perf_trace_del(struct perf_event *p_event, int flags) tp_event->class->reg(tp_event, TRACE_REG_PERF_DEL, p_event); } -void *perf_trace_buf_prepare(int size, unsigned short type, - struct pt_regs **regs, int *rctxp) +void *perf_trace_buf_alloc(int size, struct pt_regs **regs, int *rctxp) { - struct trace_entry *entry; - unsigned long flags; char *raw_data; - int pc; + int rctx; BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(unsigned long)); if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, - "perf buffer not large enough")) + "perf buffer not large enough")) return NULL; - pc = preempt_count(); - - *rctxp = perf_swevent_get_recursion_context(); - if (*rctxp < 0) + *rctxp = rctx = perf_swevent_get_recursion_context(); + if (rctx < 0) return NULL; if (regs) - *regs = this_cpu_ptr(&__perf_regs[*rctxp]); - raw_data = this_cpu_ptr(perf_trace_buf[*rctxp]); + *regs = this_cpu_ptr(&__perf_regs[rctx]); + raw_data = this_cpu_ptr(perf_trace_buf[rctx]); /* zero the dead bytes from align to not leak stack to user */ memset(&raw_data[size - sizeof(u64)], 0, sizeof(u64)); + return raw_data; +} +EXPORT_SYMBOL_GPL(perf_trace_buf_alloc); +NOKPROBE_SYMBOL(perf_trace_buf_alloc); + +void perf_trace_buf_update(void *record, u16 type) +{ + struct trace_entry *entry = record; + int pc = preempt_count(); + unsigned long flags; - entry = (struct trace_entry *)raw_data; local_save_flags(flags); tracing_generic_entry_update(entry, flags, pc); entry->type = type; - - return raw_data; } -EXPORT_SYMBOL_GPL(perf_trace_buf_prepare); -NOKPROBE_SYMBOL(perf_trace_buf_prepare); +NOKPROBE_SYMBOL(perf_trace_buf_update); #ifdef CONFIG_FUNCTION_TRACER static void @@ -319,13 +320,13 @@ perf_ftrace_function_call(unsigned long ip, unsigned long parent_ip, memset(®s, 0, sizeof(regs)); perf_fetch_caller_regs(®s); - entry = perf_trace_buf_prepare(ENTRY_SIZE, TRACE_FN, NULL, &rctx); + entry = perf_trace_buf_alloc(ENTRY_SIZE, NULL, &rctx); if (!entry) return; entry->ip = ip; entry->parent_ip = parent_ip; - perf_trace_buf_submit(entry, ENTRY_SIZE, rctx, 0, + perf_trace_buf_submit(entry, ENTRY_SIZE, rctx, TRACE_FN, 1, ®s, head, NULL); #undef ENTRY_SIZE diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index 919e0ddd8fcc..5546eec0505f 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -1149,14 +1149,15 @@ kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs) size = ALIGN(__size + sizeof(u32), sizeof(u64)); size -= sizeof(u32); - entry = perf_trace_buf_prepare(size, call->event.type, NULL, &rctx); + entry = perf_trace_buf_alloc(size, NULL, &rctx); if (!entry) return; entry->ip = (unsigned long)tk->rp.kp.addr; memset(&entry[1], 0, dsize); store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize); - perf_trace_buf_submit(entry, size, rctx, 0, 1, regs, head, NULL); + perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs, + head, NULL); } NOKPROBE_SYMBOL(kprobe_perf_func); @@ -1184,14 +1185,15 @@ kretprobe_perf_func(struct trace_kprobe *tk, struct kretprobe_instance *ri, size = ALIGN(__size + sizeof(u32), 
sizeof(u64)); size -= sizeof(u32); - entry = perf_trace_buf_prepare(size, call->event.type, NULL, &rctx); + entry = perf_trace_buf_alloc(size, NULL, &rctx); if (!entry) return; entry->func = (unsigned long)tk->rp.kp.addr; entry->ret_ip = (unsigned long)ri->ret_addr; store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize); - perf_trace_buf_submit(entry, size, rctx, 0, 1, regs, head, NULL); + perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs, + head, NULL); } NOKPROBE_SYMBOL(kretprobe_perf_func); #endif /* CONFIG_PERF_EVENTS */ diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c index e78f364cc192..b2b6efc083a4 100644 --- a/kernel/trace/trace_syscalls.c +++ b/kernel/trace/trace_syscalls.c @@ -587,15 +587,16 @@ static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id) size = ALIGN(size + sizeof(u32), sizeof(u64)); size -= sizeof(u32); - rec = (struct syscall_trace_enter *)perf_trace_buf_prepare(size, - sys_data->enter_event->event.type, NULL, &rctx); + rec = perf_trace_buf_alloc(size, NULL, &rctx); if (!rec) return; rec->nr = syscall_nr; syscall_get_arguments(current, regs, 0, sys_data->nb_args, (unsigned long *)&rec->args); - perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head, NULL); + perf_trace_buf_submit(rec, size, rctx, + sys_data->enter_event->event.type, 1, regs, + head, NULL); } static int perf_sysenter_enable(struct trace_event_call *call) @@ -660,14 +661,14 @@ static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret) size = ALIGN(sizeof(*rec) + sizeof(u32), sizeof(u64)); size -= sizeof(u32); - rec = (struct syscall_trace_exit *)perf_trace_buf_prepare(size, - sys_data->exit_event->event.type, NULL, &rctx); + rec = perf_trace_buf_alloc(size, NULL, &rctx); if (!rec) return; rec->nr = syscall_nr; rec->ret = syscall_get_return_value(current, regs); - perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head, NULL); + perf_trace_buf_submit(rec, size, rctx, sys_data->exit_event->event.type, + 1, regs, head, NULL); } static int perf_sysexit_enable(struct trace_event_call *call) diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c index 7915142c89e4..c53485441c88 100644 --- a/kernel/trace/trace_uprobe.c +++ b/kernel/trace/trace_uprobe.c @@ -1131,7 +1131,7 @@ static void __uprobe_perf_func(struct trace_uprobe *tu, if (hlist_empty(head)) goto out; - entry = perf_trace_buf_prepare(size, call->event.type, NULL, &rctx); + entry = perf_trace_buf_alloc(size, NULL, &rctx); if (!entry) goto out; @@ -1152,7 +1152,8 @@ static void __uprobe_perf_func(struct trace_uprobe *tu, memset(data + len, 0, size - esize - len); } - perf_trace_buf_submit(entry, size, rctx, 0, 1, regs, head, NULL); + perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs, + head, NULL); out: preempt_enable(); } From 98b5c2c65c2951772a8fc661f50d675e450e8bce Mon Sep 17 00:00:00 2001 From: Alexei Starovoitov Date: Wed, 6 Apr 2016 18:43:25 -0700 Subject: [PATCH 0468/1649] perf, bpf: allow bpf programs attach to tracepoints introduce BPF_PROG_TYPE_TRACEPOINT program type and allow it to be attached to the perf tracepoint handler, which will copy the arguments into the per-cpu buffer and pass it to the bpf program as its first argument. The layout of the fields can be discovered by doing 'cat /sys/kernel/debug/tracing/events/sched/sched_switch/format' prior to the compilation of the program with exception that first 8 bytes are reserved and not accessible to the program. 
This area is used to store the pointer to 'struct pt_regs' which some of the bpf helpers will use: +---------+ | 8 bytes | hidden 'struct pt_regs *' (inaccessible to bpf program) +---------+ | N bytes | static tracepoint fields defined in tracepoint/format (bpf readonly) +---------+ | dynamic | __dynamic_array bytes of tracepoint (inaccessible to bpf yet) +---------+ Not that all of the fields are already dumped to user space via perf ring buffer and broken application access it directly without consulting tracepoint/format. Same rule applies here: static tracepoint fields should only be accessed in a format defined in tracepoint/format. The order of fields and field sizes are not an ABI. Signed-off-by: Alexei Starovoitov Acked-by: Peter Zijlstra (Intel) Signed-off-by: David S. Miller --- include/trace/perf.h | 10 +++++++++- include/uapi/linux/bpf.h | 1 + kernel/events/core.c | 13 +++++++++---- 3 files changed, 19 insertions(+), 5 deletions(-) diff --git a/include/trace/perf.h b/include/trace/perf.h index 77cd9043b7e4..a182306eefd7 100644 --- a/include/trace/perf.h +++ b/include/trace/perf.h @@ -34,6 +34,7 @@ perf_trace_##call(void *__data, proto) \ struct trace_event_call *event_call = __data; \ struct trace_event_data_offsets_##call __maybe_unused __data_offsets;\ struct trace_event_raw_##call *entry; \ + struct bpf_prog *prog = event_call->prog; \ struct pt_regs *__regs; \ u64 __count = 1; \ struct task_struct *__task = NULL; \ @@ -45,7 +46,7 @@ perf_trace_##call(void *__data, proto) \ __data_size = trace_event_get_offsets_##call(&__data_offsets, args); \ \ head = this_cpu_ptr(event_call->perf_events); \ - if (__builtin_constant_p(!__task) && !__task && \ + if (!prog && __builtin_constant_p(!__task) && !__task && \ hlist_empty(head)) \ return; \ \ @@ -63,6 +64,13 @@ perf_trace_##call(void *__data, proto) \ \ { assign; } \ \ + if (prog) { \ + *(struct pt_regs **)entry = __regs; \ + if (!trace_call_bpf(prog, entry) || hlist_empty(head)) { \ + perf_swevent_put_recursion_context(rctx); \ + return; \ + } \ + } \ perf_trace_buf_submit(entry, __entry_size, rctx, \ event_call->event.type, __count, __regs, \ head, __task); \ diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 23917bb47bf3..70eda5aeb304 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -92,6 +92,7 @@ enum bpf_prog_type { BPF_PROG_TYPE_KPROBE, BPF_PROG_TYPE_SCHED_CLS, BPF_PROG_TYPE_SCHED_ACT, + BPF_PROG_TYPE_TRACEPOINT, }; #define BPF_PSEUDO_MAP_FD 1 diff --git a/kernel/events/core.c b/kernel/events/core.c index d8512883c0a0..e5ffe97d6166 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -6725,12 +6725,13 @@ int perf_swevent_get_recursion_context(void) } EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context); -inline void perf_swevent_put_recursion_context(int rctx) +void perf_swevent_put_recursion_context(int rctx) { struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable); put_recursion_context(swhash->recursion, rctx); } +EXPORT_SYMBOL_GPL(perf_swevent_put_recursion_context); void ___perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr) { @@ -7106,6 +7107,7 @@ static void perf_event_free_filter(struct perf_event *event) static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd) { + bool is_kprobe, is_tracepoint; struct bpf_prog *prog; if (event->attr.type != PERF_TYPE_TRACEPOINT) @@ -7114,15 +7116,18 @@ static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd) if (event->tp_event->prog) return -EEXIST; - if 
(!(event->tp_event->flags & TRACE_EVENT_FL_UKPROBE)) - /* bpf programs can only be attached to u/kprobes */ + is_kprobe = event->tp_event->flags & TRACE_EVENT_FL_UKPROBE; + is_tracepoint = event->tp_event->flags & TRACE_EVENT_FL_TRACEPOINT; + if (!is_kprobe && !is_tracepoint) + /* bpf programs can only be attached to u/kprobe or tracepoint */ return -EINVAL; prog = bpf_prog_get(prog_fd); if (IS_ERR(prog)) return PTR_ERR(prog); - if (prog->type != BPF_PROG_TYPE_KPROBE) { + if ((is_kprobe && prog->type != BPF_PROG_TYPE_KPROBE) || + (is_tracepoint && prog->type != BPF_PROG_TYPE_TRACEPOINT)) { /* valid fd, but invalid bpf program type */ bpf_prog_put(prog); return -EINVAL; From 9fd82b610ba3351f05a59c3e9117cfefe82f7751 Mon Sep 17 00:00:00 2001 From: Alexei Starovoitov Date: Wed, 6 Apr 2016 18:43:26 -0700 Subject: [PATCH 0469/1649] bpf: register BPF_PROG_TYPE_TRACEPOINT program type register tracepoint bpf program type and let it call the same set of helper functions as BPF_PROG_TYPE_KPROBE Signed-off-by: Alexei Starovoitov Signed-off-by: David S. Miller --- kernel/trace/bpf_trace.c | 45 ++++++++++++++++++++++++++++++++++++++-- 1 file changed, 43 insertions(+), 2 deletions(-) diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index 3e4ffb3ace5f..3e5ebe3254d2 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -268,7 +268,7 @@ static const struct bpf_func_proto bpf_perf_event_output_proto = { .arg5_type = ARG_CONST_STACK_SIZE, }; -static const struct bpf_func_proto *kprobe_prog_func_proto(enum bpf_func_id func_id) +static const struct bpf_func_proto *tracing_func_proto(enum bpf_func_id func_id) { switch (func_id) { case BPF_FUNC_map_lookup_elem: @@ -295,12 +295,20 @@ static const struct bpf_func_proto *kprobe_prog_func_proto(enum bpf_func_id func return &bpf_get_smp_processor_id_proto; case BPF_FUNC_perf_event_read: return &bpf_perf_event_read_proto; + default: + return NULL; + } +} + +static const struct bpf_func_proto *kprobe_prog_func_proto(enum bpf_func_id func_id) +{ + switch (func_id) { case BPF_FUNC_perf_event_output: return &bpf_perf_event_output_proto; case BPF_FUNC_get_stackid: return &bpf_get_stackid_proto; default: - return NULL; + return tracing_func_proto(func_id); } } @@ -332,9 +340,42 @@ static struct bpf_prog_type_list kprobe_tl = { .type = BPF_PROG_TYPE_KPROBE, }; +static const struct bpf_func_proto *tp_prog_func_proto(enum bpf_func_id func_id) +{ + switch (func_id) { + case BPF_FUNC_perf_event_output: + case BPF_FUNC_get_stackid: + return NULL; + default: + return tracing_func_proto(func_id); + } +} + +static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type) +{ + if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE) + return false; + if (type != BPF_READ) + return false; + if (off % size != 0) + return false; + return true; +} + +static const struct bpf_verifier_ops tracepoint_prog_ops = { + .get_func_proto = tp_prog_func_proto, + .is_valid_access = tp_prog_is_valid_access, +}; + +static struct bpf_prog_type_list tracepoint_tl = { + .ops = &tracepoint_prog_ops, + .type = BPF_PROG_TYPE_TRACEPOINT, +}; + static int __init register_kprobe_prog_ops(void) { bpf_register_prog_type(&kprobe_tl); + bpf_register_prog_type(&tracepoint_tl); return 0; } late_initcall(register_kprobe_prog_ops); From 9940d67c93b5bb7ddcf862b41b1847cb728186c4 Mon Sep 17 00:00:00 2001 From: Alexei Starovoitov Date: Wed, 6 Apr 2016 18:43:27 -0700 Subject: [PATCH 0470/1649] bpf: support bpf_get_stackid() and bpf_perf_event_output() in tracepoint programs 
needs two wrapper functions to fetch 'struct pt_regs *' to convert tracepoint bpf context into kprobe bpf context to reuse existing helper functions Signed-off-by: Alexei Starovoitov Signed-off-by: David S. Miller --- include/linux/bpf.h | 1 + kernel/bpf/stackmap.c | 2 +- kernel/trace/bpf_trace.c | 42 +++++++++++++++++++++++++++++++++++++++- 3 files changed, 43 insertions(+), 2 deletions(-) diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 21ee41b92e8a..198f6ace70ec 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -160,6 +160,7 @@ struct bpf_array { #define MAX_TAIL_CALL_CNT 32 u64 bpf_tail_call(u64 ctx, u64 r2, u64 index, u64 r4, u64 r5); +u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); void bpf_fd_array_map_clear(struct bpf_map *map); bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp); const struct bpf_func_proto *bpf_get_trace_printk_proto(void); diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c index 499d9e933f8e..35114725cf30 100644 --- a/kernel/bpf/stackmap.c +++ b/kernel/bpf/stackmap.c @@ -116,7 +116,7 @@ free_smap: return ERR_PTR(err); } -static u64 bpf_get_stackid(u64 r1, u64 r2, u64 flags, u64 r4, u64 r5) +u64 bpf_get_stackid(u64 r1, u64 r2, u64 flags, u64 r4, u64 r5) { struct pt_regs *regs = (struct pt_regs *) (long) r1; struct bpf_map *map = (struct bpf_map *) (long) r2; diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index 3e5ebe3254d2..413ec5614180 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -340,12 +340,52 @@ static struct bpf_prog_type_list kprobe_tl = { .type = BPF_PROG_TYPE_KPROBE, }; +static u64 bpf_perf_event_output_tp(u64 r1, u64 r2, u64 index, u64 r4, u64 size) +{ + /* + * r1 points to perf tracepoint buffer where first 8 bytes are hidden + * from bpf program and contain a pointer to 'struct pt_regs'. Fetch it + * from there and call the same bpf_perf_event_output() helper + */ + u64 ctx = *(long *)r1; + + return bpf_perf_event_output(ctx, r2, index, r4, size); +} + +static const struct bpf_func_proto bpf_perf_event_output_proto_tp = { + .func = bpf_perf_event_output_tp, + .gpl_only = true, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_CTX, + .arg2_type = ARG_CONST_MAP_PTR, + .arg3_type = ARG_ANYTHING, + .arg4_type = ARG_PTR_TO_STACK, + .arg5_type = ARG_CONST_STACK_SIZE, +}; + +static u64 bpf_get_stackid_tp(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5) +{ + u64 ctx = *(long *)r1; + + return bpf_get_stackid(ctx, r2, r3, r4, r5); +} + +static const struct bpf_func_proto bpf_get_stackid_proto_tp = { + .func = bpf_get_stackid_tp, + .gpl_only = true, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_CTX, + .arg2_type = ARG_CONST_MAP_PTR, + .arg3_type = ARG_ANYTHING, +}; + static const struct bpf_func_proto *tp_prog_func_proto(enum bpf_func_id func_id) { switch (func_id) { case BPF_FUNC_perf_event_output: + return &bpf_perf_event_output_proto_tp; case BPF_FUNC_get_stackid: - return NULL; + return &bpf_get_stackid_proto_tp; default: return tracing_func_proto(func_id); } From 32bbe0078afe86a8bf4c67c6b3477781b15e94dc Mon Sep 17 00:00:00 2001 From: Alexei Starovoitov Date: Wed, 6 Apr 2016 18:43:28 -0700 Subject: [PATCH 0471/1649] bpf: sanitize bpf tracepoint access during bpf program loading remember the last byte of ctx access and at the time of attaching the program to tracepoint check that the program doesn't access bytes beyond defined in tracepoint fields This also disallows access to __dynamic_array fields, but can be relaxed in the future. 
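A hedged illustration of the new check (the struct, section name and field layout below are invented for this sketch in the style of the samples/bpf programs and do not come from any real tracepoint format): a program whose highest ctx access ends beyond the last static field passes the verifier but is refused at attach time.

#include "bpf_helpers.h"

/* hypothetical format: 8 hidden bytes, then 24 bytes of static fields */
struct hypothetical_tp_args {
        unsigned long long pad; /* hidden 'struct pt_regs *', bytes 0..7  */
        int field_a;            /* bytes  8..11                           */
        int field_b;            /* bytes 12..15                           */
        long long field_c;      /* bytes 16..23, so the fields end at 24  */
};

SEC("tracepoint/subsys/hypothetical_event")
int prog(struct hypothetical_tp_args *ctx)
{
        /* fine: this read ends at offset 24, exactly the value
         * trace_event_get_offsets() would return for this layout
         */
        long long c = ctx->field_c;

        /* would be rejected: an 8-byte read at offset 24 records
         * max_ctx_offset = 32 > 24, so perf_event_set_bpf_prog()
         * returns -EACCES when the program is attached
         *
         * long long past_end = *((long long *)ctx + 3);
         */

        return c != 0;
}

char _license[] SEC("license") = "GPL";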
Signed-off-by: Alexei Starovoitov Signed-off-by: David S. Miller --- include/linux/bpf.h | 1 + include/linux/trace_events.h | 1 + kernel/bpf/verifier.c | 6 +++++- kernel/events/core.c | 8 ++++++++ kernel/trace/trace_events.c | 18 ++++++++++++++++++ 5 files changed, 33 insertions(+), 1 deletion(-) diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 198f6ace70ec..b2365a6eba3d 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -131,6 +131,7 @@ struct bpf_prog_type_list { struct bpf_prog_aux { atomic_t refcnt; u32 used_map_cnt; + u32 max_ctx_offset; const struct bpf_verifier_ops *ops; struct bpf_map **used_maps; struct bpf_prog *prog; diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h index 56f795e6a093..fe6441203b59 100644 --- a/include/linux/trace_events.h +++ b/include/linux/trace_events.h @@ -569,6 +569,7 @@ extern int trace_define_field(struct trace_event_call *call, const char *type, int is_signed, int filter_type); extern int trace_add_event_call(struct trace_event_call *call); extern int trace_remove_event_call(struct trace_event_call *call); +extern int trace_event_get_offsets(struct trace_event_call *call); #define is_signed_type(type) (((type)(-1)) < (type)1) diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 2e08f8e9b771..58792fed5678 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -652,8 +652,12 @@ static int check_ctx_access(struct verifier_env *env, int off, int size, enum bpf_access_type t) { if (env->prog->aux->ops->is_valid_access && - env->prog->aux->ops->is_valid_access(off, size, t)) + env->prog->aux->ops->is_valid_access(off, size, t)) { + /* remember the offset of last byte accessed in ctx */ + if (env->prog->aux->max_ctx_offset < off + size) + env->prog->aux->max_ctx_offset = off + size; return 0; + } verbose("invalid bpf_context access off=%d size=%d\n", off, size); return -EACCES; diff --git a/kernel/events/core.c b/kernel/events/core.c index e5ffe97d6166..9a01019ff7c8 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -7133,6 +7133,14 @@ static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd) return -EINVAL; } + if (is_tracepoint) { + int off = trace_event_get_offsets(event->tp_event); + + if (prog->aux->max_ctx_offset > off) { + bpf_prog_put(prog); + return -EACCES; + } + } event->tp_event->prog = prog; return 0; diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index 05ddc0820771..ced963049e0a 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c @@ -204,6 +204,24 @@ static void trace_destroy_fields(struct trace_event_call *call) } } +/* + * run-time version of trace_event_get_offsets_() that returns the last + * accessible offset of trace fields excluding __dynamic_array bytes + */ +int trace_event_get_offsets(struct trace_event_call *call) +{ + struct ftrace_event_field *tail; + struct list_head *head; + + head = trace_get_fields(call); + /* + * head->next points to the last field with the largest offset, + * since it was added last by trace_define_field() + */ + tail = list_first_entry(head, struct ftrace_event_field, link); + return tail->offset + tail->size; +} + int trace_event_raw_init(struct trace_event_call *call) { int id; From c07660409ec954403776200cec1dd04b2db851f8 Mon Sep 17 00:00:00 2001 From: Alexei Starovoitov Date: Wed, 6 Apr 2016 18:43:29 -0700 Subject: [PATCH 0472/1649] samples/bpf: add tracepoint support to bpf loader Recognize "tracepoint/" section name prefix and attach the program to that 
tracepoint. Signed-off-by: Alexei Starovoitov Signed-off-by: David S. Miller --- samples/bpf/bpf_load.c | 26 +++++++++++++++++++++----- 1 file changed, 21 insertions(+), 5 deletions(-) diff --git a/samples/bpf/bpf_load.c b/samples/bpf/bpf_load.c index 58f86bd11b3d..022af71c2bb5 100644 --- a/samples/bpf/bpf_load.c +++ b/samples/bpf/bpf_load.c @@ -49,6 +49,7 @@ static int load_and_attach(const char *event, struct bpf_insn *prog, int size) bool is_socket = strncmp(event, "socket", 6) == 0; bool is_kprobe = strncmp(event, "kprobe/", 7) == 0; bool is_kretprobe = strncmp(event, "kretprobe/", 10) == 0; + bool is_tracepoint = strncmp(event, "tracepoint/", 11) == 0; enum bpf_prog_type prog_type; char buf[256]; int fd, efd, err, id; @@ -63,6 +64,8 @@ static int load_and_attach(const char *event, struct bpf_insn *prog, int size) prog_type = BPF_PROG_TYPE_SOCKET_FILTER; } else if (is_kprobe || is_kretprobe) { prog_type = BPF_PROG_TYPE_KPROBE; + } else if (is_tracepoint) { + prog_type = BPF_PROG_TYPE_TRACEPOINT; } else { printf("Unknown event '%s'\n", event); return -1; @@ -111,12 +114,23 @@ static int load_and_attach(const char *event, struct bpf_insn *prog, int size) event, strerror(errno)); return -1; } - } - strcpy(buf, DEBUGFS); - strcat(buf, "events/kprobes/"); - strcat(buf, event); - strcat(buf, "/id"); + strcpy(buf, DEBUGFS); + strcat(buf, "events/kprobes/"); + strcat(buf, event); + strcat(buf, "/id"); + } else if (is_tracepoint) { + event += 11; + + if (*event == 0) { + printf("event name cannot be empty\n"); + return -1; + } + strcpy(buf, DEBUGFS); + strcat(buf, "events/"); + strcat(buf, event); + strcat(buf, "/id"); + } efd = open(buf, O_RDONLY, 0); if (efd < 0) { @@ -304,6 +318,7 @@ int load_bpf_file(char *path) if (memcmp(shname_prog, "kprobe/", 7) == 0 || memcmp(shname_prog, "kretprobe/", 10) == 0 || + memcmp(shname_prog, "tracepoint/", 11) == 0 || memcmp(shname_prog, "socket", 6) == 0) load_and_attach(shname_prog, insns, data_prog->d_size); } @@ -320,6 +335,7 @@ int load_bpf_file(char *path) if (memcmp(shname, "kprobe/", 7) == 0 || memcmp(shname, "kretprobe/", 10) == 0 || + memcmp(shname, "tracepoint/", 11) == 0 || memcmp(shname, "socket", 6) == 0) load_and_attach(shname, data->d_buf, data->d_size); } From 3c9b16448cf6924c203e3c01696c87fcbfb71fc6 Mon Sep 17 00:00:00 2001 From: Alexei Starovoitov Date: Wed, 6 Apr 2016 18:43:30 -0700 Subject: [PATCH 0473/1649] samples/bpf: tracepoint example modify offwaketime to work with sched/sched_switch tracepoint instead of kprobe into finish_task_switch Signed-off-by: Alexei Starovoitov Signed-off-by: David S. 
Miller --- samples/bpf/offwaketime_kern.c | 28 +++++++++++++++++++++++----- 1 file changed, 23 insertions(+), 5 deletions(-) diff --git a/samples/bpf/offwaketime_kern.c b/samples/bpf/offwaketime_kern.c index c0aa5a9b9c48..983629a31c79 100644 --- a/samples/bpf/offwaketime_kern.c +++ b/samples/bpf/offwaketime_kern.c @@ -73,7 +73,7 @@ int waker(struct pt_regs *ctx) return 0; } -static inline int update_counts(struct pt_regs *ctx, u32 pid, u64 delta) +static inline int update_counts(void *ctx, u32 pid, u64 delta) { struct key_t key = {}; struct wokeby_t *woke; @@ -100,15 +100,33 @@ static inline int update_counts(struct pt_regs *ctx, u32 pid, u64 delta) return 0; } +#if 1 +/* taken from /sys/kernel/debug/tracing/events/sched/sched_switch/format */ +struct sched_switch_args { + unsigned long long pad; + char prev_comm[16]; + int prev_pid; + int prev_prio; + long long prev_state; + char next_comm[16]; + int next_pid; + int next_prio; +}; +SEC("tracepoint/sched/sched_switch") +int oncpu(struct sched_switch_args *ctx) +{ + /* record previous thread sleep time */ + u32 pid = ctx->prev_pid; +#else SEC("kprobe/finish_task_switch") int oncpu(struct pt_regs *ctx) { struct task_struct *p = (void *) PT_REGS_PARM1(ctx); - u64 delta, ts, *tsp; - u32 pid; - /* record previous thread sleep time */ - pid = _(p->pid); + u32 pid = _(p->pid); +#endif + u64 delta, ts, *tsp; + ts = bpf_ktime_get_ns(); bpf_map_update_elem(&start, &pid, &ts, BPF_ANY); From e3edfdec04d43aa6276db639d3721e073161d2c2 Mon Sep 17 00:00:00 2001 From: Alexei Starovoitov Date: Wed, 6 Apr 2016 18:43:31 -0700 Subject: [PATCH 0474/1649] samples/bpf: add tracepoint vs kprobe performance tests the first microbenchmark does fd=open("/proc/self/comm"); for() { write(fd, "test"); } and on 4 cpus in parallel: writes per sec base (no tracepoints, no kprobes) 930k with kprobe at __set_task_comm() 420k with tracepoint at task:task_rename 730k For kprobe + full bpf program manully fetches oldcomm, newcomm via bpf_probe_read. For tracepint bpf program does nothing, since arguments are copied by tracepoint. 2nd microbenchmark does: fd=open("/dev/urandom"); for() { read(fd, buf); } and on 4 cpus in parallel: reads per sec base (no tracepoints, no kprobes) 300k with kprobe at urandom_read() 279k with tracepoint at random:urandom_read 290k bpf progs attached to kprobe and tracepoint are noop. Signed-off-by: Alexei Starovoitov Signed-off-by: David S. 
Miller --- samples/bpf/Makefile | 5 + samples/bpf/test_overhead_kprobe_kern.c | 41 ++++++ samples/bpf/test_overhead_tp_kern.c | 36 ++++++ samples/bpf/test_overhead_user.c | 162 ++++++++++++++++++++++++ 4 files changed, 244 insertions(+) create mode 100644 samples/bpf/test_overhead_kprobe_kern.c create mode 100644 samples/bpf/test_overhead_tp_kern.c create mode 100644 samples/bpf/test_overhead_user.c diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile index 502c9fc8db85..9959771bf808 100644 --- a/samples/bpf/Makefile +++ b/samples/bpf/Makefile @@ -19,6 +19,7 @@ hostprogs-y += lathist hostprogs-y += offwaketime hostprogs-y += spintest hostprogs-y += map_perf_test +hostprogs-y += test_overhead test_verifier-objs := test_verifier.o libbpf.o test_maps-objs := test_maps.o libbpf.o @@ -38,6 +39,7 @@ lathist-objs := bpf_load.o libbpf.o lathist_user.o offwaketime-objs := bpf_load.o libbpf.o offwaketime_user.o spintest-objs := bpf_load.o libbpf.o spintest_user.o map_perf_test-objs := bpf_load.o libbpf.o map_perf_test_user.o +test_overhead-objs := bpf_load.o libbpf.o test_overhead_user.o # Tell kbuild to always build the programs always := $(hostprogs-y) @@ -56,6 +58,8 @@ always += lathist_kern.o always += offwaketime_kern.o always += spintest_kern.o always += map_perf_test_kern.o +always += test_overhead_tp_kern.o +always += test_overhead_kprobe_kern.o HOSTCFLAGS += -I$(objtree)/usr/include @@ -75,6 +79,7 @@ HOSTLOADLIBES_lathist += -lelf HOSTLOADLIBES_offwaketime += -lelf HOSTLOADLIBES_spintest += -lelf HOSTLOADLIBES_map_perf_test += -lelf -lrt +HOSTLOADLIBES_test_overhead += -lelf -lrt # point this to your LLVM backend with bpf support LLC=$(srctree)/tools/bpf/llvm/bld/Debug+Asserts/bin/llc diff --git a/samples/bpf/test_overhead_kprobe_kern.c b/samples/bpf/test_overhead_kprobe_kern.c new file mode 100644 index 000000000000..468a66a92ef9 --- /dev/null +++ b/samples/bpf/test_overhead_kprobe_kern.c @@ -0,0 +1,41 @@ +/* Copyright (c) 2016 Facebook + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. + */ +#include +#include +#include +#include "bpf_helpers.h" + +#define _(P) ({typeof(P) val = 0; bpf_probe_read(&val, sizeof(val), &P); val;}) + +SEC("kprobe/__set_task_comm") +int prog(struct pt_regs *ctx) +{ + struct signal_struct *signal; + struct task_struct *tsk; + char oldcomm[16] = {}; + char newcomm[16] = {}; + u16 oom_score_adj; + u32 pid; + + tsk = (void *)PT_REGS_PARM1(ctx); + + pid = _(tsk->pid); + bpf_probe_read(oldcomm, sizeof(oldcomm), &tsk->comm); + bpf_probe_read(newcomm, sizeof(newcomm), (void *)PT_REGS_PARM2(ctx)); + signal = _(tsk->signal); + oom_score_adj = _(signal->oom_score_adj); + return 0; +} + +SEC("kprobe/urandom_read") +int prog2(struct pt_regs *ctx) +{ + return 0; +} + +char _license[] SEC("license") = "GPL"; +u32 _version SEC("version") = LINUX_VERSION_CODE; diff --git a/samples/bpf/test_overhead_tp_kern.c b/samples/bpf/test_overhead_tp_kern.c new file mode 100644 index 000000000000..38f5c0b9da9f --- /dev/null +++ b/samples/bpf/test_overhead_tp_kern.c @@ -0,0 +1,36 @@ +/* Copyright (c) 2016 Facebook + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. 
+ */ +#include +#include "bpf_helpers.h" + +/* from /sys/kernel/debug/tracing/events/task/task_rename/format */ +struct task_rename { + __u64 pad; + __u32 pid; + char oldcomm[16]; + char newcomm[16]; + __u16 oom_score_adj; +}; +SEC("tracepoint/task/task_rename") +int prog(struct task_rename *ctx) +{ + return 0; +} + +/* from /sys/kernel/debug/tracing/events/random/urandom_read/format */ +struct urandom_read { + __u64 pad; + int got_bits; + int pool_left; + int input_left; +}; +SEC("tracepoint/random/urandom_read") +int prog2(struct urandom_read *ctx) +{ + return 0; +} +char _license[] SEC("license") = "GPL"; diff --git a/samples/bpf/test_overhead_user.c b/samples/bpf/test_overhead_user.c new file mode 100644 index 000000000000..d291167fd3c7 --- /dev/null +++ b/samples/bpf/test_overhead_user.c @@ -0,0 +1,162 @@ +/* Copyright (c) 2016 Facebook + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. + */ +#define _GNU_SOURCE +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "libbpf.h" +#include "bpf_load.h" + +#define MAX_CNT 1000000 + +static __u64 time_get_ns(void) +{ + struct timespec ts; + + clock_gettime(CLOCK_MONOTONIC, &ts); + return ts.tv_sec * 1000000000ull + ts.tv_nsec; +} + +static void test_task_rename(int cpu) +{ + __u64 start_time; + char buf[] = "test\n"; + int i, fd; + + fd = open("/proc/self/comm", O_WRONLY|O_TRUNC); + if (fd < 0) { + printf("couldn't open /proc\n"); + exit(1); + } + start_time = time_get_ns(); + for (i = 0; i < MAX_CNT; i++) + write(fd, buf, sizeof(buf)); + printf("task_rename:%d: %lld events per sec\n", + cpu, MAX_CNT * 1000000000ll / (time_get_ns() - start_time)); + close(fd); +} + +static void test_urandom_read(int cpu) +{ + __u64 start_time; + char buf[4]; + int i, fd; + + fd = open("/dev/urandom", O_RDONLY); + if (fd < 0) { + printf("couldn't open /dev/urandom\n"); + exit(1); + } + start_time = time_get_ns(); + for (i = 0; i < MAX_CNT; i++) + read(fd, buf, sizeof(buf)); + printf("urandom_read:%d: %lld events per sec\n", + cpu, MAX_CNT * 1000000000ll / (time_get_ns() - start_time)); + close(fd); +} + +static void loop(int cpu, int flags) +{ + cpu_set_t cpuset; + + CPU_ZERO(&cpuset); + CPU_SET(cpu, &cpuset); + sched_setaffinity(0, sizeof(cpuset), &cpuset); + + if (flags & 1) + test_task_rename(cpu); + if (flags & 2) + test_urandom_read(cpu); +} + +static void run_perf_test(int tasks, int flags) +{ + pid_t pid[tasks]; + int i; + + for (i = 0; i < tasks; i++) { + pid[i] = fork(); + if (pid[i] == 0) { + loop(i, flags); + exit(0); + } else if (pid[i] == -1) { + printf("couldn't spawn #%d process\n", i); + exit(1); + } + } + for (i = 0; i < tasks; i++) { + int status; + + assert(waitpid(pid[i], &status, 0) == pid[i]); + assert(status == 0); + } +} + +static void unload_progs(void) +{ + close(prog_fd[0]); + close(prog_fd[1]); + close(event_fd[0]); + close(event_fd[1]); +} + +int main(int argc, char **argv) +{ + struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY}; + char filename[256]; + int num_cpu = 8; + int test_flags = ~0; + + setrlimit(RLIMIT_MEMLOCK, &r); + + if (argc > 1) + test_flags = atoi(argv[1]) ? : test_flags; + if (argc > 2) + num_cpu = atoi(argv[2]) ? 
: num_cpu; + + if (test_flags & 0x3) { + printf("BASE\n"); + run_perf_test(num_cpu, test_flags); + } + + if (test_flags & 0xC) { + snprintf(filename, sizeof(filename), + "%s_kprobe_kern.o", argv[0]); + if (load_bpf_file(filename)) { + printf("%s", bpf_log_buf); + return 1; + } + printf("w/KPROBE\n"); + run_perf_test(num_cpu, test_flags >> 2); + unload_progs(); + } + + if (test_flags & 0x30) { + snprintf(filename, sizeof(filename), + "%s_tp_kern.o", argv[0]); + if (load_bpf_file(filename)) { + printf("%s", bpf_log_buf); + return 1; + } + printf("w/TRACEPOINT\n"); + run_perf_test(num_cpu, test_flags >> 4); + unload_progs(); + } + + return 0; +} From f18ba58f538e44a701ad0b86d47bb57b917d7c0a Mon Sep 17 00:00:00 2001 From: Johan Hedberg Date: Wed, 6 Apr 2016 13:09:05 +0300 Subject: [PATCH 0475/1649] Bluetooth: Fix setting NO_BREDR advertising flag If we're dealing with a single-mode controller or BR/EDR is disable for a dual-mode one, the NO_BREDR flag needs to be unconditionally present in the advertising data. This patch moves it out from behind an extra condition to be always set in the create_instance_adv_data() function if BR/EDR is disabled. Signed-off-by: Johan Hedberg Signed-off-by: Marcel Holtmann --- net/bluetooth/hci_request.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c index 6e125d76df0d..c045b3c54768 100644 --- a/net/bluetooth/hci_request.c +++ b/net/bluetooth/hci_request.c @@ -1065,6 +1065,9 @@ static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr) if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV) flags |= LE_AD_LIMITED; + if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) + flags |= LE_AD_NO_BREDR; + if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) { /* If a discovery flag wasn't provided, simply use the global * settings. @@ -1072,9 +1075,6 @@ static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr) if (!flags) flags |= mgmt_get_adv_discov_flags(hdev); - if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) - flags |= LE_AD_NO_BREDR; - /* If flags would still be empty, then there is no need to * include the "Flags" AD field". */ From 56b40fbf61a247e23b50e426971148b2e50262e0 Mon Sep 17 00:00:00 2001 From: Johan Hedberg Date: Thu, 7 Apr 2016 21:01:27 +0300 Subject: [PATCH 0476/1649] Bluetooth: Ignore unknown advertising packet types In case of buggy controllers send advertising packet types that we don't know of we should simply ignore them instead of trying to react to them in some (potentially wrong) way. Signed-off-by: Johan Hedberg Signed-off-by: Marcel Holtmann --- net/bluetooth/hci_event.c | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index c162af5d16bf..d4b3dd5413be 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -4727,6 +4727,19 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr, u32 flags; u8 *ptr, real_len; + switch (type) { + case LE_ADV_IND: + case LE_ADV_DIRECT_IND: + case LE_ADV_SCAN_IND: + case LE_ADV_NONCONN_IND: + case LE_ADV_SCAN_RSP: + break; + default: + BT_ERR_RATELIMITED("Unknown advetising packet type: 0x%02x", + type); + return; + } + /* Find the end of the data in case the report contains padded zero * bytes at the end causing an invalid length value. 
* From 1dbfc59a931495b2e7bdc4e85886162a0b03235b Mon Sep 17 00:00:00 2001 From: Loic Poulain Date: Mon, 4 Apr 2016 11:31:12 +0200 Subject: [PATCH 0477/1649] Bluetooth: hci_bcm: Add BCM2E71 ACPI ID This ID is used at least by Asus T100-CHI. Signed-off-by: Loic Poulain Signed-off-by: Marcel Holtmann --- drivers/bluetooth/hci_bcm.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c index d8881dc0600c..1c97eda8bae3 100644 --- a/drivers/bluetooth/hci_bcm.c +++ b/drivers/bluetooth/hci_bcm.c @@ -825,6 +825,7 @@ static const struct acpi_device_id bcm_acpi_match[] = { { "BCM2E64", 0 }, { "BCM2E65", 0 }, { "BCM2E67", 0 }, + { "BCM2E71", 0 }, { "BCM2E7B", 0 }, { "BCM2E7C", 0 }, { }, From 84cb3df02aea4b00405521e67c4c67c2d525c364 Mon Sep 17 00:00:00 2001 From: Loic Poulain Date: Mon, 4 Apr 2016 10:48:13 +0200 Subject: [PATCH 0478/1649] Bluetooth: hci_ldisc: Fix null pointer derefence in case of early data HCI_UART_PROTO_SET flag is set before hci_uart_set_proto call. If we receive data from tty layer during this procedure, proto pointer may not be assigned yet, leading to null pointer dereference in rx method hci_uart_tty_receive. This patch fixes this issue by introducing HCI_UART_PROTO_READY flag in order to avoid any proto operation before proto opening and assignment. Signed-off-by: Loic Poulain Signed-off-by: Marcel Holtmann --- drivers/bluetooth/hci_ldisc.c | 11 +++++++---- drivers/bluetooth/hci_uart.h | 1 + 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c index c00168a5bb80..49b3e1e2d236 100644 --- a/drivers/bluetooth/hci_ldisc.c +++ b/drivers/bluetooth/hci_ldisc.c @@ -227,7 +227,7 @@ static int hci_uart_flush(struct hci_dev *hdev) tty_ldisc_flush(tty); tty_driver_flush_buffer(tty); - if (test_bit(HCI_UART_PROTO_SET, &hu->flags)) + if (test_bit(HCI_UART_PROTO_READY, &hu->flags)) hu->proto->flush(hu); return 0; @@ -492,7 +492,7 @@ static void hci_uart_tty_close(struct tty_struct *tty) cancel_work_sync(&hu->write_work); - if (test_and_clear_bit(HCI_UART_PROTO_SET, &hu->flags)) { + if (test_and_clear_bit(HCI_UART_PROTO_READY, &hu->flags)) { if (hdev) { if (test_bit(HCI_UART_REGISTERED, &hu->flags)) hci_unregister_dev(hdev); @@ -500,6 +500,7 @@ static void hci_uart_tty_close(struct tty_struct *tty) } hu->proto->close(hu); } + clear_bit(HCI_UART_PROTO_SET, &hu->flags); kfree(hu); } @@ -526,7 +527,7 @@ static void hci_uart_tty_wakeup(struct tty_struct *tty) if (tty != hu->tty) return; - if (test_bit(HCI_UART_PROTO_SET, &hu->flags)) + if (test_bit(HCI_UART_PROTO_READY, &hu->flags)) hci_uart_tx_wakeup(hu); } @@ -550,7 +551,7 @@ static void hci_uart_tty_receive(struct tty_struct *tty, const u8 *data, if (!hu || tty != hu->tty) return; - if (!test_bit(HCI_UART_PROTO_SET, &hu->flags)) + if (!test_bit(HCI_UART_PROTO_READY, &hu->flags)) return; /* It does not need a lock here as it is already protected by a mutex in @@ -638,9 +639,11 @@ static int hci_uart_set_proto(struct hci_uart *hu, int id) return err; hu->proto = p; + set_bit(HCI_UART_PROTO_READY, &hu->flags); err = hci_uart_register_dev(hu); if (err) { + clear_bit(HCI_UART_PROTO_READY, &hu->flags); p->close(hu); return err; } diff --git a/drivers/bluetooth/hci_uart.h b/drivers/bluetooth/hci_uart.h index 4814ff08f427..839bad1d8152 100644 --- a/drivers/bluetooth/hci_uart.h +++ b/drivers/bluetooth/hci_uart.h @@ -95,6 +95,7 @@ struct hci_uart { /* HCI_UART proto flag bits */ #define HCI_UART_PROTO_SET 0 #define 
HCI_UART_REGISTERED 1 +#define HCI_UART_PROTO_READY 2 /* TX states */ #define HCI_UART_SENDING 1 From a164cee111085f9ee77f6038f006658249073523 Mon Sep 17 00:00:00 2001 From: Patrik Flykt Date: Thu, 24 Mar 2016 16:04:15 +0200 Subject: [PATCH 0479/1649] Bluetooth: Allow setting BT_SECURITY_FIPS with setsockopt Update the security level check to allow setting BT_SECURITY_FIPS for an L2CAP socket. Signed-off-by: Patrik Flykt Signed-off-by: Marcel Holtmann --- net/bluetooth/l2cap_sock.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c index e4cae72895a7..388ee8b59145 100644 --- a/net/bluetooth/l2cap_sock.c +++ b/net/bluetooth/l2cap_sock.c @@ -778,7 +778,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, } if (sec.level < BT_SECURITY_LOW || - sec.level > BT_SECURITY_HIGH) { + sec.level > BT_SECURITY_FIPS) { err = -EINVAL; break; } From 373a32c848ae3a1c03618517cce85f9211a6facf Mon Sep 17 00:00:00 2001 From: Jiri Slaby Date: Sat, 19 Mar 2016 11:05:18 +0100 Subject: [PATCH 0480/1649] Bluetooth: vhci: fix open_timeout vs. hdev race Both vhci_get_user and vhci_release race with open_timeout work. They both contain cancel_delayed_work_sync, but do not test whether the work actually created hdev or not. Since the work can be in progress and _sync will wait for finishing it, we can have data->hdev allocated when cancel_delayed_work_sync returns. But the call sites do 'if (data->hdev)' *before* cancel_delayed_work_sync. As a result: * vhci_get_user allocates a second hdev and puts it into data->hdev. The former is leaked. * vhci_release does not release data->hdev properly as it thinks there is none. Fix both cases by moving the actual test *after* the call to cancel_delayed_work_sync. This can be hit by this program: #include #include #include #include #include #include #include #include int main(int argc, char **argv) { int fd; srand(time(NULL)); while (1) { const int delta = (rand() % 200 - 100) * 100; fd = open("/dev/vhci", O_RDWR); if (fd < 0) err(1, "open"); usleep(1000000 + delta); close(fd); } return 0; } And the result is: BUG: KASAN: use-after-free in skb_queue_tail+0x13e/0x150 at addr ffff88006b0c1228 Read of size 8 by task kworker/u13:1/32068 ============================================================================= BUG kmalloc-192 (Tainted: G E ): kasan: bad access detected ----------------------------------------------------------------------------- Disabling lock debugging due to kernel taint INFO: Allocated in vhci_open+0x50/0x330 [hci_vhci] age=260 cpu=3 pid=32040 ... kmem_cache_alloc_trace+0x150/0x190 vhci_open+0x50/0x330 [hci_vhci] misc_open+0x35b/0x4e0 chrdev_open+0x23b/0x510 ... INFO: Freed in vhci_release+0xa4/0xd0 [hci_vhci] age=9 cpu=2 pid=32040 ... __slab_free+0x204/0x310 vhci_release+0xa4/0xd0 [hci_vhci] ... INFO: Slab 0xffffea0001ac3000 objects=16 used=13 fp=0xffff88006b0c1e00 flags=0x5fffff80004080 INFO: Object 0xffff88006b0c1200 @offset=4608 fp=0xffff88006b0c0600 Bytes b4 ffff88006b0c11f0: 09 df 00 00 01 00 00 00 00 00 00 00 00 00 00 00 ................ Object ffff88006b0c1200: 00 06 0c 6b 00 88 ff ff 00 00 00 00 00 00 00 00 ...k............ Object ffff88006b0c1210: 10 12 0c 6b 00 88 ff ff 10 12 0c 6b 00 88 ff ff ...k.......k.... Object ffff88006b0c1220: c0 46 c2 6b 00 88 ff ff c0 46 c2 6b 00 88 ff ff .F.k.....F.k.... Object ffff88006b0c1230: 01 00 00 00 01 00 00 00 e0 ff ff ff 0f 00 00 00 ................ 
Object ffff88006b0c1240: 40 12 0c 6b 00 88 ff ff 40 12 0c 6b 00 88 ff ff @..k....@..k.... Object ffff88006b0c1250: 50 0d 6e a0 ff ff ff ff 00 02 00 00 00 00 ad de P.n............. Object ffff88006b0c1260: 00 00 00 00 00 00 00 00 ab 62 02 00 01 00 00 00 .........b...... Object ffff88006b0c1270: 90 b9 19 81 ff ff ff ff 38 12 0c 6b 00 88 ff ff ........8..k.... Object ffff88006b0c1280: 03 00 20 00 ff ff ff ff ff ff ff ff 00 00 00 00 .. ............. Object ffff88006b0c1290: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................ Object ffff88006b0c12a0: 00 00 00 00 00 00 00 00 00 80 cd 3d 00 88 ff ff ...........=.... Object ffff88006b0c12b0: 00 20 00 00 00 00 00 00 00 00 00 00 00 00 00 00 . .............. Redzone ffff88006b0c12c0: bb bb bb bb bb bb bb bb ........ Padding ffff88006b0c13f8: 00 00 00 00 00 00 00 00 ........ CPU: 3 PID: 32068 Comm: kworker/u13:1 Tainted: G B E 4.4.6-0-default #1 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.8.1-0-g4adadbd-20151112_172657-sheep25 04/01/2014 Workqueue: hci0 hci_cmd_work [bluetooth] 00000000ffffffff ffffffff81926cfa ffff88006be37c68 ffff88006bc27180 ffff88006b0c1200 ffff88006b0c1234 ffffffff81577993 ffffffff82489320 ffff88006bc24240 0000000000000046 ffff88006a100000 000000026e51eb80 Call Trace: ... [] ? skb_queue_tail+0x13e/0x150 [] ? vhci_send_frame+0xac/0x100 [hci_vhci] [] ? hci_send_frame+0x188/0x320 [bluetooth] [] ? hci_cmd_work+0x115/0x310 [bluetooth] [] ? process_one_work+0x815/0x1340 [] ? worker_thread+0xe5/0x11f0 [] ? process_one_work+0x1340/0x1340 [] ? kthread+0x1c8/0x230 ... Memory state around the buggy address: ffff88006b0c1100: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc ffff88006b0c1180: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc >ffff88006b0c1200: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb ^ ffff88006b0c1280: fb fb fb fb fb fb fb fb fc fc fc fc fc fc fc fc ffff88006b0c1300: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc Fixes: 23424c0d31 (Bluetooth: Add support creating virtual AMP controllers) Signed-off-by: Jiri Slaby Signed-off-by: Marcel Holtmann Cc: Dmitry Vyukov Cc: stable 3.13+ --- drivers/bluetooth/hci_vhci.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c index 80783dcb7f57..3ec580e38c17 100644 --- a/drivers/bluetooth/hci_vhci.c +++ b/drivers/bluetooth/hci_vhci.c @@ -189,13 +189,13 @@ static inline ssize_t vhci_get_user(struct vhci_data *data, break; case HCI_VENDOR_PKT: + cancel_delayed_work_sync(&data->open_timeout); + if (data->hdev) { kfree_skb(skb); return -EBADFD; } - cancel_delayed_work_sync(&data->open_timeout); - opcode = *((__u8 *) skb->data); skb_pull(skb, 1); @@ -333,10 +333,12 @@ static int vhci_open(struct inode *inode, struct file *file) static int vhci_release(struct inode *inode, struct file *file) { struct vhci_data *data = file->private_data; - struct hci_dev *hdev = data->hdev; + struct hci_dev *hdev; cancel_delayed_work_sync(&data->open_timeout); + hdev = data->hdev; + if (hdev) { hci_unregister_dev(hdev); hci_free_dev(hdev); From 13407376b255325fa817798800117a839f3aa055 Mon Sep 17 00:00:00 2001 From: Jiri Slaby Date: Sat, 19 Mar 2016 11:49:43 +0100 Subject: [PATCH 0481/1649] Bluetooth: vhci: purge unhandled skbs The write handler allocates skbs and queues them into data->readq. Read side should read them, if there is any. If there is none, skbs should be dropped by hdev->flush. But this happens only if the device is HCI_UP, i.e. hdev->power_on work was triggered already. 
When it was not, skbs stay allocated in the queue when /dev/vhci is
closed. So purge the queue in ->release.

Program to reproduce:
#include
#include
#include
#include
#include
#include
#include

int main()
{
	char buf[] = { 0xff, 0 };
	struct iovec iov = {
		.iov_base = buf,
		.iov_len = sizeof(buf),
	};
	int fd;

	while (1) {
		fd = open("/dev/vhci", O_RDWR);
		if (fd < 0)
			err(1, "open");
		usleep(50);
		if (writev(fd, &iov, 1) < 0)
			err(1, "writev");
		usleep(50);
		close(fd);
	}

	return 0;
}

Result:
kmemleak: 4609 new suspected memory leaks

unreferenced object 0xffff88059f4d5440 (size 232):
  comm "vhci", pid 1084, jiffies 4294912542 (age 37569.296s)
  hex dump (first 32 bytes):
    20 f0 23 87 05 88 ff ff 20 f0 23 87 05 88 ff ff   .#..... .#.....
    00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00  ................
  backtrace:
    ...
    [] __alloc_skb+0x0/0x5a0
    [] vhci_create_device+0x5c/0x580 [hci_vhci]
    [] vhci_write+0x306/0x4c8 [hci_vhci]

Fixes: 23424c0d31 (Bluetooth: Add support creating virtual AMP controllers)
Signed-off-by: Jiri Slaby
Signed-off-by: Marcel Holtmann
Cc: stable 3.13+
---
 drivers/bluetooth/hci_vhci.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c
index 3ec580e38c17..f67ea1c090cb 100644
--- a/drivers/bluetooth/hci_vhci.c
+++ b/drivers/bluetooth/hci_vhci.c
@@ -344,6 +344,7 @@ static int vhci_release(struct inode *inode, struct file *file)
 		hci_free_dev(hdev);
 	}
 
+	skb_queue_purge(&data->readq);
 	file->private_data = NULL;
 	kfree(data);
 

From feb2add3235ca81dc5cd5d975490c707a24c9889 Mon Sep 17 00:00:00 2001
From: Alexander Aring
Date: Wed, 16 Mar 2016 13:52:41 +0100
Subject: [PATCH 0482/1649] 6lowpan: iphc: fix handling of link-local compression

This patch fixes the handling of link-local address compression. An IPv6
link-local address is defined by the fe80::/10 prefix, which is also what
ipv6_addr_type checks for link-local addresses. But IPHC compression for
link-local addresses applies to fe80::/64 addresses only.

This patch adds additional checks for zero-padded bits in the link-local
address compression path so that only fe80::/64 addresses are matched.
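The distinction drawn here, between link-local in the fe80::/10 sense that
ipv6_addr_type reports and the stricter fe80::/64 form required for 64-bit
IID elision, can be checked outside the kernel. The sketch below is plain
user-space C, not part of the patch, and the helper name is invented for
illustration; it simply mirrors the zero-padding test that the new
lowpan_is_linklocal_zero_padded macro performs.

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdbool.h>
#include <stdio.h>

/* Strict check: the address must lie inside fe80::/64, i.e. the first
 * 16 bits are fe80 and the remaining 48 bits of the prefix are zero.
 */
static bool iphc_linklocal64(const struct in6_addr *a)
{
	int i;

	if (a->s6_addr[0] != 0xfe || a->s6_addr[1] != 0x80)
		return false;
	for (i = 2; i < 8; i++)
		if (a->s6_addr[i] != 0)
			return false;
	return true;
}

int main(void)
{
	const char *samples[] = { "fe80::1", "fe80:0:0:1::1", "febf::1" };
	struct in6_addr a;
	unsigned int i;

	for (i = 0; i < 3; i++) {
		if (inet_pton(AF_INET6, samples[i], &a) != 1)
			return 1;
		printf("%-14s -> %s\n", samples[i],
		       iphc_linklocal64(&a) ?
		       "fe80::/64, IID elision is safe" :
		       "fe80::/10 at best, carry the address inline");
	}
	return 0;
}

All three sample addresses are link-local by the /10 definition, but only
the first one keeps the compressed 64-bit source/destination form valid.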
Signed-off-by: Alexander Aring Acked-by: Jukka Rissanen Reviewed-by: Stefan Schmidt Signed-off-by: Marcel Holtmann --- net/6lowpan/iphc.c | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/net/6lowpan/iphc.c b/net/6lowpan/iphc.c index 99bb22aea346..68c80f3c9add 100644 --- a/net/6lowpan/iphc.c +++ b/net/6lowpan/iphc.c @@ -148,6 +148,11 @@ (((a)->s6_addr16[6]) == 0) && \ (((a)->s6_addr[14]) == 0)) +#define lowpan_is_linklocal_zero_padded(a) \ + (!(hdr->saddr.s6_addr[1] & 0x3f) && \ + !hdr->saddr.s6_addr16[1] && \ + !hdr->saddr.s6_addr32[1]) + #define LOWPAN_IPHC_CID_DCI(cid) (cid & 0x0f) #define LOWPAN_IPHC_CID_SCI(cid) ((cid & 0xf0) >> 4) @@ -1101,7 +1106,8 @@ int lowpan_header_compress(struct sk_buff *skb, const struct net_device *dev, true); iphc1 |= LOWPAN_IPHC_SAC; } else { - if (ipv6_saddr_type & IPV6_ADDR_LINKLOCAL) { + if (ipv6_saddr_type & IPV6_ADDR_LINKLOCAL && + lowpan_is_linklocal_zero_padded(hdr->saddr)) { iphc1 |= lowpan_compress_addr_64(&hc_ptr, &hdr->saddr, saddr, true); @@ -1135,7 +1141,8 @@ int lowpan_header_compress(struct sk_buff *skb, const struct net_device *dev, false); iphc1 |= LOWPAN_IPHC_DAC; } else { - if (ipv6_daddr_type & IPV6_ADDR_LINKLOCAL) { + if (ipv6_daddr_type & IPV6_ADDR_LINKLOCAL && + lowpan_is_linklocal_zero_padded(hdr->daddr)) { iphc1 |= lowpan_compress_addr_64(&hc_ptr, &hdr->daddr, daddr, false); From cd9d7213d5f546d9c0795fdcffe4ce5bf63445fd Mon Sep 17 00:00:00 2001 From: Sudip Mukherjee Date: Thu, 7 Apr 2016 16:46:04 +0530 Subject: [PATCH 0483/1649] ieee802154/adf7242: fix memory leak of firmware If the firmware upload or the firmware verification fails then we printed the error message and exited but we missed releasing the firmware. Signed-off-by: Sudip Mukherjee Acked-by: Michael Hennerich Signed-off-by: Marcel Holtmann --- drivers/net/ieee802154/adf7242.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/net/ieee802154/adf7242.c b/drivers/net/ieee802154/adf7242.c index 89154c079788..b82e39d24394 100644 --- a/drivers/net/ieee802154/adf7242.c +++ b/drivers/net/ieee802154/adf7242.c @@ -1030,6 +1030,7 @@ static int adf7242_hw_init(struct adf7242_local *lp) if (ret) { dev_err(&lp->spi->dev, "upload firmware failed with %d\n", ret); + release_firmware(fw); return ret; } @@ -1037,6 +1038,7 @@ static int adf7242_hw_init(struct adf7242_local *lp) if (ret) { dev_err(&lp->spi->dev, "verify firmware failed with %d\n", ret); + release_firmware(fw); return ret; } From ff1b68ab2daf292c0f0897f9c155a6ddc8484693 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Thu, 7 Apr 2016 19:39:34 +0100 Subject: [PATCH 0484/1649] nfp: correct RX buffer length calculation When calculating the RX buffer length we need to account for up to 2 VLAN tags. Rounding up to 1k is an relic of a distant past and can be removed. While at it also remove trivial print statement. Signed-off-by: Jakub Kicinski Signed-off-by: David S. 
Miller --- drivers/net/ethernet/netronome/nfp/nfp_net_common.c | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index 43c618bafdb6..0dae81454e77 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -1911,9 +1911,6 @@ static void nfp_net_set_rx_mode(struct net_device *netdev) static int nfp_net_change_mtu(struct net_device *netdev, int new_mtu) { struct nfp_net *nn = netdev_priv(netdev); - u32 tmp; - - nn_dbg(nn, "New MTU = %d\n", new_mtu); if (new_mtu < 68 || new_mtu > nn->max_mtu) { nn_err(nn, "New MTU (%d) is not valid\n", new_mtu); @@ -1921,10 +1918,7 @@ static int nfp_net_change_mtu(struct net_device *netdev, int new_mtu) } netdev->mtu = new_mtu; - - /* Freelist buffer size rounded up to the nearest 1K */ - tmp = new_mtu + ETH_HLEN + VLAN_HLEN + NFP_NET_MAX_PREPEND; - nn->fl_bufsz = roundup(tmp, 1024); + nn->fl_bufsz = NFP_NET_MAX_PREPEND + ETH_HLEN + VLAN_HLEN * 2 + new_mtu; /* restart if running */ if (netif_running(netdev)) { From 0ba40af963f01b557a4d7a0a6c550a51b0fb8d34 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Thu, 7 Apr 2016 19:39:35 +0100 Subject: [PATCH 0485/1649] nfp: move link state interrupt request/free calls We need to be able to disable the link state interrupt when the device is brought down. We used to just free the IRQ at the beginning of .ndo_stop(). As we now move towards more ordered .ndo_open()/.ndo_stop() paths LSC allocation should be placed in the "allocate resource" section. Since the IRQ can't be freed early in .ndo_stop(), it is disabled instead. Signed-off-by: Jakub Kicinski Signed-off-by: David S. Miller --- .../ethernet/netronome/nfp/nfp_net_common.c | 23 ++++++++++--------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index 0dae81454e77..5da1199e7afb 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -1729,10 +1729,16 @@ static int nfp_net_netdev_open(struct net_device *netdev) NFP_NET_IRQ_EXN_IDX, nn->exn_handler); if (err) return err; + err = nfp_net_aux_irq_request(nn, NFP_NET_CFG_LSC, "%s-lsc", + nn->lsc_name, sizeof(nn->lsc_name), + NFP_NET_IRQ_LSC_IDX, nn->lsc_handler); + if (err) + goto err_free_exn; + disable_irq(nn->irq_entries[NFP_NET_CFG_LSC].vector); err = nfp_net_alloc_rings(nn); if (err) - goto err_free_exn; + goto err_free_lsc; err = netif_set_real_num_tx_queues(netdev, nn->num_tx_rings); if (err) @@ -1812,19 +1818,11 @@ static int nfp_net_netdev_open(struct net_device *netdev) netif_tx_wake_all_queues(netdev); - err = nfp_net_aux_irq_request(nn, NFP_NET_CFG_LSC, "%s-lsc", - nn->lsc_name, sizeof(nn->lsc_name), - NFP_NET_IRQ_LSC_IDX, nn->lsc_handler); - if (err) - goto err_stop_tx; + enable_irq(nn->irq_entries[NFP_NET_CFG_LSC].vector); nfp_net_read_link_status(nn); return 0; -err_stop_tx: - netif_tx_disable(netdev); - for (r = 0; r < nn->num_r_vecs; r++) - nfp_net_tx_flush(nn->r_vecs[r].tx_ring); err_disable_napi: while (r--) { napi_disable(&nn->r_vecs[r].napi); @@ -1834,6 +1832,8 @@ err_clear_config: nfp_net_clear_config_and_disable(nn); err_free_rings: nfp_net_free_rings(nn); +err_free_lsc: + nfp_net_aux_irq_free(nn, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX); err_free_exn: nfp_net_aux_irq_free(nn, NFP_NET_CFG_EXN, NFP_NET_IRQ_EXN_IDX); 
return err; @@ -1855,7 +1855,7 @@ static int nfp_net_netdev_close(struct net_device *netdev) /* Step 1: Disable RX and TX rings from the Linux kernel perspective */ - nfp_net_aux_irq_free(nn, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX); + disable_irq(nn->irq_entries[NFP_NET_CFG_LSC].vector); netif_carrier_off(netdev); nn->link_up = false; @@ -1876,6 +1876,7 @@ static int nfp_net_netdev_close(struct net_device *netdev) } nfp_net_free_rings(nn); + nfp_net_aux_irq_free(nn, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX); nfp_net_aux_irq_free(nn, NFP_NET_CFG_EXN, NFP_NET_IRQ_EXN_IDX); nn_dbg(nn, "%s down", netdev->name); From 0afbfb183bf5e1029ecc644acbc487d22e095b14 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Thu, 7 Apr 2016 19:39:36 +0100 Subject: [PATCH 0486/1649] nfp: break up nfp_net_{alloc|free}_rings nfp_net_{alloc|free}_rings contained strange mix of allocations and vector initialization. Remove it, declare vector init as a separate function and handle allocations explicitly. Signed-off-by: Jakub Kicinski Signed-off-by: David S. Miller --- .../ethernet/netronome/nfp/nfp_net_common.c | 132 +++++++----------- 1 file changed, 50 insertions(+), 82 deletions(-) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index 5da1199e7afb..8692587904c5 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -1488,91 +1488,40 @@ err_alloc: return -ENOMEM; } -static void __nfp_net_free_rings(struct nfp_net *nn, unsigned int n_free) +static int +nfp_net_prepare_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec, + int idx) { - struct nfp_net_r_vector *r_vec; - struct msix_entry *entry; - - while (n_free--) { - r_vec = &nn->r_vecs[n_free]; - entry = &nn->irq_entries[r_vec->irq_idx]; - - nfp_net_rx_ring_free(r_vec->rx_ring); - nfp_net_tx_ring_free(r_vec->tx_ring); - - irq_set_affinity_hint(entry->vector, NULL); - free_irq(entry->vector, r_vec); - - netif_napi_del(&r_vec->napi); - } -} - -/** - * nfp_net_free_rings() - Free all ring resources - * @nn: NFP Net device to reconfigure - */ -static void nfp_net_free_rings(struct nfp_net *nn) -{ - __nfp_net_free_rings(nn, nn->num_r_vecs); -} - -/** - * nfp_net_alloc_rings() - Allocate resources for RX and TX rings - * @nn: NFP Net device to reconfigure - * - * Return: 0 on success or negative errno on error. 
- */ -static int nfp_net_alloc_rings(struct nfp_net *nn) -{ - struct nfp_net_r_vector *r_vec; - struct msix_entry *entry; + struct msix_entry *entry = &nn->irq_entries[r_vec->irq_idx]; int err; - int r; - for (r = 0; r < nn->num_r_vecs; r++) { - r_vec = &nn->r_vecs[r]; - entry = &nn->irq_entries[r_vec->irq_idx]; - - /* Setup NAPI */ - netif_napi_add(nn->netdev, &r_vec->napi, - nfp_net_poll, NAPI_POLL_WEIGHT); - - snprintf(r_vec->name, sizeof(r_vec->name), - "%s-rxtx-%d", nn->netdev->name, r); - err = request_irq(entry->vector, r_vec->handler, 0, - r_vec->name, r_vec); - if (err) { - nn_dbg(nn, "Error requesting IRQ %d\n", entry->vector); - goto err_napi_del; - } - - irq_set_affinity_hint(entry->vector, &r_vec->affinity_mask); - - nn_dbg(nn, "RV%02d: irq=%03d/%03d\n", - r, entry->vector, entry->entry); - - /* Allocate TX ring resources */ - err = nfp_net_tx_ring_alloc(r_vec->tx_ring); - if (err) - goto err_free_irq; - - /* Allocate RX ring resources */ - err = nfp_net_rx_ring_alloc(r_vec->rx_ring); - if (err) - goto err_free_tx; + snprintf(r_vec->name, sizeof(r_vec->name), + "%s-rxtx-%d", nn->netdev->name, idx); + err = request_irq(entry->vector, r_vec->handler, 0, r_vec->name, r_vec); + if (err) { + nn_err(nn, "Error requesting IRQ %d\n", entry->vector); + return err; } + /* Setup NAPI */ + netif_napi_add(nn->netdev, &r_vec->napi, + nfp_net_poll, NAPI_POLL_WEIGHT); + + irq_set_affinity_hint(entry->vector, &r_vec->affinity_mask); + + nn_dbg(nn, "RV%02d: irq=%03d/%03d\n", idx, entry->vector, entry->entry); + return 0; +} + +static void +nfp_net_cleanup_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec) +{ + struct msix_entry *entry = &nn->irq_entries[r_vec->irq_idx]; -err_free_tx: - nfp_net_tx_ring_free(r_vec->tx_ring); -err_free_irq: irq_set_affinity_hint(entry->vector, NULL); - free_irq(entry->vector, r_vec); -err_napi_del: netif_napi_del(&r_vec->napi); - __nfp_net_free_rings(nn, r); - return err; + free_irq(entry->vector, r_vec); } /** @@ -1736,9 +1685,19 @@ static int nfp_net_netdev_open(struct net_device *netdev) goto err_free_exn; disable_irq(nn->irq_entries[NFP_NET_CFG_LSC].vector); - err = nfp_net_alloc_rings(nn); - if (err) - goto err_free_lsc; + for (r = 0; r < nn->num_r_vecs; r++) { + err = nfp_net_prepare_vector(nn, &nn->r_vecs[r], r); + if (err) + goto err_free_prev_vecs; + + err = nfp_net_tx_ring_alloc(nn->r_vecs[r].tx_ring); + if (err) + goto err_cleanup_vec_p; + + err = nfp_net_rx_ring_alloc(nn->r_vecs[r].rx_ring); + if (err) + goto err_free_tx_ring_p; + } err = netif_set_real_num_tx_queues(netdev, nn->num_tx_rings); if (err) @@ -1831,8 +1790,15 @@ err_disable_napi: err_clear_config: nfp_net_clear_config_and_disable(nn); err_free_rings: - nfp_net_free_rings(nn); -err_free_lsc: + r = nn->num_r_vecs; +err_free_prev_vecs: + while (r--) { + nfp_net_rx_ring_free(nn->r_vecs[r].rx_ring); +err_free_tx_ring_p: + nfp_net_tx_ring_free(nn->r_vecs[r].tx_ring); +err_cleanup_vec_p: + nfp_net_cleanup_vector(nn, &nn->r_vecs[r]); + } nfp_net_aux_irq_free(nn, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX); err_free_exn: nfp_net_aux_irq_free(nn, NFP_NET_CFG_EXN, NFP_NET_IRQ_EXN_IDX); @@ -1873,9 +1839,11 @@ static int nfp_net_netdev_close(struct net_device *netdev) for (r = 0; r < nn->num_r_vecs; r++) { nfp_net_rx_flush(nn->r_vecs[r].rx_ring); nfp_net_tx_flush(nn->r_vecs[r].tx_ring); + nfp_net_rx_ring_free(nn->r_vecs[r].rx_ring); + nfp_net_tx_ring_free(nn->r_vecs[r].tx_ring); + nfp_net_cleanup_vector(nn, &nn->r_vecs[r]); } - nfp_net_free_rings(nn); nfp_net_aux_irq_free(nn, NFP_NET_CFG_LSC, 
NFP_NET_IRQ_LSC_IDX); nfp_net_aux_irq_free(nn, NFP_NET_CFG_EXN, NFP_NET_IRQ_EXN_IDX); From d79737c25e4a170e7cd75866e45042de746934d8 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Thu, 7 Apr 2016 19:39:37 +0100 Subject: [PATCH 0487/1649] nfp: make *x_ring_init do all the init nfp_net_[rt]x_ring_init functions used to be called from probe path only and some of their functionality was spilled to the call site. In order to reuse them for ring reconfiguration we need them to do all the init. Signed-off-by: Jakub Kicinski Signed-off-by: David S. Miller --- .../ethernet/netronome/nfp/nfp_net_common.c | 28 ++++++++++++------- 1 file changed, 18 insertions(+), 10 deletions(-) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index 8692587904c5..7cd20fcd631a 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -347,12 +347,18 @@ static irqreturn_t nfp_net_irq_exn(int irq, void *data) /** * nfp_net_tx_ring_init() - Fill in the boilerplate for a TX ring * @tx_ring: TX ring structure + * @r_vec: IRQ vector servicing this ring + * @idx: Ring index */ -static void nfp_net_tx_ring_init(struct nfp_net_tx_ring *tx_ring) +static void +nfp_net_tx_ring_init(struct nfp_net_tx_ring *tx_ring, + struct nfp_net_r_vector *r_vec, unsigned int idx) { - struct nfp_net_r_vector *r_vec = tx_ring->r_vec; struct nfp_net *nn = r_vec->nfp_net; + tx_ring->idx = idx; + tx_ring->r_vec = r_vec; + tx_ring->qcidx = tx_ring->idx * nn->stride_tx; tx_ring->qcp_q = nn->tx_bar + NFP_QCP_QUEUE_OFF(tx_ring->qcidx); } @@ -360,12 +366,18 @@ static void nfp_net_tx_ring_init(struct nfp_net_tx_ring *tx_ring) /** * nfp_net_rx_ring_init() - Fill in the boilerplate for a RX ring * @rx_ring: RX ring structure + * @r_vec: IRQ vector servicing this ring + * @idx: Ring index */ -static void nfp_net_rx_ring_init(struct nfp_net_rx_ring *rx_ring) +static void +nfp_net_rx_ring_init(struct nfp_net_rx_ring *rx_ring, + struct nfp_net_r_vector *r_vec, unsigned int idx) { - struct nfp_net_r_vector *r_vec = rx_ring->r_vec; struct nfp_net *nn = r_vec->nfp_net; + rx_ring->idx = idx; + rx_ring->r_vec = r_vec; + rx_ring->fl_qcidx = rx_ring->idx * nn->stride_rx; rx_ring->rx_qcidx = rx_ring->fl_qcidx + (nn->stride_rx - 1); @@ -403,14 +415,10 @@ static void nfp_net_irqs_assign(struct net_device *netdev) cpumask_set_cpu(r, &r_vec->affinity_mask); r_vec->tx_ring = &nn->tx_rings[r]; - nn->tx_rings[r].idx = r; - nn->tx_rings[r].r_vec = r_vec; - nfp_net_tx_ring_init(r_vec->tx_ring); + nfp_net_tx_ring_init(r_vec->tx_ring, r_vec, r); r_vec->rx_ring = &nn->rx_rings[r]; - nn->rx_rings[r].idx = r; - nn->rx_rings[r].r_vec = r_vec; - nfp_net_rx_ring_init(r_vec->rx_ring); + nfp_net_rx_ring_init(r_vec->rx_ring, r_vec, r); } } From 73725d9dfd99c5bb1da4d25bbe980231aa48d251 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Thu, 7 Apr 2016 19:39:38 +0100 Subject: [PATCH 0488/1649] nfp: allocate ring SW structs dynamically To be able to switch rings more easily on config changes allocate them dynamically, separately from nfp_net structure. Signed-off-by: Jakub Kicinski Signed-off-by: David S. 
Miller --- drivers/net/ethernet/netronome/nfp/nfp_net.h | 6 ++-- .../ethernet/netronome/nfp/nfp_net_common.c | 28 +++++++++++++++---- .../ethernet/netronome/nfp/nfp_net_debugfs.c | 20 +++++++------ 3 files changed, 37 insertions(+), 17 deletions(-) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h index 75683fb26734..fc005c982b7d 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h @@ -472,6 +472,9 @@ struct nfp_net { u32 rx_offset; + struct nfp_net_tx_ring *tx_rings; + struct nfp_net_rx_ring *rx_rings; + #ifdef CONFIG_PCI_IOV unsigned int num_vfs; struct vf_data_storage *vfinfo; @@ -504,9 +507,6 @@ struct nfp_net { int txd_cnt; int rxd_cnt; - struct nfp_net_tx_ring tx_rings[NFP_NET_MAX_TX_RINGS]; - struct nfp_net_rx_ring rx_rings[NFP_NET_MAX_RX_RINGS]; - u8 num_irqs; u8 num_r_vecs; struct nfp_net_r_vector r_vecs[NFP_NET_MAX_TX_RINGS]; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index 7cd20fcd631a..66fab7162b7c 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -413,12 +413,6 @@ static void nfp_net_irqs_assign(struct net_device *netdev) r_vec->irq_idx = NFP_NET_NON_Q_VECTORS + r; cpumask_set_cpu(r, &r_vec->affinity_mask); - - r_vec->tx_ring = &nn->tx_rings[r]; - nfp_net_tx_ring_init(r_vec->tx_ring, r_vec, r); - - r_vec->rx_ring = &nn->rx_rings[r]; - nfp_net_rx_ring_init(r_vec->rx_ring, r_vec, r); } } @@ -1503,6 +1497,12 @@ nfp_net_prepare_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec, struct msix_entry *entry = &nn->irq_entries[r_vec->irq_idx]; int err; + r_vec->tx_ring = &nn->tx_rings[idx]; + nfp_net_tx_ring_init(r_vec->tx_ring, r_vec, idx); + + r_vec->rx_ring = &nn->rx_rings[idx]; + nfp_net_rx_ring_init(r_vec->rx_ring, r_vec, idx); + snprintf(r_vec->name, sizeof(r_vec->name), "%s-rxtx-%d", nn->netdev->name, idx); err = request_irq(entry->vector, r_vec->handler, 0, r_vec->name, r_vec); @@ -1693,6 +1693,15 @@ static int nfp_net_netdev_open(struct net_device *netdev) goto err_free_exn; disable_irq(nn->irq_entries[NFP_NET_CFG_LSC].vector); + nn->rx_rings = kcalloc(nn->num_rx_rings, sizeof(*nn->rx_rings), + GFP_KERNEL); + if (!nn->rx_rings) + goto err_free_lsc; + nn->tx_rings = kcalloc(nn->num_tx_rings, sizeof(*nn->tx_rings), + GFP_KERNEL); + if (!nn->tx_rings) + goto err_free_rx_rings; + for (r = 0; r < nn->num_r_vecs; r++) { err = nfp_net_prepare_vector(nn, &nn->r_vecs[r], r); if (err) @@ -1807,6 +1816,10 @@ err_free_tx_ring_p: err_cleanup_vec_p: nfp_net_cleanup_vector(nn, &nn->r_vecs[r]); } + kfree(nn->tx_rings); +err_free_rx_rings: + kfree(nn->rx_rings); +err_free_lsc: nfp_net_aux_irq_free(nn, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX); err_free_exn: nfp_net_aux_irq_free(nn, NFP_NET_CFG_EXN, NFP_NET_IRQ_EXN_IDX); @@ -1852,6 +1865,9 @@ static int nfp_net_netdev_close(struct net_device *netdev) nfp_net_cleanup_vector(nn, &nn->r_vecs[r]); } + kfree(nn->rx_rings); + kfree(nn->tx_rings); + nfp_net_aux_irq_free(nn, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX); nfp_net_aux_irq_free(nn, NFP_NET_CFG_EXN, NFP_NET_IRQ_EXN_IDX); diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c b/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c index 4c97c713121c..f86a1f13d27b 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c @@ -40,8 +40,9 @@ static struct dentry 
*nfp_dir; static int nfp_net_debugfs_rx_q_read(struct seq_file *file, void *data) { - struct nfp_net_rx_ring *rx_ring = file->private; int fl_rd_p, fl_wr_p, rx_rd_p, rx_wr_p, rxd_cnt; + struct nfp_net_r_vector *r_vec = file->private; + struct nfp_net_rx_ring *rx_ring; struct nfp_net_rx_desc *rxd; struct sk_buff *skb; struct nfp_net *nn; @@ -49,9 +50,10 @@ static int nfp_net_debugfs_rx_q_read(struct seq_file *file, void *data) rtnl_lock(); - if (!rx_ring->r_vec || !rx_ring->r_vec->nfp_net) + if (!r_vec->nfp_net || !r_vec->rx_ring) goto out; - nn = rx_ring->r_vec->nfp_net; + nn = r_vec->nfp_net; + rx_ring = r_vec->rx_ring; if (!netif_running(nn->netdev)) goto out; @@ -115,7 +117,8 @@ static const struct file_operations nfp_rx_q_fops = { static int nfp_net_debugfs_tx_q_read(struct seq_file *file, void *data) { - struct nfp_net_tx_ring *tx_ring = file->private; + struct nfp_net_r_vector *r_vec = file->private; + struct nfp_net_tx_ring *tx_ring; struct nfp_net_tx_desc *txd; int d_rd_p, d_wr_p, txd_cnt; struct sk_buff *skb; @@ -124,9 +127,10 @@ static int nfp_net_debugfs_tx_q_read(struct seq_file *file, void *data) rtnl_lock(); - if (!tx_ring->r_vec || !tx_ring->r_vec->nfp_net) + if (!r_vec->nfp_net || !r_vec->tx_ring) goto out; - nn = tx_ring->r_vec->nfp_net; + nn = r_vec->nfp_net; + tx_ring = r_vec->tx_ring; if (!netif_running(nn->netdev)) goto out; @@ -207,13 +211,13 @@ void nfp_net_debugfs_adapter_add(struct nfp_net *nn) for (i = 0; i < nn->num_rx_rings; i++) { sprintf(int_name, "%d", i); debugfs_create_file(int_name, S_IRUSR, rx, - &nn->rx_rings[i], &nfp_rx_q_fops); + &nn->r_vecs[i], &nfp_rx_q_fops); } for (i = 0; i < nn->num_tx_rings; i++) { sprintf(int_name, "%d", i); debugfs_create_file(int_name, S_IRUSR, tx, - &nn->tx_rings[i], &nfp_tx_q_fops); + &nn->r_vecs[i], &nfp_tx_q_fops); } } From 827deea9bcd8fec3b6c0acf0178a5c508f3dfbe1 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Thu, 7 Apr 2016 19:39:39 +0100 Subject: [PATCH 0489/1649] nfp: cleanup tx ring flush and rename to reset Since we never used flush without freeing the ring later the functionality of the two operations is mixed. Rename flush to ring reset and move there all the things which have to be done after FW ring state is cleared. While at it do some clean-ups. Signed-off-by: Jakub Kicinski Signed-off-by: David S. 
Miller --- .../ethernet/netronome/nfp/nfp_net_common.c | 79 +++++++++---------- 1 file changed, 36 insertions(+), 43 deletions(-) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index 66fab7162b7c..61f243760ee0 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -867,61 +867,59 @@ static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring) } /** - * nfp_net_tx_flush() - Free any untransmitted buffers currently on the TX ring - * @tx_ring: TX ring structure + * nfp_net_tx_ring_reset() - Free any untransmitted buffers and reset pointers + * @nn: NFP Net device + * @tx_ring: TX ring structure * * Assumes that the device is stopped */ -static void nfp_net_tx_flush(struct nfp_net_tx_ring *tx_ring) +static void +nfp_net_tx_ring_reset(struct nfp_net *nn, struct nfp_net_tx_ring *tx_ring) { - struct nfp_net_r_vector *r_vec = tx_ring->r_vec; - struct nfp_net *nn = r_vec->nfp_net; - struct pci_dev *pdev = nn->pdev; const struct skb_frag_struct *frag; struct netdev_queue *nd_q; - struct sk_buff *skb; - int nr_frags; - int fidx; - int idx; + struct pci_dev *pdev = nn->pdev; while (tx_ring->rd_p != tx_ring->wr_p) { + int nr_frags, fidx, idx; + struct sk_buff *skb; + idx = tx_ring->rd_p % tx_ring->cnt; - skb = tx_ring->txbufs[idx].skb; - if (skb) { - nr_frags = skb_shinfo(skb)->nr_frags; - fidx = tx_ring->txbufs[idx].fidx; + nr_frags = skb_shinfo(skb)->nr_frags; + fidx = tx_ring->txbufs[idx].fidx; - if (fidx == -1) { - /* unmap head */ - dma_unmap_single(&pdev->dev, - tx_ring->txbufs[idx].dma_addr, - skb_headlen(skb), - DMA_TO_DEVICE); - } else { - /* unmap fragment */ - frag = &skb_shinfo(skb)->frags[fidx]; - dma_unmap_page(&pdev->dev, - tx_ring->txbufs[idx].dma_addr, - skb_frag_size(frag), - DMA_TO_DEVICE); - } - - /* check for last gather fragment */ - if (fidx == nr_frags - 1) - dev_kfree_skb_any(skb); - - tx_ring->txbufs[idx].dma_addr = 0; - tx_ring->txbufs[idx].skb = NULL; - tx_ring->txbufs[idx].fidx = -2; + if (fidx == -1) { + /* unmap head */ + dma_unmap_single(&pdev->dev, + tx_ring->txbufs[idx].dma_addr, + skb_headlen(skb), DMA_TO_DEVICE); + } else { + /* unmap fragment */ + frag = &skb_shinfo(skb)->frags[fidx]; + dma_unmap_page(&pdev->dev, + tx_ring->txbufs[idx].dma_addr, + skb_frag_size(frag), DMA_TO_DEVICE); } - memset(&tx_ring->txds[idx], 0, sizeof(tx_ring->txds[idx])); + /* check for last gather fragment */ + if (fidx == nr_frags - 1) + dev_kfree_skb_any(skb); + + tx_ring->txbufs[idx].dma_addr = 0; + tx_ring->txbufs[idx].skb = NULL; + tx_ring->txbufs[idx].fidx = -2; tx_ring->qcp_rd_p++; tx_ring->rd_p++; } + memset(tx_ring->txds, 0, sizeof(*tx_ring->txds) * tx_ring->cnt); + tx_ring->wr_p = 0; + tx_ring->rd_p = 0; + tx_ring->qcp_rd_p = 0; + tx_ring->wr_ptr_add = 0; + nd_q = netdev_get_tx_queue(nn->netdev, tx_ring->idx); netdev_tx_reset_queue(nd_q); } @@ -1362,11 +1360,6 @@ static void nfp_net_tx_ring_free(struct nfp_net_tx_ring *tx_ring) tx_ring->txds, tx_ring->dma); tx_ring->cnt = 0; - tx_ring->wr_p = 0; - tx_ring->rd_p = 0; - tx_ring->qcp_rd_p = 0; - tx_ring->wr_ptr_add = 0; - tx_ring->txbufs = NULL; tx_ring->txds = NULL; tx_ring->dma = 0; @@ -1859,7 +1852,7 @@ static int nfp_net_netdev_close(struct net_device *netdev) */ for (r = 0; r < nn->num_r_vecs; r++) { nfp_net_rx_flush(nn->r_vecs[r].rx_ring); - nfp_net_tx_flush(nn->r_vecs[r].tx_ring); + nfp_net_tx_ring_reset(nn, nn->r_vecs[r].tx_ring); nfp_net_rx_ring_free(nn->r_vecs[r].rx_ring); 
nfp_net_tx_ring_free(nn->r_vecs[r].tx_ring); nfp_net_cleanup_vector(nn, &nn->r_vecs[r]); From 1934680f5582b69a708181741cd77473a0d530ed Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Thu, 7 Apr 2016 19:39:40 +0100 Subject: [PATCH 0490/1649] nfp: reorganize initial filling of RX rings Separate allocation of buffers from giving them to FW, thanks to this it will be possible to move allocation earlier on .ndo_open() path and reuse buffers during runtime reconfiguration. Similar to TX side clean up the spill of functionality from flush to freeing the ring. Unlike on TX side, RX ring reset does not free buffers from the ring. Ring reset means only that FW pointers are zeroed and buffers on the ring must be placed in [0, cnt - 1) positions. Signed-off-by: Jakub Kicinski Signed-off-by: David S. Miller --- .../ethernet/netronome/nfp/nfp_net_common.c | 121 ++++++++++++------ 1 file changed, 79 insertions(+), 42 deletions(-) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index 61f243760ee0..0c3c37ad28a4 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -1020,61 +1020,99 @@ static void nfp_net_rx_give_one(struct nfp_net_rx_ring *rx_ring, } /** - * nfp_net_rx_flush() - Free any buffers currently on the RX ring - * @rx_ring: RX ring to remove buffers from + * nfp_net_rx_ring_reset() - Reflect in SW state of freelist after disable + * @rx_ring: RX ring structure * - * Assumes that the device is stopped + * Warning: Do *not* call if ring buffers were never put on the FW freelist + * (i.e. device was not enabled)! */ -static void nfp_net_rx_flush(struct nfp_net_rx_ring *rx_ring) +static void nfp_net_rx_ring_reset(struct nfp_net_rx_ring *rx_ring) +{ + unsigned int wr_idx, last_idx; + + /* Move the empty entry to the end of the list */ + wr_idx = rx_ring->wr_p % rx_ring->cnt; + last_idx = rx_ring->cnt - 1; + rx_ring->rxbufs[wr_idx].dma_addr = rx_ring->rxbufs[last_idx].dma_addr; + rx_ring->rxbufs[wr_idx].skb = rx_ring->rxbufs[last_idx].skb; + rx_ring->rxbufs[last_idx].dma_addr = 0; + rx_ring->rxbufs[last_idx].skb = NULL; + + memset(rx_ring->rxds, 0, sizeof(*rx_ring->rxds) * rx_ring->cnt); + rx_ring->wr_p = 0; + rx_ring->rd_p = 0; + rx_ring->wr_ptr_add = 0; +} + +/** + * nfp_net_rx_ring_bufs_free() - Free any buffers currently on the RX ring + * @nn: NFP Net device + * @rx_ring: RX ring to remove buffers from + * + * Assumes that the device is stopped and buffers are in [0, ring->cnt - 1) + * entries. After device is disabled nfp_net_rx_ring_reset() must be called + * to restore required ring geometry. + */ +static void +nfp_net_rx_ring_bufs_free(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring) { - struct nfp_net *nn = rx_ring->r_vec->nfp_net; struct pci_dev *pdev = nn->pdev; - int idx; + unsigned int i; - while (rx_ring->rd_p != rx_ring->wr_p) { - idx = rx_ring->rd_p % rx_ring->cnt; + for (i = 0; i < rx_ring->cnt - 1; i++) { + /* NULL skb can only happen when initial filling of the ring + * fails to allocate enough buffers and calls here to free + * already allocated ones. 
+ */ + if (!rx_ring->rxbufs[i].skb) + continue; - if (rx_ring->rxbufs[idx].skb) { - dma_unmap_single(&pdev->dev, - rx_ring->rxbufs[idx].dma_addr, - nn->fl_bufsz, DMA_FROM_DEVICE); - dev_kfree_skb_any(rx_ring->rxbufs[idx].skb); - rx_ring->rxbufs[idx].dma_addr = 0; - rx_ring->rxbufs[idx].skb = NULL; - } - - memset(&rx_ring->rxds[idx], 0, sizeof(rx_ring->rxds[idx])); - - rx_ring->rd_p++; + dma_unmap_single(&pdev->dev, rx_ring->rxbufs[i].dma_addr, + nn->fl_bufsz, DMA_FROM_DEVICE); + dev_kfree_skb_any(rx_ring->rxbufs[i].skb); + rx_ring->rxbufs[i].dma_addr = 0; + rx_ring->rxbufs[i].skb = NULL; } } /** - * nfp_net_rx_fill_freelist() - Attempt filling freelist with RX buffers - * @rx_ring: RX ring to fill - * - * Try to fill as many buffers as possible into freelist. Return - * number of buffers added. - * - * Return: Number of freelist buffers added. + * nfp_net_rx_ring_bufs_alloc() - Fill RX ring with buffers (don't give to FW) + * @nn: NFP Net device + * @rx_ring: RX ring to remove buffers from */ -static int nfp_net_rx_fill_freelist(struct nfp_net_rx_ring *rx_ring) +static int +nfp_net_rx_ring_bufs_alloc(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring) { - struct sk_buff *skb; - dma_addr_t dma_addr; + struct nfp_net_rx_buf *rxbufs; + unsigned int i; - while (nfp_net_rx_space(rx_ring)) { - skb = nfp_net_rx_alloc_one(rx_ring, &dma_addr); - if (!skb) { - nfp_net_rx_flush(rx_ring); + rxbufs = rx_ring->rxbufs; + + for (i = 0; i < rx_ring->cnt - 1; i++) { + rxbufs[i].skb = + nfp_net_rx_alloc_one(rx_ring, &rxbufs[i].dma_addr); + if (!rxbufs[i].skb) { + nfp_net_rx_ring_bufs_free(nn, rx_ring); return -ENOMEM; } - nfp_net_rx_give_one(rx_ring, skb, dma_addr); } return 0; } +/** + * nfp_net_rx_ring_fill_freelist() - Give buffers from the ring to FW + * @rx_ring: RX ring to fill + */ +static void nfp_net_rx_ring_fill_freelist(struct nfp_net_rx_ring *rx_ring) +{ + unsigned int i; + + for (i = 0; i < rx_ring->cnt - 1; i++) + nfp_net_rx_give_one(rx_ring, rx_ring->rxbufs[i].skb, + rx_ring->rxbufs[i].dma_addr); +} + /** * nfp_net_rx_csum_has_errors() - group check if rxd has any csum errors * @flags: RX descriptor flags field in CPU byte order @@ -1431,10 +1469,6 @@ static void nfp_net_rx_ring_free(struct nfp_net_rx_ring *rx_ring) rx_ring->rxds, rx_ring->dma); rx_ring->cnt = 0; - rx_ring->wr_p = 0; - rx_ring->rd_p = 0; - rx_ring->wr_ptr_add = 0; - rx_ring->rxbufs = NULL; rx_ring->rxds = NULL; rx_ring->dma = 0; @@ -1641,12 +1675,13 @@ static int nfp_net_start_vec(struct nfp_net *nn, struct nfp_net_r_vector *r_vec) disable_irq(irq_vec); - err = nfp_net_rx_fill_freelist(r_vec->rx_ring); + err = nfp_net_rx_ring_bufs_alloc(r_vec->nfp_net, r_vec->rx_ring); if (err) { nn_err(nn, "RV%02d: couldn't allocate enough buffers\n", r_vec->irq_idx); goto out; } + nfp_net_rx_ring_fill_freelist(r_vec->rx_ring); napi_enable(&r_vec->napi); out: @@ -1795,7 +1830,8 @@ static int nfp_net_netdev_open(struct net_device *netdev) err_disable_napi: while (r--) { napi_disable(&nn->r_vecs[r].napi); - nfp_net_rx_flush(nn->r_vecs[r].rx_ring); + nfp_net_rx_ring_reset(nn->r_vecs[r].rx_ring); + nfp_net_rx_ring_bufs_free(nn, nn->r_vecs[r].rx_ring); } err_clear_config: nfp_net_clear_config_and_disable(nn); @@ -1851,7 +1887,8 @@ static int nfp_net_netdev_close(struct net_device *netdev) /* Step 3: Free resources */ for (r = 0; r < nn->num_r_vecs; r++) { - nfp_net_rx_flush(nn->r_vecs[r].rx_ring); + nfp_net_rx_ring_reset(nn->r_vecs[r].rx_ring); + nfp_net_rx_ring_bufs_free(nn, nn->r_vecs[r].rx_ring); nfp_net_tx_ring_reset(nn, 
nn->r_vecs[r].tx_ring); nfp_net_rx_ring_free(nn->r_vecs[r].rx_ring); nfp_net_tx_ring_free(nn->r_vecs[r].tx_ring); From 114bdef0be28aa9aa71e291d133e79edd514f8dc Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Thu, 7 Apr 2016 19:39:41 +0100 Subject: [PATCH 0491/1649] nfp: preallocate RX buffers early in .ndo_open We want the .ndo_open() to have following structure: - allocate resources; - configure HW/FW; - enable the device from stack perspective. Therefore filling RX rings needs to be moved to the beginning of .ndo_open(). Signed-off-by: Jakub Kicinski Signed-off-by: David S. Miller --- .../ethernet/netronome/nfp/nfp_net_common.c | 34 ++++++------------- 1 file changed, 11 insertions(+), 23 deletions(-) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index 0c3c37ad28a4..a6a917fe8e31 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -1666,28 +1666,19 @@ static void nfp_net_clear_config_and_disable(struct nfp_net *nn) * @nn: NFP Net device structure * @r_vec: Ring vector to be started */ -static int nfp_net_start_vec(struct nfp_net *nn, struct nfp_net_r_vector *r_vec) +static void +nfp_net_start_vec(struct nfp_net *nn, struct nfp_net_r_vector *r_vec) { unsigned int irq_vec; - int err = 0; irq_vec = nn->irq_entries[r_vec->irq_idx].vector; disable_irq(irq_vec); - err = nfp_net_rx_ring_bufs_alloc(r_vec->nfp_net, r_vec->rx_ring); - if (err) { - nn_err(nn, "RV%02d: couldn't allocate enough buffers\n", - r_vec->irq_idx); - goto out; - } nfp_net_rx_ring_fill_freelist(r_vec->rx_ring); - napi_enable(&r_vec->napi); -out: - enable_irq(irq_vec); - return err; + enable_irq(irq_vec); } static int nfp_net_netdev_open(struct net_device *netdev) @@ -1742,6 +1733,10 @@ static int nfp_net_netdev_open(struct net_device *netdev) err = nfp_net_rx_ring_alloc(nn->r_vecs[r].rx_ring); if (err) goto err_free_tx_ring_p; + + err = nfp_net_rx_ring_bufs_alloc(nn, nn->r_vecs[r].rx_ring); + if (err) + goto err_flush_rx_ring_p; } err = netif_set_real_num_tx_queues(netdev, nn->num_tx_rings); @@ -1814,11 +1809,8 @@ static int nfp_net_netdev_open(struct net_device *netdev) * - enable all TX queues * - set link state */ - for (r = 0; r < nn->num_r_vecs; r++) { - err = nfp_net_start_vec(nn, &nn->r_vecs[r]); - if (err) - goto err_disable_napi; - } + for (r = 0; r < nn->num_r_vecs; r++) + nfp_net_start_vec(nn, &nn->r_vecs[r]); netif_tx_wake_all_queues(netdev); @@ -1827,18 +1819,14 @@ static int nfp_net_netdev_open(struct net_device *netdev) return 0; -err_disable_napi: - while (r--) { - napi_disable(&nn->r_vecs[r].napi); - nfp_net_rx_ring_reset(nn->r_vecs[r].rx_ring); - nfp_net_rx_ring_bufs_free(nn, nn->r_vecs[r].rx_ring); - } err_clear_config: nfp_net_clear_config_and_disable(nn); err_free_rings: r = nn->num_r_vecs; err_free_prev_vecs: while (r--) { + nfp_net_rx_ring_bufs_free(nn, nn->r_vecs[r].rx_ring); +err_flush_rx_ring_p: nfp_net_rx_ring_free(nn->r_vecs[r].rx_ring); err_free_tx_ring_p: nfp_net_tx_ring_free(nn->r_vecs[r].tx_ring); From ca40feab8f3d46a69bde7a13d652db2c9246c067 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Thu, 7 Apr 2016 19:39:42 +0100 Subject: [PATCH 0492/1649] nfp: move filling ring information to FW config nfp_net_[rt]x_ring_{alloc,free} should only allocate or free ring resources without touching the device. Move setting parameters in the BAR to separate functions. 
This will make it possible to reuse alloc/free functions to allocate new rings while the device is running. Signed-off-by: Jakub Kicinski Signed-off-by: David S. Miller --- .../ethernet/netronome/nfp/nfp_net_common.c | 50 ++++++++++++------- 1 file changed, 32 insertions(+), 18 deletions(-) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index a6a917fe8e31..342335d09fb2 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -1387,10 +1387,6 @@ static void nfp_net_tx_ring_free(struct nfp_net_tx_ring *tx_ring) struct nfp_net *nn = r_vec->nfp_net; struct pci_dev *pdev = nn->pdev; - nn_writeq(nn, NFP_NET_CFG_TXR_ADDR(tx_ring->idx), 0); - nn_writeb(nn, NFP_NET_CFG_TXR_SZ(tx_ring->idx), 0); - nn_writeb(nn, NFP_NET_CFG_TXR_VEC(tx_ring->idx), 0); - kfree(tx_ring->txbufs); if (tx_ring->txds) @@ -1430,11 +1426,6 @@ static int nfp_net_tx_ring_alloc(struct nfp_net_tx_ring *tx_ring) if (!tx_ring->txbufs) goto err_alloc; - /* Write the DMA address, size and MSI-X info to the device */ - nn_writeq(nn, NFP_NET_CFG_TXR_ADDR(tx_ring->idx), tx_ring->dma); - nn_writeb(nn, NFP_NET_CFG_TXR_SZ(tx_ring->idx), ilog2(tx_ring->cnt)); - nn_writeb(nn, NFP_NET_CFG_TXR_VEC(tx_ring->idx), r_vec->irq_idx); - netif_set_xps_queue(nn->netdev, &r_vec->affinity_mask, tx_ring->idx); nn_dbg(nn, "TxQ%02d: QCidx=%02d cnt=%d dma=%#llx host=%p\n", @@ -1458,10 +1449,6 @@ static void nfp_net_rx_ring_free(struct nfp_net_rx_ring *rx_ring) struct nfp_net *nn = r_vec->nfp_net; struct pci_dev *pdev = nn->pdev; - nn_writeq(nn, NFP_NET_CFG_RXR_ADDR(rx_ring->idx), 0); - nn_writeb(nn, NFP_NET_CFG_RXR_SZ(rx_ring->idx), 0); - nn_writeb(nn, NFP_NET_CFG_RXR_VEC(rx_ring->idx), 0); - kfree(rx_ring->rxbufs); if (rx_ring->rxds) @@ -1501,11 +1488,6 @@ static int nfp_net_rx_ring_alloc(struct nfp_net_rx_ring *rx_ring) if (!rx_ring->rxbufs) goto err_alloc; - /* Write the DMA address, size and MSI-X info to the device */ - nn_writeq(nn, NFP_NET_CFG_RXR_ADDR(rx_ring->idx), rx_ring->dma); - nn_writeb(nn, NFP_NET_CFG_RXR_SZ(rx_ring->idx), ilog2(rx_ring->cnt)); - nn_writeb(nn, NFP_NET_CFG_RXR_VEC(rx_ring->idx), r_vec->irq_idx); - nn_dbg(nn, "RxQ%02d: FlQCidx=%02d RxQCidx=%02d cnt=%d dma=%#llx host=%p\n", rx_ring->idx, rx_ring->fl_qcidx, rx_ring->rx_qcidx, rx_ring->cnt, (unsigned long long)rx_ring->dma, rx_ring->rxds); @@ -1630,6 +1612,17 @@ static void nfp_net_write_mac_addr(struct nfp_net *nn, const u8 *mac) get_unaligned_be16(nn->netdev->dev_addr + 4) << 16); } +static void nfp_net_vec_clear_ring_data(struct nfp_net *nn, unsigned int idx) +{ + nn_writeq(nn, NFP_NET_CFG_RXR_ADDR(idx), 0); + nn_writeb(nn, NFP_NET_CFG_RXR_SZ(idx), 0); + nn_writeb(nn, NFP_NET_CFG_RXR_VEC(idx), 0); + + nn_writeq(nn, NFP_NET_CFG_TXR_ADDR(idx), 0); + nn_writeb(nn, NFP_NET_CFG_TXR_SZ(idx), 0); + nn_writeb(nn, NFP_NET_CFG_TXR_VEC(idx), 0); +} + /** * nfp_net_clear_config_and_disable() - Clear control BAR and disable NFP * @nn: NFP Net device to reconfigure @@ -1637,6 +1630,7 @@ static void nfp_net_write_mac_addr(struct nfp_net *nn, const u8 *mac) static void nfp_net_clear_config_and_disable(struct nfp_net *nn) { u32 new_ctrl, update; + unsigned int r; int err; new_ctrl = nn->ctrl; @@ -1658,9 +1652,26 @@ static void nfp_net_clear_config_and_disable(struct nfp_net *nn) return; } + for (r = 0; r < nn->num_r_vecs; r++) + nfp_net_vec_clear_ring_data(nn, r); + nn->ctrl = new_ctrl; } +static void +nfp_net_vec_write_ring_data(struct nfp_net *nn, struct 
nfp_net_r_vector *r_vec, + unsigned int idx) +{ + /* Write the DMA address, size and MSI-X info to the device */ + nn_writeq(nn, NFP_NET_CFG_RXR_ADDR(idx), r_vec->rx_ring->dma); + nn_writeb(nn, NFP_NET_CFG_RXR_SZ(idx), ilog2(r_vec->rx_ring->cnt)); + nn_writeb(nn, NFP_NET_CFG_RXR_VEC(idx), r_vec->irq_idx); + + nn_writeq(nn, NFP_NET_CFG_TXR_ADDR(idx), r_vec->tx_ring->dma); + nn_writeb(nn, NFP_NET_CFG_TXR_SZ(idx), ilog2(r_vec->tx_ring->cnt)); + nn_writeb(nn, NFP_NET_CFG_TXR_VEC(idx), r_vec->irq_idx); +} + /** * nfp_net_start_vec() - Start ring vector * @nn: NFP Net device structure @@ -1768,6 +1779,9 @@ static int nfp_net_netdev_open(struct net_device *netdev) * - Set the Freelist buffer size * - Enable the FW */ + for (r = 0; r < nn->num_r_vecs; r++) + nfp_net_vec_write_ring_data(nn, &nn->r_vecs[r], r); + nn_writeq(nn, NFP_NET_CFG_TXRS_ENABLE, nn->num_tx_rings == 64 ? 0xffffffffffffffffULL : ((u64)1 << nn->num_tx_rings) - 1); From 1cd0cfc498f7e928c5ff8e9ced537d41fa46df50 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Thu, 7 Apr 2016 19:39:43 +0100 Subject: [PATCH 0493/1649] nfp: slice .ndo_open() and .ndo_stop() up Divide .ndo_open() and .ndo_stop() into logical, callable chunks. No functional changes. Signed-off-by: Jakub Kicinski Signed-off-by: David S. Miller --- .../ethernet/netronome/nfp/nfp_net_common.c | 218 +++++++++++------- 1 file changed, 136 insertions(+), 82 deletions(-) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index 342335d09fb2..6c1ed8914416 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -1672,6 +1672,82 @@ nfp_net_vec_write_ring_data(struct nfp_net *nn, struct nfp_net_r_vector *r_vec, nn_writeb(nn, NFP_NET_CFG_TXR_VEC(idx), r_vec->irq_idx); } +static int __nfp_net_set_config_and_enable(struct nfp_net *nn) +{ + u32 new_ctrl, update = 0; + unsigned int r; + int err; + + new_ctrl = nn->ctrl; + + if (nn->cap & NFP_NET_CFG_CTRL_RSS) { + nfp_net_rss_write_key(nn); + nfp_net_rss_write_itbl(nn); + nn_writel(nn, NFP_NET_CFG_RSS_CTRL, nn->rss_cfg); + update |= NFP_NET_CFG_UPDATE_RSS; + } + + if (nn->cap & NFP_NET_CFG_CTRL_IRQMOD) { + nfp_net_coalesce_write_cfg(nn); + + new_ctrl |= NFP_NET_CFG_CTRL_IRQMOD; + update |= NFP_NET_CFG_UPDATE_IRQMOD; + } + + for (r = 0; r < nn->num_r_vecs; r++) + nfp_net_vec_write_ring_data(nn, &nn->r_vecs[r], r); + + nn_writeq(nn, NFP_NET_CFG_TXRS_ENABLE, nn->num_tx_rings == 64 ? + 0xffffffffffffffffULL : ((u64)1 << nn->num_tx_rings) - 1); + + nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE, nn->num_rx_rings == 64 ? + 0xffffffffffffffffULL : ((u64)1 << nn->num_rx_rings) - 1); + + nfp_net_write_mac_addr(nn, nn->netdev->dev_addr); + + nn_writel(nn, NFP_NET_CFG_MTU, nn->netdev->mtu); + nn_writel(nn, NFP_NET_CFG_FLBUFSZ, nn->fl_bufsz); + + /* Enable device */ + new_ctrl |= NFP_NET_CFG_CTRL_ENABLE; + update |= NFP_NET_CFG_UPDATE_GEN; + update |= NFP_NET_CFG_UPDATE_MSIX; + update |= NFP_NET_CFG_UPDATE_RING; + if (nn->cap & NFP_NET_CFG_CTRL_RINGCFG) + new_ctrl |= NFP_NET_CFG_CTRL_RINGCFG; + + nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl); + err = nfp_net_reconfig(nn, update); + + nn->ctrl = new_ctrl; + + /* Since reconfiguration requests while NFP is down are ignored we + * have to wipe the entire VXLAN configuration and reinitialize it. 
+ */ + if (nn->ctrl & NFP_NET_CFG_CTRL_VXLAN) { + memset(&nn->vxlan_ports, 0, sizeof(nn->vxlan_ports)); + memset(&nn->vxlan_usecnt, 0, sizeof(nn->vxlan_usecnt)); + vxlan_get_rx_port(nn->netdev); + } + + return err; +} + +/** + * nfp_net_set_config_and_enable() - Write control BAR and enable NFP + * @nn: NFP Net device to reconfigure + */ +static int nfp_net_set_config_and_enable(struct nfp_net *nn) +{ + int err; + + err = __nfp_net_set_config_and_enable(nn); + if (err) + nfp_net_clear_config_and_disable(nn); + + return err; +} + /** * nfp_net_start_vec() - Start ring vector * @nn: NFP Net device structure @@ -1692,20 +1768,33 @@ nfp_net_start_vec(struct nfp_net *nn, struct nfp_net_r_vector *r_vec) enable_irq(irq_vec); } +/** + * nfp_net_open_stack() - Start the device from stack's perspective + * @nn: NFP Net device to reconfigure + */ +static void nfp_net_open_stack(struct nfp_net *nn) +{ + unsigned int r; + + for (r = 0; r < nn->num_r_vecs; r++) + nfp_net_start_vec(nn, &nn->r_vecs[r]); + + netif_tx_wake_all_queues(nn->netdev); + + enable_irq(nn->irq_entries[NFP_NET_CFG_LSC].vector); + nfp_net_read_link_status(nn); +} + static int nfp_net_netdev_open(struct net_device *netdev) { struct nfp_net *nn = netdev_priv(netdev); int err, r; - u32 update = 0; - u32 new_ctrl; if (nn->ctrl & NFP_NET_CFG_CTRL_ENABLE) { nn_err(nn, "Dev is already enabled: 0x%08x\n", nn->ctrl); return -EBUSY; } - new_ctrl = nn->ctrl; - /* Step 1: Allocate resources for rings and the like * - Request interrupts * - Allocate RX and TX ring resources @@ -1758,20 +1847,6 @@ static int nfp_net_netdev_open(struct net_device *netdev) if (err) goto err_free_rings; - if (nn->cap & NFP_NET_CFG_CTRL_RSS) { - nfp_net_rss_write_key(nn); - nfp_net_rss_write_itbl(nn); - nn_writel(nn, NFP_NET_CFG_RSS_CTRL, nn->rss_cfg); - update |= NFP_NET_CFG_UPDATE_RSS; - } - - if (nn->cap & NFP_NET_CFG_CTRL_IRQMOD) { - nfp_net_coalesce_write_cfg(nn); - - new_ctrl |= NFP_NET_CFG_CTRL_IRQMOD; - update |= NFP_NET_CFG_UPDATE_IRQMOD; - } - /* Step 2: Configure the NFP * - Enable rings from 0 to tx_rings/rx_rings - 1. * - Write MAC address (in case it changed) @@ -1779,43 +1854,9 @@ static int nfp_net_netdev_open(struct net_device *netdev) * - Set the Freelist buffer size * - Enable the FW */ - for (r = 0; r < nn->num_r_vecs; r++) - nfp_net_vec_write_ring_data(nn, &nn->r_vecs[r], r); - - nn_writeq(nn, NFP_NET_CFG_TXRS_ENABLE, nn->num_tx_rings == 64 ? - 0xffffffffffffffffULL : ((u64)1 << nn->num_tx_rings) - 1); - - nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE, nn->num_rx_rings == 64 ? - 0xffffffffffffffffULL : ((u64)1 << nn->num_rx_rings) - 1); - - nfp_net_write_mac_addr(nn, netdev->dev_addr); - - nn_writel(nn, NFP_NET_CFG_MTU, netdev->mtu); - nn_writel(nn, NFP_NET_CFG_FLBUFSZ, nn->fl_bufsz); - - /* Enable device */ - new_ctrl |= NFP_NET_CFG_CTRL_ENABLE; - update |= NFP_NET_CFG_UPDATE_GEN; - update |= NFP_NET_CFG_UPDATE_MSIX; - update |= NFP_NET_CFG_UPDATE_RING; - if (nn->cap & NFP_NET_CFG_CTRL_RINGCFG) - new_ctrl |= NFP_NET_CFG_CTRL_RINGCFG; - - nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl); - err = nfp_net_reconfig(nn, update); + err = nfp_net_set_config_and_enable(nn); if (err) - goto err_clear_config; - - nn->ctrl = new_ctrl; - - /* Since reconfiguration requests while NFP is down are ignored we - * have to wipe the entire VXLAN configuration and reinitialize it. 
- */ - if (nn->ctrl & NFP_NET_CFG_CTRL_VXLAN) { - memset(&nn->vxlan_ports, 0, sizeof(nn->vxlan_ports)); - memset(&nn->vxlan_usecnt, 0, sizeof(nn->vxlan_usecnt)); - vxlan_get_rx_port(netdev); - } + goto err_free_rings; /* Step 3: Enable for kernel * - put some freelist descriptors on each RX ring @@ -1823,18 +1864,10 @@ static int nfp_net_netdev_open(struct net_device *netdev) * - enable all TX queues * - set link state */ - for (r = 0; r < nn->num_r_vecs; r++) - nfp_net_start_vec(nn, &nn->r_vecs[r]); - - netif_tx_wake_all_queues(netdev); - - enable_irq(nn->irq_entries[NFP_NET_CFG_LSC].vector); - nfp_net_read_link_status(nn); + nfp_net_open_stack(nn); return 0; -err_clear_config: - nfp_net_clear_config_and_disable(nn); err_free_rings: r = nn->num_r_vecs; err_free_prev_vecs: @@ -1858,36 +1891,31 @@ err_free_exn: } /** - * nfp_net_netdev_close() - Called when the device is downed - * @netdev: netdev structure + * nfp_net_close_stack() - Quiescent the stack (part of close) + * @nn: NFP Net device to reconfigure */ -static int nfp_net_netdev_close(struct net_device *netdev) +static void nfp_net_close_stack(struct nfp_net *nn) { - struct nfp_net *nn = netdev_priv(netdev); - int r; + unsigned int r; - if (!(nn->ctrl & NFP_NET_CFG_CTRL_ENABLE)) { - nn_err(nn, "Dev is not up: 0x%08x\n", nn->ctrl); - return 0; - } - - /* Step 1: Disable RX and TX rings from the Linux kernel perspective - */ disable_irq(nn->irq_entries[NFP_NET_CFG_LSC].vector); - netif_carrier_off(netdev); + netif_carrier_off(nn->netdev); nn->link_up = false; for (r = 0; r < nn->num_r_vecs; r++) napi_disable(&nn->r_vecs[r].napi); - netif_tx_disable(netdev); + netif_tx_disable(nn->netdev); +} - /* Step 2: Tell NFP - */ - nfp_net_clear_config_and_disable(nn); +/** + * nfp_net_close_free_all() - Free all runtime resources + * @nn: NFP Net device to reconfigure + */ +static void nfp_net_close_free_all(struct nfp_net *nn) +{ + unsigned int r; - /* Step 3: Free resources - */ for (r = 0; r < nn->num_r_vecs; r++) { nfp_net_rx_ring_reset(nn->r_vecs[r].rx_ring); nfp_net_rx_ring_bufs_free(nn, nn->r_vecs[r].rx_ring); @@ -1902,6 +1930,32 @@ static int nfp_net_netdev_close(struct net_device *netdev) nfp_net_aux_irq_free(nn, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX); nfp_net_aux_irq_free(nn, NFP_NET_CFG_EXN, NFP_NET_IRQ_EXN_IDX); +} + +/** + * nfp_net_netdev_close() - Called when the device is downed + * @netdev: netdev structure + */ +static int nfp_net_netdev_close(struct net_device *netdev) +{ + struct nfp_net *nn = netdev_priv(netdev); + + if (!(nn->ctrl & NFP_NET_CFG_CTRL_ENABLE)) { + nn_err(nn, "Dev is not up: 0x%08x\n", nn->ctrl); + return 0; + } + + /* Step 1: Disable RX and TX rings from the Linux kernel perspective + */ + nfp_net_close_stack(nn); + + /* Step 2: Tell NFP + */ + nfp_net_clear_config_and_disable(nn); + + /* Step 3: Free resources + */ + nfp_net_close_free_all(nn); nn_dbg(nn, "%s down", netdev->name); return 0; From aba52df80b1a2d15fe1745dfe187e9823821f5c0 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Thu, 7 Apr 2016 19:39:44 +0100 Subject: [PATCH 0494/1649] nfp: sync ring state during FW reconfiguration FW reconfiguration in .ndo_open()/.ndo_stop() should reset/ restore queue state. Since we need IRQs to be disabled when filling rings on RX path we have to move disable_irq() from .ndo_open() all the way up to IRQ allocation. nfp_net_start_vec() becomes trivial now so it's inlined. Signed-off-by: Jakub Kicinski Signed-off-by: David S. 
Miller --- .../ethernet/netronome/nfp/nfp_net_common.c | 45 +++++++------------ 1 file changed, 16 insertions(+), 29 deletions(-) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index 6c1ed8914416..ed23b9d348c3 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -1519,6 +1519,7 @@ nfp_net_prepare_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec, nn_err(nn, "Error requesting IRQ %d\n", entry->vector); return err; } + disable_irq(entry->vector); /* Setup NAPI */ netif_napi_add(nn->netdev, &r_vec->napi, @@ -1647,13 +1648,14 @@ static void nfp_net_clear_config_and_disable(struct nfp_net *nn) nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl); err = nfp_net_reconfig(nn, update); - if (err) { + if (err) nn_err(nn, "Could not disable device: %d\n", err); - return; - } - for (r = 0; r < nn->num_r_vecs; r++) + for (r = 0; r < nn->num_r_vecs; r++) { + nfp_net_rx_ring_reset(nn->r_vecs[r].rx_ring); + nfp_net_tx_ring_reset(nn, nn->r_vecs[r].tx_ring); nfp_net_vec_clear_ring_data(nn, r); + } nn->ctrl = new_ctrl; } @@ -1721,6 +1723,9 @@ static int __nfp_net_set_config_and_enable(struct nfp_net *nn) nn->ctrl = new_ctrl; + for (r = 0; r < nn->num_r_vecs; r++) + nfp_net_rx_ring_fill_freelist(nn->r_vecs[r].rx_ring); + /* Since reconfiguration requests while NFP is down are ignored we * have to wipe the entire VXLAN configuration and reinitialize it. */ @@ -1748,26 +1753,6 @@ static int nfp_net_set_config_and_enable(struct nfp_net *nn) return err; } -/** - * nfp_net_start_vec() - Start ring vector - * @nn: NFP Net device structure - * @r_vec: Ring vector to be started - */ -static void -nfp_net_start_vec(struct nfp_net *nn, struct nfp_net_r_vector *r_vec) -{ - unsigned int irq_vec; - - irq_vec = nn->irq_entries[r_vec->irq_idx].vector; - - disable_irq(irq_vec); - - nfp_net_rx_ring_fill_freelist(r_vec->rx_ring); - napi_enable(&r_vec->napi); - - enable_irq(irq_vec); -} - /** * nfp_net_open_stack() - Start the device from stack's perspective * @nn: NFP Net device to reconfigure @@ -1776,8 +1761,10 @@ static void nfp_net_open_stack(struct nfp_net *nn) { unsigned int r; - for (r = 0; r < nn->num_r_vecs; r++) - nfp_net_start_vec(nn, &nn->r_vecs[r]); + for (r = 0; r < nn->num_r_vecs; r++) { + napi_enable(&nn->r_vecs[r].napi); + enable_irq(nn->irq_entries[nn->r_vecs[r].irq_idx].vector); + } netif_tx_wake_all_queues(nn->netdev); @@ -1902,8 +1889,10 @@ static void nfp_net_close_stack(struct nfp_net *nn) netif_carrier_off(nn->netdev); nn->link_up = false; - for (r = 0; r < nn->num_r_vecs; r++) + for (r = 0; r < nn->num_r_vecs; r++) { + disable_irq(nn->irq_entries[nn->r_vecs[r].irq_idx].vector); napi_disable(&nn->r_vecs[r].napi); + } netif_tx_disable(nn->netdev); } @@ -1917,9 +1906,7 @@ static void nfp_net_close_free_all(struct nfp_net *nn) unsigned int r; for (r = 0; r < nn->num_r_vecs; r++) { - nfp_net_rx_ring_reset(nn->r_vecs[r].rx_ring); nfp_net_rx_ring_bufs_free(nn, nn->r_vecs[r].rx_ring); - nfp_net_tx_ring_reset(nn, nn->r_vecs[r].tx_ring); nfp_net_rx_ring_free(nn->r_vecs[r].rx_ring); nfp_net_tx_ring_free(nn->r_vecs[r].tx_ring); nfp_net_cleanup_vector(nn, &nn->r_vecs[r]); From 30d2117191b7437b5b6ce2f09eddf86f203c7a37 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Thu, 7 Apr 2016 19:39:45 +0100 Subject: [PATCH 0495/1649] nfp: propagate list buffer size in struct rx_ring Free list buffer size needs to be propagated to few functions as a parameter and added to struct 
nfp_net_rx_ring since soon some of the functions will be reused to manage rings with buffers of size different than nn->fl_bufsz. Signed-off-by: Jakub Kicinski Signed-off-by: David S. Miller --- drivers/net/ethernet/netronome/nfp/nfp_net.h | 3 +++ .../ethernet/netronome/nfp/nfp_net_common.c | 24 ++++++++++++------- 2 files changed, 19 insertions(+), 8 deletions(-) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h index fc005c982b7d..9ab8e3967dc9 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h @@ -298,6 +298,8 @@ struct nfp_net_rx_buf { * @rxds: Virtual address of FL/RX ring in host memory * @dma: DMA address of the FL/RX ring * @size: Size, in bytes, of the FL/RX ring (needed to free) + * @bufsz: Buffer allocation size for convenience of management routines + * (NOTE: this is in second cache line, do not use on fast path!) */ struct nfp_net_rx_ring { struct nfp_net_r_vector *r_vec; @@ -319,6 +321,7 @@ struct nfp_net_rx_ring { dma_addr_t dma; unsigned int size; + unsigned int bufsz; } ____cacheline_aligned; /** diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index ed23b9d348c3..03c60f755de0 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -957,25 +957,27 @@ static inline int nfp_net_rx_space(struct nfp_net_rx_ring *rx_ring) * nfp_net_rx_alloc_one() - Allocate and map skb for RX * @rx_ring: RX ring structure of the skb * @dma_addr: Pointer to storage for DMA address (output param) + * @fl_bufsz: size of freelist buffers * * This function will allcate a new skb, map it for DMA. * * Return: allocated skb or NULL on failure. 
*/ static struct sk_buff * -nfp_net_rx_alloc_one(struct nfp_net_rx_ring *rx_ring, dma_addr_t *dma_addr) +nfp_net_rx_alloc_one(struct nfp_net_rx_ring *rx_ring, dma_addr_t *dma_addr, + unsigned int fl_bufsz) { struct nfp_net *nn = rx_ring->r_vec->nfp_net; struct sk_buff *skb; - skb = netdev_alloc_skb(nn->netdev, nn->fl_bufsz); + skb = netdev_alloc_skb(nn->netdev, fl_bufsz); if (!skb) { nn_warn_ratelimit(nn, "Failed to alloc receive SKB\n"); return NULL; } *dma_addr = dma_map_single(&nn->pdev->dev, skb->data, - nn->fl_bufsz, DMA_FROM_DEVICE); + fl_bufsz, DMA_FROM_DEVICE); if (dma_mapping_error(&nn->pdev->dev, *dma_addr)) { dev_kfree_skb_any(skb); nn_warn_ratelimit(nn, "Failed to map DMA RX buffer\n"); @@ -1068,7 +1070,7 @@ nfp_net_rx_ring_bufs_free(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring) continue; dma_unmap_single(&pdev->dev, rx_ring->rxbufs[i].dma_addr, - nn->fl_bufsz, DMA_FROM_DEVICE); + rx_ring->bufsz, DMA_FROM_DEVICE); dev_kfree_skb_any(rx_ring->rxbufs[i].skb); rx_ring->rxbufs[i].dma_addr = 0; rx_ring->rxbufs[i].skb = NULL; @@ -1090,7 +1092,8 @@ nfp_net_rx_ring_bufs_alloc(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring) for (i = 0; i < rx_ring->cnt - 1; i++) { rxbufs[i].skb = - nfp_net_rx_alloc_one(rx_ring, &rxbufs[i].dma_addr); + nfp_net_rx_alloc_one(rx_ring, &rxbufs[i].dma_addr, + rx_ring->bufsz); if (!rxbufs[i].skb) { nfp_net_rx_ring_bufs_free(nn, rx_ring); return -ENOMEM; @@ -1278,7 +1281,8 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget) skb = rx_ring->rxbufs[idx].skb; - new_skb = nfp_net_rx_alloc_one(rx_ring, &new_dma_addr); + new_skb = nfp_net_rx_alloc_one(rx_ring, &new_dma_addr, + nn->fl_bufsz); if (!new_skb) { nfp_net_rx_give_one(rx_ring, rx_ring->rxbufs[idx].skb, rx_ring->rxbufs[idx].dma_addr); @@ -1465,10 +1469,12 @@ static void nfp_net_rx_ring_free(struct nfp_net_rx_ring *rx_ring) /** * nfp_net_rx_ring_alloc() - Allocate resource for a RX ring * @rx_ring: RX ring to allocate + * @fl_bufsz: Size of buffers to allocate * * Return: 0 on success, negative errno otherwise. */ -static int nfp_net_rx_ring_alloc(struct nfp_net_rx_ring *rx_ring) +static int +nfp_net_rx_ring_alloc(struct nfp_net_rx_ring *rx_ring, unsigned int fl_bufsz) { struct nfp_net_r_vector *r_vec = rx_ring->r_vec; struct nfp_net *nn = r_vec->nfp_net; @@ -1476,6 +1482,7 @@ static int nfp_net_rx_ring_alloc(struct nfp_net_rx_ring *rx_ring) int sz; rx_ring->cnt = nn->rxd_cnt; + rx_ring->bufsz = fl_bufsz; rx_ring->size = sizeof(*rx_ring->rxds) * rx_ring->cnt; rx_ring->rxds = dma_zalloc_coherent(&pdev->dev, rx_ring->size, @@ -1817,7 +1824,8 @@ static int nfp_net_netdev_open(struct net_device *netdev) if (err) goto err_cleanup_vec_p; - err = nfp_net_rx_ring_alloc(nn->r_vecs[r].rx_ring); + err = nfp_net_rx_ring_alloc(nn->r_vecs[r].rx_ring, + nn->fl_bufsz); if (err) goto err_free_tx_ring_p; From 36a857e4f2c9783cd573c948df022011cb386aa4 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Thu, 7 Apr 2016 19:39:46 +0100 Subject: [PATCH 0496/1649] nfp: convert .ndo_change_mtu() to prepare/commit paradigm When changing MTU on running device first allocate new rings and buffers and once it succeeds proceed with changing MTU. Allocation of new rings is not really necessary for this operation - it's done to keep the code simple and because size of the extra ring memory is quite small compared to the size of buffers. Operation can still fail midway through if FW communication times out. In that case we retry with old MTU (rings). Signed-off-by: Jakub Kicinski Signed-off-by: David S. 
Miller --- .../ethernet/netronome/nfp/nfp_net_common.c | 110 ++++++++++++++++-- 1 file changed, 103 insertions(+), 7 deletions(-) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index 03c60f755de0..e7c420fdcb0d 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -1506,6 +1506,64 @@ err_alloc: return -ENOMEM; } +static struct nfp_net_rx_ring * +nfp_net_shadow_rx_rings_prepare(struct nfp_net *nn, unsigned int fl_bufsz) +{ + struct nfp_net_rx_ring *rings; + unsigned int r; + + rings = kcalloc(nn->num_rx_rings, sizeof(*rings), GFP_KERNEL); + if (!rings) + return NULL; + + for (r = 0; r < nn->num_rx_rings; r++) { + nfp_net_rx_ring_init(&rings[r], nn->rx_rings[r].r_vec, r); + + if (nfp_net_rx_ring_alloc(&rings[r], fl_bufsz)) + goto err_free_prev; + + if (nfp_net_rx_ring_bufs_alloc(nn, &rings[r])) + goto err_free_ring; + } + + return rings; + +err_free_prev: + while (r--) { + nfp_net_rx_ring_bufs_free(nn, &rings[r]); +err_free_ring: + nfp_net_rx_ring_free(&rings[r]); + } + kfree(rings); + return NULL; +} + +static struct nfp_net_rx_ring * +nfp_net_shadow_rx_rings_swap(struct nfp_net *nn, struct nfp_net_rx_ring *rings) +{ + struct nfp_net_rx_ring *old = nn->rx_rings; + unsigned int r; + + for (r = 0; r < nn->num_rx_rings; r++) + old[r].r_vec->rx_ring = &rings[r]; + + nn->rx_rings = rings; + return old; +} + +static void +nfp_net_shadow_rx_rings_free(struct nfp_net *nn, struct nfp_net_rx_ring *rings) +{ + unsigned int r; + + for (r = 0; r < nn->num_r_vecs; r++) { + nfp_net_rx_ring_bufs_free(nn, &rings[r]); + nfp_net_rx_ring_free(&rings[r]); + } + + kfree(rings); +} + static int nfp_net_prepare_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec, int idx) @@ -1984,23 +2042,61 @@ static void nfp_net_set_rx_mode(struct net_device *netdev) static int nfp_net_change_mtu(struct net_device *netdev, int new_mtu) { + unsigned int old_mtu, old_fl_bufsz, new_fl_bufsz; struct nfp_net *nn = netdev_priv(netdev); + struct nfp_net_rx_ring *tmp_rings; + int err; if (new_mtu < 68 || new_mtu > nn->max_mtu) { nn_err(nn, "New MTU (%d) is not valid\n", new_mtu); return -EINVAL; } - netdev->mtu = new_mtu; - nn->fl_bufsz = NFP_NET_MAX_PREPEND + ETH_HLEN + VLAN_HLEN * 2 + new_mtu; + old_mtu = netdev->mtu; + old_fl_bufsz = nn->fl_bufsz; + new_fl_bufsz = NFP_NET_MAX_PREPEND + ETH_HLEN + VLAN_HLEN * 2 + new_mtu; - /* restart if running */ - if (netif_running(netdev)) { - nfp_net_netdev_close(netdev); - nfp_net_netdev_open(netdev); + if (!netif_running(netdev)) { + netdev->mtu = new_mtu; + nn->fl_bufsz = new_fl_bufsz; + return 0; } - return 0; + /* Prepare new rings */ + tmp_rings = nfp_net_shadow_rx_rings_prepare(nn, new_fl_bufsz); + if (!tmp_rings) + return -ENOMEM; + + /* Stop device, swap in new rings, try to start the firmware */ + nfp_net_close_stack(nn); + nfp_net_clear_config_and_disable(nn); + + tmp_rings = nfp_net_shadow_rx_rings_swap(nn, tmp_rings); + + netdev->mtu = new_mtu; + nn->fl_bufsz = new_fl_bufsz; + + err = nfp_net_set_config_and_enable(nn); + if (err) { + const int err_new = err; + + /* Try with old configuration and old rings */ + tmp_rings = nfp_net_shadow_rx_rings_swap(nn, tmp_rings); + + netdev->mtu = old_mtu; + nn->fl_bufsz = old_fl_bufsz; + + err = __nfp_net_set_config_and_enable(nn); + if (err) + nn_err(nn, "Can't restore MTU - FW communication failed (%d,%d)\n", + err_new, err); + } + + nfp_net_shadow_rx_rings_free(nn, tmp_rings); + + 
nfp_net_open_stack(nn); + + return err; } static struct rtnl_link_stats64 *nfp_net_stat64(struct net_device *netdev, From a98cb2581211023539887a11f8391dd615409ab8 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Thu, 7 Apr 2016 19:39:47 +0100 Subject: [PATCH 0497/1649] nfp: pass ring count as function parameter Soon ring resize will call this functions with values different than the current configuration we need to explicitly pass the ring count as parameter. Signed-off-by: Jakub Kicinski Signed-off-by: David S. Miller --- .../ethernet/netronome/nfp/nfp_net_common.c | 23 +++++++++++-------- 1 file changed, 14 insertions(+), 9 deletions(-) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index e7c420fdcb0d..c4f0c70e77ce 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -1407,17 +1407,18 @@ static void nfp_net_tx_ring_free(struct nfp_net_tx_ring *tx_ring) /** * nfp_net_tx_ring_alloc() - Allocate resource for a TX ring * @tx_ring: TX Ring structure to allocate + * @cnt: Ring buffer count * * Return: 0 on success, negative errno otherwise. */ -static int nfp_net_tx_ring_alloc(struct nfp_net_tx_ring *tx_ring) +static int nfp_net_tx_ring_alloc(struct nfp_net_tx_ring *tx_ring, u32 cnt) { struct nfp_net_r_vector *r_vec = tx_ring->r_vec; struct nfp_net *nn = r_vec->nfp_net; struct pci_dev *pdev = nn->pdev; int sz; - tx_ring->cnt = nn->txd_cnt; + tx_ring->cnt = cnt; tx_ring->size = sizeof(*tx_ring->txds) * tx_ring->cnt; tx_ring->txds = dma_zalloc_coherent(&pdev->dev, tx_ring->size, @@ -1470,18 +1471,20 @@ static void nfp_net_rx_ring_free(struct nfp_net_rx_ring *rx_ring) * nfp_net_rx_ring_alloc() - Allocate resource for a RX ring * @rx_ring: RX ring to allocate * @fl_bufsz: Size of buffers to allocate + * @cnt: Ring buffer count * * Return: 0 on success, negative errno otherwise. 
*/ static int -nfp_net_rx_ring_alloc(struct nfp_net_rx_ring *rx_ring, unsigned int fl_bufsz) +nfp_net_rx_ring_alloc(struct nfp_net_rx_ring *rx_ring, unsigned int fl_bufsz, + u32 cnt) { struct nfp_net_r_vector *r_vec = rx_ring->r_vec; struct nfp_net *nn = r_vec->nfp_net; struct pci_dev *pdev = nn->pdev; int sz; - rx_ring->cnt = nn->rxd_cnt; + rx_ring->cnt = cnt; rx_ring->bufsz = fl_bufsz; rx_ring->size = sizeof(*rx_ring->rxds) * rx_ring->cnt; @@ -1507,7 +1510,8 @@ err_alloc: } static struct nfp_net_rx_ring * -nfp_net_shadow_rx_rings_prepare(struct nfp_net *nn, unsigned int fl_bufsz) +nfp_net_shadow_rx_rings_prepare(struct nfp_net *nn, unsigned int fl_bufsz, + u32 buf_cnt) { struct nfp_net_rx_ring *rings; unsigned int r; @@ -1519,7 +1523,7 @@ nfp_net_shadow_rx_rings_prepare(struct nfp_net *nn, unsigned int fl_bufsz) for (r = 0; r < nn->num_rx_rings; r++) { nfp_net_rx_ring_init(&rings[r], nn->rx_rings[r].r_vec, r); - if (nfp_net_rx_ring_alloc(&rings[r], fl_bufsz)) + if (nfp_net_rx_ring_alloc(&rings[r], fl_bufsz, buf_cnt)) goto err_free_prev; if (nfp_net_rx_ring_bufs_alloc(nn, &rings[r])) @@ -1878,12 +1882,12 @@ static int nfp_net_netdev_open(struct net_device *netdev) if (err) goto err_free_prev_vecs; - err = nfp_net_tx_ring_alloc(nn->r_vecs[r].tx_ring); + err = nfp_net_tx_ring_alloc(nn->r_vecs[r].tx_ring, nn->txd_cnt); if (err) goto err_cleanup_vec_p; err = nfp_net_rx_ring_alloc(nn->r_vecs[r].rx_ring, - nn->fl_bufsz); + nn->fl_bufsz, nn->rxd_cnt); if (err) goto err_free_tx_ring_p; @@ -2063,7 +2067,8 @@ static int nfp_net_change_mtu(struct net_device *netdev, int new_mtu) } /* Prepare new rings */ - tmp_rings = nfp_net_shadow_rx_rings_prepare(nn, new_fl_bufsz); + tmp_rings = nfp_net_shadow_rx_rings_prepare(nn, new_fl_bufsz, + nn->rxd_cnt); if (!tmp_rings) return -ENOMEM; From cc7c033330fd67dd9d66a1ccb8c9d42381107bcd Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Thu, 7 Apr 2016 19:39:48 +0100 Subject: [PATCH 0498/1649] nfp: allow ring size reconfiguration at runtime Since much of the required changes have already been made for changing MTU at runtime let's use it for ring size changes as well. Signed-off-by: Jakub Kicinski Signed-off-by: David S. 
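
For context, the request that ends up in the new nfp_net_set_ring_size() typically originates from "ethtool -G <dev> rx N tx N" in user space, which is just an ETHTOOL_SRINGPARAM ioctl routed to the driver's .set_ringparam callback. A minimal user-space sketch (hypothetical helper; error handling and the customary ETHTOOL_GRINGPARAM read-back are omitted):

	#include <string.h>
	#include <sys/ioctl.h>
	#include <net/if.h>
	#include <linux/ethtool.h>
	#include <linux/sockios.h>

	/* 'fd' is any AF_INET/SOCK_DGRAM socket; returns the ioctl result. */
	static int request_ring_resize(int fd, const char *dev, __u32 rx, __u32 tx)
	{
		struct ethtool_ringparam ering = { .cmd = ETHTOOL_SRINGPARAM };
		struct ifreq ifr;

		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, dev, IFNAMSIZ - 1);
		ering.rx_pending = rx;	/* the driver rounds and range-checks these */
		ering.tx_pending = tx;
		/* mini/jumbo fields stay zero, which is what the nfp handler expects */
		ifr.ifr_data = (void *)&ering;

		return ioctl(fd, SIOCETHTOOL, &ifr);
	}
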
Miller --- drivers/net/ethernet/netronome/nfp/nfp_net.h | 1 + .../ethernet/netronome/nfp/nfp_net_common.c | 126 ++++++++++++++++++ .../ethernet/netronome/nfp/nfp_net_ethtool.c | 30 ++--- 3 files changed, 136 insertions(+), 21 deletions(-) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h index 9ab8e3967dc9..3d53fcf323eb 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h @@ -724,6 +724,7 @@ void nfp_net_rss_write_key(struct nfp_net *nn); void nfp_net_coalesce_write_cfg(struct nfp_net *nn); int nfp_net_irqs_alloc(struct nfp_net *nn); void nfp_net_irqs_disable(struct nfp_net *nn); +int nfp_net_set_ring_size(struct nfp_net *nn, u32 rxd_cnt, u32 txd_cnt); #ifdef CONFIG_NFP_NET_DEBUG void nfp_net_debugfs_create(void); diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index c4f0c70e77ce..0bdff390c958 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -1444,6 +1444,59 @@ err_alloc: return -ENOMEM; } +static struct nfp_net_tx_ring * +nfp_net_shadow_tx_rings_prepare(struct nfp_net *nn, u32 buf_cnt) +{ + struct nfp_net_tx_ring *rings; + unsigned int r; + + rings = kcalloc(nn->num_tx_rings, sizeof(*rings), GFP_KERNEL); + if (!rings) + return NULL; + + for (r = 0; r < nn->num_tx_rings; r++) { + nfp_net_tx_ring_init(&rings[r], nn->tx_rings[r].r_vec, r); + + if (nfp_net_tx_ring_alloc(&rings[r], buf_cnt)) + goto err_free_prev; + } + + return rings; + +err_free_prev: + while (r--) + nfp_net_tx_ring_free(&rings[r]); + kfree(rings); + return NULL; +} + +static struct nfp_net_tx_ring * +nfp_net_shadow_tx_rings_swap(struct nfp_net *nn, struct nfp_net_tx_ring *rings) +{ + struct nfp_net_tx_ring *old = nn->tx_rings; + unsigned int r; + + for (r = 0; r < nn->num_tx_rings; r++) + old[r].r_vec->tx_ring = &rings[r]; + + nn->tx_rings = rings; + return old; +} + +static void +nfp_net_shadow_tx_rings_free(struct nfp_net *nn, struct nfp_net_tx_ring *rings) +{ + unsigned int r; + + if (!rings) + return; + + for (r = 0; r < nn->num_tx_rings; r++) + nfp_net_tx_ring_free(&rings[r]); + + kfree(rings); +} + /** * nfp_net_rx_ring_free() - Free resources allocated to a RX ring * @rx_ring: RX ring to free @@ -1560,6 +1613,9 @@ nfp_net_shadow_rx_rings_free(struct nfp_net *nn, struct nfp_net_rx_ring *rings) { unsigned int r; + if (!rings) + return; + for (r = 0; r < nn->num_r_vecs; r++) { nfp_net_rx_ring_bufs_free(nn, &rings[r]); nfp_net_rx_ring_free(&rings[r]); @@ -2104,6 +2160,76 @@ static int nfp_net_change_mtu(struct net_device *netdev, int new_mtu) return err; } +int nfp_net_set_ring_size(struct nfp_net *nn, u32 rxd_cnt, u32 txd_cnt) +{ + struct nfp_net_tx_ring *tx_rings = NULL; + struct nfp_net_rx_ring *rx_rings = NULL; + u32 old_rxd_cnt, old_txd_cnt; + int err; + + if (!netif_running(nn->netdev)) { + nn->rxd_cnt = rxd_cnt; + nn->txd_cnt = txd_cnt; + return 0; + } + + old_rxd_cnt = nn->rxd_cnt; + old_txd_cnt = nn->txd_cnt; + + /* Prepare new rings */ + if (nn->rxd_cnt != rxd_cnt) { + rx_rings = nfp_net_shadow_rx_rings_prepare(nn, nn->fl_bufsz, + rxd_cnt); + if (!rx_rings) + return -ENOMEM; + } + if (nn->txd_cnt != txd_cnt) { + tx_rings = nfp_net_shadow_tx_rings_prepare(nn, txd_cnt); + if (!tx_rings) { + nfp_net_shadow_rx_rings_free(nn, rx_rings); + return -ENOMEM; + } + } + + /* Stop device, swap in new rings, try to start the firmware */ + nfp_net_close_stack(nn); + 
nfp_net_clear_config_and_disable(nn); + + if (rx_rings) + rx_rings = nfp_net_shadow_rx_rings_swap(nn, rx_rings); + if (tx_rings) + tx_rings = nfp_net_shadow_tx_rings_swap(nn, tx_rings); + + nn->rxd_cnt = rxd_cnt; + nn->txd_cnt = txd_cnt; + + err = nfp_net_set_config_and_enable(nn); + if (err) { + const int err_new = err; + + /* Try with old configuration and old rings */ + if (rx_rings) + rx_rings = nfp_net_shadow_rx_rings_swap(nn, rx_rings); + if (tx_rings) + tx_rings = nfp_net_shadow_tx_rings_swap(nn, tx_rings); + + nn->rxd_cnt = old_rxd_cnt; + nn->txd_cnt = old_txd_cnt; + + err = __nfp_net_set_config_and_enable(nn); + if (err) + nn_err(nn, "Can't restore ring config - FW communication failed (%d,%d)\n", + err_new, err); + } + + nfp_net_shadow_rx_rings_free(nn, rx_rings); + nfp_net_shadow_tx_rings_free(nn, tx_rings); + + nfp_net_open_stack(nn); + + return err; +} + static struct rtnl_link_stats64 *nfp_net_stat64(struct net_device *netdev, struct rtnl_link_stats64 *stats) { diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c index 9a4084a68db5..ccfef1f17627 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c @@ -153,37 +153,25 @@ static int nfp_net_set_ringparam(struct net_device *netdev, struct nfp_net *nn = netdev_priv(netdev); u32 rxd_cnt, txd_cnt; - if (netif_running(netdev)) { - /* Some NIC drivers allow reconfiguration on the fly, - * some down the interface, change and then up it - * again. For now we don't allow changes when the - * device is up. - */ - nn_warn(nn, "Can't change rings while device is up\n"); - return -EBUSY; - } - /* We don't have separate queues/rings for small/large frames. */ if (ring->rx_mini_pending || ring->rx_jumbo_pending) return -EINVAL; /* Round up to supported values */ rxd_cnt = roundup_pow_of_two(ring->rx_pending); - rxd_cnt = max_t(u32, rxd_cnt, NFP_NET_MIN_RX_DESCS); - rxd_cnt = min_t(u32, rxd_cnt, NFP_NET_MAX_RX_DESCS); - txd_cnt = roundup_pow_of_two(ring->tx_pending); - txd_cnt = max_t(u32, txd_cnt, NFP_NET_MIN_TX_DESCS); - txd_cnt = min_t(u32, txd_cnt, NFP_NET_MAX_TX_DESCS); - if (nn->rxd_cnt != rxd_cnt || nn->txd_cnt != txd_cnt) - nn_dbg(nn, "Change ring size: RxQ %u->%u, TxQ %u->%u\n", - nn->rxd_cnt, rxd_cnt, nn->txd_cnt, txd_cnt); + if (rxd_cnt < NFP_NET_MIN_RX_DESCS || rxd_cnt > NFP_NET_MAX_RX_DESCS || + txd_cnt < NFP_NET_MIN_TX_DESCS || txd_cnt > NFP_NET_MAX_TX_DESCS) + return -EINVAL; - nn->rxd_cnt = rxd_cnt; - nn->txd_cnt = txd_cnt; + if (nn->rxd_cnt == rxd_cnt && nn->txd_cnt == txd_cnt) + return 0; - return 0; + nn_dbg(nn, "Change ring size: RxQ %u->%u, TxQ %u->%u\n", + nn->rxd_cnt, rxd_cnt, nn->txd_cnt, txd_cnt); + + return nfp_net_set_ring_size(nn, rxd_cnt, txd_cnt); } static void nfp_net_get_strings(struct net_device *netdev, From a9844881ba19d15d274bd684d4de0758bbd71c90 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Fri, 8 Apr 2016 19:11:20 +0200 Subject: [PATCH 0499/1649] devlink: remove implicit type set in port register As we rely on caller zeroing or correctly set the struct before the call, this implicit type set is either no-op (DEVLINK_PORT_TYPE_NOTSET is 0) or it rewrites wanted value. So remove this. Signed-off-by: Jiri Pirko Signed-off-by: David S. 
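
In other words, the removed assignment was either redundant or harmful. A hypothetical caller sketch (my_port and my_port_register are invented names) showing the zeroed-allocation case the commit message relies on:

	#include <linux/slab.h>
	#include <net/devlink.h>

	struct my_port {
		struct devlink_port dl_port;
		/* ... driver-private fields ... */
	};

	static int my_port_register(struct devlink *devlink, unsigned int index)
	{
		struct my_port *port;
		int err;

		port = kzalloc(sizeof(*port), GFP_KERNEL);
		if (!port)
			return -ENOMEM;

		/* dl_port.type is already DEVLINK_PORT_TYPE_NOTSET (0) here, so
		 * the assignment removed above added nothing; had the driver set
		 * a type in advance, it would have been overwritten instead.
		 */
		err = devlink_port_register(devlink, &port->dl_port, index);
		if (err)
			kfree(port);
		return err;
	}
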
Miller --- net/core/devlink.c | 1 - 1 file changed, 1 deletion(-) diff --git a/net/core/devlink.c b/net/core/devlink.c index 590fa561cb7f..44f880d3b816 100644 --- a/net/core/devlink.c +++ b/net/core/devlink.c @@ -630,7 +630,6 @@ int devlink_port_register(struct devlink *devlink, } devlink_port->devlink = devlink; devlink_port->index = port_index; - devlink_port->type = DEVLINK_PORT_TYPE_NOTSET; devlink_port->registered = true; list_add_tail(&devlink_port->list, &devlink->port_list); mutex_unlock(&devlink_port_mutex); From 932762b69a282d3fa12febc1a02628f0fb79a1b8 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Fri, 8 Apr 2016 19:11:21 +0200 Subject: [PATCH 0500/1649] mlxsw: Move devlink port registration into common core code Remove devlink port reg/unreg from spectrum and switchx2 code and rather do the common work in core. That also ensures code separation where devlink is only used in core.c. Signed-off-by: Jiri Pirko Reviewed-by: Ido Schimmel Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/core.c | 22 +++++++++++++ drivers/net/ethernet/mellanox/mlxsw/core.h | 10 ++++++ .../net/ethernet/mellanox/mlxsw/spectrum.c | 31 +++++++------------ .../net/ethernet/mellanox/mlxsw/spectrum.h | 3 +- .../net/ethernet/mellanox/mlxsw/switchx2.c | 30 +++++++----------- 5 files changed, 55 insertions(+), 41 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c index f69f6280519f..004fb8b50fab 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core.c @@ -1358,6 +1358,28 @@ void mlxsw_core_lag_mapping_clear(struct mlxsw_core *mlxsw_core, } EXPORT_SYMBOL(mlxsw_core_lag_mapping_clear); +int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, + struct mlxsw_core_port *mlxsw_core_port, u8 local_port, + struct net_device *dev, bool split, u32 split_group) +{ + struct devlink *devlink = priv_to_devlink(mlxsw_core); + struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port; + + if (split) + devlink_port_split_set(devlink_port, split_group); + devlink_port_type_eth_set(devlink_port, dev); + return devlink_port_register(devlink, devlink_port, local_port); +} +EXPORT_SYMBOL(mlxsw_core_port_init); + +void mlxsw_core_port_fini(struct mlxsw_core_port *mlxsw_core_port) +{ + struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port; + + devlink_port_unregister(devlink_port); +} +EXPORT_SYMBOL(mlxsw_core_port_fini); + int mlxsw_cmd_exec(struct mlxsw_core *mlxsw_core, u16 opcode, u8 opcode_mod, u32 in_mod, bool out_mbox_direct, char *in_mbox, size_t in_mbox_size, diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h index c73d1c0792a6..06631a0136a5 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core.h @@ -43,6 +43,7 @@ #include #include #include +#include #include "trap.h" #include "reg.h" @@ -131,6 +132,15 @@ u8 mlxsw_core_lag_mapping_get(struct mlxsw_core *mlxsw_core, void mlxsw_core_lag_mapping_clear(struct mlxsw_core *mlxsw_core, u16 lag_id, u8 local_port); +struct mlxsw_core_port { + struct devlink_port devlink_port; +}; + +int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, + struct mlxsw_core_port *mlxsw_core_port, u8 local_port, + struct net_device *dev, bool split, u32 split_group); +void mlxsw_core_port_fini(struct mlxsw_core_port *mlxsw_core_port); + #define MLXSW_CONFIG_PROFILE_SWID_COUNT 8 struct mlxsw_swid_config { diff --git 
a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 507263a2d226..3216f2b9844f 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -50,7 +50,6 @@ #include #include #include -#include #include #include @@ -1685,9 +1684,7 @@ static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port) static int __mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, bool split, u8 module, u8 width) { - struct devlink *devlink = priv_to_devlink(mlxsw_sp->core); struct mlxsw_sp_port *mlxsw_sp_port; - struct devlink_port *devlink_port; struct net_device *dev; size_t bytes; int err; @@ -1740,16 +1737,6 @@ static int __mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, */ dev->hard_header_len += MLXSW_TXHDR_LEN; - devlink_port = &mlxsw_sp_port->devlink_port; - if (mlxsw_sp_port->split) - devlink_port_split_set(devlink_port, module); - err = devlink_port_register(devlink, devlink_port, local_port); - if (err) { - dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register devlink port\n", - mlxsw_sp_port->local_port); - goto err_devlink_port_register; - } - err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port); if (err) { dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n", @@ -1812,7 +1799,14 @@ static int __mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, goto err_register_netdev; } - devlink_port_type_eth_set(devlink_port, dev); + err = mlxsw_core_port_init(mlxsw_sp->core, &mlxsw_sp_port->core_port, + mlxsw_sp_port->local_port, dev, + mlxsw_sp_port->split, module); + if (err) { + dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n", + mlxsw_sp_port->local_port); + goto err_core_port_init; + } err = mlxsw_sp_port_vlan_init(mlxsw_sp_port); if (err) @@ -1822,6 +1816,8 @@ static int __mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, return 0; err_port_vlan_init: + mlxsw_core_port_fini(&mlxsw_sp_port->core_port); +err_core_port_init: unregister_netdev(dev); err_register_netdev: err_port_dcb_init: @@ -1832,8 +1828,6 @@ err_port_mtu_set: err_port_speed_by_width_set: err_port_swid_set: err_port_system_port_mapping_set: - devlink_port_unregister(&mlxsw_sp_port->devlink_port); -err_devlink_port_register: err_dev_addr_init: free_percpu(mlxsw_sp_port->pcpu_stats); err_alloc_stats: @@ -1887,16 +1881,13 @@ static void mlxsw_sp_port_vports_fini(struct mlxsw_sp_port *mlxsw_sp_port) static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port) { struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port]; - struct devlink_port *devlink_port; if (!mlxsw_sp_port) return; mlxsw_sp->ports[local_port] = NULL; - devlink_port = &mlxsw_sp_port->devlink_port; - devlink_port_type_clear(devlink_port); + mlxsw_core_port_fini(&mlxsw_sp_port->core_port); unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */ mlxsw_sp_port_dcb_fini(mlxsw_sp_port); - devlink_port_unregister(devlink_port); mlxsw_sp_port_vports_fini(mlxsw_sp_port); mlxsw_sp_port_switchdev_fini(mlxsw_sp_port); mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index 47610a5ccd78..361b0c270b56 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -44,7 +44,6 @@ #include #include #include -#include #include "port.h" #include "core.h" @@ 
-166,6 +165,7 @@ struct mlxsw_sp_port_pcpu_stats { }; struct mlxsw_sp_port { + struct mlxsw_core_port core_port; /* must be first */ struct net_device *dev; struct mlxsw_sp_port_pcpu_stats __percpu *pcpu_stats; struct mlxsw_sp *mlxsw_sp; @@ -198,7 +198,6 @@ struct mlxsw_sp_port { unsigned long *untagged_vlans; /* VLAN interfaces */ struct list_head vports_list; - struct devlink_port devlink_port; }; static inline bool diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c index c49447f31acc..2417f099931b 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c +++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c @@ -43,7 +43,6 @@ #include #include #include -#include #include #include @@ -75,11 +74,11 @@ struct mlxsw_sx_port_pcpu_stats { }; struct mlxsw_sx_port { + struct mlxsw_core_port core_port; /* must be first */ struct net_device *dev; struct mlxsw_sx_port_pcpu_stats __percpu *pcpu_stats; struct mlxsw_sx *mlxsw_sx; u8 local_port; - struct devlink_port devlink_port; }; /* tx_hdr_version @@ -956,9 +955,7 @@ mlxsw_sx_port_mac_learning_mode_set(struct mlxsw_sx_port *mlxsw_sx_port, static int mlxsw_sx_port_create(struct mlxsw_sx *mlxsw_sx, u8 local_port) { - struct devlink *devlink = priv_to_devlink(mlxsw_sx->core); struct mlxsw_sx_port *mlxsw_sx_port; - struct devlink_port *devlink_port; struct net_device *dev; bool usable; int err; @@ -1012,14 +1009,6 @@ static int mlxsw_sx_port_create(struct mlxsw_sx *mlxsw_sx, u8 local_port) goto port_not_usable; } - devlink_port = &mlxsw_sx_port->devlink_port; - err = devlink_port_register(devlink, devlink_port, local_port); - if (err) { - dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to register devlink port\n", - mlxsw_sx_port->local_port); - goto err_devlink_port_register; - } - err = mlxsw_sx_port_system_port_mapping_set(mlxsw_sx_port); if (err) { dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set system port mapping\n", @@ -1077,11 +1066,19 @@ static int mlxsw_sx_port_create(struct mlxsw_sx *mlxsw_sx, u8 local_port) goto err_register_netdev; } - devlink_port_type_eth_set(devlink_port, dev); + err = mlxsw_core_port_init(mlxsw_sx->core, &mlxsw_sx_port->core_port, + mlxsw_sx_port->local_port, dev, false, 0); + if (err) { + dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to init core port\n", + mlxsw_sx_port->local_port); + goto err_core_port_init; + } mlxsw_sx->ports[local_port] = mlxsw_sx_port; return 0; +err_core_port_init: + unregister_netdev(dev); err_register_netdev: err_port_mac_learning_mode_set: err_port_stp_state_set: @@ -1090,8 +1087,6 @@ err_port_mtu_set: err_port_speed_set: err_port_swid_set: err_port_system_port_mapping_set: - devlink_port_unregister(&mlxsw_sx_port->devlink_port); -err_devlink_port_register: port_not_usable: err_port_module_check: err_dev_addr_get: @@ -1104,15 +1099,12 @@ err_alloc_stats: static void mlxsw_sx_port_remove(struct mlxsw_sx *mlxsw_sx, u8 local_port) { struct mlxsw_sx_port *mlxsw_sx_port = mlxsw_sx->ports[local_port]; - struct devlink_port *devlink_port; if (!mlxsw_sx_port) return; - devlink_port = &mlxsw_sx_port->devlink_port; - devlink_port_type_clear(devlink_port); + mlxsw_core_port_fini(&mlxsw_sx_port->core_port); unregister_netdev(mlxsw_sx_port->dev); /* This calls ndo_stop */ mlxsw_sx_port_swid_set(mlxsw_sx_port, MLXSW_PORT_SWID_DISABLED_PORT); - devlink_port_unregister(devlink_port); free_percpu(mlxsw_sx_port->pcpu_stats); free_netdev(mlxsw_sx_port->dev); } From 307c2431abf0974996356c13b67432f4b35e5f2f Mon Sep 17 00:00:00 2001 From: 
Jiri Pirko Date: Fri, 8 Apr 2016 19:11:22 +0200 Subject: [PATCH 0501/1649] mlxsw: Pass mlxsw_core as a param of mlxsw_core_skb_transmit* Instead of passing around driver priv, pass struct mlxsw_core * directly. Signed-off-by: Jiri Pirko Reviewed-by: Ido Schimmel Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/core.c | 15 +++------------ drivers/net/ethernet/mellanox/mlxsw/core.h | 5 ++--- drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 4 ++-- drivers/net/ethernet/mellanox/mlxsw/switchx2.c | 4 ++-- 4 files changed, 9 insertions(+), 19 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c index 004fb8b50fab..39161fb91ec1 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core.c @@ -381,7 +381,7 @@ static int __mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core, mlxsw_core->emad.trans_active = true; - err = mlxsw_core_skb_transmit(mlxsw_core->driver_priv, skb, tx_info); + err = mlxsw_core_skb_transmit(mlxsw_core, skb, tx_info); if (err) { dev_err(mlxsw_core->bus_info->dev, "Failed to transmit EMAD (tid=%llx)\n", mlxsw_core->emad.tid); @@ -929,26 +929,17 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core) } EXPORT_SYMBOL(mlxsw_core_bus_device_unregister); -static struct mlxsw_core *__mlxsw_core_get(void *driver_priv) -{ - return container_of(driver_priv, struct mlxsw_core, driver_priv); -} - -bool mlxsw_core_skb_transmit_busy(void *driver_priv, +bool mlxsw_core_skb_transmit_busy(struct mlxsw_core *mlxsw_core, const struct mlxsw_tx_info *tx_info) { - struct mlxsw_core *mlxsw_core = __mlxsw_core_get(driver_priv); - return mlxsw_core->bus->skb_transmit_busy(mlxsw_core->bus_priv, tx_info); } EXPORT_SYMBOL(mlxsw_core_skb_transmit_busy); -int mlxsw_core_skb_transmit(void *driver_priv, struct sk_buff *skb, +int mlxsw_core_skb_transmit(struct mlxsw_core *mlxsw_core, struct sk_buff *skb, const struct mlxsw_tx_info *tx_info) { - struct mlxsw_core *mlxsw_core = __mlxsw_core_get(driver_priv); - return mlxsw_core->bus->skb_transmit(mlxsw_core->bus_priv, skb, tx_info); } diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h index 06631a0136a5..0454212a86d1 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core.h @@ -75,10 +75,9 @@ struct mlxsw_tx_info { bool is_emad; }; -bool mlxsw_core_skb_transmit_busy(void *driver_priv, +bool mlxsw_core_skb_transmit_busy(struct mlxsw_core *mlxsw_core, const struct mlxsw_tx_info *tx_info); - -int mlxsw_core_skb_transmit(void *driver_priv, struct sk_buff *skb, +int mlxsw_core_skb_transmit(struct mlxsw_core *mlxsw_core, struct sk_buff *skb, const struct mlxsw_tx_info *tx_info); struct mlxsw_rx_listener { diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 3216f2b9844f..8abe1a615c94 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -390,7 +390,7 @@ static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb, u64 len; int err; - if (mlxsw_core_skb_transmit_busy(mlxsw_sp, &tx_info)) + if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info)) return NETDEV_TX_BUSY; if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) { @@ -414,7 +414,7 @@ static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb, /* Due to a race we might fail here because of a full queue. In that * unlikely case we simply drop the packet. 
*/ - err = mlxsw_core_skb_transmit(mlxsw_sp, skb, &tx_info); + err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info); if (!err) { pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats); diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c index 2417f099931b..2518c84960a0 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c +++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c @@ -302,7 +302,7 @@ static netdev_tx_t mlxsw_sx_port_xmit(struct sk_buff *skb, u64 len; int err; - if (mlxsw_core_skb_transmit_busy(mlxsw_sx, &tx_info)) + if (mlxsw_core_skb_transmit_busy(mlxsw_sx->core, &tx_info)) return NETDEV_TX_BUSY; if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) { @@ -320,7 +320,7 @@ static netdev_tx_t mlxsw_sx_port_xmit(struct sk_buff *skb, /* Due to a race we might fail here because of a full queue. In that * unlikely case we simply drop the packet. */ - err = mlxsw_core_skb_transmit(mlxsw_sx, skb, &tx_info); + err = mlxsw_core_skb_transmit(mlxsw_sx->core, skb, &tx_info); if (!err) { pcpu_stats = this_cpu_ptr(mlxsw_sx_port->pcpu_stats); From b2f10571b96414986f7293b06847d202f2d1d0ca Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Fri, 8 Apr 2016 19:11:23 +0200 Subject: [PATCH 0502/1649] mlxsw: Do not pass around driver_priv directly Instead of that, pass mlxsw_core and use a helper to get driver priv from driver code. Looks much cleaner that way. Signed-off-by: Jiri Pirko Reviewed-by: Ido Schimmel Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/core.c | 19 +++++++++++-------- drivers/net/ethernet/mellanox/mlxsw/core.h | 11 +++++++---- .../net/ethernet/mellanox/mlxsw/spectrum.c | 17 +++++++++-------- .../net/ethernet/mellanox/mlxsw/switchx2.c | 8 ++++---- 4 files changed, 31 insertions(+), 24 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c index 39161fb91ec1..3958195526d1 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core.c @@ -114,6 +114,12 @@ struct mlxsw_core { /* driver_priv has to be always the last item */ }; +void *mlxsw_core_driver_priv(struct mlxsw_core *mlxsw_core) +{ + return mlxsw_core->driver_priv; +} +EXPORT_SYMBOL(mlxsw_core_driver_priv); + struct mlxsw_rx_listener_item { struct list_head list; struct mlxsw_rx_listener rxl; @@ -795,8 +801,7 @@ static int mlxsw_devlink_port_split(struct devlink *devlink, return -EINVAL; if (!mlxsw_core->driver->port_split) return -EOPNOTSUPP; - return mlxsw_core->driver->port_split(mlxsw_core->driver_priv, - port_index, count); + return mlxsw_core->driver->port_split(mlxsw_core, port_index, count); } static int mlxsw_devlink_port_unsplit(struct devlink *devlink, @@ -808,8 +813,7 @@ static int mlxsw_devlink_port_unsplit(struct devlink *devlink, return -EINVAL; if (!mlxsw_core->driver->port_unsplit) return -EOPNOTSUPP; - return mlxsw_core->driver->port_unsplit(mlxsw_core->driver_priv, - port_index); + return mlxsw_core->driver->port_unsplit(mlxsw_core, port_index); } static const struct devlink_ops mlxsw_devlink_ops = { @@ -880,8 +884,7 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info, if (err) goto err_devlink_register; - err = mlxsw_driver->init(mlxsw_core->driver_priv, mlxsw_core, - mlxsw_bus_info); + err = mlxsw_driver->init(mlxsw_core, mlxsw_bus_info); if (err) goto err_driver_init; @@ -892,7 +895,7 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info, return 0; 
err_debugfs_init: - mlxsw_core->driver->fini(mlxsw_core->driver_priv); + mlxsw_core->driver->fini(mlxsw_core); err_driver_init: devlink_unregister(devlink); err_devlink_register: @@ -918,7 +921,7 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core) struct devlink *devlink = priv_to_devlink(mlxsw_core); mlxsw_core_debugfs_fini(mlxsw_core); - mlxsw_core->driver->fini(mlxsw_core->driver_priv); + mlxsw_core->driver->fini(mlxsw_core); devlink_unregister(devlink); mlxsw_emad_fini(mlxsw_core); mlxsw_core->bus->fini(mlxsw_core->bus_priv); diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h index 0454212a86d1..f3cebef9c31c 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core.h @@ -62,6 +62,8 @@ struct mlxsw_driver; struct mlxsw_bus; struct mlxsw_bus_info; +void *mlxsw_core_driver_priv(struct mlxsw_core *mlxsw_core); + int mlxsw_core_driver_register(struct mlxsw_driver *mlxsw_driver); void mlxsw_core_driver_unregister(struct mlxsw_driver *mlxsw_driver); @@ -192,11 +194,12 @@ struct mlxsw_driver { const char *kind; struct module *owner; size_t priv_size; - int (*init)(void *driver_priv, struct mlxsw_core *mlxsw_core, + int (*init)(struct mlxsw_core *mlxsw_core, const struct mlxsw_bus_info *mlxsw_bus_info); - void (*fini)(void *driver_priv); - int (*port_split)(void *driver_priv, u8 local_port, unsigned int count); - int (*port_unsplit)(void *driver_priv, u8 local_port); + void (*fini)(struct mlxsw_core *mlxsw_core); + int (*port_split)(struct mlxsw_core *mlxsw_core, u8 local_port, + unsigned int count); + int (*port_unsplit)(struct mlxsw_core *mlxsw_core, u8 local_port); void (*txhdr_construct)(struct sk_buff *skb, const struct mlxsw_tx_info *tx_info); u8 txhdr_len; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 8abe1a615c94..19b3c144abc6 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -1948,9 +1948,10 @@ static u8 mlxsw_sp_cluster_base_port_get(u8 local_port) return local_port - offset; } -static int mlxsw_sp_port_split(void *priv, u8 local_port, unsigned int count) +static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port, + unsigned int count) { - struct mlxsw_sp *mlxsw_sp = priv; + struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); struct mlxsw_sp_port *mlxsw_sp_port; u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count; u8 module, cur_width, base_port; @@ -2022,9 +2023,9 @@ err_port_create: return err; } -static int mlxsw_sp_port_unsplit(void *priv, u8 local_port) +static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port) { - struct mlxsw_sp *mlxsw_sp = priv; + struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); struct mlxsw_sp_port *mlxsw_sp_port; u8 module, cur_width, base_port; unsigned int count; @@ -2369,10 +2370,10 @@ static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp) return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl); } -static int mlxsw_sp_init(void *priv, struct mlxsw_core *mlxsw_core, +static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core, const struct mlxsw_bus_info *mlxsw_bus_info) { - struct mlxsw_sp *mlxsw_sp = priv; + struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); int err; mlxsw_sp->core = mlxsw_core; @@ -2443,9 +2444,9 @@ err_event_register: return err; } -static void mlxsw_sp_fini(void *priv) +static void mlxsw_sp_fini(struct 
mlxsw_core *mlxsw_core) { - struct mlxsw_sp *mlxsw_sp = priv; + struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); mlxsw_sp_switchdev_fini(mlxsw_sp); mlxsw_sp_traps_fini(mlxsw_sp); diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c index 2518c84960a0..3842eab9449a 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c +++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c @@ -1447,10 +1447,10 @@ static int mlxsw_sx_flood_init(struct mlxsw_sx *mlxsw_sx) return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sgcr), sgcr_pl); } -static int mlxsw_sx_init(void *priv, struct mlxsw_core *mlxsw_core, +static int mlxsw_sx_init(struct mlxsw_core *mlxsw_core, const struct mlxsw_bus_info *mlxsw_bus_info) { - struct mlxsw_sx *mlxsw_sx = priv; + struct mlxsw_sx *mlxsw_sx = mlxsw_core_driver_priv(mlxsw_core); int err; mlxsw_sx->core = mlxsw_core; @@ -1497,9 +1497,9 @@ err_event_register: return err; } -static void mlxsw_sx_fini(void *priv) +static void mlxsw_sx_fini(struct mlxsw_core *mlxsw_core) { - struct mlxsw_sx *mlxsw_sx = priv; + struct mlxsw_sx *mlxsw_sx = mlxsw_core_driver_priv(mlxsw_core); mlxsw_sx_traps_fini(mlxsw_sx); mlxsw_sx_event_unregister(mlxsw_sx, MLXSW_TRAP_ID_PUDE); From 497e8592c6d22772d0ad100c1f08e601dc417ed5 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Fri, 8 Apr 2016 19:11:24 +0200 Subject: [PATCH 0503/1649] mlxsw: reg: Share direction enum between SBPR, SBCM, SBPM Same field, same values, so share the same enum. Signed-off-by: Jiri Pirko Reviewed-by: Ido Schimmel Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/reg.h | 23 ++++++------------- .../mellanox/mlxsw/spectrum_buffers.c | 20 ++++++++-------- 2 files changed, 17 insertions(+), 26 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index 28f5b99e585a..19bdc826e3cd 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -3476,9 +3476,10 @@ static const struct mlxsw_reg_info mlxsw_reg_sbpr = { .len = MLXSW_REG_SBPR_LEN, }; -enum mlxsw_reg_sbpr_dir { - MLXSW_REG_SBPR_DIR_INGRESS, - MLXSW_REG_SBPR_DIR_EGRESS, +/* shared direstion enum for SBPR, SBCM, SBPM */ +enum mlxsw_reg_sbxx_dir { + MLXSW_REG_SBXX_DIR_INGRESS, + MLXSW_REG_SBXX_DIR_EGRESS, }; /* reg_sbpr_dir @@ -3511,7 +3512,7 @@ enum mlxsw_reg_sbpr_mode { MLXSW_ITEM32(reg, sbpr, mode, 0x08, 0, 4); static inline void mlxsw_reg_sbpr_pack(char *payload, u8 pool, - enum mlxsw_reg_sbpr_dir dir, + enum mlxsw_reg_sbxx_dir dir, enum mlxsw_reg_sbpr_mode mode, u32 size) { MLXSW_REG_ZERO(sbpr, payload); @@ -3553,11 +3554,6 @@ MLXSW_ITEM32(reg, sbcm, local_port, 0x00, 16, 8); */ MLXSW_ITEM32(reg, sbcm, pg_buff, 0x00, 8, 6); -enum mlxsw_reg_sbcm_dir { - MLXSW_REG_SBCM_DIR_INGRESS, - MLXSW_REG_SBCM_DIR_EGRESS, -}; - /* reg_sbcm_dir * Direction. * Access: Index @@ -3590,7 +3586,7 @@ MLXSW_ITEM32(reg, sbcm, max_buff, 0x1C, 0, 24); MLXSW_ITEM32(reg, sbcm, pool, 0x24, 0, 4); static inline void mlxsw_reg_sbcm_pack(char *payload, u8 local_port, u8 pg_buff, - enum mlxsw_reg_sbcm_dir dir, + enum mlxsw_reg_sbxx_dir dir, u32 min_buff, u32 max_buff, u8 pool) { MLXSW_REG_ZERO(sbcm, payload); @@ -3630,11 +3626,6 @@ MLXSW_ITEM32(reg, sbpm, local_port, 0x00, 16, 8); */ MLXSW_ITEM32(reg, sbpm, pool, 0x00, 8, 4); -enum mlxsw_reg_sbpm_dir { - MLXSW_REG_SBPM_DIR_INGRESS, - MLXSW_REG_SBPM_DIR_EGRESS, -}; - /* reg_sbpm_dir * Direction. 
* Access: Index @@ -3661,7 +3652,7 @@ MLXSW_ITEM32(reg, sbpm, min_buff, 0x18, 0, 24); MLXSW_ITEM32(reg, sbpm, max_buff, 0x1C, 0, 24); static inline void mlxsw_reg_sbpm_pack(char *payload, u8 local_port, u8 pool, - enum mlxsw_reg_sbpm_dir dir, + enum mlxsw_reg_sbxx_dir dir, u32 min_buff, u32 max_buff) { MLXSW_REG_ZERO(sbpm, payload); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c index 97c8d537be5b..f58b1d3a619a 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c @@ -110,7 +110,7 @@ static int mlxsw_sp_port_headroom_init(struct mlxsw_sp_port *mlxsw_sp_port) struct mlxsw_sp_sb_pool { u8 pool; - enum mlxsw_reg_sbpr_dir dir; + enum mlxsw_reg_sbxx_dir dir; enum mlxsw_reg_sbpr_mode mode; u32 size; }; @@ -129,11 +129,11 @@ struct mlxsw_sp_sb_pool { } #define MLXSW_SP_SB_POOL_INGRESS(_pool, _size) \ - MLXSW_SP_SB_POOL(_pool, MLXSW_REG_SBPR_DIR_INGRESS, \ + MLXSW_SP_SB_POOL(_pool, MLXSW_REG_SBXX_DIR_INGRESS, \ MLXSW_REG_SBPR_MODE_DYNAMIC, _size) #define MLXSW_SP_SB_POOL_EGRESS(_pool, _size) \ - MLXSW_SP_SB_POOL(_pool, MLXSW_REG_SBPR_DIR_EGRESS, \ + MLXSW_SP_SB_POOL(_pool, MLXSW_REG_SBXX_DIR_EGRESS, \ MLXSW_REG_SBPR_MODE_DYNAMIC, _size) static const struct mlxsw_sp_sb_pool mlxsw_sp_sb_pools[] = { @@ -173,7 +173,7 @@ struct mlxsw_sp_sb_cm { u8 pg; u8 tc; } u; - enum mlxsw_reg_sbcm_dir dir; + enum mlxsw_reg_sbxx_dir dir; u32 min_buff; u32 max_buff; u8 pool; @@ -189,15 +189,15 @@ struct mlxsw_sp_sb_cm { } #define MLXSW_SP_SB_CM_INGRESS(_pg, _min_buff, _max_buff) \ - MLXSW_SP_SB_CM(_pg, MLXSW_REG_SBCM_DIR_INGRESS, \ + MLXSW_SP_SB_CM(_pg, MLXSW_REG_SBXX_DIR_INGRESS, \ _min_buff, _max_buff, 0) #define MLXSW_SP_SB_CM_EGRESS(_tc, _min_buff, _max_buff) \ - MLXSW_SP_SB_CM(_tc, MLXSW_REG_SBCM_DIR_EGRESS, \ + MLXSW_SP_SB_CM(_tc, MLXSW_REG_SBXX_DIR_EGRESS, \ _min_buff, _max_buff, 0) #define MLXSW_SP_CPU_PORT_SB_CM_EGRESS(_tc) \ - MLXSW_SP_SB_CM(_tc, MLXSW_REG_SBCM_DIR_EGRESS, 104, 2, 3) + MLXSW_SP_SB_CM(_tc, MLXSW_REG_SBXX_DIR_EGRESS, 104, 2, 3) static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms[] = { MLXSW_SP_SB_CM_INGRESS(0, MLXSW_SP_BYTES_TO_CELLS(10000), 8), @@ -304,7 +304,7 @@ static int mlxsw_sp_cpu_port_sb_cms_init(struct mlxsw_sp *mlxsw_sp) struct mlxsw_sp_sb_pm { u8 pool; - enum mlxsw_reg_sbpm_dir dir; + enum mlxsw_reg_sbxx_dir dir; u32 min_buff; u32 max_buff; }; @@ -318,11 +318,11 @@ struct mlxsw_sp_sb_pm { } #define MLXSW_SP_SB_PM_INGRESS(_pool, _min_buff, _max_buff) \ - MLXSW_SP_SB_PM(_pool, MLXSW_REG_SBPM_DIR_INGRESS, \ + MLXSW_SP_SB_PM(_pool, MLXSW_REG_SBXX_DIR_INGRESS, \ _min_buff, _max_buff) #define MLXSW_SP_SB_PM_EGRESS(_pool, _min_buff, _max_buff) \ - MLXSW_SP_SB_PM(_pool, MLXSW_REG_SBPM_DIR_EGRESS, \ + MLXSW_SP_SB_PM(_pool, MLXSW_REG_SBXX_DIR_EGRESS, \ _min_buff, _max_buff) static const struct mlxsw_sp_sb_pm mlxsw_sp_sb_pms[] = { From 9efc8f655c8488c6ee2f7d5034826880bf5b4bba Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Fri, 8 Apr 2016 19:11:25 +0200 Subject: [PATCH 0504/1649] mlxsw: reg: Fix SBPM register name Fix copy&paste error and state the name of SBPM register correctly. Signed-off-by: Jiri Pirko Reviewed-by: Ido Schimmel Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/reg.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index 19bdc826e3cd..57e4a6337ae3 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -3598,8 +3598,8 @@ static inline void mlxsw_reg_sbcm_pack(char *payload, u8 local_port, u8 pg_buff, mlxsw_reg_sbcm_pool_set(payload, pool); } -/* SBPM - Shared Buffer Class Management Register - * ---------------------------------------------- +/* SBPM - Shared Buffer Port Management Register + * --------------------------------------------- * The SBPM register configures and retrieves the shared buffer allocation * and configuration according to Port-Pool, including the definition * of the associated quota. From 1fc2257e837f86c2688fdcc5c8810b73c133794d Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Fri, 8 Apr 2016 19:12:48 +0200 Subject: [PATCH 0505/1649] devlink: share user_ptr pointer for both devlink and devlink_port Ptr to devlink structure can be easily obtained from devlink_port->devlink. So share user_ptr[0] pointer for both and leave user_ptr[1] free for other users. Signed-off-by: Jiri Pirko Reviewed-by: Ido Schimmel Signed-off-by: David S. Miller --- net/core/devlink.c | 21 +++++++++++++-------- 1 file changed, 13 insertions(+), 8 deletions(-) diff --git a/net/core/devlink.c b/net/core/devlink.c index 44f880d3b816..b84cf0df4a0e 100644 --- a/net/core/devlink.c +++ b/net/core/devlink.c @@ -119,7 +119,8 @@ static struct devlink_port *devlink_port_get_from_info(struct devlink *devlink, return devlink_port_get_from_attrs(devlink, info->attrs); } -#define DEVLINK_NL_FLAG_NEED_PORT BIT(0) +#define DEVLINK_NL_FLAG_NEED_DEVLINK BIT(0) +#define DEVLINK_NL_FLAG_NEED_PORT BIT(1) static int devlink_nl_pre_doit(const struct genl_ops *ops, struct sk_buff *skb, struct genl_info *info) @@ -132,8 +133,9 @@ static int devlink_nl_pre_doit(const struct genl_ops *ops, mutex_unlock(&devlink_mutex); return PTR_ERR(devlink); } - info->user_ptr[0] = devlink; - if (ops->internal_flags & DEVLINK_NL_FLAG_NEED_PORT) { + if (ops->internal_flags & DEVLINK_NL_FLAG_NEED_DEVLINK) { + info->user_ptr[0] = devlink; + } else if (ops->internal_flags & DEVLINK_NL_FLAG_NEED_PORT) { struct devlink_port *devlink_port; mutex_lock(&devlink_port_mutex); @@ -143,7 +145,7 @@ static int devlink_nl_pre_doit(const struct genl_ops *ops, mutex_unlock(&devlink_mutex); return PTR_ERR(devlink_port); } - info->user_ptr[1] = devlink_port; + info->user_ptr[0] = devlink_port; } return 0; } @@ -356,8 +358,8 @@ out: static int devlink_nl_cmd_port_get_doit(struct sk_buff *skb, struct genl_info *info) { - struct devlink *devlink = info->user_ptr[0]; - struct devlink_port *devlink_port = info->user_ptr[1]; + struct devlink_port *devlink_port = info->user_ptr[0]; + struct devlink *devlink = devlink_port->devlink; struct sk_buff *msg; int err; @@ -436,8 +438,8 @@ static int devlink_port_type_set(struct devlink *devlink, static int devlink_nl_cmd_port_set_doit(struct sk_buff *skb, struct genl_info *info) { - struct devlink *devlink = info->user_ptr[0]; - struct devlink_port *devlink_port = info->user_ptr[1]; + struct devlink_port *devlink_port = info->user_ptr[0]; + struct devlink *devlink = devlink_port->devlink; int err; if (info->attrs[DEVLINK_ATTR_PORT_TYPE]) { @@ -511,6 +513,7 @@ static const struct genl_ops devlink_nl_ops[] = { .doit = devlink_nl_cmd_get_doit, .dumpit = devlink_nl_cmd_get_dumpit, 
.policy = devlink_nl_policy, + .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK, /* can be retrieved by unprivileged users */ }, { @@ -533,12 +536,14 @@ static const struct genl_ops devlink_nl_ops[] = { .doit = devlink_nl_cmd_port_split_doit, .policy = devlink_nl_policy, .flags = GENL_ADMIN_PERM, + .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK, }, { .cmd = DEVLINK_CMD_PORT_UNSPLIT, .doit = devlink_nl_cmd_port_unsplit_doit, .policy = devlink_nl_policy, .flags = GENL_ADMIN_PERM, + .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK, }, }; From 07016151a446d25397b24588df4ed5cf777a69bb Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Tue, 5 Apr 2016 22:33:17 +0200 Subject: [PATCH 0506/1649] bpf, verifier: further improve search pruning The verifier needs to go through every path of the program in order to check that it terminates safely, which can be quite a lot of instructions that need to be processed f.e. in cases with more branchy programs. With search pruning from f1bca824dabb ("bpf: add search pruning optimization to verifier") the search space can already be reduced significantly when the verifier detects that a previously walked path with same register and stack contents terminated already (see verifier's states_equal()), so the search can skip walking those states. When working with larger programs of > ~2000 (out of max 4096) insns, we found that the current limit of 32k instructions is easily hit. For example, a case we ran into is that the search space cannot be pruned due to branches at the beginning of the program that make use of certain stack space slots (STACK_MISC), which are never used in the remaining program (STACK_INVALID). Therefore, the verifier needs to walk paths for the slots in STACK_INVALID state, but also all remaining paths with a stack structure, where the slots are in STACK_MISC, which can nearly double the search space needed. After various experiments, we find that a limit of 64k processed insns is a more reasonable choice when dealing with larger programs in practice. This still allows to reject extreme crafted cases that can have a much higher complexity (f.e. > ~300k) within the 4096 insns limit due to search pruning not being able to take effect. Furthermore, we found that a lot of states can be pruned after a call instruction, f.e. we were able to reduce the search state by ~35% in some cases with this heuristic, trade-off is to keep a bit more states in env->explored_states. Usually, call instructions have a number of preceding register assignments and/or stack stores, where search pruning has a better chance to suceed in states_equal() test. The current code marks the branch targets with STATE_LIST_MARK in case of conditional jumps, and the next (t + 1) instruction in case of unconditional jump so that f.e. a backjump will walk it. We also did experiments with using t + insns[t].off + 1 as a marker in the unconditionally jump case instead of t + 1 with the rationale that these two branches of execution that converge after the label might have more potential of pruning. We found that it was a bit better, but not necessarily significantly better than the current state, perhaps also due to clang not generating back jumps often. Hence, we left that as is for now. Signed-off-by: Daniel Borkmann Acked-by: Alexei Starovoitov Signed-off-by: David S. 
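
To make the pruning problem above concrete, here is a hypothetical illustration in plain C (not taken from any real BPF program): two early branches that spill to different stack slots leave the verifier with states that states_equal() cannot merge, so the remainder of the program is explored once per distinct stack layout.

	/* Illustration only.  The slot written in the taken branch becomes
	 * STACK_MISC while the other stays STACK_INVALID, so the two states
	 * reaching the tail differ even though the tail never reads scratch[].
	 * With k such independent branches up front, the tail may be walked up
	 * to 2^k times -- the effect that pushed real ~2000+ insn programs past
	 * the old 32k processed-instruction limit.
	 */
	static int branchy_filter(unsigned int a, unsigned int b)
	{
		volatile long scratch[2];	/* stack slots used only up front */

		if (a & 1)
			scratch[0] = a;		/* slot 0 -> STACK_MISC */
		else
			scratch[1] = b;		/* slot 1 -> STACK_MISC */

		/* ... long remainder that never touches scratch[] ... */
		return (a ^ b) & 0xff;
	}
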
Miller --- kernel/bpf/verifier.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 58792fed5678..8233021538d3 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -202,6 +202,9 @@ struct verifier_env { bool allow_ptr_leaks; }; +#define BPF_COMPLEXITY_LIMIT_INSNS 65536 +#define BPF_COMPLEXITY_LIMIT_STACK 1024 + /* verbose verifier prints what it's seeing * bpf_check() is called under lock, so no race to access these global vars */ @@ -454,7 +457,7 @@ static struct verifier_state *push_stack(struct verifier_env *env, int insn_idx, elem->next = env->head; env->head = elem; env->stack_size++; - if (env->stack_size > 1024) { + if (env->stack_size > BPF_COMPLEXITY_LIMIT_STACK) { verbose("BPF program is too complex\n"); goto err; } @@ -1543,6 +1546,8 @@ peek_stack: goto peek_stack; else if (ret < 0) goto err_free; + if (t + 1 < insn_cnt) + env->explored_states[t + 1] = STATE_LIST_MARK; } else if (opcode == BPF_JA) { if (BPF_SRC(insns[t].code) != BPF_K) { ret = -EINVAL; @@ -1747,7 +1752,7 @@ static int do_check(struct verifier_env *env) insn = &insns[insn_idx]; class = BPF_CLASS(insn->code); - if (++insn_processed > 32768) { + if (++insn_processed > BPF_COMPLEXITY_LIMIT_INSNS) { verbose("BPF program is too large. Proccessed %d insn\n", insn_processed); return -E2BIG; From f453939c1a4a758312f799748b344bacd1db701f Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Wed, 6 Apr 2016 11:06:20 -0400 Subject: [PATCH 0507/1649] net: dsa: document missing functions Add description for the missing port_vlan_prepare, port_fdb_prepare, port_fdb_dump functions in the DSA documentation. Signed-off-by: Vivien Didelot Reviewed-by: Andrew Lunn Signed-off-by: David S. Miller --- Documentation/networking/dsa/dsa.txt | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/Documentation/networking/dsa/dsa.txt b/Documentation/networking/dsa/dsa.txt index 3b196c304b73..013b67066b82 100644 --- a/Documentation/networking/dsa/dsa.txt +++ b/Documentation/networking/dsa/dsa.txt @@ -542,6 +542,12 @@ Bridge layer Bridge VLAN filtering --------------------- +- port_vlan_prepare: bridge layer function invoked when the bridge prepares the + configuration of a VLAN on the given port. If the operation is not supported + by the hardware, this function should return -EOPNOTSUPP to inform the bridge + code to fallback to a software implementation. No hardware setup must be done + in this function. See port_vlan_add for this and details. + - port_vlan_add: bridge layer function invoked when a VLAN is configured (tagged or untagged) for the given switch port @@ -552,6 +558,12 @@ Bridge VLAN filtering function that the driver has to call for each VLAN the given port is a member of. A switchdev object is used to carry the VID and bridge flags. +- port_fdb_prepare: bridge layer function invoked when the bridge prepares the + installation of a Forwarding Database entry. If the operation is not + supported, this function should return -EOPNOTSUPP to inform the bridge code + to fallback to a software implementation. No hardware setup must be done in + this function. See port_fdb_add for this and details. + - port_fdb_add: bridge layer function invoked when the bridge wants to install a Forwarding Database entry, the switch hardware should be programmed with the specified address in the specified VLAN Id in the forwarding database @@ -565,6 +577,10 @@ of DSA, would be the its port-based VLAN, used by the associated bridge device. 
the specified MAC address from the specified VLAN ID if it was mapped into this port forwarding database +- port_fdb_dump: bridge layer function invoked with a switchdev callback + function that the driver has to call for each MAC address known to be behind + the given port. A switchdev object is used to carry the VID and FDB info. + TODO ==== From 43c44a9f655170fb92536167b95b1c6ae8b732cb Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Wed, 6 Apr 2016 11:55:03 -0400 Subject: [PATCH 0508/1649] net: dsa: make the STP state function return void The DSA layer doesn't care about the return code of the port_stp_update routine, so make it void in the layer and the DSA drivers. Replace the useless dsa_slave_stp_update function with a dsa_slave_stp_state function used to reply to the switchdev SWITCHDEV_ATTR_ID_PORT_STP_STATE attribute. In the meantime, rename port_stp_update to port_stp_state_set to explicit the state change. Signed-off-by: Vivien Didelot Signed-off-by: David S. Miller --- Documentation/networking/dsa/dsa.txt | 2 +- drivers/net/dsa/bcm_sf2.c | 16 ++++++-------- drivers/net/dsa/mv88e6171.c | 2 +- drivers/net/dsa/mv88e6352.c | 2 +- drivers/net/dsa/mv88e6xxx.c | 6 ++---- drivers/net/dsa/mv88e6xxx.h | 2 +- include/net/dsa.h | 4 ++-- net/dsa/slave.c | 32 +++++++++++++--------------- 8 files changed, 29 insertions(+), 37 deletions(-) diff --git a/Documentation/networking/dsa/dsa.txt b/Documentation/networking/dsa/dsa.txt index 013b67066b82..ba698c56919d 100644 --- a/Documentation/networking/dsa/dsa.txt +++ b/Documentation/networking/dsa/dsa.txt @@ -533,7 +533,7 @@ Bridge layer out at the switch hardware for the switch to (re) learn MAC addresses behind this port. -- port_stp_update: bridge layer function invoked when a given switch port STP +- port_stp_state_set: bridge layer function invoked when a given switch port STP state is computed by the bridge layer and should be propagated to switch hardware to forward/block/learn traffic. 
The switch driver is responsible for computing a STP state change based on current and asked parameters and perform diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index 95944d5e3e22..2bba1d938694 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -545,12 +545,11 @@ static void bcm_sf2_sw_br_leave(struct dsa_switch *ds, int port) priv->port_sts[port].bridge_dev = NULL; } -static int bcm_sf2_sw_br_set_stp_state(struct dsa_switch *ds, int port, - u8 state) +static void bcm_sf2_sw_br_set_stp_state(struct dsa_switch *ds, int port, + u8 state) { struct bcm_sf2_priv *priv = ds_to_priv(ds); u8 hw_state, cur_hw_state; - int ret = 0; u32 reg; reg = core_readl(priv, CORE_G_PCTL_PORT(port)); @@ -574,7 +573,7 @@ static int bcm_sf2_sw_br_set_stp_state(struct dsa_switch *ds, int port, break; default: pr_err("%s: invalid STP state: %d\n", __func__, state); - return -EINVAL; + return; } /* Fast-age ARL entries if we are moving a port from Learning or @@ -584,10 +583,9 @@ static int bcm_sf2_sw_br_set_stp_state(struct dsa_switch *ds, int port, if (cur_hw_state != hw_state) { if (cur_hw_state >= G_MISTP_LEARN_STATE && hw_state <= G_MISTP_LISTEN_STATE) { - ret = bcm_sf2_sw_fast_age_port(ds, port); - if (ret) { + if (bcm_sf2_sw_fast_age_port(ds, port)) { pr_err("%s: fast-ageing failed\n", __func__); - return ret; + return; } } } @@ -596,8 +594,6 @@ static int bcm_sf2_sw_br_set_stp_state(struct dsa_switch *ds, int port, reg &= ~(G_MISTP_STATE_MASK << G_MISTP_STATE_SHIFT); reg |= hw_state; core_writel(priv, reg, CORE_G_PCTL_PORT(port)); - - return 0; } /* Address Resolution Logic routines */ @@ -1387,7 +1383,7 @@ static struct dsa_switch_driver bcm_sf2_switch_driver = { .set_eee = bcm_sf2_sw_set_eee, .port_bridge_join = bcm_sf2_sw_br_join, .port_bridge_leave = bcm_sf2_sw_br_leave, - .port_stp_update = bcm_sf2_sw_br_set_stp_state, + .port_stp_state_set = bcm_sf2_sw_br_set_stp_state, .port_fdb_prepare = bcm_sf2_sw_fdb_prepare, .port_fdb_add = bcm_sf2_sw_fdb_add, .port_fdb_del = bcm_sf2_sw_fdb_del, diff --git a/drivers/net/dsa/mv88e6171.c b/drivers/net/dsa/mv88e6171.c index c0164b98fc08..0e62f3b5bc81 100644 --- a/drivers/net/dsa/mv88e6171.c +++ b/drivers/net/dsa/mv88e6171.c @@ -105,7 +105,7 @@ struct dsa_switch_driver mv88e6171_switch_driver = { .get_regs = mv88e6xxx_get_regs, .port_bridge_join = mv88e6xxx_port_bridge_join, .port_bridge_leave = mv88e6xxx_port_bridge_leave, - .port_stp_update = mv88e6xxx_port_stp_update, + .port_stp_state_set = mv88e6xxx_port_stp_state_set, .port_vlan_filtering = mv88e6xxx_port_vlan_filtering, .port_vlan_prepare = mv88e6xxx_port_vlan_prepare, .port_vlan_add = mv88e6xxx_port_vlan_add, diff --git a/drivers/net/dsa/mv88e6352.c b/drivers/net/dsa/mv88e6352.c index 5f528abc8af1..7f452e4a04a5 100644 --- a/drivers/net/dsa/mv88e6352.c +++ b/drivers/net/dsa/mv88e6352.c @@ -326,7 +326,7 @@ struct dsa_switch_driver mv88e6352_switch_driver = { .get_regs = mv88e6xxx_get_regs, .port_bridge_join = mv88e6xxx_port_bridge_join, .port_bridge_leave = mv88e6xxx_port_bridge_leave, - .port_stp_update = mv88e6xxx_port_stp_update, + .port_stp_state_set = mv88e6xxx_port_stp_state_set, .port_vlan_filtering = mv88e6xxx_port_vlan_filtering, .port_vlan_prepare = mv88e6xxx_port_vlan_prepare, .port_vlan_add = mv88e6xxx_port_vlan_add, diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c index 0dda2817d0ec..53c545cbb779 100644 --- a/drivers/net/dsa/mv88e6xxx.c +++ b/drivers/net/dsa/mv88e6xxx.c @@ -1193,7 +1193,7 @@ static int 
_mv88e6xxx_port_based_vlan_map(struct dsa_switch *ds, int port) return _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_BASE_VLAN, reg); } -int mv88e6xxx_port_stp_update(struct dsa_switch *ds, int port, u8 state) +void mv88e6xxx_port_stp_state_set(struct dsa_switch *ds, int port, u8 state) { struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); int stp_state; @@ -1215,14 +1215,12 @@ int mv88e6xxx_port_stp_update(struct dsa_switch *ds, int port, u8 state) break; } - /* mv88e6xxx_port_stp_update may be called with softirqs disabled, + /* mv88e6xxx_port_stp_state_set may be called with softirqs disabled, * so we can not update the port state directly but need to schedule it. */ ps->ports[port].state = stp_state; set_bit(port, ps->port_state_update_mask); schedule_work(&ps->bridge_work); - - return 0; } static int _mv88e6xxx_port_pvid(struct dsa_switch *ds, int port, u16 *new, diff --git a/drivers/net/dsa/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx.h index 26a424acd10f..49448553c44b 100644 --- a/drivers/net/dsa/mv88e6xxx.h +++ b/drivers/net/dsa/mv88e6xxx.h @@ -497,7 +497,7 @@ int mv88e6xxx_set_eee(struct dsa_switch *ds, int port, int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port, struct net_device *bridge); void mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port); -int mv88e6xxx_port_stp_update(struct dsa_switch *ds, int port, u8 state); +void mv88e6xxx_port_stp_state_set(struct dsa_switch *ds, int port, u8 state); int mv88e6xxx_port_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering); int mv88e6xxx_port_vlan_prepare(struct dsa_switch *ds, int port, diff --git a/include/net/dsa.h b/include/net/dsa.h index 6463bb2863ac..2123981fd94a 100644 --- a/include/net/dsa.h +++ b/include/net/dsa.h @@ -299,8 +299,8 @@ struct dsa_switch_driver { int (*port_bridge_join)(struct dsa_switch *ds, int port, struct net_device *bridge); void (*port_bridge_leave)(struct dsa_switch *ds, int port); - int (*port_stp_update)(struct dsa_switch *ds, int port, - u8 state); + void (*port_stp_state_set)(struct dsa_switch *ds, int port, + u8 state); /* * VLAN support diff --git a/net/dsa/slave.c b/net/dsa/slave.c index a575f0350d5a..088215c3642f 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c @@ -104,8 +104,8 @@ static int dsa_slave_open(struct net_device *dev) goto clear_promisc; } - if (ds->drv->port_stp_update) - ds->drv->port_stp_update(ds, p->port, stp_state); + if (ds->drv->port_stp_state_set) + ds->drv->port_stp_state_set(ds, p->port, stp_state); if (p->phy) phy_start(p->phy); @@ -147,8 +147,8 @@ static int dsa_slave_close(struct net_device *dev) if (ds->drv->port_disable) ds->drv->port_disable(ds, p->port, p->phy); - if (ds->drv->port_stp_update) - ds->drv->port_stp_update(ds, p->port, BR_STATE_DISABLED); + if (ds->drv->port_stp_state_set) + ds->drv->port_stp_state_set(ds, p->port, BR_STATE_DISABLED); return 0; } @@ -305,16 +305,19 @@ static int dsa_slave_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) return -EOPNOTSUPP; } -static int dsa_slave_stp_update(struct net_device *dev, u8 state) +static int dsa_slave_stp_state_set(struct net_device *dev, + const struct switchdev_attr *attr, + struct switchdev_trans *trans) { struct dsa_slave_priv *p = netdev_priv(dev); struct dsa_switch *ds = p->parent; - int ret = -EOPNOTSUPP; - if (ds->drv->port_stp_update) - ret = ds->drv->port_stp_update(ds, p->port, state); + if (switchdev_trans_ph_prepare(trans)) + return ds->drv->port_stp_state_set ? 
0 : -EOPNOTSUPP; - return ret; + ds->drv->port_stp_state_set(ds, p->port, attr->u.stp_state); + + return 0; } static int dsa_slave_vlan_filtering(struct net_device *dev, @@ -339,17 +342,11 @@ static int dsa_slave_port_attr_set(struct net_device *dev, const struct switchdev_attr *attr, struct switchdev_trans *trans) { - struct dsa_slave_priv *p = netdev_priv(dev); - struct dsa_switch *ds = p->parent; int ret; switch (attr->id) { case SWITCHDEV_ATTR_ID_PORT_STP_STATE: - if (switchdev_trans_ph_prepare(trans)) - ret = ds->drv->port_stp_update ? 0 : -EOPNOTSUPP; - else - ret = ds->drv->port_stp_update(ds, p->port, - attr->u.stp_state); + ret = dsa_slave_stp_state_set(dev, attr, trans); break; case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING: ret = dsa_slave_vlan_filtering(dev, attr, trans); @@ -468,7 +465,8 @@ static void dsa_slave_bridge_port_leave(struct net_device *dev) /* Port left the bridge, put in BR_STATE_DISABLED by the bridge layer, * so allow it to be in BR_STATE_FORWARDING to be kept functional */ - dsa_slave_stp_update(dev, BR_STATE_FORWARDING); + if (ds->drv->port_stp_state_set) + ds->drv->port_stp_state_set(ds, p->port, BR_STATE_FORWARDING); } static int dsa_slave_port_attr_get(struct net_device *dev, From 8497aa618dd605b084fae86e676ea23ca85558b5 Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Wed, 6 Apr 2016 11:55:04 -0400 Subject: [PATCH 0509/1649] net: dsa: make the FDB add function return void The switchdev design implies that a software error should not happen in the commit phase since it must have been previously reported in the prepare phase. If a hardware error occurs during the commit phase, there is nothing switchdev can do about it. The DSA layer separates port_fdb_prepare and port_fdb_add for simplicity and convenience. If a hardware error occurs during the commit phase, there is no need to report it outside the DSA driver itself. Make the DSA port_fdb_add routine return void for explicitness. Signed-off-by: Vivien Didelot Signed-off-by: David S.
Miller --- drivers/net/dsa/bcm_sf2.c | 9 +++++---- drivers/net/dsa/mv88e6xxx.c | 12 +++++------- drivers/net/dsa/mv88e6xxx.h | 6 +++--- include/net/dsa.h | 2 +- net/dsa/slave.c | 16 ++++++++-------- 5 files changed, 22 insertions(+), 23 deletions(-) diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index 2bba1d938694..780f22876538 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -724,13 +724,14 @@ static int bcm_sf2_sw_fdb_prepare(struct dsa_switch *ds, int port, return 0; } -static int bcm_sf2_sw_fdb_add(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_fdb *fdb, - struct switchdev_trans *trans) +static void bcm_sf2_sw_fdb_add(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_fdb *fdb, + struct switchdev_trans *trans) { struct bcm_sf2_priv *priv = ds_to_priv(ds); - return bcm_sf2_arl_op(priv, 0, port, fdb->addr, fdb->vid, true); + if (bcm_sf2_arl_op(priv, 0, port, fdb->addr, fdb->vid, true)) + pr_err("%s: failed to add MAC address\n", __func__); } static int bcm_sf2_sw_fdb_del(struct dsa_switch *ds, int port, diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c index 53c545cbb779..ef36bf6d6cdd 100644 --- a/drivers/net/dsa/mv88e6xxx.c +++ b/drivers/net/dsa/mv88e6xxx.c @@ -2090,21 +2090,19 @@ int mv88e6xxx_port_fdb_prepare(struct dsa_switch *ds, int port, return 0; } -int mv88e6xxx_port_fdb_add(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_fdb *fdb, - struct switchdev_trans *trans) +void mv88e6xxx_port_fdb_add(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_fdb *fdb, + struct switchdev_trans *trans) { int state = is_multicast_ether_addr(fdb->addr) ? GLOBAL_ATU_DATA_STATE_MC_STATIC : GLOBAL_ATU_DATA_STATE_UC_STATIC; struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); - int ret; mutex_lock(&ps->smi_mutex); - ret = _mv88e6xxx_port_fdb_load(ds, port, fdb->addr, fdb->vid, state); + if (_mv88e6xxx_port_fdb_load(ds, port, fdb->addr, fdb->vid, state)) + netdev_err(ds->ports[port], "failed to load MAC address\n"); mutex_unlock(&ps->smi_mutex); - - return ret; } int mv88e6xxx_port_fdb_del(struct dsa_switch *ds, int port, diff --git a/drivers/net/dsa/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx.h index 49448553c44b..a7dccbe229f2 100644 --- a/drivers/net/dsa/mv88e6xxx.h +++ b/drivers/net/dsa/mv88e6xxx.h @@ -514,9 +514,9 @@ int mv88e6xxx_port_vlan_dump(struct dsa_switch *ds, int port, int mv88e6xxx_port_fdb_prepare(struct dsa_switch *ds, int port, const struct switchdev_obj_port_fdb *fdb, struct switchdev_trans *trans); -int mv88e6xxx_port_fdb_add(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_fdb *fdb, - struct switchdev_trans *trans); +void mv88e6xxx_port_fdb_add(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_fdb *fdb, + struct switchdev_trans *trans); int mv88e6xxx_port_fdb_del(struct dsa_switch *ds, int port, const struct switchdev_obj_port_fdb *fdb); int mv88e6xxx_port_fdb_dump(struct dsa_switch *ds, int port, diff --git a/include/net/dsa.h b/include/net/dsa.h index 2123981fd94a..f1670a4daaeb 100644 --- a/include/net/dsa.h +++ b/include/net/dsa.h @@ -325,7 +325,7 @@ struct dsa_switch_driver { int (*port_fdb_prepare)(struct dsa_switch *ds, int port, const struct switchdev_obj_port_fdb *fdb, struct switchdev_trans *trans); - int (*port_fdb_add)(struct dsa_switch *ds, int port, + void (*port_fdb_add)(struct dsa_switch *ds, int port, const struct switchdev_obj_port_fdb *fdb, struct switchdev_trans *trans); int (*port_fdb_del)(struct 
dsa_switch *ds, int port, diff --git a/net/dsa/slave.c b/net/dsa/slave.c index 088215c3642f..90bc7442c44f 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c @@ -256,17 +256,17 @@ static int dsa_slave_port_fdb_add(struct net_device *dev, { struct dsa_slave_priv *p = netdev_priv(dev); struct dsa_switch *ds = p->parent; - int ret; - if (!ds->drv->port_fdb_prepare || !ds->drv->port_fdb_add) - return -EOPNOTSUPP; + if (switchdev_trans_ph_prepare(trans)) { + if (!ds->drv->port_fdb_prepare || !ds->drv->port_fdb_add) + return -EOPNOTSUPP; - if (switchdev_trans_ph_prepare(trans)) - ret = ds->drv->port_fdb_prepare(ds, p->port, fdb, trans); - else - ret = ds->drv->port_fdb_add(ds, p->port, fdb, trans); + return ds->drv->port_fdb_prepare(ds, p->port, fdb, trans); } - return ret; + ds->drv->port_fdb_add(ds, p->port, fdb, trans); + + return 0; } static int dsa_slave_port_fdb_del(struct net_device *dev, From 4d5770b39710180644f655b2c6cb0c880d108c63 Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Wed, 6 Apr 2016 11:55:05 -0400 Subject: [PATCH 0510/1649] net: dsa: make the VLAN add function return void The switchdev design implies that a software error should not happen in the commit phase since it must have been previously reported in the prepare phase. If a hardware error occurs during the commit phase, there is nothing switchdev can do about it. The DSA layer separates port_vlan_prepare and port_vlan_add for simplicity and convenience. If a hardware error occurs during the commit phase, there is no need to report it outside the driver itself. Make the DSA port_vlan_add routine return void for explicitness. Signed-off-by: Vivien Didelot Signed-off-by: David S. Miller --- drivers/net/dsa/mv88e6xxx.c | 26 +++++++++++--------------- drivers/net/dsa/mv88e6xxx.h | 6 +++--- include/net/dsa.h | 2 +- net/dsa/slave.c | 11 +++-------- 4 files changed, 18 insertions(+), 27 deletions(-) diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c index ef36bf6d6cdd..62320fca6712 100644 --- a/drivers/net/dsa/mv88e6xxx.c +++ b/drivers/net/dsa/mv88e6xxx.c @@ -1908,31 +1908,27 @@ static int _mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port, u16 vid, return _mv88e6xxx_vtu_loadpurge(ds, &vlan); } -int mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_vlan *vlan, - struct switchdev_trans *trans) +void mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_vlan *vlan, + struct switchdev_trans *trans) { struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID; u16 vid; - int err = 0; mutex_lock(&ps->smi_mutex); - for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) { - err = _mv88e6xxx_port_vlan_add(ds, port, vid, untagged); - if (err) - goto unlock; - } + for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) + if (_mv88e6xxx_port_vlan_add(ds, port, vid, untagged)) + netdev_err(ds->ports[port], "failed to add VLAN %d%c\n", + vid, untagged ?
'u' : 't'); + + if (pvid && _mv88e6xxx_port_pvid_set(ds, port, vlan->vid_end)) + netdev_err(ds->ports[port], "failed to set PVID %d\n", + vlan->vid_end); - /* no PVID with ranges, otherwise it's a bug */ - if (pvid) - err = _mv88e6xxx_port_pvid_set(ds, port, vlan->vid_end); -unlock: mutex_unlock(&ps->smi_mutex); - - return err; } static int _mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port, u16 vid) diff --git a/drivers/net/dsa/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx.h index a7dccbe229f2..236bcaa606e7 100644 --- a/drivers/net/dsa/mv88e6xxx.h +++ b/drivers/net/dsa/mv88e6xxx.h @@ -503,9 +503,9 @@ int mv88e6xxx_port_vlan_filtering(struct dsa_switch *ds, int port, int mv88e6xxx_port_vlan_prepare(struct dsa_switch *ds, int port, const struct switchdev_obj_port_vlan *vlan, struct switchdev_trans *trans); -int mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_vlan *vlan, - struct switchdev_trans *trans); +void mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_vlan *vlan, + struct switchdev_trans *trans); int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port, const struct switchdev_obj_port_vlan *vlan); int mv88e6xxx_port_vlan_dump(struct dsa_switch *ds, int port, diff --git a/include/net/dsa.h b/include/net/dsa.h index f1670a4daaeb..18d1be3ad62d 100644 --- a/include/net/dsa.h +++ b/include/net/dsa.h @@ -310,7 +310,7 @@ struct dsa_switch_driver { int (*port_vlan_prepare)(struct dsa_switch *ds, int port, const struct switchdev_obj_port_vlan *vlan, struct switchdev_trans *trans); - int (*port_vlan_add)(struct dsa_switch *ds, int port, + void (*port_vlan_add)(struct dsa_switch *ds, int port, const struct switchdev_obj_port_vlan *vlan, struct switchdev_trans *trans); int (*port_vlan_del)(struct dsa_switch *ds, int port, diff --git a/net/dsa/slave.c b/net/dsa/slave.c index 90bc7442c44f..2dae0d064359 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c @@ -207,21 +207,16 @@ static int dsa_slave_port_vlan_add(struct net_device *dev, { struct dsa_slave_priv *p = netdev_priv(dev); struct dsa_switch *ds = p->parent; - int err; if (switchdev_trans_ph_prepare(trans)) { if (!ds->drv->port_vlan_prepare || !ds->drv->port_vlan_add) return -EOPNOTSUPP; - err = ds->drv->port_vlan_prepare(ds, p->port, vlan, trans); - if (err) - return err; - } else { - err = ds->drv->port_vlan_add(ds, p->port, vlan, trans); - if (err) - return err; + return ds->drv->port_vlan_prepare(ds, p->port, vlan, trans); } + ds->drv->port_vlan_add(ds, p->port, vlan, trans); + return 0; } From 8805eea2494a2837983bc4aaaf6842c89666ec25 Mon Sep 17 00:00:00 2001 From: Maxim Zhukov Date: Fri, 8 Apr 2016 23:54:51 +0300 Subject: [PATCH 0511/1649] Bluetooth: hci_bcsp: fix code style This commit fixed: trailing "*/" trailing spaces mixed indent space between ~ and ( Signed-off-by: Maxim Zhukov Signed-off-by: Marcel Holtmann --- drivers/bluetooth/hci_bcsp.c | 57 ++++++++++++++++++++---------------- 1 file changed, 31 insertions(+), 26 deletions(-) diff --git a/drivers/bluetooth/hci_bcsp.c b/drivers/bluetooth/hci_bcsp.c index 064f2fefad62..d7d23ceba4d1 100644 --- a/drivers/bluetooth/hci_bcsp.c +++ b/drivers/bluetooth/hci_bcsp.c @@ -102,13 +102,12 @@ static const u16 crc_table[] = { /* Initialise the crc calculator */ #define BCSP_CRC_INIT(x) x = 0xffff -/* - Update crc with next data byte - - Implementation note - The data byte is treated as two nibbles. The crc is generated - in reverse, i.e., bits are fed into the register from the top. 
-*/ +/* Update crc with next data byte + * + * Implementation note + * The data byte is treated as two nibbles. The crc is generated + * in reverse, i.e., bits are fed into the register from the top. + */ static void bcsp_crc_update(u16 *crc, u8 d) { u16 reg = *crc; @@ -223,9 +222,10 @@ static struct sk_buff *bcsp_prepare_pkt(struct bcsp_struct *bcsp, u8 *data, } /* Max len of packet: (original len +4(bcsp hdr) +2(crc))*2 - (because bytes 0xc0 and 0xdb are escaped, worst case is - when the packet is all made of 0xc0 and 0xdb :) ) - + 2 (0xc0 delimiters at start and end). */ + * (because bytes 0xc0 and 0xdb are escaped, worst case is + * when the packet is all made of 0xc0 and 0xdb :) ) + * + 2 (0xc0 delimiters at start and end). + */ nskb = alloc_skb((len + 6) * 2 + 2, GFP_ATOMIC); if (!nskb) @@ -285,7 +285,7 @@ static struct sk_buff *bcsp_dequeue(struct hci_uart *hu) struct bcsp_struct *bcsp = hu->priv; unsigned long flags; struct sk_buff *skb; - + /* First of all, check for unreliable messages in the queue, since they have priority */ @@ -305,8 +305,9 @@ static struct sk_buff *bcsp_dequeue(struct hci_uart *hu) } /* Now, try to send a reliable pkt. We can only send a - reliable packet if the number of packets sent but not yet ack'ed - is < than the winsize */ + * reliable packet if the number of packets sent but not yet ack'ed + * is < than the winsize + */ spin_lock_irqsave_nested(&bcsp->unack.lock, flags, SINGLE_DEPTH_NESTING); @@ -332,12 +333,14 @@ static struct sk_buff *bcsp_dequeue(struct hci_uart *hu) spin_unlock_irqrestore(&bcsp->unack.lock, flags); /* We could not send a reliable packet, either because there are - none or because there are too many unack'ed pkts. Did we receive - any packets we have not acknowledged yet ? */ + * none or because there are too many unack'ed pkts. Did we receive + * any packets we have not acknowledged yet ? + */ if (bcsp->txack_req) { /* if so, craft an empty ACK pkt and send it on BCSP unreliable - channel 0 */ + * channel 0 + */ struct sk_buff *nskb = bcsp_prepare_pkt(bcsp, NULL, 0, BCSP_ACK_PKT); return nskb; } @@ -399,8 +402,9 @@ static void bcsp_pkt_cull(struct bcsp_struct *bcsp) } /* Handle BCSP link-establishment packets. When we - detect a "sync" packet, symptom that the BT module has reset, - we do nothing :) (yet) */ + * detect a "sync" packet, symptom that the BT module has reset, + * we do nothing :) (yet) + */ static void bcsp_handle_le_pkt(struct hci_uart *hu) { struct bcsp_struct *bcsp = hu->priv; @@ -462,7 +466,7 @@ static inline void bcsp_unslip_one_byte(struct bcsp_struct *bcsp, unsigned char case 0xdd: memcpy(skb_put(bcsp->rx_skb, 1), &db, 1); if ((bcsp->rx_skb->data[0] & 0x40) != 0 && - bcsp->rx_state != BCSP_W4_CRC) + bcsp->rx_state != BCSP_W4_CRC) bcsp_crc_update(&bcsp->message_crc, 0xdb); bcsp->rx_esc_state = BCSP_ESCSTATE_NOESC; bcsp->rx_count--; @@ -534,7 +538,7 @@ static void bcsp_complete_rx_pkt(struct hci_uart *hu) } else { BT_ERR("Packet for unknown channel (%u %s)", bcsp->rx_skb->data[1] & 0x0f, - bcsp->rx_skb->data[0] & 0x80 ? + bcsp->rx_skb->data[0] & 0x80 ? 
"reliable" : "unreliable"); kfree_skb(bcsp->rx_skb); } @@ -562,7 +566,7 @@ static int bcsp_recv(struct hci_uart *hu, const void *data, int count) struct bcsp_struct *bcsp = hu->priv; const unsigned char *ptr; - BT_DBG("hu %p count %d rx_state %d rx_count %ld", + BT_DBG("hu %p count %d rx_state %d rx_count %ld", hu, count, bcsp->rx_state, bcsp->rx_count); ptr = data; @@ -591,7 +595,7 @@ static int bcsp_recv(struct hci_uart *hu, const void *data, int count) continue; } if (bcsp->rx_skb->data[0] & 0x80 /* reliable pkt */ - && (bcsp->rx_skb->data[0] & 0x07) != bcsp->rxseq_txack) { + && (bcsp->rx_skb->data[0] & 0x07) != bcsp->rxseq_txack) { BT_ERR("Out-of-order packet arrived, got %u expected %u", bcsp->rx_skb->data[0] & 0x07, bcsp->rxseq_txack); @@ -601,7 +605,7 @@ static int bcsp_recv(struct hci_uart *hu, const void *data, int count) continue; } bcsp->rx_state = BCSP_W4_DATA; - bcsp->rx_count = (bcsp->rx_skb->data[1] >> 4) + + bcsp->rx_count = (bcsp->rx_skb->data[1] >> 4) + (bcsp->rx_skb->data[2] << 4); /* May be 0 */ continue; @@ -615,7 +619,7 @@ static int bcsp_recv(struct hci_uart *hu, const void *data, int count) case BCSP_W4_CRC: if (bitrev16(bcsp->message_crc) != bscp_get_crc(bcsp)) { - BT_ERR ("Checksum failed: computed %04x received %04x", + BT_ERR("Checksum failed: computed %04x received %04x", bitrev16(bcsp->message_crc), bscp_get_crc(bcsp)); @@ -653,8 +657,9 @@ static int bcsp_recv(struct hci_uart *hu, const void *data, int count) BCSP_CRC_INIT(bcsp->message_crc); /* Do not increment ptr or decrement count - * Allocate packet. Max len of a BCSP pkt= - * 0xFFF (payload) +4 (header) +2 (crc) */ + * Allocate packet. Max len of a BCSP pkt= + * 0xFFF (payload) +4 (header) +2 (crc) + */ bcsp->rx_skb = bt_skb_alloc(0x1005, GFP_ATOMIC); if (!bcsp->rx_skb) { From 498cd8e49509c761b39dab26be7f739d95940e16 Mon Sep 17 00:00:00 2001 From: John Allen Date: Wed, 6 Apr 2016 11:49:55 -0500 Subject: [PATCH 0512/1649] ibmvnic: Enable use of multiple tx/rx scrqs Enables the use of multiple transmit and receive scrqs allowing the ibmvnic driver to take advantage of multiqueue functionality. To achieve this, the driver must implement the process of negotiating the maximum number of queues allowed by the server. Initially, the driver will attempt to login with the maximum number of tx and rx queues supported by the server. If the server fails to allocate the requested number of scrqs, it will return partial success in the login response. In this case, we must reinitiate the login process from the request capabilities stage and attempt to login requesting fewer scrqs. Signed-off-by: John Allen Signed-off-by: David S. 
Miller --- drivers/net/ethernet/ibm/ibmvnic.c | 56 +++++++++++++++++++----------- drivers/net/ethernet/ibm/ibmvnic.h | 1 + 2 files changed, 37 insertions(+), 20 deletions(-) diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 21bccf6eb919..864cb21351a4 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -800,11 +800,12 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) ret = NETDEV_TX_BUSY; goto out; } - lpar_rc = send_subcrq_indirect(adapter, handle_array[0], + lpar_rc = send_subcrq_indirect(adapter, handle_array[queue_num], (u64)tx_buff->indir_dma, (u64)num_entries); } else { - lpar_rc = send_subcrq(adapter, handle_array[0], &tx_crq); + lpar_rc = send_subcrq(adapter, handle_array[queue_num], + &tx_crq); } if (lpar_rc != H_SUCCESS) { dev_err(dev, "tx failed with code %ld\n", lpar_rc); @@ -989,7 +990,7 @@ restart_poll: netdev->stats.rx_bytes += length; frames_processed++; } - replenish_pools(adapter); + replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]); if (frames_processed < budget) { enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]); @@ -1426,9 +1427,9 @@ static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry) entries_page : adapter->max_rx_add_entries_per_subcrq; /* Choosing the maximum number of queues supported by firmware*/ - adapter->req_tx_queues = adapter->min_tx_queues; - adapter->req_rx_queues = adapter->min_rx_queues; - adapter->req_rx_add_queues = adapter->min_rx_add_queues; + adapter->req_tx_queues = adapter->max_tx_queues; + adapter->req_rx_queues = adapter->max_rx_queues; + adapter->req_rx_add_queues = adapter->max_rx_add_queues; adapter->req_mtu = adapter->max_mtu; } @@ -1776,13 +1777,11 @@ static void send_login(struct ibmvnic_adapter *adapter) goto buf_map_failed; } - rsp_buffer_size = - sizeof(struct ibmvnic_login_rsp_buffer) + - sizeof(u64) * (adapter->req_tx_queues + - adapter->req_rx_queues * - adapter->req_rx_add_queues + adapter-> - req_rx_add_queues) + - sizeof(u8) * (IBMVNIC_TX_DESC_VERSIONS); + rsp_buffer_size = sizeof(struct ibmvnic_login_rsp_buffer) + + sizeof(u64) * adapter->req_tx_queues + + sizeof(u64) * adapter->req_rx_queues + + sizeof(u64) * adapter->req_rx_queues + + sizeof(u8) * IBMVNIC_TX_DESC_VERSIONS; login_rsp_buffer = kmalloc(rsp_buffer_size, GFP_ATOMIC); if (!login_rsp_buffer) @@ -2401,6 +2400,16 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq, dma_unmap_single(dev, adapter->login_rsp_buf_token, adapter->login_rsp_buf_sz, DMA_BIDIRECTIONAL); + /* If the number of queues requested can't be allocated by the + * server, the login response will return with code 1. We will need + * to resend the login buffer with fewer queues requested. 
+ */ + if (login_rsp_crq->generic.rc.code) { + adapter->renegotiate = true; + complete(&adapter->init_done); + return 0; + } + netdev_dbg(adapter->netdev, "Login Response Buffer:\n"); for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) { netdev_dbg(adapter->netdev, "%016lx\n", @@ -3628,14 +3637,21 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id) init_completion(&adapter->init_done); wait_for_completion(&adapter->init_done); - /* needed to pull init_sub_crqs outside of an interrupt context - * because it creates IRQ mappings for the subCRQ queues, causing - * a kernel warning - */ - init_sub_crqs(adapter, 0); + do { + adapter->renegotiate = false; - reinit_completion(&adapter->init_done); - wait_for_completion(&adapter->init_done); + init_sub_crqs(adapter, 0); + reinit_completion(&adapter->init_done); + wait_for_completion(&adapter->init_done); + + if (adapter->renegotiate) { + release_sub_crqs(adapter); + send_cap_queries(adapter); + + reinit_completion(&adapter->init_done); + wait_for_completion(&adapter->init_done); + } + } while (adapter->renegotiate); /* if init_sub_crqs is partially successful, retry */ while (!adapter->tx_scrq || !adapter->rx_scrq) { diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h index 5af8a796e523..0b66a506a4e4 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.h +++ b/drivers/net/ethernet/ibm/ibmvnic.h @@ -980,6 +980,7 @@ struct ibmvnic_adapter { struct ibmvnic_sub_crq_queue **tx_scrq; struct ibmvnic_sub_crq_queue **rx_scrq; int requested_caps; + bool renegotiate; /* rx structs */ struct napi_struct *napi; From 03c5b534185f9844c1b5fcfdbae2adc32821ec42 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Sat, 9 Apr 2016 08:01:13 -0700 Subject: [PATCH 0513/1649] ipv6: fix inet6_lookup_listener() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit A stupid refactoring bug in inet6_lookup_listener() needs to be fixed in order to get proper SO_REUSEPORT behavior. Fixes: 3b24d854cb35 ("tcp/dccp: do not touch listener sk_refcnt under synflood") Signed-off-by: Eric Dumazet Reported-by: Maciej Żenczykowski Signed-off-by: David S. Miller --- net/ipv6/inet6_hashtables.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c index 607da088344d..f1678388fb0d 100644 --- a/net/ipv6/inet6_hashtables.c +++ b/net/ipv6/inet6_hashtables.c @@ -137,7 +137,7 @@ struct sock *inet6_lookup_listener(struct net *net, sk_for_each(sk, &ilb->head) { score = compute_score(sk, net, hnum, daddr, dif); if (score > hiscore) { - hiscore = score; + reuseport = sk->sk_reuseport; if (reuseport) { phash = inet6_ehashfn(net, daddr, hnum, saddr, sport); @@ -148,7 +148,7 @@ struct sock *inet6_lookup_listener(struct net *net, matches = 1; } result = sk; - reuseport = sk->sk_reuseport; + hiscore = score; } else if (score == hiscore && reuseport) { matches++; if (reciprocal_scale(phash, matches) == 0) From e997ebbe46fe46bd4e8d476adca1f9b76779f270 Mon Sep 17 00:00:00 2001 From: Michael Thalmeier Date: Fri, 25 Mar 2016 15:46:51 +0100 Subject: [PATCH 0514/1649] NFC: pn533: Send ATR_REQ only if NFC_PROTO_NFC_DEP bit is set Currently it is not possible to only poll for passive targets with the pn533 driver. To change this ATR_REQ is only sent when NFC_PROTO_NFC_DEP is explicitly requested in poll_protocols. As most implementations (e.g. 
neard) poll for all protocols that are reported to be supported by the adapter, this should not have much of an effect on current implementations. Signed-off-by: Michael Thalmeier Signed-off-by: Samuel Ortiz --- drivers/nfc/pn533.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/nfc/pn533.c b/drivers/nfc/pn533.c index bb3d5ea9869c..a85830fcafd0 100644 --- a/drivers/nfc/pn533.c +++ b/drivers/nfc/pn533.c @@ -1540,7 +1540,8 @@ static int pn533_start_poll_complete(struct pn533 *dev, struct sk_buff *resp) int rc, tgdata_len; /* Toggle the DEP polling */ - dev->poll_dep = 1; + if (dev->poll_protocols & NFC_PROTO_NFC_DEP_MASK) + dev->poll_dep = 1; nbtg = resp->data[0]; tg = resp->data[1]; @@ -2054,7 +2055,7 @@ static int pn533_send_poll_frame(struct pn533 *dev) dev_dbg(&dev->interface->dev, "%s mod len %d\n", __func__, mod->len); - if (dev->poll_dep) { + if ((dev->poll_protocols & NFC_PROTO_NFC_DEP_MASK) && dev->poll_dep) { dev->poll_dep = 0; return pn533_poll_dep(dev->nfc_dev); } From 37f895d7e85e7d7e23e2395e666ea43001862e5f Mon Sep 17 00:00:00 2001 From: Michael Thalmeier Date: Fri, 25 Mar 2016 15:46:52 +0100 Subject: [PATCH 0515/1649] NFC: pn533: Fix socket deadlock A deadlock can occur when the NFC raw socket is closed while the driver is processing a command. Following is the call graph of the affected situation: send data via raw_sock: ------------- rawsock_tx_work sock_hold => socket refcnt++ nfc_data_exchange => cb = rawsock_data_exchange_complete ops->im_transceive = pn533_transceive => arg->cb = db = rawsock_data_exchange_complete pn533_send_data_async => cb = pn533_data_exchange_complete __pn533_send_async => cmd->complete_cb = cb = pn533_data_exchange_complete if_ops->send_frame_async response: -------- pn533_recv_response queue_work(priv->wq, &priv->cmd_complete_work) pn533_wq_cmd_complete pn533_send_async_complete cmd->complete_cb() = pn533_data_exchange_complete() arg->cb() = rawsock_data_exchange_complete() sock_put => socket refcnt-- => If the corresponding socket gets closed in the meantime socket will be destructed sk_free __sk_free sk->sk_destruct = rawsock_destruct nfc_deactivate_target ops->deactivate_target = pn533_deactivate_target pn533_send_cmd_sync pn533_send_cmd_async __pn533_send_async list_add_tail(&cmd->queue,&dev->cmd_queue) => add to command list because a command is currently processed wait_for_completion => the workqueue thread waits here because it is the one processing the commands => deadlock To fix the deadlock pn533_deactivate_target is changed to issue the PN533_CMD_IN_RELEASE command in async mode. This way nothing blocks and the release command is executed after the current command. 
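As an illustration of the pattern the fix relies on, here is a minimal sketch using hypothetical demo_* names rather than the driver's real symbols: the release command is handed to the asynchronous command queue together with a completion callback and the caller returns at once, so nothing running on the command workqueue ever sleeps in wait_for_completion() for a command that the same worker thread would have to process.

/* Sketch only: demo_dev and demo_send_cmd_async are made-up stand-ins for
 * the driver's internals. The point is the shape of the call: build the
 * request, queue it with a callback, and return without waiting.
 */
#include <linux/err.h>
#include <linux/skbuff.h>
#include <linux/types.h>

struct demo_dev;	/* hypothetical device context */

typedef int (*demo_complete_t)(struct demo_dev *dev, void *arg,
			       struct sk_buff *resp);

/* Assumed helper: queues the command and returns immediately; @cb runs
 * later from the command-completion work item.
 */
int demo_send_cmd_async(struct demo_dev *dev, u8 cmd, struct sk_buff *req,
			demo_complete_t cb, void *arg);

static int demo_release_complete(struct demo_dev *dev, void *arg,
				 struct sk_buff *resp)
{
	if (IS_ERR(resp))
		return PTR_ERR(resp);	/* transport error, nothing to free */

	/* a real driver would check the status byte in resp->data[0] here */
	dev_kfree_skb(resp);
	return 0;
}

static void demo_release_target(struct demo_dev *dev, struct sk_buff *req)
{
	/* Safe from the socket-destruction path: no wait_for_completion() */
	if (demo_send_cmd_async(dev, 0x52 /* IN_RELEASE */, req,
				demo_release_complete, NULL) < 0)
		dev_kfree_skb(req);
}

This mirrors what the hunk below does with pn533_send_cmd_async() and pn533_deactivate_target_complete().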
Signed-off-by: Michael Thalmeier Signed-off-by: Samuel Ortiz --- drivers/nfc/pn533.c | 40 ++++++++++++++++++++++++++++++---------- 1 file changed, 30 insertions(+), 10 deletions(-) diff --git a/drivers/nfc/pn533.c b/drivers/nfc/pn533.c index a85830fcafd0..074f1e42e378 100644 --- a/drivers/nfc/pn533.c +++ b/drivers/nfc/pn533.c @@ -2263,12 +2263,35 @@ static int pn533_activate_target(struct nfc_dev *nfc_dev, return 0; } +static int pn533_deactivate_target_complete(struct pn533 *dev, void *arg, + struct sk_buff *resp) +{ + int rc = 0; + + dev_dbg(&dev->interface->dev, "%s\n", __func__); + + if (IS_ERR(resp)) { + rc = PTR_ERR(resp); + + nfc_err(&dev->interface->dev, "Target release error %d\n", rc); + + return rc; + } + + rc = resp->data[0] & PN533_CMD_RET_MASK; + if (rc != PN533_CMD_RET_SUCCESS) + nfc_err(&dev->interface->dev, + "Error 0x%x when releasing the target\n", rc); + + dev_kfree_skb(resp); + return rc; +} + static void pn533_deactivate_target(struct nfc_dev *nfc_dev, struct nfc_target *target, u8 mode) { struct pn533 *dev = nfc_get_drvdata(nfc_dev); struct sk_buff *skb; - struct sk_buff *resp; int rc; dev_dbg(&dev->interface->dev, "%s\n", __func__); @@ -2287,16 +2310,13 @@ static void pn533_deactivate_target(struct nfc_dev *nfc_dev, *skb_put(skb, 1) = 1; /* TG*/ - resp = pn533_send_cmd_sync(dev, PN533_CMD_IN_RELEASE, skb); - if (IS_ERR(resp)) - return; + rc = pn533_send_cmd_async(dev, PN533_CMD_IN_RELEASE, skb, + pn533_deactivate_target_complete, NULL); + if (rc < 0) { + dev_kfree_skb(skb); + nfc_err(&dev->interface->dev, "Target release error %d\n", rc); + } - rc = resp->data[0] & PN533_CMD_RET_MASK; - if (rc != PN533_CMD_RET_SUCCESS) - nfc_err(&dev->interface->dev, - "Error 0x%x when releasing the target\n", rc); - - dev_kfree_skb(resp); return; } From 9815c7cf22daceabfb919ddcd6f2c80e049c1fbc Mon Sep 17 00:00:00 2001 From: Michael Thalmeier Date: Fri, 25 Mar 2016 15:46:53 +0100 Subject: [PATCH 0516/1649] NFC: pn533: Separate physical layer from the core implementation The driver now has all core stuff isolated in one file, and all the hardware link specifics in another. Writing a pn533 driver on top of another hardware link is now just a matter of adding a new file for that new hardware specifics. The first user of this separation will be the i2c based pn532 driver that reuses pn533 core implementation on top of an i2c layer. Signed-off-by: Michael Thalmeier Signed-off-by: Samuel Ortiz --- drivers/nfc/Kconfig | 11 +- drivers/nfc/Makefile | 2 +- drivers/nfc/pn533/Kconfig | 16 + drivers/nfc/pn533/Makefile | 7 + drivers/nfc/{ => pn533}/pn533.c | 1130 ++++++------------------------- drivers/nfc/pn533/pn533.h | 235 +++++++ drivers/nfc/pn533/usb.c | 598 ++++++++++++++++ 7 files changed, 1078 insertions(+), 921 deletions(-) create mode 100644 drivers/nfc/pn533/Kconfig create mode 100644 drivers/nfc/pn533/Makefile rename drivers/nfc/{ => pn533}/pn533.c (67%) create mode 100644 drivers/nfc/pn533/pn533.h create mode 100644 drivers/nfc/pn533/usb.c diff --git a/drivers/nfc/Kconfig b/drivers/nfc/Kconfig index 7437c9dfd8fc..ea8321a483f9 100644 --- a/drivers/nfc/Kconfig +++ b/drivers/nfc/Kconfig @@ -5,16 +5,6 @@ menu "Near Field Communication (NFC) devices" depends on NFC -config NFC_PN533 - tristate "NXP PN533 USB driver" - depends on USB - help - NXP PN533 USB driver. - This driver provides support for NFC NXP PN533 devices. - - Say Y here to compile support for PN533 devices into the - kernel or say M to compile it as module (pn533). 
- config NFC_WILINK tristate "Texas Instruments NFC WiLink driver" depends on TI_ST && NFC_NCI @@ -70,6 +60,7 @@ config NFC_PORT100 source "drivers/nfc/fdp/Kconfig" source "drivers/nfc/pn544/Kconfig" +source "drivers/nfc/pn533/Kconfig" source "drivers/nfc/microread/Kconfig" source "drivers/nfc/nfcmrvl/Kconfig" source "drivers/nfc/st21nfca/Kconfig" diff --git a/drivers/nfc/Makefile b/drivers/nfc/Makefile index 0a99e67daa10..bab8ef06ae35 100644 --- a/drivers/nfc/Makefile +++ b/drivers/nfc/Makefile @@ -5,7 +5,7 @@ obj-$(CONFIG_NFC_FDP) += fdp/ obj-$(CONFIG_NFC_PN544) += pn544/ obj-$(CONFIG_NFC_MICROREAD) += microread/ -obj-$(CONFIG_NFC_PN533) += pn533.o +obj-$(CONFIG_NFC_PN533) += pn533/ obj-$(CONFIG_NFC_WILINK) += nfcwilink.o obj-$(CONFIG_NFC_MEI_PHY) += mei_phy.o obj-$(CONFIG_NFC_SIM) += nfcsim.o diff --git a/drivers/nfc/pn533/Kconfig b/drivers/nfc/pn533/Kconfig new file mode 100644 index 000000000000..b5a926e42f7b --- /dev/null +++ b/drivers/nfc/pn533/Kconfig @@ -0,0 +1,16 @@ +config NFC_PN533 + tristate + help + NXP PN533 core driver. + This driver provides core functionality for NXP PN533 NFC devices. + +config NFC_PN533_USB + tristate "NFC PN533 device support (USB)" + depends on USB + select NFC_PN533 + ---help--- + This module adds support for the NXP pn533 USB interface. + Select this if your platform is using the USB bus. + + If you choose to build a module, it'll be called pn533_usb. + Say N if unsure. diff --git a/drivers/nfc/pn533/Makefile b/drivers/nfc/pn533/Makefile new file mode 100644 index 000000000000..12c6be481483 --- /dev/null +++ b/drivers/nfc/pn533/Makefile @@ -0,0 +1,7 @@ +# +# Makefile for PN533 NFC driver +# +pn533_usb-objs = usb.o + +obj-$(CONFIG_NFC_PN533) += pn533.o +obj-$(CONFIG_NFC_PN533_USB) += pn533_usb.o diff --git a/drivers/nfc/pn533.c b/drivers/nfc/pn533/pn533.c similarity index 67% rename from drivers/nfc/pn533.c rename to drivers/nfc/pn533/pn533.c index 074f1e42e378..52d83fec5add 100644 --- a/drivers/nfc/pn533.c +++ b/drivers/nfc/pn533/pn533.c @@ -1,4 +1,6 @@ /* + * Driver for NXP PN533 NFC Chip - core functions + * * Copyright (C) 2011 Instituto Nokia de Tecnologia * Copyright (C) 2012-2013 Tieto Poland * @@ -20,137 +22,18 @@ #include #include #include -#include #include #include #include +#include "pn533.h" -#define VERSION "0.2" - -#define PN533_VENDOR_ID 0x4CC -#define PN533_PRODUCT_ID 0x2533 - -#define SCM_VENDOR_ID 0x4E6 -#define SCL3711_PRODUCT_ID 0x5591 - -#define SONY_VENDOR_ID 0x054c -#define PASORI_PRODUCT_ID 0x02e1 - -#define ACS_VENDOR_ID 0x072f -#define ACR122U_PRODUCT_ID 0x2200 - -#define PN533_DEVICE_STD 0x1 -#define PN533_DEVICE_PASORI 0x2 -#define PN533_DEVICE_ACR122U 0x3 - -#define PN533_ALL_PROTOCOLS (NFC_PROTO_JEWEL_MASK | NFC_PROTO_MIFARE_MASK |\ - NFC_PROTO_FELICA_MASK | NFC_PROTO_ISO14443_MASK |\ - NFC_PROTO_NFC_DEP_MASK |\ - NFC_PROTO_ISO14443_B_MASK) - -#define PN533_NO_TYPE_B_PROTOCOLS (NFC_PROTO_JEWEL_MASK | \ - NFC_PROTO_MIFARE_MASK | \ - NFC_PROTO_FELICA_MASK | \ - NFC_PROTO_ISO14443_MASK | \ - NFC_PROTO_NFC_DEP_MASK) - -static const struct usb_device_id pn533_table[] = { - { USB_DEVICE(PN533_VENDOR_ID, PN533_PRODUCT_ID), - .driver_info = PN533_DEVICE_STD }, - { USB_DEVICE(SCM_VENDOR_ID, SCL3711_PRODUCT_ID), - .driver_info = PN533_DEVICE_STD }, - { USB_DEVICE(SONY_VENDOR_ID, PASORI_PRODUCT_ID), - .driver_info = PN533_DEVICE_PASORI }, - { USB_DEVICE(ACS_VENDOR_ID, ACR122U_PRODUCT_ID), - .driver_info = PN533_DEVICE_ACR122U }, - { } -}; -MODULE_DEVICE_TABLE(usb, pn533_table); +#define VERSION "0.3" /* How much time we spend 
listening for initiators */ #define PN533_LISTEN_TIME 2 /* Delay between each poll frame (ms) */ #define PN533_POLL_INTERVAL 10 -/* Standard pn533 frame definitions (standard and extended)*/ -#define PN533_STD_FRAME_HEADER_LEN (sizeof(struct pn533_std_frame) \ - + 2) /* data[0] TFI, data[1] CC */ -#define PN533_STD_FRAME_TAIL_LEN 2 /* data[len] DCS, data[len + 1] postamble*/ - -#define PN533_EXT_FRAME_HEADER_LEN (sizeof(struct pn533_ext_frame) \ - + 2) /* data[0] TFI, data[1] CC */ - -#define PN533_CMD_DATAEXCH_DATA_MAXLEN 262 -#define PN533_CMD_DATAFRAME_MAXLEN 240 /* max data length (send) */ - -/* - * Max extended frame payload len, excluding TFI and CC - * which are already in PN533_FRAME_HEADER_LEN. - */ -#define PN533_STD_FRAME_MAX_PAYLOAD_LEN 263 - -#define PN533_STD_FRAME_ACK_SIZE 6 /* Preamble (1), SoPC (2), ACK Code (2), - Postamble (1) */ -#define PN533_STD_FRAME_CHECKSUM(f) (f->data[f->datalen]) -#define PN533_STD_FRAME_POSTAMBLE(f) (f->data[f->datalen + 1]) -/* Half start code (3), LEN (4) should be 0xffff for extended frame */ -#define PN533_STD_IS_EXTENDED(hdr) ((hdr)->datalen == 0xFF \ - && (hdr)->datalen_checksum == 0xFF) -#define PN533_EXT_FRAME_CHECKSUM(f) (f->data[be16_to_cpu(f->datalen)]) - -/* start of frame */ -#define PN533_STD_FRAME_SOF 0x00FF - -/* standard frame identifier: in/out/error */ -#define PN533_STD_FRAME_IDENTIFIER(f) (f->data[0]) /* TFI */ -#define PN533_STD_FRAME_DIR_OUT 0xD4 -#define PN533_STD_FRAME_DIR_IN 0xD5 - -/* ACS ACR122 pn533 frame definitions */ -#define PN533_ACR122_TX_FRAME_HEADER_LEN (sizeof(struct pn533_acr122_tx_frame) \ - + 2) -#define PN533_ACR122_TX_FRAME_TAIL_LEN 0 -#define PN533_ACR122_RX_FRAME_HEADER_LEN (sizeof(struct pn533_acr122_rx_frame) \ - + 2) -#define PN533_ACR122_RX_FRAME_TAIL_LEN 2 -#define PN533_ACR122_FRAME_MAX_PAYLOAD_LEN PN533_STD_FRAME_MAX_PAYLOAD_LEN - -/* CCID messages types */ -#define PN533_ACR122_PC_TO_RDR_ICCPOWERON 0x62 -#define PN533_ACR122_PC_TO_RDR_ESCAPE 0x6B - -#define PN533_ACR122_RDR_TO_PC_ESCAPE 0x83 - -/* PN533 Commands */ -#define PN533_FRAME_CMD(f) (f->data[1]) - -#define PN533_CMD_GET_FIRMWARE_VERSION 0x02 -#define PN533_CMD_RF_CONFIGURATION 0x32 -#define PN533_CMD_IN_DATA_EXCHANGE 0x40 -#define PN533_CMD_IN_COMM_THRU 0x42 -#define PN533_CMD_IN_LIST_PASSIVE_TARGET 0x4A -#define PN533_CMD_IN_ATR 0x50 -#define PN533_CMD_IN_RELEASE 0x52 -#define PN533_CMD_IN_JUMP_FOR_DEP 0x56 - -#define PN533_CMD_TG_INIT_AS_TARGET 0x8c -#define PN533_CMD_TG_GET_DATA 0x86 -#define PN533_CMD_TG_SET_DATA 0x8e -#define PN533_CMD_TG_SET_META_DATA 0x94 -#define PN533_CMD_UNDEF 0xff - -#define PN533_CMD_RESPONSE(cmd) (cmd + 1) - -/* PN533 Return codes */ -#define PN533_CMD_RET_MASK 0x3F -#define PN533_CMD_MI_MASK 0x40 -#define PN533_CMD_RET_SUCCESS 0x00 - -struct pn533; - -typedef int (*pn533_send_async_complete_t) (struct pn533 *dev, void *arg, - struct sk_buff *resp); - /* structs for pn533 commands */ /* PN533_CMD_GET_FIRMWARE_VERSION */ @@ -220,19 +103,6 @@ union pn533_cmd_poll_initdata { } __packed felica; }; -/* Poll modulations */ -enum { - PN533_POLL_MOD_106KBPS_A, - PN533_POLL_MOD_212KBPS_FELICA, - PN533_POLL_MOD_424KBPS_FELICA, - PN533_POLL_MOD_106KBPS_JEWEL, - PN533_POLL_MOD_847KBPS_B, - PN533_LISTEN_MOD, - - __PN533_POLL_MOD_AFTER_LAST, -}; -#define PN533_POLL_MOD_MAX (__PN533_POLL_MOD_AFTER_LAST - 1) - struct pn533_poll_modulations { struct { u8 maxtg; @@ -336,219 +206,6 @@ struct pn533_cmd_jump_dep_response { #define PN533_INIT_TARGET_RESP_ACTIVE 0x1 #define PN533_INIT_TARGET_RESP_DEP 0x4 -enum 
pn533_protocol_type { - PN533_PROTO_REQ_ACK_RESP = 0, - PN533_PROTO_REQ_RESP -}; - -struct pn533 { - struct usb_device *udev; - struct usb_interface *interface; - struct nfc_dev *nfc_dev; - u32 device_type; - enum pn533_protocol_type protocol_type; - - struct urb *out_urb; - struct urb *in_urb; - - struct sk_buff_head resp_q; - struct sk_buff_head fragment_skb; - - struct workqueue_struct *wq; - struct work_struct cmd_work; - struct work_struct cmd_complete_work; - struct delayed_work poll_work; - struct work_struct mi_rx_work; - struct work_struct mi_tx_work; - struct work_struct mi_tm_rx_work; - struct work_struct mi_tm_tx_work; - struct work_struct tg_work; - struct work_struct rf_work; - - struct list_head cmd_queue; - struct pn533_cmd *cmd; - u8 cmd_pending; - struct mutex cmd_lock; /* protects cmd queue */ - - void *cmd_complete_mi_arg; - void *cmd_complete_dep_arg; - - struct pn533_poll_modulations *poll_mod_active[PN533_POLL_MOD_MAX + 1]; - u8 poll_mod_count; - u8 poll_mod_curr; - u8 poll_dep; - u32 poll_protocols; - u32 listen_protocols; - struct timer_list listen_timer; - int cancel_listen; - - u8 *gb; - size_t gb_len; - - u8 tgt_available_prots; - u8 tgt_active_prot; - u8 tgt_mode; - - struct pn533_frame_ops *ops; -}; - -struct pn533_cmd { - struct list_head queue; - u8 code; - int status; - struct sk_buff *req; - struct sk_buff *resp; - int resp_len; - pn533_send_async_complete_t complete_cb; - void *complete_cb_context; -}; - -struct pn533_std_frame { - u8 preamble; - __be16 start_frame; - u8 datalen; - u8 datalen_checksum; - u8 data[]; -} __packed; - -struct pn533_ext_frame { /* Extended Information frame */ - u8 preamble; - __be16 start_frame; - __be16 eif_flag; /* fixed to 0xFFFF */ - __be16 datalen; - u8 datalen_checksum; - u8 data[]; -} __packed; - -struct pn533_frame_ops { - void (*tx_frame_init)(void *frame, u8 cmd_code); - void (*tx_frame_finish)(void *frame); - void (*tx_update_payload_len)(void *frame, int len); - int tx_header_len; - int tx_tail_len; - - bool (*rx_is_frame_valid)(void *frame, struct pn533 *dev); - int (*rx_frame_size)(void *frame); - int rx_header_len; - int rx_tail_len; - - int max_payload_len; - u8 (*get_cmd_code)(void *frame); -}; - -struct pn533_acr122_ccid_hdr { - u8 type; - u32 datalen; - u8 slot; - u8 seq; - u8 params[3]; /* 3 msg specific bytes or status, error and 1 specific - byte for reposnse msg */ - u8 data[]; /* payload */ -} __packed; - -struct pn533_acr122_apdu_hdr { - u8 class; - u8 ins; - u8 p1; - u8 p2; -} __packed; - -struct pn533_acr122_tx_frame { - struct pn533_acr122_ccid_hdr ccid; - struct pn533_acr122_apdu_hdr apdu; - u8 datalen; - u8 data[]; /* pn533 frame: TFI ... */ -} __packed; - -struct pn533_acr122_rx_frame { - struct pn533_acr122_ccid_hdr ccid; - u8 data[]; /* pn533 frame : TFI ... 
*/ -} __packed; - -static void pn533_acr122_tx_frame_init(void *_frame, u8 cmd_code) -{ - struct pn533_acr122_tx_frame *frame = _frame; - - frame->ccid.type = PN533_ACR122_PC_TO_RDR_ESCAPE; - frame->ccid.datalen = sizeof(frame->apdu) + 1; /* sizeof(apdu_hdr) + - sizeof(datalen) */ - frame->ccid.slot = 0; - frame->ccid.seq = 0; - frame->ccid.params[0] = 0; - frame->ccid.params[1] = 0; - frame->ccid.params[2] = 0; - - frame->data[0] = PN533_STD_FRAME_DIR_OUT; - frame->data[1] = cmd_code; - frame->datalen = 2; /* data[0] + data[1] */ - - frame->apdu.class = 0xFF; - frame->apdu.ins = 0; - frame->apdu.p1 = 0; - frame->apdu.p2 = 0; -} - -static void pn533_acr122_tx_frame_finish(void *_frame) -{ - struct pn533_acr122_tx_frame *frame = _frame; - - frame->ccid.datalen += frame->datalen; -} - -static void pn533_acr122_tx_update_payload_len(void *_frame, int len) -{ - struct pn533_acr122_tx_frame *frame = _frame; - - frame->datalen += len; -} - -static bool pn533_acr122_is_rx_frame_valid(void *_frame, struct pn533 *dev) -{ - struct pn533_acr122_rx_frame *frame = _frame; - - if (frame->ccid.type != 0x83) - return false; - - if (!frame->ccid.datalen) - return false; - - if (frame->data[frame->ccid.datalen - 2] == 0x63) - return false; - - return true; -} - -static int pn533_acr122_rx_frame_size(void *frame) -{ - struct pn533_acr122_rx_frame *f = frame; - - /* f->ccid.datalen already includes tail length */ - return sizeof(struct pn533_acr122_rx_frame) + f->ccid.datalen; -} - -static u8 pn533_acr122_get_cmd_code(void *frame) -{ - struct pn533_acr122_rx_frame *f = frame; - - return PN533_FRAME_CMD(f); -} - -static struct pn533_frame_ops pn533_acr122_frame_ops = { - .tx_frame_init = pn533_acr122_tx_frame_init, - .tx_frame_finish = pn533_acr122_tx_frame_finish, - .tx_update_payload_len = pn533_acr122_tx_update_payload_len, - .tx_header_len = PN533_ACR122_TX_FRAME_HEADER_LEN, - .tx_tail_len = PN533_ACR122_TX_FRAME_TAIL_LEN, - - .rx_is_frame_valid = pn533_acr122_is_rx_frame_valid, - .rx_header_len = PN533_ACR122_RX_FRAME_HEADER_LEN, - .rx_tail_len = PN533_ACR122_RX_FRAME_TAIL_LEN, - .rx_frame_size = pn533_acr122_rx_frame_size, - - .max_payload_len = PN533_ACR122_FRAME_MAX_PAYLOAD_LEN, - .get_cmd_code = pn533_acr122_get_cmd_code, -}; - /* The rule: value(high byte) + value(low byte) + checksum = 0 */ static inline u8 pn533_ext_checksum(u16 value) { @@ -642,8 +299,10 @@ static bool pn533_std_rx_frame_is_valid(void *_frame, struct pn533 *dev) return true; } -static bool pn533_std_rx_frame_is_ack(struct pn533_std_frame *frame) +bool pn533_rx_frame_is_ack(void *_frame) { + struct pn533_std_frame *frame = _frame; + if (frame->start_frame != cpu_to_be16(PN533_STD_FRAME_SOF)) return false; @@ -652,6 +311,7 @@ static bool pn533_std_rx_frame_is_ack(struct pn533_std_frame *frame) return true; } +EXPORT_SYMBOL_GPL(pn533_rx_frame_is_ack); static inline int pn533_std_rx_frame_size(void *frame) { @@ -680,6 +340,14 @@ static u8 pn533_std_get_cmd_code(void *frame) return PN533_FRAME_CMD(f); } +bool pn533_rx_frame_is_cmd_response(struct pn533 *dev, void *frame) +{ + return (dev->ops->get_cmd_code(frame) == + PN533_CMD_RESPONSE(dev->cmd->code)); +} +EXPORT_SYMBOL_GPL(pn533_rx_frame_is_cmd_response); + + static struct pn533_frame_ops pn533_std_frame_ops = { .tx_frame_init = pn533_std_tx_frame_init, .tx_frame_finish = pn533_std_tx_frame_finish, @@ -696,172 +364,6 @@ static struct pn533_frame_ops pn533_std_frame_ops = { .get_cmd_code = pn533_std_get_cmd_code, }; -static bool pn533_rx_frame_is_cmd_response(struct pn533 *dev, void 
*frame) -{ - return (dev->ops->get_cmd_code(frame) == - PN533_CMD_RESPONSE(dev->cmd->code)); -} - -static void pn533_recv_response(struct urb *urb) -{ - struct pn533 *dev = urb->context; - struct pn533_cmd *cmd = dev->cmd; - u8 *in_frame; - - cmd->status = urb->status; - - switch (urb->status) { - case 0: - break; /* success */ - case -ECONNRESET: - case -ENOENT: - dev_dbg(&dev->interface->dev, - "The urb has been canceled (status %d)\n", - urb->status); - goto sched_wq; - case -ESHUTDOWN: - default: - nfc_err(&dev->interface->dev, - "Urb failure (status %d)\n", urb->status); - goto sched_wq; - } - - in_frame = dev->in_urb->transfer_buffer; - - dev_dbg(&dev->interface->dev, "Received a frame\n"); - print_hex_dump_debug("PN533 RX: ", DUMP_PREFIX_NONE, 16, 1, in_frame, - dev->ops->rx_frame_size(in_frame), false); - - if (!dev->ops->rx_is_frame_valid(in_frame, dev)) { - nfc_err(&dev->interface->dev, "Received an invalid frame\n"); - cmd->status = -EIO; - goto sched_wq; - } - - if (!pn533_rx_frame_is_cmd_response(dev, in_frame)) { - nfc_err(&dev->interface->dev, - "It it not the response to the last command\n"); - cmd->status = -EIO; - goto sched_wq; - } - -sched_wq: - queue_work(dev->wq, &dev->cmd_complete_work); -} - -static int pn533_submit_urb_for_response(struct pn533 *dev, gfp_t flags) -{ - dev->in_urb->complete = pn533_recv_response; - - return usb_submit_urb(dev->in_urb, flags); -} - -static void pn533_recv_ack(struct urb *urb) -{ - struct pn533 *dev = urb->context; - struct pn533_cmd *cmd = dev->cmd; - struct pn533_std_frame *in_frame; - int rc; - - cmd->status = urb->status; - - switch (urb->status) { - case 0: - break; /* success */ - case -ECONNRESET: - case -ENOENT: - dev_dbg(&dev->interface->dev, - "The urb has been stopped (status %d)\n", - urb->status); - goto sched_wq; - case -ESHUTDOWN: - default: - nfc_err(&dev->interface->dev, - "Urb failure (status %d)\n", urb->status); - goto sched_wq; - } - - in_frame = dev->in_urb->transfer_buffer; - - if (!pn533_std_rx_frame_is_ack(in_frame)) { - nfc_err(&dev->interface->dev, "Received an invalid ack\n"); - cmd->status = -EIO; - goto sched_wq; - } - - rc = pn533_submit_urb_for_response(dev, GFP_ATOMIC); - if (rc) { - nfc_err(&dev->interface->dev, - "usb_submit_urb failed with result %d\n", rc); - cmd->status = rc; - goto sched_wq; - } - - return; - -sched_wq: - queue_work(dev->wq, &dev->cmd_complete_work); -} - -static int pn533_submit_urb_for_ack(struct pn533 *dev, gfp_t flags) -{ - dev->in_urb->complete = pn533_recv_ack; - - return usb_submit_urb(dev->in_urb, flags); -} - -static int pn533_send_ack(struct pn533 *dev, gfp_t flags) -{ - u8 ack[PN533_STD_FRAME_ACK_SIZE] = {0x00, 0x00, 0xff, 0x00, 0xff, 0x00}; - /* spec 7.1.1.3: Preamble, SoPC (2), ACK Code (2), Postamble */ - int rc; - - dev->out_urb->transfer_buffer = ack; - dev->out_urb->transfer_buffer_length = sizeof(ack); - rc = usb_submit_urb(dev->out_urb, flags); - - return rc; -} - -static int __pn533_send_frame_async(struct pn533 *dev, - struct sk_buff *out, - struct sk_buff *in, - int in_len) -{ - int rc; - - dev->out_urb->transfer_buffer = out->data; - dev->out_urb->transfer_buffer_length = out->len; - - dev->in_urb->transfer_buffer = in->data; - dev->in_urb->transfer_buffer_length = in_len; - - print_hex_dump_debug("PN533 TX: ", DUMP_PREFIX_NONE, 16, 1, - out->data, out->len, false); - - rc = usb_submit_urb(dev->out_urb, GFP_KERNEL); - if (rc) - return rc; - - if (dev->protocol_type == PN533_PROTO_REQ_RESP) { - /* request for response for sent packet directly */ - rc = 
pn533_submit_urb_for_response(dev, GFP_ATOMIC); - if (rc) - goto error; - } else if (dev->protocol_type == PN533_PROTO_REQ_ACK_RESP) { - /* request for ACK if that's the case */ - rc = pn533_submit_urb_for_ack(dev, GFP_KERNEL); - if (rc) - goto error; - } - - return 0; - -error: - usb_unlink_urb(dev->out_urb); - return rc; -} - static void pn533_build_cmd_frame(struct pn533 *dev, u8 cmd_code, struct sk_buff *skb) { @@ -897,7 +399,6 @@ static int pn533_send_async_complete(struct pn533 *dev) goto done; } - skb_put(resp, dev->ops->rx_frame_size(resp->data)); skb_pull(resp, dev->ops->rx_header_len); skb_trim(resp, resp->len - dev->ops->rx_tail_len); @@ -910,15 +411,14 @@ done: } static int __pn533_send_async(struct pn533 *dev, u8 cmd_code, - struct sk_buff *req, struct sk_buff *resp, - int resp_len, + struct sk_buff *req, pn533_send_async_complete_t complete_cb, void *complete_cb_context) { struct pn533_cmd *cmd; int rc = 0; - dev_dbg(&dev->interface->dev, "Sending command 0x%x\n", cmd_code); + dev_dbg(dev->dev, "Sending command 0x%x\n", cmd_code); cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); if (!cmd) @@ -926,8 +426,6 @@ static int __pn533_send_async(struct pn533 *dev, u8 cmd_code, cmd->code = cmd_code; cmd->req = req; - cmd->resp = resp; - cmd->resp_len = resp_len; cmd->complete_cb = complete_cb; cmd->complete_cb_context = complete_cb_context; @@ -936,7 +434,7 @@ static int __pn533_send_async(struct pn533 *dev, u8 cmd_code, mutex_lock(&dev->cmd_lock); if (!dev->cmd_pending) { - rc = __pn533_send_frame_async(dev, req, resp, resp_len); + rc = dev->phy_ops->send_frame(dev, req); if (rc) goto error; @@ -945,7 +443,7 @@ static int __pn533_send_async(struct pn533 *dev, u8 cmd_code, goto unlock; } - dev_dbg(&dev->interface->dev, "%s Queueing command 0x%x\n", + dev_dbg(dev->dev, "%s Queueing command 0x%x\n", __func__, cmd_code); INIT_LIST_HEAD(&cmd->queue); @@ -965,20 +463,10 @@ static int pn533_send_data_async(struct pn533 *dev, u8 cmd_code, pn533_send_async_complete_t complete_cb, void *complete_cb_context) { - struct sk_buff *resp; int rc; - int resp_len = dev->ops->rx_header_len + - dev->ops->max_payload_len + - dev->ops->rx_tail_len; - resp = nfc_alloc_recv_skb(resp_len, GFP_KERNEL); - if (!resp) - return -ENOMEM; - - rc = __pn533_send_async(dev, cmd_code, req, resp, resp_len, complete_cb, + rc = __pn533_send_async(dev, cmd_code, req, complete_cb, complete_cb_context); - if (rc) - dev_kfree_skb(resp); return rc; } @@ -988,20 +476,10 @@ static int pn533_send_cmd_async(struct pn533 *dev, u8 cmd_code, pn533_send_async_complete_t complete_cb, void *complete_cb_context) { - struct sk_buff *resp; int rc; - int resp_len = dev->ops->rx_header_len + - dev->ops->max_payload_len + - dev->ops->rx_tail_len; - resp = alloc_skb(resp_len, GFP_KERNEL); - if (!resp) - return -ENOMEM; - - rc = __pn533_send_async(dev, cmd_code, req, resp, resp_len, complete_cb, + rc = __pn533_send_async(dev, cmd_code, req, complete_cb, complete_cb_context); - if (rc) - dev_kfree_skb(resp); return rc; } @@ -1019,39 +497,25 @@ static int pn533_send_cmd_direct_async(struct pn533 *dev, u8 cmd_code, pn533_send_async_complete_t complete_cb, void *complete_cb_context) { - struct sk_buff *resp; struct pn533_cmd *cmd; int rc; - int resp_len = dev->ops->rx_header_len + - dev->ops->max_payload_len + - dev->ops->rx_tail_len; - - resp = alloc_skb(resp_len, GFP_KERNEL); - if (!resp) - return -ENOMEM; cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); - if (!cmd) { - dev_kfree_skb(resp); + if (!cmd) return -ENOMEM; - } cmd->code = cmd_code; cmd->req = req; - 
cmd->resp = resp; - cmd->resp_len = resp_len; cmd->complete_cb = complete_cb; cmd->complete_cb_context = complete_cb_context; pn533_build_cmd_frame(dev, cmd_code, req); - rc = __pn533_send_frame_async(dev, req, resp, resp_len); - if (rc < 0) { - dev_kfree_skb(resp); + rc = dev->phy_ops->send_frame(dev, req); + if (rc < 0) kfree(cmd); - } else { + else dev->cmd = cmd; - } return rc; } @@ -1086,10 +550,9 @@ static void pn533_wq_cmd(struct work_struct *work) mutex_unlock(&dev->cmd_lock); - rc = __pn533_send_frame_async(dev, cmd->req, cmd->resp, cmd->resp_len); + rc = dev->phy_ops->send_frame(dev, cmd->req); if (rc < 0) { dev_kfree_skb(cmd->req); - dev_kfree_skb(cmd->resp); kfree(cmd); return; } @@ -1121,7 +584,7 @@ static int pn533_send_sync_complete(struct pn533 *dev, void *_arg, * 1. negative in case of error during TX path -> req should be freed * * 2. negative in case of error during RX path -> req should not be freed - * as it's been already freed at the begining of RX path by + * as it's been already freed at the beginning of RX path by * async_complete_cb. * * 3. valid pointer in case of succesfult RX path @@ -1129,7 +592,7 @@ static int pn533_send_sync_complete(struct pn533 *dev, void *_arg, * A caller has to check a return value with IS_ERR macro. If the test pass, * the returned pointer is valid. * - * */ + */ static struct sk_buff *pn533_send_cmd_sync(struct pn533 *dev, u8 cmd_code, struct sk_buff *req) { @@ -1150,43 +613,6 @@ static struct sk_buff *pn533_send_cmd_sync(struct pn533 *dev, u8 cmd_code, return arg.resp; } -static void pn533_send_complete(struct urb *urb) -{ - struct pn533 *dev = urb->context; - - switch (urb->status) { - case 0: - break; /* success */ - case -ECONNRESET: - case -ENOENT: - dev_dbg(&dev->interface->dev, - "The urb has been stopped (status %d)\n", - urb->status); - break; - case -ESHUTDOWN: - default: - nfc_err(&dev->interface->dev, "Urb failure (status %d)\n", - urb->status); - } -} - -static void pn533_abort_cmd(struct pn533 *dev, gfp_t flags) -{ - /* ACR122U does not support any command which aborts last - * issued command i.e. as ACK for standard PN533. Additionally, - * it behaves stange, sending broken or incorrect responses, - * when we cancel urb before the chip will send response. 
- */ - if (dev->device_type == PN533_DEVICE_ACR122U) - return; - - /* An ack will cancel the last issued command */ - pn533_send_ack(dev, flags); - - /* cancel the urb request */ - usb_kill_urb(dev->in_urb); -} - static struct sk_buff *pn533_alloc_skb(struct pn533 *dev, unsigned int size) { struct sk_buff *skb; @@ -1233,8 +659,10 @@ static bool pn533_target_type_a_is_valid(struct pn533_target_type_a *type_a, if (target_data_len < sizeof(struct pn533_target_type_a)) return false; - /* The lenght check of nfcid[] and ats[] are not being performed because - the values are not being used */ + /* + * The length check of nfcid[] and ats[] are not being performed because + * the values are not being used + */ /* Requirement 4.6.3.3 from NFC Forum Digital Spec */ ssd = PN533_TYPE_A_SENS_RES_SSD(type_a->sens_res); @@ -1443,7 +871,7 @@ static int pn533_target_found(struct pn533 *dev, u8 tg, u8 *tgdata, struct nfc_target nfc_tgt; int rc; - dev_dbg(&dev->interface->dev, "%s: modulation=%d\n", + dev_dbg(dev->dev, "%s: modulation=%d\n", __func__, dev->poll_mod_curr); if (tg != 1) @@ -1466,7 +894,7 @@ static int pn533_target_found(struct pn533 *dev, u8 tg, u8 *tgdata, rc = pn533_target_found_type_b(&nfc_tgt, tgdata, tgdata_len); break; default: - nfc_err(&dev->interface->dev, + nfc_err(dev->dev, "Unknown current poll modulation\n"); return -EPROTO; } @@ -1475,12 +903,12 @@ static int pn533_target_found(struct pn533 *dev, u8 tg, u8 *tgdata, return rc; if (!(nfc_tgt.supported_protocols & dev->poll_protocols)) { - dev_dbg(&dev->interface->dev, + dev_dbg(dev->dev, "The Tg found doesn't have the desired protocol\n"); return -EAGAIN; } - dev_dbg(&dev->interface->dev, + dev_dbg(dev->dev, "Target found - supported protocols: 0x%x\n", nfc_tgt.supported_protocols); @@ -1578,8 +1006,10 @@ static struct sk_buff *pn533_alloc_poll_tg_frame(struct pn533 *dev) 0x0, 0x0, 0x0, 0x40}; /* SEL_RES for DEP */ - unsigned int skb_len = 36 + /* mode (1), mifare (6), - felica (18), nfcid3 (10), gb_len (1) */ + unsigned int skb_len = 36 + /* + * mode (1), mifare (6), + * felica (18), nfcid3 (10), gb_len (1) + */ gbytes_len + 1; /* len Tk*/ @@ -1615,8 +1045,6 @@ static struct sk_buff *pn533_alloc_poll_tg_frame(struct pn533 *dev) return skb; } -#define PN533_CMD_DATAEXCH_HEAD_LEN 1 -#define PN533_CMD_DATAEXCH_DATA_MAXLEN 262 static void pn533_wq_tm_mi_recv(struct work_struct *work); static struct sk_buff *pn533_build_response(struct pn533 *dev); @@ -1627,7 +1055,7 @@ static int pn533_tm_get_data_complete(struct pn533 *dev, void *arg, u8 status, ret, mi; int rc; - dev_dbg(&dev->interface->dev, "%s\n", __func__); + dev_dbg(dev->dev, "%s\n", __func__); if (IS_ERR(resp)) { skb_queue_purge(&dev->resp_q); @@ -1676,7 +1104,7 @@ static void pn533_wq_tm_mi_recv(struct work_struct *work) struct sk_buff *skb; int rc; - dev_dbg(&dev->interface->dev, "%s\n", __func__); + dev_dbg(dev->dev, "%s\n", __func__); skb = pn533_alloc_skb(dev, 0); if (!skb) @@ -1690,8 +1118,6 @@ static void pn533_wq_tm_mi_recv(struct work_struct *work) if (rc < 0) dev_kfree_skb(skb); - - return; } static int pn533_tm_send_complete(struct pn533 *dev, void *arg, @@ -1702,7 +1128,7 @@ static void pn533_wq_tm_mi_send(struct work_struct *work) struct sk_buff *skb; int rc; - dev_dbg(&dev->interface->dev, "%s\n", __func__); + dev_dbg(dev->dev, "%s\n", __func__); /* Grab the first skb in the queue */ skb = skb_dequeue(&dev->fragment_skb); @@ -1724,13 +1150,13 @@ static void pn533_wq_tm_mi_send(struct work_struct *work) if (rc == 0) /* success */ return; - 
dev_err(&dev->interface->dev, + dev_err(dev->dev, "Error %d when trying to perform set meta data_exchange", rc); dev_kfree_skb(skb); error: - pn533_send_ack(dev, GFP_KERNEL); + dev->phy_ops->send_ack(dev, GFP_KERNEL); queue_work(dev->wq, &dev->cmd_work); } @@ -1740,7 +1166,7 @@ static void pn533_wq_tg_get_data(struct work_struct *work) struct sk_buff *skb; int rc; - dev_dbg(&dev->interface->dev, "%s\n", __func__); + dev_dbg(dev->dev, "%s\n", __func__); skb = pn533_alloc_skb(dev, 0); if (!skb) @@ -1751,8 +1177,6 @@ static void pn533_wq_tg_get_data(struct work_struct *work) if (rc < 0) dev_kfree_skb(skb); - - return; } #define ATR_REQ_GB_OFFSET 17 @@ -1762,7 +1186,7 @@ static int pn533_init_target_complete(struct pn533 *dev, struct sk_buff *resp) size_t gb_len; int rc; - dev_dbg(&dev->interface->dev, "%s\n", __func__); + dev_dbg(dev->dev, "%s\n", __func__); if (resp->len < ATR_REQ_GB_OFFSET + 1) return -EINVAL; @@ -1770,7 +1194,7 @@ static int pn533_init_target_complete(struct pn533 *dev, struct sk_buff *resp) mode = resp->data[0]; cmd = &resp->data[1]; - dev_dbg(&dev->interface->dev, "Target mode 0x%x len %d\n", + dev_dbg(dev->dev, "Target mode 0x%x len %d\n", mode, resp->len); if ((mode & PN533_INIT_TARGET_RESP_FRAME_MASK) == @@ -1786,7 +1210,7 @@ static int pn533_init_target_complete(struct pn533 *dev, struct sk_buff *resp) rc = nfc_tm_activated(dev->nfc_dev, NFC_PROTO_NFC_DEP_MASK, comm_mode, gb, gb_len); if (rc < 0) { - nfc_err(&dev->interface->dev, + nfc_err(dev->dev, "Error when signaling target activation\n"); return rc; } @@ -1801,7 +1225,7 @@ static void pn533_listen_mode_timer(unsigned long data) { struct pn533 *dev = (struct pn533 *)data; - dev_dbg(&dev->interface->dev, "Listen mode timeout\n"); + dev_dbg(dev->dev, "Listen mode timeout\n"); dev->cancel_listen = 1; @@ -1816,12 +1240,12 @@ static int pn533_rf_complete(struct pn533 *dev, void *arg, { int rc = 0; - dev_dbg(&dev->interface->dev, "%s\n", __func__); + dev_dbg(dev->dev, "%s\n", __func__); if (IS_ERR(resp)) { rc = PTR_ERR(resp); - nfc_err(&dev->interface->dev, "RF setting error %d\n", rc); + nfc_err(dev->dev, "RF setting error %d\n", rc); return rc; } @@ -1839,7 +1263,7 @@ static void pn533_wq_rf(struct work_struct *work) struct sk_buff *skb; int rc; - dev_dbg(&dev->interface->dev, "%s\n", __func__); + dev_dbg(dev->dev, "%s\n", __func__); skb = pn533_alloc_skb(dev, 2); if (!skb) @@ -1852,10 +1276,8 @@ static void pn533_wq_rf(struct work_struct *work) pn533_rf_complete, NULL); if (rc < 0) { dev_kfree_skb(skb); - nfc_err(&dev->interface->dev, "RF setting error %d\n", rc); + nfc_err(dev->dev, "RF setting error %d\n", rc); } - - return; } static int pn533_poll_dep_complete(struct pn533 *dev, void *arg, @@ -1880,7 +1302,7 @@ static int pn533_poll_dep_complete(struct pn533 *dev, void *arg, return 0; } - dev_dbg(&dev->interface->dev, "Creating new target"); + dev_dbg(dev->dev, "Creating new target"); nfc_target.supported_protocols = NFC_PROTO_NFC_DEP_MASK; nfc_target.nfcid1_len = 10; @@ -1918,7 +1340,7 @@ static int pn533_poll_dep(struct nfc_dev *nfc_dev) u8 *next, nfcid3[NFC_NFCID3_MAXSIZE]; u8 passive_data[PASSIVE_DATA_LEN] = {0x00, 0xff, 0xff, 0x00, 0x3}; - dev_dbg(&dev->interface->dev, "%s", __func__); + dev_dbg(dev->dev, "%s", __func__); if (!dev->gb) { dev->gb = nfc_get_local_general_bytes(nfc_dev, &dev->gb_len); @@ -1975,21 +1397,20 @@ static int pn533_poll_complete(struct pn533 *dev, void *arg, struct pn533_poll_modulations *cur_mod; int rc; - dev_dbg(&dev->interface->dev, "%s\n", __func__); + dev_dbg(dev->dev, "%s\n", 
__func__); if (IS_ERR(resp)) { rc = PTR_ERR(resp); - nfc_err(&dev->interface->dev, "%s Poll complete error %d\n", + nfc_err(dev->dev, "%s Poll complete error %d\n", __func__, rc); if (rc == -ENOENT) { if (dev->poll_mod_count != 0) return rc; - else - goto stop_poll; + goto stop_poll; } else if (rc < 0) { - nfc_err(&dev->interface->dev, + nfc_err(dev->dev, "Error %d when running poll\n", rc); goto stop_poll; } @@ -2009,7 +1430,7 @@ static int pn533_poll_complete(struct pn533 *dev, void *arg, goto done; if (!dev->poll_mod_count) { - dev_dbg(&dev->interface->dev, "Polling has been stopped\n"); + dev_dbg(dev->dev, "Polling has been stopped\n"); goto done; } @@ -2022,7 +1443,7 @@ done: return rc; stop_poll: - nfc_err(&dev->interface->dev, "Polling operation has been stopped\n"); + nfc_err(dev->dev, "Polling operation has been stopped\n"); pn533_poll_reset_mod_list(dev); dev->poll_protocols = 0; @@ -2052,7 +1473,7 @@ static int pn533_send_poll_frame(struct pn533 *dev) mod = dev->poll_mod_active[dev->poll_mod_curr]; - dev_dbg(&dev->interface->dev, "%s mod len %d\n", + dev_dbg(dev->dev, "%s mod len %d\n", __func__, mod->len); if ((dev->poll_protocols & NFC_PROTO_NFC_DEP_MASK) && dev->poll_dep) { @@ -2069,7 +1490,7 @@ static int pn533_send_poll_frame(struct pn533 *dev) } if (!skb) { - nfc_err(&dev->interface->dev, "Failed to allocate skb\n"); + nfc_err(dev->dev, "Failed to allocate skb\n"); return -ENOMEM; } @@ -2077,7 +1498,7 @@ static int pn533_send_poll_frame(struct pn533 *dev) NULL); if (rc < 0) { dev_kfree_skb(skb); - nfc_err(&dev->interface->dev, "Polling loop error %d\n", rc); + nfc_err(dev->dev, "Polling loop error %d\n", rc); } return rc; @@ -2091,13 +1512,13 @@ static void pn533_wq_poll(struct work_struct *work) cur_mod = dev->poll_mod_active[dev->poll_mod_curr]; - dev_dbg(&dev->interface->dev, + dev_dbg(dev->dev, "%s cancel_listen %d modulation len %d\n", __func__, dev->cancel_listen, cur_mod->len); if (dev->cancel_listen == 1) { dev->cancel_listen = 0; - pn533_abort_cmd(dev, GFP_ATOMIC); + dev->phy_ops->abort_cmd(dev, GFP_ATOMIC); } rc = pn533_send_poll_frame(dev); @@ -2106,8 +1527,6 @@ static void pn533_wq_poll(struct work_struct *work) if (cur_mod->len == 0 && dev->poll_mod_count > 1) mod_timer(&dev->listen_timer, jiffies + PN533_LISTEN_TIME * HZ); - - return; } static int pn533_start_poll(struct nfc_dev *nfc_dev, @@ -2118,18 +1537,18 @@ static int pn533_start_poll(struct nfc_dev *nfc_dev, u8 rand_mod; int rc; - dev_dbg(&dev->interface->dev, + dev_dbg(dev->dev, "%s: im protocols 0x%x tm protocols 0x%x\n", __func__, im_protocols, tm_protocols); if (dev->tgt_active_prot) { - nfc_err(&dev->interface->dev, + nfc_err(dev->dev, "Cannot poll with a target already activated\n"); return -EBUSY; } if (dev->tgt_mode) { - nfc_err(&dev->interface->dev, + nfc_err(dev->dev, "Cannot poll while already being activated\n"); return -EBUSY; } @@ -2167,12 +1586,12 @@ static void pn533_stop_poll(struct nfc_dev *nfc_dev) del_timer(&dev->listen_timer); if (!dev->poll_mod_count) { - dev_dbg(&dev->interface->dev, + dev_dbg(dev->dev, "Polling operation was not running\n"); return; } - pn533_abort_cmd(dev, GFP_KERNEL); + dev->phy_ops->abort_cmd(dev, GFP_KERNEL); flush_delayed_work(&dev->poll_work); pn533_poll_reset_mod_list(dev); } @@ -2185,7 +1604,7 @@ static int pn533_activate_target_nfcdep(struct pn533 *dev) struct sk_buff *skb; struct sk_buff *resp; - dev_dbg(&dev->interface->dev, "%s\n", __func__); + dev_dbg(dev->dev, "%s\n", __func__); skb = pn533_alloc_skb(dev, sizeof(u8) * 2); /*TG + Next*/ if (!skb) @@ 
-2201,7 +1620,7 @@ static int pn533_activate_target_nfcdep(struct pn533 *dev) rsp = (struct pn533_cmd_activate_response *)resp->data; rc = rsp->status & PN533_CMD_RET_MASK; if (rc != PN533_CMD_RET_SUCCESS) { - nfc_err(&dev->interface->dev, + nfc_err(dev->dev, "Target activation failed (error 0x%x)\n", rc); dev_kfree_skb(resp); return -EIO; @@ -2221,28 +1640,28 @@ static int pn533_activate_target(struct nfc_dev *nfc_dev, struct pn533 *dev = nfc_get_drvdata(nfc_dev); int rc; - dev_dbg(&dev->interface->dev, "%s: protocol=%u\n", __func__, protocol); + dev_dbg(dev->dev, "%s: protocol=%u\n", __func__, protocol); if (dev->poll_mod_count) { - nfc_err(&dev->interface->dev, + nfc_err(dev->dev, "Cannot activate while polling\n"); return -EBUSY; } if (dev->tgt_active_prot) { - nfc_err(&dev->interface->dev, + nfc_err(dev->dev, "There is already an active target\n"); return -EBUSY; } if (!dev->tgt_available_prots) { - nfc_err(&dev->interface->dev, + nfc_err(dev->dev, "There is no available target to activate\n"); return -EINVAL; } if (!(dev->tgt_available_prots & (1 << protocol))) { - nfc_err(&dev->interface->dev, + nfc_err(dev->dev, "Target doesn't support requested proto %u\n", protocol); return -EINVAL; @@ -2251,7 +1670,7 @@ static int pn533_activate_target(struct nfc_dev *nfc_dev, if (protocol == NFC_PROTO_NFC_DEP) { rc = pn533_activate_target_nfcdep(dev); if (rc) { - nfc_err(&dev->interface->dev, + nfc_err(dev->dev, "Activating target with DEP failed %d\n", rc); return rc; } @@ -2268,19 +1687,19 @@ static int pn533_deactivate_target_complete(struct pn533 *dev, void *arg, { int rc = 0; - dev_dbg(&dev->interface->dev, "%s\n", __func__); + dev_dbg(dev->dev, "%s\n", __func__); if (IS_ERR(resp)) { rc = PTR_ERR(resp); - nfc_err(&dev->interface->dev, "Target release error %d\n", rc); + nfc_err(dev->dev, "Target release error %d\n", rc); return rc; } rc = resp->data[0] & PN533_CMD_RET_MASK; if (rc != PN533_CMD_RET_SUCCESS) - nfc_err(&dev->interface->dev, + nfc_err(dev->dev, "Error 0x%x when releasing the target\n", rc); dev_kfree_skb(resp); @@ -2294,10 +1713,10 @@ static void pn533_deactivate_target(struct nfc_dev *nfc_dev, struct sk_buff *skb; int rc; - dev_dbg(&dev->interface->dev, "%s\n", __func__); + dev_dbg(dev->dev, "%s\n", __func__); if (!dev->tgt_active_prot) { - nfc_err(&dev->interface->dev, "There is no active target\n"); + nfc_err(dev->dev, "There is no active target\n"); return; } @@ -2314,10 +1733,8 @@ static void pn533_deactivate_target(struct nfc_dev *nfc_dev, pn533_deactivate_target_complete, NULL); if (rc < 0) { dev_kfree_skb(skb); - nfc_err(&dev->interface->dev, "Target release error %d\n", rc); + nfc_err(dev->dev, "Target release error %d\n", rc); } - - return; } @@ -2336,7 +1753,7 @@ static int pn533_in_dep_link_up_complete(struct pn533 *dev, void *arg, if (dev->tgt_available_prots && !(dev->tgt_available_prots & (1 << NFC_PROTO_NFC_DEP))) { - nfc_err(&dev->interface->dev, + nfc_err(dev->dev, "The target does not support DEP\n"); rc = -EINVAL; goto error; @@ -2346,7 +1763,7 @@ static int pn533_in_dep_link_up_complete(struct pn533 *dev, void *arg, rc = rsp->status & PN533_CMD_RET_MASK; if (rc != PN533_CMD_RET_SUCCESS) { - nfc_err(&dev->interface->dev, + nfc_err(dev->dev, "Bringing DEP link up failed (error 0x%x)\n", rc); goto error; } @@ -2354,7 +1771,7 @@ static int pn533_in_dep_link_up_complete(struct pn533 *dev, void *arg, if (!dev->tgt_available_prots) { struct nfc_target nfc_target; - dev_dbg(&dev->interface->dev, "Creating new target\n"); + dev_dbg(dev->dev, "Creating new 
target\n"); nfc_target.supported_protocols = NFC_PROTO_NFC_DEP_MASK; nfc_target.nfcid1_len = 10; @@ -2392,16 +1809,16 @@ static int pn533_dep_link_up(struct nfc_dev *nfc_dev, struct nfc_target *target, u8 *next, *arg, nfcid3[NFC_NFCID3_MAXSIZE]; u8 passive_data[PASSIVE_DATA_LEN] = {0x00, 0xff, 0xff, 0x00, 0x3}; - dev_dbg(&dev->interface->dev, "%s\n", __func__); + dev_dbg(dev->dev, "%s\n", __func__); if (dev->poll_mod_count) { - nfc_err(&dev->interface->dev, + nfc_err(dev->dev, "Cannot bring the DEP link up while polling\n"); return -EBUSY; } if (dev->tgt_active_prot) { - nfc_err(&dev->interface->dev, + nfc_err(dev->dev, "There is already an active target\n"); return -EBUSY; } @@ -2472,12 +1889,12 @@ static int pn533_dep_link_down(struct nfc_dev *nfc_dev) { struct pn533 *dev = nfc_get_drvdata(nfc_dev); - dev_dbg(&dev->interface->dev, "%s\n", __func__); + dev_dbg(dev->dev, "%s\n", __func__); pn533_poll_reset_mod_list(dev); if (dev->tgt_mode || dev->tgt_active_prot) - pn533_abort_cmd(dev, GFP_KERNEL); + dev->phy_ops->abort_cmd(dev, GFP_KERNEL); dev->tgt_active_prot = 0; dev->tgt_mode = 0; @@ -2497,7 +1914,7 @@ static struct sk_buff *pn533_build_response(struct pn533 *dev) struct sk_buff *skb, *tmp, *t; unsigned int skb_len = 0, tmp_len = 0; - dev_dbg(&dev->interface->dev, "%s\n", __func__); + dev_dbg(dev->dev, "%s\n", __func__); if (skb_queue_empty(&dev->resp_q)) return NULL; @@ -2510,7 +1927,7 @@ static struct sk_buff *pn533_build_response(struct pn533 *dev) skb_queue_walk_safe(&dev->resp_q, tmp, t) skb_len += tmp->len; - dev_dbg(&dev->interface->dev, "%s total length %d\n", + dev_dbg(dev->dev, "%s total length %d\n", __func__, skb_len); skb = alloc_skb(skb_len, GFP_KERNEL); @@ -2538,7 +1955,7 @@ static int pn533_data_exchange_complete(struct pn533 *dev, void *_arg, int rc = 0; u8 status, ret, mi; - dev_dbg(&dev->interface->dev, "%s\n", __func__); + dev_dbg(dev->dev, "%s\n", __func__); if (IS_ERR(resp)) { rc = PTR_ERR(resp); @@ -2552,7 +1969,7 @@ static int pn533_data_exchange_complete(struct pn533 *dev, void *_arg, skb_pull(resp, sizeof(status)); if (ret != PN533_CMD_RET_SUCCESS) { - nfc_err(&dev->interface->dev, + nfc_err(dev->dev, "Exchanging data failed (error 0x%x)\n", ret); rc = -EIO; goto error; @@ -2593,6 +2010,43 @@ _error: return rc; } +/* + * Receive an incoming pn533 frame. skb contains only header and payload. + * If skb == NULL, it is a notification that the link below is dead. 
+ */
+void pn533_recv_frame(struct pn533 *dev, struct sk_buff *skb, int status)
+{
+	dev->cmd->status = status;
+
+	if (skb == NULL) {
+		pr_err("NULL Frame -> link is dead\n");
+		goto sched_wq;
+	}
+
+	if (pn533_rx_frame_is_ack(skb->data)) {
+		dev_dbg(dev->dev, "%s: Received ACK frame\n", __func__);
+		dev_kfree_skb(skb);
+		return;
+	}
+
+	print_hex_dump_debug("PN533 RX: ", DUMP_PREFIX_NONE, 16, 1, skb->data,
+			     dev->ops->rx_frame_size(skb->data), false);
+
+	if (!dev->ops->rx_is_frame_valid(skb->data, dev)) {
+		nfc_err(dev->dev, "Received an invalid frame\n");
+		dev->cmd->status = -EIO;
+	} else if (!pn533_rx_frame_is_cmd_response(dev, skb->data)) {
+		nfc_err(dev->dev, "It is not the response to the last command\n");
+		dev->cmd->status = -EIO;
+	}
+
+	dev->cmd->resp = skb;
+
+sched_wq:
+	queue_work(dev->wq, &dev->cmd_complete_work);
+}
+EXPORT_SYMBOL(pn533_recv_frame);
+
/* Split the Tx skb into small chunks */
static int pn533_fill_fragment_skbs(struct pn533 *dev, struct sk_buff *skb)
{
@@ -2648,10 +2102,10 @@ static int pn533_transceive(struct nfc_dev *nfc_dev,
	struct pn533_data_exchange_arg *arg = NULL;
	int rc;

-	dev_dbg(&dev->interface->dev, "%s\n", __func__);
+	dev_dbg(dev->dev, "%s\n", __func__);

	if (!dev->tgt_active_prot) {
-		nfc_err(&dev->interface->dev,
+		nfc_err(dev->dev,
			"Can't exchange data if there is no active target\n");
		rc = -EINVAL;
		goto error;
@@ -2715,7 +2169,7 @@ static int pn533_tm_send_complete(struct pn533 *dev, void *arg,
{
	u8 status;

-	dev_dbg(&dev->interface->dev, "%s\n", __func__);
+	dev_dbg(dev->dev, "%s\n", __func__);

	if (IS_ERR(resp))
		return PTR_ERR(resp);
@@ -2747,7 +2201,7 @@ static int pn533_tm_send(struct nfc_dev *nfc_dev, struct sk_buff *skb)
	struct pn533 *dev = nfc_get_drvdata(nfc_dev);
	int rc;

-	dev_dbg(&dev->interface->dev, "%s\n", __func__);
+	dev_dbg(dev->dev, "%s\n", __func__);

	/* let's split in multiple chunks if size's too big */
	if (skb->len > PN533_CMD_DATAEXCH_DATA_MAXLEN) {
@@ -2785,7 +2239,7 @@ static void pn533_wq_mi_recv(struct work_struct *work)
	struct sk_buff *skb;
	int rc;

-	dev_dbg(&dev->interface->dev, "%s\n", __func__);
+	dev_dbg(dev->dev, "%s\n", __func__);

	skb = pn533_alloc_skb(dev, PN533_CMD_DATAEXCH_HEAD_LEN);
	if (!skb)
@@ -2817,14 +2271,14 @@ static void pn533_wq_mi_recv(struct work_struct *work)
	if (rc == 0) /* success */
		return;

-	nfc_err(&dev->interface->dev,
+	nfc_err(dev->dev,
		"Error %d when trying to perform data_exchange\n", rc);

	dev_kfree_skb(skb);
	kfree(dev->cmd_complete_mi_arg);

error:
-	pn533_send_ack(dev, GFP_KERNEL);
+	dev->phy_ops->send_ack(dev, GFP_KERNEL);
	queue_work(dev->wq, &dev->cmd_work);
}

@@ -2834,7 +2288,7 @@ static void pn533_wq_mi_send(struct work_struct *work)
	struct sk_buff *skb;
	int rc;

-	dev_dbg(&dev->interface->dev, "%s\n", __func__);
+	dev_dbg(dev->dev, "%s\n", __func__);

	/* Grab the first skb in the queue */
	skb = skb_dequeue(&dev->fragment_skb);
@@ -2861,7 +2315,8 @@ static void pn533_wq_mi_send(struct work_struct *work)
	default:
		/* Still some fragments?
*/ - rc = pn533_send_cmd_direct_async(dev,PN533_CMD_IN_DATA_EXCHANGE, + rc = pn533_send_cmd_direct_async(dev, + PN533_CMD_IN_DATA_EXCHANGE, skb, pn533_data_exchange_complete, dev->cmd_complete_dep_arg); @@ -2872,14 +2327,14 @@ static void pn533_wq_mi_send(struct work_struct *work) if (rc == 0) /* success */ return; - nfc_err(&dev->interface->dev, + nfc_err(dev->dev, "Error %d when trying to perform data_exchange\n", rc); dev_kfree_skb(skb); kfree(dev->cmd_complete_dep_arg); error: - pn533_send_ack(dev, GFP_KERNEL); + dev->phy_ops->send_ack(dev, GFP_KERNEL); queue_work(dev->wq, &dev->cmd_work); } @@ -2890,7 +2345,7 @@ static int pn533_set_configuration(struct pn533 *dev, u8 cfgitem, u8 *cfgdata, struct sk_buff *resp; int skb_len; - dev_dbg(&dev->interface->dev, "%s\n", __func__); + dev_dbg(dev->dev, "%s\n", __func__); skb_len = sizeof(cfgitem) + cfgdata_len; /* cfgitem + cfgdata */ @@ -2937,7 +2392,7 @@ static int pn533_pasori_fw_reset(struct pn533 *dev) struct sk_buff *skb; struct sk_buff *resp; - dev_dbg(&dev->interface->dev, "%s\n", __func__); + dev_dbg(dev->dev, "%s\n", __func__); skb = pn533_alloc_skb(dev, sizeof(u8)); if (!skb) @@ -2954,71 +2409,6 @@ static int pn533_pasori_fw_reset(struct pn533 *dev) return 0; } -struct pn533_acr122_poweron_rdr_arg { - int rc; - struct completion done; -}; - -static void pn533_acr122_poweron_rdr_resp(struct urb *urb) -{ - struct pn533_acr122_poweron_rdr_arg *arg = urb->context; - - dev_dbg(&urb->dev->dev, "%s\n", __func__); - - print_hex_dump_debug("ACR122 RX: ", DUMP_PREFIX_NONE, 16, 1, - urb->transfer_buffer, urb->transfer_buffer_length, - false); - - arg->rc = urb->status; - complete(&arg->done); -} - -static int pn533_acr122_poweron_rdr(struct pn533 *dev) -{ - /* Power on th reader (CCID cmd) */ - u8 cmd[10] = {PN533_ACR122_PC_TO_RDR_ICCPOWERON, - 0, 0, 0, 0, 0, 0, 3, 0, 0}; - u8 buf[255]; - int rc; - void *cntx; - struct pn533_acr122_poweron_rdr_arg arg; - - dev_dbg(&dev->interface->dev, "%s\n", __func__); - - init_completion(&arg.done); - cntx = dev->in_urb->context; /* backup context */ - - dev->in_urb->transfer_buffer = buf; - dev->in_urb->transfer_buffer_length = 255; - dev->in_urb->complete = pn533_acr122_poweron_rdr_resp; - dev->in_urb->context = &arg; - - dev->out_urb->transfer_buffer = cmd; - dev->out_urb->transfer_buffer_length = sizeof(cmd); - - print_hex_dump_debug("ACR122 TX: ", DUMP_PREFIX_NONE, 16, 1, - cmd, sizeof(cmd), false); - - rc = usb_submit_urb(dev->out_urb, GFP_KERNEL); - if (rc) { - nfc_err(&dev->interface->dev, - "Reader power on cmd error %d\n", rc); - return rc; - } - - rc = usb_submit_urb(dev->in_urb, GFP_KERNEL); - if (rc) { - nfc_err(&dev->interface->dev, - "Can't submit reader poweron cmd response %d\n", rc); - return rc; - } - - wait_for_completion(&arg.done); - dev->in_urb->context = cntx; /* restore context */ - - return arg.rc; -} - static int pn533_rf_field(struct nfc_dev *nfc_dev, u8 rf) { struct pn533 *dev = nfc_get_drvdata(nfc_dev); @@ -3030,7 +2420,7 @@ static int pn533_rf_field(struct nfc_dev *nfc_dev, u8 rf) rc = pn533_set_configuration(dev, PN533_CFGITEM_RF_FIELD, (u8 *)&rf_field, 1); if (rc) { - nfc_err(&dev->interface->dev, "Error on setting RF field\n"); + nfc_err(dev->dev, "Error on setting RF field\n"); return rc; } @@ -3083,7 +2473,7 @@ static int pn533_setup(struct pn533 *dev) break; default: - nfc_err(&dev->interface->dev, "Unknown device type %d\n", + nfc_err(dev->dev, "Unknown device type %d\n", dev->device_type); return -EINVAL; } @@ -3091,7 +2481,7 @@ static int pn533_setup(struct pn533 
*dev) rc = pn533_set_configuration(dev, PN533_CFGITEM_MAX_RETRIES, (u8 *)&max_retries, sizeof(max_retries)); if (rc) { - nfc_err(&dev->interface->dev, + nfc_err(dev->dev, "Error on setting MAX_RETRIES config\n"); return rc; } @@ -3100,7 +2490,7 @@ static int pn533_setup(struct pn533 *dev) rc = pn533_set_configuration(dev, PN533_CFGITEM_TIMING, (u8 *)&timing, sizeof(timing)); if (rc) { - nfc_err(&dev->interface->dev, "Error on setting RF timings\n"); + nfc_err(dev->dev, "Error on setting RF timings\n"); return rc; } @@ -3114,7 +2504,7 @@ static int pn533_setup(struct pn533 *dev) rc = pn533_set_configuration(dev, PN533_CFGITEM_PASORI, pasori_cfg, 3); if (rc) { - nfc_err(&dev->interface->dev, + nfc_err(dev->dev, "Error while settings PASORI config\n"); return rc; } @@ -3127,208 +2517,128 @@ static int pn533_setup(struct pn533 *dev) return 0; } -static int pn533_probe(struct usb_interface *interface, - const struct usb_device_id *id) +struct pn533 *pn533_register_device(u32 device_type, + u32 protocols, + enum pn533_protocol_type protocol_type, + void *phy, + struct pn533_phy_ops *phy_ops, + struct pn533_frame_ops *fops, + struct device *dev) { struct pn533_fw_version fw_ver; - struct pn533 *dev; - struct usb_host_interface *iface_desc; - struct usb_endpoint_descriptor *endpoint; - int in_endpoint = 0; - int out_endpoint = 0; + struct pn533 *priv; int rc = -ENOMEM; - int i; - u32 protocols; - dev = kzalloc(sizeof(*dev), GFP_KERNEL); - if (!dev) - return -ENOMEM; + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) + return ERR_PTR(-ENOMEM); - dev->udev = usb_get_dev(interface_to_usbdev(interface)); - dev->interface = interface; - mutex_init(&dev->cmd_lock); + priv->phy = phy; + priv->phy_ops = phy_ops; + priv->dev = dev; + if (fops != NULL) + priv->ops = fops; + else + priv->ops = &pn533_std_frame_ops; - iface_desc = interface->cur_altsetting; - for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) { - endpoint = &iface_desc->endpoint[i].desc; + priv->protocol_type = protocol_type; + priv->device_type = device_type; - if (!in_endpoint && usb_endpoint_is_bulk_in(endpoint)) - in_endpoint = endpoint->bEndpointAddress; + mutex_init(&priv->cmd_lock); - if (!out_endpoint && usb_endpoint_is_bulk_out(endpoint)) - out_endpoint = endpoint->bEndpointAddress; - } - - if (!in_endpoint || !out_endpoint) { - nfc_err(&interface->dev, - "Could not find bulk-in or bulk-out endpoint\n"); - rc = -ENODEV; - goto error; - } - - dev->in_urb = usb_alloc_urb(0, GFP_KERNEL); - dev->out_urb = usb_alloc_urb(0, GFP_KERNEL); - - if (!dev->in_urb || !dev->out_urb) + INIT_WORK(&priv->cmd_work, pn533_wq_cmd); + INIT_WORK(&priv->cmd_complete_work, pn533_wq_cmd_complete); + INIT_WORK(&priv->mi_rx_work, pn533_wq_mi_recv); + INIT_WORK(&priv->mi_tx_work, pn533_wq_mi_send); + INIT_WORK(&priv->tg_work, pn533_wq_tg_get_data); + INIT_WORK(&priv->mi_tm_rx_work, pn533_wq_tm_mi_recv); + INIT_WORK(&priv->mi_tm_tx_work, pn533_wq_tm_mi_send); + INIT_DELAYED_WORK(&priv->poll_work, pn533_wq_poll); + INIT_WORK(&priv->rf_work, pn533_wq_rf); + priv->wq = alloc_ordered_workqueue("pn533", 0); + if (priv->wq == NULL) goto error; - usb_fill_bulk_urb(dev->in_urb, dev->udev, - usb_rcvbulkpipe(dev->udev, in_endpoint), - NULL, 0, NULL, dev); - usb_fill_bulk_urb(dev->out_urb, dev->udev, - usb_sndbulkpipe(dev->udev, out_endpoint), - NULL, 0, pn533_send_complete, dev); + init_timer(&priv->listen_timer); + priv->listen_timer.data = (unsigned long) priv; + priv->listen_timer.function = pn533_listen_mode_timer; - INIT_WORK(&dev->cmd_work, pn533_wq_cmd); - 
INIT_WORK(&dev->cmd_complete_work, pn533_wq_cmd_complete); - INIT_WORK(&dev->mi_rx_work, pn533_wq_mi_recv); - INIT_WORK(&dev->mi_tx_work, pn533_wq_mi_send); - INIT_WORK(&dev->tg_work, pn533_wq_tg_get_data); - INIT_WORK(&dev->mi_tm_rx_work, pn533_wq_tm_mi_recv); - INIT_WORK(&dev->mi_tm_tx_work, pn533_wq_tm_mi_send); - INIT_DELAYED_WORK(&dev->poll_work, pn533_wq_poll); - INIT_WORK(&dev->rf_work, pn533_wq_rf); - dev->wq = alloc_ordered_workqueue("pn533", 0); - if (dev->wq == NULL) - goto error; + skb_queue_head_init(&priv->resp_q); + skb_queue_head_init(&priv->fragment_skb); - init_timer(&dev->listen_timer); - dev->listen_timer.data = (unsigned long) dev; - dev->listen_timer.function = pn533_listen_mode_timer; - - skb_queue_head_init(&dev->resp_q); - skb_queue_head_init(&dev->fragment_skb); - - INIT_LIST_HEAD(&dev->cmd_queue); - - usb_set_intfdata(interface, dev); - - dev->ops = &pn533_std_frame_ops; - - dev->protocol_type = PN533_PROTO_REQ_ACK_RESP; - dev->device_type = id->driver_info; - switch (dev->device_type) { - case PN533_DEVICE_STD: - protocols = PN533_ALL_PROTOCOLS; - break; - - case PN533_DEVICE_PASORI: - protocols = PN533_NO_TYPE_B_PROTOCOLS; - break; - - case PN533_DEVICE_ACR122U: - protocols = PN533_NO_TYPE_B_PROTOCOLS; - dev->ops = &pn533_acr122_frame_ops; - dev->protocol_type = PN533_PROTO_REQ_RESP, - - rc = pn533_acr122_poweron_rdr(dev); - if (rc < 0) { - nfc_err(&dev->interface->dev, - "Couldn't poweron the reader (error %d)\n", rc); - goto destroy_wq; - } - break; - - default: - nfc_err(&dev->interface->dev, "Unknown device type %d\n", - dev->device_type); - rc = -EINVAL; - goto destroy_wq; - } + INIT_LIST_HEAD(&priv->cmd_queue); memset(&fw_ver, 0, sizeof(fw_ver)); - rc = pn533_get_firmware_version(dev, &fw_ver); + rc = pn533_get_firmware_version(priv, &fw_ver); if (rc < 0) goto destroy_wq; - nfc_info(&dev->interface->dev, - "NXP PN5%02X firmware ver %d.%d now attached\n", + nfc_info(dev, "NXP PN5%02X firmware ver %d.%d now attached\n", fw_ver.ic, fw_ver.ver, fw_ver.rev); - dev->nfc_dev = nfc_allocate_device(&pn533_nfc_ops, protocols, - dev->ops->tx_header_len + + priv->nfc_dev = nfc_allocate_device(&pn533_nfc_ops, protocols, + priv->ops->tx_header_len + PN533_CMD_DATAEXCH_HEAD_LEN, - dev->ops->tx_tail_len); - if (!dev->nfc_dev) { + priv->ops->tx_tail_len); + if (!priv->nfc_dev) { rc = -ENOMEM; goto destroy_wq; } - nfc_set_parent_dev(dev->nfc_dev, &interface->dev); - nfc_set_drvdata(dev->nfc_dev, dev); + nfc_set_drvdata(priv->nfc_dev, priv); - rc = nfc_register_device(dev->nfc_dev); + rc = nfc_register_device(priv->nfc_dev); if (rc) goto free_nfc_dev; - rc = pn533_setup(dev); + rc = pn533_setup(priv); if (rc) goto unregister_nfc_dev; - return 0; + return priv; unregister_nfc_dev: - nfc_unregister_device(dev->nfc_dev); + nfc_unregister_device(priv->nfc_dev); free_nfc_dev: - nfc_free_device(dev->nfc_dev); + nfc_free_device(priv->nfc_dev); destroy_wq: - destroy_workqueue(dev->wq); + destroy_workqueue(priv->wq); error: - usb_free_urb(dev->in_urb); - usb_free_urb(dev->out_urb); - usb_put_dev(dev->udev); - kfree(dev); - return rc; + kfree(priv); + return ERR_PTR(rc); } +EXPORT_SYMBOL_GPL(pn533_register_device); -static void pn533_disconnect(struct usb_interface *interface) +void pn533_unregister_device(struct pn533 *priv) { - struct pn533 *dev; struct pn533_cmd *cmd, *n; - dev = usb_get_intfdata(interface); - usb_set_intfdata(interface, NULL); + nfc_unregister_device(priv->nfc_dev); + nfc_free_device(priv->nfc_dev); - nfc_unregister_device(dev->nfc_dev); - 
nfc_free_device(dev->nfc_dev); + flush_delayed_work(&priv->poll_work); + destroy_workqueue(priv->wq); - usb_kill_urb(dev->in_urb); - usb_kill_urb(dev->out_urb); + skb_queue_purge(&priv->resp_q); - flush_delayed_work(&dev->poll_work); - destroy_workqueue(dev->wq); + del_timer(&priv->listen_timer); - skb_queue_purge(&dev->resp_q); - - del_timer(&dev->listen_timer); - - list_for_each_entry_safe(cmd, n, &dev->cmd_queue, queue) { + list_for_each_entry_safe(cmd, n, &priv->cmd_queue, queue) { list_del(&cmd->queue); kfree(cmd); } - usb_free_urb(dev->in_urb); - usb_free_urb(dev->out_urb); - kfree(dev); - - nfc_info(&interface->dev, "NXP PN533 NFC device disconnected\n"); + kfree(priv); } +EXPORT_SYMBOL_GPL(pn533_unregister_device); -static struct usb_driver pn533_driver = { - .name = "pn533", - .probe = pn533_probe, - .disconnect = pn533_disconnect, - .id_table = pn533_table, -}; - -module_usb_driver(pn533_driver); MODULE_AUTHOR("Lauro Ramos Venancio "); MODULE_AUTHOR("Aloisio Almeida Jr "); MODULE_AUTHOR("Waldemar Rymarkiewicz "); -MODULE_DESCRIPTION("PN533 usb driver ver " VERSION); +MODULE_DESCRIPTION("PN533 driver ver " VERSION); MODULE_VERSION(VERSION); MODULE_LICENSE("GPL"); diff --git a/drivers/nfc/pn533/pn533.h b/drivers/nfc/pn533/pn533.h new file mode 100644 index 000000000000..1d9f19eb2a99 --- /dev/null +++ b/drivers/nfc/pn533/pn533.h @@ -0,0 +1,235 @@ +/* + * Driver for NXP PN533 NFC Chip + * + * Copyright (C) 2011 Instituto Nokia de Tecnologia + * Copyright (C) 2012-2013 Tieto Poland + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ + +#define PN533_DEVICE_STD 0x1 +#define PN533_DEVICE_PASORI 0x2 +#define PN533_DEVICE_ACR122U 0x3 + +#define PN533_ALL_PROTOCOLS (NFC_PROTO_JEWEL_MASK | NFC_PROTO_MIFARE_MASK |\ + NFC_PROTO_FELICA_MASK | NFC_PROTO_ISO14443_MASK |\ + NFC_PROTO_NFC_DEP_MASK |\ + NFC_PROTO_ISO14443_B_MASK) + +#define PN533_NO_TYPE_B_PROTOCOLS (NFC_PROTO_JEWEL_MASK | \ + NFC_PROTO_MIFARE_MASK | \ + NFC_PROTO_FELICA_MASK | \ + NFC_PROTO_ISO14443_MASK | \ + NFC_PROTO_NFC_DEP_MASK) + +/* Standard pn533 frame definitions (standard and extended)*/ +#define PN533_STD_FRAME_HEADER_LEN (sizeof(struct pn533_std_frame) \ + + 2) /* data[0] TFI, data[1] CC */ +#define PN533_STD_FRAME_TAIL_LEN 2 /* data[len] DCS, data[len + 1] postamble*/ + +#define PN533_EXT_FRAME_HEADER_LEN (sizeof(struct pn533_ext_frame) \ + + 2) /* data[0] TFI, data[1] CC */ + +#define PN533_CMD_DATAEXCH_HEAD_LEN 1 +#define PN533_CMD_DATAEXCH_DATA_MAXLEN 262 +#define PN533_CMD_DATAFRAME_MAXLEN 240 /* max data length (send) */ + +/* + * Max extended frame payload len, excluding TFI and CC + * which are already in PN533_FRAME_HEADER_LEN. 
+ */ +#define PN533_STD_FRAME_MAX_PAYLOAD_LEN 263 + + +/* Preamble (1), SoPC (2), ACK Code (2), Postamble (1) */ +#define PN533_STD_FRAME_ACK_SIZE 6 +#define PN533_STD_FRAME_CHECKSUM(f) (f->data[f->datalen]) +#define PN533_STD_FRAME_POSTAMBLE(f) (f->data[f->datalen + 1]) +/* Half start code (3), LEN (4) should be 0xffff for extended frame */ +#define PN533_STD_IS_EXTENDED(hdr) ((hdr)->datalen == 0xFF \ + && (hdr)->datalen_checksum == 0xFF) +#define PN533_EXT_FRAME_CHECKSUM(f) (f->data[be16_to_cpu(f->datalen)]) + +/* start of frame */ +#define PN533_STD_FRAME_SOF 0x00FF + +/* standard frame identifier: in/out/error */ +#define PN533_STD_FRAME_IDENTIFIER(f) (f->data[0]) /* TFI */ +#define PN533_STD_FRAME_DIR_OUT 0xD4 +#define PN533_STD_FRAME_DIR_IN 0xD5 + +/* PN533 Commands */ +#define PN533_FRAME_CMD(f) (f->data[1]) + +#define PN533_CMD_GET_FIRMWARE_VERSION 0x02 +#define PN533_CMD_RF_CONFIGURATION 0x32 +#define PN533_CMD_IN_DATA_EXCHANGE 0x40 +#define PN533_CMD_IN_COMM_THRU 0x42 +#define PN533_CMD_IN_LIST_PASSIVE_TARGET 0x4A +#define PN533_CMD_IN_ATR 0x50 +#define PN533_CMD_IN_RELEASE 0x52 +#define PN533_CMD_IN_JUMP_FOR_DEP 0x56 + +#define PN533_CMD_TG_INIT_AS_TARGET 0x8c +#define PN533_CMD_TG_GET_DATA 0x86 +#define PN533_CMD_TG_SET_DATA 0x8e +#define PN533_CMD_TG_SET_META_DATA 0x94 +#define PN533_CMD_UNDEF 0xff + +#define PN533_CMD_RESPONSE(cmd) (cmd + 1) + +/* PN533 Return codes */ +#define PN533_CMD_RET_MASK 0x3F +#define PN533_CMD_MI_MASK 0x40 +#define PN533_CMD_RET_SUCCESS 0x00 + + +enum pn533_protocol_type { + PN533_PROTO_REQ_ACK_RESP = 0, + PN533_PROTO_REQ_RESP +}; + +/* Poll modulations */ +enum { + PN533_POLL_MOD_106KBPS_A, + PN533_POLL_MOD_212KBPS_FELICA, + PN533_POLL_MOD_424KBPS_FELICA, + PN533_POLL_MOD_106KBPS_JEWEL, + PN533_POLL_MOD_847KBPS_B, + PN533_LISTEN_MOD, + + __PN533_POLL_MOD_AFTER_LAST, +}; +#define PN533_POLL_MOD_MAX (__PN533_POLL_MOD_AFTER_LAST - 1) + +struct pn533_std_frame { + u8 preamble; + __be16 start_frame; + u8 datalen; + u8 datalen_checksum; + u8 data[]; +} __packed; + +struct pn533_ext_frame { /* Extended Information frame */ + u8 preamble; + __be16 start_frame; + __be16 eif_flag; /* fixed to 0xFFFF */ + __be16 datalen; + u8 datalen_checksum; + u8 data[]; +} __packed; + +struct pn533 { + struct nfc_dev *nfc_dev; + u32 device_type; + enum pn533_protocol_type protocol_type; + + struct sk_buff_head resp_q; + struct sk_buff_head fragment_skb; + + struct workqueue_struct *wq; + struct work_struct cmd_work; + struct work_struct cmd_complete_work; + struct delayed_work poll_work; + struct work_struct mi_rx_work; + struct work_struct mi_tx_work; + struct work_struct mi_tm_rx_work; + struct work_struct mi_tm_tx_work; + struct work_struct tg_work; + struct work_struct rf_work; + + struct list_head cmd_queue; + struct pn533_cmd *cmd; + u8 cmd_pending; + struct mutex cmd_lock; /* protects cmd queue */ + + void *cmd_complete_mi_arg; + void *cmd_complete_dep_arg; + + struct pn533_poll_modulations *poll_mod_active[PN533_POLL_MOD_MAX + 1]; + u8 poll_mod_count; + u8 poll_mod_curr; + u8 poll_dep; + u32 poll_protocols; + u32 listen_protocols; + struct timer_list listen_timer; + int cancel_listen; + + u8 *gb; + size_t gb_len; + + u8 tgt_available_prots; + u8 tgt_active_prot; + u8 tgt_mode; + + struct pn533_frame_ops *ops; + + struct device *dev; + void *phy; + struct pn533_phy_ops *phy_ops; +}; + +typedef int (*pn533_send_async_complete_t) (struct pn533 *dev, void *arg, + struct sk_buff *resp); + +struct pn533_cmd { + struct list_head queue; + u8 code; + int status; + struct 
sk_buff *req; + struct sk_buff *resp; + pn533_send_async_complete_t complete_cb; + void *complete_cb_context; +}; + + +struct pn533_frame_ops { + void (*tx_frame_init)(void *frame, u8 cmd_code); + void (*tx_frame_finish)(void *frame); + void (*tx_update_payload_len)(void *frame, int len); + int tx_header_len; + int tx_tail_len; + + bool (*rx_is_frame_valid)(void *frame, struct pn533 *dev); + bool (*rx_frame_is_ack)(void *frame); + int (*rx_frame_size)(void *frame); + int rx_header_len; + int rx_tail_len; + + int max_payload_len; + u8 (*get_cmd_code)(void *frame); +}; + + +struct pn533_phy_ops { + int (*send_frame)(struct pn533 *priv, + struct sk_buff *out); + int (*send_ack)(struct pn533 *dev, gfp_t flags); + void (*abort_cmd)(struct pn533 *priv, gfp_t flags); +}; + + +struct pn533 *pn533_register_device(u32 device_type, + u32 protocols, + enum pn533_protocol_type protocol_type, + void *phy, + struct pn533_phy_ops *phy_ops, + struct pn533_frame_ops *fops, + struct device *dev); + +void pn533_unregister_device(struct pn533 *priv); +void pn533_recv_frame(struct pn533 *dev, struct sk_buff *skb, int status); + +bool pn533_rx_frame_is_cmd_response(struct pn533 *dev, void *frame); +bool pn533_rx_frame_is_ack(void *_frame); diff --git a/drivers/nfc/pn533/usb.c b/drivers/nfc/pn533/usb.c new file mode 100644 index 000000000000..4f73cbf8ccef --- /dev/null +++ b/drivers/nfc/pn533/usb.c @@ -0,0 +1,598 @@ +/* + * Driver for NXP PN533 NFC Chip - USB transport layer + * + * Copyright (C) 2011 Instituto Nokia de Tecnologia + * Copyright (C) 2012-2013 Tieto Poland + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include "pn533.h" + +#define VERSION "0.1" + +#define PN533_VENDOR_ID 0x4CC +#define PN533_PRODUCT_ID 0x2533 + +#define SCM_VENDOR_ID 0x4E6 +#define SCL3711_PRODUCT_ID 0x5591 + +#define SONY_VENDOR_ID 0x054c +#define PASORI_PRODUCT_ID 0x02e1 + +#define ACS_VENDOR_ID 0x072f +#define ACR122U_PRODUCT_ID 0x2200 + +static const struct usb_device_id pn533_usb_table[] = { + { USB_DEVICE(PN533_VENDOR_ID, PN533_PRODUCT_ID), + .driver_info = PN533_DEVICE_STD }, + { USB_DEVICE(SCM_VENDOR_ID, SCL3711_PRODUCT_ID), + .driver_info = PN533_DEVICE_STD }, + { USB_DEVICE(SONY_VENDOR_ID, PASORI_PRODUCT_ID), + .driver_info = PN533_DEVICE_PASORI }, + { USB_DEVICE(ACS_VENDOR_ID, ACR122U_PRODUCT_ID), + .driver_info = PN533_DEVICE_ACR122U }, + { } +}; +MODULE_DEVICE_TABLE(usb, pn533_usb_table); + +struct pn533_usb_phy { + struct usb_device *udev; + struct usb_interface *interface; + + struct urb *out_urb; + struct urb *in_urb; + + struct pn533 *priv; +}; + +static void pn533_recv_response(struct urb *urb) +{ + struct pn533_usb_phy *phy = urb->context; + struct sk_buff *skb = NULL; + + if (!urb->status) { + skb = alloc_skb(urb->actual_length, GFP_KERNEL); + if (!skb) { + nfc_err(&phy->udev->dev, "failed to alloc memory\n"); + } else { + memcpy(skb_put(skb, urb->actual_length), + urb->transfer_buffer, urb->actual_length); + } + } + + pn533_recv_frame(phy->priv, skb, urb->status); +} + +static int pn533_submit_urb_for_response(struct pn533_usb_phy *phy, gfp_t flags) +{ + phy->in_urb->complete = pn533_recv_response; + + return usb_submit_urb(phy->in_urb, flags); +} + +static void pn533_recv_ack(struct urb *urb) +{ + struct pn533_usb_phy *phy = urb->context; + struct pn533 *priv = phy->priv; + struct pn533_cmd *cmd = priv->cmd; + struct pn533_std_frame *in_frame; + int rc; + + cmd->status = urb->status; + + switch (urb->status) { + case 0: + break; /* success */ + case -ECONNRESET: + case -ENOENT: + dev_dbg(&phy->udev->dev, + "The urb has been stopped (status %d)\n", + urb->status); + goto sched_wq; + case -ESHUTDOWN: + default: + nfc_err(&phy->udev->dev, + "Urb failure (status %d)\n", urb->status); + goto sched_wq; + } + + in_frame = phy->in_urb->transfer_buffer; + + if (!pn533_rx_frame_is_ack(in_frame)) { + nfc_err(&phy->udev->dev, "Received an invalid ack\n"); + cmd->status = -EIO; + goto sched_wq; + } + + rc = pn533_submit_urb_for_response(phy, GFP_ATOMIC); + if (rc) { + nfc_err(&phy->udev->dev, + "usb_submit_urb failed with result %d\n", rc); + cmd->status = rc; + goto sched_wq; + } + + return; + +sched_wq: + queue_work(priv->wq, &priv->cmd_complete_work); +} + +static int pn533_submit_urb_for_ack(struct pn533_usb_phy *phy, gfp_t flags) +{ + phy->in_urb->complete = pn533_recv_ack; + + return usb_submit_urb(phy->in_urb, flags); +} + +static int pn533_usb_send_ack(struct pn533 *dev, gfp_t flags) +{ + struct pn533_usb_phy *phy = dev->phy; + u8 ack[6] = {0x00, 0x00, 0xff, 0x00, 0xff, 0x00}; + /* spec 7.1.1.3: Preamble, SoPC (2), ACK Code (2), Postamble */ + int rc; + + phy->out_urb->transfer_buffer = ack; + phy->out_urb->transfer_buffer_length = sizeof(ack); + rc = usb_submit_urb(phy->out_urb, flags); + + return rc; +} + +static int pn533_usb_send_frame(struct pn533 *dev, + struct sk_buff *out) +{ + struct pn533_usb_phy *phy = dev->phy; + int rc; + + if (phy->priv == NULL) + phy->priv = dev; + + phy->out_urb->transfer_buffer = out->data; + phy->out_urb->transfer_buffer_length = out->len; + + print_hex_dump_debug("PN533 TX: ", 
DUMP_PREFIX_NONE, 16, 1,
+			     out->data, out->len, false);
+
+	rc = usb_submit_urb(phy->out_urb, GFP_KERNEL);
+	if (rc)
+		return rc;
+
+	if (dev->protocol_type == PN533_PROTO_REQ_RESP) {
+		/* request for response for sent packet directly */
+		rc = pn533_submit_urb_for_response(phy, GFP_ATOMIC);
+		if (rc)
+			goto error;
+	} else if (dev->protocol_type == PN533_PROTO_REQ_ACK_RESP) {
+		/* request for ACK if that's the case */
+		rc = pn533_submit_urb_for_ack(phy, GFP_KERNEL);
+		if (rc)
+			goto error;
+	}
+
+	return 0;
+
+error:
+	usb_unlink_urb(phy->out_urb);
+	return rc;
+}
+
+static void pn533_usb_abort_cmd(struct pn533 *dev, gfp_t flags)
+{
+	struct pn533_usb_phy *phy = dev->phy;
+
+	/* ACR122U does not support any command which aborts the last
+	 * issued command (i.e. an ACK for a standard PN533). Additionally,
+	 * it behaves strangely, sending broken or incorrect responses,
+	 * when we cancel the urb before the chip has sent its response.
+	 */
+	if (dev->device_type == PN533_DEVICE_ACR122U)
+		return;
+
+	/* An ack will cancel the last issued command */
+	pn533_usb_send_ack(dev, flags);
+
+	/* cancel the urb request */
+	usb_kill_urb(phy->in_urb);
+}
+
+/* ACR122 specific structs and functions */
+
+/* ACS ACR122 pn533 frame definitions */
+#define PN533_ACR122_TX_FRAME_HEADER_LEN (sizeof(struct pn533_acr122_tx_frame) \
+					  + 2)
+#define PN533_ACR122_TX_FRAME_TAIL_LEN 0
+#define PN533_ACR122_RX_FRAME_HEADER_LEN (sizeof(struct pn533_acr122_rx_frame) \
+					  + 2)
+#define PN533_ACR122_RX_FRAME_TAIL_LEN 2
+#define PN533_ACR122_FRAME_MAX_PAYLOAD_LEN PN533_STD_FRAME_MAX_PAYLOAD_LEN
+
+/* CCID message types */
+#define PN533_ACR122_PC_TO_RDR_ICCPOWERON 0x62
+#define PN533_ACR122_PC_TO_RDR_ESCAPE 0x6B
+
+#define PN533_ACR122_RDR_TO_PC_ESCAPE 0x83
+
+
+struct pn533_acr122_ccid_hdr {
+	u8 type;
+	u32 datalen;
+	u8 slot;
+	u8 seq;
+
+	/*
+	 * 3 msg specific bytes or status, error and 1 specific
+	 * byte for response msg
+	 */
+	u8 params[3];
+	u8 data[]; /* payload */
+} __packed;
+
+struct pn533_acr122_apdu_hdr {
+	u8 class;
+	u8 ins;
+	u8 p1;
+	u8 p2;
+} __packed;
+
+struct pn533_acr122_tx_frame {
+	struct pn533_acr122_ccid_hdr ccid;
+	struct pn533_acr122_apdu_hdr apdu;
+	u8 datalen;
+	u8 data[]; /* pn533 frame: TFI ... */
+} __packed;
+
+struct pn533_acr122_rx_frame {
+	struct pn533_acr122_ccid_hdr ccid;
+	u8 data[]; /* pn533 frame : TFI ... */
+} __packed;
+
+static void pn533_acr122_tx_frame_init(void *_frame, u8 cmd_code)
+{
+	struct pn533_acr122_tx_frame *frame = _frame;
+
+	frame->ccid.type = PN533_ACR122_PC_TO_RDR_ESCAPE;
+	/* sizeof(apdu_hdr) + sizeof(datalen) */
+	frame->ccid.datalen = sizeof(frame->apdu) + 1;
+	frame->ccid.slot = 0;
+	frame->ccid.seq = 0;
+	frame->ccid.params[0] = 0;
+	frame->ccid.params[1] = 0;
+	frame->ccid.params[2] = 0;
+
+	frame->data[0] = PN533_STD_FRAME_DIR_OUT;
+	frame->data[1] = cmd_code;
+	frame->datalen = 2; /* data[0] + data[1] */
+
+	frame->apdu.class = 0xFF;
+	frame->apdu.ins = 0;
+	frame->apdu.p1 = 0;
+	frame->apdu.p2 = 0;
+}
+
+static void pn533_acr122_tx_frame_finish(void *_frame)
+{
+	struct pn533_acr122_tx_frame *frame = _frame;
+
+	frame->ccid.datalen += frame->datalen;
+}
+
+static void pn533_acr122_tx_update_payload_len(void *_frame, int len)
+{
+	struct pn533_acr122_tx_frame *frame = _frame;
+
+	frame->datalen += len;
+}
+
+static bool pn533_acr122_is_rx_frame_valid(void *_frame, struct pn533 *dev)
+{
+	struct pn533_acr122_rx_frame *frame = _frame;
+
+	if (frame->ccid.type != 0x83)
+		return false;
+
+	if (!frame->ccid.datalen)
+		return false;
+
+	if (frame->data[frame->ccid.datalen - 2] == 0x63)
+		return false;
+
+	return true;
+}
+
+static int pn533_acr122_rx_frame_size(void *frame)
+{
+	struct pn533_acr122_rx_frame *f = frame;
+
+	/* f->ccid.datalen already includes tail length */
+	return sizeof(struct pn533_acr122_rx_frame) + f->ccid.datalen;
+}
+
+static u8 pn533_acr122_get_cmd_code(void *frame)
+{
+	struct pn533_acr122_rx_frame *f = frame;
+
+	return PN533_FRAME_CMD(f);
+}
+
+static struct pn533_frame_ops pn533_acr122_frame_ops = {
+	.tx_frame_init = pn533_acr122_tx_frame_init,
+	.tx_frame_finish = pn533_acr122_tx_frame_finish,
+	.tx_update_payload_len = pn533_acr122_tx_update_payload_len,
+	.tx_header_len = PN533_ACR122_TX_FRAME_HEADER_LEN,
+	.tx_tail_len = PN533_ACR122_TX_FRAME_TAIL_LEN,
+
+	.rx_is_frame_valid = pn533_acr122_is_rx_frame_valid,
+	.rx_header_len = PN533_ACR122_RX_FRAME_HEADER_LEN,
+	.rx_tail_len = PN533_ACR122_RX_FRAME_TAIL_LEN,
+	.rx_frame_size = pn533_acr122_rx_frame_size,
+
+	.max_payload_len = PN533_ACR122_FRAME_MAX_PAYLOAD_LEN,
+	.get_cmd_code = pn533_acr122_get_cmd_code,
+};
+
+struct pn533_acr122_poweron_rdr_arg {
+	int rc;
+	struct completion done;
+};
+
+static void pn533_acr122_poweron_rdr_resp(struct urb *urb)
+{
+	struct pn533_acr122_poweron_rdr_arg *arg = urb->context;
+
+	dev_dbg(&urb->dev->dev, "%s\n", __func__);
+
+	print_hex_dump_debug("ACR122 RX: ", DUMP_PREFIX_NONE, 16, 1,
+			     urb->transfer_buffer, urb->transfer_buffer_length,
+			     false);
+
+	arg->rc = urb->status;
+	complete(&arg->done);
+}
+
+static int pn533_acr122_poweron_rdr(struct pn533_usb_phy *phy)
+{
+	/* Power on the reader (CCID cmd) */
+	u8 cmd[10] = {PN533_ACR122_PC_TO_RDR_ICCPOWERON,
+		      0, 0, 0, 0, 0, 0, 3, 0, 0};
+	int rc;
+	void *cntx;
+	struct pn533_acr122_poweron_rdr_arg arg;
+
+	dev_dbg(&phy->udev->dev, "%s\n", __func__);
+
+	init_completion(&arg.done);
+	cntx = phy->in_urb->context; /* backup context */
+
+	phy->in_urb->complete = pn533_acr122_poweron_rdr_resp;
+	phy->in_urb->context = &arg;
+
+	phy->out_urb->transfer_buffer = cmd;
+	phy->out_urb->transfer_buffer_length = sizeof(cmd);
+
+	print_hex_dump_debug("ACR122 TX: ", DUMP_PREFIX_NONE, 16, 1,
+			     cmd, sizeof(cmd), false);
+
+	rc = usb_submit_urb(phy->out_urb, GFP_KERNEL);
+	if (rc) {
+		nfc_err(&phy->udev->dev,
+			"Reader power on cmd error %d\n", rc);
+		return rc;
+	}
+
+	rc = usb_submit_urb(phy->in_urb, GFP_KERNEL);
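+	/* on success, pn533_acr122_poweron_rdr_resp() records the urb status
+	 * and completes arg.done, which is waited on below
+	 */
+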
if (rc) { + nfc_err(&phy->udev->dev, + "Can't submit reader poweron cmd response %d\n", rc); + return rc; + } + + wait_for_completion(&arg.done); + phy->in_urb->context = cntx; /* restore context */ + + return arg.rc; +} + +static void pn533_send_complete(struct urb *urb) +{ + struct pn533_usb_phy *phy = urb->context; + + switch (urb->status) { + case 0: + break; /* success */ + case -ECONNRESET: + case -ENOENT: + dev_dbg(&phy->udev->dev, + "The urb has been stopped (status %d)\n", + urb->status); + break; + case -ESHUTDOWN: + default: + nfc_err(&phy->udev->dev, + "Urb failure (status %d)\n", + urb->status); + } +} + +static struct pn533_phy_ops usb_phy_ops = { + .send_frame = pn533_usb_send_frame, + .send_ack = pn533_usb_send_ack, + .abort_cmd = pn533_usb_abort_cmd, +}; + +static int pn533_usb_probe(struct usb_interface *interface, + const struct usb_device_id *id) +{ + struct pn533 *priv; + struct pn533_usb_phy *phy; + struct usb_host_interface *iface_desc; + struct usb_endpoint_descriptor *endpoint; + int in_endpoint = 0; + int out_endpoint = 0; + int rc = -ENOMEM; + int i; + u32 protocols; + enum pn533_protocol_type protocol_type = PN533_PROTO_REQ_ACK_RESP; + struct pn533_frame_ops *fops = NULL; + unsigned char *in_buf; + int in_buf_len = PN533_EXT_FRAME_HEADER_LEN + + PN533_STD_FRAME_MAX_PAYLOAD_LEN + + PN533_STD_FRAME_TAIL_LEN; + + phy = devm_kzalloc(&interface->dev, sizeof(*phy), GFP_KERNEL); + if (!phy) + return -ENOMEM; + + in_buf = kzalloc(in_buf_len, GFP_KERNEL); + if (!in_buf) { + rc = -ENOMEM; + goto out_free_phy; + } + + phy->udev = usb_get_dev(interface_to_usbdev(interface)); + phy->interface = interface; + + iface_desc = interface->cur_altsetting; + for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) { + endpoint = &iface_desc->endpoint[i].desc; + + if (!in_endpoint && usb_endpoint_is_bulk_in(endpoint)) + in_endpoint = endpoint->bEndpointAddress; + + if (!out_endpoint && usb_endpoint_is_bulk_out(endpoint)) + out_endpoint = endpoint->bEndpointAddress; + } + + if (!in_endpoint || !out_endpoint) { + nfc_err(&interface->dev, + "Could not find bulk-in or bulk-out endpoint\n"); + rc = -ENODEV; + goto error; + } + + phy->in_urb = usb_alloc_urb(0, GFP_KERNEL); + phy->out_urb = usb_alloc_urb(0, GFP_KERNEL); + + if (!phy->in_urb || !phy->out_urb) + goto error; + + usb_fill_bulk_urb(phy->in_urb, phy->udev, + usb_rcvbulkpipe(phy->udev, in_endpoint), + in_buf, in_buf_len, NULL, phy); + + usb_fill_bulk_urb(phy->out_urb, phy->udev, + usb_sndbulkpipe(phy->udev, out_endpoint), + NULL, 0, pn533_send_complete, phy); + + + switch (id->driver_info) { + case PN533_DEVICE_STD: + protocols = PN533_ALL_PROTOCOLS; + break; + + case PN533_DEVICE_PASORI: + protocols = PN533_NO_TYPE_B_PROTOCOLS; + break; + + case PN533_DEVICE_ACR122U: + protocols = PN533_NO_TYPE_B_PROTOCOLS; + fops = &pn533_acr122_frame_ops; + protocol_type = PN533_PROTO_REQ_RESP, + + rc = pn533_acr122_poweron_rdr(phy); + if (rc < 0) { + nfc_err(&interface->dev, + "Couldn't poweron the reader (error %d)\n", rc); + goto error; + } + break; + + default: + nfc_err(&interface->dev, "Unknown device type %lu\n", + id->driver_info); + rc = -EINVAL; + goto error; + } + + priv = pn533_register_device(id->driver_info, protocols, protocol_type, + phy, &usb_phy_ops, fops, + &phy->udev->dev); + + if (IS_ERR(priv)) { + rc = PTR_ERR(priv); + goto error; + } + + phy->priv = priv; + nfc_set_parent_dev(priv->nfc_dev, &interface->dev); + + usb_set_intfdata(interface, phy); + + return 0; + +error: + usb_free_urb(phy->in_urb); + 
usb_free_urb(phy->out_urb); + usb_put_dev(phy->udev); + kfree(in_buf); +out_free_phy: + kfree(phy); + return rc; +} + +static void pn533_usb_disconnect(struct usb_interface *interface) +{ + struct pn533_usb_phy *phy = usb_get_intfdata(interface); + + if (!phy) + return; + + pn533_unregister_device(phy->priv); + + usb_set_intfdata(interface, NULL); + + usb_kill_urb(phy->in_urb); + usb_kill_urb(phy->out_urb); + + kfree(phy->in_urb->transfer_buffer); + usb_free_urb(phy->in_urb); + usb_free_urb(phy->out_urb); + + nfc_info(&interface->dev, "NXP PN533 NFC device disconnected\n"); +} + +static struct usb_driver pn533_usb_driver = { + .name = "pn533_usb", + .probe = pn533_usb_probe, + .disconnect = pn533_usb_disconnect, + .id_table = pn533_usb_table, +}; + +module_usb_driver(pn533_usb_driver); + +MODULE_AUTHOR("Lauro Ramos Venancio "); +MODULE_AUTHOR("Aloisio Almeida Jr "); +MODULE_AUTHOR("Waldemar Rymarkiewicz "); +MODULE_DESCRIPTION("PN533 USB driver ver " VERSION); +MODULE_VERSION(VERSION); +MODULE_LICENSE("GPL"); From dd7bedcd2673e4c8957d15d7e6e649fc6fa40206 Mon Sep 17 00:00:00 2001 From: Michael Thalmeier Date: Fri, 25 Mar 2016 15:46:54 +0100 Subject: [PATCH 0517/1649] NFC: pn533: add I2C phy driver This adds the I2C phy interface for the pn533 driver. This way the driver can be used to interact with I2C connected pn532 devices. Signed-off-by: Michael Thalmeier Signed-off-by: Samuel Ortiz --- drivers/nfc/pn533/Kconfig | 11 ++ drivers/nfc/pn533/Makefile | 2 + drivers/nfc/pn533/i2c.c | 271 +++++++++++++++++++++++++++++++++++++ drivers/nfc/pn533/pn533.c | 31 +++++ drivers/nfc/pn533/pn533.h | 2 + 5 files changed, 317 insertions(+) create mode 100644 drivers/nfc/pn533/i2c.c diff --git a/drivers/nfc/pn533/Kconfig b/drivers/nfc/pn533/Kconfig index b5a926e42f7b..d94122dd30e4 100644 --- a/drivers/nfc/pn533/Kconfig +++ b/drivers/nfc/pn533/Kconfig @@ -14,3 +14,14 @@ config NFC_PN533_USB If you choose to build a module, it'll be called pn533_usb. Say N if unsure. + +config NFC_PN533_I2C + tristate "NFC PN533 device support (I2C)" + depends on I2C + select NFC_PN533 + ---help--- + This module adds support for the NXP pn533 I2C interface. + Select this if your platform is using the I2C bus. + + If you choose to build a module, it'll be called pn533_i2c. + Say N if unsure. diff --git a/drivers/nfc/pn533/Makefile b/drivers/nfc/pn533/Makefile index 12c6be481483..51d24c622fcb 100644 --- a/drivers/nfc/pn533/Makefile +++ b/drivers/nfc/pn533/Makefile @@ -2,6 +2,8 @@ # Makefile for PN533 NFC driver # pn533_usb-objs = usb.o +pn533_i2c-objs = i2c.o obj-$(CONFIG_NFC_PN533) += pn533.o obj-$(CONFIG_NFC_PN533_USB) += pn533_usb.o +obj-$(CONFIG_NFC_PN533_I2C) += pn533_i2c.o diff --git a/drivers/nfc/pn533/i2c.c b/drivers/nfc/pn533/i2c.c new file mode 100644 index 000000000000..9679aa52c381 --- /dev/null +++ b/drivers/nfc/pn533/i2c.c @@ -0,0 +1,271 @@ +/* + * Driver for NXP PN533 NFC Chip - I2C transport layer + * + * Copyright (C) 2011 Instituto Nokia de Tecnologia + * Copyright (C) 2012-2013 Tieto Poland + * Copyright (C) 2016 HALE electronic + * + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "pn533.h" + +#define VERSION "0.1" + +#define PN533_I2C_DRIVER_NAME "pn533_i2c" + +struct pn533_i2c_phy { + struct i2c_client *i2c_dev; + struct pn533 *priv; + + int hard_fault; /* + * < 0 if hardware error occurred (e.g. i2c err) + * and prevents normal operation. + */ +}; + +static int pn533_i2c_send_ack(struct pn533 *dev, gfp_t flags) +{ + struct pn533_i2c_phy *phy = dev->phy; + struct i2c_client *client = phy->i2c_dev; + u8 ack[6] = {0x00, 0x00, 0xff, 0x00, 0xff, 0x00}; + /* spec 6.2.1.3: Preamble, SoPC (2), ACK Code (2), Postamble */ + int rc; + + rc = i2c_master_send(client, ack, 6); + + return rc; +} + +static int pn533_i2c_send_frame(struct pn533 *dev, + struct sk_buff *out) +{ + struct pn533_i2c_phy *phy = dev->phy; + struct i2c_client *client = phy->i2c_dev; + int rc; + + if (phy->hard_fault != 0) + return phy->hard_fault; + + if (phy->priv == NULL) + phy->priv = dev; + + print_hex_dump_debug("PN533_i2c TX: ", DUMP_PREFIX_NONE, 16, 1, + out->data, out->len, false); + + rc = i2c_master_send(client, out->data, out->len); + + if (rc == -EREMOTEIO) { /* Retry, chip was in power down */ + usleep_range(6000, 10000); + rc = i2c_master_send(client, out->data, out->len); + } + + if (rc >= 0) { + if (rc != out->len) + rc = -EREMOTEIO; + else + rc = 0; + } + + return rc; +} + +static void pn533_i2c_abort_cmd(struct pn533 *dev, gfp_t flags) +{ + /* An ack will cancel the last issued command */ + pn533_i2c_send_ack(dev, flags); + + /* schedule cmd_complete_work to finish current command execution */ + if (dev->cmd != NULL) + dev->cmd->status = -ENOENT; + queue_work(dev->wq, &dev->cmd_complete_work); +} + +static int pn533_i2c_read(struct pn533_i2c_phy *phy, struct sk_buff **skb) +{ + struct i2c_client *client = phy->i2c_dev; + int len = PN533_EXT_FRAME_HEADER_LEN + + PN533_STD_FRAME_MAX_PAYLOAD_LEN + + PN533_STD_FRAME_TAIL_LEN + 1; + int r; + + *skb = alloc_skb(len, GFP_KERNEL); + if (*skb == NULL) + return -ENOMEM; + + r = i2c_master_recv(client, skb_put(*skb, len), len); + if (r != len) { + nfc_err(&client->dev, "cannot read. 
r=%d len=%d\n", r, len); + kfree_skb(*skb); + return -EREMOTEIO; + } + + if (!((*skb)->data[0] & 0x01)) { + nfc_err(&client->dev, "READY flag not set"); + kfree_skb(*skb); + return -EBUSY; + } + + /* remove READY byte */ + skb_pull(*skb, 1); + /* trim to frame size */ + skb_trim(*skb, phy->priv->ops->rx_frame_size((*skb)->data)); + + return 0; +} + +static irqreturn_t pn533_i2c_irq_thread_fn(int irq, void *data) +{ + struct pn533_i2c_phy *phy = data; + struct i2c_client *client; + struct sk_buff *skb = NULL; + int r; + + if (!phy || irq != phy->i2c_dev->irq) { + WARN_ON_ONCE(1); + return IRQ_NONE; + } + + client = phy->i2c_dev; + dev_dbg(&client->dev, "IRQ\n"); + + if (phy->hard_fault != 0) + return IRQ_HANDLED; + + r = pn533_i2c_read(phy, &skb); + if (r == -EREMOTEIO) { + phy->hard_fault = r; + + pn533_recv_frame(phy->priv, NULL, -EREMOTEIO); + + return IRQ_HANDLED; + } else if ((r == -ENOMEM) || (r == -EBADMSG) || (r == -EBUSY)) { + return IRQ_HANDLED; + } + + pn533_recv_frame(phy->priv, skb, 0); + + return IRQ_HANDLED; +} + +static struct pn533_phy_ops i2c_phy_ops = { + .send_frame = pn533_i2c_send_frame, + .send_ack = pn533_i2c_send_ack, + .abort_cmd = pn533_i2c_abort_cmd, +}; + + +static int pn533_i2c_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct pn533_i2c_phy *phy; + struct pn533 *priv; + int r = 0; + + dev_dbg(&client->dev, "%s\n", __func__); + dev_dbg(&client->dev, "IRQ: %d\n", client->irq); + + if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { + nfc_err(&client->dev, "Need I2C_FUNC_I2C\n"); + return -ENODEV; + } + + phy = devm_kzalloc(&client->dev, sizeof(struct pn533_i2c_phy), + GFP_KERNEL); + if (!phy) + return -ENOMEM; + + phy->i2c_dev = client; + i2c_set_clientdata(client, phy); + + r = request_threaded_irq(client->irq, NULL, pn533_i2c_irq_thread_fn, + IRQF_TRIGGER_FALLING | + IRQF_SHARED | IRQF_ONESHOT, + PN533_I2C_DRIVER_NAME, phy); + + if (r < 0) + nfc_err(&client->dev, "Unable to register IRQ handler\n"); + + priv = pn533_register_device(PN533_DEVICE_PN532, + PN533_NO_TYPE_B_PROTOCOLS, + PN533_PROTO_REQ_ACK_RESP, + phy, &i2c_phy_ops, NULL, + &phy->i2c_dev->dev); + + if (IS_ERR(priv)) { + r = PTR_ERR(priv); + goto err_register; + } + + phy->priv = priv; + + return 0; + +err_register: + free_irq(client->irq, phy); + + return r; +} + +static int pn533_i2c_remove(struct i2c_client *client) +{ + struct pn533_i2c_phy *phy = i2c_get_clientdata(client); + + dev_dbg(&client->dev, "%s\n", __func__); + + pn533_unregister_device(phy->priv); + + return 0; +} + +static const struct of_device_id of_pn533_i2c_match[] = { + { .compatible = "nxp,pn533-i2c", }, + { .compatible = "nxp,pn532-i2c", }, + {}, +}; +MODULE_DEVICE_TABLE(of, of_pn533_i2c_match); + +static struct i2c_device_id pn533_i2c_id_table[] = { + { PN533_I2C_DRIVER_NAME, 0 }, + {} +}; +MODULE_DEVICE_TABLE(i2c, pn533_i2c_id_table); + +static struct i2c_driver pn533_i2c_driver = { + .driver = { + .name = PN533_I2C_DRIVER_NAME, + .owner = THIS_MODULE, + .of_match_table = of_match_ptr(of_pn533_i2c_match), + }, + .probe = pn533_i2c_probe, + .id_table = pn533_i2c_id_table, + .remove = pn533_i2c_remove, +}; + +module_i2c_driver(pn533_i2c_driver); + +MODULE_AUTHOR("Michael Thalmeier "); +MODULE_DESCRIPTION("PN533 I2C driver ver " VERSION); +MODULE_VERSION(VERSION); +MODULE_LICENSE("GPL"); diff --git a/drivers/nfc/pn533/pn533.c b/drivers/nfc/pn533/pn533.c index 52d83fec5add..ee9e8f1195fa 100644 --- a/drivers/nfc/pn533/pn533.c +++ b/drivers/nfc/pn533/pn533.c @@ -2427,8 +2427,37 @@ static int 
pn533_rf_field(struct nfc_dev *nfc_dev, u8 rf) return rc; } +static int pn532_sam_configuration(struct nfc_dev *nfc_dev) +{ + struct pn533 *dev = nfc_get_drvdata(nfc_dev); + struct sk_buff *skb; + struct sk_buff *resp; + + skb = pn533_alloc_skb(dev, 1); + if (!skb) + return -ENOMEM; + + *skb_put(skb, 1) = 0x01; + + resp = pn533_send_cmd_sync(dev, PN533_CMD_SAM_CONFIGURATION, skb); + if (IS_ERR(resp)) + return PTR_ERR(resp); + + dev_kfree_skb(resp); + return 0; +} + static int pn533_dev_up(struct nfc_dev *nfc_dev) { + struct pn533 *dev = nfc_get_drvdata(nfc_dev); + + if (dev->device_type == PN533_DEVICE_PN532) { + int rc = pn532_sam_configuration(nfc_dev); + + if (rc) + return rc; + } + return pn533_rf_field(nfc_dev, 1); } @@ -2461,6 +2490,7 @@ static int pn533_setup(struct pn533 *dev) case PN533_DEVICE_STD: case PN533_DEVICE_PASORI: case PN533_DEVICE_ACR122U: + case PN533_DEVICE_PN532: max_retries.mx_rty_atr = 0x2; max_retries.mx_rty_psl = 0x1; max_retries.mx_rty_passive_act = @@ -2496,6 +2526,7 @@ static int pn533_setup(struct pn533 *dev) switch (dev->device_type) { case PN533_DEVICE_STD: + case PN533_DEVICE_PN532: break; case PN533_DEVICE_PASORI: diff --git a/drivers/nfc/pn533/pn533.h b/drivers/nfc/pn533/pn533.h index 1d9f19eb2a99..ba604f6d93f9 100644 --- a/drivers/nfc/pn533/pn533.h +++ b/drivers/nfc/pn533/pn533.h @@ -21,6 +21,7 @@ #define PN533_DEVICE_STD 0x1 #define PN533_DEVICE_PASORI 0x2 #define PN533_DEVICE_ACR122U 0x3 +#define PN533_DEVICE_PN532 0x4 #define PN533_ALL_PROTOCOLS (NFC_PROTO_JEWEL_MASK | NFC_PROTO_MIFARE_MASK |\ NFC_PROTO_FELICA_MASK | NFC_PROTO_ISO14443_MASK |\ @@ -73,6 +74,7 @@ #define PN533_FRAME_CMD(f) (f->data[1]) #define PN533_CMD_GET_FIRMWARE_VERSION 0x02 +#define PN533_CMD_SAM_CONFIGURATION 0x14 #define PN533_CMD_RF_CONFIGURATION 0x32 #define PN533_CMD_IN_DATA_EXCHANGE 0x40 #define PN533_CMD_IN_COMM_THRU 0x42 From 4923ec0b10d998349c2ac4b38aa4674e539e6f92 Mon Sep 17 00:00:00 2001 From: Alexei Starovoitov Date: Wed, 6 Apr 2016 19:39:21 -0700 Subject: [PATCH 0518/1649] bpf: simplify verifier register state assignments verifier is using the following structure to track the state of registers: struct reg_state { enum bpf_reg_type type; union { int imm; struct bpf_map *map_ptr; }; }; and later on in states_equal() does memcmp(&old->regs[i], &cur->regs[i],..) to find equivalent states. Throughout the code of verifier there are assignements to 'imm' and 'map_ptr' fields and it's not obvious that most of the assignments into 'imm' don't need to clear extra 4 bytes (like mark_reg_unknown_value() does) to make sure that memcmp doesn't go over junk left from 'map_ptr' assignment. Simplify the code by converting 'int' into 'long' Suggested-by: Daniel Borkmann Signed-off-by: Alexei Starovoitov Acked-by: Daniel Borkmann Signed-off-by: David S. 
Miller --- kernel/bpf/verifier.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 8233021538d3..6c5d7cd4cb0e 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -142,7 +142,7 @@ struct reg_state { enum bpf_reg_type type; union { /* valid when type == CONST_IMM | PTR_TO_STACK */ - int imm; + long imm; /* valid when type == CONST_PTR_TO_MAP | PTR_TO_MAP_VALUE | * PTR_TO_MAP_VALUE_OR_NULL @@ -263,7 +263,7 @@ static void print_verifier_state(struct verifier_env *env) continue; verbose(" R%d=%s", i, reg_type_str[t]); if (t == CONST_IMM || t == PTR_TO_STACK) - verbose("%d", env->cur_state.regs[i].imm); + verbose("%ld", env->cur_state.regs[i].imm); else if (t == CONST_PTR_TO_MAP || t == PTR_TO_MAP_VALUE || t == PTR_TO_MAP_VALUE_OR_NULL) verbose("(ks=%d,vs=%d)", @@ -480,7 +480,6 @@ static void init_reg_state(struct reg_state *regs) for (i = 0; i < MAX_BPF_REG; i++) { regs[i].type = NOT_INIT; regs[i].imm = 0; - regs[i].map_ptr = NULL; } /* frame pointer */ @@ -495,7 +494,6 @@ static void mark_reg_unknown_value(struct reg_state *regs, u32 regno) BUG_ON(regno >= MAX_BPF_REG); regs[regno].type = UNKNOWN_VALUE; regs[regno].imm = 0; - regs[regno].map_ptr = NULL; } enum reg_arg_type { From 03efbec03198a0f505c2a6c93268c3c5df321c90 Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Mon, 11 Apr 2016 04:11:11 -0400 Subject: [PATCH 0519/1649] bnxt_en: Disallow forced speed for 10GBaseT devices. 10GBaseT devices must autonegotiate to determine master/slave clocking. Disallow forced speed in ethtool .set_settings() for these devices. Signed-off-by: Michael Chan Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 1 + drivers/net/ethernet/broadcom/bnxt/bnxt.h | 1 + drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 8 ++++++++ 3 files changed, 10 insertions(+) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 597e4724a474..a06dcaa75f6e 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -4611,6 +4611,7 @@ static int bnxt_update_link(struct bnxt *bp, bool chng_link_state) link_info->phy_ver[1] = resp->phy_min; link_info->phy_ver[2] = resp->phy_bld; link_info->media_type = resp->media_type; + link_info->phy_type = resp->phy_type; link_info->transceiver = resp->xcvr_pkg_type; link_info->phy_addr = resp->eee_config_phy_addr & PORT_PHY_QCFG_RESP_PHY_ADDR_MASK; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index cc8e38a9f684..26dac2f3c63c 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -759,6 +759,7 @@ struct bnxt_ntuple_filter { }; struct bnxt_link_info { + u8 phy_type; u8 media_type; u8 transceiver; u8 phy_addr; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index a2e93241b06b..d6e41f237f2c 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -850,7 +850,15 @@ static int bnxt_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) set_pause = true; } else { u16 fw_speed; + u8 phy_type = link_info->phy_type; + if (phy_type == PORT_PHY_QCFG_RESP_PHY_TYPE_BASET || + phy_type == PORT_PHY_QCFG_RESP_PHY_TYPE_BASETE || + link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP) { + netdev_err(dev, "10GBase-T devices must autoneg\n"); + rc = -EINVAL; + 
goto set_setting_exit; + } /* TODO: currently don't support half duplex */ if (cmd->duplex == DUPLEX_HALF) { netdev_err(dev, "HALF DUPLEX is not supported!\n"); From 33f7d55f07ab964055d73d38774346f8d4821f00 Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Mon, 11 Apr 2016 04:11:12 -0400 Subject: [PATCH 0520/1649] bnxt_en: Shutdown link when device is closed. Let firmware know that the driver is giving up control of the link so that it can be shutdown if no management firmware is running. Signed-off-by: Michael Chan Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index a06dcaa75f6e..e874a564f40b 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -4790,6 +4790,21 @@ int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause, bool set_eee) return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); } +static int bnxt_hwrm_shutdown_link(struct bnxt *bp) +{ + struct hwrm_port_phy_cfg_input req = {0}; + + if (BNXT_VF(bp)) + return 0; + + if (pci_num_vf(bp->pdev)) + return 0; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1); + req.flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DOWN); + return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); +} + static bool bnxt_eee_config_ok(struct bnxt *bp) { struct ethtool_eee *eee = &bp->eee; @@ -5044,6 +5059,7 @@ static int bnxt_close(struct net_device *dev) struct bnxt *bp = netdev_priv(dev); bnxt_close_nic(bp, true, true); + bnxt_hwrm_shutdown_link(bp); return 0; } From 84c33dd342ad596a271a61da0119bf34e80bb1c5 Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Mon, 11 Apr 2016 04:11:13 -0400 Subject: [PATCH 0521/1649] bnxt_en: Call firmware to approve VF MAC address change. Some hypervisors (e.g. ESX) require the VF MAC address to be forwarded to the PF for approval. In Linux PF, the call is not forwarded and the firmware will simply check and approve the MAC address if the PF has not previously administered a valid MAC address for this VF. Signed-off-by: Michael Chan Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 7 ++--- .../net/ethernet/broadcom/bnxt/bnxt_sriov.c | 30 +++++++++++++++++++ .../net/ethernet/broadcom/bnxt/bnxt_sriov.h | 1 + 3 files changed, 34 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index e874a564f40b..c83a5a1862d0 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -5696,10 +5696,9 @@ static int bnxt_change_mac_addr(struct net_device *dev, void *p) if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; -#ifdef CONFIG_BNXT_SRIOV - if (BNXT_VF(bp) && is_valid_ether_addr(bp->vf.mac_addr)) - return -EADDRNOTAVAIL; -#endif + rc = bnxt_approve_mac(bp, addr->sa_data); + if (rc) + return rc; if (ether_addr_equal(addr->sa_data, dev->dev_addr)) return 0; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c index 8457850b0bdd..363884dd9e8a 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c @@ -865,6 +865,31 @@ update_vf_mac_exit: mutex_unlock(&bp->hwrm_cmd_lock); } +int bnxt_approve_mac(struct bnxt *bp, u8 *mac) +{ + struct hwrm_func_vf_cfg_input req = {0}; + int rc = 0; + + if (!BNXT_VF(bp)) + return 0; + + if (bp->hwrm_spec_code < 0x10202) { + if (is_valid_ether_addr(bp->vf.mac_addr)) + rc = -EADDRNOTAVAIL; + goto mac_done; + } + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1); + req.enables = cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR); + memcpy(req.dflt_mac_addr, mac, ETH_ALEN); + rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); +mac_done: + if (rc) { + rc = -EADDRNOTAVAIL; + netdev_warn(bp->dev, "VF MAC address %pM not approved by the PF\n", + mac); + } + return rc; +} #else void bnxt_sriov_disable(struct bnxt *bp) @@ -879,4 +904,9 @@ void bnxt_hwrm_exec_fwd_req(struct bnxt *bp) void bnxt_update_vf_mac(struct bnxt *bp) { } + +int bnxt_approve_mac(struct bnxt *bp, u8 *mac) +{ + return 0; +} #endif diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h index 3f08354a247e..0392670ab49c 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h @@ -20,4 +20,5 @@ int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs); void bnxt_sriov_disable(struct bnxt *); void bnxt_hwrm_exec_fwd_req(struct bnxt *); void bnxt_update_vf_mac(struct bnxt *); +int bnxt_approve_mac(struct bnxt *, u8 *); #endif From 8cbde1175e3c8565edbb777cd09cbfdb93c78397 Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Mon, 11 Apr 2016 04:11:14 -0400 Subject: [PATCH 0522/1649] bnxt_en: Add async event handling for speed config changes. On some dual port cards, link speeds on both ports have to be compatible. Firmware will inform the driver when a certain speed is no longer supported if the other port has linked up at a certain speed. Add logic to handle this event by logging a message and getting the updated list of supported speeds. Signed-off-by: Michael Chan Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index c83a5a1862d0..4645c44e7c15 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -122,6 +122,7 @@ static const u16 bnxt_async_events_arr[] = { HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE, HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD, HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED, + HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE, }; static bool bnxt_vf_pciid(enum board_idx idx) @@ -1257,6 +1258,21 @@ static int bnxt_async_event_process(struct bnxt *bp, /* TODO CHIMP_FW: Define event id's for link change, error etc */ switch (event_id) { + case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: { + u32 data1 = le32_to_cpu(cmpl->event_data1); + struct bnxt_link_info *link_info = &bp->link_info; + + if (BNXT_VF(bp)) + goto async_event_process_exit; + if (data1 & 0x20000) { + u16 fw_speed = link_info->force_link_speed; + u32 speed = bnxt_fw_to_ethtool_speed(fw_speed); + + netdev_warn(bp->dev, "Link speed %d no longer supported\n", + speed); + } + /* fall thru */ + } case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE: set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event); break; From e0897ae3ec720b1653d4ff9aaf48b532c276ab63 Mon Sep 17 00:00:00 2001 From: Vaishali Thakkar Date: Mon, 11 Apr 2016 15:58:17 +0530 Subject: [PATCH 0523/1649] net: fjes: Use resource_size Use the function resource_size instead of explicit computation. Problem found using Coccinelle. Signed-off-by: Vaishali Thakkar Signed-off-by: David S. Miller --- drivers/net/fjes/fjes_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/fjes/fjes_main.c b/drivers/net/fjes/fjes_main.c index 0ddb54fe3d91..061b4af4ee62 100644 --- a/drivers/net/fjes/fjes_main.c +++ b/drivers/net/fjes/fjes_main.c @@ -1129,7 +1129,7 @@ static int fjes_probe(struct platform_device *plat_dev) res = platform_get_resource(plat_dev, IORESOURCE_MEM, 0); hw->hw_res.start = res->start; - hw->hw_res.size = res->end - res->start + 1; + hw->hw_res.size = resource_size(res); hw->hw_res.irq = platform_get_irq(plat_dev, 0); err = fjes_hw_init(&adapter->hw); if (err) From 61618eeac3e6165684895481c4f58ea879c3d616 Mon Sep 17 00:00:00 2001 From: Jiri Benc Date: Mon, 11 Apr 2016 17:06:08 +0200 Subject: [PATCH 0524/1649] vxlan: fix incorrect type The protocol is 16bit, not 32bit. Fixes: e1e5314de08ba ("vxlan: implement GPE") Reported-by: Dan Carpenter Signed-off-by: Jiri Benc Signed-off-by: David S. 
Miller --- drivers/net/vxlan.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 9f3634064c92..7f697a3f00a4 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -1181,7 +1181,7 @@ out: } static bool vxlan_parse_gpe_hdr(struct vxlanhdr *unparsed, - __be32 *protocol, + __be16 *protocol, struct sk_buff *skb, u32 vxflags) { struct vxlanhdr_gpe *gpe = (struct vxlanhdr_gpe *)unparsed; @@ -1284,7 +1284,7 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb) struct vxlanhdr unparsed; struct vxlan_metadata _md; struct vxlan_metadata *md = &_md; - __be32 protocol = htons(ETH_P_TEB); + __be16 protocol = htons(ETH_P_TEB); bool raw_proto = false; void *oiph; From 61f1cef90a18122ff9832a897dc75738c14e710a Mon Sep 17 00:00:00 2001 From: Grygorii Strashko Date: Thu, 7 Apr 2016 15:16:43 +0300 Subject: [PATCH 0525/1649] drivers: net: cpsw: fix port_mask parameters in ale calls ALE APIs expect to receive port masks as input values for arguments port_mask, untag, reg_mcast, unreg_mcast. But there are few places in code where port masks are passed left-shifted by cpsw_priv->host_port, like below: cpsw_ale_add_vlan(priv->ale, priv->data.default_vlan, ALE_ALL_PORTS << priv->host_port, ALE_ALL_PORTS << priv->host_port, 0, 0); and cpsw is still working just because priv->host_port == 0 and has never ever been changed. Hence, fix port_mask parameters in ALE APIs calls and drop "<< priv->host_port" from all places where it's used to shift valid port mask. Signed-off-by: Grygorii Strashko Signed-off-by: David S. Miller --- drivers/net/ethernet/ti/cpsw.c | 22 +++++++++------------- 1 file changed, 9 insertions(+), 13 deletions(-) diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 42fdfd4d9d4f..5292e70b4825 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -535,7 +535,7 @@ static const struct cpsw_stats cpsw_gstrings_stats[] = { ALE_VLAN, slave->port_vlan, 0); \ } else { \ cpsw_ale_add_mcast(priv->ale, addr, \ - ALE_ALL_PORTS << priv->host_port, \ + ALE_ALL_PORTS, \ 0, 0, 0); \ } \ } while (0) @@ -602,8 +602,7 @@ static void cpsw_set_promiscious(struct net_device *ndev, bool enable) cpsw_ale_control_set(ale, 0, ALE_AGEOUT, 1); /* Clear all mcast from ALE */ - cpsw_ale_flush_multicast(ale, ALE_ALL_PORTS << - priv->host_port, -1); + cpsw_ale_flush_multicast(ale, ALE_ALL_PORTS, -1); /* Flood All Unicast Packets to Host port */ cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 1); @@ -648,8 +647,7 @@ static void cpsw_ndo_set_rx_mode(struct net_device *ndev) cpsw_ale_set_allmulti(priv->ale, priv->ndev->flags & IFF_ALLMULTI); /* Clear all mcast from ALE */ - cpsw_ale_flush_multicast(priv->ale, ALE_ALL_PORTS << priv->host_port, - vid); + cpsw_ale_flush_multicast(priv->ale, ALE_ALL_PORTS, vid); if (!netdev_mc_empty(ndev)) { struct netdev_hw_addr *ha; @@ -1172,7 +1170,6 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv) static inline void cpsw_add_default_vlan(struct cpsw_priv *priv) { const int vlan = priv->data.default_vlan; - const int port = priv->host_port; u32 reg; int i; int unreg_mcast_mask; @@ -1190,9 +1187,9 @@ static inline void cpsw_add_default_vlan(struct cpsw_priv *priv) else unreg_mcast_mask = ALE_PORT_1 | ALE_PORT_2; - cpsw_ale_add_vlan(priv->ale, vlan, ALE_ALL_PORTS << port, - ALE_ALL_PORTS << port, ALE_ALL_PORTS << port, - unreg_mcast_mask << port); + cpsw_ale_add_vlan(priv->ale, vlan, ALE_ALL_PORTS, + ALE_ALL_PORTS, ALE_ALL_PORTS, + 
unreg_mcast_mask); } static void cpsw_init_host_port(struct cpsw_priv *priv) @@ -1273,8 +1270,7 @@ static int cpsw_ndo_open(struct net_device *ndev) cpsw_add_default_vlan(priv); else cpsw_ale_add_vlan(priv->ale, priv->data.default_vlan, - ALE_ALL_PORTS << priv->host_port, - ALE_ALL_PORTS << priv->host_port, 0, 0); + ALE_ALL_PORTS, ALE_ALL_PORTS, 0, 0); if (!cpsw_common_res_usage_state(priv)) { struct cpsw_priv *priv_sl0 = cpsw_get_slave_priv(priv, 0); @@ -1666,7 +1662,7 @@ static inline int cpsw_add_vlan_ale_entry(struct cpsw_priv *priv, } ret = cpsw_ale_add_vlan(priv->ale, vid, port_mask, 0, port_mask, - unreg_mcast_mask << priv->host_port); + unreg_mcast_mask); if (ret != 0) return ret; @@ -1738,7 +1734,7 @@ static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev, return ret; ret = cpsw_ale_del_ucast(priv->ale, priv->mac_addr, - priv->host_port, ALE_VLAN, vid); + HOST_PORT_NUM, ALE_VLAN, vid); if (ret != 0) return ret; From 71a2cbb72a2bcbf3f1c1b14031870e37ad5e8109 Mon Sep 17 00:00:00 2001 From: Grygorii Strashko Date: Thu, 7 Apr 2016 15:16:44 +0300 Subject: [PATCH 0526/1649] drivers: net: cpsw: drop host_port field from struct cpsw_priv The host_port field is constantly assigned to 0 and this value has never changed (since time when cpsw driver was introduced. More over, if this field will be assigned to non 0 value it will break current driver functionality. Hence, there are no reasons to continue maintaining this host_port field and it can be removed, and the HOST_PORT_NUM and ALE_PORT_HOST defines can be used instead. Signed-off-by: Grygorii Strashko Signed-off-by: David S. Miller --- drivers/net/ethernet/ti/cpsw.c | 30 ++++++++++++------------------ 1 file changed, 12 insertions(+), 18 deletions(-) diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 5292e70b4825..54bcc3851b7e 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -381,7 +381,6 @@ struct cpsw_priv { u32 coal_intvl; u32 bus_freq_mhz; int rx_packet_max; - int host_port; struct clk *clk; u8 mac_addr[ETH_ALEN]; struct cpsw_slave *slaves; @@ -531,7 +530,7 @@ static const struct cpsw_stats cpsw_gstrings_stats[] = { int slave_port = cpsw_get_slave_port(priv, \ slave->slave_num); \ cpsw_ale_add_mcast(priv->ale, addr, \ - 1 << slave_port | 1 << priv->host_port, \ + 1 << slave_port | ALE_PORT_HOST, \ ALE_VLAN, slave->port_vlan, 0); \ } else { \ cpsw_ale_add_mcast(priv->ale, addr, \ @@ -542,10 +541,7 @@ static const struct cpsw_stats cpsw_gstrings_stats[] = { static inline int cpsw_get_slave_port(struct cpsw_priv *priv, u32 slave_num) { - if (priv->host_port == 0) - return slave_num + 1; - else - return slave_num; + return slave_num + 1; } static void cpsw_set_promiscious(struct net_device *ndev, bool enable) @@ -1090,7 +1086,7 @@ static inline void cpsw_add_dual_emac_def_ale_entries( struct cpsw_priv *priv, struct cpsw_slave *slave, u32 slave_port) { - u32 port_mask = 1 << slave_port | 1 << priv->host_port; + u32 port_mask = 1 << slave_port | ALE_PORT_HOST; if (priv->version == CPSW_VERSION_1) slave_write(slave, slave->port_vlan, CPSW1_PORT_VLAN); @@ -1101,7 +1097,7 @@ static inline void cpsw_add_dual_emac_def_ale_entries( cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast, port_mask, ALE_VLAN, slave->port_vlan, 0); cpsw_ale_add_ucast(priv->ale, priv->mac_addr, - priv->host_port, ALE_VLAN | ALE_SECURE, slave->port_vlan); + HOST_PORT_NUM, ALE_VLAN | ALE_SECURE, slave->port_vlan); } static void soft_reset_slave(struct cpsw_slave *slave) @@ -1202,7 +1198,7 @@ static 
void cpsw_init_host_port(struct cpsw_priv *priv) cpsw_ale_start(priv->ale); /* switch to vlan unaware mode */ - cpsw_ale_control_set(priv->ale, priv->host_port, ALE_VLAN_AWARE, + cpsw_ale_control_set(priv->ale, HOST_PORT_NUM, ALE_VLAN_AWARE, CPSW_ALE_VLAN_AWARE); control_reg = readl(&priv->regs->control); control_reg |= CPSW_VLAN_AWARE; @@ -1216,14 +1212,14 @@ static void cpsw_init_host_port(struct cpsw_priv *priv) &priv->host_port_regs->cpdma_tx_pri_map); __raw_writel(0, &priv->host_port_regs->cpdma_rx_chan_map); - cpsw_ale_control_set(priv->ale, priv->host_port, + cpsw_ale_control_set(priv->ale, HOST_PORT_NUM, ALE_PORT_STATE, ALE_PORT_STATE_FORWARD); if (!priv->data.dual_emac) { - cpsw_ale_add_ucast(priv->ale, priv->mac_addr, priv->host_port, + cpsw_ale_add_ucast(priv->ale, priv->mac_addr, HOST_PORT_NUM, 0, 0); cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast, - 1 << priv->host_port, 0, 0, ALE_MCAST_FWD_2); + ALE_PORT_HOST, 0, 0, ALE_MCAST_FWD_2); } } @@ -1616,9 +1612,9 @@ static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p) flags = ALE_VLAN; } - cpsw_ale_del_ucast(priv->ale, priv->mac_addr, priv->host_port, + cpsw_ale_del_ucast(priv->ale, priv->mac_addr, HOST_PORT_NUM, flags, vid); - cpsw_ale_add_ucast(priv->ale, addr->sa_data, priv->host_port, + cpsw_ale_add_ucast(priv->ale, addr->sa_data, HOST_PORT_NUM, flags, vid); memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN); @@ -1667,7 +1663,7 @@ static inline int cpsw_add_vlan_ale_entry(struct cpsw_priv *priv, return ret; ret = cpsw_ale_add_ucast(priv->ale, priv->mac_addr, - priv->host_port, ALE_VLAN, vid); + HOST_PORT_NUM, ALE_VLAN, vid); if (ret != 0) goto clean_vid; @@ -1679,7 +1675,7 @@ static inline int cpsw_add_vlan_ale_entry(struct cpsw_priv *priv, clean_vlan_ucast: cpsw_ale_del_ucast(priv->ale, priv->mac_addr, - priv->host_port, ALE_VLAN, vid); + HOST_PORT_NUM, ALE_VLAN, vid); clean_vid: cpsw_ale_del_vlan(priv->ale, vid, 0); return ret; @@ -2148,7 +2144,6 @@ static int cpsw_probe_dual_emac(struct platform_device *pdev, priv_sl2->bus_freq_mhz = priv->bus_freq_mhz; priv_sl2->regs = priv->regs; - priv_sl2->host_port = priv->host_port; priv_sl2->host_port_regs = priv->host_port_regs; priv_sl2->wr_regs = priv->wr_regs; priv_sl2->hw_stats = priv->hw_stats; @@ -2317,7 +2312,6 @@ static int cpsw_probe(struct platform_device *pdev) goto clean_runtime_disable_ret; } priv->regs = ss_regs; - priv->host_port = HOST_PORT_NUM; /* Need to enable clocks with runtime PM api to access module * registers From a6db4494d218c2e559173661ee972e048dc04fdd Mon Sep 17 00:00:00 2001 From: David Ahern Date: Thu, 7 Apr 2016 07:21:00 -0700 Subject: [PATCH 0527/1649] net: ipv4: Consider failed nexthops in multipath routes Multipath route lookups should consider knowledge about next hops and not select a hop that is known to be failed. Example: [h2] [h3] 15.0.0.5 | | 3| 3| [SP1] [SP2]--+ 1 2 1 2 | | /-------------+ | | \ / | | X | | / \ | | / \---------------\ | 1 2 1 2 12.0.0.2 [TOR1] 3-----------------3 [TOR2] 12.0.0.3 4 4 \ / \ / \ / -------| |-----/ 1 2 [TOR3] 3| | [h1] 12.0.0.1 host h1 with IP 12.0.0.1 has 2 paths to host h3 at 15.0.0.5: root@h1:~# ip ro ls ... 12.0.0.0/24 dev swp1 proto kernel scope link src 12.0.0.1 15.0.0.0/16 nexthop via 12.0.0.2 dev swp1 weight 1 nexthop via 12.0.0.3 dev swp1 weight 1 ... If the link between tor3 and tor1 is down and the link between tor1 and tor2 then tor1 is effectively cut-off from h1. 
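For background, the existing fib_select_multipath() picks a nexthop purely by hash threshold: a per-flow hash is compared against each nexthop's upper bound and the first hop whose range covers the hash wins, without ever asking whether that hop's neighbour is reachable. The snippet below is only a simplified sketch of that selection step; the structure and function names are stand-ins for illustration, not the real kernel definitions.

struct nh_sketch {
	int upper_bound;	/* stand-in for the per-nexthop hash upper bound */
};

/* Pick a nexthop index for a given flow hash.  Nothing in this walk asks
 * whether the chosen hop is actually reachable.
 */
static int select_nexthop_sketch(const struct nh_sketch *nh, int num, int hash)
{
	int i;

	for (i = 0; i < num; i++) {
		if (hash > nh[i].upper_bound)
			continue;	/* hash lies past this hop's range */
		return i;		/* first covering hop wins */
	}

	return 0;	/* no range matched (route just became dead); fall back to hop 0 */
}

Because nothing in this walk consults neighbour state, flows keep being spread across both hops even when one of them is unreachable.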
Yet the route lookups in h1 are alternating between the 2 routes: ping 15.0.0.5 gets one and ssh 15.0.0.5 gets the other. Connections that attempt to use the 12.0.0.2 nexthop fail since that neighbor is not reachable: root@h1:~# ip neigh show ... 12.0.0.3 dev swp1 lladdr 00:02:00:00:00:1b REACHABLE 12.0.0.2 dev swp1 FAILED ... The failed path can be avoided by considering known neighbor information when selecting next hops. If the neighbor lookup fails we have no knowledge about the nexthop, so give it a shot. If there is an entry then only select the nexthop if the state is sane. This is similar to what fib_detect_death does. To maintain backward compatibility use of the neighbor information is based on a new sysctl, fib_multipath_use_neigh. Signed-off-by: David Ahern Reviewed-by: Julian Anastasov Signed-off-by: David S. Miller --- Documentation/networking/ip-sysctl.txt | 10 ++++++++ include/net/netns/ipv4.h | 3 +++ net/ipv4/fib_semantics.c | 34 ++++++++++++++++++++++---- net/ipv4/sysctl_net_ipv4.c | 11 +++++++++ 4 files changed, 53 insertions(+), 5 deletions(-) diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt index b183e2b606c8..6c7f365b1515 100644 --- a/Documentation/networking/ip-sysctl.txt +++ b/Documentation/networking/ip-sysctl.txt @@ -63,6 +63,16 @@ fwmark_reflect - BOOLEAN fwmark of the packet they are replying to. Default: 0 +fib_multipath_use_neigh - BOOLEAN + Use status of existing neighbor entry when determining nexthop for + multipath routes. If disabled, neighbor information is not used and + packets could be directed to a failed nexthop. Only valid for kernels + built with CONFIG_IP_ROUTE_MULTIPATH enabled. + Default: 0 (disabled) + Possible values: + 0 - disabled + 1 - enabled + route/max_size - INTEGER Maximum number of routes allowed in the kernel. Increase this when using large numbers of interfaces and/or routes. diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h index a69cde3ce460..d061ffeb1e71 100644 --- a/include/net/netns/ipv4.h +++ b/include/net/netns/ipv4.h @@ -132,6 +132,9 @@ struct netns_ipv4 { struct list_head mr_tables; struct fib_rules_ops *mr_rules_ops; #endif +#endif +#ifdef CONFIG_IP_ROUTE_MULTIPATH + int sysctl_fib_multipath_use_neigh; #endif atomic_t rt_genid; }; diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c index d97268e8ff10..ab64d9f2eef9 100644 --- a/net/ipv4/fib_semantics.c +++ b/net/ipv4/fib_semantics.c @@ -1559,21 +1559,45 @@ int fib_sync_up(struct net_device *dev, unsigned int nh_flags) } #ifdef CONFIG_IP_ROUTE_MULTIPATH +static bool fib_good_nh(const struct fib_nh *nh) +{ + int state = NUD_REACHABLE; + + if (nh->nh_scope == RT_SCOPE_LINK) { + struct neighbour *n; + + rcu_read_lock_bh(); + + n = __ipv4_neigh_lookup_noref(nh->nh_dev, nh->nh_gw); + if (n) + state = n->nud_state; + + rcu_read_unlock_bh(); + } + + return !!(state & NUD_VALID); +} void fib_select_multipath(struct fib_result *res, int hash) { struct fib_info *fi = res->fi; + struct net *net = fi->fib_net; + bool first = false; for_nexthops(fi) { if (hash > atomic_read(&nh->nh_upper_bound)) continue; - res->nh_sel = nhsel; - return; + if (!net->ipv4.sysctl_fib_multipath_use_neigh || + fib_good_nh(nh)) { + res->nh_sel = nhsel; + return; + } + if (!first) { + res->nh_sel = nhsel; + first = true; + } } endfor_nexthops(fi); - - /* Race condition: route has just become dead. 
*/ - res->nh_sel = 0; } #endif diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c index 1e1fe6086dd9..bb0419582b8d 100644 --- a/net/ipv4/sysctl_net_ipv4.c +++ b/net/ipv4/sysctl_net_ipv4.c @@ -960,6 +960,17 @@ static struct ctl_table ipv4_net_table[] = { .mode = 0644, .proc_handler = proc_dointvec, }, +#ifdef CONFIG_IP_ROUTE_MULTIPATH + { + .procname = "fib_multipath_use_neigh", + .data = &init_net.ipv4.sysctl_fib_multipath_use_neigh, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = &zero, + .extra2 = &one, + }, +#endif { } }; From 1da8c681d5c122afe9fbadc02e92a0f9e3f7af44 Mon Sep 17 00:00:00 2001 From: Willem de Bruijn Date: Thu, 7 Apr 2016 11:44:58 -0400 Subject: [PATCH 0528/1649] sunrpc: do not pull udp headers on receive Commit e6afc8ace6dd modified the udp receive path by pulling the udp header before queuing an skbuff onto the receive queue. Sunrpc also calls skb_recv_datagram to dequeue an skb from a udp socket. Modify this receive path to also no longer expect udp headers. Fixes: e6afc8ace6dd ("udp: remove headers from UDP packets before queueing") Reported-by: Franklin S Cooper Jr. Signed-off-by: Willem de Bruijn Tested-by: Thierry Reding Signed-off-by: David S. Miller --- net/sunrpc/socklib.c | 2 +- net/sunrpc/svcsock.c | 5 ++--- net/sunrpc/xprtsock.c | 5 ++--- 3 files changed, 5 insertions(+), 7 deletions(-) diff --git a/net/sunrpc/socklib.c b/net/sunrpc/socklib.c index de70c78025d7..f217c348b341 100644 --- a/net/sunrpc/socklib.c +++ b/net/sunrpc/socklib.c @@ -155,7 +155,7 @@ int csum_partial_copy_to_xdr(struct xdr_buf *xdr, struct sk_buff *skb) struct xdr_skb_reader desc; desc.skb = skb; - desc.offset = sizeof(struct udphdr); + desc.offset = 0; desc.count = skb->len - desc.offset; if (skb_csum_unnecessary(skb)) diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c index 1413cdcc131c..71d6072664d2 100644 --- a/net/sunrpc/svcsock.c +++ b/net/sunrpc/svcsock.c @@ -617,7 +617,7 @@ static int svc_udp_recvfrom(struct svc_rqst *rqstp) svsk->sk_sk->sk_stamp = skb->tstamp; set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); /* there may be more data... */ - len = skb->len - sizeof(struct udphdr); + len = skb->len; rqstp->rq_arg.len = len; rqstp->rq_prot = IPPROTO_UDP; @@ -641,8 +641,7 @@ static int svc_udp_recvfrom(struct svc_rqst *rqstp) skb_free_datagram_locked(svsk->sk_sk, skb); } else { /* we can use it in-place */ - rqstp->rq_arg.head[0].iov_base = skb->data + - sizeof(struct udphdr); + rqstp->rq_arg.head[0].iov_base = skb->data; rqstp->rq_arg.head[0].iov_len = len; if (skb_checksum_complete(skb)) goto out_free; diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 65e759569e48..c1fc7b20bbc1 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -995,15 +995,14 @@ static void xs_udp_data_read_skb(struct rpc_xprt *xprt, u32 _xid; __be32 *xp; - repsize = skb->len - sizeof(struct udphdr); + repsize = skb->len; if (repsize < 4) { dprintk("RPC: impossible RPC reply size %d!\n", repsize); return; } /* Copy the XID from the skb... */ - xp = skb_header_pointer(skb, sizeof(struct udphdr), - sizeof(_xid), &_xid); + xp = skb_header_pointer(skb, 0, sizeof(_xid), &_xid); if (xp == NULL) return; From 4d0fc73ebe94ac984a187f21fbf4f3a1ac846f5a Mon Sep 17 00:00:00 2001 From: Willem de Bruijn Date: Thu, 7 Apr 2016 11:44:59 -0400 Subject: [PATCH 0529/1649] rxrpc: do not pull udp headers on receive Commit e6afc8ace6dd modified the udp receive path by pulling the udp header before queuing an skbuff onto the receive queue. 
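In practice this means an skb dequeued with skb_recv_datagram() now begins at the transport payload rather than at the UDP header, so every consumer that used to skip sizeof(struct udphdr) must start at offset 0 instead. The helper below is only an illustration of that convention change; the function name is made up and is not part of either patch.

#include <linux/skbuff.h>
#include <linux/udp.h>

/* Illustration: where the payload starts in a dequeued skb before and
 * after commit e6afc8ace6dd stripped the UDP header on receive.
 */
static void payload_bounds_sketch(const struct sk_buff *skb,
				  bool header_already_removed,
				  unsigned int *off, unsigned int *len)
{
	*off = header_already_removed ? 0 : sizeof(struct udphdr);
	*len = skb->len - *off;
}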
Rxrpc also calls skb_recv_datagram to dequeue an skb from a udp socket. Modify this receive path to also no longer expect udp headers. Fixes: e6afc8ace6dd ("udp: remove headers from UDP packets before queueing") Signed-off-by: Willem de Bruijn Tested-by: Thierry Reding Signed-off-by: David S. Miller --- net/rxrpc/ar-input.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/net/rxrpc/ar-input.c b/net/rxrpc/ar-input.c index 63ed75c40e29..4824a827d10d 100644 --- a/net/rxrpc/ar-input.c +++ b/net/rxrpc/ar-input.c @@ -612,9 +612,9 @@ int rxrpc_extract_header(struct rxrpc_skb_priv *sp, struct sk_buff *skb) struct rxrpc_wire_header whdr; /* dig out the RxRPC connection details */ - if (skb_copy_bits(skb, sizeof(struct udphdr), &whdr, sizeof(whdr)) < 0) + if (skb_copy_bits(skb, 0, &whdr, sizeof(whdr)) < 0) return -EBADMSG; - if (!pskb_pull(skb, sizeof(struct udphdr) + sizeof(whdr))) + if (!pskb_pull(skb, sizeof(whdr))) BUG(); memset(sp, 0, sizeof(*sp)); From 2f02f7aea7b6c9a9312846c006e076ae6ad026a4 Mon Sep 17 00:00:00 2001 From: David Howells Date: Thu, 7 Apr 2016 17:23:03 +0100 Subject: [PATCH 0530/1649] afs: Wait for outstanding async calls before closing rxrpc socket The afs filesystem needs to wait for any outstanding asynchronous calls (such as FS.GiveUpCallBacks cleaning up the callbacks lodged with a server) to complete before closing the AF_RXRPC socket when unloading the module. This may occur if the module is removed too quickly after unmounting all filesystems. This will produce an error report that looks like: AFS: Assertion failed 1 == 0 is false 0x1 == 0x0 is false ------------[ cut here ]------------ kernel BUG at ../fs/afs/rxrpc.c:135! ... RIP: 0010:[] afs_close_socket+0xec/0x107 [kafs] ... Call Trace: [] afs_exit+0x1f/0x57 [kafs] [] SyS_delete_module+0xec/0x17d [] entry_SYSCALL_64_fastpath+0x12/0x6b Signed-off-by: David Howells Signed-off-by: David S. 
Miller --- fs/afs/rxrpc.c | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c index b50642870a43..b4d337ad6e36 100644 --- a/fs/afs/rxrpc.c +++ b/fs/afs/rxrpc.c @@ -65,6 +65,12 @@ static void afs_async_workfn(struct work_struct *work) call->async_workfn(call); } +static int afs_wait_atomic_t(atomic_t *p) +{ + schedule(); + return 0; +} + /* * open an RxRPC socket and bind it to be a server for callback notifications * - the socket is left in blocking mode and non-blocking ops use MSG_DONTWAIT @@ -126,13 +132,16 @@ void afs_close_socket(void) { _enter(""); + wait_on_atomic_t(&afs_outstanding_calls, afs_wait_atomic_t, + TASK_UNINTERRUPTIBLE); + _debug("no outstanding calls"); + sock_release(afs_socket); _debug("dework"); destroy_workqueue(afs_async_calls); ASSERTCMP(atomic_read(&afs_outstanding_skbs), ==, 0); - ASSERTCMP(atomic_read(&afs_outstanding_calls), ==, 0); _leave(""); } @@ -178,8 +187,6 @@ static void afs_free_call(struct afs_call *call) { _debug("DONE %p{%s} [%d]", call, call->type->name, atomic_read(&afs_outstanding_calls)); - if (atomic_dec_return(&afs_outstanding_calls) == -1) - BUG(); ASSERTCMP(call->rxcall, ==, NULL); ASSERT(!work_pending(&call->async_work)); @@ -188,6 +195,9 @@ static void afs_free_call(struct afs_call *call) kfree(call->request); kfree(call); + + if (atomic_dec_and_test(&afs_outstanding_calls)) + wake_up_atomic_t(&afs_outstanding_calls); } /* From 8f7e6e75d3074dd1856a6105f7511249ee2f2ffd Mon Sep 17 00:00:00 2001 From: David Howells Date: Thu, 7 Apr 2016 17:23:09 +0100 Subject: [PATCH 0531/1649] rxrpc: Disable a debugging statement that has been left enabled. Disable a debugging statement that has been left enabled Signed-off-by: David Howells Signed-off-by: David S. Miller --- net/rxrpc/ar-ack.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/rxrpc/ar-ack.c b/net/rxrpc/ar-ack.c index 16d967075eaf..01a017a05f14 100644 --- a/net/rxrpc/ar-ack.c +++ b/net/rxrpc/ar-ack.c @@ -426,7 +426,7 @@ static void rxrpc_rotate_tx_window(struct rxrpc_call *call, u32 hard) int tail = call->acks_tail, old_tail; int win = CIRC_CNT(call->acks_head, tail, call->acks_winsz); - kenter("{%u,%u},%u", call->acks_hard, win, hard); + _enter("{%u,%u},%u", call->acks_hard, win, hard); ASSERTCMP(hard - call->acks_hard, <=, win); From 8e688d9c166671bb4a6977384de2fe7f46a31ba4 Mon Sep 17 00:00:00 2001 From: David Howells Date: Thu, 7 Apr 2016 17:23:16 +0100 Subject: [PATCH 0532/1649] rxrpc: Move some miscellaneous bits out into their own file Move some miscellaneous bits out into their own file to make it easier to split the call handling. Signed-off-by: David Howells Signed-off-by: David S. 
Miller --- include/net/af_rxrpc.h | 1 + net/rxrpc/Makefile | 3 +- net/rxrpc/ar-ack.c | 68 ------------------------------- net/rxrpc/ar-input.c | 6 --- net/rxrpc/ar-internal.h | 24 ++++++----- net/rxrpc/misc.c | 89 +++++++++++++++++++++++++++++++++++++++++ 6 files changed, 107 insertions(+), 84 deletions(-) create mode 100644 net/rxrpc/misc.c diff --git a/include/net/af_rxrpc.h b/include/net/af_rxrpc.h index e797d45a5ae6..4fd3e4a2cadd 100644 --- a/include/net/af_rxrpc.h +++ b/include/net/af_rxrpc.h @@ -12,6 +12,7 @@ #ifndef _NET_RXRPC_H #define _NET_RXRPC_H +#include #include struct rxrpc_call; diff --git a/net/rxrpc/Makefile b/net/rxrpc/Makefile index ec126f91276b..5b98c1640d6d 100644 --- a/net/rxrpc/Makefile +++ b/net/rxrpc/Makefile @@ -18,7 +18,8 @@ af-rxrpc-y := \ ar-recvmsg.o \ ar-security.o \ ar-skbuff.o \ - ar-transport.o + ar-transport.o \ + misc.o af-rxrpc-$(CONFIG_PROC_FS) += ar-proc.o af-rxrpc-$(CONFIG_SYSCTL) += sysctl.o diff --git a/net/rxrpc/ar-ack.c b/net/rxrpc/ar-ack.c index 01a017a05f14..54bf43ba9aa8 100644 --- a/net/rxrpc/ar-ack.c +++ b/net/rxrpc/ar-ack.c @@ -19,74 +19,6 @@ #include #include "ar-internal.h" -/* - * How long to wait before scheduling ACK generation after seeing a - * packet with RXRPC_REQUEST_ACK set (in jiffies). - */ -unsigned int rxrpc_requested_ack_delay = 1; - -/* - * How long to wait before scheduling an ACK with subtype DELAY (in jiffies). - * - * We use this when we've received new data packets. If those packets aren't - * all consumed within this time we will send a DELAY ACK if an ACK was not - * requested to let the sender know it doesn't need to resend. - */ -unsigned int rxrpc_soft_ack_delay = 1 * HZ; - -/* - * How long to wait before scheduling an ACK with subtype IDLE (in jiffies). - * - * We use this when we've consumed some previously soft-ACK'd packets when - * further packets aren't immediately received to decide when to send an IDLE - * ACK let the other end know that it can free up its Tx buffer space. - */ -unsigned int rxrpc_idle_ack_delay = 0.5 * HZ; - -/* - * Receive window size in packets. This indicates the maximum number of - * unconsumed received packets we're willing to retain in memory. Once this - * limit is hit, we should generate an EXCEEDS_WINDOW ACK and discard further - * packets. - */ -unsigned int rxrpc_rx_window_size = 32; - -/* - * Maximum Rx MTU size. This indicates to the sender the size of jumbo packet - * made by gluing normal packets together that we're willing to handle. - */ -unsigned int rxrpc_rx_mtu = 5692; - -/* - * The maximum number of fragments in a received jumbo packet that we tell the - * sender that we're willing to handle. 
- */ -unsigned int rxrpc_rx_jumbo_max = 4; - -static const char *rxrpc_acks(u8 reason) -{ - static const char *const str[] = { - "---", "REQ", "DUP", "OOS", "WIN", "MEM", "PNG", "PNR", "DLY", - "IDL", "-?-" - }; - - if (reason >= ARRAY_SIZE(str)) - reason = ARRAY_SIZE(str) - 1; - return str[reason]; -} - -static const s8 rxrpc_ack_priority[] = { - [0] = 0, - [RXRPC_ACK_DELAY] = 1, - [RXRPC_ACK_REQUESTED] = 2, - [RXRPC_ACK_IDLE] = 3, - [RXRPC_ACK_PING_RESPONSE] = 4, - [RXRPC_ACK_DUPLICATE] = 5, - [RXRPC_ACK_OUT_OF_SEQUENCE] = 6, - [RXRPC_ACK_EXCEEDS_WINDOW] = 7, - [RXRPC_ACK_NOSPACE] = 8, -}; - /* * propose an ACK be sent */ diff --git a/net/rxrpc/ar-input.c b/net/rxrpc/ar-input.c index 4824a827d10d..c947cd13f435 100644 --- a/net/rxrpc/ar-input.c +++ b/net/rxrpc/ar-input.c @@ -25,12 +25,6 @@ #include #include "ar-internal.h" -const char *rxrpc_pkts[] = { - "?00", - "DATA", "ACK", "BUSY", "ABORT", "ACKALL", "CHALL", "RESP", "DEBUG", - "?09", "?10", "?11", "?12", "VERSION", "?14", "?15" -}; - /* * queue a packet for recvmsg to pass to userspace * - the caller must hold a lock on call->lock diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h index cd6cdbe87125..24126d954f38 100644 --- a/net/rxrpc/ar-internal.h +++ b/net/rxrpc/ar-internal.h @@ -478,13 +478,6 @@ int rxrpc_reject_call(struct rxrpc_sock *); /* * ar-ack.c */ -extern unsigned int rxrpc_requested_ack_delay; -extern unsigned int rxrpc_soft_ack_delay; -extern unsigned int rxrpc_idle_ack_delay; -extern unsigned int rxrpc_rx_window_size; -extern unsigned int rxrpc_rx_mtu; -extern unsigned int rxrpc_rx_jumbo_max; - void __rxrpc_propose_ACK(struct rxrpc_call *, u8, u32, bool); void rxrpc_propose_ACK(struct rxrpc_call *, u8, u32, bool); void rxrpc_process_call(struct work_struct *); @@ -550,8 +543,6 @@ void rxrpc_UDP_error_handler(struct work_struct *); /* * ar-input.c */ -extern const char *rxrpc_pkts[]; - void rxrpc_data_ready(struct sock *); int rxrpc_queue_rcv_skb(struct rxrpc_call *, struct sk_buff *, bool, bool); void rxrpc_fast_process_packet(struct rxrpc_call *, struct sk_buff *); @@ -636,6 +627,21 @@ void __exit rxrpc_destroy_all_transports(void); struct rxrpc_transport *rxrpc_find_transport(struct rxrpc_local *, struct rxrpc_peer *); +/* + * misc.c + */ +extern unsigned int rxrpc_requested_ack_delay; +extern unsigned int rxrpc_soft_ack_delay; +extern unsigned int rxrpc_idle_ack_delay; +extern unsigned int rxrpc_rx_window_size; +extern unsigned int rxrpc_rx_mtu; +extern unsigned int rxrpc_rx_jumbo_max; + +extern const char *rxrpc_pkts[]; +extern const s8 rxrpc_ack_priority[]; + +extern const char *rxrpc_acks(u8 reason); + /* * sysctl.c */ diff --git a/net/rxrpc/misc.c b/net/rxrpc/misc.c new file mode 100644 index 000000000000..8ebeec3384e1 --- /dev/null +++ b/net/rxrpc/misc.c @@ -0,0 +1,89 @@ +/* Miscellaneous bits + * + * Copyright (C) 2016 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. + */ + +#include +#include +#include +#include "ar-internal.h" + +/* + * How long to wait before scheduling ACK generation after seeing a + * packet with RXRPC_REQUEST_ACK set (in jiffies). + */ +unsigned int rxrpc_requested_ack_delay = 1; + +/* + * How long to wait before scheduling an ACK with subtype DELAY (in jiffies). 
+ * + * We use this when we've received new data packets. If those packets aren't + * all consumed within this time we will send a DELAY ACK if an ACK was not + * requested to let the sender know it doesn't need to resend. + */ +unsigned int rxrpc_soft_ack_delay = 1 * HZ; + +/* + * How long to wait before scheduling an ACK with subtype IDLE (in jiffies). + * + * We use this when we've consumed some previously soft-ACK'd packets when + * further packets aren't immediately received to decide when to send an IDLE + * ACK let the other end know that it can free up its Tx buffer space. + */ +unsigned int rxrpc_idle_ack_delay = 0.5 * HZ; + +/* + * Receive window size in packets. This indicates the maximum number of + * unconsumed received packets we're willing to retain in memory. Once this + * limit is hit, we should generate an EXCEEDS_WINDOW ACK and discard further + * packets. + */ +unsigned int rxrpc_rx_window_size = 32; + +/* + * Maximum Rx MTU size. This indicates to the sender the size of jumbo packet + * made by gluing normal packets together that we're willing to handle. + */ +unsigned int rxrpc_rx_mtu = 5692; + +/* + * The maximum number of fragments in a received jumbo packet that we tell the + * sender that we're willing to handle. + */ +unsigned int rxrpc_rx_jumbo_max = 4; + +const char *rxrpc_pkts[] = { + "?00", + "DATA", "ACK", "BUSY", "ABORT", "ACKALL", "CHALL", "RESP", "DEBUG", + "?09", "?10", "?11", "?12", "VERSION", "?14", "?15" +}; + +const s8 rxrpc_ack_priority[] = { + [0] = 0, + [RXRPC_ACK_DELAY] = 1, + [RXRPC_ACK_REQUESTED] = 2, + [RXRPC_ACK_IDLE] = 3, + [RXRPC_ACK_PING_RESPONSE] = 4, + [RXRPC_ACK_DUPLICATE] = 5, + [RXRPC_ACK_OUT_OF_SEQUENCE] = 6, + [RXRPC_ACK_EXCEEDS_WINDOW] = 7, + [RXRPC_ACK_NOSPACE] = 8, +}; + +const char *rxrpc_acks(u8 reason) +{ + static const char *const str[] = { + "---", "REQ", "DUP", "OOS", "WIN", "MEM", "PNG", "PNR", "DLY", + "IDL", "-?-" + }; + + if (reason >= ARRAY_SIZE(str)) + reason = ARRAY_SIZE(str) - 1; + return str[reason]; +} From 5b3e87f19e71b7a2f789c40de04704886932b5cf Mon Sep 17 00:00:00 2001 From: David Howells Date: Thu, 7 Apr 2016 17:23:23 +0100 Subject: [PATCH 0533/1649] rxrpc: Static arrays of strings should be const char *const[] Static arrays of strings should be const char *const[]. Signed-off-by: David Howells Signed-off-by: David S. 
Miller --- include/rxrpc/packet.h | 2 -- net/rxrpc/ar-internal.h | 2 +- net/rxrpc/misc.c | 2 +- 3 files changed, 2 insertions(+), 4 deletions(-) diff --git a/include/rxrpc/packet.h b/include/rxrpc/packet.h index 9ebab3a8cf0a..b2017440b765 100644 --- a/include/rxrpc/packet.h +++ b/include/rxrpc/packet.h @@ -68,8 +68,6 @@ struct rxrpc_wire_header { } __packed; -extern const char *rxrpc_pkts[]; - #define RXRPC_SUPPORTED_PACKET_TYPES ( \ (1 << RXRPC_PACKET_TYPE_DATA) | \ (1 << RXRPC_PACKET_TYPE_ACK) | \ diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h index 24126d954f38..eeb829e837e1 100644 --- a/net/rxrpc/ar-internal.h +++ b/net/rxrpc/ar-internal.h @@ -637,7 +637,7 @@ extern unsigned int rxrpc_rx_window_size; extern unsigned int rxrpc_rx_mtu; extern unsigned int rxrpc_rx_jumbo_max; -extern const char *rxrpc_pkts[]; +extern const char *const rxrpc_pkts[]; extern const s8 rxrpc_ack_priority[]; extern const char *rxrpc_acks(u8 reason); diff --git a/net/rxrpc/misc.c b/net/rxrpc/misc.c index 8ebeec3384e1..1afe9876e79f 100644 --- a/net/rxrpc/misc.c +++ b/net/rxrpc/misc.c @@ -58,7 +58,7 @@ unsigned int rxrpc_rx_mtu = 5692; */ unsigned int rxrpc_rx_jumbo_max = 4; -const char *rxrpc_pkts[] = { +const char *const rxrpc_pkts[] = { "?00", "DATA", "ACK", "BUSY", "ABORT", "ACKALL", "CHALL", "RESP", "DEBUG", "?09", "?10", "?11", "?12", "VERSION", "?14", "?15" From dc44b3a09aec9ac57c1e7410677c87c0e6453624 Mon Sep 17 00:00:00 2001 From: David Howells Date: Thu, 7 Apr 2016 17:23:30 +0100 Subject: [PATCH 0534/1649] rxrpc: Differentiate local and remote abort codes in structs In the rxrpc_connection and rxrpc_call structs, there's one field to hold the abort code, no matter whether that value was generated locally to be sent or was received from the peer via an abort packet. Split the abort code fields in two for cleanliness sake and add an error field to hold the Linux error number to the rxrpc_call struct too (sometimes this is generated in a context where we can't return it to userspace directly). Furthermore, add a skb mark to indicate a packet that caused a local abort to be generated so that recvmsg() can pick up the correct abort code. A future addition will need to be to indicate to userspace the difference between aborts via a control message. Signed-off-by: David Howells Signed-off-by: David S. Miller --- fs/afs/rxrpc.c | 14 +++++++++++--- include/net/af_rxrpc.h | 3 ++- net/rxrpc/ar-ack.c | 4 ++-- net/rxrpc/ar-call.c | 4 ++-- net/rxrpc/ar-connevent.c | 12 +++++++----- net/rxrpc/ar-input.c | 6 +++--- net/rxrpc/ar-internal.h | 10 +++++++--- net/rxrpc/ar-output.c | 2 +- net/rxrpc/ar-proc.c | 2 +- net/rxrpc/ar-recvmsg.c | 18 ++++++++++++++---- 10 files changed, 50 insertions(+), 25 deletions(-) diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c index b4d337ad6e36..63cd9f939f19 100644 --- a/fs/afs/rxrpc.c +++ b/fs/afs/rxrpc.c @@ -430,9 +430,11 @@ error_kill_call: } /* - * handles intercepted messages that were arriving in the socket's Rx queue - * - called with the socket receive queue lock held to ensure message ordering - * - called with softirqs disabled + * Handles intercepted messages that were arriving in the socket's Rx queue. + * + * Called from the AF_RXRPC call processor in waitqueue process context. For + * each call, it is guaranteed this will be called in order of packet to be + * delivered. 
*/ static void afs_rx_interceptor(struct sock *sk, unsigned long user_call_ID, struct sk_buff *skb) @@ -523,6 +525,12 @@ static void afs_deliver_to_call(struct afs_call *call) call->state = AFS_CALL_ABORTED; _debug("Rcv ABORT %u -> %d", abort_code, call->error); break; + case RXRPC_SKB_MARK_LOCAL_ABORT: + abort_code = rxrpc_kernel_get_abort_code(skb); + call->error = call->type->abort_to_error(abort_code); + call->state = AFS_CALL_ABORTED; + _debug("Loc ABORT %u -> %d", abort_code, call->error); + break; case RXRPC_SKB_MARK_NET_ERROR: call->error = -rxrpc_kernel_get_error_number(skb); call->state = AFS_CALL_ERROR; diff --git a/include/net/af_rxrpc.h b/include/net/af_rxrpc.h index 4fd3e4a2cadd..ac1bc3c49fbd 100644 --- a/include/net/af_rxrpc.h +++ b/include/net/af_rxrpc.h @@ -20,11 +20,12 @@ struct rxrpc_call; /* * the mark applied to socket buffers that may be intercepted */ -enum { +enum rxrpc_skb_mark { RXRPC_SKB_MARK_DATA, /* data message */ RXRPC_SKB_MARK_FINAL_ACK, /* final ACK received message */ RXRPC_SKB_MARK_BUSY, /* server busy message */ RXRPC_SKB_MARK_REMOTE_ABORT, /* remote abort message */ + RXRPC_SKB_MARK_LOCAL_ABORT, /* local abort message */ RXRPC_SKB_MARK_NET_ERROR, /* network error message */ RXRPC_SKB_MARK_LOCAL_ERROR, /* local error message */ RXRPC_SKB_MARK_NEW_CALL, /* local error message */ diff --git a/net/rxrpc/ar-ack.c b/net/rxrpc/ar-ack.c index 54bf43ba9aa8..d0eb98e1391c 100644 --- a/net/rxrpc/ar-ack.c +++ b/net/rxrpc/ar-ack.c @@ -905,7 +905,7 @@ void rxrpc_process_call(struct work_struct *work) ECONNABORTED, true) < 0) goto no_mem; whdr.type = RXRPC_PACKET_TYPE_ABORT; - data = htonl(call->abort_code); + data = htonl(call->local_abort); iov[1].iov_base = &data; iov[1].iov_len = sizeof(data); genbit = RXRPC_CALL_EV_ABORT; @@ -968,7 +968,7 @@ void rxrpc_process_call(struct work_struct *work) write_lock_bh(&call->state_lock); if (call->state <= RXRPC_CALL_COMPLETE) { call->state = RXRPC_CALL_LOCALLY_ABORTED; - call->abort_code = RX_CALL_TIMEOUT; + call->local_abort = RX_CALL_TIMEOUT; set_bit(RXRPC_CALL_EV_ABORT, &call->events); } write_unlock_bh(&call->state_lock); diff --git a/net/rxrpc/ar-call.c b/net/rxrpc/ar-call.c index 7c8d300ade9b..67a211f0ebba 100644 --- a/net/rxrpc/ar-call.c +++ b/net/rxrpc/ar-call.c @@ -682,7 +682,7 @@ void rxrpc_release_call(struct rxrpc_call *call) call->state != RXRPC_CALL_CLIENT_FINAL_ACK) { _debug("+++ ABORTING STATE %d +++\n", call->state); call->state = RXRPC_CALL_LOCALLY_ABORTED; - call->abort_code = RX_CALL_DEAD; + call->local_abort = RX_CALL_DEAD; set_bit(RXRPC_CALL_EV_ABORT, &call->events); rxrpc_queue_call(call); } @@ -758,7 +758,7 @@ static void rxrpc_mark_call_released(struct rxrpc_call *call) if (call->state < RXRPC_CALL_COMPLETE) { _debug("abort call %p", call); call->state = RXRPC_CALL_LOCALLY_ABORTED; - call->abort_code = RX_CALL_DEAD; + call->local_abort = RX_CALL_DEAD; if (!test_and_set_bit(RXRPC_CALL_EV_ABORT, &call->events)) sched = true; } diff --git a/net/rxrpc/ar-connevent.c b/net/rxrpc/ar-connevent.c index 1bdaaed8cdc4..4dc6ab81fd2f 100644 --- a/net/rxrpc/ar-connevent.c +++ b/net/rxrpc/ar-connevent.c @@ -40,11 +40,13 @@ static void rxrpc_abort_calls(struct rxrpc_connection *conn, int state, write_lock(&call->state_lock); if (call->state <= RXRPC_CALL_COMPLETE) { call->state = state; - call->abort_code = abort_code; - if (state == RXRPC_CALL_LOCALLY_ABORTED) + if (state == RXRPC_CALL_LOCALLY_ABORTED) { + call->local_abort = conn->local_abort; set_bit(RXRPC_CALL_EV_CONN_ABORT, &call->events); - else + } else { + 
call->remote_abort = conn->remote_abort; set_bit(RXRPC_CALL_EV_RCVD_ABORT, &call->events); + } rxrpc_queue_call(call); } write_unlock(&call->state_lock); @@ -101,7 +103,7 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn, whdr._rsvd = 0; whdr.serviceId = htons(conn->service_id); - word = htonl(abort_code); + word = htonl(conn->local_abort); iov[0].iov_base = &whdr; iov[0].iov_len = sizeof(whdr); @@ -112,7 +114,7 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn, serial = atomic_inc_return(&conn->serial); whdr.serial = htonl(serial); - _proto("Tx CONN ABORT %%%u { %d }", serial, abort_code); + _proto("Tx CONN ABORT %%%u { %d }", serial, conn->local_abort); ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len); if (ret < 0) { diff --git a/net/rxrpc/ar-input.c b/net/rxrpc/ar-input.c index c947cd13f435..c6c784d3a3e8 100644 --- a/net/rxrpc/ar-input.c +++ b/net/rxrpc/ar-input.c @@ -349,7 +349,7 @@ void rxrpc_fast_process_packet(struct rxrpc_call *call, struct sk_buff *skb) write_lock_bh(&call->state_lock); if (call->state < RXRPC_CALL_COMPLETE) { call->state = RXRPC_CALL_REMOTELY_ABORTED; - call->abort_code = abort_code; + call->remote_abort = abort_code; set_bit(RXRPC_CALL_EV_RCVD_ABORT, &call->events); rxrpc_queue_call(call); } @@ -422,7 +422,7 @@ protocol_error: protocol_error_locked: if (call->state <= RXRPC_CALL_COMPLETE) { call->state = RXRPC_CALL_LOCALLY_ABORTED; - call->abort_code = RX_PROTOCOL_ERROR; + call->local_abort = RX_PROTOCOL_ERROR; set_bit(RXRPC_CALL_EV_ABORT, &call->events); rxrpc_queue_call(call); } @@ -494,7 +494,7 @@ protocol_error: write_lock_bh(&call->state_lock); if (call->state <= RXRPC_CALL_COMPLETE) { call->state = RXRPC_CALL_LOCALLY_ABORTED; - call->abort_code = RX_PROTOCOL_ERROR; + call->local_abort = RX_PROTOCOL_ERROR; set_bit(RXRPC_CALL_EV_ABORT, &call->events); rxrpc_queue_call(call); } diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h index eeb829e837e1..258b74a2a23f 100644 --- a/net/rxrpc/ar-internal.h +++ b/net/rxrpc/ar-internal.h @@ -289,7 +289,9 @@ struct rxrpc_connection { RXRPC_CONN_LOCALLY_ABORTED, /* - conn aborted locally */ RXRPC_CONN_NETWORK_ERROR, /* - conn terminated by network error */ } state; - int error; /* error code for local abort */ + u32 local_abort; /* local abort code */ + u32 remote_abort; /* remote abort code */ + int error; /* local error incurred */ int debug_id; /* debug ID for printks */ unsigned int call_counter; /* call ID counter */ atomic_t serial; /* packet serial number counter */ @@ -399,7 +401,9 @@ struct rxrpc_call { rwlock_t state_lock; /* lock for state transition */ atomic_t usage; atomic_t sequence; /* Tx data packet sequence counter */ - u32 abort_code; /* local/remote abort code */ + u32 local_abort; /* local abort code */ + u32 remote_abort; /* remote abort code */ + int error; /* local error incurred */ enum rxrpc_call_state state : 8; /* current state of call */ int debug_id; /* debug ID for printks */ u8 channel; /* connection channel occupied by this call */ @@ -453,7 +457,7 @@ static inline void rxrpc_abort_call(struct rxrpc_call *call, u32 abort_code) { write_lock_bh(&call->state_lock); if (call->state < RXRPC_CALL_COMPLETE) { - call->abort_code = abort_code; + call->local_abort = abort_code; call->state = RXRPC_CALL_LOCALLY_ABORTED; set_bit(RXRPC_CALL_EV_ABORT, &call->events); } diff --git a/net/rxrpc/ar-output.c b/net/rxrpc/ar-output.c index d36fb6e1a29c..94e7d9537437 100644 --- a/net/rxrpc/ar-output.c +++ b/net/rxrpc/ar-output.c @@ -110,7 +110,7 @@ 
static void rxrpc_send_abort(struct rxrpc_call *call, u32 abort_code) if (call->state <= RXRPC_CALL_COMPLETE) { call->state = RXRPC_CALL_LOCALLY_ABORTED; - call->abort_code = abort_code; + call->local_abort = abort_code; set_bit(RXRPC_CALL_EV_ABORT, &call->events); del_timer_sync(&call->resend_timer); del_timer_sync(&call->ack_timer); diff --git a/net/rxrpc/ar-proc.c b/net/rxrpc/ar-proc.c index 525b2ba5a8f4..225163bc658d 100644 --- a/net/rxrpc/ar-proc.c +++ b/net/rxrpc/ar-proc.c @@ -80,7 +80,7 @@ static int rxrpc_call_seq_show(struct seq_file *seq, void *v) call->conn->in_clientflag ? "Svc" : "Clt", atomic_read(&call->usage), rxrpc_call_states[call->state], - call->abort_code, + call->remote_abort ?: call->local_abort, call->user_call_ID); return 0; diff --git a/net/rxrpc/ar-recvmsg.c b/net/rxrpc/ar-recvmsg.c index 64facba24a45..160f0927aa3e 100644 --- a/net/rxrpc/ar-recvmsg.c +++ b/net/rxrpc/ar-recvmsg.c @@ -288,7 +288,11 @@ receive_non_data_message: ret = put_cmsg(msg, SOL_RXRPC, RXRPC_BUSY, 0, &abort_code); break; case RXRPC_SKB_MARK_REMOTE_ABORT: - abort_code = call->abort_code; + abort_code = call->remote_abort; + ret = put_cmsg(msg, SOL_RXRPC, RXRPC_ABORT, 4, &abort_code); + break; + case RXRPC_SKB_MARK_LOCAL_ABORT: + abort_code = call->local_abort; ret = put_cmsg(msg, SOL_RXRPC, RXRPC_ABORT, 4, &abort_code); break; case RXRPC_SKB_MARK_NET_ERROR: @@ -303,6 +307,7 @@ receive_non_data_message: &abort_code); break; default: + pr_err("RxRPC: Unknown packet mark %u\n", skb->mark); BUG(); break; } @@ -401,9 +406,14 @@ u32 rxrpc_kernel_get_abort_code(struct sk_buff *skb) { struct rxrpc_skb_priv *sp = rxrpc_skb(skb); - ASSERTCMP(skb->mark, ==, RXRPC_SKB_MARK_REMOTE_ABORT); - - return sp->call->abort_code; + switch (skb->mark) { + case RXRPC_SKB_MARK_REMOTE_ABORT: + return sp->call->remote_abort; + case RXRPC_SKB_MARK_LOCAL_ABORT: + return sp->call->local_abort; + default: + BUG(); + } } EXPORT_SYMBOL(rxrpc_kernel_get_abort_code); From 843099cac0dbe421d7c3ea1f8662251fd7065731 Mon Sep 17 00:00:00 2001 From: David Howells Date: Thu, 7 Apr 2016 17:23:37 +0100 Subject: [PATCH 0535/1649] rxrpc: Don't pass gfp around in incoming call handling functions Don't pass gfp around in incoming call handling functions, but rather hard code it at the points where we actually need it since the value comes from within the rxrpc driver and is always the same. Signed-off-by: David Howells Signed-off-by: David S. 
Miller --- net/rxrpc/ar-accept.c | 4 ++-- net/rxrpc/ar-call.c | 7 +++---- net/rxrpc/ar-connection.c | 5 ++--- net/rxrpc/ar-internal.h | 5 ++--- 4 files changed, 9 insertions(+), 12 deletions(-) diff --git a/net/rxrpc/ar-accept.c b/net/rxrpc/ar-accept.c index 277731a5e67a..e7a7f05f13e2 100644 --- a/net/rxrpc/ar-accept.c +++ b/net/rxrpc/ar-accept.c @@ -108,7 +108,7 @@ static int rxrpc_accept_incoming_call(struct rxrpc_local *local, goto error; } - conn = rxrpc_incoming_connection(trans, &sp->hdr, GFP_NOIO); + conn = rxrpc_incoming_connection(trans, &sp->hdr); rxrpc_put_transport(trans); if (IS_ERR(conn)) { _debug("no conn"); @@ -116,7 +116,7 @@ static int rxrpc_accept_incoming_call(struct rxrpc_local *local, goto error; } - call = rxrpc_incoming_call(rx, conn, &sp->hdr, GFP_NOIO); + call = rxrpc_incoming_call(rx, conn, &sp->hdr); rxrpc_put_connection(conn); if (IS_ERR(call)) { _debug("no call"); diff --git a/net/rxrpc/ar-call.c b/net/rxrpc/ar-call.c index 67a211f0ebba..571a41fd5a32 100644 --- a/net/rxrpc/ar-call.c +++ b/net/rxrpc/ar-call.c @@ -411,18 +411,17 @@ found_extant_second: */ struct rxrpc_call *rxrpc_incoming_call(struct rxrpc_sock *rx, struct rxrpc_connection *conn, - struct rxrpc_host_header *hdr, - gfp_t gfp) + struct rxrpc_host_header *hdr) { struct rxrpc_call *call, *candidate; struct rb_node **p, *parent; u32 call_id; - _enter(",%d,,%x", conn->debug_id, gfp); + _enter(",%d", conn->debug_id); ASSERT(rx != NULL); - candidate = rxrpc_alloc_call(gfp); + candidate = rxrpc_alloc_call(GFP_NOIO); if (!candidate) return ERR_PTR(-EBUSY); diff --git a/net/rxrpc/ar-connection.c b/net/rxrpc/ar-connection.c index 9942da1edbf6..9b6966777633 100644 --- a/net/rxrpc/ar-connection.c +++ b/net/rxrpc/ar-connection.c @@ -619,8 +619,7 @@ interrupted: */ struct rxrpc_connection * rxrpc_incoming_connection(struct rxrpc_transport *trans, - struct rxrpc_host_header *hdr, - gfp_t gfp) + struct rxrpc_host_header *hdr) { struct rxrpc_connection *conn, *candidate = NULL; struct rb_node *p, **pp; @@ -659,7 +658,7 @@ rxrpc_incoming_connection(struct rxrpc_transport *trans, /* not yet present - create a candidate for a new record and then * redo the search */ - candidate = rxrpc_alloc_connection(gfp); + candidate = rxrpc_alloc_connection(GFP_NOIO); if (!candidate) { _leave(" = -ENOMEM"); return ERR_PTR(-ENOMEM); diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h index 258b74a2a23f..d38071b09f72 100644 --- a/net/rxrpc/ar-internal.h +++ b/net/rxrpc/ar-internal.h @@ -503,7 +503,7 @@ struct rxrpc_call *rxrpc_get_client_call(struct rxrpc_sock *, unsigned long, int, gfp_t); struct rxrpc_call *rxrpc_incoming_call(struct rxrpc_sock *, struct rxrpc_connection *, - struct rxrpc_host_header *, gfp_t); + struct rxrpc_host_header *); struct rxrpc_call *rxrpc_find_server_call(struct rxrpc_sock *, unsigned long); void rxrpc_release_call(struct rxrpc_call *); void rxrpc_release_calls_on_socket(struct rxrpc_sock *); @@ -528,8 +528,7 @@ void __exit rxrpc_destroy_all_connections(void); struct rxrpc_connection *rxrpc_find_connection(struct rxrpc_transport *, struct rxrpc_host_header *); extern struct rxrpc_connection * -rxrpc_incoming_connection(struct rxrpc_transport *, struct rxrpc_host_header *, - gfp_t); +rxrpc_incoming_connection(struct rxrpc_transport *, struct rxrpc_host_header *); /* * ar-connevent.c From 6dd050f88d702e2718bd856ea014487563207756 Mon Sep 17 00:00:00 2001 From: David Howells Date: Thu, 7 Apr 2016 17:23:44 +0100 Subject: [PATCH 0536/1649] rxrpc: Don't assume transport address family and size when 
 using it

Don't assume transport address family and size when using the peer address
to send a packet.  Instead, use the start of the transport address rather
than any particular element of the union and use the transport address
length noted inside the sockaddr_rxrpc struct.  This will be necessary when
IPv6 support is introduced.

Signed-off-by: David Howells
Signed-off-by: David S. Miller
---
 net/rxrpc/ar-ack.c       | 4 ++--
 net/rxrpc/ar-connevent.c | 4 ++--
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/net/rxrpc/ar-ack.c b/net/rxrpc/ar-ack.c
index d0eb98e1391c..3cd9264806a4 100644
--- a/net/rxrpc/ar-ack.c
+++ b/net/rxrpc/ar-ack.c
@@ -833,8 +833,8 @@ void rxrpc_process_call(struct work_struct *work)
 	/* there's a good chance we're going to have to send a message, so set
 	 * one up in advance */
-	msg.msg_name = &call->conn->trans->peer->srx.transport.sin;
-	msg.msg_namelen = sizeof(call->conn->trans->peer->srx.transport.sin);
+	msg.msg_name = &call->conn->trans->peer->srx.transport;
+	msg.msg_namelen = call->conn->trans->peer->srx.transport_len;
 	msg.msg_control = NULL;
 	msg.msg_controllen = 0;
 	msg.msg_flags = 0;
diff --git a/net/rxrpc/ar-connevent.c b/net/rxrpc/ar-connevent.c
index 4dc6ab81fd2f..291522392ac7 100644
--- a/net/rxrpc/ar-connevent.c
+++ b/net/rxrpc/ar-connevent.c
@@ -86,8 +86,8 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn,
 	rxrpc_abort_calls(conn, RXRPC_CALL_LOCALLY_ABORTED, abort_code);
-	msg.msg_name = &conn->trans->peer->srx.transport.sin;
-	msg.msg_namelen = sizeof(conn->trans->peer->srx.transport.sin);
+	msg.msg_name = &conn->trans->peer->srx.transport;
+	msg.msg_namelen = conn->trans->peer->srx.transport_len;
 	msg.msg_control = NULL;
 	msg.msg_controllen = 0;
 	msg.msg_flags = 0;

From 648af7fca15901740c7aaafd55904ebd54d01860 Mon Sep 17 00:00:00 2001
From: David Howells
Date: Thu, 7 Apr 2016 17:23:51 +0100
Subject: [PATCH 0537/1649] rxrpc: Absorb the rxkad security module

Absorb the rxkad security module into the af_rxrpc module so that there's
only one module file.  This avoids a circular dependency whereby rxkad pins
af_rxrpc and cached connections pin rxkad but can't be manually evicted
(they will expire eventually and cease pinning).  With this change,
af_rxrpc can just be unloaded, despite having cached connections.

Signed-off-by: David Howells
Signed-off-by: David S.
Miller --- net/rxrpc/Kconfig | 2 +- net/rxrpc/Makefile | 3 +- net/rxrpc/af_rxrpc.c | 9 +++ net/rxrpc/ar-internal.h | 21 +++++-- net/rxrpc/ar-security.c | 131 ++++++++++++---------------------------- net/rxrpc/rxkad.c | 61 ++++++++----------- 6 files changed, 89 insertions(+), 138 deletions(-) diff --git a/net/rxrpc/Kconfig b/net/rxrpc/Kconfig index 23dcef12b986..784c53163b7b 100644 --- a/net/rxrpc/Kconfig +++ b/net/rxrpc/Kconfig @@ -30,7 +30,7 @@ config AF_RXRPC_DEBUG config RXKAD - tristate "RxRPC Kerberos security" + bool "RxRPC Kerberos security" depends on AF_RXRPC select CRYPTO select CRYPTO_MANAGER diff --git a/net/rxrpc/Makefile b/net/rxrpc/Makefile index 5b98c1640d6d..fa09cb55bfce 100644 --- a/net/rxrpc/Makefile +++ b/net/rxrpc/Makefile @@ -22,8 +22,7 @@ af-rxrpc-y := \ misc.o af-rxrpc-$(CONFIG_PROC_FS) += ar-proc.o +af-rxrpc-$(CONFIG_RXKAD) += rxkad.o af-rxrpc-$(CONFIG_SYSCTL) += sysctl.o obj-$(CONFIG_AF_RXRPC) += af-rxrpc.o - -obj-$(CONFIG_RXKAD) += rxkad.o diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c index 9d935fa5a2a9..e45e94ca030f 100644 --- a/net/rxrpc/af_rxrpc.c +++ b/net/rxrpc/af_rxrpc.c @@ -806,6 +806,12 @@ static int __init af_rxrpc_init(void) goto error_work_queue; } + ret = rxrpc_init_security(); + if (ret < 0) { + printk(KERN_CRIT "RxRPC: Cannot initialise security\n"); + goto error_security; + } + ret = proto_register(&rxrpc_proto, 1); if (ret < 0) { printk(KERN_CRIT "RxRPC: Cannot register protocol\n"); @@ -853,6 +859,8 @@ error_sock: proto_unregister(&rxrpc_proto); error_proto: destroy_workqueue(rxrpc_workqueue); +error_security: + rxrpc_exit_security(); error_work_queue: kmem_cache_destroy(rxrpc_call_jar); error_call_jar: @@ -883,6 +891,7 @@ static void __exit af_rxrpc_exit(void) remove_proc_entry("rxrpc_conns", init_net.proc_net); remove_proc_entry("rxrpc_calls", init_net.proc_net); destroy_workqueue(rxrpc_workqueue); + rxrpc_exit_security(); kmem_cache_destroy(rxrpc_call_jar); _leave(""); } diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h index d38071b09f72..72fd675a891e 100644 --- a/net/rxrpc/ar-internal.h +++ b/net/rxrpc/ar-internal.h @@ -124,11 +124,15 @@ enum rxrpc_command { * RxRPC security module interface */ struct rxrpc_security { - struct module *owner; /* providing module */ - struct list_head link; /* link in master list */ const char *name; /* name of this service */ u8 security_index; /* security type provided */ + /* Initialise a security service */ + int (*init)(void); + + /* Clean up a security service */ + void (*exit)(void); + /* initialise a connection's security */ int (*init_connection_security)(struct rxrpc_connection *); @@ -268,7 +272,7 @@ struct rxrpc_connection { struct rb_root calls; /* calls on this connection */ struct sk_buff_head rx_queue; /* received conn-level packets */ struct rxrpc_call *channels[RXRPC_MAXCALLS]; /* channels (active calls) */ - struct rxrpc_security *security; /* applied security module */ + const struct rxrpc_security *security; /* applied security module */ struct key *key; /* security for this connection (client) */ struct key *server_key; /* security for this service */ struct crypto_skcipher *cipher; /* encryption handle */ @@ -604,8 +608,8 @@ int rxrpc_recvmsg(struct socket *, struct msghdr *, size_t, int); /* * ar-security.c */ -int rxrpc_register_security(struct rxrpc_security *); -void rxrpc_unregister_security(struct rxrpc_security *); +int __init rxrpc_init_security(void); +void rxrpc_exit_security(void); int rxrpc_init_client_conn_security(struct rxrpc_connection *); int 
rxrpc_init_server_conn_security(struct rxrpc_connection *); int rxrpc_secure_packet(const struct rxrpc_call *, struct sk_buff *, size_t, @@ -645,6 +649,13 @@ extern const s8 rxrpc_ack_priority[]; extern const char *rxrpc_acks(u8 reason); +/* + * rxkad.c + */ +#ifdef CONFIG_RXKAD +extern const struct rxrpc_security rxkad; +#endif + /* * sysctl.c */ diff --git a/net/rxrpc/ar-security.c b/net/rxrpc/ar-security.c index ceff6394a65f..6946aec7ab1f 100644 --- a/net/rxrpc/ar-security.c +++ b/net/rxrpc/ar-security.c @@ -22,109 +22,59 @@ static LIST_HEAD(rxrpc_security_methods); static DECLARE_RWSEM(rxrpc_security_sem); -/* - * get an RxRPC security module - */ -static struct rxrpc_security *rxrpc_security_get(struct rxrpc_security *sec) +static const struct rxrpc_security *rxrpc_security_types[] = { +#ifdef CONFIG_RXKAD + [RXRPC_SECURITY_RXKAD] = &rxkad, +#endif +}; + +int __init rxrpc_init_security(void) { - return try_module_get(sec->owner) ? sec : NULL; + int i, ret; + + for (i = 0; i < ARRAY_SIZE(rxrpc_security_types); i++) { + if (rxrpc_security_types[i]) { + ret = rxrpc_security_types[i]->init(); + if (ret < 0) + goto failed; + } + } + + return 0; + +failed: + for (i--; i >= 0; i--) + if (rxrpc_security_types[i]) + rxrpc_security_types[i]->exit(); + return ret; } -/* - * release an RxRPC security module - */ -static void rxrpc_security_put(struct rxrpc_security *sec) +void rxrpc_exit_security(void) { - module_put(sec->owner); + int i; + + for (i = 0; i < ARRAY_SIZE(rxrpc_security_types); i++) + if (rxrpc_security_types[i]) + rxrpc_security_types[i]->exit(); } /* * look up an rxrpc security module */ -static struct rxrpc_security *rxrpc_security_lookup(u8 security_index) +static const struct rxrpc_security *rxrpc_security_lookup(u8 security_index) { - struct rxrpc_security *sec = NULL; - - _enter(""); - - down_read(&rxrpc_security_sem); - - list_for_each_entry(sec, &rxrpc_security_methods, link) { - if (sec->security_index == security_index) { - if (unlikely(!rxrpc_security_get(sec))) - break; - goto out; - } - } - - sec = NULL; -out: - up_read(&rxrpc_security_sem); - _leave(" = %p [%s]", sec, sec ? 
sec->name : ""); - return sec; + if (security_index >= ARRAY_SIZE(rxrpc_security_types)) + return NULL; + return rxrpc_security_types[security_index]; } -/** - * rxrpc_register_security - register an RxRPC security handler - * @sec: security module - * - * register an RxRPC security handler for use by RxRPC - */ -int rxrpc_register_security(struct rxrpc_security *sec) -{ - struct rxrpc_security *psec; - int ret; - - _enter(""); - down_write(&rxrpc_security_sem); - - ret = -EEXIST; - list_for_each_entry(psec, &rxrpc_security_methods, link) { - if (psec->security_index == sec->security_index) - goto out; - } - - list_add(&sec->link, &rxrpc_security_methods); - - printk(KERN_NOTICE "RxRPC: Registered security type %d '%s'\n", - sec->security_index, sec->name); - ret = 0; - -out: - up_write(&rxrpc_security_sem); - _leave(" = %d", ret); - return ret; -} - -EXPORT_SYMBOL_GPL(rxrpc_register_security); - -/** - * rxrpc_unregister_security - unregister an RxRPC security handler - * @sec: security module - * - * unregister an RxRPC security handler - */ -void rxrpc_unregister_security(struct rxrpc_security *sec) -{ - - _enter(""); - down_write(&rxrpc_security_sem); - list_del_init(&sec->link); - up_write(&rxrpc_security_sem); - - printk(KERN_NOTICE "RxRPC: Unregistered security type %d '%s'\n", - sec->security_index, sec->name); -} - -EXPORT_SYMBOL_GPL(rxrpc_unregister_security); - /* * initialise the security on a client connection */ int rxrpc_init_client_conn_security(struct rxrpc_connection *conn) { + const struct rxrpc_security *sec; struct rxrpc_key_token *token; - struct rxrpc_security *sec; struct key *key = conn->key; int ret; @@ -148,7 +98,6 @@ int rxrpc_init_client_conn_security(struct rxrpc_connection *conn) ret = conn->security->init_connection_security(conn); if (ret < 0) { - rxrpc_security_put(conn->security); conn->security = NULL; return ret; } @@ -162,7 +111,7 @@ int rxrpc_init_client_conn_security(struct rxrpc_connection *conn) */ int rxrpc_init_server_conn_security(struct rxrpc_connection *conn) { - struct rxrpc_security *sec; + const struct rxrpc_security *sec; struct rxrpc_local *local = conn->trans->local; struct rxrpc_sock *rx; struct key *key; @@ -188,14 +137,12 @@ int rxrpc_init_server_conn_security(struct rxrpc_connection *conn) /* the service appears to have died */ read_unlock_bh(&local->services_lock); - rxrpc_security_put(sec); _leave(" = -ENOENT"); return -ENOENT; found_service: if (!rx->securities) { read_unlock_bh(&local->services_lock); - rxrpc_security_put(sec); _leave(" = -ENOKEY"); return -ENOKEY; } @@ -205,7 +152,6 @@ found_service: &key_type_rxrpc_s, kdesc); if (IS_ERR(kref)) { read_unlock_bh(&local->services_lock); - rxrpc_security_put(sec); _leave(" = %ld [search]", PTR_ERR(kref)); return PTR_ERR(kref); } @@ -253,11 +199,8 @@ void rxrpc_clear_conn_security(struct rxrpc_connection *conn) { _enter("{%d}", conn->debug_id); - if (conn->security) { + if (conn->security) conn->security->clear(conn); - rxrpc_security_put(conn->security); - conn->security = NULL; - } key_put(conn->key); key_put(conn->server_key); diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c index f0aeb8163688..6b726a046a7d 100644 --- a/net/rxrpc/rxkad.c +++ b/net/rxrpc/rxkad.c @@ -20,7 +20,6 @@ #include #include #include -#define rxrpc_debug rxkad_debug #include "ar-internal.h" #define RXKAD_VERSION 2 @@ -31,10 +30,6 @@ #define REALM_SZ 40 /* size of principal's auth domain */ #define SNAME_SZ 40 /* size of service name */ -unsigned int rxrpc_debug; -module_param_named(debug, rxrpc_debug, 
uint, S_IWUSR | S_IRUGO); -MODULE_PARM_DESC(debug, "rxkad debugging mask"); - struct rxkad_level1_hdr { __be32 data_size; /* true data size (excluding padding) */ }; @@ -44,10 +39,6 @@ struct rxkad_level2_hdr { __be32 checksum; /* decrypted data checksum */ }; -MODULE_DESCRIPTION("RxRPC network protocol type-2 security (Kerberos 4)"); -MODULE_AUTHOR("Red Hat, Inc."); -MODULE_LICENSE("GPL"); - /* * this holds a pinned cipher so that keventd doesn't get called by the cipher * alloc routine, but since we have it to hand, we use it to decrypt RESPONSE @@ -1163,13 +1154,36 @@ static void rxkad_clear(struct rxrpc_connection *conn) crypto_free_skcipher(conn->cipher); } +/* + * Initialise the rxkad security service. + */ +static int rxkad_init(void) +{ + /* pin the cipher we need so that the crypto layer doesn't invoke + * keventd to go get it */ + rxkad_ci = crypto_alloc_skcipher("pcbc(fcrypt)", 0, CRYPTO_ALG_ASYNC); + if (IS_ERR(rxkad_ci)) + return PTR_ERR(rxkad_ci); + return 0; +} + +/* + * Clean up the rxkad security service. + */ +static void rxkad_exit(void) +{ + if (rxkad_ci) + crypto_free_skcipher(rxkad_ci); +} + /* * RxRPC Kerberos-based security */ -static struct rxrpc_security rxkad = { - .owner = THIS_MODULE, +const struct rxrpc_security rxkad = { .name = "rxkad", .security_index = RXRPC_SECURITY_RXKAD, + .init = rxkad_init, + .exit = rxkad_exit, .init_connection_security = rxkad_init_connection_security, .prime_packet_security = rxkad_prime_packet_security, .secure_packet = rxkad_secure_packet, @@ -1179,28 +1193,3 @@ static struct rxrpc_security rxkad = { .verify_response = rxkad_verify_response, .clear = rxkad_clear, }; - -static __init int rxkad_init(void) -{ - _enter(""); - - /* pin the cipher we need so that the crypto layer doesn't invoke - * keventd to go get it */ - rxkad_ci = crypto_alloc_skcipher("pcbc(fcrypt)", 0, CRYPTO_ALG_ASYNC); - if (IS_ERR(rxkad_ci)) - return PTR_ERR(rxkad_ci); - - return rxrpc_register_security(&rxkad); -} - -module_init(rxkad_init); - -static __exit void rxkad_exit(void) -{ - _enter(""); - - rxrpc_unregister_security(&rxkad); - crypto_free_skcipher(rxkad_ci); -} - -module_exit(rxkad_exit); From e0e4d82f3be60cfe8b10304c6daf3ca5973ae9e3 Mon Sep 17 00:00:00 2001 From: David Howells Date: Thu, 7 Apr 2016 17:23:58 +0100 Subject: [PATCH 0538/1649] rxrpc: Create a null security type and get rid of conditional calls Create a null security type for security index 0 and get rid of all conditional calls to the security operations. We expect normally to be using security, so this should be of little negative impact. Signed-off-by: David Howells Signed-off-by: David S. 
Miller --- net/rxrpc/Makefile | 1 + net/rxrpc/ar-ack.c | 3 +- net/rxrpc/ar-connection.c | 9 +++-- net/rxrpc/ar-connevent.c | 11 +----- net/rxrpc/ar-input.c | 2 +- net/rxrpc/ar-internal.h | 10 +++-- net/rxrpc/ar-output.c | 4 +- net/rxrpc/ar-security.c | 43 +------------------- net/rxrpc/insecure.c | 83 +++++++++++++++++++++++++++++++++++++++ 9 files changed, 105 insertions(+), 61 deletions(-) create mode 100644 net/rxrpc/insecure.c diff --git a/net/rxrpc/Makefile b/net/rxrpc/Makefile index fa09cb55bfce..e05a06ef2254 100644 --- a/net/rxrpc/Makefile +++ b/net/rxrpc/Makefile @@ -19,6 +19,7 @@ af-rxrpc-y := \ ar-security.o \ ar-skbuff.o \ ar-transport.o \ + insecure.o \ misc.o af-rxrpc-$(CONFIG_PROC_FS) += ar-proc.o diff --git a/net/rxrpc/ar-ack.c b/net/rxrpc/ar-ack.c index 3cd9264806a4..374478e006e7 100644 --- a/net/rxrpc/ar-ack.c +++ b/net/rxrpc/ar-ack.c @@ -588,7 +588,8 @@ process_further: _proto("OOSQ DATA %%%u { #%u }", sp->hdr.serial, sp->hdr.seq); /* secured packets must be verified and possibly decrypted */ - if (rxrpc_verify_packet(call, skb, _abort_code) < 0) + if (call->conn->security->verify_packet(call, skb, + _abort_code) < 0) goto protocol_error; rxrpc_insert_oos_packet(call, skb); diff --git a/net/rxrpc/ar-connection.c b/net/rxrpc/ar-connection.c index 9b6966777633..97f4fae74bca 100644 --- a/net/rxrpc/ar-connection.c +++ b/net/rxrpc/ar-connection.c @@ -207,6 +207,7 @@ static struct rxrpc_connection *rxrpc_alloc_connection(gfp_t gfp) INIT_LIST_HEAD(&conn->bundle_link); conn->calls = RB_ROOT; skb_queue_head_init(&conn->rx_queue); + conn->security = &rxrpc_no_security; rwlock_init(&conn->lock); spin_lock_init(&conn->state_lock); atomic_set(&conn->usage, 1); @@ -564,8 +565,7 @@ int rxrpc_connect_call(struct rxrpc_sock *rx, candidate->debug_id, candidate->trans->debug_id); rxrpc_assign_connection_id(candidate); - if (candidate->security) - candidate->security->prime_packet_security(candidate); + candidate->security->prime_packet_security(candidate); /* leave the candidate lurking in zombie mode attached to the * bundle until we're ready for it */ @@ -830,7 +830,10 @@ static void rxrpc_destroy_connection(struct rxrpc_connection *conn) ASSERT(RB_EMPTY_ROOT(&conn->calls)); rxrpc_purge_queue(&conn->rx_queue); - rxrpc_clear_conn_security(conn); + conn->security->clear(conn); + key_put(conn->key); + key_put(conn->server_key); + rxrpc_put_transport(conn->trans); kfree(conn); _leave(""); diff --git a/net/rxrpc/ar-connevent.c b/net/rxrpc/ar-connevent.c index 291522392ac7..5f9563968a5b 100644 --- a/net/rxrpc/ar-connevent.c +++ b/net/rxrpc/ar-connevent.c @@ -174,15 +174,10 @@ static int rxrpc_process_event(struct rxrpc_connection *conn, return -ECONNABORTED; case RXRPC_PACKET_TYPE_CHALLENGE: - if (conn->security) - return conn->security->respond_to_challenge( - conn, skb, _abort_code); - return -EPROTO; + return conn->security->respond_to_challenge(conn, skb, + _abort_code); case RXRPC_PACKET_TYPE_RESPONSE: - if (!conn->security) - return -EPROTO; - ret = conn->security->verify_response(conn, skb, _abort_code); if (ret < 0) return ret; @@ -238,8 +233,6 @@ static void rxrpc_secure_connection(struct rxrpc_connection *conn) } } - ASSERT(conn->security != NULL); - if (conn->security->issue_challenge(conn) < 0) { abort_code = RX_CALL_DEAD; ret = -ENOMEM; diff --git a/net/rxrpc/ar-input.c b/net/rxrpc/ar-input.c index c6c784d3a3e8..01e038146b7c 100644 --- a/net/rxrpc/ar-input.c +++ b/net/rxrpc/ar-input.c @@ -193,7 +193,7 @@ static int rxrpc_fast_process_data(struct rxrpc_call *call, /* if the packet 
need security things doing to it, then it goes down * the slow path */ - if (call->conn->security) + if (call->conn->security_ix) goto enqueue_packet; sp->call = call; diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h index 72fd675a891e..f0b807a163fa 100644 --- a/net/rxrpc/ar-internal.h +++ b/net/rxrpc/ar-internal.h @@ -9,6 +9,7 @@ * 2 of the License, or (at your option) any later version. */ +#include #include #if 0 @@ -612,10 +613,6 @@ int __init rxrpc_init_security(void); void rxrpc_exit_security(void); int rxrpc_init_client_conn_security(struct rxrpc_connection *); int rxrpc_init_server_conn_security(struct rxrpc_connection *); -int rxrpc_secure_packet(const struct rxrpc_call *, struct sk_buff *, size_t, - void *); -int rxrpc_verify_packet(const struct rxrpc_call *, struct sk_buff *, u32 *); -void rxrpc_clear_conn_security(struct rxrpc_connection *); /* * ar-skbuff.c @@ -634,6 +631,11 @@ void __exit rxrpc_destroy_all_transports(void); struct rxrpc_transport *rxrpc_find_transport(struct rxrpc_local *, struct rxrpc_peer *); +/* + * insecure.c + */ +extern const struct rxrpc_security rxrpc_no_security; + /* * misc.c */ diff --git a/net/rxrpc/ar-output.c b/net/rxrpc/ar-output.c index 94e7d9537437..51cb10062a8d 100644 --- a/net/rxrpc/ar-output.c +++ b/net/rxrpc/ar-output.c @@ -663,7 +663,7 @@ static int rxrpc_send_data(struct rxrpc_sock *rx, size_t pad; /* pad out if we're using security */ - if (conn->security) { + if (conn->security_ix) { pad = conn->security_size + skb->mark; pad = conn->size_align - pad; pad &= conn->size_align - 1; @@ -695,7 +695,7 @@ static int rxrpc_send_data(struct rxrpc_sock *rx, if (more && seq & 1) sp->hdr.flags |= RXRPC_REQUEST_ACK; - ret = rxrpc_secure_packet( + ret = conn->security->secure_packet( call, skb, skb->mark, skb->head + sizeof(struct rxrpc_wire_header)); if (ret < 0) diff --git a/net/rxrpc/ar-security.c b/net/rxrpc/ar-security.c index 6946aec7ab1f..d223253b22fa 100644 --- a/net/rxrpc/ar-security.c +++ b/net/rxrpc/ar-security.c @@ -23,6 +23,7 @@ static LIST_HEAD(rxrpc_security_methods); static DECLARE_RWSEM(rxrpc_security_sem); static const struct rxrpc_security *rxrpc_security_types[] = { + [RXRPC_SECURITY_NONE] = &rxrpc_no_security, #ifdef CONFIG_RXKAD [RXRPC_SECURITY_RXKAD] = &rxkad, #endif @@ -98,7 +99,7 @@ int rxrpc_init_client_conn_security(struct rxrpc_connection *conn) ret = conn->security->init_connection_security(conn); if (ret < 0) { - conn->security = NULL; + conn->security = &rxrpc_no_security; return ret; } @@ -165,43 +166,3 @@ found_service: _leave(" = 0"); return 0; } - -/* - * secure a packet prior to transmission - */ -int rxrpc_secure_packet(const struct rxrpc_call *call, - struct sk_buff *skb, - size_t data_size, - void *sechdr) -{ - if (call->conn->security) - return call->conn->security->secure_packet( - call, skb, data_size, sechdr); - return 0; -} - -/* - * secure a packet prior to transmission - */ -int rxrpc_verify_packet(const struct rxrpc_call *call, struct sk_buff *skb, - u32 *_abort_code) -{ - if (call->conn->security) - return call->conn->security->verify_packet( - call, skb, _abort_code); - return 0; -} - -/* - * clear connection security - */ -void rxrpc_clear_conn_security(struct rxrpc_connection *conn) -{ - _enter("{%d}", conn->debug_id); - - if (conn->security) - conn->security->clear(conn); - - key_put(conn->key); - key_put(conn->server_key); -} diff --git a/net/rxrpc/insecure.c b/net/rxrpc/insecure.c new file mode 100644 index 000000000000..e571403613c1 --- /dev/null +++ b/net/rxrpc/insecure.c @@ 
-0,0 +1,83 @@ +/* Null security operations. + * + * Copyright (C) 2016 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. + */ + +#include +#include "ar-internal.h" + +static int none_init_connection_security(struct rxrpc_connection *conn) +{ + return 0; +} + +static void none_prime_packet_security(struct rxrpc_connection *conn) +{ +} + +static int none_secure_packet(const struct rxrpc_call *call, + struct sk_buff *skb, + size_t data_size, + void *sechdr) +{ + return 0; +} + +static int none_verify_packet(const struct rxrpc_call *call, + struct sk_buff *skb, + u32 *_abort_code) +{ + return 0; +} + +static int none_respond_to_challenge(struct rxrpc_connection *conn, + struct sk_buff *skb, + u32 *_abort_code) +{ + *_abort_code = RX_PROTOCOL_ERROR; + return -EPROTO; +} + +static int none_verify_response(struct rxrpc_connection *conn, + struct sk_buff *skb, + u32 *_abort_code) +{ + *_abort_code = RX_PROTOCOL_ERROR; + return -EPROTO; +} + +static void none_clear(struct rxrpc_connection *conn) +{ +} + +static int none_init(void) +{ + return 0; +} + +static void none_exit(void) +{ +} + +/* + * RxRPC Kerberos-based security + */ +const struct rxrpc_security rxrpc_no_security = { + .name = "none", + .security_index = RXRPC_SECURITY_NONE, + .init = none_init, + .exit = none_exit, + .init_connection_security = none_init_connection_security, + .prime_packet_security = none_prime_packet_security, + .secure_packet = none_secure_packet, + .verify_packet = none_verify_packet, + .respond_to_challenge = none_respond_to_challenge, + .verify_response = none_verify_response, + .clear = none_clear, +}; From 9a6f2b0113c8fce815db7c9d23754bdea4b428a0 Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Mon, 11 Apr 2016 21:40:05 +0200 Subject: [PATCH 0539/1649] net: mdio: Fix lockdep falls positive splat MDIO devices can be stacked upon each other. The current code supports two levels, which until recently has been enough for a DSA mdio bus on top of another bus. Now we have hardware which has an MDIO mux in the middle. Define an MDIO MUTEX class with three levels. Signed-off-by: Andrew Lunn Signed-off-by: David S. Miller --- drivers/net/phy/mdio-mux.c | 10 ++-------- drivers/net/phy/mdio_bus.c | 4 ++-- include/linux/mdio.h | 11 +++++++++++ 3 files changed, 15 insertions(+), 10 deletions(-) diff --git a/drivers/net/phy/mdio-mux.c b/drivers/net/phy/mdio-mux.c index 308ade0eb1b6..5c81d6faf304 100644 --- a/drivers/net/phy/mdio-mux.c +++ b/drivers/net/phy/mdio-mux.c @@ -45,13 +45,7 @@ static int mdio_mux_read(struct mii_bus *bus, int phy_id, int regnum) struct mdio_mux_parent_bus *pb = cb->parent; int r; - /* In theory multiple mdio_mux could be stacked, thus creating - * more than a single level of nesting. But in practice, - * SINGLE_DEPTH_NESTING will cover the vast majority of use - * cases. We use it, instead of trying to handle the general - * case. 
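
As a rough sketch of the locking pattern the commit message above describes
("Define an MDIO MUTEX class with three levels"): a mux-level accessor takes
its parent bus lock with the middle subclass so lockdep can tell the three
stacked acquisitions apart. struct my_mux and my_mux_read() are invented
names for illustration; only mutex_lock_nested(), struct mii_bus and the
MDIO_MUTEX_* subclasses added further down in this patch are real.

/* Illustration only -- not part of the patch.  A multiplexer sitting on
 * top of an MDIO bus annotates its lock with MDIO_MUTEX_MUX; the outermost
 * user takes MDIO_MUTEX_NORMAL and the innermost nested user takes
 * MDIO_MUTEX_NESTED, giving lockdep three distinct classes.
 */
#include <linux/mdio.h>
#include <linux/mutex.h>
#include <linux/phy.h>

struct my_mux {					/* hypothetical mux driver state */
	struct mii_bus *parent;			/* the bus being multiplexed */
	int (*select)(struct my_mux *mux, int child);
};

static int my_mux_read(struct my_mux *mux, int child, int phy_id, int regnum)
{
	int ret;

	/* middle level of the stack: use the MUX subclass */
	mutex_lock_nested(&mux->parent->mdio_lock, MDIO_MUTEX_MUX);
	ret = mux->select(mux, child);
	if (ret == 0)
		ret = mux->parent->read(mux->parent, phy_id, regnum);
	mutex_unlock(&mux->parent->mdio_lock);
	return ret;
}
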
- */ - mutex_lock_nested(&pb->mii_bus->mdio_lock, SINGLE_DEPTH_NESTING); + mutex_lock_nested(&pb->mii_bus->mdio_lock, MDIO_MUTEX_MUX); r = pb->switch_fn(pb->current_child, cb->bus_number, pb->switch_data); if (r) goto out; @@ -76,7 +70,7 @@ static int mdio_mux_write(struct mii_bus *bus, int phy_id, int r; - mutex_lock_nested(&pb->mii_bus->mdio_lock, SINGLE_DEPTH_NESTING); + mutex_lock_nested(&pb->mii_bus->mdio_lock, MDIO_MUTEX_MUX); r = pb->switch_fn(pb->current_child, cb->bus_number, pb->switch_data); if (r) goto out; diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c index 0cba64f1ecf4..751202a285a6 100644 --- a/drivers/net/phy/mdio_bus.c +++ b/drivers/net/phy/mdio_bus.c @@ -457,7 +457,7 @@ int mdiobus_read_nested(struct mii_bus *bus, int addr, u32 regnum) BUG_ON(in_interrupt()); - mutex_lock_nested(&bus->mdio_lock, SINGLE_DEPTH_NESTING); + mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED); retval = bus->read(bus, addr, regnum); mutex_unlock(&bus->mdio_lock); @@ -509,7 +509,7 @@ int mdiobus_write_nested(struct mii_bus *bus, int addr, u32 regnum, u16 val) BUG_ON(in_interrupt()); - mutex_lock_nested(&bus->mdio_lock, SINGLE_DEPTH_NESTING); + mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED); err = bus->write(bus, addr, regnum, val); mutex_unlock(&bus->mdio_lock); diff --git a/include/linux/mdio.h b/include/linux/mdio.h index 5bfd99d1a40a..bf9d1d750693 100644 --- a/include/linux/mdio.h +++ b/include/linux/mdio.h @@ -13,6 +13,17 @@ struct mii_bus; +/* Multiple levels of nesting are possible. However typically this is + * limited to nested DSA like layer, a MUX layer, and the normal + * user. Instead of trying to handle the general case, just define + * these cases. + */ +enum mdio_mutex_lock_class { + MDIO_MUTEX_NORMAL, + MDIO_MUTEX_MUX, + MDIO_MUTEX_NESTED, +}; + struct mdio_device { struct device dev; From 35eb8f7b1a37013d7a38466ae58c39fbd2c57faa Mon Sep 17 00:00:00 2001 From: Jouni Malinen Date: Wed, 6 Apr 2016 17:38:44 +0300 Subject: [PATCH 0540/1649] cfg80211: Improve Connect/Associate command documentation The roaming cases for the Connect command were not fully covered and neither Connect nor Associate command uses of the prev_bssid parameter were very clear. Add details to describe how the prev_bssid argument is supposed to be used and when the driver should use association or reassociation. Signed-off-by: Jouni Malinen Signed-off-by: Johannes Berg --- include/net/cfg80211.h | 26 +++++++++++++++++++++++--- include/uapi/linux/nl80211.h | 16 +++++++++++++--- 2 files changed, 36 insertions(+), 6 deletions(-) diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index b39277eb251f..5ec20369ceb8 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -1750,7 +1750,12 @@ enum cfg80211_assoc_req_flags { * @ie_len: Length of ie buffer in octets * @use_mfp: Use management frame protection (IEEE 802.11w) in this association * @crypto: crypto settings - * @prev_bssid: previous BSSID, if not %NULL use reassociate frame + * @prev_bssid: previous BSSID, if not %NULL use reassociate frame. This is used + * to indicate a request to reassociate within the ESS instead of a request + * do the initial association with the ESS. When included, this is set to + * the BSSID of the current association, i.e., to the value that is + * included in the Current AP address field of the Reassociation Request + * frame. * @flags: See &enum cfg80211_assoc_req_flags * @ht_capa: HT Capabilities over-rides. Values set in ht_capa_mask * will be used in ht_capa. 
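
To make the reassociation semantics documented above concrete: a driver (or
mac80211) checks req->prev_bssid in its ->assoc() handler and, when it is
non-NULL, builds a Reassociation Request whose Current AP address field
carries that BSSID. The sketch below is illustrative only;
drv_build_assoc_frame() is an invented helper, while struct
cfg80211_assoc_request, its prev_bssid member and the IEEE80211_STYPE_*
constants are real.

#include <linux/ieee80211.h>
#include <net/cfg80211.h>

/* invented helper standing in for driver-specific frame construction */
static int drv_build_assoc_frame(struct net_device *dev, __le16 stype,
				 const u8 *current_ap,
				 struct cfg80211_bss *bss,
				 const u8 *ie, size_t ie_len);

static int drv_assoc(struct wiphy *wiphy, struct net_device *dev,
		     struct cfg80211_assoc_request *req)
{
	bool reassoc = req->prev_bssid != NULL;
	__le16 stype = cpu_to_le16(reassoc ? IEEE80211_STYPE_REASSOC_REQ :
					     IEEE80211_STYPE_ASSOC_REQ);

	/* For a reassociation, the Current AP address field carries the
	 * BSSID of the association being replaced, i.e. req->prev_bssid.
	 */
	return drv_build_assoc_frame(dev, stype,
				     reassoc ? req->prev_bssid : NULL,
				     req->bss, req->ie, req->ie_len);
}
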
Un-supported values will be ignored. @@ -1925,7 +1930,12 @@ struct cfg80211_bss_selection { * @pbss: if set, connect to a PCP instead of AP. Valid for DMG * networks. * @bss_select: criteria to be used for BSS selection. - * @prev_bssid: previous BSSID, if not %NULL use reassociate frame + * @prev_bssid: previous BSSID, if not %NULL use reassociate frame. This is used + * to indicate a request to reassociate within the ESS instead of a request + * do the initial association with the ESS. When included, this is set to + * the BSSID of the current association, i.e., to the value that is + * included in the Current AP address field of the Reassociation Request + * frame. */ struct cfg80211_connect_params { struct ieee80211_channel *channel; @@ -2377,7 +2387,17 @@ struct cfg80211_qos_map { * @connect: Connect to the ESS with the specified parameters. When connected, * call cfg80211_connect_result() with status code %WLAN_STATUS_SUCCESS. * If the connection fails for some reason, call cfg80211_connect_result() - * with the status from the AP. + * with the status from the AP. The driver is allowed to roam to other + * BSSes within the ESS when the other BSS matches the connect parameters. + * When such roaming is initiated by the driver, the driver is expected to + * verify that the target matches the configured security parameters and + * to use Reassociation Request frame instead of Association Request frame. + * The connect function can also be used to request the driver to perform + * a specific roam when connected to an ESS. In that case, the prev_bssid + * parameter is set to the BSSID of the currently associated BSS as an + * indication of requesting reassociation. In both the driver-initiated and + * new connect() call initiated roaming cases, the result of roaming is + * indicated with a call to cfg80211_roamed() or cfg80211_roamed_bss(). * (invoked with the wireless_dev mutex held) * @disconnect: Disconnect from the BSS/ESS. * (invoked with the wireless_dev mutex held) diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index 6da52d7b48c4..b4606288ef7a 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -429,7 +429,11 @@ * @NL80211_CMD_ASSOCIATE: association request and notification; like * NL80211_CMD_AUTHENTICATE but for Association and Reassociation * (similar to MLME-ASSOCIATE.request, MLME-REASSOCIATE.request, - * MLME-ASSOCIATE.confirm or MLME-REASSOCIATE.confirm primitives). + * MLME-ASSOCIATE.confirm or MLME-REASSOCIATE.confirm primitives). The + * %NL80211_ATTR_PREV_BSSID attribute is used to specify whether the + * request is for the initial association to an ESS (that attribute not + * included) or for reassociation within the ESS (that attribute is + * included). * @NL80211_CMD_DEAUTHENTICATE: deauthentication request and notification; like * NL80211_CMD_AUTHENTICATE but for Deauthentication frames (similar to * MLME-DEAUTHENTICATION.request and MLME-DEAUTHENTICATE.indication @@ -479,6 +483,9 @@ * set of BSSID,frequency parameters is used (i.e., either the enforcing * %NL80211_ATTR_MAC,%NL80211_ATTR_WIPHY_FREQ or the less strict * %NL80211_ATTR_MAC_HINT and %NL80211_ATTR_WIPHY_FREQ_HINT). + * %NL80211_ATTR_PREV_BSSID can be used to request a reassociation within + * the ESS in case the device is already associated and an association with + * a different BSS is desired. 
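
In other words, a caller that is already associated requests a roam by
issuing another connect request with prev_bssid filled in. A minimal sketch
of that request shape follows; issue_connect() is an invented stand-in for
the path down to the driver, while struct cfg80211_connect_params and its
prev_bssid member are real.

#include <net/cfg80211.h>

/* invented stand-in for however the request reaches the driver */
static int issue_connect(struct net_device *dev,
			 struct cfg80211_connect_params *sme);

static int roam_within_ess(struct net_device *dev,
			   struct cfg80211_connect_params *sme,
			   const u8 *current_bssid)
{
	/* sme carries the same SSID and security settings as the original
	 * connect request; pointing prev_bssid at the BSSID we are
	 * currently associated with marks this as a reassociation within
	 * the ESS rather than a fresh initial association.
	 */
	sme->prev_bssid = current_bssid;

	return issue_connect(dev, sme);
}
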
* Background scan period can optionally be * specified in %NL80211_ATTR_BG_SCAN_PERIOD, * if not specified default background scan configuration @@ -1287,8 +1294,11 @@ enum nl80211_commands { * @NL80211_ATTR_RESP_IE: (Re)association response information elements as * sent by peer, for ROAM and successful CONNECT events. * - * @NL80211_ATTR_PREV_BSSID: previous BSSID, to be used by in ASSOCIATE - * commands to specify using a reassociate frame + * @NL80211_ATTR_PREV_BSSID: previous BSSID, to be used in ASSOCIATE and CONNECT + * commands to specify a request to reassociate within an ESS, i.e., to use + * Reassociate Request frame (with the value of this attribute in the + * Current AP address field) instead of Association Request frame which is + * used for the initial association to an ESS. * * @NL80211_ATTR_KEY: key information in a nested attribute with * %NL80211_KEY_* sub-attributes From 57fbcce37be7c1d2622b56587c10ade00e96afa3 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Tue, 12 Apr 2016 15:56:15 +0200 Subject: [PATCH 0541/1649] cfg80211: remove enum ieee80211_band This enum is already perfectly aliased to enum nl80211_band, and the only reason for it is that we get IEEE80211_NUM_BANDS out of it. There's no really good reason to not declare the number of bands in nl80211 though, so do that and remove the cfg80211 one. Signed-off-by: Johannes Berg --- Documentation/DocBook/80211.tmpl | 1 - drivers/net/wireless/admtek/adm8211.c | 4 +- drivers/net/wireless/ath/ar5523/ar5523.c | 4 +- drivers/net/wireless/ath/ath.h | 2 +- drivers/net/wireless/ath/ath10k/core.h | 2 +- drivers/net/wireless/ath/ath10k/htt_rx.c | 8 +- drivers/net/wireless/ath/ath10k/mac.c | 68 +++---- drivers/net/wireless/ath/ath10k/wmi.c | 8 +- drivers/net/wireless/ath/ath5k/ani.c | 2 +- drivers/net/wireless/ath/ath5k/ath5k.h | 10 +- drivers/net/wireless/ath/ath5k/attach.c | 8 +- drivers/net/wireless/ath/ath5k/base.c | 32 ++-- drivers/net/wireless/ath/ath5k/debug.c | 6 +- drivers/net/wireless/ath/ath5k/pcu.c | 6 +- drivers/net/wireless/ath/ath5k/phy.c | 30 +-- drivers/net/wireless/ath/ath5k/qcu.c | 8 +- drivers/net/wireless/ath/ath5k/reset.c | 6 +- drivers/net/wireless/ath/ath6kl/cfg80211.c | 22 +-- drivers/net/wireless/ath/ath6kl/core.h | 2 +- drivers/net/wireless/ath/ath6kl/wmi.c | 22 +-- drivers/net/wireless/ath/ath6kl/wmi.h | 2 +- drivers/net/wireless/ath/ath9k/calib.c | 6 +- drivers/net/wireless/ath/ath9k/channel.c | 8 +- drivers/net/wireless/ath/ath9k/common-init.c | 28 +-- drivers/net/wireless/ath/ath9k/common.c | 4 +- drivers/net/wireless/ath/ath9k/debug_sta.c | 6 +- drivers/net/wireless/ath/ath9k/dynack.c | 2 +- drivers/net/wireless/ath/ath9k/htc_drv_init.c | 8 +- drivers/net/wireless/ath/ath9k/htc_drv_main.c | 12 +- drivers/net/wireless/ath/ath9k/htc_drv_txrx.c | 2 +- drivers/net/wireless/ath/ath9k/init.c | 12 +- drivers/net/wireless/ath/ath9k/main.c | 4 +- drivers/net/wireless/ath/ath9k/xmit.c | 4 +- drivers/net/wireless/ath/carl9170/mac.c | 12 +- drivers/net/wireless/ath/carl9170/main.c | 6 +- drivers/net/wireless/ath/carl9170/phy.c | 18 +- drivers/net/wireless/ath/carl9170/rx.c | 2 +- drivers/net/wireless/ath/carl9170/tx.c | 8 +- drivers/net/wireless/ath/regd.c | 16 +- drivers/net/wireless/ath/regd.h | 2 +- drivers/net/wireless/ath/wcn36xx/main.c | 12 +- drivers/net/wireless/ath/wcn36xx/smd.c | 4 +- drivers/net/wireless/ath/wcn36xx/txrx.c | 2 +- drivers/net/wireless/ath/wil6210/cfg80211.c | 4 +- drivers/net/wireless/ath/wil6210/netdev.c | 2 +- drivers/net/wireless/ath/wil6210/wmi.c | 2 +- 
drivers/net/wireless/atmel/at76c50x-usb.c | 6 +- drivers/net/wireless/atmel/atmel.c | 2 +- drivers/net/wireless/broadcom/b43/b43.h | 4 +- drivers/net/wireless/broadcom/b43/main.c | 34 ++-- drivers/net/wireless/broadcom/b43/phy_ac.c | 2 +- .../net/wireless/broadcom/b43/phy_common.c | 2 +- drivers/net/wireless/broadcom/b43/phy_ht.c | 16 +- drivers/net/wireless/broadcom/b43/phy_lcn.c | 10 +- drivers/net/wireless/broadcom/b43/phy_lp.c | 30 +-- drivers/net/wireless/broadcom/b43/phy_n.c | 176 +++++++++--------- .../net/wireless/broadcom/b43/tables_lpphy.c | 14 +- .../net/wireless/broadcom/b43/tables_nphy.c | 16 +- .../wireless/broadcom/b43/tables_phy_lcn.c | 2 +- drivers/net/wireless/broadcom/b43/xmit.c | 8 +- .../net/wireless/broadcom/b43legacy/main.c | 12 +- .../net/wireless/broadcom/b43legacy/xmit.c | 2 +- .../broadcom/brcm80211/brcmfmac/cfg80211.c | 74 ++++---- .../broadcom/brcm80211/brcmfmac/p2p.c | 8 +- .../broadcom/brcm80211/brcmsmac/channel.c | 10 +- .../broadcom/brcm80211/brcmsmac/mac80211_if.c | 16 +- .../broadcom/brcm80211/brcmsmac/main.c | 4 +- drivers/net/wireless/cisco/airo.c | 6 +- drivers/net/wireless/intel/ipw2x00/ipw2100.c | 6 +- drivers/net/wireless/intel/ipw2x00/ipw2200.c | 12 +- .../net/wireless/intel/iwlegacy/3945-mac.c | 30 +-- drivers/net/wireless/intel/iwlegacy/3945-rs.c | 22 +-- drivers/net/wireless/intel/iwlegacy/3945.c | 20 +- .../net/wireless/intel/iwlegacy/4965-mac.c | 38 ++-- drivers/net/wireless/intel/iwlegacy/4965-rs.c | 22 +-- drivers/net/wireless/intel/iwlegacy/4965.c | 6 +- drivers/net/wireless/intel/iwlegacy/4965.h | 2 +- drivers/net/wireless/intel/iwlegacy/common.c | 70 +++---- drivers/net/wireless/intel/iwlegacy/common.h | 30 +-- drivers/net/wireless/intel/iwlegacy/debug.c | 4 +- drivers/net/wireless/intel/iwlwifi/dvm/agn.h | 8 +- .../net/wireless/intel/iwlwifi/dvm/debugfs.c | 4 +- drivers/net/wireless/intel/iwlwifi/dvm/dev.h | 6 +- .../net/wireless/intel/iwlwifi/dvm/devices.c | 4 +- drivers/net/wireless/intel/iwlwifi/dvm/lib.c | 6 +- .../net/wireless/intel/iwlwifi/dvm/mac80211.c | 12 +- drivers/net/wireless/intel/iwlwifi/dvm/main.c | 4 +- drivers/net/wireless/intel/iwlwifi/dvm/rs.c | 22 +-- drivers/net/wireless/intel/iwlwifi/dvm/rs.h | 2 +- drivers/net/wireless/intel/iwlwifi/dvm/rx.c | 2 +- drivers/net/wireless/intel/iwlwifi/dvm/rxon.c | 10 +- drivers/net/wireless/intel/iwlwifi/dvm/scan.c | 38 ++-- drivers/net/wireless/intel/iwlwifi/dvm/sta.c | 2 +- drivers/net/wireless/intel/iwlwifi/dvm/tx.c | 4 +- drivers/net/wireless/intel/iwlwifi/iwl-1000.c | 2 +- drivers/net/wireless/intel/iwlwifi/iwl-2000.c | 2 +- drivers/net/wireless/intel/iwlwifi/iwl-5000.c | 2 +- drivers/net/wireless/intel/iwlwifi/iwl-6000.c | 2 +- drivers/net/wireless/intel/iwlwifi/iwl-7000.c | 4 +- drivers/net/wireless/intel/iwlwifi/iwl-8000.c | 2 +- drivers/net/wireless/intel/iwlwifi/iwl-9000.c | 2 +- .../net/wireless/intel/iwlwifi/iwl-config.h | 2 +- .../wireless/intel/iwlwifi/iwl-eeprom-parse.c | 38 ++-- .../wireless/intel/iwlwifi/iwl-eeprom-parse.h | 6 +- .../wireless/intel/iwlwifi/iwl-nvm-parse.c | 26 +-- drivers/net/wireless/intel/iwlwifi/mvm/coex.c | 10 +- .../wireless/intel/iwlwifi/mvm/debugfs-vif.c | 6 +- drivers/net/wireless/intel/iwlwifi/mvm/fw.c | 2 +- .../net/wireless/intel/iwlwifi/mvm/mac-ctxt.c | 8 +- .../net/wireless/intel/iwlwifi/mvm/mac80211.c | 16 +- drivers/net/wireless/intel/iwlwifi/mvm/mvm.h | 6 +- .../net/wireless/intel/iwlwifi/mvm/phy-ctxt.c | 2 +- drivers/net/wireless/intel/iwlwifi/mvm/rs.c | 28 +-- drivers/net/wireless/intel/iwlwifi/mvm/rs.h | 4 +- 
drivers/net/wireless/intel/iwlwifi/mvm/rx.c | 2 +- drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c | 4 +- drivers/net/wireless/intel/iwlwifi/mvm/scan.c | 36 ++-- drivers/net/wireless/intel/iwlwifi/mvm/tdls.c | 2 +- drivers/net/wireless/intel/iwlwifi/mvm/tx.c | 6 +- .../net/wireless/intel/iwlwifi/mvm/utils.c | 4 +- drivers/net/wireless/intersil/orinoco/cfg.c | 6 +- drivers/net/wireless/intersil/orinoco/hw.c | 2 +- drivers/net/wireless/intersil/orinoco/scan.c | 4 +- drivers/net/wireless/intersil/p54/eeprom.c | 32 ++-- drivers/net/wireless/intersil/p54/main.c | 4 +- drivers/net/wireless/intersil/p54/p54.h | 2 +- drivers/net/wireless/intersil/p54/txrx.c | 4 +- drivers/net/wireless/mac80211_hwsim.c | 14 +- drivers/net/wireless/marvell/libertas/cfg.c | 10 +- drivers/net/wireless/marvell/libertas/cmd.c | 4 +- .../net/wireless/marvell/libertas_tf/main.c | 4 +- .../net/wireless/marvell/mwifiex/cfg80211.c | 34 ++-- drivers/net/wireless/marvell/mwifiex/cfp.c | 12 +- drivers/net/wireless/marvell/mwifiex/scan.c | 8 +- .../net/wireless/marvell/mwifiex/uap_cmd.c | 2 +- drivers/net/wireless/marvell/mwl8k.c | 88 ++++----- drivers/net/wireless/mediatek/mt7601u/init.c | 4 +- .../net/wireless/ralink/rt2x00/rt2800lib.c | 30 +-- drivers/net/wireless/ralink/rt2x00/rt2x00.h | 4 +- .../net/wireless/ralink/rt2x00/rt2x00dev.c | 40 ++-- drivers/net/wireless/ralink/rt2x00/rt61pci.c | 22 +-- drivers/net/wireless/ralink/rt2x00/rt73usb.c | 22 +-- .../wireless/realtek/rtl818x/rtl8180/dev.c | 8 +- .../wireless/realtek/rtl818x/rtl8187/dev.c | 4 +- .../net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 30 +-- drivers/net/wireless/realtek/rtlwifi/base.c | 44 ++--- drivers/net/wireless/realtek/rtlwifi/regd.c | 16 +- drivers/net/wireless/realtek/rtlwifi/wifi.h | 2 +- drivers/net/wireless/rndis_wlan.c | 4 +- drivers/net/wireless/rsi/rsi_91x_mac80211.c | 100 +++++----- drivers/net/wireless/rsi/rsi_91x_mgmt.c | 8 +- drivers/net/wireless/rsi/rsi_91x_pkt.c | 2 +- drivers/net/wireless/rsi/rsi_main.h | 2 +- drivers/net/wireless/st/cw1200/main.c | 10 +- drivers/net/wireless/st/cw1200/scan.c | 2 +- drivers/net/wireless/st/cw1200/sta.c | 6 +- drivers/net/wireless/st/cw1200/txrx.c | 2 +- drivers/net/wireless/st/cw1200/wsm.c | 4 +- drivers/net/wireless/ti/wl1251/main.c | 2 +- drivers/net/wireless/ti/wl1251/rx.c | 2 +- drivers/net/wireless/ti/wl12xx/main.c | 8 +- drivers/net/wireless/ti/wl12xx/scan.c | 20 +- drivers/net/wireless/ti/wl18xx/cmd.c | 6 +- drivers/net/wireless/ti/wl18xx/event.c | 6 +- drivers/net/wireless/ti/wl18xx/main.c | 22 +-- drivers/net/wireless/ti/wl18xx/scan.c | 8 +- drivers/net/wireless/ti/wl18xx/tx.c | 2 +- drivers/net/wireless/ti/wlcore/cmd.c | 36 ++-- drivers/net/wireless/ti/wlcore/cmd.h | 6 +- drivers/net/wireless/ti/wlcore/main.c | 32 ++-- drivers/net/wireless/ti/wlcore/ps.c | 4 +- drivers/net/wireless/ti/wlcore/rx.c | 4 +- drivers/net/wireless/ti/wlcore/rx.h | 2 +- drivers/net/wireless/ti/wlcore/scan.c | 14 +- drivers/net/wireless/ti/wlcore/tx.c | 2 +- drivers/net/wireless/ti/wlcore/tx.h | 4 +- drivers/net/wireless/ti/wlcore/wlcore.h | 4 +- drivers/net/wireless/ti/wlcore/wlcore_i.h | 2 +- drivers/net/wireless/wl3501_cs.c | 2 +- drivers/net/wireless/zydas/zd1211rw/zd_mac.c | 4 +- drivers/staging/rtl8723au/core/rtw_mlme_ext.c | 4 +- drivers/staging/rtl8723au/include/ieee80211.h | 2 +- .../staging/rtl8723au/os_dep/ioctl_cfg80211.c | 54 +++--- drivers/staging/vt6655/channel.c | 4 +- drivers/staging/vt6655/device_main.c | 4 +- drivers/staging/vt6655/rxtx.c | 2 +- drivers/staging/vt6656/channel.c | 4 +- 
drivers/staging/vt6656/int.c | 2 +- drivers/staging/vt6656/main_usb.c | 2 +- drivers/staging/vt6656/rxtx.c | 2 +- .../staging/wilc1000/wilc_wfi_cfgoperations.c | 10 +- drivers/staging/wlan-ng/cfg80211.c | 6 +- include/net/cfg80211.h | 44 ++--- include/net/mac80211.h | 12 +- include/uapi/linux/nl80211.h | 4 + net/mac80211/cfg.c | 14 +- net/mac80211/debugfs_netdev.c | 12 +- net/mac80211/ibss.c | 10 +- net/mac80211/ieee80211_i.h | 34 ++-- net/mac80211/iface.c | 2 +- net/mac80211/main.c | 6 +- net/mac80211/mesh.c | 10 +- net/mac80211/mesh_plink.c | 10 +- net/mac80211/mlme.c | 14 +- net/mac80211/rate.c | 2 +- net/mac80211/rc80211_minstrel.c | 6 +- net/mac80211/rc80211_minstrel_ht.c | 4 +- net/mac80211/rx.c | 6 +- net/mac80211/scan.c | 12 +- net/mac80211/spectmgmt.c | 4 +- net/mac80211/tdls.c | 18 +- net/mac80211/trace.h | 6 +- net/mac80211/tx.c | 14 +- net/mac80211/util.c | 24 +-- net/mac80211/vht.c | 4 +- net/wireless/chan.c | 2 +- net/wireless/core.c | 8 +- net/wireless/debugfs.c | 4 +- net/wireless/ibss.c | 6 +- net/wireless/mesh.c | 4 +- net/wireless/mlme.c | 2 +- net/wireless/nl80211.c | 44 ++--- net/wireless/rdev-ops.h | 2 +- net/wireless/reg.c | 30 +-- net/wireless/reg.h | 2 +- net/wireless/scan.c | 14 +- net/wireless/sme.c | 6 +- net/wireless/trace.h | 20 +- net/wireless/util.c | 40 ++-- net/wireless/wext-compat.c | 14 +- 230 files changed, 1420 insertions(+), 1437 deletions(-) diff --git a/Documentation/DocBook/80211.tmpl b/Documentation/DocBook/80211.tmpl index f9b9ad7894f5..f2a312b35875 100644 --- a/Documentation/DocBook/80211.tmpl +++ b/Documentation/DocBook/80211.tmpl @@ -75,7 +75,6 @@ Device registration !Pinclude/net/cfg80211.h Device registration -!Finclude/net/cfg80211.h ieee80211_band !Finclude/net/cfg80211.h ieee80211_channel_flags !Finclude/net/cfg80211.h ieee80211_channel !Finclude/net/cfg80211.h ieee80211_rate_flags diff --git a/drivers/net/wireless/admtek/adm8211.c b/drivers/net/wireless/admtek/adm8211.c index 15f057ed41ad..70ecd82d674d 100644 --- a/drivers/net/wireless/admtek/adm8211.c +++ b/drivers/net/wireless/admtek/adm8211.c @@ -440,7 +440,7 @@ static void adm8211_interrupt_rci(struct ieee80211_hw *dev) rx_status.rate_idx = rate; rx_status.freq = adm8211_channels[priv->channel - 1].center_freq; - rx_status.band = IEEE80211_BAND_2GHZ; + rx_status.band = NL80211_BAND_2GHZ; memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status)); ieee80211_rx_irqsafe(dev, skb); @@ -1894,7 +1894,7 @@ static int adm8211_probe(struct pci_dev *pdev, priv->channel = 1; - dev->wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band; + dev->wiphy->bands[NL80211_BAND_2GHZ] = &priv->band; err = ieee80211_register_hw(dev); if (err) { diff --git a/drivers/net/wireless/ath/ar5523/ar5523.c b/drivers/net/wireless/ath/ar5523/ar5523.c index 3b343c63aa52..8aded24bcdf4 100644 --- a/drivers/net/wireless/ath/ar5523/ar5523.c +++ b/drivers/net/wireless/ath/ar5523/ar5523.c @@ -1471,12 +1471,12 @@ static int ar5523_init_modes(struct ar5523 *ar) memcpy(ar->channels, ar5523_channels, sizeof(ar5523_channels)); memcpy(ar->rates, ar5523_rates, sizeof(ar5523_rates)); - ar->band.band = IEEE80211_BAND_2GHZ; + ar->band.band = NL80211_BAND_2GHZ; ar->band.channels = ar->channels; ar->band.n_channels = ARRAY_SIZE(ar5523_channels); ar->band.bitrates = ar->rates; ar->band.n_bitrates = ARRAY_SIZE(ar5523_rates); - ar->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &ar->band; + ar->hw->wiphy->bands[NL80211_BAND_2GHZ] = &ar->band; return 0; } diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h index 
65ef483ebf50..da7a7c8dafb2 100644 --- a/drivers/net/wireless/ath/ath.h +++ b/drivers/net/wireless/ath/ath.h @@ -185,7 +185,7 @@ struct ath_common { bool bt_ant_diversity; int last_rssi; - struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS]; + struct ieee80211_supported_band sbands[NUM_NL80211_BANDS]; }; static inline const struct ath_ps_ops *ath_ps_ops(struct ath_common *common) diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h index d85b99164212..362bbed8f0e9 100644 --- a/drivers/net/wireless/ath/ath10k/core.h +++ b/drivers/net/wireless/ath/ath10k/core.h @@ -765,7 +765,7 @@ struct ath10k { } scan; struct { - struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS]; + struct ieee80211_supported_band sbands[NUM_NL80211_BANDS]; } mac; /* should never be NULL; needed for regular htt rx */ diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c index 9390897a00c6..079fef5b7ef2 100644 --- a/drivers/net/wireless/ath/ath10k/htt_rx.c +++ b/drivers/net/wireless/ath/ath10k/htt_rx.c @@ -2182,9 +2182,9 @@ static void ath10k_htt_rx_tx_mode_switch_ind(struct ath10k *ar, ath10k_mac_tx_push_pending(ar); } -static inline enum ieee80211_band phy_mode_to_band(u32 phy_mode) +static inline enum nl80211_band phy_mode_to_band(u32 phy_mode) { - enum ieee80211_band band; + enum nl80211_band band; switch (phy_mode) { case MODE_11A: @@ -2193,7 +2193,7 @@ static inline enum ieee80211_band phy_mode_to_band(u32 phy_mode) case MODE_11AC_VHT20: case MODE_11AC_VHT40: case MODE_11AC_VHT80: - band = IEEE80211_BAND_5GHZ; + band = NL80211_BAND_5GHZ; break; case MODE_11G: case MODE_11B: @@ -2204,7 +2204,7 @@ static inline enum ieee80211_band phy_mode_to_band(u32 phy_mode) case MODE_11AC_VHT40_2G: case MODE_11AC_VHT80_2G: default: - band = IEEE80211_BAND_2GHZ; + band = NL80211_BAND_2GHZ; } return band; diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index b0e613bc10a5..6ace10bc96f5 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -482,7 +482,7 @@ chan_to_phymode(const struct cfg80211_chan_def *chandef) enum wmi_phy_mode phymode = MODE_UNKNOWN; switch (chandef->chan->band) { - case IEEE80211_BAND_2GHZ: + case NL80211_BAND_2GHZ: switch (chandef->width) { case NL80211_CHAN_WIDTH_20_NOHT: if (chandef->chan->flags & IEEE80211_CHAN_NO_OFDM) @@ -505,7 +505,7 @@ chan_to_phymode(const struct cfg80211_chan_def *chandef) break; } break; - case IEEE80211_BAND_5GHZ: + case NL80211_BAND_5GHZ: switch (chandef->width) { case NL80211_CHAN_WIDTH_20_NOHT: phymode = MODE_11A; @@ -2055,7 +2055,7 @@ static void ath10k_peer_assoc_h_rates(struct ath10k *ar, struct cfg80211_chan_def def; const struct ieee80211_supported_band *sband; const struct ieee80211_rate *rates; - enum ieee80211_band band; + enum nl80211_band band; u32 ratemask; u8 rate; int i; @@ -2115,7 +2115,7 @@ static void ath10k_peer_assoc_h_ht(struct ath10k *ar, const struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); struct cfg80211_chan_def def; - enum ieee80211_band band; + enum nl80211_band band; const u8 *ht_mcs_mask; const u16 *vht_mcs_mask; int i, n; @@ -2339,7 +2339,7 @@ static void ath10k_peer_assoc_h_vht(struct ath10k *ar, const struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap; struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); struct cfg80211_chan_def def; - enum ieee80211_band band; + enum nl80211_band band; const u16 *vht_mcs_mask; 
u8 ampdu_factor; @@ -2357,7 +2357,7 @@ static void ath10k_peer_assoc_h_vht(struct ath10k *ar, arg->peer_flags |= ar->wmi.peer_flags->vht; - if (def.chan->band == IEEE80211_BAND_2GHZ) + if (def.chan->band == NL80211_BAND_2GHZ) arg->peer_flags |= ar->wmi.peer_flags->vht_2g; arg->peer_vht_caps = vht_cap->cap; @@ -2426,7 +2426,7 @@ static void ath10k_peer_assoc_h_qos(struct ath10k *ar, static bool ath10k_mac_sta_has_ofdm_only(struct ieee80211_sta *sta) { - return sta->supp_rates[IEEE80211_BAND_2GHZ] >> + return sta->supp_rates[NL80211_BAND_2GHZ] >> ATH10K_MAC_FIRST_OFDM_RATE_IDX; } @@ -2437,7 +2437,7 @@ static void ath10k_peer_assoc_h_phymode(struct ath10k *ar, { struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); struct cfg80211_chan_def def; - enum ieee80211_band band; + enum nl80211_band band; const u8 *ht_mcs_mask; const u16 *vht_mcs_mask; enum wmi_phy_mode phymode = MODE_UNKNOWN; @@ -2450,7 +2450,7 @@ static void ath10k_peer_assoc_h_phymode(struct ath10k *ar, vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs; switch (band) { - case IEEE80211_BAND_2GHZ: + case NL80211_BAND_2GHZ: if (sta->vht_cap.vht_supported && !ath10k_peer_assoc_h_vht_masked(vht_mcs_mask)) { if (sta->bandwidth == IEEE80211_STA_RX_BW_40) @@ -2470,7 +2470,7 @@ static void ath10k_peer_assoc_h_phymode(struct ath10k *ar, } break; - case IEEE80211_BAND_5GHZ: + case NL80211_BAND_5GHZ: /* * Check VHT first. */ @@ -2848,7 +2848,7 @@ static int ath10k_update_channel_list(struct ath10k *ar) { struct ieee80211_hw *hw = ar->hw; struct ieee80211_supported_band **bands; - enum ieee80211_band band; + enum nl80211_band band; struct ieee80211_channel *channel; struct wmi_scan_chan_list_arg arg = {0}; struct wmi_channel_arg *ch; @@ -2860,7 +2860,7 @@ static int ath10k_update_channel_list(struct ath10k *ar) lockdep_assert_held(&ar->conf_mutex); bands = hw->wiphy->bands; - for (band = 0; band < IEEE80211_NUM_BANDS; band++) { + for (band = 0; band < NUM_NL80211_BANDS; band++) { if (!bands[band]) continue; @@ -2879,7 +2879,7 @@ static int ath10k_update_channel_list(struct ath10k *ar) return -ENOMEM; ch = arg.channels; - for (band = 0; band < IEEE80211_NUM_BANDS; band++) { + for (band = 0; band < NUM_NL80211_BANDS; band++) { if (!bands[band]) continue; @@ -2917,7 +2917,7 @@ static int ath10k_update_channel_list(struct ath10k *ar) /* FIXME: why use only legacy modes, why not any * HT/VHT modes? Would that even make any * difference? 
*/ - if (channel->band == IEEE80211_BAND_2GHZ) + if (channel->band == NL80211_BAND_2GHZ) ch->mode = MODE_11G; else ch->mode = MODE_11A; @@ -4254,14 +4254,14 @@ static void ath10k_mac_setup_ht_vht_cap(struct ath10k *ar) vht_cap = ath10k_create_vht_cap(ar); if (ar->phy_capability & WHAL_WLAN_11G_CAPABILITY) { - band = &ar->mac.sbands[IEEE80211_BAND_2GHZ]; + band = &ar->mac.sbands[NL80211_BAND_2GHZ]; band->ht_cap = ht_cap; /* Enable the VHT support at 2.4 GHz */ band->vht_cap = vht_cap; } if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY) { - band = &ar->mac.sbands[IEEE80211_BAND_5GHZ]; + band = &ar->mac.sbands[NL80211_BAND_5GHZ]; band->ht_cap = ht_cap; band->vht_cap = vht_cap; } @@ -5595,7 +5595,7 @@ static void ath10k_sta_rc_update_wk(struct work_struct *wk) struct ath10k_sta *arsta; struct ieee80211_sta *sta; struct cfg80211_chan_def def; - enum ieee80211_band band; + enum nl80211_band band; const u8 *ht_mcs_mask; const u16 *vht_mcs_mask; u32 changed, bw, nss, smps; @@ -6394,14 +6394,14 @@ static int ath10k_get_survey(struct ieee80211_hw *hw, int idx, mutex_lock(&ar->conf_mutex); - sband = hw->wiphy->bands[IEEE80211_BAND_2GHZ]; + sband = hw->wiphy->bands[NL80211_BAND_2GHZ]; if (sband && idx >= sband->n_channels) { idx -= sband->n_channels; sband = NULL; } if (!sband) - sband = hw->wiphy->bands[IEEE80211_BAND_5GHZ]; + sband = hw->wiphy->bands[NL80211_BAND_5GHZ]; if (!sband || idx >= sband->n_channels) { ret = -ENOENT; @@ -6424,7 +6424,7 @@ exit: static bool ath10k_mac_bitrate_mask_has_single_rate(struct ath10k *ar, - enum ieee80211_band band, + enum nl80211_band band, const struct cfg80211_bitrate_mask *mask) { int num_rates = 0; @@ -6443,7 +6443,7 @@ ath10k_mac_bitrate_mask_has_single_rate(struct ath10k *ar, static bool ath10k_mac_bitrate_mask_get_single_nss(struct ath10k *ar, - enum ieee80211_band band, + enum nl80211_band band, const struct cfg80211_bitrate_mask *mask, int *nss) { @@ -6492,7 +6492,7 @@ ath10k_mac_bitrate_mask_get_single_nss(struct ath10k *ar, static int ath10k_mac_bitrate_mask_get_single_rate(struct ath10k *ar, - enum ieee80211_band band, + enum nl80211_band band, const struct cfg80211_bitrate_mask *mask, u8 *rate, u8 *nss) { @@ -6593,7 +6593,7 @@ static int ath10k_mac_set_fixed_rate_params(struct ath10k_vif *arvif, static bool ath10k_mac_can_set_bitrate_mask(struct ath10k *ar, - enum ieee80211_band band, + enum nl80211_band band, const struct cfg80211_bitrate_mask *mask) { int i; @@ -6645,7 +6645,7 @@ static int ath10k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw, struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); struct cfg80211_chan_def def; struct ath10k *ar = arvif->ar; - enum ieee80211_band band; + enum nl80211_band band; const u8 *ht_mcs_mask; const u16 *vht_mcs_mask; u8 rate; @@ -7275,7 +7275,7 @@ static const struct ieee80211_ops ath10k_ops = { }; #define CHAN2G(_channel, _freq, _flags) { \ - .band = IEEE80211_BAND_2GHZ, \ + .band = NL80211_BAND_2GHZ, \ .hw_value = (_channel), \ .center_freq = (_freq), \ .flags = (_flags), \ @@ -7284,7 +7284,7 @@ static const struct ieee80211_ops ath10k_ops = { } #define CHAN5G(_channel, _freq, _flags) { \ - .band = IEEE80211_BAND_5GHZ, \ + .band = NL80211_BAND_5GHZ, \ .hw_value = (_channel), \ .center_freq = (_freq), \ .flags = (_flags), \ @@ -7604,13 +7604,13 @@ int ath10k_mac_register(struct ath10k *ar) goto err_free; } - band = &ar->mac.sbands[IEEE80211_BAND_2GHZ]; + band = &ar->mac.sbands[NL80211_BAND_2GHZ]; band->n_channels = ARRAY_SIZE(ath10k_2ghz_channels); band->channels = channels; band->n_bitrates = 
ath10k_g_rates_size; band->bitrates = ath10k_g_rates; - ar->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = band; + ar->hw->wiphy->bands[NL80211_BAND_2GHZ] = band; } if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY) { @@ -7622,12 +7622,12 @@ int ath10k_mac_register(struct ath10k *ar) goto err_free; } - band = &ar->mac.sbands[IEEE80211_BAND_5GHZ]; + band = &ar->mac.sbands[NL80211_BAND_5GHZ]; band->n_channels = ARRAY_SIZE(ath10k_5ghz_channels); band->channels = channels; band->n_bitrates = ath10k_a_rates_size; band->bitrates = ath10k_a_rates; - ar->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = band; + ar->hw->wiphy->bands[NL80211_BAND_5GHZ] = band; } ath10k_mac_setup_ht_vht_cap(ar); @@ -7815,8 +7815,8 @@ err_dfs_detector_exit: ar->dfs_detector->exit(ar->dfs_detector); err_free: - kfree(ar->mac.sbands[IEEE80211_BAND_2GHZ].channels); - kfree(ar->mac.sbands[IEEE80211_BAND_5GHZ].channels); + kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels); + kfree(ar->mac.sbands[NL80211_BAND_5GHZ].channels); SET_IEEE80211_DEV(ar->hw, NULL); return ret; @@ -7829,8 +7829,8 @@ void ath10k_mac_unregister(struct ath10k *ar) if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) ar->dfs_detector->exit(ar->dfs_detector); - kfree(ar->mac.sbands[IEEE80211_BAND_2GHZ].channels); - kfree(ar->mac.sbands[IEEE80211_BAND_5GHZ].channels); + kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels); + kfree(ar->mac.sbands[NL80211_BAND_5GHZ].channels); SET_IEEE80211_DEV(ar->hw, NULL); } diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c index f7ec65f263a0..4c75c74be5e7 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.c +++ b/drivers/net/wireless/ath/ath10k/wmi.c @@ -2281,9 +2281,9 @@ int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb) * of mgmt rx. */ if (channel >= 1 && channel <= 14) { - status->band = IEEE80211_BAND_2GHZ; + status->band = NL80211_BAND_2GHZ; } else if (channel >= 36 && channel <= 165) { - status->band = IEEE80211_BAND_5GHZ; + status->band = NL80211_BAND_5GHZ; } else { /* Shouldn't happen unless list of advertised channels to * mac80211 has been changed. @@ -2293,7 +2293,7 @@ int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb) return 0; } - if (phy_mode == MODE_11B && status->band == IEEE80211_BAND_5GHZ) + if (phy_mode == MODE_11B && status->band == NL80211_BAND_5GHZ) ath10k_dbg(ar, ATH10K_DBG_MGMT, "wmi mgmt rx 11b (CCK) on 5GHz\n"); sband = &ar->mac.sbands[status->band]; @@ -2352,7 +2352,7 @@ static int freq_to_idx(struct ath10k *ar, int freq) struct ieee80211_supported_band *sband; int band, ch, idx = 0; - for (band = IEEE80211_BAND_2GHZ; band < IEEE80211_NUM_BANDS; band++) { + for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) { sband = ar->hw->wiphy->bands[band]; if (!sband) continue; diff --git a/drivers/net/wireless/ath/ath5k/ani.c b/drivers/net/wireless/ath/ath5k/ani.c index 38be2702c0e2..0624333f5430 100644 --- a/drivers/net/wireless/ath/ath5k/ani.c +++ b/drivers/net/wireless/ath/ath5k/ani.c @@ -279,7 +279,7 @@ ath5k_ani_raise_immunity(struct ath5k_hw *ah, struct ath5k_ani_state *as, if (as->firstep_level < ATH5K_ANI_MAX_FIRSTEP_LVL) ath5k_ani_set_firstep_level(ah, as->firstep_level + 1); return; - } else if (ah->ah_current_channel->band == IEEE80211_BAND_2GHZ) { + } else if (ah->ah_current_channel->band == NL80211_BAND_2GHZ) { /* beacon RSSI is low. 
in B/G mode turn of OFDM weak signal * detect and zero firstep level to maximize CCK sensitivity */ ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI, diff --git a/drivers/net/wireless/ath/ath5k/ath5k.h b/drivers/net/wireless/ath/ath5k/ath5k.h index ba12f7f4061d..67fedb61fcc0 100644 --- a/drivers/net/wireless/ath/ath5k/ath5k.h +++ b/drivers/net/wireless/ath/ath5k/ath5k.h @@ -1265,10 +1265,10 @@ struct ath5k_hw { void __iomem *iobase; /* address of the device */ struct mutex lock; /* dev-level lock */ struct ieee80211_hw *hw; /* IEEE 802.11 common */ - struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS]; + struct ieee80211_supported_band sbands[NUM_NL80211_BANDS]; struct ieee80211_channel channels[ATH_CHAN_MAX]; - struct ieee80211_rate rates[IEEE80211_NUM_BANDS][AR5K_MAX_RATES]; - s8 rate_idx[IEEE80211_NUM_BANDS][AR5K_MAX_RATES]; + struct ieee80211_rate rates[NUM_NL80211_BANDS][AR5K_MAX_RATES]; + s8 rate_idx[NUM_NL80211_BANDS][AR5K_MAX_RATES]; enum nl80211_iftype opmode; #ifdef CONFIG_ATH5K_DEBUG @@ -1532,7 +1532,7 @@ int ath5k_eeprom_mode_from_channel(struct ath5k_hw *ah, /* Protocol Control Unit Functions */ /* Helpers */ -int ath5k_hw_get_frame_duration(struct ath5k_hw *ah, enum ieee80211_band band, +int ath5k_hw_get_frame_duration(struct ath5k_hw *ah, enum nl80211_band band, int len, struct ieee80211_rate *rate, bool shortpre); unsigned int ath5k_hw_get_default_slottime(struct ath5k_hw *ah); unsigned int ath5k_hw_get_default_sifs(struct ath5k_hw *ah); @@ -1611,7 +1611,7 @@ int ath5k_hw_write_initvals(struct ath5k_hw *ah, u8 mode, bool change_channel); /* PHY functions */ /* Misc PHY functions */ -u16 ath5k_hw_radio_revision(struct ath5k_hw *ah, enum ieee80211_band band); +u16 ath5k_hw_radio_revision(struct ath5k_hw *ah, enum nl80211_band band); int ath5k_hw_phy_disable(struct ath5k_hw *ah); /* Gain_F optimization */ enum ath5k_rfgain ath5k_hw_gainf_calibrate(struct ath5k_hw *ah); diff --git a/drivers/net/wireless/ath/ath5k/attach.c b/drivers/net/wireless/ath/ath5k/attach.c index 66b6366158b9..233054bd6b52 100644 --- a/drivers/net/wireless/ath/ath5k/attach.c +++ b/drivers/net/wireless/ath/ath5k/attach.c @@ -152,7 +152,7 @@ int ath5k_hw_init(struct ath5k_hw *ah) ah->ah_phy_revision = ath5k_hw_reg_read(ah, AR5K_PHY_CHIP_ID) & 0xffffffff; ah->ah_radio_5ghz_revision = ath5k_hw_radio_revision(ah, - IEEE80211_BAND_5GHZ); + NL80211_BAND_5GHZ); /* Try to identify radio chip based on its srev */ switch (ah->ah_radio_5ghz_revision & 0xf0) { @@ -160,14 +160,14 @@ int ath5k_hw_init(struct ath5k_hw *ah) ah->ah_radio = AR5K_RF5111; ah->ah_single_chip = false; ah->ah_radio_2ghz_revision = ath5k_hw_radio_revision(ah, - IEEE80211_BAND_2GHZ); + NL80211_BAND_2GHZ); break; case AR5K_SREV_RAD_5112: case AR5K_SREV_RAD_2112: ah->ah_radio = AR5K_RF5112; ah->ah_single_chip = false; ah->ah_radio_2ghz_revision = ath5k_hw_radio_revision(ah, - IEEE80211_BAND_2GHZ); + NL80211_BAND_2GHZ); break; case AR5K_SREV_RAD_2413: ah->ah_radio = AR5K_RF2413; @@ -204,7 +204,7 @@ int ath5k_hw_init(struct ath5k_hw *ah) ah->ah_radio = AR5K_RF5111; ah->ah_single_chip = false; ah->ah_radio_2ghz_revision = ath5k_hw_radio_revision(ah, - IEEE80211_BAND_2GHZ); + NL80211_BAND_2GHZ); } else if (ah->ah_mac_version == (AR5K_SREV_AR2425 >> 4) || ah->ah_mac_version == (AR5K_SREV_AR2417 >> 4) || ah->ah_phy_revision == AR5K_SREV_PHY_2425) { diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c index 3d946d8b2db2..d98fd421c7ec 100644 --- a/drivers/net/wireless/ath/ath5k/base.c +++ 
b/drivers/net/wireless/ath/ath5k/base.c @@ -268,15 +268,15 @@ static void ath5k_reg_notifier(struct wiphy *wiphy, * Returns true for the channel numbers used. */ #ifdef CONFIG_ATH5K_TEST_CHANNELS -static bool ath5k_is_standard_channel(short chan, enum ieee80211_band band) +static bool ath5k_is_standard_channel(short chan, enum nl80211_band band) { return true; } #else -static bool ath5k_is_standard_channel(short chan, enum ieee80211_band band) +static bool ath5k_is_standard_channel(short chan, enum nl80211_band band) { - if (band == IEEE80211_BAND_2GHZ && chan <= 14) + if (band == NL80211_BAND_2GHZ && chan <= 14) return true; return /* UNII 1,2 */ @@ -297,18 +297,18 @@ ath5k_setup_channels(struct ath5k_hw *ah, struct ieee80211_channel *channels, unsigned int mode, unsigned int max) { unsigned int count, size, freq, ch; - enum ieee80211_band band; + enum nl80211_band band; switch (mode) { case AR5K_MODE_11A: /* 1..220, but 2GHz frequencies are filtered by check_channel */ size = 220; - band = IEEE80211_BAND_5GHZ; + band = NL80211_BAND_5GHZ; break; case AR5K_MODE_11B: case AR5K_MODE_11G: size = 26; - band = IEEE80211_BAND_2GHZ; + band = NL80211_BAND_2GHZ; break; default: ATH5K_WARN(ah, "bad mode, not copying channels\n"); @@ -363,13 +363,13 @@ ath5k_setup_bands(struct ieee80211_hw *hw) int max_c, count_c = 0; int i; - BUILD_BUG_ON(ARRAY_SIZE(ah->sbands) < IEEE80211_NUM_BANDS); + BUILD_BUG_ON(ARRAY_SIZE(ah->sbands) < NUM_NL80211_BANDS); max_c = ARRAY_SIZE(ah->channels); /* 2GHz band */ - sband = &ah->sbands[IEEE80211_BAND_2GHZ]; - sband->band = IEEE80211_BAND_2GHZ; - sband->bitrates = &ah->rates[IEEE80211_BAND_2GHZ][0]; + sband = &ah->sbands[NL80211_BAND_2GHZ]; + sband->band = NL80211_BAND_2GHZ; + sband->bitrates = &ah->rates[NL80211_BAND_2GHZ][0]; if (test_bit(AR5K_MODE_11G, ah->ah_capabilities.cap_mode)) { /* G mode */ @@ -381,7 +381,7 @@ ath5k_setup_bands(struct ieee80211_hw *hw) sband->n_channels = ath5k_setup_channels(ah, sband->channels, AR5K_MODE_11G, max_c); - hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband; + hw->wiphy->bands[NL80211_BAND_2GHZ] = sband; count_c = sband->n_channels; max_c -= count_c; } else if (test_bit(AR5K_MODE_11B, ah->ah_capabilities.cap_mode)) { @@ -407,7 +407,7 @@ ath5k_setup_bands(struct ieee80211_hw *hw) sband->n_channels = ath5k_setup_channels(ah, sband->channels, AR5K_MODE_11B, max_c); - hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband; + hw->wiphy->bands[NL80211_BAND_2GHZ] = sband; count_c = sband->n_channels; max_c -= count_c; } @@ -415,9 +415,9 @@ ath5k_setup_bands(struct ieee80211_hw *hw) /* 5GHz band, A mode */ if (test_bit(AR5K_MODE_11A, ah->ah_capabilities.cap_mode)) { - sband = &ah->sbands[IEEE80211_BAND_5GHZ]; - sband->band = IEEE80211_BAND_5GHZ; - sband->bitrates = &ah->rates[IEEE80211_BAND_5GHZ][0]; + sband = &ah->sbands[NL80211_BAND_5GHZ]; + sband->band = NL80211_BAND_5GHZ; + sband->bitrates = &ah->rates[NL80211_BAND_5GHZ][0]; memcpy(sband->bitrates, &ath5k_rates[4], sizeof(struct ieee80211_rate) * 8); @@ -427,7 +427,7 @@ ath5k_setup_bands(struct ieee80211_hw *hw) sband->n_channels = ath5k_setup_channels(ah, sband->channels, AR5K_MODE_11A, max_c); - hw->wiphy->bands[IEEE80211_BAND_5GHZ] = sband; + hw->wiphy->bands[NL80211_BAND_5GHZ] = sband; } ath5k_setup_rate_idx(ah, sband); diff --git a/drivers/net/wireless/ath/ath5k/debug.c b/drivers/net/wireless/ath/ath5k/debug.c index 654a1e33f827..929d7ccc031c 100644 --- a/drivers/net/wireless/ath/ath5k/debug.c +++ b/drivers/net/wireless/ath/ath5k/debug.c @@ -1043,14 +1043,14 @@ ath5k_debug_dump_bands(struct 
ath5k_hw *ah) BUG_ON(!ah->sbands); - for (b = 0; b < IEEE80211_NUM_BANDS; b++) { + for (b = 0; b < NUM_NL80211_BANDS; b++) { struct ieee80211_supported_band *band = &ah->sbands[b]; char bname[6]; switch (band->band) { - case IEEE80211_BAND_2GHZ: + case NL80211_BAND_2GHZ: strcpy(bname, "2 GHz"); break; - case IEEE80211_BAND_5GHZ: + case NL80211_BAND_5GHZ: strcpy(bname, "5 GHz"); break; default: diff --git a/drivers/net/wireless/ath/ath5k/pcu.c b/drivers/net/wireless/ath/ath5k/pcu.c index bf29da5e90da..fc47b70988b1 100644 --- a/drivers/net/wireless/ath/ath5k/pcu.c +++ b/drivers/net/wireless/ath/ath5k/pcu.c @@ -110,7 +110,7 @@ static const unsigned int ack_rates_high[] = * bwmodes. */ int -ath5k_hw_get_frame_duration(struct ath5k_hw *ah, enum ieee80211_band band, +ath5k_hw_get_frame_duration(struct ath5k_hw *ah, enum nl80211_band band, int len, struct ieee80211_rate *rate, bool shortpre) { int sifs, preamble, plcp_bits, sym_time; @@ -221,7 +221,7 @@ ath5k_hw_get_default_sifs(struct ath5k_hw *ah) case AR5K_BWMODE_DEFAULT: sifs = AR5K_INIT_SIFS_DEFAULT_BG; default: - if (channel->band == IEEE80211_BAND_5GHZ) + if (channel->band == NL80211_BAND_5GHZ) sifs = AR5K_INIT_SIFS_DEFAULT_A; break; } @@ -279,7 +279,7 @@ ath5k_hw_write_rate_duration(struct ath5k_hw *ah) struct ieee80211_rate *rate; unsigned int i; /* 802.11g covers both OFDM and CCK */ - u8 band = IEEE80211_BAND_2GHZ; + u8 band = NL80211_BAND_2GHZ; /* Write rate duration table */ for (i = 0; i < ah->sbands[band].n_bitrates; i++) { diff --git a/drivers/net/wireless/ath/ath5k/phy.c b/drivers/net/wireless/ath/ath5k/phy.c index 98ee85456321..641b13a279e1 100644 --- a/drivers/net/wireless/ath/ath5k/phy.c +++ b/drivers/net/wireless/ath/ath5k/phy.c @@ -75,13 +75,13 @@ /** * ath5k_hw_radio_revision() - Get the PHY Chip revision * @ah: The &struct ath5k_hw - * @band: One of enum ieee80211_band + * @band: One of enum nl80211_band * * Returns the revision number of a 2GHz, 5GHz or single chip * radio. */ u16 -ath5k_hw_radio_revision(struct ath5k_hw *ah, enum ieee80211_band band) +ath5k_hw_radio_revision(struct ath5k_hw *ah, enum nl80211_band band) { unsigned int i; u32 srev; @@ -91,10 +91,10 @@ ath5k_hw_radio_revision(struct ath5k_hw *ah, enum ieee80211_band band) * Set the radio chip access register */ switch (band) { - case IEEE80211_BAND_2GHZ: + case NL80211_BAND_2GHZ: ath5k_hw_reg_write(ah, AR5K_PHY_SHIFT_2GHZ, AR5K_PHY(0)); break; - case IEEE80211_BAND_5GHZ: + case NL80211_BAND_5GHZ: ath5k_hw_reg_write(ah, AR5K_PHY_SHIFT_5GHZ, AR5K_PHY(0)); break; default: @@ -138,11 +138,11 @@ ath5k_channel_ok(struct ath5k_hw *ah, struct ieee80211_channel *channel) u16 freq = channel->center_freq; /* Check if the channel is in our supported range */ - if (channel->band == IEEE80211_BAND_2GHZ) { + if (channel->band == NL80211_BAND_2GHZ) { if ((freq >= ah->ah_capabilities.cap_range.range_2ghz_min) && (freq <= ah->ah_capabilities.cap_range.range_2ghz_max)) return true; - } else if (channel->band == IEEE80211_BAND_5GHZ) + } else if (channel->band == NL80211_BAND_5GHZ) if ((freq >= ah->ah_capabilities.cap_range.range_5ghz_min) && (freq <= ah->ah_capabilities.cap_range.range_5ghz_max)) return true; @@ -743,7 +743,7 @@ done: /** * ath5k_hw_rfgain_init() - Write initial RF gain settings to hw * @ah: The &struct ath5k_hw - * @band: One of enum ieee80211_band + * @band: One of enum nl80211_band * * Write initial RF gain table to set the RF sensitivity. 
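The ath5k hunks above (debug band dump, frame-duration and radio-revision helpers, RF gain init) now take enum nl80211_band directly; the two band enumerators map one-to-one onto their nl80211 counterparts, so switches over the band keep the same arms. A hedged sketch of the labelling idiom as a standalone helper (hypothetical name, not in the tree):

static const char *band_name(enum nl80211_band band)
{
        switch (band) {
        case NL80211_BAND_2GHZ:
                return "2 GHz";
        case NL80211_BAND_5GHZ:
                return "5 GHz";
        default:
                return "unknown";
        }
}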
* @@ -751,7 +751,7 @@ done: * with Gain_F calibration */ static int -ath5k_hw_rfgain_init(struct ath5k_hw *ah, enum ieee80211_band band) +ath5k_hw_rfgain_init(struct ath5k_hw *ah, enum nl80211_band band) { const struct ath5k_ini_rfgain *ath5k_rfg; unsigned int i, size, index; @@ -786,7 +786,7 @@ ath5k_hw_rfgain_init(struct ath5k_hw *ah, enum ieee80211_band band) return -EINVAL; } - index = (band == IEEE80211_BAND_2GHZ) ? 1 : 0; + index = (band == NL80211_BAND_2GHZ) ? 1 : 0; for (i = 0; i < size; i++) { AR5K_REG_WAIT(i); @@ -917,7 +917,7 @@ ath5k_hw_rfregs_init(struct ath5k_hw *ah, } /* Set Output and Driver bias current (OB/DB) */ - if (channel->band == IEEE80211_BAND_2GHZ) { + if (channel->band == NL80211_BAND_2GHZ) { if (channel->hw_value == AR5K_MODE_11B) ee_mode = AR5K_EEPROM_MODE_11B; @@ -944,7 +944,7 @@ ath5k_hw_rfregs_init(struct ath5k_hw *ah, AR5K_RF_DB_2GHZ, true); /* RF5111 always needs OB/DB for 5GHz, even if we use 2GHz */ - } else if ((channel->band == IEEE80211_BAND_5GHZ) || + } else if ((channel->band == NL80211_BAND_5GHZ) || (ah->ah_radio == AR5K_RF5111)) { /* For 11a, Turbo and XR we need to choose @@ -1145,7 +1145,7 @@ ath5k_hw_rfregs_init(struct ath5k_hw *ah, } if (ah->ah_radio == AR5K_RF5413 && - channel->band == IEEE80211_BAND_2GHZ) { + channel->band == NL80211_BAND_2GHZ) { ath5k_hw_rfb_op(ah, rf_regs, 1, AR5K_RF_DERBY_CHAN_SEL_MODE, true); @@ -1270,7 +1270,7 @@ ath5k_hw_rf5111_channel(struct ath5k_hw *ah, */ data0 = data1 = 0; - if (channel->band == IEEE80211_BAND_2GHZ) { + if (channel->band == NL80211_BAND_2GHZ) { /* Map 2GHz channel to 5GHz Atheros channel ID */ ret = ath5k_hw_rf5111_chan2athchan( ieee80211_frequency_to_channel(channel->center_freq), @@ -1919,7 +1919,7 @@ ath5k_hw_set_spur_mitigation_filter(struct ath5k_hw *ah, /* Convert current frequency to fbin value (the same way channels * are stored on EEPROM, check out ath5k_eeprom_bin2freq) and scale * up by 2 so we can compare it later */ - if (channel->band == IEEE80211_BAND_2GHZ) { + if (channel->band == NL80211_BAND_2GHZ) { chan_fbin = (channel->center_freq - 2300) * 10; freq_band = AR5K_EEPROM_BAND_2GHZ; } else { @@ -1983,7 +1983,7 @@ ath5k_hw_set_spur_mitigation_filter(struct ath5k_hw *ah, symbol_width = AR5K_SPUR_SYMBOL_WIDTH_BASE_100Hz / 4; break; default: - if (channel->band == IEEE80211_BAND_5GHZ) { + if (channel->band == NL80211_BAND_5GHZ) { /* Both sample_freq and chip_freq are 40MHz */ spur_delta_phase = (spur_offset << 17) / 25; spur_freq_sigma_delta = diff --git a/drivers/net/wireless/ath/ath5k/qcu.c b/drivers/net/wireless/ath/ath5k/qcu.c index ddaad712c59a..beda11ce34a7 100644 --- a/drivers/net/wireless/ath/ath5k/qcu.c +++ b/drivers/net/wireless/ath/ath5k/qcu.c @@ -559,7 +559,7 @@ ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue) int ath5k_hw_set_ifs_intervals(struct ath5k_hw *ah, unsigned int slot_time) { struct ieee80211_channel *channel = ah->ah_current_channel; - enum ieee80211_band band; + enum nl80211_band band; struct ieee80211_supported_band *sband; struct ieee80211_rate *rate; u32 ack_tx_time, eifs, eifs_clock, sifs, sifs_clock; @@ -596,10 +596,10 @@ int ath5k_hw_set_ifs_intervals(struct ath5k_hw *ah, unsigned int slot_time) * * Also we have different lowest rate for 802.11a */ - if (channel->band == IEEE80211_BAND_5GHZ) - band = IEEE80211_BAND_5GHZ; + if (channel->band == NL80211_BAND_5GHZ) + band = NL80211_BAND_5GHZ; else - band = IEEE80211_BAND_2GHZ; + band = NL80211_BAND_2GHZ; switch (ah->ah_bwmode) { case AR5K_BWMODE_5MHZ: diff --git 
a/drivers/net/wireless/ath/ath5k/reset.c b/drivers/net/wireless/ath/ath5k/reset.c index 4b1c87fa15ac..56d7925a0c2c 100644 --- a/drivers/net/wireless/ath/ath5k/reset.c +++ b/drivers/net/wireless/ath/ath5k/reset.c @@ -752,7 +752,7 @@ ath5k_hw_nic_wakeup(struct ath5k_hw *ah, struct ieee80211_channel *channel) clock = AR5K_PHY_PLL_RF5111; /*Zero*/ } - if (channel->band == IEEE80211_BAND_2GHZ) { + if (channel->band == NL80211_BAND_2GHZ) { mode |= AR5K_PHY_MODE_FREQ_2GHZ; clock |= AR5K_PHY_PLL_44MHZ; @@ -771,7 +771,7 @@ ath5k_hw_nic_wakeup(struct ath5k_hw *ah, struct ieee80211_channel *channel) else mode |= AR5K_PHY_MODE_MOD_DYN; } - } else if (channel->band == IEEE80211_BAND_5GHZ) { + } else if (channel->band == NL80211_BAND_5GHZ) { mode |= (AR5K_PHY_MODE_FREQ_5GHZ | AR5K_PHY_MODE_MOD_OFDM); @@ -906,7 +906,7 @@ ath5k_hw_tweak_initval_settings(struct ath5k_hw *ah, u32 data; ath5k_hw_reg_write(ah, AR5K_PHY_CCKTXCTL_WORLD, AR5K_PHY_CCKTXCTL); - if (channel->band == IEEE80211_BAND_5GHZ) + if (channel->band == NL80211_BAND_5GHZ) data = 0xffb81020; else data = 0xffb80d20; diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c index 7f3f94fbf157..4e11ba06f089 100644 --- a/drivers/net/wireless/ath/ath6kl/cfg80211.c +++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c @@ -34,7 +34,7 @@ } #define CHAN2G(_channel, _freq, _flags) { \ - .band = IEEE80211_BAND_2GHZ, \ + .band = NL80211_BAND_2GHZ, \ .hw_value = (_channel), \ .center_freq = (_freq), \ .flags = (_flags), \ @@ -43,7 +43,7 @@ } #define CHAN5G(_channel, _flags) { \ - .band = IEEE80211_BAND_5GHZ, \ + .band = NL80211_BAND_5GHZ, \ .hw_value = (_channel), \ .center_freq = 5000 + (5 * (_channel)), \ .flags = (_flags), \ @@ -2583,7 +2583,7 @@ void ath6kl_check_wow_status(struct ath6kl *ar) } #endif -static int ath6kl_set_htcap(struct ath6kl_vif *vif, enum ieee80211_band band, +static int ath6kl_set_htcap(struct ath6kl_vif *vif, enum nl80211_band band, bool ht_enable) { struct ath6kl_htcap *htcap = &vif->htcap[band]; @@ -2594,7 +2594,7 @@ static int ath6kl_set_htcap(struct ath6kl_vif *vif, enum ieee80211_band band, if (ht_enable) { /* Set default ht capabilities */ htcap->ht_enable = true; - htcap->cap_info = (band == IEEE80211_BAND_2GHZ) ? + htcap->cap_info = (band == NL80211_BAND_2GHZ) ? ath6kl_g_htcap : ath6kl_a_htcap; htcap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_16K; } else /* Disable ht */ @@ -2609,7 +2609,7 @@ static int ath6kl_restore_htcap(struct ath6kl_vif *vif) struct wiphy *wiphy = vif->ar->wiphy; int band, ret = 0; - for (band = 0; band < IEEE80211_NUM_BANDS; band++) { + for (band = 0; band < NUM_NL80211_BANDS; band++) { if (!wiphy->bands[band]) continue; @@ -3530,7 +3530,7 @@ static void ath6kl_cfg80211_reg_notify(struct wiphy *wiphy, struct regulatory_request *request) { struct ath6kl *ar = wiphy_priv(wiphy); - u32 rates[IEEE80211_NUM_BANDS]; + u32 rates[NUM_NL80211_BANDS]; int ret, i; ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, @@ -3555,7 +3555,7 @@ static void ath6kl_cfg80211_reg_notify(struct wiphy *wiphy, * changed. 
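Earlier in this hunk the ath6kl CHAN2G/CHAN5G initializer macros only have their .band member retagged; the remaining struct ieee80211_channel fields are untouched. Roughly what one 2.4 GHz entry expands to after the rename (illustrative channel; field list trimmed to what the macro shows, assuming <net/cfg80211.h>):

static struct ieee80211_channel example_2g_chan = {
        .band        = NL80211_BAND_2GHZ,
        .hw_value    = 1,       /* channel number */
        .center_freq = 2412,    /* MHz */
        .flags       = 0,
};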
*/ - for (i = 0; i < IEEE80211_NUM_BANDS; i++) + for (i = 0; i < NUM_NL80211_BANDS; i++) if (wiphy->bands[i]) rates[i] = (1 << wiphy->bands[i]->n_bitrates) - 1; @@ -3791,8 +3791,8 @@ struct wireless_dev *ath6kl_interface_add(struct ath6kl *ar, const char *name, vif->listen_intvl_t = ATH6KL_DEFAULT_LISTEN_INTVAL; vif->bmiss_time_t = ATH6KL_DEFAULT_BMISS_TIME; vif->bg_scan_period = 0; - vif->htcap[IEEE80211_BAND_2GHZ].ht_enable = true; - vif->htcap[IEEE80211_BAND_5GHZ].ht_enable = true; + vif->htcap[NL80211_BAND_2GHZ].ht_enable = true; + vif->htcap[NL80211_BAND_5GHZ].ht_enable = true; memcpy(ndev->dev_addr, ar->mac_addr, ETH_ALEN); if (fw_vif_idx != 0) { @@ -3943,9 +3943,9 @@ int ath6kl_cfg80211_init(struct ath6kl *ar) wiphy->available_antennas_rx = ar->hw.rx_ant; if (band_2gig) - wiphy->bands[IEEE80211_BAND_2GHZ] = &ath6kl_band_2ghz; + wiphy->bands[NL80211_BAND_2GHZ] = &ath6kl_band_2ghz; if (band_5gig) - wiphy->bands[IEEE80211_BAND_5GHZ] = &ath6kl_band_5ghz; + wiphy->bands[NL80211_BAND_5GHZ] = &ath6kl_band_5ghz; wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM; diff --git a/drivers/net/wireless/ath/ath6kl/core.h b/drivers/net/wireless/ath/ath6kl/core.h index 5f3acfe6015e..713a571a27ce 100644 --- a/drivers/net/wireless/ath/ath6kl/core.h +++ b/drivers/net/wireless/ath/ath6kl/core.h @@ -623,7 +623,7 @@ struct ath6kl_vif { struct ath6kl_wep_key wep_key_list[WMI_MAX_KEY_INDEX + 1]; struct ath6kl_key keys[WMI_MAX_KEY_INDEX + 1]; struct aggr_info *aggr_cntxt; - struct ath6kl_htcap htcap[IEEE80211_NUM_BANDS]; + struct ath6kl_htcap htcap[NUM_NL80211_BANDS]; struct timer_list disconnect_timer; struct timer_list sched_scan_timer; diff --git a/drivers/net/wireless/ath/ath6kl/wmi.c b/drivers/net/wireless/ath/ath6kl/wmi.c index 0b3e9c0293e0..631c3a0c572b 100644 --- a/drivers/net/wireless/ath/ath6kl/wmi.c +++ b/drivers/net/wireless/ath/ath6kl/wmi.c @@ -2048,7 +2048,7 @@ int ath6kl_wmi_beginscan_cmd(struct wmi *wmi, u8 if_idx, sc->no_cck = cpu_to_le32(no_cck); sc->num_ch = num_chan; - for (band = 0; band < IEEE80211_NUM_BANDS; band++) { + for (band = 0; band < NUM_NL80211_BANDS; band++) { sband = ar->wiphy->bands[band]; if (!sband) @@ -2770,10 +2770,10 @@ static int ath6kl_set_bitrate_mask64(struct wmi *wmi, u8 if_idx, memset(&ratemask, 0, sizeof(ratemask)); /* only check 2.4 and 5 GHz bands, skip the rest */ - for (band = 0; band <= IEEE80211_BAND_5GHZ; band++) { + for (band = 0; band <= NL80211_BAND_5GHZ; band++) { /* copy legacy rate mask */ ratemask[band] = mask->control[band].legacy; - if (band == IEEE80211_BAND_5GHZ) + if (band == NL80211_BAND_5GHZ) ratemask[band] = mask->control[band].legacy << 4; @@ -2799,9 +2799,9 @@ static int ath6kl_set_bitrate_mask64(struct wmi *wmi, u8 if_idx, if (mode == WMI_RATES_MODE_11A || mode == WMI_RATES_MODE_11A_HT20 || mode == WMI_RATES_MODE_11A_HT40) - band = IEEE80211_BAND_5GHZ; + band = NL80211_BAND_5GHZ; else - band = IEEE80211_BAND_2GHZ; + band = NL80211_BAND_2GHZ; cmd->ratemask[mode] = cpu_to_le64(ratemask[band]); } @@ -2822,10 +2822,10 @@ static int ath6kl_set_bitrate_mask32(struct wmi *wmi, u8 if_idx, memset(&ratemask, 0, sizeof(ratemask)); /* only check 2.4 and 5 GHz bands, skip the rest */ - for (band = 0; band <= IEEE80211_BAND_5GHZ; band++) { + for (band = 0; band <= NL80211_BAND_5GHZ; band++) { /* copy legacy rate mask */ ratemask[band] = mask->control[band].legacy; - if (band == IEEE80211_BAND_5GHZ) + if (band == NL80211_BAND_5GHZ) ratemask[band] = mask->control[band].legacy << 4; @@ -2849,9 +2849,9 @@ static int ath6kl_set_bitrate_mask32(struct wmi *wmi, 
u8 if_idx, if (mode == WMI_RATES_MODE_11A || mode == WMI_RATES_MODE_11A_HT20 || mode == WMI_RATES_MODE_11A_HT40) - band = IEEE80211_BAND_5GHZ; + band = NL80211_BAND_5GHZ; else - band = IEEE80211_BAND_2GHZ; + band = NL80211_BAND_2GHZ; cmd->ratemask[mode] = cpu_to_le32(ratemask[band]); } @@ -3174,7 +3174,7 @@ int ath6kl_wmi_set_keepalive_cmd(struct wmi *wmi, u8 if_idx, } int ath6kl_wmi_set_htcap_cmd(struct wmi *wmi, u8 if_idx, - enum ieee80211_band band, + enum nl80211_band band, struct ath6kl_htcap *htcap) { struct sk_buff *skb; @@ -3187,7 +3187,7 @@ int ath6kl_wmi_set_htcap_cmd(struct wmi *wmi, u8 if_idx, cmd = (struct wmi_set_htcap_cmd *) skb->data; /* - * NOTE: Band in firmware matches enum ieee80211_band, it is unlikely + * NOTE: Band in firmware matches enum nl80211_band, it is unlikely * this will be changed in firmware. If at all there is any change in * band value, the host needs to be fixed. */ diff --git a/drivers/net/wireless/ath/ath6kl/wmi.h b/drivers/net/wireless/ath/ath6kl/wmi.h index 05d25a94c781..3af464a73b58 100644 --- a/drivers/net/wireless/ath/ath6kl/wmi.h +++ b/drivers/net/wireless/ath/ath6kl/wmi.h @@ -2628,7 +2628,7 @@ int ath6kl_wmi_set_wmm_txop(struct wmi *wmi, u8 if_idx, enum wmi_txop_cfg cfg); int ath6kl_wmi_set_keepalive_cmd(struct wmi *wmi, u8 if_idx, u8 keep_alive_intvl); int ath6kl_wmi_set_htcap_cmd(struct wmi *wmi, u8 if_idx, - enum ieee80211_band band, + enum nl80211_band band, struct ath6kl_htcap *htcap); int ath6kl_wmi_test_cmd(struct wmi *wmi, void *buf, size_t len); diff --git a/drivers/net/wireless/ath/ath9k/calib.c b/drivers/net/wireless/ath/ath9k/calib.c index 37f6d66d1671..0f71146b781d 100644 --- a/drivers/net/wireless/ath/ath9k/calib.c +++ b/drivers/net/wireless/ath/ath9k/calib.c @@ -145,14 +145,14 @@ static void ath9k_hw_update_nfcal_hist_buffer(struct ath_hw *ah, } static bool ath9k_hw_get_nf_thresh(struct ath_hw *ah, - enum ieee80211_band band, + enum nl80211_band band, int16_t *nft) { switch (band) { - case IEEE80211_BAND_5GHZ: + case NL80211_BAND_5GHZ: *nft = (int8_t)ah->eep_ops->get_eeprom(ah, EEP_NFTHRESH_5); break; - case IEEE80211_BAND_2GHZ: + case NL80211_BAND_2GHZ: *nft = (int8_t)ah->eep_ops->get_eeprom(ah, EEP_NFTHRESH_2); break; default: diff --git a/drivers/net/wireless/ath/ath9k/channel.c b/drivers/net/wireless/ath/ath9k/channel.c index 319cb5f25f58..e56bafcf5864 100644 --- a/drivers/net/wireless/ath/ath9k/channel.c +++ b/drivers/net/wireless/ath/ath9k/channel.c @@ -107,9 +107,9 @@ void ath_chanctx_init(struct ath_softc *sc) struct ieee80211_channel *chan; int i, j; - sband = &common->sbands[IEEE80211_BAND_2GHZ]; + sband = &common->sbands[NL80211_BAND_2GHZ]; if (!sband->n_channels) - sband = &common->sbands[IEEE80211_BAND_5GHZ]; + sband = &common->sbands[NL80211_BAND_5GHZ]; chan = &sband->channels[0]; for (i = 0; i < ATH9K_NUM_CHANCTX; i++) { @@ -1333,9 +1333,9 @@ void ath9k_offchannel_init(struct ath_softc *sc) struct ieee80211_channel *chan; int i; - sband = &common->sbands[IEEE80211_BAND_2GHZ]; + sband = &common->sbands[NL80211_BAND_2GHZ]; if (!sband->n_channels) - sband = &common->sbands[IEEE80211_BAND_5GHZ]; + sband = &common->sbands[NL80211_BAND_5GHZ]; chan = &sband->channels[0]; diff --git a/drivers/net/wireless/ath/ath9k/common-init.c b/drivers/net/wireless/ath/ath9k/common-init.c index a006c1499728..8b4f7fdabf58 100644 --- a/drivers/net/wireless/ath/ath9k/common-init.c +++ b/drivers/net/wireless/ath/ath9k/common-init.c @@ -19,14 +19,14 @@ #include "common.h" #define CHAN2G(_freq, _idx) { \ - .band = IEEE80211_BAND_2GHZ, \ + 
.band = NL80211_BAND_2GHZ, \ .center_freq = (_freq), \ .hw_value = (_idx), \ .max_power = 20, \ } #define CHAN5G(_freq, _idx) { \ - .band = IEEE80211_BAND_5GHZ, \ + .band = NL80211_BAND_5GHZ, \ .center_freq = (_freq), \ .hw_value = (_idx), \ .max_power = 20, \ @@ -139,12 +139,12 @@ int ath9k_cmn_init_channels_rates(struct ath_common *common) memcpy(channels, ath9k_2ghz_chantable, sizeof(ath9k_2ghz_chantable)); - common->sbands[IEEE80211_BAND_2GHZ].channels = channels; - common->sbands[IEEE80211_BAND_2GHZ].band = IEEE80211_BAND_2GHZ; - common->sbands[IEEE80211_BAND_2GHZ].n_channels = + common->sbands[NL80211_BAND_2GHZ].channels = channels; + common->sbands[NL80211_BAND_2GHZ].band = NL80211_BAND_2GHZ; + common->sbands[NL80211_BAND_2GHZ].n_channels = ARRAY_SIZE(ath9k_2ghz_chantable); - common->sbands[IEEE80211_BAND_2GHZ].bitrates = ath9k_legacy_rates; - common->sbands[IEEE80211_BAND_2GHZ].n_bitrates = + common->sbands[NL80211_BAND_2GHZ].bitrates = ath9k_legacy_rates; + common->sbands[NL80211_BAND_2GHZ].n_bitrates = ARRAY_SIZE(ath9k_legacy_rates); } @@ -156,13 +156,13 @@ int ath9k_cmn_init_channels_rates(struct ath_common *common) memcpy(channels, ath9k_5ghz_chantable, sizeof(ath9k_5ghz_chantable)); - common->sbands[IEEE80211_BAND_5GHZ].channels = channels; - common->sbands[IEEE80211_BAND_5GHZ].band = IEEE80211_BAND_5GHZ; - common->sbands[IEEE80211_BAND_5GHZ].n_channels = + common->sbands[NL80211_BAND_5GHZ].channels = channels; + common->sbands[NL80211_BAND_5GHZ].band = NL80211_BAND_5GHZ; + common->sbands[NL80211_BAND_5GHZ].n_channels = ARRAY_SIZE(ath9k_5ghz_chantable); - common->sbands[IEEE80211_BAND_5GHZ].bitrates = + common->sbands[NL80211_BAND_5GHZ].bitrates = ath9k_legacy_rates + 4; - common->sbands[IEEE80211_BAND_5GHZ].n_bitrates = + common->sbands[NL80211_BAND_5GHZ].n_bitrates = ARRAY_SIZE(ath9k_legacy_rates) - 4; } return 0; @@ -236,9 +236,9 @@ void ath9k_cmn_reload_chainmask(struct ath_hw *ah) if (ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ) ath9k_cmn_setup_ht_cap(ah, - &common->sbands[IEEE80211_BAND_2GHZ].ht_cap); + &common->sbands[NL80211_BAND_2GHZ].ht_cap); if (ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ) ath9k_cmn_setup_ht_cap(ah, - &common->sbands[IEEE80211_BAND_5GHZ].ht_cap); + &common->sbands[NL80211_BAND_5GHZ].ht_cap); } EXPORT_SYMBOL(ath9k_cmn_reload_chainmask); diff --git a/drivers/net/wireless/ath/ath9k/common.c b/drivers/net/wireless/ath/ath9k/common.c index e8c699446470..b80e08b13b74 100644 --- a/drivers/net/wireless/ath/ath9k/common.c +++ b/drivers/net/wireless/ath/ath9k/common.c @@ -173,7 +173,7 @@ int ath9k_cmn_process_rate(struct ath_common *common, struct ieee80211_rx_status *rxs) { struct ieee80211_supported_band *sband; - enum ieee80211_band band; + enum nl80211_band band; unsigned int i = 0; struct ath_hw *ah = common->ah; @@ -305,7 +305,7 @@ static void ath9k_cmn_update_ichannel(struct ath9k_channel *ichan, ichan->channel = chan->center_freq; ichan->chan = chan; - if (chan->band == IEEE80211_BAND_5GHZ) + if (chan->band == NL80211_BAND_5GHZ) flags |= CHANNEL_5GHZ; switch (chandef->width) { diff --git a/drivers/net/wireless/ath/ath9k/debug_sta.c b/drivers/net/wireless/ath/ath9k/debug_sta.c index c2ca57a2ed09..b66cfa91364f 100644 --- a/drivers/net/wireless/ath/ath9k/debug_sta.c +++ b/drivers/net/wireless/ath/ath9k/debug_sta.c @@ -139,7 +139,7 @@ void ath_debug_rate_stats(struct ath_softc *sc, } if (IS_OFDM_RATE(rs->rs_rate)) { - if (ah->curchan->chan->band == IEEE80211_BAND_2GHZ) + if (ah->curchan->chan->band == NL80211_BAND_2GHZ) rstats->ofdm_stats[rxs->rate_idx - 4].ofdm_cnt++; 
else rstats->ofdm_stats[rxs->rate_idx].ofdm_cnt++; @@ -173,7 +173,7 @@ static ssize_t read_file_node_recv(struct file *file, char __user *user_buf, struct ath_hw *ah = sc->sc_ah; struct ath_rx_rate_stats *rstats; struct ieee80211_sta *sta = an->sta; - enum ieee80211_band band; + enum nl80211_band band; u32 len = 0, size = 4096; char *buf; size_t retval; @@ -206,7 +206,7 @@ static ssize_t read_file_node_recv(struct file *file, char __user *user_buf, len += scnprintf(buf + len, size - len, "\n"); legacy: - if (band == IEEE80211_BAND_2GHZ) { + if (band == NL80211_BAND_2GHZ) { PRINT_CCK_RATE("CCK-1M/LP", 0, false); PRINT_CCK_RATE("CCK-2M/LP", 1, false); PRINT_CCK_RATE("CCK-5.5M/LP", 2, false); diff --git a/drivers/net/wireless/ath/ath9k/dynack.c b/drivers/net/wireless/ath/ath9k/dynack.c index 22b3cc4c27cd..d2ff0fc0484c 100644 --- a/drivers/net/wireless/ath/ath9k/dynack.c +++ b/drivers/net/wireless/ath/ath9k/dynack.c @@ -212,7 +212,7 @@ void ath_dynack_sample_tx_ts(struct ath_hw *ah, struct sk_buff *skb, struct ieee80211_tx_rate *rates = info->status.rates; rate = &common->sbands[info->band].bitrates[rates[ridx].idx]; - if (info->band == IEEE80211_BAND_2GHZ && + if (info->band == NL80211_BAND_2GHZ && !(rate->flags & IEEE80211_RATE_ERP_G)) phy = WLAN_RC_PHY_CCK; else diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c index c2249ad54085..c148c6c504f7 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c @@ -765,11 +765,11 @@ static void ath9k_set_hw_capab(struct ath9k_htc_priv *priv, sizeof(struct htc_frame_hdr) + 4; if (priv->ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ) - hw->wiphy->bands[IEEE80211_BAND_2GHZ] = - &common->sbands[IEEE80211_BAND_2GHZ]; + hw->wiphy->bands[NL80211_BAND_2GHZ] = + &common->sbands[NL80211_BAND_2GHZ]; if (priv->ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ) - hw->wiphy->bands[IEEE80211_BAND_5GHZ] = - &common->sbands[IEEE80211_BAND_5GHZ]; + hw->wiphy->bands[NL80211_BAND_5GHZ] = + &common->sbands[NL80211_BAND_5GHZ]; ath9k_cmn_reload_chainmask(ah); diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c index 639294a9e34d..8a8d7853da15 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c @@ -1770,8 +1770,8 @@ static int ath9k_htc_set_bitrate_mask(struct ieee80211_hw *hw, memset(&tmask, 0, sizeof(struct ath9k_htc_target_rate_mask)); tmask.vif_index = avp->index; - tmask.band = IEEE80211_BAND_2GHZ; - tmask.mask = cpu_to_be32(mask->control[IEEE80211_BAND_2GHZ].legacy); + tmask.band = NL80211_BAND_2GHZ; + tmask.mask = cpu_to_be32(mask->control[NL80211_BAND_2GHZ].legacy); WMI_CMD_BUF(WMI_BITRATE_MASK_CMDID, &tmask); if (ret) { @@ -1781,8 +1781,8 @@ static int ath9k_htc_set_bitrate_mask(struct ieee80211_hw *hw, goto out; } - tmask.band = IEEE80211_BAND_5GHZ; - tmask.mask = cpu_to_be32(mask->control[IEEE80211_BAND_5GHZ].legacy); + tmask.band = NL80211_BAND_5GHZ; + tmask.mask = cpu_to_be32(mask->control[NL80211_BAND_5GHZ].legacy); WMI_CMD_BUF(WMI_BITRATE_MASK_CMDID, &tmask); if (ret) { @@ -1793,8 +1793,8 @@ static int ath9k_htc_set_bitrate_mask(struct ieee80211_hw *hw, } ath_dbg(common, CONFIG, "Set bitrate masks: 0x%x, 0x%x\n", - mask->control[IEEE80211_BAND_2GHZ].legacy, - mask->control[IEEE80211_BAND_5GHZ].legacy); + mask->control[NL80211_BAND_2GHZ].legacy, + mask->control[NL80211_BAND_5GHZ].legacy); out: return ret; } diff --git 
a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c index cc9648f844ae..f333ef1e3e7b 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c @@ -494,7 +494,7 @@ static void ath9k_htc_tx_process(struct ath9k_htc_priv *priv, if (txs->ts_flags & ATH9K_HTC_TXSTAT_SGI) rate->flags |= IEEE80211_TX_RC_SHORT_GI; } else { - if (cur_conf->chandef.chan->band == IEEE80211_BAND_5GHZ) + if (cur_conf->chandef.chan->band == NL80211_BAND_5GHZ) rate->idx += 4; /* No CCK rates */ } diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c index 77ace8d72d54..4bf1e244b49b 100644 --- a/drivers/net/wireless/ath/ath9k/init.c +++ b/drivers/net/wireless/ath/ath9k/init.c @@ -705,9 +705,9 @@ static void ath9k_init_txpower_limits(struct ath_softc *sc) struct ath9k_channel *curchan = ah->curchan; if (ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ) - ath9k_init_band_txpower(sc, IEEE80211_BAND_2GHZ); + ath9k_init_band_txpower(sc, NL80211_BAND_2GHZ); if (ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ) - ath9k_init_band_txpower(sc, IEEE80211_BAND_5GHZ); + ath9k_init_band_txpower(sc, NL80211_BAND_5GHZ); ah->curchan = curchan; } @@ -879,11 +879,11 @@ static void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw) sc->ant_tx = hw->wiphy->available_antennas_tx; if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ) - hw->wiphy->bands[IEEE80211_BAND_2GHZ] = - &common->sbands[IEEE80211_BAND_2GHZ]; + hw->wiphy->bands[NL80211_BAND_2GHZ] = + &common->sbands[NL80211_BAND_2GHZ]; if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ) - hw->wiphy->bands[IEEE80211_BAND_5GHZ] = - &common->sbands[IEEE80211_BAND_5GHZ]; + hw->wiphy->bands[NL80211_BAND_5GHZ] = + &common->sbands[NL80211_BAND_5GHZ]; #ifdef CONFIG_ATH9K_CHANNEL_CONTEXT ath9k_set_mcc_capab(sc, hw); diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index 50ec4c9a9da7..8b6398850657 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c @@ -1933,14 +1933,14 @@ static int ath9k_get_survey(struct ieee80211_hw *hw, int idx, if (idx == 0) ath_update_survey_stats(sc); - sband = hw->wiphy->bands[IEEE80211_BAND_2GHZ]; + sband = hw->wiphy->bands[NL80211_BAND_2GHZ]; if (sband && idx >= sband->n_channels) { idx -= sband->n_channels; sband = NULL; } if (!sband) - sband = hw->wiphy->bands[IEEE80211_BAND_5GHZ]; + sband = hw->wiphy->bands[NL80211_BAND_5GHZ]; if (!sband || idx >= sband->n_channels) { spin_unlock_bh(&common->cc_lock); diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c index fe795fc5288c..8ddd604bd00c 100644 --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ b/drivers/net/wireless/ath/ath9k/xmit.c @@ -1112,7 +1112,7 @@ static u8 ath_get_rate_txpower(struct ath_softc *sc, struct ath_buf *bf, bool is_2ghz; struct modal_eep_header *pmodal; - is_2ghz = info->band == IEEE80211_BAND_2GHZ; + is_2ghz = info->band == NL80211_BAND_2GHZ; pmodal = &eep->modalHeader[is_2ghz]; power_ht40delta = pmodal->ht40PowerIncForPdadc; } else { @@ -1236,7 +1236,7 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, /* legacy rates */ rate = &common->sbands[tx_info->band].bitrates[rates[i].idx]; - if ((tx_info->band == IEEE80211_BAND_2GHZ) && + if ((tx_info->band == NL80211_BAND_2GHZ) && !(rate->flags & IEEE80211_RATE_ERP_G)) phy = WLAN_RC_PHY_CCK; else diff --git a/drivers/net/wireless/ath/carl9170/mac.c b/drivers/net/wireless/ath/carl9170/mac.c index 
a2f005703c04..7d4a72dc98db 100644 --- a/drivers/net/wireless/ath/carl9170/mac.c +++ b/drivers/net/wireless/ath/carl9170/mac.c @@ -48,7 +48,7 @@ int carl9170_set_dyn_sifs_ack(struct ar9170 *ar) if (conf_is_ht40(&ar->hw->conf)) val = 0x010a; else { - if (ar->hw->conf.chandef.chan->band == IEEE80211_BAND_2GHZ) + if (ar->hw->conf.chandef.chan->band == NL80211_BAND_2GHZ) val = 0x105; else val = 0x104; @@ -66,7 +66,7 @@ int carl9170_set_rts_cts_rate(struct ar9170 *ar) rts_rate = 0x1da; cts_rate = 0x10a; } else { - if (ar->hw->conf.chandef.chan->band == IEEE80211_BAND_2GHZ) { + if (ar->hw->conf.chandef.chan->band == NL80211_BAND_2GHZ) { /* 11 mbit CCK */ rts_rate = 033; cts_rate = 003; @@ -93,7 +93,7 @@ int carl9170_set_slot_time(struct ar9170 *ar) return 0; } - if ((ar->hw->conf.chandef.chan->band == IEEE80211_BAND_5GHZ) || + if ((ar->hw->conf.chandef.chan->band == NL80211_BAND_5GHZ) || vif->bss_conf.use_short_slot) slottime = 9; @@ -120,7 +120,7 @@ int carl9170_set_mac_rates(struct ar9170 *ar) basic |= (vif->bss_conf.basic_rates & 0xff0) << 4; rcu_read_unlock(); - if (ar->hw->conf.chandef.chan->band == IEEE80211_BAND_5GHZ) + if (ar->hw->conf.chandef.chan->band == NL80211_BAND_5GHZ) mandatory = 0xff00; /* OFDM 6/9/12/18/24/36/48/54 */ else mandatory = 0xff0f; /* OFDM (6/9../54) + CCK (1/2/5.5/11) */ @@ -512,10 +512,10 @@ int carl9170_set_mac_tpc(struct ar9170 *ar, struct ieee80211_channel *channel) chains = AR9170_TX_PHY_TXCHAIN_1; switch (channel->band) { - case IEEE80211_BAND_2GHZ: + case NL80211_BAND_2GHZ: power = ar->power_2G_ofdm[0] & 0x3f; break; - case IEEE80211_BAND_5GHZ: + case NL80211_BAND_5GHZ: power = ar->power_5G_leg[0] & 0x3f; break; default: diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c index 4d1527a2e292..ffb22a04beeb 100644 --- a/drivers/net/wireless/ath/carl9170/main.c +++ b/drivers/net/wireless/ath/carl9170/main.c @@ -1666,7 +1666,7 @@ static int carl9170_op_get_survey(struct ieee80211_hw *hw, int idx, return err; } - for (b = 0; b < IEEE80211_NUM_BANDS; b++) { + for (b = 0; b < NUM_NL80211_BANDS; b++) { band = ar->hw->wiphy->bands[b]; if (!band) @@ -1941,13 +1941,13 @@ static int carl9170_parse_eeprom(struct ar9170 *ar) } if (ar->eeprom.operating_flags & AR9170_OPFLAG_2GHZ) { - ar->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = + ar->hw->wiphy->bands[NL80211_BAND_2GHZ] = &carl9170_band_2GHz; chans += carl9170_band_2GHz.n_channels; bands++; } if (ar->eeprom.operating_flags & AR9170_OPFLAG_5GHZ) { - ar->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = + ar->hw->wiphy->bands[NL80211_BAND_5GHZ] = &carl9170_band_5GHz; chans += carl9170_band_5GHz.n_channels; bands++; diff --git a/drivers/net/wireless/ath/carl9170/phy.c b/drivers/net/wireless/ath/carl9170/phy.c index dca6df13fd5b..34d9fd77046e 100644 --- a/drivers/net/wireless/ath/carl9170/phy.c +++ b/drivers/net/wireless/ath/carl9170/phy.c @@ -540,11 +540,11 @@ static int carl9170_init_phy_from_eeprom(struct ar9170 *ar, return carl9170_regwrite_result(); } -static int carl9170_init_phy(struct ar9170 *ar, enum ieee80211_band band) +static int carl9170_init_phy(struct ar9170 *ar, enum nl80211_band band) { int i, err; u32 val; - bool is_2ghz = band == IEEE80211_BAND_2GHZ; + bool is_2ghz = band == NL80211_BAND_2GHZ; bool is_40mhz = conf_is_ht40(&ar->hw->conf); carl9170_regwrite_begin(ar); @@ -1125,13 +1125,13 @@ static int carl9170_set_freq_cal_data(struct ar9170 *ar, u8 f, tmp; switch (channel->band) { - case IEEE80211_BAND_2GHZ: + case NL80211_BAND_2GHZ: f = channel->center_freq - 2300; 
cal_freq_pier = ar->eeprom.cal_freq_pier_2G; i = AR5416_NUM_2G_CAL_PIERS - 1; break; - case IEEE80211_BAND_5GHZ: + case NL80211_BAND_5GHZ: f = (channel->center_freq - 4800) / 5; cal_freq_pier = ar->eeprom.cal_freq_pier_5G; i = AR5416_NUM_5G_CAL_PIERS - 1; @@ -1158,12 +1158,12 @@ static int carl9170_set_freq_cal_data(struct ar9170 *ar, int j; switch (channel->band) { - case IEEE80211_BAND_2GHZ: + case NL80211_BAND_2GHZ: cal_pier_data = &ar->eeprom. cal_pier_data_2G[chain][idx]; break; - case IEEE80211_BAND_5GHZ: + case NL80211_BAND_5GHZ: cal_pier_data = &ar->eeprom. cal_pier_data_5G[chain][idx]; break; @@ -1340,7 +1340,7 @@ static void carl9170_calc_ctl(struct ar9170 *ar, u32 freq, enum carl9170_bw bw) /* skip CTL and heavy clip for CTL_MKK and CTL_ETSI */ return; - if (ar->hw->conf.chandef.chan->band == IEEE80211_BAND_2GHZ) { + if (ar->hw->conf.chandef.chan->band == NL80211_BAND_2GHZ) { modes = mode_list_2ghz; nr_modes = ARRAY_SIZE(mode_list_2ghz); } else { @@ -1607,7 +1607,7 @@ int carl9170_set_channel(struct ar9170 *ar, struct ieee80211_channel *channel, return err; err = carl9170_init_rf_banks_0_7(ar, - channel->band == IEEE80211_BAND_5GHZ); + channel->band == NL80211_BAND_5GHZ); if (err) return err; @@ -1621,7 +1621,7 @@ int carl9170_set_channel(struct ar9170 *ar, struct ieee80211_channel *channel, return err; err = carl9170_init_rf_bank4_pwr(ar, - channel->band == IEEE80211_BAND_5GHZ, + channel->band == NL80211_BAND_5GHZ, channel->center_freq, bw); if (err) return err; diff --git a/drivers/net/wireless/ath/carl9170/rx.c b/drivers/net/wireless/ath/carl9170/rx.c index d66533cbc38a..0c34c8729dc6 100644 --- a/drivers/net/wireless/ath/carl9170/rx.c +++ b/drivers/net/wireless/ath/carl9170/rx.c @@ -417,7 +417,7 @@ static int carl9170_rx_mac_status(struct ar9170 *ar, return -EINVAL; } - if (status->band == IEEE80211_BAND_2GHZ) + if (status->band == NL80211_BAND_2GHZ) status->rate_idx += 4; break; diff --git a/drivers/net/wireless/ath/carl9170/tx.c b/drivers/net/wireless/ath/carl9170/tx.c index ae86a600d920..2bf04c9edc98 100644 --- a/drivers/net/wireless/ath/carl9170/tx.c +++ b/drivers/net/wireless/ath/carl9170/tx.c @@ -720,12 +720,12 @@ static void carl9170_tx_rate_tpc_chains(struct ar9170 *ar, /* +1 dBm for HT40 */ *tpc += 2; - if (info->band == IEEE80211_BAND_2GHZ) + if (info->band == NL80211_BAND_2GHZ) txpower = ar->power_2G_ht40; else txpower = ar->power_5G_ht40; } else { - if (info->band == IEEE80211_BAND_2GHZ) + if (info->band == NL80211_BAND_2GHZ) txpower = ar->power_2G_ht20; else txpower = ar->power_5G_ht20; @@ -734,7 +734,7 @@ static void carl9170_tx_rate_tpc_chains(struct ar9170 *ar, *phyrate = txrate->idx; *tpc += txpower[idx & 7]; } else { - if (info->band == IEEE80211_BAND_2GHZ) { + if (info->band == NL80211_BAND_2GHZ) { if (idx < 4) txpower = ar->power_2G_cck; else @@ -797,7 +797,7 @@ static __le32 carl9170_tx_physet(struct ar9170 *ar, * tmp |= cpu_to_le32(AR9170_TX_PHY_GREENFIELD); */ } else { - if (info->band == IEEE80211_BAND_2GHZ) { + if (info->band == NL80211_BAND_2GHZ) { if (txrate->idx <= AR9170_TX_PHY_RATE_CCK_11M) tmp |= cpu_to_le32(AR9170_TX_PHY_MOD_CCK); else diff --git a/drivers/net/wireless/ath/regd.c b/drivers/net/wireless/ath/regd.c index 06ea6cc9e30a..7e15ed9ed31f 100644 --- a/drivers/net/wireless/ath/regd.c +++ b/drivers/net/wireless/ath/regd.c @@ -336,12 +336,12 @@ ath_reg_apply_beaconing_flags(struct wiphy *wiphy, struct ath_regulatory *reg, enum nl80211_reg_initiator initiator) { - enum ieee80211_band band; + enum nl80211_band band; struct 
ieee80211_supported_band *sband; struct ieee80211_channel *ch; unsigned int i; - for (band = 0; band < IEEE80211_NUM_BANDS; band++) { + for (band = 0; band < NUM_NL80211_BANDS; band++) { if (!wiphy->bands[band]) continue; sband = wiphy->bands[band]; @@ -374,7 +374,7 @@ ath_reg_apply_ir_flags(struct wiphy *wiphy, { struct ieee80211_supported_band *sband; - sband = wiphy->bands[IEEE80211_BAND_2GHZ]; + sband = wiphy->bands[NL80211_BAND_2GHZ]; if (!sband) return; @@ -402,10 +402,10 @@ static void ath_reg_apply_radar_flags(struct wiphy *wiphy) struct ieee80211_channel *ch; unsigned int i; - if (!wiphy->bands[IEEE80211_BAND_5GHZ]) + if (!wiphy->bands[NL80211_BAND_5GHZ]) return; - sband = wiphy->bands[IEEE80211_BAND_5GHZ]; + sband = wiphy->bands[NL80211_BAND_5GHZ]; for (i = 0; i < sband->n_channels; i++) { ch = &sband->channels[i]; @@ -772,7 +772,7 @@ ath_regd_init(struct ath_regulatory *reg, EXPORT_SYMBOL(ath_regd_init); u32 ath_regd_get_band_ctl(struct ath_regulatory *reg, - enum ieee80211_band band) + enum nl80211_band band) { if (!reg->regpair || (reg->country_code == CTRY_DEFAULT && @@ -794,9 +794,9 @@ u32 ath_regd_get_band_ctl(struct ath_regulatory *reg, } switch (band) { - case IEEE80211_BAND_2GHZ: + case NL80211_BAND_2GHZ: return reg->regpair->reg_2ghz_ctl; - case IEEE80211_BAND_5GHZ: + case NL80211_BAND_5GHZ: return reg->regpair->reg_5ghz_ctl; default: return NO_CTL; diff --git a/drivers/net/wireless/ath/regd.h b/drivers/net/wireless/ath/regd.h index 37f53bd8fcb1..565d3075f06e 100644 --- a/drivers/net/wireless/ath/regd.h +++ b/drivers/net/wireless/ath/regd.h @@ -255,7 +255,7 @@ int ath_regd_init(struct ath_regulatory *reg, struct wiphy *wiphy, void (*reg_notifier)(struct wiphy *wiphy, struct regulatory_request *request)); u32 ath_regd_get_band_ctl(struct ath_regulatory *reg, - enum ieee80211_band band); + enum nl80211_band band); void ath_reg_notifier_apply(struct wiphy *wiphy, struct regulatory_request *request, struct ath_regulatory *reg); diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c index a27279c2c695..9a1db3bbec4e 100644 --- a/drivers/net/wireless/ath/wcn36xx/main.c +++ b/drivers/net/wireless/ath/wcn36xx/main.c @@ -26,14 +26,14 @@ module_param_named(debug_mask, wcn36xx_dbg_mask, uint, 0644); MODULE_PARM_DESC(debug_mask, "Debugging mask"); #define CHAN2G(_freq, _idx) { \ - .band = IEEE80211_BAND_2GHZ, \ + .band = NL80211_BAND_2GHZ, \ .center_freq = (_freq), \ .hw_value = (_idx), \ .max_power = 25, \ } #define CHAN5G(_freq, _idx) { \ - .band = IEEE80211_BAND_5GHZ, \ + .band = NL80211_BAND_5GHZ, \ .center_freq = (_freq), \ .hw_value = (_idx), \ .max_power = 25, \ @@ -516,7 +516,7 @@ static void wcn36xx_sw_scan_complete(struct ieee80211_hw *hw, } static void wcn36xx_update_allowed_rates(struct ieee80211_sta *sta, - enum ieee80211_band band) + enum nl80211_band band) { int i, size; u16 *rates_table; @@ -529,7 +529,7 @@ static void wcn36xx_update_allowed_rates(struct ieee80211_sta *sta, size = ARRAY_SIZE(sta_priv->supported_rates.dsss_rates); rates_table = sta_priv->supported_rates.dsss_rates; - if (band == IEEE80211_BAND_2GHZ) { + if (band == NL80211_BAND_2GHZ) { for (i = 0; i < size; i++) { if (rates & 0x01) { rates_table[i] = wcn_2ghz_rates[i].hw_value; @@ -958,8 +958,8 @@ static int wcn36xx_init_ieee80211(struct wcn36xx *wcn) BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_MESH_POINT); - wcn->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &wcn_band_2ghz; - wcn->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = &wcn_band_5ghz; + 
wcn->hw->wiphy->bands[NL80211_BAND_2GHZ] = &wcn_band_2ghz; + wcn->hw->wiphy->bands[NL80211_BAND_5GHZ] = &wcn_band_5ghz; wcn->hw->wiphy->cipher_suites = cipher_suites; wcn->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites); diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c index 74f56a81ad9a..96992a2c4b42 100644 --- a/drivers/net/wireless/ath/wcn36xx/smd.c +++ b/drivers/net/wireless/ath/wcn36xx/smd.c @@ -104,11 +104,11 @@ static void wcn36xx_smd_set_bss_nw_type(struct wcn36xx *wcn, struct ieee80211_sta *sta, struct wcn36xx_hal_config_bss_params *bss_params) { - if (IEEE80211_BAND_5GHZ == WCN36XX_BAND(wcn)) + if (NL80211_BAND_5GHZ == WCN36XX_BAND(wcn)) bss_params->nw_type = WCN36XX_HAL_11A_NW_TYPE; else if (sta && sta->ht_cap.ht_supported) bss_params->nw_type = WCN36XX_HAL_11N_NW_TYPE; - else if (sta && (sta->supp_rates[IEEE80211_BAND_2GHZ] & 0x7f)) + else if (sta && (sta->supp_rates[NL80211_BAND_2GHZ] & 0x7f)) bss_params->nw_type = WCN36XX_HAL_11G_NW_TYPE; else bss_params->nw_type = WCN36XX_HAL_11B_NW_TYPE; diff --git a/drivers/net/wireless/ath/wcn36xx/txrx.c b/drivers/net/wireless/ath/wcn36xx/txrx.c index 99c21aac68bd..6c47a7336c38 100644 --- a/drivers/net/wireless/ath/wcn36xx/txrx.c +++ b/drivers/net/wireless/ath/wcn36xx/txrx.c @@ -225,7 +225,7 @@ static void wcn36xx_set_tx_mgmt(struct wcn36xx_tx_bd *bd, /* default rate for unicast */ if (ieee80211_is_mgmt(hdr->frame_control)) - bd->bd_rate = (WCN36XX_BAND(wcn) == IEEE80211_BAND_5GHZ) ? + bd->bd_rate = (WCN36XX_BAND(wcn) == NL80211_BAND_5GHZ) ? WCN36XX_BD_RATE_CTRL : WCN36XX_BD_RATE_MGMT; else if (ieee80211_is_ctl(hdr->frame_control)) diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c index 12cae3c005fb..0fb3a7941d84 100644 --- a/drivers/net/wireless/ath/wil6210/cfg80211.c +++ b/drivers/net/wireless/ath/wil6210/cfg80211.c @@ -21,7 +21,7 @@ #define WIL_MAX_ROC_DURATION_MS 5000 #define CHAN60G(_channel, _flags) { \ - .band = IEEE80211_BAND_60GHZ, \ + .band = NL80211_BAND_60GHZ, \ .center_freq = 56160 + (2160 * (_channel)), \ .hw_value = (_channel), \ .flags = (_flags), \ @@ -1411,7 +1411,7 @@ static void wil_wiphy_init(struct wiphy *wiphy) NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 | NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P; - wiphy->bands[IEEE80211_BAND_60GHZ] = &wil_band_60ghz; + wiphy->bands[NL80211_BAND_60GHZ] = &wil_band_60ghz; /* TODO: figure this out */ wiphy->signal_type = CFG80211_SIGNAL_TYPE_UNSPEC; diff --git a/drivers/net/wireless/ath/wil6210/netdev.c b/drivers/net/wireless/ath/wil6210/netdev.c index 3bc0e2634db0..098409753d5b 100644 --- a/drivers/net/wireless/ath/wil6210/netdev.c +++ b/drivers/net/wireless/ath/wil6210/netdev.c @@ -157,7 +157,7 @@ void *wil_if_alloc(struct device *dev) wdev->iftype = NL80211_IFTYPE_STATION; /* TODO */ /* default monitor channel */ - ch = wdev->wiphy->bands[IEEE80211_BAND_60GHZ]->channels; + ch = wdev->wiphy->bands[NL80211_BAND_60GHZ]->channels; cfg80211_chandef_create(&wdev->preset_chandef, ch, NL80211_CHAN_NO_HT); ndev = alloc_netdev(0, "wlan%d", NET_NAME_UNKNOWN, wil_dev_setup); diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c index 3cc4462aec1a..6ca28c3eff0a 100644 --- a/drivers/net/wireless/ath/wil6210/wmi.c +++ b/drivers/net/wireless/ath/wil6210/wmi.c @@ -333,7 +333,7 @@ static void wmi_evt_rx_mgmt(struct wil6210_priv *wil, int id, void *d, int len) } ch_no = data->info.channel + 1; - freq = ieee80211_channel_to_frequency(ch_no, IEEE80211_BAND_60GHZ); + 
freq = ieee80211_channel_to_frequency(ch_no, NL80211_BAND_60GHZ); channel = ieee80211_get_channel(wiphy, freq); signal = data->info.sqi; d_status = le16_to_cpu(data->info.status); diff --git a/drivers/net/wireless/atmel/at76c50x-usb.c b/drivers/net/wireless/atmel/at76c50x-usb.c index 1efb1d66e0b7..7c108047fb46 100644 --- a/drivers/net/wireless/atmel/at76c50x-usb.c +++ b/drivers/net/wireless/atmel/at76c50x-usb.c @@ -1547,7 +1547,7 @@ static inline int at76_guess_freq(struct at76_priv *priv) channel = el[2]; exit: - return ieee80211_channel_to_frequency(channel, IEEE80211_BAND_2GHZ); + return ieee80211_channel_to_frequency(channel, NL80211_BAND_2GHZ); } static void at76_rx_tasklet(unsigned long param) @@ -1590,7 +1590,7 @@ static void at76_rx_tasklet(unsigned long param) rx_status.signal = buf->rssi; rx_status.flag |= RX_FLAG_DECRYPTED; rx_status.flag |= RX_FLAG_IV_STRIPPED; - rx_status.band = IEEE80211_BAND_2GHZ; + rx_status.band = NL80211_BAND_2GHZ; rx_status.freq = at76_guess_freq(priv); at76_dbg(DBG_MAC80211, "calling ieee80211_rx_irqsafe(): %d/%d", @@ -2359,7 +2359,7 @@ static int at76_init_new_device(struct at76_priv *priv, priv->hw->wiphy->max_scan_ssids = 1; priv->hw->wiphy->max_scan_ie_len = 0; priv->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION); - priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &at76_supported_band; + priv->hw->wiphy->bands[NL80211_BAND_2GHZ] = &at76_supported_band; ieee80211_hw_set(priv->hw, RX_INCLUDES_FCS); ieee80211_hw_set(priv->hw, SIGNAL_UNSPEC); priv->hw->max_signal = 100; diff --git a/drivers/net/wireless/atmel/atmel.c b/drivers/net/wireless/atmel/atmel.c index 6a1f03c271c1..8f8f37f3a00c 100644 --- a/drivers/net/wireless/atmel/atmel.c +++ b/drivers/net/wireless/atmel/atmel.c @@ -2434,7 +2434,7 @@ static int atmel_get_range(struct net_device *dev, /* Values in MHz -> * 10^5 * 10 */ range->freq[k].m = 100000 * - ieee80211_channel_to_frequency(i, IEEE80211_BAND_2GHZ); + ieee80211_channel_to_frequency(i, NL80211_BAND_2GHZ); range->freq[k++].e = 1; } range->num_frequency = k; diff --git a/drivers/net/wireless/broadcom/b43/b43.h b/drivers/net/wireless/broadcom/b43/b43.h index 036552439816..d7d42f0b80c3 100644 --- a/drivers/net/wireless/broadcom/b43/b43.h +++ b/drivers/net/wireless/broadcom/b43/b43.h @@ -992,9 +992,9 @@ static inline int b43_is_mode(struct b43_wl *wl, int type) /** * b43_current_band - Returns the currently used band. - * Returns one of IEEE80211_BAND_2GHZ and IEEE80211_BAND_5GHZ. + * Returns one of NL80211_BAND_2GHZ and NL80211_BAND_5GHZ. 
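ieee80211_channel_to_frequency() now takes the nl80211 band enum, as in the wil6210 and at76c50x hunks above. A small usage sketch, assuming <net/cfg80211.h>; the 60 GHz result follows the same 56160 + 2160 * channel spacing as the CHAN60G macro earlier in the patch:

static void show_band_freqs(void)
{
        /* Channel 6 on 2.4 GHz -> 2437 MHz, channel 2 on 60 GHz -> 60480 MHz */
        int f_2g  = ieee80211_channel_to_frequency(6, NL80211_BAND_2GHZ);
        int f_60g = ieee80211_channel_to_frequency(2, NL80211_BAND_60GHZ);

        pr_info("2.4 GHz ch6 = %d MHz, 60 GHz ch2 = %d MHz\n", f_2g, f_60g);
}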
*/ -static inline enum ieee80211_band b43_current_band(struct b43_wl *wl) +static inline enum nl80211_band b43_current_band(struct b43_wl *wl) { return wl->hw->conf.chandef.chan->band; } diff --git a/drivers/net/wireless/broadcom/b43/main.c b/drivers/net/wireless/broadcom/b43/main.c index b0603e796ad8..4ee5c5853f9f 100644 --- a/drivers/net/wireless/broadcom/b43/main.c +++ b/drivers/net/wireless/broadcom/b43/main.c @@ -187,7 +187,7 @@ static struct ieee80211_rate __b43_ratetable[] = { #define b43_g_ratetable_size 12 #define CHAN2G(_channel, _freq, _flags) { \ - .band = IEEE80211_BAND_2GHZ, \ + .band = NL80211_BAND_2GHZ, \ .center_freq = (_freq), \ .hw_value = (_channel), \ .flags = (_flags), \ @@ -216,7 +216,7 @@ static struct ieee80211_channel b43_2ghz_chantable[] = { #undef CHAN2G #define CHAN4G(_channel, _flags) { \ - .band = IEEE80211_BAND_5GHZ, \ + .band = NL80211_BAND_5GHZ, \ .center_freq = 4000 + (5 * (_channel)), \ .hw_value = (_channel), \ .flags = (_flags), \ @@ -224,7 +224,7 @@ static struct ieee80211_channel b43_2ghz_chantable[] = { .max_power = 30, \ } #define CHAN5G(_channel, _flags) { \ - .band = IEEE80211_BAND_5GHZ, \ + .band = NL80211_BAND_5GHZ, \ .center_freq = 5000 + (5 * (_channel)), \ .hw_value = (_channel), \ .flags = (_flags), \ @@ -323,7 +323,7 @@ static struct ieee80211_channel b43_5ghz_aphy_chantable[] = { #undef CHAN5G static struct ieee80211_supported_band b43_band_5GHz_nphy = { - .band = IEEE80211_BAND_5GHZ, + .band = NL80211_BAND_5GHZ, .channels = b43_5ghz_nphy_chantable, .n_channels = ARRAY_SIZE(b43_5ghz_nphy_chantable), .bitrates = b43_a_ratetable, @@ -331,7 +331,7 @@ static struct ieee80211_supported_band b43_band_5GHz_nphy = { }; static struct ieee80211_supported_band b43_band_5GHz_nphy_limited = { - .band = IEEE80211_BAND_5GHZ, + .band = NL80211_BAND_5GHZ, .channels = b43_5ghz_nphy_chantable_limited, .n_channels = ARRAY_SIZE(b43_5ghz_nphy_chantable_limited), .bitrates = b43_a_ratetable, @@ -339,7 +339,7 @@ static struct ieee80211_supported_band b43_band_5GHz_nphy_limited = { }; static struct ieee80211_supported_band b43_band_5GHz_aphy = { - .band = IEEE80211_BAND_5GHZ, + .band = NL80211_BAND_5GHZ, .channels = b43_5ghz_aphy_chantable, .n_channels = ARRAY_SIZE(b43_5ghz_aphy_chantable), .bitrates = b43_a_ratetable, @@ -347,7 +347,7 @@ static struct ieee80211_supported_band b43_band_5GHz_aphy = { }; static struct ieee80211_supported_band b43_band_2GHz = { - .band = IEEE80211_BAND_2GHZ, + .band = NL80211_BAND_2GHZ, .channels = b43_2ghz_chantable, .n_channels = ARRAY_SIZE(b43_2ghz_chantable), .bitrates = b43_g_ratetable, @@ -355,7 +355,7 @@ static struct ieee80211_supported_band b43_band_2GHz = { }; static struct ieee80211_supported_band b43_band_2ghz_limited = { - .band = IEEE80211_BAND_2GHZ, + .band = NL80211_BAND_2GHZ, .channels = b43_2ghz_chantable, .n_channels = b43_2ghz_chantable_limited_size, .bitrates = b43_g_ratetable, @@ -717,7 +717,7 @@ static void b43_set_slot_time(struct b43_wldev *dev, u16 slot_time) { /* slot_time is in usec. */ /* This test used to exit for all but a G PHY. 
*/ - if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) + if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ) return; b43_write16(dev, B43_MMIO_IFSSLOT, 510 + slot_time); /* Shared memory location 0x0010 is the slot time and should be @@ -3880,12 +3880,12 @@ static void b43_op_set_tsf(struct ieee80211_hw *hw, mutex_unlock(&wl->mutex); } -static const char *band_to_string(enum ieee80211_band band) +static const char *band_to_string(enum nl80211_band band) { switch (band) { - case IEEE80211_BAND_5GHZ: + case NL80211_BAND_5GHZ: return "5"; - case IEEE80211_BAND_2GHZ: + case NL80211_BAND_2GHZ: return "2.4"; default: break; @@ -3903,10 +3903,10 @@ static int b43_switch_band(struct b43_wldev *dev, u32 tmp; switch (chan->band) { - case IEEE80211_BAND_5GHZ: + case NL80211_BAND_5GHZ: gmode = false; break; - case IEEE80211_BAND_2GHZ: + case NL80211_BAND_2GHZ: gmode = true; break; default: @@ -5294,16 +5294,16 @@ static int b43_setup_bands(struct b43_wldev *dev, phy->radio_rev == 9; if (have_2ghz_phy) - hw->wiphy->bands[IEEE80211_BAND_2GHZ] = limited_2g ? + hw->wiphy->bands[NL80211_BAND_2GHZ] = limited_2g ? &b43_band_2ghz_limited : &b43_band_2GHz; if (dev->phy.type == B43_PHYTYPE_N) { if (have_5ghz_phy) - hw->wiphy->bands[IEEE80211_BAND_5GHZ] = limited_5g ? + hw->wiphy->bands[NL80211_BAND_5GHZ] = limited_5g ? &b43_band_5GHz_nphy_limited : &b43_band_5GHz_nphy; } else { if (have_5ghz_phy) - hw->wiphy->bands[IEEE80211_BAND_5GHZ] = &b43_band_5GHz_aphy; + hw->wiphy->bands[NL80211_BAND_5GHZ] = &b43_band_5GHz_aphy; } dev->phy.supports_2ghz = have_2ghz_phy; diff --git a/drivers/net/wireless/broadcom/b43/phy_ac.c b/drivers/net/wireless/broadcom/b43/phy_ac.c index e75633d67938..52f8abad8831 100644 --- a/drivers/net/wireless/broadcom/b43/phy_ac.c +++ b/drivers/net/wireless/broadcom/b43/phy_ac.c @@ -61,7 +61,7 @@ static void b43_phy_ac_op_radio_write(struct b43_wldev *dev, u16 reg, static unsigned int b43_phy_ac_op_get_default_chan(struct b43_wldev *dev) { - if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) + if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) return 11; return 36; } diff --git a/drivers/net/wireless/broadcom/b43/phy_common.c b/drivers/net/wireless/broadcom/b43/phy_common.c index ec2b9c577b90..85f2ca989565 100644 --- a/drivers/net/wireless/broadcom/b43/phy_common.c +++ b/drivers/net/wireless/broadcom/b43/phy_common.c @@ -436,7 +436,7 @@ int b43_switch_channel(struct b43_wldev *dev, unsigned int new_channel) * firmware from sending ghost packets. 
*/ channelcookie = new_channel; - if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) + if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ) channelcookie |= B43_SHM_SH_CHAN_5GHZ; /* FIXME: set 40Mhz flag if required */ if (0) diff --git a/drivers/net/wireless/broadcom/b43/phy_ht.c b/drivers/net/wireless/broadcom/b43/phy_ht.c index bd68945965d6..718c90e81696 100644 --- a/drivers/net/wireless/broadcom/b43/phy_ht.c +++ b/drivers/net/wireless/broadcom/b43/phy_ht.c @@ -568,7 +568,7 @@ static void b43_phy_ht_tx_power_ctl(struct b43_wldev *dev, bool enable) } else { b43_phy_set(dev, B43_PHY_HT_TXPCTL_CMD_C1, en_bits); - if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) { + if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ) { for (i = 0; i < 3; i++) b43_phy_write(dev, cmd_regs[i], 0x32); } @@ -643,7 +643,7 @@ static void b43_phy_ht_tx_power_ctl_setup(struct b43_wldev *dev) u16 freq = dev->phy.chandef->chan->center_freq; int i, c; - if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { + if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) { for (c = 0; c < 3; c++) { target[c] = sprom->core_pwr_info[c].maxpwr_2g; a1[c] = sprom->core_pwr_info[c].pa_2g[0]; @@ -777,7 +777,7 @@ static void b43_phy_ht_channel_setup(struct b43_wldev *dev, const struct b43_phy_ht_channeltab_e_phy *e, struct ieee80211_channel *new_channel) { - if (new_channel->band == IEEE80211_BAND_5GHZ) { + if (new_channel->band == NL80211_BAND_5GHZ) { /* Switch to 2 GHz for a moment to access B-PHY regs */ b43_phy_mask(dev, B43_PHY_HT_BANDCTL, ~B43_PHY_HT_BANDCTL_5GHZ); @@ -805,7 +805,7 @@ static void b43_phy_ht_channel_setup(struct b43_wldev *dev, } else { b43_phy_ht_classifier(dev, B43_PHY_HT_CLASS_CTL_OFDM_EN, B43_PHY_HT_CLASS_CTL_OFDM_EN); - if (new_channel->band == IEEE80211_BAND_2GHZ) + if (new_channel->band == NL80211_BAND_2GHZ) b43_phy_mask(dev, B43_PHY_HT_TEST, ~0x840); } @@ -916,7 +916,7 @@ static int b43_phy_ht_op_init(struct b43_wldev *dev) if (0) /* TODO: condition */ ; /* TODO: PHY op on reg 0x217 */ - if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) + if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ) b43_phy_ht_classifier(dev, B43_PHY_HT_CLASS_CTL_CCK_EN, 0); else b43_phy_ht_classifier(dev, B43_PHY_HT_CLASS_CTL_CCK_EN, @@ -1005,7 +1005,7 @@ static int b43_phy_ht_op_init(struct b43_wldev *dev) b43_phy_ht_classifier(dev, 0, 0); b43_phy_ht_read_clip_detection(dev, clip_state); - if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) + if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) b43_phy_ht_bphy_init(dev); b43_httab_write_bulk(dev, B43_HTTAB32(0x1a, 0xc0), @@ -1077,7 +1077,7 @@ static int b43_phy_ht_op_switch_channel(struct b43_wldev *dev, enum nl80211_channel_type channel_type = cfg80211_get_chandef_type(&dev->wl->hw->conf.chandef); - if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { + if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) { if ((new_channel < 1) || (new_channel > 14)) return -EINVAL; } else { @@ -1089,7 +1089,7 @@ static int b43_phy_ht_op_switch_channel(struct b43_wldev *dev, static unsigned int b43_phy_ht_op_get_default_chan(struct b43_wldev *dev) { - if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) + if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) return 11; return 36; } diff --git a/drivers/net/wireless/broadcom/b43/phy_lcn.c b/drivers/net/wireless/broadcom/b43/phy_lcn.c index 97461ccf3e1e..63bd29f070f7 100644 --- a/drivers/net/wireless/broadcom/b43/phy_lcn.c +++ b/drivers/net/wireless/broadcom/b43/phy_lcn.c @@ -108,7 +108,7 @@ static void 
b43_radio_2064_channel_setup(struct b43_wldev *dev) /* wlc_radio_2064_init */ static void b43_radio_2064_init(struct b43_wldev *dev) { - if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { + if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) { b43_radio_write(dev, 0x09c, 0x0020); b43_radio_write(dev, 0x105, 0x0008); } else { @@ -535,7 +535,7 @@ static void b43_phy_lcn_tx_pwr_ctl_init(struct b43_wldev *dev) b43_mac_suspend(dev); if (!dev->phy.lcn->hw_pwr_ctl_capable) { - if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { + if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) { tx_gains.gm_gain = 4; tx_gains.pga_gain = 12; tx_gains.pad_gain = 12; @@ -720,7 +720,7 @@ static int b43_phy_lcn_op_init(struct b43_wldev *dev) else B43_WARN_ON(1); - if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) + if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) b43_phy_lcn_tx_pwr_ctl_init(dev); b43_switch_channel(dev, dev->phy.channel); @@ -779,7 +779,7 @@ static int b43_phy_lcn_op_switch_channel(struct b43_wldev *dev, enum nl80211_channel_type channel_type = cfg80211_get_chandef_type(&dev->wl->hw->conf.chandef); - if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { + if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) { if ((new_channel < 1) || (new_channel > 14)) return -EINVAL; } else { @@ -791,7 +791,7 @@ static int b43_phy_lcn_op_switch_channel(struct b43_wldev *dev, static unsigned int b43_phy_lcn_op_get_default_chan(struct b43_wldev *dev) { - if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) + if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) return 1; return 36; } diff --git a/drivers/net/wireless/broadcom/b43/phy_lp.c b/drivers/net/wireless/broadcom/b43/phy_lp.c index 058a9f232050..6922cbb99a04 100644 --- a/drivers/net/wireless/broadcom/b43/phy_lp.c +++ b/drivers/net/wireless/broadcom/b43/phy_lp.c @@ -46,7 +46,7 @@ static inline u16 channel2freq_lp(u8 channel) static unsigned int b43_lpphy_op_get_default_chan(struct b43_wldev *dev) { - if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) + if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) return 1; return 36; } @@ -91,7 +91,7 @@ static void lpphy_read_band_sprom(struct b43_wldev *dev) u32 ofdmpo; int i; - if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { + if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) { lpphy->tx_isolation_med_band = sprom->tri2g; lpphy->bx_arch = sprom->bxa2g; lpphy->rx_pwr_offset = sprom->rxpo2g; @@ -174,7 +174,7 @@ static void lpphy_adjust_gain_table(struct b43_wldev *dev, u32 freq) B43_WARN_ON(dev->phy.rev >= 2); - if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) + if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) isolation = lpphy->tx_isolation_med_band; else if (freq <= 5320) isolation = lpphy->tx_isolation_low_band; @@ -238,7 +238,7 @@ static void lpphy_baseband_rev0_1_init(struct b43_wldev *dev) b43_phy_maskset(dev, B43_LPPHY_INPUT_PWRDB, 0xFF00, lpphy->rx_pwr_offset); if ((sprom->boardflags_lo & B43_BFL_FEM) && - ((b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) || + ((b43_current_band(dev->wl) == NL80211_BAND_5GHZ) || (sprom->boardflags_hi & B43_BFH_PAREF))) { ssb_pmu_set_ldo_voltage(&bus->chipco, LDO_PAREF, 0x28); ssb_pmu_set_ldo_paref(&bus->chipco, true); @@ -280,7 +280,7 @@ static void lpphy_baseband_rev0_1_init(struct b43_wldev *dev) b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_7, 0xC0FF, 0x0900); b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_8, 0xFFC0, 0x000A); b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_8, 0xC0FF, 0x0B00); - } else if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ 
|| + } else if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ || (dev->dev->board_type == SSB_BOARD_BU4312) || (dev->phy.rev == 0 && (sprom->boardflags_lo & B43_BFL_FEM))) { b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_1, 0xFFC0, 0x0001); @@ -326,7 +326,7 @@ static void lpphy_baseband_rev0_1_init(struct b43_wldev *dev) //FIXME the Broadcom driver caches & delays this HF write! b43_hf_write(dev, b43_hf_read(dev) | B43_HF_PR45960W); } - if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { + if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) { b43_phy_set(dev, B43_LPPHY_LP_PHY_CTL, 0x8000); b43_phy_set(dev, B43_LPPHY_CRSGAIN_CTL, 0x0040); b43_phy_maskset(dev, B43_LPPHY_MINPWR_LEVEL, 0x00FF, 0xA400); @@ -466,7 +466,7 @@ static void lpphy_baseband_rev2plus_init(struct b43_wldev *dev) b43_lptab_write(dev, B43_LPTAB16(0x08, 0x12), 0x40); } - if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { + if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) { b43_phy_set(dev, B43_LPPHY_CRSGAIN_CTL, 0x40); b43_phy_maskset(dev, B43_LPPHY_CRSGAIN_CTL, 0xF0FF, 0xB00); b43_phy_maskset(dev, B43_LPPHY_SYNCPEAKCNT, 0xFFF8, 0x6); @@ -547,7 +547,7 @@ static void lpphy_2062_init(struct b43_wldev *dev) b43_radio_write(dev, B2062_S_BG_CTL1, (b43_radio_read(dev, B2062_N_COMM2) >> 1) | 0x80); } - if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) + if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) b43_radio_set(dev, B2062_N_TSSI_CTL0, 0x1); else b43_radio_mask(dev, B2062_N_TSSI_CTL0, ~0x1); @@ -746,7 +746,7 @@ static void lpphy_clear_deaf(struct b43_wldev *dev, bool user) lpphy->crs_sys_disable = false; if (!lpphy->crs_usr_disable && !lpphy->crs_sys_disable) { - if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) + if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) b43_phy_maskset(dev, B43_LPPHY_CRSGAIN_CTL, 0xFF1F, 0x60); else @@ -807,7 +807,7 @@ static void lpphy_disable_rx_gain_override(struct b43_wldev *dev) b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_0, 0xFFBF); if (dev->phy.rev >= 2) { b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_2, 0xFEFF); - if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { + if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) { b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_2, 0xFBFF); b43_phy_mask(dev, B43_PHY_OFDM(0xE5), 0xFFF7); } @@ -823,7 +823,7 @@ static void lpphy_enable_rx_gain_override(struct b43_wldev *dev) b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_0, 0x40); if (dev->phy.rev >= 2) { b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_2, 0x100); - if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { + if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) { b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_2, 0x400); b43_phy_set(dev, B43_PHY_OFDM(0xE5), 0x8); } @@ -951,7 +951,7 @@ static void lpphy_rev2plus_set_rx_gain(struct b43_wldev *dev, u32 gain) 0xFBFF, ext_lna << 10); b43_phy_write(dev, B43_LPPHY_RX_GAIN_CTL_OVERRIDE_VAL, low_gain); b43_phy_maskset(dev, B43_LPPHY_AFE_DDFS, 0xFFF0, high_gain); - if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { + if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) { tmp = (gain >> 2) & 0x3; b43_phy_maskset(dev, B43_LPPHY_RF_OVERRIDE_2_VAL, 0xE7FF, tmp<<11); @@ -1344,7 +1344,7 @@ static void lpphy_calibrate_rc(struct b43_wldev *dev) if (dev->phy.rev >= 2) { lpphy_rev2plus_rc_calib(dev); } else if (!lpphy->rc_cap) { - if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) + if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) lpphy_rev0_1_rc_calib(dev); } else { lpphy_set_rc_cap(dev); @@ -1548,7 +1548,7 @@ static void lpphy_tx_pctl_init_sw(struct b43_wldev *dev) { struct 
lpphy_tx_gains gains; - if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { + if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) { gains.gm = 4; gains.pad = 12; gains.pga = 12; @@ -1902,7 +1902,7 @@ static int lpphy_rx_iq_cal(struct b43_wldev *dev, bool noise, bool tx, lpphy_set_trsw_over(dev, tx, rx); - if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { + if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) { b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_0, 0x8); b43_phy_maskset(dev, B43_LPPHY_RF_OVERRIDE_VAL_0, 0xFFF7, pa << 3); diff --git a/drivers/net/wireless/broadcom/b43/phy_n.c b/drivers/net/wireless/broadcom/b43/phy_n.c index 9f0bcf3b8414..a5557d70689f 100644 --- a/drivers/net/wireless/broadcom/b43/phy_n.c +++ b/drivers/net/wireless/broadcom/b43/phy_n.c @@ -105,9 +105,9 @@ enum n_rail_type { static inline bool b43_nphy_ipa(struct b43_wldev *dev) { - enum ieee80211_band band = b43_current_band(dev->wl); - return ((dev->phy.n->ipa2g_on && band == IEEE80211_BAND_2GHZ) || - (dev->phy.n->ipa5g_on && band == IEEE80211_BAND_5GHZ)); + enum nl80211_band band = b43_current_band(dev->wl); + return ((dev->phy.n->ipa2g_on && band == NL80211_BAND_2GHZ) || + (dev->phy.n->ipa5g_on && band == NL80211_BAND_5GHZ)); } /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RxCoreGetState */ @@ -357,7 +357,7 @@ static void b43_nphy_rf_ctl_intc_override_rev7(struct b43_wldev *dev, break; case N_INTC_OVERRIDE_PA: tmp = 0x0030; - if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) + if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ) val = value << 5; else val = value << 4; @@ -365,7 +365,7 @@ static void b43_nphy_rf_ctl_intc_override_rev7(struct b43_wldev *dev, b43_phy_set(dev, reg, 0x1000); break; case N_INTC_OVERRIDE_EXT_LNA_PU: - if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) { + if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ) { tmp = 0x0001; tmp2 = 0x0004; val = value; @@ -378,7 +378,7 @@ static void b43_nphy_rf_ctl_intc_override_rev7(struct b43_wldev *dev, b43_phy_mask(dev, reg, ~tmp2); break; case N_INTC_OVERRIDE_EXT_LNA_GAIN: - if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) { + if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ) { tmp = 0x0002; tmp2 = 0x0008; val = value << 1; @@ -465,7 +465,7 @@ static void b43_nphy_rf_ctl_intc_override(struct b43_wldev *dev, } break; case N_INTC_OVERRIDE_PA: - if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) { + if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ) { tmp = 0x0020; val = value << 5; } else { @@ -475,7 +475,7 @@ static void b43_nphy_rf_ctl_intc_override(struct b43_wldev *dev, b43_phy_maskset(dev, reg, ~tmp, val); break; case N_INTC_OVERRIDE_EXT_LNA_PU: - if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) { + if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ) { tmp = 0x0001; val = value; } else { @@ -485,7 +485,7 @@ static void b43_nphy_rf_ctl_intc_override(struct b43_wldev *dev, b43_phy_maskset(dev, reg, ~tmp, val); break; case N_INTC_OVERRIDE_EXT_LNA_GAIN: - if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) { + if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ) { tmp = 0x0002; val = value << 1; } else { @@ -600,7 +600,7 @@ static void b43_nphy_adjust_lna_gain_table(struct b43_wldev *dev) b43_nphy_stay_in_carrier_search(dev, 1); if (nphy->gain_boost) { - if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { + if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) { gain[0] = 6; gain[1] = 6; } else { @@ -736,7 +736,7 @@ static void b43_radio_2057_setup(struct b43_wldev *dev, switch (phy->radio_rev) { case 0 ... 
4: case 6: - if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { + if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) { b43_radio_write(dev, R2057_RFPLL_LOOPFILTER_R1, 0x3f); b43_radio_write(dev, R2057_CP_KPD_IDAC, 0x3f); b43_radio_write(dev, R2057_RFPLL_LOOPFILTER_C1, 0x8); @@ -751,7 +751,7 @@ static void b43_radio_2057_setup(struct b43_wldev *dev, case 9: /* e.g. PHY rev 16 */ b43_radio_write(dev, R2057_LOGEN_PTAT_RESETS, 0x20); b43_radio_write(dev, R2057_VCOBUF_IDACS, 0x18); - if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) { + if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ) { b43_radio_write(dev, R2057_LOGEN_PTAT_RESETS, 0x38); b43_radio_write(dev, R2057_VCOBUF_IDACS, 0x0f); @@ -775,7 +775,7 @@ static void b43_radio_2057_setup(struct b43_wldev *dev, break; } - if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { + if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) { u16 txmix2g_tune_boost_pu = 0; u16 pad2g_tune_pus = 0; @@ -1135,7 +1135,7 @@ static void b43_radio_2056_setup(struct b43_wldev *dev, { struct b43_phy *phy = &dev->phy; struct ssb_sprom *sprom = dev->dev->bus_sprom; - enum ieee80211_band band = b43_current_band(dev->wl); + enum nl80211_band band = b43_current_band(dev->wl); u16 offset; u8 i; u16 bias, cbias; @@ -1152,10 +1152,10 @@ static void b43_radio_2056_setup(struct b43_wldev *dev, dev->dev->chip_pkg == BCMA_PKG_ID_BCM43224_FAB_SMIC); b43_chantab_radio_2056_upload(dev, e); - b2056_upload_syn_pll_cp2(dev, band == IEEE80211_BAND_5GHZ); + b2056_upload_syn_pll_cp2(dev, band == NL80211_BAND_5GHZ); if (sprom->boardflags2_lo & B43_BFL2_GPLL_WAR && - b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { + b43_current_band(dev->wl) == NL80211_BAND_2GHZ) { b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER1, 0x1F); b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER2, 0x1F); if (dev->dev->chip_id == BCMA_CHIP_ID_BCM4716 || @@ -1168,21 +1168,21 @@ static void b43_radio_2056_setup(struct b43_wldev *dev, } } if (sprom->boardflags2_hi & B43_BFH2_GPLL_WAR2 && - b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { + b43_current_band(dev->wl) == NL80211_BAND_2GHZ) { b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER1, 0x1f); b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER2, 0x1f); b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER4, 0x0b); b43_radio_write(dev, B2056_SYN_PLL_CP2, 0x20); } if (sprom->boardflags2_lo & B43_BFL2_APLL_WAR && - b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) { + b43_current_band(dev->wl) == NL80211_BAND_5GHZ) { b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER1, 0x1F); b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER2, 0x1F); b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER4, 0x05); b43_radio_write(dev, B2056_SYN_PLL_CP2, 0x0C); } - if (dev->phy.n->ipa2g_on && band == IEEE80211_BAND_2GHZ) { + if (dev->phy.n->ipa2g_on && band == NL80211_BAND_2GHZ) { for (i = 0; i < 2; i++) { offset = i ? B2056_TX1 : B2056_TX0; if (dev->phy.rev >= 5) { @@ -1244,7 +1244,7 @@ static void b43_radio_2056_setup(struct b43_wldev *dev, } b43_radio_write(dev, offset | B2056_TX_PA_SPARE1, 0xee); } - } else if (dev->phy.n->ipa5g_on && band == IEEE80211_BAND_5GHZ) { + } else if (dev->phy.n->ipa5g_on && band == NL80211_BAND_5GHZ) { u16 freq = phy->chandef->chan->center_freq; if (freq < 5100) { paa_boost = 0xA; @@ -1501,7 +1501,7 @@ static void b43_radio_init2055(struct b43_wldev *dev) /* Follow wl, not specs. 
Do not force uploading all regs */ b2055_upload_inittab(dev, 0, 0); } else { - bool ghz5 = b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ; + bool ghz5 = b43_current_band(dev->wl) == NL80211_BAND_5GHZ; b2055_upload_inittab(dev, ghz5, 0); } b43_radio_init2055_post(dev); @@ -1785,7 +1785,7 @@ static void b43_nphy_rev3_rssi_select(struct b43_wldev *dev, u8 code, b43_phy_maskset(dev, reg, 0xFFC3, 0); if (rssi_type == N_RSSI_W1) - val = (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) ? 4 : 8; + val = (b43_current_band(dev->wl) == NL80211_BAND_5GHZ) ? 4 : 8; else if (rssi_type == N_RSSI_W2) val = 16; else @@ -1813,12 +1813,12 @@ static void b43_nphy_rev3_rssi_select(struct b43_wldev *dev, u8 code, if (rssi_type != N_RSSI_IQ && rssi_type != N_RSSI_TBD) { - enum ieee80211_band band = + enum nl80211_band band = b43_current_band(dev->wl); if (dev->phy.rev < 7) { if (b43_nphy_ipa(dev)) - val = (band == IEEE80211_BAND_5GHZ) ? 0xC : 0xE; + val = (band == NL80211_BAND_5GHZ) ? 0xC : 0xE; else val = 0x11; reg = (i == 0) ? B2056_TX0 : B2056_TX1; @@ -2120,7 +2120,7 @@ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev) 1, 0, false); b43_nphy_rf_ctl_override_rev7(dev, 0x80, 1, 0, false, 0); b43_nphy_rf_ctl_override_rev7(dev, 0x40, 1, 0, false, 0); - if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) { + if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ) { b43_nphy_rf_ctl_override_rev7(dev, 0x20, 0, 0, false, 0); b43_nphy_rf_ctl_override_rev7(dev, 0x10, 1, 0, false, @@ -2136,7 +2136,7 @@ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev) b43_nphy_rf_ctl_override(dev, 0x2, 1, 0, false); b43_nphy_rf_ctl_override(dev, 0x80, 1, 0, false); b43_nphy_rf_ctl_override(dev, 0x40, 1, 0, false); - if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) { + if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ) { b43_nphy_rf_ctl_override(dev, 0x20, 0, 0, false); b43_nphy_rf_ctl_override(dev, 0x10, 1, 0, false); } else { @@ -2257,7 +2257,7 @@ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev) b43_phy_write(dev, regs_to_store[i], saved_regs_phy[i]); /* Store for future configuration */ - if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { + if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) { rssical_radio_regs = nphy->rssical_cache.rssical_radio_regs_2G; rssical_phy_regs = nphy->rssical_cache.rssical_phy_regs_2G; } else { @@ -2289,7 +2289,7 @@ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev) rssical_phy_regs[11] = b43_phy_read(dev, B43_NPHY_RSSIMC_1Q_RSSI_Y); /* Remember for which channel we store configuration */ - if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) + if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) nphy->rssical_chanspec_2G.center_freq = phy->chandef->chan->center_freq; else nphy->rssical_chanspec_5G.center_freq = phy->chandef->chan->center_freq; @@ -2336,7 +2336,7 @@ static void b43_nphy_rev2_rssi_cal(struct b43_wldev *dev, enum n_rssi_type type) b43_nphy_read_clip_detection(dev, clip_state); b43_nphy_write_clip_detection(dev, clip_off); - if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) + if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ) override = 0x140; else override = 0x110; @@ -2629,7 +2629,7 @@ static void b43_nphy_gain_ctl_workarounds_rev1_2(struct b43_wldev *dev) b43_phy_write(dev, B43_NPHY_CCK_SHIFTB_REF, 0x809C); if (nphy->gain_boost) { - if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ && + if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ && b43_is_40mhz(dev)) code = 4; else @@ -2688,7 +2688,7 @@ static void 
b43_nphy_gain_ctl_workarounds_rev1_2(struct b43_wldev *dev) ~B43_NPHY_OVER_DGAIN_CCKDGECV & 0xFFFF, 0x5A << B43_NPHY_OVER_DGAIN_CCKDGECV_SHIFT); - if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) + if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) b43_phy_maskset(dev, B43_PHY_N(0xC5D), 0xFF80, 4); } @@ -2803,7 +2803,7 @@ static void b43_nphy_workarounds_rev7plus(struct b43_wldev *dev) scap_val = b43_radio_read(dev, R2057_RCCAL_SCAP_VAL); if (b43_nphy_ipa(dev)) { - bool ghz2 = b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ; + bool ghz2 = b43_current_band(dev->wl) == NL80211_BAND_2GHZ; switch (phy->radio_rev) { case 5: @@ -2831,7 +2831,7 @@ static void b43_nphy_workarounds_rev7plus(struct b43_wldev *dev) bcap_val_11b[core] = bcap_val; lpf_ofdm_20mhz[core] = 4; lpf_11b[core] = 1; - if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { + if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) { scap_val_11n_20[core] = 0xc; bcap_val_11n_20[core] = 0xc; scap_val_11n_40[core] = 0xa; @@ -2982,7 +2982,7 @@ static void b43_nphy_workarounds_rev7plus(struct b43_wldev *dev) conv = 0x7f; filt = 0xee; } - if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { + if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) { for (core = 0; core < 2; core++) { if (core == 0) { b43_radio_write(dev, 0x5F, bias); @@ -2998,7 +2998,7 @@ static void b43_nphy_workarounds_rev7plus(struct b43_wldev *dev) } if (b43_nphy_ipa(dev)) { - if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { + if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) { if (phy->radio_rev == 3 || phy->radio_rev == 4 || phy->radio_rev == 6) { for (core = 0; core < 2; core++) { @@ -3221,7 +3221,7 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev) ARRAY_SIZE(rx2tx_events)); } - tmp16 = (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) ? + tmp16 = (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) ? 
0x2 : 0x9C40; b43_phy_write(dev, B43_NPHY_ENDROP_TLEN, tmp16); @@ -3240,7 +3240,7 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev) b43_ntab_write(dev, B43_NTAB16(8, 0), 2); b43_ntab_write(dev, B43_NTAB16(8, 16), 2); - if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) + if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) pdet_range = sprom->fem.ghz2.pdet_range; else pdet_range = sprom->fem.ghz5.pdet_range; @@ -3249,7 +3249,7 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev) switch (pdet_range) { case 3: if (!(dev->phy.rev >= 4 && - b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)) + b43_current_band(dev->wl) == NL80211_BAND_2GHZ)) break; /* FALL THROUGH */ case 0: @@ -3261,7 +3261,7 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev) break; case 2: if (dev->phy.rev >= 6) { - if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) + if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) vmid[3] = 0x94; else vmid[3] = 0x8e; @@ -3277,7 +3277,7 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev) break; case 4: case 5: - if (b43_current_band(dev->wl) != IEEE80211_BAND_2GHZ) { + if (b43_current_band(dev->wl) != NL80211_BAND_2GHZ) { if (pdet_range == 4) { vmid[3] = 0x8e; tmp16 = 0x96; @@ -3322,9 +3322,9 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev) /* N PHY WAR TX Chain Update with hw_phytxchain as argument */ if ((sprom->boardflags2_lo & B43_BFL2_APLL_WAR && - b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) || + b43_current_band(dev->wl) == NL80211_BAND_5GHZ) || (sprom->boardflags2_lo & B43_BFL2_GPLL_WAR && - b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)) + b43_current_band(dev->wl) == NL80211_BAND_2GHZ)) tmp32 = 0x00088888; else tmp32 = 0x88888888; @@ -3333,7 +3333,7 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev) b43_ntab_write(dev, B43_NTAB32(30, 3), tmp32); if (dev->phy.rev == 4 && - b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) { + b43_current_band(dev->wl) == NL80211_BAND_5GHZ) { b43_radio_write(dev, B2056_TX0 | B2056_TX_GMBB_IDAC, 0x70); b43_radio_write(dev, B2056_TX1 | B2056_TX_GMBB_IDAC, @@ -3376,7 +3376,7 @@ static void b43_nphy_workarounds_rev1_2(struct b43_wldev *dev) delays1[5] = 0x14; } - if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ && + if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ && nphy->band5g_pwrgain) { b43_radio_mask(dev, B2055_C1_TX_RF_SPARE, ~0x8); b43_radio_mask(dev, B2055_C2_TX_RF_SPARE, ~0x8); @@ -3451,7 +3451,7 @@ static void b43_nphy_workarounds(struct b43_wldev *dev) struct b43_phy *phy = &dev->phy; struct b43_phy_n *nphy = phy->n; - if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) + if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ) b43_nphy_classifier(dev, 1, 0); else b43_nphy_classifier(dev, 1, 1); @@ -3586,7 +3586,7 @@ static void b43_nphy_iq_cal_gain_params(struct b43_wldev *dev, u16 core, gain = (target.pad[core]) | (target.pga[core] << 4) | (target.txgm[core] << 8); - indx = (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) ? + indx = (b43_current_band(dev->wl) == NL80211_BAND_5GHZ) ? 
1 : 0; for (i = 0; i < 9; i++) if (tbl_iqcal_gainparams[indx][i][0] == gain) @@ -3614,7 +3614,7 @@ static void b43_nphy_tx_power_ctrl(struct b43_wldev *dev, bool enable) struct b43_phy_n *nphy = dev->phy.n; u8 i; u16 bmask, val, tmp; - enum ieee80211_band band = b43_current_band(dev->wl); + enum nl80211_band band = b43_current_band(dev->wl); if (nphy->hang_avoid) b43_nphy_stay_in_carrier_search(dev, 1); @@ -3679,7 +3679,7 @@ static void b43_nphy_tx_power_ctrl(struct b43_wldev *dev, bool enable) } b43_phy_maskset(dev, B43_NPHY_TXPCTL_CMD, ~(bmask), val); - if (band == IEEE80211_BAND_5GHZ) { + if (band == NL80211_BAND_5GHZ) { if (phy->rev >= 19) { /* TODO */ } else if (phy->rev >= 7) { @@ -3770,7 +3770,7 @@ static void b43_nphy_tx_power_fix(struct b43_wldev *dev) txpi[0] = 72; txpi[1] = 72; } else { - if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { + if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) { txpi[0] = sprom->txpid2g[0]; txpi[1] = sprom->txpid2g[1]; } else if (freq >= 4900 && freq < 5100) { @@ -3868,7 +3868,7 @@ static void b43_nphy_ipa_internal_tssi_setup(struct b43_wldev *dev) } else if (phy->rev >= 7) { for (core = 0; core < 2; core++) { r = core ? 0x190 : 0x170; - if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { + if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) { b43_radio_write(dev, r + 0x5, 0x5); b43_radio_write(dev, r + 0x9, 0xE); if (phy->rev != 5) @@ -3892,7 +3892,7 @@ static void b43_nphy_ipa_internal_tssi_setup(struct b43_wldev *dev) b43_radio_write(dev, r + 0xC, 0); } } else { - if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) + if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) b43_radio_write(dev, B2056_SYN_RESERVED_ADDR31, 0x128); else b43_radio_write(dev, B2056_SYN_RESERVED_ADDR31, 0x80); @@ -3909,7 +3909,7 @@ static void b43_nphy_ipa_internal_tssi_setup(struct b43_wldev *dev) b43_radio_write(dev, r | B2056_TX_TSSI_MISC1, 8); b43_radio_write(dev, r | B2056_TX_TSSI_MISC2, 0); b43_radio_write(dev, r | B2056_TX_TSSI_MISC3, 0); - if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { + if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) { b43_radio_write(dev, r | B2056_TX_TX_SSI_MASTER, 0x5); if (phy->rev != 5) @@ -4098,7 +4098,7 @@ static void b43_nphy_tx_power_ctl_setup(struct b43_wldev *dev) b0[0] = b0[1] = 5612; b1[0] = b1[1] = -1393; } else { - if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { + if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) { for (c = 0; c < 2; c++) { idle[c] = nphy->pwr_ctl_info[c].idle_tssi_2g; target[c] = sprom->core_pwr_info[c].maxpwr_2g; @@ -4153,11 +4153,11 @@ static void b43_nphy_tx_power_ctl_setup(struct b43_wldev *dev) for (c = 0; c < 2; c++) { r = c ? 0x190 : 0x170; if (b43_nphy_ipa(dev)) - b43_radio_write(dev, r + 0x9, (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) ? 0xE : 0xC); + b43_radio_write(dev, r + 0x9, (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) ? 0xE : 0xC); } } else { if (b43_nphy_ipa(dev)) { - tmp = (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) ? 0xC : 0xE; + tmp = (b43_current_band(dev->wl) == NL80211_BAND_5GHZ) ? 
0xC : 0xE; b43_radio_write(dev, B2056_TX0 | B2056_TX_TX_SSI_MUX, tmp); b43_radio_write(dev, @@ -4267,13 +4267,13 @@ static void b43_nphy_tx_gain_table_upload(struct b43_wldev *dev) } else if (phy->rev >= 7) { pga_gain = (table[i] >> 24) & 0xf; pad_gain = (table[i] >> 19) & 0x1f; - if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) + if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) rfpwr_offset = rf_pwr_offset_table[pad_gain]; else rfpwr_offset = rf_pwr_offset_table[pga_gain]; } else { pga_gain = (table[i] >> 24) & 0xF; - if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) + if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) rfpwr_offset = b43_ntab_papd_pga_gain_delta_ipa_2g[pga_gain]; else rfpwr_offset = 0; /* FIXME */ @@ -4288,7 +4288,7 @@ static void b43_nphy_tx_gain_table_upload(struct b43_wldev *dev) static void b43_nphy_pa_override(struct b43_wldev *dev, bool enable) { struct b43_phy_n *nphy = dev->phy.n; - enum ieee80211_band band; + enum nl80211_band band; u16 tmp; if (!enable) { @@ -4300,12 +4300,12 @@ static void b43_nphy_pa_override(struct b43_wldev *dev, bool enable) if (dev->phy.rev >= 7) { tmp = 0x1480; } else if (dev->phy.rev >= 3) { - if (band == IEEE80211_BAND_5GHZ) + if (band == NL80211_BAND_5GHZ) tmp = 0x600; else tmp = 0x480; } else { - if (band == IEEE80211_BAND_5GHZ) + if (band == NL80211_BAND_5GHZ) tmp = 0x180; else tmp = 0x120; @@ -4734,7 +4734,7 @@ static void b43_nphy_restore_rssi_cal(struct b43_wldev *dev) u16 *rssical_radio_regs = NULL; u16 *rssical_phy_regs = NULL; - if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { + if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) { if (!nphy->rssical_chanspec_2G.center_freq) return; rssical_radio_regs = nphy->rssical_cache.rssical_radio_regs_2G; @@ -4804,7 +4804,7 @@ static void b43_nphy_tx_cal_radio_setup_rev7(struct b43_wldev *dev) save[off + 7] = b43_radio_read(dev, r + R2057_TX0_TSSIG); save[off + 8] = b43_radio_read(dev, r + R2057_TX0_TSSI_MISC1); - if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) { + if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ) { b43_radio_write(dev, r + R2057_TX0_TX_SSI_MASTER, 0xA); b43_radio_write(dev, r + R2057_TX0_IQCAL_VCM_HG, 0x43); b43_radio_write(dev, r + R2057_TX0_IQCAL_IDAC, 0x55); @@ -4864,7 +4864,7 @@ static void b43_nphy_tx_cal_radio_setup(struct b43_wldev *dev) save[offset + 9] = b43_radio_read(dev, B2055_XOMISC); save[offset + 10] = b43_radio_read(dev, B2055_PLL_LFC1); - if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) { + if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ) { b43_radio_write(dev, tmp | B2055_CAL_RVARCTL, 0x0A); b43_radio_write(dev, tmp | B2055_CAL_LPOCTL, 0x40); b43_radio_write(dev, tmp | B2055_CAL_TS, 0x55); @@ -5005,7 +5005,7 @@ static void b43_nphy_int_pa_set_tx_dig_filters(struct b43_wldev *dev) b43_nphy_pa_set_tx_dig_filter(dev, 0x186, tbl_tx_filter_coef_rev4[3]); } else { - if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) + if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ) b43_nphy_pa_set_tx_dig_filter(dev, 0x186, tbl_tx_filter_coef_rev4[5]); if (dev->phy.channel == 14) @@ -5185,7 +5185,7 @@ static void b43_nphy_tx_cal_phy_setup(struct b43_wldev *dev) false, 0); } else if (phy->rev == 7) { b43_radio_maskset(dev, R2057_OVR_REG0, 1 << 4, 1 << 4); - if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { + if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) { b43_radio_maskset(dev, R2057_PAD2G_TUNE_PUS_CORE0, ~1, 0); b43_radio_maskset(dev, R2057_PAD2G_TUNE_PUS_CORE1, ~1, 0); } else { @@ -5210,7 +5210,7 @@ static void 
b43_nphy_tx_cal_phy_setup(struct b43_wldev *dev) b43_ntab_write(dev, B43_NTAB16(8, 18), tmp); regs[5] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC1); regs[6] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC2); - if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) + if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ) tmp = 0x0180; else tmp = 0x0120; @@ -5233,7 +5233,7 @@ static void b43_nphy_save_cal(struct b43_wldev *dev) if (nphy->hang_avoid) b43_nphy_stay_in_carrier_search(dev, 1); - if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { + if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) { rxcal_coeffs = &nphy->cal_cache.rxcal_coeffs_2G; txcal_radio_regs = nphy->cal_cache.txcal_radio_regs_2G; iqcal_chanspec = &nphy->iqcal_chanspec_2G; @@ -5304,7 +5304,7 @@ static void b43_nphy_restore_cal(struct b43_wldev *dev) u16 *txcal_radio_regs = NULL; struct b43_phy_n_iq_comp *rxcal_coeffs = NULL; - if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { + if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) { if (!nphy->iqcal_chanspec_2G.center_freq) return; table = nphy->cal_cache.txcal_coeffs_2G; @@ -5332,7 +5332,7 @@ static void b43_nphy_restore_cal(struct b43_wldev *dev) if (dev->phy.rev < 2) b43_nphy_tx_iq_workaround(dev); - if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { + if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) { txcal_radio_regs = nphy->cal_cache.txcal_radio_regs_2G; rxcal_coeffs = &nphy->cal_cache.rxcal_coeffs_2G; } else { @@ -5422,7 +5422,7 @@ static int b43_nphy_cal_tx_iq_lo(struct b43_wldev *dev, phy6or5x = dev->phy.rev >= 6 || (dev->phy.rev == 5 && nphy->ipa2g_on && - b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ); + b43_current_band(dev->wl) == NL80211_BAND_2GHZ); if (phy6or5x) { if (b43_is_40mhz(dev)) { b43_ntab_write_bulk(dev, B43_NTAB16(15, 0), 18, @@ -5657,7 +5657,7 @@ static int b43_nphy_rev2_cal_rx_iq(struct b43_wldev *dev, u16 tmp[6]; u16 uninitialized_var(cur_hpf1), uninitialized_var(cur_hpf2), cur_lna; u32 real, imag; - enum ieee80211_band band; + enum nl80211_band band; u8 use; u16 cur_hpf; @@ -5712,18 +5712,18 @@ static int b43_nphy_rev2_cal_rx_iq(struct b43_wldev *dev, band = b43_current_band(dev->wl); if (nphy->rxcalparams & 0xFF000000) { - if (band == IEEE80211_BAND_5GHZ) + if (band == NL80211_BAND_5GHZ) b43_phy_write(dev, rfctl[0], 0x140); else b43_phy_write(dev, rfctl[0], 0x110); } else { - if (band == IEEE80211_BAND_5GHZ) + if (band == NL80211_BAND_5GHZ) b43_phy_write(dev, rfctl[0], 0x180); else b43_phy_write(dev, rfctl[0], 0x120); } - if (band == IEEE80211_BAND_5GHZ) + if (band == NL80211_BAND_5GHZ) b43_phy_write(dev, rfctl[1], 0x148); else b43_phy_write(dev, rfctl[1], 0x114); @@ -5919,7 +5919,7 @@ static enum b43_txpwr_result b43_nphy_op_recalc_txpower(struct b43_wldev *dev, #if 0 /* Some extra gains */ hw_gain = 6; /* N-PHY specific */ - if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) + if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) hw_gain += sprom->antenna_gain.a0; else hw_gain += sprom->antenna_gain.a1; @@ -6043,7 +6043,7 @@ static int b43_phy_initn(struct b43_wldev *dev) u8 tx_pwr_state; struct nphy_txgains target; u16 tmp; - enum ieee80211_band tmp2; + enum nl80211_band tmp2; bool do_rssi_cal; u16 clip[2]; @@ -6051,7 +6051,7 @@ static int b43_phy_initn(struct b43_wldev *dev) if ((dev->phy.rev >= 3) && (sprom->boardflags_lo & B43_BFL_EXTLNA) && - (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)) { + (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)) { switch (dev->dev->bus_type) { #ifdef CONFIG_B43_BCMA case B43_BUS_BCMA: @@ -6170,7 
+6170,7 @@ static int b43_phy_initn(struct b43_wldev *dev) b43_nphy_classifier(dev, 0, 0); b43_nphy_read_clip_detection(dev, clip); - if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) + if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) b43_nphy_bphy_init(dev); tx_pwr_state = nphy->txpwrctrl; @@ -6187,7 +6187,7 @@ static int b43_phy_initn(struct b43_wldev *dev) do_rssi_cal = false; if (phy->rev >= 3) { - if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) + if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) do_rssi_cal = !nphy->rssical_chanspec_2G.center_freq; else do_rssi_cal = !nphy->rssical_chanspec_5G.center_freq; @@ -6201,7 +6201,7 @@ static int b43_phy_initn(struct b43_wldev *dev) } if (!((nphy->measure_hold & 0x6) != 0)) { - if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) + if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) do_cal = !nphy->iqcal_chanspec_2G.center_freq; else do_cal = !nphy->iqcal_chanspec_5G.center_freq; @@ -6291,7 +6291,7 @@ static void b43_nphy_channel_setup(struct b43_wldev *dev, int ch = new_channel->hw_value; u16 tmp16; - if (new_channel->band == IEEE80211_BAND_5GHZ) { + if (new_channel->band == NL80211_BAND_5GHZ) { /* Switch to 2 GHz for a moment to access B43_PHY_B_BBCFG */ b43_phy_mask(dev, B43_NPHY_BANDCTL, ~B43_NPHY_BANDCTL_5GHZ); @@ -6302,7 +6302,7 @@ static void b43_nphy_channel_setup(struct b43_wldev *dev, B43_PHY_B_BBCFG_RSTCCA | B43_PHY_B_BBCFG_RSTRX); b43_write16(dev, B43_MMIO_PSM_PHY_HDR, tmp16); b43_phy_set(dev, B43_NPHY_BANDCTL, B43_NPHY_BANDCTL_5GHZ); - } else if (new_channel->band == IEEE80211_BAND_2GHZ) { + } else if (new_channel->band == NL80211_BAND_2GHZ) { b43_phy_mask(dev, B43_NPHY_BANDCTL, ~B43_NPHY_BANDCTL_5GHZ); tmp16 = b43_read16(dev, B43_MMIO_PSM_PHY_HDR); b43_write16(dev, B43_MMIO_PSM_PHY_HDR, tmp16 | 4); @@ -6319,7 +6319,7 @@ static void b43_nphy_channel_setup(struct b43_wldev *dev, b43_phy_set(dev, B43_PHY_B_TEST, 0x0800); } else { b43_nphy_classifier(dev, 2, 2); - if (new_channel->band == IEEE80211_BAND_2GHZ) + if (new_channel->band == NL80211_BAND_2GHZ) b43_phy_mask(dev, B43_PHY_B_TEST, ~0x840); } @@ -6449,7 +6449,7 @@ static int b43_nphy_set_channel(struct b43_wldev *dev, &(tabent_r7->phy_regs) : &(tabent_r7_2g->phy_regs); if (phy->radio_rev <= 4 || phy->radio_rev == 6) { - tmp = (channel->band == IEEE80211_BAND_5GHZ) ? 2 : 0; + tmp = (channel->band == NL80211_BAND_5GHZ) ? 2 : 0; b43_radio_maskset(dev, R2057_TIA_CONFIG_CORE0, ~2, tmp); b43_radio_maskset(dev, R2057_TIA_CONFIG_CORE1, ~2, tmp); } @@ -6457,12 +6457,12 @@ static int b43_nphy_set_channel(struct b43_wldev *dev, b43_radio_2057_setup(dev, tabent_r7, tabent_r7_2g); b43_nphy_channel_setup(dev, phy_regs, channel); } else if (phy->rev >= 3) { - tmp = (channel->band == IEEE80211_BAND_5GHZ) ? 4 : 0; + tmp = (channel->band == NL80211_BAND_5GHZ) ? 4 : 0; b43_radio_maskset(dev, 0x08, 0xFFFB, tmp); b43_radio_2056_setup(dev, tabent_r3); b43_nphy_channel_setup(dev, &(tabent_r3->phy_regs), channel); } else { - tmp = (channel->band == IEEE80211_BAND_5GHZ) ? 0x0020 : 0x0050; + tmp = (channel->band == NL80211_BAND_5GHZ) ? 
0x0020 : 0x0050; b43_radio_maskset(dev, B2055_MASTER1, 0xFF8F, tmp); b43_radio_2055_setup(dev, tabent_r2); b43_nphy_channel_setup(dev, &(tabent_r2->phy_regs), channel); @@ -6692,7 +6692,7 @@ static int b43_nphy_op_switch_channel(struct b43_wldev *dev, enum nl80211_channel_type channel_type = cfg80211_get_chandef_type(&dev->wl->hw->conf.chandef); - if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { + if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) { if ((new_channel < 1) || (new_channel > 14)) return -EINVAL; } else { @@ -6705,7 +6705,7 @@ static int b43_nphy_op_switch_channel(struct b43_wldev *dev, static unsigned int b43_nphy_op_get_default_chan(struct b43_wldev *dev) { - if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) + if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) return 1; return 36; } diff --git a/drivers/net/wireless/broadcom/b43/tables_lpphy.c b/drivers/net/wireless/broadcom/b43/tables_lpphy.c index cff187c5616d..ce01e1645df7 100644 --- a/drivers/net/wireless/broadcom/b43/tables_lpphy.c +++ b/drivers/net/wireless/broadcom/b43/tables_lpphy.c @@ -560,7 +560,7 @@ void b2062_upload_init_table(struct b43_wldev *dev) for (i = 0; i < ARRAY_SIZE(b2062_init_tab); i++) { e = &b2062_init_tab[i]; - if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { + if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) { if (!(e->flags & B206X_FLAG_G)) continue; b43_radio_write(dev, e->offset, e->value_g); @@ -579,7 +579,7 @@ void b2063_upload_init_table(struct b43_wldev *dev) for (i = 0; i < ARRAY_SIZE(b2063_init_tab); i++) { e = &b2063_init_tab[i]; - if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { + if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) { if (!(e->flags & B206X_FLAG_G)) continue; b43_radio_write(dev, e->offset, e->value_g); @@ -2379,12 +2379,12 @@ static void lpphy_rev2plus_write_gain_table(struct b43_wldev *dev, int offset, tmp |= data.pga << 8; tmp |= data.gm; if (dev->phy.rev >= 3) { - if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) + if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ) tmp |= 0x10 << 24; else tmp |= 0x70 << 24; } else { - if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) + if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ) tmp |= 0x14 << 24; else tmp |= 0x7F << 24; @@ -2423,7 +2423,7 @@ void lpphy_init_tx_gain_table(struct b43_wldev *dev) (sprom->boardflags_lo & B43_BFL_HGPA)) lpphy_write_gain_table_bulk(dev, 0, 128, lpphy_rev0_nopa_tx_gain_table); - else if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) + else if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) lpphy_write_gain_table_bulk(dev, 0, 128, lpphy_rev0_2ghz_tx_gain_table); else @@ -2435,7 +2435,7 @@ void lpphy_init_tx_gain_table(struct b43_wldev *dev) (sprom->boardflags_lo & B43_BFL_HGPA)) lpphy_write_gain_table_bulk(dev, 0, 128, lpphy_rev1_nopa_tx_gain_table); - else if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) + else if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) lpphy_write_gain_table_bulk(dev, 0, 128, lpphy_rev1_2ghz_tx_gain_table); else @@ -2446,7 +2446,7 @@ void lpphy_init_tx_gain_table(struct b43_wldev *dev) if (sprom->boardflags_hi & B43_BFH_NOPA) lpphy_write_gain_table_bulk(dev, 0, 128, lpphy_rev2_nopa_tx_gain_table); - else if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) + else if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) lpphy_write_gain_table_bulk(dev, 0, 128, lpphy_rev2_2ghz_tx_gain_table); else diff --git a/drivers/net/wireless/broadcom/b43/tables_nphy.c b/drivers/net/wireless/broadcom/b43/tables_nphy.c index 
b2f0d245bcf3..44e0957a70cc 100644 --- a/drivers/net/wireless/broadcom/b43/tables_nphy.c +++ b/drivers/net/wireless/broadcom/b43/tables_nphy.c @@ -3502,7 +3502,7 @@ static void b43_nphy_tables_init_rev7_volatile(struct b43_wldev *dev) { 0x2, 0x18, 0x2 }, /* Core 1 */ }; - if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) + if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ) antswlut = sprom->fem.ghz5.antswlut; else antswlut = sprom->fem.ghz2.antswlut; @@ -3566,7 +3566,7 @@ static void b43_nphy_tables_init_rev3(struct b43_wldev *dev) struct ssb_sprom *sprom = dev->dev->bus_sprom; u8 antswlut; - if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) + if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ) antswlut = sprom->fem.ghz5.antswlut; else antswlut = sprom->fem.ghz2.antswlut; @@ -3651,7 +3651,7 @@ static const u32 *b43_nphy_get_ipa_gain_table(struct b43_wldev *dev) { struct b43_phy *phy = &dev->phy; - if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { + if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) { switch (phy->rev) { case 17: if (phy->radio_rev == 14) @@ -3698,17 +3698,17 @@ static const u32 *b43_nphy_get_ipa_gain_table(struct b43_wldev *dev) const u32 *b43_nphy_get_tx_gain_table(struct b43_wldev *dev) { struct b43_phy *phy = &dev->phy; - enum ieee80211_band band = b43_current_band(dev->wl); + enum nl80211_band band = b43_current_band(dev->wl); struct ssb_sprom *sprom = dev->dev->bus_sprom; if (dev->phy.rev < 3) return b43_ntab_tx_gain_rev0_1_2; /* rev 3+ */ - if ((dev->phy.n->ipa2g_on && band == IEEE80211_BAND_2GHZ) || - (dev->phy.n->ipa5g_on && band == IEEE80211_BAND_5GHZ)) { + if ((dev->phy.n->ipa2g_on && band == NL80211_BAND_2GHZ) || + (dev->phy.n->ipa5g_on && band == NL80211_BAND_5GHZ)) { return b43_nphy_get_ipa_gain_table(dev); - } else if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) { + } else if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ) { switch (phy->rev) { case 6: case 5: @@ -3746,7 +3746,7 @@ const s16 *b43_ntab_get_rf_pwr_offset_table(struct b43_wldev *dev) { struct b43_phy *phy = &dev->phy; - if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { + if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) { switch (phy->rev) { case 17: if (phy->radio_rev == 14) diff --git a/drivers/net/wireless/broadcom/b43/tables_phy_lcn.c b/drivers/net/wireless/broadcom/b43/tables_phy_lcn.c index e347b8d80ea4..704ef1bcb5b1 100644 --- a/drivers/net/wireless/broadcom/b43/tables_phy_lcn.c +++ b/drivers/net/wireless/broadcom/b43/tables_phy_lcn.c @@ -701,7 +701,7 @@ void b43_phy_lcn_tables_init(struct b43_wldev *dev) b43_phy_lcn_upload_static_tables(dev); - if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { + if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) { if (sprom->boardflags_lo & B43_BFL_FEM) b43_phy_lcn_load_tx_gain_tab(dev, b43_lcntab_tx_gain_tbl_2ghz_ext_pa_rev0); diff --git a/drivers/net/wireless/broadcom/b43/xmit.c b/drivers/net/wireless/broadcom/b43/xmit.c index 426dc13c44cd..f6201264de49 100644 --- a/drivers/net/wireless/broadcom/b43/xmit.c +++ b/drivers/net/wireless/broadcom/b43/xmit.c @@ -803,7 +803,7 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr) chanid = (chanstat & B43_RX_CHAN_ID) >> B43_RX_CHAN_ID_SHIFT; switch (chanstat & B43_RX_CHAN_PHYTYPE) { case B43_PHYTYPE_A: - status.band = IEEE80211_BAND_5GHZ; + status.band = NL80211_BAND_5GHZ; B43_WARN_ON(1); /* FIXME: We don't really know which value the "chanid" contains. * So the following assignment might be wrong. 
*/ @@ -811,7 +811,7 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr) ieee80211_channel_to_frequency(chanid, status.band); break; case B43_PHYTYPE_G: - status.band = IEEE80211_BAND_2GHZ; + status.band = NL80211_BAND_2GHZ; /* Somewhere between 478.104 and 508.1084 firmware for G-PHY * has been modified to be compatible with N-PHY and others. */ @@ -826,9 +826,9 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr) /* chanid is the SHM channel cookie. Which is the plain * channel number in b43. */ if (chanstat & B43_RX_CHAN_5GHZ) - status.band = IEEE80211_BAND_5GHZ; + status.band = NL80211_BAND_5GHZ; else - status.band = IEEE80211_BAND_2GHZ; + status.band = NL80211_BAND_2GHZ; status.freq = ieee80211_channel_to_frequency(chanid, status.band); break; diff --git a/drivers/net/wireless/broadcom/b43legacy/main.c b/drivers/net/wireless/broadcom/b43legacy/main.c index afc1fb3e38df..83770d2ea057 100644 --- a/drivers/net/wireless/broadcom/b43legacy/main.c +++ b/drivers/net/wireless/broadcom/b43legacy/main.c @@ -1056,7 +1056,7 @@ static void b43legacy_write_probe_resp_plcp(struct b43legacy_wldev *dev, b43legacy_generate_plcp_hdr(&plcp, size + FCS_LEN, rate->hw_value); dur = ieee80211_generic_frame_duration(dev->wl->hw, dev->wl->vif, - IEEE80211_BAND_2GHZ, + NL80211_BAND_2GHZ, size, rate); /* Write PLCP in two parts and timing for packet transfer */ @@ -1122,7 +1122,7 @@ static const u8 *b43legacy_generate_probe_resp(struct b43legacy_wldev *dev, IEEE80211_STYPE_PROBE_RESP); dur = ieee80211_generic_frame_duration(dev->wl->hw, dev->wl->vif, - IEEE80211_BAND_2GHZ, + NL80211_BAND_2GHZ, *dest_size, rate); hdr->duration_id = dur; @@ -2719,7 +2719,7 @@ static int b43legacy_op_dev_config(struct ieee80211_hw *hw, /* Switch the PHY mode (if necessary). 
*/ switch (conf->chandef.chan->band) { - case IEEE80211_BAND_2GHZ: + case NL80211_BAND_2GHZ: if (phy->type == B43legacy_PHYTYPE_B) new_phymode = B43legacy_PHYMODE_B; else @@ -2792,7 +2792,7 @@ out_unlock_mutex: static void b43legacy_update_basic_rates(struct b43legacy_wldev *dev, u32 brates) { struct ieee80211_supported_band *sband = - dev->wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ]; + dev->wl->hw->wiphy->bands[NL80211_BAND_2GHZ]; struct ieee80211_rate *rate; int i; u16 basic, direct, offset, basic_offset, rateptr; @@ -3630,13 +3630,13 @@ static int b43legacy_setup_modes(struct b43legacy_wldev *dev, phy->possible_phymodes = 0; if (have_bphy) { - hw->wiphy->bands[IEEE80211_BAND_2GHZ] = + hw->wiphy->bands[NL80211_BAND_2GHZ] = &b43legacy_band_2GHz_BPHY; phy->possible_phymodes |= B43legacy_PHYMODE_B; } if (have_gphy) { - hw->wiphy->bands[IEEE80211_BAND_2GHZ] = + hw->wiphy->bands[NL80211_BAND_2GHZ] = &b43legacy_band_2GHz_GPHY; phy->possible_phymodes |= B43legacy_PHYMODE_G; } diff --git a/drivers/net/wireless/broadcom/b43legacy/xmit.c b/drivers/net/wireless/broadcom/b43legacy/xmit.c index 34bf3f0b729f..35ccf400b02c 100644 --- a/drivers/net/wireless/broadcom/b43legacy/xmit.c +++ b/drivers/net/wireless/broadcom/b43legacy/xmit.c @@ -565,7 +565,7 @@ void b43legacy_rx(struct b43legacy_wldev *dev, switch (chanstat & B43legacy_RX_CHAN_PHYTYPE) { case B43legacy_PHYTYPE_B: case B43legacy_PHYTYPE_G: - status.band = IEEE80211_BAND_2GHZ; + status.band = NL80211_BAND_2GHZ; status.freq = chanid + 2400; break; default: diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c index d5c2a27573b4..9a567e263bb1 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c @@ -144,7 +144,7 @@ static struct ieee80211_rate __wl_rates[] = { #define wl_a_rates_size (wl_g_rates_size - 4) #define CHAN2G(_channel, _freq) { \ - .band = IEEE80211_BAND_2GHZ, \ + .band = NL80211_BAND_2GHZ, \ .center_freq = (_freq), \ .hw_value = (_channel), \ .flags = IEEE80211_CHAN_DISABLED, \ @@ -153,7 +153,7 @@ static struct ieee80211_rate __wl_rates[] = { } #define CHAN5G(_channel) { \ - .band = IEEE80211_BAND_5GHZ, \ + .band = NL80211_BAND_5GHZ, \ .center_freq = 5000 + (5 * (_channel)), \ .hw_value = (_channel), \ .flags = IEEE80211_CHAN_DISABLED, \ @@ -181,13 +181,13 @@ static struct ieee80211_channel __wl_5ghz_channels[] = { * above is added to the band during setup. 
*/ static const struct ieee80211_supported_band __wl_band_2ghz = { - .band = IEEE80211_BAND_2GHZ, + .band = NL80211_BAND_2GHZ, .bitrates = wl_g_rates, .n_bitrates = wl_g_rates_size, }; static const struct ieee80211_supported_band __wl_band_5ghz = { - .band = IEEE80211_BAND_5GHZ, + .band = NL80211_BAND_5GHZ, .bitrates = wl_a_rates, .n_bitrates = wl_a_rates_size, }; @@ -292,13 +292,13 @@ static u16 chandef_to_chanspec(struct brcmu_d11inf *d11inf, WARN_ON_ONCE(1); } switch (ch->chan->band) { - case IEEE80211_BAND_2GHZ: + case NL80211_BAND_2GHZ: ch_inf.band = BRCMU_CHAN_BAND_2G; break; - case IEEE80211_BAND_5GHZ: + case NL80211_BAND_5GHZ: ch_inf.band = BRCMU_CHAN_BAND_5G; break; - case IEEE80211_BAND_60GHZ: + case NL80211_BAND_60GHZ: default: WARN_ON_ONCE(1); } @@ -2679,9 +2679,9 @@ static s32 brcmf_inform_single_bss(struct brcmf_cfg80211_info *cfg, channel = bi->ctl_ch; if (channel <= CH_MAX_2G_CHANNEL) - band = wiphy->bands[IEEE80211_BAND_2GHZ]; + band = wiphy->bands[NL80211_BAND_2GHZ]; else - band = wiphy->bands[IEEE80211_BAND_5GHZ]; + band = wiphy->bands[NL80211_BAND_5GHZ]; freq = ieee80211_channel_to_frequency(channel, band->band); notify_channel = ieee80211_get_channel(wiphy, freq); @@ -2788,9 +2788,9 @@ static s32 brcmf_inform_ibss(struct brcmf_cfg80211_info *cfg, cfg->d11inf.decchspec(&ch); if (ch.band == BRCMU_CHAN_BAND_2G) - band = wiphy->bands[IEEE80211_BAND_2GHZ]; + band = wiphy->bands[NL80211_BAND_2GHZ]; else - band = wiphy->bands[IEEE80211_BAND_5GHZ]; + band = wiphy->bands[NL80211_BAND_5GHZ]; freq = ieee80211_channel_to_frequency(ch.chnum, band->band); cfg->channel = freq; @@ -5215,9 +5215,9 @@ brcmf_bss_roaming_done(struct brcmf_cfg80211_info *cfg, cfg->d11inf.decchspec(&ch); if (ch.band == BRCMU_CHAN_BAND_2G) - band = wiphy->bands[IEEE80211_BAND_2GHZ]; + band = wiphy->bands[NL80211_BAND_2GHZ]; else - band = wiphy->bands[IEEE80211_BAND_5GHZ]; + band = wiphy->bands[NL80211_BAND_5GHZ]; freq = ieee80211_channel_to_frequency(ch.chnum, band->band); notify_channel = ieee80211_get_channel(wiphy, freq); @@ -5707,11 +5707,11 @@ static int brcmf_construct_chaninfo(struct brcmf_cfg80211_info *cfg, } wiphy = cfg_to_wiphy(cfg); - band = wiphy->bands[IEEE80211_BAND_2GHZ]; + band = wiphy->bands[NL80211_BAND_2GHZ]; if (band) for (i = 0; i < band->n_channels; i++) band->channels[i].flags = IEEE80211_CHAN_DISABLED; - band = wiphy->bands[IEEE80211_BAND_5GHZ]; + band = wiphy->bands[NL80211_BAND_5GHZ]; if (band) for (i = 0; i < band->n_channels; i++) band->channels[i].flags = IEEE80211_CHAN_DISABLED; @@ -5722,9 +5722,9 @@ static int brcmf_construct_chaninfo(struct brcmf_cfg80211_info *cfg, cfg->d11inf.decchspec(&ch); if (ch.band == BRCMU_CHAN_BAND_2G) { - band = wiphy->bands[IEEE80211_BAND_2GHZ]; + band = wiphy->bands[NL80211_BAND_2GHZ]; } else if (ch.band == BRCMU_CHAN_BAND_5G) { - band = wiphy->bands[IEEE80211_BAND_5GHZ]; + band = wiphy->bands[NL80211_BAND_5GHZ]; } else { brcmf_err("Invalid channel Spec. 
0x%x.\n", ch.chspec); continue; @@ -5839,7 +5839,7 @@ static int brcmf_enable_bw40_2g(struct brcmf_cfg80211_info *cfg) return err; } - band = cfg_to_wiphy(cfg)->bands[IEEE80211_BAND_2GHZ]; + band = cfg_to_wiphy(cfg)->bands[NL80211_BAND_2GHZ]; list = (struct brcmf_chanspec_list *)pbuf; num_chan = le32_to_cpu(list->count); for (i = 0; i < num_chan; i++) { @@ -5871,11 +5871,11 @@ static void brcmf_get_bwcap(struct brcmf_if *ifp, u32 bw_cap[]) band = WLC_BAND_2G; err = brcmf_fil_iovar_int_get(ifp, "bw_cap", &band); if (!err) { - bw_cap[IEEE80211_BAND_2GHZ] = band; + bw_cap[NL80211_BAND_2GHZ] = band; band = WLC_BAND_5G; err = brcmf_fil_iovar_int_get(ifp, "bw_cap", &band); if (!err) { - bw_cap[IEEE80211_BAND_5GHZ] = band; + bw_cap[NL80211_BAND_5GHZ] = band; return; } WARN_ON(1); @@ -5890,14 +5890,14 @@ static void brcmf_get_bwcap(struct brcmf_if *ifp, u32 bw_cap[]) switch (mimo_bwcap) { case WLC_N_BW_40ALL: - bw_cap[IEEE80211_BAND_2GHZ] |= WLC_BW_40MHZ_BIT; + bw_cap[NL80211_BAND_2GHZ] |= WLC_BW_40MHZ_BIT; /* fall-thru */ case WLC_N_BW_20IN2G_40IN5G: - bw_cap[IEEE80211_BAND_5GHZ] |= WLC_BW_40MHZ_BIT; + bw_cap[NL80211_BAND_5GHZ] |= WLC_BW_40MHZ_BIT; /* fall-thru */ case WLC_N_BW_20ALL: - bw_cap[IEEE80211_BAND_2GHZ] |= WLC_BW_20MHZ_BIT; - bw_cap[IEEE80211_BAND_5GHZ] |= WLC_BW_20MHZ_BIT; + bw_cap[NL80211_BAND_2GHZ] |= WLC_BW_20MHZ_BIT; + bw_cap[NL80211_BAND_5GHZ] |= WLC_BW_20MHZ_BIT; break; default: brcmf_err("invalid mimo_bw_cap value\n"); @@ -5938,7 +5938,7 @@ static void brcmf_update_vht_cap(struct ieee80211_supported_band *band, __le16 mcs_map; /* not allowed in 2.4G band */ - if (band->band == IEEE80211_BAND_2GHZ) + if (band->band == NL80211_BAND_2GHZ) return; band->vht_cap.vht_supported = true; @@ -5997,8 +5997,8 @@ static int brcmf_setup_wiphybands(struct wiphy *wiphy) brcmf_get_bwcap(ifp, bw_cap); } brcmf_dbg(INFO, "nmode=%d, vhtmode=%d, bw_cap=(%d, %d)\n", - nmode, vhtmode, bw_cap[IEEE80211_BAND_2GHZ], - bw_cap[IEEE80211_BAND_5GHZ]); + nmode, vhtmode, bw_cap[NL80211_BAND_2GHZ], + bw_cap[NL80211_BAND_5GHZ]); err = brcmf_fil_iovar_int_get(ifp, "rxchain", &rxchain); if (err) { @@ -6321,7 +6321,7 @@ static int brcmf_setup_wiphy(struct wiphy *wiphy, struct brcmf_if *ifp) } band->n_channels = ARRAY_SIZE(__wl_2ghz_channels); - wiphy->bands[IEEE80211_BAND_2GHZ] = band; + wiphy->bands[NL80211_BAND_2GHZ] = band; } if (bandlist[i] == cpu_to_le32(WLC_BAND_5G)) { band = kmemdup(&__wl_band_5ghz, sizeof(__wl_band_5ghz), @@ -6338,7 +6338,7 @@ static int brcmf_setup_wiphy(struct wiphy *wiphy, struct brcmf_if *ifp) } band->n_channels = ARRAY_SIZE(__wl_5ghz_channels); - wiphy->bands[IEEE80211_BAND_5GHZ] = band; + wiphy->bands[NL80211_BAND_5GHZ] = band; } } err = brcmf_setup_wiphybands(wiphy); @@ -6604,13 +6604,13 @@ static void brcmf_free_wiphy(struct wiphy *wiphy) kfree(wiphy->iface_combinations[i].limits); } kfree(wiphy->iface_combinations); - if (wiphy->bands[IEEE80211_BAND_2GHZ]) { - kfree(wiphy->bands[IEEE80211_BAND_2GHZ]->channels); - kfree(wiphy->bands[IEEE80211_BAND_2GHZ]); + if (wiphy->bands[NL80211_BAND_2GHZ]) { + kfree(wiphy->bands[NL80211_BAND_2GHZ]->channels); + kfree(wiphy->bands[NL80211_BAND_2GHZ]); } - if (wiphy->bands[IEEE80211_BAND_5GHZ]) { - kfree(wiphy->bands[IEEE80211_BAND_5GHZ]->channels); - kfree(wiphy->bands[IEEE80211_BAND_5GHZ]); + if (wiphy->bands[NL80211_BAND_5GHZ]) { + kfree(wiphy->bands[NL80211_BAND_5GHZ]->channels); + kfree(wiphy->bands[NL80211_BAND_5GHZ]); } wiphy_free(wiphy); } @@ -6698,8 +6698,8 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr, * 
cfg80211 here that we do and have it decide we can enable * it. But first check if device does support 2G operation. */ - if (wiphy->bands[IEEE80211_BAND_2GHZ]) { - cap = &wiphy->bands[IEEE80211_BAND_2GHZ]->ht_cap.cap; + if (wiphy->bands[NL80211_BAND_2GHZ]) { + cap = &wiphy->bands[NL80211_BAND_2GHZ]->ht_cap.cap; *cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40; } err = wiphy_register(wiphy); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c index b5a49e564f25..c2ac91df35ed 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c @@ -1430,8 +1430,8 @@ int brcmf_p2p_notify_action_frame_rx(struct brcmf_if *ifp, freq = ieee80211_channel_to_frequency(ch.chnum, ch.band == BRCMU_CHAN_BAND_2G ? - IEEE80211_BAND_2GHZ : - IEEE80211_BAND_5GHZ); + NL80211_BAND_2GHZ : + NL80211_BAND_5GHZ); wdev = &ifp->vif->wdev; cfg80211_rx_mgmt(wdev, freq, 0, (u8 *)mgmt_frame, mgmt_frame_len, 0); @@ -1900,8 +1900,8 @@ s32 brcmf_p2p_notify_rx_mgmt_p2p_probereq(struct brcmf_if *ifp, mgmt_frame_len = e->datalen - sizeof(*rxframe); freq = ieee80211_channel_to_frequency(ch.chnum, ch.band == BRCMU_CHAN_BAND_2G ? - IEEE80211_BAND_2GHZ : - IEEE80211_BAND_5GHZ); + NL80211_BAND_2GHZ : + NL80211_BAND_5GHZ); cfg80211_rx_mgmt(&vif->wdev, freq, 0, mgmt_frame, mgmt_frame_len, 0); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/channel.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/channel.c index 38bd5890bd53..3a03287fa912 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/channel.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/channel.c @@ -636,7 +636,7 @@ static void brcms_reg_apply_radar_flags(struct wiphy *wiphy) struct ieee80211_channel *ch; int i; - sband = wiphy->bands[IEEE80211_BAND_5GHZ]; + sband = wiphy->bands[NL80211_BAND_5GHZ]; if (!sband) return; @@ -666,7 +666,7 @@ brcms_reg_apply_beaconing_flags(struct wiphy *wiphy, const struct ieee80211_reg_rule *rule; int band, i; - for (band = 0; band < IEEE80211_NUM_BANDS; band++) { + for (band = 0; band < NUM_NL80211_BANDS; band++) { sband = wiphy->bands[band]; if (!sband) continue; @@ -710,7 +710,7 @@ static void brcms_reg_notifier(struct wiphy *wiphy, brcms_reg_apply_beaconing_flags(wiphy, request->initiator); /* Disable radio if all channels disallowed by regulatory */ - for (band = 0; !ch_found && band < IEEE80211_NUM_BANDS; band++) { + for (band = 0; !ch_found && band < NUM_NL80211_BANDS; band++) { sband = wiphy->bands[band]; if (!sband) continue; @@ -755,9 +755,9 @@ void brcms_c_regd_init(struct brcms_c_info *wlc) &sup_chan); if (band_idx == BAND_2G_INDEX) - sband = wiphy->bands[IEEE80211_BAND_2GHZ]; + sband = wiphy->bands[NL80211_BAND_2GHZ]; else - sband = wiphy->bands[IEEE80211_BAND_5GHZ]; + sband = wiphy->bands[NL80211_BAND_5GHZ]; for (i = 0; i < sband->n_channels; i++) { ch = &sband->channels[i]; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c index 61ae2768132a..7c2a9a9bc372 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c @@ -49,7 +49,7 @@ FIF_PSPOLL) #define CHAN2GHZ(channel, freqency, chflags) { \ - .band = IEEE80211_BAND_2GHZ, \ + .band = NL80211_BAND_2GHZ, \ .center_freq = (freqency), \ .hw_value = (channel), \ .flags = chflags, \ @@ -58,7 +58,7 @@ } #define CHAN5GHZ(channel, chflags) { \ - .band = 
IEEE80211_BAND_5GHZ, \ + .band = NL80211_BAND_5GHZ, \ .center_freq = 5000 + 5*(channel), \ .hw_value = (channel), \ .flags = chflags, \ @@ -217,7 +217,7 @@ static struct ieee80211_rate legacy_ratetable[] = { }; static const struct ieee80211_supported_band brcms_band_2GHz_nphy_template = { - .band = IEEE80211_BAND_2GHZ, + .band = NL80211_BAND_2GHZ, .channels = brcms_2ghz_chantable, .n_channels = ARRAY_SIZE(brcms_2ghz_chantable), .bitrates = legacy_ratetable, @@ -238,7 +238,7 @@ static const struct ieee80211_supported_band brcms_band_2GHz_nphy_template = { }; static const struct ieee80211_supported_band brcms_band_5GHz_nphy_template = { - .band = IEEE80211_BAND_5GHZ, + .band = NL80211_BAND_5GHZ, .channels = brcms_5ghz_nphy_chantable, .n_channels = ARRAY_SIZE(brcms_5ghz_nphy_chantable), .bitrates = legacy_ratetable + BRCMS_LEGACY_5G_RATE_OFFSET, @@ -1026,8 +1026,8 @@ static int ieee_hw_rate_init(struct ieee80211_hw *hw) int has_5g = 0; u16 phy_type; - hw->wiphy->bands[IEEE80211_BAND_2GHZ] = NULL; - hw->wiphy->bands[IEEE80211_BAND_5GHZ] = NULL; + hw->wiphy->bands[NL80211_BAND_2GHZ] = NULL; + hw->wiphy->bands[NL80211_BAND_5GHZ] = NULL; phy_type = brcms_c_get_phy_type(wl->wlc, 0); if (phy_type == PHY_TYPE_N || phy_type == PHY_TYPE_LCN) { @@ -1038,7 +1038,7 @@ static int ieee_hw_rate_init(struct ieee80211_hw *hw) band->ht_cap.mcs.rx_mask[1] = 0; band->ht_cap.mcs.rx_highest = cpu_to_le16(72); } - hw->wiphy->bands[IEEE80211_BAND_2GHZ] = band; + hw->wiphy->bands[NL80211_BAND_2GHZ] = band; } else { return -EPERM; } @@ -1049,7 +1049,7 @@ static int ieee_hw_rate_init(struct ieee80211_hw *hw) if (phy_type == PHY_TYPE_N || phy_type == PHY_TYPE_LCN) { band = &wlc->bandstate[BAND_5G_INDEX]->band; *band = brcms_band_5GHz_nphy_template; - hw->wiphy->bands[IEEE80211_BAND_5GHZ] = band; + hw->wiphy->bands[NL80211_BAND_5GHZ] = band; } else { return -EPERM; } diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c index 218cbc8bf3a7..e16ee60639f5 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c @@ -7076,7 +7076,7 @@ prep_mac80211_status(struct brcms_c_info *wlc, struct d11rxhdr *rxh, channel = BRCMS_CHAN_CHANNEL(rxh->RxChan); rx_status->band = - channel > 14 ? IEEE80211_BAND_5GHZ : IEEE80211_BAND_2GHZ; + channel > 14 ? NL80211_BAND_5GHZ : NL80211_BAND_2GHZ; rx_status->freq = ieee80211_channel_to_frequency(channel, rx_status->band); @@ -7143,7 +7143,7 @@ prep_mac80211_status(struct brcms_c_info *wlc, struct d11rxhdr *rxh, * a subset of the 2.4G rates. See bitrates field * of brcms_band_5GHz_nphy (in mac80211_if.c). 
*/ - if (rx_status->band == IEEE80211_BAND_5GHZ) + if (rx_status->band == NL80211_BAND_5GHZ) rx_status->rate_idx -= BRCMS_LEGACY_5G_RATE_OFFSET; /* Determine short preamble and rate_idx */ diff --git a/drivers/net/wireless/cisco/airo.c b/drivers/net/wireless/cisco/airo.c index d2353f6e5214..4bd9e2b97e86 100644 --- a/drivers/net/wireless/cisco/airo.c +++ b/drivers/net/wireless/cisco/airo.c @@ -5836,7 +5836,7 @@ static int airo_get_freq(struct net_device *dev, ch = le16_to_cpu(status_rid.channel); if((ch > 0) && (ch < 15)) { fwrq->m = 100000 * - ieee80211_channel_to_frequency(ch, IEEE80211_BAND_2GHZ); + ieee80211_channel_to_frequency(ch, NL80211_BAND_2GHZ); fwrq->e = 1; } else { fwrq->m = ch; @@ -6894,7 +6894,7 @@ static int airo_get_range(struct net_device *dev, for(i = 0; i < 14; i++) { range->freq[k].i = i + 1; /* List index */ range->freq[k].m = 100000 * - ieee80211_channel_to_frequency(i + 1, IEEE80211_BAND_2GHZ); + ieee80211_channel_to_frequency(i + 1, NL80211_BAND_2GHZ); range->freq[k++].e = 1; /* Values in MHz -> * 10^5 * 10 */ } range->num_frequency = k; @@ -7302,7 +7302,7 @@ static inline char *airo_translate_scan(struct net_device *dev, iwe.cmd = SIOCGIWFREQ; iwe.u.freq.m = le16_to_cpu(bss->dsChannel); iwe.u.freq.m = 100000 * - ieee80211_channel_to_frequency(iwe.u.freq.m, IEEE80211_BAND_2GHZ); + ieee80211_channel_to_frequency(iwe.u.freq.m, NL80211_BAND_2GHZ); iwe.u.freq.e = 1; current_ev = iwe_stream_add_event(info, current_ev, end_buf, &iwe, IW_EV_FREQ_LEN); diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2100.c b/drivers/net/wireless/intel/ipw2x00/ipw2100.c index 717320b17622..e1e42ed6c412 100644 --- a/drivers/net/wireless/intel/ipw2x00/ipw2100.c +++ b/drivers/net/wireless/intel/ipw2x00/ipw2100.c @@ -1913,7 +1913,7 @@ static int ipw2100_wdev_init(struct net_device *dev) if (geo->bg_channels) { struct ieee80211_supported_band *bg_band = &priv->ieee->bg_band; - bg_band->band = IEEE80211_BAND_2GHZ; + bg_band->band = NL80211_BAND_2GHZ; bg_band->n_channels = geo->bg_channels; bg_band->channels = kcalloc(geo->bg_channels, sizeof(struct ieee80211_channel), @@ -1924,7 +1924,7 @@ static int ipw2100_wdev_init(struct net_device *dev) } /* translate geo->bg to bg_band.channels */ for (i = 0; i < geo->bg_channels; i++) { - bg_band->channels[i].band = IEEE80211_BAND_2GHZ; + bg_band->channels[i].band = NL80211_BAND_2GHZ; bg_band->channels[i].center_freq = geo->bg[i].freq; bg_band->channels[i].hw_value = geo->bg[i].channel; bg_band->channels[i].max_power = geo->bg[i].max_power; @@ -1945,7 +1945,7 @@ static int ipw2100_wdev_init(struct net_device *dev) bg_band->bitrates = ipw2100_bg_rates; bg_band->n_bitrates = RATE_COUNT; - wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = bg_band; + wdev->wiphy->bands[NL80211_BAND_2GHZ] = bg_band; } wdev->wiphy->cipher_suites = ipw_cipher_suites; diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2200.c b/drivers/net/wireless/intel/ipw2x00/ipw2200.c index ed0adaf1eec4..dac13cf42e9f 100644 --- a/drivers/net/wireless/intel/ipw2x00/ipw2200.c +++ b/drivers/net/wireless/intel/ipw2x00/ipw2200.c @@ -11359,7 +11359,7 @@ static int ipw_wdev_init(struct net_device *dev) if (geo->bg_channels) { struct ieee80211_supported_band *bg_band = &priv->ieee->bg_band; - bg_band->band = IEEE80211_BAND_2GHZ; + bg_band->band = NL80211_BAND_2GHZ; bg_band->n_channels = geo->bg_channels; bg_band->channels = kcalloc(geo->bg_channels, sizeof(struct ieee80211_channel), @@ -11370,7 +11370,7 @@ static int ipw_wdev_init(struct net_device *dev) } /* translate geo->bg to bg_band.channels */ for (i = 
0; i < geo->bg_channels; i++) { - bg_band->channels[i].band = IEEE80211_BAND_2GHZ; + bg_band->channels[i].band = NL80211_BAND_2GHZ; bg_band->channels[i].center_freq = geo->bg[i].freq; bg_band->channels[i].hw_value = geo->bg[i].channel; bg_band->channels[i].max_power = geo->bg[i].max_power; @@ -11391,14 +11391,14 @@ static int ipw_wdev_init(struct net_device *dev) bg_band->bitrates = ipw2200_bg_rates; bg_band->n_bitrates = ipw2200_num_bg_rates; - wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = bg_band; + wdev->wiphy->bands[NL80211_BAND_2GHZ] = bg_band; } /* fill-out priv->ieee->a_band */ if (geo->a_channels) { struct ieee80211_supported_band *a_band = &priv->ieee->a_band; - a_band->band = IEEE80211_BAND_5GHZ; + a_band->band = NL80211_BAND_5GHZ; a_band->n_channels = geo->a_channels; a_band->channels = kcalloc(geo->a_channels, sizeof(struct ieee80211_channel), @@ -11409,7 +11409,7 @@ static int ipw_wdev_init(struct net_device *dev) } /* translate geo->a to a_band.channels */ for (i = 0; i < geo->a_channels; i++) { - a_band->channels[i].band = IEEE80211_BAND_5GHZ; + a_band->channels[i].band = NL80211_BAND_5GHZ; a_band->channels[i].center_freq = geo->a[i].freq; a_band->channels[i].hw_value = geo->a[i].channel; a_band->channels[i].max_power = geo->a[i].max_power; @@ -11430,7 +11430,7 @@ static int ipw_wdev_init(struct net_device *dev) a_band->bitrates = ipw2200_a_rates; a_band->n_bitrates = ipw2200_num_a_rates; - wdev->wiphy->bands[IEEE80211_BAND_5GHZ] = a_band; + wdev->wiphy->bands[NL80211_BAND_5GHZ] = a_band; } wdev->wiphy->cipher_suites = ipw_cipher_suites; diff --git a/drivers/net/wireless/intel/iwlegacy/3945-mac.c b/drivers/net/wireless/intel/iwlegacy/3945-mac.c index af1b3e6839fa..466912eb2d87 100644 --- a/drivers/net/wireless/intel/iwlegacy/3945-mac.c +++ b/drivers/net/wireless/intel/iwlegacy/3945-mac.c @@ -1547,7 +1547,7 @@ il3945_irq_tasklet(struct il_priv *il) } static int -il3945_get_channels_for_scan(struct il_priv *il, enum ieee80211_band band, +il3945_get_channels_for_scan(struct il_priv *il, enum nl80211_band band, u8 is_active, u8 n_probes, struct il3945_scan_channel *scan_ch, struct ieee80211_vif *vif) @@ -1618,7 +1618,7 @@ il3945_get_channels_for_scan(struct il_priv *il, enum ieee80211_band band, /* scan_pwr_info->tpc.dsp_atten; */ /*scan_pwr_info->tpc.tx_gain; */ - if (band == IEEE80211_BAND_5GHZ) + if (band == NL80211_BAND_5GHZ) scan_ch->tpc.tx_gain = ((1 << 5) | (3 << 3)) | 3; else { scan_ch->tpc.tx_gain = ((1 << 5) | (5 << 3)); @@ -2534,7 +2534,7 @@ il3945_request_scan(struct il_priv *il, struct ieee80211_vif *vif) }; struct il3945_scan_cmd *scan; u8 n_probes = 0; - enum ieee80211_band band; + enum nl80211_band band; bool is_active = false; int ret; u16 len; @@ -2615,14 +2615,14 @@ il3945_request_scan(struct il_priv *il, struct ieee80211_vif *vif) /* flags + rate selection */ switch (il->scan_band) { - case IEEE80211_BAND_2GHZ: + case NL80211_BAND_2GHZ: scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK; scan->tx_cmd.rate = RATE_1M_PLCP; - band = IEEE80211_BAND_2GHZ; + band = NL80211_BAND_2GHZ; break; - case IEEE80211_BAND_5GHZ: + case NL80211_BAND_5GHZ: scan->tx_cmd.rate = RATE_6M_PLCP; - band = IEEE80211_BAND_5GHZ; + band = NL80211_BAND_5GHZ; break; default: IL_WARN("Invalid scan band\n"); @@ -3507,7 +3507,7 @@ il3945_init_drv(struct il_priv *il) il->ieee_channels = NULL; il->ieee_rates = NULL; - il->band = IEEE80211_BAND_2GHZ; + il->band = NL80211_BAND_2GHZ; il->iw_mode = NL80211_IFTYPE_STATION; il->missed_beacon_threshold = IL_MISSED_BEACON_THRESHOLD_DEF; @@ 
-3582,13 +3582,13 @@ il3945_setup_mac(struct il_priv *il) /* Default value; 4 EDCA QOS priorities */ hw->queues = 4; - if (il->bands[IEEE80211_BAND_2GHZ].n_channels) - il->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = - &il->bands[IEEE80211_BAND_2GHZ]; + if (il->bands[NL80211_BAND_2GHZ].n_channels) + il->hw->wiphy->bands[NL80211_BAND_2GHZ] = + &il->bands[NL80211_BAND_2GHZ]; - if (il->bands[IEEE80211_BAND_5GHZ].n_channels) - il->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = - &il->bands[IEEE80211_BAND_5GHZ]; + if (il->bands[NL80211_BAND_5GHZ].n_channels) + il->hw->wiphy->bands[NL80211_BAND_5GHZ] = + &il->bands[NL80211_BAND_5GHZ]; il_leds_init(il); @@ -3761,7 +3761,7 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto out_release_irq; } - il_set_rxon_channel(il, &il->bands[IEEE80211_BAND_2GHZ].channels[5]); + il_set_rxon_channel(il, &il->bands[NL80211_BAND_2GHZ].channels[5]); il3945_setup_deferred_work(il); il3945_setup_handlers(il); il_power_initialize(il); diff --git a/drivers/net/wireless/intel/iwlegacy/3945-rs.c b/drivers/net/wireless/intel/iwlegacy/3945-rs.c index 76b0729ade17..03ad9b8b55f4 100644 --- a/drivers/net/wireless/intel/iwlegacy/3945-rs.c +++ b/drivers/net/wireless/intel/iwlegacy/3945-rs.c @@ -97,7 +97,7 @@ static struct il3945_tpt_entry il3945_tpt_table_g[] = { #define RATE_RETRY_TH 15 static u8 -il3945_get_rate_idx_by_rssi(s32 rssi, enum ieee80211_band band) +il3945_get_rate_idx_by_rssi(s32 rssi, enum nl80211_band band) { u32 idx = 0; u32 table_size = 0; @@ -107,11 +107,11 @@ il3945_get_rate_idx_by_rssi(s32 rssi, enum ieee80211_band band) rssi = IL_MIN_RSSI_VAL; switch (band) { - case IEEE80211_BAND_2GHZ: + case NL80211_BAND_2GHZ: tpt_table = il3945_tpt_table_g; table_size = ARRAY_SIZE(il3945_tpt_table_g); break; - case IEEE80211_BAND_5GHZ: + case NL80211_BAND_5GHZ: tpt_table = il3945_tpt_table_a; table_size = ARRAY_SIZE(il3945_tpt_table_a); break; @@ -380,7 +380,7 @@ il3945_rs_rate_init(struct il_priv *il, struct ieee80211_sta *sta, u8 sta_id) il->_3945.sta_supp_rates = sta->supp_rates[sband->band]; /* For 5 GHz band it start at IL_FIRST_OFDM_RATE */ - if (sband->band == IEEE80211_BAND_5GHZ) { + if (sband->band == NL80211_BAND_5GHZ) { rs_sta->last_txrate_idx += IL_FIRST_OFDM_RATE; il->_3945.sta_supp_rates <<= IL_FIRST_OFDM_RATE; } @@ -541,7 +541,7 @@ il3945_rs_tx_status(void *il_rate, struct ieee80211_supported_band *sband, static u16 il3945_get_adjacent_rate(struct il3945_rs_sta *rs_sta, u8 idx, u16 rate_mask, - enum ieee80211_band band) + enum nl80211_band band) { u8 high = RATE_INVALID; u8 low = RATE_INVALID; @@ -549,7 +549,7 @@ il3945_get_adjacent_rate(struct il3945_rs_sta *rs_sta, u8 idx, u16 rate_mask, /* 802.11A walks to the next literal adjacent rate in * the rate table */ - if (unlikely(band == IEEE80211_BAND_5GHZ)) { + if (unlikely(band == NL80211_BAND_5GHZ)) { int i; u32 mask; @@ -657,14 +657,14 @@ il3945_rs_get_rate(void *il_r, struct ieee80211_sta *sta, void *il_sta, /* get user max rate if set */ max_rate_idx = txrc->max_rate_idx; - if (sband->band == IEEE80211_BAND_5GHZ && max_rate_idx != -1) + if (sband->band == NL80211_BAND_5GHZ && max_rate_idx != -1) max_rate_idx += IL_FIRST_OFDM_RATE; if (max_rate_idx < 0 || max_rate_idx >= RATE_COUNT) max_rate_idx = -1; idx = min(rs_sta->last_txrate_idx & 0xffff, RATE_COUNT_3945 - 1); - if (sband->band == IEEE80211_BAND_5GHZ) + if (sband->band == NL80211_BAND_5GHZ) rate_mask = rate_mask << IL_FIRST_OFDM_RATE; spin_lock_irqsave(&rs_sta->lock, flags); @@ -806,7 +806,7 @@ il3945_rs_get_rate(void *il_r, 
struct ieee80211_sta *sta, void *il_sta, out: - if (sband->band == IEEE80211_BAND_5GHZ) { + if (sband->band == NL80211_BAND_5GHZ) { if (WARN_ON_ONCE(idx < IL_FIRST_OFDM_RATE)) idx = IL_FIRST_OFDM_RATE; rs_sta->last_txrate_idx = idx; @@ -935,7 +935,7 @@ il3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id) rs_sta->tgg = 0; switch (il->band) { - case IEEE80211_BAND_2GHZ: + case NL80211_BAND_2GHZ: /* TODO: this always does G, not a regression */ if (il->active.flags & RXON_FLG_TGG_PROTECT_MSK) { rs_sta->tgg = 1; @@ -943,7 +943,7 @@ il3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id) } else rs_sta->expected_tpt = il3945_expected_tpt_g; break; - case IEEE80211_BAND_5GHZ: + case NL80211_BAND_5GHZ: rs_sta->expected_tpt = il3945_expected_tpt_a; break; default: diff --git a/drivers/net/wireless/intel/iwlegacy/3945.c b/drivers/net/wireless/intel/iwlegacy/3945.c index 93bdf684babe..7bcedbb53d94 100644 --- a/drivers/net/wireless/intel/iwlegacy/3945.c +++ b/drivers/net/wireless/intel/iwlegacy/3945.c @@ -255,13 +255,13 @@ il3945_rs_next_rate(struct il_priv *il, int rate) int next_rate = il3945_get_prev_ieee_rate(rate); switch (il->band) { - case IEEE80211_BAND_5GHZ: + case NL80211_BAND_5GHZ: if (rate == RATE_12M_IDX) next_rate = RATE_9M_IDX; else if (rate == RATE_6M_IDX) next_rate = RATE_6M_IDX; break; - case IEEE80211_BAND_2GHZ: + case NL80211_BAND_2GHZ: if (!(il->_3945.sta_supp_rates & IL_OFDM_RATES_MASK) && il_is_associated(il)) { if (rate == RATE_11M_IDX) @@ -349,7 +349,7 @@ il3945_hdl_tx(struct il_priv *il, struct il_rx_buf *rxb) /* Fill the MRR chain with some info about on-chip retransmissions */ rate_idx = il3945_hwrate_to_plcp_idx(tx_resp->rate); - if (info->band == IEEE80211_BAND_5GHZ) + if (info->band == NL80211_BAND_5GHZ) rate_idx -= IL_FIRST_OFDM_RATE; fail = tx_resp->failure_frame; @@ -554,14 +554,14 @@ il3945_hdl_rx(struct il_priv *il, struct il_rx_buf *rxb) rx_status.mactime = le64_to_cpu(rx_end->timestamp); rx_status.band = (rx_hdr-> - phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ? IEEE80211_BAND_2GHZ : - IEEE80211_BAND_5GHZ; + phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ? NL80211_BAND_2GHZ : + NL80211_BAND_5GHZ; rx_status.freq = ieee80211_channel_to_frequency(le16_to_cpu(rx_hdr->channel), rx_status.band); rx_status.rate_idx = il3945_hwrate_to_plcp_idx(rx_hdr->rate); - if (rx_status.band == IEEE80211_BAND_5GHZ) + if (rx_status.band == NL80211_BAND_5GHZ) rx_status.rate_idx -= IL_FIRST_OFDM_RATE; rx_status.antenna = @@ -1409,7 +1409,7 @@ il3945_send_tx_power(struct il_priv *il) chan = le16_to_cpu(il->active.channel); - txpower.band = (il->band == IEEE80211_BAND_5GHZ) ? 0 : 1; + txpower.band = (il->band == NL80211_BAND_5GHZ) ? 0 : 1; ch_info = il_get_channel_info(il, il->band, chan); if (!ch_info) { IL_ERR("Failed to get channel info for channel %d [%d]\n", chan, @@ -2310,7 +2310,7 @@ il3945_manage_ibss_station(struct il_priv *il, struct ieee80211_vif *vif, il3945_sync_sta(il, vif_priv->ibss_bssid_sta_id, (il->band == - IEEE80211_BAND_5GHZ) ? RATE_6M_PLCP : + NL80211_BAND_5GHZ) ? 
RATE_6M_PLCP : RATE_1M_PLCP); il3945_rate_scale_init(il->hw, vif_priv->ibss_bssid_sta_id); @@ -2343,7 +2343,7 @@ il3945_init_hw_rate_table(struct il_priv *il) } switch (il->band) { - case IEEE80211_BAND_5GHZ: + case NL80211_BAND_5GHZ: D_RATE("Select A mode rate scale\n"); /* If one of the following CCK rates is used, * have it fall back to the 6M OFDM rate */ @@ -2359,7 +2359,7 @@ il3945_init_hw_rate_table(struct il_priv *il) il3945_rates[IL_FIRST_OFDM_RATE].table_rs_idx; break; - case IEEE80211_BAND_2GHZ: + case NL80211_BAND_2GHZ: D_RATE("Select B/G mode rate scale\n"); /* If an OFDM rate is used, have it fall back to the * 1M CCK rates */ diff --git a/drivers/net/wireless/intel/iwlegacy/4965-mac.c b/drivers/net/wireless/intel/iwlegacy/4965-mac.c index f9ed48070e17..a91d170a614b 100644 --- a/drivers/net/wireless/intel/iwlegacy/4965-mac.c +++ b/drivers/net/wireless/intel/iwlegacy/4965-mac.c @@ -457,7 +457,7 @@ il4965_rxq_stop(struct il_priv *il) } int -il4965_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band) +il4965_hwrate_to_mac80211_idx(u32 rate_n_flags, enum nl80211_band band) { int idx = 0; int band_offset = 0; @@ -468,7 +468,7 @@ il4965_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band) return idx; /* Legacy rate format, search for match in table */ } else { - if (band == IEEE80211_BAND_5GHZ) + if (band == NL80211_BAND_5GHZ) band_offset = IL_FIRST_OFDM_RATE; for (idx = band_offset; idx < RATE_COUNT_LEGACY; idx++) if (il_rates[idx].plcp == (rate_n_flags & 0xFF)) @@ -688,8 +688,8 @@ il4965_hdl_rx(struct il_priv *il, struct il_rx_buf *rxb) rx_status.mactime = le64_to_cpu(phy_res->timestamp); rx_status.band = (phy_res-> - phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ? IEEE80211_BAND_2GHZ : - IEEE80211_BAND_5GHZ; + phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ? 
NL80211_BAND_2GHZ : + NL80211_BAND_5GHZ; rx_status.freq = ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel), rx_status.band); @@ -766,7 +766,7 @@ il4965_hdl_rx_phy(struct il_priv *il, struct il_rx_buf *rxb) static int il4965_get_channels_for_scan(struct il_priv *il, struct ieee80211_vif *vif, - enum ieee80211_band band, u8 is_active, + enum nl80211_band band, u8 is_active, u8 n_probes, struct il_scan_channel *scan_ch) { struct ieee80211_channel *chan; @@ -822,7 +822,7 @@ il4965_get_channels_for_scan(struct il_priv *il, struct ieee80211_vif *vif, * power level: * scan_ch->tx_gain = ((1 << 5) | (2 << 3)) | 3; */ - if (band == IEEE80211_BAND_5GHZ) + if (band == NL80211_BAND_5GHZ) scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3; else scan_ch->tx_gain = ((1 << 5) | (5 << 3)); @@ -870,7 +870,7 @@ il4965_request_scan(struct il_priv *il, struct ieee80211_vif *vif) u32 rate_flags = 0; u16 cmd_len; u16 rx_chain = 0; - enum ieee80211_band band; + enum nl80211_band band; u8 n_probes = 0; u8 rx_ant = il->hw_params.valid_rx_ant; u8 rate; @@ -944,7 +944,7 @@ il4965_request_scan(struct il_priv *il, struct ieee80211_vif *vif) scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE; switch (il->scan_band) { - case IEEE80211_BAND_2GHZ: + case NL80211_BAND_2GHZ: scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK; chan_mod = le32_to_cpu(il->active.flags & RXON_FLG_CHANNEL_MODE_MSK) >> @@ -956,7 +956,7 @@ il4965_request_scan(struct il_priv *il, struct ieee80211_vif *vif) rate_flags = RATE_MCS_CCK_MSK; } break; - case IEEE80211_BAND_5GHZ: + case NL80211_BAND_5GHZ: rate = RATE_6M_PLCP; break; default: @@ -1590,7 +1590,7 @@ il4965_tx_cmd_build_rate(struct il_priv *il, || rate_idx > RATE_COUNT_LEGACY) rate_idx = rate_lowest_index(&il->bands[info->band], sta); /* For 5 GHZ band, remap mac80211 rate indices into driver indices */ - if (info->band == IEEE80211_BAND_5GHZ) + if (info->band == NL80211_BAND_5GHZ) rate_idx += IL_FIRST_OFDM_RATE; /* Get PLCP rate for tx_cmd->rate_n_flags */ rate_plcp = il_rates[rate_idx].plcp; @@ -3051,7 +3051,7 @@ il4965_sta_alloc_lq(struct il_priv *il, u8 sta_id) } /* Set up the rate scaling to start at selected rate, fall back * all the way down to 1M in IEEE order, and then spin on 1M */ - if (il->band == IEEE80211_BAND_5GHZ) + if (il->band == NL80211_BAND_5GHZ) r = RATE_6M_IDX; else r = RATE_1M_IDX; @@ -5790,12 +5790,12 @@ il4965_mac_setup_register(struct il_priv *il, u32 max_probe_length) hw->max_listen_interval = IL_CONN_MAX_LISTEN_INTERVAL; - if (il->bands[IEEE80211_BAND_2GHZ].n_channels) - il->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = - &il->bands[IEEE80211_BAND_2GHZ]; - if (il->bands[IEEE80211_BAND_5GHZ].n_channels) - il->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = - &il->bands[IEEE80211_BAND_5GHZ]; + if (il->bands[NL80211_BAND_2GHZ].n_channels) + il->hw->wiphy->bands[NL80211_BAND_2GHZ] = + &il->bands[NL80211_BAND_2GHZ]; + if (il->bands[NL80211_BAND_5GHZ].n_channels) + il->hw->wiphy->bands[NL80211_BAND_5GHZ] = + &il->bands[NL80211_BAND_5GHZ]; il_leds_init(il); @@ -6368,7 +6368,7 @@ il4965_init_drv(struct il_priv *il) il->ieee_channels = NULL; il->ieee_rates = NULL; - il->band = IEEE80211_BAND_2GHZ; + il->band = NL80211_BAND_2GHZ; il->iw_mode = NL80211_IFTYPE_STATION; il->current_ht_config.smps = IEEE80211_SMPS_STATIC; @@ -6480,7 +6480,7 @@ il4965_set_hw_params(struct il_priv *il) il->hw_params.max_data_size = IL49_RTC_DATA_SIZE; il->hw_params.max_inst_size = IL49_RTC_INST_SIZE; il->hw_params.max_bsm_size = BSM_SRAM_SIZE; - il->hw_params.ht40_channel = 
BIT(IEEE80211_BAND_5GHZ); + il->hw_params.ht40_channel = BIT(NL80211_BAND_5GHZ); il->hw_params.rx_wrt_ptr_reg = FH49_RSCSR_CHNL0_WPTR; diff --git a/drivers/net/wireless/intel/iwlegacy/4965-rs.c b/drivers/net/wireless/intel/iwlegacy/4965-rs.c index bac60b2bc3f0..a867ae7f4095 100644 --- a/drivers/net/wireless/intel/iwlegacy/4965-rs.c +++ b/drivers/net/wireless/intel/iwlegacy/4965-rs.c @@ -549,7 +549,7 @@ il4965_rate_n_flags_from_tbl(struct il_priv *il, struct il_scale_tbl_info *tbl, */ static int il4965_rs_get_tbl_info_from_mcs(const u32 rate_n_flags, - enum ieee80211_band band, + enum nl80211_band band, struct il_scale_tbl_info *tbl, int *rate_idx) { u32 ant_msk = (rate_n_flags & RATE_MCS_ANT_ABC_MSK); @@ -574,7 +574,7 @@ il4965_rs_get_tbl_info_from_mcs(const u32 rate_n_flags, /* legacy rate format */ if (!(rate_n_flags & RATE_MCS_HT_MSK)) { if (il4965_num_of_ant == 1) { - if (band == IEEE80211_BAND_5GHZ) + if (band == NL80211_BAND_5GHZ) tbl->lq_type = LQ_A; else tbl->lq_type = LQ_G; @@ -743,7 +743,7 @@ il4965_rs_get_lower_rate(struct il_lq_sta *lq_sta, if (!is_legacy(tbl->lq_type) && (!ht_possible || !scale_idx)) { switch_to_legacy = 1; scale_idx = rs_ht_to_legacy[scale_idx]; - if (lq_sta->band == IEEE80211_BAND_5GHZ) + if (lq_sta->band == NL80211_BAND_5GHZ) tbl->lq_type = LQ_A; else tbl->lq_type = LQ_G; @@ -762,7 +762,7 @@ il4965_rs_get_lower_rate(struct il_lq_sta *lq_sta, /* Mask with station rate restriction */ if (is_legacy(tbl->lq_type)) { /* supp_rates has no CCK bits in A mode */ - if (lq_sta->band == IEEE80211_BAND_5GHZ) + if (lq_sta->band == NL80211_BAND_5GHZ) rate_mask = (u16) (rate_mask & (lq_sta->supp_rates << IL_FIRST_OFDM_RATE)); @@ -851,7 +851,7 @@ il4965_rs_tx_status(void *il_r, struct ieee80211_supported_band *sband, table = &lq_sta->lq; tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags); il4965_rs_get_tbl_info_from_mcs(tx_rate, il->band, &tbl_type, &rs_idx); - if (il->band == IEEE80211_BAND_5GHZ) + if (il->band == NL80211_BAND_5GHZ) rs_idx -= IL_FIRST_OFDM_RATE; mac_flags = info->status.rates[0].flags; mac_idx = info->status.rates[0].idx; @@ -864,7 +864,7 @@ il4965_rs_tx_status(void *il_r, struct ieee80211_supported_band *sband, * mac80211 HT idx is always zero-idxed; we need to move * HT OFDM rates after CCK rates in 2.4 GHz band */ - if (il->band == IEEE80211_BAND_2GHZ) + if (il->band == NL80211_BAND_2GHZ) mac_idx += IL_FIRST_OFDM_RATE; } /* Here we actually compare this rate to the latest LQ command */ @@ -1816,7 +1816,7 @@ il4965_rs_rate_scale_perform(struct il_priv *il, struct sk_buff *skb, /* mask with station rate restriction */ if (is_legacy(tbl->lq_type)) { - if (lq_sta->band == IEEE80211_BAND_5GHZ) + if (lq_sta->band == NL80211_BAND_5GHZ) /* supp_rates has no CCK bits in A mode */ rate_scale_idx_msk = (u16) (rate_mask & @@ -2212,7 +2212,7 @@ il4965_rs_get_rate(void *il_r, struct ieee80211_sta *sta, void *il_sta, /* Get max rate if user set max rate */ if (lq_sta) { lq_sta->max_rate_idx = txrc->max_rate_idx; - if (sband->band == IEEE80211_BAND_5GHZ && + if (sband->band == NL80211_BAND_5GHZ && lq_sta->max_rate_idx != -1) lq_sta->max_rate_idx += IL_FIRST_OFDM_RATE; if (lq_sta->max_rate_idx < 0 || @@ -2258,11 +2258,11 @@ il4965_rs_get_rate(void *il_r, struct ieee80211_sta *sta, void *il_sta, } else { /* Check for invalid rates */ if (rate_idx < 0 || rate_idx >= RATE_COUNT_LEGACY || - (sband->band == IEEE80211_BAND_5GHZ && + (sband->band == NL80211_BAND_5GHZ && rate_idx < IL_FIRST_OFDM_RATE)) rate_idx = rate_lowest_index(sband, sta); /* On valid 5 GHz rate, 
adjust idx */ - else if (sband->band == IEEE80211_BAND_5GHZ) + else if (sband->band == NL80211_BAND_5GHZ) rate_idx -= IL_FIRST_OFDM_RATE; info->control.rates[0].flags = 0; } @@ -2362,7 +2362,7 @@ il4965_rs_rate_init(struct il_priv *il, struct ieee80211_sta *sta, u8 sta_id) /* Set last_txrate_idx to lowest rate */ lq_sta->last_txrate_idx = rate_lowest_index(sband, sta); - if (sband->band == IEEE80211_BAND_5GHZ) + if (sband->band == NL80211_BAND_5GHZ) lq_sta->last_txrate_idx += IL_FIRST_OFDM_RATE; lq_sta->is_agg = 0; diff --git a/drivers/net/wireless/intel/iwlegacy/4965.c b/drivers/net/wireless/intel/iwlegacy/4965.c index fe47db9c20cd..c3c638ed0ed7 100644 --- a/drivers/net/wireless/intel/iwlegacy/4965.c +++ b/drivers/net/wireless/intel/iwlegacy/4965.c @@ -1267,7 +1267,7 @@ il4965_send_tx_power(struct il_priv *il) "TX Power requested while scanning!\n")) return -EAGAIN; - band = il->band == IEEE80211_BAND_2GHZ; + band = il->band == NL80211_BAND_2GHZ; is_ht40 = iw4965_is_ht40_channel(il->active.flags); @@ -1480,7 +1480,7 @@ il4965_hw_channel_switch(struct il_priv *il, u8 switch_count; u16 beacon_interval = le16_to_cpu(il->timing.beacon_interval); struct ieee80211_vif *vif = il->vif; - band = (il->band == IEEE80211_BAND_2GHZ); + band = (il->band == NL80211_BAND_2GHZ); if (WARN_ON_ONCE(vif == NULL)) return -EIO; @@ -1918,7 +1918,7 @@ struct il_cfg il4965_cfg = { * Force use of chains B and C for scan RX on 5 GHz band * because the device has off-channel reception on chain A. */ - .scan_rx_antennas[IEEE80211_BAND_5GHZ] = ANT_BC, + .scan_rx_antennas[NL80211_BAND_5GHZ] = ANT_BC, .eeprom_size = IL4965_EEPROM_IMG_SIZE, .num_of_queues = IL49_NUM_QUEUES, diff --git a/drivers/net/wireless/intel/iwlegacy/4965.h b/drivers/net/wireless/intel/iwlegacy/4965.h index e432715e02d8..527e8b531aed 100644 --- a/drivers/net/wireless/intel/iwlegacy/4965.h +++ b/drivers/net/wireless/intel/iwlegacy/4965.h @@ -68,7 +68,7 @@ void il4965_rx_replenish(struct il_priv *il); void il4965_rx_replenish_now(struct il_priv *il); void il4965_rx_queue_free(struct il_priv *il, struct il_rx_queue *rxq); int il4965_rxq_stop(struct il_priv *il); -int il4965_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band); +int il4965_hwrate_to_mac80211_idx(u32 rate_n_flags, enum nl80211_band band); void il4965_rx_handle(struct il_priv *il); /* tx */ diff --git a/drivers/net/wireless/intel/iwlegacy/common.c b/drivers/net/wireless/intel/iwlegacy/common.c index 2cc3d42bbab7..eb24b9241bb2 100644 --- a/drivers/net/wireless/intel/iwlegacy/common.c +++ b/drivers/net/wireless/intel/iwlegacy/common.c @@ -860,7 +860,7 @@ il_init_band_reference(const struct il_priv *il, int eep_band, * Does not set up a command, or touch hardware. */ static int -il_mod_ht40_chan_info(struct il_priv *il, enum ieee80211_band band, u16 channel, +il_mod_ht40_chan_info(struct il_priv *il, enum nl80211_band band, u16 channel, const struct il_eeprom_channel *eeprom_ch, u8 clear_ht40_extension_channel) { @@ -945,7 +945,7 @@ il_init_channel_map(struct il_priv *il) ch_info->channel = eeprom_ch_idx[ch]; ch_info->band = (band == - 1) ? IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ; + 1) ? NL80211_BAND_2GHZ : NL80211_BAND_5GHZ; /* permanently store EEPROM's channel regulatory flags * and max power in channel info database. 
*/ @@ -1003,14 +1003,14 @@ il_init_channel_map(struct il_priv *il) /* Two additional EEPROM bands for 2.4 and 5 GHz HT40 channels */ for (band = 6; band <= 7; band++) { - enum ieee80211_band ieeeband; + enum nl80211_band ieeeband; il_init_band_reference(il, band, &eeprom_ch_count, &eeprom_ch_info, &eeprom_ch_idx); /* EEPROM band 6 is 2.4, band 7 is 5 GHz */ ieeeband = - (band == 6) ? IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ; + (band == 6) ? NL80211_BAND_2GHZ : NL80211_BAND_5GHZ; /* Loop through each band adding each of the channels */ for (ch = 0; ch < eeprom_ch_count; ch++) { @@ -1048,19 +1048,19 @@ EXPORT_SYMBOL(il_free_channel_map); * Based on band and channel number. */ const struct il_channel_info * -il_get_channel_info(const struct il_priv *il, enum ieee80211_band band, +il_get_channel_info(const struct il_priv *il, enum nl80211_band band, u16 channel) { int i; switch (band) { - case IEEE80211_BAND_5GHZ: + case NL80211_BAND_5GHZ: for (i = 14; i < il->channel_count; i++) { if (il->channel_info[i].channel == channel) return &il->channel_info[i]; } break; - case IEEE80211_BAND_2GHZ: + case NL80211_BAND_2GHZ: if (channel >= 1 && channel <= 14) return &il->channel_info[channel - 1]; break; @@ -1457,7 +1457,7 @@ il_hdl_scan_complete(struct il_priv *il, struct il_rx_buf *rxb) clear_bit(S_SCAN_HW, &il->status); D_SCAN("Scan on %sGHz took %dms\n", - (il->scan_band == IEEE80211_BAND_2GHZ) ? "2.4" : "5.2", + (il->scan_band == NL80211_BAND_2GHZ) ? "2.4" : "5.2", jiffies_to_msecs(jiffies - il->scan_start)); queue_work(il->workqueue, &il->scan_completed); @@ -1475,10 +1475,10 @@ il_setup_rx_scan_handlers(struct il_priv *il) EXPORT_SYMBOL(il_setup_rx_scan_handlers); u16 -il_get_active_dwell_time(struct il_priv *il, enum ieee80211_band band, +il_get_active_dwell_time(struct il_priv *il, enum nl80211_band band, u8 n_probes) { - if (band == IEEE80211_BAND_5GHZ) + if (band == NL80211_BAND_5GHZ) return IL_ACTIVE_DWELL_TIME_52 + IL_ACTIVE_DWELL_FACTOR_52GHZ * (n_probes + 1); else @@ -1488,14 +1488,14 @@ il_get_active_dwell_time(struct il_priv *il, enum ieee80211_band band, EXPORT_SYMBOL(il_get_active_dwell_time); u16 -il_get_passive_dwell_time(struct il_priv *il, enum ieee80211_band band, +il_get_passive_dwell_time(struct il_priv *il, enum nl80211_band band, struct ieee80211_vif *vif) { u16 value; u16 passive = (band == - IEEE80211_BAND_2GHZ) ? IL_PASSIVE_DWELL_BASE + + NL80211_BAND_2GHZ) ? IL_PASSIVE_DWELL_BASE + IL_PASSIVE_DWELL_TIME_24 : IL_PASSIVE_DWELL_BASE + IL_PASSIVE_DWELL_TIME_52; @@ -1520,10 +1520,10 @@ void il_init_scan_params(struct il_priv *il) { u8 ant_idx = fls(il->hw_params.valid_tx_ant) - 1; - if (!il->scan_tx_ant[IEEE80211_BAND_5GHZ]) - il->scan_tx_ant[IEEE80211_BAND_5GHZ] = ant_idx; - if (!il->scan_tx_ant[IEEE80211_BAND_2GHZ]) - il->scan_tx_ant[IEEE80211_BAND_2GHZ] = ant_idx; + if (!il->scan_tx_ant[NL80211_BAND_5GHZ]) + il->scan_tx_ant[NL80211_BAND_5GHZ] = ant_idx; + if (!il->scan_tx_ant[NL80211_BAND_2GHZ]) + il->scan_tx_ant[NL80211_BAND_2GHZ] = ant_idx; } EXPORT_SYMBOL(il_init_scan_params); @@ -2003,7 +2003,7 @@ il_prep_station(struct il_priv *il, const u8 *addr, bool is_ap, il_set_ht_add_station(il, sta_id, sta); /* 3945 only */ - rate = (il->band == IEEE80211_BAND_5GHZ) ? RATE_6M_PLCP : RATE_1M_PLCP; + rate = (il->band == NL80211_BAND_5GHZ) ? RATE_6M_PLCP : RATE_1M_PLCP; /* Turn on both antennas for the station... 
*/ station->sta.rate_n_flags = cpu_to_le16(rate | RATE_MCS_ANT_AB_MSK); @@ -3382,7 +3382,7 @@ EXPORT_SYMBOL(il_bcast_addr); static void il_init_ht_hw_capab(const struct il_priv *il, struct ieee80211_sta_ht_cap *ht_info, - enum ieee80211_band band) + enum nl80211_band band) { u16 max_bit_rate = 0; u8 rx_chains_num = il->hw_params.rx_chains_num; @@ -3443,8 +3443,8 @@ il_init_geos(struct il_priv *il) int i = 0; s8 max_tx_power = 0; - if (il->bands[IEEE80211_BAND_2GHZ].n_bitrates || - il->bands[IEEE80211_BAND_5GHZ].n_bitrates) { + if (il->bands[NL80211_BAND_2GHZ].n_bitrates || + il->bands[NL80211_BAND_5GHZ].n_bitrates) { D_INFO("Geography modes already initialized.\n"); set_bit(S_GEO_CONFIGURED, &il->status); return 0; @@ -3465,23 +3465,23 @@ il_init_geos(struct il_priv *il) } /* 5.2GHz channels start after the 2.4GHz channels */ - sband = &il->bands[IEEE80211_BAND_5GHZ]; + sband = &il->bands[NL80211_BAND_5GHZ]; sband->channels = &channels[ARRAY_SIZE(il_eeprom_band_1)]; /* just OFDM */ sband->bitrates = &rates[IL_FIRST_OFDM_RATE]; sband->n_bitrates = RATE_COUNT_LEGACY - IL_FIRST_OFDM_RATE; if (il->cfg->sku & IL_SKU_N) - il_init_ht_hw_capab(il, &sband->ht_cap, IEEE80211_BAND_5GHZ); + il_init_ht_hw_capab(il, &sband->ht_cap, NL80211_BAND_5GHZ); - sband = &il->bands[IEEE80211_BAND_2GHZ]; + sband = &il->bands[NL80211_BAND_2GHZ]; sband->channels = channels; /* OFDM & CCK */ sband->bitrates = rates; sband->n_bitrates = RATE_COUNT_LEGACY; if (il->cfg->sku & IL_SKU_N) - il_init_ht_hw_capab(il, &sband->ht_cap, IEEE80211_BAND_2GHZ); + il_init_ht_hw_capab(il, &sband->ht_cap, NL80211_BAND_2GHZ); il->ieee_channels = channels; il->ieee_rates = rates; @@ -3532,7 +3532,7 @@ il_init_geos(struct il_priv *il) il->tx_power_user_lmt = max_tx_power; il->tx_power_next = max_tx_power; - if (il->bands[IEEE80211_BAND_5GHZ].n_channels == 0 && + if (il->bands[NL80211_BAND_5GHZ].n_channels == 0 && (il->cfg->sku & IL_SKU_A)) { IL_INFO("Incorrectly detected BG card as ABG. 
" "Please send your PCI ID 0x%04X:0x%04X to maintainer.\n", @@ -3541,8 +3541,8 @@ il_init_geos(struct il_priv *il) } IL_INFO("Tunable channels: %d 802.11bg, %d 802.11a channels\n", - il->bands[IEEE80211_BAND_2GHZ].n_channels, - il->bands[IEEE80211_BAND_5GHZ].n_channels); + il->bands[NL80211_BAND_2GHZ].n_channels, + il->bands[NL80211_BAND_5GHZ].n_channels); set_bit(S_GEO_CONFIGURED, &il->status); @@ -3563,7 +3563,7 @@ il_free_geos(struct il_priv *il) EXPORT_SYMBOL(il_free_geos); static bool -il_is_channel_extension(struct il_priv *il, enum ieee80211_band band, +il_is_channel_extension(struct il_priv *il, enum nl80211_band band, u16 channel, u8 extension_chan_offset) { const struct il_channel_info *ch_info; @@ -3926,14 +3926,14 @@ EXPORT_SYMBOL(il_set_rxon_ht); /* Return valid, unused, channel for a passive scan to reset the RF */ u8 -il_get_single_channel_number(struct il_priv *il, enum ieee80211_band band) +il_get_single_channel_number(struct il_priv *il, enum nl80211_band band) { const struct il_channel_info *ch_info; int i; u8 channel = 0; u8 min, max; - if (band == IEEE80211_BAND_5GHZ) { + if (band == NL80211_BAND_5GHZ) { min = 14; max = il->channel_count; } else { @@ -3965,14 +3965,14 @@ EXPORT_SYMBOL(il_get_single_channel_number); int il_set_rxon_channel(struct il_priv *il, struct ieee80211_channel *ch) { - enum ieee80211_band band = ch->band; + enum nl80211_band band = ch->band; u16 channel = ch->hw_value; if (le16_to_cpu(il->staging.channel) == channel && il->band == band) return 0; il->staging.channel = cpu_to_le16(channel); - if (band == IEEE80211_BAND_5GHZ) + if (band == NL80211_BAND_5GHZ) il->staging.flags &= ~RXON_FLG_BAND_24G_MSK; else il->staging.flags |= RXON_FLG_BAND_24G_MSK; @@ -3986,10 +3986,10 @@ il_set_rxon_channel(struct il_priv *il, struct ieee80211_channel *ch) EXPORT_SYMBOL(il_set_rxon_channel); void -il_set_flags_for_band(struct il_priv *il, enum ieee80211_band band, +il_set_flags_for_band(struct il_priv *il, enum nl80211_band band, struct ieee80211_vif *vif) { - if (band == IEEE80211_BAND_5GHZ) { + if (band == NL80211_BAND_5GHZ) { il->staging.flags &= ~(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK | RXON_FLG_CCK_MSK); @@ -5415,7 +5415,7 @@ il_mac_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, if (changes & BSS_CHANGED_ERP_CTS_PROT) { D_MAC80211("ERP_CTS %d\n", bss_conf->use_cts_prot); - if (bss_conf->use_cts_prot && il->band != IEEE80211_BAND_5GHZ) + if (bss_conf->use_cts_prot && il->band != NL80211_BAND_5GHZ) il->staging.flags |= RXON_FLG_TGG_PROTECT_MSK; else il->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK; diff --git a/drivers/net/wireless/intel/iwlegacy/common.h b/drivers/net/wireless/intel/iwlegacy/common.h index ce52cf114fde..726ede391cb9 100644 --- a/drivers/net/wireless/intel/iwlegacy/common.h +++ b/drivers/net/wireless/intel/iwlegacy/common.h @@ -432,7 +432,7 @@ u16 il_eeprom_query16(const struct il_priv *il, size_t offset); int il_init_channel_map(struct il_priv *il); void il_free_channel_map(struct il_priv *il); const struct il_channel_info *il_get_channel_info(const struct il_priv *il, - enum ieee80211_band band, + enum nl80211_band band, u16 channel); #define IL_NUM_SCAN_RATES (2) @@ -497,7 +497,7 @@ struct il_channel_info { u8 group_idx; /* 0-4, maps channel to group1/2/3/4/5 */ u8 band_idx; /* 0-4, maps channel to band1/2/3/4/5 */ - enum ieee80211_band band; + enum nl80211_band band; /* HT40 channel info */ s8 ht40_max_power_avg; /* (dBm) regul. 
eeprom, normal Tx, any rate */ @@ -811,7 +811,7 @@ struct il_sensitivity_ranges { * @rx_wrt_ptr_reg: FH{39}_RSCSR_CHNL0_WPTR * @max_stations: * @ht40_channel: is 40MHz width possible in band 2.4 - * BIT(IEEE80211_BAND_5GHZ) BIT(IEEE80211_BAND_5GHZ) + * BIT(NL80211_BAND_5GHZ) BIT(NL80211_BAND_5GHZ) * @sw_crypto: 0 for hw, 1 for sw * @max_xxx_size: for ucode uses * @ct_kill_threshold: temperature threshold @@ -1141,13 +1141,13 @@ struct il_priv { struct list_head free_frames; int frames_count; - enum ieee80211_band band; + enum nl80211_band band; int alloc_rxb_page; void (*handlers[IL_CN_MAX]) (struct il_priv *il, struct il_rx_buf *rxb); - struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS]; + struct ieee80211_supported_band bands[NUM_NL80211_BANDS]; /* spectrum measurement report caching */ struct il_spectrum_notification measure_report; @@ -1176,10 +1176,10 @@ struct il_priv { unsigned long scan_start; unsigned long scan_start_tsf; void *scan_cmd; - enum ieee80211_band scan_band; + enum nl80211_band scan_band; struct cfg80211_scan_request *scan_request; struct ieee80211_vif *scan_vif; - u8 scan_tx_ant[IEEE80211_NUM_BANDS]; + u8 scan_tx_ant[NUM_NL80211_BANDS]; u8 mgmt_tx_ant; /* spinlock */ @@ -1479,7 +1479,7 @@ il_is_channel_radar(const struct il_channel_info *ch_info) static inline u8 il_is_channel_a_band(const struct il_channel_info *ch_info) { - return ch_info->band == IEEE80211_BAND_5GHZ; + return ch_info->band == NL80211_BAND_5GHZ; } static inline int @@ -1673,7 +1673,7 @@ struct il_cfg { /* params not likely to change within a device family */ struct il_base_params *base_params; /* params likely to change within a device family */ - u8 scan_rx_antennas[IEEE80211_NUM_BANDS]; + u8 scan_rx_antennas[NUM_NL80211_BANDS]; enum il_led_mode led_mode; int eeprom_size; @@ -1707,9 +1707,9 @@ void il_set_rxon_hwcrypto(struct il_priv *il, int hw_decrypt); int il_check_rxon_cmd(struct il_priv *il); int il_full_rxon_required(struct il_priv *il); int il_set_rxon_channel(struct il_priv *il, struct ieee80211_channel *ch); -void il_set_flags_for_band(struct il_priv *il, enum ieee80211_band band, +void il_set_flags_for_band(struct il_priv *il, enum nl80211_band band, struct ieee80211_vif *vif); -u8 il_get_single_channel_number(struct il_priv *il, enum ieee80211_band band); +u8 il_get_single_channel_number(struct il_priv *il, enum nl80211_band band); void il_set_rxon_ht(struct il_priv *il, struct il_ht_config *ht_conf); bool il_is_ht40_tx_allowed(struct il_priv *il, struct ieee80211_sta_ht_cap *ht_cap); @@ -1793,9 +1793,9 @@ int il_force_reset(struct il_priv *il, bool external); u16 il_fill_probe_req(struct il_priv *il, struct ieee80211_mgmt *frame, const u8 *ta, const u8 *ie, int ie_len, int left); void il_setup_rx_scan_handlers(struct il_priv *il); -u16 il_get_active_dwell_time(struct il_priv *il, enum ieee80211_band band, +u16 il_get_active_dwell_time(struct il_priv *il, enum nl80211_band band, u8 n_probes); -u16 il_get_passive_dwell_time(struct il_priv *il, enum ieee80211_band band, +u16 il_get_passive_dwell_time(struct il_priv *il, enum nl80211_band band, struct ieee80211_vif *vif); void il_setup_scan_deferred_work(struct il_priv *il); void il_cancel_scan_deferred_work(struct il_priv *il); @@ -1955,7 +1955,7 @@ il_commit_rxon(struct il_priv *il) } static inline const struct ieee80211_supported_band * -il_get_hw_mode(struct il_priv *il, enum ieee80211_band band) +il_get_hw_mode(struct il_priv *il, enum nl80211_band band) { return il->hw->wiphy->bands[band]; } @@ -2813,7 +2813,7 @@ struct 
il_lq_sta { u8 action_counter; /* # mode-switch actions tried */ u8 is_green; u8 is_dup; - enum ieee80211_band band; + enum nl80211_band band; /* The following are bitmaps of rates; RATE_6M_MASK, etc. */ u32 supp_rates; diff --git a/drivers/net/wireless/intel/iwlegacy/debug.c b/drivers/net/wireless/intel/iwlegacy/debug.c index 908b9f4fef6f..6fc6b7ff9849 100644 --- a/drivers/net/wireless/intel/iwlegacy/debug.c +++ b/drivers/net/wireless/intel/iwlegacy/debug.c @@ -544,7 +544,7 @@ il_dbgfs_channels_read(struct file *file, char __user *user_buf, size_t count, return -ENOMEM; } - supp_band = il_get_hw_mode(il, IEEE80211_BAND_2GHZ); + supp_band = il_get_hw_mode(il, NL80211_BAND_2GHZ); if (supp_band) { channels = supp_band->channels; @@ -571,7 +571,7 @@ il_dbgfs_channels_read(struct file *file, char __user *user_buf, size_t count, flags & IEEE80211_CHAN_NO_IR ? "passive only" : "active/passive"); } - supp_band = il_get_hw_mode(il, IEEE80211_BAND_5GHZ); + supp_band = il_get_hw_mode(il, NL80211_BAND_5GHZ); if (supp_band) { channels = supp_band->channels; diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/agn.h b/drivers/net/wireless/intel/iwlwifi/dvm/agn.h index 9de277c6c420..b79e38734f2f 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/agn.h +++ b/drivers/net/wireless/intel/iwlwifi/dvm/agn.h @@ -158,7 +158,7 @@ void iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch, struct iwl_rxon_context *ctx); void iwl_set_flags_for_band(struct iwl_priv *priv, struct iwl_rxon_context *ctx, - enum ieee80211_band band, + enum nl80211_band band, struct ieee80211_vif *vif); /* uCode */ @@ -186,7 +186,7 @@ int iwl_send_statistics_request(struct iwl_priv *priv, u8 flags, bool clear); static inline const struct ieee80211_supported_band *iwl_get_hw_mode( - struct iwl_priv *priv, enum ieee80211_band band) + struct iwl_priv *priv, enum nl80211_band band) { return priv->hw->wiphy->bands[band]; } @@ -198,7 +198,7 @@ int iwlagn_suspend(struct iwl_priv *priv, struct cfg80211_wowlan *wowlan); #endif /* rx */ -int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band); +int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum nl80211_band band); void iwl_setup_rx_handlers(struct iwl_priv *priv); void iwl_chswitch_done(struct iwl_priv *priv, bool is_success); @@ -258,7 +258,7 @@ void iwl_cancel_scan_deferred_work(struct iwl_priv *priv); int __must_check iwl_scan_initiate(struct iwl_priv *priv, struct ieee80211_vif *vif, enum iwl_scan_type scan_type, - enum ieee80211_band band); + enum nl80211_band band); /* For faster active scanning, scan will move to the next channel if fewer than * PLCP_QUIET_THRESH packets are heard on this channel within diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c index 74c51615244e..f6591c83d636 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c @@ -335,7 +335,7 @@ static ssize_t iwl_dbgfs_channels_read(struct file *file, char __user *user_buf, if (!buf) return -ENOMEM; - supp_band = iwl_get_hw_mode(priv, IEEE80211_BAND_2GHZ); + supp_band = iwl_get_hw_mode(priv, NL80211_BAND_2GHZ); if (supp_band) { channels = supp_band->channels; @@ -358,7 +358,7 @@ static ssize_t iwl_dbgfs_channels_read(struct file *file, char __user *user_buf, IEEE80211_CHAN_NO_IR ? 
"passive only" : "active/passive"); } - supp_band = iwl_get_hw_mode(priv, IEEE80211_BAND_5GHZ); + supp_band = iwl_get_hw_mode(priv, NL80211_BAND_5GHZ); if (supp_band) { channels = supp_band->channels; diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/dev.h b/drivers/net/wireless/intel/iwlwifi/dvm/dev.h index 1a7ead753eee..8148df61a916 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/dev.h +++ b/drivers/net/wireless/intel/iwlwifi/dvm/dev.h @@ -677,7 +677,7 @@ struct iwl_priv { struct iwl_hw_params hw_params; - enum ieee80211_band band; + enum nl80211_band band; u8 valid_contexts; void (*rx_handlers[REPLY_MAX])(struct iwl_priv *priv, @@ -722,11 +722,11 @@ struct iwl_priv { unsigned long scan_start; unsigned long scan_start_tsf; void *scan_cmd; - enum ieee80211_band scan_band; + enum nl80211_band scan_band; struct cfg80211_scan_request *scan_request; struct ieee80211_vif *scan_vif; enum iwl_scan_type scan_type; - u8 scan_tx_ant[IEEE80211_NUM_BANDS]; + u8 scan_tx_ant[NUM_NL80211_BANDS]; u8 mgmt_tx_ant; /* max number of station keys */ diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/devices.c b/drivers/net/wireless/intel/iwlwifi/dvm/devices.c index cc13c04063a5..f21732ec3b25 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/devices.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/devices.c @@ -420,7 +420,7 @@ static int iwl5000_hw_channel_switch(struct iwl_priv *priv, .data = { &cmd, }, }; - cmd.band = priv->band == IEEE80211_BAND_2GHZ; + cmd.band = priv->band == NL80211_BAND_2GHZ; ch = ch_switch->chandef.chan->hw_value; IWL_DEBUG_11H(priv, "channel switch from %d to %d\n", ctx->active.channel, ch); @@ -588,7 +588,7 @@ static int iwl6000_hw_channel_switch(struct iwl_priv *priv, hcmd.data[0] = cmd; - cmd->band = priv->band == IEEE80211_BAND_2GHZ; + cmd->band = priv->band == NL80211_BAND_2GHZ; ch = ch_switch->chandef.chan->hw_value; IWL_DEBUG_11H(priv, "channel switch from %u to %u\n", ctx->active.channel, ch); diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/lib.c b/drivers/net/wireless/intel/iwlwifi/dvm/lib.c index 1799469268ea..8dda52ae3bb5 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/lib.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/lib.c @@ -94,7 +94,7 @@ void iwlagn_temperature(struct iwl_priv *priv) iwl_tt_handler(priv); } -int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band) +int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum nl80211_band band) { int idx = 0; int band_offset = 0; @@ -105,7 +105,7 @@ int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band) return idx; /* Legacy rate format, search for match in table */ } else { - if (band == IEEE80211_BAND_5GHZ) + if (band == NL80211_BAND_5GHZ) band_offset = IWL_FIRST_OFDM_RATE; for (idx = band_offset; idx < IWL_RATE_COUNT_LEGACY; idx++) if (iwl_rates[idx].plcp == (rate_n_flags & 0xFF)) @@ -878,7 +878,7 @@ u8 iwl_toggle_tx_ant(struct iwl_priv *priv, u8 ant, u8 valid) int i; u8 ind = ant; - if (priv->band == IEEE80211_BAND_2GHZ && + if (priv->band == NL80211_BAND_2GHZ && priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH) return 0; diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c index c63ea79571ff..8c0719468d00 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c @@ -202,12 +202,12 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv, hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL; - if 
(priv->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels) - priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = - &priv->nvm_data->bands[IEEE80211_BAND_2GHZ]; - if (priv->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels) - priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = - &priv->nvm_data->bands[IEEE80211_BAND_5GHZ]; + if (priv->nvm_data->bands[NL80211_BAND_2GHZ].n_channels) + priv->hw->wiphy->bands[NL80211_BAND_2GHZ] = + &priv->nvm_data->bands[NL80211_BAND_2GHZ]; + if (priv->nvm_data->bands[NL80211_BAND_5GHZ].n_channels) + priv->hw->wiphy->bands[NL80211_BAND_5GHZ] = + &priv->nvm_data->bands[NL80211_BAND_5GHZ]; hw->wiphy->hw_version = priv->trans->hw_id; diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/main.c b/drivers/net/wireless/intel/iwlwifi/dvm/main.c index 614716251c39..37b32a6f60fd 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/main.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/main.c @@ -262,7 +262,7 @@ int iwlagn_send_beacon_cmd(struct iwl_priv *priv) rate_flags = iwl_ant_idx_to_flags(priv->mgmt_tx_ant); /* In mac80211, rates for 5 GHz start at 0 */ - if (info->band == IEEE80211_BAND_5GHZ) + if (info->band == NL80211_BAND_5GHZ) rate += IWL_FIRST_OFDM_RATE; else if (rate >= IWL_FIRST_CCK_RATE && rate <= IWL_LAST_CCK_RATE) rate_flags |= RATE_MCS_CCK_MSK; @@ -1117,7 +1117,7 @@ static int iwl_init_drv(struct iwl_priv *priv) INIT_LIST_HEAD(&priv->calib_results); - priv->band = IEEE80211_BAND_2GHZ; + priv->band = NL80211_BAND_2GHZ; priv->plcp_delta_threshold = priv->lib->plcp_delta_threshold; diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rs.c b/drivers/net/wireless/intel/iwlwifi/dvm/rs.c index ee7505537c96..b95c2d76db33 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/rs.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/rs.c @@ -599,7 +599,7 @@ static u32 rate_n_flags_from_tbl(struct iwl_priv *priv, * fill "search" or "active" tx mode table. 
*/ static int rs_get_tbl_info_from_mcs(const u32 rate_n_flags, - enum ieee80211_band band, + enum nl80211_band band, struct iwl_scale_tbl_info *tbl, int *rate_idx) { @@ -624,7 +624,7 @@ static int rs_get_tbl_info_from_mcs(const u32 rate_n_flags, /* legacy rate format */ if (!(rate_n_flags & RATE_MCS_HT_MSK)) { if (num_of_ant == 1) { - if (band == IEEE80211_BAND_5GHZ) + if (band == NL80211_BAND_5GHZ) tbl->lq_type = LQ_A; else tbl->lq_type = LQ_G; @@ -802,7 +802,7 @@ static u32 rs_get_lower_rate(struct iwl_lq_sta *lq_sta, if (!is_legacy(tbl->lq_type) && (!ht_possible || !scale_index)) { switch_to_legacy = 1; scale_index = rs_ht_to_legacy[scale_index]; - if (lq_sta->band == IEEE80211_BAND_5GHZ) + if (lq_sta->band == NL80211_BAND_5GHZ) tbl->lq_type = LQ_A; else tbl->lq_type = LQ_G; @@ -821,7 +821,7 @@ static u32 rs_get_lower_rate(struct iwl_lq_sta *lq_sta, /* Mask with station rate restriction */ if (is_legacy(tbl->lq_type)) { /* supp_rates has no CCK bits in A mode */ - if (lq_sta->band == IEEE80211_BAND_5GHZ) + if (lq_sta->band == NL80211_BAND_5GHZ) rate_mask = (u16)(rate_mask & (lq_sta->supp_rates << IWL_FIRST_OFDM_RATE)); else @@ -939,7 +939,7 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband, table = &lq_sta->lq; tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags); rs_get_tbl_info_from_mcs(tx_rate, priv->band, &tbl_type, &rs_index); - if (priv->band == IEEE80211_BAND_5GHZ) + if (priv->band == NL80211_BAND_5GHZ) rs_index -= IWL_FIRST_OFDM_RATE; mac_flags = info->status.rates[0].flags; mac_index = info->status.rates[0].idx; @@ -952,7 +952,7 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband, * mac80211 HT index is always zero-indexed; we need to move * HT OFDM rates after CCK rates in 2.4 GHz band */ - if (priv->band == IEEE80211_BAND_2GHZ) + if (priv->band == NL80211_BAND_2GHZ) mac_index += IWL_FIRST_OFDM_RATE; } /* Here we actually compare this rate to the latest LQ command */ @@ -2284,7 +2284,7 @@ static void rs_rate_scale_perform(struct iwl_priv *priv, /* mask with station rate restriction */ if (is_legacy(tbl->lq_type)) { - if (lq_sta->band == IEEE80211_BAND_5GHZ) + if (lq_sta->band == NL80211_BAND_5GHZ) /* supp_rates has no CCK bits in A mode */ rate_scale_index_msk = (u16) (rate_mask & (lq_sta->supp_rates << IWL_FIRST_OFDM_RATE)); @@ -2721,7 +2721,7 @@ static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta, void *priv_sta, /* Get max rate if user set max rate */ if (lq_sta) { lq_sta->max_rate_idx = txrc->max_rate_idx; - if ((sband->band == IEEE80211_BAND_5GHZ) && + if ((sband->band == NL80211_BAND_5GHZ) && (lq_sta->max_rate_idx != -1)) lq_sta->max_rate_idx += IWL_FIRST_OFDM_RATE; if ((lq_sta->max_rate_idx < 0) || @@ -2763,11 +2763,11 @@ static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta, void *priv_sta, } else { /* Check for invalid rates */ if ((rate_idx < 0) || (rate_idx >= IWL_RATE_COUNT_LEGACY) || - ((sband->band == IEEE80211_BAND_5GHZ) && + ((sband->band == NL80211_BAND_5GHZ) && (rate_idx < IWL_FIRST_OFDM_RATE))) rate_idx = rate_lowest_index(sband, sta); /* On valid 5 GHz rate, adjust index */ - else if (sband->band == IEEE80211_BAND_5GHZ) + else if (sband->band == NL80211_BAND_5GHZ) rate_idx -= IWL_FIRST_OFDM_RATE; info->control.rates[0].flags = 0; } @@ -2880,7 +2880,7 @@ void iwl_rs_rate_init(struct iwl_priv *priv, struct ieee80211_sta *sta, u8 sta_i /* Set last_txrate_idx to lowest rate */ lq_sta->last_txrate_idx = rate_lowest_index(sband, sta); - if (sband->band == IEEE80211_BAND_5GHZ) + if 
(sband->band == NL80211_BAND_5GHZ) lq_sta->last_txrate_idx += IWL_FIRST_OFDM_RATE; lq_sta->is_agg = 0; #ifdef CONFIG_MAC80211_DEBUGFS diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rs.h b/drivers/net/wireless/intel/iwlwifi/dvm/rs.h index c5fe44584613..50c1e951dd2d 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/rs.h +++ b/drivers/net/wireless/intel/iwlwifi/dvm/rs.h @@ -355,7 +355,7 @@ struct iwl_lq_sta { u8 action_counter; /* # mode-switch actions tried */ u8 is_green; u8 is_dup; - enum ieee80211_band band; + enum nl80211_band band; /* The following are bitmaps of rates; IWL_RATE_6M_MASK, etc. */ u32 supp_rates; diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rx.c b/drivers/net/wireless/intel/iwlwifi/dvm/rx.c index 27ea61e3a390..dfa2041cfdac 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/rx.c @@ -834,7 +834,7 @@ static void iwlagn_rx_reply_rx(struct iwl_priv *priv, /* rx_status carries information about the packet to mac80211 */ rx_status.mactime = le64_to_cpu(phy_res->timestamp); rx_status.band = (phy_res->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ? - IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ; + NL80211_BAND_2GHZ : NL80211_BAND_5GHZ; rx_status.freq = ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel), rx_status.band); diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rxon.c b/drivers/net/wireless/intel/iwlwifi/dvm/rxon.c index 2d47cb24c48b..b228552184b5 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/rxon.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/rxon.c @@ -719,7 +719,7 @@ void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_config *ht_conf) void iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch, struct iwl_rxon_context *ctx) { - enum ieee80211_band band = ch->band; + enum nl80211_band band = ch->band; u16 channel = ch->hw_value; if ((le16_to_cpu(ctx->staging.channel) == channel) && @@ -727,7 +727,7 @@ void iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch, return; ctx->staging.channel = cpu_to_le16(channel); - if (band == IEEE80211_BAND_5GHZ) + if (band == NL80211_BAND_5GHZ) ctx->staging.flags &= ~RXON_FLG_BAND_24G_MSK; else ctx->staging.flags |= RXON_FLG_BAND_24G_MSK; @@ -740,10 +740,10 @@ void iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch, void iwl_set_flags_for_band(struct iwl_priv *priv, struct iwl_rxon_context *ctx, - enum ieee80211_band band, + enum nl80211_band band, struct ieee80211_vif *vif) { - if (band == IEEE80211_BAND_5GHZ) { + if (band == NL80211_BAND_5GHZ) { ctx->staging.flags &= ~(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK | RXON_FLG_CCK_MSK); @@ -1476,7 +1476,7 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw, iwlagn_set_rxon_chain(priv, ctx); - if (bss_conf->use_cts_prot && (priv->band != IEEE80211_BAND_5GHZ)) + if (bss_conf->use_cts_prot && (priv->band != NL80211_BAND_5GHZ)) ctx->staging.flags |= RXON_FLG_TGG_PROTECT_MSK; else ctx->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK; diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/scan.c b/drivers/net/wireless/intel/iwlwifi/dvm/scan.c index 81a2ddbe9569..d01766f16175 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/scan.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/scan.c @@ -312,7 +312,7 @@ static void iwl_rx_scan_complete_notif(struct iwl_priv *priv, scan_notif->tsf_high, scan_notif->status); IWL_DEBUG_SCAN(priv, "Scan on %sGHz took %dms\n", - (priv->scan_band == IEEE80211_BAND_2GHZ) ? 
"2.4" : "5.2", + (priv->scan_band == NL80211_BAND_2GHZ) ? "2.4" : "5.2", jiffies_to_msecs(jiffies - priv->scan_start)); /* @@ -362,9 +362,9 @@ void iwl_setup_rx_scan_handlers(struct iwl_priv *priv) } static u16 iwl_get_active_dwell_time(struct iwl_priv *priv, - enum ieee80211_band band, u8 n_probes) + enum nl80211_band band, u8 n_probes) { - if (band == IEEE80211_BAND_5GHZ) + if (band == NL80211_BAND_5GHZ) return IWL_ACTIVE_DWELL_TIME_52 + IWL_ACTIVE_DWELL_FACTOR_52GHZ * (n_probes + 1); else @@ -431,9 +431,9 @@ static u16 iwl_limit_dwell(struct iwl_priv *priv, u16 dwell_time) } static u16 iwl_get_passive_dwell_time(struct iwl_priv *priv, - enum ieee80211_band band) + enum nl80211_band band) { - u16 passive = (band == IEEE80211_BAND_2GHZ) ? + u16 passive = (band == NL80211_BAND_2GHZ) ? IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_24 : IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_52; @@ -442,7 +442,7 @@ static u16 iwl_get_passive_dwell_time(struct iwl_priv *priv, /* Return valid, unused, channel for a passive scan to reset the RF */ static u8 iwl_get_single_channel_number(struct iwl_priv *priv, - enum ieee80211_band band) + enum nl80211_band band) { struct ieee80211_supported_band *sband = priv->hw->wiphy->bands[band]; struct iwl_rxon_context *ctx; @@ -470,7 +470,7 @@ static u8 iwl_get_single_channel_number(struct iwl_priv *priv, static int iwl_get_channel_for_reset_scan(struct iwl_priv *priv, struct ieee80211_vif *vif, - enum ieee80211_band band, + enum nl80211_band band, struct iwl_scan_channel *scan_ch) { const struct ieee80211_supported_band *sband; @@ -492,7 +492,7 @@ static int iwl_get_channel_for_reset_scan(struct iwl_priv *priv, cpu_to_le16(IWL_RADIO_RESET_DWELL_TIME); /* Set txpower levels to defaults */ scan_ch->dsp_atten = 110; - if (band == IEEE80211_BAND_5GHZ) + if (band == NL80211_BAND_5GHZ) scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3; else scan_ch->tx_gain = ((1 << 5) | (5 << 3)); @@ -505,7 +505,7 @@ static int iwl_get_channel_for_reset_scan(struct iwl_priv *priv, static int iwl_get_channels_for_scan(struct iwl_priv *priv, struct ieee80211_vif *vif, - enum ieee80211_band band, + enum nl80211_band band, u8 is_active, u8 n_probes, struct iwl_scan_channel *scan_ch) { @@ -553,7 +553,7 @@ static int iwl_get_channels_for_scan(struct iwl_priv *priv, * power level: * scan_ch->tx_gain = ((1 << 5) | (2 << 3)) | 3; */ - if (band == IEEE80211_BAND_5GHZ) + if (band == NL80211_BAND_5GHZ) scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3; else scan_ch->tx_gain = ((1 << 5) | (5 << 3)); @@ -636,7 +636,7 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif) u32 rate_flags = 0; u16 cmd_len = 0; u16 rx_chain = 0; - enum ieee80211_band band; + enum nl80211_band band; u8 n_probes = 0; u8 rx_ant = priv->nvm_data->valid_rx_ant; u8 rate; @@ -750,7 +750,7 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif) scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE; switch (priv->scan_band) { - case IEEE80211_BAND_2GHZ: + case NL80211_BAND_2GHZ: scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK; chan_mod = le32_to_cpu( priv->contexts[IWL_RXON_CTX_BSS].active.flags & @@ -771,7 +771,7 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif) priv->lib->bt_params->advanced_bt_coexist) scan->tx_cmd.tx_flags |= TX_CMD_FLG_IGNORE_BT; break; - case IEEE80211_BAND_5GHZ: + case NL80211_BAND_5GHZ: rate = IWL_RATE_6M_PLCP; break; default: @@ -809,7 +809,7 @@ static int iwlagn_request_scan(struct iwl_priv *priv, 
struct ieee80211_vif *vif) band = priv->scan_band; - if (band == IEEE80211_BAND_2GHZ && + if (band == NL80211_BAND_2GHZ && priv->lib->bt_params && priv->lib->bt_params->advanced_bt_coexist) { /* transmit 2.4 GHz probes only on first antenna */ @@ -925,16 +925,16 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif) void iwl_init_scan_params(struct iwl_priv *priv) { u8 ant_idx = fls(priv->nvm_data->valid_tx_ant) - 1; - if (!priv->scan_tx_ant[IEEE80211_BAND_5GHZ]) - priv->scan_tx_ant[IEEE80211_BAND_5GHZ] = ant_idx; - if (!priv->scan_tx_ant[IEEE80211_BAND_2GHZ]) - priv->scan_tx_ant[IEEE80211_BAND_2GHZ] = ant_idx; + if (!priv->scan_tx_ant[NL80211_BAND_5GHZ]) + priv->scan_tx_ant[NL80211_BAND_5GHZ] = ant_idx; + if (!priv->scan_tx_ant[NL80211_BAND_2GHZ]) + priv->scan_tx_ant[NL80211_BAND_2GHZ] = ant_idx; } int __must_check iwl_scan_initiate(struct iwl_priv *priv, struct ieee80211_vif *vif, enum iwl_scan_type scan_type, - enum ieee80211_band band) + enum nl80211_band band) { int ret; diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/sta.c b/drivers/net/wireless/intel/iwlwifi/dvm/sta.c index 8e9768a553e4..de6ec9b7ace4 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/sta.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/sta.c @@ -579,7 +579,7 @@ static void iwl_sta_fill_lq(struct iwl_priv *priv, struct iwl_rxon_context *ctx, /* Set up the rate scaling to start at selected rate, fall back * all the way down to 1M in IEEE order, and then spin on 1M */ - if (priv->band == IEEE80211_BAND_5GHZ) + if (priv->band == NL80211_BAND_5GHZ) r = IWL_RATE_6M_INDEX; else if (ctx && ctx->vif && ctx->vif->p2p) r = IWL_RATE_6M_INDEX; diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/tx.c b/drivers/net/wireless/intel/iwlwifi/dvm/tx.c index 59e2001c39f8..4b97371c3b42 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/tx.c @@ -81,7 +81,7 @@ static void iwlagn_tx_cmd_build_basic(struct iwl_priv *priv, tx_flags |= TX_CMD_FLG_TSF_MSK; else if (ieee80211_is_back_req(fc)) tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK; - else if (info->band == IEEE80211_BAND_2GHZ && + else if (info->band == NL80211_BAND_2GHZ && priv->lib->bt_params && priv->lib->bt_params->advanced_bt_coexist && (ieee80211_is_auth(fc) || ieee80211_is_assoc_req(fc) || @@ -177,7 +177,7 @@ static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv, rate_idx = rate_lowest_index( &priv->nvm_data->bands[info->band], sta); /* For 5 GHZ band, remap mac80211 rate indices into driver indices */ - if (info->band == IEEE80211_BAND_5GHZ) + if (info->band == NL80211_BAND_5GHZ) rate_idx += IWL_FIRST_OFDM_RATE; /* Get PLCP rate for tx_cmd->rate_n_flags */ rate_plcp = iwl_rates[rate_idx].plcp; diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-1000.c b/drivers/net/wireless/intel/iwlwifi/iwl-1000.c index ef22c3d168fc..5c2aae64d59f 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-1000.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-1000.c @@ -64,7 +64,7 @@ static const struct iwl_base_params iwl1000_base_params = { static const struct iwl_ht_params iwl1000_ht_params = { .ht_greenfield_support = true, .use_rts_for_aggregation = true, /* use rts/cts protection */ - .ht40_bands = BIT(IEEE80211_BAND_2GHZ), + .ht40_bands = BIT(NL80211_BAND_2GHZ), }; static const struct iwl_eeprom_params iwl1000_eeprom_params = { diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-2000.c b/drivers/net/wireless/intel/iwlwifi/iwl-2000.c index dc246c997084..2e823bdc4757 100644 --- 
a/drivers/net/wireless/intel/iwlwifi/iwl-2000.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-2000.c @@ -89,7 +89,7 @@ static const struct iwl_base_params iwl2030_base_params = { static const struct iwl_ht_params iwl2000_ht_params = { .ht_greenfield_support = true, .use_rts_for_aggregation = true, /* use rts/cts protection */ - .ht40_bands = BIT(IEEE80211_BAND_2GHZ), + .ht40_bands = BIT(NL80211_BAND_2GHZ), }; static const struct iwl_eeprom_params iwl20x0_eeprom_params = { diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-5000.c b/drivers/net/wireless/intel/iwlwifi/iwl-5000.c index 4dcdab6781cc..4c3e3cf4c799 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-5000.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-5000.c @@ -62,7 +62,7 @@ static const struct iwl_base_params iwl5000_base_params = { static const struct iwl_ht_params iwl5000_ht_params = { .ht_greenfield_support = true, - .ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ), + .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ), }; static const struct iwl_eeprom_params iwl5000_eeprom_params = { diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-6000.c b/drivers/net/wireless/intel/iwlwifi/iwl-6000.c index 9938f5340ac0..5a7b7e1f0aab 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-6000.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-6000.c @@ -110,7 +110,7 @@ static const struct iwl_base_params iwl6000_g2_base_params = { static const struct iwl_ht_params iwl6000_ht_params = { .ht_greenfield_support = true, .use_rts_for_aggregation = true, /* use rts/cts protection */ - .ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ), + .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ), }; static const struct iwl_eeprom_params iwl6000_eeprom_params = { diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-7000.c b/drivers/net/wireless/intel/iwlwifi/iwl-7000.c index b6283c881d42..abd2904ecc48 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-7000.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-7000.c @@ -156,7 +156,7 @@ static const struct iwl_tt_params iwl7000_high_temp_tt_params = { static const struct iwl_ht_params iwl7000_ht_params = { .stbc = true, - .ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ), + .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ), }; #define IWL_DEVICE_7000_COMMON \ @@ -287,7 +287,7 @@ static const struct iwl_pwr_tx_backoff iwl7265_pwr_tx_backoffs[] = { static const struct iwl_ht_params iwl7265_ht_params = { .stbc = true, .ldpc = true, - .ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ), + .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ), }; const struct iwl_cfg iwl3165_2ac_cfg = { diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-8000.c b/drivers/net/wireless/intel/iwlwifi/iwl-8000.c index 0728a288aa3d..a9212a12f4da 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-8000.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-8000.c @@ -124,7 +124,7 @@ static const struct iwl_base_params iwl8000_base_params = { static const struct iwl_ht_params iwl8000_ht_params = { .stbc = true, .ldpc = true, - .ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ), + .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ), }; static const struct iwl_tt_params iwl8000_tt_params = { diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-9000.c b/drivers/net/wireless/intel/iwlwifi/iwl-9000.c index a3d35aa291a9..b9aca3795f06 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-9000.c +++ 
b/drivers/net/wireless/intel/iwlwifi/iwl-9000.c @@ -93,7 +93,7 @@ static const struct iwl_base_params iwl9000_base_params = { static const struct iwl_ht_params iwl9000_ht_params = { .stbc = true, .ldpc = true, - .ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ), + .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ), }; static const struct iwl_tt_params iwl9000_tt_params = { diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h index 08bb4f4e424a..720679889ab3 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h @@ -185,7 +185,7 @@ struct iwl_base_params { * @stbc: support Tx STBC and 1*SS Rx STBC * @ldpc: support Tx/Rx with LDPC * @use_rts_for_aggregation: use rts/cts protection for HT traffic - * @ht40_bands: bitmap of bands (using %IEEE80211_BAND_*) that support HT40 + * @ht40_bands: bitmap of bands (using %NL80211_BAND_*) that support HT40 */ struct iwl_ht_params { enum ieee80211_smps_mode smps_mode; diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c index c15f5be85197..bf1b69aec813 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c @@ -390,10 +390,10 @@ iwl_eeprom_enh_txp_read_element(struct iwl_nvm_data *data, int n_channels, s8 max_txpower_avg) { int ch_idx; - enum ieee80211_band band; + enum nl80211_band band; band = txp->flags & IWL_EEPROM_ENH_TXP_FL_BAND_52G ? - IEEE80211_BAND_5GHZ : IEEE80211_BAND_2GHZ; + NL80211_BAND_5GHZ : NL80211_BAND_2GHZ; for (ch_idx = 0; ch_idx < n_channels; ch_idx++) { struct ieee80211_channel *chan = &data->channels[ch_idx]; @@ -526,7 +526,7 @@ static void iwl_init_band_reference(const struct iwl_cfg *cfg, static void iwl_mod_ht40_chan_info(struct device *dev, struct iwl_nvm_data *data, int n_channels, - enum ieee80211_band band, u16 channel, + enum nl80211_band band, u16 channel, const struct iwl_eeprom_channel *eeprom_ch, u8 clear_ht40_extension_channel) { @@ -548,7 +548,7 @@ static void iwl_mod_ht40_chan_info(struct device *dev, IWL_DEBUG_EEPROM(dev, "HT40 Ch. %d [%sGHz] %s%s%s%s%s(0x%02x %ddBm): Ad-Hoc %ssupported\n", channel, - band == IEEE80211_BAND_5GHZ ? "5.2" : "2.4", + band == NL80211_BAND_5GHZ ? "5.2" : "2.4", CHECK_AND_PRINT(IBSS), CHECK_AND_PRINT(ACTIVE), CHECK_AND_PRINT(RADAR), @@ -606,8 +606,8 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg, n_channels++; channel->hw_value = eeprom_ch_array[ch_idx]; - channel->band = (band == 1) ? IEEE80211_BAND_2GHZ - : IEEE80211_BAND_5GHZ; + channel->band = (band == 1) ? NL80211_BAND_2GHZ + : NL80211_BAND_5GHZ; channel->center_freq = ieee80211_channel_to_frequency( channel->hw_value, channel->band); @@ -677,15 +677,15 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg, /* Two additional EEPROM bands for 2.4 and 5 GHz HT40 channels */ for (band = 6; band <= 7; band++) { - enum ieee80211_band ieeeband; + enum nl80211_band ieeeband; iwl_init_band_reference(cfg, eeprom, eeprom_size, band, &eeprom_ch_count, &eeprom_ch_info, &eeprom_ch_array); /* EEPROM band 6 is 2.4, band 7 is 5 GHz */ - ieeeband = (band == 6) ? IEEE80211_BAND_2GHZ - : IEEE80211_BAND_5GHZ; + ieeeband = (band == 6) ? 
NL80211_BAND_2GHZ + : NL80211_BAND_5GHZ; /* Loop through each band adding each of the channels */ for (ch_idx = 0; ch_idx < eeprom_ch_count; ch_idx++) { @@ -708,7 +708,7 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg, int iwl_init_sband_channels(struct iwl_nvm_data *data, struct ieee80211_supported_band *sband, - int n_channels, enum ieee80211_band band) + int n_channels, enum nl80211_band band) { struct ieee80211_channel *chan = &data->channels[0]; int n = 0, idx = 0; @@ -734,7 +734,7 @@ int iwl_init_sband_channels(struct iwl_nvm_data *data, void iwl_init_ht_hw_capab(const struct iwl_cfg *cfg, struct iwl_nvm_data *data, struct ieee80211_sta_ht_cap *ht_info, - enum ieee80211_band band, + enum nl80211_band band, u8 tx_chains, u8 rx_chains) { int max_bit_rate = 0; @@ -813,22 +813,22 @@ static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg, int n_used = 0; struct ieee80211_supported_band *sband; - sband = &data->bands[IEEE80211_BAND_2GHZ]; - sband->band = IEEE80211_BAND_2GHZ; + sband = &data->bands[NL80211_BAND_2GHZ]; + sband->band = NL80211_BAND_2GHZ; sband->bitrates = &iwl_cfg80211_rates[RATES_24_OFFS]; sband->n_bitrates = N_RATES_24; n_used += iwl_init_sband_channels(data, sband, n_channels, - IEEE80211_BAND_2GHZ); - iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, IEEE80211_BAND_2GHZ, + NL80211_BAND_2GHZ); + iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, NL80211_BAND_2GHZ, data->valid_tx_ant, data->valid_rx_ant); - sband = &data->bands[IEEE80211_BAND_5GHZ]; - sband->band = IEEE80211_BAND_5GHZ; + sband = &data->bands[NL80211_BAND_5GHZ]; + sband->band = NL80211_BAND_5GHZ; sband->bitrates = &iwl_cfg80211_rates[RATES_52_OFFS]; sband->n_bitrates = N_RATES_52; n_used += iwl_init_sband_channels(data, sband, n_channels, - IEEE80211_BAND_5GHZ); - iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, IEEE80211_BAND_5GHZ, + NL80211_BAND_5GHZ); + iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, NL80211_BAND_5GHZ, data->valid_tx_ant, data->valid_rx_ant); if (n_channels != n_used) diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h index ad2b834668ff..53f39a34eca2 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h @@ -98,7 +98,7 @@ struct iwl_nvm_data { s8 max_tx_pwr_half_dbm; bool lar_enabled; - struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS]; + struct ieee80211_supported_band bands[NUM_NL80211_BANDS]; struct ieee80211_channel channels[]; }; @@ -133,12 +133,12 @@ int iwl_nvm_check_version(struct iwl_nvm_data *data, int iwl_init_sband_channels(struct iwl_nvm_data *data, struct ieee80211_supported_band *sband, - int n_channels, enum ieee80211_band band); + int n_channels, enum nl80211_band band); void iwl_init_ht_hw_capab(const struct iwl_cfg *cfg, struct iwl_nvm_data *data, struct ieee80211_sta_ht_cap *ht_info, - enum ieee80211_band band, + enum nl80211_band band, u8 tx_chains, u8 rx_chains); #endif /* __iwl_eeprom_parse_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c index 93a689583dff..14743c37d976 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c @@ -308,7 +308,7 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg, channel->hw_value = nvm_chan[ch_idx]; channel->band = (ch_idx < num_2ghz_channels) ? 
- IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ; + NL80211_BAND_2GHZ : NL80211_BAND_5GHZ; channel->center_freq = ieee80211_channel_to_frequency( channel->hw_value, channel->band); @@ -320,7 +320,7 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg, * is not used in mvm, and is used for backwards compatibility */ channel->max_power = IWL_DEFAULT_MAX_TX_POWER; - is_5ghz = channel->band == IEEE80211_BAND_5GHZ; + is_5ghz = channel->band == NL80211_BAND_5GHZ; /* don't put limitations in case we're using LAR */ if (!lar_supported) @@ -439,22 +439,22 @@ static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg, &ch_section[NVM_CHANNELS_FAMILY_8000], lar_supported); - sband = &data->bands[IEEE80211_BAND_2GHZ]; - sband->band = IEEE80211_BAND_2GHZ; + sband = &data->bands[NL80211_BAND_2GHZ]; + sband->band = NL80211_BAND_2GHZ; sband->bitrates = &iwl_cfg80211_rates[RATES_24_OFFS]; sband->n_bitrates = N_RATES_24; n_used += iwl_init_sband_channels(data, sband, n_channels, - IEEE80211_BAND_2GHZ); - iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, IEEE80211_BAND_2GHZ, + NL80211_BAND_2GHZ); + iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, NL80211_BAND_2GHZ, tx_chains, rx_chains); - sband = &data->bands[IEEE80211_BAND_5GHZ]; - sband->band = IEEE80211_BAND_5GHZ; + sband = &data->bands[NL80211_BAND_5GHZ]; + sband->band = NL80211_BAND_5GHZ; sband->bitrates = &iwl_cfg80211_rates[RATES_52_OFFS]; sband->n_bitrates = N_RATES_52; n_used += iwl_init_sband_channels(data, sband, n_channels, - IEEE80211_BAND_5GHZ); - iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, IEEE80211_BAND_5GHZ, + NL80211_BAND_5GHZ); + iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, NL80211_BAND_5GHZ, tx_chains, rx_chains); if (data->sku_cap_11ac_enable && !iwlwifi_mod_params.disable_11ac) iwl_init_vht_hw_capab(cfg, data, &sband->vht_cap, @@ -781,7 +781,7 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg, struct ieee80211_regdomain *regd; int size_of_regd; struct ieee80211_reg_rule *rule; - enum ieee80211_band band; + enum nl80211_band band; int center_freq, prev_center_freq = 0; int valid_rules = 0; bool new_rule; @@ -809,7 +809,7 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg, for (ch_idx = 0; ch_idx < num_of_ch; ch_idx++) { ch_flags = (u16)__le32_to_cpup(channels + ch_idx); band = (ch_idx < NUM_2GHZ_CHANNELS) ? - IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ; + NL80211_BAND_2GHZ : NL80211_BAND_5GHZ; center_freq = ieee80211_channel_to_frequency(nvm_chan[ch_idx], band); new_rule = false; @@ -857,7 +857,7 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg, IWL_DEBUG_DEV(dev, IWL_DL_LAR, "Ch. %d [%sGHz] %s%s%s%s%s%s%s%s%s(0x%02x): Ad-Hoc %ssupported\n", center_freq, - band == IEEE80211_BAND_5GHZ ? "5.2" : "2.4", + band == NL80211_BAND_5GHZ ? 
"5.2" : "2.4", CHECK_AND_PRINT_I(VALID), CHECK_AND_PRINT_I(ACTIVE), CHECK_AND_PRINT_I(RADAR), diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c index 35cdeca3d61e..a63f5bbb1ba7 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c @@ -378,7 +378,7 @@ iwl_get_coex_type(struct iwl_mvm *mvm, const struct ieee80211_vif *vif) chanctx_conf = rcu_dereference(vif->chanctx_conf); if (!chanctx_conf || - chanctx_conf->def.chan->band != IEEE80211_BAND_2GHZ) { + chanctx_conf->def.chan->band != NL80211_BAND_2GHZ) { rcu_read_unlock(); return BT_COEX_INVALID_LUT; } @@ -537,7 +537,7 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac, /* If channel context is invalid or not on 2.4GHz .. */ if ((!chanctx_conf || - chanctx_conf->def.chan->band != IEEE80211_BAND_2GHZ)) { + chanctx_conf->def.chan->band != NL80211_BAND_2GHZ)) { if (vif->type == NL80211_IFTYPE_STATION) { /* ... relax constraints and disable rssi events */ iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX, @@ -857,11 +857,11 @@ bool iwl_mvm_bt_coex_is_shared_ant_avail(struct iwl_mvm *mvm) } bool iwl_mvm_bt_coex_is_tpc_allowed(struct iwl_mvm *mvm, - enum ieee80211_band band) + enum nl80211_band band) { u32 bt_activity = le32_to_cpu(mvm->last_bt_notif.bt_activity_grading); - if (band != IEEE80211_BAND_2GHZ) + if (band != NL80211_BAND_2GHZ) return false; return bt_activity >= BT_LOW_TRAFFIC; @@ -873,7 +873,7 @@ u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr, __le16 fc = hdr->frame_control; bool mplut_enabled = iwl_mvm_is_mplut_supported(mvm); - if (info->band != IEEE80211_BAND_2GHZ) + if (info->band != NL80211_BAND_2GHZ) return 0; if (unlikely(mvm->bt_tx_prio)) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c index 3a279d3403ef..fb96bc00f022 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c @@ -724,9 +724,9 @@ static ssize_t iwl_dbgfs_tof_responder_params_write(struct ieee80211_vif *vif, ret = kstrtou32(data, 10, &value); if (ret == 0 && value) { - enum ieee80211_band band = (cmd->channel_num <= 14) ? - IEEE80211_BAND_2GHZ : - IEEE80211_BAND_5GHZ; + enum nl80211_band band = (cmd->channel_num <= 14) ? 
+ NL80211_BAND_2GHZ : + NL80211_BAND_5GHZ; struct ieee80211_channel chn = { .band = band, .center_freq = ieee80211_channel_to_frequency( diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c index 6ad5c602e84c..9e97cf4ff1c5 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c @@ -980,7 +980,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm) goto error; /* Add all the PHY contexts */ - chan = &mvm->hw->wiphy->bands[IEEE80211_BAND_2GHZ]->channels[0]; + chan = &mvm->hw->wiphy->bands[NL80211_BAND_2GHZ]->channels[0]; cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_NO_HT); for (i = 0; i < NUM_PHY_CTX; i++) { /* diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c index 923d19112e0c..456067b2f48d 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c @@ -559,7 +559,7 @@ void iwl_mvm_mac_ctxt_release(struct iwl_mvm *mvm, struct ieee80211_vif *vif) static void iwl_mvm_ack_rates(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - enum ieee80211_band band, + enum nl80211_band band, u8 *cck_rates, u8 *ofdm_rates) { struct ieee80211_supported_band *sband; @@ -730,7 +730,7 @@ static void iwl_mvm_mac_ctxt_cmd_common(struct iwl_mvm *mvm, rcu_read_lock(); chanctx = rcu_dereference(vif->chanctx_conf); iwl_mvm_ack_rates(mvm, vif, chanctx ? chanctx->def.chan->band - : IEEE80211_BAND_2GHZ, + : NL80211_BAND_2GHZ, &cck_ack_rates, &ofdm_ack_rates); rcu_read_unlock(); @@ -1065,7 +1065,7 @@ static int iwl_mvm_mac_ctxt_send_beacon(struct iwl_mvm *mvm, cpu_to_le32(BIT(mvm->mgmt_last_antenna_idx) << RATE_MCS_ANT_POS); - if (info->band == IEEE80211_BAND_5GHZ || vif->p2p) { + if (info->band == NL80211_BAND_5GHZ || vif->p2p) { rate = IWL_FIRST_OFDM_RATE; } else { rate = IWL_FIRST_CCK_RATE; @@ -1516,7 +1516,7 @@ void iwl_mvm_rx_stored_beacon_notif(struct iwl_mvm *mvm, rx_status.device_timestamp = le32_to_cpu(sb->system_time); rx_status.band = (sb->phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_BAND_24)) ? 
- IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ; + NL80211_BAND_2GHZ : NL80211_BAND_5GHZ; rx_status.freq = ieee80211_channel_to_frequency(le16_to_cpu(sb->channel), rx_status.band); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index 4f5ec495b460..ef91b3770703 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -550,18 +550,18 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) else mvm->max_scans = IWL_MVM_MAX_LMAC_SCANS; - if (mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels) - hw->wiphy->bands[IEEE80211_BAND_2GHZ] = - &mvm->nvm_data->bands[IEEE80211_BAND_2GHZ]; - if (mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels) { - hw->wiphy->bands[IEEE80211_BAND_5GHZ] = - &mvm->nvm_data->bands[IEEE80211_BAND_5GHZ]; + if (mvm->nvm_data->bands[NL80211_BAND_2GHZ].n_channels) + hw->wiphy->bands[NL80211_BAND_2GHZ] = + &mvm->nvm_data->bands[NL80211_BAND_2GHZ]; + if (mvm->nvm_data->bands[NL80211_BAND_5GHZ].n_channels) { + hw->wiphy->bands[NL80211_BAND_5GHZ] = + &mvm->nvm_data->bands[NL80211_BAND_5GHZ]; if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_BEAMFORMER) && fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_LQ_SS_PARAMS)) - hw->wiphy->bands[IEEE80211_BAND_5GHZ]->vht_cap.cap |= + hw->wiphy->bands[NL80211_BAND_5GHZ]->vht_cap.cap |= IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE; } @@ -2911,7 +2911,7 @@ static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm, cpu_to_le32(FW_CMD_ID_AND_COLOR(MAC_INDEX_AUX, 0)), .sta_id_and_color = cpu_to_le32(mvm->aux_sta.sta_id), /* Set the channel info data */ - .channel_info.band = (channel->band == IEEE80211_BAND_2GHZ) ? + .channel_info.band = (channel->band == NL80211_BAND_2GHZ) ? PHY_BAND_24 : PHY_BAND_5, .channel_info.channel = channel->hw_value, .channel_info.width = PHY_VHT_CHANNEL_MODE20, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index 2d685e02d488..85800ba0c667 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -1133,9 +1133,9 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm); /* Utils */ int iwl_mvm_legacy_rate_to_mac80211_idx(u32 rate_n_flags, - enum ieee80211_band band); + enum nl80211_band band); void iwl_mvm_hwrate_to_tx_rate(u32 rate_n_flags, - enum ieee80211_band band, + enum nl80211_band band, struct ieee80211_tx_rate *r); u8 iwl_mvm_mac80211_idx_to_hwrate(int rate_idx); void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm); @@ -1468,7 +1468,7 @@ bool iwl_mvm_bt_coex_is_mimo_allowed(struct iwl_mvm *mvm, bool iwl_mvm_bt_coex_is_ant_avail(struct iwl_mvm *mvm, u8 ant); bool iwl_mvm_bt_coex_is_shared_ant_avail(struct iwl_mvm *mvm); bool iwl_mvm_bt_coex_is_tpc_allowed(struct iwl_mvm *mvm, - enum ieee80211_band band); + enum nl80211_band band); u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr, struct ieee80211_tx_info *info, u8 ac); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c index 6e6a56f2153d..95138830b9f8 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c @@ -147,7 +147,7 @@ static void iwl_mvm_phy_ctxt_cmd_data(struct iwl_mvm *mvm, u8 active_cnt, idle_cnt; /* Set the channel info data */ - cmd->ci.band = (chandef->chan->band == IEEE80211_BAND_2GHZ ? + cmd->ci.band = (chandef->chan->band == NL80211_BAND_2GHZ ? 
PHY_BAND_24 : PHY_BAND_5); cmd->ci.channel = chandef->chan->hw_value; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c index 61d0a8cd13f9..81dd2f6a48a5 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c @@ -829,7 +829,7 @@ static u32 ucode_rate_from_rs_rate(struct iwl_mvm *mvm, /* Convert a ucode rate into an rs_rate object */ static int rs_rate_from_ucode_rate(const u32 ucode_rate, - enum ieee80211_band band, + enum nl80211_band band, struct rs_rate *rate) { u32 ant_msk = ucode_rate & RATE_MCS_ANT_ABC_MSK; @@ -848,7 +848,7 @@ static int rs_rate_from_ucode_rate(const u32 ucode_rate, if (!(ucode_rate & RATE_MCS_HT_MSK) && !(ucode_rate & RATE_MCS_VHT_MSK)) { if (num_of_ant == 1) { - if (band == IEEE80211_BAND_5GHZ) + if (band == NL80211_BAND_5GHZ) rate->type = LQ_LEGACY_A; else rate->type = LQ_LEGACY_G; @@ -1043,7 +1043,7 @@ static void rs_get_lower_rate_down_column(struct iwl_lq_sta *lq_sta, return; } else if (is_siso(rate)) { /* Downgrade to Legacy if we were in SISO */ - if (lq_sta->band == IEEE80211_BAND_5GHZ) + if (lq_sta->band == NL80211_BAND_5GHZ) rate->type = LQ_LEGACY_A; else rate->type = LQ_LEGACY_G; @@ -1850,7 +1850,7 @@ static int rs_switch_to_column(struct iwl_mvm *mvm, rate->ant = column->ant; if (column->mode == RS_LEGACY) { - if (lq_sta->band == IEEE80211_BAND_5GHZ) + if (lq_sta->band == NL80211_BAND_5GHZ) rate->type = LQ_LEGACY_A; else rate->type = LQ_LEGACY_G; @@ -2020,7 +2020,7 @@ static void rs_get_adjacent_txp(struct iwl_mvm *mvm, int index, } static bool rs_tpc_allowed(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - struct rs_rate *rate, enum ieee80211_band band) + struct rs_rate *rate, enum nl80211_band band) { int index = rate->index; bool cam = (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM); @@ -2126,7 +2126,7 @@ static bool rs_tpc_perform(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); struct ieee80211_vif *vif = mvm_sta->vif; struct ieee80211_chanctx_conf *chanctx_conf; - enum ieee80211_band band; + enum nl80211_band band; struct iwl_rate_scale_data *window; struct rs_rate *rate = &tbl->rate; enum tpc_action action; @@ -2148,7 +2148,7 @@ static bool rs_tpc_perform(struct iwl_mvm *mvm, rcu_read_lock(); chanctx_conf = rcu_dereference(vif->chanctx_conf); if (WARN_ON(!chanctx_conf)) - band = IEEE80211_NUM_BANDS; + band = NUM_NL80211_BANDS; else band = chanctx_conf->def.chan->band; rcu_read_unlock(); @@ -2606,7 +2606,7 @@ static void rs_init_optimal_rate(struct iwl_mvm *mvm, rate->type = lq_sta->is_vht ? LQ_VHT_MIMO2 : LQ_HT_MIMO2; else if (lq_sta->max_siso_rate_idx != IWL_RATE_INVALID) rate->type = lq_sta->is_vht ? 
LQ_VHT_SISO : LQ_HT_SISO; - else if (lq_sta->band == IEEE80211_BAND_5GHZ) + else if (lq_sta->band == NL80211_BAND_5GHZ) rate->type = LQ_LEGACY_A; else rate->type = LQ_LEGACY_G; @@ -2623,7 +2623,7 @@ static void rs_init_optimal_rate(struct iwl_mvm *mvm, } else { lq_sta->optimal_rate_mask = lq_sta->active_legacy_rate; - if (lq_sta->band == IEEE80211_BAND_5GHZ) { + if (lq_sta->band == NL80211_BAND_5GHZ) { lq_sta->optimal_rates = rs_optimal_rates_5ghz_legacy; lq_sta->optimal_nentries = ARRAY_SIZE(rs_optimal_rates_5ghz_legacy); @@ -2679,7 +2679,7 @@ static struct rs_rate *rs_get_optimal_rate(struct iwl_mvm *mvm, static void rs_get_initial_rate(struct iwl_mvm *mvm, struct ieee80211_sta *sta, struct iwl_lq_sta *lq_sta, - enum ieee80211_band band, + enum nl80211_band band, struct rs_rate *rate) { int i, nentries; @@ -2714,7 +2714,7 @@ static void rs_get_initial_rate(struct iwl_mvm *mvm, rate->index = find_first_bit(&lq_sta->active_legacy_rate, BITS_PER_LONG); - if (band == IEEE80211_BAND_5GHZ) { + if (band == NL80211_BAND_5GHZ) { rate->type = LQ_LEGACY_A; initial_rates = rs_optimal_rates_5ghz_legacy; nentries = ARRAY_SIZE(rs_optimal_rates_5ghz_legacy); @@ -2814,7 +2814,7 @@ void rs_update_last_rssi(struct iwl_mvm *mvm, static void rs_initialize_lq(struct iwl_mvm *mvm, struct ieee80211_sta *sta, struct iwl_lq_sta *lq_sta, - enum ieee80211_band band, + enum nl80211_band band, bool init) { struct iwl_scale_tbl_info *tbl; @@ -3097,7 +3097,7 @@ void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm, u32 rate, bool agg) * Called after adding a new station to initialize rate scaling */ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta, - enum ieee80211_band band, bool init) + enum nl80211_band band, bool init) { int i, j; struct ieee80211_hw *hw = mvm->hw; @@ -3203,7 +3203,7 @@ static void rs_rate_update(void *mvm_r, #ifdef CONFIG_MAC80211_DEBUGFS static void rs_build_rates_table_from_fixed(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq_cmd, - enum ieee80211_band band, + enum nl80211_band band, u32 ucode_rate) { struct rs_rate rate; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h index bdb6f2d8d854..90d046fb24a0 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h @@ -305,7 +305,7 @@ struct iwl_lq_sta { bool stbc_capable; /* Tx STBC is supported by chip and Rx by STA */ bool bfer_capable; /* Remote supports beamformee and we BFer */ - enum ieee80211_band band; + enum nl80211_band band; /* The following are bitmaps of rates; IWL_RATE_6M_MASK, etc. */ unsigned long active_legacy_rate; @@ -358,7 +358,7 @@ struct iwl_lq_sta { /* Initialize station's rate scaling information after adding station */ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta, - enum ieee80211_band band, bool init); + enum nl80211_band band, bool init); /* Notify RS about Tx status */ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c index d8cadf2fe098..263e8a8576b7 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c @@ -319,7 +319,7 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi, rx_status->device_timestamp = le32_to_cpu(phy_info->system_timestamp); rx_status->band = (phy_info->phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_BAND_24)) ? 
- IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ; + NL80211_BAND_2GHZ : NL80211_BAND_5GHZ; rx_status->freq = ieee80211_channel_to_frequency(le16_to_cpu(phy_info->channel), rx_status->band); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c index d4a4c13400cb..651604d18a32 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c @@ -456,8 +456,8 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, rx_status->mactime = le64_to_cpu(desc->tsf_on_air_rise); rx_status->device_timestamp = le32_to_cpu(desc->gp2_on_air_rise); - rx_status->band = desc->channel > 14 ? IEEE80211_BAND_5GHZ : - IEEE80211_BAND_2GHZ; + rx_status->band = desc->channel > 14 ? NL80211_BAND_5GHZ : + NL80211_BAND_2GHZ; rx_status->freq = ieee80211_channel_to_frequency(desc->channel, rx_status->band); iwl_mvm_get_signal_strength(mvm, desc, rx_status); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c index c1d1be9c5d01..6f609dd5c222 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c @@ -163,16 +163,16 @@ static inline __le16 iwl_mvm_scan_rx_chain(struct iwl_mvm *mvm) return cpu_to_le16(rx_chain); } -static __le32 iwl_mvm_scan_rxon_flags(enum ieee80211_band band) +static __le32 iwl_mvm_scan_rxon_flags(enum nl80211_band band) { - if (band == IEEE80211_BAND_2GHZ) + if (band == NL80211_BAND_2GHZ) return cpu_to_le32(PHY_BAND_24); else return cpu_to_le32(PHY_BAND_5); } static inline __le32 -iwl_mvm_scan_rate_n_flags(struct iwl_mvm *mvm, enum ieee80211_band band, +iwl_mvm_scan_rate_n_flags(struct iwl_mvm *mvm, enum nl80211_band band, bool no_cck) { u32 tx_ant; @@ -182,7 +182,7 @@ iwl_mvm_scan_rate_n_flags(struct iwl_mvm *mvm, enum ieee80211_band band, mvm->scan_last_antenna_idx); tx_ant = BIT(mvm->scan_last_antenna_idx) << RATE_MCS_ANT_POS; - if (band == IEEE80211_BAND_2GHZ && !no_cck) + if (band == NL80211_BAND_2GHZ && !no_cck) return cpu_to_le32(IWL_RATE_1M_PLCP | RATE_MCS_CCK_MSK | tx_ant); else @@ -591,14 +591,14 @@ static void iwl_mvm_scan_fill_tx_cmd(struct iwl_mvm *mvm, tx_cmd[0].tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL | TX_CMD_FLG_BT_DIS); tx_cmd[0].rate_n_flags = iwl_mvm_scan_rate_n_flags(mvm, - IEEE80211_BAND_2GHZ, + NL80211_BAND_2GHZ, no_cck); tx_cmd[0].sta_id = mvm->aux_sta.sta_id; tx_cmd[1].tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL | TX_CMD_FLG_BT_DIS); tx_cmd[1].rate_n_flags = iwl_mvm_scan_rate_n_flags(mvm, - IEEE80211_BAND_5GHZ, + NL80211_BAND_5GHZ, no_cck); tx_cmd[1].sta_id = mvm->aux_sta.sta_id; } @@ -695,19 +695,19 @@ iwl_mvm_build_scan_probe(struct iwl_mvm *mvm, struct ieee80211_vif *vif, /* Insert ds parameter set element on 2.4 GHz band */ newpos = iwl_mvm_copy_and_insert_ds_elem(mvm, - ies->ies[IEEE80211_BAND_2GHZ], - ies->len[IEEE80211_BAND_2GHZ], + ies->ies[NL80211_BAND_2GHZ], + ies->len[NL80211_BAND_2GHZ], pos); params->preq.band_data[0].offset = cpu_to_le16(pos - params->preq.buf); params->preq.band_data[0].len = cpu_to_le16(newpos - pos); pos = newpos; - memcpy(pos, ies->ies[IEEE80211_BAND_5GHZ], - ies->len[IEEE80211_BAND_5GHZ]); + memcpy(pos, ies->ies[NL80211_BAND_5GHZ], + ies->len[NL80211_BAND_5GHZ]); params->preq.band_data[1].offset = cpu_to_le16(pos - params->preq.buf); params->preq.band_data[1].len = - cpu_to_le16(ies->len[IEEE80211_BAND_5GHZ]); - pos += ies->len[IEEE80211_BAND_5GHZ]; + cpu_to_le16(ies->len[NL80211_BAND_5GHZ]); + pos += 
ies->len[NL80211_BAND_5GHZ]; memcpy(pos, ies->common_ies, ies->common_ie_len); params->preq.common_data.offset = cpu_to_le16(pos - params->preq.buf); @@ -921,10 +921,10 @@ static __le32 iwl_mvm_scan_config_rates(struct iwl_mvm *mvm) unsigned int rates = 0; int i; - band = &mvm->nvm_data->bands[IEEE80211_BAND_2GHZ]; + band = &mvm->nvm_data->bands[NL80211_BAND_2GHZ]; for (i = 0; i < band->n_bitrates; i++) rates |= rate_to_scan_rate_flag(band->bitrates[i].hw_value); - band = &mvm->nvm_data->bands[IEEE80211_BAND_5GHZ]; + band = &mvm->nvm_data->bands[NL80211_BAND_5GHZ]; for (i = 0; i < band->n_bitrates; i++) rates |= rate_to_scan_rate_flag(band->bitrates[i].hw_value); @@ -939,8 +939,8 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm) struct iwl_scan_config *scan_config; struct ieee80211_supported_band *band; int num_channels = - mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels + - mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels; + mvm->nvm_data->bands[NL80211_BAND_2GHZ].n_channels + + mvm->nvm_data->bands[NL80211_BAND_5GHZ].n_channels; int ret, i, j = 0, cmd_size; struct iwl_host_cmd cmd = { .id = iwl_cmd_id(SCAN_CFG_CMD, IWL_ALWAYS_LONG_GROUP, 0), @@ -994,10 +994,10 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm) IWL_CHANNEL_FLAG_EBS_ADD | IWL_CHANNEL_FLAG_PRE_SCAN_PASSIVE2ACTIVE; - band = &mvm->nvm_data->bands[IEEE80211_BAND_2GHZ]; + band = &mvm->nvm_data->bands[NL80211_BAND_2GHZ]; for (i = 0; i < band->n_channels; i++, j++) scan_config->channel_array[j] = band->channels[i].hw_value; - band = &mvm->nvm_data->bands[IEEE80211_BAND_5GHZ]; + band = &mvm->nvm_data->bands[NL80211_BAND_5GHZ]; for (i = 0; i < band->n_channels; i++, j++) scan_config->channel_array[j] = band->channels[i].hw_value; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c b/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c index 18711c5de35a..9f160fc58cd0 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c @@ -444,7 +444,7 @@ iwl_mvm_tdls_config_channel_switch(struct iwl_mvm *mvm, } if (chandef) { - cmd.ci.band = (chandef->chan->band == IEEE80211_BAND_2GHZ ? + cmd.ci.band = (chandef->chan->band == NL80211_BAND_2GHZ ? 
PHY_BAND_24 : PHY_BAND_5); cmd.ci.channel = chandef->chan->hw_value; cmd.ci.width = iwl_mvm_get_channel_width(chandef); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c index efb9b98c4c98..bd286fca3776 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c @@ -359,7 +359,7 @@ void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd, &mvm->nvm_data->bands[info->band], sta); /* For 5 GHZ band, remap mac80211 rate indices into driver indices */ - if (info->band == IEEE80211_BAND_5GHZ) + if (info->band == NL80211_BAND_5GHZ) rate_idx += IWL_FIRST_OFDM_RATE; /* For 2.4 GHZ band, check that there is no need to remap */ @@ -372,7 +372,7 @@ void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd, iwl_mvm_next_antenna(mvm, iwl_mvm_get_valid_tx_ant(mvm), mvm->mgmt_last_antenna_idx); - if (info->band == IEEE80211_BAND_2GHZ && + if (info->band == NL80211_BAND_2GHZ && !iwl_mvm_bt_coex_is_shared_ant_avail(mvm)) rate_flags = mvm->cfg->non_shared_ant << RATE_MCS_ANT_POS; else @@ -1052,7 +1052,7 @@ const char *iwl_mvm_get_tx_fail_reason(u32 status) #endif /* CONFIG_IWLWIFI_DEBUG */ void iwl_mvm_hwrate_to_tx_rate(u32 rate_n_flags, - enum ieee80211_band band, + enum nl80211_band band, struct ieee80211_tx_rate *r) { if (rate_n_flags & RATE_HT_MCS_GF_MSK) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c index 486c98541afc..f0ffd62f02d3 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c @@ -217,14 +217,14 @@ static const u8 fw_rate_idx_to_plcp[IWL_RATE_COUNT] = { }; int iwl_mvm_legacy_rate_to_mac80211_idx(u32 rate_n_flags, - enum ieee80211_band band) + enum nl80211_band band) { int rate = rate_n_flags & RATE_LEGACY_RATE_MSK; int idx; int band_offset = 0; /* Legacy rate format, search for match in table */ - if (band == IEEE80211_BAND_5GHZ) + if (band == NL80211_BAND_5GHZ) band_offset = IWL_FIRST_OFDM_RATE; for (idx = band_offset; idx < IWL_RATE_COUNT_LEGACY; idx++) if (fw_rate_idx_to_plcp[idx] == rate) diff --git a/drivers/net/wireless/intersil/orinoco/cfg.c b/drivers/net/wireless/intersil/orinoco/cfg.c index 0f6ea316e38e..7aa47069af0a 100644 --- a/drivers/net/wireless/intersil/orinoco/cfg.c +++ b/drivers/net/wireless/intersil/orinoco/cfg.c @@ -60,14 +60,14 @@ int orinoco_wiphy_register(struct wiphy *wiphy) if (priv->channel_mask & (1 << i)) { priv->channels[i].center_freq = ieee80211_channel_to_frequency(i + 1, - IEEE80211_BAND_2GHZ); + NL80211_BAND_2GHZ); channels++; } } priv->band.channels = priv->channels; priv->band.n_channels = channels; - wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band; + wiphy->bands[NL80211_BAND_2GHZ] = &priv->band; wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM; i = 0; @@ -175,7 +175,7 @@ static int orinoco_set_monitor_channel(struct wiphy *wiphy, if (cfg80211_get_chandef_type(chandef) != NL80211_CHAN_NO_HT) return -EINVAL; - if (chandef->chan->band != IEEE80211_BAND_2GHZ) + if (chandef->chan->band != NL80211_BAND_2GHZ) return -EINVAL; channel = ieee80211_frequency_to_channel(chandef->chan->center_freq); diff --git a/drivers/net/wireless/intersil/orinoco/hw.c b/drivers/net/wireless/intersil/orinoco/hw.c index e27e32851f1e..61af5a28f269 100644 --- a/drivers/net/wireless/intersil/orinoco/hw.c +++ b/drivers/net/wireless/intersil/orinoco/hw.c @@ -1193,7 +1193,7 @@ int orinoco_hw_get_freq(struct orinoco_private *priv) goto out; } 
- freq = ieee80211_channel_to_frequency(channel, IEEE80211_BAND_2GHZ); + freq = ieee80211_channel_to_frequency(channel, NL80211_BAND_2GHZ); out: orinoco_unlock(priv, &flags); diff --git a/drivers/net/wireless/intersil/orinoco/scan.c b/drivers/net/wireless/intersil/orinoco/scan.c index 2c66166add70..d0ceb06c72d0 100644 --- a/drivers/net/wireless/intersil/orinoco/scan.c +++ b/drivers/net/wireless/intersil/orinoco/scan.c @@ -111,7 +111,7 @@ static void orinoco_add_hostscan_result(struct orinoco_private *priv, } freq = ieee80211_channel_to_frequency( - le16_to_cpu(bss->a.channel), IEEE80211_BAND_2GHZ); + le16_to_cpu(bss->a.channel), NL80211_BAND_2GHZ); channel = ieee80211_get_channel(wiphy, freq); if (!channel) { printk(KERN_DEBUG "Invalid channel designation %04X(%04X)", @@ -148,7 +148,7 @@ void orinoco_add_extscan_result(struct orinoco_private *priv, ie_len = len - sizeof(*bss); ie = cfg80211_find_ie(WLAN_EID_DS_PARAMS, bss->data, ie_len); chan = ie ? ie[2] : 0; - freq = ieee80211_channel_to_frequency(chan, IEEE80211_BAND_2GHZ); + freq = ieee80211_channel_to_frequency(chan, NL80211_BAND_2GHZ); channel = ieee80211_get_channel(wiphy, freq); timestamp = le64_to_cpu(bss->timestamp); diff --git a/drivers/net/wireless/intersil/p54/eeprom.c b/drivers/net/wireless/intersil/p54/eeprom.c index 2fe713eda7ad..d4c73d39336f 100644 --- a/drivers/net/wireless/intersil/p54/eeprom.c +++ b/drivers/net/wireless/intersil/p54/eeprom.c @@ -76,14 +76,14 @@ struct p54_channel_entry { u16 data; int index; int max_power; - enum ieee80211_band band; + enum nl80211_band band; }; struct p54_channel_list { struct p54_channel_entry *channels; size_t entries; size_t max_entries; - size_t band_channel_num[IEEE80211_NUM_BANDS]; + size_t band_channel_num[NUM_NL80211_BANDS]; }; static int p54_get_band_from_freq(u16 freq) @@ -91,10 +91,10 @@ static int p54_get_band_from_freq(u16 freq) /* FIXME: sync these values with the 802.11 spec */ if ((freq >= 2412) && (freq <= 2484)) - return IEEE80211_BAND_2GHZ; + return NL80211_BAND_2GHZ; if ((freq >= 4920) && (freq <= 5825)) - return IEEE80211_BAND_5GHZ; + return NL80211_BAND_5GHZ; return -1; } @@ -124,16 +124,16 @@ static int p54_compare_rssichan(const void *_a, static int p54_fill_band_bitrates(struct ieee80211_hw *dev, struct ieee80211_supported_band *band_entry, - enum ieee80211_band band) + enum nl80211_band band) { /* TODO: generate rate array dynamically */ switch (band) { - case IEEE80211_BAND_2GHZ: + case NL80211_BAND_2GHZ: band_entry->bitrates = p54_bgrates; band_entry->n_bitrates = ARRAY_SIZE(p54_bgrates); break; - case IEEE80211_BAND_5GHZ: + case NL80211_BAND_5GHZ: band_entry->bitrates = p54_arates; band_entry->n_bitrates = ARRAY_SIZE(p54_arates); break; @@ -147,7 +147,7 @@ static int p54_fill_band_bitrates(struct ieee80211_hw *dev, static int p54_generate_band(struct ieee80211_hw *dev, struct p54_channel_list *list, unsigned int *chan_num, - enum ieee80211_band band) + enum nl80211_band band) { struct p54_common *priv = dev->priv; struct ieee80211_supported_band *tmp, *old; @@ -206,7 +206,7 @@ static int p54_generate_band(struct ieee80211_hw *dev, if (j == 0) { wiphy_err(dev->wiphy, "Disabling totally damaged %d GHz band\n", - (band == IEEE80211_BAND_2GHZ) ? 2 : 5); + (band == NL80211_BAND_2GHZ) ? 
2 : 5); ret = -ENODATA; goto err_out; @@ -396,7 +396,7 @@ static int p54_generate_channel_lists(struct ieee80211_hw *dev) p54_compare_channels, NULL); k = 0; - for (i = 0, j = 0; i < IEEE80211_NUM_BANDS; i++) { + for (i = 0, j = 0; i < NUM_NL80211_BANDS; i++) { if (p54_generate_band(dev, list, &k, i) == 0) j++; } @@ -573,10 +573,10 @@ static int p54_parse_rssical(struct ieee80211_hw *dev, for (i = 0; i < entries; i++) { u16 freq = 0; switch (i) { - case IEEE80211_BAND_2GHZ: + case NL80211_BAND_2GHZ: freq = 2437; break; - case IEEE80211_BAND_5GHZ: + case NL80211_BAND_5GHZ: freq = 5240; break; } @@ -902,11 +902,11 @@ good_eeprom: if (priv->rxhw == PDR_SYNTH_FRONTEND_XBOW) p54_init_xbow_synth(priv); if (!(synth & PDR_SYNTH_24_GHZ_DISABLED)) - dev->wiphy->bands[IEEE80211_BAND_2GHZ] = - priv->band_table[IEEE80211_BAND_2GHZ]; + dev->wiphy->bands[NL80211_BAND_2GHZ] = + priv->band_table[NL80211_BAND_2GHZ]; if (!(synth & PDR_SYNTH_5_GHZ_DISABLED)) - dev->wiphy->bands[IEEE80211_BAND_5GHZ] = - priv->band_table[IEEE80211_BAND_5GHZ]; + dev->wiphy->bands[NL80211_BAND_5GHZ] = + priv->band_table[NL80211_BAND_5GHZ]; if ((synth & PDR_SYNTH_RX_DIV_MASK) == PDR_SYNTH_RX_DIV_SUPPORTED) priv->rx_diversity_mask = 3; if ((synth & PDR_SYNTH_TX_DIV_MASK) == PDR_SYNTH_TX_DIV_SUPPORTED) diff --git a/drivers/net/wireless/intersil/p54/main.c b/drivers/net/wireless/intersil/p54/main.c index 7805864e76f9..d5a3bf91a03e 100644 --- a/drivers/net/wireless/intersil/p54/main.c +++ b/drivers/net/wireless/intersil/p54/main.c @@ -477,7 +477,7 @@ static void p54_bss_info_changed(struct ieee80211_hw *dev, p54_set_edcf(priv); } if (changed & BSS_CHANGED_BASIC_RATES) { - if (dev->conf.chandef.chan->band == IEEE80211_BAND_5GHZ) + if (dev->conf.chandef.chan->band == NL80211_BAND_5GHZ) priv->basic_rate_mask = (info->basic_rates << 4); else priv->basic_rate_mask = info->basic_rates; @@ -829,7 +829,7 @@ void p54_free_common(struct ieee80211_hw *dev) struct p54_common *priv = dev->priv; unsigned int i; - for (i = 0; i < IEEE80211_NUM_BANDS; i++) + for (i = 0; i < NUM_NL80211_BANDS; i++) kfree(priv->band_table[i]); kfree(priv->iq_autocal); diff --git a/drivers/net/wireless/intersil/p54/p54.h b/drivers/net/wireless/intersil/p54/p54.h index 40b401ed6845..529939e611cd 100644 --- a/drivers/net/wireless/intersil/p54/p54.h +++ b/drivers/net/wireless/intersil/p54/p54.h @@ -223,7 +223,7 @@ struct p54_common { struct p54_cal_database *curve_data; struct p54_cal_database *output_limit; struct p54_cal_database *rssi_db; - struct ieee80211_supported_band *band_table[IEEE80211_NUM_BANDS]; + struct ieee80211_supported_band *band_table[NUM_NL80211_BANDS]; /* BBP/MAC state */ u8 mac_addr[ETH_ALEN]; diff --git a/drivers/net/wireless/intersil/p54/txrx.c b/drivers/net/wireless/intersil/p54/txrx.c index 24e5ff9a9272..1af7da0b386e 100644 --- a/drivers/net/wireless/intersil/p54/txrx.c +++ b/drivers/net/wireless/intersil/p54/txrx.c @@ -353,7 +353,7 @@ static int p54_rx_data(struct p54_common *priv, struct sk_buff *skb) rx_status->signal = p54_rssi_to_dbm(priv, hdr->rssi); if (hdr->rate & 0x10) rx_status->flag |= RX_FLAG_SHORTPRE; - if (priv->hw->conf.chandef.chan->band == IEEE80211_BAND_5GHZ) + if (priv->hw->conf.chandef.chan->band == NL80211_BAND_5GHZ) rx_status->rate_idx = (rate < 4) ? 
0 : rate - 4; else rx_status->rate_idx = rate; @@ -867,7 +867,7 @@ void p54_tx_80211(struct ieee80211_hw *dev, for (i = 0; i < nrates && ridx < 8; i++) { /* we register the rates in perfect order */ rate = info->control.rates[i].idx; - if (info->band == IEEE80211_BAND_5GHZ) + if (info->band == NL80211_BAND_5GHZ) rate += 4; /* store the count we actually calculated for TX status */ diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index 2b185feb1aa0..c757f14c4c00 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c @@ -255,14 +255,14 @@ static struct class *hwsim_class; static struct net_device *hwsim_mon; /* global monitor netdev */ #define CHAN2G(_freq) { \ - .band = IEEE80211_BAND_2GHZ, \ + .band = NL80211_BAND_2GHZ, \ .center_freq = (_freq), \ .hw_value = (_freq), \ .max_power = 20, \ } #define CHAN5G(_freq) { \ - .band = IEEE80211_BAND_5GHZ, \ + .band = NL80211_BAND_5GHZ, \ .center_freq = (_freq), \ .hw_value = (_freq), \ .max_power = 20, \ @@ -479,7 +479,7 @@ struct mac80211_hwsim_data { struct list_head list; struct ieee80211_hw *hw; struct device *dev; - struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS]; + struct ieee80211_supported_band bands[NUM_NL80211_BANDS]; struct ieee80211_channel channels_2ghz[ARRAY_SIZE(hwsim_channels_2ghz)]; struct ieee80211_channel channels_5ghz[ARRAY_SIZE(hwsim_channels_5ghz)]; struct ieee80211_rate rates[ARRAY_SIZE(hwsim_rates)]; @@ -2347,7 +2347,7 @@ static int mac80211_hwsim_new_radio(struct genl_info *info, u8 addr[ETH_ALEN]; struct mac80211_hwsim_data *data; struct ieee80211_hw *hw; - enum ieee80211_band band; + enum nl80211_band band; const struct ieee80211_ops *ops = &mac80211_hwsim_ops; int idx; @@ -2476,16 +2476,16 @@ static int mac80211_hwsim_new_radio(struct genl_info *info, sizeof(hwsim_channels_5ghz)); memcpy(data->rates, hwsim_rates, sizeof(hwsim_rates)); - for (band = IEEE80211_BAND_2GHZ; band < IEEE80211_NUM_BANDS; band++) { + for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) { struct ieee80211_supported_band *sband = &data->bands[band]; switch (band) { - case IEEE80211_BAND_2GHZ: + case NL80211_BAND_2GHZ: sband->channels = data->channels_2ghz; sband->n_channels = ARRAY_SIZE(hwsim_channels_2ghz); sband->bitrates = data->rates; sband->n_bitrates = ARRAY_SIZE(hwsim_rates); break; - case IEEE80211_BAND_5GHZ: + case NL80211_BAND_5GHZ: sband->channels = data->channels_5ghz; sband->n_channels = ARRAY_SIZE(hwsim_channels_5ghz); sband->bitrates = data->rates + 4; diff --git a/drivers/net/wireless/marvell/libertas/cfg.c b/drivers/net/wireless/marvell/libertas/cfg.c index 2eea76a340b7..776b44bfd93a 100644 --- a/drivers/net/wireless/marvell/libertas/cfg.c +++ b/drivers/net/wireless/marvell/libertas/cfg.c @@ -23,7 +23,7 @@ #define CHAN2G(_channel, _freq, _flags) { \ - .band = IEEE80211_BAND_2GHZ, \ + .band = NL80211_BAND_2GHZ, \ .center_freq = (_freq), \ .hw_value = (_channel), \ .flags = (_flags), \ @@ -639,7 +639,7 @@ static int lbs_ret_scan(struct lbs_private *priv, unsigned long dummy, if (chan_no != -1) { struct wiphy *wiphy = priv->wdev->wiphy; int freq = ieee80211_channel_to_frequency(chan_no, - IEEE80211_BAND_2GHZ); + NL80211_BAND_2GHZ); struct ieee80211_channel *channel = ieee80211_get_channel(wiphy, freq); @@ -1266,7 +1266,7 @@ _new_connect_scan_req(struct wiphy *wiphy, struct cfg80211_connect_params *sme) { struct cfg80211_scan_request *creq = NULL; int i, n_channels = ieee80211_get_num_supported_channels(wiphy); - enum ieee80211_band 
band; + enum nl80211_band band; creq = kzalloc(sizeof(*creq) + sizeof(struct cfg80211_ssid) + n_channels * sizeof(void *), @@ -1281,7 +1281,7 @@ _new_connect_scan_req(struct wiphy *wiphy, struct cfg80211_connect_params *sme) /* Scan all available channels */ i = 0; - for (band = 0; band < IEEE80211_NUM_BANDS; band++) { + for (band = 0; band < NUM_NL80211_BANDS; band++) { int j; if (!wiphy->bands[band]) @@ -2200,7 +2200,7 @@ int lbs_cfg_register(struct lbs_private *priv) if (lbs_mesh_activated(priv)) wdev->wiphy->interface_modes |= BIT(NL80211_IFTYPE_MESH_POINT); - wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = &lbs_band_2ghz; + wdev->wiphy->bands[NL80211_BAND_2GHZ] = &lbs_band_2ghz; /* * We could check priv->fwcapinfo && FW_CAPINFO_WPA, but I have diff --git a/drivers/net/wireless/marvell/libertas/cmd.c b/drivers/net/wireless/marvell/libertas/cmd.c index 4ddd0e5a6b85..301170cccfff 100644 --- a/drivers/net/wireless/marvell/libertas/cmd.c +++ b/drivers/net/wireless/marvell/libertas/cmd.c @@ -743,7 +743,7 @@ int lbs_set_11d_domain_info(struct lbs_private *priv) struct cmd_ds_802_11d_domain_info cmd; struct mrvl_ie_domain_param_set *domain = &cmd.domain; struct ieee80211_country_ie_triplet *t; - enum ieee80211_band band; + enum nl80211_band band; struct ieee80211_channel *ch; u8 num_triplet = 0; u8 num_parsed_chan = 0; @@ -777,7 +777,7 @@ int lbs_set_11d_domain_info(struct lbs_private *priv) * etc. */ for (band = 0; - (band < IEEE80211_NUM_BANDS) && (num_triplet < MAX_11D_TRIPLETS); + (band < NUM_NL80211_BANDS) && (num_triplet < MAX_11D_TRIPLETS); band++) { if (!bands[band]) diff --git a/drivers/net/wireless/marvell/libertas_tf/main.c b/drivers/net/wireless/marvell/libertas_tf/main.c index a47f0acc099a..0bf8916a02cf 100644 --- a/drivers/net/wireless/marvell/libertas_tf/main.c +++ b/drivers/net/wireless/marvell/libertas_tf/main.c @@ -570,7 +570,7 @@ int lbtf_rx(struct lbtf_private *priv, struct sk_buff *skb) if (!(prxpd->status & cpu_to_le16(MRVDRV_RXPD_STATUS_OK))) stats.flag |= RX_FLAG_FAILED_FCS_CRC; stats.freq = priv->cur_freq; - stats.band = IEEE80211_BAND_2GHZ; + stats.band = NL80211_BAND_2GHZ; stats.signal = prxpd->snr; priv->noise = prxpd->nf; /* Marvell rate index has a hole at value 4 */ @@ -642,7 +642,7 @@ struct lbtf_private *lbtf_add_card(void *card, struct device *dmdev) priv->band.bitrates = priv->rates; priv->band.n_channels = ARRAY_SIZE(lbtf_channels); priv->band.channels = priv->channels; - hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band; + hw->wiphy->bands[NL80211_BAND_2GHZ] = &priv->band; hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_ADHOC); diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c index 49661e087811..6db202fa7157 100644 --- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c +++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c @@ -474,7 +474,7 @@ int mwifiex_send_domain_info_cmd_fw(struct wiphy *wiphy) u8 no_of_parsed_chan = 0; u8 first_chan = 0, next_chan = 0, max_pwr = 0; u8 i, flag = 0; - enum ieee80211_band band; + enum nl80211_band band; struct ieee80211_supported_band *sband; struct ieee80211_channel *ch; struct mwifiex_adapter *adapter = mwifiex_cfg80211_get_adapter(wiphy); @@ -1410,7 +1410,7 @@ mwifiex_cfg80211_dump_survey(struct wiphy *wiphy, struct net_device *dev, { struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); struct mwifiex_chan_stats *pchan_stats = priv->adapter->chan_stats; - enum ieee80211_band band; + enum nl80211_band band; 
mwifiex_dbg(priv->adapter, DUMP, "dump_survey idx=%d\n", idx); @@ -1586,7 +1586,7 @@ static int mwifiex_cfg80211_set_bitrate_mask(struct wiphy *wiphy, { struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); u16 bitmap_rates[MAX_BITMAP_RATES_SIZE]; - enum ieee80211_band band; + enum nl80211_band band; struct mwifiex_adapter *adapter = priv->adapter; if (!priv->media_connected) { @@ -1600,11 +1600,11 @@ static int mwifiex_cfg80211_set_bitrate_mask(struct wiphy *wiphy, memset(bitmap_rates, 0, sizeof(bitmap_rates)); /* Fill HR/DSSS rates. */ - if (band == IEEE80211_BAND_2GHZ) + if (band == NL80211_BAND_2GHZ) bitmap_rates[0] = mask->control[band].legacy & 0x000f; /* Fill OFDM rates */ - if (band == IEEE80211_BAND_2GHZ) + if (band == NL80211_BAND_2GHZ) bitmap_rates[1] = (mask->control[band].legacy & 0x0ff0) >> 4; else bitmap_rates[1] = mask->control[band].legacy; @@ -1771,7 +1771,7 @@ mwifiex_cfg80211_set_antenna(struct wiphy *wiphy, u32 tx_ant, u32 rx_ant) } else { struct ieee80211_sta_ht_cap *ht_info; int rx_mcs_supp; - enum ieee80211_band band; + enum nl80211_band band; if ((tx_ant == 0x1 && rx_ant == 0x1)) { adapter->user_dev_mcs_support = HT_STREAM_1X1; @@ -1785,7 +1785,7 @@ mwifiex_cfg80211_set_antenna(struct wiphy *wiphy, u32 tx_ant, u32 rx_ant) MWIFIEX_11AC_MCS_MAP_2X2; } - for (band = 0; band < IEEE80211_NUM_BANDS; band++) { + for (band = 0; band < NUM_NL80211_BANDS; band++) { if (!adapter->wiphy->bands[band]) continue; @@ -1997,7 +1997,7 @@ static int mwifiex_cfg80211_inform_ibss_bss(struct mwifiex_private *priv) struct cfg80211_bss *bss; int ie_len; u8 ie_buf[IEEE80211_MAX_SSID_LEN + sizeof(struct ieee_types_header)]; - enum ieee80211_band band; + enum nl80211_band band; if (mwifiex_get_bss_info(priv, &bss_info)) return -1; @@ -2271,7 +2271,7 @@ static int mwifiex_set_ibss_params(struct mwifiex_private *priv, int index = 0, i; u8 config_bands = 0; - if (params->chandef.chan->band == IEEE80211_BAND_2GHZ) { + if (params->chandef.chan->band == NL80211_BAND_2GHZ) { if (!params->basic_rates) { config_bands = BAND_B | BAND_G; } else { @@ -2859,18 +2859,18 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy, mwifiex_init_priv_params(priv, dev); priv->netdev = dev; - mwifiex_setup_ht_caps(&wiphy->bands[IEEE80211_BAND_2GHZ]->ht_cap, priv); + mwifiex_setup_ht_caps(&wiphy->bands[NL80211_BAND_2GHZ]->ht_cap, priv); if (adapter->is_hw_11ac_capable) mwifiex_setup_vht_caps( - &wiphy->bands[IEEE80211_BAND_2GHZ]->vht_cap, priv); + &wiphy->bands[NL80211_BAND_2GHZ]->vht_cap, priv); if (adapter->config_bands & BAND_A) mwifiex_setup_ht_caps( - &wiphy->bands[IEEE80211_BAND_5GHZ]->ht_cap, priv); + &wiphy->bands[NL80211_BAND_5GHZ]->ht_cap, priv); if ((adapter->config_bands & BAND_A) && adapter->is_hw_11ac_capable) mwifiex_setup_vht_caps( - &wiphy->bands[IEEE80211_BAND_5GHZ]->vht_cap, priv); + &wiphy->bands[NL80211_BAND_5GHZ]->vht_cap, priv); dev_net_set(dev, wiphy_net(wiphy)); dev->ieee80211_ptr = &priv->wdev; @@ -3821,7 +3821,7 @@ static int mwifiex_cfg80211_get_channel(struct wiphy *wiphy, struct ieee80211_channel *chan; u8 second_chan_offset; enum nl80211_channel_type chan_type; - enum ieee80211_band band; + enum nl80211_band band; int freq; int ret = -ENODATA; @@ -4053,11 +4053,11 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter) BIT(NL80211_IFTYPE_P2P_GO) | BIT(NL80211_IFTYPE_AP); - wiphy->bands[IEEE80211_BAND_2GHZ] = &mwifiex_band_2ghz; + wiphy->bands[NL80211_BAND_2GHZ] = &mwifiex_band_2ghz; if (adapter->config_bands & BAND_A) - wiphy->bands[IEEE80211_BAND_5GHZ] = 
&mwifiex_band_5ghz; + wiphy->bands[NL80211_BAND_5GHZ] = &mwifiex_band_5ghz; else - wiphy->bands[IEEE80211_BAND_5GHZ] = NULL; + wiphy->bands[NL80211_BAND_5GHZ] = NULL; if (adapter->drcs_enabled && ISSUPP_DRCS_ENABLED(adapter->fw_cap_info)) wiphy->iface_combinations = &mwifiex_iface_comb_ap_sta_drcs; diff --git a/drivers/net/wireless/marvell/mwifiex/cfp.c b/drivers/net/wireless/marvell/mwifiex/cfp.c index 09fae27140f7..1ff22055e54f 100644 --- a/drivers/net/wireless/marvell/mwifiex/cfp.c +++ b/drivers/net/wireless/marvell/mwifiex/cfp.c @@ -322,9 +322,9 @@ mwifiex_get_cfp(struct mwifiex_private *priv, u8 band, u16 channel, u32 freq) return cfp; if (mwifiex_band_to_radio_type(band) == HostCmd_SCAN_RADIO_TYPE_BG) - sband = priv->wdev.wiphy->bands[IEEE80211_BAND_2GHZ]; + sband = priv->wdev.wiphy->bands[NL80211_BAND_2GHZ]; else - sband = priv->wdev.wiphy->bands[IEEE80211_BAND_5GHZ]; + sband = priv->wdev.wiphy->bands[NL80211_BAND_5GHZ]; if (!sband) { mwifiex_dbg(priv->adapter, ERROR, @@ -399,15 +399,15 @@ u32 mwifiex_get_rates_from_cfg80211(struct mwifiex_private *priv, int i; if (radio_type) { - sband = wiphy->bands[IEEE80211_BAND_5GHZ]; + sband = wiphy->bands[NL80211_BAND_5GHZ]; if (WARN_ON_ONCE(!sband)) return 0; - rate_mask = request->rates[IEEE80211_BAND_5GHZ]; + rate_mask = request->rates[NL80211_BAND_5GHZ]; } else { - sband = wiphy->bands[IEEE80211_BAND_2GHZ]; + sband = wiphy->bands[NL80211_BAND_2GHZ]; if (WARN_ON_ONCE(!sband)) return 0; - rate_mask = request->rates[IEEE80211_BAND_2GHZ]; + rate_mask = request->rates[NL80211_BAND_2GHZ]; } num_rates = 0; diff --git a/drivers/net/wireless/marvell/mwifiex/scan.c b/drivers/net/wireless/marvell/mwifiex/scan.c index 489f7a911a83..624b0a95c64e 100644 --- a/drivers/net/wireless/marvell/mwifiex/scan.c +++ b/drivers/net/wireless/marvell/mwifiex/scan.c @@ -494,13 +494,13 @@ mwifiex_scan_create_channel_list(struct mwifiex_private *priv, *scan_chan_list, u8 filtered_scan) { - enum ieee80211_band band; + enum nl80211_band band; struct ieee80211_supported_band *sband; struct ieee80211_channel *ch; struct mwifiex_adapter *adapter = priv->adapter; int chan_idx = 0, i; - for (band = 0; (band < IEEE80211_NUM_BANDS) ; band++) { + for (band = 0; (band < NUM_NL80211_BANDS) ; band++) { if (!priv->wdev.wiphy->bands[band]) continue; @@ -557,13 +557,13 @@ mwifiex_bgscan_create_channel_list(struct mwifiex_private *priv, struct mwifiex_chan_scan_param_set *scan_chan_list) { - enum ieee80211_band band; + enum nl80211_band band; struct ieee80211_supported_band *sband; struct ieee80211_channel *ch; struct mwifiex_adapter *adapter = priv->adapter; int chan_idx = 0, i; - for (band = 0; (band < IEEE80211_NUM_BANDS); band++) { + for (band = 0; (band < NUM_NL80211_BANDS); band++) { if (!priv->wdev.wiphy->bands[band]) continue; diff --git a/drivers/net/wireless/marvell/mwifiex/uap_cmd.c b/drivers/net/wireless/marvell/mwifiex/uap_cmd.c index 92ce32f5bb13..f79d00d1e294 100644 --- a/drivers/net/wireless/marvell/mwifiex/uap_cmd.c +++ b/drivers/net/wireless/marvell/mwifiex/uap_cmd.c @@ -816,7 +816,7 @@ void mwifiex_uap_set_channel(struct mwifiex_private *priv, chandef.chan->center_freq); /* Set appropriate bands */ - if (chandef.chan->band == IEEE80211_BAND_2GHZ) { + if (chandef.chan->band == NL80211_BAND_2GHZ) { bss_cfg->band_cfg = BAND_CONFIG_BG; config_bands = BAND_B | BAND_G; diff --git a/drivers/net/wireless/marvell/mwl8k.c b/drivers/net/wireless/marvell/mwl8k.c index 088429d0a634..b1b400b59d86 100644 --- a/drivers/net/wireless/marvell/mwl8k.c +++ 
b/drivers/net/wireless/marvell/mwl8k.c @@ -346,20 +346,20 @@ struct mwl8k_sta { #define MWL8K_STA(_sta) ((struct mwl8k_sta *)&((_sta)->drv_priv)) static const struct ieee80211_channel mwl8k_channels_24[] = { - { .band = IEEE80211_BAND_2GHZ, .center_freq = 2412, .hw_value = 1, }, - { .band = IEEE80211_BAND_2GHZ, .center_freq = 2417, .hw_value = 2, }, - { .band = IEEE80211_BAND_2GHZ, .center_freq = 2422, .hw_value = 3, }, - { .band = IEEE80211_BAND_2GHZ, .center_freq = 2427, .hw_value = 4, }, - { .band = IEEE80211_BAND_2GHZ, .center_freq = 2432, .hw_value = 5, }, - { .band = IEEE80211_BAND_2GHZ, .center_freq = 2437, .hw_value = 6, }, - { .band = IEEE80211_BAND_2GHZ, .center_freq = 2442, .hw_value = 7, }, - { .band = IEEE80211_BAND_2GHZ, .center_freq = 2447, .hw_value = 8, }, - { .band = IEEE80211_BAND_2GHZ, .center_freq = 2452, .hw_value = 9, }, - { .band = IEEE80211_BAND_2GHZ, .center_freq = 2457, .hw_value = 10, }, - { .band = IEEE80211_BAND_2GHZ, .center_freq = 2462, .hw_value = 11, }, - { .band = IEEE80211_BAND_2GHZ, .center_freq = 2467, .hw_value = 12, }, - { .band = IEEE80211_BAND_2GHZ, .center_freq = 2472, .hw_value = 13, }, - { .band = IEEE80211_BAND_2GHZ, .center_freq = 2484, .hw_value = 14, }, + { .band = NL80211_BAND_2GHZ, .center_freq = 2412, .hw_value = 1, }, + { .band = NL80211_BAND_2GHZ, .center_freq = 2417, .hw_value = 2, }, + { .band = NL80211_BAND_2GHZ, .center_freq = 2422, .hw_value = 3, }, + { .band = NL80211_BAND_2GHZ, .center_freq = 2427, .hw_value = 4, }, + { .band = NL80211_BAND_2GHZ, .center_freq = 2432, .hw_value = 5, }, + { .band = NL80211_BAND_2GHZ, .center_freq = 2437, .hw_value = 6, }, + { .band = NL80211_BAND_2GHZ, .center_freq = 2442, .hw_value = 7, }, + { .band = NL80211_BAND_2GHZ, .center_freq = 2447, .hw_value = 8, }, + { .band = NL80211_BAND_2GHZ, .center_freq = 2452, .hw_value = 9, }, + { .band = NL80211_BAND_2GHZ, .center_freq = 2457, .hw_value = 10, }, + { .band = NL80211_BAND_2GHZ, .center_freq = 2462, .hw_value = 11, }, + { .band = NL80211_BAND_2GHZ, .center_freq = 2467, .hw_value = 12, }, + { .band = NL80211_BAND_2GHZ, .center_freq = 2472, .hw_value = 13, }, + { .band = NL80211_BAND_2GHZ, .center_freq = 2484, .hw_value = 14, }, }; static const struct ieee80211_rate mwl8k_rates_24[] = { @@ -379,10 +379,10 @@ static const struct ieee80211_rate mwl8k_rates_24[] = { }; static const struct ieee80211_channel mwl8k_channels_50[] = { - { .band = IEEE80211_BAND_5GHZ, .center_freq = 5180, .hw_value = 36, }, - { .band = IEEE80211_BAND_5GHZ, .center_freq = 5200, .hw_value = 40, }, - { .band = IEEE80211_BAND_5GHZ, .center_freq = 5220, .hw_value = 44, }, - { .band = IEEE80211_BAND_5GHZ, .center_freq = 5240, .hw_value = 48, }, + { .band = NL80211_BAND_5GHZ, .center_freq = 5180, .hw_value = 36, }, + { .band = NL80211_BAND_5GHZ, .center_freq = 5200, .hw_value = 40, }, + { .band = NL80211_BAND_5GHZ, .center_freq = 5220, .hw_value = 44, }, + { .band = NL80211_BAND_5GHZ, .center_freq = 5240, .hw_value = 48, }, }; static const struct ieee80211_rate mwl8k_rates_50[] = { @@ -1010,11 +1010,11 @@ mwl8k_rxd_ap_process(void *_rxd, struct ieee80211_rx_status *status, } if (rxd->channel > 14) { - status->band = IEEE80211_BAND_5GHZ; + status->band = NL80211_BAND_5GHZ; if (!(status->flag & RX_FLAG_HT)) status->rate_idx -= 5; } else { - status->band = IEEE80211_BAND_2GHZ; + status->band = NL80211_BAND_2GHZ; } status->freq = ieee80211_channel_to_frequency(rxd->channel, status->band); @@ -1118,11 +1118,11 @@ mwl8k_rxd_sta_process(void *_rxd, struct ieee80211_rx_status *status, 
status->flag |= RX_FLAG_HT; if (rxd->channel > 14) { - status->band = IEEE80211_BAND_5GHZ; + status->band = NL80211_BAND_5GHZ; if (!(status->flag & RX_FLAG_HT)) status->rate_idx -= 5; } else { - status->band = IEEE80211_BAND_2GHZ; + status->band = NL80211_BAND_2GHZ; } status->freq = ieee80211_channel_to_frequency(rxd->channel, status->band); @@ -2300,13 +2300,13 @@ static void mwl8k_setup_2ghz_band(struct ieee80211_hw *hw) BUILD_BUG_ON(sizeof(priv->rates_24) != sizeof(mwl8k_rates_24)); memcpy(priv->rates_24, mwl8k_rates_24, sizeof(mwl8k_rates_24)); - priv->band_24.band = IEEE80211_BAND_2GHZ; + priv->band_24.band = NL80211_BAND_2GHZ; priv->band_24.channels = priv->channels_24; priv->band_24.n_channels = ARRAY_SIZE(mwl8k_channels_24); priv->band_24.bitrates = priv->rates_24; priv->band_24.n_bitrates = ARRAY_SIZE(mwl8k_rates_24); - hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band_24; + hw->wiphy->bands[NL80211_BAND_2GHZ] = &priv->band_24; } static void mwl8k_setup_5ghz_band(struct ieee80211_hw *hw) @@ -2319,13 +2319,13 @@ static void mwl8k_setup_5ghz_band(struct ieee80211_hw *hw) BUILD_BUG_ON(sizeof(priv->rates_50) != sizeof(mwl8k_rates_50)); memcpy(priv->rates_50, mwl8k_rates_50, sizeof(mwl8k_rates_50)); - priv->band_50.band = IEEE80211_BAND_5GHZ; + priv->band_50.band = NL80211_BAND_5GHZ; priv->band_50.channels = priv->channels_50; priv->band_50.n_channels = ARRAY_SIZE(mwl8k_channels_50); priv->band_50.bitrates = priv->rates_50; priv->band_50.n_bitrates = ARRAY_SIZE(mwl8k_rates_50); - hw->wiphy->bands[IEEE80211_BAND_5GHZ] = &priv->band_50; + hw->wiphy->bands[NL80211_BAND_5GHZ] = &priv->band_50; } /* @@ -2876,9 +2876,9 @@ static int mwl8k_cmd_tx_power(struct ieee80211_hw *hw, cmd->header.length = cpu_to_le16(sizeof(*cmd)); cmd->action = cpu_to_le16(MWL8K_CMD_SET_LIST); - if (channel->band == IEEE80211_BAND_2GHZ) + if (channel->band == NL80211_BAND_2GHZ) cmd->band = cpu_to_le16(0x1); - else if (channel->band == IEEE80211_BAND_5GHZ) + else if (channel->band == NL80211_BAND_5GHZ) cmd->band = cpu_to_le16(0x4); cmd->channel = cpu_to_le16(channel->hw_value); @@ -3067,7 +3067,7 @@ static int freq_to_idx(struct mwl8k_priv *priv, int freq) struct ieee80211_supported_band *sband; int band, ch, idx = 0; - for (band = IEEE80211_BAND_2GHZ; band < IEEE80211_NUM_BANDS; band++) { + for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) { sband = priv->hw->wiphy->bands[band]; if (!sband) continue; @@ -3149,9 +3149,9 @@ static int mwl8k_cmd_set_rf_channel(struct ieee80211_hw *hw, cmd->action = cpu_to_le16(MWL8K_CMD_SET); cmd->current_channel = channel->hw_value; - if (channel->band == IEEE80211_BAND_2GHZ) + if (channel->band == NL80211_BAND_2GHZ) cmd->channel_flags |= cpu_to_le32(0x00000001); - else if (channel->band == IEEE80211_BAND_5GHZ) + else if (channel->band == NL80211_BAND_5GHZ) cmd->channel_flags |= cpu_to_le32(0x00000004); if (!priv->sw_scan_start) { @@ -4094,10 +4094,10 @@ static int mwl8k_cmd_set_new_stn_add(struct ieee80211_hw *hw, memcpy(cmd->mac_addr, sta->addr, ETH_ALEN); cmd->stn_id = cpu_to_le16(sta->aid); cmd->action = cpu_to_le16(MWL8K_STA_ACTION_ADD); - if (hw->conf.chandef.chan->band == IEEE80211_BAND_2GHZ) - rates = sta->supp_rates[IEEE80211_BAND_2GHZ]; + if (hw->conf.chandef.chan->band == NL80211_BAND_2GHZ) + rates = sta->supp_rates[NL80211_BAND_2GHZ]; else - rates = sta->supp_rates[IEEE80211_BAND_5GHZ] << 5; + rates = sta->supp_rates[NL80211_BAND_5GHZ] << 5; cmd->legacy_rates = cpu_to_le32(rates); if (sta->ht_cap.ht_supported) { cmd->ht_rates[0] = 
sta->ht_cap.mcs.rx_mask[0]; @@ -4529,10 +4529,10 @@ static int mwl8k_cmd_update_stadb_add(struct ieee80211_hw *hw, p->ht_caps = cpu_to_le16(sta->ht_cap.cap); p->extended_ht_caps = (sta->ht_cap.ampdu_factor & 3) | ((sta->ht_cap.ampdu_density & 7) << 2); - if (hw->conf.chandef.chan->band == IEEE80211_BAND_2GHZ) - rates = sta->supp_rates[IEEE80211_BAND_2GHZ]; + if (hw->conf.chandef.chan->band == NL80211_BAND_2GHZ) + rates = sta->supp_rates[NL80211_BAND_2GHZ]; else - rates = sta->supp_rates[IEEE80211_BAND_5GHZ] << 5; + rates = sta->supp_rates[NL80211_BAND_5GHZ] << 5; legacy_rate_mask_to_array(p->legacy_rates, rates); memcpy(p->ht_rates, sta->ht_cap.mcs.rx_mask, 16); p->interop = 1; @@ -5010,11 +5010,11 @@ mwl8k_bss_info_changed_sta(struct ieee80211_hw *hw, struct ieee80211_vif *vif, goto out; } - if (hw->conf.chandef.chan->band == IEEE80211_BAND_2GHZ) { - ap_legacy_rates = ap->supp_rates[IEEE80211_BAND_2GHZ]; + if (hw->conf.chandef.chan->band == NL80211_BAND_2GHZ) { + ap_legacy_rates = ap->supp_rates[NL80211_BAND_2GHZ]; } else { ap_legacy_rates = - ap->supp_rates[IEEE80211_BAND_5GHZ] << 5; + ap->supp_rates[NL80211_BAND_5GHZ] << 5; } memcpy(ap_mcs_rates, ap->ht_cap.mcs.rx_mask, 16); @@ -5042,7 +5042,7 @@ mwl8k_bss_info_changed_sta(struct ieee80211_hw *hw, struct ieee80211_vif *vif, idx--; if (hw->conf.chandef.chan->band == - IEEE80211_BAND_2GHZ) + NL80211_BAND_2GHZ) rate = mwl8k_rates_24[idx].hw_value; else rate = mwl8k_rates_50[idx].hw_value; @@ -5116,7 +5116,7 @@ mwl8k_bss_info_changed_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif, if (idx) idx--; - if (hw->conf.chandef.chan->band == IEEE80211_BAND_2GHZ) + if (hw->conf.chandef.chan->band == NL80211_BAND_2GHZ) rate = mwl8k_rates_24[idx].hw_value; else rate = mwl8k_rates_50[idx].hw_value; @@ -5388,7 +5388,7 @@ static int mwl8k_get_survey(struct ieee80211_hw *hw, int idx, struct ieee80211_supported_band *sband; if (priv->ap_fw) { - sband = hw->wiphy->bands[IEEE80211_BAND_2GHZ]; + sband = hw->wiphy->bands[NL80211_BAND_2GHZ]; if (sband && idx >= sband->n_channels) { idx -= sband->n_channels; @@ -5396,7 +5396,7 @@ static int mwl8k_get_survey(struct ieee80211_hw *hw, int idx, } if (!sband) - sband = hw->wiphy->bands[IEEE80211_BAND_5GHZ]; + sband = hw->wiphy->bands[NL80211_BAND_5GHZ]; if (!sband || idx >= sband->n_channels) return -ENOENT; diff --git a/drivers/net/wireless/mediatek/mt7601u/init.c b/drivers/net/wireless/mediatek/mt7601u/init.c index 26190fd33407..8fa78d7156be 100644 --- a/drivers/net/wireless/mediatek/mt7601u/init.c +++ b/drivers/net/wireless/mediatek/mt7601u/init.c @@ -469,7 +469,7 @@ struct mt7601u_dev *mt7601u_alloc_device(struct device *pdev) } #define CHAN2G(_idx, _freq) { \ - .band = IEEE80211_BAND_2GHZ, \ + .band = NL80211_BAND_2GHZ, \ .center_freq = (_freq), \ .hw_value = (_idx), \ .max_power = 30, \ @@ -563,7 +563,7 @@ mt76_init_sband_2g(struct mt7601u_dev *dev) { dev->sband_2g = devm_kzalloc(dev->dev, sizeof(*dev->sband_2g), GFP_KERNEL); - dev->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = dev->sband_2g; + dev->hw->wiphy->bands[NL80211_BAND_2GHZ] = dev->sband_2g; WARN_ON(dev->ee->reg.start - 1 + dev->ee->reg.num > ARRAY_SIZE(mt76_channels_2ghz)); diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c index 7fa0128de7e3..c36fa4e03fb6 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c @@ -777,7 +777,7 @@ static int rt2800_agc_to_rssi(struct rt2x00_dev *rt2x00dev, u32 rxwi_w2) u8 offset1; u8 offset2; - if 
(rt2x00dev->curr_band == IEEE80211_BAND_2GHZ) { + if (rt2x00dev->curr_band == NL80211_BAND_2GHZ) { rt2800_eeprom_read(rt2x00dev, EEPROM_RSSI_BG, &eeprom); offset0 = rt2x00_get_field16(eeprom, EEPROM_RSSI_BG_OFFSET0); offset1 = rt2x00_get_field16(eeprom, EEPROM_RSSI_BG_OFFSET1); @@ -1174,7 +1174,7 @@ static void rt2800_brightness_set(struct led_classdev *led_cdev, container_of(led_cdev, struct rt2x00_led, led_dev); unsigned int enabled = brightness != LED_OFF; unsigned int bg_mode = - (enabled && led->rt2x00dev->curr_band == IEEE80211_BAND_2GHZ); + (enabled && led->rt2x00dev->curr_band == NL80211_BAND_2GHZ); unsigned int polarity = rt2x00_get_field16(led->rt2x00dev->led_mcu_reg, EEPROM_FREQ_LED_POLARITY); @@ -1741,7 +1741,7 @@ static void rt2800_config_3572bt_ant(struct rt2x00_dev *rt2x00dev) u8 led_ctrl, led_g_mode, led_r_mode; rt2800_register_read(rt2x00dev, GPIO_SWITCH, &reg); - if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ) { + if (rt2x00dev->curr_band == NL80211_BAND_5GHZ) { rt2x00_set_field32(&reg, GPIO_SWITCH_0, 1); rt2x00_set_field32(&reg, GPIO_SWITCH_1, 1); } else { @@ -1844,7 +1844,7 @@ void rt2800_config_ant(struct rt2x00_dev *rt2x00dev, struct antenna_setup *ant) rt2x00_has_cap_bt_coexist(rt2x00dev)) { rt2x00_set_field8(&r3, BBP3_RX_ADC, 1); rt2x00_set_field8(&r3, BBP3_RX_ANTENNA, - rt2x00dev->curr_band == IEEE80211_BAND_5GHZ); + rt2x00dev->curr_band == NL80211_BAND_5GHZ); rt2800_set_ant_diversity(rt2x00dev, ANTENNA_B); } else { rt2x00_set_field8(&r3, BBP3_RX_ANTENNA, 1); @@ -3451,7 +3451,7 @@ static int rt2800_get_gain_calibration_delta(struct rt2x00_dev *rt2x00dev) * Matching Delta value -4 -3 -2 -1 0 +1 +2 +3 +4 * Example TSSI bounds 0xF0 0xD0 0xB5 0xA0 0x88 0x45 0x25 0x15 0x00 */ - if (rt2x00dev->curr_band == IEEE80211_BAND_2GHZ) { + if (rt2x00dev->curr_band == NL80211_BAND_2GHZ) { rt2800_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_BG1, &eeprom); tssi_bounds[0] = rt2x00_get_field16(eeprom, EEPROM_TSSI_BOUND_BG1_MINUS4); @@ -3546,7 +3546,7 @@ static int rt2800_get_gain_calibration_delta(struct rt2x00_dev *rt2x00dev) } static int rt2800_get_txpower_bw_comp(struct rt2x00_dev *rt2x00dev, - enum ieee80211_band band) + enum nl80211_band band) { u16 eeprom; u8 comp_en; @@ -3562,7 +3562,7 @@ static int rt2800_get_txpower_bw_comp(struct rt2x00_dev *rt2x00dev, !test_bit(CONFIG_CHANNEL_HT40, &rt2x00dev->flags)) return 0; - if (band == IEEE80211_BAND_2GHZ) { + if (band == NL80211_BAND_2GHZ) { comp_en = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_DELTA_ENABLE_2G); if (comp_en) { @@ -3611,7 +3611,7 @@ static int rt2800_get_txpower_reg_delta(struct rt2x00_dev *rt2x00dev, } static u8 rt2800_compensate_txpower(struct rt2x00_dev *rt2x00dev, int is_rate_b, - enum ieee80211_band band, int power_level, + enum nl80211_band band, int power_level, u8 txpower, int delta) { u16 eeprom; @@ -3639,7 +3639,7 @@ static u8 rt2800_compensate_txpower(struct rt2x00_dev *rt2x00dev, int is_rate_b, rt2800_eeprom_read(rt2x00dev, EEPROM_EIRP_MAX_TX_POWER, &eeprom); - if (band == IEEE80211_BAND_2GHZ) + if (band == NL80211_BAND_2GHZ) eirp_txpower_criterion = rt2x00_get_field16(eeprom, EEPROM_EIRP_MAX_TX_POWER_2GHZ); else @@ -3686,7 +3686,7 @@ static void rt2800_config_txpower_rt3593(struct rt2x00_dev *rt2x00dev, u16 eeprom; u32 regs[TX_PWR_CFG_IDX_COUNT]; unsigned int offset; - enum ieee80211_band band = chan->band; + enum nl80211_band band = chan->band; int delta; int i; @@ -3697,7 +3697,7 @@ static void rt2800_config_txpower_rt3593(struct rt2x00_dev *rt2x00dev, /* calculate temperature compensation delta */ delta =
rt2800_get_gain_calibration_delta(rt2x00dev); - if (band == IEEE80211_BAND_5GHZ) + if (band == NL80211_BAND_5GHZ) offset = 16; else offset = 0; @@ -4055,7 +4055,7 @@ static void rt2800_config_txpower_rt3593(struct rt2x00_dev *rt2x00dev, for (i = 0; i < TX_PWR_CFG_IDX_COUNT; i++) rt2x00_dbg(rt2x00dev, "band:%cGHz, BW:%c0MHz, TX_PWR_CFG_%d%s = %08lx\n", - (band == IEEE80211_BAND_5GHZ) ? '5' : '2', + (band == NL80211_BAND_5GHZ) ? '5' : '2', (test_bit(CONFIG_CHANNEL_HT40, &rt2x00dev->flags)) ? '4' : '2', (i > TX_PWR_CFG_9_IDX) ? @@ -4081,7 +4081,7 @@ static void rt2800_config_txpower_rt28xx(struct rt2x00_dev *rt2x00dev, u16 eeprom; u32 reg, offset; int i, is_rate_b, delta, power_ctrl; - enum ieee80211_band band = chan->band; + enum nl80211_band band = chan->band; /* * Calculate HT40 compensation. For 40MHz we need to add or subtract @@ -4436,7 +4436,7 @@ static u8 rt2800_get_default_vgc(struct rt2x00_dev *rt2x00dev) { u8 vgc; - if (rt2x00dev->curr_band == IEEE80211_BAND_2GHZ) { + if (rt2x00dev->curr_band == NL80211_BAND_2GHZ) { if (rt2x00_rt(rt2x00dev, RT3070) || rt2x00_rt(rt2x00dev, RT3071) || rt2x00_rt(rt2x00dev, RT3090) || @@ -4511,7 +4511,7 @@ void rt2800_link_tuner(struct rt2x00_dev *rt2x00dev, struct link_qual *qual, case RT3572: case RT3593: if (qual->rssi > -65) { - if (rt2x00dev->curr_band == IEEE80211_BAND_2GHZ) + if (rt2x00dev->curr_band == NL80211_BAND_2GHZ) vgc += 0x20; else vgc += 0x10; diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00.h b/drivers/net/wireless/ralink/rt2x00/rt2x00.h index 3dacede7da5e..f68d492129c6 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00.h +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00.h @@ -753,8 +753,8 @@ struct rt2x00_dev { * IEEE80211 control structure. */ struct ieee80211_hw *hw; - struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS]; - enum ieee80211_band curr_band; + struct ieee80211_supported_band bands[NUM_NL80211_BANDS]; + enum nl80211_band curr_band; int curr_freq; /* diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c index b2f7c586045d..4e0c5653054b 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c @@ -911,7 +911,7 @@ static void rt2x00lib_channel(struct ieee80211_channel *entry, const int value) { /* XXX: this assumption about the band is wrong for 802.11j */ - entry->band = channel <= 14 ? IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ; + entry->band = channel <= 14 ? 
NL80211_BAND_2GHZ : NL80211_BAND_5GHZ; entry->center_freq = ieee80211_channel_to_frequency(channel, entry->band); entry->hw_value = value; @@ -975,13 +975,13 @@ static int rt2x00lib_probe_hw_modes(struct rt2x00_dev *rt2x00dev, * Channels: 2.4 GHz */ if (spec->supported_bands & SUPPORT_BAND_2GHZ) { - rt2x00dev->bands[IEEE80211_BAND_2GHZ].n_channels = 14; - rt2x00dev->bands[IEEE80211_BAND_2GHZ].n_bitrates = num_rates; - rt2x00dev->bands[IEEE80211_BAND_2GHZ].channels = channels; - rt2x00dev->bands[IEEE80211_BAND_2GHZ].bitrates = rates; - hw->wiphy->bands[IEEE80211_BAND_2GHZ] = - &rt2x00dev->bands[IEEE80211_BAND_2GHZ]; - memcpy(&rt2x00dev->bands[IEEE80211_BAND_2GHZ].ht_cap, + rt2x00dev->bands[NL80211_BAND_2GHZ].n_channels = 14; + rt2x00dev->bands[NL80211_BAND_2GHZ].n_bitrates = num_rates; + rt2x00dev->bands[NL80211_BAND_2GHZ].channels = channels; + rt2x00dev->bands[NL80211_BAND_2GHZ].bitrates = rates; + hw->wiphy->bands[NL80211_BAND_2GHZ] = + &rt2x00dev->bands[NL80211_BAND_2GHZ]; + memcpy(&rt2x00dev->bands[NL80211_BAND_2GHZ].ht_cap, &spec->ht, sizeof(spec->ht)); } @@ -991,15 +991,15 @@ static int rt2x00lib_probe_hw_modes(struct rt2x00_dev *rt2x00dev, * Channels: OFDM, UNII, HiperLAN2. */ if (spec->supported_bands & SUPPORT_BAND_5GHZ) { - rt2x00dev->bands[IEEE80211_BAND_5GHZ].n_channels = + rt2x00dev->bands[NL80211_BAND_5GHZ].n_channels = spec->num_channels - 14; - rt2x00dev->bands[IEEE80211_BAND_5GHZ].n_bitrates = + rt2x00dev->bands[NL80211_BAND_5GHZ].n_bitrates = num_rates - 4; - rt2x00dev->bands[IEEE80211_BAND_5GHZ].channels = &channels[14]; - rt2x00dev->bands[IEEE80211_BAND_5GHZ].bitrates = &rates[4]; - hw->wiphy->bands[IEEE80211_BAND_5GHZ] = - &rt2x00dev->bands[IEEE80211_BAND_5GHZ]; - memcpy(&rt2x00dev->bands[IEEE80211_BAND_5GHZ].ht_cap, + rt2x00dev->bands[NL80211_BAND_5GHZ].channels = &channels[14]; + rt2x00dev->bands[NL80211_BAND_5GHZ].bitrates = &rates[4]; + hw->wiphy->bands[NL80211_BAND_5GHZ] = + &rt2x00dev->bands[NL80211_BAND_5GHZ]; + memcpy(&rt2x00dev->bands[NL80211_BAND_5GHZ].ht_cap, &spec->ht, sizeof(spec->ht)); } @@ -1016,11 +1016,11 @@ static void rt2x00lib_remove_hw(struct rt2x00_dev *rt2x00dev) if (test_bit(DEVICE_STATE_REGISTERED_HW, &rt2x00dev->flags)) ieee80211_unregister_hw(rt2x00dev->hw); - if (likely(rt2x00dev->hw->wiphy->bands[IEEE80211_BAND_2GHZ])) { - kfree(rt2x00dev->hw->wiphy->bands[IEEE80211_BAND_2GHZ]->channels); - kfree(rt2x00dev->hw->wiphy->bands[IEEE80211_BAND_2GHZ]->bitrates); - rt2x00dev->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = NULL; - rt2x00dev->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = NULL; + if (likely(rt2x00dev->hw->wiphy->bands[NL80211_BAND_2GHZ])) { + kfree(rt2x00dev->hw->wiphy->bands[NL80211_BAND_2GHZ]->channels); + kfree(rt2x00dev->hw->wiphy->bands[NL80211_BAND_2GHZ]->bitrates); + rt2x00dev->hw->wiphy->bands[NL80211_BAND_2GHZ] = NULL; + rt2x00dev->hw->wiphy->bands[NL80211_BAND_5GHZ] = NULL; } kfree(rt2x00dev->spec.channels_info); diff --git a/drivers/net/wireless/ralink/rt2x00/rt61pci.c b/drivers/net/wireless/ralink/rt2x00/rt61pci.c index 24a3436ef952..03013eb2f642 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt61pci.c +++ b/drivers/net/wireless/ralink/rt2x00/rt61pci.c @@ -252,9 +252,9 @@ static void rt61pci_brightness_set(struct led_classdev *led_cdev, container_of(led_cdev, struct rt2x00_led, led_dev); unsigned int enabled = brightness != LED_OFF; unsigned int a_mode = - (enabled && led->rt2x00dev->curr_band == IEEE80211_BAND_5GHZ); + (enabled && led->rt2x00dev->curr_band == NL80211_BAND_5GHZ); unsigned int bg_mode = - (enabled && 
led->rt2x00dev->curr_band == IEEE80211_BAND_2GHZ); + (enabled && led->rt2x00dev->curr_band == NL80211_BAND_2GHZ); if (led->type == LED_TYPE_RADIO) { rt2x00_set_field16(&led->rt2x00dev->led_mcu_reg, @@ -643,12 +643,12 @@ static void rt61pci_config_antenna_5x(struct rt2x00_dev *rt2x00dev, case ANTENNA_HW_DIVERSITY: rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA_CONTROL, 2); rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END, - (rt2x00dev->curr_band != IEEE80211_BAND_5GHZ)); + (rt2x00dev->curr_band != NL80211_BAND_5GHZ)); break; case ANTENNA_A: rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA_CONTROL, 1); rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END, 0); - if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ) + if (rt2x00dev->curr_band == NL80211_BAND_5GHZ) rt2x00_set_field8(&r77, BBP_R77_RX_ANTENNA, 0); else rt2x00_set_field8(&r77, BBP_R77_RX_ANTENNA, 3); @@ -657,7 +657,7 @@ static void rt61pci_config_antenna_5x(struct rt2x00_dev *rt2x00dev, default: rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA_CONTROL, 1); rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END, 0); - if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ) + if (rt2x00dev->curr_band == NL80211_BAND_5GHZ) rt2x00_set_field8(&r77, BBP_R77_RX_ANTENNA, 3); else rt2x00_set_field8(&r77, BBP_R77_RX_ANTENNA, 0); @@ -808,7 +808,7 @@ static void rt61pci_config_ant(struct rt2x00_dev *rt2x00dev, BUG_ON(ant->rx == ANTENNA_SW_DIVERSITY || ant->tx == ANTENNA_SW_DIVERSITY); - if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ) { + if (rt2x00dev->curr_band == NL80211_BAND_5GHZ) { sel = antenna_sel_a; lna = rt2x00_has_cap_external_lna_a(rt2x00dev); } else { @@ -822,9 +822,9 @@ static void rt61pci_config_ant(struct rt2x00_dev *rt2x00dev, rt2x00mmio_register_read(rt2x00dev, PHY_CSR0, &reg); rt2x00_set_field32(&reg, PHY_CSR0_PA_PE_BG, - rt2x00dev->curr_band == IEEE80211_BAND_2GHZ); + rt2x00dev->curr_band == NL80211_BAND_2GHZ); rt2x00_set_field32(&reg, PHY_CSR0_PA_PE_A, - rt2x00dev->curr_band == IEEE80211_BAND_5GHZ); + rt2x00dev->curr_band == NL80211_BAND_5GHZ); rt2x00mmio_register_write(rt2x00dev, PHY_CSR0, reg); @@ -846,7 +846,7 @@ static void rt61pci_config_lna_gain(struct rt2x00_dev *rt2x00dev, u16 eeprom; short lna_gain = 0; - if (libconf->conf->chandef.chan->band == IEEE80211_BAND_2GHZ) { + if (libconf->conf->chandef.chan->band == NL80211_BAND_2GHZ) { if (rt2x00_has_cap_external_lna_bg(rt2x00dev)) lna_gain += 14; @@ -1048,7 +1048,7 @@ static void rt61pci_link_tuner(struct rt2x00_dev *rt2x00dev, /* * Determine r17 bounds.
*/ - if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ) { + if (rt2x00dev->curr_band == NL80211_BAND_5GHZ) { low_bound = 0x28; up_bound = 0x48; if (rt2x00_has_cap_external_lna_a(rt2x00dev)) { @@ -2077,7 +2077,7 @@ static int rt61pci_agc_to_rssi(struct rt2x00_dev *rt2x00dev, int rxd_w1) return 0; } - if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ) { + if (rt2x00dev->curr_band == NL80211_BAND_5GHZ) { if (lna == 3 || lna == 2) offset += 10; } diff --git a/drivers/net/wireless/ralink/rt2x00/rt73usb.c b/drivers/net/wireless/ralink/rt2x00/rt73usb.c index 7bbc86931168..c1397a6d3cee 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt73usb.c +++ b/drivers/net/wireless/ralink/rt2x00/rt73usb.c @@ -197,9 +197,9 @@ static void rt73usb_brightness_set(struct led_classdev *led_cdev, container_of(led_cdev, struct rt2x00_led, led_dev); unsigned int enabled = brightness != LED_OFF; unsigned int a_mode = - (enabled && led->rt2x00dev->curr_band == IEEE80211_BAND_5GHZ); + (enabled && led->rt2x00dev->curr_band == NL80211_BAND_5GHZ); unsigned int bg_mode = - (enabled && led->rt2x00dev->curr_band == IEEE80211_BAND_2GHZ); + (enabled && led->rt2x00dev->curr_band == NL80211_BAND_2GHZ); if (led->type == LED_TYPE_RADIO) { rt2x00_set_field16(&led->rt2x00dev->led_mcu_reg, @@ -593,13 +593,13 @@ static void rt73usb_config_antenna_5x(struct rt2x00_dev *rt2x00dev, case ANTENNA_HW_DIVERSITY: rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA_CONTROL, 2); temp = !rt2x00_has_cap_frame_type(rt2x00dev) && - (rt2x00dev->curr_band != IEEE80211_BAND_5GHZ); + (rt2x00dev->curr_band != NL80211_BAND_5GHZ); rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END, temp); break; case ANTENNA_A: rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA_CONTROL, 1); rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END, 0); - if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ) + if (rt2x00dev->curr_band == NL80211_BAND_5GHZ) rt2x00_set_field8(&r77, BBP_R77_RX_ANTENNA, 0); else rt2x00_set_field8(&r77, BBP_R77_RX_ANTENNA, 3); @@ -608,7 +608,7 @@ static void rt73usb_config_antenna_5x(struct rt2x00_dev *rt2x00dev, default: rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA_CONTROL, 1); rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END, 0); - if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ) + if (rt2x00dev->curr_band == NL80211_BAND_5GHZ) rt2x00_set_field8(&r77, BBP_R77_RX_ANTENNA, 3); else rt2x00_set_field8(&r77, BBP_R77_RX_ANTENNA, 0); @@ -704,7 +704,7 @@ static void rt73usb_config_ant(struct rt2x00_dev *rt2x00dev, BUG_ON(ant->rx == ANTENNA_SW_DIVERSITY || ant->tx == ANTENNA_SW_DIVERSITY); - if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ) { + if (rt2x00dev->curr_band == NL80211_BAND_5GHZ) { sel = antenna_sel_a; lna = rt2x00_has_cap_external_lna_a(rt2x00dev); } else { @@ -718,9 +718,9 @@ static void rt73usb_config_ant(struct rt2x00_dev *rt2x00dev, rt2x00usb_register_read(rt2x00dev, PHY_CSR0, &reg); rt2x00_set_field32(&reg, PHY_CSR0_PA_PE_BG, - (rt2x00dev->curr_band == IEEE80211_BAND_2GHZ)); + (rt2x00dev->curr_band == NL80211_BAND_2GHZ)); rt2x00_set_field32(&reg, PHY_CSR0_PA_PE_A, - (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ)); + (rt2x00dev->curr_band == NL80211_BAND_5GHZ)); rt2x00usb_register_write(rt2x00dev, PHY_CSR0, reg); @@ -736,7 +736,7 @@ static void rt73usb_config_lna_gain(struct rt2x00_dev *rt2x00dev, u16 eeprom; short lna_gain = 0; - if (libconf->conf->chandef.chan->band == IEEE80211_BAND_2GHZ) { + if (libconf->conf->chandef.chan->band == NL80211_BAND_2GHZ) { if (rt2x00_has_cap_external_lna_bg(rt2x00dev)) lna_gain += 14; @@ -923,7 +923,7 @@ static void rt73usb_link_tuner(struct rt2x00_dev *rt2x00dev, /* *
Determine r17 bounds. */ - if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ) { + if (rt2x00dev->curr_band == NL80211_BAND_5GHZ) { low_bound = 0x28; up_bound = 0x48; @@ -1657,7 +1657,7 @@ static int rt73usb_agc_to_rssi(struct rt2x00_dev *rt2x00dev, int rxd_w1) return 0; } - if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ) { + if (rt2x00dev->curr_band == NL80211_BAND_5GHZ) { if (rt2x00_has_cap_external_lna_a(rt2x00dev)) { if (lna == 3 || lna == 2) offset += 10; diff --git a/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c b/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c index c76af5d8b8e0..ba242d0160ec 100644 --- a/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c +++ b/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c @@ -526,7 +526,7 @@ static void rtl8180_tx(struct ieee80211_hw *dev, * ieee80211_generic_frame_duration */ duration = ieee80211_generic_frame_duration(dev, priv->vif, - IEEE80211_BAND_2GHZ, skb->len, + NL80211_BAND_2GHZ, skb->len, ieee80211_get_tx_rate(dev, info)); frame_duration = priv->ack_time + le16_to_cpu(duration); @@ -1529,7 +1529,7 @@ static void rtl8180_bss_info_changed(struct ieee80211_hw *dev, priv->ack_time = le16_to_cpu(ieee80211_generic_frame_duration(dev, priv->vif, - IEEE80211_BAND_2GHZ, 10, + NL80211_BAND_2GHZ, 10, &priv->rates[0])) - 10; rtl8180_conf_erp(dev, info); @@ -1795,12 +1795,12 @@ static int rtl8180_probe(struct pci_dev *pdev, memcpy(priv->channels, rtl818x_channels, sizeof(rtl818x_channels)); memcpy(priv->rates, rtl818x_rates, sizeof(rtl818x_rates)); - priv->band.band = IEEE80211_BAND_2GHZ; + priv->band.band = NL80211_BAND_2GHZ; priv->band.channels = priv->channels; priv->band.n_channels = ARRAY_SIZE(rtl818x_channels); priv->band.bitrates = priv->rates; priv->band.n_bitrates = 4; - dev->wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band; + dev->wiphy->bands[NL80211_BAND_2GHZ] = &priv->band; ieee80211_hw_set(dev, HOST_BROADCAST_PS_BUFFERING); ieee80211_hw_set(dev, RX_INCLUDES_FCS); diff --git a/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c b/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c index b7f72f9c7988..231f84db9ab0 100644 --- a/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c +++ b/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c @@ -1470,12 +1470,12 @@ static int rtl8187_probe(struct usb_interface *intf, memcpy(priv->rates, rtl818x_rates, sizeof(rtl818x_rates)); priv->map = (struct rtl818x_csr *)0xFF00; - priv->band.band = IEEE80211_BAND_2GHZ; + priv->band.band = NL80211_BAND_2GHZ; priv->band.channels = priv->channels; priv->band.n_channels = ARRAY_SIZE(rtl818x_channels); priv->band.bitrates = priv->rates; priv->band.n_bitrates = ARRAY_SIZE(rtl818x_rates); - dev->wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band; + dev->wiphy->bands[NL80211_BAND_2GHZ] = &priv->band; ieee80211_hw_set(dev, RX_INCLUDES_FCS); diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 333addd3d46a..db8433a9efe2 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -91,33 +91,33 @@ static struct ieee80211_rate rtl8xxxu_rates[] = { }; static struct ieee80211_channel rtl8xxxu_channels_2g[] = { - { .band = IEEE80211_BAND_2GHZ, .center_freq = 2412, + { .band = NL80211_BAND_2GHZ, .center_freq = 2412, .hw_value = 1, .max_power = 30 }, - { .band = IEEE80211_BAND_2GHZ, .center_freq = 2417, + { .band = NL80211_BAND_2GHZ, .center_freq = 2417, .hw_value = 2, .max_power = 30 }, - { .band = IEEE80211_BAND_2GHZ, .center_freq = 2422, + { .band 
= NL80211_BAND_2GHZ, .center_freq = 2422, .hw_value = 3, .max_power = 30 }, - { .band = IEEE80211_BAND_2GHZ, .center_freq = 2427, + { .band = NL80211_BAND_2GHZ, .center_freq = 2427, .hw_value = 4, .max_power = 30 }, - { .band = IEEE80211_BAND_2GHZ, .center_freq = 2432, + { .band = NL80211_BAND_2GHZ, .center_freq = 2432, .hw_value = 5, .max_power = 30 }, - { .band = IEEE80211_BAND_2GHZ, .center_freq = 2437, + { .band = NL80211_BAND_2GHZ, .center_freq = 2437, .hw_value = 6, .max_power = 30 }, - { .band = IEEE80211_BAND_2GHZ, .center_freq = 2442, + { .band = NL80211_BAND_2GHZ, .center_freq = 2442, .hw_value = 7, .max_power = 30 }, - { .band = IEEE80211_BAND_2GHZ, .center_freq = 2447, + { .band = NL80211_BAND_2GHZ, .center_freq = 2447, .hw_value = 8, .max_power = 30 }, - { .band = IEEE80211_BAND_2GHZ, .center_freq = 2452, + { .band = NL80211_BAND_2GHZ, .center_freq = 2452, .hw_value = 9, .max_power = 30 }, - { .band = IEEE80211_BAND_2GHZ, .center_freq = 2457, + { .band = NL80211_BAND_2GHZ, .center_freq = 2457, .hw_value = 10, .max_power = 30 }, - { .band = IEEE80211_BAND_2GHZ, .center_freq = 2462, + { .band = NL80211_BAND_2GHZ, .center_freq = 2462, .hw_value = 11, .max_power = 30 }, - { .band = IEEE80211_BAND_2GHZ, .center_freq = 2467, + { .band = NL80211_BAND_2GHZ, .center_freq = 2467, .hw_value = 12, .max_power = 30 }, - { .band = IEEE80211_BAND_2GHZ, .center_freq = 2472, + { .band = NL80211_BAND_2GHZ, .center_freq = 2472, .hw_value = 13, .max_power = 30 }, - { .band = IEEE80211_BAND_2GHZ, .center_freq = 2484, + { .band = NL80211_BAND_2GHZ, .center_freq = 2484, .hw_value = 14, .max_power = 30 } }; @@ -8378,7 +8378,7 @@ static int rtl8xxxu_probe(struct usb_interface *interface, dev_info(&udev->dev, "Enabling HT_20_40 on the 2.4GHz band\n"); sband->ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40; } - hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband; + hw->wiphy->bands[NL80211_BAND_2GHZ] = sband; hw->wiphy->rts_threshold = 2347; diff --git a/drivers/net/wireless/realtek/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c index 0517a4f2d3f2..c74eb139bfa1 100644 --- a/drivers/net/wireless/realtek/rtlwifi/base.c +++ b/drivers/net/wireless/realtek/rtlwifi/base.c @@ -131,7 +131,7 @@ static struct ieee80211_rate rtl_ratetable_5g[] = { }; static const struct ieee80211_supported_band rtl_band_2ghz = { - .band = IEEE80211_BAND_2GHZ, + .band = NL80211_BAND_2GHZ, .channels = rtl_channeltable_2g, .n_channels = ARRAY_SIZE(rtl_channeltable_2g), @@ -143,7 +143,7 @@ static const struct ieee80211_supported_band rtl_band_2ghz = { }; static struct ieee80211_supported_band rtl_band_5ghz = { - .band = IEEE80211_BAND_5GHZ, + .band = NL80211_BAND_5GHZ, .channels = rtl_channeltable_5g, .n_channels = ARRAY_SIZE(rtl_channeltable_5g), @@ -197,7 +197,7 @@ static void _rtl_init_hw_ht_capab(struct ieee80211_hw *hw, ht_cap->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED; - /*hw->wiphy->bands[IEEE80211_BAND_2GHZ] + /*hw->wiphy->bands[NL80211_BAND_2GHZ] *base on ant_num *rx_mask: RX mask *if rx_ant = 1 rx_mask[0]= 0xff;==>MCS0-MCS7 @@ -328,26 +328,26 @@ static void _rtl_init_mac80211(struct ieee80211_hw *hw) rtlhal->bandset == BAND_ON_BOTH) { /* 1: 2.4 G bands */ /* <1> use mac->bands as mem for hw->wiphy->bands */ - sband = &(rtlmac->bands[IEEE80211_BAND_2GHZ]); + sband = &(rtlmac->bands[NL80211_BAND_2GHZ]); - /* <2> set hw->wiphy->bands[IEEE80211_BAND_2GHZ] + /* <2> set hw->wiphy->bands[NL80211_BAND_2GHZ] * to default value(1T1R) */ - memcpy(&(rtlmac->bands[IEEE80211_BAND_2GHZ]), &rtl_band_2ghz, + 
memcpy(&(rtlmac->bands[NL80211_BAND_2GHZ]), &rtl_band_2ghz, sizeof(struct ieee80211_supported_band)); /* <3> init ht cap base on ant_num */ _rtl_init_hw_ht_capab(hw, &sband->ht_cap); /* <4> set mac->sband to wiphy->sband */ - hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband; + hw->wiphy->bands[NL80211_BAND_2GHZ] = sband; /* 2: 5 G bands */ /* <1> use mac->bands as mem for hw->wiphy->bands */ - sband = &(rtlmac->bands[IEEE80211_BAND_5GHZ]); + sband = &(rtlmac->bands[NL80211_BAND_5GHZ]); - /* <2> set hw->wiphy->bands[IEEE80211_BAND_5GHZ] + /* <2> set hw->wiphy->bands[NL80211_BAND_5GHZ] * to default value(1T1R) */ - memcpy(&(rtlmac->bands[IEEE80211_BAND_5GHZ]), &rtl_band_5ghz, + memcpy(&(rtlmac->bands[NL80211_BAND_5GHZ]), &rtl_band_5ghz, sizeof(struct ieee80211_supported_band)); /* <3> init ht cap base on ant_num */ @@ -355,15 +355,15 @@ static void _rtl_init_mac80211(struct ieee80211_hw *hw) _rtl_init_hw_vht_capab(hw, &sband->vht_cap); /* <4> set mac->sband to wiphy->sband */ - hw->wiphy->bands[IEEE80211_BAND_5GHZ] = sband; + hw->wiphy->bands[NL80211_BAND_5GHZ] = sband; } else { if (rtlhal->current_bandtype == BAND_ON_2_4G) { /* <1> use mac->bands as mem for hw->wiphy->bands */ - sband = &(rtlmac->bands[IEEE80211_BAND_2GHZ]); + sband = &(rtlmac->bands[NL80211_BAND_2GHZ]); - /* <2> set hw->wiphy->bands[IEEE80211_BAND_2GHZ] + /* <2> set hw->wiphy->bands[NL80211_BAND_2GHZ] * to default value(1T1R) */ - memcpy(&(rtlmac->bands[IEEE80211_BAND_2GHZ]), + memcpy(&(rtlmac->bands[NL80211_BAND_2GHZ]), &rtl_band_2ghz, sizeof(struct ieee80211_supported_band)); @@ -371,14 +371,14 @@ static void _rtl_init_mac80211(struct ieee80211_hw *hw) _rtl_init_hw_ht_capab(hw, &sband->ht_cap); /* <4> set mac->sband to wiphy->sband */ - hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband; + hw->wiphy->bands[NL80211_BAND_2GHZ] = sband; } else if (rtlhal->current_bandtype == BAND_ON_5G) { /* <1> use mac->bands as mem for hw->wiphy->bands */ - sband = &(rtlmac->bands[IEEE80211_BAND_5GHZ]); + sband = &(rtlmac->bands[NL80211_BAND_5GHZ]); - /* <2> set hw->wiphy->bands[IEEE80211_BAND_5GHZ] + /* <2> set hw->wiphy->bands[NL80211_BAND_5GHZ] * to default value(1T1R) */ - memcpy(&(rtlmac->bands[IEEE80211_BAND_5GHZ]), + memcpy(&(rtlmac->bands[NL80211_BAND_5GHZ]), &rtl_band_5ghz, sizeof(struct ieee80211_supported_band)); @@ -387,7 +387,7 @@ static void _rtl_init_mac80211(struct ieee80211_hw *hw) _rtl_init_hw_vht_capab(hw, &sband->vht_cap); /* <4> set mac->sband to wiphy->sband */ - hw->wiphy->bands[IEEE80211_BAND_5GHZ] = sband; + hw->wiphy->bands[NL80211_BAND_5GHZ] = sband; } else { RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG, "Err BAND %d\n", rtlhal->current_bandtype); @@ -861,7 +861,7 @@ static u8 _rtl_get_highest_n_rate(struct ieee80211_hw *hw, /* mac80211's rate_idx is like this: * - * 2.4G band:rx_status->band == IEEE80211_BAND_2GHZ + * 2.4G band:rx_status->band == NL80211_BAND_2GHZ * * B/G rate: * (rx_status->flag & RX_FLAG_HT) = 0, @@ -871,7 +871,7 @@ static u8 _rtl_get_highest_n_rate(struct ieee80211_hw *hw, * (rx_status->flag & RX_FLAG_HT) = 1, * DESC_RATEMCS0-->DESC_RATEMCS15 ==> idx is 0-->15 * - * 5G band:rx_status->band == IEEE80211_BAND_5GHZ + * 5G band:rx_status->band == NL80211_BAND_5GHZ * A rate: * (rx_status->flag & RX_FLAG_HT) = 0, * DESC_RATE6M-->DESC_RATE54M ==> idx is 0-->7, @@ -958,7 +958,7 @@ int rtlwifi_rate_mapping(struct ieee80211_hw *hw, bool isht, bool isvht, return rate_idx; } if (false == isht) { - if (IEEE80211_BAND_2GHZ == hw->conf.chandef.chan->band) { + if (NL80211_BAND_2GHZ == hw->conf.chandef.chan->band) { 
switch (desc_rate) { case DESC_RATE1M: rate_idx = 0; diff --git a/drivers/net/wireless/realtek/rtlwifi/regd.c b/drivers/net/wireless/realtek/rtlwifi/regd.c index 5be34118e0af..3524441fd516 100644 --- a/drivers/net/wireless/realtek/rtlwifi/regd.c +++ b/drivers/net/wireless/realtek/rtlwifi/regd.c @@ -154,13 +154,13 @@ static bool _rtl_is_radar_freq(u16 center_freq) static void _rtl_reg_apply_beaconing_flags(struct wiphy *wiphy, enum nl80211_reg_initiator initiator) { - enum ieee80211_band band; + enum nl80211_band band; struct ieee80211_supported_band *sband; const struct ieee80211_reg_rule *reg_rule; struct ieee80211_channel *ch; unsigned int i; - for (band = 0; band < IEEE80211_NUM_BANDS; band++) { + for (band = 0; band < NUM_NL80211_BANDS; band++) { if (!wiphy->bands[band]) continue; @@ -210,9 +210,9 @@ static void _rtl_reg_apply_active_scan_flags(struct wiphy *wiphy, struct ieee80211_channel *ch; const struct ieee80211_reg_rule *reg_rule; - if (!wiphy->bands[IEEE80211_BAND_2GHZ]) + if (!wiphy->bands[NL80211_BAND_2GHZ]) return; - sband = wiphy->bands[IEEE80211_BAND_2GHZ]; + sband = wiphy->bands[NL80211_BAND_2GHZ]; /* *If no country IE has been received always enable active scan @@ -262,10 +262,10 @@ static void _rtl_reg_apply_radar_flags(struct wiphy *wiphy) struct ieee80211_channel *ch; unsigned int i; - if (!wiphy->bands[IEEE80211_BAND_5GHZ]) + if (!wiphy->bands[NL80211_BAND_5GHZ]) return; - sband = wiphy->bands[IEEE80211_BAND_5GHZ]; + sband = wiphy->bands[NL80211_BAND_5GHZ]; for (i = 0; i < sband->n_channels; i++) { ch = &sband->channels[i]; @@ -301,12 +301,12 @@ static void _rtl_reg_apply_world_flags(struct wiphy *wiphy, static void _rtl_dump_channel_map(struct wiphy *wiphy) { - enum ieee80211_band band; + enum nl80211_band band; struct ieee80211_supported_band *sband; struct ieee80211_channel *ch; unsigned int i; - for (band = 0; band < IEEE80211_NUM_BANDS; band++) { + for (band = 0; band < NUM_NL80211_BANDS; band++) { if (!wiphy->bands[band]) continue; sband = wiphy->bands[band]; diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h index 389dc47776c0..11d9c2307e2f 100644 --- a/drivers/net/wireless/realtek/rtlwifi/wifi.h +++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h @@ -1359,7 +1359,7 @@ struct rtl_mac { u32 tx_ss_num; u32 rx_ss_num; - struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS]; + struct ieee80211_supported_band bands[NUM_NL80211_BANDS]; struct ieee80211_hw *hw; struct ieee80211_vif *vif; enum nl80211_iftype opmode; diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c index a13d1f2b5912..569918c485b4 100644 --- a/drivers/net/wireless/rndis_wlan.c +++ b/drivers/net/wireless/rndis_wlan.c @@ -1291,7 +1291,7 @@ static int set_channel(struct usbnet *usbdev, int channel) return 0; dsconfig = 1000 * - ieee80211_channel_to_frequency(channel, IEEE80211_BAND_2GHZ); + ieee80211_channel_to_frequency(channel, NL80211_BAND_2GHZ); len = sizeof(config); ret = rndis_query_oid(usbdev, @@ -3476,7 +3476,7 @@ static int rndis_wlan_bind(struct usbnet *usbdev, struct usb_interface *intf) priv->band.n_channels = ARRAY_SIZE(rndis_channels); priv->band.bitrates = priv->rates; priv->band.n_bitrates = ARRAY_SIZE(rndis_rates); - wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band; + wiphy->bands[NL80211_BAND_2GHZ] = &priv->band; wiphy->signal_type = CFG80211_SIGNAL_TYPE_UNSPEC; memcpy(priv->cipher_suites, rndis_cipher_suites, diff --git a/drivers/net/wireless/rsi/rsi_91x_mac80211.c 
b/drivers/net/wireless/rsi/rsi_91x_mac80211.c index 4df992de7d07..dbb23899ddcb 100644 --- a/drivers/net/wireless/rsi/rsi_91x_mac80211.c +++ b/drivers/net/wireless/rsi/rsi_91x_mac80211.c @@ -20,84 +20,84 @@ #include "rsi_common.h" static const struct ieee80211_channel rsi_2ghz_channels[] = { - { .band = IEEE80211_BAND_2GHZ, .center_freq = 2412, + { .band = NL80211_BAND_2GHZ, .center_freq = 2412, .hw_value = 1 }, /* Channel 1 */ - { .band = IEEE80211_BAND_2GHZ, .center_freq = 2417, + { .band = NL80211_BAND_2GHZ, .center_freq = 2417, .hw_value = 2 }, /* Channel 2 */ - { .band = IEEE80211_BAND_2GHZ, .center_freq = 2422, + { .band = NL80211_BAND_2GHZ, .center_freq = 2422, .hw_value = 3 }, /* Channel 3 */ - { .band = IEEE80211_BAND_2GHZ, .center_freq = 2427, + { .band = NL80211_BAND_2GHZ, .center_freq = 2427, .hw_value = 4 }, /* Channel 4 */ - { .band = IEEE80211_BAND_2GHZ, .center_freq = 2432, + { .band = NL80211_BAND_2GHZ, .center_freq = 2432, .hw_value = 5 }, /* Channel 5 */ - { .band = IEEE80211_BAND_2GHZ, .center_freq = 2437, + { .band = NL80211_BAND_2GHZ, .center_freq = 2437, .hw_value = 6 }, /* Channel 6 */ - { .band = IEEE80211_BAND_2GHZ, .center_freq = 2442, + { .band = NL80211_BAND_2GHZ, .center_freq = 2442, .hw_value = 7 }, /* Channel 7 */ - { .band = IEEE80211_BAND_2GHZ, .center_freq = 2447, + { .band = NL80211_BAND_2GHZ, .center_freq = 2447, .hw_value = 8 }, /* Channel 8 */ - { .band = IEEE80211_BAND_2GHZ, .center_freq = 2452, + { .band = NL80211_BAND_2GHZ, .center_freq = 2452, .hw_value = 9 }, /* Channel 9 */ - { .band = IEEE80211_BAND_2GHZ, .center_freq = 2457, + { .band = NL80211_BAND_2GHZ, .center_freq = 2457, .hw_value = 10 }, /* Channel 10 */ - { .band = IEEE80211_BAND_2GHZ, .center_freq = 2462, + { .band = NL80211_BAND_2GHZ, .center_freq = 2462, .hw_value = 11 }, /* Channel 11 */ - { .band = IEEE80211_BAND_2GHZ, .center_freq = 2467, + { .band = NL80211_BAND_2GHZ, .center_freq = 2467, .hw_value = 12 }, /* Channel 12 */ - { .band = IEEE80211_BAND_2GHZ, .center_freq = 2472, + { .band = NL80211_BAND_2GHZ, .center_freq = 2472, .hw_value = 13 }, /* Channel 13 */ - { .band = IEEE80211_BAND_2GHZ, .center_freq = 2484, + { .band = NL80211_BAND_2GHZ, .center_freq = 2484, .hw_value = 14 }, /* Channel 14 */ }; static const struct ieee80211_channel rsi_5ghz_channels[] = { - { .band = IEEE80211_BAND_5GHZ, .center_freq = 5180, + { .band = NL80211_BAND_5GHZ, .center_freq = 5180, .hw_value = 36, }, /* Channel 36 */ - { .band = IEEE80211_BAND_5GHZ, .center_freq = 5200, + { .band = NL80211_BAND_5GHZ, .center_freq = 5200, .hw_value = 40, }, /* Channel 40 */ - { .band = IEEE80211_BAND_5GHZ, .center_freq = 5220, + { .band = NL80211_BAND_5GHZ, .center_freq = 5220, .hw_value = 44, }, /* Channel 44 */ - { .band = IEEE80211_BAND_5GHZ, .center_freq = 5240, + { .band = NL80211_BAND_5GHZ, .center_freq = 5240, .hw_value = 48, }, /* Channel 48 */ - { .band = IEEE80211_BAND_5GHZ, .center_freq = 5260, + { .band = NL80211_BAND_5GHZ, .center_freq = 5260, .hw_value = 52, }, /* Channel 52 */ - { .band = IEEE80211_BAND_5GHZ, .center_freq = 5280, + { .band = NL80211_BAND_5GHZ, .center_freq = 5280, .hw_value = 56, }, /* Channel 56 */ - { .band = IEEE80211_BAND_5GHZ, .center_freq = 5300, + { .band = NL80211_BAND_5GHZ, .center_freq = 5300, .hw_value = 60, }, /* Channel 60 */ - { .band = IEEE80211_BAND_5GHZ, .center_freq = 5320, + { .band = NL80211_BAND_5GHZ, .center_freq = 5320, .hw_value = 64, }, /* Channel 64 */ - { .band = IEEE80211_BAND_5GHZ, .center_freq = 5500, + { .band = NL80211_BAND_5GHZ, .center_freq = 5500, 
.hw_value = 100, }, /* Channel 100 */ - { .band = IEEE80211_BAND_5GHZ, .center_freq = 5520, + { .band = NL80211_BAND_5GHZ, .center_freq = 5520, .hw_value = 104, }, /* Channel 104 */ - { .band = IEEE80211_BAND_5GHZ, .center_freq = 5540, + { .band = NL80211_BAND_5GHZ, .center_freq = 5540, .hw_value = 108, }, /* Channel 108 */ - { .band = IEEE80211_BAND_5GHZ, .center_freq = 5560, + { .band = NL80211_BAND_5GHZ, .center_freq = 5560, .hw_value = 112, }, /* Channel 112 */ - { .band = IEEE80211_BAND_5GHZ, .center_freq = 5580, + { .band = NL80211_BAND_5GHZ, .center_freq = 5580, .hw_value = 116, }, /* Channel 116 */ - { .band = IEEE80211_BAND_5GHZ, .center_freq = 5600, + { .band = NL80211_BAND_5GHZ, .center_freq = 5600, .hw_value = 120, }, /* Channel 120 */ - { .band = IEEE80211_BAND_5GHZ, .center_freq = 5620, + { .band = NL80211_BAND_5GHZ, .center_freq = 5620, .hw_value = 124, }, /* Channel 124 */ - { .band = IEEE80211_BAND_5GHZ, .center_freq = 5640, + { .band = NL80211_BAND_5GHZ, .center_freq = 5640, .hw_value = 128, }, /* Channel 128 */ - { .band = IEEE80211_BAND_5GHZ, .center_freq = 5660, + { .band = NL80211_BAND_5GHZ, .center_freq = 5660, .hw_value = 132, }, /* Channel 132 */ - { .band = IEEE80211_BAND_5GHZ, .center_freq = 5680, + { .band = NL80211_BAND_5GHZ, .center_freq = 5680, .hw_value = 136, }, /* Channel 136 */ - { .band = IEEE80211_BAND_5GHZ, .center_freq = 5700, + { .band = NL80211_BAND_5GHZ, .center_freq = 5700, .hw_value = 140, }, /* Channel 140 */ - { .band = IEEE80211_BAND_5GHZ, .center_freq = 5745, + { .band = NL80211_BAND_5GHZ, .center_freq = 5745, .hw_value = 149, }, /* Channel 149 */ - { .band = IEEE80211_BAND_5GHZ, .center_freq = 5765, + { .band = NL80211_BAND_5GHZ, .center_freq = 5765, .hw_value = 153, }, /* Channel 153 */ - { .band = IEEE80211_BAND_5GHZ, .center_freq = 5785, + { .band = NL80211_BAND_5GHZ, .center_freq = 5785, .hw_value = 157, }, /* Channel 157 */ - { .band = IEEE80211_BAND_5GHZ, .center_freq = 5805, + { .band = NL80211_BAND_5GHZ, .center_freq = 5805, .hw_value = 161, }, /* Channel 161 */ - { .band = IEEE80211_BAND_5GHZ, .center_freq = 5825, + { .band = NL80211_BAND_5GHZ, .center_freq = 5825, .hw_value = 165, }, /* Channel 165 */ }; @@ -150,12 +150,12 @@ static void rsi_register_rates_channels(struct rsi_hw *adapter, int band) struct ieee80211_supported_band *sbands = &adapter->sbands[band]; void *channels = NULL; - if (band == IEEE80211_BAND_2GHZ) { + if (band == NL80211_BAND_2GHZ) { channels = kmalloc(sizeof(rsi_2ghz_channels), GFP_KERNEL); memcpy(channels, rsi_2ghz_channels, sizeof(rsi_2ghz_channels)); - sbands->band = IEEE80211_BAND_2GHZ; + sbands->band = NL80211_BAND_2GHZ; sbands->n_channels = ARRAY_SIZE(rsi_2ghz_channels); sbands->bitrates = rsi_rates; sbands->n_bitrates = ARRAY_SIZE(rsi_rates); @@ -164,7 +164,7 @@ static void rsi_register_rates_channels(struct rsi_hw *adapter, int band) memcpy(channels, rsi_5ghz_channels, sizeof(rsi_5ghz_channels)); - sbands->band = IEEE80211_BAND_5GHZ; + sbands->band = NL80211_BAND_5GHZ; sbands->n_channels = ARRAY_SIZE(rsi_5ghz_channels); sbands->bitrates = &rsi_rates[4]; sbands->n_bitrates = ARRAY_SIZE(rsi_rates) - 4; @@ -775,7 +775,7 @@ static int rsi_mac80211_set_rate_mask(struct ieee80211_hw *hw, { struct rsi_hw *adapter = hw->priv; struct rsi_common *common = adapter->priv; - enum ieee80211_band band = hw->conf.chandef.chan->band; + enum nl80211_band band = hw->conf.chandef.chan->band; mutex_lock(&common->mutex); common->fixedrate_mask[band] = 0; @@ -999,8 +999,8 @@ static int rsi_mac80211_sta_remove(struct 
ieee80211_hw *hw, mutex_lock(&common->mutex); /* Resetting all the fields to default values */ - common->bitrate_mask[IEEE80211_BAND_2GHZ] = 0; - common->bitrate_mask[IEEE80211_BAND_5GHZ] = 0; + common->bitrate_mask[NL80211_BAND_2GHZ] = 0; + common->bitrate_mask[NL80211_BAND_5GHZ] = 0; common->min_rate = 0xffff; common->vif_info[0].is_ht = false; common->vif_info[0].sgi = false; @@ -1070,8 +1070,8 @@ int rsi_mac80211_attach(struct rsi_common *common) hw->max_rate_tries = MAX_RETRIES; hw->max_tx_aggregation_subframes = 6; - rsi_register_rates_channels(adapter, IEEE80211_BAND_2GHZ); - rsi_register_rates_channels(adapter, IEEE80211_BAND_5GHZ); + rsi_register_rates_channels(adapter, NL80211_BAND_2GHZ); + rsi_register_rates_channels(adapter, NL80211_BAND_5GHZ); hw->rate_control_algorithm = "AARF"; SET_IEEE80211_PERM_ADDR(hw, common->mac_addr); @@ -1087,10 +1087,10 @@ int rsi_mac80211_attach(struct rsi_common *common) wiphy->available_antennas_rx = 1; wiphy->available_antennas_tx = 1; - wiphy->bands[IEEE80211_BAND_2GHZ] = - &adapter->sbands[IEEE80211_BAND_2GHZ]; - wiphy->bands[IEEE80211_BAND_5GHZ] = - &adapter->sbands[IEEE80211_BAND_5GHZ]; + wiphy->bands[NL80211_BAND_2GHZ] = + &adapter->sbands[NL80211_BAND_2GHZ]; + wiphy->bands[NL80211_BAND_5GHZ] = + &adapter->sbands[NL80211_BAND_5GHZ]; status = ieee80211_register_hw(hw); if (status) diff --git a/drivers/net/wireless/rsi/rsi_91x_mgmt.c b/drivers/net/wireless/rsi/rsi_91x_mgmt.c index e43b59d5b53b..40658b62d077 100644 --- a/drivers/net/wireless/rsi/rsi_91x_mgmt.c +++ b/drivers/net/wireless/rsi/rsi_91x_mgmt.c @@ -210,7 +210,7 @@ static u16 mcs[] = {13, 26, 39, 52, 78, 104, 117, 130}; */ static void rsi_set_default_parameters(struct rsi_common *common) { - common->band = IEEE80211_BAND_2GHZ; + common->band = NL80211_BAND_2GHZ; common->channel_width = BW_20MHZ; common->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD; common->channel = 1; @@ -655,7 +655,7 @@ int rsi_set_vap_capabilities(struct rsi_common *common, enum opmode mode) vap_caps->rts_threshold = cpu_to_le16(common->rts_threshold); vap_caps->default_mgmt_rate = cpu_to_le32(RSI_RATE_6); - if (common->band == IEEE80211_BAND_5GHZ) { + if (common->band == NL80211_BAND_5GHZ) { vap_caps->default_ctrl_rate = cpu_to_le32(RSI_RATE_6); if (conf_is_ht40(&common->priv->hw->conf)) { vap_caps->default_ctrl_rate |= @@ -872,7 +872,7 @@ int rsi_band_check(struct rsi_common *common) else common->channel_width = BW_40MHZ; - if (common->band == IEEE80211_BAND_2GHZ) { + if (common->band == NL80211_BAND_2GHZ) { if (common->channel_width) common->endpoint = EP_2GHZ_40MHZ; else @@ -1046,7 +1046,7 @@ static int rsi_send_auto_rate_request(struct rsi_common *common) if (common->channel_width == BW_40MHZ) auto_rate->desc_word[7] |= cpu_to_le16(1); - if (band == IEEE80211_BAND_2GHZ) { + if (band == NL80211_BAND_2GHZ) { min_rate = RSI_RATE_1; rate_table_offset = 0; } else { diff --git a/drivers/net/wireless/rsi/rsi_91x_pkt.c b/drivers/net/wireless/rsi/rsi_91x_pkt.c index a0b31c0cf25b..02920c93e82d 100644 --- a/drivers/net/wireless/rsi/rsi_91x_pkt.c +++ b/drivers/net/wireless/rsi/rsi_91x_pkt.c @@ -184,7 +184,7 @@ int rsi_send_mgmt_pkt(struct rsi_common *common, if (wh->addr1[0] & BIT(0)) msg[3] |= cpu_to_le16(RSI_BROADCAST_PKT); - if (common->band == IEEE80211_BAND_2GHZ) + if (common->band == NL80211_BAND_2GHZ) msg[4] = cpu_to_le16(RSI_11B_MODE); else msg[4] = cpu_to_le16((RSI_RATE_6 & 0x0f) | RSI_11G_MODE); diff --git a/drivers/net/wireless/rsi/rsi_main.h b/drivers/net/wireless/rsi/rsi_main.h index 5baed945f60e..dcd095787166 
100644 --- a/drivers/net/wireless/rsi/rsi_main.h +++ b/drivers/net/wireless/rsi/rsi_main.h @@ -211,7 +211,7 @@ struct rsi_hw { struct ieee80211_hw *hw; struct ieee80211_vif *vifs[RSI_MAX_VIFS]; struct ieee80211_tx_queue_params edca_params[NUM_EDCA_QUEUES]; - struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS]; + struct ieee80211_supported_band sbands[NUM_NL80211_BANDS]; struct device *device; u8 sc_nvifs; diff --git a/drivers/net/wireless/st/cw1200/main.c b/drivers/net/wireless/st/cw1200/main.c index 0e51e27d2e3f..dc478cedbde0 100644 --- a/drivers/net/wireless/st/cw1200/main.c +++ b/drivers/net/wireless/st/cw1200/main.c @@ -102,7 +102,7 @@ static struct ieee80211_rate cw1200_mcs_rates[] = { #define CHAN2G(_channel, _freq, _flags) { \ - .band = IEEE80211_BAND_2GHZ, \ + .band = NL80211_BAND_2GHZ, \ .center_freq = (_freq), \ .hw_value = (_channel), \ .flags = (_flags), \ @@ -111,7 +111,7 @@ static struct ieee80211_rate cw1200_mcs_rates[] = { } #define CHAN5G(_channel, _flags) { \ - .band = IEEE80211_BAND_5GHZ, \ + .band = NL80211_BAND_5GHZ, \ .center_freq = 5000 + (5 * (_channel)), \ .hw_value = (_channel), \ .flags = (_flags), \ @@ -311,12 +311,12 @@ static struct ieee80211_hw *cw1200_init_common(const u8 *macaddr, hw->sta_data_size = sizeof(struct cw1200_sta_priv); - hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &cw1200_band_2ghz; + hw->wiphy->bands[NL80211_BAND_2GHZ] = &cw1200_band_2ghz; if (have_5ghz) - hw->wiphy->bands[IEEE80211_BAND_5GHZ] = &cw1200_band_5ghz; + hw->wiphy->bands[NL80211_BAND_5GHZ] = &cw1200_band_5ghz; /* Channel params have to be cleared before registering wiphy again */ - for (band = 0; band < IEEE80211_NUM_BANDS; band++) { + for (band = 0; band < NUM_NL80211_BANDS; band++) { struct ieee80211_supported_band *sband = hw->wiphy->bands[band]; if (!sband) continue; diff --git a/drivers/net/wireless/st/cw1200/scan.c b/drivers/net/wireless/st/cw1200/scan.c index bff81b8d4164..983788156bb0 100644 --- a/drivers/net/wireless/st/cw1200/scan.c +++ b/drivers/net/wireless/st/cw1200/scan.c @@ -402,7 +402,7 @@ void cw1200_probe_work(struct work_struct *work) } wsm = (struct wsm_tx *)frame.skb->data; scan.max_tx_rate = wsm->max_tx_rate; - scan.band = (priv->channel->band == IEEE80211_BAND_5GHZ) ? + scan.band = (priv->channel->band == NL80211_BAND_5GHZ) ? WSM_PHY_BAND_5G : WSM_PHY_BAND_2_4G; if (priv->join_status == CW1200_JOIN_STATUS_STA || priv->join_status == CW1200_JOIN_STATUS_IBSS) { diff --git a/drivers/net/wireless/st/cw1200/sta.c b/drivers/net/wireless/st/cw1200/sta.c index d0ddcde6c695..daf06a4f842e 100644 --- a/drivers/net/wireless/st/cw1200/sta.c +++ b/drivers/net/wireless/st/cw1200/sta.c @@ -1278,7 +1278,7 @@ static void cw1200_do_join(struct cw1200_common *priv) join.dtim_period = priv->join_dtim_period; join.channel_number = priv->channel->hw_value; - join.band = (priv->channel->band == IEEE80211_BAND_5GHZ) ? + join.band = (priv->channel->band == NL80211_BAND_5GHZ) ? WSM_PHY_BAND_5G : WSM_PHY_BAND_2_4G; memcpy(join.bssid, bssid, sizeof(join.bssid)); @@ -1462,7 +1462,7 @@ int cw1200_enable_listening(struct cw1200_common *priv) }; if (priv->channel) { - start.band = priv->channel->band == IEEE80211_BAND_5GHZ ? + start.band = priv->channel->band == NL80211_BAND_5GHZ ? WSM_PHY_BAND_5G : WSM_PHY_BAND_2_4G; start.channel_number = priv->channel->hw_value; } else { @@ -2315,7 +2315,7 @@ static int cw1200_start_ap(struct cw1200_common *priv) struct wsm_start start = { .mode = priv->vif->p2p ? 
WSM_START_MODE_P2P_GO : WSM_START_MODE_AP, - .band = (priv->channel->band == IEEE80211_BAND_5GHZ) ? + .band = (priv->channel->band == NL80211_BAND_5GHZ) ? WSM_PHY_BAND_5G : WSM_PHY_BAND_2_4G, .channel_number = priv->channel->hw_value, .beacon_interval = conf->beacon_int, diff --git a/drivers/net/wireless/st/cw1200/txrx.c b/drivers/net/wireless/st/cw1200/txrx.c index d28bd49cb5fd..3d170287cd0b 100644 --- a/drivers/net/wireless/st/cw1200/txrx.c +++ b/drivers/net/wireless/st/cw1200/txrx.c @@ -1079,7 +1079,7 @@ void cw1200_rx_cb(struct cw1200_common *priv, hdr->band = ((arg->channel_number & 0xff00) || (arg->channel_number > 14)) ? - IEEE80211_BAND_5GHZ : IEEE80211_BAND_2GHZ; + NL80211_BAND_5GHZ : NL80211_BAND_2GHZ; hdr->freq = ieee80211_channel_to_frequency( arg->channel_number, hdr->band); diff --git a/drivers/net/wireless/st/cw1200/wsm.c b/drivers/net/wireless/st/cw1200/wsm.c index 9e0ca3048657..680d60eabc75 100644 --- a/drivers/net/wireless/st/cw1200/wsm.c +++ b/drivers/net/wireless/st/cw1200/wsm.c @@ -849,9 +849,9 @@ static int wsm_startup_indication(struct cw1200_common *priv, /* Disable unsupported frequency bands */ if (!(priv->wsm_caps.fw_cap & 0x1)) - priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = NULL; + priv->hw->wiphy->bands[NL80211_BAND_2GHZ] = NULL; if (!(priv->wsm_caps.fw_cap & 0x2)) - priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = NULL; + priv->hw->wiphy->bands[NL80211_BAND_5GHZ] = NULL; priv->firmware_ready = 1; wake_up(&priv->wsm_startup_done); diff --git a/drivers/net/wireless/ti/wl1251/main.c b/drivers/net/wireless/ti/wl1251/main.c index cd4777954f87..56384a4e2a35 100644 --- a/drivers/net/wireless/ti/wl1251/main.c +++ b/drivers/net/wireless/ti/wl1251/main.c @@ -1482,7 +1482,7 @@ int wl1251_init_ieee80211(struct wl1251 *wl) wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_ADHOC); wl->hw->wiphy->max_scan_ssids = 1; - wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &wl1251_band_2ghz; + wl->hw->wiphy->bands[NL80211_BAND_2GHZ] = &wl1251_band_2ghz; wl->hw->queues = 4; diff --git a/drivers/net/wireless/ti/wl1251/rx.c b/drivers/net/wireless/ti/wl1251/rx.c index cde0eaf99714..a27d4c22b6e8 100644 --- a/drivers/net/wireless/ti/wl1251/rx.c +++ b/drivers/net/wireless/ti/wl1251/rx.c @@ -53,7 +53,7 @@ static void wl1251_rx_status(struct wl1251 *wl, memset(status, 0, sizeof(struct ieee80211_rx_status)); - status->band = IEEE80211_BAND_2GHZ; + status->band = NL80211_BAND_2GHZ; status->mactime = desc->timestamp; /* diff --git a/drivers/net/wireless/ti/wl12xx/main.c b/drivers/net/wireless/ti/wl12xx/main.c index a0d6cccc56f3..58b9d3c3a833 100644 --- a/drivers/net/wireless/ti/wl12xx/main.c +++ b/drivers/net/wireless/ti/wl12xx/main.c @@ -469,8 +469,8 @@ static const u8 wl12xx_rate_to_idx_5ghz[] = { }; static const u8 *wl12xx_band_rate_to_idx[] = { - [IEEE80211_BAND_2GHZ] = wl12xx_rate_to_idx_2ghz, - [IEEE80211_BAND_5GHZ] = wl12xx_rate_to_idx_5ghz + [NL80211_BAND_2GHZ] = wl12xx_rate_to_idx_2ghz, + [NL80211_BAND_5GHZ] = wl12xx_rate_to_idx_5ghz }; enum wl12xx_hw_rates { @@ -1827,8 +1827,8 @@ static int wl12xx_setup(struct wl1271 *wl) wl->fw_status_priv_len = 0; wl->stats.fw_stats_len = sizeof(struct wl12xx_acx_statistics); wl->ofdm_only_ap = true; - wlcore_set_ht_cap(wl, IEEE80211_BAND_2GHZ, &wl12xx_ht_cap); - wlcore_set_ht_cap(wl, IEEE80211_BAND_5GHZ, &wl12xx_ht_cap); + wlcore_set_ht_cap(wl, NL80211_BAND_2GHZ, &wl12xx_ht_cap); + wlcore_set_ht_cap(wl, NL80211_BAND_5GHZ, &wl12xx_ht_cap); wl12xx_conf_init(wl); if (!fref_param) { diff --git 
a/drivers/net/wireless/ti/wl12xx/scan.c b/drivers/net/wireless/ti/wl12xx/scan.c index a0dfc59e9644..8d475393f9e3 100644 --- a/drivers/net/wireless/ti/wl12xx/scan.c +++ b/drivers/net/wireless/ti/wl12xx/scan.c @@ -27,7 +27,7 @@ static int wl1271_get_scan_channels(struct wl1271 *wl, struct cfg80211_scan_request *req, struct basic_scan_channel_params *channels, - enum ieee80211_band band, bool passive) + enum nl80211_band band, bool passive) { struct conf_scan_settings *c = &wl->conf.scan; int i, j; @@ -92,7 +92,7 @@ static int wl1271_get_scan_channels(struct wl1271 *wl, #define WL1271_NOTHING_TO_SCAN 1 static int wl1271_scan_send(struct wl1271 *wl, struct wl12xx_vif *wlvif, - enum ieee80211_band band, + enum nl80211_band band, bool passive, u32 basic_rate) { struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif); @@ -144,7 +144,7 @@ static int wl1271_scan_send(struct wl1271 *wl, struct wl12xx_vif *wlvif, cmd->params.tid_trigger = CONF_TX_AC_ANY_TID; cmd->params.scan_tag = WL1271_SCAN_DEFAULT_TAG; - if (band == IEEE80211_BAND_2GHZ) + if (band == NL80211_BAND_2GHZ) cmd->params.band = WL1271_SCAN_BAND_2_4_GHZ; else cmd->params.band = WL1271_SCAN_BAND_5_GHZ; @@ -218,7 +218,7 @@ out: void wl1271_scan_stm(struct wl1271 *wl, struct wl12xx_vif *wlvif) { int ret = 0; - enum ieee80211_band band; + enum nl80211_band band; u32 rate, mask; switch (wl->scan.state) { @@ -226,7 +226,7 @@ void wl1271_scan_stm(struct wl1271 *wl, struct wl12xx_vif *wlvif) break; case WL1271_SCAN_STATE_2GHZ_ACTIVE: - band = IEEE80211_BAND_2GHZ; + band = NL80211_BAND_2GHZ; mask = wlvif->bitrate_masks[band]; if (wl->scan.req->no_cck) { mask &= ~CONF_TX_CCK_RATES; @@ -243,7 +243,7 @@ void wl1271_scan_stm(struct wl1271 *wl, struct wl12xx_vif *wlvif) break; case WL1271_SCAN_STATE_2GHZ_PASSIVE: - band = IEEE80211_BAND_2GHZ; + band = NL80211_BAND_2GHZ; mask = wlvif->bitrate_masks[band]; if (wl->scan.req->no_cck) { mask &= ~CONF_TX_CCK_RATES; @@ -263,7 +263,7 @@ void wl1271_scan_stm(struct wl1271 *wl, struct wl12xx_vif *wlvif) break; case WL1271_SCAN_STATE_5GHZ_ACTIVE: - band = IEEE80211_BAND_5GHZ; + band = NL80211_BAND_5GHZ; rate = wl1271_tx_min_rate_get(wl, wlvif->bitrate_masks[band]); ret = wl1271_scan_send(wl, wlvif, band, false, rate); if (ret == WL1271_NOTHING_TO_SCAN) { @@ -274,7 +274,7 @@ void wl1271_scan_stm(struct wl1271 *wl, struct wl12xx_vif *wlvif) break; case WL1271_SCAN_STATE_5GHZ_PASSIVE: - band = IEEE80211_BAND_5GHZ; + band = NL80211_BAND_5GHZ; rate = wl1271_tx_min_rate_get(wl, wlvif->bitrate_masks[band]); ret = wl1271_scan_send(wl, wlvif, band, true, rate); if (ret == WL1271_NOTHING_TO_SCAN) { @@ -378,7 +378,7 @@ int wl1271_scan_sched_scan_config(struct wl1271 *wl, wl12xx_adjust_channels(cfg, cfg_channels); if (!force_passive && cfg->active[0]) { - u8 band = IEEE80211_BAND_2GHZ; + u8 band = NL80211_BAND_2GHZ; ret = wl12xx_cmd_build_probe_req(wl, wlvif, wlvif->role_id, band, req->ssids[0].ssid, @@ -395,7 +395,7 @@ int wl1271_scan_sched_scan_config(struct wl1271 *wl, } if (!force_passive && cfg->active[1]) { - u8 band = IEEE80211_BAND_5GHZ; + u8 band = NL80211_BAND_5GHZ; ret = wl12xx_cmd_build_probe_req(wl, wlvif, wlvif->role_id, band, req->ssids[0].ssid, diff --git a/drivers/net/wireless/ti/wl18xx/cmd.c b/drivers/net/wireless/ti/wl18xx/cmd.c index a8d176ddc73c..63e95ba744fd 100644 --- a/drivers/net/wireless/ti/wl18xx/cmd.c +++ b/drivers/net/wireless/ti/wl18xx/cmd.c @@ -48,10 +48,10 @@ int wl18xx_cmd_channel_switch(struct wl1271 *wl, cmd->stop_tx = ch_switch->block_tx; switch (ch_switch->chandef.chan->band) { - case 
IEEE80211_BAND_2GHZ: + case NL80211_BAND_2GHZ: cmd->band = WLCORE_BAND_2_4GHZ; break; - case IEEE80211_BAND_5GHZ: + case NL80211_BAND_5GHZ: cmd->band = WLCORE_BAND_5GHZ; break; default: @@ -187,7 +187,7 @@ int wl18xx_cmd_set_cac(struct wl1271 *wl, struct wl12xx_vif *wlvif, bool start) cmd->role_id = wlvif->role_id; cmd->channel = wlvif->channel; - if (wlvif->band == IEEE80211_BAND_5GHZ) + if (wlvif->band == NL80211_BAND_5GHZ) cmd->band = WLCORE_BAND_5GHZ; cmd->bandwidth = wlcore_get_native_channel_type(wlvif->channel_type); diff --git a/drivers/net/wireless/ti/wl18xx/event.c b/drivers/net/wireless/ti/wl18xx/event.c index ff6e46dd61f8..ef811848d141 100644 --- a/drivers/net/wireless/ti/wl18xx/event.c +++ b/drivers/net/wireless/ti/wl18xx/event.c @@ -64,13 +64,13 @@ static int wlcore_smart_config_sync_event(struct wl1271 *wl, u8 sync_channel, u8 sync_band) { struct sk_buff *skb; - enum ieee80211_band band; + enum nl80211_band band; int freq; if (sync_band == WLCORE_BAND_5GHZ) - band = IEEE80211_BAND_5GHZ; + band = NL80211_BAND_5GHZ; else - band = IEEE80211_BAND_2GHZ; + band = NL80211_BAND_2GHZ; freq = ieee80211_channel_to_frequency(sync_channel, band); diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c index 1bf26cc7374e..ae47c79cb9b6 100644 --- a/drivers/net/wireless/ti/wl18xx/main.c +++ b/drivers/net/wireless/ti/wl18xx/main.c @@ -137,8 +137,8 @@ static const u8 wl18xx_rate_to_idx_5ghz[] = { }; static const u8 *wl18xx_band_rate_to_idx[] = { - [IEEE80211_BAND_2GHZ] = wl18xx_rate_to_idx_2ghz, - [IEEE80211_BAND_5GHZ] = wl18xx_rate_to_idx_5ghz + [NL80211_BAND_2GHZ] = wl18xx_rate_to_idx_2ghz, + [NL80211_BAND_5GHZ] = wl18xx_rate_to_idx_5ghz }; enum wl18xx_hw_rates { @@ -1302,12 +1302,12 @@ static u32 wl18xx_ap_get_mimo_wide_rate_mask(struct wl1271 *wl, wl1271_debug(DEBUG_ACX, "using wide channel rate mask"); /* sanity check - we don't support this */ - if (WARN_ON(wlvif->band != IEEE80211_BAND_5GHZ)) + if (WARN_ON(wlvif->band != NL80211_BAND_5GHZ)) return 0; return CONF_TX_RATE_USE_WIDE_CHAN; } else if (wl18xx_is_mimo_supported(wl) && - wlvif->band == IEEE80211_BAND_2GHZ) { + wlvif->band == NL80211_BAND_2GHZ) { wl1271_debug(DEBUG_ACX, "using MIMO rate mask"); /* * we don't care about HT channel here - if a peer doesn't @@ -1996,24 +1996,24 @@ static int wl18xx_setup(struct wl1271 *wl) * siso40. 
*/ if (wl18xx_is_mimo_supported(wl)) - wlcore_set_ht_cap(wl, IEEE80211_BAND_2GHZ, + wlcore_set_ht_cap(wl, NL80211_BAND_2GHZ, &wl18xx_mimo_ht_cap_2ghz); else - wlcore_set_ht_cap(wl, IEEE80211_BAND_2GHZ, + wlcore_set_ht_cap(wl, NL80211_BAND_2GHZ, &wl18xx_siso40_ht_cap_2ghz); /* 5Ghz is always wide */ - wlcore_set_ht_cap(wl, IEEE80211_BAND_5GHZ, + wlcore_set_ht_cap(wl, NL80211_BAND_5GHZ, &wl18xx_siso40_ht_cap_5ghz); } else if (priv->conf.ht.mode == HT_MODE_WIDE) { - wlcore_set_ht_cap(wl, IEEE80211_BAND_2GHZ, + wlcore_set_ht_cap(wl, NL80211_BAND_2GHZ, &wl18xx_siso40_ht_cap_2ghz); - wlcore_set_ht_cap(wl, IEEE80211_BAND_5GHZ, + wlcore_set_ht_cap(wl, NL80211_BAND_5GHZ, &wl18xx_siso40_ht_cap_5ghz); } else if (priv->conf.ht.mode == HT_MODE_SISO20) { - wlcore_set_ht_cap(wl, IEEE80211_BAND_2GHZ, + wlcore_set_ht_cap(wl, NL80211_BAND_2GHZ, &wl18xx_siso20_ht_cap); - wlcore_set_ht_cap(wl, IEEE80211_BAND_5GHZ, + wlcore_set_ht_cap(wl, NL80211_BAND_5GHZ, &wl18xx_siso20_ht_cap); } diff --git a/drivers/net/wireless/ti/wl18xx/scan.c b/drivers/net/wireless/ti/wl18xx/scan.c index bc15aa2c3efa..4e5221544354 100644 --- a/drivers/net/wireless/ti/wl18xx/scan.c +++ b/drivers/net/wireless/ti/wl18xx/scan.c @@ -110,7 +110,7 @@ static int wl18xx_scan_send(struct wl1271 *wl, struct wl12xx_vif *wlvif, /* TODO: per-band ies? */ if (cmd->active[0]) { - u8 band = IEEE80211_BAND_2GHZ; + u8 band = NL80211_BAND_2GHZ; ret = wl12xx_cmd_build_probe_req(wl, wlvif, cmd->role_id, band, req->ssids ? req->ssids[0].ssid : NULL, @@ -127,7 +127,7 @@ static int wl18xx_scan_send(struct wl1271 *wl, struct wl12xx_vif *wlvif, } if (cmd->active[1] || cmd->dfs) { - u8 band = IEEE80211_BAND_5GHZ; + u8 band = NL80211_BAND_5GHZ; ret = wl12xx_cmd_build_probe_req(wl, wlvif, cmd->role_id, band, req->ssids ? req->ssids[0].ssid : NULL, @@ -253,7 +253,7 @@ int wl18xx_scan_sched_scan_config(struct wl1271 *wl, cmd->terminate_on_report = 0; if (cmd->active[0]) { - u8 band = IEEE80211_BAND_2GHZ; + u8 band = NL80211_BAND_2GHZ; ret = wl12xx_cmd_build_probe_req(wl, wlvif, cmd->role_id, band, req->ssids ? req->ssids[0].ssid : NULL, @@ -270,7 +270,7 @@ int wl18xx_scan_sched_scan_config(struct wl1271 *wl, } if (cmd->active[1] || cmd->dfs) { - u8 band = IEEE80211_BAND_5GHZ; + u8 band = NL80211_BAND_5GHZ; ret = wl12xx_cmd_build_probe_req(wl, wlvif, cmd->role_id, band, req->ssids ? 
req->ssids[0].ssid : NULL, diff --git a/drivers/net/wireless/ti/wl18xx/tx.c b/drivers/net/wireless/ti/wl18xx/tx.c index 3406ffb53325..ebaf66ef3f84 100644 --- a/drivers/net/wireless/ti/wl18xx/tx.c +++ b/drivers/net/wireless/ti/wl18xx/tx.c @@ -43,7 +43,7 @@ void wl18xx_get_last_tx_rate(struct wl1271 *wl, struct ieee80211_vif *vif, if (fw_rate <= CONF_HW_RATE_INDEX_54MBPS) { rate->idx = fw_rate; - if (band == IEEE80211_BAND_5GHZ) + if (band == NL80211_BAND_5GHZ) rate->idx -= CONF_HW_RATE_INDEX_6MBPS; rate->flags = 0; } else { diff --git a/drivers/net/wireless/ti/wlcore/cmd.c b/drivers/net/wireless/ti/wlcore/cmd.c index f01d24baff7c..33153565ad62 100644 --- a/drivers/net/wireless/ti/wlcore/cmd.c +++ b/drivers/net/wireless/ti/wlcore/cmd.c @@ -423,7 +423,7 @@ EXPORT_SYMBOL_GPL(wlcore_get_native_channel_type); static int wl12xx_cmd_role_start_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif, - enum ieee80211_band band, + enum nl80211_band band, int channel) { struct wl12xx_cmd_role_start *cmd; @@ -438,7 +438,7 @@ static int wl12xx_cmd_role_start_dev(struct wl1271 *wl, wl1271_debug(DEBUG_CMD, "cmd role start dev %d", wlvif->dev_role_id); cmd->role_id = wlvif->dev_role_id; - if (band == IEEE80211_BAND_5GHZ) + if (band == NL80211_BAND_5GHZ) cmd->band = WLCORE_BAND_5GHZ; cmd->channel = channel; @@ -524,7 +524,7 @@ int wl12xx_cmd_role_start_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif) wl1271_debug(DEBUG_CMD, "cmd role start sta %d", wlvif->role_id); cmd->role_id = wlvif->role_id; - if (wlvif->band == IEEE80211_BAND_5GHZ) + if (wlvif->band == NL80211_BAND_5GHZ) cmd->band = WLCORE_BAND_5GHZ; cmd->channel = wlvif->channel; cmd->sta.basic_rate_set = cpu_to_le32(wlvif->basic_rate_set); @@ -693,10 +693,10 @@ int wl12xx_cmd_role_start_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif) cmd->ap.local_rates = cpu_to_le32(supported_rates); switch (wlvif->band) { - case IEEE80211_BAND_2GHZ: + case NL80211_BAND_2GHZ: cmd->band = WLCORE_BAND_2_4GHZ; break; - case IEEE80211_BAND_5GHZ: + case NL80211_BAND_5GHZ: cmd->band = WLCORE_BAND_5GHZ; break; default: @@ -773,7 +773,7 @@ int wl12xx_cmd_role_start_ibss(struct wl1271 *wl, struct wl12xx_vif *wlvif) wl1271_debug(DEBUG_CMD, "cmd role start ibss %d", wlvif->role_id); cmd->role_id = wlvif->role_id; - if (wlvif->band == IEEE80211_BAND_5GHZ) + if (wlvif->band == NL80211_BAND_5GHZ) cmd->band = WLCORE_BAND_5GHZ; cmd->channel = wlvif->channel; cmd->ibss.basic_rate_set = cpu_to_le32(wlvif->basic_rate_set); @@ -1164,7 +1164,7 @@ int wl12xx_cmd_build_probe_req(struct wl1271 *wl, struct wl12xx_vif *wlvif, } rate = wl1271_tx_min_rate_get(wl, wlvif->bitrate_masks[band]); - if (band == IEEE80211_BAND_2GHZ) + if (band == NL80211_BAND_2GHZ) ret = wl1271_cmd_template_set(wl, role_id, template_id_2_4, skb->data, skb->len, 0, rate); @@ -1195,7 +1195,7 @@ struct sk_buff *wl1271_cmd_build_ap_probe_req(struct wl1271 *wl, wl1271_debug(DEBUG_SCAN, "set ap probe request template"); rate = wl1271_tx_min_rate_get(wl, wlvif->bitrate_masks[wlvif->band]); - if (wlvif->band == IEEE80211_BAND_2GHZ) + if (wlvif->band == NL80211_BAND_2GHZ) ret = wl1271_cmd_template_set(wl, wlvif->role_id, CMD_TEMPL_CFG_PROBE_REQ_2_4, skb->data, skb->len, 0, rate); @@ -1628,19 +1628,19 @@ out: return ret; } -static int wlcore_get_reg_conf_ch_idx(enum ieee80211_band band, u16 ch) +static int wlcore_get_reg_conf_ch_idx(enum nl80211_band band, u16 ch) { /* * map the given band/channel to the respective predefined * bit expected by the fw */ switch (band) { - case IEEE80211_BAND_2GHZ: + case NL80211_BAND_2GHZ: /* channels 
1..14 are mapped to 0..13 */ if (ch >= 1 && ch <= 14) return ch - 1; break; - case IEEE80211_BAND_5GHZ: + case NL80211_BAND_5GHZ: switch (ch) { case 8 ... 16: /* channels 8,12,16 are mapped to 18,19,20 */ @@ -1670,7 +1670,7 @@ static int wlcore_get_reg_conf_ch_idx(enum ieee80211_band band, u16 ch) } void wlcore_set_pending_regdomain_ch(struct wl1271 *wl, u16 channel, - enum ieee80211_band band) + enum nl80211_band band) { int ch_bit_idx = 0; @@ -1699,7 +1699,7 @@ int wlcore_cmd_regdomain_config_locked(struct wl1271 *wl) memset(tmp_ch_bitmap, 0, sizeof(tmp_ch_bitmap)); - for (b = IEEE80211_BAND_2GHZ; b <= IEEE80211_BAND_5GHZ; b++) { + for (b = NL80211_BAND_2GHZ; b <= NL80211_BAND_5GHZ; b++) { band = wiphy->bands[b]; for (i = 0; i < band->n_channels; i++) { struct ieee80211_channel *channel = &band->channels[i]; @@ -1851,7 +1851,7 @@ out: } static int wl12xx_cmd_roc(struct wl1271 *wl, struct wl12xx_vif *wlvif, - u8 role_id, enum ieee80211_band band, u8 channel) + u8 role_id, enum nl80211_band band, u8 channel) { struct wl12xx_cmd_roc *cmd; int ret = 0; @@ -1870,10 +1870,10 @@ static int wl12xx_cmd_roc(struct wl1271 *wl, struct wl12xx_vif *wlvif, cmd->role_id = role_id; cmd->channel = channel; switch (band) { - case IEEE80211_BAND_2GHZ: + case NL80211_BAND_2GHZ: cmd->band = WLCORE_BAND_2_4GHZ; break; - case IEEE80211_BAND_5GHZ: + case NL80211_BAND_5GHZ: cmd->band = WLCORE_BAND_5GHZ; break; default: @@ -1925,7 +1925,7 @@ out: } int wl12xx_roc(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 role_id, - enum ieee80211_band band, u8 channel) + enum nl80211_band band, u8 channel) { int ret = 0; @@ -1995,7 +1995,7 @@ out: /* start dev role and roc on its channel */ int wl12xx_start_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif, - enum ieee80211_band band, int channel) + enum nl80211_band band, int channel) { int ret; diff --git a/drivers/net/wireless/ti/wlcore/cmd.h b/drivers/net/wireless/ti/wlcore/cmd.h index e28e2f2303ce..52c3b4860461 100644 --- a/drivers/net/wireless/ti/wlcore/cmd.h +++ b/drivers/net/wireless/ti/wlcore/cmd.h @@ -40,7 +40,7 @@ int wl12xx_cmd_role_start_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif); int wl12xx_cmd_role_stop_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif); int wl12xx_cmd_role_start_ibss(struct wl1271 *wl, struct wl12xx_vif *wlvif); int wl12xx_start_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif, - enum ieee80211_band band, int channel); + enum nl80211_band band, int channel); int wl12xx_stop_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif); int wl1271_cmd_test(struct wl1271 *wl, void *buf, size_t buf_len, u8 answer); int wl1271_cmd_interrogate(struct wl1271 *wl, u16 id, void *buf, @@ -83,14 +83,14 @@ int wl1271_cmd_set_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif, int wl12xx_cmd_set_peer_state(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid); int wl12xx_roc(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 role_id, - enum ieee80211_band band, u8 channel); + enum nl80211_band band, u8 channel); int wl12xx_croc(struct wl1271 *wl, u8 role_id); int wl12xx_cmd_add_peer(struct wl1271 *wl, struct wl12xx_vif *wlvif, struct ieee80211_sta *sta, u8 hlid); int wl12xx_cmd_remove_peer(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid); void wlcore_set_pending_regdomain_ch(struct wl1271 *wl, u16 channel, - enum ieee80211_band band); + enum nl80211_band band); int wlcore_cmd_regdomain_config_locked(struct wl1271 *wl); int wlcore_cmd_generic_cfg(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 feature, u8 enable, u8 value); diff --git 
a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c index a872a07a484c..10fd24c28ece 100644 --- a/drivers/net/wireless/ti/wlcore/main.c +++ b/drivers/net/wireless/ti/wlcore/main.c @@ -1930,7 +1930,7 @@ static void wlcore_op_stop_locked(struct wl1271 *wl) if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) wlcore_enable_interrupts(wl); - wl->band = IEEE80211_BAND_2GHZ; + wl->band = NL80211_BAND_2GHZ; wl->rx_counter = 0; wl->power_level = WL1271_DEFAULT_POWER_LEVEL; @@ -2240,8 +2240,8 @@ static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif) wlvif->rate_set = CONF_TX_ENABLED_RATES; } - wlvif->bitrate_masks[IEEE80211_BAND_2GHZ] = wl->conf.tx.basic_rate; - wlvif->bitrate_masks[IEEE80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5; + wlvif->bitrate_masks[NL80211_BAND_2GHZ] = wl->conf.tx.basic_rate; + wlvif->bitrate_masks[NL80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5; wlvif->beacon_int = WL1271_DEFAULT_BEACON_INT; /* @@ -2330,7 +2330,7 @@ power_off: * 11a channels if not supported */ if (!wl->enable_11a) - wiphy->bands[IEEE80211_BAND_5GHZ]->n_channels = 0; + wiphy->bands[NL80211_BAND_5GHZ]->n_channels = 0; wl1271_debug(DEBUG_MAC80211, "11a is %ssupported", wl->enable_11a ? "" : "not "); @@ -5871,7 +5871,7 @@ static const struct ieee80211_ops wl1271_ops = { }; -u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum ieee80211_band band) +u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum nl80211_band band) { u8 idx; @@ -6096,21 +6096,21 @@ static int wl1271_init_ieee80211(struct wl1271 *wl) * We keep local copies of the band structs because we need to * modify them on a per-device basis. */ - memcpy(&wl->bands[IEEE80211_BAND_2GHZ], &wl1271_band_2ghz, + memcpy(&wl->bands[NL80211_BAND_2GHZ], &wl1271_band_2ghz, sizeof(wl1271_band_2ghz)); - memcpy(&wl->bands[IEEE80211_BAND_2GHZ].ht_cap, - &wl->ht_cap[IEEE80211_BAND_2GHZ], + memcpy(&wl->bands[NL80211_BAND_2GHZ].ht_cap, + &wl->ht_cap[NL80211_BAND_2GHZ], sizeof(*wl->ht_cap)); - memcpy(&wl->bands[IEEE80211_BAND_5GHZ], &wl1271_band_5ghz, + memcpy(&wl->bands[NL80211_BAND_5GHZ], &wl1271_band_5ghz, sizeof(wl1271_band_5ghz)); - memcpy(&wl->bands[IEEE80211_BAND_5GHZ].ht_cap, - &wl->ht_cap[IEEE80211_BAND_5GHZ], + memcpy(&wl->bands[NL80211_BAND_5GHZ].ht_cap, + &wl->ht_cap[NL80211_BAND_5GHZ], sizeof(*wl->ht_cap)); - wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = - &wl->bands[IEEE80211_BAND_2GHZ]; - wl->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = - &wl->bands[IEEE80211_BAND_5GHZ]; + wl->hw->wiphy->bands[NL80211_BAND_2GHZ] = + &wl->bands[NL80211_BAND_2GHZ]; + wl->hw->wiphy->bands[NL80211_BAND_5GHZ] = + &wl->bands[NL80211_BAND_5GHZ]; /* * allow 4 queues per mac address we support + @@ -6205,7 +6205,7 @@ struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size, wl->channel = 0; wl->rx_counter = 0; wl->power_level = WL1271_DEFAULT_POWER_LEVEL; - wl->band = IEEE80211_BAND_2GHZ; + wl->band = NL80211_BAND_2GHZ; wl->channel_type = NL80211_CHAN_NO_HT; wl->flags = 0; wl->sg_enabled = true; diff --git a/drivers/net/wireless/ti/wlcore/ps.c b/drivers/net/wireless/ti/wlcore/ps.c index d4420da637d8..b36133b739cb 100644 --- a/drivers/net/wireless/ti/wlcore/ps.c +++ b/drivers/net/wireless/ti/wlcore/ps.c @@ -202,7 +202,7 @@ int wl1271_ps_set_mode(struct wl1271 *wl, struct wl12xx_vif *wlvif, * enable beacon early termination. * Not relevant for 5GHz and for high rates. 
*/ - if ((wlvif->band == IEEE80211_BAND_2GHZ) && + if ((wlvif->band == NL80211_BAND_2GHZ) && (wlvif->basic_rate < CONF_HW_BIT_RATE_9MBPS)) { ret = wl1271_acx_bet_enable(wl, wlvif, true); if (ret < 0) @@ -213,7 +213,7 @@ int wl1271_ps_set_mode(struct wl1271 *wl, struct wl12xx_vif *wlvif, wl1271_debug(DEBUG_PSM, "leaving psm"); /* disable beacon early termination */ - if ((wlvif->band == IEEE80211_BAND_2GHZ) && + if ((wlvif->band == NL80211_BAND_2GHZ) && (wlvif->basic_rate < CONF_HW_BIT_RATE_9MBPS)) { ret = wl1271_acx_bet_enable(wl, wlvif, false); if (ret < 0) diff --git a/drivers/net/wireless/ti/wlcore/rx.c b/drivers/net/wireless/ti/wlcore/rx.c index 34e7e938ede4..c9bd294a0aa6 100644 --- a/drivers/net/wireless/ti/wlcore/rx.c +++ b/drivers/net/wireless/ti/wlcore/rx.c @@ -64,9 +64,9 @@ static void wl1271_rx_status(struct wl1271 *wl, memset(status, 0, sizeof(struct ieee80211_rx_status)); if ((desc->flags & WL1271_RX_DESC_BAND_MASK) == WL1271_RX_DESC_BAND_BG) - status->band = IEEE80211_BAND_2GHZ; + status->band = NL80211_BAND_2GHZ; else - status->band = IEEE80211_BAND_5GHZ; + status->band = NL80211_BAND_5GHZ; status->rate_idx = wlcore_rate_to_idx(wl, desc->rate, status->band); diff --git a/drivers/net/wireless/ti/wlcore/rx.h b/drivers/net/wireless/ti/wlcore/rx.h index f5a7087cfb97..57c0565637d6 100644 --- a/drivers/net/wireless/ti/wlcore/rx.h +++ b/drivers/net/wireless/ti/wlcore/rx.h @@ -146,7 +146,7 @@ struct wl1271_rx_descriptor { } __packed; int wlcore_rx(struct wl1271 *wl, struct wl_fw_status *status); -u8 wl1271_rate_to_idx(int rate, enum ieee80211_band band); +u8 wl1271_rate_to_idx(int rate, enum nl80211_band band); int wl1271_rx_filter_enable(struct wl1271 *wl, int index, bool enable, struct wl12xx_rx_filter *filter); diff --git a/drivers/net/wireless/ti/wlcore/scan.c b/drivers/net/wireless/ti/wlcore/scan.c index a384f3f83099..23343643207a 100644 --- a/drivers/net/wireless/ti/wlcore/scan.c +++ b/drivers/net/wireless/ti/wlcore/scan.c @@ -164,7 +164,7 @@ wlcore_scan_get_channels(struct wl1271 *wl, struct conf_sched_scan_settings *c = &wl->conf.sched_scan; u32 delta_per_probe; - if (band == IEEE80211_BAND_5GHZ) + if (band == NL80211_BAND_5GHZ) delta_per_probe = c->dwell_time_delta_per_probe_5; else delta_per_probe = c->dwell_time_delta_per_probe; @@ -215,7 +215,7 @@ wlcore_scan_get_channels(struct wl1271 *wl, channels[j].channel = req_channels[i]->hw_value; if (n_pactive_ch && - (band == IEEE80211_BAND_2GHZ) && + (band == NL80211_BAND_2GHZ) && (channels[j].channel >= 12) && (channels[j].channel <= 14) && (flags & IEEE80211_CHAN_NO_IR) && @@ -266,7 +266,7 @@ wlcore_set_scan_chan_params(struct wl1271 *wl, n_channels, n_ssids, cfg->channels_2, - IEEE80211_BAND_2GHZ, + NL80211_BAND_2GHZ, false, true, 0, MAX_CHANNELS_2GHZ, &n_pactive_ch, @@ -277,7 +277,7 @@ wlcore_set_scan_chan_params(struct wl1271 *wl, n_channels, n_ssids, cfg->channels_2, - IEEE80211_BAND_2GHZ, + NL80211_BAND_2GHZ, false, false, cfg->passive[0], MAX_CHANNELS_2GHZ, @@ -289,7 +289,7 @@ wlcore_set_scan_chan_params(struct wl1271 *wl, n_channels, n_ssids, cfg->channels_5, - IEEE80211_BAND_5GHZ, + NL80211_BAND_5GHZ, false, true, 0, wl->max_channels_5, &n_pactive_ch, @@ -300,7 +300,7 @@ wlcore_set_scan_chan_params(struct wl1271 *wl, n_channels, n_ssids, cfg->channels_5, - IEEE80211_BAND_5GHZ, + NL80211_BAND_5GHZ, true, true, cfg->passive[1], wl->max_channels_5, @@ -312,7 +312,7 @@ wlcore_set_scan_chan_params(struct wl1271 *wl, n_channels, n_ssids, cfg->channels_5, - IEEE80211_BAND_5GHZ, + NL80211_BAND_5GHZ, false, false, 
cfg->passive[1] + cfg->dfs, wl->max_channels_5, diff --git a/drivers/net/wireless/ti/wlcore/tx.c b/drivers/net/wireless/ti/wlcore/tx.c index f0ac36139bcc..c1b8e4e9d70b 100644 --- a/drivers/net/wireless/ti/wlcore/tx.c +++ b/drivers/net/wireless/ti/wlcore/tx.c @@ -453,7 +453,7 @@ static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct wl12xx_vif *wlvif, } u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set, - enum ieee80211_band rate_band) + enum nl80211_band rate_band) { struct ieee80211_supported_band *band; u32 enabled_rates = 0; diff --git a/drivers/net/wireless/ti/wlcore/tx.h b/drivers/net/wireless/ti/wlcore/tx.h index 79cb3ff8b71f..e2ba62d92d7a 100644 --- a/drivers/net/wireless/ti/wlcore/tx.h +++ b/drivers/net/wireless/ti/wlcore/tx.h @@ -246,9 +246,9 @@ int wlcore_tx_complete(struct wl1271 *wl); void wl12xx_tx_reset_wlvif(struct wl1271 *wl, struct wl12xx_vif *wlvif); void wl12xx_tx_reset(struct wl1271 *wl); void wl1271_tx_flush(struct wl1271 *wl); -u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum ieee80211_band band); +u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum nl80211_band band); u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set, - enum ieee80211_band rate_band); + enum nl80211_band rate_band); u32 wl1271_tx_min_rate_get(struct wl1271 *wl, u32 rate_set); u8 wl12xx_tx_get_hlid(struct wl1271 *wl, struct wl12xx_vif *wlvif, struct sk_buff *skb, struct ieee80211_sta *sta); diff --git a/drivers/net/wireless/ti/wlcore/wlcore.h b/drivers/net/wireless/ti/wlcore/wlcore.h index 72c31a8edcfb..8f28aa02230c 100644 --- a/drivers/net/wireless/ti/wlcore/wlcore.h +++ b/drivers/net/wireless/ti/wlcore/wlcore.h @@ -342,7 +342,7 @@ struct wl1271 { struct wl12xx_vif *sched_vif; /* The current band */ - enum ieee80211_band band; + enum nl80211_band band; struct completion *elp_compl; struct delayed_work elp_work; @@ -517,7 +517,7 @@ void wlcore_update_inconn_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, struct wl1271_station *wl_sta, bool in_conn); static inline void -wlcore_set_ht_cap(struct wl1271 *wl, enum ieee80211_band band, +wlcore_set_ht_cap(struct wl1271 *wl, enum nl80211_band band, struct ieee80211_sta_ht_cap *ht_cap) { memcpy(&wl->ht_cap[band], ht_cap, sizeof(*ht_cap)); diff --git a/drivers/net/wireless/ti/wlcore/wlcore_i.h b/drivers/net/wireless/ti/wlcore/wlcore_i.h index 27c56876b2c1..5c4199f3a19a 100644 --- a/drivers/net/wireless/ti/wlcore/wlcore_i.h +++ b/drivers/net/wireless/ti/wlcore/wlcore_i.h @@ -392,7 +392,7 @@ struct wl12xx_vif { u8 ssid_len; /* The current band */ - enum ieee80211_band band; + enum nl80211_band band; int channel; enum nl80211_channel_type channel_type; diff --git a/drivers/net/wireless/wl3501_cs.c b/drivers/net/wireless/wl3501_cs.c index d5c371d77ddf..99de07d14939 100644 --- a/drivers/net/wireless/wl3501_cs.c +++ b/drivers/net/wireless/wl3501_cs.c @@ -1454,7 +1454,7 @@ static int wl3501_get_freq(struct net_device *dev, struct iw_request_info *info, struct wl3501_card *this = netdev_priv(dev); wrqu->freq.m = 100000 * - ieee80211_channel_to_frequency(this->chan, IEEE80211_BAND_2GHZ); + ieee80211_channel_to_frequency(this->chan, NL80211_BAND_2GHZ); wrqu->freq.e = 1; return 0; } diff --git a/drivers/net/wireless/zydas/zd1211rw/zd_mac.c b/drivers/net/wireless/zydas/zd1211rw/zd_mac.c index e539d9b1b562..3e37a045f702 100644 --- a/drivers/net/wireless/zydas/zd1211rw/zd_mac.c +++ b/drivers/net/wireless/zydas/zd1211rw/zd_mac.c @@ -1068,7 +1068,7 @@ int zd_mac_rx(struct ieee80211_hw *hw, const u8 *buffer, unsigned int length) } 
stats.freq = zd_channels[_zd_chip_get_channel(&mac->chip) - 1].center_freq; - stats.band = IEEE80211_BAND_2GHZ; + stats.band = NL80211_BAND_2GHZ; stats.signal = zd_check_signal(hw, status->signal_strength); rate = zd_rx_rate(buffer, status); @@ -1395,7 +1395,7 @@ struct ieee80211_hw *zd_mac_alloc_hw(struct usb_interface *intf) mac->band.n_channels = ARRAY_SIZE(zd_channels); mac->band.channels = mac->channels; - hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &mac->band; + hw->wiphy->bands[NL80211_BAND_2GHZ] = &mac->band; ieee80211_hw_set(hw, MFP_CAPABLE); ieee80211_hw_set(hw, HOST_BROADCAST_PS_BUFFERING); diff --git a/drivers/staging/rtl8723au/core/rtw_mlme_ext.c b/drivers/staging/rtl8723au/core/rtw_mlme_ext.c index f4fff385aeb2..7dd1540ebedd 100644 --- a/drivers/staging/rtl8723au/core/rtw_mlme_ext.c +++ b/drivers/staging/rtl8723au/core/rtw_mlme_ext.c @@ -2113,10 +2113,10 @@ static int on_action_public23a(struct rtw_adapter *padapter, if (channel <= RTW_CH_MAX_2G_CHANNEL) freq = ieee80211_channel_to_frequency(channel, - IEEE80211_BAND_2GHZ); + NL80211_BAND_2GHZ); else freq = ieee80211_channel_to_frequency(channel, - IEEE80211_BAND_5GHZ); + NL80211_BAND_5GHZ); if (cfg80211_rx_mgmt(padapter->rtw_wdev, freq, 0, pframe, skb->len, 0)) diff --git a/drivers/staging/rtl8723au/include/ieee80211.h b/drivers/staging/rtl8723au/include/ieee80211.h index 3aa40a32555e..634102e1bda6 100644 --- a/drivers/staging/rtl8723au/include/ieee80211.h +++ b/drivers/staging/rtl8723au/include/ieee80211.h @@ -266,7 +266,7 @@ join_res: /* Represent channel details, subset of ieee80211_channel */ struct rtw_ieee80211_channel { - /* enum ieee80211_band band; */ + /* enum nl80211_band band; */ /* u16 center_freq; */ u16 hw_value; u32 flags; diff --git a/drivers/staging/rtl8723au/os_dep/ioctl_cfg80211.c b/drivers/staging/rtl8723au/os_dep/ioctl_cfg80211.c index 12d18440e824..0da559d929bc 100644 --- a/drivers/staging/rtl8723au/os_dep/ioctl_cfg80211.c +++ b/drivers/staging/rtl8723au/os_dep/ioctl_cfg80211.c @@ -39,7 +39,7 @@ static const u32 rtw_cipher_suites[] = { } #define CHAN2G(_channel, _freq, _flags) { \ - .band = IEEE80211_BAND_2GHZ, \ + .band = NL80211_BAND_2GHZ, \ .center_freq = (_freq), \ .hw_value = (_channel), \ .flags = (_flags), \ @@ -48,7 +48,7 @@ static const u32 rtw_cipher_suites[] = { } #define CHAN5G(_channel, _flags) { \ - .band = IEEE80211_BAND_5GHZ, \ + .band = NL80211_BAND_5GHZ, \ .center_freq = 5000 + (5 * (_channel)), \ .hw_value = (_channel), \ .flags = (_flags), \ @@ -143,15 +143,15 @@ static void rtw_5g_rates_init(struct ieee80211_rate *rates) } static struct ieee80211_supported_band * -rtw_spt_band_alloc(enum ieee80211_band band) +rtw_spt_band_alloc(enum nl80211_band band) { struct ieee80211_supported_band *spt_band = NULL; int n_channels, n_bitrates; - if (band == IEEE80211_BAND_2GHZ) { + if (band == NL80211_BAND_2GHZ) { n_channels = RTW_2G_CHANNELS_NUM; n_bitrates = RTW_G_RATES_NUM; - } else if (band == IEEE80211_BAND_5GHZ) { + } else if (band == NL80211_BAND_5GHZ) { n_channels = RTW_5G_CHANNELS_NUM; n_bitrates = RTW_A_RATES_NUM; } else { @@ -176,10 +176,10 @@ rtw_spt_band_alloc(enum ieee80211_band band) spt_band->n_channels = n_channels; spt_band->n_bitrates = n_bitrates; - if (band == IEEE80211_BAND_2GHZ) { + if (band == NL80211_BAND_2GHZ) { rtw_2g_channels_init(spt_band->channels); rtw_2g_rates_init(spt_band->bitrates); - } else if (band == IEEE80211_BAND_5GHZ) { + } else if (band == NL80211_BAND_5GHZ) { rtw_5g_channels_init(spt_band->channels); rtw_5g_rates_init(spt_band->bitrates); } @@ -257,10 
+257,10 @@ static int rtw_cfg80211_inform_bss(struct rtw_adapter *padapter, channel = pnetwork->network.DSConfig; if (channel <= RTW_CH_MAX_2G_CHANNEL) freq = ieee80211_channel_to_frequency(channel, - IEEE80211_BAND_2GHZ); + NL80211_BAND_2GHZ); else freq = ieee80211_channel_to_frequency(channel, - IEEE80211_BAND_5GHZ); + NL80211_BAND_5GHZ); notify_channel = ieee80211_get_channel(wiphy, freq); @@ -322,11 +322,11 @@ void rtw_cfg80211_indicate_connect(struct rtw_adapter *padapter) if (channel <= RTW_CH_MAX_2G_CHANNEL) freq = ieee80211_channel_to_frequency(channel, - IEEE80211_BAND_2GHZ); + NL80211_BAND_2GHZ); else freq = ieee80211_channel_to_frequency(channel, - IEEE80211_BAND_5GHZ); + NL80211_BAND_5GHZ); notify_channel = ieee80211_get_channel(wiphy, freq); @@ -2360,10 +2360,10 @@ void rtw_cfg80211_indicate_sta_assoc(struct rtw_adapter *padapter, channel = pmlmeext->cur_channel; if (channel <= RTW_CH_MAX_2G_CHANNEL) freq = ieee80211_channel_to_frequency(channel, - IEEE80211_BAND_2GHZ); + NL80211_BAND_2GHZ); else freq = ieee80211_channel_to_frequency(channel, - IEEE80211_BAND_5GHZ); + NL80211_BAND_5GHZ); cfg80211_rx_mgmt(padapter->rtw_wdev, freq, 0, pmgmt_frame, frame_len, 0); @@ -2392,10 +2392,10 @@ void rtw_cfg80211_indicate_sta_disassoc(struct rtw_adapter *padapter, channel = pmlmeext->cur_channel; if (channel <= RTW_CH_MAX_2G_CHANNEL) freq = ieee80211_channel_to_frequency(channel, - IEEE80211_BAND_2GHZ); + NL80211_BAND_2GHZ); else freq = ieee80211_channel_to_frequency(channel, - IEEE80211_BAND_5GHZ); + NL80211_BAND_5GHZ); mgmt.frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_DEAUTH); @@ -3109,7 +3109,7 @@ static struct cfg80211_ops rtw_cfg80211_ops = { }; static void rtw_cfg80211_init_ht_capab(struct ieee80211_sta_ht_cap *ht_cap, - enum ieee80211_band band, u8 rf_type) + enum nl80211_band band, u8 rf_type) { #define MAX_BIT_RATE_40MHZ_MCS15 300 /* Mbps */ @@ -3133,7 +3133,7 @@ static void rtw_cfg80211_init_ht_capab(struct ieee80211_sta_ht_cap *ht_cap, ht_cap->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED; /* - *hw->wiphy->bands[IEEE80211_BAND_2GHZ] + *hw->wiphy->bands[NL80211_BAND_2GHZ] *base on ant_num *rx_mask: RX mask *if rx_ant = 1 rx_mask[0]= 0xff;==>MCS0-MCS7 @@ -3173,19 +3173,19 @@ void rtw_cfg80211_init_wiphy(struct rtw_adapter *padapter) /* if (padapter->registrypriv.wireless_mode & WIRELESS_11G) */ { - bands = wiphy->bands[IEEE80211_BAND_2GHZ]; + bands = wiphy->bands[NL80211_BAND_2GHZ]; if (bands) rtw_cfg80211_init_ht_capab(&bands->ht_cap, - IEEE80211_BAND_2GHZ, + NL80211_BAND_2GHZ, rf_type); } /* if (padapter->registrypriv.wireless_mode & WIRELESS_11A) */ { - bands = wiphy->bands[IEEE80211_BAND_5GHZ]; + bands = wiphy->bands[NL80211_BAND_5GHZ]; if (bands) rtw_cfg80211_init_ht_capab(&bands->ht_cap, - IEEE80211_BAND_5GHZ, + NL80211_BAND_5GHZ, rf_type); } } @@ -3224,11 +3224,11 @@ static void rtw_cfg80211_preinit_wiphy(struct rtw_adapter *padapter, wiphy->n_cipher_suites = ARRAY_SIZE(rtw_cipher_suites); /* if (padapter->registrypriv.wireless_mode & WIRELESS_11G) */ - wiphy->bands[IEEE80211_BAND_2GHZ] = - rtw_spt_band_alloc(IEEE80211_BAND_2GHZ); + wiphy->bands[NL80211_BAND_2GHZ] = + rtw_spt_band_alloc(NL80211_BAND_2GHZ); /* if (padapter->registrypriv.wireless_mode & WIRELESS_11A) */ - wiphy->bands[IEEE80211_BAND_5GHZ] = - rtw_spt_band_alloc(IEEE80211_BAND_5GHZ); + wiphy->bands[NL80211_BAND_5GHZ] = + rtw_spt_band_alloc(NL80211_BAND_5GHZ); wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL; wiphy->flags |= WIPHY_FLAG_OFFCHAN_TX | WIPHY_FLAG_HAVE_AP_SME; @@ -3313,8 +3313,8 
@@ void rtw_wdev_free(struct wireless_dev *wdev) if (!wdev) return; - kfree(wdev->wiphy->bands[IEEE80211_BAND_2GHZ]); - kfree(wdev->wiphy->bands[IEEE80211_BAND_5GHZ]); + kfree(wdev->wiphy->bands[NL80211_BAND_2GHZ]); + kfree(wdev->wiphy->bands[NL80211_BAND_5GHZ]); wiphy_free(wdev->wiphy); diff --git a/drivers/staging/vt6655/channel.c b/drivers/staging/vt6655/channel.c index 9ac1ef9d0d51..b7d43a5622ba 100644 --- a/drivers/staging/vt6655/channel.c +++ b/drivers/staging/vt6655/channel.c @@ -144,7 +144,7 @@ void vnt_init_bands(struct vnt_private *priv) ch[i].flags = IEEE80211_CHAN_NO_HT40; } - priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = + priv->hw->wiphy->bands[NL80211_BAND_5GHZ] = &vnt_supported_5ghz_band; /* fallthrough */ case RF_RFMD2959: @@ -159,7 +159,7 @@ void vnt_init_bands(struct vnt_private *priv) ch[i].flags = IEEE80211_CHAN_NO_HT40; } - priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = + priv->hw->wiphy->bands[NL80211_BAND_2GHZ] = &vnt_supported_2ghz_band; break; } diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c index c3eea07ca97e..494164045a0f 100644 --- a/drivers/staging/vt6655/device_main.c +++ b/drivers/staging/vt6655/device_main.c @@ -812,7 +812,7 @@ static int vnt_int_report_rate(struct vnt_private *priv, else if (fb_option & FIFOCTL_AUTO_FB_1) tx_rate = fallback_rate1[tx_rate][retry]; - if (info->band == IEEE80211_BAND_5GHZ) + if (info->band == NL80211_BAND_5GHZ) idx = tx_rate - RATE_6M; else idx = tx_rate; @@ -1290,7 +1290,7 @@ static int vnt_config(struct ieee80211_hw *hw, u32 changed) (conf->flags & IEEE80211_CONF_OFFCHANNEL)) { set_channel(priv, conf->chandef.chan); - if (conf->chandef.chan->band == IEEE80211_BAND_5GHZ) + if (conf->chandef.chan->band == NL80211_BAND_5GHZ) bb_type = BB_TYPE_11A; else bb_type = BB_TYPE_11G; diff --git a/drivers/staging/vt6655/rxtx.c b/drivers/staging/vt6655/rxtx.c index 1a2dda09b69d..e4c3165ae027 100644 --- a/drivers/staging/vt6655/rxtx.c +++ b/drivers/staging/vt6655/rxtx.c @@ -1307,7 +1307,7 @@ int vnt_generate_fifo_header(struct vnt_private *priv, u32 dma_idx, } if (current_rate > RATE_11M) { - if (info->band == IEEE80211_BAND_5GHZ) { + if (info->band == NL80211_BAND_5GHZ) { pkt_type = PK_TYPE_11A; } else { if (tx_rate->flags & IEEE80211_TX_RC_USE_CTS_PROTECT) diff --git a/drivers/staging/vt6656/channel.c b/drivers/staging/vt6656/channel.c index a0fe288c1322..a4299f405d7f 100644 --- a/drivers/staging/vt6656/channel.c +++ b/drivers/staging/vt6656/channel.c @@ -153,7 +153,7 @@ void vnt_init_bands(struct vnt_private *priv) ch[i].flags = IEEE80211_CHAN_NO_HT40; } - priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = + priv->hw->wiphy->bands[NL80211_BAND_5GHZ] = &vnt_supported_5ghz_band; /* fallthrough */ case RF_AL2230: @@ -167,7 +167,7 @@ void vnt_init_bands(struct vnt_private *priv) ch[i].flags = IEEE80211_CHAN_NO_HT40; } - priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = + priv->hw->wiphy->bands[NL80211_BAND_2GHZ] = &vnt_supported_2ghz_band; break; } diff --git a/drivers/staging/vt6656/int.c b/drivers/staging/vt6656/int.c index 8d05acbc0e23..73538fb4e4e2 100644 --- a/drivers/staging/vt6656/int.c +++ b/drivers/staging/vt6656/int.c @@ -97,7 +97,7 @@ static int vnt_int_report_rate(struct vnt_private *priv, u8 pkt_no, u8 tsr) else if (context->fb_option == AUTO_FB_1) tx_rate = fallback_rate1[tx_rate][retry]; - if (info->band == IEEE80211_BAND_5GHZ) + if (info->band == NL80211_BAND_5GHZ) idx = tx_rate - RATE_6M; else idx = tx_rate; diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c 
index f9afab77b79f..fc5fe4ec6d05 100644 --- a/drivers/staging/vt6656/main_usb.c +++ b/drivers/staging/vt6656/main_usb.c @@ -662,7 +662,7 @@ static int vnt_config(struct ieee80211_hw *hw, u32 changed) (conf->flags & IEEE80211_CONF_OFFCHANNEL)) { vnt_set_channel(priv, conf->chandef.chan->hw_value); - if (conf->chandef.chan->band == IEEE80211_BAND_5GHZ) + if (conf->chandef.chan->band == NL80211_BAND_5GHZ) bb_type = BB_TYPE_11A; else bb_type = BB_TYPE_11G; diff --git a/drivers/staging/vt6656/rxtx.c b/drivers/staging/vt6656/rxtx.c index b74e32001318..aa59e7f14ab3 100644 --- a/drivers/staging/vt6656/rxtx.c +++ b/drivers/staging/vt6656/rxtx.c @@ -813,7 +813,7 @@ int vnt_tx_packet(struct vnt_private *priv, struct sk_buff *skb) } if (current_rate > RATE_11M) { - if (info->band == IEEE80211_BAND_5GHZ) { + if (info->band == NL80211_BAND_5GHZ) { pkt_type = PK_TYPE_11A; } else { if (tx_rate->flags & IEEE80211_TX_RC_USE_CTS_PROTECT) diff --git a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c b/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c index 448a5c8c4514..544917d8b2df 100644 --- a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c +++ b/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c @@ -102,7 +102,7 @@ static u8 op_ifcs; u8 wilc_initialized = 1; #define CHAN2G(_channel, _freq, _flags) { \ - .band = IEEE80211_BAND_2GHZ, \ + .band = NL80211_BAND_2GHZ, \ .center_freq = (_freq), \ .hw_value = (_channel), \ .flags = (_flags), \ @@ -241,7 +241,7 @@ static void refresh_scan(void *user_void, u8 all, bool direct_scan) struct ieee80211_channel *channel; if (network_info) { - freq = ieee80211_channel_to_frequency((s32)network_info->ch, IEEE80211_BAND_2GHZ); + freq = ieee80211_channel_to_frequency((s32)network_info->ch, NL80211_BAND_2GHZ); channel = ieee80211_get_channel(wiphy, freq); rssi = get_rssi_avg(network_info); @@ -409,7 +409,7 @@ static void CfgScanResult(enum scan_event scan_event, return; if (network_info) { - s32Freq = ieee80211_channel_to_frequency((s32)network_info->ch, IEEE80211_BAND_2GHZ); + s32Freq = ieee80211_channel_to_frequency((s32)network_info->ch, NL80211_BAND_2GHZ); channel = ieee80211_get_channel(wiphy, s32Freq); if (!channel) @@ -1451,7 +1451,7 @@ void WILC_WFI_p2p_rx(struct net_device *dev, u8 *buff, u32 size) return; } } else { - s32Freq = ieee80211_channel_to_frequency(curr_channel, IEEE80211_BAND_2GHZ); + s32Freq = ieee80211_channel_to_frequency(curr_channel, NL80211_BAND_2GHZ); if (ieee80211_is_action(buff[FRAME_TYPE_ID])) { if (priv->bCfgScanning && time_after_eq(jiffies, (unsigned long)pstrWFIDrv->p2p_timeout)) { @@ -2246,7 +2246,7 @@ static struct wireless_dev *WILC_WFI_CfgAlloc(void) WILC_WFI_band_2ghz.ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_8K; WILC_WFI_band_2ghz.ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_NONE; - wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = &WILC_WFI_band_2ghz; + wdev->wiphy->bands[NL80211_BAND_2GHZ] = &WILC_WFI_band_2ghz; return wdev; diff --git a/drivers/staging/wlan-ng/cfg80211.c b/drivers/staging/wlan-ng/cfg80211.c index 8bad018eda47..2438cf7cc695 100644 --- a/drivers/staging/wlan-ng/cfg80211.c +++ b/drivers/staging/wlan-ng/cfg80211.c @@ -415,7 +415,7 @@ static int prism2_scan(struct wiphy *wiphy, ie_len = ie_buf[1] + 2; memcpy(&ie_buf[2], &(msg2.ssid.data.data), msg2.ssid.data.len); freq = ieee80211_channel_to_frequency(msg2.dschannel.data, - IEEE80211_BAND_2GHZ); + NL80211_BAND_2GHZ); bss = cfg80211_inform_bss(wiphy, ieee80211_get_channel(wiphy, freq), CFG80211_BSS_FTYPE_UNKNOWN, @@ -758,9 +758,9 @@ static struct wiphy 
*wlan_create_wiphy(struct device *dev, wlandevice_t *wlandev priv->band.n_channels = ARRAY_SIZE(prism2_channels); priv->band.bitrates = priv->rates; priv->band.n_bitrates = ARRAY_SIZE(prism2_rates); - priv->band.band = IEEE80211_BAND_2GHZ; + priv->band.band = NL80211_BAND_2GHZ; priv->band.ht_cap.ht_supported = false; - wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band; + wiphy->bands[NL80211_BAND_2GHZ] = &priv->band; set_wiphy_dev(wiphy, dev); wiphy->privid = prism2_wiphy_privid; diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 5ec20369ceb8..183916e168f1 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -67,26 +67,6 @@ struct wiphy; * wireless hardware capability structures */ -/** - * enum ieee80211_band - supported frequency bands - * - * The bands are assigned this way because the supported - * bitrates differ in these bands. - * - * @IEEE80211_BAND_2GHZ: 2.4GHz ISM band - * @IEEE80211_BAND_5GHZ: around 5GHz band (4.9-5.7) - * @IEEE80211_BAND_60GHZ: around 60 GHz band (58.32 - 64.80 GHz) - * @IEEE80211_NUM_BANDS: number of defined bands - */ -enum ieee80211_band { - IEEE80211_BAND_2GHZ = NL80211_BAND_2GHZ, - IEEE80211_BAND_5GHZ = NL80211_BAND_5GHZ, - IEEE80211_BAND_60GHZ = NL80211_BAND_60GHZ, - - /* keep last */ - IEEE80211_NUM_BANDS -}; - /** * enum ieee80211_channel_flags - channel flags * @@ -167,7 +147,7 @@ enum ieee80211_channel_flags { * @dfs_cac_ms: DFS CAC time in milliseconds, this is valid for DFS channels. */ struct ieee80211_channel { - enum ieee80211_band band; + enum nl80211_band band; u16 center_freq; u16 hw_value; u32 flags; @@ -324,7 +304,7 @@ struct ieee80211_sta_vht_cap { struct ieee80211_supported_band { struct ieee80211_channel *channels; struct ieee80211_rate *bitrates; - enum ieee80211_band band; + enum nl80211_band band; int n_channels; int n_bitrates; struct ieee80211_sta_ht_cap ht_cap; @@ -1370,7 +1350,7 @@ struct mesh_setup { bool user_mpm; u8 dtim_period; u16 beacon_interval; - int mcast_rate[IEEE80211_NUM_BANDS]; + int mcast_rate[NUM_NL80211_BANDS]; u32 basic_rates; }; @@ -1468,7 +1448,7 @@ struct cfg80211_scan_request { size_t ie_len; u32 flags; - u32 rates[IEEE80211_NUM_BANDS]; + u32 rates[NUM_NL80211_BANDS]; struct wireless_dev *wdev; @@ -1860,7 +1840,7 @@ struct cfg80211_ibss_params { bool privacy; bool control_port; bool userspace_handles_dfs; - int mcast_rate[IEEE80211_NUM_BANDS]; + int mcast_rate[NUM_NL80211_BANDS]; struct ieee80211_ht_cap ht_capa; struct ieee80211_ht_cap ht_capa_mask; }; @@ -1872,7 +1852,7 @@ struct cfg80211_ibss_params { * @delta: value of RSSI level adjustment. 
*/ struct cfg80211_bss_select_adjust { - enum ieee80211_band band; + enum nl80211_band band; s8 delta; }; @@ -1887,7 +1867,7 @@ struct cfg80211_bss_select_adjust { struct cfg80211_bss_selection { enum nl80211_bss_select_attr behaviour; union { - enum ieee80211_band band_pref; + enum nl80211_band band_pref; struct cfg80211_bss_select_adjust adjust; } param; }; @@ -1990,7 +1970,7 @@ struct cfg80211_bitrate_mask { u8 ht_mcs[IEEE80211_HT_MCS_MASK_LEN]; u16 vht_mcs[NL80211_VHT_NSS_MAX]; enum nl80211_txrate_gi gi; - } control[IEEE80211_NUM_BANDS]; + } control[NUM_NL80211_BANDS]; }; /** * struct cfg80211_pmksa - PMK Security Association @@ -2677,7 +2657,7 @@ struct cfg80211_ops { int (*leave_ibss)(struct wiphy *wiphy, struct net_device *dev); int (*set_mcast_rate)(struct wiphy *wiphy, struct net_device *dev, - int rate[IEEE80211_NUM_BANDS]); + int rate[NUM_NL80211_BANDS]); int (*set_wiphy_params)(struct wiphy *wiphy, u32 changed); @@ -3323,7 +3303,7 @@ struct wiphy { * help determine whether you own this wiphy or not. */ const void *privid; - struct ieee80211_supported_band *bands[IEEE80211_NUM_BANDS]; + struct ieee80211_supported_band *bands[NUM_NL80211_BANDS]; /* Lets us get back the wiphy on the callback */ void (*reg_notifier)(struct wiphy *wiphy, @@ -3658,7 +3638,7 @@ static inline void *wdev_priv(struct wireless_dev *wdev) * @band: band, necessary due to channel number overlap * Return: The corresponding frequency (in MHz), or 0 if the conversion failed. */ -int ieee80211_channel_to_frequency(int chan, enum ieee80211_band band); +int ieee80211_channel_to_frequency(int chan, enum nl80211_band band); /** * ieee80211_frequency_to_channel - convert frequency to channel number @@ -5089,7 +5069,7 @@ void cfg80211_ch_switch_started_notify(struct net_device *dev, * Returns %true if the conversion was successful, %false otherwise. 
*/ bool ieee80211_operating_class_to_band(u8 operating_class, - enum ieee80211_band *band); + enum nl80211_band *band); /** * ieee80211_chandef_to_operating_class - convert chandef to operation class diff --git a/include/net/mac80211.h b/include/net/mac80211.h index ebc5a408acc2..07ef9378df2b 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -549,7 +549,7 @@ struct ieee80211_bss_conf { u8 sync_dtim_count; u32 basic_rates; struct ieee80211_rate *beacon_rate; - int mcast_rate[IEEE80211_NUM_BANDS]; + int mcast_rate[NUM_NL80211_BANDS]; u16 ht_operation_mode; s32 cqm_rssi_thold; u32 cqm_rssi_hyst; @@ -938,8 +938,8 @@ struct ieee80211_tx_info { * @common_ie_len: length of the common_ies */ struct ieee80211_scan_ies { - const u8 *ies[IEEE80211_NUM_BANDS]; - size_t len[IEEE80211_NUM_BANDS]; + const u8 *ies[NUM_NL80211_BANDS]; + size_t len[NUM_NL80211_BANDS]; const u8 *common_ies; size_t common_ie_len; }; @@ -1754,7 +1754,7 @@ struct ieee80211_sta_rates { * @txq: per-TID data TX queues (if driver uses the TXQ abstraction) */ struct ieee80211_sta { - u32 supp_rates[IEEE80211_NUM_BANDS]; + u32 supp_rates[NUM_NL80211_BANDS]; u8 addr[ETH_ALEN]; u16 aid; struct ieee80211_sta_ht_cap ht_cap; @@ -4404,7 +4404,7 @@ __le16 ieee80211_ctstoself_duration(struct ieee80211_hw *hw, */ __le16 ieee80211_generic_frame_duration(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - enum ieee80211_band band, + enum nl80211_band band, size_t frame_len, struct ieee80211_rate *rate); @@ -5357,7 +5357,7 @@ struct rate_control_ops { }; static inline int rate_supported(struct ieee80211_sta *sta, - enum ieee80211_band band, + enum nl80211_band band, int index) { return (sta == NULL || sta->supp_rates[band] & BIT(index)); diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index b4606288ef7a..1df655d8aa52 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -3653,11 +3653,15 @@ enum nl80211_txrate_gi { * @NL80211_BAND_2GHZ: 2.4 GHz ISM band * @NL80211_BAND_5GHZ: around 5 GHz band (4.9 - 5.7 GHz) * @NL80211_BAND_60GHZ: around 60 GHz band (58.32 - 64.80 GHz) + * @NUM_NL80211_BANDS: number of bands, avoid using this in userspace + * since newer kernel versions may support more bands */ enum nl80211_band { NL80211_BAND_2GHZ, NL80211_BAND_5GHZ, NL80211_BAND_60GHZ, + + NUM_NL80211_BANDS, }; /** diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index fc4730b938d0..0c12e4001f19 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -1049,7 +1049,7 @@ static int sta_apply_parameters(struct ieee80211_local *local, int ret = 0; struct ieee80211_supported_band *sband; struct ieee80211_sub_if_data *sdata = sta->sdata; - enum ieee80211_band band = ieee80211_get_sdata_band(sdata); + enum nl80211_band band = ieee80211_get_sdata_band(sdata); u32 mask, set; sband = local->hw.wiphy->bands[band]; @@ -1848,7 +1848,7 @@ static int ieee80211_change_bss(struct wiphy *wiphy, struct bss_parameters *params) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); - enum ieee80211_band band; + enum nl80211_band band; u32 changed = 0; if (!sdata_dereference(sdata->u.ap.beacon, sdata)) @@ -1867,7 +1867,7 @@ static int ieee80211_change_bss(struct wiphy *wiphy, } if (!sdata->vif.bss_conf.use_short_slot && - band == IEEE80211_BAND_5GHZ) { + band == NL80211_BAND_5GHZ) { sdata->vif.bss_conf.use_short_slot = true; changed |= BSS_CHANGED_ERP_SLOT; } @@ -2097,12 +2097,12 @@ static int ieee80211_leave_ocb(struct wiphy *wiphy, struct net_device *dev) } static int 
ieee80211_set_mcast_rate(struct wiphy *wiphy, struct net_device *dev, - int rate[IEEE80211_NUM_BANDS]) + int rate[NUM_NL80211_BANDS]) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); memcpy(sdata->vif.bss_conf.mcast_rate, rate, - sizeof(int) * IEEE80211_NUM_BANDS); + sizeof(int) * NUM_NL80211_BANDS); return 0; } @@ -2507,7 +2507,7 @@ static int ieee80211_set_bitrate_mask(struct wiphy *wiphy, return ret; } - for (i = 0; i < IEEE80211_NUM_BANDS; i++) { + for (i = 0; i < NUM_NL80211_BANDS; i++) { struct ieee80211_supported_band *sband = wiphy->bands[i]; int j; @@ -3135,7 +3135,7 @@ static int ieee80211_probe_client(struct wiphy *wiphy, struct net_device *dev, struct ieee80211_tx_info *info; struct sta_info *sta; struct ieee80211_chanctx_conf *chanctx_conf; - enum ieee80211_band band; + enum nl80211_band band; int ret; /* the lock is needed to assign the cookie later */ diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c index 37ea30e0754c..a5ba739cd2a7 100644 --- a/net/mac80211/debugfs_netdev.c +++ b/net/mac80211/debugfs_netdev.c @@ -169,21 +169,21 @@ static ssize_t ieee80211_if_write_##name(struct file *file, \ IEEE80211_IF_FILE_R(name) /* common attributes */ -IEEE80211_IF_FILE(rc_rateidx_mask_2ghz, rc_rateidx_mask[IEEE80211_BAND_2GHZ], +IEEE80211_IF_FILE(rc_rateidx_mask_2ghz, rc_rateidx_mask[NL80211_BAND_2GHZ], HEX); -IEEE80211_IF_FILE(rc_rateidx_mask_5ghz, rc_rateidx_mask[IEEE80211_BAND_5GHZ], +IEEE80211_IF_FILE(rc_rateidx_mask_5ghz, rc_rateidx_mask[NL80211_BAND_5GHZ], HEX); IEEE80211_IF_FILE(rc_rateidx_mcs_mask_2ghz, - rc_rateidx_mcs_mask[IEEE80211_BAND_2GHZ], HEXARRAY); + rc_rateidx_mcs_mask[NL80211_BAND_2GHZ], HEXARRAY); IEEE80211_IF_FILE(rc_rateidx_mcs_mask_5ghz, - rc_rateidx_mcs_mask[IEEE80211_BAND_5GHZ], HEXARRAY); + rc_rateidx_mcs_mask[NL80211_BAND_5GHZ], HEXARRAY); static ssize_t ieee80211_if_fmt_rc_rateidx_vht_mcs_mask_2ghz( const struct ieee80211_sub_if_data *sdata, char *buf, int buflen) { int i, len = 0; - const u16 *mask = sdata->rc_rateidx_vht_mcs_mask[IEEE80211_BAND_2GHZ]; + const u16 *mask = sdata->rc_rateidx_vht_mcs_mask[NL80211_BAND_2GHZ]; for (i = 0; i < NL80211_VHT_NSS_MAX; i++) len += scnprintf(buf + len, buflen - len, "%04x ", mask[i]); @@ -199,7 +199,7 @@ static ssize_t ieee80211_if_fmt_rc_rateidx_vht_mcs_mask_5ghz( char *buf, int buflen) { int i, len = 0; - const u16 *mask = sdata->rc_rateidx_vht_mcs_mask[IEEE80211_BAND_5GHZ]; + const u16 *mask = sdata->rc_rateidx_vht_mcs_mask[NL80211_BAND_5GHZ]; for (i = 0; i < NL80211_VHT_NSS_MAX; i++) len += scnprintf(buf + len, buflen - len, "%04x ", mask[i]); diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c index c6d4b75eb60b..a31d30713d08 100644 --- a/net/mac80211/ibss.c +++ b/net/mac80211/ibss.c @@ -126,7 +126,7 @@ ieee80211_ibss_build_presp(struct ieee80211_sub_if_data *sdata, } } - if (sband->band == IEEE80211_BAND_2GHZ) { + if (sband->band == NL80211_BAND_2GHZ) { *pos++ = WLAN_EID_DS_PARAMS; *pos++ = 1; *pos++ = ieee80211_frequency_to_channel( @@ -348,11 +348,11 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata, * * HT follows these specifications (IEEE 802.11-2012 20.3.18) */ - sdata->vif.bss_conf.use_short_slot = chan->band == IEEE80211_BAND_5GHZ; + sdata->vif.bss_conf.use_short_slot = chan->band == NL80211_BAND_5GHZ; bss_change |= BSS_CHANGED_ERP_SLOT; /* cf. 
IEEE 802.11 9.2.12 */ - if (chan->band == IEEE80211_BAND_2GHZ && have_higher_than_11mbit) + if (chan->band == NL80211_BAND_2GHZ && have_higher_than_11mbit) sdata->flags |= IEEE80211_SDATA_OPERATING_GMODE; else sdata->flags &= ~IEEE80211_SDATA_OPERATING_GMODE; @@ -989,7 +989,7 @@ static void ieee80211_update_sta_info(struct ieee80211_sub_if_data *sdata, struct ieee80211_channel *channel) { struct sta_info *sta; - enum ieee80211_band band = rx_status->band; + enum nl80211_band band = rx_status->band; enum nl80211_bss_scan_width scan_width; struct ieee80211_local *local = sdata->local; struct ieee80211_supported_band *sband = local->hw.wiphy->bands[band]; @@ -1109,7 +1109,7 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata, struct ieee80211_channel *channel; u64 beacon_timestamp, rx_timestamp; u32 supp_rates = 0; - enum ieee80211_band band = rx_status->band; + enum nl80211_band band = rx_status->band; channel = ieee80211_get_channel(local->hw.wiphy, rx_status->freq); if (!channel) diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 8857b01b82d0..9438c9406687 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -896,13 +896,13 @@ struct ieee80211_sub_if_data { struct ieee80211_if_ap *bss; /* bitmap of allowed (non-MCS) rate indexes for rate control */ - u32 rc_rateidx_mask[IEEE80211_NUM_BANDS]; + u32 rc_rateidx_mask[NUM_NL80211_BANDS]; - bool rc_has_mcs_mask[IEEE80211_NUM_BANDS]; - u8 rc_rateidx_mcs_mask[IEEE80211_NUM_BANDS][IEEE80211_HT_MCS_MASK_LEN]; + bool rc_has_mcs_mask[NUM_NL80211_BANDS]; + u8 rc_rateidx_mcs_mask[NUM_NL80211_BANDS][IEEE80211_HT_MCS_MASK_LEN]; - bool rc_has_vht_mcs_mask[IEEE80211_NUM_BANDS]; - u16 rc_rateidx_vht_mcs_mask[IEEE80211_NUM_BANDS][NL80211_VHT_NSS_MAX]; + bool rc_has_vht_mcs_mask[NUM_NL80211_BANDS]; + u16 rc_rateidx_vht_mcs_mask[NUM_NL80211_BANDS][NL80211_VHT_NSS_MAX]; union { struct ieee80211_if_ap ap; @@ -957,10 +957,10 @@ sdata_assert_lock(struct ieee80211_sub_if_data *sdata) lockdep_assert_held(&sdata->wdev.mtx); } -static inline enum ieee80211_band +static inline enum nl80211_band ieee80211_get_sdata_band(struct ieee80211_sub_if_data *sdata) { - enum ieee80211_band band = IEEE80211_BAND_2GHZ; + enum nl80211_band band = NL80211_BAND_2GHZ; struct ieee80211_chanctx_conf *chanctx_conf; rcu_read_lock(); @@ -1231,7 +1231,7 @@ struct ieee80211_local { struct cfg80211_scan_request __rcu *scan_req; struct ieee80211_scan_request *hw_scan_req; struct cfg80211_chan_def scan_chandef; - enum ieee80211_band hw_scan_band; + enum nl80211_band hw_scan_band; int scan_channel_idx; int scan_ies_len; int hw_scan_ies_bufsize; @@ -1738,10 +1738,10 @@ void ieee80211_process_mu_groups(struct ieee80211_sub_if_data *sdata, struct ieee80211_mgmt *mgmt); u32 __ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata, struct sta_info *sta, u8 opmode, - enum ieee80211_band band); + enum nl80211_band band); void ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata, struct sta_info *sta, u8 opmode, - enum ieee80211_band band); + enum nl80211_band band); void ieee80211_apply_vhtcap_overrides(struct ieee80211_sub_if_data *sdata, struct ieee80211_sta_vht_cap *vht_cap); void ieee80211_get_vht_mask_from_cap(__le16 vht_cap, @@ -1769,7 +1769,7 @@ void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata, */ int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata, struct ieee802_11_elems *elems, - enum ieee80211_band current_band, + enum nl80211_band current_band, u32 sta_flags, u8 
*bssid, struct ieee80211_csa_ie *csa_ie); @@ -1794,7 +1794,7 @@ static inline int __ieee80211_resume(struct ieee80211_hw *hw) /* utility functions/constants */ extern const void *const mac80211_wiphy_privid; /* for wiphy privid */ -int ieee80211_frame_duration(enum ieee80211_band band, size_t len, +int ieee80211_frame_duration(enum nl80211_band band, size_t len, int rate, int erp, int short_preamble, int shift); void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata, @@ -1804,12 +1804,12 @@ void ieee80211_xmit(struct ieee80211_sub_if_data *sdata, void __ieee80211_tx_skb_tid_band(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, int tid, - enum ieee80211_band band); + enum nl80211_band band); static inline void ieee80211_tx_skb_tid_band(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, int tid, - enum ieee80211_band band) + enum nl80211_band band) { rcu_read_lock(); __ieee80211_tx_skb_tid_band(sdata, skb, tid, band); @@ -1964,7 +1964,7 @@ void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u32 ieee80211_sta_get_rates(struct ieee80211_sub_if_data *sdata, struct ieee802_11_elems *elems, - enum ieee80211_band band, u32 *basic_rates); + enum nl80211_band band, u32 *basic_rates); int __ieee80211_request_smps_mgd(struct ieee80211_sub_if_data *sdata, enum ieee80211_smps_mode smps_mode); int __ieee80211_request_smps_ap(struct ieee80211_sub_if_data *sdata, @@ -1987,10 +1987,10 @@ int ieee80211_parse_bitrates(struct cfg80211_chan_def *chandef, const u8 *srates, int srates_len, u32 *rates); int ieee80211_add_srates_ie(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, bool need_basic, - enum ieee80211_band band); + enum nl80211_band band); int ieee80211_add_ext_srates_ie(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, bool need_basic, - enum ieee80211_band band); + enum nl80211_band band); u8 *ieee80211_add_wmm_info_ie(u8 *buf, u8 qosinfo); /* channel management */ diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index 097ece8b5c02..6a33f0b4d839 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c @@ -1800,7 +1800,7 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name, INIT_DELAYED_WORK(&sdata->dec_tailroom_needed_wk, ieee80211_delayed_tailroom_dec); - for (i = 0; i < IEEE80211_NUM_BANDS; i++) { + for (i = 0; i < NUM_NL80211_BANDS; i++) { struct ieee80211_supported_band *sband; sband = local->hw.wiphy->bands[i]; sdata->rc_rateidx_mask[i] = diff --git a/net/mac80211/main.c b/net/mac80211/main.c index 33c80de61eca..7ee91d6151d1 100644 --- a/net/mac80211/main.c +++ b/net/mac80211/main.c @@ -801,7 +801,7 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) { struct ieee80211_local *local = hw_to_local(hw); int result, i; - enum ieee80211_band band; + enum nl80211_band band; int channels, max_bitrates; bool supp_ht, supp_vht; netdev_features_t feature_whitelist; @@ -874,7 +874,7 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) max_bitrates = 0; supp_ht = false; supp_vht = false; - for (band = 0; band < IEEE80211_NUM_BANDS; band++) { + for (band = 0; band < NUM_NL80211_BANDS; band++) { struct ieee80211_supported_band *sband; sband = local->hw.wiphy->bands[band]; @@ -936,7 +936,7 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) if (!local->int_scan_req) return -ENOMEM; - for (band = 0; band < IEEE80211_NUM_BANDS; band++) { + for (band = 0; band < NUM_NL80211_BANDS; band++) { if (!local->hw.wiphy->bands[band]) continue; local->int_scan_req->rates[band] = (u32) -1; diff --git a/net/mac80211/mesh.c 
b/net/mac80211/mesh.c index dcc1facc807c..4c6404e1ad6e 100644 --- a/net/mac80211/mesh.c +++ b/net/mac80211/mesh.c @@ -415,7 +415,7 @@ int mesh_add_ht_cap_ie(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb) { struct ieee80211_local *local = sdata->local; - enum ieee80211_band band = ieee80211_get_sdata_band(sdata); + enum nl80211_band band = ieee80211_get_sdata_band(sdata); struct ieee80211_supported_band *sband; u8 *pos; @@ -478,7 +478,7 @@ int mesh_add_vht_cap_ie(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb) { struct ieee80211_local *local = sdata->local; - enum ieee80211_band band = ieee80211_get_sdata_band(sdata); + enum nl80211_band band = ieee80211_get_sdata_band(sdata); struct ieee80211_supported_band *sband; u8 *pos; @@ -680,7 +680,7 @@ ieee80211_mesh_build_beacon(struct ieee80211_if_mesh *ifmsh) struct ieee80211_mgmt *mgmt; struct ieee80211_chanctx_conf *chanctx_conf; struct mesh_csa_settings *csa; - enum ieee80211_band band; + enum nl80211_band band; u8 *pos; struct ieee80211_sub_if_data *sdata; int hdr_len = offsetof(struct ieee80211_mgmt, u.beacon) + @@ -930,7 +930,7 @@ ieee80211_mesh_process_chnswitch(struct ieee80211_sub_if_data *sdata, struct cfg80211_csa_settings params; struct ieee80211_csa_ie csa_ie; struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; - enum ieee80211_band band = ieee80211_get_sdata_band(sdata); + enum nl80211_band band = ieee80211_get_sdata_band(sdata); int err; u32 sta_flags; @@ -1084,7 +1084,7 @@ static void ieee80211_mesh_rx_bcn_presp(struct ieee80211_sub_if_data *sdata, struct ieee80211_channel *channel; size_t baselen; int freq; - enum ieee80211_band band = rx_status->band; + enum nl80211_band band = rx_status->band; /* ignore ProbeResp to foreign address */ if (stype == IEEE80211_STYPE_PROBE_RESP && diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c index 563bea050383..79f2a0a13db8 100644 --- a/net/mac80211/mesh_plink.c +++ b/net/mac80211/mesh_plink.c @@ -93,18 +93,18 @@ static inline void mesh_plink_fsm_restart(struct sta_info *sta) static u32 mesh_set_short_slot_time(struct ieee80211_sub_if_data *sdata) { struct ieee80211_local *local = sdata->local; - enum ieee80211_band band = ieee80211_get_sdata_band(sdata); + enum nl80211_band band = ieee80211_get_sdata_band(sdata); struct ieee80211_supported_band *sband = local->hw.wiphy->bands[band]; struct sta_info *sta; u32 erp_rates = 0, changed = 0; int i; bool short_slot = false; - if (band == IEEE80211_BAND_5GHZ) { + if (band == NL80211_BAND_5GHZ) { /* (IEEE 802.11-2012 19.4.5) */ short_slot = true; goto out; - } else if (band != IEEE80211_BAND_2GHZ) + } else if (band != NL80211_BAND_2GHZ) goto out; for (i = 0; i < sband->n_bitrates; i++) @@ -247,7 +247,7 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata, mgmt->u.action.u.self_prot.action_code = action; if (action != WLAN_SP_MESH_PEERING_CLOSE) { - enum ieee80211_band band = ieee80211_get_sdata_band(sdata); + enum nl80211_band band = ieee80211_get_sdata_band(sdata); /* capability info */ pos = skb_put(skb, 2); @@ -385,7 +385,7 @@ static void mesh_sta_info_init(struct ieee80211_sub_if_data *sdata, struct ieee802_11_elems *elems, bool insert) { struct ieee80211_local *local = sdata->local; - enum ieee80211_band band = ieee80211_get_sdata_band(sdata); + enum nl80211_band band = ieee80211_get_sdata_band(sdata); struct ieee80211_supported_band *sband; u32 rates, basic_rates = 0, changed = 0; enum ieee80211_sta_rx_bandwidth bw = sta->sta.bandwidth; diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c 
index d3c75ac8a029..885f4ca0888d 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -661,7 +661,7 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata) capab = WLAN_CAPABILITY_ESS; - if (sband->band == IEEE80211_BAND_2GHZ) { + if (sband->band == NL80211_BAND_2GHZ) { capab |= WLAN_CAPABILITY_SHORT_SLOT_TIME; capab |= WLAN_CAPABILITY_SHORT_PREAMBLE; } @@ -1100,7 +1100,7 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata, struct cfg80211_bss *cbss = ifmgd->associated; struct ieee80211_chanctx_conf *conf; struct ieee80211_chanctx *chanctx; - enum ieee80211_band current_band; + enum nl80211_band current_band; struct ieee80211_csa_ie csa_ie; struct ieee80211_channel_switch ch_switch; int res; @@ -1257,11 +1257,11 @@ ieee80211_find_80211h_pwr_constr(struct ieee80211_sub_if_data *sdata, default: WARN_ON_ONCE(1); /* fall through */ - case IEEE80211_BAND_2GHZ: - case IEEE80211_BAND_60GHZ: + case NL80211_BAND_2GHZ: + case NL80211_BAND_60GHZ: chan_increment = 1; break; - case IEEE80211_BAND_5GHZ: + case NL80211_BAND_5GHZ: chan_increment = 4; break; } @@ -1861,7 +1861,7 @@ static u32 ieee80211_handle_bss_capability(struct ieee80211_sub_if_data *sdata, } use_short_slot = !!(capab & WLAN_CAPABILITY_SHORT_SLOT_TIME); - if (ieee80211_get_sdata_band(sdata) == IEEE80211_BAND_5GHZ) + if (ieee80211_get_sdata_band(sdata) == NL80211_BAND_5GHZ) use_short_slot = true; if (use_protection != bss_conf->use_cts_prot) { @@ -4375,7 +4375,7 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata, sdata->vif.bss_conf.basic_rates = basic_rates; /* cf. IEEE 802.11 9.2.12 */ - if (cbss->channel->band == IEEE80211_BAND_2GHZ && + if (cbss->channel->band == NL80211_BAND_2GHZ && have_higher_than_11mbit) sdata->flags |= IEEE80211_SDATA_OPERATING_GMODE; else diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c index a4e2f4e67f94..206698bc93f4 100644 --- a/net/mac80211/rate.c +++ b/net/mac80211/rate.c @@ -287,7 +287,7 @@ static void __rate_control_send_low(struct ieee80211_hw *hw, u32 rate_flags = ieee80211_chandef_rate_flags(&hw->conf.chandef); - if ((sband->band == IEEE80211_BAND_2GHZ) && + if ((sband->band == NL80211_BAND_2GHZ) && (info->flags & IEEE80211_TX_CTL_NO_CCK_RATE)) rate_flags |= IEEE80211_RATE_ERP_G; diff --git a/net/mac80211/rc80211_minstrel.c b/net/mac80211/rc80211_minstrel.c index b54f398cda5d..14c5ba3a1b1c 100644 --- a/net/mac80211/rc80211_minstrel.c +++ b/net/mac80211/rc80211_minstrel.c @@ -436,7 +436,7 @@ minstrel_get_rate(void *priv, struct ieee80211_sta *sta, static void -calc_rate_durations(enum ieee80211_band band, +calc_rate_durations(enum nl80211_band band, struct minstrel_rate *d, struct ieee80211_rate *rate, struct cfg80211_chan_def *chandef) @@ -579,7 +579,7 @@ minstrel_alloc_sta(void *priv, struct ieee80211_sta *sta, gfp_t gfp) if (!mi) return NULL; - for (i = 0; i < IEEE80211_NUM_BANDS; i++) { + for (i = 0; i < NUM_NL80211_BANDS; i++) { sband = hw->wiphy->bands[i]; if (sband && sband->n_bitrates > max_rates) max_rates = sband->n_bitrates; @@ -621,7 +621,7 @@ minstrel_init_cck_rates(struct minstrel_priv *mp) u32 rate_flags = ieee80211_chandef_rate_flags(&mp->hw->conf.chandef); int i, j; - sband = mp->hw->wiphy->bands[IEEE80211_BAND_2GHZ]; + sband = mp->hw->wiphy->bands[NL80211_BAND_2GHZ]; if (!sband) return; diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c index d77a9a842338..30fbabf4bcbc 100644 --- a/net/mac80211/rc80211_minstrel_ht.c +++ b/net/mac80211/rc80211_minstrel_ht.c @@ -1137,7 +1137,7 
@@ minstrel_ht_update_cck(struct minstrel_priv *mp, struct minstrel_ht_sta *mi, { int i; - if (sband->band != IEEE80211_BAND_2GHZ) + if (sband->band != NL80211_BAND_2GHZ) return; if (!ieee80211_hw_check(mp->hw, SUPPORTS_HT_CCK_RATES)) @@ -1335,7 +1335,7 @@ minstrel_ht_alloc_sta(void *priv, struct ieee80211_sta *sta, gfp_t gfp) int max_rates = 0; int i; - for (i = 0; i < IEEE80211_NUM_BANDS; i++) { + for (i = 0; i < NUM_NL80211_BANDS; i++) { sband = hw->wiphy->bands[i]; if (sband && sband->n_bitrates > max_rates) max_rates = sband->n_bitrates; diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index c2b659e9a9f9..c5678703921e 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -322,7 +322,7 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local, else if (status->flag & RX_FLAG_5MHZ) channel_flags |= IEEE80211_CHAN_QUARTER; - if (status->band == IEEE80211_BAND_5GHZ) + if (status->band == NL80211_BAND_5GHZ) channel_flags |= IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ; else if (status->flag & (RX_FLAG_HT | RX_FLAG_VHT)) channel_flags |= IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ; @@ -2823,7 +2823,7 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx) switch (mgmt->u.action.u.measurement.action_code) { case WLAN_ACTION_SPCT_MSR_REQ: - if (status->band != IEEE80211_BAND_5GHZ) + if (status->band != NL80211_BAND_5GHZ) break; if (len < (IEEE80211_MIN_ACTION_SIZE + @@ -4043,7 +4043,7 @@ void ieee80211_rx_napi(struct ieee80211_hw *hw, struct ieee80211_sta *pubsta, WARN_ON_ONCE(softirq_count() == 0); - if (WARN_ON(status->band >= IEEE80211_NUM_BANDS)) + if (WARN_ON(status->band >= NUM_NL80211_BANDS)) goto drop; sband = local->hw.wiphy->bands[status->band]; diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c index 41aa728e5468..f9648ef9e31f 100644 --- a/net/mac80211/scan.c +++ b/net/mac80211/scan.c @@ -272,7 +272,7 @@ static bool ieee80211_prep_hw_scan(struct ieee80211_local *local) n_chans = req->n_channels; } else { do { - if (local->hw_scan_band == IEEE80211_NUM_BANDS) + if (local->hw_scan_band == NUM_NL80211_BANDS) return false; n_chans = 0; @@ -485,7 +485,7 @@ static void ieee80211_scan_state_send_probe(struct ieee80211_local *local, int i; struct ieee80211_sub_if_data *sdata; struct cfg80211_scan_request *scan_req; - enum ieee80211_band band = local->hw.conf.chandef.chan->band; + enum nl80211_band band = local->hw.conf.chandef.chan->band; u32 tx_flags; scan_req = rcu_dereference_protected(local->scan_req, @@ -953,7 +953,7 @@ int ieee80211_request_ibss_scan(struct ieee80211_sub_if_data *sdata, { struct ieee80211_local *local = sdata->local; int ret = -EBUSY, i, n_ch = 0; - enum ieee80211_band band; + enum nl80211_band band; mutex_lock(&local->mtx); @@ -965,7 +965,7 @@ int ieee80211_request_ibss_scan(struct ieee80211_sub_if_data *sdata, if (!channels) { int max_n; - for (band = 0; band < IEEE80211_NUM_BANDS; band++) { + for (band = 0; band < NUM_NL80211_BANDS; band++) { if (!local->hw.wiphy->bands[band]) continue; @@ -1085,7 +1085,7 @@ int __ieee80211_request_sched_scan_start(struct ieee80211_sub_if_data *sdata, struct ieee80211_scan_ies sched_scan_ies = {}; struct cfg80211_chan_def chandef; int ret, i, iebufsz, num_bands = 0; - u32 rate_masks[IEEE80211_NUM_BANDS] = {}; + u32 rate_masks[NUM_NL80211_BANDS] = {}; u8 bands_used = 0; u8 *ie; size_t len; @@ -1097,7 +1097,7 @@ int __ieee80211_request_sched_scan_start(struct ieee80211_sub_if_data *sdata, if (!local->ops->sched_scan_start) return -ENOTSUPP; - for (i = 0; i < IEEE80211_NUM_BANDS; i++) { + for (i = 0; i < NUM_NL80211_BANDS; 
i++) { if (local->hw.wiphy->bands[i]) { bands_used |= BIT(i); rate_masks[i] = (u32) -1; diff --git a/net/mac80211/spectmgmt.c b/net/mac80211/spectmgmt.c index 06e6ac8cc693..2ddc661f0988 100644 --- a/net/mac80211/spectmgmt.c +++ b/net/mac80211/spectmgmt.c @@ -23,11 +23,11 @@ int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata, struct ieee802_11_elems *elems, - enum ieee80211_band current_band, + enum nl80211_band current_band, u32 sta_flags, u8 *bssid, struct ieee80211_csa_ie *csa_ie) { - enum ieee80211_band new_band; + enum nl80211_band new_band; int new_freq; u8 new_chan_no; struct ieee80211_channel *new_chan; diff --git a/net/mac80211/tdls.c b/net/mac80211/tdls.c index a29ea813b7d5..1c7d45a6d93e 100644 --- a/net/mac80211/tdls.c +++ b/net/mac80211/tdls.c @@ -47,7 +47,7 @@ static void ieee80211_tdls_add_ext_capab(struct ieee80211_sub_if_data *sdata, NL80211_FEATURE_TDLS_CHANNEL_SWITCH; bool wider_band = ieee80211_hw_check(&local->hw, TDLS_WIDER_BW) && !ifmgd->tdls_wider_bw_prohibited; - enum ieee80211_band band = ieee80211_get_sdata_band(sdata); + enum nl80211_band band = ieee80211_get_sdata_band(sdata); struct ieee80211_supported_band *sband = local->hw.wiphy->bands[band]; bool vht = sband && sband->vht_cap.vht_supported; u8 *pos = (void *)skb_put(skb, 10); @@ -184,7 +184,7 @@ static u16 ieee80211_get_tdls_sta_capab(struct ieee80211_sub_if_data *sdata, if (status_code != 0) return 0; - if (ieee80211_get_sdata_band(sdata) == IEEE80211_BAND_2GHZ) { + if (ieee80211_get_sdata_band(sdata) == NL80211_BAND_2GHZ) { return WLAN_CAPABILITY_SHORT_SLOT_TIME | WLAN_CAPABILITY_SHORT_PREAMBLE; } @@ -357,7 +357,7 @@ ieee80211_tdls_add_setup_start_ies(struct ieee80211_sub_if_data *sdata, u8 action_code, bool initiator, const u8 *extra_ies, size_t extra_ies_len) { - enum ieee80211_band band = ieee80211_get_sdata_band(sdata); + enum nl80211_band band = ieee80211_get_sdata_band(sdata); struct ieee80211_local *local = sdata->local; struct ieee80211_supported_band *sband; struct ieee80211_sta_ht_cap ht_cap; @@ -544,7 +544,7 @@ ieee80211_tdls_add_setup_cfm_ies(struct ieee80211_sub_if_data *sdata, struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; size_t offset = 0, noffset; struct sta_info *sta, *ap_sta; - enum ieee80211_band band = ieee80211_get_sdata_band(sdata); + enum nl80211_band band = ieee80211_get_sdata_band(sdata); u8 *pos; mutex_lock(&local->sta_mtx); @@ -611,7 +611,7 @@ ieee80211_tdls_add_setup_cfm_ies(struct ieee80211_sub_if_data *sdata, ieee80211_tdls_add_link_ie(sdata, skb, peer, initiator); /* only include VHT-operation if not on the 2.4GHz band */ - if (band != IEEE80211_BAND_2GHZ && sta->sta.vht_cap.vht_supported) { + if (band != NL80211_BAND_2GHZ && sta->sta.vht_cap.vht_supported) { /* * if both peers support WIDER_BW, we can expand the chandef to * a wider compatible one, up to 80MHz @@ -1773,7 +1773,7 @@ ieee80211_process_tdls_channel_switch_req(struct ieee80211_sub_if_data *sdata, u8 target_channel, oper_class; bool local_initiator; struct sta_info *sta; - enum ieee80211_band band; + enum nl80211_band band; struct ieee80211_tdls_data *tf = (void *)skb->data; struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb); int baselen = offsetof(typeof(*tf), u.chan_switch_req.variable); @@ -1805,10 +1805,10 @@ ieee80211_process_tdls_channel_switch_req(struct ieee80211_sub_if_data *sdata, if ((oper_class == 112 || oper_class == 2 || oper_class == 3 || oper_class == 4 || oper_class == 5 || oper_class == 6) && target_channel < 14) - band = IEEE80211_BAND_5GHZ; + band = 
NL80211_BAND_5GHZ; else - band = target_channel < 14 ? IEEE80211_BAND_2GHZ : - IEEE80211_BAND_5GHZ; + band = target_channel < 14 ? NL80211_BAND_2GHZ : + NL80211_BAND_5GHZ; freq = ieee80211_channel_to_frequency(target_channel, band); if (freq == 0) { diff --git a/net/mac80211/trace.h b/net/mac80211/trace.h index 8c3b7ae103bc..77e4c53baefb 100644 --- a/net/mac80211/trace.h +++ b/net/mac80211/trace.h @@ -401,7 +401,7 @@ TRACE_EVENT(drv_bss_info_changed, __field(u32, sync_device_ts) __field(u8, sync_dtim_count) __field(u32, basic_rates) - __array(int, mcast_rate, IEEE80211_NUM_BANDS) + __array(int, mcast_rate, NUM_NL80211_BANDS) __field(u16, ht_operation_mode) __field(s32, cqm_rssi_thold); __field(s32, cqm_rssi_hyst); @@ -1265,8 +1265,8 @@ TRACE_EVENT(drv_set_bitrate_mask, TP_fast_assign( LOCAL_ASSIGN; VIF_ASSIGN; - __entry->legacy_2g = mask->control[IEEE80211_BAND_2GHZ].legacy; - __entry->legacy_5g = mask->control[IEEE80211_BAND_5GHZ].legacy; + __entry->legacy_2g = mask->control[NL80211_BAND_2GHZ].legacy; + __entry->legacy_5g = mask->control[NL80211_BAND_5GHZ].legacy; ), TP_printk( diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index e04d850726c5..203044379ce0 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -150,7 +150,7 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx, rate = DIV_ROUND_UP(r->bitrate, 1 << shift); switch (sband->band) { - case IEEE80211_BAND_2GHZ: { + case NL80211_BAND_2GHZ: { u32 flag; if (tx->sdata->flags & IEEE80211_SDATA_OPERATING_GMODE) flag = IEEE80211_RATE_MANDATORY_G; @@ -160,13 +160,13 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx, mrate = r->bitrate; break; } - case IEEE80211_BAND_5GHZ: + case NL80211_BAND_5GHZ: if (r->flags & IEEE80211_RATE_MANDATORY_A) mrate = r->bitrate; break; - case IEEE80211_BAND_60GHZ: + case NL80211_BAND_60GHZ: /* TODO, for now fall through */ - case IEEE80211_NUM_BANDS: + case NUM_NL80211_BANDS: WARN_ON(1); break; } @@ -2138,7 +2138,7 @@ static struct sk_buff *ieee80211_build_hdr(struct ieee80211_sub_if_data *sdata, u16 info_id = 0; struct ieee80211_chanctx_conf *chanctx_conf; struct ieee80211_sub_if_data *ap_sdata; - enum ieee80211_band band; + enum nl80211_band band; int ret; if (IS_ERR(sta)) @@ -3597,7 +3597,7 @@ __ieee80211_beacon_get(struct ieee80211_hw *hw, struct sk_buff *skb = NULL; struct ieee80211_tx_info *info; struct ieee80211_sub_if_data *sdata = NULL; - enum ieee80211_band band; + enum nl80211_band band; struct ieee80211_tx_rate_control txrc; struct ieee80211_chanctx_conf *chanctx_conf; int csa_off_base = 0; @@ -4165,7 +4165,7 @@ EXPORT_SYMBOL(ieee80211_unreserve_tid); void __ieee80211_tx_skb_tid_band(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, int tid, - enum ieee80211_band band) + enum nl80211_band band) { int ac = ieee802_1d_to_ac[tid & 7]; diff --git a/net/mac80211/util.c b/net/mac80211/util.c index 0319d6d4f863..905003f75c4d 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -59,7 +59,7 @@ void ieee80211_tx_set_protected(struct ieee80211_tx_data *tx) } } -int ieee80211_frame_duration(enum ieee80211_band band, size_t len, +int ieee80211_frame_duration(enum nl80211_band band, size_t len, int rate, int erp, int short_preamble, int shift) { @@ -77,7 +77,7 @@ int ieee80211_frame_duration(enum ieee80211_band band, size_t len, * is assumed to be 0 otherwise. 
*/ - if (band == IEEE80211_BAND_5GHZ || erp) { + if (band == NL80211_BAND_5GHZ || erp) { /* * OFDM: * @@ -129,7 +129,7 @@ int ieee80211_frame_duration(enum ieee80211_band band, size_t len, /* Exported duration function for driver use */ __le16 ieee80211_generic_frame_duration(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - enum ieee80211_band band, + enum nl80211_band band, size_t frame_len, struct ieee80211_rate *rate) { @@ -1129,7 +1129,7 @@ void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata, rcu_read_lock(); chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); use_11b = (chanctx_conf && - chanctx_conf->def.chan->band == IEEE80211_BAND_2GHZ) && + chanctx_conf->def.chan->band == NL80211_BAND_2GHZ) && !(sdata->flags & IEEE80211_SDATA_OPERATING_GMODE); rcu_read_unlock(); @@ -1301,7 +1301,7 @@ void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata, static int ieee80211_build_preq_ies_band(struct ieee80211_local *local, u8 *buffer, size_t buffer_len, const u8 *ie, size_t ie_len, - enum ieee80211_band band, + enum nl80211_band band, u32 rate_mask, struct cfg80211_chan_def *chandef, size_t *offset) @@ -1375,7 +1375,7 @@ static int ieee80211_build_preq_ies_band(struct ieee80211_local *local, pos += ext_rates_len; } - if (chandef->chan && sband->band == IEEE80211_BAND_2GHZ) { + if (chandef->chan && sband->band == NL80211_BAND_2GHZ) { if (end - pos < 3) goto out_err; *pos++ = WLAN_EID_DS_PARAMS; @@ -1479,7 +1479,7 @@ int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer, memset(ie_desc, 0, sizeof(*ie_desc)); - for (i = 0; i < IEEE80211_NUM_BANDS; i++) { + for (i = 0; i < NUM_NL80211_BANDS; i++) { if (bands_used & BIT(i)) { pos += ieee80211_build_preq_ies_band(local, buffer + pos, @@ -1522,7 +1522,7 @@ struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb; struct ieee80211_mgmt *mgmt; int ies_len; - u32 rate_masks[IEEE80211_NUM_BANDS] = {}; + u32 rate_masks[NUM_NL80211_BANDS] = {}; struct ieee80211_scan_ies dummy_ie_desc; /* @@ -1582,7 +1582,7 @@ void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u32 ieee80211_sta_get_rates(struct ieee80211_sub_if_data *sdata, struct ieee802_11_elems *elems, - enum ieee80211_band band, u32 *basic_rates) + enum nl80211_band band, u32 *basic_rates) { struct ieee80211_supported_band *sband; size_t num_rates; @@ -2520,7 +2520,7 @@ int ieee80211_parse_bitrates(struct cfg80211_chan_def *chandef, int ieee80211_add_srates_ie(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, bool need_basic, - enum ieee80211_band band) + enum nl80211_band band) { struct ieee80211_local *local = sdata->local; struct ieee80211_supported_band *sband; @@ -2565,7 +2565,7 @@ int ieee80211_add_srates_ie(struct ieee80211_sub_if_data *sdata, int ieee80211_add_ext_srates_ie(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, bool need_basic, - enum ieee80211_band band) + enum nl80211_band band) { struct ieee80211_local *local = sdata->local; struct ieee80211_supported_band *sband; @@ -2711,7 +2711,7 @@ u64 ieee80211_calculate_rx_timestamp(struct ieee80211_local *local, if (status->flag & RX_FLAG_MACTIME_PLCP_START) { /* TODO: handle HT/VHT preambles */ - if (status->band == IEEE80211_BAND_5GHZ) { + if (status->band == NL80211_BAND_5GHZ) { ts += 20 << shift; mpdu_offset += 2; } else if (status->flag & RX_FLAG_SHORTPRE) { diff --git a/net/mac80211/vht.c b/net/mac80211/vht.c index e590e2ef9eaf..ee715764a828 100644 --- a/net/mac80211/vht.c +++ b/net/mac80211/vht.c @@ 
-418,7 +418,7 @@ void ieee80211_sta_set_rx_nss(struct sta_info *sta) u32 __ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata, struct sta_info *sta, u8 opmode, - enum ieee80211_band band) + enum nl80211_band band) { struct ieee80211_local *local = sdata->local; struct ieee80211_supported_band *sband; @@ -504,7 +504,7 @@ EXPORT_SYMBOL_GPL(ieee80211_update_mu_groups); void ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata, struct sta_info *sta, u8 opmode, - enum ieee80211_band band) + enum nl80211_band band) { struct ieee80211_local *local = sdata->local; struct ieee80211_supported_band *sband = local->hw.wiphy->bands[band]; diff --git a/net/wireless/chan.c b/net/wireless/chan.c index 59cabc9bce69..a6631fb319c1 100644 --- a/net/wireless/chan.c +++ b/net/wireless/chan.c @@ -768,7 +768,7 @@ static bool cfg80211_ir_permissive_chan(struct wiphy *wiphy, if (chan == other_chan) return true; - if (chan->band != IEEE80211_BAND_5GHZ) + if (chan->band != NL80211_BAND_5GHZ) continue; r1 = cfg80211_get_unii(chan->center_freq); diff --git a/net/wireless/core.c b/net/wireless/core.c index 5327e4b974fa..7f7b9409bf4c 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c @@ -557,7 +557,7 @@ int wiphy_register(struct wiphy *wiphy) { struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); int res; - enum ieee80211_band band; + enum nl80211_band band; struct ieee80211_supported_band *sband; bool have_band = false; int i; @@ -647,7 +647,7 @@ int wiphy_register(struct wiphy *wiphy) return res; /* sanity check supported bands/channels */ - for (band = 0; band < IEEE80211_NUM_BANDS; band++) { + for (band = 0; band < NUM_NL80211_BANDS; band++) { sband = wiphy->bands[band]; if (!sband) continue; @@ -659,7 +659,7 @@ int wiphy_register(struct wiphy *wiphy) * on 60GHz band, there are no legacy rates, so * n_bitrates is 0 */ - if (WARN_ON(band != IEEE80211_BAND_60GHZ && + if (WARN_ON(band != NL80211_BAND_60GHZ && !sband->n_bitrates)) return -EINVAL; @@ -669,7 +669,7 @@ int wiphy_register(struct wiphy *wiphy) * global structure for that. */ if (cfg80211_disable_40mhz_24ghz && - band == IEEE80211_BAND_2GHZ && + band == NL80211_BAND_2GHZ && sband->ht_cap.ht_supported) { sband->ht_cap.cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40; sband->ht_cap.cap &= ~IEEE80211_HT_CAP_SGI_40; diff --git a/net/wireless/debugfs.c b/net/wireless/debugfs.c index 454157717efa..5d453916a417 100644 --- a/net/wireless/debugfs.c +++ b/net/wireless/debugfs.c @@ -69,7 +69,7 @@ static ssize_t ht40allow_map_read(struct file *file, struct wiphy *wiphy = file->private_data; char *buf; unsigned int offset = 0, buf_size = PAGE_SIZE, i, r; - enum ieee80211_band band; + enum nl80211_band band; struct ieee80211_supported_band *sband; buf = kzalloc(buf_size, GFP_KERNEL); @@ -78,7 +78,7 @@ static ssize_t ht40allow_map_read(struct file *file, rtnl_lock(); - for (band = 0; band < IEEE80211_NUM_BANDS; band++) { + for (band = 0; band < NUM_NL80211_BANDS; band++) { sband = wiphy->bands[band]; if (!sband) continue; diff --git a/net/wireless/ibss.c b/net/wireless/ibss.c index 4c55fab9b4e4..4a4dda53bdf1 100644 --- a/net/wireless/ibss.c +++ b/net/wireless/ibss.c @@ -104,7 +104,7 @@ static int __cfg80211_join_ibss(struct cfg80211_registered_device *rdev, struct ieee80211_supported_band *sband = rdev->wiphy.bands[params->chandef.chan->band]; int j; - u32 flag = params->chandef.chan->band == IEEE80211_BAND_5GHZ ? + u32 flag = params->chandef.chan->band == NL80211_BAND_5GHZ ? 
IEEE80211_RATE_MANDATORY_A : IEEE80211_RATE_MANDATORY_B; @@ -236,7 +236,7 @@ int cfg80211_ibss_wext_join(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev) { struct cfg80211_cached_keys *ck = NULL; - enum ieee80211_band band; + enum nl80211_band band; int i, err; ASSERT_WDEV_LOCK(wdev); @@ -248,7 +248,7 @@ int cfg80211_ibss_wext_join(struct cfg80211_registered_device *rdev, if (!wdev->wext.ibss.chandef.chan) { struct ieee80211_channel *new_chan = NULL; - for (band = 0; band < IEEE80211_NUM_BANDS; band++) { + for (band = 0; band < NUM_NL80211_BANDS; band++) { struct ieee80211_supported_band *sband; struct ieee80211_channel *chan; diff --git a/net/wireless/mesh.c b/net/wireless/mesh.c index 092300b30c37..fa2066b56f36 100644 --- a/net/wireless/mesh.c +++ b/net/wireless/mesh.c @@ -128,9 +128,9 @@ int __cfg80211_join_mesh(struct cfg80211_registered_device *rdev, if (!setup->chandef.chan) { /* if we don't have that either, use the first usable channel */ - enum ieee80211_band band; + enum nl80211_band band; - for (band = 0; band < IEEE80211_NUM_BANDS; band++) { + for (band = 0; band < NUM_NL80211_BANDS; band++) { struct ieee80211_supported_band *sband; struct ieee80211_channel *chan; int i; diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c index ff328250bc44..c284d883c349 100644 --- a/net/wireless/mlme.c +++ b/net/wireless/mlme.c @@ -726,7 +726,7 @@ void cfg80211_dfs_channels_update_work(struct work_struct *work) wiphy = &rdev->wiphy; rtnl_lock(); - for (bandid = 0; bandid < IEEE80211_NUM_BANDS; bandid++) { + for (bandid = 0; bandid < NUM_NL80211_BANDS; bandid++) { sband = wiphy->bands[bandid]; if (!sband) continue; diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 4f45a2913104..13ef553b99d4 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -1277,7 +1277,7 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *rdev, struct nlattr *nl_bands, *nl_band; struct nlattr *nl_freqs, *nl_freq; struct nlattr *nl_cmds; - enum ieee80211_band band; + enum nl80211_band band; struct ieee80211_channel *chan; int i; const struct ieee80211_txrx_stypes *mgmt_stypes = @@ -1410,7 +1410,7 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *rdev, goto nla_put_failure; for (band = state->band_start; - band < IEEE80211_NUM_BANDS; band++) { + band < NUM_NL80211_BANDS; band++) { struct ieee80211_supported_band *sband; sband = rdev->wiphy.bands[band]; @@ -1472,7 +1472,7 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *rdev, } nla_nest_end(msg, nl_bands); - if (band < IEEE80211_NUM_BANDS) + if (band < NUM_NL80211_BANDS) state->band_start = band + 1; else state->band_start = 0; @@ -3493,7 +3493,7 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info) } params.pbss = nla_get_flag(info->attrs[NL80211_ATTR_PBSS]); - if (params.pbss && !rdev->wiphy.bands[IEEE80211_BAND_60GHZ]) + if (params.pbss && !rdev->wiphy.bands[NL80211_BAND_60GHZ]) return -EOPNOTSUPP; wdev_lock(wdev); @@ -5821,9 +5821,9 @@ static int validate_scan_freqs(struct nlattr *freqs) return n_channels; } -static bool is_band_valid(struct wiphy *wiphy, enum ieee80211_band b) +static bool is_band_valid(struct wiphy *wiphy, enum nl80211_band b) { - return b < IEEE80211_NUM_BANDS && wiphy->bands[b]; + return b < NUM_NL80211_BANDS && wiphy->bands[b]; } static int parse_bss_select(struct nlattr *nla, struct wiphy *wiphy, @@ -6018,10 +6018,10 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info) i++; } } else { - enum 
ieee80211_band band; + enum nl80211_band band; /* all channels */ - for (band = 0; band < IEEE80211_NUM_BANDS; band++) { + for (band = 0; band < NUM_NL80211_BANDS; band++) { int j; if (!wiphy->bands[band]) continue; @@ -6066,7 +6066,7 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info) request->ie_len); } - for (i = 0; i < IEEE80211_NUM_BANDS; i++) + for (i = 0; i < NUM_NL80211_BANDS; i++) if (wiphy->bands[i]) request->rates[i] = (1 << wiphy->bands[i]->n_bitrates) - 1; @@ -6075,9 +6075,9 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info) nla_for_each_nested(attr, info->attrs[NL80211_ATTR_SCAN_SUPP_RATES], tmp) { - enum ieee80211_band band = nla_type(attr); + enum nl80211_band band = nla_type(attr); - if (band < 0 || band >= IEEE80211_NUM_BANDS) { + if (band < 0 || band >= NUM_NL80211_BANDS) { err = -EINVAL; goto out_free; } @@ -6265,7 +6265,7 @@ nl80211_parse_sched_scan(struct wiphy *wiphy, struct wireless_dev *wdev, struct cfg80211_sched_scan_request *request; struct nlattr *attr; int err, tmp, n_ssids = 0, n_match_sets = 0, n_channels, i, n_plans = 0; - enum ieee80211_band band; + enum nl80211_band band; size_t ie_len; struct nlattr *tb[NL80211_SCHED_SCAN_MATCH_ATTR_MAX + 1]; s32 default_match_rssi = NL80211_SCAN_RSSI_THOLD_OFF; @@ -6430,7 +6430,7 @@ nl80211_parse_sched_scan(struct wiphy *wiphy, struct wireless_dev *wdev, } } else { /* all channels */ - for (band = 0; band < IEEE80211_NUM_BANDS; band++) { + for (band = 0; band < NUM_NL80211_BANDS; band++) { int j; if (!wiphy->bands[band]) continue; @@ -7538,14 +7538,14 @@ static int nl80211_disassociate(struct sk_buff *skb, struct genl_info *info) static bool nl80211_parse_mcast_rate(struct cfg80211_registered_device *rdev, - int mcast_rate[IEEE80211_NUM_BANDS], + int mcast_rate[NUM_NL80211_BANDS], int rateval) { struct wiphy *wiphy = &rdev->wiphy; bool found = false; int band, i; - for (band = 0; band < IEEE80211_NUM_BANDS; band++) { + for (band = 0; band < NUM_NL80211_BANDS; band++) { struct ieee80211_supported_band *sband; sband = wiphy->bands[band]; @@ -7725,7 +7725,7 @@ static int nl80211_set_mcast_rate(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct net_device *dev = info->user_ptr[1]; - int mcast_rate[IEEE80211_NUM_BANDS]; + int mcast_rate[NUM_NL80211_BANDS]; u32 nla_rate; int err; @@ -8130,7 +8130,7 @@ static int nl80211_connect(struct sk_buff *skb, struct genl_info *info) } connect.pbss = nla_get_flag(info->attrs[NL80211_ATTR_PBSS]); - if (connect.pbss && !rdev->wiphy.bands[IEEE80211_BAND_60GHZ]) { + if (connect.pbss && !rdev->wiphy.bands[NL80211_BAND_60GHZ]) { kzfree(connkeys); return -EOPNOTSUPP; } @@ -8550,7 +8550,7 @@ static int nl80211_set_tx_bitrate_mask(struct sk_buff *skb, memset(&mask, 0, sizeof(mask)); /* Default to all rates enabled */ - for (i = 0; i < IEEE80211_NUM_BANDS; i++) { + for (i = 0; i < NUM_NL80211_BANDS; i++) { sband = rdev->wiphy.bands[i]; if (!sband) @@ -8574,14 +8574,14 @@ static int nl80211_set_tx_bitrate_mask(struct sk_buff *skb, /* * The nested attribute uses enum nl80211_band as the index. This maps - * directly to the enum ieee80211_band values used in cfg80211. + * directly to the enum nl80211_band values used in cfg80211. 
*/ BUILD_BUG_ON(NL80211_MAX_SUPP_HT_RATES > IEEE80211_HT_MCS_MASK_LEN * 8); nla_for_each_nested(tx_rates, info->attrs[NL80211_ATTR_TX_RATES], rem) { - enum ieee80211_band band = nla_type(tx_rates); + enum nl80211_band band = nla_type(tx_rates); int err; - if (band < 0 || band >= IEEE80211_NUM_BANDS) + if (band < 0 || band >= NUM_NL80211_BANDS) return -EINVAL; sband = rdev->wiphy.bands[band]; if (sband == NULL) @@ -10746,7 +10746,7 @@ static int nl80211_tdls_channel_switch(struct sk_buff *skb, * section 10.22.6.2.1. Disallow 5/10Mhz channels as well for now, the * specification is not defined for them. */ - if (chandef.chan->band == IEEE80211_BAND_2GHZ && + if (chandef.chan->band == NL80211_BAND_2GHZ && chandef.width != NL80211_CHAN_WIDTH_20_NOHT && chandef.width != NL80211_CHAN_WIDTH_20) return -EINVAL; diff --git a/net/wireless/rdev-ops.h b/net/wireless/rdev-ops.h index 8ae0c04f9fc7..85ff30bee2b9 100644 --- a/net/wireless/rdev-ops.h +++ b/net/wireless/rdev-ops.h @@ -1048,7 +1048,7 @@ rdev_start_radar_detection(struct cfg80211_registered_device *rdev, static inline int rdev_set_mcast_rate(struct cfg80211_registered_device *rdev, struct net_device *dev, - int mcast_rate[IEEE80211_NUM_BANDS]) + int mcast_rate[NUM_NL80211_BANDS]) { int ret = -ENOTSUPP; diff --git a/net/wireless/reg.c b/net/wireless/reg.c index c5fb317eee68..e271dea6bc02 100644 --- a/net/wireless/reg.c +++ b/net/wireless/reg.c @@ -1546,12 +1546,12 @@ static void reg_process_ht_flags_band(struct wiphy *wiphy, static void reg_process_ht_flags(struct wiphy *wiphy) { - enum ieee80211_band band; + enum nl80211_band band; if (!wiphy) return; - for (band = 0; band < IEEE80211_NUM_BANDS; band++) + for (band = 0; band < NUM_NL80211_BANDS; band++) reg_process_ht_flags_band(wiphy, wiphy->bands[band]); } @@ -1673,7 +1673,7 @@ static void reg_check_channels(void) static void wiphy_update_regulatory(struct wiphy *wiphy, enum nl80211_reg_initiator initiator) { - enum ieee80211_band band; + enum nl80211_band band; struct regulatory_request *lr = get_last_request(); if (ignore_reg_update(wiphy, initiator)) { @@ -1690,7 +1690,7 @@ static void wiphy_update_regulatory(struct wiphy *wiphy, lr->dfs_region = get_cfg80211_regdom()->dfs_region; - for (band = 0; band < IEEE80211_NUM_BANDS; band++) + for (band = 0; band < NUM_NL80211_BANDS; band++) handle_band(wiphy, initiator, wiphy->bands[band]); reg_process_beacons(wiphy); @@ -1786,14 +1786,14 @@ static void handle_band_custom(struct wiphy *wiphy, void wiphy_apply_custom_regulatory(struct wiphy *wiphy, const struct ieee80211_regdomain *regd) { - enum ieee80211_band band; + enum nl80211_band band; unsigned int bands_set = 0; WARN(!(wiphy->regulatory_flags & REGULATORY_CUSTOM_REG), "wiphy should have REGULATORY_CUSTOM_REG\n"); wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG; - for (band = 0; band < IEEE80211_NUM_BANDS; band++) { + for (band = 0; band < NUM_NL80211_BANDS; band++) { if (!wiphy->bands[band]) continue; handle_band_custom(wiphy, wiphy->bands[band], regd); @@ -2228,7 +2228,7 @@ static void reg_process_self_managed_hints(void) struct wiphy *wiphy; const struct ieee80211_regdomain *tmp; const struct ieee80211_regdomain *regd; - enum ieee80211_band band; + enum nl80211_band band; struct regulatory_request request = {}; list_for_each_entry(rdev, &cfg80211_rdev_list, list) { @@ -2246,7 +2246,7 @@ static void reg_process_self_managed_hints(void) rcu_assign_pointer(wiphy->regd, regd); rcu_free_regdom(tmp); - for (band = 0; band < IEEE80211_NUM_BANDS; band++) + for (band = 0; band < 
NUM_NL80211_BANDS; band++) handle_band_custom(wiphy, wiphy->bands[band], regd); reg_process_ht_flags(wiphy); @@ -2404,7 +2404,7 @@ int regulatory_hint(struct wiphy *wiphy, const char *alpha2) } EXPORT_SYMBOL(regulatory_hint); -void regulatory_hint_country_ie(struct wiphy *wiphy, enum ieee80211_band band, +void regulatory_hint_country_ie(struct wiphy *wiphy, enum nl80211_band band, const u8 *country_ie, u8 country_ie_len) { char alpha2[2]; @@ -2504,11 +2504,11 @@ static void restore_alpha2(char *alpha2, bool reset_user) static void restore_custom_reg_settings(struct wiphy *wiphy) { struct ieee80211_supported_band *sband; - enum ieee80211_band band; + enum nl80211_band band; struct ieee80211_channel *chan; int i; - for (band = 0; band < IEEE80211_NUM_BANDS; band++) { + for (band = 0; band < NUM_NL80211_BANDS; band++) { sband = wiphy->bands[band]; if (!sband) continue; @@ -2623,9 +2623,9 @@ void regulatory_hint_disconnect(void) static bool freq_is_chan_12_13_14(u16 freq) { - if (freq == ieee80211_channel_to_frequency(12, IEEE80211_BAND_2GHZ) || - freq == ieee80211_channel_to_frequency(13, IEEE80211_BAND_2GHZ) || - freq == ieee80211_channel_to_frequency(14, IEEE80211_BAND_2GHZ)) + if (freq == ieee80211_channel_to_frequency(12, NL80211_BAND_2GHZ) || + freq == ieee80211_channel_to_frequency(13, NL80211_BAND_2GHZ) || + freq == ieee80211_channel_to_frequency(14, NL80211_BAND_2GHZ)) return true; return false; } @@ -2650,7 +2650,7 @@ int regulatory_hint_found_beacon(struct wiphy *wiphy, if (beacon_chan->beacon_found || beacon_chan->flags & IEEE80211_CHAN_RADAR || - (beacon_chan->band == IEEE80211_BAND_2GHZ && + (beacon_chan->band == NL80211_BAND_2GHZ && !freq_is_chan_12_13_14(beacon_chan->center_freq))) return 0; diff --git a/net/wireless/reg.h b/net/wireless/reg.h index 9f495d76eca0..f6ced316b5a4 100644 --- a/net/wireless/reg.h +++ b/net/wireless/reg.h @@ -104,7 +104,7 @@ int regulatory_hint_found_beacon(struct wiphy *wiphy, * information for a band the BSS is not present in it will be ignored. 
*/ void regulatory_hint_country_ie(struct wiphy *wiphy, - enum ieee80211_band band, + enum nl80211_band band, const u8 *country_ie, u8 country_ie_len); diff --git a/net/wireless/scan.c b/net/wireless/scan.c index 50ea8e3fcbeb..abdf651a70d9 100644 --- a/net/wireless/scan.c +++ b/net/wireless/scan.c @@ -531,7 +531,7 @@ static int cmp_bss(struct cfg80211_bss *a, } static bool cfg80211_bss_type_match(u16 capability, - enum ieee80211_band band, + enum nl80211_band band, enum ieee80211_bss_type bss_type) { bool ret = true; @@ -540,7 +540,7 @@ static bool cfg80211_bss_type_match(u16 capability, if (bss_type == IEEE80211_BSS_TYPE_ANY) return ret; - if (band == IEEE80211_BAND_60GHZ) { + if (band == NL80211_BAND_60GHZ) { mask = WLAN_CAPABILITY_DMG_TYPE_MASK; switch (bss_type) { case IEEE80211_BSS_TYPE_ESS: @@ -1006,7 +1006,7 @@ cfg80211_inform_bss_data(struct wiphy *wiphy, if (!res) return NULL; - if (channel->band == IEEE80211_BAND_60GHZ) { + if (channel->band == NL80211_BAND_60GHZ) { bss_type = res->pub.capability & WLAN_CAPABILITY_DMG_TYPE_MASK; if (bss_type == WLAN_CAPABILITY_DMG_TYPE_AP || bss_type == WLAN_CAPABILITY_DMG_TYPE_PBSS) @@ -1089,7 +1089,7 @@ cfg80211_inform_bss_frame_data(struct wiphy *wiphy, if (!res) return NULL; - if (channel->band == IEEE80211_BAND_60GHZ) { + if (channel->band == NL80211_BAND_60GHZ) { bss_type = res->pub.capability & WLAN_CAPABILITY_DMG_TYPE_MASK; if (bss_type == WLAN_CAPABILITY_DMG_TYPE_AP || bss_type == WLAN_CAPABILITY_DMG_TYPE_PBSS) @@ -1185,7 +1185,7 @@ int cfg80211_wext_siwscan(struct net_device *dev, struct iw_scan_req *wreq = NULL; struct cfg80211_scan_request *creq = NULL; int i, err, n_channels = 0; - enum ieee80211_band band; + enum nl80211_band band; if (!netif_running(dev)) return -ENETDOWN; @@ -1229,7 +1229,7 @@ int cfg80211_wext_siwscan(struct net_device *dev, /* translate "Scan on frequencies" request */ i = 0; - for (band = 0; band < IEEE80211_NUM_BANDS; band++) { + for (band = 0; band < NUM_NL80211_BANDS; band++) { int j; if (!wiphy->bands[band]) @@ -1289,7 +1289,7 @@ int cfg80211_wext_siwscan(struct net_device *dev, creq->n_ssids = 0; } - for (i = 0; i < IEEE80211_NUM_BANDS; i++) + for (i = 0; i < NUM_NL80211_BANDS; i++) if (wiphy->bands[i]) creq->rates[i] = (1 << wiphy->bands[i]->n_bitrates) - 1; diff --git a/net/wireless/sme.c b/net/wireless/sme.c index 1fba41676428..e22e5b83cfa9 100644 --- a/net/wireless/sme.c +++ b/net/wireless/sme.c @@ -81,7 +81,7 @@ static int cfg80211_conn_scan(struct wireless_dev *wdev) return -ENOMEM; if (wdev->conn->params.channel) { - enum ieee80211_band band = wdev->conn->params.channel->band; + enum nl80211_band band = wdev->conn->params.channel->band; struct ieee80211_supported_band *sband = wdev->wiphy->bands[band]; @@ -93,11 +93,11 @@ static int cfg80211_conn_scan(struct wireless_dev *wdev) request->rates[band] = (1 << sband->n_bitrates) - 1; } else { int i = 0, j; - enum ieee80211_band band; + enum nl80211_band band; struct ieee80211_supported_band *bands; struct ieee80211_channel *channel; - for (band = 0; band < IEEE80211_NUM_BANDS; band++) { + for (band = 0; band < NUM_NL80211_BANDS; band++) { bands = wdev->wiphy->bands[band]; if (!bands) continue; diff --git a/net/wireless/trace.h b/net/wireless/trace.h index 8da1fae23cfb..3c1091ae6c36 100644 --- a/net/wireless/trace.h +++ b/net/wireless/trace.h @@ -110,7 +110,7 @@ conf->dot11MeshHWMPconfirmationInterval; \ } while (0) -#define CHAN_ENTRY __field(enum ieee80211_band, band) \ +#define CHAN_ENTRY __field(enum nl80211_band, band) \ __field(u16, center_freq) 
#define CHAN_ASSIGN(chan) \ do { \ @@ -125,7 +125,7 @@ #define CHAN_PR_FMT "band: %d, freq: %u" #define CHAN_PR_ARG __entry->band, __entry->center_freq -#define CHAN_DEF_ENTRY __field(enum ieee80211_band, band) \ +#define CHAN_DEF_ENTRY __field(enum nl80211_band, band) \ __field(u32, control_freq) \ __field(u32, width) \ __field(u32, center_freq1) \ @@ -2647,7 +2647,7 @@ TRACE_EVENT(cfg80211_scan_done, TP_STRUCT__entry( __field(u32, n_channels) __dynamic_array(u8, ie, request ? request->ie_len : 0) - __array(u32, rates, IEEE80211_NUM_BANDS) + __array(u32, rates, NUM_NL80211_BANDS) __field(u32, wdev_id) MAC_ENTRY(wiphy_mac) __field(bool, no_cck) @@ -2658,7 +2658,7 @@ TRACE_EVENT(cfg80211_scan_done, memcpy(__get_dynamic_array(ie), request->ie, request->ie_len); memcpy(__entry->rates, request->rates, - IEEE80211_NUM_BANDS); + NUM_NL80211_BANDS); __entry->wdev_id = request->wdev ? request->wdev->identifier : 0; if (request->wiphy) @@ -2883,25 +2883,25 @@ TRACE_EVENT(rdev_start_radar_detection, TRACE_EVENT(rdev_set_mcast_rate, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, - int mcast_rate[IEEE80211_NUM_BANDS]), + int mcast_rate[NUM_NL80211_BANDS]), TP_ARGS(wiphy, netdev, mcast_rate), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY - __array(int, mcast_rate, IEEE80211_NUM_BANDS) + __array(int, mcast_rate, NUM_NL80211_BANDS) ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; memcpy(__entry->mcast_rate, mcast_rate, - sizeof(int) * IEEE80211_NUM_BANDS); + sizeof(int) * NUM_NL80211_BANDS); ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", " "mcast_rates [2.4GHz=0x%x, 5.2GHz=0x%x, 60GHz=0x%x]", WIPHY_PR_ARG, NETDEV_PR_ARG, - __entry->mcast_rate[IEEE80211_BAND_2GHZ], - __entry->mcast_rate[IEEE80211_BAND_5GHZ], - __entry->mcast_rate[IEEE80211_BAND_60GHZ]) + __entry->mcast_rate[NL80211_BAND_2GHZ], + __entry->mcast_rate[NL80211_BAND_5GHZ], + __entry->mcast_rate[NL80211_BAND_60GHZ]) ); TRACE_EVENT(rdev_set_coalesce, diff --git a/net/wireless/util.c b/net/wireless/util.c index 9f440a9de63b..f36039888eb5 100644 --- a/net/wireless/util.c +++ b/net/wireless/util.c @@ -47,7 +47,7 @@ u32 ieee80211_mandatory_rates(struct ieee80211_supported_band *sband, if (WARN_ON(!sband)) return 1; - if (sband->band == IEEE80211_BAND_2GHZ) { + if (sband->band == NL80211_BAND_2GHZ) { if (scan_width == NL80211_BSS_CHAN_WIDTH_5 || scan_width == NL80211_BSS_CHAN_WIDTH_10) mandatory_flag = IEEE80211_RATE_MANDATORY_G; @@ -65,26 +65,26 @@ u32 ieee80211_mandatory_rates(struct ieee80211_supported_band *sband, } EXPORT_SYMBOL(ieee80211_mandatory_rates); -int ieee80211_channel_to_frequency(int chan, enum ieee80211_band band) +int ieee80211_channel_to_frequency(int chan, enum nl80211_band band) { /* see 802.11 17.3.8.3.2 and Annex J * there are overlapping channel numbers in 5GHz and 2GHz bands */ if (chan <= 0) return 0; /* not supported */ switch (band) { - case IEEE80211_BAND_2GHZ: + case NL80211_BAND_2GHZ: if (chan == 14) return 2484; else if (chan < 14) return 2407 + chan * 5; break; - case IEEE80211_BAND_5GHZ: + case NL80211_BAND_5GHZ: if (chan >= 182 && chan <= 196) return 4000 + chan * 5; else return 5000 + chan * 5; break; - case IEEE80211_BAND_60GHZ: + case NL80211_BAND_60GHZ: if (chan < 5) return 56160 + chan * 2160; break; @@ -116,11 +116,11 @@ EXPORT_SYMBOL(ieee80211_frequency_to_channel); struct ieee80211_channel *__ieee80211_get_channel(struct wiphy *wiphy, int freq) { - enum ieee80211_band band; + enum nl80211_band band; struct ieee80211_supported_band *sband; int i; - for (band = 0; band < IEEE80211_NUM_BANDS; band++) { 
+ for (band = 0; band < NUM_NL80211_BANDS; band++) { sband = wiphy->bands[band]; if (!sband) @@ -137,12 +137,12 @@ struct ieee80211_channel *__ieee80211_get_channel(struct wiphy *wiphy, EXPORT_SYMBOL(__ieee80211_get_channel); static void set_mandatory_flags_band(struct ieee80211_supported_band *sband, - enum ieee80211_band band) + enum nl80211_band band) { int i, want; switch (band) { - case IEEE80211_BAND_5GHZ: + case NL80211_BAND_5GHZ: want = 3; for (i = 0; i < sband->n_bitrates; i++) { if (sband->bitrates[i].bitrate == 60 || @@ -155,7 +155,7 @@ static void set_mandatory_flags_band(struct ieee80211_supported_band *sband, } WARN_ON(want); break; - case IEEE80211_BAND_2GHZ: + case NL80211_BAND_2GHZ: want = 7; for (i = 0; i < sband->n_bitrates; i++) { if (sband->bitrates[i].bitrate == 10) { @@ -185,12 +185,12 @@ static void set_mandatory_flags_band(struct ieee80211_supported_band *sband, } WARN_ON(want != 0 && want != 3 && want != 6); break; - case IEEE80211_BAND_60GHZ: + case NL80211_BAND_60GHZ: /* check for mandatory HT MCS 1..4 */ WARN_ON(!sband->ht_cap.ht_supported); WARN_ON((sband->ht_cap.mcs.rx_mask[0] & 0x1e) != 0x1e); break; - case IEEE80211_NUM_BANDS: + case NUM_NL80211_BANDS: WARN_ON(1); break; } @@ -198,9 +198,9 @@ static void set_mandatory_flags_band(struct ieee80211_supported_band *sband, void ieee80211_set_bitrate_flags(struct wiphy *wiphy) { - enum ieee80211_band band; + enum nl80211_band band; - for (band = 0; band < IEEE80211_NUM_BANDS; band++) + for (band = 0; band < NUM_NL80211_BANDS; band++) if (wiphy->bands[band]) set_mandatory_flags_band(wiphy->bands[band], band); } @@ -1399,22 +1399,22 @@ size_t ieee80211_ie_split_ric(const u8 *ies, size_t ielen, EXPORT_SYMBOL(ieee80211_ie_split_ric); bool ieee80211_operating_class_to_band(u8 operating_class, - enum ieee80211_band *band) + enum nl80211_band *band) { switch (operating_class) { case 112: case 115 ... 127: case 128 ... 
130: - *band = IEEE80211_BAND_5GHZ; + *band = NL80211_BAND_5GHZ; return true; case 81: case 82: case 83: case 84: - *band = IEEE80211_BAND_2GHZ; + *band = NL80211_BAND_2GHZ; return true; case 180: - *band = IEEE80211_BAND_60GHZ; + *band = NL80211_BAND_60GHZ; return true; } @@ -1726,10 +1726,10 @@ int ieee80211_get_ratemask(struct ieee80211_supported_band *sband, unsigned int ieee80211_get_num_supported_channels(struct wiphy *wiphy) { - enum ieee80211_band band; + enum nl80211_band band; unsigned int n_channels = 0; - for (band = 0; band < IEEE80211_NUM_BANDS; band++) + for (band = 0; band < NUM_NL80211_BANDS; band++) if (wiphy->bands[band]) n_channels += wiphy->bands[band]->n_channels; diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c index fd682832a0e3..4c89f0ca61ba 100644 --- a/net/wireless/wext-compat.c +++ b/net/wireless/wext-compat.c @@ -32,13 +32,13 @@ int cfg80211_wext_giwname(struct net_device *dev, if (!wdev) return -EOPNOTSUPP; - sband = wdev->wiphy->bands[IEEE80211_BAND_5GHZ]; + sband = wdev->wiphy->bands[NL80211_BAND_5GHZ]; if (sband) { is_a = true; is_ht |= sband->ht_cap.ht_supported; } - sband = wdev->wiphy->bands[IEEE80211_BAND_2GHZ]; + sband = wdev->wiphy->bands[NL80211_BAND_2GHZ]; if (sband) { int i; /* Check for mandatory rates */ @@ -143,7 +143,7 @@ int cfg80211_wext_giwrange(struct net_device *dev, { struct wireless_dev *wdev = dev->ieee80211_ptr; struct iw_range *range = (struct iw_range *) extra; - enum ieee80211_band band; + enum nl80211_band band; int i, c = 0; if (!wdev) @@ -215,7 +215,7 @@ int cfg80211_wext_giwrange(struct net_device *dev, } } - for (band = 0; band < IEEE80211_NUM_BANDS; band ++) { + for (band = 0; band < NUM_NL80211_BANDS; band ++) { struct ieee80211_supported_band *sband; sband = wdev->wiphy->bands[band]; @@ -265,11 +265,11 @@ int cfg80211_wext_freq(struct iw_freq *freq) * -EINVAL for impossible things. */ if (freq->e == 0) { - enum ieee80211_band band = IEEE80211_BAND_2GHZ; + enum nl80211_band band = NL80211_BAND_2GHZ; if (freq->m < 0) return 0; if (freq->m > 14) - band = IEEE80211_BAND_5GHZ; + band = NL80211_BAND_5GHZ; return ieee80211_channel_to_frequency(freq->m, band); } else { int i, div = 1000000; @@ -1245,7 +1245,7 @@ static int cfg80211_wext_siwrate(struct net_device *dev, maxrate = rate->value / 100000; } - for (band = 0; band < IEEE80211_NUM_BANDS; band++) { + for (band = 0; band < NUM_NL80211_BANDS; band++) { sband = wdev->wiphy->bands[band]; if (sband == NULL) continue; From 169ff6db3a81fad3011e4c0b0114dbe9de1cd517 Mon Sep 17 00:00:00 2001 From: Ben Greear Date: Fri, 5 Feb 2016 15:10:02 -0800 Subject: [PATCH 0542/1649] ath10k: Document alloc_frag_desc_for_data_pkt config option. This will help anyone trying to use the ack-rssi reporting feature with the host-specified TX-rate option in 10.4 firmware. 
Signed-off-by: Ben Greear Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/wmi.h | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h index feebd19ff08c..0fae0daa9215 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.h +++ b/drivers/net/wireless/ath/ath10k/wmi.h @@ -2661,9 +2661,14 @@ struct wmi_resource_config_10_4 { */ __le32 iphdr_pad_config; - /* qwrap configuration + /* qwrap configuration (bits 15-0) * 1 - This is qwrap configuration * 0 - This is not qwrap + * + * Bits 31-16 is alloc_frag_desc_for_data_pkt (1 enables, 0 disables) + * In order to get ack-RSSI reporting and to specify the tx-rate for + * individual frames, this option must be enabled. This uses an extra + * 4 bytes per tx-msdu descriptor, so don't enable it unless you need it. */ __le32 qwrap_config; } __packed; From 9f0b7e7dea7c3cedba315d44816070fcc692c748 Mon Sep 17 00:00:00 2001 From: Peter Oh Date: Mon, 4 Apr 2016 16:19:14 -0700 Subject: [PATCH 0543/1649] ath10k: add a support of set_tsf on vdev interface 10.2.4.70.24 firmware introduces new feature to set TSF via vdev parameter, hence implement relevant function. set_tsf function can be used to shift TBTT that will help avoid its clockdrift which happens when beacons are collided. Signed-off-by: Peter Oh Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/mac.c | 27 +++++++++++++++++++++++ drivers/net/wireless/ath/ath10k/wmi-tlv.c | 1 + drivers/net/wireless/ath/ath10k/wmi.c | 4 ++++ drivers/net/wireless/ath/ath10k/wmi.h | 2 ++ 4 files changed, 34 insertions(+) diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index b0e613bc10a5..c30a3944b612 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -6796,6 +6796,32 @@ static u64 ath10k_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif) return 0; } +static void ath10k_set_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + u64 tsf) +{ + struct ath10k *ar = hw->priv; + struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); + u32 tsf_offset, vdev_param = ar->wmi.vdev_param->set_tsf; + int ret; + + /* Workaround: + * + * Given tsf argument is entire TSF value, but firmware accepts + * only TSF offset to current TSF. + * + * get_tsf function is used to get offset value, however since + * ath10k_get_tsf is not implemented properly, it will return 0 always. + * Luckily all the caller functions to set_tsf, as of now, also rely on + * get_tsf function to get entire tsf value such get_tsf() + tsf_delta, + * final tsf offset value to firmware will be arithmetically correct. 
+ */ + tsf_offset = tsf - ath10k_get_tsf(hw, vif); + ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, + vdev_param, tsf_offset); + if (ret && ret != -EOPNOTSUPP) + ath10k_warn(ar, "failed to set tsf offset: %d\n", ret); +} + static int ath10k_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_ampdu_params *params) @@ -7252,6 +7278,7 @@ static const struct ieee80211_ops ath10k_ops = { .set_bitrate_mask = ath10k_mac_op_set_bitrate_mask, .sta_rc_update = ath10k_sta_rc_update, .get_tsf = ath10k_get_tsf, + .set_tsf = ath10k_set_tsf, .ampdu_action = ath10k_ampdu_action, .get_et_sset_count = ath10k_debug_get_et_sset_count, .get_et_stats = ath10k_debug_get_et_stats, diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c index 108593202052..e09337ee7c96 100644 --- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c +++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c @@ -3409,6 +3409,7 @@ static struct wmi_vdev_param_map wmi_tlv_vdev_param_map = { .meru_vc = WMI_VDEV_PARAM_UNSUPPORTED, .rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED, .bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED, + .set_tsf = WMI_VDEV_PARAM_UNSUPPORTED, }; static const struct wmi_ops wmi_tlv_ops = { diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c index f7ec65f263a0..db3e9a4be564 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.c +++ b/drivers/net/wireless/ath/ath10k/wmi.c @@ -781,6 +781,7 @@ static struct wmi_vdev_param_map wmi_vdev_param_map = { .meru_vc = WMI_VDEV_PARAM_UNSUPPORTED, .rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED, .bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED, + .set_tsf = WMI_VDEV_PARAM_UNSUPPORTED, }; /* 10.X WMI VDEV param map */ @@ -856,6 +857,7 @@ static struct wmi_vdev_param_map wmi_10x_vdev_param_map = { .meru_vc = WMI_VDEV_PARAM_UNSUPPORTED, .rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED, .bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED, + .set_tsf = WMI_VDEV_PARAM_UNSUPPORTED, }; static struct wmi_vdev_param_map wmi_10_2_4_vdev_param_map = { @@ -930,6 +932,7 @@ static struct wmi_vdev_param_map wmi_10_2_4_vdev_param_map = { .meru_vc = WMI_VDEV_PARAM_UNSUPPORTED, .rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED, .bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED, + .set_tsf = WMI_10X_VDEV_PARAM_TSF_INCREMENT, }; static struct wmi_vdev_param_map wmi_10_4_vdev_param_map = { @@ -1005,6 +1008,7 @@ static struct wmi_vdev_param_map wmi_10_4_vdev_param_map = { .meru_vc = WMI_10_4_VDEV_PARAM_MERU_VC, .rx_decap_type = WMI_10_4_VDEV_PARAM_RX_DECAP_TYPE, .bw_nss_ratemask = WMI_10_4_VDEV_PARAM_BW_NSS_RATEMASK, + .set_tsf = WMI_VDEV_PARAM_UNSUPPORTED, }; static struct wmi_pdev_param_map wmi_pdev_param_map = { diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h index 0fae0daa9215..137edb4d451d 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.h +++ b/drivers/net/wireless/ath/ath10k/wmi.h @@ -4635,6 +4635,7 @@ struct wmi_vdev_param_map { u32 meru_vc; u32 rx_decap_type; u32 bw_nss_ratemask; + u32 set_tsf; }; #define WMI_VDEV_PARAM_UNSUPPORTED 0 @@ -4891,6 +4892,7 @@ enum wmi_10x_vdev_param { WMI_10X_VDEV_PARAM_RTS_FIXED_RATE, WMI_10X_VDEV_PARAM_VHT_SGIMASK, WMI_10X_VDEV_PARAM_VHT80_RATEMASK, + WMI_10X_VDEV_PARAM_TSF_INCREMENT, }; enum wmi_10_4_vdev_param { From 4857dd14ec3720d25c33ea0186c55b849d891b0f Mon Sep 17 00:00:00 2001 From: Peter Oh Date: Mon, 4 Apr 2016 16:19:15 -0700 Subject: [PATCH 0544/1649] ath10k: update 10.4 WMI vdev parameters Update 10.4 WMI vdev param to sync to current 10.4 firmware 
as of 2/23/2016. Signed-off-by: Peter Oh Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/wmi.h | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h index 137edb4d451d..c83e1e39f8cc 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.h +++ b/drivers/net/wireless/ath/ath10k/wmi.h @@ -4962,6 +4962,12 @@ enum wmi_10_4_vdev_param { WMI_10_4_VDEV_PARAM_MERU_VC, WMI_10_4_VDEV_PARAM_RX_DECAP_TYPE, WMI_10_4_VDEV_PARAM_BW_NSS_RATEMASK, + WMI_10_4_VDEV_PARAM_SENSOR_AP, + WMI_10_4_VDEV_PARAM_BEACON_RATE, + WMI_10_4_VDEV_PARAM_DTIM_ENABLE_CTS, + WMI_10_4_VDEV_PARAM_STA_KICKOUT, + WMI_10_4_VDEV_PARAM_CAPABILITIES, + WMI_10_4_VDEV_PARAM_TSF_INCREMENT, }; #define WMI_VDEV_PARAM_TXBF_SU_TX_BFEE BIT(0)
From a606c0ee9d5c12eb1a0e86837db622969956b825 Mon Sep 17 00:00:00 2001 From: Peter Oh Date: Mon, 4 Apr 2016 16:19:16 -0700 Subject: [PATCH 0545/1649] ath10k: enable set_tsf vdev command to WMI 10.4 10.4 firmware has added a set_tsf vdev parameter, hence enable it. The set_tsf function can be used to shift the TBTT, which helps avoid the clock drift that occurs when beacons collide. Signed-off-by: Peter Oh Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/wmi.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c index db3e9a4be564..e8d9a3e5c2df 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.c +++ b/drivers/net/wireless/ath/ath10k/wmi.c @@ -1008,7 +1008,7 @@ static struct wmi_vdev_param_map wmi_10_4_vdev_param_map = { .meru_vc = WMI_10_4_VDEV_PARAM_MERU_VC, .rx_decap_type = WMI_10_4_VDEV_PARAM_RX_DECAP_TYPE, .bw_nss_ratemask = WMI_10_4_VDEV_PARAM_BW_NSS_RATEMASK, - .set_tsf = WMI_VDEV_PARAM_UNSUPPORTED, + .set_tsf = WMI_10_4_VDEV_PARAM_TSF_INCREMENT, }; static struct wmi_pdev_param_map wmi_pdev_param_map = {
From cfe9011a05a8de56f97d15a24977639fe6534e9c Mon Sep 17 00:00:00 2001 From: Rajkumar Manoharan Date: Thu, 7 Apr 2016 12:10:58 +0530 Subject: [PATCH 0546/1649] ath10k: remove MSI range support MSI-X was never well tested, might contain bugs, and generally isn't all that useful to maintain. Also, ath10k is mainly used with shared/single-MSI interrupt systems. Hence, remove MSI range support. This change will be useful for further cleanup of the copy engine lock and for adding NAPI support.
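For orientation before the long diff that follows: with MSI-X removed, interrupt setup reduces to "use a single MSI vector if possible, otherwise fall back to legacy INTx", and the chosen mode is recorded in an enum rather than a vector count. The sketch below is a simplified, non-verbatim summary under that assumption; the real logic is ath10k_pci_init_irq() in the diff, which also performs the PCIE_INTR_* register writes needed in legacy mode.

	/* Simplified orientation sketch only; see ath10k_pci_init_irq() below
	 * for the real code, including error handling and the legacy-mode
	 * register writes that are omitted here.
	 */
	static int example_init_irq(struct ath10k_pci *ar_pci,
				    enum ath10k_pci_irq_mode wanted)
	{
		if (wanted != ATH10K_PCI_IRQ_LEGACY &&
		    pci_enable_msi(ar_pci->pdev) == 0) {
			ar_pci->oper_irq_mode = ATH10K_PCI_IRQ_MSI;
			return 0;
		}

		/* shared legacy line interrupt */
		ar_pci->oper_irq_mode = ATH10K_PCI_IRQ_LEGACY;
		return 0;
	}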
Signed-off-by: Rajkumar Manoharan Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/pci.c | 164 +++----------------------- drivers/net/wireless/ath/ath10k/pci.h | 17 ++- 2 files changed, 24 insertions(+), 157 deletions(-) diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c index 0b305efe6c94..cdd8a307c55b 100644 --- a/drivers/net/wireless/ath/ath10k/pci.c +++ b/drivers/net/wireless/ath/ath10k/pci.c @@ -33,12 +33,6 @@ #include "ce.h" #include "pci.h" -enum ath10k_pci_irq_mode { - ATH10K_PCI_IRQ_AUTO = 0, - ATH10K_PCI_IRQ_LEGACY = 1, - ATH10K_PCI_IRQ_MSI = 2, -}; - enum ath10k_pci_reset_mode { ATH10K_PCI_RESET_AUTO = 0, ATH10K_PCI_RESET_WARM_ONLY = 1, @@ -745,10 +739,7 @@ static inline const char *ath10k_pci_get_irq_method(struct ath10k *ar) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - if (ar_pci->num_msi_intrs > 1) - return "msi-x"; - - if (ar_pci->num_msi_intrs == 1) + if (ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_MSI) return "msi"; return "legacy"; @@ -1502,13 +1493,8 @@ void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe, void ath10k_pci_kill_tasklet(struct ath10k *ar) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - int i; tasklet_kill(&ar_pci->intr_tq); - tasklet_kill(&ar_pci->msi_fw_err); - - for (i = 0; i < CE_COUNT; i++) - tasklet_kill(&ar_pci->pipe_info[i].intr); del_timer_sync(&ar_pci->rx_post_retry); } @@ -1624,10 +1610,8 @@ static void ath10k_pci_irq_disable(struct ath10k *ar) static void ath10k_pci_irq_sync(struct ath10k *ar) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - int i; - for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++) - synchronize_irq(ar_pci->pdev->irq + i); + synchronize_irq(ar_pci->pdev->irq); } static void ath10k_pci_irq_enable(struct ath10k *ar) @@ -2596,65 +2580,6 @@ static const struct ath10k_hif_ops ath10k_pci_hif_ops = { #endif }; -static void ath10k_pci_ce_tasklet(unsigned long ptr) -{ - struct ath10k_pci_pipe *pipe = (struct ath10k_pci_pipe *)ptr; - struct ath10k_pci *ar_pci = pipe->ar_pci; - - ath10k_ce_per_engine_service(ar_pci->ar, pipe->pipe_num); -} - -static void ath10k_msi_err_tasklet(unsigned long data) -{ - struct ath10k *ar = (struct ath10k *)data; - - if (!ath10k_pci_has_fw_crashed(ar)) { - ath10k_warn(ar, "received unsolicited fw crash interrupt\n"); - return; - } - - ath10k_pci_irq_disable(ar); - ath10k_pci_fw_crashed_clear(ar); - ath10k_pci_fw_crashed_dump(ar); -} - -/* - * Handler for a per-engine interrupt on a PARTICULAR CE. - * This is used in cases where each CE has a private MSI interrupt. - */ -static irqreturn_t ath10k_pci_per_engine_handler(int irq, void *arg) -{ - struct ath10k *ar = arg; - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - int ce_id = irq - ar_pci->pdev->irq - MSI_ASSIGN_CE_INITIAL; - - if (ce_id < 0 || ce_id >= ARRAY_SIZE(ar_pci->pipe_info)) { - ath10k_warn(ar, "unexpected/invalid irq %d ce_id %d\n", irq, - ce_id); - return IRQ_HANDLED; - } - - /* - * NOTE: We are able to derive ce_id from irq because we - * use a one-to-one mapping for CE's 0..5. - * CE's 6 & 7 do not use interrupts at all. - * - * This mapping must be kept in sync with the mapping - * used by firmware. 
- */ - tasklet_schedule(&ar_pci->pipe_info[ce_id].intr); - return IRQ_HANDLED; -} - -static irqreturn_t ath10k_pci_msi_fw_handler(int irq, void *arg) -{ - struct ath10k *ar = arg; - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - - tasklet_schedule(&ar_pci->msi_fw_err); - return IRQ_HANDLED; -} - /* * Top-level interrupt handler for all PCI interrupts from a Target. * When a block of MSI interrupts is allocated, this top-level handler @@ -2672,7 +2597,7 @@ static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg) return IRQ_NONE; } - if (ar_pci->num_msi_intrs == 0) { + if (ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_LEGACY) { if (!ath10k_pci_irq_pending(ar)) return IRQ_NONE; @@ -2699,43 +2624,10 @@ static void ath10k_pci_tasklet(unsigned long data) ath10k_ce_per_engine_service_any(ar); /* Re-enable legacy irq that was disabled in the irq handler */ - if (ar_pci->num_msi_intrs == 0) + if (ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_LEGACY) ath10k_pci_enable_legacy_irq(ar); } -static int ath10k_pci_request_irq_msix(struct ath10k *ar) -{ - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - int ret, i; - - ret = request_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW, - ath10k_pci_msi_fw_handler, - IRQF_SHARED, "ath10k_pci", ar); - if (ret) { - ath10k_warn(ar, "failed to request MSI-X fw irq %d: %d\n", - ar_pci->pdev->irq + MSI_ASSIGN_FW, ret); - return ret; - } - - for (i = MSI_ASSIGN_CE_INITIAL; i <= MSI_ASSIGN_CE_MAX; i++) { - ret = request_irq(ar_pci->pdev->irq + i, - ath10k_pci_per_engine_handler, - IRQF_SHARED, "ath10k_pci", ar); - if (ret) { - ath10k_warn(ar, "failed to request MSI-X ce irq %d: %d\n", - ar_pci->pdev->irq + i, ret); - - for (i--; i >= MSI_ASSIGN_CE_INITIAL; i--) - free_irq(ar_pci->pdev->irq + i, ar); - - free_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW, ar); - return ret; - } - } - - return 0; -} - static int ath10k_pci_request_irq_msi(struct ath10k *ar) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); @@ -2774,41 +2666,28 @@ static int ath10k_pci_request_irq(struct ath10k *ar) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - switch (ar_pci->num_msi_intrs) { - case 0: + switch (ar_pci->oper_irq_mode) { + case ATH10K_PCI_IRQ_LEGACY: return ath10k_pci_request_irq_legacy(ar); - case 1: + case ATH10K_PCI_IRQ_MSI: return ath10k_pci_request_irq_msi(ar); default: - return ath10k_pci_request_irq_msix(ar); + return -EINVAL; } } static void ath10k_pci_free_irq(struct ath10k *ar) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - int i; - /* There's at least one interrupt irregardless whether its legacy INTR - * or MSI or MSI-X */ - for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++) - free_irq(ar_pci->pdev->irq + i, ar); + free_irq(ar_pci->pdev->irq, ar); } void ath10k_pci_init_irq_tasklets(struct ath10k *ar) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - int i; tasklet_init(&ar_pci->intr_tq, ath10k_pci_tasklet, (unsigned long)ar); - tasklet_init(&ar_pci->msi_fw_err, ath10k_msi_err_tasklet, - (unsigned long)ar); - - for (i = 0; i < CE_COUNT; i++) { - ar_pci->pipe_info[i].ar_pci = ar_pci; - tasklet_init(&ar_pci->pipe_info[i].intr, ath10k_pci_ce_tasklet, - (unsigned long)&ar_pci->pipe_info[i]); - } } static int ath10k_pci_init_irq(struct ath10k *ar) @@ -2822,20 +2701,9 @@ static int ath10k_pci_init_irq(struct ath10k *ar) ath10k_info(ar, "limiting irq mode to: %d\n", ath10k_pci_irq_mode); - /* Try MSI-X */ - if (ath10k_pci_irq_mode == ATH10K_PCI_IRQ_AUTO) { - ar_pci->num_msi_intrs = MSI_ASSIGN_CE_MAX + 1; - ret = pci_enable_msi_range(ar_pci->pdev, ar_pci->num_msi_intrs, - 
ar_pci->num_msi_intrs); - if (ret > 0) - return 0; - - /* fall-through */ - } - /* Try MSI */ if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_LEGACY) { - ar_pci->num_msi_intrs = 1; + ar_pci->oper_irq_mode = ATH10K_PCI_IRQ_MSI; ret = pci_enable_msi(ar_pci->pdev); if (ret == 0) return 0; @@ -2851,7 +2719,7 @@ static int ath10k_pci_init_irq(struct ath10k *ar) * This write might get lost if target has NOT written BAR. * For now, fix the race by repeating the write in below * synchronization checking. */ - ar_pci->num_msi_intrs = 0; + ar_pci->oper_irq_mode = ATH10K_PCI_IRQ_LEGACY; ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS, PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL); @@ -2869,8 +2737,8 @@ static int ath10k_pci_deinit_irq(struct ath10k *ar) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - switch (ar_pci->num_msi_intrs) { - case 0: + switch (ar_pci->oper_irq_mode) { + case ATH10K_PCI_IRQ_LEGACY: ath10k_pci_deinit_irq_legacy(ar); break; default: @@ -2908,7 +2776,7 @@ int ath10k_pci_wait_for_target_init(struct ath10k *ar) if (val & FW_IND_INITIALIZED) break; - if (ar_pci->num_msi_intrs == 0) + if (ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_LEGACY) /* Fix potential race by repeating CORE_BASE writes */ ath10k_pci_enable_legacy_irq(ar); @@ -3186,8 +3054,8 @@ static int ath10k_pci_probe(struct pci_dev *pdev, goto err_sleep; } - ath10k_info(ar, "pci irq %s interrupts %d irq_mode %d reset_mode %d\n", - ath10k_pci_get_irq_method(ar), ar_pci->num_msi_intrs, + ath10k_info(ar, "pci irq %s oper_irq_mode %d irq_mode %d reset_mode %d\n", + ath10k_pci_get_irq_method(ar), ar_pci->oper_irq_mode, ath10k_pci_irq_mode, ath10k_pci_reset_mode); ret = ath10k_pci_request_irq(ar); diff --git a/drivers/net/wireless/ath/ath10k/pci.h b/drivers/net/wireless/ath/ath10k/pci.h index 249c73a69800..959dc321b75e 100644 --- a/drivers/net/wireless/ath/ath10k/pci.h +++ b/drivers/net/wireless/ath/ath10k/pci.h @@ -148,9 +148,6 @@ struct ath10k_pci_pipe { /* protects compl_free and num_send_allowed */ spinlock_t pipe_lock; - - struct ath10k_pci *ar_pci; - struct tasklet_struct intr; }; struct ath10k_pci_supp_chip { @@ -164,6 +161,12 @@ struct ath10k_bus_ops { int (*get_num_banks)(struct ath10k *ar); }; +enum ath10k_pci_irq_mode { + ATH10K_PCI_IRQ_AUTO = 0, + ATH10K_PCI_IRQ_LEGACY = 1, + ATH10K_PCI_IRQ_MSI = 2, +}; + struct ath10k_pci { struct pci_dev *pdev; struct device *dev; @@ -171,14 +174,10 @@ struct ath10k_pci { void __iomem *mem; size_t mem_len; - /* - * Number of MSI interrupts granted, 0 --> using legacy PCI line - * interrupts. - */ - int num_msi_intrs; + /* Operating interrupt mode */ + enum ath10k_pci_irq_mode oper_irq_mode; struct tasklet_struct intr_tq; - struct tasklet_struct msi_fw_err; struct ath10k_pci_pipe pipe_info[CE_COUNT_MAX]; From 3c435e2e414e82ec6c0e96a1dfc2be3ddc3c23b4 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Mon, 11 Apr 2016 21:52:35 +0200 Subject: [PATCH 0547/1649] netfilter: conntrack: de-inline nf_conntrack_eventmask_report Way too large; move it to nf_conntrack_ecache.c. Reduces total object size by 1216 byte on my machine. 
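The size win comes from the fact that an inline function body gets expanded at every call site. After this patch the header keeps only a declaration plus a cheap notifier check, and the full body exists once in nf_conntrack_ecache.c. The condensed sketch below shows the resulting header-side shape; the exact code follows in the diff.

	/* Condensed view of include/net/netfilter/nf_conntrack_ecache.h after
	 * the change: the wrapper stays inline but only does the cheap
	 * "is anyone listening?" test before calling out of line.
	 */
	int nf_conntrack_eventmask_report(unsigned int eventmask, struct nf_conn *ct,
					  u32 portid, int report);

	static inline int
	nf_conntrack_event(enum ip_conntrack_events event, struct nf_conn *ct)
	{
		const struct net *net = nf_ct_net(ct);

		if (!rcu_access_pointer(net->ct.nf_conntrack_event_cb))
			return 0;	/* fast path: no notifier registered */

		return nf_conntrack_eventmask_report(1 << event, ct, 0, 0);
	}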
Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- include/net/netfilter/nf_conntrack_ecache.h | 66 ++++----------------- net/netfilter/nf_conntrack_ecache.c | 54 +++++++++++++++++ 2 files changed, 66 insertions(+), 54 deletions(-) diff --git a/include/net/netfilter/nf_conntrack_ecache.h b/include/net/netfilter/nf_conntrack_ecache.h index 57c880378443..019a5b859868 100644 --- a/include/net/netfilter/nf_conntrack_ecache.h +++ b/include/net/netfilter/nf_conntrack_ecache.h @@ -73,6 +73,8 @@ void nf_conntrack_unregister_notifier(struct net *net, struct nf_ct_event_notifier *nb); void nf_ct_deliver_cached_events(struct nf_conn *ct); +int nf_conntrack_eventmask_report(unsigned int eventmask, struct nf_conn *ct, + u32 portid, int report); static inline void nf_conntrack_event_cache(enum ip_conntrack_events event, struct nf_conn *ct) @@ -90,70 +92,26 @@ nf_conntrack_event_cache(enum ip_conntrack_events event, struct nf_conn *ct) set_bit(event, &e->cache); } -static inline int -nf_conntrack_eventmask_report(unsigned int eventmask, - struct nf_conn *ct, - u32 portid, - int report) -{ - int ret = 0; - struct net *net = nf_ct_net(ct); - struct nf_ct_event_notifier *notify; - struct nf_conntrack_ecache *e; - - rcu_read_lock(); - notify = rcu_dereference(net->ct.nf_conntrack_event_cb); - if (notify == NULL) - goto out_unlock; - - e = nf_ct_ecache_find(ct); - if (e == NULL) - goto out_unlock; - - if (nf_ct_is_confirmed(ct) && !nf_ct_is_dying(ct)) { - struct nf_ct_event item = { - .ct = ct, - .portid = e->portid ? e->portid : portid, - .report = report - }; - /* This is a resent of a destroy event? If so, skip missed */ - unsigned long missed = e->portid ? 0 : e->missed; - - if (!((eventmask | missed) & e->ctmask)) - goto out_unlock; - - ret = notify->fcn(eventmask | missed, &item); - if (unlikely(ret < 0 || missed)) { - spin_lock_bh(&ct->lock); - if (ret < 0) { - /* This is a destroy event that has been - * triggered by a process, we store the PORTID - * to include it in the retransmission. 
*/ - if (eventmask & (1 << IPCT_DESTROY) && - e->portid == 0 && portid != 0) - e->portid = portid; - else - e->missed |= eventmask; - } else - e->missed &= ~missed; - spin_unlock_bh(&ct->lock); - } - } -out_unlock: - rcu_read_unlock(); - return ret; -} - static inline int nf_conntrack_event_report(enum ip_conntrack_events event, struct nf_conn *ct, u32 portid, int report) { + const struct net *net = nf_ct_net(ct); + + if (!rcu_access_pointer(net->ct.nf_conntrack_event_cb)) + return 0; + return nf_conntrack_eventmask_report(1 << event, ct, portid, report); } static inline int nf_conntrack_event(enum ip_conntrack_events event, struct nf_conn *ct) { + const struct net *net = nf_ct_net(ct); + + if (!rcu_access_pointer(net->ct.nf_conntrack_event_cb)) + return 0; + return nf_conntrack_eventmask_report(1 << event, ct, 0, 0); } diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c index 4e78c57b818f..a0ebab96a92f 100644 --- a/net/netfilter/nf_conntrack_ecache.c +++ b/net/netfilter/nf_conntrack_ecache.c @@ -113,6 +113,60 @@ static void ecache_work(struct work_struct *work) schedule_delayed_work(&ctnet->ecache_dwork, delay); } +int nf_conntrack_eventmask_report(unsigned int eventmask, struct nf_conn *ct, + u32 portid, int report) +{ + int ret = 0; + struct net *net = nf_ct_net(ct); + struct nf_ct_event_notifier *notify; + struct nf_conntrack_ecache *e; + + rcu_read_lock(); + notify = rcu_dereference(net->ct.nf_conntrack_event_cb); + if (!notify) + goto out_unlock; + + e = nf_ct_ecache_find(ct); + if (!e) + goto out_unlock; + + if (nf_ct_is_confirmed(ct) && !nf_ct_is_dying(ct)) { + struct nf_ct_event item = { + .ct = ct, + .portid = e->portid ? e->portid : portid, + .report = report + }; + /* This is a resent of a destroy event? If so, skip missed */ + unsigned long missed = e->portid ? 0 : e->missed; + + if (!((eventmask | missed) & e->ctmask)) + goto out_unlock; + + ret = notify->fcn(eventmask | missed, &item); + if (unlikely(ret < 0 || missed)) { + spin_lock_bh(&ct->lock); + if (ret < 0) { + /* This is a destroy event that has been + * triggered by a process, we store the PORTID + * to include it in the retransmission. + */ + if (eventmask & (1 << IPCT_DESTROY) && + e->portid == 0 && portid != 0) + e->portid = portid; + else + e->missed |= eventmask; + } else { + e->missed &= ~missed; + } + spin_unlock_bh(&ct->lock); + } + } +out_unlock: + rcu_read_unlock(); + return ret; +} +EXPORT_SYMBOL_GPL(nf_conntrack_eventmask_report); + /* deliver cached events and clear cache entry - must be called with locally * disabled softirqs */ void nf_ct_deliver_cached_events(struct nf_conn *ct) From ecdfb48cddfd1096343148113d5b1bd789033aa8 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Mon, 11 Apr 2016 21:52:36 +0200 Subject: [PATCH 0548/1649] netfilter: conntrack: move expectation event helper to ecache.c Not performance critical, it is only invoked when an expectation is added/destroyed. While at it, kill unused nf_ct_expect_event() wrapper. 
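With the unused nf_ct_expect_event() wrapper gone, a call site that does not care about the netlink portid/report arguments simply calls the _report variant directly with zeros, which is all the old wrapper did. The snippet below is an illustration only, not a call site added by this patch; nf_ct_expect_event_report() and IPEXP_NEW are the real names, while the surrounding function is made up.

	/* Illustrative only: report a new expectation without the removed
	 * wrapper. Passing portid = 0 and report = 0 matches the old
	 * nf_ct_expect_event() behaviour.
	 */
	static void example_report_new_expectation(struct nf_conntrack_expect *exp)
	{
		nf_ct_expect_event_report(IPEXP_NEW, exp, 0, 0);
	}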
Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- include/net/netfilter/nf_conntrack_ecache.h | 42 ++------------------- net/netfilter/nf_conntrack_ecache.c | 30 +++++++++++++++ 2 files changed, 33 insertions(+), 39 deletions(-) diff --git a/include/net/netfilter/nf_conntrack_ecache.h b/include/net/netfilter/nf_conntrack_ecache.h index 019a5b859868..fa36447371c6 100644 --- a/include/net/netfilter/nf_conntrack_ecache.h +++ b/include/net/netfilter/nf_conntrack_ecache.h @@ -130,43 +130,9 @@ int nf_ct_expect_register_notifier(struct net *net, void nf_ct_expect_unregister_notifier(struct net *net, struct nf_exp_event_notifier *nb); -static inline void -nf_ct_expect_event_report(enum ip_conntrack_expect_events event, - struct nf_conntrack_expect *exp, - u32 portid, - int report) -{ - struct net *net = nf_ct_exp_net(exp); - struct nf_exp_event_notifier *notify; - struct nf_conntrack_ecache *e; - - rcu_read_lock(); - notify = rcu_dereference(net->ct.nf_expect_event_cb); - if (notify == NULL) - goto out_unlock; - - e = nf_ct_ecache_find(exp->master); - if (e == NULL) - goto out_unlock; - - if (e->expmask & (1 << event)) { - struct nf_exp_event item = { - .exp = exp, - .portid = portid, - .report = report - }; - notify->fcn(1 << event, &item); - } -out_unlock: - rcu_read_unlock(); -} - -static inline void -nf_ct_expect_event(enum ip_conntrack_expect_events event, - struct nf_conntrack_expect *exp) -{ - nf_ct_expect_event_report(event, exp, 0, 0); -} +void nf_ct_expect_event_report(enum ip_conntrack_expect_events event, + struct nf_conntrack_expect *exp, + u32 portid, int report); int nf_conntrack_ecache_pernet_init(struct net *net); void nf_conntrack_ecache_pernet_fini(struct net *net); @@ -203,8 +169,6 @@ static inline int nf_conntrack_event_report(enum ip_conntrack_events event, u32 portid, int report) { return 0; } static inline void nf_ct_deliver_cached_events(const struct nf_conn *ct) {} -static inline void nf_ct_expect_event(enum ip_conntrack_expect_events event, - struct nf_conntrack_expect *exp) {} static inline void nf_ct_expect_event_report(enum ip_conntrack_expect_events e, struct nf_conntrack_expect *exp, u32 portid, diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c index a0ebab96a92f..d28011b42845 100644 --- a/net/netfilter/nf_conntrack_ecache.c +++ b/net/netfilter/nf_conntrack_ecache.c @@ -221,6 +221,36 @@ out_unlock: } EXPORT_SYMBOL_GPL(nf_ct_deliver_cached_events); +void nf_ct_expect_event_report(enum ip_conntrack_expect_events event, + struct nf_conntrack_expect *exp, + u32 portid, int report) + +{ + struct net *net = nf_ct_exp_net(exp); + struct nf_exp_event_notifier *notify; + struct nf_conntrack_ecache *e; + + rcu_read_lock(); + notify = rcu_dereference(net->ct.nf_expect_event_cb); + if (!notify) + goto out_unlock; + + e = nf_ct_ecache_find(exp->master); + if (!e) + goto out_unlock; + + if (e->expmask & (1 << event)) { + struct nf_exp_event item = { + .exp = exp, + .portid = portid, + .report = report + }; + notify->fcn(1 << event, &item); + } +out_unlock: + rcu_read_unlock(); +} + int nf_conntrack_register_notifier(struct net *net, struct nf_ct_event_notifier *new) { From 82500aa01ad12eff41a9a68ad01f1d40db8921f9 Mon Sep 17 00:00:00 2001 From: John Crispin Date: Fri, 8 Apr 2016 00:54:04 +0200 Subject: [PATCH 0549/1649] net: mediatek: watchdog_timeo was not set The original commit failed to set watchdog_timeo. This patch sets watchdog_timeo to HZ. Signed-off-by: John Crispin Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mediatek/mtk_eth_soc.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index e0b68afea56e..bb10d57c9999 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -1645,6 +1645,7 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np) mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET; SET_NETDEV_DEV(eth->netdev[id], eth->dev); + eth->netdev[id]->watchdog_timeo = HZ; eth->netdev[id]->netdev_ops = &mtk_netdev_ops; eth->netdev[id]->base_addr = (unsigned long)eth->base; eth->netdev[id]->vlan_features = MTK_HW_FEATURES & From beeb4ca466fa1c399d69e34c30ddf04e0b7cbefd Mon Sep 17 00:00:00 2001 From: John Crispin Date: Fri, 8 Apr 2016 00:54:05 +0200 Subject: [PATCH 0550/1649] net: mediatek: mtk_cal_txd_req() returns bad value The code used to also support the PDMA engine, which had 2 packet pointers per descriptor. Because of this we had to divide the result by 2 and round it up. This is no longer needed as the code only supports QDMA. Signed-off-by: John Crispin Signed-off-by: David S. Miller --- drivers/net/ethernet/mediatek/mtk_eth_soc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index bb10d57c9999..94cceb83b569 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -681,7 +681,7 @@ static inline int mtk_cal_txd_req(struct sk_buff *skb) nfrags += skb_shinfo(skb)->nr_frags; } - return DIV_ROUND_UP(nfrags, 2); + return nfrags; } static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev) From 13439eec7af24b0800e654ee03c57ab985083ae4 Mon Sep 17 00:00:00 2001 From: John Crispin Date: Fri, 8 Apr 2016 00:54:06 +0200 Subject: [PATCH 0551/1649] net: mediatek: remove superfluous reset call HW reset is triggered in the mtk_hw_init() function. There is no need to also reset the core during probe. Signed-off-by: John Crispin Signed-off-by: David S. Miller --- drivers/net/ethernet/mediatek/mtk_eth_soc.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index 94cceb83b569..a4982e497ea6 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -1679,10 +1679,6 @@ static int mtk_probe(struct platform_device *pdev) struct mtk_eth *eth; int err; - err = device_reset(&pdev->dev); - if (err) - return err; - match = of_match_device(of_mtk_match, &pdev->dev); soc = (struct mtk_soc_data *)match->data; From 13c822f6d468ca5a16da4d9432b067d54245c5b9 Mon Sep 17 00:00:00 2001 From: John Crispin Date: Fri, 8 Apr 2016 00:54:07 +0200 Subject: [PATCH 0552/1649] net: mediatek: fix stop and wakeup of queue The driver supports 2 MACs. Both run on the same DMA ring. If we go above/below the TX rings threshold value, we always need to wake/stop the queue of both devices. Not doing to can cause TX stalls and packet drops on one of the devices. Signed-off-by: John Crispin Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mediatek/mtk_eth_soc.c | 37 +++++++++++++++------ 1 file changed, 27 insertions(+), 10 deletions(-) diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index a4982e497ea6..4ebc42e0271a 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -684,6 +684,28 @@ static inline int mtk_cal_txd_req(struct sk_buff *skb) return nfrags; } +static void mtk_wake_queue(struct mtk_eth *eth) +{ + int i; + + for (i = 0; i < MTK_MAC_COUNT; i++) { + if (!eth->netdev[i]) + continue; + netif_wake_queue(eth->netdev[i]); + } +} + +static void mtk_stop_queue(struct mtk_eth *eth) +{ + int i; + + for (i = 0; i < MTK_MAC_COUNT; i++) { + if (!eth->netdev[i]) + continue; + netif_stop_queue(eth->netdev[i]); + } +} + static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct mtk_mac *mac = netdev_priv(dev); @@ -695,7 +717,7 @@ static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev) tx_num = mtk_cal_txd_req(skb); if (unlikely(atomic_read(&ring->free_count) <= tx_num)) { - netif_stop_queue(dev); + mtk_stop_queue(eth); netif_err(eth, tx_queued, dev, "Tx Ring full when queue awake!\n"); return NETDEV_TX_BUSY; @@ -720,10 +742,10 @@ static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev) goto drop; if (unlikely(atomic_read(&ring->free_count) <= ring->thresh)) { - netif_stop_queue(dev); + mtk_stop_queue(eth); if (unlikely(atomic_read(&ring->free_count) > ring->thresh)) - netif_wake_queue(dev); + mtk_wake_queue(eth); } return NETDEV_TX_OK; @@ -897,13 +919,8 @@ static int mtk_poll_tx(struct mtk_eth *eth, int budget, bool *tx_again) if (!total) return 0; - for (i = 0; i < MTK_MAC_COUNT; i++) { - if (!eth->netdev[i] || - unlikely(!netif_queue_stopped(eth->netdev[i]))) - continue; - if (atomic_read(&ring->free_count) > ring->thresh) - netif_wake_queue(eth->netdev[i]); - } + if (atomic_read(&ring->free_count) > ring->thresh) + mtk_wake_queue(eth); return total; } From 34c2e4c9e9b3e434a31f67eecf603dc1496c8cc9 Mon Sep 17 00:00:00 2001 From: John Crispin Date: Fri, 8 Apr 2016 00:54:08 +0200 Subject: [PATCH 0553/1649] net: mediatek: fix TX locking Inside the TX path there is a lock inside the tx_map function. This is however too late. The patch moves the lock to the start of the xmit function right before the free count check of the DMA ring happens. If we do not do this, the code becomes racy leading to TX stalls and dropped packets. This happens as there are 2 netdevs running on the same physical DMA ring. Signed-off-by: John Crispin Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mediatek/mtk_eth_soc.c | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index 4ebc42e0271a..7b760752d2c1 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -536,7 +536,6 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev, struct mtk_eth *eth = mac->hw; struct mtk_tx_dma *itxd, *txd; struct mtk_tx_buf *tx_buf; - unsigned long flags; dma_addr_t mapped_addr; unsigned int nr_frags; int i, n_desc = 1; @@ -568,11 +567,6 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev, if (unlikely(dma_mapping_error(&dev->dev, mapped_addr))) return -ENOMEM; - /* normally we can rely on the stack not calling this more than once, - * however we have 2 queues running ont he same ring so we need to lock - * the ring access - */ - spin_lock_irqsave(ð->page_lock, flags); WRITE_ONCE(itxd->txd1, mapped_addr); tx_buf->flags |= MTK_TX_FLAGS_SINGLE0; dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr); @@ -632,8 +626,6 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev, WRITE_ONCE(itxd->txd3, (TX_DMA_SWC | TX_DMA_PLEN0(skb_headlen(skb)) | (!nr_frags * TX_DMA_LS0))); - spin_unlock_irqrestore(ð->page_lock, flags); - netdev_sent_queue(dev, skb->len); skb_tx_timestamp(skb); @@ -661,8 +653,6 @@ err_dma: itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2); } while (itxd != txd); - spin_unlock_irqrestore(ð->page_lock, flags); - return -ENOMEM; } @@ -712,14 +702,22 @@ static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev) struct mtk_eth *eth = mac->hw; struct mtk_tx_ring *ring = ð->tx_ring; struct net_device_stats *stats = &dev->stats; + unsigned long flags; bool gso = false; int tx_num; + /* normally we can rely on the stack not calling this more than once, + * however we have 2 queues running on the same ring so we need to lock + * the ring access + */ + spin_lock_irqsave(ð->page_lock, flags); + tx_num = mtk_cal_txd_req(skb); if (unlikely(atomic_read(&ring->free_count) <= tx_num)) { mtk_stop_queue(eth); netif_err(eth, tx_queued, dev, "Tx Ring full when queue awake!\n"); + spin_unlock_irqrestore(ð->page_lock, flags); return NETDEV_TX_BUSY; } @@ -747,10 +745,12 @@ static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev) ring->thresh)) mtk_wake_queue(eth); } + spin_unlock_irqrestore(ð->page_lock, flags); return NETDEV_TX_OK; drop: + spin_unlock_irqrestore(ð->page_lock, flags); stats->tx_dropped++; dev_kfree_skb(skb); return NETDEV_TX_OK; From e7d425dcea032f1d0b44b6fa4c6735f13882de6e Mon Sep 17 00:00:00 2001 From: John Crispin Date: Fri, 8 Apr 2016 00:54:09 +0200 Subject: [PATCH 0554/1649] net: mediatek: fix mtk_pending_work The driver supports 2 MACs. Both run on the same DMA ring. If we hit a TX timeout we need to stop both netdevs before restarting them again. If we don't do this, mtk_stop() wont shutdown DMA and the consecutive call to mtk_open() wont restart DMA and enable IRQs. Signed-off-by: John Crispin Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mediatek/mtk_eth_soc.c | 28 +++++++++++++++------ 1 file changed, 20 insertions(+), 8 deletions(-) diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index 7b760752d2c1..cd5d0c97f0ce 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -1432,17 +1432,29 @@ static void mtk_pending_work(struct work_struct *work) { struct mtk_mac *mac = container_of(work, struct mtk_mac, pending_work); struct mtk_eth *eth = mac->hw; - struct net_device *dev = eth->netdev[mac->id]; - int err; + int err, i; + unsigned long restart = 0; rtnl_lock(); - mtk_stop(dev); - err = mtk_open(dev); - if (err) { - netif_alert(eth, ifup, dev, - "Driver up/down cycle failed, closing device.\n"); - dev_close(dev); + /* stop all devices to make sure that dma is properly shut down */ + for (i = 0; i < MTK_MAC_COUNT; i++) { + if (!netif_oper_up(eth->netdev[i])) + continue; + mtk_stop(eth->netdev[i]); + __set_bit(i, &restart); + } + + /* restart DMA and enable IRQs */ + for (i = 0; i < MTK_MAC_COUNT; i++) { + if (!test_bit(i, &restart)) + continue; + err = mtk_open(eth->netdev[i]); + if (err) { + netif_alert(eth, ifup, eth->netdev[i], + "Driver up/down cycle failed, closing device.\n"); + dev_close(eth->netdev[i]); + } } rtnl_unlock(); } From 7c78b4ad9bbdbe0bb4fbc98841ad9d904ee116e9 Mon Sep 17 00:00:00 2001 From: John Crispin Date: Fri, 8 Apr 2016 00:54:10 +0200 Subject: [PATCH 0555/1649] net: mediatek: move the pending_work struct to the device generic struct The worker always touches both netdevs. It is ethernet core and not MAC specific. We only need one worker, which belongs into the ethernets core struct. Signed-off-by: John Crispin Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mediatek/mtk_eth_soc.c | 13 +++++-------- drivers/net/ethernet/mediatek/mtk_eth_soc.h | 4 ++-- 2 files changed, 7 insertions(+), 10 deletions(-) diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index cd5d0c97f0ce..eb0d5544787a 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -1193,7 +1193,7 @@ static void mtk_tx_timeout(struct net_device *dev) eth->netdev[mac->id]->stats.tx_errors++; netif_err(eth, tx_err, dev, "transmit timed out\n"); - schedule_work(&mac->pending_work); + schedule_work(ð->pending_work); } static irqreturn_t mtk_handle_irq(int irq, void *_eth) @@ -1430,8 +1430,7 @@ static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) static void mtk_pending_work(struct work_struct *work) { - struct mtk_mac *mac = container_of(work, struct mtk_mac, pending_work); - struct mtk_eth *eth = mac->hw; + struct mtk_eth *eth = container_of(work, struct mtk_eth, pending_work); int err, i; unsigned long restart = 0; @@ -1439,7 +1438,7 @@ static void mtk_pending_work(struct work_struct *work) /* stop all devices to make sure that dma is properly shut down */ for (i = 0; i < MTK_MAC_COUNT; i++) { - if (!netif_oper_up(eth->netdev[i])) + if (!eth->netdev[i]) continue; mtk_stop(eth->netdev[i]); __set_bit(i, &restart); @@ -1464,15 +1463,13 @@ static int mtk_cleanup(struct mtk_eth *eth) int i; for (i = 0; i < MTK_MAC_COUNT; i++) { - struct mtk_mac *mac = netdev_priv(eth->netdev[i]); - if (!eth->netdev[i]) continue; unregister_netdev(eth->netdev[i]); free_netdev(eth->netdev[i]); - cancel_work_sync(&mac->pending_work); } + cancel_work_sync(ð->pending_work); return 0; } @@ -1660,7 +1657,6 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np) mac->id = id; mac->hw = eth; mac->of_node = np; - INIT_WORK(&mac->pending_work, mtk_pending_work); mac->hw_stats = devm_kzalloc(eth->dev, sizeof(*mac->hw_stats), @@ -1762,6 +1758,7 @@ static int mtk_probe(struct platform_device *pdev) eth->dev = &pdev->dev; eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE); + INIT_WORK(ð->pending_work, mtk_pending_work); err = mtk_hw_init(eth); if (err) diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h index 48a5292c8ed8..eed626d56ea4 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h @@ -363,6 +363,7 @@ struct mtk_rx_ring { * @clk_gp1: The gmac1 clock * @clk_gp2: The gmac2 clock * @mii_bus: If there is a bus we need to create an instance for it + * @pending_work: The workqueue used to reset the dma ring */ struct mtk_eth { @@ -389,6 +390,7 @@ struct mtk_eth { struct clk *clk_gp1; struct clk *clk_gp2; struct mii_bus *mii_bus; + struct work_struct pending_work; }; /* struct mtk_mac - the structure that holds the info about the MACs of the @@ -398,7 +400,6 @@ struct mtk_eth { * @hw: Backpointer to our main datastruture * @hw_stats: Packet statistics counter * @phy_dev: The attached PHY if available - * @pending_work: The workqueue used to reset the dma ring */ struct mtk_mac { int id; @@ -406,7 +407,6 @@ struct mtk_mac { struct mtk_eth *hw; struct mtk_hw_stats *hw_stats; struct phy_device *phy_dev; - struct work_struct pending_work; }; /* the struct describing the SoC. 
these are declared in the soc_xyz.c files */ From 369f04531f80c5e5d194a193a2b9e7676a77328d Mon Sep 17 00:00:00 2001 From: John Crispin Date: Fri, 8 Apr 2016 00:54:11 +0200 Subject: [PATCH 0556/1649] net: mediatek: do not set the QID field in the TX DMA descriptors The QID field gets set to the mac id. This made the DMA linked list queue the traffic of each MAC on a different internal queue. However during long term testing we found that this will cause traffic stalls as the multi queue setup requires a more complete initialisation which is not part of the upstream driver yet. This patch removes the code setting the QID field, resulting in all traffic ending up in queue 0 which works without any special setup. Signed-off-by: John Crispin Signed-off-by: David S. Miller --- drivers/net/ethernet/mediatek/mtk_eth_soc.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index eb0d5544787a..c984462fad2a 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -603,8 +603,7 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev, WRITE_ONCE(txd->txd1, mapped_addr); WRITE_ONCE(txd->txd3, (TX_DMA_SWC | TX_DMA_PLEN0(frag_map_size) | - last_frag * TX_DMA_LS0) | - mac->id); + last_frag * TX_DMA_LS0)); WRITE_ONCE(txd->txd4, 0); tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC; From bc405cd69a728d0a82bae8395fe43ec7b0afd1c6 Mon Sep 17 00:00:00 2001 From: Alexandre Macabies Date: Tue, 12 Apr 2016 18:53:00 +0200 Subject: [PATCH 0557/1649] ieee802154: add security bit check function ieee802154_is_secen checks if the 802.15.4 security bit is set in the frame control field. Signed-off-by: Alexander Aring Signed-off-by: Alexandre Macabies Reviewed-by: Stefan Schmidt Acked-by: Alan Ott Signed-off-by: Marcel Holtmann --- include/linux/ieee802154.h | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/include/linux/ieee802154.h b/include/linux/ieee802154.h index d3e415674dac..56090f195339 100644 --- a/include/linux/ieee802154.h +++ b/include/linux/ieee802154.h @@ -218,6 +218,7 @@ enum { /* frame control handling */ #define IEEE802154_FCTL_FTYPE 0x0003 #define IEEE802154_FCTL_ACKREQ 0x0020 +#define IEEE802154_FCTL_SECEN 0x0004 #define IEEE802154_FCTL_INTRA_PAN 0x0040 #define IEEE802154_FTYPE_DATA 0x0001 @@ -232,6 +233,15 @@ static inline int ieee802154_is_data(__le16 fc) cpu_to_le16(IEEE802154_FTYPE_DATA); } +/** + * ieee802154_is_secen - check if Security bit is set + * @fc: frame control bytes in little-endian byteorder + */ +static inline bool ieee802154_is_secen(__le16 fc) +{ + return fc & cpu_to_le16(IEEE802154_FCTL_SECEN); +} + /** * ieee802154_is_ackreq - check if acknowledgment request bit is set * @fc: frame control bytes in little-endian byteorder From 5a62f3c6de73bd0b4ac40a33674d20f1bfb586d5 Mon Sep 17 00:00:00 2001 From: Alexandre Macabies Date: Tue, 12 Apr 2016 18:53:01 +0200 Subject: [PATCH 0558/1649] mrf24j40: fix security-enabled processing on inbound frames When receiving a security-enabled IEEE 802.15.4 frame, the MRF24J40 triggers a SECIF interrupt that needs to be handled for RX processing to keep functioning properly. This patch enables the SECIF interrupt and makes the MRF ignores all hardware processing of security-enabled frames, that is handled by the ieee802154 stack instead. 
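A hedged sketch of how the ieee802154_is_secen() helper introduced two patches above can be used on the receive path to decide that a frame needs the stack's software security handling instead of the radio's. The wrapper function below is illustrative only and is not part of these patches; the frame control field is the first two octets of the MHR and is carried little-endian on air, which matches the helper's __le16 argument.

	/* Illustrative only: ieee802154_is_secen() is real, the wrapper is not. */
	static bool example_frame_is_secured(struct sk_buff *skb)
	{
		__le16 fc;

		if (!pskb_may_pull(skb, sizeof(fc)))
			return false;	/* runt frame, no frame control */

		memcpy(&fc, skb->data, sizeof(fc));
		return ieee802154_is_secen(fc);
	}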
Signed-off-by: Alexander Aring Signed-off-by: Alexandre Macabies Reviewed-by: Stefan Schmidt Acked-by: Alan Ott Signed-off-by: Marcel Holtmann --- drivers/net/ieee802154/mrf24j40.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/drivers/net/ieee802154/mrf24j40.c b/drivers/net/ieee802154/mrf24j40.c index 764a2bddfaee..adc67be2e04f 100644 --- a/drivers/net/ieee802154/mrf24j40.c +++ b/drivers/net/ieee802154/mrf24j40.c @@ -85,10 +85,13 @@ #define REG_INTSTAT 0x31 /* Interrupt Status */ #define BIT_TXNIF BIT(0) #define BIT_RXIF BIT(3) +#define BIT_SECIF BIT(4) +#define BIT_SECIGNORE BIT(7) #define REG_INTCON 0x32 /* Interrupt Control */ #define BIT_TXNIE BIT(0) #define BIT_RXIE BIT(3) +#define BIT_SECIE BIT(4) #define REG_GPIO 0x33 /* GPIO */ #define REG_TRISGPIO 0x34 /* GPIO direction */ @@ -616,7 +619,7 @@ static int mrf24j40_start(struct ieee802154_hw *hw) /* Clear TXNIE and RXIE. Enable interrupts */ return regmap_update_bits(devrec->regmap_short, REG_INTCON, - BIT_TXNIE | BIT_RXIE, 0); + BIT_TXNIE | BIT_RXIE | BIT_SECIE, 0); } static void mrf24j40_stop(struct ieee802154_hw *hw) @@ -1025,6 +1028,11 @@ static void mrf24j40_intstat_complete(void *context) enable_irq(devrec->spi->irq); + /* Ignore Rx security decryption */ + if (intstat & BIT_SECIF) + regmap_write_async(devrec->regmap_short, REG_SECCON0, + BIT_SECIGNORE); + /* Check for TX complete */ if (intstat & BIT_TXNIF) ieee802154_xmit_complete(devrec->hw, devrec->tx_skb, false); From 87820441c402f5fde42a84ae96ffb4cbd4109510 Mon Sep 17 00:00:00 2001 From: Alexandre Macabies Date: Tue, 12 Apr 2016 18:53:02 +0200 Subject: [PATCH 0559/1649] mrf24j40: apply the security-enabled bit on secured outbound frames We set the TXNSECEN bit of register TXNCON to on when transmitting a security-enabled frame, as described in section 3.12.2 of the MRF datasheet. Signed-off-by: Alexander Aring Signed-off-by: Alexandre Macabies Reviewed-by: Stefan Schmidt Acked-by: Alan Ott Signed-off-by: Marcel Holtmann --- drivers/net/ieee802154/mrf24j40.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/net/ieee802154/mrf24j40.c b/drivers/net/ieee802154/mrf24j40.c index adc67be2e04f..f446db828561 100644 --- a/drivers/net/ieee802154/mrf24j40.c +++ b/drivers/net/ieee802154/mrf24j40.c @@ -61,6 +61,7 @@ #define REG_TXBCON0 0x1A #define REG_TXNCON 0x1B /* Transmit Normal FIFO Control */ #define BIT_TXNTRIG BIT(0) +#define BIT_TXNSECEN BIT(1) #define BIT_TXNACKREQ BIT(2) #define REG_TXG1CON 0x1C @@ -551,6 +552,9 @@ static void write_tx_buf_complete(void *context) u8 val = BIT_TXNTRIG; int ret; + if (ieee802154_is_secen(fc)) + val |= BIT_TXNSECEN; + if (ieee802154_is_ackreq(fc)) val |= BIT_TXNACKREQ; From b7594148c73cb506487b5f00a6574beceea0e3a0 Mon Sep 17 00:00:00 2001 From: Alexander Aring Date: Mon, 11 Apr 2016 11:04:14 +0200 Subject: [PATCH 0560/1649] ieee802154: cleanups for ieee802154.h This patch removes some const from non-pointer types and fixes the function name for the ieee802154_is_valid_extended_unicast_addr comment. 
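The const removals are purely cosmetic: a top-level const on a by-value parameter only constrains the function's local copy and is not part of the function type, so callers are unaffected either way. The illustration below is not from the patch; it mirrors ieee802154_is_valid_psdu_len() to show that the two spellings declare the same function.

	/* Both declarations name the same function type; the const only
	 * matters inside the body, where it forbids reassigning the local
	 * copy of "len".
	 */
	static inline bool example_is_valid_psdu_len(u8 len);		/* as after the patch */
	static inline bool example_is_valid_psdu_len(const u8 len)	/* as before, same type */
	{
		return len == IEEE802154_ACK_PSDU_LEN ||
		       (len >= IEEE802154_MIN_PSDU_LEN && len <= IEEE802154_MTU);
	}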
Reviewed-by: Stefan Schmidt Signed-off-by: Alexander Aring Acked-by: Jukka Rissanen Signed-off-by: Marcel Holtmann --- include/linux/ieee802154.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/include/linux/ieee802154.h b/include/linux/ieee802154.h index 56090f195339..9d84a924b747 100644 --- a/include/linux/ieee802154.h +++ b/include/linux/ieee802154.h @@ -270,17 +270,17 @@ static inline bool ieee802154_is_intra_pan(__le16 fc) * * @len: psdu len with (MHR + payload + MFR) */ -static inline bool ieee802154_is_valid_psdu_len(const u8 len) +static inline bool ieee802154_is_valid_psdu_len(u8 len) { return (len == IEEE802154_ACK_PSDU_LEN || (len >= IEEE802154_MIN_PSDU_LEN && len <= IEEE802154_MTU)); } /** - * ieee802154_is_valid_psdu_len - check if extended addr is valid + * ieee802154_is_valid_extended_unicast_addr - check if extended addr is valid * @addr: extended addr to check */ -static inline bool ieee802154_is_valid_extended_unicast_addr(const __le64 addr) +static inline bool ieee802154_is_valid_extended_unicast_addr(__le64 addr) { /* Bail out if the address is all zero, or if the group * address bit is set. From 118a5cf8ae236cdfa1eb4f21530843a8494722ef Mon Sep 17 00:00:00 2001 From: Alexander Aring Date: Mon, 11 Apr 2016 11:04:15 +0200 Subject: [PATCH 0561/1649] ieee802154: add short address helpers This patch introduce some short address handling functionality into ieee802154 headers. Reviewed-by: Stefan Schmidt Signed-off-by: Alexander Aring Acked-by: Jukka Rissanen Signed-off-by: Marcel Holtmann --- include/linux/ieee802154.h | 29 +++++++++++++++++++++++++++++ include/net/mac802154.h | 10 ++++++++++ 2 files changed, 39 insertions(+) diff --git a/include/linux/ieee802154.h b/include/linux/ieee802154.h index 9d84a924b747..acedbb68a5a3 100644 --- a/include/linux/ieee802154.h +++ b/include/linux/ieee802154.h @@ -47,6 +47,7 @@ #define IEEE802154_ADDR_SHORT_UNSPEC 0xfffe #define IEEE802154_EXTENDED_ADDR_LEN 8 +#define IEEE802154_SHORT_ADDR_LEN 2 #define IEEE802154_LIFS_PERIOD 40 #define IEEE802154_SIFS_PERIOD 12 @@ -289,6 +290,34 @@ static inline bool ieee802154_is_valid_extended_unicast_addr(__le64 addr) !(addr & cpu_to_le64(0x0100000000000000ULL))); } +/** + * ieee802154_is_broadcast_short_addr - check if short addr is broadcast + * @addr: short addr to check + */ +static inline bool ieee802154_is_broadcast_short_addr(__le16 addr) +{ + return (addr == cpu_to_le16(IEEE802154_ADDR_SHORT_BROADCAST)); +} + +/** + * ieee802154_is_unspec_short_addr - check if short addr is unspecified + * @addr: short addr to check + */ +static inline bool ieee802154_is_unspec_short_addr(__le16 addr) +{ + return (addr == cpu_to_le16(IEEE802154_ADDR_SHORT_UNSPEC)); +} + +/** + * ieee802154_is_valid_src_short_addr - check if source short address is valid + * @addr: short addr to check + */ +static inline bool ieee802154_is_valid_src_short_addr(__le16 addr) +{ + return !(ieee802154_is_broadcast_short_addr(addr) || + ieee802154_is_unspec_short_addr(addr)); +} + /** * ieee802154_random_extended_addr - generates a random extended address * @addr: extended addr pointer to place the random address diff --git a/include/net/mac802154.h b/include/net/mac802154.h index 6cd7a70706a9..e465c8551ac3 100644 --- a/include/net/mac802154.h +++ b/include/net/mac802154.h @@ -287,6 +287,16 @@ static inline void ieee802154_le16_to_be16(void *be16_dst, const void *le16_src) put_unaligned_be16(get_unaligned_le16(le16_src), be16_dst); } +/** + * ieee802154_be16_to_le16 - copies and convert be16 to le16 + * 
@le16_dst: le16 destination pointer + * @be16_src: be16 source pointer + */ +static inline void ieee802154_be16_to_le16(void *le16_dst, const void *be16_src) +{ + put_unaligned_le16(get_unaligned_be16(be16_src), le16_dst); +} + /** * ieee802154_alloc_hw - Allocate a new hardware device * From 9e3b71f3436415f917eb569cde792b223cceebc0 Mon Sep 17 00:00:00 2001 From: Alexander Aring Date: Mon, 11 Apr 2016 11:04:16 +0200 Subject: [PATCH 0562/1649] nl802154: avoid address change while running lowpan The generation of autoconfigured IPv6 link-local addresses starts with a notification on interface up. To handle autoconfiguration according to RFC 4944 requires pan id and short address to generate an autoconfigured link-local address. This patch will avoid changing of these link-layer address configuration while lowpan interface is up. Reviewed-by: Stefan Schmidt Signed-off-by: Alexander Aring Acked-by: Jukka Rissanen Signed-off-by: Marcel Holtmann --- net/ieee802154/nl802154.c | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/net/ieee802154/nl802154.c b/net/ieee802154/nl802154.c index 16ef0d9f566e..5f1dc4bfeed5 100644 --- a/net/ieee802154/nl802154.c +++ b/net/ieee802154/nl802154.c @@ -1074,6 +1074,11 @@ static int nl802154_set_pan_id(struct sk_buff *skb, struct genl_info *info) if (netif_running(dev)) return -EBUSY; + if (wpan_dev->lowpan_dev) { + if (netif_running(wpan_dev->lowpan_dev)) + return -EBUSY; + } + /* don't change address fields on monitor */ if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR || !info->attrs[NL802154_ATTR_PAN_ID]) @@ -1105,6 +1110,11 @@ static int nl802154_set_short_addr(struct sk_buff *skb, struct genl_info *info) if (netif_running(dev)) return -EBUSY; + if (wpan_dev->lowpan_dev) { + if (netif_running(wpan_dev->lowpan_dev)) + return -EBUSY; + } + /* don't change address fields on monitor */ if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR || !info->attrs[NL802154_ATTR_SHORT_ADDR]) From 5a7f97e570fbe0ae7e6fd035f7af0cd6a1a9baa1 Mon Sep 17 00:00:00 2001 From: Alexander Aring Date: Mon, 11 Apr 2016 11:04:17 +0200 Subject: [PATCH 0563/1649] ieee802154: 6lowpan: fix short addr hash The short address is unique in combination with the panid. This patch will add the panid for generating an ieee802154 address hash. Reviewed-by: Stefan Schmidt Signed-off-by: Alexander Aring Acked-by: Jukka Rissanen Signed-off-by: Marcel Holtmann --- net/ieee802154/6lowpan/6lowpan_i.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/ieee802154/6lowpan/6lowpan_i.h b/net/ieee802154/6lowpan/6lowpan_i.h index b4e17a7c0df0..b4092a9ff24a 100644 --- a/net/ieee802154/6lowpan/6lowpan_i.h +++ b/net/ieee802154/6lowpan/6lowpan_i.h @@ -41,7 +41,7 @@ static inline u32 ieee802154_addr_hash(const struct ieee802154_addr *a) return (((__force u64)a->extended_addr) >> 32) ^ (((__force u64)a->extended_addr) & 0xffffffff); case IEEE802154_ADDR_SHORT: - return (__force u32)(a->short_addr); + return (__force u32)(a->short_addr + (a->pan_id << 16)); default: return 0; } From 2e4d60cbcfc2d16a2a2efaae3fe08f2e457d59a1 Mon Sep 17 00:00:00 2001 From: Alexander Aring Date: Mon, 11 Apr 2016 11:04:18 +0200 Subject: [PATCH 0564/1649] 6lowpan: change naming for lowpan private data This patch changes the naming for interface private data for lowpan intefaces. 
The current private data scheme is: ------------------------------------------------- | 6LoWPAN Generic | LinkLayer 6LoWPAN | ------------------------------------------------- the current naming schemes are: - 6LoWPAN Generic: - lowpan_priv - LinkLayer 6LoWPAN: - BTLE - lowpan_dev - 802.15.4: - lowpan_dev_info the new naming scheme with this patch will be: - 6LoWPAN Generic: - lowpan_dev - LinkLayer 6LoWPAN: - BTLE - lowpan_btle_dev - 802.15.4: - lowpan_802154_dev Signed-off-by: Alexander Aring Reviewed-by: Stefan Schmidt Acked-by: Jukka Rissanen Signed-off-by: Marcel Holtmann --- include/net/6lowpan.h | 6 +-- net/6lowpan/core.c | 8 +-- net/6lowpan/debugfs.c | 22 ++++---- net/6lowpan/iphc.c | 38 +++++++------- net/6lowpan/nhc_udp.c | 2 +- net/bluetooth/6lowpan.c | 82 ++++++++++++++++-------------- net/ieee802154/6lowpan/6lowpan_i.h | 6 +-- net/ieee802154/6lowpan/core.c | 6 +-- net/ieee802154/6lowpan/tx.c | 14 ++--- 9 files changed, 94 insertions(+), 90 deletions(-) diff --git a/include/net/6lowpan.h b/include/net/6lowpan.h index da3a77d25fcb..f204664f37ab 100644 --- a/include/net/6lowpan.h +++ b/include/net/6lowpan.h @@ -93,7 +93,7 @@ static inline bool lowpan_is_iphc(u8 dispatch) } #define LOWPAN_PRIV_SIZE(llpriv_size) \ - (sizeof(struct lowpan_priv) + llpriv_size) + (sizeof(struct lowpan_dev) + llpriv_size) enum lowpan_lltypes { LOWPAN_LLTYPE_BTLE, @@ -129,7 +129,7 @@ lowpan_iphc_ctx_is_compression(const struct lowpan_iphc_ctx *ctx) return test_bit(LOWPAN_IPHC_CTX_FLAG_COMPRESSION, &ctx->flags); } -struct lowpan_priv { +struct lowpan_dev { enum lowpan_lltypes lltype; struct dentry *iface_debugfs; struct lowpan_iphc_ctx_table ctx; @@ -139,7 +139,7 @@ struct lowpan_priv { }; static inline -struct lowpan_priv *lowpan_priv(const struct net_device *dev) +struct lowpan_dev *lowpan_dev(const struct net_device *dev) { return netdev_priv(dev); } diff --git a/net/6lowpan/core.c b/net/6lowpan/core.c index 34e44c0c0836..7a240b3eaed1 100644 --- a/net/6lowpan/core.c +++ b/net/6lowpan/core.c @@ -27,11 +27,11 @@ int lowpan_register_netdevice(struct net_device *dev, dev->mtu = IPV6_MIN_MTU; dev->priv_flags |= IFF_NO_QUEUE; - lowpan_priv(dev)->lltype = lltype; + lowpan_dev(dev)->lltype = lltype; - spin_lock_init(&lowpan_priv(dev)->ctx.lock); + spin_lock_init(&lowpan_dev(dev)->ctx.lock); for (i = 0; i < LOWPAN_IPHC_CTX_TABLE_SIZE; i++) - lowpan_priv(dev)->ctx.table[i].id = i; + lowpan_dev(dev)->ctx.table[i].id = i; ret = register_netdevice(dev); if (ret < 0) @@ -85,7 +85,7 @@ static int lowpan_event(struct notifier_block *unused, case NETDEV_DOWN: for (i = 0; i < LOWPAN_IPHC_CTX_TABLE_SIZE; i++) clear_bit(LOWPAN_IPHC_CTX_FLAG_ACTIVE, - &lowpan_priv(dev)->ctx.table[i].flags); + &lowpan_dev(dev)->ctx.table[i].flags); break; default: return NOTIFY_DONE; diff --git a/net/6lowpan/debugfs.c b/net/6lowpan/debugfs.c index 0793a8157472..acbaa3db493b 100644 --- a/net/6lowpan/debugfs.c +++ b/net/6lowpan/debugfs.c @@ -172,7 +172,7 @@ static const struct file_operations lowpan_ctx_pfx_fops = { static int lowpan_dev_debugfs_ctx_init(struct net_device *dev, struct dentry *ctx, u8 id) { - struct lowpan_priv *lpriv = lowpan_priv(dev); + struct lowpan_dev *ldev = lowpan_dev(dev); struct dentry *dentry, *root; char buf[32]; @@ -185,25 +185,25 @@ static int lowpan_dev_debugfs_ctx_init(struct net_device *dev, return -EINVAL; dentry = debugfs_create_file("active", 0644, root, - &lpriv->ctx.table[id], + &ldev->ctx.table[id], &lowpan_ctx_flag_active_fops); if (!dentry) return -EINVAL; dentry = debugfs_create_file("compression", 
0644, root, - &lpriv->ctx.table[id], + &ldev->ctx.table[id], &lowpan_ctx_flag_c_fops); if (!dentry) return -EINVAL; dentry = debugfs_create_file("prefix", 0644, root, - &lpriv->ctx.table[id], + &ldev->ctx.table[id], &lowpan_ctx_pfx_fops); if (!dentry) return -EINVAL; dentry = debugfs_create_file("prefix_len", 0644, root, - &lpriv->ctx.table[id], + &ldev->ctx.table[id], &lowpan_ctx_plen_fops); if (!dentry) return -EINVAL; @@ -247,21 +247,21 @@ static const struct file_operations lowpan_context_fops = { int lowpan_dev_debugfs_init(struct net_device *dev) { - struct lowpan_priv *lpriv = lowpan_priv(dev); + struct lowpan_dev *ldev = lowpan_dev(dev); struct dentry *contexts, *dentry; int ret, i; /* creating the root */ - lpriv->iface_debugfs = debugfs_create_dir(dev->name, lowpan_debugfs); - if (!lpriv->iface_debugfs) + ldev->iface_debugfs = debugfs_create_dir(dev->name, lowpan_debugfs); + if (!ldev->iface_debugfs) goto fail; - contexts = debugfs_create_dir("contexts", lpriv->iface_debugfs); + contexts = debugfs_create_dir("contexts", ldev->iface_debugfs); if (!contexts) goto remove_root; dentry = debugfs_create_file("show", 0644, contexts, - &lowpan_priv(dev)->ctx, + &lowpan_dev(dev)->ctx, &lowpan_context_fops); if (!dentry) goto remove_root; @@ -282,7 +282,7 @@ fail: void lowpan_dev_debugfs_exit(struct net_device *dev) { - debugfs_remove_recursive(lowpan_priv(dev)->iface_debugfs); + debugfs_remove_recursive(lowpan_dev(dev)->iface_debugfs); } int __init lowpan_debugfs_init(void) diff --git a/net/6lowpan/iphc.c b/net/6lowpan/iphc.c index 68c80f3c9add..5fb764e45d80 100644 --- a/net/6lowpan/iphc.c +++ b/net/6lowpan/iphc.c @@ -207,7 +207,7 @@ static inline void iphc_uncompress_802154_lladdr(struct in6_addr *ipaddr, static struct lowpan_iphc_ctx * lowpan_iphc_ctx_get_by_id(const struct net_device *dev, u8 id) { - struct lowpan_iphc_ctx *ret = &lowpan_priv(dev)->ctx.table[id]; + struct lowpan_iphc_ctx *ret = &lowpan_dev(dev)->ctx.table[id]; if (!lowpan_iphc_ctx_is_active(ret)) return NULL; @@ -219,7 +219,7 @@ static struct lowpan_iphc_ctx * lowpan_iphc_ctx_get_by_addr(const struct net_device *dev, const struct in6_addr *addr) { - struct lowpan_iphc_ctx *table = lowpan_priv(dev)->ctx.table; + struct lowpan_iphc_ctx *table = lowpan_dev(dev)->ctx.table; struct lowpan_iphc_ctx *ret = NULL; struct in6_addr addr_pfx; u8 addr_plen; @@ -263,7 +263,7 @@ static struct lowpan_iphc_ctx * lowpan_iphc_ctx_get_by_mcast_addr(const struct net_device *dev, const struct in6_addr *addr) { - struct lowpan_iphc_ctx *table = lowpan_priv(dev)->ctx.table; + struct lowpan_iphc_ctx *table = lowpan_dev(dev)->ctx.table; struct lowpan_iphc_ctx *ret = NULL; struct in6_addr addr_mcast, network_pfx = {}; int i; @@ -332,7 +332,7 @@ static int uncompress_addr(struct sk_buff *skb, const struct net_device *dev, case LOWPAN_IPHC_SAM_11: case LOWPAN_IPHC_DAM_11: fail = false; - switch (lowpan_priv(dev)->lltype) { + switch (lowpan_dev(dev)->lltype) { case LOWPAN_LLTYPE_IEEE802154: iphc_uncompress_802154_lladdr(ipaddr, lladdr); break; @@ -393,7 +393,7 @@ static int uncompress_ctx_addr(struct sk_buff *skb, case LOWPAN_IPHC_SAM_11: case LOWPAN_IPHC_DAM_11: fail = false; - switch (lowpan_priv(dev)->lltype) { + switch (lowpan_dev(dev)->lltype) { case LOWPAN_LLTYPE_IEEE802154: iphc_uncompress_802154_lladdr(ipaddr, lladdr); break; @@ -657,17 +657,17 @@ int lowpan_header_decompress(struct sk_buff *skb, const struct net_device *dev, } if (iphc1 & LOWPAN_IPHC_SAC) { - spin_lock_bh(&lowpan_priv(dev)->ctx.lock); + 
spin_lock_bh(&lowpan_dev(dev)->ctx.lock); ci = lowpan_iphc_ctx_get_by_id(dev, LOWPAN_IPHC_CID_SCI(cid)); if (!ci) { - spin_unlock_bh(&lowpan_priv(dev)->ctx.lock); + spin_unlock_bh(&lowpan_dev(dev)->ctx.lock); return -EINVAL; } pr_debug("SAC bit is set. Handle context based source address.\n"); err = uncompress_ctx_addr(skb, dev, ci, &hdr.saddr, iphc1 & LOWPAN_IPHC_SAM_MASK, saddr); - spin_unlock_bh(&lowpan_priv(dev)->ctx.lock); + spin_unlock_bh(&lowpan_dev(dev)->ctx.lock); } else { /* Source address uncompression */ pr_debug("source address stateless compression\n"); @@ -681,10 +681,10 @@ int lowpan_header_decompress(struct sk_buff *skb, const struct net_device *dev, switch (iphc1 & (LOWPAN_IPHC_M | LOWPAN_IPHC_DAC)) { case LOWPAN_IPHC_M | LOWPAN_IPHC_DAC: - spin_lock_bh(&lowpan_priv(dev)->ctx.lock); + spin_lock_bh(&lowpan_dev(dev)->ctx.lock); ci = lowpan_iphc_ctx_get_by_id(dev, LOWPAN_IPHC_CID_DCI(cid)); if (!ci) { - spin_unlock_bh(&lowpan_priv(dev)->ctx.lock); + spin_unlock_bh(&lowpan_dev(dev)->ctx.lock); return -EINVAL; } @@ -693,7 +693,7 @@ int lowpan_header_decompress(struct sk_buff *skb, const struct net_device *dev, err = lowpan_uncompress_multicast_ctx_daddr(skb, ci, &hdr.daddr, iphc1 & LOWPAN_IPHC_DAM_MASK); - spin_unlock_bh(&lowpan_priv(dev)->ctx.lock); + spin_unlock_bh(&lowpan_dev(dev)->ctx.lock); break; case LOWPAN_IPHC_M: /* multicast */ @@ -701,10 +701,10 @@ int lowpan_header_decompress(struct sk_buff *skb, const struct net_device *dev, iphc1 & LOWPAN_IPHC_DAM_MASK); break; case LOWPAN_IPHC_DAC: - spin_lock_bh(&lowpan_priv(dev)->ctx.lock); + spin_lock_bh(&lowpan_dev(dev)->ctx.lock); ci = lowpan_iphc_ctx_get_by_id(dev, LOWPAN_IPHC_CID_DCI(cid)); if (!ci) { - spin_unlock_bh(&lowpan_priv(dev)->ctx.lock); + spin_unlock_bh(&lowpan_dev(dev)->ctx.lock); return -EINVAL; } @@ -712,7 +712,7 @@ int lowpan_header_decompress(struct sk_buff *skb, const struct net_device *dev, pr_debug("DAC bit is set. 
Handle context based destination address.\n"); err = uncompress_ctx_addr(skb, dev, ci, &hdr.daddr, iphc1 & LOWPAN_IPHC_DAM_MASK, daddr); - spin_unlock_bh(&lowpan_priv(dev)->ctx.lock); + spin_unlock_bh(&lowpan_dev(dev)->ctx.lock); break; default: err = uncompress_addr(skb, dev, &hdr.daddr, @@ -736,7 +736,7 @@ int lowpan_header_decompress(struct sk_buff *skb, const struct net_device *dev, return err; } - switch (lowpan_priv(dev)->lltype) { + switch (lowpan_dev(dev)->lltype) { case LOWPAN_LLTYPE_IEEE802154: if (lowpan_802154_cb(skb)->d_size) hdr.payload_len = htons(lowpan_802154_cb(skb)->d_size - @@ -1033,7 +1033,7 @@ int lowpan_header_compress(struct sk_buff *skb, const struct net_device *dev, skb->data, skb->len); ipv6_daddr_type = ipv6_addr_type(&hdr->daddr); - spin_lock_bh(&lowpan_priv(dev)->ctx.lock); + spin_lock_bh(&lowpan_dev(dev)->ctx.lock); if (ipv6_daddr_type & IPV6_ADDR_MULTICAST) dci = lowpan_iphc_ctx_get_by_mcast_addr(dev, &hdr->daddr); else @@ -1042,15 +1042,15 @@ int lowpan_header_compress(struct sk_buff *skb, const struct net_device *dev, memcpy(&dci_entry, dci, sizeof(*dci)); cid |= dci->id; } - spin_unlock_bh(&lowpan_priv(dev)->ctx.lock); + spin_unlock_bh(&lowpan_dev(dev)->ctx.lock); - spin_lock_bh(&lowpan_priv(dev)->ctx.lock); + spin_lock_bh(&lowpan_dev(dev)->ctx.lock); sci = lowpan_iphc_ctx_get_by_addr(dev, &hdr->saddr); if (sci) { memcpy(&sci_entry, sci, sizeof(*sci)); cid |= (sci->id << 4); } - spin_unlock_bh(&lowpan_priv(dev)->ctx.lock); + spin_unlock_bh(&lowpan_dev(dev)->ctx.lock); /* if cid is zero it will be compressed */ if (cid) { diff --git a/net/6lowpan/nhc_udp.c b/net/6lowpan/nhc_udp.c index 69537a2eaab1..225d91906dfa 100644 --- a/net/6lowpan/nhc_udp.c +++ b/net/6lowpan/nhc_udp.c @@ -91,7 +91,7 @@ static int udp_uncompress(struct sk_buff *skb, size_t needed) * here, we obtain the hint from the remaining size of the * frame */ - switch (lowpan_priv(skb->dev)->lltype) { + switch (lowpan_dev(skb->dev)->lltype) { case LOWPAN_LLTYPE_IEEE802154: if (lowpan_802154_cb(skb)->d_size) uh.len = htons(lowpan_802154_cb(skb)->d_size - diff --git a/net/bluetooth/6lowpan.c b/net/bluetooth/6lowpan.c index 8a4cc2f7f0db..38e82ddd7ccd 100644 --- a/net/bluetooth/6lowpan.c +++ b/net/bluetooth/6lowpan.c @@ -68,7 +68,7 @@ struct lowpan_peer { struct in6_addr peer_addr; }; -struct lowpan_dev { +struct lowpan_btle_dev { struct list_head list; struct hci_dev *hdev; @@ -80,18 +80,21 @@ struct lowpan_dev { struct delayed_work notify_peers; }; -static inline struct lowpan_dev *lowpan_dev(const struct net_device *netdev) +static inline struct lowpan_btle_dev * +lowpan_btle_dev(const struct net_device *netdev) { - return (struct lowpan_dev *)lowpan_priv(netdev)->priv; + return (struct lowpan_btle_dev *)lowpan_dev(netdev)->priv; } -static inline void peer_add(struct lowpan_dev *dev, struct lowpan_peer *peer) +static inline void peer_add(struct lowpan_btle_dev *dev, + struct lowpan_peer *peer) { list_add_rcu(&peer->list, &dev->peers); atomic_inc(&dev->peer_count); } -static inline bool peer_del(struct lowpan_dev *dev, struct lowpan_peer *peer) +static inline bool peer_del(struct lowpan_btle_dev *dev, + struct lowpan_peer *peer) { list_del_rcu(&peer->list); kfree_rcu(peer, rcu); @@ -106,7 +109,7 @@ static inline bool peer_del(struct lowpan_dev *dev, struct lowpan_peer *peer) return false; } -static inline struct lowpan_peer *peer_lookup_ba(struct lowpan_dev *dev, +static inline struct lowpan_peer *peer_lookup_ba(struct lowpan_btle_dev *dev, bdaddr_t *ba, __u8 type) { struct lowpan_peer *peer; @@ 
-134,8 +137,8 @@ static inline struct lowpan_peer *peer_lookup_ba(struct lowpan_dev *dev, return NULL; } -static inline struct lowpan_peer *__peer_lookup_chan(struct lowpan_dev *dev, - struct l2cap_chan *chan) +static inline struct lowpan_peer * +__peer_lookup_chan(struct lowpan_btle_dev *dev, struct l2cap_chan *chan) { struct lowpan_peer *peer; @@ -147,8 +150,8 @@ static inline struct lowpan_peer *__peer_lookup_chan(struct lowpan_dev *dev, return NULL; } -static inline struct lowpan_peer *__peer_lookup_conn(struct lowpan_dev *dev, - struct l2cap_conn *conn) +static inline struct lowpan_peer * +__peer_lookup_conn(struct lowpan_btle_dev *dev, struct l2cap_conn *conn) { struct lowpan_peer *peer; @@ -160,7 +163,7 @@ static inline struct lowpan_peer *__peer_lookup_conn(struct lowpan_dev *dev, return NULL; } -static inline struct lowpan_peer *peer_lookup_dst(struct lowpan_dev *dev, +static inline struct lowpan_peer *peer_lookup_dst(struct lowpan_btle_dev *dev, struct in6_addr *daddr, struct sk_buff *skb) { @@ -220,7 +223,7 @@ static inline struct lowpan_peer *peer_lookup_dst(struct lowpan_dev *dev, static struct lowpan_peer *lookup_peer(struct l2cap_conn *conn) { - struct lowpan_dev *entry; + struct lowpan_btle_dev *entry; struct lowpan_peer *peer = NULL; rcu_read_lock(); @@ -236,10 +239,10 @@ static struct lowpan_peer *lookup_peer(struct l2cap_conn *conn) return peer; } -static struct lowpan_dev *lookup_dev(struct l2cap_conn *conn) +static struct lowpan_btle_dev *lookup_dev(struct l2cap_conn *conn) { - struct lowpan_dev *entry; - struct lowpan_dev *dev = NULL; + struct lowpan_btle_dev *entry; + struct lowpan_btle_dev *dev = NULL; rcu_read_lock(); @@ -270,10 +273,10 @@ static int iphc_decompress(struct sk_buff *skb, struct net_device *netdev, struct l2cap_chan *chan) { const u8 *saddr, *daddr; - struct lowpan_dev *dev; + struct lowpan_btle_dev *dev; struct lowpan_peer *peer; - dev = lowpan_dev(netdev); + dev = lowpan_btle_dev(netdev); rcu_read_lock(); peer = __peer_lookup_chan(dev, chan); @@ -375,7 +378,7 @@ drop: /* Packet from BT LE device */ static int chan_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb) { - struct lowpan_dev *dev; + struct lowpan_btle_dev *dev; struct lowpan_peer *peer; int err; @@ -431,13 +434,13 @@ static int setup_header(struct sk_buff *skb, struct net_device *netdev, bdaddr_t *peer_addr, u8 *peer_addr_type) { struct in6_addr ipv6_daddr; - struct lowpan_dev *dev; + struct lowpan_btle_dev *dev; struct lowpan_peer *peer; bdaddr_t addr, *any = BDADDR_ANY; u8 *daddr = any->b; int err, status = 0; - dev = lowpan_dev(netdev); + dev = lowpan_btle_dev(netdev); memcpy(&ipv6_daddr, &lowpan_cb(skb)->addr, sizeof(ipv6_daddr)); @@ -543,19 +546,19 @@ static int send_pkt(struct l2cap_chan *chan, struct sk_buff *skb, static int send_mcast_pkt(struct sk_buff *skb, struct net_device *netdev) { struct sk_buff *local_skb; - struct lowpan_dev *entry; + struct lowpan_btle_dev *entry; int err = 0; rcu_read_lock(); list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) { struct lowpan_peer *pentry; - struct lowpan_dev *dev; + struct lowpan_btle_dev *dev; if (entry->netdev != netdev) continue; - dev = lowpan_dev(entry->netdev); + dev = lowpan_btle_dev(entry->netdev); list_for_each_entry_rcu(pentry, &dev->peers, list) { int ret; @@ -723,8 +726,8 @@ static void ifdown(struct net_device *netdev) static void do_notify_peers(struct work_struct *work) { - struct lowpan_dev *dev = container_of(work, struct lowpan_dev, - notify_peers.work); + struct lowpan_btle_dev *dev = container_of(work, 
struct lowpan_btle_dev, + notify_peers.work); netdev_notify_peers(dev->netdev); /* send neighbour adv at startup */ } @@ -766,7 +769,7 @@ static void set_ip_addr_bits(u8 addr_type, u8 *addr) } static struct l2cap_chan *add_peer_chan(struct l2cap_chan *chan, - struct lowpan_dev *dev) + struct lowpan_btle_dev *dev) { struct lowpan_peer *peer; @@ -803,12 +806,12 @@ static struct l2cap_chan *add_peer_chan(struct l2cap_chan *chan, return peer->chan; } -static int setup_netdev(struct l2cap_chan *chan, struct lowpan_dev **dev) +static int setup_netdev(struct l2cap_chan *chan, struct lowpan_btle_dev **dev) { struct net_device *netdev; int err = 0; - netdev = alloc_netdev(LOWPAN_PRIV_SIZE(sizeof(struct lowpan_dev)), + netdev = alloc_netdev(LOWPAN_PRIV_SIZE(sizeof(struct lowpan_btle_dev)), IFACE_NAME_TEMPLATE, NET_NAME_UNKNOWN, netdev_setup); if (!netdev) @@ -820,7 +823,7 @@ static int setup_netdev(struct l2cap_chan *chan, struct lowpan_dev **dev) SET_NETDEV_DEV(netdev, &chan->conn->hcon->hdev->dev); SET_NETDEV_DEVTYPE(netdev, &bt_type); - *dev = lowpan_dev(netdev); + *dev = lowpan_btle_dev(netdev); (*dev)->netdev = netdev; (*dev)->hdev = chan->conn->hcon->hdev; INIT_LIST_HEAD(&(*dev)->peers); @@ -853,7 +856,7 @@ out: static inline void chan_ready_cb(struct l2cap_chan *chan) { - struct lowpan_dev *dev; + struct lowpan_btle_dev *dev; dev = lookup_dev(chan->conn); @@ -890,8 +893,9 @@ static inline struct l2cap_chan *chan_new_conn_cb(struct l2cap_chan *pchan) static void delete_netdev(struct work_struct *work) { - struct lowpan_dev *entry = container_of(work, struct lowpan_dev, - delete_netdev); + struct lowpan_btle_dev *entry = container_of(work, + struct lowpan_btle_dev, + delete_netdev); lowpan_unregister_netdev(entry->netdev); @@ -900,8 +904,8 @@ static void delete_netdev(struct work_struct *work) static void chan_close_cb(struct l2cap_chan *chan) { - struct lowpan_dev *entry; - struct lowpan_dev *dev = NULL; + struct lowpan_btle_dev *entry; + struct lowpan_btle_dev *dev = NULL; struct lowpan_peer *peer; int err = -ENOENT; bool last = false, remove = true; @@ -921,7 +925,7 @@ static void chan_close_cb(struct l2cap_chan *chan) spin_lock(&devices_lock); list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) { - dev = lowpan_dev(entry->netdev); + dev = lowpan_btle_dev(entry->netdev); peer = __peer_lookup_chan(dev, chan); if (peer) { last = peer_del(dev, peer); @@ -1131,7 +1135,7 @@ static int get_l2cap_conn(char *buf, bdaddr_t *addr, u8 *addr_type, static void disconnect_all_peers(void) { - struct lowpan_dev *entry; + struct lowpan_btle_dev *entry; struct lowpan_peer *peer, *tmp_peer, *new_peer; struct list_head peers; @@ -1291,7 +1295,7 @@ static ssize_t lowpan_control_write(struct file *fp, static int lowpan_control_show(struct seq_file *f, void *ptr) { - struct lowpan_dev *entry; + struct lowpan_btle_dev *entry; struct lowpan_peer *peer; spin_lock(&devices_lock); @@ -1322,7 +1326,7 @@ static const struct file_operations lowpan_control_fops = { static void disconnect_devices(void) { - struct lowpan_dev *entry, *tmp, *new_dev; + struct lowpan_btle_dev *entry, *tmp, *new_dev; struct list_head devices; INIT_LIST_HEAD(&devices); @@ -1360,7 +1364,7 @@ static int device_event(struct notifier_block *unused, unsigned long event, void *ptr) { struct net_device *netdev = netdev_notifier_info_to_dev(ptr); - struct lowpan_dev *entry; + struct lowpan_btle_dev *entry; if (netdev->type != ARPHRD_6LOWPAN) return NOTIFY_DONE; diff --git a/net/ieee802154/6lowpan/6lowpan_i.h b/net/ieee802154/6lowpan/6lowpan_i.h 
index b4092a9ff24a..b02b74de8ffa 100644 --- a/net/ieee802154/6lowpan/6lowpan_i.h +++ b/net/ieee802154/6lowpan/6lowpan_i.h @@ -48,15 +48,15 @@ static inline u32 ieee802154_addr_hash(const struct ieee802154_addr *a) } /* private device info */ -struct lowpan_dev_info { +struct lowpan_802154_dev { struct net_device *wdev; /* wpan device ptr */ u16 fragment_tag; }; static inline struct -lowpan_dev_info *lowpan_dev_info(const struct net_device *dev) +lowpan_802154_dev *lowpan_802154_dev(const struct net_device *dev) { - return (struct lowpan_dev_info *)lowpan_priv(dev)->priv; + return (struct lowpan_802154_dev *)lowpan_dev(dev)->priv; } int lowpan_frag_rcv(struct sk_buff *skb, const u8 frag_type); diff --git a/net/ieee802154/6lowpan/core.c b/net/ieee802154/6lowpan/core.c index 0023c9048812..dd085db8580e 100644 --- a/net/ieee802154/6lowpan/core.c +++ b/net/ieee802154/6lowpan/core.c @@ -148,7 +148,7 @@ static int lowpan_newlink(struct net *src_net, struct net_device *ldev, return -EBUSY; } - lowpan_dev_info(ldev)->wdev = wdev; + lowpan_802154_dev(ldev)->wdev = wdev; /* Set the lowpan hardware address to the wpan hardware address. */ memcpy(ldev->dev_addr, wdev->dev_addr, IEEE802154_ADDR_LEN); /* We need headroom for possible wpan_dev_hard_header call and tailroom @@ -173,7 +173,7 @@ static int lowpan_newlink(struct net *src_net, struct net_device *ldev, static void lowpan_dellink(struct net_device *ldev, struct list_head *head) { - struct net_device *wdev = lowpan_dev_info(ldev)->wdev; + struct net_device *wdev = lowpan_802154_dev(ldev)->wdev; ASSERT_RTNL(); @@ -184,7 +184,7 @@ static void lowpan_dellink(struct net_device *ldev, struct list_head *head) static struct rtnl_link_ops lowpan_link_ops __read_mostly = { .kind = "lowpan", - .priv_size = LOWPAN_PRIV_SIZE(sizeof(struct lowpan_dev_info)), + .priv_size = LOWPAN_PRIV_SIZE(sizeof(struct lowpan_802154_dev)), .setup = lowpan_setup, .newlink = lowpan_newlink, .dellink = lowpan_dellink, diff --git a/net/ieee802154/6lowpan/tx.c b/net/ieee802154/6lowpan/tx.c index d4353faced35..e459afd16bb3 100644 --- a/net/ieee802154/6lowpan/tx.c +++ b/net/ieee802154/6lowpan/tx.c @@ -84,7 +84,7 @@ static struct sk_buff* lowpan_alloc_frag(struct sk_buff *skb, int size, const struct ieee802154_hdr *master_hdr, bool frag1) { - struct net_device *wdev = lowpan_dev_info(skb->dev)->wdev; + struct net_device *wdev = lowpan_802154_dev(skb->dev)->wdev; struct sk_buff *frag; int rc; @@ -148,8 +148,8 @@ lowpan_xmit_fragmented(struct sk_buff *skb, struct net_device *ldev, int frag_cap, frag_len, payload_cap, rc; int skb_unprocessed, skb_offset; - frag_tag = htons(lowpan_dev_info(ldev)->fragment_tag); - lowpan_dev_info(ldev)->fragment_tag++; + frag_tag = htons(lowpan_802154_dev(ldev)->fragment_tag); + lowpan_802154_dev(ldev)->fragment_tag++; frag_hdr[0] = LOWPAN_DISPATCH_FRAG1 | ((dgram_size >> 8) & 0x07); frag_hdr[1] = dgram_size & 0xff; @@ -208,7 +208,7 @@ err: static int lowpan_header(struct sk_buff *skb, struct net_device *ldev, u16 *dgram_size, u16 *dgram_offset) { - struct wpan_dev *wpan_dev = lowpan_dev_info(ldev)->wdev->ieee802154_ptr; + struct wpan_dev *wpan_dev = lowpan_802154_dev(ldev)->wdev->ieee802154_ptr; struct ieee802154_addr sa, da; struct ieee802154_mac_cb *cb = mac_cb_init(skb); struct lowpan_addr_info info; @@ -248,8 +248,8 @@ static int lowpan_header(struct sk_buff *skb, struct net_device *ldev, cb->ackreq = wpan_dev->ackreq; } - return wpan_dev_hard_header(skb, lowpan_dev_info(ldev)->wdev, &da, &sa, - 0); + return wpan_dev_hard_header(skb, 
lowpan_802154_dev(ldev)->wdev, &da, + &sa, 0); } netdev_tx_t lowpan_xmit(struct sk_buff *skb, struct net_device *ldev) @@ -283,7 +283,7 @@ netdev_tx_t lowpan_xmit(struct sk_buff *skb, struct net_device *ldev) max_single = ieee802154_max_payload(&wpan_hdr); if (skb_tail_pointer(skb) - skb_network_header(skb) <= max_single) { - skb->dev = lowpan_dev_info(ldev)->wdev; + skb->dev = lowpan_802154_dev(ldev)->wdev; ldev->stats.tx_packets++; ldev->stats.tx_bytes += dgram_size; return dev_queue_xmit(skb); From 353c224e28eb73e65720e5b2be224052569c0764 Mon Sep 17 00:00:00 2001 From: Alexander Aring Date: Mon, 11 Apr 2016 11:04:19 +0200 Subject: [PATCH 0565/1649] 6lowpan: move lowpan_802154_dev to 6lowpan This patch moves the 802.15.4 link layer specific structures to generic 6lowpan. This is necessary for special 802.15.4 6lowpan handling in 6lowpan generic layer. Reviewed-by: Stefan Schmidt Signed-off-by: Alexander Aring Acked-by: Jukka Rissanen Signed-off-by: Marcel Holtmann --- include/net/6lowpan.h | 12 ++++++++++++ net/ieee802154/6lowpan/6lowpan_i.h | 12 ------------ 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/include/net/6lowpan.h b/include/net/6lowpan.h index f204664f37ab..a0c01f55e0d3 100644 --- a/include/net/6lowpan.h +++ b/include/net/6lowpan.h @@ -144,6 +144,18 @@ struct lowpan_dev *lowpan_dev(const struct net_device *dev) return netdev_priv(dev); } +/* private device info */ +struct lowpan_802154_dev { + struct net_device *wdev; /* wpan device ptr */ + u16 fragment_tag; +}; + +static inline struct +lowpan_802154_dev *lowpan_802154_dev(const struct net_device *dev) +{ + return (struct lowpan_802154_dev *)lowpan_dev(dev)->priv; +} + struct lowpan_802154_cb { u16 d_tag; unsigned int d_size; diff --git a/net/ieee802154/6lowpan/6lowpan_i.h b/net/ieee802154/6lowpan/6lowpan_i.h index b02b74de8ffa..5ac778962e4e 100644 --- a/net/ieee802154/6lowpan/6lowpan_i.h +++ b/net/ieee802154/6lowpan/6lowpan_i.h @@ -47,18 +47,6 @@ static inline u32 ieee802154_addr_hash(const struct ieee802154_addr *a) } } -/* private device info */ -struct lowpan_802154_dev { - struct net_device *wdev; /* wpan device ptr */ - u16 fragment_tag; -}; - -static inline struct -lowpan_802154_dev *lowpan_802154_dev(const struct net_device *dev) -{ - return (struct lowpan_802154_dev *)lowpan_dev(dev)->priv; -} - int lowpan_frag_rcv(struct sk_buff *skb, const u8 frag_type); void lowpan_net_frag_exit(void); int lowpan_net_frag_init(void); From 7115a968b75e9f81f6f8f45b2f97b1b43e024703 Mon Sep 17 00:00:00 2001 From: Alexander Aring Date: Mon, 11 Apr 2016 11:04:20 +0200 Subject: [PATCH 0566/1649] 6lowpan: iphc: rename add lowpan prefix This patch adds a lowpan prefix to each functions which doesn't have such prefix currently. 
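For reference, the rename amounts to the following mapping; every name below is taken verbatim from the diff that follows, nothing new is introduced:

/* net/6lowpan/iphc.c, before -> after this patch:
 *   iphc_uncompress_eui64_lladdr()  -> lowpan_iphc_uncompress_eui64_lladdr()
 *   iphc_uncompress_802154_lladdr() -> lowpan_iphc_uncompress_802154_lladdr()
 *   uncompress_addr()               -> lowpan_iphc_uncompress_addr()
 *   uncompress_ctx_addr()           -> lowpan_iphc_uncompress_ctx_addr()
 */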
Reviewed-by: Stefan Schmidt Signed-off-by: Alexander Aring Acked-by: Jukka Rissanen Signed-off-by: Marcel Holtmann --- net/6lowpan/iphc.c | 56 +++++++++++++++++++++++++--------------------- 1 file changed, 31 insertions(+), 25 deletions(-) diff --git a/net/6lowpan/iphc.c b/net/6lowpan/iphc.c index 5fb764e45d80..66b41395ab80 100644 --- a/net/6lowpan/iphc.c +++ b/net/6lowpan/iphc.c @@ -156,8 +156,8 @@ #define LOWPAN_IPHC_CID_DCI(cid) (cid & 0x0f) #define LOWPAN_IPHC_CID_SCI(cid) ((cid & 0xf0) >> 4) -static inline void iphc_uncompress_eui64_lladdr(struct in6_addr *ipaddr, - const void *lladdr) +static inline void lowpan_iphc_uncompress_eui64_lladdr(struct in6_addr *ipaddr, + const void *lladdr) { /* fe:80::XXXX:XXXX:XXXX:XXXX * \_________________/ @@ -172,8 +172,9 @@ static inline void iphc_uncompress_eui64_lladdr(struct in6_addr *ipaddr, ipaddr->s6_addr[8] ^= 0x02; } -static inline void iphc_uncompress_802154_lladdr(struct in6_addr *ipaddr, - const void *lladdr) +static inline void +lowpan_iphc_uncompress_802154_lladdr(struct in6_addr *ipaddr, + const void *lladdr) { const struct ieee802154_addr *addr = lladdr; u8 eui64[EUI64_ADDR_LEN] = { }; @@ -181,7 +182,7 @@ static inline void iphc_uncompress_802154_lladdr(struct in6_addr *ipaddr, switch (addr->mode) { case IEEE802154_ADDR_LONG: ieee802154_le64_to_be64(eui64, &addr->extended_addr); - iphc_uncompress_eui64_lladdr(ipaddr, eui64); + lowpan_iphc_uncompress_eui64_lladdr(ipaddr, eui64); break; case IEEE802154_ADDR_SHORT: /* fe:80::ff:fe00:XXXX @@ -301,9 +302,10 @@ lowpan_iphc_ctx_get_by_mcast_addr(const struct net_device *dev, * * address_mode is the masked value for sam or dam value */ -static int uncompress_addr(struct sk_buff *skb, const struct net_device *dev, - struct in6_addr *ipaddr, u8 address_mode, - const void *lladdr) +static int lowpan_iphc_uncompress_addr(struct sk_buff *skb, + const struct net_device *dev, + struct in6_addr *ipaddr, + u8 address_mode, const void *lladdr) { bool fail; @@ -334,10 +336,10 @@ static int uncompress_addr(struct sk_buff *skb, const struct net_device *dev, fail = false; switch (lowpan_dev(dev)->lltype) { case LOWPAN_LLTYPE_IEEE802154: - iphc_uncompress_802154_lladdr(ipaddr, lladdr); + lowpan_iphc_uncompress_802154_lladdr(ipaddr, lladdr); break; default: - iphc_uncompress_eui64_lladdr(ipaddr, lladdr); + lowpan_iphc_uncompress_eui64_lladdr(ipaddr, lladdr); break; } break; @@ -360,11 +362,11 @@ static int uncompress_addr(struct sk_buff *skb, const struct net_device *dev, /* Uncompress address function for source context * based address(non-multicast). */ -static int uncompress_ctx_addr(struct sk_buff *skb, - const struct net_device *dev, - const struct lowpan_iphc_ctx *ctx, - struct in6_addr *ipaddr, u8 address_mode, - const void *lladdr) +static int lowpan_iphc_uncompress_ctx_addr(struct sk_buff *skb, + const struct net_device *dev, + const struct lowpan_iphc_ctx *ctx, + struct in6_addr *ipaddr, + u8 address_mode, const void *lladdr) { bool fail; @@ -395,10 +397,10 @@ static int uncompress_ctx_addr(struct sk_buff *skb, fail = false; switch (lowpan_dev(dev)->lltype) { case LOWPAN_LLTYPE_IEEE802154: - iphc_uncompress_802154_lladdr(ipaddr, lladdr); + lowpan_iphc_uncompress_802154_lladdr(ipaddr, lladdr); break; default: - iphc_uncompress_eui64_lladdr(ipaddr, lladdr); + lowpan_iphc_uncompress_eui64_lladdr(ipaddr, lladdr); break; } ipv6_addr_prefix_copy(ipaddr, &ctx->pfx, ctx->plen); @@ -665,14 +667,16 @@ int lowpan_header_decompress(struct sk_buff *skb, const struct net_device *dev, } pr_debug("SAC bit is set. 
Handle context based source address.\n"); - err = uncompress_ctx_addr(skb, dev, ci, &hdr.saddr, - iphc1 & LOWPAN_IPHC_SAM_MASK, saddr); + err = lowpan_iphc_uncompress_ctx_addr(skb, dev, ci, &hdr.saddr, + iphc1 & LOWPAN_IPHC_SAM_MASK, + saddr); spin_unlock_bh(&lowpan_dev(dev)->ctx.lock); } else { /* Source address uncompression */ pr_debug("source address stateless compression\n"); - err = uncompress_addr(skb, dev, &hdr.saddr, - iphc1 & LOWPAN_IPHC_SAM_MASK, saddr); + err = lowpan_iphc_uncompress_addr(skb, dev, &hdr.saddr, + iphc1 & LOWPAN_IPHC_SAM_MASK, + saddr); } /* Check on error of previous branch */ @@ -710,13 +714,15 @@ int lowpan_header_decompress(struct sk_buff *skb, const struct net_device *dev, /* Destination address context based uncompression */ pr_debug("DAC bit is set. Handle context based destination address.\n"); - err = uncompress_ctx_addr(skb, dev, ci, &hdr.daddr, - iphc1 & LOWPAN_IPHC_DAM_MASK, daddr); + err = lowpan_iphc_uncompress_ctx_addr(skb, dev, ci, &hdr.daddr, + iphc1 & LOWPAN_IPHC_DAM_MASK, + daddr); spin_unlock_bh(&lowpan_dev(dev)->ctx.lock); break; default: - err = uncompress_addr(skb, dev, &hdr.daddr, - iphc1 & LOWPAN_IPHC_DAM_MASK, daddr); + err = lowpan_iphc_uncompress_addr(skb, dev, &hdr.daddr, + iphc1 & LOWPAN_IPHC_DAM_MASK, + daddr); pr_debug("dest: stateless compression mode %d dest %pI6c\n", iphc1 & LOWPAN_IPHC_DAM_MASK, &hdr.daddr); break; From 2bc068c3d6e1212d09c11169c699560747ef8c2b Mon Sep 17 00:00:00 2001 From: Alexander Aring Date: Mon, 11 Apr 2016 11:04:21 +0200 Subject: [PATCH 0567/1649] 6lowpan: iphc: remove unnecessary zero data This patch removes unnecessary zero data for a stack variable. Signed-off-by: Alexander Aring Acked-by: Jukka Rissanen Signed-off-by: Marcel Holtmann --- net/6lowpan/iphc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/6lowpan/iphc.c b/net/6lowpan/iphc.c index 66b41395ab80..29992405c816 100644 --- a/net/6lowpan/iphc.c +++ b/net/6lowpan/iphc.c @@ -177,7 +177,7 @@ lowpan_iphc_uncompress_802154_lladdr(struct in6_addr *ipaddr, const void *lladdr) { const struct ieee802154_addr *addr = lladdr; - u8 eui64[EUI64_ADDR_LEN] = { }; + u8 eui64[EUI64_ADDR_LEN]; switch (addr->mode) { case IEEE802154_ADDR_LONG: From a5862f2aba4ba53d461450685a67ae252935ab94 Mon Sep 17 00:00:00 2001 From: Alexander Aring Date: Mon, 11 Apr 2016 11:04:22 +0200 Subject: [PATCH 0568/1649] 6lowpan: move eui64 uncompress function This function will be use in later functionality in other branches than generic 6lowpan, so we move it to the global 6lowpan header. 
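As a minimal usage sketch of the helper once it lives in the global header: the wrapper function and the example EUI-64 below are made up for illustration; only lowpan_iphc_uncompress_eui64_lladdr(), EUI64_ADDR_LEN and the fe80:: construction come from the code moved below.

#include <linux/in6.h>
#include <net/6lowpan.h>

/* Hypothetical caller: derive an IPv6 link-local address from an EUI-64.
 * The helper only writes bytes 0, 1 and 8..15, so start from all zeroes.
 */
static void example_eui64_to_link_local(void)
{
	const u8 eui64[EUI64_ADDR_LEN] = {
		0x00, 0x11, 0x22, 0xff, 0xfe, 0x33, 0x44, 0x55
	};
	struct in6_addr addr = IN6ADDR_ANY_INIT;

	lowpan_iphc_uncompress_eui64_lladdr(&addr, eui64);
	/* addr is now fe80::211:22ff:fe33:4455; the ^= 0x02 in the helper
	 * flips the universal/local bit as described in RFC 2464.
	 */
}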
Signed-off-by: Alexander Aring Reviewed-by: Stefan Schmidt Acked-by: Jukka Rissanen Signed-off-by: Marcel Holtmann --- include/net/6lowpan.h | 16 ++++++++++++++++ net/6lowpan/iphc.c | 16 ---------------- 2 files changed, 16 insertions(+), 16 deletions(-) diff --git a/include/net/6lowpan.h b/include/net/6lowpan.h index a0c01f55e0d3..04b877c5baff 100644 --- a/include/net/6lowpan.h +++ b/include/net/6lowpan.h @@ -169,6 +169,22 @@ struct lowpan_802154_cb *lowpan_802154_cb(const struct sk_buff *skb) return (struct lowpan_802154_cb *)skb->cb; } +static inline void lowpan_iphc_uncompress_eui64_lladdr(struct in6_addr *ipaddr, + const void *lladdr) +{ + /* fe:80::XXXX:XXXX:XXXX:XXXX + * \_________________/ + * hwaddr + */ + ipaddr->s6_addr[0] = 0xFE; + ipaddr->s6_addr[1] = 0x80; + memcpy(&ipaddr->s6_addr[8], lladdr, EUI64_ADDR_LEN); + /* second bit-flip (Universe/Local) + * is done according RFC2464 + */ + ipaddr->s6_addr[8] ^= 0x02; +} + #ifdef DEBUG /* print data in line */ static inline void raw_dump_inline(const char *caller, char *msg, diff --git a/net/6lowpan/iphc.c b/net/6lowpan/iphc.c index 29992405c816..dff15911bd04 100644 --- a/net/6lowpan/iphc.c +++ b/net/6lowpan/iphc.c @@ -156,22 +156,6 @@ #define LOWPAN_IPHC_CID_DCI(cid) (cid & 0x0f) #define LOWPAN_IPHC_CID_SCI(cid) ((cid & 0xf0) >> 4) -static inline void lowpan_iphc_uncompress_eui64_lladdr(struct in6_addr *ipaddr, - const void *lladdr) -{ - /* fe:80::XXXX:XXXX:XXXX:XXXX - * \_________________/ - * hwaddr - */ - ipaddr->s6_addr[0] = 0xFE; - ipaddr->s6_addr[1] = 0x80; - memcpy(&ipaddr->s6_addr[8], lladdr, EUI64_ADDR_LEN); - /* second bit-flip (Universe/Local) - * is done according RFC2464 - */ - ipaddr->s6_addr[8] ^= 0x02; -} - static inline void lowpan_iphc_uncompress_802154_lladdr(struct in6_addr *ipaddr, const void *lladdr) From 2732363181766533af65e9ced3dc04a30502c5d1 Mon Sep 17 00:00:00 2001 From: Alexander Aring Date: Mon, 11 Apr 2016 11:04:23 +0200 Subject: [PATCH 0569/1649] 6lowpan: add lowpan_is_ll function This patch adds the lowpan_is_ll function, which can be used to make a special 6lowpan linklayer handling for a specific 6lowpan linklayer type. Signed-off-by: Alexander Aring Reviewed-by: Stefan Schmidt Acked-by: Jukka Rissanen Signed-off-by: Marcel Holtmann --- net/6lowpan/6lowpan_i.h | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/net/6lowpan/6lowpan_i.h b/net/6lowpan/6lowpan_i.h index d16bb4b14aa1..97ecc27aeca6 100644 --- a/net/6lowpan/6lowpan_i.h +++ b/net/6lowpan/6lowpan_i.h @@ -3,6 +3,15 @@ #include +#include + +/* caller need to be sure it's dev->type is ARPHRD_6LOWPAN */ +static inline bool lowpan_is_ll(const struct net_device *dev, + enum lowpan_lltypes lltype) +{ + return lowpan_dev(dev)->lltype == lltype; +} + #ifdef CONFIG_6LOWPAN_DEBUGFS int lowpan_dev_debugfs_init(struct net_device *dev); void lowpan_dev_debugfs_exit(struct net_device *dev); From edc73417d8f33a1dd329295275168923298d9a7b Mon Sep 17 00:00:00 2001 From: Alexander Aring Date: Mon, 11 Apr 2016 11:04:24 +0200 Subject: [PATCH 0570/1649] 6lowpan: move mac802154 header In case of link-layer specific handling for 802.15.4 we need to cast to 802.15.4 sepcific structures. Simple add this header when include the 6lowpan header. 
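Together with lowpan_is_ll() from the previous patch this enables guarded, link-layer specific paths in the generic code. The function below is a made-up sketch of that pattern; only lowpan_is_ll(), LOWPAN_LLTYPE_IEEE802154 and lowpan_802154_cb() are taken from the existing code, and it assumes it sits inside net/6lowpan/ where the internal 6lowpan_i.h header is available.

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <net/6lowpan.h>
#include "6lowpan_i.h"	/* lowpan_is_ll(), internal to net/6lowpan/ */

/* Hypothetical helper: the caller must already know that dev->type is
 * ARPHRD_6LOWPAN; only touch 802.15.4 specific state once we know the
 * device really is an 802.15.4 6LoWPAN interface.
 */
static void example_802154_only(struct sk_buff *skb,
				const struct net_device *dev)
{
	if (!lowpan_is_ll(dev, LOWPAN_LLTYPE_IEEE802154))
		return;

	/* safe: the skb control block uses the 802.15.4 layout here */
	pr_debug("802.15.4 datagram size: %u\n",
		 lowpan_802154_cb(skb)->d_size);
}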
Signed-off-by: Alexander Aring Reviewed-by: Stefan Schmidt Acked-by: Jukka Rissanen Signed-off-by: Marcel Holtmann --- include/net/6lowpan.h | 3 +++ net/6lowpan/iphc.c | 3 --- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/include/net/6lowpan.h b/include/net/6lowpan.h index 04b877c5baff..da84cf920b78 100644 --- a/include/net/6lowpan.h +++ b/include/net/6lowpan.h @@ -58,6 +58,9 @@ #include #include +/* special link-layer handling */ +#include + #define EUI64_ADDR_LEN 8 #define LOWPAN_NHC_MAX_ID_LEN 1 diff --git a/net/6lowpan/iphc.c b/net/6lowpan/iphc.c index dff15911bd04..8501dd532fe1 100644 --- a/net/6lowpan/iphc.c +++ b/net/6lowpan/iphc.c @@ -53,9 +53,6 @@ #include #include -/* special link-layer handling */ -#include - #include "6lowpan_i.h" #include "nhc.h" From 7d45a04cbc2683f9552572850f1c711d9b96dd26 Mon Sep 17 00:00:00 2001 From: Jon Paul Maloy Date: Wed, 13 Apr 2016 11:45:47 -0400 Subject: [PATCH 0571/1649] tipc: remove remnants of old broadcast code We remove a couple of leftover fields in struct tipc_bearer. Those were used by the old broadcast implementation, and are not needed any longer. There is no functional changes in this commit. Acked-by: Ying Xue Signed-off-by: Jon Maloy Signed-off-by: David S. Miller --- net/tipc/bearer.h | 15 --------------- 1 file changed, 15 deletions(-) diff --git a/net/tipc/bearer.h b/net/tipc/bearer.h index e31820516774..f686e41b5abb 100644 --- a/net/tipc/bearer.h +++ b/net/tipc/bearer.h @@ -42,8 +42,6 @@ #include #define MAX_MEDIA 3 -#define MAX_NODES 4096 -#define WSIZE 32 /* Identifiers associated with TIPC message header media address info * - address info field is 32 bytes long @@ -61,16 +59,6 @@ #define TIPC_MEDIA_TYPE_IB 2 #define TIPC_MEDIA_TYPE_UDP 3 -/** - * struct tipc_node_map - set of node identifiers - * @count: # of nodes in set - * @map: bitmap of node identifiers that are in the set - */ -struct tipc_node_map { - u32 count; - u32 map[MAX_NODES / WSIZE]; -}; - /** * struct tipc_media_addr - destination address used by TIPC bearers * @value: address info (format defined by media) @@ -142,7 +130,6 @@ struct tipc_media { * @identity: array index of this bearer within TIPC bearer array * @link_req: ptr to (optional) structure making periodic link setup requests * @net_plane: network plane ('A' through 'H') currently associated with bearer - * @nodes: indicates which nodes in cluster can be reached through bearer * * Note: media-specific code is responsible for initialization of the fields * indicated below when a bearer is enabled; TIPC's generic bearer code takes @@ -163,8 +150,6 @@ struct tipc_bearer { u32 identity; struct tipc_link_req *link_req; char net_plane; - int node_cnt; - struct tipc_node_map nodes; }; struct tipc_bearer_names { From bbb8d793994c894eef2a48a35fac6de3c6b4fa93 Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Wed, 13 Apr 2016 02:40:39 +0200 Subject: [PATCH 0572/1649] net: dsa: Pass the dsa device to the switch drivers By passing a device structure to the switch devices, it allows them to use devm_* methods for resource management. Signed-off-by: Andrew Lunn Acked-by: Florian Fainelli Tested-by: Vivien Didelot Signed-off-by: David S. 
Miller --- drivers/net/dsa/bcm_sf2.c | 3 ++- drivers/net/dsa/mv88e6060.c | 3 ++- drivers/net/dsa/mv88e6123.c | 3 ++- drivers/net/dsa/mv88e6131.c | 3 ++- drivers/net/dsa/mv88e6171.c | 3 ++- drivers/net/dsa/mv88e6352.c | 3 ++- include/net/dsa.h | 3 ++- net/dsa/dsa.c | 7 ++++--- 8 files changed, 18 insertions(+), 10 deletions(-) diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index 780f22876538..18a79579141f 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -135,7 +135,8 @@ static int bcm_sf2_sw_get_sset_count(struct dsa_switch *ds) return BCM_SF2_STATS_SIZE; } -static char *bcm_sf2_sw_probe(struct device *host_dev, int sw_addr) +static char *bcm_sf2_sw_probe(struct device *dsa_dev, struct device *host_dev, + int sw_addr) { return "Broadcom Starfighter 2"; } diff --git a/drivers/net/dsa/mv88e6060.c b/drivers/net/dsa/mv88e6060.c index 0527f485c3dc..34d0f9fe19db 100644 --- a/drivers/net/dsa/mv88e6060.c +++ b/drivers/net/dsa/mv88e6060.c @@ -57,7 +57,8 @@ static int reg_write(struct dsa_switch *ds, int addr, int reg, u16 val) return __ret; \ }) -static char *mv88e6060_probe(struct device *host_dev, int sw_addr) +static char *mv88e6060_probe(struct device *dsa_dev, struct device *host_dev, + int sw_addr) { struct mii_bus *bus = dsa_host_dev_to_mii_bus(host_dev); int ret; diff --git a/drivers/net/dsa/mv88e6123.c b/drivers/net/dsa/mv88e6123.c index 69a6f79dcb10..ede4c6f0b31a 100644 --- a/drivers/net/dsa/mv88e6123.c +++ b/drivers/net/dsa/mv88e6123.c @@ -29,7 +29,8 @@ static const struct mv88e6xxx_switch_id mv88e6123_table[] = { { PORT_SWITCH_ID_6165_A2, "Marvell 88e6165 (A2)" }, }; -static char *mv88e6123_probe(struct device *host_dev, int sw_addr) +static char *mv88e6123_probe(struct device *dsa_dev, struct device *host_dev, + int sw_addr) { return mv88e6xxx_lookup_name(host_dev, sw_addr, mv88e6123_table, ARRAY_SIZE(mv88e6123_table)); diff --git a/drivers/net/dsa/mv88e6131.c b/drivers/net/dsa/mv88e6131.c index 24070287c2bc..bfadfd2cbb8d 100644 --- a/drivers/net/dsa/mv88e6131.c +++ b/drivers/net/dsa/mv88e6131.c @@ -25,7 +25,8 @@ static const struct mv88e6xxx_switch_id mv88e6131_table[] = { { PORT_SWITCH_ID_6185, "Marvell 88E6185" }, }; -static char *mv88e6131_probe(struct device *host_dev, int sw_addr) +static char *mv88e6131_probe(struct device *dsa_dev, struct device *host_dev, + int sw_addr) { return mv88e6xxx_lookup_name(host_dev, sw_addr, mv88e6131_table, ARRAY_SIZE(mv88e6131_table)); diff --git a/drivers/net/dsa/mv88e6171.c b/drivers/net/dsa/mv88e6171.c index 0e62f3b5bc81..fb35d3ac1644 100644 --- a/drivers/net/dsa/mv88e6171.c +++ b/drivers/net/dsa/mv88e6171.c @@ -24,7 +24,8 @@ static const struct mv88e6xxx_switch_id mv88e6171_table[] = { { PORT_SWITCH_ID_6351, "Marvell 88E6351" }, }; -static char *mv88e6171_probe(struct device *host_dev, int sw_addr) +static char *mv88e6171_probe(struct device *dsa_dev, struct device *host_dev, + int sw_addr) { return mv88e6xxx_lookup_name(host_dev, sw_addr, mv88e6171_table, ARRAY_SIZE(mv88e6171_table)); diff --git a/drivers/net/dsa/mv88e6352.c b/drivers/net/dsa/mv88e6352.c index 7f452e4a04a5..577ab2cfa944 100644 --- a/drivers/net/dsa/mv88e6352.c +++ b/drivers/net/dsa/mv88e6352.c @@ -37,7 +37,8 @@ static const struct mv88e6xxx_switch_id mv88e6352_table[] = { { PORT_SWITCH_ID_6352_A1, "Marvell 88E6352 (A1)" }, }; -static char *mv88e6352_probe(struct device *host_dev, int sw_addr) +static char *mv88e6352_probe(struct device *dsa_dev, struct device *host_dev, + int sw_addr) { return mv88e6xxx_lookup_name(host_dev, 
sw_addr, mv88e6352_table, ARRAY_SIZE(mv88e6352_table)); diff --git a/include/net/dsa.h b/include/net/dsa.h index 18d1be3ad62d..0f9f6f38f255 100644 --- a/include/net/dsa.h +++ b/include/net/dsa.h @@ -212,7 +212,8 @@ struct dsa_switch_driver { /* * Probing and setup. */ - char *(*probe)(struct device *host_dev, int sw_addr); + char *(*probe)(struct device *dsa_dev, struct device *host_dev, + int sw_addr); int (*setup)(struct dsa_switch *ds); int (*set_addr)(struct dsa_switch *ds, u8 *addr); u32 (*get_phy_flags)(struct dsa_switch *ds, int port); diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c index c28c47463b7e..c06275311cb2 100644 --- a/net/dsa/dsa.c +++ b/net/dsa/dsa.c @@ -51,7 +51,8 @@ void unregister_switch_driver(struct dsa_switch_driver *drv) EXPORT_SYMBOL_GPL(unregister_switch_driver); static struct dsa_switch_driver * -dsa_switch_probe(struct device *host_dev, int sw_addr, char **_name) +dsa_switch_probe(struct device *parent, struct device *host_dev, int sw_addr, + char **_name) { struct dsa_switch_driver *ret; struct list_head *list; @@ -66,7 +67,7 @@ dsa_switch_probe(struct device *host_dev, int sw_addr, char **_name) drv = list_entry(list, struct dsa_switch_driver, list); - name = drv->probe(host_dev, sw_addr); + name = drv->probe(parent, host_dev, sw_addr); if (name != NULL) { ret = drv; break; @@ -387,7 +388,7 @@ dsa_switch_setup(struct dsa_switch_tree *dst, int index, /* * Probe for switch model. */ - drv = dsa_switch_probe(host_dev, pd->sw_addr, &name); + drv = dsa_switch_probe(parent, host_dev, pd->sw_addr, &name); if (drv == NULL) { netdev_err(dst->master_netdev, "[%d]: could not detect attached switch\n", index); From 7543a6d5359e371ce9434955dbe6a79f548ea321 Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Wed, 13 Apr 2016 02:40:40 +0200 Subject: [PATCH 0573/1649] net: dsa: Have the switch driver allocate there own private memory Now the switch devices have a dev pointer, make use of it for allocating the drivers private data structures using a devm_kzalloc(). Signed-off-by: Andrew Lunn Acked-by: Florian Fainelli Tested-by: Vivien Didelot Signed-off-by: David S. 
Miller --- drivers/net/dsa/bcm_sf2.c | 10 ++++++++-- drivers/net/dsa/mv88e6060.c | 3 ++- drivers/net/dsa/mv88e6123.c | 17 ++++++++++++++--- drivers/net/dsa/mv88e6131.c | 17 ++++++++++++++--- drivers/net/dsa/mv88e6171.c | 17 ++++++++++++++--- drivers/net/dsa/mv88e6352.c | 17 ++++++++++++++--- drivers/net/dsa/mv88e6xxx.c | 6 ++++-- drivers/net/dsa/mv88e6xxx.h | 3 +++ include/net/dsa.h | 10 ++++++++-- net/dsa/dsa.c | 8 +++++--- 10 files changed, 86 insertions(+), 22 deletions(-) diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index 18a79579141f..7d62802a66d5 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -136,8 +136,15 @@ static int bcm_sf2_sw_get_sset_count(struct dsa_switch *ds) } static char *bcm_sf2_sw_probe(struct device *dsa_dev, struct device *host_dev, - int sw_addr) + int sw_addr, void **_priv) { + struct bcm_sf2_priv *priv; + + priv = devm_kzalloc(dsa_dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return NULL; + *_priv = priv; + return "Broadcom Starfighter 2"; } @@ -1363,7 +1370,6 @@ static int bcm_sf2_sw_set_wol(struct dsa_switch *ds, int port, static struct dsa_switch_driver bcm_sf2_switch_driver = { .tag_protocol = DSA_TAG_PROTO_BRCM, - .priv_size = sizeof(struct bcm_sf2_priv), .probe = bcm_sf2_sw_probe, .setup = bcm_sf2_sw_setup, .set_addr = bcm_sf2_sw_set_addr, diff --git a/drivers/net/dsa/mv88e6060.c b/drivers/net/dsa/mv88e6060.c index 34d0f9fe19db..41195f1417f1 100644 --- a/drivers/net/dsa/mv88e6060.c +++ b/drivers/net/dsa/mv88e6060.c @@ -58,11 +58,12 @@ static int reg_write(struct dsa_switch *ds, int addr, int reg, u16 val) }) static char *mv88e6060_probe(struct device *dsa_dev, struct device *host_dev, - int sw_addr) + int sw_addr, void **priv) { struct mii_bus *bus = dsa_host_dev_to_mii_bus(host_dev); int ret; + *priv = NULL; if (bus == NULL) return NULL; diff --git a/drivers/net/dsa/mv88e6123.c b/drivers/net/dsa/mv88e6123.c index ede4c6f0b31a..bcab3ef22448 100644 --- a/drivers/net/dsa/mv88e6123.c +++ b/drivers/net/dsa/mv88e6123.c @@ -30,10 +30,20 @@ static const struct mv88e6xxx_switch_id mv88e6123_table[] = { }; static char *mv88e6123_probe(struct device *dsa_dev, struct device *host_dev, - int sw_addr) + int sw_addr, void **priv) { - return mv88e6xxx_lookup_name(host_dev, sw_addr, mv88e6123_table, + struct mv88e6xxx_priv_state *ps; + char *name; + + name = mv88e6xxx_lookup_name(host_dev, sw_addr, mv88e6123_table, ARRAY_SIZE(mv88e6123_table)); + if (name) { + ps = devm_kzalloc(dsa_dev, sizeof(*ps), GFP_KERNEL); + if (!ps) + return NULL; + *priv = ps; + } + return name; } static int mv88e6123_setup_global(struct dsa_switch *ds) @@ -74,6 +84,8 @@ static int mv88e6123_setup(struct dsa_switch *ds) struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); int ret; + ps->ds = ds; + ret = mv88e6xxx_setup_common(ds); if (ret < 0) return ret; @@ -103,7 +115,6 @@ static int mv88e6123_setup(struct dsa_switch *ds) struct dsa_switch_driver mv88e6123_switch_driver = { .tag_protocol = DSA_TAG_PROTO_EDSA, - .priv_size = sizeof(struct mv88e6xxx_priv_state), .probe = mv88e6123_probe, .setup = mv88e6123_setup, .set_addr = mv88e6xxx_set_addr_indirect, diff --git a/drivers/net/dsa/mv88e6131.c b/drivers/net/dsa/mv88e6131.c index bfadfd2cbb8d..b9f9b009f65a 100644 --- a/drivers/net/dsa/mv88e6131.c +++ b/drivers/net/dsa/mv88e6131.c @@ -26,10 +26,20 @@ static const struct mv88e6xxx_switch_id mv88e6131_table[] = { }; static char *mv88e6131_probe(struct device *dsa_dev, struct device *host_dev, - int sw_addr) + int sw_addr, void **priv) { - return 
mv88e6xxx_lookup_name(host_dev, sw_addr, mv88e6131_table, + struct mv88e6xxx_priv_state *ps; + char *name; + + name = mv88e6xxx_lookup_name(host_dev, sw_addr, mv88e6131_table, ARRAY_SIZE(mv88e6131_table)); + if (name) { + ps = devm_kzalloc(dsa_dev, sizeof(*ps), GFP_KERNEL); + if (!ps) + return NULL; + *priv = ps; + } + return name; } static int mv88e6131_setup_global(struct dsa_switch *ds) @@ -92,6 +102,8 @@ static int mv88e6131_setup(struct dsa_switch *ds) struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); int ret; + ps->ds = ds; + ret = mv88e6xxx_setup_common(ds); if (ret < 0) return ret; @@ -160,7 +172,6 @@ mv88e6131_phy_write(struct dsa_switch *ds, struct dsa_switch_driver mv88e6131_switch_driver = { .tag_protocol = DSA_TAG_PROTO_DSA, - .priv_size = sizeof(struct mv88e6xxx_priv_state), .probe = mv88e6131_probe, .setup = mv88e6131_setup, .set_addr = mv88e6xxx_set_addr_direct, diff --git a/drivers/net/dsa/mv88e6171.c b/drivers/net/dsa/mv88e6171.c index fb35d3ac1644..15200928cecc 100644 --- a/drivers/net/dsa/mv88e6171.c +++ b/drivers/net/dsa/mv88e6171.c @@ -25,10 +25,20 @@ static const struct mv88e6xxx_switch_id mv88e6171_table[] = { }; static char *mv88e6171_probe(struct device *dsa_dev, struct device *host_dev, - int sw_addr) + int sw_addr, void **priv) { - return mv88e6xxx_lookup_name(host_dev, sw_addr, mv88e6171_table, + struct mv88e6xxx_priv_state *ps; + char *name; + + name = mv88e6xxx_lookup_name(host_dev, sw_addr, mv88e6171_table, ARRAY_SIZE(mv88e6171_table)); + if (name) { + ps = devm_kzalloc(dsa_dev, sizeof(*ps), GFP_KERNEL); + if (!ps) + return NULL; + *priv = ps; + } + return name; } static int mv88e6171_setup_global(struct dsa_switch *ds) @@ -70,6 +80,8 @@ static int mv88e6171_setup(struct dsa_switch *ds) struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); int ret; + ps->ds = ds; + ret = mv88e6xxx_setup_common(ds); if (ret < 0) return ret; @@ -89,7 +101,6 @@ static int mv88e6171_setup(struct dsa_switch *ds) struct dsa_switch_driver mv88e6171_switch_driver = { .tag_protocol = DSA_TAG_PROTO_EDSA, - .priv_size = sizeof(struct mv88e6xxx_priv_state), .probe = mv88e6171_probe, .setup = mv88e6171_setup, .set_addr = mv88e6xxx_set_addr_indirect, diff --git a/drivers/net/dsa/mv88e6352.c b/drivers/net/dsa/mv88e6352.c index 577ab2cfa944..7081a78a67e1 100644 --- a/drivers/net/dsa/mv88e6352.c +++ b/drivers/net/dsa/mv88e6352.c @@ -38,10 +38,20 @@ static const struct mv88e6xxx_switch_id mv88e6352_table[] = { }; static char *mv88e6352_probe(struct device *dsa_dev, struct device *host_dev, - int sw_addr) + int sw_addr, void **priv) { - return mv88e6xxx_lookup_name(host_dev, sw_addr, mv88e6352_table, + struct mv88e6xxx_priv_state *ps; + char *name; + + name = mv88e6xxx_lookup_name(host_dev, sw_addr, mv88e6352_table, ARRAY_SIZE(mv88e6352_table)); + if (name) { + ps = devm_kzalloc(dsa_dev, sizeof(*ps), GFP_KERNEL); + if (!ps) + return NULL; + *priv = ps; + } + return name; } static int mv88e6352_setup_global(struct dsa_switch *ds) @@ -82,6 +92,8 @@ static int mv88e6352_setup(struct dsa_switch *ds) struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); int ret; + ps->ds = ds; + ret = mv88e6xxx_setup_common(ds); if (ret < 0) return ret; @@ -303,7 +315,6 @@ static int mv88e6352_set_eeprom(struct dsa_switch *ds, struct dsa_switch_driver mv88e6352_switch_driver = { .tag_protocol = DSA_TAG_PROTO_EDSA, - .priv_size = sizeof(struct mv88e6xxx_priv_state), .probe = mv88e6352_probe, .setup = mv88e6352_setup, .set_addr = mv88e6xxx_set_addr_indirect, diff --git a/drivers/net/dsa/mv88e6xxx.c 
b/drivers/net/dsa/mv88e6xxx.c index 62320fca6712..085fc4a49eb2 100644 --- a/drivers/net/dsa/mv88e6xxx.c +++ b/drivers/net/dsa/mv88e6xxx.c @@ -281,7 +281,7 @@ static void mv88e6xxx_ppu_reenable_work(struct work_struct *ugly) ps = container_of(ugly, struct mv88e6xxx_priv_state, ppu_work); if (mutex_trylock(&ps->ppu_mutex)) { - struct dsa_switch *ds = ((struct dsa_switch *)ps) - 1; + struct dsa_switch *ds = ps->ds; if (mv88e6xxx_ppu_enable(ds) == 0) ps->ppu_disabled = 0; @@ -2322,7 +2322,7 @@ static void mv88e6xxx_bridge_work(struct work_struct *work) int port; ps = container_of(work, struct mv88e6xxx_priv_state, bridge_work); - ds = ((struct dsa_switch *)ps) - 1; + ds = ps->ds; mutex_lock(&ps->smi_mutex); @@ -2670,6 +2670,8 @@ int mv88e6xxx_setup_common(struct dsa_switch *ds) { struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); + ps->ds = ds; + mutex_init(&ps->smi_mutex); ps->id = REG_READ(REG_PORT(0), PORT_SWITCH_ID) & 0xfff0; diff --git a/drivers/net/dsa/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx.h index 236bcaa606e7..0322e3e0e7d9 100644 --- a/drivers/net/dsa/mv88e6xxx.h +++ b/drivers/net/dsa/mv88e6xxx.h @@ -397,6 +397,9 @@ struct mv88e6xxx_priv_port { }; struct mv88e6xxx_priv_state { + /* The dsa_switch this private structure is related to */ + struct dsa_switch *ds; + /* When using multi-chip addressing, this mutex protects * access to the indirect access registers. (In single-chip * mode, this mutex is effectively useless.) diff --git a/include/net/dsa.h b/include/net/dsa.h index 0f9f6f38f255..7bc7bd9b5ded 100644 --- a/include/net/dsa.h +++ b/include/net/dsa.h @@ -129,6 +129,12 @@ struct dsa_switch { struct dsa_switch_tree *dst; int index; + /* + * Give the switch driver somewhere to hang its private data + * structure. + */ + void *priv; + /* * Tagging protocol understood by this switch */ @@ -213,7 +219,7 @@ struct dsa_switch_driver { * Probing and setup. */ char *(*probe)(struct device *dsa_dev, struct device *host_dev, - int sw_addr); + int sw_addr, void **priv); int (*setup)(struct dsa_switch *ds); int (*set_addr)(struct dsa_switch *ds, u8 *addr); u32 (*get_phy_flags)(struct dsa_switch *ds, int port); @@ -342,7 +348,7 @@ struct mii_bus *dsa_host_dev_to_mii_bus(struct device *dev); static inline void *ds_to_priv(struct dsa_switch *ds) { - return (void *)(ds + 1); + return ds->priv; } static inline bool dsa_uses_tagged_protocol(struct dsa_switch_tree *dst) diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c index c06275311cb2..7ef8a92a9e39 100644 --- a/net/dsa/dsa.c +++ b/net/dsa/dsa.c @@ -52,7 +52,7 @@ EXPORT_SYMBOL_GPL(unregister_switch_driver); static struct dsa_switch_driver * dsa_switch_probe(struct device *parent, struct device *host_dev, int sw_addr, - char **_name) + char **_name, void **priv) { struct dsa_switch_driver *ret; struct list_head *list; @@ -67,7 +67,7 @@ dsa_switch_probe(struct device *parent, struct device *host_dev, int sw_addr, drv = list_entry(list, struct dsa_switch_driver, list); - name = drv->probe(parent, host_dev, sw_addr); + name = drv->probe(parent, host_dev, sw_addr, priv); if (name != NULL) { ret = drv; break; @@ -384,11 +384,12 @@ dsa_switch_setup(struct dsa_switch_tree *dst, int index, struct dsa_switch *ds; int ret; char *name; + void *priv; /* * Probe for switch model. 
*/ - drv = dsa_switch_probe(parent, host_dev, pd->sw_addr, &name); + drv = dsa_switch_probe(parent, host_dev, pd->sw_addr, &name, &priv); if (drv == NULL) { netdev_err(dst->master_netdev, "[%d]: could not detect attached switch\n", index); @@ -409,6 +410,7 @@ dsa_switch_setup(struct dsa_switch_tree *dst, int index, ds->index = index; ds->pd = pd; ds->drv = drv; + ds->priv = priv; ds->tag_protocol = drv->tag_protocol; ds->master_dev = host_dev; From 5feebd0a8a799fe076c606b7c3bc267ae8c4344a Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Wed, 13 Apr 2016 02:40:41 +0200 Subject: [PATCH 0574/1649] net: dsa: Remove allocation of driver private memory The drivers now allocate their own memory for private usage. Remove the allocation from the core code. Signed-off-by: Andrew Lunn Acked-by: Florian Fainelli Tested-by: Vivien Didelot Signed-off-by: David S. Miller --- include/net/dsa.h | 1 - net/dsa/dsa.c | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/include/net/dsa.h b/include/net/dsa.h index 7bc7bd9b5ded..165c2e10615c 100644 --- a/include/net/dsa.h +++ b/include/net/dsa.h @@ -213,7 +213,6 @@ struct dsa_switch_driver { struct list_head list; enum dsa_tag_protocol tag_protocol; - int priv_size; /* * Probing and setup. diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c index 7ef8a92a9e39..14bf12f637d2 100644 --- a/net/dsa/dsa.c +++ b/net/dsa/dsa.c @@ -402,7 +402,7 @@ dsa_switch_setup(struct dsa_switch_tree *dst, int index, /* * Allocate and initialise switch state. */ - ds = devm_kzalloc(parent, sizeof(*ds) + drv->priv_size, GFP_KERNEL); + ds = devm_kzalloc(parent, sizeof(*ds), GFP_KERNEL); if (ds == NULL) return ERR_PTR(-ENOMEM); From a77d43f1e9d59791b138b9903c58b89fffb0df97 Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Wed, 13 Apr 2016 02:40:42 +0200 Subject: [PATCH 0575/1649] net: dsa: Keep the mii bus and address in the private structure Rather than looking up the mii bus and address every time, do it once at probe, and keep it in the private structure. Centralise this probe code in mv88e6xxx. Signed-off-by: Andrew Lunn Acked-by: Florian Fainelli Tested-by: Vivien Didelot Signed-off-by: David S. 
Miller --- drivers/net/dsa/mv88e6060.c | 44 ++++++++++++++++++++++--------------- drivers/net/dsa/mv88e6060.h | 11 ++++++++++ drivers/net/dsa/mv88e6123.c | 15 +++---------- drivers/net/dsa/mv88e6131.c | 15 +++---------- drivers/net/dsa/mv88e6171.c | 15 +++---------- drivers/net/dsa/mv88e6352.c | 15 +++---------- drivers/net/dsa/mv88e6xxx.c | 43 ++++++++++++++++++++++++------------ drivers/net/dsa/mv88e6xxx.h | 14 +++++++++--- 8 files changed, 89 insertions(+), 83 deletions(-) diff --git a/drivers/net/dsa/mv88e6060.c b/drivers/net/dsa/mv88e6060.c index 41195f1417f1..46fe5dc65a99 100644 --- a/drivers/net/dsa/mv88e6060.c +++ b/drivers/net/dsa/mv88e6060.c @@ -19,12 +19,9 @@ static int reg_read(struct dsa_switch *ds, int addr, int reg) { - struct mii_bus *bus = dsa_host_dev_to_mii_bus(ds->master_dev); + struct mv88e6060_priv *priv = ds_to_priv(ds); - if (bus == NULL) - return -EINVAL; - - return mdiobus_read_nested(bus, ds->pd->sw_addr + addr, reg); + return mdiobus_read_nested(priv->bus, priv->sw_addr + addr, reg); } #define REG_READ(addr, reg) \ @@ -40,12 +37,9 @@ static int reg_read(struct dsa_switch *ds, int addr, int reg) static int reg_write(struct dsa_switch *ds, int addr, int reg, u16 val) { - struct mii_bus *bus = dsa_host_dev_to_mii_bus(ds->master_dev); + struct mv88e6060_priv *priv = ds_to_priv(ds); - if (bus == NULL) - return -EINVAL; - - return mdiobus_write_nested(bus, ds->pd->sw_addr + addr, reg, val); + return mdiobus_write_nested(priv->bus, priv->sw_addr + addr, reg, val); } #define REG_WRITE(addr, reg, val) \ @@ -57,16 +51,10 @@ static int reg_write(struct dsa_switch *ds, int addr, int reg, u16 val) return __ret; \ }) -static char *mv88e6060_probe(struct device *dsa_dev, struct device *host_dev, - int sw_addr, void **priv) +static char *mv88e6060_get_name(struct mii_bus *bus, int sw_addr) { - struct mii_bus *bus = dsa_host_dev_to_mii_bus(host_dev); int ret; - *priv = NULL; - if (bus == NULL) - return NULL; - ret = mdiobus_read(bus, sw_addr + REG_PORT(0), PORT_SWITCH_ID); if (ret >= 0) { if (ret == PORT_SWITCH_ID_6060) @@ -81,6 +69,26 @@ static char *mv88e6060_probe(struct device *dsa_dev, struct device *host_dev, return NULL; } +static char *mv88e6060_probe(struct device *dsa_dev, struct device *host_dev, + int sw_addr, void **_priv) +{ + struct mii_bus *bus = dsa_host_dev_to_mii_bus(host_dev); + struct mv88e6060_priv *priv; + char *name; + + name = mv88e6060_get_name(bus, sw_addr); + if (name) { + priv = devm_kzalloc(dsa_dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return NULL; + *_priv = priv; + priv->bus = bus; + priv->sw_addr = sw_addr; + } + + return name; +} + static int mv88e6060_switch_reset(struct dsa_switch *ds) { int i; @@ -176,8 +184,8 @@ static int mv88e6060_setup_port(struct dsa_switch *ds, int p) static int mv88e6060_setup(struct dsa_switch *ds) { - int i; int ret; + int i; ret = mv88e6060_switch_reset(ds); if (ret < 0) diff --git a/drivers/net/dsa/mv88e6060.h b/drivers/net/dsa/mv88e6060.h index cc9b2ed4aff4..10249bd16292 100644 --- a/drivers/net/dsa/mv88e6060.h +++ b/drivers/net/dsa/mv88e6060.h @@ -108,4 +108,15 @@ #define GLOBAL_ATU_MAC_23 0x0e #define GLOBAL_ATU_MAC_45 0x0f +struct mv88e6060_priv { + /* MDIO bus and address on bus to use. When in single chip + * mode, address is 0, and the switch uses multiple addresses + * on the bus. When in multi-chip mode, the switch uses a + * single address which contains two registers used for + * indirect access to more registers. 
+ */ + struct mii_bus *bus; + int sw_addr; +}; + #endif diff --git a/drivers/net/dsa/mv88e6123.c b/drivers/net/dsa/mv88e6123.c index bcab3ef22448..8aaac0894752 100644 --- a/drivers/net/dsa/mv88e6123.c +++ b/drivers/net/dsa/mv88e6123.c @@ -32,18 +32,9 @@ static const struct mv88e6xxx_switch_id mv88e6123_table[] = { static char *mv88e6123_probe(struct device *dsa_dev, struct device *host_dev, int sw_addr, void **priv) { - struct mv88e6xxx_priv_state *ps; - char *name; - - name = mv88e6xxx_lookup_name(host_dev, sw_addr, mv88e6123_table, - ARRAY_SIZE(mv88e6123_table)); - if (name) { - ps = devm_kzalloc(dsa_dev, sizeof(*ps), GFP_KERNEL); - if (!ps) - return NULL; - *priv = ps; - } - return name; + return mv88e6xxx_drv_probe(dsa_dev, host_dev, sw_addr, priv, + mv88e6123_table, + ARRAY_SIZE(mv88e6123_table)); } static int mv88e6123_setup_global(struct dsa_switch *ds) diff --git a/drivers/net/dsa/mv88e6131.c b/drivers/net/dsa/mv88e6131.c index b9f9b009f65a..9e6edf94a855 100644 --- a/drivers/net/dsa/mv88e6131.c +++ b/drivers/net/dsa/mv88e6131.c @@ -28,18 +28,9 @@ static const struct mv88e6xxx_switch_id mv88e6131_table[] = { static char *mv88e6131_probe(struct device *dsa_dev, struct device *host_dev, int sw_addr, void **priv) { - struct mv88e6xxx_priv_state *ps; - char *name; - - name = mv88e6xxx_lookup_name(host_dev, sw_addr, mv88e6131_table, - ARRAY_SIZE(mv88e6131_table)); - if (name) { - ps = devm_kzalloc(dsa_dev, sizeof(*ps), GFP_KERNEL); - if (!ps) - return NULL; - *priv = ps; - } - return name; + return mv88e6xxx_drv_probe(dsa_dev, host_dev, sw_addr, priv, + mv88e6131_table, + ARRAY_SIZE(mv88e6131_table)); } static int mv88e6131_setup_global(struct dsa_switch *ds) diff --git a/drivers/net/dsa/mv88e6171.c b/drivers/net/dsa/mv88e6171.c index 15200928cecc..6ab86071391f 100644 --- a/drivers/net/dsa/mv88e6171.c +++ b/drivers/net/dsa/mv88e6171.c @@ -27,18 +27,9 @@ static const struct mv88e6xxx_switch_id mv88e6171_table[] = { static char *mv88e6171_probe(struct device *dsa_dev, struct device *host_dev, int sw_addr, void **priv) { - struct mv88e6xxx_priv_state *ps; - char *name; - - name = mv88e6xxx_lookup_name(host_dev, sw_addr, mv88e6171_table, - ARRAY_SIZE(mv88e6171_table)); - if (name) { - ps = devm_kzalloc(dsa_dev, sizeof(*ps), GFP_KERNEL); - if (!ps) - return NULL; - *priv = ps; - } - return name; + return mv88e6xxx_drv_probe(dsa_dev, host_dev, sw_addr, priv, + mv88e6171_table, + ARRAY_SIZE(mv88e6171_table)); } static int mv88e6171_setup_global(struct dsa_switch *ds) diff --git a/drivers/net/dsa/mv88e6352.c b/drivers/net/dsa/mv88e6352.c index 7081a78a67e1..764b10ffb631 100644 --- a/drivers/net/dsa/mv88e6352.c +++ b/drivers/net/dsa/mv88e6352.c @@ -40,18 +40,9 @@ static const struct mv88e6xxx_switch_id mv88e6352_table[] = { static char *mv88e6352_probe(struct device *dsa_dev, struct device *host_dev, int sw_addr, void **priv) { - struct mv88e6xxx_priv_state *ps; - char *name; - - name = mv88e6xxx_lookup_name(host_dev, sw_addr, mv88e6352_table, - ARRAY_SIZE(mv88e6352_table)); - if (name) { - ps = devm_kzalloc(dsa_dev, sizeof(*ps), GFP_KERNEL); - if (!ps) - return NULL; - *priv = ps; - } - return name; + return mv88e6xxx_drv_probe(dsa_dev, host_dev, sw_addr, priv, + mv88e6352_table, + ARRAY_SIZE(mv88e6352_table)); } static int mv88e6352_setup_global(struct dsa_switch *ds) diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c index 085fc4a49eb2..c242ffd8eb09 100644 --- a/drivers/net/dsa/mv88e6xxx.c +++ b/drivers/net/dsa/mv88e6xxx.c @@ -94,15 +94,12 @@ static int 
__mv88e6xxx_reg_read(struct mii_bus *bus, int sw_addr, int addr, static int _mv88e6xxx_reg_read(struct dsa_switch *ds, int addr, int reg) { - struct mii_bus *bus = dsa_host_dev_to_mii_bus(ds->master_dev); + struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); int ret; assert_smi_lock(ds); - if (bus == NULL) - return -EINVAL; - - ret = __mv88e6xxx_reg_read(bus, ds->pd->sw_addr, addr, reg); + ret = __mv88e6xxx_reg_read(ps->bus, ps->sw_addr, addr, reg); if (ret < 0) return ret; @@ -159,17 +156,14 @@ static int __mv88e6xxx_reg_write(struct mii_bus *bus, int sw_addr, int addr, static int _mv88e6xxx_reg_write(struct dsa_switch *ds, int addr, int reg, u16 val) { - struct mii_bus *bus = dsa_host_dev_to_mii_bus(ds->master_dev); + struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); assert_smi_lock(ds); - if (bus == NULL) - return -EINVAL; - dev_dbg(ds->master_dev, "-> addr: 0x%.2x reg: 0x%.2x val: 0x%.4x\n", addr, reg, val); - return __mv88e6xxx_reg_write(bus, ds->pd->sw_addr, addr, reg, val); + return __mv88e6xxx_reg_write(ps->bus, ps->sw_addr, addr, reg, val); } int mv88e6xxx_reg_write(struct dsa_switch *ds, int addr, int reg, u16 val) @@ -2671,7 +2665,6 @@ int mv88e6xxx_setup_common(struct dsa_switch *ds) struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); ps->ds = ds; - mutex_init(&ps->smi_mutex); ps->id = REG_READ(REG_PORT(0), PORT_SWITCH_ID) & 0xfff0; @@ -3075,9 +3068,9 @@ int mv88e6xxx_get_temp_alarm(struct dsa_switch *ds, bool *alarm) } #endif /* CONFIG_NET_DSA_HWMON */ -char *mv88e6xxx_lookup_name(struct device *host_dev, int sw_addr, - const struct mv88e6xxx_switch_id *table, - unsigned int num) +static char *mv88e6xxx_lookup_name(struct device *host_dev, int sw_addr, + const struct mv88e6xxx_switch_id *table, + unsigned int num) { struct mii_bus *bus = dsa_host_dev_to_mii_bus(host_dev); int i, ret; @@ -3107,6 +3100,28 @@ char *mv88e6xxx_lookup_name(struct device *host_dev, int sw_addr, return NULL; } +char *mv88e6xxx_drv_probe(struct device *dsa_dev, struct device *host_dev, + int sw_addr, void **priv, + const struct mv88e6xxx_switch_id *table, + unsigned int num) +{ + struct mv88e6xxx_priv_state *ps; + char *name; + + name = mv88e6xxx_lookup_name(host_dev, sw_addr, table, num); + if (name) { + ps = devm_kzalloc(dsa_dev, sizeof(*ps), GFP_KERNEL); + if (!ps) + return NULL; + *priv = ps; + ps->bus = dsa_host_dev_to_mii_bus(host_dev); + if (!ps->bus) + return NULL; + ps->sw_addr = sw_addr; + } + return name; +} + static int __init mv88e6xxx_init(void) { #if IS_ENABLED(CONFIG_NET_DSA_MV88E6131) diff --git a/drivers/net/dsa/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx.h index 0322e3e0e7d9..5d27decc85cb 100644 --- a/drivers/net/dsa/mv88e6xxx.h +++ b/drivers/net/dsa/mv88e6xxx.h @@ -406,6 +406,12 @@ struct mv88e6xxx_priv_state { */ struct mutex smi_mutex; + /* The MII bus and the address on the bus that is used to + * communication with the switch + */ + struct mii_bus *bus; + int sw_addr; + #ifdef CONFIG_NET_DSA_MV88E6XXX_NEED_PPU /* Handles automatic disabling and re-enabling of the PHY * polling unit. 
@@ -456,9 +462,11 @@ struct mv88e6xxx_hw_stat { }; int mv88e6xxx_switch_reset(struct dsa_switch *ds, bool ppu_active); -char *mv88e6xxx_lookup_name(struct device *host_dev, int sw_addr, - const struct mv88e6xxx_switch_id *table, - unsigned int num); +char *mv88e6xxx_drv_probe(struct device *dsa_dev, struct device *host_dev, + int sw_addr, void **priv, + const struct mv88e6xxx_switch_id *table, + unsigned int num); + int mv88e6xxx_setup_ports(struct dsa_switch *ds); int mv88e6xxx_setup_common(struct dsa_switch *ds); int mv88e6xxx_setup_global(struct dsa_switch *ds); From e49bad319630dedeeda3a638a707ec7b5d402ad5 Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Wed, 13 Apr 2016 02:40:43 +0200 Subject: [PATCH 0576/1649] net: dsa: Rename DSA probe function. Rename the function called from the DSA to perform a probe for the switch. This makes the normal _probe() name available for a standard Linux device driver probe function. Signed-off-by: Andrew Lunn Tested-by: Vivien Didelot Acked-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/dsa/bcm_sf2.c | 7 ++++--- drivers/net/dsa/mv88e6060.c | 7 ++++--- drivers/net/dsa/mv88e6123.c | 7 ++++--- drivers/net/dsa/mv88e6131.c | 7 ++++--- drivers/net/dsa/mv88e6171.c | 7 ++++--- drivers/net/dsa/mv88e6352.c | 7 ++++--- 6 files changed, 24 insertions(+), 18 deletions(-) diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index 7d62802a66d5..50caa525cda3 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -135,8 +135,9 @@ static int bcm_sf2_sw_get_sset_count(struct dsa_switch *ds) return BCM_SF2_STATS_SIZE; } -static char *bcm_sf2_sw_probe(struct device *dsa_dev, struct device *host_dev, - int sw_addr, void **_priv) +static char *bcm_sf2_sw_drv_probe(struct device *dsa_dev, + struct device *host_dev, + int sw_addr, void **_priv) { struct bcm_sf2_priv *priv; @@ -1370,7 +1371,7 @@ static int bcm_sf2_sw_set_wol(struct dsa_switch *ds, int port, static struct dsa_switch_driver bcm_sf2_switch_driver = { .tag_protocol = DSA_TAG_PROTO_BRCM, - .probe = bcm_sf2_sw_probe, + .probe = bcm_sf2_sw_drv_probe, .setup = bcm_sf2_sw_setup, .set_addr = bcm_sf2_sw_set_addr, .get_phy_flags = bcm_sf2_sw_get_phy_flags, diff --git a/drivers/net/dsa/mv88e6060.c b/drivers/net/dsa/mv88e6060.c index 46fe5dc65a99..adb608ccd9aa 100644 --- a/drivers/net/dsa/mv88e6060.c +++ b/drivers/net/dsa/mv88e6060.c @@ -69,8 +69,9 @@ static char *mv88e6060_get_name(struct mii_bus *bus, int sw_addr) return NULL; } -static char *mv88e6060_probe(struct device *dsa_dev, struct device *host_dev, - int sw_addr, void **_priv) +static char *mv88e6060_drv_probe(struct device *dsa_dev, + struct device *host_dev, + int sw_addr, void **_priv) { struct mii_bus *bus = dsa_host_dev_to_mii_bus(host_dev); struct mv88e6060_priv *priv; @@ -248,7 +249,7 @@ mv88e6060_phy_write(struct dsa_switch *ds, int port, int regnum, u16 val) static struct dsa_switch_driver mv88e6060_switch_driver = { .tag_protocol = DSA_TAG_PROTO_TRAILER, - .probe = mv88e6060_probe, + .probe = mv88e6060_drv_probe, .setup = mv88e6060_setup, .set_addr = mv88e6060_set_addr, .phy_read = mv88e6060_phy_read, diff --git a/drivers/net/dsa/mv88e6123.c b/drivers/net/dsa/mv88e6123.c index 8aaac0894752..c34283d929c4 100644 --- a/drivers/net/dsa/mv88e6123.c +++ b/drivers/net/dsa/mv88e6123.c @@ -29,8 +29,9 @@ static const struct mv88e6xxx_switch_id mv88e6123_table[] = { { PORT_SWITCH_ID_6165_A2, "Marvell 88e6165 (A2)" }, }; -static char *mv88e6123_probe(struct device *dsa_dev, struct device *host_dev, - int sw_addr, 
void **priv) +static char *mv88e6123_drv_probe(struct device *dsa_dev, + struct device *host_dev, + int sw_addr, void **priv) { return mv88e6xxx_drv_probe(dsa_dev, host_dev, sw_addr, priv, mv88e6123_table, @@ -106,7 +107,7 @@ static int mv88e6123_setup(struct dsa_switch *ds) struct dsa_switch_driver mv88e6123_switch_driver = { .tag_protocol = DSA_TAG_PROTO_EDSA, - .probe = mv88e6123_probe, + .probe = mv88e6123_drv_probe, .setup = mv88e6123_setup, .set_addr = mv88e6xxx_set_addr_indirect, .phy_read = mv88e6xxx_phy_read, diff --git a/drivers/net/dsa/mv88e6131.c b/drivers/net/dsa/mv88e6131.c index 9e6edf94a855..f5d75fce1e96 100644 --- a/drivers/net/dsa/mv88e6131.c +++ b/drivers/net/dsa/mv88e6131.c @@ -25,8 +25,9 @@ static const struct mv88e6xxx_switch_id mv88e6131_table[] = { { PORT_SWITCH_ID_6185, "Marvell 88E6185" }, }; -static char *mv88e6131_probe(struct device *dsa_dev, struct device *host_dev, - int sw_addr, void **priv) +static char *mv88e6131_drv_probe(struct device *dsa_dev, + struct device *host_dev, + int sw_addr, void **priv) { return mv88e6xxx_drv_probe(dsa_dev, host_dev, sw_addr, priv, mv88e6131_table, @@ -163,7 +164,7 @@ mv88e6131_phy_write(struct dsa_switch *ds, struct dsa_switch_driver mv88e6131_switch_driver = { .tag_protocol = DSA_TAG_PROTO_DSA, - .probe = mv88e6131_probe, + .probe = mv88e6131_drv_probe, .setup = mv88e6131_setup, .set_addr = mv88e6xxx_set_addr_direct, .phy_read = mv88e6131_phy_read, diff --git a/drivers/net/dsa/mv88e6171.c b/drivers/net/dsa/mv88e6171.c index 6ab86071391f..f5622506cdfa 100644 --- a/drivers/net/dsa/mv88e6171.c +++ b/drivers/net/dsa/mv88e6171.c @@ -24,8 +24,9 @@ static const struct mv88e6xxx_switch_id mv88e6171_table[] = { { PORT_SWITCH_ID_6351, "Marvell 88E6351" }, }; -static char *mv88e6171_probe(struct device *dsa_dev, struct device *host_dev, - int sw_addr, void **priv) +static char *mv88e6171_drv_probe(struct device *dsa_dev, + struct device *host_dev, + int sw_addr, void **priv) { return mv88e6xxx_drv_probe(dsa_dev, host_dev, sw_addr, priv, mv88e6171_table, @@ -92,7 +93,7 @@ static int mv88e6171_setup(struct dsa_switch *ds) struct dsa_switch_driver mv88e6171_switch_driver = { .tag_protocol = DSA_TAG_PROTO_EDSA, - .probe = mv88e6171_probe, + .probe = mv88e6171_drv_probe, .setup = mv88e6171_setup, .set_addr = mv88e6xxx_set_addr_indirect, .phy_read = mv88e6xxx_phy_read_indirect, diff --git a/drivers/net/dsa/mv88e6352.c b/drivers/net/dsa/mv88e6352.c index 764b10ffb631..e54ee27db129 100644 --- a/drivers/net/dsa/mv88e6352.c +++ b/drivers/net/dsa/mv88e6352.c @@ -37,8 +37,9 @@ static const struct mv88e6xxx_switch_id mv88e6352_table[] = { { PORT_SWITCH_ID_6352_A1, "Marvell 88E6352 (A1)" }, }; -static char *mv88e6352_probe(struct device *dsa_dev, struct device *host_dev, - int sw_addr, void **priv) +static char *mv88e6352_drv_probe(struct device *dsa_dev, + struct device *host_dev, + int sw_addr, void **priv) { return mv88e6xxx_drv_probe(dsa_dev, host_dev, sw_addr, priv, mv88e6352_table, @@ -306,7 +307,7 @@ static int mv88e6352_set_eeprom(struct dsa_switch *ds, struct dsa_switch_driver mv88e6352_switch_driver = { .tag_protocol = DSA_TAG_PROTO_EDSA, - .probe = mv88e6352_probe, + .probe = mv88e6352_drv_probe, .setup = mv88e6352_setup, .set_addr = mv88e6xxx_set_addr_indirect, .phy_read = mv88e6xxx_phy_read_indirect, From 74c3e2a54b7d9eb57f23fb0e157b90bb6dae629f Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Wed, 13 Apr 2016 02:40:44 +0200 Subject: [PATCH 0577/1649] dsa: Rename phys_port_mask to enabled_port_mask The phys in phys_port_mask suggests 
this mask is about PHYs. In fact, it means physical ports. Rename to enabled_port_mask, indicating external enabled ports of the switch, which is hopefully less confusing. Signed-off-by: Andrew Lunn Tested-by: Vivien Didelot Acked-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/dsa/bcm_sf2.c | 19 ++++++++++--------- drivers/net/dsa/mv88e6060.c | 2 +- include/net/dsa.h | 4 ++-- net/dsa/dsa.c | 8 ++++---- 4 files changed, 17 insertions(+), 16 deletions(-) diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index 50caa525cda3..7a5f0ef46bd6 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -160,7 +160,7 @@ static void bcm_sf2_imp_vlan_setup(struct dsa_switch *ds, int cpu_port) * the same VLAN. */ for (i = 0; i < priv->hw_params.num_ports; i++) { - if (!((1 << i) & ds->phys_port_mask)) + if (!((1 << i) & ds->enabled_port_mask)) continue; reg = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(i)); @@ -1009,7 +1009,7 @@ static int bcm_sf2_sw_setup(struct dsa_switch *ds) /* Enable all valid ports and disable those unused */ for (port = 0; port < priv->hw_params.num_ports; port++) { /* IMP port receives special treatment */ - if ((1 << port) & ds->phys_port_mask) + if ((1 << port) & ds->enabled_port_mask) bcm_sf2_port_setup(ds, port, NULL); else if (dsa_is_cpu_port(ds, port)) bcm_sf2_imp_setup(ds, port); @@ -1022,11 +1022,12 @@ static int bcm_sf2_sw_setup(struct dsa_switch *ds) * 7445D0, since 7445E0 disconnects the internal switch pseudo-PHY such * that we can use the regular SWITCH_MDIO master controller instead. * - * By default, DSA initializes ds->phys_mii_mask to ds->phys_port_mask - * to have a 1:1 mapping between Port address and PHY address in order - * to utilize the slave_mii_bus instance to read from Port PHYs. This is - * not what we want here, so we initialize phys_mii_mask 0 to always - * utilize the "master" MDIO bus backed by the "mdio-unimac" driver. + * By default, DSA initializes ds->phys_mii_mask to + * ds->enabled_port_mask to have a 1:1 mapping between Port address + * and PHY address in order to utilize the slave_mii_bus instance to + * read from Port PHYs. This is not what we want here, so we + * initialize phys_mii_mask 0 to always utilize the "master" MDIO + * bus backed by the "mdio-unimac" driver. */ if (of_machine_is_compatible("brcm,bcm7445d0")) ds->phys_mii_mask |= ((1 << BRCM_PSEUDO_PHY_ADDR) | (1 << 0)); @@ -1284,7 +1285,7 @@ static int bcm_sf2_sw_suspend(struct dsa_switch *ds) * bcm_sf2_sw_setup */ for (port = 0; port < DSA_MAX_PORTS; port++) { - if ((1 << port) & ds->phys_port_mask || + if ((1 << port) & ds->enabled_port_mask || dsa_is_cpu_port(ds, port)) bcm_sf2_port_disable(ds, port, NULL); } @@ -1308,7 +1309,7 @@ static int bcm_sf2_sw_resume(struct dsa_switch *ds) bcm_sf2_gphy_enable_set(ds, true); for (port = 0; port < DSA_MAX_PORTS; port++) { - if ((1 << port) & ds->phys_port_mask) + if ((1 << port) & ds->enabled_port_mask) bcm_sf2_port_setup(ds, port, NULL); else if (dsa_is_cpu_port(ds, port)) bcm_sf2_imp_setup(ds, port); diff --git a/drivers/net/dsa/mv88e6060.c b/drivers/net/dsa/mv88e6060.c index adb608ccd9aa..92cebab9383e 100644 --- a/drivers/net/dsa/mv88e6060.c +++ b/drivers/net/dsa/mv88e6060.c @@ -170,7 +170,7 @@ static int mv88e6060_setup_port(struct dsa_switch *ds, int p) REG_WRITE(addr, PORT_VLAN_MAP, ((p & 0xf) << PORT_VLAN_MAP_DBNUM_SHIFT) | (dsa_is_cpu_port(ds, p) ? 
- ds->phys_port_mask : + ds->enabled_port_mask : BIT(ds->dst->cpu_port))); /* Port Association Vector: when learning source addresses diff --git a/include/net/dsa.h b/include/net/dsa.h index 165c2e10615c..689ebd3542ba 100644 --- a/include/net/dsa.h +++ b/include/net/dsa.h @@ -167,7 +167,7 @@ struct dsa_switch { * Slave mii_bus and devices for the individual ports. */ u32 dsa_port_mask; - u32 phys_port_mask; + u32 enabled_port_mask; u32 phys_mii_mask; struct mii_bus *slave_mii_bus; struct net_device *ports[DSA_MAX_PORTS]; @@ -185,7 +185,7 @@ static inline bool dsa_is_dsa_port(struct dsa_switch *ds, int p) static inline bool dsa_is_port_initialized(struct dsa_switch *ds, int p) { - return ds->phys_port_mask & (1 << p) && ds->ports[p]; + return ds->enabled_port_mask & (1 << p) && ds->ports[p]; } static inline u8 dsa_upstream_port(struct dsa_switch *ds) diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c index 14bf12f637d2..60ea98481806 100644 --- a/net/dsa/dsa.c +++ b/net/dsa/dsa.c @@ -246,7 +246,7 @@ static int dsa_switch_setup_one(struct dsa_switch *ds, struct device *parent) } else if (!strcmp(name, "dsa")) { ds->dsa_port_mask |= 1 << i; } else { - ds->phys_port_mask |= 1 << i; + ds->enabled_port_mask |= 1 << i; } valid_name_found = true; } @@ -259,7 +259,7 @@ static int dsa_switch_setup_one(struct dsa_switch *ds, struct device *parent) /* Make the built-in MII bus mask match the number of ports, * switch drivers can override this later */ - ds->phys_mii_mask = ds->phys_port_mask; + ds->phys_mii_mask = ds->enabled_port_mask; /* * If the CPU connects to this switch, set the switch tree @@ -325,7 +325,7 @@ static int dsa_switch_setup_one(struct dsa_switch *ds, struct device *parent) * Create network devices for physical switch ports. */ for (i = 0; i < DSA_MAX_PORTS; i++) { - if (!(ds->phys_port_mask & (1 << i))) + if (!(ds->enabled_port_mask & (1 << i))) continue; ret = dsa_slave_create(ds, parent, i, pd->port_names[i]); @@ -435,7 +435,7 @@ static void dsa_switch_destroy(struct dsa_switch *ds) /* Destroy network devices for physical switch ports. */ for (port = 0; port < DSA_MAX_PORTS; port++) { - if (!(ds->phys_port_mask & (1 << port))) + if (!(ds->enabled_port_mask & (1 << port))) continue; if (!ds->ports[port]) From c156913b5d62cbaa0e3be29409de562b7d2e006e Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Wed, 13 Apr 2016 02:40:45 +0200 Subject: [PATCH 0578/1649] dsa: mv88e6xxx: Use bus in mv88e6xxx_lookup_name() mv88e6xxx_lookup_name() returns the model name of a switch at a given address on an MII bus. Using mii_bus to identify the bus rather than the host device is more logical, so change the parameter. Signed-off-by: Andrew Lunn Tested-by: Vivien Didelot Reviewed-by: Vivien Didelot Signed-off-by: David S. 
Miller --- drivers/net/dsa/mv88e6xxx.c | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c index c242ffd8eb09..9985a0cf31f1 100644 --- a/drivers/net/dsa/mv88e6xxx.c +++ b/drivers/net/dsa/mv88e6xxx.c @@ -3068,11 +3068,10 @@ int mv88e6xxx_get_temp_alarm(struct dsa_switch *ds, bool *alarm) } #endif /* CONFIG_NET_DSA_HWMON */ -static char *mv88e6xxx_lookup_name(struct device *host_dev, int sw_addr, +static char *mv88e6xxx_lookup_name(struct mii_bus *bus, int sw_addr, const struct mv88e6xxx_switch_id *table, unsigned int num) { - struct mii_bus *bus = dsa_host_dev_to_mii_bus(host_dev); int i, ret; if (!bus) @@ -3090,7 +3089,8 @@ static char *mv88e6xxx_lookup_name(struct device *host_dev, int sw_addr, /* Look up only the product number */ for (i = 0; i < num; ++i) { if (table[i].id == (ret & PORT_SWITCH_ID_PROD_NUM_MASK)) { - dev_warn(host_dev, "unknown revision %d, using base switch 0x%x\n", + dev_warn(&bus->dev, + "unknown revision %d, using base switch 0x%x\n", ret & PORT_SWITCH_ID_REV_MASK, ret & PORT_SWITCH_ID_PROD_NUM_MASK); return table[i].name; @@ -3106,9 +3106,13 @@ char *mv88e6xxx_drv_probe(struct device *dsa_dev, struct device *host_dev, unsigned int num) { struct mv88e6xxx_priv_state *ps; + struct mii_bus *bus = dsa_host_dev_to_mii_bus(host_dev); char *name; - name = mv88e6xxx_lookup_name(host_dev, sw_addr, table, num); + if (!bus) + return NULL; + + name = mv88e6xxx_lookup_name(bus, sw_addr, table, num); if (name) { ps = devm_kzalloc(dsa_dev, sizeof(*ps), GFP_KERNEL); if (!ps) From f24e230d257af1ad7476c6e81a8dc3127a74204e Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Fri, 1 Apr 2016 14:17:21 +0200 Subject: [PATCH 0579/1649] netfilter: x_tables: don't move to non-existent next rule Ben Hawkes says: In the mark_source_chains function (net/ipv4/netfilter/ip_tables.c) it is possible for a user-supplied ipt_entry structure to have a large next_offset field. This field is not bounds checked prior to writing a counter value at the supplied offset. Base chains enforce absolute verdict. User defined chains are supposed to end with an unconditional return, xtables userspace adds them automatically. But if such return is missing we will move to non-existent next rule. Reported-by: Ben Hawkes Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- net/ipv4/netfilter/arp_tables.c | 8 +++++--- net/ipv4/netfilter/ip_tables.c | 4 ++++ net/ipv6/netfilter/ip6_tables.c | 4 ++++ 3 files changed, 13 insertions(+), 3 deletions(-) diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c index 4133b0f513af..82a434bf8653 100644 --- a/net/ipv4/netfilter/arp_tables.c +++ b/net/ipv4/netfilter/arp_tables.c @@ -439,6 +439,8 @@ static int mark_source_chains(const struct xt_table_info *newinfo, size = e->next_offset; e = (struct arpt_entry *) (entry0 + pos + size); + if (pos + size >= newinfo->size) + return 0; e->counters.pcnt = pos; pos += size; } else { @@ -461,6 +463,8 @@ static int mark_source_chains(const struct xt_table_info *newinfo, } else { /* ... 
this is a fallthru */ newpos = pos + e->next_offset; + if (newpos >= newinfo->size) + return 0; } e = (struct arpt_entry *) (entry0 + newpos); @@ -691,10 +695,8 @@ static int translate_table(struct xt_table_info *newinfo, void *entry0, } } - if (!mark_source_chains(newinfo, repl->valid_hooks, entry0)) { - duprintf("Looping hook\n"); + if (!mark_source_chains(newinfo, repl->valid_hooks, entry0)) return -ELOOP; - } /* Finally, each sanity check must pass */ i = 0; diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c index 631c100a1338..e301a3db4717 100644 --- a/net/ipv4/netfilter/ip_tables.c +++ b/net/ipv4/netfilter/ip_tables.c @@ -520,6 +520,8 @@ mark_source_chains(const struct xt_table_info *newinfo, size = e->next_offset; e = (struct ipt_entry *) (entry0 + pos + size); + if (pos + size >= newinfo->size) + return 0; e->counters.pcnt = pos; pos += size; } else { @@ -541,6 +543,8 @@ mark_source_chains(const struct xt_table_info *newinfo, } else { /* ... this is a fallthru */ newpos = pos + e->next_offset; + if (newpos >= newinfo->size) + return 0; } e = (struct ipt_entry *) (entry0 + newpos); diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c index 86b67b70b626..7b3335bce3fd 100644 --- a/net/ipv6/netfilter/ip6_tables.c +++ b/net/ipv6/netfilter/ip6_tables.c @@ -532,6 +532,8 @@ mark_source_chains(const struct xt_table_info *newinfo, size = e->next_offset; e = (struct ip6t_entry *) (entry0 + pos + size); + if (pos + size >= newinfo->size) + return 0; e->counters.pcnt = pos; pos += size; } else { @@ -553,6 +555,8 @@ mark_source_chains(const struct xt_table_info *newinfo, } else { /* ... this is a fallthru */ newpos = pos + e->next_offset; + if (newpos >= newinfo->size) + return 0; } e = (struct ip6t_entry *) (entry0 + newpos); From 36472341017529e2b12573093cc0f68719300997 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Fri, 1 Apr 2016 14:17:22 +0200 Subject: [PATCH 0580/1649] netfilter: x_tables: validate targets of jumps When we see a jump also check that the offset gets us to beginning of a rule (an ipt_entry). The extra overhead is negible, even with absurd cases. 300k custom rules, 300k jumps to 'next' user chain: [ plus one jump from INPUT to first userchain ]: Before: real 0m24.874s user 0m7.532s sys 0m16.076s After: real 0m27.464s user 0m7.436s sys 0m18.840s Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- net/ipv4/netfilter/arp_tables.c | 16 ++++++++++++++++ net/ipv4/netfilter/ip_tables.c | 16 ++++++++++++++++ net/ipv6/netfilter/ip6_tables.c | 16 ++++++++++++++++ 3 files changed, 48 insertions(+) diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c index 82a434bf8653..ec37f7c3a033 100644 --- a/net/ipv4/netfilter/arp_tables.c +++ b/net/ipv4/netfilter/arp_tables.c @@ -367,6 +367,18 @@ static inline bool unconditional(const struct arpt_entry *e) memcmp(&e->arp, &uncond, sizeof(uncond)) == 0; } +static bool find_jump_target(const struct xt_table_info *t, + const struct arpt_entry *target) +{ + struct arpt_entry *iter; + + xt_entry_foreach(iter, t->entries, t->size) { + if (iter == target) + return true; + } + return false; +} + /* Figures out from what hook each rule can be called: returns 0 if * there are loops. Puts hook bitmask in comefrom. */ @@ -460,6 +472,10 @@ static int mark_source_chains(const struct xt_table_info *newinfo, /* This a jump; chase it. 
*/ duprintf("Jump rule %u -> %u\n", pos, newpos); + e = (struct arpt_entry *) + (entry0 + newpos); + if (!find_jump_target(newinfo, e)) + return 0; } else { /* ... this is a fallthru */ newpos = pos + e->next_offset; diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c index e301a3db4717..503038ea7735 100644 --- a/net/ipv4/netfilter/ip_tables.c +++ b/net/ipv4/netfilter/ip_tables.c @@ -443,6 +443,18 @@ ipt_do_table(struct sk_buff *skb, #endif } +static bool find_jump_target(const struct xt_table_info *t, + const struct ipt_entry *target) +{ + struct ipt_entry *iter; + + xt_entry_foreach(iter, t->entries, t->size) { + if (iter == target) + return true; + } + return false; +} + /* Figures out from what hook each rule can be called: returns 0 if there are loops. Puts hook bitmask in comefrom. */ static int @@ -540,6 +552,10 @@ mark_source_chains(const struct xt_table_info *newinfo, /* This a jump; chase it. */ duprintf("Jump rule %u -> %u\n", pos, newpos); + e = (struct ipt_entry *) + (entry0 + newpos); + if (!find_jump_target(newinfo, e)) + return 0; } else { /* ... this is a fallthru */ newpos = pos + e->next_offset; diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c index 7b3335bce3fd..126f2a0f006a 100644 --- a/net/ipv6/netfilter/ip6_tables.c +++ b/net/ipv6/netfilter/ip6_tables.c @@ -455,6 +455,18 @@ ip6t_do_table(struct sk_buff *skb, #endif } +static bool find_jump_target(const struct xt_table_info *t, + const struct ip6t_entry *target) +{ + struct ip6t_entry *iter; + + xt_entry_foreach(iter, t->entries, t->size) { + if (iter == target) + return true; + } + return false; +} + /* Figures out from what hook each rule can be called: returns 0 if there are loops. Puts hook bitmask in comefrom. */ static int @@ -552,6 +564,10 @@ mark_source_chains(const struct xt_table_info *newinfo, /* This a jump; chase it. */ duprintf("Jump rule %u -> %u\n", pos, newpos); + e = (struct ip6t_entry *) + (entry0 + newpos); + if (!find_jump_target(newinfo, e)) + return 0; } else { /* ... this is a fallthru */ newpos = pos + e->next_offset; From 7d35812c3214afa5b37a675113555259cfd67b98 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Fri, 1 Apr 2016 14:17:23 +0200 Subject: [PATCH 0581/1649] netfilter: x_tables: add and use xt_check_entry_offsets Currently arp/ip and ip6tables each implement a short helper to check that the target offset is large enough to hold one xt_entry_target struct and that t->u.target_size fits within the current rule. Unfortunately these checks are not sufficient. To avoid adding new tests to all of ip/ip6/arptables move the current checks into a helper, then extend this helper in followup patches. 
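As an illustration of the checks being consolidated, the sketch below is a minimal user-space model of the helper described above; the structure and function names (fake_entry_target, check_entry_offsets) are simplified stand-ins rather than the kernel's xt_entry_target or the final x_tables helper, and the real code gains further validation in the follow-up patches.

#include <errno.h>
#include <stdio.h>

struct fake_entry_target {              /* stand-in for xt_entry_target      */
        unsigned short target_size;     /* declared size, incl. this header  */
        char name[29];
};

/* rule blob layout: [family header][matches ...][target] | next rule ...    */
static int check_entry_offsets(const void *base,
                               unsigned int target_offset,
                               unsigned int next_offset)
{
        const struct fake_entry_target *t;

        /* the target header itself must fit in front of the next rule */
        if (target_offset + sizeof(*t) > next_offset)
                return -EINVAL;

        t = (const void *)((const char *)base + target_offset);

        /* the declared target size must not run past the end of the rule */
        if (target_offset + t->target_size > next_offset)
                return -EINVAL;

        return 0;
}

int main(void)
{
        char blob[64] = { 0 };
        struct fake_entry_target *t = (void *)(blob + 16);

        t->target_size = 40;    /* 16 + 40 > 48: rejected (-EINVAL) */
        printf("bogus rule: %d\n", check_entry_offsets(blob, 16, 48));

        t->target_size = 32;    /* 16 + 32 == 48: accepted (0) */
        printf("sane rule:  %d\n", check_entry_offsets(blob, 16, 48));
        return 0;
}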
Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- include/linux/netfilter/x_tables.h | 4 ++++ net/ipv4/netfilter/arp_tables.c | 11 +--------- net/ipv4/netfilter/ip_tables.c | 12 +---------- net/ipv6/netfilter/ip6_tables.c | 12 +---------- net/netfilter/x_tables.c | 34 ++++++++++++++++++++++++++++++ 5 files changed, 41 insertions(+), 32 deletions(-) diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h index 80a305b85323..0de0862897a4 100644 --- a/include/linux/netfilter/x_tables.h +++ b/include/linux/netfilter/x_tables.h @@ -242,6 +242,10 @@ void xt_unregister_match(struct xt_match *target); int xt_register_matches(struct xt_match *match, unsigned int n); void xt_unregister_matches(struct xt_match *match, unsigned int n); +int xt_check_entry_offsets(const void *base, + unsigned int target_offset, + unsigned int next_offset); + int xt_check_match(struct xt_mtchk_param *, unsigned int size, u_int8_t proto, bool inv_proto); int xt_check_target(struct xt_tgchk_param *, unsigned int size, u_int8_t proto, diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c index ec37f7c3a033..74668c1d3243 100644 --- a/net/ipv4/netfilter/arp_tables.c +++ b/net/ipv4/netfilter/arp_tables.c @@ -496,19 +496,10 @@ next: static inline int check_entry(const struct arpt_entry *e) { - const struct xt_entry_target *t; - if (!arp_checkentry(&e->arp)) return -EINVAL; - if (e->target_offset + sizeof(struct xt_entry_target) > e->next_offset) - return -EINVAL; - - t = arpt_get_target_c(e); - if (e->target_offset + t->u.target_size > e->next_offset) - return -EINVAL; - - return 0; + return xt_check_entry_offsets(e, e->target_offset, e->next_offset); } static inline int check_target(struct arpt_entry *e, const char *name) diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c index 503038ea7735..71c204d4ca5f 100644 --- a/net/ipv4/netfilter/ip_tables.c +++ b/net/ipv4/netfilter/ip_tables.c @@ -590,20 +590,10 @@ static void cleanup_match(struct xt_entry_match *m, struct net *net) static int check_entry(const struct ipt_entry *e) { - const struct xt_entry_target *t; - if (!ip_checkentry(&e->ip)) return -EINVAL; - if (e->target_offset + sizeof(struct xt_entry_target) > - e->next_offset) - return -EINVAL; - - t = ipt_get_target_c(e); - if (e->target_offset + t->u.target_size > e->next_offset) - return -EINVAL; - - return 0; + return xt_check_entry_offsets(e, e->target_offset, e->next_offset); } static int diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c index 126f2a0f006a..24ae7f458b3b 100644 --- a/net/ipv6/netfilter/ip6_tables.c +++ b/net/ipv6/netfilter/ip6_tables.c @@ -602,20 +602,10 @@ static void cleanup_match(struct xt_entry_match *m, struct net *net) static int check_entry(const struct ip6t_entry *e) { - const struct xt_entry_target *t; - if (!ip6_checkentry(&e->ipv6)) return -EINVAL; - if (e->target_offset + sizeof(struct xt_entry_target) > - e->next_offset) - return -EINVAL; - - t = ip6t_get_target_c(e); - if (e->target_offset + t->u.target_size > e->next_offset) - return -EINVAL; - - return 0; + return xt_check_entry_offsets(e, e->target_offset, e->next_offset); } static int check_match(struct xt_entry_match *m, struct xt_mtchk_param *par) diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c index 582c9cfd6567..1f44bfa8dd94 100644 --- a/net/netfilter/x_tables.c +++ b/net/netfilter/x_tables.c @@ -541,6 +541,40 @@ int xt_compat_match_to_user(const struct xt_entry_match *m, 
EXPORT_SYMBOL_GPL(xt_compat_match_to_user); #endif /* CONFIG_COMPAT */ +/** + * xt_check_entry_offsets - validate arp/ip/ip6t_entry + * + * @base: pointer to arp/ip/ip6t_entry + * @target_offset: the arp/ip/ip6_t->target_offset + * @next_offset: the arp/ip/ip6_t->next_offset + * + * validates that target_offset and next_offset are sane. + * + * The arp/ip/ip6t_entry structure @base must have passed following tests: + * - it must point to a valid memory location + * - base to base + next_offset must be accessible, i.e. not exceed allocated + * length. + * + * Return: 0 on success, negative errno on failure. + */ +int xt_check_entry_offsets(const void *base, + unsigned int target_offset, + unsigned int next_offset) +{ + const struct xt_entry_target *t; + const char *e = base; + + if (target_offset + sizeof(*t) > next_offset) + return -EINVAL; + + t = (void *)(e + target_offset); + if (target_offset + t->u.target_size > next_offset) + return -EINVAL; + + return 0; +} +EXPORT_SYMBOL(xt_check_entry_offsets); + int xt_check_target(struct xt_tgchk_param *par, unsigned int size, u_int8_t proto, bool inv_proto) { From aa412ba225dd3bc36d404c28cdc3d674850d80d0 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Fri, 1 Apr 2016 14:17:24 +0200 Subject: [PATCH 0582/1649] netfilter: x_tables: kill check_entry helper Once we add more sanity testing to xt_check_entry_offsets it becomes relvant if we're expecting a 32bit 'config_compat' blob or a normal one. Since we already have a lot of similar-named functions (check_entry, compat_check_entry, find_and_check_entry, etc.) and the current incarnation is short just fold its contents into the callers. Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- net/ipv4/netfilter/arp_tables.c | 19 ++++++++----------- net/ipv4/netfilter/ip_tables.c | 20 ++++++++------------ net/ipv6/netfilter/ip6_tables.c | 20 ++++++++------------ 3 files changed, 24 insertions(+), 35 deletions(-) diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c index 74668c1d3243..24ad92a60b7a 100644 --- a/net/ipv4/netfilter/arp_tables.c +++ b/net/ipv4/netfilter/arp_tables.c @@ -494,14 +494,6 @@ next: return 1; } -static inline int check_entry(const struct arpt_entry *e) -{ - if (!arp_checkentry(&e->arp)) - return -EINVAL; - - return xt_check_entry_offsets(e, e->target_offset, e->next_offset); -} - static inline int check_target(struct arpt_entry *e, const char *name) { struct xt_entry_target *t = arpt_get_target(e); @@ -597,7 +589,10 @@ static inline int check_entry_size_and_hooks(struct arpt_entry *e, return -EINVAL; } - err = check_entry(e); + if (!arp_checkentry(&e->arp)) + return -EINVAL; + + err = xt_check_entry_offsets(e, e->target_offset, e->next_offset); if (err) return err; @@ -1256,8 +1251,10 @@ check_compat_entry_size_and_hooks(struct compat_arpt_entry *e, return -EINVAL; } - /* For purposes of check_entry casting the compat entry is fine */ - ret = check_entry((struct arpt_entry *)e); + if (!arp_checkentry(&e->arp)) + return -EINVAL; + + ret = xt_check_entry_offsets(e, e->target_offset, e->next_offset); if (ret) return ret; diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c index 71c204d4ca5f..cdf18502a047 100644 --- a/net/ipv4/netfilter/ip_tables.c +++ b/net/ipv4/netfilter/ip_tables.c @@ -587,15 +587,6 @@ static void cleanup_match(struct xt_entry_match *m, struct net *net) module_put(par.match->me); } -static int -check_entry(const struct ipt_entry *e) -{ - if (!ip_checkentry(&e->ip)) - return -EINVAL; - - return 
xt_check_entry_offsets(e, e->target_offset, e->next_offset); -} - static int check_match(struct xt_entry_match *m, struct xt_mtchk_param *par) { @@ -760,7 +751,10 @@ check_entry_size_and_hooks(struct ipt_entry *e, return -EINVAL; } - err = check_entry(e); + if (!ip_checkentry(&e->ip)) + return -EINVAL; + + err = xt_check_entry_offsets(e, e->target_offset, e->next_offset); if (err) return err; @@ -1516,8 +1510,10 @@ check_compat_entry_size_and_hooks(struct compat_ipt_entry *e, return -EINVAL; } - /* For purposes of check_entry casting the compat entry is fine */ - ret = check_entry((struct ipt_entry *)e); + if (!ip_checkentry(&e->ip)) + return -EINVAL; + + ret = xt_check_entry_offsets(e, e->target_offset, e->next_offset); if (ret) return ret; diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c index 24ae7f458b3b..e3783116ed48 100644 --- a/net/ipv6/netfilter/ip6_tables.c +++ b/net/ipv6/netfilter/ip6_tables.c @@ -599,15 +599,6 @@ static void cleanup_match(struct xt_entry_match *m, struct net *net) module_put(par.match->me); } -static int -check_entry(const struct ip6t_entry *e) -{ - if (!ip6_checkentry(&e->ipv6)) - return -EINVAL; - - return xt_check_entry_offsets(e, e->target_offset, e->next_offset); -} - static int check_match(struct xt_entry_match *m, struct xt_mtchk_param *par) { const struct ip6t_ip6 *ipv6 = par->entryinfo; @@ -772,7 +763,10 @@ check_entry_size_and_hooks(struct ip6t_entry *e, return -EINVAL; } - err = check_entry(e); + if (!ip6_checkentry(&e->ipv6)) + return -EINVAL; + + err = xt_check_entry_offsets(e, e->target_offset, e->next_offset); if (err) return err; @@ -1528,8 +1522,10 @@ check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e, return -EINVAL; } - /* For purposes of check_entry casting the compat entry is fine */ - ret = check_entry((struct ip6t_entry *)e); + if (!ip6_checkentry(&e->ipv6)) + return -EINVAL; + + ret = xt_check_entry_offsets(e, e->target_offset, e->next_offset); if (ret) return ret; From a08e4e190b866579896c09af59b3bdca821da2cd Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Fri, 1 Apr 2016 14:17:25 +0200 Subject: [PATCH 0583/1649] netfilter: x_tables: assert minimum target size The target size includes the size of the xt_entry_target struct. Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- net/netfilter/x_tables.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c index 1f44bfa8dd94..ec1b7183fff9 100644 --- a/net/netfilter/x_tables.c +++ b/net/netfilter/x_tables.c @@ -568,6 +568,9 @@ int xt_check_entry_offsets(const void *base, return -EINVAL; t = (void *)(e + target_offset); + if (t->u.target_size < sizeof(*t)) + return -EINVAL; + if (target_offset + t->u.target_size > next_offset) return -EINVAL; From fc1221b3a163d1386d1052184202d5dc50d302d1 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Fri, 1 Apr 2016 14:17:26 +0200 Subject: [PATCH 0584/1649] netfilter: x_tables: add compat version of xt_check_entry_offsets 32bit rulesets have different layout and alignment requirements, so once more integrity checks get added to xt_check_entry_offsets it will reject well-formed 32bit rulesets. 
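To see why a separate compat checker is needed, the stand-alone sketch below models how the target header's layout shifts when the embedded kernel pointer shrinks to a 32-bit handle. The structures are simplified stand-ins for xt_entry_target and its compat counterpart, and the printed sizes and alignments are only what a typical 64-bit ABI would produce.

#include <stdint.h>
#include <stdio.h>

/* simplified stand-in for the native target header as a 64-bit kernel sees it */
struct native_target_hdr {
        union {
                struct { uint16_t target_size; char name[29]; uint8_t revision; } user;
                struct { uint16_t target_size; void *target; } kernel;
                uint16_t target_size;
        } u;
};

/* what 32-bit userspace lays out: the kernel pointer becomes a 32-bit handle */
struct compat_target_hdr {
        union {
                struct { uint16_t target_size; char name[29]; uint8_t revision; } user;
                struct { uint16_t target_size; uint32_t target; } kernel;
                uint16_t target_size;
        } u;
};

int main(void)
{
        /* on a typical 64-bit ABI the alignment differs, which shifts the
         * padding and the offsets of whatever follows in the rule blob */
        printf("native: size=%zu align=%zu\n",
               sizeof(struct native_target_hdr),
               _Alignof(struct native_target_hdr));
        printf("compat: size=%zu align=%zu\n",
               sizeof(struct compat_target_hdr),
               _Alignof(struct compat_target_hdr));
        return 0;
}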
Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- include/linux/netfilter/x_tables.h | 3 +++ net/ipv4/netfilter/arp_tables.c | 3 ++- net/ipv4/netfilter/ip_tables.c | 3 ++- net/ipv6/netfilter/ip6_tables.c | 3 ++- net/netfilter/x_tables.c | 22 ++++++++++++++++++++++ 5 files changed, 31 insertions(+), 3 deletions(-) diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h index 0de0862897a4..08de48bbe92e 100644 --- a/include/linux/netfilter/x_tables.h +++ b/include/linux/netfilter/x_tables.h @@ -494,6 +494,9 @@ void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr, unsigned int *size); int xt_compat_target_to_user(const struct xt_entry_target *t, void __user **dstptr, unsigned int *size); +int xt_compat_check_entry_offsets(const void *base, + unsigned int target_offset, + unsigned int next_offset); #endif /* CONFIG_COMPAT */ #endif /* _X_TABLES_H */ diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c index 24ad92a60b7a..ab8952a49bfa 100644 --- a/net/ipv4/netfilter/arp_tables.c +++ b/net/ipv4/netfilter/arp_tables.c @@ -1254,7 +1254,8 @@ check_compat_entry_size_and_hooks(struct compat_arpt_entry *e, if (!arp_checkentry(&e->arp)) return -EINVAL; - ret = xt_check_entry_offsets(e, e->target_offset, e->next_offset); + ret = xt_compat_check_entry_offsets(e, e->target_offset, + e->next_offset); if (ret) return ret; diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c index cdf18502a047..7d24c872723f 100644 --- a/net/ipv4/netfilter/ip_tables.c +++ b/net/ipv4/netfilter/ip_tables.c @@ -1513,7 +1513,8 @@ check_compat_entry_size_and_hooks(struct compat_ipt_entry *e, if (!ip_checkentry(&e->ip)) return -EINVAL; - ret = xt_check_entry_offsets(e, e->target_offset, e->next_offset); + ret = xt_compat_check_entry_offsets(e, + e->target_offset, e->next_offset); if (ret) return ret; diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c index e3783116ed48..73eee7b5fd60 100644 --- a/net/ipv6/netfilter/ip6_tables.c +++ b/net/ipv6/netfilter/ip6_tables.c @@ -1525,7 +1525,8 @@ check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e, if (!ip6_checkentry(&e->ipv6)) return -EINVAL; - ret = xt_check_entry_offsets(e, e->target_offset, e->next_offset); + ret = xt_compat_check_entry_offsets(e, + e->target_offset, e->next_offset); if (ret) return ret; diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c index ec1b7183fff9..fa206ceb269f 100644 --- a/net/netfilter/x_tables.c +++ b/net/netfilter/x_tables.c @@ -539,6 +539,27 @@ int xt_compat_match_to_user(const struct xt_entry_match *m, return 0; } EXPORT_SYMBOL_GPL(xt_compat_match_to_user); + +int xt_compat_check_entry_offsets(const void *base, + unsigned int target_offset, + unsigned int next_offset) +{ + const struct compat_xt_entry_target *t; + const char *e = base; + + if (target_offset + sizeof(*t) > next_offset) + return -EINVAL; + + t = (void *)(e + target_offset); + if (t->u.target_size < sizeof(*t)) + return -EINVAL; + + if (target_offset + t->u.target_size > next_offset) + return -EINVAL; + + return 0; +} +EXPORT_SYMBOL(xt_compat_check_entry_offsets); #endif /* CONFIG_COMPAT */ /** @@ -549,6 +570,7 @@ EXPORT_SYMBOL_GPL(xt_compat_match_to_user); * @next_offset: the arp/ip/ip6_t->next_offset * * validates that target_offset and next_offset are sane. + * Also see xt_compat_check_entry_offsets for CONFIG_COMPAT version. 
* * The arp/ip/ip6t_entry structure @base must have passed following tests: * - it must point to a valid memory location From 7ed2abddd20cf8f6bd27f65bd218f26fa5bf7f44 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Fri, 1 Apr 2016 14:17:27 +0200 Subject: [PATCH 0585/1649] netfilter: x_tables: check standard target size too We have targets and standard targets -- the latter carries a verdict. The ip/ip6tables validation functions will access t->verdict for the standard targets to fetch the jump offset or verdict for chainloop detection, but this happens before the targets get checked/validated. Thus we also need to check for verdict presence here, else t->verdict can point right after a blob. Spotted with UBSAN while testing malformed blobs. Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- net/netfilter/x_tables.c | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c index fa206ceb269f..1cb7a271c024 100644 --- a/net/netfilter/x_tables.c +++ b/net/netfilter/x_tables.c @@ -540,6 +540,13 @@ int xt_compat_match_to_user(const struct xt_entry_match *m, } EXPORT_SYMBOL_GPL(xt_compat_match_to_user); +/* non-compat version may have padding after verdict */ +struct compat_xt_standard_target { + struct compat_xt_entry_target t; + compat_uint_t verdict; +}; + +/* see xt_check_entry_offsets */ int xt_compat_check_entry_offsets(const void *base, unsigned int target_offset, unsigned int next_offset) @@ -557,6 +564,10 @@ int xt_compat_check_entry_offsets(const void *base, if (target_offset + t->u.target_size > next_offset) return -EINVAL; + if (strcmp(t->u.user.name, XT_STANDARD_TARGET) == 0 && + target_offset + sizeof(struct compat_xt_standard_target) != next_offset) + return -EINVAL; + return 0; } EXPORT_SYMBOL(xt_compat_check_entry_offsets); @@ -596,6 +607,10 @@ int xt_check_entry_offsets(const void *base, if (target_offset + t->u.target_size > next_offset) return -EINVAL; + if (strcmp(t->u.user.name, XT_STANDARD_TARGET) == 0 && + target_offset + sizeof(struct xt_standard_target) != next_offset) + return -EINVAL; + return 0; } EXPORT_SYMBOL(xt_check_entry_offsets); From ce683e5f9d045e5d67d1312a42b359cb2ab2a13c Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Fri, 1 Apr 2016 14:17:28 +0200 Subject: [PATCH 0586/1649] netfilter: x_tables: check for bogus target offset We're currently asserting that targetoff + targetsize <= nextoff. Extend it to also check that targetoff is >= sizeof(xt_entry). Since this is generic code, add an argument pointing to the start of the match/target, we can then derive the base structure size from the delta. We also need the e->elems pointer in a followup change to validate matches. 
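The delta trick mentioned above can be sketched in isolation as follows; fake_rule and check_target_offset are simplified stand-ins (the real helper also validates the target size, the standard-target verdict and every match), but they show how passing e->elems lets generic code recover the family-specific header size.

#include <errno.h>
#include <stdio.h>

struct fake_rule {                      /* stand-in for ip(6)t/arpt_entry    */
        unsigned int target_offset;     /* where this rule's target begins   */
        unsigned int next_offset;       /* total size of this rule           */
        unsigned char elems[];          /* matches, then the target          */
};

static int check_target_offset(const void *base, const char *elems,
                               unsigned int target_offset,
                               unsigned int next_offset)
{
        /* delta from rule start to elems == size of the family header,
         * so this generic check needs no per-family knowledge */
        long size_of_base_struct = elems - (const char *)base;

        /* the target must not start inside the fixed header ... */
        if (target_offset < size_of_base_struct)
                return -EINVAL;
        /* ... and must still lie in front of the next rule */
        if (target_offset >= next_offset)
                return -EINVAL;
        return 0;
}

int main(void)
{
        struct fake_rule r = { .target_offset = 4, .next_offset = 64 };

        /* offset 4 is inside the fixed header (elems starts at offset 8 here) */
        printf("bogus: %d\n", check_target_offset(&r, (const char *)r.elems,
                                                  r.target_offset, r.next_offset));

        r.target_offset = sizeof(r);    /* first legal position: right at elems */
        printf("sane:  %d\n", check_target_offset(&r, (const char *)r.elems,
                                                  r.target_offset, r.next_offset));
        return 0;
}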
Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- include/linux/netfilter/x_tables.h | 4 ++-- net/ipv4/netfilter/arp_tables.c | 5 +++-- net/ipv4/netfilter/ip_tables.c | 5 +++-- net/ipv6/netfilter/ip6_tables.c | 5 +++-- net/netfilter/x_tables.c | 17 +++++++++++++++-- 5 files changed, 26 insertions(+), 10 deletions(-) diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h index 08de48bbe92e..30cfb1e943fb 100644 --- a/include/linux/netfilter/x_tables.h +++ b/include/linux/netfilter/x_tables.h @@ -242,7 +242,7 @@ void xt_unregister_match(struct xt_match *target); int xt_register_matches(struct xt_match *match, unsigned int n); void xt_unregister_matches(struct xt_match *match, unsigned int n); -int xt_check_entry_offsets(const void *base, +int xt_check_entry_offsets(const void *base, const char *elems, unsigned int target_offset, unsigned int next_offset); @@ -494,7 +494,7 @@ void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr, unsigned int *size); int xt_compat_target_to_user(const struct xt_entry_target *t, void __user **dstptr, unsigned int *size); -int xt_compat_check_entry_offsets(const void *base, +int xt_compat_check_entry_offsets(const void *base, const char *elems, unsigned int target_offset, unsigned int next_offset); diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c index ab8952a49bfa..95ed4e454c60 100644 --- a/net/ipv4/netfilter/arp_tables.c +++ b/net/ipv4/netfilter/arp_tables.c @@ -592,7 +592,8 @@ static inline int check_entry_size_and_hooks(struct arpt_entry *e, if (!arp_checkentry(&e->arp)) return -EINVAL; - err = xt_check_entry_offsets(e, e->target_offset, e->next_offset); + err = xt_check_entry_offsets(e, e->elems, e->target_offset, + e->next_offset); if (err) return err; @@ -1254,7 +1255,7 @@ check_compat_entry_size_and_hooks(struct compat_arpt_entry *e, if (!arp_checkentry(&e->arp)) return -EINVAL; - ret = xt_compat_check_entry_offsets(e, e->target_offset, + ret = xt_compat_check_entry_offsets(e, e->elems, e->target_offset, e->next_offset); if (ret) return ret; diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c index 7d24c872723f..baab033d74e0 100644 --- a/net/ipv4/netfilter/ip_tables.c +++ b/net/ipv4/netfilter/ip_tables.c @@ -754,7 +754,8 @@ check_entry_size_and_hooks(struct ipt_entry *e, if (!ip_checkentry(&e->ip)) return -EINVAL; - err = xt_check_entry_offsets(e, e->target_offset, e->next_offset); + err = xt_check_entry_offsets(e, e->elems, e->target_offset, + e->next_offset); if (err) return err; @@ -1513,7 +1514,7 @@ check_compat_entry_size_and_hooks(struct compat_ipt_entry *e, if (!ip_checkentry(&e->ip)) return -EINVAL; - ret = xt_compat_check_entry_offsets(e, + ret = xt_compat_check_entry_offsets(e, e->elems, e->target_offset, e->next_offset); if (ret) return ret; diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c index 73eee7b5fd60..6957627c7931 100644 --- a/net/ipv6/netfilter/ip6_tables.c +++ b/net/ipv6/netfilter/ip6_tables.c @@ -766,7 +766,8 @@ check_entry_size_and_hooks(struct ip6t_entry *e, if (!ip6_checkentry(&e->ipv6)) return -EINVAL; - err = xt_check_entry_offsets(e, e->target_offset, e->next_offset); + err = xt_check_entry_offsets(e, e->elems, e->target_offset, + e->next_offset); if (err) return err; @@ -1525,7 +1526,7 @@ check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e, if (!ip6_checkentry(&e->ipv6)) return -EINVAL; - ret = xt_compat_check_entry_offsets(e, + ret = 
xt_compat_check_entry_offsets(e, e->elems, e->target_offset, e->next_offset); if (ret) return ret; diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c index 1cb7a271c024..e2a6f2a9051b 100644 --- a/net/netfilter/x_tables.c +++ b/net/netfilter/x_tables.c @@ -546,14 +546,17 @@ struct compat_xt_standard_target { compat_uint_t verdict; }; -/* see xt_check_entry_offsets */ -int xt_compat_check_entry_offsets(const void *base, +int xt_compat_check_entry_offsets(const void *base, const char *elems, unsigned int target_offset, unsigned int next_offset) { + long size_of_base_struct = elems - (const char *)base; const struct compat_xt_entry_target *t; const char *e = base; + if (target_offset < size_of_base_struct) + return -EINVAL; + if (target_offset + sizeof(*t) > next_offset) return -EINVAL; @@ -577,12 +580,16 @@ EXPORT_SYMBOL(xt_compat_check_entry_offsets); * xt_check_entry_offsets - validate arp/ip/ip6t_entry * * @base: pointer to arp/ip/ip6t_entry + * @elems: pointer to first xt_entry_match, i.e. ip(6)t_entry->elems * @target_offset: the arp/ip/ip6_t->target_offset * @next_offset: the arp/ip/ip6_t->next_offset * * validates that target_offset and next_offset are sane. * Also see xt_compat_check_entry_offsets for CONFIG_COMPAT version. * + * This function does not validate the targets or matches themselves, it + * only tests that all the offsets and sizes are correct. + * * The arp/ip/ip6t_entry structure @base must have passed following tests: * - it must point to a valid memory location * - base to base + next_offset must be accessible, i.e. not exceed allocated @@ -591,12 +598,18 @@ EXPORT_SYMBOL(xt_compat_check_entry_offsets); * Return: 0 on success, negative errno on failure. */ int xt_check_entry_offsets(const void *base, + const char *elems, unsigned int target_offset, unsigned int next_offset) { + long size_of_base_struct = elems - (const char *)base; const struct xt_entry_target *t; const char *e = base; + /* target start is within the ip/ip6/arpt_entry struct */ + if (target_offset < size_of_base_struct) + return -EINVAL; + if (target_offset + sizeof(*t) > next_offset) return -EINVAL; From 13631bfc604161a9d69cd68991dff8603edd66f9 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Fri, 1 Apr 2016 14:17:29 +0200 Subject: [PATCH 0587/1649] netfilter: x_tables: validate all offsets and sizes in a rule Validate that all matches (if any) add up to the beginning of the target and that each match covers at least the base structure size. The compat path should be able to safely re-use the function as the structures only differ in alignment; added a BUILD_BUG_ON just in case we have an arch that adds padding as well. Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- net/netfilter/x_tables.c | 81 +++++++++++++++++++++++++++++++++++++--- 1 file changed, 76 insertions(+), 5 deletions(-) diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c index e2a6f2a9051b..f9aa9715c32e 100644 --- a/net/netfilter/x_tables.c +++ b/net/netfilter/x_tables.c @@ -416,6 +416,47 @@ int xt_check_match(struct xt_mtchk_param *par, } EXPORT_SYMBOL_GPL(xt_check_match); +/** xt_check_entry_match - check that matches end before start of target + * + * @match: beginning of xt_entry_match + * @target: beginning of this rules target (alleged end of matches) + * @alignment: alignment requirement of match structures + * + * Validates that all matches add up to the beginning of the target, + * and that each match covers at least the base structure size. 
+ * + * Return: 0 on success, negative errno on failure. + */ +static int xt_check_entry_match(const char *match, const char *target, + const size_t alignment) +{ + const struct xt_entry_match *pos; + int length = target - match; + + if (length == 0) /* no matches */ + return 0; + + pos = (struct xt_entry_match *)match; + do { + if ((unsigned long)pos % alignment) + return -EINVAL; + + if (length < (int)sizeof(struct xt_entry_match)) + return -EINVAL; + + if (pos->u.match_size < sizeof(struct xt_entry_match)) + return -EINVAL; + + if (pos->u.match_size > length) + return -EINVAL; + + length -= pos->u.match_size; + pos = ((void *)((char *)(pos) + (pos)->u.match_size)); + } while (length > 0); + + return 0; +} + #ifdef CONFIG_COMPAT int xt_compat_add_offset(u_int8_t af, unsigned int offset, int delta) { @@ -571,7 +612,14 @@ int xt_compat_check_entry_offsets(const void *base, const char *elems, target_offset + sizeof(struct compat_xt_standard_target) != next_offset) return -EINVAL; - return 0; + /* compat_xt_entry match has less strict aligment requirements, + * otherwise they are identical. In case of padding differences + * we need to add compat version of xt_check_entry_match. + */ + BUILD_BUG_ON(sizeof(struct compat_xt_entry_match) != sizeof(struct xt_entry_match)); + + return xt_check_entry_match(elems, base + target_offset, + __alignof__(struct compat_xt_entry_match)); } EXPORT_SYMBOL(xt_compat_check_entry_offsets); #endif /* CONFIG_COMPAT */ @@ -584,17 +632,39 @@ EXPORT_SYMBOL(xt_compat_check_entry_offsets); * @target_offset: the arp/ip/ip6_t->target_offset * @next_offset: the arp/ip/ip6_t->next_offset * - * validates that target_offset and next_offset are sane. - * Also see xt_compat_check_entry_offsets for CONFIG_COMPAT version. + * validates that target_offset and next_offset are sane and that all + * match sizes (if any) align with the target offset. * * This function does not validate the targets or matches themselves, it - * only tests that all the offsets and sizes are correct. + * only tests that all the offsets and sizes are correct, that all + * match structures are aligned, and that the last structure ends where + * the target structure begins. + * + * Also see xt_compat_check_entry_offsets for CONFIG_COMPAT version. * * The arp/ip/ip6t_entry structure @base must have passed following tests: * - it must point to a valid memory location * - base to base + next_offset must be accessible, i.e. not exceed allocated * length. * + * A well-formed entry looks like this: + * + * ip(6)t_entry match [mtdata] match [mtdata] target [tgdata] ip(6)t_entry + * e->elems[]-----' | | + * matchsize | | + * matchsize | | + * | | + * target_offset---------------------------------' | + * next_offset---------------------------------------------------' + * + * elems[]: flexible array member at end of ip(6)/arpt_entry struct. + * This is where matches (if any) and the target reside. + * target_offset: beginning of target. + * next_offset: start of the next rule; also: size of this rule. + * Since targets have a minimum size, target_offset + minlen <= next_offset. + * + * Every match stores its size, sum of sizes must not exceed target_offset. + * * Return: 0 on success, negative errno on failure. 
*/ int xt_check_entry_offsets(const void *base, @@ -624,7 +694,8 @@ int xt_check_entry_offsets(const void *base, target_offset + sizeof(struct xt_standard_target) != next_offset) return -EINVAL; - return 0; + return xt_check_entry_match(elems, base + target_offset, + __alignof__(struct xt_entry_match)); } EXPORT_SYMBOL(xt_check_entry_offsets); From 7d3f843eed29222254c9feab481f55175a1afcc9 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Fri, 1 Apr 2016 14:17:30 +0200 Subject: [PATCH 0588/1649] netfilter: ip_tables: simplify translate_compat_table args Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- net/ipv4/netfilter/ip_tables.c | 59 ++++++++++++++-------------------- 1 file changed, 24 insertions(+), 35 deletions(-) diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c index baab033d74e0..d70418604503 100644 --- a/net/ipv4/netfilter/ip_tables.c +++ b/net/ipv4/netfilter/ip_tables.c @@ -1449,7 +1449,6 @@ compat_copy_entry_to_user(struct ipt_entry *e, void __user **dstptr, static int compat_find_calc_match(struct xt_entry_match *m, - const char *name, const struct ipt_ip *ip, int *size) { @@ -1486,8 +1485,7 @@ check_compat_entry_size_and_hooks(struct compat_ipt_entry *e, const unsigned char *base, const unsigned char *limit, const unsigned int *hook_entries, - const unsigned int *underflows, - const char *name) + const unsigned int *underflows) { struct xt_entry_match *ematch; struct xt_entry_target *t; @@ -1523,7 +1521,7 @@ check_compat_entry_size_and_hooks(struct compat_ipt_entry *e, entry_offset = (void *)e - (void *)base; j = 0; xt_ematch_foreach(ematch, e) { - ret = compat_find_calc_match(ematch, name, &e->ip, &off); + ret = compat_find_calc_match(ematch, &e->ip, &off); if (ret != 0) goto release_matches; ++j; @@ -1572,7 +1570,7 @@ release_matches: static int compat_copy_entry_from_user(struct compat_ipt_entry *e, void **dstptr, - unsigned int *size, const char *name, + unsigned int *size, struct xt_table_info *newinfo, unsigned char *base) { struct xt_entry_target *t; @@ -1655,14 +1653,9 @@ compat_check_entry(struct ipt_entry *e, struct net *net, const char *name) static int translate_compat_table(struct net *net, - const char *name, - unsigned int valid_hooks, struct xt_table_info **pinfo, void **pentry0, - unsigned int total_size, - unsigned int number, - unsigned int *hook_entries, - unsigned int *underflows) + const struct compat_ipt_replace *compatr) { unsigned int i, j; struct xt_table_info *newinfo, *info; @@ -1674,8 +1667,8 @@ translate_compat_table(struct net *net, info = *pinfo; entry0 = *pentry0; - size = total_size; - info->number = number; + size = compatr->size; + info->number = compatr->num_entries; /* Init all hooks to impossible value. */ for (i = 0; i < NF_INET_NUMHOOKS; i++) { @@ -1686,40 +1679,39 @@ translate_compat_table(struct net *net, duprintf("translate_compat_table: size %u\n", info->size); j = 0; xt_compat_lock(AF_INET); - xt_compat_init_offsets(AF_INET, number); + xt_compat_init_offsets(AF_INET, compatr->num_entries); /* Walk through entries, checking offsets. 
*/ - xt_entry_foreach(iter0, entry0, total_size) { + xt_entry_foreach(iter0, entry0, compatr->size) { ret = check_compat_entry_size_and_hooks(iter0, info, &size, entry0, - entry0 + total_size, - hook_entries, - underflows, - name); + entry0 + compatr->size, + compatr->hook_entry, + compatr->underflow); if (ret != 0) goto out_unlock; ++j; } ret = -EINVAL; - if (j != number) { + if (j != compatr->num_entries) { duprintf("translate_compat_table: %u not %u entries\n", - j, number); + j, compatr->num_entries); goto out_unlock; } /* Check hooks all assigned */ for (i = 0; i < NF_INET_NUMHOOKS; i++) { /* Only hooks which are valid */ - if (!(valid_hooks & (1 << i))) + if (!(compatr->valid_hooks & (1 << i))) continue; if (info->hook_entry[i] == 0xFFFFFFFF) { duprintf("Invalid hook entry %u %u\n", - i, hook_entries[i]); + i, info->hook_entry[i]); goto out_unlock; } if (info->underflow[i] == 0xFFFFFFFF) { duprintf("Invalid underflow %u %u\n", - i, underflows[i]); + i, info->underflow[i]); goto out_unlock; } } @@ -1729,17 +1721,17 @@ translate_compat_table(struct net *net, if (!newinfo) goto out_unlock; - newinfo->number = number; + newinfo->number = compatr->num_entries; for (i = 0; i < NF_INET_NUMHOOKS; i++) { newinfo->hook_entry[i] = info->hook_entry[i]; newinfo->underflow[i] = info->underflow[i]; } entry1 = newinfo->entries; pos = entry1; - size = total_size; - xt_entry_foreach(iter0, entry0, total_size) { + size = compatr->size; + xt_entry_foreach(iter0, entry0, compatr->size) { ret = compat_copy_entry_from_user(iter0, &pos, &size, - name, newinfo, entry1); + newinfo, entry1); if (ret != 0) break; } @@ -1749,12 +1741,12 @@ translate_compat_table(struct net *net, goto free_newinfo; ret = -ELOOP; - if (!mark_source_chains(newinfo, valid_hooks, entry1)) + if (!mark_source_chains(newinfo, compatr->valid_hooks, entry1)) goto free_newinfo; i = 0; xt_entry_foreach(iter1, entry1, newinfo->size) { - ret = compat_check_entry(iter1, net, name); + ret = compat_check_entry(iter1, net, compatr->name); if (ret != 0) break; ++i; @@ -1794,7 +1786,7 @@ translate_compat_table(struct net *net, free_newinfo: xt_free_table_info(newinfo); out: - xt_entry_foreach(iter0, entry0, total_size) { + xt_entry_foreach(iter0, entry0, compatr->size) { if (j-- == 0) break; compat_release_entry(iter0); @@ -1839,10 +1831,7 @@ compat_do_replace(struct net *net, void __user *user, unsigned int len) goto free_newinfo; } - ret = translate_compat_table(net, tmp.name, tmp.valid_hooks, - &newinfo, &loc_cpu_entry, tmp.size, - tmp.num_entries, tmp.hook_entry, - tmp.underflow); + ret = translate_compat_table(net, &newinfo, &loc_cpu_entry, &tmp); if (ret != 0) goto free_newinfo; From 329a0807124f12fe1c8032f95d8a8eb47047fb0e Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Fri, 1 Apr 2016 14:17:31 +0200 Subject: [PATCH 0589/1649] netfilter: ip6_tables: simplify translate_compat_table args Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- net/ipv6/netfilter/ip6_tables.c | 59 ++++++++++++++------------------- 1 file changed, 24 insertions(+), 35 deletions(-) diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c index 6957627c7931..8d082c557771 100644 --- a/net/ipv6/netfilter/ip6_tables.c +++ b/net/ipv6/netfilter/ip6_tables.c @@ -1461,7 +1461,6 @@ compat_copy_entry_to_user(struct ip6t_entry *e, void __user **dstptr, static int compat_find_calc_match(struct xt_entry_match *m, - const char *name, const struct ip6t_ip6 *ipv6, int *size) { @@ -1498,8 +1497,7 @@ check_compat_entry_size_and_hooks(struct 
compat_ip6t_entry *e, const unsigned char *base, const unsigned char *limit, const unsigned int *hook_entries, - const unsigned int *underflows, - const char *name) + const unsigned int *underflows) { struct xt_entry_match *ematch; struct xt_entry_target *t; @@ -1535,7 +1533,7 @@ check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e, entry_offset = (void *)e - (void *)base; j = 0; xt_ematch_foreach(ematch, e) { - ret = compat_find_calc_match(ematch, name, &e->ipv6, &off); + ret = compat_find_calc_match(ematch, &e->ipv6, &off); if (ret != 0) goto release_matches; ++j; @@ -1584,7 +1582,7 @@ release_matches: static int compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr, - unsigned int *size, const char *name, + unsigned int *size, struct xt_table_info *newinfo, unsigned char *base) { struct xt_entry_target *t; @@ -1664,14 +1662,9 @@ static int compat_check_entry(struct ip6t_entry *e, struct net *net, static int translate_compat_table(struct net *net, - const char *name, - unsigned int valid_hooks, struct xt_table_info **pinfo, void **pentry0, - unsigned int total_size, - unsigned int number, - unsigned int *hook_entries, - unsigned int *underflows) + const struct compat_ip6t_replace *compatr) { unsigned int i, j; struct xt_table_info *newinfo, *info; @@ -1683,8 +1676,8 @@ translate_compat_table(struct net *net, info = *pinfo; entry0 = *pentry0; - size = total_size; - info->number = number; + size = compatr->size; + info->number = compatr->num_entries; /* Init all hooks to impossible value. */ for (i = 0; i < NF_INET_NUMHOOKS; i++) { @@ -1695,40 +1688,39 @@ translate_compat_table(struct net *net, duprintf("translate_compat_table: size %u\n", info->size); j = 0; xt_compat_lock(AF_INET6); - xt_compat_init_offsets(AF_INET6, number); + xt_compat_init_offsets(AF_INET6, compatr->num_entries); /* Walk through entries, checking offsets. 
*/ - xt_entry_foreach(iter0, entry0, total_size) { + xt_entry_foreach(iter0, entry0, compatr->size) { ret = check_compat_entry_size_and_hooks(iter0, info, &size, entry0, - entry0 + total_size, - hook_entries, - underflows, - name); + entry0 + compatr->size, + compatr->hook_entry, + compatr->underflow); if (ret != 0) goto out_unlock; ++j; } ret = -EINVAL; - if (j != number) { + if (j != compatr->num_entries) { duprintf("translate_compat_table: %u not %u entries\n", - j, number); + j, compatr->num_entries); goto out_unlock; } /* Check hooks all assigned */ for (i = 0; i < NF_INET_NUMHOOKS; i++) { /* Only hooks which are valid */ - if (!(valid_hooks & (1 << i))) + if (!(compatr->valid_hooks & (1 << i))) continue; if (info->hook_entry[i] == 0xFFFFFFFF) { duprintf("Invalid hook entry %u %u\n", - i, hook_entries[i]); + i, info->hook_entry[i]); goto out_unlock; } if (info->underflow[i] == 0xFFFFFFFF) { duprintf("Invalid underflow %u %u\n", - i, underflows[i]); + i, info->underflow[i]); goto out_unlock; } } @@ -1738,17 +1730,17 @@ translate_compat_table(struct net *net, if (!newinfo) goto out_unlock; - newinfo->number = number; + newinfo->number = compatr->num_entries; for (i = 0; i < NF_INET_NUMHOOKS; i++) { newinfo->hook_entry[i] = info->hook_entry[i]; newinfo->underflow[i] = info->underflow[i]; } entry1 = newinfo->entries; pos = entry1; - size = total_size; - xt_entry_foreach(iter0, entry0, total_size) { + size = compatr->size; + xt_entry_foreach(iter0, entry0, compatr->size) { ret = compat_copy_entry_from_user(iter0, &pos, &size, - name, newinfo, entry1); + newinfo, entry1); if (ret != 0) break; } @@ -1758,12 +1750,12 @@ translate_compat_table(struct net *net, goto free_newinfo; ret = -ELOOP; - if (!mark_source_chains(newinfo, valid_hooks, entry1)) + if (!mark_source_chains(newinfo, compatr->valid_hooks, entry1)) goto free_newinfo; i = 0; xt_entry_foreach(iter1, entry1, newinfo->size) { - ret = compat_check_entry(iter1, net, name); + ret = compat_check_entry(iter1, net, compatr->name); if (ret != 0) break; ++i; @@ -1803,7 +1795,7 @@ translate_compat_table(struct net *net, free_newinfo: xt_free_table_info(newinfo); out: - xt_entry_foreach(iter0, entry0, total_size) { + xt_entry_foreach(iter0, entry0, compatr->size) { if (j-- == 0) break; compat_release_entry(iter0); @@ -1848,10 +1840,7 @@ compat_do_replace(struct net *net, void __user *user, unsigned int len) goto free_newinfo; } - ret = translate_compat_table(net, tmp.name, tmp.valid_hooks, - &newinfo, &loc_cpu_entry, tmp.size, - tmp.num_entries, tmp.hook_entry, - tmp.underflow); + ret = translate_compat_table(net, &newinfo, &loc_cpu_entry, &tmp); if (ret != 0) goto free_newinfo; From 8dddd32756f6fe8e4e82a63361119b7e2384e02f Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Fri, 1 Apr 2016 14:17:32 +0200 Subject: [PATCH 0590/1649] netfilter: arp_tables: simplify translate_compat_table args Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- net/ipv4/netfilter/arp_tables.c | 82 +++++++++++++++------------------ 1 file changed, 36 insertions(+), 46 deletions(-) diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c index 95ed4e454c60..1d1386dc159b 100644 --- a/net/ipv4/netfilter/arp_tables.c +++ b/net/ipv4/netfilter/arp_tables.c @@ -1214,6 +1214,18 @@ static int do_add_counters(struct net *net, const void __user *user, } #ifdef CONFIG_COMPAT +struct compat_arpt_replace { + char name[XT_TABLE_MAXNAMELEN]; + u32 valid_hooks; + u32 num_entries; + u32 size; + u32 hook_entry[NF_ARP_NUMHOOKS]; + u32 
underflow[NF_ARP_NUMHOOKS]; + u32 num_counters; + compat_uptr_t counters; + struct compat_arpt_entry entries[0]; +}; + static inline void compat_release_entry(struct compat_arpt_entry *e) { struct xt_entry_target *t; @@ -1229,8 +1241,7 @@ check_compat_entry_size_and_hooks(struct compat_arpt_entry *e, const unsigned char *base, const unsigned char *limit, const unsigned int *hook_entries, - const unsigned int *underflows, - const char *name) + const unsigned int *underflows) { struct xt_entry_target *t; struct xt_target *target; @@ -1301,7 +1312,7 @@ out: static int compat_copy_entry_from_user(struct compat_arpt_entry *e, void **dstptr, - unsigned int *size, const char *name, + unsigned int *size, struct xt_table_info *newinfo, unsigned char *base) { struct xt_entry_target *t; @@ -1334,14 +1345,9 @@ compat_copy_entry_from_user(struct compat_arpt_entry *e, void **dstptr, return ret; } -static int translate_compat_table(const char *name, - unsigned int valid_hooks, - struct xt_table_info **pinfo, +static int translate_compat_table(struct xt_table_info **pinfo, void **pentry0, - unsigned int total_size, - unsigned int number, - unsigned int *hook_entries, - unsigned int *underflows) + const struct compat_arpt_replace *compatr) { unsigned int i, j; struct xt_table_info *newinfo, *info; @@ -1353,8 +1359,8 @@ static int translate_compat_table(const char *name, info = *pinfo; entry0 = *pentry0; - size = total_size; - info->number = number; + size = compatr->size; + info->number = compatr->num_entries; /* Init all hooks to impossible value. */ for (i = 0; i < NF_ARP_NUMHOOKS; i++) { @@ -1365,40 +1371,39 @@ static int translate_compat_table(const char *name, duprintf("translate_compat_table: size %u\n", info->size); j = 0; xt_compat_lock(NFPROTO_ARP); - xt_compat_init_offsets(NFPROTO_ARP, number); + xt_compat_init_offsets(NFPROTO_ARP, compatr->num_entries); /* Walk through entries, checking offsets. 
*/ - xt_entry_foreach(iter0, entry0, total_size) { + xt_entry_foreach(iter0, entry0, compatr->size) { ret = check_compat_entry_size_and_hooks(iter0, info, &size, entry0, - entry0 + total_size, - hook_entries, - underflows, - name); + entry0 + compatr->size, + compatr->hook_entry, + compatr->underflow); if (ret != 0) goto out_unlock; ++j; } ret = -EINVAL; - if (j != number) { + if (j != compatr->num_entries) { duprintf("translate_compat_table: %u not %u entries\n", - j, number); + j, compatr->num_entries); goto out_unlock; } /* Check hooks all assigned */ for (i = 0; i < NF_ARP_NUMHOOKS; i++) { /* Only hooks which are valid */ - if (!(valid_hooks & (1 << i))) + if (!(compatr->valid_hooks & (1 << i))) continue; if (info->hook_entry[i] == 0xFFFFFFFF) { duprintf("Invalid hook entry %u %u\n", - i, hook_entries[i]); + i, info->hook_entry[i]); goto out_unlock; } if (info->underflow[i] == 0xFFFFFFFF) { duprintf("Invalid underflow %u %u\n", - i, underflows[i]); + i, info->underflow[i]); goto out_unlock; } } @@ -1408,17 +1413,17 @@ static int translate_compat_table(const char *name, if (!newinfo) goto out_unlock; - newinfo->number = number; + newinfo->number = compatr->num_entries; for (i = 0; i < NF_ARP_NUMHOOKS; i++) { newinfo->hook_entry[i] = info->hook_entry[i]; newinfo->underflow[i] = info->underflow[i]; } entry1 = newinfo->entries; pos = entry1; - size = total_size; - xt_entry_foreach(iter0, entry0, total_size) { + size = compatr->size; + xt_entry_foreach(iter0, entry0, compatr->size) { ret = compat_copy_entry_from_user(iter0, &pos, &size, - name, newinfo, entry1); + newinfo, entry1); if (ret != 0) break; } @@ -1428,7 +1433,7 @@ static int translate_compat_table(const char *name, goto free_newinfo; ret = -ELOOP; - if (!mark_source_chains(newinfo, valid_hooks, entry1)) + if (!mark_source_chains(newinfo, compatr->valid_hooks, entry1)) goto free_newinfo; i = 0; @@ -1439,7 +1444,7 @@ static int translate_compat_table(const char *name, break; } - ret = check_target(iter1, name); + ret = check_target(iter1, compatr->name); if (ret != 0) { xt_percpu_counter_free(iter1->counters.pcnt); break; @@ -1481,7 +1486,7 @@ static int translate_compat_table(const char *name, free_newinfo: xt_free_table_info(newinfo); out: - xt_entry_foreach(iter0, entry0, total_size) { + xt_entry_foreach(iter0, entry0, compatr->size) { if (j-- == 0) break; compat_release_entry(iter0); @@ -1493,18 +1498,6 @@ out_unlock: goto out; } -struct compat_arpt_replace { - char name[XT_TABLE_MAXNAMELEN]; - u32 valid_hooks; - u32 num_entries; - u32 size; - u32 hook_entry[NF_ARP_NUMHOOKS]; - u32 underflow[NF_ARP_NUMHOOKS]; - u32 num_counters; - compat_uptr_t counters; - struct compat_arpt_entry entries[0]; -}; - static int compat_do_replace(struct net *net, void __user *user, unsigned int len) { @@ -1537,10 +1530,7 @@ static int compat_do_replace(struct net *net, void __user *user, goto free_newinfo; } - ret = translate_compat_table(tmp.name, tmp.valid_hooks, - &newinfo, &loc_cpu_entry, tmp.size, - tmp.num_entries, tmp.hook_entry, - tmp.underflow); + ret = translate_compat_table(&newinfo, &loc_cpu_entry, &tmp); if (ret != 0) goto free_newinfo; From 0188346f21e6546498c2a0f84888797ad4063fc5 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Fri, 1 Apr 2016 14:17:33 +0200 Subject: [PATCH 0591/1649] netfilter: x_tables: xt_compat_match_from_user doesn't need a retval Always returned 0. 
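With the return value gone, every caller's copy loop can drop its dead error path. Condensed from the ip_tables.c hunk below, the per-call-site change is roughly:

	/* before: propagating a result that could never be non-zero */
	xt_ematch_foreach(ematch, e) {
		ret = xt_compat_match_from_user(ematch, dstptr, size);
		if (ret != 0)
			return ret;
	}

	/* after: a plain traversal, since the helper now returns void */
	xt_ematch_foreach(ematch, e)
		xt_compat_match_from_user(ematch, dstptr, size);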
Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- include/linux/netfilter/x_tables.h | 2 +- net/ipv4/netfilter/arp_tables.c | 17 +++++------------ net/ipv4/netfilter/ip_tables.c | 26 +++++++++----------------- net/ipv6/netfilter/ip6_tables.c | 27 +++++++++------------------ net/netfilter/x_tables.c | 5 ++--- 5 files changed, 26 insertions(+), 51 deletions(-) diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h index 30cfb1e943fb..e2da9b90f1b8 100644 --- a/include/linux/netfilter/x_tables.h +++ b/include/linux/netfilter/x_tables.h @@ -484,7 +484,7 @@ void xt_compat_init_offsets(u_int8_t af, unsigned int number); int xt_compat_calc_jump(u_int8_t af, unsigned int offset); int xt_compat_match_offset(const struct xt_match *match); -int xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr, +void xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr, unsigned int *size); int xt_compat_match_to_user(const struct xt_entry_match *m, void __user **dstptr, unsigned int *size); diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c index 1d1386dc159b..be514c676fad 100644 --- a/net/ipv4/netfilter/arp_tables.c +++ b/net/ipv4/netfilter/arp_tables.c @@ -1310,7 +1310,7 @@ out: return ret; } -static int +static void compat_copy_entry_from_user(struct compat_arpt_entry *e, void **dstptr, unsigned int *size, struct xt_table_info *newinfo, unsigned char *base) @@ -1319,9 +1319,8 @@ compat_copy_entry_from_user(struct compat_arpt_entry *e, void **dstptr, struct xt_target *target; struct arpt_entry *de; unsigned int origsize; - int ret, h; + int h; - ret = 0; origsize = *size; de = (struct arpt_entry *)*dstptr; memcpy(de, e, sizeof(struct arpt_entry)); @@ -1342,7 +1341,6 @@ compat_copy_entry_from_user(struct compat_arpt_entry *e, void **dstptr, if ((unsigned char *)de - base < newinfo->underflow[h]) newinfo->underflow[h] -= origsize - *size; } - return ret; } static int translate_compat_table(struct xt_table_info **pinfo, @@ -1421,16 +1419,11 @@ static int translate_compat_table(struct xt_table_info **pinfo, entry1 = newinfo->entries; pos = entry1; size = compatr->size; - xt_entry_foreach(iter0, entry0, compatr->size) { - ret = compat_copy_entry_from_user(iter0, &pos, &size, - newinfo, entry1); - if (ret != 0) - break; - } + xt_entry_foreach(iter0, entry0, compatr->size) + compat_copy_entry_from_user(iter0, &pos, &size, + newinfo, entry1); xt_compat_flush_offsets(NFPROTO_ARP); xt_compat_unlock(NFPROTO_ARP); - if (ret) - goto free_newinfo; ret = -ELOOP; if (!mark_source_chains(newinfo, compatr->valid_hooks, entry1)) diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c index d70418604503..5c20eef980c1 100644 --- a/net/ipv4/netfilter/ip_tables.c +++ b/net/ipv4/netfilter/ip_tables.c @@ -1568,7 +1568,7 @@ release_matches: return ret; } -static int +static void compat_copy_entry_from_user(struct compat_ipt_entry *e, void **dstptr, unsigned int *size, struct xt_table_info *newinfo, unsigned char *base) @@ -1577,10 +1577,9 @@ compat_copy_entry_from_user(struct compat_ipt_entry *e, void **dstptr, struct xt_target *target; struct ipt_entry *de; unsigned int origsize; - int ret, h; + int h; struct xt_entry_match *ematch; - ret = 0; origsize = *size; de = (struct ipt_entry *)*dstptr; memcpy(de, e, sizeof(struct ipt_entry)); @@ -1589,11 +1588,9 @@ compat_copy_entry_from_user(struct compat_ipt_entry *e, void **dstptr, *dstptr += sizeof(struct ipt_entry); *size += sizeof(struct ipt_entry) - sizeof(struct 
compat_ipt_entry); - xt_ematch_foreach(ematch, e) { - ret = xt_compat_match_from_user(ematch, dstptr, size); - if (ret != 0) - return ret; - } + xt_ematch_foreach(ematch, e) + xt_compat_match_from_user(ematch, dstptr, size); + de->target_offset = e->target_offset - (origsize - *size); t = compat_ipt_get_target(e); target = t->u.kernel.target; @@ -1606,7 +1603,6 @@ compat_copy_entry_from_user(struct compat_ipt_entry *e, void **dstptr, if ((unsigned char *)de - base < newinfo->underflow[h]) newinfo->underflow[h] -= origsize - *size; } - return ret; } static int @@ -1729,16 +1725,12 @@ translate_compat_table(struct net *net, entry1 = newinfo->entries; pos = entry1; size = compatr->size; - xt_entry_foreach(iter0, entry0, compatr->size) { - ret = compat_copy_entry_from_user(iter0, &pos, &size, - newinfo, entry1); - if (ret != 0) - break; - } + xt_entry_foreach(iter0, entry0, compatr->size) + compat_copy_entry_from_user(iter0, &pos, &size, + newinfo, entry1); + xt_compat_flush_offsets(AF_INET); xt_compat_unlock(AF_INET); - if (ret) - goto free_newinfo; ret = -ELOOP; if (!mark_source_chains(newinfo, compatr->valid_hooks, entry1)) diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c index 8d082c557771..620d54c1c119 100644 --- a/net/ipv6/netfilter/ip6_tables.c +++ b/net/ipv6/netfilter/ip6_tables.c @@ -1580,7 +1580,7 @@ release_matches: return ret; } -static int +static void compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr, unsigned int *size, struct xt_table_info *newinfo, unsigned char *base) @@ -1588,10 +1588,9 @@ compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr, struct xt_entry_target *t; struct ip6t_entry *de; unsigned int origsize; - int ret, h; + int h; struct xt_entry_match *ematch; - ret = 0; origsize = *size; de = (struct ip6t_entry *)*dstptr; memcpy(de, e, sizeof(struct ip6t_entry)); @@ -1600,11 +1599,9 @@ compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr, *dstptr += sizeof(struct ip6t_entry); *size += sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry); - xt_ematch_foreach(ematch, e) { - ret = xt_compat_match_from_user(ematch, dstptr, size); - if (ret != 0) - return ret; - } + xt_ematch_foreach(ematch, e) + xt_compat_match_from_user(ematch, dstptr, size); + de->target_offset = e->target_offset - (origsize - *size); t = compat_ip6t_get_target(e); xt_compat_target_from_user(t, dstptr, size); @@ -1616,7 +1613,6 @@ compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr, if ((unsigned char *)de - base < newinfo->underflow[h]) newinfo->underflow[h] -= origsize - *size; } - return ret; } static int compat_check_entry(struct ip6t_entry *e, struct net *net, @@ -1737,17 +1733,12 @@ translate_compat_table(struct net *net, } entry1 = newinfo->entries; pos = entry1; - size = compatr->size; - xt_entry_foreach(iter0, entry0, compatr->size) { - ret = compat_copy_entry_from_user(iter0, &pos, &size, - newinfo, entry1); - if (ret != 0) - break; - } + xt_entry_foreach(iter0, entry0, compatr->size) + compat_copy_entry_from_user(iter0, &pos, &size, + newinfo, entry1); + xt_compat_flush_offsets(AF_INET6); xt_compat_unlock(AF_INET6); - if (ret) - goto free_newinfo; ret = -ELOOP; if (!mark_source_chains(newinfo, compatr->valid_hooks, entry1)) diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c index f9aa9715c32e..7e7173b68344 100644 --- a/net/netfilter/x_tables.c +++ b/net/netfilter/x_tables.c @@ -526,8 +526,8 @@ int xt_compat_match_offset(const struct xt_match *match) } 
EXPORT_SYMBOL_GPL(xt_compat_match_offset); -int xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr, - unsigned int *size) +void xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr, + unsigned int *size) { const struct xt_match *match = m->u.kernel.match; struct compat_xt_entry_match *cm = (struct compat_xt_entry_match *)m; @@ -549,7 +549,6 @@ int xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr, *size += off; *dstptr += msize; - return 0; } EXPORT_SYMBOL_GPL(xt_compat_match_from_user); From 09d9686047dbbe1cf4faa558d3ecc4aae2046054 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Fri, 1 Apr 2016 14:17:34 +0200 Subject: [PATCH 0592/1649] netfilter: x_tables: do compat validation via translate_table This looks like refactoring, but it's also a bug fix. The problem is that the compat path (32bit iptables, 64bit kernel) lacks a few sanity tests that are done in the normal path. For example, we do not check for underflows and the base chain policies. While it's possible to also add such checks to the compat path, it's more copy&paste; for instance we cannot reuse the check_underflow() helper as e->target_offset differs in the compat case. The other problem is that it makes auditing for validation errors harder; two places need to be checked and kept in sync.

At a high level, 32bit compat works like this:
1- initial pass over blob: validate match/entry offsets, bounds checking; lookup all matches and targets; do bookkeeping wrt. size delta of 32/64bit structures; assign match/target.u.kernel pointer (points at kernel implementation, needed to access ->compatsize etc.)
2- allocate memory according to the total bookkeeping size to contain the translated ruleset
3- second pass over original blob: for each entry, copy the 32bit representation to the newly allocated memory. This also does any special match translations (e.g. adjust 32bit to 64bit longs, etc).
4- check if ruleset is free of loops (chase all jumps)
5- first pass over translated blob: call the checkentry function of all matches and targets.

The alternative implemented by this patch is to drop steps 3&4 from the compat process; the translation is changed into an intermediate step rather than a full 1:1 translate_table replacement. In the 2nd pass (step #3), change the 64bit ruleset back to a kernel representation, i.e. put() the kernel pointer and restore ->u.user.name. This gets us a 64bit ruleset that is in the format generated by a 64bit iptables userspace -- we can then use translate_table() to get the 'native' sanity checks. This has two drawbacks: 1. we re-validate all the match and target entry structure sizes even though compat translation is supposed to never generate bogus offsets. 2. we put and then re-lookup each match and target. The upside is that we get all sanity tests and ruleset validations provided by the normal path and can remove some duplicated compat code.
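The shape of the change can be shown with a small self-contained toy model (plain userspace C; the struct and function names are invented for illustration and are not kernel code): the compat path only translates the layout and defers every sanity check to the one validator shared with the native path, which is the property this patch gives translate_compat_table() by ending it in a call to translate_table().

	#include <stdint.h>
	#include <stdio.h>

	struct compat_rule {		/* 32bit layout handed in by userspace */
		uint32_t target_offset;
		uint32_t next_offset;
	};

	struct rule {			/* native in-kernel layout */
		uint64_t target_offset;
		uint64_t next_offset;
	};

	/* single validator, shared by the native and the compat path */
	static int validate(const struct rule *r)
	{
		if (r->target_offset >= r->next_offset)
			return -1;	/* bogus offsets */
		return 0;
	}

	/* compat path: only translate, then reuse the native validator */
	static int translate_and_validate(const struct compat_rule *c,
					  struct rule *out)
	{
		out->target_offset = c->target_offset;
		out->next_offset = c->next_offset;
		return validate(out);
	}

	int main(void)
	{
		struct compat_rule good = { .target_offset = 8, .next_offset = 24 };
		struct compat_rule bad  = { .target_offset = 32, .next_offset = 16 };
		struct rule r;

		printf("good: %d\n", translate_and_validate(&good, &r));
		printf("bad:  %d\n", translate_and_validate(&bad, &r));
		return 0;
	}

The extra put/re-lookup of each match and target noted above is the price of keeping a single validation code path.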
iptables-restore time of autogenerated ruleset with 300k chains of form -A CHAIN0001 -m limit --limit 1/s -j CHAIN0002 -A CHAIN0002 -m limit --limit 1/s -j CHAIN0003 shows no noticeable differences in restore times: old: 0m30.796s new: 0m31.521s 64bit: 0m25.674s Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- net/ipv4/netfilter/arp_tables.c | 116 +++++------------------- net/ipv4/netfilter/ip_tables.c | 155 ++++++-------------------------- net/ipv6/netfilter/ip6_tables.c | 148 +++++------------------------- net/netfilter/x_tables.c | 8 ++ 4 files changed, 84 insertions(+), 343 deletions(-) diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c index be514c676fad..705179b0fd23 100644 --- a/net/ipv4/netfilter/arp_tables.c +++ b/net/ipv4/netfilter/arp_tables.c @@ -1234,19 +1234,17 @@ static inline void compat_release_entry(struct compat_arpt_entry *e) module_put(t->u.kernel.target->me); } -static inline int +static int check_compat_entry_size_and_hooks(struct compat_arpt_entry *e, struct xt_table_info *newinfo, unsigned int *size, const unsigned char *base, - const unsigned char *limit, - const unsigned int *hook_entries, - const unsigned int *underflows) + const unsigned char *limit) { struct xt_entry_target *t; struct xt_target *target; unsigned int entry_offset; - int ret, off, h; + int ret, off; duprintf("check_compat_entry_size_and_hooks %p\n", e); if ((unsigned long)e % __alignof__(struct compat_arpt_entry) != 0 || @@ -1291,17 +1289,6 @@ check_compat_entry_size_and_hooks(struct compat_arpt_entry *e, if (ret) goto release_target; - /* Check hooks & underflows */ - for (h = 0; h < NF_ARP_NUMHOOKS; h++) { - if ((unsigned char *)e - base == hook_entries[h]) - newinfo->hook_entry[h] = hook_entries[h]; - if ((unsigned char *)e - base == underflows[h]) - newinfo->underflow[h] = underflows[h]; - } - - /* Clear counters and comefrom */ - memset(&e->counters, 0, sizeof(e->counters)); - e->comefrom = 0; return 0; release_target: @@ -1351,7 +1338,7 @@ static int translate_compat_table(struct xt_table_info **pinfo, struct xt_table_info *newinfo, *info; void *pos, *entry0, *entry1; struct compat_arpt_entry *iter0; - struct arpt_entry *iter1; + struct arpt_replace repl; unsigned int size; int ret = 0; @@ -1360,12 +1347,6 @@ static int translate_compat_table(struct xt_table_info **pinfo, size = compatr->size; info->number = compatr->num_entries; - /* Init all hooks to impossible value. 
*/ - for (i = 0; i < NF_ARP_NUMHOOKS; i++) { - info->hook_entry[i] = 0xFFFFFFFF; - info->underflow[i] = 0xFFFFFFFF; - } - duprintf("translate_compat_table: size %u\n", info->size); j = 0; xt_compat_lock(NFPROTO_ARP); @@ -1374,9 +1355,7 @@ static int translate_compat_table(struct xt_table_info **pinfo, xt_entry_foreach(iter0, entry0, compatr->size) { ret = check_compat_entry_size_and_hooks(iter0, info, &size, entry0, - entry0 + compatr->size, - compatr->hook_entry, - compatr->underflow); + entry0 + compatr->size); if (ret != 0) goto out_unlock; ++j; @@ -1389,23 +1368,6 @@ static int translate_compat_table(struct xt_table_info **pinfo, goto out_unlock; } - /* Check hooks all assigned */ - for (i = 0; i < NF_ARP_NUMHOOKS; i++) { - /* Only hooks which are valid */ - if (!(compatr->valid_hooks & (1 << i))) - continue; - if (info->hook_entry[i] == 0xFFFFFFFF) { - duprintf("Invalid hook entry %u %u\n", - i, info->hook_entry[i]); - goto out_unlock; - } - if (info->underflow[i] == 0xFFFFFFFF) { - duprintf("Invalid underflow %u %u\n", - i, info->underflow[i]); - goto out_unlock; - } - } - ret = -ENOMEM; newinfo = xt_alloc_table_info(size); if (!newinfo) @@ -1422,55 +1384,26 @@ static int translate_compat_table(struct xt_table_info **pinfo, xt_entry_foreach(iter0, entry0, compatr->size) compat_copy_entry_from_user(iter0, &pos, &size, newinfo, entry1); + + /* all module references in entry0 are now gone */ + xt_compat_flush_offsets(NFPROTO_ARP); xt_compat_unlock(NFPROTO_ARP); - ret = -ELOOP; - if (!mark_source_chains(newinfo, compatr->valid_hooks, entry1)) + memcpy(&repl, compatr, sizeof(*compatr)); + + for (i = 0; i < NF_ARP_NUMHOOKS; i++) { + repl.hook_entry[i] = newinfo->hook_entry[i]; + repl.underflow[i] = newinfo->underflow[i]; + } + + repl.num_counters = 0; + repl.counters = NULL; + repl.size = newinfo->size; + ret = translate_table(newinfo, entry1, &repl); + if (ret) goto free_newinfo; - i = 0; - xt_entry_foreach(iter1, entry1, newinfo->size) { - iter1->counters.pcnt = xt_percpu_counter_alloc(); - if (IS_ERR_VALUE(iter1->counters.pcnt)) { - ret = -ENOMEM; - break; - } - - ret = check_target(iter1, compatr->name); - if (ret != 0) { - xt_percpu_counter_free(iter1->counters.pcnt); - break; - } - ++i; - if (strcmp(arpt_get_target(iter1)->u.user.name, - XT_ERROR_TARGET) == 0) - ++newinfo->stacksize; - } - if (ret) { - /* - * The first i matches need cleanup_entry (calls ->destroy) - * because they had called ->check already. The other j-i - * entries need only release. 
- */ - int skip = i; - j -= i; - xt_entry_foreach(iter0, entry0, newinfo->size) { - if (skip-- > 0) - continue; - if (j-- == 0) - break; - compat_release_entry(iter0); - } - xt_entry_foreach(iter1, entry1, newinfo->size) { - if (i-- == 0) - break; - cleanup_entry(iter1); - } - xt_free_table_info(newinfo); - return ret; - } - *pinfo = newinfo; *pentry0 = entry1; xt_free_table_info(info); @@ -1478,17 +1411,16 @@ static int translate_compat_table(struct xt_table_info **pinfo, free_newinfo: xt_free_table_info(newinfo); -out: + return ret; +out_unlock: + xt_compat_flush_offsets(NFPROTO_ARP); + xt_compat_unlock(NFPROTO_ARP); xt_entry_foreach(iter0, entry0, compatr->size) { if (j-- == 0) break; compat_release_entry(iter0); } return ret; -out_unlock: - xt_compat_flush_offsets(NFPROTO_ARP); - xt_compat_unlock(NFPROTO_ARP); - goto out; } static int compat_do_replace(struct net *net, void __user *user, diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c index 5c20eef980c1..c26ccd818e8f 100644 --- a/net/ipv4/netfilter/ip_tables.c +++ b/net/ipv4/netfilter/ip_tables.c @@ -1483,16 +1483,14 @@ check_compat_entry_size_and_hooks(struct compat_ipt_entry *e, struct xt_table_info *newinfo, unsigned int *size, const unsigned char *base, - const unsigned char *limit, - const unsigned int *hook_entries, - const unsigned int *underflows) + const unsigned char *limit) { struct xt_entry_match *ematch; struct xt_entry_target *t; struct xt_target *target; unsigned int entry_offset; unsigned int j; - int ret, off, h; + int ret, off; duprintf("check_compat_entry_size_and_hooks %p\n", e); if ((unsigned long)e % __alignof__(struct compat_ipt_entry) != 0 || @@ -1544,17 +1542,6 @@ check_compat_entry_size_and_hooks(struct compat_ipt_entry *e, if (ret) goto out; - /* Check hooks & underflows */ - for (h = 0; h < NF_INET_NUMHOOKS; h++) { - if ((unsigned char *)e - base == hook_entries[h]) - newinfo->hook_entry[h] = hook_entries[h]; - if ((unsigned char *)e - base == underflows[h]) - newinfo->underflow[h] = underflows[h]; - } - - /* Clear counters and comefrom */ - memset(&e->counters, 0, sizeof(e->counters)); - e->comefrom = 0; return 0; out: @@ -1597,6 +1584,7 @@ compat_copy_entry_from_user(struct compat_ipt_entry *e, void **dstptr, xt_compat_target_from_user(t, dstptr, size); de->next_offset = e->next_offset - (origsize - *size); + for (h = 0; h < NF_INET_NUMHOOKS; h++) { if ((unsigned char *)de - base < newinfo->hook_entry[h]) newinfo->hook_entry[h] -= origsize - *size; @@ -1605,48 +1593,6 @@ compat_copy_entry_from_user(struct compat_ipt_entry *e, void **dstptr, } } -static int -compat_check_entry(struct ipt_entry *e, struct net *net, const char *name) -{ - struct xt_entry_match *ematch; - struct xt_mtchk_param mtpar; - unsigned int j; - int ret = 0; - - e->counters.pcnt = xt_percpu_counter_alloc(); - if (IS_ERR_VALUE(e->counters.pcnt)) - return -ENOMEM; - - j = 0; - mtpar.net = net; - mtpar.table = name; - mtpar.entryinfo = &e->ip; - mtpar.hook_mask = e->comefrom; - mtpar.family = NFPROTO_IPV4; - xt_ematch_foreach(ematch, e) { - ret = check_match(ematch, &mtpar); - if (ret != 0) - goto cleanup_matches; - ++j; - } - - ret = check_target(e, net, name); - if (ret) - goto cleanup_matches; - return 0; - - cleanup_matches: - xt_ematch_foreach(ematch, e) { - if (j-- == 0) - break; - cleanup_match(ematch, net); - } - - xt_percpu_counter_free(e->counters.pcnt); - - return ret; -} - static int translate_compat_table(struct net *net, struct xt_table_info **pinfo, @@ -1657,7 +1603,7 @@ 
translate_compat_table(struct net *net, struct xt_table_info *newinfo, *info; void *pos, *entry0, *entry1; struct compat_ipt_entry *iter0; - struct ipt_entry *iter1; + struct ipt_replace repl; unsigned int size; int ret; @@ -1666,12 +1612,6 @@ translate_compat_table(struct net *net, size = compatr->size; info->number = compatr->num_entries; - /* Init all hooks to impossible value. */ - for (i = 0; i < NF_INET_NUMHOOKS; i++) { - info->hook_entry[i] = 0xFFFFFFFF; - info->underflow[i] = 0xFFFFFFFF; - } - duprintf("translate_compat_table: size %u\n", info->size); j = 0; xt_compat_lock(AF_INET); @@ -1680,9 +1620,7 @@ translate_compat_table(struct net *net, xt_entry_foreach(iter0, entry0, compatr->size) { ret = check_compat_entry_size_and_hooks(iter0, info, &size, entry0, - entry0 + compatr->size, - compatr->hook_entry, - compatr->underflow); + entry0 + compatr->size); if (ret != 0) goto out_unlock; ++j; @@ -1695,23 +1633,6 @@ translate_compat_table(struct net *net, goto out_unlock; } - /* Check hooks all assigned */ - for (i = 0; i < NF_INET_NUMHOOKS; i++) { - /* Only hooks which are valid */ - if (!(compatr->valid_hooks & (1 << i))) - continue; - if (info->hook_entry[i] == 0xFFFFFFFF) { - duprintf("Invalid hook entry %u %u\n", - i, info->hook_entry[i]); - goto out_unlock; - } - if (info->underflow[i] == 0xFFFFFFFF) { - duprintf("Invalid underflow %u %u\n", - i, info->underflow[i]); - goto out_unlock; - } - } - ret = -ENOMEM; newinfo = xt_alloc_table_info(size); if (!newinfo) @@ -1719,8 +1640,8 @@ translate_compat_table(struct net *net, newinfo->number = compatr->num_entries; for (i = 0; i < NF_INET_NUMHOOKS; i++) { - newinfo->hook_entry[i] = info->hook_entry[i]; - newinfo->underflow[i] = info->underflow[i]; + newinfo->hook_entry[i] = compatr->hook_entry[i]; + newinfo->underflow[i] = compatr->underflow[i]; } entry1 = newinfo->entries; pos = entry1; @@ -1729,47 +1650,30 @@ translate_compat_table(struct net *net, compat_copy_entry_from_user(iter0, &pos, &size, newinfo, entry1); + /* all module references in entry0 are now gone. + * entry1/newinfo contains a 64bit ruleset that looks exactly as + * generated by 64bit userspace. + * + * Call standard translate_table() to validate all hook_entrys, + * underflows, check for loops, etc. + */ xt_compat_flush_offsets(AF_INET); xt_compat_unlock(AF_INET); - ret = -ELOOP; - if (!mark_source_chains(newinfo, compatr->valid_hooks, entry1)) - goto free_newinfo; + memcpy(&repl, compatr, sizeof(*compatr)); - i = 0; - xt_entry_foreach(iter1, entry1, newinfo->size) { - ret = compat_check_entry(iter1, net, compatr->name); - if (ret != 0) - break; - ++i; - if (strcmp(ipt_get_target(iter1)->u.user.name, - XT_ERROR_TARGET) == 0) - ++newinfo->stacksize; - } - if (ret) { - /* - * The first i matches need cleanup_entry (calls ->destroy) - * because they had called ->check already. The other j-i - * entries need only release. 
- */ - int skip = i; - j -= i; - xt_entry_foreach(iter0, entry0, newinfo->size) { - if (skip-- > 0) - continue; - if (j-- == 0) - break; - compat_release_entry(iter0); - } - xt_entry_foreach(iter1, entry1, newinfo->size) { - if (i-- == 0) - break; - cleanup_entry(iter1, net); - } - xt_free_table_info(newinfo); - return ret; + for (i = 0; i < NF_INET_NUMHOOKS; i++) { + repl.hook_entry[i] = newinfo->hook_entry[i]; + repl.underflow[i] = newinfo->underflow[i]; } + repl.num_counters = 0; + repl.counters = NULL; + repl.size = newinfo->size; + ret = translate_table(net, newinfo, entry1, &repl); + if (ret) + goto free_newinfo; + *pinfo = newinfo; *pentry0 = entry1; xt_free_table_info(info); @@ -1777,17 +1681,16 @@ translate_compat_table(struct net *net, free_newinfo: xt_free_table_info(newinfo); -out: + return ret; +out_unlock: + xt_compat_flush_offsets(AF_INET); + xt_compat_unlock(AF_INET); xt_entry_foreach(iter0, entry0, compatr->size) { if (j-- == 0) break; compat_release_entry(iter0); } return ret; -out_unlock: - xt_compat_flush_offsets(AF_INET); - xt_compat_unlock(AF_INET); - goto out; } static int diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c index 620d54c1c119..f5a4eb2d5084 100644 --- a/net/ipv6/netfilter/ip6_tables.c +++ b/net/ipv6/netfilter/ip6_tables.c @@ -1495,16 +1495,14 @@ check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e, struct xt_table_info *newinfo, unsigned int *size, const unsigned char *base, - const unsigned char *limit, - const unsigned int *hook_entries, - const unsigned int *underflows) + const unsigned char *limit) { struct xt_entry_match *ematch; struct xt_entry_target *t; struct xt_target *target; unsigned int entry_offset; unsigned int j; - int ret, off, h; + int ret, off; duprintf("check_compat_entry_size_and_hooks %p\n", e); if ((unsigned long)e % __alignof__(struct compat_ip6t_entry) != 0 || @@ -1556,17 +1554,6 @@ check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e, if (ret) goto out; - /* Check hooks & underflows */ - for (h = 0; h < NF_INET_NUMHOOKS; h++) { - if ((unsigned char *)e - base == hook_entries[h]) - newinfo->hook_entry[h] = hook_entries[h]; - if ((unsigned char *)e - base == underflows[h]) - newinfo->underflow[h] = underflows[h]; - } - - /* Clear counters and comefrom */ - memset(&e->counters, 0, sizeof(e->counters)); - e->comefrom = 0; return 0; out: @@ -1615,47 +1602,6 @@ compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr, } } -static int compat_check_entry(struct ip6t_entry *e, struct net *net, - const char *name) -{ - unsigned int j; - int ret = 0; - struct xt_mtchk_param mtpar; - struct xt_entry_match *ematch; - - e->counters.pcnt = xt_percpu_counter_alloc(); - if (IS_ERR_VALUE(e->counters.pcnt)) - return -ENOMEM; - j = 0; - mtpar.net = net; - mtpar.table = name; - mtpar.entryinfo = &e->ipv6; - mtpar.hook_mask = e->comefrom; - mtpar.family = NFPROTO_IPV6; - xt_ematch_foreach(ematch, e) { - ret = check_match(ematch, &mtpar); - if (ret != 0) - goto cleanup_matches; - ++j; - } - - ret = check_target(e, net, name); - if (ret) - goto cleanup_matches; - return 0; - - cleanup_matches: - xt_ematch_foreach(ematch, e) { - if (j-- == 0) - break; - cleanup_match(ematch, net); - } - - xt_percpu_counter_free(e->counters.pcnt); - - return ret; -} - static int translate_compat_table(struct net *net, struct xt_table_info **pinfo, @@ -1666,7 +1612,7 @@ translate_compat_table(struct net *net, struct xt_table_info *newinfo, *info; void *pos, *entry0, *entry1; struct compat_ip6t_entry *iter0; - 
struct ip6t_entry *iter1; + struct ip6t_replace repl; unsigned int size; int ret = 0; @@ -1675,12 +1621,6 @@ translate_compat_table(struct net *net, size = compatr->size; info->number = compatr->num_entries; - /* Init all hooks to impossible value. */ - for (i = 0; i < NF_INET_NUMHOOKS; i++) { - info->hook_entry[i] = 0xFFFFFFFF; - info->underflow[i] = 0xFFFFFFFF; - } - duprintf("translate_compat_table: size %u\n", info->size); j = 0; xt_compat_lock(AF_INET6); @@ -1689,9 +1629,7 @@ translate_compat_table(struct net *net, xt_entry_foreach(iter0, entry0, compatr->size) { ret = check_compat_entry_size_and_hooks(iter0, info, &size, entry0, - entry0 + compatr->size, - compatr->hook_entry, - compatr->underflow); + entry0 + compatr->size); if (ret != 0) goto out_unlock; ++j; @@ -1704,23 +1642,6 @@ translate_compat_table(struct net *net, goto out_unlock; } - /* Check hooks all assigned */ - for (i = 0; i < NF_INET_NUMHOOKS; i++) { - /* Only hooks which are valid */ - if (!(compatr->valid_hooks & (1 << i))) - continue; - if (info->hook_entry[i] == 0xFFFFFFFF) { - duprintf("Invalid hook entry %u %u\n", - i, info->hook_entry[i]); - goto out_unlock; - } - if (info->underflow[i] == 0xFFFFFFFF) { - duprintf("Invalid underflow %u %u\n", - i, info->underflow[i]); - goto out_unlock; - } - } - ret = -ENOMEM; newinfo = xt_alloc_table_info(size); if (!newinfo) @@ -1728,56 +1649,34 @@ translate_compat_table(struct net *net, newinfo->number = compatr->num_entries; for (i = 0; i < NF_INET_NUMHOOKS; i++) { - newinfo->hook_entry[i] = info->hook_entry[i]; - newinfo->underflow[i] = info->underflow[i]; + newinfo->hook_entry[i] = compatr->hook_entry[i]; + newinfo->underflow[i] = compatr->underflow[i]; } entry1 = newinfo->entries; pos = entry1; + size = compatr->size; xt_entry_foreach(iter0, entry0, compatr->size) compat_copy_entry_from_user(iter0, &pos, &size, newinfo, entry1); + /* all module references in entry0 are now gone. */ xt_compat_flush_offsets(AF_INET6); xt_compat_unlock(AF_INET6); - ret = -ELOOP; - if (!mark_source_chains(newinfo, compatr->valid_hooks, entry1)) - goto free_newinfo; + memcpy(&repl, compatr, sizeof(*compatr)); - i = 0; - xt_entry_foreach(iter1, entry1, newinfo->size) { - ret = compat_check_entry(iter1, net, compatr->name); - if (ret != 0) - break; - ++i; - if (strcmp(ip6t_get_target(iter1)->u.user.name, - XT_ERROR_TARGET) == 0) - ++newinfo->stacksize; - } - if (ret) { - /* - * The first i matches need cleanup_entry (calls ->destroy) - * because they had called ->check already. The other j-i - * entries need only release. 
- */ - int skip = i; - j -= i; - xt_entry_foreach(iter0, entry0, newinfo->size) { - if (skip-- > 0) - continue; - if (j-- == 0) - break; - compat_release_entry(iter0); - } - xt_entry_foreach(iter1, entry1, newinfo->size) { - if (i-- == 0) - break; - cleanup_entry(iter1, net); - } - xt_free_table_info(newinfo); - return ret; + for (i = 0; i < NF_INET_NUMHOOKS; i++) { + repl.hook_entry[i] = newinfo->hook_entry[i]; + repl.underflow[i] = newinfo->underflow[i]; } + repl.num_counters = 0; + repl.counters = NULL; + repl.size = newinfo->size; + ret = translate_table(net, newinfo, entry1, &repl); + if (ret) + goto free_newinfo; + *pinfo = newinfo; *pentry0 = entry1; xt_free_table_info(info); @@ -1785,17 +1684,16 @@ translate_compat_table(struct net *net, free_newinfo: xt_free_table_info(newinfo); -out: + return ret; +out_unlock: + xt_compat_flush_offsets(AF_INET6); + xt_compat_unlock(AF_INET6); xt_entry_foreach(iter0, entry0, compatr->size) { if (j-- == 0) break; compat_release_entry(iter0); } return ret; -out_unlock: - xt_compat_flush_offsets(AF_INET6); - xt_compat_unlock(AF_INET6); - goto out; } static int diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c index 7e7173b68344..9ec23ffa43b4 100644 --- a/net/netfilter/x_tables.c +++ b/net/netfilter/x_tables.c @@ -533,6 +533,7 @@ void xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr, struct compat_xt_entry_match *cm = (struct compat_xt_entry_match *)m; int pad, off = xt_compat_match_offset(match); u_int16_t msize = cm->u.user.match_size; + char name[sizeof(m->u.user.name)]; m = *dstptr; memcpy(m, cm, sizeof(*cm)); @@ -546,6 +547,9 @@ void xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr, msize += off; m->u.user.match_size = msize; + strlcpy(name, match->name, sizeof(name)); + module_put(match->me); + strncpy(m->u.user.name, name, sizeof(m->u.user.name)); *size += off; *dstptr += msize; @@ -763,6 +767,7 @@ void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr, struct compat_xt_entry_target *ct = (struct compat_xt_entry_target *)t; int pad, off = xt_compat_target_offset(target); u_int16_t tsize = ct->u.user.target_size; + char name[sizeof(t->u.user.name)]; t = *dstptr; memcpy(t, ct, sizeof(*ct)); @@ -776,6 +781,9 @@ void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr, tsize += off; t->u.user.target_size = tsize; + strlcpy(name, target->name, sizeof(name)); + module_put(target->me); + strncpy(t->u.user.name, name, sizeof(t->u.user.name)); *size += off; *dstptr += tsize; From 95609155d7fa08cc2e71d494acad39f72f0b4495 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Fri, 1 Apr 2016 14:17:35 +0200 Subject: [PATCH 0593/1649] netfilter: x_tables: remove obsolete overflow check for compat case too commit 9e67d5a739327c44885adebb4f3a538050be73e4 ("[NETFILTER]: x_tables: remove obsolete overflow check") left the compat parts alone, but we can kill it there as well. 
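What remains in each compat_do_replace() after this removal is the counter-overflow guard, matching the earlier cleanup of the native path; condensed from the hunks below:

	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
		return -EFAULT;

	/* overflow check */
	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
		return -ENOMEM;
	if (tmp.num_counters == 0)
		return -EINVAL;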
Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- net/ipv4/netfilter/arp_tables.c | 2 -- net/ipv4/netfilter/ip_tables.c | 2 -- net/ipv6/netfilter/ip6_tables.c | 2 -- 3 files changed, 6 deletions(-) diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c index 705179b0fd23..668c5dcb3a5f 100644 --- a/net/ipv4/netfilter/arp_tables.c +++ b/net/ipv4/netfilter/arp_tables.c @@ -1436,8 +1436,6 @@ static int compat_do_replace(struct net *net, void __user *user, return -EFAULT; /* overflow check */ - if (tmp.size >= INT_MAX / num_possible_cpus()) - return -ENOMEM; if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters)) return -ENOMEM; if (tmp.num_counters == 0) diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c index c26ccd818e8f..4585aa78c4ca 100644 --- a/net/ipv4/netfilter/ip_tables.c +++ b/net/ipv4/netfilter/ip_tables.c @@ -1706,8 +1706,6 @@ compat_do_replace(struct net *net, void __user *user, unsigned int len) return -EFAULT; /* overflow check */ - if (tmp.size >= INT_MAX / num_possible_cpus()) - return -ENOMEM; if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters)) return -ENOMEM; if (tmp.num_counters == 0) diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c index f5a4eb2d5084..fd06251f504c 100644 --- a/net/ipv6/netfilter/ip6_tables.c +++ b/net/ipv6/netfilter/ip6_tables.c @@ -1709,8 +1709,6 @@ compat_do_replace(struct net *net, void __user *user, unsigned int len) return -EFAULT; /* overflow check */ - if (tmp.size >= INT_MAX / num_possible_cpus()) - return -ENOMEM; if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters)) return -ENOMEM; if (tmp.num_counters == 0) From aded9f3e9fa8db559c5b7661bbb497754270e754 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Fri, 1 Apr 2016 14:17:36 +0200 Subject: [PATCH 0594/1649] netfilter: x_tables: remove obsolete check Since 'netfilter: x_tables: validate targets of jumps' change we validate that the target aligns exactly with beginning of a rule, so offset test is now redundant. Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- net/ipv4/netfilter/arp_tables.c | 8 -------- net/ipv4/netfilter/ip_tables.c | 7 ------- net/ipv6/netfilter/ip6_tables.c | 7 ------- 3 files changed, 22 deletions(-) diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c index 668c5dcb3a5f..8cefb7a2606b 100644 --- a/net/ipv4/netfilter/arp_tables.c +++ b/net/ipv4/netfilter/arp_tables.c @@ -461,14 +461,6 @@ static int mark_source_chains(const struct xt_table_info *newinfo, if (strcmp(t->target.u.user.name, XT_STANDARD_TARGET) == 0 && newpos >= 0) { - if (newpos > newinfo->size - - sizeof(struct arpt_entry)) { - duprintf("mark_source_chains: " - "bad verdict (%i)\n", - newpos); - return 0; - } - /* This a jump; chase it. */ duprintf("Jump rule %u -> %u\n", pos, newpos); diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c index 4585aa78c4ca..9340ce0a7549 100644 --- a/net/ipv4/netfilter/ip_tables.c +++ b/net/ipv4/netfilter/ip_tables.c @@ -542,13 +542,6 @@ mark_source_chains(const struct xt_table_info *newinfo, if (strcmp(t->target.u.user.name, XT_STANDARD_TARGET) == 0 && newpos >= 0) { - if (newpos > newinfo->size - - sizeof(struct ipt_entry)) { - duprintf("mark_source_chains: " - "bad verdict (%i)\n", - newpos); - return 0; - } /* This a jump; chase it. 
*/ duprintf("Jump rule %u -> %u\n", pos, newpos); diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c index fd06251f504c..aa010856a255 100644 --- a/net/ipv6/netfilter/ip6_tables.c +++ b/net/ipv6/netfilter/ip6_tables.c @@ -554,13 +554,6 @@ mark_source_chains(const struct xt_table_info *newinfo, if (strcmp(t->target.u.user.name, XT_STANDARD_TARGET) == 0 && newpos >= 0) { - if (newpos > newinfo->size - - sizeof(struct ip6t_entry)) { - duprintf("mark_source_chains: " - "bad verdict (%i)\n", - newpos); - return 0; - } /* This a jump; chase it. */ duprintf("Jump rule %u -> %u\n", pos, newpos); From d7591f0c41ce3e67600a982bab6989ef0f07b3ce Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Fri, 1 Apr 2016 15:37:59 +0200 Subject: [PATCH 0595/1649] netfilter: x_tables: introduce and use xt_copy_counters_from_user The three variants use same copy&pasted code, condense this into a helper and use that. Make sure info.name is 0-terminated. Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- include/linux/netfilter/x_tables.h | 3 ++ net/ipv4/netfilter/arp_tables.c | 48 ++----------------- net/ipv4/netfilter/ip_tables.c | 48 ++----------------- net/ipv6/netfilter/ip6_tables.c | 49 ++------------------ net/netfilter/x_tables.c | 74 ++++++++++++++++++++++++++++++ 5 files changed, 92 insertions(+), 130 deletions(-) diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h index e2da9b90f1b8..4dd9306c9d56 100644 --- a/include/linux/netfilter/x_tables.h +++ b/include/linux/netfilter/x_tables.h @@ -251,6 +251,9 @@ int xt_check_match(struct xt_mtchk_param *, unsigned int size, u_int8_t proto, int xt_check_target(struct xt_tgchk_param *, unsigned int size, u_int8_t proto, bool inv_proto); +void *xt_copy_counters_from_user(const void __user *user, unsigned int len, + struct xt_counters_info *info, bool compat); + struct xt_table *xt_register_table(struct net *net, const struct xt_table *table, struct xt_table_info *bootstrap, diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c index 8cefb7a2606b..60f5161abcb4 100644 --- a/net/ipv4/netfilter/arp_tables.c +++ b/net/ipv4/netfilter/arp_tables.c @@ -1123,55 +1123,17 @@ static int do_add_counters(struct net *net, const void __user *user, unsigned int i; struct xt_counters_info tmp; struct xt_counters *paddc; - unsigned int num_counters; - const char *name; - int size; - void *ptmp; struct xt_table *t; const struct xt_table_info *private; int ret = 0; struct arpt_entry *iter; unsigned int addend; -#ifdef CONFIG_COMPAT - struct compat_xt_counters_info compat_tmp; - if (compat) { - ptmp = &compat_tmp; - size = sizeof(struct compat_xt_counters_info); - } else -#endif - { - ptmp = &tmp; - size = sizeof(struct xt_counters_info); - } + paddc = xt_copy_counters_from_user(user, len, &tmp, compat); + if (IS_ERR(paddc)) + return PTR_ERR(paddc); - if (copy_from_user(ptmp, user, size) != 0) - return -EFAULT; - -#ifdef CONFIG_COMPAT - if (compat) { - num_counters = compat_tmp.num_counters; - name = compat_tmp.name; - } else -#endif - { - num_counters = tmp.num_counters; - name = tmp.name; - } - - if (len != size + num_counters * sizeof(struct xt_counters)) - return -EINVAL; - - paddc = vmalloc(len - size); - if (!paddc) - return -ENOMEM; - - if (copy_from_user(paddc, user + size, len - size) != 0) { - ret = -EFAULT; - goto free; - } - - t = xt_find_table_lock(net, NFPROTO_ARP, name); + t = xt_find_table_lock(net, NFPROTO_ARP, tmp.name); if (IS_ERR_OR_NULL(t)) { ret = t ? 
PTR_ERR(t) : -ENOENT; goto free; @@ -1179,7 +1141,7 @@ static int do_add_counters(struct net *net, const void __user *user, local_bh_disable(); private = t->private; - if (private->number != num_counters) { + if (private->number != tmp.num_counters) { ret = -EINVAL; goto unlock_up_free; } diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c index 9340ce0a7549..735d1ee8c1ab 100644 --- a/net/ipv4/netfilter/ip_tables.c +++ b/net/ipv4/netfilter/ip_tables.c @@ -1307,55 +1307,17 @@ do_add_counters(struct net *net, const void __user *user, unsigned int i; struct xt_counters_info tmp; struct xt_counters *paddc; - unsigned int num_counters; - const char *name; - int size; - void *ptmp; struct xt_table *t; const struct xt_table_info *private; int ret = 0; struct ipt_entry *iter; unsigned int addend; -#ifdef CONFIG_COMPAT - struct compat_xt_counters_info compat_tmp; - if (compat) { - ptmp = &compat_tmp; - size = sizeof(struct compat_xt_counters_info); - } else -#endif - { - ptmp = &tmp; - size = sizeof(struct xt_counters_info); - } + paddc = xt_copy_counters_from_user(user, len, &tmp, compat); + if (IS_ERR(paddc)) + return PTR_ERR(paddc); - if (copy_from_user(ptmp, user, size) != 0) - return -EFAULT; - -#ifdef CONFIG_COMPAT - if (compat) { - num_counters = compat_tmp.num_counters; - name = compat_tmp.name; - } else -#endif - { - num_counters = tmp.num_counters; - name = tmp.name; - } - - if (len != size + num_counters * sizeof(struct xt_counters)) - return -EINVAL; - - paddc = vmalloc(len - size); - if (!paddc) - return -ENOMEM; - - if (copy_from_user(paddc, user + size, len - size) != 0) { - ret = -EFAULT; - goto free; - } - - t = xt_find_table_lock(net, AF_INET, name); + t = xt_find_table_lock(net, AF_INET, tmp.name); if (IS_ERR_OR_NULL(t)) { ret = t ? 
PTR_ERR(t) : -ENOENT; goto free; @@ -1363,7 +1325,7 @@ do_add_counters(struct net *net, const void __user *user, local_bh_disable(); private = t->private; - if (private->number != num_counters) { + if (private->number != tmp.num_counters) { ret = -EINVAL; goto unlock_up_free; } diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c index aa010856a255..73e606c719ef 100644 --- a/net/ipv6/netfilter/ip6_tables.c +++ b/net/ipv6/netfilter/ip6_tables.c @@ -1319,55 +1319,16 @@ do_add_counters(struct net *net, const void __user *user, unsigned int len, unsigned int i; struct xt_counters_info tmp; struct xt_counters *paddc; - unsigned int num_counters; - char *name; - int size; - void *ptmp; struct xt_table *t; const struct xt_table_info *private; int ret = 0; struct ip6t_entry *iter; unsigned int addend; -#ifdef CONFIG_COMPAT - struct compat_xt_counters_info compat_tmp; - if (compat) { - ptmp = &compat_tmp; - size = sizeof(struct compat_xt_counters_info); - } else -#endif - { - ptmp = &tmp; - size = sizeof(struct xt_counters_info); - } - - if (copy_from_user(ptmp, user, size) != 0) - return -EFAULT; - -#ifdef CONFIG_COMPAT - if (compat) { - num_counters = compat_tmp.num_counters; - name = compat_tmp.name; - } else -#endif - { - num_counters = tmp.num_counters; - name = tmp.name; - } - - if (len != size + num_counters * sizeof(struct xt_counters)) - return -EINVAL; - - paddc = vmalloc(len - size); - if (!paddc) - return -ENOMEM; - - if (copy_from_user(paddc, user + size, len - size) != 0) { - ret = -EFAULT; - goto free; - } - - t = xt_find_table_lock(net, AF_INET6, name); + paddc = xt_copy_counters_from_user(user, len, &tmp, compat); + if (IS_ERR(paddc)) + return PTR_ERR(paddc); + t = xt_find_table_lock(net, AF_INET6, tmp.name); if (IS_ERR_OR_NULL(t)) { ret = t ? PTR_ERR(t) : -ENOENT; goto free; @@ -1375,7 +1336,7 @@ do_add_counters(struct net *net, const void __user *user, unsigned int len, local_bh_disable(); private = t->private; - if (private->number != num_counters) { + if (private->number != tmp.num_counters) { ret = -EINVAL; goto unlock_up_free; } diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c index 9ec23ffa43b4..c69c892231d7 100644 --- a/net/netfilter/x_tables.c +++ b/net/netfilter/x_tables.c @@ -752,6 +752,80 @@ int xt_check_target(struct xt_tgchk_param *par, } EXPORT_SYMBOL_GPL(xt_check_target); +/** + * xt_copy_counters_from_user - copy counters and metadata from userspace + * + * @user: src pointer to userspace memory + * @len: alleged size of userspace memory + * @info: where to store the xt_counters_info metadata + * @compat: true if we setsockopt call is done by 32bit task on 64bit kernel + * + * Copies counter meta data from @user and stores it in @info. + * + * vmallocs memory to hold the counters, then copies the counter data + * from @user to the new memory and returns a pointer to it. + * + * If @compat is true, @info gets converted automatically to the 64bit + * representation. + * + * The metadata associated with the counters is stored in @info. + * + * Return: returns pointer that caller has to test via IS_ERR(). + * If IS_ERR is false, caller has to vfree the pointer. 
+ */ +void *xt_copy_counters_from_user(const void __user *user, unsigned int len, + struct xt_counters_info *info, bool compat) +{ + void *mem; + u64 size; + +#ifdef CONFIG_COMPAT + if (compat) { + /* structures only differ in size due to alignment */ + struct compat_xt_counters_info compat_tmp; + + if (len <= sizeof(compat_tmp)) + return ERR_PTR(-EINVAL); + + len -= sizeof(compat_tmp); + if (copy_from_user(&compat_tmp, user, sizeof(compat_tmp)) != 0) + return ERR_PTR(-EFAULT); + + strlcpy(info->name, compat_tmp.name, sizeof(info->name)); + info->num_counters = compat_tmp.num_counters; + user += sizeof(compat_tmp); + } else +#endif + { + if (len <= sizeof(*info)) + return ERR_PTR(-EINVAL); + + len -= sizeof(*info); + if (copy_from_user(info, user, sizeof(*info)) != 0) + return ERR_PTR(-EFAULT); + + info->name[sizeof(info->name) - 1] = '\0'; + user += sizeof(*info); + } + + size = sizeof(struct xt_counters); + size *= info->num_counters; + + if (size != (u64)len) + return ERR_PTR(-EINVAL); + + mem = vmalloc(len); + if (!mem) + return ERR_PTR(-ENOMEM); + + if (copy_from_user(mem, user, len) == 0) + return mem; + + vfree(mem); + return ERR_PTR(-EFAULT); +} +EXPORT_SYMBOL_GPL(xt_copy_counters_from_user); + #ifdef CONFIG_COMPAT int xt_compat_target_offset(const struct xt_target *target) { From 4054ff45454a9a4652e29ac750a6600f6fdcc216 Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Tue, 12 Apr 2016 23:32:34 +0200 Subject: [PATCH 0596/1649] netfilter: ctnetlink: remove unnecessary inlining Many of these functions are called from control plane path. Move ctnetlink_nlmsg_size() under CONFIG_NF_CONNTRACK_EVENTS to avoid a compilation warning when CONFIG_NF_CONNTRACK_EVENTS=n. Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nf_conntrack_netlink.c | 117 +++++++++++---------------- 1 file changed, 48 insertions(+), 69 deletions(-) diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index 355e8552fd5b..caa4efe5930b 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c @@ -58,10 +58,9 @@ MODULE_LICENSE("GPL"); static char __initdata version[] = "0.93"; -static inline int -ctnetlink_dump_tuples_proto(struct sk_buff *skb, - const struct nf_conntrack_tuple *tuple, - struct nf_conntrack_l4proto *l4proto) +static int ctnetlink_dump_tuples_proto(struct sk_buff *skb, + const struct nf_conntrack_tuple *tuple, + struct nf_conntrack_l4proto *l4proto) { int ret = 0; struct nlattr *nest_parms; @@ -83,10 +82,9 @@ nla_put_failure: return -1; } -static inline int -ctnetlink_dump_tuples_ip(struct sk_buff *skb, - const struct nf_conntrack_tuple *tuple, - struct nf_conntrack_l3proto *l3proto) +static int ctnetlink_dump_tuples_ip(struct sk_buff *skb, + const struct nf_conntrack_tuple *tuple, + struct nf_conntrack_l3proto *l3proto) { int ret = 0; struct nlattr *nest_parms; @@ -106,9 +104,8 @@ nla_put_failure: return -1; } -static int -ctnetlink_dump_tuples(struct sk_buff *skb, - const struct nf_conntrack_tuple *tuple) +static int ctnetlink_dump_tuples(struct sk_buff *skb, + const struct nf_conntrack_tuple *tuple) { int ret; struct nf_conntrack_l3proto *l3proto; @@ -127,9 +124,8 @@ ctnetlink_dump_tuples(struct sk_buff *skb, return ret; } -static inline int -ctnetlink_dump_zone_id(struct sk_buff *skb, int attrtype, - const struct nf_conntrack_zone *zone, int dir) +static int ctnetlink_dump_zone_id(struct sk_buff *skb, int attrtype, + const struct nf_conntrack_zone *zone, int dir) { if (zone->id == NF_CT_DEFAULT_ZONE_ID || zone->dir != dir) 
return 0; @@ -141,8 +137,7 @@ nla_put_failure: return -1; } -static inline int -ctnetlink_dump_status(struct sk_buff *skb, const struct nf_conn *ct) +static int ctnetlink_dump_status(struct sk_buff *skb, const struct nf_conn *ct) { if (nla_put_be32(skb, CTA_STATUS, htonl(ct->status))) goto nla_put_failure; @@ -152,8 +147,7 @@ nla_put_failure: return -1; } -static inline int -ctnetlink_dump_timeout(struct sk_buff *skb, const struct nf_conn *ct) +static int ctnetlink_dump_timeout(struct sk_buff *skb, const struct nf_conn *ct) { long timeout = ((long)ct->timeout.expires - (long)jiffies) / HZ; @@ -168,8 +162,7 @@ nla_put_failure: return -1; } -static inline int -ctnetlink_dump_protoinfo(struct sk_buff *skb, struct nf_conn *ct) +static int ctnetlink_dump_protoinfo(struct sk_buff *skb, struct nf_conn *ct) { struct nf_conntrack_l4proto *l4proto; struct nlattr *nest_proto; @@ -193,8 +186,8 @@ nla_put_failure: return -1; } -static inline int -ctnetlink_dump_helpinfo(struct sk_buff *skb, const struct nf_conn *ct) +static int ctnetlink_dump_helpinfo(struct sk_buff *skb, + const struct nf_conn *ct) { struct nlattr *nest_helper; const struct nf_conn_help *help = nfct_help(ct); @@ -300,8 +293,7 @@ nla_put_failure: } #ifdef CONFIG_NF_CONNTRACK_MARK -static inline int -ctnetlink_dump_mark(struct sk_buff *skb, const struct nf_conn *ct) +static int ctnetlink_dump_mark(struct sk_buff *skb, const struct nf_conn *ct) { if (nla_put_be32(skb, CTA_MARK, htonl(ct->mark))) goto nla_put_failure; @@ -315,8 +307,7 @@ nla_put_failure: #endif #ifdef CONFIG_NF_CONNTRACK_SECMARK -static inline int -ctnetlink_dump_secctx(struct sk_buff *skb, const struct nf_conn *ct) +static int ctnetlink_dump_secctx(struct sk_buff *skb, const struct nf_conn *ct) { struct nlattr *nest_secctx; int len, ret; @@ -380,8 +371,7 @@ ctnetlink_dump_labels(struct sk_buff *skb, const struct nf_conn *ct) #define master_tuple(ct) &(ct->master->tuplehash[IP_CT_DIR_ORIGINAL].tuple) -static inline int -ctnetlink_dump_master(struct sk_buff *skb, const struct nf_conn *ct) +static int ctnetlink_dump_master(struct sk_buff *skb, const struct nf_conn *ct) { struct nlattr *nest_parms; @@ -426,8 +416,8 @@ nla_put_failure: return -1; } -static inline int -ctnetlink_dump_ct_seq_adj(struct sk_buff *skb, const struct nf_conn *ct) +static int ctnetlink_dump_ct_seq_adj(struct sk_buff *skb, + const struct nf_conn *ct) { struct nf_conn_seqadj *seqadj = nfct_seqadj(ct); struct nf_ct_seqadj *seq; @@ -446,8 +436,7 @@ ctnetlink_dump_ct_seq_adj(struct sk_buff *skb, const struct nf_conn *ct) return 0; } -static inline int -ctnetlink_dump_id(struct sk_buff *skb, const struct nf_conn *ct) +static int ctnetlink_dump_id(struct sk_buff *skb, const struct nf_conn *ct) { if (nla_put_be32(skb, CTA_ID, htonl((unsigned long)ct))) goto nla_put_failure; @@ -457,8 +446,7 @@ nla_put_failure: return -1; } -static inline int -ctnetlink_dump_use(struct sk_buff *skb, const struct nf_conn *ct) +static int ctnetlink_dump_use(struct sk_buff *skb, const struct nf_conn *ct) { if (nla_put_be32(skb, CTA_USE, htonl(atomic_read(&ct->ct_general.use)))) goto nla_put_failure; @@ -538,8 +526,7 @@ nla_put_failure: return -1; } -static inline size_t -ctnetlink_proto_size(const struct nf_conn *ct) +static size_t ctnetlink_proto_size(const struct nf_conn *ct) { struct nf_conntrack_l3proto *l3proto; struct nf_conntrack_l4proto *l4proto; @@ -556,8 +543,7 @@ ctnetlink_proto_size(const struct nf_conn *ct) return len; } -static inline size_t -ctnetlink_acct_size(const struct nf_conn *ct) +static size_t 
ctnetlink_acct_size(const struct nf_conn *ct) { if (!nf_ct_ext_exist(ct, NF_CT_EXT_ACCT)) return 0; @@ -567,8 +553,7 @@ ctnetlink_acct_size(const struct nf_conn *ct) ; } -static inline int -ctnetlink_secctx_size(const struct nf_conn *ct) +static int ctnetlink_secctx_size(const struct nf_conn *ct) { #ifdef CONFIG_NF_CONNTRACK_SECMARK int len, ret; @@ -584,8 +569,7 @@ ctnetlink_secctx_size(const struct nf_conn *ct) #endif } -static inline size_t -ctnetlink_timestamp_size(const struct nf_conn *ct) +static size_t ctnetlink_timestamp_size(const struct nf_conn *ct) { #ifdef CONFIG_NF_CONNTRACK_TIMESTAMP if (!nf_ct_ext_exist(ct, NF_CT_EXT_TSTAMP)) @@ -596,8 +580,8 @@ ctnetlink_timestamp_size(const struct nf_conn *ct) #endif } -static inline size_t -ctnetlink_nlmsg_size(const struct nf_conn *ct) +#ifdef CONFIG_NF_CONNTRACK_EVENTS +static size_t ctnetlink_nlmsg_size(const struct nf_conn *ct) { return NLMSG_ALIGN(sizeof(struct nfgenmsg)) + 3 * nla_total_size(0) /* CTA_TUPLE_ORIG|REPL|MASTER */ @@ -628,7 +612,6 @@ ctnetlink_nlmsg_size(const struct nf_conn *ct) ; } -#ifdef CONFIG_NF_CONNTRACK_EVENTS static int ctnetlink_conntrack_event(unsigned int events, struct nf_ct_event *item) { @@ -891,8 +874,8 @@ out: return skb->len; } -static inline int -ctnetlink_parse_tuple_ip(struct nlattr *attr, struct nf_conntrack_tuple *tuple) +static int ctnetlink_parse_tuple_ip(struct nlattr *attr, + struct nf_conntrack_tuple *tuple) { struct nlattr *tb[CTA_IP_MAX+1]; struct nf_conntrack_l3proto *l3proto; @@ -921,9 +904,8 @@ static const struct nla_policy proto_nla_policy[CTA_PROTO_MAX+1] = { [CTA_PROTO_NUM] = { .type = NLA_U8 }, }; -static inline int -ctnetlink_parse_tuple_proto(struct nlattr *attr, - struct nf_conntrack_tuple *tuple) +static int ctnetlink_parse_tuple_proto(struct nlattr *attr, + struct nf_conntrack_tuple *tuple) { struct nlattr *tb[CTA_PROTO_MAX+1]; struct nf_conntrack_l4proto *l4proto; @@ -1050,9 +1032,8 @@ static const struct nla_policy help_nla_policy[CTA_HELP_MAX+1] = { .len = NF_CT_HELPER_NAME_LEN - 1 }, }; -static inline int -ctnetlink_parse_help(const struct nlattr *attr, char **helper_name, - struct nlattr **helpinfo) +static int ctnetlink_parse_help(const struct nlattr *attr, char **helper_name, + struct nlattr **helpinfo) { int err; struct nlattr *tb[CTA_HELP_MAX+1]; @@ -1463,8 +1444,8 @@ ctnetlink_setup_nat(struct nf_conn *ct, const struct nlattr * const cda[]) #endif } -static inline int -ctnetlink_change_helper(struct nf_conn *ct, const struct nlattr * const cda[]) +static int ctnetlink_change_helper(struct nf_conn *ct, + const struct nlattr * const cda[]) { struct nf_conntrack_helper *helper; struct nf_conn_help *help = nfct_help(ct); @@ -1524,8 +1505,8 @@ ctnetlink_change_helper(struct nf_conn *ct, const struct nlattr * const cda[]) return -EOPNOTSUPP; } -static inline int -ctnetlink_change_timeout(struct nf_conn *ct, const struct nlattr * const cda[]) +static int ctnetlink_change_timeout(struct nf_conn *ct, + const struct nlattr * const cda[]) { u_int32_t timeout = ntohl(nla_get_be32(cda[CTA_TIMEOUT])); @@ -1544,8 +1525,8 @@ static const struct nla_policy protoinfo_policy[CTA_PROTOINFO_MAX+1] = { [CTA_PROTOINFO_SCTP] = { .type = NLA_NESTED }, }; -static inline int -ctnetlink_change_protoinfo(struct nf_conn *ct, const struct nlattr * const cda[]) +static int ctnetlink_change_protoinfo(struct nf_conn *ct, + const struct nlattr * const cda[]) { const struct nlattr *attr = cda[CTA_PROTOINFO]; struct nlattr *tb[CTA_PROTOINFO_MAX+1]; @@ -1571,8 +1552,8 @@ static const struct nla_policy 
seqadj_policy[CTA_SEQADJ_MAX+1] = { [CTA_SEQADJ_OFFSET_AFTER] = { .type = NLA_U32 }, }; -static inline int -change_seq_adj(struct nf_ct_seqadj *seq, const struct nlattr * const attr) +static int change_seq_adj(struct nf_ct_seqadj *seq, + const struct nlattr * const attr) { int err; struct nlattr *cda[CTA_SEQADJ_MAX+1]; @@ -2405,10 +2386,9 @@ static struct nfnl_ct_hook ctnetlink_glue_hook = { * EXPECT ***********************************************************************/ -static inline int -ctnetlink_exp_dump_tuple(struct sk_buff *skb, - const struct nf_conntrack_tuple *tuple, - enum ctattr_expect type) +static int ctnetlink_exp_dump_tuple(struct sk_buff *skb, + const struct nf_conntrack_tuple *tuple, + enum ctattr_expect type) { struct nlattr *nest_parms; @@ -2425,10 +2405,9 @@ nla_put_failure: return -1; } -static inline int -ctnetlink_exp_dump_mask(struct sk_buff *skb, - const struct nf_conntrack_tuple *tuple, - const struct nf_conntrack_tuple_mask *mask) +static int ctnetlink_exp_dump_mask(struct sk_buff *skb, + const struct nf_conntrack_tuple *tuple, + const struct nf_conntrack_tuple_mask *mask) { int ret; struct nf_conntrack_l3proto *l3proto; From 9f9a45beaa96188085d52d273c2ecb052c7d8d27 Mon Sep 17 00:00:00 2001 From: Willem de Bruijn Date: Thu, 7 Apr 2016 18:12:58 -0400 Subject: [PATCH 0597/1649] udp: do not expect udp headers on ioctl SIOCINQ On udp sockets, ioctl SIOCINQ returns the payload size of the first packet. Since commit e6afc8ace6dd pulled the headers, the result is incorrect when subtracting header length. Remove that operation. Fixes: e6afc8ace6dd ("udp: remove headers from UDP packets before queueing") Signed-off-by: Willem de Bruijn Signed-off-by: David S. Miller --- net/ipv4/udp.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 3563788d064f..d2d294b0a1f1 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -1282,8 +1282,6 @@ int udp_ioctl(struct sock *sk, int cmd, unsigned long arg) * of this packet since that is all * that will be read. */ - amount -= sizeof(struct udphdr); - return put_user(amount, (int __user *)arg); } From 31c2e4926fe912f88388bcaa8450fcaa8f2ece47 Mon Sep 17 00:00:00 2001 From: Willem de Bruijn Date: Thu, 7 Apr 2016 18:12:59 -0400 Subject: [PATCH 0598/1649] udp: do not expect udp headers in recv cmsg IP_CMSG_CHECKSUM On udp sockets, recv cmsg IP_CMSG_CHECKSUM returns a checksum over the packet payload. Since commit e6afc8ace6dd pulled the headers, taking skb->data as the start of transport header is incorrect. Use the transport header pointer. Also, when peeking at an offset from the start of the packet, only return a checksum from the start of the peeked data. Note that the cmsg does not subtract a tail checkum when reading truncated data. Fixes: e6afc8ace6dd ("udp: remove headers from UDP packets before queueing") Signed-off-by: Willem de Bruijn Signed-off-by: David S. 
Miller --- net/ipv4/ip_sockglue.c | 3 ++- net/ipv4/udp.c | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index 89b5f3bd6694..279471c4e58f 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c @@ -106,7 +106,8 @@ static void ip_cmsg_recv_checksum(struct msghdr *msg, struct sk_buff *skb, return; if (offset != 0) - csum = csum_sub(csum, csum_partial(skb->data, offset, 0)); + csum = csum_sub(csum, csum_partial(skb_transport_header(skb), + offset, 0)); put_cmsg(msg, SOL_IP, IP_CHECKSUM, sizeof(__wsum), &csum); } diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index d2d294b0a1f1..f1863136d3e4 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -1375,7 +1375,7 @@ try_again: *addr_len = sizeof(*sin); } if (inet->cmsg_flags) - ip_cmsg_recv_offset(msg, skb, sizeof(struct udphdr)); + ip_cmsg_recv_offset(msg, skb, sizeof(struct udphdr) + off); err = copied; if (flags & MSG_TRUNC) From 18b46810eb61f1d1a66c5511d12e84ea8cb7f35c Mon Sep 17 00:00:00 2001 From: Alexandre TORGUE Date: Fri, 8 Apr 2016 11:18:03 +0200 Subject: [PATCH 0599/1649] net: ethernet: stmmac: GMAC4.xx: Fix TX descriptor preparation On GMAC4.xx each descriptor contains 2 buffers of 16KB (each). Initially, those 2 buffers was filled in dwmac4_rd_prepare_tx_desc but it is actually not needed. Indeed, stmmac driver supports frame up to 9000 bytes (jumbo). So only one buffer is needed. Reported-by: Dan Carpenter Signed-off-by: Alexandre TORGUE Signed-off-by: David S. Miller --- drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c index d4952c7a836d..4ec7397e7fb3 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c @@ -254,14 +254,7 @@ static void dwmac4_rd_prepare_tx_desc(struct dma_desc *p, int is_fs, int len, { unsigned int tdes3 = p->des3; - if (unlikely(len > BUF_SIZE_16KiB)) { - p->des2 |= (((len - BUF_SIZE_16KiB) << - TDES2_BUFFER2_SIZE_MASK_SHIFT) - & TDES2_BUFFER2_SIZE_MASK) - | (BUF_SIZE_16KiB & TDES2_BUFFER1_SIZE_MASK); - } else { - p->des2 |= (len & TDES2_BUFFER1_SIZE_MASK); - } + p->des2 |= (len & TDES2_BUFFER1_SIZE_MASK); if (is_fs) tdes3 |= TDES3_FIRST_DESCRIPTOR; From fafc4e1ea1a4c1eb13a30c9426fb799f5efacbc3 Mon Sep 17 00:00:00 2001 From: Hannes Frederic Sowa Date: Fri, 8 Apr 2016 15:11:27 +0200 Subject: [PATCH 0600/1649] sock: tigthen lockdep checks for sock_owned_by_user sock_owned_by_user should not be used without socket lock held. It seems to be a common practice to check .owned before lock reclassification, so provide a little help to abstract this check away. Cc: linux-cifs@vger.kernel.org Cc: linux-bluetooth@vger.kernel.org Cc: linux-nfs@vger.kernel.org Signed-off-by: Hannes Frederic Sowa Signed-off-by: David S. 
Miller --- fs/cifs/connect.c | 4 ++-- include/net/sock.h | 44 ++++++++++++++++++++++++------------ net/bluetooth/af_bluetooth.c | 2 +- net/llc/llc_proc.c | 2 +- net/sunrpc/svcsock.c | 3 +-- net/sunrpc/xprtsock.c | 3 +-- 6 files changed, 35 insertions(+), 23 deletions(-) diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 6f62ac821a84..2e2e0a6242d6 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -2918,7 +2918,7 @@ static inline void cifs_reclassify_socket4(struct socket *sock) { struct sock *sk = sock->sk; - BUG_ON(sock_owned_by_user(sk)); + BUG_ON(!sock_allow_reclassification(sk)); sock_lock_init_class_and_name(sk, "slock-AF_INET-CIFS", &cifs_slock_key[0], "sk_lock-AF_INET-CIFS", &cifs_key[0]); } @@ -2927,7 +2927,7 @@ static inline void cifs_reclassify_socket6(struct socket *sock) { struct sock *sk = sock->sk; - BUG_ON(sock_owned_by_user(sk)); + BUG_ON(!sock_allow_reclassification(sk)); sock_lock_init_class_and_name(sk, "slock-AF_INET6-CIFS", &cifs_slock_key[1], "sk_lock-AF_INET6-CIFS", &cifs_key[1]); } diff --git a/include/net/sock.h b/include/net/sock.h index 81d6fecec0a2..baba58770ac5 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -1316,21 +1316,6 @@ static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb) __kfree_skb(skb); } -/* Used by processes to "lock" a socket state, so that - * interrupts and bottom half handlers won't change it - * from under us. It essentially blocks any incoming - * packets, so that we won't get any new data or any - * packets that change the state of the socket. - * - * While locked, BH processing will add new packets to - * the backlog queue. This queue is processed by the - * owner of the socket lock right before it is released. - * - * Since ~2.3.5 it is also exclusive sleep lock serializing - * accesses from user process context. - */ -#define sock_owned_by_user(sk) ((sk)->sk_lock.owned) - static inline void sock_release_ownership(struct sock *sk) { if (sk->sk_lock.owned) { @@ -1403,6 +1388,35 @@ static inline void unlock_sock_fast(struct sock *sk, bool slow) spin_unlock_bh(&sk->sk_lock.slock); } +/* Used by processes to "lock" a socket state, so that + * interrupts and bottom half handlers won't change it + * from under us. It essentially blocks any incoming + * packets, so that we won't get any new data or any + * packets that change the state of the socket. + * + * While locked, BH processing will add new packets to + * the backlog queue. This queue is processed by the + * owner of the socket lock right before it is released. + * + * Since ~2.3.5 it is also exclusive sleep lock serializing + * accesses from user process context. 
+ */ + +static inline bool sock_owned_by_user(const struct sock *sk) +{ +#ifdef CONFIG_LOCKDEP + WARN_ON(!lockdep_sock_is_held(sk)); +#endif + return sk->sk_lock.owned; +} + +/* no reclassification while locks are held */ +static inline bool sock_allow_reclassification(const struct sock *csk) +{ + struct sock *sk = (struct sock *)csk; + + return !sk->sk_lock.owned && !spin_is_locked(&sk->sk_lock.slock); +} struct sock *sk_alloc(struct net *net, int family, gfp_t priority, struct proto *prot, int kern); diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c index 955eda93e66f..3df7aefb7663 100644 --- a/net/bluetooth/af_bluetooth.c +++ b/net/bluetooth/af_bluetooth.c @@ -65,7 +65,7 @@ static const char *const bt_slock_key_strings[BT_MAX_PROTO] = { void bt_sock_reclassify_lock(struct sock *sk, int proto) { BUG_ON(!sk); - BUG_ON(sock_owned_by_user(sk)); + BUG_ON(!sock_allow_reclassification(sk)); sock_lock_init_class_and_name(sk, bt_slock_key_strings[proto], &bt_slock_key[proto], diff --git a/net/llc/llc_proc.c b/net/llc/llc_proc.c index 1a3c7e0f5d0d..29c509c54bb2 100644 --- a/net/llc/llc_proc.c +++ b/net/llc/llc_proc.c @@ -195,7 +195,7 @@ static int llc_seq_core_show(struct seq_file *seq, void *v) timer_pending(&llc->pf_cycle_timer.timer), timer_pending(&llc->rej_sent_timer.timer), timer_pending(&llc->busy_state_timer.timer), - !!sk->sk_backlog.tail, !!sock_owned_by_user(sk)); + !!sk->sk_backlog.tail, !!sk->sk_lock.owned); out: return 0; } diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c index 71d6072664d2..dadfec66dbd8 100644 --- a/net/sunrpc/svcsock.c +++ b/net/sunrpc/svcsock.c @@ -85,8 +85,7 @@ static void svc_reclassify_socket(struct socket *sock) { struct sock *sk = sock->sk; - WARN_ON_ONCE(sock_owned_by_user(sk)); - if (sock_owned_by_user(sk)) + if (WARN_ON_ONCE(!sock_allow_reclassification(sk))) return; switch (sk->sk_family) { diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index c1fc7b20bbc1..d0756ac5c0f2 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -1880,8 +1880,7 @@ static inline void xs_reclassify_socket6(struct socket *sock) static inline void xs_reclassify_socket(int family, struct socket *sock) { - WARN_ON_ONCE(sock_owned_by_user(sock->sk)); - if (sock_owned_by_user(sock->sk)) + if (WARN_ON_ONCE(!sock_allow_reclassification(sock->sk))) return; switch (family) { From 47e27d5e92c46a3a62d4dfd8895b1ddb8613f531 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Fri, 8 Apr 2016 15:55:00 +0200 Subject: [PATCH 0601/1649] ipv6, token: allow for clearing the current device token The original tokenized iid support implemented via f53adae4eae5 ("net: ipv6: add tokenized interface identifier support") didn't allow for clearing a device token as it was intended that this addressing mode was the only one active for globally scoped IPv6 addresses. Later we relaxed that restriction via 617fe29d45bd ("net: ipv6: only invalidate previously tokenized addresses"), and we should also allow for clearing tokens as there's no good reason why it shouldn't be allowed. Fixes: 617fe29d45bd ("net: ipv6: only invalidate previously tokenized addresses") Reported-by: Robin H. Johnson Signed-off-by: Daniel Borkmann Cc: Hannes Frederic Sowa Acked-by: Hannes Frederic Sowa Signed-off-by: David S. 
Miller --- net/ipv6/addrconf.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 27aed1afcf81..a6c99275bd8c 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -4995,15 +4995,13 @@ static int inet6_set_iftoken(struct inet6_dev *idev, struct in6_addr *token) { struct inet6_ifaddr *ifp; struct net_device *dev = idev->dev; - bool update_rs = false; + bool clear_token, update_rs = false; struct in6_addr ll_addr; ASSERT_RTNL(); if (!token) return -EINVAL; - if (ipv6_addr_any(token)) - return -EINVAL; if (dev->flags & (IFF_LOOPBACK | IFF_NOARP)) return -EINVAL; if (!ipv6_accept_ra(idev)) @@ -5018,10 +5016,13 @@ static int inet6_set_iftoken(struct inet6_dev *idev, struct in6_addr *token) write_unlock_bh(&idev->lock); + clear_token = ipv6_addr_any(token); + if (clear_token) + goto update_lft; + if (!idev->dead && (idev->if_flags & IF_READY) && !ipv6_get_lladdr(dev, &ll_addr, IFA_F_TENTATIVE | IFA_F_OPTIMISTIC)) { - /* If we're not ready, then normal ifup will take care * of this. Otherwise, we need to request our rs here. */ @@ -5029,6 +5030,7 @@ static int inet6_set_iftoken(struct inet6_dev *idev, struct in6_addr *token) update_rs = true; } +update_lft: write_lock_bh(&idev->lock); if (update_rs) { From bf91795e4a77eb75602702e4c4d9b98b155039e9 Mon Sep 17 00:00:00 2001 From: Masanari Iida Date: Sat, 9 Apr 2016 00:00:25 +0900 Subject: [PATCH 0602/1649] Doc: networking: Fix typo in dsa This patch fix typos in Documentation/networking/dsa. Signed-off-by: Masanari Iida Reviewed-by: Andrew Lunn Signed-off-by: David S. Miller --- Documentation/networking/dsa/bcm_sf2.txt | 2 +- Documentation/networking/dsa/dsa.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Documentation/networking/dsa/bcm_sf2.txt b/Documentation/networking/dsa/bcm_sf2.txt index d999d0c1c5b8..eba3a2431e91 100644 --- a/Documentation/networking/dsa/bcm_sf2.txt +++ b/Documentation/networking/dsa/bcm_sf2.txt @@ -38,7 +38,7 @@ Implementation details ====================== The driver is located in drivers/net/dsa/bcm_sf2.c and is implemented as a DSA -driver; see Documentation/networking/dsa/dsa.txt for details on the subsytem +driver; see Documentation/networking/dsa/dsa.txt for details on the subsystem and what it provides. The SF2 switch is configured to enable a Broadcom specific 4-bytes switch tag diff --git a/Documentation/networking/dsa/dsa.txt b/Documentation/networking/dsa/dsa.txt index ba698c56919d..631b0f7ae16f 100644 --- a/Documentation/networking/dsa/dsa.txt +++ b/Documentation/networking/dsa/dsa.txt @@ -334,7 +334,7 @@ more specifically with its VLAN filtering portion when configuring VLANs on top of per-port slave network devices. Since DSA primarily deals with MDIO-connected switches, although not exclusively, SWITCHDEV's prepare/abort/commit phases are often simplified into a prepare phase which -checks whether the operation is supporte by the DSA switch driver, and a commit +checks whether the operation is supported by the DSA switch driver, and a commit phase which applies the changes. As of today, the only SWITCHDEV objects supported by DSA are the FDB and VLAN From f9a7cbbf18f1640907d6ca345b8337e4b50ea56f Mon Sep 17 00:00:00 2001 From: Denys Vlasenko Date: Fri, 8 Apr 2016 17:51:54 +0200 Subject: [PATCH 0603/1649] net: force inlining of netif_tx_start/stop_queue, sock_hold, __sock_put Sometimes gcc mysteriously doesn't inline very small functions we expect to be inlined. 
See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=66122 Arguably, gcc should do better, but gcc people aren't willing to invest time into it, asking to use __always_inline instead. With this .config: http://busybox.net/~vda/kernel_config_OPTIMIZE_INLINING_and_Os, the following functions get deinlined many times. netif_tx_stop_queue: 207 copies, 590 calls: 55 push %rbp 48 89 e5 mov %rsp,%rbp f0 80 8f e0 01 00 00 01 lock orb $0x1,0x1e0(%rdi) 5d pop %rbp c3 retq netif_tx_start_queue: 47 copies, 111 calls 55 push %rbp 48 89 e5 mov %rsp,%rbp f0 80 a7 e0 01 00 00 fe lock andb $0xfe,0x1e0(%rdi) 5d pop %rbp c3 retq sock_hold: 39 copies, 124 calls 55 push %rbp 48 89 e5 mov %rsp,%rbp f0 ff 87 80 00 00 00 lock incl 0x80(%rdi) 5d pop %rbp c3 retq __sock_put: 6 copies, 13 calls 55 push %rbp 48 89 e5 mov %rsp,%rbp f0 ff 8f 80 00 00 00 lock decl 0x80(%rdi) 5d pop %rbp c3 retq This patch fixes this via s/inline/__always_inline/. Code size decrease after the patch is ~2.5k: text data bss dec hex filename 56719876 56364551 36196352 149280779 8e5d80b vmlinux_before 56717440 56364551 36196352 149278343 8e5ce87 vmlinux Signed-off-by: Denys Vlasenko CC: David S. Miller CC: linux-kernel@vger.kernel.org CC: netdev@vger.kernel.org CC: netfilter-devel@vger.kernel.org Signed-off-by: David S. Miller --- include/linux/netdevice.h | 4 ++-- include/net/sock.h | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 166402ae3324..e906c6570b38 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -2787,7 +2787,7 @@ static inline void netif_tx_schedule_all(struct net_device *dev) netif_schedule_queue(netdev_get_tx_queue(dev, i)); } -static inline void netif_tx_start_queue(struct netdev_queue *dev_queue) +static __always_inline void netif_tx_start_queue(struct netdev_queue *dev_queue) { clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state); } @@ -2837,7 +2837,7 @@ static inline void netif_tx_wake_all_queues(struct net_device *dev) } } -static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue) +static __always_inline void netif_tx_stop_queue(struct netdev_queue *dev_queue) { set_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state); } diff --git a/include/net/sock.h b/include/net/sock.h index baba58770ac5..d997ec13a643 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -569,7 +569,7 @@ static inline bool __sk_del_node_init(struct sock *sk) modifications. */ -static inline void sock_hold(struct sock *sk) +static __always_inline void sock_hold(struct sock *sk) { atomic_inc(&sk->sk_refcnt); } @@ -577,7 +577,7 @@ static inline void sock_hold(struct sock *sk) /* Ungrab socket in the context, which assumes that socket refcnt cannot hit zero, f.e. it is true in context of any socketcall. */ -static inline void __sock_put(struct sock *sk) +static __always_inline void __sock_put(struct sock *sk) { atomic_dec(&sk->sk_refcnt); } From 14f31bb39f5d4b69c179d219833d7edb9b36ebd9 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Sat, 9 Apr 2016 00:03:28 +0800 Subject: [PATCH 0604/1649] bridge: simplify the flush_store by calling store_bridge_parm There are some repetitive codes in flush_store, we can remove them by calling store_bridge_parm, also, it would send rtnl notification after we add it in store_bridge_parm in the following patches. Signed-off-by: Xin Long Reviewed-by: Nikolay Aleksandrov Signed-off-by: David S. 
Miller --- net/bridge/br_sysfs_br.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/net/bridge/br_sysfs_br.c b/net/bridge/br_sysfs_br.c index 6b8091407ca3..c48f6b0b2022 100644 --- a/net/bridge/br_sysfs_br.c +++ b/net/bridge/br_sysfs_br.c @@ -336,17 +336,17 @@ static ssize_t group_addr_store(struct device *d, static DEVICE_ATTR_RW(group_addr); +static int set_flush(struct net_bridge *br, unsigned long val) +{ + br_fdb_flush(br); + return 0; +} + static ssize_t flush_store(struct device *d, struct device_attribute *attr, const char *buf, size_t len) { - struct net_bridge *br = to_bridge(d); - - if (!ns_capable(dev_net(br->dev)->user_ns, CAP_NET_ADMIN)) - return -EPERM; - - br_fdb_flush(br); - return len; + return store_bridge_parm(d, buf, len, set_flush); } static DEVICE_ATTR_WO(flush); From 347db6b49ec0ba5ee3c9d946d45b7db59cf40480 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Sat, 9 Apr 2016 00:03:29 +0800 Subject: [PATCH 0605/1649] bridge: simplify the forward_delay_store by calling store_bridge_parm There are some repetitive codes in forward_delay_store, we can remove them by calling store_bridge_parm. Signed-off-by: Xin Long Reviewed-by: Nikolay Aleksandrov Signed-off-by: David S. Miller --- net/bridge/br_sysfs_br.c | 27 ++++++++++----------------- 1 file changed, 10 insertions(+), 17 deletions(-) diff --git a/net/bridge/br_sysfs_br.c b/net/bridge/br_sysfs_br.c index c48f6b0b2022..137cd3bf2565 100644 --- a/net/bridge/br_sysfs_br.c +++ b/net/bridge/br_sysfs_br.c @@ -160,29 +160,22 @@ static ssize_t group_fwd_mask_show(struct device *d, return sprintf(buf, "%#x\n", br->group_fwd_mask); } +static int set_group_fwd_mask(struct net_bridge *br, unsigned long val) +{ + if (val & BR_GROUPFWD_RESTRICTED) + return -EINVAL; + + br->group_fwd_mask = val; + + return 0; +} static ssize_t group_fwd_mask_store(struct device *d, struct device_attribute *attr, const char *buf, size_t len) { - struct net_bridge *br = to_bridge(d); - char *endp; - unsigned long val; - - if (!ns_capable(dev_net(br->dev)->user_ns, CAP_NET_ADMIN)) - return -EPERM; - - val = simple_strtoul(buf, &endp, 0); - if (endp == buf) - return -EINVAL; - - if (val & BR_GROUPFWD_RESTRICTED) - return -EINVAL; - - br->group_fwd_mask = val; - - return len; + return store_bridge_parm(d, buf, len, set_group_fwd_mask); } static DEVICE_ATTR_RW(group_fwd_mask); From 4436156b6fbec746108d45431a9f1885de810ec1 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Sat, 9 Apr 2016 00:03:30 +0800 Subject: [PATCH 0606/1649] bridge: simplify the stp_state_store by calling store_bridge_parm There are some repetitive codes in stp_state_store, we can remove them by calling store_bridge_parm. Signed-off-by: Xin Long Reviewed-by: Nikolay Aleksandrov Signed-off-by: David S. 
Miller --- net/bridge/br_sysfs_br.c | 24 +++++++++--------------- 1 file changed, 9 insertions(+), 15 deletions(-) diff --git a/net/bridge/br_sysfs_br.c b/net/bridge/br_sysfs_br.c index 137cd3bf2565..f9d484ecae07 100644 --- a/net/bridge/br_sysfs_br.c +++ b/net/bridge/br_sysfs_br.c @@ -128,27 +128,21 @@ static ssize_t stp_state_show(struct device *d, } -static ssize_t stp_state_store(struct device *d, - struct device_attribute *attr, const char *buf, - size_t len) +static int set_stp_state(struct net_bridge *br, unsigned long val) { - struct net_bridge *br = to_bridge(d); - char *endp; - unsigned long val; - - if (!ns_capable(dev_net(br->dev)->user_ns, CAP_NET_ADMIN)) - return -EPERM; - - val = simple_strtoul(buf, &endp, 0); - if (endp == buf) - return -EINVAL; - if (!rtnl_trylock()) return restart_syscall(); br_stp_set_enabled(br, val); rtnl_unlock(); - return len; + return 0; +} + +static ssize_t stp_state_store(struct device *d, + struct device_attribute *attr, const char *buf, + size_t len) +{ + return store_bridge_parm(d, buf, len, set_stp_state); } static DEVICE_ATTR_RW(stp_state); From 047831a9b9c3e34410025df84f629c005f437e42 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Sat, 9 Apr 2016 00:03:31 +0800 Subject: [PATCH 0607/1649] bridge: a netlink notification should be sent when those attributes are changed by br_sysfs_br Now when we change the attributes of bridge or br_port by netlink, a relevant netlink notification will be sent, but if we change them by ioctl or sysfs, no notification will be sent. We should ensure that whenever those attributes change internally or from sysfs/ioctl, that a netlink notification is sent out to listeners. Also, NetworkManager will use this in the future to listen for out-of-band bridge master attribute updates and incorporate them into the runtime configuration. This patch is used for br_sysfs_br. and we also need to remove some rtnl_trylock in old functions so that we can call it in a common one. For group_addr_store, we cannot make it use store_bridge_parm, because it's not a string-to-long convert, we will add notification on it individually. Signed-off-by: Xin Long Signed-off-by: Nikolay Aleksandrov Signed-off-by: David S. Miller --- net/bridge/br_sysfs_br.c | 21 +++++++++------------ net/bridge/br_vlan.c | 30 +++++------------------------- 2 files changed, 14 insertions(+), 37 deletions(-) diff --git a/net/bridge/br_sysfs_br.c b/net/bridge/br_sysfs_br.c index f9d484ecae07..70bddfd0f3e9 100644 --- a/net/bridge/br_sysfs_br.c +++ b/net/bridge/br_sysfs_br.c @@ -43,7 +43,14 @@ static ssize_t store_bridge_parm(struct device *d, if (endp == buf) return -EINVAL; + if (!rtnl_trylock()) + return restart_syscall(); + err = (*set)(br, val); + if (!err) + netdev_state_change(br->dev); + rtnl_unlock(); + return err ? 
err : len; } @@ -101,15 +108,7 @@ static ssize_t ageing_time_show(struct device *d, static int set_ageing_time(struct net_bridge *br, unsigned long val) { - int ret; - - if (!rtnl_trylock()) - return restart_syscall(); - - ret = br_set_ageing_time(br, val); - rtnl_unlock(); - - return ret; + return br_set_ageing_time(br, val); } static ssize_t ageing_time_store(struct device *d, @@ -130,10 +129,7 @@ static ssize_t stp_state_show(struct device *d, static int set_stp_state(struct net_bridge *br, unsigned long val) { - if (!rtnl_trylock()) - return restart_syscall(); br_stp_set_enabled(br, val); - rtnl_unlock(); return 0; } @@ -315,6 +311,7 @@ static ssize_t group_addr_store(struct device *d, br->group_addr_set = true; br_recalculate_fwd_mask(br); + netdev_state_change(br->dev); rtnl_unlock(); diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c index 9309bb4f2a5b..e001152d6ad1 100644 --- a/net/bridge/br_vlan.c +++ b/net/bridge/br_vlan.c @@ -651,15 +651,7 @@ int __br_vlan_filter_toggle(struct net_bridge *br, unsigned long val) int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val) { - int err; - - if (!rtnl_trylock()) - return restart_syscall(); - - err = __br_vlan_filter_toggle(br, val); - rtnl_unlock(); - - return err; + return __br_vlan_filter_toggle(br, val); } int __br_vlan_set_proto(struct net_bridge *br, __be16 proto) @@ -713,18 +705,10 @@ err_filt: int br_vlan_set_proto(struct net_bridge *br, unsigned long val) { - int err; - if (val != ETH_P_8021Q && val != ETH_P_8021AD) return -EPROTONOSUPPORT; - if (!rtnl_trylock()) - return restart_syscall(); - - err = __br_vlan_set_proto(br, htons(val)); - rtnl_unlock(); - - return err; + return __br_vlan_set_proto(br, htons(val)); } static bool vlan_default_pvid(struct net_bridge_vlan_group *vg, u16 vid) @@ -855,21 +839,17 @@ int br_vlan_set_default_pvid(struct net_bridge *br, unsigned long val) if (val >= VLAN_VID_MASK) return -EINVAL; - if (!rtnl_trylock()) - return restart_syscall(); - if (pvid == br->default_pvid) - goto unlock; + goto out; /* Only allow default pvid change when filtering is disabled */ if (br->vlan_enabled) { pr_info_once("Please disable vlan filtering to change default_pvid\n"); err = -EPERM; - goto unlock; + goto out; } err = __br_vlan_set_default_pvid(br, pvid); -unlock: - rtnl_unlock(); +out: return err; } From bdaf0d5d98e1c42d3a48c5ce6db9d013cb882781 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Sat, 9 Apr 2016 00:03:32 +0800 Subject: [PATCH 0608/1649] bridge: a netlink notification should be sent when those attributes are changed by br_sysfs_if Now when we change the attributes of bridge or br_port by netlink, a relevant netlink notification will be sent, but if we change them by ioctl or sysfs, no notification will be sent. We should ensure that whenever those attributes change internally or from sysfs/ioctl, that a netlink notification is sent out to listeners. Also, NetworkManager will use this in the future to listen for out-of-band bridge master attribute updates and incorporate them into the runtime configuration. This patch is used for br_sysfs_if, and we also move br_ifinfo_notify out of store_flag. Signed-off-by: Xin Long Reviewed-by: Nikolay Aleksandrov Signed-off-by: David S. 
Miller --- net/bridge/br_sysfs_if.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/net/bridge/br_sysfs_if.c b/net/bridge/br_sysfs_if.c index efe415ad842a..1e04d4d44273 100644 --- a/net/bridge/br_sysfs_if.c +++ b/net/bridge/br_sysfs_if.c @@ -61,7 +61,6 @@ static int store_flag(struct net_bridge_port *p, unsigned long v, if (flags != p->flags) { p->flags = flags; br_port_flags_change(p, mask); - br_ifinfo_notify(RTM_NEWLINK, p); } return 0; } @@ -253,8 +252,10 @@ static ssize_t brport_store(struct kobject *kobj, spin_lock_bh(&p->br->lock); ret = brport_attr->store(p, val); spin_unlock_bh(&p->br->lock); - if (ret == 0) + if (!ret) { + br_ifinfo_notify(RTM_NEWLINK, p); ret = count; + } } rtnl_unlock(); } From bf871ad792e3c9f5dda0ef5bd519e0a2f1564001 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Sat, 9 Apr 2016 00:03:33 +0800 Subject: [PATCH 0609/1649] bridge: a netlink notification should be sent when those attributes are changed by ioctl Now when we change the attributes of bridge or br_port by netlink, a relevant netlink notification will be sent, but if we change them by ioctl or sysfs, no notification will be sent. We should ensure that whenever those attributes change internally or from sysfs/ioctl, that a netlink notification is sent out to listeners. Also, NetworkManager will use this in the future to listen for out-of-band bridge master attribute updates and incorporate them into the runtime configuration. This patch is used for ioctl. Signed-off-by: Xin Long Reviewed-by: Nikolay Aleksandrov Signed-off-by: David S. Miller --- net/bridge/br_ioctl.c | 40 ++++++++++++++++++++++++---------------- 1 file changed, 24 insertions(+), 16 deletions(-) diff --git a/net/bridge/br_ioctl.c b/net/bridge/br_ioctl.c index 263b4de4de57..f8fc6241469a 100644 --- a/net/bridge/br_ioctl.c +++ b/net/bridge/br_ioctl.c @@ -112,7 +112,9 @@ static int add_del_if(struct net_bridge *br, int ifindex, int isadd) static int old_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { struct net_bridge *br = netdev_priv(dev); + struct net_bridge_port *p = NULL; unsigned long args[4]; + int ret = -EOPNOTSUPP; if (copy_from_user(args, rq->ifr_data, sizeof(args))) return -EFAULT; @@ -182,25 +184,29 @@ static int old_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) if (!ns_capable(dev_net(dev)->user_ns, CAP_NET_ADMIN)) return -EPERM; - return br_set_forward_delay(br, args[1]); + ret = br_set_forward_delay(br, args[1]); + break; case BRCTL_SET_BRIDGE_HELLO_TIME: if (!ns_capable(dev_net(dev)->user_ns, CAP_NET_ADMIN)) return -EPERM; - return br_set_hello_time(br, args[1]); + ret = br_set_hello_time(br, args[1]); + break; case BRCTL_SET_BRIDGE_MAX_AGE: if (!ns_capable(dev_net(dev)->user_ns, CAP_NET_ADMIN)) return -EPERM; - return br_set_max_age(br, args[1]); + ret = br_set_max_age(br, args[1]); + break; case BRCTL_SET_AGEING_TIME: if (!ns_capable(dev_net(dev)->user_ns, CAP_NET_ADMIN)) return -EPERM; - return br_set_ageing_time(br, args[1]); + ret = br_set_ageing_time(br, args[1]); + break; case BRCTL_GET_PORT_INFO: { @@ -240,20 +246,19 @@ static int old_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) return -EPERM; br_stp_set_enabled(br, args[1]); - return 0; + ret = 0; + break; case BRCTL_SET_BRIDGE_PRIORITY: if (!ns_capable(dev_net(dev)->user_ns, CAP_NET_ADMIN)) return -EPERM; br_stp_set_bridge_priority(br, args[1]); - return 0; + ret = 0; + break; case BRCTL_SET_PORT_PRIORITY: { - struct net_bridge_port *p; - int ret; - if (!ns_capable(dev_net(dev)->user_ns, 
CAP_NET_ADMIN)) return -EPERM; @@ -263,14 +268,11 @@ static int old_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) else ret = br_stp_set_port_priority(p, args[2]); spin_unlock_bh(&br->lock); - return ret; + break; } case BRCTL_SET_PATH_COST: { - struct net_bridge_port *p; - int ret; - if (!ns_capable(dev_net(dev)->user_ns, CAP_NET_ADMIN)) return -EPERM; @@ -280,8 +282,7 @@ static int old_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) else ret = br_stp_set_path_cost(p, args[2]); spin_unlock_bh(&br->lock); - - return ret; + break; } case BRCTL_GET_FDB_ENTRIES: @@ -289,7 +290,14 @@ static int old_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) args[2], args[3]); } - return -EOPNOTSUPP; + if (!ret) { + if (p) + br_ifinfo_notify(RTM_NEWLINK, p); + else + netdev_state_change(br->dev); + } + + return ret; } static int old_deviceless(struct net *net, void __user *uarg) From ea019649c37b8aa0d1ac5727d122b2e8ed74f536 Mon Sep 17 00:00:00 2001 From: Denys Vlasenko Date: Fri, 8 Apr 2016 20:39:47 +0200 Subject: [PATCH 0610/1649] drivers/net/ethernet/jme.c: Deinline jme_reset_mac_processor, save 2816 bytes This function compiles to 895 bytes of machine code. Clearly, this isn't a time-critical function. For one, it has a number of udelay(1) calls. Signed-off-by: Denys Vlasenko CC: David S. Miller CC: linux-kernel@vger.kernel.org CC: netdev@vger.kernel.org Signed-off-by: David S. Miller --- drivers/net/ethernet/jme.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c index 3ddf657bc10b..836ebd8ee768 100644 --- a/drivers/net/ethernet/jme.c +++ b/drivers/net/ethernet/jme.c @@ -222,7 +222,7 @@ jme_clear_ghc_reset(struct jme_adapter *jme) jwrite32f(jme, JME_GHC, jme->reg_ghc); } -static inline void +static void jme_reset_mac_processor(struct jme_adapter *jme) { static const u32 mask[WAKEUP_FRAME_MASK_DWNR] = {0, 0, 0, 0}; From 250eb1f8815303f71c94a5680f8e4f2dcfa25cf5 Mon Sep 17 00:00:00 2001 From: Marcelo Ricardo Leitner Date: Fri, 8 Apr 2016 16:41:27 -0300 Subject: [PATCH 0611/1649] sctp: compress bit-wide flags to a bitfield on sctp_sock It wastes space and gets worse as we add new flags, so convert bit-wide flags to a bitfield. Currently it already saves 4 bytes in sctp_sock, which are left as holes in it for now. The whole struct needs packing, which should be done in another patch. Note that do_auto_asconf cannot be merged, as explained in the comment before it. Signed-off-by: Marcelo Ricardo Leitner Acked-by: Neil Horman Signed-off-by: David S. Miller --- include/net/sctp/structs.h | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index 6df1ce7a411c..1a6a626904bb 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h @@ -210,14 +210,14 @@ struct sctp_sock { int user_frag; __u32 autoclose; - __u8 nodelay; - __u8 disable_fragments; - __u8 v4mapped; - __u8 frag_interleave; __u32 adaptation_ind; __u32 pd_point; - __u8 recvrcvinfo; - __u8 recvnxtinfo; + __u16 nodelay:1, + disable_fragments:1, + v4mapped:1, + frag_interleave:1, + recvrcvinfo:1, + recvnxtinfo:1; atomic_t pd_mode; /* Receive to here while partial delivery is in effect. 
*/ From fb586f25300f4587c7ebd097a604bf269b25bfa7 Mon Sep 17 00:00:00 2001 From: Marcelo Ricardo Leitner Date: Fri, 8 Apr 2016 16:41:28 -0300 Subject: [PATCH 0612/1649] sctp: delay calls to sk_data_ready() as much as possible Currently processing of multiple chunks in a single SCTP packet leads to multiple calls to sk_data_ready, causing multiple wake up signals which are costy and doesn't make it wake up any faster. With this patch it will note that the wake up is pending and will do it before leaving the state machine interpreter, latest place possible to do it realiably and cleanly. Note that sk_data_ready events are not dependent on asocs, unlike waking up writers. v2: series re-checked v3: use local vars to cleanup the code, suggested by Jakub Sitnicki Signed-off-by: Marcelo Ricardo Leitner Signed-off-by: David S. Miller --- include/net/sctp/structs.h | 3 ++- net/sctp/sm_sideeffect.c | 7 +++++++ net/sctp/ulpqueue.c | 4 ++-- 3 files changed, 11 insertions(+), 3 deletions(-) diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index 1a6a626904bb..21cb11107e37 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h @@ -217,7 +217,8 @@ struct sctp_sock { v4mapped:1, frag_interleave:1, recvrcvinfo:1, - recvnxtinfo:1; + recvnxtinfo:1, + pending_data_ready:1; atomic_t pd_mode; /* Receive to here while partial delivery is in effect. */ diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c index 7fe56d0acabf..d06317de8730 100644 --- a/net/sctp/sm_sideeffect.c +++ b/net/sctp/sm_sideeffect.c @@ -1222,6 +1222,8 @@ static int sctp_cmd_interpreter(sctp_event_t event_type, sctp_cmd_seq_t *commands, gfp_t gfp) { + struct sock *sk = ep->base.sk; + struct sctp_sock *sp = sctp_sk(sk); int error = 0; int force; sctp_cmd_t *cmd; @@ -1742,6 +1744,11 @@ out: error = sctp_outq_uncork(&asoc->outqueue, gfp); } else if (local_cork) error = sctp_outq_uncork(&asoc->outqueue, gfp); + + if (sp->pending_data_ready) { + sk->sk_data_ready(sk); + sp->pending_data_ready = 0; + } return error; nomem: error = -ENOMEM; diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c index ce469d648ffb..72e5b3e41cdd 100644 --- a/net/sctp/ulpqueue.c +++ b/net/sctp/ulpqueue.c @@ -264,7 +264,7 @@ int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event) sctp_ulpq_clear_pd(ulpq); if (queue == &sk->sk_receive_queue) - sk->sk_data_ready(sk); + sctp_sk(sk)->pending_data_ready = 1; return 1; out_free: @@ -1140,5 +1140,5 @@ void sctp_ulpq_abort_pd(struct sctp_ulpq *ulpq, gfp_t gfp) /* If there is data waiting, send it up the socket now. */ if (sctp_ulpq_clear_pd(ulpq) || ev) - sk->sk_data_ready(sk); + sctp_sk(sk)->pending_data_ready = 1; } From eb96ce01bab7af55d983feaae069c18792d427ef Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 8 Apr 2016 22:06:40 -0700 Subject: [PATCH 0613/1649] net: bcmgenet: use napi_complete_done() By using napi_complete_done(), we allow fine tuning of /sys/class/net/ethX/gro_flush_timeout for higher GRO aggregation efficiency for a Gbit NIC. Check commit 24d2e4a50737 ("tg3: use napi_complete_done()") for details. Signed-off-by: Eric Dumazet Cc: Petri Gynther Cc: Florian Fainelli Acked-by: Florian Fainelli Acked-by: Petri Gynther Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/genet/bcmgenet.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index f7b42b9fc979..e823013d3125 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -1735,7 +1735,7 @@ static int bcmgenet_rx_poll(struct napi_struct *napi, int budget) work_done = bcmgenet_desc_rx(ring, budget); if (work_done < budget) { - napi_complete(napi); + napi_complete_done(napi, work_done); ring->int_enable(ring); } From dac916f8fbd1ea514a61f4dcecaa97a5e7bac4c0 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Fri, 8 Apr 2016 22:30:56 -0700 Subject: [PATCH 0614/1649] net: bcmgenet: use __napi_schedule_irqoff() bcmgenet_isr1() and bcmgenet_isr0() run in hard irq context, we do not need to block irq again. Signed-off-by: Florian Fainelli Signed-off-by: Eric Dumazet Acked-by: Petri Gynther Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/genet/bcmgenet.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index e823013d3125..49f132c7ed99 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -2493,7 +2493,7 @@ static irqreturn_t bcmgenet_isr1(int irq, void *dev_id) if (likely(napi_schedule_prep(&rx_ring->napi))) { rx_ring->int_disable(rx_ring); - __napi_schedule(&rx_ring->napi); + __napi_schedule_irqoff(&rx_ring->napi); } } @@ -2506,7 +2506,7 @@ static irqreturn_t bcmgenet_isr1(int irq, void *dev_id) if (likely(napi_schedule_prep(&tx_ring->napi))) { tx_ring->int_disable(tx_ring); - __napi_schedule(&tx_ring->napi); + __napi_schedule_irqoff(&tx_ring->napi); } } @@ -2536,7 +2536,7 @@ static irqreturn_t bcmgenet_isr0(int irq, void *dev_id) if (likely(napi_schedule_prep(&rx_ring->napi))) { rx_ring->int_disable(rx_ring); - __napi_schedule(&rx_ring->napi); + __napi_schedule_irqoff(&rx_ring->napi); } } @@ -2545,7 +2545,7 @@ static irqreturn_t bcmgenet_isr0(int irq, void *dev_id) if (likely(napi_schedule_prep(&tx_ring->napi))) { tx_ring->int_disable(tx_ring); - __napi_schedule(&tx_ring->napi); + __napi_schedule_irqoff(&tx_ring->napi); } } From e178c8c2306ebff13aee365de703e6b8b2bea066 Mon Sep 17 00:00:00 2001 From: Petri Gynther Date: Sat, 9 Apr 2016 00:20:36 -0700 Subject: [PATCH 0615/1649] net: bcmgenet: add BQL support Add Byte Queue Limits (BQL) support to bcmgenet driver. Signed-off-by: Petri Gynther Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/genet/bcmgenet.c | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index 49f132c7ed99..8150c74f054a 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -1221,8 +1221,10 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev, dev->stats.tx_packets += pkts_compl; dev->stats.tx_bytes += bytes_compl; + txq = netdev_get_tx_queue(dev, ring->queue); + netdev_tx_completed_queue(txq, pkts_compl, bytes_compl); + if (ring->free_bds > (MAX_SKB_FRAGS + 1)) { - txq = netdev_get_tx_queue(dev, ring->queue); if (netif_tx_queue_stopped(txq)) netif_tx_wake_queue(txq); } @@ -1516,6 +1518,8 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev) ring->prod_index += nr_frags + 1; ring->prod_index &= DMA_P_INDEX_MASK; + netdev_tx_sent_queue(txq, GENET_CB(skb)->bytes_sent); + if (ring->free_bds <= (MAX_SKB_FRAGS + 1)) netif_tx_stop_queue(txq); @@ -2364,6 +2368,7 @@ static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv) static void bcmgenet_fini_dma(struct bcmgenet_priv *priv) { int i; + struct netdev_queue *txq; bcmgenet_fini_rx_napi(priv); bcmgenet_fini_tx_napi(priv); @@ -2378,6 +2383,14 @@ static void bcmgenet_fini_dma(struct bcmgenet_priv *priv) } } + for (i = 0; i < priv->hw_params->tx_queues; i++) { + txq = netdev_get_tx_queue(priv->dev, priv->tx_rings[i].queue); + netdev_tx_reset_queue(txq); + } + + txq = netdev_get_tx_queue(priv->dev, priv->tx_rings[DESC_INDEX].queue); + netdev_tx_reset_queue(txq); + bcmgenet_free_rx_buffers(priv); kfree(priv->rx_cbs); kfree(priv->tx_cbs); From cfe2f14c72b0266a9f3573427f206a98ad3d409c Mon Sep 17 00:00:00 2001 From: Julia Lawall Date: Sat, 9 Apr 2016 10:49:22 +0200 Subject: [PATCH 0616/1649] qdisc: constify meta_type_ops structures The meta_type_ops structures are never modified, so declare them as const. Done with the help of Coccinelle. Signed-off-by: Julia Lawall Signed-off-by: David S. 
Miller --- net/sched/em_meta.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/net/sched/em_meta.c b/net/sched/em_meta.c index f2aabc0089da..a309a07ccb35 100644 --- a/net/sched/em_meta.c +++ b/net/sched/em_meta.c @@ -796,7 +796,7 @@ struct meta_type_ops { int (*dump)(struct sk_buff *, struct meta_value *, int); }; -static struct meta_type_ops __meta_type_ops[TCF_META_TYPE_MAX + 1] = { +static const struct meta_type_ops __meta_type_ops[TCF_META_TYPE_MAX + 1] = { [TCF_META_TYPE_VAR] = { .destroy = meta_var_destroy, .compare = meta_var_compare, @@ -812,7 +812,7 @@ static struct meta_type_ops __meta_type_ops[TCF_META_TYPE_MAX + 1] = { } }; -static inline struct meta_type_ops *meta_type_ops(struct meta_value *v) +static inline const struct meta_type_ops *meta_type_ops(struct meta_value *v) { return &__meta_type_ops[meta_type(v)]; } @@ -870,7 +870,7 @@ static int em_meta_match(struct sk_buff *skb, struct tcf_ematch *m, static void meta_delete(struct meta_match *meta) { if (meta) { - struct meta_type_ops *ops = meta_type_ops(&meta->lvalue); + const struct meta_type_ops *ops = meta_type_ops(&meta->lvalue); if (ops && ops->destroy) { ops->destroy(&meta->lvalue); @@ -964,7 +964,7 @@ static int em_meta_dump(struct sk_buff *skb, struct tcf_ematch *em) { struct meta_match *meta = (struct meta_match *) em->data; struct tcf_meta_hdr hdr; - struct meta_type_ops *ops; + const struct meta_type_ops *ops; memset(&hdr, 0, sizeof(hdr)); memcpy(&hdr.left, &meta->lvalue.hdr, sizeof(hdr.left)); From 743b03a83297690f0bd38c452a3bbb47d2be300a Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Sat, 9 Apr 2016 11:29:58 -0700 Subject: [PATCH 0617/1649] net: remove netdevice gso_min_segs After introduction of ndo_features_check(), we believe that very specific checks for rare features should not be done in core networking stack. No driver uses gso_min_segs yet, so we revert this feature and save few instructions per tx packet in fast path. Signed-off-by: Eric Dumazet Signed-off-by: David S. 
Miller --- include/linux/netdevice.h | 4 +--- net/core/dev.c | 3 +-- 2 files changed, 2 insertions(+), 5 deletions(-) diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index e906c6570b38..9884fe9a6552 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -1586,8 +1586,6 @@ enum netdev_priv_flags { * @gso_max_size: Maximum size of generic segmentation offload * @gso_max_segs: Maximum number of segments that can be passed to the * NIC for GSO - * @gso_min_segs: Minimum number of segments that can be passed to the - * NIC for GSO * * @dcbnl_ops: Data Center Bridging netlink ops * @num_tc: Number of traffic classes in the net device @@ -1858,7 +1856,7 @@ struct net_device { unsigned int gso_max_size; #define GSO_MAX_SEGS 65535 u16 gso_max_segs; - u16 gso_min_segs; + #ifdef CONFIG_DCB const struct dcbnl_rtnl_ops *dcbnl_ops; #endif diff --git a/net/core/dev.c b/net/core/dev.c index d51343a821ed..09fb1ace9dc8 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -2831,7 +2831,7 @@ netdev_features_t netif_skb_features(struct sk_buff *skb) netdev_features_t features = dev->features; u16 gso_segs = skb_shinfo(skb)->gso_segs; - if (gso_segs > dev->gso_max_segs || gso_segs < dev->gso_min_segs) + if (gso_segs > dev->gso_max_segs) features &= ~NETIF_F_GSO_MASK; /* If encapsulation offload request, verify we are testing @@ -7429,7 +7429,6 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name, dev->gso_max_size = GSO_MAX_SIZE; dev->gso_max_segs = GSO_MAX_SEGS; - dev->gso_min_segs = 0; INIT_LIST_HEAD(&dev->napi_list); INIT_LIST_HEAD(&dev->unreg_list); From 95114344ea78649b1797d00ab6e88147bef66fa4 Mon Sep 17 00:00:00 2001 From: Rahul Verma Date: Sun, 10 Apr 2016 12:42:59 +0300 Subject: [PATCH 0618/1649] qed*: remove version dependency Inbox drivers don't need versioning scheme in order to guarantee compatibility, as both qed and qede are compiled from same codebase. Signed-off-by: Rahul Verma Signed-off-by: Yuval Mintz Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/qed/qed.h | 2 -- drivers/net/ethernet/qlogic/qed/qed_l2.c | 8 +------- drivers/net/ethernet/qlogic/qed/qed_main.c | 11 ----------- drivers/net/ethernet/qlogic/qede/qede.h | 2 -- drivers/net/ethernet/qlogic/qede/qede_main.c | 11 +---------- include/linux/qed/qed_eth_if.h | 2 +- include/linux/qed/qed_if.h | 9 --------- 7 files changed, 3 insertions(+), 42 deletions(-) diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h index fcb8e9ba51d9..a3ee9df16dfe 100644 --- a/drivers/net/ethernet/qlogic/qed/qed.h +++ b/drivers/net/ethernet/qlogic/qed/qed.h @@ -507,6 +507,4 @@ u32 qed_unzip_data(struct qed_hwfn *p_hwfn, int qed_slowpath_irq_req(struct qed_hwfn *hwfn); -#define QED_ETH_INTERFACE_VERSION 300 - #endif /* _QED_H */ diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c index 3f35c6ca9252..e848d5a1f7f6 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c @@ -2043,14 +2043,8 @@ static const struct qed_eth_ops qed_eth_ops_pass = { .get_vport_stats = &qed_get_vport_stats, }; -const struct qed_eth_ops *qed_get_eth_ops(u32 version) +const struct qed_eth_ops *qed_get_eth_ops(void) { - if (version != QED_ETH_INTERFACE_VERSION) { - pr_notice("Cannot supply ethtool operations [%08x != %08x]\n", - version, QED_ETH_INTERFACE_VERSION); - return NULL; - } - return &qed_eth_ops_pass; } EXPORT_SYMBOL(qed_get_eth_ops); diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index 26d40db07ddd..c31d485f72d6 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -1172,14 +1172,3 @@ const struct qed_common_ops qed_common_ops_pass = { .chain_free = &qed_chain_free, .set_led = &qed_set_led, }; - -u32 qed_get_protocol_version(enum qed_protocol protocol) -{ - switch (protocol) { - case QED_PROTOCOL_ETH: - return QED_ETH_INTERFACE_VERSION; - default: - return 0; - } -} -EXPORT_SYMBOL(qed_get_protocol_version); diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h index d023251544d9..e0a696a57d4d 100644 --- a/drivers/net/ethernet/qlogic/qede/qede.h +++ b/drivers/net/ethernet/qlogic/qede/qede.h @@ -32,8 +32,6 @@ __stringify(QEDE_REVISION_VERSION) "." 
\ __stringify(QEDE_ENGINEERING_VERSION) -#define QEDE_ETH_INTERFACE_VERSION 300 - #define DRV_MODULE_SYM qede struct qede_stats { diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index 518af329502d..a55d93eb41fa 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -141,19 +141,10 @@ static int __init qede_init(void) { int ret; - u32 qed_ver; pr_notice("qede_init: %s\n", version); - qed_ver = qed_get_protocol_version(QED_PROTOCOL_ETH); - if (qed_ver != QEDE_ETH_INTERFACE_VERSION) { - pr_notice("Version mismatch [%08x != %08x]\n", - qed_ver, - QEDE_ETH_INTERFACE_VERSION); - return -EINVAL; - } - - qed_ops = qed_get_eth_ops(QEDE_ETH_INTERFACE_VERSION); + qed_ops = qed_get_eth_ops(); if (!qed_ops) { pr_notice("Failed to get qed ethtool operations\n"); return -EINVAL; diff --git a/include/linux/qed/qed_eth_if.h b/include/linux/qed/qed_eth_if.h index e1d69834a11f..e00c8dbfc324 100644 --- a/include/linux/qed/qed_eth_if.h +++ b/include/linux/qed/qed_eth_if.h @@ -167,7 +167,7 @@ struct qed_eth_ops { struct qed_eth_stats *stats); }; -const struct qed_eth_ops *qed_get_eth_ops(u32 version); +const struct qed_eth_ops *qed_get_eth_ops(void); void qed_put_eth_ops(void); #endif diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index 1f7599c77cd4..b007011e1c82 100644 --- a/include/linux/qed/qed_if.h +++ b/include/linux/qed/qed_if.h @@ -271,15 +271,6 @@ struct qed_common_ops { enum qed_led_mode mode); }; -/** - * @brief qed_get_protocol_version - * - * @param protocol - * - * @return version supported by qed for given protocol driver - */ -u32 qed_get_protocol_version(enum qed_protocol protocol); - #define MASK_FIELD(_name, _value) \ ((_value) &= (_name ## _MASK)) From 8c5ebd0c792a097fcc0e526debbe0887ee378ae5 Mon Sep 17 00:00:00 2001 From: Sudarsana Reddy Kalluru Date: Sun, 10 Apr 2016 12:43:00 +0300 Subject: [PATCH 0619/1649] qed: add Rx flow hash/indirection support. Adds the required API for passing RSS-related configuration from qede. Signed-off-by: Sudarsana Reddy Kalluru Signed-off-by: Yuval Mintz Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/qed/qed_l2.c | 17 +---------------- include/linux/qed/qed_eth_if.h | 1 + include/linux/qed/qed_if.h | 11 +++++++++++ 3 files changed, 13 insertions(+), 16 deletions(-) diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c index e848d5a1f7f6..5005497ee23e 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c @@ -35,19 +35,6 @@ #include "qed_reg_addr.h" #include "qed_sp.h" -enum qed_rss_caps { - QED_RSS_IPV4 = 0x1, - QED_RSS_IPV6 = 0x2, - QED_RSS_IPV4_TCP = 0x4, - QED_RSS_IPV6_TCP = 0x8, - QED_RSS_IPV4_UDP = 0x10, - QED_RSS_IPV6_UDP = 0x20, -}; - -/* Should be the same as ETH_RSS_IND_TABLE_ENTRIES_NUM */ -#define QED_RSS_IND_TABLE_SIZE 128 -#define QED_RSS_KEY_SIZE 10 /* size in 32b chunks */ - struct qed_rss_params { u8 update_rss_config; u8 rss_enable; @@ -1744,9 +1731,7 @@ static int qed_update_vport(struct qed_dev *cdev, sp_rss_params.update_rss_capabilities = 1; sp_rss_params.update_rss_ind_table = 1; sp_rss_params.update_rss_key = 1; - sp_rss_params.rss_caps = QED_RSS_IPV4 | - QED_RSS_IPV6 | - QED_RSS_IPV4_TCP | QED_RSS_IPV6_TCP; + sp_rss_params.rss_caps = params->rss_params.rss_caps; sp_rss_params.rss_table_size_log = 7; /* 2^7 = 128 */ memcpy(sp_rss_params.rss_ind_table, params->rss_params.rss_ind_table, diff --git a/include/linux/qed/qed_eth_if.h b/include/linux/qed/qed_eth_if.h index e00c8dbfc324..795c9902e02f 100644 --- a/include/linux/qed/qed_eth_if.h +++ b/include/linux/qed/qed_eth_if.h @@ -27,6 +27,7 @@ struct qed_dev_eth_info { struct qed_update_vport_rss_params { u16 rss_ind_table[128]; u32 rss_key[10]; + u8 rss_caps; }; struct qed_update_vport_params { diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index b007011e1c82..67e8c206b2c1 100644 --- a/include/linux/qed/qed_if.h +++ b/include/linux/qed/qed_if.h @@ -515,4 +515,15 @@ static inline void internal_ram_wr(void __iomem *addr, __internal_ram_wr(NULL, addr, size, data); } +enum qed_rss_caps { + QED_RSS_IPV4 = 0x1, + QED_RSS_IPV6 = 0x2, + QED_RSS_IPV4_TCP = 0x4, + QED_RSS_IPV6_TCP = 0x8, + QED_RSS_IPV4_UDP = 0x10, + QED_RSS_IPV6_UDP = 0x20, +}; + +#define QED_RSS_IND_TABLE_SIZE 128 +#define QED_RSS_KEY_SIZE 10 /* size in 32b chunks */ #endif From 961acdeafd8f369a9e99b3d08f66eec5d8f93a8e Mon Sep 17 00:00:00 2001 From: Sudarsana Reddy Kalluru Date: Sun, 10 Apr 2016 12:43:01 +0300 Subject: [PATCH 0620/1649] qede: add Rx flow hash/indirection support. Adds support for the following via ethtool: - UDP configuration of RSS based on 2-tuple/4-tuple. - RSS hash key. - RSS indirection table. Signed-off-by: Sudarsana Reddy Kalluru Signed-off-by: Yuval Mintz Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/qede/qede.h | 4 + .../net/ethernet/qlogic/qede/qede_ethtool.c | 237 +++++++++++++++++- drivers/net/ethernet/qlogic/qede/qede_main.c | 52 +++- 3 files changed, 283 insertions(+), 10 deletions(-) diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h index e0a696a57d4d..80dbb7352ee3 100644 --- a/drivers/net/ethernet/qlogic/qede/qede.h +++ b/drivers/net/ethernet/qlogic/qede/qede.h @@ -154,6 +154,10 @@ struct qede_dev { SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) struct qede_stats stats; +#define QEDE_RSS_INDIR_INITED BIT(0) +#define QEDE_RSS_KEY_INITED BIT(1) +#define QEDE_RSS_CAPS_INITED BIT(2) + u32 rss_params_inited; /* bit-field to track initialized rss params */ struct qed_update_vport_rss_params rss_params; u16 q_num_rx_buffers; /* Must be a power of two */ u16 q_num_tx_buffers; /* Must be a power of two */ diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c index c49dc10ce151..f0982f163670 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c @@ -569,6 +569,236 @@ static int qede_set_phys_id(struct net_device *dev, return 0; } +static int qede_get_rss_flags(struct qede_dev *edev, struct ethtool_rxnfc *info) +{ + info->data = RXH_IP_SRC | RXH_IP_DST; + + switch (info->flow_type) { + case TCP_V4_FLOW: + case TCP_V6_FLOW: + info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + break; + case UDP_V4_FLOW: + if (edev->rss_params.rss_caps & QED_RSS_IPV4_UDP) + info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + break; + case UDP_V6_FLOW: + if (edev->rss_params.rss_caps & QED_RSS_IPV6_UDP) + info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + break; + case IPV4_FLOW: + case IPV6_FLOW: + break; + default: + info->data = 0; + break; + } + + return 0; +} + +static int qede_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, + u32 *rules __always_unused) +{ + struct qede_dev *edev = netdev_priv(dev); + + switch (info->cmd) { + case ETHTOOL_GRXRINGS: + info->data = edev->num_rss; + return 0; + case ETHTOOL_GRXFH: + return qede_get_rss_flags(edev, info); + default: + DP_ERR(edev, "Command parameters not supported\n"); + return -EOPNOTSUPP; + } +} + +static int qede_set_rss_flags(struct qede_dev *edev, struct ethtool_rxnfc *info) +{ + struct qed_update_vport_params vport_update_params; + u8 set_caps = 0, clr_caps = 0; + + DP_VERBOSE(edev, QED_MSG_DEBUG, + "Set rss flags command parameters: flow type = %d, data = %llu\n", + info->flow_type, info->data); + + switch (info->flow_type) { + case TCP_V4_FLOW: + case TCP_V6_FLOW: + /* For TCP only 4-tuple hash is supported */ + if (info->data ^ (RXH_IP_SRC | RXH_IP_DST | + RXH_L4_B_0_1 | RXH_L4_B_2_3)) { + DP_INFO(edev, "Command parameters not supported\n"); + return -EINVAL; + } + return 0; + case UDP_V4_FLOW: + /* For UDP either 2-tuple hash or 4-tuple hash is supported */ + if (info->data == (RXH_IP_SRC | RXH_IP_DST | + RXH_L4_B_0_1 | RXH_L4_B_2_3)) { + set_caps = QED_RSS_IPV4_UDP; + DP_VERBOSE(edev, QED_MSG_DEBUG, + "UDP 4-tuple enabled\n"); + } else if (info->data == (RXH_IP_SRC | RXH_IP_DST)) { + clr_caps = QED_RSS_IPV4_UDP; + DP_VERBOSE(edev, QED_MSG_DEBUG, + "UDP 4-tuple disabled\n"); + } else { + return -EINVAL; + } + break; + case UDP_V6_FLOW: + /* For UDP either 2-tuple hash or 4-tuple hash is supported */ + if (info->data == (RXH_IP_SRC | RXH_IP_DST | + RXH_L4_B_0_1 | RXH_L4_B_2_3)) { + set_caps = QED_RSS_IPV6_UDP; + DP_VERBOSE(edev, QED_MSG_DEBUG, + "UDP 
4-tuple enabled\n"); + } else if (info->data == (RXH_IP_SRC | RXH_IP_DST)) { + clr_caps = QED_RSS_IPV6_UDP; + DP_VERBOSE(edev, QED_MSG_DEBUG, + "UDP 4-tuple disabled\n"); + } else { + return -EINVAL; + } + break; + case IPV4_FLOW: + case IPV6_FLOW: + /* For IP only 2-tuple hash is supported */ + if (info->data ^ (RXH_IP_SRC | RXH_IP_DST)) { + DP_INFO(edev, "Command parameters not supported\n"); + return -EINVAL; + } + return 0; + case SCTP_V4_FLOW: + case AH_ESP_V4_FLOW: + case AH_V4_FLOW: + case ESP_V4_FLOW: + case SCTP_V6_FLOW: + case AH_ESP_V6_FLOW: + case AH_V6_FLOW: + case ESP_V6_FLOW: + case IP_USER_FLOW: + case ETHER_FLOW: + /* RSS is not supported for these protocols */ + if (info->data) { + DP_INFO(edev, "Command parameters not supported\n"); + return -EINVAL; + } + return 0; + default: + return -EINVAL; + } + + /* No action is needed if there is no change in the rss capability */ + if (edev->rss_params.rss_caps == ((edev->rss_params.rss_caps & + ~clr_caps) | set_caps)) + return 0; + + /* Update internal configuration */ + edev->rss_params.rss_caps = (edev->rss_params.rss_caps & ~clr_caps) | + set_caps; + edev->rss_params_inited |= QEDE_RSS_CAPS_INITED; + + /* Re-configure if possible */ + if (netif_running(edev->ndev)) { + memset(&vport_update_params, 0, sizeof(vport_update_params)); + vport_update_params.update_rss_flg = 1; + vport_update_params.vport_id = 0; + memcpy(&vport_update_params.rss_params, &edev->rss_params, + sizeof(vport_update_params.rss_params)); + return edev->ops->vport_update(edev->cdev, + &vport_update_params); + } + + return 0; +} + +static int qede_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info) +{ + struct qede_dev *edev = netdev_priv(dev); + + switch (info->cmd) { + case ETHTOOL_SRXFH: + return qede_set_rss_flags(edev, info); + default: + DP_INFO(edev, "Command parameters not supported\n"); + return -EOPNOTSUPP; + } +} + +static u32 qede_get_rxfh_indir_size(struct net_device *dev) +{ + return QED_RSS_IND_TABLE_SIZE; +} + +static u32 qede_get_rxfh_key_size(struct net_device *dev) +{ + struct qede_dev *edev = netdev_priv(dev); + + return sizeof(edev->rss_params.rss_key); +} + +static int qede_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc) +{ + struct qede_dev *edev = netdev_priv(dev); + int i; + + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; + + if (!indir) + return 0; + + for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) + indir[i] = edev->rss_params.rss_ind_table[i]; + + if (key) + memcpy(key, edev->rss_params.rss_key, + qede_get_rxfh_key_size(dev)); + + return 0; +} + +static int qede_set_rxfh(struct net_device *dev, const u32 *indir, + const u8 *key, const u8 hfunc) +{ + struct qed_update_vport_params vport_update_params; + struct qede_dev *edev = netdev_priv(dev); + int i; + + if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) + return -EOPNOTSUPP; + + if (!indir && !key) + return 0; + + if (indir) { + for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) + edev->rss_params.rss_ind_table[i] = indir[i]; + edev->rss_params_inited |= QEDE_RSS_INDIR_INITED; + } + + if (key) { + memcpy(&edev->rss_params.rss_key, key, + qede_get_rxfh_key_size(dev)); + edev->rss_params_inited |= QEDE_RSS_KEY_INITED; + } + + if (netif_running(edev->ndev)) { + memset(&vport_update_params, 0, sizeof(vport_update_params)); + vport_update_params.update_rss_flg = 1; + vport_update_params.vport_id = 0; + memcpy(&vport_update_params.rss_params, &edev->rss_params, + sizeof(vport_update_params.rss_params)); + return edev->ops->vport_update(edev->cdev, + 
&vport_update_params); + } + + return 0; +} + static const struct ethtool_ops qede_ethtool_ops = { .get_settings = qede_get_settings, .set_settings = qede_set_settings, @@ -585,7 +815,12 @@ static const struct ethtool_ops qede_ethtool_ops = { .set_phys_id = qede_set_phys_id, .get_ethtool_stats = qede_get_ethtool_stats, .get_sset_count = qede_get_sset_count, - + .get_rxnfc = qede_get_rxnfc, + .set_rxnfc = qede_set_rxnfc, + .get_rxfh_indir_size = qede_get_rxfh_indir_size, + .get_rxfh_key_size = qede_get_rxfh_key_size, + .get_rxfh = qede_get_rxfh, + .set_rxfh = qede_set_rxfh, .get_channels = qede_get_channels, .set_channels = qede_set_channels, }; diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index a55d93eb41fa..457caad2e752 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -2826,10 +2826,10 @@ static int qede_start_queues(struct qede_dev *edev) int rc, tc, i; int vlan_removal_en = 1; struct qed_dev *cdev = edev->cdev; - struct qed_update_vport_rss_params *rss_params = &edev->rss_params; struct qed_update_vport_params vport_update_params; struct qed_queue_start_common_params q_params; struct qed_start_vport_params start = {0}; + bool reset_rss_indir = false; if (!edev->num_rss) { DP_ERR(edev, @@ -2924,16 +2924,50 @@ static int qede_start_queues(struct qede_dev *edev) /* Fill struct with RSS params */ if (QEDE_RSS_CNT(edev) > 1) { vport_update_params.update_rss_flg = 1; - for (i = 0; i < 128; i++) - rss_params->rss_ind_table[i] = - ethtool_rxfh_indir_default(i, QEDE_RSS_CNT(edev)); - netdev_rss_key_fill(rss_params->rss_key, - sizeof(rss_params->rss_key)); + + /* Need to validate current RSS config uses valid entries */ + for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) { + if (edev->rss_params.rss_ind_table[i] >= + edev->num_rss) { + reset_rss_indir = true; + break; + } + } + + if (!(edev->rss_params_inited & QEDE_RSS_INDIR_INITED) || + reset_rss_indir) { + u16 val; + + for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) { + u16 indir_val; + + val = QEDE_RSS_CNT(edev); + indir_val = ethtool_rxfh_indir_default(i, val); + edev->rss_params.rss_ind_table[i] = indir_val; + } + edev->rss_params_inited |= QEDE_RSS_INDIR_INITED; + } + + if (!(edev->rss_params_inited & QEDE_RSS_KEY_INITED)) { + netdev_rss_key_fill(edev->rss_params.rss_key, + sizeof(edev->rss_params.rss_key)); + edev->rss_params_inited |= QEDE_RSS_KEY_INITED; + } + + if (!(edev->rss_params_inited & QEDE_RSS_CAPS_INITED)) { + edev->rss_params.rss_caps = QED_RSS_IPV4 | + QED_RSS_IPV6 | + QED_RSS_IPV4_TCP | + QED_RSS_IPV6_TCP; + edev->rss_params_inited |= QEDE_RSS_CAPS_INITED; + } + + memcpy(&vport_update_params.rss_params, &edev->rss_params, + sizeof(vport_update_params.rss_params)); } else { - memset(rss_params, 0, sizeof(*rss_params)); + memset(&vport_update_params.rss_params, 0, + sizeof(vport_update_params.rss_params)); } - memcpy(&vport_update_params.rss_params, rss_params, - sizeof(*rss_params)); rc = edev->ops->vport_update(cdev, &vport_update_params); if (rc) { From 7c2d7d7438c9808cff1c34decc80c49f87a764e7 Mon Sep 17 00:00:00 2001 From: Yuval Mintz Date: Sun, 10 Apr 2016 12:43:02 +0300 Subject: [PATCH 0621/1649] qed* - bump driver versions to 8.7.1.20 Signed-off-by: Yuval Mintz Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/qed/qed.h | 2 +- drivers/net/ethernet/qlogic/qede/qede.h | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h index a3ee9df16dfe..0f0d2d1d77e5 100644 --- a/drivers/net/ethernet/qlogic/qed/qed.h +++ b/drivers/net/ethernet/qlogic/qed/qed.h @@ -26,7 +26,7 @@ #include "qed_hsi.h" extern const struct qed_common_ops qed_common_ops_pass; -#define DRV_MODULE_VERSION "8.7.0.0" +#define DRV_MODULE_VERSION "8.7.1.20" #define MAX_HWFNS_PER_DEVICE (4) #define NAME_SIZE 16 diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h index 80dbb7352ee3..41c418909a5c 100644 --- a/drivers/net/ethernet/qlogic/qede/qede.h +++ b/drivers/net/ethernet/qlogic/qede/qede.h @@ -25,8 +25,8 @@ #define QEDE_MAJOR_VERSION 8 #define QEDE_MINOR_VERSION 7 -#define QEDE_REVISION_VERSION 0 -#define QEDE_ENGINEERING_VERSION 0 +#define QEDE_REVISION_VERSION 1 +#define QEDE_ENGINEERING_VERSION 20 #define DRV_MODULE_VERSION __stringify(QEDE_MAJOR_VERSION) "." \ __stringify(QEDE_MINOR_VERSION) "." \ __stringify(QEDE_REVISION_VERSION) "." \ From d0988a5f77e7a399ac579e629f1dcc23059246e9 Mon Sep 17 00:00:00 2001 From: Sergei Shtylyov Date: Sun, 10 Apr 2016 23:55:15 +0300 Subject: [PATCH 0622/1649] ravb: make ravb_ptp_interrupt() *void* When we have the ISS.CGIS bit set, we already know that gPTP interrupt has happened, so an extra GIS register check at the end of ravb_ptp_interrupt() seems superfluous. We can model the gPTP interrupt handler like all other dedicated interrupt handlers in the driver and make it *void*. Signed-off-by: Sergei Shtylyov Signed-off-by: David S. Miller --- drivers/net/ethernet/renesas/ravb.h | 2 +- drivers/net/ethernet/renesas/ravb_main.c | 8 ++++++-- drivers/net/ethernet/renesas/ravb_ptp.c | 9 ++------- 3 files changed, 9 insertions(+), 10 deletions(-) diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h index 5c1624147778..4e5d5e953e15 100644 --- a/drivers/net/ethernet/renesas/ravb.h +++ b/drivers/net/ethernet/renesas/ravb.h @@ -1045,7 +1045,7 @@ void ravb_modify(struct net_device *ndev, enum ravb_reg reg, u32 clear, u32 set); int ravb_wait(struct net_device *ndev, enum ravb_reg reg, u32 mask, u32 value); -irqreturn_t ravb_ptp_interrupt(struct net_device *ndev); +void ravb_ptp_interrupt(struct net_device *ndev); void ravb_ptp_init(struct net_device *ndev, struct platform_device *pdev); void ravb_ptp_stop(struct net_device *ndev); diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index 4b71951e185d..0f1b314f4d64 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -807,8 +807,10 @@ static irqreturn_t ravb_interrupt(int irq, void *dev_id) } /* gPTP interrupt status summary */ - if ((iss & ISS_CGIS) && ravb_ptp_interrupt(ndev) == IRQ_HANDLED) + if (iss & ISS_CGIS) { + ravb_ptp_interrupt(ndev); result = IRQ_HANDLED; + } mmiowb(); spin_unlock(&priv->lock); @@ -838,8 +840,10 @@ static irqreturn_t ravb_multi_interrupt(int irq, void *dev_id) } /* gPTP interrupt status summary */ - if ((iss & ISS_CGIS) && ravb_ptp_interrupt(ndev) == IRQ_HANDLED) + if (iss & ISS_CGIS) { + ravb_ptp_interrupt(ndev); result = IRQ_HANDLED; + } mmiowb(); spin_unlock(&priv->lock); diff --git a/drivers/net/ethernet/renesas/ravb_ptp.c b/drivers/net/ethernet/renesas/ravb_ptp.c index f1b2cbb336e8..eede70ec37f8 100644 --- 
a/drivers/net/ethernet/renesas/ravb_ptp.c +++ b/drivers/net/ethernet/renesas/ravb_ptp.c @@ -296,7 +296,7 @@ static const struct ptp_clock_info ravb_ptp_info = { }; /* Caller must hold the lock */ -irqreturn_t ravb_ptp_interrupt(struct net_device *ndev) +void ravb_ptp_interrupt(struct net_device *ndev) { struct ravb_private *priv = netdev_priv(ndev); u32 gis = ravb_read(ndev, GIS); @@ -319,12 +319,7 @@ irqreturn_t ravb_ptp_interrupt(struct net_device *ndev) } } - if (gis) { - ravb_write(ndev, ~gis, GIS); - return IRQ_HANDLED; - } - - return IRQ_NONE; + ravb_write(ndev, ~gis, GIS); } void ravb_ptp_init(struct net_device *ndev, struct platform_device *pdev) From f38ba953bee01887d520f7abba536721a1d16477 Mon Sep 17 00:00:00 2001 From: stephen hemminger Date: Wed, 13 Apr 2016 17:02:21 -0700 Subject: [PATCH 0623/1649] gre: eliminate holes in ip_tunnel The structure can be packed denser by doing minor rearrangement of existing elements. Signed-off-by: Stephen Hemminger Signed-off-by: David S. Miller --- include/net/ip_tunnels.h | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h index 16435d8b1f93..9ae9fbbccd67 100644 --- a/include/net/ip_tunnels.h +++ b/include/net/ip_tunnels.h @@ -105,24 +105,23 @@ struct ip_tunnel { struct net_device *dev; struct net *net; /* netns for packet i/o */ - int err_count; /* Number of arrived ICMP errors */ unsigned long err_time; /* Time when the last ICMP error * arrived */ + int err_count; /* Number of arrived ICMP errors */ /* These four fields used only by GRE */ u32 i_seqno; /* The last seen seqno */ u32 o_seqno; /* The last output seqno */ int tun_hlen; /* Precalculated header length */ - int mlink; struct dst_cache dst_cache; struct ip_tunnel_parm parms; + int mlink; int encap_hlen; /* Encap header length (FOU,GUE) */ - struct ip_tunnel_encap encap; - int hlen; /* tun_hlen + encap_hlen */ + struct ip_tunnel_encap encap; /* for SIT */ #ifdef CONFIG_IPV6_SIT_6RD From c606e662a5c8e9caa7183e7d88b7c24dadb3adea Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Thu, 7 Apr 2016 14:19:16 -0400 Subject: [PATCH 0624/1649] rtl8xxxu: Add MAC init table for 8192eu The 8192eu requires a different MAC init table. Add the missing table and specify the table to use in the fileops structure. 
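For context before the diff below: the MAC init tables in this driver are flat {register, value} arrays terminated by a 0xffff sentinel (see the {0xffff, 0xff} entry closing rtl8192e_mac_init_table), and the patch routes table selection through a per-chip fileops pointer instead of an if/else on the chip type. A minimal sketch of that pattern follows; struct dev_priv, write_reg8() and write_reg_table() are placeholder names for illustration, not the driver's real identifiers.

#include <linux/types.h>

struct dev_priv;						/* placeholder per-device state */
int write_reg8(struct dev_priv *priv, u16 reg, u8 val);	/* placeholder register accessor */

struct regval_entry {
	u16 reg;
	u8 val;
};

/* Walk a sentinel-terminated {reg, val} table and program each register. */
static int write_reg_table(struct dev_priv *priv, const struct regval_entry *tbl)
{
	int i, ret;

	for (i = 0; tbl[i].reg != 0xffff; i++) {
		ret = write_reg8(priv, tbl[i].reg, tbl[i].val);
		if (ret)
			return ret;	/* caller learns which write failed */
	}

	return 0;
}

Keeping the table pointer in the per-chip ops structure keeps the walker generic: supporting a new chip then needs only a new table plus a one-line fileops entry, which is exactly what the diff below does for the 8192eu.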
Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- .../net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 45 ++++++++++++++++--- .../net/wireless/realtek/rtl8xxxu/rtl8xxxu.h | 1 + 2 files changed, 40 insertions(+), 6 deletions(-) diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index db8433a9efe2..2869376c0e86 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -184,6 +184,36 @@ static struct rtl8xxxu_reg8val rtl8723b_mac_init_table[] = { {0xffff, 0xff}, }; +static struct rtl8xxxu_reg8val rtl8192e_mac_init_table[] = { + {0x011, 0xeb}, {0x012, 0x07}, {0x014, 0x75}, {0x303, 0xa7}, + {0x428, 0x0a}, {0x429, 0x10}, {0x430, 0x00}, {0x431, 0x00}, + {0x432, 0x00}, {0x433, 0x01}, {0x434, 0x04}, {0x435, 0x05}, + {0x436, 0x07}, {0x437, 0x08}, {0x43c, 0x04}, {0x43d, 0x05}, + {0x43e, 0x07}, {0x43f, 0x08}, {0x440, 0x5d}, {0x441, 0x01}, + {0x442, 0x00}, {0x444, 0x10}, {0x445, 0x00}, {0x446, 0x00}, + {0x447, 0x00}, {0x448, 0x00}, {0x449, 0xf0}, {0x44a, 0x0f}, + {0x44b, 0x3e}, {0x44c, 0x10}, {0x44d, 0x00}, {0x44e, 0x00}, + {0x44f, 0x00}, {0x450, 0x00}, {0x451, 0xf0}, {0x452, 0x0f}, + {0x453, 0x00}, {0x456, 0x5e}, {0x460, 0x66}, {0x461, 0x66}, + {0x4c8, 0xff}, {0x4c9, 0x08}, {0x4cc, 0xff}, {0x4cd, 0xff}, + {0x4ce, 0x01}, {0x500, 0x26}, {0x501, 0xa2}, {0x502, 0x2f}, + {0x503, 0x00}, {0x504, 0x28}, {0x505, 0xa3}, {0x506, 0x5e}, + {0x507, 0x00}, {0x508, 0x2b}, {0x509, 0xa4}, {0x50a, 0x5e}, + {0x50b, 0x00}, {0x50c, 0x4f}, {0x50d, 0xa4}, {0x50e, 0x00}, + {0x50f, 0x00}, {0x512, 0x1c}, {0x514, 0x0a}, {0x516, 0x0a}, + {0x525, 0x4f}, {0x540, 0x12}, {0x541, 0x64}, {0x550, 0x10}, + {0x551, 0x10}, {0x559, 0x02}, {0x55c, 0x50}, {0x55d, 0xff}, + {0x605, 0x30}, {0x608, 0x0e}, {0x609, 0x2a}, {0x620, 0xff}, + {0x621, 0xff}, {0x622, 0xff}, {0x623, 0xff}, {0x624, 0xff}, + {0x625, 0xff}, {0x626, 0xff}, {0x627, 0xff}, {0x638, 0x50}, + {0x63c, 0x0a}, {0x63d, 0x0a}, {0x63e, 0x0e}, {0x63f, 0x0e}, + {0x640, 0x40}, {0x642, 0x40}, {0x643, 0x00}, {0x652, 0xc8}, + {0x66e, 0x05}, {0x700, 0x21}, {0x701, 0x43}, {0x702, 0x65}, + {0x703, 0x87}, {0x708, 0x21}, {0x709, 0x43}, {0x70a, 0x65}, + {0x70b, 0x87}, + {0xffff, 0xff}, +}; + static struct rtl8xxxu_reg32val rtl8723a_phy_1t_init_table[] = { {0x800, 0x80040000}, {0x804, 0x00000003}, {0x808, 0x0000fc00}, {0x80c, 0x0000000a}, @@ -3087,8 +3117,9 @@ static void rtl8723bu_phy_init_antenna_selection(struct rtl8xxxu_priv *priv) } static int -rtl8xxxu_init_mac(struct rtl8xxxu_priv *priv, struct rtl8xxxu_reg8val *array) +rtl8xxxu_init_mac(struct rtl8xxxu_priv *priv) { + struct rtl8xxxu_reg8val *array = priv->fops->mactable; int i, ret; u16 reg; u8 val; @@ -3103,7 +3134,8 @@ rtl8xxxu_init_mac(struct rtl8xxxu_priv *priv, struct rtl8xxxu_reg8val *array) ret = rtl8xxxu_write8(priv, reg, val); if (ret != 1) { dev_warn(&priv->udev->dev, - "Failed to initialize MAC\n"); + "Failed to initialize MAC " + "(reg: %04x, val %02x)\n", reg, val); return -EAGAIN; } } @@ -6369,10 +6401,7 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw) if (priv->fops->phy_init_antenna_selection) priv->fops->phy_init_antenna_selection(priv); - if (priv->rtl_chip == RTL8723B) - ret = rtl8xxxu_init_mac(priv, rtl8723b_mac_init_table); - else - ret = rtl8xxxu_init_mac(priv, rtl8723a_mac_init_table); + ret = rtl8xxxu_init_mac(priv); dev_dbg(dev, "%s: init_mac %i\n", __func__, ret); if (ret) @@ -8452,6 +8481,7 @@ static struct rtl8xxxu_fileops rtl8723au_fops = { .adda_1t_path_on = 0x0bdb25a0, 
.adda_2t_path_on_a = 0x04db25a4, .adda_2t_path_on_b = 0x0b1b25a4, + .mactable = rtl8723a_mac_init_table, }; static struct rtl8xxxu_fileops rtl8723bu_fops = { @@ -8481,6 +8511,7 @@ static struct rtl8xxxu_fileops rtl8723bu_fops = { .adda_1t_path_on = 0x01c00014, .adda_2t_path_on_a = 0x01c00014, .adda_2t_path_on_b = 0x01c00014, + .mactable = rtl8723b_mac_init_table, }; #ifdef CONFIG_RTL8XXXU_UNTESTED @@ -8508,6 +8539,7 @@ static struct rtl8xxxu_fileops rtl8192cu_fops = { .adda_1t_path_on = 0x0bdb25a0, .adda_2t_path_on_a = 0x04db25a4, .adda_2t_path_on_b = 0x0b1b25a4, + .mactable = rtl8723a_mac_init_table, }; #endif @@ -8536,6 +8568,7 @@ static struct rtl8xxxu_fileops rtl8192eu_fops = { .adda_1t_path_on = 0x0fc01616, .adda_2t_path_on_a = 0x0fc01616, .adda_2t_path_on_b = 0x0fc01616, + .mactable = rtl8192e_mac_init_table, }; static struct usb_device_id dev_table[] = { diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h index 455e1122dbb5..94fc2172c0d0 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h @@ -1308,4 +1308,5 @@ struct rtl8xxxu_fileops { u32 adda_1t_path_on; u32 adda_2t_path_on_a; u32 adda_2t_path_on_b; + struct rtl8xxxu_reg8val *mactable; }; From 2ca73dc786c4ffb9c618e7b868d69d9faf5a755d Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Thu, 7 Apr 2016 14:19:17 -0400 Subject: [PATCH 0625/1649] rtl8xxxu: Do not mess with AFE_XTAL_CTRL on 8192eu To match the vendor driver, do not mess with AFE_XTAL_CTRL on 8192eu. Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 2869376c0e86..63fa4c66794f 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -3208,7 +3208,7 @@ static int rtl8xxxu_init_phy_bb(struct rtl8xxxu_priv *priv) rtl8xxxu_write16(priv, REG_SYS_FUNC, val16); } - if (priv->rtl_chip != RTL8723B) { + if (priv->rtl_chip != RTL8723B && priv->rtl_chip != RTL8192E) { /* AFE_XTAL_RF_GATE (bit 14) if addressing as 32 bit register */ val32 = rtl8xxxu_read32(priv, REG_AFE_XTAL_CTRL); val32 &= ~AFE_XTAL_RF_GATE; From 80805aa5f33e315df23b6420c64f96a28dfcbb9a Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Thu, 7 Apr 2016 14:19:18 -0400 Subject: [PATCH 0626/1649] rtl8xxxu: Set TX page boundaries for 8192eu The 8192eu also has it's own TRXFF boundary value to set. 
Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 7 ++++++- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h | 1 + 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 63fa4c66794f..5b2c1c80e49d 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -6495,7 +6495,10 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw) /* * Set TX buffer boundary */ - val8 = TX_TOTAL_PAGE_NUM + 1; + if (priv->rtl_chip == RTL8192E) + val8 = TX_TOTAL_PAGE_NUM_8192E + 1; + else + val8 = TX_TOTAL_PAGE_NUM + 1; if (priv->rtl_chip == RTL8723B) val8 -= 1; @@ -6532,6 +6535,8 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw) */ if (priv->rtl_chip == RTL8723B) rtl8xxxu_write16(priv, REG_TRXFF_BNDY + 2, 0x3f7f); + else if (priv->rtl_chip == RTL8192E) + rtl8xxxu_write16(priv, REG_TRXFF_BNDY + 2, 0x3cff); else rtl8xxxu_write16(priv, REG_TRXFF_BNDY + 2, 0x27ff); /* diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h index 94fc2172c0d0..63d72689481d 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h @@ -42,6 +42,7 @@ #define REALTEK_USB_CMD_IDX 0x00 #define TX_TOTAL_PAGE_NUM 0xf8 +#define TX_TOTAL_PAGE_NUM_8192E 0xf3 /* (HPQ + LPQ + NPQ + PUBQ) = TX_TOTAL_PAGE_NUM */ #define TX_PAGE_NUM_PUBQ 0xe7 #define TX_PAGE_NUM_HI_PQ 0x0c From 19102f8419342a3cdd0efb25539a268f2d095319 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Thu, 7 Apr 2016 14:19:19 -0400 Subject: [PATCH 0627/1649] rtl8xxxu: Add radio init tables for 8192eu Add the required radio init tables for 8192eu devices. 
Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- .../net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 154 ++++++++++++++++++ 1 file changed, 154 insertions(+) diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 5b2c1c80e49d..0667aa77a2fd 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -1242,6 +1242,152 @@ static struct rtl8xxxu_rfregval rtl8188ru_radioa_1t_highpa_table[] = { {0xff, 0xffffffff} }; +static struct rtl8xxxu_rfregval rtl8192eu_radioa_init_table[] = { + {0x7f, 0x00000082}, {0x81, 0x0003fc00}, + {0x00, 0x00030000}, {0x08, 0x00008400}, + {0x18, 0x00000407}, {0x19, 0x00000012}, + {0x1b, 0x00000064}, {0x1e, 0x00080009}, + {0x1f, 0x00000880}, {0x2f, 0x0001a060}, + {0x3f, 0x00000000}, {0x42, 0x000060c0}, + {0x57, 0x000d0000}, {0x58, 0x000be180}, + {0x67, 0x00001552}, {0x83, 0x00000000}, + {0xb0, 0x000ff9f1}, {0xb1, 0x00055418}, + {0xb2, 0x0008cc00}, {0xb4, 0x00043083}, + {0xb5, 0x00008166}, {0xb6, 0x0000803e}, + {0xb7, 0x0001c69f}, {0xb8, 0x0000407f}, + {0xb9, 0x00080001}, {0xba, 0x00040001}, + {0xbb, 0x00000400}, {0xbf, 0x000c0000}, + {0xc2, 0x00002400}, {0xc3, 0x00000009}, + {0xc4, 0x00040c91}, {0xc5, 0x00099999}, + {0xc6, 0x000000a3}, {0xc7, 0x00088820}, + {0xc8, 0x00076c06}, {0xc9, 0x00000000}, + {0xca, 0x00080000}, {0xdf, 0x00000180}, + {0xef, 0x000001a0}, {0x51, 0x00069545}, + {0x52, 0x0007e45e}, {0x53, 0x00000071}, + {0x56, 0x00051ff3}, {0x35, 0x000000a8}, + {0x35, 0x000001e2}, {0x35, 0x000002a8}, + {0x36, 0x00001c24}, {0x36, 0x00009c24}, + {0x36, 0x00011c24}, {0x36, 0x00019c24}, + {0x18, 0x00000c07}, {0x5a, 0x00048000}, + {0x19, 0x000739d0}, +#ifdef EXT_PA_8192EU + /* External PA or external LNA */ + {0x34, 0x0000a093}, {0x34, 0x0000908f}, + {0x34, 0x0000808c}, {0x34, 0x0000704d}, + {0x34, 0x0000604a}, {0x34, 0x00005047}, + {0x34, 0x0000400a}, {0x34, 0x00003007}, + {0x34, 0x00002004}, {0x34, 0x00001001}, + {0x34, 0x00000000}, +#else + /* Regular */ + {0x34, 0x0000add7}, {0x34, 0x00009dd4}, + {0x34, 0x00008dd1}, {0x34, 0x00007dce}, + {0x34, 0x00006dcb}, {0x34, 0x00005dc8}, + {0x34, 0x00004dc5}, {0x34, 0x000034cc}, + {0x34, 0x0000244f}, {0x34, 0x0000144c}, + {0x34, 0x00000014}, +#endif + {0x00, 0x00030159}, + {0x84, 0x00068180}, + {0x86, 0x0000014e}, + {0x87, 0x00048e00}, + {0x8e, 0x00065540}, + {0x8f, 0x00088000}, + {0xef, 0x000020a0}, +#ifdef EXT_PA_8192EU + /* External PA or external LNA */ + {0x3b, 0x000f07b0}, +#else + {0x3b, 0x000f02b0}, +#endif + {0x3b, 0x000ef7b0}, {0x3b, 0x000d4fb0}, + {0x3b, 0x000cf060}, {0x3b, 0x000b0090}, + {0x3b, 0x000a0080}, {0x3b, 0x00090080}, + {0x3b, 0x0008f780}, +#ifdef EXT_PA_8192EU + /* External PA or external LNA */ + {0x3b, 0x000787b0}, +#else + {0x3b, 0x00078730}, +#endif + {0x3b, 0x00060fb0}, {0x3b, 0x0005ffa0}, + {0x3b, 0x00040620}, {0x3b, 0x00037090}, + {0x3b, 0x00020080}, {0x3b, 0x0001f060}, + {0x3b, 0x0000ffb0}, {0xef, 0x000000a0}, + {0xfe, 0x00000000}, {0x18, 0x0000fc07}, + {0xfe, 0x00000000}, {0xfe, 0x00000000}, + {0xfe, 0x00000000}, {0xfe, 0x00000000}, + {0x1e, 0x00000001}, {0x1f, 0x00080000}, + {0x00, 0x00033e70}, + {0xff, 0xffffffff} +}; + +static struct rtl8xxxu_rfregval rtl8192eu_radiob_init_table[] = { + {0x7f, 0x00000082}, {0x81, 0x0003fc00}, + {0x00, 0x00030000}, {0x08, 0x00008400}, + {0x18, 0x00000407}, {0x19, 0x00000012}, + {0x1b, 0x00000064}, {0x1e, 0x00080009}, + {0x1f, 0x00000880}, {0x2f, 0x0001a060}, + {0x3f, 0x00000000}, {0x42, 0x000060c0}, + {0x57, 0x000d0000}, {0x58, 
0x000be180}, + {0x67, 0x00001552}, {0x7f, 0x00000082}, + {0x81, 0x0003f000}, {0x83, 0x00000000}, + {0xdf, 0x00000180}, {0xef, 0x000001a0}, + {0x51, 0x00069545}, {0x52, 0x0007e42e}, + {0x53, 0x00000071}, {0x56, 0x00051ff3}, + {0x35, 0x000000a8}, {0x35, 0x000001e0}, + {0x35, 0x000002a8}, {0x36, 0x00001ca8}, + {0x36, 0x00009c24}, {0x36, 0x00011c24}, + {0x36, 0x00019c24}, {0x18, 0x00000c07}, + {0x5a, 0x00048000}, {0x19, 0x000739d0}, +#ifdef EXT_PA_8192EU + /* External PA or external LNA */ + {0x34, 0x0000a093}, {0x34, 0x0000908f}, + {0x34, 0x0000808c}, {0x34, 0x0000704d}, + {0x34, 0x0000604a}, {0x34, 0x00005047}, + {0x34, 0x0000400a}, {0x34, 0x00003007}, + {0x34, 0x00002004}, {0x34, 0x00001001}, + {0x34, 0x00000000}, +#else + {0x34, 0x0000add7}, {0x34, 0x00009dd4}, + {0x34, 0x00008dd1}, {0x34, 0x00007dce}, + {0x34, 0x00006dcb}, {0x34, 0x00005dc8}, + {0x34, 0x00004dc5}, {0x34, 0x000034cc}, + {0x34, 0x0000244f}, {0x34, 0x0000144c}, + {0x34, 0x00000014}, +#endif + {0x00, 0x00030159}, {0x84, 0x00068180}, + {0x86, 0x000000ce}, {0x87, 0x00048a00}, + {0x8e, 0x00065540}, {0x8f, 0x00088000}, + {0xef, 0x000020a0}, +#ifdef EXT_PA_8192EU + /* External PA or external LNA */ + {0x3b, 0x000f07b0}, +#else + {0x3b, 0x000f02b0}, +#endif + + {0x3b, 0x000ef7b0}, {0x3b, 0x000d4fb0}, + {0x3b, 0x000cf060}, {0x3b, 0x000b0090}, + {0x3b, 0x000a0080}, {0x3b, 0x00090080}, + {0x3b, 0x0008f780}, +#ifdef EXT_PA_8192EU + /* External PA or external LNA */ + {0x3b, 0x000787b0}, +#else + {0x3b, 0x00078730}, +#endif + {0x3b, 0x00060fb0}, {0x3b, 0x0005ffa0}, + {0x3b, 0x00040620}, {0x3b, 0x00037090}, + {0x3b, 0x00020080}, {0x3b, 0x0001f060}, + {0x3b, 0x0000ffb0}, {0xef, 0x000000a0}, + {0x00, 0x00010159}, {0xfe, 0x00000000}, + {0xfe, 0x00000000}, {0xfe, 0x00000000}, + {0xfe, 0x00000000}, {0x1e, 0x00000001}, + {0x1f, 0x00080000}, {0x00, 0x00033e70}, + {0xff, 0xffffffff} +}; + static struct rtl8xxxu_rfregs rtl8xxxu_rfregs[] = { { /* RF_A */ .hssiparm1 = REG_FPGA0_XA_HSSI_PARM1, @@ -6447,6 +6593,14 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw) rftable = rtl8192cu_radiob_2t_init_table; ret = rtl8xxxu_init_phy_rf(priv, rftable, RF_B); break; + case RTL8192E: + rftable = rtl8192eu_radioa_init_table; + ret = rtl8xxxu_init_phy_rf(priv, rftable, RF_A); + if (ret) + break; + rftable = rtl8192eu_radiob_init_table; + ret = rtl8xxxu_init_phy_rf(priv, rftable, RF_B); + break; default: ret = -EINVAL; } From e293278debe7c1d3112aa8ec10373d73f171d75d Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Thu, 7 Apr 2016 14:19:20 -0400 Subject: [PATCH 0628/1649] rtl8xxxu: Add 8192eu AGC tables A device specific AGC table is required for the 8192eu as well. 
Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- .../net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 147 +++++++++++++++++- 1 file changed, 146 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 0667aa77a2fd..650ebcbe7a2b 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -849,6 +849,144 @@ static struct rtl8xxxu_reg32val rtl8xxx_agc_8723bu_table[] = { {0xffff, 0xffffffff} }; +static struct rtl8xxxu_reg32val rtl8xxx_agc_8192eu_std_table[] = { + {0xc78, 0xfb000001}, {0xc78, 0xfb010001}, + {0xc78, 0xfb020001}, {0xc78, 0xfb030001}, + {0xc78, 0xfb040001}, {0xc78, 0xfb050001}, + {0xc78, 0xfa060001}, {0xc78, 0xf9070001}, + {0xc78, 0xf8080001}, {0xc78, 0xf7090001}, + {0xc78, 0xf60a0001}, {0xc78, 0xf50b0001}, + {0xc78, 0xf40c0001}, {0xc78, 0xf30d0001}, + {0xc78, 0xf20e0001}, {0xc78, 0xf10f0001}, + {0xc78, 0xf0100001}, {0xc78, 0xef110001}, + {0xc78, 0xee120001}, {0xc78, 0xed130001}, + {0xc78, 0xec140001}, {0xc78, 0xeb150001}, + {0xc78, 0xea160001}, {0xc78, 0xe9170001}, + {0xc78, 0xe8180001}, {0xc78, 0xe7190001}, + {0xc78, 0xc81a0001}, {0xc78, 0xc71b0001}, + {0xc78, 0xc61c0001}, {0xc78, 0x071d0001}, + {0xc78, 0x061e0001}, {0xc78, 0x051f0001}, + {0xc78, 0x04200001}, {0xc78, 0x03210001}, + {0xc78, 0xaa220001}, {0xc78, 0xa9230001}, + {0xc78, 0xa8240001}, {0xc78, 0xa7250001}, + {0xc78, 0xa6260001}, {0xc78, 0x85270001}, + {0xc78, 0x84280001}, {0xc78, 0x83290001}, + {0xc78, 0x252a0001}, {0xc78, 0x242b0001}, + {0xc78, 0x232c0001}, {0xc78, 0x222d0001}, + {0xc78, 0x672e0001}, {0xc78, 0x662f0001}, + {0xc78, 0x65300001}, {0xc78, 0x64310001}, + {0xc78, 0x63320001}, {0xc78, 0x62330001}, + {0xc78, 0x61340001}, {0xc78, 0x45350001}, + {0xc78, 0x44360001}, {0xc78, 0x43370001}, + {0xc78, 0x42380001}, {0xc78, 0x41390001}, + {0xc78, 0x403a0001}, {0xc78, 0x403b0001}, + {0xc78, 0x403c0001}, {0xc78, 0x403d0001}, + {0xc78, 0x403e0001}, {0xc78, 0x403f0001}, + {0xc78, 0xfb400001}, {0xc78, 0xfb410001}, + {0xc78, 0xfb420001}, {0xc78, 0xfb430001}, + {0xc78, 0xfb440001}, {0xc78, 0xfb450001}, + {0xc78, 0xfa460001}, {0xc78, 0xf9470001}, + {0xc78, 0xf8480001}, {0xc78, 0xf7490001}, + {0xc78, 0xf64a0001}, {0xc78, 0xf54b0001}, + {0xc78, 0xf44c0001}, {0xc78, 0xf34d0001}, + {0xc78, 0xf24e0001}, {0xc78, 0xf14f0001}, + {0xc78, 0xf0500001}, {0xc78, 0xef510001}, + {0xc78, 0xee520001}, {0xc78, 0xed530001}, + {0xc78, 0xec540001}, {0xc78, 0xeb550001}, + {0xc78, 0xea560001}, {0xc78, 0xe9570001}, + {0xc78, 0xe8580001}, {0xc78, 0xe7590001}, + {0xc78, 0xe65a0001}, {0xc78, 0xe55b0001}, + {0xc78, 0xe45c0001}, {0xc78, 0xe35d0001}, + {0xc78, 0xe25e0001}, {0xc78, 0xe15f0001}, + {0xc78, 0x8a600001}, {0xc78, 0x89610001}, + {0xc78, 0x88620001}, {0xc78, 0x87630001}, + {0xc78, 0x86640001}, {0xc78, 0x85650001}, + {0xc78, 0x84660001}, {0xc78, 0x83670001}, + {0xc78, 0x82680001}, {0xc78, 0x6b690001}, + {0xc78, 0x6a6a0001}, {0xc78, 0x696b0001}, + {0xc78, 0x686c0001}, {0xc78, 0x676d0001}, + {0xc78, 0x666e0001}, {0xc78, 0x656f0001}, + {0xc78, 0x64700001}, {0xc78, 0x63710001}, + {0xc78, 0x62720001}, {0xc78, 0x61730001}, + {0xc78, 0x49740001}, {0xc78, 0x48750001}, + {0xc78, 0x47760001}, {0xc78, 0x46770001}, + {0xc78, 0x45780001}, {0xc78, 0x44790001}, + {0xc78, 0x437a0001}, {0xc78, 0x427b0001}, + {0xc78, 0x417c0001}, {0xc78, 0x407d0001}, + {0xc78, 0x407e0001}, {0xc78, 0x407f0001}, + {0xc50, 0x00040022}, {0xc50, 0x00040020}, + {0xffff, 0xffffffff} +}; + +static struct rtl8xxxu_reg32val 
rtl8xxx_agc_8192eu_highpa_table[] = { + {0xc78, 0xfa000001}, {0xc78, 0xf9010001}, + {0xc78, 0xf8020001}, {0xc78, 0xf7030001}, + {0xc78, 0xf6040001}, {0xc78, 0xf5050001}, + {0xc78, 0xf4060001}, {0xc78, 0xf3070001}, + {0xc78, 0xf2080001}, {0xc78, 0xf1090001}, + {0xc78, 0xf00a0001}, {0xc78, 0xef0b0001}, + {0xc78, 0xee0c0001}, {0xc78, 0xed0d0001}, + {0xc78, 0xec0e0001}, {0xc78, 0xeb0f0001}, + {0xc78, 0xea100001}, {0xc78, 0xe9110001}, + {0xc78, 0xe8120001}, {0xc78, 0xe7130001}, + {0xc78, 0xe6140001}, {0xc78, 0xe5150001}, + {0xc78, 0xe4160001}, {0xc78, 0xe3170001}, + {0xc78, 0xe2180001}, {0xc78, 0xe1190001}, + {0xc78, 0x8a1a0001}, {0xc78, 0x891b0001}, + {0xc78, 0x881c0001}, {0xc78, 0x871d0001}, + {0xc78, 0x861e0001}, {0xc78, 0x851f0001}, + {0xc78, 0x84200001}, {0xc78, 0x83210001}, + {0xc78, 0x82220001}, {0xc78, 0x6a230001}, + {0xc78, 0x69240001}, {0xc78, 0x68250001}, + {0xc78, 0x67260001}, {0xc78, 0x66270001}, + {0xc78, 0x65280001}, {0xc78, 0x64290001}, + {0xc78, 0x632a0001}, {0xc78, 0x622b0001}, + {0xc78, 0x612c0001}, {0xc78, 0x602d0001}, + {0xc78, 0x472e0001}, {0xc78, 0x462f0001}, + {0xc78, 0x45300001}, {0xc78, 0x44310001}, + {0xc78, 0x43320001}, {0xc78, 0x42330001}, + {0xc78, 0x41340001}, {0xc78, 0x40350001}, + {0xc78, 0x40360001}, {0xc78, 0x40370001}, + {0xc78, 0x40380001}, {0xc78, 0x40390001}, + {0xc78, 0x403a0001}, {0xc78, 0x403b0001}, + {0xc78, 0x403c0001}, {0xc78, 0x403d0001}, + {0xc78, 0x403e0001}, {0xc78, 0x403f0001}, + {0xc78, 0xfa400001}, {0xc78, 0xf9410001}, + {0xc78, 0xf8420001}, {0xc78, 0xf7430001}, + {0xc78, 0xf6440001}, {0xc78, 0xf5450001}, + {0xc78, 0xf4460001}, {0xc78, 0xf3470001}, + {0xc78, 0xf2480001}, {0xc78, 0xf1490001}, + {0xc78, 0xf04a0001}, {0xc78, 0xef4b0001}, + {0xc78, 0xee4c0001}, {0xc78, 0xed4d0001}, + {0xc78, 0xec4e0001}, {0xc78, 0xeb4f0001}, + {0xc78, 0xea500001}, {0xc78, 0xe9510001}, + {0xc78, 0xe8520001}, {0xc78, 0xe7530001}, + {0xc78, 0xe6540001}, {0xc78, 0xe5550001}, + {0xc78, 0xe4560001}, {0xc78, 0xe3570001}, + {0xc78, 0xe2580001}, {0xc78, 0xe1590001}, + {0xc78, 0x8a5a0001}, {0xc78, 0x895b0001}, + {0xc78, 0x885c0001}, {0xc78, 0x875d0001}, + {0xc78, 0x865e0001}, {0xc78, 0x855f0001}, + {0xc78, 0x84600001}, {0xc78, 0x83610001}, + {0xc78, 0x82620001}, {0xc78, 0x6a630001}, + {0xc78, 0x69640001}, {0xc78, 0x68650001}, + {0xc78, 0x67660001}, {0xc78, 0x66670001}, + {0xc78, 0x65680001}, {0xc78, 0x64690001}, + {0xc78, 0x636a0001}, {0xc78, 0x626b0001}, + {0xc78, 0x616c0001}, {0xc78, 0x606d0001}, + {0xc78, 0x476e0001}, {0xc78, 0x466f0001}, + {0xc78, 0x45700001}, {0xc78, 0x44710001}, + {0xc78, 0x43720001}, {0xc78, 0x42730001}, + {0xc78, 0x41740001}, {0xc78, 0x40750001}, + {0xc78, 0x40760001}, {0xc78, 0x40770001}, + {0xc78, 0x40780001}, {0xc78, 0x40790001}, + {0xc78, 0x407a0001}, {0xc78, 0x407b0001}, + {0xc78, 0x407c0001}, {0xc78, 0x407d0001}, + {0xc78, 0x407e0001}, {0xc78, 0x407f0001}, + {0xc50, 0x00040222}, {0xc50, 0x00040220}, + {0xffff, 0xffffffff} +}; + static struct rtl8xxxu_rfregval rtl8723au_radioa_1t_init_table[] = { {0x00, 0x00030159}, {0x01, 0x00031284}, {0x02, 0x00098000}, {0x03, 0x00039c63}, @@ -3446,7 +3584,14 @@ static int rtl8xxxu_init_phy_bb(struct rtl8xxxu_priv *priv) if (priv->rtl_chip == RTL8723B) rtl8xxxu_init_phy_regs(priv, rtl8xxx_agc_8723bu_table); - else if (priv->hi_pa) + else if (priv->rtl_chip == RTL8192E) { + if (priv->hi_pa) + rtl8xxxu_init_phy_regs(priv, + rtl8xxx_agc_8192eu_highpa_table); + else + rtl8xxxu_init_phy_regs(priv, + rtl8xxx_agc_8192eu_std_table); + } else if (priv->hi_pa) rtl8xxxu_init_phy_regs(priv, rtl8xxx_agc_highpa_table); 
else rtl8xxxu_init_phy_regs(priv, rtl8xxx_agc_standard_table); From ae14c5d20dc9e5e66681a7e3045b61eb9348d9f4 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Thu, 7 Apr 2016 14:19:21 -0400 Subject: [PATCH 0629/1649] rtl8xxxu: Add 8192eu PHY init table The 8192eu also requires it's own PHY init table. Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- .../net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 140 ++++++++++++++++++ 1 file changed, 140 insertions(+) diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 650ebcbe7a2b..48d2d56ec224 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -610,6 +610,138 @@ static struct rtl8xxxu_reg32val rtl8188ru_phy_1t_highpa_table[] = { {0xffff, 0xffffffff}, }; +static struct rtl8xxxu_reg32val rtl8192eu_phy_init_table[] = { + {0x800, 0x80040000}, {0x804, 0x00000003}, + {0x808, 0x0000fc00}, {0x80c, 0x0000000a}, + {0x810, 0x10001331}, {0x814, 0x020c3d10}, + {0x818, 0x02220385}, {0x81c, 0x00000000}, + {0x820, 0x01000100}, {0x824, 0x00390204}, + {0x828, 0x01000100}, {0x82c, 0x00390204}, + {0x830, 0x32323232}, {0x834, 0x30303030}, + {0x838, 0x30303030}, {0x83c, 0x30303030}, + {0x840, 0x00010000}, {0x844, 0x00010000}, + {0x848, 0x28282828}, {0x84c, 0x28282828}, + {0x850, 0x00000000}, {0x854, 0x00000000}, + {0x858, 0x009a009a}, {0x85c, 0x01000014}, + {0x860, 0x66f60000}, {0x864, 0x061f0000}, + {0x868, 0x30303030}, {0x86c, 0x30303030}, + {0x870, 0x00000000}, {0x874, 0x55004200}, + {0x878, 0x08080808}, {0x87c, 0x00000000}, + {0x880, 0xb0000c1c}, {0x884, 0x00000001}, + {0x888, 0x00000000}, {0x88c, 0xcc0000c0}, + {0x890, 0x00000800}, {0x894, 0xfffffffe}, + {0x898, 0x40302010}, {0x900, 0x00000000}, + {0x904, 0x00000023}, {0x908, 0x00000000}, + {0x90c, 0x81121313}, {0x910, 0x806c0001}, + {0x914, 0x00000001}, {0x918, 0x00000000}, + {0x91c, 0x00010000}, {0x924, 0x00000001}, + {0x928, 0x00000000}, {0x92c, 0x00000000}, + {0x930, 0x00000000}, {0x934, 0x00000000}, + {0x938, 0x00000000}, {0x93c, 0x00000000}, + {0x940, 0x00000000}, {0x944, 0x00000000}, + {0x94c, 0x00000008}, {0xa00, 0x00d0c7c8}, + {0xa04, 0x81ff000c}, {0xa08, 0x8c838300}, + {0xa0c, 0x2e68120f}, {0xa10, 0x95009b78}, + {0xa14, 0x1114d028}, {0xa18, 0x00881117}, + {0xa1c, 0x89140f00}, {0xa20, 0x1a1b0000}, + {0xa24, 0x090e1317}, {0xa28, 0x00000204}, + {0xa2c, 0x00d30000}, {0xa70, 0x101fff00}, + {0xa74, 0x00000007}, {0xa78, 0x00000900}, + {0xa7c, 0x225b0606}, {0xa80, 0x218075b1}, + {0xb38, 0x00000000}, {0xc00, 0x48071d40}, + {0xc04, 0x03a05633}, {0xc08, 0x000000e4}, + {0xc0c, 0x6c6c6c6c}, {0xc10, 0x08800000}, + {0xc14, 0x40000100}, {0xc18, 0x08800000}, + {0xc1c, 0x40000100}, {0xc20, 0x00000000}, + {0xc24, 0x00000000}, {0xc28, 0x00000000}, + {0xc2c, 0x00000000}, {0xc30, 0x69e9ac47}, + {0xc34, 0x469652af}, {0xc38, 0x49795994}, + {0xc3c, 0x0a97971c}, {0xc40, 0x1f7c403f}, + {0xc44, 0x000100b7}, {0xc48, 0xec020107}, + {0xc4c, 0x007f037f}, +#ifdef EXT_PA_8192EU + /* External PA or external LNA */ + {0xc50, 0x00340220}, +#else + {0xc50, 0x00340020}, +#endif + {0xc54, 0x0080801f}, +#ifdef EXT_PA_8192EU + /* External PA or external LNA */ + {0xc58, 0x00000220}, +#else + {0xc58, 0x00000020}, +#endif + {0xc5c, 0x00248492}, {0xc60, 0x00000000}, + {0xc64, 0x7112848b}, {0xc68, 0x47c00bff}, + {0xc6c, 0x00000036}, {0xc70, 0x00000600}, + {0xc74, 0x02013169}, {0xc78, 0x0000001f}, + {0xc7c, 0x00b91612}, +#ifdef EXT_PA_8192EU + /* External PA or external LNA */ + {0xc80, 0x2d4000b5}, +#else + 
{0xc80, 0x40000100}, +#endif + {0xc84, 0x21f60000}, +#ifdef EXT_PA_8192EU + /* External PA or external LNA */ + {0xc88, 0x2d4000b5}, +#else + {0xc88, 0x40000100}, +#endif + {0xc8c, 0xa0e40000}, {0xc90, 0x00121820}, + {0xc94, 0x00000000}, {0xc98, 0x00121820}, + {0xc9c, 0x00007f7f}, {0xca0, 0x00000000}, + {0xca4, 0x000300a0}, {0xca8, 0x00000000}, + {0xcac, 0x00000000}, {0xcb0, 0x00000000}, + {0xcb4, 0x00000000}, {0xcb8, 0x00000000}, + {0xcbc, 0x28000000}, {0xcc0, 0x00000000}, + {0xcc4, 0x00000000}, {0xcc8, 0x00000000}, + {0xccc, 0x00000000}, {0xcd0, 0x00000000}, + {0xcd4, 0x00000000}, {0xcd8, 0x64b22427}, + {0xcdc, 0x00766932}, {0xce0, 0x00222222}, + {0xce4, 0x00040000}, {0xce8, 0x77644302}, + {0xcec, 0x2f97d40c}, {0xd00, 0x00080740}, + {0xd04, 0x00020403}, {0xd08, 0x0000907f}, + {0xd0c, 0x20010201}, {0xd10, 0xa0633333}, + {0xd14, 0x3333bc43}, {0xd18, 0x7a8f5b6b}, + {0xd1c, 0x0000007f}, {0xd2c, 0xcc979975}, + {0xd30, 0x00000000}, {0xd34, 0x80608000}, + {0xd38, 0x00000000}, {0xd3c, 0x00127353}, + {0xd40, 0x00000000}, {0xd44, 0x00000000}, + {0xd48, 0x00000000}, {0xd4c, 0x00000000}, + {0xd50, 0x6437140a}, {0xd54, 0x00000000}, + {0xd58, 0x00000282}, {0xd5c, 0x30032064}, + {0xd60, 0x4653de68}, {0xd64, 0x04518a3c}, + {0xd68, 0x00002101}, {0xd6c, 0x2a201c16}, + {0xd70, 0x1812362e}, {0xd74, 0x322c2220}, + {0xd78, 0x000e3c24}, {0xd80, 0x01081008}, + {0xd84, 0x00000800}, {0xd88, 0xf0b50000}, + {0xe00, 0x30303030}, {0xe04, 0x30303030}, + {0xe08, 0x03903030}, {0xe10, 0x30303030}, + {0xe14, 0x30303030}, {0xe18, 0x30303030}, + {0xe1c, 0x30303030}, {0xe28, 0x00000000}, + {0xe30, 0x1000dc1f}, {0xe34, 0x10008c1f}, + {0xe38, 0x02140102}, {0xe3c, 0x681604c2}, + {0xe40, 0x01007c00}, {0xe44, 0x01004800}, + {0xe48, 0xfb000000}, {0xe4c, 0x000028d1}, + {0xe50, 0x1000dc1f}, {0xe54, 0x10008c1f}, + {0xe58, 0x02140102}, {0xe5c, 0x28160d05}, + {0xe60, 0x00000008}, {0xe68, 0x0fc05656}, + {0xe6c, 0x03c09696}, {0xe70, 0x03c09696}, + {0xe74, 0x0c005656}, {0xe78, 0x0c005656}, + {0xe7c, 0x0c005656}, {0xe80, 0x0c005656}, + {0xe84, 0x03c09696}, {0xe88, 0x0c005656}, + {0xe8c, 0x03c09696}, {0xed0, 0x03c09696}, + {0xed4, 0x03c09696}, {0xed8, 0x03c09696}, + {0xedc, 0x0000d6d6}, {0xee0, 0x0000d6d6}, + {0xeec, 0x0fc01616}, {0xee4, 0xb0000c1c}, + {0xee8, 0x00000001}, {0xf14, 0x00000003}, + {0xf4c, 0x00000000}, {0xf00, 0x00000300}, + {0xffff, 0xffffffff}, +}; + static struct rtl8xxxu_reg32val rtl8xxx_agc_standard_table[] = { {0xc78, 0x7b000001}, {0xc78, 0x7b010001}, {0xc78, 0x7b020001}, {0xc78, 0x7b030001}, @@ -3516,6 +3648,14 @@ static int rtl8xxxu_init_phy_bb(struct rtl8xxxu_priv *priv) rtl8xxxu_write8(priv, REG_SYS_FUNC, 0xe3); rtl8xxxu_write8(priv, REG_AFE_XTAL_CTRL + 1, 0x80); rtl8xxxu_init_phy_regs(priv, rtl8723b_phy_1t_init_table); + } else if (priv->rtl_chip == RTL8192E) { + val16 = rtl8xxxu_read16(priv, REG_SYS_FUNC); + val16 |= (SYS_FUNC_USBA | SYS_FUNC_USBD | SYS_FUNC_DIO_RF | + SYS_FUNC_BB_GLB_RSTN | SYS_FUNC_BBRSTB); + rtl8xxxu_write16(priv, REG_SYS_FUNC, val16); + val8 = RF_ENABLE | RF_RSTB | RF_SDMRSTB; + rtl8xxxu_write8(priv, REG_RF_CTRL, val8); + rtl8xxxu_init_phy_regs(priv, rtl8192eu_phy_init_table); } else rtl8xxxu_init_phy_regs(priv, rtl8723a_phy_1t_init_table); From abd71bdb94c3c4cd6bf4837ce568cb189fdb7f7a Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Thu, 7 Apr 2016 14:19:22 -0400 Subject: [PATCH 0630/1649] rtl8xxxu: Pick PHY init table based on chip version first Pick PHY init table based on device before distinguishing between 1T/2T/high PA tables. The latter is only currently used for 8188cu/8192cu/8188ru. 
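Reading the resulting if/else chain in the diff below, the effective selection order becomes (summary of the patch only, in comment form):

/*
 * PHY init table selection after this patch:
 *   RTL8723B        -> rtl8723b_phy_1t_init_table
 *   RTL8192E        -> rtl8192eu_phy_init_table
 *   hi_pa           -> rtl8188ru_phy_1t_highpa_table
 *   tx_paths == 2   -> rtl8192cu_phy_2t_init_table
 *   otherwise       -> rtl8723a_phy_1t_init_table
 */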
Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 48d2d56ec224..fcbb67936112 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -3637,11 +3637,7 @@ static int rtl8xxxu_init_phy_bb(struct rtl8xxxu_priv *priv) val8 = RF_ENABLE | RF_RSTB | RF_SDMRSTB; rtl8xxxu_write8(priv, REG_RF_CTRL, val8); - if (priv->hi_pa) - rtl8xxxu_init_phy_regs(priv, rtl8188ru_phy_1t_highpa_table); - else if (priv->tx_paths == 2) - rtl8xxxu_init_phy_regs(priv, rtl8192cu_phy_2t_init_table); - else if (priv->rtl_chip == RTL8723B) { + if (priv->rtl_chip == RTL8723B) { /* * Why? */ @@ -3656,10 +3652,13 @@ static int rtl8xxxu_init_phy_bb(struct rtl8xxxu_priv *priv) val8 = RF_ENABLE | RF_RSTB | RF_SDMRSTB; rtl8xxxu_write8(priv, REG_RF_CTRL, val8); rtl8xxxu_init_phy_regs(priv, rtl8192eu_phy_init_table); - } else + } else if (priv->hi_pa) + rtl8xxxu_init_phy_regs(priv, rtl8188ru_phy_1t_highpa_table); + else if (priv->tx_paths == 2) + rtl8xxxu_init_phy_regs(priv, rtl8192cu_phy_2t_init_table); + else rtl8xxxu_init_phy_regs(priv, rtl8723a_phy_1t_init_table); - if (priv->rtl_chip == RTL8188C && priv->hi_pa && priv->vendor_umc && priv->chip_cut == 1) rtl8xxxu_write8(priv, REG_OFDM0_AGC_PARM1 + 2, 0x50); From 9e24772ae222a867f98efc5502a5a01b3916db83 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Thu, 7 Apr 2016 14:19:23 -0400 Subject: [PATCH 0631/1649] rtl8xxxu: Correctly parse 8192eu efuse The 8192eu efuse only has power data for path A and B. It follows the same layout as 8723bu. 
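The revised struct rtl8192eu_efuse in the diff below documents its intended byte offsets in comments (tx_power_index_B at 0x3a, channel_plan at 0xb8, mac_addr at 0xd7, and so on). One way to keep such a hand-laid-out hardware map honest is a compile-time layout check; the sketch below is an editorial suggestion, not part of the patch, it assumes rtl8xxxu.h is in scope, and the offsets are taken from the struct's own comments rather than measured independently. The checks only fire if the helper is actually called (or pasted) somewhere that gets compiled, e.g. the efuse parse routine.

#include <linux/bug.h>
#include <linux/stddef.h>

/* Compile-time sanity checks for the 8192eu efuse layout; call from the
 * efuse parse path so the assertions are evaluated.
 */
static inline void rtl8192eu_efuse_layout_check(void)
{
	BUILD_BUG_ON(offsetof(struct rtl8192eu_efuse, tx_power_index_B) != 0x3a);
	BUILD_BUG_ON(offsetof(struct rtl8192eu_efuse, channel_plan) != 0xb8);
	BUILD_BUG_ON(offsetof(struct rtl8192eu_efuse, mac_addr) != 0xd7);
}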
Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- .../net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 40 +++++++++++++++++++ .../net/wireless/realtek/rtl8xxxu/rtl8xxxu.h | 40 ++++++++----------- 2 files changed, 57 insertions(+), 23 deletions(-) diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index fcbb67936112..539f7b58bbf7 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -2122,6 +2122,9 @@ static int rtl8723a_channel_to_group(int channel) return group; } +/* + * Valid for rtl8723bu and rtl8192eu + */ static int rtl8723b_channel_to_group(int channel) { int group; @@ -2987,6 +2990,43 @@ static int rtl8192eu_parse_efuse(struct rtl8xxxu_priv *priv) ether_addr_copy(priv->mac_addr, efuse->mac_addr); + memcpy(priv->cck_tx_power_index_A, efuse->tx_power_index_A.cck_base, + sizeof(efuse->tx_power_index_A.cck_base)); + memcpy(priv->cck_tx_power_index_B, efuse->tx_power_index_B.cck_base, + sizeof(efuse->tx_power_index_B.cck_base)); + + memcpy(priv->ht40_1s_tx_power_index_A, + efuse->tx_power_index_A.ht40_base, + sizeof(efuse->tx_power_index_A.ht40_base)); + memcpy(priv->ht40_1s_tx_power_index_B, + efuse->tx_power_index_B.ht40_base, + sizeof(efuse->tx_power_index_B.ht40_base)); + + priv->ht20_tx_power_diff[0].a = + efuse->tx_power_index_A.ht20_ofdm_1s_diff.b; + priv->ht20_tx_power_diff[0].b = + efuse->tx_power_index_B.ht20_ofdm_1s_diff.b; + + priv->ht40_tx_power_diff[0].a = 0; + priv->ht40_tx_power_diff[0].b = 0; + + for (i = 1; i < RTL8723B_TX_COUNT; i++) { + priv->ofdm_tx_power_diff[i].a = + efuse->tx_power_index_A.pwr_diff[i - 1].ofdm; + priv->ofdm_tx_power_diff[i].b = + efuse->tx_power_index_B.pwr_diff[i - 1].ofdm; + + priv->ht20_tx_power_diff[i].a = + efuse->tx_power_index_A.pwr_diff[i - 1].ht20; + priv->ht20_tx_power_diff[i].b = + efuse->tx_power_index_B.pwr_diff[i - 1].ht20; + + priv->ht40_tx_power_diff[i].a = + efuse->tx_power_index_A.pwr_diff[i - 1].ht40; + priv->ht40_tx_power_diff[i].b = + efuse->tx_power_index_B.pwr_diff[i - 1].ht40; + } + priv->has_xtalk = 1; priv->xtalk = priv->efuse_wifi.efuse8192eu.xtal_k & 0x3f; diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h index 63d72689481d..48a80fa9eac2 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h @@ -788,55 +788,49 @@ struct rtl8192eu_efuse_tx_power { u8 cck_base[6]; u8 ht40_base[5]; struct rtl8723au_idx ht20_ofdm_1s_diff; - struct rtl8723au_idx ht40_ht20_2s_diff; - struct rtl8723au_idx ofdm_cck_2s_diff; /* not used */ - struct rtl8723au_idx ht40_ht20_3s_diff; - struct rtl8723au_idx ofdm_cck_3s_diff; /* not used */ - struct rtl8723au_idx ht40_ht20_4s_diff; - struct rtl8723au_idx ofdm_cck_4s_diff; /* not used */ + struct rtl8723bu_pwr_idx pwr_diff[3]; + u8 dummy5g[24]; /* max channel group (14) + power diff offset (10) */ }; struct rtl8192eu_efuse { __le16 rtl_id; u8 res0[0x0e]; struct rtl8192eu_efuse_tx_power tx_power_index_A; /* 0x10 */ - struct rtl8192eu_efuse_tx_power tx_power_index_B; /* 0x22 */ - struct rtl8192eu_efuse_tx_power tx_power_index_C; /* 0x34 */ - struct rtl8192eu_efuse_tx_power tx_power_index_D; /* 0x46 */ - u8 res1[0x60]; + struct rtl8192eu_efuse_tx_power tx_power_index_B; /* 0x3a */ + u8 res2[0x54]; u8 channel_plan; /* 0xb8 */ u8 xtal_k; u8 thermal_meter; u8 iqk_lck; u8 pa_type; /* 0xbc */ u8 lna_type_2g; /* 0xbd */ - u8 res2[1]; + u8 res3[1]; u8 
lna_type_5g; /* 0xbf */ - u8 res13[1]; + u8 res4[1]; u8 rf_board_option; u8 rf_feature_option; u8 rf_bt_setting; u8 eeprom_version; u8 eeprom_customer_id; - u8 res3[3]; + u8 res5[3]; u8 rf_antenna_option; /* 0xc9 */ - u8 res4[6]; + u8 res6[6]; u8 vid; /* 0xd0 */ - u8 res5[1]; + u8 res7[1]; u8 pid; /* 0xd2 */ - u8 res6[1]; + u8 res8[1]; u8 usb_optional_function; - u8 res7[2]; - u8 mac_addr[ETH_ALEN]; /* 0xd7 */ - u8 res8[2]; - u8 vendor_name[7]; u8 res9[2]; - u8 device_name[0x0b]; /* 0xe8 */ + u8 mac_addr[ETH_ALEN]; /* 0xd7 */ u8 res10[2]; + u8 vendor_name[7]; + u8 res11[2]; + u8 device_name[0x0b]; /* 0xe8 */ + u8 res12[2]; u8 serial[0x0b]; /* 0xf5 */ - u8 res11[0x30]; + u8 res13[0x30]; u8 unknown[0x0d]; /* 0x130 */ - u8 res12[0xc3]; + u8 res14[0xc3]; }; struct rtl8xxxu_reg8val { From 444004bd134990456329267ac6365e71d73aeb85 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Thu, 7 Apr 2016 14:19:24 -0400 Subject: [PATCH 0632/1649] rtl8xxxu: Handle BB init for 8192eu The 8192eu does not use REG_AFE_PLL_CTRL in it's BB init sequence, so provide device specific handling. Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 539f7b58bbf7..dc67f5b2dbae 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -3649,6 +3649,11 @@ static int rtl8xxxu_init_phy_bb(struct rtl8xxxu_priv *priv) rtl8xxxu_write16(priv, REG_SYS_FUNC, val16); rtl8xxxu_write32(priv, REG_S0S1_PATH_SWITCH, 0x00); + } else if (priv->rtl_chip == RTL8192E) { + val16 = rtl8xxxu_read16(priv, REG_SYS_FUNC); + val16 |= SYS_FUNC_BB_GLB_RSTN | SYS_FUNC_BBRSTB | + SYS_FUNC_DIO_RF; + rtl8xxxu_write16(priv, REG_SYS_FUNC, val16); } else { val8 = rtl8xxxu_read8(priv, REG_AFE_PLL_CTRL); udelay(2); From 2949b9ee77b819b90d23096ef44744244283e630 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Thu, 7 Apr 2016 14:19:25 -0400 Subject: [PATCH 0633/1649] rtl8xxxu: Provide special handling when writing RF regs on 8192eu The 8192eu requires clearing/restoring bit 17 in REG_FPGA0_POWER_SAVE before/after writing RF registers. 
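The diff below open-codes this sequence inside rtl8xxxu_write_rfreg(), guarded by a chip check so it only applies to the 8192eu; conceptually it is a clear-bit / LSSI-write / set-bit-back bracket around the existing write. A condensed sketch of just that bracket follows; rf_lssi_write_gated() is a made-up helper name and the standalone form is illustrative, while the register accessors and REG_FPGA0_POWER_SAVE are the driver's own as used in the diff, with the usual driver includes assumed.

/* Bracket an LSSI write with the 8192eu power-save gating described above. */
static void rf_lssi_write_gated(struct rtl8xxxu_priv *priv,
				u32 lssi_reg, u32 dataaddr)
{
	u32 val32;

	/* Clear bit 17 before touching the RF serial interface */
	val32 = rtl8xxxu_read32(priv, REG_FPGA0_POWER_SAVE);
	rtl8xxxu_write32(priv, REG_FPGA0_POWER_SAVE, val32 & ~0x20000);

	rtl8xxxu_write32(priv, lssi_reg, dataaddr);
	udelay(1);

	/* Set bit 17 again once the write has gone out */
	val32 = rtl8xxxu_read32(priv, REG_FPGA0_POWER_SAVE);
	rtl8xxxu_write32(priv, REG_FPGA0_POWER_SAVE, val32 | 0x20000);
}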
Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index dc67f5b2dbae..ad5b3d3f7084 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -1896,7 +1896,7 @@ static int rtl8xxxu_write_rfreg(struct rtl8xxxu_priv *priv, enum rtl8xxxu_rfpath path, u8 reg, u32 data) { int ret, retval; - u32 dataaddr; + u32 dataaddr, val32; if (rtl8xxxu_debug & RTL8XXXU_DEBUG_RFREG_WRITE) dev_info(&priv->udev->dev, "%s(%02x) = 0x%06x\n", @@ -1905,6 +1905,12 @@ static int rtl8xxxu_write_rfreg(struct rtl8xxxu_priv *priv, data &= FPGA0_LSSI_PARM_DATA_MASK; dataaddr = (reg << FPGA0_LSSI_PARM_ADDR_SHIFT) | data; + if (priv->rtl_chip == RTL8192E) { + val32 = rtl8xxxu_read32(priv, REG_FPGA0_POWER_SAVE); + val32 &= ~0x20000; + rtl8xxxu_write32(priv, REG_FPGA0_POWER_SAVE, val32); + } + /* Use XB for path B */ ret = rtl8xxxu_write32(priv, rtl8xxxu_rfregs[path].lssiparm, dataaddr); if (ret != sizeof(dataaddr)) @@ -1914,6 +1920,12 @@ static int rtl8xxxu_write_rfreg(struct rtl8xxxu_priv *priv, udelay(1); + if (priv->rtl_chip == RTL8192E) { + val32 = rtl8xxxu_read32(priv, REG_FPGA0_POWER_SAVE); + val32 |= 0x20000; + rtl8xxxu_write32(priv, REG_FPGA0_POWER_SAVE, val32); + } + return retval; } From 8a59485c8ee911e77ace1622520976321aaf3820 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Thu, 7 Apr 2016 14:19:26 -0400 Subject: [PATCH 0634/1649] rtl8xxxu: Handle XTAL value setting on 8192eu Set REG_AFE_XTAL_CTRL on 8192eu to the vendor driver value, and do not skip setting REG_MAX_AGGR_NUM on 8192eu. Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index ad5b3d3f7084..e77019256c61 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -3608,7 +3608,7 @@ rtl8xxxu_init_mac(struct rtl8xxxu_priv *priv) } } - if (priv->rtl_chip != RTL8723B) + if (priv->rtl_chip != RTL8723B && priv->rtl_chip != RTL8192E) rtl8xxxu_write8(priv, REG_MAX_AGGR_NUM, 0x0a); return 0; @@ -3813,6 +3813,9 @@ static int rtl8xxxu_init_phy_bb(struct rtl8xxxu_priv *priv) rtl8xxxu_write32(priv, REG_LDOA15_CTRL, val32); } + if (priv->rtl_chip == RTL8192E) + rtl8xxxu_write32(priv, REG_AFE_XTAL_CTRL, 0x000f81fb); + return 0; } From 57e5e2e650fb7ad1fe32e1c5e5b1bd01faa238fc Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Thu, 7 Apr 2016 14:19:27 -0400 Subject: [PATCH 0635/1649] rtl8xxxu: Set correct interrupt masking registers on 8192eu Set HIMR[01] on 8192eu instead of HISR/HIMR. It's not obvious this really matters for USB devices, but this matches the register writes performed by the vendor driver. 
Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- .../net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 20 +++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index e77019256c61..bcf229d2055b 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -6883,11 +6883,6 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw) rtl8xxxu_write8(priv, 0xfe42, 0x80); } - if (priv->rtl_chip == RTL8192E) { - rtl8xxxu_write32(priv, REG_HIMR0, 0x00); - rtl8xxxu_write32(priv, REG_HIMR1, 0x00); - } - if (priv->fops->phy_init_antenna_selection) priv->fops->phy_init_antenna_selection(priv); @@ -7053,11 +7048,16 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw) */ rtl8xxxu_write8(priv, REG_RX_DRVINFO_SZ, 4); - /* - * Enable all interrupts - not obvious USB needs to do this - */ - rtl8xxxu_write32(priv, REG_HISR, 0xffffffff); - rtl8xxxu_write32(priv, REG_HIMR, 0xffffffff); + if (priv->rtl_chip == RTL8192E) { + rtl8xxxu_write32(priv, REG_HIMR0, 0x00); + rtl8xxxu_write32(priv, REG_HIMR1, 0x00); + } else { + /* + * Enable all interrupts - not obvious USB needs to do this + */ + rtl8xxxu_write32(priv, REG_HISR, 0xffffffff); + rtl8xxxu_write32(priv, REG_HIMR, 0xffffffff); + } rtl8xxxu_set_mac(priv); rtl8xxxu_set_linktype(priv, NL80211_IFTYPE_STATION); From 3021e51f2b2f62a2fa6f49c2d14d103b5a8c331c Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Thu, 7 Apr 2016 14:19:28 -0400 Subject: [PATCH 0636/1649] rtl8xxxu: Set REG_USB_HRPWM for 8192eu The vendor driver set register 0xfe58 REG_USB_HWPWM in it's init sequence for 8192eu. Do the same here. Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 2 ++ drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h | 1 + 2 files changed, 3 insertions(+) diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index bcf229d2055b..fdd11ece3806 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -7225,6 +7225,8 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw) val32 |= FPGA_RF_MODE_CCK; rtl8xxxu_write32(priv, REG_FPGA0_RF_MODE, val32); } + } else if (priv->rtl_chip == RTL8192E) { + rtl8xxxu_write8(priv, REG_USB_HRPWM, 0x00); } val32 = rtl8xxxu_read32(priv, REG_FWHW_TXQ_CTRL); diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h index ade42fe7e742..82cbe856694a 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h @@ -1043,6 +1043,7 @@ #define USB_HIMR_ROK BIT(0) /* Receive DMA OK Interrupt */ #define REG_USB_SPECIAL_OPTION 0xfe55 +#define REG_USB_HRPWM 0xfe58 #define REG_USB_DMA_AGG_TO 0xfe5b #define REG_USB_AGG_TO 0xfe5c #define REG_USB_AGG_TH 0xfe5d From e1394fe5f98638ac0231063245614bb20e94e57f Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Thu, 7 Apr 2016 14:19:29 -0400 Subject: [PATCH 0637/1649] rtl8xxxu: Fix LDPC RX hang issue on 8192eu Implement workaround for LDPC RX hands on 8192eu. This was inspired by workaround found in the 8192eu vendor driver. 
Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index fdd11ece3806..ed266f081fd0 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -7234,6 +7234,16 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw) /* ack for xmit mgmt frames. */ rtl8xxxu_write32(priv, REG_FWHW_TXQ_CTRL, val32); + if (priv->rtl_chip == RTL8192E) { + /* + * Fix LDPC rx hang issue. + */ + val32 = rtl8xxxu_read32(priv, REG_AFE_MISC); + rtl8xxxu_write8(priv, REG_8192E_LDOV12_CTRL, 0x75); + val32 &= 0xfff00fff; + val32 |= 0x0007e000; + rtl8xxxu_write32(priv, REG_8192E_LDOV12_CTRL, val32); + } exit: return ret; } From b052b7fc7d25f7c783ffcd87c42b767f6c166724 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Thu, 7 Apr 2016 14:19:30 -0400 Subject: [PATCH 0638/1649] rtl8xxxu: Implement 8192eu device specific quirks Set REG_QUEUE_CTRL and REG_ACLK_MON for 8192eu. Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 14 ++++++++++++++ .../net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h | 1 + 2 files changed, 15 insertions(+) diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index ed266f081fd0..3e96ba25bf91 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -7197,6 +7197,20 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw) if (priv->fops->init_statistics) priv->fops->init_statistics(priv); + if (priv->rtl_chip == RTL8192E) { + /* + * 0x4c6[3] 1: RTS BW = Data BW + * 0: RTS BW depends on CCA / secondary CCA result. + */ + val8 = rtl8xxxu_read8(priv, REG_QUEUE_CTRL); + val8 &= ~BIT(3); + rtl8xxxu_write8(priv, REG_QUEUE_CTRL, val8); + /* + * Reset USB mode switch setting + */ + rtl8xxxu_write8(priv, REG_ACLK_MON, 0x00); + } + rtl8723a_phy_lc_calibrate(priv); priv->fops->phy_iq_calibrate(priv); diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h index 82cbe856694a..c19f234f1934 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h @@ -512,6 +512,7 @@ #define REG_PKT_VO_VI_LIFE_TIME 0x04c0 #define REG_PKT_BE_BK_LIFE_TIME 0x04c2 #define REG_STBC_SETTING 0x04c4 +#define REG_QUEUE_CTRL 0x04c6 #define REG_HT_SINGLE_AMPDU_8723B 0x04c7 #define REG_PROT_MODE_CTRL 0x04c8 #define REG_MAX_AGGR_NUM 0x04ca From 70bc1e24d9e2df3002d714348ca600db83de4c64 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Thu, 7 Apr 2016 14:19:31 -0400 Subject: [PATCH 0639/1649] rtl8xxxu: Use proper register name for REG_PAD_CTRL1 Fixup another case where the hard coded register value was used instead of the name. 
Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 3e96ba25bf91..cd536c0a2387 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -3550,9 +3550,9 @@ static void rtl8723bu_phy_init_antenna_selection(struct rtl8xxxu_priv *priv) { u32 val32; - val32 = rtl8xxxu_read32(priv, 0x64); + val32 = rtl8xxxu_read32(priv, REG_PAD_CTRL1); val32 &= ~(BIT(20) | BIT(24)); - rtl8xxxu_write32(priv, 0x64, val32); + rtl8xxxu_write32(priv, REG_PAD_CTRL1, val32); val32 = rtl8xxxu_read32(priv, REG_GPIO_MUXCFG); val32 &= ~BIT(4); From f991f4e9147b1b7c9546c08f80d8d8f0aa53df2e Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Thu, 7 Apr 2016 14:19:32 -0400 Subject: [PATCH 0640/1649] rtl8xxxu: Implement IQK calibration for 8192eu 8192eu has it's own IQK calibration procedure, and notably uses undocumented RF register 0x56 in the process. Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- .../net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 608 +++++++++++++++++- .../wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h | 1 + 2 files changed, 608 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index cd536c0a2387..c08bbcbe4a0d 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -4955,6 +4955,334 @@ out: } #endif +static int rtl8192eu_iqk_path_a(struct rtl8xxxu_priv *priv) +{ + u32 reg_eac, reg_e94, reg_e9c; + int result = 0; + + /* + * TX IQK + * PA/PAD controlled by 0x0 + */ + rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x00000000); + rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_UNKNOWN_DF, 0x00180); + rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x80800000); + + /* Path A IQK setting */ + rtl8xxxu_write32(priv, REG_TX_IQK_TONE_A, 0x18008c1c); + rtl8xxxu_write32(priv, REG_RX_IQK_TONE_A, 0x38008c1c); + rtl8xxxu_write32(priv, REG_TX_IQK_TONE_B, 0x38008c1c); + rtl8xxxu_write32(priv, REG_RX_IQK_TONE_B, 0x38008c1c); + + rtl8xxxu_write32(priv, REG_TX_IQK_PI_A, 0x82140303); + rtl8xxxu_write32(priv, REG_RX_IQK_PI_A, 0x68160000); + + /* LO calibration setting */ + rtl8xxxu_write32(priv, REG_IQK_AGC_RSP, 0x00462911); + + /* One shot, path A LOK & IQK */ + rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf9000000); + rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf8000000); + + mdelay(10); + + /* Check failed */ + reg_eac = rtl8xxxu_read32(priv, REG_RX_POWER_AFTER_IQK_A_2); + reg_e94 = rtl8xxxu_read32(priv, REG_TX_POWER_BEFORE_IQK_A); + reg_e9c = rtl8xxxu_read32(priv, REG_TX_POWER_AFTER_IQK_A); + + if (!(reg_eac & BIT(28)) && + ((reg_e94 & 0x03ff0000) != 0x01420000) && + ((reg_e9c & 0x03ff0000) != 0x00420000)) + result |= 0x01; + + return result; +} + +static int rtl8192eu_rx_iqk_path_a(struct rtl8xxxu_priv *priv) +{ + u32 reg_ea4, reg_eac, reg_e94, reg_e9c, val32; + int result = 0; + + /* Leave IQK mode */ + rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x00); + + /* Enable path A PA in TX IQK mode */ + rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_WE_LUT, 0x800a0); + rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_RCK_OS, 0x30000); + rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G1, 0x0000f); + rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G2, 0xf117b); + + /* PA/PAD control by 0x56, and set = 0x0 */ + 
rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_UNKNOWN_DF, 0x00980); + rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_UNKNOWN_56, 0x51000); + + /* Enter IQK mode */ + rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x80800000); + + /* TX IQK setting */ + rtl8xxxu_write32(priv, REG_TX_IQK, 0x01007c00); + rtl8xxxu_write32(priv, REG_RX_IQK, 0x01004800); + + /* path-A IQK setting */ + rtl8xxxu_write32(priv, REG_TX_IQK_TONE_A, 0x18008c1c); + rtl8xxxu_write32(priv, REG_RX_IQK_TONE_A, 0x38008c1c); + rtl8xxxu_write32(priv, REG_TX_IQK_TONE_B, 0x38008c1c); + rtl8xxxu_write32(priv, REG_RX_IQK_TONE_B, 0x38008c1c); + + rtl8xxxu_write32(priv, REG_TX_IQK_PI_A, 0x82160c1f); + rtl8xxxu_write32(priv, REG_RX_IQK_PI_A, 0x68160c1f); + + /* LO calibration setting */ + rtl8xxxu_write32(priv, REG_IQK_AGC_RSP, 0x0046a911); + + /* One shot, path A LOK & IQK */ + rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xfa000000); + rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf8000000); + + mdelay(10); + + /* Check failed */ + reg_eac = rtl8xxxu_read32(priv, REG_RX_POWER_AFTER_IQK_A_2); + reg_e94 = rtl8xxxu_read32(priv, REG_TX_POWER_BEFORE_IQK_A); + reg_e9c = rtl8xxxu_read32(priv, REG_TX_POWER_AFTER_IQK_A); + + if (!(reg_eac & BIT(28)) && + ((reg_e94 & 0x03ff0000) != 0x01420000) && + ((reg_e9c & 0x03ff0000) != 0x00420000)) { + result |= 0x01; + } else { + /* PA/PAD controlled by 0x0 */ + rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x00000000); + rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_UNKNOWN_DF, 0x180); + goto out; + } + + val32 = 0x80007c00 | + (reg_e94 & 0x03ff0000) | ((reg_e9c >> 16) & 0x03ff); + rtl8xxxu_write32(priv, REG_TX_IQK, val32); + + /* Modify RX IQK mode table */ + rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x00000000); + + rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_WE_LUT, 0x800a0); + rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_RCK_OS, 0x30000); + rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G1, 0x0000f); + rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G2, 0xf7ffa); + + /* PA/PAD control by 0x56, and set = 0x0 */ + rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_UNKNOWN_DF, 0x00980); + rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_UNKNOWN_56, 0x51000); + + /* Enter IQK mode */ + rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x80800000); + + /* IQK setting */ + rtl8xxxu_write32(priv, REG_RX_IQK, 0x01004800); + + /* Path A IQK setting */ + rtl8xxxu_write32(priv, REG_TX_IQK_TONE_A, 0x38008c1c); + rtl8xxxu_write32(priv, REG_RX_IQK_TONE_A, 0x18008c1c); + rtl8xxxu_write32(priv, REG_TX_IQK_TONE_B, 0x38008c1c); + rtl8xxxu_write32(priv, REG_RX_IQK_TONE_B, 0x38008c1c); + + rtl8xxxu_write32(priv, REG_TX_IQK_PI_A, 0x82160c1f); + rtl8xxxu_write32(priv, REG_RX_IQK_PI_A, 0x28160c1f); + + /* LO calibration setting */ + rtl8xxxu_write32(priv, REG_IQK_AGC_RSP, 0x0046a891); + + /* One shot, path A LOK & IQK */ + rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xfa000000); + rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf8000000); + + mdelay(10); + + reg_eac = rtl8xxxu_read32(priv, REG_RX_POWER_AFTER_IQK_A_2); + reg_ea4 = rtl8xxxu_read32(priv, REG_RX_POWER_BEFORE_IQK_A_2); + + rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x00000000); + rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_UNKNOWN_DF, 0x180); + + if (!(reg_eac & BIT(27)) && + ((reg_ea4 & 0x03ff0000) != 0x01320000) && + ((reg_eac & 0x03ff0000) != 0x00360000)) + result |= 0x02; + else + dev_warn(&priv->udev->dev, "%s: Path A RX IQK failed!\n", + __func__); + +out: + return result; +} + +static int rtl8192eu_iqk_path_b(struct rtl8xxxu_priv *priv) +{ + u32 reg_eac, reg_eb4, reg_ebc; + int result = 0; + + rtl8xxxu_write32(priv, 
REG_FPGA0_IQK, 0x00000000); + rtl8xxxu_write_rfreg(priv, RF_B, RF6052_REG_UNKNOWN_DF, 0x00180); + rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x80800000); + + rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x00000000); + rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x80800000); + + /* Path B IQK setting */ + rtl8xxxu_write32(priv, REG_TX_IQK_TONE_A, 0x38008c1c); + rtl8xxxu_write32(priv, REG_RX_IQK_TONE_A, 0x38008c1c); + rtl8xxxu_write32(priv, REG_TX_IQK_TONE_B, 0x18008c1c); + rtl8xxxu_write32(priv, REG_RX_IQK_TONE_B, 0x38008c1c); + + rtl8xxxu_write32(priv, REG_TX_IQK_PI_B, 0x821403e2); + rtl8xxxu_write32(priv, REG_RX_IQK_PI_B, 0x68160000); + + /* LO calibration setting */ + rtl8xxxu_write32(priv, REG_IQK_AGC_RSP, 0x00492911); + + /* One shot, path A LOK & IQK */ + rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xfa000000); + rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf8000000); + + mdelay(1); + + /* Check failed */ + reg_eac = rtl8xxxu_read32(priv, REG_RX_POWER_AFTER_IQK_A_2); + reg_eb4 = rtl8xxxu_read32(priv, REG_TX_POWER_BEFORE_IQK_B); + reg_ebc = rtl8xxxu_read32(priv, REG_TX_POWER_AFTER_IQK_B); + + if (!(reg_eac & BIT(31)) && + ((reg_eb4 & 0x03ff0000) != 0x01420000) && + ((reg_ebc & 0x03ff0000) != 0x00420000)) + result |= 0x01; + else + dev_warn(&priv->udev->dev, "%s: Path B IQK failed!\n", + __func__); + + return result; +} + +static int rtl8192eu_rx_iqk_path_b(struct rtl8xxxu_priv *priv) +{ + u32 reg_eac, reg_eb4, reg_ebc, reg_ec4, reg_ecc, val32; + int result = 0; + + /* Leave IQK mode */ + rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x00000000); + + /* Enable path A PA in TX IQK mode */ + rtl8xxxu_write_rfreg(priv, RF_B, RF6052_REG_WE_LUT, 0x800a0); + rtl8xxxu_write_rfreg(priv, RF_B, RF6052_REG_RCK_OS, 0x30000); + rtl8xxxu_write_rfreg(priv, RF_B, RF6052_REG_TXPA_G1, 0x0000f); + rtl8xxxu_write_rfreg(priv, RF_B, RF6052_REG_TXPA_G2, 0xf117b); + + /* PA/PAD control by 0x56, and set = 0x0 */ + rtl8xxxu_write_rfreg(priv, RF_B, RF6052_REG_UNKNOWN_DF, 0x00980); + rtl8xxxu_write_rfreg(priv, RF_B, RF6052_REG_UNKNOWN_56, 0x51000); + + /* Enter IQK mode */ + rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x80800000); + + /* TX IQK setting */ + rtl8xxxu_write32(priv, REG_TX_IQK, 0x01007c00); + rtl8xxxu_write32(priv, REG_RX_IQK, 0x01004800); + + /* path-A IQK setting */ + rtl8xxxu_write32(priv, REG_TX_IQK_TONE_A, 0x38008c1c); + rtl8xxxu_write32(priv, REG_RX_IQK_TONE_A, 0x38008c1c); + rtl8xxxu_write32(priv, REG_TX_IQK_TONE_B, 0x18008c1c); + rtl8xxxu_write32(priv, REG_RX_IQK_TONE_B, 0x38008c1c); + + rtl8xxxu_write32(priv, REG_TX_IQK_PI_B, 0x82160c1f); + rtl8xxxu_write32(priv, REG_RX_IQK_PI_B, 0x68160c1f); + + /* LO calibration setting */ + rtl8xxxu_write32(priv, REG_IQK_AGC_RSP, 0x0046a911); + + /* One shot, path A LOK & IQK */ + rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xfa000000); + rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf8000000); + + mdelay(10); + + /* Check failed */ + reg_eac = rtl8xxxu_read32(priv, REG_RX_POWER_AFTER_IQK_A_2); + reg_eb4 = rtl8xxxu_read32(priv, REG_TX_POWER_BEFORE_IQK_B); + reg_ebc = rtl8xxxu_read32(priv, REG_TX_POWER_AFTER_IQK_B); + + if (!(reg_eac & BIT(31)) && + ((reg_eb4 & 0x03ff0000) != 0x01420000) && + ((reg_ebc & 0x03ff0000) != 0x00420000)) { + result |= 0x01; + } else { + /* + * PA/PAD controlled by 0x0 + * Vendor driver restores RF_A here which I believe is a bug + */ + rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x00000000); + rtl8xxxu_write_rfreg(priv, RF_B, RF6052_REG_UNKNOWN_DF, 0x180); + goto out; + } + + val32 = 0x80007c00 | + (reg_eb4 & 0x03ff0000) | ((reg_ebc >> 16) & 0x03ff); + rtl8xxxu_write32(priv, 
REG_TX_IQK, val32); + + /* Modify RX IQK mode table */ + rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x00000000); + + rtl8xxxu_write_rfreg(priv, RF_B, RF6052_REG_WE_LUT, 0x800a0); + rtl8xxxu_write_rfreg(priv, RF_B, RF6052_REG_RCK_OS, 0x30000); + rtl8xxxu_write_rfreg(priv, RF_B, RF6052_REG_TXPA_G1, 0x0000f); + rtl8xxxu_write_rfreg(priv, RF_B, RF6052_REG_TXPA_G2, 0xf7ffa); + + /* PA/PAD control by 0x56, and set = 0x0 */ + rtl8xxxu_write_rfreg(priv, RF_B, RF6052_REG_UNKNOWN_DF, 0x00980); + rtl8xxxu_write_rfreg(priv, RF_B, RF6052_REG_UNKNOWN_56, 0x51000); + + /* Enter IQK mode */ + rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x80800000); + + /* IQK setting */ + rtl8xxxu_write32(priv, REG_RX_IQK, 0x01004800); + + /* Path A IQK setting */ + rtl8xxxu_write32(priv, REG_TX_IQK_TONE_A, 0x38008c1c); + rtl8xxxu_write32(priv, REG_RX_IQK_TONE_A, 0x38008c1c); + rtl8xxxu_write32(priv, REG_TX_IQK_TONE_B, 0x38008c1c); + rtl8xxxu_write32(priv, REG_RX_IQK_TONE_B, 0x18008c1c); + + rtl8xxxu_write32(priv, REG_TX_IQK_PI_A, 0x82160c1f); + rtl8xxxu_write32(priv, REG_RX_IQK_PI_A, 0x28160c1f); + + /* LO calibration setting */ + rtl8xxxu_write32(priv, REG_IQK_AGC_RSP, 0x0046a891); + + /* One shot, path A LOK & IQK */ + rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xfa000000); + rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf8000000); + + mdelay(10); + + reg_eac = rtl8xxxu_read32(priv, REG_RX_POWER_AFTER_IQK_A_2); + reg_ec4 = rtl8xxxu_read32(priv, REG_RX_POWER_BEFORE_IQK_B_2); + reg_ecc = rtl8xxxu_read32(priv, REG_RX_POWER_AFTER_IQK_B_2); + + rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x00000000); + rtl8xxxu_write_rfreg(priv, RF_B, RF6052_REG_UNKNOWN_DF, 0x180); + + if (!(reg_eac & BIT(30)) && + ((reg_ec4 & 0x03ff0000) != 0x01320000) && + ((reg_ecc & 0x03ff0000) != 0x00360000)) + result |= 0x02; + else + dev_warn(&priv->udev->dev, "%s: Path B RX IQK failed!\n", + __func__); + +out: + return result; +} + static void rtl8xxxu_phy_iqcalibrate(struct rtl8xxxu_priv *priv, int result[][8], int t) { @@ -5385,6 +5713,194 @@ static void rtl8723bu_phy_iqcalibrate(struct rtl8xxxu_priv *priv, } } +static void rtl8192eu_phy_iqcalibrate(struct rtl8xxxu_priv *priv, + int result[][8], int t) +{ + struct device *dev = &priv->udev->dev; + u32 i, val32; + int path_a_ok, path_b_ok; + int retry = 2; + const u32 adda_regs[RTL8XXXU_ADDA_REGS] = { + REG_FPGA0_XCD_SWITCH_CTRL, REG_BLUETOOTH, + REG_RX_WAIT_CCA, REG_TX_CCK_RFON, + REG_TX_CCK_BBON, REG_TX_OFDM_RFON, + REG_TX_OFDM_BBON, REG_TX_TO_RX, + REG_TX_TO_TX, REG_RX_CCK, + REG_RX_OFDM, REG_RX_WAIT_RIFS, + REG_RX_TO_RX, REG_STANDBY, + REG_SLEEP, REG_PMPD_ANAEN + }; + const u32 iqk_mac_regs[RTL8XXXU_MAC_REGS] = { + REG_TXPAUSE, REG_BEACON_CTRL, + REG_BEACON_CTRL_1, REG_GPIO_MUXCFG + }; + const u32 iqk_bb_regs[RTL8XXXU_BB_REGS] = { + REG_OFDM0_TRX_PATH_ENABLE, REG_OFDM0_TR_MUX_PAR, + REG_FPGA0_XCD_RF_SW_CTRL, REG_CONFIG_ANT_A, REG_CONFIG_ANT_B, + REG_FPGA0_XAB_RF_SW_CTRL, REG_FPGA0_XA_RF_INT_OE, + REG_FPGA0_XB_RF_INT_OE, REG_CCK0_AFE_SETTING + }; + u8 xa_agc = rtl8xxxu_read32(priv, REG_OFDM0_XA_AGC_CORE1) & 0xff; + u8 xb_agc = rtl8xxxu_read32(priv, REG_OFDM0_XB_AGC_CORE1) & 0xff; + + /* + * Note: IQ calibration must be performed after loading + * PHY_REG.txt , and radio_a, radio_b.txt + */ + + if (t == 0) { + /* Save ADDA parameters, turn Path A ADDA on */ + rtl8xxxu_save_regs(priv, adda_regs, priv->adda_backup, + RTL8XXXU_ADDA_REGS); + rtl8xxxu_save_mac_regs(priv, iqk_mac_regs, priv->mac_backup); + rtl8xxxu_save_regs(priv, iqk_bb_regs, + priv->bb_backup, RTL8XXXU_BB_REGS); + } + + rtl8xxxu_path_adda_on(priv, 
adda_regs, true); + + /* MAC settings */ + rtl8xxxu_mac_calibration(priv, iqk_mac_regs, priv->mac_backup); + + val32 = rtl8xxxu_read32(priv, REG_CCK0_AFE_SETTING); + val32 |= 0x0f000000; + rtl8xxxu_write32(priv, REG_CCK0_AFE_SETTING, val32); + + rtl8xxxu_write32(priv, REG_OFDM0_TRX_PATH_ENABLE, 0x03a05600); + rtl8xxxu_write32(priv, REG_OFDM0_TR_MUX_PAR, 0x000800e4); + rtl8xxxu_write32(priv, REG_FPGA0_XCD_RF_SW_CTRL, 0x22208200); + + val32 = rtl8xxxu_read32(priv, REG_FPGA0_XAB_RF_SW_CTRL); + val32 |= (FPGA0_RF_PAPE | (FPGA0_RF_PAPE << FPGA0_RF_BD_CTRL_SHIFT)); + rtl8xxxu_write32(priv, REG_FPGA0_XAB_RF_SW_CTRL, val32); + + val32 = rtl8xxxu_read32(priv, REG_FPGA0_XA_RF_INT_OE); + val32 |= BIT(10); + rtl8xxxu_write32(priv, REG_FPGA0_XA_RF_INT_OE, val32); + val32 = rtl8xxxu_read32(priv, REG_FPGA0_XB_RF_INT_OE); + val32 |= BIT(10); + rtl8xxxu_write32(priv, REG_FPGA0_XB_RF_INT_OE, val32); + + rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x80800000); + rtl8xxxu_write32(priv, REG_TX_IQK, 0x01007c00); + rtl8xxxu_write32(priv, REG_RX_IQK, 0x01004800); + + for (i = 0; i < retry; i++) { + path_a_ok = rtl8192eu_iqk_path_a(priv); + if (path_a_ok == 0x01) { + val32 = rtl8xxxu_read32(priv, + REG_TX_POWER_BEFORE_IQK_A); + result[t][0] = (val32 >> 16) & 0x3ff; + val32 = rtl8xxxu_read32(priv, + REG_TX_POWER_AFTER_IQK_A); + result[t][1] = (val32 >> 16) & 0x3ff; + + break; + } + } + + if (!path_a_ok) + dev_dbg(dev, "%s: Path A TX IQK failed!\n", __func__); + + for (i = 0; i < retry; i++) { + path_a_ok = rtl8192eu_rx_iqk_path_a(priv); + if (path_a_ok == 0x03) { + val32 = rtl8xxxu_read32(priv, + REG_RX_POWER_BEFORE_IQK_A_2); + result[t][2] = (val32 >> 16) & 0x3ff; + val32 = rtl8xxxu_read32(priv, + REG_RX_POWER_AFTER_IQK_A_2); + result[t][3] = (val32 >> 16) & 0x3ff; + + break; + } + } + + if (!path_a_ok) + dev_dbg(dev, "%s: Path A RX IQK failed!\n", __func__); + + if (priv->rf_paths > 1) { + dev_warn(dev, "%s: Path B ongoing\n", __func__); + + /* Path A into standby */ + rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x00000000); + rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_AC, 0x10000); + rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x80800000); + + /* Turn Path B ADDA on */ + rtl8xxxu_path_adda_on(priv, adda_regs, false); + + rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x80800000); + rtl8xxxu_write32(priv, REG_TX_IQK, 0x01007c00); + rtl8xxxu_write32(priv, REG_RX_IQK, 0x01004800); + + for (i = 0; i < retry; i++) { + path_b_ok = rtl8192eu_iqk_path_b(priv); + if (path_b_ok == 0x01) { + val32 = rtl8xxxu_read32(priv, REG_TX_POWER_BEFORE_IQK_B); + result[t][4] = (val32 >> 16) & 0x3ff; + val32 = rtl8xxxu_read32(priv, REG_TX_POWER_AFTER_IQK_B); + result[t][5] = (val32 >> 16) & 0x3ff; + break; + } + } + + if (!path_b_ok) + dev_dbg(dev, "%s: Path B IQK failed!\n", __func__); + + for (i = 0; i < retry; i++) { + path_b_ok = rtl8192eu_rx_iqk_path_b(priv); + if (path_a_ok == 0x03) { + val32 = rtl8xxxu_read32(priv, + REG_RX_POWER_BEFORE_IQK_B_2); + result[t][6] = (val32 >> 16) & 0x3ff; + val32 = rtl8xxxu_read32(priv, + REG_RX_POWER_AFTER_IQK_B_2); + result[t][7] = (val32 >> 16) & 0x3ff; + break; + } + } + + if (!path_b_ok) + dev_dbg(dev, "%s: Path B RX IQK failed!\n", __func__); + } + + /* Back to BB mode, load original value */ + rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x00000000); + + if (t) { + /* Reload ADDA power saving parameters */ + rtl8xxxu_restore_regs(priv, adda_regs, priv->adda_backup, + RTL8XXXU_ADDA_REGS); + + /* Reload MAC parameters */ + rtl8xxxu_restore_mac_regs(priv, iqk_mac_regs, priv->mac_backup); + + /* Reload BB parameters */ + 
rtl8xxxu_restore_regs(priv, iqk_bb_regs, + priv->bb_backup, RTL8XXXU_BB_REGS); + + /* Restore RX initial gain */ + val32 = rtl8xxxu_read32(priv, REG_OFDM0_XA_AGC_CORE1); + val32 &= 0xffffff00; + rtl8xxxu_write32(priv, REG_OFDM0_XA_AGC_CORE1, val32 | 0x50); + rtl8xxxu_write32(priv, REG_OFDM0_XA_AGC_CORE1, val32 | xa_agc); + + if (priv->rf_paths > 1) { + val32 = rtl8xxxu_read32(priv, REG_OFDM0_XB_AGC_CORE1); + val32 &= 0xffffff00; + rtl8xxxu_write32(priv, REG_OFDM0_XB_AGC_CORE1, + val32 | 0x50); + rtl8xxxu_write32(priv, REG_OFDM0_XB_AGC_CORE1, + val32 | xb_agc); + } + + /* Load 0xe30 IQC default value */ + rtl8xxxu_write32(priv, REG_TX_IQK_TONE_A, 0x01008c00); + rtl8xxxu_write32(priv, REG_RX_IQK_TONE_A, 0x01008c00); + } +} + static void rtl8xxxu_prepare_calibrate(struct rtl8xxxu_priv *priv, u8 start) { struct h2c_cmd h2c; @@ -5630,6 +6146,96 @@ static void rtl8723bu_phy_iq_calibrate(struct rtl8xxxu_priv *priv) rtl8xxxu_prepare_calibrate(priv, 0); } +static void rtl8192eu_phy_iq_calibrate(struct rtl8xxxu_priv *priv) +{ + struct device *dev = &priv->udev->dev; + int result[4][8]; /* last is final result */ + int i, candidate; + bool path_a_ok, path_b_ok; + u32 reg_e94, reg_e9c, reg_ea4, reg_eac; + u32 reg_eb4, reg_ebc, reg_ec4, reg_ecc; + bool simu; + + memset(result, 0, sizeof(result)); + candidate = -1; + + path_a_ok = false; + path_b_ok = false; + + for (i = 0; i < 3; i++) { + rtl8192eu_phy_iqcalibrate(priv, result, i); + + if (i == 1) { + simu = rtl8723bu_simularity_compare(priv, result, 0, 1); + if (simu) { + candidate = 0; + break; + } + } + + if (i == 2) { + simu = rtl8723bu_simularity_compare(priv, result, 0, 2); + if (simu) { + candidate = 0; + break; + } + + simu = rtl8723bu_simularity_compare(priv, result, 1, 2); + if (simu) + candidate = 1; + else + candidate = 3; + } + } + + for (i = 0; i < 4; i++) { + reg_e94 = result[i][0]; + reg_e9c = result[i][1]; + reg_ea4 = result[i][2]; + reg_eac = result[i][3]; + reg_eb4 = result[i][4]; + reg_ebc = result[i][5]; + reg_ec4 = result[i][6]; + reg_ecc = result[i][7]; + } + + if (candidate >= 0) { + reg_e94 = result[candidate][0]; + priv->rege94 = reg_e94; + reg_e9c = result[candidate][1]; + priv->rege9c = reg_e9c; + reg_ea4 = result[candidate][2]; + reg_eac = result[candidate][3]; + reg_eb4 = result[candidate][4]; + priv->regeb4 = reg_eb4; + reg_ebc = result[candidate][5]; + priv->regebc = reg_ebc; + reg_ec4 = result[candidate][6]; + reg_ecc = result[candidate][7]; + dev_dbg(dev, "%s: candidate is %x\n", __func__, candidate); + dev_dbg(dev, + "%s: e94 =%x e9c=%x ea4=%x eac=%x eb4=%x ebc=%x ec4=%x " + "ecc=%x\n ", __func__, reg_e94, reg_e9c, + reg_ea4, reg_eac, reg_eb4, reg_ebc, reg_ec4, reg_ecc); + path_a_ok = true; + path_b_ok = true; + } else { + reg_e94 = reg_eb4 = priv->rege94 = priv->regeb4 = 0x100; + reg_e9c = reg_ebc = priv->rege9c = priv->regebc = 0x0; + } + + if (reg_e94 && candidate >= 0) + rtl8xxxu_fill_iqk_matrix_a(priv, path_a_ok, result, + candidate, (reg_ea4 == 0)); + + if (priv->rf_paths > 1) + rtl8xxxu_fill_iqk_matrix_b(priv, path_b_ok, result, + candidate, (reg_ec4 == 0)); + + rtl8xxxu_save_regs(priv, rtl8723au_iqk_phy_iq_bb_reg, + priv->bb_recovery_backup, RTL8XXXU_BB_REGS); +} + static void rtl8723a_phy_lc_calibrate(struct rtl8xxxu_priv *priv) { u32 val32; @@ -9080,7 +9686,7 @@ static struct rtl8xxxu_fileops rtl8192eu_fops = { .power_off = rtl8xxxu_power_off, .reset_8051 = rtl8xxxu_reset_8051, .llt_init = rtl8xxxu_auto_llt_table, - .phy_iq_calibrate = rtl8723bu_phy_iq_calibrate, + .phy_iq_calibrate = 
rtl8192eu_phy_iq_calibrate, .config_channel = rtl8723bu_config_channel, .parse_rx_desc = rtl8723bu_parse_rx_desc, .enable_rf = rtl8723b_enable_rf, diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h index c19f234f1934..2aa14b14a0a5 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h @@ -1130,6 +1130,7 @@ #define RF6052_REG_T_METER_8723B 0x42 #define RF6052_REG_UNKNOWN_43 0x43 #define RF6052_REG_UNKNOWN_55 0x55 +#define RF6052_REG_UNKNOWN_56 0x56 #define RF6052_REG_S0S1 0xb0 #define RF6052_REG_UNKNOWN_DF 0xdf #define RF6052_REG_UNKNOWN_ED 0xed From 28e460b02c66116354ced05b570503d252b996e9 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Thu, 7 Apr 2016 14:19:33 -0400 Subject: [PATCH 0641/1649] rtl8xxxu: Adjust AFE crystal value on 8192eu Adjust AFE before enabling PLL on 8192eu, probably also needed for 8723bu. Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- .../net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 39 +++++++++++++++++++ .../wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h | 4 ++ 2 files changed, 43 insertions(+) diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index c08bbcbe4a0d..e36fda8c1ad3 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -7093,6 +7093,41 @@ static int rtl8192cu_power_on(struct rtl8xxxu_priv *priv) #endif +/* + * This is needed for 8723bu as well, presumable + */ +static void rtl8192e_crystal_afe_adjust(struct rtl8xxxu_priv *priv) +{ + u8 val8; + u32 val32; + + /* + * 40Mhz crystal source, MAC 0x28[2]=0 + */ + val8 = rtl8xxxu_read8(priv, REG_AFE_PLL_CTRL); + val8 &= 0xfb; + rtl8xxxu_write8(priv, REG_AFE_PLL_CTRL, val8); + + val32 = rtl8xxxu_read32(priv, REG_AFE_CTRL4); + val32 &= 0xfffffc7f; + rtl8xxxu_write32(priv, REG_AFE_CTRL4, val32); + + /* + * 92e AFE parameter + * AFE PLL KVCO selection, MAC 0x28[6]=1 + */ + val8 = rtl8xxxu_read8(priv, REG_AFE_PLL_CTRL); + val8 &= 0xbf; + rtl8xxxu_write8(priv, REG_AFE_PLL_CTRL, val8); + + /* + * AFE PLL KVCO selection, MAC 0x78[21]=0 + */ + val32 = rtl8xxxu_read32(priv, REG_AFE_CTRL4); + val32 &= 0xffdfffff; + rtl8xxxu_write32(priv, REG_AFE_CTRL4, val32); +} + static int rtl8192eu_power_on(struct rtl8xxxu_priv *priv) { u16 val16; @@ -7115,6 +7150,10 @@ static int rtl8192eu_power_on(struct rtl8xxxu_priv *priv) rtl8xxxu_write8(priv, REG_LDO_SW_CTRL, 0x83); } + /* + * Adjust AFE before enabling PLL + */ + rtl8192e_crystal_afe_adjust(priv); rtl8192e_disabled_to_emu(priv); ret = rtl8192e_emu_to_active(priv); diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h index 2aa14b14a0a5..bb08a3939e46 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h @@ -109,6 +109,9 @@ #define AFE_XTAL_GATE_DIG BIT(17) #define AFE_XTAL_BT_GATE BIT(20) +/* + * 0x0028 is also known as REG_AFE_CTRL2 on 8723bu/8192eu + */ #define REG_AFE_PLL_CTRL 0x0028 #define AFE_PLL_ENABLE BIT(0) #define AFE_PLL_320_ENABLE BIT(1) @@ -192,6 +195,7 @@ control */ #define MULTI_GPS_FUNC_EN BIT(22) /* GPS function enable */ +#define REG_AFE_CTRL4 0x0078 /* 8192eu/8723bu */ #define REG_LDO_SW_CTRL 0x007c /* 8192eu */ #define REG_MCU_FW_DL 0x0080 From 7fde010d473d7eb98a2af3dd91e83838a1f24cdf Mon Sep 17 00:00:00 2001 From: Hante Meuleman Date: Mon, 11 Apr 
2016 11:35:21 +0200 Subject: [PATCH 0642/1649] brcmfmac: clear eventmask array before using it When the event_msgs iovar is set an array is used to configure the enabled events. This arrays needs to nulled before configuring otherwise unhandled events will be enabled. This solves a problem where in case of wowl the host got woken by an incorrectly enabled event. Reviewed-by: Pieter-Paul Giesberts Reviewed-by: Arend Van Spriel Signed-off-by: Hante Meuleman Signed-off-by: Arend van Spriel Signed-off-by: Kalle Valo --- drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c index d414fbbcc814..b390561255b3 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c @@ -371,6 +371,7 @@ int brcmf_fweh_activate_events(struct brcmf_if *ifp) int i, err; s8 eventmask[BRCMF_EVENTING_MASK_LEN]; + memset(eventmask, 0, sizeof(eventmask)); for (i = 0; i < BRCMF_E_LAST; i++) { if (ifp->drvr->fweh.evt_handler[i]) { brcmf_dbg(EVENT, "enable event %s\n", From 28b285a6129d0c19bcf9e40bb7767da0fd62974f Mon Sep 17 00:00:00 2001 From: Hante Meuleman Date: Mon, 11 Apr 2016 11:35:22 +0200 Subject: [PATCH 0643/1649] brcmfmac: fix clearing wowl wake indicators Newer firmwares require the usage of the wowl wakeind struct as size for the iovar to clear the wake indicators. Older firmwares do not care, so change the used size. Reviewed-by: Arend Van Spriel Reviewed-by: Pieter-Paul Giesberts Signed-off-by: Hante Meuleman Signed-off-by: Arend van Spriel Signed-off-by: Kalle Valo --- drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c index 9a567e263bb1..8daad782b3c3 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c @@ -3608,7 +3608,8 @@ static void brcmf_configure_wowl(struct brcmf_cfg80211_info *cfg, if (!test_bit(BRCMF_VIF_STATUS_CONNECTED, &ifp->vif->sme_state)) wowl_config |= BRCMF_WOWL_UNASSOC; - brcmf_fil_iovar_data_set(ifp, "wowl_wakeind", "clear", strlen("clear")); + brcmf_fil_iovar_data_set(ifp, "wowl_wakeind", "clear", + sizeof(struct brcmf_wowl_wakeind_le)); brcmf_fil_iovar_int_set(ifp, "wowl", wowl_config); brcmf_fil_iovar_int_set(ifp, "wowl_activate", 1); brcmf_bus_wowl_config(cfg->pub->bus_if, true); From 46f2b38a91b08ec7faf1839c3cdcec3cfcc6ad50 Mon Sep 17 00:00:00 2001 From: Hante Meuleman Date: Mon, 11 Apr 2016 11:35:23 +0200 Subject: [PATCH 0644/1649] brcmfmac: insert default boardrev in nvram data if missing Some nvram files/stores come without the boardrev information, but firmware requires this to be set. When not found in nvram then add a default boardrev string to the nvram data. 
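The nvram blob handed to firmware is a sequence of NUL-terminated "key=value" entries with a final terminating NUL, so adding the default is a matter of appending one more entry when the parser never saw a boardrev key. A minimal sketch of that append, assuming the caller guarantees room for the extra entry (the helper name is illustrative only, not driver code):

	/* Illustrative helper: append "boardrev=0xff" to a blob of
	 * NUL-separated key=value entries and return the new length.
	 */
	static u32 nvram_append_default_boardrev(u8 *nvram, u32 len)
	{
		static const char def[] = "boardrev=0xff";

		memcpy(&nvram[len], def, strlen(def));
		len += strlen(def);
		nvram[len++] = '\0';	/* terminate the new entry */

		return len;
	}
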
Reported-by: Rafal Milecki Reviewed-by: Arend Van Spriel Reviewed-by: Franky (Zhenhui) Lin Reviewed-by: Pieter-Paul Giesberts Signed-off-by: Hante Meuleman Signed-off-by: Arend van Spriel Signed-off-by: Kalle Valo --- .../broadcom/brcm80211/brcmfmac/firmware.c | 30 +++++++++++++++++-- 1 file changed, 28 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c index 7269056d0044..c7c1e9906500 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c @@ -29,6 +29,7 @@ #define BRCMF_FW_MAX_NVRAM_SIZE 64000 #define BRCMF_FW_NVRAM_DEVPATH_LEN 19 /* devpath0=pcie/1/4/ */ #define BRCMF_FW_NVRAM_PCIEDEV_LEN 10 /* pcie/1/4/ + \0 */ +#define BRCMF_FW_DEFAULT_BOARDREV "boardrev=0xff" enum nvram_parser_state { IDLE, @@ -51,6 +52,7 @@ enum nvram_parser_state { * @entry: start position of key,value entry. * @multi_dev_v1: detect pcie multi device v1 (compressed). * @multi_dev_v2: detect pcie multi device v2. + * @boardrev_found: nvram contains boardrev information. */ struct nvram_parser { enum nvram_parser_state state; @@ -63,6 +65,7 @@ struct nvram_parser { u32 entry; bool multi_dev_v1; bool multi_dev_v2; + bool boardrev_found; }; /** @@ -125,6 +128,8 @@ static enum nvram_parser_state brcmf_nvram_handle_key(struct nvram_parser *nvp) nvp->multi_dev_v1 = true; if (strncmp(&nvp->data[nvp->entry], "pcie/", 5) == 0) nvp->multi_dev_v2 = true; + if (strncmp(&nvp->data[nvp->entry], "boardrev", 8) == 0) + nvp->boardrev_found = true; } else if (!is_nvram_char(c) || c == ' ') { brcmf_dbg(INFO, "warning: ln=%d:col=%d: '=' expected, skip invalid key entry\n", nvp->line, nvp->column); @@ -284,6 +289,8 @@ static void brcmf_fw_strip_multi_v1(struct nvram_parser *nvp, u16 domain_nr, while (i < nvp->nvram_len) { if ((nvp->nvram[i] - '0' == id) && (nvp->nvram[i + 1] == ':')) { i += 2; + if (strncmp(&nvp->nvram[i], "boardrev", 8) == 0) + nvp->boardrev_found = true; while (nvp->nvram[i] != 0) { nvram[j] = nvp->nvram[i]; i++; @@ -335,6 +342,8 @@ static void brcmf_fw_strip_multi_v2(struct nvram_parser *nvp, u16 domain_nr, while (i < nvp->nvram_len - len) { if (strncmp(&nvp->nvram[i], prefix, len) == 0) { i += len; + if (strncmp(&nvp->nvram[i], "boardrev", 8) == 0) + nvp->boardrev_found = true; while (nvp->nvram[i] != 0) { nvram[j] = nvp->nvram[i]; i++; @@ -356,6 +365,18 @@ fail: nvp->nvram_len = 0; } +static void brcmf_fw_add_defaults(struct nvram_parser *nvp) +{ + if (nvp->boardrev_found) + return; + + memcpy(&nvp->nvram[nvp->nvram_len], &BRCMF_FW_DEFAULT_BOARDREV, + strlen(BRCMF_FW_DEFAULT_BOARDREV)); + nvp->nvram_len += strlen(BRCMF_FW_DEFAULT_BOARDREV); + nvp->nvram[nvp->nvram_len] = '\0'; + nvp->nvram_len++; +} + /* brcmf_nvram_strip :Takes a buffer of "=\n" lines read from a fil * and ending in a NUL. Removes carriage returns, empty lines, comment lines, * and converts newlines to NULs. Shortens buffer as needed and pads with NULs. 
@@ -377,16 +398,21 @@ static void *brcmf_fw_nvram_strip(const u8 *data, size_t data_len, if (nvp.state == END) break; } - if (nvp.multi_dev_v1) + if (nvp.multi_dev_v1) { + nvp.boardrev_found = false; brcmf_fw_strip_multi_v1(&nvp, domain_nr, bus_nr); - else if (nvp.multi_dev_v2) + } else if (nvp.multi_dev_v2) { + nvp.boardrev_found = false; brcmf_fw_strip_multi_v2(&nvp, domain_nr, bus_nr); + } if (nvp.nvram_len == 0) { kfree(nvp.nvram); return NULL; } + brcmf_fw_add_defaults(&nvp); + pad = nvp.nvram_len; *new_length = roundup(nvp.nvram_len + 1, 4); while (pad != *new_length) { From 2aec2c9d42aa9ac4b31583cf4e1c7774e040e57d Mon Sep 17 00:00:00 2001 From: Hante Meuleman Date: Mon, 11 Apr 2016 11:35:24 +0200 Subject: [PATCH 0645/1649] brcmfmac: fix p2p scan abort null pointer exception When p2p connection setup is performed without having ever done an escan a null pointer exception can occur. This is because the ifp to abort scanning is taken from escan struct while it was never initialized. Fix this by using the primary ifp for scan abort. The abort should still be performed and all scan related commands are performed on primary ifp. Reviewed-by: Arend Van Spriel Reviewed-by: Pieter-Paul Giesberts Signed-off-by: Hante Meuleman Signed-off-by: Arend van Spriel Signed-off-by: Kalle Valo --- drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c index c2ac91df35ed..a70cda6c0592 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c @@ -1266,7 +1266,7 @@ static void brcmf_p2p_stop_wait_next_action_frame(struct brcmf_cfg80211_info *cfg) { struct brcmf_p2p_info *p2p = &cfg->p2p; - struct brcmf_if *ifp = cfg->escan_info.ifp; + struct brcmf_if *ifp = p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif->ifp; if (test_bit(BRCMF_P2P_STATUS_SENDING_ACT_FRAME, &p2p->status) && (test_bit(BRCMF_P2P_STATUS_ACTION_TX_COMPLETED, &p2p->status) || From c56caa9db8abbbfb9e31325e0897705aa897db37 Mon Sep 17 00:00:00 2001 From: Franky Lin Date: Mon, 11 Apr 2016 11:35:25 +0200 Subject: [PATCH 0646/1649] brcmfmac: screening firmware event packet Firmware uses asynchronized events as a communication method to the host. The event packets are marked as ETH_P_LINK_CTL protocol type. For SDIO and PCIe bus, this kind of packets are delivered through virtual event channel not data channel. This patch adds a screening logic to make sure the event handler only processes the events coming from the correct channel. 
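On the SDIO and PCIe buses the screening boils down to looking at the channel field carried in the bus-specific header and routing the packet accordingly. The SDIO glomming path in the diff below reduces to this dispatch, shown standalone for readability:

	/* Event-channel frames are handed to the event parser only;
	 * data-channel frames skip firmware event processing entirely.
	 */
	if (brcmf_sdio_fromevntchan(pkt->data))
		brcmf_rx_event(bus->sdiodev->dev, pkt);
	else
		brcmf_rx_frame(bus->sdiodev->dev, pkt, false);

The USB bus, which has no separate event channel, keeps passing frames through brcmf_rx_frame() with event handling enabled, as the usb.c hunk shows.
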
Reviewed-by: Pieter-Paul Giesberts Signed-off-by: Franky Lin Signed-off-by: Arend van Spriel Signed-off-by: Kalle Valo --- .../broadcom/brcm80211/brcmfmac/bus.h | 4 +- .../broadcom/brcm80211/brcmfmac/core.c | 46 +++++++++++++++---- .../broadcom/brcm80211/brcmfmac/core.h | 3 +- .../broadcom/brcm80211/brcmfmac/msgbuf.c | 42 +++++++++-------- .../broadcom/brcm80211/brcmfmac/sdio.c | 32 +++++++++---- .../broadcom/brcm80211/brcmfmac/usb.c | 2 +- 6 files changed, 90 insertions(+), 39 deletions(-) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h index 8e02a478e889..31856eb57bc4 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h @@ -216,7 +216,9 @@ bool brcmf_c_prec_enq(struct device *dev, struct pktq *q, struct sk_buff *pkt, int prec); /* Receive frame for delivery to OS. Callee disposes of rxp. */ -void brcmf_rx_frame(struct device *dev, struct sk_buff *rxp); +void brcmf_rx_frame(struct device *dev, struct sk_buff *rxp, bool handle_evnt); +/* Receive async event packet from firmware. Callee disposes of rxp. */ +void brcmf_rx_event(struct device *dev, struct sk_buff *rxp); /* Indication from bus module regarding presence/insertion of dongle. */ int brcmf_attach(struct device *dev, struct brcmf_mp_device *settings); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c index ff825cd7739e..8a91a517478b 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c @@ -311,16 +311,17 @@ void brcmf_txflowblock(struct device *dev, bool state) brcmf_fws_bus_blocked(drvr, state); } -void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb) +void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb, + bool handle_event) { - skb->dev = ifp->ndev; - skb->protocol = eth_type_trans(skb, skb->dev); + skb->protocol = eth_type_trans(skb, ifp->ndev); if (skb->pkt_type == PACKET_MULTICAST) ifp->stats.multicast++; /* Process special event packets */ - brcmf_fweh_process_skb(ifp->drvr, skb); + if (handle_event) + brcmf_fweh_process_skb(ifp->drvr, skb); if (!(ifp->ndev->flags & IFF_UP)) { brcmu_pkt_buf_free_skb(skb); @@ -381,7 +382,7 @@ static void brcmf_rxreorder_process_info(struct brcmf_if *ifp, u8 *reorder_data, /* validate flags and flow id */ if (flags == 0xFF) { brcmf_err("invalid flags...so ignore this packet\n"); - brcmf_netif_rx(ifp, pkt); + brcmf_netif_rx(ifp, pkt, false); return; } @@ -393,7 +394,7 @@ static void brcmf_rxreorder_process_info(struct brcmf_if *ifp, u8 *reorder_data, if (rfi == NULL) { brcmf_dbg(INFO, "received flags to cleanup, but no flow (%d) yet\n", flow_id); - brcmf_netif_rx(ifp, pkt); + brcmf_netif_rx(ifp, pkt, false); return; } @@ -418,7 +419,7 @@ static void brcmf_rxreorder_process_info(struct brcmf_if *ifp, u8 *reorder_data, rfi = kzalloc(buf_size, GFP_ATOMIC); if (rfi == NULL) { brcmf_err("failed to alloc buffer\n"); - brcmf_netif_rx(ifp, pkt); + brcmf_netif_rx(ifp, pkt, false); return; } @@ -532,11 +533,11 @@ static void brcmf_rxreorder_process_info(struct brcmf_if *ifp, u8 *reorder_data, netif_rx: skb_queue_walk_safe(&reorder_list, pkt, pnext) { __skb_unlink(pkt, &reorder_list); - brcmf_netif_rx(ifp, pkt); + brcmf_netif_rx(ifp, pkt, false); } } -void brcmf_rx_frame(struct device *dev, struct sk_buff *skb) +void brcmf_rx_frame(struct device *dev, struct sk_buff *skb, bool 
handle_evnt) { struct brcmf_if *ifp; struct brcmf_bus *bus_if = dev_get_drvdata(dev); @@ -560,7 +561,32 @@ void brcmf_rx_frame(struct device *dev, struct sk_buff *skb) if (rd->reorder) brcmf_rxreorder_process_info(ifp, rd->reorder, skb); else - brcmf_netif_rx(ifp, skb); + brcmf_netif_rx(ifp, skb, handle_evnt); +} + +void brcmf_rx_event(struct device *dev, struct sk_buff *skb) +{ + struct brcmf_if *ifp; + struct brcmf_bus *bus_if = dev_get_drvdata(dev); + struct brcmf_pub *drvr = bus_if->drvr; + int ret; + + brcmf_dbg(EVENT, "Enter: %s: rxp=%p\n", dev_name(dev), skb); + + /* process and remove protocol-specific header */ + ret = brcmf_proto_hdrpull(drvr, true, skb, &ifp); + + if (ret || !ifp || !ifp->ndev) { + if (ret != -ENODATA && ifp) + ifp->stats.rx_errors++; + brcmu_pkt_buf_free_skb(skb); + return; + } + + skb->protocol = eth_type_trans(skb, ifp->ndev); + + brcmf_fweh_process_skb(ifp->drvr, skb); + brcmu_pkt_buf_free_skb(skb); } void brcmf_txfinalize(struct brcmf_if *ifp, struct sk_buff *txp, bool success) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h index 7bdb6fef99c3..d3497e8bd59c 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h @@ -225,7 +225,8 @@ int brcmf_get_next_free_bsscfgidx(struct brcmf_pub *drvr); void brcmf_txflowblock_if(struct brcmf_if *ifp, enum brcmf_netif_stop_reason reason, bool state); void brcmf_txfinalize(struct brcmf_if *ifp, struct sk_buff *txp, bool success); -void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb); +void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb, + bool handle_event); void brcmf_net_setcarrier(struct brcmf_if *ifp, bool on); int __init brcmf_core_init(void); void __exit brcmf_core_exit(void); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c index 922966734a7f..3795354b7f12 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c @@ -20,6 +20,7 @@ #include #include +#include #include #include @@ -1075,28 +1076,13 @@ static void brcmf_msgbuf_rxbuf_event_post(struct brcmf_msgbuf *msgbuf) } -static void -brcmf_msgbuf_rx_skb(struct brcmf_msgbuf *msgbuf, struct sk_buff *skb, - u8 ifidx) -{ - struct brcmf_if *ifp; - - ifp = brcmf_get_ifp(msgbuf->drvr, ifidx); - if (!ifp || !ifp->ndev) { - brcmf_err("Received pkt for invalid ifidx %d\n", ifidx); - brcmu_pkt_buf_free_skb(skb); - return; - } - brcmf_netif_rx(ifp, skb); -} - - static void brcmf_msgbuf_process_event(struct brcmf_msgbuf *msgbuf, void *buf) { struct msgbuf_rx_event *event; u32 idx; u16 buflen; struct sk_buff *skb; + struct brcmf_if *ifp; event = (struct msgbuf_rx_event *)buf; idx = le32_to_cpu(event->msg.request_id); @@ -1116,7 +1102,19 @@ static void brcmf_msgbuf_process_event(struct brcmf_msgbuf *msgbuf, void *buf) skb_trim(skb, buflen); - brcmf_msgbuf_rx_skb(msgbuf, skb, event->msg.ifidx); + ifp = brcmf_get_ifp(msgbuf->drvr, event->msg.ifidx); + if (!ifp || !ifp->ndev) { + brcmf_err("Received pkt for invalid ifidx %d\n", + event->msg.ifidx); + goto exit; + } + + skb->protocol = eth_type_trans(skb, ifp->ndev); + + brcmf_fweh_process_skb(ifp->drvr, skb); + +exit: + brcmu_pkt_buf_free_skb(skb); } @@ -1128,6 +1126,7 @@ brcmf_msgbuf_process_rx_complete(struct brcmf_msgbuf *msgbuf, void *buf) u16 data_offset; u16 buflen; u32 idx; + struct brcmf_if *ifp; 
brcmf_msgbuf_update_rxbufpost_count(msgbuf, 1); @@ -1148,7 +1147,14 @@ brcmf_msgbuf_process_rx_complete(struct brcmf_msgbuf *msgbuf, void *buf) skb_trim(skb, buflen); - brcmf_msgbuf_rx_skb(msgbuf, skb, rx_complete->msg.ifidx); + ifp = brcmf_get_ifp(msgbuf->drvr, rx_complete->msg.ifidx); + if (!ifp || !ifp->ndev) { + brcmf_err("Received pkt for invalid ifidx %d\n", + rx_complete->msg.ifidx); + brcmu_pkt_buf_free_skb(skb); + return; + } + brcmf_netif_rx(ifp, skb, false); } diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c index 48d7467d270e..4252fa82b89c 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c @@ -1294,6 +1294,17 @@ static inline u8 brcmf_sdio_getdatoffset(u8 *swheader) return (u8)((hdrvalue & SDPCM_DOFFSET_MASK) >> SDPCM_DOFFSET_SHIFT); } +static inline bool brcmf_sdio_fromevntchan(u8 *swheader) +{ + u32 hdrvalue; + u8 ret; + + hdrvalue = *(u32 *)swheader; + ret = (u8)((hdrvalue & SDPCM_CHANNEL_MASK) >> SDPCM_CHANNEL_SHIFT); + + return (ret == SDPCM_EVENT_CHANNEL); +} + static int brcmf_sdio_hdparse(struct brcmf_sdio *bus, u8 *header, struct brcmf_sdio_hdrinfo *rd, enum brcmf_sdio_frmtype type) @@ -1641,7 +1652,11 @@ static u8 brcmf_sdio_rxglom(struct brcmf_sdio *bus, u8 rxseq) pfirst->len, pfirst->next, pfirst->prev); skb_unlink(pfirst, &bus->glom); - brcmf_rx_frame(bus->sdiodev->dev, pfirst); + if (brcmf_sdio_fromevntchan(pfirst->data)) + brcmf_rx_event(bus->sdiodev->dev, pfirst); + else + brcmf_rx_frame(bus->sdiodev->dev, pfirst, + false); bus->sdcnt.rxglompkts++; } @@ -1967,18 +1982,19 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes) __skb_trim(pkt, rd->len); skb_pull(pkt, rd->dat_offset); + if (pkt->len == 0) + brcmu_pkt_buf_free_skb(pkt); + else if (rd->channel == SDPCM_EVENT_CHANNEL) + brcmf_rx_event(bus->sdiodev->dev, pkt); + else + brcmf_rx_frame(bus->sdiodev->dev, pkt, + false); + /* prepare the descriptor for the next read */ rd->len = rd->len_nxtfrm << 4; rd->len_nxtfrm = 0; /* treat all packet as event if we don't know */ rd->channel = SDPCM_EVENT_CHANNEL; - - if (pkt->len == 0) { - brcmu_pkt_buf_free_skb(pkt); - continue; - } - - brcmf_rx_frame(bus->sdiodev->dev, pkt); } rxcount = maxframes - rxleft; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c index 869eb82db8b1..aa0b2a192faa 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c @@ -514,7 +514,7 @@ static void brcmf_usb_rx_complete(struct urb *urb) if (devinfo->bus_pub.state == BRCMFMAC_USB_STATE_UP) { skb_put(skb, urb->actual_length); - brcmf_rx_frame(devinfo->dev, skb); + brcmf_rx_frame(devinfo->dev, skb, true); brcmf_usb_rx_refill(devinfo, req); } else { brcmu_pkt_buf_free_skb(skb); From bbd1f932e7c45ef173468ae2b49edefe52a8c835 Mon Sep 17 00:00:00 2001 From: Arend van Spriel Date: Mon, 11 Apr 2016 11:35:26 +0200 Subject: [PATCH 0647/1649] brcmfmac: cleanup ampdu-rx host reorder code The code for ampdu-rx host reorder is related to the firmware signalling supported in BCDC protocol. This change moves the code to fwsignal module. 
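After the move, the receive path no longer reaches into the fwsignal code directly; it goes through a per-protocol callback that BCDC points at the relocated reorder function. The proto.h helpers added by this patch are not reproduced in full in this excerpt, but the shape of the indirection is roughly the following sketch:

	/* Rough sketch of the per-protocol hook; the exact proto.h
	 * wrappers added by the patch are not shown here.
	 */
	static inline void brcmf_proto_rxreorder(struct brcmf_if *ifp,
						 struct sk_buff *skb)
	{
		ifp->drvr->proto->rxreorder(ifp, skb);
	}

BCDC installs brcmf_proto_bcdc_rxreorder() in that slot, which simply forwards to brcmf_fws_rxreorder(); msgbuf fills the same slot with its own handler, whose definition is cut off at the end of this excerpt.
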
Reviewed-by: Hante Meuleman Reviewed-by: Pieter-Paul Giesberts Reviewed-by: Franky Lin Signed-off-by: Arend van Spriel Signed-off-by: Kalle Valo --- .../broadcom/brcm80211/brcmfmac/bcdc.c | 7 + .../broadcom/brcm80211/brcmfmac/core.c | 214 +----------------- .../broadcom/brcm80211/brcmfmac/core.h | 4 - .../broadcom/brcm80211/brcmfmac/fwsignal.c | 209 +++++++++++++++++ .../broadcom/brcm80211/brcmfmac/fwsignal.h | 1 + .../broadcom/brcm80211/brcmfmac/msgbuf.c | 4 + .../broadcom/brcm80211/brcmfmac/proto.h | 16 ++ 7 files changed, 239 insertions(+), 216 deletions(-) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c index 6af658e443e4..288fe906c80e 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c @@ -351,6 +351,12 @@ brcmf_proto_bcdc_add_tdls_peer(struct brcmf_pub *drvr, int ifidx, { } +static void brcmf_proto_bcdc_rxreorder(struct brcmf_if *ifp, + struct sk_buff *skb) +{ + brcmf_fws_rxreorder(ifp, skb); +} + int brcmf_proto_bcdc_attach(struct brcmf_pub *drvr) { struct brcmf_bcdc *bcdc; @@ -372,6 +378,7 @@ int brcmf_proto_bcdc_attach(struct brcmf_pub *drvr) drvr->proto->configure_addr_mode = brcmf_proto_bcdc_configure_addr_mode; drvr->proto->delete_peer = brcmf_proto_bcdc_delete_peer; drvr->proto->add_tdls_peer = brcmf_proto_bcdc_add_tdls_peer; + drvr->proto->rxreorder = brcmf_proto_bcdc_rxreorder; drvr->proto->pd = bcdc; drvr->hdrlen += BCDC_HEADER_LEN + BRCMF_PROT_FW_SIGNAL_MAX_TXBYTES; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c index 8a91a517478b..a30841bbc5a1 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c @@ -40,19 +40,6 @@ #define MAX_WAIT_FOR_8021X_TX msecs_to_jiffies(950) -/* AMPDU rx reordering definitions */ -#define BRCMF_RXREORDER_FLOWID_OFFSET 0 -#define BRCMF_RXREORDER_MAXIDX_OFFSET 2 -#define BRCMF_RXREORDER_FLAGS_OFFSET 4 -#define BRCMF_RXREORDER_CURIDX_OFFSET 6 -#define BRCMF_RXREORDER_EXPIDX_OFFSET 8 - -#define BRCMF_RXREORDER_DEL_FLOW 0x01 -#define BRCMF_RXREORDER_FLUSH_ALL 0x02 -#define BRCMF_RXREORDER_CURIDX_VALID 0x04 -#define BRCMF_RXREORDER_EXPIDX_VALID 0x08 -#define BRCMF_RXREORDER_NEW_HOLE 0x10 - #define BRCMF_BSSIDX_INVALID -1 char *brcmf_ifname(struct brcmf_if *ifp) @@ -342,207 +329,11 @@ void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb, netif_rx_ni(skb); } -static void brcmf_rxreorder_get_skb_list(struct brcmf_ampdu_rx_reorder *rfi, - u8 start, u8 end, - struct sk_buff_head *skb_list) -{ - /* initialize return list */ - __skb_queue_head_init(skb_list); - - if (rfi->pend_pkts == 0) { - brcmf_dbg(INFO, "no packets in reorder queue\n"); - return; - } - - do { - if (rfi->pktslots[start]) { - __skb_queue_tail(skb_list, rfi->pktslots[start]); - rfi->pktslots[start] = NULL; - } - start++; - if (start > rfi->max_idx) - start = 0; - } while (start != end); - rfi->pend_pkts -= skb_queue_len(skb_list); -} - -static void brcmf_rxreorder_process_info(struct brcmf_if *ifp, u8 *reorder_data, - struct sk_buff *pkt) -{ - u8 flow_id, max_idx, cur_idx, exp_idx, end_idx; - struct brcmf_ampdu_rx_reorder *rfi; - struct sk_buff_head reorder_list; - struct sk_buff *pnext; - u8 flags; - u32 buf_size; - - flow_id = reorder_data[BRCMF_RXREORDER_FLOWID_OFFSET]; - flags = reorder_data[BRCMF_RXREORDER_FLAGS_OFFSET]; - - /* validate flags and flow id */ - if (flags 
== 0xFF) { - brcmf_err("invalid flags...so ignore this packet\n"); - brcmf_netif_rx(ifp, pkt, false); - return; - } - - rfi = ifp->drvr->reorder_flows[flow_id]; - if (flags & BRCMF_RXREORDER_DEL_FLOW) { - brcmf_dbg(INFO, "flow-%d: delete\n", - flow_id); - - if (rfi == NULL) { - brcmf_dbg(INFO, "received flags to cleanup, but no flow (%d) yet\n", - flow_id); - brcmf_netif_rx(ifp, pkt, false); - return; - } - - brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx, rfi->exp_idx, - &reorder_list); - /* add the last packet */ - __skb_queue_tail(&reorder_list, pkt); - kfree(rfi); - ifp->drvr->reorder_flows[flow_id] = NULL; - goto netif_rx; - } - /* from here on we need a flow reorder instance */ - if (rfi == NULL) { - buf_size = sizeof(*rfi); - max_idx = reorder_data[BRCMF_RXREORDER_MAXIDX_OFFSET]; - - buf_size += (max_idx + 1) * sizeof(pkt); - - /* allocate space for flow reorder info */ - brcmf_dbg(INFO, "flow-%d: start, maxidx %d\n", - flow_id, max_idx); - rfi = kzalloc(buf_size, GFP_ATOMIC); - if (rfi == NULL) { - brcmf_err("failed to alloc buffer\n"); - brcmf_netif_rx(ifp, pkt, false); - return; - } - - ifp->drvr->reorder_flows[flow_id] = rfi; - rfi->pktslots = (struct sk_buff **)(rfi+1); - rfi->max_idx = max_idx; - } - if (flags & BRCMF_RXREORDER_NEW_HOLE) { - if (rfi->pend_pkts) { - brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx, - rfi->exp_idx, - &reorder_list); - WARN_ON(rfi->pend_pkts); - } else { - __skb_queue_head_init(&reorder_list); - } - rfi->cur_idx = reorder_data[BRCMF_RXREORDER_CURIDX_OFFSET]; - rfi->exp_idx = reorder_data[BRCMF_RXREORDER_EXPIDX_OFFSET]; - rfi->max_idx = reorder_data[BRCMF_RXREORDER_MAXIDX_OFFSET]; - rfi->pktslots[rfi->cur_idx] = pkt; - rfi->pend_pkts++; - brcmf_dbg(DATA, "flow-%d: new hole %d (%d), pending %d\n", - flow_id, rfi->cur_idx, rfi->exp_idx, rfi->pend_pkts); - } else if (flags & BRCMF_RXREORDER_CURIDX_VALID) { - cur_idx = reorder_data[BRCMF_RXREORDER_CURIDX_OFFSET]; - exp_idx = reorder_data[BRCMF_RXREORDER_EXPIDX_OFFSET]; - - if ((exp_idx == rfi->exp_idx) && (cur_idx != rfi->exp_idx)) { - /* still in the current hole */ - /* enqueue the current on the buffer chain */ - if (rfi->pktslots[cur_idx] != NULL) { - brcmf_dbg(INFO, "HOLE: ERROR buffer pending..free it\n"); - brcmu_pkt_buf_free_skb(rfi->pktslots[cur_idx]); - rfi->pktslots[cur_idx] = NULL; - } - rfi->pktslots[cur_idx] = pkt; - rfi->pend_pkts++; - rfi->cur_idx = cur_idx; - brcmf_dbg(DATA, "flow-%d: store pkt %d (%d), pending %d\n", - flow_id, cur_idx, exp_idx, rfi->pend_pkts); - - /* can return now as there is no reorder - * list to process. - */ - return; - } - if (rfi->exp_idx == cur_idx) { - if (rfi->pktslots[cur_idx] != NULL) { - brcmf_dbg(INFO, "error buffer pending..free it\n"); - brcmu_pkt_buf_free_skb(rfi->pktslots[cur_idx]); - rfi->pktslots[cur_idx] = NULL; - } - rfi->pktslots[cur_idx] = pkt; - rfi->pend_pkts++; - - /* got the expected one. 
flush from current to expected - * and update expected - */ - brcmf_dbg(DATA, "flow-%d: expected %d (%d), pending %d\n", - flow_id, cur_idx, exp_idx, rfi->pend_pkts); - - rfi->cur_idx = cur_idx; - rfi->exp_idx = exp_idx; - - brcmf_rxreorder_get_skb_list(rfi, cur_idx, exp_idx, - &reorder_list); - brcmf_dbg(DATA, "flow-%d: freeing buffers %d, pending %d\n", - flow_id, skb_queue_len(&reorder_list), - rfi->pend_pkts); - } else { - u8 end_idx; - - brcmf_dbg(DATA, "flow-%d (0x%x): both moved, old %d/%d, new %d/%d\n", - flow_id, flags, rfi->cur_idx, rfi->exp_idx, - cur_idx, exp_idx); - if (flags & BRCMF_RXREORDER_FLUSH_ALL) - end_idx = rfi->exp_idx; - else - end_idx = exp_idx; - - /* flush pkts first */ - brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx, end_idx, - &reorder_list); - - if (exp_idx == ((cur_idx + 1) % (rfi->max_idx + 1))) { - __skb_queue_tail(&reorder_list, pkt); - } else { - rfi->pktslots[cur_idx] = pkt; - rfi->pend_pkts++; - } - rfi->exp_idx = exp_idx; - rfi->cur_idx = cur_idx; - } - } else { - /* explicity window move updating the expected index */ - exp_idx = reorder_data[BRCMF_RXREORDER_EXPIDX_OFFSET]; - - brcmf_dbg(DATA, "flow-%d (0x%x): change expected: %d -> %d\n", - flow_id, flags, rfi->exp_idx, exp_idx); - if (flags & BRCMF_RXREORDER_FLUSH_ALL) - end_idx = rfi->exp_idx; - else - end_idx = exp_idx; - - brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx, end_idx, - &reorder_list); - __skb_queue_tail(&reorder_list, pkt); - /* set the new expected idx */ - rfi->exp_idx = exp_idx; - } -netif_rx: - skb_queue_walk_safe(&reorder_list, pkt, pnext) { - __skb_unlink(pkt, &reorder_list); - brcmf_netif_rx(ifp, pkt, false); - } -} - void brcmf_rx_frame(struct device *dev, struct sk_buff *skb, bool handle_evnt) { struct brcmf_if *ifp; struct brcmf_bus *bus_if = dev_get_drvdata(dev); struct brcmf_pub *drvr = bus_if->drvr; - struct brcmf_skb_reorder_data *rd; int ret; brcmf_dbg(DATA, "Enter: %s: rxp=%p\n", dev_name(dev), skb); @@ -557,9 +348,8 @@ void brcmf_rx_frame(struct device *dev, struct sk_buff *skb, bool handle_evnt) return; } - rd = (struct brcmf_skb_reorder_data *)skb->cb; - if (rd->reorder) - brcmf_rxreorder_process_info(ifp, rd->reorder, skb); + if (brcmf_proto_is_reorder_skb(skb)) + brcmf_proto_rxreorder(ifp, skb); else brcmf_netif_rx(ifp, skb, handle_evnt); } diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h index d3497e8bd59c..394ae050b960 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h @@ -208,10 +208,6 @@ struct brcmf_if { u8 ipv6addr_idx; }; -struct brcmf_skb_reorder_data { - u8 *reorder; -}; - int brcmf_netdev_wait_pend8021x(struct brcmf_if *ifp); /* Return pointer to interface name */ diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c index f82c9ab5480b..8a07687b46f8 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c @@ -92,6 +92,19 @@ enum brcmf_fws_tlv_len { }; #undef BRCMF_FWS_TLV_DEF +/* AMPDU rx reordering definitions */ +#define BRCMF_RXREORDER_FLOWID_OFFSET 0 +#define BRCMF_RXREORDER_MAXIDX_OFFSET 2 +#define BRCMF_RXREORDER_FLAGS_OFFSET 4 +#define BRCMF_RXREORDER_CURIDX_OFFSET 6 +#define BRCMF_RXREORDER_EXPIDX_OFFSET 8 + +#define BRCMF_RXREORDER_DEL_FLOW 0x01 +#define BRCMF_RXREORDER_FLUSH_ALL 0x02 +#define BRCMF_RXREORDER_CURIDX_VALID 0x04 
+#define BRCMF_RXREORDER_EXPIDX_VALID 0x08 +#define BRCMF_RXREORDER_NEW_HOLE 0x10 + #ifdef DEBUG /* * brcmf_fws_tlv_names - array of tlv names. @@ -1614,6 +1627,202 @@ static int brcmf_fws_notify_bcmc_credit_support(struct brcmf_if *ifp, return 0; } +static void brcmf_rxreorder_get_skb_list(struct brcmf_ampdu_rx_reorder *rfi, + u8 start, u8 end, + struct sk_buff_head *skb_list) +{ + /* initialize return list */ + __skb_queue_head_init(skb_list); + + if (rfi->pend_pkts == 0) { + brcmf_dbg(INFO, "no packets in reorder queue\n"); + return; + } + + do { + if (rfi->pktslots[start]) { + __skb_queue_tail(skb_list, rfi->pktslots[start]); + rfi->pktslots[start] = NULL; + } + start++; + if (start > rfi->max_idx) + start = 0; + } while (start != end); + rfi->pend_pkts -= skb_queue_len(skb_list); +} + +void brcmf_fws_rxreorder(struct brcmf_if *ifp, struct sk_buff *pkt) +{ + u8 *reorder_data; + u8 flow_id, max_idx, cur_idx, exp_idx, end_idx; + struct brcmf_ampdu_rx_reorder *rfi; + struct sk_buff_head reorder_list; + struct sk_buff *pnext; + u8 flags; + u32 buf_size; + + reorder_data = ((struct brcmf_skb_reorder_data *)pkt->cb)->reorder; + flow_id = reorder_data[BRCMF_RXREORDER_FLOWID_OFFSET]; + flags = reorder_data[BRCMF_RXREORDER_FLAGS_OFFSET]; + + /* validate flags and flow id */ + if (flags == 0xFF) { + brcmf_err("invalid flags...so ignore this packet\n"); + brcmf_netif_rx(ifp, pkt, false); + return; + } + + rfi = ifp->drvr->reorder_flows[flow_id]; + if (flags & BRCMF_RXREORDER_DEL_FLOW) { + brcmf_dbg(INFO, "flow-%d: delete\n", + flow_id); + + if (rfi == NULL) { + brcmf_dbg(INFO, "received flags to cleanup, but no flow (%d) yet\n", + flow_id); + brcmf_netif_rx(ifp, pkt, false); + return; + } + + brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx, rfi->exp_idx, + &reorder_list); + /* add the last packet */ + __skb_queue_tail(&reorder_list, pkt); + kfree(rfi); + ifp->drvr->reorder_flows[flow_id] = NULL; + goto netif_rx; + } + /* from here on we need a flow reorder instance */ + if (rfi == NULL) { + buf_size = sizeof(*rfi); + max_idx = reorder_data[BRCMF_RXREORDER_MAXIDX_OFFSET]; + + buf_size += (max_idx + 1) * sizeof(pkt); + + /* allocate space for flow reorder info */ + brcmf_dbg(INFO, "flow-%d: start, maxidx %d\n", + flow_id, max_idx); + rfi = kzalloc(buf_size, GFP_ATOMIC); + if (rfi == NULL) { + brcmf_err("failed to alloc buffer\n"); + brcmf_netif_rx(ifp, pkt, false); + return; + } + + ifp->drvr->reorder_flows[flow_id] = rfi; + rfi->pktslots = (struct sk_buff **)(rfi + 1); + rfi->max_idx = max_idx; + } + if (flags & BRCMF_RXREORDER_NEW_HOLE) { + if (rfi->pend_pkts) { + brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx, + rfi->exp_idx, + &reorder_list); + WARN_ON(rfi->pend_pkts); + } else { + __skb_queue_head_init(&reorder_list); + } + rfi->cur_idx = reorder_data[BRCMF_RXREORDER_CURIDX_OFFSET]; + rfi->exp_idx = reorder_data[BRCMF_RXREORDER_EXPIDX_OFFSET]; + rfi->max_idx = reorder_data[BRCMF_RXREORDER_MAXIDX_OFFSET]; + rfi->pktslots[rfi->cur_idx] = pkt; + rfi->pend_pkts++; + brcmf_dbg(DATA, "flow-%d: new hole %d (%d), pending %d\n", + flow_id, rfi->cur_idx, rfi->exp_idx, rfi->pend_pkts); + } else if (flags & BRCMF_RXREORDER_CURIDX_VALID) { + cur_idx = reorder_data[BRCMF_RXREORDER_CURIDX_OFFSET]; + exp_idx = reorder_data[BRCMF_RXREORDER_EXPIDX_OFFSET]; + + if ((exp_idx == rfi->exp_idx) && (cur_idx != rfi->exp_idx)) { + /* still in the current hole */ + /* enqueue the current on the buffer chain */ + if (rfi->pktslots[cur_idx] != NULL) { + brcmf_dbg(INFO, "HOLE: ERROR buffer pending..free it\n"); + 
brcmu_pkt_buf_free_skb(rfi->pktslots[cur_idx]); + rfi->pktslots[cur_idx] = NULL; + } + rfi->pktslots[cur_idx] = pkt; + rfi->pend_pkts++; + rfi->cur_idx = cur_idx; + brcmf_dbg(DATA, "flow-%d: store pkt %d (%d), pending %d\n", + flow_id, cur_idx, exp_idx, rfi->pend_pkts); + + /* can return now as there is no reorder + * list to process. + */ + return; + } + if (rfi->exp_idx == cur_idx) { + if (rfi->pktslots[cur_idx] != NULL) { + brcmf_dbg(INFO, "error buffer pending..free it\n"); + brcmu_pkt_buf_free_skb(rfi->pktslots[cur_idx]); + rfi->pktslots[cur_idx] = NULL; + } + rfi->pktslots[cur_idx] = pkt; + rfi->pend_pkts++; + + /* got the expected one. flush from current to expected + * and update expected + */ + brcmf_dbg(DATA, "flow-%d: expected %d (%d), pending %d\n", + flow_id, cur_idx, exp_idx, rfi->pend_pkts); + + rfi->cur_idx = cur_idx; + rfi->exp_idx = exp_idx; + + brcmf_rxreorder_get_skb_list(rfi, cur_idx, exp_idx, + &reorder_list); + brcmf_dbg(DATA, "flow-%d: freeing buffers %d, pending %d\n", + flow_id, skb_queue_len(&reorder_list), + rfi->pend_pkts); + } else { + u8 end_idx; + + brcmf_dbg(DATA, "flow-%d (0x%x): both moved, old %d/%d, new %d/%d\n", + flow_id, flags, rfi->cur_idx, rfi->exp_idx, + cur_idx, exp_idx); + if (flags & BRCMF_RXREORDER_FLUSH_ALL) + end_idx = rfi->exp_idx; + else + end_idx = exp_idx; + + /* flush pkts first */ + brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx, end_idx, + &reorder_list); + + if (exp_idx == ((cur_idx + 1) % (rfi->max_idx + 1))) { + __skb_queue_tail(&reorder_list, pkt); + } else { + rfi->pktslots[cur_idx] = pkt; + rfi->pend_pkts++; + } + rfi->exp_idx = exp_idx; + rfi->cur_idx = cur_idx; + } + } else { + /* explicity window move updating the expected index */ + exp_idx = reorder_data[BRCMF_RXREORDER_EXPIDX_OFFSET]; + + brcmf_dbg(DATA, "flow-%d (0x%x): change expected: %d -> %d\n", + flow_id, flags, rfi->exp_idx, exp_idx); + if (flags & BRCMF_RXREORDER_FLUSH_ALL) + end_idx = rfi->exp_idx; + else + end_idx = exp_idx; + + brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx, end_idx, + &reorder_list); + __skb_queue_tail(&reorder_list, pkt); + /* set the new expected idx */ + rfi->exp_idx = exp_idx; + } +netif_rx: + skb_queue_walk_safe(&reorder_list, pkt, pnext) { + __skb_unlink(pkt, &reorder_list); + brcmf_netif_rx(ifp, pkt, false); + } +} + void brcmf_fws_hdrpull(struct brcmf_if *ifp, s16 siglen, struct sk_buff *skb) { struct brcmf_skb_reorder_data *rd; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h index a36bac17eafd..ef0ad8597c8a 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h @@ -29,5 +29,6 @@ void brcmf_fws_add_interface(struct brcmf_if *ifp); void brcmf_fws_del_interface(struct brcmf_if *ifp); void brcmf_fws_bustxfail(struct brcmf_fws_info *fws, struct sk_buff *skb); void brcmf_fws_bus_blocked(struct brcmf_pub *drvr, bool flow_blocked); +void brcmf_fws_rxreorder(struct brcmf_if *ifp, struct sk_buff *skb); #endif /* FWSIGNAL_H_ */ diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c index 3795354b7f12..8c064ab24b83 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c @@ -527,6 +527,9 @@ static int brcmf_msgbuf_hdrpull(struct brcmf_pub *drvr, bool do_fws, return -ENODEV; } +static void brcmf_msgbuf_rxreorder(struct brcmf_if 
*ifp, struct sk_buff *skb) +{ +} static void brcmf_msgbuf_remove_flowring(struct brcmf_msgbuf *msgbuf, u16 flowid) @@ -1466,6 +1469,7 @@ int brcmf_proto_msgbuf_attach(struct brcmf_pub *drvr) drvr->proto->configure_addr_mode = brcmf_msgbuf_configure_addr_mode; drvr->proto->delete_peer = brcmf_msgbuf_delete_peer; drvr->proto->add_tdls_peer = brcmf_msgbuf_add_tdls_peer; + drvr->proto->rxreorder = brcmf_msgbuf_rxreorder; drvr->proto->pd = msgbuf; init_waitqueue_head(&msgbuf->ioctl_resp_wait); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h index d55119d36755..57531f42190e 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h @@ -22,6 +22,9 @@ enum proto_addr_mode { ADDR_DIRECT }; +struct brcmf_skb_reorder_data { + u8 *reorder; +}; struct brcmf_proto { int (*hdrpull)(struct brcmf_pub *drvr, bool do_fws, @@ -38,6 +41,7 @@ struct brcmf_proto { u8 peer[ETH_ALEN]); void (*add_tdls_peer)(struct brcmf_pub *drvr, int ifidx, u8 peer[ETH_ALEN]); + void (*rxreorder)(struct brcmf_if *ifp, struct sk_buff *skb); void *pd; }; @@ -91,6 +95,18 @@ brcmf_proto_add_tdls_peer(struct brcmf_pub *drvr, int ifidx, u8 peer[ETH_ALEN]) { drvr->proto->add_tdls_peer(drvr, ifidx, peer); } +static inline bool brcmf_proto_is_reorder_skb(struct sk_buff *skb) +{ + struct brcmf_skb_reorder_data *rd; + rd = (struct brcmf_skb_reorder_data *)skb->cb; + return !!rd->reorder; +} + +static inline void +brcmf_proto_rxreorder(struct brcmf_if *ifp, struct sk_buff *skb) +{ + ifp->drvr->proto->rxreorder(ifp, skb); +} #endif /* BRCMFMAC_PROTO_H */ From 9c349892ccc90c6de2baaa69cc78449f58082273 Mon Sep 17 00:00:00 2001 From: Arend van Spriel Date: Mon, 11 Apr 2016 11:35:27 +0200 Subject: [PATCH 0648/1649] brcmfmac: revise handling events in receive path Move event handling out of brcmf_netif_rx() avoiding the need to pass a flag. This flag is only ever true for USB hosts as other interface use separate brcmf_rx_event() function. Reviewed-by: Hante Meuleman Reviewed-by: Pieter-Paul Giesberts Reviewed-by: Franky Lin Signed-off-by: Arend van Spriel Signed-off-by: Kalle Valo --- .../broadcom/brcm80211/brcmfmac/bus.h | 2 +- .../broadcom/brcm80211/brcmfmac/core.c | 24 +++++++++---------- .../broadcom/brcm80211/brcmfmac/core.h | 3 +-- .../broadcom/brcm80211/brcmfmac/fwsignal.c | 8 +++---- .../broadcom/brcm80211/brcmfmac/msgbuf.c | 2 +- 5 files changed, 19 insertions(+), 20 deletions(-) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h index 31856eb57bc4..2b246545647a 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h @@ -216,7 +216,7 @@ bool brcmf_c_prec_enq(struct device *dev, struct pktq *q, struct sk_buff *pkt, int prec); /* Receive frame for delivery to OS. Callee disposes of rxp. */ -void brcmf_rx_frame(struct device *dev, struct sk_buff *rxp, bool handle_evnt); +void brcmf_rx_frame(struct device *dev, struct sk_buff *rxp, bool handle_event); /* Receive async event packet from firmware. Callee disposes of rxp. 
*/ void brcmf_rx_event(struct device *dev, struct sk_buff *rxp); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c index a30841bbc5a1..9b53555cf85c 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c @@ -298,18 +298,11 @@ void brcmf_txflowblock(struct device *dev, bool state) brcmf_fws_bus_blocked(drvr, state); } -void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb, - bool handle_event) +void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb) { - skb->protocol = eth_type_trans(skb, ifp->ndev); - if (skb->pkt_type == PACKET_MULTICAST) ifp->stats.multicast++; - /* Process special event packets */ - if (handle_event) - brcmf_fweh_process_skb(ifp->drvr, skb); - if (!(ifp->ndev->flags & IFF_UP)) { brcmu_pkt_buf_free_skb(skb); return; @@ -329,7 +322,7 @@ void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb, netif_rx_ni(skb); } -void brcmf_rx_frame(struct device *dev, struct sk_buff *skb, bool handle_evnt) +void brcmf_rx_frame(struct device *dev, struct sk_buff *skb, bool handle_event) { struct brcmf_if *ifp; struct brcmf_bus *bus_if = dev_get_drvdata(dev); @@ -348,10 +341,17 @@ void brcmf_rx_frame(struct device *dev, struct sk_buff *skb, bool handle_evnt) return; } - if (brcmf_proto_is_reorder_skb(skb)) + skb->protocol = eth_type_trans(skb, ifp->ndev); + + if (brcmf_proto_is_reorder_skb(skb)) { brcmf_proto_rxreorder(ifp, skb); - else - brcmf_netif_rx(ifp, skb, handle_evnt); + } else { + /* Process special event packets */ + if (handle_event) + brcmf_fweh_process_skb(ifp->drvr, skb); + + brcmf_netif_rx(ifp, skb); + } } void brcmf_rx_event(struct device *dev, struct sk_buff *skb) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h index 394ae050b960..241ee8d13e54 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h @@ -221,8 +221,7 @@ int brcmf_get_next_free_bsscfgidx(struct brcmf_pub *drvr); void brcmf_txflowblock_if(struct brcmf_if *ifp, enum brcmf_netif_stop_reason reason, bool state); void brcmf_txfinalize(struct brcmf_if *ifp, struct sk_buff *txp, bool success); -void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb, - bool handle_event); +void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb); void brcmf_net_setcarrier(struct brcmf_if *ifp, bool on); int __init brcmf_core_init(void); void __exit brcmf_core_exit(void); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c index 8a07687b46f8..5b30922b67ec 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c @@ -1668,7 +1668,7 @@ void brcmf_fws_rxreorder(struct brcmf_if *ifp, struct sk_buff *pkt) /* validate flags and flow id */ if (flags == 0xFF) { brcmf_err("invalid flags...so ignore this packet\n"); - brcmf_netif_rx(ifp, pkt, false); + brcmf_netif_rx(ifp, pkt); return; } @@ -1680,7 +1680,7 @@ void brcmf_fws_rxreorder(struct brcmf_if *ifp, struct sk_buff *pkt) if (rfi == NULL) { brcmf_dbg(INFO, "received flags to cleanup, but no flow (%d) yet\n", flow_id); - brcmf_netif_rx(ifp, pkt, false); + brcmf_netif_rx(ifp, pkt); return; } @@ -1705,7 +1705,7 @@ void brcmf_fws_rxreorder(struct brcmf_if *ifp, struct sk_buff *pkt) rfi = 
kzalloc(buf_size, GFP_ATOMIC); if (rfi == NULL) { brcmf_err("failed to alloc buffer\n"); - brcmf_netif_rx(ifp, pkt, false); + brcmf_netif_rx(ifp, pkt); return; } @@ -1819,7 +1819,7 @@ void brcmf_fws_rxreorder(struct brcmf_if *ifp, struct sk_buff *pkt) netif_rx: skb_queue_walk_safe(&reorder_list, pkt, pnext) { __skb_unlink(pkt, &reorder_list); - brcmf_netif_rx(ifp, pkt, false); + brcmf_netif_rx(ifp, pkt); } } diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c index 8c064ab24b83..68f1ce02f4bf 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c @@ -1157,7 +1157,7 @@ brcmf_msgbuf_process_rx_complete(struct brcmf_msgbuf *msgbuf, void *buf) brcmu_pkt_buf_free_skb(skb); return; } - brcmf_netif_rx(ifp, skb, false); + brcmf_netif_rx(ifp, skb); } From c462ebcdfe425a132aaba950d114ac32de3cdf44 Mon Sep 17 00:00:00 2001 From: Arend van Spriel Date: Mon, 11 Apr 2016 11:35:28 +0200 Subject: [PATCH 0649/1649] brcmfmac: create common function for handling brcmf_proto_hdrpull() In receive path brcmf_proto_hdrpull() needs to be called and handled similar in brcmf_rx_frame() and brcmf_rx_event(). Move that duplicated code in separate function. Reviewed-by: Hante Meuleman Reviewed-by: Pieter-Paul Giesberts Reviewed-by: Franky Lin Signed-off-by: Arend van Spriel Signed-off-by: Kalle Valo --- .../broadcom/brcm80211/brcmfmac/core.c | 43 +++++++++---------- 1 file changed, 21 insertions(+), 22 deletions(-) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c index 9b53555cf85c..1b476d1fec2c 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c @@ -322,26 +322,35 @@ void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb) netif_rx_ni(skb); } +static int brcmf_rx_hdrpull(struct brcmf_pub *drvr, struct sk_buff *skb, + struct brcmf_if **ifp) +{ + int ret; + + /* process and remove protocol-specific header */ + ret = brcmf_proto_hdrpull(drvr, true, skb, ifp); + + if (ret || !(*ifp) || !(*ifp)->ndev) { + if (ret != -ENODATA && ifp) + (*ifp)->stats.rx_errors++; + brcmu_pkt_buf_free_skb(skb); + return -ENODATA; + } + + skb->protocol = eth_type_trans(skb, (*ifp)->ndev); + return 0; +} + void brcmf_rx_frame(struct device *dev, struct sk_buff *skb, bool handle_event) { struct brcmf_if *ifp; struct brcmf_bus *bus_if = dev_get_drvdata(dev); struct brcmf_pub *drvr = bus_if->drvr; - int ret; brcmf_dbg(DATA, "Enter: %s: rxp=%p\n", dev_name(dev), skb); - /* process and remove protocol-specific header */ - ret = brcmf_proto_hdrpull(drvr, true, skb, &ifp); - - if (ret || !ifp || !ifp->ndev) { - if (ret != -ENODATA && ifp) - ifp->stats.rx_errors++; - brcmu_pkt_buf_free_skb(skb); + if (brcmf_rx_hdrpull(drvr, skb, &ifp)) return; - } - - skb->protocol = eth_type_trans(skb, ifp->ndev); if (brcmf_proto_is_reorder_skb(skb)) { brcmf_proto_rxreorder(ifp, skb); @@ -359,21 +368,11 @@ void brcmf_rx_event(struct device *dev, struct sk_buff *skb) struct brcmf_if *ifp; struct brcmf_bus *bus_if = dev_get_drvdata(dev); struct brcmf_pub *drvr = bus_if->drvr; - int ret; brcmf_dbg(EVENT, "Enter: %s: rxp=%p\n", dev_name(dev), skb); - /* process and remove protocol-specific header */ - ret = brcmf_proto_hdrpull(drvr, true, skb, &ifp); - - if (ret || !ifp || !ifp->ndev) { - if (ret != -ENODATA && ifp) - ifp->stats.rx_errors++; - 
brcmu_pkt_buf_free_skb(skb); + if (brcmf_rx_hdrpull(drvr, skb, &ifp)) return; - } - - skb->protocol = eth_type_trans(skb, ifp->ndev); brcmf_fweh_process_skb(ifp->drvr, skb); brcmu_pkt_buf_free_skb(skb); From c865a70098d9419d154f6d1ad97cd0150af968d1 Mon Sep 17 00:00:00 2001 From: Amitkumar Karwar Date: Mon, 11 Apr 2016 07:52:37 -0700 Subject: [PATCH 0650/1649] mwifiex: missing break statement This patch adds missing break statement at the end of PCIE_DEVICE_ID_MARVELL_88W8897 switch section. Signed-off-by: Amitkumar Karwar Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/pcie.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c index edf8b070f665..c28edbb73d8f 100644 --- a/drivers/net/wireless/marvell/mwifiex/pcie.c +++ b/drivers/net/wireless/marvell/mwifiex/pcie.c @@ -2831,6 +2831,7 @@ static void mwifiex_pcie_get_fw_name(struct mwifiex_adapter *adapter) default: break; } + break; case PCIE_DEVICE_ID_MARVELL_88W8997: mwifiex_read_reg(adapter, 0x0c48, &revision_id); switch (revision_id) { From e87650bce9a5499f133341db99c9293bfb8a0282 Mon Sep 17 00:00:00 2001 From: Shengzhen Li Date: Mon, 11 Apr 2016 07:52:38 -0700 Subject: [PATCH 0651/1649] mwifiex: add pcie usb/uart firmware download support This patch adds support for downloading usb/uart firmware for 8997 chipset by reading the chip version. Signed-off-by: Shengzhen Li Signed-off-by: Amitkumar Karwar Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/pcie.c | 17 +++++++++++++++-- drivers/net/wireless/marvell/mwifiex/pcie.h | 7 +++++-- 2 files changed, 20 insertions(+), 4 deletions(-) diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c index c28edbb73d8f..1d888b501e16 100644 --- a/drivers/net/wireless/marvell/mwifiex/pcie.c +++ b/drivers/net/wireless/marvell/mwifiex/pcie.c @@ -2811,6 +2811,7 @@ static int mwifiex_pcie_request_irq(struct mwifiex_adapter *adapter) static void mwifiex_pcie_get_fw_name(struct mwifiex_adapter *adapter) { int revision_id = 0; + int version; struct pcie_service_card *card = adapter->card; switch (card->dev->device) { @@ -2834,12 +2835,24 @@ static void mwifiex_pcie_get_fw_name(struct mwifiex_adapter *adapter) break; case PCIE_DEVICE_ID_MARVELL_88W8997: mwifiex_read_reg(adapter, 0x0c48, &revision_id); + mwifiex_read_reg(adapter, 0x0cd0, &version); + version &= 0x7; switch (revision_id) { case PCIE8997_V2: - strcpy(adapter->fw_name, PCIE8997_FW_NAME_V2); + if (version == CHIP_VER_PCIEUSB) + strcpy(adapter->fw_name, + PCIEUSB8997_FW_NAME_V2); + else + strcpy(adapter->fw_name, + PCIEUART8997_FW_NAME_V2); break; case PCIE8997_Z: - strcpy(adapter->fw_name, PCIE8997_FW_NAME_Z); + if (version == CHIP_VER_PCIEUSB) + strcpy(adapter->fw_name, + PCIEUSB8997_FW_NAME_Z); + else + strcpy(adapter->fw_name, + PCIEUART8997_FW_NAME_Z); break; default: break; diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.h b/drivers/net/wireless/marvell/mwifiex/pcie.h index cc7a5df903be..bbabfb01a10a 100644 --- a/drivers/net/wireless/marvell/mwifiex/pcie.h +++ b/drivers/net/wireless/marvell/mwifiex/pcie.h @@ -32,8 +32,10 @@ #define PCIE8766_DEFAULT_FW_NAME "mrvl/pcie8766_uapsta.bin" #define PCIE8897_A0_FW_NAME "mrvl/pcie8897_uapsta_a0.bin" #define PCIE8897_B0_FW_NAME "mrvl/pcie8897_uapsta.bin" -#define PCIE8997_FW_NAME_Z "mrvl/pcieusb8997_combo.bin" -#define PCIE8997_FW_NAME_V2 "mrvl/pcieusb8997_combo_v2.bin" +#define PCIEUART8997_FW_NAME_Z 
"mrvl/pcieuart8997_combo.bin" +#define PCIEUART8997_FW_NAME_V2 "mrvl/pcieuart8997_combo_v2.bin" +#define PCIEUSB8997_FW_NAME_Z "mrvl/pcieusb8997_combo.bin" +#define PCIEUSB8997_FW_NAME_V2 "mrvl/pcieusb8997_combo_v2.bin" #define PCIE_VENDOR_ID_MARVELL (0x11ab) #define PCIE_VENDOR_ID_V2_MARVELL (0x1b4b) @@ -45,6 +47,7 @@ #define PCIE8897_B0 0x1200 #define PCIE8997_Z 0x0 #define PCIE8997_V2 0x471 +#define CHIP_VER_PCIEUSB 0x2 /* Constants for Buffer Descriptor (BD) rings */ #define MWIFIEX_MAX_TXRX_BD 0x20 From b9db397879333c272f2bc888cd713773e7d2604f Mon Sep 17 00:00:00 2001 From: Shengzhen Li Date: Mon, 11 Apr 2016 07:52:39 -0700 Subject: [PATCH 0652/1649] mwifiex: add default setting for pcie firmware download This patch adds default setting for pcie firmware download name in case that there are newer chipset version. Signed-off-by: Shengzhen Li Signed-off-by: Amitkumar Karwar Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/pcie.c | 3 +++ drivers/net/wireless/marvell/mwifiex/pcie.h | 2 ++ 2 files changed, 5 insertions(+) diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c index 1d888b501e16..0c7937eb6b77 100644 --- a/drivers/net/wireless/marvell/mwifiex/pcie.c +++ b/drivers/net/wireless/marvell/mwifiex/pcie.c @@ -2830,6 +2830,8 @@ static void mwifiex_pcie_get_fw_name(struct mwifiex_adapter *adapter) strcpy(adapter->fw_name, PCIE8897_B0_FW_NAME); break; default: + strcpy(adapter->fw_name, PCIE8897_DEFAULT_FW_NAME); + break; } break; @@ -2855,6 +2857,7 @@ static void mwifiex_pcie_get_fw_name(struct mwifiex_adapter *adapter) PCIEUART8997_FW_NAME_Z); break; default: + strcpy(adapter->fw_name, PCIE8997_DEFAULT_FW_NAME); break; } default: diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.h b/drivers/net/wireless/marvell/mwifiex/pcie.h index bbabfb01a10a..5770b4396b21 100644 --- a/drivers/net/wireless/marvell/mwifiex/pcie.h +++ b/drivers/net/wireless/marvell/mwifiex/pcie.h @@ -30,8 +30,10 @@ #include "main.h" #define PCIE8766_DEFAULT_FW_NAME "mrvl/pcie8766_uapsta.bin" +#define PCIE8897_DEFAULT_FW_NAME "mrvl/pcie8897_uapsta.bin" #define PCIE8897_A0_FW_NAME "mrvl/pcie8897_uapsta_a0.bin" #define PCIE8897_B0_FW_NAME "mrvl/pcie8897_uapsta.bin" +#define PCIE8997_DEFAULT_FW_NAME "mrvl/pcieuart8997_combo_v2.bin" #define PCIEUART8997_FW_NAME_Z "mrvl/pcieuart8997_combo.bin" #define PCIEUART8997_FW_NAME_V2 "mrvl/pcieuart8997_combo_v2.bin" #define PCIEUSB8997_FW_NAME_Z "mrvl/pcieusb8997_combo.bin" From c3ec0ff6425b86655aa34b5c6cb0f7b6d559c6b2 Mon Sep 17 00:00:00 2001 From: Xinming Hu Date: Mon, 11 Apr 2016 07:52:40 -0700 Subject: [PATCH 0653/1649] mwifiex: do not wait on semaphore during card removal Host hang is observed if card is removed before firmware download gets completed. In this case, firmware will be failed to download and adapter structure gets freed. In other thread, mwifiex_remove_card() waits on semaphore until the firmware download fails. This wait is not necessary and may result in invalid adapter access. This patch uses down_trylock to return immediately so that hang issue won't occur. 
Signed-off-by: Xinming Hu Signed-off-by: Amitkumar Karwar Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c index 04b975cbb330..b459c70dc43f 100644 --- a/drivers/net/wireless/marvell/mwifiex/main.c +++ b/drivers/net/wireless/marvell/mwifiex/main.c @@ -1434,7 +1434,7 @@ int mwifiex_remove_card(struct mwifiex_adapter *adapter, struct semaphore *sem) struct mwifiex_private *priv = NULL; int i; - if (down_interruptible(sem)) + if (down_trylock(sem)) goto exit_sem_err; if (!adapter) From bcc920e8f08336cbbdcdba7c4449c27137e6b4b9 Mon Sep 17 00:00:00 2001 From: Amitkumar Karwar Date: Mon, 11 Apr 2016 07:52:41 -0700 Subject: [PATCH 0654/1649] mwifiex: fix incorrect ht capability problem IEEE80211_CHAN_NO_HT40PLUS and IEEE80211_CHAN_NO_HT40PLUS channel flags tell if HT40 operation is allowed on a channel or not. This patch ensures ht_capability information is modified accordingly so that we don't end up creating a HT40 connection when it's not allowed for current regulatory domain. Signed-off-by: Amitkumar Karwar Signed-off-by: Cathy Luo Signed-off-by: Kalle Valo --- .../net/wireless/marvell/mwifiex/sta_ioctl.c | 44 ++++++++++++++++++- 1 file changed, 43 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c index d8de432d46a2..8e0862657122 100644 --- a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c +++ b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c @@ -146,6 +146,7 @@ int mwifiex_fill_new_bss_desc(struct mwifiex_private *priv, size_t beacon_ie_len; struct mwifiex_bss_priv *bss_priv = (void *)bss->priv; const struct cfg80211_bss_ies *ies; + int ret; rcu_read_lock(); ies = rcu_dereference(bss->ies); @@ -189,7 +190,48 @@ int mwifiex_fill_new_bss_desc(struct mwifiex_private *priv, if (bss_desc->cap_info_bitmap & WLAN_CAPABILITY_SPECTRUM_MGMT) bss_desc->sensed_11h = true; - return mwifiex_update_bss_desc_with_ie(priv->adapter, bss_desc); + ret = mwifiex_update_bss_desc_with_ie(priv->adapter, bss_desc); + if (ret) + return ret; + + /* Update HT40 capability based on current channel information */ + if (bss_desc->bcn_ht_oper && bss_desc->bcn_ht_cap) { + u8 ht_param = bss_desc->bcn_ht_oper->ht_param; + u8 radio = mwifiex_band_to_radio_type(bss_desc->bss_band); + struct ieee80211_supported_band *sband = + priv->wdev.wiphy->bands[radio]; + int freq = ieee80211_channel_to_frequency(bss_desc->channel, + radio); + struct ieee80211_channel *chan = + ieee80211_get_channel(priv->adapter->wiphy, freq); + + switch (ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) { + case IEEE80211_HT_PARAM_CHA_SEC_ABOVE: + if (chan->flags & IEEE80211_CHAN_NO_HT40PLUS) { + sband->ht_cap.cap &= + ~IEEE80211_HT_CAP_SUP_WIDTH_20_40; + sband->ht_cap.cap &= ~IEEE80211_HT_CAP_SGI_40; + } else { + sband->ht_cap.cap |= + IEEE80211_HT_CAP_SUP_WIDTH_20_40 | + IEEE80211_HT_CAP_SGI_40; + } + break; + case IEEE80211_HT_PARAM_CHA_SEC_BELOW: + if (chan->flags & IEEE80211_CHAN_NO_HT40MINUS) { + sband->ht_cap.cap &= + ~IEEE80211_HT_CAP_SUP_WIDTH_20_40; + sband->ht_cap.cap &= ~IEEE80211_HT_CAP_SGI_40; + } else { + sband->ht_cap.cap |= + IEEE80211_HT_CAP_SUP_WIDTH_20_40 | + IEEE80211_HT_CAP_SGI_40; + } + break; + } + } + + return 0; } void mwifiex_dnld_txpwr_table(struct mwifiex_private *priv) From 84349698f05dc7ed6df6772d43175eb3703ed8dc Mon Sep 17 00:00:00 2001 From: Vishal Thanki 
Date: Thu, 7 Apr 2016 23:37:12 +0200 Subject: [PATCH 0655/1649] mwifiex: fix the incorrect WARN_ON during suspend During system suspend, there is a kernel WARNING issued if there is a pending command present. By marking the wait queue disabled after calling the command completion routine fixes it. Signed-off-by: Vishal Thanki Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/cmdevt.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/marvell/mwifiex/cmdevt.c b/drivers/net/wireless/marvell/mwifiex/cmdevt.c index a12adee776c6..6f0470646483 100644 --- a/drivers/net/wireless/marvell/mwifiex/cmdevt.c +++ b/drivers/net/wireless/marvell/mwifiex/cmdevt.c @@ -1009,9 +1009,9 @@ mwifiex_cancel_all_pending_cmd(struct mwifiex_adapter *adapter) spin_lock_irqsave(&adapter->mwifiex_cmd_lock, cmd_flags); /* Cancel current cmd */ if ((adapter->curr_cmd) && (adapter->curr_cmd->wait_q_enabled)) { - adapter->curr_cmd->wait_q_enabled = false; adapter->cmd_wait_q.status = -1; mwifiex_complete_cmd(adapter, adapter->curr_cmd); + adapter->curr_cmd->wait_q_enabled = false; /* no recycle probably wait for response */ } /* Cancel all pending command */ From 14e105cd4085c7bbf42f0eef8c677ad6c7d78a83 Mon Sep 17 00:00:00 2001 From: Kalle Valo Date: Wed, 13 Apr 2016 14:13:21 +0300 Subject: [PATCH 0656/1649] ath10k: fix checkpatch warnings related to spaces Fix checkpatch warnings about use of spaces with operators: spaces preferred around that '*' (ctx:VxV) This has been recently added to checkpatch. Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/ce.c | 6 +++--- drivers/net/wireless/ath/ath10k/ce.h | 2 +- drivers/net/wireless/ath/ath10k/core.h | 6 +++--- drivers/net/wireless/ath/ath10k/debug.h | 2 +- drivers/net/wireless/ath/ath10k/htc.h | 4 ++-- drivers/net/wireless/ath/ath10k/htt.c | 2 +- drivers/net/wireless/ath/ath10k/mac.c | 8 ++++---- drivers/net/wireless/ath/ath10k/targaddrs.h | 2 +- drivers/net/wireless/ath/ath10k/thermal.h | 2 +- drivers/net/wireless/ath/ath10k/txrx.c | 2 +- drivers/net/wireless/ath/ath10k/wmi-tlv.h | 4 ++-- drivers/net/wireless/ath/ath10k/wmi.c | 2 +- drivers/net/wireless/ath/ath10k/wmi.h | 18 +++++++++--------- 13 files changed, 30 insertions(+), 30 deletions(-) diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c index 7212802eb327..9fb8d7472d18 100644 --- a/drivers/net/wireless/ath/ath10k/ce.c +++ b/drivers/net/wireless/ath/ath10k/ce.c @@ -1050,11 +1050,11 @@ int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id, * * For the lack of a better place do the check here. */ - BUILD_BUG_ON(2*TARGET_NUM_MSDU_DESC > + BUILD_BUG_ON(2 * TARGET_NUM_MSDU_DESC > (CE_HTT_H2T_MSG_SRC_NENTRIES - 1)); - BUILD_BUG_ON(2*TARGET_10X_NUM_MSDU_DESC > + BUILD_BUG_ON(2 * TARGET_10X_NUM_MSDU_DESC > (CE_HTT_H2T_MSG_SRC_NENTRIES - 1)); - BUILD_BUG_ON(2*TARGET_TLV_NUM_MSDU_DESC > + BUILD_BUG_ON(2 * TARGET_TLV_NUM_MSDU_DESC > (CE_HTT_H2T_MSG_SRC_NENTRIES - 1)); ce_state->ar = ar; diff --git a/drivers/net/wireless/ath/ath10k/ce.h b/drivers/net/wireless/ath/ath10k/ce.h index 25cafcfd6b12..dfc098606bee 100644 --- a/drivers/net/wireless/ath/ath10k/ce.h +++ b/drivers/net/wireless/ath/ath10k/ce.h @@ -408,7 +408,7 @@ static inline u32 ath10k_ce_base_address(struct ath10k *ar, unsigned int ce_id) /* Ring arithmetic (modulus number of entries in ring, which is a pwr of 2). 
*/ #define CE_RING_DELTA(nentries_mask, fromidx, toidx) \ - (((int)(toidx)-(int)(fromidx)) & (nentries_mask)) + (((int)(toidx) - (int)(fromidx)) & (nentries_mask)) #define CE_RING_IDX_INCR(nentries_mask, idx) (((idx) + 1) & (nentries_mask)) #define CE_RING_IDX_ADD(nentries_mask, idx, num) \ diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h index d85b99164212..e6f889df1e0d 100644 --- a/drivers/net/wireless/ath/ath10k/core.h +++ b/drivers/net/wireless/ath/ath10k/core.h @@ -44,8 +44,8 @@ #define ATH10K_SCAN_ID 0 #define WMI_READY_TIMEOUT (5 * HZ) -#define ATH10K_FLUSH_TIMEOUT_HZ (5*HZ) -#define ATH10K_CONNECTION_LOSS_HZ (3*HZ) +#define ATH10K_FLUSH_TIMEOUT_HZ (5 * HZ) +#define ATH10K_CONNECTION_LOSS_HZ (3 * HZ) #define ATH10K_NUM_CHANS 39 /* Antenna noise floor */ @@ -334,7 +334,7 @@ struct ath10k_sta { #endif }; -#define ATH10K_VDEV_SETUP_TIMEOUT_HZ (5*HZ) +#define ATH10K_VDEV_SETUP_TIMEOUT_HZ (5 * HZ) enum ath10k_beacon_state { ATH10K_BEACON_SCHEDULED = 0, diff --git a/drivers/net/wireless/ath/ath10k/debug.h b/drivers/net/wireless/ath/ath10k/debug.h index 6206edd7c49f..75c89e3625ef 100644 --- a/drivers/net/wireless/ath/ath10k/debug.h +++ b/drivers/net/wireless/ath/ath10k/debug.h @@ -57,7 +57,7 @@ enum ath10k_dbg_aggr_mode { }; /* FIXME: How to calculate the buffer size sanely? */ -#define ATH10K_FW_STATS_BUF_SIZE (1024*1024) +#define ATH10K_FW_STATS_BUF_SIZE (1024 * 1024) extern unsigned int ath10k_debug_mask; diff --git a/drivers/net/wireless/ath/ath10k/htc.h b/drivers/net/wireless/ath/ath10k/htc.h index e70aa38e6e05..cc827185d3e9 100644 --- a/drivers/net/wireless/ath/ath10k/htc.h +++ b/drivers/net/wireless/ath/ath10k/htc.h @@ -297,10 +297,10 @@ struct ath10k_htc_svc_conn_resp { #define ATH10K_NUM_CONTROL_TX_BUFFERS 2 #define ATH10K_HTC_MAX_LEN 4096 #define ATH10K_HTC_MAX_CTRL_MSG_LEN 256 -#define ATH10K_HTC_WAIT_TIMEOUT_HZ (1*HZ) +#define ATH10K_HTC_WAIT_TIMEOUT_HZ (1 * HZ) #define ATH10K_HTC_CONTROL_BUFFER_SIZE (ATH10K_HTC_MAX_CTRL_MSG_LEN + \ sizeof(struct ath10k_htc_hdr)) -#define ATH10K_HTC_CONN_SVC_TIMEOUT_HZ (1*HZ) +#define ATH10K_HTC_CONN_SVC_TIMEOUT_HZ (1 * HZ) struct ath10k_htc_ep { struct ath10k_htc *htc; diff --git a/drivers/net/wireless/ath/ath10k/htt.c b/drivers/net/wireless/ath/ath10k/htt.c index 17a3008d9ab1..ee79512b1fcc 100644 --- a/drivers/net/wireless/ath/ath10k/htt.c +++ b/drivers/net/wireless/ath/ath10k/htt.c @@ -208,7 +208,7 @@ int ath10k_htt_init(struct ath10k *ar) return 0; } -#define HTT_TARGET_VERSION_TIMEOUT_HZ (3*HZ) +#define HTT_TARGET_VERSION_TIMEOUT_HZ (3 * HZ) static int ath10k_htt_verify_version(struct ath10k_htt *htt) { diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index c30a3944b612..4300410ecddf 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -3846,7 +3846,7 @@ static int ath10k_scan_stop(struct ath10k *ar) goto out; } - ret = wait_for_completion_timeout(&ar->scan.completed, 3*HZ); + ret = wait_for_completion_timeout(&ar->scan.completed, 3 * HZ); if (ret == 0) { ath10k_warn(ar, "failed to receive scan abortion completion: timed out\n"); ret = -ETIMEDOUT; @@ -3926,7 +3926,7 @@ static int ath10k_start_scan(struct ath10k *ar, if (ret) return ret; - ret = wait_for_completion_timeout(&ar->scan.started, 1*HZ); + ret = wait_for_completion_timeout(&ar->scan.started, 1 * HZ); if (ret == 0) { ret = ath10k_scan_stop(ar); if (ret) @@ -6168,7 +6168,7 @@ exit: return ret; } -#define ATH10K_ROC_TIMEOUT_HZ (2*HZ) +#define 
ATH10K_ROC_TIMEOUT_HZ (2 * HZ) static int ath10k_remain_on_channel(struct ieee80211_hw *hw, struct ieee80211_vif *vif, @@ -6232,7 +6232,7 @@ static int ath10k_remain_on_channel(struct ieee80211_hw *hw, goto exit; } - ret = wait_for_completion_timeout(&ar->scan.on_channel, 3*HZ); + ret = wait_for_completion_timeout(&ar->scan.on_channel, 3 * HZ); if (ret == 0) { ath10k_warn(ar, "failed to switch to channel for roc scan\n"); diff --git a/drivers/net/wireless/ath/ath10k/targaddrs.h b/drivers/net/wireless/ath/ath10k/targaddrs.h index 361f143b019c..8e24099fa936 100644 --- a/drivers/net/wireless/ath/ath10k/targaddrs.h +++ b/drivers/net/wireless/ath/ath10k/targaddrs.h @@ -438,7 +438,7 @@ Fw Mode/SubMode Mask ((HOST_INTEREST->hi_pwr_save_flags & HI_PWR_SAVE_LPL_ENABLED)) #define HI_DEV_LPL_TYPE_GET(_devix) \ (HOST_INTEREST->hi_pwr_save_flags & ((HI_PWR_SAVE_LPL_DEV_MASK) << \ - (HI_PWR_SAVE_LPL_DEV0_LSB + (_devix)*2))) + (HI_PWR_SAVE_LPL_DEV0_LSB + (_devix) * 2))) #define HOST_INTEREST_SMPS_IS_ALLOWED() \ ((HOST_INTEREST->hi_smps_options & HI_SMPS_ALLOW_MASK)) diff --git a/drivers/net/wireless/ath/ath10k/thermal.h b/drivers/net/wireless/ath/ath10k/thermal.h index c9223e9e962f..3abb97f63b1e 100644 --- a/drivers/net/wireless/ath/ath10k/thermal.h +++ b/drivers/net/wireless/ath/ath10k/thermal.h @@ -20,7 +20,7 @@ #define ATH10K_QUIET_PERIOD_MIN 25 #define ATH10K_QUIET_START_OFFSET 10 #define ATH10K_HWMON_NAME_LEN 15 -#define ATH10K_THERMAL_SYNC_TIMEOUT_HZ (5*HZ) +#define ATH10K_THERMAL_SYNC_TIMEOUT_HZ (5 * HZ) #define ATH10K_THERMAL_THROTTLE_MAX 100 struct ath10k_thermal { diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c index 9369411a9ac0..c503ff601a54 100644 --- a/drivers/net/wireless/ath/ath10k/txrx.c +++ b/drivers/net/wireless/ath/ath10k/txrx.c @@ -166,7 +166,7 @@ static int ath10k_wait_for_peer_common(struct ath10k *ar, int vdev_id, (mapped == expect_mapped || test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags)); - }), 3*HZ); + }), 3 * HZ); if (time_left == 0) return -ETIMEDOUT; diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.h b/drivers/net/wireless/ath/ath10k/wmi-tlv.h index dd678590531a..b8aa6000573c 100644 --- a/drivers/net/wireless/ath/ath10k/wmi-tlv.h +++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.h @@ -968,8 +968,8 @@ enum wmi_tlv_service { #define WMI_SERVICE_IS_ENABLED(wmi_svc_bmap, svc_id, len) \ ((svc_id) < (len) && \ - __le32_to_cpu((wmi_svc_bmap)[(svc_id)/(sizeof(u32))]) & \ - BIT((svc_id)%(sizeof(u32)))) + __le32_to_cpu((wmi_svc_bmap)[(svc_id) / (sizeof(u32))]) & \ + BIT((svc_id) % (sizeof(u32)))) #define SVCMAP(x, y, len) \ do { \ diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c index e8d9a3e5c2df..d64e8681898d 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.c +++ b/drivers/net/wireless/ath/ath10k/wmi.c @@ -1808,7 +1808,7 @@ int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id) ret = -ESHUTDOWN; (ret != -EAGAIN); - }), 3*HZ); + }), 3 * HZ); if (ret) dev_kfree_skb_any(skb); diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h index c83e1e39f8cc..378f2998cd5a 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.h +++ b/drivers/net/wireless/ath/ath10k/wmi.h @@ -405,8 +405,8 @@ static inline char *wmi_service_name(int service_id) #define WMI_SERVICE_IS_ENABLED(wmi_svc_bmap, svc_id, len) \ ((svc_id) < (len) && \ - __le32_to_cpu((wmi_svc_bmap)[(svc_id)/(sizeof(u32))]) & \ - BIT((svc_id)%(sizeof(u32)))) + __le32_to_cpu((wmi_svc_bmap)[(svc_id) / 
(sizeof(u32))]) & \ + BIT((svc_id) % (sizeof(u32)))) #define SVCMAP(x, y, len) \ do { \ @@ -1309,7 +1309,7 @@ enum wmi_10x_event_id { WMI_10X_PDEV_TPC_CONFIG_EVENTID, WMI_10X_GPIO_INPUT_EVENTID, - WMI_10X_PDEV_UTF_EVENTID = WMI_10X_END_EVENTID-1, + WMI_10X_PDEV_UTF_EVENTID = WMI_10X_END_EVENTID - 1, }; enum wmi_10_2_cmd_id { @@ -2042,8 +2042,8 @@ struct wmi_10x_service_ready_event { struct wlan_host_mem_req mem_reqs[0]; } __packed; -#define WMI_SERVICE_READY_TIMEOUT_HZ (5*HZ) -#define WMI_UNIFIED_READY_TIMEOUT_HZ (5*HZ) +#define WMI_SERVICE_READY_TIMEOUT_HZ (5 * HZ) +#define WMI_UNIFIED_READY_TIMEOUT_HZ (5 * HZ) struct wmi_ready_event { __le32 sw_version; @@ -4389,14 +4389,14 @@ enum wmi_vdev_subtype_10_4 { /* * Indicates that AP VDEV uses hidden ssid. only valid for * AP/GO */ -#define WMI_VDEV_START_HIDDEN_SSID (1<<0) +#define WMI_VDEV_START_HIDDEN_SSID (1 << 0) /* * Indicates if robust management frame/management frame * protection is enabled. For GO/AP vdevs, it indicates that * it may support station/client associations with RMF enabled. * For STA/client vdevs, it indicates that sta will * associate with AP with RMF enabled. */ -#define WMI_VDEV_START_PMF_ENABLED (1<<1) +#define WMI_VDEV_START_PMF_ENABLED (1 << 1) struct wmi_p2p_noa_descriptor { __le32 type_count; /* 255: continuous schedule, 0: reserved */ @@ -5342,7 +5342,7 @@ enum wmi_sta_ps_param_pspoll_count { #define WMI_UAPSD_AC_TYPE_TRIG 1 #define WMI_UAPSD_AC_BIT_MASK(ac, type) \ - ((type == WMI_UAPSD_AC_TYPE_DELI) ? (1<<(ac<<1)) : (1<<((ac<<1)+1))) + ((type == WMI_UAPSD_AC_TYPE_DELI) ? (1 << (ac << 1)) : (1 << ((ac << 1) + 1))) enum wmi_sta_ps_param_uapsd { WMI_STA_PS_UAPSD_AC0_DELIVERY_EN = (1 << 0), @@ -5757,7 +5757,7 @@ struct wmi_rate_set { * the rates are filled from least significant byte to most * significant byte. 
*/ - __le32 rates[(MAX_SUPPORTED_RATES/4)+1]; + __le32 rates[(MAX_SUPPORTED_RATES / 4) + 1]; } __packed; struct wmi_rate_set_arg { From beeb1a302f783b0ddeabafebf03f30d589df15eb Mon Sep 17 00:00:00 2001 From: Kalle Valo Date: Wed, 13 Apr 2016 14:13:35 +0300 Subject: [PATCH 0657/1649] ath10k: prefer kernel type 'u64' over 'u_int64_t' Fixes checkpatch warnings: drivers/net/wireless/ath/ath10k/htt.h:1477: Prefer kernel type 'u64' over 'u_int64_t' drivers/net/wireless/ath/ath10k/htt.h:1480: Prefer kernel type 'u64' over 'u_int64_t' Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/htt.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h index 60bd9fe4b2d9..ee7c8f8f8073 100644 --- a/drivers/net/wireless/ath/ath10k/htt.h +++ b/drivers/net/wireless/ath/ath10k/htt.h @@ -1475,10 +1475,10 @@ union htt_rx_pn_t { u32 pn24; /* TKIP or CCMP: 48-bit PN */ - u_int64_t pn48; + u64 pn48; /* WAPI: 128-bit PN */ - u_int64_t pn128[2]; + u64 pn128[2]; }; struct htt_cmd { From c178da58c7aa73d1442ce10cc61df41224115d19 Mon Sep 17 00:00:00 2001 From: Kalle Valo Date: Wed, 13 Apr 2016 14:13:49 +0300 Subject: [PATCH 0658/1649] ath10k: prefer ether_addr_equal() or ether_addr_equal_unaligned() over memcmp() Fixes checkpatch warnings: drivers/net/wireless/ath/ath10k/mac.c:452: Prefer ether_addr_equal() or ether_addr_equal_unaligned() over memcmp() drivers/net/wireless/ath/ath10k/mac.c:455: Prefer ether_addr_equal() or ether_addr_equal_unaligned() over memcmp() drivers/net/wireless/ath/ath10k/txrx.c:133: Prefer ether_addr_equal() or ether_addr_equal_unaligned() over memcmp() Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/mac.c | 4 ++-- drivers/net/wireless/ath/ath10k/txrx.c | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index 4300410ecddf..76ea1ccb59c5 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -449,10 +449,10 @@ static int ath10k_mac_vif_update_wep_key(struct ath10k_vif *arvif, lockdep_assert_held(&ar->conf_mutex); list_for_each_entry(peer, &ar->peers, list) { - if (!memcmp(peer->addr, arvif->vif->addr, ETH_ALEN)) + if (ether_addr_equal(peer->addr, arvif->vif->addr)) continue; - if (!memcmp(peer->addr, arvif->bssid, ETH_ALEN)) + if (ether_addr_equal(peer->addr, arvif->bssid)) continue; if (peer->keys[key->keyidx] == key) diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c index c503ff601a54..8c7086989a71 100644 --- a/drivers/net/wireless/ath/ath10k/txrx.c +++ b/drivers/net/wireless/ath/ath10k/txrx.c @@ -130,7 +130,7 @@ struct ath10k_peer *ath10k_peer_find(struct ath10k *ar, int vdev_id, list_for_each_entry(peer, &ar->peers, list) { if (peer->vdev_id != vdev_id) continue; - if (memcmp(peer->addr, addr, ETH_ALEN)) + if (!ether_addr_equal(peer->addr, addr)) continue; return peer; From 8f4ffb7de9630065040e8142d651708e85eeb863 Mon Sep 17 00:00:00 2001 From: Kalle Valo Date: Wed, 13 Apr 2016 14:14:02 +0300 Subject: [PATCH 0659/1649] ath10k: prefer ether_addr_copy() over memcpy() Fixes checkpatch warning: drivers/net/wireless/ath/ath10k/wmi.c:5800: Prefer ether_addr_copy() over memcpy() if the Ethernet addresses are __aligned(2) Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/wmi.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/drivers/net/wireless/ath/ath10k/wmi.c 
b/drivers/net/wireless/ath/ath10k/wmi.c index d64e8681898d..7eb40f54fdb5 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.c +++ b/drivers/net/wireless/ath/ath10k/wmi.c @@ -5827,9 +5827,8 @@ ath10k_wmi_put_start_scan_tlvs(struct wmi_start_scan_tlvs *tlvs, bssids->num_bssid = __cpu_to_le32(arg->n_bssids); for (i = 0; i < arg->n_bssids; i++) - memcpy(&bssids->bssid_list[i], - arg->bssids[i].bssid, - ETH_ALEN); + ether_addr_copy(bssids->bssid_list[i].addr, + arg->bssids[i].bssid); ptr += sizeof(*bssids); ptr += sizeof(struct wmi_mac_addr) * arg->n_bssids; From 4d16544d0bde4b19da30789a8715635fd9c3889d Mon Sep 17 00:00:00 2001 From: Kalle Valo Date: Wed, 13 Apr 2016 14:14:16 +0300 Subject: [PATCH 0660/1649] ath10k: fix parenthesis alignment Found by checkpatch: drivers/net/wireless/ath/ath10k/mac.c:6800: Alignment should match open parent Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/mac.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index 76ea1ccb59c5..d9d98bf22b3e 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -6797,7 +6797,7 @@ static u64 ath10k_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif) } static void ath10k_set_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - u64 tsf) + u64 tsf) { struct ath10k *ar = hw->priv; struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); From bf7974710a40aaeb69dee7f62d91048bdaf79c76 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Thu, 14 Apr 2016 18:19:13 +0200 Subject: [PATCH 0661/1649] devlink: add shared buffer configuration Define userspace API and drivers API for configuration of shared buffers. Four basic objects are defined: shared buffer - attributes are size, number of pools and TCs pool - chunk of sharedbuffer definition, it has some size and either static or dynamic threshold port pool threshold - to set per-port threshold for each pool port tc threshold bind - to bind port and TC to specified pool with threshold. Signed-off-by: Jiri Pirko Reviewed-by: Ido Schimmel Signed-off-by: David S. 
Miller --- include/net/devlink.h | 47 ++ include/uapi/linux/devlink.h | 57 +++ net/core/devlink.c | 940 +++++++++++++++++++++++++++++++++++ 3 files changed, 1044 insertions(+) diff --git a/include/net/devlink.h b/include/net/devlink.h index c37d257891d6..e4c27473ee4f 100644 --- a/include/net/devlink.h +++ b/include/net/devlink.h @@ -24,6 +24,7 @@ struct devlink_ops; struct devlink { struct list_head list; struct list_head port_list; + struct list_head sb_list; const struct devlink_ops *ops; struct device *dev; possible_net_t _net; @@ -42,6 +43,12 @@ struct devlink_port { u32 split_group; }; +struct devlink_sb_pool_info { + enum devlink_sb_pool_type pool_type; + u32 size; + enum devlink_sb_threshold_type threshold_type; +}; + struct devlink_ops { size_t priv_size; int (*port_type_set)(struct devlink_port *devlink_port, @@ -49,6 +56,28 @@ struct devlink_ops { int (*port_split)(struct devlink *devlink, unsigned int port_index, unsigned int count); int (*port_unsplit)(struct devlink *devlink, unsigned int port_index); + int (*sb_pool_get)(struct devlink *devlink, unsigned int sb_index, + u16 pool_index, + struct devlink_sb_pool_info *pool_info); + int (*sb_pool_set)(struct devlink *devlink, unsigned int sb_index, + u16 pool_index, u32 size, + enum devlink_sb_threshold_type threshold_type); + int (*sb_port_pool_get)(struct devlink_port *devlink_port, + unsigned int sb_index, u16 pool_index, + u32 *p_threshold); + int (*sb_port_pool_set)(struct devlink_port *devlink_port, + unsigned int sb_index, u16 pool_index, + u32 threshold); + int (*sb_tc_pool_bind_get)(struct devlink_port *devlink_port, + unsigned int sb_index, + u16 tc_index, + enum devlink_sb_pool_type pool_type, + u16 *p_pool_index, u32 *p_threshold); + int (*sb_tc_pool_bind_set)(struct devlink_port *devlink_port, + unsigned int sb_index, + u16 tc_index, + enum devlink_sb_pool_type pool_type, + u16 pool_index, u32 threshold); }; static inline void *devlink_priv(struct devlink *devlink) @@ -82,6 +111,11 @@ void devlink_port_type_ib_set(struct devlink_port *devlink_port, void devlink_port_type_clear(struct devlink_port *devlink_port); void devlink_port_split_set(struct devlink_port *devlink_port, u32 split_group); +int devlink_sb_register(struct devlink *devlink, unsigned int sb_index, + u32 size, u16 ingress_pools_count, + u16 egress_pools_count, u16 ingress_tc_count, + u16 egress_tc_count); +void devlink_sb_unregister(struct devlink *devlink, unsigned int sb_index); #else @@ -135,6 +169,19 @@ static inline void devlink_port_split_set(struct devlink_port *devlink_port, { } +static inline int devlink_sb_register(struct devlink *devlink, + unsigned int sb_index, u32 size, + u16 ingress_pools_count, + u16 egress_pools_count, u16 tc_count) +{ + return 0; +} + +static inline void devlink_sb_unregister(struct devlink *devlink, + unsigned int sb_index) +{ +} + #endif #endif /* _NET_DEVLINK_H_ */ diff --git a/include/uapi/linux/devlink.h b/include/uapi/linux/devlink.h index c9fee5781eb1..9c1aa5783090 100644 --- a/include/uapi/linux/devlink.h +++ b/include/uapi/linux/devlink.h @@ -33,6 +33,26 @@ enum devlink_command { DEVLINK_CMD_PORT_SPLIT, DEVLINK_CMD_PORT_UNSPLIT, + DEVLINK_CMD_SB_GET, /* can dump */ + DEVLINK_CMD_SB_SET, + DEVLINK_CMD_SB_NEW, + DEVLINK_CMD_SB_DEL, + + DEVLINK_CMD_SB_POOL_GET, /* can dump */ + DEVLINK_CMD_SB_POOL_SET, + DEVLINK_CMD_SB_POOL_NEW, + DEVLINK_CMD_SB_POOL_DEL, + + DEVLINK_CMD_SB_PORT_POOL_GET, /* can dump */ + DEVLINK_CMD_SB_PORT_POOL_SET, + DEVLINK_CMD_SB_PORT_POOL_NEW, + DEVLINK_CMD_SB_PORT_POOL_DEL, + + 
DEVLINK_CMD_SB_TC_POOL_BIND_GET, /* can dump */ + DEVLINK_CMD_SB_TC_POOL_BIND_SET, + DEVLINK_CMD_SB_TC_POOL_BIND_NEW, + DEVLINK_CMD_SB_TC_POOL_BIND_DEL, + /* add new commands above here */ __DEVLINK_CMD_MAX, @@ -46,6 +66,31 @@ enum devlink_port_type { DEVLINK_PORT_TYPE_IB, }; +enum devlink_sb_pool_type { + DEVLINK_SB_POOL_TYPE_INGRESS, + DEVLINK_SB_POOL_TYPE_EGRESS, +}; + +/* static threshold - limiting the maximum number of bytes. + * dynamic threshold - limiting the maximum number of bytes + * based on the currently available free space in the shared buffer pool. + * In this mode, the maximum quota is calculated based + * on the following formula: + * max_quota = alpha / (1 + alpha) * Free_Buffer + * While Free_Buffer is the amount of none-occupied buffer associated to + * the relevant pool. + * The value range which can be passed is 0-20 and serves + * for computation of alpha by following formula: + * alpha = 2 ^ (passed_value - 10) + */ + +enum devlink_sb_threshold_type { + DEVLINK_SB_THRESHOLD_TYPE_STATIC, + DEVLINK_SB_THRESHOLD_TYPE_DYNAMIC, +}; + +#define DEVLINK_SB_THRESHOLD_TO_ALPHA_MAX 20 + enum devlink_attr { /* don't change the order or add anything between, this is ABI! */ DEVLINK_ATTR_UNSPEC, @@ -62,6 +107,18 @@ enum devlink_attr { DEVLINK_ATTR_PORT_IBDEV_NAME, /* string */ DEVLINK_ATTR_PORT_SPLIT_COUNT, /* u32 */ DEVLINK_ATTR_PORT_SPLIT_GROUP, /* u32 */ + DEVLINK_ATTR_SB_INDEX, /* u32 */ + DEVLINK_ATTR_SB_SIZE, /* u32 */ + DEVLINK_ATTR_SB_INGRESS_POOL_COUNT, /* u16 */ + DEVLINK_ATTR_SB_EGRESS_POOL_COUNT, /* u16 */ + DEVLINK_ATTR_SB_INGRESS_TC_COUNT, /* u16 */ + DEVLINK_ATTR_SB_EGRESS_TC_COUNT, /* u16 */ + DEVLINK_ATTR_SB_POOL_INDEX, /* u16 */ + DEVLINK_ATTR_SB_POOL_TYPE, /* u8 */ + DEVLINK_ATTR_SB_POOL_SIZE, /* u32 */ + DEVLINK_ATTR_SB_POOL_THRESHOLD_TYPE, /* u8 */ + DEVLINK_ATTR_SB_THRESHOLD, /* u32 */ + DEVLINK_ATTR_SB_TC_INDEX, /* u16 */ /* add new attributes above here, update the policy in devlink.c */ diff --git a/net/core/devlink.c b/net/core/devlink.c index b84cf0df4a0e..aa0b9e1542e7 100644 --- a/net/core/devlink.c +++ b/net/core/devlink.c @@ -119,8 +119,167 @@ static struct devlink_port *devlink_port_get_from_info(struct devlink *devlink, return devlink_port_get_from_attrs(devlink, info->attrs); } +struct devlink_sb { + struct list_head list; + unsigned int index; + u32 size; + u16 ingress_pools_count; + u16 egress_pools_count; + u16 ingress_tc_count; + u16 egress_tc_count; +}; + +static u16 devlink_sb_pool_count(struct devlink_sb *devlink_sb) +{ + return devlink_sb->ingress_pools_count + devlink_sb->egress_pools_count; +} + +static struct devlink_sb *devlink_sb_get_by_index(struct devlink *devlink, + unsigned int sb_index) +{ + struct devlink_sb *devlink_sb; + + list_for_each_entry(devlink_sb, &devlink->sb_list, list) { + if (devlink_sb->index == sb_index) + return devlink_sb; + } + return NULL; +} + +static bool devlink_sb_index_exists(struct devlink *devlink, + unsigned int sb_index) +{ + return devlink_sb_get_by_index(devlink, sb_index); +} + +static struct devlink_sb *devlink_sb_get_from_attrs(struct devlink *devlink, + struct nlattr **attrs) +{ + if (attrs[DEVLINK_ATTR_SB_INDEX]) { + u32 sb_index = nla_get_u32(attrs[DEVLINK_ATTR_SB_INDEX]); + struct devlink_sb *devlink_sb; + + devlink_sb = devlink_sb_get_by_index(devlink, sb_index); + if (!devlink_sb) + return ERR_PTR(-ENODEV); + return devlink_sb; + } + return ERR_PTR(-EINVAL); +} + +static struct devlink_sb *devlink_sb_get_from_info(struct devlink *devlink, + struct genl_info *info) +{ + return 
devlink_sb_get_from_attrs(devlink, info->attrs); +} + +static int devlink_sb_pool_index_get_from_attrs(struct devlink_sb *devlink_sb, + struct nlattr **attrs, + u16 *p_pool_index) +{ + u16 val; + + if (!attrs[DEVLINK_ATTR_SB_POOL_INDEX]) + return -EINVAL; + + val = nla_get_u16(attrs[DEVLINK_ATTR_SB_POOL_INDEX]); + if (val >= devlink_sb_pool_count(devlink_sb)) + return -EINVAL; + *p_pool_index = val; + return 0; +} + +static int devlink_sb_pool_index_get_from_info(struct devlink_sb *devlink_sb, + struct genl_info *info, + u16 *p_pool_index) +{ + return devlink_sb_pool_index_get_from_attrs(devlink_sb, info->attrs, + p_pool_index); +} + +static int +devlink_sb_pool_type_get_from_attrs(struct nlattr **attrs, + enum devlink_sb_pool_type *p_pool_type) +{ + u8 val; + + if (!attrs[DEVLINK_ATTR_SB_POOL_TYPE]) + return -EINVAL; + + val = nla_get_u8(attrs[DEVLINK_ATTR_SB_POOL_TYPE]); + if (val != DEVLINK_SB_POOL_TYPE_INGRESS && + val != DEVLINK_SB_POOL_TYPE_EGRESS) + return -EINVAL; + *p_pool_type = val; + return 0; +} + +static int +devlink_sb_pool_type_get_from_info(struct genl_info *info, + enum devlink_sb_pool_type *p_pool_type) +{ + return devlink_sb_pool_type_get_from_attrs(info->attrs, p_pool_type); +} + +static int +devlink_sb_th_type_get_from_attrs(struct nlattr **attrs, + enum devlink_sb_threshold_type *p_th_type) +{ + u8 val; + + if (!attrs[DEVLINK_ATTR_SB_POOL_THRESHOLD_TYPE]) + return -EINVAL; + + val = nla_get_u8(attrs[DEVLINK_ATTR_SB_POOL_THRESHOLD_TYPE]); + if (val != DEVLINK_SB_THRESHOLD_TYPE_STATIC && + val != DEVLINK_SB_THRESHOLD_TYPE_DYNAMIC) + return -EINVAL; + *p_th_type = val; + return 0; +} + +static int +devlink_sb_th_type_get_from_info(struct genl_info *info, + enum devlink_sb_threshold_type *p_th_type) +{ + return devlink_sb_th_type_get_from_attrs(info->attrs, p_th_type); +} + +static int +devlink_sb_tc_index_get_from_attrs(struct devlink_sb *devlink_sb, + struct nlattr **attrs, + enum devlink_sb_pool_type pool_type, + u16 *p_tc_index) +{ + u16 val; + + if (!attrs[DEVLINK_ATTR_SB_TC_INDEX]) + return -EINVAL; + + val = nla_get_u16(attrs[DEVLINK_ATTR_SB_TC_INDEX]); + if (pool_type == DEVLINK_SB_POOL_TYPE_INGRESS && + val >= devlink_sb->ingress_tc_count) + return -EINVAL; + if (pool_type == DEVLINK_SB_POOL_TYPE_EGRESS && + val >= devlink_sb->egress_tc_count) + return -EINVAL; + *p_tc_index = val; + return 0; +} + +static int +devlink_sb_tc_index_get_from_info(struct devlink_sb *devlink_sb, + struct genl_info *info, + enum devlink_sb_pool_type pool_type, + u16 *p_tc_index) +{ + return devlink_sb_tc_index_get_from_attrs(devlink_sb, info->attrs, + pool_type, p_tc_index); +} + #define DEVLINK_NL_FLAG_NEED_DEVLINK BIT(0) #define DEVLINK_NL_FLAG_NEED_PORT BIT(1) +#define DEVLINK_NL_FLAG_NEED_SB BIT(2) static int devlink_nl_pre_doit(const struct genl_ops *ops, struct sk_buff *skb, struct genl_info *info) @@ -147,6 +306,18 @@ static int devlink_nl_pre_doit(const struct genl_ops *ops, } info->user_ptr[0] = devlink_port; } + if (ops->internal_flags & DEVLINK_NL_FLAG_NEED_SB) { + struct devlink_sb *devlink_sb; + + devlink_sb = devlink_sb_get_from_info(devlink, info); + if (IS_ERR(devlink_sb)) { + if (ops->internal_flags & DEVLINK_NL_FLAG_NEED_PORT) + mutex_unlock(&devlink_port_mutex); + mutex_unlock(&devlink_mutex); + return PTR_ERR(devlink_sb); + } + info->user_ptr[1] = devlink_sb; + } return 0; } @@ -499,12 +670,675 @@ static int devlink_nl_cmd_port_unsplit_doit(struct sk_buff *skb, return devlink_port_unsplit(devlink, port_index); } +static int devlink_nl_sb_fill(struct sk_buff *msg, 
struct devlink *devlink, + struct devlink_sb *devlink_sb, + enum devlink_command cmd, u32 portid, + u32 seq, int flags) +{ + void *hdr; + + hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd); + if (!hdr) + return -EMSGSIZE; + + if (devlink_nl_put_handle(msg, devlink)) + goto nla_put_failure; + if (nla_put_u32(msg, DEVLINK_ATTR_SB_INDEX, devlink_sb->index)) + goto nla_put_failure; + if (nla_put_u32(msg, DEVLINK_ATTR_SB_SIZE, devlink_sb->size)) + goto nla_put_failure; + if (nla_put_u16(msg, DEVLINK_ATTR_SB_INGRESS_POOL_COUNT, + devlink_sb->ingress_pools_count)) + goto nla_put_failure; + if (nla_put_u16(msg, DEVLINK_ATTR_SB_EGRESS_POOL_COUNT, + devlink_sb->egress_pools_count)) + goto nla_put_failure; + if (nla_put_u16(msg, DEVLINK_ATTR_SB_INGRESS_TC_COUNT, + devlink_sb->ingress_tc_count)) + goto nla_put_failure; + if (nla_put_u16(msg, DEVLINK_ATTR_SB_EGRESS_TC_COUNT, + devlink_sb->egress_tc_count)) + goto nla_put_failure; + + genlmsg_end(msg, hdr); + return 0; + +nla_put_failure: + genlmsg_cancel(msg, hdr); + return -EMSGSIZE; +} + +static int devlink_nl_cmd_sb_get_doit(struct sk_buff *skb, + struct genl_info *info) +{ + struct devlink *devlink = info->user_ptr[0]; + struct devlink_sb *devlink_sb = info->user_ptr[1]; + struct sk_buff *msg; + int err; + + msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + err = devlink_nl_sb_fill(msg, devlink, devlink_sb, + DEVLINK_CMD_SB_NEW, + info->snd_portid, info->snd_seq, 0); + if (err) { + nlmsg_free(msg); + return err; + } + + return genlmsg_reply(msg, info); +} + +static int devlink_nl_cmd_sb_get_dumpit(struct sk_buff *msg, + struct netlink_callback *cb) +{ + struct devlink *devlink; + struct devlink_sb *devlink_sb; + int start = cb->args[0]; + int idx = 0; + int err; + + mutex_lock(&devlink_mutex); + list_for_each_entry(devlink, &devlink_list, list) { + if (!net_eq(devlink_net(devlink), sock_net(msg->sk))) + continue; + list_for_each_entry(devlink_sb, &devlink->sb_list, list) { + if (idx < start) { + idx++; + continue; + } + err = devlink_nl_sb_fill(msg, devlink, devlink_sb, + DEVLINK_CMD_SB_NEW, + NETLINK_CB(cb->skb).portid, + cb->nlh->nlmsg_seq, + NLM_F_MULTI); + if (err) + goto out; + idx++; + } + } +out: + mutex_unlock(&devlink_mutex); + + cb->args[0] = idx; + return msg->len; +} + +static int devlink_nl_sb_pool_fill(struct sk_buff *msg, struct devlink *devlink, + struct devlink_sb *devlink_sb, + u16 pool_index, enum devlink_command cmd, + u32 portid, u32 seq, int flags) +{ + struct devlink_sb_pool_info pool_info; + void *hdr; + int err; + + err = devlink->ops->sb_pool_get(devlink, devlink_sb->index, + pool_index, &pool_info); + if (err) + return err; + + hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd); + if (!hdr) + return -EMSGSIZE; + + if (devlink_nl_put_handle(msg, devlink)) + goto nla_put_failure; + if (nla_put_u32(msg, DEVLINK_ATTR_SB_INDEX, devlink_sb->index)) + goto nla_put_failure; + if (nla_put_u16(msg, DEVLINK_ATTR_SB_POOL_INDEX, pool_index)) + goto nla_put_failure; + if (nla_put_u8(msg, DEVLINK_ATTR_SB_POOL_TYPE, pool_info.pool_type)) + goto nla_put_failure; + if (nla_put_u32(msg, DEVLINK_ATTR_SB_POOL_SIZE, pool_info.size)) + goto nla_put_failure; + if (nla_put_u8(msg, DEVLINK_ATTR_SB_POOL_THRESHOLD_TYPE, + pool_info.threshold_type)) + goto nla_put_failure; + + genlmsg_end(msg, hdr); + return 0; + +nla_put_failure: + genlmsg_cancel(msg, hdr); + return -EMSGSIZE; +} + +static int devlink_nl_cmd_sb_pool_get_doit(struct sk_buff *skb, + struct genl_info *info) +{ + 
struct devlink *devlink = info->user_ptr[0]; + struct devlink_sb *devlink_sb = info->user_ptr[1]; + struct sk_buff *msg; + u16 pool_index; + int err; + + err = devlink_sb_pool_index_get_from_info(devlink_sb, info, + &pool_index); + if (err) + return err; + + if (!devlink->ops || !devlink->ops->sb_pool_get) + return -EOPNOTSUPP; + + msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + err = devlink_nl_sb_pool_fill(msg, devlink, devlink_sb, pool_index, + DEVLINK_CMD_SB_POOL_NEW, + info->snd_portid, info->snd_seq, 0); + if (err) { + nlmsg_free(msg); + return err; + } + + return genlmsg_reply(msg, info); +} + +static int __sb_pool_get_dumpit(struct sk_buff *msg, int start, int *p_idx, + struct devlink *devlink, + struct devlink_sb *devlink_sb, + u32 portid, u32 seq) +{ + u16 pool_count = devlink_sb_pool_count(devlink_sb); + u16 pool_index; + int err; + + for (pool_index = 0; pool_index < pool_count; pool_index++) { + if (*p_idx < start) { + (*p_idx)++; + continue; + } + err = devlink_nl_sb_pool_fill(msg, devlink, + devlink_sb, + pool_index, + DEVLINK_CMD_SB_POOL_NEW, + portid, seq, NLM_F_MULTI); + if (err) + return err; + (*p_idx)++; + } + return 0; +} + +static int devlink_nl_cmd_sb_pool_get_dumpit(struct sk_buff *msg, + struct netlink_callback *cb) +{ + struct devlink *devlink; + struct devlink_sb *devlink_sb; + int start = cb->args[0]; + int idx = 0; + int err; + + mutex_lock(&devlink_mutex); + list_for_each_entry(devlink, &devlink_list, list) { + if (!net_eq(devlink_net(devlink), sock_net(msg->sk)) || + !devlink->ops || !devlink->ops->sb_pool_get) + continue; + list_for_each_entry(devlink_sb, &devlink->sb_list, list) { + err = __sb_pool_get_dumpit(msg, start, &idx, devlink, + devlink_sb, + NETLINK_CB(cb->skb).portid, + cb->nlh->nlmsg_seq); + if (err && err != -EOPNOTSUPP) + goto out; + } + } +out: + mutex_unlock(&devlink_mutex); + + cb->args[0] = idx; + return msg->len; +} + +static int devlink_sb_pool_set(struct devlink *devlink, unsigned int sb_index, + u16 pool_index, u32 size, + enum devlink_sb_threshold_type threshold_type) + +{ + const struct devlink_ops *ops = devlink->ops; + + if (ops && ops->sb_pool_set) + return ops->sb_pool_set(devlink, sb_index, pool_index, + size, threshold_type); + return -EOPNOTSUPP; +} + +static int devlink_nl_cmd_sb_pool_set_doit(struct sk_buff *skb, + struct genl_info *info) +{ + struct devlink *devlink = info->user_ptr[0]; + struct devlink_sb *devlink_sb = info->user_ptr[1]; + enum devlink_sb_threshold_type threshold_type; + u16 pool_index; + u32 size; + int err; + + err = devlink_sb_pool_index_get_from_info(devlink_sb, info, + &pool_index); + if (err) + return err; + + err = devlink_sb_th_type_get_from_info(info, &threshold_type); + if (err) + return err; + + if (!info->attrs[DEVLINK_ATTR_SB_POOL_SIZE]) + return -EINVAL; + + size = nla_get_u32(info->attrs[DEVLINK_ATTR_SB_POOL_SIZE]); + return devlink_sb_pool_set(devlink, devlink_sb->index, + pool_index, size, threshold_type); +} + +static int devlink_nl_sb_port_pool_fill(struct sk_buff *msg, + struct devlink *devlink, + struct devlink_port *devlink_port, + struct devlink_sb *devlink_sb, + u16 pool_index, + enum devlink_command cmd, + u32 portid, u32 seq, int flags) +{ + u32 threshold; + void *hdr; + int err; + + err = devlink->ops->sb_port_pool_get(devlink_port, devlink_sb->index, + pool_index, &threshold); + if (err) + return err; + + hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd); + if (!hdr) + return -EMSGSIZE; + + if (devlink_nl_put_handle(msg, 
devlink)) + goto nla_put_failure; + if (nla_put_u32(msg, DEVLINK_ATTR_PORT_INDEX, devlink_port->index)) + goto nla_put_failure; + if (nla_put_u32(msg, DEVLINK_ATTR_SB_INDEX, devlink_sb->index)) + goto nla_put_failure; + if (nla_put_u16(msg, DEVLINK_ATTR_SB_POOL_INDEX, pool_index)) + goto nla_put_failure; + if (nla_put_u32(msg, DEVLINK_ATTR_SB_THRESHOLD, threshold)) + goto nla_put_failure; + + genlmsg_end(msg, hdr); + return 0; + +nla_put_failure: + genlmsg_cancel(msg, hdr); + return -EMSGSIZE; +} + +static int devlink_nl_cmd_sb_port_pool_get_doit(struct sk_buff *skb, + struct genl_info *info) +{ + struct devlink_port *devlink_port = info->user_ptr[0]; + struct devlink *devlink = devlink_port->devlink; + struct devlink_sb *devlink_sb = info->user_ptr[1]; + struct sk_buff *msg; + u16 pool_index; + int err; + + err = devlink_sb_pool_index_get_from_info(devlink_sb, info, + &pool_index); + if (err) + return err; + + if (!devlink->ops || !devlink->ops->sb_port_pool_get) + return -EOPNOTSUPP; + + msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + err = devlink_nl_sb_port_pool_fill(msg, devlink, devlink_port, + devlink_sb, pool_index, + DEVLINK_CMD_SB_PORT_POOL_NEW, + info->snd_portid, info->snd_seq, 0); + if (err) { + nlmsg_free(msg); + return err; + } + + return genlmsg_reply(msg, info); +} + +static int __sb_port_pool_get_dumpit(struct sk_buff *msg, int start, int *p_idx, + struct devlink *devlink, + struct devlink_sb *devlink_sb, + u32 portid, u32 seq) +{ + struct devlink_port *devlink_port; + u16 pool_count = devlink_sb_pool_count(devlink_sb); + u16 pool_index; + int err; + + list_for_each_entry(devlink_port, &devlink->port_list, list) { + for (pool_index = 0; pool_index < pool_count; pool_index++) { + if (*p_idx < start) { + (*p_idx)++; + continue; + } + err = devlink_nl_sb_port_pool_fill(msg, devlink, + devlink_port, + devlink_sb, + pool_index, + DEVLINK_CMD_SB_PORT_POOL_NEW, + portid, seq, + NLM_F_MULTI); + if (err) + return err; + (*p_idx)++; + } + } + return 0; +} + +static int devlink_nl_cmd_sb_port_pool_get_dumpit(struct sk_buff *msg, + struct netlink_callback *cb) +{ + struct devlink *devlink; + struct devlink_sb *devlink_sb; + int start = cb->args[0]; + int idx = 0; + int err; + + mutex_lock(&devlink_mutex); + mutex_lock(&devlink_port_mutex); + list_for_each_entry(devlink, &devlink_list, list) { + if (!net_eq(devlink_net(devlink), sock_net(msg->sk)) || + !devlink->ops || !devlink->ops->sb_port_pool_get) + continue; + list_for_each_entry(devlink_sb, &devlink->sb_list, list) { + err = __sb_port_pool_get_dumpit(msg, start, &idx, + devlink, devlink_sb, + NETLINK_CB(cb->skb).portid, + cb->nlh->nlmsg_seq); + if (err && err != -EOPNOTSUPP) + goto out; + } + } +out: + mutex_unlock(&devlink_port_mutex); + mutex_unlock(&devlink_mutex); + + cb->args[0] = idx; + return msg->len; +} + +static int devlink_sb_port_pool_set(struct devlink_port *devlink_port, + unsigned int sb_index, u16 pool_index, + u32 threshold) + +{ + const struct devlink_ops *ops = devlink_port->devlink->ops; + + if (ops && ops->sb_port_pool_set) + return ops->sb_port_pool_set(devlink_port, sb_index, + pool_index, threshold); + return -EOPNOTSUPP; +} + +static int devlink_nl_cmd_sb_port_pool_set_doit(struct sk_buff *skb, + struct genl_info *info) +{ + struct devlink_port *devlink_port = info->user_ptr[0]; + struct devlink_sb *devlink_sb = info->user_ptr[1]; + u16 pool_index; + u32 threshold; + int err; + + err = devlink_sb_pool_index_get_from_info(devlink_sb, info, + &pool_index); + if (err) + 
return err; + + if (!info->attrs[DEVLINK_ATTR_SB_THRESHOLD]) + return -EINVAL; + + threshold = nla_get_u32(info->attrs[DEVLINK_ATTR_SB_THRESHOLD]); + return devlink_sb_port_pool_set(devlink_port, devlink_sb->index, + pool_index, threshold); +} + +static int +devlink_nl_sb_tc_pool_bind_fill(struct sk_buff *msg, struct devlink *devlink, + struct devlink_port *devlink_port, + struct devlink_sb *devlink_sb, u16 tc_index, + enum devlink_sb_pool_type pool_type, + enum devlink_command cmd, + u32 portid, u32 seq, int flags) +{ + u16 pool_index; + u32 threshold; + void *hdr; + int err; + + err = devlink->ops->sb_tc_pool_bind_get(devlink_port, devlink_sb->index, + tc_index, pool_type, + &pool_index, &threshold); + if (err) + return err; + + hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd); + if (!hdr) + return -EMSGSIZE; + + if (devlink_nl_put_handle(msg, devlink)) + goto nla_put_failure; + if (nla_put_u32(msg, DEVLINK_ATTR_PORT_INDEX, devlink_port->index)) + goto nla_put_failure; + if (nla_put_u32(msg, DEVLINK_ATTR_SB_INDEX, devlink_sb->index)) + goto nla_put_failure; + if (nla_put_u16(msg, DEVLINK_ATTR_SB_TC_INDEX, tc_index)) + goto nla_put_failure; + if (nla_put_u8(msg, DEVLINK_ATTR_SB_POOL_TYPE, pool_type)) + goto nla_put_failure; + if (nla_put_u16(msg, DEVLINK_ATTR_SB_POOL_INDEX, pool_index)) + goto nla_put_failure; + if (nla_put_u32(msg, DEVLINK_ATTR_SB_THRESHOLD, threshold)) + goto nla_put_failure; + + genlmsg_end(msg, hdr); + return 0; + +nla_put_failure: + genlmsg_cancel(msg, hdr); + return -EMSGSIZE; +} + +static int devlink_nl_cmd_sb_tc_pool_bind_get_doit(struct sk_buff *skb, + struct genl_info *info) +{ + struct devlink_port *devlink_port = info->user_ptr[0]; + struct devlink *devlink = devlink_port->devlink; + struct devlink_sb *devlink_sb = info->user_ptr[1]; + struct sk_buff *msg; + enum devlink_sb_pool_type pool_type; + u16 tc_index; + int err; + + err = devlink_sb_pool_type_get_from_info(info, &pool_type); + if (err) + return err; + + err = devlink_sb_tc_index_get_from_info(devlink_sb, info, + pool_type, &tc_index); + if (err) + return err; + + if (!devlink->ops || !devlink->ops->sb_tc_pool_bind_get) + return -EOPNOTSUPP; + + msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + err = devlink_nl_sb_tc_pool_bind_fill(msg, devlink, devlink_port, + devlink_sb, tc_index, pool_type, + DEVLINK_CMD_SB_TC_POOL_BIND_NEW, + info->snd_portid, + info->snd_seq, 0); + if (err) { + nlmsg_free(msg); + return err; + } + + return genlmsg_reply(msg, info); +} + +static int __sb_tc_pool_bind_get_dumpit(struct sk_buff *msg, + int start, int *p_idx, + struct devlink *devlink, + struct devlink_sb *devlink_sb, + u32 portid, u32 seq) +{ + struct devlink_port *devlink_port; + u16 tc_index; + int err; + + list_for_each_entry(devlink_port, &devlink->port_list, list) { + for (tc_index = 0; + tc_index < devlink_sb->ingress_tc_count; tc_index++) { + if (*p_idx < start) { + (*p_idx)++; + continue; + } + err = devlink_nl_sb_tc_pool_bind_fill(msg, devlink, + devlink_port, + devlink_sb, + tc_index, + DEVLINK_SB_POOL_TYPE_INGRESS, + DEVLINK_CMD_SB_TC_POOL_BIND_NEW, + portid, seq, + NLM_F_MULTI); + if (err) + return err; + (*p_idx)++; + } + for (tc_index = 0; + tc_index < devlink_sb->egress_tc_count; tc_index++) { + if (*p_idx < start) { + (*p_idx)++; + continue; + } + err = devlink_nl_sb_tc_pool_bind_fill(msg, devlink, + devlink_port, + devlink_sb, + tc_index, + DEVLINK_SB_POOL_TYPE_EGRESS, + DEVLINK_CMD_SB_TC_POOL_BIND_NEW, + portid, seq, + NLM_F_MULTI); + if (err) + 
return err; + (*p_idx)++; + } + } + return 0; +} + +static int +devlink_nl_cmd_sb_tc_pool_bind_get_dumpit(struct sk_buff *msg, + struct netlink_callback *cb) +{ + struct devlink *devlink; + struct devlink_sb *devlink_sb; + int start = cb->args[0]; + int idx = 0; + int err; + + mutex_lock(&devlink_mutex); + mutex_lock(&devlink_port_mutex); + list_for_each_entry(devlink, &devlink_list, list) { + if (!net_eq(devlink_net(devlink), sock_net(msg->sk)) || + !devlink->ops || !devlink->ops->sb_tc_pool_bind_get) + continue; + list_for_each_entry(devlink_sb, &devlink->sb_list, list) { + err = __sb_tc_pool_bind_get_dumpit(msg, start, &idx, + devlink, + devlink_sb, + NETLINK_CB(cb->skb).portid, + cb->nlh->nlmsg_seq); + if (err && err != -EOPNOTSUPP) + goto out; + } + } +out: + mutex_unlock(&devlink_port_mutex); + mutex_unlock(&devlink_mutex); + + cb->args[0] = idx; + return msg->len; +} + +static int devlink_sb_tc_pool_bind_set(struct devlink_port *devlink_port, + unsigned int sb_index, u16 tc_index, + enum devlink_sb_pool_type pool_type, + u16 pool_index, u32 threshold) + +{ + const struct devlink_ops *ops = devlink_port->devlink->ops; + + if (ops && ops->sb_tc_pool_bind_set) + return ops->sb_tc_pool_bind_set(devlink_port, sb_index, + tc_index, pool_type, + pool_index, threshold); + return -EOPNOTSUPP; +} + +static int devlink_nl_cmd_sb_tc_pool_bind_set_doit(struct sk_buff *skb, + struct genl_info *info) +{ + struct devlink_port *devlink_port = info->user_ptr[0]; + struct devlink_sb *devlink_sb = info->user_ptr[1]; + enum devlink_sb_pool_type pool_type; + u16 tc_index; + u16 pool_index; + u32 threshold; + int err; + + err = devlink_sb_pool_type_get_from_info(info, &pool_type); + if (err) + return err; + + err = devlink_sb_tc_index_get_from_info(devlink_sb, info, + pool_type, &tc_index); + if (err) + return err; + + err = devlink_sb_pool_index_get_from_info(devlink_sb, info, + &pool_index); + if (err) + return err; + + if (!info->attrs[DEVLINK_ATTR_SB_THRESHOLD]) + return -EINVAL; + + threshold = nla_get_u32(info->attrs[DEVLINK_ATTR_SB_THRESHOLD]); + return devlink_sb_tc_pool_bind_set(devlink_port, devlink_sb->index, + tc_index, pool_type, + pool_index, threshold); +} + static const struct nla_policy devlink_nl_policy[DEVLINK_ATTR_MAX + 1] = { [DEVLINK_ATTR_BUS_NAME] = { .type = NLA_NUL_STRING }, [DEVLINK_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING }, [DEVLINK_ATTR_PORT_INDEX] = { .type = NLA_U32 }, [DEVLINK_ATTR_PORT_TYPE] = { .type = NLA_U16 }, [DEVLINK_ATTR_PORT_SPLIT_COUNT] = { .type = NLA_U32 }, + [DEVLINK_ATTR_SB_INDEX] = { .type = NLA_U32 }, + [DEVLINK_ATTR_SB_POOL_INDEX] = { .type = NLA_U16 }, + [DEVLINK_ATTR_SB_POOL_TYPE] = { .type = NLA_U8 }, + [DEVLINK_ATTR_SB_POOL_SIZE] = { .type = NLA_U32 }, + [DEVLINK_ATTR_SB_POOL_THRESHOLD_TYPE] = { .type = NLA_U8 }, + [DEVLINK_ATTR_SB_THRESHOLD] = { .type = NLA_U32 }, + [DEVLINK_ATTR_SB_TC_INDEX] = { .type = NLA_U16 }, }; static const struct genl_ops devlink_nl_ops[] = { @@ -545,6 +1379,66 @@ static const struct genl_ops devlink_nl_ops[] = { .flags = GENL_ADMIN_PERM, .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK, }, + { + .cmd = DEVLINK_CMD_SB_GET, + .doit = devlink_nl_cmd_sb_get_doit, + .dumpit = devlink_nl_cmd_sb_get_dumpit, + .policy = devlink_nl_policy, + .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK | + DEVLINK_NL_FLAG_NEED_SB, + /* can be retrieved by unprivileged users */ + }, + { + .cmd = DEVLINK_CMD_SB_POOL_GET, + .doit = devlink_nl_cmd_sb_pool_get_doit, + .dumpit = devlink_nl_cmd_sb_pool_get_dumpit, + .policy = devlink_nl_policy, + 
.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK | + DEVLINK_NL_FLAG_NEED_SB, + /* can be retrieved by unprivileged users */ + }, + { + .cmd = DEVLINK_CMD_SB_POOL_SET, + .doit = devlink_nl_cmd_sb_pool_set_doit, + .policy = devlink_nl_policy, + .flags = GENL_ADMIN_PERM, + .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK | + DEVLINK_NL_FLAG_NEED_SB, + }, + { + .cmd = DEVLINK_CMD_SB_PORT_POOL_GET, + .doit = devlink_nl_cmd_sb_port_pool_get_doit, + .dumpit = devlink_nl_cmd_sb_port_pool_get_dumpit, + .policy = devlink_nl_policy, + .internal_flags = DEVLINK_NL_FLAG_NEED_PORT | + DEVLINK_NL_FLAG_NEED_SB, + /* can be retrieved by unprivileged users */ + }, + { + .cmd = DEVLINK_CMD_SB_PORT_POOL_SET, + .doit = devlink_nl_cmd_sb_port_pool_set_doit, + .policy = devlink_nl_policy, + .flags = GENL_ADMIN_PERM, + .internal_flags = DEVLINK_NL_FLAG_NEED_PORT | + DEVLINK_NL_FLAG_NEED_SB, + }, + { + .cmd = DEVLINK_CMD_SB_TC_POOL_BIND_GET, + .doit = devlink_nl_cmd_sb_tc_pool_bind_get_doit, + .dumpit = devlink_nl_cmd_sb_tc_pool_bind_get_dumpit, + .policy = devlink_nl_policy, + .internal_flags = DEVLINK_NL_FLAG_NEED_PORT | + DEVLINK_NL_FLAG_NEED_SB, + /* can be retrieved by unprivileged users */ + }, + { + .cmd = DEVLINK_CMD_SB_TC_POOL_BIND_SET, + .doit = devlink_nl_cmd_sb_tc_pool_bind_set_doit, + .policy = devlink_nl_policy, + .flags = GENL_ADMIN_PERM, + .internal_flags = DEVLINK_NL_FLAG_NEED_PORT | + DEVLINK_NL_FLAG_NEED_SB, + }, }; /** @@ -566,6 +1460,7 @@ struct devlink *devlink_alloc(const struct devlink_ops *ops, size_t priv_size) devlink->ops = ops; devlink_net_set(devlink, &init_net); INIT_LIST_HEAD(&devlink->port_list); + INIT_LIST_HEAD(&devlink->sb_list); return devlink; } EXPORT_SYMBOL_GPL(devlink_alloc); @@ -721,6 +1616,51 @@ void devlink_port_split_set(struct devlink_port *devlink_port, } EXPORT_SYMBOL_GPL(devlink_port_split_set); +int devlink_sb_register(struct devlink *devlink, unsigned int sb_index, + u32 size, u16 ingress_pools_count, + u16 egress_pools_count, u16 ingress_tc_count, + u16 egress_tc_count) +{ + struct devlink_sb *devlink_sb; + int err = 0; + + mutex_lock(&devlink_mutex); + if (devlink_sb_index_exists(devlink, sb_index)) { + err = -EEXIST; + goto unlock; + } + + devlink_sb = kzalloc(sizeof(*devlink_sb), GFP_KERNEL); + if (!devlink_sb) { + err = -ENOMEM; + goto unlock; + } + devlink_sb->index = sb_index; + devlink_sb->size = size; + devlink_sb->ingress_pools_count = ingress_pools_count; + devlink_sb->egress_pools_count = egress_pools_count; + devlink_sb->ingress_tc_count = ingress_tc_count; + devlink_sb->egress_tc_count = egress_tc_count; + list_add_tail(&devlink_sb->list, &devlink->sb_list); +unlock: + mutex_unlock(&devlink_mutex); + return err; +} +EXPORT_SYMBOL_GPL(devlink_sb_register); + +void devlink_sb_unregister(struct devlink *devlink, unsigned int sb_index) +{ + struct devlink_sb *devlink_sb; + + mutex_lock(&devlink_mutex); + devlink_sb = devlink_sb_get_by_index(devlink, sb_index); + WARN_ON(!devlink_sb); + list_del(&devlink_sb->list); + mutex_unlock(&devlink_mutex); + kfree(devlink_sb); +} +EXPORT_SYMBOL_GPL(devlink_sb_unregister); + static int __init devlink_module_init(void) { return genl_register_family_with_ops_groups(&devlink_nl_family, From df38dafd255954ee7012785c62e615f595d5cb3c Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Thu, 14 Apr 2016 18:19:14 +0200 Subject: [PATCH 0662/1649] devlink: implement shared buffer occupancy monitoring interface User needs to monitor shared buffer occupancy. 
For that, he issues a snapshot command in order to instruct hardware to catch current and maximal occupancy values, and clear command in order to clear the historical maximal values. Also port-pool and tc-pool-bind command response messages are extended to carry occupancy values. Signed-off-by: Jiri Pirko Reviewed-by: Ido Schimmel Signed-off-by: David S. Miller --- include/net/devlink.h | 12 +++++ include/uapi/linux/devlink.h | 6 +++ net/core/devlink.c | 98 +++++++++++++++++++++++++++++++++--- 3 files changed, 110 insertions(+), 6 deletions(-) diff --git a/include/net/devlink.h b/include/net/devlink.h index e4c27473ee4f..be64218e0254 100644 --- a/include/net/devlink.h +++ b/include/net/devlink.h @@ -78,6 +78,18 @@ struct devlink_ops { u16 tc_index, enum devlink_sb_pool_type pool_type, u16 pool_index, u32 threshold); + int (*sb_occ_snapshot)(struct devlink *devlink, + unsigned int sb_index); + int (*sb_occ_max_clear)(struct devlink *devlink, + unsigned int sb_index); + int (*sb_occ_port_pool_get)(struct devlink_port *devlink_port, + unsigned int sb_index, u16 pool_index, + u32 *p_cur, u32 *p_max); + int (*sb_occ_tc_port_bind_get)(struct devlink_port *devlink_port, + unsigned int sb_index, + u16 tc_index, + enum devlink_sb_pool_type pool_type, + u32 *p_cur, u32 *p_max); }; static inline void *devlink_priv(struct devlink *devlink) diff --git a/include/uapi/linux/devlink.h b/include/uapi/linux/devlink.h index 9c1aa5783090..ba0073b26fa6 100644 --- a/include/uapi/linux/devlink.h +++ b/include/uapi/linux/devlink.h @@ -53,6 +53,10 @@ enum devlink_command { DEVLINK_CMD_SB_TC_POOL_BIND_NEW, DEVLINK_CMD_SB_TC_POOL_BIND_DEL, + /* Shared buffer occupancy monitoring commands */ + DEVLINK_CMD_SB_OCC_SNAPSHOT, + DEVLINK_CMD_SB_OCC_MAX_CLEAR, + /* add new commands above here */ __DEVLINK_CMD_MAX, @@ -119,6 +123,8 @@ enum devlink_attr { DEVLINK_ATTR_SB_POOL_THRESHOLD_TYPE, /* u8 */ DEVLINK_ATTR_SB_THRESHOLD, /* u32 */ DEVLINK_ATTR_SB_TC_INDEX, /* u16 */ + DEVLINK_ATTR_SB_OCC_CUR, /* u32 */ + DEVLINK_ATTR_SB_OCC_MAX, /* u32 */ /* add new attributes above here, update the policy in devlink.c */ diff --git a/net/core/devlink.c b/net/core/devlink.c index aa0b9e1542e7..933e8d4d3968 100644 --- a/net/core/devlink.c +++ b/net/core/devlink.c @@ -280,6 +280,10 @@ devlink_sb_tc_index_get_from_info(struct devlink_sb *devlink_sb, #define DEVLINK_NL_FLAG_NEED_DEVLINK BIT(0) #define DEVLINK_NL_FLAG_NEED_PORT BIT(1) #define DEVLINK_NL_FLAG_NEED_SB BIT(2) +#define DEVLINK_NL_FLAG_LOCK_PORTS BIT(3) + /* port is not needed but we need to ensure they don't + * change in the middle of command + */ static int devlink_nl_pre_doit(const struct genl_ops *ops, struct sk_buff *skb, struct genl_info *info) @@ -306,6 +310,9 @@ static int devlink_nl_pre_doit(const struct genl_ops *ops, } info->user_ptr[0] = devlink_port; } + if (ops->internal_flags & DEVLINK_NL_FLAG_LOCK_PORTS) { + mutex_lock(&devlink_port_mutex); + } if (ops->internal_flags & DEVLINK_NL_FLAG_NEED_SB) { struct devlink_sb *devlink_sb; @@ -324,7 +331,8 @@ static int devlink_nl_pre_doit(const struct genl_ops *ops, static void devlink_nl_post_doit(const struct genl_ops *ops, struct sk_buff *skb, struct genl_info *info) { - if (ops->internal_flags & DEVLINK_NL_FLAG_NEED_PORT) + if (ops->internal_flags & DEVLINK_NL_FLAG_NEED_PORT || + ops->internal_flags & DEVLINK_NL_FLAG_LOCK_PORTS) mutex_unlock(&devlink_port_mutex); mutex_unlock(&devlink_mutex); } @@ -942,12 +950,13 @@ static int devlink_nl_sb_port_pool_fill(struct sk_buff *msg, enum devlink_command cmd, u32 portid, u32 
seq, int flags) { + const struct devlink_ops *ops = devlink->ops; u32 threshold; void *hdr; int err; - err = devlink->ops->sb_port_pool_get(devlink_port, devlink_sb->index, - pool_index, &threshold); + err = ops->sb_port_pool_get(devlink_port, devlink_sb->index, + pool_index, &threshold); if (err) return err; @@ -966,6 +975,22 @@ static int devlink_nl_sb_port_pool_fill(struct sk_buff *msg, if (nla_put_u32(msg, DEVLINK_ATTR_SB_THRESHOLD, threshold)) goto nla_put_failure; + if (ops->sb_occ_port_pool_get) { + u32 cur; + u32 max; + + err = ops->sb_occ_port_pool_get(devlink_port, devlink_sb->index, + pool_index, &cur, &max); + if (err && err != -EOPNOTSUPP) + return err; + if (!err) { + if (nla_put_u32(msg, DEVLINK_ATTR_SB_OCC_CUR, cur)) + goto nla_put_failure; + if (nla_put_u32(msg, DEVLINK_ATTR_SB_OCC_MAX, max)) + goto nla_put_failure; + } + } + genlmsg_end(msg, hdr); return 0; @@ -1114,14 +1139,15 @@ devlink_nl_sb_tc_pool_bind_fill(struct sk_buff *msg, struct devlink *devlink, enum devlink_command cmd, u32 portid, u32 seq, int flags) { + const struct devlink_ops *ops = devlink->ops; u16 pool_index; u32 threshold; void *hdr; int err; - err = devlink->ops->sb_tc_pool_bind_get(devlink_port, devlink_sb->index, - tc_index, pool_type, - &pool_index, &threshold); + err = ops->sb_tc_pool_bind_get(devlink_port, devlink_sb->index, + tc_index, pool_type, + &pool_index, &threshold); if (err) return err; @@ -1144,6 +1170,24 @@ devlink_nl_sb_tc_pool_bind_fill(struct sk_buff *msg, struct devlink *devlink, if (nla_put_u32(msg, DEVLINK_ATTR_SB_THRESHOLD, threshold)) goto nla_put_failure; + if (ops->sb_occ_tc_port_bind_get) { + u32 cur; + u32 max; + + err = ops->sb_occ_tc_port_bind_get(devlink_port, + devlink_sb->index, + tc_index, pool_type, + &cur, &max); + if (err && err != -EOPNOTSUPP) + return err; + if (!err) { + if (nla_put_u32(msg, DEVLINK_ATTR_SB_OCC_CUR, cur)) + goto nla_put_failure; + if (nla_put_u32(msg, DEVLINK_ATTR_SB_OCC_MAX, max)) + goto nla_put_failure; + } + } + genlmsg_end(msg, hdr); return 0; @@ -1326,6 +1370,30 @@ static int devlink_nl_cmd_sb_tc_pool_bind_set_doit(struct sk_buff *skb, pool_index, threshold); } +static int devlink_nl_cmd_sb_occ_snapshot_doit(struct sk_buff *skb, + struct genl_info *info) +{ + struct devlink *devlink = info->user_ptr[0]; + struct devlink_sb *devlink_sb = info->user_ptr[1]; + const struct devlink_ops *ops = devlink->ops; + + if (ops && ops->sb_occ_snapshot) + return ops->sb_occ_snapshot(devlink, devlink_sb->index); + return -EOPNOTSUPP; +} + +static int devlink_nl_cmd_sb_occ_max_clear_doit(struct sk_buff *skb, + struct genl_info *info) +{ + struct devlink *devlink = info->user_ptr[0]; + struct devlink_sb *devlink_sb = info->user_ptr[1]; + const struct devlink_ops *ops = devlink->ops; + + if (ops && ops->sb_occ_max_clear) + return ops->sb_occ_max_clear(devlink, devlink_sb->index); + return -EOPNOTSUPP; +} + static const struct nla_policy devlink_nl_policy[DEVLINK_ATTR_MAX + 1] = { [DEVLINK_ATTR_BUS_NAME] = { .type = NLA_NUL_STRING }, [DEVLINK_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING }, @@ -1439,6 +1507,24 @@ static const struct genl_ops devlink_nl_ops[] = { .internal_flags = DEVLINK_NL_FLAG_NEED_PORT | DEVLINK_NL_FLAG_NEED_SB, }, + { + .cmd = DEVLINK_CMD_SB_OCC_SNAPSHOT, + .doit = devlink_nl_cmd_sb_occ_snapshot_doit, + .policy = devlink_nl_policy, + .flags = GENL_ADMIN_PERM, + .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK | + DEVLINK_NL_FLAG_NEED_SB | + DEVLINK_NL_FLAG_LOCK_PORTS, + }, + { + .cmd = DEVLINK_CMD_SB_OCC_MAX_CLEAR, + .doit = 
devlink_nl_cmd_sb_occ_max_clear_doit, + .policy = devlink_nl_policy, + .flags = GENL_ADMIN_PERM, + .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK | + DEVLINK_NL_FLAG_NEED_SB | + DEVLINK_NL_FLAG_LOCK_PORTS, + }, }; /** From a6179bf0d17a4d71075bad2a0b17a752fc973b64 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Thu, 14 Apr 2016 18:19:15 +0200 Subject: [PATCH 0663/1649] mlxsw: core: Add devlink shared buffer callbacks Add middle layer in mlxsw core code to forward shared buffer calls into specific ASIC drivers. Signed-off-by: Jiri Pirko Reviewed-by: Ido Schimmel Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/core.c | 105 ++++++++++++++++++++- drivers/net/ethernet/mellanox/mlxsw/core.h | 20 ++++ 2 files changed, 123 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c index 3958195526d1..1278260118a4 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core.c @@ -816,9 +816,110 @@ static int mlxsw_devlink_port_unsplit(struct devlink *devlink, return mlxsw_core->driver->port_unsplit(mlxsw_core, port_index); } +static int +mlxsw_devlink_sb_pool_get(struct devlink *devlink, + unsigned int sb_index, u16 pool_index, + struct devlink_sb_pool_info *pool_info) +{ + struct mlxsw_core *mlxsw_core = devlink_priv(devlink); + struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver; + + if (!mlxsw_driver->sb_pool_get) + return -EOPNOTSUPP; + return mlxsw_driver->sb_pool_get(mlxsw_core, sb_index, + pool_index, pool_info); +} + +static int +mlxsw_devlink_sb_pool_set(struct devlink *devlink, + unsigned int sb_index, u16 pool_index, u32 size, + enum devlink_sb_threshold_type threshold_type) +{ + struct mlxsw_core *mlxsw_core = devlink_priv(devlink); + struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver; + + if (!mlxsw_driver->sb_pool_set) + return -EOPNOTSUPP; + return mlxsw_driver->sb_pool_set(mlxsw_core, sb_index, + pool_index, size, threshold_type); +} + +static void *__dl_port(struct devlink_port *devlink_port) +{ + return container_of(devlink_port, struct mlxsw_core_port, devlink_port); +} + +static int mlxsw_devlink_sb_port_pool_get(struct devlink_port *devlink_port, + unsigned int sb_index, u16 pool_index, + u32 *p_threshold) +{ + struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink); + struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver; + struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port); + + if (!mlxsw_driver->sb_port_pool_get) + return -EOPNOTSUPP; + return mlxsw_driver->sb_port_pool_get(mlxsw_core_port, sb_index, + pool_index, p_threshold); +} + +static int mlxsw_devlink_sb_port_pool_set(struct devlink_port *devlink_port, + unsigned int sb_index, u16 pool_index, + u32 threshold) +{ + struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink); + struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver; + struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port); + + if (!mlxsw_driver->sb_port_pool_set) + return -EOPNOTSUPP; + return mlxsw_driver->sb_port_pool_set(mlxsw_core_port, sb_index, + pool_index, threshold); +} + +static int +mlxsw_devlink_sb_tc_pool_bind_get(struct devlink_port *devlink_port, + unsigned int sb_index, u16 tc_index, + enum devlink_sb_pool_type pool_type, + u16 *p_pool_index, u32 *p_threshold) +{ + struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink); + struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver; + struct mlxsw_core_port *mlxsw_core_port = 
__dl_port(devlink_port); + + if (!mlxsw_driver->sb_tc_pool_bind_get) + return -EOPNOTSUPP; + return mlxsw_driver->sb_tc_pool_bind_get(mlxsw_core_port, sb_index, + tc_index, pool_type, + p_pool_index, p_threshold); +} + +static int +mlxsw_devlink_sb_tc_pool_bind_set(struct devlink_port *devlink_port, + unsigned int sb_index, u16 tc_index, + enum devlink_sb_pool_type pool_type, + u16 pool_index, u32 threshold) +{ + struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink); + struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver; + struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port); + + if (!mlxsw_driver->sb_tc_pool_bind_set) + return -EOPNOTSUPP; + return mlxsw_driver->sb_tc_pool_bind_set(mlxsw_core_port, sb_index, + tc_index, pool_type, + pool_index, threshold); +} + static const struct devlink_ops mlxsw_devlink_ops = { - .port_split = mlxsw_devlink_port_split, - .port_unsplit = mlxsw_devlink_port_unsplit, + .port_split = mlxsw_devlink_port_split, + .port_unsplit = mlxsw_devlink_port_unsplit, + .sb_pool_get = mlxsw_devlink_sb_pool_get, + .sb_pool_set = mlxsw_devlink_sb_pool_set, + .sb_port_pool_get = mlxsw_devlink_sb_port_pool_get, + .sb_port_pool_set = mlxsw_devlink_sb_port_pool_set, + .sb_tc_pool_bind_get = mlxsw_devlink_sb_tc_pool_bind_get, + .sb_tc_pool_bind_set = mlxsw_devlink_sb_tc_pool_bind_set, }; int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info, diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h index f3cebef9c31c..184e9853398b 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core.h @@ -200,6 +200,26 @@ struct mlxsw_driver { int (*port_split)(struct mlxsw_core *mlxsw_core, u8 local_port, unsigned int count); int (*port_unsplit)(struct mlxsw_core *mlxsw_core, u8 local_port); + int (*sb_pool_get)(struct mlxsw_core *mlxsw_core, + unsigned int sb_index, u16 pool_index, + struct devlink_sb_pool_info *pool_info); + int (*sb_pool_set)(struct mlxsw_core *mlxsw_core, + unsigned int sb_index, u16 pool_index, u32 size, + enum devlink_sb_threshold_type threshold_type); + int (*sb_port_pool_get)(struct mlxsw_core_port *mlxsw_core_port, + unsigned int sb_index, u16 pool_index, + u32 *p_threshold); + int (*sb_port_pool_set)(struct mlxsw_core_port *mlxsw_core_port, + unsigned int sb_index, u16 pool_index, + u32 threshold); + int (*sb_tc_pool_bind_get)(struct mlxsw_core_port *mlxsw_core_port, + unsigned int sb_index, u16 tc_index, + enum devlink_sb_pool_type pool_type, + u16 *p_pool_index, u32 *p_threshold); + int (*sb_tc_pool_bind_set)(struct mlxsw_core_port *mlxsw_core_port, + unsigned int sb_index, u16 tc_index, + enum devlink_sb_pool_type pool_type, + u16 pool_index, u32 threshold); void (*txhdr_construct)(struct sk_buff *skb, const struct mlxsw_tx_info *tx_info); u8 txhdr_len; From 94266e3278ef862bb422575a31ceb166dab1b406 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Thu, 14 Apr 2016 18:19:16 +0200 Subject: [PATCH 0664/1649] mlxsw: spectrum_buffers: Push out shared buffer register writes Pushed them into helper functions. Signed-off-by: Jiri Pirko Reviewed-by: Ido Schimmel Signed-off-by: David S. 
Miller --- .../mellanox/mlxsw/spectrum_buffers.c | 54 ++++++++++++++----- 1 file changed, 40 insertions(+), 14 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c index f58b1d3a619a..ae60838e0069 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c @@ -42,6 +42,37 @@ #include "port.h" #include "reg.h" +static int mlxsw_sp_sb_pr_write(struct mlxsw_sp *mlxsw_sp, u8 pool, + enum mlxsw_reg_sbxx_dir dir, + enum mlxsw_reg_sbpr_mode mode, u32 size) +{ + char sbpr_pl[MLXSW_REG_SBPR_LEN]; + + mlxsw_reg_sbpr_pack(sbpr_pl, pool, dir, mode, size); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbpr), sbpr_pl); +} + +static int mlxsw_sp_sb_cm_write(struct mlxsw_sp *mlxsw_sp, u8 local_port, + u8 pg_buff, enum mlxsw_reg_sbxx_dir dir, + u32 min_buff, u32 max_buff, u8 pool) +{ + char sbcm_pl[MLXSW_REG_SBCM_LEN]; + + mlxsw_reg_sbcm_pack(sbcm_pl, local_port, pg_buff, dir, + min_buff, max_buff, pool); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbcm), sbcm_pl); +} + +static int mlxsw_sp_sb_pm_write(struct mlxsw_sp *mlxsw_sp, u8 local_port, + u8 pool, enum mlxsw_reg_sbxx_dir dir, + u32 min_buff, u32 max_buff) +{ + char sbpm_pl[MLXSW_REG_SBPM_LEN]; + + mlxsw_reg_sbpm_pack(sbpm_pl, local_port, pool, dir, min_buff, max_buff); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl); +} + struct mlxsw_sp_pb { u8 index; u16 size; @@ -151,7 +182,6 @@ static const struct mlxsw_sp_sb_pool mlxsw_sp_sb_pools[] = { static int mlxsw_sp_sb_pools_init(struct mlxsw_sp *mlxsw_sp) { - char sbpr_pl[MLXSW_REG_SBPR_LEN]; int i; int err; @@ -159,9 +189,8 @@ static int mlxsw_sp_sb_pools_init(struct mlxsw_sp *mlxsw_sp) const struct mlxsw_sp_sb_pool *pool; pool = &mlxsw_sp_sb_pools[i]; - mlxsw_reg_sbpr_pack(sbpr_pl, pool->pool, pool->dir, - pool->mode, pool->size); - err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbpr), sbpr_pl); + err = mlxsw_sp_sb_pr_write(mlxsw_sp, pool->pool, pool->dir, + pool->mode, pool->size); if (err) return err; } @@ -272,7 +301,6 @@ static int mlxsw_sp_sb_cms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port, const struct mlxsw_sp_sb_cm *cms, size_t cms_len) { - char sbcm_pl[MLXSW_REG_SBCM_LEN]; int i; int err; @@ -280,9 +308,9 @@ static int mlxsw_sp_sb_cms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port, const struct mlxsw_sp_sb_cm *cm; cm = &cms[i]; - mlxsw_reg_sbcm_pack(sbcm_pl, local_port, cm->u.pg, cm->dir, - cm->min_buff, cm->max_buff, cm->pool); - err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbcm), sbcm_pl); + err = mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, cm->u.pg, + cm->dir, cm->min_buff, + cm->max_buff, cm->pool); if (err) return err; } @@ -340,7 +368,6 @@ static const struct mlxsw_sp_sb_pm mlxsw_sp_sb_pms[] = { static int mlxsw_sp_port_sb_pms_init(struct mlxsw_sp_port *mlxsw_sp_port) { - char sbpm_pl[MLXSW_REG_SBPM_LEN]; int i; int err; @@ -348,11 +375,10 @@ static int mlxsw_sp_port_sb_pms_init(struct mlxsw_sp_port *mlxsw_sp_port) const struct mlxsw_sp_sb_pm *pm; pm = &mlxsw_sp_sb_pms[i]; - mlxsw_reg_sbpm_pack(sbpm_pl, mlxsw_sp_port->local_port, - pm->pool, pm->dir, - pm->min_buff, pm->max_buff); - err = mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, - MLXSW_REG(sbpm), sbpm_pl); + err = mlxsw_sp_sb_pm_write(mlxsw_sp_port->mlxsw_sp, + mlxsw_sp_port->local_port, + pm->pool, pm->dir, + pm->min_buff, pm->max_buff); if (err) return err; } From b11c3b4018e630dfcbdc2b9fe7aaffce5da8cfd7 Mon Sep 17 00:00:00 2001 From: Jiri 
Pirko Date: Thu, 14 Apr 2016 18:19:17 +0200 Subject: [PATCH 0665/1649] mlxsw: spectrum_buffers: Push out indexes and direction out of SB structs Structs are in arrays so use array index as pool/tc/prio index. With that, there is need to maintain separate arrays for ingress and egress. Signed-off-by: Jiri Pirko Reviewed-by: Ido Schimmel Signed-off-by: David S. Miller --- .../mellanox/mlxsw/spectrum_buffers.c | 425 +++++++++--------- 1 file changed, 221 insertions(+), 204 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c index ae60838e0069..c326e586bc1c 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c @@ -73,27 +73,17 @@ static int mlxsw_sp_sb_pm_write(struct mlxsw_sp *mlxsw_sp, u8 local_port, return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl); } -struct mlxsw_sp_pb { - u8 index; - u16 size; -}; - -#define MLXSW_SP_PB(_index, _size) \ - { \ - .index = _index, \ - .size = _size, \ - } - -static const struct mlxsw_sp_pb mlxsw_sp_pbs[] = { - MLXSW_SP_PB(0, 2 * MLXSW_SP_BYTES_TO_CELLS(ETH_FRAME_LEN)), - MLXSW_SP_PB(1, 0), - MLXSW_SP_PB(2, 0), - MLXSW_SP_PB(3, 0), - MLXSW_SP_PB(4, 0), - MLXSW_SP_PB(5, 0), - MLXSW_SP_PB(6, 0), - MLXSW_SP_PB(7, 0), - MLXSW_SP_PB(9, 2 * MLXSW_SP_BYTES_TO_CELLS(MLXSW_PORT_MAX_MTU)), +static const u16 mlxsw_sp_pbs[] = { + 2 * MLXSW_SP_BYTES_TO_CELLS(ETH_FRAME_LEN), + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, /* Unused */ + 2 * MLXSW_SP_BYTES_TO_CELLS(MLXSW_PORT_MAX_MTU), }; #define MLXSW_SP_PBS_LEN ARRAY_SIZE(mlxsw_sp_pbs) @@ -106,10 +96,9 @@ static int mlxsw_sp_port_pb_init(struct mlxsw_sp_port *mlxsw_sp_port) mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0xffff, 0xffff / 2); for (i = 0; i < MLXSW_SP_PBS_LEN; i++) { - const struct mlxsw_sp_pb *pb; - - pb = &mlxsw_sp_pbs[i]; - mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, pb->index, pb->size); + if (i == 8) + continue; + mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, i, mlxsw_sp_pbs[i]); } mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, MLXSW_REG_PBMC_PORT_SHARED_BUF_IDX, 0); @@ -140,8 +129,6 @@ static int mlxsw_sp_port_headroom_init(struct mlxsw_sp_port *mlxsw_sp_port) } struct mlxsw_sp_sb_pool { - u8 pool; - enum mlxsw_reg_sbxx_dir dir; enum mlxsw_reg_sbpr_mode mode; u32 size; }; @@ -151,45 +138,46 @@ struct mlxsw_sp_sb_pool { #define MLXSW_SP_SB_POOL_EGRESS_SIZE \ (14000000 - (8 * 1500 * MLXSW_PORT_MAX_PORTS)) -#define MLXSW_SP_SB_POOL(_pool, _dir, _mode, _size) \ - { \ - .pool = _pool, \ - .dir = _dir, \ - .mode = _mode, \ - .size = _size, \ +#define MLXSW_SP_SB_POOL(_mode, _size) \ + { \ + .mode = _mode, \ + .size = _size, \ } -#define MLXSW_SP_SB_POOL_INGRESS(_pool, _size) \ - MLXSW_SP_SB_POOL(_pool, MLXSW_REG_SBXX_DIR_INGRESS, \ - MLXSW_REG_SBPR_MODE_DYNAMIC, _size) - -#define MLXSW_SP_SB_POOL_EGRESS(_pool, _size) \ - MLXSW_SP_SB_POOL(_pool, MLXSW_REG_SBXX_DIR_EGRESS, \ - MLXSW_REG_SBPR_MODE_DYNAMIC, _size) - -static const struct mlxsw_sp_sb_pool mlxsw_sp_sb_pools[] = { - MLXSW_SP_SB_POOL_INGRESS(0, MLXSW_SP_BYTES_TO_CELLS(MLXSW_SP_SB_POOL_INGRESS_SIZE)), - MLXSW_SP_SB_POOL_INGRESS(1, 0), - MLXSW_SP_SB_POOL_INGRESS(2, 0), - MLXSW_SP_SB_POOL_INGRESS(3, 0), - MLXSW_SP_SB_POOL_EGRESS(0, MLXSW_SP_BYTES_TO_CELLS(MLXSW_SP_SB_POOL_EGRESS_SIZE)), - MLXSW_SP_SB_POOL_EGRESS(1, 0), - MLXSW_SP_SB_POOL_EGRESS(2, 0), - MLXSW_SP_SB_POOL_EGRESS(2, MLXSW_SP_BYTES_TO_CELLS(MLXSW_SP_SB_POOL_EGRESS_SIZE)), +static const struct mlxsw_sp_sb_pool 
mlxsw_sp_sb_pools_ingress[] = { + MLXSW_SP_SB_POOL(MLXSW_REG_SBPR_MODE_DYNAMIC, + MLXSW_SP_BYTES_TO_CELLS(MLXSW_SP_SB_POOL_INGRESS_SIZE)), + MLXSW_SP_SB_POOL(MLXSW_REG_SBPR_MODE_DYNAMIC, 0), + MLXSW_SP_SB_POOL(MLXSW_REG_SBPR_MODE_DYNAMIC, 0), + MLXSW_SP_SB_POOL(MLXSW_REG_SBPR_MODE_DYNAMIC, 0), }; -#define MLXSW_SP_SB_POOLS_LEN ARRAY_SIZE(mlxsw_sp_sb_pools) +#define MLXSW_SP_SB_POOLS_INGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_pools_ingress) -static int mlxsw_sp_sb_pools_init(struct mlxsw_sp *mlxsw_sp) +static const struct mlxsw_sp_sb_pool mlxsw_sp_sb_pools_egress[] = { + MLXSW_SP_SB_POOL(MLXSW_REG_SBPR_MODE_DYNAMIC, + MLXSW_SP_BYTES_TO_CELLS(MLXSW_SP_SB_POOL_EGRESS_SIZE)), + MLXSW_SP_SB_POOL(MLXSW_REG_SBPR_MODE_DYNAMIC, 0), + MLXSW_SP_SB_POOL(MLXSW_REG_SBPR_MODE_DYNAMIC, 0), + MLXSW_SP_SB_POOL(MLXSW_REG_SBPR_MODE_DYNAMIC, + MLXSW_SP_BYTES_TO_CELLS(MLXSW_SP_SB_POOL_EGRESS_SIZE)), +}; + +#define MLXSW_SP_SB_POOLS_EGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_pools_egress) + +static int __mlxsw_sp_sb_pools_init(struct mlxsw_sp *mlxsw_sp, + enum mlxsw_reg_sbxx_dir dir, + const struct mlxsw_sp_sb_pool *pools, + size_t pools_len) { int i; int err; - for (i = 0; i < MLXSW_SP_SB_POOLS_LEN; i++) { + for (i = 0; i < pools_len; i++) { const struct mlxsw_sp_sb_pool *pool; - pool = &mlxsw_sp_sb_pools[i]; - err = mlxsw_sp_sb_pr_write(mlxsw_sp, pool->pool, pool->dir, + pool = &pools[i]; + err = mlxsw_sp_sb_pr_write(mlxsw_sp, i, dir, pool->mode, pool->size); if (err) return err; @@ -197,109 +185,114 @@ static int mlxsw_sp_sb_pools_init(struct mlxsw_sp *mlxsw_sp) return 0; } +static int mlxsw_sp_sb_pools_init(struct mlxsw_sp *mlxsw_sp) +{ + int err; + + err = __mlxsw_sp_sb_pools_init(mlxsw_sp, MLXSW_REG_SBXX_DIR_INGRESS, + mlxsw_sp_sb_pools_ingress, + MLXSW_SP_SB_POOLS_INGRESS_LEN); + if (err) + return err; + return __mlxsw_sp_sb_pools_init(mlxsw_sp, MLXSW_REG_SBXX_DIR_EGRESS, + mlxsw_sp_sb_pools_egress, + MLXSW_SP_SB_POOLS_EGRESS_LEN); +} + struct mlxsw_sp_sb_cm { - union { - u8 pg; - u8 tc; - } u; - enum mlxsw_reg_sbxx_dir dir; u32 min_buff; u32 max_buff; u8 pool; }; -#define MLXSW_SP_SB_CM(_pg_tc, _dir, _min_buff, _max_buff, _pool) \ - { \ - .u.pg = _pg_tc, \ - .dir = _dir, \ - .min_buff = _min_buff, \ - .max_buff = _max_buff, \ - .pool = _pool, \ +#define MLXSW_SP_SB_CM(_min_buff, _max_buff, _pool) \ + { \ + .min_buff = _min_buff, \ + .max_buff = _max_buff, \ + .pool = _pool, \ } -#define MLXSW_SP_SB_CM_INGRESS(_pg, _min_buff, _max_buff) \ - MLXSW_SP_SB_CM(_pg, MLXSW_REG_SBXX_DIR_INGRESS, \ - _min_buff, _max_buff, 0) - -#define MLXSW_SP_SB_CM_EGRESS(_tc, _min_buff, _max_buff) \ - MLXSW_SP_SB_CM(_tc, MLXSW_REG_SBXX_DIR_EGRESS, \ - _min_buff, _max_buff, 0) - -#define MLXSW_SP_CPU_PORT_SB_CM_EGRESS(_tc) \ - MLXSW_SP_SB_CM(_tc, MLXSW_REG_SBXX_DIR_EGRESS, 104, 2, 3) - -static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms[] = { - MLXSW_SP_SB_CM_INGRESS(0, MLXSW_SP_BYTES_TO_CELLS(10000), 8), - MLXSW_SP_SB_CM_INGRESS(1, 0, 0), - MLXSW_SP_SB_CM_INGRESS(2, 0, 0), - MLXSW_SP_SB_CM_INGRESS(3, 0, 0), - MLXSW_SP_SB_CM_INGRESS(4, 0, 0), - MLXSW_SP_SB_CM_INGRESS(5, 0, 0), - MLXSW_SP_SB_CM_INGRESS(6, 0, 0), - MLXSW_SP_SB_CM_INGRESS(7, 0, 0), - MLXSW_SP_SB_CM_INGRESS(9, MLXSW_SP_BYTES_TO_CELLS(20000), 0xff), - MLXSW_SP_SB_CM_EGRESS(0, MLXSW_SP_BYTES_TO_CELLS(1500), 9), - MLXSW_SP_SB_CM_EGRESS(1, MLXSW_SP_BYTES_TO_CELLS(1500), 9), - MLXSW_SP_SB_CM_EGRESS(2, MLXSW_SP_BYTES_TO_CELLS(1500), 9), - MLXSW_SP_SB_CM_EGRESS(3, MLXSW_SP_BYTES_TO_CELLS(1500), 9), - MLXSW_SP_SB_CM_EGRESS(4, MLXSW_SP_BYTES_TO_CELLS(1500), 9), - MLXSW_SP_SB_CM_EGRESS(5, 
MLXSW_SP_BYTES_TO_CELLS(1500), 9), - MLXSW_SP_SB_CM_EGRESS(6, MLXSW_SP_BYTES_TO_CELLS(1500), 9), - MLXSW_SP_SB_CM_EGRESS(7, MLXSW_SP_BYTES_TO_CELLS(1500), 9), - MLXSW_SP_SB_CM_EGRESS(8, 0, 0), - MLXSW_SP_SB_CM_EGRESS(9, 0, 0), - MLXSW_SP_SB_CM_EGRESS(10, 0, 0), - MLXSW_SP_SB_CM_EGRESS(11, 0, 0), - MLXSW_SP_SB_CM_EGRESS(12, 0, 0), - MLXSW_SP_SB_CM_EGRESS(13, 0, 0), - MLXSW_SP_SB_CM_EGRESS(14, 0, 0), - MLXSW_SP_SB_CM_EGRESS(15, 0, 0), - MLXSW_SP_SB_CM_EGRESS(16, 1, 0xff), +static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms_ingress[] = { + MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(10000), 8, 0), + MLXSW_SP_SB_CM(0, 0, 0), + MLXSW_SP_SB_CM(0, 0, 0), + MLXSW_SP_SB_CM(0, 0, 0), + MLXSW_SP_SB_CM(0, 0, 0), + MLXSW_SP_SB_CM(0, 0, 0), + MLXSW_SP_SB_CM(0, 0, 0), + MLXSW_SP_SB_CM(0, 0, 0), + MLXSW_SP_SB_CM(0, 0, 0), /* dummy, this PG does not exist */ + MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0), }; -#define MLXSW_SP_SB_CMS_LEN ARRAY_SIZE(mlxsw_sp_sb_cms) +#define MLXSW_SP_SB_CMS_INGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_cms_ingress) + +static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms_egress[] = { + MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0), + MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0), + MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0), + MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0), + MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0), + MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0), + MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0), + MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0), + MLXSW_SP_SB_CM(0, 0, 0), + MLXSW_SP_SB_CM(0, 0, 0), + MLXSW_SP_SB_CM(0, 0, 0), + MLXSW_SP_SB_CM(0, 0, 0), + MLXSW_SP_SB_CM(0, 0, 0), + MLXSW_SP_SB_CM(0, 0, 0), + MLXSW_SP_SB_CM(0, 0, 0), + MLXSW_SP_SB_CM(0, 0, 0), + MLXSW_SP_SB_CM(1, 0xff, 0), +}; + +#define MLXSW_SP_SB_CMS_EGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_cms_egress) + +#define MLXSW_SP_CPU_PORT_SB_CM MLXSW_SP_SB_CM(104, 2, 3) static const struct mlxsw_sp_sb_cm mlxsw_sp_cpu_port_sb_cms[] = { - MLXSW_SP_CPU_PORT_SB_CM_EGRESS(0), - MLXSW_SP_CPU_PORT_SB_CM_EGRESS(1), - MLXSW_SP_CPU_PORT_SB_CM_EGRESS(2), - MLXSW_SP_CPU_PORT_SB_CM_EGRESS(3), - MLXSW_SP_CPU_PORT_SB_CM_EGRESS(4), - MLXSW_SP_CPU_PORT_SB_CM_EGRESS(5), - MLXSW_SP_CPU_PORT_SB_CM_EGRESS(6), - MLXSW_SP_CPU_PORT_SB_CM_EGRESS(7), - MLXSW_SP_CPU_PORT_SB_CM_EGRESS(8), - MLXSW_SP_CPU_PORT_SB_CM_EGRESS(9), - MLXSW_SP_CPU_PORT_SB_CM_EGRESS(10), - MLXSW_SP_CPU_PORT_SB_CM_EGRESS(11), - MLXSW_SP_CPU_PORT_SB_CM_EGRESS(12), - MLXSW_SP_CPU_PORT_SB_CM_EGRESS(13), - MLXSW_SP_CPU_PORT_SB_CM_EGRESS(14), - MLXSW_SP_CPU_PORT_SB_CM_EGRESS(15), - MLXSW_SP_CPU_PORT_SB_CM_EGRESS(16), - MLXSW_SP_CPU_PORT_SB_CM_EGRESS(17), - MLXSW_SP_CPU_PORT_SB_CM_EGRESS(18), - MLXSW_SP_CPU_PORT_SB_CM_EGRESS(19), - MLXSW_SP_CPU_PORT_SB_CM_EGRESS(20), - MLXSW_SP_CPU_PORT_SB_CM_EGRESS(21), - MLXSW_SP_CPU_PORT_SB_CM_EGRESS(22), - MLXSW_SP_CPU_PORT_SB_CM_EGRESS(23), - MLXSW_SP_CPU_PORT_SB_CM_EGRESS(24), - MLXSW_SP_CPU_PORT_SB_CM_EGRESS(25), - MLXSW_SP_CPU_PORT_SB_CM_EGRESS(26), - MLXSW_SP_CPU_PORT_SB_CM_EGRESS(27), - MLXSW_SP_CPU_PORT_SB_CM_EGRESS(28), - MLXSW_SP_CPU_PORT_SB_CM_EGRESS(29), - MLXSW_SP_CPU_PORT_SB_CM_EGRESS(30), - MLXSW_SP_CPU_PORT_SB_CM_EGRESS(31), + MLXSW_SP_CPU_PORT_SB_CM, + MLXSW_SP_CPU_PORT_SB_CM, + MLXSW_SP_CPU_PORT_SB_CM, + MLXSW_SP_CPU_PORT_SB_CM, + MLXSW_SP_CPU_PORT_SB_CM, + MLXSW_SP_CPU_PORT_SB_CM, + MLXSW_SP_CPU_PORT_SB_CM, + MLXSW_SP_CPU_PORT_SB_CM, + MLXSW_SP_CPU_PORT_SB_CM, + MLXSW_SP_CPU_PORT_SB_CM, + MLXSW_SP_CPU_PORT_SB_CM, + MLXSW_SP_CPU_PORT_SB_CM, + MLXSW_SP_CPU_PORT_SB_CM, 
+ MLXSW_SP_CPU_PORT_SB_CM, + MLXSW_SP_CPU_PORT_SB_CM, + MLXSW_SP_CPU_PORT_SB_CM, + MLXSW_SP_CPU_PORT_SB_CM, + MLXSW_SP_CPU_PORT_SB_CM, + MLXSW_SP_CPU_PORT_SB_CM, + MLXSW_SP_CPU_PORT_SB_CM, + MLXSW_SP_CPU_PORT_SB_CM, + MLXSW_SP_CPU_PORT_SB_CM, + MLXSW_SP_CPU_PORT_SB_CM, + MLXSW_SP_CPU_PORT_SB_CM, + MLXSW_SP_CPU_PORT_SB_CM, + MLXSW_SP_CPU_PORT_SB_CM, + MLXSW_SP_CPU_PORT_SB_CM, + MLXSW_SP_CPU_PORT_SB_CM, + MLXSW_SP_CPU_PORT_SB_CM, + MLXSW_SP_CPU_PORT_SB_CM, + MLXSW_SP_CPU_PORT_SB_CM, + MLXSW_SP_CPU_PORT_SB_CM, }; #define MLXSW_SP_CPU_PORT_SB_MCS_LEN \ ARRAY_SIZE(mlxsw_sp_cpu_port_sb_cms) -static int mlxsw_sp_sb_cms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port, - const struct mlxsw_sp_sb_cm *cms, - size_t cms_len) +static int __mlxsw_sp_sb_cms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port, + enum mlxsw_reg_sbxx_dir dir, + const struct mlxsw_sp_sb_cm *cms, + size_t cms_len) { int i; int err; @@ -307,10 +300,12 @@ static int mlxsw_sp_sb_cms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port, for (i = 0; i < cms_len; i++) { const struct mlxsw_sp_sb_cm *cm; + if (i == 8 && dir == MLXSW_REG_SBXX_DIR_INGRESS) + continue; /* PG number 8 does not exist, skip it */ cm = &cms[i]; - err = mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, cm->u.pg, - cm->dir, cm->min_buff, - cm->max_buff, cm->pool); + err = mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, i, dir, + cm->min_buff, cm->max_buff, + cm->pool); if (err) return err; } @@ -319,65 +314,71 @@ static int mlxsw_sp_sb_cms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port, static int mlxsw_sp_port_sb_cms_init(struct mlxsw_sp_port *mlxsw_sp_port) { - return mlxsw_sp_sb_cms_init(mlxsw_sp_port->mlxsw_sp, - mlxsw_sp_port->local_port, mlxsw_sp_sb_cms, - MLXSW_SP_SB_CMS_LEN); + int err; + + err = __mlxsw_sp_sb_cms_init(mlxsw_sp_port->mlxsw_sp, + mlxsw_sp_port->local_port, + MLXSW_REG_SBXX_DIR_INGRESS, + mlxsw_sp_sb_cms_ingress, + MLXSW_SP_SB_CMS_INGRESS_LEN); + if (err) + return err; + return __mlxsw_sp_sb_cms_init(mlxsw_sp_port->mlxsw_sp, + mlxsw_sp_port->local_port, + MLXSW_REG_SBXX_DIR_EGRESS, + mlxsw_sp_sb_cms_egress, + MLXSW_SP_SB_CMS_EGRESS_LEN); } static int mlxsw_sp_cpu_port_sb_cms_init(struct mlxsw_sp *mlxsw_sp) { - return mlxsw_sp_sb_cms_init(mlxsw_sp, 0, mlxsw_sp_cpu_port_sb_cms, - MLXSW_SP_CPU_PORT_SB_MCS_LEN); + return __mlxsw_sp_sb_cms_init(mlxsw_sp, 0, MLXSW_REG_SBXX_DIR_EGRESS, + mlxsw_sp_cpu_port_sb_cms, + MLXSW_SP_CPU_PORT_SB_MCS_LEN); } struct mlxsw_sp_sb_pm { - u8 pool; - enum mlxsw_reg_sbxx_dir dir; u32 min_buff; u32 max_buff; }; -#define MLXSW_SP_SB_PM(_pool, _dir, _min_buff, _max_buff) \ - { \ - .pool = _pool, \ - .dir = _dir, \ - .min_buff = _min_buff, \ - .max_buff = _max_buff, \ +#define MLXSW_SP_SB_PM(_min_buff, _max_buff) \ + { \ + .min_buff = _min_buff, \ + .max_buff = _max_buff, \ } -#define MLXSW_SP_SB_PM_INGRESS(_pool, _min_buff, _max_buff) \ - MLXSW_SP_SB_PM(_pool, MLXSW_REG_SBXX_DIR_INGRESS, \ - _min_buff, _max_buff) - -#define MLXSW_SP_SB_PM_EGRESS(_pool, _min_buff, _max_buff) \ - MLXSW_SP_SB_PM(_pool, MLXSW_REG_SBXX_DIR_EGRESS, \ - _min_buff, _max_buff) - -static const struct mlxsw_sp_sb_pm mlxsw_sp_sb_pms[] = { - MLXSW_SP_SB_PM_INGRESS(0, 0, 0xff), - MLXSW_SP_SB_PM_INGRESS(1, 0, 0), - MLXSW_SP_SB_PM_INGRESS(2, 0, 0), - MLXSW_SP_SB_PM_INGRESS(3, 0, 0), - MLXSW_SP_SB_PM_EGRESS(0, 0, 7), - MLXSW_SP_SB_PM_EGRESS(1, 0, 0), - MLXSW_SP_SB_PM_EGRESS(2, 0, 0), - MLXSW_SP_SB_PM_EGRESS(3, 0, 0), +static const struct mlxsw_sp_sb_pm mlxsw_sp_sb_pms_ingress[] = { + MLXSW_SP_SB_PM(0, 0xff), + MLXSW_SP_SB_PM(0, 0), + MLXSW_SP_SB_PM(0, 0), + MLXSW_SP_SB_PM(0, 
0), }; -#define MLXSW_SP_SB_PMS_LEN ARRAY_SIZE(mlxsw_sp_sb_pms) +#define MLXSW_SP_SB_PMS_INGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_pms_ingress) -static int mlxsw_sp_port_sb_pms_init(struct mlxsw_sp_port *mlxsw_sp_port) +static const struct mlxsw_sp_sb_pm mlxsw_sp_sb_pms_egress[] = { + MLXSW_SP_SB_PM(0, 7), + MLXSW_SP_SB_PM(0, 0), + MLXSW_SP_SB_PM(0, 0), + MLXSW_SP_SB_PM(0, 0), +}; + +#define MLXSW_SP_SB_PMS_EGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_pms_egress) + +static int __mlxsw_sp_port_sb_pms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port, + enum mlxsw_reg_sbxx_dir dir, + const struct mlxsw_sp_sb_pm *pms, + size_t pms_len) { int i; int err; - for (i = 0; i < MLXSW_SP_SB_PMS_LEN; i++) { + for (i = 0; i < pms_len; i++) { const struct mlxsw_sp_sb_pm *pm; - pm = &mlxsw_sp_sb_pms[i]; - err = mlxsw_sp_sb_pm_write(mlxsw_sp_port->mlxsw_sp, - mlxsw_sp_port->local_port, - pm->pool, pm->dir, + pm = &pms[i]; + err = mlxsw_sp_sb_pm_write(mlxsw_sp, local_port, i, dir, pm->min_buff, pm->max_buff); if (err) return err; @@ -385,37 +386,53 @@ static int mlxsw_sp_port_sb_pms_init(struct mlxsw_sp_port *mlxsw_sp_port) return 0; } +static int mlxsw_sp_port_sb_pms_init(struct mlxsw_sp_port *mlxsw_sp_port) +{ + int err; + + err = __mlxsw_sp_port_sb_pms_init(mlxsw_sp_port->mlxsw_sp, + mlxsw_sp_port->local_port, + MLXSW_REG_SBXX_DIR_INGRESS, + mlxsw_sp_sb_pms_ingress, + MLXSW_SP_SB_PMS_INGRESS_LEN); + if (err) + return err; + return __mlxsw_sp_port_sb_pms_init(mlxsw_sp_port->mlxsw_sp, + mlxsw_sp_port->local_port, + MLXSW_REG_SBXX_DIR_EGRESS, + mlxsw_sp_sb_pms_egress, + MLXSW_SP_SB_PMS_EGRESS_LEN); +} + struct mlxsw_sp_sb_mm { - u8 prio; u32 min_buff; u32 max_buff; u8 pool; }; -#define MLXSW_SP_SB_MM(_prio, _min_buff, _max_buff, _pool) \ - { \ - .prio = _prio, \ - .min_buff = _min_buff, \ - .max_buff = _max_buff, \ - .pool = _pool, \ +#define MLXSW_SP_SB_MM(_min_buff, _max_buff, _pool) \ + { \ + .min_buff = _min_buff, \ + .max_buff = _max_buff, \ + .pool = _pool, \ } static const struct mlxsw_sp_sb_mm mlxsw_sp_sb_mms[] = { - MLXSW_SP_SB_MM(0, MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0), - MLXSW_SP_SB_MM(1, MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0), - MLXSW_SP_SB_MM(2, MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0), - MLXSW_SP_SB_MM(3, MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0), - MLXSW_SP_SB_MM(4, MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0), - MLXSW_SP_SB_MM(5, MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0), - MLXSW_SP_SB_MM(6, MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0), - MLXSW_SP_SB_MM(7, MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0), - MLXSW_SP_SB_MM(8, MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0), - MLXSW_SP_SB_MM(9, MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0), - MLXSW_SP_SB_MM(10, MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0), - MLXSW_SP_SB_MM(11, MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0), - MLXSW_SP_SB_MM(12, MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0), - MLXSW_SP_SB_MM(13, MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0), - MLXSW_SP_SB_MM(14, MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0), + MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0), + MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0), + MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0), + MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0), + MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0), + MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0), + MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0), + MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0), + MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0), + MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0), + 
MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0), + MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0), + MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0), + MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0), + MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0), }; #define MLXSW_SP_SB_MMS_LEN ARRAY_SIZE(mlxsw_sp_sb_mms) @@ -430,7 +447,7 @@ static int mlxsw_sp_sb_mms_init(struct mlxsw_sp *mlxsw_sp) const struct mlxsw_sp_sb_mm *mc; mc = &mlxsw_sp_sb_mms[i]; - mlxsw_reg_sbmm_pack(sbmm_pl, mc->prio, mc->min_buff, + mlxsw_reg_sbmm_pack(sbmm_pl, i, mc->min_buff, mc->max_buff, mc->pool); err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbmm), sbmm_pl); if (err) From aa99bc70bac1bb6815ad30248d0e34b14cdec575 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Thu, 14 Apr 2016 18:19:18 +0200 Subject: [PATCH 0666/1649] mlxsw: spectrum_buffers: Rename "pool" to "pr" in initialization Be consintent with rest of the registers (pm, cm) and use "pr" here. Signed-off-by: Jiri Pirko Reviewed-by: Ido Schimmel Signed-off-by: David S. Miller --- .../mellanox/mlxsw/spectrum_buffers.c | 70 +++++++++---------- 1 file changed, 35 insertions(+), 35 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c index c326e586bc1c..15bd5aa23a2c 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c @@ -128,75 +128,75 @@ static int mlxsw_sp_port_headroom_init(struct mlxsw_sp_port *mlxsw_sp_port) return mlxsw_sp_port_pb_prio_init(mlxsw_sp_port); } -struct mlxsw_sp_sb_pool { +struct mlxsw_sp_sb_pr { enum mlxsw_reg_sbpr_mode mode; u32 size; }; -#define MLXSW_SP_SB_POOL_INGRESS_SIZE \ +#define MLXSW_SP_SB_PR_INGRESS_SIZE \ (15000000 - (2 * 20000 * MLXSW_PORT_MAX_PORTS)) -#define MLXSW_SP_SB_POOL_EGRESS_SIZE \ +#define MLXSW_SP_SB_PR_EGRESS_SIZE \ (14000000 - (8 * 1500 * MLXSW_PORT_MAX_PORTS)) -#define MLXSW_SP_SB_POOL(_mode, _size) \ +#define MLXSW_SP_SB_PR(_mode, _size) \ { \ .mode = _mode, \ .size = _size, \ } -static const struct mlxsw_sp_sb_pool mlxsw_sp_sb_pools_ingress[] = { - MLXSW_SP_SB_POOL(MLXSW_REG_SBPR_MODE_DYNAMIC, - MLXSW_SP_BYTES_TO_CELLS(MLXSW_SP_SB_POOL_INGRESS_SIZE)), - MLXSW_SP_SB_POOL(MLXSW_REG_SBPR_MODE_DYNAMIC, 0), - MLXSW_SP_SB_POOL(MLXSW_REG_SBPR_MODE_DYNAMIC, 0), - MLXSW_SP_SB_POOL(MLXSW_REG_SBPR_MODE_DYNAMIC, 0), +static const struct mlxsw_sp_sb_pr mlxsw_sp_sb_prs_ingress[] = { + MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, + MLXSW_SP_BYTES_TO_CELLS(MLXSW_SP_SB_PR_INGRESS_SIZE)), + MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0), + MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0), + MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0), }; -#define MLXSW_SP_SB_POOLS_INGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_pools_ingress) +#define MLXSW_SP_SB_PRS_INGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_prs_ingress) -static const struct mlxsw_sp_sb_pool mlxsw_sp_sb_pools_egress[] = { - MLXSW_SP_SB_POOL(MLXSW_REG_SBPR_MODE_DYNAMIC, - MLXSW_SP_BYTES_TO_CELLS(MLXSW_SP_SB_POOL_EGRESS_SIZE)), - MLXSW_SP_SB_POOL(MLXSW_REG_SBPR_MODE_DYNAMIC, 0), - MLXSW_SP_SB_POOL(MLXSW_REG_SBPR_MODE_DYNAMIC, 0), - MLXSW_SP_SB_POOL(MLXSW_REG_SBPR_MODE_DYNAMIC, - MLXSW_SP_BYTES_TO_CELLS(MLXSW_SP_SB_POOL_EGRESS_SIZE)), +static const struct mlxsw_sp_sb_pr mlxsw_sp_sb_prs_egress[] = { + MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, + MLXSW_SP_BYTES_TO_CELLS(MLXSW_SP_SB_PR_EGRESS_SIZE)), + MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0), + MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0), + 
MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, + MLXSW_SP_BYTES_TO_CELLS(MLXSW_SP_SB_PR_EGRESS_SIZE)), }; -#define MLXSW_SP_SB_POOLS_EGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_pools_egress) +#define MLXSW_SP_SB_PRS_EGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_prs_egress) -static int __mlxsw_sp_sb_pools_init(struct mlxsw_sp *mlxsw_sp, - enum mlxsw_reg_sbxx_dir dir, - const struct mlxsw_sp_sb_pool *pools, - size_t pools_len) +static int __mlxsw_sp_sb_prs_init(struct mlxsw_sp *mlxsw_sp, + enum mlxsw_reg_sbxx_dir dir, + const struct mlxsw_sp_sb_pr *prs, + size_t prs_len) { int i; int err; - for (i = 0; i < pools_len; i++) { - const struct mlxsw_sp_sb_pool *pool; + for (i = 0; i < prs_len; i++) { + const struct mlxsw_sp_sb_pr *pr; - pool = &pools[i]; + pr = &prs[i]; err = mlxsw_sp_sb_pr_write(mlxsw_sp, i, dir, - pool->mode, pool->size); + pr->mode, pr->size); if (err) return err; } return 0; } -static int mlxsw_sp_sb_pools_init(struct mlxsw_sp *mlxsw_sp) +static int mlxsw_sp_sb_prs_init(struct mlxsw_sp *mlxsw_sp) { int err; - err = __mlxsw_sp_sb_pools_init(mlxsw_sp, MLXSW_REG_SBXX_DIR_INGRESS, - mlxsw_sp_sb_pools_ingress, - MLXSW_SP_SB_POOLS_INGRESS_LEN); + err = __mlxsw_sp_sb_prs_init(mlxsw_sp, MLXSW_REG_SBXX_DIR_INGRESS, + mlxsw_sp_sb_prs_ingress, + MLXSW_SP_SB_PRS_INGRESS_LEN); if (err) return err; - return __mlxsw_sp_sb_pools_init(mlxsw_sp, MLXSW_REG_SBXX_DIR_EGRESS, - mlxsw_sp_sb_pools_egress, - MLXSW_SP_SB_POOLS_EGRESS_LEN); + return __mlxsw_sp_sb_prs_init(mlxsw_sp, MLXSW_REG_SBXX_DIR_EGRESS, + mlxsw_sp_sb_prs_egress, + MLXSW_SP_SB_PRS_EGRESS_LEN); } struct mlxsw_sp_sb_cm { @@ -460,7 +460,7 @@ int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp) { int err; - err = mlxsw_sp_sb_pools_init(mlxsw_sp); + err = mlxsw_sp_sb_prs_init(mlxsw_sp); if (err) return err; err = mlxsw_sp_cpu_port_sb_cms_init(mlxsw_sp); From 078f9c7132cbbe12c2484a817ca5f477d1641b61 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Thu, 14 Apr 2016 18:19:19 +0200 Subject: [PATCH 0667/1649] mlxsw: spectrum_buffers: Cache shared buffer configuration In order to achieve faster dumping of current setting and also in order to provide possibility to get pool mode without a need to query hardware, do cache the configuration in driver. Signed-off-by: Jiri Pirko Reviewed-by: Ido Schimmel Signed-off-by: David S. 
Miller --- .../net/ethernet/mellanox/mlxsw/spectrum.h | 28 +++++++ .../mellanox/mlxsw/spectrum_buffers.c | 73 ++++++++++++++----- 2 files changed, 82 insertions(+), 19 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index 361b0c270b56..790c292b3230 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -117,6 +117,33 @@ static inline bool mlxsw_sp_fid_is_vfid(u16 fid) return fid >= MLXSW_SP_VFID_BASE; } +struct mlxsw_sp_sb_pr { + enum mlxsw_reg_sbpr_mode mode; + u32 size; +}; + +struct mlxsw_sp_sb_cm { + u32 min_buff; + u32 max_buff; + u8 pool; +}; + +struct mlxsw_sp_sb_pm { + u32 min_buff; + u32 max_buff; +}; + +#define MLXSW_SP_SB_POOL_COUNT 4 +#define MLXSW_SP_SB_TC_COUNT 8 + +struct mlxsw_sp_sb { + struct mlxsw_sp_sb_pr prs[2][MLXSW_SP_SB_POOL_COUNT]; + struct { + struct mlxsw_sp_sb_cm cms[2][MLXSW_SP_SB_TC_COUNT]; + struct mlxsw_sp_sb_pm pms[2][MLXSW_SP_SB_POOL_COUNT]; + } ports[MLXSW_PORT_MAX_PORTS]; +}; + struct mlxsw_sp { struct { struct list_head list; @@ -147,6 +174,7 @@ struct mlxsw_sp { struct mlxsw_sp_upper master_bridge; struct mlxsw_sp_upper lags[MLXSW_SP_LAG_MAX]; u8 port_to_module[MLXSW_PORT_MAX_PORTS]; + struct mlxsw_sp_sb sb; }; static inline struct mlxsw_sp_upper * diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c index 15bd5aa23a2c..b43f7d36ec64 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c @@ -42,14 +42,44 @@ #include "port.h" #include "reg.h" +static struct mlxsw_sp_sb_pr *mlxsw_sp_sb_pr_get(struct mlxsw_sp *mlxsw_sp, + u8 pool, + enum mlxsw_reg_sbxx_dir dir) +{ + return &mlxsw_sp->sb.prs[dir][pool]; +} + +static struct mlxsw_sp_sb_cm *mlxsw_sp_sb_cm_get(struct mlxsw_sp *mlxsw_sp, + u8 local_port, u8 pg_buff, + enum mlxsw_reg_sbxx_dir dir) +{ + return &mlxsw_sp->sb.ports[local_port].cms[dir][pg_buff]; +} + +static struct mlxsw_sp_sb_pm *mlxsw_sp_sb_pm_get(struct mlxsw_sp *mlxsw_sp, + u8 local_port, u8 pool, + enum mlxsw_reg_sbxx_dir dir) +{ + return &mlxsw_sp->sb.ports[local_port].pms[dir][pool]; +} + static int mlxsw_sp_sb_pr_write(struct mlxsw_sp *mlxsw_sp, u8 pool, enum mlxsw_reg_sbxx_dir dir, enum mlxsw_reg_sbpr_mode mode, u32 size) { char sbpr_pl[MLXSW_REG_SBPR_LEN]; + struct mlxsw_sp_sb_pr *pr; + int err; mlxsw_reg_sbpr_pack(sbpr_pl, pool, dir, mode, size); - return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbpr), sbpr_pl); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbpr), sbpr_pl); + if (err) + return err; + + pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool, dir); + pr->mode = mode; + pr->size = size; + return 0; } static int mlxsw_sp_sb_cm_write(struct mlxsw_sp *mlxsw_sp, u8 local_port, @@ -57,10 +87,22 @@ static int mlxsw_sp_sb_cm_write(struct mlxsw_sp *mlxsw_sp, u8 local_port, u32 min_buff, u32 max_buff, u8 pool) { char sbcm_pl[MLXSW_REG_SBCM_LEN]; + int err; mlxsw_reg_sbcm_pack(sbcm_pl, local_port, pg_buff, dir, min_buff, max_buff, pool); - return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbcm), sbcm_pl); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbcm), sbcm_pl); + if (err) + return err; + if (pg_buff < MLXSW_SP_SB_TC_COUNT) { + struct mlxsw_sp_sb_cm *cm; + + cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, pg_buff, dir); + cm->min_buff = min_buff; + cm->max_buff = max_buff; + cm->pool = pool; + } + return 0; } static int mlxsw_sp_sb_pm_write(struct mlxsw_sp 
*mlxsw_sp, u8 local_port, @@ -68,9 +110,18 @@ static int mlxsw_sp_sb_pm_write(struct mlxsw_sp *mlxsw_sp, u8 local_port, u32 min_buff, u32 max_buff) { char sbpm_pl[MLXSW_REG_SBPM_LEN]; + struct mlxsw_sp_sb_pm *pm; + int err; mlxsw_reg_sbpm_pack(sbpm_pl, local_port, pool, dir, min_buff, max_buff); - return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl); + if (err) + return err; + + pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port, pool, dir); + pm->min_buff = min_buff; + pm->max_buff = max_buff; + return 0; } static const u16 mlxsw_sp_pbs[] = { @@ -128,11 +179,6 @@ static int mlxsw_sp_port_headroom_init(struct mlxsw_sp_port *mlxsw_sp_port) return mlxsw_sp_port_pb_prio_init(mlxsw_sp_port); } -struct mlxsw_sp_sb_pr { - enum mlxsw_reg_sbpr_mode mode; - u32 size; -}; - #define MLXSW_SP_SB_PR_INGRESS_SIZE \ (15000000 - (2 * 20000 * MLXSW_PORT_MAX_PORTS)) #define MLXSW_SP_SB_PR_EGRESS_SIZE \ @@ -199,12 +245,6 @@ static int mlxsw_sp_sb_prs_init(struct mlxsw_sp *mlxsw_sp) MLXSW_SP_SB_PRS_EGRESS_LEN); } -struct mlxsw_sp_sb_cm { - u32 min_buff; - u32 max_buff; - u8 pool; -}; - #define MLXSW_SP_SB_CM(_min_buff, _max_buff, _pool) \ { \ .min_buff = _min_buff, \ @@ -337,11 +377,6 @@ static int mlxsw_sp_cpu_port_sb_cms_init(struct mlxsw_sp *mlxsw_sp) MLXSW_SP_CPU_PORT_SB_MCS_LEN); } -struct mlxsw_sp_sb_pm { - u32 min_buff; - u32 max_buff; -}; - #define MLXSW_SP_SB_PM(_min_buff, _max_buff) \ { \ .min_buff = _min_buff, \ From 5408f7cba341b26edf2fa5bcaaa37e52301830e2 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Thu, 14 Apr 2016 18:19:20 +0200 Subject: [PATCH 0668/1649] mlxsw: spectrum_buffers: Remove eg pool 3 default init and CPU port TC binding to it Since there is no congestion control for CPU port traffic, we can change the CPU port TC binding to pool 0 with min_buff and max_buff zeroed. Remove initialization for egress pool 3 since it is no longer used by default. Signed-off-by: Jiri Pirko Reviewed-by: Ido Schimmel Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c index b43f7d36ec64..dc57d779ef9d 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c @@ -205,8 +205,7 @@ static const struct mlxsw_sp_sb_pr mlxsw_sp_sb_prs_egress[] = { MLXSW_SP_BYTES_TO_CELLS(MLXSW_SP_SB_PR_EGRESS_SIZE)), MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0), MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0), - MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, - MLXSW_SP_BYTES_TO_CELLS(MLXSW_SP_SB_PR_EGRESS_SIZE)), + MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0), }; #define MLXSW_SP_SB_PRS_EGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_prs_egress) @@ -289,7 +288,7 @@ static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms_egress[] = { #define MLXSW_SP_SB_CMS_EGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_cms_egress) -#define MLXSW_SP_CPU_PORT_SB_CM MLXSW_SP_SB_CM(104, 2, 3) +#define MLXSW_SP_CPU_PORT_SB_CM MLXSW_SP_SB_CM(0, 0, 0) static const struct mlxsw_sp_sb_cm mlxsw_sp_cpu_port_sb_cms[] = { MLXSW_SP_CPU_PORT_SB_CM, From bc872506f58c21d1ee44b1303a5f5d00354e72c5 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Thu, 14 Apr 2016 18:19:21 +0200 Subject: [PATCH 0669/1649] mlxsw: spectrum_buffers: Change initialization of PG 9 As explained in commit ff6551ec0c27 ("mlxsw: spectrum: Correctly configure headroom size") control packets are directed to priority group buffer 9 (PG9) in the ports' headroom buffers. Since we don't want to drop control packets in case they can't be admitted to the switch's shared buffer we bind PG9 to a different ingress pool from the one used by all other PGs. Unlike other PGs, we currently don't expose the binding between PG9 to a pool and leave it fixed. Signed-off-by: Jiri Pirko Signed-off-by: Ido Schimmel Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c index dc57d779ef9d..7ee2315c11f5 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c @@ -181,6 +181,7 @@ static int mlxsw_sp_port_headroom_init(struct mlxsw_sp_port *mlxsw_sp_port) #define MLXSW_SP_SB_PR_INGRESS_SIZE \ (15000000 - (2 * 20000 * MLXSW_PORT_MAX_PORTS)) +#define MLXSW_SP_SB_PR_INGRESS_MNG_SIZE (200 * 1000) #define MLXSW_SP_SB_PR_EGRESS_SIZE \ (14000000 - (8 * 1500 * MLXSW_PORT_MAX_PORTS)) @@ -195,7 +196,8 @@ static const struct mlxsw_sp_sb_pr mlxsw_sp_sb_prs_ingress[] = { MLXSW_SP_BYTES_TO_CELLS(MLXSW_SP_SB_PR_INGRESS_SIZE)), MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0), MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0), - MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0), + MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, + MLXSW_SP_BYTES_TO_CELLS(MLXSW_SP_SB_PR_INGRESS_MNG_SIZE)), }; #define MLXSW_SP_SB_PRS_INGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_prs_ingress) @@ -261,7 +263,7 @@ static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms_ingress[] = { MLXSW_SP_SB_CM(0, 0, 0), MLXSW_SP_SB_CM(0, 0, 0), MLXSW_SP_SB_CM(0, 0, 0), /* dummy, this PG does not exist */ - MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0), + MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(20000), 1, 3), }; #define MLXSW_SP_SB_CMS_INGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_cms_ingress) @@ -386,7 +388,7 @@ static const struct mlxsw_sp_sb_pm mlxsw_sp_sb_pms_ingress[] = { MLXSW_SP_SB_PM(0, 0xff), MLXSW_SP_SB_PM(0, 0), MLXSW_SP_SB_PM(0, 0), - MLXSW_SP_SB_PM(0, 0), + MLXSW_SP_SB_PM(0, 0xff), }; #define MLXSW_SP_SB_PMS_INGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_pms_ingress) From c30a53c7de5e52249cd5381078f84082449ecc86 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Thu, 14 Apr 2016 18:19:22 +0200 Subject: [PATCH 0670/1649] mlxsw: spectrum_buffers: Get max_buff defaults into limits exposed to user Although the device supports max_buff magic values 0 and 0xff, these are not exposed to the user via devlink. Therefore, adjust the default values to be within configurable range. Signed-off-by: Jiri Pirko Reviewed-by: Ido Schimmel Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/reg.h | 4 +++ .../mellanox/mlxsw/spectrum_buffers.c | 28 +++++++++---------- 2 files changed, 18 insertions(+), 14 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index 57e4a6337ae3..fce5a962bf74 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -3566,6 +3566,10 @@ MLXSW_ITEM32(reg, sbcm, dir, 0x00, 0, 2); */ MLXSW_ITEM32(reg, sbcm, min_buff, 0x18, 0, 24); +/* shared max_buff limits for dynamic threshold for SBCM, SBPM */ +#define MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN 1 +#define MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX 14 + /* reg_sbcm_max_buff * When the pool associated to the port-pg/tclass is configured to * static, Maximum buffer size for the limiter configured in cells. 
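As an aside, here is a minimal, standalone C sketch (not driver code; the two defines simply mirror the reg.h limits added above) of what "within configurable range" means for a dynamic max_buff default:

/* Illustrative, self-contained sketch: checks whether a dynamic max_buff
 * default falls inside the range devlink users are allowed to set.
 */
#include <stdbool.h>
#include <stdio.h>

#define MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN 1
#define MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX 14

static bool dyn_max_buff_is_user_settable(unsigned int max_buff)
{
	return max_buff >= MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN &&
	       max_buff <= MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX;
}

int main(void)
{
	/* The old magic defaults 0 and 0xff sit outside the exposed range;
	 * the replacement defaults (MIN and MAX themselves) sit inside it.
	 */
	unsigned int vals[] = { 0, 0xff, 1, 7, 14 };

	for (unsigned int i = 0; i < sizeof(vals) / sizeof(vals[0]); i++)
		printf("max_buff %3u -> %s\n", vals[i],
		       dyn_max_buff_is_user_settable(vals[i]) ? "ok" : "out of range");
	return 0;
}

This is why the hunks that follow replace the 0 and 0xff defaults with MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN and MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX.
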
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c index 7ee2315c11f5..fd6b08022e28 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c @@ -255,13 +255,13 @@ static int mlxsw_sp_sb_prs_init(struct mlxsw_sp *mlxsw_sp) static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms_ingress[] = { MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(10000), 8, 0), - MLXSW_SP_SB_CM(0, 0, 0), - MLXSW_SP_SB_CM(0, 0, 0), - MLXSW_SP_SB_CM(0, 0, 0), - MLXSW_SP_SB_CM(0, 0, 0), - MLXSW_SP_SB_CM(0, 0, 0), - MLXSW_SP_SB_CM(0, 0, 0), - MLXSW_SP_SB_CM(0, 0, 0), + MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0), + MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0), + MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0), + MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0), + MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0), + MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0), + MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0), MLXSW_SP_SB_CM(0, 0, 0), /* dummy, this PG does not exist */ MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(20000), 1, 3), }; @@ -385,19 +385,19 @@ static int mlxsw_sp_cpu_port_sb_cms_init(struct mlxsw_sp *mlxsw_sp) } static const struct mlxsw_sp_sb_pm mlxsw_sp_sb_pms_ingress[] = { - MLXSW_SP_SB_PM(0, 0xff), - MLXSW_SP_SB_PM(0, 0), - MLXSW_SP_SB_PM(0, 0), - MLXSW_SP_SB_PM(0, 0xff), + MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX), + MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN), + MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN), + MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX), }; #define MLXSW_SP_SB_PMS_INGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_pms_ingress) static const struct mlxsw_sp_sb_pm mlxsw_sp_sb_pms_egress[] = { MLXSW_SP_SB_PM(0, 7), - MLXSW_SP_SB_PM(0, 0), - MLXSW_SP_SB_PM(0, 0), - MLXSW_SP_SB_PM(0, 0), + MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN), + MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN), + MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN), }; #define MLXSW_SP_SB_PMS_EGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_pms_egress) From 325f2f197d730bb7f6f1af2ce93fedfd728e3eed Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Thu, 14 Apr 2016 18:19:23 +0200 Subject: [PATCH 0671/1649] mlxsw: core: Add mlxsw_core_port_driver_priv helper Needed in following patch. Signed-off-by: Jiri Pirko Reviewed-by: Ido Schimmel Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/core.h | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h index 184e9853398b..d0c471f26748 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core.h @@ -137,6 +137,15 @@ struct mlxsw_core_port { struct devlink_port devlink_port; }; +static inline void * +mlxsw_core_port_driver_priv(struct mlxsw_core_port *mlxsw_core_port) +{ + /* mlxsw_core_port is ensured to always be the first field in driver + * port structure. + */ + return mlxsw_core_port; +} + int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, struct mlxsw_core_port *mlxsw_core_port, u8 local_port, struct net_device *dev, bool split, u32 split_group); From 0f433fa0ecc59c1d0792937a26436bec8dd42b6d Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Thu, 14 Apr 2016 18:19:24 +0200 Subject: [PATCH 0672/1649] mlxsw: spectrum_buffers: Implement shared buffer configuration Implement previously introduced mlxsw core shared buffer API. 
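The per-port callbacks below rely on the mlxsw_core_port_driver_priv() helper added just above, which only works because the core port object is guaranteed to be the first member of the driver's port structure. A minimal standalone illustration of that layout rule, using simplified stand-in types rather than the real driver structs:

/* Standalone sketch: a pointer to the embedded first member is also a valid
 * pointer to the enclosing driver object, which is what the helper exploits.
 */
#include <assert.h>
#include <stdio.h>

struct core_port {
	int local_port;
};

struct driver_port {
	struct core_port core;	/* must stay the first field */
	int mtu;
};

static void *core_port_driver_priv(struct core_port *core_port)
{
	return core_port;	/* same address as the enclosing driver_port */
}

int main(void)
{
	struct driver_port port = { .core = { .local_port = 5 }, .mtu = 1500 };
	struct driver_port *back = core_port_driver_priv(&port.core);

	assert(back == &port);
	printf("local_port=%d mtu=%d\n", back->core.local_port, back->mtu);
	return 0;
}
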
For Spectrum, that is done utilizing registers SBPR, SBCM and SBPM. Signed-off-by: Jiri Pirko Reviewed-by: Ido Schimmel Signed-off-by: David S. Miller --- .../net/ethernet/mellanox/mlxsw/spectrum.c | 8 + .../net/ethernet/mellanox/mlxsw/spectrum.h | 22 +++ .../mellanox/mlxsw/spectrum_buffers.c | 187 +++++++++++++++++- 3 files changed, 216 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 19b3c144abc6..ecadb15c4907 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -2434,6 +2434,7 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core, err_switchdev_init: err_lag_init: + mlxsw_sp_buffers_fini(mlxsw_sp); err_buffers_init: err_flood_init: mlxsw_sp_traps_fini(mlxsw_sp); @@ -2448,6 +2449,7 @@ static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core) { struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); + mlxsw_sp_buffers_fini(mlxsw_sp); mlxsw_sp_switchdev_fini(mlxsw_sp); mlxsw_sp_traps_fini(mlxsw_sp); mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE); @@ -2498,6 +2500,12 @@ static struct mlxsw_driver mlxsw_sp_driver = { .fini = mlxsw_sp_fini, .port_split = mlxsw_sp_port_split, .port_unsplit = mlxsw_sp_port_unsplit, + .sb_pool_get = mlxsw_sp_sb_pool_get, + .sb_pool_set = mlxsw_sp_sb_pool_set, + .sb_port_pool_get = mlxsw_sp_sb_port_pool_get, + .sb_port_pool_set = mlxsw_sp_sb_port_pool_set, + .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get, + .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set, .txhdr_construct = mlxsw_sp_txhdr_construct, .txhdr_len = MLXSW_TXHDR_LEN, .profile = &mlxsw_sp_config_profile, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index 790c292b3230..6458efa5607e 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -65,6 +65,7 @@ #define MLXSW_SP_BYTES_PER_CELL 96 #define MLXSW_SP_BYTES_TO_CELLS(b) DIV_ROUND_UP(b, MLXSW_SP_BYTES_PER_CELL) +#define MLXSW_SP_CELLS_TO_BYTES(c) (c * MLXSW_SP_BYTES_PER_CELL) /* Maximum delay buffer needed in case of PAUSE frames, in cells. * Assumes 100m cable and maximum MTU. 
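As a side note, the cell/byte conversion above is plain integer arithmetic: devlink reports pool sizes and static thresholds in bytes, while the hardware works in 96-byte cells. A self-contained sketch of the round trip (ordinary C in place of the kernel's DIV_ROUND_UP, so it compiles on its own):

#include <stdio.h>

#define BYTES_PER_CELL 96

static unsigned int bytes_to_cells(unsigned int bytes)
{
	return (bytes + BYTES_PER_CELL - 1) / BYTES_PER_CELL;	/* round up */
}

static unsigned int cells_to_bytes(unsigned int cells)
{
	return cells * BYTES_PER_CELL;
}

int main(void)
{
	unsigned int bytes = 20000;
	unsigned int cells = bytes_to_cells(bytes);

	/* 20000 bytes -> 209 cells -> 20064 bytes: rounding up means the
	 * round trip can grow the value by up to one cell.
	 */
	printf("%u bytes -> %u cells -> %u bytes\n",
	       bytes, cells, cells_to_bytes(cells));
	return 0;
}
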
@@ -305,7 +306,28 @@ enum mlxsw_sp_flood_table { }; int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp); +void mlxsw_sp_buffers_fini(struct mlxsw_sp *mlxsw_sp); int mlxsw_sp_port_buffers_init(struct mlxsw_sp_port *mlxsw_sp_port); +int mlxsw_sp_sb_pool_get(struct mlxsw_core *mlxsw_core, + unsigned int sb_index, u16 pool_index, + struct devlink_sb_pool_info *pool_info); +int mlxsw_sp_sb_pool_set(struct mlxsw_core *mlxsw_core, + unsigned int sb_index, u16 pool_index, u32 size, + enum devlink_sb_threshold_type threshold_type); +int mlxsw_sp_sb_port_pool_get(struct mlxsw_core_port *mlxsw_core_port, + unsigned int sb_index, u16 pool_index, + u32 *p_threshold); +int mlxsw_sp_sb_port_pool_set(struct mlxsw_core_port *mlxsw_core_port, + unsigned int sb_index, u16 pool_index, + u32 threshold); +int mlxsw_sp_sb_tc_pool_bind_get(struct mlxsw_core_port *mlxsw_core_port, + unsigned int sb_index, u16 tc_index, + enum devlink_sb_pool_type pool_type, + u16 *p_pool_index, u32 *p_threshold); +int mlxsw_sp_sb_tc_pool_bind_set(struct mlxsw_core_port *mlxsw_core_port, + unsigned int sb_index, u16 tc_index, + enum devlink_sb_pool_type pool_type, + u16 pool_index, u32 threshold); int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp); void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c index fd6b08022e28..6042c1741f77 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c @@ -492,6 +492,8 @@ static int mlxsw_sp_sb_mms_init(struct mlxsw_sp *mlxsw_sp) return 0; } +#define MLXSW_SP_SB_SIZE (16 * 1024 * 1024) + int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp) { int err; @@ -503,8 +505,19 @@ int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp) if (err) return err; err = mlxsw_sp_sb_mms_init(mlxsw_sp); + if (err) + return err; + return devlink_sb_register(priv_to_devlink(mlxsw_sp->core), 0, + MLXSW_SP_SB_SIZE, + MLXSW_SP_SB_POOL_COUNT, + MLXSW_SP_SB_POOL_COUNT, + MLXSW_SP_SB_TC_COUNT, + MLXSW_SP_SB_TC_COUNT); +} - return err; +void mlxsw_sp_buffers_fini(struct mlxsw_sp *mlxsw_sp) +{ + devlink_sb_unregister(priv_to_devlink(mlxsw_sp->core), 0); } int mlxsw_sp_port_buffers_init(struct mlxsw_sp_port *mlxsw_sp_port) @@ -521,3 +534,175 @@ int mlxsw_sp_port_buffers_init(struct mlxsw_sp_port *mlxsw_sp_port) return err; } + +static u8 pool_get(u16 pool_index) +{ + return pool_index % MLXSW_SP_SB_POOL_COUNT; +} + +static u16 pool_index_get(u8 pool, enum mlxsw_reg_sbxx_dir dir) +{ + u16 pool_index; + + pool_index = pool; + if (dir == MLXSW_REG_SBXX_DIR_EGRESS) + pool_index += MLXSW_SP_SB_POOL_COUNT; + return pool_index; +} + +static enum mlxsw_reg_sbxx_dir dir_get(u16 pool_index) +{ + return pool_index < MLXSW_SP_SB_POOL_COUNT ? 
+ MLXSW_REG_SBXX_DIR_INGRESS : MLXSW_REG_SBXX_DIR_EGRESS; +} + +int mlxsw_sp_sb_pool_get(struct mlxsw_core *mlxsw_core, + unsigned int sb_index, u16 pool_index, + struct devlink_sb_pool_info *pool_info) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); + u8 pool = pool_get(pool_index); + enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index); + struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool, dir); + + pool_info->pool_type = dir; + pool_info->size = MLXSW_SP_CELLS_TO_BYTES(pr->size); + pool_info->threshold_type = pr->mode; + return 0; +} + +int mlxsw_sp_sb_pool_set(struct mlxsw_core *mlxsw_core, + unsigned int sb_index, u16 pool_index, u32 size, + enum devlink_sb_threshold_type threshold_type) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); + u8 pool = pool_get(pool_index); + enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index); + enum mlxsw_reg_sbpr_mode mode = threshold_type; + u32 pool_size = MLXSW_SP_BYTES_TO_CELLS(size); + + return mlxsw_sp_sb_pr_write(mlxsw_sp, pool, dir, mode, pool_size); +} + +#define MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET (-2) /* 3->1, 16->14 */ + +static u32 mlxsw_sp_sb_threshold_out(struct mlxsw_sp *mlxsw_sp, u8 pool, + enum mlxsw_reg_sbxx_dir dir, u32 max_buff) +{ + struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool, dir); + + if (pr->mode == MLXSW_REG_SBPR_MODE_DYNAMIC) + return max_buff - MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET; + return MLXSW_SP_CELLS_TO_BYTES(max_buff); +} + +static int mlxsw_sp_sb_threshold_in(struct mlxsw_sp *mlxsw_sp, u8 pool, + enum mlxsw_reg_sbxx_dir dir, u32 threshold, + u32 *p_max_buff) +{ + struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool, dir); + + if (pr->mode == MLXSW_REG_SBPR_MODE_DYNAMIC) { + int val; + + val = threshold + MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET; + if (val < MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN || + val > MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX) + return -EINVAL; + *p_max_buff = val; + } else { + *p_max_buff = MLXSW_SP_BYTES_TO_CELLS(threshold); + } + return 0; +} + +int mlxsw_sp_sb_port_pool_get(struct mlxsw_core_port *mlxsw_core_port, + unsigned int sb_index, u16 pool_index, + u32 *p_threshold) +{ + struct mlxsw_sp_port *mlxsw_sp_port = + mlxsw_core_port_driver_priv(mlxsw_core_port); + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + u8 local_port = mlxsw_sp_port->local_port; + u8 pool = pool_get(pool_index); + enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index); + struct mlxsw_sp_sb_pm *pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port, + pool, dir); + + *p_threshold = mlxsw_sp_sb_threshold_out(mlxsw_sp, pool, dir, + pm->max_buff); + return 0; +} + +int mlxsw_sp_sb_port_pool_set(struct mlxsw_core_port *mlxsw_core_port, + unsigned int sb_index, u16 pool_index, + u32 threshold) +{ + struct mlxsw_sp_port *mlxsw_sp_port = + mlxsw_core_port_driver_priv(mlxsw_core_port); + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + u8 local_port = mlxsw_sp_port->local_port; + u8 pool = pool_get(pool_index); + enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index); + u32 max_buff; + int err; + + err = mlxsw_sp_sb_threshold_in(mlxsw_sp, pool, dir, + threshold, &max_buff); + if (err) + return err; + + return mlxsw_sp_sb_pm_write(mlxsw_sp, local_port, pool, dir, + 0, max_buff); +} + +int mlxsw_sp_sb_tc_pool_bind_get(struct mlxsw_core_port *mlxsw_core_port, + unsigned int sb_index, u16 tc_index, + enum devlink_sb_pool_type pool_type, + u16 *p_pool_index, u32 *p_threshold) +{ + struct mlxsw_sp_port *mlxsw_sp_port = + mlxsw_core_port_driver_priv(mlxsw_core_port); + struct mlxsw_sp 
*mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + u8 local_port = mlxsw_sp_port->local_port; + u8 pg_buff = tc_index; + enum mlxsw_reg_sbxx_dir dir = pool_type; + struct mlxsw_sp_sb_cm *cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, + pg_buff, dir); + + *p_threshold = mlxsw_sp_sb_threshold_out(mlxsw_sp, cm->pool, dir, + cm->max_buff); + *p_pool_index = pool_index_get(cm->pool, pool_type); + return 0; +} + +int mlxsw_sp_sb_tc_pool_bind_set(struct mlxsw_core_port *mlxsw_core_port, + unsigned int sb_index, u16 tc_index, + enum devlink_sb_pool_type pool_type, + u16 pool_index, u32 threshold) +{ + struct mlxsw_sp_port *mlxsw_sp_port = + mlxsw_core_port_driver_priv(mlxsw_core_port); + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + u8 local_port = mlxsw_sp_port->local_port; + u8 pg_buff = tc_index; + enum mlxsw_reg_sbxx_dir dir = pool_type; + u8 pool = pool_index; + u32 max_buff; + int err; + + err = mlxsw_sp_sb_threshold_in(mlxsw_sp, pool, dir, + threshold, &max_buff); + if (err) + return err; + + if (pool_type == DEVLINK_SB_POOL_TYPE_EGRESS) { + if (pool < MLXSW_SP_SB_POOL_COUNT) + return -EINVAL; + pool -= MLXSW_SP_SB_POOL_COUNT; + } else if (pool >= MLXSW_SP_SB_POOL_COUNT) { + return -EINVAL; + } + return mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, pg_buff, dir, + 0, max_buff, pool); +} From 1ceecc88d29bbbb0f8d0e49e3bf6d020dc582934 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Thu, 14 Apr 2016 18:19:25 +0200 Subject: [PATCH 0673/1649] mlxsw: core: Add devlink shared buffer occupancy callbacks Add middle layer in mlxsw core code to forward shared buffer occupancy calls into specific ASIC drivers. Signed-off-by: Jiri Pirko Reviewed-by: Ido Schimmel Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/core.c | 74 +++++++++++++++++++--- drivers/net/ethernet/mellanox/mlxsw/core.h | 11 ++++ 2 files changed, 77 insertions(+), 8 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c index 1278260118a4..63a977767c01 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core.c @@ -911,15 +911,73 @@ mlxsw_devlink_sb_tc_pool_bind_set(struct devlink_port *devlink_port, pool_index, threshold); } +static int mlxsw_devlink_sb_occ_snapshot(struct devlink *devlink, + unsigned int sb_index) +{ + struct mlxsw_core *mlxsw_core = devlink_priv(devlink); + struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver; + + if (!mlxsw_driver->sb_occ_snapshot) + return -EOPNOTSUPP; + return mlxsw_driver->sb_occ_snapshot(mlxsw_core, sb_index); +} + +static int mlxsw_devlink_sb_occ_max_clear(struct devlink *devlink, + unsigned int sb_index) +{ + struct mlxsw_core *mlxsw_core = devlink_priv(devlink); + struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver; + + if (!mlxsw_driver->sb_occ_max_clear) + return -EOPNOTSUPP; + return mlxsw_driver->sb_occ_max_clear(mlxsw_core, sb_index); +} + +static int +mlxsw_devlink_sb_occ_port_pool_get(struct devlink_port *devlink_port, + unsigned int sb_index, u16 pool_index, + u32 *p_cur, u32 *p_max) +{ + struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink); + struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver; + struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port); + + if (!mlxsw_driver->sb_occ_port_pool_get) + return -EOPNOTSUPP; + return mlxsw_driver->sb_occ_port_pool_get(mlxsw_core_port, sb_index, + pool_index, p_cur, p_max); +} + +static int +mlxsw_devlink_sb_occ_tc_port_bind_get(struct devlink_port *devlink_port, + unsigned int sb_index, u16 
tc_index, + enum devlink_sb_pool_type pool_type, + u32 *p_cur, u32 *p_max) +{ + struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink); + struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver; + struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port); + + if (!mlxsw_driver->sb_occ_tc_port_bind_get) + return -EOPNOTSUPP; + return mlxsw_driver->sb_occ_tc_port_bind_get(mlxsw_core_port, + sb_index, tc_index, + pool_type, p_cur, p_max); +} + static const struct devlink_ops mlxsw_devlink_ops = { - .port_split = mlxsw_devlink_port_split, - .port_unsplit = mlxsw_devlink_port_unsplit, - .sb_pool_get = mlxsw_devlink_sb_pool_get, - .sb_pool_set = mlxsw_devlink_sb_pool_set, - .sb_port_pool_get = mlxsw_devlink_sb_port_pool_get, - .sb_port_pool_set = mlxsw_devlink_sb_port_pool_set, - .sb_tc_pool_bind_get = mlxsw_devlink_sb_tc_pool_bind_get, - .sb_tc_pool_bind_set = mlxsw_devlink_sb_tc_pool_bind_set, + .port_split = mlxsw_devlink_port_split, + .port_unsplit = mlxsw_devlink_port_unsplit, + .sb_pool_get = mlxsw_devlink_sb_pool_get, + .sb_pool_set = mlxsw_devlink_sb_pool_set, + .sb_port_pool_get = mlxsw_devlink_sb_port_pool_get, + .sb_port_pool_set = mlxsw_devlink_sb_port_pool_set, + .sb_tc_pool_bind_get = mlxsw_devlink_sb_tc_pool_bind_get, + .sb_tc_pool_bind_set = mlxsw_devlink_sb_tc_pool_bind_set, + .sb_occ_snapshot = mlxsw_devlink_sb_occ_snapshot, + .sb_occ_max_clear = mlxsw_devlink_sb_occ_max_clear, + .sb_occ_port_pool_get = mlxsw_devlink_sb_occ_port_pool_get, + .sb_occ_tc_port_bind_get = mlxsw_devlink_sb_occ_tc_port_bind_get, }; int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info, diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h index d0c471f26748..377daccf0063 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core.h @@ -229,6 +229,17 @@ struct mlxsw_driver { unsigned int sb_index, u16 tc_index, enum devlink_sb_pool_type pool_type, u16 pool_index, u32 threshold); + int (*sb_occ_snapshot)(struct mlxsw_core *mlxsw_core, + unsigned int sb_index); + int (*sb_occ_max_clear)(struct mlxsw_core *mlxsw_core, + unsigned int sb_index); + int (*sb_occ_port_pool_get)(struct mlxsw_core_port *mlxsw_core_port, + unsigned int sb_index, u16 pool_index, + u32 *p_cur, u32 *p_max); + int (*sb_occ_tc_port_bind_get)(struct mlxsw_core_port *mlxsw_core_port, + unsigned int sb_index, u16 tc_index, + enum devlink_sb_pool_type pool_type, + u32 *p_cur, u32 *p_max); void (*txhdr_construct)(struct sk_buff *skb, const struct mlxsw_tx_info *tx_info); u8 txhdr_len; From 26176def3c1e7933601d04de3d4980199c1c87d1 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Thu, 14 Apr 2016 18:19:26 +0200 Subject: [PATCH 0674/1649] mlxsw: reg: Add Shared Buffer Status register definition This register allows to query HW for current and maximal buffer usage. Signed-off-by: Jiri Pirko Reviewed-by: Ido Schimmel Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/reg.h | 100 ++++++++++++++++++++++ 1 file changed, 100 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index fce5a962bf74..656466a00386 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -3722,6 +3722,104 @@ static inline void mlxsw_reg_sbmm_pack(char *payload, u8 prio, u32 min_buff, mlxsw_reg_sbmm_pool_set(payload, pool); } +/* SBSR - Shared Buffer Status Register + * ------------------------------------ + * The SBSR register retrieves the shared buffer occupancy according to + * Port-Pool. Note that this register enables reading a large amount of data. + * It is the user's responsibility to limit the amount of data to ensure the + * response can match the maximum transfer unit. In case the response exceeds + * the maximum transport unit, it will be truncated with no special notice. + */ +#define MLXSW_REG_SBSR_ID 0xB005 +#define MLXSW_REG_SBSR_BASE_LEN 0x5C /* base length, without records */ +#define MLXSW_REG_SBSR_REC_LEN 0x8 /* record length */ +#define MLXSW_REG_SBSR_REC_MAX_COUNT 120 +#define MLXSW_REG_SBSR_LEN (MLXSW_REG_SBSR_BASE_LEN + \ + MLXSW_REG_SBSR_REC_LEN * \ + MLXSW_REG_SBSR_REC_MAX_COUNT) + +static const struct mlxsw_reg_info mlxsw_reg_sbsr = { + .id = MLXSW_REG_SBSR_ID, + .len = MLXSW_REG_SBSR_LEN, +}; + +/* reg_sbsr_clr + * Clear Max Buffer Occupancy. When this bit is set, the max_buff_occupancy + * field is cleared (and a new max value is tracked from the time the clear + * was performed). + * Access: OP + */ +MLXSW_ITEM32(reg, sbsr, clr, 0x00, 31, 1); + +/* reg_sbsr_ingress_port_mask + * Bit vector for all ingress network ports. + * Indicates which of the ports (for which the relevant bit is set) + * are affected by the set operation. Configuration of any other port + * does not change. + * Access: Index + */ +MLXSW_ITEM_BIT_ARRAY(reg, sbsr, ingress_port_mask, 0x10, 0x20, 1); + +/* reg_sbsr_pg_buff_mask + * Bit vector for all switch priority groups. + * Indicates which of the priorities (for which the relevant bit is set) + * are affected by the set operation. Configuration of any other priority + * does not change. + * Range is 0..cap_max_pg_buffers - 1 + * Access: Index + */ +MLXSW_ITEM_BIT_ARRAY(reg, sbsr, pg_buff_mask, 0x30, 0x4, 1); + +/* reg_sbsr_egress_port_mask + * Bit vector for all egress network ports. + * Indicates which of the ports (for which the relevant bit is set) + * are affected by the set operation. Configuration of any other port + * does not change. + * Access: Index + */ +MLXSW_ITEM_BIT_ARRAY(reg, sbsr, egress_port_mask, 0x34, 0x20, 1); + +/* reg_sbsr_tclass_mask + * Bit vector for all traffic classes. + * Indicates which of the traffic classes (for which the relevant bit is + * set) are affected by the set operation. Configuration of any other + * traffic class does not change. + * Range is 0..cap_max_tclass - 1 + * Access: Index + */ +MLXSW_ITEM_BIT_ARRAY(reg, sbsr, tclass_mask, 0x54, 0x8, 1); + +static inline void mlxsw_reg_sbsr_pack(char *payload, bool clr) +{ + MLXSW_REG_ZERO(sbsr, payload); + mlxsw_reg_sbsr_clr_set(payload, clr); +} + +/* reg_sbsr_rec_buff_occupancy + * Current buffer occupancy in cells. + * Access: RO + */ +MLXSW_ITEM32_INDEXED(reg, sbsr, rec_buff_occupancy, MLXSW_REG_SBSR_BASE_LEN, + 0, 24, MLXSW_REG_SBSR_REC_LEN, 0x00, false); + +/* reg_sbsr_rec_max_buff_occupancy + * Maximum value of buffer occupancy in cells monitored. 
Cleared by + * writing to the clr field. + * Access: RO + */ +MLXSW_ITEM32_INDEXED(reg, sbsr, rec_max_buff_occupancy, MLXSW_REG_SBSR_BASE_LEN, + 0, 24, MLXSW_REG_SBSR_REC_LEN, 0x04, false); + +static inline void mlxsw_reg_sbsr_rec_unpack(char *payload, int rec_index, + u32 *p_buff_occupancy, + u32 *p_max_buff_occupancy) +{ + *p_buff_occupancy = + mlxsw_reg_sbsr_rec_buff_occupancy_get(payload, rec_index); + *p_max_buff_occupancy = + mlxsw_reg_sbsr_rec_max_buff_occupancy_get(payload, rec_index); +} + static inline const char *mlxsw_reg_id_str(u16 reg_id) { switch (reg_id) { @@ -3817,6 +3915,8 @@ static inline const char *mlxsw_reg_id_str(u16 reg_id) return "SBPM"; case MLXSW_REG_SBMM_ID: return "SBMM"; + case MLXSW_REG_SBSR_ID: + return "SBSR"; default: return "*UNKNOWN*"; } From 42a7f1d7747904d89e9831fb85a678add00facf3 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Thu, 14 Apr 2016 18:19:27 +0200 Subject: [PATCH 0675/1649] mlxsw: reg: Extend SBPM register for occupancy control Since it is not possible to get and clear Port-Pool occupancy data using SBSR register, there's a need to implement that using SBPM. Extend pack helper and add unpack helper to get occupancy values. Signed-off-by: Jiri Pirko Reviewed-by: Ido Schimmel Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/reg.h | 31 ++++++++++++++++++- .../mellanox/mlxsw/spectrum_buffers.c | 3 +- 2 files changed, 32 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index 656466a00386..1977e7a5c530 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -3636,6 +3636,27 @@ MLXSW_ITEM32(reg, sbpm, pool, 0x00, 8, 4); */ MLXSW_ITEM32(reg, sbpm, dir, 0x00, 0, 2); +/* reg_sbpm_buff_occupancy + * Current buffer occupancy in cells. + * Access: RO + */ +MLXSW_ITEM32(reg, sbpm, buff_occupancy, 0x10, 0, 24); + +/* reg_sbpm_clr + * Clear Max Buffer Occupancy + * When this bit is set, max_buff_occupancy field is cleared (and a + * new max value is tracked from the time the clear was performed). + * Access: OP + */ +MLXSW_ITEM32(reg, sbpm, clr, 0x14, 31, 1); + +/* reg_sbpm_max_buff_occupancy + * Maximum value of buffer occupancy in cells monitored. Cleared by + * writing to the clr field. + * Access: RO + */ +MLXSW_ITEM32(reg, sbpm, max_buff_occupancy, 0x14, 0, 24); + /* reg_sbpm_min_buff * Minimum buffer size for the limiter, in cells. 
* Access: RW @@ -3656,17 +3677,25 @@ MLXSW_ITEM32(reg, sbpm, min_buff, 0x18, 0, 24); MLXSW_ITEM32(reg, sbpm, max_buff, 0x1C, 0, 24); static inline void mlxsw_reg_sbpm_pack(char *payload, u8 local_port, u8 pool, - enum mlxsw_reg_sbxx_dir dir, + enum mlxsw_reg_sbxx_dir dir, bool clr, u32 min_buff, u32 max_buff) { MLXSW_REG_ZERO(sbpm, payload); mlxsw_reg_sbpm_local_port_set(payload, local_port); mlxsw_reg_sbpm_pool_set(payload, pool); mlxsw_reg_sbpm_dir_set(payload, dir); + mlxsw_reg_sbpm_clr_set(payload, clr); mlxsw_reg_sbpm_min_buff_set(payload, min_buff); mlxsw_reg_sbpm_max_buff_set(payload, max_buff); } +static inline void mlxsw_reg_sbpm_unpack(char *payload, u32 *p_buff_occupancy, + u32 *p_max_buff_occupancy) +{ + *p_buff_occupancy = mlxsw_reg_sbpm_buff_occupancy_get(payload); + *p_max_buff_occupancy = mlxsw_reg_sbpm_max_buff_occupancy_get(payload); +} + /* SBMM - Shared Buffer Multicast Management Register * -------------------------------------------------- * The SBMM register configures and retrieves the shared buffer allocation diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c index 6042c1741f77..639ba5ae8bbd 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c @@ -113,7 +113,8 @@ static int mlxsw_sp_sb_pm_write(struct mlxsw_sp *mlxsw_sp, u8 local_port, struct mlxsw_sp_sb_pm *pm; int err; - mlxsw_reg_sbpm_pack(sbpm_pl, local_port, pool, dir, min_buff, max_buff); + mlxsw_reg_sbpm_pack(sbpm_pl, local_port, pool, dir, false, + min_buff, max_buff); err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl); if (err) return err; From dd9bdb04d2d7c428b0203b5e4e435abc229fa656 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Thu, 14 Apr 2016 18:19:28 +0200 Subject: [PATCH 0676/1649] mlxsw: core: Add mlxsw specific workqueue and use it for FDB notif. processing Follow-up patch is going to need to use delayed work as well and frequently. The FDB notification processing is already using that and also quite frequently. It makes sense to create separate workqueue just for mlxsw driver in this case and do not pollute system_wq. Signed-off-by: Jiri Pirko Reviewed-by: Ido Schimmel Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/core.c | 25 +++++++++++++++++-- drivers/net/ethernet/mellanox/mlxsw/core.h | 3 +++ .../mellanox/mlxsw/spectrum_switchdev.c | 4 +-- 3 files changed, 28 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c index 63a977767c01..a14e422fae4d 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core.c @@ -55,6 +55,7 @@ #include #include #include +#include #include #include @@ -73,6 +74,8 @@ static const char mlxsw_core_driver_name[] = "mlxsw_core"; static struct dentry *mlxsw_core_dbg_root; +static struct workqueue_struct *mlxsw_wq; + struct mlxsw_core_pcpu_stats { u64 trap_rx_packets[MLXSW_TRAP_ID_MAX]; u64 trap_rx_bytes[MLXSW_TRAP_ID_MAX]; @@ -1575,17 +1578,35 @@ int mlxsw_cmd_exec(struct mlxsw_core *mlxsw_core, u16 opcode, u8 opcode_mod, } EXPORT_SYMBOL(mlxsw_cmd_exec); +int mlxsw_core_schedule_dw(struct delayed_work *dwork, unsigned long delay) +{ + return queue_delayed_work(mlxsw_wq, dwork, delay); +} +EXPORT_SYMBOL(mlxsw_core_schedule_dw); + static int __init mlxsw_core_module_init(void) { - mlxsw_core_dbg_root = debugfs_create_dir(mlxsw_core_driver_name, NULL); - if (!mlxsw_core_dbg_root) + int err; + + mlxsw_wq = create_workqueue(mlxsw_core_driver_name); + if (!mlxsw_wq) return -ENOMEM; + mlxsw_core_dbg_root = debugfs_create_dir(mlxsw_core_driver_name, NULL); + if (!mlxsw_core_dbg_root) { + err = -ENOMEM; + goto err_debugfs_create_dir; + } return 0; + +err_debugfs_create_dir: + destroy_workqueue(mlxsw_wq); + return err; } static void __exit mlxsw_core_module_exit(void) { debugfs_remove_recursive(mlxsw_core_dbg_root); + destroy_workqueue(mlxsw_wq); } module_init(mlxsw_core_module_init); diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h index 377daccf0063..b41ebf8cad72 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core.h @@ -43,6 +43,7 @@ #include #include #include +#include #include #include "trap.h" @@ -151,6 +152,8 @@ int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, struct net_device *dev, bool split, u32 split_group); void mlxsw_core_port_fini(struct mlxsw_core_port *mlxsw_core_port); +int mlxsw_core_schedule_dw(struct delayed_work *dwork, unsigned long delay); + #define MLXSW_CONFIG_PROFILE_SWID_COUNT 8 struct mlxsw_swid_config { diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index e1c74efff51a..fb9efb84f13b 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c @@ -1430,8 +1430,8 @@ static void mlxsw_sp_fdb_notify_rec_process(struct mlxsw_sp *mlxsw_sp, static void mlxsw_sp_fdb_notify_work_schedule(struct mlxsw_sp *mlxsw_sp) { - schedule_delayed_work(&mlxsw_sp->fdb_notify.dw, - msecs_to_jiffies(mlxsw_sp->fdb_notify.interval)); + mlxsw_core_schedule_dw(&mlxsw_sp->fdb_notify.dw, + msecs_to_jiffies(mlxsw_sp->fdb_notify.interval)); } static void mlxsw_sp_fdb_notify_work(struct work_struct *work) From caf7297e7ab5f8aa9d482200748a066adbfa5775 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Thu, 14 Apr 2016 18:19:29 +0200 Subject: [PATCH 0677/1649] mlxsw: core: Introduce support for asynchronous EMAD register access So far it was possible to have one EMAD register access at a time, locked by mutex. 
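For orientation, a toy self-contained sketch (simplified stand-in types, not the driver's struct mlxsw_reg_trans) of the idea introduced below: several transactions may be outstanding at once, and a response is matched back to its transaction by transaction ID.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct trans {
	uint64_t tid;	/* transaction ID carried in request and response */
	bool done;
};

/* Match a response TID against the list of in-flight transactions. */
static struct trans *find_trans(struct trans *list, size_t n, uint64_t tid)
{
	for (size_t i = 0; i < n; i++)
		if (!list[i].done && list[i].tid == tid)
			return &list[i];
	return NULL;
}

int main(void)
{
	struct trans in_flight[] = { { 0x100, false }, { 0x101, false }, { 0x102, false } };
	uint64_t response_tid = 0x101;
	struct trans *t = find_trans(in_flight, 3, response_tid);

	if (t) {
		t->done = true;
		printf("response tid 0x%llx completed its transaction\n",
		       (unsigned long long)t->tid);
	}
	return 0;
}
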
This patch extends this interface to allow multiple EMAD register accesses to be in fly at once. That allows faster processing on firmware side avoiding unused time in between EMADs. Measured speedup is ~30% for shared occupancy snapshot operation. Signed-off-by: Jiri Pirko Reviewed-by: Ido Schimmel Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/core.c | 508 +++++++++++++-------- drivers/net/ethernet/mellanox/mlxsw/core.h | 13 + 2 files changed, 341 insertions(+), 180 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c index a14e422fae4d..b0a0b01bb4ef 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core.c @@ -44,7 +44,7 @@ #include #include #include -#include +#include #include #include #include @@ -96,11 +96,9 @@ struct mlxsw_core { struct list_head rx_listener_list; struct list_head event_listener_list; struct { - struct sk_buff *resp_skb; - u64 tid; - wait_queue_head_t wait; - bool trans_active; - struct mutex lock; /* One EMAD transaction at a time. */ + atomic64_t tid; + struct list_head trans_list; + spinlock_t trans_list_lock; /* protects trans_list writes */ bool use_emad; } emad; struct mlxsw_core_pcpu_stats __percpu *pcpu_stats; @@ -293,7 +291,7 @@ static void mlxsw_emad_pack_reg_tlv(char *reg_tlv, static void mlxsw_emad_pack_op_tlv(char *op_tlv, const struct mlxsw_reg_info *reg, enum mlxsw_core_reg_access_type type, - struct mlxsw_core *mlxsw_core) + u64 tid) { mlxsw_emad_op_tlv_type_set(op_tlv, MLXSW_EMAD_TLV_TYPE_OP); mlxsw_emad_op_tlv_len_set(op_tlv, MLXSW_EMAD_OP_TLV_LEN); @@ -309,7 +307,7 @@ static void mlxsw_emad_pack_op_tlv(char *op_tlv, MLXSW_EMAD_OP_TLV_METHOD_WRITE); mlxsw_emad_op_tlv_class_set(op_tlv, MLXSW_EMAD_OP_TLV_CLASS_REG_ACCESS); - mlxsw_emad_op_tlv_tid_set(op_tlv, mlxsw_core->emad.tid); + mlxsw_emad_op_tlv_tid_set(op_tlv, tid); } static int mlxsw_emad_construct_eth_hdr(struct sk_buff *skb) @@ -331,7 +329,7 @@ static void mlxsw_emad_construct(struct sk_buff *skb, const struct mlxsw_reg_info *reg, char *payload, enum mlxsw_core_reg_access_type type, - struct mlxsw_core *mlxsw_core) + u64 tid) { char *buf; @@ -342,7 +340,7 @@ static void mlxsw_emad_construct(struct sk_buff *skb, mlxsw_emad_pack_reg_tlv(buf, reg, payload); buf = skb_push(skb, MLXSW_EMAD_OP_TLV_LEN * sizeof(u32)); - mlxsw_emad_pack_op_tlv(buf, reg, type, mlxsw_core); + mlxsw_emad_pack_op_tlv(buf, reg, type, tid); mlxsw_emad_construct_eth_hdr(skb); } @@ -379,58 +377,16 @@ static bool mlxsw_emad_is_resp(const struct sk_buff *skb) return (mlxsw_emad_op_tlv_r_get(op_tlv) == MLXSW_EMAD_OP_TLV_RESPONSE); } -#define MLXSW_EMAD_TIMEOUT_MS 200 - -static int __mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core, - struct sk_buff *skb, - const struct mlxsw_tx_info *tx_info) +static int mlxsw_emad_process_status(char *op_tlv, + enum mlxsw_emad_op_tlv_status *p_status) { - int err; - int ret; + *p_status = mlxsw_emad_op_tlv_status_get(op_tlv); - mlxsw_core->emad.trans_active = true; - - err = mlxsw_core_skb_transmit(mlxsw_core, skb, tx_info); - if (err) { - dev_err(mlxsw_core->bus_info->dev, "Failed to transmit EMAD (tid=%llx)\n", - mlxsw_core->emad.tid); - dev_kfree_skb(skb); - goto trans_inactive_out; - } - - ret = wait_event_timeout(mlxsw_core->emad.wait, - !(mlxsw_core->emad.trans_active), - msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_MS)); - if (!ret) { - dev_warn(mlxsw_core->bus_info->dev, "EMAD timed-out (tid=%llx)\n", - mlxsw_core->emad.tid); - err = -EIO; - goto 
trans_inactive_out; - } - - return 0; - -trans_inactive_out: - mlxsw_core->emad.trans_active = false; - return err; -} - -static int mlxsw_emad_process_status(struct mlxsw_core *mlxsw_core, - char *op_tlv) -{ - enum mlxsw_emad_op_tlv_status status; - u64 tid; - - status = mlxsw_emad_op_tlv_status_get(op_tlv); - tid = mlxsw_emad_op_tlv_tid_get(op_tlv); - - switch (status) { + switch (*p_status) { case MLXSW_EMAD_OP_TLV_STATUS_SUCCESS: return 0; case MLXSW_EMAD_OP_TLV_STATUS_BUSY: case MLXSW_EMAD_OP_TLV_STATUS_MESSAGE_RECEIPT_ACK: - dev_warn(mlxsw_core->bus_info->dev, "Reg access status again (tid=%llx,status=%x(%s))\n", - tid, status, mlxsw_emad_op_tlv_status_str(status)); return -EAGAIN; case MLXSW_EMAD_OP_TLV_STATUS_VERSION_NOT_SUPPORTED: case MLXSW_EMAD_OP_TLV_STATUS_UNKNOWN_TLV: @@ -441,70 +397,150 @@ static int mlxsw_emad_process_status(struct mlxsw_core *mlxsw_core, case MLXSW_EMAD_OP_TLV_STATUS_RESOURCE_NOT_AVAILABLE: case MLXSW_EMAD_OP_TLV_STATUS_INTERNAL_ERROR: default: - dev_err(mlxsw_core->bus_info->dev, "Reg access status failed (tid=%llx,status=%x(%s))\n", - tid, status, mlxsw_emad_op_tlv_status_str(status)); return -EIO; } } -static int mlxsw_emad_process_status_skb(struct mlxsw_core *mlxsw_core, - struct sk_buff *skb) +static int +mlxsw_emad_process_status_skb(struct sk_buff *skb, + enum mlxsw_emad_op_tlv_status *p_status) { - return mlxsw_emad_process_status(mlxsw_core, mlxsw_emad_op_tlv(skb)); + return mlxsw_emad_process_status(mlxsw_emad_op_tlv(skb), p_status); +} + +struct mlxsw_reg_trans { + struct list_head list; + struct list_head bulk_list; + struct mlxsw_core *core; + struct sk_buff *tx_skb; + struct mlxsw_tx_info tx_info; + struct delayed_work timeout_dw; + unsigned int retries; + u64 tid; + struct completion completion; + atomic_t active; + mlxsw_reg_trans_cb_t *cb; + unsigned long cb_priv; + const struct mlxsw_reg_info *reg; + enum mlxsw_core_reg_access_type type; + int err; + enum mlxsw_emad_op_tlv_status emad_status; + struct rcu_head rcu; +}; + +#define MLXSW_EMAD_TIMEOUT_MS 200 + +static void mlxsw_emad_trans_timeout_schedule(struct mlxsw_reg_trans *trans) +{ + unsigned long timeout = msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_MS); + + mlxsw_core_schedule_dw(&trans->timeout_dw, timeout); } static int mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core, - struct sk_buff *skb, - const struct mlxsw_tx_info *tx_info) + struct mlxsw_reg_trans *trans) { - struct sk_buff *trans_skb; - int n_retry; + struct sk_buff *skb; int err; - n_retry = 0; -retry: - /* We copy the EMAD to a new skb, since we might need - * to retransmit it in case of failure. 
- */ - trans_skb = skb_copy(skb, GFP_KERNEL); - if (!trans_skb) { - err = -ENOMEM; - goto out; + skb = skb_copy(trans->tx_skb, GFP_KERNEL); + if (!skb) + return -ENOMEM; + + atomic_set(&trans->active, 1); + err = mlxsw_core_skb_transmit(mlxsw_core, skb, &trans->tx_info); + if (err) { + dev_kfree_skb(skb); + return err; } - - err = __mlxsw_emad_transmit(mlxsw_core, trans_skb, tx_info); - if (!err) { - struct sk_buff *resp_skb = mlxsw_core->emad.resp_skb; - - err = mlxsw_emad_process_status_skb(mlxsw_core, resp_skb); - if (err) - dev_kfree_skb(resp_skb); - if (!err || err != -EAGAIN) - goto out; - } - if (n_retry++ < MLXSW_EMAD_MAX_RETRY) - goto retry; - -out: - dev_kfree_skb(skb); - mlxsw_core->emad.tid++; - return err; + mlxsw_emad_trans_timeout_schedule(trans); + return 0; } +static void mlxsw_emad_trans_finish(struct mlxsw_reg_trans *trans, int err) +{ + struct mlxsw_core *mlxsw_core = trans->core; + + dev_kfree_skb(trans->tx_skb); + spin_lock_bh(&mlxsw_core->emad.trans_list_lock); + list_del_rcu(&trans->list); + spin_unlock_bh(&mlxsw_core->emad.trans_list_lock); + trans->err = err; + complete(&trans->completion); +} + +static void mlxsw_emad_transmit_retry(struct mlxsw_core *mlxsw_core, + struct mlxsw_reg_trans *trans) +{ + int err; + + if (trans->retries < MLXSW_EMAD_MAX_RETRY) { + trans->retries++; + err = mlxsw_emad_transmit(trans->core, trans); + if (err == 0) + return; + } else { + err = -EIO; + } + mlxsw_emad_trans_finish(trans, err); +} + +static void mlxsw_emad_trans_timeout_work(struct work_struct *work) +{ + struct mlxsw_reg_trans *trans = container_of(work, + struct mlxsw_reg_trans, + timeout_dw.work); + + if (!atomic_dec_and_test(&trans->active)) + return; + + mlxsw_emad_transmit_retry(trans->core, trans); +} + +static void mlxsw_emad_process_response(struct mlxsw_core *mlxsw_core, + struct mlxsw_reg_trans *trans, + struct sk_buff *skb) +{ + int err; + + if (!atomic_dec_and_test(&trans->active)) + return; + + err = mlxsw_emad_process_status_skb(skb, &trans->emad_status); + if (err == -EAGAIN) { + mlxsw_emad_transmit_retry(mlxsw_core, trans); + } else { + if (err == 0) { + char *op_tlv = mlxsw_emad_op_tlv(skb); + + if (trans->cb) + trans->cb(mlxsw_core, + mlxsw_emad_reg_payload(op_tlv), + trans->reg->len, trans->cb_priv); + } + mlxsw_emad_trans_finish(trans, err); + } +} + +/* called with rcu read lock held */ static void mlxsw_emad_rx_listener_func(struct sk_buff *skb, u8 local_port, void *priv) { struct mlxsw_core *mlxsw_core = priv; + struct mlxsw_reg_trans *trans; - if (mlxsw_emad_is_resp(skb) && - mlxsw_core->emad.trans_active && - mlxsw_emad_get_tid(skb) == mlxsw_core->emad.tid) { - mlxsw_core->emad.resp_skb = skb; - mlxsw_core->emad.trans_active = false; - wake_up(&mlxsw_core->emad.wait); - } else { - dev_kfree_skb(skb); + if (!mlxsw_emad_is_resp(skb)) + goto free_skb; + + list_for_each_entry_rcu(trans, &mlxsw_core->emad.trans_list, list) { + if (mlxsw_emad_get_tid(skb) == trans->tid) { + mlxsw_emad_process_response(mlxsw_core, trans, skb); + break; + } } + +free_skb: + dev_kfree_skb(skb); } static const struct mlxsw_rx_listener mlxsw_emad_rx_listener = { @@ -531,18 +567,19 @@ static int mlxsw_emad_traps_set(struct mlxsw_core *mlxsw_core) static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core) { + u64 tid; int err; /* Set the upper 32 bits of the transaction ID field to a random * number. This allows us to discard EMADs addressed to other * devices. 
*/ - get_random_bytes(&mlxsw_core->emad.tid, 4); - mlxsw_core->emad.tid = mlxsw_core->emad.tid << 32; + get_random_bytes(&tid, 4); + tid <<= 32; + atomic64_set(&mlxsw_core->emad.tid, tid); - init_waitqueue_head(&mlxsw_core->emad.wait); - mlxsw_core->emad.trans_active = false; - mutex_init(&mlxsw_core->emad.lock); + INIT_LIST_HEAD(&mlxsw_core->emad.trans_list); + spin_lock_init(&mlxsw_core->emad.trans_list_lock); err = mlxsw_core_rx_listener_register(mlxsw_core, &mlxsw_emad_rx_listener, @@ -600,6 +637,59 @@ static struct sk_buff *mlxsw_emad_alloc(const struct mlxsw_core *mlxsw_core, return skb; } +static int mlxsw_emad_reg_access(struct mlxsw_core *mlxsw_core, + const struct mlxsw_reg_info *reg, + char *payload, + enum mlxsw_core_reg_access_type type, + struct mlxsw_reg_trans *trans, + struct list_head *bulk_list, + mlxsw_reg_trans_cb_t *cb, + unsigned long cb_priv, u64 tid) +{ + struct sk_buff *skb; + int err; + + dev_dbg(mlxsw_core->bus_info->dev, "EMAD reg access (tid=%llx,reg_id=%x(%s),type=%s)\n", + trans->tid, reg->id, mlxsw_reg_id_str(reg->id), + mlxsw_core_reg_access_type_str(type)); + + skb = mlxsw_emad_alloc(mlxsw_core, reg->len); + if (!skb) + return -ENOMEM; + + list_add_tail(&trans->bulk_list, bulk_list); + trans->core = mlxsw_core; + trans->tx_skb = skb; + trans->tx_info.local_port = MLXSW_PORT_CPU_PORT; + trans->tx_info.is_emad = true; + INIT_DELAYED_WORK(&trans->timeout_dw, mlxsw_emad_trans_timeout_work); + trans->tid = tid; + init_completion(&trans->completion); + trans->cb = cb; + trans->cb_priv = cb_priv; + trans->reg = reg; + trans->type = type; + + mlxsw_emad_construct(skb, reg, payload, type, trans->tid); + mlxsw_core->driver->txhdr_construct(skb, &trans->tx_info); + + spin_lock_bh(&mlxsw_core->emad.trans_list_lock); + list_add_tail_rcu(&trans->list, &mlxsw_core->emad.trans_list); + spin_unlock_bh(&mlxsw_core->emad.trans_list_lock); + err = mlxsw_emad_transmit(mlxsw_core, trans); + if (err) + goto err_out; + return 0; + +err_out: + spin_lock_bh(&mlxsw_core->emad.trans_list_lock); + list_del_rcu(&trans->list); + spin_unlock_bh(&mlxsw_core->emad.trans_list_lock); + list_del(&trans->bulk_list); + dev_kfree_skb(trans->tx_skb); + return err; +} + /***************** * Core functions *****************/ @@ -689,24 +779,6 @@ static const struct file_operations mlxsw_core_rx_stats_dbg_ops = { .llseek = seq_lseek }; -static void mlxsw_core_buf_dump_dbg(struct mlxsw_core *mlxsw_core, - const char *buf, size_t size) -{ - __be32 *m = (__be32 *) buf; - int i; - int count = size / sizeof(__be32); - - for (i = count - 1; i >= 0; i--) - if (m[i]) - break; - i++; - count = i ? 
i : 1; - for (i = 0; i < count; i += 4) - dev_dbg(mlxsw_core->bus_info->dev, "%04x - %08x %08x %08x %08x\n", - i * 4, be32_to_cpu(m[i]), be32_to_cpu(m[i + 1]), - be32_to_cpu(m[i + 2]), be32_to_cpu(m[i + 3])); -} - int mlxsw_core_driver_register(struct mlxsw_driver *mlxsw_driver) { spin_lock(&mlxsw_core_driver_list_lock); @@ -1264,56 +1336,112 @@ void mlxsw_core_event_listener_unregister(struct mlxsw_core *mlxsw_core, } EXPORT_SYMBOL(mlxsw_core_event_listener_unregister); +static u64 mlxsw_core_tid_get(struct mlxsw_core *mlxsw_core) +{ + return atomic64_inc_return(&mlxsw_core->emad.tid); +} + static int mlxsw_core_reg_access_emad(struct mlxsw_core *mlxsw_core, const struct mlxsw_reg_info *reg, char *payload, - enum mlxsw_core_reg_access_type type) + enum mlxsw_core_reg_access_type type, + struct list_head *bulk_list, + mlxsw_reg_trans_cb_t *cb, + unsigned long cb_priv) { + u64 tid = mlxsw_core_tid_get(mlxsw_core); + struct mlxsw_reg_trans *trans; int err; - char *op_tlv; - struct sk_buff *skb; - struct mlxsw_tx_info tx_info = { - .local_port = MLXSW_PORT_CPU_PORT, - .is_emad = true, - }; - skb = mlxsw_emad_alloc(mlxsw_core, reg->len); - if (!skb) + trans = kzalloc(sizeof(*trans), GFP_KERNEL); + if (!trans) return -ENOMEM; - mlxsw_emad_construct(skb, reg, payload, type, mlxsw_core); - mlxsw_core->driver->txhdr_construct(skb, &tx_info); - - dev_dbg(mlxsw_core->bus_info->dev, "EMAD send (tid=%llx)\n", - mlxsw_core->emad.tid); - mlxsw_core_buf_dump_dbg(mlxsw_core, skb->data, skb->len); - - err = mlxsw_emad_transmit(mlxsw_core, skb, &tx_info); - if (!err) { - op_tlv = mlxsw_emad_op_tlv(mlxsw_core->emad.resp_skb); - memcpy(payload, mlxsw_emad_reg_payload(op_tlv), - reg->len); - - dev_dbg(mlxsw_core->bus_info->dev, "EMAD recv (tid=%llx)\n", - mlxsw_core->emad.tid - 1); - mlxsw_core_buf_dump_dbg(mlxsw_core, - mlxsw_core->emad.resp_skb->data, - mlxsw_core->emad.resp_skb->len); - - dev_kfree_skb(mlxsw_core->emad.resp_skb); + err = mlxsw_emad_reg_access(mlxsw_core, reg, payload, type, trans, + bulk_list, cb, cb_priv, tid); + if (err) { + kfree(trans); + return err; } + return 0; +} +int mlxsw_reg_trans_query(struct mlxsw_core *mlxsw_core, + const struct mlxsw_reg_info *reg, char *payload, + struct list_head *bulk_list, + mlxsw_reg_trans_cb_t *cb, unsigned long cb_priv) +{ + return mlxsw_core_reg_access_emad(mlxsw_core, reg, payload, + MLXSW_CORE_REG_ACCESS_TYPE_QUERY, + bulk_list, cb, cb_priv); +} +EXPORT_SYMBOL(mlxsw_reg_trans_query); + +int mlxsw_reg_trans_write(struct mlxsw_core *mlxsw_core, + const struct mlxsw_reg_info *reg, char *payload, + struct list_head *bulk_list, + mlxsw_reg_trans_cb_t *cb, unsigned long cb_priv) +{ + return mlxsw_core_reg_access_emad(mlxsw_core, reg, payload, + MLXSW_CORE_REG_ACCESS_TYPE_WRITE, + bulk_list, cb, cb_priv); +} +EXPORT_SYMBOL(mlxsw_reg_trans_write); + +static int mlxsw_reg_trans_wait(struct mlxsw_reg_trans *trans) +{ + struct mlxsw_core *mlxsw_core = trans->core; + int err; + + wait_for_completion(&trans->completion); + cancel_delayed_work_sync(&trans->timeout_dw); + err = trans->err; + + if (trans->retries) + dev_warn(mlxsw_core->bus_info->dev, "EMAD retries (%d/%d) (tid=%llx)\n", + trans->retries, MLXSW_EMAD_MAX_RETRY, trans->tid); + if (err) + dev_err(mlxsw_core->bus_info->dev, "EMAD reg access failed (tid=%llx,reg_id=%x(%s),type=%s,status=%x(%s))\n", + trans->tid, trans->reg->id, + mlxsw_reg_id_str(trans->reg->id), + mlxsw_core_reg_access_type_str(trans->type), + trans->emad_status, + mlxsw_emad_op_tlv_status_str(trans->emad_status)); + + 
list_del(&trans->bulk_list); + kfree_rcu(trans, rcu); return err; } +int mlxsw_reg_trans_bulk_wait(struct list_head *bulk_list) +{ + struct mlxsw_reg_trans *trans; + struct mlxsw_reg_trans *tmp; + int sum_err = 0; + int err; + + list_for_each_entry_safe(trans, tmp, bulk_list, bulk_list) { + err = mlxsw_reg_trans_wait(trans); + if (err && sum_err == 0) + sum_err = err; /* first error to be returned */ + } + return sum_err; +} +EXPORT_SYMBOL(mlxsw_reg_trans_bulk_wait); + static int mlxsw_core_reg_access_cmd(struct mlxsw_core *mlxsw_core, const struct mlxsw_reg_info *reg, char *payload, enum mlxsw_core_reg_access_type type) { + enum mlxsw_emad_op_tlv_status status; int err, n_retry; char *in_mbox, *out_mbox, *tmp; + dev_dbg(mlxsw_core->bus_info->dev, "Reg cmd access (reg_id=%x(%s),type=%s)\n", + reg->id, mlxsw_reg_id_str(reg->id), + mlxsw_core_reg_access_type_str(type)); + in_mbox = mlxsw_cmd_mbox_alloc(); if (!in_mbox) return -ENOMEM; @@ -1324,7 +1452,8 @@ static int mlxsw_core_reg_access_cmd(struct mlxsw_core *mlxsw_core, goto free_in_mbox; } - mlxsw_emad_pack_op_tlv(in_mbox, reg, type, mlxsw_core); + mlxsw_emad_pack_op_tlv(in_mbox, reg, type, + mlxsw_core_tid_get(mlxsw_core)); tmp = in_mbox + MLXSW_EMAD_OP_TLV_LEN * sizeof(u32); mlxsw_emad_pack_reg_tlv(tmp, reg, payload); @@ -1332,60 +1461,61 @@ static int mlxsw_core_reg_access_cmd(struct mlxsw_core *mlxsw_core, retry: err = mlxsw_cmd_access_reg(mlxsw_core, in_mbox, out_mbox); if (!err) { - err = mlxsw_emad_process_status(mlxsw_core, out_mbox); - if (err == -EAGAIN && n_retry++ < MLXSW_EMAD_MAX_RETRY) - goto retry; + err = mlxsw_emad_process_status(out_mbox, &status); + if (err) { + if (err == -EAGAIN && n_retry++ < MLXSW_EMAD_MAX_RETRY) + goto retry; + dev_err(mlxsw_core->bus_info->dev, "Reg cmd access status failed (status=%x(%s))\n", + status, mlxsw_emad_op_tlv_status_str(status)); + } } if (!err) memcpy(payload, mlxsw_emad_reg_payload(out_mbox), reg->len); - mlxsw_core->emad.tid++; mlxsw_cmd_mbox_free(out_mbox); free_in_mbox: mlxsw_cmd_mbox_free(in_mbox); + if (err) + dev_err(mlxsw_core->bus_info->dev, "Reg cmd access failed (reg_id=%x(%s),type=%s)\n", + reg->id, mlxsw_reg_id_str(reg->id), + mlxsw_core_reg_access_type_str(type)); return err; } +static void mlxsw_core_reg_access_cb(struct mlxsw_core *mlxsw_core, + char *payload, size_t payload_len, + unsigned long cb_priv) +{ + char *orig_payload = (char *) cb_priv; + + memcpy(orig_payload, payload, payload_len); +} + static int mlxsw_core_reg_access(struct mlxsw_core *mlxsw_core, const struct mlxsw_reg_info *reg, char *payload, enum mlxsw_core_reg_access_type type) { - u64 cur_tid; + LIST_HEAD(bulk_list); int err; - if (mutex_lock_interruptible(&mlxsw_core->emad.lock)) { - dev_err(mlxsw_core->bus_info->dev, "Reg access interrupted (reg_id=%x(%s),type=%s)\n", - reg->id, mlxsw_reg_id_str(reg->id), - mlxsw_core_reg_access_type_str(type)); - return -EINTR; - } - - cur_tid = mlxsw_core->emad.tid; - dev_dbg(mlxsw_core->bus_info->dev, "Reg access (tid=%llx,reg_id=%x(%s),type=%s)\n", - cur_tid, reg->id, mlxsw_reg_id_str(reg->id), - mlxsw_core_reg_access_type_str(type)); - /* During initialization EMAD interface is not available to us, * so we default to command interface. We switch to EMAD interface * after setting the appropriate traps. 
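
To make the new interface above easier to follow, here is a rough sketch of how a caller is expected to batch several register accesses and reap them in one go with mlxsw_reg_trans_query() and mlxsw_reg_trans_bulk_wait(). It is illustrative only: the "foo" register, MLXSW_REG_FOO_LEN and the commented-out pack call are placeholders, and the error handling simply mirrors the queue-then-wait pattern used by the spectrum shared-buffer code later in this series.

/* Sketch, not part of the patch: "foo" stands for any register. */
static void my_query_cb(struct mlxsw_core *mlxsw_core, char *payload,
                        size_t payload_len, unsigned long cb_priv)
{
        u32 *result = (u32 *) cb_priv;

        /* Runs once the EMAD response for this transaction has arrived;
         * unpack whatever is needed from the register payload here.
         */
        *result = 0; /* e.g. mlxsw_reg_foo_field_get(payload) */
}

static int my_bulk_query(struct mlxsw_core *mlxsw_core, u32 *results, int count)
{
        char foo_pl[MLXSW_REG_FOO_LEN];        /* placeholder register buffer */
        LIST_HEAD(bulk_list);
        int err = 0;
        int err2;
        int i;

        for (i = 0; i < count; i++) {
                /* mlxsw_reg_foo_pack(foo_pl, i); -- placeholder pack call */
                err = mlxsw_reg_trans_query(mlxsw_core, MLXSW_REG(foo), foo_pl,
                                            &bulk_list, my_query_cb,
                                            (unsigned long) &results[i]);
                if (err)
                        break;
        }
        /* Reap everything that was queued, even if a later queue attempt
         * failed; transactions that failed to queue are not on the list.
         */
        err2 = mlxsw_reg_trans_bulk_wait(&bulk_list);
        return err ? err : err2;
}

Reusing one payload buffer per loop iteration is fine here because the payload is copied into the EMAD skb before mlxsw_reg_trans_query() returns.
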
*/ if (!mlxsw_core->emad.use_emad) - err = mlxsw_core_reg_access_cmd(mlxsw_core, reg, - payload, type); - else - err = mlxsw_core_reg_access_emad(mlxsw_core, reg, + return mlxsw_core_reg_access_cmd(mlxsw_core, reg, payload, type); + err = mlxsw_core_reg_access_emad(mlxsw_core, reg, + payload, type, &bulk_list, + mlxsw_core_reg_access_cb, + (unsigned long) payload); if (err) - dev_err(mlxsw_core->bus_info->dev, "Reg access failed (tid=%llx,reg_id=%x(%s),type=%s)\n", - cur_tid, reg->id, mlxsw_reg_id_str(reg->id), - mlxsw_core_reg_access_type_str(type)); - - mutex_unlock(&mlxsw_core->emad.lock); - return err; + return err; + return mlxsw_reg_trans_bulk_wait(&bulk_list); } int mlxsw_reg_query(struct mlxsw_core *mlxsw_core, @@ -1536,6 +1666,24 @@ void mlxsw_core_port_fini(struct mlxsw_core_port *mlxsw_core_port) } EXPORT_SYMBOL(mlxsw_core_port_fini); +static void mlxsw_core_buf_dump_dbg(struct mlxsw_core *mlxsw_core, + const char *buf, size_t size) +{ + __be32 *m = (__be32 *) buf; + int i; + int count = size / sizeof(__be32); + + for (i = count - 1; i >= 0; i--) + if (m[i]) + break; + i++; + count = i ? i : 1; + for (i = 0; i < count; i += 4) + dev_dbg(mlxsw_core->bus_info->dev, "%04x - %08x %08x %08x %08x\n", + i * 4, be32_to_cpu(m[i]), be32_to_cpu(m[i + 1]), + be32_to_cpu(m[i + 2]), be32_to_cpu(m[i + 3])); +} + int mlxsw_cmd_exec(struct mlxsw_core *mlxsw_core, u16 opcode, u8 opcode_mod, u32 in_mod, bool out_mbox_direct, char *in_mbox, size_t in_mbox_size, diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h index b41ebf8cad72..436bc49df6ab 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core.h @@ -109,6 +109,19 @@ void mlxsw_core_event_listener_unregister(struct mlxsw_core *mlxsw_core, const struct mlxsw_event_listener *el, void *priv); +typedef void mlxsw_reg_trans_cb_t(struct mlxsw_core *mlxsw_core, char *payload, + size_t payload_len, unsigned long cb_priv); + +int mlxsw_reg_trans_query(struct mlxsw_core *mlxsw_core, + const struct mlxsw_reg_info *reg, char *payload, + struct list_head *bulk_list, + mlxsw_reg_trans_cb_t *cb, unsigned long cb_priv); +int mlxsw_reg_trans_write(struct mlxsw_core *mlxsw_core, + const struct mlxsw_reg_info *reg, char *payload, + struct list_head *bulk_list, + mlxsw_reg_trans_cb_t *cb, unsigned long cb_priv); +int mlxsw_reg_trans_bulk_wait(struct list_head *bulk_list); + int mlxsw_reg_query(struct mlxsw_core *mlxsw_core, const struct mlxsw_reg_info *reg, char *payload); int mlxsw_reg_write(struct mlxsw_core *mlxsw_core, From 2d0ed39fbdee64835dc710b4ee3897f2bb9f8cf4 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Thu, 14 Apr 2016 18:19:30 +0200 Subject: [PATCH 0678/1649] mlxsw: spectrum_buffers: Implement occupancy monitoring Implement occupancy API introduced in devlink and mlxsw core. This is done by accessing SBPM register for Port-Pool and SBSR for Port-TC current and max occupancy values. Max clear is implemented using the same registers. Signed-off-by: Jiri Pirko Reviewed-by: Ido Schimmel Signed-off-by: David S. 
Miller --- .../net/ethernet/mellanox/mlxsw/spectrum.c | 36 +-- .../net/ethernet/mellanox/mlxsw/spectrum.h | 18 ++ .../mellanox/mlxsw/spectrum_buffers.c | 255 ++++++++++++++++++ 3 files changed, 293 insertions(+), 16 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index ecadb15c4907..681afe1a3802 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -2493,22 +2493,26 @@ static struct mlxsw_config_profile mlxsw_sp_config_profile = { }; static struct mlxsw_driver mlxsw_sp_driver = { - .kind = MLXSW_DEVICE_KIND_SPECTRUM, - .owner = THIS_MODULE, - .priv_size = sizeof(struct mlxsw_sp), - .init = mlxsw_sp_init, - .fini = mlxsw_sp_fini, - .port_split = mlxsw_sp_port_split, - .port_unsplit = mlxsw_sp_port_unsplit, - .sb_pool_get = mlxsw_sp_sb_pool_get, - .sb_pool_set = mlxsw_sp_sb_pool_set, - .sb_port_pool_get = mlxsw_sp_sb_port_pool_get, - .sb_port_pool_set = mlxsw_sp_sb_port_pool_set, - .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get, - .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set, - .txhdr_construct = mlxsw_sp_txhdr_construct, - .txhdr_len = MLXSW_TXHDR_LEN, - .profile = &mlxsw_sp_config_profile, + .kind = MLXSW_DEVICE_KIND_SPECTRUM, + .owner = THIS_MODULE, + .priv_size = sizeof(struct mlxsw_sp), + .init = mlxsw_sp_init, + .fini = mlxsw_sp_fini, + .port_split = mlxsw_sp_port_split, + .port_unsplit = mlxsw_sp_port_unsplit, + .sb_pool_get = mlxsw_sp_sb_pool_get, + .sb_pool_set = mlxsw_sp_sb_pool_set, + .sb_port_pool_get = mlxsw_sp_sb_port_pool_get, + .sb_port_pool_set = mlxsw_sp_sb_port_pool_set, + .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get, + .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set, + .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot, + .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear, + .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get, + .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get, + .txhdr_construct = mlxsw_sp_txhdr_construct, + .txhdr_len = MLXSW_TXHDR_LEN, + .profile = &mlxsw_sp_config_profile, }; static int diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index 6458efa5607e..e2c022d3e2f3 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -123,15 +123,22 @@ struct mlxsw_sp_sb_pr { u32 size; }; +struct mlxsw_cp_sb_occ { + u32 cur; + u32 max; +}; + struct mlxsw_sp_sb_cm { u32 min_buff; u32 max_buff; u8 pool; + struct mlxsw_cp_sb_occ occ; }; struct mlxsw_sp_sb_pm { u32 min_buff; u32 max_buff; + struct mlxsw_cp_sb_occ occ; }; #define MLXSW_SP_SB_POOL_COUNT 4 @@ -328,6 +335,17 @@ int mlxsw_sp_sb_tc_pool_bind_set(struct mlxsw_core_port *mlxsw_core_port, unsigned int sb_index, u16 tc_index, enum devlink_sb_pool_type pool_type, u16 pool_index, u32 threshold); +int mlxsw_sp_sb_occ_snapshot(struct mlxsw_core *mlxsw_core, + unsigned int sb_index); +int mlxsw_sp_sb_occ_max_clear(struct mlxsw_core *mlxsw_core, + unsigned int sb_index); +int mlxsw_sp_sb_occ_port_pool_get(struct mlxsw_core_port *mlxsw_core_port, + unsigned int sb_index, u16 pool_index, + u32 *p_cur, u32 *p_max); +int mlxsw_sp_sb_occ_tc_port_bind_get(struct mlxsw_core_port *mlxsw_core_port, + unsigned int sb_index, u16 tc_index, + enum devlink_sb_pool_type pool_type, + u32 *p_cur, u32 *p_max); int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp); void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp); diff --git 
a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c index 639ba5ae8bbd..f2e073af5dd2 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c @@ -36,6 +36,7 @@ #include #include #include +#include #include "spectrum.h" #include "core.h" @@ -125,6 +126,41 @@ static int mlxsw_sp_sb_pm_write(struct mlxsw_sp *mlxsw_sp, u8 local_port, return 0; } +static int mlxsw_sp_sb_pm_occ_clear(struct mlxsw_sp *mlxsw_sp, u8 local_port, + u8 pool, enum mlxsw_reg_sbxx_dir dir, + struct list_head *bulk_list) +{ + char sbpm_pl[MLXSW_REG_SBPM_LEN]; + + mlxsw_reg_sbpm_pack(sbpm_pl, local_port, pool, dir, true, 0, 0); + return mlxsw_reg_trans_query(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl, + bulk_list, NULL, 0); +} + +static void mlxsw_sp_sb_pm_occ_query_cb(struct mlxsw_core *mlxsw_core, + char *sbpm_pl, size_t sbpm_pl_len, + unsigned long cb_priv) +{ + struct mlxsw_sp_sb_pm *pm = (struct mlxsw_sp_sb_pm *) cb_priv; + + mlxsw_reg_sbpm_unpack(sbpm_pl, &pm->occ.cur, &pm->occ.max); +} + +static int mlxsw_sp_sb_pm_occ_query(struct mlxsw_sp *mlxsw_sp, u8 local_port, + u8 pool, enum mlxsw_reg_sbxx_dir dir, + struct list_head *bulk_list) +{ + char sbpm_pl[MLXSW_REG_SBPM_LEN]; + struct mlxsw_sp_sb_pm *pm; + + pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port, pool, dir); + mlxsw_reg_sbpm_pack(sbpm_pl, local_port, pool, dir, false, 0, 0); + return mlxsw_reg_trans_query(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl, + bulk_list, + mlxsw_sp_sb_pm_occ_query_cb, + (unsigned long) pm); +} + static const u16 mlxsw_sp_pbs[] = { 2 * MLXSW_SP_BYTES_TO_CELLS(ETH_FRAME_LEN), 0, @@ -707,3 +743,222 @@ int mlxsw_sp_sb_tc_pool_bind_set(struct mlxsw_core_port *mlxsw_core_port, return mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, pg_buff, dir, 0, max_buff, pool); } + +#define MASKED_COUNT_MAX \ + (MLXSW_REG_SBSR_REC_MAX_COUNT / (MLXSW_SP_SB_TC_COUNT * 2)) + +struct mlxsw_sp_sb_sr_occ_query_cb_ctx { + u8 masked_count; + u8 local_port_1; +}; + +static void mlxsw_sp_sb_sr_occ_query_cb(struct mlxsw_core *mlxsw_core, + char *sbsr_pl, size_t sbsr_pl_len, + unsigned long cb_priv) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); + struct mlxsw_sp_sb_sr_occ_query_cb_ctx cb_ctx; + u8 masked_count; + u8 local_port; + int rec_index = 0; + struct mlxsw_sp_sb_cm *cm; + int i; + + memcpy(&cb_ctx, &cb_priv, sizeof(cb_ctx)); + + masked_count = 0; + for (local_port = cb_ctx.local_port_1; + local_port < MLXSW_PORT_MAX_PORTS; local_port++) { + if (!mlxsw_sp->ports[local_port]) + continue; + for (i = 0; i < MLXSW_SP_SB_TC_COUNT; i++) { + cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, i, + MLXSW_REG_SBXX_DIR_INGRESS); + mlxsw_reg_sbsr_rec_unpack(sbsr_pl, rec_index++, + &cm->occ.cur, &cm->occ.max); + } + if (++masked_count == cb_ctx.masked_count) + break; + } + masked_count = 0; + for (local_port = cb_ctx.local_port_1; + local_port < MLXSW_PORT_MAX_PORTS; local_port++) { + if (!mlxsw_sp->ports[local_port]) + continue; + for (i = 0; i < MLXSW_SP_SB_TC_COUNT; i++) { + cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, i, + MLXSW_REG_SBXX_DIR_EGRESS); + mlxsw_reg_sbsr_rec_unpack(sbsr_pl, rec_index++, + &cm->occ.cur, &cm->occ.max); + } + if (++masked_count == cb_ctx.masked_count) + break; + } +} + +int mlxsw_sp_sb_occ_snapshot(struct mlxsw_core *mlxsw_core, + unsigned int sb_index) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); + struct mlxsw_sp_sb_sr_occ_query_cb_ctx cb_ctx; + unsigned long cb_priv; + 
LIST_HEAD(bulk_list); + char *sbsr_pl; + u8 masked_count; + u8 local_port_1; + u8 local_port = 0; + int i; + int err; + int err2; + + sbsr_pl = kmalloc(MLXSW_REG_SBSR_LEN, GFP_KERNEL); + if (!sbsr_pl) + return -ENOMEM; + +next_batch: + local_port++; + local_port_1 = local_port; + masked_count = 0; + mlxsw_reg_sbsr_pack(sbsr_pl, false); + for (i = 0; i < MLXSW_SP_SB_TC_COUNT; i++) { + mlxsw_reg_sbsr_pg_buff_mask_set(sbsr_pl, i, 1); + mlxsw_reg_sbsr_tclass_mask_set(sbsr_pl, i, 1); + } + for (; local_port < MLXSW_PORT_MAX_PORTS; local_port++) { + if (!mlxsw_sp->ports[local_port]) + continue; + mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl, local_port, 1); + mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl, local_port, 1); + for (i = 0; i < MLXSW_SP_SB_POOL_COUNT; i++) { + err = mlxsw_sp_sb_pm_occ_query(mlxsw_sp, local_port, i, + MLXSW_REG_SBXX_DIR_INGRESS, + &bulk_list); + if (err) + goto out; + err = mlxsw_sp_sb_pm_occ_query(mlxsw_sp, local_port, i, + MLXSW_REG_SBXX_DIR_EGRESS, + &bulk_list); + if (err) + goto out; + } + if (++masked_count == MASKED_COUNT_MAX) + goto do_query; + } + +do_query: + cb_ctx.masked_count = masked_count; + cb_ctx.local_port_1 = local_port_1; + memcpy(&cb_priv, &cb_ctx, sizeof(cb_ctx)); + err = mlxsw_reg_trans_query(mlxsw_core, MLXSW_REG(sbsr), sbsr_pl, + &bulk_list, mlxsw_sp_sb_sr_occ_query_cb, + cb_priv); + if (err) + goto out; + if (local_port < MLXSW_PORT_MAX_PORTS) + goto next_batch; + +out: + err2 = mlxsw_reg_trans_bulk_wait(&bulk_list); + if (!err) + err = err2; + kfree(sbsr_pl); + return err; +} + +int mlxsw_sp_sb_occ_max_clear(struct mlxsw_core *mlxsw_core, + unsigned int sb_index) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); + LIST_HEAD(bulk_list); + char *sbsr_pl; + unsigned int masked_count; + u8 local_port = 0; + int i; + int err; + int err2; + + sbsr_pl = kmalloc(MLXSW_REG_SBSR_LEN, GFP_KERNEL); + if (!sbsr_pl) + return -ENOMEM; + +next_batch: + local_port++; + masked_count = 0; + mlxsw_reg_sbsr_pack(sbsr_pl, true); + for (i = 0; i < MLXSW_SP_SB_TC_COUNT; i++) { + mlxsw_reg_sbsr_pg_buff_mask_set(sbsr_pl, i, 1); + mlxsw_reg_sbsr_tclass_mask_set(sbsr_pl, i, 1); + } + for (; local_port < MLXSW_PORT_MAX_PORTS; local_port++) { + if (!mlxsw_sp->ports[local_port]) + continue; + mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl, local_port, 1); + mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl, local_port, 1); + for (i = 0; i < MLXSW_SP_SB_POOL_COUNT; i++) { + err = mlxsw_sp_sb_pm_occ_clear(mlxsw_sp, local_port, i, + MLXSW_REG_SBXX_DIR_INGRESS, + &bulk_list); + if (err) + goto out; + err = mlxsw_sp_sb_pm_occ_clear(mlxsw_sp, local_port, i, + MLXSW_REG_SBXX_DIR_EGRESS, + &bulk_list); + if (err) + goto out; + } + if (++masked_count == MASKED_COUNT_MAX) + goto do_query; + } + +do_query: + err = mlxsw_reg_trans_query(mlxsw_core, MLXSW_REG(sbsr), sbsr_pl, + &bulk_list, NULL, 0); + if (err) + goto out; + if (local_port < MLXSW_PORT_MAX_PORTS) + goto next_batch; + +out: + err2 = mlxsw_reg_trans_bulk_wait(&bulk_list); + if (!err) + err = err2; + kfree(sbsr_pl); + return err; +} + +int mlxsw_sp_sb_occ_port_pool_get(struct mlxsw_core_port *mlxsw_core_port, + unsigned int sb_index, u16 pool_index, + u32 *p_cur, u32 *p_max) +{ + struct mlxsw_sp_port *mlxsw_sp_port = + mlxsw_core_port_driver_priv(mlxsw_core_port); + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + u8 local_port = mlxsw_sp_port->local_port; + u8 pool = pool_get(pool_index); + enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index); + struct mlxsw_sp_sb_pm *pm = mlxsw_sp_sb_pm_get(mlxsw_sp, 
local_port, + pool, dir); + + *p_cur = MLXSW_SP_CELLS_TO_BYTES(pm->occ.cur); + *p_max = MLXSW_SP_CELLS_TO_BYTES(pm->occ.max); + return 0; +} + +int mlxsw_sp_sb_occ_tc_port_bind_get(struct mlxsw_core_port *mlxsw_core_port, + unsigned int sb_index, u16 tc_index, + enum devlink_sb_pool_type pool_type, + u32 *p_cur, u32 *p_max) +{ + struct mlxsw_sp_port *mlxsw_sp_port = + mlxsw_core_port_driver_priv(mlxsw_core_port); + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + u8 local_port = mlxsw_sp_port->local_port; + u8 pg_buff = tc_index; + enum mlxsw_reg_sbxx_dir dir = pool_type; + struct mlxsw_sp_sb_cm *cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, + pg_buff, dir); + + *p_cur = MLXSW_SP_CELLS_TO_BYTES(cm->occ.cur); + *p_max = MLXSW_SP_CELLS_TO_BYTES(cm->occ.max); + return 0; +} From 518f213dddb3375e8cf0fcb791dda4c7d1ce4c74 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Sun, 10 Apr 2016 21:44:44 -0400 Subject: [PATCH 0679/1649] ethtool: Add support for toggling any of the GSO offloads The strings were missing for several of the GSO offloads that are available. This patch provides the missing strings so that we can toggle or query any of them via the ethtool command. Signed-off-by: Alexander Duyck Signed-off-by: David S. Miller --- net/core/ethtool.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/net/core/ethtool.c b/net/core/ethtool.c index f426c5ad6149..6a7f99661c2f 100644 --- a/net/core/ethtool.c +++ b/net/core/ethtool.c @@ -82,9 +82,11 @@ static const char netdev_features_strings[NETDEV_FEATURE_COUNT][ETH_GSTRING_LEN] [NETIF_F_TSO6_BIT] = "tx-tcp6-segmentation", [NETIF_F_FSO_BIT] = "tx-fcoe-segmentation", [NETIF_F_GSO_GRE_BIT] = "tx-gre-segmentation", + [NETIF_F_GSO_GRE_CSUM_BIT] = "tx-gre-csum-segmentation", [NETIF_F_GSO_IPIP_BIT] = "tx-ipip-segmentation", [NETIF_F_GSO_SIT_BIT] = "tx-sit-segmentation", [NETIF_F_GSO_UDP_TUNNEL_BIT] = "tx-udp_tnl-segmentation", + [NETIF_F_GSO_UDP_TUNNEL_CSUM_BIT] = "tx-udp_tnl-csum-segmentation", [NETIF_F_FCOE_CRC_BIT] = "tx-checksum-fcoe-crc", [NETIF_F_SCTP_CRC_BIT] = "tx-checksum-sctp", From cbc53e08a793b073e79f42ca33f1f3568703540d Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Sun, 10 Apr 2016 21:44:51 -0400 Subject: [PATCH 0680/1649] GSO: Add GSO type for fixed IPv4 ID This patch adds support for TSO using IPv4 headers with a fixed IP ID field. This is meant to allow us to do a lossless GRO in the case of TCP flows that use a fixed IP ID such as those that convert IPv6 header to IPv4 headers. In addition I am adding a feature that for now I am referring to TSO with IP ID mangling. Basically when this flag is enabled the device has the option to either output the flow with incrementing IP IDs or with a fixed IP ID regardless of what the original IP ID ordering was. This is useful in cases where the DF bit is set and we do not care if the original IP ID value is maintained. Signed-off-by: Alexander Duyck Signed-off-by: David S. 
Miller --- include/linux/netdev_features.h | 3 +++ include/linux/netdevice.h | 1 + include/linux/skbuff.h | 20 ++++++++++--------- net/core/dev.c | 34 ++++++++++++++++++++++++++++----- net/core/ethtool.c | 1 + net/ipv4/af_inet.c | 19 ++++++++++-------- net/ipv4/gre_offload.c | 1 + net/ipv4/tcp_offload.c | 4 +++- net/ipv6/ip6_offload.c | 3 ++- net/mpls/mpls_gso.c | 1 + 10 files changed, 63 insertions(+), 24 deletions(-) diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h index a734bf43d190..7cf272a4b5c8 100644 --- a/include/linux/netdev_features.h +++ b/include/linux/netdev_features.h @@ -39,6 +39,7 @@ enum { NETIF_F_UFO_BIT, /* ... UDPv4 fragmentation */ NETIF_F_GSO_ROBUST_BIT, /* ... ->SKB_GSO_DODGY */ NETIF_F_TSO_ECN_BIT, /* ... TCP ECN support */ + NETIF_F_TSO_MANGLEID_BIT, /* ... IPV4 ID mangling allowed */ NETIF_F_TSO6_BIT, /* ... TCPv6 segmentation */ NETIF_F_FSO_BIT, /* ... FCoE segmentation */ NETIF_F_GSO_GRE_BIT, /* ... GRE with TSO */ @@ -120,6 +121,7 @@ enum { #define NETIF_F_GSO_SIT __NETIF_F(GSO_SIT) #define NETIF_F_GSO_UDP_TUNNEL __NETIF_F(GSO_UDP_TUNNEL) #define NETIF_F_GSO_UDP_TUNNEL_CSUM __NETIF_F(GSO_UDP_TUNNEL_CSUM) +#define NETIF_F_TSO_MANGLEID __NETIF_F(TSO_MANGLEID) #define NETIF_F_GSO_TUNNEL_REMCSUM __NETIF_F(GSO_TUNNEL_REMCSUM) #define NETIF_F_HW_VLAN_STAG_FILTER __NETIF_F(HW_VLAN_STAG_FILTER) #define NETIF_F_HW_VLAN_STAG_RX __NETIF_F(HW_VLAN_STAG_RX) @@ -147,6 +149,7 @@ enum { /* List of features with software fallbacks. */ #define NETIF_F_GSO_SOFTWARE (NETIF_F_TSO | NETIF_F_TSO_ECN | \ + NETIF_F_TSO_MANGLEID | \ NETIF_F_TSO6 | NETIF_F_UFO) /* List of IP checksum features. Note that NETIF_F_ HW_CSUM should not be diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 9884fe9a6552..8e372d01b3c1 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -3992,6 +3992,7 @@ static inline bool net_gso_ok(netdev_features_t features, int gso_type) BUILD_BUG_ON(SKB_GSO_UDP != (NETIF_F_UFO >> NETIF_F_GSO_SHIFT)); BUILD_BUG_ON(SKB_GSO_DODGY != (NETIF_F_GSO_ROBUST >> NETIF_F_GSO_SHIFT)); BUILD_BUG_ON(SKB_GSO_TCP_ECN != (NETIF_F_TSO_ECN >> NETIF_F_GSO_SHIFT)); + BUILD_BUG_ON(SKB_GSO_TCP_FIXEDID != (NETIF_F_TSO_MANGLEID >> NETIF_F_GSO_SHIFT)); BUILD_BUG_ON(SKB_GSO_TCPV6 != (NETIF_F_TSO6 >> NETIF_F_GSO_SHIFT)); BUILD_BUG_ON(SKB_GSO_FCOE != (NETIF_F_FSO >> NETIF_F_GSO_SHIFT)); BUILD_BUG_ON(SKB_GSO_GRE != (NETIF_F_GSO_GRE >> NETIF_F_GSO_SHIFT)); diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 007381270ff8..5fba16658f9d 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -465,23 +465,25 @@ enum { /* This indicates the tcp segment has CWR set. 
*/ SKB_GSO_TCP_ECN = 1 << 3, - SKB_GSO_TCPV6 = 1 << 4, + SKB_GSO_TCP_FIXEDID = 1 << 4, - SKB_GSO_FCOE = 1 << 5, + SKB_GSO_TCPV6 = 1 << 5, - SKB_GSO_GRE = 1 << 6, + SKB_GSO_FCOE = 1 << 6, - SKB_GSO_GRE_CSUM = 1 << 7, + SKB_GSO_GRE = 1 << 7, - SKB_GSO_IPIP = 1 << 8, + SKB_GSO_GRE_CSUM = 1 << 8, - SKB_GSO_SIT = 1 << 9, + SKB_GSO_IPIP = 1 << 9, - SKB_GSO_UDP_TUNNEL = 1 << 10, + SKB_GSO_SIT = 1 << 10, - SKB_GSO_UDP_TUNNEL_CSUM = 1 << 11, + SKB_GSO_UDP_TUNNEL = 1 << 11, - SKB_GSO_TUNNEL_REMCSUM = 1 << 12, + SKB_GSO_UDP_TUNNEL_CSUM = 1 << 12, + + SKB_GSO_TUNNEL_REMCSUM = 1 << 13, }; #if BITS_PER_LONG > 32 diff --git a/net/core/dev.c b/net/core/dev.c index 09fb1ace9dc8..e896b1953ab6 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -2825,14 +2825,36 @@ static netdev_features_t dflt_features_check(const struct sk_buff *skb, return vlan_features_check(skb, features); } +static netdev_features_t gso_features_check(const struct sk_buff *skb, + struct net_device *dev, + netdev_features_t features) +{ + u16 gso_segs = skb_shinfo(skb)->gso_segs; + + if (gso_segs > dev->gso_max_segs) + return features & ~NETIF_F_GSO_MASK; + + /* Make sure to clear the IPv4 ID mangling feature if + * the IPv4 header has the potential to be fragmented. + */ + if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) { + struct iphdr *iph = skb->encapsulation ? + inner_ip_hdr(skb) : ip_hdr(skb); + + if (!(iph->frag_off & htons(IP_DF))) + features &= ~NETIF_F_TSO_MANGLEID; + } + + return features; +} + netdev_features_t netif_skb_features(struct sk_buff *skb) { struct net_device *dev = skb->dev; netdev_features_t features = dev->features; - u16 gso_segs = skb_shinfo(skb)->gso_segs; - if (gso_segs > dev->gso_max_segs) - features &= ~NETIF_F_GSO_MASK; + if (skb_is_gso(skb)) + features = gso_features_check(skb, dev, features); /* If encapsulation offload request, verify we are testing * hardware encapsulation features instead of standard @@ -6976,9 +6998,11 @@ int register_netdevice(struct net_device *dev) dev->features |= NETIF_F_SOFT_FEATURES; dev->wanted_features = dev->features & dev->hw_features; - if (!(dev->flags & IFF_LOOPBACK)) { + if (!(dev->flags & IFF_LOOPBACK)) dev->hw_features |= NETIF_F_NOCACHE_COPY; - } + + if (dev->hw_features & NETIF_F_TSO) + dev->hw_features |= NETIF_F_TSO_MANGLEID; /* Make NETIF_F_HIGHDMA inheritable to VLAN devices. 
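
The DF-bit rule in gso_features_check() above is easy to verify in isolation. The following stand-alone userspace program (not kernel code; the feature bits are made-up stand-ins) mirrors the check: ID mangling is only left enabled when the header carries IP_DF and therefore can never be fragmented.

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>  /* htons() */

#define IP_DF                   0x4000          /* same value as the kernel's IP_DF */
#define DEMO_F_TSO              (1u << 0)       /* stand-in feature bits */
#define DEMO_F_TSO_MANGLEID     (1u << 1)

/* Mirror of the DF check in gso_features_check(); frag_off_be is the
 * on-the-wire (big-endian) IPv4 frag_off field.
 */
static unsigned int demo_features_check(uint16_t frag_off_be,
                                        unsigned int features)
{
        if (!(frag_off_be & htons(IP_DF)))
                features &= ~DEMO_F_TSO_MANGLEID;
        return features;
}

int main(void)
{
        unsigned int features = DEMO_F_TSO | DEMO_F_TSO_MANGLEID;

        printf("DF set:   keep TSO_MANGLEID? %s\n",
               demo_features_check(htons(IP_DF), features) &
               DEMO_F_TSO_MANGLEID ? "yes" : "no");
        printf("DF clear: keep TSO_MANGLEID? %s\n",
               demo_features_check(0, features) &
               DEMO_F_TSO_MANGLEID ? "yes" : "no");
        return 0;
}
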
*/ diff --git a/net/core/ethtool.c b/net/core/ethtool.c index 6a7f99661c2f..9494c41cc77c 100644 --- a/net/core/ethtool.c +++ b/net/core/ethtool.c @@ -79,6 +79,7 @@ static const char netdev_features_strings[NETDEV_FEATURE_COUNT][ETH_GSTRING_LEN] [NETIF_F_UFO_BIT] = "tx-udp-fragmentation", [NETIF_F_GSO_ROBUST_BIT] = "tx-gso-robust", [NETIF_F_TSO_ECN_BIT] = "tx-tcp-ecn-segmentation", + [NETIF_F_TSO_MANGLEID_BIT] = "tx-tcp-mangleid-segmentation", [NETIF_F_TSO6_BIT] = "tx-tcp6-segmentation", [NETIF_F_FSO_BIT] = "tx-fcoe-segmentation", [NETIF_F_GSO_GRE_BIT] = "tx-gre-segmentation", diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index 8217cd22f921..5bbea9a0ce96 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -1195,10 +1195,10 @@ EXPORT_SYMBOL(inet_sk_rebuild_header); static struct sk_buff *inet_gso_segment(struct sk_buff *skb, netdev_features_t features) { + bool udpfrag = false, fixedid = false, encap; struct sk_buff *segs = ERR_PTR(-EINVAL); const struct net_offload *ops; unsigned int offset = 0; - bool udpfrag, encap; struct iphdr *iph; int proto; int nhoff; @@ -1217,6 +1217,7 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb, SKB_GSO_TCPV6 | SKB_GSO_UDP_TUNNEL | SKB_GSO_UDP_TUNNEL_CSUM | + SKB_GSO_TCP_FIXEDID | SKB_GSO_TUNNEL_REMCSUM | 0))) goto out; @@ -1248,11 +1249,14 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb, segs = ERR_PTR(-EPROTONOSUPPORT); - if (skb->encapsulation && - skb_shinfo(skb)->gso_type & (SKB_GSO_SIT|SKB_GSO_IPIP)) - udpfrag = proto == IPPROTO_UDP && encap; - else - udpfrag = proto == IPPROTO_UDP && !skb->encapsulation; + if (!skb->encapsulation || encap) { + udpfrag = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP); + fixedid = !!(skb_shinfo(skb)->gso_type & SKB_GSO_TCP_FIXEDID); + + /* fixed ID is invalid if DF bit is not set */ + if (fixedid && !(iph->frag_off & htons(IP_DF))) + goto out; + } ops = rcu_dereference(inet_offloads[proto]); if (likely(ops && ops->callbacks.gso_segment)) @@ -1265,12 +1269,11 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb, do { iph = (struct iphdr *)(skb_mac_header(skb) + nhoff); if (udpfrag) { - iph->id = htons(id); iph->frag_off = htons(offset >> 3); if (skb->next) iph->frag_off |= htons(IP_MF); offset += skb->len - nhoff - ihl; - } else { + } else if (!fixedid) { iph->id = htons(id++); } iph->tot_len = htons(skb->len - nhoff); diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c index 6a5bd4317866..6376b0cdf693 100644 --- a/net/ipv4/gre_offload.c +++ b/net/ipv4/gre_offload.c @@ -32,6 +32,7 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb, SKB_GSO_UDP | SKB_GSO_DODGY | SKB_GSO_TCP_ECN | + SKB_GSO_TCP_FIXEDID | SKB_GSO_GRE | SKB_GSO_GRE_CSUM | SKB_GSO_IPIP | diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c index 773083b7f1e9..08dd25d835af 100644 --- a/net/ipv4/tcp_offload.c +++ b/net/ipv4/tcp_offload.c @@ -89,6 +89,7 @@ struct sk_buff *tcp_gso_segment(struct sk_buff *skb, ~(SKB_GSO_TCPV4 | SKB_GSO_DODGY | SKB_GSO_TCP_ECN | + SKB_GSO_TCP_FIXEDID | SKB_GSO_TCPV6 | SKB_GSO_GRE | SKB_GSO_GRE_CSUM | @@ -98,7 +99,8 @@ struct sk_buff *tcp_gso_segment(struct sk_buff *skb, SKB_GSO_UDP_TUNNEL_CSUM | SKB_GSO_TUNNEL_REMCSUM | 0) || - !(type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))) + !(type & (SKB_GSO_TCPV4 | + SKB_GSO_TCPV6)))) goto out; skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss); diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c index 204af2219471..b3a779393d71 100644 --- a/net/ipv6/ip6_offload.c +++ b/net/ipv6/ip6_offload.c @@ -73,6 +73,8 
@@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, SKB_GSO_UDP | SKB_GSO_DODGY | SKB_GSO_TCP_ECN | + SKB_GSO_TCP_FIXEDID | + SKB_GSO_TCPV6 | SKB_GSO_GRE | SKB_GSO_GRE_CSUM | SKB_GSO_IPIP | @@ -80,7 +82,6 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, SKB_GSO_UDP_TUNNEL | SKB_GSO_UDP_TUNNEL_CSUM | SKB_GSO_TUNNEL_REMCSUM | - SKB_GSO_TCPV6 | 0))) goto out; diff --git a/net/mpls/mpls_gso.c b/net/mpls/mpls_gso.c index 0183b32da942..bbcf60465e5c 100644 --- a/net/mpls/mpls_gso.c +++ b/net/mpls/mpls_gso.c @@ -31,6 +31,7 @@ static struct sk_buff *mpls_gso_segment(struct sk_buff *skb, SKB_GSO_TCPV6 | SKB_GSO_UDP | SKB_GSO_DODGY | + SKB_GSO_TCP_FIXEDID | SKB_GSO_TCP_ECN))) goto out; From 1530545ed64b42e87acb43c0c16401bd1ebae6bf Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Sun, 10 Apr 2016 21:44:57 -0400 Subject: [PATCH 0681/1649] GRO: Add support for TCP with fixed IPv4 ID field, limit tunnel IP ID values This patch does two things. First it allows TCP to aggregate TCP frames with a fixed IPv4 ID field. As a result we should now be able to aggregate flows that were converted from IPv6 to IPv4. In addition this allows us more flexibility for future implementations of segmentation as we may be able to use a fixed IP ID when segmenting the flow. The second thing this does is that it places limitations on the outer IPv4 ID header in the case of tunneled frames. Specifically it forces the IP ID to be incrementing by 1 unless the DF bit is set in the outer IPv4 header. This way we can avoid creating overlapping series of IP IDs that could possibly be fragmented if the frame goes through GRO and is then resegmented via GSO. Signed-off-by: Alexander Duyck Signed-off-by: David S. Miller --- include/linux/netdevice.h | 5 ++++- net/core/dev.c | 1 + net/ipv4/af_inet.c | 35 ++++++++++++++++++++++++++++------- net/ipv4/tcp_offload.c | 16 +++++++++++++++- net/ipv6/ip6_offload.c | 8 ++++++-- 5 files changed, 54 insertions(+), 11 deletions(-) diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 8e372d01b3c1..2d70c521d516 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -2121,7 +2121,10 @@ struct napi_gro_cb { /* Used in GRE, set in fou/gue_gro_receive */ u8 is_fou:1; - /* 6 bit hole */ + /* Used to determine if flush_id can be ignored */ + u8 is_atomic:1; + + /* 5 bit hole */ /* used to support CHECKSUM_COMPLETE for tunneling protocols */ __wsum csum; diff --git a/net/core/dev.c b/net/core/dev.c index e896b1953ab6..b78b586b1856 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -4462,6 +4462,7 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff NAPI_GRO_CB(skb)->free = 0; NAPI_GRO_CB(skb)->encap_mark = 0; NAPI_GRO_CB(skb)->is_fou = 0; + NAPI_GRO_CB(skb)->is_atomic = 1; NAPI_GRO_CB(skb)->gro_remcsum_start = 0; /* Setup for GRO checksum validation */ diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index 5bbea9a0ce96..8564cab96189 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -1328,6 +1328,7 @@ static struct sk_buff **inet_gro_receive(struct sk_buff **head, for (p = *head; p; p = p->next) { struct iphdr *iph2; + u16 flush_id; if (!NAPI_GRO_CB(p)->same_flow) continue; @@ -1351,16 +1352,36 @@ static struct sk_buff **inet_gro_receive(struct sk_buff **head, (iph->tos ^ iph2->tos) | ((iph->frag_off ^ iph2->frag_off) & htons(IP_DF)); - /* Save the IP ID check to be included later when we get to - * the transport layer so only the inner most IP ID is checked. 
- * This is because some GSO/TSO implementations do not - * correctly increment the IP ID for the outer hdrs. - */ - NAPI_GRO_CB(p)->flush_id = - ((u16)(ntohs(iph2->id) + NAPI_GRO_CB(p)->count) ^ id); NAPI_GRO_CB(p)->flush |= flush; + + /* We need to store of the IP ID check to be included later + * when we can verify that this packet does in fact belong + * to a given flow. + */ + flush_id = (u16)(id - ntohs(iph2->id)); + + /* This bit of code makes it much easier for us to identify + * the cases where we are doing atomic vs non-atomic IP ID + * checks. Specifically an atomic check can return IP ID + * values 0 - 0xFFFF, while a non-atomic check can only + * return 0 or 0xFFFF. + */ + if (!NAPI_GRO_CB(p)->is_atomic || + !(iph->frag_off & htons(IP_DF))) { + flush_id ^= NAPI_GRO_CB(p)->count; + flush_id = flush_id ? 0xFFFF : 0; + } + + /* If the previous IP ID value was based on an atomic + * datagram we can overwrite the value and ignore it. + */ + if (NAPI_GRO_CB(skb)->is_atomic) + NAPI_GRO_CB(p)->flush_id = flush_id; + else + NAPI_GRO_CB(p)->flush_id |= flush_id; } + NAPI_GRO_CB(skb)->is_atomic = !!(iph->frag_off & htons(IP_DF)); NAPI_GRO_CB(skb)->flush |= flush; skb_set_network_header(skb, off); /* The above will be needed by the transport layer if there is one diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c index 08dd25d835af..d1ffd55289bd 100644 --- a/net/ipv4/tcp_offload.c +++ b/net/ipv4/tcp_offload.c @@ -239,7 +239,7 @@ struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb) found: /* Include the IP ID check below from the inner most IP hdr */ - flush = NAPI_GRO_CB(p)->flush | NAPI_GRO_CB(p)->flush_id; + flush = NAPI_GRO_CB(p)->flush; flush |= (__force int)(flags & TCP_FLAG_CWR); flush |= (__force int)((flags ^ tcp_flag_word(th2)) & ~(TCP_FLAG_CWR | TCP_FLAG_FIN | TCP_FLAG_PSH)); @@ -248,6 +248,17 @@ found: flush |= *(u32 *)((u8 *)th + i) ^ *(u32 *)((u8 *)th2 + i); + /* When we receive our second frame we can made a decision on if we + * continue this flow as an atomic flow with a fixed ID or if we use + * an incrementing ID. + */ + if (NAPI_GRO_CB(p)->flush_id != 1 || + NAPI_GRO_CB(p)->count != 1 || + !NAPI_GRO_CB(p)->is_atomic) + flush |= NAPI_GRO_CB(p)->flush_id; + else + NAPI_GRO_CB(p)->is_atomic = false; + mss = skb_shinfo(p)->gso_size; flush |= (len - 1) >= mss; @@ -316,6 +327,9 @@ static int tcp4_gro_complete(struct sk_buff *skb, int thoff) iph->daddr, 0); skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4; + if (NAPI_GRO_CB(skb)->is_atomic) + skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_FIXEDID; + return tcp_gro_complete(skb); } diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c index b3a779393d71..061adcda65f3 100644 --- a/net/ipv6/ip6_offload.c +++ b/net/ipv6/ip6_offload.c @@ -240,10 +240,14 @@ static struct sk_buff **ipv6_gro_receive(struct sk_buff **head, NAPI_GRO_CB(p)->flush |= !!(first_word & htonl(0x0FF00000)); NAPI_GRO_CB(p)->flush |= flush; - /* Clear flush_id, there's really no concept of ID in IPv6. */ - NAPI_GRO_CB(p)->flush_id = 0; + /* If the previous IP ID value was based on an atomic + * datagram we can overwrite the value and ignore it. 
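
The interaction between the two hunks above, the IP ID delta recorded by inet_gro_receive() and the decision made by tcp_gro_receive() on the second segment, is the subtle part of this patch. Below is a small stand-alone userspace rework of just that logic, with both packets assumed to have DF set; the struct and function names are invented for the demo and the rest of the GRO flush conditions are left out.

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

struct demo_flow {
        uint16_t flush_id;
        uint16_t count;         /* segments already held for this flow */
        bool is_atomic;         /* previous packet had DF set */
};

/* Network-layer part: record the raw ID delta (atomic case). */
static void demo_inet_gro(struct demo_flow *p, uint16_t prev_id,
                          uint16_t new_id)
{
        p->flush_id = (uint16_t)(new_id - prev_id);
}

/* Transport-layer part: returns true if the new segment must not be merged. */
static bool demo_tcp_gro_flush(struct demo_flow *p)
{
        if (p->flush_id != 1 || p->count != 1 || !p->is_atomic)
                return p->flush_id != 0;
        /* Exactly +1 on the second segment: treat the flow as a normal
         * incrementing-ID flow from now on and keep aggregating.
         */
        p->is_atomic = false;
        return false;
}

static void demo(const char *name, uint16_t prev_id, uint16_t new_id)
{
        struct demo_flow p = { .count = 1, .is_atomic = true };

        demo_inet_gro(&p, prev_id, new_id);
        printf("%-16s id %5u -> %5u : %s\n", name,
               (unsigned int)prev_id, (unsigned int)new_id,
               demo_tcp_gro_flush(&p) ? "flush (no merge)" : "merge");
}

int main(void)
{
        demo("fixed ID",        1000, 1000);    /* e.g. IPv6-to-IPv4 flows */
        demo("incrementing ID", 1000, 1001);    /* classic IPv4 behaviour  */
        demo("jumping ID",      1000, 1007);    /* not the same sequence   */
        return 0;
}
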
+ */ + if (NAPI_GRO_CB(skb)->is_atomic) + NAPI_GRO_CB(p)->flush_id = 0; } + NAPI_GRO_CB(skb)->is_atomic = true; NAPI_GRO_CB(skb)->flush |= flush; skb_gro_postpull_rcsum(skb, iph, nlen); From 802ab55adc39a06940a1b384e9fd0387fc762d7e Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Sun, 10 Apr 2016 21:45:03 -0400 Subject: [PATCH 0682/1649] GSO: Support partial segmentation offload This patch adds support for something I am referring to as GSO partial. The basic idea is that we can support a broader range of devices for segmentation if we use fixed outer headers and have the hardware only really deal with segmenting the inner header. The idea behind the naming is due to the fact that everything before csum_start will be fixed headers, and everything after will be the region that is handled by hardware. With the current implementation it allows us to add support for the following GSO types with an inner TSO_MANGLEID or TSO6 offload: NETIF_F_GSO_GRE NETIF_F_GSO_GRE_CSUM NETIF_F_GSO_IPIP NETIF_F_GSO_SIT NETIF_F_UDP_TUNNEL NETIF_F_UDP_TUNNEL_CSUM In the case of hardware that already supports tunneling we may be able to extend this further to support TSO_TCPV4 without TSO_MANGLEID if the hardware can support updating inner IPv4 headers. Signed-off-by: Alexander Duyck Signed-off-by: David S. Miller --- include/linux/netdev_features.h | 5 +++++ include/linux/netdevice.h | 2 ++ include/linux/skbuff.h | 9 +++++++-- net/core/dev.c | 36 ++++++++++++++++++++++++++++++--- net/core/ethtool.c | 1 + net/core/skbuff.c | 29 +++++++++++++++++++++++++- net/ipv4/af_inet.c | 20 ++++++++++++++---- net/ipv4/gre_offload.c | 26 +++++++++++++++++++----- net/ipv4/tcp_offload.c | 10 +++++++-- net/ipv4/udp_offload.c | 27 +++++++++++++++++++------ net/ipv6/ip6_offload.c | 10 ++++++++- 11 files changed, 151 insertions(+), 24 deletions(-) diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h index 7cf272a4b5c8..9fc79df0e561 100644 --- a/include/linux/netdev_features.h +++ b/include/linux/netdev_features.h @@ -48,6 +48,10 @@ enum { NETIF_F_GSO_SIT_BIT, /* ... SIT tunnel with TSO */ NETIF_F_GSO_UDP_TUNNEL_BIT, /* ... UDP TUNNEL with TSO */ NETIF_F_GSO_UDP_TUNNEL_CSUM_BIT,/* ... UDP TUNNEL with TSO & CSUM */ + NETIF_F_GSO_PARTIAL_BIT, /* ... Only segment inner-most L4 + * in hardware and all other + * headers in software. + */ NETIF_F_GSO_TUNNEL_REMCSUM_BIT, /* ... 
TUNNEL with TSO & REMCSUM */ /**/NETIF_F_GSO_LAST = /* last bit, see GSO_MASK */ NETIF_F_GSO_TUNNEL_REMCSUM_BIT, @@ -122,6 +126,7 @@ enum { #define NETIF_F_GSO_UDP_TUNNEL __NETIF_F(GSO_UDP_TUNNEL) #define NETIF_F_GSO_UDP_TUNNEL_CSUM __NETIF_F(GSO_UDP_TUNNEL_CSUM) #define NETIF_F_TSO_MANGLEID __NETIF_F(TSO_MANGLEID) +#define NETIF_F_GSO_PARTIAL __NETIF_F(GSO_PARTIAL) #define NETIF_F_GSO_TUNNEL_REMCSUM __NETIF_F(GSO_TUNNEL_REMCSUM) #define NETIF_F_HW_VLAN_STAG_FILTER __NETIF_F(HW_VLAN_STAG_FILTER) #define NETIF_F_HW_VLAN_STAG_RX __NETIF_F(HW_VLAN_STAG_RX) diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 2d70c521d516..a3bb534576a3 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -1654,6 +1654,7 @@ struct net_device { netdev_features_t vlan_features; netdev_features_t hw_enc_features; netdev_features_t mpls_features; + netdev_features_t gso_partial_features; int ifindex; int group; @@ -4004,6 +4005,7 @@ static inline bool net_gso_ok(netdev_features_t features, int gso_type) BUILD_BUG_ON(SKB_GSO_SIT != (NETIF_F_GSO_SIT >> NETIF_F_GSO_SHIFT)); BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL != (NETIF_F_GSO_UDP_TUNNEL >> NETIF_F_GSO_SHIFT)); BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL_CSUM != (NETIF_F_GSO_UDP_TUNNEL_CSUM >> NETIF_F_GSO_SHIFT)); + BUILD_BUG_ON(SKB_GSO_PARTIAL != (NETIF_F_GSO_PARTIAL >> NETIF_F_GSO_SHIFT)); BUILD_BUG_ON(SKB_GSO_TUNNEL_REMCSUM != (NETIF_F_GSO_TUNNEL_REMCSUM >> NETIF_F_GSO_SHIFT)); return (features & feature) == feature; diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 5fba16658f9d..da0ace389fec 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -483,7 +483,9 @@ enum { SKB_GSO_UDP_TUNNEL_CSUM = 1 << 12, - SKB_GSO_TUNNEL_REMCSUM = 1 << 13, + SKB_GSO_PARTIAL = 1 << 13, + + SKB_GSO_TUNNEL_REMCSUM = 1 << 14, }; #if BITS_PER_LONG > 32 @@ -3591,7 +3593,10 @@ static inline struct sec_path *skb_sec_path(struct sk_buff *skb) * Keeps track of level of encapsulation of network headers. */ struct skb_gso_cb { - int mac_offset; + union { + int mac_offset; + int data_offset; + }; int encap_level; __wsum csum; __u16 csum_start; diff --git a/net/core/dev.c b/net/core/dev.c index b78b586b1856..556dd09af3b8 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -2711,6 +2711,19 @@ struct sk_buff *__skb_gso_segment(struct sk_buff *skb, return ERR_PTR(err); } + /* Only report GSO partial support if it will enable us to + * support segmentation on this frame without needing additional + * work. + */ + if (features & NETIF_F_GSO_PARTIAL) { + netdev_features_t partial_features = NETIF_F_GSO_ROBUST; + struct net_device *dev = skb->dev; + + partial_features |= dev->features & dev->gso_partial_features; + if (!skb_gso_ok(skb, features | partial_features)) + features &= ~NETIF_F_GSO_PARTIAL; + } + BUILD_BUG_ON(SKB_SGO_CB_OFFSET + sizeof(*SKB_GSO_CB(skb)) > sizeof(skb->cb)); @@ -2834,8 +2847,17 @@ static netdev_features_t gso_features_check(const struct sk_buff *skb, if (gso_segs > dev->gso_max_segs) return features & ~NETIF_F_GSO_MASK; - /* Make sure to clear the IPv4 ID mangling feature if - * the IPv4 header has the potential to be fragmented. + /* Support for GSO partial features requires software + * intervention before we can actually process the packets + * so we need to strip support for any partial features now + * and we can pull them back in after we have partially + * segmented the frame. 
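
As a rough illustration of the gating added to __skb_gso_segment() above, the following stand-alone program (made-up feature bits, and skb_gso_ok() reduced to a plain bitmask check) shows when GSO partial stays enabled: every GSO type on the frame must be covered by the device either natively or through gso_partial_features.

#include <stdio.h>

/* Stand-in bits; the real ones share positions via NETIF_F_GSO_SHIFT. */
#define F_TSO            (1u << 0)
#define F_GSO_UDP_TUNNEL (1u << 1)
#define F_GSO_PARTIAL    (1u << 2)

/* Simplified version of the check in __skb_gso_segment(). */
static unsigned int demo_gso_features(unsigned int skb_gso_types,
                                      unsigned int features,
                                      unsigned int dev_features,
                                      unsigned int dev_partial)
{
        if (features & F_GSO_PARTIAL) {
                unsigned int partial = dev_features & dev_partial;

                if (skb_gso_types & ~(features | partial))
                        features &= ~F_GSO_PARTIAL;
        }
        return features;
}

int main(void)
{
        /* Device does plain TSO, and UDP tunnel only as a partial offload. */
        unsigned int dev_features = F_TSO | F_GSO_UDP_TUNNEL | F_GSO_PARTIAL;
        unsigned int dev_partial  = F_GSO_UDP_TUNNEL;
        /* A UDP-tunnel-encapsulated TSO frame. */
        unsigned int gso_types = F_TSO | F_GSO_UDP_TUNNEL;
        unsigned int out;

        out = demo_gso_features(gso_types, F_TSO | F_GSO_PARTIAL,
                                dev_features, dev_partial);
        printf("GSO partial usable: %s\n",
               out & F_GSO_PARTIAL ? "yes" : "no");
        return 0;
}
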
+ */ + if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL)) + features &= ~dev->gso_partial_features; + + /* Make sure to clear the IPv4 ID mangling feature if the + * IPv4 header has the potential to be fragmented. */ if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) { struct iphdr *iph = skb->encapsulation ? @@ -6729,6 +6751,14 @@ static netdev_features_t netdev_fix_features(struct net_device *dev, } } + /* GSO partial features require GSO partial be set */ + if ((features & dev->gso_partial_features) && + !(features & NETIF_F_GSO_PARTIAL)) { + netdev_dbg(dev, + "Dropping partially supported GSO features since no GSO partial.\n"); + features &= ~dev->gso_partial_features; + } + #ifdef CONFIG_NET_RX_BUSY_POLL if (dev->netdev_ops->ndo_busy_poll) features |= NETIF_F_BUSY_POLL; @@ -7011,7 +7041,7 @@ int register_netdevice(struct net_device *dev) /* Make NETIF_F_SG inheritable to tunnel devices. */ - dev->hw_enc_features |= NETIF_F_SG; + dev->hw_enc_features |= NETIF_F_SG | NETIF_F_GSO_PARTIAL; /* Make NETIF_F_SG inheritable to MPLS. */ diff --git a/net/core/ethtool.c b/net/core/ethtool.c index 9494c41cc77c..e0cf20a3b3dd 100644 --- a/net/core/ethtool.c +++ b/net/core/ethtool.c @@ -88,6 +88,7 @@ static const char netdev_features_strings[NETDEV_FEATURE_COUNT][ETH_GSTRING_LEN] [NETIF_F_GSO_SIT_BIT] = "tx-sit-segmentation", [NETIF_F_GSO_UDP_TUNNEL_BIT] = "tx-udp_tnl-segmentation", [NETIF_F_GSO_UDP_TUNNEL_CSUM_BIT] = "tx-udp_tnl-csum-segmentation", + [NETIF_F_GSO_PARTIAL_BIT] = "tx-gso-partial", [NETIF_F_FCOE_CRC_BIT] = "tx-checksum-fcoe-crc", [NETIF_F_SCTP_CRC_BIT] = "tx-checksum-sctp", diff --git a/net/core/skbuff.c b/net/core/skbuff.c index d04c2d1c8c87..4cc594cdaada 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -3076,8 +3076,9 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb, struct sk_buff *frag_skb = head_skb; unsigned int offset = doffset; unsigned int tnl_hlen = skb_tnl_header_len(head_skb); + unsigned int partial_segs = 0; unsigned int headroom; - unsigned int len; + unsigned int len = head_skb->len; __be16 proto; bool csum; int sg = !!(features & NETIF_F_SG); @@ -3094,6 +3095,15 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb, csum = !!can_checksum_protocol(features, proto); + /* GSO partial only requires that we trim off any excess that + * doesn't fit into an MSS sized block, so take care of that + * now. + */ + if (features & NETIF_F_GSO_PARTIAL) { + partial_segs = len / mss; + mss *= partial_segs; + } + headroom = skb_headroom(head_skb); pos = skb_headlen(head_skb); @@ -3281,6 +3291,23 @@ perform_csum_check: */ segs->prev = tail; + /* Update GSO info on first skb in partial sequence. */ + if (partial_segs) { + int type = skb_shinfo(head_skb)->gso_type; + + /* Update type to add partial and then remove dodgy if set */ + type |= SKB_GSO_PARTIAL; + type &= ~SKB_GSO_DODGY; + + /* Update GSO info and prepare to start updating headers on + * our way back down the stack of protocols. + */ + skb_shinfo(segs)->gso_size = skb_shinfo(head_skb)->gso_size; + skb_shinfo(segs)->gso_segs = partial_segs; + skb_shinfo(segs)->gso_type = type; + SKB_GSO_CB(segs)->data_offset = skb_headroom(segs) + doffset; + } + /* Following permits correct backpressure, for protocols * using skb_set_owner_w(). * Idea is to tranfert ownership from head_skb to last segment. 
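
The bookkeeping done in skb_segment() above boils down to trimming the frame to a whole number of MSS-sized pieces and handing that chunk to the device as one super-frame; only the sub-MSS tail is emitted as a separate, ordinary frame. A tiny worked example with made-up numbers:

#include <stdio.h>

int main(void)
{
        unsigned int mss = 1448;        /* typical TCP MSS on 1500-byte MTU */
        unsigned int len = 63000;       /* bytes handed to GSO in one skb   */

        unsigned int partial_segs = len / mss;          /* 43    */
        unsigned int super_mss = partial_segs * mss;    /* 62264 */
        unsigned int tail = len - super_mss;            /* 736   */

        printf("software emits one %u-byte super-frame (gso_size=%u, gso_segs=%u)\n",
               super_mss, mss, partial_segs);
        printf("plus a %u-byte trailing frame; the device splits the super-frame\n",
               tail);
        return 0;
}
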
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index 8564cab96189..2e6e65fc4d20 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -1200,7 +1200,7 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb, const struct net_offload *ops; unsigned int offset = 0; struct iphdr *iph; - int proto; + int proto, tot_len; int nhoff; int ihl; int id; @@ -1219,6 +1219,7 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb, SKB_GSO_UDP_TUNNEL_CSUM | SKB_GSO_TCP_FIXEDID | SKB_GSO_TUNNEL_REMCSUM | + SKB_GSO_PARTIAL | 0))) goto out; @@ -1273,10 +1274,21 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb, if (skb->next) iph->frag_off |= htons(IP_MF); offset += skb->len - nhoff - ihl; - } else if (!fixedid) { - iph->id = htons(id++); + tot_len = skb->len - nhoff; + } else if (skb_is_gso(skb)) { + if (!fixedid) { + iph->id = htons(id); + id += skb_shinfo(skb)->gso_segs; + } + tot_len = skb_shinfo(skb)->gso_size + + SKB_GSO_CB(skb)->data_offset + + skb->head - (unsigned char *)iph; + } else { + if (!fixedid) + iph->id = htons(id++); + tot_len = skb->len - nhoff; } - iph->tot_len = htons(skb->len - nhoff); + iph->tot_len = htons(tot_len); ip_send_check(iph); if (encap) skb_reset_inner_headers(skb); diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c index 6376b0cdf693..20557f211408 100644 --- a/net/ipv4/gre_offload.c +++ b/net/ipv4/gre_offload.c @@ -36,7 +36,8 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb, SKB_GSO_GRE | SKB_GSO_GRE_CSUM | SKB_GSO_IPIP | - SKB_GSO_SIT))) + SKB_GSO_SIT | + SKB_GSO_PARTIAL))) goto out; if (!skb->encapsulation) @@ -87,7 +88,7 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb, skb = segs; do { struct gre_base_hdr *greh; - __be32 *pcsum; + __sum16 *pcsum; /* Set up inner headers if we are offloading inner checksum */ if (skb->ip_summed == CHECKSUM_PARTIAL) { @@ -107,10 +108,25 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb, continue; greh = (struct gre_base_hdr *)skb_transport_header(skb); - pcsum = (__be32 *)(greh + 1); + pcsum = (__sum16 *)(greh + 1); - *pcsum = 0; - *(__sum16 *)pcsum = gso_make_checksum(skb, 0); + if (skb_is_gso(skb)) { + unsigned int partial_adj; + + /* Adjust checksum to account for the fact that + * the partial checksum is based on actual size + * whereas headers should be based on MSS size. + */ + partial_adj = skb->len + skb_headroom(skb) - + SKB_GSO_CB(skb)->data_offset - + skb_shinfo(skb)->gso_size; + *pcsum = ~csum_fold((__force __wsum)htonl(partial_adj)); + } else { + *pcsum = 0; + } + + *(pcsum + 1) = 0; + *pcsum = gso_make_checksum(skb, 0); } while ((skb = skb->next)); out: return segs; diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c index d1ffd55289bd..02737b607aa7 100644 --- a/net/ipv4/tcp_offload.c +++ b/net/ipv4/tcp_offload.c @@ -109,6 +109,12 @@ struct sk_buff *tcp_gso_segment(struct sk_buff *skb, goto out; } + /* GSO partial only requires splitting the frame into an MSS + * multiple and possibly a remainder. So update the mss now. 
+ */ + if (features & NETIF_F_GSO_PARTIAL) + mss = skb->len - (skb->len % mss); + copy_destructor = gso_skb->destructor == tcp_wfree; ooo_okay = gso_skb->ooo_okay; /* All segments but the first should have ooo_okay cleared */ @@ -133,7 +139,7 @@ struct sk_buff *tcp_gso_segment(struct sk_buff *skb, newcheck = ~csum_fold((__force __wsum)((__force u32)th->check + (__force u32)delta)); - do { + while (skb->next) { th->fin = th->psh = 0; th->check = newcheck; @@ -153,7 +159,7 @@ struct sk_buff *tcp_gso_segment(struct sk_buff *skb, th->seq = htonl(seq); th->cwr = 0; - } while (skb->next); + } /* Following permits TCP Small Queues to work well with GSO : * The callback to TCP stack will be called at the time last frag diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c index 6230cf4b0d2d..097060def7f0 100644 --- a/net/ipv4/udp_offload.c +++ b/net/ipv4/udp_offload.c @@ -39,8 +39,11 @@ static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb, * 16 bit length field due to the header being added outside of an * IP or IPv6 frame that was already limited to 64K - 1. */ - partial = csum_sub(csum_unfold(uh->check), - (__force __wsum)htonl(skb->len)); + if (skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) + partial = (__force __wsum)uh->len; + else + partial = (__force __wsum)htonl(skb->len); + partial = csum_sub(csum_unfold(uh->check), partial); /* setup inner skb. */ skb->encapsulation = 0; @@ -89,7 +92,7 @@ static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb, udp_offset = outer_hlen - tnl_hlen; skb = segs; do { - __be16 len; + unsigned int len; if (remcsum) skb->ip_summed = CHECKSUM_NONE; @@ -107,14 +110,26 @@ static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb, skb_reset_mac_header(skb); skb_set_network_header(skb, mac_len); skb_set_transport_header(skb, udp_offset); - len = htons(skb->len - udp_offset); + len = skb->len - udp_offset; uh = udp_hdr(skb); - uh->len = len; + + /* If we are only performing partial GSO the inner header + * will be using a length value equal to only one MSS sized + * segment instead of the entire frame. 
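
To see what ends up in the outer UDP length field of a GSO-partial super-frame, here is a back-of-the-envelope example. The encapsulation layout and sizes are assumptions picked purely for illustration (a VXLAN tunnel over untagged Ethernet): the value written covers one MSS worth of inner payload plus every header from the UDP header onwards, i.e. exactly what each frame carries after the device finishes segmenting.

#include <stdio.h>

int main(void)
{
        /* Assumed VXLAN-over-IPv4 layout, sizes in bytes. */
        unsigned int outer_udp = 8, vxlan = 8;
        unsigned int inner_eth = 14, inner_ip = 20, inner_tcp = 20;
        unsigned int mss = 1410;        /* inner TCP payload per wire frame */

        unsigned int uh_len = outer_udp + vxlan + inner_eth + inner_ip +
                              inner_tcp + mss;          /* 1480 */
        unsigned int outer_ip_tot_len = 20 + uh_len;    /* 1500, fits the MTU */

        printf("outer uh->len written on the super-frame: %u\n", uh_len);
        printf("per-frame outer IP total length after segmentation: %u\n",
               outer_ip_tot_len);
        return 0;
}
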
+ */ + if (skb_is_gso(skb)) { + uh->len = htons(skb_shinfo(skb)->gso_size + + SKB_GSO_CB(skb)->data_offset + + skb->head - (unsigned char *)uh); + } else { + uh->len = htons(len); + } if (!need_csum) continue; - uh->check = ~csum_fold(csum_add(partial, (__force __wsum)len)); + uh->check = ~csum_fold(csum_add(partial, + (__force __wsum)htonl(len))); if (skb->encapsulation || !offload_csum) { uh->check = gso_make_checksum(skb, ~uh->check); diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c index 061adcda65f3..f5eb184e1093 100644 --- a/net/ipv6/ip6_offload.c +++ b/net/ipv6/ip6_offload.c @@ -63,6 +63,7 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, int proto; struct frag_hdr *fptr; unsigned int unfrag_ip6hlen; + unsigned int payload_len; u8 *prevhdr; int offset = 0; bool encap, udpfrag; @@ -82,6 +83,7 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, SKB_GSO_UDP_TUNNEL | SKB_GSO_UDP_TUNNEL_CSUM | SKB_GSO_TUNNEL_REMCSUM | + SKB_GSO_PARTIAL | 0))) goto out; @@ -118,7 +120,13 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, for (skb = segs; skb; skb = skb->next) { ipv6h = (struct ipv6hdr *)(skb_mac_header(skb) + nhoff); - ipv6h->payload_len = htons(skb->len - nhoff - sizeof(*ipv6h)); + if (skb_is_gso(skb)) + payload_len = skb_shinfo(skb)->gso_size + + SKB_GSO_CB(skb)->data_offset + + skb->head - (unsigned char *)(ipv6h + 1); + else + payload_len = skb->len - nhoff - sizeof(*ipv6h); + ipv6h->payload_len = htons(payload_len); skb->network_header = (u8 *)ipv6h - skb->head; if (udpfrag) { From f7a6272bf3cbd2576165dba020e0329c9ca67c1f Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Sun, 10 Apr 2016 21:45:09 -0400 Subject: [PATCH 0683/1649] Documentation: Add documentation for TSO and GSO features This document is a starting point for defining the TSO and GSO features. The whole thing is starting to get a bit messy so I wanted to make sure we have notes somwhere to start describing what does and doesn't work. Signed-off-by: Alexander Duyck Signed-off-by: David S. Miller --- .../networking/segmentation-offloads.txt | 130 ++++++++++++++++++ 1 file changed, 130 insertions(+) create mode 100644 Documentation/networking/segmentation-offloads.txt diff --git a/Documentation/networking/segmentation-offloads.txt b/Documentation/networking/segmentation-offloads.txt new file mode 100644 index 000000000000..f200467ade38 --- /dev/null +++ b/Documentation/networking/segmentation-offloads.txt @@ -0,0 +1,130 @@ +Segmentation Offloads in the Linux Networking Stack + +Introduction +============ + +This document describes a set of techniques in the Linux networking stack +to take advantage of segmentation offload capabilities of various NICs. + +The following technologies are described: + * TCP Segmentation Offload - TSO + * UDP Fragmentation Offload - UFO + * IPIP, SIT, GRE, and UDP Tunnel Offloads + * Generic Segmentation Offload - GSO + * Generic Receive Offload - GRO + * Partial Generic Segmentation Offload - GSO_PARTIAL + +TCP Segmentation Offload +======================== + +TCP segmentation allows a device to segment a single frame into multiple +frames with a data payload size specified in skb_shinfo()->gso_size. +When TCP segmentation requested the bit for either SKB_GSO_TCP or +SKB_GSO_TCP6 should be set in skb_shinfo()->gso_type and +skb_shinfo()->gso_size should be set to a non-zero value. + +TCP segmentation is dependent on support for the use of partial checksum +offload. 
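
As a companion to the TSO description above, here is a sketch (not compilable on its own, and not part of the patch) of what a driver can expect to find on a TSO skb at ndo_start_xmit() time; it uses the real SKB_GSO_TCPV4/TCPV6 enum names rather than the shorthand used in the text.

/* Sketch only: inspecting a TSO skb handed to a NETIF_F_TSO driver. */
static void demo_inspect_tso_skb(struct sk_buff *skb)
{
        unsigned int mss, type;
        struct tcphdr *th;

        if (!skb_is_gso(skb))
                return;

        mss = skb_shinfo(skb)->gso_size;        /* payload bytes per segment */
        type = skb_shinfo(skb)->gso_type;       /* SKB_GSO_TCPV4 or
                                                 * SKB_GSO_TCPV6, possibly
                                                 * with SKB_GSO_TCP_FIXEDID */

        /* The network and transport header offsets are already populated. */
        th = tcp_hdr(skb);                      /* == skb_transport_header() */

        /* CHECKSUM_PARTIAL is required; csum_start points at the TCP header. */
        WARN_ON_ONCE(skb->ip_summed != CHECKSUM_PARTIAL ||
                     skb_checksum_start(skb) != (unsigned char *)th);
        (void)mss;
        (void)type;
}
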
For this reason TSO is normally disabled if the Tx checksum +offload for a given device is disabled. + +In order to support TCP segmentation offload it is necessary to populate +the network and transport header offsets of the skbuff so that the device +drivers will be able determine the offsets of the IP or IPv6 header and the +TCP header. In addition as CHECKSUM_PARTIAL is required csum_start should +also point to the TCP header of the packet. + +For IPv4 segmentation we support one of two types in terms of the IP ID. +The default behavior is to increment the IP ID with every segment. If the +GSO type SKB_GSO_TCP_FIXEDID is specified then we will not increment the IP +ID and all segments will use the same IP ID. If a device has +NETIF_F_TSO_MANGLEID set then the IP ID can be ignored when performing TSO +and we will either increment the IP ID for all frames, or leave it at a +static value based on driver preference. + +UDP Fragmentation Offload +========================= + +UDP fragmentation offload allows a device to fragment an oversized UDP +datagram into multiple IPv4 fragments. Many of the requirements for UDP +fragmentation offload are the same as TSO. However the IPv4 ID for +fragments should not increment as a single IPv4 datagram is fragmented. + +IPIP, SIT, GRE, UDP Tunnel, and Remote Checksum Offloads +======================================================== + +In addition to the offloads described above it is possible for a frame to +contain additional headers such as an outer tunnel. In order to account +for such instances an additional set of segmentation offload types were +introduced including SKB_GSO_IPIP, SKB_GSO_SIT, SKB_GSO_GRE, and +SKB_GSO_UDP_TUNNEL. These extra segmentation types are used to identify +cases where there are more than just 1 set of headers. For example in the +case of IPIP and SIT we should have the network and transport headers moved +from the standard list of headers to "inner" header offsets. + +Currently only two levels of headers are supported. The convention is to +refer to the tunnel headers as the outer headers, while the encapsulated +data is normally referred to as the inner headers. Below is the list of +calls to access the given headers: + +IPIP/SIT Tunnel: + Outer Inner +MAC skb_mac_header +Network skb_network_header skb_inner_network_header +Transport skb_transport_header + +UDP/GRE Tunnel: + Outer Inner +MAC skb_mac_header skb_inner_mac_header +Network skb_network_header skb_inner_network_header +Transport skb_transport_header skb_inner_transport_header + +In addition to the above tunnel types there are also SKB_GSO_GRE_CSUM and +SKB_GSO_UDP_TUNNEL_CSUM. These two additional tunnel types reflect the +fact that the outer header also requests to have a non-zero checksum +included in the outer header. + +Finally there is SKB_GSO_REMCSUM which indicates that a given tunnel header +has requested a remote checksum offload. In this case the inner headers +will be left with a partial checksum and only the outer header checksum +will be computed. + +Generic Segmentation Offload +============================ + +Generic segmentation offload is a pure software offload that is meant to +deal with cases where device drivers cannot perform the offloads described +above. What occurs in GSO is that a given skbuff will have its data broken +out over multiple skbuffs that have been resized to match the MSS provided +via skb_shinfo()->gso_size. + +Before enabling any hardware segmentation offload a corresponding software +offload is required in GSO. 
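
The two IPv4 ID behaviours described in the TSO section above are easy to picture with a worked example; the numbers below are arbitrary and the program is a stand-alone userspace demo, not kernel code.

#include <stdio.h>

int main(void)
{
        unsigned int payload = 4500, mss = 1448;
        unsigned int segs = (payload + mss - 1) / mss;  /* DIV_ROUND_UP -> 4 */
        unsigned int id = 0x1234, i;

        printf("default (incrementing) IDs: ");
        for (i = 0; i < segs; i++)
                printf("0x%04x ", (id + i) & 0xffff);

        printf("\nSKB_GSO_TCP_FIXEDID IDs:    ");
        for (i = 0; i < segs; i++)
                printf("0x%04x ", id);
        printf("\n");
        return 0;
}

With NETIF_F_TSO_MANGLEID the device is free to produce either of the two sequences, since the DF bit guarantees the datagrams will never be fragmented.
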
Otherwise it becomes possible for a frame to +be re-routed between devices and end up being unable to be transmitted. + +Generic Receive Offload +======================= + +Generic receive offload is the complement to GSO. Ideally any frame +assembled by GRO should be segmented to create an identical sequence of +frames using GSO, and any sequence of frames segmented by GSO should be +able to be reassembled back to the original by GRO. The only exception to +this is IPv4 ID in the case that the DF bit is set for a given IP header. +If the value of the IPv4 ID is not sequentially incrementing it will be +altered so that it is when a frame assembled via GRO is segmented via GSO. + +Partial Generic Segmentation Offload +==================================== + +Partial generic segmentation offload is a hybrid between TSO and GSO. What +it effectively does is take advantage of certain traits of TCP and tunnels +so that instead of having to rewrite the packet headers for each segment +only the inner-most transport header and possibly the outer-most network +header need to be updated. This allows devices that do not support tunnel +offloads or tunnel offloads with checksum to still make use of segmentation. + +With the partial offload what occurs is that all headers excluding the +inner transport header are updated such that they will contain the correct +values for if the header was simply duplicated. The one exception to this +is the outer IPv4 ID field. It is up to the device drivers to guarantee +that the IPv4 ID field is incremented in the case that a given header does +not have the DF bit set. From 333f796235a52727db7e0a13888045f3aa3d5335 Mon Sep 17 00:00:00 2001 From: Parthasarathy Bhuvaragan Date: Tue, 12 Apr 2016 13:05:21 +0200 Subject: [PATCH 0684/1649] tipc: fix a race condition leading to subscriber refcnt bug Until now, the requests sent to topology server are queued to a workqueue by the generic server framework. These messages are processed by worker threads and trigger the registered callbacks. To reduce latency on uniprocessor systems, explicit rescheduling is performed using cond_resched() after MAX_RECV_MSG_COUNT(25) messages. This implementation on SMP systems leads to an subscriber refcnt error as described below: When a worker thread yields by calling cond_resched() in a SMP system, a new worker is created on another CPU to process the pending workitem. Sometimes the sleeping thread wakes up before the new thread finishes execution. This breaks the assumption on ordering and being single threaded. The fault is more frequent when MAX_RECV_MSG_COUNT is lowered. If the first thread was processing subscription create and the second thread processing close(), the close request will free the subscriber and the create request oops as follows: [31.224137] WARNING: CPU: 2 PID: 266 at include/linux/kref.h:46 tipc_subscrb_rcv_cb+0x317/0x380 [tipc] [31.228143] CPU: 2 PID: 266 Comm: kworker/u8:1 Not tainted 4.5.0+ #97 [31.228377] Workqueue: tipc_rcv tipc_recv_work [tipc] [...] 
[31.228377] Call Trace: [31.228377] [] dump_stack+0x4d/0x72 [31.228377] [] __warn+0xd1/0xf0 [31.228377] [] warn_slowpath_null+0x1d/0x20 [31.228377] [] tipc_subscrb_rcv_cb+0x317/0x380 [tipc] [31.228377] [] tipc_receive_from_sock+0xd4/0x130 [tipc] [31.228377] [] tipc_recv_work+0x2b/0x50 [tipc] [31.228377] [] process_one_work+0x145/0x3d0 [31.246554] ---[ end trace c3882c9baa05a4fd ]--- [31.248327] BUG: spinlock bad magic on CPU#2, kworker/u8:1/266 [31.249119] BUG: unable to handle kernel NULL pointer dereference at 0000000000000428 [31.249323] IP: [] spin_dump+0x5c/0xe0 [31.249323] PGD 0 [31.249323] Oops: 0000 [#1] SMP In this commit, we - rename tipc_conn_shutdown() to tipc_conn_release(). - move connection release callback execution from tipc_close_conn() to a new function tipc_sock_release(), which is executed before we free the connection. Thus we release the subscriber during connection release procedure rather than connection shutdown procedure. Signed-off-by: Parthasarathy Bhuvaragan Acked-by: Ying Xue Signed-off-by: David S. Miller --- net/tipc/server.c | 19 +++++++++++++------ net/tipc/server.h | 4 ++-- net/tipc/subscr.c | 4 ++-- 3 files changed, 17 insertions(+), 10 deletions(-) diff --git a/net/tipc/server.c b/net/tipc/server.c index 2446bfbaa309..7a0af2dc0406 100644 --- a/net/tipc/server.c +++ b/net/tipc/server.c @@ -86,6 +86,7 @@ struct outqueue_entry { static void tipc_recv_work(struct work_struct *work); static void tipc_send_work(struct work_struct *work); static void tipc_clean_outqueues(struct tipc_conn *con); +static void tipc_sock_release(struct tipc_conn *con); static void tipc_conn_kref_release(struct kref *kref) { @@ -102,6 +103,7 @@ static void tipc_conn_kref_release(struct kref *kref) } saddr->scope = -TIPC_NODE_SCOPE; kernel_bind(sock, (struct sockaddr *)saddr, sizeof(*saddr)); + tipc_sock_release(con); sock_release(sock); con->sock = NULL; } @@ -184,26 +186,31 @@ static void tipc_unregister_callbacks(struct tipc_conn *con) write_unlock_bh(&sk->sk_callback_lock); } +static void tipc_sock_release(struct tipc_conn *con) +{ + struct tipc_server *s = con->server; + + if (con->conid) + s->tipc_conn_release(con->conid, con->usr_data); + + tipc_unregister_callbacks(con); +} + static void tipc_close_conn(struct tipc_conn *con) { struct tipc_server *s = con->server; if (test_and_clear_bit(CF_CONNECTED, &con->flags)) { - if (con->conid) - s->tipc_conn_shutdown(con->conid, con->usr_data); spin_lock_bh(&s->idr_lock); idr_remove(&s->conn_idr, con->conid); s->idr_in_use--; spin_unlock_bh(&s->idr_lock); - tipc_unregister_callbacks(con); - /* We shouldn't flush pending works as we may be in the * thread. In fact the races with pending rx/tx work structs * are harmless for us here as we have already deleted this - * connection from server connection list and set - * sk->sk_user_data to 0 before releasing connection object. + * connection from server connection list. 
*/ kernel_sock_shutdown(con->sock, SHUT_RDWR); diff --git a/net/tipc/server.h b/net/tipc/server.h index 9015faedb1b0..34f8055afa3b 100644 --- a/net/tipc/server.h +++ b/net/tipc/server.h @@ -53,7 +53,7 @@ * @send_wq: send workqueue * @max_rcvbuf_size: maximum permitted receive message length * @tipc_conn_new: callback will be called when new connection is incoming - * @tipc_conn_shutdown: callback will be called when connection is shut down + * @tipc_conn_release: callback will be called before releasing the connection * @tipc_conn_recvmsg: callback will be called when message arrives * @saddr: TIPC server address * @name: server name @@ -70,7 +70,7 @@ struct tipc_server { struct workqueue_struct *send_wq; int max_rcvbuf_size; void *(*tipc_conn_new)(int conid); - void (*tipc_conn_shutdown)(int conid, void *usr_data); + void (*tipc_conn_release)(int conid, void *usr_data); void (*tipc_conn_recvmsg)(struct net *net, int conid, struct sockaddr_tipc *addr, void *usr_data, void *buf, size_t len); diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c index e6cb386fbf34..79de588c7bd6 100644 --- a/net/tipc/subscr.c +++ b/net/tipc/subscr.c @@ -302,7 +302,7 @@ static void tipc_subscrp_subscribe(struct net *net, struct tipc_subscr *s, } /* Handle one termination request for the subscriber */ -static void tipc_subscrb_shutdown_cb(int conid, void *usr_data) +static void tipc_subscrb_release_cb(int conid, void *usr_data) { tipc_subscrb_delete((struct tipc_subscriber *)usr_data); } @@ -365,7 +365,7 @@ int tipc_topsrv_start(struct net *net) topsrv->max_rcvbuf_size = sizeof(struct tipc_subscr); topsrv->tipc_conn_recvmsg = tipc_subscrb_rcv_cb; topsrv->tipc_conn_new = tipc_subscrb_connect_cb; - topsrv->tipc_conn_shutdown = tipc_subscrb_shutdown_cb; + topsrv->tipc_conn_release = tipc_subscrb_release_cb; strncpy(topsrv->name, name, strlen(name) + 1); tn->topsrv = topsrv; From da37845fdce24e174f44d020bc4085ddd1c8a6bd Mon Sep 17 00:00:00 2001 From: Weongyo Jeong Date: Thu, 14 Apr 2016 14:10:04 -0700 Subject: [PATCH 0685/1649] packet: uses kfree_skb() for errors. consume_skb() isn't for error cases that kfree_skb() is more proper one. At this patch, it fixed tpacket_rcv() and packet_rcv() to be consistent for error or non-error cases letting perf trace its event properly. Signed-off-by: Weongyo Jeong Signed-off-by: David S. Miller --- net/packet/af_packet.c | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 81a4c0574d73..4d5e699d0bfa 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -2052,6 +2052,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev, u8 *skb_head = skb->data; int skb_len = skb->len; unsigned int snaplen, res; + bool is_drop_n_account = false; if (skb->pkt_type == PACKET_LOOPBACK) goto drop; @@ -2140,6 +2141,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev, return 0; drop_n_acct: + is_drop_n_account = true; spin_lock(&sk->sk_receive_queue.lock); po->stats.stats1.tp_drops++; atomic_inc(&sk->sk_drops); @@ -2151,7 +2153,10 @@ drop_n_restore: skb->len = skb_len; } drop: - consume_skb(skb); + if (!is_drop_n_account) + consume_skb(skb); + else + kfree_skb(skb); return 0; } @@ -2170,6 +2175,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, struct sk_buff *copy_skb = NULL; struct timespec ts; __u32 ts_status; + bool is_drop_n_account = false; /* struct tpacket{2,3}_hdr is aligned to a multiple of TPACKET_ALIGNMENT. 
* We may add members to them until current aligned size without forcing @@ -2377,10 +2383,14 @@ drop_n_restore: skb->len = skb_len; } drop: - kfree_skb(skb); + if (!is_drop_n_account) + consume_skb(skb); + else + kfree_skb(skb); return 0; drop_n_account: + is_drop_n_account = true; po->stats.stats1.tp_drops++; spin_unlock(&sk->sk_receive_queue.lock); From d21fd63ea3856208c3a1cb9b26d81898a2ccf71b Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Tue, 12 Apr 2016 21:50:07 -0700 Subject: [PATCH 0686/1649] net: validate_xmit_skb() changes skbs given to validate_xmit_skb() should not have a next pointer anymore. Also if a packet is dropped, increment dev->tx_dropped __dev_queue_xmit() no longer has to change tx_dropped in this case. Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- net/core/dev.c | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/net/core/dev.c b/net/core/dev.c index 556dd09af3b8..52d446b2cb99 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -2959,9 +2959,6 @@ static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device { netdev_features_t features; - if (skb->next) - return skb; - features = netif_skb_features(skb); skb = validate_xmit_vlan(skb, features); if (unlikely(!skb)) @@ -3004,6 +3001,7 @@ static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device out_kfree_skb: kfree_skb(skb); out_null: + atomic_long_inc(&dev->tx_dropped); return NULL; } @@ -3393,7 +3391,7 @@ static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv) skb = validate_xmit_skb(skb, dev); if (!skb) - goto drop; + goto out; HARD_TX_LOCK(dev, txq, cpu); @@ -3420,7 +3418,6 @@ recursion_alert: } rc = -ENETDOWN; -drop: rcu_read_unlock_bh(); atomic_long_inc(&dev->tx_dropped); From 486bdee0134cf21c3714ded809d5933d2b8dfb81 Mon Sep 17 00:00:00 2001 From: Marcelo Ricardo Leitner Date: Tue, 12 Apr 2016 18:11:31 -0300 Subject: [PATCH 0687/1649] sctp: add support for RPS and RFS This patch adds what's missing to properly support RPS and RFS on SCTP, as some of it is already implemented in common calls. Having support for RPS and RFS allows better scaling specially because not all NICs support hashing SCTP headers. Save the hash right when we dequeue a skb from inqueue so we do it only once per skb instead of per chunk. New sockets will then inherit the hash through sctp_copy_sock(). Signed-off-by: Marcelo Ricardo Leitner Signed-off-by: David S. Miller --- net/sctp/inqueue.c | 3 +++ net/sctp/socket.c | 3 +++ 2 files changed, 6 insertions(+) diff --git a/net/sctp/inqueue.c b/net/sctp/inqueue.c index 7e8a16c77039..b335ffcef0b9 100644 --- a/net/sctp/inqueue.c +++ b/net/sctp/inqueue.c @@ -163,6 +163,9 @@ struct sctp_chunk *sctp_inq_pop(struct sctp_inq *queue) chunk->singleton = 1; ch = (sctp_chunkhdr_t *) chunk->skb->data; chunk->data_accepted = 0; + + if (chunk->asoc) + sock_rps_save_rxhash(chunk->asoc->base.sk, chunk->skb); } chunk->chunk_hdr = ch; diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 878d28eda1a6..36697f85ce48 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -6430,6 +6430,8 @@ unsigned int sctp_poll(struct file *file, struct socket *sock, poll_table *wait) poll_wait(file, sk_sleep(sk), wait); + sock_rps_record_flow(sk); + /* A TCP-style listening socket becomes readable when the accept queue * is not empty. 
*/ @@ -7186,6 +7188,7 @@ void sctp_copy_sock(struct sock *newsk, struct sock *sk, newsk->sk_lingertime = sk->sk_lingertime; newsk->sk_rcvtimeo = sk->sk_rcvtimeo; newsk->sk_sndtimeo = sk->sk_sndtimeo; + newsk->sk_rxhash = sk->sk_rxhash; newinet = inet_sk(newsk); From 33ff9823c569f3aceb071071914919177a6bed6a Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Wed, 13 Apr 2016 00:10:50 +0200 Subject: [PATCH 0688/1649] bpf, verifier: add bpf_call_arg_meta for passing meta data Currently, when the verifier checks calls in check_call() function, we call check_func_arg() for all 5 arguments e.g. to make sure expected types are correct. In some cases, we collect meta data (here: map pointer) to perform additional checks such as checking stack boundary on key/value sizes for subsequent arguments. As we're going to extend the meta data, add a generic struct bpf_call_arg_meta that we can use for passing into check_func_arg(). Signed-off-by: Daniel Borkmann Acked-by: Alexei Starovoitov Signed-off-by: David S. Miller --- kernel/bpf/verifier.c | 40 +++++++++++++++++++++++----------------- 1 file changed, 23 insertions(+), 17 deletions(-) diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 6c5d7cd4cb0e..202f8f738542 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -205,6 +205,10 @@ struct verifier_env { #define BPF_COMPLEXITY_LIMIT_INSNS 65536 #define BPF_COMPLEXITY_LIMIT_STACK 1024 +struct bpf_call_arg_meta { + struct bpf_map *map_ptr; +}; + /* verbose verifier prints what it's seeing * bpf_check() is called under lock, so no race to access these global vars */ @@ -822,7 +826,8 @@ static int check_stack_boundary(struct verifier_env *env, int regno, } static int check_func_arg(struct verifier_env *env, u32 regno, - enum bpf_arg_type arg_type, struct bpf_map **mapp) + enum bpf_arg_type arg_type, + struct bpf_call_arg_meta *meta) { struct reg_state *reg = env->cur_state.regs + regno; enum bpf_reg_type expected_type; @@ -875,14 +880,13 @@ static int check_func_arg(struct verifier_env *env, u32 regno, if (arg_type == ARG_CONST_MAP_PTR) { /* bpf_map_xxx(map_ptr) call: remember that map_ptr */ - *mapp = reg->map_ptr; - + meta->map_ptr = reg->map_ptr; } else if (arg_type == ARG_PTR_TO_MAP_KEY) { /* bpf_map_xxx(..., map_ptr, ..., key) call: * check that [key, key + map->key_size) are within * stack limits and initialized */ - if (!*mapp) { + if (!meta->map_ptr) { /* in function declaration map_ptr must come before * map_key, so that it's verified and known before * we have to check map_key here. 
Otherwise it means @@ -891,19 +895,19 @@ static int check_func_arg(struct verifier_env *env, u32 regno, verbose("invalid map_ptr to access map->key\n"); return -EACCES; } - err = check_stack_boundary(env, regno, (*mapp)->key_size, + err = check_stack_boundary(env, regno, meta->map_ptr->key_size, false); } else if (arg_type == ARG_PTR_TO_MAP_VALUE) { /* bpf_map_xxx(..., map_ptr, ..., value) call: * check [value, value + map->value_size) validity */ - if (!*mapp) { + if (!meta->map_ptr) { /* kernel subsystem misconfigured verifier */ verbose("invalid map_ptr to access map->value\n"); return -EACCES; } - err = check_stack_boundary(env, regno, (*mapp)->value_size, - false); + err = check_stack_boundary(env, regno, + meta->map_ptr->value_size, false); } else if (arg_type == ARG_CONST_STACK_SIZE || arg_type == ARG_CONST_STACK_SIZE_OR_ZERO) { bool zero_size_allowed = (arg_type == ARG_CONST_STACK_SIZE_OR_ZERO); @@ -954,8 +958,8 @@ static int check_call(struct verifier_env *env, int func_id) struct verifier_state *state = &env->cur_state; const struct bpf_func_proto *fn = NULL; struct reg_state *regs = state->regs; - struct bpf_map *map = NULL; struct reg_state *reg; + struct bpf_call_arg_meta meta; int i, err; /* find function prototype */ @@ -978,20 +982,22 @@ static int check_call(struct verifier_env *env, int func_id) return -EINVAL; } + memset(&meta, 0, sizeof(meta)); + /* check args */ - err = check_func_arg(env, BPF_REG_1, fn->arg1_type, &map); + err = check_func_arg(env, BPF_REG_1, fn->arg1_type, &meta); if (err) return err; - err = check_func_arg(env, BPF_REG_2, fn->arg2_type, &map); + err = check_func_arg(env, BPF_REG_2, fn->arg2_type, &meta); if (err) return err; - err = check_func_arg(env, BPF_REG_3, fn->arg3_type, &map); + err = check_func_arg(env, BPF_REG_3, fn->arg3_type, &meta); if (err) return err; - err = check_func_arg(env, BPF_REG_4, fn->arg4_type, &map); + err = check_func_arg(env, BPF_REG_4, fn->arg4_type, &meta); if (err) return err; - err = check_func_arg(env, BPF_REG_5, fn->arg5_type, &map); + err = check_func_arg(env, BPF_REG_5, fn->arg5_type, &meta); if (err) return err; @@ -1013,18 +1019,18 @@ static int check_call(struct verifier_env *env, int func_id) * can check 'value_size' boundary of memory access * to map element returned from bpf_map_lookup_elem() */ - if (map == NULL) { + if (meta.map_ptr == NULL) { verbose("kernel subsystem misconfigured verifier\n"); return -EINVAL; } - regs[BPF_REG_0].map_ptr = map; + regs[BPF_REG_0].map_ptr = meta.map_ptr; } else { verbose("unknown return type %d of func %d\n", fn->ret_type, func_id); return -EINVAL; } - err = check_map_func_compatibility(map, func_id); + err = check_map_func_compatibility(meta.map_ptr, func_id); if (err) return err; From 435faee1aae9c1ac231f89e4faf0437bfe29f425 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Wed, 13 Apr 2016 00:10:51 +0200 Subject: [PATCH 0689/1649] bpf, verifier: add ARG_PTR_TO_RAW_STACK type When passing buffers from eBPF stack space into a helper function, we have ARG_PTR_TO_STACK argument type for helpers available. The verifier makes sure that such buffers are initialized, within boundaries, etc. However, the downside with this is that we have a couple of helper functions such as bpf_skb_load_bytes() that fill out the passed buffer in the expected success case anyway, so zero initializing them prior to the helper call is unneeded/wasted instructions in the eBPF program that can be avoided. Therefore, add a new helper function argument type called ARG_PTR_TO_RAW_STACK. 
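As a minimal sketch of the contract (the helper name is hypothetical; it mirrors the bpf_probe_read() conversion later in this series), a helper taking such a raw stack buffer fills the area on success and clears it on every error path, so the program can never observe uninitialized stack bytes:

    #include <linux/types.h>
    #include <linux/string.h>
    #include <linux/compiler.h>
    #include <linux/uaccess.h>

    /* Hypothetical helper taking a raw (uninitialized) stack buffer in r1/r2:
     * every return path either fully fills or fully clears the area.
     */
    static u64 example_fill_buf(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
    {
            void *dst = (void *) (long) r1;
            int size = (int) r2;
            void *src = (void *) (long) r3;
            int ret;

            ret = probe_kernel_read(dst, src, size);  /* fast path fills dst */
            if (unlikely(ret < 0))
                    memset(dst, 0, size);             /* error path clears dst */

            return ret;
    }
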
The idea is to skip the STACK_MISC check in check_stack_boundary() and color the related stack slots as STACK_MISC after we checked all call arguments. Helper functions using ARG_PTR_TO_RAW_STACK must make sure that every path of the helper function will fill the provided buffer area, so that we cannot leak any uninitialized stack memory. This f.e. means that error paths need to memset() the buffers, but the expected fast-path doesn't have to do this anymore. Since there's no such helper needing more than at most one ARG_PTR_TO_RAW_STACK argument, we can keep it simple and don't need to check for multiple areas. Should in future such a use-case really appear, we have check_raw_mode() that will make sure we implement support for it first. Signed-off-by: Daniel Borkmann Acked-by: Alexei Starovoitov Signed-off-by: David S. Miller --- include/linux/bpf.h | 5 ++++ kernel/bpf/verifier.c | 59 +++++++++++++++++++++++++++++++++++++++---- 2 files changed, 59 insertions(+), 5 deletions(-) diff --git a/include/linux/bpf.h b/include/linux/bpf.h index b2365a6eba3d..5fb3c610fa96 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -66,6 +66,11 @@ enum bpf_arg_type { * functions that access data on eBPF program stack */ ARG_PTR_TO_STACK, /* any pointer to eBPF program stack */ + ARG_PTR_TO_RAW_STACK, /* any pointer to eBPF program stack, area does not + * need to be initialized, helper function must fill + * all bytes or clear them in error case. + */ + ARG_CONST_STACK_SIZE, /* number of bytes accessed from stack */ ARG_CONST_STACK_SIZE_OR_ZERO, /* number of bytes accessed from stack or 0 */ diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 202f8f738542..9c843a5417da 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -207,6 +207,9 @@ struct verifier_env { struct bpf_call_arg_meta { struct bpf_map *map_ptr; + bool raw_mode; + int regno; + int access_size; }; /* verbose verifier prints what it's seeing @@ -789,7 +792,8 @@ static int check_xadd(struct verifier_env *env, struct bpf_insn *insn) * and all elements of stack are initialized */ static int check_stack_boundary(struct verifier_env *env, int regno, - int access_size, bool zero_size_allowed) + int access_size, bool zero_size_allowed, + struct bpf_call_arg_meta *meta) { struct verifier_state *state = &env->cur_state; struct reg_state *regs = state->regs; @@ -815,6 +819,12 @@ static int check_stack_boundary(struct verifier_env *env, int regno, return -EACCES; } + if (meta && meta->raw_mode) { + meta->access_size = access_size; + meta->regno = regno; + return 0; + } + for (i = 0; i < access_size; i++) { if (state->stack_slot_type[MAX_BPF_STACK + off + i] != STACK_MISC) { verbose("invalid indirect read from stack off %d+%d size %d\n", @@ -859,7 +869,8 @@ static int check_func_arg(struct verifier_env *env, u32 regno, expected_type = CONST_PTR_TO_MAP; } else if (arg_type == ARG_PTR_TO_CTX) { expected_type = PTR_TO_CTX; - } else if (arg_type == ARG_PTR_TO_STACK) { + } else if (arg_type == ARG_PTR_TO_STACK || + arg_type == ARG_PTR_TO_RAW_STACK) { expected_type = PTR_TO_STACK; /* One exception here. In case function allows for NULL to be * passed in as argument, it's a CONST_IMM type. 
Final test @@ -867,6 +878,7 @@ static int check_func_arg(struct verifier_env *env, u32 regno, */ if (reg->type == CONST_IMM && reg->imm == 0) expected_type = CONST_IMM; + meta->raw_mode = arg_type == ARG_PTR_TO_RAW_STACK; } else { verbose("unsupported arg_type %d\n", arg_type); return -EFAULT; @@ -896,7 +908,7 @@ static int check_func_arg(struct verifier_env *env, u32 regno, return -EACCES; } err = check_stack_boundary(env, regno, meta->map_ptr->key_size, - false); + false, NULL); } else if (arg_type == ARG_PTR_TO_MAP_VALUE) { /* bpf_map_xxx(..., map_ptr, ..., value) call: * check [value, value + map->value_size) validity @@ -907,7 +919,8 @@ static int check_func_arg(struct verifier_env *env, u32 regno, return -EACCES; } err = check_stack_boundary(env, regno, - meta->map_ptr->value_size, false); + meta->map_ptr->value_size, + false, NULL); } else if (arg_type == ARG_CONST_STACK_SIZE || arg_type == ARG_CONST_STACK_SIZE_OR_ZERO) { bool zero_size_allowed = (arg_type == ARG_CONST_STACK_SIZE_OR_ZERO); @@ -922,7 +935,7 @@ static int check_func_arg(struct verifier_env *env, u32 regno, return -EACCES; } err = check_stack_boundary(env, regno - 1, reg->imm, - zero_size_allowed); + zero_size_allowed, meta); } return err; @@ -953,6 +966,24 @@ static int check_map_func_compatibility(struct bpf_map *map, int func_id) return 0; } +static int check_raw_mode(const struct bpf_func_proto *fn) +{ + int count = 0; + + if (fn->arg1_type == ARG_PTR_TO_RAW_STACK) + count++; + if (fn->arg2_type == ARG_PTR_TO_RAW_STACK) + count++; + if (fn->arg3_type == ARG_PTR_TO_RAW_STACK) + count++; + if (fn->arg4_type == ARG_PTR_TO_RAW_STACK) + count++; + if (fn->arg5_type == ARG_PTR_TO_RAW_STACK) + count++; + + return count > 1 ? -EINVAL : 0; +} + static int check_call(struct verifier_env *env, int func_id) { struct verifier_state *state = &env->cur_state; @@ -984,6 +1015,15 @@ static int check_call(struct verifier_env *env, int func_id) memset(&meta, 0, sizeof(meta)); + /* We only support one arg being in raw mode at the moment, which + * is sufficient for the helper functions we have right now. + */ + err = check_raw_mode(fn); + if (err) { + verbose("kernel subsystem misconfigured func %d\n", func_id); + return err; + } + /* check args */ err = check_func_arg(env, BPF_REG_1, fn->arg1_type, &meta); if (err) @@ -1001,6 +1041,15 @@ static int check_call(struct verifier_env *env, int func_id) if (err) return err; + /* Mark slots with STACK_MISC in case of raw mode, stack offset + * is inferred from register state. + */ + for (i = 0; i < meta.access_size; i++) { + err = check_mem_access(env, meta.regno, i, BPF_B, BPF_WRITE, -1); + if (err) + return err; + } + /* reset caller saved regs */ for (i = 0; i < CALLER_SAVED_REGS; i++) { reg = regs + caller_saved[i]; From 074f528eed408b467516e142fa4c45e5b0d2ba16 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Wed, 13 Apr 2016 00:10:52 +0200 Subject: [PATCH 0690/1649] bpf: convert relevant helper args to ARG_PTR_TO_RAW_STACK This patch converts all helpers that can use ARG_PTR_TO_RAW_STACK as argument type. For tc programs this is bpf_skb_load_bytes(), bpf_skb_get_tunnel_key(), bpf_skb_get_tunnel_opt(). For tracing, this optimizes bpf_get_current_comm() and bpf_probe_read(). The check in bpf_skb_load_bytes() for MAX_BPF_STACK can also be removed since the verifier already makes sure we stay within bounds on stack buffers. Signed-off-by: Daniel Borkmann Acked-by: Alexei Starovoitov Signed-off-by: David S. 
Miller --- kernel/bpf/helpers.c | 17 +++++++++--- kernel/trace/bpf_trace.c | 10 ++++--- net/core/filter.c | 57 ++++++++++++++++++++++++++++------------ 3 files changed, 60 insertions(+), 24 deletions(-) diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c index 50da680c479f..ad7a0573f71b 100644 --- a/kernel/bpf/helpers.c +++ b/kernel/bpf/helpers.c @@ -163,17 +163,26 @@ static u64 bpf_get_current_comm(u64 r1, u64 size, u64 r3, u64 r4, u64 r5) struct task_struct *task = current; char *buf = (char *) (long) r1; - if (!task) - return -EINVAL; + if (unlikely(!task)) + goto err_clear; - strlcpy(buf, task->comm, min_t(size_t, size, sizeof(task->comm))); + strncpy(buf, task->comm, size); + + /* Verifier guarantees that size > 0. For task->comm exceeding + * size, guarantee that buf is %NUL-terminated. Unconditionally + * done here to save the size test. + */ + buf[size - 1] = 0; return 0; +err_clear: + memset(buf, 0, size); + return -EINVAL; } const struct bpf_func_proto bpf_get_current_comm_proto = { .func = bpf_get_current_comm, .gpl_only = false, .ret_type = RET_INTEGER, - .arg1_type = ARG_PTR_TO_STACK, + .arg1_type = ARG_PTR_TO_RAW_STACK, .arg2_type = ARG_CONST_STACK_SIZE, }; diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index 413ec5614180..685587885374 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -62,17 +62,21 @@ EXPORT_SYMBOL_GPL(trace_call_bpf); static u64 bpf_probe_read(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5) { void *dst = (void *) (long) r1; - int size = (int) r2; + int ret, size = (int) r2; void *unsafe_ptr = (void *) (long) r3; - return probe_kernel_read(dst, unsafe_ptr, size); + ret = probe_kernel_read(dst, unsafe_ptr, size); + if (unlikely(ret < 0)) + memset(dst, 0, size); + + return ret; } static const struct bpf_func_proto bpf_probe_read_proto = { .func = bpf_probe_read, .gpl_only = true, .ret_type = RET_INTEGER, - .arg1_type = ARG_PTR_TO_STACK, + .arg1_type = ARG_PTR_TO_RAW_STACK, .arg2_type = ARG_CONST_STACK_SIZE, .arg3_type = ARG_ANYTHING, }; diff --git a/net/core/filter.c b/net/core/filter.c index e8486ba601ea..5d2ac2b9d1c4 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -1409,16 +1409,19 @@ static u64 bpf_skb_load_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5) unsigned int len = (unsigned int) r4; void *ptr; - if (unlikely((u32) offset > 0xffff || len > MAX_BPF_STACK)) - return -EFAULT; + if (unlikely((u32) offset > 0xffff)) + goto err_clear; ptr = skb_header_pointer(skb, offset, len, to); if (unlikely(!ptr)) - return -EFAULT; + goto err_clear; if (ptr != to) memcpy(to, ptr, len); return 0; +err_clear: + memset(to, 0, len); + return -EFAULT; } static const struct bpf_func_proto bpf_skb_load_bytes_proto = { @@ -1427,7 +1430,7 @@ static const struct bpf_func_proto bpf_skb_load_bytes_proto = { .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_ANYTHING, - .arg3_type = ARG_PTR_TO_STACK, + .arg3_type = ARG_PTR_TO_RAW_STACK, .arg4_type = ARG_CONST_STACK_SIZE, }; @@ -1756,12 +1759,19 @@ static u64 bpf_skb_get_tunnel_key(u64 r1, u64 r2, u64 size, u64 flags, u64 r5) struct bpf_tunnel_key *to = (struct bpf_tunnel_key *) (long) r2; const struct ip_tunnel_info *info = skb_tunnel_info(skb); u8 compat[sizeof(struct bpf_tunnel_key)]; + void *to_orig = to; + int err; - if (unlikely(!info || (flags & ~(BPF_F_TUNINFO_IPV6)))) - return -EINVAL; - if (ip_tunnel_info_af(info) != bpf_tunnel_key_af(flags)) - return -EPROTO; + if (unlikely(!info || (flags & ~(BPF_F_TUNINFO_IPV6)))) { + err = -EINVAL; + goto err_clear; + } + 
if (ip_tunnel_info_af(info) != bpf_tunnel_key_af(flags)) { + err = -EPROTO; + goto err_clear; + } if (unlikely(size != sizeof(struct bpf_tunnel_key))) { + err = -EINVAL; switch (size) { case offsetof(struct bpf_tunnel_key, tunnel_label): case offsetof(struct bpf_tunnel_key, tunnel_ext): @@ -1771,12 +1781,12 @@ static u64 bpf_skb_get_tunnel_key(u64 r1, u64 r2, u64 size, u64 flags, u64 r5) * a common path later on. */ if (ip_tunnel_info_af(info) != AF_INET) - return -EINVAL; + goto err_clear; set_compat: to = (struct bpf_tunnel_key *)compat; break; default: - return -EINVAL; + goto err_clear; } } @@ -1793,9 +1803,12 @@ set_compat: } if (unlikely(size != sizeof(struct bpf_tunnel_key))) - memcpy((void *)(long) r2, to, size); + memcpy(to_orig, to, size); return 0; +err_clear: + memset(to_orig, 0, size); + return err; } static const struct bpf_func_proto bpf_skb_get_tunnel_key_proto = { @@ -1803,7 +1816,7 @@ static const struct bpf_func_proto bpf_skb_get_tunnel_key_proto = { .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, - .arg2_type = ARG_PTR_TO_STACK, + .arg2_type = ARG_PTR_TO_RAW_STACK, .arg3_type = ARG_CONST_STACK_SIZE, .arg4_type = ARG_ANYTHING, }; @@ -1813,16 +1826,26 @@ static u64 bpf_skb_get_tunnel_opt(u64 r1, u64 r2, u64 size, u64 r4, u64 r5) struct sk_buff *skb = (struct sk_buff *) (long) r1; u8 *to = (u8 *) (long) r2; const struct ip_tunnel_info *info = skb_tunnel_info(skb); + int err; if (unlikely(!info || - !(info->key.tun_flags & TUNNEL_OPTIONS_PRESENT))) - return -ENOENT; - if (unlikely(size < info->options_len)) - return -ENOMEM; + !(info->key.tun_flags & TUNNEL_OPTIONS_PRESENT))) { + err = -ENOENT; + goto err_clear; + } + if (unlikely(size < info->options_len)) { + err = -ENOMEM; + goto err_clear; + } ip_tunnel_info_opts_get(to, info); + if (size > info->options_len) + memset(to + info->options_len, 0, size - info->options_len); return info->options_len; +err_clear: + memset(to, 0, size); + return err; } static const struct bpf_func_proto bpf_skb_get_tunnel_opt_proto = { @@ -1830,7 +1853,7 @@ static const struct bpf_func_proto bpf_skb_get_tunnel_opt_proto = { .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, - .arg2_type = ARG_PTR_TO_STACK, + .arg2_type = ARG_PTR_TO_RAW_STACK, .arg3_type = ARG_CONST_STACK_SIZE, }; From 02413cabd6b67f1444f153ea85d44076deae2056 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Wed, 13 Apr 2016 00:10:53 +0200 Subject: [PATCH 0691/1649] bpf, samples: don't zero data when not needed Remove the zero initialization in the sample programs where appropriate. Note that this is an optimization which is now possible, old programs still doing the zero initialization are just fine as well. Also, make sure we don't have padding issues when we don't memset() the entire struct anymore. Signed-off-by: Daniel Borkmann Acked-by: Alexei Starovoitov Signed-off-by: David S. 
Miller --- samples/bpf/offwaketime_kern.c | 10 ++++++---- samples/bpf/tracex1_kern.c | 4 +--- samples/bpf/tracex2_kern.c | 4 ++-- samples/bpf/tracex5_kern.c | 6 +++--- 4 files changed, 12 insertions(+), 12 deletions(-) diff --git a/samples/bpf/offwaketime_kern.c b/samples/bpf/offwaketime_kern.c index 983629a31c79..e7d9a0a3d45b 100644 --- a/samples/bpf/offwaketime_kern.c +++ b/samples/bpf/offwaketime_kern.c @@ -11,7 +11,7 @@ #include #include -#define _(P) ({typeof(P) val = 0; bpf_probe_read(&val, sizeof(val), &P); val;}) +#define _(P) ({typeof(P) val; bpf_probe_read(&val, sizeof(val), &P); val;}) #define MINBLOCK_US 1 @@ -61,7 +61,7 @@ SEC("kprobe/try_to_wake_up") int waker(struct pt_regs *ctx) { struct task_struct *p = (void *) PT_REGS_PARM1(ctx); - struct wokeby_t woke = {}; + struct wokeby_t woke; u32 pid; pid = _(p->pid); @@ -75,17 +75,19 @@ int waker(struct pt_regs *ctx) static inline int update_counts(void *ctx, u32 pid, u64 delta) { - struct key_t key = {}; struct wokeby_t *woke; u64 zero = 0, *val; + struct key_t key; + __builtin_memset(&key.waker, 0, sizeof(key.waker)); bpf_get_current_comm(&key.target, sizeof(key.target)); key.tret = bpf_get_stackid(ctx, &stackmap, STACKID_FLAGS); + key.wret = 0; woke = bpf_map_lookup_elem(&wokeby, &pid); if (woke) { key.wret = woke->ret; - __builtin_memcpy(&key.waker, woke->name, TASK_COMM_LEN); + __builtin_memcpy(&key.waker, woke->name, sizeof(key.waker)); bpf_map_delete_elem(&wokeby, &pid); } diff --git a/samples/bpf/tracex1_kern.c b/samples/bpf/tracex1_kern.c index 3f450a8fa1f3..107da148820f 100644 --- a/samples/bpf/tracex1_kern.c +++ b/samples/bpf/tracex1_kern.c @@ -23,16 +23,14 @@ int bpf_prog1(struct pt_regs *ctx) /* attaches to kprobe netif_receive_skb, * looks for packets on loobpack device and prints them */ - char devname[IFNAMSIZ] = {}; + char devname[IFNAMSIZ]; struct net_device *dev; struct sk_buff *skb; int len; /* non-portable! 
works for the given kernel only */ skb = (struct sk_buff *) PT_REGS_PARM1(ctx); - dev = _(skb->dev); - len = _(skb->len); bpf_probe_read(devname, sizeof(devname), dev->name); diff --git a/samples/bpf/tracex2_kern.c b/samples/bpf/tracex2_kern.c index 6d6eefd0d465..5e11c20ce5ec 100644 --- a/samples/bpf/tracex2_kern.c +++ b/samples/bpf/tracex2_kern.c @@ -66,7 +66,7 @@ struct hist_key { char comm[16]; u64 pid_tgid; u64 uid_gid; - u32 index; + u64 index; }; struct bpf_map_def SEC("maps") my_hist_map = { @@ -82,7 +82,7 @@ int bpf_prog3(struct pt_regs *ctx) long write_size = PT_REGS_PARM3(ctx); long init_val = 1; long *value; - struct hist_key key = {}; + struct hist_key key; key.index = log2l(write_size); key.pid_tgid = bpf_get_current_pid_tgid(); diff --git a/samples/bpf/tracex5_kern.c b/samples/bpf/tracex5_kern.c index b3f4295bf288..f95f232cbab9 100644 --- a/samples/bpf/tracex5_kern.c +++ b/samples/bpf/tracex5_kern.c @@ -22,7 +22,7 @@ struct bpf_map_def SEC("maps") progs = { SEC("kprobe/seccomp_phase1") int bpf_prog1(struct pt_regs *ctx) { - struct seccomp_data sd = {}; + struct seccomp_data sd; bpf_probe_read(&sd, sizeof(sd), (void *)PT_REGS_PARM1(ctx)); @@ -40,7 +40,7 @@ int bpf_prog1(struct pt_regs *ctx) /* we jump here when syscall number == __NR_write */ PROG(__NR_write)(struct pt_regs *ctx) { - struct seccomp_data sd = {}; + struct seccomp_data sd; bpf_probe_read(&sd, sizeof(sd), (void *)PT_REGS_PARM1(ctx)); if (sd.args[2] == 512) { @@ -53,7 +53,7 @@ PROG(__NR_write)(struct pt_regs *ctx) PROG(__NR_read)(struct pt_regs *ctx) { - struct seccomp_data sd = {}; + struct seccomp_data sd; bpf_probe_read(&sd, sizeof(sd), (void *)PT_REGS_PARM1(ctx)); if (sd.args[2] > 128 && sd.args[2] <= 1024) { From 3f2050e20e6f3a762eb08397db651dcec9498998 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Wed, 13 Apr 2016 00:10:54 +0200 Subject: [PATCH 0692/1649] bpf, samples: add test cases for raw stack This adds test cases mostly around ARG_PTR_TO_RAW_STACK to check the verifier behaviour. [...] #84 raw_stack: no skb_load_bytes OK #85 raw_stack: skb_load_bytes, no init OK #86 raw_stack: skb_load_bytes, init OK #87 raw_stack: skb_load_bytes, spilled regs around bounds OK #88 raw_stack: skb_load_bytes, spilled regs corruption OK #89 raw_stack: skb_load_bytes, spilled regs corruption 2 OK #90 raw_stack: skb_load_bytes, spilled regs + data OK #91 raw_stack: skb_load_bytes, invalid access 1 OK #92 raw_stack: skb_load_bytes, invalid access 2 OK #93 raw_stack: skb_load_bytes, invalid access 3 OK #94 raw_stack: skb_load_bytes, invalid access 4 OK #95 raw_stack: skb_load_bytes, invalid access 5 OK #96 raw_stack: skb_load_bytes, invalid access 6 OK #97 raw_stack: skb_load_bytes, large access OK Summary: 98 PASSED, 0 FAILED Signed-off-by: Daniel Borkmann Acked-by: Alexei Starovoitov Signed-off-by: David S. 
Miller --- samples/bpf/test_verifier.c | 268 ++++++++++++++++++++++++++++++++++++ 1 file changed, 268 insertions(+) diff --git a/samples/bpf/test_verifier.c b/samples/bpf/test_verifier.c index 4b51a9039c0d..9eba8d1d9dcc 100644 --- a/samples/bpf/test_verifier.c +++ b/samples/bpf/test_verifier.c @@ -308,6 +308,19 @@ static struct bpf_test tests[] = { .result = ACCEPT, .result_unpriv = REJECT, }, + { + "check valid spill/fill, skb mark", + .insns = { + BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -8), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, + offsetof(struct __sk_buff, mark)), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .result_unpriv = ACCEPT, + }, { "check corrupted spill/fill", .insns = { @@ -1180,6 +1193,261 @@ static struct bpf_test tests[] = { .result_unpriv = REJECT, .result = ACCEPT, }, + { + "raw_stack: no skb_load_bytes", + .insns = { + BPF_MOV64_IMM(BPF_REG_2, 4), + BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8), + BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), + BPF_MOV64_IMM(BPF_REG_4, 8), + /* Call to skb_load_bytes() omitted. */ + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0), + BPF_EXIT_INSN(), + }, + .result = REJECT, + .errstr = "invalid read from stack off -8+0 size 8", + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + }, + { + "raw_stack: skb_load_bytes, no init", + .insns = { + BPF_MOV64_IMM(BPF_REG_2, 4), + BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8), + BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), + BPF_MOV64_IMM(BPF_REG_4, 8), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + }, + { + "raw_stack: skb_load_bytes, init", + .insns = { + BPF_MOV64_IMM(BPF_REG_2, 4), + BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_6, 0, 0xcafe), + BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), + BPF_MOV64_IMM(BPF_REG_4, 8), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + }, + { + "raw_stack: skb_load_bytes, spilled regs around bounds", + .insns = { + BPF_MOV64_IMM(BPF_REG_2, 4), + BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16), + BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8), /* spill ctx from R1 */ + BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8), /* spill ctx from R1 */ + BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), + BPF_MOV64_IMM(BPF_REG_4, 8), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8), /* fill ctx into R0 */ + BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8), /* fill ctx into R2 */ + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, + offsetof(struct __sk_buff, mark)), + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2, + offsetof(struct __sk_buff, priority)), + BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + }, + { + "raw_stack: skb_load_bytes, spilled regs corruption", + .insns = { + BPF_MOV64_IMM(BPF_REG_2, 4), + BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8), + BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0), /* spill ctx from R1 */ + BPF_MOV64_REG(BPF_REG_3, 
BPF_REG_6), + BPF_MOV64_IMM(BPF_REG_4, 8), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0), /* fill ctx into R0 */ + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, + offsetof(struct __sk_buff, mark)), + BPF_EXIT_INSN(), + }, + .result = REJECT, + .errstr = "R0 invalid mem access 'inv'", + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + }, + { + "raw_stack: skb_load_bytes, spilled regs corruption 2", + .insns = { + BPF_MOV64_IMM(BPF_REG_2, 4), + BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16), + BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8), /* spill ctx from R1 */ + BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0), /* spill ctx from R1 */ + BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8), /* spill ctx from R1 */ + BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), + BPF_MOV64_IMM(BPF_REG_4, 8), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8), /* fill ctx into R0 */ + BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8), /* fill ctx into R2 */ + BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6, 0), /* fill ctx into R3 */ + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, + offsetof(struct __sk_buff, mark)), + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2, + offsetof(struct __sk_buff, priority)), + BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_3, + offsetof(struct __sk_buff, pkt_type)), + BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3), + BPF_EXIT_INSN(), + }, + .result = REJECT, + .errstr = "R3 invalid mem access 'inv'", + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + }, + { + "raw_stack: skb_load_bytes, spilled regs + data", + .insns = { + BPF_MOV64_IMM(BPF_REG_2, 4), + BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16), + BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8), /* spill ctx from R1 */ + BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0), /* spill ctx from R1 */ + BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8), /* spill ctx from R1 */ + BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), + BPF_MOV64_IMM(BPF_REG_4, 8), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8), /* fill ctx into R0 */ + BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8), /* fill ctx into R2 */ + BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6, 0), /* fill data into R3 */ + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, + offsetof(struct __sk_buff, mark)), + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2, + offsetof(struct __sk_buff, priority)), + BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2), + BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + }, + { + "raw_stack: skb_load_bytes, invalid access 1", + .insns = { + BPF_MOV64_IMM(BPF_REG_2, 4), + BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -513), + BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), + BPF_MOV64_IMM(BPF_REG_4, 8), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0), + BPF_EXIT_INSN(), + }, + .result = REJECT, + .errstr = "invalid stack type R3 off=-513 access_size=8", + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + }, + { + "raw_stack: skb_load_bytes, invalid access 2", + .insns = { + BPF_MOV64_IMM(BPF_REG_2, 4), + BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1), + BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), + BPF_MOV64_IMM(BPF_REG_4, 8), + 
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0), + BPF_EXIT_INSN(), + }, + .result = REJECT, + .errstr = "invalid stack type R3 off=-1 access_size=8", + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + }, + { + "raw_stack: skb_load_bytes, invalid access 3", + .insns = { + BPF_MOV64_IMM(BPF_REG_2, 4), + BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 0xffffffff), + BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), + BPF_MOV64_IMM(BPF_REG_4, 0xffffffff), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0), + BPF_EXIT_INSN(), + }, + .result = REJECT, + .errstr = "invalid stack type R3 off=-1 access_size=-1", + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + }, + { + "raw_stack: skb_load_bytes, invalid access 4", + .insns = { + BPF_MOV64_IMM(BPF_REG_2, 4), + BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1), + BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), + BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0), + BPF_EXIT_INSN(), + }, + .result = REJECT, + .errstr = "invalid stack type R3 off=-1 access_size=2147483647", + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + }, + { + "raw_stack: skb_load_bytes, invalid access 5", + .insns = { + BPF_MOV64_IMM(BPF_REG_2, 4), + BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512), + BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), + BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0), + BPF_EXIT_INSN(), + }, + .result = REJECT, + .errstr = "invalid stack type R3 off=-512 access_size=2147483647", + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + }, + { + "raw_stack: skb_load_bytes, invalid access 6", + .insns = { + BPF_MOV64_IMM(BPF_REG_2, 4), + BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512), + BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), + BPF_MOV64_IMM(BPF_REG_4, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0), + BPF_EXIT_INSN(), + }, + .result = REJECT, + .errstr = "invalid stack type R3 off=-512 access_size=0", + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + }, + { + "raw_stack: skb_load_bytes, large access", + .insns = { + BPF_MOV64_IMM(BPF_REG_2, 4), + BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512), + BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), + BPF_MOV64_IMM(BPF_REG_4, 512), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + }, }; static int probe_filter_length(struct bpf_insn *fp) From 608b9977260f67d8b032ea170666a6174a48e2f1 Mon Sep 17 00:00:00 2001 From: Paolo Abeni Date: Wed, 13 Apr 2016 10:52:20 +0200 Subject: [PATCH 0693/1649] tun: use per cpu variables for stats accounting Currently the tun device accounting uses dev->stats without applying any kind of protection, regardless that accounting happens in preemptible process context. This patch move the tun stats to a per cpu data structure, and protect the updates with u64_stats_update_begin()/u64_stats_update_end() or this_cpu_inc according to the stat type. The per cpu stats are aggregated by the newly added ndo_get_stats64 ops. 
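As a minimal sketch of the pattern used here (structure and function names simplified and hypothetical; the real code is in the tun diff below), writers bump their own CPU's counters inside a u64_stats sequence, and the ndo_get_stats64 reader folds all possible CPUs with the fetch/retry loop:

    #include <linux/percpu.h>
    #include <linux/cpumask.h>
    #include <linux/u64_stats_sync.h>

    struct example_pcpu_stats {
            u64 packets;
            u64 bytes;
            struct u64_stats_sync syncp;
    };

    /* writer: account on the local CPU, protected by the u64_stats sequence */
    static void example_stats_account(struct example_pcpu_stats __percpu *stats,
                                      unsigned int len)
    {
            struct example_pcpu_stats *p = get_cpu_ptr(stats);

            u64_stats_update_begin(&p->syncp);
            p->packets++;
            p->bytes += len;
            u64_stats_update_end(&p->syncp);
            put_cpu_ptr(stats);
    }

    /* reader: aggregate a consistent snapshot from every possible CPU */
    static void example_stats_fold(struct example_pcpu_stats __percpu *stats,
                                   u64 *packets, u64 *bytes)
    {
            int cpu;

            *packets = 0;
            *bytes = 0;
            for_each_possible_cpu(cpu) {
                    const struct example_pcpu_stats *p = per_cpu_ptr(stats, cpu);
                    unsigned int start;
                    u64 pk, by;

                    do {
                            start = u64_stats_fetch_begin(&p->syncp);
                            pk = p->packets;
                            by = p->bytes;
                    } while (u64_stats_fetch_retry(&p->syncp, start));

                    *packets += pk;
                    *bytes += by;
            }
    }
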
Signed-off-by: Paolo Abeni Signed-off-by: David S. Miller --- drivers/net/tun.c | 95 +++++++++++++++++++++++++++++++++++++++++------ 1 file changed, 83 insertions(+), 12 deletions(-) diff --git a/drivers/net/tun.c b/drivers/net/tun.c index a74661690a11..faf9297db2cf 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -131,6 +131,17 @@ struct tap_filter { #define TUN_FLOW_EXPIRE (3 * HZ) +struct tun_pcpu_stats { + u64 rx_packets; + u64 rx_bytes; + u64 tx_packets; + u64 tx_bytes; + struct u64_stats_sync syncp; + u32 rx_dropped; + u32 tx_dropped; + u32 rx_frame_errors; +}; + /* A tun_file connects an open character device to a tuntap netdevice. It * also contains all socket related structures (except sock_fprog and tap_filter) * to serve as one transmit queue for tuntap device. The sock_fprog and @@ -205,6 +216,7 @@ struct tun_struct { struct list_head disabled; void *security; u32 flow_count; + struct tun_pcpu_stats __percpu *pcpu_stats; }; #ifdef CONFIG_TUN_VNET_CROSS_LE @@ -886,7 +898,7 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev) return NETDEV_TX_OK; drop: - dev->stats.tx_dropped++; + this_cpu_inc(tun->pcpu_stats->tx_dropped); skb_tx_error(skb); kfree_skb(skb); rcu_read_unlock(); @@ -949,6 +961,43 @@ static void tun_set_headroom(struct net_device *dev, int new_hr) tun->align = new_hr; } +static struct rtnl_link_stats64 * +tun_net_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) +{ + u32 rx_dropped = 0, tx_dropped = 0, rx_frame_errors = 0; + struct tun_struct *tun = netdev_priv(dev); + struct tun_pcpu_stats *p; + int i; + + for_each_possible_cpu(i) { + u64 rxpackets, rxbytes, txpackets, txbytes; + unsigned int start; + + p = per_cpu_ptr(tun->pcpu_stats, i); + do { + start = u64_stats_fetch_begin(&p->syncp); + rxpackets = p->rx_packets; + rxbytes = p->rx_bytes; + txpackets = p->tx_packets; + txbytes = p->tx_bytes; + } while (u64_stats_fetch_retry(&p->syncp, start)); + + stats->rx_packets += rxpackets; + stats->rx_bytes += rxbytes; + stats->tx_packets += txpackets; + stats->tx_bytes += txbytes; + + /* u32 counters */ + rx_dropped += p->rx_dropped; + rx_frame_errors += p->rx_frame_errors; + tx_dropped += p->tx_dropped; + } + stats->rx_dropped = rx_dropped; + stats->rx_frame_errors = rx_frame_errors; + stats->tx_dropped = tx_dropped; + return stats; +} + static const struct net_device_ops tun_netdev_ops = { .ndo_uninit = tun_net_uninit, .ndo_open = tun_net_open, @@ -961,6 +1010,7 @@ static const struct net_device_ops tun_netdev_ops = { .ndo_poll_controller = tun_poll_controller, #endif .ndo_set_rx_headroom = tun_set_headroom, + .ndo_get_stats64 = tun_net_get_stats64, }; static const struct net_device_ops tap_netdev_ops = { @@ -979,6 +1029,7 @@ static const struct net_device_ops tap_netdev_ops = { #endif .ndo_features_check = passthru_features_check, .ndo_set_rx_headroom = tun_set_headroom, + .ndo_get_stats64 = tun_net_get_stats64, }; static void tun_flow_init(struct tun_struct *tun) @@ -1103,6 +1154,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, size_t total_len = iov_iter_count(from); size_t len = total_len, align = tun->align, linear; struct virtio_net_hdr gso = { 0 }; + struct tun_pcpu_stats *stats; int good_linear; int copylen; bool zerocopy = false; @@ -1177,7 +1229,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, skb = tun_alloc_skb(tfile, align, copylen, linear, noblock); if (IS_ERR(skb)) { if (PTR_ERR(skb) != -EAGAIN) - tun->dev->stats.rx_dropped++; + 
this_cpu_inc(tun->pcpu_stats->rx_dropped); return PTR_ERR(skb); } @@ -1192,7 +1244,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, } if (err) { - tun->dev->stats.rx_dropped++; + this_cpu_inc(tun->pcpu_stats->rx_dropped); kfree_skb(skb); return -EFAULT; } @@ -1200,7 +1252,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, if (gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) { if (!skb_partial_csum_set(skb, tun16_to_cpu(tun, gso.csum_start), tun16_to_cpu(tun, gso.csum_offset))) { - tun->dev->stats.rx_frame_errors++; + this_cpu_inc(tun->pcpu_stats->rx_frame_errors); kfree_skb(skb); return -EINVAL; } @@ -1217,7 +1269,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, pi.proto = htons(ETH_P_IPV6); break; default: - tun->dev->stats.rx_dropped++; + this_cpu_inc(tun->pcpu_stats->rx_dropped); kfree_skb(skb); return -EINVAL; } @@ -1245,7 +1297,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, skb_shinfo(skb)->gso_type = SKB_GSO_UDP; break; default: - tun->dev->stats.rx_frame_errors++; + this_cpu_inc(tun->pcpu_stats->rx_frame_errors); kfree_skb(skb); return -EINVAL; } @@ -1255,7 +1307,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, skb_shinfo(skb)->gso_size = tun16_to_cpu(tun, gso.gso_size); if (skb_shinfo(skb)->gso_size == 0) { - tun->dev->stats.rx_frame_errors++; + this_cpu_inc(tun->pcpu_stats->rx_frame_errors); kfree_skb(skb); return -EINVAL; } @@ -1278,8 +1330,12 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, rxhash = skb_get_hash(skb); netif_rx_ni(skb); - tun->dev->stats.rx_packets++; - tun->dev->stats.rx_bytes += len; + stats = get_cpu_ptr(tun->pcpu_stats); + u64_stats_update_begin(&stats->syncp); + stats->rx_packets++; + stats->rx_bytes += len; + u64_stats_update_end(&stats->syncp); + put_cpu_ptr(stats); tun_flow_update(tun, rxhash, tfile); return total_len; @@ -1308,6 +1364,7 @@ static ssize_t tun_put_user(struct tun_struct *tun, struct iov_iter *iter) { struct tun_pi pi = { 0, skb->protocol }; + struct tun_pcpu_stats *stats; ssize_t total; int vlan_offset = 0; int vlan_hlen = 0; @@ -1408,8 +1465,13 @@ static ssize_t tun_put_user(struct tun_struct *tun, skb_copy_datagram_iter(skb, vlan_offset, iter, skb->len - vlan_offset); done: - tun->dev->stats.tx_packets++; - tun->dev->stats.tx_bytes += skb->len + vlan_hlen; + /* caller is in process context, */ + stats = get_cpu_ptr(tun->pcpu_stats); + u64_stats_update_begin(&stats->syncp); + stats->tx_packets++; + stats->tx_bytes += skb->len + vlan_hlen; + u64_stats_update_end(&stats->syncp); + put_cpu_ptr(tun->pcpu_stats); return total; } @@ -1467,6 +1529,7 @@ static void tun_free_netdev(struct net_device *dev) struct tun_struct *tun = netdev_priv(dev); BUG_ON(!(list_empty(&tun->disabled))); + free_percpu(tun->pcpu_stats); tun_flow_uninit(tun); security_tun_dev_free_security(tun->security); free_netdev(dev); @@ -1715,11 +1778,17 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr) tun->filter_attached = false; tun->sndbuf = tfile->socket.sk->sk_sndbuf; + tun->pcpu_stats = netdev_alloc_pcpu_stats(struct tun_pcpu_stats); + if (!tun->pcpu_stats) { + err = -ENOMEM; + goto err_free_dev; + } + spin_lock_init(&tun->lock); err = security_tun_dev_alloc_security(&tun->security); if (err < 0) - goto err_free_dev; + goto err_free_stat; tun_net_init(dev); tun_flow_init(tun); @@ -1763,6 +1832,8 @@ err_detach: err_free_flow: tun_flow_uninit(tun); 
security_tun_dev_free_security(tun->security); +err_free_stat: + free_percpu(tun->pcpu_stats); err_free_dev: free_netdev(dev); return err; From de33efd0fb7f923cd41671b1f743c3a0d44dd953 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Fri, 15 Apr 2016 09:17:08 +0200 Subject: [PATCH 0694/1649] devlink: fix sb register stub in case devlink is disabled Reported-by: kbuild test robot Fixes: bf7974710a40 ("devlink: add shared buffer configuration") Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- include/net/devlink.h | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/include/net/devlink.h b/include/net/devlink.h index be64218e0254..1d45b61cb320 100644 --- a/include/net/devlink.h +++ b/include/net/devlink.h @@ -184,7 +184,9 @@ static inline void devlink_port_split_set(struct devlink_port *devlink_port, static inline int devlink_sb_register(struct devlink *devlink, unsigned int sb_index, u32 size, u16 ingress_pools_count, - u16 egress_pools_count, u16 tc_count) + u16 egress_pools_count, + u16 ingress_tc_count, + u16 egress_tc_count) { return 0; } From ce78f020429c649b206e2fd7d44411fbc1ec920e Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Fri, 15 Apr 2016 15:09:37 +0200 Subject: [PATCH 0695/1649] mlxsw: spectrum_buffers: Use designated initializers for mlxsw_sp_pbs Suggested-by: David Laight Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- .../net/ethernet/mellanox/mlxsw/spectrum_buffers.c | 12 ++---------- 1 file changed, 2 insertions(+), 10 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c index f2e073af5dd2..64166dd67e6f 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c @@ -162,16 +162,8 @@ static int mlxsw_sp_sb_pm_occ_query(struct mlxsw_sp *mlxsw_sp, u8 local_port, } static const u16 mlxsw_sp_pbs[] = { - 2 * MLXSW_SP_BYTES_TO_CELLS(ETH_FRAME_LEN), - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, /* Unused */ - 2 * MLXSW_SP_BYTES_TO_CELLS(MLXSW_PORT_MAX_MTU), + [0] = 2 * MLXSW_SP_BYTES_TO_CELLS(ETH_FRAME_LEN), + [9] = 2 * MLXSW_SP_BYTES_TO_CELLS(MLXSW_PORT_MAX_MTU), }; #define MLXSW_SP_PBS_LEN ARRAY_SIZE(mlxsw_sp_pbs) From b94cdabbf174a197020632748339392b494494e5 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Fri, 15 Apr 2016 15:09:38 +0200 Subject: [PATCH 0696/1649] mlxsw: spectrum_buffers: Use MLXSW_SP_PB_UNUSED define for unused pb Suggested-by: David Laight Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c index 64166dd67e6f..a3720a0fad7d 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c @@ -167,6 +167,7 @@ static const u16 mlxsw_sp_pbs[] = { }; #define MLXSW_SP_PBS_LEN ARRAY_SIZE(mlxsw_sp_pbs) +#define MLXSW_SP_PB_UNUSED 8 static int mlxsw_sp_port_pb_init(struct mlxsw_sp_port *mlxsw_sp_port) { @@ -176,7 +177,7 @@ static int mlxsw_sp_port_pb_init(struct mlxsw_sp_port *mlxsw_sp_port) mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0xffff, 0xffff / 2); for (i = 0; i < MLXSW_SP_PBS_LEN; i++) { - if (i == 8) + if (i == MLXSW_SP_PB_UNUSED) continue; mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, i, mlxsw_sp_pbs[i]); } From 0a233c98a8e933a65aec07663d3a2953c7642bbc Mon Sep 17 00:00:00 2001 From: Andreas Fenkart Date: Thu, 10 Mar 2016 09:44:05 +0100 Subject: [PATCH 0697/1649] mwifiex: scan: simplify dereference of bss_desc fields given this structure: struct foo { struct bar { int baz; } } these accesses are equivalent: (*(foo->bar)).baz foo->bar->baz Signed-off-by: Andreas Fenkart Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/scan.c | 98 ++++++++++----------- 1 file changed, 46 insertions(+), 52 deletions(-) diff --git a/drivers/net/wireless/marvell/mwifiex/scan.c b/drivers/net/wireless/marvell/mwifiex/scan.c index 624b0a95c64e..99026cef3179 100644 --- a/drivers/net/wireless/marvell/mwifiex/scan.c +++ b/drivers/net/wireless/marvell/mwifiex/scan.c @@ -121,8 +121,8 @@ mwifiex_is_rsn_oui_present(struct mwifiex_bssdescriptor *bss_desc, u32 cipher) struct ie_body *iebody; u8 ret = MWIFIEX_OUI_NOT_PRESENT; - if (((bss_desc->bcn_rsn_ie) && ((*(bss_desc->bcn_rsn_ie)). 
- ieee_hdr.element_id == WLAN_EID_RSN))) { + if (bss_desc->bcn_rsn_ie && + bss_desc->bcn_rsn_ie->ieee_hdr.element_id == WLAN_EID_RSN) { iebody = (struct ie_body *) (((u8 *) bss_desc->bcn_rsn_ie->data) + RSN_GTK_OUI_OFFSET); @@ -148,9 +148,9 @@ mwifiex_is_wpa_oui_present(struct mwifiex_bssdescriptor *bss_desc, u32 cipher) struct ie_body *iebody; u8 ret = MWIFIEX_OUI_NOT_PRESENT; - if (((bss_desc->bcn_wpa_ie) && - ((*(bss_desc->bcn_wpa_ie)).vend_hdr.element_id == - WLAN_EID_VENDOR_SPECIFIC))) { + if (bss_desc->bcn_wpa_ie && + bss_desc->bcn_wpa_ie->vend_hdr.element_id == + WLAN_EID_VENDOR_SPECIFIC) { iebody = (struct ie_body *) bss_desc->bcn_wpa_ie->data; oui = &mwifiex_wpa_oui[cipher][0]; ret = mwifiex_search_oui_in_ie(iebody, oui); @@ -181,8 +181,8 @@ mwifiex_is_bss_wapi(struct mwifiex_private *priv, { if (priv->sec_info.wapi_enabled && (bss_desc->bcn_wapi_ie && - ((*(bss_desc->bcn_wapi_ie)).ieee_hdr.element_id == - WLAN_EID_BSS_AC_ACCESS_DELAY))) { + bss_desc->bcn_wapi_ie->ieee_hdr.element_id == + WLAN_EID_BSS_AC_ACCESS_DELAY)) { return true; } return false; @@ -197,12 +197,12 @@ mwifiex_is_bss_no_sec(struct mwifiex_private *priv, struct mwifiex_bssdescriptor *bss_desc) { if (!priv->sec_info.wep_enabled && !priv->sec_info.wpa_enabled && - !priv->sec_info.wpa2_enabled && ((!bss_desc->bcn_wpa_ie) || - ((*(bss_desc->bcn_wpa_ie)).vend_hdr.element_id != - WLAN_EID_VENDOR_SPECIFIC)) && - ((!bss_desc->bcn_rsn_ie) || - ((*(bss_desc->bcn_rsn_ie)).ieee_hdr.element_id != - WLAN_EID_RSN)) && + !priv->sec_info.wpa2_enabled && + (!bss_desc->bcn_wpa_ie || + bss_desc->bcn_wpa_ie->vend_hdr.element_id != + WLAN_EID_VENDOR_SPECIFIC) && + (!bss_desc->bcn_rsn_ie || + bss_desc->bcn_rsn_ie->ieee_hdr.element_id != WLAN_EID_RSN) && !priv->sec_info.encryption_mode && !bss_desc->privacy) { return true; } @@ -233,9 +233,10 @@ mwifiex_is_bss_wpa(struct mwifiex_private *priv, struct mwifiex_bssdescriptor *bss_desc) { if (!priv->sec_info.wep_enabled && priv->sec_info.wpa_enabled && - !priv->sec_info.wpa2_enabled && ((bss_desc->bcn_wpa_ie) && - ((*(bss_desc->bcn_wpa_ie)). - vend_hdr.element_id == WLAN_EID_VENDOR_SPECIFIC)) + !priv->sec_info.wpa2_enabled && + (bss_desc->bcn_wpa_ie && + bss_desc->bcn_wpa_ie->vend_hdr.element_id == + WLAN_EID_VENDOR_SPECIFIC) /* * Privacy bit may NOT be set in some APs like * LinkSys WRT54G && bss_desc->privacy @@ -245,12 +246,10 @@ mwifiex_is_bss_wpa(struct mwifiex_private *priv, "info: %s: WPA:\t" "wpa_ie=%#x wpa2_ie=%#x WEP=%s WPA=%s WPA2=%s\t" "EncMode=%#x privacy=%#x\n", __func__, - (bss_desc->bcn_wpa_ie) ? - (*bss_desc->bcn_wpa_ie). - vend_hdr.element_id : 0, - (bss_desc->bcn_rsn_ie) ? - (*bss_desc->bcn_rsn_ie). - ieee_hdr.element_id : 0, + bss_desc->bcn_wpa_ie ? + bss_desc->bcn_wpa_ie->vend_hdr.element_id : 0, + bss_desc->bcn_rsn_ie ? + bss_desc->bcn_rsn_ie->ieee_hdr.element_id : 0, (priv->sec_info.wep_enabled) ? "e" : "d", (priv->sec_info.wpa_enabled) ? "e" : "d", (priv->sec_info.wpa2_enabled) ? 
"e" : "d", @@ -269,11 +268,10 @@ static bool mwifiex_is_bss_wpa2(struct mwifiex_private *priv, struct mwifiex_bssdescriptor *bss_desc) { - if (!priv->sec_info.wep_enabled && - !priv->sec_info.wpa_enabled && + if (!priv->sec_info.wep_enabled && !priv->sec_info.wpa_enabled && priv->sec_info.wpa2_enabled && - ((bss_desc->bcn_rsn_ie) && - ((*(bss_desc->bcn_rsn_ie)).ieee_hdr.element_id == WLAN_EID_RSN))) { + (bss_desc->bcn_rsn_ie && + bss_desc->bcn_rsn_ie->ieee_hdr.element_id == WLAN_EID_RSN)) { /* * Privacy bit may NOT be set in some APs like * LinkSys WRT54G && bss_desc->privacy @@ -282,12 +280,10 @@ mwifiex_is_bss_wpa2(struct mwifiex_private *priv, "info: %s: WPA2:\t" "wpa_ie=%#x wpa2_ie=%#x WEP=%s WPA=%s WPA2=%s\t" "EncMode=%#x privacy=%#x\n", __func__, - (bss_desc->bcn_wpa_ie) ? - (*bss_desc->bcn_wpa_ie). - vend_hdr.element_id : 0, - (bss_desc->bcn_rsn_ie) ? - (*bss_desc->bcn_rsn_ie). - ieee_hdr.element_id : 0, + bss_desc->bcn_wpa_ie ? + bss_desc->bcn_wpa_ie->vend_hdr.element_id : 0, + bss_desc->bcn_rsn_ie ? + bss_desc->bcn_rsn_ie->ieee_hdr.element_id : 0, (priv->sec_info.wep_enabled) ? "e" : "d", (priv->sec_info.wpa_enabled) ? "e" : "d", (priv->sec_info.wpa2_enabled) ? "e" : "d", @@ -308,11 +304,11 @@ mwifiex_is_bss_adhoc_aes(struct mwifiex_private *priv, { if (!priv->sec_info.wep_enabled && !priv->sec_info.wpa_enabled && !priv->sec_info.wpa2_enabled && - ((!bss_desc->bcn_wpa_ie) || - ((*(bss_desc->bcn_wpa_ie)). - vend_hdr.element_id != WLAN_EID_VENDOR_SPECIFIC)) && - ((!bss_desc->bcn_rsn_ie) || - ((*(bss_desc->bcn_rsn_ie)).ieee_hdr.element_id != WLAN_EID_RSN)) && + (!bss_desc->bcn_wpa_ie || + bss_desc->bcn_wpa_ie->vend_hdr.element_id != + WLAN_EID_VENDOR_SPECIFIC) && + (!bss_desc->bcn_rsn_ie || + bss_desc->bcn_rsn_ie->ieee_hdr.element_id != WLAN_EID_RSN) && !priv->sec_info.encryption_mode && bss_desc->privacy) { return true; } @@ -329,23 +325,21 @@ mwifiex_is_bss_dynamic_wep(struct mwifiex_private *priv, { if (!priv->sec_info.wep_enabled && !priv->sec_info.wpa_enabled && !priv->sec_info.wpa2_enabled && - ((!bss_desc->bcn_wpa_ie) || - ((*(bss_desc->bcn_wpa_ie)). - vend_hdr.element_id != WLAN_EID_VENDOR_SPECIFIC)) && - ((!bss_desc->bcn_rsn_ie) || - ((*(bss_desc->bcn_rsn_ie)).ieee_hdr.element_id != WLAN_EID_RSN)) && + (!bss_desc->bcn_wpa_ie || + bss_desc->bcn_wpa_ie->vend_hdr.element_id != + WLAN_EID_VENDOR_SPECIFIC) && + (!bss_desc->bcn_rsn_ie || + bss_desc->bcn_rsn_ie->ieee_hdr.element_id != WLAN_EID_RSN) && priv->sec_info.encryption_mode && bss_desc->privacy) { mwifiex_dbg(priv->adapter, INFO, "info: %s: dynamic\t" "WEP: wpa_ie=%#x wpa2_ie=%#x\t" "EncMode=%#x privacy=%#x\n", __func__, - (bss_desc->bcn_wpa_ie) ? - (*bss_desc->bcn_wpa_ie). - vend_hdr.element_id : 0, - (bss_desc->bcn_rsn_ie) ? - (*bss_desc->bcn_rsn_ie). - ieee_hdr.element_id : 0, + bss_desc->bcn_wpa_ie ? + bss_desc->bcn_wpa_ie->vend_hdr.element_id : 0, + bss_desc->bcn_rsn_ie ? + bss_desc->bcn_rsn_ie->ieee_hdr.element_id : 0, priv->sec_info.encryption_mode, bss_desc->privacy); return true; @@ -464,10 +458,10 @@ mwifiex_is_network_compatible(struct mwifiex_private *priv, "info: %s: failed: wpa_ie=%#x wpa2_ie=%#x WEP=%s\t" "WPA=%s WPA2=%s EncMode=%#x privacy=%#x\n", __func__, - (bss_desc->bcn_wpa_ie) ? - (*bss_desc->bcn_wpa_ie).vend_hdr.element_id : 0, - (bss_desc->bcn_rsn_ie) ? - (*bss_desc->bcn_rsn_ie).ieee_hdr.element_id : 0, + bss_desc->bcn_wpa_ie ? + bss_desc->bcn_wpa_ie->vend_hdr.element_id : 0, + bss_desc->bcn_rsn_ie ? + bss_desc->bcn_rsn_ie->ieee_hdr.element_id : 0, (priv->sec_info.wep_enabled) ? 
"e" : "d", (priv->sec_info.wpa_enabled) ? "e" : "d", (priv->sec_info.wpa2_enabled) ? "e" : "d", From 38329568c3f7075c7751984a65e16a9c7fc186bd Mon Sep 17 00:00:00 2001 From: Andreas Fenkart Date: Thu, 10 Mar 2016 09:44:06 +0100 Subject: [PATCH 0698/1649] mwifiex: scan: factor out has_ieee_hdr/has_vendor_hdr Signed-off-by: Andreas Fenkart Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/scan.c | 52 +++++++++------------ 1 file changed, 23 insertions(+), 29 deletions(-) diff --git a/drivers/net/wireless/marvell/mwifiex/scan.c b/drivers/net/wireless/marvell/mwifiex/scan.c index 99026cef3179..e926d3ab7286 100644 --- a/drivers/net/wireless/marvell/mwifiex/scan.c +++ b/drivers/net/wireless/marvell/mwifiex/scan.c @@ -76,6 +76,18 @@ static u8 mwifiex_rsn_oui[CIPHER_SUITE_MAX][4] = { { 0x00, 0x0f, 0xac, 0x04 }, /* AES */ }; +static bool +has_ieee_hdr(struct ieee_types_generic *ie, u8 key) +{ + return (ie && ie->ieee_hdr.element_id == key); +} + +static bool +has_vendor_hdr(struct ieee_types_vendor_specific *ie, u8 key) +{ + return (ie && ie->vend_hdr.element_id == key); +} + /* * This function parses a given IE for a given OUI. * @@ -121,8 +133,7 @@ mwifiex_is_rsn_oui_present(struct mwifiex_bssdescriptor *bss_desc, u32 cipher) struct ie_body *iebody; u8 ret = MWIFIEX_OUI_NOT_PRESENT; - if (bss_desc->bcn_rsn_ie && - bss_desc->bcn_rsn_ie->ieee_hdr.element_id == WLAN_EID_RSN) { + if (has_ieee_hdr(bss_desc->bcn_rsn_ie, WLAN_EID_RSN)) { iebody = (struct ie_body *) (((u8 *) bss_desc->bcn_rsn_ie->data) + RSN_GTK_OUI_OFFSET); @@ -148,9 +159,7 @@ mwifiex_is_wpa_oui_present(struct mwifiex_bssdescriptor *bss_desc, u32 cipher) struct ie_body *iebody; u8 ret = MWIFIEX_OUI_NOT_PRESENT; - if (bss_desc->bcn_wpa_ie && - bss_desc->bcn_wpa_ie->vend_hdr.element_id == - WLAN_EID_VENDOR_SPECIFIC) { + if (has_vendor_hdr(bss_desc->bcn_wpa_ie, WLAN_EID_VENDOR_SPECIFIC)) { iebody = (struct ie_body *) bss_desc->bcn_wpa_ie->data; oui = &mwifiex_wpa_oui[cipher][0]; ret = mwifiex_search_oui_in_ie(iebody, oui); @@ -180,11 +189,8 @@ mwifiex_is_bss_wapi(struct mwifiex_private *priv, struct mwifiex_bssdescriptor *bss_desc) { if (priv->sec_info.wapi_enabled && - (bss_desc->bcn_wapi_ie && - bss_desc->bcn_wapi_ie->ieee_hdr.element_id == - WLAN_EID_BSS_AC_ACCESS_DELAY)) { + has_ieee_hdr(bss_desc->bcn_wapi_ie, WLAN_EID_BSS_AC_ACCESS_DELAY)) return true; - } return false; } @@ -198,11 +204,8 @@ mwifiex_is_bss_no_sec(struct mwifiex_private *priv, { if (!priv->sec_info.wep_enabled && !priv->sec_info.wpa_enabled && !priv->sec_info.wpa2_enabled && - (!bss_desc->bcn_wpa_ie || - bss_desc->bcn_wpa_ie->vend_hdr.element_id != - WLAN_EID_VENDOR_SPECIFIC) && - (!bss_desc->bcn_rsn_ie || - bss_desc->bcn_rsn_ie->ieee_hdr.element_id != WLAN_EID_RSN) && + !has_vendor_hdr(bss_desc->bcn_wpa_ie, WLAN_EID_VENDOR_SPECIFIC) && + !has_ieee_hdr(bss_desc->bcn_rsn_ie, WLAN_EID_RSN) && !priv->sec_info.encryption_mode && !bss_desc->privacy) { return true; } @@ -234,9 +237,7 @@ mwifiex_is_bss_wpa(struct mwifiex_private *priv, { if (!priv->sec_info.wep_enabled && priv->sec_info.wpa_enabled && !priv->sec_info.wpa2_enabled && - (bss_desc->bcn_wpa_ie && - bss_desc->bcn_wpa_ie->vend_hdr.element_id == - WLAN_EID_VENDOR_SPECIFIC) + has_vendor_hdr(bss_desc->bcn_wpa_ie, WLAN_EID_VENDOR_SPECIFIC) /* * Privacy bit may NOT be set in some APs like * LinkSys WRT54G && bss_desc->privacy @@ -270,8 +271,7 @@ mwifiex_is_bss_wpa2(struct mwifiex_private *priv, { if (!priv->sec_info.wep_enabled && !priv->sec_info.wpa_enabled && priv->sec_info.wpa2_enabled && - 
(bss_desc->bcn_rsn_ie && - bss_desc->bcn_rsn_ie->ieee_hdr.element_id == WLAN_EID_RSN)) { + has_ieee_hdr(bss_desc->bcn_rsn_ie, WLAN_EID_RSN)) { /* * Privacy bit may NOT be set in some APs like * LinkSys WRT54G && bss_desc->privacy @@ -304,11 +304,8 @@ mwifiex_is_bss_adhoc_aes(struct mwifiex_private *priv, { if (!priv->sec_info.wep_enabled && !priv->sec_info.wpa_enabled && !priv->sec_info.wpa2_enabled && - (!bss_desc->bcn_wpa_ie || - bss_desc->bcn_wpa_ie->vend_hdr.element_id != - WLAN_EID_VENDOR_SPECIFIC) && - (!bss_desc->bcn_rsn_ie || - bss_desc->bcn_rsn_ie->ieee_hdr.element_id != WLAN_EID_RSN) && + !has_vendor_hdr(bss_desc->bcn_wpa_ie, WLAN_EID_VENDOR_SPECIFIC) && + !has_ieee_hdr(bss_desc->bcn_rsn_ie, WLAN_EID_RSN) && !priv->sec_info.encryption_mode && bss_desc->privacy) { return true; } @@ -325,11 +322,8 @@ mwifiex_is_bss_dynamic_wep(struct mwifiex_private *priv, { if (!priv->sec_info.wep_enabled && !priv->sec_info.wpa_enabled && !priv->sec_info.wpa2_enabled && - (!bss_desc->bcn_wpa_ie || - bss_desc->bcn_wpa_ie->vend_hdr.element_id != - WLAN_EID_VENDOR_SPECIFIC) && - (!bss_desc->bcn_rsn_ie || - bss_desc->bcn_rsn_ie->ieee_hdr.element_id != WLAN_EID_RSN) && + !has_vendor_hdr(bss_desc->bcn_wpa_ie, WLAN_EID_VENDOR_SPECIFIC) && + !has_ieee_hdr(bss_desc->bcn_rsn_ie, WLAN_EID_RSN) && priv->sec_info.encryption_mode && bss_desc->privacy) { mwifiex_dbg(priv->adapter, INFO, "info: %s: dynamic\t" From 2ccf7cef0cf984241d0b6932ed1cfb1e0d6587d5 Mon Sep 17 00:00:00 2001 From: Andreas Fenkart Date: Thu, 10 Mar 2016 09:44:07 +0100 Subject: [PATCH 0699/1649] mwifiex: scan: simplify ternary operators using gnu extension "x ? x : y" can be simplified as "x ? : y" https://gcc.gnu.org/onlinedocs/gcc/Conditionals.html#Conditionals Reviewed-by: Julian Calaby Signed-off-by: Andreas Fenkart Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/scan.c | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/drivers/net/wireless/marvell/mwifiex/scan.c b/drivers/net/wireless/marvell/mwifiex/scan.c index e926d3ab7286..11f8a608b8af 100644 --- a/drivers/net/wireless/marvell/mwifiex/scan.c +++ b/drivers/net/wireless/marvell/mwifiex/scan.c @@ -900,14 +900,11 @@ mwifiex_config_scan(struct mwifiex_private *priv, /* Set the BSS type scan filter, use Adapter setting if unset */ scan_cfg_out->bss_mode = - (user_scan_in->bss_mode ? (u8) user_scan_in-> - bss_mode : (u8) adapter->scan_mode); + (u8)(user_scan_in->bss_mode ?: adapter->scan_mode); /* Set the number of probes to send, use Adapter setting if unset */ - num_probes = - (user_scan_in->num_probes ? 
user_scan_in-> - num_probes : adapter->scan_probes); + num_probes = user_scan_in->num_probes ?: adapter->scan_probes; /* * Set the BSSID filter to the incoming configuration, From 679b687bc96ddf2ef077ca4aa755924032bd18a8 Mon Sep 17 00:00:00 2001 From: Andreas Fenkart Date: Thu, 10 Mar 2016 09:44:08 +0100 Subject: [PATCH 0700/1649] mwifiex: scan: factor out dbg_security_flags merge copy/paste code Signed-off-by: Andreas Fenkart Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/scan.c | 74 +++++++-------------- 1 file changed, 25 insertions(+), 49 deletions(-) diff --git a/drivers/net/wireless/marvell/mwifiex/scan.c b/drivers/net/wireless/marvell/mwifiex/scan.c index 11f8a608b8af..11d57185a090 100644 --- a/drivers/net/wireless/marvell/mwifiex/scan.c +++ b/drivers/net/wireless/marvell/mwifiex/scan.c @@ -76,6 +76,27 @@ static u8 mwifiex_rsn_oui[CIPHER_SUITE_MAX][4] = { { 0x00, 0x0f, 0xac, 0x04 }, /* AES */ }; +static void +_dbg_security_flags(int log_level, const char *func, const char *desc, + struct mwifiex_private *priv, + struct mwifiex_bssdescriptor *bss_desc) +{ + _mwifiex_dbg(priv->adapter, log_level, + "info: %s: %s:\twpa_ie=%#x wpa2_ie=%#x WEP=%s WPA=%s WPA2=%s\tEncMode=%#x privacy=%#x\n", + func, desc, + bss_desc->bcn_wpa_ie ? + bss_desc->bcn_wpa_ie->vend_hdr.element_id : 0, + bss_desc->bcn_rsn_ie ? + bss_desc->bcn_rsn_ie->ieee_hdr.element_id : 0, + priv->sec_info.wep_enabled ? "e" : "d", + priv->sec_info.wpa_enabled ? "e" : "d", + priv->sec_info.wpa2_enabled ? "e" : "d", + priv->sec_info.encryption_mode, + bss_desc->privacy); +} +#define dbg_security_flags(mask, desc, priv, bss_desc) \ + _dbg_security_flags(MWIFIEX_DBG_##mask, desc, __func__, priv, bss_desc) + static bool has_ieee_hdr(struct ieee_types_generic *ie, u8 key) { @@ -243,19 +264,7 @@ mwifiex_is_bss_wpa(struct mwifiex_private *priv, * LinkSys WRT54G && bss_desc->privacy */ ) { - mwifiex_dbg(priv->adapter, INFO, - "info: %s: WPA:\t" - "wpa_ie=%#x wpa2_ie=%#x WEP=%s WPA=%s WPA2=%s\t" - "EncMode=%#x privacy=%#x\n", __func__, - bss_desc->bcn_wpa_ie ? - bss_desc->bcn_wpa_ie->vend_hdr.element_id : 0, - bss_desc->bcn_rsn_ie ? - bss_desc->bcn_rsn_ie->ieee_hdr.element_id : 0, - (priv->sec_info.wep_enabled) ? "e" : "d", - (priv->sec_info.wpa_enabled) ? "e" : "d", - (priv->sec_info.wpa2_enabled) ? "e" : "d", - priv->sec_info.encryption_mode, - bss_desc->privacy); + dbg_security_flags(INFO, "WPA", priv, bss_desc); return true; } return false; @@ -276,19 +285,7 @@ mwifiex_is_bss_wpa2(struct mwifiex_private *priv, * Privacy bit may NOT be set in some APs like * LinkSys WRT54G && bss_desc->privacy */ - mwifiex_dbg(priv->adapter, INFO, - "info: %s: WPA2:\t" - "wpa_ie=%#x wpa2_ie=%#x WEP=%s WPA=%s WPA2=%s\t" - "EncMode=%#x privacy=%#x\n", __func__, - bss_desc->bcn_wpa_ie ? - bss_desc->bcn_wpa_ie->vend_hdr.element_id : 0, - bss_desc->bcn_rsn_ie ? - bss_desc->bcn_rsn_ie->ieee_hdr.element_id : 0, - (priv->sec_info.wep_enabled) ? "e" : "d", - (priv->sec_info.wpa_enabled) ? "e" : "d", - (priv->sec_info.wpa2_enabled) ? 
"e" : "d", - priv->sec_info.encryption_mode, - bss_desc->privacy); + dbg_security_flags(INFO, "WAP2", priv, bss_desc); return true; } return false; @@ -325,17 +322,7 @@ mwifiex_is_bss_dynamic_wep(struct mwifiex_private *priv, !has_vendor_hdr(bss_desc->bcn_wpa_ie, WLAN_EID_VENDOR_SPECIFIC) && !has_ieee_hdr(bss_desc->bcn_rsn_ie, WLAN_EID_RSN) && priv->sec_info.encryption_mode && bss_desc->privacy) { - mwifiex_dbg(priv->adapter, INFO, - "info: %s: dynamic\t" - "WEP: wpa_ie=%#x wpa2_ie=%#x\t" - "EncMode=%#x privacy=%#x\n", - __func__, - bss_desc->bcn_wpa_ie ? - bss_desc->bcn_wpa_ie->vend_hdr.element_id : 0, - bss_desc->bcn_rsn_ie ? - bss_desc->bcn_rsn_ie->ieee_hdr.element_id : 0, - priv->sec_info.encryption_mode, - bss_desc->privacy); + dbg_security_flags(INFO, "dynamic", priv, bss_desc); return true; } return false; @@ -448,18 +435,7 @@ mwifiex_is_network_compatible(struct mwifiex_private *priv, } /* Security doesn't match */ - mwifiex_dbg(adapter, ERROR, - "info: %s: failed: wpa_ie=%#x wpa2_ie=%#x WEP=%s\t" - "WPA=%s WPA2=%s EncMode=%#x privacy=%#x\n", - __func__, - bss_desc->bcn_wpa_ie ? - bss_desc->bcn_wpa_ie->vend_hdr.element_id : 0, - bss_desc->bcn_rsn_ie ? - bss_desc->bcn_rsn_ie->ieee_hdr.element_id : 0, - (priv->sec_info.wep_enabled) ? "e" : "d", - (priv->sec_info.wpa_enabled) ? "e" : "d", - (priv->sec_info.wpa2_enabled) ? "e" : "d", - priv->sec_info.encryption_mode, bss_desc->privacy); + dbg_security_flags(ERROR, "failed", priv, bss_desc); return -1; } From 948ad6b34943a1247653392d59bcfc9896da8fe7 Mon Sep 17 00:00:00 2001 From: Andreas Fenkart Date: Thu, 10 Mar 2016 09:44:09 +0100 Subject: [PATCH 0701/1649] mwifiex: scan: replace pointer arithmetic with array access improves readability Reviewed-by: Julian Calaby Signed-off-by: Andreas Fenkart Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/scan.c | 17 +++++++---------- 1 file changed, 7 insertions(+), 10 deletions(-) diff --git a/drivers/net/wireless/marvell/mwifiex/scan.c b/drivers/net/wireless/marvell/mwifiex/scan.c index 11d57185a090..753d92a731af 100644 --- a/drivers/net/wireless/marvell/mwifiex/scan.c +++ b/drivers/net/wireless/marvell/mwifiex/scan.c @@ -1055,27 +1055,24 @@ mwifiex_config_scan(struct mwifiex_private *priv, chan_idx++) { channel = user_scan_in->chan_list[chan_idx].chan_number; - (scan_chan_list + chan_idx)->chan_number = channel; + scan_chan_list[chan_idx].chan_number = channel; radio_type = user_scan_in->chan_list[chan_idx].radio_type; - (scan_chan_list + chan_idx)->radio_type = radio_type; + scan_chan_list[chan_idx].radio_type = radio_type; scan_type = user_scan_in->chan_list[chan_idx].scan_type; if (scan_type == MWIFIEX_SCAN_TYPE_PASSIVE) - (scan_chan_list + - chan_idx)->chan_scan_mode_bitmap + scan_chan_list[chan_idx].chan_scan_mode_bitmap |= (MWIFIEX_PASSIVE_SCAN | MWIFIEX_HIDDEN_SSID_REPORT); else - (scan_chan_list + - chan_idx)->chan_scan_mode_bitmap + scan_chan_list[chan_idx].chan_scan_mode_bitmap &= ~MWIFIEX_PASSIVE_SCAN; if (*filtered_scan) - (scan_chan_list + - chan_idx)->chan_scan_mode_bitmap + scan_chan_list[chan_idx].chan_scan_mode_bitmap |= MWIFIEX_DISABLE_CHAN_FILT; if (user_scan_in->chan_list[chan_idx].scan_time) { @@ -1090,9 +1087,9 @@ mwifiex_config_scan(struct mwifiex_private *priv, scan_dur = adapter->active_scan_time; } - (scan_chan_list + chan_idx)->min_scan_time = + scan_chan_list[chan_idx].min_scan_time = cpu_to_le16(scan_dur); - (scan_chan_list + chan_idx)->max_scan_time = + scan_chan_list[chan_idx].max_scan_time = cpu_to_le16(scan_dur); } From 
c70ca8cb9a7c6722d5bb6d428b6571921998c48d Mon Sep 17 00:00:00 2001 From: Andreas Fenkart Date: Thu, 10 Mar 2016 09:44:10 +0100 Subject: [PATCH 0702/1649] mwifiex: factor out mwifiex_cancel_pending_scan_cmd Releasing the scan_pending lock in mwifiex_check_next_scan_command introduces a short window where pending scan commands can be removed or added before removing them all in mwifiex_cancel_pending_scan_cmd. I think this is safe, since the worst thing to happen is that a pending scan cmd is removed by the command handler. Adding new scan commands is not possible while one is pending, see scan_processing flag. Since all commands are removed from the queue anyway, we don't care if some commands are removed by a different code path earlier, the final state remains the same. I assume, that the critical section needed for the check has been extended over clearing the pending scan queue out of convenience. The lock was already held and releasing it and grab it again was just more work. It doesn't seem to be necessary because of concurrency. Signed-off-by: Andreas Fenkart Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/cmdevt.c | 43 +++++++++---------- drivers/net/wireless/marvell/mwifiex/main.h | 1 + drivers/net/wireless/marvell/mwifiex/scan.c | 23 +++------- .../wireless/marvell/mwifiex/sta_cmdresp.c | 13 +----- 4 files changed, 27 insertions(+), 53 deletions(-) diff --git a/drivers/net/wireless/marvell/mwifiex/cmdevt.c b/drivers/net/wireless/marvell/mwifiex/cmdevt.c index 6f0470646483..f5af525485f6 100644 --- a/drivers/net/wireless/marvell/mwifiex/cmdevt.c +++ b/drivers/net/wireless/marvell/mwifiex/cmdevt.c @@ -991,6 +991,23 @@ mwifiex_cmd_timeout_func(unsigned long function_context) adapter->if_ops.card_reset(adapter); } +void +mwifiex_cancel_pending_scan_cmd(struct mwifiex_adapter *adapter) +{ + struct cmd_ctrl_node *cmd_node = NULL, *tmp_node; + unsigned long flags; + + /* Cancel all pending scan command */ + spin_lock_irqsave(&adapter->scan_pending_q_lock, flags); + list_for_each_entry_safe(cmd_node, tmp_node, + &adapter->scan_pending_q, list) { + list_del(&cmd_node->list); + cmd_node->wait_q_enabled = false; + mwifiex_insert_cmd_to_free_q(adapter, cmd_node); + } + spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags); +} + /* * This function cancels all the pending commands. 
* @@ -1029,16 +1046,7 @@ mwifiex_cancel_all_pending_cmd(struct mwifiex_adapter *adapter) spin_unlock_irqrestore(&adapter->cmd_pending_q_lock, flags); spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags); - /* Cancel all pending scan command */ - spin_lock_irqsave(&adapter->scan_pending_q_lock, flags); - list_for_each_entry_safe(cmd_node, tmp_node, - &adapter->scan_pending_q, list) { - list_del(&cmd_node->list); - - cmd_node->wait_q_enabled = false; - mwifiex_insert_cmd_to_free_q(adapter, cmd_node); - } - spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags); + mwifiex_cancel_pending_scan_cmd(adapter); if (adapter->scan_processing) { spin_lock_irqsave(&adapter->mwifiex_cmd_lock, cmd_flags); @@ -1070,9 +1078,8 @@ mwifiex_cancel_all_pending_cmd(struct mwifiex_adapter *adapter) void mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter) { - struct cmd_ctrl_node *cmd_node = NULL, *tmp_node = NULL; + struct cmd_ctrl_node *cmd_node = NULL; unsigned long cmd_flags; - unsigned long scan_pending_q_flags; struct mwifiex_private *priv; int i; @@ -1094,17 +1101,7 @@ mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter) mwifiex_recycle_cmd_node(adapter, cmd_node); } - /* Cancel all pending scan command */ - spin_lock_irqsave(&adapter->scan_pending_q_lock, - scan_pending_q_flags); - list_for_each_entry_safe(cmd_node, tmp_node, - &adapter->scan_pending_q, list) { - list_del(&cmd_node->list); - cmd_node->wait_q_enabled = false; - mwifiex_insert_cmd_to_free_q(adapter, cmd_node); - } - spin_unlock_irqrestore(&adapter->scan_pending_q_lock, - scan_pending_q_flags); + mwifiex_cancel_pending_scan_cmd(adapter); if (adapter->scan_processing) { spin_lock_irqsave(&adapter->mwifiex_cmd_lock, cmd_flags); diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h index a159fbef20cd..6306654d2799 100644 --- a/drivers/net/wireless/marvell/mwifiex/main.h +++ b/drivers/net/wireless/marvell/mwifiex/main.h @@ -1042,6 +1042,7 @@ int mwifiex_alloc_cmd_buffer(struct mwifiex_adapter *adapter); int mwifiex_free_cmd_buffer(struct mwifiex_adapter *adapter); void mwifiex_cancel_all_pending_cmd(struct mwifiex_adapter *adapter); void mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter); +void mwifiex_cancel_pending_scan_cmd(struct mwifiex_adapter *adapter); void mwifiex_insert_cmd_to_free_q(struct mwifiex_adapter *adapter, struct cmd_ctrl_node *cmd_node); diff --git a/drivers/net/wireless/marvell/mwifiex/scan.c b/drivers/net/wireless/marvell/mwifiex/scan.c index 753d92a731af..36cc9cca95fc 100644 --- a/drivers/net/wireless/marvell/mwifiex/scan.c +++ b/drivers/net/wireless/marvell/mwifiex/scan.c @@ -619,8 +619,6 @@ mwifiex_scan_channel_list(struct mwifiex_private *priv, int ret = 0; struct mwifiex_chan_scan_param_set *tmp_chan_list; struct mwifiex_chan_scan_param_set *start_chan; - struct cmd_ctrl_node *cmd_node, *tmp_node; - unsigned long flags; u32 tlv_idx, rates_size, cmd_no; u32 total_scan_time; u32 done_early; @@ -777,16 +775,7 @@ mwifiex_scan_channel_list(struct mwifiex_private *priv, sizeof(struct mwifiex_ie_types_header) + rates_size; if (ret) { - spin_lock_irqsave(&adapter->scan_pending_q_lock, flags); - list_for_each_entry_safe(cmd_node, tmp_node, - &adapter->scan_pending_q, - list) { - list_del(&cmd_node->list); - cmd_node->wait_q_enabled = false; - mwifiex_insert_cmd_to_free_q(adapter, cmd_node); - } - spin_unlock_irqrestore(&adapter->scan_pending_q_lock, - flags); + mwifiex_cancel_pending_scan_cmd(adapter); break; } } @@ -1949,12 +1938,13 @@ 
mwifiex_active_scan_req_for_passive_chan(struct mwifiex_private *priv) static void mwifiex_check_next_scan_command(struct mwifiex_private *priv) { struct mwifiex_adapter *adapter = priv->adapter; - struct cmd_ctrl_node *cmd_node, *tmp_node; + struct cmd_ctrl_node *cmd_node; unsigned long flags; spin_lock_irqsave(&adapter->scan_pending_q_lock, flags); if (list_empty(&adapter->scan_pending_q)) { spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags); + spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags); adapter->scan_processing = false; spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags); @@ -1976,13 +1966,10 @@ static void mwifiex_check_next_scan_command(struct mwifiex_private *priv) } } else if ((priv->scan_aborting && !priv->scan_request) || priv->scan_block) { - list_for_each_entry_safe(cmd_node, tmp_node, - &adapter->scan_pending_q, list) { - list_del(&cmd_node->list); - mwifiex_insert_cmd_to_free_q(adapter, cmd_node); - } spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags); + mwifiex_cancel_pending_scan_cmd(adapter); + spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags); adapter->scan_processing = false; spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags); diff --git a/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c b/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c index 434b9776db45..d18c7979d723 100644 --- a/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c +++ b/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c @@ -44,7 +44,6 @@ static void mwifiex_process_cmdresp_error(struct mwifiex_private *priv, struct host_cmd_ds_command *resp) { - struct cmd_ctrl_node *cmd_node = NULL, *tmp_node; struct mwifiex_adapter *adapter = priv->adapter; struct host_cmd_ds_802_11_ps_mode_enh *pm; unsigned long flags; @@ -71,17 +70,7 @@ mwifiex_process_cmdresp_error(struct mwifiex_private *priv, break; case HostCmd_CMD_802_11_SCAN: case HostCmd_CMD_802_11_SCAN_EXT: - /* Cancel all pending scan command */ - spin_lock_irqsave(&adapter->scan_pending_q_lock, flags); - list_for_each_entry_safe(cmd_node, tmp_node, - &adapter->scan_pending_q, list) { - list_del(&cmd_node->list); - spin_unlock_irqrestore(&adapter->scan_pending_q_lock, - flags); - mwifiex_insert_cmd_to_free_q(adapter, cmd_node); - spin_lock_irqsave(&adapter->scan_pending_q_lock, flags); - } - spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags); + mwifiex_cancel_pending_scan_cmd(adapter); spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags); adapter->scan_processing = false; From 85abfb1239ecda32222c161670667c7b2f6832be Mon Sep 17 00:00:00 2001 From: Andreas Fenkart Date: Thu, 10 Mar 2016 09:44:11 +0100 Subject: [PATCH 0703/1649] mwifiex: make mwifiex_insert_cmd_to_free_q local static after factoring out mwifiex_cancel_pending_scan_cmd the function is not called outside of cmdevt file moved function to head of file to avoid forward declaration, also moved mwifiex_recycle_cmd_node since they are very similar Signed-off-by: Andreas Fenkart Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/cmdevt.c | 82 +++++++++---------- drivers/net/wireless/marvell/mwifiex/main.h | 2 - 2 files changed, 41 insertions(+), 43 deletions(-) diff --git a/drivers/net/wireless/marvell/mwifiex/cmdevt.c b/drivers/net/wireless/marvell/mwifiex/cmdevt.c index f5af525485f6..6bc2011d8609 100644 --- a/drivers/net/wireless/marvell/mwifiex/cmdevt.c +++ b/drivers/net/wireless/marvell/mwifiex/cmdevt.c @@ -104,6 +104,47 @@ mwifiex_clean_cmd_node(struct mwifiex_adapter *adapter, } } +/* + * This function returns a command 
to the command free queue. + * + * The function also calls the completion callback if required, before + * cleaning the command node and re-inserting it into the free queue. + */ +static void +mwifiex_insert_cmd_to_free_q(struct mwifiex_adapter *adapter, + struct cmd_ctrl_node *cmd_node) +{ + unsigned long flags; + + if (!cmd_node) + return; + + if (cmd_node->wait_q_enabled) + mwifiex_complete_cmd(adapter, cmd_node); + /* Clean the node */ + mwifiex_clean_cmd_node(adapter, cmd_node); + + /* Insert node into cmd_free_q */ + spin_lock_irqsave(&adapter->cmd_free_q_lock, flags); + list_add_tail(&cmd_node->list, &adapter->cmd_free_q); + spin_unlock_irqrestore(&adapter->cmd_free_q_lock, flags); +} + +/* This function reuses a command node. */ +void mwifiex_recycle_cmd_node(struct mwifiex_adapter *adapter, + struct cmd_ctrl_node *cmd_node) +{ + struct host_cmd_ds_command *host_cmd = (void *)cmd_node->cmd_skb->data; + + mwifiex_insert_cmd_to_free_q(adapter, cmd_node); + + atomic_dec(&adapter->cmd_pending); + mwifiex_dbg(adapter, CMD, + "cmd: FREE_CMD: cmd=%#x, cmd_pending=%d\n", + le16_to_cpu(host_cmd->command), + atomic_read(&adapter->cmd_pending)); +} + /* * This function sends a host command to the firmware. * @@ -613,47 +654,6 @@ int mwifiex_send_cmd(struct mwifiex_private *priv, u16 cmd_no, return ret; } -/* - * This function returns a command to the command free queue. - * - * The function also calls the completion callback if required, before - * cleaning the command node and re-inserting it into the free queue. - */ -void -mwifiex_insert_cmd_to_free_q(struct mwifiex_adapter *adapter, - struct cmd_ctrl_node *cmd_node) -{ - unsigned long flags; - - if (!cmd_node) - return; - - if (cmd_node->wait_q_enabled) - mwifiex_complete_cmd(adapter, cmd_node); - /* Clean the node */ - mwifiex_clean_cmd_node(adapter, cmd_node); - - /* Insert node into cmd_free_q */ - spin_lock_irqsave(&adapter->cmd_free_q_lock, flags); - list_add_tail(&cmd_node->list, &adapter->cmd_free_q); - spin_unlock_irqrestore(&adapter->cmd_free_q_lock, flags); -} - -/* This function reuses a command node. */ -void mwifiex_recycle_cmd_node(struct mwifiex_adapter *adapter, - struct cmd_ctrl_node *cmd_node) -{ - struct host_cmd_ds_command *host_cmd = (void *)cmd_node->cmd_skb->data; - - mwifiex_insert_cmd_to_free_q(adapter, cmd_node); - - atomic_dec(&adapter->cmd_pending); - mwifiex_dbg(adapter, CMD, - "cmd: FREE_CMD: cmd=%#x, cmd_pending=%d\n", - le16_to_cpu(host_cmd->command), - atomic_read(&adapter->cmd_pending)); -} - /* * This function queues a command to the command pending queue. 
* diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h index 6306654d2799..63069dd8b8e8 100644 --- a/drivers/net/wireless/marvell/mwifiex/main.h +++ b/drivers/net/wireless/marvell/mwifiex/main.h @@ -1044,8 +1044,6 @@ void mwifiex_cancel_all_pending_cmd(struct mwifiex_adapter *adapter); void mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter); void mwifiex_cancel_pending_scan_cmd(struct mwifiex_adapter *adapter); -void mwifiex_insert_cmd_to_free_q(struct mwifiex_adapter *adapter, - struct cmd_ctrl_node *cmd_node); void mwifiex_recycle_cmd_node(struct mwifiex_adapter *adapter, struct cmd_ctrl_node *cmd_node); From c157863d99797f6fed60e6e56d53afeb0bc3b436 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Thu, 14 Apr 2016 14:58:42 -0400 Subject: [PATCH 0704/1649] rtl8xxxu: Reorder parts of init code to match the 8192eu vendor code flow In order to debug 8192eu support, reorder some init code to match the flow of the vendor driver. Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- .../net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 35 +++++++++++-------- 1 file changed, 20 insertions(+), 15 deletions(-) diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index e36fda8c1ad3..d67b88665194 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -7592,6 +7592,26 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw) if (ret) goto exit; + /* RFSW Control - clear bit 14 ?? */ + if (priv->rtl_chip != RTL8723B) + rtl8xxxu_write32(priv, REG_FPGA0_TX_INFO, 0x00000003); + /* 0x07000760 */ + if (priv->rtl_chip == RTL8192E) { + val32 = 0; + } else { + val32 = FPGA0_RF_TRSW | FPGA0_RF_TRSWB | FPGA0_RF_ANTSW | + FPGA0_RF_ANTSWB | FPGA0_RF_PAPE | + ((FPGA0_RF_ANTSW | FPGA0_RF_ANTSWB | FPGA0_RF_PAPE) << + FPGA0_RF_BD_CTRL_SHIFT); + } + rtl8xxxu_write32(priv, REG_FPGA0_XAB_RF_SW_CTRL, val32); + /* 0x860[6:5]= 00 - why? - this sets antenna B */ + if (priv->rtl_chip != RTL8192E) + rtl8xxxu_write32(priv, REG_FPGA0_XA_RF_INT_OE, 0x66f60210); + + priv->rf_mode_ag[0] = rtl8xxxu_read_rfreg(priv, RF_A, + RF6052_REG_MODE_AG); + /* * Chip specific quirks */ @@ -7653,21 +7673,6 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw) if (ret) goto exit; - /* RFSW Control - clear bit 14 ?? */ - if (priv->rtl_chip != RTL8723B) - rtl8xxxu_write32(priv, REG_FPGA0_TX_INFO, 0x00000003); - /* 0x07000760 */ - val32 = FPGA0_RF_TRSW | FPGA0_RF_TRSWB | FPGA0_RF_ANTSW | - FPGA0_RF_ANTSWB | FPGA0_RF_PAPE | - ((FPGA0_RF_ANTSW | FPGA0_RF_ANTSWB | FPGA0_RF_PAPE) << - FPGA0_RF_BD_CTRL_SHIFT); - rtl8xxxu_write32(priv, REG_FPGA0_XAB_RF_SW_CTRL, val32); - /* 0x860[6:5]= 00 - why? - this sets antenna B */ - rtl8xxxu_write32(priv, REG_FPGA0_XA_RF_INT_OE, 0x66F60210); - - priv->rf_mode_ag[0] = rtl8xxxu_read_rfreg(priv, RF_A, - RF6052_REG_MODE_AG); - /* * Set RX page boundary */ From 59b24dad20d41c00b51f5ca549a9e83a53671bc9 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Thu, 14 Apr 2016 14:58:43 -0400 Subject: [PATCH 0705/1649] rtl8xxxu: Reorg more code to match the flow of the 8192eu vendor driver This further reorganizes the init code flow to match that of the 8192eu vendor driver. This helps diffing the register write log against that of the vendor driver. 
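One way to obtain such a register write log is to route every write helper through a wrapper that prints each access in a stable, diff-friendly format. Below is a minimal userspace sketch of that idea only; log_write32() and the register offsets/values in it are placeholders invented for illustration, not the driver's API or init sequence.

  #include <stdio.h>
  #include <stdint.h>

  /* Stand-in for a register write helper: print each access in a fixed
   * "w32 <reg> <value>" format so two captured logs can be compared with
   * plain diff(1). A real driver would also perform the bus write here.
   */
  static void log_write32(uint16_t reg, uint32_t val)
  {
          printf("w32 0x%04x 0x%08x\n", (unsigned)reg, (unsigned)val);
  }

  int main(void)
  {
          /* hypothetical init sequence; the ordering is what the diff exposes */
          log_write32(0x0200, 0x80e70c08);  /* placeholder reserved-page load */
          log_write32(0x0214, 0x00000000);  /* placeholder second write */
          return 0;
  }

Capturing one such log from this driver and one from the vendor driver, then diffing the two, shows exactly which writes moved and which changed value after a reordering like the one below.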
Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- .../net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 109 +++++++++--------- 1 file changed, 56 insertions(+), 53 deletions(-) diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index d67b88665194..c6c41ba2a511 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -7468,34 +7468,39 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw) goto exit; } - dev_dbg(dev, "%s: macpower %i\n", __func__, macpower); if (!macpower) { - ret = priv->fops->llt_init(priv, TX_TOTAL_PAGE_NUM); - if (ret) { - dev_warn(dev, "%s: LLT table init failed\n", __func__); - goto exit; - } + if (priv->ep_tx_normal_queue) + val8 = TX_PAGE_NUM_NORM_PQ; + else + val8 = 0; - /* - * Presumably this is for 8188EU as well - * Enable TX report and TX report timer - */ - if (priv->rtl_chip == RTL8723B) { - val8 = rtl8xxxu_read8(priv, REG_TX_REPORT_CTRL); - val8 |= TX_REPORT_CTRL_TIMER_ENABLE; - rtl8xxxu_write8(priv, REG_TX_REPORT_CTRL, val8); - /* Set MAX RPT MACID */ - rtl8xxxu_write8(priv, REG_TX_REPORT_CTRL + 1, 0x02); - /* TX report Timer. Unit: 32us */ - rtl8xxxu_write16(priv, REG_TX_REPORT_TIME, 0xcdf0); + rtl8xxxu_write8(priv, REG_RQPN_NPQ, val8); - /* tmp ps ? */ - val8 = rtl8xxxu_read8(priv, 0xa3); - val8 &= 0xf8; - rtl8xxxu_write8(priv, 0xa3, val8); - } + val32 = (TX_PAGE_NUM_PUBQ << RQPN_NORM_PQ_SHIFT) | RQPN_LOAD; + + if (priv->ep_tx_high_queue) + val32 |= (TX_PAGE_NUM_HI_PQ << RQPN_HI_PQ_SHIFT); + if (priv->ep_tx_low_queue) + val32 |= (TX_PAGE_NUM_LO_PQ << RQPN_LO_PQ_SHIFT); + + rtl8xxxu_write32(priv, REG_RQPN, val32); } + ret = rtl8xxxu_init_queue_priority(priv); + dev_dbg(dev, "%s: init_queue_priority %i\n", __func__, ret); + if (ret) + goto exit; + + /* + * Set RX page boundary + */ + if (priv->rtl_chip == RTL8723B) + rtl8xxxu_write16(priv, REG_TRXFF_BNDY + 2, 0x3f7f); + else if (priv->rtl_chip == RTL8192E) + rtl8xxxu_write16(priv, REG_TRXFF_BNDY + 2, 0x3cff); + else + rtl8xxxu_write16(priv, REG_TRXFF_BNDY + 2, 0x27ff); + ret = rtl8xxxu_download_firmware(priv); dev_dbg(dev, "%s: download_fiwmare %i\n", __func__, ret); if (ret) @@ -7634,22 +7639,6 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw) } if (!macpower) { - if (priv->ep_tx_normal_queue) - val8 = TX_PAGE_NUM_NORM_PQ; - else - val8 = 0; - - rtl8xxxu_write8(priv, REG_RQPN_NPQ, val8); - - val32 = (TX_PAGE_NUM_PUBQ << RQPN_NORM_PQ_SHIFT) | RQPN_LOAD; - - if (priv->ep_tx_high_queue) - val32 |= (TX_PAGE_NUM_HI_PQ << RQPN_HI_PQ_SHIFT); - if (priv->ep_tx_low_queue) - val32 |= (TX_PAGE_NUM_LO_PQ << RQPN_LO_PQ_SHIFT); - - rtl8xxxu_write32(priv, REG_RQPN, val32); - /* * Set TX buffer boundary */ @@ -7668,20 +7657,6 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw) rtl8xxxu_write8(priv, REG_TDECTRL + 1, val8); } - ret = rtl8xxxu_init_queue_priority(priv); - dev_dbg(dev, "%s: init_queue_priority %i\n", __func__, ret); - if (ret) - goto exit; - - /* - * Set RX page boundary - */ - if (priv->rtl_chip == RTL8723B) - rtl8xxxu_write16(priv, REG_TRXFF_BNDY + 2, 0x3f7f); - else if (priv->rtl_chip == RTL8192E) - rtl8xxxu_write16(priv, REG_TRXFF_BNDY + 2, 0x3cff); - else - rtl8xxxu_write16(priv, REG_TRXFF_BNDY + 2, 0x27ff); /* * Transfer page size is always 128 */ @@ -7693,6 +7668,34 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw) (PBP_PAGE_SIZE_128 << PBP_PAGE_SIZE_TX_SHIFT); rtl8xxxu_write8(priv, REG_PBP, val8); + dev_dbg(dev, "%s: macpower %i\n", __func__, 
macpower); + if (!macpower) { + ret = priv->fops->llt_init(priv, TX_TOTAL_PAGE_NUM); + if (ret) { + dev_warn(dev, "%s: LLT table init failed\n", __func__); + goto exit; + } + + /* + * Presumably this is for 8188EU as well + * Enable TX report and TX report timer + */ + if (priv->rtl_chip == RTL8723B) { + val8 = rtl8xxxu_read8(priv, REG_TX_REPORT_CTRL); + val8 |= TX_REPORT_CTRL_TIMER_ENABLE; + rtl8xxxu_write8(priv, REG_TX_REPORT_CTRL, val8); + /* Set MAX RPT MACID */ + rtl8xxxu_write8(priv, REG_TX_REPORT_CTRL + 1, 0x02); + /* TX report Timer. Unit: 32us */ + rtl8xxxu_write16(priv, REG_TX_REPORT_TIME, 0xcdf0); + + /* tmp ps ? */ + val8 = rtl8xxxu_read8(priv, 0xa3); + val8 &= 0xf8; + rtl8xxxu_write8(priv, 0xa3, val8); + } + } + /* * Unit in 8 bytes, not obvious what it is used for */ From 89c2a097df34362a5e8373749a2f81ca2503b598 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Thu, 14 Apr 2016 14:58:44 -0400 Subject: [PATCH 0706/1649] rtl8xxxu: Implement generic init_queue_reserved_page() function Longer term we should switch all the chips over to use this function instead of the random chip specific ifdef hacks. Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- .../net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 75 +++++++++++++++---- .../net/wireless/realtek/rtl8xxxu/rtl8xxxu.h | 9 +++ .../wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h | 2 +- 3 files changed, 71 insertions(+), 15 deletions(-) diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index c6c41ba2a511..a9290d469759 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -7439,6 +7439,60 @@ static void rtl8723bu_init_statistics(struct rtl8xxxu_priv *priv) rtl8xxxu_write32(priv, REG_OFDM0_FA_RSTC, val32); } +static void rtl8xxxu_old_init_queue_reserved_page(struct rtl8xxxu_priv *priv) +{ + u8 val8; + u32 val32; + + if (priv->ep_tx_normal_queue) + val8 = TX_PAGE_NUM_NORM_PQ; + else + val8 = 0; + + rtl8xxxu_write8(priv, REG_RQPN_NPQ, val8); + + val32 = (TX_PAGE_NUM_PUBQ << RQPN_PUB_PQ_SHIFT) | RQPN_LOAD; + + if (priv->ep_tx_high_queue) + val32 |= (TX_PAGE_NUM_HI_PQ << RQPN_HI_PQ_SHIFT); + if (priv->ep_tx_low_queue) + val32 |= (TX_PAGE_NUM_LO_PQ << RQPN_LO_PQ_SHIFT); + + rtl8xxxu_write32(priv, REG_RQPN, val32); +} + +static void rtl8xxxu_init_queue_reserved_page(struct rtl8xxxu_priv *priv) +{ + struct rtl8xxxu_fileops *fops = priv->fops; + u32 hq, lq, nq, eq, pubq; + u32 val32; + + hq = 0; + lq = 0; + nq = 0; + eq = 0; + pubq = 0; + + if (priv->ep_tx_high_queue) + hq = fops->page_num_hi; + if (priv->ep_tx_low_queue) + lq = fops->page_num_lo; + if (priv->ep_tx_normal_queue) + nq = fops->page_num_norm; + + val32 = (nq << RQPN_NPQ_SHIFT) | (eq << RQPN_EPQ_SHIFT); + rtl8xxxu_write32(priv, REG_RQPN_NPQ, val32); + + pubq = fops->total_page_num - hq - lq - nq; + + val32 = RQPN_LOAD; + val32 |= (hq << RQPN_HI_PQ_SHIFT); + val32 |= (lq << RQPN_LO_PQ_SHIFT); + val32 |= (pubq << RQPN_PUB_PQ_SHIFT); + + rtl8xxxu_write32(priv, REG_RQPN, val32); +} + static int rtl8xxxu_init_device(struct ieee80211_hw *hw) { struct rtl8xxxu_priv *priv = hw->priv; @@ -7469,21 +7523,10 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw) } if (!macpower) { - if (priv->ep_tx_normal_queue) - val8 = TX_PAGE_NUM_NORM_PQ; + if (priv->fops->total_page_num) + rtl8xxxu_init_queue_reserved_page(priv); else - val8 = 0; - - rtl8xxxu_write8(priv, REG_RQPN_NPQ, val8); - - val32 = (TX_PAGE_NUM_PUBQ << RQPN_NORM_PQ_SHIFT) | RQPN_LOAD; - - if 
(priv->ep_tx_high_queue) - val32 |= (TX_PAGE_NUM_HI_PQ << RQPN_HI_PQ_SHIFT); - if (priv->ep_tx_low_queue) - val32 |= (TX_PAGE_NUM_LO_PQ << RQPN_LO_PQ_SHIFT); - - rtl8xxxu_write32(priv, REG_RQPN, val32); + rtl8xxxu_old_init_queue_reserved_page(priv); } ret = rtl8xxxu_init_queue_priority(priv); @@ -9751,6 +9794,10 @@ static struct rtl8xxxu_fileops rtl8192eu_fops = { .adda_2t_path_on_a = 0x0fc01616, .adda_2t_path_on_b = 0x0fc01616, .mactable = rtl8192e_mac_init_table, + .total_page_num = TX_TOTAL_PAGE_NUM_8192E, + .page_num_hi = TX_PAGE_NUM_HI_PQ_8192E, + .page_num_lo = TX_PAGE_NUM_LO_PQ_8192E, + .page_num_norm = TX_PAGE_NUM_NORM_PQ_8192E, }; static struct usb_device_id dev_table[] = { diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h index 48a80fa9eac2..4545e10bede1 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h @@ -49,6 +49,11 @@ #define TX_PAGE_NUM_LO_PQ 0x02 #define TX_PAGE_NUM_NORM_PQ 0x02 +#define TX_PAGE_NUM_PUBQ_8192E 0xe7 +#define TX_PAGE_NUM_HI_PQ_8192E 0x08 +#define TX_PAGE_NUM_LO_PQ_8192E 0x0c +#define TX_PAGE_NUM_NORM_PQ_8192E 0x00 + #define RTL_FW_PAGE_SIZE 4096 #define RTL8XXXU_FIRMWARE_POLL_MAX 1000 @@ -1304,4 +1309,8 @@ struct rtl8xxxu_fileops { u32 adda_2t_path_on_a; u32 adda_2t_path_on_b; struct rtl8xxxu_reg8val *mactable; + u8 total_page_num; + u8 page_num_hi; + u8 page_num_lo; + u8 page_num_norm; }; diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h index bb08a3939e46..a2cff2273e72 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h @@ -387,7 +387,7 @@ #define REG_RQPN 0x0200 #define RQPN_HI_PQ_SHIFT 0 #define RQPN_LO_PQ_SHIFT 8 -#define RQPN_NORM_PQ_SHIFT 16 +#define RQPN_PUB_PQ_SHIFT 16 #define RQPN_LOAD BIT(31) #define REG_FIFOPAGE 0x0204 From 0486e80b2a447fc94687505ca106091b15e61651 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Thu, 14 Apr 2016 14:58:45 -0400 Subject: [PATCH 0707/1649] rtl8xxxu: Reorder chip quirks to follow flow of 8192eu driver Another flow order change to match the vendor driver. 
Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- .../net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 42 +++++++++---------- 1 file changed, 21 insertions(+), 21 deletions(-) diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index a9290d469759..9e9d3e351e65 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -7660,27 +7660,6 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw) priv->rf_mode_ag[0] = rtl8xxxu_read_rfreg(priv, RF_A, RF6052_REG_MODE_AG); - /* - * Chip specific quirks - */ - if (priv->rtl_chip == RTL8723A) { - /* Fix USB interface interference issue */ - rtl8xxxu_write8(priv, 0xfe40, 0xe0); - rtl8xxxu_write8(priv, 0xfe41, 0x8d); - rtl8xxxu_write8(priv, 0xfe42, 0x80); - rtl8xxxu_write32(priv, REG_TXDMA_OFFSET_CHK, 0xfd0320); - - /* Reduce 80M spur */ - rtl8xxxu_write32(priv, REG_AFE_XTAL_CTRL, 0x0381808d); - rtl8xxxu_write32(priv, REG_AFE_PLL_CTRL, 0xf0ffff83); - rtl8xxxu_write32(priv, REG_AFE_PLL_CTRL, 0xf0ffff82); - rtl8xxxu_write32(priv, REG_AFE_PLL_CTRL, 0xf0ffff83); - } else { - val32 = rtl8xxxu_read32(priv, REG_TXDMA_OFFSET_CHK); - val32 |= TXDMA_OFFSET_DROP_DATA_EN; - rtl8xxxu_write32(priv, REG_TXDMA_OFFSET_CHK, val32); - } - if (!macpower) { /* * Set TX buffer boundary @@ -7719,6 +7698,27 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw) goto exit; } + /* + * Chip specific quirks + */ + if (priv->rtl_chip == RTL8723A) { + /* Fix USB interface interference issue */ + rtl8xxxu_write8(priv, 0xfe40, 0xe0); + rtl8xxxu_write8(priv, 0xfe41, 0x8d); + rtl8xxxu_write8(priv, 0xfe42, 0x80); + rtl8xxxu_write32(priv, REG_TXDMA_OFFSET_CHK, 0xfd0320); + + /* Reduce 80M spur */ + rtl8xxxu_write32(priv, REG_AFE_XTAL_CTRL, 0x0381808d); + rtl8xxxu_write32(priv, REG_AFE_PLL_CTRL, 0xf0ffff83); + rtl8xxxu_write32(priv, REG_AFE_PLL_CTRL, 0xf0ffff82); + rtl8xxxu_write32(priv, REG_AFE_PLL_CTRL, 0xf0ffff83); + } else { + val32 = rtl8xxxu_read32(priv, REG_TXDMA_OFFSET_CHK); + val32 |= TXDMA_OFFSET_DROP_DATA_EN; + rtl8xxxu_write32(priv, REG_TXDMA_OFFSET_CHK, val32); + } + /* * Presumably this is for 8188EU as well * Enable TX report and TX report timer From 2e7c7b347d93e41a2c4adf9350da6a351f585810 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Thu, 14 Apr 2016 14:58:46 -0400 Subject: [PATCH 0708/1649] rtl8xxxu: Do not set REG_PBP on 8192eu The vendor driver does not set REG_PBP on 8192eu. Whether this is due to the device not supporting it or simply an oversight in the vendor driver is not clear. 
Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 9e9d3e351e65..36bd4fc1aee8 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -7688,7 +7688,8 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw) else val8 = (PBP_PAGE_SIZE_128 << PBP_PAGE_SIZE_RX_SHIFT) | (PBP_PAGE_SIZE_128 << PBP_PAGE_SIZE_TX_SHIFT); - rtl8xxxu_write8(priv, REG_PBP, val8); + if (priv->rtl_chip != RTL8192E) + rtl8xxxu_write8(priv, REG_PBP, val8); dev_dbg(dev, "%s: macpower %i\n", __func__, macpower); if (!macpower) { From b816901b3d20e9e2941b130139d440953c3b0ba8 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Thu, 14 Apr 2016 14:58:47 -0400 Subject: [PATCH 0709/1649] rtl8xxxu: Do not init FPGA0_TX_INFO on 8192eu Like the 8723bu, the vendor driver does not set FPGA0_TX_INFO for 8192eu in the init sequence. Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 36bd4fc1aee8..ed8c594dd10d 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -7641,7 +7641,7 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw) goto exit; /* RFSW Control - clear bit 14 ?? */ - if (priv->rtl_chip != RTL8723B) + if (priv->rtl_chip != RTL8723B && priv->rtl_chip != RTL8192E) rtl8xxxu_write32(priv, REG_FPGA0_TX_INFO, 0x00000003); /* 0x07000760 */ if (priv->rtl_chip == RTL8192E) { From 5bdb6b0859887bad5c83639ca9fb2d5fd0d5a8a0 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Thu, 14 Apr 2016 14:58:48 -0400 Subject: [PATCH 0710/1649] rtl8xxxu: Do not try to set REG_LEDCFG2 on 8192eu Presumably 8192eu devices do not have leds, so do not enable them. Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index ed8c594dd10d..99cf4aa97c0b 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -7880,9 +7880,11 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw) priv->fops->set_tx_power(priv, 1, false); /* Let the 8051 take control of antenna setting */ - val8 = rtl8xxxu_read8(priv, REG_LEDCFG2); - val8 |= LEDCFG2_DPDT_SELECT; - rtl8xxxu_write8(priv, REG_LEDCFG2, val8); + if (priv->rtl_chip != RTL8192E) { + val8 = rtl8xxxu_read8(priv, REG_LEDCFG2); + val8 |= LEDCFG2_DPDT_SELECT; + rtl8xxxu_write8(priv, REG_LEDCFG2, val8); + } rtl8xxxu_write8(priv, REG_HWSEQ_CTRL, 0xff); From 57e42a21a941decc15f1198db64fe3959b066096 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Thu, 14 Apr 2016 14:58:49 -0400 Subject: [PATCH 0711/1649] rtl8xxxu: Implment rtl8192e_set_tx_power() 8192eu is a 2T part, so setting TX power for path A only, as done by rtl8723bu_set_tx_power() is not sufficient. 
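The substance of the 2T case is that each per-rate power register exists once per RF path, so the base-index-plus-offset arithmetic done for path A has to be repeated with path B's calibration data whenever more than one TX path is in use. The compressed, self-contained sketch below shows only that structure; the struct, field and function names are invented, and the real function in the diff additionally programs CCK rates, HT20/HT40 offsets and the full set of MCS registers.

  #include <stdint.h>
  #include <stdio.h>

  /* invented per-path calibration data for the sketch */
  struct txpwr {
          uint8_t base[2];      /* power base index, path A / path B */
          int8_t ofdm_diff[2];  /* per-path OFDM offset */
  };

  /* stand-in for the per-path rate/AGC register writes */
  static void write_path_power(int path, uint32_t val)
  {
          printf("path %c: 0x%08x\n", path ? 'B' : 'A', (unsigned)val);
  }

  static void set_tx_power_2t(const struct txpwr *p, int tx_paths)
  {
          for (int path = 0; path < tx_paths; path++) {
                  uint8_t idx = p->base[path] + p->ofdm_diff[path];
                  /* replicate the one-byte index into all four rate slots */
                  uint32_t v = idx | idx << 8 | idx << 16 | (uint32_t)idx << 24;
                  write_path_power(path, v);
          }
  }

  int main(void)
  {
          struct txpwr p = { .base = { 0x2a, 0x2c }, .ofdm_diff = { 2, 1 } };
          set_tx_power_2t(&p, 2);  /* a 1T part would pass tx_paths = 1 */
          return 0;
  }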
Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- .../net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 78 ++++++++++++++++++- 1 file changed, 77 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 99cf4aa97c0b..7ab009fc5ecc 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -2555,6 +2555,82 @@ rtl8723b_set_tx_power(struct rtl8xxxu_priv *priv, int channel, bool ht40) rtl8xxxu_write32(priv, REG_TX_AGC_A_MCS07_MCS04, mcs); } +static void +rtl8192e_set_tx_power(struct rtl8xxxu_priv *priv, int channel, bool ht40) +{ + u32 val32, ofdm, mcs; + u8 cck, ofdmbase, mcsbase; + int group, tx_idx; + + tx_idx = 0; + group = rtl8723b_channel_to_group(channel); + + cck = priv->cck_tx_power_index_A[group]; + + val32 = rtl8xxxu_read32(priv, REG_TX_AGC_A_CCK1_MCS32); + val32 &= 0xffff00ff; + val32 |= (cck << 8); + rtl8xxxu_write32(priv, REG_TX_AGC_A_CCK1_MCS32, val32); + + val32 = rtl8xxxu_read32(priv, REG_TX_AGC_B_CCK11_A_CCK2_11); + val32 &= 0xff; + val32 |= ((cck << 8) | (cck << 16) | (cck << 24)); + rtl8xxxu_write32(priv, REG_TX_AGC_B_CCK11_A_CCK2_11, val32); + + ofdmbase = priv->ht40_1s_tx_power_index_A[group]; + ofdmbase += priv->ofdm_tx_power_diff[tx_idx].a; + ofdm = ofdmbase | ofdmbase << 8 | ofdmbase << 16 | ofdmbase << 24; + + rtl8xxxu_write32(priv, REG_TX_AGC_A_RATE18_06, ofdm); + rtl8xxxu_write32(priv, REG_TX_AGC_A_RATE54_24, ofdm); + + mcsbase = priv->ht40_1s_tx_power_index_A[group]; + if (ht40) + mcsbase += priv->ht40_tx_power_diff[tx_idx++].a; + else + mcsbase += priv->ht20_tx_power_diff[tx_idx++].a; + mcs = mcsbase | mcsbase << 8 | mcsbase << 16 | mcsbase << 24; + + rtl8xxxu_write32(priv, REG_TX_AGC_A_MCS03_MCS00, mcs); + rtl8xxxu_write32(priv, REG_TX_AGC_A_MCS07_MCS04, mcs); + rtl8xxxu_write32(priv, REG_TX_AGC_A_MCS11_MCS08, mcs); + rtl8xxxu_write32(priv, REG_TX_AGC_A_MCS15_MCS12, mcs); + + if (priv->tx_paths > 1) { + cck = priv->cck_tx_power_index_B[group]; + + val32 = rtl8xxxu_read32(priv, REG_TX_AGC_B_CCK1_55_MCS32); + val32 &= 0xff; + val32 |= ((cck << 8) | (cck << 16) | (cck << 24)); + rtl8xxxu_write32(priv, REG_TX_AGC_B_CCK1_55_MCS32, val32); + + val32 = rtl8xxxu_read32(priv, REG_TX_AGC_B_CCK11_A_CCK2_11); + val32 &= 0xffffff00; + val32 |= cck; + rtl8xxxu_write32(priv, REG_TX_AGC_B_CCK11_A_CCK2_11, val32); + + ofdmbase = priv->ht40_1s_tx_power_index_B[group]; + ofdmbase += priv->ofdm_tx_power_diff[tx_idx].b; + ofdm = ofdmbase | ofdmbase << 8 | + ofdmbase << 16 | ofdmbase << 24; + + rtl8xxxu_write32(priv, REG_TX_AGC_B_RATE18_06, ofdm); + rtl8xxxu_write32(priv, REG_TX_AGC_B_RATE54_24, ofdm); + + mcsbase = priv->ht40_1s_tx_power_index_B[group]; + if (ht40) + mcsbase += priv->ht40_tx_power_diff[tx_idx++].b; + else + mcsbase += priv->ht20_tx_power_diff[tx_idx++].b; + mcs = mcsbase | mcsbase << 8 | mcsbase << 16 | mcsbase << 24; + + rtl8xxxu_write32(priv, REG_TX_AGC_B_MCS03_MCS00, mcs); + rtl8xxxu_write32(priv, REG_TX_AGC_B_MCS07_MCS04, mcs); + rtl8xxxu_write32(priv, REG_TX_AGC_B_MCS11_MCS08, mcs); + rtl8xxxu_write32(priv, REG_TX_AGC_B_MCS15_MCS12, mcs); + } +} + static void rtl8xxxu_set_linktype(struct rtl8xxxu_priv *priv, enum nl80211_iftype linktype) { @@ -9784,7 +9860,7 @@ static struct rtl8xxxu_fileops rtl8192eu_fops = { .parse_rx_desc = rtl8723bu_parse_rx_desc, .enable_rf = rtl8723b_enable_rf, .disable_rf = rtl8723b_disable_rf, - .set_tx_power = rtl8723b_set_tx_power, + .set_tx_power = rtl8192e_set_tx_power, 
.update_rate_mask = rtl8723bu_update_rate_mask, .report_connect = rtl8723bu_report_connect, .writeN_block_size = 128, From 55c0b6ae1e7b1a402f23ca8fafa1874f854b4834 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Thu, 14 Apr 2016 14:58:50 -0400 Subject: [PATCH 0712/1649] rtl8xxxu: Use has_s0s1 for REG_S0S1 issues only Instead use tx_desc_size() to distinguish between gen1 (8723a/8192c/8188c) and gen2 (8723b/8192e) parts. Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 7ab009fc5ecc..8b0b6c92793c 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -2758,7 +2758,8 @@ static int rtl8xxxu_identify_chip(struct rtl8xxxu_priv *priv) } else if (val32 & SYS_CFG_TYPE_ID) { bonding = rtl8xxxu_read32(priv, REG_HPON_FSM); bonding &= HPON_FSM_BONDING_MASK; - if (priv->fops->has_s0s1) { + if (priv->fops->tx_desc_size == + sizeof(struct rtl8xxxu_txdesc40)) { if (bonding == HPON_FSM_BONDING_1T2R) { sprintf(priv->chip_name, "8191EU"); priv->rf_paths = 2; @@ -7993,7 +7994,7 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw) /* * This should enable thermal meter */ - if (priv->fops->has_s0s1) + if (priv->fops->tx_desc_size == sizeof(struct rtl8xxxu_txdesc40)) rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_T_METER_8723B, 0x37cf8); else @@ -9867,7 +9868,7 @@ static struct rtl8xxxu_fileops rtl8192eu_fops = { .mbox_ext_reg = REG_HMBOX_EXT0_8723B, .mbox_ext_width = 4, .tx_desc_size = sizeof(struct rtl8xxxu_txdesc40), - .has_s0s1 = 1, + .has_s0s1 = 0, .adda_1t_init = 0x0fc01616, .adda_1t_path_on = 0x0fc01616, .adda_2t_path_on_a = 0x0fc01616, From 2cb79eb74f08f0217a6c4f21ddc42627016771ff Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Thu, 14 Apr 2016 14:58:51 -0400 Subject: [PATCH 0713/1649] rtl8xxxu: byteswap the entire RX descriptor for 24 byte RX descriptors This shouldn't affect little endian system, but may have prevented the driver working on big endian systems for devices with the larger 24 byte RX descriptors. 
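The conversion itself is nothing more than treating the descriptor as an array of 32-bit little-endian words and converting every word to host order in place, with the loop bound taken from the size of the descriptor in question rather than a hard-coded count. A standalone sketch of that pattern follows (mock descriptor type, le32toh() from glibc's endian.h; this is an illustration, not the driver's code):

  #include <endian.h>
  #include <stdint.h>
  #include <stdio.h>
  #include <string.h>

  /* mock 24-byte descriptor: six 32-bit words, little endian on the wire */
  struct mock_rxdesc24 {
          uint32_t w[6];
  };

  static void desc_to_cpu(void *buf, size_t desc_size)
  {
          uint32_t *w = buf;

          /* convert every word of the descriptor, not just the first few */
          for (size_t i = 0; i < desc_size / sizeof(uint32_t); i++)
                  w[i] = le32toh(w[i]);
  }

  int main(void)
  {
          struct mock_rxdesc24 d;
          uint8_t wire[sizeof(struct mock_rxdesc24)] = { 0x2a };  /* low bits of word 0 */

          memcpy(&d, wire, sizeof(d));
          desc_to_cpu(&d, sizeof(d));
          printf("first word in host order: 0x%08x\n", (unsigned)d.w[0]);
          return 0;
  }

On a little-endian host le32toh() is a no-op, so the loop costs nothing there; on a big-endian host it is what makes the descriptor bitfields readable.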
Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- .../net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 8b0b6c92793c..6545012a49ad 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -8832,7 +8832,13 @@ static int rtl8723au_parse_rx_desc(struct rtl8xxxu_priv *priv, { struct rtl8xxxu_rx_desc *rx_desc = (struct rtl8xxxu_rx_desc *)skb->data; struct rtl8723au_phy_stats *phy_stats; + __le32 *_rx_desc_le = (__le32 *)skb->data; + u32 *_rx_desc = (u32 *)skb->data; int drvinfo_sz, desc_shift; + int i; + + for (i = 0; i < (sizeof(struct rtl8xxxu_rx_desc) / sizeof(u32)); i++) + _rx_desc[i] = le32_to_cpu(_rx_desc_le[i]); skb_pull(skb, sizeof(struct rtl8xxxu_rx_desc)); @@ -8873,7 +8879,13 @@ static int rtl8723bu_parse_rx_desc(struct rtl8xxxu_priv *priv, struct rtl8723bu_rx_desc *rx_desc = (struct rtl8723bu_rx_desc *)skb->data; struct rtl8723au_phy_stats *phy_stats; + __le32 *_rx_desc_le = (__le32 *)skb->data; + u32 *_rx_desc = (u32 *)skb->data; int drvinfo_sz, desc_shift; + int i; + + for (i = 0; i < (sizeof(struct rtl8723bu_rx_desc) / sizeof(u32)); i++) + _rx_desc[i] = le32_to_cpu(_rx_desc_le[i]); skb_pull(skb, sizeof(struct rtl8723bu_rx_desc)); @@ -8967,12 +8979,7 @@ static void rtl8xxxu_rx_complete(struct urb *urb) struct sk_buff *skb = (struct sk_buff *)urb->context; struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb); struct device *dev = &priv->udev->dev; - __le32 *_rx_desc_le = (__le32 *)skb->data; - u32 *_rx_desc = (u32 *)skb->data; - int rx_type, i; - - for (i = 0; i < (sizeof(struct rtl8xxxu_rx_desc) / sizeof(u32)); i++) - _rx_desc[i] = le32_to_cpu(_rx_desc_le[i]); + int rx_type; skb_put(skb, urb->actual_length); From a49c7ce183cd78c9786348f8dea3d8027cee8177 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Thu, 14 Apr 2016 14:58:52 -0400 Subject: [PATCH 0714/1649] rtl8xxxu: Name RX descriptor types rxdesc16/rxdesc24 This caught a bug where too little memory was allocated for RX urbs for parts using 24 byte RX descriptors Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- .../net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 38 +++++++++++-------- .../net/wireless/realtek/rtl8xxxu/rtl8xxxu.h | 5 ++- 2 files changed, 25 insertions(+), 18 deletions(-) diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 6545012a49ad..4b309a60f5a1 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -8826,21 +8826,22 @@ static void rtl8xxxu_rx_urb_work(struct work_struct *work) } } -static int rtl8723au_parse_rx_desc(struct rtl8xxxu_priv *priv, +static int rtl8xxxu_parse_rxdesc16(struct rtl8xxxu_priv *priv, struct sk_buff *skb, struct ieee80211_rx_status *rx_status) { - struct rtl8xxxu_rx_desc *rx_desc = (struct rtl8xxxu_rx_desc *)skb->data; + struct rtl8xxxu_rxdesc16 *rx_desc = + (struct rtl8xxxu_rxdesc16 *)skb->data; struct rtl8723au_phy_stats *phy_stats; __le32 *_rx_desc_le = (__le32 *)skb->data; u32 *_rx_desc = (u32 *)skb->data; int drvinfo_sz, desc_shift; int i; - for (i = 0; i < (sizeof(struct rtl8xxxu_rx_desc) / sizeof(u32)); i++) + for (i = 0; i < (sizeof(struct rtl8xxxu_rxdesc16) / sizeof(u32)); i++) _rx_desc[i] = le32_to_cpu(_rx_desc_le[i]); - skb_pull(skb, sizeof(struct rtl8xxxu_rx_desc)); + 
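The bug the rename exposed is the usual consequence of hard-coding sizeof() of one descriptor type in a path shared by chips with different descriptor sizes: the RX buffer was sized for the 16-byte layout, leaving it short for the 24-byte parts. The robust shape is to carry the descriptor size in the per-chip ops and derive every buffer size from it, roughly as in this sketch with invented names and a placeholder payload size:

  #include <stdio.h>
  #include <stdlib.h>
  #include <string.h>

  #define RX_BUFFER_SIZE 16384  /* placeholder payload size */

  /* per-chip parameters; stands in for a fileops-style rx_desc_size field */
  struct chip_ops {
          size_t rx_desc_size;
  };

  static const struct chip_ops gen1 = { .rx_desc_size = 16 };
  static const struct chip_ops gen2 = { .rx_desc_size = 24 };

  static void *alloc_rx_buf(const struct chip_ops *ops, size_t *out_size)
  {
          /* size from the chip's descriptor size, never from sizeof() of
           * one particular descriptor struct */
          *out_size = ops->rx_desc_size + RX_BUFFER_SIZE;
          void *buf = malloc(*out_size);

          if (buf)
                  memset(buf, 0, ops->rx_desc_size);  /* clear just the descriptor area */
          return buf;
  }

  int main(void)
  {
          size_t sz;
          void *a = alloc_rx_buf(&gen1, &sz);
          printf("gen1 rx buffer: %zu bytes\n", sz);
          void *b = alloc_rx_buf(&gen2, &sz);
          printf("gen2 rx buffer: %zu bytes\n", sz);
          free(a);
          free(b);
          return 0;
  }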
skb_pull(skb, sizeof(struct rtl8xxxu_rxdesc16)); phy_stats = (struct rtl8723au_phy_stats *)skb->data; @@ -8872,22 +8873,22 @@ static int rtl8723au_parse_rx_desc(struct rtl8xxxu_priv *priv, return RX_TYPE_DATA_PKT; } -static int rtl8723bu_parse_rx_desc(struct rtl8xxxu_priv *priv, +static int rtl8xxxu_parse_rxdesc24(struct rtl8xxxu_priv *priv, struct sk_buff *skb, struct ieee80211_rx_status *rx_status) { - struct rtl8723bu_rx_desc *rx_desc = - (struct rtl8723bu_rx_desc *)skb->data; + struct rtl8xxxu_rxdesc24 *rx_desc = + (struct rtl8xxxu_rxdesc24 *)skb->data; struct rtl8723au_phy_stats *phy_stats; __le32 *_rx_desc_le = (__le32 *)skb->data; u32 *_rx_desc = (u32 *)skb->data; int drvinfo_sz, desc_shift; int i; - for (i = 0; i < (sizeof(struct rtl8723bu_rx_desc) / sizeof(u32)); i++) + for (i = 0; i < (sizeof(struct rtl8xxxu_rxdesc24) / sizeof(u32)); i++) _rx_desc[i] = le32_to_cpu(_rx_desc_le[i]); - skb_pull(skb, sizeof(struct rtl8723bu_rx_desc)); + skb_pull(skb, sizeof(struct rtl8xxxu_rxdesc24)); phy_stats = (struct rtl8723au_phy_stats *)skb->data; @@ -9018,14 +9019,15 @@ static int rtl8xxxu_submit_rx_urb(struct rtl8xxxu_priv *priv, { struct sk_buff *skb; int skb_size; - int ret; + int ret, rx_desc_sz; - skb_size = sizeof(struct rtl8xxxu_rx_desc) + RTL_RX_BUFFER_SIZE; + rx_desc_sz = priv->fops->rx_desc_size; + skb_size = rx_desc_sz + RTL_RX_BUFFER_SIZE; skb = __netdev_alloc_skb(NULL, skb_size, GFP_KERNEL); if (!skb) return -ENOMEM; - memset(skb->data, 0, sizeof(struct rtl8xxxu_rx_desc)); + memset(skb->data, 0, rx_desc_sz); usb_fill_bulk_urb(&rx_urb->urb, priv->udev, priv->pipe_in, skb->data, skb_size, rtl8xxxu_rx_complete, skb); usb_anchor_urb(&rx_urb->urb, &priv->rx_anchor); @@ -9779,7 +9781,7 @@ static struct rtl8xxxu_fileops rtl8723au_fops = { .llt_init = rtl8xxxu_init_llt_table, .phy_iq_calibrate = rtl8723au_phy_iq_calibrate, .config_channel = rtl8723au_config_channel, - .parse_rx_desc = rtl8723au_parse_rx_desc, + .parse_rx_desc = rtl8xxxu_parse_rxdesc16, .enable_rf = rtl8723a_enable_rf, .disable_rf = rtl8723a_disable_rf, .set_tx_power = rtl8723a_set_tx_power, @@ -9789,6 +9791,7 @@ static struct rtl8xxxu_fileops rtl8723au_fops = { .mbox_ext_reg = REG_HMBOX_EXT_0, .mbox_ext_width = 2, .tx_desc_size = sizeof(struct rtl8xxxu_txdesc32), + .rx_desc_size = sizeof(struct rtl8xxxu_rxdesc16), .adda_1t_init = 0x0b1b25a0, .adda_1t_path_on = 0x0bdb25a0, .adda_2t_path_on_a = 0x04db25a4, @@ -9806,7 +9809,7 @@ static struct rtl8xxxu_fileops rtl8723bu_fops = { .phy_init_antenna_selection = rtl8723bu_phy_init_antenna_selection, .phy_iq_calibrate = rtl8723bu_phy_iq_calibrate, .config_channel = rtl8723bu_config_channel, - .parse_rx_desc = rtl8723bu_parse_rx_desc, + .parse_rx_desc = rtl8xxxu_parse_rxdesc24, .init_aggregation = rtl8723bu_init_aggregation, .init_statistics = rtl8723bu_init_statistics, .enable_rf = rtl8723b_enable_rf, @@ -9818,6 +9821,7 @@ static struct rtl8xxxu_fileops rtl8723bu_fops = { .mbox_ext_reg = REG_HMBOX_EXT0_8723B, .mbox_ext_width = 4, .tx_desc_size = sizeof(struct rtl8xxxu_txdesc40), + .rx_desc_size = sizeof(struct rtl8xxxu_rxdesc24), .has_s0s1 = 1, .adda_1t_init = 0x01c00014, .adda_1t_path_on = 0x01c00014, @@ -9837,7 +9841,7 @@ static struct rtl8xxxu_fileops rtl8192cu_fops = { .llt_init = rtl8xxxu_init_llt_table, .phy_iq_calibrate = rtl8723au_phy_iq_calibrate, .config_channel = rtl8723au_config_channel, - .parse_rx_desc = rtl8723au_parse_rx_desc, + .parse_rx_desc = rtl8xxxu_parse_rxdesc16, .enable_rf = rtl8723a_enable_rf, .disable_rf = rtl8723a_disable_rf, .set_tx_power = 
rtl8723a_set_tx_power, @@ -9847,6 +9851,7 @@ static struct rtl8xxxu_fileops rtl8192cu_fops = { .mbox_ext_reg = REG_HMBOX_EXT_0, .mbox_ext_width = 2, .tx_desc_size = sizeof(struct rtl8xxxu_txdesc32), + .rx_desc_size = sizeof(struct rtl8xxxu_rxdesc16), .adda_1t_init = 0x0b1b25a0, .adda_1t_path_on = 0x0bdb25a0, .adda_2t_path_on_a = 0x04db25a4, @@ -9865,7 +9870,7 @@ static struct rtl8xxxu_fileops rtl8192eu_fops = { .llt_init = rtl8xxxu_auto_llt_table, .phy_iq_calibrate = rtl8192eu_phy_iq_calibrate, .config_channel = rtl8723bu_config_channel, - .parse_rx_desc = rtl8723bu_parse_rx_desc, + .parse_rx_desc = rtl8xxxu_parse_rxdesc24, .enable_rf = rtl8723b_enable_rf, .disable_rf = rtl8723b_disable_rf, .set_tx_power = rtl8192e_set_tx_power, @@ -9875,6 +9880,7 @@ static struct rtl8xxxu_fileops rtl8192eu_fops = { .mbox_ext_reg = REG_HMBOX_EXT0_8723B, .mbox_ext_width = 4, .tx_desc_size = sizeof(struct rtl8xxxu_txdesc40), + .rx_desc_size = sizeof(struct rtl8xxxu_rxdesc24), .has_s0s1 = 0, .adda_1t_init = 0x0fc01616, .adda_1t_path_on = 0x0fc01616, diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h index 4545e10bede1..2f0709ee0cb3 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h @@ -101,7 +101,7 @@ enum rtl8xxxu_rx_type { RX_TYPE_ERROR = -1 }; -struct rtl8xxxu_rx_desc { +struct rtl8xxxu_rxdesc16 { #ifdef __LITTLE_ENDIAN u32 pktlen:14; u32 crc32:1; @@ -237,7 +237,7 @@ struct rtl8xxxu_rx_desc { #endif }; -struct rtl8723bu_rx_desc { +struct rtl8xxxu_rxdesc24 { #ifdef __LITTLE_ENDIAN u32 pktlen:14; u32 crc32:1; @@ -1303,6 +1303,7 @@ struct rtl8xxxu_fileops { u16 mbox_ext_reg; char mbox_ext_width; char tx_desc_size; + char rx_desc_size; char has_s0s1; u32 adda_1t_init; u32 adda_1t_path_on; From fe62171fb5cac42d0ff665e61b2735965e9efd45 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Thu, 14 Apr 2016 14:58:53 -0400 Subject: [PATCH 0715/1649] rtl8xxxu: Remove misleading warning from rtl8192eu_phy_iqcalibrate() No actual code flow change, but no need to warn about something that isn't a prioblem. Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 4b309a60f5a1..71d375c5ffa3 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -5897,8 +5897,6 @@ static void rtl8192eu_phy_iqcalibrate(struct rtl8xxxu_priv *priv, dev_dbg(dev, "%s: Path A RX IQK failed!\n", __func__); if (priv->rf_paths > 1) { - dev_warn(dev, "%s: Path B ongoing\n", __func__); - /* Path A into standby */ rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x00000000); rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_AC, 0x10000); From 15f9dc99237df4b29f000464c35925bb0a56ead7 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Thu, 14 Apr 2016 14:58:54 -0400 Subject: [PATCH 0716/1649] rtl8xxxu: Remove unused 8723bu path B IQ calibration code The 8723bu is a combo WiFi/BT dongle, and path B is not used for WiFi, so no point in calibrating it. 
Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- .../net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 76 +------------------ 1 file changed, 3 insertions(+), 73 deletions(-) diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 71d375c5ffa3..c355d27a26d0 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -4988,50 +4988,6 @@ out: return result; } -#ifdef RTL8723BU_PATH_B -static int rtl8723bu_iqk_path_b(struct rtl8xxxu_priv *priv) -{ - u32 reg_eac, reg_eb4, reg_ebc, reg_ec4, reg_ecc, path_sel; - int result = 0; - - path_sel = rtl8xxxu_read32(priv, REG_S0S1_PATH_SWITCH); - - val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK); - val32 &= 0x000000ff; - rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32); - - /* One shot, path B LOK & IQK */ - rtl8xxxu_write32(priv, REG_IQK_AGC_CONT, 0x00000002); - rtl8xxxu_write32(priv, REG_IQK_AGC_CONT, 0x00000000); - - mdelay(1); - - /* Check failed */ - reg_eac = rtl8xxxu_read32(priv, REG_RX_POWER_AFTER_IQK_A_2); - reg_eb4 = rtl8xxxu_read32(priv, REG_TX_POWER_BEFORE_IQK_B); - reg_ebc = rtl8xxxu_read32(priv, REG_TX_POWER_AFTER_IQK_B); - reg_ec4 = rtl8xxxu_read32(priv, REG_RX_POWER_BEFORE_IQK_B_2); - reg_ecc = rtl8xxxu_read32(priv, REG_RX_POWER_AFTER_IQK_B_2); - - if (!(reg_eac & BIT(31)) && - ((reg_eb4 & 0x03ff0000) != 0x01420000) && - ((reg_ebc & 0x03ff0000) != 0x00420000)) - result |= 0x01; - else - goto out; - - if (!(reg_eac & BIT(30)) && - (((reg_ec4 & 0x03ff0000) >> 16) != 0x132) && - (((reg_ecc & 0x03ff0000) >> 16) != 0x36)) - result |= 0x02; - else - dev_warn(&priv->udev->dev, "%s: Path B RX IQK failed!\n", - __func__); -out: - return result; -} -#endif - static int rtl8192eu_iqk_path_a(struct rtl8xxxu_priv *priv) { u32 reg_eac, reg_e94, reg_e9c; @@ -5619,20 +5575,6 @@ static void rtl8723bu_phy_iqcalibrate(struct rtl8xxxu_priv *priv, rtl8xxxu_write32(priv, REG_OFDM0_TR_MUX_PAR, 0x000800e4); rtl8xxxu_write32(priv, REG_FPGA0_XCD_RF_SW_CTRL, 0x22204000); -#ifdef RTL8723BU_PATH_B - /* Set RF mode to standby Path B */ - if (priv->tx_paths > 1) - rtl8xxxu_write_rfreg(priv, RF_B, RF6052_REG_AC, 0x10000); -#endif - -#if 0 - /* Page B init */ - rtl8xxxu_write32(priv, REG_CONFIG_ANT_A, 0x0f600000); - - if (priv->tx_paths > 1) - rtl8xxxu_write32(priv, REG_CONFIG_ANT_B, 0x0f600000); -#endif - /* * RX IQ calibration setting for 8723B D cut large current issue * when leaving IPS @@ -5662,12 +5604,6 @@ static void rtl8723bu_phy_iqcalibrate(struct rtl8xxxu_priv *priv, val32 &= 0x000000ff; rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32); -#if 0 /* Only needed in restore case, we may need this when going to suspend */ - priv->RFCalibrateInfo.TxLOK[RF_A] = - rtl8xxxu_read_rfreg(priv, RF_A, - RF6052_REG_TXM_IDAC); -#endif - val32 = rtl8xxxu_read32(priv, REG_TX_POWER_BEFORE_IQK_A); result[t][0] = (val32 >> 16) & 0x3ff; @@ -6209,15 +6145,9 @@ static void rtl8723bu_phy_iq_calibrate(struct rtl8xxxu_priv *priv) rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_UNKNOWN_ED, val32); rtl8xxxu_write_rfreg(priv, RF_A, 0x43, 0x300bd); - if (priv->rf_paths > 1) { - dev_dbg(dev, "%s: beware 2T not yet supported\n", __func__); -#ifdef RTL8723BU_PATH_B - if (RF_Path == 0x0) //S1 - ODM_SetIQCbyRFpath(pDM_Odm, 0); - else //S0 - ODM_SetIQCbyRFpath(pDM_Odm, 1); -#endif - } + if (priv->rf_paths > 1) + dev_dbg(dev, "%s: 8723BU 2T not supported\n", __func__); + rtl8xxxu_prepare_calibrate(priv, 0); } From 9068308ad185a3c2c5d7beb77146b6df7b96b9c6 Mon Sep 17 00:00:00 2001 From: Jes 
Sorensen Date: Thu, 14 Apr 2016 14:58:55 -0400 Subject: [PATCH 0717/1649] rtl8xxxu: Correctly mask what was read from REG_CCK0_AFE_SETTING The old code incorrectly wiped out bits 0-23 by mistake when setting the RX path for 1T parts. Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index c355d27a26d0..d0330a8f32fb 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -3814,7 +3814,7 @@ static int rtl8xxxu_init_phy_bb(struct rtl8xxxu_priv *priv) rtl8xxxu_write32(priv, REG_FPGA1_TX_INFO, val32); val32 = rtl8xxxu_read32(priv, REG_CCK0_AFE_SETTING); - val32 &= 0xff000000; + val32 &= 0x00ffffff; val32 |= 0x45000000; rtl8xxxu_write32(priv, REG_CCK0_AFE_SETTING, val32); From bd8fe40cc4f30f3f0982345135d97a676d1d3652 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Thu, 14 Apr 2016 14:58:56 -0400 Subject: [PATCH 0718/1649] rtl8xxxu: Use descriptive bits for setting RX paths for 1T2R parts This reduce the use of magic values a little. Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 4 +++- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h | 4 ++++ 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index d0330a8f32fb..3c45ad0bdbca 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -3814,8 +3814,10 @@ static int rtl8xxxu_init_phy_bb(struct rtl8xxxu_priv *priv) rtl8xxxu_write32(priv, REG_FPGA1_TX_INFO, val32); val32 = rtl8xxxu_read32(priv, REG_CCK0_AFE_SETTING); + val32 &= ~CCK0_AFE_RX_MASK; val32 &= 0x00ffffff; - val32 |= 0x45000000; + val32 |= 0x40000000; + val32 |= CCK0_AFE_RX_ANT_B; rtl8xxxu_write32(priv, REG_CCK0_AFE_SETTING, val32); val32 = rtl8xxxu_read32(priv, REG_OFDM0_TRX_PATH_ENABLE); diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h index a2cff2273e72..e7709a5bcb3a 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h @@ -882,6 +882,10 @@ #define CCK0_SIDEBAND BIT(4) #define REG_CCK0_AFE_SETTING 0x0a04 +#define CCK0_AFE_RX_MASK 0x0f000000 +#define CCK0_AFE_RX_ANT_AB BIT(24) +#define CCK0_AFE_RX_ANT_A 0 +#define CCK0_AFE_RX_ANT_B (BIT(24) | BIT(26)) #define REG_CONFIG_ANT_A 0x0b68 #define REG_CONFIG_ANT_B 0x0b6c From cb8772504a27d7ada67840efe5439f3df3621e13 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Thu, 14 Apr 2016 14:58:57 -0400 Subject: [PATCH 0719/1649] rtl8xxxu: Split rtl8xxxu_init_phy_bb() into device specific functions This reduces the if () clutter. Longer term it probably makes sense to split this between gen1 (8723au/8188cu/8192cu) and gen2 (8192eu/8723bu) devices. 
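For illustration only (not driver code, all names invented), a minimal sketch of the fileops-style dispatch this change moves towards: each chip supplies its own callback and the shared path calls through the ops pointer instead of branching on a chip ID.

  #include <stdio.h>

  struct chip;

  struct chip_ops {
          void (*init_phy_bb)(struct chip *chip); /* per-device PHY BB init */
  };

  struct chip {
          const char *name;
          const struct chip_ops *ops;
  };

  static void gen1_init_phy_bb(struct chip *chip)
  {
          printf("%s: gen1 PHY BB init\n", chip->name);
  }

  static void gen2_init_phy_bb(struct chip *chip)
  {
          printf("%s: gen2 PHY BB init\n", chip->name);
  }

  static const struct chip_ops gen1_ops = { .init_phy_bb = gen1_init_phy_bb };
  static const struct chip_ops gen2_ops = { .init_phy_bb = gen2_init_phy_bb };

  static void init_phy_bb(struct chip *chip)
  {
          /* one call site, no per-chip if () chains */
          chip->ops->init_phy_bb(chip);
  }

  int main(void)
  {
          struct chip a = { .name = "gen1", .ops = &gen1_ops };
          struct chip b = { .name = "gen2", .ops = &gen2_ops };

          init_phy_bb(&a);
          init_phy_bb(&b);
          return 0;
  }

The driver's real hook is the init_phy_bb member added to struct rtl8xxxu_fileops below; the sketch only shows the shape of the pattern.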
Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- .../net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 134 ++++++++++-------- .../net/wireless/realtek/rtl8xxxu/rtl8xxxu.h | 1 + 2 files changed, 77 insertions(+), 58 deletions(-) diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 3c45ad0bdbca..43359a394503 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -3717,76 +3717,36 @@ static int rtl8xxxu_init_phy_regs(struct rtl8xxxu_priv *priv, return 0; } -/* - * Most of this is black magic retrieved from the old rtl8723au driver - */ -static int rtl8xxxu_init_phy_bb(struct rtl8xxxu_priv *priv) +static void rtl8723au_init_phy_bb(struct rtl8xxxu_priv *priv) { - u8 val8, ldoa15, ldov12d, lpldo, ldohci12; + u8 val8; u16 val16; u32 val32; - /* - * Todo: The vendor driver maintains a table of PHY register - * addresses, which is initialized here. Do we need this? - */ + val8 = rtl8xxxu_read8(priv, REG_AFE_PLL_CTRL); + udelay(2); + val8 |= AFE_PLL_320_ENABLE; + rtl8xxxu_write8(priv, REG_AFE_PLL_CTRL, val8); + udelay(2); - if (priv->rtl_chip == RTL8723B) { - val16 = rtl8xxxu_read16(priv, REG_SYS_FUNC); - val16 |= SYS_FUNC_BB_GLB_RSTN | SYS_FUNC_BBRSTB | - SYS_FUNC_DIO_RF; - rtl8xxxu_write16(priv, REG_SYS_FUNC, val16); + rtl8xxxu_write8(priv, REG_AFE_PLL_CTRL + 1, 0xff); + udelay(2); - rtl8xxxu_write32(priv, REG_S0S1_PATH_SWITCH, 0x00); - } else if (priv->rtl_chip == RTL8192E) { - val16 = rtl8xxxu_read16(priv, REG_SYS_FUNC); - val16 |= SYS_FUNC_BB_GLB_RSTN | SYS_FUNC_BBRSTB | - SYS_FUNC_DIO_RF; - rtl8xxxu_write16(priv, REG_SYS_FUNC, val16); - } else { - val8 = rtl8xxxu_read8(priv, REG_AFE_PLL_CTRL); - udelay(2); - val8 |= AFE_PLL_320_ENABLE; - rtl8xxxu_write8(priv, REG_AFE_PLL_CTRL, val8); - udelay(2); + val16 = rtl8xxxu_read16(priv, REG_SYS_FUNC); + val16 |= SYS_FUNC_BB_GLB_RSTN | SYS_FUNC_BBRSTB; + rtl8xxxu_write16(priv, REG_SYS_FUNC, val16); - rtl8xxxu_write8(priv, REG_AFE_PLL_CTRL + 1, 0xff); - udelay(2); - - val16 = rtl8xxxu_read16(priv, REG_SYS_FUNC); - val16 |= SYS_FUNC_BB_GLB_RSTN | SYS_FUNC_BBRSTB; - rtl8xxxu_write16(priv, REG_SYS_FUNC, val16); - } - - if (priv->rtl_chip != RTL8723B && priv->rtl_chip != RTL8192E) { - /* AFE_XTAL_RF_GATE (bit 14) if addressing as 32 bit register */ - val32 = rtl8xxxu_read32(priv, REG_AFE_XTAL_CTRL); - val32 &= ~AFE_XTAL_RF_GATE; - if (priv->has_bluetooth) - val32 &= ~AFE_XTAL_BT_GATE; - rtl8xxxu_write32(priv, REG_AFE_XTAL_CTRL, val32); - } + val32 = rtl8xxxu_read32(priv, REG_AFE_XTAL_CTRL); + val32 &= ~AFE_XTAL_RF_GATE; + if (priv->has_bluetooth) + val32 &= ~AFE_XTAL_BT_GATE; + rtl8xxxu_write32(priv, REG_AFE_XTAL_CTRL, val32); /* 6. 0x1f[7:0] = 0x07 */ val8 = RF_ENABLE | RF_RSTB | RF_SDMRSTB; rtl8xxxu_write8(priv, REG_RF_CTRL, val8); - if (priv->rtl_chip == RTL8723B) { - /* - * Why? 
- */ - rtl8xxxu_write8(priv, REG_SYS_FUNC, 0xe3); - rtl8xxxu_write8(priv, REG_AFE_XTAL_CTRL + 1, 0x80); - rtl8xxxu_init_phy_regs(priv, rtl8723b_phy_1t_init_table); - } else if (priv->rtl_chip == RTL8192E) { - val16 = rtl8xxxu_read16(priv, REG_SYS_FUNC); - val16 |= (SYS_FUNC_USBA | SYS_FUNC_USBD | SYS_FUNC_DIO_RF | - SYS_FUNC_BB_GLB_RSTN | SYS_FUNC_BBRSTB); - rtl8xxxu_write16(priv, REG_SYS_FUNC, val16); - val8 = RF_ENABLE | RF_RSTB | RF_SDMRSTB; - rtl8xxxu_write8(priv, REG_RF_CTRL, val8); - rtl8xxxu_init_phy_regs(priv, rtl8192eu_phy_init_table); - } else if (priv->hi_pa) + if (priv->hi_pa) rtl8xxxu_init_phy_regs(priv, rtl8188ru_phy_1t_highpa_table); else if (priv->tx_paths == 2) rtl8xxxu_init_phy_regs(priv, rtl8192cu_phy_2t_init_table); @@ -3796,6 +3756,60 @@ static int rtl8xxxu_init_phy_bb(struct rtl8xxxu_priv *priv) if (priv->rtl_chip == RTL8188C && priv->hi_pa && priv->vendor_umc && priv->chip_cut == 1) rtl8xxxu_write8(priv, REG_OFDM0_AGC_PARM1 + 2, 0x50); +} + +static void rtl8723bu_init_phy_bb(struct rtl8xxxu_priv *priv) +{ + u8 val8; + u16 val16; + + val16 = rtl8xxxu_read16(priv, REG_SYS_FUNC); + val16 |= SYS_FUNC_BB_GLB_RSTN | SYS_FUNC_BBRSTB | SYS_FUNC_DIO_RF; + rtl8xxxu_write16(priv, REG_SYS_FUNC, val16); + + rtl8xxxu_write32(priv, REG_S0S1_PATH_SWITCH, 0x00); + + /* 6. 0x1f[7:0] = 0x07 */ + val8 = RF_ENABLE | RF_RSTB | RF_SDMRSTB; + rtl8xxxu_write8(priv, REG_RF_CTRL, val8); + + /* Why? */ + rtl8xxxu_write8(priv, REG_SYS_FUNC, 0xe3); + rtl8xxxu_write8(priv, REG_AFE_XTAL_CTRL + 1, 0x80); + rtl8xxxu_init_phy_regs(priv, rtl8723b_phy_1t_init_table); +} + +static void rtl8192eu_init_phy_bb(struct rtl8xxxu_priv *priv) +{ + u8 val8; + u16 val16; + + val16 = rtl8xxxu_read16(priv, REG_SYS_FUNC); + val16 |= SYS_FUNC_BB_GLB_RSTN | SYS_FUNC_BBRSTB | SYS_FUNC_DIO_RF; + rtl8xxxu_write16(priv, REG_SYS_FUNC, val16); + + /* 6. 
0x1f[7:0] = 0x07 */ + val8 = RF_ENABLE | RF_RSTB | RF_SDMRSTB; + rtl8xxxu_write8(priv, REG_RF_CTRL, val8); + + val16 = rtl8xxxu_read16(priv, REG_SYS_FUNC); + val16 |= (SYS_FUNC_USBA | SYS_FUNC_USBD | SYS_FUNC_DIO_RF | + SYS_FUNC_BB_GLB_RSTN | SYS_FUNC_BBRSTB); + rtl8xxxu_write16(priv, REG_SYS_FUNC, val16); + val8 = RF_ENABLE | RF_RSTB | RF_SDMRSTB; + rtl8xxxu_write8(priv, REG_RF_CTRL, val8); + rtl8xxxu_init_phy_regs(priv, rtl8192eu_phy_init_table); +} + +/* + * Most of this is black magic retrieved from the old rtl8723au driver + */ +static int rtl8xxxu_init_phy_bb(struct rtl8xxxu_priv *priv) +{ + u8 val8, ldoa15, ldov12d, lpldo, ldohci12; + u32 val32; + + priv->fops->init_phy_bb(priv); if (priv->tx_paths == 1 && priv->rx_paths == 2) { /* @@ -9709,6 +9723,7 @@ static struct rtl8xxxu_fileops rtl8723au_fops = { .power_off = rtl8xxxu_power_off, .reset_8051 = rtl8xxxu_reset_8051, .llt_init = rtl8xxxu_init_llt_table, + .init_phy_bb = rtl8723au_init_phy_bb, .phy_iq_calibrate = rtl8723au_phy_iq_calibrate, .config_channel = rtl8723au_config_channel, .parse_rx_desc = rtl8xxxu_parse_rxdesc16, @@ -9736,6 +9751,7 @@ static struct rtl8xxxu_fileops rtl8723bu_fops = { .power_off = rtl8723bu_power_off, .reset_8051 = rtl8723bu_reset_8051, .llt_init = rtl8xxxu_auto_llt_table, + .init_phy_bb = rtl8723bu_init_phy_bb, .phy_init_antenna_selection = rtl8723bu_phy_init_antenna_selection, .phy_iq_calibrate = rtl8723bu_phy_iq_calibrate, .config_channel = rtl8723bu_config_channel, @@ -9769,6 +9785,7 @@ static struct rtl8xxxu_fileops rtl8192cu_fops = { .power_off = rtl8xxxu_power_off, .reset_8051 = rtl8xxxu_reset_8051, .llt_init = rtl8xxxu_init_llt_table, + .init_phy_bb = rtl8723au_init_phy_bb, .phy_iq_calibrate = rtl8723au_phy_iq_calibrate, .config_channel = rtl8723au_config_channel, .parse_rx_desc = rtl8xxxu_parse_rxdesc16, @@ -9798,6 +9815,7 @@ static struct rtl8xxxu_fileops rtl8192eu_fops = { .power_off = rtl8xxxu_power_off, .reset_8051 = rtl8xxxu_reset_8051, .llt_init = rtl8xxxu_auto_llt_table, + .init_phy_bb = rtl8192eu_init_phy_bb, .phy_iq_calibrate = rtl8192eu_phy_iq_calibrate, .config_channel = rtl8723bu_config_channel, .parse_rx_desc = rtl8xxxu_parse_rxdesc24, diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h index 2f0709ee0cb3..28874fae750e 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h @@ -1284,6 +1284,7 @@ struct rtl8xxxu_fileops { void (*power_off) (struct rtl8xxxu_priv *priv); void (*reset_8051) (struct rtl8xxxu_priv *priv); int (*llt_init) (struct rtl8xxxu_priv *priv, u8 last_tx_page); + void (*init_phy_bb) (struct rtl8xxxu_priv *priv); void (*phy_init_antenna_selection) (struct rtl8xxxu_priv *priv); void (*phy_iq_calibrate) (struct rtl8xxxu_priv *priv); void (*config_channel) (struct ieee80211_hw *hw); From ade0dedde11923e1d1f17bd81395b627821ad49c Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Thu, 14 Apr 2016 14:58:58 -0400 Subject: [PATCH 0720/1649] rtl8xxxu: Load AGC table before patching for 1T2R parts This should get the order right and avoid patching something that is later overwritten. 
Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- .../net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 28 +++++++++---------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 43359a394503..21275078b6e2 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -3811,6 +3811,20 @@ static int rtl8xxxu_init_phy_bb(struct rtl8xxxu_priv *priv) priv->fops->init_phy_bb(priv); + if (priv->rtl_chip == RTL8723B) + rtl8xxxu_init_phy_regs(priv, rtl8xxx_agc_8723bu_table); + else if (priv->rtl_chip == RTL8192E) { + if (priv->hi_pa) + rtl8xxxu_init_phy_regs(priv, + rtl8xxx_agc_8192eu_highpa_table); + else + rtl8xxxu_init_phy_regs(priv, + rtl8xxx_agc_8192eu_std_table); + } else if (priv->hi_pa) + rtl8xxxu_init_phy_regs(priv, rtl8xxx_agc_highpa_table); + else + rtl8xxxu_init_phy_regs(priv, rtl8xxx_agc_standard_table); + if (priv->tx_paths == 1 && priv->rx_paths == 2) { /* * For 1T2R boards, patch the registers. @@ -3871,20 +3885,6 @@ static int rtl8xxxu_init_phy_bb(struct rtl8xxxu_priv *priv) rtl8xxxu_write32(priv, REG_TX_TO_TX, val32); } - if (priv->rtl_chip == RTL8723B) - rtl8xxxu_init_phy_regs(priv, rtl8xxx_agc_8723bu_table); - else if (priv->rtl_chip == RTL8192E) { - if (priv->hi_pa) - rtl8xxxu_init_phy_regs(priv, - rtl8xxx_agc_8192eu_highpa_table); - else - rtl8xxxu_init_phy_regs(priv, - rtl8xxx_agc_8192eu_std_table); - } else if (priv->hi_pa) - rtl8xxxu_init_phy_regs(priv, rtl8xxx_agc_highpa_table); - else - rtl8xxxu_init_phy_regs(priv, rtl8xxx_agc_standard_table); - if (priv->has_xtalk) { val32 = rtl8xxxu_read32(priv, REG_MAC_PHY_CTRL); From c82f8d113e354588fe3351b10e0c1ea154f5c600 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Thu, 14 Apr 2016 14:58:59 -0400 Subject: [PATCH 0721/1649] rtl8xxxu: Move loading of AGC table to device specific function This moves the loading of the AGC table into init_phy_bb() and reduces the if() clutter. 
Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- .../net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 26 +++++++++---------- 1 file changed, 12 insertions(+), 14 deletions(-) diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 21275078b6e2..baa53a14a30d 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -3756,6 +3756,11 @@ static void rtl8723au_init_phy_bb(struct rtl8xxxu_priv *priv) if (priv->rtl_chip == RTL8188C && priv->hi_pa && priv->vendor_umc && priv->chip_cut == 1) rtl8xxxu_write8(priv, REG_OFDM0_AGC_PARM1 + 2, 0x50); + + if (priv->hi_pa) + rtl8xxxu_init_phy_regs(priv, rtl8xxx_agc_highpa_table); + else + rtl8xxxu_init_phy_regs(priv, rtl8xxx_agc_standard_table); } static void rtl8723bu_init_phy_bb(struct rtl8xxxu_priv *priv) @@ -3777,6 +3782,8 @@ static void rtl8723bu_init_phy_bb(struct rtl8xxxu_priv *priv) rtl8xxxu_write8(priv, REG_SYS_FUNC, 0xe3); rtl8xxxu_write8(priv, REG_AFE_XTAL_CTRL + 1, 0x80); rtl8xxxu_init_phy_regs(priv, rtl8723b_phy_1t_init_table); + + rtl8xxxu_init_phy_regs(priv, rtl8xxx_agc_8723bu_table); } static void rtl8192eu_init_phy_bb(struct rtl8xxxu_priv *priv) @@ -3799,6 +3806,11 @@ static void rtl8192eu_init_phy_bb(struct rtl8xxxu_priv *priv) val8 = RF_ENABLE | RF_RSTB | RF_SDMRSTB; rtl8xxxu_write8(priv, REG_RF_CTRL, val8); rtl8xxxu_init_phy_regs(priv, rtl8192eu_phy_init_table); + + if (priv->hi_pa) + rtl8xxxu_init_phy_regs(priv, rtl8xxx_agc_8192eu_highpa_table); + else + rtl8xxxu_init_phy_regs(priv, rtl8xxx_agc_8192eu_std_table); } /* @@ -3811,20 +3823,6 @@ static int rtl8xxxu_init_phy_bb(struct rtl8xxxu_priv *priv) priv->fops->init_phy_bb(priv); - if (priv->rtl_chip == RTL8723B) - rtl8xxxu_init_phy_regs(priv, rtl8xxx_agc_8723bu_table); - else if (priv->rtl_chip == RTL8192E) { - if (priv->hi_pa) - rtl8xxxu_init_phy_regs(priv, - rtl8xxx_agc_8192eu_highpa_table); - else - rtl8xxxu_init_phy_regs(priv, - rtl8xxx_agc_8192eu_std_table); - } else if (priv->hi_pa) - rtl8xxxu_init_phy_regs(priv, rtl8xxx_agc_highpa_table); - else - rtl8xxxu_init_phy_regs(priv, rtl8xxx_agc_standard_table); - if (priv->tx_paths == 1 && priv->rx_paths == 2) { /* * For 1T2R boards, patch the registers. 
From b84cac16f0f42b9b3f6210984c939f712c471026 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Thu, 14 Apr 2016 14:59:00 -0400 Subject: [PATCH 0722/1649] rtl8xxxu: REG_LDOA15_CTRL is only used on gen1 parts Move setting it to rtl8723au_init_phy_bb() Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- .../net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 22 ++++++++----------- 1 file changed, 9 insertions(+), 13 deletions(-) diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index baa53a14a30d..4ef8a05d924f 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -3719,7 +3719,7 @@ static int rtl8xxxu_init_phy_regs(struct rtl8xxxu_priv *priv, static void rtl8723au_init_phy_bb(struct rtl8xxxu_priv *priv) { - u8 val8; + u8 val8, ldoa15, ldov12d, lpldo, ldohci12; u16 val16; u32 val32; @@ -3761,6 +3761,13 @@ static void rtl8723au_init_phy_bb(struct rtl8xxxu_priv *priv) rtl8xxxu_init_phy_regs(priv, rtl8xxx_agc_highpa_table); else rtl8xxxu_init_phy_regs(priv, rtl8xxx_agc_standard_table); + + ldoa15 = LDOA15_ENABLE | LDOA15_OBUF; + ldov12d = LDOV12D_ENABLE | BIT(2) | (2 << LDOV12D_VADJ_SHIFT); + ldohci12 = 0x57; + lpldo = 1; + val32 = (lpldo << 24) | (ldohci12 << 16) | (ldov12d << 8) | ldoa15; + rtl8xxxu_write32(priv, REG_LDOA15_CTRL, val32); } static void rtl8723bu_init_phy_bb(struct rtl8xxxu_priv *priv) @@ -3818,7 +3825,7 @@ static void rtl8192eu_init_phy_bb(struct rtl8xxxu_priv *priv) */ static int rtl8xxxu_init_phy_bb(struct rtl8xxxu_priv *priv) { - u8 val8, ldoa15, ldov12d, lpldo, ldohci12; + u8 val8; u32 val32; priv->fops->init_phy_bb(priv); @@ -3893,17 +3900,6 @@ static int rtl8xxxu_init_phy_bb(struct rtl8xxxu_priv *priv) rtl8xxxu_write32(priv, REG_MAC_PHY_CTRL, val32); } - if (priv->rtl_chip != RTL8723B && priv->rtl_chip != RTL8192E) { - ldoa15 = LDOA15_ENABLE | LDOA15_OBUF; - ldov12d = LDOV12D_ENABLE | BIT(2) | (2 << LDOV12D_VADJ_SHIFT); - ldohci12 = 0x57; - lpldo = 1; - val32 = (lpldo << 24) | (ldohci12 << 16) | - (ldov12d << 8) | ldoa15; - - rtl8xxxu_write32(priv, REG_LDOA15_CTRL, val32); - } - if (priv->rtl_chip == RTL8192E) rtl8xxxu_write32(priv, REG_AFE_XTAL_CTRL, 0x000f81fb); From 24e8e7ec331d04c329048d3b85034c54d23f4fab Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Thu, 14 Apr 2016 14:59:01 -0400 Subject: [PATCH 0723/1649] rtl8xxxu: Store device specific TRXFF boundary in the fileops This removes another case of ugly if () clutter Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 11 +++++------ drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h | 1 + 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 4ef8a05d924f..ef93f6285aa2 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -7552,12 +7552,7 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw) /* * Set RX page boundary */ - if (priv->rtl_chip == RTL8723B) - rtl8xxxu_write16(priv, REG_TRXFF_BNDY + 2, 0x3f7f); - else if (priv->rtl_chip == RTL8192E) - rtl8xxxu_write16(priv, REG_TRXFF_BNDY + 2, 0x3cff); - else - rtl8xxxu_write16(priv, REG_TRXFF_BNDY + 2, 0x27ff); + rtl8xxxu_write16(priv, REG_TRXFF_BNDY + 2, priv->fops->trxff_boundary); ret = rtl8xxxu_download_firmware(priv); dev_dbg(dev, "%s: download_fiwmare %i\n", __func__, ret); @@ -9735,6 +9730,7 @@ static 
struct rtl8xxxu_fileops rtl8723au_fops = { .adda_1t_path_on = 0x0bdb25a0, .adda_2t_path_on_a = 0x04db25a4, .adda_2t_path_on_b = 0x0b1b25a4, + .trxff_boundary = 0x27ff, .mactable = rtl8723a_mac_init_table, }; @@ -9767,6 +9763,7 @@ static struct rtl8xxxu_fileops rtl8723bu_fops = { .adda_1t_path_on = 0x01c00014, .adda_2t_path_on_a = 0x01c00014, .adda_2t_path_on_b = 0x01c00014, + .trxff_boundary = 0x3f7f, .mactable = rtl8723b_mac_init_table, }; @@ -9797,6 +9794,7 @@ static struct rtl8xxxu_fileops rtl8192cu_fops = { .adda_1t_path_on = 0x0bdb25a0, .adda_2t_path_on_a = 0x04db25a4, .adda_2t_path_on_b = 0x0b1b25a4, + .trxff_boundary = 0x27ff, .mactable = rtl8723a_mac_init_table, }; @@ -9828,6 +9826,7 @@ static struct rtl8xxxu_fileops rtl8192eu_fops = { .adda_1t_path_on = 0x0fc01616, .adda_2t_path_on_a = 0x0fc01616, .adda_2t_path_on_b = 0x0fc01616, + .trxff_boundary = 0x3cff, .mactable = rtl8192e_mac_init_table, .total_page_num = TX_TOTAL_PAGE_NUM_8192E, .page_num_hi = TX_PAGE_NUM_HI_PQ_8192E, diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h index 28874fae750e..f66e20d4449b 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h @@ -1310,6 +1310,7 @@ struct rtl8xxxu_fileops { u32 adda_1t_path_on; u32 adda_2t_path_on_a; u32 adda_2t_path_on_b; + u16 trxff_boundary; struct rtl8xxxu_reg8val *mactable; u8 total_page_num; u8 page_num_hi; From a77372069119061313b4815d119b5fbde0676dbb Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Thu, 14 Apr 2016 14:59:02 -0400 Subject: [PATCH 0724/1649] rtl8xxxu: Do not backup RF_MODE_AG when it's never being used This was expired by the vendor driver, but we never ended up using the backed up value. Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 3 --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h | 1 - 2 files changed, 4 deletions(-) diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index ef93f6285aa2..c12a4c8cb63a 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -7667,9 +7667,6 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw) if (priv->rtl_chip != RTL8192E) rtl8xxxu_write32(priv, REG_FPGA0_XA_RF_INT_OE, 0x66f60210); - priv->rf_mode_ag[0] = rtl8xxxu_read_rfreg(priv, RF_A, - RF6052_REG_MODE_AG); - if (!macpower) { /* * Set TX buffer boundary diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h index f66e20d4449b..b87cd2bef53c 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h @@ -1228,7 +1228,6 @@ struct rtl8xxxu_priv { u8 rf_paths; u8 rx_paths; u8 tx_paths; - u32 rf_mode_ag[2]; u32 rege94; u32 rege9c; u32 regeb4; From 9b323ee97af6e0c982b57ba0a21db14728dd4476 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Thu, 14 Apr 2016 14:59:03 -0400 Subject: [PATCH 0725/1649] rtl8xxxu: Make PBP tuning a fileops parameter Rather than scattering the code with #ifdefs, use the fileops structure to hold device specific PBP values. 
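A short sketch of the same idea applied to data rather than code, with invented register layout constants (only the 0x27ff boundary value is taken from the series): per-device parameters live in one table and the init path just reads them, instead of choosing values in #ifdef or if () blocks.

  #include <stdint.h>
  #include <stdio.h>

  /* Invented page-size encoding and shifts, for illustration only. */
  #define PAGE_SIZE_128   0
  #define PAGE_SIZE_256   1
  #define PBP_RX_SHIFT    0
  #define PBP_TX_SHIFT    4

  struct chip_params {
          uint8_t pbp_rx;            /* per-device data, no #ifdefs */
          uint8_t pbp_tx;
          uint16_t trxff_boundary;
  };

  static const struct chip_params gen1_params = {
          .pbp_rx = PAGE_SIZE_128,
          .pbp_tx = PAGE_SIZE_128,
          .trxff_boundary = 0x27ff,  /* gen1 value from this series */
  };

  static uint8_t pbp_reg_value(const struct chip_params *p)
  {
          return (p->pbp_rx << PBP_RX_SHIFT) | (p->pbp_tx << PBP_TX_SHIFT);
  }

  int main(void)
  {
          printf("PBP=0x%02x TRXFF boundary=0x%04x\n",
                 pbp_reg_value(&gen1_params), gen1_params.trxff_boundary);
          return 0;
  }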
Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- .../net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 17 ++++++++++------- .../net/wireless/realtek/rtl8xxxu/rtl8xxxu.h | 2 ++ 2 files changed, 12 insertions(+), 7 deletions(-) diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index c12a4c8cb63a..3e3ca28d5709 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -7687,14 +7687,11 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw) } /* - * Transfer page size is always 128 + * The vendor drivers set PBP for all devices, except 8192e. + * There is no explanation for this in any of the sources. */ - if (priv->rtl_chip == RTL8723B) - val8 = (PBP_PAGE_SIZE_256 << PBP_PAGE_SIZE_RX_SHIFT) | - (PBP_PAGE_SIZE_256 << PBP_PAGE_SIZE_TX_SHIFT); - else - val8 = (PBP_PAGE_SIZE_128 << PBP_PAGE_SIZE_RX_SHIFT) | - (PBP_PAGE_SIZE_128 << PBP_PAGE_SIZE_TX_SHIFT); + val8 = (priv->fops->pbp_rx << PBP_PAGE_SIZE_RX_SHIFT) | + (priv->fops->pbp_tx << PBP_PAGE_SIZE_TX_SHIFT); if (priv->rtl_chip != RTL8192E) rtl8xxxu_write8(priv, REG_PBP, val8); @@ -9728,6 +9725,8 @@ static struct rtl8xxxu_fileops rtl8723au_fops = { .adda_2t_path_on_a = 0x04db25a4, .adda_2t_path_on_b = 0x0b1b25a4, .trxff_boundary = 0x27ff, + .pbp_rx = PBP_PAGE_SIZE_128, + .pbp_tx = PBP_PAGE_SIZE_128, .mactable = rtl8723a_mac_init_table, }; @@ -9761,6 +9760,8 @@ static struct rtl8xxxu_fileops rtl8723bu_fops = { .adda_2t_path_on_a = 0x01c00014, .adda_2t_path_on_b = 0x01c00014, .trxff_boundary = 0x3f7f, + .pbp_rx = PBP_PAGE_SIZE_256, + .pbp_tx = PBP_PAGE_SIZE_256, .mactable = rtl8723b_mac_init_table, }; @@ -9792,6 +9793,8 @@ static struct rtl8xxxu_fileops rtl8192cu_fops = { .adda_2t_path_on_a = 0x04db25a4, .adda_2t_path_on_b = 0x0b1b25a4, .trxff_boundary = 0x27ff, + .pbp_rx = PBP_PAGE_SIZE_128, + .pbp_tx = PBP_PAGE_SIZE_128, .mactable = rtl8723a_mac_init_table, }; diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h index b87cd2bef53c..8064b264ad0b 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h @@ -1310,6 +1310,8 @@ struct rtl8xxxu_fileops { u32 adda_2t_path_on_a; u32 adda_2t_path_on_b; u16 trxff_boundary; + u8 pbp_rx; + u8 pbp_tx; struct rtl8xxxu_reg8val *mactable; u8 total_page_num; u8 page_num_hi; From 747bf237592cd7237e52692772a3fe3abeca0872 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Thu, 14 Apr 2016 14:59:04 -0400 Subject: [PATCH 0726/1649] rtl8xxxu: Split USB quirks into gen1 and gen2 quirks This removes a bunch of if () spaghetti and re-applies the USB bus quirks for 8188/8192 that had gotten lost. 
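One detail that is easy to misread in the new gen1 helper below is the guard if (!(!priv->chip_cut && priv->vendor_umc)); it is simply the De Morgan form of the old (priv->chip_cut || !priv->vendor_umc) check, i.e. skip the extra writes only on UMC A-cut parts. A tiny self-contained check, treating both fields as booleans for the sketch:

  #include <assert.h>
  #include <stdbool.h>
  #include <stdio.h>

  int main(void)
  {
          for (int chip_cut = 0; chip_cut <= 1; chip_cut++) {
                  for (int vendor_umc = 0; vendor_umc <= 1; vendor_umc++) {
                          bool old_form = chip_cut || !vendor_umc;
                          bool new_form = !(!chip_cut && vendor_umc);

                          assert(old_form == new_form);
                  }
          }
          printf("both spellings of the A-cut guard agree\n");
          return 0;
  }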
Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- .../net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 95 +++++++++++-------- .../net/wireless/realtek/rtl8xxxu/rtl8xxxu.h | 1 + 2 files changed, 56 insertions(+), 40 deletions(-) diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 3e3ca28d5709..821be8725256 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -6899,6 +6899,50 @@ static int rtl8xxxu_flush_fifo(struct rtl8xxxu_priv *priv) return retval; } +static void rtl8xxxu_gen1_usb_quirks(struct rtl8xxxu_priv *priv) +{ + /* Fix USB interface interference issue */ + rtl8xxxu_write8(priv, 0xfe40, 0xe0); + rtl8xxxu_write8(priv, 0xfe41, 0x8d); + rtl8xxxu_write8(priv, 0xfe42, 0x80); + /* + * This sets TXDMA_OFFSET_DROP_DATA_EN (bit 9) as well as bits + * 8 and 5, for which I have found no documentation. + */ + rtl8xxxu_write32(priv, REG_TXDMA_OFFSET_CHK, 0xfd0320); + + /* + * Solve too many protocol error on USB bus. + * Can't do this for 8188/8192 UMC A cut parts + */ + if (!(!priv->chip_cut && priv->vendor_umc)) { + rtl8xxxu_write8(priv, 0xfe40, 0xe6); + rtl8xxxu_write8(priv, 0xfe41, 0x94); + rtl8xxxu_write8(priv, 0xfe42, 0x80); + + rtl8xxxu_write8(priv, 0xfe40, 0xe0); + rtl8xxxu_write8(priv, 0xfe41, 0x19); + rtl8xxxu_write8(priv, 0xfe42, 0x80); + + rtl8xxxu_write8(priv, 0xfe40, 0xe5); + rtl8xxxu_write8(priv, 0xfe41, 0x91); + rtl8xxxu_write8(priv, 0xfe42, 0x80); + + rtl8xxxu_write8(priv, 0xfe40, 0xe2); + rtl8xxxu_write8(priv, 0xfe41, 0x81); + rtl8xxxu_write8(priv, 0xfe42, 0x80); + } +} + +static void rtl8xxxu_gen2_usb_quirks(struct rtl8xxxu_priv *priv) +{ + u32 val32; + + val32 = rtl8xxxu_read32(priv, REG_TXDMA_OFFSET_CHK); + val32 |= TXDMA_OFFSET_DROP_DATA_EN; + rtl8xxxu_write32(priv, REG_TXDMA_OFFSET_CHK, val32); +} + static int rtl8723au_power_on(struct rtl8xxxu_priv *priv) { u8 val8; @@ -7563,29 +7607,6 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw) if (ret) goto exit; - /* Solve too many protocol error on USB bus */ - /* Can't do this for 8188/8192 UMC A cut parts */ - if (priv->rtl_chip == RTL8723A || - ((priv->rtl_chip == RTL8192C || priv->rtl_chip == RTL8191C || - priv->rtl_chip == RTL8188C) && - (priv->chip_cut || !priv->vendor_umc))) { - rtl8xxxu_write8(priv, 0xfe40, 0xe6); - rtl8xxxu_write8(priv, 0xfe41, 0x94); - rtl8xxxu_write8(priv, 0xfe42, 0x80); - - rtl8xxxu_write8(priv, 0xfe40, 0xe0); - rtl8xxxu_write8(priv, 0xfe41, 0x19); - rtl8xxxu_write8(priv, 0xfe42, 0x80); - - rtl8xxxu_write8(priv, 0xfe40, 0xe5); - rtl8xxxu_write8(priv, 0xfe41, 0x91); - rtl8xxxu_write8(priv, 0xfe42, 0x80); - - rtl8xxxu_write8(priv, 0xfe40, 0xe2); - rtl8xxxu_write8(priv, 0xfe41, 0x81); - rtl8xxxu_write8(priv, 0xfe42, 0x80); - } - if (priv->fops->phy_init_antenna_selection) priv->fops->phy_init_antenna_selection(priv); @@ -7604,6 +7625,12 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw) case RTL8723A: rftable = rtl8723au_radioa_1t_init_table; ret = rtl8xxxu_init_phy_rf(priv, rftable, RF_A); + + /* Reduce 80M spur */ + rtl8xxxu_write32(priv, REG_AFE_XTAL_CTRL, 0x0381808d); + rtl8xxxu_write32(priv, REG_AFE_PLL_CTRL, 0xf0ffff83); + rtl8xxxu_write32(priv, REG_AFE_PLL_CTRL, 0xf0ffff82); + rtl8xxxu_write32(priv, REG_AFE_PLL_CTRL, 0xf0ffff83); break; case RTL8723B: rftable = rtl8723bu_radioa_1t_init_table; @@ -7706,23 +7733,7 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw) /* * Chip specific quirks */ - if (priv->rtl_chip == RTL8723A) { - /* Fix 
USB interface interference issue */ - rtl8xxxu_write8(priv, 0xfe40, 0xe0); - rtl8xxxu_write8(priv, 0xfe41, 0x8d); - rtl8xxxu_write8(priv, 0xfe42, 0x80); - rtl8xxxu_write32(priv, REG_TXDMA_OFFSET_CHK, 0xfd0320); - - /* Reduce 80M spur */ - rtl8xxxu_write32(priv, REG_AFE_XTAL_CTRL, 0x0381808d); - rtl8xxxu_write32(priv, REG_AFE_PLL_CTRL, 0xf0ffff83); - rtl8xxxu_write32(priv, REG_AFE_PLL_CTRL, 0xf0ffff82); - rtl8xxxu_write32(priv, REG_AFE_PLL_CTRL, 0xf0ffff83); - } else { - val32 = rtl8xxxu_read32(priv, REG_TXDMA_OFFSET_CHK); - val32 |= TXDMA_OFFSET_DROP_DATA_EN; - rtl8xxxu_write32(priv, REG_TXDMA_OFFSET_CHK, val32); - } + priv->fops->usb_quirks(priv); /* * Presumably this is for 8188EU as well @@ -9712,6 +9723,7 @@ static struct rtl8xxxu_fileops rtl8723au_fops = { .parse_rx_desc = rtl8xxxu_parse_rxdesc16, .enable_rf = rtl8723a_enable_rf, .disable_rf = rtl8723a_disable_rf, + .usb_quirks = rtl8xxxu_gen1_usb_quirks, .set_tx_power = rtl8723a_set_tx_power, .update_rate_mask = rtl8723au_update_rate_mask, .report_connect = rtl8723au_report_connect, @@ -9746,6 +9758,7 @@ static struct rtl8xxxu_fileops rtl8723bu_fops = { .init_statistics = rtl8723bu_init_statistics, .enable_rf = rtl8723b_enable_rf, .disable_rf = rtl8723b_disable_rf, + .usb_quirks = rtl8xxxu_gen2_usb_quirks, .set_tx_power = rtl8723b_set_tx_power, .update_rate_mask = rtl8723bu_update_rate_mask, .report_connect = rtl8723bu_report_connect, @@ -9780,6 +9793,7 @@ static struct rtl8xxxu_fileops rtl8192cu_fops = { .parse_rx_desc = rtl8xxxu_parse_rxdesc16, .enable_rf = rtl8723a_enable_rf, .disable_rf = rtl8723a_disable_rf, + .usb_quirks = rtl8xxxu_gen1_usb_quirks, .set_tx_power = rtl8723a_set_tx_power, .update_rate_mask = rtl8723au_update_rate_mask, .report_connect = rtl8723au_report_connect, @@ -9813,6 +9827,7 @@ static struct rtl8xxxu_fileops rtl8192eu_fops = { .parse_rx_desc = rtl8xxxu_parse_rxdesc24, .enable_rf = rtl8723b_enable_rf, .disable_rf = rtl8723b_disable_rf, + .usb_quirks = rtl8xxxu_gen2_usb_quirks, .set_tx_power = rtl8192e_set_tx_power, .update_rate_mask = rtl8723bu_update_rate_mask, .report_connect = rtl8723bu_report_connect, diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h index 8064b264ad0b..da86f3f528b4 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h @@ -1293,6 +1293,7 @@ struct rtl8xxxu_fileops { void (*init_statistics) (struct rtl8xxxu_priv *priv); void (*enable_rf) (struct rtl8xxxu_priv *priv); void (*disable_rf) (struct rtl8xxxu_priv *priv); + void (*usb_quirks) (struct rtl8xxxu_priv *priv); void (*set_tx_power) (struct rtl8xxxu_priv *priv, int channel, bool ht40); void (*update_rate_mask) (struct rtl8xxxu_priv *priv, From 31133da702e4229acbd813350660f3053daa57c6 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Thu, 14 Apr 2016 14:59:05 -0400 Subject: [PATCH 0727/1649] rtl8xxxu: Remove unneeded 8192eu hack This removes an unneeded hack for 8192eu, and allows for initializing REG_FPGA0_XAB_RF_SW_CTRL at the same point as it is done for all other parts. 
Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 821be8725256..5c56a4fc088b 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -7680,15 +7680,12 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw) /* RFSW Control - clear bit 14 ?? */ if (priv->rtl_chip != RTL8723B && priv->rtl_chip != RTL8192E) rtl8xxxu_write32(priv, REG_FPGA0_TX_INFO, 0x00000003); - /* 0x07000760 */ - if (priv->rtl_chip == RTL8192E) { - val32 = 0; - } else { - val32 = FPGA0_RF_TRSW | FPGA0_RF_TRSWB | FPGA0_RF_ANTSW | - FPGA0_RF_ANTSWB | FPGA0_RF_PAPE | - ((FPGA0_RF_ANTSW | FPGA0_RF_ANTSWB | FPGA0_RF_PAPE) << - FPGA0_RF_BD_CTRL_SHIFT); - } + + val32 = FPGA0_RF_TRSW | FPGA0_RF_TRSWB | FPGA0_RF_ANTSW | + FPGA0_RF_ANTSWB | FPGA0_RF_PAPE | + ((FPGA0_RF_ANTSW | FPGA0_RF_ANTSWB | FPGA0_RF_PAPE) << + FPGA0_RF_BD_CTRL_SHIFT); + rtl8xxxu_write32(priv, REG_FPGA0_XAB_RF_SW_CTRL, val32); /* 0x860[6:5]= 00 - why? - this sets antenna B */ if (priv->rtl_chip != RTL8192E) From 46b378318de9e8e9d3b479e6adb2cec55c01c2ef Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Thu, 14 Apr 2016 14:59:06 -0400 Subject: [PATCH 0728/1649] rtl8xxxu: 8192eu Fix bug in LDPC RX hang fix Write the adjusted value back to the correct register Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 5c56a4fc088b..6febda5d0c75 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -7967,7 +7967,7 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw) rtl8xxxu_write8(priv, REG_8192E_LDOV12_CTRL, 0x75); val32 &= 0xfff00fff; val32 |= 0x0007e000; - rtl8xxxu_write32(priv, REG_8192E_LDOV12_CTRL, val32); + rtl8xxxu_write32(priv, REG_AFE_MISC, val32); } exit: return ret; From a39b683966559902dab4ea7d24b3223102d6971f Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Thu, 14 Apr 2016 14:59:07 -0400 Subject: [PATCH 0729/1649] Re-enable 8192eu support Revert "rtl8xxxu: Temporarily disable 8192eu device init" This reverts commit ccfe1e85322090649d2fae599e55300c1512bf15. 
Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 6febda5d0c75..39a033c02871 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -3137,10 +3137,6 @@ static int rtl8192eu_parse_efuse(struct rtl8xxxu_priv *priv) raw[i + 6], raw[i + 7]); } } - /* - * Temporarily disable 8192eu support - */ - return -EINVAL; return 0; } From e1d70c9b04000ee122c450627ed0614c676339a5 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Thu, 14 Apr 2016 16:37:06 -0400 Subject: [PATCH 0730/1649] rtl8xxxu: Mark 0x050d:0x1004 as tested This dongle was tested successfully by Andrea Merello Reported-by: Andrea Merello Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 39a033c02871..037b64f7bea4 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -9560,6 +9560,10 @@ static int rtl8xxxu_probe(struct usb_interface *interface, if (id->idProduct == 0x7811) untested = 0; break; + case 0x050d: + if (id->idProduct == 0x1004) + untested = 0; + break; default: break; } @@ -9864,6 +9868,9 @@ static struct usb_device_id dev_table[] = { /* Tested by Larry Finger */ {USB_DEVICE_AND_INTERFACE_INFO(0x7392, 0x7811, 0xff, 0xff, 0xff), .driver_info = (unsigned long)&rtl8192cu_fops}, +/* Tested by Andrea Merello */ +{USB_DEVICE_AND_INTERFACE_INFO(0x050d, 0x1004, 0xff, 0xff, 0xff), + .driver_info = (unsigned long)&rtl8192cu_fops}, /* Currently untested 8188 series devices */ {USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x8191, 0xff, 0xff, 0xff), .driver_info = (unsigned long)&rtl8192cu_fops}, @@ -9948,8 +9955,6 @@ static struct usb_device_id dev_table[] = { /* Currently untested 8192 series devices */ {USB_DEVICE_AND_INTERFACE_INFO(0x04bb, 0x0950, 0xff, 0xff, 0xff), .driver_info = (unsigned long)&rtl8192cu_fops}, -{USB_DEVICE_AND_INTERFACE_INFO(0x050d, 0x1004, 0xff, 0xff, 0xff), - .driver_info = (unsigned long)&rtl8192cu_fops}, {USB_DEVICE_AND_INTERFACE_INFO(0x050d, 0x2102, 0xff, 0xff, 0xff), .driver_info = (unsigned long)&rtl8192cu_fops}, {USB_DEVICE_AND_INTERFACE_INFO(0x050d, 0x2103, 0xff, 0xff, 0xff), From 37ba4b6265ea0aa6c3369e4cf736ecbac31c40c9 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Thu, 14 Apr 2016 16:37:07 -0400 Subject: [PATCH 0731/1649] rtl8xxxu: fix uninitialized return value in ret several functions are not initializing a return status in ret resulting in garbage to be returned instead of 0 for success. Currently, the calls to these functions are not checking the return, however, it seems prudent to return the correct status in case they are to be checked at a later date. 
Signed-off-by: Colin Ian King Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 037b64f7bea4..b88cf1631489 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -6370,7 +6370,7 @@ static void rtl8xxxu_set_ampdu_min_space(struct rtl8xxxu_priv *priv, u8 density) static int rtl8xxxu_active_to_emu(struct rtl8xxxu_priv *priv) { u8 val8; - int count, ret; + int count, ret = 0; /* Start of rtl8723AU_card_enable_flow */ /* Act to Cardemu sequence*/ @@ -6420,7 +6420,7 @@ static int rtl8723bu_active_to_emu(struct rtl8xxxu_priv *priv) u8 val8; u16 val16; u32 val32; - int count, ret; + int count, ret = 0; /* Turn off RF */ rtl8xxxu_write8(priv, REG_RF_CTRL, 0); @@ -6477,7 +6477,7 @@ static int rtl8xxxu_active_to_lps(struct rtl8xxxu_priv *priv) { u8 val8; u8 val32; - int count, ret; + int count, ret = 0; rtl8xxxu_write8(priv, REG_TXPAUSE, 0xff); From 4062b8ffec36df80d808d3bc3352e0f9c9abd3de Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Thu, 14 Apr 2016 16:37:08 -0400 Subject: [PATCH 0732/1649] rtl8xxxu: Move PHY RF init into device specific functions Load the RF table in init_phy_rf(), which allows for applying device specific RF hacks in the same place. Getting rid of more ugly if () clutter. Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- .../net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 133 +++++++++++------- .../net/wireless/realtek/rtl8xxxu/rtl8xxxu.h | 1 + 2 files changed, 80 insertions(+), 54 deletions(-) diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index b88cf1631489..50ca3eb875bd 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -4015,6 +4015,80 @@ static int rtl8xxxu_init_phy_rf(struct rtl8xxxu_priv *priv, return 0; } +static int rtl8723au_init_phy_rf(struct rtl8xxxu_priv *priv) +{ + int ret; + + ret = rtl8xxxu_init_phy_rf(priv, rtl8723au_radioa_1t_init_table, RF_A); + + /* Reduce 80M spur */ + rtl8xxxu_write32(priv, REG_AFE_XTAL_CTRL, 0x0381808d); + rtl8xxxu_write32(priv, REG_AFE_PLL_CTRL, 0xf0ffff83); + rtl8xxxu_write32(priv, REG_AFE_PLL_CTRL, 0xf0ffff82); + rtl8xxxu_write32(priv, REG_AFE_PLL_CTRL, 0xf0ffff83); + + return ret; +} + +static int rtl8723bu_init_phy_rf(struct rtl8xxxu_priv *priv) +{ + int ret; + + ret = rtl8xxxu_init_phy_rf(priv, rtl8723bu_radioa_1t_init_table, RF_A); + /* + * PHY LCK + */ + rtl8xxxu_write_rfreg(priv, RF_A, 0xb0, 0xdfbe0); + rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_MODE_AG, 0x8c01); + msleep(200); + rtl8xxxu_write_rfreg(priv, RF_A, 0xb0, 0xdffe0); + + return ret; +} + +#ifdef CONFIG_RTL8XXXU_UNTESTED +static int rtl8192cu_init_phy_rf(struct rtl8xxxu_priv *priv) +{ + struct rtl8xxxu_rfregval *rftable; + int ret; + + if (priv->rtl_chip == RTL8188C) { + if (priv->hi_pa) + rftable = rtl8188ru_radioa_1t_highpa_table; + else + rftable = rtl8192cu_radioa_1t_init_table; + ret = rtl8xxxu_init_phy_rf(priv, rftable, RF_A); + } else if (priv->rf_paths == 1) { + rftable = rtl8192cu_radioa_1t_init_table; + ret = rtl8xxxu_init_phy_rf(priv, rftable, RF_A); + } else { + rftable = rtl8192cu_radioa_2t_init_table; + ret = rtl8xxxu_init_phy_rf(priv, rftable, RF_A); + if (ret) + goto exit; + rftable = rtl8192cu_radiob_2t_init_table; + ret 
= rtl8xxxu_init_phy_rf(priv, rftable, RF_B); + } + +exit: + return ret; +} +#endif + +static int rtl8192eu_init_phy_rf(struct rtl8xxxu_priv *priv) +{ + int ret; + + ret = rtl8xxxu_init_phy_rf(priv, rtl8192eu_radioa_init_table, RF_A); + if (ret) + goto exit; + + ret = rtl8xxxu_init_phy_rf(priv, rtl8192eu_radiob_init_table, RF_B); + +exit: + return ret; +} + static int rtl8xxxu_llt_write(struct rtl8xxxu_priv *priv, u8 address, u8 data) { int ret = -EBUSY; @@ -7552,7 +7626,6 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw) { struct rtl8xxxu_priv *priv = hw->priv; struct device *dev = &priv->udev->dev; - struct rtl8xxxu_rfregval *rftable; bool macpower; int ret; u8 val8; @@ -7617,59 +7690,7 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw) if (ret) goto exit; - switch(priv->rtl_chip) { - case RTL8723A: - rftable = rtl8723au_radioa_1t_init_table; - ret = rtl8xxxu_init_phy_rf(priv, rftable, RF_A); - - /* Reduce 80M spur */ - rtl8xxxu_write32(priv, REG_AFE_XTAL_CTRL, 0x0381808d); - rtl8xxxu_write32(priv, REG_AFE_PLL_CTRL, 0xf0ffff83); - rtl8xxxu_write32(priv, REG_AFE_PLL_CTRL, 0xf0ffff82); - rtl8xxxu_write32(priv, REG_AFE_PLL_CTRL, 0xf0ffff83); - break; - case RTL8723B: - rftable = rtl8723bu_radioa_1t_init_table; - ret = rtl8xxxu_init_phy_rf(priv, rftable, RF_A); - /* - * PHY LCK - */ - rtl8xxxu_write_rfreg(priv, RF_A, 0xb0, 0xdfbe0); - rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_MODE_AG, 0x8c01); - msleep(200); - rtl8xxxu_write_rfreg(priv, RF_A, 0xb0, 0xdffe0); - break; - case RTL8188C: - if (priv->hi_pa) - rftable = rtl8188ru_radioa_1t_highpa_table; - else - rftable = rtl8192cu_radioa_1t_init_table; - ret = rtl8xxxu_init_phy_rf(priv, rftable, RF_A); - break; - case RTL8191C: - rftable = rtl8192cu_radioa_1t_init_table; - ret = rtl8xxxu_init_phy_rf(priv, rftable, RF_A); - break; - case RTL8192C: - rftable = rtl8192cu_radioa_2t_init_table; - ret = rtl8xxxu_init_phy_rf(priv, rftable, RF_A); - if (ret) - break; - rftable = rtl8192cu_radiob_2t_init_table; - ret = rtl8xxxu_init_phy_rf(priv, rftable, RF_B); - break; - case RTL8192E: - rftable = rtl8192eu_radioa_init_table; - ret = rtl8xxxu_init_phy_rf(priv, rftable, RF_A); - if (ret) - break; - rftable = rtl8192eu_radiob_init_table; - ret = rtl8xxxu_init_phy_rf(priv, rftable, RF_B); - break; - default: - ret = -EINVAL; - } - + ret = priv->fops->init_phy_rf(priv); if (ret) goto exit; @@ -9715,6 +9736,7 @@ static struct rtl8xxxu_fileops rtl8723au_fops = { .reset_8051 = rtl8xxxu_reset_8051, .llt_init = rtl8xxxu_init_llt_table, .init_phy_bb = rtl8723au_init_phy_bb, + .init_phy_rf = rtl8723au_init_phy_rf, .phy_iq_calibrate = rtl8723au_phy_iq_calibrate, .config_channel = rtl8723au_config_channel, .parse_rx_desc = rtl8xxxu_parse_rxdesc16, @@ -9747,6 +9769,7 @@ static struct rtl8xxxu_fileops rtl8723bu_fops = { .reset_8051 = rtl8723bu_reset_8051, .llt_init = rtl8xxxu_auto_llt_table, .init_phy_bb = rtl8723bu_init_phy_bb, + .init_phy_rf = rtl8723bu_init_phy_rf, .phy_init_antenna_selection = rtl8723bu_phy_init_antenna_selection, .phy_iq_calibrate = rtl8723bu_phy_iq_calibrate, .config_channel = rtl8723bu_config_channel, @@ -9785,6 +9808,7 @@ static struct rtl8xxxu_fileops rtl8192cu_fops = { .reset_8051 = rtl8xxxu_reset_8051, .llt_init = rtl8xxxu_init_llt_table, .init_phy_bb = rtl8723au_init_phy_bb, + .init_phy_rf = rtl8192cu_init_phy_rf, .phy_iq_calibrate = rtl8723au_phy_iq_calibrate, .config_channel = rtl8723au_config_channel, .parse_rx_desc = rtl8xxxu_parse_rxdesc16, @@ -9819,6 +9843,7 @@ static struct rtl8xxxu_fileops rtl8192eu_fops = { 
.reset_8051 = rtl8xxxu_reset_8051, .llt_init = rtl8xxxu_auto_llt_table, .init_phy_bb = rtl8192eu_init_phy_bb, + .init_phy_rf = rtl8192eu_init_phy_rf, .phy_iq_calibrate = rtl8192eu_phy_iq_calibrate, .config_channel = rtl8723bu_config_channel, .parse_rx_desc = rtl8xxxu_parse_rxdesc24, diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h index da86f3f528b4..3efbb6042ca2 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h @@ -1284,6 +1284,7 @@ struct rtl8xxxu_fileops { void (*reset_8051) (struct rtl8xxxu_priv *priv); int (*llt_init) (struct rtl8xxxu_priv *priv, u8 last_tx_page); void (*init_phy_bb) (struct rtl8xxxu_priv *priv); + int (*init_phy_rf) (struct rtl8xxxu_priv *priv); void (*phy_init_antenna_selection) (struct rtl8xxxu_priv *priv); void (*phy_iq_calibrate) (struct rtl8xxxu_priv *priv); void (*config_channel) (struct ieee80211_hw *hw); From b591e982bc44baf79f6ebb963eb47fe5e223e8e7 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Thu, 14 Apr 2016 16:37:09 -0400 Subject: [PATCH 0733/1649] rtl8xxxu: For devices with external PA (8188RU), limit CCK TX power Per the vendor driver, devices with an external PA needs limiting it's TX power to 0x20. Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 50ca3eb875bd..578d1bf2c821 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -2421,6 +2421,13 @@ rtl8723a_set_tx_power(struct rtl8xxxu_priv *priv, int channel, bool ht40) cck[0] = priv->cck_tx_power_index_A[group]; cck[1] = priv->cck_tx_power_index_B[group]; + if (priv->hi_pa) { + if (cck[0] > 0x20) + cck[0] = 0x20; + if (cck[1] > 0x20) + cck[1] = 0x20; + } + ofdm[0] = priv->ht40_1s_tx_power_index_A[group]; ofdm[1] = priv->ht40_1s_tx_power_index_B[group]; From 78a8421959af5e13dd96493e3b74d8d7c7406609 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Thu, 14 Apr 2016 16:37:10 -0400 Subject: [PATCH 0734/1649] rtl8xxxu: Apply 8188RU workaround for UMC B cut parts correctly This patch was being missed since rtl_chip will never match RTL8188C if hi_pa is true. Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 578d1bf2c821..bebe484c1dd6 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -3756,7 +3756,7 @@ static void rtl8723au_init_phy_bb(struct rtl8xxxu_priv *priv) else rtl8xxxu_init_phy_regs(priv, rtl8723a_phy_1t_init_table); - if (priv->rtl_chip == RTL8188C && priv->hi_pa && + if (priv->rtl_chip == RTL8188R && priv->hi_pa && priv->vendor_umc && priv->chip_cut == 1) rtl8xxxu_write8(priv, REG_OFDM0_AGC_PARM1 + 2, 0x50); From 8d95c8084f5621240006f730986a3346b7794863 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Thu, 14 Apr 2016 16:37:11 -0400 Subject: [PATCH 0735/1649] rtl8xxxu: Use rtl_chip == RTL8188R to identify high PA parts This is simpler than checking for RTL8188C && hi_pa. 
Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index bebe484c1dd6..8bf9a74bec63 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -3054,6 +3054,7 @@ static int rtl8192cu_parse_efuse(struct rtl8xxxu_priv *priv) if (efuse->rf_regulatory & 0x20) { sprintf(priv->chip_name, "8188RU"); + priv->rtl_chip = RTL8188R; priv->hi_pa = 1; } @@ -4059,11 +4060,8 @@ static int rtl8192cu_init_phy_rf(struct rtl8xxxu_priv *priv) struct rtl8xxxu_rfregval *rftable; int ret; - if (priv->rtl_chip == RTL8188C) { - if (priv->hi_pa) - rftable = rtl8188ru_radioa_1t_highpa_table; - else - rftable = rtl8192cu_radioa_1t_init_table; + if (priv->rtl_chip == RTL8188R) { + rftable = rtl8188ru_radioa_1t_highpa_table; ret = rtl8xxxu_init_phy_rf(priv, rftable, RF_A); } else if (priv->rf_paths == 1) { rftable = rtl8192cu_radioa_1t_init_table; @@ -7219,7 +7217,7 @@ static int rtl8192cu_power_on(struct rtl8xxxu_priv *priv) /* * Workaround for 8188RU LNA power leakage problem. */ - if (priv->rtl_chip == RTL8188C && priv->hi_pa) { + if (priv->rtl_chip == RTL8188R) { val32 = rtl8xxxu_read32(priv, REG_FPGA0_XCD_RF_PARM); val32 &= ~BIT(1); rtl8xxxu_write32(priv, REG_FPGA0_XCD_RF_PARM, val32); @@ -7323,7 +7321,7 @@ static void rtl8xxxu_power_off(struct rtl8xxxu_priv *priv) /* * Workaround for 8188RU LNA power leakage problem. */ - if (priv->rtl_chip == RTL8188C && priv->hi_pa) { + if (priv->rtl_chip == RTL8188R) { val32 = rtl8xxxu_read32(priv, REG_FPGA0_XCD_RF_PARM); val32 |= BIT(1); rtl8xxxu_write32(priv, REG_FPGA0_XCD_RF_PARM, val32); From 8e25496090431553ce2200f53bbe194b9e0a19d2 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Thu, 14 Apr 2016 16:37:12 -0400 Subject: [PATCH 0736/1649] rtl8xxxu: Match 8723bu power down sequence to vendor driver In particular set APS_FSMCO_WLON_RESET in the right register, and do not overwrite too much of REG_CR. 
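The REG_CR half of the change is easy to miss: replacing a 16-bit write of 0x0000 with an 8-bit write means only the intended byte is cleared and the neighbouring byte keeps its value. A toy model with invented helpers and addresses, illustration only:

  #include <stdint.h>
  #include <stdio.h>
  #include <string.h>

  static uint8_t regs[4];         /* toy register file */

  static void write8(unsigned int addr, uint8_t val)
  {
          regs[addr] = val;
  }

  static void write16(unsigned int addr, uint16_t val)
  {
          memcpy(&regs[addr], &val, sizeof(val));   /* touches two bytes */
  }

  int main(void)
  {
          memset(regs, 0xff, sizeof(regs));
          write16(0, 0x0000);
          printf("write16: %02x %02x\n", regs[0], regs[1]);  /* 00 00 */

          memset(regs, 0xff, sizeof(regs));
          write8(0, 0x00);
          printf("write8:  %02x %02x\n", regs[0], regs[1]);  /* 00 ff */
          return 0;
  }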
Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 8bf9a74bec63..5685fd744fbc 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -6510,9 +6510,9 @@ static int rtl8723bu_active_to_emu(struct rtl8xxxu_priv *priv) rtl8xxxu_write16(priv, REG_GPIO_INTM, val16); /* Release WLON reset 0x04[16]= 1*/ - val32 = rtl8xxxu_read32(priv, REG_GPIO_INTM); + val32 = rtl8xxxu_read32(priv, REG_APS_FSMCO); val32 |= APS_FSMCO_WLON_RESET; - rtl8xxxu_write32(priv, REG_GPIO_INTM, val32); + rtl8xxxu_write32(priv, REG_APS_FSMCO, val32); /* 0x0005[1] = 1 turn off MAC by HW state machine*/ val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1); @@ -7376,7 +7376,7 @@ static void rtl8723bu_power_off(struct rtl8xxxu_priv *priv) val8 &= ~TX_REPORT_CTRL_TIMER_ENABLE; rtl8xxxu_write8(priv, REG_TX_REPORT_CTRL, val8); - rtl8xxxu_write16(priv, REG_CR, 0x0000); + rtl8xxxu_write8(priv, REG_CR, 0x0000); rtl8xxxu_active_to_lps(priv); @@ -7393,7 +7393,15 @@ static void rtl8723bu_power_off(struct rtl8xxxu_priv *priv) rtl8xxxu_write8(priv, REG_MCU_FW_DL, 0x00); rtl8723bu_active_to_emu(priv); - rtl8xxxu_emu_to_disabled(priv); + + val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1); + val8 |= BIT(3); /* APS_FSMCO_HW_SUSPEND */ + rtl8xxxu_write8(priv, REG_APS_FSMCO + 1, val8); + + /* 0x48[16] = 1 to enable GPIO9 as EXT wakeup */ + val8 = rtl8xxxu_read8(priv, REG_GPIO_INTM + 2); + val8 |= BIT(0); + rtl8xxxu_write8(priv, REG_GPIO_INTM + 2, val8); } #ifdef NEED_PS_TDMA From 8cae2f1da87c82b2a0031f4d19c142e7bc22f1d7 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Thu, 14 Apr 2016 16:37:13 -0400 Subject: [PATCH 0737/1649] rtl8xxxu: Unregister from mac80211 before shutting down the device This fixes a long standing bug where mac80211 would send disconnect packets to the device, after we had shut down the device. 
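A minimal sketch of the ordering this patch establishes in the USB disconnect path (stub functions, not the driver's API): detach from mac80211 first so no new frames can be handed down, then power the hardware off, then free the driver state.

  #include <stdio.h>

  static void unregister_from_mac80211(void)
  {
          puts("1. unregister hw: mac80211 stops handing us frames");
  }

  static void power_off_hardware(void)
  {
          puts("2. power off the device");
  }

  static void free_driver_state(void)
  {
          puts("3. free firmware buffer, destroy mutexes");
  }

  static void disconnect(void)
  {
          /* Order matters: if the hardware is powered off first, mac80211
           * may still try to send disconnect frames to a dead device. */
          unregister_from_mac80211();
          power_off_hardware();
          free_driver_state();
  }

  int main(void)
  {
          disconnect();
          return 0;
  }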
Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 5685fd744fbc..2b71a2baaf9b 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -8003,13 +8003,6 @@ exit: return ret; } -static void rtl8xxxu_disable_device(struct ieee80211_hw *hw) -{ - struct rtl8xxxu_priv *priv = hw->priv; - - priv->fops->power_off(priv); -} - static void rtl8xxxu_cam_write(struct rtl8xxxu_priv *priv, struct ieee80211_key_conf *key, const u8 *mac) { @@ -9726,13 +9719,14 @@ static void rtl8xxxu_disconnect(struct usb_interface *interface) hw = usb_get_intfdata(interface); priv = hw->priv; - rtl8xxxu_disable_device(hw); + ieee80211_unregister_hw(hw); + + priv->fops->power_off(priv); + usb_set_intfdata(interface, NULL); dev_info(&priv->udev->dev, "disconnecting\n"); - ieee80211_unregister_hw(hw); - kfree(priv->fw_data); mutex_destroy(&priv->usb_buf_mutex); mutex_destroy(&priv->h2c_mutex); From eb18806261da3de958b5569fb6d93ed0d773028b Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Thu, 14 Apr 2016 16:37:14 -0400 Subject: [PATCH 0738/1649] rtl8xxxu: Update copyright statement to include 2016 Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 2 +- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h | 2 +- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 2b71a2baaf9b..de62f0ba11d7 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -1,7 +1,7 @@ /* * RTL8XXXU mac80211 USB driver * - * Copyright (c) 2014 - 2015 Jes Sorensen + * Copyright (c) 2014 - 2016 Jes Sorensen * * Portions, notably calibration code: * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved. diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h index 3efbb6042ca2..a1b076cdab6f 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014 - 2015 Jes Sorensen + * Copyright (c) 2014 - 2016 Jes Sorensen * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h index e7709a5bcb3a..b0e0c642302c 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014 - 2015 Jes Sorensen + * Copyright (c) 2014 - 2016 Jes Sorensen * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as From b9f9d6992f8338e919c977e4e18473f57dcb874d Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Thu, 14 Apr 2016 16:37:15 -0400 Subject: [PATCH 0739/1649] rtl8xxxu: Set register 0xfe10 on rtl8192cu based parts This register is undocumented in the vendor code, but it is set unconditionally for all 8192cu/8188cu/8188ru parts. 
Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index de62f0ba11d7..6280d3d0cec7 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -7214,6 +7214,8 @@ static int rtl8192cu_power_on(struct rtl8xxxu_priv *priv) CR_SCHEDULE_ENABLE | CR_MAC_TX_ENABLE | CR_MAC_RX_ENABLE; rtl8xxxu_write16(priv, REG_CR, val16); + rtl8xxxu_write8(priv, 0xfe10, 0x19); + /* * Workaround for 8188RU LNA power leakage problem. */ From 2fc0b8e5a17dc415508d918eeeb1aa47443ae642 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Thu, 14 Apr 2016 16:37:16 -0400 Subject: [PATCH 0740/1649] rtl8xxxu: Add TX power base values for gen1 parts Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- .../net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 118 +++++++++++++++--- .../net/wireless/realtek/rtl8xxxu/rtl8xxxu.h | 26 ++++ 2 files changed, 130 insertions(+), 14 deletions(-) diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 6280d3d0cec7..422e7fa6756d 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -214,6 +214,72 @@ static struct rtl8xxxu_reg8val rtl8192e_mac_init_table[] = { {0xffff, 0xff}, }; +static struct rtl8xxxu_power_base rtl8188r_power_base = { + .reg_0e00 = 0x06080808, + .reg_0e04 = 0x00040406, + .reg_0e08 = 0x00000000, + .reg_086c = 0x00000000, + + .reg_0e10 = 0x04060608, + .reg_0e14 = 0x00020204, + .reg_0e18 = 0x04060608, + .reg_0e1c = 0x00020204, + + .reg_0830 = 0x06080808, + .reg_0834 = 0x00040406, + .reg_0838 = 0x00000000, + .reg_086c_2 = 0x00000000, + + .reg_083c = 0x04060608, + .reg_0848 = 0x00020204, + .reg_084c = 0x04060608, + .reg_0868 = 0x00020204, +}; + +static struct rtl8xxxu_power_base rtl8192c_power_base = { + .reg_0e00 = 0x07090c0c, + .reg_0e04 = 0x01020405, + .reg_0e08 = 0x00000000, + .reg_086c = 0x00000000, + + .reg_0e10 = 0x0b0c0c0e, + .reg_0e14 = 0x01030506, + .reg_0e18 = 0x0b0c0d0e, + .reg_0e1c = 0x01030509, + + .reg_0830 = 0x07090c0c, + .reg_0834 = 0x01020405, + .reg_0838 = 0x00000000, + .reg_086c_2 = 0x00000000, + + .reg_083c = 0x0b0c0d0e, + .reg_0848 = 0x01030509, + .reg_084c = 0x0b0c0d0e, + .reg_0868 = 0x01030509, +}; + +static struct rtl8xxxu_power_base rtl8723a_power_base = { + .reg_0e00 = 0x0a0c0c0c, + .reg_0e04 = 0x02040608, + .reg_0e08 = 0x00000000, + .reg_086c = 0x00000000, + + .reg_0e10 = 0x0a0c0d0e, + .reg_0e14 = 0x02040608, + .reg_0e18 = 0x0a0c0d0e, + .reg_0e1c = 0x02040608, + + .reg_0830 = 0x0a0c0c0c, + .reg_0834 = 0x02040608, + .reg_0838 = 0x00000000, + .reg_086c_2 = 0x00000000, + + .reg_083c = 0x0a0c0d0e, + .reg_0848 = 0x02040608, + .reg_084c = 0x0a0c0d0e, + .reg_0868 = 0x02040608, +}; + static struct rtl8xxxu_reg32val rtl8723a_phy_1t_init_table[] = { {0x800, 0x80040000}, {0x804, 0x00000003}, {0x808, 0x0000fc00}, {0x80c, 0x0000000a}, @@ -2410,6 +2476,7 @@ static void rtl8723bu_config_channel(struct ieee80211_hw *hw) static void rtl8723a_set_tx_power(struct rtl8xxxu_priv *priv, int channel, bool ht40) { + struct rtl8xxxu_power_base *power_base = priv->power_base; u8 cck[RTL8723A_MAX_RF_PATHS], ofdm[RTL8723A_MAX_RF_PATHS]; u8 ofdmbase[RTL8723A_MAX_RF_PATHS], mcsbase[RTL8723A_MAX_RF_PATHS]; u32 val32, ofdm_a, ofdm_b, mcs_a, mcs_b; @@ -2418,8 +2485,8 @@ 
rtl8723a_set_tx_power(struct rtl8xxxu_priv *priv, int channel, bool ht40) group = rtl8723a_channel_to_group(channel); - cck[0] = priv->cck_tx_power_index_A[group]; - cck[1] = priv->cck_tx_power_index_B[group]; + cck[0] = priv->cck_tx_power_index_A[group] - 1; + cck[1] = priv->cck_tx_power_index_B[group] - 1; if (priv->hi_pa) { if (cck[0] > 0x20) @@ -2430,6 +2497,10 @@ rtl8723a_set_tx_power(struct rtl8xxxu_priv *priv, int channel, bool ht40) ofdm[0] = priv->ht40_1s_tx_power_index_A[group]; ofdm[1] = priv->ht40_1s_tx_power_index_B[group]; + if (ofdm[0]) + ofdm[0] -= 1; + if (ofdm[1]) + ofdm[1] -= 1; ofdmbase[0] = ofdm[0] + priv->ofdm_tx_power_index_diff[group].a; ofdmbase[1] = ofdm[1] + priv->ofdm_tx_power_index_diff[group].b; @@ -2485,27 +2556,39 @@ rtl8723a_set_tx_power(struct rtl8xxxu_priv *priv, int channel, bool ht40) ofdmbase[0] << 16 | ofdmbase[0] << 24; ofdm_b = ofdmbase[1] | ofdmbase[1] << 8 | ofdmbase[1] << 16 | ofdmbase[1] << 24; - rtl8xxxu_write32(priv, REG_TX_AGC_A_RATE18_06, ofdm_a); - rtl8xxxu_write32(priv, REG_TX_AGC_B_RATE18_06, ofdm_b); - rtl8xxxu_write32(priv, REG_TX_AGC_A_RATE54_24, ofdm_a); - rtl8xxxu_write32(priv, REG_TX_AGC_B_RATE54_24, ofdm_b); + rtl8xxxu_write32(priv, REG_TX_AGC_A_RATE18_06, + ofdm_a + power_base->reg_0e00); + rtl8xxxu_write32(priv, REG_TX_AGC_B_RATE18_06, + ofdm_b + power_base->reg_0830); + + rtl8xxxu_write32(priv, REG_TX_AGC_A_RATE54_24, + ofdm_a + power_base->reg_0e04); + rtl8xxxu_write32(priv, REG_TX_AGC_B_RATE54_24, + ofdm_b + power_base->reg_0834); mcs_a = mcsbase[0] | mcsbase[0] << 8 | mcsbase[0] << 16 | mcsbase[0] << 24; mcs_b = mcsbase[1] | mcsbase[1] << 8 | mcsbase[1] << 16 | mcsbase[1] << 24; - rtl8xxxu_write32(priv, REG_TX_AGC_A_MCS03_MCS00, mcs_a); - rtl8xxxu_write32(priv, REG_TX_AGC_B_MCS03_MCS00, mcs_b); + rtl8xxxu_write32(priv, REG_TX_AGC_A_MCS03_MCS00, + mcs_a + power_base->reg_0e10); + rtl8xxxu_write32(priv, REG_TX_AGC_B_MCS03_MCS00, + mcs_b + power_base->reg_083c); - rtl8xxxu_write32(priv, REG_TX_AGC_A_MCS07_MCS04, mcs_a); - rtl8xxxu_write32(priv, REG_TX_AGC_B_MCS07_MCS04, mcs_b); + rtl8xxxu_write32(priv, REG_TX_AGC_A_MCS07_MCS04, + mcs_a + power_base->reg_0e14); + rtl8xxxu_write32(priv, REG_TX_AGC_B_MCS07_MCS04, + mcs_b + power_base->reg_0848); - rtl8xxxu_write32(priv, REG_TX_AGC_A_MCS11_MCS08, mcs_a); - rtl8xxxu_write32(priv, REG_TX_AGC_B_MCS11_MCS08, mcs_b); + rtl8xxxu_write32(priv, REG_TX_AGC_A_MCS11_MCS08, + mcs_a + power_base->reg_0e18); + rtl8xxxu_write32(priv, REG_TX_AGC_B_MCS11_MCS08, + mcs_b + power_base->reg_084c); - rtl8xxxu_write32(priv, REG_TX_AGC_A_MCS15_MCS12, mcs_a); + rtl8xxxu_write32(priv, REG_TX_AGC_A_MCS15_MCS12, + mcs_a + power_base->reg_0e1c); for (i = 0; i < 3; i++) { if (i != 2) val8 = (mcsbase[0] > 8) ? (mcsbase[0] - 8) : 0; @@ -2513,7 +2596,8 @@ rtl8723a_set_tx_power(struct rtl8xxxu_priv *priv, int channel, bool ht40) val8 = (mcsbase[0] > 6) ? (mcsbase[0] - 6) : 0; rtl8xxxu_write8(priv, REG_OFDM0_XC_TX_IQ_IMBALANCE + i, val8); } - rtl8xxxu_write32(priv, REG_TX_AGC_B_MCS15_MCS12, mcs_b); + rtl8xxxu_write32(priv, REG_TX_AGC_B_MCS15_MCS12, + mcs_b + power_base->reg_0868); for (i = 0; i < 3; i++) { if (i != 2) val8 = (mcsbase[1] > 8) ? 
(mcsbase[1] - 8) : 0; @@ -2920,6 +3004,9 @@ static int rtl8723au_parse_efuse(struct rtl8xxxu_priv *priv) priv->has_xtalk = 1; priv->xtalk = priv->efuse_wifi.efuse8723.xtal_k & 0x3f; } + + priv->power_base = &rtl8723a_power_base; + dev_info(&priv->udev->dev, "Vendor: %.7s\n", efuse->vendor_name); dev_info(&priv->udev->dev, "Product: %.41s\n", @@ -3052,10 +3139,13 @@ static int rtl8192cu_parse_efuse(struct rtl8xxxu_priv *priv) dev_info(&priv->udev->dev, "Product: %.20s\n", efuse->device_name); + priv->power_base = &rtl8192c_power_base; + if (efuse->rf_regulatory & 0x20) { sprintf(priv->chip_name, "8188RU"); priv->rtl_chip = RTL8188R; priv->hi_pa = 1; + priv->power_base = &rtl8188r_power_base; } if (rtl8xxxu_debug & RTL8XXXU_DEBUG_EFUSE) { diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h index a1b076cdab6f..ebd1a6e7b813 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h @@ -629,6 +629,31 @@ struct rtl8xxxu_firmware_header { u8 data[0]; }; +/* + * 8723au/8192cu/8188ru required base power index offset tables. + */ +struct rtl8xxxu_power_base { + u32 reg_0e00; + u32 reg_0e04; + u32 reg_0e08; + u32 reg_086c; + + u32 reg_0e10; + u32 reg_0e14; + u32 reg_0e18; + u32 reg_0e1c; + + u32 reg_0830; + u32 reg_0834; + u32 reg_0838; + u32 reg_086c_2; + + u32 reg_083c; + u32 reg_0848; + u32 reg_084c; + u32 reg_0868; +}; + /* * The 8723au has 3 channel groups: 1-3, 4-9, and 10-14 */ @@ -1201,6 +1226,7 @@ struct rtl8xxxu_priv { struct rtl8723au_idx ofdm_tx_power_diff[RTL8723B_TX_COUNT]; struct rtl8723au_idx ht20_tx_power_diff[RTL8723B_TX_COUNT]; struct rtl8723au_idx ht40_tx_power_diff[RTL8723B_TX_COUNT]; + struct rtl8xxxu_power_base *power_base; u32 chip_cut:4; u32 rom_rev:4; u32 is_multi_func:1; From cabb550e2b97b8ea42124a93d4665ca052f3e3ff Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Thu, 14 Apr 2016 16:37:17 -0400 Subject: [PATCH 0741/1649] rtl8xxxu: Fix 8188RU support The 8188RU does not like PAPE to be enabled, while all the other gen1 parts seem to require it. This makes the RTL8188RU able to associate for me. 
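The change below introduces a per-device quirk flag (no_pape) and simply leaves the PAPE bits out of the RF switch control word when the flag is set. A standalone sketch of that pattern; the bit positions here are invented for illustration and are not the real FPGA0_* values.

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

/* invented bit layout, for illustration only */
#define RF_TRSW			(1u << 0)
#define RF_TRSWB		(1u << 1)
#define RF_ANTSW		(1u << 2)
#define RF_ANTSWB		(1u << 3)
#define RF_PAPE			(1u << 4)
#define RF_BD_CTRL_SHIFT	16

static uint32_t rf_sw_ctrl(bool no_pape)
{
	uint32_t val = RF_TRSW | RF_TRSWB | RF_ANTSW | RF_ANTSWB |
		       ((RF_ANTSW | RF_ANTSWB) << RF_BD_CTRL_SHIFT);

	if (!no_pape)		/* every gen1 part except the 8188RU */
		val |= RF_PAPE | (RF_PAPE << RF_BD_CTRL_SHIFT);

	return val;
}

int main(void)
{
	printf("other gen1 parts: %08x\n", rf_sw_ctrl(false));
	printf("8188RU (no PAPE): %08x\n", rf_sw_ctrl(true));
	return 0;
}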
Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- .../net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 21 ++++++++++++------- .../net/wireless/realtek/rtl8xxxu/rtl8xxxu.h | 1 + 2 files changed, 15 insertions(+), 7 deletions(-) diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 422e7fa6756d..cf7832bfdde8 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -3145,6 +3145,7 @@ static int rtl8192cu_parse_efuse(struct rtl8xxxu_priv *priv) sprintf(priv->chip_name, "8188RU"); priv->rtl_chip = RTL8188R; priv->hi_pa = 1; + priv->no_pape = 1; priv->power_base = &rtl8188r_power_base; } @@ -5555,9 +5556,12 @@ static void rtl8xxxu_phy_iqcalibrate(struct rtl8xxxu_priv *priv, rtl8xxxu_write32(priv, REG_OFDM0_TR_MUX_PAR, 0x000800e4); rtl8xxxu_write32(priv, REG_FPGA0_XCD_RF_SW_CTRL, 0x22204000); - val32 = rtl8xxxu_read32(priv, REG_FPGA0_XAB_RF_SW_CTRL); - val32 |= (FPGA0_RF_PAPE | (FPGA0_RF_PAPE << FPGA0_RF_BD_CTRL_SHIFT)); - rtl8xxxu_write32(priv, REG_FPGA0_XAB_RF_SW_CTRL, val32); + if (!priv->no_pape) { + val32 = rtl8xxxu_read32(priv, REG_FPGA0_XAB_RF_SW_CTRL); + val32 |= (FPGA0_RF_PAPE | + (FPGA0_RF_PAPE << FPGA0_RF_BD_CTRL_SHIFT)); + rtl8xxxu_write32(priv, REG_FPGA0_XAB_RF_SW_CTRL, val32); + } val32 = rtl8xxxu_read32(priv, REG_FPGA0_XA_RF_INT_OE); val32 &= ~BIT(10); @@ -7804,11 +7808,14 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw) rtl8xxxu_write32(priv, REG_FPGA0_TX_INFO, 0x00000003); val32 = FPGA0_RF_TRSW | FPGA0_RF_TRSWB | FPGA0_RF_ANTSW | - FPGA0_RF_ANTSWB | FPGA0_RF_PAPE | - ((FPGA0_RF_ANTSW | FPGA0_RF_ANTSWB | FPGA0_RF_PAPE) << - FPGA0_RF_BD_CTRL_SHIFT); - + FPGA0_RF_ANTSWB | + ((FPGA0_RF_ANTSW | FPGA0_RF_ANTSWB) << FPGA0_RF_BD_CTRL_SHIFT); + if (!priv->no_pape) { + val32 |= (FPGA0_RF_PAPE | + (FPGA0_RF_PAPE << FPGA0_RF_BD_CTRL_SHIFT)); + } rtl8xxxu_write32(priv, REG_FPGA0_XAB_RF_SW_CTRL, val32); + /* 0x860[6:5]= 00 - why? - this sets antenna B */ if (priv->rtl_chip != RTL8192E) rtl8xxxu_write32(priv, REG_FPGA0_XA_RF_INT_OE, 0x66f60210); diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h index ebd1a6e7b813..3e2643c79b56 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h @@ -1287,6 +1287,7 @@ struct rtl8xxxu_priv { u32 bb_recovery_backup[RTL8XXXU_BB_REGS]; enum rtl8xxxu_rtl_chip rtl_chip; u8 pi_enabled:1; + u8 no_pape:1; u8 int_buf[USB_INTR_CONTENT_LENGTH]; }; From 6a62f9d5273dbc8ec776717a73a8a223b9e5f38e Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Thu, 14 Apr 2016 16:37:18 -0400 Subject: [PATCH 0742/1649] rtl8xxxu: Fix OOPS if user tries to add device via /sys This driver relies on driver_info in struct usb_device_id, so allowing adding a device via /sys/bus/usb/drivers/rtl8xxxu/new_id will cause a NULL pointer dereference. Set .no_dynamic_id = 1 to disable hot add of USB IDs. 
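The crash mode is easy to picture: each entry in the static USB ID table carries a driver_info value pointing at per-chip data, while an ID added at runtime through new_id carries driver_info == 0, and a probe path that trusts the pointer dereferences NULL. A simplified standalone model; the struct names are stand-ins, not the real driver types.

#include <stdio.h>

struct dev_id {
	unsigned long driver_info;	/* 0 for IDs added via new_id */
};

struct fileops {
	const char *name;
};

static int probe(const struct dev_id *id)
{
	/* the driver trusts driver_info to point at valid per-chip ops */
	const struct fileops *fops = (const struct fileops *)id->driver_info;

	printf("probing with %s ops\n", fops->name);	/* NULL deref when 0 */
	return 0;
}

int main(void)
{
	static const struct fileops gen1_ops = { "gen1" };
	struct dev_id from_table = { (unsigned long)&gen1_ops };
	struct dev_id from_sysfs = { 0 };

	probe(&from_table);
	/* probe(&from_sysfs) would crash; refusing dynamic IDs keeps such an
	 * entry from ever reaching probe().
	 */
	(void)from_sysfs;
	return 0;
}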
Reported-by: Xose Vazquez Perez Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index cf7832bfdde8..2d92e643fcd6 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -10141,6 +10141,7 @@ static struct usb_driver rtl8xxxu_driver = { .probe = rtl8xxxu_probe, .disconnect = rtl8xxxu_disconnect, .id_table = dev_table, + .no_dynamic_id = 1, .disable_hub_initiated_lpm = 1, }; From ae5c01fd2fd816486169787440bf900d1b703230 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Thu, 14 Apr 2016 16:37:19 -0400 Subject: [PATCH 0743/1649] rtl8xxxu: Implement rtl8192e_enable_rf() This implements an 8192eu specific enable_rf() function. The 8192eu is not a combo device, so no need for doing the BT specific bits needed by the 8723bu. Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- .../net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 39 ++++++++++++++++++- 1 file changed, 38 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 2d92e643fcd6..a86b5c40efe4 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -7517,6 +7517,43 @@ static void rtl8723bu_set_ps_tdma(struct rtl8xxxu_priv *priv, } #endif +static void rtl8192e_enable_rf(struct rtl8xxxu_priv *priv) +{ + u32 val32; + u8 val8; + + val8 = rtl8xxxu_read8(priv, REG_GPIO_MUXCFG); + val8 |= BIT(5); + rtl8xxxu_write8(priv, REG_GPIO_MUXCFG, val8); + + /* + * WLAN action by PTA + */ + rtl8xxxu_write8(priv, REG_WLAN_ACT_CONTROL_8723B, 0x04); + + val32 = rtl8xxxu_read32(priv, REG_PWR_DATA); + val32 |= PWR_DATA_EEPRPAD_RFE_CTRL_EN; + rtl8xxxu_write32(priv, REG_PWR_DATA, val32); + + val32 = rtl8xxxu_read32(priv, REG_RFE_BUFFER); + val32 |= (BIT(0) | BIT(1)); + rtl8xxxu_write32(priv, REG_RFE_BUFFER, val32); + + rtl8xxxu_write8(priv, REG_RFE_CTRL_ANTA_SRC, 0x77); + + val32 = rtl8xxxu_read32(priv, REG_LEDCFG0); + val32 &= ~BIT(24); + val32 |= BIT(23); + rtl8xxxu_write32(priv, REG_LEDCFG0, val32); + + /* + * Fix external switch Main->S1, Aux->S0 + */ + val8 = rtl8xxxu_read8(priv, REG_PAD_CTRL1); + val8 &= ~BIT(0); + rtl8xxxu_write8(priv, REG_PAD_CTRL1, val8); +} + static void rtl8723b_enable_rf(struct rtl8xxxu_priv *priv) { struct h2c_cmd h2c; @@ -9953,7 +9990,7 @@ static struct rtl8xxxu_fileops rtl8192eu_fops = { .phy_iq_calibrate = rtl8192eu_phy_iq_calibrate, .config_channel = rtl8723bu_config_channel, .parse_rx_desc = rtl8xxxu_parse_rxdesc24, - .enable_rf = rtl8723b_enable_rf, + .enable_rf = rtl8192e_enable_rf, .disable_rf = rtl8723b_disable_rf, .usb_quirks = rtl8xxxu_gen2_usb_quirks, .set_tx_power = rtl8192e_set_tx_power, From 265697eb2f1b6c472c4b09ff7e008bc8ab145b4f Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Thu, 14 Apr 2016 16:37:20 -0400 Subject: [PATCH 0744/1649] rtl8xxxu: Pause TX before calling disable_rf() All the disable_rf() functions were setting REG_TXPAUSE to 0xff to stop transmission. Do it centrally before calling disable_rf() instead. 
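The refactor is the usual "hoist the common step into the caller" move: rather than each per-chip disable_rf() writing the TX-pause register itself, the common stop path does it once before dispatching through the ops table. A small sketch of the shape of that change; the register write and the ops names here are illustrative, not the driver's real symbols.

#include <stdio.h>

static void write_txpause(unsigned char val)
{
	printf("REG_TXPAUSE <- 0x%02x\n", val);
}

static void chip_a_disable_rf(void) { printf("chip A: RF off\n"); }
static void chip_b_disable_rf(void) { printf("chip B: RF off\n"); }

struct chip_ops {
	void (*disable_rf)(void);
};

/* common stop path: pause TX once, then run the chip-specific hook */
static void stop(const struct chip_ops *ops)
{
	write_txpause(0xff);
	ops->disable_rf();
}

int main(void)
{
	const struct chip_ops a = { chip_a_disable_rf };
	const struct chip_ops b = { chip_b_disable_rf };

	stop(&a);
	stop(&b);
	return 0;
}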
Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index a86b5c40efe4..928ca56f751c 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -2125,8 +2125,6 @@ static void rtl8723a_disable_rf(struct rtl8xxxu_priv *priv) u8 sps0; u32 val32; - rtl8xxxu_write8(priv, REG_TXPAUSE, 0xff); - sps0 = rtl8xxxu_read8(priv, REG_SPS0_CTRL); /* RF RX code for preamble power saving */ @@ -7665,8 +7663,6 @@ static void rtl8723b_disable_rf(struct rtl8xxxu_priv *priv) { u32 val32; - rtl8xxxu_write8(priv, REG_TXPAUSE, 0xff); - val32 = rtl8xxxu_read32(priv, REG_RX_WAIT_CCA); val32 &= ~(BIT(22) | BIT(23)); rtl8xxxu_write32(priv, REG_RX_WAIT_CCA, val32); @@ -9591,6 +9587,8 @@ static void rtl8xxxu_stop(struct ieee80211_hw *hw) if (priv->usb_interrupts) usb_kill_anchored_urbs(&priv->int_anchor); + rtl8xxxu_write8(priv, REG_TXPAUSE, 0xff); + priv->fops->disable_rf(priv); /* From 171a900c4eb7b34667e64ffed316a50425b45581 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Thu, 14 Apr 2016 16:37:21 -0400 Subject: [PATCH 0745/1649] rtl8xxxu: MAINTAINERS: Update to point to the active devel branch Update the MAINTAINERS info to reflect active development of the rtl8xxxu driver. Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- MAINTAINERS | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/MAINTAINERS b/MAINTAINERS index 4851f02281d6..6ac970ba54b8 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -9490,7 +9490,7 @@ F: drivers/net/wireless/realtek/rtlwifi/rtl8192ce/ RTL8XXXU WIRELESS DRIVER (rtl8xxxu) M: Jes Sorensen L: linux-wireless@vger.kernel.org -T: git git://git.kernel.org/pub/scm/linux/kernel/git/jes/linux.git rtl8723au-mac80211 +T: git git://git.kernel.org/pub/scm/linux/kernel/git/jes/linux.git rtl8xxxu-devel S: Maintained F: drivers/net/wireless/realtek/rtl8xxxu/ From 634696b197411e7a95b346d6e5c21841f29fcedd Mon Sep 17 00:00:00 2001 From: Jon Paul Maloy Date: Fri, 15 Apr 2016 13:33:03 -0400 Subject: [PATCH 0746/1649] tipc: guarantee peer bearer id exchange after reboot When a link endpoint is going down locally, e.g., because its interface is being stopped, it will spontaneously send out a RESET message to its peer, informing it about this fact. This saves the peer from detecting the failure via probing, and hence gives both speedier and less resource consuming failure detection on the peer side. According to the link FSM, a receiver of a RESET message, ignoring the reason for it, must now consider the sender ready to come back up, and starts periodically sending out ACTIVATE messages to the peer in order to re-establish the link. Also, according to the FSM, the receiver of an ACTIVATE message can now go directly to state ESTABLISHED and start sending regular traffic packets. This is a well-proven and robust FSM. However, in the case of a reboot, there is a small possibilty that link endpoint on the rebooted node may have been re-created with a new bearer identity between the moment it sent its (pre-boot) RESET and the moment it receives the ACTIVATE from the peer. The new bearer identity cannot be known by the peer according to this scenario, since traffic headers don't convey such information. 
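The fix described in the rest of this message is to flag the situation explicitly: spontaneously generated RESETs carry a one-bit "stopping" marker in the link protocol header, packed with the usual word/bit/mask scheme (word 5, bit 13 in the patch below). A minimal standalone model of that packing; the kernel's msg_set_bits()/msg_bits() helpers also handle network byte order, which this sketch skips.

#include <stdint.h>
#include <stdio.h>

static uint32_t hdr[11];	/* a TIPC-like header of 32-bit words */

static void set_bits(uint32_t *h, int word, int pos, uint32_t mask, uint32_t val)
{
	h[word] &= ~(mask << pos);
	h[word] |= (val & mask) << pos;
}

static uint32_t get_bits(const uint32_t *h, int word, int pos, uint32_t mask)
{
	return (h[word] >> pos) & mask;
}

int main(void)
{
	/* the new flag lives in word 5, bit 13, one bit wide */
	set_bits(hdr, 5, 13, 0x1, 1);
	printf("peer_stopping=%u, word5=0x%08x\n",
	       get_bits(hdr, 5, 13, 0x1), hdr[5]);
	return 0;
}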
This is a problem, because both endpoints need to know the correct value of the peer's bearer id at any moment in time in order to be able to produce correct link events for their users. The only way to guarantee this is to enforce a full setup message exchange (RESET + ACTIVATE) even after the reboot, since those messages carry the bearer idientity in their header. In this commit we do this by introducing and setting a "stopping" bit in the header of the spontaneously generated RESET messages, informing the peer that the sender will not be immediately ready to re-establish the link. A receiver seeing this bit must act as if this were a locally detected connectivity failure, and hence has to go through a full two- way setup message exchange before any link can be re-established. Although never reported, this problem seems to have always been around. This protocol addition is fully backwards compatible. Acked-by: Ying Xue Signed-off-by: Jon Maloy Signed-off-by: David S. Miller --- net/tipc/link.c | 10 +++++++++- net/tipc/msg.h | 10 ++++++++++ 2 files changed, 19 insertions(+), 1 deletion(-) diff --git a/net/tipc/link.c b/net/tipc/link.c index 7d2bb3e70baa..8b98fafc88a4 100644 --- a/net/tipc/link.c +++ b/net/tipc/link.c @@ -1140,11 +1140,17 @@ int tipc_link_build_ack_msg(struct tipc_link *l, struct sk_buff_head *xmitq) void tipc_link_build_reset_msg(struct tipc_link *l, struct sk_buff_head *xmitq) { int mtyp = RESET_MSG; + struct sk_buff *skb; if (l->state == LINK_ESTABLISHING) mtyp = ACTIVATE_MSG; tipc_link_build_proto_msg(l, mtyp, 0, 0, 0, 0, xmitq); + + /* Inform peer that this endpoint is going down if applicable */ + skb = skb_peek_tail(xmitq); + if (skb && (l->state == LINK_RESET)) + msg_set_peer_stopping(buf_msg(skb), 1); } /* tipc_link_build_nack_msg: prepare link nack message for transmission @@ -1411,7 +1417,9 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb, l->priority = peers_prio; /* ACTIVATE_MSG serves as PEER_RESET if link is already down */ - if ((mtyp == RESET_MSG) || !link_is_up(l)) + if (msg_peer_stopping(hdr)) + rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT); + else if ((mtyp == RESET_MSG) || !link_is_up(l)) rc = tipc_link_fsm_evt(l, LINK_PEER_RESET_EVT); /* ACTIVATE_MSG takes up link if it was already locally reset */ diff --git a/net/tipc/msg.h b/net/tipc/msg.h index f34f639df643..58bf51541813 100644 --- a/net/tipc/msg.h +++ b/net/tipc/msg.h @@ -715,6 +715,16 @@ static inline void msg_set_redundant_link(struct tipc_msg *m, u32 r) msg_set_bits(m, 5, 12, 0x1, r); } +static inline u32 msg_peer_stopping(struct tipc_msg *m) +{ + return msg_bits(m, 5, 13, 0x1); +} + +static inline void msg_set_peer_stopping(struct tipc_msg *m, u32 s) +{ + msg_set_bits(m, 5, 13, 0x1, s); +} + static inline char *msg_media_addr(struct tipc_msg *m) { return (char *)&m->hdr[TIPC_MEDIA_INFO_OFFSET]; From 88e8ac7000dc7ccf99975cc4070907e26a1027f9 Mon Sep 17 00:00:00 2001 From: Jon Paul Maloy Date: Fri, 15 Apr 2016 13:33:04 -0400 Subject: [PATCH 0747/1649] tipc: reduce transmission rate of reset messages when link is down When a link is down, it will continuously try to re-establish contact with the peer by sending out a RESET or an ACTIVATE message at each timeout interval. The default value for this interval is currently 375 ms. This is wasteful, and may become a problem in very large clusters with dozens or hundreds of nodes being down simultaneously. We now introduce a simple backoff algorithm for these cases. 
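The exact pattern is spelled out in the next paragraph; as a quick check, the counter expression the patch adds to the LINK_RESET case (a u16 rst_cnt, zeroed whenever the link is reset) reproduces it. With the default 375 ms timer that works out to five quick attempts followed by roughly one message every 6 seconds; the loop below is a standalone simulation, not kernel code.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint16_t rst_cnt = 0;	/* cleared when the link resets */
	int tick;

	for (tick = 1; tick <= 40; tick++) {
		int xmit = rst_cnt++ <= 4;	/* first five timeouts */

		xmit |= !(rst_cnt % 16);	/* then every 16th one */
		if (xmit)
			printf("tick %2d: send RESET/ACTIVATE\n", tick);
	}
	return 0;
}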
The first five messages are sent at default rate; thereafter a message is sent only each 16th timer interval. This will cover the vast majority of link recycling cases, since the endpoint starting last will transmit at the higher speed, and the link should normally be established well be before the rate needs to be reduced. The only case where we will see a degradation of link re-establishment times is when the endpoints remain intact, and a glitch in the transmission media is causing the link reset. We will then experience a worst-case re-establishing time of 6 seconds, something we deem acceptable. Acked-by: Ying Xue Signed-off-by: Jon Maloy Signed-off-by: David S. Miller --- net/tipc/link.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/net/tipc/link.c b/net/tipc/link.c index 8b98fafc88a4..238b12526b58 100644 --- a/net/tipc/link.c +++ b/net/tipc/link.c @@ -140,6 +140,7 @@ struct tipc_link { char if_name[TIPC_MAX_IF_NAME]; u32 priority; char net_plane; + u16 rst_cnt; /* Failover/synch */ u16 drop_point; @@ -699,8 +700,6 @@ static void link_profile_stats(struct tipc_link *l) l->stats.msg_length_profile[6]++; } -/* tipc_link_timeout - perform periodic task as instructed from node timeout - */ /* tipc_link_timeout - perform periodic task as instructed from node timeout */ int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq) @@ -730,7 +729,8 @@ int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq) l->silent_intv_cnt++; break; case LINK_RESET: - xmit = true; + xmit = l->rst_cnt++ <= 4; + xmit |= !(l->rst_cnt % 16); mtyp = RESET_MSG; break; case LINK_ESTABLISHING: @@ -833,6 +833,7 @@ void tipc_link_reset(struct tipc_link *l) l->rcv_nxt = 1; l->acked = 0; l->silent_intv_cnt = 0; + l->rst_cnt = 0; l->stats.recv_info = 0; l->stale_count = 0; l->bc_peer_is_up = false; From 42b18f605feaf7aa1825b35656bb7d6fdc132b45 Mon Sep 17 00:00:00 2001 From: Jon Paul Maloy Date: Fri, 15 Apr 2016 13:33:05 -0400 Subject: [PATCH 0748/1649] tipc: refactor function tipc_link_timeout() The function tipc_link_timeout() is unnecessary complex, and can easily be made more readable. We do that with this commit. The only functional change is that we remove a redundant test for whether the broadcast link is up or not. Acked-by: Ying Xue Signed-off-by: Jon Maloy Signed-off-by: David S. 
Miller --- net/tipc/link.c | 36 ++++++++++++++++-------------------- 1 file changed, 16 insertions(+), 20 deletions(-) diff --git a/net/tipc/link.c b/net/tipc/link.c index 238b12526b58..774ad3cd1f1c 100644 --- a/net/tipc/link.c +++ b/net/tipc/link.c @@ -704,37 +704,33 @@ static void link_profile_stats(struct tipc_link *l) */ int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq) { - int rc = 0; - int mtyp = STATE_MSG; - bool xmit = false; - bool prb = false; + int mtyp, rc = 0; + bool state = false; + bool probe = false; + bool setup = false; u16 bc_snt = l->bc_sndlink->snd_nxt - 1; u16 bc_acked = l->bc_rcvlink->acked; - bool bc_up = link_is_up(l->bc_rcvlink); link_profile_stats(l); switch (l->state) { case LINK_ESTABLISHED: case LINK_SYNCHING: - if (!l->silent_intv_cnt) { - if (bc_up && (bc_acked != bc_snt)) - xmit = true; - } else if (l->silent_intv_cnt <= l->abort_limit) { - xmit = true; - prb = true; - } else { - rc |= tipc_link_fsm_evt(l, LINK_FAILURE_EVT); - } - l->silent_intv_cnt++; + if (l->silent_intv_cnt > l->abort_limit) + return tipc_link_fsm_evt(l, LINK_FAILURE_EVT); + mtyp = STATE_MSG; + state = bc_acked != bc_snt; + probe = l->silent_intv_cnt; + if (probe) + l->silent_intv_cnt++; break; case LINK_RESET: - xmit = l->rst_cnt++ <= 4; - xmit |= !(l->rst_cnt % 16); + setup = l->rst_cnt++ <= 4; + setup |= !(l->rst_cnt % 16); mtyp = RESET_MSG; break; case LINK_ESTABLISHING: - xmit = true; + setup = true; mtyp = ACTIVATE_MSG; break; case LINK_PEER_RESET: @@ -745,8 +741,8 @@ int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq) break; } - if (xmit) - tipc_link_build_proto_msg(l, mtyp, prb, 0, 0, 0, xmitq); + if (state || probe || setup) + tipc_link_build_proto_msg(l, mtyp, probe, 0, 0, 0, xmitq); return rc; } From de7e07f9ee14f47d05aa43046404c2904f0247dc Mon Sep 17 00:00:00 2001 From: Jon Paul Maloy Date: Fri, 15 Apr 2016 13:33:06 -0400 Subject: [PATCH 0749/1649] tipc: ensure that first packets on link are sent in order In some link establishment scenarios we see that packet #2 may be sent out before packet #1, forcing the receiver to demand retransmission of the missing packet. This is harmless, but may cause confusion among people tracing the packet flow. Since this is extremely easy to fix, we do so by adding en extra send call to the bearer immediately after the link has come up. Acked-by: Ying Xue Signed-off-by: Jon Maloy Signed-off-by: David S. Miller --- net/tipc/node.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/net/tipc/node.c b/net/tipc/node.c index ace178fd3850..b00e12cda66c 100644 --- a/net/tipc/node.c +++ b/net/tipc/node.c @@ -581,8 +581,12 @@ static void __tipc_node_link_up(struct tipc_node *n, int bearer_id, static void tipc_node_link_up(struct tipc_node *n, int bearer_id, struct sk_buff_head *xmitq) { + struct tipc_media_addr *maddr; + tipc_node_write_lock(n); __tipc_node_link_up(n, bearer_id, xmitq); + maddr = &n->links[bearer_id].maddr; + tipc_bearer_xmit(n->net, bearer_id, xmitq, maddr); tipc_node_write_unlock(n); } From 34b9cd64c889d41eb990aec33fc185cab706c9b0 Mon Sep 17 00:00:00 2001 From: Jon Paul Maloy Date: Fri, 15 Apr 2016 13:33:07 -0400 Subject: [PATCH 0750/1649] tipc: let first message on link be a state message According to the link FSM, a received traffic packet can take a link from state ESTABLISHING to ESTABLISHED, but the link can still not be fully set up in one atomic operation. 
This means that even if the the very first packet on the link is a traffic packet with sequence number 1 (one), it has to be dropped and retransmitted. This can be avoided if we let the mentioned packet be preceded by a LINK_PROTOCOL/STATE message, which takes up the endpoint before the arrival of the traffic. We add this small feature in this commit. This is a fully compatible change. Acked-by: Ying Xue Signed-off-by: Jon Maloy Signed-off-by: David S. Miller --- net/tipc/link.c | 6 +++--- net/tipc/link.h | 2 +- net/tipc/node.c | 5 ++++- 3 files changed, 8 insertions(+), 5 deletions(-) diff --git a/net/tipc/link.c b/net/tipc/link.c index 774ad3cd1f1c..2e28a7d7e802 100644 --- a/net/tipc/link.c +++ b/net/tipc/link.c @@ -1107,12 +1107,12 @@ static bool tipc_link_release_pkts(struct tipc_link *l, u16 acked) return released; } -/* tipc_link_build_ack_msg: prepare link acknowledge message for transmission +/* tipc_link_build_state_msg: prepare link state message for transmission * * Note that sending of broadcast ack is coordinated among nodes, to reduce * risk of ack storms towards the sender */ -int tipc_link_build_ack_msg(struct tipc_link *l, struct sk_buff_head *xmitq) +int tipc_link_build_state_msg(struct tipc_link *l, struct sk_buff_head *xmitq) { if (!l) return 0; @@ -1222,7 +1222,7 @@ int tipc_link_rcv(struct tipc_link *l, struct sk_buff *skb, if (!tipc_data_input(l, skb, l->inputq)) rc |= tipc_link_input(l, skb, l->inputq); if (unlikely(++l->rcv_unacked >= TIPC_MIN_LINK_WIN)) - rc |= tipc_link_build_ack_msg(l, xmitq); + rc |= tipc_link_build_state_msg(l, xmitq); if (unlikely(rc & ~TIPC_LINK_SND_BC_ACK)) break; } while ((skb = __skb_dequeue(defq))); diff --git a/net/tipc/link.h b/net/tipc/link.h index 6a94175ee20a..d7e9d42fcb2d 100644 --- a/net/tipc/link.h +++ b/net/tipc/link.h @@ -123,7 +123,7 @@ int tipc_nl_parse_link_prop(struct nlattr *prop, struct nlattr *props[]); int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq); int tipc_link_rcv(struct tipc_link *l, struct sk_buff *skb, struct sk_buff_head *xmitq); -int tipc_link_build_ack_msg(struct tipc_link *l, struct sk_buff_head *xmitq); +int tipc_link_build_state_msg(struct tipc_link *l, struct sk_buff_head *xmitq); void tipc_link_add_bc_peer(struct tipc_link *snd_l, struct tipc_link *uc_l, struct sk_buff_head *xmitq); diff --git a/net/tipc/node.c b/net/tipc/node.c index b00e12cda66c..68d9f7b8485c 100644 --- a/net/tipc/node.c +++ b/net/tipc/node.c @@ -545,6 +545,9 @@ static void __tipc_node_link_up(struct tipc_node *n, int bearer_id, pr_debug("Established link <%s> on network plane %c\n", tipc_link_name(nl), tipc_link_plane(nl)); + /* Ensure that a STATE message goes first */ + tipc_link_build_state_msg(nl, xmitq); + /* First link? => give it both slots */ if (!ol) { *slot0 = bearer_id; @@ -1283,7 +1286,7 @@ static void tipc_node_bc_rcv(struct net *net, struct sk_buff *skb, int bearer_id /* Broadcast ACKs are sent on a unicast link */ if (rc & TIPC_LINK_SND_BC_ACK) { tipc_node_read_lock(n); - tipc_link_build_ack_msg(le->link, &xmitq); + tipc_link_build_state_msg(le->link, &xmitq); tipc_node_read_unlock(n); } From ac18dd9e842294377dbaf1e8d169493567a81fa1 Mon Sep 17 00:00:00 2001 From: Amitoj Kaur Chawla Date: Sat, 9 Apr 2016 17:27:45 +0530 Subject: [PATCH 0751/1649] qlge: Replace create_singlethread_workqueue with alloc_ordered_workqueue Replace deprecated create_singlethread_workqueue with alloc_ordered_workqueue. 
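An ordered workqueue runs at most one work item at a time, in queueing order, which is the property the conversion relies on, as the next paragraph explains. Below is a toy userspace model of that guarantee: a single worker thread draining a FIFO. The work names are borrowed from the driver; everything else is invented for the sketch (build with -lpthread).

#include <pthread.h>
#include <stdio.h>

#define MAX_WORK 8

static const char *fifo[MAX_WORK];
static int head, tail, done;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;

static void queue_work(const char *name)
{
	pthread_mutex_lock(&lock);
	fifo[tail++ % MAX_WORK] = name;
	pthread_cond_signal(&cond);
	pthread_mutex_unlock(&lock);
}

static void *worker(void *arg)
{
	(void)arg;
	for (;;) {
		const char *name;

		pthread_mutex_lock(&lock);
		while (head == tail && !done)
			pthread_cond_wait(&cond, &lock);
		if (head == tail && done) {
			pthread_mutex_unlock(&lock);
			return NULL;
		}
		name = fifo[head++ % MAX_WORK];
		pthread_mutex_unlock(&lock);

		printf("running %s\n", name);	/* never concurrently, FIFO order */
	}
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, worker, NULL);
	queue_work("asic_reset_work");
	queue_work("mpi_reset_work");
	queue_work("mpi_work");

	pthread_mutex_lock(&lock);
	done = 1;
	pthread_cond_broadcast(&cond);
	pthread_mutex_unlock(&lock);
	pthread_join(t, NULL);
	return 0;
}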
Work items include getting tx/rx frame sizes, resetting MPI processor, setting asic recovery bit so ordering seems necessary as only one work item should be in queue/executing at any given time, hence the use of alloc_ordered_workqueue. WQ_MEM_RECLAIM flag has been set since ethernet devices seem to sit in memory reclaim path, so to guarantee forward progress regardless of memory pressure. Signed-off-by: Amitoj Kaur Chawla Acked-by: Tejun Heo Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qlge/qlge_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c index b28e73ea2c25..83d72106471c 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c +++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c @@ -4687,7 +4687,7 @@ static int ql_init_device(struct pci_dev *pdev, struct net_device *ndev, /* * Set up the operating parameters. */ - qdev->workqueue = create_singlethread_workqueue(ndev->name); + qdev->workqueue = alloc_ordered_workqueue(ndev->name, WQ_MEM_RECLAIM); INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work); INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work); INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work); From b3d051477cf94e9d71d6acadb8a90de15237b9c1 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Wed, 13 Apr 2016 22:05:39 -0700 Subject: [PATCH 0752/1649] tcp: do not mess with listener sk_wmem_alloc When removing sk_refcnt manipulation on synflood, I missed that using skb_set_owner_w() was racy, if sk->sk_wmem_alloc had already transitioned to 0. We should hold sk_refcnt instead, but this is a big deal under attack. (Doing so increase performance from 3.2 Mpps to 3.8 Mpps only) In this patch, I chose to not attach a socket to syncookies skb. Performance is now 5 Mpps instead of 3.2 Mpps. Following patch will remove last known false sharing in tcp_rcv_state_process() Fixes: 3b24d854cb35 ("tcp/dccp: do not touch listener sk_refcnt under synflood") Signed-off-by: Eric Dumazet Signed-off-by: David S. 
Miller --- include/net/tcp.h | 9 +++++++-- net/ipv4/tcp_input.c | 7 ++++--- net/ipv4/tcp_ipv4.c | 4 ++-- net/ipv4/tcp_output.c | 16 ++++++++++++---- net/ipv6/tcp_ipv6.c | 4 ++-- 5 files changed, 27 insertions(+), 13 deletions(-) diff --git a/include/net/tcp.h b/include/net/tcp.h index 74d3ed5eb219..fd40f8c64d5f 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -452,10 +452,15 @@ struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb, int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb); int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len); int tcp_connect(struct sock *sk); +enum tcp_synack_type { + TCP_SYNACK_NORMAL, + TCP_SYNACK_FASTOPEN, + TCP_SYNACK_COOKIE, +}; struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst, struct request_sock *req, struct tcp_fastopen_cookie *foc, - bool attach_req); + enum tcp_synack_type synack_type); int tcp_disconnect(struct sock *sk, int flags); void tcp_finish_connect(struct sock *sk, struct sk_buff *skb); @@ -1728,7 +1733,7 @@ struct tcp_request_sock_ops { int (*send_synack)(const struct sock *sk, struct dst_entry *dst, struct flowi *fl, struct request_sock *req, struct tcp_fastopen_cookie *foc, - bool attach_req); + enum tcp_synack_type synack_type); }; #ifdef CONFIG_SYN_COOKIES diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 983f04c11177..7ea7034af83f 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -6327,7 +6327,7 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops, } if (fastopen_sk) { af_ops->send_synack(fastopen_sk, dst, &fl, req, - &foc, false); + &foc, TCP_SYNACK_FASTOPEN); /* Add the child socket directly into the accept queue */ inet_csk_reqsk_queue_add(sk, req, fastopen_sk); sk->sk_data_ready(sk); @@ -6337,8 +6337,9 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops, tcp_rsk(req)->tfo_listener = false; if (!want_cookie) inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT); - af_ops->send_synack(sk, dst, &fl, req, - &foc, !want_cookie); + af_ops->send_synack(sk, dst, &fl, req, &foc, + !want_cookie ? 
TCP_SYNACK_NORMAL : + TCP_SYNACK_COOKIE); if (want_cookie) { reqsk_free(req); return 0; diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index f4f2a0a3849d..d2a5763e5abc 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -830,7 +830,7 @@ static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst, struct flowi *fl, struct request_sock *req, struct tcp_fastopen_cookie *foc, - bool attach_req) + enum tcp_synack_type synack_type) { const struct inet_request_sock *ireq = inet_rsk(req); struct flowi4 fl4; @@ -841,7 +841,7 @@ static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst, if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL) return -1; - skb = tcp_make_synack(sk, dst, req, foc, attach_req); + skb = tcp_make_synack(sk, dst, req, foc, synack_type); if (skb) { __tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr); diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 7d2dc015cd19..6451b83d81e9 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -2944,7 +2944,7 @@ int tcp_send_synack(struct sock *sk) struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst, struct request_sock *req, struct tcp_fastopen_cookie *foc, - bool attach_req) + enum tcp_synack_type synack_type) { struct inet_request_sock *ireq = inet_rsk(req); const struct tcp_sock *tp = tcp_sk(sk); @@ -2964,14 +2964,22 @@ struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst, /* Reserve space for headers. */ skb_reserve(skb, MAX_TCP_HEADER); - if (attach_req) { + switch (synack_type) { + case TCP_SYNACK_NORMAL: skb_set_owner_w(skb, req_to_sk(req)); - } else { + break; + case TCP_SYNACK_COOKIE: + /* Under synflood, we do not attach skb to a socket, + * to avoid false sharing. + */ + break; + case TCP_SYNACK_FASTOPEN: /* sk is a const pointer, because we want to express multiple * cpu might call us concurrently. * sk->sk_wmem_alloc in an atomic, we can promote to rw. 
*/ skb_set_owner_w(skb, (struct sock *)sk); + break; } skb_dst_set(skb, dst); @@ -3516,7 +3524,7 @@ int tcp_rtx_synack(const struct sock *sk, struct request_sock *req) int res; tcp_rsk(req)->txhash = net_tx_rndhash(); - res = af_ops->send_synack(sk, NULL, &fl, req, NULL, true); + res = af_ops->send_synack(sk, NULL, &fl, req, NULL, TCP_SYNACK_NORMAL); if (!res) { TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS); NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSYNRETRANS); diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 0e621bc1ae11..800265c7fd3f 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -439,7 +439,7 @@ static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst, struct flowi *fl, struct request_sock *req, struct tcp_fastopen_cookie *foc, - bool attach_req) + enum tcp_synack_type synack_type) { struct inet_request_sock *ireq = inet_rsk(req); struct ipv6_pinfo *np = inet6_sk(sk); @@ -452,7 +452,7 @@ static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst, IPPROTO_TCP)) == NULL) goto done; - skb = tcp_make_synack(sk, dst, req, foc, attach_req); + skb = tcp_make_synack(sk, dst, req, foc, synack_type); if (skb) { __tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr, From 8804b2722dc5d6f9b7ba0a9e812eae9ee5ce95bc Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Wed, 13 Apr 2016 22:05:40 -0700 Subject: [PATCH 0753/1649] tcp: remove false sharing in tcp_rcv_state_process() Last known hot point during SYNFLOOD attack is the clearing of rx_opt.saw_tstamp in tcp_rcv_state_process() It is not needed for a listener, so we move it where it matters. Performance while a SYNFLOOD hits a single listener socket went from 5 Mpps to 6 Mpps on my test server (24 cores, 8 NIC RX queues) Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- net/ipv4/tcp_input.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 7ea7034af83f..90e0d9256b74 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -5796,8 +5796,6 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb) int queued = 0; bool acceptable; - tp->rx_opt.saw_tstamp = 0; - switch (sk->sk_state) { case TCP_CLOSE: goto discard; @@ -5838,6 +5836,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb) goto discard; case TCP_SYN_SENT: + tp->rx_opt.saw_tstamp = 0; queued = tcp_rcv_synsent_state_process(sk, skb, th); if (queued >= 0) return queued; @@ -5849,6 +5848,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb) return 0; } + tp->rx_opt.saw_tstamp = 0; req = tp->fastopen_rsk; if (req) { WARN_ON_ONCE(sk->sk_state != TCP_SYN_RECV && From ee1c27977284907d40f7f72c2d078d709f15811f Mon Sep 17 00:00:00 2001 From: Peter Heise Date: Wed, 13 Apr 2016 13:52:22 +0200 Subject: [PATCH 0754/1649] net/hsr: Added support for HSR v1 This patch adds support for the newer version 1 of the HSR networking standard. Version 0 is still default and the new version has to be selected via iproute2. Main changes are in the supervision frame handling and its ethertype field. Signed-off-by: Peter Heise Signed-off-by: David S. 
Miller --- include/uapi/linux/if_ether.h | 1 + include/uapi/linux/if_link.h | 1 + net/hsr/Kconfig | 5 ++- net/hsr/hsr_device.c | 80 ++++++++++++++++++++--------------- net/hsr/hsr_device.h | 2 +- net/hsr/hsr_forward.c | 43 ++++++++++++++----- net/hsr/hsr_framereg.c | 30 ++++++++----- net/hsr/hsr_main.h | 13 +++++- net/hsr/hsr_netlink.c | 10 ++++- net/hsr/hsr_slave.c | 4 +- 10 files changed, 126 insertions(+), 63 deletions(-) diff --git a/include/uapi/linux/if_ether.h b/include/uapi/linux/if_ether.h index 4a93051c578c..cec849a239f6 100644 --- a/include/uapi/linux/if_ether.h +++ b/include/uapi/linux/if_ether.h @@ -92,6 +92,7 @@ #define ETH_P_TDLS 0x890D /* TDLS */ #define ETH_P_FIP 0x8914 /* FCoE Initialization Protocol */ #define ETH_P_80221 0x8917 /* IEEE 802.21 Media Independent Handover Protocol */ +#define ETH_P_HSR 0x892F /* IEC 62439-3 HSRv1 */ #define ETH_P_LOOPBACK 0x9000 /* Ethernet loopback packet, per IEEE 802.3 */ #define ETH_P_QINQ1 0x9100 /* deprecated QinQ VLAN [ NOT AN OFFICIALLY REGISTERED ID ] */ #define ETH_P_QINQ2 0x9200 /* deprecated QinQ VLAN [ NOT AN OFFICIALLY REGISTERED ID ] */ diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index 9427f17d06d6..bb3a90b57199 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h @@ -773,6 +773,7 @@ enum { IFLA_HSR_SLAVE1, IFLA_HSR_SLAVE2, IFLA_HSR_MULTICAST_SPEC, /* Last byte of supervision addr */ + IFLA_HSR_VERSION, /* HSR version */ IFLA_HSR_SUPERVISION_ADDR, /* Supervision frame multicast addr */ IFLA_HSR_SEQ_NR, __IFLA_HSR_MAX, diff --git a/net/hsr/Kconfig b/net/hsr/Kconfig index 0d3d709052ca..4b683fd0abf1 100644 --- a/net/hsr/Kconfig +++ b/net/hsr/Kconfig @@ -18,8 +18,9 @@ config HSR earlier. This code is a "best effort" to comply with the HSR standard as - described in IEC 62439-3:2010 (HSRv0), but no compliancy tests have - been made. + described in IEC 62439-3:2010 (HSRv0) and IEC 62439-3:2012 (HSRv1), + but no compliancy tests have been made. Use iproute2 to select + the version you desire. You need to perform any and all necessary tests yourself before relying on this code in a safety critical system! diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c index c7d1adca30d8..386cbce7bc51 100644 --- a/net/hsr/hsr_device.c +++ b/net/hsr/hsr_device.c @@ -90,7 +90,8 @@ static void hsr_check_announce(struct net_device *hsr_dev, hsr = netdev_priv(hsr_dev); - if ((hsr_dev->operstate == IF_OPER_UP) && (old_operstate != IF_OPER_UP)) { + if ((hsr_dev->operstate == IF_OPER_UP) + && (old_operstate != IF_OPER_UP)) { /* Went up */ hsr->announce_count = 0; hsr->announce_timer.expires = jiffies + @@ -250,31 +251,22 @@ static const struct header_ops hsr_header_ops = { .parse = eth_header_parse, }; - -/* HSR:2010 supervision frames should be padded so that the whole frame, - * including headers and FCS, is 64 bytes (without VLAN). 
- */ -static int hsr_pad(int size) -{ - const int min_size = ETH_ZLEN - HSR_HLEN - ETH_HLEN; - - if (size >= min_size) - return size; - return min_size; -} - -static void send_hsr_supervision_frame(struct hsr_port *master, u8 type) +static void send_hsr_supervision_frame(struct hsr_port *master, + u8 type, u8 hsrVer) { struct sk_buff *skb; int hlen, tlen; + struct hsr_tag *hsr_tag; struct hsr_sup_tag *hsr_stag; struct hsr_sup_payload *hsr_sp; unsigned long irqflags; hlen = LL_RESERVED_SPACE(master->dev); tlen = master->dev->needed_tailroom; - skb = alloc_skb(hsr_pad(sizeof(struct hsr_sup_payload)) + hlen + tlen, - GFP_ATOMIC); + skb = dev_alloc_skb( + sizeof(struct hsr_tag) + + sizeof(struct hsr_sup_tag) + + sizeof(struct hsr_sup_payload) + hlen + tlen); if (skb == NULL) return; @@ -282,32 +274,48 @@ static void send_hsr_supervision_frame(struct hsr_port *master, u8 type) skb_reserve(skb, hlen); skb->dev = master->dev; - skb->protocol = htons(ETH_P_PRP); + skb->protocol = htons(hsrVer ? ETH_P_HSR : ETH_P_PRP); skb->priority = TC_PRIO_CONTROL; - if (dev_hard_header(skb, skb->dev, ETH_P_PRP, + if (dev_hard_header(skb, skb->dev, (hsrVer ? ETH_P_HSR : ETH_P_PRP), master->hsr->sup_multicast_addr, skb->dev->dev_addr, skb->len) <= 0) goto out; skb_reset_mac_header(skb); - hsr_stag = (typeof(hsr_stag)) skb_put(skb, sizeof(*hsr_stag)); + if (hsrVer > 0) { + hsr_tag = (typeof(hsr_tag)) skb_put(skb, sizeof(struct hsr_tag)); + hsr_tag->encap_proto = htons(ETH_P_PRP); + set_hsr_tag_LSDU_size(hsr_tag, HSR_V1_SUP_LSDUSIZE); + } - set_hsr_stag_path(hsr_stag, 0xf); - set_hsr_stag_HSR_Ver(hsr_stag, 0); + hsr_stag = (typeof(hsr_stag)) skb_put(skb, sizeof(struct hsr_sup_tag)); + set_hsr_stag_path(hsr_stag, (hsrVer ? 0x0 : 0xf)); + set_hsr_stag_HSR_Ver(hsr_stag, hsrVer); + /* From HSRv1 on we have separate supervision sequence numbers. */ spin_lock_irqsave(&master->hsr->seqnr_lock, irqflags); - hsr_stag->sequence_nr = htons(master->hsr->sequence_nr); - master->hsr->sequence_nr++; + if (hsrVer > 0) { + hsr_stag->sequence_nr = htons(master->hsr->sup_sequence_nr); + hsr_tag->sequence_nr = htons(master->hsr->sequence_nr); + master->hsr->sup_sequence_nr++; + master->hsr->sequence_nr++; + } else { + hsr_stag->sequence_nr = htons(master->hsr->sequence_nr); + master->hsr->sequence_nr++; + } spin_unlock_irqrestore(&master->hsr->seqnr_lock, irqflags); hsr_stag->HSR_TLV_Type = type; - hsr_stag->HSR_TLV_Length = 12; + /* TODO: Why 12 in HSRv0? */ + hsr_stag->HSR_TLV_Length = hsrVer ? 
sizeof(struct hsr_sup_payload) : 12; /* Payload: MacAddressA */ - hsr_sp = (typeof(hsr_sp)) skb_put(skb, sizeof(*hsr_sp)); + hsr_sp = (typeof(hsr_sp)) skb_put(skb, sizeof(struct hsr_sup_payload)); ether_addr_copy(hsr_sp->MacAddressA, master->dev->dev_addr); + skb_put_padto(skb, ETH_ZLEN + HSR_HLEN); + hsr_forward_skb(skb, master); return; @@ -329,19 +337,20 @@ static void hsr_announce(unsigned long data) rcu_read_lock(); master = hsr_port_get_hsr(hsr, HSR_PT_MASTER); - if (hsr->announce_count < 3) { - send_hsr_supervision_frame(master, HSR_TLV_ANNOUNCE); + if (hsr->announce_count < 3 && hsr->protVersion == 0) { + send_hsr_supervision_frame(master, HSR_TLV_ANNOUNCE, + hsr->protVersion); hsr->announce_count++; - } else { - send_hsr_supervision_frame(master, HSR_TLV_LIFE_CHECK); - } - if (hsr->announce_count < 3) hsr->announce_timer.expires = jiffies + msecs_to_jiffies(HSR_ANNOUNCE_INTERVAL); - else + } else { + send_hsr_supervision_frame(master, HSR_TLV_LIFE_CHECK, + hsr->protVersion); + hsr->announce_timer.expires = jiffies + msecs_to_jiffies(HSR_LIFE_CHECK_INTERVAL); + } if (is_admin_up(master->dev)) add_timer(&hsr->announce_timer); @@ -428,7 +437,7 @@ static const unsigned char def_multicast_addr[ETH_ALEN] __aligned(2) = { }; int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2], - unsigned char multicast_spec) + unsigned char multicast_spec, u8 protocol_version) { struct hsr_priv *hsr; struct hsr_port *port; @@ -450,6 +459,7 @@ int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2], spin_lock_init(&hsr->seqnr_lock); /* Overflow soon to find bugs easier: */ hsr->sequence_nr = HSR_SEQNR_START; + hsr->sup_sequence_nr = HSR_SUP_SEQNR_START; init_timer(&hsr->announce_timer); hsr->announce_timer.function = hsr_announce; @@ -462,6 +472,8 @@ int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2], ether_addr_copy(hsr->sup_multicast_addr, def_multicast_addr); hsr->sup_multicast_addr[ETH_ALEN - 1] = multicast_spec; + hsr->protVersion = protocol_version; + /* FIXME: should I modify the value of these? * * - hsr_dev->flags - i.e. diff --git a/net/hsr/hsr_device.h b/net/hsr/hsr_device.h index 108a5d59d2a6..9975e31bbb82 100644 --- a/net/hsr/hsr_device.h +++ b/net/hsr/hsr_device.h @@ -17,7 +17,7 @@ void hsr_dev_setup(struct net_device *dev); int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2], - unsigned char multicast_spec); + unsigned char multicast_spec, u8 protocol_version); void hsr_check_carrier_and_operstate(struct hsr_priv *hsr); bool is_hsr_master(struct net_device *dev); int hsr_get_max_mtu(struct hsr_priv *hsr); diff --git a/net/hsr/hsr_forward.c b/net/hsr/hsr_forward.c index 7871ed6d3825..5ee1d43f1310 100644 --- a/net/hsr/hsr_forward.c +++ b/net/hsr/hsr_forward.c @@ -50,21 +50,40 @@ struct hsr_frame_info { */ static bool is_supervision_frame(struct hsr_priv *hsr, struct sk_buff *skb) { - struct hsr_ethhdr_sp *hdr; + struct ethhdr *ethHdr; + struct hsr_sup_tag *hsrSupTag; + struct hsrv1_ethhdr_sp *hsrV1Hdr; WARN_ON_ONCE(!skb_mac_header_was_set(skb)); - hdr = (struct hsr_ethhdr_sp *) skb_mac_header(skb); + ethHdr = (struct ethhdr *) skb_mac_header(skb); - if (!ether_addr_equal(hdr->ethhdr.h_dest, + /* Correct addr? */ + if (!ether_addr_equal(ethHdr->h_dest, hsr->sup_multicast_addr)) return false; - if (get_hsr_stag_path(&hdr->hsr_sup) != 0x0f) + /* Correct ether type?. 
*/ + if (!(ethHdr->h_proto == htons(ETH_P_PRP) + || ethHdr->h_proto == htons(ETH_P_HSR))) return false; - if ((hdr->hsr_sup.HSR_TLV_Type != HSR_TLV_ANNOUNCE) && - (hdr->hsr_sup.HSR_TLV_Type != HSR_TLV_LIFE_CHECK)) + + /* Get the supervision header from correct location. */ + if (ethHdr->h_proto == htons(ETH_P_HSR)) { /* Okay HSRv1. */ + hsrV1Hdr = (struct hsrv1_ethhdr_sp *) skb_mac_header(skb); + if (hsrV1Hdr->hsr.encap_proto != htons(ETH_P_PRP)) + return false; + + hsrSupTag = &hsrV1Hdr->hsr_sup; + } else { + hsrSupTag = &((struct hsrv0_ethhdr_sp *) skb_mac_header(skb))->hsr_sup; + } + + if ((hsrSupTag->HSR_TLV_Type != HSR_TLV_ANNOUNCE) && + (hsrSupTag->HSR_TLV_Type != HSR_TLV_LIFE_CHECK)) return false; - if (hdr->hsr_sup.HSR_TLV_Length != 12) + if ((hsrSupTag->HSR_TLV_Length != 12) && + (hsrSupTag->HSR_TLV_Length != + sizeof(struct hsr_sup_payload))) return false; return true; @@ -110,7 +129,7 @@ static struct sk_buff *frame_get_stripped_skb(struct hsr_frame_info *frame, static void hsr_fill_tag(struct sk_buff *skb, struct hsr_frame_info *frame, - struct hsr_port *port) + struct hsr_port *port, u8 protoVersion) { struct hsr_ethhdr *hsr_ethhdr; int lane_id; @@ -131,7 +150,8 @@ static void hsr_fill_tag(struct sk_buff *skb, struct hsr_frame_info *frame, set_hsr_tag_LSDU_size(&hsr_ethhdr->hsr_tag, lsdu_size); hsr_ethhdr->hsr_tag.sequence_nr = htons(frame->sequence_nr); hsr_ethhdr->hsr_tag.encap_proto = hsr_ethhdr->ethhdr.h_proto; - hsr_ethhdr->ethhdr.h_proto = htons(ETH_P_PRP); + hsr_ethhdr->ethhdr.h_proto = htons(protoVersion ? + ETH_P_HSR : ETH_P_PRP); } static struct sk_buff *create_tagged_skb(struct sk_buff *skb_o, @@ -160,7 +180,7 @@ static struct sk_buff *create_tagged_skb(struct sk_buff *skb_o, memmove(dst, src, movelen); skb_reset_mac_header(skb); - hsr_fill_tag(skb, frame, port); + hsr_fill_tag(skb, frame, port, port->hsr->protVersion); return skb; } @@ -320,7 +340,8 @@ static int hsr_fill_frame_info(struct hsr_frame_info *frame, /* FIXME: */ WARN_ONCE(1, "HSR: VLAN not yet supported"); } - if (ethhdr->h_proto == htons(ETH_P_PRP)) { + if (ethhdr->h_proto == htons(ETH_P_PRP) + || ethhdr->h_proto == htons(ETH_P_HSR)) { frame->skb_std = NULL; frame->skb_hsr = skb; frame->sequence_nr = hsr_get_skb_sequence_nr(skb); diff --git a/net/hsr/hsr_framereg.c b/net/hsr/hsr_framereg.c index bace124d14ef..7ea925816f79 100644 --- a/net/hsr/hsr_framereg.c +++ b/net/hsr/hsr_framereg.c @@ -177,17 +177,17 @@ struct hsr_node *hsr_get_node(struct list_head *node_db, struct sk_buff *skb, return node; } - if (!is_sup) - return NULL; /* Only supervision frame may create node entry */ + /* Everyone may create a node entry, connected node to a HSR device. */ - if (ethhdr->h_proto == htons(ETH_P_PRP)) { + if (ethhdr->h_proto == htons(ETH_P_PRP) + || ethhdr->h_proto == htons(ETH_P_HSR)) { /* Use the existing sequence_nr from the tag as starting point * for filtering duplicate frames. 
*/ seq_out = hsr_get_skb_sequence_nr(skb) - 1; } else { WARN_ONCE(1, "%s: Non-HSR frame\n", __func__); - seq_out = 0; + seq_out = HSR_SEQNR_START; } return hsr_add_node(node_db, ethhdr->h_source, seq_out); @@ -200,17 +200,25 @@ struct hsr_node *hsr_get_node(struct list_head *node_db, struct sk_buff *skb, void hsr_handle_sup_frame(struct sk_buff *skb, struct hsr_node *node_curr, struct hsr_port *port_rcv) { + struct ethhdr *ethhdr; struct hsr_node *node_real; struct hsr_sup_payload *hsr_sp; struct list_head *node_db; int i; - skb_pull(skb, sizeof(struct hsr_ethhdr_sp)); - hsr_sp = (struct hsr_sup_payload *) skb->data; + ethhdr = (struct ethhdr *) skb_mac_header(skb); - if (ether_addr_equal(eth_hdr(skb)->h_source, hsr_sp->MacAddressA)) - /* Not sent from MacAddressB of a PICS_SUBS capable node */ - goto done; + /* Leave the ethernet header. */ + skb_pull(skb, sizeof(struct ethhdr)); + + /* And leave the HSR tag. */ + if (ethhdr->h_proto == htons(ETH_P_HSR)) + skb_pull(skb, sizeof(struct hsr_tag)); + + /* And leave the HSR sup tag. */ + skb_pull(skb, sizeof(struct hsr_sup_tag)); + + hsr_sp = (struct hsr_sup_payload *) skb->data; /* Merge node_curr (registered on MacAddressB) into node_real */ node_db = &port_rcv->hsr->node_db; @@ -225,7 +233,7 @@ void hsr_handle_sup_frame(struct sk_buff *skb, struct hsr_node *node_curr, /* Node has already been merged */ goto done; - ether_addr_copy(node_real->MacAddressB, eth_hdr(skb)->h_source); + ether_addr_copy(node_real->MacAddressB, ethhdr->h_source); for (i = 0; i < HSR_PT_PORTS; i++) { if (!node_curr->time_in_stale[i] && time_after(node_curr->time_in[i], node_real->time_in[i])) { @@ -241,7 +249,7 @@ void hsr_handle_sup_frame(struct sk_buff *skb, struct hsr_node *node_curr, kfree_rcu(node_curr, rcu_head); done: - skb_push(skb, sizeof(struct hsr_ethhdr_sp)); + skb_push(skb, sizeof(struct hsrv1_ethhdr_sp)); } diff --git a/net/hsr/hsr_main.h b/net/hsr/hsr_main.h index 5a9c69962ded..9b9909e89e9e 100644 --- a/net/hsr/hsr_main.h +++ b/net/hsr/hsr_main.h @@ -30,6 +30,7 @@ */ #define MAX_SLAVE_DIFF 3000 /* ms */ #define HSR_SEQNR_START (USHRT_MAX - 1024) +#define HSR_SUP_SEQNR_START (HSR_SEQNR_START / 2) /* How often shall we check for broken ring and remove node entries older than @@ -58,6 +59,8 @@ struct hsr_tag { #define HSR_HLEN 6 +#define HSR_V1_SUP_LSDUSIZE 52 + /* The helper functions below assumes that 'path' occupies the 4 most * significant bits of the 16-bit field shared by 'path' and 'LSDU_size' (or * equivalently, the 4 most significant bits of HSR tag byte 14). @@ -131,11 +134,17 @@ static inline void set_hsr_stag_HSR_Ver(struct hsr_sup_tag *hst, u16 HSR_Ver) set_hsr_tag_LSDU_size((struct hsr_tag *) hst, HSR_Ver); } -struct hsr_ethhdr_sp { +struct hsrv0_ethhdr_sp { struct ethhdr ethhdr; struct hsr_sup_tag hsr_sup; } __packed; +struct hsrv1_ethhdr_sp { + struct ethhdr ethhdr; + struct hsr_tag hsr; + struct hsr_sup_tag hsr_sup; +} __packed; + enum hsr_port_type { HSR_PT_NONE = 0, /* Must be 0, used by framereg */ @@ -162,6 +171,8 @@ struct hsr_priv { struct timer_list prune_timer; int announce_count; u16 sequence_nr; + u16 sup_sequence_nr; /* For HSRv1 separate seq_nr for supervision */ + u8 protVersion; /* Indicate if HSRv0 or HSRv1. 
*/ spinlock_t seqnr_lock; /* locking for sequence_nr */ unsigned char sup_multicast_addr[ETH_ALEN]; }; diff --git a/net/hsr/hsr_netlink.c b/net/hsr/hsr_netlink.c index a2c7e4c0ac1e..5425d87611fc 100644 --- a/net/hsr/hsr_netlink.c +++ b/net/hsr/hsr_netlink.c @@ -23,6 +23,7 @@ static const struct nla_policy hsr_policy[IFLA_HSR_MAX + 1] = { [IFLA_HSR_SLAVE1] = { .type = NLA_U32 }, [IFLA_HSR_SLAVE2] = { .type = NLA_U32 }, [IFLA_HSR_MULTICAST_SPEC] = { .type = NLA_U8 }, + [IFLA_HSR_VERSION] = { .type = NLA_U8 }, [IFLA_HSR_SUPERVISION_ADDR] = { .type = NLA_BINARY, .len = ETH_ALEN }, [IFLA_HSR_SEQ_NR] = { .type = NLA_U16 }, }; @@ -35,7 +36,7 @@ static int hsr_newlink(struct net *src_net, struct net_device *dev, struct nlattr *tb[], struct nlattr *data[]) { struct net_device *link[2]; - unsigned char multicast_spec; + unsigned char multicast_spec, hsr_version; if (!data) { netdev_info(dev, "HSR: No slave devices specified\n"); @@ -62,7 +63,12 @@ static int hsr_newlink(struct net *src_net, struct net_device *dev, else multicast_spec = nla_get_u8(data[IFLA_HSR_MULTICAST_SPEC]); - return hsr_dev_finalize(dev, link, multicast_spec); + if (!data[IFLA_HSR_VERSION]) + hsr_version = 0; + else + hsr_version = nla_get_u8(data[IFLA_HSR_VERSION]); + + return hsr_dev_finalize(dev, link, multicast_spec, hsr_version); } static int hsr_fill_info(struct sk_buff *skb, const struct net_device *dev) diff --git a/net/hsr/hsr_slave.c b/net/hsr/hsr_slave.c index 7d37366cc695..f5b60388d02f 100644 --- a/net/hsr/hsr_slave.c +++ b/net/hsr/hsr_slave.c @@ -22,6 +22,7 @@ static rx_handler_result_t hsr_handle_frame(struct sk_buff **pskb) { struct sk_buff *skb = *pskb; struct hsr_port *port; + u16 protocol; if (!skb_mac_header_was_set(skb)) { WARN_ONCE(1, "%s: skb invalid", __func__); @@ -37,7 +38,8 @@ static rx_handler_result_t hsr_handle_frame(struct sk_buff **pskb) goto finish_consume; } - if (eth_hdr(skb)->h_proto != htons(ETH_P_PRP)) + protocol = eth_hdr(skb)->h_proto; + if (protocol != htons(ETH_P_PRP) && protocol != htons(ETH_P_HSR)) goto finish_pass; skb_push(skb, ETH_HLEN); From 464f664501816ef5fbbc00b8de96f4ae5a1c9325 Mon Sep 17 00:00:00 2001 From: Manish Chopra Date: Thu, 14 Apr 2016 01:38:29 -0400 Subject: [PATCH 0755/1649] qed: Add infrastructure support for tunneling This patch adds various structure/APIs needed to configure/enable different tunnel [VXLAN/GRE/GENEVE] parameters on the adapter. Signed-off-by: Manish Chopra Signed-off-by: Yuval Mintz Signed-off-by: Ariel Elior Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/qed/qed.h | 46 ++++ drivers/net/ethernet/qlogic/qed/qed_dev.c | 6 +- drivers/net/ethernet/qlogic/qed/qed_dev_api.h | 2 + drivers/net/ethernet/qlogic/qed/qed_hsi.h | 51 +++- .../ethernet/qlogic/qed/qed_init_fw_funcs.c | 127 +++++++++ drivers/net/ethernet/qlogic/qed/qed_l2.c | 31 +++ drivers/net/ethernet/qlogic/qed/qed_main.c | 2 +- .../net/ethernet/qlogic/qed/qed_reg_addr.h | 31 +++ drivers/net/ethernet/qlogic/qed/qed_sp.h | 7 + .../net/ethernet/qlogic/qed/qed_sp_commands.c | 254 ++++++++++++++++++ include/linux/qed/qed_eth_if.h | 10 + 11 files changed, 563 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h index 0f0d2d1d77e5..33e2ed60c18f 100644 --- a/drivers/net/ethernet/qlogic/qed/qed.h +++ b/drivers/net/ethernet/qlogic/qed/qed.h @@ -74,6 +74,51 @@ struct qed_rt_data { bool *b_valid; }; +enum qed_tunn_mode { + QED_MODE_L2GENEVE_TUNN, + QED_MODE_IPGENEVE_TUNN, + QED_MODE_L2GRE_TUNN, + QED_MODE_IPGRE_TUNN, + QED_MODE_VXLAN_TUNN, +}; + +enum qed_tunn_clss { + QED_TUNN_CLSS_MAC_VLAN, + QED_TUNN_CLSS_MAC_VNI, + QED_TUNN_CLSS_INNER_MAC_VLAN, + QED_TUNN_CLSS_INNER_MAC_VNI, + MAX_QED_TUNN_CLSS, +}; + +struct qed_tunn_start_params { + unsigned long tunn_mode; + u16 vxlan_udp_port; + u16 geneve_udp_port; + u8 update_vxlan_udp_port; + u8 update_geneve_udp_port; + u8 tunn_clss_vxlan; + u8 tunn_clss_l2geneve; + u8 tunn_clss_ipgeneve; + u8 tunn_clss_l2gre; + u8 tunn_clss_ipgre; +}; + +struct qed_tunn_update_params { + unsigned long tunn_mode_update_mask; + unsigned long tunn_mode; + u16 vxlan_udp_port; + u16 geneve_udp_port; + u8 update_rx_pf_clss; + u8 update_tx_pf_clss; + u8 update_vxlan_udp_port; + u8 update_geneve_udp_port; + u8 tunn_clss_vxlan; + u8 tunn_clss_l2geneve; + u8 tunn_clss_ipgeneve; + u8 tunn_clss_l2gre; + u8 tunn_clss_ipgre; +}; + /* The PCI personality is not quite synonymous to protocol ID: * 1. All personalities need CORE connections * 2. 
The Ethernet personality may support also the RoCE protocol @@ -430,6 +475,7 @@ struct qed_dev { u8 num_hwfns; struct qed_hwfn hwfns[MAX_HWFNS_PER_DEVICE]; + unsigned long tunn_mode; u32 drv_type; struct qed_eth_stats *reset_stats; diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index b7d100f6bd6f..bdae5a55afa4 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -558,6 +558,7 @@ static int qed_hw_init_port(struct qed_hwfn *p_hwfn, static int qed_hw_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + struct qed_tunn_start_params *p_tunn, int hw_mode, bool b_hw_start, enum qed_int_mode int_mode, @@ -625,7 +626,7 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn, qed_int_igu_enable(p_hwfn, p_ptt, int_mode); /* send function start command */ - rc = qed_sp_pf_start(p_hwfn, p_hwfn->cdev->mf_mode); + rc = qed_sp_pf_start(p_hwfn, p_tunn, p_hwfn->cdev->mf_mode); if (rc) DP_NOTICE(p_hwfn, "Function start ramrod failed\n"); } @@ -672,6 +673,7 @@ static void qed_reset_mb_shadow(struct qed_hwfn *p_hwfn, } int qed_hw_init(struct qed_dev *cdev, + struct qed_tunn_start_params *p_tunn, bool b_hw_start, enum qed_int_mode int_mode, bool allow_npar_tx_switch, @@ -724,7 +726,7 @@ int qed_hw_init(struct qed_dev *cdev, /* Fall into */ case FW_MSG_CODE_DRV_LOAD_FUNCTION: rc = qed_hw_init_pf(p_hwfn, p_hwfn->p_main_ptt, - p_hwfn->hw_info.hw_mode, + p_tunn, p_hwfn->hw_info.hw_mode, b_hw_start, int_mode, allow_npar_tx_switch); break; diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h index d6c7ddf4f4d4..6aac3f855aa1 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h +++ b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h @@ -62,6 +62,7 @@ void qed_resc_setup(struct qed_dev *cdev); * @brief qed_hw_init - * * @param cdev + * @param p_tunn * @param b_hw_start * @param int_mode - interrupt mode [msix, inta, etc.] to use. 
* @param allow_npar_tx_switch - npar tx switching to be used @@ -72,6 +73,7 @@ void qed_resc_setup(struct qed_dev *cdev); * @return int */ int qed_hw_init(struct qed_dev *cdev, + struct qed_tunn_start_params *p_tunn, bool b_hw_start, enum qed_int_mode int_mode, bool allow_npar_tx_switch, diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h index a368f5e71d95..15e02ab9be5a 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h @@ -46,7 +46,7 @@ enum common_ramrod_cmd_id { COMMON_RAMROD_PF_STOP /* PF Function Stop Ramrod */, COMMON_RAMROD_RESERVED, COMMON_RAMROD_RESERVED2, - COMMON_RAMROD_RESERVED3, + COMMON_RAMROD_PF_UPDATE, COMMON_RAMROD_EMPTY, MAX_COMMON_RAMROD_CMD_ID }; @@ -626,6 +626,42 @@ struct pf_start_ramrod_data { u8 reserved0[4]; }; +/* tunnel configuration */ +struct pf_update_tunnel_config { + u8 update_rx_pf_clss; + u8 update_tx_pf_clss; + u8 set_vxlan_udp_port_flg; + u8 set_geneve_udp_port_flg; + u8 tx_enable_vxlan; + u8 tx_enable_l2geneve; + u8 tx_enable_ipgeneve; + u8 tx_enable_l2gre; + u8 tx_enable_ipgre; + u8 tunnel_clss_vxlan; + u8 tunnel_clss_l2geneve; + u8 tunnel_clss_ipgeneve; + u8 tunnel_clss_l2gre; + u8 tunnel_clss_ipgre; + __le16 vxlan_udp_port; + __le16 geneve_udp_port; + __le16 reserved[3]; +}; + +struct pf_update_ramrod_data { + u32 reserved[2]; + u32 reserved_1[6]; + struct pf_update_tunnel_config tunnel_config; +}; + +/* Tunnel classification scheme */ +enum tunnel_clss { + TUNNEL_CLSS_MAC_VLAN = 0, + TUNNEL_CLSS_MAC_VNI, + TUNNEL_CLSS_INNER_MAC_VLAN, + TUNNEL_CLSS_INNER_MAC_VNI, + MAX_TUNNEL_CLSS +}; + enum ports_mode { ENGX2_PORTX1 /* 2 engines x 1 port */, ENGX2_PORTX2 /* 2 engines x 2 ports */, @@ -1603,6 +1639,19 @@ bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn, u16 start_pq, u16 num_pqs); +void qed_set_vxlan_dest_port(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u16 dest_port); +void qed_set_vxlan_enable(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, bool vxlan_enable); +void qed_set_gre_enable(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, bool eth_gre_enable, + bool ip_gre_enable); +void qed_set_geneve_dest_port(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u16 dest_port); +void qed_set_geneve_enable(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, bool eth_geneve_enable, + bool ip_geneve_enable); + /* Ystorm flow control mode. 
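
The qed_set_vxlan_enable(), qed_set_gre_enable() and qed_set_geneve_enable() helpers declared above are driven from a single tunn_mode word in which each QED_MODE_*_TUNN value from qed.h is a bit position. A stand-alone sketch of that bitmask convention (illustrative names, not driver code):

  #include <stdio.h>

  /* Bit positions, mirroring the style of enum qed_tunn_mode. */
  enum tunn_mode_bit {
      MODE_L2GENEVE_TUNN,
      MODE_IPGENEVE_TUNN,
      MODE_L2GRE_TUNN,
      MODE_IPGRE_TUNN,
      MODE_VXLAN_TUNN,
  };

  int main(void)
  {
      unsigned long tunn_mode = 0;

      /* Request VXLAN plus both GRE flavours. */
      tunn_mode |= 1UL << MODE_VXLAN_TUNN;
      tunn_mode |= 1UL << MODE_L2GRE_TUNN | 1UL << MODE_IPGRE_TUNN;

      /* The hardware-programming layer tests each bit and turns the
       * corresponding parser/NIG enables on or off.
       */
      printf("vxlan: %d\n", !!(tunn_mode & (1UL << MODE_VXLAN_TUNN)));
      printf("l2geneve: %d\n", !!(tunn_mode & (1UL << MODE_L2GENEVE_TUNN)));
      return 0;
  }
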
Use enum fw_flow_ctrl_mode */ #define YSTORM_FLOW_CONTROL_MODE_OFFSET (IRO[0].base) #define YSTORM_FLOW_CONTROL_MODE_SIZE (IRO[0].size) diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c index f55ebdc3c832..1dd53248b984 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c +++ b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c @@ -788,3 +788,130 @@ bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn, return true; } + +static void +qed_set_tunnel_type_enable_bit(unsigned long *var, int bit, bool enable) +{ + if (enable) + set_bit(bit, var); + else + clear_bit(bit, var); +} + +#define PRS_ETH_TUNN_FIC_FORMAT -188897008 + +void qed_set_vxlan_dest_port(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u16 dest_port) +{ + qed_wr(p_hwfn, p_ptt, PRS_REG_VXLAN_PORT, dest_port); + qed_wr(p_hwfn, p_ptt, NIG_REG_VXLAN_PORT, dest_port); + qed_wr(p_hwfn, p_ptt, PBF_REG_VXLAN_PORT, dest_port); +} + +void qed_set_vxlan_enable(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + bool vxlan_enable) +{ + unsigned long reg_val = 0; + u8 shift; + + reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN); + shift = PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT; + qed_set_tunnel_type_enable_bit(®_val, shift, vxlan_enable); + + qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val); + + if (reg_val) + qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0, + PRS_ETH_TUNN_FIC_FORMAT); + + reg_val = qed_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE); + shift = NIG_REG_ENC_TYPE_ENABLE_VXLAN_ENABLE_SHIFT; + qed_set_tunnel_type_enable_bit(®_val, shift, vxlan_enable); + + qed_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val); + + qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN, + vxlan_enable ? 1 : 0); +} + +void qed_set_gre_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + bool eth_gre_enable, bool ip_gre_enable) +{ + unsigned long reg_val = 0; + u8 shift; + + reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN); + shift = PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_SHIFT; + qed_set_tunnel_type_enable_bit(®_val, shift, eth_gre_enable); + + shift = PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_SHIFT; + qed_set_tunnel_type_enable_bit(®_val, shift, ip_gre_enable); + qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val); + if (reg_val) + qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0, + PRS_ETH_TUNN_FIC_FORMAT); + + reg_val = qed_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE); + shift = NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE_SHIFT; + qed_set_tunnel_type_enable_bit(®_val, shift, eth_gre_enable); + + shift = NIG_REG_ENC_TYPE_ENABLE_IP_OVER_GRE_ENABLE_SHIFT; + qed_set_tunnel_type_enable_bit(®_val, shift, ip_gre_enable); + qed_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val); + + qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN, + eth_gre_enable ? 1 : 0); + qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN, + ip_gre_enable ? 
1 : 0); +} + +void qed_set_geneve_dest_port(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u16 dest_port) +{ + qed_wr(p_hwfn, p_ptt, PRS_REG_NGE_PORT, dest_port); + qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_PORT, dest_port); + qed_wr(p_hwfn, p_ptt, PBF_REG_NGE_PORT, dest_port); +} + +void qed_set_geneve_enable(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + bool eth_geneve_enable, + bool ip_geneve_enable) +{ + unsigned long reg_val = 0; + u8 shift; + + reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN); + shift = PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_SHIFT; + qed_set_tunnel_type_enable_bit(®_val, shift, eth_geneve_enable); + + shift = PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_SHIFT; + qed_set_tunnel_type_enable_bit(®_val, shift, ip_geneve_enable); + + qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val); + if (reg_val) + qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0, + PRS_ETH_TUNN_FIC_FORMAT); + + qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_ETH_ENABLE, + eth_geneve_enable ? 1 : 0); + qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_IP_ENABLE, ip_geneve_enable ? 1 : 0); + + /* comp ver */ + reg_val = (ip_geneve_enable || eth_geneve_enable) ? 1 : 0; + qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_COMP_VER, reg_val); + qed_wr(p_hwfn, p_ptt, PBF_REG_NGE_COMP_VER, reg_val); + qed_wr(p_hwfn, p_ptt, PRS_REG_NGE_COMP_VER, reg_val); + + /* EDPM with geneve tunnel not supported in BB_B0 */ + if (QED_IS_BB_B0(p_hwfn->cdev)) + return; + + qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN, + eth_geneve_enable ? 1 : 0); + qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN, + ip_geneve_enable ? 1 : 0); +} diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c index 5005497ee23e..fb5f3b815340 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c @@ -1884,6 +1884,36 @@ static int qed_stop_txq(struct qed_dev *cdev, return 0; } +static int qed_tunn_configure(struct qed_dev *cdev, + struct qed_tunn_params *tunn_params) +{ + struct qed_tunn_update_params tunn_info; + int i, rc; + + memset(&tunn_info, 0, sizeof(tunn_info)); + if (tunn_params->update_vxlan_port == 1) { + tunn_info.update_vxlan_udp_port = 1; + tunn_info.vxlan_udp_port = tunn_params->vxlan_port; + } + + if (tunn_params->update_geneve_port == 1) { + tunn_info.update_geneve_udp_port = 1; + tunn_info.geneve_udp_port = tunn_params->geneve_port; + } + + for_each_hwfn(cdev, i) { + struct qed_hwfn *hwfn = &cdev->hwfns[i]; + + rc = qed_sp_pf_update_tunn_cfg(hwfn, &tunn_info, + QED_SPQ_MODE_EBLOCK, NULL); + + if (rc) + return rc; + } + + return 0; +} + static int qed_configure_filter_rx_mode(struct qed_dev *cdev, enum qed_filter_rx_mode_type type) { @@ -2026,6 +2056,7 @@ static const struct qed_eth_ops qed_eth_ops_pass = { .fastpath_stop = &qed_fastpath_stop, .eth_cqe_completion = &qed_fp_cqe_completion, .get_vport_stats = &qed_get_vport_stats, + .tunn_config = &qed_tunn_configure, }; const struct qed_eth_ops *qed_get_eth_ops(void) diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index c31d485f72d6..1916992ae8b1 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -776,7 +776,7 @@ static int qed_slowpath_start(struct qed_dev *cdev, /* Start the slowpath */ data = cdev->firmware->data; - rc = qed_hw_init(cdev, true, cdev->int_params.out.int_mode, + rc = qed_hw_init(cdev, NULL, true, cdev->int_params.out.int_mode, true, data); if (rc) goto 
err2; diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h index c15b1622e636..55451a4dc587 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h +++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h @@ -427,4 +427,35 @@ 0x2aae60UL #define PGLUE_B_REG_PF_BAR1_SIZE \ 0x2aae64UL +#define PRS_REG_ENCAPSULATION_TYPE_EN 0x1f0730UL +#define PRS_REG_GRE_PROTOCOL 0x1f0734UL +#define PRS_REG_VXLAN_PORT 0x1f0738UL +#define PRS_REG_OUTPUT_FORMAT_4_0 0x1f099cUL +#define NIG_REG_ENC_TYPE_ENABLE 0x501058UL + +#define NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE (0x1 << 0) +#define NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE_SHIFT 0 +#define NIG_REG_ENC_TYPE_ENABLE_IP_OVER_GRE_ENABLE (0x1 << 1) +#define NIG_REG_ENC_TYPE_ENABLE_IP_OVER_GRE_ENABLE_SHIFT 1 +#define NIG_REG_ENC_TYPE_ENABLE_VXLAN_ENABLE (0x1 << 2) +#define NIG_REG_ENC_TYPE_ENABLE_VXLAN_ENABLE_SHIFT 2 + +#define NIG_REG_VXLAN_PORT 0x50105cUL +#define PBF_REG_VXLAN_PORT 0xd80518UL +#define PBF_REG_NGE_PORT 0xd8051cUL +#define PRS_REG_NGE_PORT 0x1f086cUL +#define NIG_REG_NGE_PORT 0x508b38UL + +#define DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN 0x10090cUL +#define DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN 0x100910UL +#define DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN 0x100914UL +#define DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN 0x10092cUL +#define DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN 0x100930UL + +#define NIG_REG_NGE_IP_ENABLE 0x508b28UL +#define NIG_REG_NGE_ETH_ENABLE 0x508b2cUL +#define NIG_REG_NGE_COMP_VER 0x508b30UL +#define PBF_REG_NGE_COMP_VER 0xd80524UL +#define PRS_REG_NGE_COMP_VER 0x1f0878UL + #endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h index d39f914b66ee..4b91cb32f317 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h @@ -52,6 +52,7 @@ int qed_eth_cqe_completion(struct qed_hwfn *p_hwfn, union ramrod_data { struct pf_start_ramrod_data pf_start; + struct pf_update_ramrod_data pf_update; struct rx_queue_start_ramrod_data rx_queue_start; struct rx_queue_update_ramrod_data rx_queue_update; struct rx_queue_stop_ramrod_data rx_queue_stop; @@ -338,12 +339,14 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn, * to the internal RAM of the UStorm by the Function Start Ramrod. 
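
The pf-update path added below (qed_tunn_set_pf_fix_tunn_mode()) merges the requested tunnel modes with the ones cached in the device: a mode is taken from the request only when its bit is set in tunn_mode_update_mask, otherwise the cached setting is kept. A condensed stand-alone sketch of that per-bit rule (illustrative, not the driver's code):

  #include <stdio.h>

  /* For each mode bit: if the caller marked it in update_mask, take the
   * requested state; otherwise keep the cached one.
   */
  static unsigned long merge_tunn_mode(unsigned long cached,
                                       unsigned long requested,
                                       unsigned long update_mask)
  {
      return (requested & update_mask) | (cached & ~update_mask);
  }

  int main(void)
  {
      unsigned long cached = 0x1;      /* only bit 0 currently enabled    */
      unsigned long requested = 0x6;   /* caller asks for bits 1 and 2    */
      unsigned long update_mask = 0x4; /* but only bit 2 is to be updated */

      /* bit 0 kept from cache, bit 1 ignored, bit 2 taken from request */
      printf("0x%lx\n", merge_tunn_mode(cached, requested, update_mask)); /* 0x5 */
      return 0;
  }
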
* * @param p_hwfn + * @param p_tunn * @param mode * * @return int */ int qed_sp_pf_start(struct qed_hwfn *p_hwfn, + struct qed_tunn_start_params *p_tunn, enum qed_mf_mode mode); /** @@ -362,4 +365,8 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn, int qed_sp_pf_stop(struct qed_hwfn *p_hwfn); +int qed_sp_pf_update_tunn_cfg(struct qed_hwfn *p_hwfn, + struct qed_tunn_update_params *p_tunn, + enum spq_mode comp_mode, + struct qed_spq_comp_cb *p_comp_data); #endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c index 1c06c37d4c3d..306da7000ddc 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c @@ -87,7 +87,217 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn, return 0; } +static enum tunnel_clss qed_tunn_get_clss_type(u8 type) +{ + switch (type) { + case QED_TUNN_CLSS_MAC_VLAN: + return TUNNEL_CLSS_MAC_VLAN; + case QED_TUNN_CLSS_MAC_VNI: + return TUNNEL_CLSS_MAC_VNI; + case QED_TUNN_CLSS_INNER_MAC_VLAN: + return TUNNEL_CLSS_INNER_MAC_VLAN; + case QED_TUNN_CLSS_INNER_MAC_VNI: + return TUNNEL_CLSS_INNER_MAC_VNI; + default: + return TUNNEL_CLSS_MAC_VLAN; + } +} + +static void +qed_tunn_set_pf_fix_tunn_mode(struct qed_hwfn *p_hwfn, + struct qed_tunn_update_params *p_src, + struct pf_update_tunnel_config *p_tunn_cfg) +{ + unsigned long cached_tunn_mode = p_hwfn->cdev->tunn_mode; + unsigned long update_mask = p_src->tunn_mode_update_mask; + unsigned long tunn_mode = p_src->tunn_mode; + unsigned long new_tunn_mode = 0; + + if (test_bit(QED_MODE_L2GRE_TUNN, &update_mask)) { + if (test_bit(QED_MODE_L2GRE_TUNN, &tunn_mode)) + __set_bit(QED_MODE_L2GRE_TUNN, &new_tunn_mode); + } else { + if (test_bit(QED_MODE_L2GRE_TUNN, &cached_tunn_mode)) + __set_bit(QED_MODE_L2GRE_TUNN, &new_tunn_mode); + } + + if (test_bit(QED_MODE_IPGRE_TUNN, &update_mask)) { + if (test_bit(QED_MODE_IPGRE_TUNN, &tunn_mode)) + __set_bit(QED_MODE_IPGRE_TUNN, &new_tunn_mode); + } else { + if (test_bit(QED_MODE_IPGRE_TUNN, &cached_tunn_mode)) + __set_bit(QED_MODE_IPGRE_TUNN, &new_tunn_mode); + } + + if (test_bit(QED_MODE_VXLAN_TUNN, &update_mask)) { + if (test_bit(QED_MODE_VXLAN_TUNN, &tunn_mode)) + __set_bit(QED_MODE_VXLAN_TUNN, &new_tunn_mode); + } else { + if (test_bit(QED_MODE_VXLAN_TUNN, &cached_tunn_mode)) + __set_bit(QED_MODE_VXLAN_TUNN, &new_tunn_mode); + } + + if (p_src->update_geneve_udp_port) { + p_tunn_cfg->set_geneve_udp_port_flg = 1; + p_tunn_cfg->geneve_udp_port = + cpu_to_le16(p_src->geneve_udp_port); + } + + if (test_bit(QED_MODE_L2GENEVE_TUNN, &update_mask)) { + if (test_bit(QED_MODE_L2GENEVE_TUNN, &tunn_mode)) + __set_bit(QED_MODE_L2GENEVE_TUNN, &new_tunn_mode); + } else { + if (test_bit(QED_MODE_L2GENEVE_TUNN, &cached_tunn_mode)) + __set_bit(QED_MODE_L2GENEVE_TUNN, &new_tunn_mode); + } + + if (test_bit(QED_MODE_IPGENEVE_TUNN, &update_mask)) { + if (test_bit(QED_MODE_IPGENEVE_TUNN, &tunn_mode)) + __set_bit(QED_MODE_IPGENEVE_TUNN, &new_tunn_mode); + } else { + if (test_bit(QED_MODE_IPGENEVE_TUNN, &cached_tunn_mode)) + __set_bit(QED_MODE_IPGENEVE_TUNN, &new_tunn_mode); + } + + p_src->tunn_mode = new_tunn_mode; +} + +static void +qed_tunn_set_pf_update_params(struct qed_hwfn *p_hwfn, + struct qed_tunn_update_params *p_src, + struct pf_update_tunnel_config *p_tunn_cfg) +{ + unsigned long tunn_mode = p_src->tunn_mode; + enum tunnel_clss type; + + qed_tunn_set_pf_fix_tunn_mode(p_hwfn, p_src, p_tunn_cfg); + p_tunn_cfg->update_rx_pf_clss = p_src->update_rx_pf_clss; + 
p_tunn_cfg->update_tx_pf_clss = p_src->update_tx_pf_clss; + + type = qed_tunn_get_clss_type(p_src->tunn_clss_vxlan); + p_tunn_cfg->tunnel_clss_vxlan = type; + + type = qed_tunn_get_clss_type(p_src->tunn_clss_l2gre); + p_tunn_cfg->tunnel_clss_l2gre = type; + + type = qed_tunn_get_clss_type(p_src->tunn_clss_ipgre); + p_tunn_cfg->tunnel_clss_ipgre = type; + + if (p_src->update_vxlan_udp_port) { + p_tunn_cfg->set_vxlan_udp_port_flg = 1; + p_tunn_cfg->vxlan_udp_port = cpu_to_le16(p_src->vxlan_udp_port); + } + + if (test_bit(QED_MODE_L2GRE_TUNN, &tunn_mode)) + p_tunn_cfg->tx_enable_l2gre = 1; + + if (test_bit(QED_MODE_IPGRE_TUNN, &tunn_mode)) + p_tunn_cfg->tx_enable_ipgre = 1; + + if (test_bit(QED_MODE_VXLAN_TUNN, &tunn_mode)) + p_tunn_cfg->tx_enable_vxlan = 1; + + if (p_src->update_geneve_udp_port) { + p_tunn_cfg->set_geneve_udp_port_flg = 1; + p_tunn_cfg->geneve_udp_port = + cpu_to_le16(p_src->geneve_udp_port); + } + + if (test_bit(QED_MODE_L2GENEVE_TUNN, &tunn_mode)) + p_tunn_cfg->tx_enable_l2geneve = 1; + + if (test_bit(QED_MODE_IPGENEVE_TUNN, &tunn_mode)) + p_tunn_cfg->tx_enable_ipgeneve = 1; + + type = qed_tunn_get_clss_type(p_src->tunn_clss_l2geneve); + p_tunn_cfg->tunnel_clss_l2geneve = type; + + type = qed_tunn_get_clss_type(p_src->tunn_clss_ipgeneve); + p_tunn_cfg->tunnel_clss_ipgeneve = type; +} + +static void qed_set_hw_tunn_mode(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + unsigned long tunn_mode) +{ + u8 l2gre_enable = 0, ipgre_enable = 0, vxlan_enable = 0; + u8 l2geneve_enable = 0, ipgeneve_enable = 0; + + if (test_bit(QED_MODE_L2GRE_TUNN, &tunn_mode)) + l2gre_enable = 1; + + if (test_bit(QED_MODE_IPGRE_TUNN, &tunn_mode)) + ipgre_enable = 1; + + if (test_bit(QED_MODE_VXLAN_TUNN, &tunn_mode)) + vxlan_enable = 1; + + qed_set_gre_enable(p_hwfn, p_ptt, l2gre_enable, ipgre_enable); + qed_set_vxlan_enable(p_hwfn, p_ptt, vxlan_enable); + + if (test_bit(QED_MODE_L2GENEVE_TUNN, &tunn_mode)) + l2geneve_enable = 1; + + if (test_bit(QED_MODE_IPGENEVE_TUNN, &tunn_mode)) + ipgeneve_enable = 1; + + qed_set_geneve_enable(p_hwfn, p_ptt, l2geneve_enable, + ipgeneve_enable); +} + +static void +qed_tunn_set_pf_start_params(struct qed_hwfn *p_hwfn, + struct qed_tunn_start_params *p_src, + struct pf_start_tunnel_config *p_tunn_cfg) +{ + unsigned long tunn_mode; + enum tunnel_clss type; + + if (!p_src) + return; + + tunn_mode = p_src->tunn_mode; + type = qed_tunn_get_clss_type(p_src->tunn_clss_vxlan); + p_tunn_cfg->tunnel_clss_vxlan = type; + type = qed_tunn_get_clss_type(p_src->tunn_clss_l2gre); + p_tunn_cfg->tunnel_clss_l2gre = type; + type = qed_tunn_get_clss_type(p_src->tunn_clss_ipgre); + p_tunn_cfg->tunnel_clss_ipgre = type; + + if (p_src->update_vxlan_udp_port) { + p_tunn_cfg->set_vxlan_udp_port_flg = 1; + p_tunn_cfg->vxlan_udp_port = cpu_to_le16(p_src->vxlan_udp_port); + } + + if (test_bit(QED_MODE_L2GRE_TUNN, &tunn_mode)) + p_tunn_cfg->tx_enable_l2gre = 1; + + if (test_bit(QED_MODE_IPGRE_TUNN, &tunn_mode)) + p_tunn_cfg->tx_enable_ipgre = 1; + + if (test_bit(QED_MODE_VXLAN_TUNN, &tunn_mode)) + p_tunn_cfg->tx_enable_vxlan = 1; + + if (p_src->update_geneve_udp_port) { + p_tunn_cfg->set_geneve_udp_port_flg = 1; + p_tunn_cfg->geneve_udp_port = + cpu_to_le16(p_src->geneve_udp_port); + } + + if (test_bit(QED_MODE_L2GENEVE_TUNN, &tunn_mode)) + p_tunn_cfg->tx_enable_l2geneve = 1; + + if (test_bit(QED_MODE_IPGENEVE_TUNN, &tunn_mode)) + p_tunn_cfg->tx_enable_ipgeneve = 1; + + type = qed_tunn_get_clss_type(p_src->tunn_clss_l2geneve); + p_tunn_cfg->tunnel_clss_l2geneve = type; + type = 
qed_tunn_get_clss_type(p_src->tunn_clss_ipgeneve); + p_tunn_cfg->tunnel_clss_ipgeneve = type; +} + int qed_sp_pf_start(struct qed_hwfn *p_hwfn, + struct qed_tunn_start_params *p_tunn, enum qed_mf_mode mode) { struct pf_start_ramrod_data *p_ramrod = NULL; @@ -143,6 +353,7 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn, DMA_REGPAIR_LE(p_ramrod->consolid_q_pbl_addr, p_hwfn->p_consq->chain.pbl.p_phys_table); + qed_tunn_set_pf_start_params(p_hwfn, NULL, NULL); p_hwfn->hw_info.personality = PERSONALITY_ETH; DP_VERBOSE(p_hwfn, QED_MSG_SPQ, @@ -153,6 +364,49 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn, return qed_spq_post(p_hwfn, p_ent, NULL); } +/* Set pf update ramrod command params */ +int qed_sp_pf_update_tunn_cfg(struct qed_hwfn *p_hwfn, + struct qed_tunn_update_params *p_tunn, + enum spq_mode comp_mode, + struct qed_spq_comp_cb *p_comp_data) +{ + struct qed_spq_entry *p_ent = NULL; + struct qed_sp_init_data init_data; + int rc = -EINVAL; + + /* Get SPQ entry */ + memset(&init_data, 0, sizeof(init_data)); + init_data.cid = qed_spq_get_cid(p_hwfn); + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.comp_mode = comp_mode; + init_data.p_comp_data = p_comp_data; + + rc = qed_sp_init_request(p_hwfn, &p_ent, + COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON, + &init_data); + if (rc) + return rc; + + qed_tunn_set_pf_update_params(p_hwfn, p_tunn, + &p_ent->ramrod.pf_update.tunnel_config); + + rc = qed_spq_post(p_hwfn, p_ent, NULL); + if (rc) + return rc; + + if (p_tunn->update_vxlan_udp_port) + qed_set_vxlan_dest_port(p_hwfn, p_hwfn->p_main_ptt, + p_tunn->vxlan_udp_port); + if (p_tunn->update_geneve_udp_port) + qed_set_geneve_dest_port(p_hwfn, p_hwfn->p_main_ptt, + p_tunn->geneve_udp_port); + + qed_set_hw_tunn_mode(p_hwfn, p_hwfn->p_main_ptt, p_tunn->tunn_mode); + p_hwfn->cdev->tunn_mode = p_tunn->tunn_mode; + + return rc; +} + int qed_sp_pf_stop(struct qed_hwfn *p_hwfn) { struct qed_spq_entry *p_ent = NULL; diff --git a/include/linux/qed/qed_eth_if.h b/include/linux/qed/qed_eth_if.h index 795c9902e02f..3a4c806be156 100644 --- a/include/linux/qed/qed_eth_if.h +++ b/include/linux/qed/qed_eth_if.h @@ -112,6 +112,13 @@ struct qed_queue_start_common_params { u16 sb_idx; }; +struct qed_tunn_params { + u16 vxlan_port; + u8 update_vxlan_port; + u16 geneve_port; + u8 update_geneve_port; +}; + struct qed_eth_cb_ops { struct qed_common_cb_ops common; }; @@ -166,6 +173,9 @@ struct qed_eth_ops { void (*get_vport_stats)(struct qed_dev *cdev, struct qed_eth_stats *stats); + + int (*tunn_config)(struct qed_dev *cdev, + struct qed_tunn_params *params); }; const struct qed_eth_ops *qed_get_eth_ops(void); From b18e170cac62cb7c46d6778c50d7335e01ce566f Mon Sep 17 00:00:00 2001 From: Manish Chopra Date: Thu, 14 Apr 2016 01:38:30 -0400 Subject: [PATCH 0756/1649] qed/qede: Add VXLAN tunnel slowpath configuration support This patch enables VXLAN tunnel on the adapter and add support for driver hooks to configure UDP ports for VXLAN tunnel offload to be performed by the adapter. Signed-off-by: Manish Chopra Signed-off-by: Yuval Mintz Signed-off-by: Ariel Elior Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/Kconfig | 11 ++++ drivers/net/ethernet/qlogic/qed/qed_main.c | 8 ++- .../net/ethernet/qlogic/qed/qed_sp_commands.c | 3 +- drivers/net/ethernet/qlogic/qede/qede.h | 4 +- drivers/net/ethernet/qlogic/qede/qede_main.c | 64 ++++++++++++++++++- 5 files changed, 86 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/qlogic/Kconfig b/drivers/net/ethernet/qlogic/Kconfig index ddcfcab034c2..7a65522005ee 100644 --- a/drivers/net/ethernet/qlogic/Kconfig +++ b/drivers/net/ethernet/qlogic/Kconfig @@ -103,4 +103,15 @@ config QEDE depends on QED ---help--- This enables the support for ... + +config QEDE_VXLAN + bool "Virtual eXtensible Local Area Network support" + default n + depends on QEDE && VXLAN && !(QEDE=y && VXLAN=m) + ---help--- + This enables hardware offload support for VXLAN protocol over + qede module. Say Y here if you want to enable hardware offload + support for Virtual eXtensible Local Area Network (VXLAN) + in the driver. + endif # NET_VENDOR_QLOGIC diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index 1916992ae8b1..0bb2c574df79 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -744,6 +744,7 @@ static void qed_update_pf_params(struct qed_dev *cdev, static int qed_slowpath_start(struct qed_dev *cdev, struct qed_slowpath_params *params) { + struct qed_tunn_start_params tunn_info; struct qed_mcp_drv_version drv_version; const u8 *data = NULL; struct qed_hwfn *hwfn; @@ -776,7 +777,12 @@ static int qed_slowpath_start(struct qed_dev *cdev, /* Start the slowpath */ data = cdev->firmware->data; - rc = qed_hw_init(cdev, NULL, true, cdev->int_params.out.int_mode, + memset(&tunn_info, 0, sizeof(tunn_info)); + tunn_info.tunn_mode |= 1 << QED_MODE_VXLAN_TUNN; + tunn_info.tunn_clss_vxlan = QED_TUNN_CLSS_MAC_VLAN; + + rc = qed_hw_init(cdev, &tunn_info, true, + cdev->int_params.out.int_mode, true, data); if (rc) goto err2; diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c index 306da7000ddc..7ccd96e5802b 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c @@ -353,7 +353,8 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn, DMA_REGPAIR_LE(p_ramrod->consolid_q_pbl_addr, p_hwfn->p_consq->chain.pbl.p_phys_table); - qed_tunn_set_pf_start_params(p_hwfn, NULL, NULL); + qed_tunn_set_pf_start_params(p_hwfn, p_tunn, + &p_ramrod->tunnel_config); p_hwfn->hw_info.personality = PERSONALITY_ETH; DP_VERBOSE(p_hwfn, QED_MSG_SPQ, diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h index 41c418909a5c..16a43444af21 100644 --- a/drivers/net/ethernet/qlogic/qede/qede.h +++ b/drivers/net/ethernet/qlogic/qede/qede.h @@ -169,6 +169,7 @@ struct qede_dev { bool accept_any_vlan; struct delayed_work sp_task; unsigned long sp_flags; + u16 vxlan_dst_port; }; enum QEDE_STATE { @@ -289,7 +290,8 @@ struct qede_fastpath { #define QEDE_CSUM_ERROR BIT(0) #define QEDE_CSUM_UNNECESSARY BIT(1) -#define QEDE_SP_RX_MODE 1 +#define QEDE_SP_RX_MODE 1 +#define QEDE_SP_VXLAN_PORT_CONFIG 2 union qede_reload_args { u16 mtu; diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index 457caad2e752..895016d9f7e3 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -24,7 +24,9 @@ #include #include #include +#ifdef 
CONFIG_QEDE_VXLAN #include +#endif #include #include #include @@ -1821,6 +1823,42 @@ static void qede_vlan_mark_nonconfigured(struct qede_dev *edev) edev->accept_any_vlan = false; } +#ifdef CONFIG_QEDE_VXLAN +static void qede_add_vxlan_port(struct net_device *dev, + sa_family_t sa_family, __be16 port) +{ + struct qede_dev *edev = netdev_priv(dev); + u16 t_port = ntohs(port); + + if (edev->vxlan_dst_port) + return; + + edev->vxlan_dst_port = t_port; + + DP_VERBOSE(edev, QED_MSG_DEBUG, "Added vxlan port=%d", t_port); + + set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags); + schedule_delayed_work(&edev->sp_task, 0); +} + +static void qede_del_vxlan_port(struct net_device *dev, + sa_family_t sa_family, __be16 port) +{ + struct qede_dev *edev = netdev_priv(dev); + u16 t_port = ntohs(port); + + if (t_port != edev->vxlan_dst_port) + return; + + edev->vxlan_dst_port = 0; + + DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted vxlan port=%d", t_port); + + set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags); + schedule_delayed_work(&edev->sp_task, 0); +} +#endif + static const struct net_device_ops qede_netdev_ops = { .ndo_open = qede_open, .ndo_stop = qede_close, @@ -1832,6 +1870,10 @@ static const struct net_device_ops qede_netdev_ops = { .ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid, .ndo_get_stats64 = qede_get_stats64, +#ifdef CONFIG_QEDE_VXLAN + .ndo_add_vxlan_port = qede_add_vxlan_port, + .ndo_del_vxlan_port = qede_del_vxlan_port, +#endif }; /* ------------------------------------------------------------------------- @@ -2004,6 +2046,8 @@ static void qede_sp_task(struct work_struct *work) { struct qede_dev *edev = container_of(work, struct qede_dev, sp_task.work); + struct qed_dev *cdev = edev->cdev; + mutex_lock(&edev->qede_lock); if (edev->state == QEDE_STATE_OPEN) { @@ -2011,6 +2055,15 @@ static void qede_sp_task(struct work_struct *work) qede_config_rx_mode(edev->ndev); } + if (test_and_clear_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags)) { + struct qed_tunn_params tunn_params; + + memset(&tunn_params, 0, sizeof(tunn_params)); + tunn_params.update_vxlan_port = 1; + tunn_params.vxlan_port = edev->vxlan_dst_port; + qed_ops->tunn_config(cdev, &tunn_params); + } + mutex_unlock(&edev->qede_lock); } @@ -3149,12 +3202,21 @@ void qede_reload(struct qede_dev *edev, static int qede_open(struct net_device *ndev) { struct qede_dev *edev = netdev_priv(ndev); + int rc; netif_carrier_off(ndev); edev->ops->common->set_power_state(edev->cdev, PCI_D0); - return qede_load(edev, QEDE_LOAD_NORMAL); + rc = qede_load(edev, QEDE_LOAD_NORMAL); + + if (rc) + return rc; + +#ifdef CONFIG_QEDE_VXLAN + vxlan_get_rx_port(ndev); +#endif + return 0; } static int qede_close(struct net_device *ndev) From 9a109dd073582f69eba591888e64aa617340da6f Mon Sep 17 00:00:00 2001 From: Manish Chopra Date: Thu, 14 Apr 2016 01:38:31 -0400 Subject: [PATCH 0757/1649] qed/qede: Add GENEVE tunnel slowpath configuration support This patch enables GENEVE tunnel on the adapter and add support for driver hooks to configure UDP ports for GENEVE tunnel offload to be performed by the adapter. Signed-off-by: Manish Chopra Signed-off-by: Yuval Mintz Signed-off-by: Ariel Elior Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/Kconfig | 10 ++++ drivers/net/ethernet/qlogic/qed/qed_main.c | 5 +- drivers/net/ethernet/qlogic/qede/qede.h | 2 + drivers/net/ethernet/qlogic/qede/qede_main.c | 53 ++++++++++++++++++++ 4 files changed, 69 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/qlogic/Kconfig b/drivers/net/ethernet/qlogic/Kconfig index 7a65522005ee..c0a11b5158e7 100644 --- a/drivers/net/ethernet/qlogic/Kconfig +++ b/drivers/net/ethernet/qlogic/Kconfig @@ -114,4 +114,14 @@ config QEDE_VXLAN support for Virtual eXtensible Local Area Network (VXLAN) in the driver. +config QEDE_GENEVE + bool "Generic Network Virtualization Encapsulation (GENEVE) support" + depends on QEDE && GENEVE && !(QEDE=y && GENEVE=m) + ---help--- + This allows one to create GENEVE virtual interfaces that provide + Layer 2 Networks over Layer 3 Networks. GENEVE is often used + to tunnel virtual network infrastructure in virtualized environments. + Say Y here if you want to enable hardware offload support for + Generic Network Virtualization Encapsulation (GENEVE) in the driver. + endif # NET_VENDOR_QLOGIC diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index 0bb2c574df79..c1533a64f41c 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -778,7 +778,10 @@ static int qed_slowpath_start(struct qed_dev *cdev, data = cdev->firmware->data; memset(&tunn_info, 0, sizeof(tunn_info)); - tunn_info.tunn_mode |= 1 << QED_MODE_VXLAN_TUNN; + tunn_info.tunn_mode |= 1 << QED_MODE_VXLAN_TUNN | + 1 << QED_MODE_L2GENEVE_TUNN | + 1 << QED_MODE_IPGENEVE_TUNN; + tunn_info.tunn_clss_vxlan = QED_TUNN_CLSS_MAC_VLAN; rc = qed_hw_init(cdev, &tunn_info, true, diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h index 16a43444af21..8521feeda4de 100644 --- a/drivers/net/ethernet/qlogic/qede/qede.h +++ b/drivers/net/ethernet/qlogic/qede/qede.h @@ -170,6 +170,7 @@ struct qede_dev { struct delayed_work sp_task; unsigned long sp_flags; u16 vxlan_dst_port; + u16 geneve_dst_port; }; enum QEDE_STATE { @@ -292,6 +293,7 @@ struct qede_fastpath { #define QEDE_SP_RX_MODE 1 #define QEDE_SP_VXLAN_PORT_CONFIG 2 +#define QEDE_SP_GENEVE_PORT_CONFIG 3 union qede_reload_args { u16 mtu; diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index 895016d9f7e3..6c40316f1e70 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -27,6 +27,9 @@ #ifdef CONFIG_QEDE_VXLAN #include #endif +#ifdef CONFIG_QEDE_GENEVE +#include +#endif #include #include #include @@ -1859,6 +1862,40 @@ static void qede_del_vxlan_port(struct net_device *dev, } #endif +#ifdef CONFIG_QEDE_GENEVE +static void qede_add_geneve_port(struct net_device *dev, + sa_family_t sa_family, __be16 port) +{ + struct qede_dev *edev = netdev_priv(dev); + u16 t_port = ntohs(port); + + if (edev->geneve_dst_port) + return; + + edev->geneve_dst_port = t_port; + + DP_VERBOSE(edev, QED_MSG_DEBUG, "Added geneve port=%d", t_port); + set_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags); + schedule_delayed_work(&edev->sp_task, 0); +} + +static void qede_del_geneve_port(struct net_device *dev, + sa_family_t sa_family, __be16 port) +{ + struct qede_dev *edev = netdev_priv(dev); + u16 t_port = ntohs(port); + + if (t_port != edev->geneve_dst_port) + return; + + edev->geneve_dst_port = 0; + + DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted geneve port=%d", 
t_port); + set_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags); + schedule_delayed_work(&edev->sp_task, 0); +} +#endif + static const struct net_device_ops qede_netdev_ops = { .ndo_open = qede_open, .ndo_stop = qede_close, @@ -1874,6 +1911,10 @@ static const struct net_device_ops qede_netdev_ops = { .ndo_add_vxlan_port = qede_add_vxlan_port, .ndo_del_vxlan_port = qede_del_vxlan_port, #endif +#ifdef CONFIG_QEDE_GENEVE + .ndo_add_geneve_port = qede_add_geneve_port, + .ndo_del_geneve_port = qede_del_geneve_port, +#endif }; /* ------------------------------------------------------------------------- @@ -2064,6 +2105,15 @@ static void qede_sp_task(struct work_struct *work) qed_ops->tunn_config(cdev, &tunn_params); } + if (test_and_clear_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags)) { + struct qed_tunn_params tunn_params; + + memset(&tunn_params, 0, sizeof(tunn_params)); + tunn_params.update_geneve_port = 1; + tunn_params.geneve_port = edev->geneve_dst_port; + qed_ops->tunn_config(cdev, &tunn_params); + } + mutex_unlock(&edev->qede_lock); } @@ -3215,6 +3265,9 @@ static int qede_open(struct net_device *ndev) #ifdef CONFIG_QEDE_VXLAN vxlan_get_rx_port(ndev); +#endif +#ifdef CONFIG_QEDE_GENEVE + geneve_get_rx_port(ndev); #endif return 0; } From f7985869209b6d0c71c2cb1fd6fba0522d2c2b61 Mon Sep 17 00:00:00 2001 From: Manish Chopra Date: Thu, 14 Apr 2016 01:38:32 -0400 Subject: [PATCH 0758/1649] qed: Enable GRE tunnel slowpath configuration Signed-off-by: Manish Chopra Signed-off-by: Yuval Mintz Signed-off-by: Ariel Elior Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed_main.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index c1533a64f41c..1e9f321f1ac4 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -779,10 +779,14 @@ static int qed_slowpath_start(struct qed_dev *cdev, memset(&tunn_info, 0, sizeof(tunn_info)); tunn_info.tunn_mode |= 1 << QED_MODE_VXLAN_TUNN | + 1 << QED_MODE_L2GRE_TUNN | + 1 << QED_MODE_IPGRE_TUNN | 1 << QED_MODE_L2GENEVE_TUNN | 1 << QED_MODE_IPGENEVE_TUNN; tunn_info.tunn_clss_vxlan = QED_TUNN_CLSS_MAC_VLAN; + tunn_info.tunn_clss_l2gre = QED_TUNN_CLSS_MAC_VLAN; + tunn_info.tunn_clss_ipgre = QED_TUNN_CLSS_MAC_VLAN; rc = qed_hw_init(cdev, &tunn_info, true, cdev->int_params.out.int_mode, From 14db81defa1fb6dd2ff154fb9facb4243ad63b95 Mon Sep 17 00:00:00 2001 From: Manish Chopra Date: Thu, 14 Apr 2016 01:38:33 -0400 Subject: [PATCH 0759/1649] qede: Add fastpath support for tunneling This patch enables netdev tunneling features and adds TX/RX fastpath support for tunneling in driver. Signed-off-by: Manish Chopra Signed-off-by: Yuval Mintz Signed-off-by: Ariel Elior Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/qede/qede.h | 1 + drivers/net/ethernet/qlogic/qede/qede_main.c | 101 +++++++++++++++++-- 2 files changed, 92 insertions(+), 10 deletions(-) diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h index 8521feeda4de..16df1591388f 100644 --- a/drivers/net/ethernet/qlogic/qede/qede.h +++ b/drivers/net/ethernet/qlogic/qede/qede.h @@ -290,6 +290,7 @@ struct qede_fastpath { #define QEDE_CSUM_ERROR BIT(0) #define QEDE_CSUM_UNNECESSARY BIT(1) +#define QEDE_TUNN_CSUM_UNNECESSARY BIT(2) #define QEDE_SP_RX_MODE 1 #define QEDE_SP_VXLAN_PORT_CONFIG 2 diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index 6c40316f1e70..e5dc35ae6313 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -315,6 +315,9 @@ static u32 qede_xmit_type(struct qede_dev *edev, (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6)) *ipv6_ext = 1; + if (skb->encapsulation) + rc |= XMIT_ENC; + if (skb_is_gso(skb)) rc |= XMIT_LSO; @@ -376,6 +379,16 @@ static int map_frag_to_bd(struct qede_dev *edev, return 0; } +static u16 qede_get_skb_hlen(struct sk_buff *skb, bool is_encap_pkt) +{ + if (is_encap_pkt) + return (skb_inner_transport_header(skb) + + inner_tcp_hdrlen(skb) - skb->data); + else + return (skb_transport_header(skb) + + tcp_hdrlen(skb) - skb->data); +} + /* +2 for 1st BD for headers and 2nd BD for headlen (if required) */ #if ((MAX_SKB_FRAGS + 2) > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET) static bool qede_pkt_req_lin(struct qede_dev *edev, struct sk_buff *skb, @@ -386,8 +399,7 @@ static bool qede_pkt_req_lin(struct qede_dev *edev, struct sk_buff *skb, if (xmit_type & XMIT_LSO) { int hlen; - hlen = skb_transport_header(skb) + - tcp_hdrlen(skb) - skb->data; + hlen = qede_get_skb_hlen(skb, xmit_type & XMIT_ENC); /* linear payload would require its own BD */ if (skb_headlen(skb) > hlen) @@ -495,7 +507,18 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb, first_bd->data.bd_flags.bitfields |= 1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT; - first_bd->data.bitfields |= cpu_to_le16(temp); + if (xmit_type & XMIT_ENC) { + first_bd->data.bd_flags.bitfields |= + 1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT; + } else { + /* In cases when OS doesn't indicate for inner offloads + * when packet is tunnelled, we need to override the HW + * tunnel configuration so that packets are treated as + * regular non tunnelled packets and no inner offloads + * are done by the hardware. 
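
On the receive side, later in this patch, qede_check_tunn_csum() and qede_check_notunn_csum() decode the completion's parsing flags the same way for tunnelled and plain traffic: build a mask of the error bits that matter, then test it against the flags word in one shot. A reduced stand-alone sketch of that pattern, using made-up flag bits rather than the real PARSING_AND_ERR_FLAGS_* layout:

  #include <stdint.h>
  #include <stdio.h>

  /* Made-up parsing-flag bits for illustration. */
  #define F_L4_CSUM_CALCULATED (1u << 0)
  #define F_L4_CSUM_ERROR      (1u << 1)
  #define F_IP_HDR_ERROR       (1u << 2)

  #define CSUM_UNNECESSARY 1u
  #define CSUM_ERROR       2u

  static unsigned int check_csum(uint16_t flag)
  {
      uint16_t csum_flag = 0;
      unsigned int csum = 0;

      /* Only trust the L4 checksum if hardware actually computed it. */
      if (flag & F_L4_CSUM_CALCULATED) {
          csum_flag |= F_L4_CSUM_ERROR;
          csum = CSUM_UNNECESSARY;
      }
      /* IP header errors always disqualify the packet. */
      csum_flag |= F_IP_HDR_ERROR;

      return (csum_flag & flag) ? CSUM_ERROR : csum;
  }

  int main(void)
  {
      printf("%u\n", check_csum(F_L4_CSUM_CALCULATED));                   /* 1: good    */
      printf("%u\n", check_csum(F_L4_CSUM_CALCULATED | F_L4_CSUM_ERROR)); /* 2: error   */
      printf("%u\n", check_csum(0));                                      /* 0: unknown */
      return 0;
  }
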
+ */ + first_bd->data.bitfields |= cpu_to_le16(temp); + } /* If the packet is IPv6 with extension header, indicate that * to FW and pass few params, since the device cracker doesn't @@ -511,10 +534,15 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb, third_bd->data.lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size); - first_bd->data.bd_flags.bitfields |= - 1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT; - hlen = skb_transport_header(skb) + - tcp_hdrlen(skb) - skb->data; + if (unlikely(xmit_type & XMIT_ENC)) { + first_bd->data.bd_flags.bitfields |= + 1 << ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT; + hlen = qede_get_skb_hlen(skb, true); + } else { + first_bd->data.bd_flags.bitfields |= + 1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT; + hlen = qede_get_skb_hlen(skb, false); + } /* @@@TBD - if will not be removed need to check */ third_bd->data.bitfields |= @@ -848,6 +876,9 @@ static void qede_set_skb_csum(struct sk_buff *skb, u8 csum_flag) if (csum_flag & QEDE_CSUM_UNNECESSARY) skb->ip_summed = CHECKSUM_UNNECESSARY; + + if (csum_flag & QEDE_TUNN_CSUM_UNNECESSARY) + skb->csum_level = 1; } static inline void qede_skb_receive(struct qede_dev *edev, @@ -1137,13 +1168,47 @@ err: tpa_info->skb = NULL; } -static u8 qede_check_csum(u16 flag) +static bool qede_tunn_exist(u16 flag) +{ + return !!(flag & (PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK << + PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT)); +} + +static u8 qede_check_tunn_csum(u16 flag) +{ + u16 csum_flag = 0; + u8 tcsum = 0; + + if (flag & (PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK << + PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_SHIFT)) + csum_flag |= PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK << + PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT; + + if (flag & (PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK << + PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT)) { + csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK << + PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT; + tcsum = QEDE_TUNN_CSUM_UNNECESSARY; + } + + csum_flag |= PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK << + PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT | + PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK << + PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT; + + if (csum_flag & flag) + return QEDE_CSUM_ERROR; + + return QEDE_CSUM_UNNECESSARY | tcsum; +} + +static u8 qede_check_notunn_csum(u16 flag) { u16 csum_flag = 0; u8 csum = 0; - if ((PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK << - PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT) & flag) { + if (flag & (PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK << + PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT)) { csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK << PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT; csum = QEDE_CSUM_UNNECESSARY; @@ -1158,6 +1223,14 @@ static u8 qede_check_csum(u16 flag) return csum; } +static u8 qede_check_csum(u16 flag) +{ + if (!qede_tunn_exist(flag)) + return qede_check_notunn_csum(flag); + else + return qede_check_tunn_csum(flag); +} + static int qede_rx_int(struct qede_fastpath *fp, int budget) { struct qede_dev *edev = fp->edev; @@ -1987,6 +2060,14 @@ static void qede_init_ndev(struct qede_dev *edev) NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_TSO | NETIF_F_TSO6; + /* Encap features*/ + hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_TSO_ECN; + ndev->hw_enc_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | + NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO_ECN | + NETIF_F_TSO6 | NETIF_F_GSO_GRE | + NETIF_F_GSO_UDP_TUNNEL | NETIF_F_RXCSUM; + ndev->vlan_features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM | 
NETIF_F_HIGHDMA; ndev->features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM | From e1c9c62b9a3a761b56359a7437215ae2e9253821 Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Mon, 11 Apr 2016 23:10:21 +0300 Subject: [PATCH 0760/1649] net/mlx5: Fix mlx5 ifc cmd_hca_cap bad offsets All reserved fields after early_vf_enable are off by 1, since early_vf_enable was not explicitly declared as array of size 1. Reserved field before cqe_zip had a wrong size, it should be 0x80 + 0x3f. Fixes: b0844444590e ("net/mlx5_core: Introduce access function to read internal timer ") Fixes: b4ff3a36d3e4 ("net/mlx5: Use offset based reserved field names in the IFC header file") Signed-off-by: Tariq Toukan Signed-off-by: Saeed Mahameed Signed-off-by: Matan Barak Signed-off-by: David S. Miller --- include/linux/mlx5/mlx5_ifc.h | 107 +++++++++++++++++----------------- 1 file changed, 55 insertions(+), 52 deletions(-) diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index c15b8a864937..c300e7491d80 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -750,21 +750,21 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 ets[0x1]; u8 nic_flow_table[0x1]; u8 eswitch_flow_table[0x1]; - u8 early_vf_enable; - u8 reserved_at_1a8[0x2]; + u8 early_vf_enable[0x1]; + u8 reserved_at_1a9[0x2]; u8 local_ca_ack_delay[0x5]; u8 reserved_at_1af[0x6]; u8 port_type[0x2]; u8 num_ports[0x8]; - u8 reserved_at_1bf[0x3]; + u8 reserved_at_1c0[0x3]; u8 log_max_msg[0x5]; - u8 reserved_at_1c7[0x4]; + u8 reserved_at_1c8[0x4]; u8 max_tc[0x4]; - u8 reserved_at_1cf[0x6]; + u8 reserved_at_1d0[0x6]; u8 rol_s[0x1]; u8 rol_g[0x1]; - u8 reserved_at_1d7[0x1]; + u8 reserved_at_1d8[0x1]; u8 wol_s[0x1]; u8 wol_g[0x1]; u8 wol_a[0x1]; @@ -774,47 +774,47 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 wol_p[0x1]; u8 stat_rate_support[0x10]; - u8 reserved_at_1ef[0xc]; + u8 reserved_at_1f0[0xc]; u8 cqe_version[0x4]; u8 compact_address_vector[0x1]; u8 reserved_at_200[0x3]; u8 ipoib_basic_offloads[0x1]; - u8 reserved_at_204[0xa]; + u8 reserved_at_205[0xa]; u8 drain_sigerr[0x1]; u8 cmdif_checksum[0x2]; u8 sigerr_cqe[0x1]; - u8 reserved_at_212[0x1]; + u8 reserved_at_213[0x1]; u8 wq_signature[0x1]; u8 sctr_data_cqe[0x1]; - u8 reserved_at_215[0x1]; + u8 reserved_at_216[0x1]; u8 sho[0x1]; u8 tph[0x1]; u8 rf[0x1]; u8 dct[0x1]; - u8 reserved_at_21a[0x1]; + u8 reserved_at_21b[0x1]; u8 eth_net_offloads[0x1]; u8 roce[0x1]; u8 atomic[0x1]; - u8 reserved_at_21e[0x1]; + u8 reserved_at_21f[0x1]; u8 cq_oi[0x1]; u8 cq_resize[0x1]; u8 cq_moderation[0x1]; - u8 reserved_at_222[0x3]; + u8 reserved_at_223[0x3]; u8 cq_eq_remap[0x1]; u8 pg[0x1]; u8 block_lb_mc[0x1]; - u8 reserved_at_228[0x1]; + u8 reserved_at_229[0x1]; u8 scqe_break_moderation[0x1]; u8 reserved_at_22a[0x1]; u8 cd[0x1]; - u8 reserved_at_22c[0x1]; + u8 reserved_at_22d[0x1]; u8 apm[0x1]; u8 vector_calc[0x1]; u8 reserved_at_22f[0x1]; u8 imaicl[0x1]; - u8 reserved_at_231[0x4]; + u8 reserved_at_232[0x4]; u8 qkv[0x1]; u8 pkv[0x1]; u8 set_deth_sqpn[0x1]; @@ -824,98 +824,101 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 uc[0x1]; u8 rc[0x1]; - u8 reserved_at_23f[0xa]; + u8 reserved_at_240[0xa]; u8 uar_sz[0x6]; - u8 reserved_at_24f[0x8]; + u8 reserved_at_250[0x8]; u8 log_pg_sz[0x8]; u8 bf[0x1]; - u8 reserved_at_260[0x1]; + u8 reserved_at_261[0x1]; u8 pad_tx_eth_packet[0x1]; - u8 reserved_at_262[0x8]; + u8 reserved_at_263[0x8]; u8 log_bf_reg_size[0x5]; - u8 reserved_at_26f[0x10]; + u8 reserved_at_270[0x10]; - u8 reserved_at_27f[0x10]; + u8 reserved_at_280[0x10]; u8 max_wqe_sz_sq[0x10]; - u8 
reserved_at_29f[0x10]; + u8 reserved_at_2a0[0x10]; u8 max_wqe_sz_rq[0x10]; - u8 reserved_at_2bf[0x10]; + u8 reserved_at_2c0[0x10]; u8 max_wqe_sz_sq_dc[0x10]; - u8 reserved_at_2df[0x7]; + u8 reserved_at_2e0[0x7]; u8 max_qp_mcg[0x19]; - u8 reserved_at_2ff[0x18]; + u8 reserved_at_300[0x18]; u8 log_max_mcg[0x8]; - u8 reserved_at_31f[0x3]; + u8 reserved_at_320[0x3]; u8 log_max_transport_domain[0x5]; - u8 reserved_at_327[0x3]; + u8 reserved_at_328[0x3]; u8 log_max_pd[0x5]; - u8 reserved_at_32f[0xb]; + u8 reserved_at_330[0xb]; u8 log_max_xrcd[0x5]; - u8 reserved_at_33f[0x20]; + u8 reserved_at_340[0x20]; - u8 reserved_at_35f[0x3]; + u8 reserved_at_360[0x3]; u8 log_max_rq[0x5]; - u8 reserved_at_367[0x3]; + u8 reserved_at_368[0x3]; u8 log_max_sq[0x5]; - u8 reserved_at_36f[0x3]; + u8 reserved_at_370[0x3]; u8 log_max_tir[0x5]; - u8 reserved_at_377[0x3]; + u8 reserved_at_378[0x3]; u8 log_max_tis[0x5]; u8 basic_cyclic_rcv_wqe[0x1]; - u8 reserved_at_380[0x2]; + u8 reserved_at_381[0x2]; u8 log_max_rmp[0x5]; - u8 reserved_at_387[0x3]; + u8 reserved_at_388[0x3]; u8 log_max_rqt[0x5]; - u8 reserved_at_38f[0x3]; + u8 reserved_at_390[0x3]; u8 log_max_rqt_size[0x5]; - u8 reserved_at_397[0x3]; + u8 reserved_at_398[0x3]; u8 log_max_tis_per_sq[0x5]; - u8 reserved_at_39f[0x3]; + u8 reserved_at_3a0[0x3]; u8 log_max_stride_sz_rq[0x5]; - u8 reserved_at_3a7[0x3]; + u8 reserved_at_3a8[0x3]; u8 log_min_stride_sz_rq[0x5]; - u8 reserved_at_3af[0x3]; + u8 reserved_at_3b0[0x3]; u8 log_max_stride_sz_sq[0x5]; - u8 reserved_at_3b7[0x3]; + u8 reserved_at_3b8[0x3]; u8 log_min_stride_sz_sq[0x5]; - u8 reserved_at_3bf[0x1b]; + u8 reserved_at_3c0[0x1b]; u8 log_max_wq_sz[0x5]; u8 nic_vport_change_event[0x1]; - u8 reserved_at_3e0[0xa]; + u8 reserved_at_3e1[0xa]; u8 log_max_vlan_list[0x5]; - u8 reserved_at_3ef[0x3]; + u8 reserved_at_3f0[0x3]; u8 log_max_current_mc_list[0x5]; - u8 reserved_at_3f7[0x3]; + u8 reserved_at_3f8[0x3]; u8 log_max_current_uc_list[0x5]; - u8 reserved_at_3ff[0x80]; + u8 reserved_at_400[0x80]; - u8 reserved_at_47f[0x3]; + u8 reserved_at_480[0x3]; u8 log_max_l2_table[0x5]; - u8 reserved_at_487[0x8]; + u8 reserved_at_488[0x8]; u8 log_uar_page_sz[0x10]; - u8 reserved_at_49f[0x20]; + u8 reserved_at_4a0[0x20]; u8 device_frequency_mhz[0x20]; u8 device_frequency_khz[0x20]; - u8 reserved_at_4ff[0x5f]; + + u8 reserved_at_500[0x80]; + + u8 reserved_at_580[0x3f]; u8 cqe_zip[0x1]; u8 cqe_zip_timeout[0x10]; u8 cqe_zip_max_num[0x10]; - u8 reserved_at_57f[0x220]; + u8 reserved_at_5e0[0x220]; }; enum mlx5_flow_destination_type { From 7d5e14237a551a5de3d287f2e8db2d044ee81a1a Mon Sep 17 00:00:00 2001 From: Saeed Mahameed Date: Mon, 11 Apr 2016 23:10:22 +0300 Subject: [PATCH 0761/1649] net/mlx5: Update mlx5_ifc hardware features Adding the needed mlx5_ifc hardware bits and structs for the following feature: * Add vport to steering commands for SRIOV ACL support * Add mlcr, pcmr and mcia registers for dump module EEPROM * Add support for FCS, baeacon led and disable_link bits to hca caps * Add CQE period mode bit in CQ context for CQE based CQ moderation support * Add umr SQ bit for fragmented memory registration * Add needed bits and caps for Striding RQ support Signed-off-by: Saeed Mahameed Signed-off-by: Matan Barak Signed-off-by: David S. 
Miller --- include/linux/mlx5/mlx5_ifc.h | 146 +++++++++++++++++++++++++++++----- 1 file changed, 124 insertions(+), 22 deletions(-) diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index c300e7491d80..4ce4ea422a10 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -513,7 +513,9 @@ struct mlx5_ifc_per_protocol_networking_offload_caps_bits { u8 max_lso_cap[0x5]; u8 reserved_at_10[0x4]; u8 rss_ind_tbl_cap[0x4]; - u8 reserved_at_18[0x3]; + u8 reg_umr_sq[0x1]; + u8 scatter_fcs[0x1]; + u8 reserved_at_1a[0x1]; u8 tunnel_lso_const_out_ip_id[0x1]; u8 reserved_at_1c[0x2]; u8 tunnel_statless_gre[0x1]; @@ -648,7 +650,7 @@ struct mlx5_ifc_vector_calc_cap_bits { enum { MLX5_WQ_TYPE_LINKED_LIST = 0x0, MLX5_WQ_TYPE_CYCLIC = 0x1, - MLX5_WQ_TYPE_STRQ = 0x2, + MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ = 0x2, }; enum { @@ -753,7 +755,11 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 early_vf_enable[0x1]; u8 reserved_at_1a9[0x2]; u8 local_ca_ack_delay[0x5]; - u8 reserved_at_1af[0x6]; + u8 reserved_at_1af[0x2]; + u8 ports_check[0x1]; + u8 reserved_at_1b2[0x1]; + u8 disable_link_up[0x1]; + u8 beacon_led[0x1]; u8 port_type[0x2]; u8 num_ports[0x8]; @@ -778,7 +784,8 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 cqe_version[0x4]; u8 compact_address_vector[0x1]; - u8 reserved_at_200[0x3]; + u8 striding_rq[0x1]; + u8 reserved_at_201[0x2]; u8 ipoib_basic_offloads[0x1]; u8 reserved_at_205[0xa]; u8 drain_sigerr[0x1]; @@ -807,12 +814,12 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 block_lb_mc[0x1]; u8 reserved_at_229[0x1]; u8 scqe_break_moderation[0x1]; - u8 reserved_at_22a[0x1]; + u8 cq_period_start_from_cqe[0x1]; u8 cd[0x1]; u8 reserved_at_22d[0x1]; u8 apm[0x1]; u8 vector_calc[0x1]; - u8 reserved_at_22f[0x1]; + u8 umr_ptr_rlky[0x1]; u8 imaicl[0x1]; u8 reserved_at_232[0x4]; u8 qkv[0x1]; @@ -913,10 +920,10 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 reserved_at_500[0x80]; u8 reserved_at_580[0x3f]; - u8 cqe_zip[0x1]; + u8 cqe_compression[0x1]; - u8 cqe_zip_timeout[0x10]; - u8 cqe_zip_max_num[0x10]; + u8 cqe_compression_timeout[0x10]; + u8 cqe_compression_max_num[0x10]; u8 reserved_at_5e0[0x220]; }; @@ -1000,7 +1007,13 @@ struct mlx5_ifc_wq_bits { u8 reserved_at_118[0x3]; u8 log_wq_sz[0x5]; - u8 reserved_at_120[0x4e0]; + u8 reserved_at_120[0x15]; + u8 log_wqe_num_of_strides[0x3]; + u8 two_byte_shift_en[0x1]; + u8 reserved_at_139[0x4]; + u8 log_wqe_stride_size[0x3]; + + u8 reserved_at_140[0x4c0]; struct mlx5_ifc_cmd_pas_bits pas[0]; }; @@ -2199,7 +2212,8 @@ struct mlx5_ifc_sqc_bits { u8 flush_in_error_en[0x1]; u8 reserved_at_4[0x4]; u8 state[0x4]; - u8 reserved_at_c[0x14]; + u8 reg_umr[0x1]; + u8 reserved_at_d[0x13]; u8 reserved_at_20[0x8]; u8 user_index[0x18]; @@ -2247,7 +2261,8 @@ enum { struct mlx5_ifc_rqc_bits { u8 rlky[0x1]; - u8 reserved_at_1[0x2]; + u8 reserved_at_1[0x1]; + u8 scatter_fcs[0x1]; u8 vsd[0x1]; u8 mem_rq_type[0x4]; u8 state[0x4]; @@ -2604,6 +2619,11 @@ enum { MLX5_CQC_ST_FIRED = 0xa, }; +enum { + MLX5_CQ_PERIOD_MODE_START_FROM_EQE = 0x0, + MLX5_CQ_PERIOD_MODE_START_FROM_CQE = 0x1, +}; + struct mlx5_ifc_cqc_bits { u8 status[0x4]; u8 reserved_at_4[0x4]; @@ -2612,8 +2632,8 @@ struct mlx5_ifc_cqc_bits { u8 reserved_at_c[0x1]; u8 scqe_break_moderation_en[0x1]; u8 oi[0x1]; - u8 reserved_at_f[0x2]; - u8 cqe_zip_en[0x1]; + u8 cq_period_mode[0x2]; + u8 cqe_comp_en[0x1]; u8 mini_cqe_res_format[0x2]; u8 st[0x4]; u8 reserved_at_18[0x8]; @@ -2987,7 +3007,11 @@ struct mlx5_ifc_set_fte_in_bits { u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_at_40[0x40]; + u8 other_vport[0x1]; 
+ u8 reserved_at_41[0xf]; + u8 vport_number[0x10]; + + u8 reserved_at_60[0x20]; u8 table_type[0x8]; u8 reserved_at_88[0x18]; @@ -5181,7 +5205,11 @@ struct mlx5_ifc_destroy_flow_table_in_bits { u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_at_40[0x40]; + u8 other_vport[0x1]; + u8 reserved_at_41[0xf]; + u8 vport_number[0x10]; + + u8 reserved_at_60[0x20]; u8 table_type[0x8]; u8 reserved_at_88[0x18]; @@ -5208,7 +5236,11 @@ struct mlx5_ifc_destroy_flow_group_in_bits { u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_at_40[0x40]; + u8 other_vport[0x1]; + u8 reserved_at_41[0xf]; + u8 vport_number[0x10]; + + u8 reserved_at_60[0x20]; u8 table_type[0x8]; u8 reserved_at_88[0x18]; @@ -5349,7 +5381,11 @@ struct mlx5_ifc_delete_fte_in_bits { u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_at_40[0x40]; + u8 other_vport[0x1]; + u8 reserved_at_41[0xf]; + u8 vport_number[0x10]; + + u8 reserved_at_60[0x20]; u8 table_type[0x8]; u8 reserved_at_88[0x18]; @@ -5795,7 +5831,11 @@ struct mlx5_ifc_create_flow_table_in_bits { u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_at_40[0x40]; + u8 other_vport[0x1]; + u8 reserved_at_41[0xf]; + u8 vport_number[0x10]; + + u8 reserved_at_60[0x20]; u8 table_type[0x8]; u8 reserved_at_88[0x18]; @@ -5839,7 +5879,11 @@ struct mlx5_ifc_create_flow_group_in_bits { u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_at_40[0x40]; + u8 other_vport[0x1]; + u8 reserved_at_41[0xf]; + u8 vport_number[0x10]; + + u8 reserved_at_60[0x20]; u8 table_type[0x8]; u8 reserved_at_88[0x18]; @@ -6372,6 +6416,17 @@ struct mlx5_ifc_ptys_reg_bits { u8 reserved_at_1a0[0x60]; }; +struct mlx5_ifc_mlcr_reg_bits { + u8 reserved_at_0[0x8]; + u8 local_port[0x8]; + u8 reserved_at_10[0x20]; + + u8 beacon_duration[0x10]; + u8 reserved_at_40[0x10]; + + u8 beacon_remain[0x10]; +}; + struct mlx5_ifc_ptas_reg_bits { u8 reserved_at_0[0x20]; @@ -6781,6 +6836,16 @@ struct mlx5_ifc_pamp_reg_bits { u8 index_data[18][0x10]; }; +struct mlx5_ifc_pcmr_reg_bits { + u8 reserved_at_0[0x8]; + u8 local_port[0x8]; + u8 reserved_at_10[0x2e]; + u8 fcs_cap[0x1]; + u8 reserved_at_3f[0x1f]; + u8 fcs_chk[0x1]; + u8 reserved_at_5f[0x1]; +}; + struct mlx5_ifc_lane_2_module_mapping_bits { u8 reserved_at_0[0x6]; u8 rx_lane[0x2]; @@ -7117,6 +7182,7 @@ union mlx5_ifc_ports_control_registers_document_bits { struct mlx5_ifc_pspa_reg_bits pspa_reg; struct mlx5_ifc_ptas_reg_bits ptas_reg; struct mlx5_ifc_ptys_reg_bits ptys_reg; + struct mlx5_ifc_mlcr_reg_bits mlcr_reg; struct mlx5_ifc_pude_reg_bits pude_reg; struct mlx5_ifc_pvlc_reg_bits pvlc_reg; struct mlx5_ifc_slrg_reg_bits slrg_reg; @@ -7150,7 +7216,11 @@ struct mlx5_ifc_set_flow_table_root_in_bits { u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_at_40[0x40]; + u8 other_vport[0x1]; + u8 reserved_at_41[0xf]; + u8 vport_number[0x10]; + + u8 reserved_at_60[0x20]; u8 table_type[0x8]; u8 reserved_at_88[0x18]; @@ -7181,7 +7251,9 @@ struct mlx5_ifc_modify_flow_table_in_bits { u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_at_40[0x20]; + u8 other_vport[0x1]; + u8 reserved_at_41[0xf]; + u8 vport_number[0x10]; u8 reserved_at_60[0x10]; u8 modify_field_select[0x10]; @@ -7247,4 +7319,34 @@ struct mlx5_ifc_qtct_reg_bits { u8 tclass[0x3]; }; +struct mlx5_ifc_mcia_reg_bits { + u8 l[0x1]; + u8 reserved_at_1[0x7]; + u8 module[0x8]; + u8 reserved_at_10[0x8]; + u8 status[0x8]; + + u8 i2c_device_address[0x8]; + u8 page_number[0x8]; + u8 device_address[0x10]; + + u8 reserved_at_40[0x10]; + u8 size[0x10]; + + u8 reserved_at_60[0x20]; + + u8 dword_0[0x20]; + u8 
dword_1[0x20]; + u8 dword_2[0x20]; + u8 dword_3[0x20]; + u8 dword_4[0x20]; + u8 dword_5[0x20]; + u8 dword_6[0x20]; + u8 dword_7[0x20]; + u8 dword_8[0x20]; + u8 dword_9[0x20]; + u8 dword_10[0x20]; + u8 dword_11[0x20]; +}; + #endif /* MLX5_IFC_H */ From 311b21774f1389f9c34eac4da90c43c95fc2b62b Mon Sep 17 00:00:00 2001 From: Marcelo Ricardo Leitner Date: Wed, 13 Apr 2016 19:12:29 -0300 Subject: [PATCH 0762/1649] sctp: simplify sk_receive_queue locking SCTP already serializes access to rcvbuf through its sock lock: sctp_recvmsg takes it right in the start and release at the end, while rx path will also take the lock before doing any socket processing. On sctp_rcv() it will check if there is an user using the socket and, if there is, it will queue incoming packets to the backlog. The backlog processing will do the same. Even timers will do such check and re-schedule if an user is using the socket. Simplifying this will allow us to remove sctp_skb_list_tail and get ride of some expensive lockings. The lists that it is used on are also mangled with functions like __skb_queue_tail and __skb_unlink in the same context, like on sctp_ulpq_tail_event() and sctp_clear_pd(). sctp_close() will also purge those while using only the sock lock. Therefore the lockings performed by sctp_skb_list_tail() are not necessary. This patch removes this function and replaces its calls with just skb_queue_splice_tail_init() instead. The biggest gain is at sctp_ulpq_tail_event(), because the events always contain a list, even if it's queueing a single skb and this was triggering expensive calls to spin_lock_irqsave/_irqrestore for every data chunk received. As SCTP will deliver each data chunk on a corresponding recvmsg, the more effective the change will be. Before this patch, with chunks with 30 bytes: netperf -t SCTP_STREAM -H 192.168.1.2 -cC -l 60 -- -m 30 -S 400000 400000 -s 400000 400000 on a 10Gbit link with 1500 MTU: SCTP STREAM TEST from 0.0.0.0 (0.0.0.0) port 0 AF_INET to 192.168.1.1 () port 0 AF_INET Recv Send Send Utilization Service Demand Socket Socket Message Elapsed Send Recv Send Recv Size Size Size Time Throughput local remote local remote bytes bytes bytes secs. 10^6bits/s % S % S us/KB us/KB 425984 425984 30 60.00 137.45 7.34 7.36 52.504 52.608 With it: SCTP STREAM TEST from 0.0.0.0 (0.0.0.0) port 0 AF_INET to 192.168.1.1 () port 0 AF_INET Recv Send Send Utilization Service Demand Socket Socket Message Elapsed Send Recv Send Recv Size Size Size Time Throughput local remote local remote bytes bytes bytes secs. 10^6bits/s % S % S us/KB us/KB 425984 425984 30 60.00 179.10 7.97 6.70 43.740 36.788 Signed-off-by: Marcelo Ricardo Leitner Signed-off-by: David S. Miller --- include/net/sctp/sctp.h | 15 --------------- net/sctp/socket.c | 4 +--- net/sctp/ulpqueue.c | 5 +++-- 3 files changed, 4 insertions(+), 20 deletions(-) diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h index 03fb33efcae2..978d5f67d5a7 100644 --- a/include/net/sctp/sctp.h +++ b/include/net/sctp/sctp.h @@ -359,21 +359,6 @@ int sctp_do_peeloff(struct sock *sk, sctp_assoc_t id, struct socket **sockp); #define sctp_skb_for_each(pos, head, tmp) \ skb_queue_walk_safe(head, pos, tmp) -/* A helper to append an entire skb list (list) to another (head). 
*/ -static inline void sctp_skb_list_tail(struct sk_buff_head *list, - struct sk_buff_head *head) -{ - unsigned long flags; - - spin_lock_irqsave(&head->lock, flags); - spin_lock(&list->lock); - - skb_queue_splice_tail_init(list, head); - - spin_unlock(&list->lock); - spin_unlock_irqrestore(&head->lock, flags); -} - /** * sctp_list_dequeue - remove from the head of the queue * @list: list to dequeue from diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 36697f85ce48..bf265a4bba6e 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -6766,13 +6766,11 @@ struct sk_buff *sctp_skb_recv_datagram(struct sock *sk, int flags, * However, this function was correct in any case. 8) */ if (flags & MSG_PEEK) { - spin_lock_bh(&sk->sk_receive_queue.lock); skb = skb_peek(&sk->sk_receive_queue); if (skb) atomic_inc(&skb->users); - spin_unlock_bh(&sk->sk_receive_queue.lock); } else { - skb = skb_dequeue(&sk->sk_receive_queue); + skb = __skb_dequeue(&sk->sk_receive_queue); } if (skb) diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c index 72e5b3e41cdd..ec12a8920e5f 100644 --- a/net/sctp/ulpqueue.c +++ b/net/sctp/ulpqueue.c @@ -141,7 +141,8 @@ int sctp_clear_pd(struct sock *sk, struct sctp_association *asoc) */ if (!skb_queue_empty(&sp->pd_lobby)) { struct list_head *list; - sctp_skb_list_tail(&sp->pd_lobby, &sk->sk_receive_queue); + skb_queue_splice_tail_init(&sp->pd_lobby, + &sk->sk_receive_queue); list = (struct list_head *)&sctp_sk(sk)->pd_lobby; INIT_LIST_HEAD(list); return 1; @@ -252,7 +253,7 @@ int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event) * collected on a list. */ if (skb_list) - sctp_skb_list_tail(skb_list, queue); + skb_queue_splice_tail_init(skb_list, queue); else __skb_queue_tail(queue, skb); From 52c52a61a39fb319c14a582f8631619e5d5f55bf Mon Sep 17 00:00:00 2001 From: Xin Long Date: Thu, 14 Apr 2016 15:35:30 +0800 Subject: [PATCH 0763/1649] sctp: add sctp_info dump api for sctp_diag sctp_diag will dump some important details of sctp's assoc or ep, we use sctp_info to describe them, sctp_get_sctp_info to get them, and export it to sctp_diag.ko. v2->v3: - we will not use list_for_each_safe in sctp_get_sctp_info, cause all the callers of it will use lock_sock. - fix the holes in struct sctp_info with __reserved* field. because sctp_diag is a new feature, and sctp_info is just for now, it may be changed in the future. Signed-off-by: Xin Long Signed-off-by: David S. 
Miller --- include/linux/sctp.h | 67 ++++++++++++++++++++++++++++++++ include/net/sctp/sctp.h | 3 ++ net/sctp/socket.c | 86 +++++++++++++++++++++++++++++++++++++++++ 3 files changed, 156 insertions(+) diff --git a/include/linux/sctp.h b/include/linux/sctp.h index a9414fd49dc6..dacb5e711994 100644 --- a/include/linux/sctp.h +++ b/include/linux/sctp.h @@ -705,4 +705,71 @@ typedef struct sctp_auth_chunk { sctp_authhdr_t auth_hdr; } __packed sctp_auth_chunk_t; +struct sctp_info { + __u32 sctpi_tag; + __u32 sctpi_state; + __u32 sctpi_rwnd; + __u16 sctpi_unackdata; + __u16 sctpi_penddata; + __u16 sctpi_instrms; + __u16 sctpi_outstrms; + __u32 sctpi_fragmentation_point; + __u32 sctpi_inqueue; + __u32 sctpi_outqueue; + __u32 sctpi_overall_error; + __u32 sctpi_max_burst; + __u32 sctpi_maxseg; + __u32 sctpi_peer_rwnd; + __u32 sctpi_peer_tag; + __u8 sctpi_peer_capable; + __u8 sctpi_peer_sack; + __u16 __reserved1; + + /* assoc status info */ + __u64 sctpi_isacks; + __u64 sctpi_osacks; + __u64 sctpi_opackets; + __u64 sctpi_ipackets; + __u64 sctpi_rtxchunks; + __u64 sctpi_outofseqtsns; + __u64 sctpi_idupchunks; + __u64 sctpi_gapcnt; + __u64 sctpi_ouodchunks; + __u64 sctpi_iuodchunks; + __u64 sctpi_oodchunks; + __u64 sctpi_iodchunks; + __u64 sctpi_octrlchunks; + __u64 sctpi_ictrlchunks; + + /* primary transport info */ + struct sockaddr_storage sctpi_p_address; + __s32 sctpi_p_state; + __u32 sctpi_p_cwnd; + __u32 sctpi_p_srtt; + __u32 sctpi_p_rto; + __u32 sctpi_p_hbinterval; + __u32 sctpi_p_pathmaxrxt; + __u32 sctpi_p_sackdelay; + __u32 sctpi_p_sackfreq; + __u32 sctpi_p_ssthresh; + __u32 sctpi_p_partial_bytes_acked; + __u32 sctpi_p_flight_size; + __u16 sctpi_p_error; + __u16 __reserved2; + + /* sctp sock info */ + __u32 sctpi_s_autoclose; + __u32 sctpi_s_adaptation_ind; + __u32 sctpi_s_pd_point; + __u8 sctpi_s_nodelay; + __u8 sctpi_s_disable_fragments; + __u8 sctpi_s_v4mapped; + __u8 sctpi_s_frag_interleave; +}; + +struct sctp_infox { + struct sctp_info *sctpinfo; + struct sctp_association *asoc; +}; + #endif /* __LINUX_SCTP_H__ */ diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h index 978d5f67d5a7..268b10058ef5 100644 --- a/include/net/sctp/sctp.h +++ b/include/net/sctp/sctp.h @@ -116,6 +116,9 @@ extern struct percpu_counter sctp_sockets_allocated; int sctp_asconf_mgmt(struct sctp_sock *, struct sctp_sockaddr_entry *); struct sk_buff *sctp_skb_recv_datagram(struct sock *, int, int, int *); +int sctp_get_sctp_info(struct sock *sk, struct sctp_association *asoc, + struct sctp_info *info); + /* * sctp/primitive.c */ diff --git a/net/sctp/socket.c b/net/sctp/socket.c index bf265a4bba6e..cd0fb3bb493c 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -4202,6 +4202,92 @@ static void sctp_shutdown(struct sock *sk, int how) } } +int sctp_get_sctp_info(struct sock *sk, struct sctp_association *asoc, + struct sctp_info *info) +{ + struct sctp_transport *prim; + struct list_head *pos; + int mask; + + memset(info, 0, sizeof(*info)); + if (!asoc) { + struct sctp_sock *sp = sctp_sk(sk); + + info->sctpi_s_autoclose = sp->autoclose; + info->sctpi_s_adaptation_ind = sp->adaptation_ind; + info->sctpi_s_pd_point = sp->pd_point; + info->sctpi_s_nodelay = sp->nodelay; + info->sctpi_s_disable_fragments = sp->disable_fragments; + info->sctpi_s_v4mapped = sp->v4mapped; + info->sctpi_s_frag_interleave = sp->frag_interleave; + + return 0; + } + + info->sctpi_tag = asoc->c.my_vtag; + info->sctpi_state = asoc->state; + info->sctpi_rwnd = asoc->a_rwnd; + info->sctpi_unackdata = asoc->unack_data; + 
info->sctpi_penddata = sctp_tsnmap_pending(&asoc->peer.tsn_map); + info->sctpi_instrms = asoc->c.sinit_max_instreams; + info->sctpi_outstrms = asoc->c.sinit_num_ostreams; + list_for_each(pos, &asoc->base.inqueue.in_chunk_list) + info->sctpi_inqueue++; + list_for_each(pos, &asoc->outqueue.out_chunk_list) + info->sctpi_outqueue++; + info->sctpi_overall_error = asoc->overall_error_count; + info->sctpi_max_burst = asoc->max_burst; + info->sctpi_maxseg = asoc->frag_point; + info->sctpi_peer_rwnd = asoc->peer.rwnd; + info->sctpi_peer_tag = asoc->c.peer_vtag; + + mask = asoc->peer.ecn_capable << 1; + mask = (mask | asoc->peer.ipv4_address) << 1; + mask = (mask | asoc->peer.ipv6_address) << 1; + mask = (mask | asoc->peer.hostname_address) << 1; + mask = (mask | asoc->peer.asconf_capable) << 1; + mask = (mask | asoc->peer.prsctp_capable) << 1; + mask = (mask | asoc->peer.auth_capable); + info->sctpi_peer_capable = mask; + mask = asoc->peer.sack_needed << 1; + mask = (mask | asoc->peer.sack_generation) << 1; + mask = (mask | asoc->peer.zero_window_announced); + info->sctpi_peer_sack = mask; + + info->sctpi_isacks = asoc->stats.isacks; + info->sctpi_osacks = asoc->stats.osacks; + info->sctpi_opackets = asoc->stats.opackets; + info->sctpi_ipackets = asoc->stats.ipackets; + info->sctpi_rtxchunks = asoc->stats.rtxchunks; + info->sctpi_outofseqtsns = asoc->stats.outofseqtsns; + info->sctpi_idupchunks = asoc->stats.idupchunks; + info->sctpi_gapcnt = asoc->stats.gapcnt; + info->sctpi_ouodchunks = asoc->stats.ouodchunks; + info->sctpi_iuodchunks = asoc->stats.iuodchunks; + info->sctpi_oodchunks = asoc->stats.oodchunks; + info->sctpi_iodchunks = asoc->stats.iodchunks; + info->sctpi_octrlchunks = asoc->stats.octrlchunks; + info->sctpi_ictrlchunks = asoc->stats.ictrlchunks; + + prim = asoc->peer.primary_path; + memcpy(&info->sctpi_p_address, &prim->ipaddr, + sizeof(struct sockaddr_storage)); + info->sctpi_p_state = prim->state; + info->sctpi_p_cwnd = prim->cwnd; + info->sctpi_p_srtt = prim->srtt; + info->sctpi_p_rto = jiffies_to_msecs(prim->rto); + info->sctpi_p_hbinterval = prim->hbinterval; + info->sctpi_p_pathmaxrxt = prim->pathmaxrxt; + info->sctpi_p_sackdelay = jiffies_to_msecs(prim->sackdelay); + info->sctpi_p_ssthresh = prim->ssthresh; + info->sctpi_p_partial_bytes_acked = prim->partial_bytes_acked; + info->sctpi_p_flight_size = prim->flight_size; + info->sctpi_p_error = prim->error_count; + + return 0; +} +EXPORT_SYMBOL_GPL(sctp_get_sctp_info); + /* 7.2.1 Association Status (SCTP_STATUS) * Applications can retrieve current status information about an From 626d16f50f39bb9c44f98fd256cae2b864900a01 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Thu, 14 Apr 2016 15:35:31 +0800 Subject: [PATCH 0764/1649] sctp: export some apis or variables for sctp_diag and reuse some for proc For some main variables in sctp.ko, we couldn't export it to other modules, so we have to define some api to access them. It will include sctp transport and endpoint's traversal. There are some transport traversal functions for sctp_diag, we can also use it for sctp_proc. cause they have the similar situation to traversal transport. v2->v3: - rhashtable_walk_init need the parameter gfp, because of recent upstrem update Signed-off-by: Xin Long Signed-off-by: David S. 
Miller --- include/net/sctp/sctp.h | 13 +++++ net/sctp/proc.c | 81 ++++++-------------------- net/sctp/socket.c | 125 ++++++++++++++++++++++++++++++++++++++++ 3 files changed, 156 insertions(+), 63 deletions(-) diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h index 268b10058ef5..3f1c0ff7d4b6 100644 --- a/include/net/sctp/sctp.h +++ b/include/net/sctp/sctp.h @@ -116,6 +116,19 @@ extern struct percpu_counter sctp_sockets_allocated; int sctp_asconf_mgmt(struct sctp_sock *, struct sctp_sockaddr_entry *); struct sk_buff *sctp_skb_recv_datagram(struct sock *, int, int, int *); +int sctp_transport_walk_start(struct rhashtable_iter *iter); +void sctp_transport_walk_stop(struct rhashtable_iter *iter); +struct sctp_transport *sctp_transport_get_next(struct net *net, + struct rhashtable_iter *iter); +struct sctp_transport *sctp_transport_get_idx(struct net *net, + struct rhashtable_iter *iter, int pos); +int sctp_transport_lookup_process(int (*cb)(struct sctp_transport *, void *), + struct net *net, + const union sctp_addr *laddr, + const union sctp_addr *paddr, void *p); +int sctp_for_each_transport(int (*cb)(struct sctp_transport *, void *), + struct net *net, int pos, void *p); +int sctp_for_each_endpoint(int (*cb)(struct sctp_endpoint *, void *), void *p); int sctp_get_sctp_info(struct sock *sk, struct sctp_association *asoc, struct sctp_info *info); diff --git a/net/sctp/proc.c b/net/sctp/proc.c index 6d45d53321e6..dd8492f0037d 100644 --- a/net/sctp/proc.c +++ b/net/sctp/proc.c @@ -282,81 +282,31 @@ struct sctp_ht_iter { struct rhashtable_iter hti; }; -static struct sctp_transport *sctp_transport_get_next(struct seq_file *seq) -{ - struct sctp_ht_iter *iter = seq->private; - struct sctp_transport *t; - - t = rhashtable_walk_next(&iter->hti); - for (; t; t = rhashtable_walk_next(&iter->hti)) { - if (IS_ERR(t)) { - if (PTR_ERR(t) == -EAGAIN) - continue; - break; - } - - if (net_eq(sock_net(t->asoc->base.sk), seq_file_net(seq)) && - t->asoc->peer.primary_path == t) - break; - } - - return t; -} - -static struct sctp_transport *sctp_transport_get_idx(struct seq_file *seq, - loff_t pos) -{ - void *obj = SEQ_START_TOKEN; - - while (pos && (obj = sctp_transport_get_next(seq)) && !IS_ERR(obj)) - pos--; - - return obj; -} - -static int sctp_transport_walk_start(struct seq_file *seq) -{ - struct sctp_ht_iter *iter = seq->private; - int err; - - err = rhashtable_walk_init(&sctp_transport_hashtable, &iter->hti, - GFP_KERNEL); - if (err) - return err; - - err = rhashtable_walk_start(&iter->hti); - - return err == -EAGAIN ? 
0 : err; -} - -static void sctp_transport_walk_stop(struct seq_file *seq) -{ - struct sctp_ht_iter *iter = seq->private; - - rhashtable_walk_stop(&iter->hti); - rhashtable_walk_exit(&iter->hti); -} - static void *sctp_assocs_seq_start(struct seq_file *seq, loff_t *pos) { - int err = sctp_transport_walk_start(seq); + struct sctp_ht_iter *iter = seq->private; + int err = sctp_transport_walk_start(&iter->hti); if (err) return ERR_PTR(err); - return sctp_transport_get_idx(seq, *pos); + return sctp_transport_get_idx(seq_file_net(seq), &iter->hti, *pos); } static void sctp_assocs_seq_stop(struct seq_file *seq, void *v) { - sctp_transport_walk_stop(seq); + struct sctp_ht_iter *iter = seq->private; + + sctp_transport_walk_stop(&iter->hti); } static void *sctp_assocs_seq_next(struct seq_file *seq, void *v, loff_t *pos) { + struct sctp_ht_iter *iter = seq->private; + ++*pos; - return sctp_transport_get_next(seq); + return sctp_transport_get_next(seq_file_net(seq), &iter->hti); } /* Display sctp associations (/proc/net/sctp/assocs). */ @@ -458,24 +408,29 @@ void sctp_assocs_proc_exit(struct net *net) static void *sctp_remaddr_seq_start(struct seq_file *seq, loff_t *pos) { - int err = sctp_transport_walk_start(seq); + struct sctp_ht_iter *iter = seq->private; + int err = sctp_transport_walk_start(&iter->hti); if (err) return ERR_PTR(err); - return sctp_transport_get_idx(seq, *pos); + return sctp_transport_get_idx(seq_file_net(seq), &iter->hti, *pos); } static void *sctp_remaddr_seq_next(struct seq_file *seq, void *v, loff_t *pos) { + struct sctp_ht_iter *iter = seq->private; + ++*pos; - return sctp_transport_get_next(seq); + return sctp_transport_get_next(seq_file_net(seq), &iter->hti); } static void sctp_remaddr_seq_stop(struct seq_file *seq, void *v) { - sctp_transport_walk_stop(seq); + struct sctp_ht_iter *iter = seq->private; + + sctp_transport_walk_stop(&iter->hti); } static int sctp_remaddr_seq_show(struct seq_file *seq, void *v) diff --git a/net/sctp/socket.c b/net/sctp/socket.c index cd0fb3bb493c..5e5bc08d2b25 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -4288,6 +4288,131 @@ int sctp_get_sctp_info(struct sock *sk, struct sctp_association *asoc, } EXPORT_SYMBOL_GPL(sctp_get_sctp_info); +/* use callback to avoid exporting the core structure */ +int sctp_transport_walk_start(struct rhashtable_iter *iter) +{ + int err; + + err = rhashtable_walk_init(&sctp_transport_hashtable, iter, + GFP_KERNEL); + if (err) + return err; + + err = rhashtable_walk_start(iter); + + return err == -EAGAIN ? 
0 : err; +} + +void sctp_transport_walk_stop(struct rhashtable_iter *iter) +{ + rhashtable_walk_stop(iter); + rhashtable_walk_exit(iter); +} + +struct sctp_transport *sctp_transport_get_next(struct net *net, + struct rhashtable_iter *iter) +{ + struct sctp_transport *t; + + t = rhashtable_walk_next(iter); + for (; t; t = rhashtable_walk_next(iter)) { + if (IS_ERR(t)) { + if (PTR_ERR(t) == -EAGAIN) + continue; + break; + } + + if (net_eq(sock_net(t->asoc->base.sk), net) && + t->asoc->peer.primary_path == t) + break; + } + + return t; +} + +struct sctp_transport *sctp_transport_get_idx(struct net *net, + struct rhashtable_iter *iter, + int pos) +{ + void *obj = SEQ_START_TOKEN; + + while (pos && (obj = sctp_transport_get_next(net, iter)) && + !IS_ERR(obj)) + pos--; + + return obj; +} + +int sctp_for_each_endpoint(int (*cb)(struct sctp_endpoint *, void *), + void *p) { + int err = 0; + int hash = 0; + struct sctp_ep_common *epb; + struct sctp_hashbucket *head; + + for (head = sctp_ep_hashtable; hash < sctp_ep_hashsize; + hash++, head++) { + read_lock(&head->lock); + sctp_for_each_hentry(epb, &head->chain) { + err = cb(sctp_ep(epb), p); + if (err) + break; + } + read_unlock(&head->lock); + } + + return err; +} +EXPORT_SYMBOL_GPL(sctp_for_each_endpoint); + +int sctp_transport_lookup_process(int (*cb)(struct sctp_transport *, void *), + struct net *net, + const union sctp_addr *laddr, + const union sctp_addr *paddr, void *p) +{ + struct sctp_transport *transport; + int err = 0; + + rcu_read_lock(); + transport = sctp_addrs_lookup_transport(net, laddr, paddr); + if (!transport || !sctp_transport_hold(transport)) + goto out; + err = cb(transport, p); + sctp_transport_put(transport); + +out: + rcu_read_unlock(); + return err; +} +EXPORT_SYMBOL_GPL(sctp_transport_lookup_process); + +int sctp_for_each_transport(int (*cb)(struct sctp_transport *, void *), + struct net *net, int pos, void *p) { + struct rhashtable_iter hti; + int err = 0; + void *obj; + + if (sctp_transport_walk_start(&hti)) + goto out; + + sctp_transport_get_idx(net, &hti, pos); + obj = sctp_transport_get_next(net, &hti); + for (; obj && !IS_ERR(obj); obj = sctp_transport_get_next(net, &hti)) { + struct sctp_transport *transport = obj; + + if (!sctp_transport_hold(transport)) + continue; + err = cb(transport, p); + sctp_transport_put(transport); + if (err) + break; + } +out: + sctp_transport_walk_stop(&hti); + return err; +} +EXPORT_SYMBOL_GPL(sctp_for_each_transport); + /* 7.2.1 Association Status (SCTP_STATUS) * Applications can retrieve current status information about an From cb2050a7b8131a9a9f3f97276df1feaae8987dc8 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Thu, 14 Apr 2016 15:35:32 +0800 Subject: [PATCH 0765/1649] sctp: export some functions for sctp_diag in inet_diag inet_diag_msg_common_fill is used to fill the diag msg common info, we need to use it in sctp_diag as well, so export it. inet_diag_msg_attrs_fill is used to fill some common attrs info between sctp diag and tcp diag. v2->v3: - do not need to define and export inet_diag_get_handler any more. cause all the functions in it are in sctp_diag.ko, we just call them in sctp_diag.ko. - add inet_diag_msg_attrs_fill to make codes clear. Signed-off-by: Xin Long Signed-off-by: David S. 
Miller --- net/ipv4/inet_diag.c | 73 ++++++++++++++++++++++++++------------------ 1 file changed, 44 insertions(+), 29 deletions(-) diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c index bd591eb81ec9..70212bddf0f8 100644 --- a/net/ipv4/inet_diag.c +++ b/net/ipv4/inet_diag.c @@ -66,7 +66,7 @@ static void inet_diag_unlock_handler(const struct inet_diag_handler *handler) mutex_unlock(&inet_diag_table_mutex); } -static void inet_diag_msg_common_fill(struct inet_diag_msg *r, struct sock *sk) +void inet_diag_msg_common_fill(struct inet_diag_msg *r, struct sock *sk) { r->idiag_family = sk->sk_family; @@ -89,6 +89,7 @@ static void inet_diag_msg_common_fill(struct inet_diag_msg *r, struct sock *sk) r->id.idiag_dst[0] = sk->sk_daddr; } } +EXPORT_SYMBOL_GPL(inet_diag_msg_common_fill); static size_t inet_sk_attr_size(void) { @@ -104,36 +105,11 @@ static size_t inet_sk_attr_size(void) + 64; } -int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk, - struct sk_buff *skb, const struct inet_diag_req_v2 *req, - struct user_namespace *user_ns, - u32 portid, u32 seq, u16 nlmsg_flags, - const struct nlmsghdr *unlh) +int inet_diag_msg_attrs_fill(struct sock *sk, struct sk_buff *skb, + struct inet_diag_msg *r, int ext, + struct user_namespace *user_ns) { const struct inet_sock *inet = inet_sk(sk); - const struct tcp_congestion_ops *ca_ops; - const struct inet_diag_handler *handler; - int ext = req->idiag_ext; - struct inet_diag_msg *r; - struct nlmsghdr *nlh; - struct nlattr *attr; - void *info = NULL; - - handler = inet_diag_table[req->sdiag_protocol]; - BUG_ON(!handler); - - nlh = nlmsg_put(skb, portid, seq, unlh->nlmsg_type, sizeof(*r), - nlmsg_flags); - if (!nlh) - return -EMSGSIZE; - - r = nlmsg_data(nlh); - BUG_ON(!sk_fullsock(sk)); - - inet_diag_msg_common_fill(r, sk); - r->idiag_state = sk->sk_state; - r->idiag_timer = 0; - r->idiag_retrans = 0; if (nla_put_u8(skb, INET_DIAG_SHUTDOWN, sk->sk_shutdown)) goto errout; @@ -161,6 +137,45 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk, r->idiag_uid = from_kuid_munged(user_ns, sock_i_uid(sk)); r->idiag_inode = sock_i_ino(sk); + return 0; +errout: + return 1; +} +EXPORT_SYMBOL_GPL(inet_diag_msg_attrs_fill); + +int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk, + struct sk_buff *skb, const struct inet_diag_req_v2 *req, + struct user_namespace *user_ns, + u32 portid, u32 seq, u16 nlmsg_flags, + const struct nlmsghdr *unlh) +{ + const struct tcp_congestion_ops *ca_ops; + const struct inet_diag_handler *handler; + int ext = req->idiag_ext; + struct inet_diag_msg *r; + struct nlmsghdr *nlh; + struct nlattr *attr; + void *info = NULL; + + handler = inet_diag_table[req->sdiag_protocol]; + BUG_ON(!handler); + + nlh = nlmsg_put(skb, portid, seq, unlh->nlmsg_type, sizeof(*r), + nlmsg_flags); + if (!nlh) + return -EMSGSIZE; + + r = nlmsg_data(nlh); + BUG_ON(!sk_fullsock(sk)); + + inet_diag_msg_common_fill(r, sk); + r->idiag_state = sk->sk_state; + r->idiag_timer = 0; + r->idiag_retrans = 0; + + if (inet_diag_msg_attrs_fill(sk, skb, r, ext, user_ns)) + goto errout; + if (ext & (1 << (INET_DIAG_MEMINFO - 1))) { struct inet_diag_meminfo minfo = { .idiag_rmem = sk_rmem_alloc_get(sk), From 8f840e47f190cbe61a96945c13e9551048d42cef Mon Sep 17 00:00:00 2001 From: Xin Long Date: Thu, 14 Apr 2016 15:35:33 +0800 Subject: [PATCH 0766/1649] sctp: add the sctp_diag.c file This one will implement all the interface of inet_diag, inet_diag_handler. 
which includes sctp_diag_dump, sctp_diag_dump_one and sctp_diag_get_info. It will work as a module, and register inet_diag_handler when loading. v2->v3: - fix the mistake in inet_assoc_attr_size(). - change inet_diag_msg_laddrs_fill() name to inet_diag_msg_sctpladdrs_fill. - change inet_diag_msg_paddrs_fill() name to inet_diag_msg_sctpaddrs_fill. - add inet_diag_msg_sctpinfo_fill() to make asoc/ep fill code clearer. - add inet_diag_msg_sctpasoc_fill() to make asoc fill code clearer. - merge inet_asoc_diag_fill() and inet_ep_diag_fill() to inet_sctp_diag_fill(). - call sctp_diag_get_info() directly, instead by handler, cause the caller is in the same file with it. - call lock_sock in sctp_tsp_dump_one() to make sure we call get sctp info safely. - after lock_sock(sk), we should check sk != assoc->base.sk. - change mem[SK_MEMINFO_WMEM_ALLOC] to asoc->sndbuf_used for asoc dump when asoc->ep->sndbuf_policy is set. don't use INET_DIAG_MEMINFO attr any more. Signed-off-by: Xin Long Signed-off-by: David S. Miller --- include/uapi/linux/inet_diag.h | 2 + net/sctp/Kconfig | 4 + net/sctp/Makefile | 1 + net/sctp/sctp_diag.c | 497 +++++++++++++++++++++++++++++++++ 4 files changed, 504 insertions(+) create mode 100644 net/sctp/sctp_diag.c diff --git a/include/uapi/linux/inet_diag.h b/include/uapi/linux/inet_diag.h index 68a1f71fde9f..f5f3629dd553 100644 --- a/include/uapi/linux/inet_diag.h +++ b/include/uapi/linux/inet_diag.h @@ -113,6 +113,8 @@ enum { INET_DIAG_DCTCPINFO, INET_DIAG_PROTOCOL, /* response attribute only */ INET_DIAG_SKV6ONLY, + INET_DIAG_LOCALS, + INET_DIAG_PEERS, }; #define INET_DIAG_MAX INET_DIAG_SKV6ONLY diff --git a/net/sctp/Kconfig b/net/sctp/Kconfig index 71c1a598d9bc..d9c04dc1b3f3 100644 --- a/net/sctp/Kconfig +++ b/net/sctp/Kconfig @@ -99,5 +99,9 @@ config SCTP_COOKIE_HMAC_SHA1 select CRYPTO_HMAC if SCTP_COOKIE_HMAC_SHA1 select CRYPTO_SHA1 if SCTP_COOKIE_HMAC_SHA1 +config INET_SCTP_DIAG + depends on INET_DIAG + def_tristate INET_DIAG + endif # IP_SCTP diff --git a/net/sctp/Makefile b/net/sctp/Makefile index 3b4ffb021cf1..0fca5824ad0e 100644 --- a/net/sctp/Makefile +++ b/net/sctp/Makefile @@ -4,6 +4,7 @@ obj-$(CONFIG_IP_SCTP) += sctp.o obj-$(CONFIG_NET_SCTPPROBE) += sctp_probe.o +obj-$(CONFIG_INET_SCTP_DIAG) += sctp_diag.o sctp-y := sm_statetable.o sm_statefuns.o sm_sideeffect.o \ protocol.o endpointola.o associola.o \ diff --git a/net/sctp/sctp_diag.c b/net/sctp/sctp_diag.c new file mode 100644 index 000000000000..98ecd16da0c9 --- /dev/null +++ b/net/sctp/sctp_diag.c @@ -0,0 +1,497 @@ +#include +#include +#include +#include + +extern void inet_diag_msg_common_fill(struct inet_diag_msg *r, + struct sock *sk); +extern int inet_diag_msg_attrs_fill(struct sock *sk, struct sk_buff *skb, + struct inet_diag_msg *r, int ext, + struct user_namespace *user_ns); + +static void sctp_diag_get_info(struct sock *sk, struct inet_diag_msg *r, + void *info); + +/* define some functions to make asoc/ep fill look clean */ +static void inet_diag_msg_sctpasoc_fill(struct inet_diag_msg *r, + struct sock *sk, + struct sctp_association *asoc) +{ + union sctp_addr laddr, paddr; + struct dst_entry *dst; + + laddr = list_entry(asoc->base.bind_addr.address_list.next, + struct sctp_sockaddr_entry, list)->a; + paddr = asoc->peer.primary_path->ipaddr; + dst = asoc->peer.primary_path->dst; + + r->idiag_family = sk->sk_family; + r->id.idiag_sport = htons(asoc->base.bind_addr.port); + r->id.idiag_dport = htons(asoc->peer.port); + r->id.idiag_if = dst ? 
dst->dev->ifindex : 0; + sock_diag_save_cookie(sk, r->id.idiag_cookie); + +#if IS_ENABLED(CONFIG_IPV6) + if (sk->sk_family == AF_INET6) { + *(struct in6_addr *)r->id.idiag_src = laddr.v6.sin6_addr; + *(struct in6_addr *)r->id.idiag_dst = paddr.v6.sin6_addr; + } else +#endif + { + memset(&r->id.idiag_src, 0, sizeof(r->id.idiag_src)); + memset(&r->id.idiag_dst, 0, sizeof(r->id.idiag_dst)); + + r->id.idiag_src[0] = laddr.v4.sin_addr.s_addr; + r->id.idiag_dst[0] = paddr.v4.sin_addr.s_addr; + } + + r->idiag_state = asoc->state; + r->idiag_timer = SCTP_EVENT_TIMEOUT_T3_RTX; + r->idiag_retrans = asoc->rtx_data_chunks; +#define EXPIRES_IN_MS(tmo) DIV_ROUND_UP((tmo - jiffies) * 1000, HZ) + r->idiag_expires = + EXPIRES_IN_MS(asoc->timeouts[SCTP_EVENT_TIMEOUT_T3_RTX]); +#undef EXPIRES_IN_MS +} + +static int inet_diag_msg_sctpladdrs_fill(struct sk_buff *skb, + struct list_head *address_list) +{ + struct sctp_sockaddr_entry *laddr; + int addrlen = sizeof(struct sockaddr_storage); + int addrcnt = 0; + struct nlattr *attr; + void *info = NULL; + + list_for_each_entry_rcu(laddr, address_list, list) + addrcnt++; + + attr = nla_reserve(skb, INET_DIAG_LOCALS, addrlen * addrcnt); + if (!attr) + return -EMSGSIZE; + + info = nla_data(attr); + list_for_each_entry_rcu(laddr, address_list, list) { + memcpy(info, &laddr->a, addrlen); + info += addrlen; + } + + return 0; +} + +static int inet_diag_msg_sctpaddrs_fill(struct sk_buff *skb, + struct sctp_association *asoc) +{ + int addrlen = sizeof(struct sockaddr_storage); + struct sctp_transport *from; + struct nlattr *attr; + void *info = NULL; + + attr = nla_reserve(skb, INET_DIAG_PEERS, + addrlen * asoc->peer.transport_count); + if (!attr) + return -EMSGSIZE; + + info = nla_data(attr); + list_for_each_entry(from, &asoc->peer.transport_addr_list, + transports) { + memcpy(info, &from->ipaddr, addrlen); + info += addrlen; + } + + return 0; +} + +/* sctp asoc/ep fill*/ +static int inet_sctp_diag_fill(struct sock *sk, struct sctp_association *asoc, + struct sk_buff *skb, + const struct inet_diag_req_v2 *req, + struct user_namespace *user_ns, + int portid, u32 seq, u16 nlmsg_flags, + const struct nlmsghdr *unlh) +{ + struct sctp_endpoint *ep = sctp_sk(sk)->ep; + struct list_head *addr_list; + struct inet_diag_msg *r; + struct nlmsghdr *nlh; + int ext = req->idiag_ext; + struct sctp_infox infox; + void *info = NULL; + + nlh = nlmsg_put(skb, portid, seq, unlh->nlmsg_type, sizeof(*r), + nlmsg_flags); + if (!nlh) + return -EMSGSIZE; + + r = nlmsg_data(nlh); + BUG_ON(!sk_fullsock(sk)); + + if (asoc) { + inet_diag_msg_sctpasoc_fill(r, sk, asoc); + } else { + inet_diag_msg_common_fill(r, sk); + r->idiag_state = sk->sk_state; + r->idiag_timer = 0; + r->idiag_retrans = 0; + } + + if (inet_diag_msg_attrs_fill(sk, skb, r, ext, user_ns)) + goto errout; + + if (ext & (1 << (INET_DIAG_SKMEMINFO - 1))) { + u32 mem[SK_MEMINFO_VARS]; + int amt; + + if (asoc && asoc->ep->sndbuf_policy) + amt = asoc->sndbuf_used; + else + amt = sk_wmem_alloc_get(sk); + mem[SK_MEMINFO_WMEM_ALLOC] = amt; + mem[SK_MEMINFO_RMEM_ALLOC] = sk_rmem_alloc_get(sk); + mem[SK_MEMINFO_RCVBUF] = sk->sk_rcvbuf; + mem[SK_MEMINFO_SNDBUF] = sk->sk_sndbuf; + mem[SK_MEMINFO_FWD_ALLOC] = sk->sk_forward_alloc; + mem[SK_MEMINFO_WMEM_QUEUED] = sk->sk_wmem_queued; + mem[SK_MEMINFO_OPTMEM] = atomic_read(&sk->sk_omem_alloc); + mem[SK_MEMINFO_BACKLOG] = sk->sk_backlog.len; + mem[SK_MEMINFO_DROPS] = atomic_read(&sk->sk_drops); + + if (nla_put(skb, INET_DIAG_SKMEMINFO, sizeof(mem), &mem) < 0) + goto errout; + } + + if (ext & (1 << 
(INET_DIAG_INFO - 1))) { + struct nlattr *attr; + + attr = nla_reserve(skb, INET_DIAG_INFO, + sizeof(struct sctp_info)); + if (!attr) + goto errout; + + info = nla_data(attr); + } + infox.sctpinfo = (struct sctp_info *)info; + infox.asoc = asoc; + sctp_diag_get_info(sk, r, &infox); + + addr_list = asoc ? &asoc->base.bind_addr.address_list + : &ep->base.bind_addr.address_list; + if (inet_diag_msg_sctpladdrs_fill(skb, addr_list)) + goto errout; + + if (asoc && (ext & (1 << (INET_DIAG_CONG - 1)))) + if (nla_put_string(skb, INET_DIAG_CONG, "reno") < 0) + goto errout; + + if (asoc && inet_diag_msg_sctpaddrs_fill(skb, asoc)) + goto errout; + + nlmsg_end(skb, nlh); + return 0; + +errout: + nlmsg_cancel(skb, nlh); + return -EMSGSIZE; +} + +/* callback and param */ +struct sctp_comm_param { + struct sk_buff *skb; + struct netlink_callback *cb; + const struct inet_diag_req_v2 *r; + const struct nlmsghdr *nlh; +}; + +static size_t inet_assoc_attr_size(struct sctp_association *asoc) +{ + int addrlen = sizeof(struct sockaddr_storage); + int addrcnt = 0; + struct sctp_sockaddr_entry *laddr; + + list_for_each_entry_rcu(laddr, &asoc->base.bind_addr.address_list, + list) + addrcnt++; + + return nla_total_size(sizeof(struct sctp_info)) + + nla_total_size(1) /* INET_DIAG_SHUTDOWN */ + + nla_total_size(1) /* INET_DIAG_TOS */ + + nla_total_size(1) /* INET_DIAG_TCLASS */ + + nla_total_size(addrlen * asoc->peer.transport_count) + + nla_total_size(addrlen * addrcnt) + + nla_total_size(sizeof(struct inet_diag_meminfo)) + + nla_total_size(sizeof(struct inet_diag_msg)) + + 64; +} + +static int sctp_tsp_dump_one(struct sctp_transport *tsp, void *p) +{ + struct sctp_association *assoc = tsp->asoc; + struct sock *sk = tsp->asoc->base.sk; + struct sctp_comm_param *commp = p; + struct sk_buff *in_skb = commp->skb; + const struct inet_diag_req_v2 *req = commp->r; + const struct nlmsghdr *nlh = commp->nlh; + struct net *net = sock_net(in_skb->sk); + struct sk_buff *rep; + int err; + + err = sock_diag_check_cookie(sk, req->id.idiag_cookie); + if (err) + goto out; + + err = -ENOMEM; + rep = nlmsg_new(inet_assoc_attr_size(assoc), GFP_KERNEL); + if (!rep) + goto out; + + lock_sock(sk); + if (sk != assoc->base.sk) { + release_sock(sk); + sk = assoc->base.sk; + lock_sock(sk); + } + err = inet_sctp_diag_fill(sk, assoc, rep, req, + sk_user_ns(NETLINK_CB(in_skb).sk), + NETLINK_CB(in_skb).portid, + nlh->nlmsg_seq, 0, nlh); + release_sock(sk); + if (err < 0) { + WARN_ON(err == -EMSGSIZE); + kfree_skb(rep); + goto out; + } + + err = netlink_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).portid, + MSG_DONTWAIT); + if (err > 0) + err = 0; +out: + return err; +} + +static int sctp_tsp_dump(struct sctp_transport *tsp, void *p) +{ + struct sctp_endpoint *ep = tsp->asoc->ep; + struct sctp_comm_param *commp = p; + struct sock *sk = ep->base.sk; + struct sk_buff *skb = commp->skb; + struct netlink_callback *cb = commp->cb; + const struct inet_diag_req_v2 *r = commp->r; + struct sctp_association *assoc = + list_entry(ep->asocs.next, struct sctp_association, asocs); + int err = 0; + + /* find the ep only once through the transports by this condition */ + if (tsp->asoc != assoc) + goto out; + + if (r->sdiag_family != AF_UNSPEC && sk->sk_family != r->sdiag_family) + goto out; + + lock_sock(sk); + if (sk != assoc->base.sk) + goto release; + list_for_each_entry(assoc, &ep->asocs, asocs) { + if (cb->args[4] < cb->args[1]) + goto next; + + if (r->id.idiag_sport != htons(assoc->base.bind_addr.port) && + r->id.idiag_sport) + goto next; + if 
(r->id.idiag_dport != htons(assoc->peer.port) && + r->id.idiag_dport) + goto next; + + if (!cb->args[3] && + inet_sctp_diag_fill(sk, NULL, skb, r, + sk_user_ns(NETLINK_CB(cb->skb).sk), + NETLINK_CB(cb->skb).portid, + cb->nlh->nlmsg_seq, + NLM_F_MULTI, cb->nlh) < 0) { + cb->args[3] = 1; + err = 2; + goto release; + } + cb->args[3] = 1; + + if (inet_sctp_diag_fill(sk, assoc, skb, r, + sk_user_ns(NETLINK_CB(cb->skb).sk), + NETLINK_CB(cb->skb).portid, + cb->nlh->nlmsg_seq, 0, cb->nlh) < 0) { + err = 2; + goto release; + } +next: + cb->args[4]++; + } + cb->args[1] = 0; + cb->args[2]++; + cb->args[3] = 0; + cb->args[4] = 0; +release: + release_sock(sk); + return err; +out: + cb->args[2]++; + return err; +} + +static int sctp_ep_dump(struct sctp_endpoint *ep, void *p) +{ + struct sctp_comm_param *commp = p; + struct sock *sk = ep->base.sk; + struct sk_buff *skb = commp->skb; + struct netlink_callback *cb = commp->cb; + const struct inet_diag_req_v2 *r = commp->r; + struct net *net = sock_net(skb->sk); + struct inet_sock *inet = inet_sk(sk); + int err = 0; + + if (!net_eq(sock_net(sk), net)) + goto out; + + if (cb->args[4] < cb->args[1]) + goto next; + + if (r->sdiag_family != AF_UNSPEC && + sk->sk_family != r->sdiag_family) + goto next; + + if (r->id.idiag_sport != inet->inet_sport && + r->id.idiag_sport) + goto next; + + if (r->id.idiag_dport != inet->inet_dport && + r->id.idiag_dport) + goto next; + + if (inet_sctp_diag_fill(sk, NULL, skb, r, + sk_user_ns(NETLINK_CB(cb->skb).sk), + NETLINK_CB(cb->skb).portid, + cb->nlh->nlmsg_seq, NLM_F_MULTI, + cb->nlh) < 0) { + err = 2; + goto out; + } +next: + cb->args[4]++; +out: + return err; +} + +/* define the functions for sctp_diag_handler*/ +static void sctp_diag_get_info(struct sock *sk, struct inet_diag_msg *r, + void *info) +{ + struct sctp_infox *infox = (struct sctp_infox *)info; + + if (infox->asoc) { + r->idiag_rqueue = atomic_read(&infox->asoc->rmem_alloc); + r->idiag_wqueue = infox->asoc->sndbuf_used; + } else { + r->idiag_rqueue = sk->sk_ack_backlog; + r->idiag_wqueue = sk->sk_max_ack_backlog; + } + if (infox->sctpinfo) + sctp_get_sctp_info(sk, infox->asoc, infox->sctpinfo); +} + +static int sctp_diag_dump_one(struct sk_buff *in_skb, + const struct nlmsghdr *nlh, + const struct inet_diag_req_v2 *req) +{ + struct net *net = sock_net(in_skb->sk); + union sctp_addr laddr, paddr; + struct sctp_comm_param commp = { + .skb = in_skb, + .r = req, + .nlh = nlh, + }; + + if (req->sdiag_family == AF_INET) { + laddr.v4.sin_port = req->id.idiag_sport; + laddr.v4.sin_addr.s_addr = req->id.idiag_src[0]; + laddr.v4.sin_family = AF_INET; + + paddr.v4.sin_port = req->id.idiag_dport; + paddr.v4.sin_addr.s_addr = req->id.idiag_dst[0]; + paddr.v4.sin_family = AF_INET; + } else { + laddr.v6.sin6_port = req->id.idiag_sport; + memcpy(&laddr.v6.sin6_addr, req->id.idiag_src, 64); + laddr.v6.sin6_family = AF_INET6; + + paddr.v6.sin6_port = req->id.idiag_dport; + memcpy(&paddr.v6.sin6_addr, req->id.idiag_dst, 64); + paddr.v6.sin6_family = AF_INET6; + } + + return sctp_transport_lookup_process(sctp_tsp_dump_one, + net, &laddr, &paddr, &commp); +} + +static void sctp_diag_dump(struct sk_buff *skb, struct netlink_callback *cb, + const struct inet_diag_req_v2 *r, struct nlattr *bc) +{ + u32 idiag_states = r->idiag_states; + struct net *net = sock_net(skb->sk); + struct sctp_comm_param commp = { + .skb = skb, + .cb = cb, + .r = r, + }; + + /* eps hashtable dumps + * args: + * 0 : if it will traversal listen sock + * 1 : to record the sock pos of this time's traversal + * 4 
: to work as a temporary variable to traversal list + */ + if (cb->args[0] == 0) { + if (!(idiag_states & TCPF_LISTEN)) + goto skip; + if (sctp_for_each_endpoint(sctp_ep_dump, &commp)) + goto done; +skip: + cb->args[0] = 1; + cb->args[1] = 0; + cb->args[4] = 0; + } + + /* asocs by transport hashtable dump + * args: + * 1 : to record the assoc pos of this time's traversal + * 2 : to record the transport pos of this time's traversal + * 3 : to mark if we have dumped the ep info of the current asoc + * 4 : to work as a temporary variable to traversal list + */ + if (!(idiag_states & ~TCPF_LISTEN)) + goto done; + sctp_for_each_transport(sctp_tsp_dump, net, cb->args[2], &commp); +done: + cb->args[1] = cb->args[4]; + cb->args[4] = 0; +} + +static const struct inet_diag_handler sctp_diag_handler = { + .dump = sctp_diag_dump, + .dump_one = sctp_diag_dump_one, + .idiag_get_info = sctp_diag_get_info, + .idiag_type = IPPROTO_SCTP, + .idiag_info_size = sizeof(struct sctp_info), +}; + +static int __init sctp_diag_init(void) +{ + return inet_diag_register(&sctp_diag_handler); +} + +static void __exit sctp_diag_exit(void) +{ + inet_diag_unregister(&sctp_diag_handler); +} + +module_init(sctp_diag_init); +module_exit(sctp_diag_exit); +MODULE_LICENSE("GPL"); +MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 2-132); From b5e2f4e6998a2b999da8fa0290b692f0bd85c8b7 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Thu, 14 Apr 2016 15:35:34 +0800 Subject: [PATCH 0767/1649] sctp: merge the seq_start/next/exits in remaddrs and assocs In sctp proc, these three functions in remaddrs and assocs are the same. we should merge them into one. Signed-off-by: Xin Long Signed-off-by: David S. Miller --- net/sctp/proc.c | 45 +++++++++------------------------------------ 1 file changed, 9 insertions(+), 36 deletions(-) diff --git a/net/sctp/proc.c b/net/sctp/proc.c index dd8492f0037d..9fe139368ad7 100644 --- a/net/sctp/proc.c +++ b/net/sctp/proc.c @@ -282,7 +282,7 @@ struct sctp_ht_iter { struct rhashtable_iter hti; }; -static void *sctp_assocs_seq_start(struct seq_file *seq, loff_t *pos) +static void *sctp_transport_seq_start(struct seq_file *seq, loff_t *pos) { struct sctp_ht_iter *iter = seq->private; int err = sctp_transport_walk_start(&iter->hti); @@ -293,14 +293,14 @@ static void *sctp_assocs_seq_start(struct seq_file *seq, loff_t *pos) return sctp_transport_get_idx(seq_file_net(seq), &iter->hti, *pos); } -static void sctp_assocs_seq_stop(struct seq_file *seq, void *v) +static void sctp_transport_seq_stop(struct seq_file *seq, void *v) { struct sctp_ht_iter *iter = seq->private; sctp_transport_walk_stop(&iter->hti); } -static void *sctp_assocs_seq_next(struct seq_file *seq, void *v, loff_t *pos) +static void *sctp_transport_seq_next(struct seq_file *seq, void *v, loff_t *pos) { struct sctp_ht_iter *iter = seq->private; @@ -367,9 +367,9 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v) } static const struct seq_operations sctp_assoc_ops = { - .start = sctp_assocs_seq_start, - .next = sctp_assocs_seq_next, - .stop = sctp_assocs_seq_stop, + .start = sctp_transport_seq_start, + .next = sctp_transport_seq_next, + .stop = sctp_transport_seq_stop, .show = sctp_assocs_seq_show, }; @@ -406,33 +406,6 @@ void sctp_assocs_proc_exit(struct net *net) remove_proc_entry("assocs", net->sctp.proc_net_sctp); } -static void *sctp_remaddr_seq_start(struct seq_file *seq, loff_t *pos) -{ - struct sctp_ht_iter *iter = seq->private; - int err = sctp_transport_walk_start(&iter->hti); - - if (err) - return ERR_PTR(err); - 
- return sctp_transport_get_idx(seq_file_net(seq), &iter->hti, *pos); -} - -static void *sctp_remaddr_seq_next(struct seq_file *seq, void *v, loff_t *pos) -{ - struct sctp_ht_iter *iter = seq->private; - - ++*pos; - - return sctp_transport_get_next(seq_file_net(seq), &iter->hti); -} - -static void sctp_remaddr_seq_stop(struct seq_file *seq, void *v) -{ - struct sctp_ht_iter *iter = seq->private; - - sctp_transport_walk_stop(&iter->hti); -} - static int sctp_remaddr_seq_show(struct seq_file *seq, void *v) { struct sctp_association *assoc; @@ -506,9 +479,9 @@ static int sctp_remaddr_seq_show(struct seq_file *seq, void *v) } static const struct seq_operations sctp_remaddr_ops = { - .start = sctp_remaddr_seq_start, - .next = sctp_remaddr_seq_next, - .stop = sctp_remaddr_seq_stop, + .start = sctp_transport_seq_start, + .next = sctp_transport_seq_next, + .stop = sctp_transport_seq_stop, .show = sctp_remaddr_seq_show, }; From 53fa10369c45a51947f06e8b622d2fa2cc64fda1 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Thu, 14 Apr 2016 15:35:35 +0800 Subject: [PATCH 0768/1649] sctp: fix some rhashtable functions using in sctp proc/diag When rhashtable_walk_init return err, no release function should be called, and when rhashtable_walk_start return err, we should only invoke rhashtable_walk_exit to release the source. But now when sctp_transport_walk_start return err, we just call rhashtable_walk_stop/exit, and never care about if rhashtable_walk_init or start return err, which is so bad. We will fix it by calling rhashtable_walk_exit if rhashtable_walk_start return err in sctp_transport_walk_start, and if sctp_transport_walk_start return err, we do not need to call sctp_transport_walk_stop any more. For sctp proc, we will use 'iter->start_fail' to decide if we will call rhashtable_walk_stop/exit. Signed-off-by: Xin Long Signed-off-by: David S. Miller --- net/sctp/proc.c | 7 ++++++- net/sctp/socket.c | 15 ++++++++++----- 2 files changed, 16 insertions(+), 6 deletions(-) diff --git a/net/sctp/proc.c b/net/sctp/proc.c index 9fe139368ad7..4cb5aedfe3ee 100644 --- a/net/sctp/proc.c +++ b/net/sctp/proc.c @@ -280,6 +280,7 @@ void sctp_eps_proc_exit(struct net *net) struct sctp_ht_iter { struct seq_net_private p; struct rhashtable_iter hti; + int start_fail; }; static void *sctp_transport_seq_start(struct seq_file *seq, loff_t *pos) @@ -287,8 +288,10 @@ static void *sctp_transport_seq_start(struct seq_file *seq, loff_t *pos) struct sctp_ht_iter *iter = seq->private; int err = sctp_transport_walk_start(&iter->hti); - if (err) + if (err) { + iter->start_fail = 1; return ERR_PTR(err); + } return sctp_transport_get_idx(seq_file_net(seq), &iter->hti, *pos); } @@ -297,6 +300,8 @@ static void sctp_transport_seq_stop(struct seq_file *seq, void *v) { struct sctp_ht_iter *iter = seq->private; + if (iter->start_fail) + return; sctp_transport_walk_stop(&iter->hti); } diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 5e5bc08d2b25..777d0324594a 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -4299,8 +4299,12 @@ int sctp_transport_walk_start(struct rhashtable_iter *iter) return err; err = rhashtable_walk_start(iter); + if (err && err != -EAGAIN) { + rhashtable_walk_exit(iter); + return err; + } - return err == -EAGAIN ? 
0 : err; + return 0; } void sctp_transport_walk_stop(struct rhashtable_iter *iter) @@ -4389,11 +4393,12 @@ EXPORT_SYMBOL_GPL(sctp_transport_lookup_process); int sctp_for_each_transport(int (*cb)(struct sctp_transport *, void *), struct net *net, int pos, void *p) { struct rhashtable_iter hti; - int err = 0; void *obj; + int err; - if (sctp_transport_walk_start(&hti)) - goto out; + err = sctp_transport_walk_start(&hti); + if (err) + return err; sctp_transport_get_idx(net, &hti, pos); obj = sctp_transport_get_next(net, &hti); @@ -4407,8 +4412,8 @@ int sctp_for_each_transport(int (*cb)(struct sctp_transport *, void *), if (err) break; } -out: sctp_transport_walk_stop(&hti); + return err; } EXPORT_SYMBOL_GPL(sctp_for_each_transport); From f48256efededaa87f475c0d6330d83f853cb064a Mon Sep 17 00:00:00 2001 From: wangweidong Date: Thu, 14 Apr 2016 15:43:52 +0800 Subject: [PATCH 0769/1649] phy: make some bits preserved while setup forced mode When tested the PHY SGMII Loopback: 1.set the LOOPBACK bit, 2.set the autoneg to AUTONEG_DISABLE, it calls the genphy_setup_forced which will clear the bit. The BMCR_LOOPBACK bit should be preserved. As Florian pointed out that other bits should be preserved too. So I make the BMCR_ISOLATE and BMCR_PDOWN as well. Signed-off-by: Weidong Wang Signed-off-by: David S. Miller --- drivers/net/phy/phy_device.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index e551f3a89cfd..10e39c2fbf81 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -1123,8 +1123,9 @@ static int genphy_config_advert(struct phy_device *phydev) */ int genphy_setup_forced(struct phy_device *phydev) { - int ctl = 0; + int ctl = phy_read(phydev, MII_BMCR); + ctl &= BMCR_LOOPBACK | BMCR_ISOLATE | BMCR_PDOWN; phydev->pause = 0; phydev->asym_pause = 0; From 0412bd931f5f94d1054e958415c4a945d8ee62f4 Mon Sep 17 00:00:00 2001 From: Hannes Frederic Sowa Date: Fri, 8 Apr 2016 22:55:01 +0200 Subject: [PATCH 0770/1649] vxlan: synchronously and race-free destruction of vxlan sockets Due to the fact that the udp socket is destructed asynchronously in a work queue, we have some nondeterministic behavior during shutdown of vxlan tunnels and creating new ones. Fix this by keeping the destruction process synchronous in regards to the user space process so IFF_UP can be reliably set. udp_tunnel_sock_release destroys vs->sock->sk if reference counter indicates so. We expect to have the same lifetime of vxlan_sock and vxlan_sock->sock->sk even in fast paths with only rcu locks held. So only destruct the whole socket after we can be sure it cannot be found by searching vxlan_net->sock_list. Cc: Eric Dumazet Cc: Jiri Benc Cc: Marcelo Ricardo Leitner Signed-off-by: Hannes Frederic Sowa Signed-off-by: David S. 
Miller --- drivers/net/vxlan.c | 20 +++----------------- include/net/vxlan.h | 2 -- 2 files changed, 3 insertions(+), 19 deletions(-) diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 7f697a3f00a4..19383371a27d 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -98,7 +98,6 @@ struct vxlan_fdb { /* salt for hash table */ static u32 vxlan_salt __read_mostly; -static struct workqueue_struct *vxlan_wq; static inline bool vxlan_collect_metadata(struct vxlan_sock *vs) { @@ -1053,7 +1052,9 @@ static void __vxlan_sock_release(struct vxlan_sock *vs) vxlan_notify_del_rx_port(vs); spin_unlock(&vn->sock_lock); - queue_work(vxlan_wq, &vs->del_work); + synchronize_net(); + udp_tunnel_sock_release(vs->sock); + kfree(vs); } static void vxlan_sock_release(struct vxlan_dev *vxlan) @@ -2674,13 +2675,6 @@ static const struct ethtool_ops vxlan_ethtool_ops = { .get_link = ethtool_op_get_link, }; -static void vxlan_del_work(struct work_struct *work) -{ - struct vxlan_sock *vs = container_of(work, struct vxlan_sock, del_work); - udp_tunnel_sock_release(vs->sock); - kfree_rcu(vs, rcu); -} - static struct socket *vxlan_create_sock(struct net *net, bool ipv6, __be16 port, u32 flags) { @@ -2726,8 +2720,6 @@ static struct vxlan_sock *vxlan_socket_create(struct net *net, bool ipv6, for (h = 0; h < VNI_HASH_SIZE; ++h) INIT_HLIST_HEAD(&vs->vni_list[h]); - INIT_WORK(&vs->del_work, vxlan_del_work); - sock = vxlan_create_sock(net, ipv6, port, flags); if (IS_ERR(sock)) { pr_info("Cannot bind port %d, err=%ld\n", ntohs(port), @@ -3346,10 +3338,6 @@ static int __init vxlan_init_module(void) { int rc; - vxlan_wq = alloc_workqueue("vxlan", 0, 0); - if (!vxlan_wq) - return -ENOMEM; - get_random_bytes(&vxlan_salt, sizeof(vxlan_salt)); rc = register_pernet_subsys(&vxlan_net_ops); @@ -3370,7 +3358,6 @@ out3: out2: unregister_pernet_subsys(&vxlan_net_ops); out1: - destroy_workqueue(vxlan_wq); return rc; } late_initcall(vxlan_init_module); @@ -3379,7 +3366,6 @@ static void __exit vxlan_cleanup_module(void) { rtnl_link_unregister(&vxlan_link_ops); unregister_netdevice_notifier(&vxlan_notifier_block); - destroy_workqueue(vxlan_wq); unregister_pernet_subsys(&vxlan_net_ops); /* rcu_barrier() is called by netns */ } diff --git a/include/net/vxlan.h b/include/net/vxlan.h index 2f168f0ea32c..d442eb3129cd 100644 --- a/include/net/vxlan.h +++ b/include/net/vxlan.h @@ -184,9 +184,7 @@ struct vxlan_metadata { /* per UDP socket information */ struct vxlan_sock { struct hlist_node hlist; - struct work_struct del_work; struct socket *sock; - struct rcu_head rcu; struct hlist_head vni_list[VNI_HASH_SIZE]; atomic_t refcnt; u32 flags; From 544a773a01828e3cc3b553721f68d880d0d27a97 Mon Sep 17 00:00:00 2001 From: Hannes Frederic Sowa Date: Sat, 9 Apr 2016 12:46:23 +0200 Subject: [PATCH 0771/1649] vxlan: reduce usage of synchronize_net in ndo_stop We only need to do the synchronize_net dance once for both, ipv4 and ipv6 sockets, thus removing one synchronize_net in case both sockets get dismantled. Signed-off-by: Hannes Frederic Sowa Signed-off-by: David S. 
Miller --- drivers/net/vxlan.c | 28 ++++++++++++++++++++-------- 1 file changed, 20 insertions(+), 8 deletions(-) diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 19383371a27d..a7112b3bc9b4 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -1037,14 +1037,14 @@ static bool vxlan_group_used(struct vxlan_net *vn, struct vxlan_dev *dev) return false; } -static void __vxlan_sock_release(struct vxlan_sock *vs) +static bool __vxlan_sock_release_prep(struct vxlan_sock *vs) { struct vxlan_net *vn; if (!vs) - return; + return false; if (!atomic_dec_and_test(&vs->refcnt)) - return; + return false; vn = net_generic(sock_net(vs->sock->sk), vxlan_net_id); spin_lock(&vn->sock_lock); @@ -1052,16 +1052,28 @@ static void __vxlan_sock_release(struct vxlan_sock *vs) vxlan_notify_del_rx_port(vs); spin_unlock(&vn->sock_lock); - synchronize_net(); - udp_tunnel_sock_release(vs->sock); - kfree(vs); + return true; } static void vxlan_sock_release(struct vxlan_dev *vxlan) { - __vxlan_sock_release(vxlan->vn4_sock); + bool ipv4 = __vxlan_sock_release_prep(vxlan->vn4_sock); #if IS_ENABLED(CONFIG_IPV6) - __vxlan_sock_release(vxlan->vn6_sock); + bool ipv6 = __vxlan_sock_release_prep(vxlan->vn6_sock); +#endif + + synchronize_net(); + + if (ipv4) { + udp_tunnel_sock_release(vxlan->vn4_sock->sock); + kfree(vxlan->vn4_sock); + } + +#if IS_ENABLED(CONFIG_IPV6) + if (ipv6) { + udp_tunnel_sock_release(vxlan->vn6_sock->sock); + kfree(vxlan->vn6_sock); + } #endif } From d6586d2ef4608f113df16ca8ca757563891389ce Mon Sep 17 00:00:00 2001 From: Akinobu Mita Date: Fri, 15 Apr 2016 00:11:29 +0900 Subject: [PATCH 0772/1649] net: w5100: move mmiowb into register access callbacks Instead of sprinkle mmiowb over the driver code, move it into primary register write callbacks. (w5100_write, w5100_write16, w5100_writebuf) This is a preparation for supporting SPI interface which doesn't use MMIO for accessing w5100 registers. Signed-off-by: Akinobu Mita Cc: Mike Sinkovsky Cc: David S. Miller Signed-off-by: David S. 
Miller --- drivers/net/ethernet/wiznet/w5100.c | 44 ++++++++++------------------- 1 file changed, 15 insertions(+), 29 deletions(-) diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c index 8b282d0b169c..f4b7200bc0f5 100644 --- a/drivers/net/ethernet/wiznet/w5100.c +++ b/drivers/net/ethernet/wiznet/w5100.c @@ -122,10 +122,17 @@ static inline u8 w5100_read_direct(struct w5100_priv *priv, u16 addr) return ioread8(priv->base + (addr << CONFIG_WIZNET_BUS_SHIFT)); } +static inline void __w5100_write_direct(struct w5100_priv *priv, u16 addr, + u8 data) +{ + iowrite8(data, priv->base + (addr << CONFIG_WIZNET_BUS_SHIFT)); +} + static inline void w5100_write_direct(struct w5100_priv *priv, u16 addr, u8 data) { - iowrite8(data, priv->base + (addr << CONFIG_WIZNET_BUS_SHIFT)); + __w5100_write_direct(priv, addr, data); + mmiowb(); } static u16 w5100_read16_direct(struct w5100_priv *priv, u16 addr) @@ -138,8 +145,9 @@ static u16 w5100_read16_direct(struct w5100_priv *priv, u16 addr) static void w5100_write16_direct(struct w5100_priv *priv, u16 addr, u16 data) { - w5100_write_direct(priv, addr, data >> 8); - w5100_write_direct(priv, addr + 1, data); + __w5100_write_direct(priv, addr, data >> 8); + __w5100_write_direct(priv, addr + 1, data); + mmiowb(); } static void w5100_readbuf_direct(struct w5100_priv *priv, @@ -164,8 +172,9 @@ static void w5100_writebuf_direct(struct w5100_priv *priv, for (i = 0; i < len; i++, addr++) { if (unlikely(addr > W5100_TX_MEM_END)) addr = W5100_TX_MEM_START; - w5100_write_direct(priv, addr, *buf++); + __w5100_write_direct(priv, addr, *buf++); } + mmiowb(); } /* @@ -186,7 +195,6 @@ static u8 w5100_read_indirect(struct w5100_priv *priv, u16 addr) spin_lock_irqsave(&priv->reg_lock, flags); w5100_write16_direct(priv, W5100_IDM_AR, addr); - mmiowb(); data = w5100_read_direct(priv, W5100_IDM_DR); spin_unlock_irqrestore(&priv->reg_lock, flags); @@ -199,9 +207,7 @@ static void w5100_write_indirect(struct w5100_priv *priv, u16 addr, u8 data) spin_lock_irqsave(&priv->reg_lock, flags); w5100_write16_direct(priv, W5100_IDM_AR, addr); - mmiowb(); w5100_write_direct(priv, W5100_IDM_DR, data); - mmiowb(); spin_unlock_irqrestore(&priv->reg_lock, flags); } @@ -212,7 +218,6 @@ static u16 w5100_read16_indirect(struct w5100_priv *priv, u16 addr) spin_lock_irqsave(&priv->reg_lock, flags); w5100_write16_direct(priv, W5100_IDM_AR, addr); - mmiowb(); data = w5100_read_direct(priv, W5100_IDM_DR) << 8; data |= w5100_read_direct(priv, W5100_IDM_DR); spin_unlock_irqrestore(&priv->reg_lock, flags); @@ -226,10 +231,8 @@ static void w5100_write16_indirect(struct w5100_priv *priv, u16 addr, u16 data) spin_lock_irqsave(&priv->reg_lock, flags); w5100_write16_direct(priv, W5100_IDM_AR, addr); - mmiowb(); - w5100_write_direct(priv, W5100_IDM_DR, data >> 8); + __w5100_write_direct(priv, W5100_IDM_DR, data >> 8); w5100_write_direct(priv, W5100_IDM_DR, data); - mmiowb(); spin_unlock_irqrestore(&priv->reg_lock, flags); } @@ -242,13 +245,11 @@ static void w5100_readbuf_indirect(struct w5100_priv *priv, spin_lock_irqsave(&priv->reg_lock, flags); w5100_write16_direct(priv, W5100_IDM_AR, addr); - mmiowb(); for (i = 0; i < len; i++, addr++) { if (unlikely(addr > W5100_RX_MEM_END)) { addr = W5100_RX_MEM_START; w5100_write16_direct(priv, W5100_IDM_AR, addr); - mmiowb(); } *buf++ = w5100_read_direct(priv, W5100_IDM_DR); } @@ -265,15 +266,13 @@ static void w5100_writebuf_indirect(struct w5100_priv *priv, spin_lock_irqsave(&priv->reg_lock, flags); w5100_write16_direct(priv, W5100_IDM_AR, 
addr); - mmiowb(); for (i = 0; i < len; i++, addr++) { if (unlikely(addr > W5100_TX_MEM_END)) { addr = W5100_TX_MEM_START; w5100_write16_direct(priv, W5100_IDM_AR, addr); - mmiowb(); } - w5100_write_direct(priv, W5100_IDM_DR, *buf++); + __w5100_write_direct(priv, W5100_IDM_DR, *buf++); } mmiowb(); spin_unlock_irqrestore(&priv->reg_lock, flags); @@ -309,7 +308,6 @@ static int w5100_command(struct w5100_priv *priv, u16 cmd) unsigned long timeout = jiffies + msecs_to_jiffies(100); w5100_write(priv, W5100_S0_CR, cmd); - mmiowb(); while (w5100_read(priv, W5100_S0_CR) != 0) { if (time_after(jiffies, timeout)) @@ -327,18 +325,15 @@ static void w5100_write_macaddr(struct w5100_priv *priv) for (i = 0; i < ETH_ALEN; i++) w5100_write(priv, W5100_SHAR + i, ndev->dev_addr[i]); - mmiowb(); } static void w5100_hw_reset(struct w5100_priv *priv) { w5100_write_direct(priv, W5100_MR, MR_RST); - mmiowb(); mdelay(5); w5100_write_direct(priv, W5100_MR, priv->indirect ? MR_PB | MR_AI | MR_IND : MR_PB); - mmiowb(); w5100_write(priv, W5100_IMR, 0); w5100_write_macaddr(priv); @@ -347,23 +342,19 @@ static void w5100_hw_reset(struct w5100_priv *priv) */ w5100_write(priv, W5100_RMSR, 0x03); w5100_write(priv, W5100_TMSR, 0x03); - mmiowb(); } static void w5100_hw_start(struct w5100_priv *priv) { w5100_write(priv, W5100_S0_MR, priv->promisc ? S0_MR_MACRAW : S0_MR_MACRAW_MF); - mmiowb(); w5100_command(priv, S0_CR_OPEN); w5100_write(priv, W5100_IMR, IR_S0); - mmiowb(); } static void w5100_hw_close(struct w5100_priv *priv) { w5100_write(priv, W5100_IMR, 0); - mmiowb(); w5100_command(priv, S0_CR_CLOSE); } @@ -447,7 +438,6 @@ static int w5100_start_tx(struct sk_buff *skb, struct net_device *ndev) offset = w5100_read16(priv, W5100_S0_TX_WR); w5100_writebuf(priv, offset, skb->data, skb->len); w5100_write16(priv, W5100_S0_TX_WR, offset + skb->len); - mmiowb(); ndev->stats.tx_bytes += skb->len; ndev->stats.tx_packets++; dev_kfree_skb(skb); @@ -488,7 +478,6 @@ static int w5100_napi_poll(struct napi_struct *napi, int budget) skb_put(skb, rx_len); w5100_readbuf(priv, offset + 2, skb->data, rx_len); w5100_write16(priv, W5100_S0_RX_RD, offset + 2 + rx_len); - mmiowb(); w5100_command(priv, S0_CR_RECV); skb->protocol = eth_type_trans(skb, ndev); @@ -500,7 +489,6 @@ static int w5100_napi_poll(struct napi_struct *napi, int budget) if (rx_count < budget) { napi_complete(napi); w5100_write(priv, W5100_IMR, IR_S0); - mmiowb(); } return rx_count; @@ -515,7 +503,6 @@ static irqreturn_t w5100_interrupt(int irq, void *ndev_instance) if (!ir) return IRQ_NONE; w5100_write(priv, W5100_S0_IR, ir); - mmiowb(); if (ir & S0_IR_SENDOK) { netif_dbg(priv, tx_done, ndev, "tx done\n"); @@ -525,7 +512,6 @@ static irqreturn_t w5100_interrupt(int irq, void *ndev_instance) if (ir & S0_IR_RECV) { if (napi_schedule_prep(&priv->napi)) { w5100_write(priv, W5100_IMR, 0); - mmiowb(); __napi_schedule(&priv->napi); } } From 850576cfede986f0683bed25e34bc15712ffb463 Mon Sep 17 00:00:00 2001 From: Akinobu Mita Date: Fri, 15 Apr 2016 00:11:30 +0900 Subject: [PATCH 0773/1649] net: w5100: add ability to support other bus interface The w5100 driver currently only supports the direct and indirect bus interface modes, which use MMIO space for accessing w5100 registers. In order to support the SPI interface mode, which is also supported by the W5100 chip, this makes the bus interface abstraction layer more generic so that a separate w5100-spi driver can use the w5100 driver as its core module. Signed-off-by: Akinobu Mita Cc: Mike Sinkovsky Cc: David S. Miller Signed-off-by: David S.
Miller --- drivers/net/ethernet/wiznet/w5100.c | 606 ++++++++++++++++++---------- drivers/net/ethernet/wiznet/w5100.h | 28 ++ 2 files changed, 432 insertions(+), 202 deletions(-) create mode 100644 drivers/net/ethernet/wiznet/w5100.h diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c index f4b7200bc0f5..89cba6741e7f 100644 --- a/drivers/net/ethernet/wiznet/w5100.c +++ b/drivers/net/ethernet/wiznet/w5100.c @@ -27,6 +27,8 @@ #include #include +#include "w5100.h" + #define DRV_NAME "w5100" #define DRV_VERSION "2012-04-04" @@ -76,25 +78,16 @@ MODULE_LICENSE("GPL"); #define W5100_S0_REGS_LEN 0x0040 #define W5100_TX_MEM_START 0x4000 -#define W5100_TX_MEM_END 0x5fff -#define W5100_TX_MEM_MASK 0x1fff +#define W5100_TX_MEM_SIZE 0x2000 #define W5100_RX_MEM_START 0x6000 -#define W5100_RX_MEM_END 0x7fff -#define W5100_RX_MEM_MASK 0x1fff +#define W5100_RX_MEM_SIZE 0x2000 /* * Device driver private data structure */ + struct w5100_priv { - void __iomem *base; - spinlock_t reg_lock; - bool indirect; - u8 (*read)(struct w5100_priv *priv, u16 addr); - void (*write)(struct w5100_priv *priv, u16 addr, u8 data); - u16 (*read16)(struct w5100_priv *priv, u16 addr); - void (*write16)(struct w5100_priv *priv, u16 addr, u16 data); - void (*readbuf)(struct w5100_priv *priv, u16 addr, u8 *buf, int len); - void (*writebuf)(struct w5100_priv *priv, u16 addr, u8 *buf, int len); + const struct w5100_ops *ops; int irq; int link_irq; int link_gpio; @@ -111,72 +104,121 @@ struct w5100_priv { * ***********************************************************************/ +struct w5100_mmio_priv { + void __iomem *base; + /* Serialize access in indirect address mode */ + spinlock_t reg_lock; +}; + +static inline struct w5100_mmio_priv *w5100_mmio_priv(struct net_device *dev) +{ + return w5100_ops_priv(dev); +} + +static inline void __iomem *w5100_mmio(struct net_device *ndev) +{ + struct w5100_mmio_priv *mmio_priv = w5100_mmio_priv(ndev); + + return mmio_priv->base; +} + /* * In direct address mode host system can directly access W5100 registers * after mapping to Memory-Mapped I/O space. * * 0x8000 bytes are required for memory space. 
*/ -static inline u8 w5100_read_direct(struct w5100_priv *priv, u16 addr) +static inline int w5100_read_direct(struct net_device *ndev, u16 addr) { - return ioread8(priv->base + (addr << CONFIG_WIZNET_BUS_SHIFT)); + return ioread8(w5100_mmio(ndev) + (addr << CONFIG_WIZNET_BUS_SHIFT)); } -static inline void __w5100_write_direct(struct w5100_priv *priv, u16 addr, - u8 data) +static inline int __w5100_write_direct(struct net_device *ndev, u16 addr, + u8 data) { - iowrite8(data, priv->base + (addr << CONFIG_WIZNET_BUS_SHIFT)); + iowrite8(data, w5100_mmio(ndev) + (addr << CONFIG_WIZNET_BUS_SHIFT)); + + return 0; } -static inline void w5100_write_direct(struct w5100_priv *priv, - u16 addr, u8 data) +static inline int w5100_write_direct(struct net_device *ndev, u16 addr, u8 data) { - __w5100_write_direct(priv, addr, data); + __w5100_write_direct(ndev, addr, data); mmiowb(); + + return 0; } -static u16 w5100_read16_direct(struct w5100_priv *priv, u16 addr) +static int w5100_read16_direct(struct net_device *ndev, u16 addr) { u16 data; - data = w5100_read_direct(priv, addr) << 8; - data |= w5100_read_direct(priv, addr + 1); + data = w5100_read_direct(ndev, addr) << 8; + data |= w5100_read_direct(ndev, addr + 1); return data; } -static void w5100_write16_direct(struct w5100_priv *priv, u16 addr, u16 data) +static int w5100_write16_direct(struct net_device *ndev, u16 addr, u16 data) { - __w5100_write_direct(priv, addr, data >> 8); - __w5100_write_direct(priv, addr + 1, data); + __w5100_write_direct(ndev, addr, data >> 8); + __w5100_write_direct(ndev, addr + 1, data); mmiowb(); + + return 0; } -static void w5100_readbuf_direct(struct w5100_priv *priv, - u16 offset, u8 *buf, int len) +static int w5100_readbulk_direct(struct net_device *ndev, u16 addr, u8 *buf, + int len) { - u16 addr = W5100_RX_MEM_START + (offset & W5100_RX_MEM_MASK); int i; - for (i = 0; i < len; i++, addr++) { - if (unlikely(addr > W5100_RX_MEM_END)) - addr = W5100_RX_MEM_START; - *buf++ = w5100_read_direct(priv, addr); - } + for (i = 0; i < len; i++, addr++) + *buf++ = w5100_read_direct(ndev, addr); + + return 0; } -static void w5100_writebuf_direct(struct w5100_priv *priv, - u16 offset, u8 *buf, int len) +static int w5100_writebulk_direct(struct net_device *ndev, u16 addr, + const u8 *buf, int len) { - u16 addr = W5100_TX_MEM_START + (offset & W5100_TX_MEM_MASK); int i; - for (i = 0; i < len; i++, addr++) { - if (unlikely(addr > W5100_TX_MEM_END)) - addr = W5100_TX_MEM_START; - __w5100_write_direct(priv, addr, *buf++); - } + for (i = 0; i < len; i++, addr++) + __w5100_write_direct(ndev, addr, *buf++); + mmiowb(); + + return 0; } +static int w5100_mmio_init(struct net_device *ndev) +{ + struct platform_device *pdev = to_platform_device(ndev->dev.parent); + struct w5100_priv *priv = netdev_priv(ndev); + struct w5100_mmio_priv *mmio_priv = w5100_mmio_priv(ndev); + struct resource *mem; + + spin_lock_init(&mmio_priv->reg_lock); + + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + mmio_priv->base = devm_ioremap_resource(&pdev->dev, mem); + if (IS_ERR(mmio_priv->base)) + return PTR_ERR(mmio_priv->base); + + netdev_info(ndev, "at 0x%llx irq %d\n", (u64)mem->start, priv->irq); + + return 0; +} + +static const struct w5100_ops w5100_mmio_direct_ops = { + .read = w5100_read_direct, + .write = w5100_write_direct, + .read16 = w5100_read16_direct, + .write16 = w5100_write16_direct, + .readbulk = w5100_readbulk_direct, + .writebulk = w5100_writebulk_direct, + .init = w5100_mmio_init, +}; + /* * In indirect address mode host system 
indirectly accesses registers by * using Indirect Mode Address Register (IDM_AR) and Indirect Mode Data @@ -188,121 +230,276 @@ static void w5100_writebuf_direct(struct w5100_priv *priv, #define W5100_IDM_AR 0x01 /* Indirect Mode Address Register */ #define W5100_IDM_DR 0x03 /* Indirect Mode Data Register */ -static u8 w5100_read_indirect(struct w5100_priv *priv, u16 addr) +static int w5100_read_indirect(struct net_device *ndev, u16 addr) { + struct w5100_mmio_priv *mmio_priv = w5100_mmio_priv(ndev); unsigned long flags; u8 data; - spin_lock_irqsave(&priv->reg_lock, flags); - w5100_write16_direct(priv, W5100_IDM_AR, addr); - data = w5100_read_direct(priv, W5100_IDM_DR); - spin_unlock_irqrestore(&priv->reg_lock, flags); + spin_lock_irqsave(&mmio_priv->reg_lock, flags); + w5100_write16_direct(ndev, W5100_IDM_AR, addr); + data = w5100_read_direct(ndev, W5100_IDM_DR); + spin_unlock_irqrestore(&mmio_priv->reg_lock, flags); return data; } -static void w5100_write_indirect(struct w5100_priv *priv, u16 addr, u8 data) +static int w5100_write_indirect(struct net_device *ndev, u16 addr, u8 data) { + struct w5100_mmio_priv *mmio_priv = w5100_mmio_priv(ndev); unsigned long flags; - spin_lock_irqsave(&priv->reg_lock, flags); - w5100_write16_direct(priv, W5100_IDM_AR, addr); - w5100_write_direct(priv, W5100_IDM_DR, data); - spin_unlock_irqrestore(&priv->reg_lock, flags); + spin_lock_irqsave(&mmio_priv->reg_lock, flags); + w5100_write16_direct(ndev, W5100_IDM_AR, addr); + w5100_write_direct(ndev, W5100_IDM_DR, data); + spin_unlock_irqrestore(&mmio_priv->reg_lock, flags); + + return 0; } -static u16 w5100_read16_indirect(struct w5100_priv *priv, u16 addr) +static int w5100_read16_indirect(struct net_device *ndev, u16 addr) { + struct w5100_mmio_priv *mmio_priv = w5100_mmio_priv(ndev); unsigned long flags; u16 data; - spin_lock_irqsave(&priv->reg_lock, flags); - w5100_write16_direct(priv, W5100_IDM_AR, addr); - data = w5100_read_direct(priv, W5100_IDM_DR) << 8; - data |= w5100_read_direct(priv, W5100_IDM_DR); - spin_unlock_irqrestore(&priv->reg_lock, flags); + spin_lock_irqsave(&mmio_priv->reg_lock, flags); + w5100_write16_direct(ndev, W5100_IDM_AR, addr); + data = w5100_read_direct(ndev, W5100_IDM_DR) << 8; + data |= w5100_read_direct(ndev, W5100_IDM_DR); + spin_unlock_irqrestore(&mmio_priv->reg_lock, flags); return data; } -static void w5100_write16_indirect(struct w5100_priv *priv, u16 addr, u16 data) +static int w5100_write16_indirect(struct net_device *ndev, u16 addr, u16 data) { + struct w5100_mmio_priv *mmio_priv = w5100_mmio_priv(ndev); unsigned long flags; - spin_lock_irqsave(&priv->reg_lock, flags); - w5100_write16_direct(priv, W5100_IDM_AR, addr); - __w5100_write_direct(priv, W5100_IDM_DR, data >> 8); - w5100_write_direct(priv, W5100_IDM_DR, data); - spin_unlock_irqrestore(&priv->reg_lock, flags); + spin_lock_irqsave(&mmio_priv->reg_lock, flags); + w5100_write16_direct(ndev, W5100_IDM_AR, addr); + __w5100_write_direct(ndev, W5100_IDM_DR, data >> 8); + w5100_write_direct(ndev, W5100_IDM_DR, data); + spin_unlock_irqrestore(&mmio_priv->reg_lock, flags); + + return 0; } -static void w5100_readbuf_indirect(struct w5100_priv *priv, - u16 offset, u8 *buf, int len) +static int w5100_readbulk_indirect(struct net_device *ndev, u16 addr, u8 *buf, + int len) { - u16 addr = W5100_RX_MEM_START + (offset & W5100_RX_MEM_MASK); + struct w5100_mmio_priv *mmio_priv = w5100_mmio_priv(ndev); unsigned long flags; int i; - spin_lock_irqsave(&priv->reg_lock, flags); - w5100_write16_direct(priv, W5100_IDM_AR, addr); + 
spin_lock_irqsave(&mmio_priv->reg_lock, flags); + w5100_write16_direct(ndev, W5100_IDM_AR, addr); + + for (i = 0; i < len; i++) + *buf++ = w5100_read_direct(ndev, W5100_IDM_DR); - for (i = 0; i < len; i++, addr++) { - if (unlikely(addr > W5100_RX_MEM_END)) { - addr = W5100_RX_MEM_START; - w5100_write16_direct(priv, W5100_IDM_AR, addr); - } - *buf++ = w5100_read_direct(priv, W5100_IDM_DR); - } mmiowb(); - spin_unlock_irqrestore(&priv->reg_lock, flags); + spin_unlock_irqrestore(&mmio_priv->reg_lock, flags); + + return 0; } -static void w5100_writebuf_indirect(struct w5100_priv *priv, - u16 offset, u8 *buf, int len) +static int w5100_writebulk_indirect(struct net_device *ndev, u16 addr, + const u8 *buf, int len) { - u16 addr = W5100_TX_MEM_START + (offset & W5100_TX_MEM_MASK); + struct w5100_mmio_priv *mmio_priv = w5100_mmio_priv(ndev); unsigned long flags; int i; - spin_lock_irqsave(&priv->reg_lock, flags); - w5100_write16_direct(priv, W5100_IDM_AR, addr); + spin_lock_irqsave(&mmio_priv->reg_lock, flags); + w5100_write16_direct(ndev, W5100_IDM_AR, addr); + + for (i = 0; i < len; i++) + __w5100_write_direct(ndev, W5100_IDM_DR, *buf++); - for (i = 0; i < len; i++, addr++) { - if (unlikely(addr > W5100_TX_MEM_END)) { - addr = W5100_TX_MEM_START; - w5100_write16_direct(priv, W5100_IDM_AR, addr); - } - __w5100_write_direct(priv, W5100_IDM_DR, *buf++); - } mmiowb(); - spin_unlock_irqrestore(&priv->reg_lock, flags); + spin_unlock_irqrestore(&mmio_priv->reg_lock, flags); + + return 0; } +static int w5100_reset_indirect(struct net_device *ndev) +{ + w5100_write_direct(ndev, W5100_MR, MR_RST); + mdelay(5); + w5100_write_direct(ndev, W5100_MR, MR_PB | MR_AI | MR_IND); + + return 0; +} + +static const struct w5100_ops w5100_mmio_indirect_ops = { + .read = w5100_read_indirect, + .write = w5100_write_indirect, + .read16 = w5100_read16_indirect, + .write16 = w5100_write16_indirect, + .readbulk = w5100_readbulk_indirect, + .writebulk = w5100_writebulk_indirect, + .init = w5100_mmio_init, + .reset = w5100_reset_indirect, +}; + #if defined(CONFIG_WIZNET_BUS_DIRECT) -#define w5100_read w5100_read_direct -#define w5100_write w5100_write_direct -#define w5100_read16 w5100_read16_direct -#define w5100_write16 w5100_write16_direct -#define w5100_readbuf w5100_readbuf_direct -#define w5100_writebuf w5100_writebuf_direct + +static int w5100_read(struct w5100_priv *priv, u16 addr) +{ + return w5100_read_direct(priv->ndev, addr); +} + +static int w5100_write(struct w5100_priv *priv, u16 addr, u8 data) +{ + return w5100_write_direct(priv->ndev, addr, data); +} + +static int w5100_read16(struct w5100_priv *priv, u16 addr) +{ + return w5100_read16_direct(priv->ndev, addr); +} + +static int w5100_write16(struct w5100_priv *priv, u16 addr, u16 data) +{ + return w5100_write16_direct(priv->ndev, addr, data); +} + +static int w5100_readbulk(struct w5100_priv *priv, u16 addr, u8 *buf, int len) +{ + return w5100_readbulk_direct(priv->ndev, addr, buf, len); +} + +static int w5100_writebulk(struct w5100_priv *priv, u16 addr, const u8 *buf, + int len) +{ + return w5100_writebulk_direct(priv->ndev, addr, buf, len); +} #elif defined(CONFIG_WIZNET_BUS_INDIRECT) -#define w5100_read w5100_read_indirect -#define w5100_write w5100_write_indirect -#define w5100_read16 w5100_read16_indirect -#define w5100_write16 w5100_write16_indirect -#define w5100_readbuf w5100_readbuf_indirect -#define w5100_writebuf w5100_writebuf_indirect + +static int w5100_read(struct w5100_priv *priv, u16 addr) +{ + return w5100_read_indirect(priv->ndev, addr); +} 
+ +static int w5100_write(struct w5100_priv *priv, u16 addr, u8 data) +{ + return w5100_write_indirect(priv->ndev, addr, data); +} + +static int w5100_read16(struct w5100_priv *priv, u16 addr) +{ + return w5100_read16_indirect(priv->ndev, addr); +} + +static int w5100_write16(struct w5100_priv *priv, u16 addr, u16 data) +{ + return w5100_write16_indirect(priv->ndev, addr, data); +} + +static int w5100_readbulk(struct w5100_priv *priv, u16 addr, u8 *buf, int len) +{ + return w5100_readbulk_indirect(priv->ndev, addr, buf, len); +} + +static int w5100_writebulk(struct w5100_priv *priv, u16 addr, const u8 *buf, + int len) +{ + return w5100_writebulk_indirect(priv->ndev, addr, buf, len); +} #else /* CONFIG_WIZNET_BUS_ANY */ -#define w5100_read priv->read -#define w5100_write priv->write -#define w5100_read16 priv->read16 -#define w5100_write16 priv->write16 -#define w5100_readbuf priv->readbuf -#define w5100_writebuf priv->writebuf + +static int w5100_read(struct w5100_priv *priv, u16 addr) +{ + return priv->ops->read(priv->ndev, addr); +} + +static int w5100_write(struct w5100_priv *priv, u16 addr, u8 data) +{ + return priv->ops->write(priv->ndev, addr, data); +} + +static int w5100_read16(struct w5100_priv *priv, u16 addr) +{ + return priv->ops->read16(priv->ndev, addr); +} + +static int w5100_write16(struct w5100_priv *priv, u16 addr, u16 data) +{ + return priv->ops->write16(priv->ndev, addr, data); +} + +static int w5100_readbulk(struct w5100_priv *priv, u16 addr, u8 *buf, int len) +{ + return priv->ops->readbulk(priv->ndev, addr, buf, len); +} + +static int w5100_writebulk(struct w5100_priv *priv, u16 addr, const u8 *buf, + int len) +{ + return priv->ops->writebulk(priv->ndev, addr, buf, len); +} + #endif +static int w5100_readbuf(struct w5100_priv *priv, u16 offset, u8 *buf, int len) +{ + u16 addr; + int remain = 0; + int ret; + + offset %= W5100_RX_MEM_SIZE; + addr = W5100_RX_MEM_START + offset; + + if (offset + len > W5100_RX_MEM_SIZE) { + remain = (offset + len) % W5100_RX_MEM_SIZE; + len = W5100_RX_MEM_SIZE - offset; + } + + ret = w5100_readbulk(priv, addr, buf, len); + if (ret || !remain) + return ret; + + return w5100_readbulk(priv, W5100_RX_MEM_START, buf + len, remain); +} + +static int w5100_writebuf(struct w5100_priv *priv, u16 offset, const u8 *buf, + int len) +{ + u16 addr; + int ret; + int remain = 0; + + offset %= W5100_TX_MEM_SIZE; + addr = W5100_TX_MEM_START + offset; + + if (offset + len > W5100_TX_MEM_SIZE) { + remain = (offset + len) % W5100_TX_MEM_SIZE; + len = W5100_TX_MEM_SIZE - offset; + } + + ret = w5100_writebulk(priv, addr, buf, len); + if (ret || !remain) + return ret; + + return w5100_writebulk(priv, W5100_TX_MEM_START, buf + len, remain); +} + +static int w5100_reset(struct w5100_priv *priv) +{ + if (priv->ops->reset) + return priv->ops->reset(priv->ndev); + + w5100_write(priv, W5100_MR, MR_RST); + mdelay(5); + w5100_write(priv, W5100_MR, MR_PB); + + return 0; +} + static int w5100_command(struct w5100_priv *priv, u16 cmd) { unsigned long timeout = jiffies + msecs_to_jiffies(100); @@ -321,19 +518,14 @@ static int w5100_command(struct w5100_priv *priv, u16 cmd) static void w5100_write_macaddr(struct w5100_priv *priv) { struct net_device *ndev = priv->ndev; - int i; - for (i = 0; i < ETH_ALEN; i++) - w5100_write(priv, W5100_SHAR + i, ndev->dev_addr[i]); + w5100_writebulk(priv, W5100_SHAR, ndev->dev_addr, ETH_ALEN); } static void w5100_hw_reset(struct w5100_priv *priv) { - w5100_write_direct(priv, W5100_MR, MR_RST); - mdelay(5); - w5100_write_direct(priv, 
W5100_MR, priv->indirect ? - MR_PB | MR_AI | MR_IND : - MR_PB); + w5100_reset(priv); + w5100_write(priv, W5100_IMR, 0); w5100_write_macaddr(priv); @@ -403,17 +595,14 @@ static int w5100_get_regs_len(struct net_device *ndev) } static void w5100_get_regs(struct net_device *ndev, - struct ethtool_regs *regs, void *_buf) + struct ethtool_regs *regs, void *buf) { struct w5100_priv *priv = netdev_priv(ndev); - u8 *buf = _buf; - u16 i; regs->version = 1; - for (i = 0; i < W5100_COMMON_REGS_LEN; i++) - *buf++ = w5100_read(priv, W5100_COMMON_REGS + i); - for (i = 0; i < W5100_S0_REGS_LEN; i++) - *buf++ = w5100_read(priv, W5100_S0_REGS + i); + w5100_readbulk(priv, W5100_COMMON_REGS, buf, W5100_COMMON_REGS_LEN); + buf += W5100_COMMON_REGS_LEN; + w5100_readbulk(priv, W5100_S0_REGS, buf, W5100_S0_REGS_LEN); } static void w5100_tx_timeout(struct net_device *ndev) @@ -606,91 +795,68 @@ static const struct net_device_ops w5100_netdev_ops = { .ndo_change_mtu = eth_change_mtu, }; -static int w5100_hw_probe(struct platform_device *pdev) +static int w5100_mmio_probe(struct platform_device *pdev) { struct wiznet_platform_data *data = dev_get_platdata(&pdev->dev); - struct net_device *ndev = platform_get_drvdata(pdev); - struct w5100_priv *priv = netdev_priv(ndev); - const char *name = netdev_name(ndev); + u8 *mac_addr = NULL; struct resource *mem; - int mem_size; + const struct w5100_ops *ops; int irq; - int ret; - if (data && is_valid_ether_addr(data->mac_addr)) { - memcpy(ndev->dev_addr, data->mac_addr, ETH_ALEN); - } else { - eth_hw_addr_random(ndev); - } + if (data && is_valid_ether_addr(data->mac_addr)) + mac_addr = data->mac_addr; mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); - priv->base = devm_ioremap_resource(&pdev->dev, mem); - if (IS_ERR(priv->base)) - return PTR_ERR(priv->base); - - mem_size = resource_size(mem); - - spin_lock_init(&priv->reg_lock); - priv->indirect = mem_size < W5100_BUS_DIRECT_SIZE; - if (priv->indirect) { - priv->read = w5100_read_indirect; - priv->write = w5100_write_indirect; - priv->read16 = w5100_read16_indirect; - priv->write16 = w5100_write16_indirect; - priv->readbuf = w5100_readbuf_indirect; - priv->writebuf = w5100_writebuf_indirect; - } else { - priv->read = w5100_read_direct; - priv->write = w5100_write_direct; - priv->read16 = w5100_read16_direct; - priv->write16 = w5100_write16_direct; - priv->readbuf = w5100_readbuf_direct; - priv->writebuf = w5100_writebuf_direct; - } - - w5100_hw_reset(priv); - if (w5100_read16(priv, W5100_RTR) != RTR_DEFAULT) - return -ENODEV; + if (resource_size(mem) < W5100_BUS_DIRECT_SIZE) + ops = &w5100_mmio_indirect_ops; + else + ops = &w5100_mmio_direct_ops; irq = platform_get_irq(pdev, 0); if (irq < 0) return irq; - ret = request_irq(irq, w5100_interrupt, - IRQ_TYPE_LEVEL_LOW, name, ndev); - if (ret < 0) - return ret; - priv->irq = irq; - priv->link_gpio = data ? data->link_gpio : -EINVAL; - if (gpio_is_valid(priv->link_gpio)) { - char *link_name = devm_kzalloc(&pdev->dev, 16, GFP_KERNEL); - if (!link_name) - return -ENOMEM; - snprintf(link_name, 16, "%s-link", name); - priv->link_irq = gpio_to_irq(priv->link_gpio); - if (request_any_context_irq(priv->link_irq, w5100_detect_link, - IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, - link_name, priv->ndev) < 0) - priv->link_gpio = -EINVAL; - } - - netdev_info(ndev, "at 0x%llx irq %d\n", (u64)mem->start, irq); - return 0; + return w5100_probe(&pdev->dev, ops, sizeof(struct w5100_mmio_priv), + mac_addr, irq, data ? 
data->link_gpio : -EINVAL); } -static int w5100_probe(struct platform_device *pdev) +static int w5100_mmio_remove(struct platform_device *pdev) +{ + return w5100_remove(&pdev->dev); +} + +void *w5100_ops_priv(const struct net_device *ndev) +{ + return netdev_priv(ndev) + + ALIGN(sizeof(struct w5100_priv), NETDEV_ALIGN); +} +EXPORT_SYMBOL_GPL(w5100_ops_priv); + +int w5100_probe(struct device *dev, const struct w5100_ops *ops, + int sizeof_ops_priv, u8 *mac_addr, int irq, int link_gpio) { struct w5100_priv *priv; struct net_device *ndev; int err; + size_t alloc_size; - ndev = alloc_etherdev(sizeof(*priv)); + alloc_size = sizeof(*priv); + if (sizeof_ops_priv) { + alloc_size = ALIGN(alloc_size, NETDEV_ALIGN); + alloc_size += sizeof_ops_priv; + } + alloc_size += NETDEV_ALIGN - 1; + + ndev = alloc_etherdev(alloc_size); if (!ndev) return -ENOMEM; - SET_NETDEV_DEV(ndev, &pdev->dev); - platform_set_drvdata(pdev, ndev); + SET_NETDEV_DEV(ndev, dev); + dev_set_drvdata(dev, ndev); priv = netdev_priv(ndev); priv->ndev = ndev; + priv->ops = ops; + priv->irq = irq; + priv->link_gpio = link_gpio; ndev->netdev_ops = &w5100_netdev_ops; ndev->ethtool_ops = &w5100_ethtool_ops; @@ -706,22 +872,59 @@ static int w5100_probe(struct platform_device *pdev) if (err < 0) goto err_register; - err = w5100_hw_probe(pdev); - if (err < 0) - goto err_hw_probe; + if (mac_addr) + memcpy(ndev->dev_addr, mac_addr, ETH_ALEN); + else + eth_hw_addr_random(ndev); + + if (priv->ops->init) { + err = priv->ops->init(priv->ndev); + if (err) + goto err_hw; + } + + w5100_hw_reset(priv); + if (w5100_read16(priv, W5100_RTR) != RTR_DEFAULT) { + err = -ENODEV; + goto err_hw; + } + + err = request_irq(priv->irq, w5100_interrupt, IRQF_TRIGGER_LOW, + netdev_name(ndev), ndev); + if (err) + goto err_hw; + + if (gpio_is_valid(priv->link_gpio)) { + char *link_name = devm_kzalloc(dev, 16, GFP_KERNEL); + + if (!link_name) { + err = -ENOMEM; + goto err_gpio; + } + snprintf(link_name, 16, "%s-link", netdev_name(ndev)); + priv->link_irq = gpio_to_irq(priv->link_gpio); + if (request_any_context_irq(priv->link_irq, w5100_detect_link, + IRQF_TRIGGER_RISING | + IRQF_TRIGGER_FALLING, + link_name, priv->ndev) < 0) + priv->link_gpio = -EINVAL; + } return 0; -err_hw_probe: +err_gpio: + free_irq(priv->irq, ndev); +err_hw: unregister_netdev(ndev); err_register: free_netdev(ndev); return err; } +EXPORT_SYMBOL_GPL(w5100_probe); -static int w5100_remove(struct platform_device *pdev) +int w5100_remove(struct device *dev) { - struct net_device *ndev = platform_get_drvdata(pdev); + struct net_device *ndev = dev_get_drvdata(dev); struct w5100_priv *priv = netdev_priv(ndev); w5100_hw_reset(priv); @@ -733,12 +936,12 @@ static int w5100_remove(struct platform_device *pdev) free_netdev(ndev); return 0; } +EXPORT_SYMBOL_GPL(w5100_remove); #ifdef CONFIG_PM_SLEEP static int w5100_suspend(struct device *dev) { - struct platform_device *pdev = to_platform_device(dev); - struct net_device *ndev = platform_get_drvdata(pdev); + struct net_device *ndev = dev_get_drvdata(dev); struct w5100_priv *priv = netdev_priv(ndev); if (netif_running(ndev)) { @@ -752,8 +955,7 @@ static int w5100_suspend(struct device *dev) static int w5100_resume(struct device *dev) { - struct platform_device *pdev = to_platform_device(dev); - struct net_device *ndev = platform_get_drvdata(pdev); + struct net_device *ndev = dev_get_drvdata(dev); struct w5100_priv *priv = netdev_priv(ndev); if (netif_running(ndev)) { @@ -769,15 +971,15 @@ static int w5100_resume(struct device *dev) } #endif /* CONFIG_PM_SLEEP */ 
-static SIMPLE_DEV_PM_OPS(w5100_pm_ops, w5100_suspend, w5100_resume); +SIMPLE_DEV_PM_OPS(w5100_pm_ops, w5100_suspend, w5100_resume); +EXPORT_SYMBOL_GPL(w5100_pm_ops); -static struct platform_driver w5100_driver = { +static struct platform_driver w5100_mmio_driver = { .driver = { .name = DRV_NAME, .pm = &w5100_pm_ops, }, - .probe = w5100_probe, - .remove = w5100_remove, + .probe = w5100_mmio_probe, + .remove = w5100_mmio_remove, }; - -module_platform_driver(w5100_driver); +module_platform_driver(w5100_mmio_driver); diff --git a/drivers/net/ethernet/wiznet/w5100.h b/drivers/net/ethernet/wiznet/w5100.h new file mode 100644 index 000000000000..39d452d878e7 --- /dev/null +++ b/drivers/net/ethernet/wiznet/w5100.h @@ -0,0 +1,28 @@ +/* + * Ethernet driver for the WIZnet W5100 chip. + * + * Copyright (C) 2006-2008 WIZnet Co.,Ltd. + * Copyright (C) 2012 Mike Sinkovsky + * + * Licensed under the GPL-2 or later. + */ + +struct w5100_ops { + int (*read)(struct net_device *ndev, u16 addr); + int (*write)(struct net_device *ndev, u16 addr, u8 data); + int (*read16)(struct net_device *ndev, u16 addr); + int (*write16)(struct net_device *ndev, u16 addr, u16 data); + int (*readbulk)(struct net_device *ndev, u16 addr, u8 *buf, int len); + int (*writebulk)(struct net_device *ndev, u16 addr, const u8 *buf, + int len); + int (*reset)(struct net_device *ndev); + int (*init)(struct net_device *ndev); +}; + +void *w5100_ops_priv(const struct net_device *ndev); + +int w5100_probe(struct device *dev, const struct w5100_ops *ops, + int sizeof_ops_priv, u8 *mac_addr, int irq, int link_gpio); +int w5100_remove(struct device *dev); + +extern const struct dev_pm_ops w5100_pm_ops; From bf2c6b90b385c163ad9c48fe97f5dc6af0091de6 Mon Sep 17 00:00:00 2001 From: Akinobu Mita Date: Fri, 15 Apr 2016 00:11:31 +0900 Subject: [PATCH 0774/1649] net: w5100: enable to support sleepable register access interface SPI transfer routines are callable only from contexts that can sleep. This adds the ability to tell the core driver that the interface mode cannot access w5100 registers in atomic context. In this case, a workqueue and a threaded irq are required. This also corrects the timeout period when waiting for the command register to be automatically cleared, because the latency of register access over SPI transfers can be affected by other contexts. Signed-off-by: Akinobu Mita Cc: Mike Sinkovsky Cc: David S. Miller Signed-off-by: David S.
Miller --- drivers/net/ethernet/wiznet/w5100.c | 196 ++++++++++++++++++++++------ drivers/net/ethernet/wiznet/w5100.h | 1 + 2 files changed, 156 insertions(+), 41 deletions(-) diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c index 89cba6741e7f..42a9de4a48b1 100644 --- a/drivers/net/ethernet/wiznet/w5100.c +++ b/drivers/net/ethernet/wiznet/w5100.c @@ -96,6 +96,13 @@ struct w5100_priv { struct net_device *ndev; bool promisc; u32 msg_enable; + + struct workqueue_struct *xfer_wq; + struct work_struct rx_work; + struct sk_buff *tx_skb; + struct work_struct tx_work; + struct work_struct setrx_work; + struct work_struct restart_work; }; /************************************************************************ @@ -502,10 +509,12 @@ static int w5100_reset(struct w5100_priv *priv) static int w5100_command(struct w5100_priv *priv, u16 cmd) { - unsigned long timeout = jiffies + msecs_to_jiffies(100); + unsigned long timeout; w5100_write(priv, W5100_S0_CR, cmd); + timeout = jiffies + msecs_to_jiffies(100); + while (w5100_read(priv, W5100_S0_CR) != 0) { if (time_after(jiffies, timeout)) return -EIO; @@ -605,7 +614,7 @@ static void w5100_get_regs(struct net_device *ndev, w5100_readbulk(priv, W5100_S0_REGS, buf, W5100_S0_REGS_LEN); } -static void w5100_tx_timeout(struct net_device *ndev) +static void w5100_restart(struct net_device *ndev) { struct w5100_priv *priv = netdev_priv(ndev); @@ -617,13 +626,29 @@ static void w5100_tx_timeout(struct net_device *ndev) netif_wake_queue(ndev); } -static int w5100_start_tx(struct sk_buff *skb, struct net_device *ndev) +static void w5100_restart_work(struct work_struct *work) +{ + struct w5100_priv *priv = container_of(work, struct w5100_priv, + restart_work); + + w5100_restart(priv->ndev); +} + +static void w5100_tx_timeout(struct net_device *ndev) +{ + struct w5100_priv *priv = netdev_priv(ndev); + + if (priv->ops->may_sleep) + schedule_work(&priv->restart_work); + else + w5100_restart(ndev); +} + +static void w5100_tx_skb(struct net_device *ndev, struct sk_buff *skb) { struct w5100_priv *priv = netdev_priv(ndev); u16 offset; - netif_stop_queue(ndev); - offset = w5100_read16(priv, W5100_S0_TX_WR); w5100_writebuf(priv, offset, skb->data, skb->len); w5100_write16(priv, W5100_S0_TX_WR, offset + skb->len); @@ -632,47 +657,98 @@ static int w5100_start_tx(struct sk_buff *skb, struct net_device *ndev) dev_kfree_skb(skb); w5100_command(priv, S0_CR_SEND); +} + +static void w5100_tx_work(struct work_struct *work) +{ + struct w5100_priv *priv = container_of(work, struct w5100_priv, + tx_work); + struct sk_buff *skb = priv->tx_skb; + + priv->tx_skb = NULL; + + if (WARN_ON(!skb)) + return; + w5100_tx_skb(priv->ndev, skb); +} + +static int w5100_start_tx(struct sk_buff *skb, struct net_device *ndev) +{ + struct w5100_priv *priv = netdev_priv(ndev); + + netif_stop_queue(ndev); + + if (priv->ops->may_sleep) { + WARN_ON(priv->tx_skb); + priv->tx_skb = skb; + queue_work(priv->xfer_wq, &priv->tx_work); + } else { + w5100_tx_skb(ndev, skb); + } return NETDEV_TX_OK; } +static struct sk_buff *w5100_rx_skb(struct net_device *ndev) +{ + struct w5100_priv *priv = netdev_priv(ndev); + struct sk_buff *skb; + u16 rx_len; + u16 offset; + u8 header[2]; + u16 rx_buf_len = w5100_read16(priv, W5100_S0_RX_RSR); + + if (rx_buf_len == 0) + return NULL; + + offset = w5100_read16(priv, W5100_S0_RX_RD); + w5100_readbuf(priv, offset, header, 2); + rx_len = get_unaligned_be16(header) - 2; + + skb = netdev_alloc_skb_ip_align(ndev, rx_len); + if (unlikely(!skb)) { + 
w5100_write16(priv, W5100_S0_RX_RD, offset + rx_buf_len); + w5100_command(priv, S0_CR_RECV); + ndev->stats.rx_dropped++; + return NULL; + } + + skb_put(skb, rx_len); + w5100_readbuf(priv, offset + 2, skb->data, rx_len); + w5100_write16(priv, W5100_S0_RX_RD, offset + 2 + rx_len); + w5100_command(priv, S0_CR_RECV); + skb->protocol = eth_type_trans(skb, ndev); + + ndev->stats.rx_packets++; + ndev->stats.rx_bytes += rx_len; + + return skb; +} + +static void w5100_rx_work(struct work_struct *work) +{ + struct w5100_priv *priv = container_of(work, struct w5100_priv, + rx_work); + struct sk_buff *skb; + + while ((skb = w5100_rx_skb(priv->ndev))) + netif_rx_ni(skb); + + w5100_write(priv, W5100_IMR, IR_S0); +} + static int w5100_napi_poll(struct napi_struct *napi, int budget) { struct w5100_priv *priv = container_of(napi, struct w5100_priv, napi); - struct net_device *ndev = priv->ndev; - struct sk_buff *skb; int rx_count; - u16 rx_len; - u16 offset; - u8 header[2]; for (rx_count = 0; rx_count < budget; rx_count++) { - u16 rx_buf_len = w5100_read16(priv, W5100_S0_RX_RSR); - if (rx_buf_len == 0) + struct sk_buff *skb = w5100_rx_skb(priv->ndev); + + if (skb) + netif_receive_skb(skb); + else break; - - offset = w5100_read16(priv, W5100_S0_RX_RD); - w5100_readbuf(priv, offset, header, 2); - rx_len = get_unaligned_be16(header) - 2; - - skb = netdev_alloc_skb_ip_align(ndev, rx_len); - if (unlikely(!skb)) { - w5100_write16(priv, W5100_S0_RX_RD, - offset + rx_buf_len); - w5100_command(priv, S0_CR_RECV); - ndev->stats.rx_dropped++; - return -ENOMEM; - } - - skb_put(skb, rx_len); - w5100_readbuf(priv, offset + 2, skb->data, rx_len); - w5100_write16(priv, W5100_S0_RX_RD, offset + 2 + rx_len); - w5100_command(priv, S0_CR_RECV); - skb->protocol = eth_type_trans(skb, ndev); - - netif_receive_skb(skb); - ndev->stats.rx_packets++; - ndev->stats.rx_bytes += rx_len; } if (rx_count < budget) { @@ -699,10 +775,12 @@ static irqreturn_t w5100_interrupt(int irq, void *ndev_instance) } if (ir & S0_IR_RECV) { - if (napi_schedule_prep(&priv->napi)) { - w5100_write(priv, W5100_IMR, 0); + w5100_write(priv, W5100_IMR, 0); + + if (priv->ops->may_sleep) + queue_work(priv->xfer_wq, &priv->rx_work); + else if (napi_schedule_prep(&priv->napi)) __napi_schedule(&priv->napi); - } } return IRQ_HANDLED; @@ -726,6 +804,14 @@ static irqreturn_t w5100_detect_link(int irq, void *ndev_instance) return IRQ_HANDLED; } +static void w5100_setrx_work(struct work_struct *work) +{ + struct w5100_priv *priv = container_of(work, struct w5100_priv, + setrx_work); + + w5100_hw_start(priv); +} + static void w5100_set_rx_mode(struct net_device *ndev) { struct w5100_priv *priv = netdev_priv(ndev); @@ -733,7 +819,11 @@ static void w5100_set_rx_mode(struct net_device *ndev) if (priv->promisc != set_promisc) { priv->promisc = set_promisc; - w5100_hw_start(priv); + + if (priv->ops->may_sleep) + schedule_work(&priv->setrx_work); + else + w5100_hw_start(priv); } } @@ -872,6 +962,17 @@ int w5100_probe(struct device *dev, const struct w5100_ops *ops, if (err < 0) goto err_register; + priv->xfer_wq = create_workqueue(netdev_name(ndev)); + if (!priv->xfer_wq) { + err = -ENOMEM; + goto err_wq; + } + + INIT_WORK(&priv->rx_work, w5100_rx_work); + INIT_WORK(&priv->tx_work, w5100_tx_work); + INIT_WORK(&priv->setrx_work, w5100_setrx_work); + INIT_WORK(&priv->restart_work, w5100_restart_work); + if (mac_addr) memcpy(ndev->dev_addr, mac_addr, ETH_ALEN); else @@ -889,8 +990,14 @@ int w5100_probe(struct device *dev, const struct w5100_ops *ops, goto err_hw; } - err = 
request_irq(priv->irq, w5100_interrupt, IRQF_TRIGGER_LOW, - netdev_name(ndev), ndev); + if (ops->may_sleep) { + err = request_threaded_irq(priv->irq, NULL, w5100_interrupt, + IRQF_TRIGGER_LOW | IRQF_ONESHOT, + netdev_name(ndev), ndev); + } else { + err = request_irq(priv->irq, w5100_interrupt, + IRQF_TRIGGER_LOW, netdev_name(ndev), ndev); + } if (err) goto err_hw; @@ -915,6 +1022,8 @@ int w5100_probe(struct device *dev, const struct w5100_ops *ops, err_gpio: free_irq(priv->irq, ndev); err_hw: + destroy_workqueue(priv->xfer_wq); +err_wq: unregister_netdev(ndev); err_register: free_netdev(ndev); @@ -932,6 +1041,11 @@ int w5100_remove(struct device *dev) if (gpio_is_valid(priv->link_gpio)) free_irq(priv->link_irq, ndev); + flush_work(&priv->setrx_work); + flush_work(&priv->restart_work); + flush_workqueue(priv->xfer_wq); + destroy_workqueue(priv->xfer_wq); + unregister_netdev(ndev); free_netdev(ndev); return 0; diff --git a/drivers/net/ethernet/wiznet/w5100.h b/drivers/net/ethernet/wiznet/w5100.h index 39d452d878e7..69045f0f9e10 100644 --- a/drivers/net/ethernet/wiznet/w5100.h +++ b/drivers/net/ethernet/wiznet/w5100.h @@ -8,6 +8,7 @@ */ struct w5100_ops { + bool may_sleep; int (*read)(struct net_device *ndev, u16 addr); int (*write)(struct net_device *ndev, u16 addr, u8 data); int (*read16)(struct net_device *ndev, u16 addr); From 630cf09751fe166ffc25d1ae35ce804bf58eb3c7 Mon Sep 17 00:00:00 2001 From: Akinobu Mita Date: Fri, 15 Apr 2016 00:11:32 +0900 Subject: [PATCH 0775/1649] net: w5100: support SPI interface mode This adds new w5100-spi driver which shares the bus interface independent code with existing w5100 driver. Signed-off-by: Akinobu Mita Cc: Mike Sinkovsky Cc: David S. Miller Signed-off-by: David S. Miller --- drivers/net/ethernet/wiznet/Kconfig | 14 +++ drivers/net/ethernet/wiznet/Makefile | 1 + drivers/net/ethernet/wiznet/w5100-spi.c | 136 ++++++++++++++++++++++++ 3 files changed, 151 insertions(+) create mode 100644 drivers/net/ethernet/wiznet/w5100-spi.c diff --git a/drivers/net/ethernet/wiznet/Kconfig b/drivers/net/ethernet/wiznet/Kconfig index f98b91d21f33..d1ab353790de 100644 --- a/drivers/net/ethernet/wiznet/Kconfig +++ b/drivers/net/ethernet/wiznet/Kconfig @@ -69,4 +69,18 @@ config WIZNET_BUS_ANY Performance may decrease compared to explicitly selected bus mode. endchoice +config WIZNET_W5100_SPI + tristate "WIZnet W5100 Ethernet support for SPI mode" + depends on WIZNET_BUS_ANY + depends on SPI + ---help--- + In SPI mode host system accesses registers using SPI protocol + (mode 0) on the SPI bus. + + Performance decreases compared to other bus interface mode. + In W5100 SPI mode, burst READ/WRITE processing are not provided. + + To compile this driver as a module, choose M here: the module + will be called w5100-spi. + endif # NET_VENDOR_WIZNET diff --git a/drivers/net/ethernet/wiznet/Makefile b/drivers/net/ethernet/wiznet/Makefile index c614535227e8..1e05e1a84208 100644 --- a/drivers/net/ethernet/wiznet/Makefile +++ b/drivers/net/ethernet/wiznet/Makefile @@ -1,2 +1,3 @@ obj-$(CONFIG_WIZNET_W5100) += w5100.o +obj-$(CONFIG_WIZNET_W5100_SPI) += w5100-spi.o obj-$(CONFIG_WIZNET_W5300) += w5300.o diff --git a/drivers/net/ethernet/wiznet/w5100-spi.c b/drivers/net/ethernet/wiznet/w5100-spi.c new file mode 100644 index 000000000000..32f406cfce4b --- /dev/null +++ b/drivers/net/ethernet/wiznet/w5100-spi.c @@ -0,0 +1,136 @@ +/* + * Ethernet driver for the WIZnet W5100 chip. + * + * Copyright (C) 2016 Akinobu Mita + * + * Licensed under the GPL-2 or later. 
+ */ + +#include +#include +#include +#include +#include + +#include "w5100.h" + +#define W5100_SPI_WRITE_OPCODE 0xf0 +#define W5100_SPI_READ_OPCODE 0x0f + +static int w5100_spi_read(struct net_device *ndev, u16 addr) +{ + struct spi_device *spi = to_spi_device(ndev->dev.parent); + u8 cmd[3] = { W5100_SPI_READ_OPCODE, addr >> 8, addr & 0xff }; + u8 data; + int ret; + + ret = spi_write_then_read(spi, cmd, sizeof(cmd), &data, 1); + + return ret ? ret : data; +} + +static int w5100_spi_write(struct net_device *ndev, u16 addr, u8 data) +{ + struct spi_device *spi = to_spi_device(ndev->dev.parent); + u8 cmd[4] = { W5100_SPI_WRITE_OPCODE, addr >> 8, addr & 0xff, data}; + + return spi_write_then_read(spi, cmd, sizeof(cmd), NULL, 0); +} + +static int w5100_spi_read16(struct net_device *ndev, u16 addr) +{ + u16 data; + int ret; + + ret = w5100_spi_read(ndev, addr); + if (ret < 0) + return ret; + data = ret << 8; + ret = w5100_spi_read(ndev, addr + 1); + + return ret < 0 ? ret : data | ret; +} + +static int w5100_spi_write16(struct net_device *ndev, u16 addr, u16 data) +{ + int ret; + + ret = w5100_spi_write(ndev, addr, data >> 8); + if (ret) + return ret; + + return w5100_spi_write(ndev, addr + 1, data & 0xff); +} + +static int w5100_spi_readbulk(struct net_device *ndev, u16 addr, u8 *buf, + int len) +{ + int i; + + for (i = 0; i < len; i++) { + int ret = w5100_spi_read(ndev, addr + i); + + if (ret < 0) + return ret; + buf[i] = ret; + } + + return 0; +} + +static int w5100_spi_writebulk(struct net_device *ndev, u16 addr, const u8 *buf, + int len) +{ + int i; + + for (i = 0; i < len; i++) { + int ret = w5100_spi_write(ndev, addr + i, buf[i]); + + if (ret) + return ret; + } + + return 0; +} + +static const struct w5100_ops w5100_spi_ops = { + .may_sleep = true, + .read = w5100_spi_read, + .write = w5100_spi_write, + .read16 = w5100_spi_read16, + .write16 = w5100_spi_write16, + .readbulk = w5100_spi_readbulk, + .writebulk = w5100_spi_writebulk, +}; + +static int w5100_spi_probe(struct spi_device *spi) +{ + return w5100_probe(&spi->dev, &w5100_spi_ops, 0, NULL, spi->irq, + -EINVAL); +} + +static int w5100_spi_remove(struct spi_device *spi) +{ + return w5100_remove(&spi->dev); +} + +static const struct spi_device_id w5100_spi_ids[] = { + { "w5100", 0 }, + {} +}; +MODULE_DEVICE_TABLE(spi, w5100_spi_ids); + +static struct spi_driver w5100_spi_driver = { + .driver = { + .name = "w5100", + .pm = &w5100_pm_ops, + }, + .probe = w5100_spi_probe, + .remove = w5100_spi_remove, + .id_table = w5100_spi_ids, +}; +module_spi_driver(w5100_spi_driver); + +MODULE_DESCRIPTION("WIZnet W5100 Ethernet driver for SPI mode"); +MODULE_AUTHOR("Akinobu Mita "); +MODULE_LICENSE("GPL"); From 0c165ff2d8db575efa41f2586c2de193850dec48 Mon Sep 17 00:00:00 2001 From: Akinobu Mita Date: Fri, 15 Apr 2016 00:11:33 +0900 Subject: [PATCH 0776/1649] net: w5100: support W5200 This adds support for the W5200 chip. W5100 and W5200 have similar memory maps, although some of their offsets are different. The register access sequences between them are different, but the w5100 driver has an abstraction layer for the different bus interface modes, so it is easy to add W5200 support to the w5100 and w5100-spi drivers. Signed-off-by: Akinobu Mita Cc: Mike Sinkovsky Cc: David S. Miller Signed-off-by: David S.
Miller --- drivers/net/ethernet/wiznet/Kconfig | 2 +- drivers/net/ethernet/wiznet/w5100-spi.c | 174 +++++++++++++++++++++++- drivers/net/ethernet/wiznet/w5100.c | 157 +++++++++++++++------ drivers/net/ethernet/wiznet/w5100.h | 6 + 4 files changed, 290 insertions(+), 49 deletions(-) diff --git a/drivers/net/ethernet/wiznet/Kconfig b/drivers/net/ethernet/wiznet/Kconfig index d1ab353790de..1f15376e9856 100644 --- a/drivers/net/ethernet/wiznet/Kconfig +++ b/drivers/net/ethernet/wiznet/Kconfig @@ -70,7 +70,7 @@ config WIZNET_BUS_ANY endchoice config WIZNET_W5100_SPI - tristate "WIZnet W5100 Ethernet support for SPI mode" + tristate "WIZnet W5100/W5200 Ethernet support for SPI mode" depends on WIZNET_BUS_ANY depends on SPI ---help--- diff --git a/drivers/net/ethernet/wiznet/w5100-spi.c b/drivers/net/ethernet/wiznet/w5100-spi.c index 32f406cfce4b..598a7b00fdb9 100644 --- a/drivers/net/ethernet/wiznet/w5100-spi.c +++ b/drivers/net/ethernet/wiznet/w5100-spi.c @@ -1,9 +1,13 @@ /* - * Ethernet driver for the WIZnet W5100 chip. + * Ethernet driver for the WIZnet W5100/W5200 chip. * * Copyright (C) 2016 Akinobu Mita * * Licensed under the GPL-2 or later. + * + * Datasheet: + * http://www.wiznet.co.kr/wp-content/uploads/wiznethome/Chip/W5100/Document/W5100_Datasheet_v1.2.6.pdf + * http://wiznethome.cafe24.com/wp-content/uploads/wiznethome/Chip/W5200/Documents/W5200_DS_V140E.pdf */ #include @@ -95,6 +99,7 @@ static int w5100_spi_writebulk(struct net_device *ndev, u16 addr, const u8 *buf, static const struct w5100_ops w5100_spi_ops = { .may_sleep = true, + .chip_id = W5100, .read = w5100_spi_read, .write = w5100_spi_write, .read16 = w5100_spi_read16, @@ -103,10 +108,168 @@ static const struct w5100_ops w5100_spi_ops = { .writebulk = w5100_spi_writebulk, }; +#define W5200_SPI_WRITE_OPCODE 0x80 + +struct w5200_spi_priv { + /* Serialize access to cmd_buf */ + struct mutex cmd_lock; + + /* DMA (thus cache coherency maintenance) requires the + * transfer buffers to live in their own cache lines. + */ + u8 cmd_buf[4] ____cacheline_aligned; +}; + +static struct w5200_spi_priv *w5200_spi_priv(struct net_device *ndev) +{ + return w5100_ops_priv(ndev); +} + +static int w5200_spi_init(struct net_device *ndev) +{ + struct w5200_spi_priv *spi_priv = w5200_spi_priv(ndev); + + mutex_init(&spi_priv->cmd_lock); + + return 0; +} + +static int w5200_spi_read(struct net_device *ndev, u16 addr) +{ + struct spi_device *spi = to_spi_device(ndev->dev.parent); + u8 cmd[4] = { addr >> 8, addr & 0xff, 0, 1 }; + u8 data; + int ret; + + ret = spi_write_then_read(spi, cmd, sizeof(cmd), &data, 1); + + return ret ? ret : data; +} + +static int w5200_spi_write(struct net_device *ndev, u16 addr, u8 data) +{ + struct spi_device *spi = to_spi_device(ndev->dev.parent); + u8 cmd[5] = { addr >> 8, addr & 0xff, W5200_SPI_WRITE_OPCODE, 1, data }; + + return spi_write_then_read(spi, cmd, sizeof(cmd), NULL, 0); +} + +static int w5200_spi_read16(struct net_device *ndev, u16 addr) +{ + struct spi_device *spi = to_spi_device(ndev->dev.parent); + u8 cmd[4] = { addr >> 8, addr & 0xff, 0, 2 }; + __be16 data; + int ret; + + ret = spi_write_then_read(spi, cmd, sizeof(cmd), &data, sizeof(data)); + + return ret ? 
ret : be16_to_cpu(data); +} + +static int w5200_spi_write16(struct net_device *ndev, u16 addr, u16 data) +{ + struct spi_device *spi = to_spi_device(ndev->dev.parent); + u8 cmd[6] = { + addr >> 8, addr & 0xff, + W5200_SPI_WRITE_OPCODE, 2, + data >> 8, data & 0xff + }; + + return spi_write_then_read(spi, cmd, sizeof(cmd), NULL, 0); +} + +static int w5200_spi_readbulk(struct net_device *ndev, u16 addr, u8 *buf, + int len) +{ + struct spi_device *spi = to_spi_device(ndev->dev.parent); + struct w5200_spi_priv *spi_priv = w5200_spi_priv(ndev); + struct spi_transfer xfer[] = { + { + .tx_buf = spi_priv->cmd_buf, + .len = sizeof(spi_priv->cmd_buf), + }, + { + .rx_buf = buf, + .len = len, + }, + }; + int ret; + + mutex_lock(&spi_priv->cmd_lock); + + spi_priv->cmd_buf[0] = addr >> 8; + spi_priv->cmd_buf[1] = addr; + spi_priv->cmd_buf[2] = len >> 8; + spi_priv->cmd_buf[3] = len; + ret = spi_sync_transfer(spi, xfer, ARRAY_SIZE(xfer)); + + mutex_unlock(&spi_priv->cmd_lock); + + return ret; +} + +static int w5200_spi_writebulk(struct net_device *ndev, u16 addr, const u8 *buf, + int len) +{ + struct spi_device *spi = to_spi_device(ndev->dev.parent); + struct w5200_spi_priv *spi_priv = w5200_spi_priv(ndev); + struct spi_transfer xfer[] = { + { + .tx_buf = spi_priv->cmd_buf, + .len = sizeof(spi_priv->cmd_buf), + }, + { + .tx_buf = buf, + .len = len, + }, + }; + int ret; + + mutex_lock(&spi_priv->cmd_lock); + + spi_priv->cmd_buf[0] = addr >> 8; + spi_priv->cmd_buf[1] = addr; + spi_priv->cmd_buf[2] = W5200_SPI_WRITE_OPCODE | (len >> 8); + spi_priv->cmd_buf[3] = len; + ret = spi_sync_transfer(spi, xfer, ARRAY_SIZE(xfer)); + + mutex_unlock(&spi_priv->cmd_lock); + + return ret; +} + +static const struct w5100_ops w5200_ops = { + .may_sleep = true, + .chip_id = W5200, + .read = w5200_spi_read, + .write = w5200_spi_write, + .read16 = w5200_spi_read16, + .write16 = w5200_spi_write16, + .readbulk = w5200_spi_readbulk, + .writebulk = w5200_spi_writebulk, + .init = w5200_spi_init, +}; + static int w5100_spi_probe(struct spi_device *spi) { - return w5100_probe(&spi->dev, &w5100_spi_ops, 0, NULL, spi->irq, - -EINVAL); + const struct spi_device_id *id = spi_get_device_id(spi); + const struct w5100_ops *ops; + int priv_size; + + switch (id->driver_data) { + case W5100: + ops = &w5100_spi_ops; + priv_size = 0; + break; + case W5200: + ops = &w5200_ops; + priv_size = sizeof(struct w5200_spi_priv); + break; + default: + return -EINVAL; + } + + return w5100_probe(&spi->dev, ops, priv_size, NULL, spi->irq, -EINVAL); } static int w5100_spi_remove(struct spi_device *spi) @@ -115,7 +278,8 @@ static int w5100_spi_remove(struct spi_device *spi) } static const struct spi_device_id w5100_spi_ids[] = { - { "w5100", 0 }, + { "w5100", W5100 }, + { "w5200", W5200 }, {} }; MODULE_DEVICE_TABLE(spi, w5100_spi_ids); @@ -131,6 +295,6 @@ static struct spi_driver w5100_spi_driver = { }; module_spi_driver(w5100_spi_driver); -MODULE_DESCRIPTION("WIZnet W5100 Ethernet driver for SPI mode"); +MODULE_DESCRIPTION("WIZnet W5100/W5200 Ethernet driver for SPI mode"); MODULE_AUTHOR("Akinobu Mita "); MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c index 42a9de4a48b1..09149c9ebeff 100644 --- a/drivers/net/ethernet/wiznet/w5100.c +++ b/drivers/net/ethernet/wiznet/w5100.c @@ -38,7 +38,7 @@ MODULE_ALIAS("platform:"DRV_NAME); MODULE_LICENSE("GPL"); /* - * Registers + * W5100 and W5100 common registers */ #define W5100_COMMON_REGS 0x0000 #define W5100_MR 0x0000 /* Mode Register */ @@ -52,36 +52,68 
@@ MODULE_LICENSE("GPL"); #define IR_S0 0x01 /* S0 interrupt */ #define W5100_RTR 0x0017 /* Retry Time-value Register */ #define RTR_DEFAULT 2000 /* =0x07d0 (2000) */ -#define W5100_RMSR 0x001a /* Receive Memory Size */ -#define W5100_TMSR 0x001b /* Transmit Memory Size */ #define W5100_COMMON_REGS_LEN 0x0040 -#define W5100_S0_REGS 0x0400 -#define W5100_S0_MR 0x0400 /* S0 Mode Register */ +#define W5100_Sn_MR 0x0000 /* Sn Mode Register */ +#define W5100_Sn_CR 0x0001 /* Sn Command Register */ +#define W5100_Sn_IR 0x0002 /* Sn Interrupt Register */ +#define W5100_Sn_SR 0x0003 /* Sn Status Register */ +#define W5100_Sn_TX_FSR 0x0020 /* Sn Transmit free memory size */ +#define W5100_Sn_TX_RD 0x0022 /* Sn Transmit memory read pointer */ +#define W5100_Sn_TX_WR 0x0024 /* Sn Transmit memory write pointer */ +#define W5100_Sn_RX_RSR 0x0026 /* Sn Receive free memory size */ +#define W5100_Sn_RX_RD 0x0028 /* Sn Receive memory read pointer */ + +#define S0_REGS(priv) (is_w5200(priv) ? W5200_S0_REGS : W5100_S0_REGS) + +#define W5100_S0_MR(priv) (S0_REGS(priv) + W5100_Sn_MR) #define S0_MR_MACRAW 0x04 /* MAC RAW mode (promiscuous) */ #define S0_MR_MACRAW_MF 0x44 /* MAC RAW mode (filtered) */ -#define W5100_S0_CR 0x0401 /* S0 Command Register */ +#define W5100_S0_CR(priv) (S0_REGS(priv) + W5100_Sn_CR) #define S0_CR_OPEN 0x01 /* OPEN command */ #define S0_CR_CLOSE 0x10 /* CLOSE command */ #define S0_CR_SEND 0x20 /* SEND command */ #define S0_CR_RECV 0x40 /* RECV command */ -#define W5100_S0_IR 0x0402 /* S0 Interrupt Register */ +#define W5100_S0_IR(priv) (S0_REGS(priv) + W5100_Sn_IR) #define S0_IR_SENDOK 0x10 /* complete sending */ #define S0_IR_RECV 0x04 /* receiving data */ -#define W5100_S0_SR 0x0403 /* S0 Status Register */ +#define W5100_S0_SR(priv) (S0_REGS(priv) + W5100_Sn_SR) #define S0_SR_MACRAW 0x42 /* mac raw mode */ -#define W5100_S0_TX_FSR 0x0420 /* S0 Transmit free memory size */ -#define W5100_S0_TX_RD 0x0422 /* S0 Transmit memory read pointer */ -#define W5100_S0_TX_WR 0x0424 /* S0 Transmit memory write pointer */ -#define W5100_S0_RX_RSR 0x0426 /* S0 Receive free memory size */ -#define W5100_S0_RX_RD 0x0428 /* S0 Receive memory read pointer */ +#define W5100_S0_TX_FSR(priv) (S0_REGS(priv) + W5100_Sn_TX_FSR) +#define W5100_S0_TX_RD(priv) (S0_REGS(priv) + W5100_Sn_TX_RD) +#define W5100_S0_TX_WR(priv) (S0_REGS(priv) + W5100_Sn_TX_WR) +#define W5100_S0_RX_RSR(priv) (S0_REGS(priv) + W5100_Sn_RX_RSR) +#define W5100_S0_RX_RD(priv) (S0_REGS(priv) + W5100_Sn_RX_RD) + #define W5100_S0_REGS_LEN 0x0040 +/* + * W5100 specific registers + */ +#define W5100_RMSR 0x001a /* Receive Memory Size */ +#define W5100_TMSR 0x001b /* Transmit Memory Size */ + +#define W5100_S0_REGS 0x0400 + #define W5100_TX_MEM_START 0x4000 #define W5100_TX_MEM_SIZE 0x2000 #define W5100_RX_MEM_START 0x6000 #define W5100_RX_MEM_SIZE 0x2000 +/* + * W5200 specific registers + */ +#define W5200_S0_REGS 0x4000 + +#define W5200_Sn_RXMEM_SIZE(n) (0x401e + (n) * 0x0100) /* Sn RX Memory Size */ +#define W5200_Sn_TXMEM_SIZE(n) (0x401f + (n) * 0x0100) /* Sn TX Memory Size */ +#define W5200_S0_IMR 0x402c /* S0 Interrupt Mask Register */ + +#define W5200_TX_MEM_START 0x8000 +#define W5200_TX_MEM_SIZE 0x4000 +#define W5200_RX_MEM_START 0xc000 +#define W5200_RX_MEM_SIZE 0x4000 + /* * Device driver private data structure */ @@ -105,6 +137,11 @@ struct w5100_priv { struct work_struct restart_work; }; +static inline bool is_w5200(struct w5100_priv *priv) +{ + return priv->ops->chip_id == W5200; +} + 
/************************************************************************ * * Lowlevel I/O functions @@ -217,6 +254,7 @@ static int w5100_mmio_init(struct net_device *ndev) } static const struct w5100_ops w5100_mmio_direct_ops = { + .chip_id = W5100, .read = w5100_read_direct, .write = w5100_write_direct, .read16 = w5100_read16_direct, @@ -341,6 +379,7 @@ static int w5100_reset_indirect(struct net_device *ndev) } static const struct w5100_ops w5100_mmio_indirect_ops = { + .chip_id = W5100, .read = w5100_read_indirect, .write = w5100_write_indirect, .read16 = w5100_read16_indirect, @@ -457,20 +496,24 @@ static int w5100_readbuf(struct w5100_priv *priv, u16 offset, u8 *buf, int len) u16 addr; int remain = 0; int ret; + const u16 mem_start = + is_w5200(priv) ? W5200_RX_MEM_START : W5100_RX_MEM_START; + const u16 mem_size = + is_w5200(priv) ? W5200_RX_MEM_SIZE : W5100_RX_MEM_SIZE; - offset %= W5100_RX_MEM_SIZE; - addr = W5100_RX_MEM_START + offset; + offset %= mem_size; + addr = mem_start + offset; - if (offset + len > W5100_RX_MEM_SIZE) { - remain = (offset + len) % W5100_RX_MEM_SIZE; - len = W5100_RX_MEM_SIZE - offset; + if (offset + len > mem_size) { + remain = (offset + len) % mem_size; + len = mem_size - offset; } ret = w5100_readbulk(priv, addr, buf, len); if (ret || !remain) return ret; - return w5100_readbulk(priv, W5100_RX_MEM_START, buf + len, remain); + return w5100_readbulk(priv, mem_start, buf + len, remain); } static int w5100_writebuf(struct w5100_priv *priv, u16 offset, const u8 *buf, @@ -479,20 +522,24 @@ static int w5100_writebuf(struct w5100_priv *priv, u16 offset, const u8 *buf, u16 addr; int ret; int remain = 0; + const u16 mem_start = + is_w5200(priv) ? W5200_TX_MEM_START : W5100_TX_MEM_START; + const u16 mem_size = + is_w5200(priv) ? 
W5200_TX_MEM_SIZE : W5100_TX_MEM_SIZE; - offset %= W5100_TX_MEM_SIZE; - addr = W5100_TX_MEM_START + offset; + offset %= mem_size; + addr = mem_start + offset; - if (offset + len > W5100_TX_MEM_SIZE) { - remain = (offset + len) % W5100_TX_MEM_SIZE; - len = W5100_TX_MEM_SIZE - offset; + if (offset + len > mem_size) { + remain = (offset + len) % mem_size; + len = mem_size - offset; } ret = w5100_writebulk(priv, addr, buf, len); if (ret || !remain) return ret; - return w5100_writebulk(priv, W5100_TX_MEM_START, buf + len, remain); + return w5100_writebulk(priv, mem_start, buf + len, remain); } static int w5100_reset(struct w5100_priv *priv) @@ -511,11 +558,11 @@ static int w5100_command(struct w5100_priv *priv, u16 cmd) { unsigned long timeout; - w5100_write(priv, W5100_S0_CR, cmd); + w5100_write(priv, W5100_S0_CR(priv), cmd); timeout = jiffies + msecs_to_jiffies(100); - while (w5100_read(priv, W5100_S0_CR) != 0) { + while (w5100_read(priv, W5100_S0_CR(priv)) != 0) { if (time_after(jiffies, timeout)) return -EIO; cpu_relax(); @@ -531,13 +578,8 @@ static void w5100_write_macaddr(struct w5100_priv *priv) w5100_writebulk(priv, W5100_SHAR, ndev->dev_addr, ETH_ALEN); } -static void w5100_hw_reset(struct w5100_priv *priv) +static void w5100_memory_configure(struct w5100_priv *priv) { - w5100_reset(priv); - - w5100_write(priv, W5100_IMR, 0); - w5100_write_macaddr(priv); - /* Configure 16K of internal memory * as 8K RX buffer and 8K TX buffer */ @@ -545,9 +587,38 @@ static void w5100_hw_reset(struct w5100_priv *priv) w5100_write(priv, W5100_TMSR, 0x03); } +static void w5200_memory_configure(struct w5100_priv *priv) +{ + int i; + + /* Configure internal RX memory as 16K RX buffer and + * internal TX memory as 16K TX buffer + */ + w5100_write(priv, W5200_Sn_RXMEM_SIZE(0), 0x10); + w5100_write(priv, W5200_Sn_TXMEM_SIZE(0), 0x10); + + for (i = 1; i < 8; i++) { + w5100_write(priv, W5200_Sn_RXMEM_SIZE(i), 0); + w5100_write(priv, W5200_Sn_TXMEM_SIZE(i), 0); + } +} + +static void w5100_hw_reset(struct w5100_priv *priv) +{ + w5100_reset(priv); + + w5100_write(priv, W5100_IMR, 0); + w5100_write_macaddr(priv); + + if (is_w5200(priv)) + w5200_memory_configure(priv); + else + w5100_memory_configure(priv); +} + static void w5100_hw_start(struct w5100_priv *priv) { - w5100_write(priv, W5100_S0_MR, priv->promisc ? + w5100_write(priv, W5100_S0_MR(priv), priv->promisc ? 
S0_MR_MACRAW : S0_MR_MACRAW_MF); w5100_command(priv, S0_CR_OPEN); w5100_write(priv, W5100_IMR, IR_S0); @@ -611,7 +682,7 @@ static void w5100_get_regs(struct net_device *ndev, regs->version = 1; w5100_readbulk(priv, W5100_COMMON_REGS, buf, W5100_COMMON_REGS_LEN); buf += W5100_COMMON_REGS_LEN; - w5100_readbulk(priv, W5100_S0_REGS, buf, W5100_S0_REGS_LEN); + w5100_readbulk(priv, S0_REGS(priv), buf, W5100_S0_REGS_LEN); } static void w5100_restart(struct net_device *ndev) @@ -649,9 +720,9 @@ static void w5100_tx_skb(struct net_device *ndev, struct sk_buff *skb) struct w5100_priv *priv = netdev_priv(ndev); u16 offset; - offset = w5100_read16(priv, W5100_S0_TX_WR); + offset = w5100_read16(priv, W5100_S0_TX_WR(priv)); w5100_writebuf(priv, offset, skb->data, skb->len); - w5100_write16(priv, W5100_S0_TX_WR, offset + skb->len); + w5100_write16(priv, W5100_S0_TX_WR(priv), offset + skb->len); ndev->stats.tx_bytes += skb->len; ndev->stats.tx_packets++; dev_kfree_skb(skb); @@ -696,18 +767,18 @@ static struct sk_buff *w5100_rx_skb(struct net_device *ndev) u16 rx_len; u16 offset; u8 header[2]; - u16 rx_buf_len = w5100_read16(priv, W5100_S0_RX_RSR); + u16 rx_buf_len = w5100_read16(priv, W5100_S0_RX_RSR(priv)); if (rx_buf_len == 0) return NULL; - offset = w5100_read16(priv, W5100_S0_RX_RD); + offset = w5100_read16(priv, W5100_S0_RX_RD(priv)); w5100_readbuf(priv, offset, header, 2); rx_len = get_unaligned_be16(header) - 2; skb = netdev_alloc_skb_ip_align(ndev, rx_len); if (unlikely(!skb)) { - w5100_write16(priv, W5100_S0_RX_RD, offset + rx_buf_len); + w5100_write16(priv, W5100_S0_RX_RD(priv), offset + rx_buf_len); w5100_command(priv, S0_CR_RECV); ndev->stats.rx_dropped++; return NULL; @@ -715,7 +786,7 @@ static struct sk_buff *w5100_rx_skb(struct net_device *ndev) skb_put(skb, rx_len); w5100_readbuf(priv, offset + 2, skb->data, rx_len); - w5100_write16(priv, W5100_S0_RX_RD, offset + 2 + rx_len); + w5100_write16(priv, W5100_S0_RX_RD(priv), offset + 2 + rx_len); w5100_command(priv, S0_CR_RECV); skb->protocol = eth_type_trans(skb, ndev); @@ -764,10 +835,10 @@ static irqreturn_t w5100_interrupt(int irq, void *ndev_instance) struct net_device *ndev = ndev_instance; struct w5100_priv *priv = netdev_priv(ndev); - int ir = w5100_read(priv, W5100_S0_IR); + int ir = w5100_read(priv, W5100_S0_IR(priv)); if (!ir) return IRQ_NONE; - w5100_write(priv, W5100_S0_IR, ir); + w5100_write(priv, W5100_S0_IR(priv), ir); if (ir & S0_IR_SENDOK) { netif_dbg(priv, tx_done, ndev, "tx done\n"); diff --git a/drivers/net/ethernet/wiznet/w5100.h b/drivers/net/ethernet/wiznet/w5100.h index 69045f0f9e10..9b1fa23b46fe 100644 --- a/drivers/net/ethernet/wiznet/w5100.h +++ b/drivers/net/ethernet/wiznet/w5100.h @@ -7,8 +7,14 @@ * Licensed under the GPL-2 or later. */ +enum { + W5100, + W5200, +}; + struct w5100_ops { bool may_sleep; + int chip_id; int (*read)(struct net_device *ndev, u16 addr); int (*write)(struct net_device *ndev, u16 addr, u8 data); int (*read16)(struct net_device *ndev, u16 addr); From aed069df099cd1a27900acb56bb892ec24c66ac4 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Thu, 14 Apr 2016 15:33:37 -0400 Subject: [PATCH 0777/1649] ip_tunnel_core: iptunnel_handle_offloads returns int and doesn't free skb This patch updates the IP tunnel core function iptunnel_handle_offloads so that we return an int and do not free the skb inside the function. 
This actually allows us to clean up several paths in several tunnels so that we can free the skb at one point in the path without having to have a secondary path if we are supporting tunnel offloads. In addition it should resolve some double-free issues I have found in the tunnels paths as I believe it is possible for us to end up triggering such an event in the case of fou or gue. Signed-off-by: Alexander Duyck Signed-off-by: David S. Miller --- drivers/net/geneve.c | 32 ++++++++++++-------------------- drivers/net/vxlan.c | 6 +++--- include/net/ip_tunnels.h | 2 +- include/net/udp_tunnel.h | 3 +-- net/ipv4/fou.c | 16 ++++++++-------- net/ipv4/ip_gre.c | 20 ++++++-------------- net/ipv4/ip_tunnel_core.c | 13 +++++-------- net/ipv4/ipip.c | 7 +++---- net/ipv6/sit.c | 14 ++++++-------- net/netfilter/ipvs/ip_vs_xmit.c | 6 ++---- 10 files changed, 47 insertions(+), 72 deletions(-) diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index a9fbf17eb256..efbc7ceedc3a 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -696,16 +696,12 @@ static int geneve_build_skb(struct rtable *rt, struct sk_buff *skb, min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len + GENEVE_BASE_HLEN + opt_len + sizeof(struct iphdr); err = skb_cow_head(skb, min_headroom); - if (unlikely(err)) { - kfree_skb(skb); + if (unlikely(err)) goto free_rt; - } - skb = udp_tunnel_handle_offloads(skb, udp_sum); - if (IS_ERR(skb)) { - err = PTR_ERR(skb); + err = udp_tunnel_handle_offloads(skb, udp_sum); + if (err) goto free_rt; - } gnvh = (struct genevehdr *)__skb_push(skb, sizeof(*gnvh) + opt_len); geneve_build_header(gnvh, tun_flags, vni, opt_len, opt); @@ -733,16 +729,12 @@ static int geneve6_build_skb(struct dst_entry *dst, struct sk_buff *skb, min_headroom = LL_RESERVED_SPACE(dst->dev) + dst->header_len + GENEVE_BASE_HLEN + opt_len + sizeof(struct ipv6hdr); err = skb_cow_head(skb, min_headroom); - if (unlikely(err)) { - kfree_skb(skb); + if (unlikely(err)) goto free_dst; - } - skb = udp_tunnel_handle_offloads(skb, udp_sum); - if (IS_ERR(skb)) { - err = PTR_ERR(skb); + err = udp_tunnel_handle_offloads(skb, udp_sum); + if (IS_ERR(skb)) goto free_dst; - } gnvh = (struct genevehdr *)__skb_push(skb, sizeof(*gnvh) + opt_len); geneve_build_header(gnvh, tun_flags, vni, opt_len, opt); @@ -937,7 +929,7 @@ static netdev_tx_t geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev, err = geneve_build_skb(rt, skb, key->tun_flags, vni, info->options_len, opts, flags, xnet); if (unlikely(err)) - goto err; + goto tx_error; tos = ip_tunnel_ecn_encap(key->tos, iip, skb); ttl = key->ttl; @@ -946,7 +938,7 @@ static netdev_tx_t geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev, err = geneve_build_skb(rt, skb, 0, geneve->vni, 0, NULL, flags, xnet); if (unlikely(err)) - goto err; + goto tx_error; tos = ip_tunnel_ecn_encap(fl4.flowi4_tos, iip, skb); ttl = geneve->ttl; @@ -964,7 +956,7 @@ static netdev_tx_t geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev, tx_error: dev_kfree_skb(skb); -err: + if (err == -ELOOP) dev->stats.collisions++; else if (err == -ENETUNREACH) @@ -1026,7 +1018,7 @@ static netdev_tx_t geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev, info->options_len, opts, flags, xnet); if (unlikely(err)) - goto err; + goto tx_error; prio = ip_tunnel_ecn_encap(key->tos, iip, skb); ttl = key->ttl; @@ -1035,7 +1027,7 @@ static netdev_tx_t geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev, err = geneve6_build_skb(dst, skb, 0, geneve->vni, 0, NULL, flags, xnet); if 
(unlikely(err)) - goto err; + goto tx_error; prio = ip_tunnel_ecn_encap(ip6_tclass(fl6.flowlabel), iip, skb); @@ -1054,7 +1046,7 @@ static netdev_tx_t geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev, tx_error: dev_kfree_skb(skb); -err: + if (err == -ELOOP) dev->stats.collisions++; else if (err == -ENETUNREACH) diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index a7112b3bc9b4..c2e22c2532a1 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -1797,9 +1797,9 @@ static int vxlan_build_skb(struct sk_buff *skb, struct dst_entry *dst, if (WARN_ON(!skb)) return -ENOMEM; - skb = iptunnel_handle_offloads(skb, type); - if (IS_ERR(skb)) - return PTR_ERR(skb); + err = iptunnel_handle_offloads(skb, type); + if (err) + goto out_free; vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh)); vxh->vx_flags = VXLAN_HF_VNI; diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h index 9ae9fbbccd67..6d790910ebdf 100644 --- a/include/net/ip_tunnels.h +++ b/include/net/ip_tunnels.h @@ -309,7 +309,7 @@ void iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb, struct metadata_dst *iptunnel_metadata_reply(struct metadata_dst *md, gfp_t flags); -struct sk_buff *iptunnel_handle_offloads(struct sk_buff *skb, int gso_type_mask); +int iptunnel_handle_offloads(struct sk_buff *skb, int gso_type_mask); static inline int iptunnel_pull_offloads(struct sk_buff *skb) { diff --git a/include/net/udp_tunnel.h b/include/net/udp_tunnel.h index 2dcf1de948ac..4f543262dd81 100644 --- a/include/net/udp_tunnel.h +++ b/include/net/udp_tunnel.h @@ -105,8 +105,7 @@ struct metadata_dst *udp_tun_rx_dst(struct sk_buff *skb, unsigned short family, __be16 flags, __be64 tunnel_id, int md_size); -static inline struct sk_buff *udp_tunnel_handle_offloads(struct sk_buff *skb, - bool udp_csum) +static inline int udp_tunnel_handle_offloads(struct sk_buff *skb, bool udp_csum) { int type = udp_csum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL; diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c index d039f8fff57f..7ac5ec87b004 100644 --- a/net/ipv4/fou.c +++ b/net/ipv4/fou.c @@ -802,11 +802,11 @@ int fou_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e, int type = e->flags & TUNNEL_ENCAP_FLAG_CSUM ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL; __be16 sport; + int err; - skb = iptunnel_handle_offloads(skb, type); - - if (IS_ERR(skb)) - return PTR_ERR(skb); + err = iptunnel_handle_offloads(skb, type); + if (err) + return err; sport = e->sport ? : udp_flow_src_port(dev_net(skb->dev), skb, 0, 0, false); @@ -826,6 +826,7 @@ int gue_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e, __be16 sport; void *data; bool need_priv = false; + int err; if ((e->flags & TUNNEL_ENCAP_FLAG_REMCSUM) && skb->ip_summed == CHECKSUM_PARTIAL) { @@ -836,10 +837,9 @@ int gue_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e, optlen += need_priv ? GUE_LEN_PRIV : 0; - skb = iptunnel_handle_offloads(skb, type); - - if (IS_ERR(skb)) - return PTR_ERR(skb); + err = iptunnel_handle_offloads(skb, type); + if (err) + return err; /* Get source port (based on flow hash) before skb_push */ sport = e->sport ? 
: udp_flow_src_port(dev_net(skb->dev), diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index af5d1f38217f..eedd829a2f87 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c @@ -500,8 +500,7 @@ static void __gre_xmit(struct sk_buff *skb, struct net_device *dev, ip_tunnel_xmit(skb, dev, tnl_params, tnl_params->protocol); } -static struct sk_buff *gre_handle_offloads(struct sk_buff *skb, - bool csum) +static int gre_handle_offloads(struct sk_buff *skb, bool csum) { return iptunnel_handle_offloads(skb, csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE); } @@ -568,11 +567,8 @@ static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev) } /* Push Tunnel header. */ - skb = gre_handle_offloads(skb, !!(tun_info->key.tun_flags & TUNNEL_CSUM)); - if (IS_ERR(skb)) { - skb = NULL; + if (gre_handle_offloads(skb, !!(tun_info->key.tun_flags & TUNNEL_CSUM))) goto err_free_rt; - } flags = tun_info->key.tun_flags & (TUNNEL_CSUM | TUNNEL_KEY); build_header(skb, tunnel_hlen, flags, htons(ETH_P_TEB), @@ -640,16 +636,14 @@ static netdev_tx_t ipgre_xmit(struct sk_buff *skb, tnl_params = &tunnel->parms.iph; } - skb = gre_handle_offloads(skb, !!(tunnel->parms.o_flags&TUNNEL_CSUM)); - if (IS_ERR(skb)) - goto out; + if (gre_handle_offloads(skb, !!(tunnel->parms.o_flags & TUNNEL_CSUM))) + goto free_skb; __gre_xmit(skb, dev, tnl_params, skb->protocol); return NETDEV_TX_OK; free_skb: kfree_skb(skb); -out: dev->stats.tx_dropped++; return NETDEV_TX_OK; } @@ -664,9 +658,8 @@ static netdev_tx_t gre_tap_xmit(struct sk_buff *skb, return NETDEV_TX_OK; } - skb = gre_handle_offloads(skb, !!(tunnel->parms.o_flags&TUNNEL_CSUM)); - if (IS_ERR(skb)) - goto out; + if (gre_handle_offloads(skb, !!(tunnel->parms.o_flags & TUNNEL_CSUM))) + goto free_skb; if (skb_cow_head(skb, dev->needed_headroom)) goto free_skb; @@ -676,7 +669,6 @@ static netdev_tx_t gre_tap_xmit(struct sk_buff *skb, free_skb: kfree_skb(skb); -out: dev->stats.tx_dropped++; return NETDEV_TX_OK; } diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c index 43445df61efd..f46c5c873831 100644 --- a/net/ipv4/ip_tunnel_core.c +++ b/net/ipv4/ip_tunnel_core.c @@ -146,8 +146,8 @@ struct metadata_dst *iptunnel_metadata_reply(struct metadata_dst *md, } EXPORT_SYMBOL_GPL(iptunnel_metadata_reply); -struct sk_buff *iptunnel_handle_offloads(struct sk_buff *skb, - int gso_type_mask) +int iptunnel_handle_offloads(struct sk_buff *skb, + int gso_type_mask) { int err; @@ -159,9 +159,9 @@ struct sk_buff *iptunnel_handle_offloads(struct sk_buff *skb, if (skb_is_gso(skb)) { err = skb_unclone(skb, GFP_ATOMIC); if (unlikely(err)) - goto error; + return err; skb_shinfo(skb)->gso_type |= gso_type_mask; - return skb; + return 0; } if (skb->ip_summed != CHECKSUM_PARTIAL) { @@ -174,10 +174,7 @@ struct sk_buff *iptunnel_handle_offloads(struct sk_buff *skb, skb->encapsulation = 0; } - return skb; -error: - kfree_skb(skb); - return ERR_PTR(err); + return 0; } EXPORT_SYMBOL_GPL(iptunnel_handle_offloads); diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c index ec51d02166de..92827483ee3d 100644 --- a/net/ipv4/ipip.c +++ b/net/ipv4/ipip.c @@ -219,9 +219,8 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) if (unlikely(skb->protocol != htons(ETH_P_IP))) goto tx_error; - skb = iptunnel_handle_offloads(skb, SKB_GSO_IPIP); - if (IS_ERR(skb)) - goto out; + if (iptunnel_handle_offloads(skb, SKB_GSO_IPIP)) + goto tx_error; skb_set_inner_ipproto(skb, IPPROTO_IPIP); @@ -230,7 +229,7 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) 
tx_error: kfree_skb(skb); -out: + dev->stats.tx_errors++; return NETDEV_TX_OK; } diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index 83384308d032..a13d8c114ccb 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c @@ -913,10 +913,9 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb, goto tx_error; } - skb = iptunnel_handle_offloads(skb, SKB_GSO_SIT); - if (IS_ERR(skb)) { + if (iptunnel_handle_offloads(skb, SKB_GSO_SIT)) { ip_rt_put(rt); - goto out; + goto tx_error; } if (df) { @@ -992,7 +991,6 @@ tx_error_icmp: dst_link_failure(skb); tx_error: kfree_skb(skb); -out: dev->stats.tx_errors++; return NETDEV_TX_OK; } @@ -1002,15 +1000,15 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) struct ip_tunnel *tunnel = netdev_priv(dev); const struct iphdr *tiph = &tunnel->parms.iph; - skb = iptunnel_handle_offloads(skb, SKB_GSO_IPIP); - if (IS_ERR(skb)) - goto out; + if (iptunnel_handle_offloads(skb, SKB_GSO_IPIP)) + goto tx_error; skb_set_inner_ipproto(skb, IPPROTO_IPIP); ip_tunnel_xmit(skb, dev, tiph, IPPROTO_IPIP); return NETDEV_TX_OK; -out: +tx_error: + kfree_skb(skb); dev->stats.tx_errors++; return NETDEV_TX_OK; } diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c index dc196a0f501d..6d19d2eeaa60 100644 --- a/net/netfilter/ipvs/ip_vs_xmit.c +++ b/net/netfilter/ipvs/ip_vs_xmit.c @@ -1013,8 +1013,7 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, if (IS_ERR(skb)) goto tx_error; - skb = iptunnel_handle_offloads(skb, __tun_gso_type_mask(AF_INET, cp->af)); - if (IS_ERR(skb)) + if (iptunnel_handle_offloads(skb, __tun_gso_type_mask(AF_INET, cp->af))) goto tx_error; skb->transport_header = skb->network_header; @@ -1105,8 +1104,7 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, if (IS_ERR(skb)) goto tx_error; - skb = iptunnel_handle_offloads(skb, __tun_gso_type_mask(AF_INET6, cp->af)); - if (IS_ERR(skb)) + if (iptunnel_handle_offloads(skb, __tun_gso_type_mask(AF_INET6, cp->af))) goto tx_error; skb->transport_header = skb->network_header; From a9e242ca43b13e5a5d176f97dfd2481c339934b7 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Thu, 14 Apr 2016 15:33:45 -0400 Subject: [PATCH 0778/1649] ip6gretap: Fix MTU to allow for Ethernet header When we were creating an ip6gretap interface the MTU was about 6 bytes short of what was needed. It turns out we were not taking the Ethernet header into account and as a result we were eating into the 8 bytes reserved for the encap limit. Signed-off-by: Alexander Duyck Signed-off-by: David S. Miller --- net/ipv6/ip6_gre.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index 4e636e60a360..2be66e7b4a78 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c @@ -987,6 +987,8 @@ static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu) dev->mtu = rt->dst.dev->mtu - addend; if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) dev->mtu -= 8; + if (dev->type == ARPHRD_ETHER) + dev->mtu -= ETH_HLEN; if (dev->mtu < IPV6_MIN_MTU) dev->mtu = IPV6_MIN_MTU; From ac4eb009e4776e9ef4c0484865c2f5a3786eecae Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Thu, 14 Apr 2016 15:33:51 -0400 Subject: [PATCH 0779/1649] ip6gre: Add support for basic offloads excluding GSO This patch adds support for the basic offloads we support on most devices. Specifically with this patch set we can support checksum offload, basic scatter-gather, and highdma. Signed-off-by: Alexander Duyck Signed-off-by: David S. 
Miller --- net/ipv6/ip6_gre.c | 23 +++++++++++++++++++++-- 1 file changed, 21 insertions(+), 2 deletions(-) diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index 2be66e7b4a78..1a5ad143be40 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c @@ -598,6 +598,18 @@ static void init_tel_txopt(struct ipv6_tel_txoption *opt, __u8 encap_limit) opt->ops.opt_nflen = 8; } +static __sum16 gre6_checksum(struct sk_buff *skb) +{ + __wsum csum; + + if (skb->ip_summed == CHECKSUM_PARTIAL) + csum = lco_csum(skb); + else + csum = skb_checksum(skb, sizeof(struct ipv6hdr), + skb->len - sizeof(struct ipv6hdr), 0); + return csum_fold(csum); +} + static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb, struct net_device *dev, __u8 dsfield, @@ -750,8 +762,7 @@ static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb, } if (tunnel->parms.o_flags&GRE_CSUM) { *ptr = 0; - *(__sum16 *)ptr = ip_compute_csum((void *)(ipv6h+1), - skb->len - sizeof(struct ipv6hdr)); + *(__sum16 *)ptr = gre6_checksum(skb); } } @@ -1507,6 +1518,11 @@ static const struct net_device_ops ip6gre_tap_netdev_ops = { .ndo_get_iflink = ip6_tnl_get_iflink, }; +#define GRE6_FEATURES (NETIF_F_SG | \ + NETIF_F_FRAGLIST | \ + NETIF_F_HIGHDMA | \ + NETIF_F_HW_CSUM) + static void ip6gre_tap_setup(struct net_device *dev) { @@ -1540,6 +1556,9 @@ static int ip6gre_newlink(struct net *src_net, struct net_device *dev, nt->net = dev_net(dev); ip6gre_tnl_link_config(nt, !tb[IFLA_MTU]); + dev->features |= GRE6_FEATURES; + dev->hw_features |= GRE6_FEATURES; + /* Can use a lockless transmit, unless we generate output sequences */ if (!(nt->parms.o_flags & GRE_SEQ)) dev->features |= NETIF_F_LLTX; From e0c20967c8a653d0213238621381e224d8f065fc Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Thu, 14 Apr 2016 15:33:58 -0400 Subject: [PATCH 0780/1649] GRE: Add support for GRO/GSO of IPv6 GRE traffic Since GRE doesn't really care about L3 protocol we can support IPv4 and IPv6 using the same offloads. With that being the case we can add a call to register the offloads for IPv6 as a part of our GRE offload initialization. Signed-off-by: Alexander Duyck Signed-off-by: David S. Miller --- net/ipv4/gre_offload.c | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c index 20557f211408..e88190a8699a 100644 --- a/net/ipv4/gre_offload.c +++ b/net/ipv4/gre_offload.c @@ -292,6 +292,18 @@ static const struct net_offload gre_offload = { static int __init gre_offload_init(void) { - return inet_add_offload(&gre_offload, IPPROTO_GRE); + int err; + + err = inet_add_offload(&gre_offload, IPPROTO_GRE); +#if IS_ENABLED(CONFIG_IPV6) + if (err) + return err; + + err = inet6_add_offload(&gre_offload, IPPROTO_GRE); + if (err) + inet_del_offload(&gre_offload, IPPROTO_GRE); +#endif + + return err; } device_initcall(gre_offload_init); From 3a80e1facd3c825c5ac804bc2efe118872832e33 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Thu, 14 Apr 2016 15:34:04 -0400 Subject: [PATCH 0781/1649] ip6gre: Add support for GSO This patch adds code borrowed from bits and pieces of other protocols to the IPv6 GRE path so that we can support GSO over IPv6 based GRE tunnels. By adding this support we are able to significantly improve the throughput for GRE tunnels as we are able to make use of GSO. Signed-off-by: Alexander Duyck Signed-off-by: David S. 
Miller --- net/ipv6/ip6_gre.c | 56 +++++++++++++++++++++++++++------------------- 1 file changed, 33 insertions(+), 23 deletions(-) diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index 1a5ad143be40..ca5a2c5675c5 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c @@ -621,7 +621,7 @@ static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb, struct net *net = tunnel->net; struct net_device *tdev; /* Device to other host */ struct ipv6hdr *ipv6h; /* Our new IP header */ - unsigned int max_headroom = 0; /* The extra header space needed */ + unsigned int min_headroom = 0; /* The extra header space needed */ int gre_hlen; struct ipv6_tel_txoption opt; int mtu; @@ -629,7 +629,6 @@ static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb, struct net_device_stats *stats = &tunnel->dev->stats; int err = -1; u8 proto; - struct sk_buff *new_skb; __be16 protocol; if (dev->type == ARPHRD_ETHER) @@ -672,14 +671,14 @@ static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb, mtu = dst_mtu(dst) - sizeof(*ipv6h); if (encap_limit >= 0) { - max_headroom += 8; + min_headroom += 8; mtu -= 8; } if (mtu < IPV6_MIN_MTU) mtu = IPV6_MIN_MTU; if (skb_dst(skb)) skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu); - if (skb->len > mtu) { + if (skb->len > mtu && !skb_is_gso(skb)) { *pmtu = mtu; err = -EMSGSIZE; goto tx_err_dst_release; @@ -697,20 +696,19 @@ static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb, skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(dev))); - max_headroom += LL_RESERVED_SPACE(tdev) + gre_hlen + dst->header_len; + min_headroom += LL_RESERVED_SPACE(tdev) + gre_hlen + dst->header_len; - if (skb_headroom(skb) < max_headroom || skb_shared(skb) || - (skb_cloned(skb) && !skb_clone_writable(skb, 0))) { - new_skb = skb_realloc_headroom(skb, max_headroom); - if (max_headroom > dev->needed_headroom) - dev->needed_headroom = max_headroom; - if (!new_skb) + if (skb_headroom(skb) < min_headroom || skb_header_cloned(skb)) { + int head_delta = SKB_DATA_ALIGN(min_headroom - + skb_headroom(skb) + + 16); + + err = pskb_expand_head(skb, max_t(int, head_delta, 0), + 0, GFP_ATOMIC); + if (min_headroom > dev->needed_headroom) + dev->needed_headroom = min_headroom; + if (unlikely(err)) goto tx_err_dst_release; - - if (skb->sk) - skb_set_owner_w(new_skb, skb->sk); - consume_skb(skb); - skb = new_skb; } if (!fl6->flowi6_mark && ndst) @@ -723,10 +721,11 @@ static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb, ipv6_push_nfrag_opts(skb, &opt.ops, &proto, NULL); } - if (likely(!skb->encapsulation)) { - skb_reset_inner_headers(skb); - skb->encapsulation = 1; - } + err = iptunnel_handle_offloads(skb, + (tunnel->parms.o_flags & GRE_CSUM) ? + SKB_GSO_GRE_CSUM : SKB_GSO_GRE); + if (err) + goto tx_err_dst_release; skb_push(skb, gre_hlen); skb_reset_network_header(skb); @@ -760,7 +759,9 @@ static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb, *ptr = tunnel->parms.o_key; ptr--; } - if (tunnel->parms.o_flags&GRE_CSUM) { + if ((tunnel->parms.o_flags & GRE_CSUM) && + !(skb_shinfo(skb)->gso_type & + (SKB_GSO_GRE | SKB_GSO_GRE_CSUM))) { *ptr = 0; *(__sum16 *)ptr = gre6_checksum(skb); } @@ -1559,9 +1560,18 @@ static int ip6gre_newlink(struct net *src_net, struct net_device *dev, dev->features |= GRE6_FEATURES; dev->hw_features |= GRE6_FEATURES; - /* Can use a lockless transmit, unless we generate output sequences */ - if (!(nt->parms.o_flags & GRE_SEQ)) + if (!(nt->parms.o_flags & GRE_SEQ)) { + /* TCP segmentation offload is not supported when we + * generate output sequences. 
+ */ + dev->features |= NETIF_F_GSO_SOFTWARE; + dev->hw_features |= NETIF_F_GSO_SOFTWARE; + + /* Can use a lockless transmit, unless we generate + * output sequences + */ dev->features |= NETIF_F_LLTX; + } err = register_netdevice(dev); if (err) From 756ca874417695f77941948a77e9b8562635cc0a Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Thu, 14 Apr 2016 17:04:34 -0400 Subject: [PATCH 0782/1649] netdev_features: Add NETIF_F_TSO_MANGLEID to NETIF_F_ALL_TSO I realized that when I added NETIF_F_TSO_MANGLEID as a TSO type I forgot to add it to NETIF_F_ALL_TSO. This patch corrects that so the flag will be included correctly. The result should be minor as it was only used by a few drivers and in a few specific cases such as when NETIF_F_SG was not supported on a device so the TSO flags were cleared. Signed-off-by: Alexander Duyck Signed-off-by: David S. Miller --- include/linux/netdev_features.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h index 9fc79df0e561..15eb0b12fff9 100644 --- a/include/linux/netdev_features.h +++ b/include/linux/netdev_features.h @@ -164,7 +164,8 @@ enum { #define NETIF_F_CSUM_MASK (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | \ NETIF_F_HW_CSUM) -#define NETIF_F_ALL_TSO (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN) +#define NETIF_F_ALL_TSO (NETIF_F_TSO | NETIF_F_TSO6 | \ + NETIF_F_TSO_ECN | NETIF_F_TSO_MANGLEID) #define NETIF_F_ALL_FCOE (NETIF_F_FCOE_CRC | NETIF_F_FCOE_MTU | \ NETIF_F_FSO) From 48ace4ef4c3f99ebf6f801c9a8326a4a39f31dbf Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Thu, 14 Apr 2016 23:47:12 +0200 Subject: [PATCH 0783/1649] dsa: mv88e6xxx: Kill the REG_READ and REG_WRITE macros These macros hide a ds variable and a return statement on error, which can lead to locking issues. Kill them off. Signed-off-by: Andrew Lunn Tested-by: Vivien Didelot Signed-off-by: David S. Miller --- drivers/net/dsa/mv88e6123.c | 13 +- drivers/net/dsa/mv88e6131.c | 41 +++--- drivers/net/dsa/mv88e6171.c | 16 ++- drivers/net/dsa/mv88e6352.c | 15 ++- drivers/net/dsa/mv88e6xxx.c | 241 ++++++++++++++++++++++++++---------- drivers/net/dsa/mv88e6xxx.h | 21 ---- 6 files changed, 224 insertions(+), 123 deletions(-) diff --git a/drivers/net/dsa/mv88e6123.c b/drivers/net/dsa/mv88e6123.c index c34283d929c4..140e44e50e8a 100644 --- a/drivers/net/dsa/mv88e6123.c +++ b/drivers/net/dsa/mv88e6123.c @@ -52,7 +52,9 @@ static int mv88e6123_setup_global(struct dsa_switch *ds) * external PHYs to poll), don't discard packets with * excessive collisions, and mask all interrupt sources. */ - REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL, 0x0000); + ret = mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_CONTROL, 0x0000); + if (ret) + return ret; /* Configure the upstream port, and configure the upstream * port as the port to which ingress and egress monitor frames @@ -61,14 +63,15 @@ static int mv88e6123_setup_global(struct dsa_switch *ds) reg = upstream_port << GLOBAL_MONITOR_CONTROL_INGRESS_SHIFT | upstream_port << GLOBAL_MONITOR_CONTROL_EGRESS_SHIFT | upstream_port << GLOBAL_MONITOR_CONTROL_ARP_SHIFT; - REG_WRITE(REG_GLOBAL, GLOBAL_MONITOR_CONTROL, reg); + ret = mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_MONITOR_CONTROL, reg); + if (ret) + return ret; /* Disable remote management for now, and set the switch's * DSA device number. 
*/ - REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL_2, ds->index & 0x1f); - - return 0; + return mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_CONTROL_2, + ds->index & 0x1f); } static int mv88e6123_setup(struct dsa_switch *ds) diff --git a/drivers/net/dsa/mv88e6131.c b/drivers/net/dsa/mv88e6131.c index f5d75fce1e96..34d297b65040 100644 --- a/drivers/net/dsa/mv88e6131.c +++ b/drivers/net/dsa/mv88e6131.c @@ -49,11 +49,16 @@ static int mv88e6131_setup_global(struct dsa_switch *ds) * to arbitrate between packet queues, set the maximum frame * size to 1632, and mask all interrupt sources. */ - REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL, - GLOBAL_CONTROL_PPU_ENABLE | GLOBAL_CONTROL_MAX_FRAME_1632); + ret = mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_CONTROL, + GLOBAL_CONTROL_PPU_ENABLE | + GLOBAL_CONTROL_MAX_FRAME_1632); + if (ret) + return ret; /* Set the VLAN ethertype to 0x8100. */ - REG_WRITE(REG_GLOBAL, GLOBAL_CORE_TAG_TYPE, 0x8100); + ret = mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_CORE_TAG_TYPE, 0x8100); + if (ret) + return ret; /* Disable ARP mirroring, and configure the upstream port as * the port to which ingress and egress monitor frames are to @@ -62,31 +67,33 @@ static int mv88e6131_setup_global(struct dsa_switch *ds) reg = upstream_port << GLOBAL_MONITOR_CONTROL_INGRESS_SHIFT | upstream_port << GLOBAL_MONITOR_CONTROL_EGRESS_SHIFT | GLOBAL_MONITOR_CONTROL_ARP_DISABLED; - REG_WRITE(REG_GLOBAL, GLOBAL_MONITOR_CONTROL, reg); + ret = mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_MONITOR_CONTROL, reg); + if (ret) + return ret; /* Disable cascade port functionality unless this device * is used in a cascade configuration, and set the switch's * DSA device number. */ if (ds->dst->pd->nr_chips > 1) - REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL_2, - GLOBAL_CONTROL_2_MULTIPLE_CASCADE | - (ds->index & 0x1f)); + ret = mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_CONTROL_2, + GLOBAL_CONTROL_2_MULTIPLE_CASCADE | + (ds->index & 0x1f)); else - REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL_2, - GLOBAL_CONTROL_2_NO_CASCADE | - (ds->index & 0x1f)); + ret = mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_CONTROL_2, + GLOBAL_CONTROL_2_NO_CASCADE | + (ds->index & 0x1f)); + if (ret) + return ret; /* Force the priority of IGMP/MLD snoop frames and ARP frames * to the highest setting. */ - REG_WRITE(REG_GLOBAL2, GLOBAL2_PRIO_OVERRIDE, - GLOBAL2_PRIO_OVERRIDE_FORCE_SNOOP | - 7 << GLOBAL2_PRIO_OVERRIDE_SNOOP_SHIFT | - GLOBAL2_PRIO_OVERRIDE_FORCE_ARP | - 7 << GLOBAL2_PRIO_OVERRIDE_ARP_SHIFT); - - return 0; + return mv88e6xxx_reg_write(ds, REG_GLOBAL2, GLOBAL2_PRIO_OVERRIDE, + GLOBAL2_PRIO_OVERRIDE_FORCE_SNOOP | + 7 << GLOBAL2_PRIO_OVERRIDE_SNOOP_SHIFT | + GLOBAL2_PRIO_OVERRIDE_FORCE_ARP | + 7 << GLOBAL2_PRIO_OVERRIDE_ARP_SHIFT); } static int mv88e6131_setup(struct dsa_switch *ds) diff --git a/drivers/net/dsa/mv88e6171.c b/drivers/net/dsa/mv88e6171.c index f5622506cdfa..b7af2b78f8ee 100644 --- a/drivers/net/dsa/mv88e6171.c +++ b/drivers/net/dsa/mv88e6171.c @@ -46,8 +46,11 @@ static int mv88e6171_setup_global(struct dsa_switch *ds) /* Discard packets with excessive collisions, mask all * interrupt sources, enable PPU. 
*/ - REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL, - GLOBAL_CONTROL_PPU_ENABLE | GLOBAL_CONTROL_DISCARD_EXCESS); + ret = mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_CONTROL, + GLOBAL_CONTROL_PPU_ENABLE | + GLOBAL_CONTROL_DISCARD_EXCESS); + if (ret) + return ret; /* Configure the upstream port, and configure the upstream * port as the port to which ingress and egress monitor frames @@ -57,14 +60,15 @@ static int mv88e6171_setup_global(struct dsa_switch *ds) upstream_port << GLOBAL_MONITOR_CONTROL_EGRESS_SHIFT | upstream_port << GLOBAL_MONITOR_CONTROL_ARP_SHIFT | upstream_port << GLOBAL_MONITOR_CONTROL_MIRROR_SHIFT; - REG_WRITE(REG_GLOBAL, GLOBAL_MONITOR_CONTROL, reg); + ret = mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_MONITOR_CONTROL, reg); + if (ret) + return ret; /* Disable remote management for now, and set the switch's * DSA device number. */ - REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL_2, ds->index & 0x1f); - - return 0; + return mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_CONTROL_2, + ds->index & 0x1f); } static int mv88e6171_setup(struct dsa_switch *ds) diff --git a/drivers/net/dsa/mv88e6352.c b/drivers/net/dsa/mv88e6352.c index e54ee27db129..e8cb03fad21a 100644 --- a/drivers/net/dsa/mv88e6352.c +++ b/drivers/net/dsa/mv88e6352.c @@ -59,8 +59,11 @@ static int mv88e6352_setup_global(struct dsa_switch *ds) /* Discard packets with excessive collisions, * mask all interrupt sources, enable PPU (bit 14, undocumented). */ - REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL, - GLOBAL_CONTROL_PPU_ENABLE | GLOBAL_CONTROL_DISCARD_EXCESS); + ret = mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_CONTROL, + GLOBAL_CONTROL_PPU_ENABLE | + GLOBAL_CONTROL_DISCARD_EXCESS); + if (ret) + return ret; /* Configure the upstream port, and configure the upstream * port as the port to which ingress and egress monitor frames @@ -69,14 +72,14 @@ static int mv88e6352_setup_global(struct dsa_switch *ds) reg = upstream_port << GLOBAL_MONITOR_CONTROL_INGRESS_SHIFT | upstream_port << GLOBAL_MONITOR_CONTROL_EGRESS_SHIFT | upstream_port << GLOBAL_MONITOR_CONTROL_ARP_SHIFT; - REG_WRITE(REG_GLOBAL, GLOBAL_MONITOR_CONTROL, reg); + ret = mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_MONITOR_CONTROL, reg); + if (ret) + return ret; /* Disable remote management for now, and set the switch's * DSA device number. */ - REG_WRITE(REG_GLOBAL, 0x1c, ds->index & 0x1f); - - return 0; + return mv88e6xxx_reg_write(ds, REG_GLOBAL, 0x1c, ds->index & 0x1f); } static int mv88e6352_setup(struct dsa_switch *ds) diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c index 9985a0cf31f1..b018f20829fb 100644 --- a/drivers/net/dsa/mv88e6xxx.c +++ b/drivers/net/dsa/mv88e6xxx.c @@ -180,28 +180,44 @@ int mv88e6xxx_reg_write(struct dsa_switch *ds, int addr, int reg, u16 val) int mv88e6xxx_set_addr_direct(struct dsa_switch *ds, u8 *addr) { - REG_WRITE(REG_GLOBAL, GLOBAL_MAC_01, (addr[0] << 8) | addr[1]); - REG_WRITE(REG_GLOBAL, GLOBAL_MAC_23, (addr[2] << 8) | addr[3]); - REG_WRITE(REG_GLOBAL, GLOBAL_MAC_45, (addr[4] << 8) | addr[5]); + int err; - return 0; + err = mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_MAC_01, + (addr[0] << 8) | addr[1]); + if (err) + return err; + + err = mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_MAC_23, + (addr[2] << 8) | addr[3]); + if (err) + return err; + + return mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_MAC_45, + (addr[4] << 8) | addr[5]); } int mv88e6xxx_set_addr_indirect(struct dsa_switch *ds, u8 *addr) { - int i; int ret; + int i; for (i = 0; i < 6; i++) { int j; /* Write the MAC address byte. 
*/ - REG_WRITE(REG_GLOBAL2, GLOBAL2_SWITCH_MAC, - GLOBAL2_SWITCH_MAC_BUSY | (i << 8) | addr[i]); + ret = mv88e6xxx_reg_write(ds, REG_GLOBAL2, GLOBAL2_SWITCH_MAC, + GLOBAL2_SWITCH_MAC_BUSY | + (i << 8) | addr[i]); + if (ret) + return ret; /* Wait for the write to complete. */ for (j = 0; j < 16; j++) { - ret = REG_READ(REG_GLOBAL2, GLOBAL2_SWITCH_MAC); + ret = mv88e6xxx_reg_read(ds, REG_GLOBAL2, + GLOBAL2_SWITCH_MAC); + if (ret < 0) + return ret; + if ((ret & GLOBAL2_SWITCH_MAC_BUSY) == 0) break; } @@ -233,13 +249,21 @@ static int mv88e6xxx_ppu_disable(struct dsa_switch *ds) int ret; unsigned long timeout; - ret = REG_READ(REG_GLOBAL, GLOBAL_CONTROL); - REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL, - ret & ~GLOBAL_CONTROL_PPU_ENABLE); + ret = mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_CONTROL); + if (ret < 0) + return ret; + + ret = mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_CONTROL, + ret & ~GLOBAL_CONTROL_PPU_ENABLE); + if (ret) + return ret; timeout = jiffies + 1 * HZ; while (time_before(jiffies, timeout)) { - ret = REG_READ(REG_GLOBAL, GLOBAL_STATUS); + ret = mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_STATUS); + if (ret < 0) + return ret; + usleep_range(1000, 2000); if ((ret & GLOBAL_STATUS_PPU_MASK) != GLOBAL_STATUS_PPU_POLLING) @@ -251,15 +275,24 @@ static int mv88e6xxx_ppu_disable(struct dsa_switch *ds) static int mv88e6xxx_ppu_enable(struct dsa_switch *ds) { - int ret; + int ret, err; unsigned long timeout; - ret = REG_READ(REG_GLOBAL, GLOBAL_CONTROL); - REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL, ret | GLOBAL_CONTROL_PPU_ENABLE); + ret = mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_CONTROL); + if (ret < 0) + return ret; + + err = mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_CONTROL, + ret | GLOBAL_CONTROL_PPU_ENABLE); + if (err) + return err; timeout = jiffies + 1 * HZ; while (time_before(jiffies, timeout)) { - ret = REG_READ(REG_GLOBAL, GLOBAL_STATUS); + ret = mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_STATUS); + if (ret < 0) + return ret; + usleep_range(1000, 2000); if ((ret & GLOBAL_STATUS_PPU_MASK) == GLOBAL_STATUS_PPU_POLLING) @@ -2667,7 +2700,9 @@ int mv88e6xxx_setup_common(struct dsa_switch *ds) ps->ds = ds; mutex_init(&ps->smi_mutex); - ps->id = REG_READ(REG_PORT(0), PORT_SWITCH_ID) & 0xfff0; + ps->id = mv88e6xxx_reg_read(ds, REG_PORT(0), PORT_SWITCH_ID) & 0xfff0; + if (ps->id < 0) + return ps->id; INIT_WORK(&ps->bridge_work, mv88e6xxx_bridge_work); @@ -2677,42 +2712,67 @@ int mv88e6xxx_setup_common(struct dsa_switch *ds) int mv88e6xxx_setup_global(struct dsa_switch *ds) { struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); - int ret; + int err; int i; + mutex_lock(&ps->smi_mutex); /* Set the default address aging time to 5 minutes, and * enable address learn messages to be sent to all message * ports. */ - REG_WRITE(REG_GLOBAL, GLOBAL_ATU_CONTROL, - 0x0140 | GLOBAL_ATU_CONTROL_LEARN2ALL); + err = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_CONTROL, + 0x0140 | GLOBAL_ATU_CONTROL_LEARN2ALL); + if (err) + goto unlock; /* Configure the IP ToS mapping registers. 
*/ - REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_0, 0x0000); - REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_1, 0x0000); - REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_2, 0x5555); - REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_3, 0x5555); - REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_4, 0xaaaa); - REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_5, 0xaaaa); - REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_6, 0xffff); - REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_7, 0xffff); + err = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_IP_PRI_0, 0x0000); + if (err) + goto unlock; + err = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_IP_PRI_1, 0x0000); + if (err) + goto unlock; + err = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_IP_PRI_2, 0x5555); + if (err) + goto unlock; + err = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_IP_PRI_3, 0x5555); + if (err) + goto unlock; + err = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_IP_PRI_4, 0xaaaa); + if (err) + goto unlock; + err = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_IP_PRI_5, 0xaaaa); + if (err) + goto unlock; + err = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_IP_PRI_6, 0xffff); + if (err) + goto unlock; + err = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_IP_PRI_7, 0xffff); + if (err) + goto unlock; /* Configure the IEEE 802.1p priority mapping register. */ - REG_WRITE(REG_GLOBAL, GLOBAL_IEEE_PRI, 0xfa41); + err = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_IEEE_PRI, 0xfa41); + if (err) + goto unlock; /* Send all frames with destination addresses matching * 01:80:c2:00:00:0x to the CPU port. */ - REG_WRITE(REG_GLOBAL2, GLOBAL2_MGMT_EN_0X, 0xffff); + err = _mv88e6xxx_reg_write(ds, REG_GLOBAL2, GLOBAL2_MGMT_EN_0X, 0xffff); + if (err) + goto unlock; /* Ignore removed tag data on doubly tagged packets, disable * flow control messages, force flow control priority to the * highest, and send all special multicast frames to the CPU * port at the highest priority. */ - REG_WRITE(REG_GLOBAL2, GLOBAL2_SWITCH_MGMT, - 0x7 | GLOBAL2_SWITCH_MGMT_RSVD2CPU | 0x70 | - GLOBAL2_SWITCH_MGMT_FORCE_FLOW_CTRL_PRI); + err = _mv88e6xxx_reg_write(ds, REG_GLOBAL2, GLOBAL2_SWITCH_MGMT, + 0x7 | GLOBAL2_SWITCH_MGMT_RSVD2CPU | 0x70 | + GLOBAL2_SWITCH_MGMT_FORCE_FLOW_CTRL_PRI); + if (err) + goto unlock; /* Program the DSA routing table. */ for (i = 0; i < 32; i++) { @@ -2722,23 +2782,35 @@ int mv88e6xxx_setup_global(struct dsa_switch *ds) i != ds->index && i < ds->dst->pd->nr_chips) nexthop = ds->pd->rtable[i] & 0x1f; - REG_WRITE(REG_GLOBAL2, GLOBAL2_DEVICE_MAPPING, - GLOBAL2_DEVICE_MAPPING_UPDATE | - (i << GLOBAL2_DEVICE_MAPPING_TARGET_SHIFT) | - nexthop); + err = _mv88e6xxx_reg_write( + ds, REG_GLOBAL2, + GLOBAL2_DEVICE_MAPPING, + GLOBAL2_DEVICE_MAPPING_UPDATE | + (i << GLOBAL2_DEVICE_MAPPING_TARGET_SHIFT) | nexthop); + if (err) + goto unlock; } /* Clear all trunk masks. */ - for (i = 0; i < 8; i++) - REG_WRITE(REG_GLOBAL2, GLOBAL2_TRUNK_MASK, - 0x8000 | (i << GLOBAL2_TRUNK_MASK_NUM_SHIFT) | - ((1 << ps->num_ports) - 1)); + for (i = 0; i < 8; i++) { + err = _mv88e6xxx_reg_write(ds, REG_GLOBAL2, GLOBAL2_TRUNK_MASK, + 0x8000 | + (i << GLOBAL2_TRUNK_MASK_NUM_SHIFT) | + ((1 << ps->num_ports) - 1)); + if (err) + goto unlock; + } /* Clear all trunk mappings. 
*/ - for (i = 0; i < 16; i++) - REG_WRITE(REG_GLOBAL2, GLOBAL2_TRUNK_MAPPING, - GLOBAL2_TRUNK_MAPPING_UPDATE | - (i << GLOBAL2_TRUNK_MAPPING_ID_SHIFT)); + for (i = 0; i < 16; i++) { + err = _mv88e6xxx_reg_write( + ds, REG_GLOBAL2, + GLOBAL2_TRUNK_MAPPING, + GLOBAL2_TRUNK_MAPPING_UPDATE | + (i << GLOBAL2_TRUNK_MAPPING_ID_SHIFT)); + if (err) + goto unlock; + } if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) || mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) || @@ -2746,17 +2818,27 @@ int mv88e6xxx_setup_global(struct dsa_switch *ds) /* Send all frames with destination addresses matching * 01:80:c2:00:00:2x to the CPU port. */ - REG_WRITE(REG_GLOBAL2, GLOBAL2_MGMT_EN_2X, 0xffff); + err = _mv88e6xxx_reg_write(ds, REG_GLOBAL2, + GLOBAL2_MGMT_EN_2X, 0xffff); + if (err) + goto unlock; /* Initialise cross-chip port VLAN table to reset * defaults. */ - REG_WRITE(REG_GLOBAL2, GLOBAL2_PVT_ADDR, 0x9000); + err = _mv88e6xxx_reg_write(ds, REG_GLOBAL2, + GLOBAL2_PVT_ADDR, 0x9000); + if (err) + goto unlock; /* Clear the priority override table. */ - for (i = 0; i < 16; i++) - REG_WRITE(REG_GLOBAL2, GLOBAL2_PRIO_OVERRIDE, - 0x8000 | (i << 8)); + for (i = 0; i < 16; i++) { + err = _mv88e6xxx_reg_write(ds, REG_GLOBAL2, + GLOBAL2_PRIO_OVERRIDE, + 0x8000 | (i << 8)); + if (err) + goto unlock; + } } if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) || @@ -2767,31 +2849,37 @@ int mv88e6xxx_setup_global(struct dsa_switch *ds) * ingress rate limit registers to their initial * state. */ - for (i = 0; i < ps->num_ports; i++) - REG_WRITE(REG_GLOBAL2, GLOBAL2_INGRESS_OP, - 0x9000 | (i << 8)); + for (i = 0; i < ps->num_ports; i++) { + err = _mv88e6xxx_reg_write(ds, REG_GLOBAL2, + GLOBAL2_INGRESS_OP, + 0x9000 | (i << 8)); + if (err) + goto unlock; + } } /* Clear the statistics counters for all ports */ - REG_WRITE(REG_GLOBAL, GLOBAL_STATS_OP, GLOBAL_STATS_OP_FLUSH_ALL); + err = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_STATS_OP, + GLOBAL_STATS_OP_FLUSH_ALL); + if (err) + goto unlock; /* Wait for the flush to complete. */ - mutex_lock(&ps->smi_mutex); - ret = _mv88e6xxx_stats_wait(ds); - if (ret < 0) + err = _mv88e6xxx_stats_wait(ds); + if (err < 0) goto unlock; /* Clear all ATU entries */ - ret = _mv88e6xxx_atu_flush(ds, 0, true); - if (ret < 0) + err = _mv88e6xxx_atu_flush(ds, 0, true); + if (err < 0) goto unlock; /* Clear all the VTU and STU entries */ - ret = _mv88e6xxx_vtu_stu_flush(ds); + err = _mv88e6xxx_vtu_stu_flush(ds); unlock: mutex_unlock(&ps->smi_mutex); - return ret; + return err; } int mv88e6xxx_switch_reset(struct dsa_switch *ds, bool ppu_active) @@ -2803,10 +2891,18 @@ int mv88e6xxx_switch_reset(struct dsa_switch *ds, bool ppu_active) int ret; int i; + mutex_lock(&ps->smi_mutex); + /* Set all ports to the disabled state. */ for (i = 0; i < ps->num_ports; i++) { - ret = REG_READ(REG_PORT(i), PORT_CONTROL); - REG_WRITE(REG_PORT(i), PORT_CONTROL, ret & 0xfffc); + ret = _mv88e6xxx_reg_read(ds, REG_PORT(i), PORT_CONTROL); + if (ret < 0) + goto unlock; + + ret = _mv88e6xxx_reg_write(ds, REG_PORT(i), PORT_CONTROL, + ret & 0xfffc); + if (ret) + goto unlock; } /* Wait for transmit queues to drain. */ @@ -2825,22 +2921,31 @@ int mv88e6xxx_switch_reset(struct dsa_switch *ds, bool ppu_active) * through global registers 0x18 and 0x19. 
*/ if (ppu_active) - REG_WRITE(REG_GLOBAL, 0x04, 0xc000); + ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, 0x04, 0xc000); else - REG_WRITE(REG_GLOBAL, 0x04, 0xc400); + ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, 0x04, 0xc400); + if (ret) + goto unlock; /* Wait up to one second for reset to complete. */ timeout = jiffies + 1 * HZ; while (time_before(jiffies, timeout)) { - ret = REG_READ(REG_GLOBAL, 0x00); + ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, 0x00); + if (ret < 0) + goto unlock; + if ((ret & is_reset) == is_reset) break; usleep_range(1000, 2000); } if (time_after(jiffies, timeout)) - return -ETIMEDOUT; + ret = -ETIMEDOUT; + else + ret = 0; +unlock: + mutex_unlock(&ps->smi_mutex); - return 0; + return ret; } int mv88e6xxx_phy_page_read(struct dsa_switch *ds, int port, int page, int reg) diff --git a/drivers/net/dsa/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx.h index 5d27decc85cb..0debb9f3cf0a 100644 --- a/drivers/net/dsa/mv88e6xxx.h +++ b/drivers/net/dsa/mv88e6xxx.h @@ -542,25 +542,4 @@ extern struct dsa_switch_driver mv88e6123_switch_driver; extern struct dsa_switch_driver mv88e6352_switch_driver; extern struct dsa_switch_driver mv88e6171_switch_driver; -#define REG_READ(addr, reg) \ - ({ \ - int __ret; \ - \ - __ret = mv88e6xxx_reg_read(ds, addr, reg); \ - if (__ret < 0) \ - return __ret; \ - __ret; \ - }) - -#define REG_WRITE(addr, reg, val) \ - ({ \ - int __ret; \ - \ - __ret = mv88e6xxx_reg_write(ds, addr, reg, val); \ - if (__ret < 0) \ - return __ret; \ - }) - - - #endif From f66bc94174e850a4de4adbe7a08fc37507051185 Mon Sep 17 00:00:00 2001 From: Dinh Nguyen Date: Thu, 14 Apr 2016 20:42:29 -0500 Subject: [PATCH 0784/1649] stmmac: socfpga: remove extra call to socfpga_dwmac_setup In the socfpga_dwmac_probe function, we have a call to socfpga_dwmac_setup, which is already called from socfpga_dwmac_init later in the probe function. Remove this extra call to socfpga_dwmac_setup. Also we should not be calling socfpga_dwmac_setup() directly without wrapping it in the proper reset assert/deasserts. That is because socfpga_dwmac_setup() is setting up PHY modes in the system manager, and it requires the EMACs to be in reset during the PHY setup. Reported-by: Matthew Gerlach Signed-off-by: Dinh Nguyen Signed-off-by: David S. Miller --- drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c | 6 ------ 1 file changed, 6 deletions(-) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c index f0d797ab74d8..41f4c58b22bd 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c @@ -267,12 +267,6 @@ static int socfpga_dwmac_probe(struct platform_device *pdev) return ret; } - ret = socfpga_dwmac_setup(dwmac); - if (ret) { - dev_err(dev, "couldn't setup SoC glue (%d)\n", ret); - return ret; - } - plat_dat->bsp_priv = dwmac; plat_dat->init = socfpga_dwmac_init; plat_dat->exit = socfpga_dwmac_exit; From da5a2383c9a2de88550841f6c100f991a6850230 Mon Sep 17 00:00:00 2001 From: Taku Izumi Date: Fri, 15 Apr 2016 11:25:21 +0900 Subject: [PATCH 0785/1649] fjes: optimize timeout value This patch optimizes the following timeout values. - FJES_DEVICE_RESET_TIMEOUT - FJES_COMMAND_REQ_TIMEOUT - FJES_COMMAND_REQ_BUFF_TIMEOUT Signed-off-by: Taku Izumi Signed-off-by: David S. 
Miller --- drivers/net/fjes/fjes_hw.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/net/fjes/fjes_hw.h b/drivers/net/fjes/fjes_hw.h index 6d57b89a0ee8..baee7f59834b 100644 --- a/drivers/net/fjes/fjes_hw.h +++ b/drivers/net/fjes/fjes_hw.h @@ -33,9 +33,9 @@ struct fjes_hw; #define EP_BUFFER_SUPPORT_VLAN_MAX 4 #define EP_BUFFER_INFO_SIZE 4096 -#define FJES_DEVICE_RESET_TIMEOUT ((17 + 1) * 3) /* sec */ -#define FJES_COMMAND_REQ_TIMEOUT (5 + 1) /* sec */ -#define FJES_COMMAND_REQ_BUFF_TIMEOUT (8 * 3) /* sec */ +#define FJES_DEVICE_RESET_TIMEOUT ((17 + 1) * 3 * 8) /* sec */ +#define FJES_COMMAND_REQ_TIMEOUT ((5 + 1) * 3 * 8) /* sec */ +#define FJES_COMMAND_REQ_BUFF_TIMEOUT (60 * 3) /* sec */ #define FJES_COMMAND_EPSTOP_WAIT_TIMEOUT (1) /* sec */ #define FJES_CMD_REQ_ERR_INFO_PARAM (0x0001) From 3c3bd4a91ec12ad7c140bb3fd04b199e411760cb Mon Sep 17 00:00:00 2001 From: Taku Izumi Date: Fri, 15 Apr 2016 11:25:27 +0900 Subject: [PATCH 0786/1649] fjes: fix incorrect statistics information in fjes_xmit_frame() There are bugs in the statistics accounting in fjes_xmit_frame(): the counters are charged to the local EP's stats, but they should be charged to the stats of the destination EP being transmitted to. This patch fixes this bug. Signed-off-by: Taku Izumi Signed-off-by: David S. Miller --- drivers/net/fjes/fjes_main.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/drivers/net/fjes/fjes_main.c b/drivers/net/fjes/fjes_main.c index 061b4af4ee62..05bdd8bfee00 100644 --- a/drivers/net/fjes/fjes_main.c +++ b/drivers/net/fjes/fjes_main.c @@ -653,7 +653,7 @@ fjes_xmit_frame(struct sk_buff *skb, struct net_device *netdev) &adapter->hw.ep_shm_info[dest_epid].rx, 0)) { /* version is NOT 0 */ adapter->stats64.tx_carrier_errors += 1; - hw->ep_shm_info[my_epid].net_stats + hw->ep_shm_info[dest_epid].net_stats .tx_carrier_errors += 1; ret = NETDEV_TX_OK; @@ -661,9 +661,9 @@ fjes_xmit_frame(struct sk_buff *skb, struct net_device *netdev) &adapter->hw.ep_shm_info[dest_epid].rx, netdev->mtu)) { adapter->stats64.tx_dropped += 1; - hw->ep_shm_info[my_epid].net_stats.tx_dropped += 1; + hw->ep_shm_info[dest_epid].net_stats.tx_dropped += 1; adapter->stats64.tx_errors += 1; - hw->ep_shm_info[my_epid].net_stats.tx_errors += 1; + hw->ep_shm_info[dest_epid].net_stats.tx_errors += 1; ret = NETDEV_TX_OK; } else if (vlan && @@ -694,10 +694,10 @@ fjes_xmit_frame(struct sk_buff *skb, struct net_device *netdev) (long)adapter->tx_start_jiffies) >= FJES_TX_RETRY_TIMEOUT) { adapter->stats64.tx_fifo_errors += 1; - hw->ep_shm_info[my_epid].net_stats + hw->ep_shm_info[dest_epid].net_stats .tx_fifo_errors += 1; adapter->stats64.tx_errors += 1; - hw->ep_shm_info[my_epid].net_stats + hw->ep_shm_info[dest_epid].net_stats .tx_errors += 1; ret = NETDEV_TX_OK; @@ -714,10 +714,10 @@ fjes_xmit_frame(struct sk_buff *skb, struct net_device *netdev) } else { if (!is_multi) { adapter->stats64.tx_packets += 1; - hw->ep_shm_info[my_epid].net_stats + hw->ep_shm_info[dest_epid].net_stats .tx_packets += 1; adapter->stats64.tx_bytes += len; - hw->ep_shm_info[my_epid].net_stats + hw->ep_shm_info[dest_epid].net_stats .tx_bytes += len; } From 19a0a7fd55af4658414de955f401cddaffc1f0ba Mon Sep 17 00:00:00 2001 From: Taku Izumi Date: Fri, 15 Apr 2016 11:25:34 +0900 Subject: [PATCH 0787/1649] fjes: fix bitwise check bug in fjes_raise_intr_rxdata_task In fjes_raise_intr_rxdata_task(), the bitwise check is broken because the "& FJES_RX_POLL_WORK" mask is missing. This patch fixes this bug. Signed-off-by: Taku Izumi Signed-off-by: David S. 
Miller --- drivers/net/fjes/fjes_main.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/fjes/fjes_main.c b/drivers/net/fjes/fjes_main.c index 05bdd8bfee00..e22a86976dcf 100644 --- a/drivers/net/fjes/fjes_main.c +++ b/drivers/net/fjes/fjes_main.c @@ -549,7 +549,8 @@ static void fjes_raise_intr_rxdata_task(struct work_struct *work) if ((hw->ep_shm_info[epid].tx_status_work == FJES_TX_DELAY_SEND_PENDING) && (pstatus == EP_PARTNER_SHARED) && - !(hw->ep_shm_info[epid].rx.info->v1i.rx_status)) { + !(hw->ep_shm_info[epid].rx.info->v1i.rx_status & + FJES_RX_POLL_WORK)) { fjes_hw_raise_interrupt(hw, epid, REG_ICTL_MASK_RX_DATA); } From 16bbec3a50efec46b679c3408c8be09f09dbcb7e Mon Sep 17 00:00:00 2001 From: Taku Izumi Date: Fri, 15 Apr 2016 11:25:40 +0900 Subject: [PATCH 0788/1649] fjes: Enhance changing MTU related work This patch enhances the fjes_change_mtu() method by introducing a new flag named FJES_RX_MTU_CHANGING_DONE in rx_status. At the same time, the default MTU value is changed to 65510 bytes. Signed-off-by: Taku Izumi Signed-off-by: David S. Miller --- drivers/net/fjes/fjes_hw.c | 8 ++++- drivers/net/fjes/fjes_hw.h | 1 + drivers/net/fjes/fjes_main.c | 66 +++++++++++++++++++++++++++++------- 3 files changed, 61 insertions(+), 14 deletions(-) diff --git a/drivers/net/fjes/fjes_hw.c b/drivers/net/fjes/fjes_hw.c index b103adb8d62e..e9f494bd70ad 100644 --- a/drivers/net/fjes/fjes_hw.c +++ b/drivers/net/fjes/fjes_hw.c @@ -179,6 +179,8 @@ void fjes_hw_setup_epbuf(struct epbuf_handler *epbh, u8 *mac_addr, u32 mtu) for (i = 0; i < EP_BUFFER_SUPPORT_VLAN_MAX; i++) info->v1i.vlan_id[i] = vlan_id[i]; + + info->v1i.rx_status |= FJES_RX_MTU_CHANGING_DONE; } void @@ -810,7 +812,8 @@ bool fjes_hw_check_mtu(struct epbuf_handler *epbh, u32 mtu) { union ep_buffer_info *info = epbh->info; - return (info->v1i.frame_max == FJES_MTU_TO_FRAME_SIZE(mtu)); + return ((info->v1i.frame_max == FJES_MTU_TO_FRAME_SIZE(mtu)) && + info->v1i.rx_status & FJES_RX_MTU_CHANGING_DONE); } bool fjes_hw_check_vlan_id(struct epbuf_handler *epbh, u16 vlan_id) @@ -863,6 +866,9 @@ bool fjes_hw_epbuf_rx_is_empty(struct epbuf_handler *epbh) { union ep_buffer_info *info = epbh->info; + if (!(info->v1i.rx_status & FJES_RX_MTU_CHANGING_DONE)) + return true; + if (info->v1i.count_max == 0) return true; diff --git a/drivers/net/fjes/fjes_hw.h b/drivers/net/fjes/fjes_hw.h index baee7f59834b..f40cf0792a39 100644 --- a/drivers/net/fjes/fjes_hw.h +++ b/drivers/net/fjes/fjes_hw.h @@ -57,6 +57,7 @@ struct fjes_hw; #define FJES_RX_STOP_REQ_DONE (0x1) #define FJES_RX_STOP_REQ_REQUEST (0x2) #define FJES_RX_POLL_WORK (0x4) +#define FJES_RX_MTU_CHANGING_DONE (0x8) #define EP_BUFFER_SIZE \ (((sizeof(union ep_buffer_info) + (128 * (64 * 1024))) \ diff --git a/drivers/net/fjes/fjes_main.c b/drivers/net/fjes/fjes_main.c index e22a86976dcf..3c0c1202f237 100644 --- a/drivers/net/fjes/fjes_main.c +++ b/drivers/net/fjes/fjes_main.c @@ -481,6 +481,9 @@ static void fjes_tx_stall_task(struct work_struct *work) info = adapter->hw.ep_shm_info[epid].tx.info; + if (!(info->v1i.rx_status & FJES_RX_MTU_CHANGING_DONE)) + return; + if (EP_RING_FULL(info->v1i.head, info->v1i.tail, info->v1i.count_max)) { all_queue_available = 0; @@ -760,9 +763,11 @@ fjes_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) static int fjes_change_mtu(struct net_device *netdev, int new_mtu) { + struct fjes_adapter *adapter = netdev_priv(netdev); bool running = netif_running(netdev); - int ret = 0; - int idx; + struct fjes_hw *hw = 
&adapter->hw; + int ret = -EINVAL; + int idx, epidx; for (idx = 0; fjes_support_mtu[idx] != 0; idx++) { if (new_mtu <= fjes_support_mtu[idx]) { @@ -770,19 +775,54 @@ static int fjes_change_mtu(struct net_device *netdev, int new_mtu) if (new_mtu == netdev->mtu) return 0; - if (running) - fjes_close(netdev); - - netdev->mtu = new_mtu; - - if (running) - ret = fjes_open(netdev); - - return ret; + ret = 0; + break; } } - return -EINVAL; + if (ret) + return ret; + + if (running) { + for (epidx = 0; epidx < hw->max_epid; epidx++) { + if (epidx == hw->my_epid) + continue; + hw->ep_shm_info[epidx].tx.info->v1i.rx_status &= + ~FJES_RX_MTU_CHANGING_DONE; + } + netif_tx_stop_all_queues(netdev); + netif_carrier_off(netdev); + cancel_work_sync(&adapter->tx_stall_task); + napi_disable(&adapter->napi); + + msleep(1000); + + netif_tx_stop_all_queues(netdev); + } + + netdev->mtu = new_mtu; + + if (running) { + for (epidx = 0; epidx < hw->max_epid; epidx++) { + if (epidx == hw->my_epid) + continue; + + local_irq_disable(); + fjes_hw_setup_epbuf(&hw->ep_shm_info[epidx].tx, + netdev->dev_addr, + netdev->mtu); + local_irq_enable(); + + hw->ep_shm_info[epidx].tx.info->v1i.rx_status |= + FJES_RX_MTU_CHANGING_DONE; + } + + netif_tx_wake_all_queues(netdev); + netif_carrier_on(netdev); + napi_enable(&adapter->napi); + } + + return ret; } static int fjes_vlan_rx_add_vid(struct net_device *netdev, @@ -1204,7 +1244,7 @@ static void fjes_netdev_setup(struct net_device *netdev) netdev->watchdog_timeo = FJES_TX_RETRY_INTERVAL; netdev->netdev_ops = &fjes_netdev_ops; fjes_set_ethtool_ops(netdev); - netdev->mtu = fjes_support_mtu[0]; + netdev->mtu = fjes_support_mtu[3]; netdev->flags |= IFF_BROADCAST; netdev->features |= NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_CTAG_FILTER; } From bd5a256991f9a9cc0b7f6385dd1d8cfc90559b12 Mon Sep 17 00:00:00 2001 From: Taku Izumi Date: Fri, 15 Apr 2016 11:25:46 +0900 Subject: [PATCH 0789/1649] fjes: Introduce spinlock for rx_status This patch introduces a spinlock for rx_status to provide proper exclusive control. Signed-off-by: Taku Izumi Signed-off-by: David S. 
Miller --- drivers/net/fjes/fjes_hw.c | 22 +++++++++++++- drivers/net/fjes/fjes_hw.h | 2 ++ drivers/net/fjes/fjes_main.c | 57 +++++++++++++++++++++++++++++++----- 3 files changed, 72 insertions(+), 9 deletions(-) diff --git a/drivers/net/fjes/fjes_hw.c b/drivers/net/fjes/fjes_hw.c index e9f494bd70ad..0dbafedc0a34 100644 --- a/drivers/net/fjes/fjes_hw.c +++ b/drivers/net/fjes/fjes_hw.c @@ -216,6 +216,7 @@ static int fjes_hw_setup(struct fjes_hw *hw) u8 mac[ETH_ALEN] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; struct fjes_device_command_param param; struct ep_share_mem_info *buf_pair; + unsigned long flags; size_t mem_size; int result; int epidx; @@ -264,10 +265,12 @@ static int fjes_hw_setup(struct fjes_hw *hw) if (result) return result; + spin_lock_irqsave(&hw->rx_status_lock, flags); fjes_hw_setup_epbuf(&buf_pair->tx, mac, fjes_support_mtu[0]); fjes_hw_setup_epbuf(&buf_pair->rx, mac, fjes_support_mtu[0]); + spin_unlock_irqrestore(&hw->rx_status_lock, flags); } } @@ -329,6 +332,7 @@ int fjes_hw_init(struct fjes_hw *hw) INIT_WORK(&hw->epstop_task, fjes_hw_epstop_task); mutex_init(&hw->hw_info.lock); + spin_lock_init(&hw->rx_status_lock); hw->max_epid = fjes_hw_get_max_epid(hw); hw->my_epid = fjes_hw_get_my_epid(hw); @@ -736,6 +740,7 @@ fjes_hw_get_partner_ep_status(struct fjes_hw *hw, int epid) void fjes_hw_raise_epstop(struct fjes_hw *hw) { enum ep_partner_status status; + unsigned long flags; int epidx; for (epidx = 0; epidx < hw->max_epid; epidx++) { @@ -755,8 +760,10 @@ void fjes_hw_raise_epstop(struct fjes_hw *hw) set_bit(epidx, &hw->hw_info.buffer_unshare_reserve_bit); set_bit(epidx, &hw->txrx_stop_req_bit); + spin_lock_irqsave(&hw->rx_status_lock, flags); hw->ep_shm_info[epidx].tx.info->v1i.rx_status |= FJES_RX_STOP_REQ_REQUEST; + spin_unlock_irqrestore(&hw->rx_status_lock, flags); } } @@ -938,6 +945,7 @@ static void fjes_hw_update_zone_task(struct work_struct *work) struct fjes_adapter *adapter; struct net_device *netdev; + unsigned long flags; ulong unshare_bit = 0; ulong share_bit = 0; @@ -1030,8 +1038,10 @@ static void fjes_hw_update_zone_task(struct work_struct *work) continue; if (test_bit(epidx, &share_bit)) { + spin_lock_irqsave(&hw->rx_status_lock, flags); fjes_hw_setup_epbuf(&hw->ep_shm_info[epidx].tx, netdev->dev_addr, netdev->mtu); + spin_unlock_irqrestore(&hw->rx_status_lock, flags); mutex_lock(&hw->hw_info.lock); @@ -1075,10 +1085,14 @@ static void fjes_hw_update_zone_task(struct work_struct *work) mutex_unlock(&hw->hw_info.lock); - if (ret == 0) + if (ret == 0) { + spin_lock_irqsave(&hw->rx_status_lock, flags); fjes_hw_setup_epbuf( &hw->ep_shm_info[epidx].tx, netdev->dev_addr, netdev->mtu); + spin_unlock_irqrestore(&hw->rx_status_lock, + flags); + } } if (test_bit(epidx, &irq_bit)) { @@ -1086,9 +1100,11 @@ static void fjes_hw_update_zone_task(struct work_struct *work) REG_ICTL_MASK_TXRX_STOP_REQ); set_bit(epidx, &hw->txrx_stop_req_bit); + spin_lock_irqsave(&hw->rx_status_lock, flags); hw->ep_shm_info[epidx].tx. 
info->v1i.rx_status |= FJES_RX_STOP_REQ_REQUEST; + spin_unlock_irqrestore(&hw->rx_status_lock, flags); set_bit(epidx, &hw->hw_info.buffer_unshare_reserve_bit); } } @@ -1104,6 +1120,7 @@ static void fjes_hw_epstop_task(struct work_struct *work) { struct fjes_hw *hw = container_of(work, struct fjes_hw, epstop_task); struct fjes_adapter *adapter = (struct fjes_adapter *)hw->back; + unsigned long flags; ulong remain_bit; int epid_bit; @@ -1111,9 +1128,12 @@ static void fjes_hw_epstop_task(struct work_struct *work) while ((remain_bit = hw->epstop_req_bit)) { for (epid_bit = 0; remain_bit; remain_bit >>= 1, epid_bit++) { if (remain_bit & 1) { + spin_lock_irqsave(&hw->rx_status_lock, flags); hw->ep_shm_info[epid_bit]. tx.info->v1i.rx_status |= FJES_RX_STOP_REQ_DONE; + spin_unlock_irqrestore(&hw->rx_status_lock, + flags); clear_bit(epid_bit, &hw->epstop_req_bit); set_bit(epid_bit, diff --git a/drivers/net/fjes/fjes_hw.h b/drivers/net/fjes/fjes_hw.h index f40cf0792a39..1445ac99d6e3 100644 --- a/drivers/net/fjes/fjes_hw.h +++ b/drivers/net/fjes/fjes_hw.h @@ -300,6 +300,8 @@ struct fjes_hw { u8 *base; struct fjes_hw_info hw_info; + + spinlock_t rx_status_lock; /* spinlock for rx_status */ }; int fjes_hw_init(struct fjes_hw *); diff --git a/drivers/net/fjes/fjes_main.c b/drivers/net/fjes/fjes_main.c index 3c0c1202f237..87b24748bfcd 100644 --- a/drivers/net/fjes/fjes_main.c +++ b/drivers/net/fjes/fjes_main.c @@ -290,6 +290,7 @@ static int fjes_close(struct net_device *netdev) { struct fjes_adapter *adapter = netdev_priv(netdev); struct fjes_hw *hw = &adapter->hw; + unsigned long flags; int epidx; netif_tx_stop_all_queues(netdev); @@ -299,13 +300,18 @@ static int fjes_close(struct net_device *netdev) napi_disable(&adapter->napi); + spin_lock_irqsave(&hw->rx_status_lock, flags); for (epidx = 0; epidx < hw->max_epid; epidx++) { if (epidx == hw->my_epid) continue; - adapter->hw.ep_shm_info[epidx].tx.info->v1i.rx_status &= - ~FJES_RX_POLL_WORK; + if (fjes_hw_get_partner_ep_status(hw, epidx) == + EP_PARTNER_SHARED) + adapter->hw.ep_shm_info[epidx] + .tx.info->v1i.rx_status &= + ~FJES_RX_POLL_WORK; } + spin_unlock_irqrestore(&hw->rx_status_lock, flags); fjes_free_irq(adapter); @@ -330,6 +336,7 @@ static int fjes_setup_resources(struct fjes_adapter *adapter) struct net_device *netdev = adapter->netdev; struct ep_share_mem_info *buf_pair; struct fjes_hw *hw = &adapter->hw; + unsigned long flags; int result; int epidx; @@ -371,8 +378,10 @@ static int fjes_setup_resources(struct fjes_adapter *adapter) buf_pair = &hw->ep_shm_info[epidx]; + spin_lock_irqsave(&hw->rx_status_lock, flags); fjes_hw_setup_epbuf(&buf_pair->tx, netdev->dev_addr, netdev->mtu); + spin_unlock_irqrestore(&hw->rx_status_lock, flags); if (fjes_hw_epid_is_same_zone(hw, epidx)) { mutex_lock(&hw->hw_info.lock); @@ -402,6 +411,7 @@ static void fjes_free_resources(struct fjes_adapter *adapter) struct ep_share_mem_info *buf_pair; struct fjes_hw *hw = &adapter->hw; bool reset_flag = false; + unsigned long flags; int result; int epidx; @@ -418,8 +428,10 @@ static void fjes_free_resources(struct fjes_adapter *adapter) buf_pair = &hw->ep_shm_info[epidx]; + spin_lock_irqsave(&hw->rx_status_lock, flags); fjes_hw_setup_epbuf(&buf_pair->tx, netdev->dev_addr, netdev->mtu); + spin_unlock_irqrestore(&hw->rx_status_lock, flags); clear_bit(epidx, &hw->txrx_stop_req_bit); } @@ -766,6 +778,7 @@ static int fjes_change_mtu(struct net_device *netdev, int new_mtu) struct fjes_adapter *adapter = netdev_priv(netdev); bool running = netif_running(netdev); struct fjes_hw 
*hw = &adapter->hw; + unsigned long flags; int ret = -EINVAL; int idx, epidx; @@ -784,12 +797,15 @@ static int fjes_change_mtu(struct net_device *netdev, int new_mtu) return ret; if (running) { + spin_lock_irqsave(&hw->rx_status_lock, flags); for (epidx = 0; epidx < hw->max_epid; epidx++) { if (epidx == hw->my_epid) continue; hw->ep_shm_info[epidx].tx.info->v1i.rx_status &= ~FJES_RX_MTU_CHANGING_DONE; } + spin_unlock_irqrestore(&hw->rx_status_lock, flags); + netif_tx_stop_all_queues(netdev); netif_carrier_off(netdev); cancel_work_sync(&adapter->tx_stall_task); @@ -803,23 +819,25 @@ static int fjes_change_mtu(struct net_device *netdev, int new_mtu) netdev->mtu = new_mtu; if (running) { + spin_lock_irqsave(&hw->rx_status_lock, flags); for (epidx = 0; epidx < hw->max_epid; epidx++) { if (epidx == hw->my_epid) continue; - local_irq_disable(); + spin_lock_irqsave(&hw->rx_status_lock, flags); fjes_hw_setup_epbuf(&hw->ep_shm_info[epidx].tx, netdev->dev_addr, netdev->mtu); - local_irq_enable(); hw->ep_shm_info[epidx].tx.info->v1i.rx_status |= FJES_RX_MTU_CHANGING_DONE; + spin_unlock_irqrestore(&hw->rx_status_lock, flags); } netif_tx_wake_all_queues(netdev); netif_carrier_on(netdev); napi_enable(&adapter->napi); + napi_schedule(&adapter->napi); } return ret; @@ -866,6 +884,7 @@ static void fjes_txrx_stop_req_irq(struct fjes_adapter *adapter, { struct fjes_hw *hw = &adapter->hw; enum ep_partner_status status; + unsigned long flags; status = fjes_hw_get_partner_ep_status(hw, src_epid); switch (status) { @@ -875,8 +894,10 @@ static void fjes_txrx_stop_req_irq(struct fjes_adapter *adapter, break; case EP_PARTNER_WAITING: if (src_epid < hw->my_epid) { + spin_lock_irqsave(&hw->rx_status_lock, flags); hw->ep_shm_info[src_epid].tx.info->v1i.rx_status |= FJES_RX_STOP_REQ_DONE; + spin_unlock_irqrestore(&hw->rx_status_lock, flags); clear_bit(src_epid, &hw->txrx_stop_req_bit); set_bit(src_epid, &adapter->unshare_watch_bitmask); @@ -902,14 +923,17 @@ static void fjes_stop_req_irq(struct fjes_adapter *adapter, int src_epid) { struct fjes_hw *hw = &adapter->hw; enum ep_partner_status status; + unsigned long flags; set_bit(src_epid, &hw->hw_info.buffer_unshare_reserve_bit); status = fjes_hw_get_partner_ep_status(hw, src_epid); switch (status) { case EP_PARTNER_WAITING: + spin_lock_irqsave(&hw->rx_status_lock, flags); hw->ep_shm_info[src_epid].tx.info->v1i.rx_status |= FJES_RX_STOP_REQ_DONE; + spin_unlock_irqrestore(&hw->rx_status_lock, flags); clear_bit(src_epid, &hw->txrx_stop_req_bit); /* fall through */ case EP_PARTNER_UNSHARE: @@ -1042,13 +1066,17 @@ static int fjes_poll(struct napi_struct *napi, int budget) size_t frame_len; void *frame; + spin_lock(&hw->rx_status_lock); for (epidx = 0; epidx < hw->max_epid; epidx++) { if (epidx == hw->my_epid) continue; - adapter->hw.ep_shm_info[epidx].tx.info->v1i.rx_status |= - FJES_RX_POLL_WORK; + if (fjes_hw_get_partner_ep_status(hw, epidx) == + EP_PARTNER_SHARED) + adapter->hw.ep_shm_info[epidx] + .tx.info->v1i.rx_status |= FJES_RX_POLL_WORK; } + spin_unlock(&hw->rx_status_lock); while (work_done < budget) { prefetch(&adapter->hw); @@ -1106,13 +1134,17 @@ static int fjes_poll(struct napi_struct *napi, int budget) if (((long)jiffies - (long)adapter->rx_last_jiffies) < 3) { napi_reschedule(napi); } else { + spin_lock(&hw->rx_status_lock); for (epidx = 0; epidx < hw->max_epid; epidx++) { if (epidx == hw->my_epid) continue; - adapter->hw.ep_shm_info[epidx] - .tx.info->v1i.rx_status &= + if (fjes_hw_get_partner_ep_status(hw, epidx) == + EP_PARTNER_SHARED) + 
adapter->hw.ep_shm_info[epidx].tx + .info->v1i.rx_status &= ~FJES_RX_POLL_WORK; } + spin_unlock(&hw->rx_status_lock); fjes_hw_set_irqmask(hw, REG_ICTL_MASK_RX_DATA, false); } @@ -1281,6 +1313,7 @@ static void fjes_watch_unshare_task(struct work_struct *work) int max_epid, my_epid, epidx; int stop_req, stop_req_done; ulong unshare_watch_bitmask; + unsigned long flags; int wait_time = 0; int is_shared; int ret; @@ -1333,8 +1366,10 @@ static void fjes_watch_unshare_task(struct work_struct *work) } mutex_unlock(&hw->hw_info.lock); + spin_lock_irqsave(&hw->rx_status_lock, flags); fjes_hw_setup_epbuf(&hw->ep_shm_info[epidx].tx, netdev->dev_addr, netdev->mtu); + spin_unlock_irqrestore(&hw->rx_status_lock, flags); clear_bit(epidx, &hw->txrx_stop_req_bit); clear_bit(epidx, &unshare_watch_bitmask); @@ -1372,9 +1407,12 @@ static void fjes_watch_unshare_task(struct work_struct *work) } mutex_unlock(&hw->hw_info.lock); + spin_lock_irqsave(&hw->rx_status_lock, flags); fjes_hw_setup_epbuf( &hw->ep_shm_info[epidx].tx, netdev->dev_addr, netdev->mtu); + spin_unlock_irqrestore(&hw->rx_status_lock, + flags); clear_bit(epidx, &hw->txrx_stop_req_bit); clear_bit(epidx, &unshare_watch_bitmask); @@ -1382,8 +1420,11 @@ static void fjes_watch_unshare_task(struct work_struct *work) } if (test_bit(epidx, &unshare_watch_bitmask)) { + spin_lock_irqsave(&hw->rx_status_lock, flags); hw->ep_shm_info[epidx].tx.info->v1i.rx_status &= ~FJES_RX_STOP_REQ_DONE; + spin_unlock_irqrestore(&hw->rx_status_lock, + flags); } } } From 8f180fadb521ce440af82b5fa12ed56845110f15 Mon Sep 17 00:00:00 2001 From: Taku Izumi Date: Fri, 15 Apr 2016 11:25:52 +0900 Subject: [PATCH 0790/1649] fjes: Update fjes driver version : 1.1 Signed-off-by: Taku Izumi Signed-off-by: David S. Miller --- drivers/net/fjes/fjes_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/fjes/fjes_main.c b/drivers/net/fjes/fjes_main.c index 87b24748bfcd..bb7e90368f8f 100644 --- a/drivers/net/fjes/fjes_main.c +++ b/drivers/net/fjes/fjes_main.c @@ -29,7 +29,7 @@ #include "fjes.h" #define MAJ 1 -#define MIN 0 +#define MIN 1 #define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) #define DRV_NAME "fjes" char fjes_driver_name[] = DRV_NAME; From 9e7399173200934612edf2153b93a8f232b88e4b Mon Sep 17 00:00:00 2001 From: Phil Sutter Date: Fri, 15 Apr 2016 19:14:19 +0200 Subject: [PATCH 0791/1649] staging: rtl8188eu: Convert to using IFF_NO_QUEUE Cc: Jakub Sitnicki Signed-off-by: Phil Sutter Signed-off-by: David S. Miller --- drivers/staging/rtl8188eu/os_dep/mon.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/staging/rtl8188eu/os_dep/mon.c b/drivers/staging/rtl8188eu/os_dep/mon.c index 63bb87593af0..d976e5e18d50 100644 --- a/drivers/staging/rtl8188eu/os_dep/mon.c +++ b/drivers/staging/rtl8188eu/os_dep/mon.c @@ -155,7 +155,7 @@ static void mon_setup(struct net_device *dev) dev->netdev_ops = &mon_netdev_ops; dev->destructor = free_netdev; ether_setup(dev); - dev->tx_queue_len = 0; + dev->priv_flags |= IFF_NO_QUEUE; dev->type = ARPHRD_IEEE80211; /* * Use a locally administered address (IEEE 802) From 4272cc51a6dcf2c086863372fd593809ffced7d5 Mon Sep 17 00:00:00 2001 From: Phil Sutter Date: Fri, 15 Apr 2016 19:14:20 +0200 Subject: [PATCH 0792/1649] openvswitch: Convert to using IFF_NO_QUEUE Cc: Pravin Shelar Signed-off-by: Phil Sutter Signed-off-by: David S. 
Miller --- net/openvswitch/vport-internal_dev.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/net/openvswitch/vport-internal_dev.c b/net/openvswitch/vport-internal_dev.c index 7c8b90bf0e54..2ee48e447b72 100644 --- a/net/openvswitch/vport-internal_dev.c +++ b/net/openvswitch/vport-internal_dev.c @@ -165,11 +165,10 @@ static void do_setup(struct net_device *netdev) netdev->priv_flags &= ~IFF_TX_SKB_SHARING; netdev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_OPENVSWITCH | - IFF_PHONY_HEADROOM; + IFF_PHONY_HEADROOM | IFF_NO_QUEUE; netdev->destructor = internal_dev_destructor; netdev->ethtool_ops = &internal_dev_ethtool_ops; netdev->rtnl_link_ops = &internal_dev_link_ops; - netdev->tx_queue_len = 0; netdev->features = NETIF_F_LLTX | NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA | NETIF_F_HW_CSUM | From 5b161096f0515b61b72156cd0c1e5c72e77cfed8 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Sat, 16 Apr 2016 11:25:49 +0100 Subject: [PATCH 0793/1649] nfp: check the right pointer for errors Correct checking error condition on wrong pointer - copy/paste mistake most likely. Signed-off-by: Jakub Kicinski Signed-off-by: David S. Miller --- drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c b/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c index f86a1f13d27b..d77ae4d0e4dc 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c @@ -200,7 +200,7 @@ void nfp_net_debugfs_adapter_add(struct nfp_net *nn) /* Create queue debugging sub-tree */ queues = debugfs_create_dir("queue", nn->debugfs_dir); - if (IS_ERR_OR_NULL(nn->debugfs_dir)) + if (IS_ERR_OR_NULL(queues)) return; rx = debugfs_create_dir("rx", queues); From c160692e8665880c844bebdee219ed9cdcb2346b Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Sat, 16 Apr 2016 11:25:50 +0100 Subject: [PATCH 0794/1649] nfp: remove unnecessary static There is no reason for those local variables to be static. Signed-off-by: Jakub Kicinski Signed-off-by: David S. Miller --- drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c b/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c index d77ae4d0e4dc..f7c9a5bc4aa3 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c @@ -187,7 +187,7 @@ static const struct file_operations nfp_tx_q_fops = { void nfp_net_debugfs_adapter_add(struct nfp_net *nn) { - static struct dentry *queues, *tx, *rx; + struct dentry *queues, *tx, *rx; char int_name[16]; int i; From 6ffa622d8567de1daab2e0ca23b99520ad504215 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Sat, 16 Apr 2016 11:25:51 +0100 Subject: [PATCH 0795/1649] nfp: correct names of constants in comments Documentation in comments lacks CFG in some names. Signed-off-by: Jakub Kicinski Signed-off-by: David S. 
Miller --- drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h index 8692003aeed8..3ec950555892 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h @@ -152,9 +152,9 @@ * @NFP_NET_CFG_VERSION: Firmware version number * @NFP_NET_CFG_STS: Status * @NFP_NET_CFG_CAP: Capabilities (same bits as @NFP_NET_CFG_CTRL) - * @NFP_NET_MAX_TXRINGS: Maximum number of TX rings - * @NFP_NET_MAX_RXRINGS: Maximum number of RX rings - * @NFP_NET_MAX_MTU: Maximum support MTU + * @NFP_NET_CFG_MAX_TXRINGS: Maximum number of TX rings + * @NFP_NET_CFG_MAX_RXRINGS: Maximum number of RX rings + * @NFP_NET_CFG_MAX_MTU: Maximum support MTU * @NFP_NET_CFG_START_TXQ: Start Queue Control Queue to use for TX (PF only) * @NFP_NET_CFG_START_RXQ: Start Queue Control Queue to use for RX (PF only) * From 2db221cd444bf356243c57b653b6bf84c3491806 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Sat, 16 Apr 2016 11:25:52 +0100 Subject: [PATCH 0796/1649] nfp: remove unused suspicious mask defines NFP_NET_RXR_MASK sounds like a mask which could be used on NFP_NET_CFG_RXRS_ENABLE register but its value is quite strange. In fact there are no users of this define so let's just remove it. Same for TX rings. Signed-off-by: Jakub Kicinski Signed-off-by: David S. Miller --- drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h | 4 ---- 1 file changed, 4 deletions(-) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h index 3ec950555892..ad6c4e31cedd 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h @@ -81,14 +81,10 @@ /** * @NFP_NET_TXR_MAX: Maximum number of TX rings - * @NFP_NET_TXR_MASK: Mask for TX rings * @NFP_NET_RXR_MAX: Maximum number of RX rings - * @NFP_NET_RXR_MASK: Mask for RX rings */ #define NFP_NET_TXR_MAX 64 -#define NFP_NET_TXR_MASK (NFP_NET_TXR_MAX - 1) #define NFP_NET_RXR_MAX 64 -#define NFP_NET_RXR_MASK (NFP_NET_RXR_MAX - 1) /** * Read/Write config words (0x0000 - 0x002c) From 180012dc05e565260a25696767c8f5b2df5fc50e Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Sat, 16 Apr 2016 11:25:53 +0100 Subject: [PATCH 0797/1649] nfp: remove buggy RX buffer length validation Meaning of data_len and meta_len RX WB descriptor fields is slightly confusing. Add a comment with a diagram clarifying the layout. Also remove the buffer length validation: (a) it's imprecise for static rx-offsets; (b) if firmware is buggy enough to DMA past the end of the buffer WARN_ON_ONCE() doesn't seem like a strong enough response. skb_put() will do the checking for us anyway. Signed-off-by: Jakub Kicinski Signed-off-by: David S. 
Miller --- .../ethernet/netronome/nfp/nfp_net_common.c | 26 ++++++++++--------- 1 file changed, 14 insertions(+), 12 deletions(-) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index 0bdff390c958..5235e86eb684 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -1298,23 +1298,25 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget) nfp_net_rx_give_one(rx_ring, new_skb, new_dma_addr); + /* < meta_len > + * <-- [rx_offset] --> + * --------------------------------------------------------- + * | [XX] | metadata | packet | XXXX | + * --------------------------------------------------------- + * <---------------- data_len ---------------> + * + * The rx_offset is fixed for all packets, the meta_len can vary + * on a packet by packet basis. If rx_offset is set to zero + * (_RX_OFFSET_DYNAMIC) metadata starts at the beginning of the + * buffer and is immediately followed by the packet (no [XX]). + */ meta_len = rxd->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK; data_len = le16_to_cpu(rxd->rxd.data_len); - if (WARN_ON_ONCE(data_len > nn->fl_bufsz)) { - dev_kfree_skb_any(skb); - continue; - } - - if (nn->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC) { - /* The packet data starts after the metadata */ + if (nn->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC) skb_reserve(skb, meta_len); - } else { - /* The packet data starts at a fixed offset */ + else skb_reserve(skb, nn->rx_offset); - } - - /* Adjust the SKB for the dynamic meta data pre-pended */ skb_put(skb, data_len - meta_len); nfp_net_set_hash(nn->netdev, skb, rxd); From 3d780b926a12dd798417446d733d457f1be1cc73 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Sat, 16 Apr 2016 11:25:54 +0100 Subject: [PATCH 0798/1649] nfp: add async reconfiguration mechanism Some callers of nfp_net_reconfig() are in atomic context so we used to busy wait for commands to complete. In worst case scenario that means locking up a core for up to 5 seconds when a command times out. Lets add a timer-based mechanism of asynchronously checking whether reconfiguration completed successfully for atomic callers to use. Non-atomic callers can now just sleep. The approach taken is quite simple because (1) synchronous reconfigurations always happen under RTNL (or before device is registered); (2) we can coalesce pending reconfigs. There is no need for request queues, timer which eventually takes a look at reconfiguration result to report errors is good enough. Signed-off-by: Jakub Kicinski Signed-off-by: David S. 
Miller --- drivers/net/ethernet/netronome/nfp/nfp_net.h | 12 +- .../ethernet/netronome/nfp/nfp_net_common.c | 172 +++++++++++++++--- 2 files changed, 157 insertions(+), 27 deletions(-) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h index 3d53fcf323eb..e744acc18ef4 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h @@ -59,8 +59,8 @@ netdev_warn((nn)->netdev, fmt, ## args); \ } while (0) -/* Max time to wait for NFP to respond on updates (in ms) */ -#define NFP_NET_POLL_TIMEOUT 5000 +/* Max time to wait for NFP to respond on updates (in seconds) */ +#define NFP_NET_POLL_TIMEOUT 5 /* Bar allocation */ #define NFP_NET_CRTL_BAR 0 @@ -447,6 +447,10 @@ static inline bool nfp_net_fw_ver_eq(struct nfp_net_fw_version *fw_ver, * @shared_name: Name for shared interrupt * @me_freq_mhz: ME clock_freq (MHz) * @reconfig_lock: Protects HW reconfiguration request regs/machinery + * @reconfig_posted: Pending reconfig bits coming from async sources + * @reconfig_timer_active: Timer for reading reconfiguration results is pending + * @reconfig_sync_present: Some thread is performing synchronous reconfig + * @reconfig_timer: Timer for async reading of reconfig results * @link_up: Is the link up? * @link_status_lock: Protects @link_up and ensures atomicity with BAR reading * @rx_coalesce_usecs: RX interrupt moderation usecs delay parameter @@ -531,6 +535,10 @@ struct nfp_net { spinlock_t link_status_lock; spinlock_t reconfig_lock; + u32 reconfig_posted; + bool reconfig_timer_active; + bool reconfig_sync_present; + struct timer_list reconfig_timer; u32 rx_coalesce_usecs; u32 rx_coalesce_max_frames; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index 5235e86eb684..fa47c14c743a 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -80,6 +80,116 @@ void nfp_net_get_fw_version(struct nfp_net_fw_version *fw_ver, put_unaligned_le32(reg, fw_ver); } +/* Firmware reconfig + * + * Firmware reconfig may take a while so we have two versions of it - + * synchronous and asynchronous (posted). All synchronous callers are holding + * RTNL so we don't have to worry about serializing them. + */ +static void nfp_net_reconfig_start(struct nfp_net *nn, u32 update) +{ + nn_writel(nn, NFP_NET_CFG_UPDATE, update); + /* ensure update is written before pinging HW */ + nn_pci_flush(nn); + nfp_qcp_wr_ptr_add(nn->qcp_cfg, 1); +} + +/* Pass 0 as update to run posted reconfigs. 
*/ +static void nfp_net_reconfig_start_async(struct nfp_net *nn, u32 update) +{ + update |= nn->reconfig_posted; + nn->reconfig_posted = 0; + + nfp_net_reconfig_start(nn, update); + + nn->reconfig_timer_active = true; + mod_timer(&nn->reconfig_timer, jiffies + NFP_NET_POLL_TIMEOUT * HZ); +} + +static bool nfp_net_reconfig_check_done(struct nfp_net *nn, bool last_check) +{ + u32 reg; + + reg = nn_readl(nn, NFP_NET_CFG_UPDATE); + if (reg == 0) + return true; + if (reg & NFP_NET_CFG_UPDATE_ERR) { + nn_err(nn, "Reconfig error: 0x%08x\n", reg); + return true; + } else if (last_check) { + nn_err(nn, "Reconfig timeout: 0x%08x\n", reg); + return true; + } + + return false; +} + +static int nfp_net_reconfig_wait(struct nfp_net *nn, unsigned long deadline) +{ + bool timed_out = false; + + /* Poll update field, waiting for NFP to ack the config */ + while (!nfp_net_reconfig_check_done(nn, timed_out)) { + msleep(1); + timed_out = time_is_before_eq_jiffies(deadline); + } + + if (nn_readl(nn, NFP_NET_CFG_UPDATE) & NFP_NET_CFG_UPDATE_ERR) + return -EIO; + + return timed_out ? -EIO : 0; +} + +static void nfp_net_reconfig_timer(unsigned long data) +{ + struct nfp_net *nn = (void *)data; + + spin_lock_bh(&nn->reconfig_lock); + + nn->reconfig_timer_active = false; + + /* If sync caller is present it will take over from us */ + if (nn->reconfig_sync_present) + goto done; + + /* Read reconfig status and report errors */ + nfp_net_reconfig_check_done(nn, true); + + if (nn->reconfig_posted) + nfp_net_reconfig_start_async(nn, 0); +done: + spin_unlock_bh(&nn->reconfig_lock); +} + +/** + * nfp_net_reconfig_post() - Post async reconfig request + * @nn: NFP Net device to reconfigure + * @update: The value for the update field in the BAR config + * + * Record FW reconfiguration request. Reconfiguration will be kicked off + * whenever reconfiguration machinery is idle. Multiple requests can be + * merged together! 
+ */ +static void nfp_net_reconfig_post(struct nfp_net *nn, u32 update) +{ + spin_lock_bh(&nn->reconfig_lock); + + /* Sync caller will kick off async reconf when it's done, just post */ + if (nn->reconfig_sync_present) { + nn->reconfig_posted |= update; + goto done; + } + + /* Opportunistically check if the previous command is done */ + if (!nn->reconfig_timer_active || + nfp_net_reconfig_check_done(nn, false)) + nfp_net_reconfig_start_async(nn, update); + else + nn->reconfig_posted |= update; +done: + spin_unlock_bh(&nn->reconfig_lock); +} + /** * nfp_net_reconfig() - Reconfigure the firmware * @nn: NFP Net device to reconfigure @@ -93,35 +203,45 @@ void nfp_net_get_fw_version(struct nfp_net_fw_version *fw_ver, */ int nfp_net_reconfig(struct nfp_net *nn, u32 update) { - int cnt, ret = 0; - u32 new; + bool cancelled_timer = false; + u32 pre_posted_requests; + int ret; spin_lock_bh(&nn->reconfig_lock); - nn_writel(nn, NFP_NET_CFG_UPDATE, update); - /* ensure update is written before pinging HW */ - nn_pci_flush(nn); - nfp_qcp_wr_ptr_add(nn->qcp_cfg, 1); + nn->reconfig_sync_present = true; - /* Poll update field, waiting for NFP to ack the config */ - for (cnt = 0; ; cnt++) { - new = nn_readl(nn, NFP_NET_CFG_UPDATE); - if (new == 0) - break; - if (new & NFP_NET_CFG_UPDATE_ERR) { - nn_err(nn, "Reconfig error: 0x%08x\n", new); - ret = -EIO; - break; - } else if (cnt >= NFP_NET_POLL_TIMEOUT) { - nn_err(nn, "Reconfig timeout for 0x%08x after %dms\n", - update, cnt); - ret = -EIO; - break; - } - mdelay(1); + if (nn->reconfig_timer_active) { + del_timer(&nn->reconfig_timer); + nn->reconfig_timer_active = false; + cancelled_timer = true; } + pre_posted_requests = nn->reconfig_posted; + nn->reconfig_posted = 0; spin_unlock_bh(&nn->reconfig_lock); + + if (cancelled_timer) + nfp_net_reconfig_wait(nn, nn->reconfig_timer.expires); + + /* Run the posted reconfigs which were issued before we started */ + if (pre_posted_requests) { + nfp_net_reconfig_start(nn, pre_posted_requests); + nfp_net_reconfig_wait(nn, jiffies + HZ * NFP_NET_POLL_TIMEOUT); + } + + nfp_net_reconfig_start(nn, update); + ret = nfp_net_reconfig_wait(nn, jiffies + HZ * NFP_NET_POLL_TIMEOUT); + + spin_lock_bh(&nn->reconfig_lock); + + if (nn->reconfig_posted) + nfp_net_reconfig_start_async(nn, 0); + + nn->reconfig_sync_present = false; + + spin_unlock_bh(&nn->reconfig_lock); + return ret; } @@ -2096,8 +2216,7 @@ static void nfp_net_set_rx_mode(struct net_device *netdev) return; nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl); - if (nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN)) - return; + nfp_net_reconfig_post(nn, NFP_NET_CFG_UPDATE_GEN); nn->ctrl = new_ctrl; } @@ -2405,7 +2524,7 @@ static void nfp_net_set_vxlan_port(struct nfp_net *nn, int idx, __be16 port) be16_to_cpu(nn->vxlan_ports[i + 1]) << 16 | be16_to_cpu(nn->vxlan_ports[i])); - nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_VXLAN); + nfp_net_reconfig_post(nn, NFP_NET_CFG_UPDATE_VXLAN); } /** @@ -2551,6 +2670,9 @@ struct nfp_net *nfp_net_netdev_alloc(struct pci_dev *pdev, spin_lock_init(&nn->reconfig_lock); spin_lock_init(&nn->link_status_lock); + setup_timer(&nn->reconfig_timer, + nfp_net_reconfig_timer, (unsigned long)nn); + return nn; } From 0209d144e3097fee1fe5d38532e6f0919c80d1ea Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Sun, 17 Apr 2016 13:23:55 -0400 Subject: [PATCH 0799/1649] net: dsa: constify probed name Change the dsa_switch_driver.probe function to return a const char *. Signed-off-by: Vivien Didelot Reviewed-by: Andrew Lunn Signed-off-by: David S. 
Miller --- drivers/net/dsa/bcm_sf2.c | 6 +++--- drivers/net/dsa/mv88e6060.c | 10 +++++----- drivers/net/dsa/mv88e6123.c | 6 +++--- drivers/net/dsa/mv88e6131.c | 6 +++--- drivers/net/dsa/mv88e6171.c | 6 +++--- drivers/net/dsa/mv88e6352.c | 6 +++--- drivers/net/dsa/mv88e6xxx.c | 17 +++++++++-------- drivers/net/dsa/mv88e6xxx.h | 8 ++++---- include/net/dsa.h | 5 +++-- net/dsa/dsa.c | 6 +++--- 10 files changed, 39 insertions(+), 37 deletions(-) diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index 7a5f0ef46bd6..448deb59b9a4 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -135,9 +135,9 @@ static int bcm_sf2_sw_get_sset_count(struct dsa_switch *ds) return BCM_SF2_STATS_SIZE; } -static char *bcm_sf2_sw_drv_probe(struct device *dsa_dev, - struct device *host_dev, - int sw_addr, void **_priv) +static const char *bcm_sf2_sw_drv_probe(struct device *dsa_dev, + struct device *host_dev, int sw_addr, + void **_priv) { struct bcm_sf2_priv *priv; diff --git a/drivers/net/dsa/mv88e6060.c b/drivers/net/dsa/mv88e6060.c index 92cebab9383e..e36b40886bd8 100644 --- a/drivers/net/dsa/mv88e6060.c +++ b/drivers/net/dsa/mv88e6060.c @@ -51,7 +51,7 @@ static int reg_write(struct dsa_switch *ds, int addr, int reg, u16 val) return __ret; \ }) -static char *mv88e6060_get_name(struct mii_bus *bus, int sw_addr) +static const char *mv88e6060_get_name(struct mii_bus *bus, int sw_addr) { int ret; @@ -69,13 +69,13 @@ static char *mv88e6060_get_name(struct mii_bus *bus, int sw_addr) return NULL; } -static char *mv88e6060_drv_probe(struct device *dsa_dev, - struct device *host_dev, - int sw_addr, void **_priv) +static const char *mv88e6060_drv_probe(struct device *dsa_dev, + struct device *host_dev, int sw_addr, + void **_priv) { struct mii_bus *bus = dsa_host_dev_to_mii_bus(host_dev); struct mv88e6060_priv *priv; - char *name; + const char *name; name = mv88e6060_get_name(bus, sw_addr); if (name) { diff --git a/drivers/net/dsa/mv88e6123.c b/drivers/net/dsa/mv88e6123.c index 140e44e50e8a..9701c0f9a60a 100644 --- a/drivers/net/dsa/mv88e6123.c +++ b/drivers/net/dsa/mv88e6123.c @@ -29,9 +29,9 @@ static const struct mv88e6xxx_switch_id mv88e6123_table[] = { { PORT_SWITCH_ID_6165_A2, "Marvell 88e6165 (A2)" }, }; -static char *mv88e6123_drv_probe(struct device *dsa_dev, - struct device *host_dev, - int sw_addr, void **priv) +static const char *mv88e6123_drv_probe(struct device *dsa_dev, + struct device *host_dev, int sw_addr, + void **priv) { return mv88e6xxx_drv_probe(dsa_dev, host_dev, sw_addr, priv, mv88e6123_table, diff --git a/drivers/net/dsa/mv88e6131.c b/drivers/net/dsa/mv88e6131.c index 34d297b65040..fa3a35460453 100644 --- a/drivers/net/dsa/mv88e6131.c +++ b/drivers/net/dsa/mv88e6131.c @@ -25,9 +25,9 @@ static const struct mv88e6xxx_switch_id mv88e6131_table[] = { { PORT_SWITCH_ID_6185, "Marvell 88E6185" }, }; -static char *mv88e6131_drv_probe(struct device *dsa_dev, - struct device *host_dev, - int sw_addr, void **priv) +static const char *mv88e6131_drv_probe(struct device *dsa_dev, + struct device *host_dev, int sw_addr, + void **priv) { return mv88e6xxx_drv_probe(dsa_dev, host_dev, sw_addr, priv, mv88e6131_table, diff --git a/drivers/net/dsa/mv88e6171.c b/drivers/net/dsa/mv88e6171.c index b7af2b78f8ee..8d86c9ee1adf 100644 --- a/drivers/net/dsa/mv88e6171.c +++ b/drivers/net/dsa/mv88e6171.c @@ -24,9 +24,9 @@ static const struct mv88e6xxx_switch_id mv88e6171_table[] = { { PORT_SWITCH_ID_6351, "Marvell 88E6351" }, }; -static char *mv88e6171_drv_probe(struct device *dsa_dev, - 
struct device *host_dev, - int sw_addr, void **priv) +static const char *mv88e6171_drv_probe(struct device *dsa_dev, + struct device *host_dev, int sw_addr, + void **priv) { return mv88e6xxx_drv_probe(dsa_dev, host_dev, sw_addr, priv, mv88e6171_table, diff --git a/drivers/net/dsa/mv88e6352.c b/drivers/net/dsa/mv88e6352.c index e8cb03fad21a..c7fa69c9a564 100644 --- a/drivers/net/dsa/mv88e6352.c +++ b/drivers/net/dsa/mv88e6352.c @@ -37,9 +37,9 @@ static const struct mv88e6xxx_switch_id mv88e6352_table[] = { { PORT_SWITCH_ID_6352_A1, "Marvell 88E6352 (A1)" }, }; -static char *mv88e6352_drv_probe(struct device *dsa_dev, - struct device *host_dev, - int sw_addr, void **priv) +static const char *mv88e6352_drv_probe(struct device *dsa_dev, + struct device *host_dev, int sw_addr, + void **priv) { return mv88e6xxx_drv_probe(dsa_dev, host_dev, sw_addr, priv, mv88e6352_table, diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c index b018f20829fb..25d7fec98f8e 100644 --- a/drivers/net/dsa/mv88e6xxx.c +++ b/drivers/net/dsa/mv88e6xxx.c @@ -3173,9 +3173,10 @@ int mv88e6xxx_get_temp_alarm(struct dsa_switch *ds, bool *alarm) } #endif /* CONFIG_NET_DSA_HWMON */ -static char *mv88e6xxx_lookup_name(struct mii_bus *bus, int sw_addr, - const struct mv88e6xxx_switch_id *table, - unsigned int num) +static const char * +mv88e6xxx_lookup_name(struct mii_bus *bus, int sw_addr, + const struct mv88e6xxx_switch_id *table, + unsigned int num) { int i, ret; @@ -3205,14 +3206,14 @@ static char *mv88e6xxx_lookup_name(struct mii_bus *bus, int sw_addr, return NULL; } -char *mv88e6xxx_drv_probe(struct device *dsa_dev, struct device *host_dev, - int sw_addr, void **priv, - const struct mv88e6xxx_switch_id *table, - unsigned int num) +const char *mv88e6xxx_drv_probe(struct device *dsa_dev, struct device *host_dev, + int sw_addr, void **priv, + const struct mv88e6xxx_switch_id *table, + unsigned int num) { struct mv88e6xxx_priv_state *ps; struct mii_bus *bus = dsa_host_dev_to_mii_bus(host_dev); - char *name; + const char *name; if (!bus) return NULL; diff --git a/drivers/net/dsa/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx.h index 0debb9f3cf0a..5eb601398835 100644 --- a/drivers/net/dsa/mv88e6xxx.h +++ b/drivers/net/dsa/mv88e6xxx.h @@ -462,10 +462,10 @@ struct mv88e6xxx_hw_stat { }; int mv88e6xxx_switch_reset(struct dsa_switch *ds, bool ppu_active); -char *mv88e6xxx_drv_probe(struct device *dsa_dev, struct device *host_dev, - int sw_addr, void **priv, - const struct mv88e6xxx_switch_id *table, - unsigned int num); +const char *mv88e6xxx_drv_probe(struct device *dsa_dev, struct device *host_dev, + int sw_addr, void **priv, + const struct mv88e6xxx_switch_id *table, + unsigned int num); int mv88e6xxx_setup_ports(struct dsa_switch *ds); int mv88e6xxx_setup_common(struct dsa_switch *ds); diff --git a/include/net/dsa.h b/include/net/dsa.h index 689ebd3542ba..c4bc42bd3538 100644 --- a/include/net/dsa.h +++ b/include/net/dsa.h @@ -217,8 +217,9 @@ struct dsa_switch_driver { /* * Probing and setup. 
*/ - char *(*probe)(struct device *dsa_dev, struct device *host_dev, - int sw_addr, void **priv); + const char *(*probe)(struct device *dsa_dev, + struct device *host_dev, int sw_addr, + void **priv); int (*setup)(struct dsa_switch *ds); int (*set_addr)(struct dsa_switch *ds, u8 *addr); u32 (*get_phy_flags)(struct dsa_switch *ds, int port); diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c index 60ea98481806..efa612f0ab9b 100644 --- a/net/dsa/dsa.c +++ b/net/dsa/dsa.c @@ -52,11 +52,11 @@ EXPORT_SYMBOL_GPL(unregister_switch_driver); static struct dsa_switch_driver * dsa_switch_probe(struct device *parent, struct device *host_dev, int sw_addr, - char **_name, void **priv) + const char **_name, void **priv) { struct dsa_switch_driver *ret; struct list_head *list; - char *name; + const char *name; ret = NULL; name = NULL; @@ -383,7 +383,7 @@ dsa_switch_setup(struct dsa_switch_tree *dst, int index, struct dsa_switch_driver *drv; struct dsa_switch *ds; int ret; - char *name; + const char *name; void *priv; /* From b346204737fafce585f62543ed7691fb4a72789d Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Sun, 17 Apr 2016 13:23:56 -0400 Subject: [PATCH 0800/1649] net: dsa: mv88e6xxx: drop double ds assignment Every driver assigns ps->ds even though it gets assigned in the shared mv88e6xxx_setup_common function. Kill redundancy. Signed-off-by: Vivien Didelot Reviewed-by: Andrew Lunn Signed-off-by: David S. Miller --- drivers/net/dsa/mv88e6123.c | 2 -- drivers/net/dsa/mv88e6131.c | 2 -- drivers/net/dsa/mv88e6171.c | 2 -- drivers/net/dsa/mv88e6352.c | 2 -- 4 files changed, 8 deletions(-) diff --git a/drivers/net/dsa/mv88e6123.c b/drivers/net/dsa/mv88e6123.c index 9701c0f9a60a..85537eb2806a 100644 --- a/drivers/net/dsa/mv88e6123.c +++ b/drivers/net/dsa/mv88e6123.c @@ -79,8 +79,6 @@ static int mv88e6123_setup(struct dsa_switch *ds) struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); int ret; - ps->ds = ds; - ret = mv88e6xxx_setup_common(ds); if (ret < 0) return ret; diff --git a/drivers/net/dsa/mv88e6131.c b/drivers/net/dsa/mv88e6131.c index fa3a35460453..4117c9b56571 100644 --- a/drivers/net/dsa/mv88e6131.c +++ b/drivers/net/dsa/mv88e6131.c @@ -101,8 +101,6 @@ static int mv88e6131_setup(struct dsa_switch *ds) struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); int ret; - ps->ds = ds; - ret = mv88e6xxx_setup_common(ds); if (ret < 0) return ret; diff --git a/drivers/net/dsa/mv88e6171.c b/drivers/net/dsa/mv88e6171.c index 8d86c9ee1adf..ae328750eae8 100644 --- a/drivers/net/dsa/mv88e6171.c +++ b/drivers/net/dsa/mv88e6171.c @@ -76,8 +76,6 @@ static int mv88e6171_setup(struct dsa_switch *ds) struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); int ret; - ps->ds = ds; - ret = mv88e6xxx_setup_common(ds); if (ret < 0) return ret; diff --git a/drivers/net/dsa/mv88e6352.c b/drivers/net/dsa/mv88e6352.c index c7fa69c9a564..10c36abf4c64 100644 --- a/drivers/net/dsa/mv88e6352.c +++ b/drivers/net/dsa/mv88e6352.c @@ -87,8 +87,6 @@ static int mv88e6352_setup(struct dsa_switch *ds) struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); int ret; - ps->ds = ds; - ret = mv88e6xxx_setup_common(ds); if (ret < 0) return ret; From 54c6f4bda7dabab7a7a9eeb8f1ced4b0b5fc4fd0 Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Sun, 17 Apr 2016 13:23:57 -0400 Subject: [PATCH 0801/1649] net: dsa: mv88e6xxx: drop revision probing There is no point in having a special case for the revision when probing a switch model. 
The code gets cluttered with unnecessary defines, and leads to errors when code such as mv88e6131_setup compares PORT_SWITCH_ID_6131_B2 to ps->id which masks the revision. Drop every revision definition, and lookup only the product number. Signed-off-by: Vivien Didelot Reviewed-by: Andrew Lunn Signed-off-by: David S. Miller --- drivers/net/dsa/mv88e6123.c | 6 ------ drivers/net/dsa/mv88e6131.c | 2 -- drivers/net/dsa/mv88e6352.c | 6 ------ drivers/net/dsa/mv88e6xxx.c | 14 +------------- drivers/net/dsa/mv88e6xxx.h | 15 --------------- 5 files changed, 1 insertion(+), 42 deletions(-) diff --git a/drivers/net/dsa/mv88e6123.c b/drivers/net/dsa/mv88e6123.c index 85537eb2806a..d6921ba144b9 100644 --- a/drivers/net/dsa/mv88e6123.c +++ b/drivers/net/dsa/mv88e6123.c @@ -19,14 +19,8 @@ static const struct mv88e6xxx_switch_id mv88e6123_table[] = { { PORT_SWITCH_ID_6123, "Marvell 88E6123" }, - { PORT_SWITCH_ID_6123_A1, "Marvell 88E6123 (A1)" }, - { PORT_SWITCH_ID_6123_A2, "Marvell 88E6123 (A2)" }, { PORT_SWITCH_ID_6161, "Marvell 88E6161" }, - { PORT_SWITCH_ID_6161_A1, "Marvell 88E6161 (A1)" }, - { PORT_SWITCH_ID_6161_A2, "Marvell 88E6161 (A2)" }, { PORT_SWITCH_ID_6165, "Marvell 88E6165" }, - { PORT_SWITCH_ID_6165_A1, "Marvell 88E6165 (A1)" }, - { PORT_SWITCH_ID_6165_A2, "Marvell 88e6165 (A2)" }, }; static const char *mv88e6123_drv_probe(struct device *dsa_dev, diff --git a/drivers/net/dsa/mv88e6131.c b/drivers/net/dsa/mv88e6131.c index 4117c9b56571..8dc136576fe4 100644 --- a/drivers/net/dsa/mv88e6131.c +++ b/drivers/net/dsa/mv88e6131.c @@ -21,7 +21,6 @@ static const struct mv88e6xxx_switch_id mv88e6131_table[] = { { PORT_SWITCH_ID_6085, "Marvell 88E6085" }, { PORT_SWITCH_ID_6095, "Marvell 88E6095/88E6095F" }, { PORT_SWITCH_ID_6131, "Marvell 88E6131" }, - { PORT_SWITCH_ID_6131_B2, "Marvell 88E6131 (B2)" }, { PORT_SWITCH_ID_6185, "Marvell 88E6185" }, }; @@ -116,7 +115,6 @@ static int mv88e6131_setup(struct dsa_switch *ds) ps->num_ports = 11; break; case PORT_SWITCH_ID_6131: - case PORT_SWITCH_ID_6131_B2: ps->num_ports = 8; break; default: diff --git a/drivers/net/dsa/mv88e6352.c b/drivers/net/dsa/mv88e6352.c index 10c36abf4c64..34f92b17146b 100644 --- a/drivers/net/dsa/mv88e6352.c +++ b/drivers/net/dsa/mv88e6352.c @@ -27,14 +27,8 @@ static const struct mv88e6xxx_switch_id mv88e6352_table[] = { { PORT_SWITCH_ID_6176, "Marvell 88E6176" }, { PORT_SWITCH_ID_6240, "Marvell 88E6240" }, { PORT_SWITCH_ID_6320, "Marvell 88E6320" }, - { PORT_SWITCH_ID_6320_A1, "Marvell 88E6320 (A1)" }, - { PORT_SWITCH_ID_6320_A2, "Marvell 88e6320 (A2)" }, { PORT_SWITCH_ID_6321, "Marvell 88E6321" }, - { PORT_SWITCH_ID_6321_A1, "Marvell 88E6321 (A1)" }, - { PORT_SWITCH_ID_6321_A2, "Marvell 88e6321 (A2)" }, { PORT_SWITCH_ID_6352, "Marvell 88E6352" }, - { PORT_SWITCH_ID_6352_A0, "Marvell 88E6352 (A0)" }, - { PORT_SWITCH_ID_6352_A1, "Marvell 88E6352 (A1)" }, }; static const char *mv88e6352_drv_probe(struct device *dsa_dev, diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c index 25d7fec98f8e..469d8a3476cb 100644 --- a/drivers/net/dsa/mv88e6xxx.c +++ b/drivers/net/dsa/mv88e6xxx.c @@ -3187,22 +3187,10 @@ mv88e6xxx_lookup_name(struct mii_bus *bus, int sw_addr, if (ret < 0) return NULL; - /* Look up the exact switch ID */ for (i = 0; i < num; ++i) - if (table[i].id == ret) + if (table[i].id == (ret & 0xfff0)) return table[i].name; - /* Look up only the product number */ - for (i = 0; i < num; ++i) { - if (table[i].id == (ret & PORT_SWITCH_ID_PROD_NUM_MASK)) { - dev_warn(&bus->dev, - "unknown revision %d, using base 
switch 0x%x\n", - ret & PORT_SWITCH_ID_REV_MASK, - ret & PORT_SWITCH_ID_PROD_NUM_MASK); - return table[i].name; - } - } - return NULL; } diff --git a/drivers/net/dsa/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx.h index 5eb601398835..6513450d60bf 100644 --- a/drivers/net/dsa/mv88e6xxx.h +++ b/drivers/net/dsa/mv88e6xxx.h @@ -68,8 +68,6 @@ #define PORT_PCS_CTRL_UNFORCED 0x03 #define PORT_PAUSE_CTRL 0x02 #define PORT_SWITCH_ID 0x03 -#define PORT_SWITCH_ID_PROD_NUM_MASK 0xfff0 -#define PORT_SWITCH_ID_REV_MASK 0x000f #define PORT_SWITCH_ID_6031 0x0310 #define PORT_SWITCH_ID_6035 0x0350 #define PORT_SWITCH_ID_6046 0x0480 @@ -84,18 +82,11 @@ #define PORT_SWITCH_ID_6121 0x1040 #define PORT_SWITCH_ID_6122 0x1050 #define PORT_SWITCH_ID_6123 0x1210 -#define PORT_SWITCH_ID_6123_A1 0x1212 -#define PORT_SWITCH_ID_6123_A2 0x1213 #define PORT_SWITCH_ID_6131 0x1060 -#define PORT_SWITCH_ID_6131_B2 0x1066 #define PORT_SWITCH_ID_6152 0x1a40 #define PORT_SWITCH_ID_6155 0x1a50 #define PORT_SWITCH_ID_6161 0x1610 -#define PORT_SWITCH_ID_6161_A1 0x1612 -#define PORT_SWITCH_ID_6161_A2 0x1613 #define PORT_SWITCH_ID_6165 0x1650 -#define PORT_SWITCH_ID_6165_A1 0x1652 -#define PORT_SWITCH_ID_6165_A2 0x1653 #define PORT_SWITCH_ID_6171 0x1710 #define PORT_SWITCH_ID_6172 0x1720 #define PORT_SWITCH_ID_6175 0x1750 @@ -104,16 +95,10 @@ #define PORT_SWITCH_ID_6185 0x1a70 #define PORT_SWITCH_ID_6240 0x2400 #define PORT_SWITCH_ID_6320 0x1150 -#define PORT_SWITCH_ID_6320_A1 0x1151 -#define PORT_SWITCH_ID_6320_A2 0x1152 #define PORT_SWITCH_ID_6321 0x3100 -#define PORT_SWITCH_ID_6321_A1 0x3101 -#define PORT_SWITCH_ID_6321_A2 0x3102 #define PORT_SWITCH_ID_6350 0x3710 #define PORT_SWITCH_ID_6351 0x3750 #define PORT_SWITCH_ID_6352 0x3520 -#define PORT_SWITCH_ID_6352_A0 0x3521 -#define PORT_SWITCH_ID_6352_A1 0x3522 #define PORT_CONTROL 0x04 #define PORT_CONTROL_USE_CORE_TAG BIT(15) #define PORT_CONTROL_DROP_ON_LOCK BIT(14) From a439c0612d7bdbd5ce8ea868e6a1084f0d7300dc Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Sun, 17 Apr 2016 13:23:58 -0400 Subject: [PATCH 0802/1649] net: dsa: mv88e6xxx: read switch ID in probe Read the switch ID only once, at probe time, to avoid multiple read accesses and MII bus checking. Signed-off-by: Vivien Didelot Reviewed-by: Andrew Lunn Signed-off-by: David S. 
Miller --- drivers/net/dsa/mv88e6xxx.c | 57 +++++++++++++++++++------------------ 1 file changed, 30 insertions(+), 27 deletions(-) diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c index 469d8a3476cb..49f085a8453d 100644 --- a/drivers/net/dsa/mv88e6xxx.c +++ b/drivers/net/dsa/mv88e6xxx.c @@ -2700,10 +2700,6 @@ int mv88e6xxx_setup_common(struct dsa_switch *ds) ps->ds = ds; mutex_init(&ps->smi_mutex); - ps->id = mv88e6xxx_reg_read(ds, REG_PORT(0), PORT_SWITCH_ID) & 0xfff0; - if (ps->id < 0) - return ps->id; - INIT_WORK(&ps->bridge_work, mv88e6xxx_bridge_work); return 0; @@ -3174,21 +3170,13 @@ int mv88e6xxx_get_temp_alarm(struct dsa_switch *ds, bool *alarm) #endif /* CONFIG_NET_DSA_HWMON */ static const char * -mv88e6xxx_lookup_name(struct mii_bus *bus, int sw_addr, - const struct mv88e6xxx_switch_id *table, +mv88e6xxx_lookup_name(unsigned int id, const struct mv88e6xxx_switch_id *table, unsigned int num) { - int i, ret; - - if (!bus) - return NULL; - - ret = __mv88e6xxx_reg_read(bus, sw_addr, REG_PORT(0), PORT_SWITCH_ID); - if (ret < 0) - return NULL; + int i; for (i = 0; i < num; ++i) - if (table[i].id == (ret & 0xfff0)) + if (table[i].id == (id & 0xfff0)) return table[i].name; return NULL; @@ -3200,23 +3188,38 @@ const char *mv88e6xxx_drv_probe(struct device *dsa_dev, struct device *host_dev, unsigned int num) { struct mv88e6xxx_priv_state *ps; - struct mii_bus *bus = dsa_host_dev_to_mii_bus(host_dev); + struct mii_bus *bus; const char *name; + int id, prod_num, rev; + bus = dsa_host_dev_to_mii_bus(host_dev); if (!bus) return NULL; - name = mv88e6xxx_lookup_name(bus, sw_addr, table, num); - if (name) { - ps = devm_kzalloc(dsa_dev, sizeof(*ps), GFP_KERNEL); - if (!ps) - return NULL; - *priv = ps; - ps->bus = dsa_host_dev_to_mii_bus(host_dev); - if (!ps->bus) - return NULL; - ps->sw_addr = sw_addr; - } + id = __mv88e6xxx_reg_read(bus, sw_addr, REG_PORT(0), PORT_SWITCH_ID); + if (id < 0) + return NULL; + + prod_num = (id & 0xfff0) >> 4; + rev = id & 0x000f; + + name = mv88e6xxx_lookup_name(id, table, num); + if (!name) + return NULL; + + ps = devm_kzalloc(dsa_dev, sizeof(*ps), GFP_KERNEL); + if (!ps) + return NULL; + + ps->bus = bus; + ps->sw_addr = sw_addr; + ps->id = id & 0xfff0; + + *priv = ps; + + dev_info(&ps->bus->dev, "switch 0x%x probed: %s, revision %u\n", + prod_num, name, rev); + return name; } From f6271e676b7f62a609f8ee5523a6a8ed47c0f333 Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Sun, 17 Apr 2016 13:23:59 -0400 Subject: [PATCH 0803/1649] net: dsa: mv88e6xxx: add switch info Add a new switch info structure which is meant to store switch models static information, such as product number, name, number of ports, number of databases, etc. Signed-off-by: Vivien Didelot Reviewed-by: Andrew Lunn Signed-off-by: David S. 
Miller --- drivers/net/dsa/mv88e6123.c | 15 +++++++++++---- drivers/net/dsa/mv88e6131.c | 19 ++++++++++++++----- drivers/net/dsa/mv88e6171.c | 19 ++++++++++++++----- drivers/net/dsa/mv88e6352.c | 27 ++++++++++++++++++++------- drivers/net/dsa/mv88e6xxx.c | 18 +++++++++++------- drivers/net/dsa/mv88e6xxx.h | 27 +++++++++++++++++++++++---- 6 files changed, 93 insertions(+), 32 deletions(-) diff --git a/drivers/net/dsa/mv88e6123.c b/drivers/net/dsa/mv88e6123.c index d6921ba144b9..62dffcf915a7 100644 --- a/drivers/net/dsa/mv88e6123.c +++ b/drivers/net/dsa/mv88e6123.c @@ -17,10 +17,17 @@ #include #include "mv88e6xxx.h" -static const struct mv88e6xxx_switch_id mv88e6123_table[] = { - { PORT_SWITCH_ID_6123, "Marvell 88E6123" }, - { PORT_SWITCH_ID_6161, "Marvell 88E6161" }, - { PORT_SWITCH_ID_6165, "Marvell 88E6165" }, +static const struct mv88e6xxx_info mv88e6123_table[] = { + { + .prod_num = PORT_SWITCH_ID_PROD_NUM_6123, + .name = "Marvell 88E6123", + }, { + .prod_num = PORT_SWITCH_ID_PROD_NUM_6161, + .name = "Marvell 88E6161", + }, { + .prod_num = PORT_SWITCH_ID_PROD_NUM_6165, + .name = "Marvell 88E6165", + } }; static const char *mv88e6123_drv_probe(struct device *dsa_dev, diff --git a/drivers/net/dsa/mv88e6131.c b/drivers/net/dsa/mv88e6131.c index 8dc136576fe4..00567156f335 100644 --- a/drivers/net/dsa/mv88e6131.c +++ b/drivers/net/dsa/mv88e6131.c @@ -17,11 +17,20 @@ #include #include "mv88e6xxx.h" -static const struct mv88e6xxx_switch_id mv88e6131_table[] = { - { PORT_SWITCH_ID_6085, "Marvell 88E6085" }, - { PORT_SWITCH_ID_6095, "Marvell 88E6095/88E6095F" }, - { PORT_SWITCH_ID_6131, "Marvell 88E6131" }, - { PORT_SWITCH_ID_6185, "Marvell 88E6185" }, +static const struct mv88e6xxx_info mv88e6131_table[] = { + { + .prod_num = PORT_SWITCH_ID_PROD_NUM_6095, + .name = "Marvell 88E6095/88E6095F", + }, { + .prod_num = PORT_SWITCH_ID_PROD_NUM_6085, + .name = "Marvell 88E6085", + }, { + .prod_num = PORT_SWITCH_ID_PROD_NUM_6131, + .name = "Marvell 88E6131", + }, { + .prod_num = PORT_SWITCH_ID_PROD_NUM_6185, + .name = "Marvell 88E6185", + } }; static const char *mv88e6131_drv_probe(struct device *dsa_dev, diff --git a/drivers/net/dsa/mv88e6171.c b/drivers/net/dsa/mv88e6171.c index ae328750eae8..ea14ab22d313 100644 --- a/drivers/net/dsa/mv88e6171.c +++ b/drivers/net/dsa/mv88e6171.c @@ -17,11 +17,20 @@ #include #include "mv88e6xxx.h" -static const struct mv88e6xxx_switch_id mv88e6171_table[] = { - { PORT_SWITCH_ID_6171, "Marvell 88E6171" }, - { PORT_SWITCH_ID_6175, "Marvell 88E6175" }, - { PORT_SWITCH_ID_6350, "Marvell 88E6350" }, - { PORT_SWITCH_ID_6351, "Marvell 88E6351" }, +static const struct mv88e6xxx_info mv88e6171_table[] = { + { + .prod_num = PORT_SWITCH_ID_PROD_NUM_6171, + .name = "Marvell 88E6171", + }, { + .prod_num = PORT_SWITCH_ID_PROD_NUM_6175, + .name = "Marvell 88E6175", + }, { + .prod_num = PORT_SWITCH_ID_PROD_NUM_6350, + .name = "Marvell 88E6350", + }, { + .prod_num = PORT_SWITCH_ID_PROD_NUM_6351, + .name = "Marvell 88E6351", + } }; static const char *mv88e6171_drv_probe(struct device *dsa_dev, diff --git a/drivers/net/dsa/mv88e6352.c b/drivers/net/dsa/mv88e6352.c index 34f92b17146b..2f72606ecc9e 100644 --- a/drivers/net/dsa/mv88e6352.c +++ b/drivers/net/dsa/mv88e6352.c @@ -22,13 +22,26 @@ #include #include "mv88e6xxx.h" -static const struct mv88e6xxx_switch_id mv88e6352_table[] = { - { PORT_SWITCH_ID_6172, "Marvell 88E6172" }, - { PORT_SWITCH_ID_6176, "Marvell 88E6176" }, - { PORT_SWITCH_ID_6240, "Marvell 88E6240" }, - { PORT_SWITCH_ID_6320, "Marvell 88E6320" }, - { PORT_SWITCH_ID_6321, 
"Marvell 88E6321" }, - { PORT_SWITCH_ID_6352, "Marvell 88E6352" }, +static const struct mv88e6xxx_info mv88e6352_table[] = { + { + .prod_num = PORT_SWITCH_ID_PROD_NUM_6320, + .name = "Marvell 88E6320", + }, { + .prod_num = PORT_SWITCH_ID_PROD_NUM_6321, + .name = "Marvell 88E6321", + }, { + .prod_num = PORT_SWITCH_ID_PROD_NUM_6172, + .name = "Marvell 88E6172", + }, { + .prod_num = PORT_SWITCH_ID_PROD_NUM_6176, + .name = "Marvell 88E6176", + }, { + .prod_num = PORT_SWITCH_ID_PROD_NUM_6240, + .name = "Marvell 88E6240", + }, { + .prod_num = PORT_SWITCH_ID_PROD_NUM_6352, + .name = "Marvell 88E6352", + } }; static const char *mv88e6352_drv_probe(struct device *dsa_dev, diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c index 49f085a8453d..5fb21e059f35 100644 --- a/drivers/net/dsa/mv88e6xxx.c +++ b/drivers/net/dsa/mv88e6xxx.c @@ -3169,24 +3169,25 @@ int mv88e6xxx_get_temp_alarm(struct dsa_switch *ds, bool *alarm) } #endif /* CONFIG_NET_DSA_HWMON */ -static const char * -mv88e6xxx_lookup_name(unsigned int id, const struct mv88e6xxx_switch_id *table, +static const struct mv88e6xxx_info * +mv88e6xxx_lookup_info(unsigned int prod_num, const struct mv88e6xxx_info *table, unsigned int num) { int i; for (i = 0; i < num; ++i) - if (table[i].id == (id & 0xfff0)) - return table[i].name; + if (table[i].prod_num == prod_num) + return &table[i]; return NULL; } const char *mv88e6xxx_drv_probe(struct device *dsa_dev, struct device *host_dev, int sw_addr, void **priv, - const struct mv88e6xxx_switch_id *table, + const struct mv88e6xxx_info *table, unsigned int num) { + const struct mv88e6xxx_info *info; struct mv88e6xxx_priv_state *ps; struct mii_bus *bus; const char *name; @@ -3203,16 +3204,19 @@ const char *mv88e6xxx_drv_probe(struct device *dsa_dev, struct device *host_dev, prod_num = (id & 0xfff0) >> 4; rev = id & 0x000f; - name = mv88e6xxx_lookup_name(id, table, num); - if (!name) + info = mv88e6xxx_lookup_info(prod_num, table, num); + if (!info) return NULL; + name = info->name; + ps = devm_kzalloc(dsa_dev, sizeof(*ps), GFP_KERNEL); if (!ps) return NULL; ps->bus = bus; ps->sw_addr = sw_addr; + ps->info = info; ps->id = id & 0xfff0; *priv = ps; diff --git a/drivers/net/dsa/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx.h index 6513450d60bf..b87f574a84de 100644 --- a/drivers/net/dsa/mv88e6xxx.h +++ b/drivers/net/dsa/mv88e6xxx.h @@ -68,6 +68,23 @@ #define PORT_PCS_CTRL_UNFORCED 0x03 #define PORT_PAUSE_CTRL 0x02 #define PORT_SWITCH_ID 0x03 +#define PORT_SWITCH_ID_PROD_NUM_6085 0x04a +#define PORT_SWITCH_ID_PROD_NUM_6095 0x095 +#define PORT_SWITCH_ID_PROD_NUM_6131 0x106 +#define PORT_SWITCH_ID_PROD_NUM_6320 0x115 +#define PORT_SWITCH_ID_PROD_NUM_6123 0x121 +#define PORT_SWITCH_ID_PROD_NUM_6161 0x161 +#define PORT_SWITCH_ID_PROD_NUM_6165 0x165 +#define PORT_SWITCH_ID_PROD_NUM_6171 0x171 +#define PORT_SWITCH_ID_PROD_NUM_6172 0x172 +#define PORT_SWITCH_ID_PROD_NUM_6175 0x175 +#define PORT_SWITCH_ID_PROD_NUM_6176 0x176 +#define PORT_SWITCH_ID_PROD_NUM_6185 0x1a7 +#define PORT_SWITCH_ID_PROD_NUM_6240 0x240 +#define PORT_SWITCH_ID_PROD_NUM_6321 0x310 +#define PORT_SWITCH_ID_PROD_NUM_6352 0x352 +#define PORT_SWITCH_ID_PROD_NUM_6350 0x371 +#define PORT_SWITCH_ID_PROD_NUM_6351 0x375 #define PORT_SWITCH_ID_6031 0x0310 #define PORT_SWITCH_ID_6035 0x0350 #define PORT_SWITCH_ID_6046 0x0480 @@ -352,9 +369,9 @@ #define MV88E6XXX_N_FID 4096 -struct mv88e6xxx_switch_id { - u16 id; - char *name; +struct mv88e6xxx_info { + u16 prod_num; + const char *name; }; struct mv88e6xxx_atu_entry { @@ -382,6 +399,8 @@ struct 
mv88e6xxx_priv_port { }; struct mv88e6xxx_priv_state { + const struct mv88e6xxx_info *info; + /* The dsa_switch this private structure is related to */ struct dsa_switch *ds; @@ -449,7 +468,7 @@ struct mv88e6xxx_hw_stat { }; int mv88e6xxx_switch_reset(struct dsa_switch *ds, bool ppu_active); const char *mv88e6xxx_drv_probe(struct device *dsa_dev, struct device *host_dev, int sw_addr, void **priv, - const struct mv88e6xxx_switch_id *table, + const struct mv88e6xxx_info *table, unsigned int num); int mv88e6xxx_setup_ports(struct dsa_switch *ds); From 22356476a86fc569c34cbf209d3a247c01e0ef6d Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Sun, 17 Apr 2016 13:24:00 -0400 Subject: [PATCH 0804/1649] net: dsa: mv88e6xxx: add family to info Add an mv88e6xxx_family enum to the info structure for better family identification. Signed-off-by: Vivien Didelot Reviewed-by: Andrew Lunn Signed-off-by: David S. Miller --- drivers/net/dsa/mv88e6123.c | 3 ++ drivers/net/dsa/mv88e6131.c | 4 +++ drivers/net/dsa/mv88e6171.c | 4 +++ drivers/net/dsa/mv88e6352.c | 6 ++++ drivers/net/dsa/mv88e6xxx.c | 71 +++++-------------------------------- drivers/net/dsa/mv88e6xxx.h | 13 +++++++ 6 files changed, 38 insertions(+), 63 deletions(-) diff --git a/drivers/net/dsa/mv88e6123.c b/drivers/net/dsa/mv88e6123.c index 62dffcf915a7..776e6ef3e29c 100644 --- a/drivers/net/dsa/mv88e6123.c +++ b/drivers/net/dsa/mv88e6123.c @@ -20,12 +20,15 @@ static const struct mv88e6xxx_info mv88e6123_table[] = { { .prod_num = PORT_SWITCH_ID_PROD_NUM_6123, + .family = MV88E6XXX_FAMILY_6165, .name = "Marvell 88E6123", }, { .prod_num = PORT_SWITCH_ID_PROD_NUM_6161, + .family = MV88E6XXX_FAMILY_6165, .name = "Marvell 88E6161", }, { .prod_num = PORT_SWITCH_ID_PROD_NUM_6165, + .family = MV88E6XXX_FAMILY_6165, .name = "Marvell 88E6165", } }; diff --git a/drivers/net/dsa/mv88e6131.c b/drivers/net/dsa/mv88e6131.c index 00567156f335..1986651ac054 100644 --- a/drivers/net/dsa/mv88e6131.c +++ b/drivers/net/dsa/mv88e6131.c @@ -20,15 +20,19 @@ static const struct mv88e6xxx_info mv88e6131_table[] = { { .prod_num = PORT_SWITCH_ID_PROD_NUM_6095, + .family = MV88E6XXX_FAMILY_6095, .name = "Marvell 88E6095/88E6095F", }, { .prod_num = PORT_SWITCH_ID_PROD_NUM_6085, + .family = MV88E6XXX_FAMILY_6097, .name = "Marvell 88E6085", }, { .prod_num = PORT_SWITCH_ID_PROD_NUM_6131, + .family = MV88E6XXX_FAMILY_6185, .name = "Marvell 88E6131", }, { .prod_num = PORT_SWITCH_ID_PROD_NUM_6185, + .family = MV88E6XXX_FAMILY_6185, .name = "Marvell 88E6185", } }; diff --git a/drivers/net/dsa/mv88e6171.c b/drivers/net/dsa/mv88e6171.c index ea14ab22d313..9a3b1e19b01a 100644 --- a/drivers/net/dsa/mv88e6171.c +++ b/drivers/net/dsa/mv88e6171.c @@ -20,15 +20,19 @@ static const struct mv88e6xxx_info mv88e6171_table[] = { { .prod_num = PORT_SWITCH_ID_PROD_NUM_6171, + .family = MV88E6XXX_FAMILY_6351, .name = "Marvell 88E6171", }, { .prod_num = PORT_SWITCH_ID_PROD_NUM_6175, + .family = MV88E6XXX_FAMILY_6351, .name = "Marvell 88E6175", }, { .prod_num = PORT_SWITCH_ID_PROD_NUM_6350, + .family = MV88E6XXX_FAMILY_6351, .name = "Marvell 88E6350", }, { .prod_num = PORT_SWITCH_ID_PROD_NUM_6351, + .family = MV88E6XXX_FAMILY_6351, .name = "Marvell 88E6351", } }; diff --git a/drivers/net/dsa/mv88e6352.c b/drivers/net/dsa/mv88e6352.c index 2f72606ecc9e..bae62eb9c3b5 100644 --- a/drivers/net/dsa/mv88e6352.c +++ b/drivers/net/dsa/mv88e6352.c @@ -25,21 +25,27 @@ static const struct mv88e6xxx_info mv88e6352_table[] = { { .prod_num = PORT_SWITCH_ID_PROD_NUM_6320, + .family = MV88E6XXX_FAMILY_6320, .name = 
"Marvell 88E6320", }, { .prod_num = PORT_SWITCH_ID_PROD_NUM_6321, + .family = MV88E6XXX_FAMILY_6320, .name = "Marvell 88E6321", }, { .prod_num = PORT_SWITCH_ID_PROD_NUM_6172, + .family = MV88E6XXX_FAMILY_6352, .name = "Marvell 88E6172", }, { .prod_num = PORT_SWITCH_ID_PROD_NUM_6176, + .family = MV88E6XXX_FAMILY_6352, .name = "Marvell 88E6176", }, { .prod_num = PORT_SWITCH_ID_PROD_NUM_6240, + .family = MV88E6XXX_FAMILY_6352, .name = "Marvell 88E6240", }, { .prod_num = PORT_SWITCH_ID_PROD_NUM_6352, + .family = MV88E6XXX_FAMILY_6352, .name = "Marvell 88E6352", } }; diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c index 5fb21e059f35..8f8a1cf59fda 100644 --- a/drivers/net/dsa/mv88e6xxx.c +++ b/drivers/net/dsa/mv88e6xxx.c @@ -402,111 +402,56 @@ static bool mv88e6xxx_6065_family(struct dsa_switch *ds) { struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); - switch (ps->id) { - case PORT_SWITCH_ID_6031: - case PORT_SWITCH_ID_6061: - case PORT_SWITCH_ID_6035: - case PORT_SWITCH_ID_6065: - return true; - } - return false; + return ps->info->family == MV88E6XXX_FAMILY_6065; } static bool mv88e6xxx_6095_family(struct dsa_switch *ds) { struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); - switch (ps->id) { - case PORT_SWITCH_ID_6092: - case PORT_SWITCH_ID_6095: - return true; - } - return false; + return ps->info->family == MV88E6XXX_FAMILY_6095; } static bool mv88e6xxx_6097_family(struct dsa_switch *ds) { struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); - switch (ps->id) { - case PORT_SWITCH_ID_6046: - case PORT_SWITCH_ID_6085: - case PORT_SWITCH_ID_6096: - case PORT_SWITCH_ID_6097: - return true; - } - return false; + return ps->info->family == MV88E6XXX_FAMILY_6097; } static bool mv88e6xxx_6165_family(struct dsa_switch *ds) { struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); - switch (ps->id) { - case PORT_SWITCH_ID_6123: - case PORT_SWITCH_ID_6161: - case PORT_SWITCH_ID_6165: - return true; - } - return false; + return ps->info->family == MV88E6XXX_FAMILY_6165; } static bool mv88e6xxx_6185_family(struct dsa_switch *ds) { struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); - switch (ps->id) { - case PORT_SWITCH_ID_6121: - case PORT_SWITCH_ID_6122: - case PORT_SWITCH_ID_6152: - case PORT_SWITCH_ID_6155: - case PORT_SWITCH_ID_6182: - case PORT_SWITCH_ID_6185: - case PORT_SWITCH_ID_6108: - case PORT_SWITCH_ID_6131: - return true; - } - return false; + return ps->info->family == MV88E6XXX_FAMILY_6185; } static bool mv88e6xxx_6320_family(struct dsa_switch *ds) { struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); - switch (ps->id) { - case PORT_SWITCH_ID_6320: - case PORT_SWITCH_ID_6321: - return true; - } - return false; + return ps->info->family == MV88E6XXX_FAMILY_6320; } static bool mv88e6xxx_6351_family(struct dsa_switch *ds) { struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); - switch (ps->id) { - case PORT_SWITCH_ID_6171: - case PORT_SWITCH_ID_6175: - case PORT_SWITCH_ID_6350: - case PORT_SWITCH_ID_6351: - return true; - } - return false; + return ps->info->family == MV88E6XXX_FAMILY_6351; } static bool mv88e6xxx_6352_family(struct dsa_switch *ds) { struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); - switch (ps->id) { - case PORT_SWITCH_ID_6172: - case PORT_SWITCH_ID_6176: - case PORT_SWITCH_ID_6240: - case PORT_SWITCH_ID_6352: - return true; - } - return false; + return ps->info->family == MV88E6XXX_FAMILY_6352; } static unsigned int mv88e6xxx_num_databases(struct dsa_switch *ds) diff --git a/drivers/net/dsa/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx.h index b87f574a84de..b4eec9a8c8ff 
100644 --- a/drivers/net/dsa/mv88e6xxx.h +++ b/drivers/net/dsa/mv88e6xxx.h @@ -369,7 +369,20 @@ #define MV88E6XXX_N_FID 4096 +enum mv88e6xxx_family { + MV88E6XXX_FAMILY_NONE, + MV88E6XXX_FAMILY_6065, /* 6031 6035 6061 6065 */ + MV88E6XXX_FAMILY_6095, /* 6092 6095 */ + MV88E6XXX_FAMILY_6097, /* 6046 6085 6096 6097 */ + MV88E6XXX_FAMILY_6165, /* 6123 6161 6165 */ + MV88E6XXX_FAMILY_6185, /* 6108 6121 6122 6131 6152 6155 6182 6185 */ + MV88E6XXX_FAMILY_6320, /* 6320 6321 */ + MV88E6XXX_FAMILY_6351, /* 6171 6175 6350 6351 */ + MV88E6XXX_FAMILY_6352, /* 6172 6176 6240 6352 */ +}; + struct mv88e6xxx_info { + enum mv88e6xxx_family family; u16 prod_num; const char *name; }; From 009a2b9843bf0b1a85fbf79f76e1de4995de527c Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Sun, 17 Apr 2016 13:24:01 -0400 Subject: [PATCH 0805/1649] net: dsa: mv88e6xxx: add number of ports to info Drop the ps->num_ports variable in favor of a new member of the info structure. This removes the need to assign it at setup time. Signed-off-by: Vivien Didelot Reviewed-by: Andrew Lunn Signed-off-by: David S. Miller --- drivers/net/dsa/mv88e6123.c | 16 +++------------- drivers/net/dsa/mv88e6131.c | 22 +++++---------------- drivers/net/dsa/mv88e6171.c | 7 ++++--- drivers/net/dsa/mv88e6352.c | 8 ++++++-- drivers/net/dsa/mv88e6xxx.c | 38 ++++++++++++++++++------------------- drivers/net/dsa/mv88e6xxx.h | 3 +-- 6 files changed, 38 insertions(+), 56 deletions(-) diff --git a/drivers/net/dsa/mv88e6123.c b/drivers/net/dsa/mv88e6123.c index 776e6ef3e29c..0bf43bb17993 100644 --- a/drivers/net/dsa/mv88e6123.c +++ b/drivers/net/dsa/mv88e6123.c @@ -22,14 +22,17 @@ static const struct mv88e6xxx_info mv88e6123_table[] = { .prod_num = PORT_SWITCH_ID_PROD_NUM_6123, .family = MV88E6XXX_FAMILY_6165, .name = "Marvell 88E6123", + .num_ports = 3, }, { .prod_num = PORT_SWITCH_ID_PROD_NUM_6161, .family = MV88E6XXX_FAMILY_6165, .name = "Marvell 88E6161", + .num_ports = 6, }, { .prod_num = PORT_SWITCH_ID_PROD_NUM_6165, .family = MV88E6XXX_FAMILY_6165, .name = "Marvell 88E6165", + .num_ports = 6, } }; @@ -80,25 +83,12 @@ static int mv88e6123_setup_global(struct dsa_switch *ds) static int mv88e6123_setup(struct dsa_switch *ds) { - struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); int ret; ret = mv88e6xxx_setup_common(ds); if (ret < 0) return ret; - switch (ps->id) { - case PORT_SWITCH_ID_6123: - ps->num_ports = 3; - break; - case PORT_SWITCH_ID_6161: - case PORT_SWITCH_ID_6165: - ps->num_ports = 6; - break; - default: - return -ENODEV; - } - ret = mv88e6xxx_switch_reset(ds, false); if (ret < 0) return ret; diff --git a/drivers/net/dsa/mv88e6131.c b/drivers/net/dsa/mv88e6131.c index 1986651ac054..c01bbb1e857e 100644 --- a/drivers/net/dsa/mv88e6131.c +++ b/drivers/net/dsa/mv88e6131.c @@ -22,18 +22,22 @@ static const struct mv88e6xxx_info mv88e6131_table[] = { .prod_num = PORT_SWITCH_ID_PROD_NUM_6095, .family = MV88E6XXX_FAMILY_6095, .name = "Marvell 88E6095/88E6095F", + .num_ports = 11, }, { .prod_num = PORT_SWITCH_ID_PROD_NUM_6085, .family = MV88E6XXX_FAMILY_6097, .name = "Marvell 88E6085", + .num_ports = 10, }, { .prod_num = PORT_SWITCH_ID_PROD_NUM_6131, .family = MV88E6XXX_FAMILY_6185, .name = "Marvell 88E6131", + .num_ports = 8, }, { .prod_num = PORT_SWITCH_ID_PROD_NUM_6185, .family = MV88E6XXX_FAMILY_6185, .name = "Marvell 88E6185", + .num_ports = 10, } }; @@ -110,7 +114,6 @@ static int mv88e6131_setup_global(struct dsa_switch *ds) static int mv88e6131_setup(struct dsa_switch *ds) { - struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); int ret; ret = 
mv88e6xxx_setup_common(ds); @@ -119,21 +122,6 @@ static int mv88e6131_setup(struct dsa_switch *ds) mv88e6xxx_ppu_state_init(ds); - switch (ps->id) { - case PORT_SWITCH_ID_6085: - case PORT_SWITCH_ID_6185: - ps->num_ports = 10; - break; - case PORT_SWITCH_ID_6095: - ps->num_ports = 11; - break; - case PORT_SWITCH_ID_6131: - ps->num_ports = 8; - break; - default: - return -ENODEV; - } - ret = mv88e6xxx_switch_reset(ds, false); if (ret < 0) return ret; @@ -149,7 +137,7 @@ static int mv88e6131_port_to_phy_addr(struct dsa_switch *ds, int port) { struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); - if (port >= 0 && port < ps->num_ports) + if (port >= 0 && port < ps->info->num_ports) return port; return -EINVAL; diff --git a/drivers/net/dsa/mv88e6171.c b/drivers/net/dsa/mv88e6171.c index 9a3b1e19b01a..172824fe1dc0 100644 --- a/drivers/net/dsa/mv88e6171.c +++ b/drivers/net/dsa/mv88e6171.c @@ -22,18 +22,22 @@ static const struct mv88e6xxx_info mv88e6171_table[] = { .prod_num = PORT_SWITCH_ID_PROD_NUM_6171, .family = MV88E6XXX_FAMILY_6351, .name = "Marvell 88E6171", + .num_ports = 7, }, { .prod_num = PORT_SWITCH_ID_PROD_NUM_6175, .family = MV88E6XXX_FAMILY_6351, .name = "Marvell 88E6175", + .num_ports = 7, }, { .prod_num = PORT_SWITCH_ID_PROD_NUM_6350, .family = MV88E6XXX_FAMILY_6351, .name = "Marvell 88E6350", + .num_ports = 7, }, { .prod_num = PORT_SWITCH_ID_PROD_NUM_6351, .family = MV88E6XXX_FAMILY_6351, .name = "Marvell 88E6351", + .num_ports = 7, } }; @@ -86,15 +90,12 @@ static int mv88e6171_setup_global(struct dsa_switch *ds) static int mv88e6171_setup(struct dsa_switch *ds) { - struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); int ret; ret = mv88e6xxx_setup_common(ds); if (ret < 0) return ret; - ps->num_ports = 7; - ret = mv88e6xxx_switch_reset(ds, true); if (ret < 0) return ret; diff --git a/drivers/net/dsa/mv88e6352.c b/drivers/net/dsa/mv88e6352.c index bae62eb9c3b5..12b9a7b5cb31 100644 --- a/drivers/net/dsa/mv88e6352.c +++ b/drivers/net/dsa/mv88e6352.c @@ -27,26 +27,32 @@ static const struct mv88e6xxx_info mv88e6352_table[] = { .prod_num = PORT_SWITCH_ID_PROD_NUM_6320, .family = MV88E6XXX_FAMILY_6320, .name = "Marvell 88E6320", + .num_ports = 7, }, { .prod_num = PORT_SWITCH_ID_PROD_NUM_6321, .family = MV88E6XXX_FAMILY_6320, .name = "Marvell 88E6321", + .num_ports = 7, }, { .prod_num = PORT_SWITCH_ID_PROD_NUM_6172, .family = MV88E6XXX_FAMILY_6352, .name = "Marvell 88E6172", + .num_ports = 7, }, { .prod_num = PORT_SWITCH_ID_PROD_NUM_6176, .family = MV88E6XXX_FAMILY_6352, .name = "Marvell 88E6176", + .num_ports = 7, }, { .prod_num = PORT_SWITCH_ID_PROD_NUM_6240, .family = MV88E6XXX_FAMILY_6352, .name = "Marvell 88E6240", + .num_ports = 7, }, { .prod_num = PORT_SWITCH_ID_PROD_NUM_6352, .family = MV88E6XXX_FAMILY_6352, .name = "Marvell 88E6352", + .num_ports = 7, } }; @@ -104,8 +110,6 @@ static int mv88e6352_setup(struct dsa_switch *ds) if (ret < 0) return ret; - ps->num_ports = 7; - mutex_init(&ps->eeprom_mutex); ret = mv88e6xxx_switch_reset(ds, true); diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c index 8f8a1cf59fda..c952d91a5b88 100644 --- a/drivers/net/dsa/mv88e6xxx.c +++ b/drivers/net/dsa/mv88e6xxx.c @@ -551,7 +551,7 @@ void mv88e6xxx_adjust_link(struct dsa_switch *ds, int port, reg |= PORT_PCS_CTRL_DUPLEX_FULL; if ((mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds)) && - (port >= ps->num_ports - 2)) { + (port >= ps->info->num_ports - 2)) { if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) reg |= PORT_PCS_CTRL_RGMII_DELAY_RXCLK; if (phydev->interface == 
PHY_INTERFACE_MODE_RGMII_TXID) @@ -1132,7 +1132,7 @@ static int _mv88e6xxx_port_based_vlan_map(struct dsa_switch *ds, int port) { struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); struct net_device *bridge = ps->ports[port].bridge_dev; - const u16 mask = (1 << ps->num_ports) - 1; + const u16 mask = (1 << ps->info->num_ports) - 1; u16 output_ports = 0; int reg; int i; @@ -1141,7 +1141,7 @@ static int _mv88e6xxx_port_based_vlan_map(struct dsa_switch *ds, int port) if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)) { output_ports = mask; } else { - for (i = 0; i < ps->num_ports; ++i) { + for (i = 0; i < ps->info->num_ports; ++i) { /* allow sending frames to every group member */ if (bridge && ps->ports[i].bridge_dev == bridge) output_ports |= BIT(i); @@ -1282,7 +1282,7 @@ static int _mv88e6xxx_vtu_stu_data_read(struct dsa_switch *ds, regs[i] = ret; } - for (i = 0; i < ps->num_ports; ++i) { + for (i = 0; i < ps->info->num_ports; ++i) { unsigned int shift = (i % 4) * 4 + nibble_offset; u16 reg = regs[i / 4]; @@ -1301,7 +1301,7 @@ static int _mv88e6xxx_vtu_stu_data_write(struct dsa_switch *ds, int i; int ret; - for (i = 0; i < ps->num_ports; ++i) { + for (i = 0; i < ps->info->num_ports; ++i) { unsigned int shift = (i % 4) * 4 + nibble_offset; u8 data = entry->data[i]; @@ -1633,7 +1633,7 @@ static int _mv88e6xxx_fid_new(struct dsa_switch *ds, u16 *fid) bitmap_zero(fid_bitmap, MV88E6XXX_N_FID); /* Set every FID bit used by the (un)bridged ports */ - for (i = 0; i < ps->num_ports; ++i) { + for (i = 0; i < ps->info->num_ports; ++i) { err = _mv88e6xxx_port_fid_get(ds, i, fid); if (err) return err; @@ -1683,7 +1683,7 @@ static int _mv88e6xxx_vtu_new(struct dsa_switch *ds, u16 vid, return err; /* exclude all ports except the CPU and DSA ports */ - for (i = 0; i < ps->num_ports; ++i) + for (i = 0; i < ps->info->num_ports; ++i) vlan.data[i] = dsa_is_cpu_port(ds, i) || dsa_is_dsa_port(ds, i) ? 
GLOBAL_VTU_DATA_MEMBER_TAG_UNMODIFIED : GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER; @@ -1772,7 +1772,7 @@ static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port, if (vlan.vid > vid_end) break; - for (i = 0; i < ps->num_ports; ++i) { + for (i = 0; i < ps->info->num_ports; ++i) { if (dsa_is_dsa_port(ds, i) || dsa_is_cpu_port(ds, i)) continue; @@ -1921,7 +1921,7 @@ static int _mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port, u16 vid) /* keep the VLAN unless all ports are excluded */ vlan.valid = false; - for (i = 0; i < ps->num_ports; ++i) { + for (i = 0; i < ps->info->num_ports; ++i) { if (dsa_is_cpu_port(ds, i) || dsa_is_dsa_port(ds, i)) continue; @@ -2230,11 +2230,11 @@ int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port, mutex_lock(&ps->smi_mutex); /* Get or create the bridge FID and assign it to the port */ - for (i = 0; i < ps->num_ports; ++i) + for (i = 0; i < ps->info->num_ports; ++i) if (ps->ports[i].bridge_dev == bridge) break; - if (i < ps->num_ports) + if (i < ps->info->num_ports) err = _mv88e6xxx_port_fid_get(ds, i, &fid); else err = _mv88e6xxx_fid_new(ds, &fid); @@ -2248,7 +2248,7 @@ int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port, /* Assign the bridge and remap each port's VLANTable */ ps->ports[port].bridge_dev = bridge; - for (i = 0; i < ps->num_ports; ++i) { + for (i = 0; i < ps->info->num_ports; ++i) { if (ps->ports[i].bridge_dev == bridge) { err = _mv88e6xxx_port_based_vlan_map(ds, i); if (err) @@ -2279,7 +2279,7 @@ void mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port) /* Unassign the bridge and remap each port's VLANTable */ ps->ports[port].bridge_dev = NULL; - for (i = 0; i < ps->num_ports; ++i) + for (i = 0; i < ps->info->num_ports; ++i) if (i == port || ps->ports[i].bridge_dev == bridge) if (_mv88e6xxx_port_based_vlan_map(ds, i)) netdev_warn(ds->ports[i], "failed to remap\n"); @@ -2298,7 +2298,7 @@ static void mv88e6xxx_bridge_work(struct work_struct *work) mutex_lock(&ps->smi_mutex); - for (port = 0; port < ps->num_ports; ++port) + for (port = 0; port < ps->info->num_ports; ++port) if (test_and_clear_bit(port, ps->port_state_update_mask) && _mv88e6xxx_port_state(ds, port, ps->ports[port].state)) netdev_warn(ds->ports[port], "failed to update state to %s\n", @@ -2630,7 +2630,7 @@ int mv88e6xxx_setup_ports(struct dsa_switch *ds) int ret; int i; - for (i = 0; i < ps->num_ports; i++) { + for (i = 0; i < ps->info->num_ports; i++) { ret = mv88e6xxx_setup_port(ds, i); if (ret < 0) return ret; @@ -2737,7 +2737,7 @@ int mv88e6xxx_setup_global(struct dsa_switch *ds) err = _mv88e6xxx_reg_write(ds, REG_GLOBAL2, GLOBAL2_TRUNK_MASK, 0x8000 | (i << GLOBAL2_TRUNK_MASK_NUM_SHIFT) | - ((1 << ps->num_ports) - 1)); + ((1 << ps->info->num_ports) - 1)); if (err) goto unlock; } @@ -2790,7 +2790,7 @@ int mv88e6xxx_setup_global(struct dsa_switch *ds) * ingress rate limit registers to their initial * state. */ - for (i = 0; i < ps->num_ports; i++) { + for (i = 0; i < ps->info->num_ports; i++) { err = _mv88e6xxx_reg_write(ds, REG_GLOBAL2, GLOBAL2_INGRESS_OP, 0x9000 | (i << 8)); @@ -2835,7 +2835,7 @@ int mv88e6xxx_switch_reset(struct dsa_switch *ds, bool ppu_active) mutex_lock(&ps->smi_mutex); /* Set all ports to the disabled state. 
*/ - for (i = 0; i < ps->num_ports; i++) { + for (i = 0; i < ps->info->num_ports; i++) { ret = _mv88e6xxx_reg_read(ds, REG_PORT(i), PORT_CONTROL); if (ret < 0) goto unlock; @@ -2918,7 +2918,7 @@ static int mv88e6xxx_port_to_phy_addr(struct dsa_switch *ds, int port) { struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); - if (port >= 0 && port < ps->num_ports) + if (port >= 0 && port < ps->info->num_ports) return port; return -EINVAL; } diff --git a/drivers/net/dsa/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx.h index b4eec9a8c8ff..801486aefe78 100644 --- a/drivers/net/dsa/mv88e6xxx.h +++ b/drivers/net/dsa/mv88e6xxx.h @@ -385,6 +385,7 @@ struct mv88e6xxx_info { enum mv88e6xxx_family family; u16 prod_num; const char *name; + unsigned int num_ports; }; struct mv88e6xxx_atu_entry { @@ -456,8 +457,6 @@ struct mv88e6xxx_priv_state { struct mutex eeprom_mutex; int id; /* switch product id */ - int num_ports; /* number of switch ports */ - struct mv88e6xxx_priv_port ports[DSA_MAX_PORTS]; DECLARE_BITMAP(port_state_update_mask, DSA_MAX_PORTS); From cd5a2c82bad9e59f2674befc07c12effa0aea49d Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Sun, 17 Apr 2016 13:24:02 -0400 Subject: [PATCH 0806/1649] net: dsa: mv88e6xxx: add number of db to info Add the number of databases to the info structure. Signed-off-by: Vivien Didelot Reviewed-by: Andrew Lunn Signed-off-by: David S. Miller --- drivers/net/dsa/mv88e6123.c | 3 +++ drivers/net/dsa/mv88e6131.c | 4 ++++ drivers/net/dsa/mv88e6171.c | 4 ++++ drivers/net/dsa/mv88e6352.c | 6 ++++++ drivers/net/dsa/mv88e6xxx.c | 19 +------------------ drivers/net/dsa/mv88e6xxx.h | 1 + 6 files changed, 19 insertions(+), 18 deletions(-) diff --git a/drivers/net/dsa/mv88e6123.c b/drivers/net/dsa/mv88e6123.c index 0bf43bb17993..534ebc84de84 100644 --- a/drivers/net/dsa/mv88e6123.c +++ b/drivers/net/dsa/mv88e6123.c @@ -22,16 +22,19 @@ static const struct mv88e6xxx_info mv88e6123_table[] = { .prod_num = PORT_SWITCH_ID_PROD_NUM_6123, .family = MV88E6XXX_FAMILY_6165, .name = "Marvell 88E6123", + .num_databases = 4096, .num_ports = 3, }, { .prod_num = PORT_SWITCH_ID_PROD_NUM_6161, .family = MV88E6XXX_FAMILY_6165, .name = "Marvell 88E6161", + .num_databases = 4096, .num_ports = 6, }, { .prod_num = PORT_SWITCH_ID_PROD_NUM_6165, .family = MV88E6XXX_FAMILY_6165, .name = "Marvell 88E6165", + .num_databases = 4096, .num_ports = 6, } }; diff --git a/drivers/net/dsa/mv88e6131.c b/drivers/net/dsa/mv88e6131.c index c01bbb1e857e..c3eb9a884cfd 100644 --- a/drivers/net/dsa/mv88e6131.c +++ b/drivers/net/dsa/mv88e6131.c @@ -22,21 +22,25 @@ static const struct mv88e6xxx_info mv88e6131_table[] = { .prod_num = PORT_SWITCH_ID_PROD_NUM_6095, .family = MV88E6XXX_FAMILY_6095, .name = "Marvell 88E6095/88E6095F", + .num_databases = 256, .num_ports = 11, }, { .prod_num = PORT_SWITCH_ID_PROD_NUM_6085, .family = MV88E6XXX_FAMILY_6097, .name = "Marvell 88E6085", + .num_databases = 4096, .num_ports = 10, }, { .prod_num = PORT_SWITCH_ID_PROD_NUM_6131, .family = MV88E6XXX_FAMILY_6185, .name = "Marvell 88E6131", + .num_databases = 256, .num_ports = 8, }, { .prod_num = PORT_SWITCH_ID_PROD_NUM_6185, .family = MV88E6XXX_FAMILY_6185, .name = "Marvell 88E6185", + .num_databases = 256, .num_ports = 10, } }; diff --git a/drivers/net/dsa/mv88e6171.c b/drivers/net/dsa/mv88e6171.c index 172824fe1dc0..841ffe14ef75 100644 --- a/drivers/net/dsa/mv88e6171.c +++ b/drivers/net/dsa/mv88e6171.c @@ -22,21 +22,25 @@ static const struct mv88e6xxx_info mv88e6171_table[] = { .prod_num = PORT_SWITCH_ID_PROD_NUM_6171, .family = 
MV88E6XXX_FAMILY_6351, .name = "Marvell 88E6171", + .num_databases = 4096, .num_ports = 7, }, { .prod_num = PORT_SWITCH_ID_PROD_NUM_6175, .family = MV88E6XXX_FAMILY_6351, .name = "Marvell 88E6175", + .num_databases = 4096, .num_ports = 7, }, { .prod_num = PORT_SWITCH_ID_PROD_NUM_6350, .family = MV88E6XXX_FAMILY_6351, .name = "Marvell 88E6350", + .num_databases = 4096, .num_ports = 7, }, { .prod_num = PORT_SWITCH_ID_PROD_NUM_6351, .family = MV88E6XXX_FAMILY_6351, .name = "Marvell 88E6351", + .num_databases = 4096, .num_ports = 7, } }; diff --git a/drivers/net/dsa/mv88e6352.c b/drivers/net/dsa/mv88e6352.c index 12b9a7b5cb31..4afc24df56b8 100644 --- a/drivers/net/dsa/mv88e6352.c +++ b/drivers/net/dsa/mv88e6352.c @@ -27,31 +27,37 @@ static const struct mv88e6xxx_info mv88e6352_table[] = { .prod_num = PORT_SWITCH_ID_PROD_NUM_6320, .family = MV88E6XXX_FAMILY_6320, .name = "Marvell 88E6320", + .num_databases = 4096, .num_ports = 7, }, { .prod_num = PORT_SWITCH_ID_PROD_NUM_6321, .family = MV88E6XXX_FAMILY_6320, .name = "Marvell 88E6321", + .num_databases = 4096, .num_ports = 7, }, { .prod_num = PORT_SWITCH_ID_PROD_NUM_6172, .family = MV88E6XXX_FAMILY_6352, .name = "Marvell 88E6172", + .num_databases = 4096, .num_ports = 7, }, { .prod_num = PORT_SWITCH_ID_PROD_NUM_6176, .family = MV88E6XXX_FAMILY_6352, .name = "Marvell 88E6176", + .num_databases = 4096, .num_ports = 7, }, { .prod_num = PORT_SWITCH_ID_PROD_NUM_6240, .family = MV88E6XXX_FAMILY_6352, .name = "Marvell 88E6240", + .num_databases = 4096, .num_ports = 7, }, { .prod_num = PORT_SWITCH_ID_PROD_NUM_6352, .family = MV88E6XXX_FAMILY_6352, .name = "Marvell 88E6352", + .num_databases = 4096, .num_ports = 7, } }; diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c index c952d91a5b88..67b1dd1c22f7 100644 --- a/drivers/net/dsa/mv88e6xxx.c +++ b/drivers/net/dsa/mv88e6xxx.c @@ -458,24 +458,7 @@ static unsigned int mv88e6xxx_num_databases(struct dsa_switch *ds) { struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); - /* The following devices have 4-bit identifiers for 16 databases */ - if (ps->id == PORT_SWITCH_ID_6061) - return 16; - - /* The following devices have 6-bit identifiers for 64 databases */ - if (ps->id == PORT_SWITCH_ID_6065) - return 64; - - /* The following devices have 8-bit identifiers for 256 databases */ - if (mv88e6xxx_6095_family(ds) || mv88e6xxx_6185_family(ds)) - return 256; - - /* The following devices have 12-bit identifiers for 4096 databases */ - if (mv88e6xxx_6097_family(ds) || mv88e6xxx_6165_family(ds) || - mv88e6xxx_6351_family(ds) || mv88e6xxx_6352_family(ds)) - return 4096; - - return 0; + return ps->info->num_databases; } static bool mv88e6xxx_has_fid_reg(struct dsa_switch *ds) diff --git a/drivers/net/dsa/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx.h index 801486aefe78..8eeafff27a82 100644 --- a/drivers/net/dsa/mv88e6xxx.h +++ b/drivers/net/dsa/mv88e6xxx.h @@ -385,6 +385,7 @@ struct mv88e6xxx_info { enum mv88e6xxx_family family; u16 prod_num; const char *name; + unsigned int num_databases; unsigned int num_ports; }; From d967ecbc0b875081624857f27df4ed23c5eca106 Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Sun, 17 Apr 2016 13:24:03 -0400 Subject: [PATCH 0807/1649] net: dsa: mv88e6xxx: remove switch ID from ps ps->id is not needed anymore, so remove it as well as the related defined values. Signed-off-by: Vivien Didelot Reviewed-by: Andrew Lunn Signed-off-by: David S. 
Miller --- drivers/net/dsa/mv88e6xxx.c | 1 - drivers/net/dsa/mv88e6xxx.h | 32 -------------------------------- 2 files changed, 33 deletions(-) diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c index 67b1dd1c22f7..1dd525d8dc0a 100644 --- a/drivers/net/dsa/mv88e6xxx.c +++ b/drivers/net/dsa/mv88e6xxx.c @@ -3145,7 +3145,6 @@ const char *mv88e6xxx_drv_probe(struct device *dsa_dev, struct device *host_dev, ps->bus = bus; ps->sw_addr = sw_addr; ps->info = info; - ps->id = id & 0xfff0; *priv = ps; diff --git a/drivers/net/dsa/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx.h index 8eeafff27a82..0dbe2d1779dd 100644 --- a/drivers/net/dsa/mv88e6xxx.h +++ b/drivers/net/dsa/mv88e6xxx.h @@ -85,37 +85,6 @@ #define PORT_SWITCH_ID_PROD_NUM_6352 0x352 #define PORT_SWITCH_ID_PROD_NUM_6350 0x371 #define PORT_SWITCH_ID_PROD_NUM_6351 0x375 -#define PORT_SWITCH_ID_6031 0x0310 -#define PORT_SWITCH_ID_6035 0x0350 -#define PORT_SWITCH_ID_6046 0x0480 -#define PORT_SWITCH_ID_6061 0x0610 -#define PORT_SWITCH_ID_6065 0x0650 -#define PORT_SWITCH_ID_6085 0x04a0 -#define PORT_SWITCH_ID_6092 0x0970 -#define PORT_SWITCH_ID_6095 0x0950 -#define PORT_SWITCH_ID_6096 0x0980 -#define PORT_SWITCH_ID_6097 0x0990 -#define PORT_SWITCH_ID_6108 0x1070 -#define PORT_SWITCH_ID_6121 0x1040 -#define PORT_SWITCH_ID_6122 0x1050 -#define PORT_SWITCH_ID_6123 0x1210 -#define PORT_SWITCH_ID_6131 0x1060 -#define PORT_SWITCH_ID_6152 0x1a40 -#define PORT_SWITCH_ID_6155 0x1a50 -#define PORT_SWITCH_ID_6161 0x1610 -#define PORT_SWITCH_ID_6165 0x1650 -#define PORT_SWITCH_ID_6171 0x1710 -#define PORT_SWITCH_ID_6172 0x1720 -#define PORT_SWITCH_ID_6175 0x1750 -#define PORT_SWITCH_ID_6176 0x1760 -#define PORT_SWITCH_ID_6182 0x1a60 -#define PORT_SWITCH_ID_6185 0x1a70 -#define PORT_SWITCH_ID_6240 0x2400 -#define PORT_SWITCH_ID_6320 0x1150 -#define PORT_SWITCH_ID_6321 0x3100 -#define PORT_SWITCH_ID_6350 0x3710 -#define PORT_SWITCH_ID_6351 0x3750 -#define PORT_SWITCH_ID_6352 0x3520 #define PORT_CONTROL 0x04 #define PORT_CONTROL_USE_CORE_TAG BIT(15) #define PORT_CONTROL_DROP_ON_LOCK BIT(14) @@ -457,7 +426,6 @@ struct mv88e6xxx_priv_state { */ struct mutex eeprom_mutex; - int id; /* switch product id */ struct mv88e6xxx_priv_port ports[DSA_MAX_PORTS]; DECLARE_BITMAP(port_state_update_mask, DSA_MAX_PORTS); From 550bce59baf3f3059cd4ae1e268f08f2d2cb1d5c Mon Sep 17 00:00:00 2001 From: Roopa Prabhu Date: Fri, 15 Apr 2016 20:36:25 -0700 Subject: [PATCH 0808/1649] rtnetlink: rtnl_fill_stats: avoid an unnecessary stats copy This patch passes the netlink attr data ptr directly to dev_get_stats, thus eliminating a stats copy. Suggested-by: David Miller Signed-off-by: Roopa Prabhu Signed-off-by: David S.
Miller --- net/core/rtnetlink.c | 27 ++++++++++----------------- 1 file changed, 10 insertions(+), 17 deletions(-) diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index a75f7e94b445..a7a3d345134a 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -808,11 +808,6 @@ static void copy_rtnl_link_stats(struct rtnl_link_stats *a, a->rx_nohandler = b->rx_nohandler; } -static void copy_rtnl_link_stats64(void *v, const struct rtnl_link_stats64 *b) -{ - memcpy(v, b, sizeof(*b)); -} - /* All VF info */ static inline int rtnl_vfinfo_size(const struct net_device *dev, u32 ext_filter_mask) @@ -1054,25 +1049,23 @@ static int rtnl_phys_switch_id_fill(struct sk_buff *skb, struct net_device *dev) static noinline_for_stack int rtnl_fill_stats(struct sk_buff *skb, struct net_device *dev) { - const struct rtnl_link_stats64 *stats; - struct rtnl_link_stats64 temp; + struct rtnl_link_stats64 *sp; struct nlattr *attr; - stats = dev_get_stats(dev, &temp); - - attr = nla_reserve(skb, IFLA_STATS, - sizeof(struct rtnl_link_stats)); - if (!attr) - return -EMSGSIZE; - - copy_rtnl_link_stats(nla_data(attr), stats); - attr = nla_reserve(skb, IFLA_STATS64, sizeof(struct rtnl_link_stats64)); if (!attr) return -EMSGSIZE; - copy_rtnl_link_stats64(nla_data(attr), stats); + sp = nla_data(attr); + dev_get_stats(dev, sp); + + attr = nla_reserve(skb, IFLA_STATS, + sizeof(struct rtnl_link_stats)); + if (!attr) + return -EMSGSIZE; + + copy_rtnl_link_stats(nla_data(attr), sp); return 0; } From 110361f41c17d1f565e2fd03a0af044c29e6513a Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Mon, 18 Apr 2016 11:44:49 +0300 Subject: [PATCH 0809/1649] udp: fix if statement in SIOCINQ ioctl We deleted a line of code and accidentally made the "return put_user()" part of the if statement when it's supposed to be unconditional. Fixes: 9f9a45beaa96 ('udp: do not expect udp headers on ioctl SIOCINQ') Signed-off-by: Dan Carpenter Acked-by: Eric Dumazet Acked-by: Willem de Bruijn Signed-off-by: David S. Miller --- net/ipv4/udp.c | 6 ------ 1 file changed, 6 deletions(-) diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index f1863136d3e4..37e09c3dd046 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -1276,12 +1276,6 @@ int udp_ioctl(struct sock *sk, int cmd, unsigned long arg) { unsigned int amount = first_packet_length(sk); - if (amount) - /* - * We will only return the amount - * of this packet since that is all - * that will be read. - */ return put_user(amount, (int __user *)arg); } From 2a2bbf170054e2525f11e08bb36d4027d76b7bff Mon Sep 17 00:00:00 2001 From: Paolo Abeni Date: Thu, 14 Apr 2016 18:39:39 +0200 Subject: [PATCH 0810/1649] tun: don't require serialization lock on tx The current tun_net_xmit() implementation doesn't need any external lock since it relies on rcu protection for the tun data structure and on the socket queue lock for skb queuing. This patch sets the NETIF_F_LLTX feature bit in the tun device, so that on xmit, in the absence of a qdisc, no serialization lock is acquired by the caller. User space can remove the default tun qdisc with: tc qdisc replace dev <tun device> root noqueue Signed-off-by: Paolo Abeni Acked-by: Hannes Frederic Sowa Acked-by: Eric Dumazet Acked-by: Michael S. Tsirkin Signed-off-by: David S.
Miller --- drivers/net/tun.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/tun.c b/drivers/net/tun.c index faf9297db2cf..42992dcbdda8 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -1796,7 +1796,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr) dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | TUN_USER_FEATURES | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX; - dev->features = dev->hw_features; + dev->features = dev->hw_features | NETIF_F_LLTX; dev->vlan_features = dev->features & ~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX); From b4ef159927150bf1d63f36330bbb5239516ceb69 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Tue, 12 Apr 2016 18:14:23 +0200 Subject: [PATCH 0811/1649] netfilter: connlabels: move helpers to xt_connlabel Currently labels can only be set either by iptables connlabel match or via ctnetlink. Before adding nftables set support, clean up the clabel core and move helpers that nft will not need after all to the xtables module. Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- include/net/netfilter/nf_conntrack_labels.h | 1 - net/netfilter/nf_conntrack_labels.c | 19 +------------------ net/netfilter/xt_connlabel.c | 12 +++++++++++- 3 files changed, 12 insertions(+), 20 deletions(-) diff --git a/include/net/netfilter/nf_conntrack_labels.h b/include/net/netfilter/nf_conntrack_labels.h index 7e2b1d025f50..51678180e56c 100644 --- a/include/net/netfilter/nf_conntrack_labels.h +++ b/include/net/netfilter/nf_conntrack_labels.h @@ -45,7 +45,6 @@ static inline struct nf_conn_labels *nf_ct_labels_ext_add(struct nf_conn *ct) #endif } -bool nf_connlabel_match(const struct nf_conn *ct, u16 bit); int nf_connlabel_set(struct nf_conn *ct, u16 bit); int nf_connlabels_replace(struct nf_conn *ct, diff --git a/net/netfilter/nf_conntrack_labels.c b/net/netfilter/nf_conntrack_labels.c index 3ce5c314ea4b..3a30900891c1 100644 --- a/net/netfilter/nf_conntrack_labels.c +++ b/net/netfilter/nf_conntrack_labels.c @@ -16,28 +16,11 @@ static spinlock_t nf_connlabels_lock; -static unsigned int label_bits(const struct nf_conn_labels *l) -{ - unsigned int longs = l->words; - return longs * BITS_PER_LONG; -} - -bool nf_connlabel_match(const struct nf_conn *ct, u16 bit) -{ - struct nf_conn_labels *labels = nf_ct_labels_find(ct); - - if (!labels) - return false; - - return bit < label_bits(labels) && test_bit(bit, labels->bits); -} -EXPORT_SYMBOL_GPL(nf_connlabel_match); - int nf_connlabel_set(struct nf_conn *ct, u16 bit) { struct nf_conn_labels *labels = nf_ct_labels_find(ct); - if (!labels || bit >= label_bits(labels)) + if (!labels || BIT_WORD(bit) >= labels->words) return -ENOSPC; if (test_bit(bit, labels->bits)) diff --git a/net/netfilter/xt_connlabel.c b/net/netfilter/xt_connlabel.c index bb9cbeb18868..d9b3e535d13a 100644 --- a/net/netfilter/xt_connlabel.c +++ b/net/netfilter/xt_connlabel.c @@ -18,6 +18,16 @@ MODULE_DESCRIPTION("Xtables: add/match connection trackling labels"); MODULE_ALIAS("ipt_connlabel"); MODULE_ALIAS("ip6t_connlabel"); +static bool connlabel_match(const struct nf_conn *ct, u16 bit) +{ + struct nf_conn_labels *labels = nf_ct_labels_find(ct); + + if (!labels) + return false; + + return BIT_WORD(bit) < labels->words && test_bit(bit, labels->bits); +} + static bool connlabel_mt(const struct sk_buff *skb, struct xt_action_param *par) { @@ -33,7 +43,7 @@ connlabel_mt(const struct sk_buff *skb, struct xt_action_param *par) if (info->options & XT_CONNLABEL_OP_SET) return 
(nf_connlabel_set(ct, info->bit) == 0) ^ invert; - return nf_connlabel_match(ct, info->bit) ^ invert; + return connlabel_match(ct, info->bit) ^ invert; } static int connlabel_mt_check(const struct xt_mtchk_param *par) From 5a8145f7b22269adaf9e98b160a20486d1ad5669 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Tue, 12 Apr 2016 18:14:24 +0200 Subject: [PATCH 0812/1649] netfilter: labels: don't emit ct event if labels were not changed Make the replace function only send a ctnetlink event if the contents of the new set are different. Otherwise 'ct label set ct label | bar' will cause a netlink event storm since we "replace" labels for each packet. Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nf_conntrack_labels.c | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/net/netfilter/nf_conntrack_labels.c b/net/netfilter/nf_conntrack_labels.c index 3a30900891c1..bd7f26b97ac6 100644 --- a/net/netfilter/nf_conntrack_labels.c +++ b/net/netfilter/nf_conntrack_labels.c @@ -33,14 +33,18 @@ int nf_connlabel_set(struct nf_conn *ct, u16 bit) } EXPORT_SYMBOL_GPL(nf_connlabel_set); -static void replace_u32(u32 *address, u32 mask, u32 new) +static int replace_u32(u32 *address, u32 mask, u32 new) { u32 old, tmp; do { old = *address; tmp = (old & mask) ^ new; + if (old == tmp) + return 0; } while (cmpxchg(address, old, tmp) != old); + + return 1; } int nf_connlabels_replace(struct nf_conn *ct, @@ -49,6 +53,7 @@ int nf_connlabels_replace(struct nf_conn *ct, { struct nf_conn_labels *labels; unsigned int size, i; + int changed = 0; u32 *dst; labels = nf_ct_labels_find(ct); @@ -60,16 +65,15 @@ int nf_connlabels_replace(struct nf_conn *ct, words32 = size / sizeof(u32); dst = (u32 *) labels->bits; - if (words32) { - for (i = 0; i < words32; i++) - replace_u32(&dst[i], mask ? ~mask[i] : 0, data[i]); - } + for (i = 0; i < words32; i++) + changed |= replace_u32(&dst[i], mask ? ~mask[i] : 0, data[i]); size /= sizeof(u32); for (i = words32; i < size; i++) /* pad */ replace_u32(&dst[i], 0, 0); - nf_conntrack_event_cache(IPCT_LABEL, ct); + if (changed) + nf_conntrack_event_cache(IPCT_LABEL, ct); return 0; } EXPORT_SYMBOL_GPL(nf_connlabels_replace); From adff6c65600000ec2bb71840c943ee12668080f5 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Tue, 12 Apr 2016 18:14:25 +0200 Subject: [PATCH 0813/1649] netfilter: connlabels: change nf_connlabels_get bit arg to 'highest used' nf_connlabel_set() takes the bit number that we would like to set. nf_connlabels_get() however took the number of bits that we want to support. So e.g. nf_connlabels_get(32) supports bits 0 to 31, but not 32. This changes nf_connlabels_get() to take the highest bit that we want to set. Callers then don't have to cope with a potential integer wrap when using nf_connlabels_get(bit + 1) anymore. Current callers are fine; this change is only to make followup nft ct label set support simpler.
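A minimal userspace sketch of the sizing arithmetic described above (not kernel code; it only assumes BITS_PER_LONG is 64): under the old convention a caller passes a count of bits, so using bit 64 means asking for 65 bits, while under the new convention the caller passes the highest bit itself, and both end up reserving the same number of longs.

#include <stdio.h>

#define BITS_PER_LONG 64
#define BIT_WORD(nr) ((nr) / BITS_PER_LONG)
#define BITS_TO_LONGS(nr) (((nr) + BITS_PER_LONG - 1) / BITS_PER_LONG)

int main(void)
{
	unsigned int bit = 64; /* the label bit a caller wants to use */

	/* old convention: pass a bit count, hence the "bit + 1" at call sites */
	printf("old: BITS_TO_LONGS(bit + 1) = %u longs\n",
	       (unsigned int)BITS_TO_LONGS(bit + 1));

	/* new convention: pass the highest bit directly */
	printf("new: BIT_WORD(bit) + 1 = %u longs\n",
	       (unsigned int)(BIT_WORD(bit) + 1));

	return 0;
}

Dropping the "bit + 1" at every call site is what removes the potential integer wrap mentioned above.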
Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- include/net/netfilter/nf_conntrack_labels.h | 4 ++-- net/netfilter/nf_conntrack_labels.c | 9 +++++---- net/netfilter/nft_ct.c | 2 ++ net/netfilter/xt_connlabel.c | 2 +- net/openvswitch/conntrack.c | 2 +- 5 files changed, 11 insertions(+), 8 deletions(-) diff --git a/include/net/netfilter/nf_conntrack_labels.h b/include/net/netfilter/nf_conntrack_labels.h index 51678180e56c..c5f8fc736b3d 100644 --- a/include/net/netfilter/nf_conntrack_labels.h +++ b/include/net/netfilter/nf_conntrack_labels.h @@ -53,11 +53,11 @@ int nf_connlabels_replace(struct nf_conn *ct, #ifdef CONFIG_NF_CONNTRACK_LABELS int nf_conntrack_labels_init(void); void nf_conntrack_labels_fini(void); -int nf_connlabels_get(struct net *net, unsigned int n_bits); +int nf_connlabels_get(struct net *net, unsigned int bit); void nf_connlabels_put(struct net *net); #else static inline int nf_conntrack_labels_init(void) { return 0; } static inline void nf_conntrack_labels_fini(void) {} -static inline int nf_connlabels_get(struct net *net, unsigned int n_bits) { return 0; } +static inline int nf_connlabels_get(struct net *net, unsigned int bit) { return 0; } static inline void nf_connlabels_put(struct net *net) {} #endif diff --git a/net/netfilter/nf_conntrack_labels.c b/net/netfilter/nf_conntrack_labels.c index bd7f26b97ac6..252e6a7cd2f1 100644 --- a/net/netfilter/nf_conntrack_labels.c +++ b/net/netfilter/nf_conntrack_labels.c @@ -78,15 +78,14 @@ int nf_connlabels_replace(struct nf_conn *ct, } EXPORT_SYMBOL_GPL(nf_connlabels_replace); -int nf_connlabels_get(struct net *net, unsigned int n_bits) +int nf_connlabels_get(struct net *net, unsigned int bits) { size_t words; - if (n_bits > (NF_CT_LABELS_MAX_SIZE * BITS_PER_BYTE)) + words = BIT_WORD(bits) + 1; + if (words > NF_CT_LABELS_MAX_SIZE / sizeof(long)) return -ERANGE; - words = BITS_TO_LONGS(n_bits); - spin_lock(&nf_connlabels_lock); net->ct.labels_used++; if (words > net->ct.label_words) @@ -115,6 +114,8 @@ static struct nf_ct_ext_type labels_extend __read_mostly = { int nf_conntrack_labels_init(void) { + BUILD_BUG_ON(NF_CT_LABELS_MAX_SIZE / sizeof(long) >= U8_MAX); + spin_lock_init(&nf_connlabels_lock); return nf_ct_extend_register(&labels_extend); } diff --git a/net/netfilter/nft_ct.c b/net/netfilter/nft_ct.c index d4a4619fcebc..25998facefd0 100644 --- a/net/netfilter/nft_ct.c +++ b/net/netfilter/nft_ct.c @@ -484,6 +484,8 @@ static struct nft_expr_type nft_ct_type __read_mostly = { static int __init nft_ct_module_init(void) { + BUILD_BUG_ON(NF_CT_LABELS_MAX_SIZE > NFT_REG_SIZE); + return nft_register_expr(&nft_ct_type); } diff --git a/net/netfilter/xt_connlabel.c b/net/netfilter/xt_connlabel.c index d9b3e535d13a..a79af255561a 100644 --- a/net/netfilter/xt_connlabel.c +++ b/net/netfilter/xt_connlabel.c @@ -65,7 +65,7 @@ static int connlabel_mt_check(const struct xt_mtchk_param *par) return ret; } - ret = nf_connlabels_get(par->net, info->bit + 1); + ret = nf_connlabels_get(par->net, info->bit); if (ret < 0) nf_ct_l3proto_module_put(par->family); return ret; diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c index 1b9d286756be..e5fe24a61f9f 100644 --- a/net/openvswitch/conntrack.c +++ b/net/openvswitch/conntrack.c @@ -1344,7 +1344,7 @@ void ovs_ct_init(struct net *net) unsigned int n_bits = sizeof(struct ovs_key_ct_labels) * BITS_PER_BYTE; struct ovs_net *ovs_net = net_generic(net, ovs_net_id); - if (nf_connlabels_get(net, n_bits)) { + if (nf_connlabels_get(net, n_bits - 1)) { ovs_net->xt_label = 
false; OVS_NLERR(true, "Failed to set connlabel length"); } else { From 6d62b4d5fac620ee0ca65dc6d99b0306d96bc541 Mon Sep 17 00:00:00 2001 From: Philippe Reynes Date: Fri, 15 Apr 2016 00:34:59 +0200 Subject: [PATCH 0814/1649] net: ethtool: export conversion function between u32 and link mode The functions convert_legacy_u32_to_link_mode and convert_link_mode_to_legacy_u32 may be used outside of ethtool.c. We rename them to ethtool_convert_... and export them, so we can use them in other drivers and modules. Signed-off-by: Philippe Reynes Signed-off-by: David S. Miller --- include/linux/ethtool.h | 7 +++++++ net/core/ethtool.c | 21 ++++++++++++--------- 2 files changed, 19 insertions(+), 9 deletions(-) diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h index e2b7bf27c03e..9ded8c6d8176 100644 --- a/include/linux/ethtool.h +++ b/include/linux/ethtool.h @@ -150,6 +150,13 @@ extern int __ethtool_get_link_ksettings(struct net_device *dev, struct ethtool_link_ksettings *link_ksettings); +void ethtool_convert_legacy_u32_to_link_mode(unsigned long *dst, + u32 legacy_u32); + +/* return false if src had higher bits set. lower bits always updated. */ +bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32, + const unsigned long *src); + /** * struct ethtool_ops - optional netdev operations * @get_settings: DEPRECATED, use %get_link_ksettings/%set_link_ksettings diff --git a/net/core/ethtool.c b/net/core/ethtool.c index e0cf20a3b3dd..bdb4013581b1 100644 --- a/net/core/ethtool.c +++ b/net/core/ethtool.c @@ -391,15 +391,17 @@ static int __ethtool_set_flags(struct net_device *dev, u32 data) return 0; } -static void convert_legacy_u32_to_link_mode(unsigned long *dst, u32 legacy_u32) +void ethtool_convert_legacy_u32_to_link_mode(unsigned long *dst, + u32 legacy_u32) { bitmap_zero(dst, __ETHTOOL_LINK_MODE_MASK_NBITS); dst[0] = legacy_u32; } +EXPORT_SYMBOL(ethtool_convert_legacy_u32_to_link_mode); /* return false if src had higher bits set. lower bits always updated. */ -static bool convert_link_mode_to_legacy_u32(u32 *legacy_u32, - const unsigned long *src) +bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32, + const unsigned long *src) { bool retval = true; @@ -419,6 +421,7 @@ static bool convert_link_mode_to_legacy_u32(u32 *legacy_u32, *legacy_u32 = src[0]; return retval; } +EXPORT_SYMBOL(ethtool_convert_link_mode_to_legacy_u32); /* return false if legacy contained non-0 deprecated fields * transceiver/maxtxpkt/maxrxpkt.
rest of ksettings always updated @@ -441,13 +444,13 @@ convert_legacy_settings_to_link_ksettings( legacy_settings->maxrxpkt) retval = false; - convert_legacy_u32_to_link_mode( + ethtool_convert_legacy_u32_to_link_mode( link_ksettings->link_modes.supported, legacy_settings->supported); - convert_legacy_u32_to_link_mode( + ethtool_convert_legacy_u32_to_link_mode( link_ksettings->link_modes.advertising, legacy_settings->advertising); - convert_legacy_u32_to_link_mode( + ethtool_convert_legacy_u32_to_link_mode( link_ksettings->link_modes.lp_advertising, legacy_settings->lp_advertising); link_ksettings->base.speed @@ -486,13 +489,13 @@ convert_link_ksettings_to_legacy_settings( * __u32 maxrxpkt; */ - retval &= convert_link_mode_to_legacy_u32( + retval &= ethtool_convert_link_mode_to_legacy_u32( &legacy_settings->supported, link_ksettings->link_modes.supported); - retval &= convert_link_mode_to_legacy_u32( + retval &= ethtool_convert_link_mode_to_legacy_u32( &legacy_settings->advertising, link_ksettings->link_modes.advertising); - retval &= convert_link_mode_to_legacy_u32( + retval &= ethtool_convert_link_mode_to_legacy_u32( &legacy_settings->lp_advertising, link_ksettings->link_modes.lp_advertising); ethtool_cmd_speed_set(legacy_settings, link_ksettings->base.speed); From 2d55173e71b06c5a369489852d972304e14189fd Mon Sep 17 00:00:00 2001 From: Philippe Reynes Date: Fri, 15 Apr 2016 00:35:00 +0200 Subject: [PATCH 0815/1649] phy: add generic function to support ksetting support The old ethtool api (get_settings and set_settings) has generic phy functions phy_ethtool_sset and phy_ethtool_gset. To support the new ethtool api (get_link_ksettings and set_link_ksettings), we add the generic phy functions phy_ethtool_ksettings_get and phy_ethtool_ksettings_set. Signed-off-by: Philippe Reynes Signed-off-by: David S. Miller --- drivers/net/phy/phy.c | 81 +++++++++++++++++++++++++++++++++++++++++++ include/linux/phy.h | 4 +++ 2 files changed, 85 insertions(+) diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index 5590b9c182c9..6f221c8c2a7f 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -362,6 +362,60 @@ int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd) } EXPORT_SYMBOL(phy_ethtool_sset); +int phy_ethtool_ksettings_set(struct phy_device *phydev, + const struct ethtool_link_ksettings *cmd) +{ + u8 autoneg = cmd->base.autoneg; + u8 duplex = cmd->base.duplex; + u32 speed = cmd->base.speed; + u32 advertising; + + if (cmd->base.phy_address != phydev->mdio.addr) + return -EINVAL; + + ethtool_convert_link_mode_to_legacy_u32(&advertising, + cmd->link_modes.advertising); + + /* We make sure that we don't pass unsupported values in to the PHY */ + advertising &= phydev->supported; + + /* Verify the settings we care about.
*/ + if (autoneg != AUTONEG_ENABLE && autoneg != AUTONEG_DISABLE) + return -EINVAL; + + if (autoneg == AUTONEG_ENABLE && advertising == 0) + return -EINVAL; + + if (autoneg == AUTONEG_DISABLE && + ((speed != SPEED_1000 && + speed != SPEED_100 && + speed != SPEED_10) || + (duplex != DUPLEX_HALF && + duplex != DUPLEX_FULL))) + return -EINVAL; + + phydev->autoneg = autoneg; + + phydev->speed = speed; + + phydev->advertising = advertising; + + if (autoneg == AUTONEG_ENABLE) + phydev->advertising |= ADVERTISED_Autoneg; + else + phydev->advertising &= ~ADVERTISED_Autoneg; + + phydev->duplex = duplex; + + phydev->mdix = cmd->base.eth_tp_mdix_ctrl; + + /* Restart the PHY */ + phy_start_aneg(phydev); + + return 0; +} +EXPORT_SYMBOL(phy_ethtool_ksettings_set); + int phy_ethtool_gset(struct phy_device *phydev, struct ethtool_cmd *cmd) { cmd->supported = phydev->supported; @@ -385,6 +439,33 @@ int phy_ethtool_gset(struct phy_device *phydev, struct ethtool_cmd *cmd) } EXPORT_SYMBOL(phy_ethtool_gset); +int phy_ethtool_ksettings_get(struct phy_device *phydev, + struct ethtool_link_ksettings *cmd) +{ + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, + phydev->supported); + + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, + phydev->advertising); + + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising, + phydev->lp_advertising); + + cmd->base.speed = phydev->speed; + cmd->base.duplex = phydev->duplex; + if (phydev->interface == PHY_INTERFACE_MODE_MOCA) + cmd->base.port = PORT_BNC; + else + cmd->base.port = PORT_MII; + + cmd->base.phy_address = phydev->mdio.addr; + cmd->base.autoneg = phydev->autoneg; + cmd->base.eth_tp_mdix_ctrl = phydev->mdix; + + return 0; +} +EXPORT_SYMBOL(phy_ethtool_ksettings_get); + /** * phy_mii_ioctl - generic PHY MII ioctl interface * @phydev: the phy_device struct diff --git a/include/linux/phy.h b/include/linux/phy.h index 2abd7918f64f..be3f83bbdc0b 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -805,6 +805,10 @@ void phy_start_machine(struct phy_device *phydev); void phy_stop_machine(struct phy_device *phydev); int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd); int phy_ethtool_gset(struct phy_device *phydev, struct ethtool_cmd *cmd); +int phy_ethtool_ksettings_get(struct phy_device *phydev, + struct ethtool_link_ksettings *cmd); +int phy_ethtool_ksettings_set(struct phy_device *phydev, + const struct ethtool_link_ksettings *cmd); int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd); int phy_start_interrupts(struct phy_device *phydev); void phy_print_status(struct phy_device *phydev); From 54846f5838d871ad69ebe6eb1999cae3867dabc7 Mon Sep 17 00:00:00 2001 From: Philippe Reynes Date: Fri, 15 Apr 2016 00:35:01 +0200 Subject: [PATCH 0816/1649] fec: move to new ethtool api {get|set}_link_ksettings The ethtool api {get|set}_settings is deprecated. We move the fec driver to new api {get|set}_link_ksettings. Signed-off-by: Philippe Reynes Acked-by: Fugang Duan Signed-off-by: David S. 
Miller --- drivers/net/ethernet/freescale/fec_main.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 08243c2ff4b4..bfa10c3da35f 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -2058,8 +2058,8 @@ static void fec_enet_mii_remove(struct fec_enet_private *fep) } } -static int fec_enet_get_settings(struct net_device *ndev, - struct ethtool_cmd *cmd) +static int fec_enet_get_link_ksettings(struct net_device *ndev, + struct ethtool_link_ksettings *cmd) { struct fec_enet_private *fep = netdev_priv(ndev); struct phy_device *phydev = fep->phy_dev; @@ -2067,11 +2067,11 @@ static int fec_enet_get_settings(struct net_device *ndev, if (!phydev) return -ENODEV; - return phy_ethtool_gset(phydev, cmd); + return phy_ethtool_ksettings_get(phydev, cmd); } -static int fec_enet_set_settings(struct net_device *ndev, - struct ethtool_cmd *cmd) +static int fec_enet_set_link_ksettings(struct net_device *ndev, + const struct ethtool_link_ksettings *cmd) { struct fec_enet_private *fep = netdev_priv(ndev); struct phy_device *phydev = fep->phy_dev; @@ -2079,7 +2079,7 @@ static int fec_enet_set_settings(struct net_device *ndev, if (!phydev) return -ENODEV; - return phy_ethtool_sset(phydev, cmd); + return phy_ethtool_ksettings_set(phydev, cmd); } static void fec_enet_get_drvinfo(struct net_device *ndev, @@ -2562,8 +2562,6 @@ fec_enet_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) } static const struct ethtool_ops fec_enet_ethtool_ops = { - .get_settings = fec_enet_get_settings, - .set_settings = fec_enet_set_settings, .get_drvinfo = fec_enet_get_drvinfo, .get_regs_len = fec_enet_get_regs_len, .get_regs = fec_enet_get_regs, @@ -2583,6 +2581,8 @@ static const struct ethtool_ops fec_enet_ethtool_ops = { .set_tunable = fec_enet_set_tunable, .get_wol = fec_enet_get_wol, .set_wol = fec_enet_set_wol, + .get_link_ksettings = fec_enet_get_link_ksettings, + .set_link_ksettings = fec_enet_set_link_ksettings, }; static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd) From 84bf9cefb162b197da20a0f4388929f4b5ba5db4 Mon Sep 17 00:00:00 2001 From: KY Srinivasan Date: Thu, 14 Apr 2016 16:31:54 -0700 Subject: [PATCH 0817/1649] hv_netvsc: Implement support for VF drivers on Hyper-V Support VF drivers on Hyper-V. On Hyper-V, each VF instance presented to the guest has an associated synthetic interface that shares the MAC address with the VF instance. Typically these are bonded together to support live migration. By default, the host delivers all the incoming packets on the synthetic interface. Once the VF is up, we need to explicitly switch the data path on the host to divert traffic onto the VF interface. Even after switching the data path, broadcast and multicast packets are always delivered on the synthetic interface and these will have to be injected back onto the VF interface (if VF is up). This patch implements the necessary support in netvsc to support Linux VF drivers. Signed-off-by: K. Y. Srinivasan Reviewed-by: Haiyang Zhang Signed-off-by: David S. 
Miller --- drivers/net/hyperv/hyperv_net.h | 14 ++ drivers/net/hyperv/netvsc.c | 29 +++ drivers/net/hyperv/netvsc_drv.c | 312 +++++++++++++++++++++++++++--- drivers/net/hyperv/rndis_filter.c | 6 + 4 files changed, 335 insertions(+), 26 deletions(-) diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index 8b3bd8ecd1c4..6700a4dca7c8 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h @@ -202,6 +202,8 @@ int rndis_filter_receive(struct hv_device *dev, int rndis_filter_set_packet_filter(struct rndis_device *dev, u32 new_filter); int rndis_filter_set_device_mac(struct hv_device *hdev, char *mac); +void netvsc_switch_datapath(struct netvsc_device *nv_dev, bool vf); + #define NVSP_INVALID_PROTOCOL_VERSION ((u32)0xFFFFFFFF) #define NVSP_PROTOCOL_VERSION_1 2 @@ -641,6 +643,12 @@ struct netvsc_reconfig { u32 event; }; +struct garp_wrk { + struct work_struct dwrk; + struct net_device *netdev; + struct netvsc_device *netvsc_dev; +}; + /* The context of the netvsc device */ struct net_device_context { /* point back to our device context */ @@ -656,6 +664,7 @@ struct net_device_context { struct work_struct work; u32 msg_enable; /* debug level */ + struct garp_wrk gwrk; struct netvsc_stats __percpu *tx_stats; struct netvsc_stats __percpu *rx_stats; @@ -730,6 +739,11 @@ struct netvsc_device { u32 vf_alloc; /* Serial number of the VF to team with */ u32 vf_serial; + atomic_t open_cnt; + /* State to manage the associated VF interface. */ + bool vf_inject; + struct net_device *vf_netdev; + atomic_t vf_use_cnt; }; /* NdisInitialize message */ diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index ec313fc08d82..eddce3cdafa8 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c @@ -33,6 +33,30 @@ #include "hyperv_net.h" +/* + * Switch the data path from the synthetic interface to the VF + * interface. 
+ */ +void netvsc_switch_datapath(struct netvsc_device *nv_dev, bool vf) +{ + struct nvsp_message *init_pkt = &nv_dev->channel_init_pkt; + struct hv_device *dev = nv_dev->dev; + + memset(init_pkt, 0, sizeof(struct nvsp_message)); + init_pkt->hdr.msg_type = NVSP_MSG4_TYPE_SWITCH_DATA_PATH; + if (vf) + init_pkt->msg.v4_msg.active_dp.active_datapath = + NVSP_DATAPATH_VF; + else + init_pkt->msg.v4_msg.active_dp.active_datapath = + NVSP_DATAPATH_SYNTHETIC; + + vmbus_sendpacket(dev->channel, init_pkt, + sizeof(struct nvsp_message), + (unsigned long)init_pkt, + VM_PKT_DATA_INBAND, 0); +} + static struct netvsc_device *alloc_net_device(struct hv_device *device) { @@ -52,11 +76,16 @@ static struct netvsc_device *alloc_net_device(struct hv_device *device) init_waitqueue_head(&net_device->wait_drain); net_device->start_remove = false; net_device->destroy = false; + atomic_set(&net_device->open_cnt, 0); + atomic_set(&net_device->vf_use_cnt, 0); net_device->dev = device; net_device->ndev = ndev; net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT; net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT; + net_device->vf_netdev = NULL; + net_device->vf_inject = false; + hv_set_drvdata(device, net_device); return net_device; } diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index b8121eba33ff..bfdb568ac6b8 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -610,42 +610,24 @@ void netvsc_linkstatus_callback(struct hv_device *device_obj, schedule_delayed_work(&ndev_ctx->dwork, 0); } -/* - * netvsc_recv_callback - Callback when we receive a packet from the - * "wire" on the specified device. - */ -int netvsc_recv_callback(struct hv_device *device_obj, + +static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net, struct hv_netvsc_packet *packet, - void **data, struct ndis_tcp_ip_checksum_info *csum_info, - struct vmbus_channel *channel, - u16 vlan_tci) + void *data, u16 vlan_tci) { - struct net_device *net; - struct net_device_context *net_device_ctx; struct sk_buff *skb; - struct netvsc_stats *rx_stats; - net = ((struct netvsc_device *)hv_get_drvdata(device_obj))->ndev; - if (!net || net->reg_state != NETREG_REGISTERED) { - return NVSP_STAT_FAIL; - } - net_device_ctx = netdev_priv(net); - rx_stats = this_cpu_ptr(net_device_ctx->rx_stats); - - /* Allocate a skb - TODO direct I/O to pages? */ skb = netdev_alloc_skb_ip_align(net, packet->total_data_buflen); - if (unlikely(!skb)) { - ++net->stats.rx_dropped; - return NVSP_STAT_FAIL; - } + if (!skb) + return skb; /* * Copy to skb. This copy is needed here since the memory pointed by * hv_netvsc_packet cannot be deallocated */ - memcpy(skb_put(skb, packet->total_data_buflen), *data, - packet->total_data_buflen); + memcpy(skb_put(skb, packet->total_data_buflen), data, + packet->total_data_buflen); skb->protocol = eth_type_trans(skb, net); if (csum_info) { @@ -663,6 +645,75 @@ int netvsc_recv_callback(struct hv_device *device_obj, __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci); + return skb; +} + +/* + * netvsc_recv_callback - Callback when we receive a packet from the + * "wire" on the specified device. 
+ */ +int netvsc_recv_callback(struct hv_device *device_obj, + struct hv_netvsc_packet *packet, + void **data, + struct ndis_tcp_ip_checksum_info *csum_info, + struct vmbus_channel *channel, + u16 vlan_tci) +{ + struct net_device *net; + struct net_device_context *net_device_ctx; + struct sk_buff *skb; + struct sk_buff *vf_skb; + struct netvsc_stats *rx_stats; + struct netvsc_device *netvsc_dev = hv_get_drvdata(device_obj); + u32 bytes_recvd = packet->total_data_buflen; + int ret = 0; + + net = netvsc_dev->ndev; + if (!net || net->reg_state != NETREG_REGISTERED) + return NVSP_STAT_FAIL; + + if (READ_ONCE(netvsc_dev->vf_inject)) { + atomic_inc(&netvsc_dev->vf_use_cnt); + if (!READ_ONCE(netvsc_dev->vf_inject)) { + /* + * We raced; just move on. + */ + atomic_dec(&netvsc_dev->vf_use_cnt); + goto vf_injection_done; + } + + /* + * Inject this packet into the VF inerface. + * On Hyper-V, multicast and brodcast packets + * are only delivered on the synthetic interface + * (after subjecting these to policy filters on + * the host). Deliver these via the VF interface + * in the guest. + */ + vf_skb = netvsc_alloc_recv_skb(netvsc_dev->vf_netdev, packet, + csum_info, *data, vlan_tci); + if (vf_skb != NULL) { + ++netvsc_dev->vf_netdev->stats.rx_packets; + netvsc_dev->vf_netdev->stats.rx_bytes += bytes_recvd; + netif_receive_skb(vf_skb); + } else { + ++net->stats.rx_dropped; + ret = NVSP_STAT_FAIL; + } + atomic_dec(&netvsc_dev->vf_use_cnt); + return ret; + } + +vf_injection_done: + net_device_ctx = netdev_priv(net); + rx_stats = this_cpu_ptr(net_device_ctx->rx_stats); + + /* Allocate a skb - TODO direct I/O to pages? */ + skb = netvsc_alloc_recv_skb(net, packet, csum_info, *data, vlan_tci); + if (unlikely(!skb)) { + ++net->stats.rx_dropped; + return NVSP_STAT_FAIL; + } skb_record_rx_queue(skb, channel-> offermsg.offer.sub_channel_index); @@ -1102,6 +1153,175 @@ static void netvsc_free_netdev(struct net_device *netdev) free_netdev(netdev); } +static void netvsc_notify_peers(struct work_struct *wrk) +{ + struct garp_wrk *gwrk; + + gwrk = container_of(wrk, struct garp_wrk, dwrk); + + netdev_notify_peers(gwrk->netdev); + + atomic_dec(&gwrk->netvsc_dev->vf_use_cnt); +} + +static struct netvsc_device *get_netvsc_device(char *mac) +{ + struct net_device *dev; + struct net_device_context *netvsc_ctx = NULL; + int rtnl_locked; + + rtnl_locked = rtnl_trylock(); + + for_each_netdev(&init_net, dev) { + if (memcmp(dev->dev_addr, mac, ETH_ALEN) == 0) { + if (dev->netdev_ops != &device_ops) + continue; + netvsc_ctx = netdev_priv(dev); + break; + } + } + if (rtnl_locked) + rtnl_unlock(); + + if (netvsc_ctx == NULL) + return NULL; + + return hv_get_drvdata(netvsc_ctx->device_ctx); +} + +static int netvsc_register_vf(struct net_device *vf_netdev) +{ + struct netvsc_device *netvsc_dev; + const struct ethtool_ops *eth_ops = vf_netdev->ethtool_ops; + + if (eth_ops == NULL || eth_ops == ðtool_ops) + return NOTIFY_DONE; + + /* + * We will use the MAC address to locate the synthetic interface to + * associate with the VF interface. If we don't find a matching + * synthetic interface, move on. + */ + netvsc_dev = get_netvsc_device(vf_netdev->dev_addr); + if (netvsc_dev == NULL) + return NOTIFY_DONE; + + netdev_info(netvsc_dev->ndev, "VF registering: %s\n", vf_netdev->name); + /* + * Take a reference on the module. 
+ */ + try_module_get(THIS_MODULE); + netvsc_dev->vf_netdev = vf_netdev; + return NOTIFY_OK; +} + + +static int netvsc_vf_up(struct net_device *vf_netdev) +{ + struct netvsc_device *netvsc_dev; + const struct ethtool_ops *eth_ops = vf_netdev->ethtool_ops; + struct net_device_context *net_device_ctx; + + if (eth_ops == ðtool_ops) + return NOTIFY_DONE; + + netvsc_dev = get_netvsc_device(vf_netdev->dev_addr); + + if ((netvsc_dev == NULL) || (netvsc_dev->vf_netdev == NULL)) + return NOTIFY_DONE; + + netdev_info(netvsc_dev->ndev, "VF up: %s\n", vf_netdev->name); + net_device_ctx = netdev_priv(netvsc_dev->ndev); + netvsc_dev->vf_inject = true; + + /* + * Open the device before switching data path. + */ + rndis_filter_open(net_device_ctx->device_ctx); + + /* + * notify the host to switch the data path. + */ + netvsc_switch_datapath(netvsc_dev, true); + netdev_info(netvsc_dev->ndev, "Data path switched to VF: %s\n", + vf_netdev->name); + + netif_carrier_off(netvsc_dev->ndev); + + /* + * Now notify peers. We are scheduling work to + * notify peers; take a reference to prevent + * the VF interface from vanishing. + */ + atomic_inc(&netvsc_dev->vf_use_cnt); + net_device_ctx->gwrk.netdev = vf_netdev; + net_device_ctx->gwrk.netvsc_dev = netvsc_dev; + schedule_work(&net_device_ctx->gwrk.dwrk); + + return NOTIFY_OK; +} + + +static int netvsc_vf_down(struct net_device *vf_netdev) +{ + struct netvsc_device *netvsc_dev; + struct net_device_context *net_device_ctx; + const struct ethtool_ops *eth_ops = vf_netdev->ethtool_ops; + + if (eth_ops == ðtool_ops) + return NOTIFY_DONE; + + netvsc_dev = get_netvsc_device(vf_netdev->dev_addr); + + if ((netvsc_dev == NULL) || (netvsc_dev->vf_netdev == NULL)) + return NOTIFY_DONE; + + netdev_info(netvsc_dev->ndev, "VF down: %s\n", vf_netdev->name); + net_device_ctx = netdev_priv(netvsc_dev->ndev); + netvsc_dev->vf_inject = false; + /* + * Wait for currently active users to + * drain out. + */ + + while (atomic_read(&netvsc_dev->vf_use_cnt) != 0) + udelay(50); + netvsc_switch_datapath(netvsc_dev, false); + netdev_info(netvsc_dev->ndev, "Data path switched from VF: %s\n", + vf_netdev->name); + rndis_filter_close(net_device_ctx->device_ctx); + netif_carrier_on(netvsc_dev->ndev); + /* + * Notify peers. 
+ */ + atomic_inc(&netvsc_dev->vf_use_cnt); + net_device_ctx->gwrk.netdev = netvsc_dev->ndev; + net_device_ctx->gwrk.netvsc_dev = netvsc_dev; + schedule_work(&net_device_ctx->gwrk.dwrk); + + return NOTIFY_OK; +} + + +static int netvsc_unregister_vf(struct net_device *vf_netdev) +{ + struct netvsc_device *netvsc_dev; + const struct ethtool_ops *eth_ops = vf_netdev->ethtool_ops; + + if (eth_ops == ðtool_ops) + return NOTIFY_DONE; + + netvsc_dev = get_netvsc_device(vf_netdev->dev_addr); + if (netvsc_dev == NULL) + return NOTIFY_DONE; + netdev_info(netvsc_dev->ndev, "VF unregistering: %s\n", + vf_netdev->name); + + netvsc_dev->vf_netdev = NULL; + module_put(THIS_MODULE); + return NOTIFY_OK; +} + static int netvsc_probe(struct hv_device *dev, const struct hv_vmbus_device_id *dev_id) { @@ -1140,6 +1360,7 @@ static int netvsc_probe(struct hv_device *dev, hv_set_drvdata(dev, net); INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change); INIT_WORK(&net_device_ctx->work, do_set_multicast); + INIT_WORK(&net_device_ctx->gwrk.dwrk, netvsc_notify_peers); spin_lock_init(&net_device_ctx->lock); INIT_LIST_HEAD(&net_device_ctx->reconfig_events); @@ -1235,19 +1456,58 @@ static struct hv_driver netvsc_drv = { .remove = netvsc_remove, }; + +/* + * On Hyper-V, every VF interface is matched with a corresponding + * synthetic interface. The synthetic interface is presented first + * to the guest. When the corresponding VF instance is registered, + * we will take care of switching the data path. + */ +static int netvsc_netdev_event(struct notifier_block *this, + unsigned long event, void *ptr) +{ + struct net_device *event_dev = netdev_notifier_info_to_dev(ptr); + + switch (event) { + case NETDEV_REGISTER: + return netvsc_register_vf(event_dev); + case NETDEV_UNREGISTER: + return netvsc_unregister_vf(event_dev); + case NETDEV_UP: + return netvsc_vf_up(event_dev); + case NETDEV_DOWN: + return netvsc_vf_down(event_dev); + default: + return NOTIFY_DONE; + } +} + +static struct notifier_block netvsc_netdev_notifier = { + .notifier_call = netvsc_netdev_event, +}; + static void __exit netvsc_drv_exit(void) { + unregister_netdevice_notifier(&netvsc_netdev_notifier); vmbus_driver_unregister(&netvsc_drv); } static int __init netvsc_drv_init(void) { + int ret; + if (ring_size < RING_SIZE_MIN) { ring_size = RING_SIZE_MIN; pr_info("Increased ring_size to %d (min allowed)\n", ring_size); } - return vmbus_driver_register(&netvsc_drv); + ret = vmbus_driver_register(&netvsc_drv); + + if (ret) + return ret; + + register_netdevice_notifier(&netvsc_netdev_notifier); + return 0; } MODULE_LICENSE("GPL"); diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c index c4e1e0408433..a59cdebc9b4b 100644 --- a/drivers/net/hyperv/rndis_filter.c +++ b/drivers/net/hyperv/rndis_filter.c @@ -1229,6 +1229,9 @@ int rndis_filter_open(struct hv_device *dev) if (!net_device) return -EINVAL; + if (atomic_inc_return(&net_device->open_cnt) != 1) + return 0; + return rndis_filter_open_device(net_device->extension); } @@ -1239,5 +1242,8 @@ int rndis_filter_close(struct hv_device *dev) if (!nvdev) return -EINVAL; + if (atomic_dec_return(&nvdev->open_cnt) != 0) + return 0; + return rndis_filter_close_device(nvdev->extension); } From e7600449bef0650ee7818be6de26955e81579d13 Mon Sep 17 00:00:00 2001 From: Govindarajulu Varadarajan <_govind@gmx.com> Date: Sat, 16 Apr 2016 00:40:43 +0530 Subject: [PATCH 0818/1649] enic: set netdev->vlan_features Driver sets vlan_feature to netdev->features as hardware supports all of them on 
vlan interface. Signed-off-by: Govindarajulu Varadarajan <_govind@gmx.com> Signed-off-by: David S. Miller --- drivers/net/ethernet/cisco/enic/enic_main.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c index b2182d3ba3cc..f15560a06718 100644 --- a/drivers/net/ethernet/cisco/enic/enic_main.c +++ b/drivers/net/ethernet/cisco/enic/enic_main.c @@ -2740,6 +2740,7 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) netdev->hw_features |= NETIF_F_RXCSUM; netdev->features |= netdev->hw_features; + netdev->vlan_features |= netdev->features; #ifdef CONFIG_RFS_ACCEL netdev->hw_features |= NETIF_F_NTUPLE; From 4a96300cec88729415683db8a2b909563b09fbaa Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Fri, 15 Apr 2016 12:24:57 +0200 Subject: [PATCH 0819/1649] netfilter: ctnetlink: restore inlining for netlink message size calculation Calm down gcc warnings: net/netfilter/nf_conntrack_netlink.c:529:15: warning: 'ctnetlink_proto_size' defined but not used [-Wunused-function] static size_t ctnetlink_proto_size(const struct nf_conn *ct) ^ net/netfilter/nf_conntrack_netlink.c:546:15: warning: 'ctnetlink_acct_size' defined but not used [-Wunused-function] static size_t ctnetlink_acct_size(const struct nf_conn *ct) ^ net/netfilter/nf_conntrack_netlink.c:556:12: warning: 'ctnetlink_secctx_size' defined but not used [-Wunused-function] static int ctnetlink_secctx_size(const struct nf_conn *ct) ^ net/netfilter/nf_conntrack_netlink.c:572:15: warning: 'ctnetlink_timestamp_size' defined but not used [-Wunused-function] static size_t ctnetlink_timestamp_size(const struct nf_conn *ct) ^ So gcc compiles them out when CONFIG_NF_CONNTRACK_EVENTS and CONFIG_NETFILTER_NETLINK_GLUE_CT are not set. 
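For readers who want to reproduce the warning behaviour outside the kernel tree, here is a minimal stand-alone sketch (hypothetical names, plain C, not kernel code): with gcc -Wall, a plain static helper whose only caller is compiled out triggers -Wunused-function, while the static inline variant restored by this patch stays silent.

/* unused-inline.c: build with
 *   gcc -Wall -c unused-inline.c              -> warns about calc_size_plain()
 *   gcc -Wall -DCONFIG_FOO -c unused-inline.c -> no warning
 */
#include <stddef.h>

static size_t calc_size_plain(int n)		/* warns when its caller is compiled out */
{
	return (size_t)n * 8;
}

static inline size_t calc_size_inline(int n)	/* never warns, even when unused */
{
	return (size_t)n * 8;
}

#ifdef CONFIG_FOO
size_t caller(int n)
{
	return calc_size_plain(n) + calc_size_inline(n);
}
#endif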
Fixes: 4054ff45454a9a4 ("netfilter: ctnetlink: remove unnecessary inlining") Reported-by: Stephen Rothwell Signed-off-by: Pablo Neira Ayuso Acked-by: Arnd Bergmann --- net/netfilter/nf_conntrack_netlink.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index caa4efe5930b..6cca30ec8535 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c @@ -336,7 +336,7 @@ nla_put_failure: #endif #ifdef CONFIG_NF_CONNTRACK_LABELS -static int ctnetlink_label_size(const struct nf_conn *ct) +static inline int ctnetlink_label_size(const struct nf_conn *ct) { struct nf_conn_labels *labels = nf_ct_labels_find(ct); @@ -526,7 +526,7 @@ nla_put_failure: return -1; } -static size_t ctnetlink_proto_size(const struct nf_conn *ct) +static inline size_t ctnetlink_proto_size(const struct nf_conn *ct) { struct nf_conntrack_l3proto *l3proto; struct nf_conntrack_l4proto *l4proto; @@ -543,7 +543,7 @@ static size_t ctnetlink_proto_size(const struct nf_conn *ct) return len; } -static size_t ctnetlink_acct_size(const struct nf_conn *ct) +static inline size_t ctnetlink_acct_size(const struct nf_conn *ct) { if (!nf_ct_ext_exist(ct, NF_CT_EXT_ACCT)) return 0; @@ -553,7 +553,7 @@ static size_t ctnetlink_acct_size(const struct nf_conn *ct) ; } -static int ctnetlink_secctx_size(const struct nf_conn *ct) +static inline int ctnetlink_secctx_size(const struct nf_conn *ct) { #ifdef CONFIG_NF_CONNTRACK_SECMARK int len, ret; @@ -569,7 +569,7 @@ static int ctnetlink_secctx_size(const struct nf_conn *ct) #endif } -static size_t ctnetlink_timestamp_size(const struct nf_conn *ct) +static inline size_t ctnetlink_timestamp_size(const struct nf_conn *ct) { #ifdef CONFIG_NF_CONNTRACK_TIMESTAMP if (!nf_ct_ext_exist(ct, NF_CT_EXT_TSTAMP)) From b520bd07595b117a08871ebc0a16452cc798d35b Mon Sep 17 00:00:00 2001 From: Sergei Shtylyov Date: Sun, 17 Apr 2016 01:05:19 +0300 Subject: [PATCH 0820/1649] of_mdio: make of_mdiobus_register_{device|phy}() *void* The results of of_mdiobus_register_{device|phy}() are never checked, so we can make both these functions *void*... Signed-off-by: Sergei Shtylyov Signed-off-by: David S. 
Miller --- drivers/of/of_mdio.c | 21 ++++++++------------- 1 file changed, 8 insertions(+), 13 deletions(-) diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c index 8453f08d2ef4..2c1e52e06102 100644 --- a/drivers/of/of_mdio.c +++ b/drivers/of/of_mdio.c @@ -41,8 +41,8 @@ static int of_get_phy_id(struct device_node *device, u32 *phy_id) return -EINVAL; } -static int of_mdiobus_register_phy(struct mii_bus *mdio, struct device_node *child, - u32 addr) +static void of_mdiobus_register_phy(struct mii_bus *mdio, + struct device_node *child, u32 addr) { struct phy_device *phy; bool is_c45; @@ -57,7 +57,7 @@ static int of_mdiobus_register_phy(struct mii_bus *mdio, struct device_node *chi else phy = get_phy_device(mdio, addr, is_c45); if (IS_ERR_OR_NULL(phy)) - return 1; + return; rc = irq_of_parse_and_map(child, 0); if (rc > 0) { @@ -81,25 +81,22 @@ static int of_mdiobus_register_phy(struct mii_bus *mdio, struct device_node *chi if (rc) { phy_device_free(phy); of_node_put(child); - return 1; + return; } dev_dbg(&mdio->dev, "registered phy %s at address %i\n", child->name, addr); - - return 0; } -static int of_mdiobus_register_device(struct mii_bus *mdio, - struct device_node *child, - u32 addr) +static void of_mdiobus_register_device(struct mii_bus *mdio, + struct device_node *child, u32 addr) { struct mdio_device *mdiodev; int rc; mdiodev = mdio_device_create(mdio, addr); if (IS_ERR(mdiodev)) - return 1; + return; /* Associate the OF node with the device structure so it * can be looked up later. @@ -112,13 +109,11 @@ static int of_mdiobus_register_device(struct mii_bus *mdio, if (rc) { mdio_device_free(mdiodev); of_node_put(child); - return 1; + return; } dev_dbg(&mdio->dev, "registered mdio device %s at address %i\n", child->name, addr); - - return 0; } int of_mdio_parse_addr(struct device *dev, const struct device_node *np) From 266a0a790fb545fa1802a899ac44f61b1d6335a7 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Sat, 16 Apr 2016 22:29:33 +0200 Subject: [PATCH 0821/1649] bpf: avoid warning for wrong pointer cast Two new functions in bpf contain a cast from a 'u64' to a pointer. This works on 64-bit architectures but causes a warning on all 32-bit architectures: kernel/trace/bpf_trace.c: In function 'bpf_perf_event_output_tp': kernel/trace/bpf_trace.c:350:13: error: cast to pointer from integer of different size [-Werror=int-to-pointer-cast] u64 ctx = *(long *)r1; This changes the cast to first convert the u64 argument into a uintptr_t, which is guaranteed to be the same size as a pointer. Signed-off-by: Arnd Bergmann Fixes: 9940d67c93b5 ("bpf: support bpf_get_stackid() and bpf_perf_event_output() in tracepoint programs") Acked-by: Alexei Starovoitov Signed-off-by: David S. Miller --- kernel/trace/bpf_trace.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index 685587885374..f389629dade7 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -351,7 +351,7 @@ static u64 bpf_perf_event_output_tp(u64 r1, u64 r2, u64 index, u64 r4, u64 size) * from bpf program and contain a pointer to 'struct pt_regs'. 
Fetch it * from there and call the same bpf_perf_event_output() helper */ - u64 ctx = *(long *)r1; + u64 ctx = *(long *)(uintptr_t)r1; return bpf_perf_event_output(ctx, r2, index, r4, size); } @@ -369,7 +369,7 @@ static const struct bpf_func_proto bpf_perf_event_output_proto_tp = { static u64 bpf_get_stackid_tp(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5) { - u64 ctx = *(long *)r1; + u64 ctx = *(long *)(uintptr_t)r1; return bpf_get_stackid(ctx, r2, r3, r4, r5); } From b67d1df5ad0c227adc89d2913e933ed4addc5dab Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Mon, 18 Apr 2016 23:58:30 +0200 Subject: [PATCH 0822/1649] net: w5100: don't build spi driver without w5100 The w5100-spi driver front-end only makes sense when the w5100 core driver is enabled, not for a configuration that only has w5300: drivers/net/built-in.o: In function `w5100_spi_remove': drivers/net/ethernet/wiznet/w5100-spi.c:277: undefined reference to `w5100_remove' drivers/net/built-in.o: In function `w5100_spi_probe': drivers/net/ethernet/wiznet/w5100-spi.c:272: undefined reference to `w5100_probe' drivers/net/built-in.o: In function `w5200_spi_init': drivers/net/ethernet/wiznet/w5100-spi.c:125: undefined reference to `w5100_ops_priv' drivers/net/built-in.o: In function `w5200_spi_readbulk': drivers/net/ethernet/wiznet/w5100-spi.c:125: undefined reference to `w5100_ops_priv' drivers/net/built-in.o: In function `w5200_spi_writebulk': drivers/net/ethernet/wiznet/w5100-spi.c:125: undefined reference to `w5100_ops_priv' drivers/net/built-in.o:(.data+0x3ed1c): undefined reference to `w5100_pm_ops' This adds an appropriate Kconfig dependency. Signed-off-by: Arnd Bergmann Fixes: 630cf09751fe ("net: w5100: support SPI interface mode") Signed-off-by: David S. Miller --- drivers/net/ethernet/wiznet/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/wiznet/Kconfig b/drivers/net/ethernet/wiznet/Kconfig index 1f15376e9856..f3385a1999a2 100644 --- a/drivers/net/ethernet/wiznet/Kconfig +++ b/drivers/net/ethernet/wiznet/Kconfig @@ -71,7 +71,7 @@ endchoice config WIZNET_W5100_SPI tristate "WIZnet W5100/W5200 Ethernet support for SPI mode" - depends on WIZNET_BUS_ANY + depends on WIZNET_BUS_ANY && WIZNET_W5100 depends on SPI ---help--- In SPI mode host system accesses registers using SPI protocol From 87be054a30de1d48a4c9850543080b8cc9854d2c Mon Sep 17 00:00:00 2001 From: Mohammed Shafi Shajakhan Date: Tue, 5 Apr 2016 20:58:26 +0530 Subject: [PATCH 0823/1649] ath10k: fix return value for btcoex and peer stats debugfs Return value is incorrect for btcoex and peer stats debugfs 'write' entries if the user provides a value that matches with the already available debugfs entry, this results in the debugfs entry getting stuck and the operation has to be terminated manually. Fix this by returning the appropriate return 'count' as we do it for other debugfs entries like pktlog etc. 
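As a condensed sketch of the corrected pattern (hypothetical device and flag names, the usual debugfs write-handler boilerplate assumed): the handler has to report the bytes as consumed even when the requested state already matches the current one, otherwise the writer sees a zero-byte write and keeps retrying.

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/bitops.h>
#include <linux/uaccess.h>

struct example_dev {
	unsigned long flags;
};

#define EXAMPLE_FLAG	0

static ssize_t example_write_flag(struct file *file, const char __user *ubuf,
				  size_t count, loff_t *ppos)
{
	struct example_dev *ed = file->private_data;
	char buf[32];
	size_t buf_size;
	bool val;

	buf_size = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, ubuf, buf_size))
		return -EFAULT;
	buf[buf_size] = '\0';

	if (strtobool(buf, &val))
		return -EINVAL;

	/* Already in the requested state: nothing to do, but still
	 * consume the input instead of returning 0.
	 */
	if (!(test_bit(EXAMPLE_FLAG, &ed->flags) ^ val))
		return count;

	if (val)
		set_bit(EXAMPLE_FLAG, &ed->flags);
	else
		clear_bit(EXAMPLE_FLAG, &ed->flags);

	return count;
}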
Fixes: cc61a1bbbc0e ("ath10k: enable debugfs provision to enable Peer Stats feature") Fixes: c28e6f06ff40 ("ath10k: fix sanity check on enabling btcoex via debugfs") Signed-off-by: Mohammed Shafi Shajakhan Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/debug.c | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c index 76bbe17b25b6..e7d441caa288 100644 --- a/drivers/net/wireless/ath/ath10k/debug.c +++ b/drivers/net/wireless/ath/ath10k/debug.c @@ -2122,7 +2122,7 @@ static ssize_t ath10k_write_btcoex(struct file *file, struct ath10k *ar = file->private_data; char buf[32]; size_t buf_size; - int ret = 0; + int ret; bool val; buf_size = min(count, (sizeof(buf) - 1)); @@ -2142,8 +2142,10 @@ static ssize_t ath10k_write_btcoex(struct file *file, goto exit; } - if (!(test_bit(ATH10K_FLAG_BTCOEX, &ar->dev_flags) ^ val)) + if (!(test_bit(ATH10K_FLAG_BTCOEX, &ar->dev_flags) ^ val)) { + ret = count; goto exit; + } if (val) set_bit(ATH10K_FLAG_BTCOEX, &ar->dev_flags); @@ -2189,7 +2191,7 @@ static ssize_t ath10k_write_peer_stats(struct file *file, struct ath10k *ar = file->private_data; char buf[32]; size_t buf_size; - int ret = 0; + int ret; bool val; buf_size = min(count, (sizeof(buf) - 1)); @@ -2209,8 +2211,10 @@ static ssize_t ath10k_write_peer_stats(struct file *file, goto exit; } - if (!(test_bit(ATH10K_FLAG_PEER_STATS, &ar->dev_flags) ^ val)) + if (!(test_bit(ATH10K_FLAG_PEER_STATS, &ar->dev_flags) ^ val)) { + ret = count; goto exit; + } if (val) set_bit(ATH10K_FLAG_PEER_STATS, &ar->dev_flags); From 1ce8c1484e80010a6e4b9611c65668ff77556f45 Mon Sep 17 00:00:00 2001 From: Rajkumar Manoharan Date: Thu, 7 Apr 2016 12:11:54 +0530 Subject: [PATCH 0824/1649] ath10k: fix rx_channel during hw reconfigure Upon firmware assert, restart work will be triggered so that mac80211 will reconfigure the driver. An issue is reported that after restart work, survey dump data do not contain in-use (SURVEY_INFO_IN_USE) info for operating channel. During reconfigure, since mac80211 already has valid channel context for given radio, channel context iteration return num_chanctx > 0. Hence rx_channel is always NULL. Fix this by assigning channel context to rx_channel when driver restart is in progress. Cc: stable@vger.kernel.org Signed-off-by: Rajkumar Manoharan Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/mac.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index d9d98bf22b3e..32e9d5010b4e 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -6893,7 +6893,13 @@ ath10k_mac_update_rx_channel(struct ath10k *ar, def = &vifs[0].new_ctx->def; ar->rx_channel = def->chan; - } else if (ctx && ath10k_mac_num_chanctxs(ar) == 0) { + } else if ((ctx && ath10k_mac_num_chanctxs(ar) == 0) || + (ctx && (ar->state == ATH10K_STATE_RESTARTED))) { + /* During driver restart due to firmware assert, since mac80211 + * already has valid channel context for given radio, channel + * context iteration return num_chanctx > 0. So fix rx_channel + * when restart is in progress. 
+ */ ar->rx_channel = ctx->def.chan; } else { ar->rx_channel = NULL; From de72a20dc3714918b208430dd426c9f6a23ffaec Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Mon, 11 Apr 2016 11:15:20 +0300 Subject: [PATCH 0825/1649] ath10k: add some sanity checks to peer_map_event() functions Smatch complains that since "ev->peer_id" comes from skb->data that means we can't trust it and have to do a bounds check on it to prevent an array overflow. Fixes: 6942726f7f7b ('ath10k: add fast peer_map lookup') Signed-off-by: Dan Carpenter Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/txrx.c | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c index 8c7086989a71..576e7c42ed65 100644 --- a/drivers/net/wireless/ath/ath10k/txrx.c +++ b/drivers/net/wireless/ath/ath10k/txrx.c @@ -190,6 +190,13 @@ void ath10k_peer_map_event(struct ath10k_htt *htt, struct ath10k *ar = htt->ar; struct ath10k_peer *peer; + if (ev->peer_id >= ATH10K_MAX_NUM_PEER_IDS) { + ath10k_warn(ar, + "received htt peer map event with idx out of bounds: %hu\n", + ev->peer_id); + return; + } + spin_lock_bh(&ar->data_lock); peer = ath10k_peer_find(ar, ev->vdev_id, ev->addr); if (!peer) { @@ -218,6 +225,13 @@ void ath10k_peer_unmap_event(struct ath10k_htt *htt, struct ath10k *ar = htt->ar; struct ath10k_peer *peer; + if (ev->peer_id >= ATH10K_MAX_NUM_PEER_IDS) { + ath10k_warn(ar, + "received htt peer unmap event with idx out of bounds: %hu\n", + ev->peer_id); + return; + } + spin_lock_bh(&ar->data_lock); peer = ath10k_peer_find_by_id(ar, ev->peer_id); if (!peer) { From 7e247a9e88dc811d0b7b6a70af1d741054772bc4 Mon Sep 17 00:00:00 2001 From: Raja Mani Date: Tue, 12 Apr 2016 20:15:53 +0530 Subject: [PATCH 0826/1649] ath10k: add dynamic tx mode switch config support for qca4019 push-pull mode needs certain amount the host driver involvement for managing queues in the host memory and packet delivery to firmware. qca4019 wifi firmware has an option to stay in push mode for less number of active traffic flow and then switch to push-pull mode when the active traffic flow goes beyond the certain limit. The advantage of staying in push mode for less active traffic is, the host cpu consumption is reduced. qca4019 firmware supports this flexibility of the mode switch. It takes the host driver interest (LOW_PERF/HIGH_PERF) via WMI_EXT_RESOURCE_CFG_CMDID, LOW_PERF - fw would stay in push mode and switch to push-pull based on demand. HIGH_PERF - fw would stay in push-pull mode from the boot. To make this configuration generic, new WMI services WMI_SERVICE_TX_MODE_PUSH_ONLY, WMI_SERVICE_TX_MODE_PUSH_PULL, WMI_SERVICE_TX_MODE_DYNAMIC are introduced to take dynamic tx mode switch support availability in firmware. Based on WMI_SERVICE_TX_MODE_DYNAMIC, LOW_PERF or HIGHT_PERF is configured to the firmware. 
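Restated as a small stand-alone sketch (hypothetical names, not the driver code itself), the capability-driven choice described above boils down to:

#include <stdbool.h>

enum host_platform_type { HOST_PLATFORM_HIGH_PERF, HOST_PLATFORM_LOW_PERF };

enum host_platform_type pick_platform_type(bool fw_supports_dynamic_tx_mode)
{
	/* Firmware can switch tx modes on its own: start cheap (push
	 * only) and let it escalate to push-pull under load.
	 */
	if (fw_supports_dynamic_tx_mode)
		return HOST_PLATFORM_LOW_PERF;

	/* Otherwise keep the existing behaviour: push-pull from boot. */
	return HOST_PLATFORM_HIGH_PERF;
}

The mac.c hunk below implements exactly this by testing WMI_SERVICE_TX_MODE_DYNAMIC in the service map before sending WMI_EXT_RESOURCE_CFG_CMDID.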
Signed-off-by: Raja Mani Signed-off-by: Tamizh chelvam Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/core.c | 3 +-- drivers/net/wireless/ath/ath10k/mac.c | 20 ++++++++++++++++++++ drivers/net/wireless/ath/ath10k/mac.h | 1 + drivers/net/wireless/ath/ath10k/wmi.h | 15 +++++++++++++++ 4 files changed, 37 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c index b2c7fe3d30a4..1c4106b84a35 100644 --- a/drivers/net/wireless/ath/ath10k/core.c +++ b/drivers/net/wireless/ath/ath10k/core.c @@ -1787,8 +1787,7 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode) if (ath10k_peer_stats_enabled(ar)) val = WMI_10_4_PEER_STATS; - status = ath10k_wmi_ext_resource_config(ar, - WMI_HOST_PLATFORM_HIGH_PERF, val); + status = ath10k_mac_ext_resource_config(ar, val); if (status) { ath10k_err(ar, "failed to send ext resource cfg command : %d\n", diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index 32e9d5010b4e..fb393596f236 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -157,6 +157,26 @@ ath10k_mac_max_vht_nss(const u16 vht_mcs_mask[NL80211_VHT_NSS_MAX]) return 1; } +int ath10k_mac_ext_resource_config(struct ath10k *ar, u32 val) +{ + enum wmi_host_platform_type platform_type; + int ret; + + if (test_bit(WMI_SERVICE_TX_MODE_DYNAMIC, ar->wmi.svc_map)) + platform_type = WMI_HOST_PLATFORM_LOW_PERF; + else + platform_type = WMI_HOST_PLATFORM_HIGH_PERF; + + ret = ath10k_wmi_ext_resource_config(ar, platform_type, val); + + if (ret && ret != -EOPNOTSUPP) { + ath10k_warn(ar, "failed to configure ext resource: %d\n", ret); + return ret; + } + + return 0; +} + /**********/ /* Crypto */ /**********/ diff --git a/drivers/net/wireless/ath/ath10k/mac.h b/drivers/net/wireless/ath/ath10k/mac.h index 2c3327beb445..1bd29ecfcdcc 100644 --- a/drivers/net/wireless/ath/ath10k/mac.h +++ b/drivers/net/wireless/ath/ath10k/mac.h @@ -81,6 +81,7 @@ int ath10k_mac_tx_push_txq(struct ieee80211_hw *hw, struct ieee80211_txq *ath10k_mac_txq_lookup(struct ath10k *ar, u16 peer_id, u8 tid); +int ath10k_mac_ext_resource_config(struct ath10k *ar, u32 val); static inline struct ath10k_vif *ath10k_vif_to_arvif(struct ieee80211_vif *vif) { diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h index 378f2998cd5a..db2553522d8b 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.h +++ b/drivers/net/wireless/ath/ath10k/wmi.h @@ -180,6 +180,9 @@ enum wmi_service { WMI_SERVICE_MESH_NON_11S, WMI_SERVICE_PEER_STATS, WMI_SERVICE_RESTRT_CHNL_SUPPORT, + WMI_SERVICE_TX_MODE_PUSH_ONLY, + WMI_SERVICE_TX_MODE_PUSH_PULL, + WMI_SERVICE_TX_MODE_DYNAMIC, /* keep last */ WMI_SERVICE_MAX, @@ -302,6 +305,9 @@ enum wmi_10_4_service { WMI_10_4_SERVICE_RESTRT_CHNL_SUPPORT, WMI_10_4_SERVICE_PEER_STATS, WMI_10_4_SERVICE_MESH_11S, + WMI_10_4_SERVICE_TX_MODE_PUSH_ONLY, + WMI_10_4_SERVICE_TX_MODE_PUSH_PULL, + WMI_10_4_SERVICE_TX_MODE_DYNAMIC, }; static inline char *wmi_service_name(int service_id) @@ -396,6 +402,9 @@ static inline char *wmi_service_name(int service_id) SVCSTR(WMI_SERVICE_MESH_NON_11S); SVCSTR(WMI_SERVICE_PEER_STATS); SVCSTR(WMI_SERVICE_RESTRT_CHNL_SUPPORT); + SVCSTR(WMI_SERVICE_TX_MODE_PUSH_ONLY); + SVCSTR(WMI_SERVICE_TX_MODE_PUSH_PULL); + SVCSTR(WMI_SERVICE_TX_MODE_DYNAMIC); default: return NULL; } @@ -643,6 +652,12 @@ static inline void wmi_10_4_svc_map(const __le32 *in, unsigned long *out, WMI_SERVICE_PEER_STATS, len); 
SVCMAP(WMI_10_4_SERVICE_MESH_11S, WMI_SERVICE_MESH_11S, len); + SVCMAP(WMI_10_4_SERVICE_TX_MODE_PUSH_ONLY, + WMI_SERVICE_TX_MODE_PUSH_ONLY, len); + SVCMAP(WMI_10_4_SERVICE_TX_MODE_PUSH_PULL, + WMI_SERVICE_TX_MODE_PUSH_PULL, len); + SVCMAP(WMI_10_4_SERVICE_TX_MODE_DYNAMIC, + WMI_SERVICE_TX_MODE_DYNAMIC, len); } #undef SVCMAP From f286dd899b4f1445279af6b5965c335ae6f998f7 Mon Sep 17 00:00:00 2001 From: Markus Elfring Date: Fri, 1 Jan 2016 19:09:32 +0100 Subject: [PATCH 0827/1649] ath9k_htc: Replace a variable initialisation by an assignment in ath9k_htc_set_channel() Replace an explicit initialisation for one local variable at the beginning by a conditional assignment. Signed-off-by: Markus Elfring Reviewed-by: Oleksij Rempel Reviewed-by: Julian Calaby Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath9k/htc_drv_main.c | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c index 639294a9e34d..6c5047cc837e 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c @@ -246,7 +246,7 @@ static int ath9k_htc_set_channel(struct ath9k_htc_priv *priv, struct ieee80211_conf *conf = &common->hw->conf; bool fastcc; struct ieee80211_channel *channel = hw->conf.chandef.chan; - struct ath9k_hw_cal_data *caldata = NULL; + struct ath9k_hw_cal_data *caldata; enum htc_phymode mode; __be16 htc_mode; u8 cmd_rsp; @@ -274,10 +274,7 @@ static int ath9k_htc_set_channel(struct ath9k_htc_priv *priv, priv->ah->curchan->channel, channel->center_freq, conf_is_ht(conf), conf_is_ht40(conf), fastcc); - - if (!fastcc) - caldata = &priv->caldata; - + caldata = fastcc ? NULL : &priv->caldata; ret = ath9k_hw_reset(ah, hchan, caldata, fastcc); if (ret) { ath_err(common, From 71f5137bf010c6faffab50c0ec15374c59c4a411 Mon Sep 17 00:00:00 2001 From: Zefir Kurtisi Date: Fri, 1 Apr 2016 11:37:08 +0200 Subject: [PATCH 0828/1649] ath9k: interpret requested txpower in EIRP domain Tx power limitations at upper layers are interpreted in the EIRP domain. When the user requests a given maximum txpower, e.g. with: 'iw phy0 set txpower fixed 1500', he expects the EIRP to be at or below 15dBm. In ath9k_hw_apply_txpower(), the interpretation is different: the antenna-gain is capped against the current txpower limit in the regulatory, but not against the user set value. It ensures that the resulting EIRP is below the limit defined by the active countrycode, but not below the value the user requested. In a scenario like e.g. a) antenna_gain=6 b) countrycode limits to eirp=18 c) user set txpower=15 this will cause a setting for AR_PHY_POWER_TX_RATE regs resulting in an EIRP > 15. This patch ensures that antenna-gain is considered whenever the txpower limit is adjusted and with that the user set limits are kept. 
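As a back-of-the-envelope illustration of the arithmetic (plain dBm, ignoring the driver's internal half-dB units and per-rate details; EIRP is conducted power plus antenna gain):

#include <stdio.h>

int main(void)
{
	int user_limit = 15;	/* requested EIRP limit, dBm    */
	int reg_limit  = 18;	/* country-code EIRP limit, dBm */
	int ant_gain   = 6;	/* antenna gain, dBi            */
	int limit = user_limit < reg_limit ? user_limit : reg_limit;

	/* Honouring the request in the EIRP domain means capping the
	 * conducted power at the limit minus the antenna gain.
	 */
	int conducted = limit - ant_gain;

	printf("conducted: %d dBm -> EIRP: %d dBm\n",
	       conducted, conducted + ant_gain);

	/* Before this patch only the gain exceeding the regulatory
	 * headroom was subtracted, so the EIRP stayed within the
	 * 18 dBm country-code limit but could exceed the 15 dBm the
	 * user asked for.
	 */
	return 0;
}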
Signed-off-by: Zefir Kurtisi Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath9k/hw.c | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c index 42009065e234..8b2895f9ac7a 100644 --- a/drivers/net/wireless/ath/ath9k/hw.c +++ b/drivers/net/wireless/ath/ath9k/hw.c @@ -2914,8 +2914,7 @@ void ath9k_hw_apply_txpower(struct ath_hw *ah, struct ath9k_channel *chan, { struct ath_regulatory *reg = ath9k_hw_regulatory(ah); struct ieee80211_channel *channel; - int chan_pwr, new_pwr, max_gain; - int ant_gain, ant_reduction = 0; + int chan_pwr, new_pwr; if (!chan) return; @@ -2923,15 +2922,10 @@ void ath9k_hw_apply_txpower(struct ath_hw *ah, struct ath9k_channel *chan, channel = chan->chan; chan_pwr = min_t(int, channel->max_power * 2, MAX_RATE_POWER); new_pwr = min_t(int, chan_pwr, reg->power_limit); - max_gain = chan_pwr - new_pwr + channel->max_antenna_gain * 2; - - ant_gain = get_antenna_gain(ah, chan); - if (ant_gain > max_gain) - ant_reduction = ant_gain - max_gain; ah->eep_ops->set_txpower(ah, chan, ath9k_regd_get_ctl(reg, chan), - ant_reduction, new_pwr, test); + get_antenna_gain(ah, chan), new_pwr, test); } void ath9k_hw_set_txpowerlimit(struct ath_hw *ah, u32 limit, bool test) From 47f58b1ebe3739dad1ddeb5cd1f1e718648b4d24 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Sun, 10 Apr 2016 12:25:31 +0100 Subject: [PATCH 0829/1649] ath9k: remove duplicate assignment of variable ah ah is written twice with the same value, remove one of the redundant assignments to ah. Signed-off-by: Colin Ian King Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath9k/init.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c index 77ace8d72d54..fb702c48a233 100644 --- a/drivers/net/wireless/ath/ath9k/init.c +++ b/drivers/net/wireless/ath/ath9k/init.c @@ -477,7 +477,7 @@ static void ath9k_eeprom_request_cb(const struct firmware *eeprom_blob, static int ath9k_eeprom_request(struct ath_softc *sc, const char *name) { struct ath9k_eeprom_ctx ec; - struct ath_hw *ah = ah = sc->sc_ah; + struct ath_hw *ah = sc->sc_ah; int err; /* try to load the EEPROM content asynchronously */ From a163f2cb393d9d71cad57bfe6a8c7f452a478fb4 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Mon, 11 Apr 2016 21:14:29 +0200 Subject: [PATCH 0830/1649] netfilter: conntrack: don't acquire lock during seq_printf read access doesn't need any lock here. Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nf_conntrack_proto_sctp.c | 8 +------- net/netfilter/nf_conntrack_proto_tcp.c | 8 +------- 2 files changed, 2 insertions(+), 14 deletions(-) diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c index 9578a7c371ef..1d7ab960a9e6 100644 --- a/net/netfilter/nf_conntrack_proto_sctp.c +++ b/net/netfilter/nf_conntrack_proto_sctp.c @@ -191,13 +191,7 @@ static void sctp_print_tuple(struct seq_file *s, /* Print out the private part of the conntrack. 
*/ static void sctp_print_conntrack(struct seq_file *s, struct nf_conn *ct) { - enum sctp_conntrack state; - - spin_lock_bh(&ct->lock); - state = ct->proto.sctp.state; - spin_unlock_bh(&ct->lock); - - seq_printf(s, "%s ", sctp_conntrack_names[state]); + seq_printf(s, "%s ", sctp_conntrack_names[ct->proto.sctp.state]); } #define for_each_sctp_chunk(skb, sch, _sch, offset, dataoff, count) \ diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c index 278f3b9356ef..e0cb0ce38746 100644 --- a/net/netfilter/nf_conntrack_proto_tcp.c +++ b/net/netfilter/nf_conntrack_proto_tcp.c @@ -313,13 +313,7 @@ static void tcp_print_tuple(struct seq_file *s, /* Print out the private part of the conntrack. */ static void tcp_print_conntrack(struct seq_file *s, struct nf_conn *ct) { - enum tcp_conntrack state; - - spin_lock_bh(&ct->lock); - state = ct->proto.tcp.state; - spin_unlock_bh(&ct->lock); - - seq_printf(s, "%s ", tcp_conntrack_names[state]); + seq_printf(s, "%s ", tcp_conntrack_names[ct->proto.tcp.state]); } static unsigned int get_conntrack_index(const struct tcphdr *tcph) From 18402843bf88c2e9674e1a3a05c73b7d9b09ee05 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Tue, 19 Apr 2016 14:30:10 -0400 Subject: [PATCH 0831/1649] net: Align IFLA_STATS64 attributes properly on architectures that need it. Since the nlattr header is 4 bytes in size, it can cause the netlink attribute payload to not be 8-byte aligned. This is particularly troublesome for IFLA_STATS64 which contains 64-bit statistic values. Solve this by creating a dummy IFLA_PAD attribute which has a payload which is zero bytes in size. When HAVE_EFFICIENT_UNALIGNED_ACCESS is false, we insert an IFLA_PAD attribute into the netlink response when necessary such that the IFLA_STATS64 payload will be properly aligned. With help and suggestions from Eric Dumazet. Signed-off-by: David S. Miller --- include/uapi/linux/if_link.h | 1 + net/core/rtnetlink.c | 19 +++++++++++++++++++ 2 files changed, 20 insertions(+) diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index bb3a90b57199..5ffdcb34e35b 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h @@ -155,6 +155,7 @@ enum { IFLA_PROTO_DOWN, IFLA_GSO_MAX_SEGS, IFLA_GSO_MAX_SIZE, + IFLA_PAD, __IFLA_MAX }; diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index a7a3d345134a..198ca2c99510 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -878,6 +878,9 @@ static noinline size_t if_nlmsg_size(const struct net_device *dev, + nla_total_size(IFNAMSIZ) /* IFLA_QDISC */ + nla_total_size(sizeof(struct rtnl_link_ifmap)) + nla_total_size(sizeof(struct rtnl_link_stats)) +#ifndef HAVE_EFFICIENT_UNALIGNED_ACCESS + + nla_total_size(0) /* IFLA_PAD */ +#endif + nla_total_size(sizeof(struct rtnl_link_stats64)) + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */ + nla_total_size(MAX_ADDR_LEN) /* IFLA_BROADCAST */ @@ -1052,6 +1055,22 @@ static noinline_for_stack int rtnl_fill_stats(struct sk_buff *skb, struct rtnl_link_stats64 *sp; struct nlattr *attr; +#ifndef HAVE_EFFICIENT_UNALIGNED_ACCESS + /* IF necessary, add a zero length NOP attribute so that the + * nla_data() of the IFLA_STATS64 will be 64-bit aligned. + * + * The nlattr header is 4 bytes in size, that's why we test + * if the skb->data _is_ aligned. This NOP attribute, plus + * nlattr header for IFLA_STATS64, will make nla_data() 8-byte + * aligned. 
+ */ + if (IS_ALIGNED((unsigned long)skb->data, 8)) { + attr = nla_reserve(skb, IFLA_PAD, 0); + if (!attr) + return -EMSGSIZE; + } +#endif + attr = nla_reserve(skb, IFLA_STATS64, sizeof(struct rtnl_link_stats64)); if (!attr) From 35c5845957c7982dac1f525ff3412f8acf0a0385 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Tue, 19 Apr 2016 19:49:29 -0400 Subject: [PATCH 0832/1649] net: Add helpers for 64-bit aligning netlink attributes. Suggested-by: Eric Dumazet Suggested-by: Nicolas Dichtel Signed-off-by: David S. Miller --- include/net/netlink.h | 37 +++++++++++++++++++++++++++++++++++++ net/core/rtnetlink.c | 24 +++++------------------- 2 files changed, 42 insertions(+), 19 deletions(-) diff --git a/include/net/netlink.h b/include/net/netlink.h index 0e3172751755..e644b3489acf 100644 --- a/include/net/netlink.h +++ b/include/net/netlink.h @@ -1230,6 +1230,43 @@ static inline int nla_validate_nested(const struct nlattr *start, int maxtype, return nla_validate(nla_data(start), nla_len(start), maxtype, policy); } +/** + * nla_align_64bit - 64-bit align the nla_data() of next attribute + * @skb: socket buffer the message is stored in + * @padattr: attribute type for the padding + * + * Conditionally emit a padding netlink attribute in order to make + * the next attribute we emit have a 64-bit aligned nla_data() area. + * This will only be done in architectures which do not have + * HAVE_EFFICIENT_UNALIGNED_ACCESS defined. + * + * Returns zero on success or a negative error code. + */ +static inline int nla_align_64bit(struct sk_buff *skb, int padattr) +{ +#ifndef HAVE_EFFICIENT_UNALIGNED_ACCESS + if (IS_ALIGNED((unsigned long)skb->data, 8)) { + struct nlattr *attr = nla_reserve(skb, padattr, 0); + if (!attr) + return -EMSGSIZE; + } +#endif + return 0; +} + +/** + * nla_total_size_64bit - total length of attribute including padding + * @payload: length of payload + */ +static inline int nla_total_size_64bit(int payload) +{ + return NLA_ALIGN(nla_attr_size(payload)) +#ifndef HAVE_EFFICIENT_UNALIGNED_ACCESS + + NLA_ALIGN(nla_attr_size(0)) +#endif + ; +} + /** * nla_for_each_attr - iterate over a stream of attributes * @pos: loop counter, set to current attribute diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 198ca2c99510..d3694a13c85a 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -878,10 +878,7 @@ static noinline size_t if_nlmsg_size(const struct net_device *dev, + nla_total_size(IFNAMSIZ) /* IFLA_QDISC */ + nla_total_size(sizeof(struct rtnl_link_ifmap)) + nla_total_size(sizeof(struct rtnl_link_stats)) -#ifndef HAVE_EFFICIENT_UNALIGNED_ACCESS - + nla_total_size(0) /* IFLA_PAD */ -#endif - + nla_total_size(sizeof(struct rtnl_link_stats64)) + + nla_total_size_64bit(sizeof(struct rtnl_link_stats64)) + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */ + nla_total_size(MAX_ADDR_LEN) /* IFLA_BROADCAST */ + nla_total_size(4) /* IFLA_TXQLEN */ @@ -1054,22 +1051,11 @@ static noinline_for_stack int rtnl_fill_stats(struct sk_buff *skb, { struct rtnl_link_stats64 *sp; struct nlattr *attr; + int err; -#ifndef HAVE_EFFICIENT_UNALIGNED_ACCESS - /* IF necessary, add a zero length NOP attribute so that the - * nla_data() of the IFLA_STATS64 will be 64-bit aligned. - * - * The nlattr header is 4 bytes in size, that's why we test - * if the skb->data _is_ aligned. This NOP attribute, plus - * nlattr header for IFLA_STATS64, will make nla_data() 8-byte - * aligned. 
- */ - if (IS_ALIGNED((unsigned long)skb->data, 8)) { - attr = nla_reserve(skb, IFLA_PAD, 0); - if (!attr) - return -EMSGSIZE; - } -#endif + err = nla_align_64bit(skb, IFLA_PAD); + if (err) + return err; attr = nla_reserve(skb, IFLA_STATS64, sizeof(struct rtnl_link_stats64)); From 607ea7cda6315be0ad8be2f98bc9de6f2d656ae6 Mon Sep 17 00:00:00 2001 From: Konstantin Khlebnikov Date: Mon, 18 Apr 2016 14:41:10 +0300 Subject: [PATCH 0833/1649] net/ipv6/addrconf: simplify sysctl registration Struct ctl_table_header holds pointer to sysctl table which could be used for freeing it after unregistration. IPv4 sysctls already use that. Remove redundant NULL assignment: ndev allocated using kzalloc. This also saves some bytes: sysctl table could be shorter than DEVCONF_MAX+1 if some options are disable in config. Signed-off-by: Konstantin Khlebnikov Signed-off-by: David S. Miller --- include/linux/ipv6.h | 3 ++- net/ipv6/addrconf.c | 43 +++++++++++++++++-------------------------- 2 files changed, 19 insertions(+), 27 deletions(-) diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h index 7edc14fb66b6..58d6e158755f 100644 --- a/include/linux/ipv6.h +++ b/include/linux/ipv6.h @@ -63,7 +63,8 @@ struct ipv6_devconf { } stable_secret; __s32 use_oif_addrs_only; __s32 keep_addr_on_down; - void *sysctl; + + struct ctl_table_header *sysctl_header; }; struct ipv6_params { diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index a6c99275bd8c..5a42c0fe0449 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -359,7 +359,6 @@ static struct inet6_dev *ipv6_add_dev(struct net_device *dev) ndev->addr_gen_mode = IN6_ADDR_GEN_MODE_EUI64; ndev->cnf.mtu6 = dev->mtu; - ndev->cnf.sysctl = NULL; ndev->nd_parms = neigh_parms_alloc(dev, &nd_tbl); if (!ndev->nd_parms) { kfree(ndev); @@ -5620,13 +5619,7 @@ int addrconf_sysctl_ignore_routes_with_linkdown(struct ctl_table *ctl, return ret; } -static struct addrconf_sysctl_table -{ - struct ctl_table_header *sysctl_header; - struct ctl_table addrconf_vars[DEVCONF_MAX+1]; -} addrconf_sysctl __read_mostly = { - .sysctl_header = NULL, - .addrconf_vars = { +static const struct ctl_table addrconf_sysctl[] = { { .procname = "forwarding", .data = &ipv6_devconf.forwarding, @@ -5944,52 +5937,50 @@ static struct addrconf_sysctl_table { /* sentinel */ } - }, }; static int __addrconf_sysctl_register(struct net *net, char *dev_name, struct inet6_dev *idev, struct ipv6_devconf *p) { int i; - struct addrconf_sysctl_table *t; + struct ctl_table *table; char path[sizeof("net/ipv6/conf/") + IFNAMSIZ]; - t = kmemdup(&addrconf_sysctl, sizeof(*t), GFP_KERNEL); - if (!t) + table = kmemdup(addrconf_sysctl, sizeof(addrconf_sysctl), GFP_KERNEL); + if (!table) goto out; - for (i = 0; t->addrconf_vars[i].data; i++) { - t->addrconf_vars[i].data += (char *)p - (char *)&ipv6_devconf; - t->addrconf_vars[i].extra1 = idev; /* embedded; no ref */ - t->addrconf_vars[i].extra2 = net; + for (i = 0; table[i].data; i++) { + table[i].data += (char *)p - (char *)&ipv6_devconf; + table[i].extra1 = idev; /* embedded; no ref */ + table[i].extra2 = net; } snprintf(path, sizeof(path), "net/ipv6/conf/%s", dev_name); - t->sysctl_header = register_net_sysctl(net, path, t->addrconf_vars); - if (!t->sysctl_header) + p->sysctl_header = register_net_sysctl(net, path, table); + if (!p->sysctl_header) goto free; - p->sysctl = t; return 0; free: - kfree(t); + kfree(table); out: return -ENOBUFS; } static void __addrconf_sysctl_unregister(struct ipv6_devconf *p) { - struct addrconf_sysctl_table *t; + struct ctl_table 
*table; - if (!p->sysctl) + if (!p->sysctl_header) return; - t = p->sysctl; - p->sysctl = NULL; - unregister_net_sysctl_table(t->sysctl_header); - kfree(t); + table = p->sysctl_header->ctl_table_arg; + unregister_net_sysctl_table(p->sysctl_header); + p->sysctl_header = NULL; + kfree(table); } static int addrconf_sysctl_register(struct inet6_dev *idev) From 5df1f77f65e11f2d7454de70998a68c42293397a Mon Sep 17 00:00:00 2001 From: Konstantin Khlebnikov Date: Mon, 18 Apr 2016 14:41:17 +0300 Subject: [PATCH 0834/1649] net/ipv6/addrconf: fix sysctl table indentation Separated from previous patch for readability. Signed-off-by: Konstantin Khlebnikov Signed-off-by: David S. Miller --- net/ipv6/addrconf.c | 614 ++++++++++++++++++++++---------------------- 1 file changed, 306 insertions(+), 308 deletions(-) diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 5a42c0fe0449..19258d97494c 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -5620,323 +5620,321 @@ int addrconf_sysctl_ignore_routes_with_linkdown(struct ctl_table *ctl, } static const struct ctl_table addrconf_sysctl[] = { - { - .procname = "forwarding", - .data = &ipv6_devconf.forwarding, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = addrconf_sysctl_forward, - }, - { - .procname = "hop_limit", - .data = &ipv6_devconf.hop_limit, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = addrconf_sysctl_hop_limit, - }, - { - .procname = "mtu", - .data = &ipv6_devconf.mtu6, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = addrconf_sysctl_mtu, - }, - { - .procname = "accept_ra", - .data = &ipv6_devconf.accept_ra, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = proc_dointvec, - }, - { - .procname = "accept_redirects", - .data = &ipv6_devconf.accept_redirects, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = proc_dointvec, - }, - { - .procname = "autoconf", - .data = &ipv6_devconf.autoconf, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = proc_dointvec, - }, - { - .procname = "dad_transmits", - .data = &ipv6_devconf.dad_transmits, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = proc_dointvec, - }, - { - .procname = "router_solicitations", - .data = &ipv6_devconf.rtr_solicits, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = proc_dointvec, - }, - { - .procname = "router_solicitation_interval", - .data = &ipv6_devconf.rtr_solicit_interval, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = proc_dointvec_jiffies, - }, - { - .procname = "router_solicitation_delay", - .data = &ipv6_devconf.rtr_solicit_delay, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = proc_dointvec_jiffies, - }, - { - .procname = "force_mld_version", - .data = &ipv6_devconf.force_mld_version, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = proc_dointvec, - }, - { - .procname = "mldv1_unsolicited_report_interval", - .data = - &ipv6_devconf.mldv1_unsolicited_report_interval, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = proc_dointvec_ms_jiffies, - }, - { - .procname = "mldv2_unsolicited_report_interval", - .data = - &ipv6_devconf.mldv2_unsolicited_report_interval, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = proc_dointvec_ms_jiffies, - }, - { - .procname = "use_tempaddr", - .data = &ipv6_devconf.use_tempaddr, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = proc_dointvec, - }, - { - .procname = "temp_valid_lft", - .data = &ipv6_devconf.temp_valid_lft, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = proc_dointvec, - 
}, - { - .procname = "temp_prefered_lft", - .data = &ipv6_devconf.temp_prefered_lft, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = proc_dointvec, - }, - { - .procname = "regen_max_retry", - .data = &ipv6_devconf.regen_max_retry, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = proc_dointvec, - }, - { - .procname = "max_desync_factor", - .data = &ipv6_devconf.max_desync_factor, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = proc_dointvec, - }, - { - .procname = "max_addresses", - .data = &ipv6_devconf.max_addresses, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = proc_dointvec, - }, - { - .procname = "accept_ra_defrtr", - .data = &ipv6_devconf.accept_ra_defrtr, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = proc_dointvec, - }, - { - .procname = "accept_ra_min_hop_limit", - .data = &ipv6_devconf.accept_ra_min_hop_limit, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = proc_dointvec, - }, - { - .procname = "accept_ra_pinfo", - .data = &ipv6_devconf.accept_ra_pinfo, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = proc_dointvec, - }, + { + .procname = "forwarding", + .data = &ipv6_devconf.forwarding, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = addrconf_sysctl_forward, + }, + { + .procname = "hop_limit", + .data = &ipv6_devconf.hop_limit, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = addrconf_sysctl_hop_limit, + }, + { + .procname = "mtu", + .data = &ipv6_devconf.mtu6, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = addrconf_sysctl_mtu, + }, + { + .procname = "accept_ra", + .data = &ipv6_devconf.accept_ra, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec, + }, + { + .procname = "accept_redirects", + .data = &ipv6_devconf.accept_redirects, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec, + }, + { + .procname = "autoconf", + .data = &ipv6_devconf.autoconf, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec, + }, + { + .procname = "dad_transmits", + .data = &ipv6_devconf.dad_transmits, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec, + }, + { + .procname = "router_solicitations", + .data = &ipv6_devconf.rtr_solicits, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec, + }, + { + .procname = "router_solicitation_interval", + .data = &ipv6_devconf.rtr_solicit_interval, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec_jiffies, + }, + { + .procname = "router_solicitation_delay", + .data = &ipv6_devconf.rtr_solicit_delay, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec_jiffies, + }, + { + .procname = "force_mld_version", + .data = &ipv6_devconf.force_mld_version, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec, + }, + { + .procname = "mldv1_unsolicited_report_interval", + .data = + &ipv6_devconf.mldv1_unsolicited_report_interval, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec_ms_jiffies, + }, + { + .procname = "mldv2_unsolicited_report_interval", + .data = + &ipv6_devconf.mldv2_unsolicited_report_interval, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec_ms_jiffies, + }, + { + .procname = "use_tempaddr", + .data = &ipv6_devconf.use_tempaddr, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec, + }, + { + .procname = "temp_valid_lft", + .data = &ipv6_devconf.temp_valid_lft, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler 
= proc_dointvec, + }, + { + .procname = "temp_prefered_lft", + .data = &ipv6_devconf.temp_prefered_lft, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec, + }, + { + .procname = "regen_max_retry", + .data = &ipv6_devconf.regen_max_retry, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec, + }, + { + .procname = "max_desync_factor", + .data = &ipv6_devconf.max_desync_factor, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec, + }, + { + .procname = "max_addresses", + .data = &ipv6_devconf.max_addresses, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec, + }, + { + .procname = "accept_ra_defrtr", + .data = &ipv6_devconf.accept_ra_defrtr, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec, + }, + { + .procname = "accept_ra_min_hop_limit", + .data = &ipv6_devconf.accept_ra_min_hop_limit, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec, + }, + { + .procname = "accept_ra_pinfo", + .data = &ipv6_devconf.accept_ra_pinfo, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec, + }, #ifdef CONFIG_IPV6_ROUTER_PREF - { - .procname = "accept_ra_rtr_pref", - .data = &ipv6_devconf.accept_ra_rtr_pref, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = proc_dointvec, - }, - { - .procname = "router_probe_interval", - .data = &ipv6_devconf.rtr_probe_interval, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = proc_dointvec_jiffies, - }, + { + .procname = "accept_ra_rtr_pref", + .data = &ipv6_devconf.accept_ra_rtr_pref, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec, + }, + { + .procname = "router_probe_interval", + .data = &ipv6_devconf.rtr_probe_interval, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec_jiffies, + }, #ifdef CONFIG_IPV6_ROUTE_INFO - { - .procname = "accept_ra_rt_info_max_plen", - .data = &ipv6_devconf.accept_ra_rt_info_max_plen, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = proc_dointvec, - }, + { + .procname = "accept_ra_rt_info_max_plen", + .data = &ipv6_devconf.accept_ra_rt_info_max_plen, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec, + }, #endif #endif - { - .procname = "proxy_ndp", - .data = &ipv6_devconf.proxy_ndp, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = addrconf_sysctl_proxy_ndp, - }, - { - .procname = "accept_source_route", - .data = &ipv6_devconf.accept_source_route, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = proc_dointvec, - }, + { + .procname = "proxy_ndp", + .data = &ipv6_devconf.proxy_ndp, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = addrconf_sysctl_proxy_ndp, + }, + { + .procname = "accept_source_route", + .data = &ipv6_devconf.accept_source_route, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec, + }, #ifdef CONFIG_IPV6_OPTIMISTIC_DAD - { - .procname = "optimistic_dad", - .data = &ipv6_devconf.optimistic_dad, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = proc_dointvec, - - }, - { - .procname = "use_optimistic", - .data = &ipv6_devconf.use_optimistic, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = proc_dointvec, - - }, + { + .procname = "optimistic_dad", + .data = &ipv6_devconf.optimistic_dad, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec, + }, + { + .procname = "use_optimistic", + .data = &ipv6_devconf.use_optimistic, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec, + }, 
#endif #ifdef CONFIG_IPV6_MROUTE - { - .procname = "mc_forwarding", - .data = &ipv6_devconf.mc_forwarding, - .maxlen = sizeof(int), - .mode = 0444, - .proc_handler = proc_dointvec, - }, + { + .procname = "mc_forwarding", + .data = &ipv6_devconf.mc_forwarding, + .maxlen = sizeof(int), + .mode = 0444, + .proc_handler = proc_dointvec, + }, #endif - { - .procname = "disable_ipv6", - .data = &ipv6_devconf.disable_ipv6, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = addrconf_sysctl_disable, - }, - { - .procname = "accept_dad", - .data = &ipv6_devconf.accept_dad, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = proc_dointvec, - }, - { - .procname = "force_tllao", - .data = &ipv6_devconf.force_tllao, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = proc_dointvec - }, - { - .procname = "ndisc_notify", - .data = &ipv6_devconf.ndisc_notify, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = proc_dointvec - }, - { - .procname = "suppress_frag_ndisc", - .data = &ipv6_devconf.suppress_frag_ndisc, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = proc_dointvec - }, - { - .procname = "accept_ra_from_local", - .data = &ipv6_devconf.accept_ra_from_local, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = proc_dointvec, - }, - { - .procname = "accept_ra_mtu", - .data = &ipv6_devconf.accept_ra_mtu, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = proc_dointvec, - }, - { - .procname = "stable_secret", - .data = &ipv6_devconf.stable_secret, - .maxlen = IPV6_MAX_STRLEN, - .mode = 0600, - .proc_handler = addrconf_sysctl_stable_secret, - }, - { - .procname = "use_oif_addrs_only", - .data = &ipv6_devconf.use_oif_addrs_only, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = proc_dointvec, - }, - { - .procname = "ignore_routes_with_linkdown", - .data = &ipv6_devconf.ignore_routes_with_linkdown, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = addrconf_sysctl_ignore_routes_with_linkdown, - }, - { - .procname = "drop_unicast_in_l2_multicast", - .data = &ipv6_devconf.drop_unicast_in_l2_multicast, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = proc_dointvec, - }, - { - .procname = "drop_unsolicited_na", - .data = &ipv6_devconf.drop_unsolicited_na, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = proc_dointvec, - }, - { - .procname = "keep_addr_on_down", - .data = &ipv6_devconf.keep_addr_on_down, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = proc_dointvec, + { + .procname = "disable_ipv6", + .data = &ipv6_devconf.disable_ipv6, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = addrconf_sysctl_disable, + }, + { + .procname = "accept_dad", + .data = &ipv6_devconf.accept_dad, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec, + }, + { + .procname = "force_tllao", + .data = &ipv6_devconf.force_tllao, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec + }, + { + .procname = "ndisc_notify", + .data = &ipv6_devconf.ndisc_notify, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec + }, + { + .procname = "suppress_frag_ndisc", + .data = &ipv6_devconf.suppress_frag_ndisc, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec + }, + { + .procname = "accept_ra_from_local", + .data = &ipv6_devconf.accept_ra_from_local, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec, + }, + { + .procname = "accept_ra_mtu", + .data = &ipv6_devconf.accept_ra_mtu, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler 
= proc_dointvec, + }, + { + .procname = "stable_secret", + .data = &ipv6_devconf.stable_secret, + .maxlen = IPV6_MAX_STRLEN, + .mode = 0600, + .proc_handler = addrconf_sysctl_stable_secret, + }, + { + .procname = "use_oif_addrs_only", + .data = &ipv6_devconf.use_oif_addrs_only, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec, + }, + { + .procname = "ignore_routes_with_linkdown", + .data = &ipv6_devconf.ignore_routes_with_linkdown, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = addrconf_sysctl_ignore_routes_with_linkdown, + }, + { + .procname = "drop_unicast_in_l2_multicast", + .data = &ipv6_devconf.drop_unicast_in_l2_multicast, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec, + }, + { + .procname = "drop_unsolicited_na", + .data = &ipv6_devconf.drop_unsolicited_na, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec, + }, + { + .procname = "keep_addr_on_down", + .data = &ipv6_devconf.keep_addr_on_down, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec, - }, - { - /* sentinel */ - } + }, + { + /* sentinel */ + } }; static int __addrconf_sysctl_register(struct net *net, char *dev_name, From 553bc087caf052458dc9f92bc42710027740caa9 Mon Sep 17 00:00:00 2001 From: Julia Lawall Date: Mon, 18 Apr 2016 16:55:35 +0200 Subject: [PATCH 0835/1649] arcnet: com90xx: add __init attribute Add __init attribute on a function that is only called from other __init functions and that is not inlined, at least with gcc version 4.8.4 on an x86 machine with allyesconfig. Currently, the function is put in the .text.unlikely segment. Declaring it as __init will cause it to be put in the .init.text and to disappear after initialization. The result of objdump -x on the function before the change is as follows: 0000000000000000 l F .text.unlikely 00000000000000bf check_mirror And after the change it is as follows: 0000000000000000 l F .init.text 00000000000000ba check_mirror Done with the help of Coccinelle. The semantic patch checks for local static non-init functions that are called from an __init function and are not called from any other function. Signed-off-by: Julia Lawall Acked-by: Michael Grzeschik Signed-off-by: David S. Miller --- drivers/net/arcnet/com90xx.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/arcnet/com90xx.c b/drivers/net/arcnet/com90xx.c index 0d9b45ff1bb2..81f90c4703ae 100644 --- a/drivers/net/arcnet/com90xx.c +++ b/drivers/net/arcnet/com90xx.c @@ -433,7 +433,7 @@ static void __init com90xx_probe(void) kfree(iomem); } -static int check_mirror(unsigned long addr, size_t size) +static int __init check_mirror(unsigned long addr, size_t size) { void __iomem *p; int res = -1; From 1e33759c788c78f31d4d6f65bac647b23624734c Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Mon, 18 Apr 2016 21:01:23 +0200 Subject: [PATCH 0836/1649] bpf, trace: add BPF_F_CURRENT_CPU flag for bpf_perf_event_output Add a BPF_F_CURRENT_CPU flag to optimize the use-case where user space has per-CPU ring buffers and the eBPF program pushes the data into the current CPU's ring buffer which saves us an extra helper function call in eBPF. Also, make sure to properly reserve the remaining flags which are not used. Signed-off-by: Daniel Borkmann Signed-off-by: Alexei Starovoitov Signed-off-by: David S. 
Miller --- include/uapi/linux/bpf.h | 4 ++++ kernel/trace/bpf_trace.c | 7 ++++++- 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 70eda5aeb304..b7b0fb1292e7 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -347,6 +347,10 @@ enum bpf_func_id { #define BPF_F_ZERO_CSUM_TX (1ULL << 1) #define BPF_F_DONT_FRAGMENT (1ULL << 2) +/* BPF_FUNC_perf_event_output flags. */ +#define BPF_F_INDEX_MASK 0xffffffffULL +#define BPF_F_CURRENT_CPU BPF_F_INDEX_MASK + /* user accessible mirror of in-kernel sk_buff. * new fields can only be added to the end of this structure */ diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index f389629dade7..b3cc24cb4321 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -225,11 +225,12 @@ static const struct bpf_func_proto bpf_perf_event_read_proto = { .arg2_type = ARG_ANYTHING, }; -static u64 bpf_perf_event_output(u64 r1, u64 r2, u64 index, u64 r4, u64 size) +static u64 bpf_perf_event_output(u64 r1, u64 r2, u64 flags, u64 r4, u64 size) { struct pt_regs *regs = (struct pt_regs *) (long) r1; struct bpf_map *map = (struct bpf_map *) (long) r2; struct bpf_array *array = container_of(map, struct bpf_array, map); + u64 index = flags & BPF_F_INDEX_MASK; void *data = (void *) (long) r4; struct perf_sample_data sample_data; struct perf_event *event; @@ -239,6 +240,10 @@ static u64 bpf_perf_event_output(u64 r1, u64 r2, u64 index, u64 r4, u64 size) .data = data, }; + if (unlikely(flags & ~(BPF_F_INDEX_MASK))) + return -EINVAL; + if (index == BPF_F_CURRENT_CPU) + index = raw_smp_processor_id(); if (unlikely(index >= array->map.max_entries)) return -E2BIG; From bd570ff970a54df653b48ed0cfb373f2ebed083d Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Mon, 18 Apr 2016 21:01:24 +0200 Subject: [PATCH 0837/1649] bpf: add event output helper for notifications/sampling/logging This patch adds a new helper for cls/act programs that can push events to user space applications. For networking, this can be f.e. for sampling, debugging, logging purposes or pushing of arbitrary wake-up events. The idea is similar to a43eec304259 ("bpf: introduce bpf_perf_event_output() helper") and 39111695b1b8 ("samples: bpf: add bpf_perf_event_output example"). The eBPF program utilizes a perf event array map that user space populates with fds from perf_event_open(), the eBPF program calls into the helper f.e. as skb_event_output(skb, &my_map, BPF_F_CURRENT_CPU, raw, sizeof(raw)) so that the raw data is pushed into the fd f.e. at the map index of the current CPU. User space can poll/mmap/etc on this and has a data channel for receiving events that can be post-processed. The nice thing is that since the eBPF program and user space application making use of it are tightly coupled, they can define their own arbitrary raw data format and what/when they want to push. While f.e. packet headers could be one part of the meta data that is being pushed, this is not a substitute for things like packet sockets as whole packet is not being pushed and push is only done in a single direction. Intention is more of a generically usable, efficient event pipe to applications. Workflow is that tc can pin the map and applications can attach themselves e.g. after cls/act setup to one or multiple map slots, demuxing is done by the eBPF program. 
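A minimal userspace sketch of the index handling that the bpf_trace.c hunk above adds for BPF_F_CURRENT_CPU; the flag values mirror the new uapi defines, while the CPU lookup and the array size are stand-ins invented for the demo:

#include <stdio.h>
#include <stdint.h>

#define BPF_F_INDEX_MASK  0xffffffffULL
#define BPF_F_CURRENT_CPU BPF_F_INDEX_MASK

/* stand-in for raw_smp_processor_id(); assume CPU 2 for the demo */
static unsigned int fake_current_cpu(void) { return 2; }

/* Mirrors the flag checks done in the kernel helper: reject unknown
 * flag bits, map BPF_F_CURRENT_CPU to the current CPU and bound-check
 * the resulting index against the perf event array size.
 */
static int pick_index(uint64_t flags, unsigned int max_entries, uint64_t *index)
{
	uint64_t idx = flags & BPF_F_INDEX_MASK;

	if (flags & ~BPF_F_INDEX_MASK)
		return -1;		/* -EINVAL in the kernel */
	if (idx == BPF_F_CURRENT_CPU)
		idx = fake_current_cpu();
	if (idx >= max_entries)
		return -2;		/* -E2BIG in the kernel */

	*index = idx;
	return 0;
}

int main(void)
{
	uint64_t idx;

	if (!pick_index(BPF_F_CURRENT_CPU, 8, &idx))
		printf("BPF_F_CURRENT_CPU -> slot %llu\n", (unsigned long long)idx);
	if (!pick_index(5, 8, &idx))
		printf("explicit index    -> slot %llu\n", (unsigned long long)idx);
	return 0;
}

The point of reserving the remaining flag bits now is visible in the first check: older programs that pass a plain index keep working, and future flags cannot be silently ignored.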
Adding this facility is with minimal effort, it reuses the helper introduced in a43eec304259 ("bpf: introduce bpf_perf_event_output() helper") and we get its functionality for free by overloading its BPF_FUNC_ identifier for cls/act programs, ctx is currently unused, but will be made use of in future. Example will be added to iproute2's BPF example files. Signed-off-by: Daniel Borkmann Signed-off-by: Alexei Starovoitov Signed-off-by: David S. Miller --- include/linux/bpf.h | 2 ++ kernel/bpf/core.c | 7 +++++++ kernel/trace/bpf_trace.c | 27 +++++++++++++++++++++++++++ net/core/filter.c | 2 ++ 4 files changed, 38 insertions(+) diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 5fb3c610fa96..f63afdc43bec 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -169,7 +169,9 @@ u64 bpf_tail_call(u64 ctx, u64 r2, u64 index, u64 r4, u64 r5); u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); void bpf_fd_array_map_clear(struct bpf_map *map); bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp); + const struct bpf_func_proto *bpf_get_trace_printk_proto(void); +const struct bpf_func_proto *bpf_get_event_output_proto(void); #ifdef CONFIG_BPF_SYSCALL DECLARE_PER_CPU(int, bpf_prog_active); diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index be0abf669ced..e4248fe79513 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -764,14 +764,21 @@ const struct bpf_func_proto bpf_map_delete_elem_proto __weak; const struct bpf_func_proto bpf_get_prandom_u32_proto __weak; const struct bpf_func_proto bpf_get_smp_processor_id_proto __weak; const struct bpf_func_proto bpf_ktime_get_ns_proto __weak; + const struct bpf_func_proto bpf_get_current_pid_tgid_proto __weak; const struct bpf_func_proto bpf_get_current_uid_gid_proto __weak; const struct bpf_func_proto bpf_get_current_comm_proto __weak; + const struct bpf_func_proto * __weak bpf_get_trace_printk_proto(void) { return NULL; } +const struct bpf_func_proto * __weak bpf_get_event_output_proto(void) +{ + return NULL; +} + /* Always built-in helper functions. 
*/ const struct bpf_func_proto bpf_tail_call_proto = { .func = NULL, diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index b3cc24cb4321..780bcbe1d4de 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -277,6 +277,33 @@ static const struct bpf_func_proto bpf_perf_event_output_proto = { .arg5_type = ARG_CONST_STACK_SIZE, }; +static DEFINE_PER_CPU(struct pt_regs, bpf_pt_regs); + +static u64 bpf_event_output(u64 r1, u64 r2, u64 flags, u64 r4, u64 size) +{ + struct pt_regs *regs = this_cpu_ptr(&bpf_pt_regs); + + perf_fetch_caller_regs(regs); + + return bpf_perf_event_output((long)regs, r2, flags, r4, size); +} + +static const struct bpf_func_proto bpf_event_output_proto = { + .func = bpf_event_output, + .gpl_only = true, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_CTX, + .arg2_type = ARG_CONST_MAP_PTR, + .arg3_type = ARG_ANYTHING, + .arg4_type = ARG_PTR_TO_STACK, + .arg5_type = ARG_CONST_STACK_SIZE, +}; + +const struct bpf_func_proto *bpf_get_event_output_proto(void) +{ + return &bpf_event_output_proto; +} + static const struct bpf_func_proto *tracing_func_proto(enum bpf_func_id func_id) { switch (func_id) { diff --git a/net/core/filter.c b/net/core/filter.c index 5d2ac2b9d1c4..218e5de8c402 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -2039,6 +2039,8 @@ tc_cls_act_func_proto(enum bpf_func_id func_id) return &bpf_redirect_proto; case BPF_FUNC_get_route_realm: return &bpf_get_route_realm_proto; + case BPF_FUNC_perf_event_output: + return bpf_get_event_output_proto(); default: return sk_filter_func_proto(func_id); } From 46e7b8d8d53bcde075dca6da3a3816a663073499 Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Mon, 18 Apr 2016 16:10:24 -0400 Subject: [PATCH 0838/1649] net: dsa: kill circular reference with slave priv The dsa_slave_priv structure does not need a pointer to its net_device. Kill it. Signed-off-by: Vivien Didelot Signed-off-by: David S. Miller --- net/dsa/dsa_priv.h | 5 ----- net/dsa/slave.c | 9 ++++----- 2 files changed, 4 insertions(+), 10 deletions(-) diff --git a/net/dsa/dsa_priv.h b/net/dsa/dsa_priv.h index 1d1a54687e4a..dfa33779d49c 100644 --- a/net/dsa/dsa_priv.h +++ b/net/dsa/dsa_priv.h @@ -22,11 +22,6 @@ struct dsa_device_ops { }; struct dsa_slave_priv { - /* - * The linux network interface corresponding to this - * switch port. 
- */ - struct net_device *dev; struct sk_buff * (*xmit)(struct sk_buff *skb, struct net_device *dev); diff --git a/net/dsa/slave.c b/net/dsa/slave.c index 2dae0d064359..3b6750f5e68b 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c @@ -673,10 +673,10 @@ static void dsa_slave_get_ethtool_stats(struct net_device *dev, struct dsa_slave_priv *p = netdev_priv(dev); struct dsa_switch *ds = p->parent; - data[0] = p->dev->stats.tx_packets; - data[1] = p->dev->stats.tx_bytes; - data[2] = p->dev->stats.rx_packets; - data[3] = p->dev->stats.rx_bytes; + data[0] = dev->stats.tx_packets; + data[1] = dev->stats.tx_bytes; + data[2] = dev->stats.rx_packets; + data[3] = dev->stats.rx_bytes; if (ds->drv->get_ethtool_stats != NULL) ds->drv->get_ethtool_stats(ds, p->port, data + 4); } @@ -1063,7 +1063,6 @@ int dsa_slave_create(struct dsa_switch *ds, struct device *parent, slave_dev->vlan_features = master->vlan_features; p = netdev_priv(slave_dev); - p->dev = slave_dev; p->parent = ds; p->port = port; From 39b9722315364121c6e2524515a6e95d52287549 Mon Sep 17 00:00:00 2001 From: Marco Angaroni Date: Tue, 5 Apr 2016 18:26:29 +0200 Subject: [PATCH 0839/1649] ipvs: handle connections started by real-servers MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When using LVS-NAT and SIP persistence-egine over UDP, the following limitations are present with current implementation: 1) To actually have load-balancing based on Call-ID header, you need to use one-packet-scheduling mode. But with one-packet-scheduling the connection is deleted just after packet is forwarded, so SIP responses coming from real-servers do not match any connection and SNAT is not applied. 2) If you do not use "-o" option, IPVS behaves as normal UDP load balancer, so different SIP calls (each one identified by a different Call-ID) coming from the same ip-address/port go to the same real-server. So basically you don’t have load-balancing based on Call-ID as intended. 3) Call-ID is not learned when a new SIP call is started by a real-server (inside-to-outside direction), but only in the outside-to-inside direction. This would be a general problem for all SIP servers acting as Back2BackUserAgent. This patch aims to solve problems 1) and 3) while keeping OPS mode mandatory for SIP-UDP, so that 2) is not a problem anymore. The basic mechanism implemented is to make packets, that do not match any existent connection but come from real-servers, create new connections instead of let them pass without any effect. When such packets pass through ip_vs_out(), if their source ip address and source port match a configured real-server, a new connection is automatically created in the same way as it would have happened if the packet had come from outside-to-inside direction. A new connection template is created too if the virtual-service is persistent and there is no matching connection template found. The new connection automatically created, if the service had "-o" option, is an OPS connection that lasts only the time to forward the packet, just like it happens on the ingress side. The main part of this mechanism is implemented inside a persistent-engine specific callback (at the moment only SIP persistent engine exists) and is triggered only for UDP packets, since connection oriented protocols, by using different set of ports (typically ephemeral ports) to open new outgoing connections, should not need this feature. 
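As a rough illustration of the dispatch described above (and of the __ip_vs_rs_conn_out() helper added further down in this patch), the sketch below uses throwaway types in place of the IPVS structures; only the shape of the lookup-then-callback flow is meant to match:

#include <stdio.h>
#include <stdint.h>

/* Stand-in types; the real code uses ip_vs_dest/ip_vs_service/ip_vs_pe. */
struct conn { uint32_t daddr; };
struct svc;
struct pe {
	/* optional callback: build a connection for an RS-originated packet */
	struct conn *(*conn_out)(struct svc *svc, uint32_t saddr, uint16_t sport);
};
struct svc { struct pe *pe; };
struct dest { uint32_t addr; uint16_t port; struct svc *svc; };

static struct conn *sip_conn_out(struct svc *svc, uint32_t s, uint16_t p)
{
	static struct conn c;

	(void)svc;
	c.daddr = s;
	printf("creating outgoing conn for RS %u:%u\n", (unsigned)s, (unsigned)p);
	return &c;
}

static struct pe sip_pe = { .conn_out = sip_conn_out };
static struct svc sip_svc = { .pe = &sip_pe };
/* pretend real-server table with one UDP SIP server configured */
static struct dest rs_table[] = { { .addr = 0x0a000001, .port = 5060, .svc = &sip_svc } };

/* If the packet source matches a configured real server and its service's
 * persistence engine implements conn_out, let the engine build the
 * connection; otherwise leave the packet alone, as before the patch.
 */
static struct conn *rs_conn_out(uint32_t saddr, uint16_t sport)
{
	for (size_t i = 0; i < sizeof(rs_table) / sizeof(rs_table[0]); i++) {
		struct dest *d = &rs_table[i];

		if (d->addr == saddr && d->port == sport &&
		    d->svc && d->svc->pe && d->svc->pe->conn_out)
			return d->svc->pe->conn_out(d->svc, saddr, sport);
	}
	return NULL;
}

int main(void)
{
	rs_conn_out(0x0a000001, 5060);			/* matches -> connection created */
	if (!rs_conn_out(0x0a000002, 5060))		/* no match -> untouched */
		printf("unknown source, packet left untouched\n");
	return 0;
}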
The following requisites are needed for automatic connection creation; if any is missing the packet simply goes the same way as before. a) virtual-service is not fwmark based (this is because fwmark services do not store address and port of the virtual-service, required to build the connection data). b) virtual-service and real-servers must not have been configured with omitted port (this is again to have all data to create the connection). Signed-off-by: Marco Angaroni Acked-by: Julian Anastasov Signed-off-by: Simon Horman --- include/net/ip_vs.h | 17 ++++ net/netfilter/ipvs/ip_vs_core.c | 154 ++++++++++++++++++++++++++++++ net/netfilter/ipvs/ip_vs_ctl.c | 46 ++++++++- net/netfilter/ipvs/ip_vs_pe_sip.c | 15 +++ 4 files changed, 231 insertions(+), 1 deletion(-) diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h index a6cc576fd467..af4c10ebb241 100644 --- a/include/net/ip_vs.h +++ b/include/net/ip_vs.h @@ -731,6 +731,12 @@ struct ip_vs_pe { u32 (*hashkey_raw)(const struct ip_vs_conn_param *p, u32 initval, bool inverse); int (*show_pe_data)(const struct ip_vs_conn *cp, char *buf); + /* create connections for real-server outgoing packets */ + struct ip_vs_conn* (*conn_out)(struct ip_vs_service *svc, + struct ip_vs_dest *dest, + struct sk_buff *skb, + const struct ip_vs_iphdr *iph, + __be16 dport, __be16 cport); }; /* The application module object (a.k.a. app incarnation) */ @@ -874,6 +880,7 @@ struct netns_ipvs { /* Service counters */ atomic_t ftpsvc_counter; atomic_t nullsvc_counter; + atomic_t conn_out_counter; #ifdef CONFIG_SYSCTL /* 1/rate drop and drop-entry variables */ @@ -1147,6 +1154,12 @@ static inline int sysctl_cache_bypass(struct netns_ipvs *ipvs) */ const char *ip_vs_proto_name(unsigned int proto); void ip_vs_init_hash_table(struct list_head *table, int rows); +struct ip_vs_conn *ip_vs_new_conn_out(struct ip_vs_service *svc, + struct ip_vs_dest *dest, + struct sk_buff *skb, + const struct ip_vs_iphdr *iph, + __be16 dport, + __be16 cport); #define IP_VS_INIT_HASH_TABLE(t) ip_vs_init_hash_table((t), ARRAY_SIZE((t))) #define IP_VS_APP_TYPE_FTP 1 @@ -1378,6 +1391,10 @@ ip_vs_service_find(struct netns_ipvs *ipvs, int af, __u32 fwmark, __u16 protocol bool ip_vs_has_real_service(struct netns_ipvs *ipvs, int af, __u16 protocol, const union nf_inet_addr *daddr, __be16 dport); +struct ip_vs_dest * +ip_vs_find_real_service(struct netns_ipvs *ipvs, int af, __u16 protocol, + const union nf_inet_addr *daddr, __be16 dport); + int ip_vs_use_count_inc(void); void ip_vs_use_count_dec(void); int ip_vs_register_nl_ioctl(void); diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c index b9a4082afa3a..f3bac2e9a25a 100644 --- a/net/netfilter/ipvs/ip_vs_core.c +++ b/net/netfilter/ipvs/ip_vs_core.c @@ -68,6 +68,7 @@ EXPORT_SYMBOL(ip_vs_conn_put); #ifdef CONFIG_IP_VS_DEBUG EXPORT_SYMBOL(ip_vs_get_debug_level); #endif +EXPORT_SYMBOL(ip_vs_new_conn_out); static int ip_vs_net_id __read_mostly; /* netns cnt used for uniqueness */ @@ -1100,6 +1101,143 @@ static inline bool is_new_conn_expected(const struct ip_vs_conn *cp, } } +/* Generic function to create new connections for outgoing RS packets + * + * Pre-requisites for successful connection creation: + * 1) Virtual Service is NOT fwmark based: + * In fwmark-VS actual vaddr and vport are unknown to IPVS + * 2) Real Server and Virtual Service were NOT configured without port: + * This is to allow match of different VS to the same RS ip-addr + */ +struct ip_vs_conn *ip_vs_new_conn_out(struct ip_vs_service *svc, + struct 
ip_vs_dest *dest, + struct sk_buff *skb, + const struct ip_vs_iphdr *iph, + __be16 dport, + __be16 cport) +{ + struct ip_vs_conn_param param; + struct ip_vs_conn *ct = NULL, *cp = NULL; + const union nf_inet_addr *vaddr, *daddr, *caddr; + union nf_inet_addr snet; + __be16 vport; + unsigned int flags; + + EnterFunction(12); + vaddr = &svc->addr; + vport = svc->port; + daddr = &iph->saddr; + caddr = &iph->daddr; + + /* check pre-requisites are satisfied */ + if (svc->fwmark) + return NULL; + if (!vport || !dport) + return NULL; + + /* for persistent service first create connection template */ + if (svc->flags & IP_VS_SVC_F_PERSISTENT) { + /* apply netmask the same way ingress-side does */ +#ifdef CONFIG_IP_VS_IPV6 + if (svc->af == AF_INET6) + ipv6_addr_prefix(&snet.in6, &caddr->in6, + (__force __u32)svc->netmask); + else +#endif + snet.ip = caddr->ip & svc->netmask; + /* fill params and create template if not existent */ + if (ip_vs_conn_fill_param_persist(svc, skb, iph->protocol, + &snet, 0, vaddr, + vport, ¶m) < 0) + return NULL; + ct = ip_vs_ct_in_get(¶m); + if (!ct) { + ct = ip_vs_conn_new(¶m, dest->af, daddr, dport, + IP_VS_CONN_F_TEMPLATE, dest, 0); + if (!ct) { + kfree(param.pe_data); + return NULL; + } + ct->timeout = svc->timeout; + } else { + kfree(param.pe_data); + } + } + + /* connection flags */ + flags = ((svc->flags & IP_VS_SVC_F_ONEPACKET) && + iph->protocol == IPPROTO_UDP) ? IP_VS_CONN_F_ONE_PACKET : 0; + /* create connection */ + ip_vs_conn_fill_param(svc->ipvs, svc->af, iph->protocol, + caddr, cport, vaddr, vport, ¶m); + cp = ip_vs_conn_new(¶m, dest->af, daddr, dport, flags, dest, 0); + if (!cp) { + if (ct) + ip_vs_conn_put(ct); + return NULL; + } + if (ct) { + ip_vs_control_add(cp, ct); + ip_vs_conn_put(ct); + } + ip_vs_conn_stats(cp, svc); + + /* return connection (will be used to handle outgoing packet) */ + IP_VS_DBG_BUF(6, "New connection RS-initiated:%c c:%s:%u v:%s:%u " + "d:%s:%u conn->flags:%X conn->refcnt:%d\n", + ip_vs_fwd_tag(cp), + IP_VS_DBG_ADDR(cp->af, &cp->caddr), ntohs(cp->cport), + IP_VS_DBG_ADDR(cp->af, &cp->vaddr), ntohs(cp->vport), + IP_VS_DBG_ADDR(cp->af, &cp->daddr), ntohs(cp->dport), + cp->flags, atomic_read(&cp->refcnt)); + LeaveFunction(12); + return cp; +} + +/* Handle outgoing packets which are considered requests initiated by + * real servers, so that subsequent responses from external client can be + * routed to the right real server. + * Used also for outgoing responses in OPS mode. + * + * Connection management is handled by persistent-engine specific callback. + */ +static struct ip_vs_conn *__ip_vs_rs_conn_out(unsigned int hooknum, + struct netns_ipvs *ipvs, + int af, struct sk_buff *skb, + const struct ip_vs_iphdr *iph) +{ + struct ip_vs_dest *dest; + struct ip_vs_conn *cp = NULL; + __be16 _ports[2], *pptr; + + if (hooknum == NF_INET_LOCAL_IN) + return NULL; + + pptr = frag_safe_skb_hp(skb, iph->len, + sizeof(_ports), _ports, iph); + if (!pptr) + return NULL; + + rcu_read_lock(); + dest = ip_vs_find_real_service(ipvs, af, iph->protocol, + &iph->saddr, pptr[0]); + if (dest) { + struct ip_vs_service *svc; + struct ip_vs_pe *pe; + + svc = rcu_dereference(dest->svc); + if (svc) { + pe = rcu_dereference(svc->pe); + if (pe && pe->conn_out) + cp = pe->conn_out(svc, dest, skb, iph, + pptr[0], pptr[1]); + } + } + rcu_read_unlock(); + + return cp; +} + /* Handle response packets: rewrite addresses and send away... 
*/ static unsigned int @@ -1245,6 +1383,22 @@ ip_vs_out(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, in if (likely(cp)) return handle_response(af, skb, pd, cp, &iph, hooknum); + + /* Check for real-server-started requests */ + if (atomic_read(&ipvs->conn_out_counter)) { + /* Currently only for UDP: + * connection oriented protocols typically use + * ephemeral ports for outgoing connections, so + * related incoming responses would not match any VS + */ + if (pp->protocol == IPPROTO_UDP) { + cp = __ip_vs_rs_conn_out(hooknum, ipvs, af, skb, &iph); + if (likely(cp)) + return handle_response(af, skb, pd, cp, &iph, + hooknum); + } + } + if (sysctl_nat_icmp_send(ipvs) && (pp->protocol == IPPROTO_TCP || pp->protocol == IPPROTO_UDP || diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c index 404b2a4f4b5b..6794391c5a32 100644 --- a/net/netfilter/ipvs/ip_vs_ctl.c +++ b/net/netfilter/ipvs/ip_vs_ctl.c @@ -567,6 +567,36 @@ bool ip_vs_has_real_service(struct netns_ipvs *ipvs, int af, __u16 protocol, return false; } +/* Find real service record by . + * In case of multiple records with the same , only + * the first found record is returned. + * + * To be called under RCU lock. + */ +struct ip_vs_dest *ip_vs_find_real_service(struct netns_ipvs *ipvs, int af, + __u16 protocol, + const union nf_inet_addr *daddr, + __be16 dport) +{ + unsigned int hash; + struct ip_vs_dest *dest; + + /* Check for "full" addressed entries */ + hash = ip_vs_rs_hashkey(af, daddr, dport); + + hlist_for_each_entry_rcu(dest, &ipvs->rs_table[hash], d_list) { + if (dest->port == dport && + dest->af == af && + ip_vs_addr_equal(af, &dest->addr, daddr) && + (dest->protocol == protocol || dest->vfwmark)) { + /* HIT */ + return dest; + } + } + + return NULL; +} + /* Lookup destination by {addr,port} in the given service * Called under RCU lock. */ @@ -1253,6 +1283,8 @@ ip_vs_add_service(struct netns_ipvs *ipvs, struct ip_vs_service_user_kern *u, atomic_inc(&ipvs->ftpsvc_counter); else if (svc->port == 0) atomic_inc(&ipvs->nullsvc_counter); + if (svc->pe && svc->pe->conn_out) + atomic_inc(&ipvs->conn_out_counter); ip_vs_start_estimator(ipvs, &svc->stats); @@ -1293,6 +1325,7 @@ ip_vs_edit_service(struct ip_vs_service *svc, struct ip_vs_service_user_kern *u) struct ip_vs_scheduler *sched = NULL, *old_sched; struct ip_vs_pe *pe = NULL, *old_pe = NULL; int ret = 0; + bool new_pe_conn_out, old_pe_conn_out; /* * Lookup the scheduler, by 'u->sched_name' @@ -1355,8 +1388,16 @@ ip_vs_edit_service(struct ip_vs_service *svc, struct ip_vs_service_user_kern *u) svc->netmask = u->netmask; old_pe = rcu_dereference_protected(svc->pe, 1); - if (pe != old_pe) + if (pe != old_pe) { rcu_assign_pointer(svc->pe, pe); + /* check for optional methods in new pe */ + new_pe_conn_out = (pe && pe->conn_out) ? true : false; + old_pe_conn_out = (old_pe && old_pe->conn_out) ? 
true : false; + if (new_pe_conn_out && !old_pe_conn_out) + atomic_inc(&svc->ipvs->conn_out_counter); + if (old_pe_conn_out && !new_pe_conn_out) + atomic_dec(&svc->ipvs->conn_out_counter); + } out: ip_vs_scheduler_put(old_sched); @@ -1389,6 +1430,8 @@ static void __ip_vs_del_service(struct ip_vs_service *svc, bool cleanup) /* Unbind persistence engine, keep svc->pe */ old_pe = rcu_dereference_protected(svc->pe, 1); + if (old_pe && old_pe->conn_out) + atomic_dec(&ipvs->conn_out_counter); ip_vs_pe_put(old_pe); /* @@ -3957,6 +4000,7 @@ int __net_init ip_vs_control_net_init(struct netns_ipvs *ipvs) (unsigned long) ipvs); atomic_set(&ipvs->ftpsvc_counter, 0); atomic_set(&ipvs->nullsvc_counter, 0); + atomic_set(&ipvs->conn_out_counter, 0); /* procfs stats */ ipvs->tot_stats.cpustats = alloc_percpu(struct ip_vs_cpu_stats); diff --git a/net/netfilter/ipvs/ip_vs_pe_sip.c b/net/netfilter/ipvs/ip_vs_pe_sip.c index 0a6eb5c0d9e9..d07ef9e31c12 100644 --- a/net/netfilter/ipvs/ip_vs_pe_sip.c +++ b/net/netfilter/ipvs/ip_vs_pe_sip.c @@ -143,6 +143,20 @@ static int ip_vs_sip_show_pe_data(const struct ip_vs_conn *cp, char *buf) return cp->pe_data_len; } +static struct ip_vs_conn * +ip_vs_sip_conn_out(struct ip_vs_service *svc, + struct ip_vs_dest *dest, + struct sk_buff *skb, + const struct ip_vs_iphdr *iph, + __be16 dport, + __be16 cport) +{ + if (likely(iph->protocol == IPPROTO_UDP)) + return ip_vs_new_conn_out(svc, dest, skb, iph, dport, cport); + /* currently no need to handle other than UDP */ + return NULL; +} + static struct ip_vs_pe ip_vs_sip_pe = { .name = "sip", @@ -153,6 +167,7 @@ static struct ip_vs_pe ip_vs_sip_pe = .ct_match = ip_vs_sip_ct_match, .hashkey_raw = ip_vs_sip_hashkey_raw, .show_pe_data = ip_vs_sip_show_pe_data, + .conn_out = ip_vs_sip_conn_out, }; static int __init ip_vs_sip_init(void) From 013b042465d3fefef84b4b87947747eda08277e2 Mon Sep 17 00:00:00 2001 From: Marco Angaroni Date: Tue, 5 Apr 2016 18:26:52 +0200 Subject: [PATCH 0840/1649] ipvs: optimize release of connections in OPS mode One-packet-scheduling is the most expensive mode in IPVS from performance point of view: for each packet to be processed a new connection data structure is created and, after packet is sent, deleted by starting a new timer set to expire immediately. SIP persistent-engine needs OPS mode to have Call-ID based load balancing, so OPS mode performance has negative impact in SIP protocol load balancing. This patch aims to improve performance of OPS mode by means of the following changes in the release mechanism of OPS connections: a) call expire callback ip_vs_conn_expire() directly instead of starting a timer programmed to fire immediately. b) avoid call_rcu() overhead inside expire callback, since OPS connection are not inserted in the hash-table and last just the time to process the packet, hence there is no concurrent access to such data structures. 
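A small standalone sketch of the release decision described in a) and b); the flag, refcount and timer state are plain fields here rather than the kernel's atomic_t and timer_list, so this only models the control flow:

#include <stdio.h>
#include <stdbool.h>

#define F_ONE_PACKET 0x1

struct conn {
	unsigned int flags;
	int refcnt;
	bool timer_pending;
};

static void conn_expire(struct conn *cp)        { printf("conn %p expired immediately\n", (void *)cp); }
static void conn_restart_timer(struct conn *cp) { printf("conn %p timer re-armed\n", (void *)cp); }

/* A one-packet-scheduling connection whose last reference is being dropped
 * is expired inline, skipping the mod_timer()/timer-softirq round trip;
 * everything else keeps the old timer-based path.
 */
static void conn_put(struct conn *cp)
{
	cp->refcnt--;
	if ((cp->flags & F_ONE_PACKET) && cp->refcnt == 0 && !cp->timer_pending)
		conn_expire(cp);
	else
		conn_restart_timer(cp);
}

int main(void)
{
	struct conn ops  = { .flags = F_ONE_PACKET, .refcnt = 1 };
	struct conn norm = { .flags = 0, .refcnt = 2, .timer_pending = true };

	conn_put(&ops);		/* OPS: released on the spot */
	conn_put(&norm);	/* regular conn: timer path */
	return 0;
}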
Signed-off-by: Marco Angaroni Acked-by: Julian Anastasov Signed-off-by: Simon Horman --- net/netfilter/ipvs/ip_vs_conn.c | 26 +++++++++++++++++++++++--- 1 file changed, 23 insertions(+), 3 deletions(-) diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c index 85ca189bdc3d..dd75d4120099 100644 --- a/net/netfilter/ipvs/ip_vs_conn.c +++ b/net/netfilter/ipvs/ip_vs_conn.c @@ -104,6 +104,7 @@ static inline void ct_write_unlock_bh(unsigned int key) spin_unlock_bh(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l); } +static void ip_vs_conn_expire(unsigned long data); /* * Returns hash value for IPVS connection entry @@ -453,10 +454,16 @@ ip_vs_conn_out_get_proto(struct netns_ipvs *ipvs, int af, } EXPORT_SYMBOL_GPL(ip_vs_conn_out_get_proto); +static void __ip_vs_conn_put_notimer(struct ip_vs_conn *cp) +{ + __ip_vs_conn_put(cp); + ip_vs_conn_expire((unsigned long)cp); +} + /* * Put back the conn and restart its timer with its timeout */ -void ip_vs_conn_put(struct ip_vs_conn *cp) +static void __ip_vs_conn_put_timer(struct ip_vs_conn *cp) { unsigned long t = (cp->flags & IP_VS_CONN_F_ONE_PACKET) ? 0 : cp->timeout; @@ -465,6 +472,16 @@ void ip_vs_conn_put(struct ip_vs_conn *cp) __ip_vs_conn_put(cp); } +void ip_vs_conn_put(struct ip_vs_conn *cp) +{ + if ((cp->flags & IP_VS_CONN_F_ONE_PACKET) && + (atomic_read(&cp->refcnt) == 1) && + !timer_pending(&cp->timer)) + /* expire connection immediately */ + __ip_vs_conn_put_notimer(cp); + else + __ip_vs_conn_put_timer(cp); +} /* * Fill a no_client_port connection with a client port number @@ -834,7 +851,10 @@ static void ip_vs_conn_expire(unsigned long data) ip_vs_unbind_dest(cp); if (cp->flags & IP_VS_CONN_F_NO_CPORT) atomic_dec(&ip_vs_conn_no_cport_cnt); - call_rcu(&cp->rcu_head, ip_vs_conn_rcu_free); + if (cp->flags & IP_VS_CONN_F_ONE_PACKET) + ip_vs_conn_rcu_free(&cp->rcu_head); + else + call_rcu(&cp->rcu_head, ip_vs_conn_rcu_free); atomic_dec(&ipvs->conn_count); return; } @@ -850,7 +870,7 @@ static void ip_vs_conn_expire(unsigned long data) if (ipvs->sync_state & IP_VS_STATE_MASTER) ip_vs_sync_conn(ipvs, cp, sysctl_sync_threshold(ipvs)); - ip_vs_conn_put(cp); + __ip_vs_conn_put_timer(cp); } /* Modify timer, so that it expires as soon as possible. From 8fb04d9fc70a67ccabf71dbabf92d7f6fca64a16 Mon Sep 17 00:00:00 2001 From: Marco Angaroni Date: Sat, 9 Apr 2016 14:14:23 +0200 Subject: [PATCH 0841/1649] ipvs: don't alter conntrack in OPS mode When using OPS mode in conjunction with SIP persistent-engine, packets originating from the same ip-address/port could be balanced to different real servers, and (to properly handle SIP responses) OPS connections are created in the in-out direction too, where ip_vs_update_conntrack() is called to modify the reply tuple. As a result, there can be collision of conntrack tuples, causing random packet drops, as explained below: conntrack1: orig=CIP->VIP, reply=RIP1->CIP conntrack2: orig=RIP2->CIP, reply=CIP->VIP Tuple CIP->VIP is both in orig of conntrack1 and reply of conntrack2. The collision triggers packet drop inside nf_conntrack processing. In addition, the current implementation deletes the conntrack object at every expire of an OPS connection (once every forwarded packet), to have it recreated from scratch at next packet traversing IPVS. Since in OPS mode, by definition, we don't expect any associated response, the choices implemented in this patch are: a) don't call nf_conntrack_alter_reply() for OPS connections inside ip_vs_update_conntrack(). 
b) don't delete the conntrack object at OPS connection expire. The result is that created conntrack objects for each tuple CIP->VIP, RIP-N->CIP, etc. are left in UNREPLIED state and not modified by IPVS OPS connection management. This eliminates packet drops and leaves a single conntrack object for each tuple packets are sent from. Signed-off-by: Marco Angaroni Signed-off-by: Julian Anastasov Signed-off-by: Simon Horman --- net/netfilter/ipvs/ip_vs_conn.c | 3 ++- net/netfilter/ipvs/ip_vs_nfct.c | 4 ++++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c index dd75d4120099..292365ffa4f0 100644 --- a/net/netfilter/ipvs/ip_vs_conn.c +++ b/net/netfilter/ipvs/ip_vs_conn.c @@ -836,7 +836,8 @@ static void ip_vs_conn_expire(unsigned long data) if (cp->control) ip_vs_control_del(cp); - if (cp->flags & IP_VS_CONN_F_NFCT) { + if ((cp->flags & IP_VS_CONN_F_NFCT) && + !(cp->flags & IP_VS_CONN_F_ONE_PACKET)) { /* Do not access conntracks during subsys cleanup * because nf_conntrack_find_get can not be used after * conntrack cleanup for the net. diff --git a/net/netfilter/ipvs/ip_vs_nfct.c b/net/netfilter/ipvs/ip_vs_nfct.c index 30434fb133df..f04fd8df210b 100644 --- a/net/netfilter/ipvs/ip_vs_nfct.c +++ b/net/netfilter/ipvs/ip_vs_nfct.c @@ -93,6 +93,10 @@ ip_vs_update_conntrack(struct sk_buff *skb, struct ip_vs_conn *cp, int outin) if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ) return; + /* Never alter conntrack for OPS conns (no reply is expected) */ + if (cp->flags & IP_VS_CONN_F_ONE_PACKET) + return; + /* Alter reply only in original direction */ if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL) return; From afd356dfb3a4127b61a3519802a4db9046703724 Mon Sep 17 00:00:00 2001 From: Bjorn Andersson Date: Mon, 28 Mar 2016 21:35:23 -0700 Subject: [PATCH 0842/1649] soc: qcom: smem: Use write-combine remap for SMEM Mapping the SMEM region as write combine makes the contiguous writes in SMD perform better and also allows us to do unaligned read and writes on ARM64. Signed-off-by: Bjorn Andersson Reviewed-by: Andy Gross Signed-off-by: Andy Gross --- drivers/soc/qcom/smem.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/soc/qcom/smem.c b/drivers/soc/qcom/smem.c index 19019aa092e8..2e1aa9f130f4 100644 --- a/drivers/soc/qcom/smem.c +++ b/drivers/soc/qcom/smem.c @@ -684,8 +684,7 @@ static int qcom_smem_map_memory(struct qcom_smem *smem, struct device *dev, smem->regions[i].aux_base = (u32)r.start; smem->regions[i].size = resource_size(&r); - smem->regions[i].virt_base = devm_ioremap_nocache(dev, r.start, - resource_size(&r)); + smem->regions[i].virt_base = devm_ioremap_wc(dev, r.start, resource_size(&r)); if (!smem->regions[i].virt_base) return -ENOMEM; From b853cb9628bfbcc4017da46d5f5b46e3eba9d8c6 Mon Sep 17 00:00:00 2001 From: Bjorn Andersson Date: Mon, 28 Mar 2016 21:35:22 -0700 Subject: [PATCH 0843/1649] soc: qcom: smd: Make callback pass channel reference By passing the smd channel reference to the callback, rather than the smd device, we can open additional smd channels from sub-devices of smd devices. Also updates the two smd clients today found in mainline. 
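The sketch below models the new callback signature and the drvdata accessors with toy types; only the calling convention is intended to match the qcom_smd API, the storage and the client state names are made up for the example:

#include <stdio.h>
#include <stddef.h>

struct smd_channel {
	const char *name;
	void *drvdata;
};

typedef int (*smd_cb_t)(struct smd_channel *channel, const void *data, size_t count);

static void *channel_get_drvdata(struct smd_channel *ch)         { return ch->drvdata; }
static void  channel_set_drvdata(struct smd_channel *ch, void *d) { ch->drvdata = d; }

/* A client keeps its private state reachable from the channel itself, so
 * the callback no longer needs the parent smd device to find it. */
struct rpm_client { int acks; };

static int rpm_callback(struct smd_channel *channel, const void *data, size_t count)
{
	struct rpm_client *rpm = channel_get_drvdata(channel);

	(void)data;
	rpm->acks++;
	printf("%s: got %zu bytes, %d acks so far\n", channel->name, count, rpm->acks);
	return 0;
}

int main(void)
{
	struct smd_channel ch = { .name = "rpm_requests" };
	struct rpm_client rpm = { 0 };
	smd_cb_t cb = rpm_callback;

	channel_set_drvdata(&ch, &rpm);	/* done at probe time in the driver */
	cb(&ch, "ack", 3);		/* simulate an incoming SMD packet */
	return 0;
}

Passing the channel instead of the device is what lets a sub-device opened via qcom_smd_open_channel() reuse the same callback type without having an smd device of its own.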
Signed-off-by: Bjorn Andersson Signed-off-by: Andy Gross --- drivers/soc/qcom/smd-rpm.c | 9 ++++++--- drivers/soc/qcom/smd.c | 22 ++++++++++++++++++---- drivers/soc/qcom/wcnss_ctrl.c | 8 ++++---- include/linux/soc/qcom/smd.h | 7 +++++-- 4 files changed, 33 insertions(+), 13 deletions(-) diff --git a/drivers/soc/qcom/smd-rpm.c b/drivers/soc/qcom/smd-rpm.c index 731fa066f712..6609d7e0edb0 100644 --- a/drivers/soc/qcom/smd-rpm.c +++ b/drivers/soc/qcom/smd-rpm.c @@ -33,6 +33,7 @@ */ struct qcom_smd_rpm { struct qcom_smd_channel *rpm_channel; + struct device *dev; struct completion ack; struct mutex lock; @@ -149,14 +150,14 @@ out: } EXPORT_SYMBOL(qcom_rpm_smd_write); -static int qcom_smd_rpm_callback(struct qcom_smd_device *qsdev, +static int qcom_smd_rpm_callback(struct qcom_smd_channel *channel, const void *data, size_t count) { const struct qcom_rpm_header *hdr = data; size_t hdr_length = le32_to_cpu(hdr->length); const struct qcom_rpm_message *msg; - struct qcom_smd_rpm *rpm = dev_get_drvdata(&qsdev->dev); + struct qcom_smd_rpm *rpm = qcom_smd_get_drvdata(channel); const u8 *buf = data + sizeof(struct qcom_rpm_header); const u8 *end = buf + hdr_length; char msgbuf[32]; @@ -165,7 +166,7 @@ static int qcom_smd_rpm_callback(struct qcom_smd_device *qsdev, if (le32_to_cpu(hdr->service_type) != RPM_SERVICE_TYPE_REQUEST || hdr_length < sizeof(struct qcom_rpm_message)) { - dev_err(&qsdev->dev, "invalid request\n"); + dev_err(rpm->dev, "invalid request\n"); return 0; } @@ -206,7 +207,9 @@ static int qcom_smd_rpm_probe(struct qcom_smd_device *sdev) mutex_init(&rpm->lock); init_completion(&rpm->ack); + rpm->dev = &sdev->dev; rpm->rpm_channel = sdev->channel; + qcom_smd_set_drvdata(sdev->channel, rpm); dev_set_drvdata(&sdev->dev, rpm); diff --git a/drivers/soc/qcom/smd.c b/drivers/soc/qcom/smd.c index b6434c4be86a..ac1957dfdf24 100644 --- a/drivers/soc/qcom/smd.c +++ b/drivers/soc/qcom/smd.c @@ -194,6 +194,8 @@ struct qcom_smd_channel { int pkt_size; + void *drvdata; + struct list_head list; struct list_head dev_list; }; @@ -513,7 +515,6 @@ static void qcom_smd_channel_advance(struct qcom_smd_channel *channel, */ static int qcom_smd_channel_recv_single(struct qcom_smd_channel *channel) { - struct qcom_smd_device *qsdev = channel->qsdev; unsigned tail; size_t len; void *ptr; @@ -533,7 +534,7 @@ static int qcom_smd_channel_recv_single(struct qcom_smd_channel *channel) len = channel->pkt_size; } - ret = channel->cb(qsdev, ptr, len); + ret = channel->cb(channel, ptr, len); if (ret < 0) return ret; @@ -1034,6 +1035,18 @@ int qcom_smd_driver_register(struct qcom_smd_driver *qsdrv) } EXPORT_SYMBOL(qcom_smd_driver_register); +void *qcom_smd_get_drvdata(struct qcom_smd_channel *channel) +{ + return channel->drvdata; +} +EXPORT_SYMBOL(qcom_smd_get_drvdata); + +void qcom_smd_set_drvdata(struct qcom_smd_channel *channel, void *data) +{ + channel->drvdata = data; +} +EXPORT_SYMBOL(qcom_smd_set_drvdata); + /** * qcom_smd_driver_unregister - unregister a smd driver * @qsdrv: qcom_smd_driver struct @@ -1079,12 +1092,13 @@ qcom_smd_find_channel(struct qcom_smd_edge *edge, const char *name) * Returns a channel handle on success, or -EPROBE_DEFER if the channel isn't * ready. 
*/ -struct qcom_smd_channel *qcom_smd_open_channel(struct qcom_smd_device *sdev, +struct qcom_smd_channel *qcom_smd_open_channel(struct qcom_smd_channel *parent, const char *name, qcom_smd_cb_t cb) { struct qcom_smd_channel *channel; - struct qcom_smd_edge *edge = sdev->channel->edge; + struct qcom_smd_device *sdev = parent->qsdev; + struct qcom_smd_edge *edge = parent->edge; int ret; /* Wait up to HZ for the channel to appear */ diff --git a/drivers/soc/qcom/wcnss_ctrl.c b/drivers/soc/qcom/wcnss_ctrl.c index 7a986f881d5c..c544f3d2c6ee 100644 --- a/drivers/soc/qcom/wcnss_ctrl.c +++ b/drivers/soc/qcom/wcnss_ctrl.c @@ -100,17 +100,17 @@ struct wcnss_download_nv_resp { /** * wcnss_ctrl_smd_callback() - handler from SMD responses - * @qsdev: smd device handle + * @channel: smd channel handle * @data: pointer to the incoming data packet * @count: size of the incoming data packet * * Handles any incoming packets from the remote WCNSS_CTRL service. */ -static int wcnss_ctrl_smd_callback(struct qcom_smd_device *qsdev, +static int wcnss_ctrl_smd_callback(struct qcom_smd_channel *channel, const void *data, size_t count) { - struct wcnss_ctrl *wcnss = dev_get_drvdata(&qsdev->dev); + struct wcnss_ctrl *wcnss = qcom_smd_get_drvdata(channel); const struct wcnss_download_nv_resp *nvresp; const struct wcnss_version_resp *version; const struct wcnss_msg_hdr *hdr = data; @@ -246,7 +246,7 @@ static int wcnss_ctrl_probe(struct qcom_smd_device *sdev) init_completion(&wcnss->ack); INIT_WORK(&wcnss->download_nv_work, wcnss_download_nv); - dev_set_drvdata(&sdev->dev, wcnss); + qcom_smd_set_drvdata(sdev->channel, wcnss); return wcnss_request_version(wcnss); } diff --git a/include/linux/soc/qcom/smd.h b/include/linux/soc/qcom/smd.h index bd51c8a9d807..cb2f81559bc0 100644 --- a/include/linux/soc/qcom/smd.h +++ b/include/linux/soc/qcom/smd.h @@ -26,7 +26,7 @@ struct qcom_smd_device { struct qcom_smd_channel *channel; }; -typedef int (*qcom_smd_cb_t)(struct qcom_smd_device *, const void *, size_t); +typedef int (*qcom_smd_cb_t)(struct qcom_smd_channel *, const void *, size_t); /** * struct qcom_smd_driver - smd driver struct @@ -50,13 +50,16 @@ struct qcom_smd_driver { int qcom_smd_driver_register(struct qcom_smd_driver *drv); void qcom_smd_driver_unregister(struct qcom_smd_driver *drv); +void *qcom_smd_get_drvdata(struct qcom_smd_channel *channel); +void qcom_smd_set_drvdata(struct qcom_smd_channel *channel, void *data); + #define module_qcom_smd_driver(__smd_driver) \ module_driver(__smd_driver, qcom_smd_driver_register, \ qcom_smd_driver_unregister) int qcom_smd_send(struct qcom_smd_channel *channel, const void *data, int len); -struct qcom_smd_channel *qcom_smd_open_channel(struct qcom_smd_device *sdev, +struct qcom_smd_channel *qcom_smd_open_channel(struct qcom_smd_channel *channel, const char *name, qcom_smd_cb_t cb); From 60f5f5d3a106dc5385b39348d13b20b15ac9cbf9 Mon Sep 17 00:00:00 2001 From: Alexander Aring Date: Tue, 19 Apr 2016 15:34:22 +0200 Subject: [PATCH 0844/1649] at86rf230: increase sleep to off timings I expierenced when setting channel while sleep mode it didn't changed the channel inside the hardware registers. Then I got another report of an user which has similar issues. I increased the sleep to off state change timing, which is according at86rf233 at maximum 1000 us. After this change I got no similar effects again. I tried another option to wait on AWAKE_END irq, which can be used to wait until the transceiver is awaked. I tested it and the IRQ took 4 seconds after starting state change. 
I don't believe it takes 4 seconds to go into the TRX_OFF state from SLEEP state. The alternative is to increase the timings which seems to work. Cc: Oleg Hahm Signed-off-by: Alexander Aring Reviewed-by: Stefan Schmidt Signed-off-by: Marcel Holtmann --- drivers/net/ieee802154/at86rf230.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c index cb9e9fe6d77a..9f10da60e02d 100644 --- a/drivers/net/ieee802154/at86rf230.c +++ b/drivers/net/ieee802154/at86rf230.c @@ -1340,7 +1340,7 @@ static struct at86rf2xx_chip_data at86rf233_data = { .t_off_to_aack = 80, .t_off_to_tx_on = 80, .t_off_to_sleep = 35, - .t_sleep_to_off = 210, + .t_sleep_to_off = 1000, .t_frame = 4096, .t_p_ack = 545, .rssi_base_val = -91, @@ -1355,7 +1355,7 @@ static struct at86rf2xx_chip_data at86rf231_data = { .t_off_to_aack = 110, .t_off_to_tx_on = 110, .t_off_to_sleep = 35, - .t_sleep_to_off = 380, + .t_sleep_to_off = 1000, .t_frame = 4096, .t_p_ack = 545, .rssi_base_val = -91, @@ -1370,7 +1370,7 @@ static struct at86rf2xx_chip_data at86rf212_data = { .t_off_to_aack = 200, .t_off_to_tx_on = 200, .t_off_to_sleep = 35, - .t_sleep_to_off = 380, + .t_sleep_to_off = 1000, .t_frame = 4096, .t_p_ack = 545, .rssi_base_val = -100, From c7c999cb18da88a881e10e07f0724ad0bfaff770 Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Thu, 14 Apr 2016 17:32:19 +0200 Subject: [PATCH 0845/1649] Bluetooth: vhci: Fix race at creating hci device hci_vhci driver creates a hci device object dynamically upon each HCI_VENDOR_PKT write. Although it checks the already created object and returns an error, it's still racy and may build multiple hci_dev objects concurrently when parallel writes are performed, as the device tracks only a single hci_dev object. This patch introduces a mutex to protect against the concurrent device creations. 
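A userspace model of the same check-then-create race fix, using a pthread mutex in place of the kernel mutex; the device object is a placeholder rather than a real hci_dev:

#include <stdio.h>
#include <pthread.h>

/* The device pointer is created lazily on first use; the mutex makes the
 * check-then-create step atomic so two concurrent writers cannot both see
 * "no device yet" and create twice. (The driver protects data->hdev with
 * data->open_mutex in the same way.) */
struct vhci_like {
	pthread_mutex_t open_mutex;
	void *hdev;
};

static int create_device_locked(struct vhci_like *d)
{
	if (d->hdev)
		return -1;		/* -EBADFD in the driver */
	d->hdev = &d->hdev;		/* stand-in for the real allocation */
	printf("device created\n");
	return 0;
}

static int create_device(struct vhci_like *d)
{
	int err;

	pthread_mutex_lock(&d->open_mutex);
	err = create_device_locked(d);
	pthread_mutex_unlock(&d->open_mutex);
	return err;
}

int main(void)
{
	struct vhci_like d = { .open_mutex = PTHREAD_MUTEX_INITIALIZER };

	create_device(&d);		/* first caller wins */
	if (create_device(&d) < 0)	/* later callers get an error */
		printf("already created\n");
	return 0;
}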
Cc: Signed-off-by: Takashi Iwai Signed-off-by: Marcel Holtmann --- drivers/bluetooth/hci_vhci.c | 23 +++++++++++++++++------ 1 file changed, 17 insertions(+), 6 deletions(-) diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c index f67ea1c090cb..aba31210c802 100644 --- a/drivers/bluetooth/hci_vhci.c +++ b/drivers/bluetooth/hci_vhci.c @@ -50,6 +50,7 @@ struct vhci_data { wait_queue_head_t read_wait; struct sk_buff_head readq; + struct mutex open_mutex; struct delayed_work open_timeout; }; @@ -87,12 +88,15 @@ static int vhci_send_frame(struct hci_dev *hdev, struct sk_buff *skb) return 0; } -static int vhci_create_device(struct vhci_data *data, __u8 opcode) +static int __vhci_create_device(struct vhci_data *data, __u8 opcode) { struct hci_dev *hdev; struct sk_buff *skb; __u8 dev_type; + if (data->hdev) + return -EBADFD; + /* bits 0-1 are dev_type (BR/EDR or AMP) */ dev_type = opcode & 0x03; @@ -151,6 +155,17 @@ static int vhci_create_device(struct vhci_data *data, __u8 opcode) return 0; } +static int vhci_create_device(struct vhci_data *data, __u8 opcode) +{ + int err; + + mutex_lock(&data->open_mutex); + err = __vhci_create_device(data, opcode); + mutex_unlock(&data->open_mutex); + + return err; +} + static inline ssize_t vhci_get_user(struct vhci_data *data, struct iov_iter *from) { @@ -191,11 +206,6 @@ static inline ssize_t vhci_get_user(struct vhci_data *data, case HCI_VENDOR_PKT: cancel_delayed_work_sync(&data->open_timeout); - if (data->hdev) { - kfree_skb(skb); - return -EBADFD; - } - opcode = *((__u8 *) skb->data); skb_pull(skb, 1); @@ -320,6 +330,7 @@ static int vhci_open(struct inode *inode, struct file *file) skb_queue_head_init(&data->readq); init_waitqueue_head(&data->read_wait); + mutex_init(&data->open_mutex); INIT_DELAYED_WORK(&data->open_timeout, vhci_open_timeout); file->private_data = data; From b84e93077fe926bc65e23d887b54fc46be60b76e Mon Sep 17 00:00:00 2001 From: Peter Heise Date: Wed, 20 Apr 2016 09:08:29 +0200 Subject: [PATCH 0846/1649] net/hsr: Fixed version field in ENUM New field (IFLA_HSR_VERSION) was added in the middle of an existing ENUM and would break kernel ABI, therefore moved to the end. Reported by Stephen Hemminger. Signed-off-by: Peter Heise Signed-off-by: David S. Miller --- include/uapi/linux/if_link.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index 5ffdcb34e35b..af8fd58b4006 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h @@ -774,9 +774,9 @@ enum { IFLA_HSR_SLAVE1, IFLA_HSR_SLAVE2, IFLA_HSR_MULTICAST_SPEC, /* Last byte of supervision addr */ - IFLA_HSR_VERSION, /* HSR version */ IFLA_HSR_SUPERVISION_ADDR, /* Supervision frame multicast addr */ IFLA_HSR_SEQ_NR, + IFLA_HSR_VERSION, /* HSR version */ __IFLA_HSR_MAX, }; From cca1d81574d266d4a3aa33f3947297564525e127 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Wed, 20 Apr 2016 07:31:31 -0700 Subject: [PATCH 0847/1649] net: fix HAVE_EFFICIENT_UNALIGNED_ACCESS typos HAVE_EFFICIENT_UNALIGNED_ACCESS needs CONFIG_ prefix. Also add a comment in nla_align_64bit() explaining we have to add a padding if current skb->data is aligned, as it certainly can be confusing. Fixes: 35c5845957c7 ("net: Add helpers for 64-bit aligning netlink attributes.") Signed-off-by: Eric Dumazet Signed-off-by: David S. 
Miller --- include/net/netlink.h | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/include/net/netlink.h b/include/net/netlink.h index e644b3489acf..cf95df1fa14b 100644 --- a/include/net/netlink.h +++ b/include/net/netlink.h @@ -1238,18 +1238,21 @@ static inline int nla_validate_nested(const struct nlattr *start, int maxtype, * Conditionally emit a padding netlink attribute in order to make * the next attribute we emit have a 64-bit aligned nla_data() area. * This will only be done in architectures which do not have - * HAVE_EFFICIENT_UNALIGNED_ACCESS defined. + * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS defined. * * Returns zero on success or a negative error code. */ static inline int nla_align_64bit(struct sk_buff *skb, int padattr) { -#ifndef HAVE_EFFICIENT_UNALIGNED_ACCESS - if (IS_ALIGNED((unsigned long)skb->data, 8)) { - struct nlattr *attr = nla_reserve(skb, padattr, 0); - if (!attr) - return -EMSGSIZE; - } +#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS + /* The nlattr header is 4 bytes in size, that's why we test + * if the skb->data _is_ aligned. This NOP attribute, plus + * nlattr header for next attribute, will make nla_data() + * 8-byte aligned. + */ + if (IS_ALIGNED((unsigned long)skb->data, 8) && + !nla_reserve(skb, padattr, 0)) + return -EMSGSIZE; #endif return 0; } @@ -1261,7 +1264,7 @@ static inline int nla_align_64bit(struct sk_buff *skb, int padattr) static inline int nla_total_size_64bit(int payload) { return NLA_ALIGN(nla_attr_size(payload)) -#ifndef HAVE_EFFICIENT_UNALIGNED_ACCESS +#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS + NLA_ALIGN(nla_attr_size(0)) #endif ; From 3fb55c79d092d085bddd4fc94f250acfc1275f3d Mon Sep 17 00:00:00 2001 From: Kalle Valo Date: Wed, 20 Apr 2016 19:44:36 +0300 Subject: [PATCH 0848/1649] ath10k: remove deprecated firmware API 1 support This has ben deprecated years ago, I haven't heard anyone using it since and most likely it won't even work anymore. So just remove all of it. 
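Going back to the nla_align_64bit() comment corrected earlier in this patch, the following standalone program walks the padding arithmetic; the offsets and the assumption that the message buffer itself starts 8-byte aligned are illustrative only:

#include <stdio.h>

#define NLA_HDRLEN 4			/* sizeof(struct nlattr) */
#define NLA_ALIGN(len) (((len) + 3) & ~3)

/* When the current write offset is 8-byte aligned, a zero-payload pad
 * attribute (4-byte header) plus the next attribute's 4-byte header leaves
 * the payload 8-byte aligned; when the offset is only 4-byte aligned, the
 * next header alone already does the job.
 */
static unsigned int payload_offset(unsigned int offset, int emit_pad)
{
	if (emit_pad)
		offset += NLA_ALIGN(NLA_HDRLEN);	/* pad attr, empty payload */
	return offset + NLA_HDRLEN;			/* real attr header */
}

int main(void)
{
	unsigned int off;

	for (off = 0; off <= 12; off += 4) {
		int pad = (off % 8) == 0;	/* pad only when data is aligned */
		unsigned int payload = payload_offset(off, pad);

		printf("offset %2u: pad=%d -> payload at %2u (%s)\n",
		       off, pad, payload,
		       payload % 8 ? "unaligned" : "8-byte aligned");
	}
	return 0;
}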
Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/core.c | 73 -------------------------- drivers/net/wireless/ath/ath10k/core.h | 3 -- drivers/net/wireless/ath/ath10k/hw.h | 12 ----- drivers/net/wireless/ath/ath10k/pci.c | 1 - drivers/net/wireless/ath/ath10k/wmi.c | 4 -- 5 files changed, 93 deletions(-) diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c index 1c4106b84a35..48389e0b87f6 100644 --- a/drivers/net/wireless/ath/ath10k/core.c +++ b/drivers/net/wireless/ath/ath10k/core.c @@ -63,8 +63,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .cal_data_len = 2116, .fw = { .dir = QCA988X_HW_2_0_FW_DIR, - .fw = QCA988X_HW_2_0_FW_FILE, - .otp = QCA988X_HW_2_0_OTP_FILE, .board = QCA988X_HW_2_0_BOARD_DATA_FILE, .board_size = QCA988X_BOARD_DATA_SZ, .board_ext_size = QCA988X_BOARD_EXT_DATA_SZ, @@ -82,8 +80,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .cal_data_len = 8124, .fw = { .dir = QCA6174_HW_2_1_FW_DIR, - .fw = QCA6174_HW_2_1_FW_FILE, - .otp = QCA6174_HW_2_1_OTP_FILE, .board = QCA6174_HW_2_1_BOARD_DATA_FILE, .board_size = QCA6174_BOARD_DATA_SZ, .board_ext_size = QCA6174_BOARD_EXT_DATA_SZ, @@ -102,8 +98,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .cal_data_len = 8124, .fw = { .dir = QCA6174_HW_2_1_FW_DIR, - .fw = QCA6174_HW_2_1_FW_FILE, - .otp = QCA6174_HW_2_1_OTP_FILE, .board = QCA6174_HW_2_1_BOARD_DATA_FILE, .board_size = QCA6174_BOARD_DATA_SZ, .board_ext_size = QCA6174_BOARD_EXT_DATA_SZ, @@ -122,8 +116,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .cal_data_len = 8124, .fw = { .dir = QCA6174_HW_3_0_FW_DIR, - .fw = QCA6174_HW_3_0_FW_FILE, - .otp = QCA6174_HW_3_0_OTP_FILE, .board = QCA6174_HW_3_0_BOARD_DATA_FILE, .board_size = QCA6174_BOARD_DATA_SZ, .board_ext_size = QCA6174_BOARD_EXT_DATA_SZ, @@ -143,8 +135,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .fw = { /* uses same binaries as hw3.0 */ .dir = QCA6174_HW_3_0_FW_DIR, - .fw = QCA6174_HW_3_0_FW_FILE, - .otp = QCA6174_HW_3_0_OTP_FILE, .board = QCA6174_HW_3_0_BOARD_DATA_FILE, .board_size = QCA6174_BOARD_DATA_SZ, .board_ext_size = QCA6174_BOARD_EXT_DATA_SZ, @@ -167,8 +157,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .cal_data_len = 12064, .fw = { .dir = QCA99X0_HW_2_0_FW_DIR, - .fw = QCA99X0_HW_2_0_FW_FILE, - .otp = QCA99X0_HW_2_0_OTP_FILE, .board = QCA99X0_HW_2_0_BOARD_DATA_FILE, .board_size = QCA99X0_BOARD_DATA_SZ, .board_ext_size = QCA99X0_BOARD_EXT_DATA_SZ, @@ -186,8 +174,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .cal_data_len = 8124, .fw = { .dir = QCA9377_HW_1_0_FW_DIR, - .fw = QCA9377_HW_1_0_FW_FILE, - .otp = QCA9377_HW_1_0_OTP_FILE, .board = QCA9377_HW_1_0_BOARD_DATA_FILE, .board_size = QCA9377_BOARD_DATA_SZ, .board_ext_size = QCA9377_BOARD_EXT_DATA_SZ, @@ -205,8 +191,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .cal_data_len = 8124, .fw = { .dir = QCA9377_HW_1_0_FW_DIR, - .fw = QCA9377_HW_1_0_FW_FILE, - .otp = QCA9377_HW_1_0_OTP_FILE, .board = QCA9377_HW_1_0_BOARD_DATA_FILE, .board_size = QCA9377_BOARD_DATA_SZ, .board_ext_size = QCA9377_BOARD_EXT_DATA_SZ, @@ -229,8 +213,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .cal_data_len = 12064, .fw = { .dir = QCA4019_HW_1_0_FW_DIR, - .fw = QCA4019_HW_1_0_FW_FILE, - .otp = QCA4019_HW_1_0_OTP_FILE, .board = QCA4019_HW_1_0_BOARD_DATA_FILE, .board_size = QCA4019_BOARD_DATA_SZ, .board_ext_size = QCA4019_BOARD_EXT_DATA_SZ, 
@@ -703,9 +685,6 @@ static void ath10k_core_free_board_files(struct ath10k *ar) static void ath10k_core_free_firmware_files(struct ath10k *ar) { - if (!IS_ERR(ar->otp)) - release_firmware(ar->otp); - if (!IS_ERR(ar->firmware)) release_firmware(ar->firmware); @@ -714,7 +693,6 @@ static void ath10k_core_free_firmware_files(struct ath10k *ar) ath10k_swap_code_seg_release(ar); - ar->otp = NULL; ar->otp_data = NULL; ar->otp_len = 0; @@ -1000,50 +978,6 @@ success: return 0; } -static int ath10k_core_fetch_firmware_api_1(struct ath10k *ar) -{ - int ret = 0; - - if (ar->hw_params.fw.fw == NULL) { - ath10k_err(ar, "firmware file not defined\n"); - return -EINVAL; - } - - ar->firmware = ath10k_fetch_fw_file(ar, - ar->hw_params.fw.dir, - ar->hw_params.fw.fw); - if (IS_ERR(ar->firmware)) { - ret = PTR_ERR(ar->firmware); - ath10k_err(ar, "could not fetch firmware (%d)\n", ret); - goto err; - } - - ar->firmware_data = ar->firmware->data; - ar->firmware_len = ar->firmware->size; - - /* OTP may be undefined. If so, don't fetch it at all */ - if (ar->hw_params.fw.otp == NULL) - return 0; - - ar->otp = ath10k_fetch_fw_file(ar, - ar->hw_params.fw.dir, - ar->hw_params.fw.otp); - if (IS_ERR(ar->otp)) { - ret = PTR_ERR(ar->otp); - ath10k_err(ar, "could not fetch otp (%d)\n", ret); - goto err; - } - - ar->otp_data = ar->otp->data; - ar->otp_len = ar->otp->size; - - return 0; - -err: - ath10k_core_free_firmware_files(ar); - return ret; -} - static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name) { size_t magic_len, len, ie_len; @@ -1253,13 +1187,6 @@ static int ath10k_core_fetch_firmware_files(struct ath10k *ar) ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api); ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API2_FILE); - if (ret == 0) - goto success; - - ar->fw_api = 1; - ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api); - - ret = ath10k_core_fetch_firmware_api_1(ar); if (ret) return ret; diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h index e6f889df1e0d..2d0cc92f3c5b 100644 --- a/drivers/net/wireless/ath/ath10k/core.h +++ b/drivers/net/wireless/ath/ath10k/core.h @@ -708,8 +708,6 @@ struct ath10k { struct ath10k_hw_params_fw { const char *dir; - const char *fw; - const char *otp; const char *board; size_t board_size; size_t board_ext_size; @@ -720,7 +718,6 @@ struct ath10k { const void *board_data; size_t board_len; - const struct firmware *otp; const void *otp_data; size_t otp_len; diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h index c0179bc4af29..aedd8987040b 100644 --- a/drivers/net/wireless/ath/ath10k/hw.h +++ b/drivers/net/wireless/ath/ath10k/hw.h @@ -35,8 +35,6 @@ #define QCA988X_HW_2_0_VERSION 0x4100016c #define QCA988X_HW_2_0_CHIP_ID_REV 0x2 #define QCA988X_HW_2_0_FW_DIR ATH10K_FW_DIR "/QCA988X/hw2.0" -#define QCA988X_HW_2_0_FW_FILE "firmware.bin" -#define QCA988X_HW_2_0_OTP_FILE "otp.bin" #define QCA988X_HW_2_0_BOARD_DATA_FILE "board.bin" #define QCA988X_HW_2_0_PATCH_LOAD_ADDR 0x1234 @@ -76,14 +74,10 @@ enum qca9377_chip_id_rev { }; #define QCA6174_HW_2_1_FW_DIR "ath10k/QCA6174/hw2.1" -#define QCA6174_HW_2_1_FW_FILE "firmware.bin" -#define QCA6174_HW_2_1_OTP_FILE "otp.bin" #define QCA6174_HW_2_1_BOARD_DATA_FILE "board.bin" #define QCA6174_HW_2_1_PATCH_LOAD_ADDR 0x1234 #define QCA6174_HW_3_0_FW_DIR "ath10k/QCA6174/hw3.0" -#define QCA6174_HW_3_0_FW_FILE "firmware.bin" -#define QCA6174_HW_3_0_OTP_FILE "otp.bin" #define QCA6174_HW_3_0_BOARD_DATA_FILE 
"board.bin" #define QCA6174_HW_3_0_PATCH_LOAD_ADDR 0x1234 @@ -94,23 +88,17 @@ enum qca9377_chip_id_rev { #define QCA99X0_HW_2_0_DEV_VERSION 0x01000000 #define QCA99X0_HW_2_0_CHIP_ID_REV 0x1 #define QCA99X0_HW_2_0_FW_DIR ATH10K_FW_DIR "/QCA99X0/hw2.0" -#define QCA99X0_HW_2_0_FW_FILE "firmware.bin" -#define QCA99X0_HW_2_0_OTP_FILE "otp.bin" #define QCA99X0_HW_2_0_BOARD_DATA_FILE "board.bin" #define QCA99X0_HW_2_0_PATCH_LOAD_ADDR 0x1234 /* QCA9377 1.0 definitions */ #define QCA9377_HW_1_0_FW_DIR ATH10K_FW_DIR "/QCA9377/hw1.0" -#define QCA9377_HW_1_0_FW_FILE "firmware.bin" -#define QCA9377_HW_1_0_OTP_FILE "otp.bin" #define QCA9377_HW_1_0_BOARD_DATA_FILE "board.bin" #define QCA9377_HW_1_0_PATCH_LOAD_ADDR 0x1234 /* QCA4019 1.0 definitions */ #define QCA4019_HW_1_0_DEV_VERSION 0x01000000 #define QCA4019_HW_1_0_FW_DIR ATH10K_FW_DIR "/QCA4019/hw1.0" -#define QCA4019_HW_1_0_FW_FILE "firmware.bin" -#define QCA4019_HW_1_0_OTP_FILE "otp.bin" #define QCA4019_HW_1_0_BOARD_DATA_FILE "board.bin" #define QCA4019_HW_1_0_PATCH_LOAD_ADDR 0x1234 diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c index cdd8a307c55b..8133d7b5b956 100644 --- a/drivers/net/wireless/ath/ath10k/pci.c +++ b/drivers/net/wireless/ath/ath10k/pci.c @@ -3173,7 +3173,6 @@ MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices"); MODULE_LICENSE("Dual BSD/GPL"); /* QCA988x 2.0 firmware files */ -MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_FILE); MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API2_FILE); MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API3_FILE); MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API4_FILE); diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c index 7eb40f54fdb5..254844b37d92 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.c +++ b/drivers/net/wireless/ath/ath10k/wmi.c @@ -4604,10 +4604,6 @@ static void ath10k_wmi_event_service_ready_work(struct work_struct *work) ath10k_dbg_dump(ar, ATH10K_DBG_WMI, NULL, "wmi svc: ", arg.service_map, arg.service_map_len); - /* only manually set fw features when not using FW IE format */ - if (ar->fw_api == 1 && ar->fw_version_build > 636) - set_bit(ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX, ar->fw_features); - if (ar->num_rf_chains > ar->max_spatial_stream) { ath10k_warn(ar, "hardware advertises support for more spatial streams than it should (%d > %d)\n", ar->num_rf_chains, ar->max_spatial_stream); From 7ebf721d0d47150f6e327a6ae2692779495a2c2a Mon Sep 17 00:00:00 2001 From: Kalle Valo Date: Wed, 20 Apr 2016 19:44:51 +0300 Subject: [PATCH 0849/1649] ath10k: refactor firmware images to struct ath10k_fw_components To make it easier to share ath10k_core_fetch_board_data_api_n() with testmode.c refactor all firmware components to struct ath10k_fw_components. This structure will hold firmware related files, for example firmware-N.bin and board-N.bin. For firmware-N.bin create a new struct ath10k_fw_file which contains the actual firmware image as well as the parsed data from the image. Modify ath10k_core_start() to take struct ath10k_fw_components() as an argument which makes it possible in following patches to drop some ugly hacks from testmode.c. 
Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/core.c | 176 +++++++++++---------- drivers/net/wireless/ath/ath10k/core.h | 46 ++++-- drivers/net/wireless/ath/ath10k/debug.c | 28 ++-- drivers/net/wireless/ath/ath10k/mac.c | 3 +- drivers/net/wireless/ath/ath10k/swap.c | 22 ++- drivers/net/wireless/ath/ath10k/testmode.c | 61 ++++--- 6 files changed, 199 insertions(+), 137 deletions(-) diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c index 48389e0b87f6..b2efece5b32d 100644 --- a/drivers/net/wireless/ath/ath10k/core.c +++ b/drivers/net/wireless/ath/ath10k/core.c @@ -538,7 +538,8 @@ static int ath10k_core_get_board_id_from_otp(struct ath10k *ar) address = ar->hw_params.patch_load_addr; - if (!ar->otp_data || !ar->otp_len) { + if (!ar->normal_mode_fw.fw_file.otp_data || + !ar->normal_mode_fw.fw_file.otp_len) { ath10k_warn(ar, "failed to retrieve board id because of invalid otp\n"); return -ENODATA; @@ -546,9 +547,11 @@ static int ath10k_core_get_board_id_from_otp(struct ath10k *ar) ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot upload otp to 0x%x len %zd for board id\n", - address, ar->otp_len); + address, ar->normal_mode_fw.fw_file.otp_len); - ret = ath10k_bmi_fast_download(ar, address, ar->otp_data, ar->otp_len); + ret = ath10k_bmi_fast_download(ar, address, + ar->normal_mode_fw.fw_file.otp_data, + ar->normal_mode_fw.fw_file.otp_len); if (ret) { ath10k_err(ar, "could not write otp for board id check: %d\n", ret); @@ -586,7 +589,9 @@ static int ath10k_download_and_run_otp(struct ath10k *ar) u32 bmi_otp_exe_param = ar->hw_params.otp_exe_param; int ret; - ret = ath10k_download_board_data(ar, ar->board_data, ar->board_len); + ret = ath10k_download_board_data(ar, + ar->running_fw->board_data, + ar->running_fw->board_len); if (ret) { ath10k_err(ar, "failed to download board data: %d\n", ret); return ret; @@ -594,16 +599,20 @@ static int ath10k_download_and_run_otp(struct ath10k *ar) /* OTP is optional */ - if (!ar->otp_data || !ar->otp_len) { + if (!ar->running_fw->fw_file.otp_data || + !ar->running_fw->fw_file.otp_len) { ath10k_warn(ar, "Not running otp, calibration will be incorrect (otp-data %p otp_len %zd)!\n", - ar->otp_data, ar->otp_len); + ar->running_fw->fw_file.otp_data, + ar->running_fw->fw_file.otp_len); return 0; } ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot upload otp to 0x%x len %zd\n", - address, ar->otp_len); + address, ar->running_fw->fw_file.otp_len); - ret = ath10k_bmi_fast_download(ar, address, ar->otp_data, ar->otp_len); + ret = ath10k_bmi_fast_download(ar, address, + ar->running_fw->fw_file.otp_data, + ar->running_fw->fw_file.otp_len); if (ret) { ath10k_err(ar, "could not write otp (%d)\n", ret); return ret; @@ -627,46 +636,33 @@ static int ath10k_download_and_run_otp(struct ath10k *ar) return 0; } -static int ath10k_download_fw(struct ath10k *ar, enum ath10k_firmware_mode mode) +static int ath10k_download_fw(struct ath10k *ar) { u32 address, data_len; - const char *mode_name; const void *data; int ret; address = ar->hw_params.patch_load_addr; - switch (mode) { - case ATH10K_FIRMWARE_MODE_NORMAL: - data = ar->firmware_data; - data_len = ar->firmware_len; - mode_name = "normal"; - ret = ath10k_swap_code_seg_configure(ar, - ATH10K_SWAP_CODE_SEG_BIN_TYPE_FW); - if (ret) { - ath10k_err(ar, "failed to configure fw code swap: %d\n", - ret); - return ret; - } - break; - case ATH10K_FIRMWARE_MODE_UTF: - data = ar->testmode.utf_firmware_data; - data_len = ar->testmode.utf_firmware_len; - mode_name = "utf"; - break; - default: - ath10k_err(ar, 
"unknown firmware mode: %d\n", mode); - return -EINVAL; + data = ar->running_fw->fw_file.firmware_data; + data_len = ar->running_fw->fw_file.firmware_len; + + ret = ath10k_swap_code_seg_configure(ar, + ATH10K_SWAP_CODE_SEG_BIN_TYPE_FW); + if (ret) { + ath10k_err(ar, "failed to configure fw code swap: %d\n", + ret); + return ret; } ath10k_dbg(ar, ATH10K_DBG_BOOT, - "boot uploading firmware image %p len %d mode %s\n", - data, data_len, mode_name); + "boot uploading firmware image %p len %d\n", + data, data_len); ret = ath10k_bmi_fast_download(ar, address, data, data_len); if (ret) { - ath10k_err(ar, "failed to download %s firmware: %d\n", - mode_name, ret); + ath10k_err(ar, "failed to download firmware: %d\n", + ret); return ret; } @@ -675,30 +671,30 @@ static int ath10k_download_fw(struct ath10k *ar, enum ath10k_firmware_mode mode) static void ath10k_core_free_board_files(struct ath10k *ar) { - if (!IS_ERR(ar->board)) - release_firmware(ar->board); + if (!IS_ERR(ar->normal_mode_fw.board)) + release_firmware(ar->normal_mode_fw.board); - ar->board = NULL; - ar->board_data = NULL; - ar->board_len = 0; + ar->normal_mode_fw.board = NULL; + ar->normal_mode_fw.board_data = NULL; + ar->normal_mode_fw.board_len = 0; } static void ath10k_core_free_firmware_files(struct ath10k *ar) { - if (!IS_ERR(ar->firmware)) - release_firmware(ar->firmware); + if (!IS_ERR(ar->normal_mode_fw.fw_file.firmware)) + release_firmware(ar->normal_mode_fw.fw_file.firmware); if (!IS_ERR(ar->cal_file)) release_firmware(ar->cal_file); ath10k_swap_code_seg_release(ar); - ar->otp_data = NULL; - ar->otp_len = 0; + ar->normal_mode_fw.fw_file.otp_data = NULL; + ar->normal_mode_fw.fw_file.otp_len = 0; - ar->firmware = NULL; - ar->firmware_data = NULL; - ar->firmware_len = 0; + ar->normal_mode_fw.fw_file.firmware = NULL; + ar->normal_mode_fw.fw_file.firmware_data = NULL; + ar->normal_mode_fw.fw_file.firmware_len = 0; ar->cal_file = NULL; } @@ -737,14 +733,14 @@ static int ath10k_core_fetch_board_data_api_1(struct ath10k *ar) return -EINVAL; } - ar->board = ath10k_fetch_fw_file(ar, - ar->hw_params.fw.dir, - ar->hw_params.fw.board); - if (IS_ERR(ar->board)) - return PTR_ERR(ar->board); + ar->normal_mode_fw.board = ath10k_fetch_fw_file(ar, + ar->hw_params.fw.dir, + ar->hw_params.fw.board); + if (IS_ERR(ar->normal_mode_fw.board)) + return PTR_ERR(ar->normal_mode_fw.board); - ar->board_data = ar->board->data; - ar->board_len = ar->board->size; + ar->normal_mode_fw.board_data = ar->normal_mode_fw.board->data; + ar->normal_mode_fw.board_len = ar->normal_mode_fw.board->size; return 0; } @@ -804,8 +800,8 @@ static int ath10k_core_parse_bd_ie_board(struct ath10k *ar, "boot found board data for '%s'", boardname); - ar->board_data = board_ie_data; - ar->board_len = board_ie_len; + ar->normal_mode_fw.board_data = board_ie_data; + ar->normal_mode_fw.board_len = board_ie_len; ret = 0; goto out; @@ -838,12 +834,14 @@ static int ath10k_core_fetch_board_data_api_n(struct ath10k *ar, const u8 *data; int ret, ie_id; - ar->board = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir, filename); - if (IS_ERR(ar->board)) - return PTR_ERR(ar->board); + ar->normal_mode_fw.board = ath10k_fetch_fw_file(ar, + ar->hw_params.fw.dir, + filename); + if (IS_ERR(ar->normal_mode_fw.board)) + return PTR_ERR(ar->normal_mode_fw.board); - data = ar->board->data; - len = ar->board->size; + data = ar->normal_mode_fw.board->data; + len = ar->normal_mode_fw.board->size; /* magic has extra null byte padded */ magic_len = strlen(ATH10K_BOARD_MAGIC) + 1; @@ -910,7 +908,7 @@ static int 
ath10k_core_fetch_board_data_api_n(struct ath10k *ar, } out: - if (!ar->board_data || !ar->board_len) { + if (!ar->normal_mode_fw.board_data || !ar->normal_mode_fw.board_len) { ath10k_err(ar, "failed to fetch board data for %s from %s/%s\n", boardname, ar->hw_params.fw.dir, filename); @@ -978,7 +976,8 @@ success: return 0; } -static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name) +static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name, + struct ath10k_fw_file *fw_file) { size_t magic_len, len, ie_len; int ie_id, i, index, bit, ret; @@ -987,15 +986,17 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name) __le32 *timestamp, *version; /* first fetch the firmware file (firmware-*.bin) */ - ar->firmware = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir, name); - if (IS_ERR(ar->firmware)) { + fw_file->firmware = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir, + name); + if (IS_ERR(fw_file->firmware)) { ath10k_err(ar, "could not fetch firmware file '%s/%s': %ld\n", - ar->hw_params.fw.dir, name, PTR_ERR(ar->firmware)); - return PTR_ERR(ar->firmware); + ar->hw_params.fw.dir, name, + PTR_ERR(fw_file->firmware)); + return PTR_ERR(fw_file->firmware); } - data = ar->firmware->data; - len = ar->firmware->size; + data = fw_file->firmware->data; + len = fw_file->firmware->size; /* magic also includes the null byte, check that as well */ magic_len = strlen(ATH10K_FIRMWARE_MAGIC) + 1; @@ -1086,8 +1087,8 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name) "found fw image ie (%zd B)\n", ie_len); - ar->firmware_data = data; - ar->firmware_len = ie_len; + fw_file->firmware_data = data; + fw_file->firmware_len = ie_len; break; case ATH10K_FW_IE_OTP_IMAGE: @@ -1095,8 +1096,8 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name) "found otp image ie (%zd B)\n", ie_len); - ar->otp_data = data; - ar->otp_len = ie_len; + fw_file->otp_data = data; + fw_file->otp_len = ie_len; break; case ATH10K_FW_IE_WMI_OP_VERSION: @@ -1125,8 +1126,8 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name) ath10k_dbg(ar, ATH10K_DBG_BOOT, "found fw code swap image ie (%zd B)\n", ie_len); - ar->swap.firmware_codeswap_data = data; - ar->swap.firmware_codeswap_len = ie_len; + fw_file->codeswap_data = data; + fw_file->codeswap_len = ie_len; break; default: ath10k_warn(ar, "Unknown FW IE: %u\n", @@ -1141,7 +1142,8 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name) data += ie_len; } - if (!ar->firmware_data || !ar->firmware_len) { + if (!fw_file->firmware_data || + !fw_file->firmware_len) { ath10k_warn(ar, "No ATH10K_FW_IE_FW_IMAGE found from '%s/%s', skipping\n", ar->hw_params.fw.dir, name); ret = -ENOMEDIUM; @@ -1165,28 +1167,32 @@ static int ath10k_core_fetch_firmware_files(struct ath10k *ar) ar->fw_api = 5; ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api); - ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API5_FILE); + ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API5_FILE, + &ar->normal_mode_fw.fw_file); if (ret == 0) goto success; ar->fw_api = 4; ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api); - ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API4_FILE); + ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API4_FILE, + &ar->normal_mode_fw.fw_file); if (ret == 0) goto success; ar->fw_api = 3; ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api); - ret = 
ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API3_FILE); + ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API3_FILE, + &ar->normal_mode_fw.fw_file); if (ret == 0) goto success; ar->fw_api = 2; ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api); - ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API2_FILE); + ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API2_FILE, + &ar->normal_mode_fw.fw_file); if (ret) return ret; @@ -1585,7 +1591,8 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar) return 0; } -int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode) +int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode, + const struct ath10k_fw_components *fw) { int status; u32 val; @@ -1594,6 +1601,8 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode) clear_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags); + ar->running_fw = fw; + ath10k_bmi_start(ar); if (ath10k_init_configure_target(ar)) { @@ -1621,7 +1630,7 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode) } } - status = ath10k_download_fw(ar, mode); + status = ath10k_download_fw(ar); if (status) goto err; @@ -1899,7 +1908,8 @@ static int ath10k_core_probe_fw(struct ath10k *ar) mutex_lock(&ar->conf_mutex); - ret = ath10k_core_start(ar, ATH10K_FIRMWARE_MODE_NORMAL); + ret = ath10k_core_start(ar, ATH10K_FIRMWARE_MODE_NORMAL, + &ar->normal_mode_fw); if (ret) { ath10k_err(ar, "could not init core (%d)\n", ret); goto err_unlock; diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h index 2d0cc92f3c5b..b377fd42c0a3 100644 --- a/drivers/net/wireless/ath/ath10k/core.h +++ b/drivers/net/wireless/ath/ath10k/core.h @@ -627,6 +627,27 @@ enum ath10k_tx_pause_reason { ATH10K_TX_PAUSE_MAX, }; +struct ath10k_fw_file { + const struct firmware *firmware; + + const void *firmware_data; + size_t firmware_len; + + const void *otp_data; + size_t otp_len; + + const void *codeswap_data; + size_t codeswap_len; +}; + +struct ath10k_fw_components { + const struct firmware *board; + const void *board_data; + size_t board_len; + + struct ath10k_fw_file fw_file; +}; + struct ath10k { struct ath_common ath_common; struct ieee80211_hw *hw; @@ -714,23 +735,18 @@ struct ath10k { } fw; } hw_params; - const struct firmware *board; - const void *board_data; - size_t board_len; + /* contains the firmware images used with ATH10K_FIRMWARE_MODE_NORMAL */ + struct ath10k_fw_components normal_mode_fw; - const void *otp_data; - size_t otp_len; - - const struct firmware *firmware; - const void *firmware_data; - size_t firmware_len; + /* READ-ONLY images of the running firmware, which can be either + * normal or UTF. Do not modify, release etc! 
+ */ + const struct ath10k_fw_components *running_fw; const struct firmware *pre_cal_file; const struct firmware *cal_file; struct { - const void *firmware_codeswap_data; - size_t firmware_codeswap_len; struct ath10k_swap_code_seg_info *firmware_swap_code_seg_info; } swap; @@ -876,13 +892,12 @@ struct ath10k { struct { /* protected by conf_mutex */ - const struct firmware *utf; + struct ath10k_fw_components utf_mode_fw; char utf_version[32]; - const void *utf_firmware_data; - size_t utf_firmware_len; DECLARE_BITMAP(orig_fw_features, ATH10K_FW_FEATURE_COUNT); enum ath10k_fw_wmi_op_version orig_wmi_op_version; enum ath10k_fw_wmi_op_version op_version; + /* protected by data_lock */ bool utf_monitor; } testmode; @@ -919,7 +934,8 @@ void ath10k_core_get_fw_features_str(struct ath10k *ar, char *buf, size_t max_len); -int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode); +int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode, + const struct ath10k_fw_components *fw_components); int ath10k_wait_for_suspend(struct ath10k *ar, u32 suspend_opt); void ath10k_core_stop(struct ath10k *ar); int ath10k_core_register(struct ath10k *ar, u32 chip_id); diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c index e7d441caa288..27787d23b2bd 100644 --- a/drivers/net/wireless/ath/ath10k/debug.c +++ b/drivers/net/wireless/ath/ath10k/debug.c @@ -126,6 +126,7 @@ EXPORT_SYMBOL(ath10k_info); void ath10k_debug_print_hwfw_info(struct ath10k *ar) { + const struct firmware *firmware; char fw_features[128] = {}; u32 crc = 0; @@ -144,8 +145,9 @@ void ath10k_debug_print_hwfw_info(struct ath10k *ar) config_enabled(CONFIG_ATH10K_DFS_CERTIFIED), config_enabled(CONFIG_NL80211_TESTMODE)); - if (ar->firmware) - crc = crc32_le(0, ar->firmware->data, ar->firmware->size); + firmware = ar->normal_mode_fw.fw_file.firmware; + if (firmware) + crc = crc32_le(0, firmware->data, firmware->size); ath10k_info(ar, "firmware ver %s api %d features %s crc32 %08x\n", ar->hw->wiphy->fw_version, @@ -167,7 +169,8 @@ void ath10k_debug_print_board_info(struct ath10k *ar) ath10k_info(ar, "board_file api %d bmi_id %s crc32 %08x", ar->bd_api, boardinfo, - crc32_le(0, ar->board->data, ar->board->size)); + crc32_le(0, ar->normal_mode_fw.board->data, + ar->normal_mode_fw.board->size)); } void ath10k_debug_print_boot_info(struct ath10k *ar) @@ -2270,23 +2273,28 @@ static ssize_t ath10k_debug_fw_checksums_read(struct file *file, len += scnprintf(buf + len, buf_len - len, "firmware-N.bin\t\t%08x\n", - crc32_le(0, ar->firmware->data, ar->firmware->size)); + crc32_le(0, ar->normal_mode_fw.fw_file.firmware->data, + ar->normal_mode_fw.fw_file.firmware->size)); len += scnprintf(buf + len, buf_len - len, "athwlan\t\t\t%08x\n", - crc32_le(0, ar->firmware_data, ar->firmware_len)); + crc32_le(0, ar->normal_mode_fw.fw_file.firmware_data, + ar->normal_mode_fw.fw_file.firmware_len)); len += scnprintf(buf + len, buf_len - len, "otp\t\t\t%08x\n", - crc32_le(0, ar->otp_data, ar->otp_len)); + crc32_le(0, ar->normal_mode_fw.fw_file.otp_data, + ar->normal_mode_fw.fw_file.otp_len)); len += scnprintf(buf + len, buf_len - len, "codeswap\t\t%08x\n", - crc32_le(0, ar->swap.firmware_codeswap_data, - ar->swap.firmware_codeswap_len)); + crc32_le(0, ar->normal_mode_fw.fw_file.codeswap_data, + ar->normal_mode_fw.fw_file.codeswap_len)); len += scnprintf(buf + len, buf_len - len, "board-N.bin\t\t%08x\n", - crc32_le(0, ar->board->data, ar->board->size)); + crc32_le(0, ar->normal_mode_fw.board->data, + 
ar->normal_mode_fw.board->size)); len += scnprintf(buf + len, buf_len - len, "board\t\t\t%08x\n", - crc32_le(0, ar->board_data, ar->board_len)); + crc32_le(0, ar->normal_mode_fw.board_data, + ar->normal_mode_fw.board_len)); ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len); diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index fb393596f236..56abbf2f2a59 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -4376,7 +4376,8 @@ static int ath10k_start(struct ieee80211_hw *hw) goto err_off; } - ret = ath10k_core_start(ar, ATH10K_FIRMWARE_MODE_NORMAL); + ret = ath10k_core_start(ar, ATH10K_FIRMWARE_MODE_NORMAL, + &ar->normal_mode_fw); if (ret) { ath10k_err(ar, "Could not init core: %d\n", ret); goto err_power_down; diff --git a/drivers/net/wireless/ath/ath10k/swap.c b/drivers/net/wireless/ath/ath10k/swap.c index 3ca3fae408a7..47d449bac86d 100644 --- a/drivers/net/wireless/ath/ath10k/swap.c +++ b/drivers/net/wireless/ath/ath10k/swap.c @@ -171,8 +171,13 @@ int ath10k_swap_code_seg_configure(struct ath10k *ar, void ath10k_swap_code_seg_release(struct ath10k *ar) { ath10k_swap_code_seg_free(ar, ar->swap.firmware_swap_code_seg_info); - ar->swap.firmware_codeswap_data = NULL; - ar->swap.firmware_codeswap_len = 0; + + /* FIXME: these two assignments look to bein wrong place! Shouldn't + * they be in ath10k_core_free_firmware_files() like the rest? + */ + ar->normal_mode_fw.fw_file.codeswap_data = NULL; + ar->normal_mode_fw.fw_file.codeswap_len = 0; + ar->swap.firmware_swap_code_seg_info = NULL; } @@ -180,20 +185,23 @@ int ath10k_swap_code_seg_init(struct ath10k *ar) { int ret; struct ath10k_swap_code_seg_info *seg_info; + const void *codeswap_data; + size_t codeswap_len; - if (!ar->swap.firmware_codeswap_len || !ar->swap.firmware_codeswap_data) + codeswap_data = ar->normal_mode_fw.fw_file.codeswap_data; + codeswap_len = ar->normal_mode_fw.fw_file.codeswap_len; + + if (!codeswap_len || !codeswap_data) return 0; - seg_info = ath10k_swap_code_seg_alloc(ar, - ar->swap.firmware_codeswap_len); + seg_info = ath10k_swap_code_seg_alloc(ar, codeswap_len); if (!seg_info) { ath10k_err(ar, "failed to allocate fw code swap segment\n"); return -ENOMEM; } ret = ath10k_swap_code_seg_fill(ar, seg_info, - ar->swap.firmware_codeswap_data, - ar->swap.firmware_codeswap_len); + codeswap_data, codeswap_len); if (ret) { ath10k_warn(ar, "failed to initialize fw code swap segment: %d\n", diff --git a/drivers/net/wireless/ath/ath10k/testmode.c b/drivers/net/wireless/ath/ath10k/testmode.c index 1d5a2fdcbf56..480fad301fad 100644 --- a/drivers/net/wireless/ath/ath10k/testmode.c +++ b/drivers/net/wireless/ath/ath10k/testmode.c @@ -139,7 +139,8 @@ static int ath10k_tm_cmd_get_version(struct ath10k *ar, struct nlattr *tb[]) return cfg80211_testmode_reply(skb); } -static int ath10k_tm_fetch_utf_firmware_api_2(struct ath10k *ar) +static int ath10k_tm_fetch_utf_firmware_api_2(struct ath10k *ar, + struct ath10k_fw_file *fw_file) { size_t len, magic_len, ie_len; struct ath10k_fw_ie *hdr; @@ -152,15 +153,15 @@ static int ath10k_tm_fetch_utf_firmware_api_2(struct ath10k *ar) ar->hw_params.fw.dir, ATH10K_FW_UTF_API2_FILE); /* load utf firmware image */ - ret = request_firmware(&ar->testmode.utf, filename, ar->dev); + ret = request_firmware(&fw_file->firmware, filename, ar->dev); if (ret) { ath10k_warn(ar, "failed to retrieve utf firmware '%s': %d\n", filename, ret); return ret; } - data = ar->testmode.utf->data; - len = 
ar->testmode.utf->size; + data = fw_file->firmware->data; + len = fw_file->firmware->size; /* FIXME: call release_firmware() in error cases */ @@ -222,8 +223,8 @@ static int ath10k_tm_fetch_utf_firmware_api_2(struct ath10k *ar) "testmode found fw image ie (%zd B)\n", ie_len); - ar->testmode.utf_firmware_data = data; - ar->testmode.utf_firmware_len = ie_len; + fw_file->firmware_data = data; + fw_file->firmware_len = ie_len; break; case ATH10K_FW_IE_WMI_OP_VERSION: if (ie_len != sizeof(u32)) @@ -245,7 +246,7 @@ static int ath10k_tm_fetch_utf_firmware_api_2(struct ath10k *ar) data += ie_len; } - if (!ar->testmode.utf_firmware_data || !ar->testmode.utf_firmware_len) { + if (!fw_file->firmware_data || !fw_file->firmware_len) { ath10k_err(ar, "No ATH10K_FW_IE_FW_IMAGE found\n"); ret = -EINVAL; goto err; @@ -254,12 +255,13 @@ static int ath10k_tm_fetch_utf_firmware_api_2(struct ath10k *ar) return 0; err: - release_firmware(ar->testmode.utf); + release_firmware(fw_file->firmware); return ret; } -static int ath10k_tm_fetch_utf_firmware_api_1(struct ath10k *ar) +static int ath10k_tm_fetch_utf_firmware_api_1(struct ath10k *ar, + struct ath10k_fw_file *fw_file) { char filename[100]; int ret; @@ -268,7 +270,7 @@ static int ath10k_tm_fetch_utf_firmware_api_1(struct ath10k *ar) ar->hw_params.fw.dir, ATH10K_FW_UTF_FILE); /* load utf firmware image */ - ret = request_firmware(&ar->testmode.utf, filename, ar->dev); + ret = request_firmware(&fw_file->firmware, filename, ar->dev); if (ret) { ath10k_warn(ar, "failed to retrieve utf firmware '%s': %d\n", filename, ret); @@ -282,23 +284,24 @@ static int ath10k_tm_fetch_utf_firmware_api_1(struct ath10k *ar) */ ar->testmode.op_version = ATH10K_FW_WMI_OP_VERSION_10_1; - ar->testmode.utf_firmware_data = ar->testmode.utf->data; - ar->testmode.utf_firmware_len = ar->testmode.utf->size; + fw_file->firmware_data = fw_file->firmware->data; + fw_file->firmware_len = fw_file->firmware->size; return 0; } static int ath10k_tm_fetch_firmware(struct ath10k *ar) { + struct ath10k_fw_components *utf_mode_fw; int ret; - ret = ath10k_tm_fetch_utf_firmware_api_2(ar); + ret = ath10k_tm_fetch_utf_firmware_api_2(ar, &ar->testmode.utf_mode_fw.fw_file); if (ret == 0) { ath10k_dbg(ar, ATH10K_DBG_TESTMODE, "testmode using fw utf api 2"); - return 0; + goto out; } - ret = ath10k_tm_fetch_utf_firmware_api_1(ar); + ret = ath10k_tm_fetch_utf_firmware_api_1(ar, &ar->testmode.utf_mode_fw.fw_file); if (ret) { ath10k_err(ar, "failed to fetch utf firmware binary: %d", ret); return ret; @@ -306,6 +309,21 @@ static int ath10k_tm_fetch_firmware(struct ath10k *ar) ath10k_dbg(ar, ATH10K_DBG_TESTMODE, "testmode using utf api 1"); +out: + utf_mode_fw = &ar->testmode.utf_mode_fw; + + /* Use the same board data file as the normal firmware uses (but + * it's still "owned" by normal_mode_fw so we shouldn't free it. 
+ */ + utf_mode_fw->board_data = ar->normal_mode_fw.board_data; + utf_mode_fw->board_len = ar->normal_mode_fw.board_len; + + if (!utf_mode_fw->fw_file.otp_data) { + ath10k_info(ar, "utf.bin didn't contain otp binary, taking it from the normal mode firmware"); + utf_mode_fw->fw_file.otp_data = ar->normal_mode_fw.fw_file.otp_data; + utf_mode_fw->fw_file.otp_len = ar->normal_mode_fw.fw_file.otp_len; + } + return 0; } @@ -329,7 +347,7 @@ static int ath10k_tm_cmd_utf_start(struct ath10k *ar, struct nlattr *tb[]) goto err; } - if (WARN_ON(ar->testmode.utf != NULL)) { + if (WARN_ON(ar->testmode.utf_mode_fw.fw_file.firmware != NULL)) { /* utf image is already downloaded, it shouldn't be */ ret = -EEXIST; goto err; @@ -364,7 +382,8 @@ static int ath10k_tm_cmd_utf_start(struct ath10k *ar, struct nlattr *tb[]) goto err_fw_features; } - ret = ath10k_core_start(ar, ATH10K_FIRMWARE_MODE_UTF); + ret = ath10k_core_start(ar, ATH10K_FIRMWARE_MODE_UTF, + &ar->testmode.utf_mode_fw); if (ret) { ath10k_err(ar, "failed to start core (testmode): %d\n", ret); ar->state = ATH10K_STATE_OFF; @@ -393,8 +412,8 @@ err_fw_features: sizeof(ar->fw_features)); ar->wmi.op_version = ar->testmode.orig_wmi_op_version; - release_firmware(ar->testmode.utf); - ar->testmode.utf = NULL; + release_firmware(ar->testmode.utf_mode_fw.fw_file.firmware); + ar->testmode.utf_mode_fw.fw_file.firmware = NULL; err: mutex_unlock(&ar->conf_mutex); @@ -420,8 +439,8 @@ static void __ath10k_tm_cmd_utf_stop(struct ath10k *ar) sizeof(ar->fw_features)); ar->wmi.op_version = ar->testmode.orig_wmi_op_version; - release_firmware(ar->testmode.utf); - ar->testmode.utf = NULL; + release_firmware(ar->testmode.utf_mode_fw.fw_file.firmware); + ar->testmode.utf_mode_fw.fw_file.firmware = NULL; ar->state = ATH10K_STATE_OFF; } From 453173550256542c20b24a8d85b806941b77ac76 Mon Sep 17 00:00:00 2001 From: Kalle Valo Date: Wed, 20 Apr 2016 19:45:05 +0300 Subject: [PATCH 0850/1649] ath10k: move fw_version inside struct ath10k_fw_file Preparation for testmode.c to use ath10k_core_fetch_board_data_api_n(). 
Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/core.c | 13 +++++++++---- drivers/net/wireless/ath/ath10k/core.h | 3 ++- drivers/net/wireless/ath/ath10k/testmode.c | 12 ++++++------ 3 files changed, 17 insertions(+), 11 deletions(-) diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c index b2efece5b32d..015241aec608 100644 --- a/drivers/net/wireless/ath/ath10k/core.c +++ b/drivers/net/wireless/ath/ath10k/core.c @@ -1039,15 +1039,15 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name, switch (ie_id) { case ATH10K_FW_IE_FW_VERSION: - if (ie_len > sizeof(ar->hw->wiphy->fw_version) - 1) + if (ie_len > sizeof(fw_file->fw_version) - 1) break; - memcpy(ar->hw->wiphy->fw_version, data, ie_len); - ar->hw->wiphy->fw_version[ie_len] = '\0'; + memcpy(fw_file->fw_version, data, ie_len); + fw_file->fw_version[ie_len] = '\0'; ath10k_dbg(ar, ATH10K_DBG_BOOT, "found fw version %s\n", - ar->hw->wiphy->fw_version); + fw_file->fw_version); break; case ATH10K_FW_IE_TIMESTAMP: if (ie_len != sizeof(u32)) @@ -1866,6 +1866,11 @@ static int ath10k_core_probe_fw(struct ath10k *ar) goto err_power_down; } + BUILD_BUG_ON(sizeof(ar->hw->wiphy->fw_version) != + sizeof(ar->normal_mode_fw.fw_file.fw_version)); + memcpy(ar->hw->wiphy->fw_version, ar->normal_mode_fw.fw_file.fw_version, + sizeof(ar->hw->wiphy->fw_version)); + ath10k_debug_print_hwfw_info(ar); ret = ath10k_core_pre_cal_download(ar); diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h index b377fd42c0a3..432b1590f6e6 100644 --- a/drivers/net/wireless/ath/ath10k/core.h +++ b/drivers/net/wireless/ath/ath10k/core.h @@ -630,6 +630,8 @@ enum ath10k_tx_pause_reason { struct ath10k_fw_file { const struct firmware *firmware; + char fw_version[ETHTOOL_FWVERS_LEN]; + const void *firmware_data; size_t firmware_len; @@ -893,7 +895,6 @@ struct ath10k { struct { /* protected by conf_mutex */ struct ath10k_fw_components utf_mode_fw; - char utf_version[32]; DECLARE_BITMAP(orig_fw_features, ATH10K_FW_FEATURE_COUNT); enum ath10k_fw_wmi_op_version orig_wmi_op_version; enum ath10k_fw_wmi_op_version op_version; diff --git a/drivers/net/wireless/ath/ath10k/testmode.c b/drivers/net/wireless/ath/ath10k/testmode.c index 480fad301fad..2c4a5d31cf0c 100644 --- a/drivers/net/wireless/ath/ath10k/testmode.c +++ b/drivers/net/wireless/ath/ath10k/testmode.c @@ -205,15 +205,15 @@ static int ath10k_tm_fetch_utf_firmware_api_2(struct ath10k *ar, switch (ie_id) { case ATH10K_FW_IE_FW_VERSION: - if (ie_len > sizeof(ar->testmode.utf_version) - 1) + if (ie_len > sizeof(fw_file->fw_version) - 1) break; - memcpy(ar->testmode.utf_version, data, ie_len); - ar->testmode.utf_version[ie_len] = '\0'; + memcpy(fw_file->fw_version, data, ie_len); + fw_file->fw_version[ie_len] = '\0'; ath10k_dbg(ar, ATH10K_DBG_TESTMODE, "testmode found fw utf version %s\n", - ar->testmode.utf_version); + fw_file->fw_version); break; case ATH10K_FW_IE_TIMESTAMP: /* ignore timestamp, but don't warn about it either */ @@ -392,8 +392,8 @@ static int ath10k_tm_cmd_utf_start(struct ath10k *ar, struct nlattr *tb[]) ar->state = ATH10K_STATE_UTF; - if (strlen(ar->testmode.utf_version) > 0) - ver = ar->testmode.utf_version; + if (strlen(ar->testmode.utf_mode_fw.fw_file.fw_version) > 0) + ver = ar->testmode.utf_mode_fw.fw_file.fw_version; else ver = "API 1"; From c4cdf753ed4287467248126a4fac072fbba53b31 Mon Sep 17 00:00:00 2001 From: Kalle Valo Date: Wed, 20 Apr 2016 19:45:18 +0300 Subject: [PATCH 0851/1649] 
ath10k: move fw_features to struct ath10k_fw_file Preparation for testmode.c to use ath10k_core_fetch_board_data_api_n(). Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/core.c | 28 ++++++++++++---------- drivers/net/wireless/ath/ath10k/core.h | 5 ++-- drivers/net/wireless/ath/ath10k/htt_rx.c | 2 +- drivers/net/wireless/ath/ath10k/htt_tx.c | 9 ++++--- drivers/net/wireless/ath/ath10k/mac.c | 14 ++++++----- drivers/net/wireless/ath/ath10k/testmode.c | 14 ----------- drivers/net/wireless/ath/ath10k/wmi.c | 5 ++-- drivers/net/wireless/ath/ath10k/wow.c | 7 +++--- 8 files changed, 39 insertions(+), 45 deletions(-) diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c index 015241aec608..71b8ca71d1da 100644 --- a/drivers/net/wireless/ath/ath10k/core.c +++ b/drivers/net/wireless/ath/ath10k/core.c @@ -261,7 +261,7 @@ void ath10k_core_get_fw_features_str(struct ath10k *ar, int i; for (i = 0; i < ATH10K_FW_FEATURE_COUNT; i++) { - if (test_bit(i, ar->fw_features)) { + if (test_bit(i, ar->normal_mode_fw.fw_file.fw_features)) { if (len > 0) len += scnprintf(buf + len, buf_len - len, ","); @@ -627,7 +627,7 @@ static int ath10k_download_and_run_otp(struct ath10k *ar) ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot otp execute result %d\n", result); if (!(skip_otp || test_bit(ATH10K_FW_FEATURE_IGNORE_OTP_RESULT, - ar->fw_features)) && + ar->running_fw->fw_file.fw_features)) && result != 0) { ath10k_err(ar, "otp calibration failed: %d", result); return -EINVAL; @@ -1074,13 +1074,13 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name, ath10k_dbg(ar, ATH10K_DBG_BOOT, "Enabling feature bit: %i\n", i); - __set_bit(i, ar->fw_features); + __set_bit(i, fw_file->fw_features); } } ath10k_dbg_dump(ar, ATH10K_DBG_BOOT, "features", "", - ar->fw_features, - sizeof(ar->fw_features)); + ar->running_fw->fw_file.fw_features, + sizeof(fw_file->fw_features)); break; case ATH10K_FW_IE_FW_IMAGE: ath10k_dbg(ar, ATH10K_DBG_BOOT, @@ -1430,8 +1430,10 @@ static void ath10k_core_restart(struct work_struct *work) static int ath10k_core_init_firmware_features(struct ath10k *ar) { - if (test_bit(ATH10K_FW_FEATURE_WMI_10_2, ar->fw_features) && - !test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) { + struct ath10k_fw_file *fw_file = &ar->normal_mode_fw.fw_file; + + if (test_bit(ATH10K_FW_FEATURE_WMI_10_2, fw_file->fw_features) && + !test_bit(ATH10K_FW_FEATURE_WMI_10X, fw_file->fw_features)) { ath10k_err(ar, "feature bits corrupted: 10.2 feature requires 10.x feature to be set as well"); return -EINVAL; } @@ -1450,7 +1452,7 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar) break; case ATH10K_CRYPT_MODE_SW: if (!test_bit(ATH10K_FW_FEATURE_RAW_MODE_SUPPORT, - ar->fw_features)) { + fw_file->fw_features)) { ath10k_err(ar, "cryptmode > 0 requires raw mode support from firmware"); return -EINVAL; } @@ -1469,7 +1471,7 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar) if (rawmode) { if (!test_bit(ATH10K_FW_FEATURE_RAW_MODE_SUPPORT, - ar->fw_features)) { + fw_file->fw_features)) { ath10k_err(ar, "rawmode = 1 requires support from firmware"); return -EINVAL; } @@ -1495,9 +1497,9 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar) * ATH10K_FW_IE_WMI_OP_VERSION. 
*/ if (ar->wmi.op_version == ATH10K_FW_WMI_OP_VERSION_UNSET) { - if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) { + if (test_bit(ATH10K_FW_FEATURE_WMI_10X, fw_file->fw_features)) { if (test_bit(ATH10K_FW_FEATURE_WMI_10_2, - ar->fw_features)) + fw_file->fw_features)) ar->wmi.op_version = ATH10K_FW_WMI_OP_VERSION_10_2; else ar->wmi.op_version = ATH10K_FW_WMI_OP_VERSION_10_1; @@ -1553,7 +1555,7 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar) ar->max_spatial_stream = ar->hw_params.max_spatial_stream; if (test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL, - ar->fw_features)) + fw_file->fw_features)) ar->htt.max_num_pending_tx = TARGET_10_4_NUM_MSDU_DESC_PFC; else ar->htt.max_num_pending_tx = TARGET_10_4_NUM_MSDU_DESC; @@ -1621,7 +1623,7 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode, * to set the clock source once the target is initialized. */ if (test_bit(ATH10K_FW_FEATURE_SUPPORTS_SKIP_CLOCK_INIT, - ar->fw_features)) { + ar->running_fw->fw_file.fw_features)) { status = ath10k_bmi_write32(ar, hi_skip_clock_init, 1); if (status) { ath10k_err(ar, "could not write to skip_clock_init: %d\n", diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h index 432b1590f6e6..18e21b4fe034 100644 --- a/drivers/net/wireless/ath/ath10k/core.h +++ b/drivers/net/wireless/ath/ath10k/core.h @@ -632,6 +632,8 @@ struct ath10k_fw_file { char fw_version[ETHTOOL_FWVERS_LEN]; + DECLARE_BITMAP(fw_features, ATH10K_FW_FEATURE_COUNT); + const void *firmware_data; size_t firmware_len; @@ -675,8 +677,6 @@ struct ath10k { /* protected by conf_mutex */ bool ani_enabled; - DECLARE_BITMAP(fw_features, ATH10K_FW_FEATURE_COUNT); - bool p2p; struct { @@ -895,7 +895,6 @@ struct ath10k { struct { /* protected by conf_mutex */ struct ath10k_fw_components utf_mode_fw; - DECLARE_BITMAP(orig_fw_features, ATH10K_FW_FEATURE_COUNT); enum ath10k_fw_wmi_op_version orig_wmi_op_version; enum ath10k_fw_wmi_op_version op_version; diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c index 9390897a00c6..5b777c24d2ba 100644 --- a/drivers/net/wireless/ath/ath10k/htt_rx.c +++ b/drivers/net/wireless/ath/ath10k/htt_rx.c @@ -966,7 +966,7 @@ static int ath10k_htt_rx_nwifi_hdrlen(struct ath10k *ar, int len = ieee80211_hdrlen(hdr->frame_control); if (!test_bit(ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING, - ar->fw_features)) + ar->running_fw->fw_file.fw_features)) len = round_up(len, 4); return len; diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c index 9baa2e677f8a..6269c610b0a3 100644 --- a/drivers/net/wireless/ath/ath10k/htt_tx.c +++ b/drivers/net/wireless/ath/ath10k/htt_tx.c @@ -267,7 +267,8 @@ static void ath10k_htt_tx_free_txq(struct ath10k_htt *htt) struct ath10k *ar = htt->ar; size_t size; - if (!test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL, ar->fw_features)) + if (!test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL, + ar->running_fw->fw_file.fw_features)) return; size = sizeof(*htt->tx_q_state.vaddr); @@ -282,7 +283,8 @@ static int ath10k_htt_tx_alloc_txq(struct ath10k_htt *htt) size_t size; int ret; - if (!test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL, ar->fw_features)) + if (!test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL, + ar->running_fw->fw_file.fw_features)) return 0; htt->tx_q_state.num_peers = HTT_TX_Q_STATE_NUM_PEERS; @@ -513,7 +515,8 @@ int ath10k_htt_send_frag_desc_bank_cfg(struct ath10k_htt *htt) info |= SM(htt->tx_q_state.type, 
HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_DEPTH_TYPE); - if (test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL, ar->fw_features)) + if (test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL, + ar->running_fw->fw_file.fw_features)) info |= HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_VALID; cfg = &cmd->frag_desc_bank_cfg; diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index 56abbf2f2a59..0fd0fc111c40 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -1772,7 +1772,7 @@ static int ath10k_mac_vif_setup_ps(struct ath10k_vif *arvif) if (enable_ps && ath10k_mac_num_vifs_started(ar) > 1 && !test_bit(ATH10K_FW_FEATURE_MULTI_VIF_PS_SUPPORT, - ar->fw_features)) { + ar->running_fw->fw_file.fw_features)) { ath10k_warn(ar, "refusing to enable ps on vdev %i: not supported by fw\n", arvif->vdev_id); enable_ps = false; @@ -2060,7 +2060,8 @@ static void ath10k_peer_assoc_h_crypto(struct ath10k *ar, } if (sta->mfp && - test_bit(ATH10K_FW_FEATURE_MFP_SUPPORT, ar->fw_features)) { + test_bit(ATH10K_FW_FEATURE_MFP_SUPPORT, + ar->running_fw->fw_file.fw_features)) { arg->peer_flags |= ar->wmi.peer_flags->pmf; } } @@ -3207,7 +3208,8 @@ ath10k_mac_tx_h_get_txmode(struct ath10k *ar, */ if (ar->htt.target_version_major < 3 && (ieee80211_is_nullfunc(fc) || ieee80211_is_qos_nullfunc(fc)) && - !test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX, ar->fw_features)) + !test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX, + ar->running_fw->fw_file.fw_features)) return ATH10K_HW_TXRX_MGMT; /* Workaround: @@ -3394,7 +3396,7 @@ ath10k_mac_tx_h_get_txpath(struct ath10k *ar, return ATH10K_MAC_TX_HTT; case ATH10K_HW_TXRX_MGMT: if (test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX, - ar->fw_features)) + ar->running_fw->fw_file.fw_features)) return ATH10K_MAC_TX_WMI_MGMT; else if (ar->htt.target_version_major >= 3) return ATH10K_MAC_TX_HTT; @@ -4435,7 +4437,7 @@ static int ath10k_start(struct ieee80211_hw *hw) } if (test_bit(ATH10K_FW_FEATURE_SUPPORTS_ADAPTIVE_CCA, - ar->fw_features)) { + ar->running_fw->fw_file.fw_features)) { ret = ath10k_wmi_pdev_enable_adaptive_cca(ar, 1, WMI_CCA_DETECT_LEVEL_AUTO, WMI_CCA_DETECT_MARGIN_AUTO); @@ -7694,7 +7696,7 @@ int ath10k_mac_register(struct ath10k *ar) ar->hw->wiphy->available_antennas_rx = ar->cfg_rx_chainmask; ar->hw->wiphy->available_antennas_tx = ar->cfg_tx_chainmask; - if (!test_bit(ATH10K_FW_FEATURE_NO_P2P, ar->fw_features)) + if (!test_bit(ATH10K_FW_FEATURE_NO_P2P, ar->normal_mode_fw.fw_file.fw_features)) ar->hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_P2P_DEVICE) | BIT(NL80211_IFTYPE_P2P_CLIENT) | diff --git a/drivers/net/wireless/ath/ath10k/testmode.c b/drivers/net/wireless/ath/ath10k/testmode.c index 2c4a5d31cf0c..102539409f54 100644 --- a/drivers/net/wireless/ath/ath10k/testmode.c +++ b/drivers/net/wireless/ath/ath10k/testmode.c @@ -362,14 +362,8 @@ static int ath10k_tm_cmd_utf_start(struct ath10k *ar, struct nlattr *tb[]) spin_lock_bh(&ar->data_lock); ar->testmode.utf_monitor = true; spin_unlock_bh(&ar->data_lock); - BUILD_BUG_ON(sizeof(ar->fw_features) != - sizeof(ar->testmode.orig_fw_features)); - memcpy(ar->testmode.orig_fw_features, ar->fw_features, - sizeof(ar->fw_features)); ar->testmode.orig_wmi_op_version = ar->wmi.op_version; - memset(ar->fw_features, 0, sizeof(ar->fw_features)); - ar->wmi.op_version = ar->testmode.op_version; ath10k_dbg(ar, ATH10K_DBG_TESTMODE, "testmode wmi version %d\n", @@ -407,9 +401,6 @@ err_power_down: ath10k_hif_power_down(ar); err_fw_features: - /* return the original firmware features */ - 
memcpy(ar->fw_features, ar->testmode.orig_fw_features, - sizeof(ar->fw_features)); ar->wmi.op_version = ar->testmode.orig_wmi_op_version; release_firmware(ar->testmode.utf_mode_fw.fw_file.firmware); @@ -434,11 +425,6 @@ static void __ath10k_tm_cmd_utf_stop(struct ath10k *ar) spin_unlock_bh(&ar->data_lock); - /* return the original firmware features */ - memcpy(ar->fw_features, ar->testmode.orig_fw_features, - sizeof(ar->fw_features)); - ar->wmi.op_version = ar->testmode.orig_wmi_op_version; - release_firmware(ar->testmode.utf_mode_fw.fw_file.firmware); ar->testmode.utf_mode_fw.fw_file.firmware = NULL; diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c index 254844b37d92..d5279ce32974 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.c +++ b/drivers/net/wireless/ath/ath10k/wmi.c @@ -2149,7 +2149,8 @@ static int ath10k_wmi_op_pull_mgmt_rx_ev(struct ath10k *ar, struct sk_buff *skb, u32 msdu_len; u32 len; - if (test_bit(ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX, ar->fw_features)) { + if (test_bit(ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX, + ar->running_fw->fw_file.fw_features)) { ev_v2 = (struct wmi_mgmt_rx_event_v2 *)skb->data; ev_hdr = &ev_v2->hdr.v1; pull_len = sizeof(*ev_v2); @@ -4634,7 +4635,7 @@ static void ath10k_wmi_event_service_ready_work(struct work_struct *work) if (test_bit(WMI_SERVICE_PEER_CACHING, ar->wmi.svc_map)) { if (test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL, - ar->fw_features)) + ar->running_fw->fw_file.fw_features)) ar->num_active_peers = TARGET_10_4_QCACHE_ACTIVE_PEERS_PFC + ar->max_num_vdevs; else diff --git a/drivers/net/wireless/ath/ath10k/wow.c b/drivers/net/wireless/ath/ath10k/wow.c index 8e02b381990f..77100d42f401 100644 --- a/drivers/net/wireless/ath/ath10k/wow.c +++ b/drivers/net/wireless/ath/ath10k/wow.c @@ -233,7 +233,7 @@ int ath10k_wow_op_suspend(struct ieee80211_hw *hw, mutex_lock(&ar->conf_mutex); if (WARN_ON(!test_bit(ATH10K_FW_FEATURE_WOWLAN_SUPPORT, - ar->fw_features))) { + ar->running_fw->fw_file.fw_features))) { ret = 1; goto exit; } @@ -285,7 +285,7 @@ int ath10k_wow_op_resume(struct ieee80211_hw *hw) mutex_lock(&ar->conf_mutex); if (WARN_ON(!test_bit(ATH10K_FW_FEATURE_WOWLAN_SUPPORT, - ar->fw_features))) { + ar->running_fw->fw_file.fw_features))) { ret = 1; goto exit; } @@ -325,7 +325,8 @@ exit: int ath10k_wow_init(struct ath10k *ar) { - if (!test_bit(ATH10K_FW_FEATURE_WOWLAN_SUPPORT, ar->fw_features)) + if (!test_bit(ATH10K_FW_FEATURE_WOWLAN_SUPPORT, + ar->running_fw->fw_file.fw_features)) return 0; if (WARN_ON(!test_bit(WMI_SERVICE_WOW, ar->wmi.svc_map))) From bf3c13ab49965f0517b579dc490d612d074d535a Mon Sep 17 00:00:00 2001 From: Kalle Valo Date: Wed, 20 Apr 2016 19:45:33 +0300 Subject: [PATCH 0852/1649] ath10k: move wmi_op_version to struct ath10k_fw_file Preparation for testmode.c to use ath10k_core_fetch_board_data_api_n(). 
Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/core.c | 20 ++++++++++---------- drivers/net/wireless/ath/ath10k/core.h | 5 ++--- drivers/net/wireless/ath/ath10k/debug.c | 2 +- drivers/net/wireless/ath/ath10k/mac.c | 2 +- drivers/net/wireless/ath/ath10k/testmode.c | 17 ++++++----------- drivers/net/wireless/ath/ath10k/wmi.c | 4 ++-- 6 files changed, 22 insertions(+), 28 deletions(-) diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c index 71b8ca71d1da..a7c99355a7c2 100644 --- a/drivers/net/wireless/ath/ath10k/core.c +++ b/drivers/net/wireless/ath/ath10k/core.c @@ -1106,10 +1106,10 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name, version = (__le32 *)data; - ar->wmi.op_version = le32_to_cpup(version); + fw_file->wmi_op_version = le32_to_cpup(version); ath10k_dbg(ar, ATH10K_DBG_BOOT, "found fw ie wmi op version %d\n", - ar->wmi.op_version); + fw_file->wmi_op_version); break; case ATH10K_FW_IE_HTT_OP_VERSION: if (ie_len != sizeof(u32)) @@ -1438,9 +1438,9 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar) return -EINVAL; } - if (ar->wmi.op_version >= ATH10K_FW_WMI_OP_VERSION_MAX) { + if (fw_file->wmi_op_version >= ATH10K_FW_WMI_OP_VERSION_MAX) { ath10k_err(ar, "unsupported WMI OP version (max %d): %d\n", - ATH10K_FW_WMI_OP_VERSION_MAX, ar->wmi.op_version); + ATH10K_FW_WMI_OP_VERSION_MAX, fw_file->wmi_op_version); return -EINVAL; } @@ -1496,19 +1496,19 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar) /* Backwards compatibility for firmwares without * ATH10K_FW_IE_WMI_OP_VERSION. */ - if (ar->wmi.op_version == ATH10K_FW_WMI_OP_VERSION_UNSET) { + if (fw_file->wmi_op_version == ATH10K_FW_WMI_OP_VERSION_UNSET) { if (test_bit(ATH10K_FW_FEATURE_WMI_10X, fw_file->fw_features)) { if (test_bit(ATH10K_FW_FEATURE_WMI_10_2, fw_file->fw_features)) - ar->wmi.op_version = ATH10K_FW_WMI_OP_VERSION_10_2; + fw_file->wmi_op_version = ATH10K_FW_WMI_OP_VERSION_10_2; else - ar->wmi.op_version = ATH10K_FW_WMI_OP_VERSION_10_1; + fw_file->wmi_op_version = ATH10K_FW_WMI_OP_VERSION_10_1; } else { - ar->wmi.op_version = ATH10K_FW_WMI_OP_VERSION_MAIN; + fw_file->wmi_op_version = ATH10K_FW_WMI_OP_VERSION_MAIN; } } - switch (ar->wmi.op_version) { + switch (fw_file->wmi_op_version) { case ATH10K_FW_WMI_OP_VERSION_MAIN: ar->max_num_peers = TARGET_NUM_PEERS; ar->max_num_stations = TARGET_NUM_STATIONS; @@ -1570,7 +1570,7 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar) * ATH10K_FW_IE_HTT_OP_VERSION. 
*/ if (ar->htt.op_version == ATH10K_FW_HTT_OP_VERSION_UNSET) { - switch (ar->wmi.op_version) { + switch (fw_file->wmi_op_version) { case ATH10K_FW_WMI_OP_VERSION_MAIN: ar->htt.op_version = ATH10K_FW_HTT_OP_VERSION_MAIN; break; diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h index 18e21b4fe034..7d709f848fac 100644 --- a/drivers/net/wireless/ath/ath10k/core.h +++ b/drivers/net/wireless/ath/ath10k/core.h @@ -139,7 +139,6 @@ struct ath10k_mem_chunk { }; struct ath10k_wmi { - enum ath10k_fw_wmi_op_version op_version; enum ath10k_htc_ep_id eid; struct completion service_ready; struct completion unified_ready; @@ -634,6 +633,8 @@ struct ath10k_fw_file { DECLARE_BITMAP(fw_features, ATH10K_FW_FEATURE_COUNT); + enum ath10k_fw_wmi_op_version wmi_op_version; + const void *firmware_data; size_t firmware_len; @@ -895,8 +896,6 @@ struct ath10k { struct { /* protected by conf_mutex */ struct ath10k_fw_components utf_mode_fw; - enum ath10k_fw_wmi_op_version orig_wmi_op_version; - enum ath10k_fw_wmi_op_version op_version; /* protected by data_lock */ bool utf_monitor; diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c index 27787d23b2bd..8a63ce5c6e09 100644 --- a/drivers/net/wireless/ath/ath10k/debug.c +++ b/drivers/net/wireless/ath/ath10k/debug.c @@ -178,7 +178,7 @@ void ath10k_debug_print_boot_info(struct ath10k *ar) ath10k_info(ar, "htt-ver %d.%d wmi-op %d htt-op %d cal %s max-sta %d raw %d hwcrypto %d\n", ar->htt.target_version_major, ar->htt.target_version_minor, - ar->wmi.op_version, + ar->normal_mode_fw.fw_file.wmi_op_version, ar->htt.op_version, ath10k_cal_mode_str(ar->cal_mode), ar->max_num_stations, diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index 0fd0fc111c40..5fb912acc0a8 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -7786,7 +7786,7 @@ int ath10k_mac_register(struct ath10k *ar) */ ar->hw->offchannel_tx_hw_queue = IEEE80211_MAX_QUEUES - 1; - switch (ar->wmi.op_version) { + switch (ar->running_fw->fw_file.wmi_op_version) { case ATH10K_FW_WMI_OP_VERSION_MAIN: ar->hw->wiphy->iface_combinations = ath10k_if_comb; ar->hw->wiphy->n_iface_combinations = diff --git a/drivers/net/wireless/ath/ath10k/testmode.c b/drivers/net/wireless/ath/ath10k/testmode.c index 102539409f54..3d4418969697 100644 --- a/drivers/net/wireless/ath/ath10k/testmode.c +++ b/drivers/net/wireless/ath/ath10k/testmode.c @@ -230,9 +230,9 @@ static int ath10k_tm_fetch_utf_firmware_api_2(struct ath10k *ar, if (ie_len != sizeof(u32)) break; version = (__le32 *)data; - ar->testmode.op_version = le32_to_cpup(version); + fw_file->wmi_op_version = le32_to_cpup(version); ath10k_dbg(ar, ATH10K_DBG_TESTMODE, "testmode found fw ie wmi op version %d\n", - ar->testmode.op_version); + fw_file->wmi_op_version); break; default: ath10k_warn(ar, "Unknown testmode FW IE: %u\n", @@ -283,7 +283,7 @@ static int ath10k_tm_fetch_utf_firmware_api_1(struct ath10k *ar, * correct WMI interface. 
*/ - ar->testmode.op_version = ATH10K_FW_WMI_OP_VERSION_10_1; + fw_file->wmi_op_version = ATH10K_FW_WMI_OP_VERSION_10_1; fw_file->firmware_data = fw_file->firmware->data; fw_file->firmware_len = fw_file->firmware->size; @@ -363,17 +363,14 @@ static int ath10k_tm_cmd_utf_start(struct ath10k *ar, struct nlattr *tb[]) ar->testmode.utf_monitor = true; spin_unlock_bh(&ar->data_lock); - ar->testmode.orig_wmi_op_version = ar->wmi.op_version; - ar->wmi.op_version = ar->testmode.op_version; - ath10k_dbg(ar, ATH10K_DBG_TESTMODE, "testmode wmi version %d\n", - ar->wmi.op_version); + ar->testmode.utf_mode_fw.fw_file.wmi_op_version); ret = ath10k_hif_power_up(ar); if (ret) { ath10k_err(ar, "failed to power up hif (testmode): %d\n", ret); ar->state = ATH10K_STATE_OFF; - goto err_fw_features; + goto err_release_utf_mode_fw; } ret = ath10k_core_start(ar, ATH10K_FIRMWARE_MODE_UTF, @@ -400,9 +397,7 @@ static int ath10k_tm_cmd_utf_start(struct ath10k *ar, struct nlattr *tb[]) err_power_down: ath10k_hif_power_down(ar); -err_fw_features: - ar->wmi.op_version = ar->testmode.orig_wmi_op_version; - +err_release_utf_mode_fw: release_firmware(ar->testmode.utf_mode_fw.fw_file.firmware); ar->testmode.utf_mode_fw.fw_file.firmware = NULL; diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c index d5279ce32974..a1afb2e2b05a 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.c +++ b/drivers/net/wireless/ath/ath10k/wmi.c @@ -7865,7 +7865,7 @@ static const struct wmi_ops wmi_10_4_ops = { int ath10k_wmi_attach(struct ath10k *ar) { - switch (ar->wmi.op_version) { + switch (ar->running_fw->fw_file.wmi_op_version) { case ATH10K_FW_WMI_OP_VERSION_10_4: ar->wmi.ops = &wmi_10_4_ops; ar->wmi.cmd = &wmi_10_4_cmd_map; @@ -7907,7 +7907,7 @@ int ath10k_wmi_attach(struct ath10k *ar) case ATH10K_FW_WMI_OP_VERSION_UNSET: case ATH10K_FW_WMI_OP_VERSION_MAX: ath10k_err(ar, "unsupported WMI op version: %d\n", - ar->wmi.op_version); + ar->running_fw->fw_file.wmi_op_version); return -EINVAL; } From 77561f9394f8553cce487b12b15b4879ecbaf6d7 Mon Sep 17 00:00:00 2001 From: Kalle Valo Date: Wed, 20 Apr 2016 19:45:47 +0300 Subject: [PATCH 0853/1649] ath10k: move htt_op_version to struct ath10k_fw_file Preparation for testmode.c to use ath10k_core_fetch_board_data_api_n(). Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/core.c | 12 ++++++------ drivers/net/wireless/ath/ath10k/core.h | 1 + drivers/net/wireless/ath/ath10k/debug.c | 2 +- drivers/net/wireless/ath/ath10k/htt.c | 2 +- drivers/net/wireless/ath/ath10k/htt.h | 1 - drivers/net/wireless/ath/ath10k/mac.c | 2 +- drivers/net/wireless/ath/ath10k/testmode.c | 1 + 7 files changed, 11 insertions(+), 10 deletions(-) diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c index a7c99355a7c2..4af01afdaf6c 100644 --- a/drivers/net/wireless/ath/ath10k/core.c +++ b/drivers/net/wireless/ath/ath10k/core.c @@ -1117,10 +1117,10 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name, version = (__le32 *)data; - ar->htt.op_version = le32_to_cpup(version); + fw_file->htt_op_version = le32_to_cpup(version); ath10k_dbg(ar, ATH10K_DBG_BOOT, "found fw ie htt op version %d\n", - ar->htt.op_version); + fw_file->htt_op_version); break; case ATH10K_FW_IE_FW_CODE_SWAP_IMAGE: ath10k_dbg(ar, ATH10K_DBG_BOOT, @@ -1569,18 +1569,18 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar) /* Backwards compatibility for firmwares without * ATH10K_FW_IE_HTT_OP_VERSION. 
*/ - if (ar->htt.op_version == ATH10K_FW_HTT_OP_VERSION_UNSET) { + if (fw_file->htt_op_version == ATH10K_FW_HTT_OP_VERSION_UNSET) { switch (fw_file->wmi_op_version) { case ATH10K_FW_WMI_OP_VERSION_MAIN: - ar->htt.op_version = ATH10K_FW_HTT_OP_VERSION_MAIN; + fw_file->htt_op_version = ATH10K_FW_HTT_OP_VERSION_MAIN; break; case ATH10K_FW_WMI_OP_VERSION_10_1: case ATH10K_FW_WMI_OP_VERSION_10_2: case ATH10K_FW_WMI_OP_VERSION_10_2_4: - ar->htt.op_version = ATH10K_FW_HTT_OP_VERSION_10_1; + fw_file->htt_op_version = ATH10K_FW_HTT_OP_VERSION_10_1; break; case ATH10K_FW_WMI_OP_VERSION_TLV: - ar->htt.op_version = ATH10K_FW_HTT_OP_VERSION_TLV; + fw_file->htt_op_version = ATH10K_FW_HTT_OP_VERSION_TLV; break; case ATH10K_FW_WMI_OP_VERSION_10_4: case ATH10K_FW_WMI_OP_VERSION_UNSET: diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h index 7d709f848fac..55a28c08e898 100644 --- a/drivers/net/wireless/ath/ath10k/core.h +++ b/drivers/net/wireless/ath/ath10k/core.h @@ -634,6 +634,7 @@ struct ath10k_fw_file { DECLARE_BITMAP(fw_features, ATH10K_FW_FEATURE_COUNT); enum ath10k_fw_wmi_op_version wmi_op_version; + enum ath10k_fw_htt_op_version htt_op_version; const void *firmware_data; size_t firmware_len; diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c index 8a63ce5c6e09..e2511550fbb8 100644 --- a/drivers/net/wireless/ath/ath10k/debug.c +++ b/drivers/net/wireless/ath/ath10k/debug.c @@ -179,7 +179,7 @@ void ath10k_debug_print_boot_info(struct ath10k *ar) ar->htt.target_version_major, ar->htt.target_version_minor, ar->normal_mode_fw.fw_file.wmi_op_version, - ar->htt.op_version, + ar->normal_mode_fw.fw_file.htt_op_version, ath10k_cal_mode_str(ar->cal_mode), ar->max_num_stations, test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags), diff --git a/drivers/net/wireless/ath/ath10k/htt.c b/drivers/net/wireless/ath/ath10k/htt.c index ee79512b1fcc..130cd9502021 100644 --- a/drivers/net/wireless/ath/ath10k/htt.c +++ b/drivers/net/wireless/ath/ath10k/htt.c @@ -183,7 +183,7 @@ int ath10k_htt_init(struct ath10k *ar) 8 + /* llc snap */ 2; /* ip4 dscp or ip6 priority */ - switch (ar->htt.op_version) { + switch (ar->running_fw->fw_file.htt_op_version) { case ATH10K_FW_HTT_OP_VERSION_10_4: ar->htt.t2h_msg_types = htt_10_4_t2h_msg_types; ar->htt.t2h_msg_types_max = HTT_10_4_T2H_NUM_MSGS; diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h index ee7c8f8f8073..911c535d0863 100644 --- a/drivers/net/wireless/ath/ath10k/htt.h +++ b/drivers/net/wireless/ath/ath10k/htt.h @@ -1562,7 +1562,6 @@ struct ath10k_htt { u8 target_version_major; u8 target_version_minor; struct completion target_version_received; - enum ath10k_fw_htt_op_version op_version; u8 max_num_amsdu; u8 max_num_ampdu; diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index 5fb912acc0a8..67cf004b685a 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -3359,7 +3359,7 @@ bool ath10k_mac_tx_frm_has_freq(struct ath10k *ar) */ return (ar->htt.target_version_major >= 3 && ar->htt.target_version_minor >= 4 && - ar->htt.op_version == ATH10K_FW_HTT_OP_VERSION_TLV); + ar->running_fw->fw_file.htt_op_version == ATH10K_FW_HTT_OP_VERSION_TLV); } static int ath10k_mac_tx_wmi_mgmt(struct ath10k *ar, struct sk_buff *skb) diff --git a/drivers/net/wireless/ath/ath10k/testmode.c b/drivers/net/wireless/ath/ath10k/testmode.c index 3d4418969697..daf04d74c6d0 100644 --- 
a/drivers/net/wireless/ath/ath10k/testmode.c +++ b/drivers/net/wireless/ath/ath10k/testmode.c @@ -284,6 +284,7 @@ static int ath10k_tm_fetch_utf_firmware_api_1(struct ath10k *ar, */ fw_file->wmi_op_version = ATH10K_FW_WMI_OP_VERSION_10_1; + fw_file->htt_op_version = ATH10K_FW_HTT_OP_VERSION_10_1; fw_file->firmware_data = fw_file->firmware->data; fw_file->firmware_len = fw_file->firmware->size; From 9dfe240b4d684f17efa861e92e45dc949b0049ed Mon Sep 17 00:00:00 2001 From: Kalle Valo Date: Wed, 20 Apr 2016 19:46:01 +0300 Subject: [PATCH 0854/1649] ath10k: switch testmode to use ath10k_core_fetch_firmware_api_n() Now that all firmware-N.bin related are within struct ath10k_fw_file we can switch to use ath10k_core_fetch_firmware_api_n() and delete almost identical ath10k_tm_fetch_utf_firmware_api_2(). Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/core.c | 4 +- drivers/net/wireless/ath/ath10k/core.h | 2 + drivers/net/wireless/ath/ath10k/testmode.c | 124 +-------------------- 3 files changed, 6 insertions(+), 124 deletions(-) diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c index 4af01afdaf6c..db9437a72ba4 100644 --- a/drivers/net/wireless/ath/ath10k/core.c +++ b/drivers/net/wireless/ath/ath10k/core.c @@ -976,8 +976,8 @@ success: return 0; } -static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name, - struct ath10k_fw_file *fw_file) +int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name, + struct ath10k_fw_file *fw_file) { size_t magic_len, len, ie_len; int ie_id, i, index, bit, ret; diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h index 55a28c08e898..f3553dc11c5d 100644 --- a/drivers/net/wireless/ath/ath10k/core.h +++ b/drivers/net/wireless/ath/ath10k/core.h @@ -933,6 +933,8 @@ void ath10k_core_destroy(struct ath10k *ar); void ath10k_core_get_fw_features_str(struct ath10k *ar, char *buf, size_t max_len); +int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name, + struct ath10k_fw_file *fw_file); int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode, const struct ath10k_fw_components *fw_components); diff --git a/drivers/net/wireless/ath/ath10k/testmode.c b/drivers/net/wireless/ath/ath10k/testmode.c index daf04d74c6d0..120f4234d3b0 100644 --- a/drivers/net/wireless/ath/ath10k/testmode.c +++ b/drivers/net/wireless/ath/ath10k/testmode.c @@ -139,127 +139,6 @@ static int ath10k_tm_cmd_get_version(struct ath10k *ar, struct nlattr *tb[]) return cfg80211_testmode_reply(skb); } -static int ath10k_tm_fetch_utf_firmware_api_2(struct ath10k *ar, - struct ath10k_fw_file *fw_file) -{ - size_t len, magic_len, ie_len; - struct ath10k_fw_ie *hdr; - char filename[100]; - __le32 *version; - const u8 *data; - int ie_id, ret; - - snprintf(filename, sizeof(filename), "%s/%s", - ar->hw_params.fw.dir, ATH10K_FW_UTF_API2_FILE); - - /* load utf firmware image */ - ret = request_firmware(&fw_file->firmware, filename, ar->dev); - if (ret) { - ath10k_warn(ar, "failed to retrieve utf firmware '%s': %d\n", - filename, ret); - return ret; - } - - data = fw_file->firmware->data; - len = fw_file->firmware->size; - - /* FIXME: call release_firmware() in error cases */ - - /* magic also includes the null byte, check that as well */ - magic_len = strlen(ATH10K_FIRMWARE_MAGIC) + 1; - - if (len < magic_len) { - ath10k_err(ar, "utf firmware file is too small to contain magic\n"); - ret = -EINVAL; - goto err; - } - - if (memcmp(data, ATH10K_FIRMWARE_MAGIC, 
magic_len) != 0) { - ath10k_err(ar, "invalid firmware magic\n"); - ret = -EINVAL; - goto err; - } - - /* jump over the padding */ - magic_len = ALIGN(magic_len, 4); - - len -= magic_len; - data += magic_len; - - /* loop elements */ - while (len > sizeof(struct ath10k_fw_ie)) { - hdr = (struct ath10k_fw_ie *)data; - - ie_id = le32_to_cpu(hdr->id); - ie_len = le32_to_cpu(hdr->len); - - len -= sizeof(*hdr); - data += sizeof(*hdr); - - if (len < ie_len) { - ath10k_err(ar, "invalid length for FW IE %d (%zu < %zu)\n", - ie_id, len, ie_len); - ret = -EINVAL; - goto err; - } - - switch (ie_id) { - case ATH10K_FW_IE_FW_VERSION: - if (ie_len > sizeof(fw_file->fw_version) - 1) - break; - - memcpy(fw_file->fw_version, data, ie_len); - fw_file->fw_version[ie_len] = '\0'; - - ath10k_dbg(ar, ATH10K_DBG_TESTMODE, - "testmode found fw utf version %s\n", - fw_file->fw_version); - break; - case ATH10K_FW_IE_TIMESTAMP: - /* ignore timestamp, but don't warn about it either */ - break; - case ATH10K_FW_IE_FW_IMAGE: - ath10k_dbg(ar, ATH10K_DBG_TESTMODE, - "testmode found fw image ie (%zd B)\n", - ie_len); - - fw_file->firmware_data = data; - fw_file->firmware_len = ie_len; - break; - case ATH10K_FW_IE_WMI_OP_VERSION: - if (ie_len != sizeof(u32)) - break; - version = (__le32 *)data; - fw_file->wmi_op_version = le32_to_cpup(version); - ath10k_dbg(ar, ATH10K_DBG_TESTMODE, "testmode found fw ie wmi op version %d\n", - fw_file->wmi_op_version); - break; - default: - ath10k_warn(ar, "Unknown testmode FW IE: %u\n", - le32_to_cpu(hdr->id)); - break; - } - /* jump over the padding */ - ie_len = ALIGN(ie_len, 4); - - len -= ie_len; - data += ie_len; - } - - if (!fw_file->firmware_data || !fw_file->firmware_len) { - ath10k_err(ar, "No ATH10K_FW_IE_FW_IMAGE found\n"); - ret = -EINVAL; - goto err; - } - - return 0; - -err: - release_firmware(fw_file->firmware); - - return ret; -} - static int ath10k_tm_fetch_utf_firmware_api_1(struct ath10k *ar, struct ath10k_fw_file *fw_file) { @@ -296,7 +175,8 @@ static int ath10k_tm_fetch_firmware(struct ath10k *ar) struct ath10k_fw_components *utf_mode_fw; int ret; - ret = ath10k_tm_fetch_utf_firmware_api_2(ar, &ar->testmode.utf_mode_fw.fw_file); + ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_UTF_API2_FILE, + &ar->testmode.utf_mode_fw.fw_file); if (ret == 0) { ath10k_dbg(ar, ATH10K_DBG_TESTMODE, "testmode using fw utf api 2"); goto out; From 1fe63c9ca8913eb7af6c428cf81abad29e0bc9d6 Mon Sep 17 00:00:00 2001 From: Kalle Valo Date: Wed, 20 Apr 2016 19:46:16 +0300 Subject: [PATCH 0855/1649] ath10k: remove enum ath10k_swap_code_seg_bin_type It's not needed for anything so just get rid of it. 
Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/core.c | 3 +-- drivers/net/wireless/ath/ath10k/swap.c | 22 ++++++---------------- drivers/net/wireless/ath/ath10k/swap.h | 9 +-------- 3 files changed, 8 insertions(+), 26 deletions(-) diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c index db9437a72ba4..e94cb87380d2 100644 --- a/drivers/net/wireless/ath/ath10k/core.c +++ b/drivers/net/wireless/ath/ath10k/core.c @@ -647,8 +647,7 @@ static int ath10k_download_fw(struct ath10k *ar) data = ar->running_fw->fw_file.firmware_data; data_len = ar->running_fw->fw_file.firmware_len; - ret = ath10k_swap_code_seg_configure(ar, - ATH10K_SWAP_CODE_SEG_BIN_TYPE_FW); + ret = ath10k_swap_code_seg_configure(ar); if (ret) { ath10k_err(ar, "failed to configure fw code swap: %d\n", ret); diff --git a/drivers/net/wireless/ath/ath10k/swap.c b/drivers/net/wireless/ath/ath10k/swap.c index 47d449bac86d..0c5f5863dac8 100644 --- a/drivers/net/wireless/ath/ath10k/swap.c +++ b/drivers/net/wireless/ath/ath10k/swap.c @@ -134,27 +134,17 @@ ath10k_swap_code_seg_alloc(struct ath10k *ar, size_t swap_bin_len) return seg_info; } -int ath10k_swap_code_seg_configure(struct ath10k *ar, - enum ath10k_swap_code_seg_bin_type type) +int ath10k_swap_code_seg_configure(struct ath10k *ar) { int ret; struct ath10k_swap_code_seg_info *seg_info = NULL; - switch (type) { - case ATH10K_SWAP_CODE_SEG_BIN_TYPE_FW: - if (!ar->swap.firmware_swap_code_seg_info) - return 0; - - ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot found firmware code swap binary\n"); - seg_info = ar->swap.firmware_swap_code_seg_info; - break; - default: - case ATH10K_SWAP_CODE_SEG_BIN_TYPE_OTP: - case ATH10K_SWAP_CODE_SEG_BIN_TYPE_UTF: - ath10k_warn(ar, "ignoring unknown code swap binary type %d\n", - type); + if (!ar->swap.firmware_swap_code_seg_info) return 0; - } + + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot found firmware code swap binary\n"); + + seg_info = ar->swap.firmware_swap_code_seg_info; ret = ath10k_bmi_write_memory(ar, seg_info->target_addr, &seg_info->seg_hw_info, diff --git a/drivers/net/wireless/ath/ath10k/swap.h b/drivers/net/wireless/ath/ath10k/swap.h index 5c89952dd20f..36991c7b07a0 100644 --- a/drivers/net/wireless/ath/ath10k/swap.h +++ b/drivers/net/wireless/ath/ath10k/swap.h @@ -39,12 +39,6 @@ union ath10k_swap_code_seg_item { struct ath10k_swap_code_seg_tail tail; } __packed; -enum ath10k_swap_code_seg_bin_type { - ATH10K_SWAP_CODE_SEG_BIN_TYPE_OTP, - ATH10K_SWAP_CODE_SEG_BIN_TYPE_FW, - ATH10K_SWAP_CODE_SEG_BIN_TYPE_UTF, -}; - struct ath10k_swap_code_seg_hw_info { /* Swap binary image size */ __le32 swap_size; @@ -64,8 +58,7 @@ struct ath10k_swap_code_seg_info { dma_addr_t paddr[ATH10K_SWAP_CODE_SEG_NUM_SUPPORTED]; }; -int ath10k_swap_code_seg_configure(struct ath10k *ar, - enum ath10k_swap_code_seg_bin_type type); +int ath10k_swap_code_seg_configure(struct ath10k *ar); void ath10k_swap_code_seg_release(struct ath10k *ar); int ath10k_swap_code_seg_init(struct ath10k *ar); From e6f268ef3687862b0e9f01f7b3706b54f75b82ab Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Wed, 20 Apr 2016 15:32:54 -0400 Subject: [PATCH 0856/1649] net: nla_align_64bit() needs to test the right pointer. Netlink messages are appended, one object at a time, to the end of the SKB. Therefore we need to test skb_tail_pointer() not skb->data for alignment. Fixes: 35c5845957c7 ("net: Add helpers for 64-bit aligning netlink attributes.") Signed-off-by: David S. 
Miller --- include/net/netlink.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/net/netlink.h b/include/net/netlink.h index cf95df1fa14b..3c1fd92a52c8 100644 --- a/include/net/netlink.h +++ b/include/net/netlink.h @@ -1250,7 +1250,7 @@ static inline int nla_align_64bit(struct sk_buff *skb, int padattr) * nlattr header for next attribute, will make nla_data() * 8-byte aligned. */ - if (IS_ALIGNED((unsigned long)skb->data, 8) && + if (IS_ALIGNED((unsigned long)skb_tail_pointer(skb), 8) && !nla_reserve(skb, padattr, 0)) return -EMSGSIZE; #endif From 10c9ead9f3c6bb24bddc9a96681f7d58e6623966 Mon Sep 17 00:00:00 2001 From: Roopa Prabhu Date: Wed, 20 Apr 2016 08:43:43 -0700 Subject: [PATCH 0857/1649] rtnetlink: add new RTM_GETSTATS message to dump link stats This patch adds a new RTM_GETSTATS message to query link stats via netlink from the kernel. RTM_NEWLINK also dumps stats today, but RTM_NEWLINK returns a lot more than just stats and is expensive in some cases when frequent polling for stats from userspace is a common operation. RTM_GETSTATS is an attempt to provide a lightweight netlink message to explicitly query only link stats from the kernel on an interface. The idea is to also keep it extensible so that new kinds of stats can be added to it in the future. This patch adds the following attribute for NETDEV stats: struct nla_policy ifla_stats_policy[IFLA_STATS_MAX + 1] = { [IFLA_STATS_LINK_64] = { .len = sizeof(struct rtnl_link_stats64) }, }; Like any other rtnetlink message, RTM_GETSTATS can be used to get stats of a single interface or all interfaces with NLM_F_DUMP. Future possible new types of stat attributes: link af stats: - IFLA_STATS_LINK_IPV6 (nested. for ipv6 stats) - IFLA_STATS_LINK_MPLS (nested. for mpls/mdev stats) extended stats: - IFLA_STATS_LINK_EXTENDED (nested. extended software netdev stats like bridge, vlan, vxlan etc) - IFLA_STATS_LINK_HW_EXTENDED (nested. extended hardware stats which are available via ethtool today) This patch also declares a filter mask for all stat attributes. The user has to provide a mask of stats attributes to query. The filter mask can be specified in the new hdr 'struct if_stats_msg' for stats messages. The other important field in the header is the ifindex. This API can also include attributes for global stats (e.g. TCP) in the future. When global stats are included in a stats msg, the ifindex in the header must be zero. A single stats message cannot contain both global and netdev specific stats. To easily distinguish them, netdev specific stat attribute names are prefixed with IFLA_STATS_LINK_. Without any attributes in the filter_mask, no stats will be returned. This patch has been tested with a modified iproute2 ifstat. Suggested-by: Jamal Hadi Salim Signed-off-by: Roopa Prabhu Signed-off-by: David S. Miller --- include/uapi/linux/if_link.h | 23 +++++ include/uapi/linux/rtnetlink.h | 5 ++ net/core/rtnetlink.c | 158 +++++++++++++++++++++++++++++++++ security/selinux/nlmsgtab.c | 4 +- 4 files changed, 189 insertions(+), 1 deletion(-) diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index af8fd58b4006..ba69d4447249 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h @@ -782,4 +782,27 @@ enum { #define IFLA_HSR_MAX (__IFLA_HSR_MAX - 1) +/* STATS section */ + +struct if_stats_msg { + __u8 family; + __u8 pad1; + __u16 pad2; + __u32 ifindex; + __u32 filter_mask; +}; + +/* A stats attribute can be netdev specific or a global stat. 
+ * For netdev stats, lets use the prefix IFLA_STATS_LINK_* + */ +enum { + IFLA_STATS_UNSPEC, /* also used as 64bit pad attribute */ + IFLA_STATS_LINK_64, + __IFLA_STATS_MAX, +}; + +#define IFLA_STATS_MAX (__IFLA_STATS_MAX - 1) + +#define IFLA_STATS_FILTER_BIT(ATTR) (1 << (ATTR - 1)) + #endif /* _UAPI_LINUX_IF_LINK_H */ diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h index ca764b5da86d..cc885c4e9065 100644 --- a/include/uapi/linux/rtnetlink.h +++ b/include/uapi/linux/rtnetlink.h @@ -139,6 +139,11 @@ enum { RTM_GETNSID = 90, #define RTM_GETNSID RTM_GETNSID + RTM_NEWSTATS = 92, +#define RTM_NEWSTATS RTM_NEWSTATS + RTM_GETSTATS = 94, +#define RTM_GETSTATS RTM_GETSTATS + __RTM_MAX, #define RTM_MAX (((__RTM_MAX + 3) & ~3) - 1) }; diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index d3694a13c85a..4a47a9aceb1d 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -3449,6 +3449,161 @@ out: return err; } +static int rtnl_fill_statsinfo(struct sk_buff *skb, struct net_device *dev, + int type, u32 pid, u32 seq, u32 change, + unsigned int flags, unsigned int filter_mask) +{ + struct if_stats_msg *ifsm; + struct nlmsghdr *nlh; + struct nlattr *attr; + + ASSERT_RTNL(); + + nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifsm), flags); + if (!nlh) + return -EMSGSIZE; + + ifsm = nlmsg_data(nlh); + ifsm->ifindex = dev->ifindex; + ifsm->filter_mask = filter_mask; + + if (filter_mask & IFLA_STATS_FILTER_BIT(IFLA_STATS_LINK_64)) { + struct rtnl_link_stats64 *sp; + int err; + + /* if necessary, add a zero length NOP attribute so that + * IFLA_STATS_LINK_64 will be 64-bit aligned + */ + err = nla_align_64bit(skb, IFLA_STATS_UNSPEC); + if (err) + goto nla_put_failure; + + attr = nla_reserve(skb, IFLA_STATS_LINK_64, + sizeof(struct rtnl_link_stats64)); + if (!attr) + goto nla_put_failure; + + sp = nla_data(attr); + dev_get_stats(dev, sp); + } + + nlmsg_end(skb, nlh); + + return 0; + +nla_put_failure: + nlmsg_cancel(skb, nlh); + + return -EMSGSIZE; +} + +static const struct nla_policy ifla_stats_policy[IFLA_STATS_MAX + 1] = { + [IFLA_STATS_LINK_64] = { .len = sizeof(struct rtnl_link_stats64) }, +}; + +static size_t if_nlmsg_stats_size(const struct net_device *dev, + u32 filter_mask) +{ + size_t size = 0; + + if (filter_mask & IFLA_STATS_FILTER_BIT(IFLA_STATS_LINK_64)) + size += nla_total_size_64bit(sizeof(struct rtnl_link_stats64)); + + return size; +} + +static int rtnl_stats_get(struct sk_buff *skb, struct nlmsghdr *nlh) +{ + struct net *net = sock_net(skb->sk); + struct if_stats_msg *ifsm; + struct net_device *dev = NULL; + struct sk_buff *nskb; + u32 filter_mask; + int err; + + ifsm = nlmsg_data(nlh); + if (ifsm->ifindex > 0) + dev = __dev_get_by_index(net, ifsm->ifindex); + else + return -EINVAL; + + if (!dev) + return -ENODEV; + + filter_mask = ifsm->filter_mask; + if (!filter_mask) + return -EINVAL; + + nskb = nlmsg_new(if_nlmsg_stats_size(dev, filter_mask), GFP_KERNEL); + if (!nskb) + return -ENOBUFS; + + err = rtnl_fill_statsinfo(nskb, dev, RTM_NEWSTATS, + NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0, + 0, filter_mask); + if (err < 0) { + /* -EMSGSIZE implies BUG in if_nlmsg_stats_size */ + WARN_ON(err == -EMSGSIZE); + kfree_skb(nskb); + } else { + err = rtnl_unicast(nskb, net, NETLINK_CB(skb).portid); + } + + return err; +} + +static int rtnl_stats_dump(struct sk_buff *skb, struct netlink_callback *cb) +{ + struct net *net = sock_net(skb->sk); + struct if_stats_msg *ifsm; + int h, s_h; + int idx = 0, s_idx; + struct net_device *dev; + struct hlist_head *head; + 
unsigned int flags = NLM_F_MULTI; + u32 filter_mask = 0; + int err; + + s_h = cb->args[0]; + s_idx = cb->args[1]; + + cb->seq = net->dev_base_seq; + + ifsm = nlmsg_data(cb->nlh); + filter_mask = ifsm->filter_mask; + if (!filter_mask) + return -EINVAL; + + for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) { + idx = 0; + head = &net->dev_index_head[h]; + hlist_for_each_entry(dev, head, index_hlist) { + if (idx < s_idx) + goto cont; + err = rtnl_fill_statsinfo(skb, dev, RTM_NEWSTATS, + NETLINK_CB(cb->skb).portid, + cb->nlh->nlmsg_seq, 0, + flags, filter_mask); + /* If we ran out of room on the first message, + * we're in trouble + */ + WARN_ON((err == -EMSGSIZE) && (skb->len == 0)); + + if (err < 0) + goto out; + + nl_dump_check_consistent(cb, nlmsg_hdr(skb)); +cont: + idx++; + } + } +out: + cb->args[1] = idx; + cb->args[0] = h; + + return skb->len; +} + /* Process one rtnetlink message. */ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) @@ -3598,4 +3753,7 @@ void __init rtnetlink_init(void) rtnl_register(PF_BRIDGE, RTM_GETLINK, NULL, rtnl_bridge_getlink, NULL); rtnl_register(PF_BRIDGE, RTM_DELLINK, rtnl_bridge_dellink, NULL, NULL); rtnl_register(PF_BRIDGE, RTM_SETLINK, rtnl_bridge_setlink, NULL, NULL); + + rtnl_register(PF_UNSPEC, RTM_GETSTATS, rtnl_stats_get, rtnl_stats_dump, + NULL); } diff --git a/security/selinux/nlmsgtab.c b/security/selinux/nlmsgtab.c index 8495b9368190..2ca9cde939d4 100644 --- a/security/selinux/nlmsgtab.c +++ b/security/selinux/nlmsgtab.c @@ -76,6 +76,8 @@ static struct nlmsg_perm nlmsg_route_perms[] = { RTM_NEWNSID, NETLINK_ROUTE_SOCKET__NLMSG_WRITE }, { RTM_DELNSID, NETLINK_ROUTE_SOCKET__NLMSG_READ }, { RTM_GETNSID, NETLINK_ROUTE_SOCKET__NLMSG_READ }, + { RTM_NEWSTATS, NETLINK_ROUTE_SOCKET__NLMSG_READ }, + { RTM_GETSTATS, NETLINK_ROUTE_SOCKET__NLMSG_READ }, }; static struct nlmsg_perm nlmsg_tcpdiag_perms[] = @@ -155,7 +157,7 @@ int selinux_nlmsg_lookup(u16 sclass, u16 nlmsg_type, u32 *perm) switch (sclass) { case SECCLASS_NETLINK_ROUTE_SOCKET: /* RTM_MAX always point to RTM_SETxxxx, ie RTM_NEWxxx + 3 */ - BUILD_BUG_ON(RTM_MAX != (RTM_NEWNSID + 3)); + BUILD_BUG_ON(RTM_MAX != (RTM_NEWSTATS + 3)); err = nlmsg_perm(nlmsg_type, perm, nlmsg_route_perms, sizeof(nlmsg_route_perms)); break; From fb7c579ab01f010a353d117e484edb05e6f3745c Mon Sep 17 00:00:00 2001 From: Stefan Schmidt Date: Tue, 19 Apr 2016 16:28:52 +0200 Subject: [PATCH 0858/1649] ieee802154: atusb: implement .set_csma_params ops callback Catching up with the stack here and implement CSMA parameter setting. 
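As a side note on how the three arguments are meant to be interpreted: they are the 802.15.4 MAC PIB attributes macMinBE, macMaxBE and macMaxCSMABackoffs. Below is a rough, hypothetical sketch of such a callback (not the atusb implementation in the diff that follows); the explicit range checks are shown purely for illustration, since the nl802154 core is expected to validate requests against phy->supported before the op is ever called, and the helper name is invented.

#include <linux/errno.h>
#include <net/mac802154.h>

/* hypothetical helper: write the CSMA values to the radio (SPI/USB/etc.) */
static int example_program_csma(void *priv, u8 min_be, u8 max_be, u8 retries)
{
        return 0;
}

/* Illustrative only: reject values outside the IEEE 802.15.4 ranges
 * (macMinBE <= macMaxBE <= 8, macMaxCSMABackoffs <= 5) before touching
 * the hardware.
 */
static int example_set_csma_params(struct ieee802154_hw *hw,
                                   u8 min_be, u8 max_be, u8 retries)
{
        if (min_be > max_be || max_be > 8 || retries > 5)
                return -EINVAL;

        return example_program_csma(hw->priv, min_be, max_be, retries);
}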
Signed-off-by: Stefan Schmidt Signed-off-by: Marcel Holtmann --- drivers/net/ieee802154/atusb.c | 20 +++++++++++++++++++- 1 file changed, 19 insertions(+), 1 deletion(-) diff --git a/drivers/net/ieee802154/atusb.c b/drivers/net/ieee802154/atusb.c index b1cd865ade2e..2d8de9f40ec4 100644 --- a/drivers/net/ieee802154/atusb.c +++ b/drivers/net/ieee802154/atusb.c @@ -472,6 +472,23 @@ atusb_set_txpower(struct ieee802154_hw *hw, s32 mbm) return -EINVAL; } +static int +atusb_set_csma_params(struct ieee802154_hw *hw, u8 min_be, u8 max_be, u8 retries) +{ + struct atusb *atusb = hw->priv; + int ret; + + ret = atusb_write_subreg(atusb, SR_MIN_BE, min_be); + if (ret) + return ret; + + ret = atusb_write_subreg(atusb, SR_MAX_BE, max_be); + if (ret) + return ret; + + return atusb_write_subreg(atusb, SR_MAX_CSMA_RETRIES, retries); +} + static int atusb_set_promiscuous_mode(struct ieee802154_hw *hw, const bool on) { @@ -508,6 +525,7 @@ static struct ieee802154_ops atusb_ops = { .stop = atusb_stop, .set_hw_addr_filt = atusb_set_hw_addr_filt, .set_txpower = atusb_set_txpower, + .set_csma_params = atusb_set_csma_params, .set_promiscuous_mode = atusb_set_promiscuous_mode, }; @@ -636,7 +654,7 @@ static int atusb_probe(struct usb_interface *interface, hw->parent = &usb_dev->dev; hw->flags = IEEE802154_HW_TX_OMIT_CKSUM | IEEE802154_HW_AFILT | - IEEE802154_HW_PROMISCUOUS; + IEEE802154_HW_PROMISCUOUS | IEEE802154_HW_CSMA_PARAMS; hw->phy->flags = WPAN_PHY_FLAG_TXPOWER; From 0f4715c87031fb6a128103f8e640c4ce4dfdea9a Mon Sep 17 00:00:00 2001 From: Stefan Schmidt Date: Tue, 19 Apr 2016 16:28:53 +0200 Subject: [PATCH 0859/1649] ieee802154: atusb: implement .set_cca_ed_level ops callback Catching up with the stack here and implement CCA ED level setting. Signed-off-by: Stefan Schmidt Signed-off-by: Marcel Holtmann --- drivers/net/ieee802154/atusb.c | 27 ++++++++++++++++++++++++++- 1 file changed, 26 insertions(+), 1 deletion(-) diff --git a/drivers/net/ieee802154/atusb.c b/drivers/net/ieee802154/atusb.c index 2d8de9f40ec4..94f84574f9ee 100644 --- a/drivers/net/ieee802154/atusb.c +++ b/drivers/net/ieee802154/atusb.c @@ -472,6 +472,26 @@ atusb_set_txpower(struct ieee802154_hw *hw, s32 mbm) return -EINVAL; } +#define ATUSB_MAX_ED_LEVELS 0xF +static const s32 atusb_ed_levels[ATUSB_MAX_ED_LEVELS + 1] = { + -9100, -8900, -8700, -8500, -8300, -8100, -7900, -7700, -7500, -7300, + -7100, -6900, -6700, -6500, -6300, -6100, +}; + +static int +atusb_set_cca_ed_level(struct ieee802154_hw *hw, s32 mbm) +{ + struct atusb *atusb = hw->priv; + u32 i; + + for (i = 0; i < hw->phy->supported.cca_ed_levels_size; i++) { + if (hw->phy->supported.cca_ed_levels[i] == mbm) + return atusb_write_subreg(atusb, SR_CCA_ED_THRES, i); + } + + return -EINVAL; +} + static int atusb_set_csma_params(struct ieee802154_hw *hw, u8 min_be, u8 max_be, u8 retries) { @@ -525,6 +545,7 @@ static struct ieee802154_ops atusb_ops = { .stop = atusb_stop, .set_hw_addr_filt = atusb_set_hw_addr_filt, .set_txpower = atusb_set_txpower, + .set_cca_ed_level = atusb_set_cca_ed_level, .set_csma_params = atusb_set_csma_params, .set_promiscuous_mode = atusb_set_promiscuous_mode, }; @@ -656,7 +677,10 @@ static int atusb_probe(struct usb_interface *interface, hw->flags = IEEE802154_HW_TX_OMIT_CKSUM | IEEE802154_HW_AFILT | IEEE802154_HW_PROMISCUOUS | IEEE802154_HW_CSMA_PARAMS; - hw->phy->flags = WPAN_PHY_FLAG_TXPOWER; + hw->phy->flags = WPAN_PHY_FLAG_TXPOWER | WPAN_PHY_FLAG_CCA_ED_LEVEL; + + hw->phy->supported.cca_ed_levels = atusb_ed_levels; + hw->phy->supported.cca_ed_levels_size = 
ARRAY_SIZE(atusb_ed_levels); hw->phy->current_page = 0; hw->phy->current_channel = 11; /* reset default */ @@ -665,6 +689,7 @@ static int atusb_probe(struct usb_interface *interface, hw->phy->supported.tx_powers_size = ARRAY_SIZE(atusb_powers); hw->phy->transmit_power = hw->phy->supported.tx_powers[0]; ieee802154_random_extended_addr(&hw->phy->perm_extended_addr); + hw->phy->cca_ed_level = hw->phy->supported.cca_ed_levels[7]; atusb_command(atusb, ATUSB_RF_RESET, 0); atusb_get_and_show_chip(atusb); From 308dbb7afde27f9ba359624e6cc1dcba9c93f49a Mon Sep 17 00:00:00 2001 From: Stefan Schmidt Date: Tue, 19 Apr 2016 16:28:54 +0200 Subject: [PATCH 0860/1649] ieee802154: atusb: implement .set_cca_mode ops callback Catching up with the stack here and implement CCA mode setting. Signed-off-by: Stefan Schmidt Signed-off-by: Marcel Holtmann --- drivers/net/ieee802154/atusb.c | 44 +++++++++++++++++++++++++++++++++- 1 file changed, 43 insertions(+), 1 deletion(-) diff --git a/drivers/net/ieee802154/atusb.c b/drivers/net/ieee802154/atusb.c index 94f84574f9ee..72128b3aaec4 100644 --- a/drivers/net/ieee802154/atusb.c +++ b/drivers/net/ieee802154/atusb.c @@ -478,6 +478,39 @@ static const s32 atusb_ed_levels[ATUSB_MAX_ED_LEVELS + 1] = { -7100, -6900, -6700, -6500, -6300, -6100, }; +static int +atusb_set_cca_mode(struct ieee802154_hw *hw, const struct wpan_phy_cca *cca) +{ + struct atusb *atusb = hw->priv; + u8 val; + + /* mapping 802.15.4 to driver spec */ + switch (cca->mode) { + case NL802154_CCA_ENERGY: + val = 1; + break; + case NL802154_CCA_CARRIER: + val = 2; + break; + case NL802154_CCA_ENERGY_CARRIER: + switch (cca->opt) { + case NL802154_CCA_OPT_ENERGY_CARRIER_AND: + val = 3; + break; + case NL802154_CCA_OPT_ENERGY_CARRIER_OR: + val = 0; + break; + default: + return -EINVAL; + } + break; + default: + return -EINVAL; + } + + return atusb_write_subreg(atusb, SR_CCA_MODE, val); +} + static int atusb_set_cca_ed_level(struct ieee802154_hw *hw, s32 mbm) { @@ -545,6 +578,7 @@ static struct ieee802154_ops atusb_ops = { .stop = atusb_stop, .set_hw_addr_filt = atusb_set_hw_addr_filt, .set_txpower = atusb_set_txpower, + .set_cca_mode = atusb_set_cca_mode, .set_cca_ed_level = atusb_set_cca_ed_level, .set_csma_params = atusb_set_csma_params, .set_promiscuous_mode = atusb_set_promiscuous_mode, @@ -677,11 +711,19 @@ static int atusb_probe(struct usb_interface *interface, hw->flags = IEEE802154_HW_TX_OMIT_CKSUM | IEEE802154_HW_AFILT | IEEE802154_HW_PROMISCUOUS | IEEE802154_HW_CSMA_PARAMS; - hw->phy->flags = WPAN_PHY_FLAG_TXPOWER | WPAN_PHY_FLAG_CCA_ED_LEVEL; + hw->phy->flags = WPAN_PHY_FLAG_TXPOWER | WPAN_PHY_FLAG_CCA_ED_LEVEL | + WPAN_PHY_FLAG_CCA_MODE; + + hw->phy->supported.cca_modes = BIT(NL802154_CCA_ENERGY) | + BIT(NL802154_CCA_CARRIER) | BIT(NL802154_CCA_ENERGY_CARRIER); + hw->phy->supported.cca_opts = BIT(NL802154_CCA_OPT_ENERGY_CARRIER_AND) | + BIT(NL802154_CCA_OPT_ENERGY_CARRIER_OR); hw->phy->supported.cca_ed_levels = atusb_ed_levels; hw->phy->supported.cca_ed_levels_size = ARRAY_SIZE(atusb_ed_levels); + hw->phy->cca.mode = NL802154_CCA_ENERGY; + hw->phy->current_page = 0; hw->phy->current_channel = 11; /* reset default */ hw->phy->supported.channels[0] = 0x7FFF800; From 151c37bc29bcbc4b34450c76a8125a5b155520e7 Mon Sep 17 00:00:00 2001 From: Stefan Schmidt Date: Tue, 19 Apr 2016 16:28:55 +0200 Subject: [PATCH 0861/1649] ieee802154: atusb: update my copyright years for this driver Signed-off-by: Stefan Schmidt Signed-off-by: Marcel Holtmann --- drivers/net/ieee802154/atusb.c | 2 ++ 1 file changed, 2 
insertions(+) diff --git a/drivers/net/ieee802154/atusb.c b/drivers/net/ieee802154/atusb.c index 72128b3aaec4..52c9051f3b95 100644 --- a/drivers/net/ieee802154/atusb.c +++ b/drivers/net/ieee802154/atusb.c @@ -3,6 +3,8 @@ * * Written 2013 by Werner Almesberger * + * Copyright (c) 2015 - 2016 Stefan Schmidt + * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation, version 2 From 09401ae25191039f4aa45c13718595f550745c68 Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Fri, 1 Apr 2016 11:15:09 -0700 Subject: [PATCH 0862/1649] fm10k: add helper functions to set strings and data for ethtool stats Reduce duplicate code and the amount of indentation by adding fm10k_add_stat_strings and fm10k_add_ethtool_stats functions which help add fm10k_stat structures to the ethtool stats callbacks. This helps increase ease of use for future stat additions, and increases code readability. Signed-off-by: Jacob Keller Tested-by: Krishneil Singh Signed-off-by: Jeff Kirsher --- .../net/ethernet/intel/fm10k/fm10k_ethtool.c | 59 +++++++++++-------- 1 file changed, 34 insertions(+), 25 deletions(-) diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c index a23748777b1b..f331966ac9df 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c @@ -121,13 +121,22 @@ static const struct fm10k_stats fm10k_gstrings_mbx_stats[] = { FM10K_MBX_STAT("mbx_rx_mbmem_pushed", rx_mbmem_pushed), }; +#define FM10K_QUEUE_STAT(_name, _stat) { \ + .stat_string = _name, \ + .sizeof_stat = FIELD_SIZEOF(struct fm10k_ring, _stat), \ + .stat_offset = offsetof(struct fm10k_ring, _stat) \ +} + +static const struct fm10k_stats fm10k_gstrings_queue_stats[] = { + FM10K_QUEUE_STAT("packets", stats.packets), + FM10K_QUEUE_STAT("bytes", stats.bytes), +}; + #define FM10K_GLOBAL_STATS_LEN ARRAY_SIZE(fm10k_gstrings_global_stats) #define FM10K_DEBUG_STATS_LEN ARRAY_SIZE(fm10k_gstrings_debug_stats) #define FM10K_PF_STATS_LEN ARRAY_SIZE(fm10k_gstrings_pf_stats) #define FM10K_MBX_STATS_LEN ARRAY_SIZE(fm10k_gstrings_mbx_stats) - -#define FM10K_QUEUE_STATS_LEN(_n) \ - ((_n) * 2 * (sizeof(struct fm10k_queue_stats) / sizeof(u64))) +#define FM10K_QUEUE_STATS_LEN ARRAY_SIZE(fm10k_gstrings_queue_stats) #define FM10K_STATIC_STATS_LEN (FM10K_GLOBAL_STATS_LEN + \ FM10K_NETDEV_STATS_LEN + \ @@ -202,14 +211,17 @@ static void fm10k_get_stat_strings(struct net_device *dev, u8 *data) } for (i = 0; i < interface->hw.mac.max_queues; i++) { - snprintf(p, ETH_GSTRING_LEN, "tx_queue_%u_packets", i); - p += ETH_GSTRING_LEN; - snprintf(p, ETH_GSTRING_LEN, "tx_queue_%u_bytes", i); - p += ETH_GSTRING_LEN; - snprintf(p, ETH_GSTRING_LEN, "rx_queue_%u_packets", i); - p += ETH_GSTRING_LEN; - snprintf(p, ETH_GSTRING_LEN, "rx_queue_%u_bytes", i); - p += ETH_GSTRING_LEN; + char prefix[ETH_GSTRING_LEN]; + + snprintf(prefix, ETH_GSTRING_LEN, "tx_queue_%u_", i); + fm10k_add_stat_strings(&p, prefix, + fm10k_gstrings_queue_stats, + FM10K_QUEUE_STATS_LEN); + + snprintf(prefix, ETH_GSTRING_LEN, "rx_queue_%u_", i); + fm10k_add_stat_strings(&p, prefix, + fm10k_gstrings_queue_stats, + FM10K_QUEUE_STATS_LEN); } } @@ -244,7 +256,7 @@ static int fm10k_get_sset_count(struct net_device *dev, int sset) case ETH_SS_TEST: return FM10K_TEST_LEN; case ETH_SS_STATS: - stats_len += FM10K_QUEUE_STATS_LEN(hw->mac.max_queues); + stats_len += hw->mac.max_queues * 2 * 
FM10K_QUEUE_STATS_LEN; if (hw->mac.type != fm10k_mac_vf) stats_len += FM10K_PF_STATS_LEN; @@ -272,9 +284,10 @@ static void fm10k_add_ethtool_stats(u64 **data, void *pointer, unsigned int i; char *p; - /* simply skip forward if we were not given a valid pointer */ if (!pointer) { - *data += size; + /* memory is not zero allocated so we have to clear it */ + for (i = 0; i < size; i++) + *((*data)++) = 0; return; } @@ -304,11 +317,10 @@ static void fm10k_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats __always_unused *stats, u64 *data) { - const int stat_count = sizeof(struct fm10k_queue_stats) / sizeof(u64); struct fm10k_intfc *interface = netdev_priv(netdev); struct fm10k_iov_data *iov_data = interface->iov_data; struct net_device_stats *net_stats = &netdev->stats; - int i, j; + int i; fm10k_update_stats(interface); @@ -347,19 +359,16 @@ static void fm10k_get_ethtool_stats(struct net_device *netdev, for (i = 0; i < interface->hw.mac.max_queues; i++) { struct fm10k_ring *ring; - u64 *queue_stat; ring = interface->tx_ring[i]; - if (ring) - queue_stat = (u64 *)&ring->stats; - for (j = 0; j < stat_count; j++) - *(data++) = ring ? queue_stat[j] : 0; + fm10k_add_ethtool_stats(&data, ring, + fm10k_gstrings_queue_stats, + FM10K_QUEUE_STATS_LEN); ring = interface->rx_ring[i]; - if (ring) - queue_stat = (u64 *)&ring->stats; - for (j = 0; j < stat_count; j++) - *(data++) = ring ? queue_stat[j] : 0; + fm10k_add_ethtool_stats(&data, ring, + fm10k_gstrings_queue_stats, + FM10K_QUEUE_STATS_LEN); } } From 3ef2f563267892230681b1b8890d8f759d39e64d Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Fri, 4 Mar 2016 15:37:48 -0800 Subject: [PATCH 0863/1649] fm10k: remove debug-statistics support This change fixes an (ab)use of the ethtool stats API, which could result in corrupt memory or misleading stat output. The ethtool stats API is not robust enough to handle varying number of statistics due to how it requests the size and allocates memory. Remove the poorly conceived support originally added for extra debug statistics. In the future, a new stats API may open up the ability to display these statistics. 
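The fragility described above comes from ethtool's two-step query: the core first asks the driver how many statistics it has, sizes a flat u64 buffer from that answer, and only then asks the driver to fill the buffer. A simplified sketch of that sequence (an approximation for illustration, not the actual net/core/ethtool.c code) shows why a count that can change between the two calls, as the debug and per-VF statistics could, ends in buffer overruns or misaligned output:

#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/slab.h>

/* Approximate sketch of the two-step ethtool stats query. */
static int example_get_stats(struct net_device *dev)
{
        const struct ethtool_ops *ops = dev->ethtool_ops;
        struct ethtool_stats stats = { .cmd = ETHTOOL_GSTATS };
        u64 *data;
        int n_stats;

        /* step 1: the driver reports how many u64 counters it will emit */
        n_stats = ops->get_sset_count(dev, ETH_SS_STATS);
        if (n_stats < 0)
                return n_stats;

        data = kcalloc(n_stats, sizeof(u64), GFP_KERNEL);
        if (!data)
                return -ENOMEM;

        /* step 2: the driver fills the buffer.  If its idea of the count
         * grew in between (a debug flag toggled, a VF appeared), it writes
         * past the allocation; if it shrank, strings and values no longer
         * line up.
         */
        stats.n_stats = n_stats;
        ops->get_ethtool_stats(dev, &stats, data);

        /* ... values would be copied to userspace here ... */
        kfree(data);
        return 0;
}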
Signed-off-by: Jacob Keller Tested-by: Krishneil Singh Signed-off-by: Jeff Kirsher --- .../net/ethernet/intel/fm10k/fm10k_ethtool.c | 72 +------------------ 1 file changed, 1 insertion(+), 71 deletions(-) diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c index f331966ac9df..6ab9df52f301 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c @@ -81,17 +81,6 @@ static const struct fm10k_stats fm10k_gstrings_global_stats[] = { FM10K_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts), }; -static const struct fm10k_stats fm10k_gstrings_debug_stats[] = { - FM10K_STAT("hw_sm_mbx_full", hw_sm_mbx_full), - FM10K_STAT("hw_csum_tx_good", hw_csum_tx_good), - FM10K_STAT("hw_csum_rx_good", hw_csum_rx_good), - FM10K_STAT("rx_switch_errors", rx_switch_errors), - FM10K_STAT("rx_drops", rx_drops), - FM10K_STAT("rx_pp_errors", rx_pp_errors), - FM10K_STAT("rx_link_errors", rx_link_errors), - FM10K_STAT("rx_length_errors", rx_length_errors), -}; - static const struct fm10k_stats fm10k_gstrings_pf_stats[] = { FM10K_STAT("timeout", stats.timeout.count), FM10K_STAT("ur", stats.ur.count), @@ -133,7 +122,6 @@ static const struct fm10k_stats fm10k_gstrings_queue_stats[] = { }; #define FM10K_GLOBAL_STATS_LEN ARRAY_SIZE(fm10k_gstrings_global_stats) -#define FM10K_DEBUG_STATS_LEN ARRAY_SIZE(fm10k_gstrings_debug_stats) #define FM10K_PF_STATS_LEN ARRAY_SIZE(fm10k_gstrings_pf_stats) #define FM10K_MBX_STATS_LEN ARRAY_SIZE(fm10k_gstrings_mbx_stats) #define FM10K_QUEUE_STATS_LEN ARRAY_SIZE(fm10k_gstrings_queue_stats) @@ -154,12 +142,10 @@ enum fm10k_self_test_types { }; enum { - FM10K_PRV_FLAG_DEBUG_STATS, FM10K_PRV_FLAG_LEN, }; static const char fm10k_prv_flags[FM10K_PRV_FLAG_LEN][ETH_GSTRING_LEN] = { - "debug-statistics", }; static void fm10k_add_stat_strings(char **p, const char *prefix, @@ -178,7 +164,6 @@ static void fm10k_add_stat_strings(char **p, const char *prefix, static void fm10k_get_stat_strings(struct net_device *dev, u8 *data) { struct fm10k_intfc *interface = netdev_priv(dev); - struct fm10k_iov_data *iov_data = interface->iov_data; char *p = (char *)data; unsigned int i; @@ -188,10 +173,6 @@ static void fm10k_get_stat_strings(struct net_device *dev, u8 *data) fm10k_add_stat_strings(&p, "", fm10k_gstrings_global_stats, FM10K_GLOBAL_STATS_LEN); - if (interface->flags & FM10K_FLAG_DEBUG_STATS) - fm10k_add_stat_strings(&p, "", fm10k_gstrings_debug_stats, - FM10K_DEBUG_STATS_LEN); - fm10k_add_stat_strings(&p, "", fm10k_gstrings_mbx_stats, FM10K_MBX_STATS_LEN); @@ -199,17 +180,6 @@ static void fm10k_get_stat_strings(struct net_device *dev, u8 *data) fm10k_add_stat_strings(&p, "", fm10k_gstrings_pf_stats, FM10K_PF_STATS_LEN); - if ((interface->flags & FM10K_FLAG_DEBUG_STATS) && iov_data) { - for (i = 0; i < iov_data->num_vfs; i++) { - char prefix[ETH_GSTRING_LEN]; - - snprintf(prefix, ETH_GSTRING_LEN, "vf_%u_", i); - fm10k_add_stat_strings(&p, prefix, - fm10k_gstrings_mbx_stats, - FM10K_MBX_STATS_LEN); - } - } - for (i = 0; i < interface->hw.mac.max_queues; i++) { char prefix[ETH_GSTRING_LEN]; @@ -248,7 +218,6 @@ static void fm10k_get_strings(struct net_device *dev, static int fm10k_get_sset_count(struct net_device *dev, int sset) { struct fm10k_intfc *interface = netdev_priv(dev); - struct fm10k_iov_data *iov_data = interface->iov_data; struct fm10k_hw *hw = &interface->hw; int stats_len = FM10K_STATIC_STATS_LEN; @@ -261,14 +230,6 @@ static int fm10k_get_sset_count(struct net_device *dev, 
int sset) if (hw->mac.type != fm10k_mac_vf) stats_len += FM10K_PF_STATS_LEN; - if (interface->flags & FM10K_FLAG_DEBUG_STATS) { - stats_len += FM10K_DEBUG_STATS_LEN; - - if (iov_data) - stats_len += FM10K_MBX_STATS_LEN * - iov_data->num_vfs; - } - return stats_len; case ETH_SS_PRIV_FLAGS: return FM10K_PRV_FLAG_LEN; @@ -318,7 +279,6 @@ static void fm10k_get_ethtool_stats(struct net_device *netdev, u64 *data) { struct fm10k_intfc *interface = netdev_priv(netdev); - struct fm10k_iov_data *iov_data = interface->iov_data; struct net_device_stats *net_stats = &netdev->stats; int i; @@ -330,11 +290,6 @@ static void fm10k_get_ethtool_stats(struct net_device *netdev, fm10k_add_ethtool_stats(&data, interface, fm10k_gstrings_global_stats, FM10K_GLOBAL_STATS_LEN); - if (interface->flags & FM10K_FLAG_DEBUG_STATS) - fm10k_add_ethtool_stats(&data, interface, - fm10k_gstrings_debug_stats, - FM10K_DEBUG_STATS_LEN); - fm10k_add_ethtool_stats(&data, &interface->hw.mbx, fm10k_gstrings_mbx_stats, FM10K_MBX_STATS_LEN); @@ -345,18 +300,6 @@ static void fm10k_get_ethtool_stats(struct net_device *netdev, FM10K_PF_STATS_LEN); } - if ((interface->flags & FM10K_FLAG_DEBUG_STATS) && iov_data) { - for (i = 0; i < iov_data->num_vfs; i++) { - struct fm10k_vf_info *vf_info; - - vf_info = &iov_data->vf_info[i]; - - fm10k_add_ethtool_stats(&data, &vf_info->mbx, - fm10k_gstrings_mbx_stats, - FM10K_MBX_STATS_LEN); - } - } - for (i = 0; i < interface->hw.mac.max_queues; i++) { struct fm10k_ring *ring; @@ -1012,27 +955,14 @@ static void fm10k_self_test(struct net_device *dev, static u32 fm10k_get_priv_flags(struct net_device *netdev) { - struct fm10k_intfc *interface = netdev_priv(netdev); - u32 priv_flags = 0; - - if (interface->flags & FM10K_FLAG_DEBUG_STATS) - priv_flags |= BIT(FM10K_PRV_FLAG_DEBUG_STATS); - - return priv_flags; + return 0; } static int fm10k_set_priv_flags(struct net_device *netdev, u32 priv_flags) { - struct fm10k_intfc *interface = netdev_priv(netdev); - if (priv_flags >= BIT(FM10K_PRV_FLAG_LEN)) return -EINVAL; - if (priv_flags & BIT(FM10K_PRV_FLAG_DEBUG_STATS)) - interface->flags |= FM10K_FLAG_DEBUG_STATS; - else - interface->flags &= ~FM10K_FLAG_DEBUG_STATS; - return 0; } From 144d8305585a00467aaedc86d039a4ab036a9bcc Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Mon, 7 Mar 2016 09:30:15 -0800 Subject: [PATCH 0864/1649] fm10k: Add support for bulk Tx cleanup & cleanup boolean logic This patch enables bulk free in Tx cleanup for fm10k and cleans up the boolean logic in the polling routines for fm10k in the hopes of avoiding any mix-ups similar to what occurred with i40e and i40evf. 
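For reference, the bulk free comes from napi_consume_skb(), which treats the NAPI budget as a hint: with a non-zero budget the skb may be deferred to the per-CPU NAPI cache and freed in a batch, while a budget of 0 (the netpoll case) degrades to an immediate dev_consume_skb_any(). A stripped-down sketch of the pattern, using a made-up buffer type rather than fm10k's:

#include <linux/skbuff.h>

struct example_tx_buffer {
        struct sk_buff *skb;
};

/* Pass the NAPI budget down into the Tx completion path so completed
 * skbs can be freed in bulk; napi_budget == 0 means netpoll context.
 */
static void example_clean_tx(struct example_tx_buffer *bufs,
                             unsigned int count, int napi_budget)
{
        unsigned int i;

        for (i = 0; i < count; i++) {
                napi_consume_skb(bufs[i].skb, napi_budget);
                bufs[i].skb = NULL;
        }
}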
Signed-off-by: Alexander Duyck Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/fm10k/fm10k_main.c | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c index 0b465394f88a..97650802a4cc 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c @@ -1198,9 +1198,10 @@ void fm10k_tx_timeout_reset(struct fm10k_intfc *interface) * fm10k_clean_tx_irq - Reclaim resources after transmit completes * @q_vector: structure containing interrupt and ring information * @tx_ring: tx ring to clean + * @napi_budget: Used to determine if we are in netpoll **/ static bool fm10k_clean_tx_irq(struct fm10k_q_vector *q_vector, - struct fm10k_ring *tx_ring) + struct fm10k_ring *tx_ring, int napi_budget) { struct fm10k_intfc *interface = q_vector->interface; struct fm10k_tx_buffer *tx_buffer; @@ -1238,7 +1239,7 @@ static bool fm10k_clean_tx_irq(struct fm10k_q_vector *q_vector, total_packets += tx_buffer->gso_segs; /* free the skb */ - dev_consume_skb_any(tx_buffer->skb); + napi_consume_skb(tx_buffer->skb, napi_budget); /* unmap skb header data */ dma_unmap_single(tx_ring->dev, @@ -1449,8 +1450,10 @@ static int fm10k_poll(struct napi_struct *napi, int budget) int per_ring_budget, work_done = 0; bool clean_complete = true; - fm10k_for_each_ring(ring, q_vector->tx) - clean_complete &= fm10k_clean_tx_irq(q_vector, ring); + fm10k_for_each_ring(ring, q_vector->tx) { + if (!fm10k_clean_tx_irq(q_vector, ring, budget)) + clean_complete = false; + } /* Handle case where we are called by netpoll with a budget of 0 */ if (budget <= 0) @@ -1468,7 +1471,8 @@ static int fm10k_poll(struct napi_struct *napi, int budget) int work = fm10k_clean_rx_irq(q_vector, ring, per_ring_budget); work_done += work; - clean_complete &= !!(work < per_ring_budget); + if (work >= per_ring_budget) + clean_complete = false; } /* If all work not completed, return budget and keep polling */ From 2d0f76bedbddaacc465c7a3ebdc9f8c13f68d931 Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Wed, 9 Mar 2016 16:36:08 -0800 Subject: [PATCH 0865/1649] fm10k: use DRV_SUMMARY to reduce code duplication Use DRV_SUMMARY, similar to DRV_VERSION so that we don't have to duplicate the driver summary in multiple places. 
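The change itself is the usual define-once-and-reuse pattern; a tiny hypothetical module skeleton (names invented, not fm10k's) for comparison:

#include <linux/init.h>
#include <linux/module.h>
#include <linux/printk.h>

#define EXAMPLE_DRV_VERSION "0.1.0"
#define EXAMPLE_DRV_SUMMARY "Example Network Driver"

/* one definition feeds both the boot banner and the module metadata */
static const char example_driver_string[] = EXAMPLE_DRV_SUMMARY;

static int __init example_init(void)
{
        pr_info("%s - version %s\n", example_driver_string,
                EXAMPLE_DRV_VERSION);
        return 0;
}
module_init(example_init);

MODULE_DESCRIPTION(EXAMPLE_DRV_SUMMARY);
MODULE_VERSION(EXAMPLE_DRV_VERSION);
MODULE_LICENSE("GPL");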
Signed-off-by: Jacob Keller Tested-by: Krishneil Singh Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/fm10k/fm10k_main.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c index 97650802a4cc..ca5b9d7eeb22 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c @@ -29,15 +29,15 @@ #include "fm10k.h" #define DRV_VERSION "0.19.3-k" +#define DRV_SUMMARY "Intel(R) Ethernet Switch Host Interface Driver" const char fm10k_driver_version[] = DRV_VERSION; char fm10k_driver_name[] = "fm10k"; -static const char fm10k_driver_string[] = - "Intel(R) Ethernet Switch Host Interface Driver"; +static const char fm10k_driver_string[] = DRV_SUMMARY; static const char fm10k_copyright[] = "Copyright (c) 2013 Intel Corporation."; MODULE_AUTHOR("Intel Corporation, "); -MODULE_DESCRIPTION("Intel(R) Ethernet Switch Host Interface Driver"); +MODULE_DESCRIPTION(DRV_SUMMARY); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_VERSION); From 1e4c32f3ede19bdb738aec2cc3cf69439d7b9310 Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Fri, 11 Mar 2016 09:52:32 -0800 Subject: [PATCH 0866/1649] fm10k: prevent RCU issues during AER events During an AER action response, we were calling fm10k_close without holding the rtnl_lock() which could lead to possible RCU warnings being produced due to 64bit stat updates among other causes. Similarly, we need rtnl_lock() around fm10k_open during fm10k_io_resume. Follow the same pattern elsewhere in the driver and protect the entire open/close sequence. Signed-off-by: Jacob Keller Tested-by: Krishneil Singh Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/fm10k/fm10k_pci.c | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c index f0992950e228..a604513d0451 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c @@ -2271,6 +2271,8 @@ static pci_ers_result_t fm10k_io_error_detected(struct pci_dev *pdev, if (state == pci_channel_io_perm_failure) return PCI_ERS_RESULT_DISCONNECT; + rtnl_lock(); + if (netif_running(netdev)) fm10k_close(netdev); @@ -2279,6 +2281,8 @@ static pci_ers_result_t fm10k_io_error_detected(struct pci_dev *pdev, /* free interrupts */ fm10k_clear_queueing_scheme(interface); + rtnl_unlock(); + pci_disable_device(pdev); /* Request a slot reset. */ @@ -2349,11 +2353,13 @@ static void fm10k_io_resume(struct pci_dev *pdev) /* reset statistics starting values */ hw->mac.ops.rebind_hw_stats(hw, &interface->stats); + rtnl_lock(); + err = fm10k_init_queueing_scheme(interface); if (err) { dev_err(&interface->pdev->dev, "init_queueing_scheme failed: %d\n", err); - return; + goto unlock; } /* reassociate interrupts */ @@ -2370,6 +2376,9 @@ static void fm10k_io_resume(struct pci_dev *pdev) if (!err) netif_device_attach(netdev); + +unlock: + rtnl_unlock(); } static const struct pci_error_handlers fm10k_err_handler = { From 9de6a1a6b8ed889ecd3ae13bb0a2459485d90a24 Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Fri, 1 Apr 2016 16:17:31 -0700 Subject: [PATCH 0867/1649] fm10k: drop 1588 support The 1588 support within fm10k does not work correctly with the current version of the switch management software, and likely never worked correctly to begin with. Remove support for PTP/1588. 
Update copyright year for all these files while we're touching them. Signed-off-by: Jacob Keller Tested-by: Krishneil Singh Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/fm10k/Makefile | 3 +- drivers/net/ethernet/intel/fm10k/fm10k.h | 34 +- .../net/ethernet/intel/fm10k/fm10k_ethtool.c | 30 +- drivers/net/ethernet/intel/fm10k/fm10k_main.c | 22 +- .../net/ethernet/intel/fm10k/fm10k_netdev.c | 22 +- drivers/net/ethernet/intel/fm10k/fm10k_pci.c | 110 +---- drivers/net/ethernet/intel/fm10k/fm10k_pf.c | 101 +--- drivers/net/ethernet/intel/fm10k/fm10k_pf.h | 17 +- drivers/net/ethernet/intel/fm10k/fm10k_ptp.c | 462 ------------------ drivers/net/ethernet/intel/fm10k/fm10k_type.h | 17 +- drivers/net/ethernet/intel/fm10k/fm10k_vf.c | 57 +-- drivers/net/ethernet/intel/fm10k/fm10k_vf.h | 12 +- 12 files changed, 11 insertions(+), 876 deletions(-) delete mode 100644 drivers/net/ethernet/intel/fm10k/fm10k_ptp.c diff --git a/drivers/net/ethernet/intel/fm10k/Makefile b/drivers/net/ethernet/intel/fm10k/Makefile index b006ff66d028..2aeaa39d9a25 100644 --- a/drivers/net/ethernet/intel/fm10k/Makefile +++ b/drivers/net/ethernet/intel/fm10k/Makefile @@ -1,7 +1,7 @@ ################################################################################ # # Intel Ethernet Switch Host Interface Driver -# Copyright(c) 2013 - 2015 Intel Corporation. +# Copyright(c) 2013 - 2016 Intel Corporation. # # This program is free software; you can redistribute it and/or modify it # under the terms and conditions of the GNU General Public License, @@ -30,7 +30,6 @@ obj-$(CONFIG_FM10K) += fm10k.o fm10k-y := fm10k_main.o \ fm10k_common.o \ fm10k_pci.o \ - fm10k_ptp.o \ fm10k_netdev.o \ fm10k_ethtool.o \ fm10k_pf.o \ diff --git a/drivers/net/ethernet/intel/fm10k/fm10k.h b/drivers/net/ethernet/intel/fm10k/fm10k.h index 9c7fafef7cf6..c21fa8699fc4 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k.h +++ b/drivers/net/ethernet/intel/fm10k/fm10k.h @@ -1,5 +1,5 @@ /* Intel Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2015 Intel Corporation. + * Copyright(c) 2013 - 2016 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -27,9 +27,6 @@ #include #include #include -#include -#include -#include #include "fm10k_pf.h" #include "fm10k_vf.h" @@ -342,22 +339,8 @@ struct fm10k_intfc { #ifdef CONFIG_DEBUG_FS struct dentry *dbg_intfc; - #endif /* CONFIG_DEBUG_FS */ - struct ptp_clock_info ptp_caps; - struct ptp_clock *ptp_clock; - struct sk_buff_head ts_tx_skb_queue; - u32 tx_hwtstamp_timeouts; - - struct hwtstamp_config ts_config; - /* We are unable to actually adjust the clock beyond the frequency - * value. Once the clock is started there is no resetting it. As - * such we maintain a separate offset from the actual hardware clock - * to allow for offset adjustment. 
- */ - s64 ptp_adjust; - rwlock_t systime_lock; #ifdef CONFIG_DCB u8 pfc_en; #endif @@ -546,21 +529,6 @@ static inline void fm10k_dbg_init(void) {} static inline void fm10k_dbg_exit(void) {} #endif /* CONFIG_DEBUG_FS */ -/* Time Stamping */ -void fm10k_systime_to_hwtstamp(struct fm10k_intfc *interface, - struct skb_shared_hwtstamps *hwtstamp, - u64 systime); -void fm10k_ts_tx_enqueue(struct fm10k_intfc *interface, struct sk_buff *skb); -void fm10k_ts_tx_hwtstamp(struct fm10k_intfc *interface, __le16 dglort, - u64 systime); -void fm10k_ts_reset(struct fm10k_intfc *interface); -void fm10k_ts_init(struct fm10k_intfc *interface); -void fm10k_ts_tx_subtask(struct fm10k_intfc *interface); -void fm10k_ptp_register(struct fm10k_intfc *interface); -void fm10k_ptp_unregister(struct fm10k_intfc *interface); -int fm10k_get_ts_config(struct net_device *netdev, struct ifreq *ifr); -int fm10k_set_ts_config(struct net_device *netdev, struct ifreq *ifr); - /* DCB */ #ifdef CONFIG_DCB void fm10k_dcbnl_set_ops(struct net_device *dev); diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c index 6ab9df52f301..ca276c0a4b8d 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c @@ -1,5 +1,5 @@ /* Intel Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2015 Intel Corporation. + * Copyright(c) 2013 - 2016 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -77,8 +77,6 @@ static const struct fm10k_stats fm10k_gstrings_global_stats[] = { FM10K_STAT("mac_rules_avail", hw.swapi.mac.avail), FM10K_STAT("tx_hang_count", tx_timeout_count), - - FM10K_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts), }; static const struct fm10k_stats fm10k_gstrings_pf_stats[] = { @@ -1140,31 +1138,6 @@ static int fm10k_set_channels(struct net_device *dev, return fm10k_setup_tc(dev, netdev_get_num_tc(dev)); } -static int fm10k_get_ts_info(struct net_device *dev, - struct ethtool_ts_info *info) -{ - struct fm10k_intfc *interface = netdev_priv(dev); - - info->so_timestamping = - SOF_TIMESTAMPING_TX_SOFTWARE | - SOF_TIMESTAMPING_RX_SOFTWARE | - SOF_TIMESTAMPING_SOFTWARE | - SOF_TIMESTAMPING_TX_HARDWARE | - SOF_TIMESTAMPING_RX_HARDWARE | - SOF_TIMESTAMPING_RAW_HARDWARE; - - if (interface->ptp_clock) - info->phc_index = ptp_clock_index(interface->ptp_clock); - else - info->phc_index = -1; - - info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON); - - info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | BIT(HWTSTAMP_FILTER_ALL); - - return 0; -} - static const struct ethtool_ops fm10k_ethtool_ops = { .get_strings = fm10k_get_strings, .get_sset_count = fm10k_get_sset_count, @@ -1192,7 +1165,6 @@ static const struct ethtool_ops fm10k_ethtool_ops = { .set_rxfh = fm10k_set_rssh, .get_channels = fm10k_get_channels, .set_channels = fm10k_set_channels, - .get_ts_info = fm10k_get_ts_info, }; void fm10k_set_ethtool_ops(struct net_device *dev) diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c index ca5b9d7eeb22..58092e523bbe 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c @@ -1,5 +1,5 @@ /* Intel Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2014 Intel Corporation. + * Copyright(c) 2013 - 2016 Intel Corporation. 
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -424,19 +424,6 @@ static inline void fm10k_rx_hash(struct fm10k_ring *ring, PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3); } -static void fm10k_rx_hwtstamp(struct fm10k_ring *rx_ring, - union fm10k_rx_desc *rx_desc, - struct sk_buff *skb) -{ - struct fm10k_intfc *interface = rx_ring->q_vector->interface; - - FM10K_CB(skb)->tstamp = rx_desc->q.timestamp; - - if (unlikely(interface->flags & FM10K_FLAG_RX_TS_ENABLED)) - fm10k_systime_to_hwtstamp(interface, skb_hwtstamps(skb), - le64_to_cpu(rx_desc->q.timestamp)); -} - static void fm10k_type_trans(struct fm10k_ring *rx_ring, union fm10k_rx_desc __maybe_unused *rx_desc, struct sk_buff *skb) @@ -486,8 +473,6 @@ static unsigned int fm10k_process_skb_fields(struct fm10k_ring *rx_ring, fm10k_rx_checksum(rx_ring, rx_desc, skb); - fm10k_rx_hwtstamp(rx_ring, rx_desc, skb); - FM10K_CB(skb)->fi.w.vlan = rx_desc->w.vlan; skb_record_rx_queue(skb, rx_ring->queue_index); @@ -912,11 +897,6 @@ static u8 fm10k_tx_desc_flags(struct sk_buff *skb, u32 tx_flags) /* set type for advanced descriptor with frame checksum insertion */ u32 desc_flags = 0; - /* set timestamping bits */ - if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && - likely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) - desc_flags |= FM10K_TXD_FLAG_TIME; - /* set checksum offload bits */ desc_flags |= FM10K_SET_FLAG(tx_flags, FM10K_TX_FLAGS_CSUM, FM10K_TXD_FLAG_CSUM); diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c index 1d0f0583222c..32778dda8c12 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c @@ -1,5 +1,5 @@ /* Intel Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2015 Intel Corporation. + * Copyright(c) 2013 - 2016 Intel Corporation. 
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -243,9 +243,6 @@ void fm10k_clean_all_tx_rings(struct fm10k_intfc *interface) for (i = 0; i < interface->num_tx_queues; i++) fm10k_clean_tx_ring(interface->tx_ring[i]); - - /* remove any stale timestamp buffers and free them */ - skb_queue_purge(&interface->ts_tx_skb_queue); } /** @@ -660,10 +657,6 @@ static netdev_tx_t fm10k_xmit_frame(struct sk_buff *skb, struct net_device *dev) __skb_put(skb, pad_len); } - /* prepare packet for hardware time stamping */ - if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) - fm10k_ts_tx_enqueue(interface, skb); - if (r_idx >= interface->num_tx_queues) r_idx %= interface->num_tx_queues; @@ -1213,18 +1206,6 @@ static int __fm10k_setup_tc(struct net_device *dev, u32 handle, __be16 proto, return fm10k_setup_tc(dev, tc->tc); } -static int fm10k_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) -{ - switch (cmd) { - case SIOCGHWTSTAMP: - return fm10k_get_ts_config(netdev, ifr); - case SIOCSHWTSTAMP: - return fm10k_set_ts_config(netdev, ifr); - default: - return -EOPNOTSUPP; - } -} - static void fm10k_assign_l2_accel(struct fm10k_intfc *interface, struct fm10k_l2_accel *l2_accel) { @@ -1402,7 +1383,6 @@ static const struct net_device_ops fm10k_netdev_ops = { .ndo_get_vf_config = fm10k_ndo_get_vf_config, .ndo_add_vxlan_port = fm10k_add_vxlan_port, .ndo_del_vxlan_port = fm10k_del_vxlan_port, - .ndo_do_ioctl = fm10k_ioctl, .ndo_dfwd_add_station = fm10k_dfwd_add_station, .ndo_dfwd_del_station = fm10k_dfwd_del_station, #ifdef CONFIG_NET_POLL_CONTROLLER diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c index a604513d0451..29e9402c4352 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c @@ -1,5 +1,5 @@ /* Intel Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2015 Intel Corporation. + * Copyright(c) 2013 - 2016 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -209,9 +209,6 @@ static void fm10k_reinit(struct fm10k_intfc *interface) netdev->features |= NETIF_F_HW_VLAN_CTAG_RX; } - /* reset clock */ - fm10k_ts_reset(interface); - err = netif_running(netdev) ? 
fm10k_open(netdev) : 0; if (err) goto err_open; @@ -559,7 +556,6 @@ static void fm10k_service_task(struct work_struct *work) /* tasks only run when interface is up */ fm10k_watchdog_subtask(interface); fm10k_check_hang_subtask(interface); - fm10k_ts_tx_subtask(interface); /* release lock on service events to allow scheduling next event */ fm10k_service_event_complete(interface); @@ -1204,25 +1200,6 @@ static s32 fm10k_mbx_mac_addr(struct fm10k_hw *hw, u32 **results, return 0; } -static s32 fm10k_1588_msg_vf(struct fm10k_hw *hw, u32 **results, - struct fm10k_mbx_info __always_unused *mbx) -{ - struct fm10k_intfc *interface; - u64 timestamp; - s32 err; - - err = fm10k_tlv_attr_get_u64(results[FM10K_1588_MSG_TIMESTAMP], - ×tamp); - if (err) - return err; - - interface = container_of(hw, struct fm10k_intfc, hw); - - fm10k_ts_tx_hwtstamp(interface, 0, timestamp); - - return 0; -} - /* generic error handler for mailbox issues */ static s32 fm10k_mbx_error(struct fm10k_hw *hw, u32 **results, struct fm10k_mbx_info __always_unused *mbx) @@ -1243,7 +1220,6 @@ static const struct fm10k_msg_data vf_mbx_data[] = { FM10K_TLV_MSG_TEST_HANDLER(fm10k_tlv_msg_test), FM10K_VF_MSG_MAC_VLAN_HANDLER(fm10k_mbx_mac_addr), FM10K_VF_MSG_LPORT_STATE_HANDLER(fm10k_msg_lport_state_vf), - FM10K_VF_MSG_1588_HANDLER(fm10k_1588_msg_vf), FM10K_TLV_MSG_ERROR_HANDLER(fm10k_mbx_error), }; @@ -1341,68 +1317,6 @@ static s32 fm10k_update_pvid(struct fm10k_hw *hw, u32 **results, return 0; } -static s32 fm10k_1588_msg_pf(struct fm10k_hw *hw, u32 **results, - struct fm10k_mbx_info __always_unused *mbx) -{ - struct fm10k_swapi_1588_timestamp timestamp; - struct fm10k_iov_data *iov_data; - struct fm10k_intfc *interface; - u16 sglort, vf_idx; - s32 err; - - err = fm10k_tlv_attr_get_le_struct( - results[FM10K_PF_ATTR_ID_1588_TIMESTAMP], - ×tamp, sizeof(timestamp)); - if (err) - return err; - - interface = container_of(hw, struct fm10k_intfc, hw); - - if (timestamp.dglort) { - fm10k_ts_tx_hwtstamp(interface, timestamp.dglort, - le64_to_cpu(timestamp.egress)); - return 0; - } - - /* either dglort or sglort must be set */ - if (!timestamp.sglort) - return FM10K_ERR_PARAM; - - /* verify GLORT is at least one of the ones we own */ - sglort = le16_to_cpu(timestamp.sglort); - if (!fm10k_glort_valid_pf(hw, sglort)) - return FM10K_ERR_PARAM; - - if (sglort == interface->glort) { - fm10k_ts_tx_hwtstamp(interface, 0, - le64_to_cpu(timestamp.ingress)); - return 0; - } - - /* if there is no iov_data then there is no mailbox to process */ - if (!ACCESS_ONCE(interface->iov_data)) - return FM10K_ERR_PARAM; - - rcu_read_lock(); - - /* notify VF if this timestamp belongs to it */ - iov_data = interface->iov_data; - vf_idx = (hw->mac.dglort_map & FM10K_DGLORTMAP_NONE) - sglort; - - if (!iov_data || vf_idx >= iov_data->num_vfs) { - err = FM10K_ERR_PARAM; - goto err_unlock; - } - - err = hw->iov.ops.report_timestamp(hw, &iov_data->vf_info[vf_idx], - le64_to_cpu(timestamp.ingress)); - -err_unlock: - rcu_read_unlock(); - - return err; -} - static const struct fm10k_msg_data pf_mbx_data[] = { FM10K_PF_MSG_ERR_HANDLER(XCAST_MODES, fm10k_msg_err_pf), FM10K_PF_MSG_ERR_HANDLER(UPDATE_MAC_FWD_RULE, fm10k_msg_err_pf), @@ -1410,7 +1324,6 @@ static const struct fm10k_msg_data pf_mbx_data[] = { FM10K_PF_MSG_ERR_HANDLER(LPORT_CREATE, fm10k_msg_err_pf), FM10K_PF_MSG_ERR_HANDLER(LPORT_DELETE, fm10k_msg_err_pf), FM10K_PF_MSG_UPDATE_PVID_HANDLER(fm10k_update_pvid), - FM10K_PF_MSG_1588_TIMESTAMP_HANDLER(fm10k_1588_msg_pf), FM10K_TLV_MSG_ERROR_HANDLER(fm10k_mbx_error), }; @@ 
-1789,18 +1702,9 @@ static int fm10k_sw_init(struct fm10k_intfc *interface, return -EIO; } - /* assign BAR 4 resources for use with PTP */ - if (fm10k_read_reg(hw, FM10K_CTRL) & FM10K_CTRL_BAR4_ALLOWED) - interface->sw_addr = ioremap(pci_resource_start(pdev, 4), - pci_resource_len(pdev, 4)); - hw->sw_addr = interface->sw_addr; - /* initialize DCBNL interface */ fm10k_dcbnl_set_ops(netdev); - /* Intitialize timestamp data */ - fm10k_ts_init(interface); - /* set default ring sizes */ interface->tx_ring_count = FM10K_DEFAULT_TXD; interface->rx_ring_count = FM10K_DEFAULT_RXD; @@ -2018,9 +1922,6 @@ static int fm10k_probe(struct pci_dev *pdev, const struct pci_device_id *ent) /* kick off service timer now, even when interface is down */ mod_timer(&interface->service_timer, (HZ * 2) + jiffies); - /* Register PTP interface */ - fm10k_ptp_register(interface); - /* print warning for non-optimal configurations */ fm10k_slot_warn(interface); @@ -2077,9 +1978,6 @@ static void fm10k_remove(struct pci_dev *pdev) if (netdev->reg_state == NETREG_REGISTERED) unregister_netdev(netdev); - /* cleanup timestamp handling */ - fm10k_ptp_unregister(interface); - /* release VFs */ fm10k_iov_disable(pdev); @@ -2152,9 +2050,6 @@ static int fm10k_resume(struct pci_dev *pdev) /* reset statistics starting values */ hw->mac.ops.rebind_hw_stats(hw, &interface->stats); - /* reset clock */ - fm10k_ts_reset(interface); - rtnl_lock(); err = fm10k_init_queueing_scheme(interface); @@ -2365,9 +2260,6 @@ static void fm10k_io_resume(struct pci_dev *pdev) /* reassociate interrupts */ fm10k_mbx_request_irq(interface); - /* reset clock */ - fm10k_ts_reset(interface); - if (netif_running(netdev)) err = fm10k_open(netdev); diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c index ecc99f9d2cce..c8e8ce5a8327 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c @@ -1,5 +1,5 @@ /* Intel Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2015 Intel Corporation. + * Copyright(c) 2013 - 2016 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -1140,19 +1140,6 @@ static void fm10k_iov_update_stats_pf(struct fm10k_hw *hw, fm10k_update_hw_stats_q(hw, q, idx, qpp); } -static s32 fm10k_iov_report_timestamp_pf(struct fm10k_hw *hw, - struct fm10k_vf_info *vf_info, - u64 timestamp) -{ - u32 msg[4]; - - /* generate port state response to notify VF it is not ready */ - fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_1588); - fm10k_tlv_attr_put_u64(msg, FM10K_1588_MSG_TIMESTAMP, timestamp); - - return vf_info->mbx.ops.enqueue_tx(hw, &vf_info->mbx, msg); -} - /** * fm10k_iov_msg_msix_pf - Message handler for MSI-X request from VF * @hw: Pointer to hardware structure @@ -1773,89 +1760,6 @@ s32 fm10k_msg_err_pf(struct fm10k_hw *hw, u32 **results, return 0; } -const struct fm10k_tlv_attr fm10k_1588_timestamp_msg_attr[] = { - FM10K_TLV_ATTR_LE_STRUCT(FM10K_PF_ATTR_ID_1588_TIMESTAMP, - sizeof(struct fm10k_swapi_1588_timestamp)), - FM10K_TLV_ATTR_LAST -}; - -/* currently there is no shared 1588 timestamp handler */ - -/** - * fm10k_adjust_systime_pf - Adjust systime frequency - * @hw: pointer to hardware structure - * @ppb: adjustment rate in parts per billion - * - * This function will adjust the SYSTIME_CFG register contained in BAR 4 - * if this function is supported for BAR 4 access. 
The adjustment amount - * is based on the parts per billion value provided and adjusted to a - * value based on parts per 2^48 clock cycles. - * - * If adjustment is not supported or the requested value is too large - * we will return an error. - **/ -static s32 fm10k_adjust_systime_pf(struct fm10k_hw *hw, s32 ppb) -{ - u64 systime_adjust; - - /* if sw_addr is not set we don't have switch register access */ - if (!hw->sw_addr) - return ppb ? FM10K_ERR_PARAM : 0; - - /* we must convert the value from parts per billion to parts per - * 2^48 cycles. In addition I have opted to only use the 30 most - * significant bits of the adjustment value as the 8 least - * significant bits are located in another register and represent - * a value significantly less than a part per billion, the result - * of dropping the 8 least significant bits is that the adjustment - * value is effectively multiplied by 2^8 when we write it. - * - * As a result of all this the math for this breaks down as follows: - * ppb / 10^9 == adjust * 2^8 / 2^48 - * If we solve this for adjust, and simplify it comes out as: - * ppb * 2^31 / 5^9 == adjust - */ - systime_adjust = (ppb < 0) ? -ppb : ppb; - systime_adjust <<= 31; - do_div(systime_adjust, 1953125); - - /* verify the requested adjustment value is in range */ - if (systime_adjust > FM10K_SW_SYSTIME_ADJUST_MASK) - return FM10K_ERR_PARAM; - - if (ppb > 0) - systime_adjust |= FM10K_SW_SYSTIME_ADJUST_DIR_POSITIVE; - - fm10k_write_sw_reg(hw, FM10K_SW_SYSTIME_ADJUST, (u32)systime_adjust); - - return 0; -} - -/** - * fm10k_read_systime_pf - Reads value of systime registers - * @hw: pointer to the hardware structure - * - * Function reads the content of 2 registers, combined to represent a 64 bit - * value measured in nanosecods. In order to guarantee the value is accurate - * we check the 32 most significant bits both before and after reading the - * 32 least significant bits to verify they didn't change as we were reading - * the registers. - **/ -static u64 fm10k_read_systime_pf(struct fm10k_hw *hw) -{ - u32 systime_l, systime_h, systime_tmp; - - systime_h = fm10k_read_reg(hw, FM10K_SYSTIME + 1); - - do { - systime_tmp = systime_h; - systime_l = fm10k_read_reg(hw, FM10K_SYSTIME); - systime_h = fm10k_read_reg(hw, FM10K_SYSTIME + 1); - } while (systime_tmp != systime_h); - - return ((u64)systime_h << 32) | systime_l; -} - static const struct fm10k_msg_data fm10k_msg_data_pf[] = { FM10K_PF_MSG_ERR_HANDLER(XCAST_MODES, fm10k_msg_err_pf), FM10K_PF_MSG_ERR_HANDLER(UPDATE_MAC_FWD_RULE, fm10k_msg_err_pf), @@ -1885,8 +1789,6 @@ static const struct fm10k_mac_ops mac_ops_pf = { .set_dma_mask = fm10k_set_dma_mask_pf, .get_fault = fm10k_get_fault_pf, .get_host_state = fm10k_get_host_state_pf, - .adjust_systime = fm10k_adjust_systime_pf, - .read_systime = fm10k_read_systime_pf, }; static const struct fm10k_iov_ops iov_ops_pf = { @@ -1898,7 +1800,6 @@ static const struct fm10k_iov_ops iov_ops_pf = { .set_lport = fm10k_iov_set_lport_pf, .reset_lport = fm10k_iov_reset_lport_pf, .update_stats = fm10k_iov_update_stats_pf, - .report_timestamp = fm10k_iov_report_timestamp_pf, }; static s32 fm10k_get_invariants_pf(struct fm10k_hw *hw) diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.h b/drivers/net/ethernet/intel/fm10k/fm10k_pf.h index b2d96b45ca3c..b78210d06213 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.h +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.h @@ -1,5 +1,5 @@ /* Intel Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2015 Intel Corporation. 
+ * Copyright(c) 2013 - 2016 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -42,8 +42,6 @@ enum fm10k_pf_tlv_msg_id_v1 { FM10K_PF_MSG_ID_UPDATE_FLOW = 0x503, FM10K_PF_MSG_ID_DELETE_FLOW = 0x504, FM10K_PF_MSG_ID_SET_FLOW_STATE = 0x505, - FM10K_PF_MSG_ID_GET_1588_INFO = 0x506, - FM10K_PF_MSG_ID_1588_TIMESTAMP = 0x701, }; enum fm10k_pf_tlv_attr_id_v1 { @@ -61,7 +59,6 @@ enum fm10k_pf_tlv_attr_id_v1 { FM10K_PF_ATTR_ID_DELETE_FLOW = 0x0B, FM10K_PF_ATTR_ID_PORT = 0x0C, FM10K_PF_ATTR_ID_UPDATE_PVID = 0x0D, - FM10K_PF_ATTR_ID_1588_TIMESTAMP = 0x10, }; #define FM10K_MSG_LPORT_MAP_GLORT_SHIFT 0 @@ -100,13 +97,6 @@ struct fm10k_swapi_error { struct fm10k_global_table_data ffu; } __aligned(4) __packed; -struct fm10k_swapi_1588_timestamp { - __le64 egress; - __le64 ingress; - __le16 dglort; - __le16 sglort; -} __aligned(4) __packed; - s32 fm10k_msg_lport_map_pf(struct fm10k_hw *, u32 **, struct fm10k_mbx_info *); extern const struct fm10k_tlv_attr fm10k_lport_map_msg_attr[]; #define FM10K_PF_MSG_LPORT_MAP_HANDLER(func) \ @@ -122,11 +112,6 @@ extern const struct fm10k_tlv_attr fm10k_err_msg_attr[]; #define FM10K_PF_MSG_ERR_HANDLER(msg, func) \ FM10K_MSG_HANDLER(FM10K_PF_MSG_ID_##msg, fm10k_err_msg_attr, func) -extern const struct fm10k_tlv_attr fm10k_1588_timestamp_msg_attr[]; -#define FM10K_PF_MSG_1588_TIMESTAMP_HANDLER(func) \ - FM10K_MSG_HANDLER(FM10K_PF_MSG_ID_1588_TIMESTAMP, \ - fm10k_1588_timestamp_msg_attr, func) - s32 fm10k_iov_msg_msix_pf(struct fm10k_hw *, u32 **, struct fm10k_mbx_info *); s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *, u32 **, struct fm10k_mbx_info *); diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ptp.c b/drivers/net/ethernet/intel/fm10k/fm10k_ptp.c deleted file mode 100644 index 1c1ccade6538..000000000000 --- a/drivers/net/ethernet/intel/fm10k/fm10k_ptp.c +++ /dev/null @@ -1,462 +0,0 @@ -/* Intel Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2015 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List - * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 - */ - -#include -#include - -#include "fm10k.h" - -#define FM10K_TS_TX_TIMEOUT (HZ * 15) - -void fm10k_systime_to_hwtstamp(struct fm10k_intfc *interface, - struct skb_shared_hwtstamps *hwtstamp, - u64 systime) -{ - unsigned long flags; - - read_lock_irqsave(&interface->systime_lock, flags); - systime += interface->ptp_adjust; - read_unlock_irqrestore(&interface->systime_lock, flags); - - hwtstamp->hwtstamp = ns_to_ktime(systime); -} - -static struct sk_buff *fm10k_ts_tx_skb(struct fm10k_intfc *interface, - __le16 dglort) -{ - struct sk_buff_head *list = &interface->ts_tx_skb_queue; - struct sk_buff *skb; - - skb_queue_walk(list, skb) { - if (FM10K_CB(skb)->fi.w.dglort == dglort) - return skb; - } - - return NULL; -} - -void fm10k_ts_tx_enqueue(struct fm10k_intfc *interface, struct sk_buff *skb) -{ - struct sk_buff_head *list = &interface->ts_tx_skb_queue; - struct sk_buff *clone; - unsigned long flags; - - /* create clone for us to return on the Tx path */ - clone = skb_clone_sk(skb); - if (!clone) - return; - - FM10K_CB(clone)->ts_tx_timeout = jiffies + FM10K_TS_TX_TIMEOUT; - spin_lock_irqsave(&list->lock, flags); - - /* attempt to locate any buffers with the same dglort, - * if none are present then insert skb in tail of list - */ - skb = fm10k_ts_tx_skb(interface, FM10K_CB(clone)->fi.w.dglort); - if (!skb) { - skb_shinfo(clone)->tx_flags |= SKBTX_IN_PROGRESS; - __skb_queue_tail(list, clone); - } - - spin_unlock_irqrestore(&list->lock, flags); - - /* if list is already has one then we just free the clone */ - if (skb) - dev_kfree_skb(clone); -} - -void fm10k_ts_tx_hwtstamp(struct fm10k_intfc *interface, __le16 dglort, - u64 systime) -{ - struct skb_shared_hwtstamps shhwtstamps; - struct sk_buff_head *list = &interface->ts_tx_skb_queue; - struct sk_buff *skb; - unsigned long flags; - - spin_lock_irqsave(&list->lock, flags); - - /* attempt to locate and pull the sk_buff out of the list */ - skb = fm10k_ts_tx_skb(interface, dglort); - if (skb) - __skb_unlink(skb, list); - - spin_unlock_irqrestore(&list->lock, flags); - - /* if not found do nothing */ - if (!skb) - return; - - /* timestamp the sk_buff and free out copy */ - fm10k_systime_to_hwtstamp(interface, &shhwtstamps, systime); - skb_tstamp_tx(skb, &shhwtstamps); - dev_kfree_skb_any(skb); -} - -void fm10k_ts_tx_subtask(struct fm10k_intfc *interface) -{ - struct sk_buff_head *list = &interface->ts_tx_skb_queue; - struct sk_buff *skb, *tmp; - unsigned long flags; - - /* If we're down or resetting, just bail */ - if (test_bit(__FM10K_DOWN, &interface->state) || - test_bit(__FM10K_RESETTING, &interface->state)) - return; - - spin_lock_irqsave(&list->lock, flags); - - /* walk though the list and flush any expired timestamp packets */ - skb_queue_walk_safe(list, skb, tmp) { - if (!time_is_after_jiffies(FM10K_CB(skb)->ts_tx_timeout)) - continue; - __skb_unlink(skb, list); - kfree_skb(skb); - interface->tx_hwtstamp_timeouts++; - } - - spin_unlock_irqrestore(&list->lock, flags); -} - -static u64 fm10k_systime_read(struct fm10k_intfc *interface) -{ - struct fm10k_hw *hw = &interface->hw; - - return hw->mac.ops.read_systime(hw); -} - -void fm10k_ts_reset(struct fm10k_intfc *interface) -{ - s64 ns = ktime_to_ns(ktime_get_real()); - unsigned long flags; - - /* reinitialize the clock */ - write_lock_irqsave(&interface->systime_lock, flags); - interface->ptp_adjust = fm10k_systime_read(interface) - ns; - write_unlock_irqrestore(&interface->systime_lock, flags); -} - -void fm10k_ts_init(struct fm10k_intfc 
*interface) -{ - /* Initialize lock protecting systime access */ - rwlock_init(&interface->systime_lock); - - /* Initialize skb queue for pending timestamp requests */ - skb_queue_head_init(&interface->ts_tx_skb_queue); - - /* reset the clock to current kernel time */ - fm10k_ts_reset(interface); -} - -/** - * fm10k_get_ts_config - get current hardware timestamping configuration - * @netdev: network interface device structure - * @ifreq: ioctl data - * - * This function returns the current timestamping settings. Rather than - * attempt to deconstruct registers to fill in the values, simply keep a copy - * of the old settings around, and return a copy when requested. - */ -int fm10k_get_ts_config(struct net_device *netdev, struct ifreq *ifr) -{ - struct fm10k_intfc *interface = netdev_priv(netdev); - struct hwtstamp_config *config = &interface->ts_config; - - return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ? - -EFAULT : 0; -} - -/** - * fm10k_set_ts_config - control hardware time stamping - * @netdev: network interface device structure - * @ifreq: ioctl data - * - * Outgoing time stamping can be enabled and disabled. Play nice and - * disable it when requested, although it shouldn't cause any overhead - * when no packet needs it. At most one packet in the queue may be - * marked for time stamping, otherwise it would be impossible to tell - * for sure to which packet the hardware time stamp belongs. - * - * Incoming time stamping has to be configured via the hardware - * filters. Not all combinations are supported, in particular event - * type has to be specified. Matching the kind of event packet is - * not supported, with the exception of "all V2 events regardless of - * level 2 or 4". - * - * Since hardware always timestamps Path delay packets when timestamping V2 - * packets, regardless of the type specified in the register, only use V2 - * Event mode. This more accurately tells the user what the hardware is going - * to do anyways. - */ -int fm10k_set_ts_config(struct net_device *netdev, struct ifreq *ifr) -{ - struct fm10k_intfc *interface = netdev_priv(netdev); - struct hwtstamp_config ts_config; - - if (copy_from_user(&ts_config, ifr->ifr_data, sizeof(ts_config))) - return -EFAULT; - - /* reserved for future extensions */ - if (ts_config.flags) - return -EINVAL; - - switch (ts_config.tx_type) { - case HWTSTAMP_TX_OFF: - break; - case HWTSTAMP_TX_ON: - /* we likely need some check here to see if this is supported */ - break; - default: - return -ERANGE; - } - - switch (ts_config.rx_filter) { - case HWTSTAMP_FILTER_NONE: - interface->flags &= ~FM10K_FLAG_RX_TS_ENABLED; - break; - case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: - case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: - case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: - case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: - case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: - case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: - case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: - case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: - case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: - case HWTSTAMP_FILTER_PTP_V2_EVENT: - case HWTSTAMP_FILTER_PTP_V2_SYNC: - case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: - case HWTSTAMP_FILTER_ALL: - interface->flags |= FM10K_FLAG_RX_TS_ENABLED; - ts_config.rx_filter = HWTSTAMP_FILTER_ALL; - break; - default: - return -ERANGE; - } - - /* save these settings for future reference */ - interface->ts_config = ts_config; - - return copy_to_user(ifr->ifr_data, &ts_config, sizeof(ts_config)) ? 
- -EFAULT : 0; -} - -static int fm10k_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb) -{ - struct fm10k_intfc *interface; - struct fm10k_hw *hw; - int err; - - interface = container_of(ptp, struct fm10k_intfc, ptp_caps); - hw = &interface->hw; - - err = hw->mac.ops.adjust_systime(hw, ppb); - - /* the only error we should see is if the value is out of range */ - return (err == FM10K_ERR_PARAM) ? -ERANGE : err; -} - -static int fm10k_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) -{ - struct fm10k_intfc *interface; - unsigned long flags; - - interface = container_of(ptp, struct fm10k_intfc, ptp_caps); - - write_lock_irqsave(&interface->systime_lock, flags); - interface->ptp_adjust += delta; - write_unlock_irqrestore(&interface->systime_lock, flags); - - return 0; -} - -static int fm10k_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts) -{ - struct fm10k_intfc *interface; - unsigned long flags; - u64 now; - - interface = container_of(ptp, struct fm10k_intfc, ptp_caps); - - read_lock_irqsave(&interface->systime_lock, flags); - now = fm10k_systime_read(interface) + interface->ptp_adjust; - read_unlock_irqrestore(&interface->systime_lock, flags); - - *ts = ns_to_timespec64(now); - - return 0; -} - -static int fm10k_ptp_settime(struct ptp_clock_info *ptp, - const struct timespec64 *ts) -{ - struct fm10k_intfc *interface; - unsigned long flags; - u64 ns = timespec64_to_ns(ts); - - interface = container_of(ptp, struct fm10k_intfc, ptp_caps); - - write_lock_irqsave(&interface->systime_lock, flags); - interface->ptp_adjust = fm10k_systime_read(interface) - ns; - write_unlock_irqrestore(&interface->systime_lock, flags); - - return 0; -} - -static int fm10k_ptp_enable(struct ptp_clock_info *ptp, - struct ptp_clock_request *rq, - int __always_unused on) -{ - struct ptp_clock_time *t = &rq->perout.period; - struct fm10k_intfc *interface; - struct fm10k_hw *hw; - u64 period; - u32 step; - - /* we can only support periodic output */ - if (rq->type != PTP_CLK_REQ_PEROUT) - return -EINVAL; - - /* verify the requested channel is there */ - if (rq->perout.index >= ptp->n_per_out) - return -EINVAL; - - /* we cannot enforce start time as there is no - * mechanism for that in the hardware, we can only control - * the period. 
- */ - - /* we cannot support periods greater than 4 seconds due to reg limit */ - if (t->sec > 4 || t->sec < 0) - return -ERANGE; - - interface = container_of(ptp, struct fm10k_intfc, ptp_caps); - hw = &interface->hw; - - /* we simply cannot support the operation if we don't have BAR4 */ - if (!hw->sw_addr) - return -ENOTSUPP; - - /* convert to unsigned 64b ns, verify we can put it in a 32b register */ - period = t->sec * 1000000000LL + t->nsec; - - /* determine the minimum size for period */ - step = 2 * (fm10k_read_reg(hw, FM10K_SYSTIME_CFG) & - FM10K_SYSTIME_CFG_STEP_MASK); - - /* verify the value is in range supported by hardware */ - if ((period && (period < step)) || (period > U32_MAX)) - return -ERANGE; - - /* notify hardware of request to being sending pulses */ - fm10k_write_sw_reg(hw, FM10K_SW_SYSTIME_PULSE(rq->perout.index), - (u32)period); - - return 0; -} - -static struct ptp_pin_desc fm10k_ptp_pd[2] = { - { - .name = "IEEE1588_PULSE0", - .index = 0, - .func = PTP_PF_PEROUT, - .chan = 0 - }, - { - .name = "IEEE1588_PULSE1", - .index = 1, - .func = PTP_PF_PEROUT, - .chan = 1 - } -}; - -static int fm10k_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin, - enum ptp_pin_function func, unsigned int chan) -{ - /* verify the requested pin is there */ - if (pin >= ptp->n_pins || !ptp->pin_config) - return -EINVAL; - - /* enforce locked channels, no changing them */ - if (chan != ptp->pin_config[pin].chan) - return -EINVAL; - - /* we want to keep the functions locked as well */ - if (func != ptp->pin_config[pin].func) - return -EINVAL; - - return 0; -} - -void fm10k_ptp_register(struct fm10k_intfc *interface) -{ - struct ptp_clock_info *ptp_caps = &interface->ptp_caps; - struct device *dev = &interface->pdev->dev; - struct ptp_clock *ptp_clock; - - snprintf(ptp_caps->name, sizeof(ptp_caps->name), - "%s", interface->netdev->name); - ptp_caps->owner = THIS_MODULE; - /* This math is simply the inverse of the math in - * fm10k_adjust_systime_pf applied to an adjustment value - * of 2^30 - 1 which is the maximum value of the register: - * max_ppb == ((2^30 - 1) * 5^9) / 2^31 - */ - ptp_caps->max_adj = 976562; - ptp_caps->adjfreq = fm10k_ptp_adjfreq; - ptp_caps->adjtime = fm10k_ptp_adjtime; - ptp_caps->gettime64 = fm10k_ptp_gettime; - ptp_caps->settime64 = fm10k_ptp_settime; - - /* provide pins if BAR4 is accessible */ - if (interface->sw_addr) { - /* enable periodic outputs */ - ptp_caps->n_per_out = 2; - ptp_caps->enable = fm10k_ptp_enable; - - /* enable clock pins */ - ptp_caps->verify = fm10k_ptp_verify; - ptp_caps->n_pins = 2; - ptp_caps->pin_config = fm10k_ptp_pd; - } - - ptp_clock = ptp_clock_register(ptp_caps, dev); - if (IS_ERR(ptp_clock)) { - ptp_clock = NULL; - dev_err(dev, "ptp_clock_register failed\n"); - } else { - dev_info(dev, "registered PHC device %s\n", ptp_caps->name); - } - - interface->ptp_clock = ptp_clock; -} - -void fm10k_ptp_unregister(struct fm10k_intfc *interface) -{ - struct ptp_clock *ptp_clock = interface->ptp_clock; - struct device *dev = &interface->pdev->dev; - - if (!ptp_clock) - return; - - interface->ptp_clock = NULL; - - ptp_clock_unregister(ptp_clock); - dev_info(dev, "removed PHC %s\n", interface->ptp_caps.name); -} diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_type.h b/drivers/net/ethernet/intel/fm10k/fm10k_type.h index 5c0533054c5f..995dceefec25 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_type.h +++ b/drivers/net/ethernet/intel/fm10k/fm10k_type.h @@ -1,5 +1,5 @@ /* Intel Ethernet Switch Host Interface Driver - * Copyright(c) 2013 
- 2015 Intel Corporation. + * Copyright(c) 2013 - 2016 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -225,11 +225,6 @@ struct fm10k_hw; #define FM10K_STATS_LOOPBACK_DROP 0x3806 #define FM10K_STATS_NODESC_DROP 0x3807 -/* Timesync registers */ -#define FM10K_SYSTIME 0x3814 -#define FM10K_SYSTIME_CFG 0x3818 -#define FM10K_SYSTIME_CFG_STEP_MASK 0x0000000F - /* PCIe state registers */ #define FM10K_PHYADDR 0x381C @@ -381,12 +376,6 @@ struct fm10k_hw; #define FM10K_VFSYSTIME 0x00040 #define FM10K_VFITR(_n) ((_n) + 0x00060) -/* Registers contained in BAR 4 for Switch management */ -#define FM10K_SW_SYSTIME_ADJUST 0x0224D -#define FM10K_SW_SYSTIME_ADJUST_MASK 0x3FFFFFFF -#define FM10K_SW_SYSTIME_ADJUST_DIR_POSITIVE 0x80000000 -#define FM10K_SW_SYSTIME_PULSE(_n) ((_n) + 0x02252) - enum fm10k_int_source { fm10k_int_mailbox = 0, fm10k_int_pcie_fault = 1, @@ -550,8 +539,6 @@ struct fm10k_mac_ops { struct fm10k_dglort_cfg *); void (*set_dma_mask)(struct fm10k_hw *, u64); s32 (*get_fault)(struct fm10k_hw *, int, struct fm10k_fault *); - s32 (*adjust_systime)(struct fm10k_hw *, s32 ppb); - u64 (*read_systime)(struct fm10k_hw *); }; enum fm10k_mac_type { @@ -643,7 +630,6 @@ struct fm10k_iov_ops { s32 (*set_lport)(struct fm10k_hw *, struct fm10k_vf_info *, u16, u8); void (*reset_lport)(struct fm10k_hw *, struct fm10k_vf_info *); void (*update_stats)(struct fm10k_hw *, struct fm10k_hw_stats_q *, u16); - s32 (*report_timestamp)(struct fm10k_hw *, struct fm10k_vf_info *, u64); }; struct fm10k_iov_info { @@ -667,7 +653,6 @@ struct fm10k_info { struct fm10k_hw { u32 __iomem *hw_addr; - u32 __iomem *sw_addr; void *back; struct fm10k_mac_info mac; struct fm10k_bus_info bus; diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_vf.c b/drivers/net/ethernet/intel/fm10k/fm10k_vf.c index 91f8d7311f3b..86c358c37d3f 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_vf.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_vf.c @@ -1,5 +1,5 @@ /* Intel Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2015 Intel Corporation. + * Copyright(c) 2013 - 2016 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -451,13 +451,6 @@ static s32 fm10k_update_xcast_mode_vf(struct fm10k_hw *hw, u16 glort, u8 mode) return mbx->ops.enqueue_tx(hw, mbx, msg); } -const struct fm10k_tlv_attr fm10k_1588_msg_attr[] = { - FM10K_TLV_ATTR_U64(FM10K_1588_MSG_TIMESTAMP), - FM10K_TLV_ATTR_LAST -}; - -/* currently there is no shared 1588 timestamp handler */ - /** * fm10k_update_hw_stats_vf - Updates hardware related statistics of VF * @hw: pointer to hardware structure @@ -509,52 +502,6 @@ static s32 fm10k_configure_dglort_map_vf(struct fm10k_hw *hw, return 0; } -/** - * fm10k_adjust_systime_vf - Adjust systime frequency - * @hw: pointer to hardware structure - * @ppb: adjustment rate in parts per billion - * - * This function takes an adjustment rate in parts per billion and will - * verify that this value is 0 as the VF cannot support adjusting the - * systime clock. 
- * - * If the ppb value is non-zero the return is ERR_PARAM else success - **/ -static s32 fm10k_adjust_systime_vf(struct fm10k_hw *hw, s32 ppb) -{ - /* The VF cannot adjust the clock frequency, however it should - * already have a syntonic clock with whichever host interface is - * running as the master for the host interface clock domain so - * there should be not frequency adjustment necessary. - */ - return ppb ? FM10K_ERR_PARAM : 0; -} - -/** - * fm10k_read_systime_vf - Reads value of systime registers - * @hw: pointer to the hardware structure - * - * Function reads the content of 2 registers, combined to represent a 64 bit - * value measured in nanoseconds. In order to guarantee the value is accurate - * we check the 32 most significant bits both before and after reading the - * 32 least significant bits to verify they didn't change as we were reading - * the registers. - **/ -static u64 fm10k_read_systime_vf(struct fm10k_hw *hw) -{ - u32 systime_l, systime_h, systime_tmp; - - systime_h = fm10k_read_reg(hw, FM10K_VFSYSTIME + 1); - - do { - systime_tmp = systime_h; - systime_l = fm10k_read_reg(hw, FM10K_VFSYSTIME); - systime_h = fm10k_read_reg(hw, FM10K_VFSYSTIME + 1); - } while (systime_tmp != systime_h); - - return ((u64)systime_h << 32) | systime_l; -} - static const struct fm10k_msg_data fm10k_msg_data_vf[] = { FM10K_TLV_MSG_TEST_HANDLER(fm10k_tlv_msg_test), FM10K_VF_MSG_MAC_VLAN_HANDLER(fm10k_msg_mac_vlan_vf), @@ -579,8 +526,6 @@ static const struct fm10k_mac_ops mac_ops_vf = { .rebind_hw_stats = fm10k_rebind_hw_stats_vf, .configure_dglort_map = fm10k_configure_dglort_map_vf, .get_host_state = fm10k_get_host_state_generic, - .adjust_systime = fm10k_adjust_systime_vf, - .read_systime = fm10k_read_systime_vf, }; static s32 fm10k_get_invariants_vf(struct fm10k_hw *hw) diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_vf.h b/drivers/net/ethernet/intel/fm10k/fm10k_vf.h index c4439f1313a0..f0932f944793 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_vf.h +++ b/drivers/net/ethernet/intel/fm10k/fm10k_vf.h @@ -1,5 +1,5 @@ /* Intel Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2014 Intel Corporation. + * Copyright(c) 2013 - 2016 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -29,7 +29,6 @@ enum fm10k_vf_tlv_msg_id { FM10K_VF_MSG_ID_MSIX, FM10K_VF_MSG_ID_MAC_VLAN, FM10K_VF_MSG_ID_LPORT_STATE, - FM10K_VF_MSG_ID_1588, FM10K_VF_MSG_ID_MAX, }; @@ -49,11 +48,6 @@ enum fm10k_tlv_lport_state_attr_id { FM10K_LPORT_STATE_MSG_MAX }; -enum fm10k_tlv_1588_attr_id { - FM10K_1588_MSG_TIMESTAMP, - FM10K_1588_MSG_MAX -}; - #define FM10K_VF_MSG_MSIX_HANDLER(func) \ FM10K_MSG_HANDLER(FM10K_VF_MSG_ID_MSIX, NULL, func) @@ -70,9 +64,5 @@ extern const struct fm10k_tlv_attr fm10k_lport_state_msg_attr[]; FM10K_MSG_HANDLER(FM10K_VF_MSG_ID_LPORT_STATE, \ fm10k_lport_state_msg_attr, func) -extern const struct fm10k_tlv_attr fm10k_1588_msg_attr[]; -#define FM10K_VF_MSG_1588_HANDLER(func) \ - FM10K_MSG_HANDLER(FM10K_VF_MSG_ID_1588, fm10k_1588_msg_attr, func) - extern const struct fm10k_info fm10k_vf_info; #endif /* _FM10K_VF_H */ From 8998763a7b57583ef2e07f68ea6a7d05bcfc1cfa Mon Sep 17 00:00:00 2001 From: Ngai-Mint Kwan Date: Fri, 1 Apr 2016 16:17:32 -0700 Subject: [PATCH 0868/1649] fm10k: Fix multicast mode sync issues Multicast mode checking is no longer a requirement to perform unicast and multicast address syncs. 
Specifically, a device operating in promiscuous and/or all multicast mode is not excluded. The issue occurs when the netdev is pre-configured to either multicast mode and is enabled for the first time. The multicast-group table in the Switch Manager will be missing obvious multicast entries associated to this netdev. Changes were also made to disallow unicast and multicast syncs with VLAN 0. The Switch Manager considers VLAN 0 to be an invalid entry. Requests with VLAN 0 by the netdev are only generated when the driver is freshly installed and the default VID is not set. Signed-off-by: Ngai-Mint Kwan Signed-off-by: Jacob Keller Tested-by: Krishneil Singh Signed-off-by: Jeff Kirsher --- .../net/ethernet/intel/fm10k/fm10k_netdev.c | 20 +++++++------------ 1 file changed, 7 insertions(+), 13 deletions(-) diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c index 32778dda8c12..bf229d54c20c 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c @@ -877,7 +877,7 @@ static int __fm10k_uc_sync(struct net_device *dev, return -EADDRNOTAVAIL; /* update table with current entries */ - for (vid = hw->mac.default_vid ? fm10k_find_next_vlan(interface, 0) : 0; + for (vid = hw->mac.default_vid ? fm10k_find_next_vlan(interface, 0) : 1; vid < VLAN_N_VID; vid = fm10k_find_next_vlan(interface, vid)) { err = hw->mac.ops.update_uc_addr(hw, glort, addr, @@ -940,7 +940,7 @@ static int __fm10k_mc_sync(struct net_device *dev, u16 vid, glort = interface->glort; /* update table with current entries */ - for (vid = hw->mac.default_vid ? fm10k_find_next_vlan(interface, 0) : 0; + for (vid = hw->mac.default_vid ? fm10k_find_next_vlan(interface, 0) : 1; vid < VLAN_N_VID; vid = fm10k_find_next_vlan(interface, vid)) { hw->mac.ops.update_mc_addr(hw, glort, addr, vid, sync); @@ -995,11 +995,8 @@ static void fm10k_set_rx_mode(struct net_device *dev) } /* synchronize all of the addresses */ - if (xcast_mode != FM10K_XCAST_MODE_PROMISC) { - __dev_uc_sync(dev, fm10k_uc_sync, fm10k_uc_unsync); - if (xcast_mode != FM10K_XCAST_MODE_ALLMULTI) - __dev_mc_sync(dev, fm10k_mc_sync, fm10k_mc_unsync); - } + __dev_uc_sync(dev, fm10k_uc_sync, fm10k_uc_unsync); + __dev_mc_sync(dev, fm10k_mc_sync, fm10k_mc_unsync); fm10k_mbx_unlock(interface); } @@ -1037,7 +1034,7 @@ void fm10k_restore_rx_state(struct fm10k_intfc *interface) hw->mac.ops.update_vlan(hw, 0, 0, true); /* update table with current entries */ - for (vid = hw->mac.default_vid ? fm10k_find_next_vlan(interface, 0) : 0; + for (vid = hw->mac.default_vid ? 
fm10k_find_next_vlan(interface, 0) : 1; vid < VLAN_N_VID; vid = fm10k_find_next_vlan(interface, vid)) { hw->mac.ops.update_vlan(hw, vid, 0, true); @@ -1049,11 +1046,8 @@ void fm10k_restore_rx_state(struct fm10k_intfc *interface) hw->mac.ops.update_xcast_mode(hw, glort, xcast_mode); /* synchronize all of the addresses */ - if (xcast_mode != FM10K_XCAST_MODE_PROMISC) { - __dev_uc_sync(netdev, fm10k_uc_sync, fm10k_uc_unsync); - if (xcast_mode != FM10K_XCAST_MODE_ALLMULTI) - __dev_mc_sync(netdev, fm10k_mc_sync, fm10k_mc_unsync); - } + __dev_uc_sync(netdev, fm10k_uc_sync, fm10k_uc_unsync); + __dev_mc_sync(netdev, fm10k_mc_sync, fm10k_mc_unsync); fm10k_mbx_unlock(interface); From a7a7783adabc3cc7599f7dbf97fcc3b0d44087b7 Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Fri, 1 Apr 2016 16:17:33 -0700 Subject: [PATCH 0869/1649] fm10k: correctly handle LPORT_MAP error Currently, any error responses from the switch manager after an LPORT_MAP request are silently ignored. At most the mailbox message will be reported as an error. This can result in unexpected behavior when the switch manager has configured a port with zero bandwidth. Add support for reading the fm10k_swapi_error structure from LPORT_MAP responses. If the message contains the necessary TLV and has a non-zero error code, report link down, clear the dglort_map, and delay the next get_host_state call by a reasonable delay. Also log an error message indicating that the LPORT_MAP request failed. The delay ensures preventing an interrupt storm on the switch manager, and reduces the number of mailbox messages we send in this scenario drastically. Signed-off-by: Jacob Keller Tested-by: Krishneil Singh Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/fm10k/fm10k.h | 1 + drivers/net/ethernet/intel/fm10k/fm10k_pci.c | 31 +++++++++++++++++++- drivers/net/ethernet/intel/fm10k/fm10k_pf.c | 2 ++ drivers/net/ethernet/intel/fm10k/fm10k_pf.h | 2 ++ 4 files changed, 35 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/intel/fm10k/fm10k.h b/drivers/net/ethernet/intel/fm10k/fm10k.h index c21fa8699fc4..5efae40698cc 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k.h +++ b/drivers/net/ethernet/intel/fm10k/fm10k.h @@ -330,6 +330,7 @@ struct fm10k_intfc { unsigned long last_reset; unsigned long link_down_event; bool host_ready; + bool lport_map_failed; u32 reta[FM10K_RETA_SIZE]; u32 rssrk[FM10K_RSSRK_SIZE]; diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c index 29e9402c4352..9055681cf34d 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c @@ -1263,11 +1263,40 @@ static s32 fm10k_lport_map(struct fm10k_hw *hw, u32 **results, u32 dglort_map = hw->mac.dglort_map; s32 err; + interface = container_of(hw, struct fm10k_intfc, hw); + + err = fm10k_msg_err_pf(hw, results, mbx); + if (!err && hw->swapi.status) { + /* force link down for a reasonable delay */ + interface->link_down_event = jiffies + (2 * HZ); + set_bit(__FM10K_LINK_DOWN, &interface->state); + + /* reset dglort_map back to no config */ + hw->mac.dglort_map = FM10K_DGLORTMAP_NONE; + + fm10k_service_event_schedule(interface); + + /* prevent overloading kernel message buffer */ + if (interface->lport_map_failed) + return 0; + + interface->lport_map_failed = true; + + if (hw->swapi.status == FM10K_MSG_ERR_PEP_NOT_SCHEDULED) + dev_warn(&interface->pdev->dev, + "cannot obtain link because the host interface is configured for a PCIe host interface bandwidth of zero\n"); + 
dev_warn(&interface->pdev->dev, + "request logical port map failed: %d\n", + hw->swapi.status); + + return 0; + } + err = fm10k_msg_lport_map_pf(hw, results, mbx); if (err) return err; - interface = container_of(hw, struct fm10k_intfc, hw); + interface->lport_map_failed = false; /* we need to reset if port count was just updated */ if (dglort_map != hw->mac.dglort_map) diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c index c8e8ce5a8327..865f5c2da9d0 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c @@ -1620,6 +1620,8 @@ out: /* This structure defines the attibutes to be parsed below */ const struct fm10k_tlv_attr fm10k_lport_map_msg_attr[] = { + FM10K_TLV_ATTR_LE_STRUCT(FM10K_PF_ATTR_ID_ERR, + sizeof(struct fm10k_swapi_error)), FM10K_TLV_ATTR_U32(FM10K_PF_ATTR_ID_LPORT_MAP), FM10K_TLV_ATTR_LAST }; diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.h b/drivers/net/ethernet/intel/fm10k/fm10k_pf.h index b78210d06213..d4a34657b861 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.h +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.h @@ -71,6 +71,8 @@ enum fm10k_pf_tlv_attr_id_v1 { #define FM10K_MSG_UPDATE_PVID_PVID_SHIFT 16 #define FM10K_MSG_UPDATE_PVID_PVID_SIZE 16 +#define FM10K_MSG_ERR_PEP_NOT_SCHEDULED 280 + /* The following data structures are overlayed directly onto TLV mailbox * messages, and must not break 4 byte alignment. Ensure the structures line * up correctly as per their TLV definition. From 3417415c3a86d6bae8bfee495ce634f4d24e16b8 Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Fri, 1 Apr 2016 16:17:34 -0700 Subject: [PATCH 0870/1649] fm10k: do not disable PCI device in fm10k_io_error_detected fm10k_io_error_detected() does not need to call pci_disable_device(). In the cases where the reset needs to occur, the stack flow will result in calling fm10k_remove() which already disables the PCI device. If we leave the pci_disable_device(), we result in a warning about disabling an already disabled device. Many PCI drivers do call pci_disable_device() in their .error_detected() routines, but it does not appear to be required. In addition, these drivers have a check "is_pci_enabled()" call in their remove routines, which is how they chose to handle the duplicate device disable. This seems incorrect, since the PCI device structure is reference counted. It is very possible that the reference count for the PCI device could be greater than 1. In this case, you would remove the PCI device within the error_detected routine, reducing count to 1, then remove it again in the remove function, reducing it to zero. This would result in yet another disable somewhere else failing. Thus, we shouldn't be using is_pci_enabled() to check for this issue. Instead, just remove the extraneous pci_device_disable() found within the error_detected routine. Signed-off-by: Jacob Keller Tested-by: Krishneil Singh Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/fm10k/fm10k_pci.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c index 9055681cf34d..1d833782d917 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c @@ -2207,8 +2207,6 @@ static pci_ers_result_t fm10k_io_error_detected(struct pci_dev *pdev, rtnl_unlock(); - pci_disable_device(pdev); - /* Request a slot reset. 
*/ return PCI_ERS_RESULT_NEED_RESET; } From 4e160f2a59cec8f705583edfaa11ce5f3b3ef4a6 Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Fri, 1 Apr 2016 16:17:35 -0700 Subject: [PATCH 0871/1649] fm10k: fix documentation of fm10k_tlv_parse_attr fm10k_tlv_parse_attr is supposed to return FM10K_NOT_IMPLEMENTED for any TLV who's attribute id lies outside the range of results. It does not do this today. In addition, the documentation does not indicate that other attributes which are not implemented for a given TLV will be silently ignored. Fix this. Clean up the logic so that we don't rely on the fact that FM10K_NOT_IMPLEMENTED is greater than zero, as this can easily cause confusion. A future extension could look into some way of reporting unknown TLVs in order to make issues more easily discoverable. We can't just return FM10K_NOT_IMPLEMENTED here because we don't want to drop the entire message if it has an unknown TLV. While here, update the copyright year. Signed-off-by: Jacob Keller Tested-by: Krishneil Singh Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/fm10k/fm10k_tlv.c | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c index b999897e50d8..6b500a6378e0 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c @@ -1,5 +1,5 @@ /* Intel Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2015 Intel Corporation. + * Copyright(c) 2013 - 2016 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -481,7 +481,8 @@ static s32 fm10k_tlv_attr_validate(u32 *attr, * up into an array of pointers stored in results. The function will * return FM10K_ERR_PARAM on any input or message error, * FM10K_NOT_IMPLEMENTED for any attribute that is outside of the array - * and 0 on success. + * and 0 on success. Any attributes not found in tlv_attr will be silently + * ignored. **/ static s32 fm10k_tlv_attr_parse(u32 *attr, u32 **results, const struct fm10k_tlv_attr *tlv_attr) @@ -518,14 +519,15 @@ static s32 fm10k_tlv_attr_parse(u32 *attr, u32 **results, while (offset < len) { attr_id = *attr & FM10K_TLV_ID_MASK; - if (attr_id < FM10K_TLV_RESULTS_MAX) - err = fm10k_tlv_attr_validate(attr, tlv_attr); - else - err = FM10K_NOT_IMPLEMENTED; + if (attr_id >= FM10K_TLV_RESULTS_MAX) + return FM10K_NOT_IMPLEMENTED; - if (err < 0) + err = fm10k_tlv_attr_validate(attr, tlv_attr); + if (err == FM10K_NOT_IMPLEMENTED) + ; /* silently ignore non-implemented attributes */ + else if (err) return err; - if (!err) + else results[attr_id] = attr; /* update offset */ From d057d9a9446636293b4884d1a0da6ad5a7ef4e13 Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Fri, 1 Apr 2016 16:17:36 -0700 Subject: [PATCH 0872/1649] fm10k: use 8bit notation instead of 10bit notation for diagram The diagram represents bit layout of the multi-bit VLAN update message format. Typically these diagrams are drawn using some power of 2 as the base, to more easily grasp where fields split. Although the numbers above can make it somewhat easy to understand which bit you're looking at, it makes the break points not line up. Re-draw the numbers using base 8, and mark the bit values every 8 bits at the top. This should make it more easy to grasp the table quickly. 
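For readers decoding the re-drawn table by hand, here is a hedged, standalone C sketch of how the fields of that 32-bit VLAN multi-bit write word line up. It is illustrative only: the EX_* names and the helper are invented for this example, the shift and the C bit follow the FM10K_VLAN_LENGTH_SHIFT and FM10K_VLAN_CLEAR definitions that appear elsewhere in this series, and the field widths are inferred from the reserved-bit checks rather than taken from hardware documentation.

/* Illustrative decode of the 32-bit VLAN multi-bit write word from the
 * diagram below; not driver code. Bit positions assume VLAN ID in bits
 * 0..11, the C (clear) flag in bit 15, and Length in bits 16..27.
 */
#include <stdint.h>
#include <stdio.h>

#define EX_VLAN_VID_MASK	0x00000fffu	/* bits  0..11: VLAN ID */
#define EX_VLAN_CLEAR		(1u << 15)	/* bit  15:     C       */
#define EX_VLAN_LEN_SHIFT	16		/* bits 16..27: Length  */
#define EX_VLAN_LEN_MASK	0x00000fffu

static void ex_decode_vlan_word(uint32_t w)
{
	printf("vid=%u clear=%d length=%u\n",
	       (unsigned int)(w & EX_VLAN_VID_MASK),
	       !!(w & EX_VLAN_CLEAR),
	       (unsigned int)((w >> EX_VLAN_LEN_SHIFT) & EX_VLAN_LEN_MASK));
}

int main(void)
{
	ex_decode_vlan_word((1u << EX_VLAN_LEN_SHIFT) | 100u);	/* VID 100, length 1, C clear */
	ex_decode_vlan_word(EX_VLAN_CLEAR | 100u);		/* VID 100, C set */
	return 0;
}
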
Signed-off-by: Jacob Keller Tested-by: Krishneil Singh Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/fm10k/fm10k_pf.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c index 865f5c2da9d0..ffe98056755b 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c @@ -219,8 +219,8 @@ static s32 fm10k_update_vlan_pf(struct fm10k_hw *hw, u32 vid, u8 vsi, bool set) /* VLAN multi-bit write: * The multi-bit write has several parts to it. - * 3 2 1 0 - * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 + * 24 16 8 0 + * 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | RSVD0 | Length |C|RSVD0| VLAN ID | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ From 5c69df8a33408c82ac633c521be0acf71a690d43 Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Fri, 1 Apr 2016 16:17:37 -0700 Subject: [PATCH 0873/1649] fm10k: use different name than FM10K_VLAN_CLEAR for override bit Use a new #define FM10K_VLAN_OVERRIDE even though we're using the exact same bit. The reason for this is clarity in the code, otherwise you can read FM10K_VLAN_CLEAR and think it should be removed. Also add a comment explaining why the FM10K_VLAN_OVERRIDE bit is set. Signed-off-by: Jacob Keller Tested-by: Krishneil Singh Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/fm10k/fm10k_pf.c | 8 ++++++-- drivers/net/ethernet/intel/fm10k/fm10k_type.h | 1 + drivers/net/ethernet/intel/fm10k/fm10k_vf.c | 2 +- 3 files changed, 8 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c index ffe98056755b..88d5acf484d0 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c @@ -864,9 +864,13 @@ static s32 fm10k_iov_assign_default_mac_vlan_pf(struct fm10k_hw *hw, fm10k_write_reg(hw, FM10K_TQMAP(qmap_idx), 0); fm10k_write_reg(hw, FM10K_TXDCTL(vf_q_idx), 0); - /* determine correct default VLAN ID */ + /* Determine correct default VLAN ID. The FM10K_VLAN_OVERRIDE bit is + * used here to indicate to the VF that it will not have privilege to + * write VLAN_TABLE. All policy is enforced on the PF but this allows + * the VF to correctly report errors to userspace rqeuests. 
+ */ if (vf_info->pf_vid) - vf_vid = vf_info->pf_vid | FM10K_VLAN_CLEAR; + vf_vid = vf_info->pf_vid | FM10K_VLAN_OVERRIDE; else vf_vid = vf_info->sw_vid; diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_type.h b/drivers/net/ethernet/intel/fm10k/fm10k_type.h index 995dceefec25..f3f37a49806e 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_type.h +++ b/drivers/net/ethernet/intel/fm10k/fm10k_type.h @@ -350,6 +350,7 @@ struct fm10k_hw; #define FM10K_VLAN_TABLE_VSI_MAX 64 #define FM10K_VLAN_LENGTH_SHIFT 16 #define FM10K_VLAN_CLEAR BIT(15) +#define FM10K_VLAN_OVERRIDE FM10K_VLAN_CLEAR #define FM10K_VLAN_ALL \ ((FM10K_VLAN_TABLE_VID_MAX - 1) << FM10K_VLAN_LENGTH_SHIFT) diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_vf.c b/drivers/net/ethernet/intel/fm10k/fm10k_vf.c index 86c358c37d3f..801238dec624 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_vf.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_vf.c @@ -228,7 +228,7 @@ s32 fm10k_msg_mac_vlan_vf(struct fm10k_hw *hw, u32 **results, ether_addr_copy(hw->mac.perm_addr, perm_addr); hw->mac.default_vid = vid & (FM10K_VLAN_TABLE_VID_MAX - 1); - hw->mac.vlan_override = !!(vid & FM10K_VLAN_CLEAR); + hw->mac.vlan_override = !!(vid & FM10K_VLAN_OVERRIDE); return 0; } From fb6515c8f03bbfdc99cff156becd5e14df1dd601 Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Fri, 1 Apr 2016 16:17:38 -0700 Subject: [PATCH 0874/1649] fm10k: update comment regarding reserved bits check The original comment may be read incorrectly as referring to checking the *entire* length is zero. However, it merely checks only the reserved bits of both length and reserved in a small amount of code. Update the comment to indicate this is a clever trick and clearly spell out that it only checks the reserve bits. Signed-off-by: Jacob Keller Tested-by: Krishneil Singh Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/fm10k/fm10k_vf.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_vf.c b/drivers/net/ethernet/intel/fm10k/fm10k_vf.c index 801238dec624..0440706eeb82 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_vf.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_vf.c @@ -188,7 +188,7 @@ static s32 fm10k_update_vlan_vf(struct fm10k_hw *hw, u32 vid, u8 vsi, bool set) if (vsi) return FM10K_ERR_PARAM; - /* verify upper 4 bits of vid and length are 0 */ + /* clever trick to verify reserved bits in both vid and length */ if ((vid << 16 | vid) >> 28) return FM10K_ERR_PARAM; From 11ec36a974f59c99e8a4ff7040026569a43ab567 Mon Sep 17 00:00:00 2001 From: Ngai-Mint Kwan Date: Fri, 1 Apr 2016 16:17:39 -0700 Subject: [PATCH 0875/1649] fm10k: Reset multicast mode when deleting lport Deleting lport when multicast mode is configured to FM10K_XCAST_MODE_ALLMULTI or FM10K_XCAST_MODE_PROMISC will result in generating orphaned multicast-group entries in the switch manager. Before deleting the lport, reset multicast mode to FM10K_XCAST_MODE_NONE to flush out these multicast-group entries. 
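As a hedged illustration of the ordering this change enforces, the standalone sketch below prints the two steps in sequence; it is not driver code, and the ex_* stubs merely stand in for fm10k_update_xcast_mode_pf() and the lport delete mailbox message shown in the diff that follows.

/* Standalone sketch (not driver code): when a logical port is deleted while
 * the netdev was in ALLMULTI or PROMISC, multicast state is dropped first so
 * the switch manager does not keep orphaned multicast-group entries for a
 * port that no longer exists.
 */
#include <stdio.h>

static void ex_update_xcast_mode(unsigned int glort, const char *mode)
{
	printf("glort %#x: xcast mode -> %s\n", glort, mode);
}

static void ex_delete_lport(unsigned int glort)
{
	printf("glort %#x: lport delete message sent\n", glort);
}

static void ex_disable_lport(unsigned int glort)
{
	ex_update_xcast_mode(glort, "NONE");	/* flush multicast groups first */
	ex_delete_lport(glort);			/* then tear down the port */
}

int main(void)
{
	ex_disable_lport(0x100);
	return 0;
}
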
Signed-off-by: Ngai-Mint Kwan Signed-off-by: Jacob Keller Tested-by: Krishneil Singh Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/fm10k/fm10k_pf.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c index 88d5acf484d0..2105cb8d31cc 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c @@ -488,6 +488,10 @@ static s32 fm10k_update_lport_state_pf(struct fm10k_hw *hw, u16 glort, if (!fm10k_glort_valid_pf(hw, glort)) return FM10K_ERR_PARAM; + /* reset multicast mode if deleting lport */ + if (!enable) + fm10k_update_xcast_mode_pf(hw, glort, FM10K_XCAST_MODE_NONE); + /* construct the lport message from the 2 pieces of data we have */ lport_msg = ((u32)count << 16) | glort; From 540a5d859010a239a99aba02a9fed7b255c0033e Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Thu, 7 Apr 2016 08:21:20 -0700 Subject: [PATCH 0876/1649] fm10k: fix possible null pointer deref after kcalloc When writing a new default redirection table, we needed to populate a new RSS table using ethtool_rxfh_indir_default. We populated this table into a region of memory allocated using kcalloc, but never checked this for NULL. Fix this by moving the default table generation into fm10k_write_reta. If this function is passed a table, use it. Otherwise, generate the default table using ethtool_rxfh_indir_default, 4 at at time. Fixes: 0ea7fae44094 ("fm10k: use ethtool_rxfh_indir_default for default redirection table") Signed-off-by: Jacob Keller Signed-off-by: Jeff Kirsher --- .../net/ethernet/intel/fm10k/fm10k_ethtool.c | 26 ++++++++++++++----- drivers/net/ethernet/intel/fm10k/fm10k_main.c | 14 ++-------- 2 files changed, 22 insertions(+), 18 deletions(-) diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c index ca276c0a4b8d..e79e91500a0c 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c @@ -971,15 +971,29 @@ u32 fm10k_get_reta_size(struct net_device __always_unused *netdev) void fm10k_write_reta(struct fm10k_intfc *interface, const u32 *indir) { + u16 rss_i = interface->ring_feature[RING_F_RSS].indices; struct fm10k_hw *hw = &interface->hw; - int i; + u32 table[4]; + int i, j; /* record entries to reta table */ - for (i = 0; i < FM10K_RETA_SIZE; i++, indir += 4) { - u32 reta = indir[0] | - (indir[1] << 8) | - (indir[2] << 16) | - (indir[3] << 24); + for (i = 0; i < FM10K_RETA_SIZE; i++) { + u32 reta, n; + + /* generate a new table if we weren't given one */ + for (j = 0; j < 4; j++) { + if (indir) + n = indir[i + j]; + else + n = ethtool_rxfh_indir_default(i + j, rss_i); + + table[j] = n; + } + + reta = table[0] | + (table[1] << 8) | + (table[2] << 16) | + (table[3] << 24); if (interface->reta[i] == reta) continue; diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c index 58092e523bbe..aca3e4762da7 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c @@ -1927,8 +1927,7 @@ static void fm10k_assign_rings(struct fm10k_intfc *interface) static void fm10k_init_reta(struct fm10k_intfc *interface) { u16 i, rss_i = interface->ring_feature[RING_F_RSS].indices; - struct net_device *netdev = interface->netdev; - u32 reta, *indir; + u32 reta; /* If the Rx flow indirection table has been configured manually, we * need to maintain it when possible. 
@@ -1953,16 +1952,7 @@ static void fm10k_init_reta(struct fm10k_intfc *interface) } repopulate_reta: - indir = kcalloc(fm10k_get_reta_size(netdev), - sizeof(indir[0]), GFP_KERNEL); - - /* generate redirection table using the default kernel policy */ - for (i = 0; i < fm10k_get_reta_size(netdev); i++) - indir[i] = ethtool_rxfh_indir_default(i, rss_i); - - fm10k_write_reta(interface, indir); - - kfree(indir); + fm10k_write_reta(interface, NULL); } /** From 86641094678a90af278d1f44c0e47f817c9ba46e Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Thu, 7 Apr 2016 08:21:21 -0700 Subject: [PATCH 0877/1649] fm10k: consistently use Intel(R) for driver names Update every header file and other locations to consistently use Intel(R) instead of just Intel. Also update copyright year of files which we modified. Signed-off-by: Jacob Keller Tested-by: Krishneil Singh Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/fm10k/Makefile | 4 ++-- drivers/net/ethernet/intel/fm10k/fm10k.h | 2 +- drivers/net/ethernet/intel/fm10k/fm10k_common.c | 4 ++-- drivers/net/ethernet/intel/fm10k/fm10k_common.h | 4 ++-- drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c | 4 ++-- drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c | 4 ++-- drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c | 2 +- drivers/net/ethernet/intel/fm10k/fm10k_iov.c | 4 ++-- drivers/net/ethernet/intel/fm10k/fm10k_main.c | 4 ++-- drivers/net/ethernet/intel/fm10k/fm10k_mbx.c | 4 ++-- drivers/net/ethernet/intel/fm10k/fm10k_mbx.h | 4 ++-- drivers/net/ethernet/intel/fm10k/fm10k_netdev.c | 2 +- drivers/net/ethernet/intel/fm10k/fm10k_pci.c | 2 +- drivers/net/ethernet/intel/fm10k/fm10k_pf.c | 2 +- drivers/net/ethernet/intel/fm10k/fm10k_pf.h | 2 +- drivers/net/ethernet/intel/fm10k/fm10k_tlv.c | 2 +- drivers/net/ethernet/intel/fm10k/fm10k_tlv.h | 4 ++-- drivers/net/ethernet/intel/fm10k/fm10k_type.h | 2 +- drivers/net/ethernet/intel/fm10k/fm10k_vf.c | 2 +- drivers/net/ethernet/intel/fm10k/fm10k_vf.h | 2 +- 20 files changed, 30 insertions(+), 30 deletions(-) diff --git a/drivers/net/ethernet/intel/fm10k/Makefile b/drivers/net/ethernet/intel/fm10k/Makefile index 2aeaa39d9a25..cac645329cea 100644 --- a/drivers/net/ethernet/intel/fm10k/Makefile +++ b/drivers/net/ethernet/intel/fm10k/Makefile @@ -1,6 +1,6 @@ ################################################################################ # -# Intel Ethernet Switch Host Interface Driver +# Intel(R) Ethernet Switch Host Interface Driver # Copyright(c) 2013 - 2016 Intel Corporation. # # This program is free software; you can redistribute it and/or modify it @@ -22,7 +22,7 @@ ################################################################################ # -# Makefile for the Intel(R) FM10000 Ethernet Switch Host Interface driver +# Makefile for the Intel(R) Ethernet Switch Host Interface Driver # obj-$(CONFIG_FM10K) += fm10k.o diff --git a/drivers/net/ethernet/intel/fm10k/fm10k.h b/drivers/net/ethernet/intel/fm10k/fm10k.h index 5efae40698cc..fcf106e545c5 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k.h +++ b/drivers/net/ethernet/intel/fm10k/fm10k.h @@ -1,4 +1,4 @@ -/* Intel Ethernet Switch Host Interface Driver +/* Intel(R) Ethernet Switch Host Interface Driver * Copyright(c) 2013 - 2016 Intel Corporation. 
* * This program is free software; you can redistribute it and/or modify it diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_common.c b/drivers/net/ethernet/intel/fm10k/fm10k_common.c index 6cfae6ac04ea..5bbf19cfe29b 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_common.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_common.c @@ -1,5 +1,5 @@ -/* Intel Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2014 Intel Corporation. +/* Intel(R) Ethernet Switch Host Interface Driver + * Copyright(c) 2013 - 2016 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_common.h b/drivers/net/ethernet/intel/fm10k/fm10k_common.h index 45e4e5b1f20a..50f71e997448 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_common.h +++ b/drivers/net/ethernet/intel/fm10k/fm10k_common.h @@ -1,5 +1,5 @@ -/* Intel Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2014 Intel Corporation. +/* Intel(R) Ethernet Switch Host Interface Driver + * Copyright(c) 2013 - 2016 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c b/drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c index 2be4361839db..db4bd8bf9722 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c @@ -1,5 +1,5 @@ -/* Intel Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2015 Intel Corporation. +/* Intel(R) Ethernet Switch Host Interface Driver + * Copyright(c) 2013 - 2016 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c b/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c index 5d6137faf7d1..5116fd043630 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c @@ -1,5 +1,5 @@ -/* Intel Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2015 Intel Corporation. +/* Intel(R) Ethernet Switch Host Interface Driver + * Copyright(c) 2013 - 2016 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c index e79e91500a0c..9c0d87503977 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c @@ -1,4 +1,4 @@ -/* Intel Ethernet Switch Host Interface Driver +/* Intel(R) Ethernet Switch Host Interface Driver * Copyright(c) 2013 - 2016 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c index bbf7c4bac303..47f0743ec03b 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c @@ -1,5 +1,5 @@ -/* Intel Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2015 Intel Corporation. +/* Intel(R) Ethernet Switch Host Interface Driver + * Copyright(c) 2013 - 2016 Intel Corporation. 
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c index aca3e4762da7..b875f4243667 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c @@ -1,4 +1,4 @@ -/* Intel Ethernet Switch Host Interface Driver +/* Intel(R) Ethernet Switch Host Interface Driver * Copyright(c) 2013 - 2016 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it @@ -34,7 +34,7 @@ const char fm10k_driver_version[] = DRV_VERSION; char fm10k_driver_name[] = "fm10k"; static const char fm10k_driver_string[] = DRV_SUMMARY; static const char fm10k_copyright[] = - "Copyright (c) 2013 Intel Corporation."; + "Copyright (c) 2013 - 2016 Intel Corporation."; MODULE_AUTHOR("Intel Corporation, "); MODULE_DESCRIPTION(DRV_SUMMARY); diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c index 98202c3d591c..c9dfa6564fcf 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c @@ -1,5 +1,5 @@ -/* Intel Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2015 Intel Corporation. +/* Intel(R) Ethernet Switch Host Interface Driver + * Copyright(c) 2013 - 2016 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.h b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.h index 245a0a3dc32e..b7dbc8a84c05 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.h +++ b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.h @@ -1,5 +1,5 @@ -/* Intel Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2015 Intel Corporation. +/* Intel(R) Ethernet Switch Host Interface Driver + * Copyright(c) 2013 - 2016 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c index bf229d54c20c..2a08d3f5b6df 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c @@ -1,4 +1,4 @@ -/* Intel Ethernet Switch Host Interface Driver +/* Intel(R) Ethernet Switch Host Interface Driver * Copyright(c) 2013 - 2016 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c index 1d833782d917..404f47ae14b6 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c @@ -1,4 +1,4 @@ -/* Intel Ethernet Switch Host Interface Driver +/* Intel(R) Ethernet Switch Host Interface Driver * Copyright(c) 2013 - 2016 Intel Corporation. 
* * This program is free software; you can redistribute it and/or modify it diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c index 2105cb8d31cc..5b0ceec361e6 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c @@ -1,4 +1,4 @@ -/* Intel Ethernet Switch Host Interface Driver +/* Intel(R) Ethernet Switch Host Interface Driver * Copyright(c) 2013 - 2016 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.h b/drivers/net/ethernet/intel/fm10k/fm10k_pf.h index d4a34657b861..3336d3c10760 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.h +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.h @@ -1,4 +1,4 @@ -/* Intel Ethernet Switch Host Interface Driver +/* Intel(R) Ethernet Switch Host Interface Driver * Copyright(c) 2013 - 2016 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c index 6b500a6378e0..f8e87bf086b9 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c @@ -1,4 +1,4 @@ -/* Intel Ethernet Switch Host Interface Driver +/* Intel(R) Ethernet Switch Host Interface Driver * Copyright(c) 2013 - 2016 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.h b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.h index e1845e0a17d8..a1f1027fe184 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.h +++ b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.h @@ -1,5 +1,5 @@ -/* Intel Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2015 Intel Corporation. +/* Intel(R) Ethernet Switch Host Interface Driver + * Copyright(c) 2013 - 2016 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_type.h b/drivers/net/ethernet/intel/fm10k/fm10k_type.h index f3f37a49806e..b8bc06183720 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_type.h +++ b/drivers/net/ethernet/intel/fm10k/fm10k_type.h @@ -1,4 +1,4 @@ -/* Intel Ethernet Switch Host Interface Driver +/* Intel(R) Ethernet Switch Host Interface Driver * Copyright(c) 2013 - 2016 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_vf.c b/drivers/net/ethernet/intel/fm10k/fm10k_vf.c index 0440706eeb82..3b06685ea63b 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_vf.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_vf.c @@ -1,4 +1,4 @@ -/* Intel Ethernet Switch Host Interface Driver +/* Intel(R) Ethernet Switch Host Interface Driver * Copyright(c) 2013 - 2016 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_vf.h b/drivers/net/ethernet/intel/fm10k/fm10k_vf.h index f0932f944793..2662f33c0c71 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_vf.h +++ b/drivers/net/ethernet/intel/fm10k/fm10k_vf.h @@ -1,4 +1,4 @@ -/* Intel Ethernet Switch Host Interface Driver +/* Intel(R) Ethernet Switch Host Interface Driver * Copyright(c) 2013 - 2016 Intel Corporation. 
* * This program is free software; you can redistribute it and/or modify it From dc1b4c2b88b976a7882922e55666b20e28477c57 Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Thu, 7 Apr 2016 08:52:53 -0700 Subject: [PATCH 0878/1649] fm10k: fix incorrect IPv6 extended header checksum Check for and handle IPv6 extended headers so that Tx checksum offload can be done. Also use skb_checksum_help for unexpected cases. This was originally discovered in ixgbe. Reported-by: Mark Rustad Signed-off-by: Jacob Keller Tested-by: Krishneil Singh Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/fm10k/fm10k_main.c | 25 ++++++++++++------- 1 file changed, 16 insertions(+), 9 deletions(-) diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c index b875f4243667..0e166e9c90c8 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c @@ -820,6 +820,8 @@ static void fm10k_tx_csum(struct fm10k_ring *tx_ring, struct ipv6hdr *ipv6; u8 *raw; } network_hdr; + u8 *transport_hdr; + __be16 frag_off; __be16 protocol; u8 l4_hdr = 0; @@ -837,9 +839,11 @@ static void fm10k_tx_csum(struct fm10k_ring *tx_ring, goto no_csum; } network_hdr.raw = skb_inner_network_header(skb); + transport_hdr = skb_inner_transport_header(skb); } else { protocol = vlan_get_protocol(skb); network_hdr.raw = skb_network_header(skb); + transport_hdr = skb_transport_header(skb); } switch (protocol) { @@ -848,15 +852,17 @@ static void fm10k_tx_csum(struct fm10k_ring *tx_ring, break; case htons(ETH_P_IPV6): l4_hdr = network_hdr.ipv6->nexthdr; + if (likely((transport_hdr - network_hdr.raw) == + sizeof(struct ipv6hdr))) + break; + ipv6_skip_exthdr(skb, network_hdr.raw - skb->data + + sizeof(struct ipv6hdr), + &l4_hdr, &frag_off); + if (unlikely(frag_off)) + l4_hdr = NEXTHDR_FRAGMENT; break; default: - if (unlikely(net_ratelimit())) { - dev_warn(tx_ring->dev, - "partial checksum but ip version=%x!\n", - protocol); - } - tx_ring->tx_stats.csum_err++; - goto no_csum; + break; } switch (l4_hdr) { @@ -869,9 +875,10 @@ static void fm10k_tx_csum(struct fm10k_ring *tx_ring, default: if (unlikely(net_ratelimit())) { dev_warn(tx_ring->dev, - "partial checksum but l4 proto=%x!\n", - l4_hdr); + "partial checksum, version=%d l4 proto=%x\n", + protocol, l4_hdr); } + skb_checksum_help(skb); tx_ring->tx_stats.csum_err++; goto no_csum; } From 06d05463ee337e85e42c6073b6f2f46fbfb05b96 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Mon, 18 Apr 2016 23:59:31 +0200 Subject: [PATCH 0879/1649] rtl8xxxu: hide unused tables The references to some arrays in the rtl8xxxu driver were moved inside of an #ifdef, but the symbols remain outside, resulting in build warnings: rtl8xxxu/rtl8xxxu.c:1506:33: error: 'rtl8188ru_radioa_1t_highpa_table' defined but not used rtl8xxxu/rtl8xxxu.c:1431:33: error: 'rtl8192cu_radioa_1t_init_table' defined but not used rtl8xxxu/rtl8xxxu.c:1407:33: error: 'rtl8192cu_radiob_2t_init_table' defined but not used rtl8xxxu/rtl8xxxu.c:1332:33: error: 'rtl8192cu_radioa_2t_init_table' defined but not used rtl8xxxu/rtl8xxxu.c:239:35: error: 'rtl8192c_power_base' defined but not used rtl8xxxu/rtl8xxxu.c:217:35: error: 'rtl8188r_power_base' defined but not used This adds an extra #ifdef around them to shut up the warnings. 
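For readers unfamiliar with the pattern being applied here, the following is a minimal standalone sketch of the warning and the guard. It uses invented names (CONFIG_EXAMPLE_UNTESTED, example_table) rather than rtl8xxxu's own symbols; the point is only that a static table's definition must sit under the same #ifdef as its sole user, otherwise gcc reports "defined but not used" whenever that user is compiled out.

/*
 * Hypothetical illustration, not driver code: guard a static table with
 * the same condition as its only reader to avoid "defined but not used".
 */
#ifdef CONFIG_EXAMPLE_UNTESTED
static const int example_table[] = { 1, 2, 3 };	/* only used below */
#endif

int example_lookup(int idx)
{
#ifdef CONFIG_EXAMPLE_UNTESTED
	return example_table[idx];	/* sole reference to the table */
#else
	(void)idx;			/* reader compiled out, table omitted too */
	return -1;
#endif
}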
Signed-off-by: Arnd Bergmann Fixes: 2fc0b8e5a17d ("rtl8xxxu: Add TX power base values for gen1 parts") Fixes: 4062b8ffec36 ("rtl8xxxu: Move PHY RF init into device specific functions") Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 928ca56f751c..0ba84b5fe0d6 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -214,6 +214,7 @@ static struct rtl8xxxu_reg8val rtl8192e_mac_init_table[] = { {0xffff, 0xff}, }; +#ifdef CONFIG_RTL8XXXU_UNTESTED static struct rtl8xxxu_power_base rtl8188r_power_base = { .reg_0e00 = 0x06080808, .reg_0e04 = 0x00040406, @@ -257,6 +258,7 @@ static struct rtl8xxxu_power_base rtl8192c_power_base = { .reg_084c = 0x0b0c0d0e, .reg_0868 = 0x01030509, }; +#endif static struct rtl8xxxu_power_base rtl8723a_power_base = { .reg_0e00 = 0x0a0c0c0c, @@ -1329,6 +1331,7 @@ static struct rtl8xxxu_rfregval rtl8723bu_radioa_1t_init_table[] = { {0xff, 0xffffffff} }; +#ifdef CONFIG_RTL8XXXU_UNTESTED static struct rtl8xxxu_rfregval rtl8192cu_radioa_2t_init_table[] = { {0x00, 0x00030159}, {0x01, 0x00031284}, {0x02, 0x00098000}, {0x03, 0x00018c63}, @@ -1577,6 +1580,7 @@ static struct rtl8xxxu_rfregval rtl8188ru_radioa_1t_highpa_table[] = { {0x00, 0x00030159}, {0xff, 0xffffffff} }; +#endif static struct rtl8xxxu_rfregval rtl8192eu_radioa_init_table[] = { {0x7f, 0x00000082}, {0x81, 0x0003fc00}, From c60c9840423f32117a5422511c53c39df0b4d1dd Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Mon, 18 Apr 2016 18:24:04 -0400 Subject: [PATCH 0880/1649] net: dsa: remove tag_protocol from dsa_switch Having the tag protocol in dsa_switch_driver for setup time and in dsa_switch_tree for runtime is enough. Remove dsa_switch's one. Signed-off-by: Vivien Didelot Signed-off-by: David S. Miller --- include/net/dsa.h | 5 ----- net/dsa/dsa.c | 5 ++--- 2 files changed, 2 insertions(+), 8 deletions(-) diff --git a/include/net/dsa.h b/include/net/dsa.h index c4bc42bd3538..2d280aba97e2 100644 --- a/include/net/dsa.h +++ b/include/net/dsa.h @@ -135,11 +135,6 @@ struct dsa_switch { */ void *priv; - /* - * Tagging protocol understood by this switch - */ - enum dsa_tag_protocol tag_protocol; - /* * Configuration data for this switch. */ diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c index efa612f0ab9b..d61ceed912be 100644 --- a/net/dsa/dsa.c +++ b/net/dsa/dsa.c @@ -267,7 +267,7 @@ static int dsa_switch_setup_one(struct dsa_switch *ds, struct device *parent) * switch. 
*/ if (dst->cpu_switch == index) { - switch (ds->tag_protocol) { + switch (drv->tag_protocol) { #ifdef CONFIG_NET_DSA_TAG_DSA case DSA_TAG_PROTO_DSA: dst->rcv = dsa_netdev_ops.rcv; @@ -295,7 +295,7 @@ static int dsa_switch_setup_one(struct dsa_switch *ds, struct device *parent) goto out; } - dst->tag_protocol = ds->tag_protocol; + dst->tag_protocol = drv->tag_protocol; } /* @@ -411,7 +411,6 @@ dsa_switch_setup(struct dsa_switch_tree *dst, int index, ds->pd = pd; ds->drv = drv; ds->priv = priv; - ds->tag_protocol = drv->tag_protocol; ds->master_dev = host_dev; ret = dsa_switch_setup_one(ds, parent); From 85b67bcb7e4a23ced05e7020bf5843b9857f6881 Mon Sep 17 00:00:00 2001 From: Alexei Starovoitov Date: Mon, 18 Apr 2016 20:11:50 -0700 Subject: [PATCH 0881/1649] perf, bpf: minimize the size of perf_trace_() tracepoint handler move trace_call_bpf() into helper function to minimize the size of perf_trace_*() tracepoint handlers. text data bss dec hex filename 10541679 5526646 2945024 19013349 1221ee5 vmlinux_before 10509422 5526646 2945024 18981092 121a0e4 vmlinux_after It may seem that perf_fetch_caller_regs() can also be moved, but that is incorrect, since ip/sp will be wrong. bpf+tracepoint performance is not affected, since perf_swevent_put_recursion_context() is now inlined. export_symbol_gpl can also be dropped. No measurable change in normal perf tracepoints. Suggested-by: Steven Rostedt Signed-off-by: Alexei Starovoitov Acked-by: Peter Zijlstra (Intel) Acked-by: Steven Rostedt Signed-off-by: David S. Miller --- include/linux/trace_events.h | 5 +++++ include/trace/perf.h | 13 +++---------- kernel/events/core.c | 20 +++++++++++++++++++- 3 files changed, 27 insertions(+), 11 deletions(-) diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h index fe6441203b59..222f6aa0418f 100644 --- a/include/linux/trace_events.h +++ b/include/linux/trace_events.h @@ -609,6 +609,11 @@ extern void ftrace_profile_free_filter(struct perf_event *event); void perf_trace_buf_update(void *record, u16 type); void *perf_trace_buf_alloc(int size, struct pt_regs **regs, int *rctxp); +void perf_trace_run_bpf_submit(void *raw_data, int size, int rctx, + struct trace_event_call *call, u64 count, + struct pt_regs *regs, struct hlist_head *head, + struct task_struct *task); + static inline void perf_trace_buf_submit(void *raw_data, int size, int rctx, u16 type, u64 count, struct pt_regs *regs, void *head, diff --git a/include/trace/perf.h b/include/trace/perf.h index a182306eefd7..88de5c205e86 100644 --- a/include/trace/perf.h +++ b/include/trace/perf.h @@ -64,16 +64,9 @@ perf_trace_##call(void *__data, proto) \ \ { assign; } \ \ - if (prog) { \ - *(struct pt_regs **)entry = __regs; \ - if (!trace_call_bpf(prog, entry) || hlist_empty(head)) { \ - perf_swevent_put_recursion_context(rctx); \ - return; \ - } \ - } \ - perf_trace_buf_submit(entry, __entry_size, rctx, \ - event_call->event.type, __count, __regs, \ - head, __task); \ + perf_trace_run_bpf_submit(entry, __entry_size, rctx, \ + event_call, __count, __regs, \ + head, __task); \ } /* diff --git a/kernel/events/core.c b/kernel/events/core.c index 5056abffef27..9eb23dc27462 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -6741,7 +6741,6 @@ void perf_swevent_put_recursion_context(int rctx) put_recursion_context(swhash->recursion, rctx); } -EXPORT_SYMBOL_GPL(perf_swevent_put_recursion_context); void ___perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr) { @@ -6998,6 +6997,25 @@ static int perf_tp_event_match(struct 
perf_event *event, return 1; } +void perf_trace_run_bpf_submit(void *raw_data, int size, int rctx, + struct trace_event_call *call, u64 count, + struct pt_regs *regs, struct hlist_head *head, + struct task_struct *task) +{ + struct bpf_prog *prog = call->prog; + + if (prog) { + *(struct pt_regs **)raw_data = regs; + if (!trace_call_bpf(prog, raw_data) || hlist_empty(head)) { + perf_swevent_put_recursion_context(rctx); + return; + } + } + perf_tp_event(call->event.type, count, raw_data, size, regs, head, + rctx, task); +} +EXPORT_SYMBOL_GPL(perf_trace_run_bpf_submit); + void perf_tp_event(u16 event_type, u64 count, void *record, int entry_size, struct pt_regs *regs, struct hlist_head *head, int rctx, struct task_struct *task) From b7de529c793c9131e58ddca37d49fd26866aa867 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Tue, 19 Apr 2016 15:10:01 +0800 Subject: [PATCH 0882/1649] net: use jiffies_to_msecs to replace EXPIRES_IN_MS in inet/sctp_diag EXPIRES_IN_MS macro comes from net/ipv4/inet_diag.c and dates back to before jiffies_to_msecs() has been introduced. Now we can remove it and use jiffies_to_msecs(). Suggested-by: Jakub Sitnicki Signed-off-by: Xin Long Acked-by: Jakub Sitnicki Acked-by: Marcelo Ricardo Leitner Acked-by: Eric Dumazet Signed-off-by: David S. Miller --- net/ipv4/inet_diag.c | 12 ++++++------ net/sctp/sctp_diag.c | 6 ++---- 2 files changed, 8 insertions(+), 10 deletions(-) diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c index 70212bddf0f8..ad7956fa659a 100644 --- a/net/ipv4/inet_diag.c +++ b/net/ipv4/inet_diag.c @@ -197,27 +197,27 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk, goto out; } -#define EXPIRES_IN_MS(tmo) DIV_ROUND_UP((tmo - jiffies) * 1000, HZ) - if (icsk->icsk_pending == ICSK_TIME_RETRANS || icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS || icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) { r->idiag_timer = 1; r->idiag_retrans = icsk->icsk_retransmits; - r->idiag_expires = EXPIRES_IN_MS(icsk->icsk_timeout); + r->idiag_expires = + jiffies_to_msecs(icsk->icsk_timeout - jiffies); } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) { r->idiag_timer = 4; r->idiag_retrans = icsk->icsk_probes_out; - r->idiag_expires = EXPIRES_IN_MS(icsk->icsk_timeout); + r->idiag_expires = + jiffies_to_msecs(icsk->icsk_timeout - jiffies); } else if (timer_pending(&sk->sk_timer)) { r->idiag_timer = 2; r->idiag_retrans = icsk->icsk_probes_out; - r->idiag_expires = EXPIRES_IN_MS(sk->sk_timer.expires); + r->idiag_expires = + jiffies_to_msecs(sk->sk_timer.expires - jiffies); } else { r->idiag_timer = 0; r->idiag_expires = 0; } -#undef EXPIRES_IN_MS if ((ext & (1 << (INET_DIAG_INFO - 1))) && handler->idiag_info_size) { attr = nla_reserve(skb, INET_DIAG_INFO, diff --git a/net/sctp/sctp_diag.c b/net/sctp/sctp_diag.c index 98ecd16da0c9..bb2d8d9608e9 100644 --- a/net/sctp/sctp_diag.c +++ b/net/sctp/sctp_diag.c @@ -48,10 +48,8 @@ static void inet_diag_msg_sctpasoc_fill(struct inet_diag_msg *r, r->idiag_state = asoc->state; r->idiag_timer = SCTP_EVENT_TIMEOUT_T3_RTX; r->idiag_retrans = asoc->rtx_data_chunks; -#define EXPIRES_IN_MS(tmo) DIV_ROUND_UP((tmo - jiffies) * 1000, HZ) - r->idiag_expires = - EXPIRES_IN_MS(asoc->timeouts[SCTP_EVENT_TIMEOUT_T3_RTX]); -#undef EXPIRES_IN_MS + r->idiag_expires = jiffies_to_msecs( + asoc->timeouts[SCTP_EVENT_TIMEOUT_T3_RTX] - jiffies); } static int inet_diag_msg_sctpladdrs_fill(struct sk_buff *skb, From f937572925d8d7beb5aca1cf180e8b9af623a903 Mon Sep 17 00:00:00 2001 From: Peter Heise Date: Tue, 19 Apr 2016 13:34:28 +0200 
Subject: [PATCH 0883/1649] NLA_BINARY misuse bug in HSR Removed .type field from NLA to do proper length checking. Reported by Daniel Borkmann and Julia Lawall. Signed-off-by: Peter Heise Signed-off-by: David S. Miller --- net/hsr/hsr_netlink.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/net/hsr/hsr_netlink.c b/net/hsr/hsr_netlink.c index 5425d87611fc..d4d1617f43a8 100644 --- a/net/hsr/hsr_netlink.c +++ b/net/hsr/hsr_netlink.c @@ -24,7 +24,7 @@ static const struct nla_policy hsr_policy[IFLA_HSR_MAX + 1] = { [IFLA_HSR_SLAVE2] = { .type = NLA_U32 }, [IFLA_HSR_MULTICAST_SPEC] = { .type = NLA_U8 }, [IFLA_HSR_VERSION] = { .type = NLA_U8 }, - [IFLA_HSR_SUPERVISION_ADDR] = { .type = NLA_BINARY, .len = ETH_ALEN }, + [IFLA_HSR_SUPERVISION_ADDR] = { .len = ETH_ALEN }, [IFLA_HSR_SEQ_NR] = { .type = NLA_U16 }, }; @@ -121,10 +121,9 @@ static struct rtnl_link_ops hsr_link_ops __read_mostly = { /* attribute policy */ -/* NLA_BINARY missing in libnl; use NLA_UNSPEC in userspace instead. */ static const struct nla_policy hsr_genl_policy[HSR_A_MAX + 1] = { - [HSR_A_NODE_ADDR] = { .type = NLA_BINARY, .len = ETH_ALEN }, - [HSR_A_NODE_ADDR_B] = { .type = NLA_BINARY, .len = ETH_ALEN }, + [HSR_A_NODE_ADDR] = { .len = ETH_ALEN }, + [HSR_A_NODE_ADDR_B] = { .len = ETH_ALEN }, [HSR_A_IFINDEX] = { .type = NLA_U32 }, [HSR_A_IF1_AGE] = { .type = NLA_U32 }, [HSR_A_IF2_AGE] = { .type = NLA_U32 }, From 1ba64facae5739d91884f8f34f25fef1cb66d930 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Tue, 19 Apr 2016 17:30:56 +0300 Subject: [PATCH 0884/1649] geneve: testing the wrong variable in geneve6_build_skb() We intended to test "err" and not "skb". Fixes: aed069df099c ('ip_tunnel_core: iptunnel_handle_offloads returns int and doesn't free skb') Signed-off-by: Dan Carpenter Acked-by: Alexander Duyck Signed-off-by: David S. Miller --- drivers/net/geneve.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index efbc7ceedc3a..512dbe013713 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -733,7 +733,7 @@ static int geneve6_build_skb(struct dst_entry *dst, struct sk_buff *skb, goto free_dst; err = udp_tunnel_handle_offloads(skb, udp_sum); - if (IS_ERR(skb)) + if (err) goto free_dst; gnvh = (struct genevehdr *)__skb_push(skb, sizeof(*gnvh) + opt_len); From b1c20f0b97b4e565fa50cde1e6b44c2fd327a1e0 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Tue, 19 Apr 2016 14:02:19 -0400 Subject: [PATCH 0885/1649] netdev_features: Fold NETIF_F_ALL_TSO into NETIF_F_GSO_SOFTWARE This patch folds NETIF_F_ALL_TSO into the bitmask for NETIF_F_GSO_SOFTWARE. The idea is to avoid duplication of defines since the only difference between the two was the GSO_UDP bit. Signed-off-by: Alexander Duyck Signed-off-by: David S. Miller --- include/linux/netdev_features.h | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h index 15eb0b12fff9..bc8736266749 100644 --- a/include/linux/netdev_features.h +++ b/include/linux/netdev_features.h @@ -152,11 +152,6 @@ enum { #define NETIF_F_GSO_MASK (__NETIF_F_BIT(NETIF_F_GSO_LAST + 1) - \ __NETIF_F_BIT(NETIF_F_GSO_SHIFT)) -/* List of features with software fallbacks. */ -#define NETIF_F_GSO_SOFTWARE (NETIF_F_TSO | NETIF_F_TSO_ECN | \ - NETIF_F_TSO_MANGLEID | \ - NETIF_F_TSO6 | NETIF_F_UFO) - /* List of IP checksum features. 
Note that NETIF_F_ HW_CSUM should not be * set in features when NETIF_F_IP_CSUM or NETIF_F_IPV6_CSUM are set-- * this would be contradictory @@ -170,6 +165,9 @@ enum { #define NETIF_F_ALL_FCOE (NETIF_F_FCOE_CRC | NETIF_F_FCOE_MTU | \ NETIF_F_FSO) +/* List of features with software fallbacks. */ +#define NETIF_F_GSO_SOFTWARE (NETIF_F_ALL_TSO | NETIF_F_UFO) + /* * If one device supports one of these features, then enable them * for all in netdev_increment_features. From 732912d727cd6deb3c1a05a8baa74b8ce8d510ac Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Tue, 19 Apr 2016 14:02:26 -0400 Subject: [PATCH 0886/1649] veth: Update features to include all tunnel GSO types This patch adds support for the checksum enabled versions of UDP and GRE tunnels. With this change we should be able to send and receive GSO frames of these types over the veth pair without needing to segment the packets. Signed-off-by: Alexander Duyck Signed-off-by: David S. Miller --- drivers/net/veth.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/drivers/net/veth.c b/drivers/net/veth.c index 4f30a6ae50d0..f37a6e61d4ad 100644 --- a/drivers/net/veth.c +++ b/drivers/net/veth.c @@ -312,10 +312,9 @@ static const struct net_device_ops veth_netdev_ops = { .ndo_set_rx_headroom = veth_set_rx_headroom, }; -#define VETH_FEATURES (NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \ - NETIF_F_HW_CSUM | NETIF_F_RXCSUM | NETIF_F_HIGHDMA | \ - NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL | \ - NETIF_F_GSO_IPIP | NETIF_F_GSO_SIT | NETIF_F_UFO | \ +#define VETH_FEATURES (NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HW_CSUM | \ + NETIF_F_RXCSUM | NETIF_F_HIGHDMA | \ + NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ENCAP_ALL | \ NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | \ NETIF_F_HW_VLAN_STAG_TX | NETIF_F_HW_VLAN_STAG_RX ) From 089bf1a6a924b97a7e9f920bae6253a8ad581cf3 Mon Sep 17 00:00:00 2001 From: Nicolas Dichtel Date: Thu, 21 Apr 2016 18:58:24 +0200 Subject: [PATCH 0887/1649] libnl: add more helpers to align attributes on 64-bit Signed-off-by: Nicolas Dichtel Signed-off-by: David S. 
Miller --- include/net/netlink.h | 39 +++++++++++++---- lib/nlattr.c | 99 +++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 130 insertions(+), 8 deletions(-) diff --git a/include/net/netlink.h b/include/net/netlink.h index 3c1fd92a52c8..6f51a8a06498 100644 --- a/include/net/netlink.h +++ b/include/net/netlink.h @@ -244,13 +244,21 @@ int nla_memcpy(void *dest, const struct nlattr *src, int count); int nla_memcmp(const struct nlattr *nla, const void *data, size_t size); int nla_strcmp(const struct nlattr *nla, const char *str); struct nlattr *__nla_reserve(struct sk_buff *skb, int attrtype, int attrlen); +struct nlattr *__nla_reserve_64bit(struct sk_buff *skb, int attrtype, + int attrlen, int padattr); void *__nla_reserve_nohdr(struct sk_buff *skb, int attrlen); struct nlattr *nla_reserve(struct sk_buff *skb, int attrtype, int attrlen); +struct nlattr *nla_reserve_64bit(struct sk_buff *skb, int attrtype, + int attrlen, int padattr); void *nla_reserve_nohdr(struct sk_buff *skb, int attrlen); void __nla_put(struct sk_buff *skb, int attrtype, int attrlen, const void *data); +void __nla_put_64bit(struct sk_buff *skb, int attrtype, int attrlen, + const void *data, int padattr); void __nla_put_nohdr(struct sk_buff *skb, int attrlen, const void *data); int nla_put(struct sk_buff *skb, int attrtype, int attrlen, const void *data); +int nla_put_64bit(struct sk_buff *skb, int attrtype, int attrlen, + const void *data, int padattr); int nla_put_nohdr(struct sk_buff *skb, int attrlen, const void *data); int nla_append(struct sk_buff *skb, int attrlen, const void *data); @@ -1230,6 +1238,27 @@ static inline int nla_validate_nested(const struct nlattr *start, int maxtype, return nla_validate(nla_data(start), nla_len(start), maxtype, policy); } +/** + * nla_need_padding_for_64bit - test 64-bit alignment of the next attribute + * @skb: socket buffer the message is stored in + * + * Return true if padding is needed to align the next attribute (nla_data()) to + * a 64-bit aligned area. + */ +static inline bool nla_need_padding_for_64bit(struct sk_buff *skb) +{ +#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS + /* The nlattr header is 4 bytes in size, that's why we test + * if the skb->data _is_ aligned. A NOP attribute, plus + * nlattr header for next attribute, will make nla_data() + * 8-byte aligned. + */ + if (IS_ALIGNED((unsigned long)skb_tail_pointer(skb), 8)) + return true; +#endif + return false; +} + /** * nla_align_64bit - 64-bit align the nla_data() of next attribute * @skb: socket buffer the message is stored in @@ -1244,16 +1273,10 @@ static inline int nla_validate_nested(const struct nlattr *start, int maxtype, */ static inline int nla_align_64bit(struct sk_buff *skb, int padattr) { -#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS - /* The nlattr header is 4 bytes in size, that's why we test - * if the skb->data _is_ aligned. This NOP attribute, plus - * nlattr header for next attribute, will make nla_data() - * 8-byte aligned. 
- */ - if (IS_ALIGNED((unsigned long)skb_tail_pointer(skb), 8) && + if (nla_need_padding_for_64bit(skb) && !nla_reserve(skb, padattr, 0)) return -EMSGSIZE; -#endif + return 0; } diff --git a/lib/nlattr.c b/lib/nlattr.c index f5907d23272d..2b82f1e2ebc2 100644 --- a/lib/nlattr.c +++ b/lib/nlattr.c @@ -354,6 +354,29 @@ struct nlattr *__nla_reserve(struct sk_buff *skb, int attrtype, int attrlen) } EXPORT_SYMBOL(__nla_reserve); +/** + * __nla_reserve_64bit - reserve room for attribute on the skb and align it + * @skb: socket buffer to reserve room on + * @attrtype: attribute type + * @attrlen: length of attribute payload + * + * Adds a netlink attribute header to a socket buffer and reserves + * room for the payload but does not copy it. It also ensure that this + * attribute will be 64-bit aign. + * + * The caller is responsible to ensure that the skb provides enough + * tailroom for the attribute header and payload. + */ +struct nlattr *__nla_reserve_64bit(struct sk_buff *skb, int attrtype, + int attrlen, int padattr) +{ + if (nla_need_padding_for_64bit(skb)) + nla_align_64bit(skb, padattr); + + return __nla_reserve(skb, attrtype, attrlen); +} +EXPORT_SYMBOL(__nla_reserve_64bit); + /** * __nla_reserve_nohdr - reserve room for attribute without header * @skb: socket buffer to reserve room on @@ -396,6 +419,35 @@ struct nlattr *nla_reserve(struct sk_buff *skb, int attrtype, int attrlen) } EXPORT_SYMBOL(nla_reserve); +/** + * nla_reserve_64bit - reserve room for attribute on the skb and align it + * @skb: socket buffer to reserve room on + * @attrtype: attribute type + * @attrlen: length of attribute payload + * + * Adds a netlink attribute header to a socket buffer and reserves + * room for the payload but does not copy it. It also ensure that this + * attribute will be 64-bit aign. + * + * Returns NULL if the tailroom of the skb is insufficient to store + * the attribute header and payload. + */ +struct nlattr *nla_reserve_64bit(struct sk_buff *skb, int attrtype, int attrlen, + int padattr) +{ + size_t len; + + if (nla_need_padding_for_64bit(skb)) + len = nla_total_size_64bit(attrlen); + else + len = nla_total_size(attrlen); + if (unlikely(skb_tailroom(skb) < len)) + return NULL; + + return __nla_reserve_64bit(skb, attrtype, attrlen, padattr); +} +EXPORT_SYMBOL(nla_reserve_64bit); + /** * nla_reserve_nohdr - reserve room for attribute without header * @skb: socket buffer to reserve room on @@ -435,6 +487,26 @@ void __nla_put(struct sk_buff *skb, int attrtype, int attrlen, } EXPORT_SYMBOL(__nla_put); +/** + * __nla_put_64bit - Add a netlink attribute to a socket buffer and align it + * @skb: socket buffer to add attribute to + * @attrtype: attribute type + * @attrlen: length of attribute payload + * @data: head of attribute payload + * + * The caller is responsible to ensure that the skb provides enough + * tailroom for the attribute header and payload. 
+ */ +void __nla_put_64bit(struct sk_buff *skb, int attrtype, int attrlen, + const void *data, int padattr) +{ + struct nlattr *nla; + + nla = __nla_reserve_64bit(skb, attrtype, attrlen, padattr); + memcpy(nla_data(nla), data, attrlen); +} +EXPORT_SYMBOL(__nla_put_64bit); + /** * __nla_put_nohdr - Add a netlink attribute without header * @skb: socket buffer to add attribute to @@ -473,6 +545,33 @@ int nla_put(struct sk_buff *skb, int attrtype, int attrlen, const void *data) } EXPORT_SYMBOL(nla_put); +/** + * nla_put_64bit - Add a netlink attribute to a socket buffer and align it + * @skb: socket buffer to add attribute to + * @attrtype: attribute type + * @attrlen: length of attribute payload + * @data: head of attribute payload + * + * Returns -EMSGSIZE if the tailroom of the skb is insufficient to store + * the attribute header and payload. + */ +int nla_put_64bit(struct sk_buff *skb, int attrtype, int attrlen, + const void *data, int padattr) +{ + size_t len; + + if (nla_need_padding_for_64bit(skb)) + len = nla_total_size_64bit(attrlen); + else + len = nla_total_size(attrlen); + if (unlikely(skb_tailroom(skb) < len)) + return -EMSGSIZE; + + __nla_put_64bit(skb, attrtype, attrlen, data, padattr); + return 0; +} +EXPORT_SYMBOL(nla_put_64bit); + /** * nla_put_nohdr - Add a netlink attribute without header * @skb: socket buffer to add attribute to From 58414d32a37e4c2f79da91aebc2d2365918a1562 Mon Sep 17 00:00:00 2001 From: Nicolas Dichtel Date: Thu, 21 Apr 2016 18:58:25 +0200 Subject: [PATCH 0888/1649] rtnl: use the new API to align IFLA_STATS* Signed-off-by: Nicolas Dichtel Signed-off-by: David S. Miller --- net/core/rtnetlink.c | 22 +++++----------------- 1 file changed, 5 insertions(+), 17 deletions(-) diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 4a47a9aceb1d..5ec059d52823 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -1051,14 +1051,9 @@ static noinline_for_stack int rtnl_fill_stats(struct sk_buff *skb, { struct rtnl_link_stats64 *sp; struct nlattr *attr; - int err; - err = nla_align_64bit(skb, IFLA_PAD); - if (err) - return err; - - attr = nla_reserve(skb, IFLA_STATS64, - sizeof(struct rtnl_link_stats64)); + attr = nla_reserve_64bit(skb, IFLA_STATS64, + sizeof(struct rtnl_link_stats64), IFLA_PAD); if (!attr) return -EMSGSIZE; @@ -3469,17 +3464,10 @@ static int rtnl_fill_statsinfo(struct sk_buff *skb, struct net_device *dev, if (filter_mask & IFLA_STATS_FILTER_BIT(IFLA_STATS_LINK_64)) { struct rtnl_link_stats64 *sp; - int err; - /* if necessary, add a zero length NOP attribute so that - * IFLA_STATS_LINK_64 will be 64-bit aligned - */ - err = nla_align_64bit(skb, IFLA_STATS_UNSPEC); - if (err) - goto nla_put_failure; - - attr = nla_reserve(skb, IFLA_STATS_LINK_64, - sizeof(struct rtnl_link_stats64)); + attr = nla_reserve_64bit(skb, IFLA_STATS_LINK_64, + sizeof(struct rtnl_link_stats64), + IFLA_STATS_UNSPEC); if (!attr) goto nla_put_failure; From a9a080422ef7b0c7e69925e4a1474ad93f0f0117 Mon Sep 17 00:00:00 2001 From: Nicolas Dichtel Date: Thu, 21 Apr 2016 18:58:26 +0200 Subject: [PATCH 0889/1649] ipmr: align RTA_MFC_STATS on 64-bit Signed-off-by: Nicolas Dichtel Signed-off-by: David S. 
Miller --- include/uapi/linux/rtnetlink.h | 1 + net/ipv4/ipmr.c | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h index cc885c4e9065..a94e0b69c769 100644 --- a/include/uapi/linux/rtnetlink.h +++ b/include/uapi/linux/rtnetlink.h @@ -317,6 +317,7 @@ enum rtattr_type_t { RTA_ENCAP_TYPE, RTA_ENCAP, RTA_EXPIRES, + RTA_PAD, __RTA_MAX }; diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index 395e2814a46d..21a38e296fe2 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c @@ -2104,7 +2104,7 @@ static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb, mfcs.mfcs_packets = c->mfc_un.res.pkt; mfcs.mfcs_bytes = c->mfc_un.res.bytes; mfcs.mfcs_wrong_if = c->mfc_un.res.wrong_if; - if (nla_put(skb, RTA_MFC_STATS, sizeof(mfcs), &mfcs) < 0) + if (nla_put_64bit(skb, RTA_MFC_STATS, sizeof(mfcs), &mfcs, RTA_PAD) < 0) return -EMSGSIZE; rtm->rtm_type = RTN_MULTICAST; @@ -2237,7 +2237,7 @@ static size_t mroute_msgsize(bool unresolved, int maxvif) + nla_total_size(0) /* RTA_MULTIPATH */ + maxvif * NLA_ALIGN(sizeof(struct rtnexthop)) /* RTA_MFC_STATS */ - + nla_total_size(sizeof(struct rta_mfc_stats)) + + nla_total_size_64bit(sizeof(struct rta_mfc_stats)) ; return len; From 3d6b66c1d1a8d348928996ca333730f258fbb838 Mon Sep 17 00:00:00 2001 From: Nicolas Dichtel Date: Thu, 21 Apr 2016 18:58:27 +0200 Subject: [PATCH 0890/1649] ip6mr: align RTA_MFC_STATS on 64-bit Signed-off-by: Nicolas Dichtel Signed-off-by: David S. Miller --- net/ipv6/ip6mr.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c index a10e77103c88..bf678324fd52 100644 --- a/net/ipv6/ip6mr.c +++ b/net/ipv6/ip6mr.c @@ -2268,7 +2268,7 @@ static int __ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb, mfcs.mfcs_packets = c->mfc_un.res.pkt; mfcs.mfcs_bytes = c->mfc_un.res.bytes; mfcs.mfcs_wrong_if = c->mfc_un.res.wrong_if; - if (nla_put(skb, RTA_MFC_STATS, sizeof(mfcs), &mfcs) < 0) + if (nla_put_64bit(skb, RTA_MFC_STATS, sizeof(mfcs), &mfcs, RTA_PAD) < 0) return -EMSGSIZE; rtm->rtm_type = RTN_MULTICAST; @@ -2411,7 +2411,7 @@ static int mr6_msgsize(bool unresolved, int maxvif) + nla_total_size(0) /* RTA_MULTIPATH */ + maxvif * NLA_ALIGN(sizeof(struct rtnexthop)) /* RTA_MFC_STATS */ - + nla_total_size(sizeof(struct rta_mfc_stats)) + + nla_total_size_64bit(sizeof(struct rta_mfc_stats)) ; return len; From ba90950c94713f294e8778e891eef4143183957c Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Wed, 20 Apr 2016 11:37:08 -0700 Subject: [PATCH 0891/1649] net: bcmsysport: use __napi_schedule_irqoff() Both bcm_sysport_tx_isr() and bcm_sysport_rx_isr() run in hard irq context, we do not need to block irq again. Signed-off-by: Florian Fainelli Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/bcmsysport.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index 993c780bdfab..9e3ec739d860 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -873,7 +873,7 @@ static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id) if (likely(napi_schedule_prep(&priv->napi))) { /* disable RX interrupts */ intrl2_0_mask_set(priv, INTRL2_0_RDMA_MBDONE); - __napi_schedule(&priv->napi); + __napi_schedule_irqoff(&priv->napi); } } @@ -916,7 +916,7 @@ static irqreturn_t bcm_sysport_tx_isr(int irq, void *dev_id) if (likely(napi_schedule_prep(&txr->napi))) { intrl2_1_mask_set(priv, BIT(ring)); - __napi_schedule(&txr->napi); + __napi_schedule_irqoff(&txr->napi); } } From c82f47efa021697722b1cfc84e48f72d45c9f5b2 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Wed, 20 Apr 2016 11:37:09 -0700 Subject: [PATCH 0892/1649] net: bcmsysport: use napi_complete_done() By using napi_complete_done(), we allow fine tuning of /sys/class/net/ethX/gro_flush_timeout for higher GRO aggregation efficiency for a Gbit NIC. Check commit 24d2e4a50737 ("tg3: use napi_complete_done()") for details. Signed-off-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bcmsysport.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index 9e3ec739d860..30b0c2895a56 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -831,7 +831,7 @@ static int bcm_sysport_poll(struct napi_struct *napi, int budget) rdma_writel(priv, priv->rx_c_index, RDMA_CONS_INDEX); if (work_done < budget) { - napi_complete(napi); + napi_complete_done(napi, work_done); /* re-enable RX interrupts */ intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE); } From 237cd218099ce96edf2890a49aa191b38b84c2fc Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Wed, 20 Apr 2016 22:02:09 +0300 Subject: [PATCH 0893/1649] net/mlx5: Introduce device queue counters A queue counter can collect several statistics for one or more hardware queues (QPs, RQs, etc ..) that the counter is attached to. For Ethernet it will provide an "out of buffer" counter which collects the number of all packets that are dropped due to lack of software buffers. Here we add device commands to alloc/query/dealloc queue counters. Signed-off-by: Tariq Toukan Signed-off-by: Rana Shahout Signed-off-by: Saeed Mahameed Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlx5/core/qp.c | 68 ++++++++++++++++++++ include/linux/mlx5/qp.h | 6 ++ 2 files changed, 74 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/net/ethernet/mellanox/mlx5/core/qp.c index def289375ecb..b720a274220d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/qp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/qp.c @@ -538,3 +538,71 @@ void mlx5_core_destroy_sq_tracked(struct mlx5_core_dev *dev, mlx5_core_destroy_sq(dev, sq->qpn); } EXPORT_SYMBOL(mlx5_core_destroy_sq_tracked); + +int mlx5_core_alloc_q_counter(struct mlx5_core_dev *dev, u16 *counter_id) +{ + u32 in[MLX5_ST_SZ_DW(alloc_q_counter_in)]; + u32 out[MLX5_ST_SZ_DW(alloc_q_counter_out)]; + int err; + + memset(in, 0, sizeof(in)); + memset(out, 0, sizeof(out)); + + MLX5_SET(alloc_q_counter_in, in, opcode, MLX5_CMD_OP_ALLOC_Q_COUNTER); + err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out)); + if (!err) + *counter_id = MLX5_GET(alloc_q_counter_out, out, + counter_set_id); + return err; +} +EXPORT_SYMBOL_GPL(mlx5_core_alloc_q_counter); + +int mlx5_core_dealloc_q_counter(struct mlx5_core_dev *dev, u16 counter_id) +{ + u32 in[MLX5_ST_SZ_DW(dealloc_q_counter_in)]; + u32 out[MLX5_ST_SZ_DW(dealloc_q_counter_out)]; + + memset(in, 0, sizeof(in)); + memset(out, 0, sizeof(out)); + + MLX5_SET(dealloc_q_counter_in, in, opcode, + MLX5_CMD_OP_DEALLOC_Q_COUNTER); + MLX5_SET(dealloc_q_counter_in, in, counter_set_id, counter_id); + return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, + sizeof(out)); +} +EXPORT_SYMBOL_GPL(mlx5_core_dealloc_q_counter); + +int mlx5_core_query_q_counter(struct mlx5_core_dev *dev, u16 counter_id, + int reset, void *out, int out_size) +{ + u32 in[MLX5_ST_SZ_DW(query_q_counter_in)]; + + memset(in, 0, sizeof(in)); + + MLX5_SET(query_q_counter_in, in, opcode, MLX5_CMD_OP_QUERY_Q_COUNTER); + MLX5_SET(query_q_counter_in, in, clear, reset); + MLX5_SET(query_q_counter_in, in, counter_set_id, counter_id); + return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, out_size); +} +EXPORT_SYMBOL_GPL(mlx5_core_query_q_counter); + +int mlx5_core_query_out_of_buffer(struct mlx5_core_dev *dev, u16 counter_id, + u32 *out_of_buffer) +{ + int outlen = MLX5_ST_SZ_BYTES(query_q_counter_out); + void *out; + int err; + + out = mlx5_vzalloc(outlen); + if (!out) + return -ENOMEM; + + err = mlx5_core_query_q_counter(dev, counter_id, 0, out, outlen); + if (!err) + *out_of_buffer = MLX5_GET(query_q_counter_out, out, + out_of_buffer); + + kfree(out); + return err; +} diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h index cf031a3f16c5..64221027bf1f 100644 --- a/include/linux/mlx5/qp.h +++ b/include/linux/mlx5/qp.h @@ -668,6 +668,12 @@ int mlx5_core_create_sq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen, struct mlx5_core_qp *sq); void mlx5_core_destroy_sq_tracked(struct mlx5_core_dev *dev, struct mlx5_core_qp *sq); +int mlx5_core_alloc_q_counter(struct mlx5_core_dev *dev, u16 *counter_id); +int mlx5_core_dealloc_q_counter(struct mlx5_core_dev *dev, u16 counter_id); +int mlx5_core_query_q_counter(struct mlx5_core_dev *dev, u16 counter_id, + int reset, void *out, int out_size); +int mlx5_core_query_out_of_buffer(struct mlx5_core_dev *dev, u16 counter_id, + u32 *out_of_buffer); static inline const char *mlx5_qp_type_str(int type) { From 593cf33829adfd3d5c75d42879cc42afded1b626 Mon Sep 17 00:00:00 2001 From: Rana Shahout Date: Wed, 20 Apr 2016 22:02:10 +0300 Subject: [PATCH 0894/1649] net/mlx5e: Allocate set of queue counters per 
netdev Connect all netdev RQs to this set of queue counters. Also, add an "rx_out_of_buffer" counter to ethtool, which indicates RX packet drops due to lack of receive buffers. Signed-off-by: Rana Shahout Signed-off-by: Tariq Toukan Signed-off-by: Saeed Mahameed Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 11 +++++ .../ethernet/mellanox/mlx5/core/en_ethtool.c | 11 +++++ .../net/ethernet/mellanox/mlx5/core/en_main.c | 42 ++++++++++++++++++- 3 files changed, 62 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 879e6276c473..c4ddbe8501a7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -236,6 +236,15 @@ struct mlx5e_pport_stats { __be64 RFC_2819_counters[NUM_RFC_2819_COUNTERS]; }; +static const char qcounter_stats_strings[][ETH_GSTRING_LEN] = { + "rx_out_of_buffer", +}; + +struct mlx5e_qcounter_stats { + u32 rx_out_of_buffer; +#define NUM_Q_COUNTERS 1 +}; + static const char rq_stats_strings[][ETH_GSTRING_LEN] = { "packets", "bytes", @@ -293,6 +302,7 @@ struct mlx5e_sq_stats { struct mlx5e_stats { struct mlx5e_vport_stats vport; struct mlx5e_pport_stats pport; + struct mlx5e_qcounter_stats qcnt; }; struct mlx5e_params { @@ -575,6 +585,7 @@ struct mlx5e_priv { struct net_device *netdev; struct mlx5e_stats stats; struct mlx5e_tstamp tstamp; + u16 q_counter; }; #define MLX5E_NET_IP_ALIGN 2 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index 68834b715f6c..39c19021d154 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -165,6 +165,8 @@ static const struct { }, }; +#define MLX5E_NUM_Q_CNTRS(priv) (NUM_Q_COUNTERS * (!!priv->q_counter)) + static int mlx5e_get_sset_count(struct net_device *dev, int sset) { struct mlx5e_priv *priv = netdev_priv(dev); @@ -172,6 +174,7 @@ static int mlx5e_get_sset_count(struct net_device *dev, int sset) switch (sset) { case ETH_SS_STATS: return NUM_VPORT_COUNTERS + NUM_PPORT_COUNTERS + + MLX5E_NUM_Q_CNTRS(priv) + priv->params.num_channels * NUM_RQ_STATS + priv->params.num_channels * priv->params.num_tc * NUM_SQ_STATS; @@ -200,6 +203,11 @@ static void mlx5e_get_strings(struct net_device *dev, strcpy(data + (idx++) * ETH_GSTRING_LEN, vport_strings[i]); + /* Q counters */ + for (i = 0; i < MLX5E_NUM_Q_CNTRS(priv); i++) + strcpy(data + (idx++) * ETH_GSTRING_LEN, + qcounter_stats_strings[i]); + /* PPORT counters */ for (i = 0; i < NUM_PPORT_COUNTERS; i++) strcpy(data + (idx++) * ETH_GSTRING_LEN, @@ -240,6 +248,9 @@ static void mlx5e_get_ethtool_stats(struct net_device *dev, for (i = 0; i < NUM_VPORT_COUNTERS; i++) data[idx++] = ((u64 *)&priv->stats.vport)[i]; + for (i = 0; i < MLX5E_NUM_Q_CNTRS(priv); i++) + data[idx++] = ((u32 *)&priv->stats.qcnt)[i]; + for (i = 0; i < NUM_PPORT_COUNTERS; i++) data[idx++] = be64_to_cpu(((__be64 *)&priv->stats.pport)[i]); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index e0adb604f461..7fbe1ba86294 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -129,6 +129,17 @@ free_out: kvfree(out); } +static void mlx5e_update_q_counter(struct mlx5e_priv *priv) +{ + struct mlx5e_qcounter_stats *qcnt = &priv->stats.qcnt; + + if (!priv->q_counter) + return; + + 
mlx5_core_query_out_of_buffer(priv->mdev, priv->q_counter, + &qcnt->rx_out_of_buffer); +} + void mlx5e_update_stats(struct mlx5e_priv *priv) { struct mlx5_core_dev *mdev = priv->mdev; @@ -250,6 +261,8 @@ void mlx5e_update_stats(struct mlx5e_priv *priv) s->rx_csum_sw; mlx5e_update_pport_counters(priv); + mlx5e_update_q_counter(priv); + free_out: kvfree(out); } @@ -1055,6 +1068,7 @@ static void mlx5e_build_rq_param(struct mlx5e_priv *priv, MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe))); MLX5_SET(wq, wq, log_wq_sz, priv->params.log_rq_size); MLX5_SET(wq, wq, pd, priv->pdn); + MLX5_SET(rqc, rqc, counter_set_id, priv->q_counter); param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev); param->wq.linear = 1; @@ -2442,6 +2456,26 @@ static int mlx5e_create_mkey(struct mlx5e_priv *priv, u32 pdn, return err; } +static void mlx5e_create_q_counter(struct mlx5e_priv *priv) +{ + struct mlx5_core_dev *mdev = priv->mdev; + int err; + + err = mlx5_core_alloc_q_counter(mdev, &priv->q_counter); + if (err) { + mlx5_core_warn(mdev, "alloc queue counter failed, %d\n", err); + priv->q_counter = 0; + } +} + +static void mlx5e_destroy_q_counter(struct mlx5e_priv *priv) +{ + if (!priv->q_counter) + return; + + mlx5_core_dealloc_q_counter(priv->mdev, priv->q_counter); +} + static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev) { struct net_device *netdev; @@ -2527,13 +2561,15 @@ static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev) goto err_destroy_tirs; } + mlx5e_create_q_counter(priv); + mlx5e_init_eth_addr(priv); mlx5e_vxlan_init(priv); err = mlx5e_tc_init(priv); if (err) - goto err_destroy_flow_tables; + goto err_dealloc_q_counters; #ifdef CONFIG_MLX5_CORE_EN_DCB mlx5e_dcbnl_ieee_setets_core(priv, &priv->params.ets); @@ -2556,7 +2592,8 @@ static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev) err_tc_cleanup: mlx5e_tc_cleanup(priv); -err_destroy_flow_tables: +err_dealloc_q_counters: + mlx5e_destroy_q_counter(priv); mlx5e_destroy_flow_tables(priv); err_destroy_tirs: @@ -2605,6 +2642,7 @@ static void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, void *vpriv) unregister_netdev(netdev); mlx5e_tc_cleanup(priv); mlx5e_vxlan_cleanup(priv); + mlx5e_destroy_q_counter(priv); mlx5e_destroy_flow_tables(priv); mlx5e_destroy_tirs(priv); mlx5e_destroy_rqt(priv, MLX5E_SINGLE_RQ_RQT); From d8c9660dac6287490ef450bc892593f05d364531 Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Wed, 20 Apr 2016 22:02:11 +0300 Subject: [PATCH 0895/1649] net/mlx5e: Use only close NUMA node for default RSS Distribute default RSS table uniformly over the rings of the close NUMA node, instead of all available channels. This way we enforce the preference of close rings over far ones. Signed-off-by: Tariq Toukan Signed-off-by: Saeed Mahameed Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 3 ++- .../net/ethernet/mellanox/mlx5/core/en_ethtool.c | 2 +- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 15 +++++++++++++-- 3 files changed, 16 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index c4ddbe8501a7..7f19644689f2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -671,7 +671,8 @@ void mlx5e_build_tir_ctx_hash(void *tirc, struct mlx5e_priv *priv); int mlx5e_open_locked(struct net_device *netdev); int mlx5e_close_locked(struct net_device *netdev); -void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len, +void mlx5e_build_default_indir_rqt(struct mlx5_core_dev *mdev, + u32 *indirection_rqt, int len, int num_channels); static inline void mlx5e_tx_notify_hw(struct mlx5e_sq *sq, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index 39c19021d154..6f40ba448f07 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -397,7 +397,7 @@ static int mlx5e_set_channels(struct net_device *dev, mlx5e_close_locked(dev); priv->params.num_channels = count; - mlx5e_build_default_indir_rqt(priv->params.indirection_rqt, + mlx5e_build_default_indir_rqt(priv->mdev, priv->params.indirection_rqt, MLX5E_INDIR_RQT_SIZE, count); if (was_opened) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 7fbe1ba86294..9b58ef6cab93 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -2297,11 +2297,22 @@ static void mlx5e_ets_init(struct mlx5e_priv *priv) } #endif -void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len, +void mlx5e_build_default_indir_rqt(struct mlx5_core_dev *mdev, + u32 *indirection_rqt, int len, int num_channels) { + int node = mdev->priv.numa_node; + int node_num_of_cores; int i; + if (node == -1) + node = first_online_node; + + node_num_of_cores = cpumask_weight(cpumask_of_node(node)); + + if (node_num_of_cores) + num_channels = min_t(int, num_channels, node_num_of_cores); + for (i = 0; i < len; i++) indirection_rqt[i] = i % num_channels; } @@ -2333,7 +2344,7 @@ static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev, netdev_rss_key_fill(priv->params.toeplitz_hash_key, sizeof(priv->params.toeplitz_hash_key)); - mlx5e_build_default_indir_rqt(priv->params.indirection_rqt, + mlx5e_build_default_indir_rqt(mdev, priv->params.indirection_rqt, MLX5E_INDIR_RQT_SIZE, num_channels); priv->params.lro_wqe_sz = From 2f48af128d9aa64dd4e8c6fe97491b0bde3681b2 Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Wed, 20 Apr 2016 22:02:12 +0300 Subject: [PATCH 0896/1649] net/mlx5e: Use function pointers for RX data path handling In preparation for Striding RQ feature, which will need its own RX handlers. This patch does not change any functionality. Signed-off-by: Tariq Toukan Signed-off-by: Achiad Shochat Signed-off-by: Saeed Mahameed Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 33 ++++++--- .../net/ethernet/mellanox/mlx5/core/en_main.c | 2 + .../net/ethernet/mellanox/mlx5/core/en_rx.c | 74 ++++++++++--------- 3 files changed, 62 insertions(+), 47 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 7f19644689f2..61e249d8d7f8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -72,6 +72,17 @@ #define MLX5E_SQ_BF_BUDGET 16 #define MLX5E_NUM_MAIN_GROUPS 9 +#define MLX5E_NET_IP_ALIGN 2 + +struct mlx5e_tx_wqe { + struct mlx5_wqe_ctrl_seg ctrl; + struct mlx5_wqe_eth_seg eth; +}; + +struct mlx5e_rx_wqe { + struct mlx5_wqe_srq_next_seg next; + struct mlx5_wqe_data_seg data; +}; #ifdef CONFIG_MLX5_CORE_EN_DCB #define MLX5E_MAX_BW_ALLOC 100 /* Max percentage of BW allocation */ @@ -357,6 +368,12 @@ struct mlx5e_cq { struct mlx5_wq_ctrl wq_ctrl; } ____cacheline_aligned_in_smp; +struct mlx5e_rq; +typedef void (*mlx5e_fp_handle_rx_cqe)(struct mlx5e_rq *rq, + struct mlx5_cqe64 *cqe); +typedef int (*mlx5e_fp_alloc_wqe)(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, + u16 ix); + struct mlx5e_rq { /* data path */ struct mlx5_wq_ll wq; @@ -368,6 +385,8 @@ struct mlx5e_rq { struct mlx5e_tstamp *tstamp; struct mlx5e_rq_stats stats; struct mlx5e_cq cq; + mlx5e_fp_handle_rx_cqe handle_rx_cqe; + mlx5e_fp_alloc_wqe alloc_wqe; unsigned long state; int ix; @@ -588,18 +607,6 @@ struct mlx5e_priv { u16 q_counter; }; -#define MLX5E_NET_IP_ALIGN 2 - -struct mlx5e_tx_wqe { - struct mlx5_wqe_ctrl_seg ctrl; - struct mlx5_wqe_eth_seg eth; -}; - -struct mlx5e_rx_wqe { - struct mlx5_wqe_srq_next_seg next; - struct mlx5_wqe_data_seg data; -}; - enum mlx5e_link_mode { MLX5E_1000BASE_CX_SGMII = 0, MLX5E_1000BASE_KX = 1, @@ -642,7 +649,9 @@ void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event); int mlx5e_napi_poll(struct napi_struct *napi, int budget); bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget); int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget); +void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe); bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq); +int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix); struct mlx5_cqe64 *mlx5e_get_cqe(struct mlx5e_cq *cq); void mlx5e_update_stats(struct mlx5e_priv *priv); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 9b58ef6cab93..23ba12c3a738 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -357,6 +357,8 @@ static int mlx5e_create_rq(struct mlx5e_channel *c, cpu_to_be32(byte_count | MLX5_HW_START_PADDING); } + rq->handle_rx_cqe = mlx5e_handle_rx_cqe; + rq->alloc_wqe = mlx5e_alloc_rx_wqe; rq->pdev = c->pdev; rq->netdev = c->netdev; rq->tstamp = &priv->tstamp; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 58d4e2f962c3..d7cccedddf34 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -42,8 +42,7 @@ static inline bool mlx5e_rx_hw_stamp(struct mlx5e_tstamp *tstamp) return tstamp->hwtstamp_config.rx_filter == HWTSTAMP_FILTER_ALL; } -static inline int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, - struct mlx5e_rx_wqe *wqe, u16 ix) +int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix) { struct sk_buff *skb; 
dma_addr_t dma_addr; @@ -87,7 +86,7 @@ bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq) while (!mlx5_wq_ll_is_full(wq)) { struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(wq, wq->head); - if (unlikely(mlx5e_alloc_rx_wqe(rq, wqe, wq->head))) + if (unlikely(rq->alloc_wqe(rq, wqe, wq->head))) break; mlx5_wq_ll_push(wq, be16_to_cpu(wqe->next.next_wqe_index)); @@ -229,50 +228,55 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe, skb->mark = be32_to_cpu(cqe->sop_drop_qpn) & MLX5E_TC_FLOW_ID_MASK; } +void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) +{ + struct mlx5e_rx_wqe *wqe; + struct sk_buff *skb; + __be16 wqe_counter_be; + u16 wqe_counter; + + wqe_counter_be = cqe->wqe_counter; + wqe_counter = be16_to_cpu(wqe_counter_be); + wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_counter); + skb = rq->skb[wqe_counter]; + prefetch(skb->data); + rq->skb[wqe_counter] = NULL; + + dma_unmap_single(rq->pdev, + *((dma_addr_t *)skb->cb), + rq->wqe_sz, + DMA_FROM_DEVICE); + + if (unlikely((cqe->op_own >> 4) != MLX5_CQE_RESP_SEND)) { + rq->stats.wqe_err++; + dev_kfree_skb(skb); + goto wq_ll_pop; + } + + mlx5e_build_rx_skb(cqe, rq, skb); + rq->stats.packets++; + rq->stats.bytes += be32_to_cpu(cqe->byte_cnt); + napi_gro_receive(rq->cq.napi, skb); + +wq_ll_pop: + mlx5_wq_ll_pop(&rq->wq, wqe_counter_be, + &wqe->next.next_wqe_index); +} + int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget) { struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq); int work_done; for (work_done = 0; work_done < budget; work_done++) { - struct mlx5e_rx_wqe *wqe; - struct mlx5_cqe64 *cqe; - struct sk_buff *skb; - __be16 wqe_counter_be; - u16 wqe_counter; + struct mlx5_cqe64 *cqe = mlx5e_get_cqe(cq); - cqe = mlx5e_get_cqe(cq); if (!cqe) break; mlx5_cqwq_pop(&cq->wq); - wqe_counter_be = cqe->wqe_counter; - wqe_counter = be16_to_cpu(wqe_counter_be); - wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_counter); - skb = rq->skb[wqe_counter]; - prefetch(skb->data); - rq->skb[wqe_counter] = NULL; - - dma_unmap_single(rq->pdev, - *((dma_addr_t *)skb->cb), - rq->wqe_sz, - DMA_FROM_DEVICE); - - if (unlikely((cqe->op_own >> 4) != MLX5_CQE_RESP_SEND)) { - rq->stats.wqe_err++; - dev_kfree_skb(skb); - goto wq_ll_pop; - } - - mlx5e_build_rx_skb(cqe, rq, skb); - rq->stats.packets++; - rq->stats.bytes += be32_to_cpu(cqe->byte_cnt); - napi_gro_receive(cq->napi, skb); - -wq_ll_pop: - mlx5_wq_ll_pop(&rq->wq, wqe_counter_be, - &wqe->next.next_wqe_index); + rq->handle_rx_cqe(rq, cqe); } mlx5_cqwq_update_db_record(&cq->wq); From 461017cb006aa1b39b0f647ae0ee2d9d84eef05b Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Wed, 20 Apr 2016 22:02:13 +0300 Subject: [PATCH 0897/1649] net/mlx5e: Support RX multi-packet WQE (Striding RQ) Introduce the feature of multi-packet WQE (RX Work Queue Element) referred to as (MPWQE or Striding RQ), in which WQEs are larger and serve multiple packets each. Every WQE consists of many strides of the same size, every received packet is aligned to a beginning of a stride and is written to consecutive strides within a WQE. In the regular approach, each regular WQE is big enough to be capable of serving one received packet of any size up to MTU or 64K in case of device LRO is enabled, making it very wasteful when dealing with small packets or device LRO is enabled. For its flexibility, MPWQE allows a better memory utilization (implying improvements in CPU utilization and packet rate) as packets consume strides according to their size, preserving the rest of the WQE to be available for other packets. 
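To make the stride accounting concrete, here is a rough back-of-the-envelope model in plain userspace C, not driver code; the constants mirror the defaults quoted just below, and the helper name strides_per_packet() is invented for illustration.

/*
 * Rough model of MPWQE stride consumption (illustrative only).
 * Each packet starts on a stride boundary and occupies whole strides.
 */
#include <stdio.h>

#define STRIDE_SIZE	64	/* bytes per stride (default)     */
#define STRIDES_PER_WQE	2048	/* strides in one MPWQE (default) */
#define NUM_WQES	16	/* WQEs in the RQ (default)       */

static unsigned int strides_per_packet(unsigned int pkt_bytes)
{
	return (pkt_bytes + STRIDE_SIZE - 1) / STRIDE_SIZE;
}

int main(void)
{
	unsigned int sizes[] = { 64, 256, 1500, 65536 };
	unsigned int i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("%5u-byte packet -> %4u strides\n",
		       sizes[i], strides_per_packet(sizes[i]));

	/* 16 WQEs * 2048 strides * 64 B = 2 MiB of RX buffer per ring. */
	printf("ring footprint: %d bytes\n",
	       NUM_WQES * STRIDES_PER_WQE * STRIDE_SIZE);
	return 0;
}

With these defaults a 64B packet consumes a single stride while a 1500B packet consumes 24, and the ring holds 16 * 2048 = 32768 strides in total, which is where the small-packet loss improvement reported below comes from.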
MPWQE default configuration:
Num of WQEs = 16
Strides Per WQE = 2048
Stride Size = 64 byte

The default WQEs memory footprint went from 1024*mtu (~1.5MB) to 16 * 2048 * 64 = 2MB per ring. However, HW LRO can now be supported at no additional cost in memory footprint, and hence we turn it on by default and get an even better performance.

Performance tested on ConnectX4-Lx 50G. To isolate the feature under test, the numbers below were measured with HW LRO turned off. We verified that the performance just improves when LRO is turned back on.

* Netperf single TCP stream:
- BW raised by 10-15% for representative packet sizes: default, 64B, 1024B, 1478B, 65536B.
* Netperf multi TCP stream:
- No degradation, line rate reached.
* Pktgen: packet rate raised by 2-10% for traffic of different message sizes: 64B, 128B, 256B, 1024B, and 1500B.
* Pktgen: packet loss in bursts of small messages (64byte), single stream:
- | num packets | packets loss before | packets loss after
  | 2K          | ~ 1K                | 0
  | 8K          | ~ 6K                | 0
  | 16K         | ~13K                | 0
  | 32K         | ~28K                | 0
  | 64K         | ~57K                | ~24K

As expected as the driver can receive as many small packets (<=64B) as the number of total strides in the ring (default = 2048 * 16) vs. 1024 (default ring size regardless of packets size) before this feature.

Signed-off-by: Tariq Toukan Signed-off-by: Achiad Shochat Signed-off-by: Saeed Mahameed Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 77 ++++++++- .../ethernet/mellanox/mlx5/core/en_ethtool.c | 15 +- .../net/ethernet/mellanox/mlx5/core/en_main.c | 109 ++++++++++--- .../net/ethernet/mellanox/mlx5/core/en_rx.c | 153 ++++++++++++++++-- include/linux/mlx5/device.h | 39 ++++- 5 files changed, 349 insertions(+), 44 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 61e249d8d7f8..f519148d7dcc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -57,12 +57,30 @@ #define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE 0xa #define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE 0xd +#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW 0x1 +#define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW 0x4 +#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW 0x6 + +#define MLX5_MPWRQ_LOG_NUM_STRIDES 11 /* >= 9, HW restriction */ +#define MLX5_MPWRQ_LOG_STRIDE_SIZE 6 /* >= 6, HW restriction */ +#define MLX5_MPWRQ_NUM_STRIDES BIT(MLX5_MPWRQ_LOG_NUM_STRIDES) +#define MLX5_MPWRQ_STRIDE_SIZE BIT(MLX5_MPWRQ_LOG_STRIDE_SIZE) +#define MLX5_MPWRQ_LOG_WQE_SZ (MLX5_MPWRQ_LOG_NUM_STRIDES +\ + MLX5_MPWRQ_LOG_STRIDE_SIZE) +#define MLX5_MPWRQ_WQE_PAGE_ORDER (MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT > 0 ?
\ + MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT : 0) +#define MLX5_MPWRQ_PAGES_PER_WQE BIT(MLX5_MPWRQ_WQE_PAGE_ORDER) +#define MLX5_MPWRQ_STRIDES_PER_PAGE (MLX5_MPWRQ_NUM_STRIDES >> \ + MLX5_MPWRQ_WQE_PAGE_ORDER) +#define MLX5_MPWRQ_SMALL_PACKET_THRESHOLD (128) + #define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ (64 * 1024) #define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC 0x10 #define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS 0x20 #define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC 0x10 #define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS 0x20 #define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES 0x80 +#define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES_MPW 0x2 #define MLX5E_LOG_INDIR_RQT_SIZE 0x7 #define MLX5E_INDIR_RQT_SIZE BIT(MLX5E_LOG_INDIR_RQT_SIZE) @@ -74,6 +92,38 @@ #define MLX5E_NUM_MAIN_GROUPS 9 #define MLX5E_NET_IP_ALIGN 2 +static inline u16 mlx5_min_rx_wqes(int wq_type, u32 wq_size) +{ + switch (wq_type) { + case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: + return min_t(u16, MLX5E_PARAMS_DEFAULT_MIN_RX_WQES_MPW, + wq_size / 2); + default: + return min_t(u16, MLX5E_PARAMS_DEFAULT_MIN_RX_WQES, + wq_size / 2); + } +} + +static inline int mlx5_min_log_rq_size(int wq_type) +{ + switch (wq_type) { + case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: + return MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW; + default: + return MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE; + } +} + +static inline int mlx5_max_log_rq_size(int wq_type) +{ + switch (wq_type) { + case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: + return MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW; + default: + return MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE; + } +} + struct mlx5e_tx_wqe { struct mlx5_wqe_ctrl_seg ctrl; struct mlx5_wqe_eth_seg eth; @@ -128,6 +178,7 @@ static const char vport_strings[][ETH_GSTRING_LEN] = { "tx_queue_wake", "tx_queue_dropped", "rx_wqe_err", + "rx_mpwqe_filler", }; struct mlx5e_vport_stats { @@ -169,8 +220,9 @@ struct mlx5e_vport_stats { u64 tx_queue_wake; u64 tx_queue_dropped; u64 rx_wqe_err; + u64 rx_mpwqe_filler; -#define NUM_VPORT_COUNTERS 35 +#define NUM_VPORT_COUNTERS 36 }; static const char pport_strings[][ETH_GSTRING_LEN] = { @@ -263,7 +315,8 @@ static const char rq_stats_strings[][ETH_GSTRING_LEN] = { "csum_sw", "lro_packets", "lro_bytes", - "wqe_err" + "wqe_err", + "mpwqe_filler", }; struct mlx5e_rq_stats { @@ -274,7 +327,8 @@ struct mlx5e_rq_stats { u64 lro_packets; u64 lro_bytes; u64 wqe_err; -#define NUM_RQ_STATS 7 + u64 mpwqe_filler; +#define NUM_RQ_STATS 8 }; static const char sq_stats_strings[][ETH_GSTRING_LEN] = { @@ -318,6 +372,7 @@ struct mlx5e_stats { struct mlx5e_params { u8 log_sq_size; + u8 rq_wq_type; u8 log_rq_size; u16 num_channels; u8 num_tc; @@ -374,11 +429,23 @@ typedef void (*mlx5e_fp_handle_rx_cqe)(struct mlx5e_rq *rq, typedef int (*mlx5e_fp_alloc_wqe)(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix); +struct mlx5e_dma_info { + struct page *page; + dma_addr_t addr; +}; + +struct mlx5e_mpw_info { + struct mlx5e_dma_info dma_info; + u16 consumed_strides; + u16 skbs_frags[MLX5_MPWRQ_PAGES_PER_WQE]; +}; + struct mlx5e_rq { /* data path */ struct mlx5_wq_ll wq; u32 wqe_sz; struct sk_buff **skb; + struct mlx5e_mpw_info *wqe_info; struct device *pdev; struct net_device *netdev; @@ -393,6 +460,7 @@ struct mlx5e_rq { /* control */ struct mlx5_wq_ctrl wq_ctrl; + u8 wq_type; u32 rqn; struct mlx5e_channel *channel; struct mlx5e_priv *priv; @@ -649,9 +717,12 @@ void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event); int mlx5e_napi_poll(struct napi_struct *napi, int budget); bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget); int 
mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget); + void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe); +void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe); bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq); int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix); +int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix); struct mlx5_cqe64 *mlx5e_get_cqe(struct mlx5e_cq *cq); void mlx5e_update_stats(struct mlx5e_priv *priv); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index 6f40ba448f07..4077856aab76 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -273,8 +273,9 @@ static void mlx5e_get_ringparam(struct net_device *dev, struct ethtool_ringparam *param) { struct mlx5e_priv *priv = netdev_priv(dev); + int rq_wq_type = priv->params.rq_wq_type; - param->rx_max_pending = 1 << MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE; + param->rx_max_pending = 1 << mlx5_max_log_rq_size(rq_wq_type); param->tx_max_pending = 1 << MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE; param->rx_pending = 1 << priv->params.log_rq_size; param->tx_pending = 1 << priv->params.log_sq_size; @@ -285,6 +286,7 @@ static int mlx5e_set_ringparam(struct net_device *dev, { struct mlx5e_priv *priv = netdev_priv(dev); bool was_opened; + int rq_wq_type = priv->params.rq_wq_type; u16 min_rx_wqes; u8 log_rq_size; u8 log_sq_size; @@ -300,16 +302,16 @@ static int mlx5e_set_ringparam(struct net_device *dev, __func__); return -EINVAL; } - if (param->rx_pending < (1 << MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE)) { + if (param->rx_pending < (1 << mlx5_min_log_rq_size(rq_wq_type))) { netdev_info(dev, "%s: rx_pending (%d) < min (%d)\n", __func__, param->rx_pending, - 1 << MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE); + 1 << mlx5_min_log_rq_size(rq_wq_type)); return -EINVAL; } - if (param->rx_pending > (1 << MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE)) { + if (param->rx_pending > (1 << mlx5_max_log_rq_size(rq_wq_type))) { netdev_info(dev, "%s: rx_pending (%d) > max (%d)\n", __func__, param->rx_pending, - 1 << MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE); + 1 << mlx5_max_log_rq_size(rq_wq_type)); return -EINVAL; } if (param->tx_pending < (1 << MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)) { @@ -327,8 +329,7 @@ static int mlx5e_set_ringparam(struct net_device *dev, log_rq_size = order_base_2(param->rx_pending); log_sq_size = order_base_2(param->tx_pending); - min_rx_wqes = min_t(u16, param->rx_pending - 1, - MLX5E_PARAMS_DEFAULT_MIN_RX_WQES); + min_rx_wqes = mlx5_min_rx_wqes(rq_wq_type, param->rx_pending); if (log_rq_size == priv->params.log_rq_size && log_sq_size == priv->params.log_sq_size && diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 23ba12c3a738..871f3af204dd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -175,6 +175,7 @@ void mlx5e_update_stats(struct mlx5e_priv *priv) s->rx_csum_none = 0; s->rx_csum_sw = 0; s->rx_wqe_err = 0; + s->rx_mpwqe_filler = 0; for (i = 0; i < priv->params.num_channels; i++) { rq_stats = &priv->channel[i]->rq.stats; @@ -185,6 +186,7 @@ void mlx5e_update_stats(struct mlx5e_priv *priv) s->rx_csum_none += rq_stats->csum_none; s->rx_csum_sw += rq_stats->csum_sw; s->rx_wqe_err += rq_stats->wqe_err; + s->rx_mpwqe_filler += rq_stats->mpwqe_filler; for (j = 0; j < priv->params.num_tc; j++) { sq_stats = 
&priv->channel[i]->sq[j].stats; @@ -323,6 +325,7 @@ static int mlx5e_create_rq(struct mlx5e_channel *c, struct mlx5_core_dev *mdev = priv->mdev; void *rqc = param->rqc; void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq); + u32 byte_count; int wq_sz; int err; int i; @@ -337,28 +340,47 @@ static int mlx5e_create_rq(struct mlx5e_channel *c, rq->wq.db = &rq->wq.db[MLX5_RCV_DBR]; wq_sz = mlx5_wq_ll_get_size(&rq->wq); - rq->skb = kzalloc_node(wq_sz * sizeof(*rq->skb), GFP_KERNEL, - cpu_to_node(c->cpu)); - if (!rq->skb) { - err = -ENOMEM; - goto err_rq_wq_destroy; - } - rq->wqe_sz = (priv->params.lro_en) ? priv->params.lro_wqe_sz : - MLX5E_SW2HW_MTU(priv->netdev->mtu); - rq->wqe_sz = SKB_DATA_ALIGN(rq->wqe_sz + MLX5E_NET_IP_ALIGN); + switch (priv->params.rq_wq_type) { + case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: + rq->wqe_info = kzalloc_node(wq_sz * sizeof(*rq->wqe_info), + GFP_KERNEL, cpu_to_node(c->cpu)); + if (!rq->wqe_info) { + err = -ENOMEM; + goto err_rq_wq_destroy; + } + rq->handle_rx_cqe = mlx5e_handle_rx_cqe_mpwrq; + rq->alloc_wqe = mlx5e_alloc_rx_mpwqe; + + rq->wqe_sz = MLX5_MPWRQ_NUM_STRIDES * MLX5_MPWRQ_STRIDE_SIZE; + byte_count = rq->wqe_sz; + break; + default: /* MLX5_WQ_TYPE_LINKED_LIST */ + rq->skb = kzalloc_node(wq_sz * sizeof(*rq->skb), GFP_KERNEL, + cpu_to_node(c->cpu)); + if (!rq->skb) { + err = -ENOMEM; + goto err_rq_wq_destroy; + } + rq->handle_rx_cqe = mlx5e_handle_rx_cqe; + rq->alloc_wqe = mlx5e_alloc_rx_wqe; + + rq->wqe_sz = (priv->params.lro_en) ? + priv->params.lro_wqe_sz : + MLX5E_SW2HW_MTU(priv->netdev->mtu); + rq->wqe_sz = SKB_DATA_ALIGN(rq->wqe_sz + MLX5E_NET_IP_ALIGN); + byte_count = rq->wqe_sz - MLX5E_NET_IP_ALIGN; + byte_count |= MLX5_HW_START_PADDING; + } for (i = 0; i < wq_sz; i++) { struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, i); - u32 byte_count = rq->wqe_sz - MLX5E_NET_IP_ALIGN; wqe->data.lkey = c->mkey_be; - wqe->data.byte_count = - cpu_to_be32(byte_count | MLX5_HW_START_PADDING); + wqe->data.byte_count = cpu_to_be32(byte_count); } - rq->handle_rx_cqe = mlx5e_handle_rx_cqe; - rq->alloc_wqe = mlx5e_alloc_rx_wqe; + rq->wq_type = priv->params.rq_wq_type; rq->pdev = c->pdev; rq->netdev = c->netdev; rq->tstamp = &priv->tstamp; @@ -376,7 +398,14 @@ err_rq_wq_destroy: static void mlx5e_destroy_rq(struct mlx5e_rq *rq) { - kfree(rq->skb); + switch (rq->wq_type) { + case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: + kfree(rq->wqe_info); + break; + default: /* MLX5_WQ_TYPE_LINKED_LIST */ + kfree(rq->skb); + } + mlx5_wq_destroy(&rq->wq_ctrl); } @@ -1065,7 +1094,18 @@ static void mlx5e_build_rq_param(struct mlx5e_priv *priv, void *rqc = param->rqc; void *wq = MLX5_ADDR_OF(rqc, rqc, wq); - MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST); + switch (priv->params.rq_wq_type) { + case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: + MLX5_SET(wq, wq, log_wqe_num_of_strides, + MLX5_MPWRQ_LOG_NUM_STRIDES - 9); + MLX5_SET(wq, wq, log_wqe_stride_size, + MLX5_MPWRQ_LOG_STRIDE_SIZE - 6); + MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ); + break; + default: /* MLX5_WQ_TYPE_LINKED_LIST */ + MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST); + } + MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN); MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe))); MLX5_SET(wq, wq, log_wq_sz, priv->params.log_rq_size); @@ -1111,8 +1151,18 @@ static void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv, struct mlx5e_cq_param *param) { void *cqc = param->cqc; + u8 log_cq_size; - MLX5_SET(cqc, cqc, log_cq_size, priv->params.log_rq_size); + switch 
(priv->params.rq_wq_type) { + case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: + log_cq_size = priv->params.log_rq_size + + MLX5_MPWRQ_LOG_NUM_STRIDES; + break; + default: /* MLX5_WQ_TYPE_LINKED_LIST */ + log_cq_size = priv->params.log_rq_size; + } + + MLX5_SET(cqc, cqc, log_cq_size, log_cq_size); mlx5e_build_common_cq_param(priv, param); } @@ -1983,7 +2033,8 @@ static int mlx5e_set_features(struct net_device *netdev, if (changes & NETIF_F_LRO) { bool was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state); - if (was_opened) + if (was_opened && (priv->params.rq_wq_type == + MLX5_WQ_TYPE_LINKED_LIST)) mlx5e_close_locked(priv->netdev); priv->params.lro_en = !!(features & NETIF_F_LRO); @@ -1992,7 +2043,8 @@ static int mlx5e_set_features(struct net_device *netdev, mlx5_core_warn(priv->mdev, "lro modify failed, %d\n", err); - if (was_opened) + if (was_opened && (priv->params.rq_wq_type == + MLX5_WQ_TYPE_LINKED_LIST)) err = mlx5e_open_locked(priv->netdev); } @@ -2327,8 +2379,21 @@ static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev, priv->params.log_sq_size = MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE; - priv->params.log_rq_size = - MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE; + priv->params.rq_wq_type = MLX5_CAP_GEN(mdev, striding_rq) ? + MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ : + MLX5_WQ_TYPE_LINKED_LIST; + + switch (priv->params.rq_wq_type) { + case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: + priv->params.log_rq_size = MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW; + priv->params.lro_en = true; + break; + default: /* MLX5_WQ_TYPE_LINKED_LIST */ + priv->params.log_rq_size = MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE; + } + + priv->params.min_rx_wqes = mlx5_min_rx_wqes(priv->params.rq_wq_type, + BIT(priv->params.log_rq_size)); priv->params.rx_cq_moderation_usec = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC; priv->params.rx_cq_moderation_pkts = @@ -2338,8 +2403,6 @@ static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev, priv->params.tx_cq_moderation_pkts = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS; priv->params.tx_max_inline = mlx5e_get_max_inline_cap(mdev); - priv->params.min_rx_wqes = - MLX5E_PARAMS_DEFAULT_MIN_RX_WQES; priv->params.num_tc = 1; priv->params.rss_hfunc = ETH_RSS_HASH_XOR; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index d7cccedddf34..71f3a5d244ff 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -76,6 +76,41 @@ err_free_skb: return -ENOMEM; } +int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix) +{ + struct mlx5e_mpw_info *wi = &rq->wqe_info[ix]; + gfp_t gfp_mask; + int i; + + gfp_mask = GFP_ATOMIC | __GFP_COLD | __GFP_MEMALLOC; + wi->dma_info.page = alloc_pages_node(NUMA_NO_NODE, gfp_mask, + MLX5_MPWRQ_WQE_PAGE_ORDER); + if (unlikely(!wi->dma_info.page)) + return -ENOMEM; + + wi->dma_info.addr = dma_map_page(rq->pdev, wi->dma_info.page, 0, + rq->wqe_sz, PCI_DMA_FROMDEVICE); + if (unlikely(dma_mapping_error(rq->pdev, wi->dma_info.addr))) { + put_page(wi->dma_info.page); + return -ENOMEM; + } + + /* We split the high-order page into order-0 ones and manage their + * reference counter to minimize the memory held by small skb fragments + */ + split_page(wi->dma_info.page, MLX5_MPWRQ_WQE_PAGE_ORDER); + for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++) { + atomic_add(MLX5_MPWRQ_STRIDES_PER_PAGE, + &wi->dma_info.page[i]._count); + wi->skbs_frags[i] = 0; + } + + wi->consumed_strides = 0; + wqe->data.addr = cpu_to_be64(wi->dma_info.addr); + + return 0; +} + 
bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq) { struct mlx5_wq_ll *wq = &rq->wq; @@ -100,7 +135,8 @@ bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq) return !mlx5_wq_ll_is_full(wq); } -static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe) +static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe, + u32 cqe_bcnt) { struct ethhdr *eth = (struct ethhdr *)(skb->data); struct iphdr *ipv4 = (struct iphdr *)(skb->data + ETH_HLEN); @@ -111,7 +147,7 @@ static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe) int tcp_ack = ((CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA == l4_hdr_type) || (CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA == l4_hdr_type)); - u16 tot_len = be32_to_cpu(cqe->byte_cnt) - ETH_HLEN; + u16 tot_len = cqe_bcnt - ETH_HLEN; if (eth->h_proto == htons(ETH_P_IP)) { tcp = (struct tcphdr *)(skb->data + ETH_HLEN + @@ -191,19 +227,17 @@ csum_none: } static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe, + u32 cqe_bcnt, struct mlx5e_rq *rq, struct sk_buff *skb) { struct net_device *netdev = rq->netdev; - u32 cqe_bcnt = be32_to_cpu(cqe->byte_cnt); struct mlx5e_tstamp *tstamp = rq->tstamp; int lro_num_seg; - skb_put(skb, cqe_bcnt); - lro_num_seg = be32_to_cpu(cqe->srqn) >> 24; if (lro_num_seg > 1) { - mlx5e_lro_update_hdr(skb, cqe); + mlx5e_lro_update_hdr(skb, cqe, cqe_bcnt); skb_shinfo(skb)->gso_size = DIV_ROUND_UP(cqe_bcnt, lro_num_seg); rq->stats.lro_packets++; rq->stats.lro_bytes += cqe_bcnt; @@ -228,12 +262,24 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe, skb->mark = be32_to_cpu(cqe->sop_drop_qpn) & MLX5E_TC_FLOW_ID_MASK; } +static inline void mlx5e_complete_rx_cqe(struct mlx5e_rq *rq, + struct mlx5_cqe64 *cqe, + u32 cqe_bcnt, + struct sk_buff *skb) +{ + rq->stats.packets++; + rq->stats.bytes += cqe_bcnt; + mlx5e_build_rx_skb(cqe, cqe_bcnt, rq, skb); + napi_gro_receive(rq->cq.napi, skb); +} + void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) { struct mlx5e_rx_wqe *wqe; struct sk_buff *skb; __be16 wqe_counter_be; u16 wqe_counter; + u32 cqe_bcnt; wqe_counter_be = cqe->wqe_counter; wqe_counter = be16_to_cpu(wqe_counter_be); @@ -253,16 +299,103 @@ void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) goto wq_ll_pop; } - mlx5e_build_rx_skb(cqe, rq, skb); - rq->stats.packets++; - rq->stats.bytes += be32_to_cpu(cqe->byte_cnt); - napi_gro_receive(rq->cq.napi, skb); + cqe_bcnt = be32_to_cpu(cqe->byte_cnt); + skb_put(skb, cqe_bcnt); + + mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb); wq_ll_pop: mlx5_wq_ll_pop(&rq->wq, wqe_counter_be, &wqe->next.next_wqe_index); } +void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) +{ + u16 cstrides = mpwrq_get_cqe_consumed_strides(cqe); + u16 stride_ix = mpwrq_get_cqe_stride_index(cqe); + u16 wqe_id = be16_to_cpu(cqe->wqe_id); + struct mlx5e_mpw_info *wi = &rq->wqe_info[wqe_id]; + struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_id); + struct sk_buff *skb; + u32 consumed_bytes; + u32 head_offset; + u32 frag_offset; + u32 wqe_offset; + u32 page_idx; + u16 byte_cnt; + u16 cqe_bcnt; + u16 headlen; + int i; + + wi->consumed_strides += cstrides; + + if (unlikely((cqe->op_own >> 4) != MLX5_CQE_RESP_SEND)) { + rq->stats.wqe_err++; + goto mpwrq_cqe_out; + } + + if (unlikely(mpwrq_is_filler_cqe(cqe))) { + rq->stats.mpwqe_filler++; + goto mpwrq_cqe_out; + } + + skb = netdev_alloc_skb(rq->netdev, + ALIGN(MLX5_MPWRQ_SMALL_PACKET_THRESHOLD, + sizeof(long))); + if (unlikely(!skb)) + goto mpwrq_cqe_out; + + prefetch(skb->data); + wqe_offset = 
stride_ix * MLX5_MPWRQ_STRIDE_SIZE; + consumed_bytes = cstrides * MLX5_MPWRQ_STRIDE_SIZE; + dma_sync_single_for_cpu(rq->pdev, wi->dma_info.addr + wqe_offset, + consumed_bytes, DMA_FROM_DEVICE); + + head_offset = wqe_offset & (PAGE_SIZE - 1); + page_idx = wqe_offset >> PAGE_SHIFT; + cqe_bcnt = mpwrq_get_cqe_byte_cnt(cqe); + headlen = min_t(u16, MLX5_MPWRQ_SMALL_PACKET_THRESHOLD, cqe_bcnt); + frag_offset = head_offset + headlen; + + byte_cnt = cqe_bcnt - headlen; + while (byte_cnt) { + u32 pg_consumed_bytes = + min_t(u32, PAGE_SIZE - frag_offset, byte_cnt); + unsigned int truesize = + ALIGN(pg_consumed_bytes, MLX5_MPWRQ_STRIDE_SIZE); + + wi->skbs_frags[page_idx]++; + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, + &wi->dma_info.page[page_idx], frag_offset, + pg_consumed_bytes, truesize); + byte_cnt -= pg_consumed_bytes; + frag_offset = 0; + page_idx++; + } + + skb_copy_to_linear_data(skb, + page_address(wi->dma_info.page) + wqe_offset, + ALIGN(headlen, sizeof(long))); + /* skb linear part was allocated with headlen and aligned to long */ + skb->tail += headlen; + skb->len += headlen; + + mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb); + +mpwrq_cqe_out: + if (likely(wi->consumed_strides < MLX5_MPWRQ_NUM_STRIDES)) + return; + + dma_unmap_page(rq->pdev, wi->dma_info.addr, rq->wqe_sz, + PCI_DMA_FROMDEVICE); + for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++) { + atomic_sub(MLX5_MPWRQ_STRIDES_PER_PAGE - wi->skbs_frags[i], + &wi->dma_info.page[i]._count); + put_page(&wi->dma_info.page[i]); + } + mlx5_wq_ll_pop(&rq->wq, cqe->wqe_id, &wqe->next.next_wqe_index); +} + int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget) { struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq); diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index 8156e3c9239c..03f8d719b680 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -644,7 +644,8 @@ struct mlx5_err_cqe { }; struct mlx5_cqe64 { - u8 rsvd0[4]; + u8 rsvd0[2]; + __be16 wqe_id; u8 lro_tcppsh_abort_dupack; u8 lro_min_ttl; __be16 lro_tcp_win; @@ -696,6 +697,42 @@ static inline u64 get_cqe_ts(struct mlx5_cqe64 *cqe) return (u64)lo | ((u64)hi << 32); } +struct mpwrq_cqe_bc { + __be16 filler_consumed_strides; + __be16 byte_cnt; +}; + +static inline u16 mpwrq_get_cqe_byte_cnt(struct mlx5_cqe64 *cqe) +{ + struct mpwrq_cqe_bc *bc = (struct mpwrq_cqe_bc *)&cqe->byte_cnt; + + return be16_to_cpu(bc->byte_cnt); +} + +static inline u16 mpwrq_get_cqe_bc_consumed_strides(struct mpwrq_cqe_bc *bc) +{ + return 0x7fff & be16_to_cpu(bc->filler_consumed_strides); +} + +static inline u16 mpwrq_get_cqe_consumed_strides(struct mlx5_cqe64 *cqe) +{ + struct mpwrq_cqe_bc *bc = (struct mpwrq_cqe_bc *)&cqe->byte_cnt; + + return mpwrq_get_cqe_bc_consumed_strides(bc); +} + +static inline bool mpwrq_is_filler_cqe(struct mlx5_cqe64 *cqe) +{ + struct mpwrq_cqe_bc *bc = (struct mpwrq_cqe_bc *)&cqe->byte_cnt; + + return 0x8000 & be16_to_cpu(bc->filler_consumed_strides); +} + +static inline u16 mpwrq_get_cqe_stride_index(struct mlx5_cqe64 *cqe) +{ + return be16_to_cpu(cqe->wqe_counter); +} + enum { CQE_L4_HDR_TYPE_NONE = 0x0, CQE_L4_HDR_TYPE_TCP_NO_ACK = 0x1, From d3c9bc2743dc95b273ed0e6a3394a71ca314813c Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Wed, 20 Apr 2016 22:02:14 +0300 Subject: [PATCH 0898/1649] net/mlx5e: Added ICO SQs Added ICO (Internal Control Operations) SQ per channel to be used for driver internal operations such as memory registration for fragmented memory and nop requests upon ifconfig up. 
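For illustration, the sketch below is a minimal, self-contained user-space model of the bookkeeping this patch adds around the ICO SQ: a producer counter (pc), a consumer counter (cc), and a per-slot record of the posted opcode and its WQEBB count, which the completion path uses to advance cc by the right amount. All names, sizes, and opcode values in the sketch are illustrative only; it is not the driver code.

/* Simplified model of ICO SQ bookkeeping: a cyclic queue with pc/cc
 * counters and a per-slot opcode/num_wqebbs record. Sizes, names and
 * opcode values here are illustrative, not the driver's.
 */
#include <stdio.h>
#include <stdint.h>

#define SQ_LOG_SIZE 4
#define SQ_SIZE     (1 << SQ_LOG_SIZE)
#define SQ_MASK     (SQ_SIZE - 1)

enum { OP_NOP = 0x00, OP_UMR = 0x25 };   /* illustrative opcode values */

struct ico_wqe_info {
	uint8_t opcode;
	uint8_t num_wqebbs;
};

struct ico_sq {
	uint16_t pc;                        /* producer counter */
	uint16_t cc;                        /* consumer counter */
	struct ico_wqe_info info[SQ_SIZE];  /* indexed by slot (pc & SQ_MASK) */
};

static void post_wqe(struct ico_sq *sq, uint8_t opcode, uint8_t num_wqebbs)
{
	uint16_t pi = sq->pc & SQ_MASK;

	/* record what was posted at this slot, then advance the producer */
	sq->info[pi].opcode = opcode;
	sq->info[pi].num_wqebbs = num_wqebbs;
	sq->pc += num_wqebbs;               /* multi-WQEBB WQEs consume several slots */
}

/* Completion handler: the CQE reports the WQE's slot index; the stored
 * num_wqebbs tells how much room the finished WQE frees up.
 */
static void complete_wqe(struct ico_sq *sq, uint16_t wqe_counter)
{
	uint16_t ci = wqe_counter & SQ_MASK;

	sq->cc += sq->info[ci].num_wqebbs;
}

int main(void)
{
	struct ico_sq sq = { 0 };

	post_wqe(&sq, OP_NOP, 1);           /* e.g. kick-start RX posting */
	post_wqe(&sq, OP_UMR, 2);           /* e.g. a memory-registration WQE */

	complete_wqe(&sq, 0);               /* NOP completed */
	complete_wqe(&sq, 1);               /* UMR completed */

	printf("pc=%u cc=%u outstanding=%u\n", (unsigned)sq.pc, (unsigned)sq.cc,
	       (unsigned)(uint16_t)(sq.pc - sq.cc));
	return 0;
}

Recording num_wqebbs per slot matters because internal operations such as memory registration span more than one WQEBB, so each completion must free a variable number of slots.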
Signed-off-by: Tariq Toukan Signed-off-by: Saeed Mahameed Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 7 + .../net/ethernet/mellanox/mlx5/core/en_main.c | 135 ++++++++++++++---- .../net/ethernet/mellanox/mlx5/core/en_tx.c | 2 +- .../net/ethernet/mellanox/mlx5/core/en_txrx.c | 55 +++++++ 4 files changed, 174 insertions(+), 25 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index f519148d7dcc..a757fcf19332 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -488,6 +488,11 @@ enum { MLX5E_SQ_STATE_BF_ENABLE, }; +struct mlx5e_ico_wqe_info { + u8 opcode; + u8 num_wqebbs; +}; + struct mlx5e_sq { /* data path */ @@ -529,6 +534,7 @@ struct mlx5e_sq { struct mlx5_uar uar; struct mlx5e_channel *channel; int tc; + struct mlx5e_ico_wqe_info *ico_wqe_info; } ____cacheline_aligned_in_smp; static inline bool mlx5e_sq_has_room_for(struct mlx5e_sq *sq, u16 n) @@ -545,6 +551,7 @@ struct mlx5e_channel { /* data path */ struct mlx5e_rq rq; struct mlx5e_sq sq[MLX5E_MAX_NUM_TC]; + struct mlx5e_sq icosq; /* internal control operations */ struct napi_struct napi; struct device *pdev; struct net_device *netdev; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 871f3af204dd..b25b429ebabb 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -48,6 +48,7 @@ struct mlx5e_sq_param { u32 sqc[MLX5_ST_SZ_DW(sqc)]; struct mlx5_wq_param wq; u16 max_inline; + bool icosq; }; struct mlx5e_cq_param { @@ -59,8 +60,10 @@ struct mlx5e_cq_param { struct mlx5e_channel_param { struct mlx5e_rq_param rq; struct mlx5e_sq_param sq; + struct mlx5e_sq_param icosq; struct mlx5e_cq_param rx_cq; struct mlx5e_cq_param tx_cq; + struct mlx5e_cq_param icosq_cq; }; static void mlx5e_update_carrier(struct mlx5e_priv *priv) @@ -502,6 +505,8 @@ static int mlx5e_open_rq(struct mlx5e_channel *c, struct mlx5e_rq_param *param, struct mlx5e_rq *rq) { + struct mlx5e_sq *sq = &c->icosq; + u16 pi = sq->pc & sq->wq.sz_m1; int err; err = mlx5e_create_rq(c, param, rq); @@ -517,7 +522,10 @@ static int mlx5e_open_rq(struct mlx5e_channel *c, goto err_disable_rq; set_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state); - mlx5e_send_nop(&c->sq[0], true); /* trigger mlx5e_post_rx_wqes() */ + + sq->ico_wqe_info[pi].opcode = MLX5_OPCODE_NOP; + sq->ico_wqe_info[pi].num_wqebbs = 1; + mlx5e_send_nop(sq, true); /* trigger mlx5e_post_rx_wqes() */ return 0; @@ -583,7 +591,6 @@ static int mlx5e_create_sq(struct mlx5e_channel *c, void *sqc = param->sqc; void *sqc_wq = MLX5_ADDR_OF(sqc, sqc, wq); - int txq_ix; int err; err = mlx5_alloc_map_uar(mdev, &sq->uar, true); @@ -611,8 +618,24 @@ static int mlx5e_create_sq(struct mlx5e_channel *c, if (err) goto err_sq_wq_destroy; - txq_ix = c->ix + tc * priv->params.num_channels; - sq->txq = netdev_get_tx_queue(priv->netdev, txq_ix); + if (param->icosq) { + u8 wq_sz = mlx5_wq_cyc_get_size(&sq->wq); + + sq->ico_wqe_info = kzalloc_node(sizeof(*sq->ico_wqe_info) * + wq_sz, + GFP_KERNEL, + cpu_to_node(c->cpu)); + if (!sq->ico_wqe_info) { + err = -ENOMEM; + goto err_free_sq_db; + } + } else { + int txq_ix; + + txq_ix = c->ix + tc * priv->params.num_channels; + sq->txq = netdev_get_tx_queue(priv->netdev, txq_ix); + priv->txq_to_sq_map[txq_ix] = sq; + } sq->pdev = c->pdev; sq->tstamp = &priv->tstamp; @@ -621,10 +644,12 @@ static int 
mlx5e_create_sq(struct mlx5e_channel *c, sq->tc = tc; sq->edge = (sq->wq.sz_m1 + 1) - MLX5_SEND_WQE_MAX_WQEBBS; sq->bf_budget = MLX5E_SQ_BF_BUDGET; - priv->txq_to_sq_map[txq_ix] = sq; return 0; +err_free_sq_db: + mlx5e_free_sq_db(sq); + err_sq_wq_destroy: mlx5_wq_destroy(&sq->wq_ctrl); @@ -639,6 +664,7 @@ static void mlx5e_destroy_sq(struct mlx5e_sq *sq) struct mlx5e_channel *c = sq->channel; struct mlx5e_priv *priv = c->priv; + kfree(sq->ico_wqe_info); mlx5e_free_sq_db(sq); mlx5_wq_destroy(&sq->wq_ctrl); mlx5_unmap_free_uar(priv->mdev, &sq->uar); @@ -667,10 +693,10 @@ static int mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param) memcpy(sqc, param->sqc, sizeof(param->sqc)); - MLX5_SET(sqc, sqc, tis_num_0, priv->tisn[sq->tc]); - MLX5_SET(sqc, sqc, cqn, c->sq[sq->tc].cq.mcq.cqn); + MLX5_SET(sqc, sqc, tis_num_0, param->icosq ? 0 : priv->tisn[sq->tc]); + MLX5_SET(sqc, sqc, cqn, sq->cq.mcq.cqn); MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST); - MLX5_SET(sqc, sqc, tis_lst_sz, 1); + MLX5_SET(sqc, sqc, tis_lst_sz, param->icosq ? 0 : 1); MLX5_SET(sqc, sqc, flush_in_error_en, 1); MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC); @@ -745,9 +771,11 @@ static int mlx5e_open_sq(struct mlx5e_channel *c, if (err) goto err_disable_sq; - set_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state); - netdev_tx_reset_queue(sq->txq); - netif_tx_start_queue(sq->txq); + if (sq->txq) { + set_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state); + netdev_tx_reset_queue(sq->txq); + netif_tx_start_queue(sq->txq); + } return 0; @@ -768,15 +796,19 @@ static inline void netif_tx_disable_queue(struct netdev_queue *txq) static void mlx5e_close_sq(struct mlx5e_sq *sq) { - clear_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state); - napi_synchronize(&sq->channel->napi); /* prevent netif_tx_wake_queue */ - netif_tx_disable_queue(sq->txq); + if (sq->txq) { + clear_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state); + /* prevent netif_tx_wake_queue */ + napi_synchronize(&sq->channel->napi); + netif_tx_disable_queue(sq->txq); - /* ensure hw is notified of all pending wqes */ - if (mlx5e_sq_has_room_for(sq, 1)) - mlx5e_send_nop(sq, true); + /* ensure hw is notified of all pending wqes */ + if (mlx5e_sq_has_room_for(sq, 1)) + mlx5e_send_nop(sq, true); + + mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY, MLX5_SQC_STATE_ERR); + } - mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY, MLX5_SQC_STATE_ERR); while (sq->cc != sq->pc) /* wait till sq is empty */ msleep(20); @@ -1030,10 +1062,14 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix, netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64); - err = mlx5e_open_tx_cqs(c, cparam); + err = mlx5e_open_cq(c, &cparam->icosq_cq, &c->icosq.cq, 0, 0); if (err) goto err_napi_del; + err = mlx5e_open_tx_cqs(c, cparam); + if (err) + goto err_close_icosq_cq; + err = mlx5e_open_cq(c, &cparam->rx_cq, &c->rq.cq, priv->params.rx_cq_moderation_usec, priv->params.rx_cq_moderation_pkts); @@ -1042,10 +1078,14 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix, napi_enable(&c->napi); - err = mlx5e_open_sqs(c, cparam); + err = mlx5e_open_sq(c, 0, &cparam->icosq, &c->icosq); if (err) goto err_disable_napi; + err = mlx5e_open_sqs(c, cparam); + if (err) + goto err_close_icosq; + err = mlx5e_open_rq(c, &cparam->rq, &c->rq); if (err) goto err_close_sqs; @@ -1058,6 +1098,9 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix, err_close_sqs: mlx5e_close_sqs(c); +err_close_icosq: + mlx5e_close_sq(&c->icosq); + err_disable_napi: napi_disable(&c->napi); mlx5e_close_cq(&c->rq.cq); @@ -1065,6 +1108,9 @@ 
err_disable_napi: err_close_tx_cqs: mlx5e_close_tx_cqs(c); +err_close_icosq_cq: + mlx5e_close_cq(&c->icosq.cq); + err_napi_del: netif_napi_del(&c->napi); napi_hash_del(&c->napi); @@ -1077,9 +1123,11 @@ static void mlx5e_close_channel(struct mlx5e_channel *c) { mlx5e_close_rq(&c->rq); mlx5e_close_sqs(c); + mlx5e_close_sq(&c->icosq); napi_disable(&c->napi); mlx5e_close_cq(&c->rq.cq); mlx5e_close_tx_cqs(c); + mlx5e_close_cq(&c->icosq.cq); netif_napi_del(&c->napi); napi_hash_del(&c->napi); @@ -1125,17 +1173,27 @@ static void mlx5e_build_drop_rq_param(struct mlx5e_rq_param *param) MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe))); } +static void mlx5e_build_sq_param_common(struct mlx5e_priv *priv, + struct mlx5e_sq_param *param) +{ + void *sqc = param->sqc; + void *wq = MLX5_ADDR_OF(sqc, sqc, wq); + + MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB)); + MLX5_SET(wq, wq, pd, priv->pdn); + + param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev); +} + static void mlx5e_build_sq_param(struct mlx5e_priv *priv, struct mlx5e_sq_param *param) { void *sqc = param->sqc; void *wq = MLX5_ADDR_OF(sqc, sqc, wq); + mlx5e_build_sq_param_common(priv, param); MLX5_SET(wq, wq, log_wq_sz, priv->params.log_sq_size); - MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB)); - MLX5_SET(wq, wq, pd, priv->pdn); - param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev); param->max_inline = priv->params.tx_max_inline; } @@ -1172,20 +1230,49 @@ static void mlx5e_build_tx_cq_param(struct mlx5e_priv *priv, { void *cqc = param->cqc; - MLX5_SET(cqc, cqc, log_cq_size, priv->params.log_sq_size); + MLX5_SET(cqc, cqc, log_cq_size, priv->params.log_sq_size); mlx5e_build_common_cq_param(priv, param); } +static void mlx5e_build_ico_cq_param(struct mlx5e_priv *priv, + struct mlx5e_cq_param *param, + u8 log_wq_size) +{ + void *cqc = param->cqc; + + MLX5_SET(cqc, cqc, log_cq_size, log_wq_size); + + mlx5e_build_common_cq_param(priv, param); +} + +static void mlx5e_build_icosq_param(struct mlx5e_priv *priv, + struct mlx5e_sq_param *param, + u8 log_wq_size) +{ + void *sqc = param->sqc; + void *wq = MLX5_ADDR_OF(sqc, sqc, wq); + + mlx5e_build_sq_param_common(priv, param); + + MLX5_SET(wq, wq, log_wq_sz, log_wq_size); + + param->icosq = true; +} + static void mlx5e_build_channel_param(struct mlx5e_priv *priv, struct mlx5e_channel_param *cparam) { + u8 icosq_log_wq_sz = 0; + memset(cparam, 0, sizeof(*cparam)); mlx5e_build_rq_param(priv, &cparam->rq); mlx5e_build_sq_param(priv, &cparam->sq); + mlx5e_build_icosq_param(priv, &cparam->icosq, icosq_log_wq_sz); mlx5e_build_rx_cq_param(priv, &cparam->rx_cq); mlx5e_build_tx_cq_param(priv, &cparam->tx_cq); + mlx5e_build_ico_cq_param(priv, &cparam->icosq_cq, icosq_log_wq_sz); } static int mlx5e_open_channels(struct mlx5e_priv *priv) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index 1ffc7cb6f78c..a8d2935c50d0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c @@ -54,6 +54,7 @@ void mlx5e_send_nop(struct mlx5e_sq *sq, bool notify_hw) sq->skb[pi] = NULL; sq->pc++; + sq->stats.nop++; if (notify_hw) { cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE; @@ -387,7 +388,6 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget) wi = &sq->wqe_info[ci]; if (unlikely(!skb)) { /* nop */ - sq->stats.nop++; sqcc++; continue; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c index 
9bb4395aceeb..ad624cb6f147 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c @@ -49,6 +49,57 @@ struct mlx5_cqe64 *mlx5e_get_cqe(struct mlx5e_cq *cq) return cqe; } +static void mlx5e_poll_ico_cq(struct mlx5e_cq *cq) +{ + struct mlx5_wq_cyc *wq; + struct mlx5_cqe64 *cqe; + struct mlx5e_sq *sq; + u16 sqcc; + + cqe = mlx5e_get_cqe(cq); + if (likely(!cqe)) + return; + + sq = container_of(cq, struct mlx5e_sq, cq); + wq = &sq->wq; + + /* sq->cc must be updated only after mlx5_cqwq_update_db_record(), + * otherwise a cq overrun may occur + */ + sqcc = sq->cc; + + do { + u16 ci = be16_to_cpu(cqe->wqe_counter) & wq->sz_m1; + struct mlx5e_ico_wqe_info *icowi = &sq->ico_wqe_info[ci]; + + mlx5_cqwq_pop(&cq->wq); + sqcc += icowi->num_wqebbs; + + if (unlikely((cqe->op_own >> 4) != MLX5_CQE_REQ)) { + WARN_ONCE(true, "mlx5e: Bad OP in ICOSQ CQE: 0x%x\n", + cqe->op_own); + break; + } + + switch (icowi->opcode) { + case MLX5_OPCODE_NOP: + break; + default: + WARN_ONCE(true, + "mlx5e: Bad OPCODE in ICOSQ WQE info: 0x%x\n", + icowi->opcode); + } + + } while ((cqe = mlx5e_get_cqe(cq))); + + mlx5_cqwq_update_db_record(&cq->wq); + + /* ensure cq space is freed before enabling more cqes */ + wmb(); + + sq->cc = sqcc; +} + int mlx5e_napi_poll(struct napi_struct *napi, int budget) { struct mlx5e_channel *c = container_of(napi, struct mlx5e_channel, @@ -64,6 +115,9 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget) work_done = mlx5e_poll_rx_cq(&c->rq.cq, budget); busy |= work_done == budget; + + mlx5e_poll_ico_cq(&c->icosq.cq); + busy |= mlx5e_post_rx_wqes(&c->rq); if (busy) @@ -80,6 +134,7 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget) for (i = 0; i < c->num_tc; i++) mlx5e_cq_arm(&c->sq[i].cq); mlx5e_cq_arm(&c->rq.cq); + mlx5e_cq_arm(&c->icosq.cq); return work_done; } From bc77b240b3c57236cdcc08d64ca390655d3a16ff Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Wed, 20 Apr 2016 22:02:15 +0300 Subject: [PATCH 0899/1649] net/mlx5e: Add fragmented memory support for RX multi packet WQE If the allocation of a linear (physically continuous) MPWQE fails, we allocate a fragmented MPWQE. This is implemented via device's UMR (User Memory Registration) which allows to register multiple memory fragments into ConnectX hardware as a continuous buffer. UMR registration is an asynchronous operation and is done via ICO SQs. Signed-off-by: Tariq Toukan Signed-off-by: Saeed Mahameed Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 84 +++- .../net/ethernet/mellanox/mlx5/core/en_main.c | 64 ++- .../net/ethernet/mellanox/mlx5/core/en_rx.c | 427 +++++++++++++++--- .../net/ethernet/mellanox/mlx5/core/en_tx.c | 4 +- .../net/ethernet/mellanox/mlx5/core/en_txrx.c | 3 + 5 files changed, 514 insertions(+), 68 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index a757fcf19332..c99fdff74c97 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -72,6 +72,9 @@ #define MLX5_MPWRQ_PAGES_PER_WQE BIT(MLX5_MPWRQ_WQE_PAGE_ORDER) #define MLX5_MPWRQ_STRIDES_PER_PAGE (MLX5_MPWRQ_NUM_STRIDES >> \ MLX5_MPWRQ_WQE_PAGE_ORDER) +#define MLX5_CHANNEL_MAX_NUM_MTTS (ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8) * \ + BIT(MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW)) +#define MLX5_UMR_ALIGN (2048) #define MLX5_MPWRQ_SMALL_PACKET_THRESHOLD (128) #define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ (64 * 1024) @@ -134,6 +137,13 @@ struct mlx5e_rx_wqe { struct mlx5_wqe_data_seg data; }; +struct mlx5e_umr_wqe { + struct mlx5_wqe_ctrl_seg ctrl; + struct mlx5_wqe_umr_ctrl_seg uctrl; + struct mlx5_mkey_seg mkc; + struct mlx5_wqe_data_seg data; +}; + #ifdef CONFIG_MLX5_CORE_EN_DCB #define MLX5E_MAX_BW_ALLOC 100 /* Max percentage of BW allocation */ #define MLX5E_MIN_BW_ALLOC 1 /* Min percentage of BW allocation */ @@ -179,6 +189,7 @@ static const char vport_strings[][ETH_GSTRING_LEN] = { "tx_queue_dropped", "rx_wqe_err", "rx_mpwqe_filler", + "rx_mpwqe_frag", }; struct mlx5e_vport_stats { @@ -221,8 +232,9 @@ struct mlx5e_vport_stats { u64 tx_queue_dropped; u64 rx_wqe_err; u64 rx_mpwqe_filler; + u64 rx_mpwqe_frag; -#define NUM_VPORT_COUNTERS 36 +#define NUM_VPORT_COUNTERS 37 }; static const char pport_strings[][ETH_GSTRING_LEN] = { @@ -317,6 +329,7 @@ static const char rq_stats_strings[][ETH_GSTRING_LEN] = { "lro_bytes", "wqe_err", "mpwqe_filler", + "mpwqe_frag", }; struct mlx5e_rq_stats { @@ -328,7 +341,8 @@ struct mlx5e_rq_stats { u64 lro_bytes; u64 wqe_err; u64 mpwqe_filler; -#define NUM_RQ_STATS 8 + u64 mpwqe_frag; +#define NUM_RQ_STATS 9 }; static const char sq_stats_strings[][ETH_GSTRING_LEN] = { @@ -407,6 +421,7 @@ struct mlx5e_tstamp { enum { MLX5E_RQ_STATE_POST_WQES_ENABLE, + MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, }; struct mlx5e_cq { @@ -434,18 +449,14 @@ struct mlx5e_dma_info { dma_addr_t addr; }; -struct mlx5e_mpw_info { - struct mlx5e_dma_info dma_info; - u16 consumed_strides; - u16 skbs_frags[MLX5_MPWRQ_PAGES_PER_WQE]; -}; - struct mlx5e_rq { /* data path */ struct mlx5_wq_ll wq; u32 wqe_sz; struct sk_buff **skb; struct mlx5e_mpw_info *wqe_info; + __be32 mkey_be; + __be32 umr_mkey_be; struct device *pdev; struct net_device *netdev; @@ -466,6 +477,36 @@ struct mlx5e_rq { struct mlx5e_priv *priv; } ____cacheline_aligned_in_smp; +struct mlx5e_umr_dma_info { + __be64 *mtt; + __be64 *mtt_no_align; + dma_addr_t mtt_addr; + struct mlx5e_dma_info *dma_info; +}; + +struct mlx5e_mpw_info { + union { + struct mlx5e_dma_info dma_info; + struct mlx5e_umr_dma_info umr; + }; + u16 consumed_strides; + u16 skbs_frags[MLX5_MPWRQ_PAGES_PER_WQE]; + + void (*dma_pre_sync)(struct device *pdev, + struct mlx5e_mpw_info *wi, + u32 wqe_offset, u32 len); + void (*add_skb_frag)(struct device *pdev, + struct sk_buff *skb, + struct mlx5e_mpw_info *wi, + u32 page_idx, u32 frag_offset, u32 len); + void (*copy_skb_header)(struct device *pdev, + struct sk_buff *skb, + struct mlx5e_mpw_info *wi, + u32 page_idx, u32 offset, + u32 
headlen); + void (*free_wqe)(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi); +}; + struct mlx5e_tx_wqe_info { u32 num_bytes; u8 num_wqebbs; @@ -658,6 +699,7 @@ struct mlx5e_priv { u32 pdn; u32 tdn; struct mlx5_core_mkey mkey; + struct mlx5_core_mkey umr_mkey; struct mlx5e_rq drop_rq; struct mlx5e_channel **channel; @@ -730,6 +772,21 @@ void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe); bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq); int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix); int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix); +void mlx5e_post_rx_fragmented_mpwqe(struct mlx5e_rq *rq); +void mlx5e_complete_rx_linear_mpwqe(struct mlx5e_rq *rq, + struct mlx5_cqe64 *cqe, + u16 byte_cnt, + struct mlx5e_mpw_info *wi, + struct sk_buff *skb); +void mlx5e_complete_rx_fragmented_mpwqe(struct mlx5e_rq *rq, + struct mlx5_cqe64 *cqe, + u16 byte_cnt, + struct mlx5e_mpw_info *wi, + struct sk_buff *skb); +void mlx5e_free_rx_linear_mpwqe(struct mlx5e_rq *rq, + struct mlx5e_mpw_info *wi); +void mlx5e_free_rx_fragmented_mpwqe(struct mlx5e_rq *rq, + struct mlx5e_mpw_info *wi); struct mlx5_cqe64 *mlx5e_get_cqe(struct mlx5e_cq *cq); void mlx5e_update_stats(struct mlx5e_priv *priv); @@ -763,7 +820,7 @@ void mlx5e_build_default_indir_rqt(struct mlx5_core_dev *mdev, int num_channels); static inline void mlx5e_tx_notify_hw(struct mlx5e_sq *sq, - struct mlx5e_tx_wqe *wqe, int bf_sz) + struct mlx5_wqe_ctrl_seg *ctrl, int bf_sz) { u16 ofst = MLX5_BF_OFFSET + sq->bf_offset; @@ -777,9 +834,9 @@ static inline void mlx5e_tx_notify_hw(struct mlx5e_sq *sq, */ wmb(); if (bf_sz) - __iowrite64_copy(sq->uar_map + ofst, &wqe->ctrl, bf_sz); + __iowrite64_copy(sq->uar_map + ofst, ctrl, bf_sz); else - mlx5_write64((__be32 *)&wqe->ctrl, sq->uar_map + ofst, NULL); + mlx5_write64((__be32 *)ctrl, sq->uar_map + ofst, NULL); /* flush the write-combining mapped buffer */ wmb(); @@ -800,6 +857,11 @@ static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev) MLX5E_MAX_NUM_CHANNELS); } +static inline int mlx5e_get_mtt_octw(int npages) +{ + return ALIGN(npages, 8) / 2; +} + extern const struct ethtool_ops mlx5e_ethtool_ops; #ifdef CONFIG_MLX5_CORE_EN_DCB extern const struct dcbnl_rtnl_ops mlx5e_dcbnl_ops; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index b25b429ebabb..942829e6d8ba 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -179,6 +179,7 @@ void mlx5e_update_stats(struct mlx5e_priv *priv) s->rx_csum_sw = 0; s->rx_wqe_err = 0; s->rx_mpwqe_filler = 0; + s->rx_mpwqe_frag = 0; for (i = 0; i < priv->params.num_channels; i++) { rq_stats = &priv->channel[i]->rq.stats; @@ -190,6 +191,7 @@ void mlx5e_update_stats(struct mlx5e_priv *priv) s->rx_csum_sw += rq_stats->csum_sw; s->rx_wqe_err += rq_stats->wqe_err; s->rx_mpwqe_filler += rq_stats->mpwqe_filler; + s->rx_mpwqe_frag += rq_stats->mpwqe_frag; for (j = 0; j < priv->params.num_tc; j++) { sq_stats = &priv->channel[i]->sq[j].stats; @@ -379,7 +381,6 @@ static int mlx5e_create_rq(struct mlx5e_channel *c, for (i = 0; i < wq_sz; i++) { struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, i); - wqe->data.lkey = c->mkey_be; wqe->data.byte_count = cpu_to_be32(byte_count); } @@ -390,6 +391,8 @@ static int mlx5e_create_rq(struct mlx5e_channel *c, rq->channel = c; rq->ix = c->ix; rq->priv = c->priv; + rq->mkey_be = c->mkey_be; + rq->umr_mkey_be = 
cpu_to_be32(c->priv->umr_mkey.key); return 0; @@ -1256,6 +1259,7 @@ static void mlx5e_build_icosq_param(struct mlx5e_priv *priv, mlx5e_build_sq_param_common(priv, param); MLX5_SET(wq, wq, log_wq_sz, log_wq_size); + MLX5_SET(sqc, sqc, reg_umr, MLX5_CAP_ETH(priv->mdev, reg_umr_sq)); param->icosq = true; } @@ -1263,7 +1267,7 @@ static void mlx5e_build_icosq_param(struct mlx5e_priv *priv, static void mlx5e_build_channel_param(struct mlx5e_priv *priv, struct mlx5e_channel_param *cparam) { - u8 icosq_log_wq_sz = 0; + u8 icosq_log_wq_sz = MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE; memset(cparam, 0, sizeof(*cparam)); @@ -2458,6 +2462,13 @@ void mlx5e_build_default_indir_rqt(struct mlx5_core_dev *mdev, indirection_rqt[i] = i % num_channels; } +static bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev) +{ + return MLX5_CAP_GEN(mdev, striding_rq) && + MLX5_CAP_GEN(mdev, umr_ptr_rlky) && + MLX5_CAP_ETH(mdev, reg_umr_sq); +} + static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev, struct net_device *netdev, int num_channels) @@ -2466,7 +2477,7 @@ static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev, priv->params.log_sq_size = MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE; - priv->params.rq_wq_type = MLX5_CAP_GEN(mdev, striding_rq) ? + priv->params.rq_wq_type = mlx5e_check_fragmented_striding_rq_cap(mdev) ? MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ : MLX5_WQ_TYPE_LINKED_LIST; @@ -2639,6 +2650,41 @@ static void mlx5e_destroy_q_counter(struct mlx5e_priv *priv) mlx5_core_dealloc_q_counter(priv->mdev, priv->q_counter); } +static int mlx5e_create_umr_mkey(struct mlx5e_priv *priv) +{ + struct mlx5_core_dev *mdev = priv->mdev; + struct mlx5_create_mkey_mbox_in *in; + struct mlx5_mkey_seg *mkc; + int inlen = sizeof(*in); + u64 npages = + mlx5e_get_max_num_channels(mdev) * MLX5_CHANNEL_MAX_NUM_MTTS; + int err; + + in = mlx5_vzalloc(inlen); + if (!in) + return -ENOMEM; + + mkc = &in->seg; + mkc->status = MLX5_MKEY_STATUS_FREE; + mkc->flags = MLX5_PERM_UMR_EN | + MLX5_PERM_LOCAL_READ | + MLX5_PERM_LOCAL_WRITE | + MLX5_ACCESS_MODE_MTT; + + mkc->qpn_mkey7_0 = cpu_to_be32(0xffffff << 8); + mkc->flags_pd = cpu_to_be32(priv->pdn); + mkc->len = cpu_to_be64(npages << PAGE_SHIFT); + mkc->xlt_oct_size = cpu_to_be32(mlx5e_get_mtt_octw(npages)); + mkc->log2_page_size = PAGE_SHIFT; + + err = mlx5_core_create_mkey(mdev, &priv->umr_mkey, in, inlen, NULL, + NULL, NULL); + + kvfree(in); + + return err; +} + static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev) { struct net_device *netdev; @@ -2688,10 +2734,16 @@ static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev) goto err_dealloc_transport_domain; } + err = mlx5e_create_umr_mkey(priv); + if (err) { + mlx5_core_err(mdev, "create umr mkey failed, %d\n", err); + goto err_destroy_mkey; + } + err = mlx5e_create_tises(priv); if (err) { mlx5_core_warn(mdev, "create tises failed, %d\n", err); - goto err_destroy_mkey; + goto err_destroy_umr_mkey; } err = mlx5e_open_drop_rq(priv); @@ -2774,6 +2826,9 @@ err_close_drop_rq: err_destroy_tises: mlx5e_destroy_tises(priv); +err_destroy_umr_mkey: + mlx5_core_destroy_mkey(mdev, &priv->umr_mkey); + err_destroy_mkey: mlx5_core_destroy_mkey(mdev, &priv->mkey); @@ -2812,6 +2867,7 @@ static void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, void *vpriv) mlx5e_destroy_rqt(priv, MLX5E_INDIRECTION_RQT); mlx5e_close_drop_rq(priv); mlx5e_destroy_tises(priv); + mlx5_core_destroy_mkey(priv->mdev, &priv->umr_mkey); mlx5_core_destroy_mkey(priv->mdev, &priv->mkey); mlx5_core_dealloc_transport_domain(priv->mdev, priv->tdn); 
mlx5_core_dealloc_pd(priv->mdev, priv->pdn); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 71f3a5d244ff..d71919ccf912 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -65,6 +65,7 @@ int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix) *((dma_addr_t *)skb->cb) = dma_addr; wqe->data.addr = cpu_to_be64(dma_addr + MLX5E_NET_IP_ALIGN); + wqe->data.lkey = rq->mkey_be; rq->skb[ix] = skb; @@ -76,7 +77,295 @@ err_free_skb: return -ENOMEM; } -int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix) +static inline void +mlx5e_dma_pre_sync_linear_mpwqe(struct device *pdev, + struct mlx5e_mpw_info *wi, + u32 wqe_offset, u32 len) +{ + dma_sync_single_for_cpu(pdev, wi->dma_info.addr + wqe_offset, + len, DMA_FROM_DEVICE); +} + +static inline void +mlx5e_dma_pre_sync_fragmented_mpwqe(struct device *pdev, + struct mlx5e_mpw_info *wi, + u32 wqe_offset, u32 len) +{ + /* No dma pre sync for fragmented MPWQE */ +} + +static inline void +mlx5e_add_skb_frag_linear_mpwqe(struct device *pdev, + struct sk_buff *skb, + struct mlx5e_mpw_info *wi, + u32 page_idx, u32 frag_offset, + u32 len) +{ + unsigned int truesize = ALIGN(len, MLX5_MPWRQ_STRIDE_SIZE); + + wi->skbs_frags[page_idx]++; + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, + &wi->dma_info.page[page_idx], frag_offset, + len, truesize); +} + +static inline void +mlx5e_add_skb_frag_fragmented_mpwqe(struct device *pdev, + struct sk_buff *skb, + struct mlx5e_mpw_info *wi, + u32 page_idx, u32 frag_offset, + u32 len) +{ + unsigned int truesize = ALIGN(len, MLX5_MPWRQ_STRIDE_SIZE); + + dma_sync_single_for_cpu(pdev, + wi->umr.dma_info[page_idx].addr + frag_offset, + len, DMA_FROM_DEVICE); + wi->skbs_frags[page_idx]++; + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, + wi->umr.dma_info[page_idx].page, frag_offset, + len, truesize); +} + +static inline void +mlx5e_copy_skb_header_linear_mpwqe(struct device *pdev, + struct sk_buff *skb, + struct mlx5e_mpw_info *wi, + u32 page_idx, u32 offset, + u32 headlen) +{ + struct page *page = &wi->dma_info.page[page_idx]; + + skb_copy_to_linear_data(skb, page_address(page) + offset, + ALIGN(headlen, sizeof(long))); +} + +static inline void +mlx5e_copy_skb_header_fragmented_mpwqe(struct device *pdev, + struct sk_buff *skb, + struct mlx5e_mpw_info *wi, + u32 page_idx, u32 offset, + u32 headlen) +{ + u16 headlen_pg = min_t(u32, headlen, PAGE_SIZE - offset); + struct mlx5e_dma_info *dma_info = &wi->umr.dma_info[page_idx]; + unsigned int len; + + /* Aligning len to sizeof(long) optimizes memcpy performance */ + len = ALIGN(headlen_pg, sizeof(long)); + dma_sync_single_for_cpu(pdev, dma_info->addr + offset, len, + DMA_FROM_DEVICE); + skb_copy_to_linear_data_offset(skb, 0, + page_address(dma_info->page) + offset, + len); +#if (MLX5_MPWRQ_SMALL_PACKET_THRESHOLD >= MLX5_MPWRQ_STRIDE_SIZE) + if (unlikely(offset + headlen > PAGE_SIZE)) { + dma_info++; + headlen_pg = len; + len = ALIGN(headlen - headlen_pg, sizeof(long)); + dma_sync_single_for_cpu(pdev, dma_info->addr, len, + DMA_FROM_DEVICE); + skb_copy_to_linear_data_offset(skb, headlen_pg, + page_address(dma_info->page), + len); + } +#endif +} + +static u16 mlx5e_get_wqe_mtt_offset(u16 rq_ix, u16 wqe_ix) +{ + return rq_ix * MLX5_CHANNEL_MAX_NUM_MTTS + + wqe_ix * ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8); +} + +static void mlx5e_build_umr_wqe(struct mlx5e_rq *rq, + struct mlx5e_sq *sq, + struct mlx5e_umr_wqe 
*wqe, + u16 ix) +{ + struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl; + struct mlx5_wqe_umr_ctrl_seg *ucseg = &wqe->uctrl; + struct mlx5_wqe_data_seg *dseg = &wqe->data; + struct mlx5e_mpw_info *wi = &rq->wqe_info[ix]; + u8 ds_cnt = DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS); + u16 umr_wqe_mtt_offset = mlx5e_get_wqe_mtt_offset(rq->ix, ix); + + memset(wqe, 0, sizeof(*wqe)); + cseg->opmod_idx_opcode = + cpu_to_be32((sq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) | + MLX5_OPCODE_UMR); + cseg->qpn_ds = cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT) | + ds_cnt); + cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE; + cseg->imm = rq->umr_mkey_be; + + ucseg->flags = MLX5_UMR_TRANSLATION_OFFSET_EN; + ucseg->klm_octowords = + cpu_to_be16(mlx5e_get_mtt_octw(MLX5_MPWRQ_PAGES_PER_WQE)); + ucseg->bsf_octowords = + cpu_to_be16(mlx5e_get_mtt_octw(umr_wqe_mtt_offset)); + ucseg->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE); + + dseg->lkey = sq->mkey_be; + dseg->addr = cpu_to_be64(wi->umr.mtt_addr); +} + +static void mlx5e_post_umr_wqe(struct mlx5e_rq *rq, u16 ix) +{ + struct mlx5e_sq *sq = &rq->channel->icosq; + struct mlx5_wq_cyc *wq = &sq->wq; + struct mlx5e_umr_wqe *wqe; + u8 num_wqebbs = DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_BB); + u16 pi; + + /* fill sq edge with nops to avoid wqe wrap around */ + while ((pi = (sq->pc & wq->sz_m1)) > sq->edge) { + sq->ico_wqe_info[pi].opcode = MLX5_OPCODE_NOP; + sq->ico_wqe_info[pi].num_wqebbs = 1; + mlx5e_send_nop(sq, true); + } + + wqe = mlx5_wq_cyc_get_wqe(wq, pi); + mlx5e_build_umr_wqe(rq, sq, wqe, ix); + sq->ico_wqe_info[pi].opcode = MLX5_OPCODE_UMR; + sq->ico_wqe_info[pi].num_wqebbs = num_wqebbs; + sq->pc += num_wqebbs; + mlx5e_tx_notify_hw(sq, &wqe->ctrl, 0); +} + +static inline int mlx5e_get_wqe_mtt_sz(void) +{ + /* UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes. + * To avoid copying garbage after the mtt array, we allocate + * a little more. 
+ */ + return ALIGN(MLX5_MPWRQ_PAGES_PER_WQE * sizeof(__be64), + MLX5_UMR_MTT_ALIGNMENT); +} + +static int mlx5e_alloc_and_map_page(struct mlx5e_rq *rq, + struct mlx5e_mpw_info *wi, + int i) +{ + struct page *page; + + page = dev_alloc_page(); + if (unlikely(!page)) + return -ENOMEM; + + wi->umr.dma_info[i].page = page; + wi->umr.dma_info[i].addr = dma_map_page(rq->pdev, page, 0, PAGE_SIZE, + PCI_DMA_FROMDEVICE); + if (unlikely(dma_mapping_error(rq->pdev, wi->umr.dma_info[i].addr))) { + put_page(page); + return -ENOMEM; + } + wi->umr.mtt[i] = cpu_to_be64(wi->umr.dma_info[i].addr | MLX5_EN_WR); + + return 0; +} + +static int mlx5e_alloc_rx_fragmented_mpwqe(struct mlx5e_rq *rq, + struct mlx5e_rx_wqe *wqe, + u16 ix) +{ + struct mlx5e_mpw_info *wi = &rq->wqe_info[ix]; + int mtt_sz = mlx5e_get_wqe_mtt_sz(); + u32 dma_offset = mlx5e_get_wqe_mtt_offset(rq->ix, ix) << PAGE_SHIFT; + int i; + + wi->umr.dma_info = kmalloc(sizeof(*wi->umr.dma_info) * + MLX5_MPWRQ_PAGES_PER_WQE, + GFP_ATOMIC); + if (unlikely(!wi->umr.dma_info)) + goto err_out; + + /* We allocate more than mtt_sz as we will align the pointer */ + wi->umr.mtt_no_align = kzalloc(mtt_sz + MLX5_UMR_ALIGN - 1, + GFP_ATOMIC); + if (unlikely(!wi->umr.mtt_no_align)) + goto err_free_umr; + + wi->umr.mtt = PTR_ALIGN(wi->umr.mtt_no_align, MLX5_UMR_ALIGN); + wi->umr.mtt_addr = dma_map_single(rq->pdev, wi->umr.mtt, mtt_sz, + PCI_DMA_TODEVICE); + if (unlikely(dma_mapping_error(rq->pdev, wi->umr.mtt_addr))) + goto err_free_mtt; + + for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++) { + if (unlikely(mlx5e_alloc_and_map_page(rq, wi, i))) + goto err_unmap; + atomic_add(MLX5_MPWRQ_STRIDES_PER_PAGE, + &wi->umr.dma_info[i].page->_count); + wi->skbs_frags[i] = 0; + } + + wi->consumed_strides = 0; + wi->dma_pre_sync = mlx5e_dma_pre_sync_fragmented_mpwqe; + wi->add_skb_frag = mlx5e_add_skb_frag_fragmented_mpwqe; + wi->copy_skb_header = mlx5e_copy_skb_header_fragmented_mpwqe; + wi->free_wqe = mlx5e_free_rx_fragmented_mpwqe; + wqe->data.lkey = rq->umr_mkey_be; + wqe->data.addr = cpu_to_be64(dma_offset); + + return 0; + +err_unmap: + while (--i >= 0) { + dma_unmap_page(rq->pdev, wi->umr.dma_info[i].addr, PAGE_SIZE, + PCI_DMA_FROMDEVICE); + atomic_sub(MLX5_MPWRQ_STRIDES_PER_PAGE, + &wi->umr.dma_info[i].page->_count); + put_page(wi->umr.dma_info[i].page); + } + dma_unmap_single(rq->pdev, wi->umr.mtt_addr, mtt_sz, PCI_DMA_TODEVICE); + +err_free_mtt: + kfree(wi->umr.mtt_no_align); + +err_free_umr: + kfree(wi->umr.dma_info); + +err_out: + return -ENOMEM; +} + +void mlx5e_free_rx_fragmented_mpwqe(struct mlx5e_rq *rq, + struct mlx5e_mpw_info *wi) +{ + int mtt_sz = mlx5e_get_wqe_mtt_sz(); + int i; + + for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++) { + dma_unmap_page(rq->pdev, wi->umr.dma_info[i].addr, PAGE_SIZE, + PCI_DMA_FROMDEVICE); + atomic_sub(MLX5_MPWRQ_STRIDES_PER_PAGE - wi->skbs_frags[i], + &wi->umr.dma_info[i].page->_count); + put_page(wi->umr.dma_info[i].page); + } + dma_unmap_single(rq->pdev, wi->umr.mtt_addr, mtt_sz, PCI_DMA_TODEVICE); + kfree(wi->umr.mtt_no_align); + kfree(wi->umr.dma_info); +} + +void mlx5e_post_rx_fragmented_mpwqe(struct mlx5e_rq *rq) +{ + struct mlx5_wq_ll *wq = &rq->wq; + struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(wq, wq->head); + + clear_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state); + mlx5_wq_ll_push(wq, be16_to_cpu(wqe->next.next_wqe_index)); + rq->stats.mpwqe_frag++; + + /* ensure wqes are visible to device before updating doorbell record */ + dma_wmb(); + + mlx5_wq_ll_update_db_record(wq); +} + +static int 
mlx5e_alloc_rx_linear_mpwqe(struct mlx5e_rq *rq, + struct mlx5e_rx_wqe *wqe, + u16 ix) { struct mlx5e_mpw_info *wi = &rq->wqe_info[ix]; gfp_t gfp_mask; @@ -106,16 +395,56 @@ int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix) } wi->consumed_strides = 0; - wqe->data.addr = cpu_to_be64(wi->dma_info.addr); + wi->dma_pre_sync = mlx5e_dma_pre_sync_linear_mpwqe; + wi->add_skb_frag = mlx5e_add_skb_frag_linear_mpwqe; + wi->copy_skb_header = mlx5e_copy_skb_header_linear_mpwqe; + wi->free_wqe = mlx5e_free_rx_linear_mpwqe; + wqe->data.lkey = rq->mkey_be; + wqe->data.addr = cpu_to_be64(wi->dma_info.addr); return 0; } +void mlx5e_free_rx_linear_mpwqe(struct mlx5e_rq *rq, + struct mlx5e_mpw_info *wi) +{ + int i; + + dma_unmap_page(rq->pdev, wi->dma_info.addr, rq->wqe_sz, + PCI_DMA_FROMDEVICE); + for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++) { + atomic_sub(MLX5_MPWRQ_STRIDES_PER_PAGE - wi->skbs_frags[i], + &wi->dma_info.page[i]._count); + put_page(&wi->dma_info.page[i]); + } +} + +int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix) +{ + int err; + + err = mlx5e_alloc_rx_linear_mpwqe(rq, wqe, ix); + if (unlikely(err)) { + err = mlx5e_alloc_rx_fragmented_mpwqe(rq, wqe, ix); + if (unlikely(err)) + return err; + set_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state); + mlx5e_post_umr_wqe(rq, ix); + return -EBUSY; + } + + return 0; +} + +#define RQ_CANNOT_POST(rq) \ + (!test_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state) || \ + test_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state)) + bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq) { struct mlx5_wq_ll *wq = &rq->wq; - if (unlikely(!test_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state))) + if (unlikely(RQ_CANNOT_POST(rq))) return false; while (!mlx5_wq_ll_is_full(wq)) { @@ -309,23 +638,56 @@ wq_ll_pop: &wqe->next.next_wqe_index); } +static inline void mlx5e_mpwqe_fill_rx_skb(struct mlx5e_rq *rq, + struct mlx5_cqe64 *cqe, + struct mlx5e_mpw_info *wi, + u32 cqe_bcnt, + struct sk_buff *skb) +{ + u32 consumed_bytes = ALIGN(cqe_bcnt, MLX5_MPWRQ_STRIDE_SIZE); + u16 stride_ix = mpwrq_get_cqe_stride_index(cqe); + u32 wqe_offset = stride_ix * MLX5_MPWRQ_STRIDE_SIZE; + u32 head_offset = wqe_offset & (PAGE_SIZE - 1); + u32 page_idx = wqe_offset >> PAGE_SHIFT; + u32 head_page_idx = page_idx; + u16 headlen = min_t(u16, MLX5_MPWRQ_SMALL_PACKET_THRESHOLD, cqe_bcnt); + u32 frag_offset = head_offset + headlen; + u16 byte_cnt = cqe_bcnt - headlen; + +#if (MLX5_MPWRQ_SMALL_PACKET_THRESHOLD >= MLX5_MPWRQ_STRIDE_SIZE) + if (unlikely(frag_offset >= PAGE_SIZE)) { + page_idx++; + frag_offset -= PAGE_SIZE; + } +#endif + wi->dma_pre_sync(rq->pdev, wi, wqe_offset, consumed_bytes); + + while (byte_cnt) { + u32 pg_consumed_bytes = + min_t(u32, PAGE_SIZE - frag_offset, byte_cnt); + + wi->add_skb_frag(rq->pdev, skb, wi, page_idx, frag_offset, + pg_consumed_bytes); + byte_cnt -= pg_consumed_bytes; + frag_offset = 0; + page_idx++; + } + /* copy header */ + wi->copy_skb_header(rq->pdev, skb, wi, head_page_idx, head_offset, + headlen); + /* skb linear part was allocated with headlen and aligned to long */ + skb->tail += headlen; + skb->len += headlen; +} + void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) { u16 cstrides = mpwrq_get_cqe_consumed_strides(cqe); - u16 stride_ix = mpwrq_get_cqe_stride_index(cqe); u16 wqe_id = be16_to_cpu(cqe->wqe_id); struct mlx5e_mpw_info *wi = &rq->wqe_info[wqe_id]; struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_id); struct sk_buff *skb; - u32 consumed_bytes; - u32 
head_offset; - u32 frag_offset; - u32 wqe_offset; - u32 page_idx; - u16 byte_cnt; u16 cqe_bcnt; - u16 headlen; - int i; wi->consumed_strides += cstrides; @@ -346,53 +708,16 @@ void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) goto mpwrq_cqe_out; prefetch(skb->data); - wqe_offset = stride_ix * MLX5_MPWRQ_STRIDE_SIZE; - consumed_bytes = cstrides * MLX5_MPWRQ_STRIDE_SIZE; - dma_sync_single_for_cpu(rq->pdev, wi->dma_info.addr + wqe_offset, - consumed_bytes, DMA_FROM_DEVICE); - - head_offset = wqe_offset & (PAGE_SIZE - 1); - page_idx = wqe_offset >> PAGE_SHIFT; cqe_bcnt = mpwrq_get_cqe_byte_cnt(cqe); - headlen = min_t(u16, MLX5_MPWRQ_SMALL_PACKET_THRESHOLD, cqe_bcnt); - frag_offset = head_offset + headlen; - - byte_cnt = cqe_bcnt - headlen; - while (byte_cnt) { - u32 pg_consumed_bytes = - min_t(u32, PAGE_SIZE - frag_offset, byte_cnt); - unsigned int truesize = - ALIGN(pg_consumed_bytes, MLX5_MPWRQ_STRIDE_SIZE); - - wi->skbs_frags[page_idx]++; - skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, - &wi->dma_info.page[page_idx], frag_offset, - pg_consumed_bytes, truesize); - byte_cnt -= pg_consumed_bytes; - frag_offset = 0; - page_idx++; - } - - skb_copy_to_linear_data(skb, - page_address(wi->dma_info.page) + wqe_offset, - ALIGN(headlen, sizeof(long))); - /* skb linear part was allocated with headlen and aligned to long */ - skb->tail += headlen; - skb->len += headlen; + mlx5e_mpwqe_fill_rx_skb(rq, cqe, wi, cqe_bcnt, skb); mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb); mpwrq_cqe_out: if (likely(wi->consumed_strides < MLX5_MPWRQ_NUM_STRIDES)) return; - dma_unmap_page(rq->pdev, wi->dma_info.addr, rq->wqe_sz, - PCI_DMA_FROMDEVICE); - for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++) { - atomic_sub(MLX5_MPWRQ_STRIDES_PER_PAGE - wi->skbs_frags[i], - &wi->dma_info.page[i]._count); - put_page(&wi->dma_info.page[i]); - } + wi->free_wqe(rq, wi); mlx5_wq_ll_pop(&rq->wq, cqe->wqe_id, &wqe->next.next_wqe_index); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index a8d2935c50d0..229ab16fb8d3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c @@ -58,7 +58,7 @@ void mlx5e_send_nop(struct mlx5e_sq *sq, bool notify_hw) if (notify_hw) { cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE; - mlx5e_tx_notify_hw(sq, wqe, 0); + mlx5e_tx_notify_hw(sq, &wqe->ctrl, 0); } } @@ -310,7 +310,7 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb) bf_sz = wi->num_wqebbs << 3; cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE; - mlx5e_tx_notify_hw(sq, wqe, bf_sz); + mlx5e_tx_notify_hw(sq, &wqe->ctrl, bf_sz); } /* fill sq edge with nops to avoid wqe wrap around */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c index ad624cb6f147..a3fd0f55ce2e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c @@ -84,6 +84,9 @@ static void mlx5e_poll_ico_cq(struct mlx5e_cq *cq) switch (icowi->opcode) { case MLX5_OPCODE_NOP: break; + case MLX5_OPCODE_UMR: + mlx5e_post_rx_fragmented_mpwqe(&sq->channel->rq); + break; default: WARN_ONCE(true, "mlx5e: Bad OPCODE in ICOSQ WQE info: 0x%x\n", From c5adb96f6c4a22aceff2e8220612c5b9239ffeb2 Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Wed, 20 Apr 2016 22:02:16 +0300 Subject: [PATCH 0900/1649] net/mlx5e: Use napi_alloc_skb for RX SKB allocations Instead of netdev_alloc_skb, we use the napi_alloc_skb function which is 
designated to allocate skbuff's for RX in a channel-specific NAPI instance, and implies the IP packet alignment. Signed-off-by: Tariq Toukan Signed-off-by: Saeed Mahameed Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 1 - drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 4 ++-- drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 12 +++++------- 3 files changed, 7 insertions(+), 10 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index c99fdff74c97..303e6cdf9fcd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -93,7 +93,6 @@ #define MLX5E_SQ_BF_BUDGET 16 #define MLX5E_NUM_MAIN_GROUPS 9 -#define MLX5E_NET_IP_ALIGN 2 static inline u16 mlx5_min_rx_wqes(int wq_type, u32 wq_size) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 942829e6d8ba..9b17bc064cc8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -373,8 +373,8 @@ static int mlx5e_create_rq(struct mlx5e_channel *c, rq->wqe_sz = (priv->params.lro_en) ? priv->params.lro_wqe_sz : MLX5E_SW2HW_MTU(priv->netdev->mtu); - rq->wqe_sz = SKB_DATA_ALIGN(rq->wqe_sz + MLX5E_NET_IP_ALIGN); - byte_count = rq->wqe_sz - MLX5E_NET_IP_ALIGN; + rq->wqe_sz = SKB_DATA_ALIGN(rq->wqe_sz); + byte_count = rq->wqe_sz; byte_count |= MLX5_HW_START_PADDING; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index d71919ccf912..5bdcc0b69f76 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -47,7 +47,7 @@ int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix) struct sk_buff *skb; dma_addr_t dma_addr; - skb = netdev_alloc_skb(rq->netdev, rq->wqe_sz); + skb = napi_alloc_skb(rq->cq.napi, rq->wqe_sz); if (unlikely(!skb)) return -ENOMEM; @@ -61,10 +61,8 @@ int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix) if (unlikely(dma_mapping_error(rq->pdev, dma_addr))) goto err_free_skb; - skb_reserve(skb, MLX5E_NET_IP_ALIGN); - *((dma_addr_t *)skb->cb) = dma_addr; - wqe->data.addr = cpu_to_be64(dma_addr + MLX5E_NET_IP_ALIGN); + wqe->data.addr = cpu_to_be64(dma_addr); wqe->data.lkey = rq->mkey_be; rq->skb[ix] = skb; @@ -701,9 +699,9 @@ void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) goto mpwrq_cqe_out; } - skb = netdev_alloc_skb(rq->netdev, - ALIGN(MLX5_MPWRQ_SMALL_PACKET_THRESHOLD, - sizeof(long))); + skb = napi_alloc_skb(rq->cq.napi, + ALIGN(MLX5_MPWRQ_SMALL_PACKET_THRESHOLD, + sizeof(long))); if (unlikely(!skb)) goto mpwrq_cqe_out; From 1bfec31627bf9b351b93b8cef4520b90f48ca276 Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Wed, 20 Apr 2016 22:02:17 +0300 Subject: [PATCH 0901/1649] net/mlx5e: Remove redundant barrier The bit-op operation one line before is an explicit barrier by itself. Signed-off-by: Tariq Toukan Signed-off-by: Saeed Mahameed Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c index a3fd0f55ce2e..c38781fa567d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c @@ -147,7 +147,6 @@ void mlx5e_completion_event(struct mlx5_core_cq *mcq) struct mlx5e_cq *cq = container_of(mcq, struct mlx5e_cq, mcq); set_bit(MLX5E_CHANNEL_NAPI_SCHED, &cq->channel->flags); - barrier(); napi_schedule(cq->napi); } From e20a0db30454a07f03f3a34a79e9f35881cfaa9d Mon Sep 17 00:00:00 2001 From: Saeed Mahameed Date: Wed, 20 Apr 2016 22:02:18 +0300 Subject: [PATCH 0902/1649] net/mlx5e: Delay skb->data access Move mlx5e_handle_csum and eth_type_trans to the end of mlx5e_build_rx_skb to gain some more time before accessing skb->data, to reduce cache misses. Signed-off-by: Saeed Mahameed Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 5bdcc0b69f76..ee5fa16aafd1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -573,10 +573,6 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe, if (unlikely(mlx5e_rx_hw_stamp(tstamp))) mlx5e_fill_hwstamp(tstamp, get_cqe_ts(cqe), skb_hwtstamps(skb)); - mlx5e_handle_csum(netdev, cqe, rq, skb, !!lro_num_seg); - - skb->protocol = eth_type_trans(skb, netdev); - skb_record_rx_queue(skb, rq->ix); if (likely(netdev->features & NETIF_F_RXHASH)) @@ -587,6 +583,9 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe, be16_to_cpu(cqe->vlan_info)); skb->mark = be32_to_cpu(cqe->sop_drop_qpn) & MLX5E_TC_FLOW_ID_MASK; + + mlx5e_handle_csum(netdev, cqe, rq, skb, !!lro_num_seg); + skb->protocol = eth_type_trans(skb, netdev); } static inline void mlx5e_complete_rx_cqe(struct mlx5e_rq *rq, From 54984407564ef6b35488f52654f828c17b9d6fa8 Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Wed, 20 Apr 2016 22:02:19 +0300 Subject: [PATCH 0903/1649] net/mlx5e: Add ethtool counter for RX buffer allocation failures Counts the number of RX buffer allocation failures and shows it in ethtool statistics. Signed-off-by: Tariq Toukan Signed-off-by: Saeed Mahameed Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 8 ++++++-- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 2 ++ drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 11 +++++++++-- 3 files changed, 17 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 303e6cdf9fcd..6e24e821a1d8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -189,6 +189,7 @@ static const char vport_strings[][ETH_GSTRING_LEN] = { "rx_wqe_err", "rx_mpwqe_filler", "rx_mpwqe_frag", + "rx_buff_alloc_err", }; struct mlx5e_vport_stats { @@ -232,8 +233,9 @@ struct mlx5e_vport_stats { u64 rx_wqe_err; u64 rx_mpwqe_filler; u64 rx_mpwqe_frag; + u64 rx_buff_alloc_err; -#define NUM_VPORT_COUNTERS 37 +#define NUM_VPORT_COUNTERS 38 }; static const char pport_strings[][ETH_GSTRING_LEN] = { @@ -329,6 +331,7 @@ static const char rq_stats_strings[][ETH_GSTRING_LEN] = { "wqe_err", "mpwqe_filler", "mpwqe_frag", + "buff_alloc_err", }; struct mlx5e_rq_stats { @@ -341,7 +344,8 @@ struct mlx5e_rq_stats { u64 wqe_err; u64 mpwqe_filler; u64 mpwqe_frag; -#define NUM_RQ_STATS 9 + u64 buff_alloc_err; +#define NUM_RQ_STATS 10 }; static const char sq_stats_strings[][ETH_GSTRING_LEN] = { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 9b17bc064cc8..d485d1e4e100 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -180,6 +180,7 @@ void mlx5e_update_stats(struct mlx5e_priv *priv) s->rx_wqe_err = 0; s->rx_mpwqe_filler = 0; s->rx_mpwqe_frag = 0; + s->rx_buff_alloc_err = 0; for (i = 0; i < priv->params.num_channels; i++) { rq_stats = &priv->channel[i]->rq.stats; @@ -192,6 +193,7 @@ void mlx5e_update_stats(struct mlx5e_priv *priv) s->rx_wqe_err += rq_stats->wqe_err; s->rx_mpwqe_filler += rq_stats->mpwqe_filler; s->rx_mpwqe_frag += rq_stats->mpwqe_frag; + s->rx_buff_alloc_err += rq_stats->buff_alloc_err; for (j = 0; j < priv->params.num_tc; j++) { sq_stats = &priv->channel[i]->sq[j].stats; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index ee5fa16aafd1..918b7c7fd74f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -447,9 +447,14 @@ bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq) while (!mlx5_wq_ll_is_full(wq)) { struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(wq, wq->head); + int err; - if (unlikely(rq->alloc_wqe(rq, wqe, wq->head))) + err = rq->alloc_wqe(rq, wqe, wq->head); + if (unlikely(err)) { + if (err != -EBUSY) + rq->stats.buff_alloc_err++; break; + } mlx5_wq_ll_push(wq, be16_to_cpu(wqe->next.next_wqe_index)); } @@ -701,8 +706,10 @@ void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) skb = napi_alloc_skb(rq->cq.napi, ALIGN(MLX5_MPWRQ_SMALL_PACKET_THRESHOLD, sizeof(long))); - if (unlikely(!skb)) + if (unlikely(!skb)) { + rq->stats.buff_alloc_err++; goto mpwrq_cqe_out; + } prefetch(skb->data); cqe_bcnt = mpwrq_get_cqe_byte_cnt(cqe); From 7f348a60762afd4cd0e4e7fa14cfa66331b7c30e Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Wed, 20 Apr 2016 16:51:00 -0400 Subject: [PATCH 0904/1649] net: Add support for IP ID mangling TSO in cases that require encapsulation This patch adds support for NETIF_F_TSO_MANGLEID if a given tunnel supports NETIF_F_TSO. 
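On the buff_alloc_err accounting just above: the err != -EBUSY test in mlx5e_post_rx_wqes() is deliberate, because mlx5e_alloc_rx_mpwqe() earlier in this series returns -EBUSY after falling back to the fragmented path and posting a UMR WQE, which is flow control rather than an allocation failure. A stand-alone model of that policy follows; the names are hypothetical and this is not mlx5e code.

    #include <errno.h>
    #include <stdbool.h>

    /* Hypothetical model of the posting loop's error policy: -EBUSY from the
     * WQE allocator means "a UMR WQE was posted, pause posting until it
     * completes", so it must not be counted as a buffer allocation failure,
     * while any other error is.
     */
    static bool counts_as_buff_alloc_err(int err)
    {
            return err && err != -EBUSY;
    }

    static bool stop_posting(int err)
    {
            return err != 0; /* both -EBUSY and real failures end the loop */
    }

In the patch above the same split shows up directly: the counter is bumped only when err != -EBUSY, but the posting loop breaks on any non-zero err.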
This way if needed a device can then later enable the TSO with IP ID mangling and the tunnels on top of that device can then also make use of the IP ID mangling as well. Signed-off-by: Alexander Duyck Signed-off-by: David S. Miller --- net/core/dev.c | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/net/core/dev.c b/net/core/dev.c index 52d446b2cb99..6324bc9267f7 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -7029,8 +7029,19 @@ int register_netdevice(struct net_device *dev) if (!(dev->flags & IFF_LOOPBACK)) dev->hw_features |= NETIF_F_NOCACHE_COPY; + /* If IPv4 TCP segmentation offload is supported we should also + * allow the device to enable segmenting the frame with the option + * of ignoring a static IP ID value. This doesn't enable the + * feature itself but allows the user to enable it later. + */ if (dev->hw_features & NETIF_F_TSO) dev->hw_features |= NETIF_F_TSO_MANGLEID; + if (dev->vlan_features & NETIF_F_TSO) + dev->vlan_features |= NETIF_F_TSO_MANGLEID; + if (dev->mpls_features & NETIF_F_TSO) + dev->mpls_features |= NETIF_F_TSO_MANGLEID; + if (dev->hw_enc_features & NETIF_F_TSO) + dev->hw_enc_features |= NETIF_F_TSO_MANGLEID; /* Make NETIF_F_HIGHDMA inheritable to VLAN devices. */ From 08d9910c3408531473766ec4d8b288e8ee2fe500 Mon Sep 17 00:00:00 2001 From: Hannes Frederic Sowa Date: Mon, 18 Apr 2016 21:19:42 +0200 Subject: [PATCH 0905/1649] benet: be_resume needs to protect be_open with rtnl_lock be_open calls down to functions which expects rtnl lock to be held. Cc: Sathya Perla Cc: Ajit Khaparde Cc: Padmanabh Ratnakar Cc: Sriharsha Basavapatna Cc: Somnath Kotur Signed-off-by: Hannes Frederic Sowa Signed-off-by: David S. Miller --- drivers/net/ethernet/emulex/benet/be_main.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 536686476369..ed98ef1ecac3 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -4890,11 +4890,13 @@ static int be_resume(struct be_adapter *adapter) if (status) return status; - if (netif_running(netdev)) { + rtnl_lock(); + if (netif_running(netdev)) status = be_open(netdev); - if (status) - return status; - } + rtnl_unlock(); + + if (status) + return status; netif_device_attach(netdev); From 41419b9303f085e8912406140355e45230fed22f Mon Sep 17 00:00:00 2001 From: Hannes Frederic Sowa Date: Mon, 18 Apr 2016 21:19:43 +0200 Subject: [PATCH 0906/1649] fm10k: protect fm10k_open in fm10k_io_resume with rtnl_lock fm10k_open requires rtnl_lock to be held. Cc: Jeff Kirsher Cc: Jesse Brandeburg Cc: Shannon Nelson Cc: Carolyn Wyborny Cc: Don Skidmore Cc: Bruce Allan Cc: John Ronciak Cc: Mitch Williams Signed-off-by: Hannes Frederic Sowa Signed-off-by: David S. Miller --- drivers/net/ethernet/intel/fm10k/fm10k_pci.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c index 404f47ae14b6..206a466999ed 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c @@ -2287,8 +2287,10 @@ static void fm10k_io_resume(struct pci_dev *pdev) /* reassociate interrupts */ fm10k_mbx_request_irq(interface); + rtnl_lock(); if (netif_running(netdev)) err = fm10k_open(netdev); + rtnl_unlock(); /* final check of hardware state before registering the interface */ err = err ? 
: fm10k_hw_ready(interface); From 0c5c3252c43cc935bef05c2211fc7cb32facddf7 Mon Sep 17 00:00:00 2001 From: Hannes Frederic Sowa Date: Mon, 18 Apr 2016 21:19:44 +0200 Subject: [PATCH 0907/1649] mlx4: protect mlx4_en_start_port in mlx4_en_restart with rtnl_lock mlx4_en_start_port requires rtnl_lock to be held. Cc: Eugenia Emantayev Cc: Yishai Hadas Signed-off-by: Hannes Frederic Sowa Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index b4b258c8ca47..8bd143dda95d 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -1856,6 +1856,7 @@ static void mlx4_en_restart(struct work_struct *work) en_dbg(DRV, priv, "Watchdog task called for port %d\n", priv->port); + rtnl_lock(); mutex_lock(&mdev->state_lock); if (priv->port_up) { mlx4_en_stop_port(dev, 1); @@ -1863,6 +1864,7 @@ static void mlx4_en_restart(struct work_struct *work) en_err(priv, "Failed restarting port %d\n", priv->port); } mutex_unlock(&mdev->state_lock); + rtnl_unlock(); } static void mlx4_en_clear_stats(struct net_device *dev) From b1f99a787e8239da3ea859709f5fb60b3fd02c13 Mon Sep 17 00:00:00 2001 From: Hannes Frederic Sowa Date: Mon, 18 Apr 2016 21:19:45 +0200 Subject: [PATCH 0908/1649] ixgbe: protect vxlan_get_rx_port in ixgbe_service_task with rtnl_lock vxlan_get_rx_port requires rtnl_lock to be held. Cc: Jeff Kirsher Cc: Jesse Brandeburg Cc: Shannon Nelson Cc: Carolyn Wyborny Cc: Don Skidmore Cc: Bruce Allan Cc: John Ronciak Cc: Mitch Williams Signed-off-by: Hannes Frederic Sowa Signed-off-by: David S. Miller --- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 2976df77bf14..b2f2cf40f06a 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -7192,10 +7192,12 @@ static void ixgbe_service_task(struct work_struct *work) return; } #ifdef CONFIG_IXGBE_VXLAN + rtnl_lock(); if (adapter->flags2 & IXGBE_FLAG2_VXLAN_REREG_NEEDED) { adapter->flags2 &= ~IXGBE_FLAG2_VXLAN_REREG_NEEDED; vxlan_get_rx_port(adapter->netdev); } + rtnl_unlock(); #endif /* CONFIG_IXGBE_VXLAN */ ixgbe_reset_subtask(adapter); ixgbe_phy_interrupt_subtask(adapter); From 50d65d78897ff9785b7debbdca0030967cd5772d Mon Sep 17 00:00:00 2001 From: Hannes Frederic Sowa Date: Mon, 18 Apr 2016 21:19:46 +0200 Subject: [PATCH 0909/1649] qlcnic: protect qlicnic_attach_func with rtnl_lock qlcnic_attach_func requires rtnl_lock to be held. Cc: Dept-GELinuxNICDev@qlogic.com Signed-off-by: Hannes Frederic Sowa Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index 1205f6f9c941..1c29105b6c36 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c @@ -3952,8 +3952,14 @@ static pci_ers_result_t qlcnic_82xx_io_error_detected(struct pci_dev *pdev, static pci_ers_result_t qlcnic_82xx_io_slot_reset(struct pci_dev *pdev) { - return qlcnic_attach_func(pdev) ? 
PCI_ERS_RESULT_DISCONNECT : - PCI_ERS_RESULT_RECOVERED; + pci_ers_result_t res; + + rtnl_lock(); + res = qlcnic_attach_func(pdev) ? PCI_ERS_RESULT_DISCONNECT : + PCI_ERS_RESULT_RECOVERED; + rtnl_unlock(); + + return res; } static void qlcnic_82xx_io_resume(struct pci_dev *pdev) From b7aade15485a660cbf5161962c284131324a9534 Mon Sep 17 00:00:00 2001 From: Hannes Frederic Sowa Date: Mon, 18 Apr 2016 21:19:47 +0200 Subject: [PATCH 0910/1649] vxlan: break dependency with netdev drivers Currently all drivers depend and autoload the vxlan module because how vxlan_get_rx_port is linked into them. Remove this dependency: By using a new event type in the netdevice notifier call chain we proxy the request from the drivers to flush and resetup the vxlan ports not directly via function call but by the already existing netdevice notifier call chain. I added a separate new event type, NETDEV_OFFLOAD_PUSH_VXLAN, to do so. We don't need to save those ids, as the event type field is an unsigned long and using specialized event types for this purpose seemed to be a more elegant way. This also comes in beneficial if in future we want to add offloading knobs for vxlan. Cc: Jesse Gross Signed-off-by: Hannes Frederic Sowa Signed-off-by: David S. Miller --- drivers/net/vxlan.c | 14 +++++++++----- include/linux/netdevice.h | 1 + include/net/vxlan.h | 6 ++---- 3 files changed, 12 insertions(+), 9 deletions(-) diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index c2e22c2532a1..6fb93b57a724 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -2527,7 +2527,7 @@ static struct device_type vxlan_type = { * supply the listening VXLAN udp ports. Callers are expected * to implement the ndo_add_vxlan_port. */ -void vxlan_get_rx_port(struct net_device *dev) +static void vxlan_push_rx_ports(struct net_device *dev) { struct vxlan_sock *vs; struct net *net = dev_net(dev); @@ -2536,6 +2536,9 @@ void vxlan_get_rx_port(struct net_device *dev) __be16 port; unsigned int i; + if (!dev->netdev_ops->ndo_add_vxlan_port) + return; + spin_lock(&vn->sock_lock); for (i = 0; i < PORT_HASH_SIZE; ++i) { hlist_for_each_entry_rcu(vs, &vn->sock_list[i], hlist) { @@ -2547,7 +2550,6 @@ void vxlan_get_rx_port(struct net_device *dev) } spin_unlock(&vn->sock_lock); } -EXPORT_SYMBOL_GPL(vxlan_get_rx_port); /* Initialize the device structure. 
*/ static void vxlan_setup(struct net_device *dev) @@ -3283,20 +3285,22 @@ static void vxlan_handle_lowerdev_unregister(struct vxlan_net *vn, unregister_netdevice_many(&list_kill); } -static int vxlan_lowerdev_event(struct notifier_block *unused, - unsigned long event, void *ptr) +static int vxlan_netdevice_event(struct notifier_block *unused, + unsigned long event, void *ptr) { struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id); if (event == NETDEV_UNREGISTER) vxlan_handle_lowerdev_unregister(vn, dev); + else if (event == NETDEV_OFFLOAD_PUSH_VXLAN) + vxlan_push_rx_ports(dev); return NOTIFY_DONE; } static struct notifier_block vxlan_notifier_block __read_mostly = { - .notifier_call = vxlan_lowerdev_event, + .notifier_call = vxlan_netdevice_event, }; static __net_init int vxlan_init_net(struct net *net) diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index a3bb534576a3..d4c8cd424f8d 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -2244,6 +2244,7 @@ struct netdev_lag_lower_state_info { #define NETDEV_BONDING_INFO 0x0019 #define NETDEV_PRECHANGEUPPER 0x001A #define NETDEV_CHANGELOWERSTATE 0x001B +#define NETDEV_OFFLOAD_PUSH_VXLAN 0x001C int register_netdevice_notifier(struct notifier_block *nb); int unregister_netdevice_notifier(struct notifier_block *nb); diff --git a/include/net/vxlan.h b/include/net/vxlan.h index d442eb3129cd..673e9f9e6da7 100644 --- a/include/net/vxlan.h +++ b/include/net/vxlan.h @@ -390,13 +390,11 @@ static inline __be32 vxlan_compute_rco(unsigned int start, unsigned int offset) return vni_field; } -#if IS_ENABLED(CONFIG_VXLAN) -void vxlan_get_rx_port(struct net_device *netdev); -#else static inline void vxlan_get_rx_port(struct net_device *netdev) { + ASSERT_RTNL(); + call_netdevice_notifiers(NETDEV_OFFLOAD_PUSH_VXLAN, netdev); } -#endif static inline unsigned short vxlan_get_sk_family(struct vxlan_sock *vs) { From 681e683ff30ada19f73c17c38a528528dd8824f1 Mon Sep 17 00:00:00 2001 From: Hannes Frederic Sowa Date: Mon, 18 Apr 2016 21:19:48 +0200 Subject: [PATCH 0911/1649] geneve: break dependency with netdev drivers Equivalent to "vxlan: break dependency with netdev drivers", don't autoload geneve module in case the driver is loaded. Instead make the coupling weaker by using netdevice notifiers as proxy. Cc: Jesse Gross Signed-off-by: Hannes Frederic Sowa Signed-off-by: David S. Miller --- drivers/net/geneve.c | 31 ++++++++++++++++++++++++++++--- include/linux/netdevice.h | 1 + include/net/geneve.h | 6 ++---- 3 files changed, 31 insertions(+), 7 deletions(-) diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index 512dbe013713..9c40b88fabd5 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -1172,7 +1172,7 @@ static struct device_type geneve_type = { * supply the listening GENEVE udp ports. Callers are expected * to implement the ndo_add_geneve_port. 
*/ -void geneve_get_rx_port(struct net_device *dev) +static void geneve_push_rx_ports(struct net_device *dev) { struct net *net = dev_net(dev); struct geneve_net *gn = net_generic(net, geneve_net_id); @@ -1181,6 +1181,9 @@ void geneve_get_rx_port(struct net_device *dev) struct sock *sk; __be16 port; + if (!dev->netdev_ops->ndo_add_geneve_port) + return; + rcu_read_lock(); list_for_each_entry_rcu(gs, &gn->sock_list, list) { sk = gs->sock->sk; @@ -1190,7 +1193,6 @@ void geneve_get_rx_port(struct net_device *dev) } rcu_read_unlock(); } -EXPORT_SYMBOL_GPL(geneve_get_rx_port); /* Initialize the device structure. */ static void geneve_setup(struct net_device *dev) @@ -1538,6 +1540,21 @@ struct net_device *geneve_dev_create_fb(struct net *net, const char *name, } EXPORT_SYMBOL_GPL(geneve_dev_create_fb); +static int geneve_netdevice_event(struct notifier_block *unused, + unsigned long event, void *ptr) +{ + struct net_device *dev = netdev_notifier_info_to_dev(ptr); + + if (event == NETDEV_OFFLOAD_PUSH_GENEVE) + geneve_push_rx_ports(dev); + + return NOTIFY_DONE; +} + +static struct notifier_block geneve_notifier_block __read_mostly = { + .notifier_call = geneve_netdevice_event, +}; + static __net_init int geneve_init_net(struct net *net) { struct geneve_net *gn = net_generic(net, geneve_net_id); @@ -1590,11 +1607,18 @@ static int __init geneve_init_module(void) if (rc) goto out1; - rc = rtnl_link_register(&geneve_link_ops); + rc = register_netdevice_notifier(&geneve_notifier_block); if (rc) goto out2; + rc = rtnl_link_register(&geneve_link_ops); + if (rc) + goto out3; + return 0; + +out3: + unregister_netdevice_notifier(&geneve_notifier_block); out2: unregister_pernet_subsys(&geneve_net_ops); out1: @@ -1605,6 +1629,7 @@ late_initcall(geneve_init_module); static void __exit geneve_cleanup_module(void) { rtnl_link_unregister(&geneve_link_ops); + unregister_netdevice_notifier(&geneve_notifier_block); unregister_pernet_subsys(&geneve_net_ops); } module_exit(geneve_cleanup_module); diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index d4c8cd424f8d..1f6d5db471a2 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -2245,6 +2245,7 @@ struct netdev_lag_lower_state_info { #define NETDEV_PRECHANGEUPPER 0x001A #define NETDEV_CHANGELOWERSTATE 0x001B #define NETDEV_OFFLOAD_PUSH_VXLAN 0x001C +#define NETDEV_OFFLOAD_PUSH_GENEVE 0x001D int register_netdevice_notifier(struct notifier_block *nb); int unregister_netdevice_notifier(struct notifier_block *nb); diff --git a/include/net/geneve.h b/include/net/geneve.h index e6c23dc765f7..cb544a530146 100644 --- a/include/net/geneve.h +++ b/include/net/geneve.h @@ -62,13 +62,11 @@ struct genevehdr { struct geneve_opt options[]; }; -#if IS_ENABLED(CONFIG_GENEVE) -void geneve_get_rx_port(struct net_device *netdev); -#else static inline void geneve_get_rx_port(struct net_device *netdev) { + ASSERT_RTNL(); + call_netdevice_notifiers(NETDEV_OFFLOAD_PUSH_GENEVE, netdev); } -#endif #ifdef CONFIG_INET struct net_device *geneve_dev_create_fb(struct net *net, const char *name, From e9fc71649b5361b8ac608898342c8904167cb63d Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Fri, 22 Apr 2016 13:02:55 +0300 Subject: [PATCH 0912/1649] Bluetooth: ath3k: Silence uninitialized variable warning We could print an uninitialized value in the error message. 
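The warning comes from a common shape of code: a local that is only assigned inside a loop, then referenced in an error message that can run before any assignment has happened. A minimal stand-alone example of that shape and of the fix follows; the names are hypothetical and this is not the ath3k firmware loader itself.

    #include <stdio.h>

    /* Stand-in for a USB bulk transfer; it can fail before *out_len is set. */
    static int do_transfer(int chunk, int *out_len)
    {
            (void)chunk;
            (void)out_len;
            return -1;              /* always fail, to exercise the error path */
    }

    static int load_blob(int count)
    {
            int len = 0;            /* the fix: give the value read on error a defined start */
            int err = 0;

            while (count > 0) {
                    err = do_transfer(count, &len);
                    if (err)
                            break;
                    count -= len;
            }
            if (err)
                    fprintf(stderr, "transfer failed (len %d, err %d)\n", len, err);
            return err;
    }

Without the initialization, a failure on the very first transfer prints whatever happens to be in len, which is what the patch below silences by starting from len = 0.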
Signed-off-by: Dan Carpenter Signed-off-by: Marcel Holtmann --- drivers/bluetooth/ath3k.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c index 47ca4b39d306..641c2d19fc57 100644 --- a/drivers/bluetooth/ath3k.c +++ b/drivers/bluetooth/ath3k.c @@ -206,7 +206,8 @@ static int ath3k_load_firmware(struct usb_device *udev, const struct firmware *firmware) { u8 *send_buf; - int err, pipe, len, size, sent = 0; + int len = 0; + int err, pipe, size, sent = 0; int count = firmware->size; BT_DBG("udev %p", udev); @@ -302,7 +303,8 @@ static int ath3k_load_fwfile(struct usb_device *udev, const struct firmware *firmware) { u8 *send_buf; - int err, pipe, len, size, count, sent = 0; + int len = 0; + int err, pipe, size, count, sent = 0; int ret; count = firmware->size; From 5c0e03cd9f10d541b69b667a2b1b8980f196f432 Mon Sep 17 00:00:00 2001 From: Johan Hedberg Date: Fri, 22 Apr 2016 15:59:25 +0300 Subject: [PATCH 0913/1649] Bluetooth: Add defines for SPI and I2C Extend the set of possible HCI bus types with SPI and I2C. Signed-off-by: Johan Hedberg Signed-off-by: Marcel Holtmann --- include/net/bluetooth/hci.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h index 5d38d980b89d..eefcf3e96421 100644 --- a/include/net/bluetooth/hci.h +++ b/include/net/bluetooth/hci.h @@ -61,6 +61,8 @@ #define HCI_RS232 4 #define HCI_PCI 5 #define HCI_SDIO 6 +#define HCI_SPI 7 +#define HCI_I2C 8 /* HCI controller types */ #define HCI_BREDR 0x00 From 11a99573079e15f11499ae8d21b07e3e3257fff1 Mon Sep 17 00:00:00 2001 From: Nicolas Dichtel Date: Fri, 22 Apr 2016 17:31:16 +0200 Subject: [PATCH 0914/1649] libnl: fix help of _64bit functions Fix typo and describe 'padattr'. Fixes: 089bf1a6a924 ("libnl: add more helpers to align attributes on 64-bit") Signed-off-by: Nicolas Dichtel Signed-off-by: David S. Miller --- lib/nlattr.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/lib/nlattr.c b/lib/nlattr.c index 2b82f1e2ebc2..fce1e9afc6d9 100644 --- a/lib/nlattr.c +++ b/lib/nlattr.c @@ -359,10 +359,11 @@ EXPORT_SYMBOL(__nla_reserve); * @skb: socket buffer to reserve room on * @attrtype: attribute type * @attrlen: length of attribute payload + * @padattr: attribute type for the padding * * Adds a netlink attribute header to a socket buffer and reserves * room for the payload but does not copy it. It also ensure that this - * attribute will be 64-bit aign. + * attribute will have a 64-bit aligned nla_data() area. * * The caller is responsible to ensure that the skb provides enough * tailroom for the attribute header and payload. @@ -424,10 +425,11 @@ EXPORT_SYMBOL(nla_reserve); * @skb: socket buffer to reserve room on * @attrtype: attribute type * @attrlen: length of attribute payload + * @padattr: attribute type for the padding * * Adds a netlink attribute header to a socket buffer and reserves * room for the payload but does not copy it. It also ensure that this - * attribute will be 64-bit aign. + * attribute will have a 64-bit aligned nla_data() area. * * Returns NULL if the tailroom of the skb is insufficient to store * the attribute header and payload. @@ -493,6 +495,7 @@ EXPORT_SYMBOL(__nla_put); * @attrtype: attribute type * @attrlen: length of attribute payload * @data: head of attribute payload + * @padattr: attribute type for the padding * * The caller is responsible to ensure that the skb provides enough * tailroom for the attribute header and payload. 
@@ -551,6 +554,7 @@ EXPORT_SYMBOL(nla_put); * @attrtype: attribute type * @attrlen: length of attribute payload * @data: head of attribute payload + * @padattr: attribute type for the padding * * Returns -EMSGSIZE if the tailroom of the skb is insufficient to store * the attribute header and payload. From e7479122befd7026cf0fb3b3740f17ebd9c64d35 Mon Sep 17 00:00:00 2001 From: Nicolas Dichtel Date: Fri, 22 Apr 2016 17:31:17 +0200 Subject: [PATCH 0915/1649] libnl: nla_put_le64(): align on a 64-bit area nla_data() is now aligned on a 64-bit area. Signed-off-by: Nicolas Dichtel Signed-off-by: David S. Miller --- include/net/netlink.h | 8 +++++--- include/net/nl802154.h | 6 ++++++ net/ieee802154/nl802154.c | 13 ++++++++----- 3 files changed, 19 insertions(+), 8 deletions(-) diff --git a/include/net/netlink.h b/include/net/netlink.h index 6f51a8a06498..7f6b99483ab7 100644 --- a/include/net/netlink.h +++ b/include/net/netlink.h @@ -878,14 +878,16 @@ static inline int nla_put_net64(struct sk_buff *skb, int attrtype, __be64 value) } /** - * nla_put_le64 - Add a __le64 netlink attribute to a socket buffer + * nla_put_le64 - Add a __le64 netlink attribute to a socket buffer and align it * @skb: socket buffer to add attribute to * @attrtype: attribute type * @value: numeric value + * @padattr: attribute type for the padding */ -static inline int nla_put_le64(struct sk_buff *skb, int attrtype, __le64 value) +static inline int nla_put_le64(struct sk_buff *skb, int attrtype, __le64 value, + int padattr) { - return nla_put(skb, attrtype, sizeof(__le64), &value); + return nla_put_64bit(skb, attrtype, sizeof(__le64), &value, padattr); } /** diff --git a/include/net/nl802154.h b/include/net/nl802154.h index 32cb3e591e07..fcab4de49951 100644 --- a/include/net/nl802154.h +++ b/include/net/nl802154.h @@ -138,6 +138,8 @@ enum nl802154_attrs { NL802154_ATTR_SEC_KEY, #endif /* CONFIG_IEEE802154_NL802154_EXPERIMENTAL */ + NL802154_ATTR_PAD, + __NL802154_ATTR_AFTER_LAST, NL802154_ATTR_MAX = __NL802154_ATTR_AFTER_LAST - 1 }; @@ -295,6 +297,7 @@ enum nl802154_dev_addr_attrs { NL802154_DEV_ADDR_ATTR_MODE, NL802154_DEV_ADDR_ATTR_SHORT, NL802154_DEV_ADDR_ATTR_EXTENDED, + NL802154_DEV_ADDR_ATTR_PAD, /* keep last */ __NL802154_DEV_ADDR_ATTR_AFTER_LAST, @@ -320,6 +323,7 @@ enum nl802154_key_id_attrs { NL802154_KEY_ID_ATTR_IMPLICIT, NL802154_KEY_ID_ATTR_SOURCE_SHORT, NL802154_KEY_ID_ATTR_SOURCE_EXTENDED, + NL802154_KEY_ID_ATTR_PAD, /* keep last */ __NL802154_KEY_ID_ATTR_AFTER_LAST, @@ -402,6 +406,7 @@ enum nl802154_dev { NL802154_DEV_ATTR_EXTENDED_ADDR, NL802154_DEV_ATTR_SECLEVEL_EXEMPT, NL802154_DEV_ATTR_KEY_MODE, + NL802154_DEV_ATTR_PAD, /* keep last */ __NL802154_DEV_ATTR_AFTER_LAST, @@ -414,6 +419,7 @@ enum nl802154_devkey { NL802154_DEVKEY_ATTR_FRAME_COUNTER, NL802154_DEVKEY_ATTR_EXTENDED_ADDR, NL802154_DEVKEY_ATTR_ID, + NL802154_DEVKEY_ATTR_PAD, /* keep last */ __NL802154_DEVKEY_ATTR_AFTER_LAST, diff --git a/net/ieee802154/nl802154.c b/net/ieee802154/nl802154.c index 16ef0d9f566e..614072064d03 100644 --- a/net/ieee802154/nl802154.c +++ b/net/ieee802154/nl802154.c @@ -722,7 +722,8 @@ ieee802154_llsec_send_key_id(struct sk_buff *msg, break; case NL802154_DEV_ADDR_EXTENDED: if (nla_put_le64(msg, NL802154_DEV_ADDR_ATTR_EXTENDED, - desc->device_addr.extended_addr)) + desc->device_addr.extended_addr, + NL802154_DEV_ADDR_ATTR_PAD)) return -ENOBUFS; break; default: @@ -742,7 +743,8 @@ ieee802154_llsec_send_key_id(struct sk_buff *msg, break; case NL802154_KEY_ID_MODE_INDEX_EXTENDED: if (nla_put_le64(msg, 
NL802154_KEY_ID_ATTR_SOURCE_EXTENDED, - desc->extended_source)) + desc->extended_source, + NL802154_KEY_ID_ATTR_PAD)) return -ENOBUFS; break; default: @@ -819,7 +821,8 @@ nl802154_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flags, /* address settings */ if (nla_put_le64(msg, NL802154_ATTR_EXTENDED_ADDR, - wpan_dev->extended_addr) || + wpan_dev->extended_addr, + NL802154_ATTR_PAD) || nla_put_le16(msg, NL802154_ATTR_SHORT_ADDR, wpan_dev->short_addr) || nla_put_le16(msg, NL802154_ATTR_PAN_ID, wpan_dev->pan_id)) @@ -1614,7 +1617,7 @@ static int nl802154_send_device(struct sk_buff *msg, u32 cmd, u32 portid, nla_put_le16(msg, NL802154_DEV_ATTR_SHORT_ADDR, dev_desc->short_addr) || nla_put_le64(msg, NL802154_DEV_ATTR_EXTENDED_ADDR, - dev_desc->hwaddr) || + dev_desc->hwaddr, NL802154_DEV_ATTR_PAD) || nla_put_u8(msg, NL802154_DEV_ATTR_SECLEVEL_EXEMPT, dev_desc->seclevel_exempt) || nla_put_u32(msg, NL802154_DEV_ATTR_KEY_MODE, dev_desc->key_mode)) @@ -1778,7 +1781,7 @@ static int nl802154_send_devkey(struct sk_buff *msg, u32 cmd, u32 portid, goto nla_put_failure; if (nla_put_le64(msg, NL802154_DEVKEY_ATTR_EXTENDED_ADDR, - extended_addr) || + extended_addr, NL802154_DEVKEY_ATTR_PAD) || nla_put_u32(msg, NL802154_DEVKEY_ATTR_FRAME_COUNTER, devkey->frame_counter)) goto nla_put_failure; From b46f6ded906ef0be52a4881ba50a084aeca64d7e Mon Sep 17 00:00:00 2001 From: Nicolas Dichtel Date: Fri, 22 Apr 2016 17:31:18 +0200 Subject: [PATCH 0916/1649] libnl: nla_put_be64(): align on a 64-bit area nla_data() is now aligned on a 64-bit area. A temporary version (nla_put_be64_32bit()) is added for nla_put_net64(). This function is removed in the next patch. Signed-off-by: Nicolas Dichtel Signed-off-by: David S. Miller --- include/net/netlink.h | 15 ++++++++---- include/uapi/linux/fib_rules.h | 1 + include/uapi/linux/lwtunnel.h | 2 ++ include/uapi/linux/netfilter/nf_tables.h | 8 +++++++ include/uapi/linux/netfilter/nfnetlink_acct.h | 1 + .../linux/netfilter/nfnetlink_conntrack.h | 3 +++ include/uapi/linux/openvswitch.h | 1 + net/core/fib_rules.c | 4 ++-- net/ipv4/ip_tunnel_core.c | 10 ++++---- net/netfilter/nf_conntrack_netlink.c | 18 ++++++++------ net/netfilter/nf_conntrack_proto_dccp.c | 4 +++- net/netfilter/nf_tables_api.c | 24 ++++++++++++------- net/netfilter/nf_tables_trace.c | 5 ++-- net/netfilter/nfnetlink_acct.c | 9 ++++--- net/netfilter/nft_counter.c | 6 +++-- net/netfilter/nft_dynset.c | 3 ++- net/netfilter/nft_limit.c | 6 +++-- net/openvswitch/flow_netlink.c | 5 ++-- 18 files changed, 87 insertions(+), 38 deletions(-) diff --git a/include/net/netlink.h b/include/net/netlink.h index 7f6b99483ab7..47d7d1356fa3 100644 --- a/include/net/netlink.h +++ b/include/net/netlink.h @@ -856,16 +856,23 @@ static inline int nla_put_u64(struct sk_buff *skb, int attrtype, u64 value) } /** - * nla_put_be64 - Add a __be64 netlink attribute to a socket buffer + * nla_put_be64 - Add a __be64 netlink attribute to a socket buffer and align it * @skb: socket buffer to add attribute to * @attrtype: attribute type * @value: numeric value + * @padattr: attribute type for the padding */ -static inline int nla_put_be64(struct sk_buff *skb, int attrtype, __be64 value) +static inline int nla_put_be64(struct sk_buff *skb, int attrtype, __be64 value, + int padattr) +{ + return nla_put_64bit(skb, attrtype, sizeof(__be64), &value, padattr); +} + +static inline int nla_put_be64_32bit(struct sk_buff *skb, int attrtype, + __be64 value) { return nla_put(skb, attrtype, sizeof(__be64), &value); } - /** * nla_put_net64 - Add 64-bit 
network byte order netlink attribute to a socket buffer * @skb: socket buffer to add attribute to @@ -874,7 +881,7 @@ static inline int nla_put_be64(struct sk_buff *skb, int attrtype, __be64 value) */ static inline int nla_put_net64(struct sk_buff *skb, int attrtype, __be64 value) { - return nla_put_be64(skb, attrtype | NLA_F_NET_BYTEORDER, value); + return nla_put_be64_32bit(skb, attrtype | NLA_F_NET_BYTEORDER, value); } /** diff --git a/include/uapi/linux/fib_rules.h b/include/uapi/linux/fib_rules.h index 96161b8202b5..620c8a5ddc00 100644 --- a/include/uapi/linux/fib_rules.h +++ b/include/uapi/linux/fib_rules.h @@ -49,6 +49,7 @@ enum { FRA_TABLE, /* Extended table id */ FRA_FWMASK, /* mask for netfilter mark */ FRA_OIFNAME, + FRA_PAD, __FRA_MAX }; diff --git a/include/uapi/linux/lwtunnel.h b/include/uapi/linux/lwtunnel.h index f8b01887a495..a478fe80e203 100644 --- a/include/uapi/linux/lwtunnel.h +++ b/include/uapi/linux/lwtunnel.h @@ -22,6 +22,7 @@ enum lwtunnel_ip_t { LWTUNNEL_IP_TTL, LWTUNNEL_IP_TOS, LWTUNNEL_IP_FLAGS, + LWTUNNEL_IP_PAD, __LWTUNNEL_IP_MAX, }; @@ -35,6 +36,7 @@ enum lwtunnel_ip6_t { LWTUNNEL_IP6_HOPLIMIT, LWTUNNEL_IP6_TC, LWTUNNEL_IP6_FLAGS, + LWTUNNEL_IP6_PAD, __LWTUNNEL_IP6_MAX, }; diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h index eeffde196f80..660231363bb5 100644 --- a/include/uapi/linux/netfilter/nf_tables.h +++ b/include/uapi/linux/netfilter/nf_tables.h @@ -182,6 +182,7 @@ enum nft_chain_attributes { NFTA_CHAIN_USE, NFTA_CHAIN_TYPE, NFTA_CHAIN_COUNTERS, + NFTA_CHAIN_PAD, __NFTA_CHAIN_MAX }; #define NFTA_CHAIN_MAX (__NFTA_CHAIN_MAX - 1) @@ -206,6 +207,7 @@ enum nft_rule_attributes { NFTA_RULE_COMPAT, NFTA_RULE_POSITION, NFTA_RULE_USERDATA, + NFTA_RULE_PAD, __NFTA_RULE_MAX }; #define NFTA_RULE_MAX (__NFTA_RULE_MAX - 1) @@ -308,6 +310,7 @@ enum nft_set_attributes { NFTA_SET_TIMEOUT, NFTA_SET_GC_INTERVAL, NFTA_SET_USERDATA, + NFTA_SET_PAD, __NFTA_SET_MAX }; #define NFTA_SET_MAX (__NFTA_SET_MAX - 1) @@ -341,6 +344,7 @@ enum nft_set_elem_attributes { NFTA_SET_ELEM_EXPIRATION, NFTA_SET_ELEM_USERDATA, NFTA_SET_ELEM_EXPR, + NFTA_SET_ELEM_PAD, __NFTA_SET_ELEM_MAX }; #define NFTA_SET_ELEM_MAX (__NFTA_SET_ELEM_MAX - 1) @@ -584,6 +588,7 @@ enum nft_dynset_attributes { NFTA_DYNSET_SREG_DATA, NFTA_DYNSET_TIMEOUT, NFTA_DYNSET_EXPR, + NFTA_DYNSET_PAD, __NFTA_DYNSET_MAX, }; #define NFTA_DYNSET_MAX (__NFTA_DYNSET_MAX - 1) @@ -806,6 +811,7 @@ enum nft_limit_attributes { NFTA_LIMIT_BURST, NFTA_LIMIT_TYPE, NFTA_LIMIT_FLAGS, + NFTA_LIMIT_PAD, __NFTA_LIMIT_MAX }; #define NFTA_LIMIT_MAX (__NFTA_LIMIT_MAX - 1) @@ -820,6 +826,7 @@ enum nft_counter_attributes { NFTA_COUNTER_UNSPEC, NFTA_COUNTER_BYTES, NFTA_COUNTER_PACKETS, + NFTA_COUNTER_PAD, __NFTA_COUNTER_MAX }; #define NFTA_COUNTER_MAX (__NFTA_COUNTER_MAX - 1) @@ -1055,6 +1062,7 @@ enum nft_trace_attibutes { NFTA_TRACE_MARK, NFTA_TRACE_NFPROTO, NFTA_TRACE_POLICY, + NFTA_TRACE_PAD, __NFTA_TRACE_MAX }; #define NFTA_TRACE_MAX (__NFTA_TRACE_MAX - 1) diff --git a/include/uapi/linux/netfilter/nfnetlink_acct.h b/include/uapi/linux/netfilter/nfnetlink_acct.h index f3e34dbbf966..36047ec70f37 100644 --- a/include/uapi/linux/netfilter/nfnetlink_acct.h +++ b/include/uapi/linux/netfilter/nfnetlink_acct.h @@ -29,6 +29,7 @@ enum nfnl_acct_type { NFACCT_FLAGS, NFACCT_QUOTA, NFACCT_FILTER, + NFACCT_PAD, __NFACCT_MAX }; #define NFACCT_MAX (__NFACCT_MAX - 1) diff --git a/include/uapi/linux/netfilter/nfnetlink_conntrack.h b/include/uapi/linux/netfilter/nfnetlink_conntrack.h index c1a4e1441a25..9df789709abe 
100644 --- a/include/uapi/linux/netfilter/nfnetlink_conntrack.h +++ b/include/uapi/linux/netfilter/nfnetlink_conntrack.h @@ -116,6 +116,7 @@ enum ctattr_protoinfo_dccp { CTA_PROTOINFO_DCCP_STATE, CTA_PROTOINFO_DCCP_ROLE, CTA_PROTOINFO_DCCP_HANDSHAKE_SEQ, + CTA_PROTOINFO_DCCP_PAD, __CTA_PROTOINFO_DCCP_MAX, }; #define CTA_PROTOINFO_DCCP_MAX (__CTA_PROTOINFO_DCCP_MAX - 1) @@ -135,6 +136,7 @@ enum ctattr_counters { CTA_COUNTERS_BYTES, /* 64bit counters */ CTA_COUNTERS32_PACKETS, /* old 32bit counters, unused */ CTA_COUNTERS32_BYTES, /* old 32bit counters, unused */ + CTA_COUNTERS_PAD, __CTA_COUNTERS_MAX }; #define CTA_COUNTERS_MAX (__CTA_COUNTERS_MAX - 1) @@ -143,6 +145,7 @@ enum ctattr_tstamp { CTA_TIMESTAMP_UNSPEC, CTA_TIMESTAMP_START, CTA_TIMESTAMP_STOP, + CTA_TIMESTAMP_PAD, __CTA_TIMESTAMP_MAX }; #define CTA_TIMESTAMP_MAX (__CTA_TIMESTAMP_MAX - 1) diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h index 616d04761730..0358f94af86e 100644 --- a/include/uapi/linux/openvswitch.h +++ b/include/uapi/linux/openvswitch.h @@ -351,6 +351,7 @@ enum ovs_tunnel_key_attr { OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS, /* Nested OVS_VXLAN_EXT_* */ OVS_TUNNEL_KEY_ATTR_IPV6_SRC, /* struct in6_addr src IPv6 address. */ OVS_TUNNEL_KEY_ATTR_IPV6_DST, /* struct in6_addr dst IPv6 address. */ + OVS_TUNNEL_KEY_ATTR_PAD, __OVS_TUNNEL_KEY_ATTR_MAX }; diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c index 365de66436ac..840acebbb80c 100644 --- a/net/core/fib_rules.c +++ b/net/core/fib_rules.c @@ -549,7 +549,7 @@ static inline size_t fib_rule_nlmsg_size(struct fib_rules_ops *ops, + nla_total_size(4) /* FRA_SUPPRESS_IFGROUP */ + nla_total_size(4) /* FRA_FWMARK */ + nla_total_size(4) /* FRA_FWMASK */ - + nla_total_size(8); /* FRA_TUN_ID */ + + nla_total_size_64bit(8); /* FRA_TUN_ID */ if (ops->nlmsg_payload) payload += ops->nlmsg_payload(rule); @@ -607,7 +607,7 @@ static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule, (rule->target && nla_put_u32(skb, FRA_GOTO, rule->target)) || (rule->tun_id && - nla_put_be64(skb, FRA_TUN_ID, rule->tun_id))) + nla_put_be64(skb, FRA_TUN_ID, rule->tun_id, FRA_PAD))) goto nla_put_failure; if (rule->suppress_ifgroup != -1) { diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c index f46c5c873831..786fa7ca28e0 100644 --- a/net/ipv4/ip_tunnel_core.c +++ b/net/ipv4/ip_tunnel_core.c @@ -271,7 +271,8 @@ static int ip_tun_fill_encap_info(struct sk_buff *skb, { struct ip_tunnel_info *tun_info = lwt_tun_info(lwtstate); - if (nla_put_be64(skb, LWTUNNEL_IP_ID, tun_info->key.tun_id) || + if (nla_put_be64(skb, LWTUNNEL_IP_ID, tun_info->key.tun_id, + LWTUNNEL_IP_PAD) || nla_put_in_addr(skb, LWTUNNEL_IP_DST, tun_info->key.u.ipv4.dst) || nla_put_in_addr(skb, LWTUNNEL_IP_SRC, tun_info->key.u.ipv4.src) || nla_put_u8(skb, LWTUNNEL_IP_TOS, tun_info->key.tos) || @@ -284,7 +285,7 @@ static int ip_tun_fill_encap_info(struct sk_buff *skb, static int ip_tun_encap_nlsize(struct lwtunnel_state *lwtstate) { - return nla_total_size(8) /* LWTUNNEL_IP_ID */ + return nla_total_size_64bit(8) /* LWTUNNEL_IP_ID */ + nla_total_size(4) /* LWTUNNEL_IP_DST */ + nla_total_size(4) /* LWTUNNEL_IP_SRC */ + nla_total_size(1) /* LWTUNNEL_IP_TOS */ @@ -366,7 +367,8 @@ static int ip6_tun_fill_encap_info(struct sk_buff *skb, { struct ip_tunnel_info *tun_info = lwt_tun_info(lwtstate); - if (nla_put_be64(skb, LWTUNNEL_IP6_ID, tun_info->key.tun_id) || + if (nla_put_be64(skb, LWTUNNEL_IP6_ID, tun_info->key.tun_id, + LWTUNNEL_IP6_PAD) || nla_put_in6_addr(skb, LWTUNNEL_IP6_DST, 
&tun_info->key.u.ipv6.dst) || nla_put_in6_addr(skb, LWTUNNEL_IP6_SRC, &tun_info->key.u.ipv6.src) || nla_put_u8(skb, LWTUNNEL_IP6_TC, tun_info->key.tos) || @@ -379,7 +381,7 @@ static int ip6_tun_fill_encap_info(struct sk_buff *skb, static int ip6_tun_encap_nlsize(struct lwtunnel_state *lwtstate) { - return nla_total_size(8) /* LWTUNNEL_IP6_ID */ + return nla_total_size_64bit(8) /* LWTUNNEL_IP6_ID */ + nla_total_size(16) /* LWTUNNEL_IP6_DST */ + nla_total_size(16) /* LWTUNNEL_IP6_SRC */ + nla_total_size(1) /* LWTUNNEL_IP6_HOPLIMIT */ diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index 355e8552fd5b..3362d65f3285 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c @@ -245,8 +245,10 @@ dump_counters(struct sk_buff *skb, struct nf_conn_acct *acct, if (!nest_count) goto nla_put_failure; - if (nla_put_be64(skb, CTA_COUNTERS_PACKETS, cpu_to_be64(pkts)) || - nla_put_be64(skb, CTA_COUNTERS_BYTES, cpu_to_be64(bytes))) + if (nla_put_be64(skb, CTA_COUNTERS_PACKETS, cpu_to_be64(pkts), + CTA_COUNTERS_PAD) || + nla_put_be64(skb, CTA_COUNTERS_BYTES, cpu_to_be64(bytes), + CTA_COUNTERS_PAD)) goto nla_put_failure; nla_nest_end(skb, nest_count); @@ -287,9 +289,11 @@ ctnetlink_dump_timestamp(struct sk_buff *skb, const struct nf_conn *ct) if (!nest_count) goto nla_put_failure; - if (nla_put_be64(skb, CTA_TIMESTAMP_START, cpu_to_be64(tstamp->start)) || + if (nla_put_be64(skb, CTA_TIMESTAMP_START, cpu_to_be64(tstamp->start), + CTA_TIMESTAMP_PAD) || (tstamp->stop != 0 && nla_put_be64(skb, CTA_TIMESTAMP_STOP, - cpu_to_be64(tstamp->stop)))) + cpu_to_be64(tstamp->stop), + CTA_TIMESTAMP_PAD))) goto nla_put_failure; nla_nest_end(skb, nest_count); @@ -562,8 +566,8 @@ ctnetlink_acct_size(const struct nf_conn *ct) if (!nf_ct_ext_exist(ct, NF_CT_EXT_ACCT)) return 0; return 2 * nla_total_size(0) /* CTA_COUNTERS_ORIG|REPL */ - + 2 * nla_total_size(sizeof(uint64_t)) /* CTA_COUNTERS_PACKETS */ - + 2 * nla_total_size(sizeof(uint64_t)) /* CTA_COUNTERS_BYTES */ + + 2 * nla_total_size_64bit(sizeof(uint64_t)) /* CTA_COUNTERS_PACKETS */ + + 2 * nla_total_size_64bit(sizeof(uint64_t)) /* CTA_COUNTERS_BYTES */ ; } @@ -590,7 +594,7 @@ ctnetlink_timestamp_size(const struct nf_conn *ct) #ifdef CONFIG_NF_CONNTRACK_TIMESTAMP if (!nf_ct_ext_exist(ct, NF_CT_EXT_TSTAMP)) return 0; - return nla_total_size(0) + 2 * nla_total_size(sizeof(uint64_t)); + return nla_total_size(0) + 2 * nla_total_size_64bit(sizeof(uint64_t)); #else return 0; #endif diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c index fce1b1cca32d..399a38fd685a 100644 --- a/net/netfilter/nf_conntrack_proto_dccp.c +++ b/net/netfilter/nf_conntrack_proto_dccp.c @@ -645,7 +645,8 @@ static int dccp_to_nlattr(struct sk_buff *skb, struct nlattr *nla, nla_put_u8(skb, CTA_PROTOINFO_DCCP_ROLE, ct->proto.dccp.role[IP_CT_DIR_ORIGINAL]) || nla_put_be64(skb, CTA_PROTOINFO_DCCP_HANDSHAKE_SEQ, - cpu_to_be64(ct->proto.dccp.handshake_seq))) + cpu_to_be64(ct->proto.dccp.handshake_seq), + CTA_PROTOINFO_DCCP_PAD)) goto nla_put_failure; nla_nest_end(skb, nest_parms); spin_unlock_bh(&ct->lock); @@ -660,6 +661,7 @@ static const struct nla_policy dccp_nla_policy[CTA_PROTOINFO_DCCP_MAX + 1] = { [CTA_PROTOINFO_DCCP_STATE] = { .type = NLA_U8 }, [CTA_PROTOINFO_DCCP_ROLE] = { .type = NLA_U8 }, [CTA_PROTOINFO_DCCP_HANDSHAKE_SEQ] = { .type = NLA_U64 }, + [CTA_PROTOINFO_DCCP_PAD] = { .type = NLA_UNSPEC }, }; static int nlattr_to_dccp(struct nlattr *cda[], struct nf_conn *ct) diff --git 
a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 2011977cd79d..7a85a9dd37ad 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -944,8 +944,10 @@ static int nft_dump_stats(struct sk_buff *skb, struct nft_stats __percpu *stats) if (nest == NULL) goto nla_put_failure; - if (nla_put_be64(skb, NFTA_COUNTER_PACKETS, cpu_to_be64(total.pkts)) || - nla_put_be64(skb, NFTA_COUNTER_BYTES, cpu_to_be64(total.bytes))) + if (nla_put_be64(skb, NFTA_COUNTER_PACKETS, cpu_to_be64(total.pkts), + NFTA_COUNTER_PAD) || + nla_put_be64(skb, NFTA_COUNTER_BYTES, cpu_to_be64(total.bytes), + NFTA_COUNTER_PAD)) goto nla_put_failure; nla_nest_end(skb, nest); @@ -975,7 +977,8 @@ static int nf_tables_fill_chain_info(struct sk_buff *skb, struct net *net, if (nla_put_string(skb, NFTA_CHAIN_TABLE, table->name)) goto nla_put_failure; - if (nla_put_be64(skb, NFTA_CHAIN_HANDLE, cpu_to_be64(chain->handle))) + if (nla_put_be64(skb, NFTA_CHAIN_HANDLE, cpu_to_be64(chain->handle), + NFTA_CHAIN_PAD)) goto nla_put_failure; if (nla_put_string(skb, NFTA_CHAIN_NAME, chain->name)) goto nla_put_failure; @@ -1803,13 +1806,15 @@ static int nf_tables_fill_rule_info(struct sk_buff *skb, struct net *net, goto nla_put_failure; if (nla_put_string(skb, NFTA_RULE_CHAIN, chain->name)) goto nla_put_failure; - if (nla_put_be64(skb, NFTA_RULE_HANDLE, cpu_to_be64(rule->handle))) + if (nla_put_be64(skb, NFTA_RULE_HANDLE, cpu_to_be64(rule->handle), + NFTA_RULE_PAD)) goto nla_put_failure; if ((event != NFT_MSG_DELRULE) && (rule->list.prev != &chain->rules)) { prule = list_entry(rule->list.prev, struct nft_rule, list); if (nla_put_be64(skb, NFTA_RULE_POSITION, - cpu_to_be64(prule->handle))) + cpu_to_be64(prule->handle), + NFTA_RULE_PAD)) goto nla_put_failure; } @@ -2473,7 +2478,8 @@ static int nf_tables_fill_set(struct sk_buff *skb, const struct nft_ctx *ctx, } if (set->timeout && - nla_put_be64(skb, NFTA_SET_TIMEOUT, cpu_to_be64(set->timeout))) + nla_put_be64(skb, NFTA_SET_TIMEOUT, cpu_to_be64(set->timeout), + NFTA_SET_PAD)) goto nla_put_failure; if (set->gc_int && nla_put_be32(skb, NFTA_SET_GC_INTERVAL, htonl(set->gc_int))) @@ -3076,7 +3082,8 @@ static int nf_tables_fill_setelem(struct sk_buff *skb, if (nft_set_ext_exists(ext, NFT_SET_EXT_TIMEOUT) && nla_put_be64(skb, NFTA_SET_ELEM_TIMEOUT, - cpu_to_be64(*nft_set_ext_timeout(ext)))) + cpu_to_be64(*nft_set_ext_timeout(ext)), + NFTA_SET_ELEM_PAD)) goto nla_put_failure; if (nft_set_ext_exists(ext, NFT_SET_EXT_EXPIRATION)) { @@ -3089,7 +3096,8 @@ static int nf_tables_fill_setelem(struct sk_buff *skb, expires = 0; if (nla_put_be64(skb, NFTA_SET_ELEM_EXPIRATION, - cpu_to_be64(jiffies_to_msecs(expires)))) + cpu_to_be64(jiffies_to_msecs(expires)), + NFTA_SET_ELEM_PAD)) goto nla_put_failure; } diff --git a/net/netfilter/nf_tables_trace.c b/net/netfilter/nf_tables_trace.c index e9e959f65d91..39eb1cc62e91 100644 --- a/net/netfilter/nf_tables_trace.c +++ b/net/netfilter/nf_tables_trace.c @@ -156,7 +156,8 @@ static int nf_trace_fill_rule_info(struct sk_buff *nlskb, return 0; return nla_put_be64(nlskb, NFTA_TRACE_RULE_HANDLE, - cpu_to_be64(info->rule->handle)); + cpu_to_be64(info->rule->handle), + NFTA_TRACE_PAD); } void nft_trace_notify(struct nft_traceinfo *info) @@ -174,7 +175,7 @@ void nft_trace_notify(struct nft_traceinfo *info) size = nlmsg_total_size(sizeof(struct nfgenmsg)) + nla_total_size(NFT_TABLE_MAXNAMELEN) + nla_total_size(NFT_CHAIN_MAXNAMELEN) + - nla_total_size(sizeof(__be64)) + /* rule handle */ + nla_total_size_64bit(sizeof(__be64)) + /* rule handle 
*/ nla_total_size(sizeof(__be32)) + /* trace type */ nla_total_size(0) + /* VERDICT, nested */ nla_total_size(sizeof(u32)) + /* verdict code */ diff --git a/net/netfilter/nfnetlink_acct.c b/net/netfilter/nfnetlink_acct.c index 4c2b4c0c4d5f..d016066a25e3 100644 --- a/net/netfilter/nfnetlink_acct.c +++ b/net/netfilter/nfnetlink_acct.c @@ -160,15 +160,18 @@ nfnl_acct_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type, pkts = atomic64_read(&acct->pkts); bytes = atomic64_read(&acct->bytes); } - if (nla_put_be64(skb, NFACCT_PKTS, cpu_to_be64(pkts)) || - nla_put_be64(skb, NFACCT_BYTES, cpu_to_be64(bytes)) || + if (nla_put_be64(skb, NFACCT_PKTS, cpu_to_be64(pkts), + NFACCT_PAD) || + nla_put_be64(skb, NFACCT_BYTES, cpu_to_be64(bytes), + NFACCT_PAD) || nla_put_be32(skb, NFACCT_USE, htonl(atomic_read(&acct->refcnt)))) goto nla_put_failure; if (acct->flags & NFACCT_F_QUOTA) { u64 *quota = (u64 *)acct->data; if (nla_put_be32(skb, NFACCT_FLAGS, htonl(old_flags)) || - nla_put_be64(skb, NFACCT_QUOTA, cpu_to_be64(*quota))) + nla_put_be64(skb, NFACCT_QUOTA, cpu_to_be64(*quota), + NFACCT_PAD)) goto nla_put_failure; } nlmsg_end(skb, nlh); diff --git a/net/netfilter/nft_counter.c b/net/netfilter/nft_counter.c index c9743f78f219..77db8358ab14 100644 --- a/net/netfilter/nft_counter.c +++ b/net/netfilter/nft_counter.c @@ -76,8 +76,10 @@ static int nft_counter_dump(struct sk_buff *skb, const struct nft_expr *expr) nft_counter_fetch(priv->counter, &total); - if (nla_put_be64(skb, NFTA_COUNTER_BYTES, cpu_to_be64(total.bytes)) || - nla_put_be64(skb, NFTA_COUNTER_PACKETS, cpu_to_be64(total.packets))) + if (nla_put_be64(skb, NFTA_COUNTER_BYTES, cpu_to_be64(total.bytes), + NFTA_COUNTER_PAD) || + nla_put_be64(skb, NFTA_COUNTER_PACKETS, cpu_to_be64(total.packets), + NFTA_COUNTER_PAD)) goto nla_put_failure; return 0; diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c index 9dec3bd1b63c..78d4914fb39c 100644 --- a/net/netfilter/nft_dynset.c +++ b/net/netfilter/nft_dynset.c @@ -227,7 +227,8 @@ static int nft_dynset_dump(struct sk_buff *skb, const struct nft_expr *expr) goto nla_put_failure; if (nla_put_string(skb, NFTA_DYNSET_SET_NAME, priv->set->name)) goto nla_put_failure; - if (nla_put_be64(skb, NFTA_DYNSET_TIMEOUT, cpu_to_be64(priv->timeout))) + if (nla_put_be64(skb, NFTA_DYNSET_TIMEOUT, cpu_to_be64(priv->timeout), + NFTA_DYNSET_PAD)) goto nla_put_failure; if (priv->expr && nft_expr_dump(skb, NFTA_DYNSET_EXPR, priv->expr)) goto nla_put_failure; diff --git a/net/netfilter/nft_limit.c b/net/netfilter/nft_limit.c index 99d18578afc6..070b98938e02 100644 --- a/net/netfilter/nft_limit.c +++ b/net/netfilter/nft_limit.c @@ -97,8 +97,10 @@ static int nft_limit_dump(struct sk_buff *skb, const struct nft_limit *limit, u64 secs = div_u64(limit->nsecs, NSEC_PER_SEC); u64 rate = limit->rate - limit->burst; - if (nla_put_be64(skb, NFTA_LIMIT_RATE, cpu_to_be64(rate)) || - nla_put_be64(skb, NFTA_LIMIT_UNIT, cpu_to_be64(secs)) || + if (nla_put_be64(skb, NFTA_LIMIT_RATE, cpu_to_be64(rate), + NFTA_LIMIT_PAD) || + nla_put_be64(skb, NFTA_LIMIT_UNIT, cpu_to_be64(secs), + NFTA_LIMIT_PAD) || nla_put_be32(skb, NFTA_LIMIT_BURST, htonl(limit->burst)) || nla_put_be32(skb, NFTA_LIMIT_TYPE, htonl(type)) || nla_put_be32(skb, NFTA_LIMIT_FLAGS, htonl(flags))) diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c index 689c17264221..0bb650f4f219 100644 --- a/net/openvswitch/flow_netlink.c +++ b/net/openvswitch/flow_netlink.c @@ -261,7 +261,7 @@ size_t ovs_tun_key_attr_size(void) /* Whenever adding new 
OVS_TUNNEL_KEY_ FIELDS, we should consider * updating this function. */ - return nla_total_size(8) /* OVS_TUNNEL_KEY_ATTR_ID */ + return nla_total_size_64bit(8) /* OVS_TUNNEL_KEY_ATTR_ID */ + nla_total_size(16) /* OVS_TUNNEL_KEY_ATTR_IPV[46]_SRC */ + nla_total_size(16) /* OVS_TUNNEL_KEY_ATTR_IPV[46]_DST */ + nla_total_size(1) /* OVS_TUNNEL_KEY_ATTR_TOS */ @@ -720,7 +720,8 @@ static int __ip_tun_to_nlattr(struct sk_buff *skb, unsigned short tun_proto) { if (output->tun_flags & TUNNEL_KEY && - nla_put_be64(skb, OVS_TUNNEL_KEY_ATTR_ID, output->tun_id)) + nla_put_be64(skb, OVS_TUNNEL_KEY_ATTR_ID, output->tun_id, + OVS_TUNNEL_KEY_ATTR_PAD)) return -EMSGSIZE; switch (tun_proto) { case AF_INET: From e9bbe898cbe89b17ad3993c136aa13d0431cd537 Mon Sep 17 00:00:00 2001 From: Nicolas Dichtel Date: Fri, 22 Apr 2016 17:31:19 +0200 Subject: [PATCH 0917/1649] libnl: nla_put_net64(): align on a 64-bit area nla_data() is now aligned on a 64-bit area. The temporary function nla_put_be64_32bit() is removed in this patch. Signed-off-by: Nicolas Dichtel Signed-off-by: David S. Miller --- include/linux/netfilter/ipset/ip_set.h | 9 ++++++--- include/net/netlink.h | 14 ++++++-------- include/uapi/linux/netfilter/ipset/ip_set.h | 1 + 3 files changed, 13 insertions(+), 11 deletions(-) diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h index f48b8a664b0f..83b9a2e0d8d4 100644 --- a/include/linux/netfilter/ipset/ip_set.h +++ b/include/linux/netfilter/ipset/ip_set.h @@ -351,7 +351,8 @@ ip_set_put_skbinfo(struct sk_buff *skb, struct ip_set_skbinfo *skbinfo) return ((skbinfo->skbmark || skbinfo->skbmarkmask) && nla_put_net64(skb, IPSET_ATTR_SKBMARK, cpu_to_be64((u64)skbinfo->skbmark << 32 | - skbinfo->skbmarkmask))) || + skbinfo->skbmarkmask), + IPSET_ATTR_PAD)) || (skbinfo->skbprio && nla_put_net32(skb, IPSET_ATTR_SKBPRIO, cpu_to_be32(skbinfo->skbprio))) || @@ -374,9 +375,11 @@ static inline bool ip_set_put_counter(struct sk_buff *skb, struct ip_set_counter *counter) { return nla_put_net64(skb, IPSET_ATTR_BYTES, - cpu_to_be64(ip_set_get_bytes(counter))) || + cpu_to_be64(ip_set_get_bytes(counter)), + IPSET_ATTR_PAD) || nla_put_net64(skb, IPSET_ATTR_PACKETS, - cpu_to_be64(ip_set_get_packets(counter))); + cpu_to_be64(ip_set_get_packets(counter)), + IPSET_ATTR_PAD); } static inline void diff --git a/include/net/netlink.h b/include/net/netlink.h index 47d7d1356fa3..066a921e7cbe 100644 --- a/include/net/netlink.h +++ b/include/net/netlink.h @@ -868,20 +868,18 @@ static inline int nla_put_be64(struct sk_buff *skb, int attrtype, __be64 value, return nla_put_64bit(skb, attrtype, sizeof(__be64), &value, padattr); } -static inline int nla_put_be64_32bit(struct sk_buff *skb, int attrtype, - __be64 value) -{ - return nla_put(skb, attrtype, sizeof(__be64), &value); -} /** - * nla_put_net64 - Add 64-bit network byte order netlink attribute to a socket buffer + * nla_put_net64 - Add 64-bit network byte order nlattr to a skb and align it * @skb: socket buffer to add attribute to * @attrtype: attribute type * @value: numeric value + * @padattr: attribute type for the padding */ -static inline int nla_put_net64(struct sk_buff *skb, int attrtype, __be64 value) +static inline int nla_put_net64(struct sk_buff *skb, int attrtype, __be64 value, + int padattr) { - return nla_put_be64_32bit(skb, attrtype | NLA_F_NET_BYTEORDER, value); + return nla_put_be64(skb, attrtype | NLA_F_NET_BYTEORDER, value, + padattr); } /** diff --git a/include/uapi/linux/netfilter/ipset/ip_set.h 
b/include/uapi/linux/netfilter/ipset/ip_set.h index 63b2e34f1b60..ebb5154976de 100644 --- a/include/uapi/linux/netfilter/ipset/ip_set.h +++ b/include/uapi/linux/netfilter/ipset/ip_set.h @@ -118,6 +118,7 @@ enum { IPSET_ATTR_SKBMARK, IPSET_ATTR_SKBPRIO, IPSET_ATTR_SKBQUEUE, + IPSET_ATTR_PAD, __IPSET_ATTR_ADT_MAX, }; #define IPSET_ATTR_ADT_MAX (__IPSET_ATTR_ADT_MAX - 1) From 756a2f59b73cd6ed8afae3f6e8efb3fb829e4600 Mon Sep 17 00:00:00 2001 From: Nicolas Dichtel Date: Fri, 22 Apr 2016 17:31:20 +0200 Subject: [PATCH 0918/1649] libnl: nla_put_s64(): align on a 64-bit area nla_data() is now aligned on a 64-bit area. In fact, there is no user of this function. Signed-off-by: Nicolas Dichtel Signed-off-by: David S. Miller --- include/net/netlink.h | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/include/net/netlink.h b/include/net/netlink.h index 066a921e7cbe..074215a59d19 100644 --- a/include/net/netlink.h +++ b/include/net/netlink.h @@ -102,7 +102,8 @@ * nla_put_s8(skb, type, value) add s8 attribute to skb * nla_put_s16(skb, type, value) add s16 attribute to skb * nla_put_s32(skb, type, value) add s32 attribute to skb - * nla_put_s64(skb, type, value) add s64 attribute to skb + * nla_put_s64(skb, type, value, + * padattr) add s64 attribute to skb * nla_put_string(skb, type, str) add string attribute to skb * nla_put_flag(skb, type) add flag attribute to skb * nla_put_msecs(skb, type, jiffies) add msecs attribute to skb @@ -929,14 +930,16 @@ static inline int nla_put_s32(struct sk_buff *skb, int attrtype, s32 value) } /** - * nla_put_s64 - Add a s64 netlink attribute to a socket buffer + * nla_put_s64 - Add a s64 netlink attribute to a socket buffer and align it * @skb: socket buffer to add attribute to * @attrtype: attribute type * @value: numeric value + * @padattr: attribute type for the padding */ -static inline int nla_put_s64(struct sk_buff *skb, int attrtype, s64 value) +static inline int nla_put_s64(struct sk_buff *skb, int attrtype, s64 value, + int padattr) { - return nla_put(skb, attrtype, sizeof(s64), &value); + return nla_put_64bit(skb, attrtype, sizeof(s64), &value, padattr); } /** From 2175d87cc3561c02e605bc8ac81ee5d875a51b9e Mon Sep 17 00:00:00 2001 From: Nicolas Dichtel Date: Fri, 22 Apr 2016 17:31:21 +0200 Subject: [PATCH 0919/1649] libnl: nla_put_msecs(): align on a 64-bit area nla_data() is now aligned on a 64-bit area. Signed-off-by: Nicolas Dichtel Signed-off-by: David S. 
Miller --- include/net/netlink.h | 11 +++++++---- include/uapi/linux/l2tp.h | 1 + include/uapi/linux/neighbour.h | 2 ++ include/uapi/linux/tcp_metrics.h | 1 + net/core/neighbour.c | 19 ++++++++++--------- net/ipv4/tcp_metrics.c | 6 ++++-- net/l2tp/l2tp_netlink.c | 3 ++- 7 files changed, 27 insertions(+), 16 deletions(-) diff --git a/include/net/netlink.h b/include/net/netlink.h index 074215a59d19..113b483b6ee8 100644 --- a/include/net/netlink.h +++ b/include/net/netlink.h @@ -106,7 +106,8 @@ * padattr) add s64 attribute to skb * nla_put_string(skb, type, str) add string attribute to skb * nla_put_flag(skb, type) add flag attribute to skb - * nla_put_msecs(skb, type, jiffies) add msecs attribute to skb + * nla_put_msecs(skb, type, jiffies, + * padattr) add msecs attribute to skb * nla_put_in_addr(skb, type, addr) add IPv4 address attribute to skb * nla_put_in6_addr(skb, type, addr) add IPv6 address attribute to skb * @@ -965,16 +966,18 @@ static inline int nla_put_flag(struct sk_buff *skb, int attrtype) } /** - * nla_put_msecs - Add a msecs netlink attribute to a socket buffer + * nla_put_msecs - Add a msecs netlink attribute to a skb and align it * @skb: socket buffer to add attribute to * @attrtype: attribute type * @njiffies: number of jiffies to convert to msecs + * @padattr: attribute type for the padding */ static inline int nla_put_msecs(struct sk_buff *skb, int attrtype, - unsigned long njiffies) + unsigned long njiffies, int padattr) { u64 tmp = jiffies_to_msecs(njiffies); - return nla_put(skb, attrtype, sizeof(u64), &tmp); + + return nla_put_64bit(skb, attrtype, sizeof(u64), &tmp, padattr); } /** diff --git a/include/uapi/linux/l2tp.h b/include/uapi/linux/l2tp.h index 347ef22a964e..3386a99e0397 100644 --- a/include/uapi/linux/l2tp.h +++ b/include/uapi/linux/l2tp.h @@ -126,6 +126,7 @@ enum { L2TP_ATTR_IP6_DADDR, /* struct in6_addr */ L2TP_ATTR_UDP_ZERO_CSUM6_TX, /* u8 */ L2TP_ATTR_UDP_ZERO_CSUM6_RX, /* u8 */ + L2TP_ATTR_PAD, __L2TP_ATTR_MAX, }; diff --git a/include/uapi/linux/neighbour.h b/include/uapi/linux/neighbour.h index 788655bfa0f3..bd99a8d80f36 100644 --- a/include/uapi/linux/neighbour.h +++ b/include/uapi/linux/neighbour.h @@ -128,6 +128,7 @@ enum { NDTPA_LOCKTIME, /* u64, msecs */ NDTPA_QUEUE_LENBYTES, /* u32 */ NDTPA_MCAST_REPROBES, /* u32 */ + NDTPA_PAD, __NDTPA_MAX }; #define NDTPA_MAX (__NDTPA_MAX - 1) @@ -160,6 +161,7 @@ enum { NDTA_PARMS, /* nested TLV NDTPA_* */ NDTA_STATS, /* struct ndt_stats, read-only */ NDTA_GC_INTERVAL, /* u64, msecs */ + NDTA_PAD, __NDTA_MAX }; #define NDTA_MAX (__NDTA_MAX - 1) diff --git a/include/uapi/linux/tcp_metrics.h b/include/uapi/linux/tcp_metrics.h index 93533926035c..80ad90d0cfc2 100644 --- a/include/uapi/linux/tcp_metrics.h +++ b/include/uapi/linux/tcp_metrics.h @@ -40,6 +40,7 @@ enum { TCP_METRICS_ATTR_FOPEN_COOKIE, /* binary */ TCP_METRICS_ATTR_SADDR_IPV4, /* u32 */ TCP_METRICS_ATTR_SADDR_IPV6, /* binary */ + TCP_METRICS_ATTR_PAD, __TCP_METRICS_ATTR_MAX, }; diff --git a/net/core/neighbour.c b/net/core/neighbour.c index f18ae91b652e..6a395d440228 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c @@ -1763,21 +1763,22 @@ static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms) NEIGH_VAR(parms, MCAST_PROBES)) || nla_put_u32(skb, NDTPA_MCAST_REPROBES, NEIGH_VAR(parms, MCAST_REPROBES)) || - nla_put_msecs(skb, NDTPA_REACHABLE_TIME, parms->reachable_time) || + nla_put_msecs(skb, NDTPA_REACHABLE_TIME, parms->reachable_time, + NDTPA_PAD) || nla_put_msecs(skb, NDTPA_BASE_REACHABLE_TIME, - NEIGH_VAR(parms, 
BASE_REACHABLE_TIME)) || + NEIGH_VAR(parms, BASE_REACHABLE_TIME), NDTPA_PAD) || nla_put_msecs(skb, NDTPA_GC_STALETIME, - NEIGH_VAR(parms, GC_STALETIME)) || + NEIGH_VAR(parms, GC_STALETIME), NDTPA_PAD) || nla_put_msecs(skb, NDTPA_DELAY_PROBE_TIME, - NEIGH_VAR(parms, DELAY_PROBE_TIME)) || + NEIGH_VAR(parms, DELAY_PROBE_TIME), NDTPA_PAD) || nla_put_msecs(skb, NDTPA_RETRANS_TIME, - NEIGH_VAR(parms, RETRANS_TIME)) || + NEIGH_VAR(parms, RETRANS_TIME), NDTPA_PAD) || nla_put_msecs(skb, NDTPA_ANYCAST_DELAY, - NEIGH_VAR(parms, ANYCAST_DELAY)) || + NEIGH_VAR(parms, ANYCAST_DELAY), NDTPA_PAD) || nla_put_msecs(skb, NDTPA_PROXY_DELAY, - NEIGH_VAR(parms, PROXY_DELAY)) || + NEIGH_VAR(parms, PROXY_DELAY), NDTPA_PAD) || nla_put_msecs(skb, NDTPA_LOCKTIME, - NEIGH_VAR(parms, LOCKTIME))) + NEIGH_VAR(parms, LOCKTIME), NDTPA_PAD)) goto nla_put_failure; return nla_nest_end(skb, nest); @@ -1804,7 +1805,7 @@ static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl, ndtmsg->ndtm_pad2 = 0; if (nla_put_string(skb, NDTA_NAME, tbl->id) || - nla_put_msecs(skb, NDTA_GC_INTERVAL, tbl->gc_interval) || + nla_put_msecs(skb, NDTA_GC_INTERVAL, tbl->gc_interval, NDTA_PAD) || nla_put_u32(skb, NDTA_THRESH1, tbl->gc_thresh1) || nla_put_u32(skb, NDTA_THRESH2, tbl->gc_thresh2) || nla_put_u32(skb, NDTA_THRESH3, tbl->gc_thresh3)) diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c index 7b7eec439906..b617826e2477 100644 --- a/net/ipv4/tcp_metrics.c +++ b/net/ipv4/tcp_metrics.c @@ -800,7 +800,8 @@ static int tcp_metrics_fill_info(struct sk_buff *msg, } if (nla_put_msecs(msg, TCP_METRICS_ATTR_AGE, - jiffies - tm->tcpm_stamp) < 0) + jiffies - tm->tcpm_stamp, + TCP_METRICS_ATTR_PAD) < 0) goto nla_put_failure; if (tm->tcpm_ts_stamp) { if (nla_put_s32(msg, TCP_METRICS_ATTR_TW_TS_STAMP, @@ -864,7 +865,8 @@ static int tcp_metrics_fill_info(struct sk_buff *msg, (nla_put_u16(msg, TCP_METRICS_ATTR_FOPEN_SYN_DROPS, tfom->syn_loss) < 0 || nla_put_msecs(msg, TCP_METRICS_ATTR_FOPEN_SYN_DROP_TS, - jiffies - tfom->last_syn_loss) < 0)) + jiffies - tfom->last_syn_loss, + TCP_METRICS_ATTR_PAD) < 0)) goto nla_put_failure; if (tfom->cookie.len > 0 && nla_put(msg, TCP_METRICS_ATTR_FOPEN_COOKIE, diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c index 2caaa84ce92d..24ed2e875c45 100644 --- a/net/l2tp/l2tp_netlink.c +++ b/net/l2tp/l2tp_netlink.c @@ -746,7 +746,8 @@ static int l2tp_nl_session_send(struct sk_buff *skb, u32 portid, u32 seq, int fl nla_put_u8(skb, L2TP_ATTR_USING_IPSEC, 1)) || #endif (session->reorder_timeout && - nla_put_msecs(skb, L2TP_ATTR_RECV_TIMEOUT, session->reorder_timeout))) + nla_put_msecs(skb, L2TP_ATTR_RECV_TIMEOUT, + session->reorder_timeout, L2TP_ATTR_PAD))) goto nla_put_failure; nest = nla_nest_start(skb, L2TP_ATTR_STATS); From 73520786b0793c612ef4de3e9addb2ec411bea20 Mon Sep 17 00:00:00 2001 From: Nicolas Dichtel Date: Fri, 22 Apr 2016 17:31:22 +0200 Subject: [PATCH 0920/1649] libnl: add nla_put_u64_64bit() helper With this function, nla_data() is aligned on a 64-bit area. Signed-off-by: Nicolas Dichtel Signed-off-by: David S. 
Miller --- include/net/netlink.h | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/include/net/netlink.h b/include/net/netlink.h index 113b483b6ee8..e589cb3dccee 100644 --- a/include/net/netlink.h +++ b/include/net/netlink.h @@ -857,6 +857,19 @@ static inline int nla_put_u64(struct sk_buff *skb, int attrtype, u64 value) return nla_put(skb, attrtype, sizeof(u64), &value); } +/** + * nla_put_u64_64bit - Add a u64 netlink attribute to a skb and align it + * @skb: socket buffer to add attribute to + * @attrtype: attribute type + * @value: numeric value + * @padattr: attribute type for the padding + */ +static inline int nla_put_u64_64bit(struct sk_buff *skb, int attrtype, + u64 value, int padattr) +{ + return nla_put_64bit(skb, attrtype, sizeof(u64), &value, padattr); +} + /** * nla_put_be64 - Add a __be64 netlink attribute to a socket buffer and align it * @skb: socket buffer to add attribute to From de95c4a46a6e608444ccaf541087594553c7df11 Mon Sep 17 00:00:00 2001 From: Nicolas Dichtel Date: Fri, 22 Apr 2016 17:31:23 +0200 Subject: [PATCH 0921/1649] xfrm: align nlattr properly when needed Signed-off-by: Nicolas Dichtel Signed-off-by: David S. Miller --- include/uapi/linux/xfrm.h | 1 + net/xfrm/xfrm_user.c | 10 ++++++---- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/include/uapi/linux/xfrm.h b/include/uapi/linux/xfrm.h index 2cd9e608d0d1..143338978b48 100644 --- a/include/uapi/linux/xfrm.h +++ b/include/uapi/linux/xfrm.h @@ -302,6 +302,7 @@ enum xfrm_attr_type_t { XFRMA_SA_EXTRA_FLAGS, /* __u32 */ XFRMA_PROTO, /* __u8 */ XFRMA_ADDRESS_FILTER, /* struct xfrm_address_filter */ + XFRMA_PAD, __XFRMA_MAX #define XFRMA_MAX (__XFRMA_MAX - 1) diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c index 2cc7af858c6f..d516845e16e3 100644 --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c @@ -809,7 +809,8 @@ static int copy_to_user_state_extra(struct xfrm_state *x, goto out; } if (x->lastused) { - ret = nla_put_u64(skb, XFRMA_LASTUSED, x->lastused); + ret = nla_put_u64_64bit(skb, XFRMA_LASTUSED, x->lastused, + XFRMA_PAD); if (ret) goto out; } @@ -1813,7 +1814,7 @@ static inline size_t xfrm_aevent_msgsize(struct xfrm_state *x) return NLMSG_ALIGN(sizeof(struct xfrm_aevent_id)) + nla_total_size(replay_size) - + nla_total_size(sizeof(struct xfrm_lifetime_cur)) + + nla_total_size_64bit(sizeof(struct xfrm_lifetime_cur)) + nla_total_size(sizeof(struct xfrm_mark)) + nla_total_size(4) /* XFRM_AE_RTHR */ + nla_total_size(4); /* XFRM_AE_ETHR */ @@ -1848,7 +1849,8 @@ static int build_aevent(struct sk_buff *skb, struct xfrm_state *x, const struct } if (err) goto out_cancel; - err = nla_put(skb, XFRMA_LTIME_VAL, sizeof(x->curlft), &x->curlft); + err = nla_put_64bit(skb, XFRMA_LTIME_VAL, sizeof(x->curlft), &x->curlft, + XFRMA_PAD); if (err) goto out_cancel; @@ -2617,7 +2619,7 @@ static inline size_t xfrm_sa_len(struct xfrm_state *x) l += nla_total_size(sizeof(x->props.extra_flags)); /* Must count x->lastused as it may become non-zero behind our back. */ - l += nla_total_size(sizeof(u64)); + l += nla_total_size_64bit(sizeof(u64)); return l; } From 80df554275c21edca22ece02448bdb378c2ee9f1 Mon Sep 17 00:00:00 2001 From: Nicolas Dichtel Date: Fri, 22 Apr 2016 17:31:24 +0200 Subject: [PATCH 0922/1649] taskstats: use the libnl API to align nlattr on 64-bit Goal of this patch is to use the new libnl API to align netlink attribute when needed. 
The layout of the netlink message will be a bit different after the patch, because the padattr (TASKSTATS_TYPE_STATS) will be inside the nested attribute instead of before it. Signed-off-by: Nicolas Dichtel Signed-off-by: David S. Miller --- kernel/taskstats.c | 37 +++++-------------------------------- 1 file changed, 5 insertions(+), 32 deletions(-) diff --git a/kernel/taskstats.c b/kernel/taskstats.c index 21f82c29c914..b3f05ee20d18 100644 --- a/kernel/taskstats.c +++ b/kernel/taskstats.c @@ -357,10 +357,6 @@ static int parse(struct nlattr *na, struct cpumask *mask) return ret; } -#if defined(CONFIG_64BIT) && !defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) -#define TASKSTATS_NEEDS_PADDING 1 -#endif - static struct taskstats *mk_reply(struct sk_buff *skb, int type, u32 pid) { struct nlattr *na, *ret; @@ -370,29 +366,6 @@ static struct taskstats *mk_reply(struct sk_buff *skb, int type, u32 pid) ? TASKSTATS_TYPE_AGGR_PID : TASKSTATS_TYPE_AGGR_TGID; - /* - * The taskstats structure is internally aligned on 8 byte - * boundaries but the layout of the aggregrate reply, with - * two NLA headers and the pid (each 4 bytes), actually - * force the entire structure to be unaligned. This causes - * the kernel to issue unaligned access warnings on some - * architectures like ia64. Unfortunately, some software out there - * doesn't properly unroll the NLA packet and assumes that the start - * of the taskstats structure will always be 20 bytes from the start - * of the netlink payload. Aligning the start of the taskstats - * structure breaks this software, which we don't want. So, for now - * the alignment only happens on architectures that require it - * and those users will have to update to fixed versions of those - * packages. Space is reserved in the packet only when needed. - * This ifdef should be removed in several years e.g. 2012 once - * we can be confident that fixed versions are installed on most - * systems. We add the padding before the aggregate since the - * aggregate is already a defined type. - */ -#ifdef TASKSTATS_NEEDS_PADDING - if (nla_put(skb, TASKSTATS_TYPE_NULL, 0, NULL) < 0) - goto err; -#endif na = nla_nest_start(skb, aggr); if (!na) goto err; @@ -401,7 +374,8 @@ static struct taskstats *mk_reply(struct sk_buff *skb, int type, u32 pid) nla_nest_cancel(skb, na); goto err; } - ret = nla_reserve(skb, TASKSTATS_TYPE_STATS, sizeof(struct taskstats)); + ret = nla_reserve_64bit(skb, TASKSTATS_TYPE_STATS, + sizeof(struct taskstats), TASKSTATS_TYPE_NULL); if (!ret) { nla_nest_cancel(skb, na); goto err; @@ -500,10 +474,9 @@ static size_t taskstats_packet_size(void) size_t size; size = nla_total_size(sizeof(u32)) + - nla_total_size(sizeof(struct taskstats)) + nla_total_size(0); -#ifdef TASKSTATS_NEEDS_PADDING - size += nla_total_size(0); /* Padding for alignment */ -#endif + nla_total_size_64bit(sizeof(struct taskstats)) + + nla_total_size(0); + return size; } From b51e13faf73fcc799a41ed085069f07203d8e7b7 Mon Sep 17 00:00:00 2001 From: Martin KaFai Lau Date: Tue, 19 Apr 2016 22:50:47 -0700 Subject: [PATCH 0923/1649] tcp: Carry txstamp_ack in tcp_fragment_tstamp When a tcp skb is sliced into two smaller skbs (e.g. in tcp_fragment() and tso_fragment()), it does not carry the txstamp_ack bit to the newly created skb if it is needed. The end result is a timestamping event (SCM_TSTAMP_ACK) will be missing from the sk->sk_error_queue. This patch carries this bit to the new skb2 in tcp_fragment_tstamp(). BPF Output Before: ~~~~~~ BPF Output After: ~~~~~~ <...>-2050 [000] d.s. 
100.928763: : ee_data:14599 Packetdrill Script: ~~~~~~ +0 `sysctl -q -w net.ipv4.tcp_min_tso_segs=10` +0 `sysctl -q -w net.ipv4.tcp_no_metrics_save=1` +0 socket(..., SOCK_STREAM, IPPROTO_TCP) = 3 +0 setsockopt(3, SOL_SOCKET, SO_REUSEADDR, [1], 4) = 0 +0 bind(3, ..., ...) = 0 +0 listen(3, 1) = 0 0.100 < S 0:0(0) win 32792 0.100 > S. 0:0(0) ack 1 0.200 < . 1:1(0) ack 1 win 257 0.200 accept(3, ..., ...) = 4 +0 setsockopt(4, SOL_TCP, TCP_NODELAY, [1], 4) = 0 +0 setsockopt(4, SOL_SOCKET, 37, [2688], 4) = 0 0.200 write(4, ..., 14600) = 14600 +0 setsockopt(4, SOL_SOCKET, 37, [2176], 4) = 0 0.200 > . 1:7301(7300) ack 1 0.200 > P. 7301:14601(7300) ack 1 0.300 < . 1:1(0) ack 14601 win 257 0.300 close(4) = 0 0.300 > F. 14601:14601(0) ack 1 0.400 < F. 1:1(0) ack 16062 win 257 0.400 > . 14602:14602(0) ack 2 Signed-off-by: Martin KaFai Lau Cc: Eric Dumazet Cc: Neal Cardwell Cc: Soheil Hassas Yeganeh Cc: Willem de Bruijn Cc: Yuchung Cheng Acked-by: Soheil Hassas Yeganeh Tested-by: Soheil Hassas Yeganeh Acked-by: Willem de Bruijn Signed-off-by: David S. Miller --- net/ipv4/tcp_output.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 96182a2aabc9..f7c3bc053d27 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -1123,6 +1123,8 @@ static void tcp_fragment_tstamp(struct sk_buff *skb, struct sk_buff *skb2) shinfo->tx_flags &= ~tsflags; shinfo2->tx_flags |= tsflags; swap(shinfo->tskey, shinfo2->tskey); + TCP_SKB_CB(skb2)->txstamp_ack = TCP_SKB_CB(skb)->txstamp_ack; + TCP_SKB_CB(skb)->txstamp_ack = 0; } } From 2de8023e7bb288e0bfbe0325a7690d32dc670873 Mon Sep 17 00:00:00 2001 From: Martin KaFai Lau Date: Tue, 19 Apr 2016 22:50:48 -0700 Subject: [PATCH 0924/1649] tcp: Merge txstamp_ack in tcp_skb_collapse_tstamp When collapsing skbs, txstamp_ack also needs to be merged. Retrans Collapse Test: ~~~~~~ 0.200 accept(3, ..., ...) = 4 +0 setsockopt(4, SOL_TCP, TCP_NODELAY, [1], 4) = 0 0.200 write(4, ..., 730) = 730 +0 setsockopt(4, SOL_SOCKET, 37, [2688], 4) = 0 0.200 write(4, ..., 730) = 730 +0 setsockopt(4, SOL_SOCKET, 37, [2176], 4) = 0 0.200 write(4, ..., 11680) = 11680 0.200 > P. 1:731(730) ack 1 0.200 > P. 731:1461(730) ack 1 0.200 > . 1461:8761(7300) ack 1 0.200 > P. 8761:13141(4380) ack 1 0.300 < . 1:1(0) ack 1 win 257 0.300 < . 1:1(0) ack 1 win 257 0.300 < . 1:1(0) ack 1 win 257 0.300 > P. 1:1461(1460) ack 1 0.400 < . 1:1(0) ack 13141 win 257 BPF Output Before: ~~~~~ BPF Output After: ~~~~~ <...>-2027 [007] d.s. 79.765921: : ee_data:1459 Sacks Collapse Test: ~~~~~ 0.200 accept(3, ..., ...) = 4 +0 setsockopt(4, SOL_TCP, TCP_NODELAY, [1], 4) = 0 0.200 write(4, ..., 1460) = 1460 +0 setsockopt(4, SOL_SOCKET, 37, [2688], 4) = 0 0.200 write(4, ..., 13140) = 13140 +0 setsockopt(4, SOL_SOCKET, 37, [2176], 4) = 0 0.200 > P. 1:1461(1460) ack 1 0.200 > . 1461:8761(7300) ack 1 0.200 > P. 8761:14601(5840) ack 1 0.300 < . 1:1(0) ack 1 win 257 0.300 > P. 1:1461(1460) ack 1 0.400 < . 1:1(0) ack 14601 win 257 BPF Output Before: ~~~~~ BPF Output After: ~~~~~ <...>-2049 [007] d.s. 89.185538: : ee_data:14599 Signed-off-by: Martin KaFai Lau Cc: Eric Dumazet Cc: Neal Cardwell Cc: Soheil Hassas Yeganeh Cc: Willem de Bruijn Cc: Yuchung Cheng Acked-by: Soheil Hassas Yeganeh Tested-by: Soheil Hassas Yeganeh Signed-off-by: David S. 
Miller --- net/ipv4/tcp_output.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index f7c3bc053d27..a6e4a8353b02 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -2454,6 +2454,8 @@ void tcp_skb_collapse_tstamp(struct sk_buff *skb, shinfo->tx_flags |= tsflags; shinfo->tskey = next_shinfo->tskey; + TCP_SKB_CB(skb)->txstamp_ack |= + TCP_SKB_CB(next_skb)->txstamp_ack; } } From 8cee83dd29dea4e7d27fda3b170381059f628868 Mon Sep 17 00:00:00 2001 From: Parthasarathy Bhuvaragan Date: Thu, 21 Apr 2016 15:51:13 +0200 Subject: [PATCH 0925/1649] tipc: fix stale links after re-enabling bearer Commit 42b18f605fea ("tipc: refactor function tipc_link_timeout()"), introduced a bug which prevents sending of probe messages during link synchronization phase. This leads to hanging links, if the bearer is disabled/enabled after links are up. In this commit, we send the probe messages correctly. Fixes: 42b18f605fea ("tipc: refactor function tipc_link_timeout()") Acked-by: Jon Maloy Signed-off-by: Parthasarathy Bhuvaragan Signed-off-by: David S. Miller --- net/tipc/link.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/net/tipc/link.c b/net/tipc/link.c index 2e28a7d7e802..7059c94f33c5 100644 --- a/net/tipc/link.c +++ b/net/tipc/link.c @@ -721,8 +721,7 @@ int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq) mtyp = STATE_MSG; state = bc_acked != bc_snt; probe = l->silent_intv_cnt; - if (probe) - l->silent_intv_cnt++; + l->silent_intv_cnt++; break; case LINK_RESET: setup = l->rst_cnt++ <= 4; From 10d3be569243def8d92ac3722395ef5a59c504e6 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Thu, 21 Apr 2016 10:55:23 -0700 Subject: [PATCH 0926/1649] tcp-tso: do not split TSO packets at retransmit time Linux TCP stack painfully segments all TSO/GSO packets before retransmits. This was fine back in the days when TSO/GSO were emerging, with their bugs, but we believe the dark age is over. Keeping big packets in write queues, but also in stack traversal has a lot of benefits. - Less memory overhead, because write queues have less skbs - Less cpu overhead at ACK processing. - Better SACK processing, as lot of studies mentioned how awful linux was at this ;) - Less cpu overhead to send the rtx packets (IP stack traversal, netfilter traversal, drivers...) - Better latencies in presence of losses. - Smaller spikes in fq like packet schedulers, as retransmits are not constrained by TCP Small Queues. 1 % packet losses are common today, and at 100Gbit speeds, this translates to ~80,000 losses per second. Losses are often correlated, and we see many retransmit events leading to 1-MSS train of packets, at the time hosts are already under stress. Signed-off-by: Eric Dumazet Acked-by: Yuchung Cheng Signed-off-by: David S. 
Miller --- include/net/tcp.h | 4 +-- net/ipv4/tcp_input.c | 2 +- net/ipv4/tcp_output.c | 64 ++++++++++++++++++++----------------------- net/ipv4/tcp_timer.c | 4 +-- 4 files changed, 34 insertions(+), 40 deletions(-) diff --git a/include/net/tcp.h b/include/net/tcp.h index c0ef0544dfcf..7f2553da10d1 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -538,8 +538,8 @@ __u32 cookie_v6_init_sequence(const struct sk_buff *skb, __u16 *mss); void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss, int nonagle); bool tcp_may_send_now(struct sock *sk); -int __tcp_retransmit_skb(struct sock *, struct sk_buff *); -int tcp_retransmit_skb(struct sock *, struct sk_buff *); +int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs); +int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs); void tcp_retransmit_timer(struct sock *sk); void tcp_xmit_retransmit_queue(struct sock *); void tcp_simple_retransmit(struct sock *); diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 75e8336f6ecd..dcad8f9f96eb 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -5545,7 +5545,7 @@ static bool tcp_rcv_fastopen_synack(struct sock *sk, struct sk_buff *synack, if (data) { /* Retransmit unacked data in SYN */ tcp_for_write_queue_from(data, sk) { if (data == tcp_send_head(sk) || - __tcp_retransmit_skb(sk, data)) + __tcp_retransmit_skb(sk, data, 1)) break; } tcp_rearm_rto(sk); diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index a6e4a8353b02..9d3b4b364652 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -2268,7 +2268,7 @@ void tcp_send_loss_probe(struct sock *sk) if (WARN_ON(!skb || !tcp_skb_pcount(skb))) goto rearm_timer; - if (__tcp_retransmit_skb(sk, skb)) + if (__tcp_retransmit_skb(sk, skb, 1)) goto rearm_timer; /* Record snd_nxt for loss detection. */ @@ -2571,17 +2571,17 @@ static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *to, * state updates are done by the caller. Returns non-zero if an * error occurred which prevented the send. */ -int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb) +int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs) { - struct tcp_sock *tp = tcp_sk(sk); struct inet_connection_sock *icsk = inet_csk(sk); + struct tcp_sock *tp = tcp_sk(sk); unsigned int cur_mss; - int err; + int diff, len, err; - /* Inconslusive MTU probe */ - if (icsk->icsk_mtup.probe_size) { + + /* Inconclusive MTU probe */ + if (icsk->icsk_mtup.probe_size) icsk->icsk_mtup.probe_size = 0; - } /* Do not sent more than we queued. 1/4 is reserved for possible * copying overhead: fragmentation, tunneling, mangling etc. @@ -2614,30 +2614,27 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb) TCP_SKB_CB(skb)->seq != tp->snd_una) return -EAGAIN; - if (skb->len > cur_mss) { - if (tcp_fragment(sk, skb, cur_mss, cur_mss, GFP_ATOMIC)) + len = cur_mss * segs; + if (skb->len > len) { + if (tcp_fragment(sk, skb, len, cur_mss, GFP_ATOMIC)) return -ENOMEM; /* We'll try again later. 
*/ } else { - int oldpcount = tcp_skb_pcount(skb); + if (skb_unclone(skb, GFP_ATOMIC)) + return -ENOMEM; - if (unlikely(oldpcount > 1)) { - if (skb_unclone(skb, GFP_ATOMIC)) - return -ENOMEM; - tcp_init_tso_segs(skb, cur_mss); - tcp_adjust_pcount(sk, skb, oldpcount - tcp_skb_pcount(skb)); - } + diff = tcp_skb_pcount(skb); + tcp_set_skb_tso_segs(skb, cur_mss); + diff -= tcp_skb_pcount(skb); + if (diff) + tcp_adjust_pcount(sk, skb, diff); + if (skb->len < cur_mss) + tcp_retrans_try_collapse(sk, skb, cur_mss); } /* RFC3168, section 6.1.1.1. ECN fallback */ if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN_ECN) == TCPHDR_SYN_ECN) tcp_ecn_clear_syn(sk, skb); - tcp_retrans_try_collapse(sk, skb, cur_mss); - - /* Make a copy, if the first transmission SKB clone we made - * is still in somebody's hands, else make a clone. - */ - /* make sure skb->data is aligned on arches that require it * and check if ack-trimming & collapsing extended the headroom * beyond what csum_start can cover. @@ -2653,20 +2650,22 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb) } if (likely(!err)) { + segs = tcp_skb_pcount(skb); + TCP_SKB_CB(skb)->sacked |= TCPCB_EVER_RETRANS; /* Update global TCP statistics. */ - TCP_INC_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS); + TCP_ADD_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS, segs); if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN) NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSYNRETRANS); - tp->total_retrans++; + tp->total_retrans += segs; } return err; } -int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb) +int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs) { struct tcp_sock *tp = tcp_sk(sk); - int err = __tcp_retransmit_skb(sk, skb); + int err = __tcp_retransmit_skb(sk, skb, segs); if (err == 0) { #if FASTRETRANS_DEBUG > 0 @@ -2757,6 +2756,7 @@ void tcp_xmit_retransmit_queue(struct sock *sk) tcp_for_write_queue_from(skb, sk) { __u8 sacked = TCP_SKB_CB(skb)->sacked; + int segs; if (skb == tcp_send_head(sk)) break; @@ -2764,14 +2764,8 @@ void tcp_xmit_retransmit_queue(struct sock *sk) if (!hole) tp->retransmit_skb_hint = skb; - /* Assume this retransmit will generate - * only one packet for congestion window - * calculation purposes. This works because - * tcp_retransmit_skb() will chop up the - * packet to be MSS sized and all the - * packet counting works out. - */ - if (tcp_packets_in_flight(tp) >= tp->snd_cwnd) + segs = tp->snd_cwnd - tcp_packets_in_flight(tp); + if (segs <= 0) return; if (fwd_rexmitting) { @@ -2808,7 +2802,7 @@ begin_fwd: if (sacked & (TCPCB_SACKED_ACKED|TCPCB_SACKED_RETRANS)) continue; - if (tcp_retransmit_skb(sk, skb)) + if (tcp_retransmit_skb(sk, skb, segs)) return; NET_INC_STATS_BH(sock_net(sk), mib_idx); diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c index 49bc474f8e35..373b03e78aaa 100644 --- a/net/ipv4/tcp_timer.c +++ b/net/ipv4/tcp_timer.c @@ -404,7 +404,7 @@ void tcp_retransmit_timer(struct sock *sk) goto out; } tcp_enter_loss(sk); - tcp_retransmit_skb(sk, tcp_write_queue_head(sk)); + tcp_retransmit_skb(sk, tcp_write_queue_head(sk), 1); __sk_dst_reset(sk); goto out_reset_timer; } @@ -436,7 +436,7 @@ void tcp_retransmit_timer(struct sock *sk) tcp_enter_loss(sk); - if (tcp_retransmit_skb(sk, tcp_write_queue_head(sk)) > 0) { + if (tcp_retransmit_skb(sk, tcp_write_queue_head(sk), 1) > 0) { /* Retransmission failed because of local congestion, * do not backoff. 
*/ From 77f192af721440a9d91365438be6ecb98edd0310 Mon Sep 17 00:00:00 2001 From: Emil Tantilov Date: Fri, 18 Mar 2016 16:11:14 -0700 Subject: [PATCH 0927/1649] ixgbe: consolidate the configuration of spoof checking Consolidate the logic behind configuring spoof checking: Move the setting of the MAC, VLAN and Ethertype spoof checking into ixgbe_ndo_set_vf_spoofchk(). Change ixgbe_set_mac_anti_spoofing() to set MAC spoofing per VF similar to the VLAN and Ethertype functions - this allows us to call the helper functions in ixgbe_ndo_set_vf_spoofchk() for all spoof check types and only disable MAC spoof checking when creating MACVLAN. Signed-off-by: Emil Tantilov Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- .../net/ethernet/intel/ixgbe/ixgbe_common.c | 40 +++++------------- .../net/ethernet/intel/ixgbe/ixgbe_common.h | 2 +- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 30 ++------------ .../net/ethernet/intel/ixgbe/ixgbe_sriov.c | 41 ++++++++++++------- 4 files changed, 41 insertions(+), 72 deletions(-) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c index 737443a015d5..c9dffa6101b8 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c @@ -3310,43 +3310,25 @@ wwn_prefix_err: /** * ixgbe_set_mac_anti_spoofing - Enable/Disable MAC anti-spoofing * @hw: pointer to hardware structure - * @enable: enable or disable switch for anti-spoofing - * @pf: Physical Function pool - do not enable anti-spoofing for the PF + * @enable: enable or disable switch for MAC anti-spoofing + * @vf: Virtual Function pool - VF Pool to set for MAC anti-spoofing * **/ -void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int pf) +void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf) { - int j; - int pf_target_reg = pf >> 3; - int pf_target_shift = pf % 8; - u32 pfvfspoof = 0; + int vf_target_reg = vf >> 3; + int vf_target_shift = vf % 8; + u32 pfvfspoof; if (hw->mac.type == ixgbe_mac_82598EB) return; + pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg)); if (enable) - pfvfspoof = IXGBE_SPOOF_MACAS_MASK; - - /* - * PFVFSPOOF register array is size 8 with 8 bits assigned to - * MAC anti-spoof enables in each register array element. - */ - for (j = 0; j < pf_target_reg; j++) - IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), pfvfspoof); - - /* - * The PF should be allowed to spoof so that it can support - * emulation mode NICs. Do not set the bits assigned to the PF - */ - pfvfspoof &= (1 << pf_target_shift) - 1; - IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), pfvfspoof); - - /* - * Remaining pools belong to the PF so they do not need to have - * anti-spoofing enabled. 
- */ - for (j++; j < IXGBE_PFVFSPOOF_REG_COUNT; j++) - IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), 0); + pfvfspoof |= BIT(vf_target_shift); + else + pfvfspoof &= ~BIT(vf_target_shift); + IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof); } /** diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h index 6f8e6a56e242..6d4c260d0cbd 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h @@ -106,7 +106,7 @@ s32 prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked); s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index); s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index); -void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int pf); +void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf); void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf); s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps); s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index b2f2cf40f06a..657e999befcb 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -3776,34 +3776,10 @@ static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter) IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext); - - /* Enable MAC Anti-Spoofing */ - hw->mac.ops.set_mac_anti_spoofing(hw, (adapter->num_vfs != 0), - adapter->num_vfs); - - /* Ensure LLDP and FC is set for Ethertype Antispoofing if we will be - * calling set_ethertype_anti_spoofing for each VF in loop below - */ - if (hw->mac.ops.set_ethertype_anti_spoofing) { - IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_LLDP), - (IXGBE_ETQF_FILTER_EN | - IXGBE_ETQF_TX_ANTISPOOF | - IXGBE_ETH_P_LLDP)); - - IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FC), - (IXGBE_ETQF_FILTER_EN | - IXGBE_ETQF_TX_ANTISPOOF | - ETH_P_PAUSE)); - } - - /* For VFs that have spoof checking turned off */ for (i = 0; i < adapter->num_vfs; i++) { - if (!adapter->vfinfo[i].spoofchk_enabled) - ixgbe_ndo_set_vf_spoofchk(adapter->netdev, i, false); - - /* enable ethertype anti spoofing if hw supports it */ - if (hw->mac.ops.set_ethertype_anti_spoofing) - hw->mac.ops.set_ethertype_anti_spoofing(hw, true, i); + /* configure spoof checking */ + ixgbe_ndo_set_vf_spoofchk(adapter->netdev, i, + adapter->vfinfo[i].spoofchk_enabled); /* Enable/Disable RSS query feature */ ixgbe_ndo_set_vf_rss_query_en(adapter->netdev, i, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c index adcf00002483..cc635ced15d6 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c @@ -964,8 +964,11 @@ static int ixgbe_set_vf_macvlan_msg(struct ixgbe_adapter *adapter, * If the VF is allowed to set MAC filters then turn off * anti-spoofing to avoid false positives. 
*/ - if (adapter->vfinfo[vf].spoofchk_enabled) - ixgbe_ndo_set_vf_spoofchk(adapter->netdev, vf, false); + if (adapter->vfinfo[vf].spoofchk_enabled) { + struct ixgbe_hw *hw = &adapter->hw; + + hw->mac.ops.set_mac_anti_spoofing(hw, false, vf); + } } err = ixgbe_set_vf_macvlan(adapter, vf, index, new_mac); @@ -1525,27 +1528,35 @@ int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int min_tx_rate, int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting) { struct ixgbe_adapter *adapter = netdev_priv(netdev); - int vf_target_reg = vf >> 3; - int vf_target_shift = vf % 8; struct ixgbe_hw *hw = &adapter->hw; - u32 regval; if (vf >= adapter->num_vfs) return -EINVAL; adapter->vfinfo[vf].spoofchk_enabled = setting; - regval = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg)); - regval &= ~(1 << vf_target_shift); - regval |= (setting << vf_target_shift); - IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), regval); + /* configure MAC spoofing */ + hw->mac.ops.set_mac_anti_spoofing(hw, setting, vf); - if (adapter->vfinfo[vf].vlan_count) { - vf_target_shift += IXGBE_SPOOF_VLANAS_SHIFT; - regval = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg)); - regval &= ~(1 << vf_target_shift); - regval |= (setting << vf_target_shift); - IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), regval); + /* configure VLAN spoofing */ + if (adapter->vfinfo[vf].vlan_count) + hw->mac.ops.set_vlan_anti_spoofing(hw, setting, vf); + + /* Ensure LLDP and FC is set for Ethertype Antispoofing if we will be + * calling set_ethertype_anti_spoofing for each VF in loop below + */ + if (hw->mac.ops.set_ethertype_anti_spoofing) { + IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_LLDP), + (IXGBE_ETQF_FILTER_EN | + IXGBE_ETQF_TX_ANTISPOOF | + IXGBE_ETH_P_LLDP)); + + IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FC), + (IXGBE_ETQF_FILTER_EN | + IXGBE_ETQF_TX_ANTISPOOF | + ETH_P_PAUSE)); + + hw->mac.ops.set_ethertype_anti_spoofing(hw, setting, vf); } return 0; From d3dec7c7c03351ae006f698501b523e7b1a38b3d Mon Sep 17 00:00:00 2001 From: Emil Tantilov Date: Fri, 18 Mar 2016 16:11:19 -0700 Subject: [PATCH 0928/1649] ixgbe: set VLAN spoof checking unconditionally Previously the PF driver would only set VLAN spoof checking if the VF had created VLANs. This was done by setting and checking a counter (vlan_count) whenever a VLAN was created by the VF. However it is possible for the vlan_count to be !=0 while there are no VLANs assigned to the VF due to the count incrementing every time a VLAN 0 is added on ifdown/up, which resulted in VLAN spoofing always being set for those VFs. This patch cleans up the logic by unconditionally setting VLAN based on how the VF is configured (via ip link set ethX vf Y spoofchk on/off). This change also resolves an issue where the VLAN spoofing can remain set even after being disabled by the user due to the driver enabling VLAN spoof checking every time a VLAN is added to the VF, but would only allow changes in the setting if vlan_count != 0. Also default_vf_vlan_id and vlans_enabled were removed from the vf_data_storage structure since they are not being used in the driver. 
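For clarity, the combined effect of this change and the earlier spoof-check consolidation can be summarized with the condensed sketch below. The function name is illustrative only; the authoritative code is ixgbe_ndo_set_vf_spoofchk() in the diff that follows, and the ops callbacks are the ones already used by the driver.

  /* Condensed sketch, not part of the patch */
  static int example_set_vf_spoofchk(struct ixgbe_adapter *adapter, int vf,
                                     bool setting)
  {
      struct ixgbe_hw *hw = &adapter->hw;

      if (vf >= adapter->num_vfs)
          return -EINVAL;

      adapter->vfinfo[vf].spoofchk_enabled = setting;

      /* MAC and VLAN anti-spoofing now follow the administrator setting
       * (ip link set ethX vf Y spoofchk on/off) directly; no vlan_count
       * bookkeeping is consulted anymore.
       */
      hw->mac.ops.set_mac_anti_spoofing(hw, setting, vf);
      hw->mac.ops.set_vlan_anti_spoofing(hw, setting, vf);

      return 0;
  }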
Signed-off-by: Emil Tantilov Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe.h | 3 --- .../net/ethernet/intel/ixgbe/ixgbe_sriov.c | 25 ++----------------- 2 files changed, 2 insertions(+), 26 deletions(-) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index d10ed62993c1..61d8110d26c7 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -143,14 +143,11 @@ struct vf_data_storage { unsigned char vf_mac_addresses[ETH_ALEN]; u16 vf_mc_hashes[IXGBE_MAX_VF_MC_ENTRIES]; u16 num_vf_mc_hashes; - u16 default_vf_vlan_id; - u16 vlans_enabled; bool clear_to_send; bool pf_set_mac; u16 pf_vlan; /* When set, guest VLAN config not allowed. */ u16 pf_qos; u16 tx_rate; - u16 vlan_count; u8 spoofchk_enabled; bool rss_query_enabled; u8 trusted; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c index cc635ced15d6..e9f2558e65fc 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c @@ -908,8 +908,6 @@ static int ixgbe_set_vf_vlan_msg(struct ixgbe_adapter *adapter, u32 add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT; u32 vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK); u8 tcs = netdev_get_num_tc(adapter->netdev); - struct ixgbe_hw *hw = &adapter->hw; - int err; if (adapter->vfinfo[vf].pf_vlan || tcs) { e_warn(drv, @@ -923,19 +921,7 @@ static int ixgbe_set_vf_vlan_msg(struct ixgbe_adapter *adapter, if (!vid && !add) return 0; - err = ixgbe_set_vf_vlan(adapter, add, vid, vf); - if (err) - return err; - - if (adapter->vfinfo[vf].spoofchk_enabled) - hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf); - - if (add) - adapter->vfinfo[vf].vlan_count++; - else if (adapter->vfinfo[vf].vlan_count) - adapter->vfinfo[vf].vlan_count--; - - return 0; + return ixgbe_set_vf_vlan(adapter, add, vid, vf); } static int ixgbe_set_vf_macvlan_msg(struct ixgbe_adapter *adapter, @@ -1324,9 +1310,6 @@ static int ixgbe_enable_port_vlan(struct ixgbe_adapter *adapter, int vf, ixgbe_set_vmvir(adapter, vlan, qos, vf); ixgbe_set_vmolr(hw, vf, false); - if (adapter->vfinfo[vf].spoofchk_enabled) - hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf); - adapter->vfinfo[vf].vlan_count++; /* enable hide vlan on X550 */ if (hw->mac.type >= ixgbe_mac_X550) @@ -1359,9 +1342,6 @@ static int ixgbe_disable_port_vlan(struct ixgbe_adapter *adapter, int vf) ixgbe_set_vf_vlan(adapter, true, 0, vf); ixgbe_clear_vmvir(adapter, vf); ixgbe_set_vmolr(hw, vf, true); - hw->mac.ops.set_vlan_anti_spoofing(hw, false, vf); - if (adapter->vfinfo[vf].vlan_count) - adapter->vfinfo[vf].vlan_count--; /* disable hide VLAN on X550 */ if (hw->mac.type >= ixgbe_mac_X550) @@ -1539,8 +1519,7 @@ int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting) hw->mac.ops.set_mac_anti_spoofing(hw, setting, vf); /* configure VLAN spoofing */ - if (adapter->vfinfo[vf].vlan_count) - hw->mac.ops.set_vlan_anti_spoofing(hw, setting, vf); + hw->mac.ops.set_vlan_anti_spoofing(hw, setting, vf); /* Ensure LLDP and FC is set for Ethertype Antispoofing if we will be * calling set_ethertype_anti_spoofing for each VF in loop below From 4695886c644e48a02ca9d4c146a7ec4de8f2d2d8 Mon Sep 17 00:00:00 2001 From: Emil Tantilov Date: Thu, 24 Mar 2016 09:58:40 -0700 Subject: [PATCH 0929/1649] ixgbe: fix default mac->ops.setup_link for X550EM X550EM_a/x did not have a default value for mac->ops.setup_link which was causing 
link issues for backplane devices. This patch sets mac->ops.setup_link to ixgbe_setup_mac_link_X540 for X550EM_a/x which is also default for X550. This will result in mac->ops.setup_link calling the link setup function for the respective PHY type in case we do not need a special function to deal with it. Reported-by: Ken Cox Signed-off-by: Emil Tantilov Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c index c71e93ed4451..ea25f001f3bb 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c @@ -2908,7 +2908,7 @@ static const struct ixgbe_mac_operations mac_ops_X550EM_x = { .get_media_type = &ixgbe_get_media_type_X550em, .get_san_mac_addr = NULL, .get_wwn_prefix = NULL, - .setup_link = NULL, /* defined later */ + .setup_link = &ixgbe_setup_mac_link_X540, .get_link_capabilities = &ixgbe_get_link_capabilities_X550em, .get_bus_info = &ixgbe_get_bus_info_X550em, .setup_sfp = ixgbe_setup_sfp_modules_X550em, @@ -2926,7 +2926,7 @@ static struct ixgbe_mac_operations mac_ops_x550em_a = { .get_media_type = ixgbe_get_media_type_X550em, .get_san_mac_addr = NULL, .get_wwn_prefix = NULL, - .setup_link = NULL, /* defined later */ + .setup_link = &ixgbe_setup_mac_link_X540, .get_link_capabilities = ixgbe_get_link_capabilities_X550em, .get_bus_info = ixgbe_get_bus_info_X550em, .setup_sfp = ixgbe_setup_sfp_modules_X550em, From 2a9ed5d1fc5e7e88a22da2d85bbaf6fc5b4c2fb8 Mon Sep 17 00:00:00 2001 From: Sridhar Samudrala Date: Fri, 1 Apr 2016 10:34:38 -0700 Subject: [PATCH 0930/1649] ixgbe: make 'action' field in struct ixgbe_fdir_filter a u64 value This field is used to record the RX queue index for a redirect action passed via ring_cookie field in struct ethtool_rx_flow_spec which is a u64 value. For ex: after adding a filter rule to redirect to a VF using ethtool # echo 4 > /sys/class/net/p4p1/device/sriov_numvfs # ethtool -N p4p1 flow-type ip4 src-ip 192.168.0.1 action 0x100000000 querying for the rule shows the Action as 'Direct to queue 0' # ethtool -n p4p1 4 RX rings available Total 1 rules Filter: 2045 Rule Type: Raw IPv4 Src IP addr: 192.168.0.1 mask: 0.0.0.0 Dest IP addr: 0.0.0.0 mask: 255.255.255.255 TOS: 0x0 mask: 0xff Protocol: 0 mask: 0xff L4 bytes: 0x0 mask: 0xffffffff VLAN EtherType: 0x0 mask: 0xffff VLAN: 0x0 mask: 0xffff User-defined: 0x0 mask: 0xffffffffffffffff Action: Direct to queue 0 With this fix, ethtool will report the right queue index even for VFs. Action: Direct to queue 4294967296 Here 4294967296 corresponds to 0x100000000. We need to update 'ethtool' to report the queue index as a Hex value so that it is more user friendly and matches with the 'action' value that is passed when adding the rule. 
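The truncation being fixed is easy to reproduce in user space. The stand-alone snippet below is illustrative only (it is not driver code); it shows why a 16-bit 'action' cannot carry a ring_cookie whose VF selector sits above bit 31:

  #include <stdio.h>
  #include <stdint.h>

  int main(void)
  {
      /* action 0x100000000 from the example above: VF selector set, queue 0 */
      uint64_t ring_cookie = 0x100000000ULL;
      uint16_t old_action = (uint16_t)ring_cookie;  /* what a u16 field keeps */
      uint64_t new_action = ring_cookie;            /* what a u64 field keeps */

      printf("u16 action: 0x%x\n", old_action);     /* prints 0x0, VF bits lost */
      printf("u64 action: 0x%llx\n", (unsigned long long)new_action);
      return 0;
  }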
Signed-off-by: Sridhar Samudrala Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index 61d8110d26c7..94e39c13e7d4 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -825,7 +825,7 @@ struct ixgbe_fdir_filter { struct hlist_node fdir_node; union ixgbe_atr_input filter; u16 sw_idx; - u16 action; + u64 action; }; enum ixgbe_state_t { From 15cfd40771e18a4e9b788c64c9db2606f958b93d Mon Sep 17 00:00:00 2001 From: Haiyang Zhang Date: Thu, 21 Apr 2016 16:13:01 -0700 Subject: [PATCH 0931/1649] hv_netvsc: Fix the list processing for network change event RNDIS_STATUS_NETWORK_CHANGE event is handled as two "half events" -- media disconnect & connect. The second half should be added to the list head, not to the tail. So all events are processed in normal order. Signed-off-by: Haiyang Zhang Reviewed-by: K. Y. Srinivasan Reviewed-by: Vitaly Kuznetsov Signed-off-by: David S. Miller --- drivers/net/hyperv/netvsc_drv.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index bfdb568ac6b8..ba3f3f3d48ef 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -1125,7 +1125,7 @@ static void netvsc_link_change(struct work_struct *w) netif_tx_stop_all_queues(net); event->event = RNDIS_STATUS_MEDIA_CONNECT; spin_lock_irqsave(&ndev_ctx->lock, flags); - list_add_tail(&event->list, &ndev_ctx->reconfig_events); + list_add(&event->list, &ndev_ctx->reconfig_events); spin_unlock_irqrestore(&ndev_ctx->lock, flags); reschedule = true; } From 2f2219bea21118511c23d24dba5f2145f870a7db Mon Sep 17 00:00:00 2001 From: Mark Rustad Date: Thu, 7 Apr 2016 10:43:50 -0700 Subject: [PATCH 0932/1649] ixgbe: Add register wait for slow links Use a new register to wait for previous register writes to complete before issuing a register read. This is needed when slower links are in use. 
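The wait added below is a bounded busy-poll on the new register. A rough sketch of the idea (illustrative only; the real loop, including the adapter-removal path on a failed read, lives in ixgbe_read_reg() in the diff):

  /* Illustrative sketch of the wait performed in ixgbe_read_reg() */
  static bool example_writes_flushed(u8 __iomem *reg_base)
  {
      int i;

      for (i = 0; i < 200; i++) {
          /* IXGBE_MAC_SGMII_BUSY reads zero once prior writes have landed */
          if (!readl(reg_base + IXGBE_MAC_SGMII_BUSY))
              return true;
          udelay(5);
      }
      return false;  /* caller warns "register writes incomplete" */
  }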
Signed-off-by: Mark Rustad Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 21 +++++++++++++++++++ drivers/net/ethernet/intel/ixgbe/ixgbe_type.h | 1 + 2 files changed, 22 insertions(+) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 657e999befcb..517f06e6c3d8 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -371,6 +371,27 @@ u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg) if (ixgbe_removed(reg_addr)) return IXGBE_FAILED_READ_REG; + if (unlikely(hw->phy.nw_mng_if_sel & + IXGBE_NW_MNG_IF_SEL_ENABLE_10_100M)) { + struct ixgbe_adapter *adapter; + int i; + + for (i = 0; i < 200; ++i) { + value = readl(reg_addr + IXGBE_MAC_SGMII_BUSY); + if (likely(!value)) + goto writes_completed; + if (value == IXGBE_FAILED_READ_REG) { + ixgbe_remove_adapter(hw); + return IXGBE_FAILED_READ_REG; + } + udelay(5); + } + + adapter = hw->back; + e_warn(hw, "register writes incomplete %08x\n", value); + } + +writes_completed: value = readl(reg_addr + reg); if (unlikely(value == IXGBE_FAILED_READ_REG)) ixgbe_check_remove(hw, reg); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h index ba3b837c7e9d..29a1c423543b 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h @@ -1131,6 +1131,7 @@ struct ixgbe_thermal_sensor_data { #define IXGBE_XPCSS 0x04290 #define IXGBE_MFLCN 0x04294 #define IXGBE_SERDESC 0x04298 +#define IXGBE_MAC_SGMII_BUSY 0x04298 #define IXGBE_MACS 0x0429C #define IXGBE_AUTOC 0x042A0 #define IXGBE_LINKS 0x042A4 From d72d6c19b583afc09ace22baf80b29b11139a8f3 Mon Sep 17 00:00:00 2001 From: Emil Tantilov Date: Thu, 7 Apr 2016 15:58:39 -0700 Subject: [PATCH 0933/1649] ixgbevf: refactor ethtool stats handling This brings the logic closer to how we handle the stats in ixgbe and it sets us up for introducing per-queue stats. Use IXGBEVF_STAT and IXGBEVF_NETDEV_STAT for accessing the driver and netdev stats respectively. This way we don't have to calculate the stats based on register values which could lead to the counters not being initialized properly when the interface is down. IXGBEVF_QUEUE_STATS_LEN is set to include the number of queues. Also some defines were renamed to use the IXGBEVF prefix. 
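The IXGBEVF_STAT/IXGBEVF_NETDEV_STAT macros build a table of {name, size, offset} descriptors that a single loop can walk. The same technique in a stand-alone user-space form (all names here are invented for illustration):

  #include <stdio.h>
  #include <stddef.h>
  #include <stdint.h>

  struct demo_counters {
      uint64_t rx_packets;
      uint32_t restart_queue;
  };

  struct demo_stat {
      const char *name;
      size_t size;
      size_t offset;
  };

  #define DEMO_STAT(f) \
      { #f, sizeof(((struct demo_counters *)0)->f), \
        offsetof(struct demo_counters, f) }

  static const struct demo_stat demo_stats[] = {
      DEMO_STAT(rx_packets),
      DEMO_STAT(restart_queue),
  };

  int main(void)
  {
      struct demo_counters c = { .rx_packets = 42, .restart_queue = 3 };
      size_t i;

      for (i = 0; i < sizeof(demo_stats) / sizeof(demo_stats[0]); i++) {
          const char *p = (const char *)&c + demo_stats[i].offset;
          uint64_t v = demo_stats[i].size == sizeof(uint64_t) ?
                       *(const uint64_t *)p : *(const uint32_t *)p;

          printf("%s: %llu\n", demo_stats[i].name, (unsigned long long)v);
      }
      return 0;
  }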
Signed-off-by: Emil Tantilov Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbevf/ethtool.c | 126 ++++++++++--------- 1 file changed, 64 insertions(+), 62 deletions(-) diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c index d7aa4b203f40..cd4d311ea0b6 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c +++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c @@ -42,65 +42,62 @@ #define IXGBE_ALL_RAR_ENTRIES 16 +enum {NETDEV_STATS, IXGBEVF_STATS}; + struct ixgbe_stats { char stat_string[ETH_GSTRING_LEN]; - struct { - int sizeof_stat; - int stat_offset; - int base_stat_offset; - int saved_reset_offset; - }; + int type; + int sizeof_stat; + int stat_offset; }; -#define IXGBEVF_STAT(m, b, r) { \ - .sizeof_stat = FIELD_SIZEOF(struct ixgbevf_adapter, m), \ - .stat_offset = offsetof(struct ixgbevf_adapter, m), \ - .base_stat_offset = offsetof(struct ixgbevf_adapter, b), \ - .saved_reset_offset = offsetof(struct ixgbevf_adapter, r) \ +#define IXGBEVF_STAT(_name, _stat) { \ + .stat_string = _name, \ + .type = IXGBEVF_STATS, \ + .sizeof_stat = FIELD_SIZEOF(struct ixgbevf_adapter, _stat), \ + .stat_offset = offsetof(struct ixgbevf_adapter, _stat) \ } -#define IXGBEVF_ZSTAT(m) { \ - .sizeof_stat = FIELD_SIZEOF(struct ixgbevf_adapter, m), \ - .stat_offset = offsetof(struct ixgbevf_adapter, m), \ - .base_stat_offset = -1, \ - .saved_reset_offset = -1 \ +#define IXGBEVF_NETDEV_STAT(_net_stat) { \ + .stat_string = #_net_stat, \ + .type = NETDEV_STATS, \ + .sizeof_stat = FIELD_SIZEOF(struct net_device_stats, _net_stat), \ + .stat_offset = offsetof(struct net_device_stats, _net_stat) \ } -static const struct ixgbe_stats ixgbe_gstrings_stats[] = { - {"rx_packets", IXGBEVF_STAT(stats.vfgprc, stats.base_vfgprc, - stats.saved_reset_vfgprc)}, - {"tx_packets", IXGBEVF_STAT(stats.vfgptc, stats.base_vfgptc, - stats.saved_reset_vfgptc)}, - {"rx_bytes", IXGBEVF_STAT(stats.vfgorc, stats.base_vfgorc, - stats.saved_reset_vfgorc)}, - {"tx_bytes", IXGBEVF_STAT(stats.vfgotc, stats.base_vfgotc, - stats.saved_reset_vfgotc)}, - {"tx_busy", IXGBEVF_ZSTAT(tx_busy)}, - {"tx_restart_queue", IXGBEVF_ZSTAT(restart_queue)}, - {"tx_timeout_count", IXGBEVF_ZSTAT(tx_timeout_count)}, - {"multicast", IXGBEVF_STAT(stats.vfmprc, stats.base_vfmprc, - stats.saved_reset_vfmprc)}, - {"rx_csum_offload_errors", IXGBEVF_ZSTAT(hw_csum_rx_error)}, +static struct ixgbe_stats ixgbevf_gstrings_stats[] = { + IXGBEVF_NETDEV_STAT(rx_packets), + IXGBEVF_NETDEV_STAT(tx_packets), + IXGBEVF_NETDEV_STAT(rx_bytes), + IXGBEVF_NETDEV_STAT(tx_bytes), + IXGBEVF_STAT("tx_busy", tx_busy), + IXGBEVF_STAT("tx_restart_queue", restart_queue), + IXGBEVF_STAT("tx_timeout_count", tx_timeout_count), + IXGBEVF_NETDEV_STAT(multicast), + IXGBEVF_STAT("rx_csum_offload_errors", hw_csum_rx_error), #ifdef BP_EXTENDED_STATS - {"rx_bp_poll_yield", IXGBEVF_ZSTAT(bp_rx_yields)}, - {"rx_bp_cleaned", IXGBEVF_ZSTAT(bp_rx_cleaned)}, - {"rx_bp_misses", IXGBEVF_ZSTAT(bp_rx_missed)}, - {"tx_bp_napi_yield", IXGBEVF_ZSTAT(bp_tx_yields)}, - {"tx_bp_cleaned", IXGBEVF_ZSTAT(bp_tx_cleaned)}, - {"tx_bp_misses", IXGBEVF_ZSTAT(bp_tx_missed)}, + IXGBEVF_STAT("rx_bp_poll_yield", bp_rx_yields), + IXGBEVF_STAT("rx_bp_cleaned", bp_rx_cleaned), + IXGBEVF_STAT("rx_bp_misses", bp_rx_missed), + IXGBEVF_STAT("tx_bp_napi_yield", bp_tx_yields), + IXGBEVF_STAT("tx_bp_cleaned", bp_tx_cleaned), + IXGBEVF_STAT("tx_bp_misses", bp_tx_missed), #endif }; -#define IXGBE_QUEUE_STATS_LEN 0 -#define IXGBE_GLOBAL_STATS_LEN 
ARRAY_SIZE(ixgbe_gstrings_stats) +#define IXGBEVF_QUEUE_STATS_LEN ( \ + (((struct ixgbevf_adapter *)netdev_priv(netdev))->num_tx_queues + \ + ((struct ixgbevf_adapter *)netdev_priv(netdev))->num_rx_queues) * \ + (sizeof(struct ixgbe_stats) / sizeof(u64))) +#define IXGBEVF_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbevf_gstrings_stats) -#define IXGBEVF_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + IXGBE_QUEUE_STATS_LEN) +#define IXGBEVF_STATS_LEN (IXGBEVF_GLOBAL_STATS_LEN + IXGBEVF_QUEUE_STATS_LEN) static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = { "Register test (offline)", "Link test (on/offline)" }; -#define IXGBE_TEST_LEN (sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN) +#define IXGBEVF_TEST_LEN (sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN) static int ixgbevf_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) @@ -396,9 +393,9 @@ static int ixgbevf_get_sset_count(struct net_device *dev, int stringset) { switch (stringset) { case ETH_SS_TEST: - return IXGBE_TEST_LEN; + return IXGBEVF_TEST_LEN; case ETH_SS_STATS: - return IXGBE_GLOBAL_STATS_LEN; + return IXGBEVF_GLOBAL_STATS_LEN; default: return -EINVAL; } @@ -408,8 +405,11 @@ static void ixgbevf_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats, u64 *data) { struct ixgbevf_adapter *adapter = netdev_priv(netdev); - char *base = (char *)adapter; + struct rtnl_link_stats64 temp; + const struct rtnl_link_stats64 *net_stats; int i; + char *p; + #ifdef BP_EXTENDED_STATS u64 rx_yields = 0, rx_cleaned = 0, rx_missed = 0, tx_yields = 0, tx_cleaned = 0, tx_missed = 0; @@ -436,22 +436,24 @@ static void ixgbevf_get_ethtool_stats(struct net_device *netdev, #endif ixgbevf_update_stats(adapter); - for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) { - char *p = base + ixgbe_gstrings_stats[i].stat_offset; - char *b = base + ixgbe_gstrings_stats[i].base_stat_offset; - char *r = base + ixgbe_gstrings_stats[i].saved_reset_offset; - - if (ixgbe_gstrings_stats[i].sizeof_stat == sizeof(u64)) { - if (ixgbe_gstrings_stats[i].base_stat_offset >= 0) - data[i] = *(u64 *)p - *(u64 *)b + *(u64 *)r; - else - data[i] = *(u64 *)p; - } else { - if (ixgbe_gstrings_stats[i].base_stat_offset >= 0) - data[i] = *(u32 *)p - *(u32 *)b + *(u32 *)r; - else - data[i] = *(u32 *)p; + net_stats = dev_get_stats(netdev, &temp); + for (i = 0; i < IXGBEVF_GLOBAL_STATS_LEN; i++) { + switch (ixgbevf_gstrings_stats[i].type) { + case NETDEV_STATS: + p = (char *)net_stats + + ixgbevf_gstrings_stats[i].stat_offset; + break; + case IXGBEVF_STATS: + p = (char *)adapter + + ixgbevf_gstrings_stats[i].stat_offset; + break; + default: + data[i] = 0; + continue; } + + data[i] = (ixgbevf_gstrings_stats[i].sizeof_stat == + sizeof(u64)) ? *(u64 *)p : *(u32 *)p; } } @@ -464,11 +466,11 @@ static void ixgbevf_get_strings(struct net_device *netdev, u32 stringset, switch (stringset) { case ETH_SS_TEST: memcpy(data, *ixgbe_gstrings_test, - IXGBE_TEST_LEN * ETH_GSTRING_LEN); + IXGBEVF_TEST_LEN * ETH_GSTRING_LEN); break; case ETH_SS_STATS: - for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) { - memcpy(p, ixgbe_gstrings_stats[i].stat_string, + for (i = 0; i < IXGBEVF_GLOBAL_STATS_LEN; i++) { + memcpy(p, ixgbevf_gstrings_stats[i].stat_string, ETH_GSTRING_LEN); p += ETH_GSTRING_LEN; } From a02a5a53418a6039893f5d5a9373cf18080fded2 Mon Sep 17 00:00:00 2001 From: Emil Tantilov Date: Thu, 7 Apr 2016 15:58:44 -0700 Subject: [PATCH 0934/1649] ixgbevf: add support for per-queue ethtool stats Implement per-queue statistics for packets, bytes and busy poll specific counters. 
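Per-ring packet and byte counters are sampled under the ring's u64_stats seqcount so that 32-bit hosts still see a consistent 64-bit snapshot. The read side added below boils down to this sketch (see ixgbevf_get_ethtool_stats() in the diff for the full loop):

  /* Sketch of the consistent-snapshot read used for each ring */
  static void example_read_ring_stats(struct ixgbevf_ring *ring,
                                      u64 *packets, u64 *bytes)
  {
      unsigned int start;

      do {
          start = u64_stats_fetch_begin_irq(&ring->syncp);
          *packets = ring->stats.packets;
          *bytes = ring->stats.bytes;
      } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
  }

With the strings added below, ethtool -S <iface> then reports tx_queue_N_packets, tx_queue_N_bytes, rx_queue_N_packets and rx_queue_N_bytes per queue (plus the bp_* counters when BP_EXTENDED_STATS is enabled).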
Signed-off-by: Emil Tantilov Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbevf/ethtool.c | 127 +++++++++++++------ drivers/net/ethernet/intel/ixgbevf/ixgbevf.h | 10 -- 2 files changed, 91 insertions(+), 46 deletions(-) diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c index cd4d311ea0b6..64d5c6e9343e 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c +++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c @@ -75,14 +75,6 @@ static struct ixgbe_stats ixgbevf_gstrings_stats[] = { IXGBEVF_STAT("tx_timeout_count", tx_timeout_count), IXGBEVF_NETDEV_STAT(multicast), IXGBEVF_STAT("rx_csum_offload_errors", hw_csum_rx_error), -#ifdef BP_EXTENDED_STATS - IXGBEVF_STAT("rx_bp_poll_yield", bp_rx_yields), - IXGBEVF_STAT("rx_bp_cleaned", bp_rx_cleaned), - IXGBEVF_STAT("rx_bp_misses", bp_rx_missed), - IXGBEVF_STAT("tx_bp_napi_yield", bp_tx_yields), - IXGBEVF_STAT("tx_bp_cleaned", bp_tx_cleaned), - IXGBEVF_STAT("tx_bp_misses", bp_tx_missed), -#endif }; #define IXGBEVF_QUEUE_STATS_LEN ( \ @@ -389,13 +381,13 @@ clear_reset: return err; } -static int ixgbevf_get_sset_count(struct net_device *dev, int stringset) +static int ixgbevf_get_sset_count(struct net_device *netdev, int stringset) { switch (stringset) { case ETH_SS_TEST: return IXGBEVF_TEST_LEN; case ETH_SS_STATS: - return IXGBEVF_GLOBAL_STATS_LEN; + return IXGBEVF_STATS_LEN; default: return -EINVAL; } @@ -407,34 +399,11 @@ static void ixgbevf_get_ethtool_stats(struct net_device *netdev, struct ixgbevf_adapter *adapter = netdev_priv(netdev); struct rtnl_link_stats64 temp; const struct rtnl_link_stats64 *net_stats; - int i; + unsigned int start; + struct ixgbevf_ring *ring; + int i, j; char *p; -#ifdef BP_EXTENDED_STATS - u64 rx_yields = 0, rx_cleaned = 0, rx_missed = 0, - tx_yields = 0, tx_cleaned = 0, tx_missed = 0; - - for (i = 0; i < adapter->num_rx_queues; i++) { - rx_yields += adapter->rx_ring[i]->stats.yields; - rx_cleaned += adapter->rx_ring[i]->stats.cleaned; - rx_yields += adapter->rx_ring[i]->stats.yields; - } - - for (i = 0; i < adapter->num_tx_queues; i++) { - tx_yields += adapter->tx_ring[i]->stats.yields; - tx_cleaned += adapter->tx_ring[i]->stats.cleaned; - tx_yields += adapter->tx_ring[i]->stats.yields; - } - - adapter->bp_rx_yields = rx_yields; - adapter->bp_rx_cleaned = rx_cleaned; - adapter->bp_rx_missed = rx_missed; - - adapter->bp_tx_yields = tx_yields; - adapter->bp_tx_cleaned = tx_cleaned; - adapter->bp_tx_missed = tx_missed; -#endif - ixgbevf_update_stats(adapter); net_stats = dev_get_stats(netdev, &temp); for (i = 0; i < IXGBEVF_GLOBAL_STATS_LEN; i++) { @@ -455,11 +424,68 @@ static void ixgbevf_get_ethtool_stats(struct net_device *netdev, data[i] = (ixgbevf_gstrings_stats[i].sizeof_stat == sizeof(u64)) ? 
*(u64 *)p : *(u32 *)p; } + + /* populate Tx queue data */ + for (j = 0; j < adapter->num_tx_queues; j++) { + ring = adapter->tx_ring[j]; + if (!ring) { + data[i++] = 0; + data[i++] = 0; +#ifdef BP_EXTENDED_STATS + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; +#endif + continue; + } + + do { + start = u64_stats_fetch_begin_irq(&ring->syncp); + data[i] = ring->stats.packets; + data[i + 1] = ring->stats.bytes; + } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); + i += 2; +#ifdef BP_EXTENDED_STATS + data[i] = ring->stats.yields; + data[i + 1] = ring->stats.misses; + data[i + 2] = ring->stats.cleaned; + i += 3; +#endif + } + + /* populate Rx queue data */ + for (j = 0; j < adapter->num_rx_queues; j++) { + ring = adapter->rx_ring[j]; + if (!ring) { + data[i++] = 0; + data[i++] = 0; +#ifdef BP_EXTENDED_STATS + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; +#endif + continue; + } + + do { + start = u64_stats_fetch_begin_irq(&ring->syncp); + data[i] = ring->stats.packets; + data[i + 1] = ring->stats.bytes; + } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); + i += 2; +#ifdef BP_EXTENDED_STATS + data[i] = ring->stats.yields; + data[i + 1] = ring->stats.misses; + data[i + 2] = ring->stats.cleaned; + i += 3; +#endif + } } static void ixgbevf_get_strings(struct net_device *netdev, u32 stringset, u8 *data) { + struct ixgbevf_adapter *adapter = netdev_priv(netdev); char *p = (char *)data; int i; @@ -474,6 +500,35 @@ static void ixgbevf_get_strings(struct net_device *netdev, u32 stringset, ETH_GSTRING_LEN); p += ETH_GSTRING_LEN; } + + for (i = 0; i < adapter->num_tx_queues; i++) { + sprintf(p, "tx_queue_%u_packets", i); + p += ETH_GSTRING_LEN; + sprintf(p, "tx_queue_%u_bytes", i); + p += ETH_GSTRING_LEN; +#ifdef BP_EXTENDED_STATS + sprintf(p, "tx_queue_%u_bp_napi_yield", i); + p += ETH_GSTRING_LEN; + sprintf(p, "tx_queue_%u_bp_misses", i); + p += ETH_GSTRING_LEN; + sprintf(p, "tx_queue_%u_bp_cleaned", i); + p += ETH_GSTRING_LEN; +#endif /* BP_EXTENDED_STATS */ + } + for (i = 0; i < adapter->num_rx_queues; i++) { + sprintf(p, "rx_queue_%u_packets", i); + p += ETH_GSTRING_LEN; + sprintf(p, "rx_queue_%u_bytes", i); + p += ETH_GSTRING_LEN; +#ifdef BP_EXTENDED_STATS + sprintf(p, "rx_queue_%u_bp_poll_yield", i); + p += ETH_GSTRING_LEN; + sprintf(p, "rx_queue_%u_bp_misses", i); + p += ETH_GSTRING_LEN; + sprintf(p, "rx_queue_%u_bp_cleaned", i); + p += ETH_GSTRING_LEN; +#endif /* BP_EXTENDED_STATS */ + } break; } } diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h index 5ac60eefb0cd..5ca3794aeb2e 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h @@ -422,16 +422,6 @@ struct ixgbevf_adapter { unsigned int tx_ring_count; unsigned int rx_ring_count; -#ifdef BP_EXTENDED_STATS - u64 bp_rx_yields; - u64 bp_rx_cleaned; - u64 bp_rx_missed; - - u64 bp_tx_yields; - u64 bp_tx_cleaned; - u64 bp_tx_missed; -#endif - u8 __iomem *io_addr; /* Mainly for iounmap use */ u32 link_speed; bool link_up; From a0254a70b4f91396ad04b1225dd7c10a680d38ff Mon Sep 17 00:00:00 2001 From: Mark Rustad Date: Fri, 8 Apr 2016 16:19:29 -0700 Subject: [PATCH 0935/1649] ixgbe: Use correct FC setup function for x550em_a Somehow the wrong fc_setup function was used for x550em_a, so correct that. Also set setup_link to NULL as its value is determined later, just like it is with X550EM_x. 
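The '.setup_link = NULL, /* defined later */' convention means the callback is filled in during init once the media/PHY type is known. A minimal stand-alone illustration of that late-binding pattern (all names invented, this is not the X550 init path):

  struct example_mac_ops {
      int (*setup_link)(void *hw);
  };

  static int example_setup_link_kr(void *hw)
  {
      (void)hw;  /* backplane (KR) link setup would go here */
      return 0;
  }

  static int example_setup_link_generic(void *hw)
  {
      (void)hw;  /* default link setup would go here */
      return 0;
  }

  static struct example_mac_ops example_ops = {
      .setup_link = NULL,  /* defined later, once the media type is known */
  };

  static void example_init_ops(int is_backplane)
  {
      example_ops.setup_link = is_backplane ? example_setup_link_kr
                                            : example_setup_link_generic;
  }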
Signed-off-by: Mark Rustad Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c index ea25f001f3bb..a17e398d56b8 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c @@ -2926,13 +2926,13 @@ static struct ixgbe_mac_operations mac_ops_x550em_a = { .get_media_type = ixgbe_get_media_type_X550em, .get_san_mac_addr = NULL, .get_wwn_prefix = NULL, - .setup_link = &ixgbe_setup_mac_link_X540, + .setup_link = NULL, /* defined later */ .get_link_capabilities = ixgbe_get_link_capabilities_X550em, .get_bus_info = ixgbe_get_bus_info_X550em, .setup_sfp = ixgbe_setup_sfp_modules_X550em, .acquire_swfw_sync = ixgbe_acquire_swfw_sync_x550em_a, .release_swfw_sync = ixgbe_release_swfw_sync_x550em_a, - .setup_fc = ixgbe_setup_fc_generic, + .setup_fc = ixgbe_setup_fc_x550em, .read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550a, .write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550a, }; From 4319a7976722f6925b5bbbdac417d87a0cbde859 Mon Sep 17 00:00:00 2001 From: Don Skidmore Date: Tue, 12 Apr 2016 19:25:10 -0400 Subject: [PATCH 0936/1649] ixgbe: Add work around for empty SFP+ cage crosstalk It is possible on some systems that crosstalk could lead to link flap on empty SFP+ cages. A new NVM bit was defined to let SW know it needs to implement the work around which consists of verifying that there is a module in the cage before acting on the LSC. Signed-off-by: Don Skidmore Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe.h | 2 + drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 39 +++++++++++++++++++ drivers/net/ethernet/intel/ixgbe/ixgbe_type.h | 1 + 3 files changed, 42 insertions(+) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index 94e39c13e7d4..22cf2a9430b5 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -803,6 +803,8 @@ struct ixgbe_adapter { #define IXGBE_RSS_KEY_SIZE 40 /* size of RSS Hash Key in bytes */ u32 rss_key[IXGBE_RSS_KEY_SIZE / sizeof(u32)]; + + bool need_crosstalk_fix; }; static inline u8 ixgbe_max_rss_indices(struct ixgbe_adapter *adapter) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 517f06e6c3d8..09d7c8bdcd5c 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -5572,6 +5572,7 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter) struct pci_dev *pdev = adapter->pdev; unsigned int rss, fdir; u32 fwsm; + u16 device_caps; #ifdef CONFIG_IXGBE_DCB int j; struct tc_configuration *tc; @@ -5737,6 +5738,22 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter) adapter->tx_ring_count = IXGBE_DEFAULT_TXD; adapter->rx_ring_count = IXGBE_DEFAULT_RXD; + /* Cache bit indicating need for crosstalk fix */ + switch (hw->mac.type) { + case ixgbe_mac_82599EB: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: + hw->mac.ops.get_device_caps(hw, &device_caps); + if (device_caps & IXGBE_DEVICE_CAPS_NO_CROSSTALK_WR) + adapter->need_crosstalk_fix = false; + else + adapter->need_crosstalk_fix = true; + break; + default: + adapter->need_crosstalk_fix = false; + break; + } + /* set default work limits */ adapter->tx_work_limit = IXGBE_DEFAULT_TX_WORK; @@ -6659,6 +6676,18 @@ 
static void ixgbe_watchdog_update_link(struct ixgbe_adapter *adapter) link_up = true; } + /* If Crosstalk fix enabled do the sanity check of making sure + * the SFP+ cage is empty. + */ + if (adapter->need_crosstalk_fix) { + u32 sfp_cage_full; + + sfp_cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) & + IXGBE_ESDP_SDP2; + if (ixgbe_is_sfp(hw) && link_up && !sfp_cage_full) + link_up = false; + } + if (adapter->ixgbe_ieee_pfc) pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en); @@ -7005,6 +7034,16 @@ static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter) struct ixgbe_hw *hw = &adapter->hw; s32 err; + /* If crosstalk fix enabled verify the SFP+ cage is full */ + if (adapter->need_crosstalk_fix) { + u32 sfp_cage_full; + + sfp_cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) & + IXGBE_ESDP_SDP2; + if (!sfp_cage_full) + return; + } + /* not searching for SFP so there is nothing to do here */ if (!(adapter->flags2 & IXGBE_FLAG2_SEARCH_FOR_SFP) && !(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET)) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h index 29a1c423543b..8e7decb3e078 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h @@ -2124,6 +2124,7 @@ enum { #define IXGBE_SAN_MAC_ADDR_PORT1_OFFSET 0x3 #define IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP 0x1 #define IXGBE_DEVICE_CAPS_FCOE_OFFLOADS 0x2 +#define IXGBE_DEVICE_CAPS_NO_CROSSTALK_WR BIT(7) #define IXGBE_FW_LESM_PARAMETERS_PTR 0x2 #define IXGBE_FW_LESM_STATE_1 0x1 #define IXGBE_FW_LESM_STATE_ENABLED 0x8000 /* LESM Enable bit */ From b4f47a483045a6e6b31be8ade76cdfef7091f18b Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Wed, 13 Apr 2016 16:08:22 -0700 Subject: [PATCH 0937/1649] ixgbe: use BIT() macro Several areas of ixgbe were written before widespread usage of the BIT(n) macro. With the impending release of GCC 6 and its associated new warnings, some usages such as (1 << 31) have been noted within the ixgbe driver source. Fix these wholesale and prevent future issues by simply using BIT macro instead of hand coded bit shifts. Also fix a few shifts that are shifting values into place by using the 'u' prefix to indicate unsigned. It doesn't strictly matter in these cases because we're not shifting by too large a value, but these are all unsigned values and should be indicated as such. 
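As a quick illustration of the conversion (user-space C, with BIT() written out essentially the way include/linux/bits.h defines it): 1 << 31 shifts into the sign bit of a signed int, which is what the newer GCC warnings flag, while BIT(31) and 1u << 31 stay unsigned.

#include <stdio.h>

#define BIT(nr)	(1UL << (nr))	/* essentially what include/linux/bits.h provides */

int main(void)
{
	unsigned int flag_new = BIT(31);	/* unsigned shift, well defined */
	unsigned int flag_suffixed = 1u << 31;	/* the 'u' postfix variant */
	/* unsigned int flag_old = 1 << 31; -- signed shift into the sign bit,
	 * the pattern this patch removes.
	 */

	printf("0x%x 0x%x\n", flag_new, flag_suffixed);
	return flag_new == flag_suffixed ? 0 : 1;
}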
Signed-off-by: Jacob Keller Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe.h | 68 ++++---- .../net/ethernet/intel/ixgbe/ixgbe_82598.c | 6 +- .../net/ethernet/intel/ixgbe/ixgbe_82599.c | 16 +- .../net/ethernet/intel/ixgbe/ixgbe_common.c | 30 ++-- drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c | 4 +- .../ethernet/intel/ixgbe/ixgbe_dcb_82598.c | 2 +- .../ethernet/intel/ixgbe/ixgbe_dcb_82599.c | 2 +- .../net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c | 6 +- .../net/ethernet/intel/ixgbe/ixgbe_ethtool.c | 14 +- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 32 ++-- drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c | 4 +- drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h | 2 +- drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c | 4 +- .../net/ethernet/intel/ixgbe/ixgbe_sriov.c | 22 +-- drivers/net/ethernet/intel/ixgbe/ixgbe_type.h | 164 +++++++++--------- drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c | 4 +- drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c | 8 +- 17 files changed, 194 insertions(+), 194 deletions(-) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index 22cf2a9430b5..781c8787ab66 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -170,7 +170,7 @@ struct vf_macvlans { }; #define IXGBE_MAX_TXD_PWR 14 -#define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR) +#define IXGBE_MAX_DATA_PER_TXD (1u << IXGBE_MAX_TXD_PWR) /* Tx Descriptors needed, worst case */ #define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD) @@ -620,44 +620,44 @@ struct ixgbe_adapter { * thus the additional *_CAPABLE flags. */ u32 flags; -#define IXGBE_FLAG_MSI_ENABLED (u32)(1 << 1) -#define IXGBE_FLAG_MSIX_ENABLED (u32)(1 << 3) -#define IXGBE_FLAG_RX_1BUF_CAPABLE (u32)(1 << 4) -#define IXGBE_FLAG_RX_PS_CAPABLE (u32)(1 << 5) -#define IXGBE_FLAG_RX_PS_ENABLED (u32)(1 << 6) -#define IXGBE_FLAG_DCA_ENABLED (u32)(1 << 8) -#define IXGBE_FLAG_DCA_CAPABLE (u32)(1 << 9) -#define IXGBE_FLAG_IMIR_ENABLED (u32)(1 << 10) -#define IXGBE_FLAG_MQ_CAPABLE (u32)(1 << 11) -#define IXGBE_FLAG_DCB_ENABLED (u32)(1 << 12) -#define IXGBE_FLAG_VMDQ_CAPABLE (u32)(1 << 13) -#define IXGBE_FLAG_VMDQ_ENABLED (u32)(1 << 14) -#define IXGBE_FLAG_FAN_FAIL_CAPABLE (u32)(1 << 15) -#define IXGBE_FLAG_NEED_LINK_UPDATE (u32)(1 << 16) -#define IXGBE_FLAG_NEED_LINK_CONFIG (u32)(1 << 17) -#define IXGBE_FLAG_FDIR_HASH_CAPABLE (u32)(1 << 18) -#define IXGBE_FLAG_FDIR_PERFECT_CAPABLE (u32)(1 << 19) -#define IXGBE_FLAG_FCOE_CAPABLE (u32)(1 << 20) -#define IXGBE_FLAG_FCOE_ENABLED (u32)(1 << 21) -#define IXGBE_FLAG_SRIOV_CAPABLE (u32)(1 << 22) -#define IXGBE_FLAG_SRIOV_ENABLED (u32)(1 << 23) +#define IXGBE_FLAG_MSI_ENABLED BIT(1) +#define IXGBE_FLAG_MSIX_ENABLED BIT(3) +#define IXGBE_FLAG_RX_1BUF_CAPABLE BIT(4) +#define IXGBE_FLAG_RX_PS_CAPABLE BIT(5) +#define IXGBE_FLAG_RX_PS_ENABLED BIT(6) +#define IXGBE_FLAG_DCA_ENABLED BIT(8) +#define IXGBE_FLAG_DCA_CAPABLE BIT(9) +#define IXGBE_FLAG_IMIR_ENABLED BIT(10) +#define IXGBE_FLAG_MQ_CAPABLE BIT(11) +#define IXGBE_FLAG_DCB_ENABLED BIT(12) +#define IXGBE_FLAG_VMDQ_CAPABLE BIT(13) +#define IXGBE_FLAG_VMDQ_ENABLED BIT(14) +#define IXGBE_FLAG_FAN_FAIL_CAPABLE BIT(15) +#define IXGBE_FLAG_NEED_LINK_UPDATE BIT(16) +#define IXGBE_FLAG_NEED_LINK_CONFIG BIT(17) +#define IXGBE_FLAG_FDIR_HASH_CAPABLE BIT(18) +#define IXGBE_FLAG_FDIR_PERFECT_CAPABLE BIT(19) +#define IXGBE_FLAG_FCOE_CAPABLE BIT(20) +#define IXGBE_FLAG_FCOE_ENABLED BIT(21) +#define IXGBE_FLAG_SRIOV_CAPABLE BIT(22) +#define 
IXGBE_FLAG_SRIOV_ENABLED BIT(23) #define IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE BIT(24) #define IXGBE_FLAG_RX_HWTSTAMP_ENABLED BIT(25) #define IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER BIT(26) u32 flags2; -#define IXGBE_FLAG2_RSC_CAPABLE (u32)(1 << 0) -#define IXGBE_FLAG2_RSC_ENABLED (u32)(1 << 1) -#define IXGBE_FLAG2_TEMP_SENSOR_CAPABLE (u32)(1 << 2) -#define IXGBE_FLAG2_TEMP_SENSOR_EVENT (u32)(1 << 3) -#define IXGBE_FLAG2_SEARCH_FOR_SFP (u32)(1 << 4) -#define IXGBE_FLAG2_SFP_NEEDS_RESET (u32)(1 << 5) -#define IXGBE_FLAG2_RESET_REQUESTED (u32)(1 << 6) -#define IXGBE_FLAG2_FDIR_REQUIRES_REINIT (u32)(1 << 7) -#define IXGBE_FLAG2_RSS_FIELD_IPV4_UDP (u32)(1 << 8) -#define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP (u32)(1 << 9) -#define IXGBE_FLAG2_PTP_PPS_ENABLED (u32)(1 << 10) -#define IXGBE_FLAG2_PHY_INTERRUPT (u32)(1 << 11) +#define IXGBE_FLAG2_RSC_CAPABLE BIT(0) +#define IXGBE_FLAG2_RSC_ENABLED BIT(1) +#define IXGBE_FLAG2_TEMP_SENSOR_CAPABLE BIT(2) +#define IXGBE_FLAG2_TEMP_SENSOR_EVENT BIT(3) +#define IXGBE_FLAG2_SEARCH_FOR_SFP BIT(4) +#define IXGBE_FLAG2_SFP_NEEDS_RESET BIT(5) +#define IXGBE_FLAG2_RESET_REQUESTED BIT(6) +#define IXGBE_FLAG2_FDIR_REQUIRES_REINIT BIT(7) +#define IXGBE_FLAG2_RSS_FIELD_IPV4_UDP BIT(8) +#define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP BIT(9) +#define IXGBE_FLAG2_PTP_PPS_ENABLED BIT(10) +#define IXGBE_FLAG2_PHY_INTERRUPT BIT(11) #define IXGBE_FLAG2_VXLAN_REREG_NEEDED BIT(12) #define IXGBE_FLAG2_VLAN_PROMISC BIT(13) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c index 6ecd598c6ef5..fb51be74dd4c 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c @@ -792,7 +792,7 @@ mac_reset_top: } gheccr = IXGBE_READ_REG(hw, IXGBE_GHECCR); - gheccr &= ~((1 << 21) | (1 << 18) | (1 << 9) | (1 << 6)); + gheccr &= ~(BIT(21) | BIT(18) | BIT(9) | BIT(6)); IXGBE_WRITE_REG(hw, IXGBE_GHECCR, gheccr); /* @@ -914,10 +914,10 @@ static s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind, bits = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex)); if (vlan_on) /* Turn on this VLAN id */ - bits |= (1 << bitindex); + bits |= BIT(bitindex); else /* Turn off this VLAN id */ - bits &= ~(1 << bitindex); + bits &= ~BIT(bitindex); IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), bits); return 0; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c index 01519787324a..47afed74a54d 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c @@ -1296,17 +1296,17 @@ s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl) #define IXGBE_COMPUTE_SIG_HASH_ITERATION(_n) \ do { \ u32 n = (_n); \ - if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << n)) \ + if (IXGBE_ATR_COMMON_HASH_KEY & BIT(n)) \ common_hash ^= lo_hash_dword >> n; \ - else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \ + else if (IXGBE_ATR_BUCKET_HASH_KEY & BIT(n)) \ bucket_hash ^= lo_hash_dword >> n; \ - else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << n)) \ + else if (IXGBE_ATR_SIGNATURE_HASH_KEY & BIT(n)) \ sig_hash ^= lo_hash_dword << (16 - n); \ - if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << (n + 16))) \ + if (IXGBE_ATR_COMMON_HASH_KEY & BIT(n + 16)) \ common_hash ^= hi_hash_dword >> n; \ - else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \ + else if (IXGBE_ATR_BUCKET_HASH_KEY & BIT(n + 16)) \ bucket_hash ^= hi_hash_dword >> n; \ - else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << (n + 16))) \ + else if (IXGBE_ATR_SIGNATURE_HASH_KEY & BIT(n 
+ 16)) \ sig_hash ^= hi_hash_dword << (16 - n); \ } while (0) @@ -1440,9 +1440,9 @@ s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw, #define IXGBE_COMPUTE_BKT_HASH_ITERATION(_n) \ do { \ u32 n = (_n); \ - if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \ + if (IXGBE_ATR_BUCKET_HASH_KEY & BIT(n)) \ bucket_hash ^= lo_hash_dword >> n; \ - if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \ + if (IXGBE_ATR_BUCKET_HASH_KEY & BIT(n + 16)) \ bucket_hash ^= hi_hash_dword >> n; \ } while (0) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c index c9dffa6101b8..902d2061ce73 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c @@ -825,8 +825,8 @@ s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw) */ eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >> IXGBE_EEC_SIZE_SHIFT); - eeprom->word_size = 1 << (eeprom_size + - IXGBE_EEPROM_WORD_SIZE_SHIFT); + eeprom->word_size = BIT(eeprom_size + + IXGBE_EEPROM_WORD_SIZE_SHIFT); } if (eec & IXGBE_EEC_ADDR_SIZE) @@ -1502,7 +1502,7 @@ static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data, * Mask is used to shift "count" bits of "data" out to the EEPROM * one bit at a time. Determine the starting bit based on count */ - mask = 0x01 << (count - 1); + mask = BIT(count - 1); for (i = 0; i < count; i++) { /* @@ -1991,7 +1991,7 @@ static void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr) */ vector_reg = (vector >> 5) & 0x7F; vector_bit = vector & 0x1F; - hw->mac.mta_shadow[vector_reg] |= (1 << vector_bit); + hw->mac.mta_shadow[vector_reg] |= BIT(vector_bit); } /** @@ -2921,10 +2921,10 @@ s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq) mpsar_hi = 0; } } else if (vmdq < 32) { - mpsar_lo &= ~(1 << vmdq); + mpsar_lo &= ~BIT(vmdq); IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo); } else { - mpsar_hi &= ~(1 << (vmdq - 32)); + mpsar_hi &= ~BIT(vmdq - 32); IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi); } @@ -2953,11 +2953,11 @@ s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq) if (vmdq < 32) { mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar)); - mpsar |= 1 << vmdq; + mpsar |= BIT(vmdq); IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar); } else { mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar)); - mpsar |= 1 << (vmdq - 32); + mpsar |= BIT(vmdq - 32); IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar); } return 0; @@ -2978,11 +2978,11 @@ s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq) u32 rar = hw->mac.san_mac_rar_index; if (vmdq < 32) { - IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 1 << vmdq); + IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), BIT(vmdq)); IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0); } else { IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0); - IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 1 << (vmdq - 32)); + IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), BIT(vmdq - 32)); } return 0; @@ -3082,7 +3082,7 @@ s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind, * bits[4-0]: which bit in the register */ regidx = vlan / 32; - vfta_delta = 1 << (vlan % 32); + vfta_delta = BIT(vlan % 32); vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(regidx)); /* vfta_delta represents the difference between the current value @@ -3113,12 +3113,12 @@ s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind, bits = IXGBE_READ_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32)); /* set the pool bit */ - bits |= 1 << (vind % 32); + bits |= BIT(vind % 32); if (vlan_on) goto vlvf_update; /* clear the 
pool bit */ - bits ^= 1 << (vind % 32); + bits ^= BIT(vind % 32); if (!bits && !IXGBE_READ_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + 1 - vind / 32))) { @@ -3349,9 +3349,9 @@ void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf) pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg)); if (enable) - pfvfspoof |= (1 << vf_target_shift); + pfvfspoof |= BIT(vf_target_shift); else - pfvfspoof &= ~(1 << vf_target_shift); + pfvfspoof &= ~BIT(vf_target_shift); IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof); } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c index f8fb2acc2632..072ef3b5fc61 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c @@ -186,7 +186,7 @@ void ixgbe_dcb_unpack_pfc(struct ixgbe_dcb_config *cfg, u8 *pfc_en) for (*pfc_en = 0, tc = 0; tc < MAX_TRAFFIC_CLASS; tc++) { if (tc_config[tc].dcb_pfc != pfc_disabled) - *pfc_en |= 1 << tc; + *pfc_en |= BIT(tc); } } @@ -232,7 +232,7 @@ void ixgbe_dcb_unpack_prio(struct ixgbe_dcb_config *cfg, int direction, u8 ixgbe_dcb_get_tc_from_up(struct ixgbe_dcb_config *cfg, int direction, u8 up) { struct tc_configuration *tc_config = &cfg->tc_config[0]; - u8 prio_mask = 1 << up; + u8 prio_mask = BIT(up); u8 tc = cfg->num_tcs.pg_tcs; /* If tc is 0 then DCB is likely not enabled or supported */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c index d3ba63f9ad37..b79e93a5b699 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c @@ -210,7 +210,7 @@ s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, u8 pfc_en) /* Configure PFC Tx thresholds per TC */ for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { - if (!(pfc_en & (1 << i))) { + if (!(pfc_en & BIT(i))) { IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), 0); IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), 0); continue; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c index b5cc989a3d23..1011d644978f 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c @@ -248,7 +248,7 @@ s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc) int enabled = 0; for (j = 0; j < MAX_USER_PRIORITY; j++) { - if ((prio_tc[j] == i) && (pfc_en & (1 << j))) { + if ((prio_tc[j] == i) && (pfc_en & BIT(j))) { enabled = 1; break; } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c index 2707bda37418..b8fc3cfec831 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c @@ -62,7 +62,7 @@ static int ixgbe_copy_dcb_cfg(struct ixgbe_adapter *adapter, int tc_max) }; u8 up = dcb_getapp(adapter->netdev, &app); - if (up && !(up & (1 << adapter->fcoe.up))) + if (up && !(up & BIT(adapter->fcoe.up))) changes |= BIT_APP_UPCHG; #endif @@ -657,7 +657,7 @@ static int ixgbe_dcbnl_ieee_setapp(struct net_device *dev, app->protocol == ETH_P_FCOE) { u8 app_mask = dcb_ieee_getapp_mask(dev, app); - if (app_mask & (1 << adapter->fcoe.up)) + if (app_mask & BIT(adapter->fcoe.up)) return 0; adapter->fcoe.up = app->priority; @@ -700,7 +700,7 @@ static int ixgbe_dcbnl_ieee_delapp(struct net_device *dev, app->protocol == ETH_P_FCOE) { u8 app_mask = dcb_ieee_getapp_mask(dev, app); - if (app_mask & (1 << adapter->fcoe.up)) + if (app_mask & 
BIT(adapter->fcoe.up)) return 0; adapter->fcoe.up = app_mask ? diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index 9f76be1431b1..d3efcb4fecce 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -1586,7 +1586,7 @@ static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data) /* Test each interrupt */ for (; i < 10; i++) { /* Interrupt to test */ - mask = 1 << i; + mask = BIT(i); if (!shared_int) { /* @@ -3014,14 +3014,14 @@ static int ixgbe_get_ts_info(struct net_device *dev, info->phc_index = -1; info->tx_types = - (1 << HWTSTAMP_TX_OFF) | - (1 << HWTSTAMP_TX_ON); + BIT(HWTSTAMP_TX_OFF) | + BIT(HWTSTAMP_TX_ON); info->rx_filters = - (1 << HWTSTAMP_FILTER_NONE) | - (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) | - (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) | - (1 << HWTSTAMP_FILTER_PTP_V2_EVENT); + BIT(HWTSTAMP_FILTER_NONE) | + BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) | + BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) | + BIT(HWTSTAMP_FILTER_PTP_V2_EVENT); break; default: return ethtool_op_get_ts_info(dev, info); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 09d7c8bdcd5c..b41a26fe57de 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -2245,7 +2245,7 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter) /* Populate MSIX to EITR Select */ if (adapter->num_vfs > 32) { - u32 eitrsel = (1 << (adapter->num_vfs - 32)) - 1; + u32 eitrsel = BIT(adapter->num_vfs - 32) - 1; IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, eitrsel); } @@ -2884,7 +2884,7 @@ int ixgbe_poll(struct napi_struct *napi, int budget) if (adapter->rx_itr_setting & 1) ixgbe_set_itr(q_vector); if (!test_bit(__IXGBE_DOWN, &adapter->state)) - ixgbe_irq_enable_queues(adapter, ((u64)1 << q_vector->v_idx)); + ixgbe_irq_enable_queues(adapter, BIT_ULL(q_vector->v_idx)); return 0; } @@ -3177,15 +3177,15 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter, * currently 40. */ if (!ring->q_vector || (ring->q_vector->itr < IXGBE_100K_ITR)) - txdctl |= (1 << 16); /* WTHRESH = 1 */ + txdctl |= 1u << 16; /* WTHRESH = 1 */ else - txdctl |= (8 << 16); /* WTHRESH = 8 */ + txdctl |= 8u << 16; /* WTHRESH = 8 */ /* * Setting PTHRESH to 32 both improves performance * and avoids a TX hang with DFP enabled */ - txdctl |= (1 << 8) | /* HTHRESH = 1 */ + txdctl |= (1u << 8) | /* HTHRESH = 1 */ 32; /* PTHRESH = 32 */ /* reinitialize flowdirector state */ @@ -3737,9 +3737,9 @@ static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter) return; if (rss_i > 3) - psrtype |= 2 << 29; + psrtype |= 2u << 29; else if (rss_i > 1) - psrtype |= 1 << 29; + psrtype |= 1u << 29; for_each_set_bit(pool, &adapter->fwd_bitmask, 32) IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(pool)), psrtype); @@ -3994,7 +3994,7 @@ void ixgbe_update_pf_promisc_vlvf(struct ixgbe_adapter *adapter, u32 vid) * entry other than the PF. */ word = idx * 2 + (VMDQ_P(0) / 32); - bits = ~(1 << (VMDQ_P(0)) % 32); + bits = ~BIT(VMDQ_P(0) % 32); bits &= IXGBE_READ_REG(hw, IXGBE_VLVFB(word)); /* Disable the filter so this falls into the default pool. 
*/ @@ -4129,7 +4129,7 @@ static void ixgbe_vlan_promisc_enable(struct ixgbe_adapter *adapter) u32 reg_offset = IXGBE_VLVFB(i * 2 + VMDQ_P(0) / 32); u32 vlvfb = IXGBE_READ_REG(hw, reg_offset); - vlvfb |= 1 << (VMDQ_P(0) % 32); + vlvfb |= BIT(VMDQ_P(0) % 32); IXGBE_WRITE_REG(hw, reg_offset, vlvfb); } @@ -4159,7 +4159,7 @@ static void ixgbe_scrub_vfta(struct ixgbe_adapter *adapter, u32 vfta_offset) if (vlvf) { /* record VLAN ID in VFTA */ - vfta[(vid - vid_start) / 32] |= 1 << (vid % 32); + vfta[(vid - vid_start) / 32] |= BIT(vid % 32); /* if PF is part of this then continue */ if (test_bit(vid, adapter->active_vlans)) @@ -4168,7 +4168,7 @@ static void ixgbe_scrub_vfta(struct ixgbe_adapter *adapter, u32 vfta_offset) /* remove PF from the pool */ word = i * 2 + VMDQ_P(0) / 32; - bits = ~(1 << (VMDQ_P(0) % 32)); + bits = ~BIT(VMDQ_P(0) % 32); bits &= IXGBE_READ_REG(hw, IXGBE_VLVFB(word)); IXGBE_WRITE_REG(hw, IXGBE_VLVFB(word), bits); } @@ -4862,9 +4862,9 @@ static void ixgbe_fwd_psrtype(struct ixgbe_fwd_adapter *vadapter) return; if (rss_i > 3) - psrtype |= 2 << 29; + psrtype |= 2u << 29; else if (rss_i > 1) - psrtype |= 1 << 29; + psrtype |= 1u << 29; IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(pool)), psrtype); } @@ -4928,7 +4928,7 @@ static void ixgbe_disable_fwd_ring(struct ixgbe_fwd_adapter *vadapter, /* shutdown specific queue receive and wait for dma to settle */ ixgbe_disable_rx_queue(adapter, rx_ring); usleep_range(10000, 20000); - ixgbe_irq_disable_queues(adapter, ((u64)1 << index)); + ixgbe_irq_disable_queues(adapter, BIT_ULL(index)); ixgbe_clean_rx_ring(rx_ring); rx_ring->l2_accel_priv = NULL; } @@ -6645,7 +6645,7 @@ static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter) for (i = 0; i < adapter->num_q_vectors; i++) { struct ixgbe_q_vector *qv = adapter->q_vector[i]; if (qv->rx.ring || qv->tx.ring) - eics |= ((u64)1 << i); + eics |= BIT_ULL(i); } } @@ -9192,7 +9192,7 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_ioremap; } /* If EEPROM is valid (bit 8 = 1), use default otherwise use bit bang */ - if (!(eec & (1 << 8))) + if (!(eec & BIT(8))) hw->eeprom.ops.read = &ixgbe_read_eeprom_bit_bang_generic; /* PHY */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c index b2125e358f7b..a0cb84381cd0 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c @@ -314,8 +314,8 @@ static s32 ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number) break; } - if (vflre & (1 << vf_shift)) { - IXGBE_WRITE_REG(hw, IXGBE_VFLREC(reg_offset), (1 << vf_shift)); + if (vflre & BIT(vf_shift)) { + IXGBE_WRITE_REG(hw, IXGBE_VFLREC(reg_offset), BIT(vf_shift)); hw->mbx.stats.rsts++; return 0; } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h index cdf4c3800801..cc735ec3e045 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h @@ -107,7 +107,7 @@ #define IXGBE_PE 0xE0 /* Port expander addr */ #define IXGBE_PE_OUTPUT 1 /* Output reg offset */ #define IXGBE_PE_CONFIG 3 /* Config reg offset */ -#define IXGBE_PE_BIT1 (1 << 1) +#define IXGBE_PE_BIT1 BIT(1) /* Flow control defines */ #define IXGBE_TAF_SYM_PAUSE 0x400 diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c index bdc8fdcc07a5..e5431bfe3339 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c +++ 
b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c @@ -396,7 +396,7 @@ static int ixgbe_ptp_adjfreq_82599(struct ptp_clock_info *ptp, s32 ppb) if (incval > 0x00FFFFFFULL) e_dev_warn("PTP ppb adjusted SYSTIME rate overflowed!\n"); IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, - (1 << IXGBE_INCPER_SHIFT_82599) | + BIT(IXGBE_INCPER_SHIFT_82599) | ((u32)incval & 0x00FFFFFFUL)); break; default: @@ -1114,7 +1114,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter) incval >>= IXGBE_INCVAL_SHIFT_82599; cc.shift -= IXGBE_INCVAL_SHIFT_82599; IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, - (1 << IXGBE_INCPER_SHIFT_82599) | incval); + BIT(IXGBE_INCPER_SHIFT_82599) | incval); break; default: /* other devices aren't supported */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c index e9f2558e65fc..c5caacdd193d 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c @@ -406,7 +406,7 @@ static int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter, vector_reg = (vfinfo->vf_mc_hashes[i] >> 5) & 0x7F; vector_bit = vfinfo->vf_mc_hashes[i] & 0x1F; mta_reg = IXGBE_READ_REG(hw, IXGBE_MTA(vector_reg)); - mta_reg |= (1 << vector_bit); + mta_reg |= BIT(vector_bit); IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg); } vmolr |= IXGBE_VMOLR_ROMPE; @@ -433,7 +433,7 @@ void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter) vector_reg = (vfinfo->vf_mc_hashes[j] >> 5) & 0x7F; vector_bit = vfinfo->vf_mc_hashes[j] & 0x1F; mta_reg = IXGBE_READ_REG(hw, IXGBE_MTA(vector_reg)); - mta_reg |= (1 << vector_bit); + mta_reg |= BIT(vector_bit); IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg); } @@ -536,9 +536,9 @@ static s32 ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf) /* enable or disable receive depending on error */ vfre = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset)); if (err) - vfre &= ~(1 << vf_shift); + vfre &= ~BIT(vf_shift); else - vfre |= 1 << vf_shift; + vfre |= BIT(vf_shift); IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), vfre); if (err) { @@ -592,8 +592,8 @@ static void ixgbe_clear_vf_vlans(struct ixgbe_adapter *adapter, u32 vf) u32 vlvfb_mask, pool_mask, i; /* create mask for VF and other pools */ - pool_mask = ~(1 << (VMDQ_P(0) % 32)); - vlvfb_mask = 1 << (vf % 32); + pool_mask = ~BIT(VMDQ_P(0) % 32); + vlvfb_mask = BIT(vf % 32); /* post increment loop, covers VLVF_ENTRIES - 1 to 0 */ for (i = IXGBE_VLVF_ENTRIES; i--;) { @@ -629,7 +629,7 @@ static void ixgbe_clear_vf_vlans(struct ixgbe_adapter *adapter, u32 vf) goto update_vlvfb; vid = vlvf & VLAN_VID_MASK; - mask = 1 << (vid % 32); + mask = BIT(vid % 32); /* clear bit from VFTA */ vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(vid / 32)); @@ -813,7 +813,7 @@ static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf) /* enable transmit for vf */ reg = IXGBE_READ_REG(hw, IXGBE_VFTE(reg_offset)); - reg |= 1 << vf_shift; + reg |= BIT(vf_shift); IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), reg); /* force drop enable for all VF Rx queues */ @@ -821,7 +821,7 @@ static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf) /* enable receive for vf */ reg = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset)); - reg |= 1 << vf_shift; + reg |= BIT(vf_shift); /* * The 82599 cannot support a mix of jumbo and non-jumbo PF/VFs. 
* For more info take a look at ixgbe_set_vf_lpe @@ -837,7 +837,7 @@ static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf) #endif /* CONFIG_FCOE */ if (pf_max_frame > ETH_FRAME_LEN) - reg &= ~(1 << vf_shift); + reg &= ~BIT(vf_shift); } IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), reg); @@ -846,7 +846,7 @@ static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf) /* Enable counting of spoofed packets in the SSVPC register */ reg = IXGBE_READ_REG(hw, IXGBE_VMECM(reg_offset)); - reg |= (1 << vf_shift); + reg |= BIT(vf_shift); IXGBE_WRITE_REG(hw, IXGBE_VMECM(reg_offset), reg); /* diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h index 8e7decb3e078..7af451460374 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h @@ -697,16 +697,16 @@ struct ixgbe_thermal_sensor_data { #define IXGBE_FCDMARW 0x02420 /* FC Receive DMA RW */ #define IXGBE_FCINVST0 0x03FC0 /* FC Invalid DMA Context Status Reg 0 */ #define IXGBE_FCINVST(_i) (IXGBE_FCINVST0 + ((_i) * 4)) -#define IXGBE_FCBUFF_VALID (1 << 0) /* DMA Context Valid */ -#define IXGBE_FCBUFF_BUFFSIZE (3 << 3) /* User Buffer Size */ -#define IXGBE_FCBUFF_WRCONTX (1 << 7) /* 0: Initiator, 1: Target */ +#define IXGBE_FCBUFF_VALID BIT(0) /* DMA Context Valid */ +#define IXGBE_FCBUFF_BUFFSIZE (3u << 3) /* User Buffer Size */ +#define IXGBE_FCBUFF_WRCONTX BIT(7) /* 0: Initiator, 1: Target */ #define IXGBE_FCBUFF_BUFFCNT 0x0000ff00 /* Number of User Buffers */ #define IXGBE_FCBUFF_OFFSET 0xffff0000 /* User Buffer Offset */ #define IXGBE_FCBUFF_BUFFSIZE_SHIFT 3 #define IXGBE_FCBUFF_BUFFCNT_SHIFT 8 #define IXGBE_FCBUFF_OFFSET_SHIFT 16 -#define IXGBE_FCDMARW_WE (1 << 14) /* Write enable */ -#define IXGBE_FCDMARW_RE (1 << 15) /* Read enable */ +#define IXGBE_FCDMARW_WE BIT(14) /* Write enable */ +#define IXGBE_FCDMARW_RE BIT(15) /* Read enable */ #define IXGBE_FCDMARW_FCOESEL 0x000001ff /* FC X_ID: 11 bits */ #define IXGBE_FCDMARW_LASTSIZE 0xffff0000 /* Last User Buffer Size */ #define IXGBE_FCDMARW_LASTSIZE_SHIFT 16 @@ -723,23 +723,23 @@ struct ixgbe_thermal_sensor_data { #define IXGBE_FCFLT 0x05108 /* FC FLT Context */ #define IXGBE_FCFLTRW 0x05110 /* FC Filter RW Control */ #define IXGBE_FCPARAM 0x051d8 /* FC Offset Parameter */ -#define IXGBE_FCFLT_VALID (1 << 0) /* Filter Context Valid */ -#define IXGBE_FCFLT_FIRST (1 << 1) /* Filter First */ +#define IXGBE_FCFLT_VALID BIT(0) /* Filter Context Valid */ +#define IXGBE_FCFLT_FIRST BIT(1) /* Filter First */ #define IXGBE_FCFLT_SEQID 0x00ff0000 /* Sequence ID */ #define IXGBE_FCFLT_SEQCNT 0xff000000 /* Sequence Count */ -#define IXGBE_FCFLTRW_RVALDT (1 << 13) /* Fast Re-Validation */ -#define IXGBE_FCFLTRW_WE (1 << 14) /* Write Enable */ -#define IXGBE_FCFLTRW_RE (1 << 15) /* Read Enable */ +#define IXGBE_FCFLTRW_RVALDT BIT(13) /* Fast Re-Validation */ +#define IXGBE_FCFLTRW_WE BIT(14) /* Write Enable */ +#define IXGBE_FCFLTRW_RE BIT(15) /* Read Enable */ /* FCoE Receive Control */ #define IXGBE_FCRXCTRL 0x05100 /* FC Receive Control */ -#define IXGBE_FCRXCTRL_FCOELLI (1 << 0) /* Low latency interrupt */ -#define IXGBE_FCRXCTRL_SAVBAD (1 << 1) /* Save Bad Frames */ -#define IXGBE_FCRXCTRL_FRSTRDH (1 << 2) /* EN 1st Read Header */ -#define IXGBE_FCRXCTRL_LASTSEQH (1 << 3) /* EN Last Header in Seq */ -#define IXGBE_FCRXCTRL_ALLH (1 << 4) /* EN All Headers */ -#define IXGBE_FCRXCTRL_FRSTSEQH (1 << 5) /* EN 1st Seq. 
Header */ -#define IXGBE_FCRXCTRL_ICRC (1 << 6) /* Ignore Bad FC CRC */ -#define IXGBE_FCRXCTRL_FCCRCBO (1 << 7) /* FC CRC Byte Ordering */ +#define IXGBE_FCRXCTRL_FCOELLI BIT(0) /* Low latency interrupt */ +#define IXGBE_FCRXCTRL_SAVBAD BIT(1) /* Save Bad Frames */ +#define IXGBE_FCRXCTRL_FRSTRDH BIT(2) /* EN 1st Read Header */ +#define IXGBE_FCRXCTRL_LASTSEQH BIT(3) /* EN Last Header in Seq */ +#define IXGBE_FCRXCTRL_ALLH BIT(4) /* EN All Headers */ +#define IXGBE_FCRXCTRL_FRSTSEQH BIT(5) /* EN 1st Seq. Header */ +#define IXGBE_FCRXCTRL_ICRC BIT(6) /* Ignore Bad FC CRC */ +#define IXGBE_FCRXCTRL_FCCRCBO BIT(7) /* FC CRC Byte Ordering */ #define IXGBE_FCRXCTRL_FCOEVER 0x00000f00 /* FCoE Version: 4 bits */ #define IXGBE_FCRXCTRL_FCOEVER_SHIFT 8 /* FCoE Redirection */ @@ -1256,20 +1256,20 @@ struct ixgbe_thermal_sensor_data { #define IXGBE_DCA_RXCTRL_CPUID_MASK 0x0000001F /* Rx CPUID Mask */ #define IXGBE_DCA_RXCTRL_CPUID_MASK_82599 0xFF000000 /* Rx CPUID Mask */ #define IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599 24 /* Rx CPUID Shift */ -#define IXGBE_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* DCA Rx Desc enable */ -#define IXGBE_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* DCA Rx Desc header enable */ -#define IXGBE_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* DCA Rx Desc payload enable */ -#define IXGBE_DCA_RXCTRL_DESC_RRO_EN (1 << 9) /* DCA Rx rd Desc Relax Order */ -#define IXGBE_DCA_RXCTRL_DATA_WRO_EN (1 << 13) /* Rx wr data Relax Order */ -#define IXGBE_DCA_RXCTRL_HEAD_WRO_EN (1 << 15) /* Rx wr header RO */ +#define IXGBE_DCA_RXCTRL_DESC_DCA_EN BIT(5) /* DCA Rx Desc enable */ +#define IXGBE_DCA_RXCTRL_HEAD_DCA_EN BIT(6) /* DCA Rx Desc header enable */ +#define IXGBE_DCA_RXCTRL_DATA_DCA_EN BIT(7) /* DCA Rx Desc payload enable */ +#define IXGBE_DCA_RXCTRL_DESC_RRO_EN BIT(9) /* DCA Rx rd Desc Relax Order */ +#define IXGBE_DCA_RXCTRL_DATA_WRO_EN BIT(13) /* Rx wr data Relax Order */ +#define IXGBE_DCA_RXCTRL_HEAD_WRO_EN BIT(15) /* Rx wr header RO */ #define IXGBE_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */ #define IXGBE_DCA_TXCTRL_CPUID_MASK_82599 0xFF000000 /* Tx CPUID Mask */ #define IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599 24 /* Tx CPUID Shift */ -#define IXGBE_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */ -#define IXGBE_DCA_TXCTRL_DESC_RRO_EN (1 << 9) /* Tx rd Desc Relax Order */ -#define IXGBE_DCA_TXCTRL_DESC_WRO_EN (1 << 11) /* Tx Desc writeback RO bit */ -#define IXGBE_DCA_TXCTRL_DATA_RRO_EN (1 << 13) /* Tx rd data Relax Order */ +#define IXGBE_DCA_TXCTRL_DESC_DCA_EN BIT(5) /* DCA Tx Desc enable */ +#define IXGBE_DCA_TXCTRL_DESC_RRO_EN BIT(9) /* Tx rd Desc Relax Order */ +#define IXGBE_DCA_TXCTRL_DESC_WRO_EN BIT(11) /* Tx Desc writeback RO bit */ +#define IXGBE_DCA_TXCTRL_DATA_RRO_EN BIT(13) /* Tx rd data Relax Order */ #define IXGBE_DCA_MAX_QUEUES_82598 16 /* DCA regs only on 16 queues */ /* MSCA Bit Masks */ @@ -1748,7 +1748,7 @@ enum { #define IXGBE_ETQF_TX_ANTISPOOF 0x20000000 /* bit 29 */ #define IXGBE_ETQF_1588 0x40000000 /* bit 30 */ #define IXGBE_ETQF_FILTER_EN 0x80000000 /* bit 31 */ -#define IXGBE_ETQF_POOL_ENABLE (1 << 26) /* bit 26 */ +#define IXGBE_ETQF_POOL_ENABLE BIT(26) /* bit 26 */ #define IXGBE_ETQF_POOL_SHIFT 20 #define IXGBE_ETQS_RX_QUEUE 0x007F0000 /* bits 22:16 */ @@ -1874,20 +1874,20 @@ enum { #define IXGBE_AUTOC_1G_PMA_PMD_SHIFT 9 #define IXGBE_AUTOC_10G_PMA_PMD_MASK 0x00000180 #define IXGBE_AUTOC_10G_PMA_PMD_SHIFT 7 -#define IXGBE_AUTOC_10G_XAUI (0x0 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT) -#define IXGBE_AUTOC_10G_KX4 (0x1 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT) -#define IXGBE_AUTOC_10G_CX4 
(0x2 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT) -#define IXGBE_AUTOC_1G_BX (0x0 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT) -#define IXGBE_AUTOC_1G_KX (0x1 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT) -#define IXGBE_AUTOC_1G_SFI (0x0 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT) -#define IXGBE_AUTOC_1G_KX_BX (0x1 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT) +#define IXGBE_AUTOC_10G_XAUI (0u << IXGBE_AUTOC_10G_PMA_PMD_SHIFT) +#define IXGBE_AUTOC_10G_KX4 (1u << IXGBE_AUTOC_10G_PMA_PMD_SHIFT) +#define IXGBE_AUTOC_10G_CX4 (2u << IXGBE_AUTOC_10G_PMA_PMD_SHIFT) +#define IXGBE_AUTOC_1G_BX (0u << IXGBE_AUTOC_1G_PMA_PMD_SHIFT) +#define IXGBE_AUTOC_1G_KX (1u << IXGBE_AUTOC_1G_PMA_PMD_SHIFT) +#define IXGBE_AUTOC_1G_SFI (0u << IXGBE_AUTOC_1G_PMA_PMD_SHIFT) +#define IXGBE_AUTOC_1G_KX_BX (1u << IXGBE_AUTOC_1G_PMA_PMD_SHIFT) #define IXGBE_AUTOC2_UPPER_MASK 0xFFFF0000 #define IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK 0x00030000 #define IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT 16 -#define IXGBE_AUTOC2_10G_KR (0x0 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT) -#define IXGBE_AUTOC2_10G_XFI (0x1 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT) -#define IXGBE_AUTOC2_10G_SFI (0x2 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT) +#define IXGBE_AUTOC2_10G_KR (0u << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT) +#define IXGBE_AUTOC2_10G_XFI (1u << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT) +#define IXGBE_AUTOC2_10G_SFI (2u << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT) #define IXGBE_AUTOC2_LINK_DISABLE_ON_D3_MASK 0x50000000 #define IXGBE_AUTOC2_LINK_DISABLE_MASK 0x70000000 @@ -2840,15 +2840,15 @@ struct ixgbe_adv_tx_context_desc { #define IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP 0x00002000 /* IPSec Type ESP */ #define IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN 0x00004000/* ESP Encrypt Enable */ #define IXGBE_ADVTXT_TUCMD_FCOE 0x00008000 /* FCoE Frame Type */ -#define IXGBE_ADVTXD_FCOEF_EOF_MASK (0x3 << 10) /* FC EOF index */ -#define IXGBE_ADVTXD_FCOEF_SOF ((1 << 2) << 10) /* FC SOF index */ -#define IXGBE_ADVTXD_FCOEF_PARINC ((1 << 3) << 10) /* Rel_Off in F_CTL */ -#define IXGBE_ADVTXD_FCOEF_ORIE ((1 << 4) << 10) /* Orientation: End */ -#define IXGBE_ADVTXD_FCOEF_ORIS ((1 << 5) << 10) /* Orientation: Start */ -#define IXGBE_ADVTXD_FCOEF_EOF_N (0x0 << 10) /* 00: EOFn */ -#define IXGBE_ADVTXD_FCOEF_EOF_T (0x1 << 10) /* 01: EOFt */ -#define IXGBE_ADVTXD_FCOEF_EOF_NI (0x2 << 10) /* 10: EOFni */ -#define IXGBE_ADVTXD_FCOEF_EOF_A (0x3 << 10) /* 11: EOFa */ +#define IXGBE_ADVTXD_FCOEF_SOF (BIT(2) << 10) /* FC SOF index */ +#define IXGBE_ADVTXD_FCOEF_PARINC (BIT(3) << 10) /* Rel_Off in F_CTL */ +#define IXGBE_ADVTXD_FCOEF_ORIE (BIT(4) << 10) /* Orientation: End */ +#define IXGBE_ADVTXD_FCOEF_ORIS (BIT(5) << 10) /* Orientation: Start */ +#define IXGBE_ADVTXD_FCOEF_EOF_N (0u << 10) /* 00: EOFn */ +#define IXGBE_ADVTXD_FCOEF_EOF_T (1u << 10) /* 01: EOFt */ +#define IXGBE_ADVTXD_FCOEF_EOF_NI (2u << 10) /* 10: EOFni */ +#define IXGBE_ADVTXD_FCOEF_EOF_A (3u << 10) /* 11: EOFa */ +#define IXGBE_ADVTXD_FCOEF_EOF_MASK (3u << 10) /* FC EOF index */ #define IXGBE_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */ #define IXGBE_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */ @@ -3583,7 +3583,7 @@ struct ixgbe_info { #define IXGBE_FUSES0_GROUP(_i) (0x11158 + ((_i) * 4)) #define IXGBE_FUSES0_300MHZ BIT(5) -#define IXGBE_FUSES0_REV_MASK (3 << 6) +#define IXGBE_FUSES0_REV_MASK (3u << 6) #define IXGBE_KRM_PORT_CAR_GEN_CTRL(P) ((P) ? 0x8010 : 0x4010) #define IXGBE_KRM_LINK_CTRL_1(P) ((P) ? 0x820C : 0x420C) @@ -3597,25 +3597,25 @@ struct ixgbe_info { #define IXGBE_KRM_TX_COEFF_CTRL_1(P) ((P) ? 0x9520 : 0x5520) #define IXGBE_KRM_RX_ANA_CTL(P) ((P) ? 
0x9A00 : 0x5A00) -#define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_32B (1 << 9) -#define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_KRPCS (1 << 11) +#define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_32B BIT(9) +#define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_KRPCS BIT(11) -#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK (0x7 << 8) -#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G (2 << 8) -#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G (4 << 8) +#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK (7u << 8) +#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G (2u << 8) +#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G (4u << 8) #define IXGBE_KRM_LINK_CTRL_1_TETH_AN_SGMII_EN BIT(12) #define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CLAUSE_37_EN BIT(13) -#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_FEC_REQ (1 << 14) -#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_FEC (1 << 15) -#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX (1 << 16) -#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR (1 << 18) -#define IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KX (1 << 24) -#define IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KR (1 << 26) -#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE (1 << 29) -#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART (1 << 31) +#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_FEC_REQ BIT(14) +#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_FEC BIT(15) +#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX BIT(16) +#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR BIT(18) +#define IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KX BIT(24) +#define IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KR BIT(26) +#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE BIT(29) +#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART BIT(31) -#define IXGBE_KRM_AN_CNTL_1_SYM_PAUSE (1 << 28) -#define IXGBE_KRM_AN_CNTL_1_ASM_PAUSE (1 << 29) +#define IXGBE_KRM_AN_CNTL_1_SYM_PAUSE BIT(28) +#define IXGBE_KRM_AN_CNTL_1_ASM_PAUSE BIT(29) #define IXGBE_KRM_AN_CNTL_8_LINEAR BIT(0) #define IXGBE_KRM_AN_CNTL_8_LIMITING BIT(1) @@ -3623,28 +3623,28 @@ struct ixgbe_info { #define IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_100_D BIT(12) #define IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_10_D BIT(19) -#define IXGBE_KRM_DSP_TXFFE_STATE_C0_EN (1 << 6) -#define IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN (1 << 15) -#define IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN (1 << 16) +#define IXGBE_KRM_DSP_TXFFE_STATE_C0_EN BIT(6) +#define IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN BIT(15) +#define IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN BIT(16) -#define IXGBE_KRM_RX_TRN_LINKUP_CTRL_CONV_WO_PROTOCOL (1 << 4) -#define IXGBE_KRM_RX_TRN_LINKUP_CTRL_PROTOCOL_BYPASS (1 << 2) +#define IXGBE_KRM_RX_TRN_LINKUP_CTRL_CONV_WO_PROTOCOL BIT(4) +#define IXGBE_KRM_RX_TRN_LINKUP_CTRL_PROTOCOL_BYPASS BIT(2) -#define IXGBE_KRM_PMD_DFX_BURNIN_TX_RX_KR_LB_MASK (0x3 << 16) +#define IXGBE_KRM_PMD_DFX_BURNIN_TX_RX_KR_LB_MASK (3u << 16) -#define IXGBE_KRM_TX_COEFF_CTRL_1_CMINUS1_OVRRD_EN (1 << 1) -#define IXGBE_KRM_TX_COEFF_CTRL_1_CPLUS1_OVRRD_EN (1 << 2) -#define IXGBE_KRM_TX_COEFF_CTRL_1_CZERO_EN (1 << 3) -#define IXGBE_KRM_TX_COEFF_CTRL_1_OVRRD_EN (1 << 31) +#define IXGBE_KRM_TX_COEFF_CTRL_1_CMINUS1_OVRRD_EN BIT(1) +#define IXGBE_KRM_TX_COEFF_CTRL_1_CPLUS1_OVRRD_EN BIT(2) +#define IXGBE_KRM_TX_COEFF_CTRL_1_CZERO_EN BIT(3) +#define IXGBE_KRM_TX_COEFF_CTRL_1_OVRRD_EN BIT(31) #define IXGBE_KX4_LINK_CNTL_1 0x4C -#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX (1 << 16) -#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX4 (1 << 17) -#define IXGBE_KX4_LINK_CNTL_1_TETH_EEE_CAP_KX (1 << 24) -#define IXGBE_KX4_LINK_CNTL_1_TETH_EEE_CAP_KX4 (1 << 25) -#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_ENABLE (1 << 29) -#define 
IXGBE_KX4_LINK_CNTL_1_TETH_FORCE_LINK_UP (1 << 30) -#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_RESTART (1 << 31) +#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX BIT(16) +#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX4 BIT(17) +#define IXGBE_KX4_LINK_CNTL_1_TETH_EEE_CAP_KX BIT(24) +#define IXGBE_KX4_LINK_CNTL_1_TETH_EEE_CAP_KX4 BIT(25) +#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_ENABLE BIT(29) +#define IXGBE_KX4_LINK_CNTL_1_TETH_FORCE_LINK_UP BIT(30) +#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_RESTART BIT(31) #define IXGBE_SB_IOSF_INDIRECT_CTRL 0x00011144 #define IXGBE_SB_IOSF_INDIRECT_DATA 0x00011148 @@ -3660,7 +3660,7 @@ struct ixgbe_info { #define IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT 28 #define IXGBE_SB_IOSF_CTRL_TARGET_SELECT_MASK 0x7 #define IXGBE_SB_IOSF_CTRL_BUSY_SHIFT 31 -#define IXGBE_SB_IOSF_CTRL_BUSY (1 << IXGBE_SB_IOSF_CTRL_BUSY_SHIFT) +#define IXGBE_SB_IOSF_CTRL_BUSY BIT(IXGBE_SB_IOSF_CTRL_BUSY_SHIFT) #define IXGBE_SB_IOSF_TARGET_KR_PHY 0 #define IXGBE_SB_IOSF_TARGET_KX4_UNIPHY 1 #define IXGBE_SB_IOSF_TARGET_KX4_PCS0 2 diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c index 40824d85d807..f2b1d48a16c3 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c @@ -214,8 +214,8 @@ s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw) eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw)); eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >> IXGBE_EEC_SIZE_SHIFT); - eeprom->word_size = 1 << (eeprom_size + - IXGBE_EEPROM_WORD_SIZE_SHIFT); + eeprom->word_size = BIT(eeprom_size + + IXGBE_EEPROM_WORD_SIZE_SHIFT); hw_dbg(hw, "Eeprom params: type = %d, size = %d\n", eeprom->type, eeprom->word_size); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c index a17e398d56b8..c8a4f5ef06c0 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c @@ -335,8 +335,8 @@ static s32 ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw) eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw)); eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >> IXGBE_EEC_SIZE_SHIFT); - eeprom->word_size = 1 << (eeprom_size + - IXGBE_EEPROM_WORD_SIZE_SHIFT); + eeprom->word_size = BIT(eeprom_size + + IXGBE_EEPROM_WORD_SIZE_SHIFT); hw_dbg(hw, "Eeprom params: type = %d, size = %d\n", eeprom->type, eeprom->word_size); @@ -2646,9 +2646,9 @@ static void ixgbe_set_ethertype_anti_spoofing_X550(struct ixgbe_hw *hw, pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg)); if (enable) - pfvfspoof |= (1 << vf_target_shift); + pfvfspoof |= BIT(vf_target_shift); else - pfvfspoof &= ~(1 << vf_target_shift); + pfvfspoof &= ~BIT(vf_target_shift); IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof); } From 3e973dc4b93da06e38b263c9bd7e239d8f3f251f Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Wed, 13 Apr 2016 16:08:23 -0700 Subject: [PATCH 0938/1649] ixgbe: resolve shift of negative value warning Make use of GENMASK instead of open coding the equivalent operation incorrectly. 
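For reference, GENMASK(h, l) in include/linux/bits.h takes the high bit as its first argument and sets bits h down to l. The simplified 32-bit user-space sketch below (not the kernel macro itself) shows how it can stand in for the old (~0) << vf_shift idiom without shifting a negative value; keep the argument order in mind when reading the hunk that follows.

#include <assert.h>
#include <stdint.h>

/* 32-bit simplification of the kernel's GENMASK(h, l): bits h..l set. */
#define GENMASK32(h, l) \
	((~UINT32_C(0) << (l)) & (~UINT32_C(0) >> (31 - (h))))

int main(void)
{
	unsigned int vf_shift = 8;	/* example value */

	/* Equivalent of the old (~0) << vf_shift, but fully unsigned. */
	assert(GENMASK32(31, vf_shift) == ~0u << vf_shift);
	assert(GENMASK32(7, 0) == 0xffu);
	return 0;
}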
Signed-off-by: Jacob Keller Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index b41a26fe57de..60592cfa5ca6 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -3766,9 +3766,9 @@ static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter) reg_offset = (VMDQ_P(0) >= 32) ? 1 : 0; /* Enable only the PF's pool for Tx/Rx */ - IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), (~0) << vf_shift); + IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), GENMASK(vf_shift, 31)); IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset ^ 1), reg_offset - 1); - IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (~0) << vf_shift); + IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), GENMASK(vf_shift, 31)); IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset ^ 1), reg_offset - 1); if (adapter->bridge_mode == BRIDGE_MODE_VEB) IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN); From 8d055cc0c8be92cd6a77193460117f0ab0a05286 Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Wed, 13 Apr 2016 16:08:24 -0700 Subject: [PATCH 0939/1649] ixgbevf: make use of BIT() macro to avoid shift of signed values Also cleanup a case where we're bit shifting a value into place, and use an unsigned constant. Make use of the unsigned postfix in places where BIT() macro is not appropriate. Signed-off-by: Jacob Keller Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbevf/defines.h | 22 +++++++++---------- drivers/net/ethernet/intel/ixgbevf/ethtool.c | 3 ++- drivers/net/ethernet/intel/ixgbevf/ixgbevf.h | 8 +++---- .../net/ethernet/intel/ixgbevf/ixgbevf_main.c | 18 +++++++-------- 4 files changed, 26 insertions(+), 25 deletions(-) diff --git a/drivers/net/ethernet/intel/ixgbevf/defines.h b/drivers/net/ethernet/intel/ixgbevf/defines.h index 58434584b16d..74901f7ef391 100644 --- a/drivers/net/ethernet/intel/ixgbevf/defines.h +++ b/drivers/net/ethernet/intel/ixgbevf/defines.h @@ -74,7 +74,7 @@ typedef u32 ixgbe_link_speed; #define IXGBE_RXDCTL_RLPML_EN 0x00008000 /* DCA Control */ -#define IXGBE_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */ +#define IXGBE_DCA_TXCTRL_TX_WB_RO_EN BIT(11) /* Tx Desc writeback RO bit */ /* PSRTYPE bit definitions */ #define IXGBE_PSRTYPE_TCPHDR 0x00000010 @@ -296,16 +296,16 @@ struct ixgbe_adv_tx_context_desc { #define IXGBE_TXDCTL_SWFLSH 0x04000000 /* Tx Desc. 
wr-bk flushing */ #define IXGBE_TXDCTL_WTHRESH_SHIFT 16 /* shift to WTHRESH bits */ -#define IXGBE_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* Rx Desc enable */ -#define IXGBE_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* Rx Desc header ena */ -#define IXGBE_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* Rx Desc payload ena */ -#define IXGBE_DCA_RXCTRL_DESC_RRO_EN (1 << 9) /* Rx rd Desc Relax Order */ -#define IXGBE_DCA_RXCTRL_DATA_WRO_EN (1 << 13) /* Rx wr data Relax Order */ -#define IXGBE_DCA_RXCTRL_HEAD_WRO_EN (1 << 15) /* Rx wr header RO */ +#define IXGBE_DCA_RXCTRL_DESC_DCA_EN BIT(5) /* Rx Desc enable */ +#define IXGBE_DCA_RXCTRL_HEAD_DCA_EN BIT(6) /* Rx Desc header ena */ +#define IXGBE_DCA_RXCTRL_DATA_DCA_EN BIT(7) /* Rx Desc payload ena */ +#define IXGBE_DCA_RXCTRL_DESC_RRO_EN BIT(9) /* Rx rd Desc Relax Order */ +#define IXGBE_DCA_RXCTRL_DATA_WRO_EN BIT(13) /* Rx wr data Relax Order */ +#define IXGBE_DCA_RXCTRL_HEAD_WRO_EN BIT(15) /* Rx wr header RO */ -#define IXGBE_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */ -#define IXGBE_DCA_TXCTRL_DESC_RRO_EN (1 << 9) /* Tx rd Desc Relax Order */ -#define IXGBE_DCA_TXCTRL_DESC_WRO_EN (1 << 11) /* Tx Desc writeback RO bit */ -#define IXGBE_DCA_TXCTRL_DATA_RRO_EN (1 << 13) /* Tx rd data Relax Order */ +#define IXGBE_DCA_TXCTRL_DESC_DCA_EN BIT(5) /* DCA Tx Desc enable */ +#define IXGBE_DCA_TXCTRL_DESC_RRO_EN BIT(9) /* Tx rd Desc Relax Order */ +#define IXGBE_DCA_TXCTRL_DESC_WRO_EN BIT(11) /* Tx Desc writeback RO bit */ +#define IXGBE_DCA_TXCTRL_DATA_RRO_EN BIT(13) /* Tx rd data Relax Order */ #endif /* _IXGBEVF_DEFINES_H_ */ diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c index 64d5c6e9343e..508e72c5f1c2 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c +++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c @@ -166,7 +166,8 @@ static void ixgbevf_get_regs(struct net_device *netdev, memset(p, 0, regs_len); - regs->version = (1 << 24) | hw->revision_id << 16 | hw->device_id; + /* generate a number suitable for ethtool's register version */ + regs->version = (1u << 24) | (hw->revision_id << 16) | hw->device_id; /* General Registers */ regs_buff[0] = IXGBE_READ_REG(hw, IXGBE_VFCTRL); diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h index 5ca3794aeb2e..aa28c4fb1a43 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h @@ -166,10 +166,10 @@ struct ixgbevf_ring { #define MAXIMUM_ETHERNET_VLAN_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN) -#define IXGBE_TX_FLAGS_CSUM (u32)(1) -#define IXGBE_TX_FLAGS_VLAN (u32)(1 << 1) -#define IXGBE_TX_FLAGS_TSO (u32)(1 << 2) -#define IXGBE_TX_FLAGS_IPV4 (u32)(1 << 3) +#define IXGBE_TX_FLAGS_CSUM BIT(0) +#define IXGBE_TX_FLAGS_VLAN BIT(1) +#define IXGBE_TX_FLAGS_TSO BIT(2) +#define IXGBE_TX_FLAGS_IPV4 BIT(3) #define IXGBE_TX_FLAGS_VLAN_MASK 0xffff0000 #define IXGBE_TX_FLAGS_VLAN_PRIO_MASK 0x0000e000 #define IXGBE_TX_FLAGS_VLAN_SHIFT 16 diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 007cbe094990..e4e6060e6197 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -1056,7 +1056,7 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget) if (!test_bit(__IXGBEVF_DOWN, &adapter->state) && !test_bit(__IXGBEVF_REMOVING, &adapter->state)) ixgbevf_irq_enable_queues(adapter, - 1 << q_vector->v_idx); + BIT(q_vector->v_idx)); return 0; } @@ 
-1158,14 +1158,14 @@ static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter) } /* add q_vector eims value to global eims_enable_mask */ - adapter->eims_enable_mask |= 1 << v_idx; + adapter->eims_enable_mask |= BIT(v_idx); ixgbevf_write_eitr(q_vector); } ixgbevf_set_ivar(adapter, -1, 1, v_idx); /* setup eims_other and add value to global eims_enable_mask */ - adapter->eims_other = 1 << v_idx; + adapter->eims_other = BIT(v_idx); adapter->eims_enable_mask |= adapter->eims_other; } @@ -1589,8 +1589,8 @@ static void ixgbevf_configure_tx_ring(struct ixgbevf_adapter *adapter, txdctl |= (8 << 16); /* WTHRESH = 8 */ /* Setting PTHRESH to 32 both improves performance */ - txdctl |= (1 << 8) | /* HTHRESH = 1 */ - 32; /* PTHRESH = 32 */ + txdctl |= (1u << 8) | /* HTHRESH = 1 */ + 32; /* PTHRESH = 32 */ clear_bit(__IXGBEVF_HANG_CHECK_ARMED, &ring->state); @@ -1646,7 +1646,7 @@ static void ixgbevf_setup_psrtype(struct ixgbevf_adapter *adapter) IXGBE_PSRTYPE_L2HDR; if (adapter->num_rx_queues > 1) - psrtype |= 1 << 29; + psrtype |= BIT(29); IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype); } @@ -2797,7 +2797,7 @@ static void ixgbevf_check_hang_subtask(struct ixgbevf_adapter *adapter) struct ixgbevf_q_vector *qv = adapter->q_vector[i]; if (qv->rx.ring || qv->tx.ring) - eics |= 1 << i; + eics |= BIT(i); } /* Cause software interrupt to ensure rings are cleaned */ @@ -3325,7 +3325,7 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring, /* mss_l4len_id: use 1 as index for TSO */ mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT; mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT; - mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT; + mss_l4len_idx |= (1u << IXGBE_ADVTXD_IDX_SHIFT); /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */ vlan_macip_lens = skb_network_header_len(skb); @@ -3422,7 +3422,7 @@ static void ixgbevf_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc, /* use index 1 context for TSO/FSO/FCOE */ if (tx_flags & IXGBE_TX_FLAGS_TSO) - olinfo_status |= cpu_to_le32(1 << IXGBE_ADVTXD_IDX_SHIFT); + olinfo_status |= cpu_to_le32(1u << IXGBE_ADVTXD_IDX_SHIFT); /* Check Context must be set if Tx switch is enabled, which it * always is for case where virtual functions are running From b83e30104bd9635765c562bd46b2e436350bd652 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Thu, 14 Apr 2016 17:19:31 -0400 Subject: [PATCH 0940/1649] ixgbe/ixgbevf: Add support for GSO partial This patch adds support for partial GSO segmentation in the case of tunnels. Specifically with this change the driver an perform segmentation as long as the frame either has IPv6 inner headers, or we are allowed to mangle the IP IDs on the inner header. This is needed because we will not be modifying any fields from the start of the start of the outer transport header to the start of the inner transport header as we are treating them like they are just a block of IP options. 
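The limits this patch introduces come from what a single Tx context descriptor can describe. The stand-alone sketch below is plain C, not driver code; it only restates the two checks using the 127/511 bounds and the MANGLEID condition taken from the hunks that follow.

#include <stdbool.h>
#include <stddef.h>

#define EX_MAX_MAC_HDR_LEN	127	/* bound used in the patch below */
#define EX_MAX_NETWORK_HDR_LEN	511

/* Offloads stay enabled only if both header regions fit the context
 * descriptor; otherwise checksum/TSO features are stripped for the frame.
 */
static bool ex_headers_fit_context_desc(size_t mac_hdr_len,
					size_t network_hdr_len)
{
	return mac_hdr_len <= EX_MAX_MAC_HDR_LEN &&
	       network_hdr_len <= EX_MAX_NETWORK_HDR_LEN;
}

/* With GSO partial the outer headers are replayed unchanged, so an IPv4
 * inner header is only segmented when the stack may mangle inner IP IDs.
 */
static bool ex_can_tunnel_tso(bool inner_is_ipv6, bool mangleid_allowed)
{
	return inner_is_ipv6 || mangleid_allowed;
}

int main(void)
{
	return ex_headers_fit_context_desc(14, 40) &&
	       ex_can_tunnel_tso(false, true) ? 0 : 1;
}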
Signed-off-by: Alexander Duyck Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 131 ++++++++++++------ .../net/ethernet/intel/ixgbevf/ixgbevf_main.c | 129 ++++++++++++----- 2 files changed, 179 insertions(+), 81 deletions(-) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 60592cfa5ca6..0ef4a15bb23e 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -7256,9 +7256,18 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring, struct ixgbe_tx_buffer *first, u8 *hdr_len) { + u32 vlan_macip_lens, type_tucmd, mss_l4len_idx; struct sk_buff *skb = first->skb; - u32 vlan_macip_lens, type_tucmd; - u32 mss_l4len_idx, l4len; + union { + struct iphdr *v4; + struct ipv6hdr *v6; + unsigned char *hdr; + } ip; + union { + struct tcphdr *tcp; + unsigned char *hdr; + } l4; + u32 paylen, l4_offset; int err; if (skb->ip_summed != CHECKSUM_PARTIAL) @@ -7271,46 +7280,52 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring, if (err < 0) return err; + ip.hdr = skb_network_header(skb); + l4.hdr = skb_checksum_start(skb); + /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP; - if (first->protocol == htons(ETH_P_IP)) { - struct iphdr *iph = ip_hdr(skb); - iph->tot_len = 0; - iph->check = 0; - tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, - iph->daddr, 0, - IPPROTO_TCP, - 0); + /* initialize outer IP header fields */ + if (ip.v4->version == 4) { + /* IP header will have to cancel out any data that + * is not a part of the outer IP header + */ + ip.v4->check = csum_fold(csum_add(lco_csum(skb), + csum_unfold(l4.tcp->check))); type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4; + + ip.v4->tot_len = 0; first->tx_flags |= IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_CSUM | IXGBE_TX_FLAGS_IPV4; - } else if (skb_is_gso_v6(skb)) { - ipv6_hdr(skb)->payload_len = 0; - tcp_hdr(skb)->check = - ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, - &ipv6_hdr(skb)->daddr, - 0, IPPROTO_TCP, 0); + } else { + ip.v6->payload_len = 0; first->tx_flags |= IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_CSUM; } - /* compute header lengths */ - l4len = tcp_hdrlen(skb); - *hdr_len = skb_transport_offset(skb) + l4len; + /* determine offset of inner transport header */ + l4_offset = l4.hdr - skb->data; + + /* compute length of segmentation header */ + *hdr_len = (l4.tcp->doff * 4) + l4_offset; + + /* remove payload length from inner checksum */ + paylen = skb->len - l4_offset; + csum_replace_by_diff(&l4.tcp->check, htonl(paylen)); /* update gso size and bytecount with header size */ first->gso_segs = skb_shinfo(skb)->gso_segs; first->bytecount += (first->gso_segs - 1) * *hdr_len; /* mss_l4len_id: use 0 as index for TSO */ - mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT; + mss_l4len_idx = (*hdr_len - l4_offset) << IXGBE_ADVTXD_L4LEN_SHIFT; mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT; /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */ - vlan_macip_lens = skb_network_header_len(skb); - vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT; + vlan_macip_lens = l4.hdr - ip.hdr; + vlan_macip_lens |= (ip.hdr - skb->data) << IXGBE_ADVTXD_MACLEN_SHIFT; vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0, type_tucmd, @@ -8898,17 +8913,36 @@ static void ixgbe_fwd_del(struct net_device *pdev, void *priv) kfree(fwd_adapter); } -#define IXGBE_MAX_TUNNEL_HDR_LEN 80 +#define IXGBE_MAX_MAC_HDR_LEN 
127 +#define IXGBE_MAX_NETWORK_HDR_LEN 511 + static netdev_features_t ixgbe_features_check(struct sk_buff *skb, struct net_device *dev, netdev_features_t features) { - if (!skb->encapsulation) - return features; + unsigned int network_hdr_len, mac_hdr_len; - if (unlikely(skb_inner_mac_header(skb) - skb_transport_header(skb) > - IXGBE_MAX_TUNNEL_HDR_LEN)) - return features & ~NETIF_F_CSUM_MASK; + /* Make certain the headers can be described by a context descriptor */ + mac_hdr_len = skb_network_header(skb) - skb->data; + if (unlikely(mac_hdr_len > IXGBE_MAX_MAC_HDR_LEN)) + return features & ~(NETIF_F_HW_CSUM | + NETIF_F_SCTP_CRC | + NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_TSO | + NETIF_F_TSO6); + + network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb); + if (unlikely(network_hdr_len > IXGBE_MAX_NETWORK_HDR_LEN)) + return features & ~(NETIF_F_HW_CSUM | + NETIF_F_SCTP_CRC | + NETIF_F_TSO | + NETIF_F_TSO6); + + /* We can only support IPV4 TSO in tunnels if we can mangle the + * inner IP ID field, so strip TSO if MANGLEID is not supported. + */ + if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID)) + features &= ~NETIF_F_TSO; return features; } @@ -9275,31 +9309,44 @@ skip_sriov: NETIF_F_TSO6 | NETIF_F_RXHASH | NETIF_F_RXCSUM | - NETIF_F_HW_CSUM | - NETIF_F_HW_VLAN_CTAG_TX | - NETIF_F_HW_VLAN_CTAG_RX | - NETIF_F_HW_VLAN_CTAG_FILTER; + NETIF_F_HW_CSUM; + +#define IXGBE_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \ + NETIF_F_GSO_GRE_CSUM | \ + NETIF_F_GSO_IPIP | \ + NETIF_F_GSO_SIT | \ + NETIF_F_GSO_UDP_TUNNEL | \ + NETIF_F_GSO_UDP_TUNNEL_CSUM) + + netdev->gso_partial_features = IXGBE_GSO_PARTIAL_FEATURES; + netdev->features |= NETIF_F_GSO_PARTIAL | + IXGBE_GSO_PARTIAL_FEATURES; if (hw->mac.type >= ixgbe_mac_82599EB) netdev->features |= NETIF_F_SCTP_CRC; /* copy netdev features into list of user selectable features */ - netdev->hw_features |= netdev->features; - netdev->hw_features |= NETIF_F_RXALL | + netdev->hw_features |= netdev->features | + NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_RXALL | NETIF_F_HW_L2FW_DOFFLOAD; if (hw->mac.type >= ixgbe_mac_82599EB) netdev->hw_features |= NETIF_F_NTUPLE | NETIF_F_HW_TC; - netdev->vlan_features |= NETIF_F_SG | - NETIF_F_TSO | - NETIF_F_TSO6 | - NETIF_F_HW_CSUM | - NETIF_F_SCTP_CRC; + if (pci_using_dac) + netdev->features |= NETIF_F_HIGHDMA; + /* set this bit last since it cannot be part of vlan_features */ + netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | + NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_TX; + + netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID; + netdev->hw_enc_features |= netdev->vlan_features; netdev->mpls_features |= NETIF_F_HW_CSUM; - netdev->hw_enc_features |= NETIF_F_HW_CSUM; netdev->priv_flags |= IFF_UNICAST_FLT; netdev->priv_flags |= IFF_SUPP_NOFCS; @@ -9330,10 +9377,6 @@ skip_sriov: NETIF_F_FCOE_MTU; } #endif /* IXGBE_FCOE */ - if (pci_using_dac) { - netdev->features |= NETIF_F_HIGHDMA; - netdev->vlan_features |= NETIF_F_HIGHDMA; - } if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) netdev->hw_features |= NETIF_F_LRO; diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index e4e6060e6197..eb91922bcb19 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -3272,9 +3272,18 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring, struct ixgbevf_tx_buffer *first, u8 *hdr_len) { + u32 vlan_macip_lens, type_tucmd, mss_l4len_idx; struct sk_buff *skb = 
first->skb; - u32 vlan_macip_lens, type_tucmd; - u32 mss_l4len_idx, l4len; + union { + struct iphdr *v4; + struct ipv6hdr *v6; + unsigned char *hdr; + } ip; + union { + struct tcphdr *tcp; + unsigned char *hdr; + } l4; + u32 paylen, l4_offset; int err; if (skb->ip_summed != CHECKSUM_PARTIAL) @@ -3287,49 +3296,53 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring, if (err < 0) return err; + ip.hdr = skb_network_header(skb); + l4.hdr = skb_checksum_start(skb); + /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP; - if (first->protocol == htons(ETH_P_IP)) { - struct iphdr *iph = ip_hdr(skb); - - iph->tot_len = 0; - iph->check = 0; - tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, - iph->daddr, 0, - IPPROTO_TCP, - 0); + /* initialize outer IP header fields */ + if (ip.v4->version == 4) { + /* IP header will have to cancel out any data that + * is not a part of the outer IP header + */ + ip.v4->check = csum_fold(csum_add(lco_csum(skb), + csum_unfold(l4.tcp->check))); type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4; + + ip.v4->tot_len = 0; first->tx_flags |= IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_CSUM | IXGBE_TX_FLAGS_IPV4; - } else if (skb_is_gso_v6(skb)) { - ipv6_hdr(skb)->payload_len = 0; - tcp_hdr(skb)->check = - ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, - &ipv6_hdr(skb)->daddr, - 0, IPPROTO_TCP, 0); + } else { + ip.v6->payload_len = 0; first->tx_flags |= IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_CSUM; } - /* compute header lengths */ - l4len = tcp_hdrlen(skb); - *hdr_len += l4len; - *hdr_len = skb_transport_offset(skb) + l4len; + /* determine offset of inner transport header */ + l4_offset = l4.hdr - skb->data; - /* update GSO size and bytecount with header size */ + /* compute length of segmentation header */ + *hdr_len = (l4.tcp->doff * 4) + l4_offset; + + /* remove payload length from inner checksum */ + paylen = skb->len - l4_offset; + csum_replace_by_diff(&l4.tcp->check, htonl(paylen)); + + /* update gso size and bytecount with header size */ first->gso_segs = skb_shinfo(skb)->gso_segs; first->bytecount += (first->gso_segs - 1) * *hdr_len; /* mss_l4len_id: use 1 as index for TSO */ - mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT; + mss_l4len_idx = (*hdr_len - l4_offset) << IXGBE_ADVTXD_L4LEN_SHIFT; mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT; mss_l4len_idx |= (1u << IXGBE_ADVTXD_IDX_SHIFT); /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */ - vlan_macip_lens = skb_network_header_len(skb); - vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT; + vlan_macip_lens = l4.hdr - ip.hdr; + vlan_macip_lens |= (ip.hdr - skb->data) << IXGBE_ADVTXD_MACLEN_SHIFT; vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens, @@ -3870,6 +3883,40 @@ static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev, return stats; } +#define IXGBEVF_MAX_MAC_HDR_LEN 127 +#define IXGBEVF_MAX_NETWORK_HDR_LEN 511 + +static netdev_features_t +ixgbevf_features_check(struct sk_buff *skb, struct net_device *dev, + netdev_features_t features) +{ + unsigned int network_hdr_len, mac_hdr_len; + + /* Make certain the headers can be described by a context descriptor */ + mac_hdr_len = skb_network_header(skb) - skb->data; + if (unlikely(mac_hdr_len > IXGBEVF_MAX_MAC_HDR_LEN)) + return features & ~(NETIF_F_HW_CSUM | + NETIF_F_SCTP_CRC | + NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_TSO | + NETIF_F_TSO6); + + network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb); + if (unlikely(network_hdr_len > 
IXGBEVF_MAX_NETWORK_HDR_LEN)) + return features & ~(NETIF_F_HW_CSUM | + NETIF_F_SCTP_CRC | + NETIF_F_TSO | + NETIF_F_TSO6); + + /* We can only support IPV4 TSO in tunnels if we can mangle the + * inner IP ID field, so strip TSO if MANGLEID is not supported. + */ + if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID)) + features &= ~NETIF_F_TSO; + + return features; +} + static const struct net_device_ops ixgbevf_netdev_ops = { .ndo_open = ixgbevf_open, .ndo_stop = ixgbevf_close, @@ -3888,7 +3935,7 @@ static const struct net_device_ops ixgbevf_netdev_ops = { #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = ixgbevf_netpoll, #endif - .ndo_features_check = passthru_features_check, + .ndo_features_check = ixgbevf_features_check, }; static void ixgbevf_assign_netdev_ops(struct net_device *dev) @@ -3999,23 +4046,31 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) NETIF_F_HW_CSUM | NETIF_F_SCTP_CRC; - netdev->features = netdev->hw_features | - NETIF_F_HW_VLAN_CTAG_TX | - NETIF_F_HW_VLAN_CTAG_RX | - NETIF_F_HW_VLAN_CTAG_FILTER; +#define IXGBEVF_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \ + NETIF_F_GSO_GRE_CSUM | \ + NETIF_F_GSO_IPIP | \ + NETIF_F_GSO_SIT | \ + NETIF_F_GSO_UDP_TUNNEL | \ + NETIF_F_GSO_UDP_TUNNEL_CSUM) - netdev->vlan_features |= NETIF_F_SG | - NETIF_F_TSO | - NETIF_F_TSO6 | - NETIF_F_HW_CSUM | - NETIF_F_SCTP_CRC; + netdev->gso_partial_features = IXGBEVF_GSO_PARTIAL_FEATURES; + netdev->hw_features |= NETIF_F_GSO_PARTIAL | + IXGBEVF_GSO_PARTIAL_FEATURES; - netdev->mpls_features |= NETIF_F_HW_CSUM; - netdev->hw_enc_features |= NETIF_F_HW_CSUM; + netdev->features = netdev->hw_features; if (pci_using_dac) netdev->features |= NETIF_F_HIGHDMA; + netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID; + netdev->mpls_features |= NETIF_F_HW_CSUM; + netdev->hw_enc_features |= netdev->vlan_features; + + /* set this bit last since it cannot be part of vlan_features */ + netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | + NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_TX; + netdev->priv_flags |= IFF_UNICAST_FLT; if (IXGBE_REMOVED(hw->hw_addr)) { From 7921f4dc4c36e736d7a5b45dfa7b6a755a4fc012 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Thu, 14 Apr 2016 20:37:15 -0400 Subject: [PATCH 0941/1649] ixgbevf: Move API negotiation function into mac_ops This patch moves API negotiation into mac_ops. The general idea here is that with HyperV on the way we need to make certain that anything that will have different versions between HyperV and a standard VF needs to be abstracted enough so that we can have a separate function between the two so we can avoid changes in one breaking something in the other. 
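As a minimal sketch of what the indirection buys (illustrative only; the hv_* names below are hypothetical and not part of this patch, and the snippet assumes the driver's vf.h and mbx.h), a second backend only has to supply its own table entry while common code keeps calling through hw->mac.ops:

  static s32 ixgbevf_hv_negotiate_api_version(struct ixgbe_hw *hw, int api)
  {
          /* a Hyper-V backend could answer without touching the mailbox */
          return api == ixgbe_mbox_api_10 ? 0 : -EOPNOTSUPP;
  }

  static const struct ixgbe_mac_operations ixgbevf_hv_mac_ops_sketch = {
          .negotiate_api_version = ixgbevf_hv_negotiate_api_version,
  };

The caller side stays exactly as in the ixgbevf_negotiate_api() hunk below: err = hw->mac.ops.negotiate_api_version(hw, api[idx]);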
Signed-off-by: Alexander Duyck Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 2 +- drivers/net/ethernet/intel/ixgbevf/vf.c | 5 +++-- drivers/net/ethernet/intel/ixgbevf/vf.h | 2 +- 3 files changed, 5 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index eb91922bcb19..319e25f29883 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -2056,7 +2056,7 @@ static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter) spin_lock_bh(&adapter->mbx_lock); while (api[idx] != ixgbe_mbox_api_unknown) { - err = ixgbevf_negotiate_api_version(hw, api[idx]); + err = hw->mac.ops.negotiate_api_version(hw, api[idx]); if (!err) break; idx++; diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c index 4d613a4f2a7f..987ad69d4918 100644 --- a/drivers/net/ethernet/intel/ixgbevf/vf.c +++ b/drivers/net/ethernet/intel/ixgbevf/vf.c @@ -670,11 +670,11 @@ void ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size) } /** - * ixgbevf_negotiate_api_version - Negotiate supported API version + * ixgbevf_negotiate_api_version_vf - Negotiate supported API version * @hw: pointer to the HW structure * @api: integer containing requested API version **/ -int ixgbevf_negotiate_api_version(struct ixgbe_hw *hw, int api) +static int ixgbevf_negotiate_api_version_vf(struct ixgbe_hw *hw, int api) { int err; u32 msg[3]; @@ -769,6 +769,7 @@ static const struct ixgbe_mac_operations ixgbevf_mac_ops = { .stop_adapter = ixgbevf_stop_hw_vf, .setup_link = ixgbevf_setup_mac_link_vf, .check_link = ixgbevf_check_mac_link_vf, + .negotiate_api_version = ixgbevf_negotiate_api_version_vf, .set_rar = ixgbevf_set_rar_vf, .update_mc_addr_list = ixgbevf_update_mc_addr_list_vf, .update_xcast_mode = ixgbevf_update_xcast_mode, diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h index ef9f7736b4dc..8e623f9327ae 100644 --- a/drivers/net/ethernet/intel/ixgbevf/vf.h +++ b/drivers/net/ethernet/intel/ixgbevf/vf.h @@ -51,6 +51,7 @@ struct ixgbe_mac_operations { s32 (*get_mac_addr)(struct ixgbe_hw *, u8 *); s32 (*stop_adapter)(struct ixgbe_hw *); s32 (*get_bus_info)(struct ixgbe_hw *); + s32 (*negotiate_api_version)(struct ixgbe_hw *hw, int api); /* Link */ s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool, bool); @@ -208,7 +209,6 @@ static inline u32 ixgbe_read_reg_array(struct ixgbe_hw *hw, u32 reg, #define IXGBE_READ_REG_ARRAY(h, r, o) ixgbe_read_reg_array(h, r, o) void ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size); -int ixgbevf_negotiate_api_version(struct ixgbe_hw *hw, int api); int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs, unsigned int *default_tc); int ixgbevf_get_reta_locked(struct ixgbe_hw *hw, u32 *reta, int num_rx_queues); From d4f90d9dca26efef7a1112a8f4258c90b73bb37f Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Sat, 16 Apr 2016 22:35:08 +0200 Subject: [PATCH 0942/1649] ixgbe: use msleep for long delays The newly added x550em_a support causes a link failure on ARM because of an overly long time passed into udelay(): ERROR: "__bad_udelay" [drivers/net/ethernet/intel/ixgbe/ixgbe.ko] undefined! 
There are multiple variants of the ixgbe_acquire_swfw_sync_*() function, and the other ones all use msleep(), so we can safely assume that all callers are allowed to sleep, which makes msleep() a better replacement than mdelay(). Signed-off-by: Arnd Bergmann Fixes: 49425dfc7451 ("ixgbe: Add support for x550em_a 10G MAC type") Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c index c8a4f5ef06c0..19b75cd98682 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c @@ -2765,7 +2765,7 @@ static s32 ixgbe_acquire_swfw_sync_x550em_a(struct ixgbe_hw *hw, u32 mask) ixgbe_release_swfw_sync_X540(hw, hmask); if (status != IXGBE_ERR_TOKEN_RETRY) return status; - udelay(FW_PHY_TOKEN_DELAY * 1000); + msleep(FW_PHY_TOKEN_DELAY); } return status; From a3efd81205b128a802025abb689925177a4607ed Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Mon, 18 Apr 2016 16:16:59 +0200 Subject: [PATCH 0943/1649] netfilter: conntrack: move generation seqcnt out of netns_ct We only allow rehash in init namespace, so we only use init_ns.generation. And even if we would allow it, it makes no sense as the conntrack locks are global; any ongoing rehash prevents insert/ delete. So make this private to nf_conntrack_core instead. Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- include/net/netns/conntrack.h | 1 - net/netfilter/nf_conntrack_core.c | 20 +++++++++++--------- 2 files changed, 11 insertions(+), 10 deletions(-) diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h index 723b61c82b3f..b052785b1590 100644 --- a/include/net/netns/conntrack.h +++ b/include/net/netns/conntrack.h @@ -94,7 +94,6 @@ struct netns_ct { int sysctl_checksum; unsigned int htable_size; - seqcount_t generation; struct kmem_cache *nf_conntrack_cachep; struct hlist_nulls_head *hash; struct hlist_head *expect_hash; diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index 2fd607408998..a53c009fe510 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -69,6 +69,7 @@ __cacheline_aligned_in_smp DEFINE_SPINLOCK(nf_conntrack_expect_lock); EXPORT_SYMBOL_GPL(nf_conntrack_expect_lock); static __read_mostly spinlock_t nf_conntrack_locks_all_lock; +static __read_mostly seqcount_t nf_conntrack_generation; static __read_mostly bool nf_conntrack_locks_all; void nf_conntrack_lock(spinlock_t *lock) __acquires(lock) @@ -107,7 +108,7 @@ static bool nf_conntrack_double_lock(struct net *net, unsigned int h1, spin_lock_nested(&nf_conntrack_locks[h1], SINGLE_DEPTH_NESTING); } - if (read_seqcount_retry(&net->ct.generation, sequence)) { + if (read_seqcount_retry(&nf_conntrack_generation, sequence)) { nf_conntrack_double_unlock(h1, h2); return true; } @@ -393,7 +394,7 @@ static void nf_ct_delete_from_lists(struct nf_conn *ct) local_bh_disable(); do { - sequence = read_seqcount_begin(&net->ct.generation); + sequence = read_seqcount_begin(&nf_conntrack_generation); hash = hash_conntrack(net, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); reply_hash = hash_conntrack(net, @@ -560,7 +561,7 @@ nf_conntrack_hash_check_insert(struct nf_conn *ct) local_bh_disable(); do { - sequence = read_seqcount_begin(&net->ct.generation); + sequence = read_seqcount_begin(&nf_conntrack_generation); hash = hash_conntrack(net, 
&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); reply_hash = hash_conntrack(net, @@ -628,7 +629,7 @@ __nf_conntrack_confirm(struct sk_buff *skb) local_bh_disable(); do { - sequence = read_seqcount_begin(&net->ct.generation); + sequence = read_seqcount_begin(&nf_conntrack_generation); /* reuse the hash saved before */ hash = *(unsigned long *)&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev; hash = hash_bucket(hash, net); @@ -771,12 +772,12 @@ static noinline int early_drop(struct net *net, unsigned int _hash) local_bh_disable(); restart: - sequence = read_seqcount_begin(&net->ct.generation); + sequence = read_seqcount_begin(&nf_conntrack_generation); hash = hash_bucket(_hash, net); for (; i < net->ct.htable_size; i++) { lockp = &nf_conntrack_locks[hash % CONNTRACK_LOCKS]; nf_conntrack_lock(lockp); - if (read_seqcount_retry(&net->ct.generation, sequence)) { + if (read_seqcount_retry(&nf_conntrack_generation, sequence)) { spin_unlock(lockp); goto restart; } @@ -1607,7 +1608,7 @@ int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp) local_bh_disable(); nf_conntrack_all_lock(); - write_seqcount_begin(&init_net.ct.generation); + write_seqcount_begin(&nf_conntrack_generation); /* Lookups in the old hash might happen in parallel, which means we * might get false negatives during connection lookup. New connections @@ -1631,7 +1632,7 @@ int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp) init_net.ct.htable_size = nf_conntrack_htable_size = hashsize; init_net.ct.hash = hash; - write_seqcount_end(&init_net.ct.generation); + write_seqcount_end(&nf_conntrack_generation); nf_conntrack_all_unlock(); local_bh_enable(); @@ -1657,6 +1658,8 @@ int nf_conntrack_init_start(void) int max_factor = 8; int i, ret, cpu; + seqcount_init(&nf_conntrack_generation); + for (i = 0; i < CONNTRACK_LOCKS; i++) spin_lock_init(&nf_conntrack_locks[i]); @@ -1783,7 +1786,6 @@ int nf_conntrack_init_net(struct net *net) int cpu; atomic_set(&net->ct.count, 0); - seqcount_init(&net->ct.generation); net->ct.pcpu_lists = alloc_percpu(struct ct_pcpu); if (!net->ct.pcpu_lists) From 7001c6d109ea41a88e7156f467cf9fb5f37f5036 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Mon, 18 Apr 2016 16:17:00 +0200 Subject: [PATCH 0944/1649] netfilter: conntrack: use get_random_once for nat and expectations Use a private seed and init it using get_random_once. 
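Both hunks follow the same idiom; a compact stand-alone sketch of it (illustrative only, the demo_* names do not exist in the tree):

  #include <linux/cache.h>
  #include <linux/jhash.h>
  #include <linux/kernel.h>
  #include <linux/random.h>

  static unsigned int demo_hashrnd __read_mostly;

  static unsigned int demo_hash(const u32 *key, unsigned int words,
                                unsigned int htable_size)
  {
          /* seed lazily on first use, after boot-time entropy is available */
          get_random_once(&demo_hashrnd, sizeof(demo_hashrnd));

          return reciprocal_scale(jhash2(key, words, demo_hashrnd),
                                  htable_size);
  }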
Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nf_conntrack_expect.c | 7 +++---- net/netfilter/nf_nat_core.c | 6 ++++-- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c index 278927ab0948..c2f7c4f475b1 100644 --- a/net/netfilter/nf_conntrack_expect.c +++ b/net/netfilter/nf_conntrack_expect.c @@ -38,6 +38,7 @@ EXPORT_SYMBOL_GPL(nf_ct_expect_hsize); unsigned int nf_ct_expect_max __read_mostly; static struct kmem_cache *nf_ct_expect_cachep __read_mostly; +static unsigned int nf_ct_expect_hashrnd __read_mostly; /* nf_conntrack_expect helper functions */ void nf_ct_unlink_expect_report(struct nf_conntrack_expect *exp, @@ -76,13 +77,11 @@ static unsigned int nf_ct_expect_dst_hash(const struct nf_conntrack_tuple *tuple { unsigned int hash; - if (unlikely(!nf_conntrack_hash_rnd)) { - init_nf_conntrack_hash_rnd(); - } + get_random_once(&nf_ct_expect_hashrnd, sizeof(nf_ct_expect_hashrnd)); hash = jhash2(tuple->dst.u3.all, ARRAY_SIZE(tuple->dst.u3.all), (((tuple->dst.protonum ^ tuple->src.l3num) << 16) | - (__force __u16)tuple->dst.u.all) ^ nf_conntrack_hash_rnd); + (__force __u16)tuple->dst.u.all) ^ nf_ct_expect_hashrnd); return reciprocal_scale(hash, nf_ct_expect_hsize); } diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c index 06a9f45771ab..3d522715a167 100644 --- a/net/netfilter/nf_nat_core.c +++ b/net/netfilter/nf_nat_core.c @@ -37,7 +37,7 @@ static const struct nf_nat_l3proto __rcu *nf_nat_l3protos[NFPROTO_NUMPROTO] __read_mostly; static const struct nf_nat_l4proto __rcu **nf_nat_l4protos[NFPROTO_NUMPROTO] __read_mostly; - +static unsigned int nf_nat_hash_rnd __read_mostly; inline const struct nf_nat_l3proto * __nf_nat_l3proto_find(u8 family) @@ -122,9 +122,11 @@ hash_by_src(const struct net *net, const struct nf_conntrack_tuple *tuple) { unsigned int hash; + get_random_once(&nf_nat_hash_rnd, sizeof(nf_nat_hash_rnd)); + /* Original src, to ensure we map it consistently if poss. */ hash = jhash2((u32 *)&tuple->src, sizeof(tuple->src) / sizeof(u32), - tuple->dst.protonum ^ nf_conntrack_hash_rnd); + tuple->dst.protonum ^ nf_nat_hash_rnd); return reciprocal_scale(hash, net->ct.nat_htable_size); } From 141658fb02c248e6243d619cb7d48a76158a66ac Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Mon, 18 Apr 2016 16:17:01 +0200 Subject: [PATCH 0945/1649] netfilter: conntrack: use get_random_once for conntrack hash seed As an earlier commit removed accesses to the hash from other files, we can also make it static.
Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- include/net/netfilter/nf_conntrack.h | 2 -- net/netfilter/nf_conntrack_core.c | 26 +++----------------------- 2 files changed, 3 insertions(+), 25 deletions(-) diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h index fde4068eec0b..dd78bea227c8 100644 --- a/include/net/netfilter/nf_conntrack.h +++ b/include/net/netfilter/nf_conntrack.h @@ -289,8 +289,6 @@ struct kernel_param; int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp); extern unsigned int nf_conntrack_htable_size; extern unsigned int nf_conntrack_max; -extern unsigned int nf_conntrack_hash_rnd; -void init_nf_conntrack_hash_rnd(void); struct nf_conn *nf_ct_tmpl_alloc(struct net *net, const struct nf_conntrack_zone *zone, diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index a53c009fe510..1fd0ff1030c2 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -142,13 +142,14 @@ EXPORT_SYMBOL_GPL(nf_conntrack_max); DEFINE_PER_CPU(struct nf_conn, nf_conntrack_untracked); EXPORT_PER_CPU_SYMBOL(nf_conntrack_untracked); -unsigned int nf_conntrack_hash_rnd __read_mostly; -EXPORT_SYMBOL_GPL(nf_conntrack_hash_rnd); +static unsigned int nf_conntrack_hash_rnd __read_mostly; static u32 hash_conntrack_raw(const struct nf_conntrack_tuple *tuple) { unsigned int n; + get_random_once(&nf_conntrack_hash_rnd, sizeof(nf_conntrack_hash_rnd)); + /* The direction must be ignored, so we hash everything up to the * destination ports (which is a multiple of 4) and treat the last * three bytes manually. @@ -815,21 +816,6 @@ restart: return dropped; } -void init_nf_conntrack_hash_rnd(void) -{ - unsigned int rand; - - /* - * Why not initialize nf_conntrack_rnd in a "init()" function ? - * Because there isn't enough entropy when system initializing, - * and we initialize it as late as possible. - */ - do { - get_random_bytes(&rand, sizeof(rand)); - } while (!rand); - cmpxchg(&nf_conntrack_hash_rnd, 0, rand); -} - static struct nf_conn * __nf_conntrack_alloc(struct net *net, const struct nf_conntrack_zone *zone, @@ -839,12 +825,6 @@ __nf_conntrack_alloc(struct net *net, { struct nf_conn *ct; - if (unlikely(!nf_conntrack_hash_rnd)) { - init_nf_conntrack_hash_rnd(); - /* recompute the hash as nf_conntrack_hash_rnd is initialized */ - hash = hash_conntrack_raw(orig); - } - /* We don't want any race condition at early drop stage */ atomic_inc(&net->ct.count); From 0e9091d6862f60499fa3faec7c2060c1929d0763 Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Tue, 12 Apr 2016 23:50:34 +0200 Subject: [PATCH 0946/1649] netfilter: nf_tables: introduce nft_setelem_parse_flags() helper This function parses the set element flags, thus, we can reuse the same handling when deleting elements. 
Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nf_tables_api.c | 34 ++++++++++++++++++++++------------ 1 file changed, 22 insertions(+), 12 deletions(-) diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 7a85a9dd37ad..1b3210b2b82d 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -3375,6 +3375,22 @@ void nft_set_elem_destroy(const struct nft_set *set, void *elem) } EXPORT_SYMBOL_GPL(nft_set_elem_destroy); +static int nft_setelem_parse_flags(const struct nft_set *set, + const struct nlattr *attr, u32 *flags) +{ + if (attr == NULL) + return 0; + + *flags = ntohl(nla_get_be32(attr)); + if (*flags & ~NFT_SET_ELEM_INTERVAL_END) + return -EINVAL; + if (!(set->flags & NFT_SET_INTERVAL) && + *flags & NFT_SET_ELEM_INTERVAL_END) + return -EINVAL; + + return 0; +} + static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set, const struct nlattr *attr) { @@ -3388,8 +3404,8 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set, struct nft_data data; enum nft_registers dreg; struct nft_trans *trans; + u32 flags = 0; u64 timeout; - u32 flags; u8 ulen; int err; @@ -3403,17 +3419,11 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set, nft_set_ext_prepare(&tmpl); - flags = 0; - if (nla[NFTA_SET_ELEM_FLAGS] != NULL) { - flags = ntohl(nla_get_be32(nla[NFTA_SET_ELEM_FLAGS])); - if (flags & ~NFT_SET_ELEM_INTERVAL_END) - return -EINVAL; - if (!(set->flags & NFT_SET_INTERVAL) && - flags & NFT_SET_ELEM_INTERVAL_END) - return -EINVAL; - if (flags != 0) - nft_set_ext_add(&tmpl, NFT_SET_EXT_FLAGS); - } + err = nft_setelem_parse_flags(set, nla[NFTA_SET_ELEM_FLAGS], &flags); + if (err < 0) + return err; + if (flags != 0) + nft_set_ext_add(&tmpl, NFT_SET_EXT_FLAGS); if (set->flags & NFT_SET_MAP) { if (nla[NFTA_SET_ELEM_DATA] == NULL && From 3971ca14350062fc30b2dd3ca182234f17d268c2 Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Tue, 12 Apr 2016 23:50:35 +0200 Subject: [PATCH 0947/1649] netfilter: nf_tables: parse element flags from nft_del_setelem() Parse flags and pass them to the set via ->deactivate() to check if we remove the right element from the intervals. 
Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nf_tables_api.c | 42 +++++++++++++++++++++++++++++------ 1 file changed, 35 insertions(+), 7 deletions(-) diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 1b3210b2b82d..73c8fad0b8ef 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -3592,9 +3592,13 @@ static int nft_del_setelem(struct nft_ctx *ctx, struct nft_set *set, const struct nlattr *attr) { struct nlattr *nla[NFTA_SET_ELEM_MAX + 1]; + struct nft_set_ext_tmpl tmpl; struct nft_data_desc desc; struct nft_set_elem elem; + struct nft_set_ext *ext; struct nft_trans *trans; + u32 flags = 0; + void *priv; int err; err = nla_parse_nested(nla, NFTA_SET_ELEM_MAX, attr, @@ -3606,6 +3610,14 @@ static int nft_del_setelem(struct nft_ctx *ctx, struct nft_set *set, if (nla[NFTA_SET_ELEM_KEY] == NULL) goto err1; + nft_set_ext_prepare(&tmpl); + + err = nft_setelem_parse_flags(set, nla[NFTA_SET_ELEM_FLAGS], &flags); + if (err < 0) + return err; + if (flags != 0) + nft_set_ext_add(&tmpl, NFT_SET_EXT_FLAGS); + err = nft_data_init(ctx, &elem.key.val, sizeof(elem.key), &desc, nla[NFTA_SET_ELEM_KEY]); if (err < 0) @@ -3615,24 +3627,40 @@ static int nft_del_setelem(struct nft_ctx *ctx, struct nft_set *set, if (desc.type != NFT_DATA_VALUE || desc.len != set->klen) goto err2; + nft_set_ext_add_length(&tmpl, NFT_SET_EXT_KEY, desc.len); + + err = -ENOMEM; + elem.priv = nft_set_elem_init(set, &tmpl, elem.key.val.data, NULL, 0, + GFP_KERNEL); + if (elem.priv == NULL) + goto err2; + + ext = nft_set_elem_ext(set, elem.priv); + if (flags) + *nft_set_ext_flags(ext) = flags; + trans = nft_trans_elem_alloc(ctx, NFT_MSG_DELSETELEM, set); if (trans == NULL) { err = -ENOMEM; - goto err2; - } - - elem.priv = set->ops->deactivate(set, &elem); - if (elem.priv == NULL) { - err = -ENOENT; goto err3; } + priv = set->ops->deactivate(set, &elem); + if (priv == NULL) { + err = -ENOENT; + goto err4; + } + kfree(elem.priv); + elem.priv = priv; + nft_trans_elem(trans) = elem; list_add_tail(&trans->list, &ctx->net->nft.commit_list); return 0; -err3: +err4: kfree(trans); +err3: + kfree(elem.priv); err2: nft_data_uninit(&elem.key.val, desc.type); err1: From ef1d20e0f8a80ba2942a59331d472322794d6748 Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Tue, 12 Apr 2016 23:50:36 +0200 Subject: [PATCH 0948/1649] netfilter: nft_rbtree: introduce nft_rbtree_interval_end() helper Add this new nft_rbtree_interval_end() helper function to check if the interval end flag is set.
Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nft_rbtree.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/net/netfilter/nft_rbtree.c b/net/netfilter/nft_rbtree.c index 1c30f41cff5b..29f2ab88d787 100644 --- a/net/netfilter/nft_rbtree.c +++ b/net/netfilter/nft_rbtree.c @@ -29,6 +29,11 @@ struct nft_rbtree_elem { struct nft_set_ext ext; }; +static bool nft_rbtree_interval_end(const struct nft_rbtree_elem *rbe) +{ + return nft_set_ext_exists(&rbe->ext, NFT_SET_EXT_FLAGS) && + (*nft_set_ext_flags(&rbe->ext) & NFT_SET_ELEM_INTERVAL_END); +} static bool nft_rbtree_lookup(const struct nft_set *set, const u32 *key, const struct nft_set_ext **ext) @@ -56,9 +61,7 @@ found: parent = parent->rb_left; continue; } - if (nft_set_ext_exists(&rbe->ext, NFT_SET_EXT_FLAGS) && - *nft_set_ext_flags(&rbe->ext) & - NFT_SET_ELEM_INTERVAL_END) + if (nft_rbtree_interval_end(rbe)) goto out; spin_unlock_bh(&nft_rbtree_lock); From e701001e7cbe88cdc937037f6f398669eef7e7ff Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Tue, 12 Apr 2016 23:50:37 +0200 Subject: [PATCH 0949/1649] netfilter: nft_rbtree: allow adjacent intervals with dynamic updates This patch fixes dynamic element updates for adjacent intervals in the rb-tree representation. Since elements are sorted in the rb-tree, in case of adjacent nodes with the same key, the assumption is that an interval end node must be placed before an interval opening. In tree lookup operations, the idea is to search for the closer element that is smaller than the one we're searching for. Given that we'll have two possible matchings, we have to take the opening interval in case of adjacent nodes. Range merges are not trivial with the current representation, specifically we have to check if node extensions are equal and make sure we keep the existing internal states around. Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nft_rbtree.c | 40 +++++++++++++++++++++++++++++++++----- 1 file changed, 35 insertions(+), 5 deletions(-) diff --git a/net/netfilter/nft_rbtree.c b/net/netfilter/nft_rbtree.c index 29f2ab88d787..f762094af7c1 100644 --- a/net/netfilter/nft_rbtree.c +++ b/net/netfilter/nft_rbtree.c @@ -35,6 +35,12 @@ static bool nft_rbtree_interval_end(const struct nft_rbtree_elem *rbe) (*nft_set_ext_flags(&rbe->ext) & NFT_SET_ELEM_INTERVAL_END); } +static bool nft_rbtree_equal(const struct nft_set *set, const void *this, + const struct nft_rbtree_elem *interval) +{ + return memcmp(this, nft_set_ext_key(&interval->ext), set->klen) == 0; +} + static bool nft_rbtree_lookup(const struct nft_set *set, const u32 *key, const struct nft_set_ext **ext) { @@ -42,6 +48,7 @@ static bool nft_rbtree_lookup(const struct nft_set *set, const u32 *key, const struct nft_rbtree_elem *rbe, *interval = NULL; const struct rb_node *parent; u8 genmask = nft_genmask_cur(read_pnet(&set->pnet)); + const void *this; int d; spin_lock_bh(&nft_rbtree_lock); @@ -49,9 +56,16 @@ static bool nft_rbtree_lookup(const struct nft_set *set, const u32 *key, while (parent != NULL) { rbe = rb_entry(parent, struct nft_rbtree_elem, node); - d = memcmp(nft_set_ext_key(&rbe->ext), key, set->klen); + this = nft_set_ext_key(&rbe->ext); + d = memcmp(this, key, set->klen); if (d < 0) { parent = parent->rb_left; + /* In case of adjacent ranges, we always see the high + * part of the range in first place, before the low one. + * So don't update interval if the keys are equal. 
+ */ + if (interval && nft_rbtree_equal(set, this, interval)) + continue; interval = rbe; } else if (d > 0) parent = parent->rb_right; @@ -101,9 +115,16 @@ static int __nft_rbtree_insert(const struct nft_set *set, else if (d > 0) p = &parent->rb_right; else { - if (nft_set_elem_active(&rbe->ext, genmask)) - return -EEXIST; - p = &parent->rb_left; + if (nft_set_elem_active(&rbe->ext, genmask)) { + if (nft_rbtree_interval_end(rbe) && + !nft_rbtree_interval_end(new)) + p = &parent->rb_left; + else if (!nft_rbtree_interval_end(rbe) && + nft_rbtree_interval_end(new)) + p = &parent->rb_right; + else + return -EEXIST; + } } } rb_link_node(&new->node, parent, p); @@ -148,7 +169,7 @@ static void *nft_rbtree_deactivate(const struct nft_set *set, { const struct nft_rbtree *priv = nft_set_priv(set); const struct rb_node *parent = priv->root.rb_node; - struct nft_rbtree_elem *rbe; + struct nft_rbtree_elem *rbe, *this = elem->priv; u8 genmask = nft_genmask_cur(read_pnet(&set->pnet)); int d; @@ -166,6 +187,15 @@ static void *nft_rbtree_deactivate(const struct nft_set *set, parent = parent->rb_left; continue; } + if (nft_rbtree_interval_end(rbe) && + !nft_rbtree_interval_end(this)) { + parent = parent->rb_left; + continue; + } else if (!nft_rbtree_interval_end(rbe) && + nft_rbtree_interval_end(this)) { + parent = parent->rb_right; + continue; + } nft_set_elem_change_active(set, &rbe->ext); return rbe; } From 3bb398d925ec73e42b778cf823c8f4aecae359ea Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Thu, 14 Apr 2016 17:13:41 +0200 Subject: [PATCH 0950/1649] netfilter: nf_ct_helper: disable automatic helper assignment Four years ago we introduced a new sysctl knob to disable automatic helper assignment in 72110dfaa907 ("netfilter: nf_ct_helper: disable automatic helper assignment"). This knob kept this behaviour enabled by default to remain conservative. This measure was introduced to provide a secure way to configure iptables and connection tracking helpers through explicit rules. Given the time we have waited for this, let's turn this off by default now; worst case, users still have a chance to recover the former behaviour by explicitly enabling this back through sysctl. Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nf_conntrack_helper.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c index 3b40ec575cd5..498bf74f154d 100644 --- a/net/netfilter/nf_conntrack_helper.c +++ b/net/netfilter/nf_conntrack_helper.c @@ -38,10 +38,10 @@ unsigned int nf_ct_helper_hsize __read_mostly; EXPORT_SYMBOL_GPL(nf_ct_helper_hsize); static unsigned int nf_ct_helper_count __read_mostly; -static bool nf_ct_auto_assign_helper __read_mostly = true; +static bool nf_ct_auto_assign_helper __read_mostly = false; module_param_named(nf_conntrack_helper, nf_ct_auto_assign_helper, bool, 0644); MODULE_PARM_DESC(nf_conntrack_helper, - "Enable automatic conntrack helper assignment (default 1)"); + "Enable automatic conntrack helper assignment (default 0)"); #ifdef CONFIG_SYSCTL static struct ctl_table helper_sysctl_table[] = { From d2b484b577776f3c6f4d52505b27bad27ea1fe00 Mon Sep 17 00:00:00 2001 From: Liping Zhang Date: Fri, 22 Apr 2016 02:56:57 -0700 Subject: [PATCH 0951/1649] netfilter: ip6t_SYNPROXY: unnecessary to check whether ip6_route_output returns NULL ip6_route_output() will never return a NULL pointer, so there's no need to check it.
Signed-off-by: Liping Zhang Signed-off-by: Pablo Neira Ayuso --- net/ipv6/netfilter/ip6t_SYNPROXY.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/ipv6/netfilter/ip6t_SYNPROXY.c b/net/ipv6/netfilter/ip6t_SYNPROXY.c index 5d778dd11f66..06bed74cf5ee 100644 --- a/net/ipv6/netfilter/ip6t_SYNPROXY.c +++ b/net/ipv6/netfilter/ip6t_SYNPROXY.c @@ -60,7 +60,7 @@ synproxy_send_tcp(struct net *net, fl6.fl6_dport = nth->dest; security_skb_classify_flow((struct sk_buff *)skb, flowi6_to_flowi(&fl6)); dst = ip6_route_output(net, NULL, &fl6); - if (dst == NULL || dst->error) { + if (dst->error) { dst_release(dst); goto free_nskb; } From 5e91f6ce4c584d231763437a3ea3aded8e672363 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Mon, 25 Apr 2016 06:34:09 -0700 Subject: [PATCH 0952/1649] sock: relax WARN_ON() in sock_owned_by_user() Valdis reported tons of stack dumps caused by WARN_ON() in sock_owned_by_user() This test needs to be relaxed if/when lockdep disables itself. Note that other lockdep_sock_is_held() callers are all from rcu_dereference_protected() sections which already are disabled if/when lockdep has been disabled. Fixes: fafc4e1ea1a4 ("sock: tigthen lockdep checks for sock_owned_by_user") Reported-by: Valdis Kletnieks Signed-off-by: Eric Dumazet Acked-by: Hannes Frederic Sowa Signed-off-by: David S. Miller --- include/net/sock.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/net/sock.h b/include/net/sock.h index 52448baf19d7..2fdb87f176cf 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -1409,7 +1409,7 @@ static inline void unlock_sock_fast(struct sock *sk, bool slow) static inline bool sock_owned_by_user(const struct sock *sk) { #ifdef CONFIG_LOCKDEP - WARN_ON(!lockdep_sock_is_held(sk)); + WARN_ON_ONCE(!lockdep_sock_is_held(sk) && debug_locks); #endif return sk->sk_lock.owned; } From d296ba60d8e2de23a350796a567a3aa90fe1cb6e Mon Sep 17 00:00:00 2001 From: Craig Gallek Date: Mon, 25 Apr 2016 10:42:12 -0400 Subject: [PATCH 0953/1649] soreuseport: Resolve merge conflict for v4/v6 ordering fix d894ba18d4e4 ("soreuseport: fix ordering for mixed v4/v6 sockets") was merged as a bug fix to the net tree. Two conflicting changes were committed to net-next before the above fix was merged back to net-next: ca065d0cf80f ("udp: no longer use SLAB_DESTROY_BY_RCU") 3b24d854cb35 ("tcp/dccp: do not touch listener sk_refcnt under synflood") These changes switched the datastructure used for TCP and UDP sockets from hlist_nulls to hlist. This patch applies the necessary parts of the net tree fix to net-next which were not automatic as part of the merge. Fixes: 1602f49b58ab ("Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net") Signed-off-by: Craig Gallek Signed-off-by: David S. 
Miller --- include/net/sock.h | 6 +++++- net/ipv4/inet_hashtables.c | 6 +++++- 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/include/net/sock.h b/include/net/sock.h index 2fdb87f176cf..d63b8494124e 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -630,7 +630,11 @@ static inline void sk_add_node(struct sock *sk, struct hlist_head *list) static inline void sk_add_node_rcu(struct sock *sk, struct hlist_head *list) { sock_hold(sk); - hlist_add_head_rcu(&sk->sk_node, list); + if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport && + sk->sk_family == AF_INET6) + hlist_add_tail_rcu(&sk->sk_node, list); + else + hlist_add_head_rcu(&sk->sk_node, list); } static inline void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list) diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c index fcadb670f50b..b76b0d7e59c1 100644 --- a/net/ipv4/inet_hashtables.c +++ b/net/ipv4/inet_hashtables.c @@ -479,7 +479,11 @@ int __inet_hash(struct sock *sk, struct sock *osk, if (err) goto unlock; } - hlist_add_head_rcu(&sk->sk_node, &ilb->head); + if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport && + sk->sk_family == AF_INET6) + hlist_add_tail_rcu(&sk->sk_node, &ilb->head); + else + hlist_add_head_rcu(&sk->sk_node, &ilb->head); sock_set_flag(sk, SOCK_RCU_FREE); sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); unlock: From cd84042ce9040ad038e958bc67a46fcfc015c736 Mon Sep 17 00:00:00 2001 From: "Vittorio Gambaletta (VittGam)" Date: Mon, 11 Apr 2016 04:48:54 +0200 Subject: [PATCH 0954/1649] ath9k: Add a module parameter to invert LED polarity. The LED can be active high instead of active low on some hardware. Add the led_active_high module parameter. It defaults to -1 to obey platform data as before. Setting the parameter to 1 or 0 will force the LED respectively active high or active low. Cc: Cc: Cc: Cc: Signed-off-by: Vittorio Gambaletta Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath9k/init.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c index fb702c48a233..535b1644501c 100644 --- a/drivers/net/wireless/ath/ath9k/init.c +++ b/drivers/net/wireless/ath/ath9k/init.c @@ -49,6 +49,10 @@ int ath9k_led_blink; module_param_named(blink, ath9k_led_blink, int, 0444); MODULE_PARM_DESC(blink, "Enable LED blink on activity"); +static int ath9k_led_active_high = -1; +module_param_named(led_active_high, ath9k_led_active_high, int, 0444); +MODULE_PARM_DESC(led_active_high, "Invert LED polarity"); + static int ath9k_btcoex_enable; module_param_named(btcoex_enable, ath9k_btcoex_enable, int, 0444); MODULE_PARM_DESC(btcoex_enable, "Enable wifi-BT coexistence"); @@ -600,6 +604,9 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc, if (ret) return ret; + if (ath9k_led_active_high != -1) + ah->config.led_active_high = ath9k_led_active_high == 1; + /* * Enable WLAN/BT RX Antenna diversity only when: * From 0f9edcdd88a993914fa1d1dc369b35dc503979db Mon Sep 17 00:00:00 2001 From: "Vittorio Gambaletta (VittGam)" Date: Mon, 11 Apr 2016 04:48:55 +0200 Subject: [PATCH 0955/1649] ath9k: Fix LED polarity for some Mini PCI AR9220 MB92 cards. The Wistron DNMA-92 and Compex WLM200NX have inverted LED polarity (active high instead of active low). The same PCI Subsystem ID is used by both cards, which are based on the same Atheros MB92 design. 
Cc: Cc: Cc: Cc: Signed-off-by: Vittorio Gambaletta Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath9k/pci.c | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c index e6fef1be9977..7cdaf40c3057 100644 --- a/drivers/net/wireless/ath/ath9k/pci.c +++ b/drivers/net/wireless/ath/ath9k/pci.c @@ -28,6 +28,16 @@ static const struct pci_device_id ath_pci_id_table[] = { { PCI_VDEVICE(ATHEROS, 0x0024) }, /* PCI-E */ { PCI_VDEVICE(ATHEROS, 0x0027) }, /* PCI */ { PCI_VDEVICE(ATHEROS, 0x0029) }, /* PCI */ + +#ifdef CONFIG_ATH9K_PCOEM + /* Mini PCI AR9220 MB92 cards: Compex WLM200NX, Wistron DNMA-92 */ + { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, + 0x0029, + PCI_VENDOR_ID_ATHEROS, + 0x2096), + .driver_data = ATH9K_PCI_LED_ACT_HI }, +#endif + { PCI_VDEVICE(ATHEROS, 0x002A) }, /* PCI-E */ #ifdef CONFIG_ATH9K_PCOEM From 25d217d6e0723481bf90db1d8be02ab475d16002 Mon Sep 17 00:00:00 2001 From: Pontus Fuchs Date: Mon, 18 Apr 2016 22:00:39 -0700 Subject: [PATCH 0956/1649] wcn36xx: Clean up wcn36xx_smd_send_beacon Needed for coming improvements. No functional changes. Signed-off-by: Pontus Fuchs [bjorn: restored BEACON_TEMPLATE_SIZE define to 0x180] Signed-off-by: Bjorn Andersson Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wcn36xx/hal.h | 5 ++++- drivers/net/wireless/ath/wcn36xx/smd.c | 12 +++++------- 2 files changed, 9 insertions(+), 8 deletions(-) diff --git a/drivers/net/wireless/ath/wcn36xx/hal.h b/drivers/net/wireless/ath/wcn36xx/hal.h index b947de0fb2e5..d713204f755d 100644 --- a/drivers/net/wireless/ath/wcn36xx/hal.h +++ b/drivers/net/wireless/ath/wcn36xx/hal.h @@ -2884,11 +2884,14 @@ struct update_beacon_rsp_msg { struct wcn36xx_hal_send_beacon_req_msg { struct wcn36xx_hal_msg_header header; + /* length of the template + 6. Only qcom knows why */ + u32 beacon_length6; + /* length of the template. */ u32 beacon_length; /* Beacon data. */ - u8 beacon[BEACON_TEMPLATE_SIZE]; + u8 beacon[BEACON_TEMPLATE_SIZE - sizeof(u32)]; u8 bssid[ETH_ALEN]; diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c index 74f56a81ad9a..ff3ed2461a69 100644 --- a/drivers/net/wireless/ath/wcn36xx/smd.c +++ b/drivers/net/wireless/ath/wcn36xx/smd.c @@ -1380,19 +1380,17 @@ int wcn36xx_smd_send_beacon(struct wcn36xx *wcn, struct ieee80211_vif *vif, mutex_lock(&wcn->hal_mutex); INIT_HAL_MSG(msg_body, WCN36XX_HAL_SEND_BEACON_REQ); - /* TODO need to find out why this is needed? */ - msg_body.beacon_length = skb_beacon->len + 6; + msg_body.beacon_length = skb_beacon->len; + /* TODO need to find out why + 6 is needed */ + msg_body.beacon_length6 = msg_body.beacon_length + 6; - if (BEACON_TEMPLATE_SIZE > msg_body.beacon_length) { - memcpy(&msg_body.beacon, &skb_beacon->len, sizeof(u32)); - memcpy(&(msg_body.beacon[4]), skb_beacon->data, - skb_beacon->len); - } else { + if (msg_body.beacon_length > BEACON_TEMPLATE_SIZE) { wcn36xx_err("Beacon is to big: beacon size=%d\n", msg_body.beacon_length); ret = -ENOMEM; goto out; } + memcpy(msg_body.beacon, skb_beacon->data, skb_beacon->len); memcpy(msg_body.bssid, vif->addr, ETH_ALEN); /* TODO need to find out why this is needed? */ From 91c3eeba45e13ab7edfb50610df8672d52809394 Mon Sep 17 00:00:00 2001 From: Pontus Fuchs Date: Mon, 18 Apr 2016 22:00:40 -0700 Subject: [PATCH 0957/1649] wcn36xx: Pad TIM PVM if needed The wcn36xx FW expects a fixed size TIM PVM in the beacon template. 
If supplied with a shorter than expected PVM it will overwrite the IE following the TIM. Squashed with fix from Jason Mobarak : Patch "wcn36xx: Pad TIM PVM if needed" has caused a regression in mesh beaconing. The field tim_off is always 0 for mesh mode, and thus pvm_len (referring to the TIM length field) and pad are both incorrectly calculated. Thus, msg_body.beacon_length is incorrectly calculated for mesh mode. Fix this. Signed-off-by: Pontus Fuchs Signed-off-by: Jason Mobarak [bjorn: squashed in Jason's fixup] Signed-off-by: Bjorn Andersson Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wcn36xx/hal.h | 3 +++ drivers/net/wireless/ath/wcn36xx/smd.c | 27 ++++++++++++++++++++++++-- 2 files changed, 28 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/ath/wcn36xx/hal.h b/drivers/net/wireless/ath/wcn36xx/hal.h index d713204f755d..3af16cba3d12 100644 --- a/drivers/net/wireless/ath/wcn36xx/hal.h +++ b/drivers/net/wireless/ath/wcn36xx/hal.h @@ -54,6 +54,9 @@ /* Default Beacon template size */ #define BEACON_TEMPLATE_SIZE 0x180 +/* Minimum PVM size that the FW expects. See comment in smd.c for details. */ +#define TIM_MIN_PVM_SIZE 6 + /* Param Change Bitmap sent to HAL */ #define PARAM_BCN_INTERVAL_CHANGED (1 << 0) #define PARAM_SHORT_PREAMBLE_CHANGED (1 << 1) diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c index ff3ed2461a69..089a7e445cd6 100644 --- a/drivers/net/wireless/ath/wcn36xx/smd.c +++ b/drivers/net/wireless/ath/wcn36xx/smd.c @@ -1375,12 +1375,19 @@ int wcn36xx_smd_send_beacon(struct wcn36xx *wcn, struct ieee80211_vif *vif, u16 p2p_off) { struct wcn36xx_hal_send_beacon_req_msg msg_body; - int ret = 0; + int ret = 0, pad, pvm_len; mutex_lock(&wcn->hal_mutex); INIT_HAL_MSG(msg_body, WCN36XX_HAL_SEND_BEACON_REQ); - msg_body.beacon_length = skb_beacon->len; + pvm_len = skb_beacon->data[tim_off + 1] - 3; + pad = TIM_MIN_PVM_SIZE - pvm_len; + + /* Padding is irrelevant to mesh mode since tim_off is always 0. */ + if (vif->type == NL80211_IFTYPE_MESH_POINT) + pad = 0; + + msg_body.beacon_length = skb_beacon->len + pad; /* TODO need to find out why + 6 is needed */ msg_body.beacon_length6 = msg_body.beacon_length + 6; @@ -1393,6 +1400,22 @@ int wcn36xx_smd_send_beacon(struct wcn36xx *wcn, struct ieee80211_vif *vif, memcpy(msg_body.beacon, skb_beacon->data, skb_beacon->len); memcpy(msg_body.bssid, vif->addr, ETH_ALEN); + if (pad > 0) { + /* + * The wcn36xx FW has a fixed size for the PVM in the TIM. If + * given the beacon template from mac80211 with a PVM shorter + * than the FW expectes it will overwrite the data after the + * TIM. + */ + wcn36xx_dbg(WCN36XX_DBG_HAL, "Pad TIM PVM. %d bytes at %d\n", + pad, pvm_len); + memmove(&msg_body.beacon[tim_off + 5 + pvm_len + pad], + &msg_body.beacon[tim_off + 5 + pvm_len], + skb_beacon->len - (tim_off + 5 + pvm_len)); + memset(&msg_body.beacon[tim_off + 5 + pvm_len], 0, pad); + msg_body.beacon[tim_off + 1] += pad; + } + /* TODO need to find out why this is needed? */ if (vif->type == NL80211_IFTYPE_MESH_POINT) /* mesh beacon don't need this, so push further down */ From ce75877f6c3da01cd5efe41683dd32beee1b4b33 Mon Sep 17 00:00:00 2001 From: Pontus Fuchs Date: Mon, 18 Apr 2016 22:00:41 -0700 Subject: [PATCH 0958/1649] wcn36xx: Add helper macros to cast vif to private vif and vice versa Makes the code a little easier to read. 
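Usage sketch (illustrative only; this function is not part of the patch and assumes wcn36xx.h): the two helpers are inverses of each other and hide the drv_priv/container_of() arithmetic at the call sites:

  static void wcn36xx_cast_demo(struct ieee80211_vif *vif)
  {
          struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);

          /* round-trips back to the same ieee80211_vif */
          WARN_ON(wcn36xx_priv_to_vif(vif_priv) != vif);
  }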
Signed-off-by: Pontus Fuchs Signed-off-by: Bjorn Andersson Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wcn36xx/debug.c | 12 +++-------- drivers/net/wireless/ath/wcn36xx/main.c | 16 +++++++-------- drivers/net/wireless/ath/wcn36xx/pmc.c | 4 ++-- drivers/net/wireless/ath/wcn36xx/smd.c | 24 +++++++++------------- drivers/net/wireless/ath/wcn36xx/txrx.c | 8 ++------ drivers/net/wireless/ath/wcn36xx/wcn36xx.h | 12 +++++++++++ 6 files changed, 36 insertions(+), 40 deletions(-) diff --git a/drivers/net/wireless/ath/wcn36xx/debug.c b/drivers/net/wireless/ath/wcn36xx/debug.c index ef44a2da644d..2a6bb62e785c 100644 --- a/drivers/net/wireless/ath/wcn36xx/debug.c +++ b/drivers/net/wireless/ath/wcn36xx/debug.c @@ -33,9 +33,7 @@ static ssize_t read_file_bool_bmps(struct file *file, char __user *user_buf, char buf[3]; list_for_each_entry(vif_priv, &wcn->vif_list, list) { - vif = container_of((void *)vif_priv, - struct ieee80211_vif, - drv_priv); + vif = wcn36xx_priv_to_vif(vif_priv); if (NL80211_IFTYPE_STATION == vif->type) { if (vif_priv->pw_state == WCN36XX_BMPS) buf[0] = '1'; @@ -70,9 +68,7 @@ static ssize_t write_file_bool_bmps(struct file *file, case 'Y': case '1': list_for_each_entry(vif_priv, &wcn->vif_list, list) { - vif = container_of((void *)vif_priv, - struct ieee80211_vif, - drv_priv); + vif = wcn36xx_priv_to_vif(vif_priv); if (NL80211_IFTYPE_STATION == vif->type) { wcn36xx_enable_keep_alive_null_packet(wcn, vif); wcn36xx_pmc_enter_bmps_state(wcn, vif); @@ -83,9 +79,7 @@ static ssize_t write_file_bool_bmps(struct file *file, case 'N': case '0': list_for_each_entry(vif_priv, &wcn->vif_list, list) { - vif = container_of((void *)vif_priv, - struct ieee80211_vif, - drv_priv); + vif = wcn36xx_priv_to_vif(vif_priv); if (NL80211_IFTYPE_STATION == vif->type) wcn36xx_pmc_exit_bmps_state(wcn, vif); } diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c index a27279c2c695..62cb9ffd854c 100644 --- a/drivers/net/wireless/ath/wcn36xx/main.c +++ b/drivers/net/wireless/ath/wcn36xx/main.c @@ -346,9 +346,7 @@ static int wcn36xx_config(struct ieee80211_hw *hw, u32 changed) wcn36xx_dbg(WCN36XX_DBG_MAC, "wcn36xx_config channel switch=%d\n", ch); list_for_each_entry(tmp, &wcn->vif_list, list) { - vif = container_of((void *)tmp, - struct ieee80211_vif, - drv_priv); + vif = wcn36xx_priv_to_vif(tmp); wcn36xx_smd_switch_channel(wcn, vif, ch); } } @@ -387,7 +385,7 @@ static int wcn36xx_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, struct ieee80211_key_conf *key_conf) { struct wcn36xx *wcn = hw->priv; - struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv; + struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif); struct wcn36xx_sta *sta_priv = vif_priv->sta; int ret = 0; u8 key[WLAN_MAX_KEY_LEN]; @@ -590,7 +588,7 @@ static void wcn36xx_bss_info_changed(struct ieee80211_hw *hw, struct sk_buff *skb = NULL; u16 tim_off, tim_len; enum wcn36xx_hal_link_state link_state; - struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv; + struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif); wcn36xx_dbg(WCN36XX_DBG_MAC, "mac bss info changed vif %p changed 0x%08x\n", vif, changed); @@ -757,7 +755,7 @@ static void wcn36xx_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct wcn36xx *wcn = hw->priv; - struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv; + struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif); wcn36xx_dbg(WCN36XX_DBG_MAC, "mac remove interface vif %p\n", vif); list_del(&vif_priv->list); @@ 
-768,7 +766,7 @@ static int wcn36xx_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct wcn36xx *wcn = hw->priv; - struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv; + struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif); wcn36xx_dbg(WCN36XX_DBG_MAC, "mac add interface vif %p type %d\n", vif, vif->type); @@ -792,7 +790,7 @@ static int wcn36xx_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta) { struct wcn36xx *wcn = hw->priv; - struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv; + struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif); struct wcn36xx_sta *sta_priv = (struct wcn36xx_sta *)sta->drv_priv; wcn36xx_dbg(WCN36XX_DBG_MAC, "mac sta add vif %p sta %pM\n", vif, sta->addr); @@ -817,7 +815,7 @@ static int wcn36xx_sta_remove(struct ieee80211_hw *hw, struct ieee80211_sta *sta) { struct wcn36xx *wcn = hw->priv; - struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv; + struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif); struct wcn36xx_sta *sta_priv = (struct wcn36xx_sta *)sta->drv_priv; wcn36xx_dbg(WCN36XX_DBG_MAC, "mac sta remove vif %p sta %pM index %d\n", diff --git a/drivers/net/wireless/ath/wcn36xx/pmc.c b/drivers/net/wireless/ath/wcn36xx/pmc.c index 28b515c81b0e..589fe5f70971 100644 --- a/drivers/net/wireless/ath/wcn36xx/pmc.c +++ b/drivers/net/wireless/ath/wcn36xx/pmc.c @@ -22,7 +22,7 @@ int wcn36xx_pmc_enter_bmps_state(struct wcn36xx *wcn, struct ieee80211_vif *vif) { int ret = 0; - struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv; + struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif); /* TODO: Make sure the TX chain clean */ ret = wcn36xx_smd_enter_bmps(wcn, vif); if (!ret) { @@ -42,7 +42,7 @@ int wcn36xx_pmc_enter_bmps_state(struct wcn36xx *wcn, int wcn36xx_pmc_exit_bmps_state(struct wcn36xx *wcn, struct ieee80211_vif *vif) { - struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv; + struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif); if (WCN36XX_BMPS != vif_priv->pw_state) { wcn36xx_err("Not in BMPS mode, no need to exit from BMPS mode!\n"); diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c index 089a7e445cd6..cc1b3b7a4ff9 100644 --- a/drivers/net/wireless/ath/wcn36xx/smd.c +++ b/drivers/net/wireless/ath/wcn36xx/smd.c @@ -191,7 +191,7 @@ static void wcn36xx_smd_set_sta_params(struct wcn36xx *wcn, struct ieee80211_sta *sta, struct wcn36xx_hal_config_sta_params *sta_params) { - struct wcn36xx_vif *priv_vif = (struct wcn36xx_vif *)vif->drv_priv; + struct wcn36xx_vif *priv_vif = wcn36xx_vif_to_priv(vif); struct wcn36xx_sta *priv_sta = NULL; if (vif->type == NL80211_IFTYPE_ADHOC || vif->type == NL80211_IFTYPE_AP || @@ -726,7 +726,7 @@ static int wcn36xx_smd_add_sta_self_rsp(struct wcn36xx *wcn, size_t len) { struct wcn36xx_hal_add_sta_self_rsp_msg *rsp; - struct wcn36xx_vif *priv_vif = (struct wcn36xx_vif *)vif->drv_priv; + struct wcn36xx_vif *priv_vif = wcn36xx_vif_to_priv(vif); if (len < sizeof(*rsp)) return -EINVAL; @@ -1175,7 +1175,7 @@ static int wcn36xx_smd_config_bss_rsp(struct wcn36xx *wcn, { struct wcn36xx_hal_config_bss_rsp_msg *rsp; struct wcn36xx_hal_config_bss_rsp_params *params; - struct wcn36xx_vif *priv_vif = (struct wcn36xx_vif *)vif->drv_priv; + struct wcn36xx_vif *priv_vif = wcn36xx_vif_to_priv(vif); if (len < sizeof(*rsp)) return -EINVAL; @@ -1217,7 +1217,7 @@ int wcn36xx_smd_config_bss(struct wcn36xx *wcn, struct ieee80211_vif *vif, struct 
wcn36xx_hal_config_bss_req_msg msg; struct wcn36xx_hal_config_bss_params *bss; struct wcn36xx_hal_config_sta_params *sta_params; - struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv; + struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif); int ret = 0; mutex_lock(&wcn->hal_mutex); @@ -1343,7 +1343,7 @@ out: int wcn36xx_smd_delete_bss(struct wcn36xx *wcn, struct ieee80211_vif *vif) { struct wcn36xx_hal_delete_bss_req_msg msg_body; - struct wcn36xx_vif *priv_vif = (struct wcn36xx_vif *)vif->drv_priv; + struct wcn36xx_vif *priv_vif = wcn36xx_vif_to_priv(vif); int ret = 0; mutex_lock(&wcn->hal_mutex); @@ -1633,7 +1633,7 @@ out: int wcn36xx_smd_enter_bmps(struct wcn36xx *wcn, struct ieee80211_vif *vif) { struct wcn36xx_hal_enter_bmps_req_msg msg_body; - struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv; + struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif); int ret = 0; mutex_lock(&wcn->hal_mutex); @@ -1663,7 +1663,7 @@ out: int wcn36xx_smd_exit_bmps(struct wcn36xx *wcn, struct ieee80211_vif *vif) { struct wcn36xx_hal_enter_bmps_req_msg msg_body; - struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv; + struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif); int ret = 0; mutex_lock(&wcn->hal_mutex); @@ -1724,7 +1724,7 @@ int wcn36xx_smd_keep_alive_req(struct wcn36xx *wcn, int packet_type) { struct wcn36xx_hal_keep_alive_req_msg msg_body; - struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv; + struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif); int ret = 0; mutex_lock(&wcn->hal_mutex); @@ -2027,9 +2027,7 @@ static int wcn36xx_smd_missed_beacon_ind(struct wcn36xx *wcn, list_for_each_entry(tmp, &wcn->vif_list, list) { wcn36xx_dbg(WCN36XX_DBG_HAL, "beacon missed bss_index %d\n", tmp->bss_index); - vif = container_of((void *)tmp, - struct ieee80211_vif, - drv_priv); + vif = wcn36xx_priv_to_vif(tmp); ieee80211_connection_loss(vif); } return 0; @@ -2044,9 +2042,7 @@ static int wcn36xx_smd_missed_beacon_ind(struct wcn36xx *wcn, if (tmp->bss_index == rsp->bss_index) { wcn36xx_dbg(WCN36XX_DBG_HAL, "beacon missed bss_index %d\n", rsp->bss_index); - vif = container_of((void *)tmp, - struct ieee80211_vif, - drv_priv); + vif = wcn36xx_priv_to_vif(tmp); ieee80211_connection_loss(vif); return 0; } diff --git a/drivers/net/wireless/ath/wcn36xx/txrx.c b/drivers/net/wireless/ath/wcn36xx/txrx.c index 99c21aac68bd..b12c89b5940a 100644 --- a/drivers/net/wireless/ath/wcn36xx/txrx.c +++ b/drivers/net/wireless/ath/wcn36xx/txrx.c @@ -102,9 +102,7 @@ static inline struct wcn36xx_vif *get_vif_by_addr(struct wcn36xx *wcn, struct wcn36xx_vif *vif_priv = NULL; struct ieee80211_vif *vif = NULL; list_for_each_entry(vif_priv, &wcn->vif_list, list) { - vif = container_of((void *)vif_priv, - struct ieee80211_vif, - drv_priv); + vif = wcn36xx_priv_to_vif(vif_priv); if (memcmp(vif->addr, addr, ETH_ALEN) == 0) return vif_priv; } @@ -167,9 +165,7 @@ static void wcn36xx_set_tx_data(struct wcn36xx_tx_bd *bd, */ if (sta_priv) { __vif_priv = sta_priv->vif; - vif = container_of((void *)__vif_priv, - struct ieee80211_vif, - drv_priv); + vif = wcn36xx_priv_to_vif(__vif_priv); bd->dpu_sign = sta_priv->ucast_dpu_sign; if (vif->type == NL80211_IFTYPE_STATION) { diff --git a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h index 7b41e833e18c..c3ba07ed1db5 100644 --- a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h +++ b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h @@ -263,4 +263,16 @@ struct ieee80211_sta *wcn36xx_priv_to_sta(struct 
wcn36xx_sta *sta_priv) return container_of((void *)sta_priv, struct ieee80211_sta, drv_priv); } +static inline +struct wcn36xx_vif *wcn36xx_vif_to_priv(struct ieee80211_vif *vif) +{ + return (struct wcn36xx_vif *) vif->drv_priv; +} + +static inline +struct ieee80211_vif *wcn36xx_priv_to_vif(struct wcn36xx_vif *vif_priv) +{ + return container_of((void *) vif_priv, struct ieee80211_vif, drv_priv); +} + #endif /* _WCN36XX_H_ */ From 657a49be13eda5a3befc161d1d499d413c348762 Mon Sep 17 00:00:00 2001 From: Pontus Fuchs Date: Mon, 18 Apr 2016 22:00:42 -0700 Subject: [PATCH 0959/1649] wcn36xx: Use consistent name for private vif Some code used priv_vif and some used vif_priv. Convert all to vif_priv for consistency. Signed-off-by: Pontus Fuchs Signed-off-by: Bjorn Andersson Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wcn36xx/smd.c | 28 +++++++++++++------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c index cc1b3b7a4ff9..170440ed5d85 100644 --- a/drivers/net/wireless/ath/wcn36xx/smd.c +++ b/drivers/net/wireless/ath/wcn36xx/smd.c @@ -191,7 +191,7 @@ static void wcn36xx_smd_set_sta_params(struct wcn36xx *wcn, struct ieee80211_sta *sta, struct wcn36xx_hal_config_sta_params *sta_params) { - struct wcn36xx_vif *priv_vif = wcn36xx_vif_to_priv(vif); + struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif); struct wcn36xx_sta *priv_sta = NULL; if (vif->type == NL80211_IFTYPE_ADHOC || vif->type == NL80211_IFTYPE_AP || @@ -215,7 +215,7 @@ static void wcn36xx_smd_set_sta_params(struct wcn36xx *wcn, else memcpy(&sta_params->bssid, vif->addr, ETH_ALEN); - sta_params->encrypt_type = priv_vif->encrypt_type; + sta_params->encrypt_type = vif_priv->encrypt_type; sta_params->short_preamble_supported = true; sta_params->rifs_mode = 0; @@ -224,7 +224,7 @@ static void wcn36xx_smd_set_sta_params(struct wcn36xx *wcn, sta_params->uapsd = 0; sta_params->mimo_ps = WCN36XX_HAL_HT_MIMO_PS_STATIC; sta_params->max_ampdu_duration = 0; - sta_params->bssid_index = priv_vif->bss_index; + sta_params->bssid_index = vif_priv->bss_index; sta_params->p2p = 0; if (sta) { @@ -726,7 +726,7 @@ static int wcn36xx_smd_add_sta_self_rsp(struct wcn36xx *wcn, size_t len) { struct wcn36xx_hal_add_sta_self_rsp_msg *rsp; - struct wcn36xx_vif *priv_vif = wcn36xx_vif_to_priv(vif); + struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif); if (len < sizeof(*rsp)) return -EINVAL; @@ -743,8 +743,8 @@ static int wcn36xx_smd_add_sta_self_rsp(struct wcn36xx *wcn, "hal add sta self status %d self_sta_index %d dpu_index %d\n", rsp->status, rsp->self_sta_index, rsp->dpu_index); - priv_vif->self_sta_index = rsp->self_sta_index; - priv_vif->self_dpu_desc_index = rsp->dpu_index; + vif_priv->self_sta_index = rsp->self_sta_index; + vif_priv->self_dpu_desc_index = rsp->dpu_index; return 0; } @@ -1175,7 +1175,7 @@ static int wcn36xx_smd_config_bss_rsp(struct wcn36xx *wcn, { struct wcn36xx_hal_config_bss_rsp_msg *rsp; struct wcn36xx_hal_config_bss_rsp_params *params; - struct wcn36xx_vif *priv_vif = wcn36xx_vif_to_priv(vif); + struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif); if (len < sizeof(*rsp)) return -EINVAL; @@ -1198,14 +1198,14 @@ static int wcn36xx_smd_config_bss_rsp(struct wcn36xx *wcn, params->bss_bcast_sta_idx, params->mac, params->tx_mgmt_power, params->ucast_dpu_signature); - priv_vif->bss_index = params->bss_index; + vif_priv->bss_index = params->bss_index; - if (priv_vif->sta) { - priv_vif->sta->bss_sta_index = 
params->bss_sta_index; - priv_vif->sta->bss_dpu_desc_index = params->dpu_desc_index; + if (vif_priv->sta) { + vif_priv->sta->bss_sta_index = params->bss_sta_index; + vif_priv->sta->bss_dpu_desc_index = params->dpu_desc_index; } - priv_vif->self_ucast_dpu_sign = params->ucast_dpu_signature; + vif_priv->self_ucast_dpu_sign = params->ucast_dpu_signature; return 0; } @@ -1343,13 +1343,13 @@ out: int wcn36xx_smd_delete_bss(struct wcn36xx *wcn, struct ieee80211_vif *vif) { struct wcn36xx_hal_delete_bss_req_msg msg_body; - struct wcn36xx_vif *priv_vif = wcn36xx_vif_to_priv(vif); + struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif); int ret = 0; mutex_lock(&wcn->hal_mutex); INIT_HAL_MSG(msg_body, WCN36XX_HAL_DELETE_BSS_REQ); - msg_body.bss_index = priv_vif->bss_index; + msg_body.bss_index = vif_priv->bss_index; PREPARE_HAL_BUF(wcn->hal_buf, msg_body); From 90023c034fefe51cf67c719065434b0e43e9baf9 Mon Sep 17 00:00:00 2001 From: Pontus Fuchs Date: Mon, 18 Apr 2016 22:00:43 -0700 Subject: [PATCH 0960/1649] wcn36xx: Use define for invalid index and fix typo Signed-off-by: Pontus Fuchs Signed-off-by: Bjorn Andersson Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wcn36xx/hal.h | 2 +- drivers/net/wireless/ath/wcn36xx/main.c | 4 ++-- drivers/net/wireless/ath/wcn36xx/smd.c | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/net/wireless/ath/wcn36xx/hal.h b/drivers/net/wireless/ath/wcn36xx/hal.h index 3af16cba3d12..433d9801a0ae 100644 --- a/drivers/net/wireless/ath/wcn36xx/hal.h +++ b/drivers/net/wireless/ath/wcn36xx/hal.h @@ -48,7 +48,7 @@ #define WCN36XX_HAL_IPV4_ADDR_LEN 4 -#define WALN_HAL_STA_INVALID_IDX 0xFF +#define WCN36XX_HAL_STA_INVALID_IDX 0xFF #define WCN36XX_HAL_BSS_INVALID_IDX 0xFF /* Default Beacon template size */ diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c index 62cb9ffd854c..4781b5e8deb3 100644 --- a/drivers/net/wireless/ath/wcn36xx/main.c +++ b/drivers/net/wireless/ath/wcn36xx/main.c @@ -618,7 +618,7 @@ static void wcn36xx_bss_info_changed(struct ieee80211_hw *hw, if (!is_zero_ether_addr(bss_conf->bssid)) { vif_priv->is_joining = true; - vif_priv->bss_index = 0xff; + vif_priv->bss_index = WCN36XX_HAL_BSS_INVALID_IDX; wcn36xx_smd_join(wcn, bss_conf->bssid, vif->addr, WCN36XX_HW_CHANNEL(wcn)); wcn36xx_smd_config_bss(wcn, vif, NULL, @@ -711,7 +711,7 @@ static void wcn36xx_bss_info_changed(struct ieee80211_hw *hw, if (bss_conf->enable_beacon) { vif_priv->dtim_period = bss_conf->dtim_period; - vif_priv->bss_index = 0xff; + vif_priv->bss_index = WCN36XX_HAL_BSS_INVALID_IDX; wcn36xx_smd_config_bss(wcn, vif, NULL, vif->addr, false); skb = ieee80211_beacon_get_tim(hw, vif, &tim_off, diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c index 170440ed5d85..6d4aa9250ca8 100644 --- a/drivers/net/wireless/ath/wcn36xx/smd.c +++ b/drivers/net/wireless/ath/wcn36xx/smd.c @@ -197,7 +197,7 @@ static void wcn36xx_smd_set_sta_params(struct wcn36xx *wcn, vif->type == NL80211_IFTYPE_AP || vif->type == NL80211_IFTYPE_MESH_POINT) { sta_params->type = 1; - sta_params->sta_index = 0xFF; + sta_params->sta_index = WCN36XX_HAL_STA_INVALID_IDX; } else { sta_params->type = 0; sta_params->sta_index = 1; From a92e4696292199714d47d8e52c4bf0318324f77f Mon Sep 17 00:00:00 2001 From: Pontus Fuchs Date: Mon, 18 Apr 2016 22:00:44 -0700 Subject: [PATCH 0961/1649] wcn36xx: Add helper macros to cast sta to priv While poking at this I also change two related things. 
I rename one variable to make the names consistent. I also move one assignment of priv_sta to the declaration to save a few lines. Signed-off-by: Pontus Fuchs Signed-off-by: Bjorn Andersson Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wcn36xx/main.c | 14 ++++++-------- drivers/net/wireless/ath/wcn36xx/smd.c | 12 ++++++------ drivers/net/wireless/ath/wcn36xx/wcn36xx.h | 6 ++++++ 3 files changed, 18 insertions(+), 14 deletions(-) diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c index 4781b5e8deb3..30f015d3a9e6 100644 --- a/drivers/net/wireless/ath/wcn36xx/main.c +++ b/drivers/net/wireless/ath/wcn36xx/main.c @@ -373,7 +373,7 @@ static void wcn36xx_tx(struct ieee80211_hw *hw, struct wcn36xx_sta *sta_priv = NULL; if (control->sta) - sta_priv = (struct wcn36xx_sta *)control->sta->drv_priv; + sta_priv = wcn36xx_sta_to_priv(control->sta); if (wcn36xx_start_tx(wcn, sta_priv, skb)) ieee80211_free_txskb(wcn->hw, skb); @@ -518,7 +518,7 @@ static void wcn36xx_update_allowed_rates(struct ieee80211_sta *sta, { int i, size; u16 *rates_table; - struct wcn36xx_sta *sta_priv = (struct wcn36xx_sta *)sta->drv_priv; + struct wcn36xx_sta *sta_priv = wcn36xx_sta_to_priv(sta); u32 rates = sta->supp_rates[band]; memset(&sta_priv->supported_rates, 0, @@ -661,7 +661,7 @@ static void wcn36xx_bss_info_changed(struct ieee80211_hw *hw, rcu_read_unlock(); goto out; } - sta_priv = (struct wcn36xx_sta *)sta->drv_priv; + sta_priv = wcn36xx_sta_to_priv(sta); wcn36xx_update_allowed_rates(sta, WCN36XX_BAND(wcn)); @@ -791,7 +791,7 @@ static int wcn36xx_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif, { struct wcn36xx *wcn = hw->priv; struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif); - struct wcn36xx_sta *sta_priv = (struct wcn36xx_sta *)sta->drv_priv; + struct wcn36xx_sta *sta_priv = wcn36xx_sta_to_priv(sta); wcn36xx_dbg(WCN36XX_DBG_MAC, "mac sta add vif %p sta %pM\n", vif, sta->addr); @@ -816,7 +816,7 @@ static int wcn36xx_sta_remove(struct ieee80211_hw *hw, { struct wcn36xx *wcn = hw->priv; struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif); - struct wcn36xx_sta *sta_priv = (struct wcn36xx_sta *)sta->drv_priv; + struct wcn36xx_sta *sta_priv = wcn36xx_sta_to_priv(sta); wcn36xx_dbg(WCN36XX_DBG_MAC, "mac sta remove vif %p sta %pM index %d\n", vif, sta->addr, sta_priv->sta_index); @@ -858,7 +858,7 @@ static int wcn36xx_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_ampdu_params *params) { struct wcn36xx *wcn = hw->priv; - struct wcn36xx_sta *sta_priv = NULL; + struct wcn36xx_sta *sta_priv = wcn36xx_sta_to_priv(params->sta); struct ieee80211_sta *sta = params->sta; enum ieee80211_ampdu_mlme_action action = params->action; u16 tid = params->tid; @@ -867,8 +867,6 @@ static int wcn36xx_ampdu_action(struct ieee80211_hw *hw, wcn36xx_dbg(WCN36XX_DBG_MAC, "mac ampdu action action %d tid %d\n", action, tid); - sta_priv = (struct wcn36xx_sta *)sta->drv_priv; - switch (action) { case IEEE80211_AMPDU_RX_START: sta_priv->tid = tid; diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c index 6d4aa9250ca8..ff56138528b6 100644 --- a/drivers/net/wireless/ath/wcn36xx/smd.c +++ b/drivers/net/wireless/ath/wcn36xx/smd.c @@ -192,7 +192,7 @@ static void wcn36xx_smd_set_sta_params(struct wcn36xx *wcn, struct wcn36xx_hal_config_sta_params *sta_params) { struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif); - struct wcn36xx_sta *priv_sta = NULL; + struct wcn36xx_sta *sta_priv = NULL; if (vif->type == NL80211_IFTYPE_ADHOC || 
vif->type == NL80211_IFTYPE_AP || vif->type == NL80211_IFTYPE_MESH_POINT) { @@ -228,17 +228,17 @@ static void wcn36xx_smd_set_sta_params(struct wcn36xx *wcn, sta_params->p2p = 0; if (sta) { - priv_sta = (struct wcn36xx_sta *)sta->drv_priv; + sta_priv = wcn36xx_sta_to_priv(sta); if (NL80211_IFTYPE_STATION == vif->type) memcpy(&sta_params->bssid, sta->addr, ETH_ALEN); else memcpy(&sta_params->mac, sta->addr, ETH_ALEN); sta_params->wmm_enabled = sta->wme; sta_params->max_sp_len = sta->max_sp; - sta_params->aid = priv_sta->aid; + sta_params->aid = sta_priv->aid; wcn36xx_smd_set_sta_ht_params(sta, sta_params); - memcpy(&sta_params->supported_rates, &priv_sta->supported_rates, - sizeof(priv_sta->supported_rates)); + memcpy(&sta_params->supported_rates, &sta_priv->supported_rates, + sizeof(sta_priv->supported_rates)); } else { wcn36xx_set_default_rates(&sta_params->supported_rates); wcn36xx_smd_set_sta_default_ht_params(sta_params); @@ -969,7 +969,7 @@ static int wcn36xx_smd_config_sta_rsp(struct wcn36xx *wcn, { struct wcn36xx_hal_config_sta_rsp_msg *rsp; struct config_sta_rsp_params *params; - struct wcn36xx_sta *sta_priv = (struct wcn36xx_sta *)sta->drv_priv; + struct wcn36xx_sta *sta_priv = wcn36xx_sta_to_priv(sta); if (len < sizeof(*rsp)) return -EINVAL; diff --git a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h index c3ba07ed1db5..c368a34c8de7 100644 --- a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h +++ b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h @@ -275,4 +275,10 @@ struct ieee80211_vif *wcn36xx_priv_to_vif(struct wcn36xx_vif *vif_priv) return container_of((void *) vif_priv, struct ieee80211_vif, drv_priv); } +static inline +struct wcn36xx_sta *wcn36xx_sta_to_priv(struct ieee80211_sta *sta) +{ + return (struct wcn36xx_sta *)sta->drv_priv; +} + #endif /* _WCN36XX_H_ */ From 81c69263757788d77537fefdd9a55b05ed83c87b Mon Sep 17 00:00:00 2001 From: Pontus Fuchs Date: Mon, 18 Apr 2016 22:00:45 -0700 Subject: [PATCH 0962/1649] wcn36xx: Fetch private sta data from sta entry instead of from vif For consistency with other code. Signed-off-by: Pontus Fuchs Signed-off-by: Bjorn Andersson Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wcn36xx/main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c index 30f015d3a9e6..a23738deb5b3 100644 --- a/drivers/net/wireless/ath/wcn36xx/main.c +++ b/drivers/net/wireless/ath/wcn36xx/main.c @@ -386,7 +386,7 @@ static int wcn36xx_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, { struct wcn36xx *wcn = hw->priv; struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif); - struct wcn36xx_sta *sta_priv = vif_priv->sta; + struct wcn36xx_sta *sta_priv = wcn36xx_sta_to_priv(sta); int ret = 0; u8 key[WLAN_MAX_KEY_LEN]; From 25a44da26f2901308440a047b27a3a0054ea4a71 Mon Sep 17 00:00:00 2001 From: Pontus Fuchs Date: Mon, 18 Apr 2016 22:00:46 -0700 Subject: [PATCH 0963/1649] wcn36xx: Remove sta pointer in private vif struct This does not work with multiple sta's in a vif. 
Signed-off-by: Pontus Fuchs Signed-off-by: Bjorn Andersson Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wcn36xx/main.c | 3 --- drivers/net/wireless/ath/wcn36xx/smd.c | 28 ++++++++++++---------- drivers/net/wireless/ath/wcn36xx/wcn36xx.h | 1 - 3 files changed, 15 insertions(+), 17 deletions(-) diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c index a23738deb5b3..7c06ca9fdd2c 100644 --- a/drivers/net/wireless/ath/wcn36xx/main.c +++ b/drivers/net/wireless/ath/wcn36xx/main.c @@ -796,7 +796,6 @@ static int wcn36xx_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif, vif, sta->addr); spin_lock_init(&sta_priv->ampdu_lock); - vif_priv->sta = sta_priv; sta_priv->vif = vif_priv; /* * For STA mode HW will be configured on BSS_CHANGED_ASSOC because @@ -815,14 +814,12 @@ static int wcn36xx_sta_remove(struct ieee80211_hw *hw, struct ieee80211_sta *sta) { struct wcn36xx *wcn = hw->priv; - struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif); struct wcn36xx_sta *sta_priv = wcn36xx_sta_to_priv(sta); wcn36xx_dbg(WCN36XX_DBG_MAC, "mac sta remove vif %p sta %pM index %d\n", vif, sta->addr, sta_priv->sta_index); wcn36xx_smd_delete_sta(wcn, sta_priv->sta_index); - vif_priv->sta = NULL; sta_priv->vif = NULL; return 0; } diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c index ff56138528b6..76c6856ed932 100644 --- a/drivers/net/wireless/ath/wcn36xx/smd.c +++ b/drivers/net/wireless/ath/wcn36xx/smd.c @@ -1170,6 +1170,7 @@ static int wcn36xx_smd_config_bss_v1(struct wcn36xx *wcn, static int wcn36xx_smd_config_bss_rsp(struct wcn36xx *wcn, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, void *buf, size_t len) { @@ -1200,9 +1201,10 @@ static int wcn36xx_smd_config_bss_rsp(struct wcn36xx *wcn, vif_priv->bss_index = params->bss_index; - if (vif_priv->sta) { - vif_priv->sta->bss_sta_index = params->bss_sta_index; - vif_priv->sta->bss_dpu_desc_index = params->dpu_desc_index; + if (sta) { + struct wcn36xx_sta *sta_priv = wcn36xx_sta_to_priv(sta); + sta_priv->bss_sta_index = params->bss_sta_index; + sta_priv->bss_dpu_desc_index = params->dpu_desc_index; } vif_priv->self_ucast_dpu_sign = params->ucast_dpu_signature; @@ -1329,6 +1331,7 @@ int wcn36xx_smd_config_bss(struct wcn36xx *wcn, struct ieee80211_vif *vif, } ret = wcn36xx_smd_config_bss_rsp(wcn, vif, + sta, wcn->hal_buf, wcn->hal_rsp_len); if (ret) { @@ -2058,25 +2061,24 @@ static int wcn36xx_smd_delete_sta_context_ind(struct wcn36xx *wcn, { struct wcn36xx_hal_delete_sta_context_ind_msg *rsp = buf; struct wcn36xx_vif *tmp; - struct ieee80211_sta *sta = NULL; + struct ieee80211_sta *sta; if (len != sizeof(*rsp)) { wcn36xx_warn("Corrupted delete sta indication\n"); return -EIO; } + wcn36xx_dbg(WCN36XX_DBG_HAL, "delete station indication %pM index %d\n", + rsp->addr2, rsp->sta_id); + list_for_each_entry(tmp, &wcn->vif_list, list) { - if (sta && (tmp->sta->sta_index == rsp->sta_id)) { - sta = container_of((void *)tmp->sta, - struct ieee80211_sta, - drv_priv); - wcn36xx_dbg(WCN36XX_DBG_HAL, - "delete station indication %pM index %d\n", - rsp->addr2, - rsp->sta_id); + rcu_read_lock(); + sta = ieee80211_find_sta(wcn36xx_priv_to_vif(tmp), rsp->addr2); + if (sta) ieee80211_report_low_ack(sta, 0); + rcu_read_unlock(); + if (sta) return 0; - } } wcn36xx_warn("STA with addr %pM and index %d not found\n", diff --git a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h index c368a34c8de7..54000db0af1a 100644 --- 
a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h +++ b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h @@ -125,7 +125,6 @@ struct wcn36xx_platform_ctrl_ops { */ struct wcn36xx_vif { struct list_head list; - struct wcn36xx_sta *sta; u8 dtim_period; enum ani_ed_type encrypt_type; bool is_joining; From 16be1ac55944412e8d132b1db26f994b368c5742 Mon Sep 17 00:00:00 2001 From: Pontus Fuchs Date: Mon, 18 Apr 2016 22:00:47 -0700 Subject: [PATCH 0964/1649] wcn36xx: Parse trigger_ba response properly This message does not follow the canonical format and needs its own parser. Signed-off-by: Pontus Fuchs Signed-off-by: Bjorn Andersson Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wcn36xx/smd.c | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c index 76c6856ed932..7f315d098f52 100644 --- a/drivers/net/wireless/ath/wcn36xx/smd.c +++ b/drivers/net/wireless/ath/wcn36xx/smd.c @@ -1968,6 +1968,17 @@ out: return ret; } +static int wcn36xx_smd_trigger_ba_rsp(void *buf, int len) +{ + struct wcn36xx_hal_trigger_ba_rsp_msg *rsp; + + if (len < sizeof(*rsp)) + return -EINVAL; + + rsp = (struct wcn36xx_hal_trigger_ba_rsp_msg *) buf; + return rsp->status; +} + int wcn36xx_smd_trigger_ba(struct wcn36xx *wcn, u8 sta_index) { struct wcn36xx_hal_trigger_ba_req_msg msg_body; @@ -1992,8 +2003,7 @@ int wcn36xx_smd_trigger_ba(struct wcn36xx *wcn, u8 sta_index) wcn36xx_err("Sending hal_trigger_ba failed\n"); goto out; } - ret = wcn36xx_smd_rsp_status_check_v2(wcn, wcn->hal_buf, - wcn->hal_rsp_len); + ret = wcn36xx_smd_trigger_ba_rsp(wcn->hal_buf, wcn->hal_rsp_len); if (ret) { wcn36xx_err("hal_trigger_ba response failed err=%d\n", ret); goto out; From df98c3294bdf1fc3094f5466accf6423ce968a74 Mon Sep 17 00:00:00 2001 From: Pontus Fuchs Date: Mon, 18 Apr 2016 22:00:48 -0700 Subject: [PATCH 0965/1649] wcn36xx: Copy all members in config_sta v1 conversion When converting to version 1 of the config_sta struct not all members were copied. This fixes the problem of multicast frames not being delivered on an encrypted network.
Signed-off-by: Pontus Fuchs Signed-off-by: Bjorn Andersson Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wcn36xx/smd.c | 19 +++++++++++++++++-- 1 file changed, 17 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c index 7f315d098f52..ebb446272d21 100644 --- a/drivers/net/wireless/ath/wcn36xx/smd.c +++ b/drivers/net/wireless/ath/wcn36xx/smd.c @@ -949,17 +949,32 @@ static void wcn36xx_smd_convert_sta_to_v1(struct wcn36xx *wcn, memcpy(&v1->mac, orig->mac, ETH_ALEN); v1->aid = orig->aid; v1->type = orig->type; + v1->short_preamble_supported = orig->short_preamble_supported; v1->listen_interval = orig->listen_interval; + v1->wmm_enabled = orig->wmm_enabled; v1->ht_capable = orig->ht_capable; - + v1->tx_channel_width_set = orig->tx_channel_width_set; + v1->rifs_mode = orig->rifs_mode; + v1->lsig_txop_protection = orig->lsig_txop_protection; v1->max_ampdu_size = orig->max_ampdu_size; v1->max_ampdu_density = orig->max_ampdu_density; v1->sgi_40mhz = orig->sgi_40mhz; v1->sgi_20Mhz = orig->sgi_20Mhz; - + v1->rmf = orig->rmf; + v1->encrypt_type = orig->encrypt_type; + v1->action = orig->action; + v1->uapsd = orig->uapsd; + v1->max_sp_len = orig->max_sp_len; + v1->green_field_capable = orig->green_field_capable; + v1->mimo_ps = orig->mimo_ps; + v1->delayed_ba_support = orig->delayed_ba_support; + v1->max_ampdu_duration = orig->max_ampdu_duration; + v1->dsss_cck_mode_40mhz = orig->dsss_cck_mode_40mhz; memcpy(&v1->supported_rates, &orig->supported_rates, sizeof(orig->supported_rates)); v1->sta_index = orig->sta_index; + v1->bssid_index = orig->bssid_index; + v1->p2p = orig->p2p; } static int wcn36xx_smd_config_sta_rsp(struct wcn36xx *wcn, From 6d9cf123cd79277c65605faff8d25dbdd1b6ca64 Mon Sep 17 00:00:00 2001 From: Pontus Fuchs Date: Mon, 18 Apr 2016 22:00:49 -0700 Subject: [PATCH 0966/1649] wcn36xx: Use allocated self sta index instead of hard coded Signed-off-by: Pontus Fuchs Signed-off-by: Bjorn Andersson Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wcn36xx/smd.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c index ebb446272d21..e0d5631657c1 100644 --- a/drivers/net/wireless/ath/wcn36xx/smd.c +++ b/drivers/net/wireless/ath/wcn36xx/smd.c @@ -200,7 +200,7 @@ static void wcn36xx_smd_set_sta_params(struct wcn36xx *wcn, sta_params->sta_index = WCN36XX_HAL_STA_INVALID_IDX; } else { sta_params->type = 0; - sta_params->sta_index = 1; + sta_params->sta_index = vif_priv->self_sta_index; } sta_params->listen_interval = WCN36XX_LISTEN_INTERVAL(wcn); From 2716a8ac655f17d17a7040f99f306a6244b08802 Mon Sep 17 00:00:00 2001 From: Pontus Fuchs Date: Mon, 18 Apr 2016 22:00:50 -0700 Subject: [PATCH 0967/1649] wcn36xx: Clear encrypt_type when deleting bss key This fixes a problem connecting to an open network after being connected to an encrypted network. 
Signed-off-by: Pontus Fuchs Signed-off-by: Bjorn Andersson Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wcn36xx/main.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c index 7c06ca9fdd2c..f9c77de94583 100644 --- a/drivers/net/wireless/ath/wcn36xx/main.c +++ b/drivers/net/wireless/ath/wcn36xx/main.c @@ -471,6 +471,7 @@ static int wcn36xx_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, break; case DISABLE_KEY: if (!(IEEE80211_KEY_FLAG_PAIRWISE & key_conf->flags)) { + vif_priv->encrypt_type = WCN36XX_HAL_ED_NONE; wcn36xx_smd_remove_bsskey(wcn, vif_priv->encrypt_type, key_conf->keyidx); @@ -626,6 +627,7 @@ static void wcn36xx_bss_info_changed(struct ieee80211_hw *hw, } else { vif_priv->is_joining = false; wcn36xx_smd_delete_bss(wcn, vif); + vif_priv->encrypt_type = WCN36XX_HAL_ED_NONE; } } From 043ce546190243bd9de05dbb6c82c9099b01a3a2 Mon Sep 17 00:00:00 2001 From: Pontus Fuchs Date: Mon, 18 Apr 2016 22:00:51 -0700 Subject: [PATCH 0968/1649] wcn36xx: Track association state Knowing the association state is needed for mc filtering. Signed-off-by: Pontus Fuchs Signed-off-by: Bjorn Andersson Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wcn36xx/main.c | 2 ++ drivers/net/wireless/ath/wcn36xx/wcn36xx.h | 1 + 2 files changed, 3 insertions(+) diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c index f9c77de94583..253cece1b660 100644 --- a/drivers/net/wireless/ath/wcn36xx/main.c +++ b/drivers/net/wireless/ath/wcn36xx/main.c @@ -655,6 +655,7 @@ static void wcn36xx_bss_info_changed(struct ieee80211_hw *hw, vif->addr, bss_conf->aid); + vif_priv->sta_assoc = true; rcu_read_lock(); sta = ieee80211_find_sta(vif, bss_conf->bssid); if (!sta) { @@ -686,6 +687,7 @@ static void wcn36xx_bss_info_changed(struct ieee80211_hw *hw, bss_conf->bssid, vif->addr, bss_conf->aid); + vif_priv->sta_assoc = false; wcn36xx_smd_set_link_st(wcn, bss_conf->bssid, vif->addr, diff --git a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h index 54000db0af1a..7433d67a5929 100644 --- a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h +++ b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h @@ -128,6 +128,7 @@ struct wcn36xx_vif { u8 dtim_period; enum ani_ed_type encrypt_type; bool is_joining; + bool sta_assoc; struct wcn36xx_hal_mac_ssid ssid; /* Power management */ From 20a779ede344a0b9778b7d5d9af76453d14474fc Mon Sep 17 00:00:00 2001 From: Pontus Fuchs Date: Mon, 18 Apr 2016 22:00:52 -0700 Subject: [PATCH 0969/1649] wcn36xx: Implement multicast filtering Pass the multicast list to FW. This patch also adds a way to build the smd command in place. This is needed because the MC list command is too big for the stack. 
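The build-in-place approach can be pictured with a rough standalone sketch; every name below is invented, while the patch itself builds the request in wcn->hal_buf through the new init_hal_msg() helper.

/*
 * Illustrative sketch only (hypothetical names): build a command that is
 * too large for the stack directly in a preallocated buffer and return the
 * length to hand to the send path.
 */
#include <string.h>

#define SKETCH_MAX_ADDRS 48

struct sketch_mc_list_cmd {
	unsigned int msg_type;
	unsigned int msg_len;
	unsigned char addrs[SKETCH_MAX_ADDRS][6];	/* far too large for a local variable */
};

struct sketch_dev {
	unsigned char cmd_buf[4096];			/* preallocated command buffer */
};

static int sketch_build_mc_list(struct sketch_dev *dev,
				const unsigned char (*addrs)[6], int count)
{
	/* Point at the preallocated buffer instead of declaring the struct on the stack. */
	struct sketch_mc_list_cmd *cmd = (struct sketch_mc_list_cmd *)dev->cmd_buf;
	int i;

	memset(cmd, 0, sizeof(*cmd));
	cmd->msg_type = 0x100;				/* placeholder message id */
	cmd->msg_len = sizeof(*cmd);

	if (count > SKETCH_MAX_ADDRS)
		count = SKETCH_MAX_ADDRS;
	for (i = 0; i < count; i++)
		memcpy(cmd->addrs[i], addrs[i], 6);

	return (int)cmd->msg_len;
}

The point of the pattern is simply that the oversized request struct never exists as a local variable; only a pointer into the preallocated buffer lives on the stack.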
Signed-off-by: Pontus Fuchs [bjorn: dropped FIF_PROMISC_IN_BSS usage] Signed-off-by: Bjorn Andersson Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wcn36xx/hal.h | 6 +-- drivers/net/wireless/ath/wcn36xx/main.c | 50 ++++++++++++++++++++++-- drivers/net/wireless/ath/wcn36xx/smd.c | 51 +++++++++++++++++++++++++ drivers/net/wireless/ath/wcn36xx/smd.h | 3 ++ 4 files changed, 104 insertions(+), 6 deletions(-) diff --git a/drivers/net/wireless/ath/wcn36xx/hal.h b/drivers/net/wireless/ath/wcn36xx/hal.h index 433d9801a0ae..ec64c47f918b 100644 --- a/drivers/net/wireless/ath/wcn36xx/hal.h +++ b/drivers/net/wireless/ath/wcn36xx/hal.h @@ -4267,9 +4267,9 @@ struct wcn36xx_hal_rcv_flt_mc_addr_list_type { u8 data_offset; u32 mc_addr_count; - u8 mc_addr[ETH_ALEN][WCN36XX_HAL_MAX_NUM_MULTICAST_ADDRESS]; + u8 mc_addr[WCN36XX_HAL_MAX_NUM_MULTICAST_ADDRESS][ETH_ALEN]; u8 bss_index; -}; +} __packed; struct wcn36xx_hal_set_pkt_filter_rsp_msg { struct wcn36xx_hal_msg_header header; @@ -4323,7 +4323,7 @@ struct wcn36xx_hal_rcv_flt_pkt_clear_rsp_msg { struct wcn36xx_hal_rcv_flt_pkt_set_mc_list_req_msg { struct wcn36xx_hal_msg_header header; struct wcn36xx_hal_rcv_flt_mc_addr_list_type mc_addr_list; -}; +} __packed; struct wcn36xx_hal_rcv_flt_pkt_set_mc_list_rsp_msg { struct wcn36xx_hal_msg_header header; diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c index 253cece1b660..c0ba7b0775b3 100644 --- a/drivers/net/wireless/ath/wcn36xx/main.c +++ b/drivers/net/wireless/ath/wcn36xx/main.c @@ -287,6 +287,7 @@ static int wcn36xx_start(struct ieee80211_hw *hw) } wcn36xx_detect_chip_version(wcn); + wcn36xx_smd_update_cfg(wcn, WCN36XX_HAL_CFG_ENABLE_MC_ADDR_LIST, 1); /* DMA channel initialization */ ret = wcn36xx_dxe_init(wcn); @@ -354,15 +355,57 @@ static int wcn36xx_config(struct ieee80211_hw *hw, u32 changed) return 0; } -#define WCN36XX_SUPPORTED_FILTERS (0) - static void wcn36xx_configure_filter(struct ieee80211_hw *hw, unsigned int changed, unsigned int *total, u64 multicast) { + struct wcn36xx_hal_rcv_flt_mc_addr_list_type *fp; + struct wcn36xx *wcn = hw->priv; + struct wcn36xx_vif *tmp; + struct ieee80211_vif *vif = NULL; + wcn36xx_dbg(WCN36XX_DBG_MAC, "mac configure filter\n"); - *total &= WCN36XX_SUPPORTED_FILTERS; + *total &= FIF_ALLMULTI; + + fp = (void *)(unsigned long)multicast; + list_for_each_entry(tmp, &wcn->vif_list, list) { + vif = wcn36xx_priv_to_vif(tmp); + + /* FW handles MC filtering only when connected as STA */ + if (*total & FIF_ALLMULTI) + wcn36xx_smd_set_mc_list(wcn, vif, NULL); + else if (NL80211_IFTYPE_STATION == vif->type && tmp->sta_assoc) + wcn36xx_smd_set_mc_list(wcn, vif, fp); + } + kfree(fp); +} + +static u64 wcn36xx_prepare_multicast(struct ieee80211_hw *hw, + struct netdev_hw_addr_list *mc_list) +{ + struct wcn36xx_hal_rcv_flt_mc_addr_list_type *fp; + struct netdev_hw_addr *ha; + + wcn36xx_dbg(WCN36XX_DBG_MAC, "mac prepare multicast list\n"); + fp = kzalloc(sizeof(*fp), GFP_ATOMIC); + if (!fp) { + wcn36xx_err("Out of memory setting filters.\n"); + return 0; + } + + fp->mc_addr_count = 0; + /* update multicast filtering parameters */ + if (netdev_hw_addr_list_count(mc_list) <= + WCN36XX_HAL_MAX_NUM_MULTICAST_ADDRESS) { + netdev_hw_addr_list_for_each(ha, mc_list) { + memcpy(fp->mc_addr[fp->mc_addr_count], + ha->addr, ETH_ALEN); + fp->mc_addr_count++; + } + } + + return (u64)(unsigned long)fp; } static void wcn36xx_tx(struct ieee80211_hw *hw, @@ -920,6 +963,7 @@ static const struct ieee80211_ops wcn36xx_ops = { .resume = wcn36xx_resume, 
#endif .config = wcn36xx_config, + .prepare_multicast = wcn36xx_prepare_multicast, .configure_filter = wcn36xx_configure_filter, .tx = wcn36xx_tx, .set_key = wcn36xx_set_key, diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c index e0d5631657c1..b1bdc229e560 100644 --- a/drivers/net/wireless/ath/wcn36xx/smd.c +++ b/drivers/net/wireless/ath/wcn36xx/smd.c @@ -271,6 +271,16 @@ out: return ret; } +static void init_hal_msg(struct wcn36xx_hal_msg_header *hdr, + enum wcn36xx_hal_host_msg_type msg_type, + size_t msg_size) +{ + memset(hdr, 0, msg_size + sizeof(*hdr)); + hdr->msg_type = msg_type; + hdr->msg_version = WCN36XX_HAL_MSG_VERSION0; + hdr->len = msg_size + sizeof(*hdr); +} + #define INIT_HAL_MSG(msg_body, type) \ do { \ memset(&msg_body, 0, sizeof(msg_body)); \ @@ -2144,6 +2154,46 @@ out: mutex_unlock(&wcn->hal_mutex); return ret; } + +int wcn36xx_smd_set_mc_list(struct wcn36xx *wcn, + struct ieee80211_vif *vif, + struct wcn36xx_hal_rcv_flt_mc_addr_list_type *fp) +{ + struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif); + struct wcn36xx_hal_rcv_flt_pkt_set_mc_list_req_msg *msg_body = NULL; + int ret = 0; + + mutex_lock(&wcn->hal_mutex); + + msg_body = (struct wcn36xx_hal_rcv_flt_pkt_set_mc_list_req_msg *) + wcn->hal_buf; + init_hal_msg(&msg_body->header, WCN36XX_HAL_8023_MULTICAST_LIST_REQ, + sizeof(msg_body->mc_addr_list)); + + /* An empty list means all mc traffic will be received */ + if (fp) + memcpy(&msg_body->mc_addr_list, fp, + sizeof(msg_body->mc_addr_list)); + else + msg_body->mc_addr_list.mc_addr_count = 0; + + msg_body->mc_addr_list.bss_index = vif_priv->bss_index; + + ret = wcn36xx_smd_send_and_wait(wcn, msg_body->header.len); + if (ret) { + wcn36xx_err("Sending HAL_8023_MULTICAST_LIST failed\n"); + goto out; + } + ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len); + if (ret) { + wcn36xx_err("HAL_8023_MULTICAST_LIST rsp failed err=%d\n", ret); + goto out; + } +out: + mutex_unlock(&wcn->hal_mutex); + return ret; +} + static void wcn36xx_smd_rsp_process(struct wcn36xx *wcn, void *buf, size_t len) { struct wcn36xx_hal_msg_header *msg_header = buf; @@ -2185,6 +2235,7 @@ static void wcn36xx_smd_rsp_process(struct wcn36xx *wcn, void *buf, size_t len) case WCN36XX_HAL_UPDATE_SCAN_PARAM_RSP: case WCN36XX_HAL_CH_SWITCH_RSP: case WCN36XX_HAL_FEATURE_CAPS_EXCHANGE_RSP: + case WCN36XX_HAL_8023_MULTICAST_LIST_RSP: memcpy(wcn->hal_buf, buf, len); wcn->hal_rsp_len = len; complete(&wcn->hal_rsp_compl); diff --git a/drivers/net/wireless/ath/wcn36xx/smd.h b/drivers/net/wireless/ath/wcn36xx/smd.h index 8361f9e3995b..c1b76d75cf85 100644 --- a/drivers/net/wireless/ath/wcn36xx/smd.h +++ b/drivers/net/wireless/ath/wcn36xx/smd.h @@ -136,4 +136,7 @@ int wcn36xx_smd_del_ba(struct wcn36xx *wcn, u16 tid, u8 sta_index); int wcn36xx_smd_trigger_ba(struct wcn36xx *wcn, u8 sta_index); int wcn36xx_smd_update_cfg(struct wcn36xx *wcn, u32 cfg_id, u32 value); +int wcn36xx_smd_set_mc_list(struct wcn36xx *wcn, + struct ieee80211_vif *vif, + struct wcn36xx_hal_rcv_flt_mc_addr_list_type *fp); #endif /* _SMD_H_ */ From 6770559b8f614d3569ced7a7e3a8e846115e77af Mon Sep 17 00:00:00 2001 From: Pontus Fuchs Date: Mon, 18 Apr 2016 22:00:53 -0700 Subject: [PATCH 0970/1649] wcn36xx: Use correct command struct for EXIT_BMPS_REQ EXIT_BMPS_REQ was using the command struct for ENTER_BMPS_REQ. I spotted this when looking at command dumps. 
Signed-off-by: Pontus Fuchs Signed-off-by: Bjorn Andersson Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wcn36xx/smd.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c index b1bdc229e560..c15501c06eb2 100644 --- a/drivers/net/wireless/ath/wcn36xx/smd.c +++ b/drivers/net/wireless/ath/wcn36xx/smd.c @@ -1690,7 +1690,7 @@ out: int wcn36xx_smd_exit_bmps(struct wcn36xx *wcn, struct ieee80211_vif *vif) { - struct wcn36xx_hal_enter_bmps_req_msg msg_body; + struct wcn36xx_hal_exit_bmps_req_msg msg_body; struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif); int ret = 0; From 5443918d050a1a1e5766544e3b895e98671adeef Mon Sep 17 00:00:00 2001 From: Bjorn Andersson Date: Mon, 18 Apr 2016 22:00:54 -0700 Subject: [PATCH 0971/1649] wcn36xx: Delete BSS before idling link When disabling the beacon we must delete the bss before idling the link. Signed-off-by: Bjorn Andersson Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wcn36xx/main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c index c0ba7b0775b3..680217506b3d 100644 --- a/drivers/net/wireless/ath/wcn36xx/main.c +++ b/drivers/net/wireless/ath/wcn36xx/main.c @@ -779,9 +779,9 @@ static void wcn36xx_bss_info_changed(struct ieee80211_hw *hw, wcn36xx_smd_set_link_st(wcn, vif->addr, vif->addr, link_state); } else { + wcn36xx_smd_delete_bss(wcn, vif); wcn36xx_smd_set_link_st(wcn, vif->addr, vif->addr, WCN36XX_HAL_LINK_IDLE_STATE); - wcn36xx_smd_delete_bss(wcn, vif); } } out: From 23c2aabb93c9c8efb7b8991707e2db59f7346783 Mon Sep 17 00:00:00 2001 From: Bjorn Andersson Date: Mon, 18 Apr 2016 22:00:55 -0700 Subject: [PATCH 0972/1649] wcn36xx: Correct remove bss key response encoding The WCN36XX_HAL_RMV_BSSKEY_RSP carries a single u32 with "status", so we can use the standard status check function for decoding the result. This is the last user of the v2 status checker, so remove the struct and helper function. 
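The "canonical" shape being relied on can be sketched roughly as a message header followed by a single 32-bit status word; the struct and function names below are made up for illustration, while the driver's real counterparts are wcn36xx_hal_msg_header, wcn36xx_fw_msg_status_rsp and wcn36xx_smd_rsp_status_check().

/* Sketch only (illustrative names): the header-plus-status response layout
 * and a generic checker for it. */
#include <stdint.h>
#include <stddef.h>

struct sketch_msg_header {
	uint16_t msg_type;
	uint16_t msg_version;
	uint32_t len;
};

struct sketch_status_rsp {
	struct sketch_msg_header header;
	uint32_t status;		/* 0 on success, firmware error code otherwise */
};

static int sketch_rsp_status_check(const void *buf, size_t len)
{
	const struct sketch_status_rsp *rsp = buf;

	if (len < sizeof(*rsp))
		return -1;		/* too short to even carry a status */

	return (int)rsp->status;
}

A response that deviates from this shape, like the trigger_ba reply handled earlier in the series, needs its own dedicated parser.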
Signed-off-by: Bjorn Andersson Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wcn36xx/smd.c | 19 +------------------ drivers/net/wireless/ath/wcn36xx/smd.h | 9 --------- 2 files changed, 1 insertion(+), 27 deletions(-) diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c index c15501c06eb2..5f6ca3124bd8 100644 --- a/drivers/net/wireless/ath/wcn36xx/smd.c +++ b/drivers/net/wireless/ath/wcn36xx/smd.c @@ -312,22 +312,6 @@ static int wcn36xx_smd_rsp_status_check(void *buf, size_t len) return 0; } -static int wcn36xx_smd_rsp_status_check_v2(struct wcn36xx *wcn, void *buf, - size_t len) -{ - struct wcn36xx_fw_msg_status_rsp_v2 *rsp; - - if (len < sizeof(struct wcn36xx_hal_msg_header) + sizeof(*rsp)) - return wcn36xx_smd_rsp_status_check(buf, len); - - rsp = buf + sizeof(struct wcn36xx_hal_msg_header); - - if (WCN36XX_FW_MSG_RESULT_SUCCESS != rsp->status) - return rsp->status; - - return 0; -} - int wcn36xx_smd_load_nv(struct wcn36xx *wcn) { struct nv_data *nv_d; @@ -1647,8 +1631,7 @@ int wcn36xx_smd_remove_bsskey(struct wcn36xx *wcn, wcn36xx_err("Sending hal_remove_bsskey failed\n"); goto out; } - ret = wcn36xx_smd_rsp_status_check_v2(wcn, wcn->hal_buf, - wcn->hal_rsp_len); + ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len); if (ret) { wcn36xx_err("hal_remove_bsskey response failed err=%d\n", ret); goto out; diff --git a/drivers/net/wireless/ath/wcn36xx/smd.h b/drivers/net/wireless/ath/wcn36xx/smd.h index c1b76d75cf85..d74d781f4c8d 100644 --- a/drivers/net/wireless/ath/wcn36xx/smd.h +++ b/drivers/net/wireless/ath/wcn36xx/smd.h @@ -44,15 +44,6 @@ struct wcn36xx_fw_msg_status_rsp { u32 status; } __packed; -/* wcn3620 returns this for tigger_ba */ - -struct wcn36xx_fw_msg_status_rsp_v2 { - u8 bss_id[6]; - u32 status __packed; - u16 count_following_candidates __packed; - /* candidate list follows */ -}; - struct wcn36xx_hal_ind_msg { struct list_head list; u8 *msg; From ffc03c331a1e7cafac3beb4f89c40fa7d6213d6e Mon Sep 17 00:00:00 2001 From: Bjorn Andersson Date: Mon, 18 Apr 2016 22:00:56 -0700 Subject: [PATCH 0973/1649] wcn36xx: Fill in capability list Fill in the capability list with more values from the downstream driver. 
Signed-off-by: Bjorn Andersson Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wcn36xx/hal.h | 39 ++++++++++++++++++++++++ drivers/net/wireless/ath/wcn36xx/main.c | 40 ++++++++++++++++++++++++- 2 files changed, 78 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/ath/wcn36xx/hal.h b/drivers/net/wireless/ath/wcn36xx/hal.h index ec64c47f918b..658bfb8baabe 100644 --- a/drivers/net/wireless/ath/wcn36xx/hal.h +++ b/drivers/net/wireless/ath/wcn36xx/hal.h @@ -4389,6 +4389,45 @@ enum place_holder_in_cap_bitmap { RTT = 20, RATECTRL = 21, WOW = 22, + WLAN_ROAM_SCAN_OFFLOAD = 23, + SPECULATIVE_PS_POLL = 24, + SCAN_SCH = 25, + IBSS_HEARTBEAT_OFFLOAD = 26, + WLAN_SCAN_OFFLOAD = 27, + WLAN_PERIODIC_TX_PTRN = 28, + ADVANCE_TDLS = 29, + BATCH_SCAN = 30, + FW_IN_TX_PATH = 31, + EXTENDED_NSOFFLOAD_SLOT = 32, + CH_SWITCH_V1 = 33, + HT40_OBSS_SCAN = 34, + UPDATE_CHANNEL_LIST = 35, + WLAN_MCADDR_FLT = 36, + WLAN_CH144 = 37, + NAN = 38, + TDLS_SCAN_COEXISTENCE = 39, + LINK_LAYER_STATS_MEAS = 40, + MU_MIMO = 41, + EXTENDED_SCAN = 42, + DYNAMIC_WMM_PS = 43, + MAC_SPOOFED_SCAN = 44, + BMU_ERROR_GENERIC_RECOVERY = 45, + DISA = 46, + FW_STATS = 47, + WPS_PRBRSP_TMPL = 48, + BCN_IE_FLT_DELTA = 49, + TDLS_OFF_CHANNEL = 51, + RTT3 = 52, + MGMT_FRAME_LOGGING = 53, + ENHANCED_TXBD_COMPLETION = 54, + LOGGING_ENHANCEMENT = 55, + EXT_SCAN_ENHANCED = 56, + MEMORY_DUMP_SUPPORTED = 57, + PER_PKT_STATS_SUPPORTED = 58, + EXT_LL_STAT = 60, + WIFI_CONFIG = 61, + ANTENNA_DIVERSITY_SELECTION = 62, + MAX_FEATURE_SUPPORTED = 128, }; diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c index 680217506b3d..fe81b2a7c8d9 100644 --- a/drivers/net/wireless/ath/wcn36xx/main.c +++ b/drivers/net/wireless/ath/wcn36xx/main.c @@ -201,7 +201,45 @@ static const char * const wcn36xx_caps_names[] = { "BCN_FILTER", /* 19 */ "RTT", /* 20 */ "RATECTRL", /* 21 */ - "WOW" /* 22 */ + "WOW", /* 22 */ + "WLAN_ROAM_SCAN_OFFLOAD", /* 23 */ + "SPECULATIVE_PS_POLL", /* 24 */ + "SCAN_SCH", /* 25 */ + "IBSS_HEARTBEAT_OFFLOAD", /* 26 */ + "WLAN_SCAN_OFFLOAD", /* 27 */ + "WLAN_PERIODIC_TX_PTRN", /* 28 */ + "ADVANCE_TDLS", /* 29 */ + "BATCH_SCAN", /* 30 */ + "FW_IN_TX_PATH", /* 31 */ + "EXTENDED_NSOFFLOAD_SLOT", /* 32 */ + "CH_SWITCH_V1", /* 33 */ + "HT40_OBSS_SCAN", /* 34 */ + "UPDATE_CHANNEL_LIST", /* 35 */ + "WLAN_MCADDR_FLT", /* 36 */ + "WLAN_CH144", /* 37 */ + "NAN", /* 38 */ + "TDLS_SCAN_COEXISTENCE", /* 39 */ + "LINK_LAYER_STATS_MEAS", /* 40 */ + "MU_MIMO", /* 41 */ + "EXTENDED_SCAN", /* 42 */ + "DYNAMIC_WMM_PS", /* 43 */ + "MAC_SPOOFED_SCAN", /* 44 */ + "BMU_ERROR_GENERIC_RECOVERY", /* 45 */ + "DISA", /* 46 */ + "FW_STATS", /* 47 */ + "WPS_PRBRSP_TMPL", /* 48 */ + "BCN_IE_FLT_DELTA", /* 49 */ + "TDLS_OFF_CHANNEL", /* 51 */ + "RTT3", /* 52 */ + "MGMT_FRAME_LOGGING", /* 53 */ + "ENHANCED_TXBD_COMPLETION", /* 54 */ + "LOGGING_ENHANCEMENT", /* 55 */ + "EXT_SCAN_ENHANCED", /* 56 */ + "MEMORY_DUMP_SUPPORTED", /* 57 */ + "PER_PKT_STATS_SUPPORTED", /* 58 */ + "EXT_LL_STAT", /* 60 */ + "WIFI_CONFIG", /* 61 */ + "ANTENNA_DIVERSITY_SELECTION", /* 62 */ }; static const char *wcn36xx_get_cap_name(enum place_holder_in_cap_bitmap x) From 343a6d8e4955f298206d83ae764acf60d146b898 Mon Sep 17 00:00:00 2001 From: Nicolas Dichtel Date: Mon, 25 Apr 2016 10:25:14 +0200 Subject: [PATCH 0974/1649] rtnl: use nla_put_u64_64bit() Signed-off-by: Nicolas Dichtel Signed-off-by: David S. 
Miller --- include/uapi/linux/if_link.h | 1 + net/core/rtnetlink.c | 36 ++++++++++++++++++------------------ 2 files changed, 19 insertions(+), 18 deletions(-) diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index ba69d4447249..5fdd3a42e377 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h @@ -666,6 +666,7 @@ enum { IFLA_VF_STATS_TX_BYTES, IFLA_VF_STATS_BROADCAST, IFLA_VF_STATS_MULTICAST, + IFLA_VF_STATS_PAD, __IFLA_VF_STATS_MAX, }; diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 5ec059d52823..9efc1f34ef3b 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -825,17 +825,17 @@ static inline int rtnl_vfinfo_size(const struct net_device *dev, nla_total_size(sizeof(struct ifla_vf_link_state)) + nla_total_size(sizeof(struct ifla_vf_rss_query_en)) + /* IFLA_VF_STATS_RX_PACKETS */ - nla_total_size(sizeof(__u64)) + + nla_total_size_64bit(sizeof(__u64)) + /* IFLA_VF_STATS_TX_PACKETS */ - nla_total_size(sizeof(__u64)) + + nla_total_size_64bit(sizeof(__u64)) + /* IFLA_VF_STATS_RX_BYTES */ - nla_total_size(sizeof(__u64)) + + nla_total_size_64bit(sizeof(__u64)) + /* IFLA_VF_STATS_TX_BYTES */ - nla_total_size(sizeof(__u64)) + + nla_total_size_64bit(sizeof(__u64)) + /* IFLA_VF_STATS_BROADCAST */ - nla_total_size(sizeof(__u64)) + + nla_total_size_64bit(sizeof(__u64)) + /* IFLA_VF_STATS_MULTICAST */ - nla_total_size(sizeof(__u64)) + + nla_total_size_64bit(sizeof(__u64)) + nla_total_size(sizeof(struct ifla_vf_trust))); return size; } else @@ -1153,18 +1153,18 @@ static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb, nla_nest_cancel(skb, vfinfo); return -EMSGSIZE; } - if (nla_put_u64(skb, IFLA_VF_STATS_RX_PACKETS, - vf_stats.rx_packets) || - nla_put_u64(skb, IFLA_VF_STATS_TX_PACKETS, - vf_stats.tx_packets) || - nla_put_u64(skb, IFLA_VF_STATS_RX_BYTES, - vf_stats.rx_bytes) || - nla_put_u64(skb, IFLA_VF_STATS_TX_BYTES, - vf_stats.tx_bytes) || - nla_put_u64(skb, IFLA_VF_STATS_BROADCAST, - vf_stats.broadcast) || - nla_put_u64(skb, IFLA_VF_STATS_MULTICAST, - vf_stats.multicast)) + if (nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_PACKETS, + vf_stats.rx_packets, IFLA_VF_STATS_PAD) || + nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_PACKETS, + vf_stats.tx_packets, IFLA_VF_STATS_PAD) || + nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_BYTES, + vf_stats.rx_bytes, IFLA_VF_STATS_PAD) || + nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_BYTES, + vf_stats.tx_bytes, IFLA_VF_STATS_PAD) || + nla_put_u64_64bit(skb, IFLA_VF_STATS_BROADCAST, + vf_stats.broadcast, IFLA_VF_STATS_PAD) || + nla_put_u64_64bit(skb, IFLA_VF_STATS_MULTICAST, + vf_stats.multicast, IFLA_VF_STATS_PAD)) return -EMSGSIZE; nla_nest_end(skb, vfstats); nla_nest_end(skb, vf); From 2a51c1e8ecdcedfcb6f84efb3756822d0d0dfb36 Mon Sep 17 00:00:00 2001 From: Nicolas Dichtel Date: Mon, 25 Apr 2016 10:25:15 +0200 Subject: [PATCH 0975/1649] sched: use nla_put_u64_64bit() Signed-off-by: Nicolas Dichtel Signed-off-by: David S. 
Miller --- include/uapi/linux/pkt_sched.h | 3 +++ net/sched/sch_htb.c | 6 ++++-- net/sched/sch_netem.c | 3 ++- net/sched/sch_tbf.c | 6 ++++-- 4 files changed, 13 insertions(+), 5 deletions(-) diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h index 8cb18b44968e..1c78c7454c7c 100644 --- a/include/uapi/linux/pkt_sched.h +++ b/include/uapi/linux/pkt_sched.h @@ -179,6 +179,7 @@ enum { TCA_TBF_PRATE64, TCA_TBF_BURST, TCA_TBF_PBURST, + TCA_TBF_PAD, __TCA_TBF_MAX, }; @@ -368,6 +369,7 @@ enum { TCA_HTB_DIRECT_QLEN, TCA_HTB_RATE64, TCA_HTB_CEIL64, + TCA_HTB_PAD, __TCA_HTB_MAX, }; @@ -531,6 +533,7 @@ enum { TCA_NETEM_RATE, TCA_NETEM_ECN, TCA_NETEM_RATE64, + TCA_NETEM_PAD, __TCA_NETEM_MAX, }; diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c index 87b02ed3d5f2..f6bf5818ed4d 100644 --- a/net/sched/sch_htb.c +++ b/net/sched/sch_htb.c @@ -1122,10 +1122,12 @@ static int htb_dump_class(struct Qdisc *sch, unsigned long arg, if (nla_put(skb, TCA_HTB_PARMS, sizeof(opt), &opt)) goto nla_put_failure; if ((cl->rate.rate_bytes_ps >= (1ULL << 32)) && - nla_put_u64(skb, TCA_HTB_RATE64, cl->rate.rate_bytes_ps)) + nla_put_u64_64bit(skb, TCA_HTB_RATE64, cl->rate.rate_bytes_ps, + TCA_HTB_PAD)) goto nla_put_failure; if ((cl->ceil.rate_bytes_ps >= (1ULL << 32)) && - nla_put_u64(skb, TCA_HTB_CEIL64, cl->ceil.rate_bytes_ps)) + nla_put_u64_64bit(skb, TCA_HTB_CEIL64, cl->ceil.rate_bytes_ps, + TCA_HTB_PAD)) goto nla_put_failure; return nla_nest_end(skb, nest); diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c index 9640bb39a5d2..491d6fd6430c 100644 --- a/net/sched/sch_netem.c +++ b/net/sched/sch_netem.c @@ -994,7 +994,8 @@ static int netem_dump(struct Qdisc *sch, struct sk_buff *skb) goto nla_put_failure; if (q->rate >= (1ULL << 32)) { - if (nla_put_u64(skb, TCA_NETEM_RATE64, q->rate)) + if (nla_put_u64_64bit(skb, TCA_NETEM_RATE64, q->rate, + TCA_NETEM_PAD)) goto nla_put_failure; rate.rate = ~0U; } else { diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c index c2fbde742f37..83b90b584fae 100644 --- a/net/sched/sch_tbf.c +++ b/net/sched/sch_tbf.c @@ -472,11 +472,13 @@ static int tbf_dump(struct Qdisc *sch, struct sk_buff *skb) if (nla_put(skb, TCA_TBF_PARMS, sizeof(opt), &opt)) goto nla_put_failure; if (q->rate.rate_bytes_ps >= (1ULL << 32) && - nla_put_u64(skb, TCA_TBF_RATE64, q->rate.rate_bytes_ps)) + nla_put_u64_64bit(skb, TCA_TBF_RATE64, q->rate.rate_bytes_ps, + TCA_TBF_PAD)) goto nla_put_failure; if (tbf_peak_present(q) && q->peak.rate_bytes_ps >= (1ULL << 32) && - nla_put_u64(skb, TCA_TBF_PRATE64, q->peak.rate_bytes_ps)) + nla_put_u64_64bit(skb, TCA_TBF_PRATE64, q->peak.rate_bytes_ps, + TCA_TBF_PAD)) goto nla_put_failure; return nla_nest_end(skb, nest); From f13a82d87b21a3b7c2c3e3c75fe9cf810c332a09 Mon Sep 17 00:00:00 2001 From: Nicolas Dichtel Date: Mon, 25 Apr 2016 10:25:16 +0200 Subject: [PATCH 0976/1649] ipv6: use nla_put_u64_64bit() Signed-off-by: Nicolas Dichtel Signed-off-by: David S. 
Miller --- include/uapi/linux/ila.h | 1 + net/ipv6/ila/ila_lwt.c | 3 ++- net/ipv6/ila/ila_xlat.c | 15 +++++++++------ 3 files changed, 12 insertions(+), 7 deletions(-) diff --git a/include/uapi/linux/ila.h b/include/uapi/linux/ila.h index abde7bbd6f3b..cd97951680bf 100644 --- a/include/uapi/linux/ila.h +++ b/include/uapi/linux/ila.h @@ -14,6 +14,7 @@ enum { ILA_ATTR_LOCATOR_MATCH, /* u64 */ ILA_ATTR_IFINDEX, /* s32 */ ILA_ATTR_DIR, /* u32 */ + ILA_ATTR_PAD, __ILA_ATTR_MAX, }; diff --git a/net/ipv6/ila/ila_lwt.c b/net/ipv6/ila/ila_lwt.c index 2ae3c4fd8aab..9db3621b2126 100644 --- a/net/ipv6/ila/ila_lwt.c +++ b/net/ipv6/ila/ila_lwt.c @@ -109,7 +109,8 @@ static int ila_fill_encap_info(struct sk_buff *skb, { struct ila_params *p = ila_params_lwtunnel(lwtstate); - if (nla_put_u64(skb, ILA_ATTR_LOCATOR, (__force u64)p->locator)) + if (nla_put_u64_64bit(skb, ILA_ATTR_LOCATOR, (__force u64)p->locator, + ILA_ATTR_PAD)) goto nla_put_failure; return 0; diff --git a/net/ipv6/ila/ila_xlat.c b/net/ipv6/ila/ila_xlat.c index 0b03533453e4..0e9e579410da 100644 --- a/net/ipv6/ila/ila_xlat.c +++ b/net/ipv6/ila/ila_xlat.c @@ -418,12 +418,15 @@ static int ila_nl_cmd_del_mapping(struct sk_buff *skb, struct genl_info *info) static int ila_fill_info(struct ila_map *ila, struct sk_buff *msg) { - if (nla_put_u64(msg, ILA_ATTR_IDENTIFIER, - (__force u64)ila->p.identifier) || - nla_put_u64(msg, ILA_ATTR_LOCATOR, - (__force u64)ila->p.ip.locator) || - nla_put_u64(msg, ILA_ATTR_LOCATOR_MATCH, - (__force u64)ila->p.ip.locator_match) || + if (nla_put_u64_64bit(msg, ILA_ATTR_IDENTIFIER, + (__force u64)ila->p.identifier, + ILA_ATTR_PAD) || + nla_put_u64_64bit(msg, ILA_ATTR_LOCATOR, + (__force u64)ila->p.ip.locator, + ILA_ATTR_PAD) || + nla_put_u64_64bit(msg, ILA_ATTR_LOCATOR_MATCH, + (__force u64)ila->p.ip.locator_match, + ILA_ATTR_PAD) || nla_put_s32(msg, ILA_ATTR_IFINDEX, ila->p.ifindex) || nla_put_u32(msg, ILA_ATTR_DIR, ila->p.dir)) return -1; From 0238b7204b7ff1bad1d2d4489f010d670cbd89f2 Mon Sep 17 00:00:00 2001 From: Nicolas Dichtel Date: Mon, 25 Apr 2016 10:25:17 +0200 Subject: [PATCH 0977/1649] ovs: use nla_put_u64_64bit() Signed-off-by: Nicolas Dichtel Signed-off-by: David S. Miller --- include/uapi/linux/openvswitch.h | 1 + net/openvswitch/datapath.c | 3 ++- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h index 0358f94af86e..d6be1fb778a5 100644 --- a/include/uapi/linux/openvswitch.h +++ b/include/uapi/linux/openvswitch.h @@ -519,6 +519,7 @@ enum ovs_flow_attr { * logging should be suppressed. */ OVS_FLOW_ATTR_UFID, /* Variable length unique flow identifier. */ OVS_FLOW_ATTR_UFID_FLAGS,/* u32 of OVS_UFID_F_*. */ + OVS_FLOW_ATTR_PAD, __OVS_FLOW_ATTR_MAX }; diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c index 0cc66a4e492d..22d9a5316304 100644 --- a/net/openvswitch/datapath.c +++ b/net/openvswitch/datapath.c @@ -754,7 +754,8 @@ static int ovs_flow_cmd_fill_stats(const struct sw_flow *flow, ovs_flow_stats_get(flow, &stats, &used, &tcp_flags); if (used && - nla_put_u64(skb, OVS_FLOW_ATTR_USED, ovs_flow_used_time(used))) + nla_put_u64_64bit(skb, OVS_FLOW_ATTR_USED, ovs_flow_used_time(used), + OVS_FLOW_ATTR_PAD)) return -EMSGSIZE; if (stats.n_packets && From 12a0faa3bd76157b9dc096758d6818ff535e4586 Mon Sep 17 00:00:00 2001 From: Nicolas Dichtel Date: Mon, 25 Apr 2016 10:25:18 +0200 Subject: [PATCH 0978/1649] bridge: use nla_put_u64_64bit() Signed-off-by: Nicolas Dichtel Signed-off-by: David S. 
Miller --- include/uapi/linux/if_link.h | 2 ++ net/bridge/br_netlink.c | 62 +++++++++++++++++++++--------------- 2 files changed, 38 insertions(+), 26 deletions(-) diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index 5fdd3a42e377..9300c08346c8 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h @@ -271,6 +271,7 @@ enum { IFLA_BR_NF_CALL_IP6TABLES, IFLA_BR_NF_CALL_ARPTABLES, IFLA_BR_VLAN_DEFAULT_PVID, + IFLA_BR_PAD, __IFLA_BR_MAX, }; @@ -313,6 +314,7 @@ enum { IFLA_BRPORT_HOLD_TIMER, IFLA_BRPORT_FLUSH, IFLA_BRPORT_MULTICAST_ROUTER, + IFLA_BRPORT_PAD, __IFLA_BRPORT_MAX }; #define IFLA_BRPORT_MAX (__IFLA_BRPORT_MAX - 1) diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c index e9c635eae24d..6bae1125e36d 100644 --- a/net/bridge/br_netlink.c +++ b/net/bridge/br_netlink.c @@ -135,9 +135,9 @@ static inline size_t br_port_info_size(void) + nla_total_size(sizeof(u16)) /* IFLA_BRPORT_NO */ + nla_total_size(sizeof(u8)) /* IFLA_BRPORT_TOPOLOGY_CHANGE_ACK */ + nla_total_size(sizeof(u8)) /* IFLA_BRPORT_CONFIG_PENDING */ - + nla_total_size(sizeof(u64)) /* IFLA_BRPORT_MESSAGE_AGE_TIMER */ - + nla_total_size(sizeof(u64)) /* IFLA_BRPORT_FORWARD_DELAY_TIMER */ - + nla_total_size(sizeof(u64)) /* IFLA_BRPORT_HOLD_TIMER */ + + nla_total_size_64bit(sizeof(u64)) /* IFLA_BRPORT_MESSAGE_AGE_TIMER */ + + nla_total_size_64bit(sizeof(u64)) /* IFLA_BRPORT_FORWARD_DELAY_TIMER */ + + nla_total_size_64bit(sizeof(u64)) /* IFLA_BRPORT_HOLD_TIMER */ #ifdef CONFIG_BRIDGE_IGMP_SNOOPING + nla_total_size(sizeof(u8)) /* IFLA_BRPORT_MULTICAST_ROUTER */ #endif @@ -190,13 +190,16 @@ static int br_port_fill_attrs(struct sk_buff *skb, return -EMSGSIZE; timerval = br_timer_value(&p->message_age_timer); - if (nla_put_u64(skb, IFLA_BRPORT_MESSAGE_AGE_TIMER, timerval)) + if (nla_put_u64_64bit(skb, IFLA_BRPORT_MESSAGE_AGE_TIMER, timerval, + IFLA_BRPORT_PAD)) return -EMSGSIZE; timerval = br_timer_value(&p->forward_delay_timer); - if (nla_put_u64(skb, IFLA_BRPORT_FORWARD_DELAY_TIMER, timerval)) + if (nla_put_u64_64bit(skb, IFLA_BRPORT_FORWARD_DELAY_TIMER, timerval, + IFLA_BRPORT_PAD)) return -EMSGSIZE; timerval = br_timer_value(&p->hold_timer); - if (nla_put_u64(skb, IFLA_BRPORT_HOLD_TIMER, timerval)) + if (nla_put_u64_64bit(skb, IFLA_BRPORT_HOLD_TIMER, timerval, + IFLA_BRPORT_PAD)) return -EMSGSIZE; #ifdef CONFIG_BRIDGE_IGMP_SNOOPING @@ -1087,10 +1090,10 @@ static size_t br_get_size(const struct net_device *brdev) nla_total_size(sizeof(u32)) + /* IFLA_BR_ROOT_PATH_COST */ nla_total_size(sizeof(u8)) + /* IFLA_BR_TOPOLOGY_CHANGE */ nla_total_size(sizeof(u8)) + /* IFLA_BR_TOPOLOGY_CHANGE_DETECTED */ - nla_total_size(sizeof(u64)) + /* IFLA_BR_HELLO_TIMER */ - nla_total_size(sizeof(u64)) + /* IFLA_BR_TCN_TIMER */ - nla_total_size(sizeof(u64)) + /* IFLA_BR_TOPOLOGY_CHANGE_TIMER */ - nla_total_size(sizeof(u64)) + /* IFLA_BR_GC_TIMER */ + nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_HELLO_TIMER */ + nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_TCN_TIMER */ + nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_TOPOLOGY_CHANGE_TIMER */ + nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_GC_TIMER */ nla_total_size(ETH_ALEN) + /* IFLA_BR_GROUP_ADDR */ #ifdef CONFIG_BRIDGE_IGMP_SNOOPING nla_total_size(sizeof(u8)) + /* IFLA_BR_MCAST_ROUTER */ @@ -1101,12 +1104,12 @@ static size_t br_get_size(const struct net_device *brdev) nla_total_size(sizeof(u32)) + /* IFLA_BR_MCAST_HASH_MAX */ nla_total_size(sizeof(u32)) + /* IFLA_BR_MCAST_LAST_MEMBER_CNT */ nla_total_size(sizeof(u32)) + /* 
IFLA_BR_MCAST_STARTUP_QUERY_CNT */ - nla_total_size(sizeof(u64)) + /* IFLA_BR_MCAST_LAST_MEMBER_INTVL */ - nla_total_size(sizeof(u64)) + /* IFLA_BR_MCAST_MEMBERSHIP_INTVL */ - nla_total_size(sizeof(u64)) + /* IFLA_BR_MCAST_QUERIER_INTVL */ - nla_total_size(sizeof(u64)) + /* IFLA_BR_MCAST_QUERY_INTVL */ - nla_total_size(sizeof(u64)) + /* IFLA_BR_MCAST_QUERY_RESPONSE_INTVL */ - nla_total_size(sizeof(u64)) + /* IFLA_BR_MCAST_STARTUP_QUERY_INTVL */ + nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_LAST_MEMBER_INTVL */ + nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_MEMBERSHIP_INTVL */ + nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_QUERIER_INTVL */ + nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_QUERY_INTVL */ + nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_QUERY_RESPONSE_INTVL */ + nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_STARTUP_QUERY_INTVL */ #endif #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) nla_total_size(sizeof(u8)) + /* IFLA_BR_NF_CALL_IPTABLES */ @@ -1129,16 +1132,17 @@ static int br_fill_info(struct sk_buff *skb, const struct net_device *brdev) u64 clockval; clockval = br_timer_value(&br->hello_timer); - if (nla_put_u64(skb, IFLA_BR_HELLO_TIMER, clockval)) + if (nla_put_u64_64bit(skb, IFLA_BR_HELLO_TIMER, clockval, IFLA_BR_PAD)) return -EMSGSIZE; clockval = br_timer_value(&br->tcn_timer); - if (nla_put_u64(skb, IFLA_BR_TCN_TIMER, clockval)) + if (nla_put_u64_64bit(skb, IFLA_BR_TCN_TIMER, clockval, IFLA_BR_PAD)) return -EMSGSIZE; clockval = br_timer_value(&br->topology_change_timer); - if (nla_put_u64(skb, IFLA_BR_TOPOLOGY_CHANGE_TIMER, clockval)) + if (nla_put_u64_64bit(skb, IFLA_BR_TOPOLOGY_CHANGE_TIMER, clockval, + IFLA_BR_PAD)) return -EMSGSIZE; clockval = br_timer_value(&br->gc_timer); - if (nla_put_u64(skb, IFLA_BR_GC_TIMER, clockval)) + if (nla_put_u64_64bit(skb, IFLA_BR_GC_TIMER, clockval, IFLA_BR_PAD)) return -EMSGSIZE; if (nla_put_u32(skb, IFLA_BR_FORWARD_DELAY, forward_delay) || @@ -1182,22 +1186,28 @@ static int br_fill_info(struct sk_buff *skb, const struct net_device *brdev) return -EMSGSIZE; clockval = jiffies_to_clock_t(br->multicast_last_member_interval); - if (nla_put_u64(skb, IFLA_BR_MCAST_LAST_MEMBER_INTVL, clockval)) + if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_LAST_MEMBER_INTVL, clockval, + IFLA_BR_PAD)) return -EMSGSIZE; clockval = jiffies_to_clock_t(br->multicast_membership_interval); - if (nla_put_u64(skb, IFLA_BR_MCAST_MEMBERSHIP_INTVL, clockval)) + if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_MEMBERSHIP_INTVL, clockval, + IFLA_BR_PAD)) return -EMSGSIZE; clockval = jiffies_to_clock_t(br->multicast_querier_interval); - if (nla_put_u64(skb, IFLA_BR_MCAST_QUERIER_INTVL, clockval)) + if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_QUERIER_INTVL, clockval, + IFLA_BR_PAD)) return -EMSGSIZE; clockval = jiffies_to_clock_t(br->multicast_query_interval); - if (nla_put_u64(skb, IFLA_BR_MCAST_QUERY_INTVL, clockval)) + if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_QUERY_INTVL, clockval, + IFLA_BR_PAD)) return -EMSGSIZE; clockval = jiffies_to_clock_t(br->multicast_query_response_interval); - if (nla_put_u64(skb, IFLA_BR_MCAST_QUERY_RESPONSE_INTVL, clockval)) + if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_QUERY_RESPONSE_INTVL, clockval, + IFLA_BR_PAD)) return -EMSGSIZE; clockval = jiffies_to_clock_t(br->multicast_startup_query_interval); - if (nla_put_u64(skb, IFLA_BR_MCAST_STARTUP_QUERY_INTVL, clockval)) + if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_STARTUP_QUERY_INTVL, clockval, + IFLA_BR_PAD)) return -EMSGSIZE; #endif #if 
IS_ENABLED(CONFIG_BRIDGE_NETFILTER) From 1c714a92833674c040e03be067accfb2b322221e Mon Sep 17 00:00:00 2001 From: Nicolas Dichtel Date: Mon, 25 Apr 2016 10:25:19 +0200 Subject: [PATCH 0979/1649] l2tp: use nla_put_u64_64bit() Signed-off-by: Nicolas Dichtel Signed-off-by: David S. Miller --- include/uapi/linux/l2tp.h | 1 + net/l2tp/l2tp_netlink.c | 80 +++++++++++++++++++++++---------------- 2 files changed, 49 insertions(+), 32 deletions(-) diff --git a/include/uapi/linux/l2tp.h b/include/uapi/linux/l2tp.h index 3386a99e0397..4bd27d0270a2 100644 --- a/include/uapi/linux/l2tp.h +++ b/include/uapi/linux/l2tp.h @@ -143,6 +143,7 @@ enum { L2TP_ATTR_RX_SEQ_DISCARDS, /* u64 */ L2TP_ATTR_RX_OOS_PACKETS, /* u64 */ L2TP_ATTR_RX_ERRORS, /* u64 */ + L2TP_ATTR_STATS_PAD, __L2TP_ATTR_STATS_MAX, }; diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c index 24ed2e875c45..1d02e8d20e56 100644 --- a/net/l2tp/l2tp_netlink.c +++ b/net/l2tp/l2tp_netlink.c @@ -346,22 +346,30 @@ static int l2tp_nl_tunnel_send(struct sk_buff *skb, u32 portid, u32 seq, int fla if (nest == NULL) goto nla_put_failure; - if (nla_put_u64(skb, L2TP_ATTR_TX_PACKETS, - atomic_long_read(&tunnel->stats.tx_packets)) || - nla_put_u64(skb, L2TP_ATTR_TX_BYTES, - atomic_long_read(&tunnel->stats.tx_bytes)) || - nla_put_u64(skb, L2TP_ATTR_TX_ERRORS, - atomic_long_read(&tunnel->stats.tx_errors)) || - nla_put_u64(skb, L2TP_ATTR_RX_PACKETS, - atomic_long_read(&tunnel->stats.rx_packets)) || - nla_put_u64(skb, L2TP_ATTR_RX_BYTES, - atomic_long_read(&tunnel->stats.rx_bytes)) || - nla_put_u64(skb, L2TP_ATTR_RX_SEQ_DISCARDS, - atomic_long_read(&tunnel->stats.rx_seq_discards)) || - nla_put_u64(skb, L2TP_ATTR_RX_OOS_PACKETS, - atomic_long_read(&tunnel->stats.rx_oos_packets)) || - nla_put_u64(skb, L2TP_ATTR_RX_ERRORS, - atomic_long_read(&tunnel->stats.rx_errors))) + if (nla_put_u64_64bit(skb, L2TP_ATTR_TX_PACKETS, + atomic_long_read(&tunnel->stats.tx_packets), + L2TP_ATTR_STATS_PAD) || + nla_put_u64_64bit(skb, L2TP_ATTR_TX_BYTES, + atomic_long_read(&tunnel->stats.tx_bytes), + L2TP_ATTR_STATS_PAD) || + nla_put_u64_64bit(skb, L2TP_ATTR_TX_ERRORS, + atomic_long_read(&tunnel->stats.tx_errors), + L2TP_ATTR_STATS_PAD) || + nla_put_u64_64bit(skb, L2TP_ATTR_RX_PACKETS, + atomic_long_read(&tunnel->stats.rx_packets), + L2TP_ATTR_STATS_PAD) || + nla_put_u64_64bit(skb, L2TP_ATTR_RX_BYTES, + atomic_long_read(&tunnel->stats.rx_bytes), + L2TP_ATTR_STATS_PAD) || + nla_put_u64_64bit(skb, L2TP_ATTR_RX_SEQ_DISCARDS, + atomic_long_read(&tunnel->stats.rx_seq_discards), + L2TP_ATTR_STATS_PAD) || + nla_put_u64_64bit(skb, L2TP_ATTR_RX_OOS_PACKETS, + atomic_long_read(&tunnel->stats.rx_oos_packets), + L2TP_ATTR_STATS_PAD) || + nla_put_u64_64bit(skb, L2TP_ATTR_RX_ERRORS, + atomic_long_read(&tunnel->stats.rx_errors), + L2TP_ATTR_STATS_PAD)) goto nla_put_failure; nla_nest_end(skb, nest); @@ -754,22 +762,30 @@ static int l2tp_nl_session_send(struct sk_buff *skb, u32 portid, u32 seq, int fl if (nest == NULL) goto nla_put_failure; - if (nla_put_u64(skb, L2TP_ATTR_TX_PACKETS, - atomic_long_read(&session->stats.tx_packets)) || - nla_put_u64(skb, L2TP_ATTR_TX_BYTES, - atomic_long_read(&session->stats.tx_bytes)) || - nla_put_u64(skb, L2TP_ATTR_TX_ERRORS, - atomic_long_read(&session->stats.tx_errors)) || - nla_put_u64(skb, L2TP_ATTR_RX_PACKETS, - atomic_long_read(&session->stats.rx_packets)) || - nla_put_u64(skb, L2TP_ATTR_RX_BYTES, - atomic_long_read(&session->stats.rx_bytes)) || - nla_put_u64(skb, L2TP_ATTR_RX_SEQ_DISCARDS, - atomic_long_read(&session->stats.rx_seq_discards)) || - 
nla_put_u64(skb, L2TP_ATTR_RX_OOS_PACKETS, - atomic_long_read(&session->stats.rx_oos_packets)) || - nla_put_u64(skb, L2TP_ATTR_RX_ERRORS, - atomic_long_read(&session->stats.rx_errors))) + if (nla_put_u64_64bit(skb, L2TP_ATTR_TX_PACKETS, + atomic_long_read(&session->stats.tx_packets), + L2TP_ATTR_STATS_PAD) || + nla_put_u64_64bit(skb, L2TP_ATTR_TX_BYTES, + atomic_long_read(&session->stats.tx_bytes), + L2TP_ATTR_STATS_PAD) || + nla_put_u64_64bit(skb, L2TP_ATTR_TX_ERRORS, + atomic_long_read(&session->stats.tx_errors), + L2TP_ATTR_STATS_PAD) || + nla_put_u64_64bit(skb, L2TP_ATTR_RX_PACKETS, + atomic_long_read(&session->stats.rx_packets), + L2TP_ATTR_STATS_PAD) || + nla_put_u64_64bit(skb, L2TP_ATTR_RX_BYTES, + atomic_long_read(&session->stats.rx_bytes), + L2TP_ATTR_STATS_PAD) || + nla_put_u64_64bit(skb, L2TP_ATTR_RX_SEQ_DISCARDS, + atomic_long_read(&session->stats.rx_seq_discards), + L2TP_ATTR_STATS_PAD) || + nla_put_u64_64bit(skb, L2TP_ATTR_RX_OOS_PACKETS, + atomic_long_read(&session->stats.rx_oos_packets), + L2TP_ATTR_STATS_PAD) || + nla_put_u64_64bit(skb, L2TP_ATTR_RX_ERRORS, + atomic_long_read(&session->stats.rx_errors), + L2TP_ATTR_STATS_PAD)) goto nla_put_failure; nla_nest_end(skb, nest); From a558da0916b90c330940a106105d0a6a67cb77f7 Mon Sep 17 00:00:00 2001 From: Nicolas Dichtel Date: Mon, 25 Apr 2016 10:25:20 +0200 Subject: [PATCH 0980/1649] ieee802154: use nla_put_u64_64bit() Signed-off-by: Nicolas Dichtel Signed-off-by: David S. Miller --- include/linux/nl802154.h | 2 ++ net/ieee802154/nl-mac.c | 17 +++++++++++------ net/ieee802154/nl802154.c | 3 ++- 3 files changed, 15 insertions(+), 7 deletions(-) diff --git a/include/linux/nl802154.h b/include/linux/nl802154.h index 167342c2ce6b..0f6f6607f592 100644 --- a/include/linux/nl802154.h +++ b/include/linux/nl802154.h @@ -92,6 +92,8 @@ enum { IEEE802154_ATTR_LLSEC_DEV_OVERRIDE, IEEE802154_ATTR_LLSEC_DEV_KEY_MODE, + IEEE802154_ATTR_PAD, + __IEEE802154_ATTR_MAX, }; diff --git a/net/ieee802154/nl-mac.c b/net/ieee802154/nl-mac.c index 3503c38954f9..d3cbb3258718 100644 --- a/net/ieee802154/nl-mac.c +++ b/net/ieee802154/nl-mac.c @@ -34,9 +34,11 @@ #include "ieee802154.h" -static int nla_put_hwaddr(struct sk_buff *msg, int type, __le64 hwaddr) +static int nla_put_hwaddr(struct sk_buff *msg, int type, __le64 hwaddr, + int padattr) { - return nla_put_u64(msg, type, swab64((__force u64)hwaddr)); + return nla_put_u64_64bit(msg, type, swab64((__force u64)hwaddr), + padattr); } static __le64 nla_get_hwaddr(const struct nlattr *nla) @@ -623,7 +625,8 @@ ieee802154_llsec_fill_key_id(struct sk_buff *msg, if (desc->device_addr.mode == IEEE802154_ADDR_LONG && nla_put_hwaddr(msg, IEEE802154_ATTR_HW_ADDR, - desc->device_addr.extended_addr)) + desc->device_addr.extended_addr, + IEEE802154_ATTR_PAD)) return -EMSGSIZE; } @@ -638,7 +641,7 @@ ieee802154_llsec_fill_key_id(struct sk_buff *msg, if (desc->mode == IEEE802154_SCF_KEY_HW_INDEX && nla_put_hwaddr(msg, IEEE802154_ATTR_LLSEC_KEY_SOURCE_EXTENDED, - desc->extended_source)) + desc->extended_source, IEEE802154_ATTR_PAD)) return -EMSGSIZE; return 0; @@ -1063,7 +1066,8 @@ ieee802154_nl_fill_dev(struct sk_buff *msg, u32 portid, u32 seq, nla_put_shortaddr(msg, IEEE802154_ATTR_PAN_ID, desc->pan_id) || nla_put_shortaddr(msg, IEEE802154_ATTR_SHORT_ADDR, desc->short_addr) || - nla_put_hwaddr(msg, IEEE802154_ATTR_HW_ADDR, desc->hwaddr) || + nla_put_hwaddr(msg, IEEE802154_ATTR_HW_ADDR, desc->hwaddr, + IEEE802154_ATTR_PAD) || nla_put_u32(msg, IEEE802154_ATTR_LLSEC_FRAME_COUNTER, desc->frame_counter) || nla_put_u8(msg, 
IEEE802154_ATTR_LLSEC_DEV_OVERRIDE, @@ -1167,7 +1171,8 @@ ieee802154_nl_fill_devkey(struct sk_buff *msg, u32 portid, u32 seq, if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) || nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) || - nla_put_hwaddr(msg, IEEE802154_ATTR_HW_ADDR, devaddr) || + nla_put_hwaddr(msg, IEEE802154_ATTR_HW_ADDR, devaddr, + IEEE802154_ATTR_PAD) || nla_put_u32(msg, IEEE802154_ATTR_LLSEC_FRAME_COUNTER, devkey->frame_counter) || ieee802154_llsec_fill_key_id(msg, &devkey->key_id)) diff --git a/net/ieee802154/nl802154.c b/net/ieee802154/nl802154.c index 614072064d03..8035c93dd527 100644 --- a/net/ieee802154/nl802154.c +++ b/net/ieee802154/nl802154.c @@ -813,7 +813,8 @@ nl802154_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flags, if (nla_put_u32(msg, NL802154_ATTR_WPAN_PHY, rdev->wpan_phy_idx) || nla_put_u32(msg, NL802154_ATTR_IFTYPE, wpan_dev->iftype) || - nla_put_u64(msg, NL802154_ATTR_WPAN_DEV, wpan_dev_id(wpan_dev)) || + nla_put_u64_64bit(msg, NL802154_ATTR_WPAN_DEV, + wpan_dev_id(wpan_dev), NL802154_ATTR_PAD) || nla_put_u32(msg, NL802154_ATTR_GENERATION, rdev->devlist_generation ^ (cfg802154_rdev_list_generation << 2))) From cbdeafd7e18b77d147fc1f6c000d4126e53d48bb Mon Sep 17 00:00:00 2001 From: Nicolas Dichtel Date: Mon, 25 Apr 2016 10:25:21 +0200 Subject: [PATCH 0981/1649] netfilter/ipvs: use nla_put_u64_64bit() Signed-off-by: Nicolas Dichtel Signed-off-by: David S. Miller --- include/uapi/linux/ip_vs.h | 1 + net/netfilter/ipvs/ip_vs_ctl.c | 36 ++++++++++++++++++++++------------ 2 files changed, 25 insertions(+), 12 deletions(-) diff --git a/include/uapi/linux/ip_vs.h b/include/uapi/linux/ip_vs.h index 391395c06c7e..22d69894bc92 100644 --- a/include/uapi/linux/ip_vs.h +++ b/include/uapi/linux/ip_vs.h @@ -435,6 +435,7 @@ enum { IPVS_STATS_ATTR_OUTPPS, /* current out packet rate */ IPVS_STATS_ATTR_INBPS, /* current in byte rate */ IPVS_STATS_ATTR_OUTBPS, /* current out byte rate */ + IPVS_STATS_ATTR_PAD, __IPVS_STATS_ATTR_MAX, }; diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c index 404b2a4f4b5b..f35ebc02fa5c 100644 --- a/net/netfilter/ipvs/ip_vs_ctl.c +++ b/net/netfilter/ipvs/ip_vs_ctl.c @@ -2875,8 +2875,10 @@ static int ip_vs_genl_fill_stats(struct sk_buff *skb, int container_type, if (nla_put_u32(skb, IPVS_STATS_ATTR_CONNS, (u32)kstats->conns) || nla_put_u32(skb, IPVS_STATS_ATTR_INPKTS, (u32)kstats->inpkts) || nla_put_u32(skb, IPVS_STATS_ATTR_OUTPKTS, (u32)kstats->outpkts) || - nla_put_u64(skb, IPVS_STATS_ATTR_INBYTES, kstats->inbytes) || - nla_put_u64(skb, IPVS_STATS_ATTR_OUTBYTES, kstats->outbytes) || + nla_put_u64_64bit(skb, IPVS_STATS_ATTR_INBYTES, kstats->inbytes, + IPVS_STATS_ATTR_PAD) || + nla_put_u64_64bit(skb, IPVS_STATS_ATTR_OUTBYTES, kstats->outbytes, + IPVS_STATS_ATTR_PAD) || nla_put_u32(skb, IPVS_STATS_ATTR_CPS, (u32)kstats->cps) || nla_put_u32(skb, IPVS_STATS_ATTR_INPPS, (u32)kstats->inpps) || nla_put_u32(skb, IPVS_STATS_ATTR_OUTPPS, (u32)kstats->outpps) || @@ -2900,16 +2902,26 @@ static int ip_vs_genl_fill_stats64(struct sk_buff *skb, int container_type, if (!nl_stats) return -EMSGSIZE; - if (nla_put_u64(skb, IPVS_STATS_ATTR_CONNS, kstats->conns) || - nla_put_u64(skb, IPVS_STATS_ATTR_INPKTS, kstats->inpkts) || - nla_put_u64(skb, IPVS_STATS_ATTR_OUTPKTS, kstats->outpkts) || - nla_put_u64(skb, IPVS_STATS_ATTR_INBYTES, kstats->inbytes) || - nla_put_u64(skb, IPVS_STATS_ATTR_OUTBYTES, kstats->outbytes) || - nla_put_u64(skb, IPVS_STATS_ATTR_CPS, kstats->cps) || - nla_put_u64(skb, 
IPVS_STATS_ATTR_INPPS, kstats->inpps) || - nla_put_u64(skb, IPVS_STATS_ATTR_OUTPPS, kstats->outpps) || - nla_put_u64(skb, IPVS_STATS_ATTR_INBPS, kstats->inbps) || - nla_put_u64(skb, IPVS_STATS_ATTR_OUTBPS, kstats->outbps)) + if (nla_put_u64_64bit(skb, IPVS_STATS_ATTR_CONNS, kstats->conns, + IPVS_STATS_ATTR_PAD) || + nla_put_u64_64bit(skb, IPVS_STATS_ATTR_INPKTS, kstats->inpkts, + IPVS_STATS_ATTR_PAD) || + nla_put_u64_64bit(skb, IPVS_STATS_ATTR_OUTPKTS, kstats->outpkts, + IPVS_STATS_ATTR_PAD) || + nla_put_u64_64bit(skb, IPVS_STATS_ATTR_INBYTES, kstats->inbytes, + IPVS_STATS_ATTR_PAD) || + nla_put_u64_64bit(skb, IPVS_STATS_ATTR_OUTBYTES, kstats->outbytes, + IPVS_STATS_ATTR_PAD) || + nla_put_u64_64bit(skb, IPVS_STATS_ATTR_CPS, kstats->cps, + IPVS_STATS_ATTR_PAD) || + nla_put_u64_64bit(skb, IPVS_STATS_ATTR_INPPS, kstats->inpps, + IPVS_STATS_ATTR_PAD) || + nla_put_u64_64bit(skb, IPVS_STATS_ATTR_OUTPPS, kstats->outpps, + IPVS_STATS_ATTR_PAD) || + nla_put_u64_64bit(skb, IPVS_STATS_ATTR_INBPS, kstats->inbps, + IPVS_STATS_ATTR_PAD) || + nla_put_u64_64bit(skb, IPVS_STATS_ATTR_OUTBPS, kstats->outbps, + IPVS_STATS_ATTR_PAD)) goto nla_put_failure; nla_nest_end(skb, nl_stats); From 2dad624e6dd65c6048a9bbe0e16559fce182c87c Mon Sep 17 00:00:00 2001 From: Nicolas Dichtel Date: Mon, 25 Apr 2016 10:25:22 +0200 Subject: [PATCH 0982/1649] wireless: use nla_put_u64_64bit() Signed-off-by: Nicolas Dichtel Signed-off-by: David S. Miller --- include/uapi/linux/nl80211.h | 4 ++ net/wireless/nl80211.c | 91 ++++++++++++++++++++++-------------- 2 files changed, 59 insertions(+), 36 deletions(-) diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index 1df655d8aa52..2c55dd1894c3 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -2197,6 +2197,8 @@ enum nl80211_attrs { NL80211_ATTR_STA_SUPPORT_P2P_PS, + NL80211_ATTR_PAD, + /* add attributes here, update the policy in nl80211.c */ __NL80211_ATTR_AFTER_LAST, @@ -3023,6 +3025,7 @@ enum nl80211_survey_info { NL80211_SURVEY_INFO_TIME_RX, NL80211_SURVEY_INFO_TIME_TX, NL80211_SURVEY_INFO_TIME_SCAN, + NL80211_SURVEY_INFO_PAD, /* keep last */ __NL80211_SURVEY_INFO_AFTER_LAST, @@ -3468,6 +3471,7 @@ enum nl80211_bss { NL80211_BSS_BEACON_TSF, NL80211_BSS_PRESP_DATA, NL80211_BSS_LAST_SEEN_BOOTTIME, + NL80211_BSS_PAD, /* keep last */ __NL80211_BSS_AFTER_LAST, diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index fd7f34a2b10c..afeb1ef1b199 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -2429,7 +2429,8 @@ static int nl80211_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flag if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || nla_put_u32(msg, NL80211_ATTR_IFTYPE, wdev->iftype) || - nla_put_u64(msg, NL80211_ATTR_WDEV, wdev_id(wdev)) || + nla_put_u64_64bit(msg, NL80211_ATTR_WDEV, wdev_id(wdev), + NL80211_ATTR_PAD) || nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, wdev_address(wdev)) || nla_put_u32(msg, NL80211_ATTR_GENERATION, rdev->devlist_generation ^ @@ -6874,7 +6875,8 @@ static int nl80211_send_bss(struct sk_buff *msg, struct netlink_callback *cb, if (wdev->netdev && nla_put_u32(msg, NL80211_ATTR_IFINDEX, wdev->netdev->ifindex)) goto nla_put_failure; - if (nla_put_u64(msg, NL80211_ATTR_WDEV, wdev_id(wdev))) + if (nla_put_u64_64bit(msg, NL80211_ATTR_WDEV, wdev_id(wdev), + NL80211_ATTR_PAD)) goto nla_put_failure; bss = nla_nest_start(msg, NL80211_ATTR_BSS); @@ -6895,7 +6897,8 @@ static int nl80211_send_bss(struct sk_buff *msg, struct netlink_callback *cb, */ ies = rcu_dereference(res->ies); 
if (ies) { - if (nla_put_u64(msg, NL80211_BSS_TSF, ies->tsf)) + if (nla_put_u64_64bit(msg, NL80211_BSS_TSF, ies->tsf, + NL80211_BSS_PAD)) goto fail_unlock_rcu; if (ies->len && nla_put(msg, NL80211_BSS_INFORMATION_ELEMENTS, ies->len, ies->data)) @@ -6905,7 +6908,8 @@ static int nl80211_send_bss(struct sk_buff *msg, struct netlink_callback *cb, /* and this pointer is always (unless driver didn't know) beacon data */ ies = rcu_dereference(res->beacon_ies); if (ies && ies->from_beacon) { - if (nla_put_u64(msg, NL80211_BSS_BEACON_TSF, ies->tsf)) + if (nla_put_u64_64bit(msg, NL80211_BSS_BEACON_TSF, ies->tsf, + NL80211_BSS_PAD)) goto fail_unlock_rcu; if (ies->len && nla_put(msg, NL80211_BSS_BEACON_IES, ies->len, ies->data)) @@ -6924,8 +6928,8 @@ static int nl80211_send_bss(struct sk_buff *msg, struct netlink_callback *cb, goto nla_put_failure; if (intbss->ts_boottime && - nla_put_u64(msg, NL80211_BSS_LAST_SEEN_BOOTTIME, - intbss->ts_boottime)) + nla_put_u64_64bit(msg, NL80211_BSS_LAST_SEEN_BOOTTIME, + intbss->ts_boottime, NL80211_BSS_PAD)) goto nla_put_failure; switch (rdev->wiphy.signal_type) { @@ -7045,28 +7049,28 @@ static int nl80211_send_survey(struct sk_buff *msg, u32 portid, u32 seq, nla_put_flag(msg, NL80211_SURVEY_INFO_IN_USE)) goto nla_put_failure; if ((survey->filled & SURVEY_INFO_TIME) && - nla_put_u64(msg, NL80211_SURVEY_INFO_TIME, - survey->time)) + nla_put_u64_64bit(msg, NL80211_SURVEY_INFO_TIME, + survey->time, NL80211_SURVEY_INFO_PAD)) goto nla_put_failure; if ((survey->filled & SURVEY_INFO_TIME_BUSY) && - nla_put_u64(msg, NL80211_SURVEY_INFO_TIME_BUSY, - survey->time_busy)) + nla_put_u64_64bit(msg, NL80211_SURVEY_INFO_TIME_BUSY, + survey->time_busy, NL80211_SURVEY_INFO_PAD)) goto nla_put_failure; if ((survey->filled & SURVEY_INFO_TIME_EXT_BUSY) && - nla_put_u64(msg, NL80211_SURVEY_INFO_TIME_EXT_BUSY, - survey->time_ext_busy)) + nla_put_u64_64bit(msg, NL80211_SURVEY_INFO_TIME_EXT_BUSY, + survey->time_ext_busy, NL80211_SURVEY_INFO_PAD)) goto nla_put_failure; if ((survey->filled & SURVEY_INFO_TIME_RX) && - nla_put_u64(msg, NL80211_SURVEY_INFO_TIME_RX, - survey->time_rx)) + nla_put_u64_64bit(msg, NL80211_SURVEY_INFO_TIME_RX, + survey->time_rx, NL80211_SURVEY_INFO_PAD)) goto nla_put_failure; if ((survey->filled & SURVEY_INFO_TIME_TX) && - nla_put_u64(msg, NL80211_SURVEY_INFO_TIME_TX, - survey->time_tx)) + nla_put_u64_64bit(msg, NL80211_SURVEY_INFO_TIME_TX, + survey->time_tx, NL80211_SURVEY_INFO_PAD)) goto nla_put_failure; if ((survey->filled & SURVEY_INFO_TIME_SCAN) && - nla_put_u64(msg, NL80211_SURVEY_INFO_TIME_SCAN, - survey->time_scan)) + nla_put_u64_64bit(msg, NL80211_SURVEY_INFO_TIME_SCAN, + survey->time_scan, NL80211_SURVEY_INFO_PAD)) goto nla_put_failure; nla_nest_end(msg, infoattr); @@ -7786,8 +7790,8 @@ __cfg80211_alloc_vendor_skb(struct cfg80211_registered_device *rdev, } if (wdev) { - if (nla_put_u64(skb, NL80211_ATTR_WDEV, - wdev_id(wdev))) + if (nla_put_u64_64bit(skb, NL80211_ATTR_WDEV, + wdev_id(wdev), NL80211_ATTR_PAD)) goto nla_put_failure; if (wdev->netdev && nla_put_u32(skb, NL80211_ATTR_IFINDEX, @@ -8380,7 +8384,8 @@ static int nl80211_remain_on_channel(struct sk_buff *skb, if (err) goto free_msg; - if (nla_put_u64(msg, NL80211_ATTR_COOKIE, cookie)) + if (nla_put_u64_64bit(msg, NL80211_ATTR_COOKIE, cookie, + NL80211_ATTR_PAD)) goto nla_put_failure; genlmsg_end(msg, hdr); @@ -8792,7 +8797,8 @@ static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info) goto free_msg; if (msg) { - if (nla_put_u64(msg, NL80211_ATTR_COOKIE, cookie)) + if 
(nla_put_u64_64bit(msg, NL80211_ATTR_COOKIE, cookie, + NL80211_ATTR_PAD)) goto nla_put_failure; genlmsg_end(msg, hdr); @@ -10078,7 +10084,8 @@ static int nl80211_probe_client(struct sk_buff *skb, if (err) goto free_msg; - if (nla_put_u64(msg, NL80211_ATTR_COOKIE, cookie)) + if (nla_put_u64_64bit(msg, NL80211_ATTR_COOKIE, cookie, + NL80211_ATTR_PAD)) goto nla_put_failure; genlmsg_end(msg, hdr); @@ -10503,8 +10510,9 @@ static int nl80211_vendor_cmd_dump(struct sk_buff *skb, break; if (nla_put_u32(skb, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || - (wdev && nla_put_u64(skb, NL80211_ATTR_WDEV, - wdev_id(wdev)))) { + (wdev && nla_put_u64_64bit(skb, NL80211_ATTR_WDEV, + wdev_id(wdev), + NL80211_ATTR_PAD))) { genlmsg_cancel(skb, hdr); break; } @@ -11711,7 +11719,8 @@ static int nl80211_send_scan_msg(struct sk_buff *msg, if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || (wdev->netdev && nla_put_u32(msg, NL80211_ATTR_IFINDEX, wdev->netdev->ifindex)) || - nla_put_u64(msg, NL80211_ATTR_WDEV, wdev_id(wdev))) + nla_put_u64_64bit(msg, NL80211_ATTR_WDEV, wdev_id(wdev), + NL80211_ATTR_PAD)) goto nla_put_failure; /* ignore errors and send incomplete event anyway */ @@ -12378,11 +12387,13 @@ static void nl80211_send_remain_on_chan_event( if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || (wdev->netdev && nla_put_u32(msg, NL80211_ATTR_IFINDEX, wdev->netdev->ifindex)) || - nla_put_u64(msg, NL80211_ATTR_WDEV, wdev_id(wdev)) || + nla_put_u64_64bit(msg, NL80211_ATTR_WDEV, wdev_id(wdev), + NL80211_ATTR_PAD) || nla_put_u32(msg, NL80211_ATTR_WIPHY_FREQ, chan->center_freq) || nla_put_u32(msg, NL80211_ATTR_WIPHY_CHANNEL_TYPE, NL80211_CHAN_NO_HT) || - nla_put_u64(msg, NL80211_ATTR_COOKIE, cookie)) + nla_put_u64_64bit(msg, NL80211_ATTR_COOKIE, cookie, + NL80211_ATTR_PAD)) goto nla_put_failure; if (cmd == NL80211_CMD_REMAIN_ON_CHANNEL && @@ -12616,7 +12627,8 @@ int nl80211_send_mgmt(struct cfg80211_registered_device *rdev, if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || (netdev && nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex)) || - nla_put_u64(msg, NL80211_ATTR_WDEV, wdev_id(wdev)) || + nla_put_u64_64bit(msg, NL80211_ATTR_WDEV, wdev_id(wdev), + NL80211_ATTR_PAD) || nla_put_u32(msg, NL80211_ATTR_WIPHY_FREQ, freq) || (sig_dbm && nla_put_u32(msg, NL80211_ATTR_RX_SIGNAL_DBM, sig_dbm)) || @@ -12659,9 +12671,11 @@ void cfg80211_mgmt_tx_status(struct wireless_dev *wdev, u64 cookie, if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || (netdev && nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex)) || - nla_put_u64(msg, NL80211_ATTR_WDEV, wdev_id(wdev)) || + nla_put_u64_64bit(msg, NL80211_ATTR_WDEV, wdev_id(wdev), + NL80211_ATTR_PAD) || nla_put(msg, NL80211_ATTR_FRAME, len, buf) || - nla_put_u64(msg, NL80211_ATTR_COOKIE, cookie) || + nla_put_u64_64bit(msg, NL80211_ATTR_COOKIE, cookie, + NL80211_ATTR_PAD) || (ack && nla_put_flag(msg, NL80211_ATTR_ACK))) goto nla_put_failure; @@ -13041,7 +13055,8 @@ nl80211_radar_notify(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev = netdev->ieee80211_ptr; if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) || - nla_put_u64(msg, NL80211_ATTR_WDEV, wdev_id(wdev))) + nla_put_u64_64bit(msg, NL80211_ATTR_WDEV, wdev_id(wdev), + NL80211_ATTR_PAD)) goto nla_put_failure; } @@ -13086,7 +13101,8 @@ void cfg80211_probe_status(struct net_device *dev, const u8 *addr, if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) || nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, addr) || 
- nla_put_u64(msg, NL80211_ATTR_COOKIE, cookie) || + nla_put_u64_64bit(msg, NL80211_ATTR_COOKIE, cookie, + NL80211_ATTR_PAD) || (acked && nla_put_flag(msg, NL80211_ATTR_ACK))) goto nla_put_failure; @@ -13231,7 +13247,8 @@ void cfg80211_report_wowlan_wakeup(struct wireless_dev *wdev, goto free_msg; if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || - nla_put_u64(msg, NL80211_ATTR_WDEV, wdev_id(wdev))) + nla_put_u64_64bit(msg, NL80211_ATTR_WDEV, wdev_id(wdev), + NL80211_ATTR_PAD)) goto free_msg; if (wdev->netdev && nla_put_u32(msg, NL80211_ATTR_IFINDEX, @@ -13506,7 +13523,8 @@ void cfg80211_crit_proto_stopped(struct wireless_dev *wdev, gfp_t gfp) goto nla_put_failure; if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || - nla_put_u64(msg, NL80211_ATTR_WDEV, wdev_id(wdev))) + nla_put_u64_64bit(msg, NL80211_ATTR_WDEV, wdev_id(wdev), + NL80211_ATTR_PAD)) goto nla_put_failure; genlmsg_end(msg, hdr); @@ -13539,7 +13557,8 @@ void nl80211_send_ap_stopped(struct wireless_dev *wdev) if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || nla_put_u32(msg, NL80211_ATTR_IFINDEX, wdev->netdev->ifindex) || - nla_put_u64(msg, NL80211_ATTR_WDEV, wdev_id(wdev))) + nla_put_u64_64bit(msg, NL80211_ATTR_WDEV, wdev_id(wdev), + NL80211_ATTR_PAD)) goto out; genlmsg_end(msg, hdr); From 0aea76d35c9651d55bbaf746e7914e5f9ae5a25d Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Thu, 21 Apr 2016 22:13:01 -0700 Subject: [PATCH 0983/1649] tcp: SYN packets are now simply consumed We now have proper per-listener but also per network namespace counters for SYN packets that might be dropped. We replace the kfree_skb() by consume_skb() to be drop monitor [1] friendly, and remove an obsolete comment. FastOpen SYN packets can carry payload in them just fine. [1] perf record -a -g -e skb:kfree_skb sleep 1; perf report Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- net/ipv4/tcp_input.c | 19 +------------------ 1 file changed, 1 insertion(+), 18 deletions(-) diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index dcad8f9f96eb..967520dbe0bf 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -5815,24 +5815,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb) if (icsk->icsk_af_ops->conn_request(sk, skb) < 0) return 1; - /* Now we have several options: In theory there is - * nothing else in the frame. KA9Q has an option to - * send data with the syn, BSD accepts data with the - * syn up to the [to be] advertised window and - * Solaris 2.1 gives you a protocol error. For now - * we just ignore it, that fits the spec precisely - * and avoids incompatibilities. It would be nice in - * future to drop through and process the data. - * - * Now that TTCP is starting to be used we ought to - * queue this data. - * But, this leaves one open to an easy denial of - * service attack, and SYN cookies can't defend - * against this problem. So, we drop the data - * in the interest of security over speed unless - * it's still in use. - */ - kfree_skb(skb); + consume_skb(skb); return 0; } goto discard; From 960a26282f5b1f084313c59d22f76026e6637995 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Thu, 21 Apr 2016 22:27:32 -0700 Subject: [PATCH 0984/1649] net: better drop monitoring in ip{6}_recv_error() We should call consume_skb(skb) when skb is properly consumed, or kfree_skb(skb) when skb must be dropped in error case. Signed-off-by: Eric Dumazet Signed-off-by: David S. 
Miller --- net/ipv4/ip_sockglue.c | 10 +++++----- net/ipv6/datagram.c | 10 +++++----- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index 279471c4e58f..bdb222c0c6a2 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c @@ -510,9 +510,10 @@ int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len) copied = len; } err = skb_copy_datagram_msg(skb, 0, msg, copied); - if (err) - goto out_free_skb; - + if (unlikely(err)) { + kfree_skb(skb); + return err; + } sock_recv_timestamp(msg, sk, skb); serr = SKB_EXT_ERR(skb); @@ -544,8 +545,7 @@ int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len) msg->msg_flags |= MSG_ERRQUEUE; err = copied; -out_free_skb: - kfree_skb(skb); + consume_skb(skb); out: return err; } diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c index 3962b6c810fc..ea9ee5cce5cf 100644 --- a/net/ipv6/datagram.c +++ b/net/ipv6/datagram.c @@ -450,9 +450,10 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len) copied = len; } err = skb_copy_datagram_msg(skb, 0, msg, copied); - if (err) - goto out_free_skb; - + if (unlikely(err)) { + kfree_skb(skb); + return err; + } sock_recv_timestamp(msg, sk, skb); serr = SKB_EXT_ERR(skb); @@ -509,8 +510,7 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len) msg->msg_flags |= MSG_ERRQUEUE; err = copied; -out_free_skb: - kfree_skb(skb); + consume_skb(skb); out: return err; } From d4967cf38fbd62467b8fb5cab63d7da1f5907ed7 Mon Sep 17 00:00:00 2001 From: Yuval Mintz Date: Fri, 22 Apr 2016 08:41:01 +0300 Subject: [PATCH 0985/1649] qed*: Align statistics names There's a difference in statistics' names starting at qed and propagating to qede, where egress counters indicate ranges while ingress counters indicate high-end. Align all statistics to follow the same conventions - name indicates range. Signed-off-by: Yuval Mintz Signed-off-by: David S.
Miller --- drivers/net/ethernet/qlogic/qed/qed_l2.c | 20 ++++++------- drivers/net/ethernet/qlogic/qede/qede.h | 20 ++++++------- .../net/ethernet/qlogic/qede/qede_ethtool.c | 20 ++++++------- drivers/net/ethernet/qlogic/qede/qede_main.c | 29 ++++++++++++------- include/linux/qed/qed_if.h | 20 ++++++------- 5 files changed, 59 insertions(+), 50 deletions(-) diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c index fb5f3b815340..31e1d510a991 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c @@ -1415,16 +1415,16 @@ static void __qed_get_vport_port_stats(struct qed_hwfn *p_hwfn, sizeof(port_stats)); p_stats->rx_64_byte_packets += port_stats.pmm.r64; - p_stats->rx_127_byte_packets += port_stats.pmm.r127; - p_stats->rx_255_byte_packets += port_stats.pmm.r255; - p_stats->rx_511_byte_packets += port_stats.pmm.r511; - p_stats->rx_1023_byte_packets += port_stats.pmm.r1023; - p_stats->rx_1518_byte_packets += port_stats.pmm.r1518; - p_stats->rx_1522_byte_packets += port_stats.pmm.r1522; - p_stats->rx_2047_byte_packets += port_stats.pmm.r2047; - p_stats->rx_4095_byte_packets += port_stats.pmm.r4095; - p_stats->rx_9216_byte_packets += port_stats.pmm.r9216; - p_stats->rx_16383_byte_packets += port_stats.pmm.r16383; + p_stats->rx_65_to_127_byte_packets += port_stats.pmm.r127; + p_stats->rx_128_to_255_byte_packets += port_stats.pmm.r255; + p_stats->rx_256_to_511_byte_packets += port_stats.pmm.r511; + p_stats->rx_512_to_1023_byte_packets += port_stats.pmm.r1023; + p_stats->rx_1024_to_1518_byte_packets += port_stats.pmm.r1518; + p_stats->rx_1519_to_1522_byte_packets += port_stats.pmm.r1522; + p_stats->rx_1519_to_2047_byte_packets += port_stats.pmm.r2047; + p_stats->rx_2048_to_4095_byte_packets += port_stats.pmm.r4095; + p_stats->rx_4096_to_9216_byte_packets += port_stats.pmm.r9216; + p_stats->rx_9217_to_16383_byte_packets += port_stats.pmm.r16383; p_stats->rx_crc_errors += port_stats.pmm.rfcs; p_stats->rx_mac_crtl_frames += port_stats.pmm.rxcf; p_stats->rx_pause_frames += port_stats.pmm.rxpf; diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h index 16df1591388f..a687e7a1dc8d 100644 --- a/drivers/net/ethernet/qlogic/qede/qede.h +++ b/drivers/net/ethernet/qlogic/qede/qede.h @@ -59,16 +59,16 @@ struct qede_stats { /* port */ u64 rx_64_byte_packets; - u64 rx_127_byte_packets; - u64 rx_255_byte_packets; - u64 rx_511_byte_packets; - u64 rx_1023_byte_packets; - u64 rx_1518_byte_packets; - u64 rx_1522_byte_packets; - u64 rx_2047_byte_packets; - u64 rx_4095_byte_packets; - u64 rx_9216_byte_packets; - u64 rx_16383_byte_packets; + u64 rx_65_to_127_byte_packets; + u64 rx_128_to_255_byte_packets; + u64 rx_256_to_511_byte_packets; + u64 rx_512_to_1023_byte_packets; + u64 rx_1024_to_1518_byte_packets; + u64 rx_1519_to_1522_byte_packets; + u64 rx_1519_to_2047_byte_packets; + u64 rx_2048_to_4095_byte_packets; + u64 rx_4096_to_9216_byte_packets; + u64 rx_9217_to_16383_byte_packets; u64 rx_crc_errors; u64 rx_mac_crtl_frames; u64 rx_pause_frames; diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c index f0982f163670..f87e83b41d5d 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c @@ -59,16 +59,16 @@ static const struct { QEDE_STAT(tx_bcast_pkts), QEDE_PF_STAT(rx_64_byte_packets), - QEDE_PF_STAT(rx_127_byte_packets), - QEDE_PF_STAT(rx_255_byte_packets), - 
QEDE_PF_STAT(rx_511_byte_packets), - QEDE_PF_STAT(rx_1023_byte_packets), - QEDE_PF_STAT(rx_1518_byte_packets), - QEDE_PF_STAT(rx_1522_byte_packets), - QEDE_PF_STAT(rx_2047_byte_packets), - QEDE_PF_STAT(rx_4095_byte_packets), - QEDE_PF_STAT(rx_9216_byte_packets), - QEDE_PF_STAT(rx_16383_byte_packets), + QEDE_PF_STAT(rx_65_to_127_byte_packets), + QEDE_PF_STAT(rx_128_to_255_byte_packets), + QEDE_PF_STAT(rx_256_to_511_byte_packets), + QEDE_PF_STAT(rx_512_to_1023_byte_packets), + QEDE_PF_STAT(rx_1024_to_1518_byte_packets), + QEDE_PF_STAT(rx_1519_to_1522_byte_packets), + QEDE_PF_STAT(rx_1519_to_2047_byte_packets), + QEDE_PF_STAT(rx_2048_to_4095_byte_packets), + QEDE_PF_STAT(rx_4096_to_9216_byte_packets), + QEDE_PF_STAT(rx_9217_to_16383_byte_packets), QEDE_PF_STAT(tx_64_byte_packets), QEDE_PF_STAT(tx_65_to_127_byte_packets), QEDE_PF_STAT(tx_128_to_255_byte_packets), diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index 197ef85684da..1e3ee49bae24 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -1638,16 +1638,25 @@ void qede_fill_by_demand_stats(struct qede_dev *edev) edev->stats.coalesced_bytes = stats.tpa_coalesced_bytes; edev->stats.rx_64_byte_packets = stats.rx_64_byte_packets; - edev->stats.rx_127_byte_packets = stats.rx_127_byte_packets; - edev->stats.rx_255_byte_packets = stats.rx_255_byte_packets; - edev->stats.rx_511_byte_packets = stats.rx_511_byte_packets; - edev->stats.rx_1023_byte_packets = stats.rx_1023_byte_packets; - edev->stats.rx_1518_byte_packets = stats.rx_1518_byte_packets; - edev->stats.rx_1522_byte_packets = stats.rx_1522_byte_packets; - edev->stats.rx_2047_byte_packets = stats.rx_2047_byte_packets; - edev->stats.rx_4095_byte_packets = stats.rx_4095_byte_packets; - edev->stats.rx_9216_byte_packets = stats.rx_9216_byte_packets; - edev->stats.rx_16383_byte_packets = stats.rx_16383_byte_packets; + edev->stats.rx_65_to_127_byte_packets = stats.rx_65_to_127_byte_packets; + edev->stats.rx_128_to_255_byte_packets = + stats.rx_128_to_255_byte_packets; + edev->stats.rx_256_to_511_byte_packets = + stats.rx_256_to_511_byte_packets; + edev->stats.rx_512_to_1023_byte_packets = + stats.rx_512_to_1023_byte_packets; + edev->stats.rx_1024_to_1518_byte_packets = + stats.rx_1024_to_1518_byte_packets; + edev->stats.rx_1519_to_1522_byte_packets = + stats.rx_1519_to_1522_byte_packets; + edev->stats.rx_1519_to_2047_byte_packets = + stats.rx_1519_to_2047_byte_packets; + edev->stats.rx_2048_to_4095_byte_packets = + stats.rx_2048_to_4095_byte_packets; + edev->stats.rx_4096_to_9216_byte_packets = + stats.rx_4096_to_9216_byte_packets; + edev->stats.rx_9217_to_16383_byte_packets = + stats.rx_9217_to_16383_byte_packets; edev->stats.rx_crc_errors = stats.rx_crc_errors; edev->stats.rx_mac_crtl_frames = stats.rx_mac_crtl_frames; edev->stats.rx_pause_frames = stats.rx_pause_frames; diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index 67e8c206b2c1..82a7fe011068 100644 --- a/include/linux/qed/qed_if.h +++ b/include/linux/qed/qed_if.h @@ -384,16 +384,16 @@ struct qed_eth_stats { /* port */ u64 rx_64_byte_packets; - u64 rx_127_byte_packets; - u64 rx_255_byte_packets; - u64 rx_511_byte_packets; - u64 rx_1023_byte_packets; - u64 rx_1518_byte_packets; - u64 rx_1522_byte_packets; - u64 rx_2047_byte_packets; - u64 rx_4095_byte_packets; - u64 rx_9216_byte_packets; - u64 rx_16383_byte_packets; + u64 rx_65_to_127_byte_packets; + u64 rx_128_to_255_byte_packets; + u64 
rx_256_to_511_byte_packets; + u64 rx_512_to_1023_byte_packets; + u64 rx_1024_to_1518_byte_packets; + u64 rx_1519_to_1522_byte_packets; + u64 rx_1519_to_2047_byte_packets; + u64 rx_2048_to_4095_byte_packets; + u64 rx_4096_to_9216_byte_packets; + u64 rx_9217_to_16383_byte_packets; u64 rx_crc_errors; u64 rx_mac_crtl_frames; u64 rx_pause_frames; From f3e72109f04c36ee45e62c0e6e1323179287c3e4 Mon Sep 17 00:00:00 2001 From: Yuval Mintz Date: Fri, 22 Apr 2016 08:41:02 +0300 Subject: [PATCH 0986/1649] qede: Add support for ethtool private flags Adds a getter for the interfaces private flags. The only parameter currently supported is whether the interface is a coupled function [required for supporting 100g]. Signed-off-by: Yuval Mintz Signed-off-by: David S. Miller --- .../net/ethernet/qlogic/qede/qede_ethtool.c | 23 +++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c index f87e83b41d5d..2ac98d44c1e1 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c @@ -116,6 +116,15 @@ static const struct { #define QEDE_NUM_STATS ARRAY_SIZE(qede_stats_arr) +enum { + QEDE_PRI_FLAG_CMT, + QEDE_PRI_FLAG_LEN, +}; + +static const char qede_private_arr[QEDE_PRI_FLAG_LEN][ETH_GSTRING_LEN] = { + "Coupled-Function", +}; + static void qede_get_strings_stats(struct qede_dev *edev, u8 *buf) { int i, j, k; @@ -139,6 +148,10 @@ static void qede_get_strings(struct net_device *dev, u32 stringset, u8 *buf) case ETH_SS_STATS: qede_get_strings_stats(edev, buf); break; + case ETH_SS_PRIV_FLAGS: + memcpy(buf, qede_private_arr, + ETH_GSTRING_LEN * QEDE_PRI_FLAG_LEN); + break; default: DP_VERBOSE(edev, QED_MSG_DEBUG, "Unsupported stringset 0x%08x\n", stringset); @@ -177,6 +190,8 @@ static int qede_get_sset_count(struct net_device *dev, int stringset) switch (stringset) { case ETH_SS_STATS: return num_stats + QEDE_NUM_RQSTATS; + case ETH_SS_PRIV_FLAGS: + return QEDE_PRI_FLAG_LEN; default: DP_VERBOSE(edev, QED_MSG_DEBUG, @@ -185,6 +200,13 @@ static int qede_get_sset_count(struct net_device *dev, int stringset) } } +static u32 qede_get_priv_flags(struct net_device *dev) +{ + struct qede_dev *edev = netdev_priv(dev); + + return (!!(edev->dev_info.common.num_hwfns > 1)) << QEDE_PRI_FLAG_CMT; +} + static int qede_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) { struct qede_dev *edev = netdev_priv(dev); @@ -814,6 +836,7 @@ static const struct ethtool_ops qede_ethtool_ops = { .get_strings = qede_get_strings, .set_phys_id = qede_set_phys_id, .get_ethtool_stats = qede_get_ethtool_stats, + .get_priv_flags = qede_get_priv_flags, .get_sset_count = qede_get_sset_count, .get_rxnfc = qede_get_rxnfc, .set_rxnfc = qede_set_rxnfc, From fe7cd2bfdac4d8739bc8665eef040e668e6b428f Mon Sep 17 00:00:00 2001 From: Yuval Mintz Date: Fri, 22 Apr 2016 08:41:03 +0300 Subject: [PATCH 0987/1649] qed*: Conditions for changing link There's some inconsistency in current logic determining whether the link settings of a given interface can be changed; I.e., in all modes other than the so-called `deault' mode the interfaces are forbidden from changing the configuration - but even this rule is not applied to all user APIs that may change the configuration. Instead, let the core-module [qed] decide whether an interface can change the configuration by supporting a new API function. 
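In rough terms, the new hook works like the sketch below (placeholder names only, not the actual qed/qede symbols): the core module exports a single predicate, and every user-facing configuration path consults it before touching the link.

#include <errno.h>
#include <stdbool.h>

struct link_params {
        bool autoneg;
        unsigned int speed;
};

struct core_link_ops {
        /* the core module decides whether this instance may change the link */
        bool (*can_link_change)(void *core_dev);
        int (*set_link)(void *core_dev, const struct link_params *params);
};

struct port_priv {
        void *core_dev;
        const struct core_link_ops *ops;
};

static int port_set_link(struct port_priv *priv, const struct link_params *params)
{
        /* every ethtool-style entry point performs this check first */
        if (!priv->ops || !priv->ops->can_link_change(priv->core_dev))
                return -EOPNOTSUPP;

        return priv->ops->set_link(priv->core_dev, params);
}

In the diff that follows, the callback is can_link_change in qed_common_ops, checked from the qede ethtool handlers.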
We also revise the current rule, allowing all interfaces to change their configurations while laying the infrastructure for future modes where an interface would be blocked from making such a configuration. Signed-off-by: Yuval Mintz Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed_main.c | 6 ++++++ drivers/net/ethernet/qlogic/qede/qede_ethtool.c | 14 ++++++++++---- include/linux/qed/qed_if.h | 10 ++++++++++ 3 files changed, 26 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index 1e9f321f1ac4..d189871e8e23 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -915,6 +915,11 @@ static u32 qed_sb_release(struct qed_dev *cdev, return rc; } +static bool qed_can_link_change(struct qed_dev *cdev) +{ + return true; +} + static int qed_set_link(struct qed_dev *cdev, struct qed_link_params *params) { @@ -1177,6 +1182,7 @@ const struct qed_common_ops qed_common_ops_pass = { .sb_release = &qed_sb_release, .simd_handler_config = &qed_simd_handler_config, .simd_handler_clean = &qed_simd_handler_clean, + .can_link_change = &qed_can_link_change, .set_link = &qed_set_link, .get_link = &qed_get_current_link, .drain = &qed_drain, diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c index 2ac98d44c1e1..f1dd25ac5552 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c @@ -239,9 +239,9 @@ static int qede_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) struct qed_link_params params; u32 speed; - if (!edev->dev_info.common.is_mf_default) { + if (!edev->ops || !edev->ops->common->can_link_change(edev->cdev)) { DP_INFO(edev, - "Link parameters can not be changed in non-default mode\n"); + "Link settings are not allowed to be changed\n"); return -EOPNOTSUPP; } @@ -350,6 +350,12 @@ static int qede_nway_reset(struct net_device *dev) struct qed_link_output current_link; struct qed_link_params link_params; + if (!edev->ops || !edev->ops->common->can_link_change(edev->cdev)) { + DP_INFO(edev, + "Link settings are not allowed to be changed\n"); + return -EOPNOTSUPP; + } + if (!netif_running(dev)) return 0; @@ -450,9 +456,9 @@ static int qede_set_pauseparam(struct net_device *dev, struct qed_link_params params; struct qed_link_output current_link; - if (!edev->dev_info.common.is_mf_default) { + if (!edev->ops || !edev->ops->common->can_link_change(edev->cdev)) { DP_INFO(edev, - "Pause parameters can not be updated in non-default mode\n"); + "Pause settings are not allowed to be changed\n"); return -EOPNOTSUPP; } diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index 82a7fe011068..e5de42b62976 100644 --- a/include/linux/qed/qed_if.h +++ b/include/linux/qed/qed_if.h @@ -211,6 +211,16 @@ struct qed_common_ops { void (*simd_handler_clean)(struct qed_dev *cdev, int index); + +/** + * @brief can_link_change - can the instance change the link or not + * + * @param cdev + * + * @return true if link-change is allowed, false otherwise. + */ + bool (*can_link_change)(struct qed_dev *cdev); + /** * @brief set_link - set links according to params * From a43f235f12e9da60a7e181f6a9524ea1e212e39d Mon Sep 17 00:00:00 2001 From: Sudarsana Reddy Kalluru Date: Fri, 22 Apr 2016 08:41:04 +0300 Subject: [PATCH 0988/1649] qed: add support for link pause configuration. 
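In rough terms, the change amounts to the mapping in the sketch below (the macros and struct here are illustrative placeholders, not the real qed definitions): each requested pause bit, as set from user space through the existing ethtool pause controls, is folded into a boolean of the link request.

#include <stdbool.h>

#define PAUSE_AUTONEG_ENABLE (1U << 0)
#define PAUSE_RX_ENABLE      (1U << 1)
#define PAUSE_TX_ENABLE      (1U << 2)

struct pause_params {
        bool autoneg;
        bool forced_rx;
        bool forced_tx;
};

static void apply_pause_config(struct pause_params *out, unsigned int cfg)
{
        /* translate the requested pause bits into per-port link parameters */
        out->autoneg = !!(cfg & PAUSE_AUTONEG_ENABLE);
        out->forced_rx = !!(cfg & PAUSE_RX_ENABLE);
        out->forced_tx = !!(cfg & PAUSE_TX_ENABLE);
}

The diff that follows expresses the same mapping with explicit if/else branches on the QED_LINK_PAUSE_* flags.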
The APIs for making this sort of configuration [e.g., via ethtool] are already present in qede, but the current configuration flow in qed doesn't respect them. Signed-off-by: Sudarsana Reddy Kalluru Signed-off-by: Yuval Mintz Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed_main.c | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index d189871e8e23..1918b83f0a97 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -962,6 +962,20 @@ static int qed_set_link(struct qed_dev *cdev, } if (params->override_flags & QED_LINK_OVERRIDE_SPEED_FORCED_SPEED) link_params->speed.forced_speed = params->forced_speed; + if (params->override_flags & QED_LINK_OVERRIDE_PAUSE_CONFIG) { + if (params->pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE) + link_params->pause.autoneg = true; + else + link_params->pause.autoneg = false; + if (params->pause_config & QED_LINK_PAUSE_RX_ENABLE) + link_params->pause.forced_rx = true; + else + link_params->pause.forced_rx = false; + if (params->pause_config & QED_LINK_PAUSE_TX_ENABLE) + link_params->pause.forced_tx = true; + else + link_params->pause.forced_tx = false; + } rc = qed_mcp_set_link(hwfn, ptt, params->link_up); From 0868e2538e45a9ed68e2b14adc42b020a36aae1d Mon Sep 17 00:00:00 2001 From: Jiri Benc Date: Fri, 22 Apr 2016 12:40:02 +0200 Subject: [PATCH 0989/1649] route: move lwtunnel state to a single place Commit 751a587ac9f9 ("route: fix breakage after moving lwtunnel state") moved lwtstate to the end of dst_entry for 32bit archs. This makes it share the cacheline with __refcnt which had an unknown effect on performance. For this reason, the pointer was kept in place for 64bit archs. However, later performance measurements showed this is of no concern. It turns out that every performance sensitive path that accesses lwtstate also accesses struct rtable or struct rt6_info which share the same cache line. Thus, to get rid of a few #ifdefs, move the field to the end of the struct also for 64bit. Signed-off-by: Jiri Benc Signed-off-by: David S. Miller --- include/net/dst.h | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/include/net/dst.h b/include/net/dst.h index 5c98443c1c9e..6835d224d47b 100644 --- a/include/net/dst.h +++ b/include/net/dst.h @@ -85,12 +85,11 @@ struct dst_entry { #endif #ifdef CONFIG_64BIT - struct lwtunnel_state *lwtstate; /* * Align __refcnt to a 64 bytes alignment * (L1_CACHE_SIZE would be too much) */ - long __pad_to_align_refcnt[1]; + long __pad_to_align_refcnt[2]; #endif /* * __refcnt wants to be on a different cache line from @@ -99,9 +98,7 @@ struct dst_entry { atomic_t __refcnt; /* client references */ int __use; unsigned long lastuse; -#ifndef CONFIG_64BIT struct lwtunnel_state *lwtstate; -#endif union { struct dst_entry *next; struct rtable __rcu *rt_next; From e425974feaa545575135f04e646f0495439b4c54 Mon Sep 17 00:00:00 2001 From: Phil Sutter Date: Fri, 22 Apr 2016 14:02:42 +0200 Subject: [PATCH 0990/1649] macsec: Convert to using IFF_NO_QUEUE Signed-off-by: Phil Sutter Acked-by: Sabrina Dubroca Signed-off-by: David S.
Miller --- drivers/net/macsec.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c index 84d3e5ca8817..6caa72402de7 100644 --- a/drivers/net/macsec.c +++ b/drivers/net/macsec.c @@ -2826,7 +2826,7 @@ static void macsec_free_netdev(struct net_device *dev) static void macsec_setup(struct net_device *dev) { ether_setup(dev); - dev->tx_queue_len = 0; + dev->priv_flags |= IFF_NO_QUEUE; dev->netdev_ops = &macsec_netdev_ops; dev->destructor = macsec_free_netdev; From 79bdc4c862af7cf11a135a6fdf8093622043c862 Mon Sep 17 00:00:00 2001 From: Michal Kazior Date: Fri, 22 Apr 2016 14:15:58 +0200 Subject: [PATCH 0991/1649] codel: generalize the implementation This strips out qdisc specific bits from the code and makes it slightly more reusable. Codel will be used by wireless/mac80211 in the future. Signed-off-by: Michal Kazior Signed-off-by: David S. Miller --- include/net/codel.h | 64 ++++++++++++++++++++++++---------------- net/sched/sch_codel.c | 20 +++++++++++-- net/sched/sch_fq_codel.c | 19 +++++++++--- 3 files changed, 71 insertions(+), 32 deletions(-) diff --git a/include/net/codel.h b/include/net/codel.h index d168aca115cc..06ac687b4909 100644 --- a/include/net/codel.h +++ b/include/net/codel.h @@ -176,12 +176,10 @@ struct codel_stats { #define CODEL_DISABLED_THRESHOLD INT_MAX -static void codel_params_init(struct codel_params *params, - const struct Qdisc *sch) +static void codel_params_init(struct codel_params *params) { params->interval = MS2TIME(100); params->target = MS2TIME(5); - params->mtu = psched_mtu(qdisc_dev(sch)); params->ce_threshold = CODEL_DISABLED_THRESHOLD; params->ecn = false; } @@ -226,28 +224,38 @@ static codel_time_t codel_control_law(codel_time_t t, return t + reciprocal_scale(interval, rec_inv_sqrt << REC_INV_SQRT_SHIFT); } +typedef u32 (*codel_skb_len_t)(const struct sk_buff *skb); +typedef codel_time_t (*codel_skb_time_t)(const struct sk_buff *skb); +typedef void (*codel_skb_drop_t)(struct sk_buff *skb, void *ctx); +typedef struct sk_buff * (*codel_skb_dequeue_t)(struct codel_vars *vars, + void *ctx); + static bool codel_should_drop(const struct sk_buff *skb, - struct Qdisc *sch, + void *ctx, struct codel_vars *vars, struct codel_params *params, struct codel_stats *stats, + codel_skb_len_t skb_len_func, + codel_skb_time_t skb_time_func, + u32 *backlog, codel_time_t now) { bool ok_to_drop; + u32 skb_len; if (!skb) { vars->first_above_time = 0; return false; } - vars->ldelay = now - codel_get_enqueue_time(skb); - sch->qstats.backlog -= qdisc_pkt_len(skb); + skb_len = skb_len_func(skb); + vars->ldelay = now - skb_time_func(skb); - if (unlikely(qdisc_pkt_len(skb) > stats->maxpacket)) - stats->maxpacket = qdisc_pkt_len(skb); + if (unlikely(skb_len > stats->maxpacket)) + stats->maxpacket = skb_len; if (codel_time_before(vars->ldelay, params->target) || - sch->qstats.backlog <= params->mtu) { + *backlog <= params->mtu) { /* went below - stay below for at least interval */ vars->first_above_time = 0; return false; @@ -264,16 +272,17 @@ static bool codel_should_drop(const struct sk_buff *skb, return ok_to_drop; } -typedef struct sk_buff * (*codel_skb_dequeue_t)(struct codel_vars *vars, - struct Qdisc *sch); - -static struct sk_buff *codel_dequeue(struct Qdisc *sch, +static struct sk_buff *codel_dequeue(void *ctx, + u32 *backlog, struct codel_params *params, struct codel_vars *vars, struct codel_stats *stats, + codel_skb_len_t skb_len_func, + codel_skb_time_t skb_time_func, + codel_skb_drop_t drop_func, codel_skb_dequeue_t 
dequeue_func) { - struct sk_buff *skb = dequeue_func(vars, sch); + struct sk_buff *skb = dequeue_func(vars, ctx); codel_time_t now; bool drop; @@ -282,7 +291,8 @@ static struct sk_buff *codel_dequeue(struct Qdisc *sch, return skb; } now = codel_get_time(); - drop = codel_should_drop(skb, sch, vars, params, stats, now); + drop = codel_should_drop(skb, ctx, vars, params, stats, + skb_len_func, skb_time_func, backlog, now); if (vars->dropping) { if (!drop) { /* sojourn time below target - leave dropping state */ @@ -310,12 +320,15 @@ static struct sk_buff *codel_dequeue(struct Qdisc *sch, vars->rec_inv_sqrt); goto end; } - stats->drop_len += qdisc_pkt_len(skb); - qdisc_drop(skb, sch); + stats->drop_len += skb_len_func(skb); + drop_func(skb, ctx); stats->drop_count++; - skb = dequeue_func(vars, sch); - if (!codel_should_drop(skb, sch, - vars, params, stats, now)) { + skb = dequeue_func(vars, ctx); + if (!codel_should_drop(skb, ctx, + vars, params, stats, + skb_len_func, + skb_time_func, + backlog, now)) { /* leave dropping state */ vars->dropping = false; } else { @@ -333,13 +346,14 @@ static struct sk_buff *codel_dequeue(struct Qdisc *sch, if (params->ecn && INET_ECN_set_ce(skb)) { stats->ecn_mark++; } else { - stats->drop_len += qdisc_pkt_len(skb); - qdisc_drop(skb, sch); + stats->drop_len += skb_len_func(skb); + drop_func(skb, ctx); stats->drop_count++; - skb = dequeue_func(vars, sch); - drop = codel_should_drop(skb, sch, vars, params, - stats, now); + skb = dequeue_func(vars, ctx); + drop = codel_should_drop(skb, ctx, vars, params, + stats, skb_len_func, + skb_time_func, backlog, now); } vars->dropping = true; /* if min went above target close to when we last went below it diff --git a/net/sched/sch_codel.c b/net/sched/sch_codel.c index 9b7e2980ee5c..512a94abe351 100644 --- a/net/sched/sch_codel.c +++ b/net/sched/sch_codel.c @@ -64,20 +64,33 @@ struct codel_sched_data { * to dequeue a packet from queue. Note: backlog is handled in * codel, we dont need to reduce it here. */ -static struct sk_buff *dequeue(struct codel_vars *vars, struct Qdisc *sch) +static struct sk_buff *dequeue_func(struct codel_vars *vars, void *ctx) { + struct Qdisc *sch = ctx; struct sk_buff *skb = __skb_dequeue(&sch->q); + if (skb) + sch->qstats.backlog -= qdisc_pkt_len(skb); + prefetch(&skb->end); /* we'll need skb_shinfo() */ return skb; } +static void drop_func(struct sk_buff *skb, void *ctx) +{ + struct Qdisc *sch = ctx; + + qdisc_drop(skb, sch); +} + static struct sk_buff *codel_qdisc_dequeue(struct Qdisc *sch) { struct codel_sched_data *q = qdisc_priv(sch); struct sk_buff *skb; - skb = codel_dequeue(sch, &q->params, &q->vars, &q->stats, dequeue); + skb = codel_dequeue(sch, &sch->qstats.backlog, &q->params, &q->vars, + &q->stats, qdisc_pkt_len, codel_get_enqueue_time, + drop_func, dequeue_func); /* We cant call qdisc_tree_reduce_backlog() if our qlen is 0, * or HTB crashes. Defer it for next round. @@ -173,9 +186,10 @@ static int codel_init(struct Qdisc *sch, struct nlattr *opt) sch->limit = DEFAULT_CODEL_LIMIT; - codel_params_init(&q->params, sch); + codel_params_init(&q->params); codel_vars_init(&q->vars); codel_stats_init(&q->stats); + q->params.mtu = psched_mtu(qdisc_dev(sch)); if (opt) { int err = codel_change(sch, opt); diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c index d3fc8f9dd3d4..dcf7266e6901 100644 --- a/net/sched/sch_fq_codel.c +++ b/net/sched/sch_fq_codel.c @@ -220,8 +220,9 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch) * to dequeue a packet from queue. 
Note: backlog is handled in * codel, we dont need to reduce it here. */ -static struct sk_buff *dequeue(struct codel_vars *vars, struct Qdisc *sch) +static struct sk_buff *dequeue_func(struct codel_vars *vars, void *ctx) { + struct Qdisc *sch = ctx; struct fq_codel_sched_data *q = qdisc_priv(sch); struct fq_codel_flow *flow; struct sk_buff *skb = NULL; @@ -231,10 +232,18 @@ static struct sk_buff *dequeue(struct codel_vars *vars, struct Qdisc *sch) skb = dequeue_head(flow); q->backlogs[flow - q->flows] -= qdisc_pkt_len(skb); sch->q.qlen--; + sch->qstats.backlog -= qdisc_pkt_len(skb); } return skb; } +static void drop_func(struct sk_buff *skb, void *ctx) +{ + struct Qdisc *sch = ctx; + + qdisc_drop(skb, sch); +} + static struct sk_buff *fq_codel_dequeue(struct Qdisc *sch) { struct fq_codel_sched_data *q = qdisc_priv(sch); @@ -263,8 +272,9 @@ begin: prev_ecn_mark = q->cstats.ecn_mark; prev_backlog = sch->qstats.backlog; - skb = codel_dequeue(sch, &q->cparams, &flow->cvars, &q->cstats, - dequeue); + skb = codel_dequeue(sch, &sch->qstats.backlog, &q->cparams, + &flow->cvars, &q->cstats, qdisc_pkt_len, + codel_get_enqueue_time, drop_func, dequeue_func); flow->dropped += q->cstats.drop_count - prev_drop_count; flow->dropped += q->cstats.ecn_mark - prev_ecn_mark; @@ -423,9 +433,10 @@ static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt) q->perturbation = prandom_u32(); INIT_LIST_HEAD(&q->new_flows); INIT_LIST_HEAD(&q->old_flows); - codel_params_init(&q->cparams, sch); + codel_params_init(&q->cparams); codel_stats_init(&q->cstats); q->cparams.ecn = true; + q->cparams.mtu = psched_mtu(qdisc_dev(sch)); if (opt) { int err = fq_codel_change(sch, opt); From d068ca2ae2e614b9a418fb3b5f1fd4cf996ff032 Mon Sep 17 00:00:00 2001 From: Michal Kazior Date: Fri, 22 Apr 2016 14:15:59 +0200 Subject: [PATCH 0992/1649] codel: split into multiple files It was impossible to include codel.h for the purpose of having access to codel_params or codel_vars structure definitions and using them for embedding in other more complex structures. This splits allows codel.h itself to be treated like any other header file while codel_qdisc.h and codel_impl.h contain function definitions with logic that was previously in codel.h. This copies over copyrights and doesn't involve code changes other than adding a few additional include directives to net/sched/sch*codel.c. Signed-off-by: Michal Kazior Signed-off-by: David S. 
Miller --- include/net/codel.h | 223 --------------------------------- include/net/codel_impl.h | 255 ++++++++++++++++++++++++++++++++++++++ include/net/codel_qdisc.h | 73 +++++++++++ net/sched/sch_codel.c | 2 + net/sched/sch_fq_codel.c | 2 + 5 files changed, 332 insertions(+), 223 deletions(-) create mode 100644 include/net/codel_impl.h create mode 100644 include/net/codel_qdisc.h diff --git a/include/net/codel.h b/include/net/codel.h index 06ac687b4909..a6e428f80135 100644 --- a/include/net/codel.h +++ b/include/net/codel.h @@ -87,27 +87,6 @@ static inline codel_time_t codel_get_time(void) ((s32)((a) - (b)) >= 0)) #define codel_time_before_eq(a, b) codel_time_after_eq(b, a) -/* Qdiscs using codel plugin must use codel_skb_cb in their own cb[] */ -struct codel_skb_cb { - codel_time_t enqueue_time; -}; - -static struct codel_skb_cb *get_codel_cb(const struct sk_buff *skb) -{ - qdisc_cb_private_validate(skb, sizeof(struct codel_skb_cb)); - return (struct codel_skb_cb *)qdisc_skb_cb(skb)->data; -} - -static codel_time_t codel_get_enqueue_time(const struct sk_buff *skb) -{ - return get_codel_cb(skb)->enqueue_time; -} - -static void codel_set_enqueue_time(struct sk_buff *skb) -{ - get_codel_cb(skb)->enqueue_time = codel_get_time(); -} - static inline u32 codel_time_to_us(codel_time_t val) { u64 valns = ((u64)val << CODEL_SHIFT); @@ -176,212 +155,10 @@ struct codel_stats { #define CODEL_DISABLED_THRESHOLD INT_MAX -static void codel_params_init(struct codel_params *params) -{ - params->interval = MS2TIME(100); - params->target = MS2TIME(5); - params->ce_threshold = CODEL_DISABLED_THRESHOLD; - params->ecn = false; -} - -static void codel_vars_init(struct codel_vars *vars) -{ - memset(vars, 0, sizeof(*vars)); -} - -static void codel_stats_init(struct codel_stats *stats) -{ - stats->maxpacket = 0; -} - -/* - * http://en.wikipedia.org/wiki/Methods_of_computing_square_roots#Iterative_methods_for_reciprocal_square_roots - * new_invsqrt = (invsqrt / 2) * (3 - count * invsqrt^2) - * - * Here, invsqrt is a fixed point number (< 1.0), 32bit mantissa, aka Q0.32 - */ -static void codel_Newton_step(struct codel_vars *vars) -{ - u32 invsqrt = ((u32)vars->rec_inv_sqrt) << REC_INV_SQRT_SHIFT; - u32 invsqrt2 = ((u64)invsqrt * invsqrt) >> 32; - u64 val = (3LL << 32) - ((u64)vars->count * invsqrt2); - - val >>= 2; /* avoid overflow in following multiply */ - val = (val * invsqrt) >> (32 - 2 + 1); - - vars->rec_inv_sqrt = val >> REC_INV_SQRT_SHIFT; -} - -/* - * CoDel control_law is t + interval/sqrt(count) - * We maintain in rec_inv_sqrt the reciprocal value of sqrt(count) to avoid - * both sqrt() and divide operation. 
- */ -static codel_time_t codel_control_law(codel_time_t t, - codel_time_t interval, - u32 rec_inv_sqrt) -{ - return t + reciprocal_scale(interval, rec_inv_sqrt << REC_INV_SQRT_SHIFT); -} - typedef u32 (*codel_skb_len_t)(const struct sk_buff *skb); typedef codel_time_t (*codel_skb_time_t)(const struct sk_buff *skb); typedef void (*codel_skb_drop_t)(struct sk_buff *skb, void *ctx); typedef struct sk_buff * (*codel_skb_dequeue_t)(struct codel_vars *vars, void *ctx); -static bool codel_should_drop(const struct sk_buff *skb, - void *ctx, - struct codel_vars *vars, - struct codel_params *params, - struct codel_stats *stats, - codel_skb_len_t skb_len_func, - codel_skb_time_t skb_time_func, - u32 *backlog, - codel_time_t now) -{ - bool ok_to_drop; - u32 skb_len; - - if (!skb) { - vars->first_above_time = 0; - return false; - } - - skb_len = skb_len_func(skb); - vars->ldelay = now - skb_time_func(skb); - - if (unlikely(skb_len > stats->maxpacket)) - stats->maxpacket = skb_len; - - if (codel_time_before(vars->ldelay, params->target) || - *backlog <= params->mtu) { - /* went below - stay below for at least interval */ - vars->first_above_time = 0; - return false; - } - ok_to_drop = false; - if (vars->first_above_time == 0) { - /* just went above from below. If we stay above - * for at least interval we'll say it's ok to drop - */ - vars->first_above_time = now + params->interval; - } else if (codel_time_after(now, vars->first_above_time)) { - ok_to_drop = true; - } - return ok_to_drop; -} - -static struct sk_buff *codel_dequeue(void *ctx, - u32 *backlog, - struct codel_params *params, - struct codel_vars *vars, - struct codel_stats *stats, - codel_skb_len_t skb_len_func, - codel_skb_time_t skb_time_func, - codel_skb_drop_t drop_func, - codel_skb_dequeue_t dequeue_func) -{ - struct sk_buff *skb = dequeue_func(vars, ctx); - codel_time_t now; - bool drop; - - if (!skb) { - vars->dropping = false; - return skb; - } - now = codel_get_time(); - drop = codel_should_drop(skb, ctx, vars, params, stats, - skb_len_func, skb_time_func, backlog, now); - if (vars->dropping) { - if (!drop) { - /* sojourn time below target - leave dropping state */ - vars->dropping = false; - } else if (codel_time_after_eq(now, vars->drop_next)) { - /* It's time for the next drop. Drop the current - * packet and dequeue the next. The dequeue might - * take us out of dropping state. - * If not, schedule the next drop. - * A large backlog might result in drop rates so high - * that the next drop should happen now, - * hence the while loop. 
- */ - while (vars->dropping && - codel_time_after_eq(now, vars->drop_next)) { - vars->count++; /* dont care of possible wrap - * since there is no more divide - */ - codel_Newton_step(vars); - if (params->ecn && INET_ECN_set_ce(skb)) { - stats->ecn_mark++; - vars->drop_next = - codel_control_law(vars->drop_next, - params->interval, - vars->rec_inv_sqrt); - goto end; - } - stats->drop_len += skb_len_func(skb); - drop_func(skb, ctx); - stats->drop_count++; - skb = dequeue_func(vars, ctx); - if (!codel_should_drop(skb, ctx, - vars, params, stats, - skb_len_func, - skb_time_func, - backlog, now)) { - /* leave dropping state */ - vars->dropping = false; - } else { - /* and schedule the next drop */ - vars->drop_next = - codel_control_law(vars->drop_next, - params->interval, - vars->rec_inv_sqrt); - } - } - } - } else if (drop) { - u32 delta; - - if (params->ecn && INET_ECN_set_ce(skb)) { - stats->ecn_mark++; - } else { - stats->drop_len += skb_len_func(skb); - drop_func(skb, ctx); - stats->drop_count++; - - skb = dequeue_func(vars, ctx); - drop = codel_should_drop(skb, ctx, vars, params, - stats, skb_len_func, - skb_time_func, backlog, now); - } - vars->dropping = true; - /* if min went above target close to when we last went below it - * assume that the drop rate that controlled the queue on the - * last cycle is a good starting point to control it now. - */ - delta = vars->count - vars->lastcount; - if (delta > 1 && - codel_time_before(now - vars->drop_next, - 16 * params->interval)) { - vars->count = delta; - /* we dont care if rec_inv_sqrt approximation - * is not very precise : - * Next Newton steps will correct it quadratically. - */ - codel_Newton_step(vars); - } else { - vars->count = 1; - vars->rec_inv_sqrt = ~0U >> REC_INV_SQRT_SHIFT; - } - vars->lastcount = vars->count; - vars->drop_next = codel_control_law(now, params->interval, - vars->rec_inv_sqrt); - } -end: - if (skb && codel_time_after(vars->ldelay, params->ce_threshold) && - INET_ECN_set_ce(skb)) - stats->ce_mark++; - return skb; -} #endif diff --git a/include/net/codel_impl.h b/include/net/codel_impl.h new file mode 100644 index 000000000000..d289b91dcd65 --- /dev/null +++ b/include/net/codel_impl.h @@ -0,0 +1,255 @@ +#ifndef __NET_SCHED_CODEL_IMPL_H +#define __NET_SCHED_CODEL_IMPL_H + +/* + * Codel - The Controlled-Delay Active Queue Management algorithm + * + * Copyright (C) 2011-2012 Kathleen Nichols + * Copyright (C) 2011-2012 Van Jacobson + * Copyright (C) 2012 Michael D. Taht + * Copyright (C) 2012,2015 Eric Dumazet + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The names of the authors may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * Alternatively, provided that this notice is retained in full, this + * software may be distributed under the terms of the GNU General + * Public License ("GPL") version 2, in which case the provisions of the + * GPL apply INSTEAD OF those given above. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH + * DAMAGE. + * + */ + +/* Controlling Queue Delay (CoDel) algorithm + * ========================================= + * Source : Kathleen Nichols and Van Jacobson + * http://queue.acm.org/detail.cfm?id=2209336 + * + * Implemented on linux by Dave Taht and Eric Dumazet + */ + +static void codel_params_init(struct codel_params *params) +{ + params->interval = MS2TIME(100); + params->target = MS2TIME(5); + params->ce_threshold = CODEL_DISABLED_THRESHOLD; + params->ecn = false; +} + +static void codel_vars_init(struct codel_vars *vars) +{ + memset(vars, 0, sizeof(*vars)); +} + +static void codel_stats_init(struct codel_stats *stats) +{ + stats->maxpacket = 0; +} + +/* + * http://en.wikipedia.org/wiki/Methods_of_computing_square_roots#Iterative_methods_for_reciprocal_square_roots + * new_invsqrt = (invsqrt / 2) * (3 - count * invsqrt^2) + * + * Here, invsqrt is a fixed point number (< 1.0), 32bit mantissa, aka Q0.32 + */ +static void codel_Newton_step(struct codel_vars *vars) +{ + u32 invsqrt = ((u32)vars->rec_inv_sqrt) << REC_INV_SQRT_SHIFT; + u32 invsqrt2 = ((u64)invsqrt * invsqrt) >> 32; + u64 val = (3LL << 32) - ((u64)vars->count * invsqrt2); + + val >>= 2; /* avoid overflow in following multiply */ + val = (val * invsqrt) >> (32 - 2 + 1); + + vars->rec_inv_sqrt = val >> REC_INV_SQRT_SHIFT; +} + +/* + * CoDel control_law is t + interval/sqrt(count) + * We maintain in rec_inv_sqrt the reciprocal value of sqrt(count) to avoid + * both sqrt() and divide operation. + */ +static codel_time_t codel_control_law(codel_time_t t, + codel_time_t interval, + u32 rec_inv_sqrt) +{ + return t + reciprocal_scale(interval, rec_inv_sqrt << REC_INV_SQRT_SHIFT); +} + +static bool codel_should_drop(const struct sk_buff *skb, + void *ctx, + struct codel_vars *vars, + struct codel_params *params, + struct codel_stats *stats, + codel_skb_len_t skb_len_func, + codel_skb_time_t skb_time_func, + u32 *backlog, + codel_time_t now) +{ + bool ok_to_drop; + u32 skb_len; + + if (!skb) { + vars->first_above_time = 0; + return false; + } + + skb_len = skb_len_func(skb); + vars->ldelay = now - skb_time_func(skb); + + if (unlikely(skb_len > stats->maxpacket)) + stats->maxpacket = skb_len; + + if (codel_time_before(vars->ldelay, params->target) || + *backlog <= params->mtu) { + /* went below - stay below for at least interval */ + vars->first_above_time = 0; + return false; + } + ok_to_drop = false; + if (vars->first_above_time == 0) { + /* just went above from below. 
If we stay above + * for at least interval we'll say it's ok to drop + */ + vars->first_above_time = now + params->interval; + } else if (codel_time_after(now, vars->first_above_time)) { + ok_to_drop = true; + } + return ok_to_drop; +} + +static struct sk_buff *codel_dequeue(void *ctx, + u32 *backlog, + struct codel_params *params, + struct codel_vars *vars, + struct codel_stats *stats, + codel_skb_len_t skb_len_func, + codel_skb_time_t skb_time_func, + codel_skb_drop_t drop_func, + codel_skb_dequeue_t dequeue_func) +{ + struct sk_buff *skb = dequeue_func(vars, ctx); + codel_time_t now; + bool drop; + + if (!skb) { + vars->dropping = false; + return skb; + } + now = codel_get_time(); + drop = codel_should_drop(skb, ctx, vars, params, stats, + skb_len_func, skb_time_func, backlog, now); + if (vars->dropping) { + if (!drop) { + /* sojourn time below target - leave dropping state */ + vars->dropping = false; + } else if (codel_time_after_eq(now, vars->drop_next)) { + /* It's time for the next drop. Drop the current + * packet and dequeue the next. The dequeue might + * take us out of dropping state. + * If not, schedule the next drop. + * A large backlog might result in drop rates so high + * that the next drop should happen now, + * hence the while loop. + */ + while (vars->dropping && + codel_time_after_eq(now, vars->drop_next)) { + vars->count++; /* dont care of possible wrap + * since there is no more divide + */ + codel_Newton_step(vars); + if (params->ecn && INET_ECN_set_ce(skb)) { + stats->ecn_mark++; + vars->drop_next = + codel_control_law(vars->drop_next, + params->interval, + vars->rec_inv_sqrt); + goto end; + } + stats->drop_len += skb_len_func(skb); + drop_func(skb, ctx); + stats->drop_count++; + skb = dequeue_func(vars, ctx); + if (!codel_should_drop(skb, ctx, + vars, params, stats, + skb_len_func, + skb_time_func, + backlog, now)) { + /* leave dropping state */ + vars->dropping = false; + } else { + /* and schedule the next drop */ + vars->drop_next = + codel_control_law(vars->drop_next, + params->interval, + vars->rec_inv_sqrt); + } + } + } + } else if (drop) { + u32 delta; + + if (params->ecn && INET_ECN_set_ce(skb)) { + stats->ecn_mark++; + } else { + stats->drop_len += skb_len_func(skb); + drop_func(skb, ctx); + stats->drop_count++; + + skb = dequeue_func(vars, ctx); + drop = codel_should_drop(skb, ctx, vars, params, + stats, skb_len_func, + skb_time_func, backlog, now); + } + vars->dropping = true; + /* if min went above target close to when we last went below it + * assume that the drop rate that controlled the queue on the + * last cycle is a good starting point to control it now. + */ + delta = vars->count - vars->lastcount; + if (delta > 1 && + codel_time_before(now - vars->drop_next, + 16 * params->interval)) { + vars->count = delta; + /* we dont care if rec_inv_sqrt approximation + * is not very precise : + * Next Newton steps will correct it quadratically. 
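+			 * rec_inv_sqrt still holds the estimate from the end of
+			 * the previous dropping cycle, so a single Newton step is
+			 * a reasonable restart.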
+ */ + codel_Newton_step(vars); + } else { + vars->count = 1; + vars->rec_inv_sqrt = ~0U >> REC_INV_SQRT_SHIFT; + } + vars->lastcount = vars->count; + vars->drop_next = codel_control_law(now, params->interval, + vars->rec_inv_sqrt); + } +end: + if (skb && codel_time_after(vars->ldelay, params->ce_threshold) && + INET_ECN_set_ce(skb)) + stats->ce_mark++; + return skb; +} + +#endif diff --git a/include/net/codel_qdisc.h b/include/net/codel_qdisc.h new file mode 100644 index 000000000000..8144d9cd2908 --- /dev/null +++ b/include/net/codel_qdisc.h @@ -0,0 +1,73 @@ +#ifndef __NET_SCHED_CODEL_QDISC_H +#define __NET_SCHED_CODEL_QDISC_H + +/* + * Codel - The Controlled-Delay Active Queue Management algorithm + * + * Copyright (C) 2011-2012 Kathleen Nichols + * Copyright (C) 2011-2012 Van Jacobson + * Copyright (C) 2012 Michael D. Taht + * Copyright (C) 2012,2015 Eric Dumazet + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The names of the authors may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * Alternatively, provided that this notice is retained in full, this + * software may be distributed under the terms of the GNU General + * Public License ("GPL") version 2, in which case the provisions of the + * GPL apply INSTEAD OF those given above. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH + * DAMAGE. 
+ * + */ + +/* Controlling Queue Delay (CoDel) algorithm + * ========================================= + * Source : Kathleen Nichols and Van Jacobson + * http://queue.acm.org/detail.cfm?id=2209336 + * + * Implemented on linux by Dave Taht and Eric Dumazet + */ + +/* Qdiscs using codel plugin must use codel_skb_cb in their own cb[] */ +struct codel_skb_cb { + codel_time_t enqueue_time; +}; + +static struct codel_skb_cb *get_codel_cb(const struct sk_buff *skb) +{ + qdisc_cb_private_validate(skb, sizeof(struct codel_skb_cb)); + return (struct codel_skb_cb *)qdisc_skb_cb(skb)->data; +} + +static codel_time_t codel_get_enqueue_time(const struct sk_buff *skb) +{ + return get_codel_cb(skb)->enqueue_time; +} + +static void codel_set_enqueue_time(struct sk_buff *skb) +{ + get_codel_cb(skb)->enqueue_time = codel_get_time(); +} + +#endif diff --git a/net/sched/sch_codel.c b/net/sched/sch_codel.c index 512a94abe351..dddf3bb65a32 100644 --- a/net/sched/sch_codel.c +++ b/net/sched/sch_codel.c @@ -49,6 +49,8 @@ #include #include #include +#include +#include #define DEFAULT_CODEL_LIMIT 1000 diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c index dcf7266e6901..a5e420b3d4ab 100644 --- a/net/sched/sch_fq_codel.c +++ b/net/sched/sch_fq_codel.c @@ -24,6 +24,8 @@ #include #include #include +#include +#include /* Fair Queue CoDel. * From 557fc4a098039cf296fe33f118bab99a925fd881 Mon Sep 17 00:00:00 2001 From: Michal Kazior Date: Fri, 22 Apr 2016 14:20:13 +0200 Subject: [PATCH 0993/1649] fq: add fair queuing framework This works on the same implementation principle as codel*.h, i.e. there's a generic header with structures and macros and a implementation header carrying function definitions to include in given, e.g. driver or module. The fairness logic comes from net/sched/sch_fq_codel.c but is generalized so it is more flexible and easier to re-use. Signed-off-by: Michal Kazior Signed-off-by: David S. Miller --- include/net/fq.h | 95 +++++++++++++++ include/net/fq_impl.h | 269 ++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 364 insertions(+) create mode 100644 include/net/fq.h create mode 100644 include/net/fq_impl.h diff --git a/include/net/fq.h b/include/net/fq.h new file mode 100644 index 000000000000..268b49049c37 --- /dev/null +++ b/include/net/fq.h @@ -0,0 +1,95 @@ +/* + * Copyright (c) 2016 Qualcomm Atheros, Inc + * + * GPL v2 + * + * Based on net/sched/sch_fq_codel.c + */ +#ifndef __NET_SCHED_FQ_H +#define __NET_SCHED_FQ_H + +struct fq_tin; + +/** + * struct fq_flow - per traffic flow queue + * + * @tin: owner of this flow. Used to manage collisions, i.e. when a packet + * hashes to an index which points to a flow that is already owned by a + * different tin the packet is destined to. In such case the implementer + * must provide a fallback flow + * @flowchain: can be linked to fq_tin's new_flows or old_flows. Used for DRR++ + * (deficit round robin) based round robin queuing similar to the one + * found in net/sched/sch_fq_codel.c + * @backlogchain: can be linked to other fq_flow and fq. Used to keep track of + * fat flows and efficient head-dropping if packet limit is reached + * @queue: sk_buff queue to hold packets + * @backlog: number of bytes pending in the queue. 
The number of packets can be + * found in @queue.qlen + * @deficit: used for DRR++ + */ +struct fq_flow { + struct fq_tin *tin; + struct list_head flowchain; + struct list_head backlogchain; + struct sk_buff_head queue; + u32 backlog; + int deficit; +}; + +/** + * struct fq_tin - a logical container of fq_flows + * + * Used to group fq_flows into a logical aggregate. DRR++ scheme is used to + * pull interleaved packets out of the associated flows. + * + * @new_flows: linked list of fq_flow + * @old_flows: linked list of fq_flow + */ +struct fq_tin { + struct list_head new_flows; + struct list_head old_flows; + u32 backlog_bytes; + u32 backlog_packets; + u32 overlimit; + u32 collisions; + u32 flows; + u32 tx_bytes; + u32 tx_packets; +}; + +/** + * struct fq - main container for fair queuing purposes + * + * @backlogs: linked to fq_flows. Used to maintain fat flows for efficient + * head-dropping when @backlog reaches @limit + * @limit: max number of packets that can be queued across all flows + * @backlog: number of packets queued across all flows + */ +struct fq { + struct fq_flow *flows; + struct list_head backlogs; + spinlock_t lock; + u32 flows_cnt; + u32 perturbation; + u32 limit; + u32 quantum; + u32 backlog; + u32 overlimit; + u32 collisions; +}; + +typedef struct sk_buff *fq_tin_dequeue_t(struct fq *, + struct fq_tin *, + struct fq_flow *flow); + +typedef void fq_skb_free_t(struct fq *, + struct fq_tin *, + struct fq_flow *, + struct sk_buff *); + +typedef struct fq_flow *fq_flow_get_default_t(struct fq *, + struct fq_tin *, + int idx, + struct sk_buff *); + +#endif diff --git a/include/net/fq_impl.h b/include/net/fq_impl.h new file mode 100644 index 000000000000..02eab7c51adb --- /dev/null +++ b/include/net/fq_impl.h @@ -0,0 +1,269 @@ +/* + * Copyright (c) 2016 Qualcomm Atheros, Inc + * + * GPL v2 + * + * Based on net/sched/sch_fq_codel.c + */ +#ifndef __NET_SCHED_FQ_IMPL_H +#define __NET_SCHED_FQ_IMPL_H + +#include + +/* functions that are embedded into includer */ + +static struct sk_buff *fq_flow_dequeue(struct fq *fq, + struct fq_flow *flow) +{ + struct fq_tin *tin = flow->tin; + struct fq_flow *i; + struct sk_buff *skb; + + lockdep_assert_held(&fq->lock); + + skb = __skb_dequeue(&flow->queue); + if (!skb) + return NULL; + + tin->backlog_bytes -= skb->len; + tin->backlog_packets--; + flow->backlog -= skb->len; + fq->backlog--; + + if (flow->backlog == 0) { + list_del_init(&flow->backlogchain); + } else { + i = flow; + + list_for_each_entry_continue(i, &fq->backlogs, backlogchain) + if (i->backlog < flow->backlog) + break; + + list_move_tail(&flow->backlogchain, + &i->backlogchain); + } + + return skb; +} + +static struct sk_buff *fq_tin_dequeue(struct fq *fq, + struct fq_tin *tin, + fq_tin_dequeue_t dequeue_func) +{ + struct fq_flow *flow; + struct list_head *head; + struct sk_buff *skb; + + lockdep_assert_held(&fq->lock); + +begin: + head = &tin->new_flows; + if (list_empty(head)) { + head = &tin->old_flows; + if (list_empty(head)) + return NULL; + } + + flow = list_first_entry(head, struct fq_flow, flowchain); + + if (flow->deficit <= 0) { + flow->deficit += fq->quantum; + list_move_tail(&flow->flowchain, + &tin->old_flows); + goto begin; + } + + skb = dequeue_func(fq, tin, flow); + if (!skb) { + /* force a pass through old_flows to prevent starvation */ + if ((head == &tin->new_flows) && + !list_empty(&tin->old_flows)) { + list_move_tail(&flow->flowchain, &tin->old_flows); + } else { + list_del_init(&flow->flowchain); + flow->tin = NULL; + } + goto begin; + } + + 
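	/* DRR++ accounting: charge the dequeued bytes against this flow's
+	 * deficit. A flow whose deficit has run out is topped up with a
+	 * quantum and rotated onto old_flows at the head of the loop
+	 * before the next flow is served.
+	 */
+	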
flow->deficit -= skb->len; + tin->tx_bytes += skb->len; + tin->tx_packets++; + + return skb; +} + +static struct fq_flow *fq_flow_classify(struct fq *fq, + struct fq_tin *tin, + struct sk_buff *skb, + fq_flow_get_default_t get_default_func) +{ + struct fq_flow *flow; + u32 hash; + u32 idx; + + lockdep_assert_held(&fq->lock); + + hash = skb_get_hash_perturb(skb, fq->perturbation); + idx = reciprocal_scale(hash, fq->flows_cnt); + flow = &fq->flows[idx]; + + if (flow->tin && flow->tin != tin) { + flow = get_default_func(fq, tin, idx, skb); + tin->collisions++; + fq->collisions++; + } + + if (!flow->tin) + tin->flows++; + + return flow; +} + +static void fq_tin_enqueue(struct fq *fq, + struct fq_tin *tin, + struct sk_buff *skb, + fq_skb_free_t free_func, + fq_flow_get_default_t get_default_func) +{ + struct fq_flow *flow; + struct fq_flow *i; + + lockdep_assert_held(&fq->lock); + + flow = fq_flow_classify(fq, tin, skb, get_default_func); + + flow->tin = tin; + flow->backlog += skb->len; + tin->backlog_bytes += skb->len; + tin->backlog_packets++; + fq->backlog++; + + if (list_empty(&flow->backlogchain)) + list_add_tail(&flow->backlogchain, &fq->backlogs); + + i = flow; + list_for_each_entry_continue_reverse(i, &fq->backlogs, + backlogchain) + if (i->backlog > flow->backlog) + break; + + list_move(&flow->backlogchain, &i->backlogchain); + + if (list_empty(&flow->flowchain)) { + flow->deficit = fq->quantum; + list_add_tail(&flow->flowchain, + &tin->new_flows); + } + + __skb_queue_tail(&flow->queue, skb); + + if (fq->backlog > fq->limit) { + flow = list_first_entry_or_null(&fq->backlogs, + struct fq_flow, + backlogchain); + if (!flow) + return; + + skb = fq_flow_dequeue(fq, flow); + if (!skb) + return; + + free_func(fq, flow->tin, flow, skb); + + flow->tin->overlimit++; + fq->overlimit++; + } +} + +static void fq_flow_reset(struct fq *fq, + struct fq_flow *flow, + fq_skb_free_t free_func) +{ + struct sk_buff *skb; + + while ((skb = fq_flow_dequeue(fq, flow))) + free_func(fq, flow->tin, flow, skb); + + if (!list_empty(&flow->flowchain)) + list_del_init(&flow->flowchain); + + if (!list_empty(&flow->backlogchain)) + list_del_init(&flow->backlogchain); + + flow->tin = NULL; + + WARN_ON_ONCE(flow->backlog); +} + +static void fq_tin_reset(struct fq *fq, + struct fq_tin *tin, + fq_skb_free_t free_func) +{ + struct list_head *head; + struct fq_flow *flow; + + for (;;) { + head = &tin->new_flows; + if (list_empty(head)) { + head = &tin->old_flows; + if (list_empty(head)) + break; + } + + flow = list_first_entry(head, struct fq_flow, flowchain); + fq_flow_reset(fq, flow, free_func); + } + + WARN_ON_ONCE(tin->backlog_bytes); + WARN_ON_ONCE(tin->backlog_packets); +} + +static void fq_flow_init(struct fq_flow *flow) +{ + INIT_LIST_HEAD(&flow->flowchain); + INIT_LIST_HEAD(&flow->backlogchain); + __skb_queue_head_init(&flow->queue); +} + +static void fq_tin_init(struct fq_tin *tin) +{ + INIT_LIST_HEAD(&tin->new_flows); + INIT_LIST_HEAD(&tin->old_flows); +} + +static int fq_init(struct fq *fq, int flows_cnt) +{ + int i; + + memset(fq, 0, sizeof(fq[0])); + INIT_LIST_HEAD(&fq->backlogs); + spin_lock_init(&fq->lock); + fq->flows_cnt = max_t(u32, flows_cnt, 1); + fq->perturbation = prandom_u32(); + fq->quantum = 300; + fq->limit = 8192; + + fq->flows = kcalloc(fq->flows_cnt, sizeof(fq->flows[0]), GFP_KERNEL); + if (!fq->flows) + return -ENOMEM; + + for (i = 0; i < fq->flows_cnt; i++) + fq_flow_init(&fq->flows[i]); + + return 0; +} + +static void fq_reset(struct fq *fq, + fq_skb_free_t free_func) +{ + int i; + + for 
(i = 0; i < fq->flows_cnt; i++) + fq_flow_reset(fq, &fq->flows[i], free_func); + + kfree(fq->flows); + fq->flows = NULL; +} + +#endif From 6fa01ccd883021105e9f8af7d04b9f156fa3494a Mon Sep 17 00:00:00 2001 From: Sowmini Varadhan Date: Fri, 22 Apr 2016 18:36:35 -0700 Subject: [PATCH 0994/1649] skbuff: Add pskb_extract() helper function A pattern of skb usage seen in modules such as RDS-TCP is to extract `to_copy' bytes from the received TCP segment, starting at some offset `off' into a new skb `clone'. This is done in the ->data_ready callback, where the clone skb is queued up for rx on the PF_RDS socket, while the parent TCP segment is returned unchanged back to the TCP engine. The existing code uses the sequence clone = skb_clone(..); pskb_pull(clone, off, ..); pskb_trim(clone, to_copy, ..); with the intention of discarding the first `off' bytes. However, skb_clone() + pskb_pull() implies pksb_expand_head(), which ends up doing a redundant memcpy of bytes that will then get discarded in __pskb_pull_tail(). To avoid this inefficiency, this commit adds pskb_extract() that creates the clone, and memcpy's only the relevant header/frag/frag_list to the start of `clone'. pskb_trim() is then invoked to trim clone down to the requested to_copy bytes. Signed-off-by: Sowmini Varadhan Signed-off-by: David S. Miller --- include/linux/skbuff.h | 2 + net/core/skbuff.c | 242 +++++++++++++++++++++++++++++++++++++++++ 2 files changed, 244 insertions(+) diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index da0ace389fec..a1ce63979ad8 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -2986,6 +2986,8 @@ struct sk_buff *skb_vlan_untag(struct sk_buff *skb); int skb_ensure_writable(struct sk_buff *skb, int write_len); int skb_vlan_pop(struct sk_buff *skb); int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci); +struct sk_buff *pskb_extract(struct sk_buff *skb, int off, int to_copy, + gfp_t gfp); static inline int memcpy_from_msg(void *data, struct msghdr *msg, int len) { diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 7ff7788b0151..7a1d48983f81 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -4622,3 +4622,245 @@ failure: return NULL; } EXPORT_SYMBOL(alloc_skb_with_frags); + +/* carve out the first off bytes from skb when off < headlen */ +static int pskb_carve_inside_header(struct sk_buff *skb, const u32 off, + const int headlen, gfp_t gfp_mask) +{ + int i; + int size = skb_end_offset(skb); + int new_hlen = headlen - off; + u8 *data; + int doff = 0; + + size = SKB_DATA_ALIGN(size); + + if (skb_pfmemalloc(skb)) + gfp_mask |= __GFP_MEMALLOC; + data = kmalloc_reserve(size + + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)), + gfp_mask, NUMA_NO_NODE, NULL); + if (!data) + return -ENOMEM; + + size = SKB_WITH_OVERHEAD(ksize(data)); + + /* Copy real data, and all frags */ + skb_copy_from_linear_data_offset(skb, off, data, new_hlen); + skb->len -= off; + + memcpy((struct skb_shared_info *)(data + size), + skb_shinfo(skb), + offsetof(struct skb_shared_info, + frags[skb_shinfo(skb)->nr_frags])); + if (skb_cloned(skb)) { + /* drop the old head gracefully */ + if (skb_orphan_frags(skb, gfp_mask)) { + kfree(data); + return -ENOMEM; + } + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) + skb_frag_ref(skb, i); + if (skb_has_frag_list(skb)) + skb_clone_fraglist(skb); + skb_release_data(skb); + } else { + /* we can reuse existing recount- all we did was + * relocate values + */ + skb_free_head(skb); + } + + doff = (data - skb->head); + skb->head = data; + 
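	/* data, end and tail now follow head onto the carved buffer; the
+	 * header offsets are refreshed below for the shorter linear area.
+	 */
+	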
skb->data = data; + skb->head_frag = 0; +#ifdef NET_SKBUFF_DATA_USES_OFFSET + skb->end = size; + doff = 0; +#else + skb->end = skb->head + size; +#endif + skb_set_tail_pointer(skb, skb_headlen(skb)); + skb_headers_offset_update(skb, 0); + skb->cloned = 0; + skb->hdr_len = 0; + skb->nohdr = 0; + atomic_set(&skb_shinfo(skb)->dataref, 1); + + return 0; +} + +static int pskb_carve(struct sk_buff *skb, const u32 off, gfp_t gfp); + +/* carve out the first eat bytes from skb's frag_list. May recurse into + * pskb_carve() + */ +static int pskb_carve_frag_list(struct sk_buff *skb, + struct skb_shared_info *shinfo, int eat, + gfp_t gfp_mask) +{ + struct sk_buff *list = shinfo->frag_list; + struct sk_buff *clone = NULL; + struct sk_buff *insp = NULL; + + do { + if (!list) { + pr_err("Not enough bytes to eat. Want %d\n", eat); + return -EFAULT; + } + if (list->len <= eat) { + /* Eaten as whole. */ + eat -= list->len; + list = list->next; + insp = list; + } else { + /* Eaten partially. */ + if (skb_shared(list)) { + clone = skb_clone(list, gfp_mask); + if (!clone) + return -ENOMEM; + insp = list->next; + list = clone; + } else { + /* This may be pulled without problems. */ + insp = list; + } + if (pskb_carve(list, eat, gfp_mask) < 0) { + kfree_skb(clone); + return -ENOMEM; + } + break; + } + } while (eat); + + /* Free pulled out fragments. */ + while ((list = shinfo->frag_list) != insp) { + shinfo->frag_list = list->next; + kfree_skb(list); + } + /* And insert new clone at head. */ + if (clone) { + clone->next = list; + shinfo->frag_list = clone; + } + return 0; +} + +/* carve off first len bytes from skb. Split line (off) is in the + * non-linear part of skb + */ +static int pskb_carve_inside_nonlinear(struct sk_buff *skb, const u32 off, + int pos, gfp_t gfp_mask) +{ + int i, k = 0; + int size = skb_end_offset(skb); + u8 *data; + const int nfrags = skb_shinfo(skb)->nr_frags; + struct skb_shared_info *shinfo; + int doff = 0; + + size = SKB_DATA_ALIGN(size); + + if (skb_pfmemalloc(skb)) + gfp_mask |= __GFP_MEMALLOC; + data = kmalloc_reserve(size + + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)), + gfp_mask, NUMA_NO_NODE, NULL); + if (!data) + return -ENOMEM; + + size = SKB_WITH_OVERHEAD(ksize(data)); + + memcpy((struct skb_shared_info *)(data + size), + skb_shinfo(skb), offsetof(struct skb_shared_info, + frags[skb_shinfo(skb)->nr_frags])); + if (skb_orphan_frags(skb, gfp_mask)) { + kfree(data); + return -ENOMEM; + } + shinfo = (struct skb_shared_info *)(data + size); + for (i = 0; i < nfrags; i++) { + int fsize = skb_frag_size(&skb_shinfo(skb)->frags[i]); + + if (pos + fsize > off) { + shinfo->frags[k] = skb_shinfo(skb)->frags[i]; + + if (pos < off) { + /* Split frag. + * We have two variants in this case: + * 1. Move all the frag to the second + * part, if it is possible. F.e. + * this approach is mandatory for TUX, + * where splitting is expensive. + * 2. Split is accurately. We make this. 
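+				 * Variant 2 is used here: the first kept frag is
+				 * advanced by (off - pos) bytes and its size is
+				 * reduced by the same amount.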
+ */ + shinfo->frags[0].page_offset += off - pos; + skb_frag_size_sub(&shinfo->frags[0], off - pos); + } + skb_frag_ref(skb, i); + k++; + } + pos += fsize; + } + shinfo->nr_frags = k; + if (skb_has_frag_list(skb)) + skb_clone_fraglist(skb); + + if (k == 0) { + /* split line is in frag list */ + pskb_carve_frag_list(skb, shinfo, off - pos, gfp_mask); + } + skb_release_data(skb); + + doff = (data - skb->head); + skb->head = data; + skb->head_frag = 0; + skb->data = data; +#ifdef NET_SKBUFF_DATA_USES_OFFSET + skb->end = size; + doff = 0; +#else + skb->end = skb->head + size; +#endif + skb_reset_tail_pointer(skb); + skb_headers_offset_update(skb, 0); + skb->cloned = 0; + skb->hdr_len = 0; + skb->nohdr = 0; + skb->len -= off; + skb->data_len = skb->len; + atomic_set(&skb_shinfo(skb)->dataref, 1); + return 0; +} + +/* remove len bytes from the beginning of the skb */ +static int pskb_carve(struct sk_buff *skb, const u32 len, gfp_t gfp) +{ + int headlen = skb_headlen(skb); + + if (len < headlen) + return pskb_carve_inside_header(skb, len, headlen, gfp); + else + return pskb_carve_inside_nonlinear(skb, len, headlen, gfp); +} + +/* Extract to_copy bytes starting at off from skb, and return this in + * a new skb + */ +struct sk_buff *pskb_extract(struct sk_buff *skb, int off, + int to_copy, gfp_t gfp) +{ + struct sk_buff *clone = skb_clone(skb, gfp); + + if (!clone) + return NULL; + + if (pskb_carve(clone, off, gfp) < 0 || + pskb_trim(clone, to_copy)) { + kfree_skb(clone); + return NULL; + } + return clone; +} +EXPORT_SYMBOL(pskb_extract); From 947d2756cddec83c72af99b02010002ddc57aeb3 Mon Sep 17 00:00:00 2001 From: Sowmini Varadhan Date: Fri, 22 Apr 2016 18:36:36 -0700 Subject: [PATCH 0995/1649] RDS: TCP: Call pskb_extract() helper function rds-stress experiments with request size 256 bytes, 8K acks, using 16 threads show a 40% improvment when pskb_extract() replaces the {skb_clone(..); pskb_pull(..); pskb_trim(..);} pattern in the Rx path, so we leverage the perf gain with this commit. Signed-off-by: Sowmini Varadhan Signed-off-by: David S. Miller --- net/rds/tcp_recv.c | 14 +++----------- 1 file changed, 3 insertions(+), 11 deletions(-) diff --git a/net/rds/tcp_recv.c b/net/rds/tcp_recv.c index 27a992154804..d75d8b56a9e3 100644 --- a/net/rds/tcp_recv.c +++ b/net/rds/tcp_recv.c @@ -207,22 +207,14 @@ static int rds_tcp_data_recv(read_descriptor_t *desc, struct sk_buff *skb, } if (left && tc->t_tinc_data_rem) { - clone = skb_clone(skb, arg->gfp); + to_copy = min(tc->t_tinc_data_rem, left); + + clone = pskb_extract(skb, offset, to_copy, arg->gfp); if (!clone) { desc->error = -ENOMEM; goto out; } - to_copy = min(tc->t_tinc_data_rem, left); - if (!pskb_pull(clone, offset) || - pskb_trim(clone, to_copy)) { - pr_warn("rds_tcp_data_recv: pull/trim failed " - "left %zu data_rem %zu skb_len %d\n", - left, tc->t_tinc_data_rem, skb->len); - kfree_skb(clone); - desc->error = -ENOMEM; - goto out; - } skb_queue_tail(&tinc->ti_skb_list, clone); rdsdebug("skb %p data %p len %d off %u to_copy %zu -> " From 55441070ca1cbd47ce1ad2959bbf4b47aed9b83b Mon Sep 17 00:00:00 2001 From: Glenn Ruben Bakke Date: Fri, 22 Apr 2016 18:06:11 +0200 Subject: [PATCH 0996/1649] Bluetooth: 6lowpan: Fix memory corruption of ipv6 destination address The memcpy of ipv6 header destination address to the skb control block (sbk->cb) in header_create() results in currupted memory when bt_xmit() is issued. The skb->cb is "released" in the return of header_create() making room for lower layer to minipulate the skb->cb. 
The value retrieved in bt_xmit is not persistent across header creation and sending, and the lower layer will overwrite portions of skb->cb, making the copied destination address wrong. The memory corruption will lead to non-working multicast as the first 4 bytes of the copied destination address is replaced by a value that resolves into a non-multicast prefix. This fix removes the dependency on the skb control block between header creation and send, by moving the destination address memcpy to the send function path (setup_create, which is called from bt_xmit). Signed-off-by: Glenn Ruben Bakke Acked-by: Jukka Rissanen Signed-off-by: Marcel Holtmann Cc: stable@vger.kernel.org # 4.5+ --- net/bluetooth/6lowpan.c | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/net/bluetooth/6lowpan.c b/net/bluetooth/6lowpan.c index 38e82ddd7ccd..780089d75915 100644 --- a/net/bluetooth/6lowpan.c +++ b/net/bluetooth/6lowpan.c @@ -434,15 +434,18 @@ static int setup_header(struct sk_buff *skb, struct net_device *netdev, bdaddr_t *peer_addr, u8 *peer_addr_type) { struct in6_addr ipv6_daddr; + struct ipv6hdr *hdr; struct lowpan_btle_dev *dev; struct lowpan_peer *peer; bdaddr_t addr, *any = BDADDR_ANY; u8 *daddr = any->b; int err, status = 0; + hdr = ipv6_hdr(skb); + dev = lowpan_btle_dev(netdev); - memcpy(&ipv6_daddr, &lowpan_cb(skb)->addr, sizeof(ipv6_daddr)); + memcpy(&ipv6_daddr, &hdr->daddr, sizeof(ipv6_daddr)); if (ipv6_addr_is_multicast(&ipv6_daddr)) { lowpan_cb(skb)->chan = NULL; @@ -492,15 +495,9 @@ static int header_create(struct sk_buff *skb, struct net_device *netdev, unsigned short type, const void *_daddr, const void *_saddr, unsigned int len) { - struct ipv6hdr *hdr; - if (type != ETH_P_IPV6) return -EINVAL; - hdr = ipv6_hdr(skb); - - memcpy(&lowpan_cb(skb)->addr, &hdr->daddr, sizeof(struct in6_addr)); - return 0; } From 89a440932b6f2eb7fee78dbde05870e2b95e6151 Mon Sep 17 00:00:00 2001 From: "Yisen.Zhuang\\(Zhuangyuzeng\\)" Date: Sat, 23 Apr 2016 17:05:05 +0800 Subject: [PATCH 0997/1649] net: hns: add a new dsaf mode for debug port This patch adds a new dsaf mode named "single-port" mode for debug port. This mode only contains one debug port. This patch also changes the method of distinguishing the port type. Signed-off-by: Daode Huang Signed-off-by: Yisen Zhuang Signed-off-by: David S. 
Miller --- .../net/ethernet/hisilicon/hns/hns_ae_adapt.c | 2 +- .../net/ethernet/hisilicon/hns/hns_dsaf_mac.c | 8 +- .../ethernet/hisilicon/hns/hns_dsaf_main.c | 16 ++- .../ethernet/hisilicon/hns/hns_dsaf_main.h | 2 + .../ethernet/hisilicon/hns/hns_dsaf_misc.c | 4 +- .../net/ethernet/hisilicon/hns/hns_dsaf_ppe.c | 6 +- .../net/ethernet/hisilicon/hns/hns_dsaf_rcb.c | 132 ++++++++---------- .../net/ethernet/hisilicon/hns/hns_dsaf_rcb.h | 2 +- .../net/ethernet/hisilicon/hns/hns_dsaf_reg.h | 1 - 9 files changed, 84 insertions(+), 89 deletions(-) diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c index 159142272afb..1e8bf222ef3a 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c @@ -96,7 +96,7 @@ static struct ring_pair_cb *hns_ae_get_base_ring_pair( int q_num = rcb_comm->max_q_per_vf; int vf_num = rcb_comm->max_vfn; - if (common_idx == HNS_DSAF_COMM_SERVICE_NW_IDX) + if (!HNS_DSAF_IS_DEBUG(dsaf_dev)) return &rcb_comm->ring_pair_cb[port * q_num * vf_num]; else return &rcb_comm->ring_pair_cb[0]; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c index 10c367d20955..353b9e7502b5 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c @@ -249,7 +249,7 @@ int hns_mac_change_vf_addr(struct hns_mac_cb *mac_cb, struct mac_entry_idx *old_entry; old_entry = &mac_cb->addr_entry_idx[vmid]; - if (dsaf_dev) { + if (!HNS_DSAF_IS_DEBUG(dsaf_dev)) { memcpy(mac_entry.addr, addr, sizeof(mac_entry.addr)); mac_entry.in_vlan_id = old_entry->vlan_id; mac_entry.in_port_num = mac_cb->mac_id; @@ -289,7 +289,7 @@ int hns_mac_set_multi(struct hns_mac_cb *mac_cb, struct dsaf_device *dsaf_dev = mac_cb->dsaf_dev; struct dsaf_drv_mac_single_dest_entry mac_entry; - if (dsaf_dev && addr) { + if (!HNS_DSAF_IS_DEBUG(dsaf_dev) && addr) { memcpy(mac_entry.addr, addr, sizeof(mac_entry.addr)); mac_entry.in_vlan_id = 0;/*vlan_id;*/ mac_entry.in_port_num = mac_cb->mac_id; @@ -380,7 +380,7 @@ static int hns_mac_port_config_bc_en(struct hns_mac_cb *mac_cb, if (mac_cb->mac_type == HNAE_PORT_DEBUG) return 0; - if (dsaf_dev) { + if (!HNS_DSAF_IS_DEBUG(dsaf_dev)) { memcpy(mac_entry.addr, addr, sizeof(mac_entry.addr)); mac_entry.in_vlan_id = vlan_id; mac_entry.in_port_num = mac_cb->mac_id; @@ -418,7 +418,7 @@ int hns_mac_vm_config_bc_en(struct hns_mac_cb *mac_cb, u32 vmid, bool enable) uc_mac_entry = &mac_cb->addr_entry_idx[vmid]; - if (dsaf_dev) { + if (!HNS_DSAF_IS_DEBUG(dsaf_dev)) { memcpy(mac_entry.addr, addr, sizeof(mac_entry.addr)); mac_entry.in_vlan_id = uc_mac_entry->vlan_id; mac_entry.in_port_num = mac_cb->mac_id; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c index 8439f6d8e360..769285375341 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c @@ -28,6 +28,7 @@ const char *g_dsaf_mode_match[DSAF_MODE_MAX] = { [DSAF_MODE_DISABLE_2PORT_64VM] = "2port-64vf", [DSAF_MODE_DISABLE_6PORT_0VM] = "6port-16rss", [DSAF_MODE_DISABLE_6PORT_16VM] = "6port-16vf", + [DSAF_MODE_DISABLE_SP] = "single-port", }; int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev) @@ -217,9 +218,7 @@ static void hns_dsaf_mix_def_qid_cfg(struct dsaf_device *dsaf_dev) u32 q_id, q_num_per_port; u32 i; - hns_rcb_get_queue_mode(dsaf_dev->dsaf_mode, - HNS_DSAF_COMM_SERVICE_NW_IDX, - 
&max_vfn, &max_q_per_vf); + hns_rcb_get_queue_mode(dsaf_dev->dsaf_mode, &max_vfn, &max_q_per_vf); q_num_per_port = max_vfn * max_q_per_vf; for (i = 0, q_id = 0; i < DSAF_SERVICE_NW_NUM; i++) { @@ -239,9 +238,7 @@ static void hns_dsaf_inner_qid_cfg(struct dsaf_device *dsaf_dev) if (AE_IS_VER1(dsaf_dev->dsaf_ver)) return; - hns_rcb_get_queue_mode(dsaf_dev->dsaf_mode, - HNS_DSAF_COMM_SERVICE_NW_IDX, - &max_vfn, &max_q_per_vf); + hns_rcb_get_queue_mode(dsaf_dev->dsaf_mode, &max_vfn, &max_q_per_vf); q_num_per_port = max_vfn * max_q_per_vf; for (mac_id = 0, q_id = 0; mac_id < DSAF_SERVICE_NW_NUM; mac_id++) { @@ -712,7 +709,9 @@ static void hns_dsaf_tbl_tcam_data_ucast_pul( void hns_dsaf_set_promisc_mode(struct dsaf_device *dsaf_dev, u32 en) { - dsaf_set_dev_bit(dsaf_dev, DSAF_CFG_0_REG, DSAF_CFG_MIX_MODE_S, !!en); + if (!HNS_DSAF_IS_DEBUG(dsaf_dev)) + dsaf_set_dev_bit(dsaf_dev, DSAF_CFG_0_REG, + DSAF_CFG_MIX_MODE_S, !!en); } void hns_dsaf_set_inner_lb(struct dsaf_device *dsaf_dev, u32 mac_id, u32 en) @@ -1307,6 +1306,9 @@ static int hns_dsaf_init(struct dsaf_device *dsaf_dev) u32 i; int ret; + if (HNS_DSAF_IS_DEBUG(dsaf_dev)) + return 0; + ret = hns_dsaf_init_hw(dsaf_dev); if (ret) return ret; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h index e8eedc571296..a783019deace 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h @@ -41,6 +41,7 @@ struct hns_mac_cb; #define DSAF_STATIC_NUM 28 #define DSAF_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset)))) +#define HNS_DSAF_IS_DEBUG(dev) (dev->dsaf_mode == DSAF_MODE_DISABLE_SP) enum hal_dsaf_mode { HRD_DSAF_NO_DSAF_MODE = 0x0, @@ -117,6 +118,7 @@ enum dsaf_mode { DSAF_MODE_ENABLE_32VM, /**< en DSAF-mode, support 32 VM */ DSAF_MODE_ENABLE_128VM, /**< en DSAF-mode, support 128 VM */ DSAF_MODE_ENABLE, /**< before is enable DSAF mode*/ + DSAF_MODE_DISABLE_SP, /* = DSAF_GE_NUM) return; - if (port < DSAF_SERVICE_NW_NUM) { + if (!HNS_DSAF_IS_DEBUG(dsaf_dev)) { reg_val_1 = 0x1 << port; /* there is difference between V1 and V2 in register.*/ if (AE_IS_VER1(dsaf_dev->dsaf_ver)) @@ -218,7 +218,7 @@ void hns_ppe_com_srst(struct ppe_common_cb *ppe_common, u32 val) u32 reg_val; u32 reg_addr; - if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX) { + if (!HNS_DSAF_IS_DEBUG(dsaf_dev)) { reg_val = RESET_REQ_OR_DREQ; if (val == 0) reg_addr = DSAF_SUB_SC_RCB_PPE_COM_RESET_REQ_REG; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c index ab27b3b14ca3..3f59a8a30c86 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c @@ -68,7 +68,7 @@ static void __iomem *hns_ppe_common_get_ioaddr( int idx = ppe_common->comm_index; - if (idx == HNS_DSAF_COMM_SERVICE_NW_IDX) + if (!HNS_DSAF_IS_DEBUG(ppe_common->dsaf_dev)) base_addr = ppe_common->dsaf_dev->ppe_base + PPE_COMMON_REG_OFFSET; else @@ -90,7 +90,7 @@ int hns_ppe_common_get_cfg(struct dsaf_device *dsaf_dev, int comm_index) struct ppe_common_cb *ppe_common; int ppe_num; - if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX) + if (!HNS_DSAF_IS_DEBUG(dsaf_dev)) ppe_num = HNS_PPE_SERVICE_NW_ENGINE_NUM; else ppe_num = HNS_PPE_DEBUG_NW_ENGINE_NUM; @@ -103,7 +103,7 @@ int hns_ppe_common_get_cfg(struct dsaf_device *dsaf_dev, int comm_index) ppe_common->ppe_num = ppe_num; ppe_common->dsaf_dev = dsaf_dev; ppe_common->comm_index = comm_index; - if (comm_index == 
HNS_DSAF_COMM_SERVICE_NW_IDX) + if (!HNS_DSAF_IS_DEBUG(dsaf_dev)) ppe_common->ppe_mode = PPE_COMMON_MODE_SERVICE; else ppe_common->ppe_mode = PPE_COMMON_MODE_DEBUG; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c index 28ee26e5c478..121ba4e56dc4 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c @@ -270,7 +270,7 @@ static void hns_rcb_set_port_timeout( static int hns_rcb_common_get_port_num(struct rcb_common_cb *rcb_common) { - if (rcb_common->comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX) + if (!HNS_DSAF_IS_DEBUG(rcb_common->dsaf_dev)) return HNS_RCB_SERVICE_NW_ENGINE_NUM; else return HNS_RCB_DEBUG_NW_ENGINE_NUM; @@ -430,11 +430,10 @@ static void hns_rcb_ring_pair_get_cfg(struct ring_pair_cb *ring_pair_cb) static int hns_rcb_get_port_in_comm( struct rcb_common_cb *rcb_common, int ring_idx) { - int comm_index = rcb_common->comm_index; int port; int q_num; - if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX) { + if (!HNS_DSAF_IS_DEBUG(rcb_common->dsaf_dev)) { q_num = (int)rcb_common->max_q_per_vf * rcb_common->max_vfn; port = ring_idx / q_num; } else { @@ -455,7 +454,7 @@ static int hns_rcb_get_base_irq_idx(struct rcb_common_cb *rcb_common) int comm_index = rcb_common->comm_index; bool is_ver1 = AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver); - if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX) + if (!HNS_DSAF_IS_DEBUG(rcb_common->dsaf_dev)) return SERVICE_RING_IRQ_IDX(is_ver1); else return DEBUG_RING_IRQ_IDX(is_ver1) + @@ -549,7 +548,7 @@ int hns_rcb_set_coalesce_usecs( return 0; if (AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver)) { - if (rcb_common->comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX) { + if (!HNS_DSAF_IS_DEBUG(rcb_common->dsaf_dev)) { dev_err(rcb_common->dsaf_dev->dev, "error: not support coalesce_usecs setting!\n"); return -EINVAL; @@ -601,74 +600,67 @@ int hns_rcb_set_coalesced_frames( *@max_vfn : max vfn number *@max_q_per_vf:max ring number per vm */ -void hns_rcb_get_queue_mode(enum dsaf_mode dsaf_mode, int comm_index, - u16 *max_vfn, u16 *max_q_per_vf) +void hns_rcb_get_queue_mode(enum dsaf_mode dsaf_mode, u16 *max_vfn, + u16 *max_q_per_vf) { - if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX) { - switch (dsaf_mode) { - case DSAF_MODE_DISABLE_6PORT_0VM: - *max_vfn = 1; - *max_q_per_vf = 16; - break; - case DSAF_MODE_DISABLE_FIX: - *max_vfn = 1; - *max_q_per_vf = 1; - break; - case DSAF_MODE_DISABLE_2PORT_64VM: - *max_vfn = 64; - *max_q_per_vf = 1; - break; - case DSAF_MODE_DISABLE_6PORT_16VM: - *max_vfn = 16; - *max_q_per_vf = 1; - break; - default: - *max_vfn = 1; - *max_q_per_vf = 16; - break; - } - } else { + switch (dsaf_mode) { + case DSAF_MODE_DISABLE_6PORT_0VM: + *max_vfn = 1; + *max_q_per_vf = 16; + break; + case DSAF_MODE_DISABLE_FIX: + case DSAF_MODE_DISABLE_SP: *max_vfn = 1; *max_q_per_vf = 1; + break; + case DSAF_MODE_DISABLE_2PORT_64VM: + *max_vfn = 64; + *max_q_per_vf = 1; + break; + case DSAF_MODE_DISABLE_6PORT_16VM: + *max_vfn = 16; + *max_q_per_vf = 1; + break; + default: + *max_vfn = 1; + *max_q_per_vf = 16; + break; } } -int hns_rcb_get_ring_num(struct dsaf_device *dsaf_dev, int comm_index) +int hns_rcb_get_ring_num(struct dsaf_device *dsaf_dev) { - if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX) { - switch (dsaf_dev->dsaf_mode) { - case DSAF_MODE_ENABLE_FIX: - return 1; - - case DSAF_MODE_DISABLE_FIX: - return 6; - - case DSAF_MODE_ENABLE_0VM: - return 32; - - case DSAF_MODE_DISABLE_6PORT_0VM: - case DSAF_MODE_ENABLE_16VM: - case 
DSAF_MODE_DISABLE_6PORT_2VM: - case DSAF_MODE_DISABLE_6PORT_16VM: - case DSAF_MODE_DISABLE_6PORT_4VM: - case DSAF_MODE_ENABLE_8VM: - return 96; - - case DSAF_MODE_DISABLE_2PORT_16VM: - case DSAF_MODE_DISABLE_2PORT_8VM: - case DSAF_MODE_ENABLE_32VM: - case DSAF_MODE_DISABLE_2PORT_64VM: - case DSAF_MODE_ENABLE_128VM: - return 128; - - default: - dev_warn(dsaf_dev->dev, - "get ring num fail,use default!dsaf_mode=%d\n", - dsaf_dev->dsaf_mode); - return 128; - } - } else { + switch (dsaf_dev->dsaf_mode) { + case DSAF_MODE_ENABLE_FIX: + case DSAF_MODE_DISABLE_SP: return 1; + + case DSAF_MODE_DISABLE_FIX: + return 6; + + case DSAF_MODE_ENABLE_0VM: + return 32; + + case DSAF_MODE_DISABLE_6PORT_0VM: + case DSAF_MODE_ENABLE_16VM: + case DSAF_MODE_DISABLE_6PORT_2VM: + case DSAF_MODE_DISABLE_6PORT_16VM: + case DSAF_MODE_DISABLE_6PORT_4VM: + case DSAF_MODE_ENABLE_8VM: + return 96; + + case DSAF_MODE_DISABLE_2PORT_16VM: + case DSAF_MODE_DISABLE_2PORT_8VM: + case DSAF_MODE_ENABLE_32VM: + case DSAF_MODE_DISABLE_2PORT_64VM: + case DSAF_MODE_ENABLE_128VM: + return 128; + + default: + dev_warn(dsaf_dev->dev, + "get ring num fail,use default!dsaf_mode=%d\n", + dsaf_dev->dsaf_mode); + return 128; } } @@ -677,7 +669,7 @@ void __iomem *hns_rcb_common_get_vaddr(struct dsaf_device *dsaf_dev, { void __iomem *base_addr; - if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX) + if (!HNS_DSAF_IS_DEBUG(dsaf_dev)) base_addr = dsaf_dev->ppe_base + RCB_COMMON_REG_OFFSET; else base_addr = dsaf_dev->sds_base @@ -697,7 +689,7 @@ static phys_addr_t hns_rcb_common_get_paddr(struct dsaf_device *dsaf_dev, u64 size = 0; int index = 0; - if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX) { + if (!HNS_DSAF_IS_DEBUG(dsaf_dev)) { index = 2; addr_offset = RCB_COMMON_REG_OFFSET; } else { @@ -717,7 +709,7 @@ int hns_rcb_common_get_cfg(struct dsaf_device *dsaf_dev, enum dsaf_mode dsaf_mode = dsaf_dev->dsaf_mode; u16 max_vfn; u16 max_q_per_vf; - int ring_num = hns_rcb_get_ring_num(dsaf_dev, comm_index); + int ring_num = hns_rcb_get_ring_num(dsaf_dev); rcb_common = devm_kzalloc(dsaf_dev->dev, sizeof(*rcb_common) + @@ -732,7 +724,7 @@ int hns_rcb_common_get_cfg(struct dsaf_device *dsaf_dev, rcb_common->desc_num = dsaf_dev->desc_num; - hns_rcb_get_queue_mode(dsaf_mode, comm_index, &max_vfn, &max_q_per_vf); + hns_rcb_get_queue_mode(dsaf_mode, &max_vfn, &max_q_per_vf); rcb_common->max_vfn = max_vfn; rcb_common->max_q_per_vf = max_q_per_vf; @@ -932,7 +924,7 @@ void hns_rcb_get_common_regs(struct rcb_common_cb *rcb_com, void *data) { u32 *regs = data; bool is_ver1 = AE_IS_VER1(rcb_com->dsaf_dev->dsaf_ver); - bool is_dbg = (rcb_com->comm_index != HNS_DSAF_COMM_SERVICE_NW_IDX); + bool is_dbg = HNS_DSAF_IS_DEBUG(rcb_com->dsaf_dev); u32 reg_tmp; u32 reg_num_tmp; u32 i = 0; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h index eb61014ad615..bd54dac82ee0 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h @@ -111,7 +111,7 @@ void hns_rcb_common_free_cfg(struct dsaf_device *dsaf_dev, u32 comm_index); int hns_rcb_common_init_hw(struct rcb_common_cb *rcb_common); void hns_rcb_start(struct hnae_queue *q, u32 val); void hns_rcb_get_cfg(struct rcb_common_cb *rcb_common); -void hns_rcb_get_queue_mode(enum dsaf_mode dsaf_mode, int comm_index, +void hns_rcb_get_queue_mode(enum dsaf_mode dsaf_mode, u16 *max_vfn, u16 *max_q_per_vf); void hns_rcb_common_init_commit_hw(struct rcb_common_cb *rcb_common); diff --git 
a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h index 7ff195e60b02..cffd244f1ded 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h @@ -23,7 +23,6 @@ #define DSAF_COMM_DEV_NUM 3 #define DSAF_PPE_INODE_BASE 6 -#define HNS_DSAF_COMM_SERVICE_NW_IDX 0 #define DSAF_DEBUG_NW_NUM 2 #define DSAF_SERVICE_NW_NUM 6 #define DSAF_COMM_CHN DSAF_SERVICE_NW_NUM From a542458cb7211a5e092c6a3cd6150404a5b1aa46 Mon Sep 17 00:00:00 2001 From: Daode Huang Date: Sat, 23 Apr 2016 17:05:06 +0800 Subject: [PATCH 0998/1649] net: hns: set debug port irq index to 0 As debug ports are moved from service dsaf to debug dsaf, the interrupts offset should start from 0, So this patch re-defines the offset index of debug ports. Signed-off-by: Daode Huang Signed-off-by: Yisen Zhuang Signed-off-by: David S. Miller --- drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c | 8 +------- drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h | 5 +---- 2 files changed, 2 insertions(+), 11 deletions(-) diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c index 121ba4e56dc4..054f391a3eeb 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c @@ -445,20 +445,14 @@ static int hns_rcb_get_port_in_comm( #define SERVICE_RING_IRQ_IDX(v1) \ ((v1) ? HNS_SERVICE_RING_IRQ_IDX : HNSV2_SERVICE_RING_IRQ_IDX) -#define DEBUG_RING_IRQ_IDX(v1) \ - ((v1) ? HNS_DEBUG_RING_IRQ_IDX : HNSV2_DEBUG_RING_IRQ_IDX) -#define DEBUG_RING_IRQ_OFFSET(v1) \ - ((v1) ? HNS_DEBUG_RING_IRQ_OFFSET : HNSV2_DEBUG_RING_IRQ_OFFSET) static int hns_rcb_get_base_irq_idx(struct rcb_common_cb *rcb_common) { - int comm_index = rcb_common->comm_index; bool is_ver1 = AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver); if (!HNS_DSAF_IS_DEBUG(rcb_common->dsaf_dev)) return SERVICE_RING_IRQ_IDX(is_ver1); else - return DEBUG_RING_IRQ_IDX(is_ver1) + - (comm_index - 1) * DEBUG_RING_IRQ_OFFSET(is_ver1); + return HNS_DEBUG_RING_IRQ_IDX; } #define RCB_COMM_BASE_TO_RING_BASE(base, ringid)\ diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h index cffd244f1ded..87826087f08b 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h @@ -10,12 +10,9 @@ #ifndef _DSAF_REG_H_ #define _DSAF_REG_H_ -#define HNS_DEBUG_RING_IRQ_IDX 55 +#define HNS_DEBUG_RING_IRQ_IDX 0 #define HNS_SERVICE_RING_IRQ_IDX 59 -#define HNS_DEBUG_RING_IRQ_OFFSET 2 -#define HNSV2_DEBUG_RING_IRQ_IDX 409 #define HNSV2_SERVICE_RING_IRQ_IDX 25 -#define HNSV2_DEBUG_RING_IRQ_OFFSET 9 #define DSAF_MAX_PORT_NUM_PER_CHIP 8 #define DSAF_SERVICE_PORT_NUM_PER_DSAF 6 From 406adee9a9fc38c11671f26180e694976f45237c Mon Sep 17 00:00:00 2001 From: "Yisen.Zhuang\\(Zhuangyuzeng\\)" Date: Sat, 23 Apr 2016 17:05:07 +0800 Subject: [PATCH 0999/1649] net: hns: add attribute port-idx-in-ae in enet node. This patch parse port-idx-in-ae in enet node. In NIC mode of DSAF, all 6 PHYs of service DSAF are taken as ethernet ports to the CPU. The port-idx-in-ae can be 0 to 5. Here is the diagram: +-----+---------------+ | CPU | +-+-+-+---+-+-+-+-+-+-+ | | | | | | | | debug debug service port port port (0) (0) (0-5) In Switch mode of DSAF, all 6 PHYs of service DSAF are taken as physical ports connect to a LAN Switch while the CPU side assume itself have one single NIC connect to this switch. 
In this case, the port-idx-in-ae will be 0 only. +-----+-----+------+------+ | CPU | +-+-+-+-+-+-+-+-+-+-+-+-+-+ | | service| port(0) debug debug +------------+ port port | switch | (0) (0) +-+-+-+-+-+-++ | | | | | | external port when port-idx-in-ae is not exists, old attribute port-id will be used (only for compatible purpose, not recommended to use port-id in new code). Signed-off-by: Daode Huang Signed-off-by: Yisen Zhuang Signed-off-by: David S. Miller --- .../net/ethernet/hisilicon/hns/hns_ae_adapt.c | 33 ++++--------------- .../net/ethernet/hisilicon/hns/hns_dsaf_reg.h | 1 - drivers/net/ethernet/hisilicon/hns/hns_enet.c | 17 +++++++--- drivers/net/ethernet/hisilicon/hns/hns_enet.h | 3 ++ 4 files changed, 22 insertions(+), 32 deletions(-) diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c index 1e8bf222ef3a..1c86336d6475 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c @@ -29,25 +29,6 @@ static struct hns_mac_cb *hns_get_mac_cb(struct hnae_handle *handle) return vf_cb->mac_cb; } -/** - * hns_ae_map_eport_to_dport - translate enet port id to dsaf port id - * @port_id: enet port id - *: debug port 0-1, service port 2 -7 (dsaf mode only 2) - * return: dsaf port id - *: service ports 0 - 5, debug port 6-7 - **/ -static int hns_ae_map_eport_to_dport(u32 port_id) -{ - int port_index; - - if (port_id < DSAF_DEBUG_NW_NUM) - port_index = port_id + DSAF_SERVICE_PORT_NUM_PER_DSAF; - else - port_index = port_id - DSAF_DEBUG_NW_NUM; - - return port_index; -} - static struct dsaf_device *hns_ae_get_dsaf_dev(struct hnae_ae_dev *dev) { return container_of(dev, struct dsaf_device, ae_dev); @@ -110,7 +91,6 @@ static struct ring_pair_cb *hns_ae_get_ring_pair(struct hnae_queue *q) struct hnae_handle *hns_ae_get_handle(struct hnae_ae_dev *dev, u32 port_id) { - int port_idx; int vfnum_per_port; int qnum_per_vf; int i; @@ -120,11 +100,10 @@ struct hnae_handle *hns_ae_get_handle(struct hnae_ae_dev *dev, struct hnae_vf_cb *vf_cb; dsaf_dev = hns_ae_get_dsaf_dev(dev); - port_idx = hns_ae_map_eport_to_dport(port_id); - ring_pair_cb = hns_ae_get_base_ring_pair(dsaf_dev, port_idx); - vfnum_per_port = hns_ae_get_vf_num_per_port(dsaf_dev, port_idx); - qnum_per_vf = hns_ae_get_q_num_per_vf(dsaf_dev, port_idx); + ring_pair_cb = hns_ae_get_base_ring_pair(dsaf_dev, port_id); + vfnum_per_port = hns_ae_get_vf_num_per_port(dsaf_dev, port_id); + qnum_per_vf = hns_ae_get_q_num_per_vf(dsaf_dev, port_id); vf_cb = kzalloc(sizeof(*vf_cb) + qnum_per_vf * sizeof(struct hnae_queue *), GFP_KERNEL); @@ -163,14 +142,14 @@ struct hnae_handle *hns_ae_get_handle(struct hnae_ae_dev *dev, } vf_cb->dsaf_dev = dsaf_dev; - vf_cb->port_index = port_idx; - vf_cb->mac_cb = &dsaf_dev->mac_cb[port_idx]; + vf_cb->port_index = port_id; + vf_cb->mac_cb = &dsaf_dev->mac_cb[port_id]; ae_handle->phy_if = vf_cb->mac_cb->phy_if; ae_handle->phy_node = vf_cb->mac_cb->phy_node; ae_handle->if_support = vf_cb->mac_cb->if_support; ae_handle->port_type = vf_cb->mac_cb->mac_type; - ae_handle->dport_id = port_idx; + ae_handle->dport_id = port_id; return ae_handle; vf_id_err: diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h index 87826087f08b..ed0043a4dbe1 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h @@ -24,7 +24,6 @@ #define DSAF_SERVICE_NW_NUM 6 #define DSAF_COMM_CHN DSAF_SERVICE_NW_NUM 
#define DSAF_GE_NUM ((DSAF_SERVICE_NW_NUM) + (DSAF_DEBUG_NW_NUM)) -#define DSAF_PORT_NUM ((DSAF_SERVICE_NW_NUM) + (DSAF_DEBUG_NW_NUM)) #define DSAF_XGE_NUM DSAF_SERVICE_NW_NUM #define DSAF_PORT_TYPE_NUM 3 #define DSAF_NODE_NUM 18 diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c index 687204b780b0..e47aff250b15 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c @@ -1873,6 +1873,7 @@ static int hns_nic_dev_probe(struct platform_device *pdev) struct net_device *ndev; struct hns_nic_priv *priv; struct device_node *node = dev->of_node; + u32 port_id; int ret; ndev = alloc_etherdev_mq(sizeof(struct hns_nic_priv), NIC_MAX_Q_PER_VF); @@ -1896,10 +1897,18 @@ static int hns_nic_dev_probe(struct platform_device *pdev) dev_err(dev, "not find ae-handle\n"); goto out_read_prop_fail; } - - ret = of_property_read_u32(node, "port-id", &priv->port_id); - if (ret) - goto out_read_prop_fail; + /* try to find port-idx-in-ae first */ + ret = of_property_read_u32(node, "port-idx-in-ae", &port_id); + if (ret) { + /* only for old code compatible */ + ret = of_property_read_u32(node, "port-id", &port_id); + if (ret) + goto out_read_prop_fail; + /* for old dts, we need to caculate the port offset */ + port_id = port_id < HNS_SRV_OFFSET ? port_id + HNS_DEBUG_OFFSET + : port_id - HNS_SRV_OFFSET; + } + priv->port_id = port_id; hns_init_mac_addr(ndev); diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.h b/drivers/net/ethernet/hisilicon/hns/hns_enet.h index c68ab3d34fc2..337efa582bac 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_enet.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.h @@ -18,6 +18,9 @@ #include "hnae.h" +#define HNS_DEBUG_OFFSET 6 +#define HNS_SRV_OFFSET 2 + enum hns_nic_state { NIC_STATE_TESTING = 0, NIC_STATE_RESETTING, From 422c3107ed2cc6297f051109f3d4b6d855eaae14 Mon Sep 17 00:00:00 2001 From: "Yisen.Zhuang\\(Zhuangyuzeng\\)" Date: Sat, 23 Apr 2016 17:05:08 +0800 Subject: [PATCH 1000/1649] net: hns: add attribute reset-field-offset for dsaf node Add the subctrl reset offset for dsaf, this property is used to reset xge/ge ports for different dsaf. If this attribute is not present, default value 0 will be used. Signed-off-by: Daode Huang Signed-off-by: Yisen Zhuang Signed-off-by: David S. 
Miller --- .../ethernet/hisilicon/hns/hns_dsaf_main.c | 8 ++++ .../ethernet/hisilicon/hns/hns_dsaf_main.h | 1 + .../ethernet/hisilicon/hns/hns_dsaf_misc.c | 40 +++++++++++++------ 3 files changed, 37 insertions(+), 12 deletions(-) diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c index 769285375341..b418d4201290 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c @@ -36,6 +36,7 @@ int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev) int ret, i; u32 desc_num; u32 buf_size; + u32 reset_offset = 0; const char *mode_str; struct device_node *np = dsaf_dev->dev->of_node; @@ -119,6 +120,13 @@ int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev) } dsaf_dev->desc_num = desc_num; + ret = of_property_read_u32(np, "reset-field-offset", &reset_offset); + if (ret < 0) { + dev_dbg(dsaf_dev->dev, + "get reset-field-offset fail, ret=%d!\r\n", ret); + } + dsaf_dev->reset_offset = reset_offset; + ret = of_property_read_u32(np, "buf-size", &buf_size); if (ret < 0) { dev_err(dsaf_dev->dev, diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h index a783019deace..47e768b9ec97 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h @@ -281,6 +281,7 @@ struct dsaf_device { u32 desc_num; /* desc num per queue*/ u32 buf_size; /* ring buffer size */ + u32 reset_offset; /* reset field offset in sub sysctrl */ int buf_size_type; /* ring buffer size-type */ enum dsaf_mode dsaf_mode; /* dsaf mode */ enum hal_dsaf_mode dsaf_en; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c index 8cb13d9059f9..91e0382391eb 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c @@ -110,7 +110,11 @@ void hns_dsaf_xge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, u32 val) return; reg_val |= RESET_REQ_OR_DREQ; - reg_val |= 0x2082082 << port; + + if (!HNS_DSAF_IS_DEBUG(dsaf_dev)) + reg_val |= 0x2082082 << port; + else + reg_val |= 0x2082082 << (dsaf_dev->reset_offset + 6); if (val == 0) reg_addr = DSAF_SUB_SC_XGE_RESET_REQ_REG; @@ -129,7 +133,11 @@ void hns_dsaf_xge_core_srst_by_port(struct dsaf_device *dsaf_dev, if (port >= DSAF_XGE_NUM) return; - reg_val |= XGMAC_TRX_CORE_SRST_M << port; + if (!HNS_DSAF_IS_DEBUG(dsaf_dev)) + reg_val |= XGMAC_TRX_CORE_SRST_M << port; + else + reg_val |= XGMAC_TRX_CORE_SRST_M << + (dsaf_dev->reset_offset + 6); if (val == 0) reg_addr = DSAF_SUB_SC_XGE_RESET_REQ_REG; @@ -173,8 +181,8 @@ void hns_dsaf_ge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, u32 val) reg_val_1); } } else { - reg_val_1 = 0x15540 << (port - 6); - reg_val_2 = 0x100 << (port - 6); + reg_val_1 = 0x15540 << dsaf_dev->reset_offset; + reg_val_2 = 0x100 << dsaf_dev->reset_offset; if (val == 0) { dsaf_write_reg(dsaf_dev->sc_base, @@ -201,7 +209,11 @@ void hns_ppe_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, u32 val) u32 reg_val = 0; u32 reg_addr; - reg_val |= RESET_REQ_OR_DREQ << port; + if (!HNS_DSAF_IS_DEBUG(dsaf_dev)) + reg_val |= RESET_REQ_OR_DREQ << port; + else + reg_val |= RESET_REQ_OR_DREQ << + (dsaf_dev->reset_offset + 6); if (val == 0) reg_addr = DSAF_SUB_SC_PPE_RESET_REQ_REG; @@ -213,7 +225,6 @@ void hns_ppe_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, u32 val) void hns_ppe_com_srst(struct ppe_common_cb *ppe_common, 
u32 val) { - int comm_index = ppe_common->comm_index; struct dsaf_device *dsaf_dev = ppe_common->dsaf_dev; u32 reg_val; u32 reg_addr; @@ -226,7 +237,7 @@ void hns_ppe_com_srst(struct ppe_common_cb *ppe_common, u32 val) reg_addr = DSAF_SUB_SC_RCB_PPE_COM_RESET_DREQ_REG; } else { - reg_val = 0x100 << (comm_index - 1); + reg_val = 0x100 << dsaf_dev->reset_offset; if (val == 0) reg_addr = DSAF_SUB_SC_PPE_RESET_REQ_REG; @@ -247,14 +258,16 @@ phy_interface_t hns_mac_get_phy_if(struct hns_mac_cb *mac_cb) u32 mode; u32 reg; u32 shift; + u32 phy_offset; bool is_ver1 = AE_IS_VER1(mac_cb->dsaf_dev->dsaf_ver); void __iomem *sys_ctl_vaddr = mac_cb->sys_ctl_vaddr; int mac_id = mac_cb->mac_id; phy_interface_t phy_if = PHY_INTERFACE_MODE_NA; - if (is_ver1 && (mac_id >= 6 && mac_id <= 7)) { + if (is_ver1 && HNS_DSAF_IS_DEBUG(mac_cb->dsaf_dev)) { phy_if = PHY_INTERFACE_MODE_SGMII; - } else if (mac_id >= 0 && mac_id <= 3) { + } else if (mac_id >= 0 && mac_id <= 3 && + !HNS_DSAF_IS_DEBUG(mac_cb->dsaf_dev)) { reg = is_ver1 ? HNS_MAC_HILINK4_REG : HNS_MAC_HILINK4V2_REG; mode = dsaf_read_reg(sys_ctl_vaddr, reg); /* mac_id 0, 1, 2, 3 ---> hilink4 lane 0, 1, 2, 3 */ @@ -263,11 +276,14 @@ phy_interface_t hns_mac_get_phy_if(struct hns_mac_cb *mac_cb) phy_if = PHY_INTERFACE_MODE_XGMII; else phy_if = PHY_INTERFACE_MODE_SGMII; - } else if (mac_id >= 4 && mac_id <= 7) { + } else { reg = is_ver1 ? HNS_MAC_HILINK3_REG : HNS_MAC_HILINK3V2_REG; mode = dsaf_read_reg(sys_ctl_vaddr, reg); - /* mac_id 4, 5, 6, 7 ---> hilink3 lane 2, 3, 0, 1 */ - shift = is_ver1 ? 0 : mac_id <= 5 ? mac_id - 2 : mac_id - 6; + /* mac_id 4, 5,---> hilink3 lane 2, 3 + * debug port 0(6), 1(7) ---> hilink3 lane 0, 1 + */ + phy_offset = mac_cb->dsaf_dev->reset_offset - 1; + shift = is_ver1 ? 0 : mac_id >= 4 ? mac_id - 2 : phy_offset; if (dsaf_get_bit(mode, shift)) phy_if = PHY_INTERFACE_MODE_XGMII; else From 86897c960b490e62714f4b123b7d20b04945d773 Mon Sep 17 00:00:00 2001 From: "Yisen.Zhuang\\(Zhuangyuzeng\\)" Date: Sat, 23 Apr 2016 17:05:09 +0800 Subject: [PATCH 1001/1649] net: hns: add syscon operation for dsaf This patch provides the read/write function for dsaf to access the registers through syscon methods. Signed-off-by: Daode Huang Signed-off-by: Yisen Zhuang Signed-off-by: David S. Miller --- drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h index ed0043a4dbe1..6a03c94821d5 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h @@ -10,6 +10,7 @@ #ifndef _DSAF_REG_H_ #define _DSAF_REG_H_ +#include #define HNS_DEBUG_RING_IRQ_IDX 0 #define HNS_SERVICE_RING_IRQ_IDX 59 #define HNSV2_SERVICE_RING_IRQ_IDX 25 @@ -989,6 +990,19 @@ static inline u32 dsaf_read_reg(u8 __iomem *base, u32 reg) return readl(reg_addr + reg); } +static inline void dsaf_write_syscon(struct regmap *base, u32 reg, u32 value) +{ + regmap_write(base, reg, value); +} + +static inline u32 dsaf_read_syscon(struct regmap *base, u32 reg) +{ + unsigned int val; + + regmap_read(base, reg, &val); + return val; +} + #define dsaf_read_dev(a, reg) \ dsaf_read_reg((a)->io_base, (reg)) From 2e2591b130c43dd241e7aa8b0f2d74dbf3cc334b Mon Sep 17 00:00:00 2001 From: Daode Huang Date: Sat, 23 Apr 2016 17:05:10 +0800 Subject: [PATCH 1002/1649] net: hns: sort the header file by alphabetical order This patch tunes the header file by the alphabetical order. 
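To put the reset-field-offset property above in concrete terms: it carries the shift amount that the old code derived from the debug port number (port - 6 for the GE reset, comm_index - 1 for the PPE common reset). The standalone sketch below is an illustration only, not driver code; the 0x15540 mask and the (port - 6) arithmetic are copied from the hns_dsaf_ge_srst_by_port() hunk above, while the helper names are invented here. Compiled on its own, it shows that a debug dsaf using reset-field-offset = 0 for old port 6, or 1 for old port 7, programs the same GE reset request value as before.

#include <stdio.h>

/* Old path: shift derived from the debug port id. */
static unsigned int ge_reset_req1_old(unsigned int port)
{
	return 0x15540u << (port - 6);
}

/* New path: shift taken from the reset-field-offset DT property. */
static unsigned int ge_reset_req1_new(unsigned int reset_offset)
{
	return 0x15540u << reset_offset;
}

int main(void)
{
	unsigned int port;

	for (port = 6; port <= 7; port++)
		printf("debug port %u: old=0x%x, new(reset-field-offset=%u)=0x%x\n",
		       port, ge_reset_req1_old(port),
		       port - 6, ge_reset_req1_new(port - 6));
	return 0;
}

With the property left at its default of 0 the new code matches the first debug dsaf; a second debug dsaf would describe itself with reset-field-offset = 1.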
Signed-off-by: Daode Huang Signed-off-by: Yisen Zhuang Signed-off-by: David S. Miller --- drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c | 12 ++++++------ drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c | 14 +++++++------- drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c | 4 ++-- 3 files changed, 15 insertions(+), 15 deletions(-) diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c index 353b9e7502b5..37303852e9a9 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c @@ -7,18 +7,18 @@ * (at your option) any later version. */ -#include -#include #include -#include -#include #include -#include +#include +#include +#include #include #include +#include +#include -#include "hns_dsaf_misc.h" #include "hns_dsaf_main.h" +#include "hns_dsaf_misc.h" #include "hns_dsaf_rcb.h" #define MAC_EN_FLAG_V 0xada0328 diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c index b418d4201290..98e0e8302190 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c @@ -7,22 +7,22 @@ * (at your option) any later version. */ -#include -#include +#include #include #include +#include +#include #include -#include #include #include #include -#include +#include #include -#include "hns_dsaf_main.h" -#include "hns_dsaf_rcb.h" -#include "hns_dsaf_ppe.h" #include "hns_dsaf_mac.h" +#include "hns_dsaf_main.h" +#include "hns_dsaf_ppe.h" +#include "hns_dsaf_rcb.h" const char *g_dsaf_mode_match[DSAF_MODE_MAX] = { [DSAF_MODE_DISABLE_2PORT_64VM] = "2port-64vf", diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c index 91e0382391eb..67c8b9e8b90f 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c @@ -7,10 +7,10 @@ * (at your option) any later version. */ -#include "hns_dsaf_misc.h" #include "hns_dsaf_mac.h" -#include "hns_dsaf_reg.h" +#include "hns_dsaf_misc.h" #include "hns_dsaf_ppe.h" +#include "hns_dsaf_reg.h" void hns_cpld_set_led(struct hns_mac_cb *mac_cb, int link_status, u16 speed, int data) From 831d828bf2cc8535b74fa33c705a6f83e2e34eec Mon Sep 17 00:00:00 2001 From: "Yisen.Zhuang\\(Zhuangyuzeng\\)" Date: Sat, 23 Apr 2016 17:05:11 +0800 Subject: [PATCH 1003/1649] net: hns: separate debug dsaf device from service dsaf device There are two kinds of dsaf device in hns, one is for service ports, contains crossbar in it, can work under different mode. Another is for debug port, only can work under "single-port" mode. The current code only declared a dsaf device for both service ports and debug ports. This patch separate it to three platform devices. 
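A minimal sketch of what the split means for per-device port bookkeeping, assuming it mirrors the DSAF_MAX_PORT_NUM value and the hns_mac_get_max_port_num() helper introduced further down in this patch (the struct and the device names below are invented for the example):

#include <stdio.h>
#include <stdbool.h>

#define DSAF_MAX_PORT_NUM 6	/* network ports of the service dsaf */

struct toy_dsaf {
	const char *name;
	bool is_debug;		/* "single-port" debug dsaf */
};

static int max_port_num(const struct toy_dsaf *dsaf)
{
	return dsaf->is_debug ? 1 : DSAF_MAX_PORT_NUM;
}

int main(void)
{
	static const struct toy_dsaf devs[] = {
		{ "service dsaf",  false },
		{ "debug dsaf #0", true },
		{ "debug dsaf #1", true },
	};
	unsigned int i;

	for (i = 0; i < sizeof(devs) / sizeof(devs[0]); i++)
		printf("%s: %d port(s)\n", devs[i].name, max_port_num(&devs[i]));
	return 0;
}

The diagrams below show the same split from the hardware point of view.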
Here is the diagram of all port in one platform device(old): CPU | | DSAF(one platform device) -------------------------------------------------------------- / | | | | | / | PPE PPE PPE | / | | | | | / | | | | | / | crossbar | | | / | | | | |/ | ----------------------------------- | | | | | | | | | | | | | | | | | | | | | | | | MAC MAC MAC MAC MAC MAC MAC MAC | | | | | | | | | | | -------------------------------------------------------------- | | | | | | | | PHY PHY PHY PHY PHY PHY PHY PHY Here is the diagram of separate all ports to three platform(new): CPU | ----------------------------------- | | | ---------------------------------------------- --------- --------- | | | | | | | | | PPE | | PPE | | PPE | | | | | | | | | | | | | | | | | | | | crossbar | | | | | | | | | | | | | | | | | ---------------------------------- | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | MAC MAC MAC MAC MAC MAC | | MAC | | MAC | | | | | | | | | | | | | | | ---------------------------------------------- --------- --------- | | | | | | \ / | / | PHY PHY PHY PHY PHY PHY \ / PHY / PHY \ / / \ / / DSAF(three platform device) Signed-off-by: Daode Huang Signed-off-by: Yisen Zhuang Signed-off-by: David S. Miller --- .../net/ethernet/hisilicon/hns/hns_ae_adapt.c | 40 ++--- .../net/ethernet/hisilicon/hns/hns_dsaf_mac.c | 152 +++++++++++++----- .../net/ethernet/hisilicon/hns/hns_dsaf_mac.h | 7 +- .../ethernet/hisilicon/hns/hns_dsaf_main.c | 89 ++++++---- .../ethernet/hisilicon/hns/hns_dsaf_main.h | 12 +- .../ethernet/hisilicon/hns/hns_dsaf_misc.c | 72 +++++---- .../net/ethernet/hisilicon/hns/hns_dsaf_ppe.c | 57 ++----- .../net/ethernet/hisilicon/hns/hns_dsaf_ppe.h | 1 - .../net/ethernet/hisilicon/hns/hns_dsaf_rcb.c | 51 ++---- .../net/ethernet/hisilicon/hns/hns_dsaf_reg.h | 15 +- 10 files changed, 258 insertions(+), 238 deletions(-) diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c index 1c86336d6475..58341dad8042 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c @@ -37,50 +37,35 @@ static struct dsaf_device *hns_ae_get_dsaf_dev(struct hnae_ae_dev *dev) static struct hns_ppe_cb *hns_get_ppe_cb(struct hnae_handle *handle) { int ppe_index; - int ppe_common_index; struct ppe_common_cb *ppe_comm; struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle); - if (vf_cb->port_index < DSAF_SERVICE_PORT_NUM_PER_DSAF) { - ppe_index = vf_cb->port_index; - ppe_common_index = 0; - } else { - ppe_index = 0; - ppe_common_index = - vf_cb->port_index - DSAF_SERVICE_PORT_NUM_PER_DSAF + 1; - } - ppe_comm = vf_cb->dsaf_dev->ppe_common[ppe_common_index]; + ppe_comm = vf_cb->dsaf_dev->ppe_common[0]; + ppe_index = vf_cb->port_index; + return &ppe_comm->ppe_cb[ppe_index]; } static int hns_ae_get_q_num_per_vf( struct dsaf_device *dsaf_dev, int port) { - int common_idx = hns_dsaf_get_comm_idx_by_port(port); - - return dsaf_dev->rcb_common[common_idx]->max_q_per_vf; + return dsaf_dev->rcb_common[0]->max_q_per_vf; } static int hns_ae_get_vf_num_per_port( struct dsaf_device *dsaf_dev, int port) { - int common_idx = hns_dsaf_get_comm_idx_by_port(port); - - return dsaf_dev->rcb_common[common_idx]->max_vfn; + return dsaf_dev->rcb_common[0]->max_vfn; } static struct ring_pair_cb *hns_ae_get_base_ring_pair( struct dsaf_device *dsaf_dev, int port) { - int common_idx = hns_dsaf_get_comm_idx_by_port(port); - struct rcb_common_cb *rcb_comm = dsaf_dev->rcb_common[common_idx]; + struct rcb_common_cb *rcb_comm = 
dsaf_dev->rcb_common[0]; int q_num = rcb_comm->max_q_per_vf; int vf_num = rcb_comm->max_vfn; - if (!HNS_DSAF_IS_DEBUG(dsaf_dev)) - return &rcb_comm->ring_pair_cb[port * q_num * vf_num]; - else - return &rcb_comm->ring_pair_cb[0]; + return &rcb_comm->ring_pair_cb[port * q_num * vf_num]; } static struct ring_pair_cb *hns_ae_get_ring_pair(struct hnae_queue *q) @@ -143,7 +128,7 @@ struct hnae_handle *hns_ae_get_handle(struct hnae_ae_dev *dev, vf_cb->dsaf_dev = dsaf_dev; vf_cb->port_index = port_id; - vf_cb->mac_cb = &dsaf_dev->mac_cb[port_id]; + vf_cb->mac_cb = dsaf_dev->mac_cb[port_id]; ae_handle->phy_if = vf_cb->mac_cb->phy_if; ae_handle->phy_node = vf_cb->mac_cb->phy_node; @@ -299,11 +284,8 @@ static void hns_ae_reset(struct hnae_handle *handle) struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle); if (vf_cb->mac_cb->mac_type == HNAE_PORT_DEBUG) { - u8 ppe_common_index = - vf_cb->port_index - DSAF_SERVICE_PORT_NUM_PER_DSAF + 1; - hns_mac_reset(vf_cb->mac_cb); - hns_ppe_reset_common(vf_cb->dsaf_dev, ppe_common_index); + hns_ppe_reset_common(vf_cb->dsaf_dev, 0); } } @@ -702,7 +684,6 @@ int hns_ae_cpld_set_led_id(struct hnae_handle *handle, void hns_ae_get_regs(struct hnae_handle *handle, void *data) { u32 *p = data; - u32 rcb_com_idx; int i; struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle); struct hns_ppe_cb *ppe_cb = hns_get_ppe_cb(handle); @@ -710,8 +691,7 @@ void hns_ae_get_regs(struct hnae_handle *handle, void *data) hns_ppe_get_regs(ppe_cb, p); p += hns_ppe_get_regs_count(); - rcb_com_idx = hns_dsaf_get_comm_idx_by_port(vf_cb->port_index); - hns_rcb_get_common_regs(vf_cb->dsaf_dev->rcb_common[rcb_com_idx], p); + hns_rcb_get_common_regs(vf_cb->dsaf_dev->rcb_common[0], p); p += hns_rcb_get_common_regs_count(); for (i = 0; i < handle->q_num; i++) { diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c index 37303852e9a9..a731777415dc 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c @@ -10,6 +10,7 @@ #include #include #include +#include #include #include #include @@ -168,10 +169,9 @@ static int hns_mac_get_inner_port_num(struct hns_mac_cb *mac_cb, u8 vmid, u8 *port_num) { u8 tmp_port; - u32 comm_idx; if (mac_cb->dsaf_dev->dsaf_mode <= DSAF_MODE_ENABLE) { - if (mac_cb->mac_id != DSAF_MAX_PORT_NUM_PER_CHIP) { + if (mac_cb->mac_id != DSAF_MAX_PORT_NUM) { dev_err(mac_cb->dev, "input invalid,%s mac%d vmid%d !\n", mac_cb->dsaf_dev->ae_dev.name, @@ -179,7 +179,7 @@ static int hns_mac_get_inner_port_num(struct hns_mac_cb *mac_cb, return -EINVAL; } } else if (mac_cb->dsaf_dev->dsaf_mode < DSAF_MODE_MAX) { - if (mac_cb->mac_id >= DSAF_MAX_PORT_NUM_PER_CHIP) { + if (mac_cb->mac_id >= DSAF_MAX_PORT_NUM) { dev_err(mac_cb->dev, "input invalid,%s mac%d vmid%d!\n", mac_cb->dsaf_dev->ae_dev.name, @@ -192,9 +192,7 @@ static int hns_mac_get_inner_port_num(struct hns_mac_cb *mac_cb, return -EINVAL; } - comm_idx = hns_dsaf_get_comm_idx_by_port(mac_cb->mac_id); - - if (vmid >= mac_cb->dsaf_dev->rcb_common[comm_idx]->max_vfn) { + if (vmid >= mac_cb->dsaf_dev->rcb_common[0]->max_vfn) { dev_err(mac_cb->dev, "input invalid,%s mac%d vmid%d !\n", mac_cb->dsaf_dev->ae_dev.name, mac_cb->mac_id, vmid); return -EINVAL; @@ -234,7 +232,7 @@ static int hns_mac_get_inner_port_num(struct hns_mac_cb *mac_cb, } /** - *hns_mac_get_inner_port_num - change vf mac address + *hns_mac_change_vf_addr - change vf mac address *@mac_cb: mac device *@vmid: vmid *@addr:mac address @@ -651,14 +649,15 @@ 
free_mac_drv: } /** - *mac_free_dev - get mac information from device node + *hns_mac_get_info - get mac information from device node *@mac_cb: mac device *@np:device node - *@mac_mode_idx:mac mode index + * return: 0 --success, negative --fail */ -static void hns_mac_get_info(struct hns_mac_cb *mac_cb, - struct device_node *np, u32 mac_mode_idx) +static int hns_mac_get_info(struct hns_mac_cb *mac_cb) { + struct device_node *np = mac_cb->dev->of_node; + struct regmap *syscon; mac_cb->link = false; mac_cb->half_duplex = false; mac_cb->speed = mac_phy_to_speed[mac_cb->phy_if]; @@ -675,11 +674,34 @@ static void hns_mac_get_info(struct hns_mac_cb *mac_cb, mac_cb->max_frm = MAC_DEFAULT_MTU; mac_cb->tx_pause_frm_time = MAC_DEFAULT_PAUSE_TIME; - /* Get the rest of the PHY information */ - mac_cb->phy_node = of_parse_phandle(np, "phy-handle", mac_cb->mac_id); + /* if the dsaf node doesn't contain a port subnode, get phy-handle + * from dsaf node + */ + if (!mac_cb->fw_port) { + mac_cb->phy_node = of_parse_phandle(np, "phy-handle", + mac_cb->mac_id); + if (mac_cb->phy_node) + dev_dbg(mac_cb->dev, "mac%d phy_node: %s\n", + mac_cb->mac_id, mac_cb->phy_node->name); + return 0; + } + if (!is_of_node(mac_cb->fw_port)) + return -EINVAL; + /* parse property from port subnode in dsaf */ + mac_cb->phy_node = of_parse_phandle(to_of_node(mac_cb->fw_port), + "phy-handle", 0); if (mac_cb->phy_node) dev_dbg(mac_cb->dev, "mac%d phy_node: %s\n", mac_cb->mac_id, mac_cb->phy_node->name); + syscon = syscon_node_to_regmap( + of_parse_phandle(to_of_node(mac_cb->fw_port), + "serdes-syscon", 0)); + if (IS_ERR_OR_NULL(syscon)) { + dev_err(mac_cb->dev, "serdes-syscon is needed!\n"); + return -EINVAL; + } + mac_cb->serdes_ctrl = syscon; + return 0; } /** @@ -709,31 +731,27 @@ u8 __iomem *hns_mac_get_vaddr(struct dsaf_device *dsaf_dev, return base + 0x40000 + mac_id * 0x4000 - mac_mode_idx * 0x20000; else - return mac_cb->serdes_vaddr + 0x1000 - + (mac_id - DSAF_SERVICE_PORT_NUM_PER_DSAF) * 0x100000; + return dsaf_dev->ppe_base + 0x1000; } /** * hns_mac_get_cfg - get mac cfg from dtb or acpi table * @dsaf_dev: dsa fabric device struct pointer - * @mac_idx: mac index - * retuen 0 - success , negative --fail + * @mac_cb: mac control block + * return 0 - success , negative --fail */ -int hns_mac_get_cfg(struct dsaf_device *dsaf_dev, int mac_idx) +int hns_mac_get_cfg(struct dsaf_device *dsaf_dev, struct hns_mac_cb *mac_cb) { int ret; u32 mac_mode_idx; - struct hns_mac_cb *mac_cb = &dsaf_dev->mac_cb[mac_idx]; mac_cb->dsaf_dev = dsaf_dev; mac_cb->dev = dsaf_dev->dev; - mac_cb->mac_id = mac_idx; mac_cb->sys_ctl_vaddr = dsaf_dev->sc_base; mac_cb->serdes_vaddr = dsaf_dev->sds_base; - if (dsaf_dev->cpld_base && - mac_idx < DSAF_SERVICE_PORT_NUM_PER_DSAF) { + if (dsaf_dev->cpld_base && !HNS_DSAF_IS_DEBUG(dsaf_dev)) { mac_cb->cpld_vaddr = dsaf_dev->cpld_base + mac_cb->mac_id * CPLD_ADDR_PORT_OFFSET; cpld_led_reset(mac_cb); @@ -742,7 +760,7 @@ int hns_mac_get_cfg(struct dsaf_device *dsaf_dev, int mac_idx) mac_cb->txpkt_for_led = 0; mac_cb->rxpkt_for_led = 0; - if (mac_idx < DSAF_SERVICE_PORT_NUM_PER_DSAF) + if (!HNS_DSAF_IS_DEBUG(dsaf_dev)) mac_cb->mac_type = HNAE_PORT_SERVICE; else mac_cb->mac_type = HNAE_PORT_DEBUG; @@ -758,53 +776,99 @@ int hns_mac_get_cfg(struct dsaf_device *dsaf_dev, int mac_idx) } mac_mode_idx = (u32)ret; - hns_mac_get_info(mac_cb, mac_cb->dev->of_node, mac_mode_idx); + ret = hns_mac_get_info(mac_cb); + if (ret) + return ret; mac_cb->vaddr = hns_mac_get_vaddr(dsaf_dev, mac_cb, mac_mode_idx); return 0; } +static int 
hns_mac_get_max_port_num(struct dsaf_device *dsaf_dev) +{ + if (HNS_DSAF_IS_DEBUG(dsaf_dev)) + return 1; + else + return DSAF_MAX_PORT_NUM; +} + /** * hns_mac_init - init mac * @dsaf_dev: dsa fabric device struct pointer - * retuen 0 - success , negative --fail + * return 0 - success , negative --fail */ int hns_mac_init(struct dsaf_device *dsaf_dev) { - int i; + bool found = false; int ret; - size_t size; + u32 port_id; + int max_port_num = hns_mac_get_max_port_num(dsaf_dev); struct hns_mac_cb *mac_cb; + struct fwnode_handle *child; - size = sizeof(struct hns_mac_cb) * DSAF_MAX_PORT_NUM_PER_CHIP; - dsaf_dev->mac_cb = devm_kzalloc(dsaf_dev->dev, size, GFP_KERNEL); - if (!dsaf_dev->mac_cb) - return -ENOMEM; + device_for_each_child_node(dsaf_dev->dev, child) { + ret = fwnode_property_read_u32(child, "port-id", &port_id); + if (ret) { + dev_err(dsaf_dev->dev, + "get port-id fail, ret=%d!\n", ret); + return ret; + } + if (port_id >= max_port_num) { + dev_err(dsaf_dev->dev, + "port-id(%u) out of range!\n", port_id); + return -EINVAL; + } + mac_cb = devm_kzalloc(dsaf_dev->dev, sizeof(*mac_cb), + GFP_KERNEL); + if (!mac_cb) + return -ENOMEM; + mac_cb->fw_port = child; + mac_cb->mac_id = (u8)port_id; + dsaf_dev->mac_cb[port_id] = mac_cb; + found = true; + } - for (i = 0; i < DSAF_MAX_PORT_NUM_PER_CHIP; i++) { - ret = hns_mac_get_cfg(dsaf_dev, i); + /* if don't get any port subnode from dsaf node + * will init all port then, this is compatible with the old dts + */ + if (!found) { + for (port_id = 0; port_id < max_port_num; port_id++) { + mac_cb = devm_kzalloc(dsaf_dev->dev, sizeof(*mac_cb), + GFP_KERNEL); + if (!mac_cb) + return -ENOMEM; + + mac_cb->mac_id = port_id; + dsaf_dev->mac_cb[port_id] = mac_cb; + } + } + /* init mac_cb for all port */ + for (port_id = 0; port_id < max_port_num; port_id++) { + mac_cb = dsaf_dev->mac_cb[port_id]; + if (!mac_cb) + continue; + + ret = hns_mac_get_cfg(dsaf_dev, mac_cb); if (ret) - goto free_mac_cb; - - mac_cb = &dsaf_dev->mac_cb[i]; + return ret; ret = hns_mac_init_ex(mac_cb); if (ret) - goto free_mac_cb; + return ret; } return 0; - -free_mac_cb: - dsaf_dev->mac_cb = NULL; - - return ret; } void hns_mac_uninit(struct dsaf_device *dsaf_dev) { - cpld_led_reset(dsaf_dev->mac_cb); - dsaf_dev->mac_cb = NULL; + int i; + int max_port_num = hns_mac_get_max_port_num(dsaf_dev); + + for (i = 0; i < max_port_num; i++) { + cpld_led_reset(dsaf_dev->mac_cb[i]); + dsaf_dev->mac_cb[i] = NULL; + } } int hns_mac_config_mac_loopback(struct hns_mac_cb *mac_cb, diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h index 823b6e78c8aa..45c5f16ae735 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h @@ -10,9 +10,10 @@ #ifndef _HNS_DSAF_MAC_H #define _HNS_DSAF_MAC_H -#include -#include #include +#include +#include +#include #include "hns_dsaf_main.h" struct dsaf_device; @@ -310,10 +311,12 @@ struct hns_mac_cb { struct device *dev; struct dsaf_device *dsaf_dev; struct mac_priv priv; + struct fwnode_handle *fw_port; u8 __iomem *vaddr; u8 __iomem *cpld_vaddr; u8 __iomem *sys_ctl_vaddr; u8 __iomem *serdes_vaddr; + struct regmap *serdes_ctrl; struct mac_entry_idx addr_entry_idx[DSAF_MAX_VM_NUM]; u8 sfp_prsnt; u8 cpld_led_value; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c index 98e0e8302190..33cdb215a547 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c +++ 
b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include #include @@ -37,8 +38,12 @@ int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev) u32 desc_num; u32 buf_size; u32 reset_offset = 0; + u32 res_idx = 0; const char *mode_str; + struct regmap *syscon; + struct resource *res; struct device_node *np = dsaf_dev->dev->of_node; + struct platform_device *pdev = to_platform_device(dsaf_dev->dev); if (of_device_is_compatible(np, "hisilicon,hns-dsaf-v1")) dsaf_dev->dsaf_ver = AE_VERSION_1; @@ -75,42 +80,68 @@ int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev) else dsaf_dev->dsaf_tc_mode = HRD_DSAF_4TC_MODE; - dsaf_dev->sc_base = of_iomap(np, 0); - if (!dsaf_dev->sc_base) { - dev_err(dsaf_dev->dev, - "%s of_iomap 0 fail!\n", dsaf_dev->ae_dev.name); - ret = -ENOMEM; - goto unmap_base_addr; + syscon = syscon_node_to_regmap( + of_parse_phandle(np, "subctrl-syscon", 0)); + if (IS_ERR_OR_NULL(syscon)) { + res = platform_get_resource(pdev, IORESOURCE_MEM, res_idx++); + if (!res) { + dev_err(dsaf_dev->dev, "subctrl info is needed!\n"); + return -ENOMEM; + } + dsaf_dev->sc_base = devm_ioremap_resource(&pdev->dev, res); + if (!dsaf_dev->sc_base) { + dev_err(dsaf_dev->dev, "subctrl can not map!\n"); + return -ENOMEM; + } + + res = platform_get_resource(pdev, IORESOURCE_MEM, res_idx++); + if (!res) { + dev_err(dsaf_dev->dev, "serdes-ctrl info is needed!\n"); + return -ENOMEM; + } + dsaf_dev->sds_base = devm_ioremap_resource(&pdev->dev, res); + if (!dsaf_dev->sds_base) { + dev_err(dsaf_dev->dev, "serdes-ctrl can not map!\n"); + return -ENOMEM; + } + } else { + dsaf_dev->sub_ctrl = syscon; } - dsaf_dev->sds_base = of_iomap(np, 1); - if (!dsaf_dev->sds_base) { - dev_err(dsaf_dev->dev, - "%s of_iomap 1 fail!\n", dsaf_dev->ae_dev.name); - ret = -ENOMEM; - goto unmap_base_addr; + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ppe-base"); + if (!res) { + res = platform_get_resource(pdev, IORESOURCE_MEM, res_idx++); + if (!res) { + dev_err(dsaf_dev->dev, "ppe-base info is needed!\n"); + return -ENOMEM; + } } - - dsaf_dev->ppe_base = of_iomap(np, 2); + dsaf_dev->ppe_base = devm_ioremap_resource(&pdev->dev, res); if (!dsaf_dev->ppe_base) { - dev_err(dsaf_dev->dev, - "%s of_iomap 2 fail!\n", dsaf_dev->ae_dev.name); - ret = -ENOMEM; - goto unmap_base_addr; + dev_err(dsaf_dev->dev, "ppe-base resource can not map!\n"); + return -ENOMEM; } + dsaf_dev->ppe_paddr = res->start; - dsaf_dev->io_base = of_iomap(np, 3); - if (!dsaf_dev->io_base) { - dev_err(dsaf_dev->dev, - "%s of_iomap 3 fail!\n", dsaf_dev->ae_dev.name); - ret = -ENOMEM; - goto unmap_base_addr; + if (!HNS_DSAF_IS_DEBUG(dsaf_dev)) { + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, + "dsaf-base"); + if (!res) { + res = platform_get_resource(pdev, IORESOURCE_MEM, + res_idx); + if (!res) { + dev_err(dsaf_dev->dev, + "dsaf-base info is needed!\n"); + return -ENOMEM; + } + } + dsaf_dev->io_base = devm_ioremap_resource(&pdev->dev, res); + if (!dsaf_dev->io_base) { + dev_err(dsaf_dev->dev, "dsaf-base resource can not map!\n"); + return -ENOMEM; + } } - dsaf_dev->cpld_base = of_iomap(np, 4); - if (!dsaf_dev->cpld_base) - dev_dbg(dsaf_dev->dev, "NO CPLD ADDR"); - ret = of_property_read_u32(np, "desc-num", &desc_num); if (ret < 0 || desc_num < HNS_DSAF_MIN_DESC_CNT || desc_num > HNS_DSAF_MAX_DESC_CNT) { @@ -725,7 +756,7 @@ void hns_dsaf_set_promisc_mode(struct dsaf_device *dsaf_dev, u32 en) void hns_dsaf_set_inner_lb(struct dsaf_device *dsaf_dev, u32 mac_id, u32 en) { if 
(AE_IS_VER1(dsaf_dev->dsaf_ver) || - dsaf_dev->mac_cb[mac_id].mac_type == HNAE_PORT_DEBUG) + dsaf_dev->mac_cb[mac_id]->mac_type == HNAE_PORT_DEBUG) return; dsaf_set_dev_bit(dsaf_dev, DSAFV2_SERDES_LBK_0_REG + 4 * mac_id, diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h index 47e768b9ec97..a48ef2644355 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h @@ -278,6 +278,8 @@ struct dsaf_device { u8 __iomem *ppe_base; u8 __iomem *io_base; u8 __iomem *cpld_base; + struct regmap *sub_ctrl; + phys_addr_t ppe_paddr; u32 desc_num; /* desc num per queue*/ u32 buf_size; /* ring buffer size */ @@ -290,7 +292,7 @@ struct dsaf_device { struct ppe_common_cb *ppe_common[DSAF_COMM_DEV_NUM]; struct rcb_common_cb *rcb_common[DSAF_COMM_DEV_NUM]; - struct hns_mac_cb *mac_cb; + struct hns_mac_cb *mac_cb[DSAF_MAX_PORT_NUM]; struct dsaf_hw_stats hw_stats[DSAF_NODE_NUM]; struct dsaf_int_stat int_stat; @@ -362,14 +364,6 @@ static inline void hns_dsaf_tbl_line_addr_cfg(struct dsaf_device *dsaf_dev, tab_line_addr); } -static inline int hns_dsaf_get_comm_idx_by_port(int port) -{ - if ((port < DSAF_COMM_CHN) || (port == DSAF_MAX_PORT_NUM_PER_CHIP)) - return 0; - else - return (port - DSAF_COMM_CHN + 1); -} - static inline struct hnae_vf_cb *hns_ae_get_vf_cb( struct hnae_handle *handle) { diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c index 67c8b9e8b90f..972eab0ad89d 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c @@ -12,6 +12,26 @@ #include "hns_dsaf_ppe.h" #include "hns_dsaf_reg.h" +static void dsaf_write_sub(struct dsaf_device *dsaf_dev, u32 reg, u32 val) +{ + if (dsaf_dev->sub_ctrl) + dsaf_write_syscon(dsaf_dev->sub_ctrl, reg, val); + else + dsaf_write_reg(dsaf_dev->sc_base, reg, val); +} + +static u32 dsaf_read_sub(struct dsaf_device *dsaf_dev, u32 reg) +{ + u32 ret; + + if (dsaf_dev->sub_ctrl) + ret = dsaf_read_syscon(dsaf_dev->sub_ctrl, reg); + else + ret = dsaf_read_reg(dsaf_dev->sc_base, reg); + + return ret; +} + void hns_cpld_set_led(struct hns_mac_cb *mac_cb, int link_status, u16 speed, int data) { @@ -95,10 +115,8 @@ void hns_dsaf_rst(struct dsaf_device *dsaf_dev, u32 val) nt_reg_addr = DSAF_SUB_SC_NT_RESET_DREQ_REG; } - dsaf_write_reg(dsaf_dev->sc_base, xbar_reg_addr, - RESET_REQ_OR_DREQ); - dsaf_write_reg(dsaf_dev->sc_base, nt_reg_addr, - RESET_REQ_OR_DREQ); + dsaf_write_sub(dsaf_dev, xbar_reg_addr, RESET_REQ_OR_DREQ); + dsaf_write_sub(dsaf_dev, nt_reg_addr, RESET_REQ_OR_DREQ); } void hns_dsaf_xge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, u32 val) @@ -121,7 +139,7 @@ void hns_dsaf_xge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, u32 val) else reg_addr = DSAF_SUB_SC_XGE_RESET_DREQ_REG; - dsaf_write_reg(dsaf_dev->sc_base, reg_addr, reg_val); + dsaf_write_sub(dsaf_dev, reg_addr, reg_val); } void hns_dsaf_xge_core_srst_by_port(struct dsaf_device *dsaf_dev, @@ -144,7 +162,7 @@ void hns_dsaf_xge_core_srst_by_port(struct dsaf_device *dsaf_dev, else reg_addr = DSAF_SUB_SC_XGE_RESET_DREQ_REG; - dsaf_write_reg(dsaf_dev->sc_base, reg_addr, reg_val); + dsaf_write_sub(dsaf_dev, reg_addr, reg_val); } void hns_dsaf_ge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, u32 val) @@ -164,20 +182,16 @@ void hns_dsaf_ge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, u32 val) reg_val_2 = 0x2082082 << port; if (val == 0) { - 
dsaf_write_reg(dsaf_dev->sc_base, - DSAF_SUB_SC_GE_RESET_REQ1_REG, + dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_GE_RESET_REQ1_REG, reg_val_1); - dsaf_write_reg(dsaf_dev->sc_base, - DSAF_SUB_SC_GE_RESET_REQ0_REG, + dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_GE_RESET_REQ0_REG, reg_val_2); } else { - dsaf_write_reg(dsaf_dev->sc_base, - DSAF_SUB_SC_GE_RESET_DREQ0_REG, + dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_GE_RESET_DREQ0_REG, reg_val_2); - dsaf_write_reg(dsaf_dev->sc_base, - DSAF_SUB_SC_GE_RESET_DREQ1_REG, + dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_GE_RESET_DREQ1_REG, reg_val_1); } } else { @@ -185,20 +199,16 @@ void hns_dsaf_ge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, u32 val) reg_val_2 = 0x100 << dsaf_dev->reset_offset; if (val == 0) { - dsaf_write_reg(dsaf_dev->sc_base, - DSAF_SUB_SC_GE_RESET_REQ1_REG, + dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_GE_RESET_REQ1_REG, reg_val_1); - dsaf_write_reg(dsaf_dev->sc_base, - DSAF_SUB_SC_PPE_RESET_REQ_REG, + dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_PPE_RESET_REQ_REG, reg_val_2); } else { - dsaf_write_reg(dsaf_dev->sc_base, - DSAF_SUB_SC_GE_RESET_DREQ1_REG, + dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_GE_RESET_DREQ1_REG, reg_val_1); - dsaf_write_reg(dsaf_dev->sc_base, - DSAF_SUB_SC_PPE_RESET_DREQ_REG, + dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_PPE_RESET_DREQ_REG, reg_val_2); } } @@ -220,7 +230,7 @@ void hns_ppe_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, u32 val) else reg_addr = DSAF_SUB_SC_PPE_RESET_DREQ_REG; - dsaf_write_reg(dsaf_dev->sc_base, reg_addr, reg_val); + dsaf_write_sub(dsaf_dev, reg_addr, reg_val); } void hns_ppe_com_srst(struct ppe_common_cb *ppe_common, u32 val) @@ -245,7 +255,7 @@ void hns_ppe_com_srst(struct ppe_common_cb *ppe_common, u32 val) reg_addr = DSAF_SUB_SC_PPE_RESET_DREQ_REG; } - dsaf_write_reg(dsaf_dev->sc_base, reg_addr, reg_val); + dsaf_write_sub(dsaf_dev, reg_addr, reg_val); } /** @@ -260,7 +270,6 @@ phy_interface_t hns_mac_get_phy_if(struct hns_mac_cb *mac_cb) u32 shift; u32 phy_offset; bool is_ver1 = AE_IS_VER1(mac_cb->dsaf_dev->dsaf_ver); - void __iomem *sys_ctl_vaddr = mac_cb->sys_ctl_vaddr; int mac_id = mac_cb->mac_id; phy_interface_t phy_if = PHY_INTERFACE_MODE_NA; @@ -269,7 +278,7 @@ phy_interface_t hns_mac_get_phy_if(struct hns_mac_cb *mac_cb) } else if (mac_id >= 0 && mac_id <= 3 && !HNS_DSAF_IS_DEBUG(mac_cb->dsaf_dev)) { reg = is_ver1 ? HNS_MAC_HILINK4_REG : HNS_MAC_HILINK4V2_REG; - mode = dsaf_read_reg(sys_ctl_vaddr, reg); + mode = dsaf_read_sub(mac_cb->dsaf_dev, reg); /* mac_id 0, 1, 2, 3 ---> hilink4 lane 0, 1, 2, 3 */ shift = is_ver1 ? 0 : mac_id; if (dsaf_get_bit(mode, shift)) @@ -278,7 +287,7 @@ phy_interface_t hns_mac_get_phy_if(struct hns_mac_cb *mac_cb) phy_if = PHY_INTERFACE_MODE_SGMII; } else { reg = is_ver1 ? 
HNS_MAC_HILINK3_REG : HNS_MAC_HILINK3V2_REG; - mode = dsaf_read_reg(sys_ctl_vaddr, reg); + mode = dsaf_read_sub(mac_cb->dsaf_dev, reg); /* mac_id 4, 5,---> hilink3 lane 2, 3 * debug port 0(6), 1(7) ---> hilink3 lane 0, 1 */ @@ -328,7 +337,14 @@ int hns_mac_config_sds_loopback(struct hns_mac_cb *mac_cb, u8 en) pr_info("no sfp in this eth\n"); } - dsaf_set_reg_field(base_addr, reg_offset, 1ull << 10, 10, !!en); + if (mac_cb->serdes_ctrl) { + u32 origin = dsaf_read_syscon(mac_cb->serdes_ctrl, reg_offset); + + dsaf_set_field(origin, 1ull << 10, 10, !!en); + dsaf_write_syscon(mac_cb->serdes_ctrl, reg_offset, origin); + } else { + dsaf_set_reg_field(base_addr, reg_offset, 1ull << 10, 10, !!en); + } return 0; } diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c index 3f59a8a30c86..8cd151a5245e 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c @@ -61,22 +61,10 @@ void hns_ppe_set_indir_table(struct hns_ppe_cb *ppe_cb, } } -static void __iomem *hns_ppe_common_get_ioaddr( - struct ppe_common_cb *ppe_common) +static void __iomem * +hns_ppe_common_get_ioaddr(struct ppe_common_cb *ppe_common) { - void __iomem *base_addr; - - int idx = ppe_common->comm_index; - - if (!HNS_DSAF_IS_DEBUG(ppe_common->dsaf_dev)) - base_addr = ppe_common->dsaf_dev->ppe_base - + PPE_COMMON_REG_OFFSET; - else - base_addr = ppe_common->dsaf_dev->sds_base - + (idx - 1) * HNS_DSAF_DEBUG_NW_REG_OFFSET - + PPE_COMMON_REG_OFFSET; - - return base_addr; + return ppe_common->dsaf_dev->ppe_base + PPE_COMMON_REG_OFFSET; } /** @@ -124,32 +112,8 @@ void hns_ppe_common_free_cfg(struct dsaf_device *dsaf_dev, u32 comm_index) static void __iomem *hns_ppe_get_iobase(struct ppe_common_cb *ppe_common, int ppe_idx) { - void __iomem *base_addr; - int common_idx = ppe_common->comm_index; - if (ppe_common->ppe_mode == PPE_COMMON_MODE_SERVICE) { - base_addr = ppe_common->dsaf_dev->ppe_base + - ppe_idx * PPE_REG_OFFSET; - - } else { - base_addr = ppe_common->dsaf_dev->sds_base + - (common_idx - 1) * HNS_DSAF_DEBUG_NW_REG_OFFSET; - } - - return base_addr; -} - -static int hns_ppe_get_port(struct ppe_common_cb *ppe_common, int idx) -{ - int port; - - if (ppe_common->ppe_mode == PPE_COMMON_MODE_SERVICE) - port = idx; - else - port = HNS_PPE_SERVICE_NW_ENGINE_NUM - + ppe_common->comm_index - 1; - - return port; + return ppe_common->dsaf_dev->ppe_base + ppe_idx * PPE_REG_OFFSET; } static void hns_ppe_get_cfg(struct ppe_common_cb *ppe_common) @@ -164,7 +128,6 @@ static void hns_ppe_get_cfg(struct ppe_common_cb *ppe_common) ppe_cb->next = NULL; ppe_cb->ppe_common_cb = ppe_common; ppe_cb->index = i; - ppe_cb->port = hns_ppe_get_port(ppe_common, i); ppe_cb->io_base = hns_ppe_get_iobase(ppe_common, i); ppe_cb->virq = 0; } @@ -318,7 +281,7 @@ static void hns_ppe_exc_irq_en(struct hns_ppe_cb *ppe_cb, int en) static void hns_ppe_init_hw(struct hns_ppe_cb *ppe_cb) { struct ppe_common_cb *ppe_common_cb = ppe_cb->ppe_common_cb; - u32 port = ppe_cb->port; + u32 port = ppe_cb->index; struct dsaf_device *dsaf_dev = ppe_common_cb->dsaf_dev; int i; @@ -377,7 +340,8 @@ void hns_ppe_uninit_ex(struct ppe_common_cb *ppe_common) u32 i; for (i = 0; i < ppe_common->ppe_num; i++) { - hns_ppe_uninit_hw(&ppe_common->ppe_cb[i]); + if (ppe_common->dsaf_dev->mac_cb[i]) + hns_ppe_uninit_hw(&ppe_common->ppe_cb[i]); memset(&ppe_common->ppe_cb[i], 0, sizeof(struct hns_ppe_cb)); } } @@ -410,8 +374,11 @@ void hns_ppe_reset_common(struct dsaf_device *dsaf_dev, 
u8 ppe_common_index) if (ret) return; - for (i = 0; i < ppe_common->ppe_num; i++) - hns_ppe_init_hw(&ppe_common->ppe_cb[i]); + for (i = 0; i < ppe_common->ppe_num; i++) { + /* We only need to initiate ppe when the port exists */ + if (dsaf_dev->mac_cb[i]) + hns_ppe_init_hw(&ppe_common->ppe_cb[i]); + } ret = hns_rcb_common_init_hw(dsaf_dev->rcb_common[ppe_common_index]); if (ret) diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h index e9c0ec2fa0dd..9d8e643e8aa6 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h @@ -80,7 +80,6 @@ struct hns_ppe_cb { struct hns_ppe_hw_stats hw_stats; u8 index; /* index in a ppe common device */ - u8 port; /* port id in dsaf */ void __iomem *io_base; int virq; u32 rss_indir_table[HNS_PPEV2_RSS_IND_TBL_SIZE]; /*shadow indir tab */ diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c index 054f391a3eeb..4ef6d23d998e 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c @@ -430,17 +430,8 @@ static void hns_rcb_ring_pair_get_cfg(struct ring_pair_cb *ring_pair_cb) static int hns_rcb_get_port_in_comm( struct rcb_common_cb *rcb_common, int ring_idx) { - int port; - int q_num; - if (!HNS_DSAF_IS_DEBUG(rcb_common->dsaf_dev)) { - q_num = (int)rcb_common->max_q_per_vf * rcb_common->max_vfn; - port = ring_idx / q_num; - } else { - port = 0; /* config debug-ports port_id_in_comm to 0*/ - } - - return port; + return ring_idx / (rcb_common->max_q_per_vf * rcb_common->max_vfn); } #define SERVICE_RING_IRQ_IDX(v1) \ @@ -658,42 +649,18 @@ int hns_rcb_get_ring_num(struct dsaf_device *dsaf_dev) } } -void __iomem *hns_rcb_common_get_vaddr(struct dsaf_device *dsaf_dev, - int comm_index) +void __iomem *hns_rcb_common_get_vaddr(struct rcb_common_cb *rcb_common) { - void __iomem *base_addr; + struct dsaf_device *dsaf_dev = rcb_common->dsaf_dev; - if (!HNS_DSAF_IS_DEBUG(dsaf_dev)) - base_addr = dsaf_dev->ppe_base + RCB_COMMON_REG_OFFSET; - else - base_addr = dsaf_dev->sds_base - + (comm_index - 1) * HNS_DSAF_DEBUG_NW_REG_OFFSET - + RCB_COMMON_REG_OFFSET; - - return base_addr; + return dsaf_dev->ppe_base + RCB_COMMON_REG_OFFSET; } -static phys_addr_t hns_rcb_common_get_paddr(struct dsaf_device *dsaf_dev, - int comm_index) +static phys_addr_t hns_rcb_common_get_paddr(struct rcb_common_cb *rcb_common) { - struct device_node *np = dsaf_dev->dev->of_node; - phys_addr_t phy_addr; - const __be32 *tmp_addr; - u64 addr_offset = 0; - u64 size = 0; - int index = 0; + struct dsaf_device *dsaf_dev = rcb_common->dsaf_dev; - if (!HNS_DSAF_IS_DEBUG(dsaf_dev)) { - index = 2; - addr_offset = RCB_COMMON_REG_OFFSET; - } else { - index = 1; - addr_offset = (comm_index - 1) * HNS_DSAF_DEBUG_NW_REG_OFFSET + - RCB_COMMON_REG_OFFSET; - } - tmp_addr = of_get_address(np, index, &size, NULL); - phy_addr = of_translate_address(np, tmp_addr); - return phy_addr + addr_offset; + return dsaf_dev->ppe_paddr + RCB_COMMON_REG_OFFSET; } int hns_rcb_common_get_cfg(struct dsaf_device *dsaf_dev, @@ -722,8 +689,8 @@ int hns_rcb_common_get_cfg(struct dsaf_device *dsaf_dev, rcb_common->max_vfn = max_vfn; rcb_common->max_q_per_vf = max_q_per_vf; - rcb_common->io_base = hns_rcb_common_get_vaddr(dsaf_dev, comm_index); - rcb_common->phy_base = hns_rcb_common_get_paddr(dsaf_dev, comm_index); + rcb_common->io_base = hns_rcb_common_get_vaddr(rcb_common); + rcb_common->phy_base = 
hns_rcb_common_get_paddr(rcb_common); dsaf_dev->rcb_common[comm_index] = rcb_common; return 0; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h index 6a03c94821d5..7c3b5103d151 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h @@ -11,16 +11,15 @@ #define _DSAF_REG_H_ #include -#define HNS_DEBUG_RING_IRQ_IDX 0 -#define HNS_SERVICE_RING_IRQ_IDX 59 -#define HNSV2_SERVICE_RING_IRQ_IDX 25 +#define HNS_DEBUG_RING_IRQ_IDX 0 +#define HNS_SERVICE_RING_IRQ_IDX 59 +#define HNSV2_SERVICE_RING_IRQ_IDX 25 -#define DSAF_MAX_PORT_NUM_PER_CHIP 8 -#define DSAF_SERVICE_PORT_NUM_PER_DSAF 6 -#define DSAF_MAX_VM_NUM 128 +#define DSAF_MAX_PORT_NUM 6 +#define DSAF_MAX_VM_NUM 128 -#define DSAF_COMM_DEV_NUM 3 -#define DSAF_PPE_INODE_BASE 6 +#define DSAF_COMM_DEV_NUM 1 +#define DSAF_PPE_INODE_BASE 6 #define DSAF_DEBUG_NW_NUM 2 #define DSAF_SERVICE_NW_NUM 6 #define DSAF_COMM_CHN DSAF_SERVICE_NW_NUM From 31d4446dca9112ce7b9eada8e6d631a7580e2feb Mon Sep 17 00:00:00 2001 From: "Yisen.Zhuang\\(Zhuangyuzeng\\)" Date: Sat, 23 Apr 2016 17:05:12 +0800 Subject: [PATCH 1004/1649] net: hns: add attribute cpld_ctrl for dsaf port node This patch adds attribute cpld_ctrl for dsaf port node, parses the syscon for mac_cb from dts, and changes the method of access the cpld related registers through syscon. Signed-off-by: Daode Huang Signed-off-by: Yisen Zhuang Signed-off-by: David S. Miller --- .../net/ethernet/hisilicon/hns/hns_ae_adapt.c | 2 +- .../net/ethernet/hisilicon/hns/hns_dsaf_mac.c | 38 ++++++++++--------- .../net/ethernet/hisilicon/hns/hns_dsaf_mac.h | 3 +- .../ethernet/hisilicon/hns/hns_dsaf_main.c | 5 --- .../ethernet/hisilicon/hns/hns_dsaf_main.h | 1 - .../ethernet/hisilicon/hns/hns_dsaf_misc.c | 36 +++++++++++++----- 6 files changed, 51 insertions(+), 34 deletions(-) diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c index 58341dad8042..7a757e88c89a 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c @@ -664,7 +664,7 @@ void hns_ae_update_led_status(struct hnae_handle *handle) assert(handle); mac_cb = hns_get_mac_cb(handle); - if (!mac_cb->cpld_vaddr) + if (!mac_cb->cpld_ctrl) return; hns_set_led_opt(mac_cb); } diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c index a731777415dc..7073ca23af56 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c @@ -82,17 +82,6 @@ static enum mac_mode hns_get_enet_interface(const struct hns_mac_cb *mac_cb) } } -int hns_mac_get_sfp_prsnt(struct hns_mac_cb *mac_cb, int *sfp_prsnt) -{ - if (!mac_cb->cpld_vaddr) - return -ENODEV; - - *sfp_prsnt = !dsaf_read_b((u8 *)mac_cb->cpld_vaddr - + MAC_SFP_PORT_OFFSET); - - return 0; -} - void hns_mac_get_link_status(struct hns_mac_cb *mac_cb, u32 *link_status) { struct mac_driver *mac_ctrl_drv; @@ -658,6 +647,8 @@ static int hns_mac_get_info(struct hns_mac_cb *mac_cb) { struct device_node *np = mac_cb->dev->of_node; struct regmap *syscon; + u32 ret; + mac_cb->link = false; mac_cb->half_duplex = false; mac_cb->speed = mac_phy_to_speed[mac_cb->phy_if]; @@ -701,6 +692,23 @@ static int hns_mac_get_info(struct hns_mac_cb *mac_cb) return -EINVAL; } mac_cb->serdes_ctrl = syscon; + + syscon = syscon_node_to_regmap( + of_parse_phandle(to_of_node(mac_cb->fw_port), + 
"cpld-syscon", 0)); + if (IS_ERR_OR_NULL(syscon)) { + dev_dbg(mac_cb->dev, "no cpld-syscon found!\n"); + mac_cb->cpld_ctrl = NULL; + } else { + mac_cb->cpld_ctrl = syscon; + ret = fwnode_property_read_u32(mac_cb->fw_port, + "cpld-ctrl-reg", + &mac_cb->cpld_ctrl_reg); + if (ret) { + dev_err(mac_cb->dev, "get cpld-ctrl-reg fail!\n"); + return ret; + } + } return 0; } @@ -751,11 +759,6 @@ int hns_mac_get_cfg(struct dsaf_device *dsaf_dev, struct hns_mac_cb *mac_cb) mac_cb->sys_ctl_vaddr = dsaf_dev->sc_base; mac_cb->serdes_vaddr = dsaf_dev->sds_base; - if (dsaf_dev->cpld_base && !HNS_DSAF_IS_DEBUG(dsaf_dev)) { - mac_cb->cpld_vaddr = dsaf_dev->cpld_base + - mac_cb->mac_id * CPLD_ADDR_PORT_OFFSET; - cpld_led_reset(mac_cb); - } mac_cb->sfp_prsnt = 0; mac_cb->txpkt_for_led = 0; mac_cb->rxpkt_for_led = 0; @@ -780,6 +783,7 @@ int hns_mac_get_cfg(struct dsaf_device *dsaf_dev, struct hns_mac_cb *mac_cb) if (ret) return ret; + cpld_led_reset(mac_cb); mac_cb->vaddr = hns_mac_get_vaddr(dsaf_dev, mac_cb, mac_mode_idx); return 0; @@ -956,7 +960,7 @@ void hns_set_led_opt(struct hns_mac_cb *mac_cb) int hns_cpld_led_set_id(struct hns_mac_cb *mac_cb, enum hnae_led_state status) { - if (!mac_cb || !mac_cb->cpld_vaddr) + if (!mac_cb || !mac_cb->cpld_ctrl) return 0; return cpld_set_led_id(mac_cb, status); diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h index 45c5f16ae735..719816bf4606 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h @@ -313,10 +313,11 @@ struct hns_mac_cb { struct mac_priv priv; struct fwnode_handle *fw_port; u8 __iomem *vaddr; - u8 __iomem *cpld_vaddr; u8 __iomem *sys_ctl_vaddr; u8 __iomem *serdes_vaddr; struct regmap *serdes_ctrl; + struct regmap *cpld_ctrl; + u32 cpld_ctrl_reg; struct mac_entry_idx addr_entry_idx[DSAF_MAX_VM_NUM]; u8 sfp_prsnt; u8 cpld_led_value; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c index 33cdb215a547..1c2ddb25e776 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c @@ -189,8 +189,6 @@ unmap_base_addr: iounmap(dsaf_dev->sds_base); if (dsaf_dev->sc_base) iounmap(dsaf_dev->sc_base); - if (dsaf_dev->cpld_base) - iounmap(dsaf_dev->cpld_base); return ret; } @@ -207,9 +205,6 @@ static void hns_dsaf_free_cfg(struct dsaf_device *dsaf_dev) if (dsaf_dev->sc_base) iounmap(dsaf_dev->sc_base); - - if (dsaf_dev->cpld_base) - iounmap(dsaf_dev->cpld_base); } /** diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h index a48ef2644355..f0502ba0a677 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h @@ -277,7 +277,6 @@ struct dsaf_device { u8 __iomem *sds_base; u8 __iomem *ppe_base; u8 __iomem *io_base; - u8 __iomem *cpld_base; struct regmap *sub_ctrl; phys_addr_t ppe_paddr; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c index 972eab0ad89d..c549aa832df7 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c @@ -42,8 +42,8 @@ void hns_cpld_set_led(struct hns_mac_cb *mac_cb, int link_status, pr_err("sfp_led_opt mac_dev is null!\n"); return; } - if (!mac_cb->cpld_vaddr) { - dev_err(mac_cb->dev, "mac_id=%d, cpld_vaddr is null !\n", + if 
(!mac_cb->cpld_ctrl) { + dev_err(mac_cb->dev, "mac_id=%d, cpld syscon is null !\n", mac_cb->mac_id); return; } @@ -60,21 +60,24 @@ void hns_cpld_set_led(struct hns_mac_cb *mac_cb, int link_status, dsaf_set_bit(value, DSAF_LED_DATA_B, data); if (value != mac_cb->cpld_led_value) { - dsaf_write_b(mac_cb->cpld_vaddr, value); + dsaf_write_syscon(mac_cb->cpld_ctrl, + mac_cb->cpld_ctrl_reg, value); mac_cb->cpld_led_value = value; } } else { - dsaf_write_b(mac_cb->cpld_vaddr, CPLD_LED_DEFAULT_VALUE); + dsaf_write_syscon(mac_cb->cpld_ctrl, mac_cb->cpld_ctrl_reg, + CPLD_LED_DEFAULT_VALUE); mac_cb->cpld_led_value = CPLD_LED_DEFAULT_VALUE; } } void cpld_led_reset(struct hns_mac_cb *mac_cb) { - if (!mac_cb || !mac_cb->cpld_vaddr) + if (!mac_cb || !mac_cb->cpld_ctrl) return; - dsaf_write_b(mac_cb->cpld_vaddr, CPLD_LED_DEFAULT_VALUE); + dsaf_write_syscon(mac_cb->cpld_ctrl, mac_cb->cpld_ctrl_reg, + CPLD_LED_DEFAULT_VALUE); mac_cb->cpld_led_value = CPLD_LED_DEFAULT_VALUE; } @@ -83,15 +86,19 @@ int cpld_set_led_id(struct hns_mac_cb *mac_cb, { switch (status) { case HNAE_LED_ACTIVE: - mac_cb->cpld_led_value = dsaf_read_b(mac_cb->cpld_vaddr); + mac_cb->cpld_led_value = + dsaf_read_syscon(mac_cb->cpld_ctrl, + mac_cb->cpld_ctrl_reg); dsaf_set_bit(mac_cb->cpld_led_value, DSAF_LED_ANCHOR_B, CPLD_LED_ON_VALUE); - dsaf_write_b(mac_cb->cpld_vaddr, mac_cb->cpld_led_value); + dsaf_write_syscon(mac_cb->cpld_ctrl, mac_cb->cpld_ctrl_reg, + mac_cb->cpld_led_value); return 2; case HNAE_LED_INACTIVE: dsaf_set_bit(mac_cb->cpld_led_value, DSAF_LED_ANCHOR_B, CPLD_LED_DEFAULT_VALUE); - dsaf_write_b(mac_cb->cpld_vaddr, mac_cb->cpld_led_value); + dsaf_write_syscon(mac_cb->cpld_ctrl, mac_cb->cpld_ctrl_reg, + mac_cb->cpld_led_value); break; default: break; @@ -301,6 +308,17 @@ phy_interface_t hns_mac_get_phy_if(struct hns_mac_cb *mac_cb) return phy_if; } +int hns_mac_get_sfp_prsnt(struct hns_mac_cb *mac_cb, int *sfp_prsnt) +{ + if (!mac_cb->cpld_ctrl) + return -ENODEV; + + *sfp_prsnt = !dsaf_read_syscon(mac_cb->cpld_ctrl, mac_cb->cpld_ctrl_reg + + MAC_SFP_PORT_OFFSET); + + return 0; +} + /** * hns_mac_config_sds_loopback - set loop back for serdes * @mac_cb: mac control block From 850bfa3b78ea8849fef78ed74f5f2ccf947db0ca Mon Sep 17 00:00:00 2001 From: "Yisen.Zhuang\\(Zhuangyuzeng\\)" Date: Sat, 23 Apr 2016 17:05:13 +0800 Subject: [PATCH 1005/1649] net: hns: add attribute port-rst-offset for dsaf port node The reset offset for each port in a dsaf is different. The current code is not so readability. This patch adds configuration named port-rst-offset to make the code simple and more readability. If this attribute doesn't exist, default value of this attribute is equal to its port index. Signed-off-by: Yisen Zhuang Signed-off-by: David S. 
Miller --- .../net/ethernet/hisilicon/hns/hns_dsaf_mac.c | 10 ++++++++ .../net/ethernet/hisilicon/hns/hns_dsaf_mac.h | 1 + .../ethernet/hisilicon/hns/hns_dsaf_misc.c | 25 ++++++------------- 3 files changed, 19 insertions(+), 17 deletions(-) diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c index 7073ca23af56..52d757df623e 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c @@ -664,6 +664,7 @@ static int hns_mac_get_info(struct hns_mac_cb *mac_cb) mac_cb->max_frm = MAC_DEFAULT_MTU; mac_cb->tx_pause_frm_time = MAC_DEFAULT_PAUSE_TIME; + mac_cb->port_rst_off = mac_cb->mac_id; /* if the dsaf node doesn't contain a port subnode, get phy-handle * from dsaf node @@ -693,6 +694,15 @@ static int hns_mac_get_info(struct hns_mac_cb *mac_cb) } mac_cb->serdes_ctrl = syscon; + ret = fwnode_property_read_u32(mac_cb->fw_port, + "port-rst-offset", + &mac_cb->port_rst_off); + if (ret) { + dev_dbg(mac_cb->dev, + "mac%d port-rst-offset not found, use default value.\n", + mac_cb->mac_id); + } + syscon = syscon_node_to_regmap( of_parse_phandle(to_of_node(mac_cb->fw_port), "cpld-syscon", 0)); diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h index 719816bf4606..7be71043133b 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h @@ -318,6 +318,7 @@ struct hns_mac_cb { struct regmap *serdes_ctrl; struct regmap *cpld_ctrl; u32 cpld_ctrl_reg; + u32 port_rst_off; struct mac_entry_idx addr_entry_idx[DSAF_MAX_VM_NUM]; u8 sfp_prsnt; u8 cpld_led_value; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c index c549aa832df7..e549a11420b4 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c @@ -135,11 +135,7 @@ void hns_dsaf_xge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, u32 val) return; reg_val |= RESET_REQ_OR_DREQ; - - if (!HNS_DSAF_IS_DEBUG(dsaf_dev)) - reg_val |= 0x2082082 << port; - else - reg_val |= 0x2082082 << (dsaf_dev->reset_offset + 6); + reg_val |= 0x2082082 << dsaf_dev->mac_cb[port]->port_rst_off; if (val == 0) reg_addr = DSAF_SUB_SC_XGE_RESET_REQ_REG; @@ -158,11 +154,8 @@ void hns_dsaf_xge_core_srst_by_port(struct dsaf_device *dsaf_dev, if (port >= DSAF_XGE_NUM) return; - if (!HNS_DSAF_IS_DEBUG(dsaf_dev)) - reg_val |= XGMAC_TRX_CORE_SRST_M << port; - else - reg_val |= XGMAC_TRX_CORE_SRST_M << - (dsaf_dev->reset_offset + 6); + reg_val |= XGMAC_TRX_CORE_SRST_M + << dsaf_dev->mac_cb[port]->port_rst_off; if (val == 0) reg_addr = DSAF_SUB_SC_XGE_RESET_REQ_REG; @@ -176,17 +169,19 @@ void hns_dsaf_ge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, u32 val) { u32 reg_val_1; u32 reg_val_2; + u32 port_rst_off; if (port >= DSAF_GE_NUM) return; if (!HNS_DSAF_IS_DEBUG(dsaf_dev)) { reg_val_1 = 0x1 << port; + port_rst_off = dsaf_dev->mac_cb[port]->port_rst_off; /* there is difference between V1 and V2 in register.*/ if (AE_IS_VER1(dsaf_dev->dsaf_ver)) - reg_val_2 = 0x1041041 << port; + reg_val_2 = 0x1041041 << port_rst_off; else - reg_val_2 = 0x2082082 << port; + reg_val_2 = 0x2082082 << port_rst_off; if (val == 0) { dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_GE_RESET_REQ1_REG, @@ -226,11 +221,7 @@ void hns_ppe_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, u32 val) u32 reg_val = 0; u32 reg_addr; - if 
(!HNS_DSAF_IS_DEBUG(dsaf_dev)) - reg_val |= RESET_REQ_OR_DREQ << port; - else - reg_val |= RESET_REQ_OR_DREQ << - (dsaf_dev->reset_offset + 6); + reg_val |= RESET_REQ_OR_DREQ << dsaf_dev->mac_cb[port]->port_rst_off; if (val == 0) reg_addr = DSAF_SUB_SC_PPE_RESET_REQ_REG; From 0d768fc62def08628affa4a2abe4f319926027a9 Mon Sep 17 00:00:00 2001 From: "Yisen.Zhuang\\(Zhuangyuzeng\\)" Date: Sat, 23 Apr 2016 17:05:14 +0800 Subject: [PATCH 1006/1649] net: hns: add attribute port-mode-offset for dsaf port node Port mode offset for each dsaf port is different. The current code is not so readability. This patch adds configuration named port-mode-offset to make the code simple and more readability. If port-mode-offset isn't exists, default value 0 will be used. Signed-off-by: Daode Huang Signed-off-by: Yisen Zhuang Signed-off-by: David S. Miller --- .../net/ethernet/hisilicon/hns/hns_dsaf_mac.c | 10 +++++ .../net/ethernet/hisilicon/hns/hns_dsaf_mac.h | 1 + .../ethernet/hisilicon/hns/hns_dsaf_misc.c | 44 ++++++++----------- 3 files changed, 30 insertions(+), 25 deletions(-) diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c index 52d757df623e..1c8fdd316ca0 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c @@ -665,6 +665,7 @@ static int hns_mac_get_info(struct hns_mac_cb *mac_cb) mac_cb->max_frm = MAC_DEFAULT_MTU; mac_cb->tx_pause_frm_time = MAC_DEFAULT_PAUSE_TIME; mac_cb->port_rst_off = mac_cb->mac_id; + mac_cb->port_mode_off = 0; /* if the dsaf node doesn't contain a port subnode, get phy-handle * from dsaf node @@ -703,6 +704,15 @@ static int hns_mac_get_info(struct hns_mac_cb *mac_cb) mac_cb->mac_id); } + ret = fwnode_property_read_u32(mac_cb->fw_port, + "port-mode-offset", + &mac_cb->port_mode_off); + if (ret) { + dev_dbg(mac_cb->dev, + "mac%d port-mode-offset not found, use default value.\n", + mac_cb->mac_id); + } + syscon = syscon_node_to_regmap( of_parse_phandle(to_of_node(mac_cb->fw_port), "cpld-syscon", 0)); diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h index 7be71043133b..97ce9a750aaf 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h @@ -319,6 +319,7 @@ struct hns_mac_cb { struct regmap *cpld_ctrl; u32 cpld_ctrl_reg; u32 port_rst_off; + u32 port_mode_off; struct mac_entry_idx addr_entry_idx[DSAF_MAX_VM_NUM]; u8 sfp_prsnt; u8 cpld_led_value; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c index e549a11420b4..a837bb9e3839 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c @@ -265,37 +265,31 @@ phy_interface_t hns_mac_get_phy_if(struct hns_mac_cb *mac_cb) { u32 mode; u32 reg; - u32 shift; - u32 phy_offset; bool is_ver1 = AE_IS_VER1(mac_cb->dsaf_dev->dsaf_ver); int mac_id = mac_cb->mac_id; - phy_interface_t phy_if = PHY_INTERFACE_MODE_NA; + phy_interface_t phy_if; - if (is_ver1 && HNS_DSAF_IS_DEBUG(mac_cb->dsaf_dev)) { - phy_if = PHY_INTERFACE_MODE_SGMII; - } else if (mac_id >= 0 && mac_id <= 3 && - !HNS_DSAF_IS_DEBUG(mac_cb->dsaf_dev)) { - reg = is_ver1 ? HNS_MAC_HILINK4_REG : HNS_MAC_HILINK4V2_REG; - mode = dsaf_read_sub(mac_cb->dsaf_dev, reg); - /* mac_id 0, 1, 2, 3 ---> hilink4 lane 0, 1, 2, 3 */ - shift = is_ver1 ? 
0 : mac_id; - if (dsaf_get_bit(mode, shift)) - phy_if = PHY_INTERFACE_MODE_XGMII; + if (is_ver1) { + if (HNS_DSAF_IS_DEBUG(mac_cb->dsaf_dev)) + return PHY_INTERFACE_MODE_SGMII; + + if (mac_id >= 0 && mac_id <= 3) + reg = HNS_MAC_HILINK4_REG; else - phy_if = PHY_INTERFACE_MODE_SGMII; - } else { - reg = is_ver1 ? HNS_MAC_HILINK3_REG : HNS_MAC_HILINK3V2_REG; - mode = dsaf_read_sub(mac_cb->dsaf_dev, reg); - /* mac_id 4, 5,---> hilink3 lane 2, 3 - * debug port 0(6), 1(7) ---> hilink3 lane 0, 1 - */ - phy_offset = mac_cb->dsaf_dev->reset_offset - 1; - shift = is_ver1 ? 0 : mac_id >= 4 ? mac_id - 2 : phy_offset; - if (dsaf_get_bit(mode, shift)) - phy_if = PHY_INTERFACE_MODE_XGMII; + reg = HNS_MAC_HILINK3_REG; + } else{ + if (!HNS_DSAF_IS_DEBUG(mac_cb->dsaf_dev) && mac_id <= 3) + reg = HNS_MAC_HILINK4V2_REG; else - phy_if = PHY_INTERFACE_MODE_SGMII; + reg = HNS_MAC_HILINK3V2_REG; } + + mode = dsaf_read_sub(mac_cb->dsaf_dev, reg); + if (dsaf_get_bit(mode, mac_cb->port_mode_off)) + phy_if = PHY_INTERFACE_MODE_XGMII; + else + phy_if = PHY_INTERFACE_MODE_SGMII; + return phy_if; } From 2fc695a1bb00141a0f5df74e7a19e125f4babaa5 Mon Sep 17 00:00:00 2001 From: "Yisen.Zhuang\\(Zhuangyuzeng\\)" Date: Sat, 23 Apr 2016 17:05:15 +0800 Subject: [PATCH 1007/1649] Documentation: Bindings: Update DT binding for separating dsaf dev support Because debug dsaf port was separated from service dsaf port, this patch updates the related information of DT binding. Signed-off-by: Yisen Zhuang Signed-off-by: David S. Miller --- .../bindings/net/hisilicon-hns-dsaf.txt | 59 +++++++++++++++---- 1 file changed, 49 insertions(+), 10 deletions(-) diff --git a/Documentation/devicetree/bindings/net/hisilicon-hns-dsaf.txt b/Documentation/devicetree/bindings/net/hisilicon-hns-dsaf.txt index ecacfa44b1eb..5ccd4f002a67 100644 --- a/Documentation/devicetree/bindings/net/hisilicon-hns-dsaf.txt +++ b/Documentation/devicetree/bindings/net/hisilicon-hns-dsaf.txt @@ -7,19 +7,47 @@ Required properties: - mode: dsa fabric mode string. only support one of dsaf modes like these: "2port-64vf", "6port-16rss", - "6port-16vf". + "6port-16vf", + "single-port". - interrupt-parent: the interrupt parent of this device. - interrupts: should contain the DSA Fabric and rcb interrupt. - reg: specifies base physical address(es) and size of the device registers. - The first region is external interface control register base and size. - The second region is SerDes base register and size. + The first region is external interface control register base and size(optional, + only be used when subctrl-syscon is not exists). It is recommended using + subctrl-syscon rather than this address. + The second region is SerDes base register and size(optional, only be used when + serdes-syscon in port node is not exists. It is recommended using + serdes-syscon rather than this address. The third region is the PPE register base and size. - The fourth region is dsa fabric base register and size. - The fifth region is cpld base register and size, it is not required if do not use cpld. -- phy-handle: phy handle of physicl port, 0 if not any phy device. see ethernet.txt [1]. + The fourth region is dsa fabric base register and size. It is not required for + single-port mode. +- reg-names: may be ppe-base and(or) dsaf-base. It is used to find the + corresponding reg's index. + +- phy-handle: phy handle of physicl port, 0 if not any phy device. It is optional + attribute. If port node is exists, phy-handle in each port node will be used. + see ethernet.txt [1]. 
+- subctrl-syscon: is syscon handle for external interface control register. +- reset-field-offset: is offset of reset field. Its value depends on the hardware + user manual. - buf-size: rx buffer size, should be 16-1024. - desc-num: number of description in TX and RX queue, should be 512, 1024, 2048 or 4096. +- port: subnodes of dsaf. A dsaf node may contain several port nodes(Depending + on mode of dsaf). Port node contain some attributes listed below: +- port-id: is physical port index in one dsaf. +- phy-handle: phy handle of physicl port. It is not required if there isn't + phy device. see ethernet.txt [1]. +- serdes-syscon: is syscon handle for SerDes register. +- cpld-syscon: is syscon handle for cpld register. It is not required if there + isn't cpld device. +- cpld-ctrl-reg: is cpld register offset. It is not required if there isn't + cpld-syscon. +- port-rst-offset: is offset of reset field for each port in dsaf. Its value + depends on the hardware user manual. +- port-mode-offset: is offset of port mode field for each port in dsaf. Its + value depends on the hardware user manual. + [1] Documentation/devicetree/bindings/net/phy.txt Example: @@ -28,11 +56,11 @@ dsaf0: dsa@c7000000 { compatible = "hisilicon,hns-dsaf-v1"; mode = "6port-16rss"; interrupt-parent = <&mbigen_dsa>; - reg = <0x0 0xC0000000 0x0 0x420000 - 0x0 0xC2000000 0x0 0x300000 - 0x0 0xc5000000 0x0 0x890000 + reg = <0x0 0xc5000000 0x0 0x890000 0x0 0xc7000000 0x0 0x60000>; - phy-handle = <0 0 0 0 &soc0_phy4 &soc0_phy5 0 0>; + reg-names = "ppe-base", "dsaf-base"; + subctrl-syscon = <&subctrl>; + reset-field-offset = 0; interrupts = <131 4>,<132 4>, <133 4>,<134 4>, <135 4>,<136 4>, <137 4>,<138 4>, <139 4>,<140 4>, <141 4>,<142 4>, @@ -43,4 +71,15 @@ dsaf0: dsa@c7000000 { buf-size = <4096>; desc-num = <1024>; dma-coherent; + + prot@0 { + port-id = 0; + phy-handle = <&phy0>; + serdes-syscon = <&serdes>; + }; + + prot@1 { + port-id = 1; + serdes-syscon = <&serdes>; + }; }; From c132cdccb71ee000d6456ec63acdf0535b5f35da Mon Sep 17 00:00:00 2001 From: "Yisen.Zhuang\\(Zhuangyuzeng\\)" Date: Sat, 23 Apr 2016 17:05:16 +0800 Subject: [PATCH 1008/1649] Documentation: Bindings: add port-idx-in-ae for enet node This patch adds description for port-idx-in-ae attribute. Signed-off-by: Yisen Zhuang Signed-off-by: David S. Miller --- .../bindings/net/hisilicon-hns-nic.txt | 30 ++++++++++++++++++- 1 file changed, 29 insertions(+), 1 deletion(-) diff --git a/Documentation/devicetree/bindings/net/hisilicon-hns-nic.txt b/Documentation/devicetree/bindings/net/hisilicon-hns-nic.txt index e6a9d1c30878..b9ff4ba6454e 100644 --- a/Documentation/devicetree/bindings/net/hisilicon-hns-nic.txt +++ b/Documentation/devicetree/bindings/net/hisilicon-hns-nic.txt @@ -36,6 +36,34 @@ Required properties: | | | | | | external port + This attribute is remained for compatible purpose. It is not recommended to + use it in new code. + +- port-idx-in-ae: is the index of port provided by AE. + In NIC mode of DSAF, all 6 PHYs of service DSAF are taken as ethernet ports + to the CPU. The port-idx-in-ae can be 0 to 5. Here is the diagram: + +-----+---------------+ + | CPU | + +-+-+-+---+-+-+-+-+-+-+ + | | | | | | | | + debug debug service + port port port + (0) (0) (0-5) + + In Switch mode of DSAF, all 6 PHYs of service DSAF are taken as physical + ports connected to a LAN Switch while the CPU side assume itself have one + single NIC connected to this switch. In this case, the port-idx-in-ae + will be 0 only. 
+ +-----+-----+------+------+ + | CPU | + +-+-+-+-+-+-+-+-+-+-+-+-+-+ + | | service| port(0) + debug debug +------------+ + port port | switch | + (0) (0) +-+-+-+-+-+-++ + | | | | | | + external port + - local-mac-address: mac addr of the ethernet interface Example: @@ -43,6 +71,6 @@ Example: ethernet@0{ compatible = "hisilicon,hns-nic-v1"; ae-handle = <&dsaf0>; - port-id = <0>; + port-idx-in-ae = <0>; local-mac-address = [a2 14 e4 4b 56 76]; }; From 218afd68a2e6de8226c4048e4dd051075648a9c6 Mon Sep 17 00:00:00 2001 From: "Yisen.Zhuang\\(Zhuangyuzeng\\)" Date: Sat, 23 Apr 2016 17:05:17 +0800 Subject: [PATCH 1009/1649] dts: hisi: update hns dst for separating dsaf dev support Because debug dsaf port was separated from service dsaf port, this patch updates the related configurations of hns dts, changes it to match with the new binding files. This also removes enet nodes which don't exist in d02 board. Signed-off-by: Yisen Zhuang Signed-off-by: David S. Miller --- arch/arm64/boot/dts/hisilicon/hip05_hns.dtsi | 72 ++++++++------------ 1 file changed, 30 insertions(+), 42 deletions(-) diff --git a/arch/arm64/boot/dts/hisilicon/hip05_hns.dtsi b/arch/arm64/boot/dts/hisilicon/hip05_hns.dtsi index 933cba359918..7d625141c917 100644 --- a/arch/arm64/boot/dts/hisilicon/hip05_hns.dtsi +++ b/arch/arm64/boot/dts/hisilicon/hip05_hns.dtsi @@ -28,13 +28,13 @@ soc0: soc@000000000 { mode = "6port-16rss"; interrupt-parent = <&mbigen_dsa>; - reg = <0x0 0xC0000000 0x0 0x420000 - 0x0 0xC2000000 0x0 0x300000 - 0x0 0xc5000000 0x0 0x890000 + reg = <0x0 0xc5000000 0x0 0x890000 0x0 0xc7000000 0x0 0x60000 >; - phy-handle = <0 0 0 0 &soc0_phy0 &soc0_phy1 0 0>; + reg-names = "ppe-base","dsaf-base"; + subctrl-syscon = <&dsaf_subctrl>; + reset-field-offset = <0>; interrupts = < /* [14] ge fifo err 8 / xge 6**/ 149 0x4 150 0x4 151 0x4 152 0x4 @@ -122,12 +122,31 @@ soc0: soc@000000000 { buf-size = <4096>; desc-num = <1024>; dma-coherent; + + port@0 { + port-id = <0>; + serdes-syscon = <&serdes_ctrl0>; + }; + port@1 { + port-id = <1>; + serdes-syscon = <&serdes_ctrl0>; + }; + port@4 { + port-id = <4>; + phy-handle = <&soc0_phy0>; + serdes-syscon = <&serdes_ctrl1>; + }; + port@5 { + port-id = <5>; + phy-handle = <&soc0_phy1>; + serdes-syscon = <&serdes_ctrl1>; + }; }; eth0: ethernet@0{ compatible = "hisilicon,hns-nic-v1"; ae-handle = <&dsaf0>; - port-id = <0>; + port-idx-in-ae = <0>; local-mac-address = [00 00 00 01 00 58]; status = "disabled"; dma-coherent; @@ -135,56 +154,25 @@ soc0: soc@000000000 { eth1: ethernet@1{ compatible = "hisilicon,hns-nic-v1"; ae-handle = <&dsaf0>; - port-id = <1>; + port-idx-in-ae = <1>; + local-mac-address = [00 00 00 01 00 59]; status = "disabled"; dma-coherent; }; - eth2: ethernet@2{ + eth2: ethernet@4{ compatible = "hisilicon,hns-nic-v1"; ae-handle = <&dsaf0>; - port-id = <2>; + port-idx-in-ae = <4>; local-mac-address = [00 00 00 01 00 5a]; status = "disabled"; dma-coherent; }; - eth3: ethernet@3{ + eth3: ethernet@5{ compatible = "hisilicon,hns-nic-v1"; ae-handle = <&dsaf0>; - port-id = <3>; + port-idx-in-ae = <5>; local-mac-address = [00 00 00 01 00 5b]; status = "disabled"; dma-coherent; }; - eth4: ethernet@4{ - compatible = "hisilicon,hns-nic-v1"; - ae-handle = <&dsaf0>; - port-id = <4>; - local-mac-address = [00 00 00 01 00 5c]; - status = "disabled"; - dma-coherent; - }; - eth5: ethernet@5{ - compatible = "hisilicon,hns-nic-v1"; - ae-handle = <&dsaf0>; - port-id = <5>; - local-mac-address = [00 00 00 01 00 5d]; - status = "disabled"; - dma-coherent; - }; - eth6: ethernet@6{ - compatible = 
"hisilicon,hns-nic-v1"; - ae-handle = <&dsaf0>; - port-id = <6>; - local-mac-address = [00 00 00 01 00 5e]; - status = "disabled"; - dma-coherent; - }; - eth7: ethernet@7{ - compatible = "hisilicon,hns-nic-v1"; - ae-handle = <&dsaf0>; - port-id = <7>; - local-mac-address = [00 00 00 01 00 5f]; - status = "disabled"; - dma-coherent; - }; }; From a843311d87b69540641d491b97b328309d3a28a1 Mon Sep 17 00:00:00 2001 From: Julia Lawall Date: Sat, 23 Apr 2016 11:07:02 +0200 Subject: [PATCH 1010/1649] net: tsi108: use NULL for pointer-typed argument The first argument of pci_free_consistent has type struct pci_dev *, so use NULL instead of 0. The semantic patch that performs this transformation is as follows: (http://coccinelle.lip6.fr/) // @@ @@ pci_free_consistent( - 0 + NULL , ...) // Signed-off-by: Julia Lawall Signed-off-by: David S. Miller --- drivers/net/ethernet/tundra/tsi108_eth.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c index 520cf50a3d5a..01a77145a0fa 100644 --- a/drivers/net/ethernet/tundra/tsi108_eth.c +++ b/drivers/net/ethernet/tundra/tsi108_eth.c @@ -1314,7 +1314,8 @@ static int tsi108_open(struct net_device *dev) data->txring = dma_zalloc_coherent(NULL, txring_size, &data->txdma, GFP_KERNEL); if (!data->txring) { - pci_free_consistent(0, rxring_size, data->rxring, data->rxdma); + pci_free_consistent(NULL, rxring_size, data->rxring, + data->rxdma); return -ENOMEM; } From 351596aad54a7e07de63fde38496656514661b07 Mon Sep 17 00:00:00 2001 From: Tom Herbert Date: Sat, 23 Apr 2016 11:46:55 -0700 Subject: [PATCH 1011/1649] ila: Add struct definitions and helpers Add structures for identifiers, locators, and an ila address which is composed of a locator and identifier and in6_addr can be cast to it. This includes a three bit type field and enums for the types defined in ILA I-D. In ILA lwt don't allow user to set a translation for a non-ILA address (type of identifier is zero meaning it is an IID). This also requires that the destination prefix is at least 65 bytes (64 bit locator and first byte of identifier). Signed-off-by: Tom Herbert Signed-off-by: David S. 
Miller --- net/ipv6/ila/ila.h | 67 +++++++++++++++++++- net/ipv6/ila/ila_common.c | 11 ++-- net/ipv6/ila/ila_lwt.c | 39 ++++++++---- net/ipv6/ila/ila_xlat.c | 126 +++++++++++++++++++------------------- 4 files changed, 161 insertions(+), 82 deletions(-) diff --git a/net/ipv6/ila/ila.h b/net/ipv6/ila/ila.h index 28542cb2b387..f532967d9ed7 100644 --- a/net/ipv6/ila/ila.h +++ b/net/ipv6/ila/ila.h @@ -23,9 +23,70 @@ #include #include +struct ila_locator { + union { + __u8 v8[8]; + __be16 v16[4]; + __be32 v32[2]; + __be64 v64; + }; +}; + +struct ila_identifier { + union { + struct { +#if defined(__LITTLE_ENDIAN_BITFIELD) + u8 __space:5; + u8 type:3; +#elif defined(__BIG_ENDIAN_BITFIELD) + u8 type:3; + u8 __space:5; +#else +#error "Adjust your defines" +#endif + u8 __space2[7]; + }; + __u8 v8[8]; + __be16 v16[4]; + __be32 v32[2]; + __be64 v64; + }; +}; + +enum { + ILA_ATYPE_IID = 0, + ILA_ATYPE_LUID, + ILA_ATYPE_VIRT_V4, + ILA_ATYPE_VIRT_UNI_V6, + ILA_ATYPE_VIRT_MULTI_V6, + ILA_ATYPE_RSVD_1, + ILA_ATYPE_RSVD_2, + ILA_ATYPE_RSVD_3, +}; + +struct ila_addr { + union { + struct in6_addr addr; + struct { + struct ila_locator loc; + struct ila_identifier ident; + }; + }; +}; + +static inline struct ila_addr *ila_a2i(struct in6_addr *addr) +{ + return (struct ila_addr *)addr; +} + +static inline bool ila_addr_is_ila(struct ila_addr *iaddr) +{ + return (iaddr->ident.type != ILA_ATYPE_IID); +} + struct ila_params { - __be64 locator; - __be64 locator_match; + struct ila_locator locator; + struct ila_locator locator_match; __wsum csum_diff; }; @@ -38,7 +99,7 @@ static inline __wsum compute_csum_diff8(const __be32 *from, const __be32 *to) return csum_partial(diff, sizeof(diff), 0); } -void update_ipv6_locator(struct sk_buff *skb, struct ila_params *p); +void ila_update_ipv6_locator(struct sk_buff *skb, struct ila_params *p); int ila_lwt_init(void); void ila_lwt_fini(void); diff --git a/net/ipv6/ila/ila_common.c b/net/ipv6/ila/ila_common.c index 30613050e4ca..c3078d0b64e1 100644 --- a/net/ipv6/ila/ila_common.c +++ b/net/ipv6/ila/ila_common.c @@ -15,17 +15,20 @@ static __wsum get_csum_diff(struct ipv6hdr *ip6h, struct ila_params *p) { - if (*(__be64 *)&ip6h->daddr == p->locator_match) + struct ila_addr *iaddr = ila_a2i(&ip6h->daddr); + + if (iaddr->loc.v64 == p->locator_match.v64) return p->csum_diff; else - return compute_csum_diff8((__be32 *)&ip6h->daddr, + return compute_csum_diff8((__be32 *)&iaddr->loc, (__be32 *)&p->locator); } -void update_ipv6_locator(struct sk_buff *skb, struct ila_params *p) +void ila_update_ipv6_locator(struct sk_buff *skb, struct ila_params *p) { __wsum diff; struct ipv6hdr *ip6h = ipv6_hdr(skb); + struct ila_addr *iaddr = ila_a2i(&ip6h->daddr); size_t nhoff = sizeof(struct ipv6hdr); /* First update checksum */ @@ -68,7 +71,7 @@ void update_ipv6_locator(struct sk_buff *skb, struct ila_params *p) } /* Now change destination address */ - *(__be64 *)&ip6h->daddr = p->locator; + iaddr->loc = p->locator; } static int __init ila_init(void) diff --git a/net/ipv6/ila/ila_lwt.c b/net/ipv6/ila/ila_lwt.c index 9db3621b2126..de7f6d76e928 100644 --- a/net/ipv6/ila/ila_lwt.c +++ b/net/ipv6/ila/ila_lwt.c @@ -26,7 +26,7 @@ static int ila_output(struct net *net, struct sock *sk, struct sk_buff *skb) if (skb->protocol != htons(ETH_P_IPV6)) goto drop; - update_ipv6_locator(skb, ila_params_lwtunnel(dst->lwtstate)); + ila_update_ipv6_locator(skb, ila_params_lwtunnel(dst->lwtstate)); return dst->lwtstate->orig_output(net, sk, skb); @@ -42,7 +42,7 @@ static int ila_input(struct sk_buff *skb) if 
(skb->protocol != htons(ETH_P_IPV6)) goto drop; - update_ipv6_locator(skb, ila_params_lwtunnel(dst->lwtstate)); + ila_update_ipv6_locator(skb, ila_params_lwtunnel(dst->lwtstate)); return dst->lwtstate->orig_input(skb); @@ -64,11 +64,26 @@ static int ila_build_state(struct net_device *dev, struct nlattr *nla, size_t encap_len = sizeof(*p); struct lwtunnel_state *newts; const struct fib6_config *cfg6 = cfg; + struct ila_addr *iaddr; int ret; if (family != AF_INET6) return -EINVAL; + if (cfg6->fc_dst_len < sizeof(struct ila_locator) + 1) { + /* Need to have full locator and at least type field + * included in destination + */ + return -EINVAL; + } + + iaddr = (struct ila_addr *)&cfg6->fc_dst; + + if (!ila_addr_is_ila(iaddr)) { + /* Don't allow setting a translation for a non-ILA address */ + return -EINVAL; + } + ret = nla_parse_nested(tb, ILA_ATTR_MAX, nla, ila_nl_policy); if (ret < 0) @@ -84,16 +99,14 @@ static int ila_build_state(struct net_device *dev, struct nlattr *nla, newts->len = encap_len; p = ila_params_lwtunnel(newts); - p->locator = (__force __be64)nla_get_u64(tb[ILA_ATTR_LOCATOR]); + p->locator.v64 = (__force __be64)nla_get_u64(tb[ILA_ATTR_LOCATOR]); - if (cfg6->fc_dst_len > sizeof(__be64)) { - /* Precompute checksum difference for translation since we - * know both the old locator and the new one. - */ - p->locator_match = *(__be64 *)&cfg6->fc_dst; - p->csum_diff = compute_csum_diff8( - (__be32 *)&p->locator_match, (__be32 *)&p->locator); - } + /* Precompute checksum difference for translation since we + * know both the old locator and the new one. + */ + p->locator_match = iaddr->loc; + p->csum_diff = compute_csum_diff8( + (__be32 *)&p->locator_match, (__be32 *)&p->locator); newts->type = LWTUNNEL_ENCAP_ILA; newts->flags |= LWTUNNEL_STATE_OUTPUT_REDIRECT | @@ -109,7 +122,7 @@ static int ila_fill_encap_info(struct sk_buff *skb, { struct ila_params *p = ila_params_lwtunnel(lwtstate); - if (nla_put_u64_64bit(skb, ILA_ATTR_LOCATOR, (__force u64)p->locator, + if (nla_put_u64_64bit(skb, ILA_ATTR_LOCATOR, (__force u64)p->locator.v64, ILA_ATTR_PAD)) goto nla_put_failure; @@ -130,7 +143,7 @@ static int ila_encap_cmp(struct lwtunnel_state *a, struct lwtunnel_state *b) struct ila_params *a_p = ila_params_lwtunnel(a); struct ila_params *b_p = ila_params_lwtunnel(b); - return (a_p->locator != b_p->locator); + return (a_p->locator.v64 != b_p->locator.v64); } static const struct lwtunnel_encap_ops ila_encap_ops = { diff --git a/net/ipv6/ila/ila_xlat.c b/net/ipv6/ila/ila_xlat.c index 0e9e579410da..020153bc47f5 100644 --- a/net/ipv6/ila/ila_xlat.c +++ b/net/ipv6/ila/ila_xlat.c @@ -11,13 +11,13 @@ struct ila_xlat_params { struct ila_params ip; - __be64 identifier; + struct ila_identifier identifier; int ifindex; unsigned int dir; }; struct ila_map { - struct ila_xlat_params p; + struct ila_xlat_params xp; struct rhash_head node; struct ila_map __rcu *next; struct rcu_head rcu; @@ -66,31 +66,35 @@ static __always_inline void __ila_hash_secret_init(void) net_get_random_once(&hashrnd, sizeof(hashrnd)); } -static inline u32 ila_identifier_hash(__be64 identifier) +static inline u32 ila_identifier_hash(struct ila_identifier ident) { - u32 *v = (u32 *)&identifier; + u32 *v = (u32 *)ident.v32; return jhash_2words(v[0], v[1], hashrnd); } -static inline spinlock_t *ila_get_lock(struct ila_net *ilan, __be64 identifier) +static inline spinlock_t *ila_get_lock(struct ila_net *ilan, + struct ila_identifier ident) { - return &ilan->locks[ila_identifier_hash(identifier) & ilan->locks_mask]; + return 
&ilan->locks[ila_identifier_hash(ident) & ilan->locks_mask]; } -static inline int ila_cmp_wildcards(struct ila_map *ila, __be64 loc, - int ifindex, unsigned int dir) +static inline int ila_cmp_wildcards(struct ila_map *ila, + struct ila_addr *iaddr, int ifindex, + unsigned int dir) { - return (ila->p.ip.locator_match && ila->p.ip.locator_match != loc) || - (ila->p.ifindex && ila->p.ifindex != ifindex) || - !(ila->p.dir & dir); + return (ila->xp.ip.locator_match.v64 && + ila->xp.ip.locator_match.v64 != iaddr->loc.v64) || + (ila->xp.ifindex && ila->xp.ifindex != ifindex) || + !(ila->xp.dir & dir); } -static inline int ila_cmp_params(struct ila_map *ila, struct ila_xlat_params *p) +static inline int ila_cmp_params(struct ila_map *ila, + struct ila_xlat_params *xp) { - return (ila->p.ip.locator_match != p->ip.locator_match) || - (ila->p.ifindex != p->ifindex) || - (ila->p.dir != p->dir); + return (ila->xp.ip.locator_match.v64 != xp->ip.locator_match.v64) || + (ila->xp.ifindex != xp->ifindex) || + (ila->xp.dir != xp->dir); } static int ila_cmpfn(struct rhashtable_compare_arg *arg, @@ -98,17 +102,17 @@ static int ila_cmpfn(struct rhashtable_compare_arg *arg, { const struct ila_map *ila = obj; - return (ila->p.identifier != *(__be64 *)arg->key); + return (ila->xp.identifier.v64 != *(__be64 *)arg->key); } static inline int ila_order(struct ila_map *ila) { int score = 0; - if (ila->p.ip.locator_match) + if (ila->xp.ip.locator_match.v64) score += 1 << 0; - if (ila->p.ifindex) + if (ila->xp.ifindex) score += 1 << 1; return score; @@ -117,7 +121,7 @@ static inline int ila_order(struct ila_map *ila) static const struct rhashtable_params rht_params = { .nelem_hint = 1024, .head_offset = offsetof(struct ila_map, node), - .key_offset = offsetof(struct ila_map, p.identifier), + .key_offset = offsetof(struct ila_map, xp.identifier), .key_len = sizeof(u64), /* identifier */ .max_size = 1048576, .min_size = 256, @@ -144,42 +148,43 @@ static struct nla_policy ila_nl_policy[ILA_ATTR_MAX + 1] = { }; static int parse_nl_config(struct genl_info *info, - struct ila_xlat_params *p) + struct ila_xlat_params *xp) { - memset(p, 0, sizeof(*p)); + memset(xp, 0, sizeof(*xp)); if (info->attrs[ILA_ATTR_IDENTIFIER]) - p->identifier = (__force __be64)nla_get_u64( + xp->identifier.v64 = (__force __be64)nla_get_u64( info->attrs[ILA_ATTR_IDENTIFIER]); if (info->attrs[ILA_ATTR_LOCATOR]) - p->ip.locator = (__force __be64)nla_get_u64( + xp->ip.locator.v64 = (__force __be64)nla_get_u64( info->attrs[ILA_ATTR_LOCATOR]); if (info->attrs[ILA_ATTR_LOCATOR_MATCH]) - p->ip.locator_match = (__force __be64)nla_get_u64( + xp->ip.locator_match.v64 = (__force __be64)nla_get_u64( info->attrs[ILA_ATTR_LOCATOR_MATCH]); if (info->attrs[ILA_ATTR_IFINDEX]) - p->ifindex = nla_get_s32(info->attrs[ILA_ATTR_IFINDEX]); + xp->ifindex = nla_get_s32(info->attrs[ILA_ATTR_IFINDEX]); if (info->attrs[ILA_ATTR_DIR]) - p->dir = nla_get_u32(info->attrs[ILA_ATTR_DIR]); + xp->dir = nla_get_u32(info->attrs[ILA_ATTR_DIR]); return 0; } /* Must be called with rcu readlock */ -static inline struct ila_map *ila_lookup_wildcards(__be64 id, __be64 loc, +static inline struct ila_map *ila_lookup_wildcards(struct ila_addr *iaddr, int ifindex, unsigned int dir, struct ila_net *ilan) { struct ila_map *ila; - ila = rhashtable_lookup_fast(&ilan->rhash_table, &id, rht_params); + ila = rhashtable_lookup_fast(&ilan->rhash_table, &iaddr->ident, + rht_params); while (ila) { - if (!ila_cmp_wildcards(ila, loc, ifindex, dir)) + if (!ila_cmp_wildcards(ila, iaddr, ifindex, dir)) return ila; 
ila = rcu_access_pointer(ila->next); } @@ -188,15 +193,15 @@ static inline struct ila_map *ila_lookup_wildcards(__be64 id, __be64 loc, } /* Must be called with rcu readlock */ -static inline struct ila_map *ila_lookup_by_params(struct ila_xlat_params *p, +static inline struct ila_map *ila_lookup_by_params(struct ila_xlat_params *xp, struct ila_net *ilan) { struct ila_map *ila; - ila = rhashtable_lookup_fast(&ilan->rhash_table, &p->identifier, + ila = rhashtable_lookup_fast(&ilan->rhash_table, &xp->identifier, rht_params); while (ila) { - if (!ila_cmp_params(ila, p)) + if (!ila_cmp_params(ila, xp)) return ila; ila = rcu_access_pointer(ila->next); } @@ -241,11 +246,11 @@ static struct nf_hook_ops ila_nf_hook_ops[] __read_mostly = { }, }; -static int ila_add_mapping(struct net *net, struct ila_xlat_params *p) +static int ila_add_mapping(struct net *net, struct ila_xlat_params *xp) { struct ila_net *ilan = net_generic(net, ila_net_id); struct ila_map *ila, *head; - spinlock_t *lock = ila_get_lock(ilan, p->identifier); + spinlock_t *lock = ila_get_lock(ilan, xp->identifier); int err = 0, order; if (!ilan->hooks_registered) { @@ -264,22 +269,22 @@ static int ila_add_mapping(struct net *net, struct ila_xlat_params *p) if (!ila) return -ENOMEM; - ila->p = *p; + ila->xp = *xp; - if (p->ip.locator_match) { + if (xp->ip.locator_match.v64) { /* Precompute checksum difference for translation since we * know both the old identifier and the new one. */ - ila->p.ip.csum_diff = compute_csum_diff8( - (__be32 *)&p->ip.locator_match, - (__be32 *)&p->ip.locator); + ila->xp.ip.csum_diff = compute_csum_diff8( + (__be32 *)&xp->ip.locator_match, + (__be32 *)&xp->ip.locator); } order = ila_order(ila); spin_lock(lock); - head = rhashtable_lookup_fast(&ilan->rhash_table, &p->identifier, + head = rhashtable_lookup_fast(&ilan->rhash_table, &xp->identifier, rht_params); if (!head) { /* New entry for the rhash_table */ @@ -289,7 +294,7 @@ static int ila_add_mapping(struct net *net, struct ila_xlat_params *p) struct ila_map *tila = head, *prev = NULL; do { - if (!ila_cmp_params(tila, p)) { + if (!ila_cmp_params(tila, xp)) { err = -EEXIST; goto out; } @@ -326,23 +331,23 @@ out: return err; } -static int ila_del_mapping(struct net *net, struct ila_xlat_params *p) +static int ila_del_mapping(struct net *net, struct ila_xlat_params *xp) { struct ila_net *ilan = net_generic(net, ila_net_id); struct ila_map *ila, *head, *prev; - spinlock_t *lock = ila_get_lock(ilan, p->identifier); + spinlock_t *lock = ila_get_lock(ilan, xp->identifier); int err = -ENOENT; spin_lock(lock); head = rhashtable_lookup_fast(&ilan->rhash_table, - &p->identifier, rht_params); + &xp->identifier, rht_params); ila = head; prev = NULL; while (ila) { - if (ila_cmp_params(ila, p)) { + if (ila_cmp_params(ila, xp)) { prev = ila; ila = rcu_dereference_protected(ila->next, lockdep_is_held(lock)); @@ -404,14 +409,14 @@ static int ila_nl_cmd_add_mapping(struct sk_buff *skb, struct genl_info *info) static int ila_nl_cmd_del_mapping(struct sk_buff *skb, struct genl_info *info) { struct net *net = genl_info_net(info); - struct ila_xlat_params p; + struct ila_xlat_params xp; int err; - err = parse_nl_config(info, &p); + err = parse_nl_config(info, &xp); if (err) return err; - ila_del_mapping(net, &p); + ila_del_mapping(net, &xp); return 0; } @@ -419,16 +424,16 @@ static int ila_nl_cmd_del_mapping(struct sk_buff *skb, struct genl_info *info) static int ila_fill_info(struct ila_map *ila, struct sk_buff *msg) { if (nla_put_u64_64bit(msg, ILA_ATTR_IDENTIFIER, - (__force 
u64)ila->p.identifier, + (__force u64)ila->xp.identifier.v64, ILA_ATTR_PAD) || nla_put_u64_64bit(msg, ILA_ATTR_LOCATOR, - (__force u64)ila->p.ip.locator, + (__force u64)ila->xp.ip.locator.v64, ILA_ATTR_PAD) || nla_put_u64_64bit(msg, ILA_ATTR_LOCATOR_MATCH, - (__force u64)ila->p.ip.locator_match, + (__force u64)ila->xp.ip.locator_match.v64, ILA_ATTR_PAD) || - nla_put_s32(msg, ILA_ATTR_IFINDEX, ila->p.ifindex) || - nla_put_u32(msg, ILA_ATTR_DIR, ila->p.dir)) + nla_put_s32(msg, ILA_ATTR_IFINDEX, ila->xp.ifindex) || + nla_put_u32(msg, ILA_ATTR_DIR, ila->xp.dir)) return -1; return 0; @@ -460,11 +465,11 @@ static int ila_nl_cmd_get_mapping(struct sk_buff *skb, struct genl_info *info) struct net *net = genl_info_net(info); struct ila_net *ilan = net_generic(net, ila_net_id); struct sk_buff *msg; - struct ila_xlat_params p; + struct ila_xlat_params xp; struct ila_map *ila; int ret; - ret = parse_nl_config(info, &p); + ret = parse_nl_config(info, &xp); if (ret) return ret; @@ -474,7 +479,7 @@ static int ila_nl_cmd_get_mapping(struct sk_buff *skb, struct genl_info *info) rcu_read_lock(); - ila = ila_lookup_by_params(&p, ilan); + ila = ila_lookup_by_params(&xp, ilan); if (ila) { ret = ila_dump_info(ila, info->snd_portid, @@ -623,21 +628,18 @@ static int ila_xlat_addr(struct sk_buff *skb, int dir) struct ipv6hdr *ip6h = ipv6_hdr(skb); struct net *net = dev_net(skb->dev); struct ila_net *ilan = net_generic(net, ila_net_id); - __be64 identifier, locator_match; + struct ila_addr *iaddr = ila_a2i(&ip6h->daddr); size_t nhoff; /* Assumes skb contains a valid IPv6 header that is pulled */ - identifier = *(__be64 *)&ip6h->daddr.in6_u.u6_addr8[8]; - locator_match = *(__be64 *)&ip6h->daddr.in6_u.u6_addr8[0]; nhoff = sizeof(struct ipv6hdr); rcu_read_lock(); - ila = ila_lookup_wildcards(identifier, locator_match, - skb->dev->ifindex, dir, ilan); + ila = ila_lookup_wildcards(iaddr, skb->dev->ifindex, dir, ilan); if (ila) - update_ipv6_locator(skb, &ila->p.ip); + ila_update_ipv6_locator(skb, &ila->xp.ip); rcu_read_unlock(); From 642c2c95585dac4ea977140dbb1149fd1e2e7f7f Mon Sep 17 00:00:00 2001 From: Tom Herbert Date: Sat, 23 Apr 2016 11:46:56 -0700 Subject: [PATCH 1012/1649] ila: xlat changes Change model of xlat to be used only for input where lookup is done on the locator part of an address (comparing to locator_match as key in rhashtable). This is needed for checksum neutral translation which obfuscates the low order 16 bits of the identifier. It also permits hosts to be in muliple ILA domains (each locator can map to a different SIR address). A check is also added to disallow translating non-ILA addresses (check of type in identifier). Signed-off-by: Tom Herbert Signed-off-by: David S. 
Miller --- net/ipv6/ila/ila_xlat.c | 103 +++++++++++++--------------------------- 1 file changed, 34 insertions(+), 69 deletions(-) diff --git a/net/ipv6/ila/ila_xlat.c b/net/ipv6/ila/ila_xlat.c index 020153bc47f5..2e6cb97aee19 100644 --- a/net/ipv6/ila/ila_xlat.c +++ b/net/ipv6/ila/ila_xlat.c @@ -11,9 +11,7 @@ struct ila_xlat_params { struct ila_params ip; - struct ila_identifier identifier; int ifindex; - unsigned int dir; }; struct ila_map { @@ -66,35 +64,29 @@ static __always_inline void __ila_hash_secret_init(void) net_get_random_once(&hashrnd, sizeof(hashrnd)); } -static inline u32 ila_identifier_hash(struct ila_identifier ident) +static inline u32 ila_locator_hash(struct ila_locator loc) { - u32 *v = (u32 *)ident.v32; + u32 *v = (u32 *)loc.v32; return jhash_2words(v[0], v[1], hashrnd); } static inline spinlock_t *ila_get_lock(struct ila_net *ilan, - struct ila_identifier ident) + struct ila_locator loc) { - return &ilan->locks[ila_identifier_hash(ident) & ilan->locks_mask]; + return &ilan->locks[ila_locator_hash(loc) & ilan->locks_mask]; } static inline int ila_cmp_wildcards(struct ila_map *ila, - struct ila_addr *iaddr, int ifindex, - unsigned int dir) + struct ila_addr *iaddr, int ifindex) { - return (ila->xp.ip.locator_match.v64 && - ila->xp.ip.locator_match.v64 != iaddr->loc.v64) || - (ila->xp.ifindex && ila->xp.ifindex != ifindex) || - !(ila->xp.dir & dir); + return (ila->xp.ifindex && ila->xp.ifindex != ifindex); } static inline int ila_cmp_params(struct ila_map *ila, struct ila_xlat_params *xp) { - return (ila->xp.ip.locator_match.v64 != xp->ip.locator_match.v64) || - (ila->xp.ifindex != xp->ifindex) || - (ila->xp.dir != xp->dir); + return (ila->xp.ifindex != xp->ifindex); } static int ila_cmpfn(struct rhashtable_compare_arg *arg, @@ -102,16 +94,13 @@ static int ila_cmpfn(struct rhashtable_compare_arg *arg, { const struct ila_map *ila = obj; - return (ila->xp.identifier.v64 != *(__be64 *)arg->key); + return (ila->xp.ip.locator_match.v64 != *(__be64 *)arg->key); } static inline int ila_order(struct ila_map *ila) { int score = 0; - if (ila->xp.ip.locator_match.v64) - score += 1 << 0; - if (ila->xp.ifindex) score += 1 << 1; @@ -121,7 +110,7 @@ static inline int ila_order(struct ila_map *ila) static const struct rhashtable_params rht_params = { .nelem_hint = 1024, .head_offset = offsetof(struct ila_map, node), - .key_offset = offsetof(struct ila_map, xp.identifier), + .key_offset = offsetof(struct ila_map, xp.ip.locator_match), .key_len = sizeof(u64), /* identifier */ .max_size = 1048576, .min_size = 256, @@ -140,11 +129,9 @@ static struct genl_family ila_nl_family = { }; static struct nla_policy ila_nl_policy[ILA_ATTR_MAX + 1] = { - [ILA_ATTR_IDENTIFIER] = { .type = NLA_U64, }, [ILA_ATTR_LOCATOR] = { .type = NLA_U64, }, [ILA_ATTR_LOCATOR_MATCH] = { .type = NLA_U64, }, [ILA_ATTR_IFINDEX] = { .type = NLA_U32, }, - [ILA_ATTR_DIR] = { .type = NLA_U32, }, }; static int parse_nl_config(struct genl_info *info, @@ -152,10 +139,6 @@ static int parse_nl_config(struct genl_info *info, { memset(xp, 0, sizeof(*xp)); - if (info->attrs[ILA_ATTR_IDENTIFIER]) - xp->identifier.v64 = (__force __be64)nla_get_u64( - info->attrs[ILA_ATTR_IDENTIFIER]); - if (info->attrs[ILA_ATTR_LOCATOR]) xp->ip.locator.v64 = (__force __be64)nla_get_u64( info->attrs[ILA_ATTR_LOCATOR]); @@ -167,24 +150,20 @@ static int parse_nl_config(struct genl_info *info, if (info->attrs[ILA_ATTR_IFINDEX]) xp->ifindex = nla_get_s32(info->attrs[ILA_ATTR_IFINDEX]); - if (info->attrs[ILA_ATTR_DIR]) - xp->dir = 
nla_get_u32(info->attrs[ILA_ATTR_DIR]); - return 0; } /* Must be called with rcu readlock */ static inline struct ila_map *ila_lookup_wildcards(struct ila_addr *iaddr, int ifindex, - unsigned int dir, struct ila_net *ilan) { struct ila_map *ila; - ila = rhashtable_lookup_fast(&ilan->rhash_table, &iaddr->ident, + ila = rhashtable_lookup_fast(&ilan->rhash_table, &iaddr->loc, rht_params); while (ila) { - if (!ila_cmp_wildcards(ila, iaddr, ifindex, dir)) + if (!ila_cmp_wildcards(ila, iaddr, ifindex)) return ila; ila = rcu_access_pointer(ila->next); } @@ -198,7 +177,8 @@ static inline struct ila_map *ila_lookup_by_params(struct ila_xlat_params *xp, { struct ila_map *ila; - ila = rhashtable_lookup_fast(&ilan->rhash_table, &xp->identifier, + ila = rhashtable_lookup_fast(&ilan->rhash_table, + &xp->ip.locator_match, rht_params); while (ila) { if (!ila_cmp_params(ila, xp)) @@ -226,14 +206,14 @@ static void ila_free_cb(void *ptr, void *arg) } } -static int ila_xlat_addr(struct sk_buff *skb, int dir); +static int ila_xlat_addr(struct sk_buff *skb); static unsigned int ila_nf_input(void *priv, struct sk_buff *skb, const struct nf_hook_state *state) { - ila_xlat_addr(skb, ILA_DIR_IN); + ila_xlat_addr(skb); return NF_ACCEPT; } @@ -250,7 +230,7 @@ static int ila_add_mapping(struct net *net, struct ila_xlat_params *xp) { struct ila_net *ilan = net_generic(net, ila_net_id); struct ila_map *ila, *head; - spinlock_t *lock = ila_get_lock(ilan, xp->identifier); + spinlock_t *lock = ila_get_lock(ilan, xp->ip.locator_match); int err = 0, order; if (!ilan->hooks_registered) { @@ -271,20 +251,19 @@ static int ila_add_mapping(struct net *net, struct ila_xlat_params *xp) ila->xp = *xp; - if (xp->ip.locator_match.v64) { - /* Precompute checksum difference for translation since we - * know both the old identifier and the new one. - */ - ila->xp.ip.csum_diff = compute_csum_diff8( - (__be32 *)&xp->ip.locator_match, - (__be32 *)&xp->ip.locator); - } + /* Precompute checksum difference for translation since we + * know both the old identifier and the new one. 
+ */ + ila->xp.ip.csum_diff = compute_csum_diff8( + (__be32 *)&xp->ip.locator_match, + (__be32 *)&xp->ip.locator); order = ila_order(ila); spin_lock(lock); - head = rhashtable_lookup_fast(&ilan->rhash_table, &xp->identifier, + head = rhashtable_lookup_fast(&ilan->rhash_table, + &xp->ip.locator_match, rht_params); if (!head) { /* New entry for the rhash_table */ @@ -335,13 +314,13 @@ static int ila_del_mapping(struct net *net, struct ila_xlat_params *xp) { struct ila_net *ilan = net_generic(net, ila_net_id); struct ila_map *ila, *head, *prev; - spinlock_t *lock = ila_get_lock(ilan, xp->identifier); + spinlock_t *lock = ila_get_lock(ilan, xp->ip.locator_match); int err = -ENOENT; spin_lock(lock); head = rhashtable_lookup_fast(&ilan->rhash_table, - &xp->identifier, rht_params); + &xp->ip.locator_match, rht_params); ila = head; prev = NULL; @@ -423,17 +402,13 @@ static int ila_nl_cmd_del_mapping(struct sk_buff *skb, struct genl_info *info) static int ila_fill_info(struct ila_map *ila, struct sk_buff *msg) { - if (nla_put_u64_64bit(msg, ILA_ATTR_IDENTIFIER, - (__force u64)ila->xp.identifier.v64, - ILA_ATTR_PAD) || - nla_put_u64_64bit(msg, ILA_ATTR_LOCATOR, + if (nla_put_u64_64bit(msg, ILA_ATTR_LOCATOR, (__force u64)ila->xp.ip.locator.v64, ILA_ATTR_PAD) || nla_put_u64_64bit(msg, ILA_ATTR_LOCATOR_MATCH, (__force u64)ila->xp.ip.locator_match.v64, ILA_ATTR_PAD) || - nla_put_s32(msg, ILA_ATTR_IFINDEX, ila->xp.ifindex) || - nla_put_u32(msg, ILA_ATTR_DIR, ila->xp.dir)) + nla_put_s32(msg, ILA_ATTR_IFINDEX, ila->xp.ifindex)) return -1; return 0; @@ -622,22 +597,24 @@ static struct pernet_operations ila_net_ops = { .size = sizeof(struct ila_net), }; -static int ila_xlat_addr(struct sk_buff *skb, int dir) +static int ila_xlat_addr(struct sk_buff *skb) { struct ila_map *ila; struct ipv6hdr *ip6h = ipv6_hdr(skb); struct net *net = dev_net(skb->dev); struct ila_net *ilan = net_generic(net, ila_net_id); struct ila_addr *iaddr = ila_a2i(&ip6h->daddr); - size_t nhoff; /* Assumes skb contains a valid IPv6 header that is pulled */ - nhoff = sizeof(struct ipv6hdr); + if (!ila_addr_is_ila(iaddr)) { + /* Type indicates this is not an ILA address */ + return 0; + } rcu_read_lock(); - ila = ila_lookup_wildcards(iaddr, skb->dev->ifindex, dir, ilan); + ila = ila_lookup_wildcards(iaddr, skb->dev->ifindex, ilan); if (ila) ila_update_ipv6_locator(skb, &ila->xp.ip); @@ -646,18 +623,6 @@ static int ila_xlat_addr(struct sk_buff *skb, int dir) return 0; } -int ila_xlat_incoming(struct sk_buff *skb) -{ - return ila_xlat_addr(skb, ILA_DIR_IN); -} -EXPORT_SYMBOL(ila_xlat_incoming); - -int ila_xlat_outgoing(struct sk_buff *skb) -{ - return ila_xlat_addr(skb, ILA_DIR_OUT); -} -EXPORT_SYMBOL(ila_xlat_outgoing); - int ila_xlat_init(void) { int ret; From 90bfe662db13d49cadc6714b0b8ed7e2d0535c5c Mon Sep 17 00:00:00 2001 From: Tom Herbert Date: Sat, 23 Apr 2016 11:46:57 -0700 Subject: [PATCH 1013/1649] ila: add checksum neutral ILA translations Support checksum neutral ILA as described in the ILA draft. The low order 16 bits of the identifier are used to contain the checksum adjustment value. The csum-mode parameter is added to described checksum processing. There are three values: - adjust transport checksum (previous behavior) - do checksum neutral mapping - do nothing On output the csum-mode in the ila_params is checked and acted on. If mode is checksum neutral mapping then to mapping and set C-bit. On input, C-bit is checked. 
If it is set checksum-netural mapping is done (regardless of csum-mode in ila params) and C-bit will be cleared. If it is not set then action in csum-mode is taken. Signed-off-by: Tom Herbert Signed-off-by: David S. Miller --- include/uapi/linux/ila.h | 7 ++++ net/ipv6/ila/ila.h | 16 +++++++-- net/ipv6/ila/ila_common.c | 74 +++++++++++++++++++++++++++++++++++++-- net/ipv6/ila/ila_lwt.c | 14 ++++++-- net/ipv6/ila/ila_xlat.c | 16 ++++----- 5 files changed, 112 insertions(+), 15 deletions(-) diff --git a/include/uapi/linux/ila.h b/include/uapi/linux/ila.h index cd97951680bf..948c0a91e11b 100644 --- a/include/uapi/linux/ila.h +++ b/include/uapi/linux/ila.h @@ -15,6 +15,7 @@ enum { ILA_ATTR_IFINDEX, /* s32 */ ILA_ATTR_DIR, /* u32 */ ILA_ATTR_PAD, + ILA_ATTR_CSUM_MODE, /* u8 */ __ILA_ATTR_MAX, }; @@ -35,4 +36,10 @@ enum { #define ILA_DIR_IN (1 << 0) #define ILA_DIR_OUT (1 << 1) +enum { + ILA_CSUM_ADJUST_TRANSPORT, + ILA_CSUM_NEUTRAL_MAP, + ILA_CSUM_NO_ACTION, +}; + #endif /* _UAPI_LINUX_ILA_H */ diff --git a/net/ipv6/ila/ila.h b/net/ipv6/ila/ila.h index f532967d9ed7..d08fd2d48a78 100644 --- a/net/ipv6/ila/ila.h +++ b/net/ipv6/ila/ila.h @@ -36,11 +36,13 @@ struct ila_identifier { union { struct { #if defined(__LITTLE_ENDIAN_BITFIELD) - u8 __space:5; + u8 __space:4; + u8 csum_neutral:1; u8 type:3; #elif defined(__BIG_ENDIAN_BITFIELD) u8 type:3; - u8 __space:5; + u8 csum_neutral:1; + u8 __space:4; #else #error "Adjust your defines" #endif @@ -64,6 +66,8 @@ enum { ILA_ATYPE_RSVD_3, }; +#define CSUM_NEUTRAL_FLAG htonl(0x10000000) + struct ila_addr { union { struct in6_addr addr; @@ -88,6 +92,7 @@ struct ila_params { struct ila_locator locator; struct ila_locator locator_match; __wsum csum_diff; + u8 csum_mode; }; static inline __wsum compute_csum_diff8(const __be32 *from, const __be32 *to) @@ -99,8 +104,15 @@ static inline __wsum compute_csum_diff8(const __be32 *from, const __be32 *to) return csum_partial(diff, sizeof(diff), 0); } +static inline bool ila_csum_neutral_set(struct ila_identifier ident) +{ + return !!(ident.csum_neutral); +} + void ila_update_ipv6_locator(struct sk_buff *skb, struct ila_params *p); +void ila_init_saved_csum(struct ila_params *p); + int ila_lwt_init(void); void ila_lwt_fini(void); int ila_xlat_init(void); diff --git a/net/ipv6/ila/ila_common.c b/net/ipv6/ila/ila_common.c index c3078d0b64e1..0e94042d1289 100644 --- a/net/ipv6/ila/ila_common.c +++ b/net/ipv6/ila/ila_common.c @@ -17,21 +17,50 @@ static __wsum get_csum_diff(struct ipv6hdr *ip6h, struct ila_params *p) { struct ila_addr *iaddr = ila_a2i(&ip6h->daddr); - if (iaddr->loc.v64 == p->locator_match.v64) + if (p->locator_match.v64) return p->csum_diff; else return compute_csum_diff8((__be32 *)&iaddr->loc, (__be32 *)&p->locator); } -void ila_update_ipv6_locator(struct sk_buff *skb, struct ila_params *p) +static void ila_csum_do_neutral(struct ila_addr *iaddr, + struct ila_params *p) +{ + __sum16 *adjust = (__force __sum16 *)&iaddr->ident.v16[3]; + __wsum diff, fval; + + /* Check if checksum adjust value has been cached */ + if (p->locator_match.v64) { + diff = p->csum_diff; + } else { + diff = compute_csum_diff8((__be32 *)iaddr, + (__be32 *)&p->locator); + } + + fval = (__force __wsum)(ila_csum_neutral_set(iaddr->ident) ? + ~CSUM_NEUTRAL_FLAG : CSUM_NEUTRAL_FLAG); + + diff = csum_add(diff, fval); + + *adjust = ~csum_fold(csum_add(diff, csum_unfold(*adjust))); + + /* Flip the csum-neutral bit. 
Either we are doing a SIR->ILA + * translation with ILA_CSUM_NEUTRAL_MAP as the csum_method + * and the C-bit is not set, or we are doing an ILA-SIR + * tranlsation and the C-bit is set. + */ + iaddr->ident.csum_neutral ^= 1; +} + +static void ila_csum_adjust_transport(struct sk_buff *skb, + struct ila_params *p) { __wsum diff; struct ipv6hdr *ip6h = ipv6_hdr(skb); struct ila_addr *iaddr = ila_a2i(&ip6h->daddr); size_t nhoff = sizeof(struct ipv6hdr); - /* First update checksum */ switch (ip6h->nexthdr) { case NEXTHDR_TCP: if (likely(pskb_may_pull(skb, nhoff + sizeof(struct tcphdr)))) { @@ -74,6 +103,45 @@ void ila_update_ipv6_locator(struct sk_buff *skb, struct ila_params *p) iaddr->loc = p->locator; } +void ila_update_ipv6_locator(struct sk_buff *skb, struct ila_params *p) +{ + struct ipv6hdr *ip6h = ipv6_hdr(skb); + struct ila_addr *iaddr = ila_a2i(&ip6h->daddr); + + /* First deal with the transport checksum */ + if (ila_csum_neutral_set(iaddr->ident)) { + /* C-bit is set in the locator indicating that this + * is a locator being translated to a SIR address. + * Perform (receiver) checksum-neutral translation. + */ + ila_csum_do_neutral(iaddr, p); + } else { + switch (p->csum_mode) { + case ILA_CSUM_ADJUST_TRANSPORT: + ila_csum_adjust_transport(skb, p); + break; + case ILA_CSUM_NEUTRAL_MAP: + ila_csum_do_neutral(iaddr, p); + break; + case ILA_CSUM_NO_ACTION: + break; + } + } + + /* Now change destination address */ + iaddr->loc = p->locator; +} + +void ila_init_saved_csum(struct ila_params *p) +{ + if (!p->locator_match.v64) + return; + + p->csum_diff = compute_csum_diff8( + (__be32 *)&p->locator_match, + (__be32 *)&p->locator); +} + static int __init ila_init(void) { int ret; diff --git a/net/ipv6/ila/ila_lwt.c b/net/ipv6/ila/ila_lwt.c index de7f6d76e928..4985e1a735a6 100644 --- a/net/ipv6/ila/ila_lwt.c +++ b/net/ipv6/ila/ila_lwt.c @@ -53,6 +53,7 @@ drop: static struct nla_policy ila_nl_policy[ILA_ATTR_MAX + 1] = { [ILA_ATTR_LOCATOR] = { .type = NLA_U64, }, + [ILA_ATTR_CSUM_MODE] = { .type = NLA_U8, }, }; static int ila_build_state(struct net_device *dev, struct nlattr *nla, @@ -79,8 +80,10 @@ static int ila_build_state(struct net_device *dev, struct nlattr *nla, iaddr = (struct ila_addr *)&cfg6->fc_dst; - if (!ila_addr_is_ila(iaddr)) { - /* Don't allow setting a translation for a non-ILA address */ + if (!ila_addr_is_ila(iaddr) || ila_csum_neutral_set(iaddr->ident)) { + /* Don't allow translation for a non-ILA address or checksum + * neutral flag to be set. 
+ */ return -EINVAL; } @@ -108,6 +111,11 @@ static int ila_build_state(struct net_device *dev, struct nlattr *nla, p->csum_diff = compute_csum_diff8( (__be32 *)&p->locator_match, (__be32 *)&p->locator); + if (tb[ILA_ATTR_CSUM_MODE]) + p->csum_mode = nla_get_u8(tb[ILA_ATTR_CSUM_MODE]); + + ila_init_saved_csum(p); + newts->type = LWTUNNEL_ENCAP_ILA; newts->flags |= LWTUNNEL_STATE_OUTPUT_REDIRECT | LWTUNNEL_STATE_INPUT_REDIRECT; @@ -125,6 +133,8 @@ static int ila_fill_encap_info(struct sk_buff *skb, if (nla_put_u64_64bit(skb, ILA_ATTR_LOCATOR, (__force u64)p->locator.v64, ILA_ATTR_PAD)) goto nla_put_failure; + if (nla_put_u64(skb, ILA_ATTR_CSUM_MODE, (__force u8)p->csum_mode)) + goto nla_put_failure; return 0; diff --git a/net/ipv6/ila/ila_xlat.c b/net/ipv6/ila/ila_xlat.c index 2e6cb97aee19..a90e57229c6c 100644 --- a/net/ipv6/ila/ila_xlat.c +++ b/net/ipv6/ila/ila_xlat.c @@ -132,6 +132,7 @@ static struct nla_policy ila_nl_policy[ILA_ATTR_MAX + 1] = { [ILA_ATTR_LOCATOR] = { .type = NLA_U64, }, [ILA_ATTR_LOCATOR_MATCH] = { .type = NLA_U64, }, [ILA_ATTR_IFINDEX] = { .type = NLA_U32, }, + [ILA_ATTR_CSUM_MODE] = { .type = NLA_U8, }, }; static int parse_nl_config(struct genl_info *info, @@ -147,6 +148,9 @@ static int parse_nl_config(struct genl_info *info, xp->ip.locator_match.v64 = (__force __be64)nla_get_u64( info->attrs[ILA_ATTR_LOCATOR_MATCH]); + if (info->attrs[ILA_ATTR_CSUM_MODE]) + xp->ip.csum_mode = nla_get_u8(info->attrs[ILA_ATTR_CSUM_MODE]); + if (info->attrs[ILA_ATTR_IFINDEX]) xp->ifindex = nla_get_s32(info->attrs[ILA_ATTR_IFINDEX]); @@ -249,14 +253,9 @@ static int ila_add_mapping(struct net *net, struct ila_xlat_params *xp) if (!ila) return -ENOMEM; - ila->xp = *xp; + ila_init_saved_csum(&xp->ip); - /* Precompute checksum difference for translation since we - * know both the old identifier and the new one. - */ - ila->xp.ip.csum_diff = compute_csum_diff8( - (__be32 *)&xp->ip.locator_match, - (__be32 *)&xp->ip.locator); + ila->xp = *xp; order = ila_order(ila); @@ -408,7 +407,8 @@ static int ila_fill_info(struct ila_map *ila, struct sk_buff *msg) nla_put_u64_64bit(msg, ILA_ATTR_LOCATOR_MATCH, (__force u64)ila->xp.ip.locator_match.v64, ILA_ATTR_PAD) || - nla_put_s32(msg, ILA_ATTR_IFINDEX, ila->xp.ifindex)) + nla_put_s32(msg, ILA_ATTR_IFINDEX, ila->xp.ifindex) || + nla_put_u32(msg, ILA_ATTR_CSUM_MODE, ila->xp.ip.csum_mode)) return -1; return 0; From 739960f128e5a1f251659a4430a8898087701099 Mon Sep 17 00:00:00 2001 From: Mohammed Shafi Shajakhan Date: Thu, 7 Apr 2016 19:59:34 +0530 Subject: [PATCH 1014/1649] cfg80211/nl80211: Add support for NL80211_STA_INFO_RX_DURATION Add support for the a station statistics netlink attribute: NL80211_STA_INFO_RX_DURATION. If present, this attribute contains the aggregate PPDU duration (in microseconds) for all the frames from the peer. This is useful to help understand the total time spent transmitting to us by all of the connected peers. 
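As a hedged illustration (the helper and the airtime counter below are hypothetical; only station_info::rx_duration, the 64-bit filled word and NL80211_STA_INFO_RX_DURATION come from this patch), a driver that already accumulates per-peer RX airtime could export it from its station-statistics path roughly like this:

    static void example_fill_rx_duration(struct station_info *sinfo,
                                         u64 rx_airtime_usec)
    {
            /* aggregate PPDU airtime received from this peer, in usecs */
            sinfo->rx_duration = rx_airtime_usec;
            sinfo->filled |= 1ULL << NL80211_STA_INFO_RX_DURATION;
    }

nl80211 then emits the value through the PUT_SINFO(RX_DURATION, rx_duration, u64) line added in the diff below.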
Signed-off-by: Mohammed Shafi Shajakhan Signed-off-by: Johannes Berg --- include/net/cfg80211.h | 4 +++- include/uapi/linux/nl80211.h | 3 +++ net/wireless/nl80211.c | 3 ++- 3 files changed, 8 insertions(+), 2 deletions(-) diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 183916e168f1..c8414962683d 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -1045,11 +1045,12 @@ struct cfg80211_tid_stats { * @rx_beacon: number of beacons received from this peer * @rx_beacon_signal_avg: signal strength average (in dBm) for beacons received * from this peer + * @rx_duration: aggregate PPDU duration(usecs) for all the frames from a peer * @pertid: per-TID statistics, see &struct cfg80211_tid_stats, using the last * (IEEE80211_NUM_TIDS) index for MSDUs not encapsulated in QoS-MPDUs. */ struct station_info { - u32 filled; + u64 filled; u32 connected_time; u32 inactive_time; u64 rx_bytes; @@ -1088,6 +1089,7 @@ struct station_info { u32 expected_throughput; u64 rx_beacon; + u64 rx_duration; u8 rx_beacon_signal_avg; struct cfg80211_tid_stats pertid[IEEE80211_NUM_TIDS + 1]; }; diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index 2c55dd1894c3..51fc4abf6491 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -2513,6 +2513,8 @@ enum nl80211_sta_bss_param { * TID+1 and the special TID 16 (i.e. value 17) is used for non-QoS frames; * each one of those is again nested with &enum nl80211_tid_stats * attributes carrying the actual values. + * @NL80211_STA_INFO_RX_DURATION: aggregate PPDU duration for all frames + * received from the station (u64, usec) * @__NL80211_STA_INFO_AFTER_LAST: internal * @NL80211_STA_INFO_MAX: highest possible station info attribute */ @@ -2549,6 +2551,7 @@ enum nl80211_sta_info { NL80211_STA_INFO_BEACON_RX, NL80211_STA_INFO_BEACON_SIGNAL_AVG, NL80211_STA_INFO_TID_STATS, + NL80211_STA_INFO_RX_DURATION, /* keep last */ __NL80211_STA_INFO_AFTER_LAST, diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index afeb1ef1b199..5b0d2c8c2165 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -3755,7 +3755,7 @@ static int nl80211_send_station(struct sk_buff *msg, u32 cmd, u32 portid, goto nla_put_failure; #define PUT_SINFO(attr, memb, type) do { \ - if (sinfo->filled & BIT(NL80211_STA_INFO_ ## attr) && \ + if (sinfo->filled & (1ULL << NL80211_STA_INFO_ ## attr) && \ nla_put_ ## type(msg, NL80211_STA_INFO_ ## attr, \ sinfo->memb)) \ goto nla_put_failure; \ @@ -3781,6 +3781,7 @@ static int nl80211_send_station(struct sk_buff *msg, u32 cmd, u32 portid, PUT_SINFO(LLID, llid, u16); PUT_SINFO(PLID, plid, u16); PUT_SINFO(PLINK_STATE, plink_state, u8); + PUT_SINFO(RX_DURATION, rx_duration, u64); switch (rdev->wiphy.signal_type) { case CFG80211_SIGNAL_TYPE_MBM: From e705498945ad3a3b945771c5d683df064bb9819c Mon Sep 17 00:00:00 2001 From: "Kanchanapally, Vidyullatha" Date: Mon, 11 Apr 2016 15:16:01 +0530 Subject: [PATCH 1015/1649] cfg80211: Add option to report the bss entry in connect result Since cfg80211 maintains separate BSS table entries for APs if the same BSSID, SSID pair is seen on multiple channels, it is possible that it can map the current_bss to a BSS entry on the wrong channel. This current_bss will not get flushed unless disconnected and cfg80211 reports a wrong channel as the associated channel. Fix this by introducing a new cfg80211_connect_bss() function which is similar to cfg80211_connect_result(), but it includes an additional parameter: the bss the STA is connected to. 
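For illustration only (a hypothetical driver helper; every parameter name is a placeholder, and the cfg80211_get_bss() lookup is just one way to obtain the entry, as the kerneldoc below notes), a driver could report the exact BSS roughly like this:

    static void example_report_connect(struct wiphy *wiphy,
                                       struct net_device *ndev,
                                       struct ieee80211_channel *chan,
                                       const u8 *bssid,
                                       const u8 *ssid, size_t ssid_len,
                                       const u8 *req_ie, size_t req_ie_len,
                                       const u8 *resp_ie, size_t resp_ie_len)
    {
            struct cfg80211_bss *bss;

            /* bss may be NULL; per the comment this patch adds in sme.c,
             * cfg80211_connect_bss() consumes the bss object one way or
             * another, so no cfg80211_put_bss() is needed here.
             */
            bss = cfg80211_get_bss(wiphy, chan, bssid, ssid, ssid_len,
                                   IEEE80211_BSS_TYPE_ESS,
                                   IEEE80211_PRIVACY_ANY);
            cfg80211_connect_bss(ndev, bssid, bss, req_ie, req_ie_len,
                                 resp_ie, resp_ie_len, WLAN_STATUS_SUCCESS,
                                 GFP_KERNEL);
    }
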
This allows drivers to provide the exact bss entry that matches the BSS to which the connection was completed. Reviewed-by: Jouni Malinen Signed-off-by: Vidyullatha Kanchanapally Signed-off-by: Sunil Dutt Signed-off-by: Johannes Berg --- Documentation/DocBook/80211.tmpl | 1 + include/net/cfg80211.h | 39 ++++++++++++++++++++++++++++---- net/wireless/core.h | 1 + net/wireless/sme.c | 28 ++++++++++++++++++----- net/wireless/util.c | 2 +- 5 files changed, 60 insertions(+), 11 deletions(-) diff --git a/Documentation/DocBook/80211.tmpl b/Documentation/DocBook/80211.tmpl index f2a312b35875..5f7c55999c77 100644 --- a/Documentation/DocBook/80211.tmpl +++ b/Documentation/DocBook/80211.tmpl @@ -135,6 +135,7 @@ !Finclude/net/cfg80211.h cfg80211_tx_mlme_mgmt !Finclude/net/cfg80211.h cfg80211_ibss_joined !Finclude/net/cfg80211.h cfg80211_connect_result +!Finclude/net/cfg80211.h cfg80211_connect_bss !Finclude/net/cfg80211.h cfg80211_roamed !Finclude/net/cfg80211.h cfg80211_disconnected !Finclude/net/cfg80211.h cfg80211_ready_on_channel diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index c8414962683d..1e008cddd41d 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -4651,6 +4651,32 @@ static inline void cfg80211_testmode_event(struct sk_buff *skb, gfp_t gfp) #define CFG80211_TESTMODE_DUMP(cmd) #endif +/** + * cfg80211_connect_bss - notify cfg80211 of connection result + * + * @dev: network device + * @bssid: the BSSID of the AP + * @bss: entry of bss to which STA got connected to, can be obtained + * through cfg80211_get_bss (may be %NULL) + * @req_ie: association request IEs (maybe be %NULL) + * @req_ie_len: association request IEs length + * @resp_ie: association response IEs (may be %NULL) + * @resp_ie_len: assoc response IEs length + * @status: status code, 0 for successful connection, use + * %WLAN_STATUS_UNSPECIFIED_FAILURE if your device cannot give you + * the real status code for failures. + * @gfp: allocation flags + * + * It should be called by the underlying driver whenever connect() has + * succeeded. This is similar to cfg80211_connect_result(), but with the + * option of identifying the exact bss entry for the connection. Only one of + * these functions should be called. + */ +void cfg80211_connect_bss(struct net_device *dev, const u8 *bssid, + struct cfg80211_bss *bss, const u8 *req_ie, + size_t req_ie_len, const u8 *resp_ie, + size_t resp_ie_len, u16 status, gfp_t gfp); + /** * cfg80211_connect_result - notify cfg80211 of connection result * @@ -4668,10 +4694,15 @@ static inline void cfg80211_testmode_event(struct sk_buff *skb, gfp_t gfp) * It should be called by the underlying driver whenever connect() has * succeeded. 
*/ -void cfg80211_connect_result(struct net_device *dev, const u8 *bssid, - const u8 *req_ie, size_t req_ie_len, - const u8 *resp_ie, size_t resp_ie_len, - u16 status, gfp_t gfp); +static inline void +cfg80211_connect_result(struct net_device *dev, const u8 *bssid, + const u8 *req_ie, size_t req_ie_len, + const u8 *resp_ie, size_t resp_ie_len, + u16 status, gfp_t gfp) +{ + cfg80211_connect_bss(dev, bssid, NULL, req_ie, req_ie_len, resp_ie, + resp_ie_len, status, gfp); +} /** * cfg80211_roamed - notify cfg80211 of roaming diff --git a/net/wireless/core.h b/net/wireless/core.h index 022ccad06cbe..ac44e77ac2f2 100644 --- a/net/wireless/core.h +++ b/net/wireless/core.h @@ -214,6 +214,7 @@ struct cfg80211_event { const u8 *resp_ie; size_t req_ie_len; size_t resp_ie_len; + struct cfg80211_bss *bss; u16 status; } cr; struct { diff --git a/net/wireless/sme.c b/net/wireless/sme.c index e22e5b83cfa9..d814279fb556 100644 --- a/net/wireless/sme.c +++ b/net/wireless/sme.c @@ -753,19 +753,32 @@ void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid, kfree(country_ie); } -void cfg80211_connect_result(struct net_device *dev, const u8 *bssid, - const u8 *req_ie, size_t req_ie_len, - const u8 *resp_ie, size_t resp_ie_len, - u16 status, gfp_t gfp) +/* Consumes bss object one way or another */ +void cfg80211_connect_bss(struct net_device *dev, const u8 *bssid, + struct cfg80211_bss *bss, const u8 *req_ie, + size_t req_ie_len, const u8 *resp_ie, + size_t resp_ie_len, u16 status, gfp_t gfp) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); struct cfg80211_event *ev; unsigned long flags; + if (bss) { + /* Make sure the bss entry provided by the driver is valid. */ + struct cfg80211_internal_bss *ibss = bss_from_pub(bss); + + if (WARN_ON(list_empty(&ibss->list))) { + cfg80211_put_bss(wdev->wiphy, bss); + return; + } + } + ev = kzalloc(sizeof(*ev) + req_ie_len + resp_ie_len, gfp); - if (!ev) + if (!ev) { + cfg80211_put_bss(wdev->wiphy, bss); return; + } ev->type = EVENT_CONNECT_RESULT; if (bssid) @@ -780,6 +793,9 @@ void cfg80211_connect_result(struct net_device *dev, const u8 *bssid, ev->cr.resp_ie_len = resp_ie_len; memcpy((void *)ev->cr.resp_ie, resp_ie, resp_ie_len); } + if (bss) + cfg80211_hold_bss(bss_from_pub(bss)); + ev->cr.bss = bss; ev->cr.status = status; spin_lock_irqsave(&wdev->event_lock, flags); @@ -787,7 +803,7 @@ void cfg80211_connect_result(struct net_device *dev, const u8 *bssid, spin_unlock_irqrestore(&wdev->event_lock, flags); queue_work(cfg80211_wq, &rdev->event_work); } -EXPORT_SYMBOL(cfg80211_connect_result); +EXPORT_SYMBOL(cfg80211_connect_bss); /* Consumes bss object one way or another */ void __cfg80211_roamed(struct wireless_dev *wdev, diff --git a/net/wireless/util.c b/net/wireless/util.c index f36039888eb5..7cfabd6e83c6 100644 --- a/net/wireless/util.c +++ b/net/wireless/util.c @@ -950,7 +950,7 @@ void cfg80211_process_wdev_events(struct wireless_dev *wdev) ev->cr.resp_ie, ev->cr.resp_ie_len, ev->cr.status, ev->cr.status == WLAN_STATUS_SUCCESS, - NULL); + ev->cr.bss); break; case EVENT_ROAMED: __cfg80211_roamed(wdev, ev->rm.bss, ev->rm.req_ie, From 9b95fe59b18bcc891a6c60ae11d725c9c679574b Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Tue, 26 Apr 2016 09:42:39 +0200 Subject: [PATCH 1016/1649] nl80211: add missing kerneldoc for new *_PAD attributes Nicolas's patch missed this, now generating docbook warnings. Add the missing descriptions to address that. 
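For context, a minimal sketch (the helper name and the boottime variable are made up; nla_put_u64_64bit() and the two nl80211 constants are the ones this patch and its Fixes commit refer to) of how a *_PAD attribute is used:

    static int example_put_boottime(struct sk_buff *msg, u64 ts_boottime_ns)
    {
            /* The pad attribute lets netlink insert filler so the 8-byte
             * payload of the u64 attribute stays naturally aligned.
             */
            return nla_put_u64_64bit(msg, NL80211_BSS_LAST_SEEN_BOOTTIME,
                                     ts_boottime_ns, NL80211_BSS_PAD);
    }
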
Fixes: 2dad624e6dd6 ("wireless: use nla_put_u64_64bit()") Signed-off-by: Johannes Berg --- include/uapi/linux/nl80211.h | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index 51fc4abf6491..f958a7173eb4 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -1817,6 +1817,8 @@ enum nl80211_commands { * @NL80211_ATTR_STA_SUPPORT_P2P_PS: whether P2P PS mechanism supported * or not. u8, one of the values of &enum nl80211_sta_p2p_ps_status * + * @NL80211_ATTR_PAD: attribute used for padding for 64-bit alignment + * * @NUM_NL80211_ATTR: total number of nl80211_attrs available * @NL80211_ATTR_MAX: highest attribute number currently defined * @__NL80211_ATTR_AFTER_LAST: internal use @@ -3013,6 +3015,7 @@ enum nl80211_user_reg_hint_type { * transmitting data (on channel or globally) * @NL80211_SURVEY_INFO_TIME_SCAN: time the radio spent for scan * (on this channel or globally) + * @NL80211_SURVEY_INFO_PAD: attribute used for padding for 64-bit alignment * @NL80211_SURVEY_INFO_MAX: highest survey info attribute number * currently defined * @__NL80211_SURVEY_INFO_AFTER_LAST: internal use @@ -3454,6 +3457,7 @@ enum nl80211_bss_scan_width { * @NL80211_BSS_LAST_SEEN_BOOTTIME: CLOCK_BOOTTIME timestamp when this entry * was last updated by a received frame. The value is expected to be * accurate to about 10ms. (u64, nanoseconds) + * @NL80211_BSS_PAD: attribute used for padding for 64-bit alignment * @__NL80211_BSS_AFTER_LAST: internal * @NL80211_BSS_MAX: highest BSS attribute */ From e4c8b456c53d4beee5adaf5768c762171e2244f3 Mon Sep 17 00:00:00 2001 From: Jia-Ju Bai Date: Fri, 18 Mar 2016 13:27:09 +1100 Subject: [PATCH 1017/1649] rtl818x_pci: Fix a memory leak in rtl8180_init_rx_ring When dev_alloc_skb or pci_dma_mapping_error in rtl8180_init_rx_ring fails, the memory allocated by pci_zalloc_consistent is not freed. This patch fixes the bug by adding pci_free_consistent in error handling code. Signed-off-by: Jia-Ju Bai Signed-off-by: Julian Calaby Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c b/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c index ba242d0160ec..e895a84481da 100644 --- a/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c +++ b/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c @@ -1018,6 +1018,8 @@ static int rtl8180_init_rx_ring(struct ieee80211_hw *dev) dma_addr_t *mapping; entry = priv->rx_ring + priv->rx_ring_sz*i; if (!skb) { + pci_free_consistent(priv->pdev, priv->rx_ring_sz * 32, + priv->rx_ring, priv->rx_ring_dma); wiphy_err(dev->wiphy, "Cannot allocate RX skb\n"); return -ENOMEM; } @@ -1028,6 +1030,8 @@ static int rtl8180_init_rx_ring(struct ieee80211_hw *dev) if (pci_dma_mapping_error(priv->pdev, *mapping)) { kfree_skb(skb); + pci_free_consistent(priv->pdev, priv->rx_ring_sz * 32, + priv->rx_ring, priv->rx_ring_dma); wiphy_err(dev->wiphy, "Cannot map DMA for RX skb\n"); return -ENOMEM; } From 706a527ca32b3bf950754631fa42982c0f1c060b Mon Sep 17 00:00:00 2001 From: Tina Ruchandani Date: Tue, 12 Apr 2016 23:09:16 -0700 Subject: [PATCH 1018/1649] prism54: isl_38xx: Replace 'struct timeval' 'struct timeval' uses a 32-bit seconds field which will overflow in year 2038 and beyond. This patch is part of a larger effort to remove all instances of 'struct timeval' from the kernel and replace them with 64-bit timekeeping variables. 
The patch also fixes the debug printf specifier to avoid the seconds value being truncated. The patch was build-tested / debugged by removing the "if VERBOSE > SHOW_ERROR_MESSAGES" guards. Signed-off-by: Tina Ruchandani Suggested-by: Arnd Bergmann Signed-off-by: Kalle Valo --- .../net/wireless/intersil/prism54/isl_38xx.c | 35 ++++++++++--------- 1 file changed, 18 insertions(+), 17 deletions(-) diff --git a/drivers/net/wireless/intersil/prism54/isl_38xx.c b/drivers/net/wireless/intersil/prism54/isl_38xx.c index 333c1a2f882e..6700387ef9ab 100644 --- a/drivers/net/wireless/intersil/prism54/isl_38xx.c +++ b/drivers/net/wireless/intersil/prism54/isl_38xx.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include @@ -113,7 +114,7 @@ isl38xx_trigger_device(int asleep, void __iomem *device_base) #if VERBOSE > SHOW_ERROR_MESSAGES u32 counter = 0; - struct timeval current_time; + struct timespec64 current_ts64; DEBUG(SHOW_FUNCTION_CALLS, "isl38xx trigger device\n"); #endif @@ -121,22 +122,22 @@ isl38xx_trigger_device(int asleep, void __iomem *device_base) if (asleep) { /* device is in powersave, trigger the device for wakeup */ #if VERBOSE > SHOW_ERROR_MESSAGES - do_gettimeofday(¤t_time); - DEBUG(SHOW_TRACING, "%08li.%08li Device wakeup triggered\n", - current_time.tv_sec, (long)current_time.tv_usec); + ktime_get_real_ts64(¤t_ts64); + DEBUG(SHOW_TRACING, "%lld.%09ld Device wakeup triggered\n", + (s64)current_ts64.tv_sec, current_ts64.tv_nsec); - DEBUG(SHOW_TRACING, "%08li.%08li Device register read %08x\n", - current_time.tv_sec, (long)current_time.tv_usec, + DEBUG(SHOW_TRACING, "%lld.%09ld Device register read %08x\n", + (s64)current_ts64.tv_sec, current_ts64.tv_nsec, readl(device_base + ISL38XX_CTRL_STAT_REG)); #endif reg = readl(device_base + ISL38XX_INT_IDENT_REG); if (reg == 0xabadface) { #if VERBOSE > SHOW_ERROR_MESSAGES - do_gettimeofday(¤t_time); + ktime_get_real_ts64(¤t_ts64); DEBUG(SHOW_TRACING, - "%08li.%08li Device register abadface\n", - current_time.tv_sec, (long)current_time.tv_usec); + "%lld.%09ld Device register abadface\n", + (s64)current_ts64.tv_sec, current_ts64.tv_nsec); #endif /* read the Device Status Register until Sleepmode bit is set */ while (reg = readl(device_base + ISL38XX_CTRL_STAT_REG), @@ -149,13 +150,13 @@ isl38xx_trigger_device(int asleep, void __iomem *device_base) #if VERBOSE > SHOW_ERROR_MESSAGES DEBUG(SHOW_TRACING, - "%08li.%08li Device register read %08x\n", - current_time.tv_sec, (long)current_time.tv_usec, + "%lld.%09ld Device register read %08x\n", + (s64)current_ts64.tv_sec, current_ts64.tv_nsec, readl(device_base + ISL38XX_CTRL_STAT_REG)); - do_gettimeofday(¤t_time); + ktime_get_real_ts64(¤t_ts64); DEBUG(SHOW_TRACING, - "%08li.%08li Device asleep counter %i\n", - current_time.tv_sec, (long)current_time.tv_usec, + "%lld.%09ld Device asleep counter %i\n", + (s64)current_ts64.tv_sec, current_ts64.tv_nsec, counter); #endif } @@ -168,9 +169,9 @@ isl38xx_trigger_device(int asleep, void __iomem *device_base) /* perform another read on the Device Status Register */ reg = readl(device_base + ISL38XX_CTRL_STAT_REG); - do_gettimeofday(¤t_time); - DEBUG(SHOW_TRACING, "%08li.%08li Device register read %08x\n", - current_time.tv_sec, (long)current_time.tv_usec, reg); + ktime_get_real_ts64(¤t_ts64); + DEBUG(SHOW_TRACING, "%lld.%00ld Device register read %08x\n", + (s64)current_ts64.tv_sec, current_ts64.tv_nsec, reg); #endif } else { /* device is (still) awake */ From 005a425b24e101d312eca669b96a6b71d75e97fc Mon Sep 17 00:00:00 2001 From: 
=?UTF-8?q?David=20M=C3=BCller?= Date: Fri, 15 Apr 2016 08:50:25 +0200 Subject: [PATCH 1019/1649] rtlwifi: rtl8821ae: Make sure loop counter is signed on all architectures MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The for-loop condition does not work correctly on architectures where "char" is unsigned. Fix it by using an "int", which may also result in more efficient code. Signed-off-by: David Müller Acked-by: Larry Finger Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c index ddf74d527017..0c3b9ce86e2e 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c @@ -959,7 +959,7 @@ static void _rtl8821ae_phy_store_txpower_by_rate_base(struct ieee80211_hw *hw) static void _phy_convert_txpower_dbm_to_relative_value(u32 *data, u8 start, u8 end, u8 base_val) { - char i = 0; + int i; u8 temp_value = 0; u32 temp_data = 0; From 7705ba6f7badb8cf38a0a19dad71e11a77ecb9cd Mon Sep 17 00:00:00 2001 From: Arend van Spriel Date: Sun, 17 Apr 2016 16:44:58 +0200 Subject: [PATCH 1020/1649] brcmfmac: add support for nl80211 BSS_SELECT feature Announce support for nl80211 feature BSS_SELECT and process BSS selection behaviour provided in .connect() callback. Reviewed-by: Hante Meuleman Reviewed-by: Franky (Zhenhui) Lin Reviewed-by: Pieter-Paul Giesberts Reviewed-by: Lei Zhang Signed-off-by: Arend van Spriel Signed-off-by: Kalle Valo --- .../broadcom/brcm80211/brcmfmac/cfg80211.c | 64 +++++++++++++++++++ .../broadcom/brcm80211/brcmfmac/common.c | 38 ++++++----- .../broadcom/brcm80211/brcmfmac/core.h | 1 + .../broadcom/brcm80211/brcmfmac/fwil.h | 1 + 4 files changed, 89 insertions(+), 15 deletions(-) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c index 8daad782b3c3..d0631b6cfd53 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c @@ -250,6 +250,20 @@ struct parsed_vndr_ies { struct parsed_vndr_ie_info ie_info[VNDR_IE_PARSE_LIMIT]; }; +static u8 nl80211_band_to_fwil(enum nl80211_band band) +{ + switch (band) { + case NL80211_BAND_2GHZ: + return WLC_BAND_2G; + case NL80211_BAND_5GHZ: + return WLC_BAND_5G; + default: + WARN_ON(1); + break; + } + return 0; +} + static u16 chandef_to_chanspec(struct brcmu_d11inf *d11inf, struct cfg80211_chan_def *ch) { @@ -1796,6 +1810,50 @@ enum nl80211_auth_type brcmf_war_auth_type(struct brcmf_if *ifp, return type; } +static void brcmf_set_join_pref(struct brcmf_if *ifp, + struct cfg80211_bss_selection *bss_select) +{ + struct brcmf_join_pref_params join_pref_params[2]; + enum nl80211_band band; + int err, i = 0; + + join_pref_params[i].len = 2; + join_pref_params[i].rssi_gain = 0; + + if (bss_select->behaviour != NL80211_BSS_SELECT_ATTR_BAND_PREF) + brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_ASSOC_PREFER, WLC_BAND_AUTO); + + switch (bss_select->behaviour) { + case __NL80211_BSS_SELECT_ATTR_INVALID: + brcmf_c_set_joinpref_default(ifp); + return; + case NL80211_BSS_SELECT_ATTR_BAND_PREF: + join_pref_params[i].type = BRCMF_JOIN_PREF_BAND; + band = bss_select->param.band_pref; + join_pref_params[i].band = nl80211_band_to_fwil(band); + i++; + break; + case NL80211_BSS_SELECT_ATTR_RSSI_ADJUST: + 
join_pref_params[i].type = BRCMF_JOIN_PREF_RSSI_DELTA; + band = bss_select->param.adjust.band; + join_pref_params[i].band = nl80211_band_to_fwil(band); + join_pref_params[i].rssi_gain = bss_select->param.adjust.delta; + i++; + break; + case NL80211_BSS_SELECT_ATTR_RSSI: + default: + break; + } + join_pref_params[i].type = BRCMF_JOIN_PREF_RSSI; + join_pref_params[i].len = 2; + join_pref_params[i].rssi_gain = 0; + join_pref_params[i].band = 0; + err = brcmf_fil_iovar_data_set(ifp, "join_pref", join_pref_params, + sizeof(join_pref_params)); + if (err) + brcmf_err("Set join_pref error (%d)\n", err); +} + static s32 brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev, struct cfg80211_connect_params *sme) @@ -1952,6 +2010,8 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev, ext_join_params->scan_le.nprobes = cpu_to_le32(-1); } + brcmf_set_join_pref(ifp, &sme->bss_select); + err = brcmf_fil_bsscfg_data_set(ifp, "join", ext_join_params, join_params_size); kfree(ext_join_params); @@ -6280,6 +6340,10 @@ static int brcmf_setup_wiphy(struct wiphy *wiphy, struct brcmf_if *ifp) wiphy->n_cipher_suites = ARRAY_SIZE(brcmf_cipher_suites); if (!brcmf_feat_is_enabled(ifp, BRCMF_FEAT_MFP)) wiphy->n_cipher_suites--; + wiphy->bss_select_support = BIT(NL80211_BSS_SELECT_ATTR_RSSI) | + BIT(NL80211_BSS_SELECT_ATTR_BAND_PREF) | + BIT(NL80211_BSS_SELECT_ATTR_RSSI_ADJUST); + wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT | WIPHY_FLAG_OFFCHAN_TX | WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c index 9e909e3c2f0c..3e15d64c6481 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c @@ -38,7 +38,7 @@ const u8 ALLFFMAC[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; #define BRCMF_DEFAULT_SCAN_CHANNEL_TIME 40 #define BRCMF_DEFAULT_SCAN_UNASSOC_TIME 40 -/* boost value for RSSI_DELTA in preferred join selection */ +/* default boost value for RSSI_DELTA in preferred join selection */ #define BRCMF_JOIN_PREF_RSSI_BOOST 8 #define BRCMF_DEFAULT_TXGLOM_SIZE 32 /* max tx frames in glom chain */ @@ -83,11 +83,31 @@ MODULE_PARM_DESC(ignore_probe_fail, "always succeed probe for debugging"); static struct brcmfmac_platform_data *brcmfmac_pdata; struct brcmf_mp_global_t brcmf_mp_global; +void brcmf_c_set_joinpref_default(struct brcmf_if *ifp) +{ + struct brcmf_join_pref_params join_pref_params[2]; + int err; + + /* Setup join_pref to select target by RSSI (boost on 5GHz) */ + join_pref_params[0].type = BRCMF_JOIN_PREF_RSSI_DELTA; + join_pref_params[0].len = 2; + join_pref_params[0].rssi_gain = BRCMF_JOIN_PREF_RSSI_BOOST; + join_pref_params[0].band = WLC_BAND_5G; + + join_pref_params[1].type = BRCMF_JOIN_PREF_RSSI; + join_pref_params[1].len = 2; + join_pref_params[1].rssi_gain = 0; + join_pref_params[1].band = 0; + err = brcmf_fil_iovar_data_set(ifp, "join_pref", join_pref_params, + sizeof(join_pref_params)); + if (err) + brcmf_err("Set join_pref error (%d)\n", err); +} + int brcmf_c_preinit_dcmds(struct brcmf_if *ifp) { s8 eventmask[BRCMF_EVENTING_MASK_LEN]; u8 buf[BRCMF_DCMD_SMLEN]; - struct brcmf_join_pref_params join_pref_params[2]; struct brcmf_rev_info_le revinfo; struct brcmf_rev_info *ri; char *ptr; @@ -154,19 +174,7 @@ int brcmf_c_preinit_dcmds(struct brcmf_if *ifp) goto done; } - /* Setup join_pref to select target by RSSI(with boost on 5GHz) */ - join_pref_params[0].type = 
BRCMF_JOIN_PREF_RSSI_DELTA; - join_pref_params[0].len = 2; - join_pref_params[0].rssi_gain = BRCMF_JOIN_PREF_RSSI_BOOST; - join_pref_params[0].band = WLC_BAND_5G; - join_pref_params[1].type = BRCMF_JOIN_PREF_RSSI; - join_pref_params[1].len = 2; - join_pref_params[1].rssi_gain = 0; - join_pref_params[1].band = 0; - err = brcmf_fil_iovar_data_set(ifp, "join_pref", join_pref_params, - sizeof(join_pref_params)); - if (err) - brcmf_err("Set join_pref error (%d)\n", err); + brcmf_c_set_joinpref_default(ifp); /* Setup event_msgs, enable E_IF */ err = brcmf_fil_iovar_data_get(ifp, "event_msgs", eventmask, diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h index 241ee8d13e54..647d3cc2a4dc 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h @@ -223,6 +223,7 @@ void brcmf_txflowblock_if(struct brcmf_if *ifp, void brcmf_txfinalize(struct brcmf_if *ifp, struct sk_buff *txp, bool success); void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb); void brcmf_net_setcarrier(struct brcmf_if *ifp, bool on); +void brcmf_c_set_joinpref_default(struct brcmf_if *ifp); int __init brcmf_core_init(void); void __exit brcmf_core_exit(void); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h index 6b72df17744e..3a9a76dd9222 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h @@ -78,6 +78,7 @@ #define BRCMF_C_SET_SCAN_CHANNEL_TIME 185 #define BRCMF_C_SET_SCAN_UNASSOC_TIME 187 #define BRCMF_C_SCB_DEAUTHENTICATE_FOR_REASON 201 +#define BRCMF_C_SET_ASSOC_PREFER 205 #define BRCMF_C_GET_VALID_CHANNELS 217 #define BRCMF_C_GET_KEY_PRIMARY 235 #define BRCMF_C_SET_KEY_PRIMARY 236 From 53985dccb1c98b7af080e2314bff0c5024e781b0 Mon Sep 17 00:00:00 2001 From: Per Forlin Date: Sun, 17 Apr 2016 15:25:03 +0200 Subject: [PATCH 1021/1649] brcmf: Fix null pointer exception in bcdc_hdrpull In fwsignal.c: brcmf_fws_commit_skb() ... if (rc < 0) { entry->transit_count--; if (entry->suppressed) entry->suppr_transit_count--; (void)brcmf_proto_hdrpull(fws->drvr, false, skb, NULL); ^^^^^^^ goto rollback; } ... The call to hdrpull will trigger a null pointer exception unless a null check is made in the method implementation. Signed-off-by: Per Forlin Signed-off-by: Kalle Valo --- drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c index 288fe906c80e..d1bc51f92686 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c @@ -321,7 +321,8 @@ brcmf_proto_bcdc_hdrpull(struct brcmf_pub *drvr, bool do_fws, if (pktbuf->len == 0) return -ENODATA; - *ifp = tmp_if; + if (ifp != NULL) + *ifp = tmp_if; return 0; } From 84039920bdff60030b2b79e50e4c9d230ae00dad Mon Sep 17 00:00:00 2001 From: Xinming Hu Date: Mon, 18 Apr 2016 05:22:22 -0700 Subject: [PATCH 1022/1649] dt: bindings: add MARVELL's sd8xxx wireless device Add device tree binding documentation for MARVELL's sd8xxx (sd8897 and sd8997) wlan chip. 
Signed-off-by: Xinming Hu Signed-off-by: Amitkumar Karwar Acked-by: Rob Herring Signed-off-by: Kalle Valo --- .../bindings/net/wireless/marvell-sd8xxx.txt | 63 +++++++++++++++++++ 1 file changed, 63 insertions(+) create mode 100644 Documentation/devicetree/bindings/net/wireless/marvell-sd8xxx.txt diff --git a/Documentation/devicetree/bindings/net/wireless/marvell-sd8xxx.txt b/Documentation/devicetree/bindings/net/wireless/marvell-sd8xxx.txt new file mode 100644 index 000000000000..c421aba0a5bc --- /dev/null +++ b/Documentation/devicetree/bindings/net/wireless/marvell-sd8xxx.txt @@ -0,0 +1,63 @@ +Marvell 8897/8997 (sd8897/sd8997) SDIO devices +------ + +This node provides properties for controlling the marvell sdio wireless device. +The node is expected to be specified as a child node to the SDIO controller that +connects the device to the system. + +Required properties: + + - compatible : should be one of the following: + * "marvell,sd8897" + * "marvell,sd8997" + +Optional properties: + + - marvell,caldata* : A series of properties with marvell,caldata prefix, + represent calibration data downloaded to the device during + initialization. This is an array of unsigned 8-bit values. + the properties should follow below property name and + corresponding array length: + "marvell,caldata-txpwrlimit-2g" (length = 566). + "marvell,caldata-txpwrlimit-5g-sub0" (length = 502). + "marvell,caldata-txpwrlimit-5g-sub1" (length = 688). + "marvell,caldata-txpwrlimit-5g-sub2" (length = 750). + "marvell,caldata-txpwrlimit-5g-sub3" (length = 502). + - marvell,wakeup-pin : a wakeup pin number of wifi chip which will be configured + to firmware. Firmware will wakeup the host using this pin + during suspend/resume. + - interrupt-parent: phandle of the parent interrupt controller + - interrupts : interrupt pin number to the cpu. driver will request an irq based on + this interrupt number. during system suspend, the irq will be enabled + so that the wifi chip can wakeup host platform under certain condition. + during system resume, the irq will be disabled to make sure + unnecessary interrupt is not received. + +Example: + +Tx power limit calibration data is configured in below example. +The calibration data is an array of unsigned values, the length +can vary between hw versions. +IRQ pin 38 is used as system wakeup source interrupt. wakeup pin 3 is configured +so that firmware can wakeup host using this device side pin. + +&mmc3 { + status = "okay"; + vmmc-supply = <&wlan_en_reg>; + bus-width = <4>; + cap-power-off-card; + keep-power-in-suspend; + + #address-cells = <1>; + #size-cells = <0>; + mwifiex: wifi@1 { + compatible = "marvell,sd8897"; + reg = <1>; + interrupt-parent = <&pio>; + interrupts = <38 IRQ_TYPE_LEVEL_LOW>; + + marvell,caldata_00_txpwrlimit_2g_cfg_set = /bits/ 8 < + 0x01 0x00 0x06 0x00 0x08 0x02 0x89 0x01>; + marvell,wakeup-pin = <3>; + }; +}; From ce4f6f0c353b7bfd7b527667287a87fd83aea119 Mon Sep 17 00:00:00 2001 From: Xinming Hu Date: Mon, 18 Apr 2016 05:22:23 -0700 Subject: [PATCH 1023/1649] mwifiex: add platform specific wakeup interrupt support On some arm-based platforms, we need to configure platform specific parameters by device tree node and also define our node as a child node of parent SDIO host controller. This patch parses these parameters from device tree. It includes calibration data dowoload to firmware, wakeup pin configured to firmware, and soc specific wake up gpio, which will be set as wakeup interrupt pin. 
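Note that two different pins are involved here: the host-side interrupt line that is armed as a system wakeup source, and the device-side pin number handed to the firmware so it can wake the host. A minimal child node carrying both, sketched from the binding added in the previous patch (pin numbers are placeholders):

	wifi@1 {
		compatible = "marvell,sd8897";
		reg = <1>;
		/* host GPIO/IRQ armed as wakeup source during suspend */
		interrupt-parent = <&pio>;
		interrupts = <38 IRQ_TYPE_LEVEL_LOW>;
		/* device-side pin passed to firmware via hs_cfg.gpio */
		marvell,wakeup-pin = <3>;
	};
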
Signed-off-by: Xinming Hu Signed-off-by: Amitkumar Karwar Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/main.h | 11 +++ drivers/net/wireless/marvell/mwifiex/sdio.c | 77 +++++++++++++++++++ drivers/net/wireless/marvell/mwifiex/sdio.h | 7 ++ .../net/wireless/marvell/mwifiex/sta_cmd.c | 14 +++- 4 files changed, 106 insertions(+), 3 deletions(-) diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h index 63069dd8b8e8..4c742a597cb0 100644 --- a/drivers/net/wireless/marvell/mwifiex/main.h +++ b/drivers/net/wireless/marvell/mwifiex/main.h @@ -37,6 +37,17 @@ #include #include #include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include #include "decl.h" #include "ioctl.h" diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c index a0aec3e00457..cbd9dcd88b98 100644 --- a/drivers/net/wireless/marvell/mwifiex/sdio.c +++ b/drivers/net/wireless/marvell/mwifiex/sdio.c @@ -73,6 +73,66 @@ static struct memory_type_mapping mem_type_mapping_tbl[] = { {"EXTLAST", NULL, 0, 0xFE}, }; +static const struct of_device_id mwifiex_sdio_of_match_table[] = { + { .compatible = "marvell,sd8897" }, + { .compatible = "marvell,sd8997" }, + { } +}; + +static irqreturn_t mwifiex_wake_irq_wifi(int irq, void *priv) +{ + struct mwifiex_plt_wake_cfg *cfg = priv; + + if (cfg->irq_wifi >= 0) { + pr_info("%s: wake by wifi", __func__); + cfg->wake_by_wifi = true; + disable_irq_nosync(irq); + } + + return IRQ_HANDLED; +} + +/* This function parse device tree node using mmc subnode devicetree API. + * The device node is saved in card->plt_of_node. + * if the device tree node exist and include interrupts attributes, this + * function will also request platform specific wakeup interrupt. + */ +static int mwifiex_sdio_probe_of(struct device *dev, struct sdio_mmc_card *card) +{ + struct mwifiex_plt_wake_cfg *cfg; + int ret; + + if (!dev->of_node || + !of_match_node(mwifiex_sdio_of_match_table, dev->of_node)) { + pr_err("sdio platform data not available"); + return -1; + } + + card->plt_of_node = dev->of_node; + card->plt_wake_cfg = devm_kzalloc(dev, sizeof(*card->plt_wake_cfg), + GFP_KERNEL); + cfg = card->plt_wake_cfg; + if (cfg && card->plt_of_node) { + cfg->irq_wifi = irq_of_parse_and_map(card->plt_of_node, 0); + if (!cfg->irq_wifi) { + dev_err(dev, "fail to parse irq_wifi from device tree"); + } else { + ret = devm_request_irq(dev, cfg->irq_wifi, + mwifiex_wake_irq_wifi, + IRQF_TRIGGER_LOW, + "wifi_wake", cfg); + if (ret) { + dev_err(dev, + "Failed to request irq_wifi %d (%d)\n", + cfg->irq_wifi, ret); + } + disable_irq(cfg->irq_wifi); + } + } + + return 0; +} + /* * SDIO probe. 
* @@ -127,6 +187,9 @@ mwifiex_sdio_probe(struct sdio_func *func, const struct sdio_device_id *id) return -EIO; } + /* device tree node parsing and platform specific configuration*/ + mwifiex_sdio_probe_of(&func->dev, card); + if (mwifiex_add_card(card, &add_remove_card_sem, &sdio_ops, MWIFIEX_SDIO)) { pr_err("%s: add card failed\n", __func__); @@ -183,6 +246,13 @@ static int mwifiex_sdio_resume(struct device *dev) mwifiex_cancel_hs(mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA), MWIFIEX_SYNC_CMD); + /* Disable platform specific wakeup interrupt */ + if (card->plt_wake_cfg && card->plt_wake_cfg->irq_wifi >= 0) { + disable_irq_wake(card->plt_wake_cfg->irq_wifi); + if (!card->plt_wake_cfg->wake_by_wifi) + disable_irq(card->plt_wake_cfg->irq_wifi); + } + return 0; } @@ -262,6 +332,13 @@ static int mwifiex_sdio_suspend(struct device *dev) adapter = card->adapter; + /* Enable platform specific wakeup interrupt */ + if (card->plt_wake_cfg && card->plt_wake_cfg->irq_wifi >= 0) { + card->plt_wake_cfg->wake_by_wifi = false; + enable_irq(card->plt_wake_cfg->irq_wifi); + enable_irq_wake(card->plt_wake_cfg->irq_wifi); + } + /* Enable the Host Sleep */ if (!mwifiex_enable_hs(adapter)) { mwifiex_dbg(adapter, ERROR, diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.h b/drivers/net/wireless/marvell/mwifiex/sdio.h index b9fbc5cf6262..db837f12c547 100644 --- a/drivers/net/wireless/marvell/mwifiex/sdio.h +++ b/drivers/net/wireless/marvell/mwifiex/sdio.h @@ -154,6 +154,11 @@ a->mpa_rx.start_port = 0; \ } while (0) +struct mwifiex_plt_wake_cfg { + int irq_wifi; + bool wake_by_wifi; +}; + /* data structure for SDIO MPA TX */ struct mwifiex_sdio_mpa_tx { /* multiport tx aggregation buffer pointer */ @@ -237,6 +242,8 @@ struct mwifiex_sdio_card_reg { struct sdio_mmc_card { struct sdio_func *func; struct mwifiex_adapter *adapter; + struct device_node *plt_of_node; + struct mwifiex_plt_wake_cfg *plt_wake_cfg; const char *firmware; const struct mwifiex_sdio_card_reg *reg; diff --git a/drivers/net/wireless/marvell/mwifiex/sta_cmd.c b/drivers/net/wireless/marvell/mwifiex/sta_cmd.c index 8cb895b7f2ee..e436574b1698 100644 --- a/drivers/net/wireless/marvell/mwifiex/sta_cmd.c +++ b/drivers/net/wireless/marvell/mwifiex/sta_cmd.c @@ -2162,6 +2162,7 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta, bool init) enum state_11d_t state_11d; struct mwifiex_ds_11n_tx_cfg tx_cfg; u8 sdio_sp_rx_aggr_enable; + int data; if (first_sta) { if (priv->adapter->iface_type == MWIFIEX_PCIE) { @@ -2182,9 +2183,16 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta, bool init) * The cal-data can be read from device tree and/or * a configuration file and downloaded to firmware. */ - adapter->dt_node = - of_find_node_by_name(NULL, "marvell_cfgdata"); - if (adapter->dt_node) { + if (priv->adapter->iface_type == MWIFIEX_SDIO && + adapter->dev->of_node) { + adapter->dt_node = adapter->dev->of_node; + if (of_property_read_u32(adapter->dt_node, + "marvell,wakeup-pin", + &data) == 0) { + pr_debug("Wakeup pin = 0x%x\n", data); + adapter->hs_cfg.gpio = data; + } + ret = mwifiex_dnld_dt_cfgdata(priv, adapter->dt_node, "marvell,caldata"); if (ret) From eaf46b5fda3acd75df68f02d25754dccb446ec2d Mon Sep 17 00:00:00 2001 From: Xinming Hu Date: Mon, 18 Apr 2016 06:42:55 -0700 Subject: [PATCH 1024/1649] mwifiex: stop background scan when net device closed Transmit data path should not touch background scan. We will stop background scan when net device is closed. 
Signed-off-by: Xinming Hu Signed-off-by: Amitkumar Karwar Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/main.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c index b459c70dc43f..8b67a552a690 100644 --- a/drivers/net/wireless/marvell/mwifiex/main.c +++ b/drivers/net/wireless/marvell/mwifiex/main.c @@ -702,6 +702,13 @@ mwifiex_close(struct net_device *dev) priv->scan_aborting = true; } + if (priv->sched_scanning) { + mwifiex_dbg(priv->adapter, INFO, + "aborting bgscan on ndo_stop\n"); + mwifiex_stop_bg_scan(priv); + cfg80211_sched_scan_stopped(priv->wdev.wiphy); + } + return 0; } @@ -753,13 +760,6 @@ int mwifiex_queue_tx_pkt(struct mwifiex_private *priv, struct sk_buff *skb) mwifiex_queue_main_work(priv->adapter); - if (priv->sched_scanning) { - mwifiex_dbg(priv->adapter, INFO, - "aborting bgscan on ndo_stop\n"); - mwifiex_stop_bg_scan(priv); - cfg80211_sched_scan_stopped(priv->wdev.wiphy); - } - return 0; } From a8c8dfa5931970a31794eeb65ced790fcff099d0 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Mon, 18 Apr 2016 11:49:21 -0400 Subject: [PATCH 1025/1649] rtl8xxxu: Rename rtl8723bu_update_rate_mask() to rtl8xxxu_gen2_update_rate_mask() Update the name of rtl8723bu_update_rate_mask() to make it reflect it's applicable for all/most gen2 N parts. Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 0ba84b5fe0d6..bea45095944f 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -8223,8 +8223,8 @@ static void rtl8723au_update_rate_mask(struct rtl8xxxu_priv *priv, rtl8723a_h2c_cmd(priv, &h2c, sizeof(h2c.ramask)); } -static void rtl8723bu_update_rate_mask(struct rtl8xxxu_priv *priv, - u32 ramask, int sgi) +static void rtl8xxxu_gen2_update_rate_mask(struct rtl8xxxu_priv *priv, + u32 ramask, int sgi) { struct h2c_cmd h2c; u8 bw = 0; @@ -9925,7 +9925,7 @@ static struct rtl8xxxu_fileops rtl8723bu_fops = { .disable_rf = rtl8723b_disable_rf, .usb_quirks = rtl8xxxu_gen2_usb_quirks, .set_tx_power = rtl8723b_set_tx_power, - .update_rate_mask = rtl8723bu_update_rate_mask, + .update_rate_mask = rtl8xxxu_gen2_update_rate_mask, .report_connect = rtl8723bu_report_connect, .writeN_block_size = 1024, .mbox_ext_reg = REG_HMBOX_EXT0_8723B, @@ -9996,7 +9996,7 @@ static struct rtl8xxxu_fileops rtl8192eu_fops = { .disable_rf = rtl8723b_disable_rf, .usb_quirks = rtl8xxxu_gen2_usb_quirks, .set_tx_power = rtl8192e_set_tx_power, - .update_rate_mask = rtl8723bu_update_rate_mask, + .update_rate_mask = rtl8xxxu_gen2_update_rate_mask, .report_connect = rtl8723bu_report_connect, .writeN_block_size = 128, .mbox_ext_reg = REG_HMBOX_EXT0_8723B, From 32353a784f431185690919049b6951d309dc186e Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Mon, 18 Apr 2016 11:49:22 -0400 Subject: [PATCH 1026/1649] rtl8xxxu: Rename rtl8723bu_report_connect() to rtl8xxxu_gen2_report_connect() Make the name reflect this is for most/all gen2 parts. 
Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index bea45095944f..4af0c33b71bd 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -8266,8 +8266,8 @@ static void rtl8723au_report_connect(struct rtl8xxxu_priv *priv, rtl8723a_h2c_cmd(priv, &h2c, sizeof(h2c.joinbss)); } -static void rtl8723bu_report_connect(struct rtl8xxxu_priv *priv, - u8 macid, bool connect) +static void rtl8xxxu_gen2_report_connect(struct rtl8xxxu_priv *priv, + u8 macid, bool connect) { struct h2c_cmd h2c; @@ -9926,7 +9926,7 @@ static struct rtl8xxxu_fileops rtl8723bu_fops = { .usb_quirks = rtl8xxxu_gen2_usb_quirks, .set_tx_power = rtl8723b_set_tx_power, .update_rate_mask = rtl8xxxu_gen2_update_rate_mask, - .report_connect = rtl8723bu_report_connect, + .report_connect = rtl8xxxu_gen2_report_connect, .writeN_block_size = 1024, .mbox_ext_reg = REG_HMBOX_EXT0_8723B, .mbox_ext_width = 4, @@ -9997,7 +9997,7 @@ static struct rtl8xxxu_fileops rtl8192eu_fops = { .usb_quirks = rtl8xxxu_gen2_usb_quirks, .set_tx_power = rtl8192e_set_tx_power, .update_rate_mask = rtl8xxxu_gen2_update_rate_mask, - .report_connect = rtl8723bu_report_connect, + .report_connect = rtl8xxxu_gen2_report_connect, .writeN_block_size = 128, .mbox_ext_reg = REG_HMBOX_EXT0_8723B, .mbox_ext_width = 4, From beb5531619c615f9b1d204d6d300dfe9e2fd454e Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Mon, 18 Apr 2016 11:49:23 -0400 Subject: [PATCH 1027/1649] rtl8xxxu: Rename rtl8723au_report_connect() to rtl8xxxu_gen1_report_connect() Rename the function to reflect it is for all/most gen1 parts. 
Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 4af0c33b71bd..882fa12266f9 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -8249,8 +8249,8 @@ static void rtl8xxxu_gen2_update_rate_mask(struct rtl8xxxu_priv *priv, rtl8723a_h2c_cmd(priv, &h2c, sizeof(h2c.b_macid_cfg)); } -static void rtl8723au_report_connect(struct rtl8xxxu_priv *priv, - u8 macid, bool connect) +static void rtl8xxxu_gen1_report_connect(struct rtl8xxxu_priv *priv, + u8 macid, bool connect) { struct h2c_cmd h2c; @@ -9890,7 +9890,7 @@ static struct rtl8xxxu_fileops rtl8723au_fops = { .usb_quirks = rtl8xxxu_gen1_usb_quirks, .set_tx_power = rtl8723a_set_tx_power, .update_rate_mask = rtl8723au_update_rate_mask, - .report_connect = rtl8723au_report_connect, + .report_connect = rtl8xxxu_gen1_report_connect, .writeN_block_size = 1024, .mbox_ext_reg = REG_HMBOX_EXT_0, .mbox_ext_width = 2, @@ -9962,7 +9962,7 @@ static struct rtl8xxxu_fileops rtl8192cu_fops = { .usb_quirks = rtl8xxxu_gen1_usb_quirks, .set_tx_power = rtl8723a_set_tx_power, .update_rate_mask = rtl8723au_update_rate_mask, - .report_connect = rtl8723au_report_connect, + .report_connect = rtl8xxxu_gen1_report_connect, .writeN_block_size = 128, .mbox_ext_reg = REG_HMBOX_EXT_0, .mbox_ext_width = 2, From 3a56bf6aa13c1bb6e4012824e0897f32acbb1f04 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Mon, 18 Apr 2016 11:49:24 -0400 Subject: [PATCH 1028/1649] rtl8xxxu: Rename rtl8723bu_config_channel() to rtl8xxxu_gen2_config_channel() Rename the function to indicate it is applicable to most/all gen2 parts. 
Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 882fa12266f9..88e8fe92688f 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -2345,7 +2345,7 @@ static void rtl8723au_config_channel(struct ieee80211_hw *hw) } } -static void rtl8723bu_config_channel(struct ieee80211_hw *hw) +static void rtl8xxxu_gen2_config_channel(struct ieee80211_hw *hw) { struct rtl8xxxu_priv *priv = hw->priv; u32 val32, rsr; @@ -9917,7 +9917,7 @@ static struct rtl8xxxu_fileops rtl8723bu_fops = { .init_phy_rf = rtl8723bu_init_phy_rf, .phy_init_antenna_selection = rtl8723bu_phy_init_antenna_selection, .phy_iq_calibrate = rtl8723bu_phy_iq_calibrate, - .config_channel = rtl8723bu_config_channel, + .config_channel = rtl8xxxu_gen2_config_channel, .parse_rx_desc = rtl8xxxu_parse_rxdesc24, .init_aggregation = rtl8723bu_init_aggregation, .init_statistics = rtl8723bu_init_statistics, @@ -9990,7 +9990,7 @@ static struct rtl8xxxu_fileops rtl8192eu_fops = { .init_phy_bb = rtl8192eu_init_phy_bb, .init_phy_rf = rtl8192eu_init_phy_rf, .phy_iq_calibrate = rtl8192eu_phy_iq_calibrate, - .config_channel = rtl8723bu_config_channel, + .config_channel = rtl8xxxu_gen2_config_channel, .parse_rx_desc = rtl8xxxu_parse_rxdesc24, .enable_rf = rtl8192e_enable_rf, .disable_rf = rtl8723b_disable_rf, From 6a07b7915afe876195f8c8715ebc438c0b1d1348 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Mon, 18 Apr 2016 11:49:25 -0400 Subject: [PATCH 1029/1649] rtl8xxxu: Rename rtl8723b_disable_rf() to rtl8xxxu_gen2_disable_rf() At least for now, all gen2 parts use the same disable_rf() function Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 88e8fe92688f..3499f9b2b9f6 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -7663,7 +7663,7 @@ static void rtl8723b_enable_rf(struct rtl8xxxu_priv *priv) rtl8723a_h2c_cmd(priv, &h2c, sizeof(h2c.ignore_wlan)); } -static void rtl8723b_disable_rf(struct rtl8xxxu_priv *priv) +static void rtl8xxxu_gen2_disable_rf(struct rtl8xxxu_priv *priv) { u32 val32; @@ -9922,7 +9922,7 @@ static struct rtl8xxxu_fileops rtl8723bu_fops = { .init_aggregation = rtl8723bu_init_aggregation, .init_statistics = rtl8723bu_init_statistics, .enable_rf = rtl8723b_enable_rf, - .disable_rf = rtl8723b_disable_rf, + .disable_rf = rtl8xxxu_gen2_disable_rf, .usb_quirks = rtl8xxxu_gen2_usb_quirks, .set_tx_power = rtl8723b_set_tx_power, .update_rate_mask = rtl8xxxu_gen2_update_rate_mask, @@ -9993,7 +9993,7 @@ static struct rtl8xxxu_fileops rtl8192eu_fops = { .config_channel = rtl8xxxu_gen2_config_channel, .parse_rx_desc = rtl8xxxu_parse_rxdesc24, .enable_rf = rtl8192e_enable_rf, - .disable_rf = rtl8723b_disable_rf, + .disable_rf = rtl8xxxu_gen2_disable_rf, .usb_quirks = rtl8xxxu_gen2_usb_quirks, .set_tx_power = rtl8192e_set_tx_power, .update_rate_mask = rtl8xxxu_gen2_update_rate_mask, From 7eb1400c247934dc485817a2c2b62540121a6e55 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Mon, 18 Apr 2016 11:49:26 -0400 Subject: [PATCH 1030/1649] rtl8xxxu: Rename 
rtl8723a_disable_rf() to rtl8xxxu_gen1_disable_rf() All currently supported gen1 parts use the same disable_rf() routine, so rename the function to reflect that. Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 3499f9b2b9f6..0d2beef089ff 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -2124,7 +2124,7 @@ static void rtl8723a_enable_rf(struct rtl8xxxu_priv *priv) rtl8xxxu_write8(priv, REG_TXPAUSE, 0x00); } -static void rtl8723a_disable_rf(struct rtl8xxxu_priv *priv) +static void rtl8xxxu_gen1_disable_rf(struct rtl8xxxu_priv *priv) { u8 sps0; u32 val32; @@ -9886,7 +9886,7 @@ static struct rtl8xxxu_fileops rtl8723au_fops = { .config_channel = rtl8723au_config_channel, .parse_rx_desc = rtl8xxxu_parse_rxdesc16, .enable_rf = rtl8723a_enable_rf, - .disable_rf = rtl8723a_disable_rf, + .disable_rf = rtl8xxxu_gen1_disable_rf, .usb_quirks = rtl8xxxu_gen1_usb_quirks, .set_tx_power = rtl8723a_set_tx_power, .update_rate_mask = rtl8723au_update_rate_mask, @@ -9958,7 +9958,7 @@ static struct rtl8xxxu_fileops rtl8192cu_fops = { .config_channel = rtl8723au_config_channel, .parse_rx_desc = rtl8xxxu_parse_rxdesc16, .enable_rf = rtl8723a_enable_rf, - .disable_rf = rtl8723a_disable_rf, + .disable_rf = rtl8xxxu_gen1_disable_rf, .usb_quirks = rtl8xxxu_gen1_usb_quirks, .set_tx_power = rtl8723a_set_tx_power, .update_rate_mask = rtl8723au_update_rate_mask, From e09718c2fddfc68a14503ea2016970e48872e722 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Mon, 18 Apr 2016 11:49:27 -0400 Subject: [PATCH 1031/1649] rtl8xxxu: Rename rtl8723au_config_channel() to rtl8xxxu_gen1_config_channel() All supported gen1 parts use the same config_channel() function, so rename it to reflect this. 
Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 0d2beef089ff..e7f8039abe80 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -2223,7 +2223,7 @@ static int rtl8723b_channel_to_group(int channel) return group; } -static void rtl8723au_config_channel(struct ieee80211_hw *hw) +static void rtl8xxxu_gen1_config_channel(struct ieee80211_hw *hw) { struct rtl8xxxu_priv *priv = hw->priv; u32 val32, rsr; @@ -9883,7 +9883,7 @@ static struct rtl8xxxu_fileops rtl8723au_fops = { .init_phy_bb = rtl8723au_init_phy_bb, .init_phy_rf = rtl8723au_init_phy_rf, .phy_iq_calibrate = rtl8723au_phy_iq_calibrate, - .config_channel = rtl8723au_config_channel, + .config_channel = rtl8xxxu_gen1_config_channel, .parse_rx_desc = rtl8xxxu_parse_rxdesc16, .enable_rf = rtl8723a_enable_rf, .disable_rf = rtl8xxxu_gen1_disable_rf, @@ -9955,7 +9955,7 @@ static struct rtl8xxxu_fileops rtl8192cu_fops = { .init_phy_bb = rtl8723au_init_phy_bb, .init_phy_rf = rtl8192cu_init_phy_rf, .phy_iq_calibrate = rtl8723au_phy_iq_calibrate, - .config_channel = rtl8723au_config_channel, + .config_channel = rtl8xxxu_gen1_config_channel, .parse_rx_desc = rtl8xxxu_parse_rxdesc16, .enable_rf = rtl8723a_enable_rf, .disable_rf = rtl8xxxu_gen1_disable_rf, From c6e39da02e17bf3521f1c08df1555b6ffa77dee4 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Mon, 18 Apr 2016 11:49:28 -0400 Subject: [PATCH 1032/1649] rtl8xxxu: Rename rtl8723au_update_rate_mask() to rtl8xxxu_update_rate_mask() All currently supported gen1 parts use the same function for updating the rate mask, so rename it to reflect this. 
Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index e7f8039abe80..c6d5d7267954 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -8203,8 +8203,8 @@ static void rtl8xxxu_sw_scan_complete(struct ieee80211_hw *hw, rtl8xxxu_write8(priv, REG_BEACON_CTRL, val8); } -static void rtl8723au_update_rate_mask(struct rtl8xxxu_priv *priv, - u32 ramask, int sgi) +static void rtl8xxxu_update_rate_mask(struct rtl8xxxu_priv *priv, + u32 ramask, int sgi) { struct h2c_cmd h2c; @@ -9889,7 +9889,7 @@ static struct rtl8xxxu_fileops rtl8723au_fops = { .disable_rf = rtl8xxxu_gen1_disable_rf, .usb_quirks = rtl8xxxu_gen1_usb_quirks, .set_tx_power = rtl8723a_set_tx_power, - .update_rate_mask = rtl8723au_update_rate_mask, + .update_rate_mask = rtl8xxxu_update_rate_mask, .report_connect = rtl8xxxu_gen1_report_connect, .writeN_block_size = 1024, .mbox_ext_reg = REG_HMBOX_EXT_0, @@ -9961,7 +9961,7 @@ static struct rtl8xxxu_fileops rtl8192cu_fops = { .disable_rf = rtl8xxxu_gen1_disable_rf, .usb_quirks = rtl8xxxu_gen1_usb_quirks, .set_tx_power = rtl8723a_set_tx_power, - .update_rate_mask = rtl8723au_update_rate_mask, + .update_rate_mask = rtl8xxxu_update_rate_mask, .report_connect = rtl8xxxu_gen1_report_connect, .writeN_block_size = 128, .mbox_ext_reg = REG_HMBOX_EXT_0, From 28466e9214029ef0978acfbaae0f02f78caa4ae9 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Mon, 18 Apr 2016 11:49:29 -0400 Subject: [PATCH 1033/1649] rtl8xxxu: Rename rtl8723au_phy_iq_calibrate() to rtl8xxxu_gen1_phy_iq_calibrate() All supported gen1 parts use the same phy_iq_calibrate() function (unlike their gen2 counterparts). Rename the function to reflect this. 
Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index c6d5d7267954..31918475cd85 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -6107,7 +6107,7 @@ static void rtl8xxxu_prepare_calibrate(struct rtl8xxxu_priv *priv, u8 start) rtl8723a_h2c_cmd(priv, &h2c, sizeof(h2c.bt_wlan_calibration)); } -static void rtl8723au_phy_iq_calibrate(struct rtl8xxxu_priv *priv) +static void rtl8xxxu_gen1_phy_iq_calibrate(struct rtl8xxxu_priv *priv) { struct device *dev = &priv->udev->dev; int result[4][8]; /* last is final result */ @@ -9882,7 +9882,7 @@ static struct rtl8xxxu_fileops rtl8723au_fops = { .llt_init = rtl8xxxu_init_llt_table, .init_phy_bb = rtl8723au_init_phy_bb, .init_phy_rf = rtl8723au_init_phy_rf, - .phy_iq_calibrate = rtl8723au_phy_iq_calibrate, + .phy_iq_calibrate = rtl8xxxu_gen1_phy_iq_calibrate, .config_channel = rtl8xxxu_gen1_config_channel, .parse_rx_desc = rtl8xxxu_parse_rxdesc16, .enable_rf = rtl8723a_enable_rf, @@ -9954,7 +9954,7 @@ static struct rtl8xxxu_fileops rtl8192cu_fops = { .llt_init = rtl8xxxu_init_llt_table, .init_phy_bb = rtl8723au_init_phy_bb, .init_phy_rf = rtl8192cu_init_phy_rf, - .phy_iq_calibrate = rtl8723au_phy_iq_calibrate, + .phy_iq_calibrate = rtl8xxxu_gen1_phy_iq_calibrate, .config_channel = rtl8xxxu_gen1_config_channel, .parse_rx_desc = rtl8xxxu_parse_rxdesc16, .enable_rf = rtl8723a_enable_rf, From de7c189c342da635d8b17460b851fbe9b7f1e898 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Mon, 18 Apr 2016 11:49:30 -0400 Subject: [PATCH 1034/1649] rtl8xxxu: Rename rtl8723au_init_phy_bb() to rtl8xxxu_gen1_init_phy_bb() All gen1 parts use the same init_phy_bb() function, so rename it to reflect this. 
Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 31918475cd85..a0b9bd9cbad1 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -3814,7 +3814,7 @@ static int rtl8xxxu_init_phy_regs(struct rtl8xxxu_priv *priv, return 0; } -static void rtl8723au_init_phy_bb(struct rtl8xxxu_priv *priv) +static void rtl8xxxu_gen1_init_phy_bb(struct rtl8xxxu_priv *priv) { u8 val8, ldoa15, ldov12d, lpldo, ldohci12; u16 val16; @@ -9880,7 +9880,7 @@ static struct rtl8xxxu_fileops rtl8723au_fops = { .power_off = rtl8xxxu_power_off, .reset_8051 = rtl8xxxu_reset_8051, .llt_init = rtl8xxxu_init_llt_table, - .init_phy_bb = rtl8723au_init_phy_bb, + .init_phy_bb = rtl8xxxu_gen1_init_phy_bb, .init_phy_rf = rtl8723au_init_phy_rf, .phy_iq_calibrate = rtl8xxxu_gen1_phy_iq_calibrate, .config_channel = rtl8xxxu_gen1_config_channel, @@ -9952,7 +9952,7 @@ static struct rtl8xxxu_fileops rtl8192cu_fops = { .power_off = rtl8xxxu_power_off, .reset_8051 = rtl8xxxu_reset_8051, .llt_init = rtl8xxxu_init_llt_table, - .init_phy_bb = rtl8723au_init_phy_bb, + .init_phy_bb = rtl8xxxu_gen1_init_phy_bb, .init_phy_rf = rtl8192cu_init_phy_rf, .phy_iq_calibrate = rtl8xxxu_gen1_phy_iq_calibrate, .config_channel = rtl8xxxu_gen1_config_channel, From 42a3bc7a2a8525f9a3679e5dad19866ef34e2c09 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Mon, 18 Apr 2016 11:49:31 -0400 Subject: [PATCH 1035/1649] rtl8xxxu: Rename rtl8723a_set_tx_power() to rtl8xxxu_gen1_set_tx_power() All gen1 parts use the same interface for setting TX power, so rename the function to reflect this. 
Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index a0b9bd9cbad1..843d4eac0d5a 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -2476,7 +2476,7 @@ static void rtl8xxxu_gen2_config_channel(struct ieee80211_hw *hw) } static void -rtl8723a_set_tx_power(struct rtl8xxxu_priv *priv, int channel, bool ht40) +rtl8xxxu_gen1_set_tx_power(struct rtl8xxxu_priv *priv, int channel, bool ht40) { struct rtl8xxxu_power_base *power_base = priv->power_base; u8 cck[RTL8723A_MAX_RF_PATHS], ofdm[RTL8723A_MAX_RF_PATHS]; @@ -9888,7 +9888,7 @@ static struct rtl8xxxu_fileops rtl8723au_fops = { .enable_rf = rtl8723a_enable_rf, .disable_rf = rtl8xxxu_gen1_disable_rf, .usb_quirks = rtl8xxxu_gen1_usb_quirks, - .set_tx_power = rtl8723a_set_tx_power, + .set_tx_power = rtl8xxxu_gen1_set_tx_power, .update_rate_mask = rtl8xxxu_update_rate_mask, .report_connect = rtl8xxxu_gen1_report_connect, .writeN_block_size = 1024, @@ -9960,7 +9960,7 @@ static struct rtl8xxxu_fileops rtl8192cu_fops = { .enable_rf = rtl8723a_enable_rf, .disable_rf = rtl8xxxu_gen1_disable_rf, .usb_quirks = rtl8xxxu_gen1_usb_quirks, - .set_tx_power = rtl8723a_set_tx_power, + .set_tx_power = rtl8xxxu_gen1_set_tx_power, .update_rate_mask = rtl8xxxu_update_rate_mask, .report_connect = rtl8xxxu_gen1_report_connect, .writeN_block_size = 128, From 8396a41cf7478f950f4682c1333771e1e4cf0b36 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Mon, 18 Apr 2016 11:49:32 -0400 Subject: [PATCH 1036/1649] rtl8xxxu: Rename rtl8723a_enable_rf() to rtl8xxxu_gen1_enable_rf() All gen1 parts use the same enable_rf() function, so rename it to reflect this. 
Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 843d4eac0d5a..7ed685cc3cbb 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -2080,7 +2080,7 @@ static void rtl8723bu_write_btreg(struct rtl8xxxu_priv *priv, u8 reg, u8 data) rtl8723a_h2c_cmd(priv, &h2c, sizeof(h2c.bt_mp_oper)); } -static void rtl8723a_enable_rf(struct rtl8xxxu_priv *priv) +static void rtl8xxxu_gen1_enable_rf(struct rtl8xxxu_priv *priv) { u8 val8; u32 val32; @@ -9885,7 +9885,7 @@ static struct rtl8xxxu_fileops rtl8723au_fops = { .phy_iq_calibrate = rtl8xxxu_gen1_phy_iq_calibrate, .config_channel = rtl8xxxu_gen1_config_channel, .parse_rx_desc = rtl8xxxu_parse_rxdesc16, - .enable_rf = rtl8723a_enable_rf, + .enable_rf = rtl8xxxu_gen1_enable_rf, .disable_rf = rtl8xxxu_gen1_disable_rf, .usb_quirks = rtl8xxxu_gen1_usb_quirks, .set_tx_power = rtl8xxxu_gen1_set_tx_power, @@ -9957,7 +9957,7 @@ static struct rtl8xxxu_fileops rtl8192cu_fops = { .phy_iq_calibrate = rtl8xxxu_gen1_phy_iq_calibrate, .config_channel = rtl8xxxu_gen1_config_channel, .parse_rx_desc = rtl8xxxu_parse_rxdesc16, - .enable_rf = rtl8723a_enable_rf, + .enable_rf = rtl8xxxu_gen1_enable_rf, .disable_rf = rtl8xxxu_gen1_disable_rf, .usb_quirks = rtl8xxxu_gen1_usb_quirks, .set_tx_power = rtl8xxxu_gen1_set_tx_power, From 8db71451e5c4ab5813e70b22a876e67a29a57a3f Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Mon, 18 Apr 2016 11:49:33 -0400 Subject: [PATCH 1037/1649] rtl8xxxu: Rename rtl8723a_mac_init_table to rtl8xxxu_gen1_mac_init_table All currently supported gen1 parts use the same mac_init_table, so rename it to reflect this. 
Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 7ed685cc3cbb..84c4bc65117b 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -128,7 +128,7 @@ static struct ieee80211_supported_band rtl8xxxu_supported_band = { .n_bitrates = ARRAY_SIZE(rtl8xxxu_rates), }; -static struct rtl8xxxu_reg8val rtl8723a_mac_init_table[] = { +static struct rtl8xxxu_reg8val rtl8xxxu_gen1_mac_init_table[] = { {0x420, 0x80}, {0x423, 0x00}, {0x430, 0x00}, {0x431, 0x00}, {0x432, 0x00}, {0x433, 0x01}, {0x434, 0x04}, {0x435, 0x05}, {0x436, 0x06}, {0x437, 0x07}, {0x438, 0x00}, {0x439, 0x00}, @@ -9903,7 +9903,7 @@ static struct rtl8xxxu_fileops rtl8723au_fops = { .trxff_boundary = 0x27ff, .pbp_rx = PBP_PAGE_SIZE_128, .pbp_tx = PBP_PAGE_SIZE_128, - .mactable = rtl8723a_mac_init_table, + .mactable = rtl8xxxu_gen1_mac_init_table, }; static struct rtl8xxxu_fileops rtl8723bu_fops = { @@ -9975,7 +9975,7 @@ static struct rtl8xxxu_fileops rtl8192cu_fops = { .trxff_boundary = 0x27ff, .pbp_rx = PBP_PAGE_SIZE_128, .pbp_tx = PBP_PAGE_SIZE_128, - .mactable = rtl8723a_mac_init_table, + .mactable = rtl8xxxu_gen1_mac_init_table, }; #endif From 5ac74145487dbf93a109a06c8aec0da1395c35f5 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Mon, 18 Apr 2016 11:49:34 -0400 Subject: [PATCH 1038/1649] rtl8xxxu: Rename rtl8723b_channel_to_group() This renames rtl8723b_channel_to_group() to rtl8xxxu_gen2_channel_to_group() to reflect it is used by all currently supported gen2 parts. Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 84c4bc65117b..1f517d0bb4bb 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -2205,7 +2205,7 @@ static int rtl8723a_channel_to_group(int channel) /* * Valid for rtl8723bu and rtl8192eu */ -static int rtl8723b_channel_to_group(int channel) +static int rtl8xxxu_gen2_channel_to_group(int channel) { int group; @@ -2617,7 +2617,7 @@ rtl8723b_set_tx_power(struct rtl8xxxu_priv *priv, int channel, bool ht40) int group, tx_idx; tx_idx = 0; - group = rtl8723b_channel_to_group(channel); + group = rtl8xxxu_gen2_channel_to_group(channel); cck = priv->cck_tx_power_index_B[group]; val32 = rtl8xxxu_read32(priv, REG_TX_AGC_A_CCK1_MCS32); @@ -2656,7 +2656,7 @@ rtl8192e_set_tx_power(struct rtl8xxxu_priv *priv, int channel, bool ht40) int group, tx_idx; tx_idx = 0; - group = rtl8723b_channel_to_group(channel); + group = rtl8xxxu_gen2_channel_to_group(channel); cck = priv->cck_tx_power_index_A[group]; From 85f466350abf3a7eeb579a3abd75f85e36d591fa Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Mon, 18 Apr 2016 11:49:35 -0400 Subject: [PATCH 1039/1649] rtl8xxxu: Rename rtl8723bu_simularity_compare() This renames rtl8723bu_simularity_compare() to rtl8xxxu_gen2_simularity_compare() to reflect it is used for all gen2 parts. 
Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- .../net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 22 ++++++++++++------- 1 file changed, 14 insertions(+), 8 deletions(-) diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 1f517d0bb4bb..7d4645588ab9 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -4595,8 +4595,8 @@ static bool rtl8xxxu_simularity_compare(struct rtl8xxxu_priv *priv, return false; } -static bool rtl8723bu_simularity_compare(struct rtl8xxxu_priv *priv, - int result[][8], int c1, int c2) +static bool rtl8xxxu_gen2_simularity_compare(struct rtl8xxxu_priv *priv, + int result[][8], int c1, int c2) { u32 i, j, diff, simubitmap, bound = 0; int candidate[2] = {-1, -1}; /* for path A and path B */ @@ -6237,7 +6237,8 @@ static void rtl8723bu_phy_iq_calibrate(struct rtl8xxxu_priv *priv) rtl8723bu_phy_iqcalibrate(priv, result, i); if (i == 1) { - simu = rtl8723bu_simularity_compare(priv, result, 0, 1); + simu = rtl8xxxu_gen2_simularity_compare(priv, + result, 0, 1); if (simu) { candidate = 0; break; @@ -6245,13 +6246,15 @@ static void rtl8723bu_phy_iq_calibrate(struct rtl8xxxu_priv *priv) } if (i == 2) { - simu = rtl8723bu_simularity_compare(priv, result, 0, 2); + simu = rtl8xxxu_gen2_simularity_compare(priv, + result, 0, 2); if (simu) { candidate = 0; break; } - simu = rtl8723bu_simularity_compare(priv, result, 1, 2); + simu = rtl8xxxu_gen2_simularity_compare(priv, + result, 1, 2); if (simu) { candidate = 1; } else { @@ -6352,7 +6355,8 @@ static void rtl8192eu_phy_iq_calibrate(struct rtl8xxxu_priv *priv) rtl8192eu_phy_iqcalibrate(priv, result, i); if (i == 1) { - simu = rtl8723bu_simularity_compare(priv, result, 0, 1); + simu = rtl8xxxu_gen2_simularity_compare(priv, + result, 0, 1); if (simu) { candidate = 0; break; @@ -6360,13 +6364,15 @@ static void rtl8192eu_phy_iq_calibrate(struct rtl8xxxu_priv *priv) } if (i == 2) { - simu = rtl8723bu_simularity_compare(priv, result, 0, 2); + simu = rtl8xxxu_gen2_simularity_compare(priv, + result, 0, 2); if (simu) { candidate = 0; break; } - simu = rtl8723bu_simularity_compare(priv, result, 1, 2); + simu = rtl8xxxu_gen2_simularity_compare(priv, + result, 1, 2); if (simu) candidate = 1; else From 04a74a9f8af0cd61598e886c260ad0644b1dd5c0 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Mon, 18 Apr 2016 11:49:36 -0400 Subject: [PATCH 1040/1649] rtl8xxxu: Rename rtl8723au_iqk_phy_iq_bb_reg There is nothing 8723au specific about rtl8723au_iqk_phy_iq_bb_reg so rename the array to rtl8xxxu_iqk_phy_iq_bb_reg. 
Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 7d4645588ab9..f2ce8c9a31cf 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -1747,7 +1747,7 @@ static struct rtl8xxxu_rfregs rtl8xxxu_rfregs[] = { }, }; -static const u32 rtl8723au_iqk_phy_iq_bb_reg[RTL8XXXU_BB_REGS] = { +static const u32 rtl8xxxu_iqk_phy_iq_bb_reg[RTL8XXXU_BB_REGS] = { REG_OFDM0_XA_RX_IQ_IMBALANCE, REG_OFDM0_XB_RX_IQ_IMBALANCE, REG_OFDM0_ENERGY_CCA_THRES, @@ -6205,7 +6205,7 @@ static void rtl8xxxu_gen1_phy_iq_calibrate(struct rtl8xxxu_priv *priv) rtl8xxxu_fill_iqk_matrix_b(priv, path_b_ok, result, candidate, (reg_ec4 == 0)); - rtl8xxxu_save_regs(priv, rtl8723au_iqk_phy_iq_bb_reg, + rtl8xxxu_save_regs(priv, rtl8xxxu_iqk_phy_iq_bb_reg, priv->bb_recovery_backup, RTL8XXXU_BB_REGS); rtl8xxxu_prepare_calibrate(priv, 0); @@ -6313,7 +6313,7 @@ static void rtl8723bu_phy_iq_calibrate(struct rtl8xxxu_priv *priv) rtl8xxxu_fill_iqk_matrix_b(priv, path_b_ok, result, candidate, (reg_ec4 == 0)); - rtl8xxxu_save_regs(priv, rtl8723au_iqk_phy_iq_bb_reg, + rtl8xxxu_save_regs(priv, rtl8xxxu_iqk_phy_iq_bb_reg, priv->bb_recovery_backup, RTL8XXXU_BB_REGS); rtl8xxxu_write32(priv, REG_BT_CONTROL_8723BU, bt_control); @@ -6424,7 +6424,7 @@ static void rtl8192eu_phy_iq_calibrate(struct rtl8xxxu_priv *priv) rtl8xxxu_fill_iqk_matrix_b(priv, path_b_ok, result, candidate, (reg_ec4 == 0)); - rtl8xxxu_save_regs(priv, rtl8723au_iqk_phy_iq_bb_reg, + rtl8xxxu_save_regs(priv, rtl8xxxu_iqk_phy_iq_bb_reg, priv->bb_recovery_backup, RTL8XXXU_BB_REGS); } From e0bdef0f75f0ee0d4747b72fa75310da78dbfa56 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Tue, 19 Apr 2016 07:21:58 -0700 Subject: [PATCH 1041/1649] mwifiex: missing error code on allocation failure We accidentally return success instead of -ENOMEM. Signed-off-by: Dan Carpenter Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/usb.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/marvell/mwifiex/usb.c b/drivers/net/wireless/marvell/mwifiex/usb.c index 05108618430d..cdd8f9a867a9 100644 --- a/drivers/net/wireless/marvell/mwifiex/usb.c +++ b/drivers/net/wireless/marvell/mwifiex/usb.c @@ -1017,8 +1017,10 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter, /* Allocate memory for receive */ recv_buff = kzalloc(FW_DNLD_RX_BUF_SIZE, GFP_KERNEL); - if (!recv_buff) + if (!recv_buff) { + ret = -ENOMEM; goto cleanup; + } do { /* Send pseudo data to check winner status first */ From 394f0ed53108d5e038910e3eb733ccd3f0d9c464 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Tue, 19 Apr 2016 07:23:44 -0700 Subject: [PATCH 1042/1649] mwifiex: fix loop timeout in mwifiex_prog_fw_w_helper() USB8XXX_FW_MAX_RETRY is 3. We were using a post-op loop "while (retries--) {" but then the lines after that assume the loop exits with retries set to zero. I've fixed this by changing to a pre-op loop. I started with retries set to 4 instead of 3 so that we still go through the loop the same number of times. 
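To make the off-by-one concrete, a standalone sketch (not driver code) of the two loop forms with an unsigned counter:

	u32 retries = 3;

	while (retries--) {
		/* three attempts; break on success */
	}
	/* if all attempts failed, retries is now (u32)-1, so a later
	 * "if (!retries) -> error" check is silently skipped
	 */

	retries = 3 + 1;
	while (--retries) {
		/* still three attempts */
	}
	/* if all attempts failed, retries == 0 and the error check fires */
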
Signed-off-by: Dan Carpenter Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/usb.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/drivers/net/wireless/marvell/mwifiex/usb.c b/drivers/net/wireless/marvell/mwifiex/usb.c index cdd8f9a867a9..0857575c5c39 100644 --- a/drivers/net/wireless/marvell/mwifiex/usb.c +++ b/drivers/net/wireless/marvell/mwifiex/usb.c @@ -995,7 +995,8 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter, { int ret = 0; u8 *firmware = fw->fw_buf, *recv_buff; - u32 retries = USB8XXX_FW_MAX_RETRY, dlen; + u32 retries = USB8XXX_FW_MAX_RETRY + 1; + u32 dlen; u32 fw_seqnum = 0, tlen = 0, dnld_cmd = 0; struct fw_data *fwdata; struct fw_sync_header sync_fw; @@ -1043,7 +1044,7 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter, } /* If the send/receive fails or CRC occurs then retry */ - while (retries--) { + while (--retries) { u8 *buf = (u8 *)fwdata; u32 len = FW_DATA_XMIT_SIZE; @@ -1103,7 +1104,7 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter, continue; } - retries = USB8XXX_FW_MAX_RETRY; + retries = USB8XXX_FW_MAX_RETRY + 1; break; } fw_seqnum++; From 81542fac6eae586e0d6fd502618342e8e20b4c5a Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Tue, 19 Apr 2016 07:25:43 -0700 Subject: [PATCH 1043/1649] brcmfmac: testing the wrong variable in brcmf_rx_hdrpull() Smatch complains about this code: drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c:335 brcmf_rx_hdrpull() error: we previously assumed '*ifp' could be null (see line 333) The problem is that we recently changed these from "ifp" to "*ifp" but there was one that we didn't update. - if (ret || !ifp || !ifp->ndev) { + if (ret || !(*ifp) || !(*ifp)->ndev) { if (ret != -ENODATA && ifp) ^^^ - ifp->stats.rx_errors++; + (*ifp)->stats.rx_errors++; I have updated it to *ifp as well. We always call this function is a non-NULL "ifp" pointer, btw. Fixes: c462ebcdfe42 ('brcmfmac: create common function for handling brcmf_proto_hdrpull()') Signed-off-by: Dan Carpenter Acked-by: Arend van Spriel Signed-off-by: Kalle Valo --- drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c index 1b476d1fec2c..b590499f6883 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c @@ -331,7 +331,7 @@ static int brcmf_rx_hdrpull(struct brcmf_pub *drvr, struct sk_buff *skb, ret = brcmf_proto_hdrpull(drvr, true, skb, ifp); if (ret || !(*ifp) || !(*ifp)->ndev) { - if (ret != -ENODATA && ifp) + if (ret != -ENODATA && *ifp) (*ifp)->stats.rx_errors++; brcmu_pkt_buf_free_skb(skb); return -ENODATA; From 2557654df14b95141f1410127c33ed1661a8abbb Mon Sep 17 00:00:00 2001 From: Chun-Yeow Yeoh Date: Thu, 21 Apr 2016 00:41:34 +0800 Subject: [PATCH 1044/1649] rt2800lib: enable MFP if hw crypt is disabled If rt2800usb is loaded with nohwcrypt=1, mac80211 takes care of the crypto with software encryption/decryption and thus, MFP can be used. Tested for secured mesh using ath9k_htc and ath9k. 
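In practice MFP is therefore only advertised when hardware crypto is disabled at module load time, e.g.:

	modprobe rt2800usb nohwcrypt=1

With that parameter set, rt2800_hwcrypt_disabled() returns true and the MFP_CAPABLE flag added below is reported to mac80211; with hardware crypto left enabled the capability is not set.
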
Signed-off-by: Chun-Yeow Yeoh Acked-by: Stanislaw Gruszka Signed-off-by: Kalle Valo --- drivers/net/wireless/ralink/rt2x00/rt2800lib.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c index c36fa4e03fb6..bf3f0a39908c 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c @@ -7492,6 +7492,10 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev) if (!rt2x00_is_usb(rt2x00dev)) ieee80211_hw_set(rt2x00dev->hw, HOST_BROADCAST_PS_BUFFERING); + /* Set MFP if HW crypto is disabled. */ + if (rt2800_hwcrypt_disabled(rt2x00dev)) + ieee80211_hw_set(rt2x00dev->hw, MFP_CAPABLE); + SET_IEEE80211_DEV(rt2x00dev->hw, rt2x00dev->dev); SET_IEEE80211_PERM_ADDR(rt2x00dev->hw, rt2800_eeprom_addr(rt2x00dev, From f0d8f38cd909e072833a06b79939256c4aebe3a0 Mon Sep 17 00:00:00 2001 From: Luca Coelho Date: Mon, 25 Apr 2016 20:02:09 +0300 Subject: [PATCH 1045/1649] iwlwifi: fix fw version reading for DVM devices In commit 97f95c93c8ed ("iwlwifi: remove support for fw older than -16.ucode") we accidentally changed the fw version reading code for DVM devices. The code intended to remove the old fw version API, because all MVM firmwares version 16 and above that we support don't use it anymore. But DVM devices still use the old FW API. Fix that by bringing the code back in. Reported-by: Pat Erley Tested-by: Kalle Valo Fixes: 97f95c93c8ed ("iwlwifi: remove support for fw older than-16.ucode") Signed-off-by: Luca Coelho Signed-off-by: Kalle Valo --- drivers/net/wireless/intel/iwlwifi/iwl-drv.c | 5 ++++- drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h | 2 ++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c index 48e873732d4e..4f495d9153a6 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c @@ -1280,7 +1280,10 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context) if (err) goto try_again; - api_ver = drv->fw.ucode_ver; + if (fw_has_api(&drv->fw.ucode_capa, IWL_UCODE_TLV_API_NEW_VERSION)) + api_ver = drv->fw.ucode_ver; + else + api_ver = IWL_UCODE_API(drv->fw.ucode_ver); /* * api_ver should match the api version forming part of the diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h b/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h index 843232bd8bbe..37dc09e8b6a7 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h @@ -251,6 +251,7 @@ typedef unsigned int __bitwise__ iwl_ucode_tlv_api_t; * @IWL_UCODE_TLV_API_WIFI_MCC_UPDATE: ucode supports MCC updates with source. * @IWL_UCODE_TLV_API_WIDE_CMD_HDR: ucode supports wide command header * @IWL_UCODE_TLV_API_LQ_SS_PARAMS: Configure STBC/BFER via LQ CMD ss_params + * @IWL_UCODE_TLV_API_NEW_VERSION: new versioning format * @IWL_UCODE_TLV_API_EXT_SCAN_PRIORITY: scan APIs use 8-level priority * instead of 3. 
* @IWL_UCODE_TLV_API_TX_POWER_CHAIN: TX power API has larger command size @@ -263,6 +264,7 @@ enum iwl_ucode_tlv_api { IWL_UCODE_TLV_API_WIFI_MCC_UPDATE = (__force iwl_ucode_tlv_api_t)9, IWL_UCODE_TLV_API_WIDE_CMD_HDR = (__force iwl_ucode_tlv_api_t)14, IWL_UCODE_TLV_API_LQ_SS_PARAMS = (__force iwl_ucode_tlv_api_t)18, + IWL_UCODE_TLV_API_NEW_VERSION = (__force iwl_ucode_tlv_api_t)20, IWL_UCODE_TLV_API_EXT_SCAN_PRIORITY = (__force iwl_ucode_tlv_api_t)24, IWL_UCODE_TLV_API_TX_POWER_CHAIN = (__force iwl_ucode_tlv_api_t)27, From b0fe3306432796c8f7adbede8ccd479bb7b53d0a Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Sat, 2 Apr 2016 00:05:14 -0700 Subject: [PATCH 1046/1649] i40e/i40evf: Clean up feature flags The feature flags list for i40e and i40evf is beginning to become pretty massive. I plan to add another 4 or so features to these drivers and duplicating the flags for each and every flags list is becoming a bit repetitive. The primary change here is that we now build our features list around hw_encap_features. After that we assign that to vlan_features, hw_features, and finally map that onto features. In addition we end up throwing features onto hw_encap_features that end up having no effect such as the Rx offloads and SCTP_CRC. However that should have no impact and makes things a bit easier for us as hw_encap_features is one of the less updated features maps available. For i40evf I went through and sanity checked a few features as well. Specifically RXCSUM was being set as a read-only feature which didn't make much sense. I have updated things so we can clear the NETIF_F_RXCSUM flag since that is really a software feature and not a hardware one anyway so disabling it is just a matter of ignoring the result from the hardware. Signed-off-by: Alexander Duyck Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_main.c | 61 ++++++++----------- .../net/ethernet/intel/i40evf/i40evf_main.c | 58 +++++++++--------- 2 files changed, 54 insertions(+), 65 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 0b071cea305d..f2e83fe4d66c 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -9111,40 +9111,36 @@ static int i40e_config_netdev(struct i40e_vsi *vsi) np = netdev_priv(netdev); np->vsi = vsi; - netdev->hw_enc_features |= NETIF_F_IP_CSUM | - NETIF_F_IPV6_CSUM | - NETIF_F_TSO | - NETIF_F_TSO6 | - NETIF_F_TSO_ECN | - NETIF_F_GSO_GRE | - NETIF_F_GSO_UDP_TUNNEL | - NETIF_F_GSO_UDP_TUNNEL_CSUM | + netdev->hw_enc_features |= NETIF_F_SG | + NETIF_F_IP_CSUM | + NETIF_F_IPV6_CSUM | + NETIF_F_HIGHDMA | + NETIF_F_SOFT_FEATURES | + NETIF_F_TSO | + NETIF_F_TSO_ECN | + NETIF_F_TSO6 | + NETIF_F_GSO_GRE | + NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_GSO_UDP_TUNNEL_CSUM | + NETIF_F_SCTP_CRC | + NETIF_F_RXHASH | + NETIF_F_RXCSUM | 0; - netdev->features = NETIF_F_SG | - NETIF_F_IP_CSUM | - NETIF_F_SCTP_CRC | - NETIF_F_HIGHDMA | - NETIF_F_GSO_UDP_TUNNEL | - NETIF_F_GSO_GRE | - NETIF_F_HW_VLAN_CTAG_TX | - NETIF_F_HW_VLAN_CTAG_RX | - NETIF_F_HW_VLAN_CTAG_FILTER | - NETIF_F_IPV6_CSUM | - NETIF_F_TSO | - NETIF_F_TSO_ECN | - NETIF_F_TSO6 | - NETIF_F_RXCSUM | - NETIF_F_RXHASH | - 0; + if (!(pf->flags & I40E_FLAG_OUTER_UDP_CSUM_CAPABLE)) + netdev->hw_enc_features ^= NETIF_F_GSO_UDP_TUNNEL_CSUM; + + /* record features VLANs can make use of */ + netdev->vlan_features |= netdev->hw_enc_features; if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) - netdev->features |= 
NETIF_F_NTUPLE; - if (pf->flags & I40E_FLAG_OUTER_UDP_CSUM_CAPABLE) - netdev->features |= NETIF_F_GSO_UDP_TUNNEL_CSUM; + netdev->hw_features |= NETIF_F_NTUPLE; - /* copy netdev features into list of user selectable features */ - netdev->hw_features |= netdev->features; + netdev->hw_features |= netdev->hw_enc_features | + NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX; + + netdev->features |= netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER; if (vsi->type == I40E_VSI_MAIN) { SET_NETDEV_DEV(netdev, &pf->pdev->dev); @@ -9183,12 +9179,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi) ether_addr_copy(netdev->dev_addr, mac_addr); ether_addr_copy(netdev->perm_addr, mac_addr); - /* vlan gets same features (except vlan offload) - * after any tweaks for specific VSI types - */ - netdev->vlan_features = netdev->features & ~(NETIF_F_HW_VLAN_CTAG_TX | - NETIF_F_HW_VLAN_CTAG_RX | - NETIF_F_HW_VLAN_CTAG_FILTER); + netdev->priv_flags |= IFF_UNICAST_FLT; netdev->priv_flags |= IFF_SUPP_NOFCS; /* Setup netdev TC information */ diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c index 9110319a8f00..e3857d890cfb 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c @@ -2337,40 +2337,38 @@ int i40evf_process_config(struct i40evf_adapter *adapter) return -ENODEV; } - netdev->features |= NETIF_F_HIGHDMA | - NETIF_F_SG | - NETIF_F_IP_CSUM | - NETIF_F_SCTP_CRC | - NETIF_F_IPV6_CSUM | - NETIF_F_TSO | - NETIF_F_TSO6 | - NETIF_F_TSO_ECN | - NETIF_F_GSO_GRE | - NETIF_F_GSO_UDP_TUNNEL | - NETIF_F_RXCSUM | - NETIF_F_GRO; + netdev->hw_enc_features |= NETIF_F_SG | + NETIF_F_IP_CSUM | + NETIF_F_IPV6_CSUM | + NETIF_F_HIGHDMA | + NETIF_F_SOFT_FEATURES | + NETIF_F_TSO | + NETIF_F_TSO_ECN | + NETIF_F_TSO6 | + NETIF_F_GSO_GRE | + NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_GSO_UDP_TUNNEL_CSUM | + NETIF_F_SCTP_CRC | + NETIF_F_RXHASH | + NETIF_F_RXCSUM | + 0; - netdev->hw_enc_features |= NETIF_F_IP_CSUM | - NETIF_F_IPV6_CSUM | - NETIF_F_TSO | - NETIF_F_TSO6 | - NETIF_F_TSO_ECN | - NETIF_F_GSO_GRE | - NETIF_F_GSO_UDP_TUNNEL | - NETIF_F_GSO_UDP_TUNNEL_CSUM; + if (!(adapter->flags & I40EVF_FLAG_OUTER_UDP_CSUM_CAPABLE)) + netdev->hw_enc_features ^= NETIF_F_GSO_UDP_TUNNEL_CSUM; - if (adapter->flags & I40EVF_FLAG_OUTER_UDP_CSUM_CAPABLE) - netdev->features |= NETIF_F_GSO_UDP_TUNNEL_CSUM; + /* record features VLANs can make use of */ + netdev->vlan_features |= netdev->hw_enc_features; - /* always clear VLAN features because they can change at every reset */ - netdev->features &= ~(I40EVF_VLAN_FEATURES); - /* copy netdev features into list of user selectable features */ - netdev->hw_features |= netdev->features; + /* Write features and hw_features separately to avoid polluting + * with, or dropping, features that are set when we registgered. 
+ */ + netdev->hw_features |= netdev->hw_enc_features; - if (vfres->vf_offload_flags & I40E_VIRTCHNL_VF_OFFLOAD_VLAN) { - netdev->vlan_features = netdev->features; - netdev->features |= I40EVF_VLAN_FEATURES; - } + netdev->features |= netdev->hw_enc_features | I40EVF_VLAN_FEATURES; + + /* disable VLAN features if not supported */ + if (!(vfres->vf_offload_flags & I40E_VIRTCHNL_VF_OFFLOAD_VLAN)) + netdev->features ^= I40EVF_VLAN_FEATURES; adapter->vsi.id = adapter->vsi_res->vsi_id; From 577389a5db766c44400e75e6a79f39d9b0d585f8 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Sat, 2 Apr 2016 00:06:56 -0700 Subject: [PATCH 1047/1649] i40e/i40evf: Add support for IPIP and SIT offloads Looking over the documentation it turns out enabling IPIP and SIT offloads for i40e is pretty straightforward. As such I decided to enable them with this patch. In my testing I am seeing an improvement of 8 to 10 Gb/s for IPIP and SIT tunnels with this offload enabled. Signed-off-by: Alexander Duyck Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_main.c | 2 ++ drivers/net/ethernet/intel/i40e/i40e_txrx.c | 24 ++++++++++++------- drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 24 ++++++++++++------- .../net/ethernet/intel/i40evf/i40evf_main.c | 2 ++ 4 files changed, 36 insertions(+), 16 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index f2e83fe4d66c..ec94ad6c783a 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -9120,6 +9120,8 @@ static int i40e_config_netdev(struct i40e_vsi *vsi) NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_GSO_GRE | + NETIF_F_GSO_IPIP | + NETIF_F_GSO_SIT | NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC | diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 39efba0636fd..6e44cf118843 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -2299,7 +2299,10 @@ static int i40e_tso(struct sk_buff *skb, u8 *hdr_len, u64 *cd_type_cmd_tso_mss) ip.v6->payload_len = 0; } - if (skb_shinfo(skb)->gso_type & (SKB_GSO_UDP_TUNNEL | SKB_GSO_GRE | + if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE | + SKB_GSO_IPIP | + SKB_GSO_SIT | + SKB_GSO_UDP_TUNNEL | SKB_GSO_UDP_TUNNEL_CSUM)) { if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM) { /* determine offset of outer transport header */ @@ -2442,13 +2445,6 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, &l4_proto, &frag_off); } - /* compute outer L3 header size */ - tunnel |= ((l4.hdr - ip.hdr) / 4) << - I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT; - - /* switch IP header pointer from outer to inner header */ - ip.hdr = skb_inner_network_header(skb); - /* define outer transport */ switch (l4_proto) { case IPPROTO_UDP: @@ -2459,6 +2455,11 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, tunnel |= I40E_TXD_CTX_GRE_TUNNELING; *tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL; break; + case IPPROTO_IPIP: + case IPPROTO_IPV6: + *tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL; + l4.hdr = skb_inner_network_header(skb); + break; default: if (*tx_flags & I40E_TX_FLAGS_TSO) return -1; @@ -2467,6 +2468,13 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, return 0; } + /* compute outer L3 header size */ + tunnel |= ((l4.hdr - ip.hdr) / 4) << + I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT; + + /* switch IP header pointer from outer to inner header */ + 
ip.hdr = skb_inner_network_header(skb); + /* compute tunnel header size */ tunnel |= ((ip.hdr - l4.hdr) / 2) << I40E_TXD_CTX_QW0_NATLEN_SHIFT; diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c index fc228182dc88..f101895ecf4a 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c @@ -1564,7 +1564,10 @@ static int i40e_tso(struct sk_buff *skb, u8 *hdr_len, u64 *cd_type_cmd_tso_mss) ip.v6->payload_len = 0; } - if (skb_shinfo(skb)->gso_type & (SKB_GSO_UDP_TUNNEL | SKB_GSO_GRE | + if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE | + SKB_GSO_IPIP | + SKB_GSO_SIT | + SKB_GSO_UDP_TUNNEL | SKB_GSO_UDP_TUNNEL_CSUM)) { if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM) { /* determine offset of outer transport header */ @@ -1665,13 +1668,6 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, &l4_proto, &frag_off); } - /* compute outer L3 header size */ - tunnel |= ((l4.hdr - ip.hdr) / 4) << - I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT; - - /* switch IP header pointer from outer to inner header */ - ip.hdr = skb_inner_network_header(skb); - /* define outer transport */ switch (l4_proto) { case IPPROTO_UDP: @@ -1682,6 +1678,11 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, tunnel |= I40E_TXD_CTX_GRE_TUNNELING; *tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL; break; + case IPPROTO_IPIP: + case IPPROTO_IPV6: + *tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL; + l4.hdr = skb_inner_network_header(skb); + break; default: if (*tx_flags & I40E_TX_FLAGS_TSO) return -1; @@ -1690,6 +1691,13 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, return 0; } + /* compute outer L3 header size */ + tunnel |= ((l4.hdr - ip.hdr) / 4) << + I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT; + + /* switch IP header pointer from outer to inner header */ + ip.hdr = skb_inner_network_header(skb); + /* compute tunnel header size */ tunnel |= ((ip.hdr - l4.hdr) / 2) << I40E_TXD_CTX_QW0_NATLEN_SHIFT; diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c index e3857d890cfb..806da2686623 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c @@ -2346,6 +2346,8 @@ int i40evf_process_config(struct i40evf_adapter *adapter) NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_GSO_GRE | + NETIF_F_GSO_IPIP | + NETIF_F_GSO_SIT | NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC | From c4e1868c3aa1992de1cba600e7083fcd49bd20b8 Mon Sep 17 00:00:00 2001 From: Mitch Williams Date: Tue, 12 Apr 2016 08:30:40 -0700 Subject: [PATCH 1048/1649] i40e: Add support for configuring VF RSS Add support for configuring RSS on behalf of the VFs. This removes the burden of dealing with different hardware interfaces from the VF drivers, allowing for better future compatibility. 
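Most of the plumbing below is mechanical; the one idiom worth spelling out is how the new variable-length virtchnl messages are length-checked. A standalone sketch of that pattern (types and names simplified for illustration; the real message is struct i40e_virtchnl_rss_key, whose trailing key[] is declared with a single placeholder byte, which is why the validation in the diff adds key_len - 1):

#include <stdint.h>
#include <stdbool.h>
#include <stdlib.h>
#include <stdio.h>

/* Simplified wire format: fixed header followed by key_len key bytes.
 * Because key[1] is part of the struct, sizeof() already covers one
 * payload byte, so the expected total is sizeof(struct) + key_len - 1.
 */
struct rss_key_msg {
    uint16_t vsi_id;
    uint16_t key_len;
    uint8_t  key[1];
};

static bool rss_key_msg_len_ok(const struct rss_key_msg *vrk, size_t msglen)
{
    size_t valid_len = sizeof(*vrk);

    if (msglen < valid_len)
        return false;
    /* The PF handler additionally insists that key_len match the exact
     * hardware key size before programming any registers.
     */
    valid_len += vrk->key_len - 1;
    return msglen == valid_len;
}

int main(void)
{
    size_t msglen = sizeof(struct rss_key_msg) + 16 - 1; /* 16-byte key */
    struct rss_key_msg *vrk = calloc(1, msglen);

    if (!vrk)
        return 1;
    vrk->key_len = 16;
    printf("length ok: %d\n", rss_key_msg_len_ok(vrk, msglen));
    free(vrk);
    return 0;
}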
Change-ID: Icea75d3f37241ee8e447be5779e5abb53ddf04c0 Signed-off-by: Mitch Williams Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e.h | 1 + drivers/net/ethernet/intel/i40e/i40e_main.c | 35 +++- .../ethernet/intel/i40e/i40e_virtchnl_pf.c | 193 +++++++++++++++++- 3 files changed, 217 insertions(+), 12 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index d25b3be5ba89..e312adf64260 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -202,6 +202,7 @@ struct i40e_lump_tracking { #define I40E_HKEY_ARRAY_SIZE ((I40E_PFQF_HKEY_MAX_INDEX + 1) * 4) #define I40E_HLUT_ARRAY_SIZE ((I40E_PFQF_HLUT_MAX_INDEX + 1) * 4) +#define I40E_VF_HLUT_ARRAY_SIZE ((I40E_VFQF_HLUT1_MAX_INDEX + 1) * 4) enum i40e_fd_stat_idx { I40E_FD_STAT_ATR, diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index ec94ad6c783a..39b3b56d3a9f 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -8082,24 +8082,45 @@ static int i40e_config_rss_reg(struct i40e_vsi *vsi, const u8 *seed, { struct i40e_pf *pf = vsi->back; struct i40e_hw *hw = &pf->hw; + u16 vf_id = vsi->vf_id; u8 i; /* Fill out hash function seed */ if (seed) { u32 *seed_dw = (u32 *)seed; - for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) - i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i), seed_dw[i]); + if (vsi->type == I40E_VSI_MAIN) { + for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) + i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i), + seed_dw[i]); + } else if (vsi->type == I40E_VSI_SRIOV) { + for (i = 0; i <= I40E_VFQF_HKEY1_MAX_INDEX; i++) + i40e_write_rx_ctl(hw, + I40E_VFQF_HKEY1(i, vf_id), + seed_dw[i]); + } else { + dev_err(&pf->pdev->dev, "Cannot set RSS seed - invalid VSI type\n"); + } } if (lut) { u32 *lut_dw = (u32 *)lut; - if (lut_size != I40E_HLUT_ARRAY_SIZE) - return -EINVAL; - - for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++) - wr32(hw, I40E_PFQF_HLUT(i), lut_dw[i]); + if (vsi->type == I40E_VSI_MAIN) { + if (lut_size != I40E_HLUT_ARRAY_SIZE) + return -EINVAL; + for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++) + wr32(hw, I40E_PFQF_HLUT(i), lut_dw[i]); + } else if (vsi->type == I40E_VSI_SRIOV) { + if (lut_size != I40E_VF_HLUT_ARRAY_SIZE) + return -EINVAL; + for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) + i40e_write_rx_ctl(hw, + I40E_VFQF_HLUT1(i, vf_id), + lut_dw[i]); + } else { + dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n"); + } } i40e_flush(hw); diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 30f8cbe6b54b..c3645886670e 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -1348,12 +1348,16 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg) set_bit(I40E_VF_STAT_IWARPENA, &vf->vf_states); } - if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) { - if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ) - vfres->vf_offload_flags |= - I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ; + if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF) { + vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF; } else { - vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG; + if ((pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) && + (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ)) + vfres->vf_offload_flags |= + I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ; + else + 
vfres->vf_offload_flags |= + I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG; } if (pf->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) { @@ -1382,6 +1386,9 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg) vfres->num_vsis = num_vsis; vfres->num_queue_pairs = vf->num_queue_pairs; vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf; + vfres->rss_key_size = I40E_HKEY_ARRAY_SIZE; + vfres->rss_lut_size = I40E_VF_HLUT_ARRAY_SIZE; + if (vf->lan_vsi_idx) { vfres->vsi_res[0].vsi_id = vf->lan_vsi_id; vfres->vsi_res[0].vsi_type = I40E_VSI_SRIOV; @@ -2041,6 +2048,139 @@ error_param: aq_ret); } +/** + * i40e_vc_config_rss_key + * @vf: pointer to the VF info + * @msg: pointer to the msg buffer + * @msglen: msg length + * + * Configure the VF's RSS key + **/ +static int i40e_vc_config_rss_key(struct i40e_vf *vf, u8 *msg, u16 msglen) +{ + struct i40e_virtchnl_rss_key *vrk = + (struct i40e_virtchnl_rss_key *)msg; + struct i40e_pf *pf = vf->pf; + struct i40e_vsi *vsi = NULL; + u16 vsi_id = vrk->vsi_id; + i40e_status aq_ret = 0; + + if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) || + !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) || + !i40e_vc_isvalid_vsi_id(vf, vsi_id) || + (vrk->key_len != I40E_HKEY_ARRAY_SIZE)) { + aq_ret = I40E_ERR_PARAM; + goto err; + } + + vsi = pf->vsi[vf->lan_vsi_idx]; + aq_ret = i40e_config_rss(vsi, vrk->key, NULL, 0); +err: + /* send the response to the VF */ + return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_RSS_KEY, + aq_ret); +} + +/** + * i40e_vc_config_rss_lut + * @vf: pointer to the VF info + * @msg: pointer to the msg buffer + * @msglen: msg length + * + * Configure the VF's RSS LUT + **/ +static int i40e_vc_config_rss_lut(struct i40e_vf *vf, u8 *msg, u16 msglen) +{ + struct i40e_virtchnl_rss_lut *vrl = + (struct i40e_virtchnl_rss_lut *)msg; + struct i40e_pf *pf = vf->pf; + struct i40e_vsi *vsi = NULL; + u16 vsi_id = vrl->vsi_id; + i40e_status aq_ret = 0; + + if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) || + !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) || + !i40e_vc_isvalid_vsi_id(vf, vsi_id) || + (vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE)) { + aq_ret = I40E_ERR_PARAM; + goto err; + } + + vsi = pf->vsi[vf->lan_vsi_idx]; + aq_ret = i40e_config_rss(vsi, NULL, vrl->lut, I40E_VF_HLUT_ARRAY_SIZE); + /* send the response to the VF */ +err: + return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_RSS_LUT, + aq_ret); +} + +/** + * i40e_vc_get_rss_hena + * @vf: pointer to the VF info + * @msg: pointer to the msg buffer + * @msglen: msg length + * + * Return the RSS HENA bits allowed by the hardware + **/ +static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg, u16 msglen) +{ + struct i40e_virtchnl_rss_hena *vrh = NULL; + struct i40e_pf *pf = vf->pf; + i40e_status aq_ret = 0; + int len = 0; + + if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) || + !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) { + aq_ret = I40E_ERR_PARAM; + goto err; + } + len = sizeof(struct i40e_virtchnl_rss_hena); + + vrh = kzalloc(len, GFP_KERNEL); + if (!vrh) { + aq_ret = I40E_ERR_NO_MEMORY; + len = 0; + goto err; + } + vrh->hena = i40e_pf_get_default_rss_hena(pf); +err: + /* send the response back to the VF */ + aq_ret = i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS, + aq_ret, (u8 *)vrh, len); + return aq_ret; +} + +/** + * i40e_vc_set_rss_hena + * @vf: pointer to the VF info + * @msg: pointer to the msg buffer + * @msglen: msg length + * + * Set the RSS HENA bits for the VF + **/ +static int i40e_vc_set_rss_hena(struct 
i40e_vf *vf, u8 *msg, u16 msglen) +{ + struct i40e_virtchnl_rss_hena *vrh = + (struct i40e_virtchnl_rss_hena *)msg; + struct i40e_pf *pf = vf->pf; + struct i40e_hw *hw = &pf->hw; + i40e_status aq_ret = 0; + + if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) || + !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) { + aq_ret = I40E_ERR_PARAM; + goto err; + } + i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)vrh->hena); + i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(1, vf->vf_id), + (u32)(vrh->hena >> 32)); + + /* send the response to the VF */ +err: + return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_SET_RSS_HENA, + aq_ret); +} + /** * i40e_vc_validate_vf_msg * @vf: pointer to the VF info @@ -2162,6 +2302,36 @@ static int i40e_vc_validate_vf_msg(struct i40e_vf *vf, u32 v_opcode, sizeof(struct i40e_virtchnl_iwarp_qv_info)); } break; + case I40E_VIRTCHNL_OP_CONFIG_RSS_KEY: + valid_len = sizeof(struct i40e_virtchnl_rss_key); + if (msglen >= valid_len) { + struct i40e_virtchnl_rss_key *vrk = + (struct i40e_virtchnl_rss_key *)msg; + if (vrk->key_len != I40E_HKEY_ARRAY_SIZE) { + err_msg_format = true; + break; + } + valid_len += vrk->key_len - 1; + } + break; + case I40E_VIRTCHNL_OP_CONFIG_RSS_LUT: + valid_len = sizeof(struct i40e_virtchnl_rss_lut); + if (msglen >= valid_len) { + struct i40e_virtchnl_rss_lut *vrl = + (struct i40e_virtchnl_rss_lut *)msg; + if (vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE) { + err_msg_format = true; + break; + } + valid_len += vrl->lut_entries - 1; + } + break; + case I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS: + valid_len = 0; + break; + case I40E_VIRTCHNL_OP_SET_RSS_HENA: + valid_len = sizeof(struct i40e_virtchnl_rss_hena); + break; /* These are always errors coming from the VF. */ case I40E_VIRTCHNL_OP_EVENT: case I40E_VIRTCHNL_OP_UNKNOWN: @@ -2260,6 +2430,19 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode, case I40E_VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP: ret = i40e_vc_iwarp_qvmap_msg(vf, msg, msglen, false); break; + case I40E_VIRTCHNL_OP_CONFIG_RSS_KEY: + ret = i40e_vc_config_rss_key(vf, msg, msglen); + break; + case I40E_VIRTCHNL_OP_CONFIG_RSS_LUT: + ret = i40e_vc_config_rss_lut(vf, msg, msglen); + break; + case I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS: + ret = i40e_vc_get_rss_hena(vf, msg, msglen); + break; + case I40E_VIRTCHNL_OP_SET_RSS_HENA: + ret = i40e_vc_set_rss_hena(vf, msg, msglen); + break; + case I40E_VIRTCHNL_OP_UNKNOWN: default: dev_err(&pf->pdev->dev, "Unsupported opcode %d from VF %d\n", From c0913c2e431c86026acba667f8655d90979bb79c Mon Sep 17 00:00:00 2001 From: Mitch Williams Date: Tue, 12 Apr 2016 08:30:41 -0700 Subject: [PATCH 1049/1649] i40evf: Don't Panic Under some circumstances the driver remove function may be called before the driver is fully initialized. So we can't assume that we know where our towel is at, or that all of the data structures are initialized. To ensure that we don't panic, check that the vsi_res pointer is valid before dereferencing it. Then drink beer and eat peanuts. 
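The fix amounts to teaching a convenience macro to tolerate a pointer that has not been set up yet. A standalone sketch of the before/after shape (structure and flag names invented for illustration; the driver's macro is CLIENT_ENABLED):

#include <stdio.h>
#include <stddef.h>

struct vf_res { unsigned int offload_flags; };
struct adapter { struct vf_res *vf_res; };

#define OFFLOAD_IWARP 0x1

/* Before: dereferences vf_res unconditionally, so calling it while the
 * resource struct is still NULL (early remove path) oopses.
 */
#define CLIENT_ENABLED_UNSAFE(a) \
    ((a)->vf_res->offload_flags & OFFLOAD_IWARP)

/* After, mirroring the patch: evaluate to 0 while vf_res is NULL
 * instead of dereferencing it.
 */
#define CLIENT_ENABLED(a) \
    ((a)->vf_res ? (a)->vf_res->offload_flags & OFFLOAD_IWARP : 0)

int main(void)
{
    struct adapter early = { .vf_res = NULL };

    /* Safe even before initialization has completed. */
    printf("client enabled: %d\n", CLIENT_ENABLED(&early) ? 1 : 0);
    return 0;
}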
Change-ID: If697b4db57348e39f9538793e16aa755e3e1af03 Signed-off-by: Mitch Williams Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40evf/i40evf.h | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h index e657eccd232c..017c83b6271f 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf.h +++ b/drivers/net/ethernet/intel/i40evf/i40evf.h @@ -256,8 +256,10 @@ struct i40evf_adapter { bool netdev_registered; bool link_up; enum i40e_virtchnl_ops current_op; -#define CLIENT_ENABLED(_a) ((_a)->vf_res->vf_offload_flags & \ - I40E_VIRTCHNL_VF_OFFLOAD_IWARP) +#define CLIENT_ENABLED(_a) ((_a)->vf_res ? \ + (_a)->vf_res->vf_offload_flags & \ + I40E_VIRTCHNL_VF_OFFLOAD_IWARP : \ + 0) #define RSS_AQ(_a) ((_a)->vf_res->vf_offload_flags & \ I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ) #define VLAN_ALLOWED(_a) ((_a)->vf_res->vf_offload_flags & \ From bab3a34bb5f0b4056e37edf1bf6d097b147de453 Mon Sep 17 00:00:00 2001 From: Shannon Nelson Date: Tue, 12 Apr 2016 08:30:42 -0700 Subject: [PATCH 1050/1649] i40e: Code cleanup in i40e_add_fdir_ethtool A little bit of code cleanup in prep for more cloud filter work. Change-ID: I0dc33ce0d4c207944336a07437640fef920c100c Signed-off-by: Shannon Nelson Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 8a83d4514812..8e56c43c4104 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -2506,7 +2506,6 @@ static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi, if (!vsi) return -EINVAL; - pf = vsi->back; if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED)) @@ -2564,15 +2563,18 @@ static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi, input->src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst; if (ntohl(fsp->m_ext.data[1])) { - if (ntohl(fsp->h_ext.data[1]) >= pf->num_alloc_vfs) { - netif_info(pf, drv, vsi->netdev, "Invalid VF id\n"); + vf_id = ntohl(fsp->h_ext.data[1]); + if (vf_id >= pf->num_alloc_vfs) { + netif_info(pf, drv, vsi->netdev, + "Invalid VF id %d\n", vf_id); goto free_input; } - vf_id = ntohl(fsp->h_ext.data[1]); /* Find vsi id from vf id and override dest vsi */ input->dest_vsi = pf->vf[vf_id].lan_vsi_id; if (input->q_index >= pf->vf[vf_id].num_queue_pairs) { - netif_info(pf, drv, vsi->netdev, "Invalid queue id\n"); + netif_info(pf, drv, vsi->netdev, + "Invalid queue id %d for VF %d\n", + input->q_index, vf_id); goto free_input; } } From f60d94c009685da0632d93297cae971c5898a04b Mon Sep 17 00:00:00 2001 From: Nicolas Dichtel Date: Tue, 26 Apr 2016 10:06:11 +0200 Subject: [PATCH 1051/1649] macsec: use nla_put_u64_64bit() Signed-off-by: Nicolas Dichtel Signed-off-by: David S. 
Miller --- drivers/net/macsec.c | 121 ++++++++++++++++++++++++--------- include/uapi/linux/if_link.h | 1 + include/uapi/linux/if_macsec.h | 6 ++ 3 files changed, 95 insertions(+), 33 deletions(-) diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c index 6caa72402de7..a172a1ffa151 100644 --- a/drivers/net/macsec.c +++ b/drivers/net/macsec.c @@ -1405,9 +1405,10 @@ static sci_t nla_get_sci(const struct nlattr *nla) return (__force sci_t)nla_get_u64(nla); } -static int nla_put_sci(struct sk_buff *skb, int attrtype, sci_t value) +static int nla_put_sci(struct sk_buff *skb, int attrtype, sci_t value, + int padattr) { - return nla_put_u64(skb, attrtype, (__force u64)value); + return nla_put_u64_64bit(skb, attrtype, (__force u64)value, padattr); } static struct macsec_tx_sa *get_txsa_from_nl(struct net *net, @@ -2131,16 +2132,36 @@ static int copy_rx_sc_stats(struct sk_buff *skb, sum.InPktsUnusedSA += tmp.InPktsUnusedSA; } - if (nla_put_u64(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_VALIDATED, sum.InOctetsValidated) || - nla_put_u64(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_DECRYPTED, sum.InOctetsDecrypted) || - nla_put_u64(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNCHECKED, sum.InPktsUnchecked) || - nla_put_u64(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_DELAYED, sum.InPktsDelayed) || - nla_put_u64(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_OK, sum.InPktsOK) || - nla_put_u64(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_INVALID, sum.InPktsInvalid) || - nla_put_u64(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_LATE, sum.InPktsLate) || - nla_put_u64(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_VALID, sum.InPktsNotValid) || - nla_put_u64(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_USING_SA, sum.InPktsNotUsingSA) || - nla_put_u64(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNUSED_SA, sum.InPktsUnusedSA)) + if (nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_VALIDATED, + sum.InOctetsValidated, + MACSEC_RXSC_STATS_ATTR_PAD) || + nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_DECRYPTED, + sum.InOctetsDecrypted, + MACSEC_RXSC_STATS_ATTR_PAD) || + nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNCHECKED, + sum.InPktsUnchecked, + MACSEC_RXSC_STATS_ATTR_PAD) || + nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_DELAYED, + sum.InPktsDelayed, + MACSEC_RXSC_STATS_ATTR_PAD) || + nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_OK, + sum.InPktsOK, + MACSEC_RXSC_STATS_ATTR_PAD) || + nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_INVALID, + sum.InPktsInvalid, + MACSEC_RXSC_STATS_ATTR_PAD) || + nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_LATE, + sum.InPktsLate, + MACSEC_RXSC_STATS_ATTR_PAD) || + nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_VALID, + sum.InPktsNotValid, + MACSEC_RXSC_STATS_ATTR_PAD) || + nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_USING_SA, + sum.InPktsNotUsingSA, + MACSEC_RXSC_STATS_ATTR_PAD) || + nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNUSED_SA, + sum.InPktsUnusedSA, + MACSEC_RXSC_STATS_ATTR_PAD)) return -EMSGSIZE; return 0; @@ -2169,10 +2190,18 @@ static int copy_tx_sc_stats(struct sk_buff *skb, sum.OutOctetsEncrypted += tmp.OutOctetsEncrypted; } - if (nla_put_u64(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_PROTECTED, sum.OutPktsProtected) || - nla_put_u64(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_ENCRYPTED, sum.OutPktsEncrypted) || - nla_put_u64(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_PROTECTED, sum.OutOctetsProtected) || - nla_put_u64(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_ENCRYPTED, sum.OutOctetsEncrypted)) + if (nla_put_u64_64bit(skb, 
MACSEC_TXSC_STATS_ATTR_OUT_PKTS_PROTECTED, + sum.OutPktsProtected, + MACSEC_TXSC_STATS_ATTR_PAD) || + nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_ENCRYPTED, + sum.OutPktsEncrypted, + MACSEC_TXSC_STATS_ATTR_PAD) || + nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_PROTECTED, + sum.OutOctetsProtected, + MACSEC_TXSC_STATS_ATTR_PAD) || + nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_ENCRYPTED, + sum.OutOctetsEncrypted, + MACSEC_TXSC_STATS_ATTR_PAD)) return -EMSGSIZE; return 0; @@ -2205,14 +2234,30 @@ static int copy_secy_stats(struct sk_buff *skb, sum.InPktsOverrun += tmp.InPktsOverrun; } - if (nla_put_u64(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_UNTAGGED, sum.OutPktsUntagged) || - nla_put_u64(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNTAGGED, sum.InPktsUntagged) || - nla_put_u64(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_TOO_LONG, sum.OutPktsTooLong) || - nla_put_u64(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_TAG, sum.InPktsNoTag) || - nla_put_u64(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_BAD_TAG, sum.InPktsBadTag) || - nla_put_u64(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNKNOWN_SCI, sum.InPktsUnknownSCI) || - nla_put_u64(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_SCI, sum.InPktsNoSCI) || - nla_put_u64(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_OVERRUN, sum.InPktsOverrun)) + if (nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_UNTAGGED, + sum.OutPktsUntagged, + MACSEC_SECY_STATS_ATTR_PAD) || + nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNTAGGED, + sum.InPktsUntagged, + MACSEC_SECY_STATS_ATTR_PAD) || + nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_TOO_LONG, + sum.OutPktsTooLong, + MACSEC_SECY_STATS_ATTR_PAD) || + nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_TAG, + sum.InPktsNoTag, + MACSEC_SECY_STATS_ATTR_PAD) || + nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_BAD_TAG, + sum.InPktsBadTag, + MACSEC_SECY_STATS_ATTR_PAD) || + nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNKNOWN_SCI, + sum.InPktsUnknownSCI, + MACSEC_SECY_STATS_ATTR_PAD) || + nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_SCI, + sum.InPktsNoSCI, + MACSEC_SECY_STATS_ATTR_PAD) || + nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_OVERRUN, + sum.InPktsOverrun, + MACSEC_SECY_STATS_ATTR_PAD)) return -EMSGSIZE; return 0; @@ -2226,8 +2271,11 @@ static int nla_put_secy(struct macsec_secy *secy, struct sk_buff *skb) if (!secy_nest) return 1; - if (nla_put_sci(skb, MACSEC_SECY_ATTR_SCI, secy->sci) || - nla_put_u64(skb, MACSEC_SECY_ATTR_CIPHER_SUITE, DEFAULT_CIPHER_ID) || + if (nla_put_sci(skb, MACSEC_SECY_ATTR_SCI, secy->sci, + MACSEC_SECY_ATTR_PAD) || + nla_put_u64_64bit(skb, MACSEC_SECY_ATTR_CIPHER_SUITE, + DEFAULT_CIPHER_ID, + MACSEC_SECY_ATTR_PAD) || nla_put_u8(skb, MACSEC_SECY_ATTR_ICV_LEN, secy->icv_len) || nla_put_u8(skb, MACSEC_SECY_ATTR_OPER, secy->operational) || nla_put_u8(skb, MACSEC_SECY_ATTR_PROTECT, secy->protect_frames) || @@ -2312,7 +2360,9 @@ static int dump_secy(struct macsec_secy *secy, struct net_device *dev, if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) || nla_put_u32(skb, MACSEC_SA_ATTR_PN, tx_sa->next_pn) || - nla_put_u64(skb, MACSEC_SA_ATTR_KEYID, tx_sa->key.id) || + nla_put_u64_64bit(skb, MACSEC_SA_ATTR_KEYID, + tx_sa->key.id, + MACSEC_SA_ATTR_PAD) || nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, tx_sa->active)) { nla_nest_cancel(skb, txsa_nest); nla_nest_cancel(skb, txsa_list); @@ -2353,7 +2403,8 @@ static int dump_secy(struct macsec_secy *secy, struct net_device *dev, } if (nla_put_u8(skb, MACSEC_RXSC_ATTR_ACTIVE, rx_sc->active) || - nla_put_sci(skb, 
MACSEC_RXSC_ATTR_SCI, rx_sc->sci)) { + nla_put_sci(skb, MACSEC_RXSC_ATTR_SCI, rx_sc->sci, + MACSEC_RXSC_ATTR_PAD)) { nla_nest_cancel(skb, rxsc_nest); nla_nest_cancel(skb, rxsc_list); goto nla_put_failure; @@ -2413,7 +2464,9 @@ static int dump_secy(struct macsec_secy *secy, struct net_device *dev, if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) || nla_put_u32(skb, MACSEC_SA_ATTR_PN, rx_sa->next_pn) || - nla_put_u64(skb, MACSEC_SA_ATTR_KEYID, rx_sa->key.id) || + nla_put_u64_64bit(skb, MACSEC_SA_ATTR_KEYID, + rx_sa->key.id, + MACSEC_SA_ATTR_PAD) || nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, rx_sa->active)) { nla_nest_cancel(skb, rxsa_nest); nla_nest_cancel(skb, rxsc_nest); @@ -3145,9 +3198,9 @@ static struct net *macsec_get_link_net(const struct net_device *dev) static size_t macsec_get_size(const struct net_device *dev) { return 0 + - nla_total_size(8) + /* SCI */ + nla_total_size_64bit(8) + /* SCI */ nla_total_size(1) + /* ICV_LEN */ - nla_total_size(8) + /* CIPHER_SUITE */ + nla_total_size_64bit(8) + /* CIPHER_SUITE */ nla_total_size(4) + /* WINDOW */ nla_total_size(1) + /* ENCODING_SA */ nla_total_size(1) + /* ENCRYPT */ @@ -3166,9 +3219,11 @@ static int macsec_fill_info(struct sk_buff *skb, struct macsec_secy *secy = &macsec_priv(dev)->secy; struct macsec_tx_sc *tx_sc = &secy->tx_sc; - if (nla_put_sci(skb, IFLA_MACSEC_SCI, secy->sci) || + if (nla_put_sci(skb, IFLA_MACSEC_SCI, secy->sci, + IFLA_MACSEC_PAD) || nla_put_u8(skb, IFLA_MACSEC_ICV_LEN, secy->icv_len) || - nla_put_u64(skb, IFLA_MACSEC_CIPHER_SUITE, DEFAULT_CIPHER_ID) || + nla_put_u64_64bit(skb, IFLA_MACSEC_CIPHER_SUITE, + DEFAULT_CIPHER_ID, IFLA_MACSEC_PAD) || nla_put_u8(skb, IFLA_MACSEC_ENCODING_SA, tx_sc->encoding_sa) || nla_put_u8(skb, IFLA_MACSEC_ENCRYPT, tx_sc->encrypt) || nla_put_u8(skb, IFLA_MACSEC_PROTECT, secy->protect_frames) || diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index 9300c08346c8..d82de331bb6b 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h @@ -434,6 +434,7 @@ enum { IFLA_MACSEC_SCB, IFLA_MACSEC_REPLAY_PROTECT, IFLA_MACSEC_VALIDATION, + IFLA_MACSEC_PAD, __IFLA_MACSEC_MAX, }; diff --git a/include/uapi/linux/if_macsec.h b/include/uapi/linux/if_macsec.h index 26b0d1e3e3e7..4c623d617b84 100644 --- a/include/uapi/linux/if_macsec.h +++ b/include/uapi/linux/if_macsec.h @@ -55,6 +55,7 @@ enum macsec_secy_attrs { MACSEC_SECY_ATTR_INC_SCI, MACSEC_SECY_ATTR_ES, MACSEC_SECY_ATTR_SCB, + MACSEC_SECY_ATTR_PAD, __MACSEC_SECY_ATTR_END, NUM_MACSEC_SECY_ATTR = __MACSEC_SECY_ATTR_END, MACSEC_SECY_ATTR_MAX = __MACSEC_SECY_ATTR_END - 1, @@ -66,6 +67,7 @@ enum macsec_rxsc_attrs { MACSEC_RXSC_ATTR_ACTIVE, /* config/dump, u8 0..1 */ MACSEC_RXSC_ATTR_SA_LIST, /* dump, nested */ MACSEC_RXSC_ATTR_STATS, /* dump, nested, macsec_rxsc_stats_attr */ + MACSEC_RXSC_ATTR_PAD, __MACSEC_RXSC_ATTR_END, NUM_MACSEC_RXSC_ATTR = __MACSEC_RXSC_ATTR_END, MACSEC_RXSC_ATTR_MAX = __MACSEC_RXSC_ATTR_END - 1, @@ -79,6 +81,7 @@ enum macsec_sa_attrs { MACSEC_SA_ATTR_KEY, /* config, data */ MACSEC_SA_ATTR_KEYID, /* config/dump, u64 */ MACSEC_SA_ATTR_STATS, /* dump, nested, macsec_sa_stats_attr */ + MACSEC_SA_ATTR_PAD, __MACSEC_SA_ATTR_END, NUM_MACSEC_SA_ATTR = __MACSEC_SA_ATTR_END, MACSEC_SA_ATTR_MAX = __MACSEC_SA_ATTR_END - 1, @@ -110,6 +113,7 @@ enum macsec_rxsc_stats_attr { MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_VALID, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_USING_SA, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNUSED_SA, + MACSEC_RXSC_STATS_ATTR_PAD, __MACSEC_RXSC_STATS_ATTR_END, NUM_MACSEC_RXSC_STATS_ATTR = 
__MACSEC_RXSC_STATS_ATTR_END, MACSEC_RXSC_STATS_ATTR_MAX = __MACSEC_RXSC_STATS_ATTR_END - 1, @@ -137,6 +141,7 @@ enum macsec_txsc_stats_attr { MACSEC_TXSC_STATS_ATTR_OUT_PKTS_ENCRYPTED, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_PROTECTED, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_ENCRYPTED, + MACSEC_TXSC_STATS_ATTR_PAD, __MACSEC_TXSC_STATS_ATTR_END, NUM_MACSEC_TXSC_STATS_ATTR = __MACSEC_TXSC_STATS_ATTR_END, MACSEC_TXSC_STATS_ATTR_MAX = __MACSEC_TXSC_STATS_ATTR_END - 1, @@ -153,6 +158,7 @@ enum macsec_secy_stats_attr { MACSEC_SECY_STATS_ATTR_IN_PKTS_UNKNOWN_SCI, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_SCI, MACSEC_SECY_STATS_ATTR_IN_PKTS_OVERRUN, + MACSEC_SECY_STATS_ATTR_PAD, __MACSEC_SECY_STATS_ATTR_END, NUM_MACSEC_SECY_STATS_ATTR = __MACSEC_SECY_STATS_ATTR_END, MACSEC_SECY_STATS_ATTR_MAX = __MACSEC_SECY_STATS_ATTR_END - 1, From 08f4cbb8f207e2b8f40e8acc2a4e3a7df642b095 Mon Sep 17 00:00:00 2001 From: Nicolas Dichtel Date: Tue, 26 Apr 2016 10:06:12 +0200 Subject: [PATCH 1052/1649] drivers/wireless: use nla_put_u64_64bit() Signed-off-by: Nicolas Dichtel Signed-off-by: David S. Miller --- drivers/net/wireless/mac80211_hwsim.c | 2 +- drivers/net/wireless/mac80211_hwsim.h | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index c757f14c4c00..9ed0ed1bf514 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c @@ -1030,7 +1030,7 @@ static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw, data->pending_cookie++; cookie = data->pending_cookie; info->rate_driver_data[0] = (void *)cookie; - if (nla_put_u64(skb, HWSIM_ATTR_COOKIE, cookie)) + if (nla_put_u64_64bit(skb, HWSIM_ATTR_COOKIE, cookie, HWSIM_ATTR_PAD)) goto nla_put_failure; genlmsg_end(skb, msg_head); diff --git a/drivers/net/wireless/mac80211_hwsim.h b/drivers/net/wireless/mac80211_hwsim.h index 66e1c73bd507..39f22467ca2a 100644 --- a/drivers/net/wireless/mac80211_hwsim.h +++ b/drivers/net/wireless/mac80211_hwsim.h @@ -148,6 +148,7 @@ enum { HWSIM_ATTR_RADIO_NAME, HWSIM_ATTR_NO_VIF, HWSIM_ATTR_FREQ, + HWSIM_ATTR_PAD, __HWSIM_ATTR_MAX, }; #define HWSIM_ATTR_MAX (__HWSIM_ATTR_MAX - 1) From 3c6f3714d6a9e051eb84759e4fa5a2f4a3e730c6 Mon Sep 17 00:00:00 2001 From: Nicolas Dichtel Date: Tue, 26 Apr 2016 10:06:13 +0200 Subject: [PATCH 1053/1649] fs/quota: use nla_put_u64_64bit() Signed-off-by: Nicolas Dichtel Acked-by: Jan Kara Signed-off-by: David S. 
Miller --- fs/quota/netlink.c | 12 +++++++----- include/uapi/linux/quota.h | 1 + 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/fs/quota/netlink.c b/fs/quota/netlink.c index d07a2f91d858..8b252673d454 100644 --- a/fs/quota/netlink.c +++ b/fs/quota/netlink.c @@ -47,7 +47,7 @@ void quota_send_warning(struct kqid qid, dev_t dev, void *msg_head; int ret; int msg_size = 4 * nla_total_size(sizeof(u32)) + - 2 * nla_total_size(sizeof(u64)); + 2 * nla_total_size_64bit(sizeof(u64)); /* We have to allocate using GFP_NOFS as we are called from a * filesystem performing write and thus further recursion into @@ -68,8 +68,9 @@ void quota_send_warning(struct kqid qid, dev_t dev, ret = nla_put_u32(skb, QUOTA_NL_A_QTYPE, qid.type); if (ret) goto attr_err_out; - ret = nla_put_u64(skb, QUOTA_NL_A_EXCESS_ID, - from_kqid_munged(&init_user_ns, qid)); + ret = nla_put_u64_64bit(skb, QUOTA_NL_A_EXCESS_ID, + from_kqid_munged(&init_user_ns, qid), + QUOTA_NL_A_PAD); if (ret) goto attr_err_out; ret = nla_put_u32(skb, QUOTA_NL_A_WARNING, warntype); @@ -81,8 +82,9 @@ void quota_send_warning(struct kqid qid, dev_t dev, ret = nla_put_u32(skb, QUOTA_NL_A_DEV_MINOR, MINOR(dev)); if (ret) goto attr_err_out; - ret = nla_put_u64(skb, QUOTA_NL_A_CAUSED_ID, - from_kuid_munged(&init_user_ns, current_uid())); + ret = nla_put_u64_64bit(skb, QUOTA_NL_A_CAUSED_ID, + from_kuid_munged(&init_user_ns, current_uid()), + QUOTA_NL_A_PAD); if (ret) goto attr_err_out; genlmsg_end(skb, msg_head); diff --git a/include/uapi/linux/quota.h b/include/uapi/linux/quota.h index 38baddb807f5..4d2489ef6f10 100644 --- a/include/uapi/linux/quota.h +++ b/include/uapi/linux/quota.h @@ -191,6 +191,7 @@ enum { QUOTA_NL_A_DEV_MAJOR, QUOTA_NL_A_DEV_MINOR, QUOTA_NL_A_CAUSED_ID, + QUOTA_NL_A_PAD, __QUOTA_NL_A_MAX, }; #define QUOTA_NL_A_MAX (__QUOTA_NL_A_MAX - 1) From 6ed46d1247a595c58b6c04481fa77cf532f45de0 Mon Sep 17 00:00:00 2001 From: Nicolas Dichtel Date: Tue, 26 Apr 2016 10:06:14 +0200 Subject: [PATCH 1054/1649] sock_diag: align nlattr properly when needed I also fix the value of INET_DIAG_MAX. It's wrong since commit 8f840e47f190 which is only in net-next right now, thus I didn't make a separate patch. Fixes: 8f840e47f190 ("sctp: add the sctp_diag.c file") Signed-off-by: Nicolas Dichtel Signed-off-by: David S. 
Miller --- include/uapi/linux/inet_diag.h | 4 +++- net/core/sock_diag.c | 2 +- net/ipv4/inet_diag.c | 9 ++++++--- net/sctp/sctp_diag.c | 5 +++-- 4 files changed, 13 insertions(+), 7 deletions(-) diff --git a/include/uapi/linux/inet_diag.h b/include/uapi/linux/inet_diag.h index f5f3629dd553..a16643705669 100644 --- a/include/uapi/linux/inet_diag.h +++ b/include/uapi/linux/inet_diag.h @@ -115,9 +115,11 @@ enum { INET_DIAG_SKV6ONLY, INET_DIAG_LOCALS, INET_DIAG_PEERS, + INET_DIAG_PAD, + __INET_DIAG_MAX, }; -#define INET_DIAG_MAX INET_DIAG_SKV6ONLY +#define INET_DIAG_MAX (__INET_DIAG_MAX - 1) /* INET_DIAG_MEM */ diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c index ca9e35bbe13c..6b10573cc9fa 100644 --- a/net/core/sock_diag.c +++ b/net/core/sock_diag.c @@ -120,7 +120,7 @@ static size_t sock_diag_nlmsg_size(void) { return NLMSG_ALIGN(sizeof(struct inet_diag_msg) + nla_total_size(sizeof(u8)) /* INET_DIAG_PROTOCOL */ - + nla_total_size(sizeof(struct tcp_info))); /* INET_DIAG_INFO */ + + nla_total_size_64bit(sizeof(struct tcp_info))); /* INET_DIAG_INFO */ } static void sock_diag_broadcast_destroy_work(struct work_struct *work) diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c index ad7956fa659a..25af1243649b 100644 --- a/net/ipv4/inet_diag.c +++ b/net/ipv4/inet_diag.c @@ -220,8 +220,9 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk, } if ((ext & (1 << (INET_DIAG_INFO - 1))) && handler->idiag_info_size) { - attr = nla_reserve(skb, INET_DIAG_INFO, - handler->idiag_info_size); + attr = nla_reserve_64bit(skb, INET_DIAG_INFO, + handler->idiag_info_size, + INET_DIAG_PAD); if (!attr) goto errout; @@ -1078,7 +1079,9 @@ int inet_diag_handler_get_info(struct sk_buff *skb, struct sock *sk) } attr = handler->idiag_info_size - ? nla_reserve(skb, INET_DIAG_INFO, handler->idiag_info_size) + ? nla_reserve_64bit(skb, INET_DIAG_INFO, + handler->idiag_info_size, + INET_DIAG_PAD) : NULL; if (attr) info = nla_data(attr); diff --git a/net/sctp/sctp_diag.c b/net/sctp/sctp_diag.c index bb2d8d9608e9..84829fff3bc9 100644 --- a/net/sctp/sctp_diag.c +++ b/net/sctp/sctp_diag.c @@ -161,8 +161,9 @@ static int inet_sctp_diag_fill(struct sock *sk, struct sctp_association *asoc, if (ext & (1 << (INET_DIAG_INFO - 1))) { struct nlattr *attr; - attr = nla_reserve(skb, INET_DIAG_INFO, - sizeof(struct sctp_info)); + attr = nla_reserve_64bit(skb, INET_DIAG_INFO, + sizeof(struct sctp_info), + INET_DIAG_PAD); if (!attr) goto errout; From 66c7a5ee1a6b7c69d41dfd68d207fdd54efba56a Mon Sep 17 00:00:00 2001 From: Nicolas Dichtel Date: Tue, 26 Apr 2016 10:06:15 +0200 Subject: [PATCH 1055/1649] ovs: align nlattr properly when needed I also fix commit 8b32ab9e6ef1: use nla_total_size_64bit() for OVS_FLOW_ATTR_USED in ovs_flow_cmd_msg_size(). Fixes: 8b32ab9e6ef1 ("ovs: use nla_put_u64_64bit()") Signed-off-by: Nicolas Dichtel Signed-off-by: David S. 
Miller --- include/uapi/linux/openvswitch.h | 2 ++ net/openvswitch/datapath.c | 27 +++++++++++++++------------ 2 files changed, 17 insertions(+), 12 deletions(-) diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h index d6be1fb778a5..bb0d515b7654 100644 --- a/include/uapi/linux/openvswitch.h +++ b/include/uapi/linux/openvswitch.h @@ -84,6 +84,7 @@ enum ovs_datapath_attr { OVS_DP_ATTR_STATS, /* struct ovs_dp_stats */ OVS_DP_ATTR_MEGAFLOW_STATS, /* struct ovs_dp_megaflow_stats */ OVS_DP_ATTR_USER_FEATURES, /* OVS_DP_F_* */ + OVS_DP_ATTR_PAD, __OVS_DP_ATTR_MAX }; @@ -253,6 +254,7 @@ enum ovs_vport_attr { OVS_VPORT_ATTR_UPCALL_PID, /* array of u32 Netlink socket PIDs for */ /* receiving upcalls */ OVS_VPORT_ATTR_STATS, /* struct ovs_vport_stats */ + OVS_VPORT_ATTR_PAD, __OVS_VPORT_ATTR_MAX }; diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c index 22d9a5316304..856bd8dba676 100644 --- a/net/openvswitch/datapath.c +++ b/net/openvswitch/datapath.c @@ -738,9 +738,9 @@ static size_t ovs_flow_cmd_msg_size(const struct sw_flow_actions *acts, len += nla_total_size(acts->orig_len); return len - + nla_total_size(sizeof(struct ovs_flow_stats)) /* OVS_FLOW_ATTR_STATS */ + + nla_total_size_64bit(sizeof(struct ovs_flow_stats)) /* OVS_FLOW_ATTR_STATS */ + nla_total_size(1) /* OVS_FLOW_ATTR_TCP_FLAGS */ - + nla_total_size(8); /* OVS_FLOW_ATTR_USED */ + + nla_total_size_64bit(8); /* OVS_FLOW_ATTR_USED */ } /* Called with ovs_mutex or RCU read lock. */ @@ -759,7 +759,9 @@ static int ovs_flow_cmd_fill_stats(const struct sw_flow *flow, return -EMSGSIZE; if (stats.n_packets && - nla_put(skb, OVS_FLOW_ATTR_STATS, sizeof(struct ovs_flow_stats), &stats)) + nla_put_64bit(skb, OVS_FLOW_ATTR_STATS, + sizeof(struct ovs_flow_stats), &stats, + OVS_FLOW_ATTR_PAD)) return -EMSGSIZE; if ((u8)ntohs(tcp_flags) && @@ -1435,8 +1437,8 @@ static size_t ovs_dp_cmd_msg_size(void) size_t msgsize = NLMSG_ALIGN(sizeof(struct ovs_header)); msgsize += nla_total_size(IFNAMSIZ); - msgsize += nla_total_size(sizeof(struct ovs_dp_stats)); - msgsize += nla_total_size(sizeof(struct ovs_dp_megaflow_stats)); + msgsize += nla_total_size_64bit(sizeof(struct ovs_dp_stats)); + msgsize += nla_total_size_64bit(sizeof(struct ovs_dp_megaflow_stats)); msgsize += nla_total_size(sizeof(u32)); /* OVS_DP_ATTR_USER_FEATURES */ return msgsize; @@ -1463,13 +1465,13 @@ static int ovs_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb, goto nla_put_failure; get_dp_stats(dp, &dp_stats, &dp_megaflow_stats); - if (nla_put(skb, OVS_DP_ATTR_STATS, sizeof(struct ovs_dp_stats), - &dp_stats)) + if (nla_put_64bit(skb, OVS_DP_ATTR_STATS, sizeof(struct ovs_dp_stats), + &dp_stats, OVS_DP_ATTR_PAD)) goto nla_put_failure; - if (nla_put(skb, OVS_DP_ATTR_MEGAFLOW_STATS, - sizeof(struct ovs_dp_megaflow_stats), - &dp_megaflow_stats)) + if (nla_put_64bit(skb, OVS_DP_ATTR_MEGAFLOW_STATS, + sizeof(struct ovs_dp_megaflow_stats), + &dp_megaflow_stats, OVS_DP_ATTR_PAD)) goto nla_put_failure; if (nla_put_u32(skb, OVS_DP_ATTR_USER_FEATURES, dp->user_features)) @@ -1838,8 +1840,9 @@ static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb, goto nla_put_failure; ovs_vport_get_stats(vport, &vport_stats); - if (nla_put(skb, OVS_VPORT_ATTR_STATS, sizeof(struct ovs_vport_stats), - &vport_stats)) + if (nla_put_64bit(skb, OVS_VPORT_ATTR_STATS, + sizeof(struct ovs_vport_stats), &vport_stats, + OVS_VPORT_ATTR_PAD)) goto nla_put_failure; if (ovs_vport_get_upcall_portids(vport, skb)) From 270cb4d05b2923a4a4d712276e61f64c82567138 
Mon Sep 17 00:00:00 2001 From: Nicolas Dichtel Date: Tue, 26 Apr 2016 10:06:16 +0200 Subject: [PATCH 1056/1649] rtnl: align nlattr properly when needed Signed-off-by: Nicolas Dichtel Signed-off-by: David S. Miller --- net/core/rtnetlink.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 9efc1f34ef3b..5503dfe6a050 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -876,7 +876,7 @@ static noinline size_t if_nlmsg_size(const struct net_device *dev, + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */ + nla_total_size(IFALIASZ) /* IFLA_IFALIAS */ + nla_total_size(IFNAMSIZ) /* IFLA_QDISC */ - + nla_total_size(sizeof(struct rtnl_link_ifmap)) + + nla_total_size_64bit(sizeof(struct rtnl_link_ifmap)) + nla_total_size(sizeof(struct rtnl_link_stats)) + nla_total_size_64bit(sizeof(struct rtnl_link_stats64)) + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */ @@ -1181,7 +1181,7 @@ static int rtnl_fill_link_ifmap(struct sk_buff *skb, struct net_device *dev) .dma = dev->dma, .port = dev->if_port, }; - if (nla_put(skb, IFLA_MAP, sizeof(map), &map)) + if (nla_put_64bit(skb, IFLA_MAP, sizeof(map), &map, IFLA_PAD)) return -EMSGSIZE; return 0; From b676338fb3aab0b63b4a2489feb8f35003db22e8 Mon Sep 17 00:00:00 2001 From: Nicolas Dichtel Date: Tue, 26 Apr 2016 10:06:17 +0200 Subject: [PATCH 1057/1649] neigh: align nlattr properly when needed Signed-off-by: Nicolas Dichtel Signed-off-by: David S. Miller --- net/core/neighbour.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/net/core/neighbour.c b/net/core/neighbour.c index 6a395d440228..29dd8cc22bbf 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c @@ -1857,7 +1857,8 @@ static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl, ndst.ndts_table_fulls += st->table_fulls; } - if (nla_put(skb, NDTA_STATS, sizeof(ndst), &ndst)) + if (nla_put_64bit(skb, NDTA_STATS, sizeof(ndst), &ndst, + NDTA_PAD)) goto nla_put_failure; } From 9854518ea04db33738602d45ebc96a200e6f5198 Mon Sep 17 00:00:00 2001 From: Nicolas Dichtel Date: Tue, 26 Apr 2016 10:06:18 +0200 Subject: [PATCH 1058/1649] sched: align nlattr properly when needed Signed-off-by: Nicolas Dichtel Signed-off-by: David S. 
Miller --- Documentation/networking/gen_stats.txt | 6 +++-- include/net/gen_stats.h | 6 +++-- include/uapi/linux/gen_stats.h | 1 + include/uapi/linux/pkt_cls.h | 2 ++ include/uapi/linux/rtnetlink.h | 1 + include/uapi/linux/tc_act/tc_bpf.h | 1 + include/uapi/linux/tc_act/tc_connmark.h | 1 + include/uapi/linux/tc_act/tc_csum.h | 1 + include/uapi/linux/tc_act/tc_defact.h | 1 + include/uapi/linux/tc_act/tc_gact.h | 1 + include/uapi/linux/tc_act/tc_ife.h | 1 + include/uapi/linux/tc_act/tc_ipt.h | 1 + include/uapi/linux/tc_act/tc_mirred.h | 1 + include/uapi/linux/tc_act/tc_nat.h | 1 + include/uapi/linux/tc_act/tc_pedit.h | 1 + include/uapi/linux/tc_act/tc_skbedit.h | 1 + include/uapi/linux/tc_act/tc_vlan.h | 1 + net/core/gen_stats.c | 35 +++++++++++++++---------- net/sched/act_api.c | 7 +++-- net/sched/act_bpf.c | 3 ++- net/sched/act_connmark.c | 3 ++- net/sched/act_csum.c | 2 +- net/sched/act_gact.c | 2 +- net/sched/act_ife.c | 2 +- net/sched/act_ipt.c | 2 +- net/sched/act_mirred.c | 2 +- net/sched/act_nat.c | 2 +- net/sched/act_pedit.c | 2 +- net/sched/act_simple.c | 2 +- net/sched/act_skbedit.c | 2 +- net/sched/act_vlan.c | 2 +- net/sched/cls_u32.c | 7 ++--- net/sched/sch_api.c | 6 +++-- 33 files changed, 72 insertions(+), 37 deletions(-) diff --git a/Documentation/networking/gen_stats.txt b/Documentation/networking/gen_stats.txt index 70e6275b757a..ff630a87b511 100644 --- a/Documentation/networking/gen_stats.txt +++ b/Documentation/networking/gen_stats.txt @@ -33,7 +33,8 @@ my_dumping_routine(struct sk_buff *skb, ...) { struct gnet_dump dump; - if (gnet_stats_start_copy(skb, TCA_STATS2, &mystruct->lock, &dump) < 0) + if (gnet_stats_start_copy(skb, TCA_STATS2, &mystruct->lock, &dump, + TCA_PAD) < 0) goto rtattr_failure; if (gnet_stats_copy_basic(&dump, &mystruct->bstats) < 0 || @@ -56,7 +57,8 @@ existing TLV types. my_dumping_routine(struct sk_buff *skb, ...) { if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, - TCA_XSTATS, &mystruct->lock, &dump) < 0) + TCA_XSTATS, &mystruct->lock, &dump, + TCA_PAD) < 0) goto rtattr_failure; ... 
} diff --git a/include/net/gen_stats.h b/include/net/gen_stats.h index cbafa3768d48..610cd397890e 100644 --- a/include/net/gen_stats.h +++ b/include/net/gen_stats.h @@ -19,17 +19,19 @@ struct gnet_dump { /* Backward compatibility */ int compat_tc_stats; int compat_xstats; + int padattr; void * xstats; int xstats_len; struct tc_stats tc_stats; }; int gnet_stats_start_copy(struct sk_buff *skb, int type, spinlock_t *lock, - struct gnet_dump *d); + struct gnet_dump *d, int padattr); int gnet_stats_start_copy_compat(struct sk_buff *skb, int type, int tc_stats_type, int xstats_type, - spinlock_t *lock, struct gnet_dump *d); + spinlock_t *lock, struct gnet_dump *d, + int padattr); int gnet_stats_copy_basic(struct gnet_dump *d, struct gnet_stats_basic_cpu __percpu *cpu, diff --git a/include/uapi/linux/gen_stats.h b/include/uapi/linux/gen_stats.h index 6487317ea619..52deccc2128e 100644 --- a/include/uapi/linux/gen_stats.h +++ b/include/uapi/linux/gen_stats.h @@ -10,6 +10,7 @@ enum { TCA_STATS_QUEUE, TCA_STATS_APP, TCA_STATS_RATE_EST64, + TCA_STATS_PAD, __TCA_STATS_MAX, }; #define TCA_STATS_MAX (__TCA_STATS_MAX - 1) diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h index c43c5f78b9c4..84660905fedf 100644 --- a/include/uapi/linux/pkt_cls.h +++ b/include/uapi/linux/pkt_cls.h @@ -66,6 +66,7 @@ enum { TCA_ACT_OPTIONS, TCA_ACT_INDEX, TCA_ACT_STATS, + TCA_ACT_PAD, __TCA_ACT_MAX }; @@ -173,6 +174,7 @@ enum { TCA_U32_PCNT, TCA_U32_MARK, TCA_U32_FLAGS, + TCA_U32_PAD, __TCA_U32_MAX }; diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h index a94e0b69c769..262f0379d83a 100644 --- a/include/uapi/linux/rtnetlink.h +++ b/include/uapi/linux/rtnetlink.h @@ -542,6 +542,7 @@ enum { TCA_FCNT, TCA_STATS2, TCA_STAB, + TCA_PAD, __TCA_MAX }; diff --git a/include/uapi/linux/tc_act/tc_bpf.h b/include/uapi/linux/tc_act/tc_bpf.h index 07f17cc70bb3..063d9d465119 100644 --- a/include/uapi/linux/tc_act/tc_bpf.h +++ b/include/uapi/linux/tc_act/tc_bpf.h @@ -26,6 +26,7 @@ enum { TCA_ACT_BPF_OPS, TCA_ACT_BPF_FD, TCA_ACT_BPF_NAME, + TCA_ACT_BPF_PAD, __TCA_ACT_BPF_MAX, }; #define TCA_ACT_BPF_MAX (__TCA_ACT_BPF_MAX - 1) diff --git a/include/uapi/linux/tc_act/tc_connmark.h b/include/uapi/linux/tc_act/tc_connmark.h index 994b0971bce2..62a5e944c554 100644 --- a/include/uapi/linux/tc_act/tc_connmark.h +++ b/include/uapi/linux/tc_act/tc_connmark.h @@ -15,6 +15,7 @@ enum { TCA_CONNMARK_UNSPEC, TCA_CONNMARK_PARMS, TCA_CONNMARK_TM, + TCA_CONNMARK_PAD, __TCA_CONNMARK_MAX }; #define TCA_CONNMARK_MAX (__TCA_CONNMARK_MAX - 1) diff --git a/include/uapi/linux/tc_act/tc_csum.h b/include/uapi/linux/tc_act/tc_csum.h index a047c49a3153..8ac8041ab5f1 100644 --- a/include/uapi/linux/tc_act/tc_csum.h +++ b/include/uapi/linux/tc_act/tc_csum.h @@ -10,6 +10,7 @@ enum { TCA_CSUM_UNSPEC, TCA_CSUM_PARMS, TCA_CSUM_TM, + TCA_CSUM_PAD, __TCA_CSUM_MAX }; #define TCA_CSUM_MAX (__TCA_CSUM_MAX - 1) diff --git a/include/uapi/linux/tc_act/tc_defact.h b/include/uapi/linux/tc_act/tc_defact.h index 17dddb40f740..d2a3abb77aeb 100644 --- a/include/uapi/linux/tc_act/tc_defact.h +++ b/include/uapi/linux/tc_act/tc_defact.h @@ -12,6 +12,7 @@ enum { TCA_DEF_TM, TCA_DEF_PARMS, TCA_DEF_DATA, + TCA_DEF_PAD, __TCA_DEF_MAX }; #define TCA_DEF_MAX (__TCA_DEF_MAX - 1) diff --git a/include/uapi/linux/tc_act/tc_gact.h b/include/uapi/linux/tc_act/tc_gact.h index f7bf94eed510..70b536a8f8b2 100644 --- a/include/uapi/linux/tc_act/tc_gact.h +++ b/include/uapi/linux/tc_act/tc_gact.h @@ -25,6 +25,7 @@ enum { TCA_GACT_TM, TCA_GACT_PARMS, 
TCA_GACT_PROB, + TCA_GACT_PAD, __TCA_GACT_MAX }; #define TCA_GACT_MAX (__TCA_GACT_MAX - 1) diff --git a/include/uapi/linux/tc_act/tc_ife.h b/include/uapi/linux/tc_act/tc_ife.h index d648ff66586f..4ece02a77b9a 100644 --- a/include/uapi/linux/tc_act/tc_ife.h +++ b/include/uapi/linux/tc_act/tc_ife.h @@ -23,6 +23,7 @@ enum { TCA_IFE_SMAC, TCA_IFE_TYPE, TCA_IFE_METALST, + TCA_IFE_PAD, __TCA_IFE_MAX }; #define TCA_IFE_MAX (__TCA_IFE_MAX - 1) diff --git a/include/uapi/linux/tc_act/tc_ipt.h b/include/uapi/linux/tc_act/tc_ipt.h index 130aaadf6fac..7c6e155dd981 100644 --- a/include/uapi/linux/tc_act/tc_ipt.h +++ b/include/uapi/linux/tc_act/tc_ipt.h @@ -14,6 +14,7 @@ enum { TCA_IPT_CNT, TCA_IPT_TM, TCA_IPT_TARG, + TCA_IPT_PAD, __TCA_IPT_MAX }; #define TCA_IPT_MAX (__TCA_IPT_MAX - 1) diff --git a/include/uapi/linux/tc_act/tc_mirred.h b/include/uapi/linux/tc_act/tc_mirred.h index 7561750e8fd6..3d7a2b352a62 100644 --- a/include/uapi/linux/tc_act/tc_mirred.h +++ b/include/uapi/linux/tc_act/tc_mirred.h @@ -20,6 +20,7 @@ enum { TCA_MIRRED_UNSPEC, TCA_MIRRED_TM, TCA_MIRRED_PARMS, + TCA_MIRRED_PAD, __TCA_MIRRED_MAX }; #define TCA_MIRRED_MAX (__TCA_MIRRED_MAX - 1) diff --git a/include/uapi/linux/tc_act/tc_nat.h b/include/uapi/linux/tc_act/tc_nat.h index 6663aeba0b9a..923457c9ebf0 100644 --- a/include/uapi/linux/tc_act/tc_nat.h +++ b/include/uapi/linux/tc_act/tc_nat.h @@ -10,6 +10,7 @@ enum { TCA_NAT_UNSPEC, TCA_NAT_PARMS, TCA_NAT_TM, + TCA_NAT_PAD, __TCA_NAT_MAX }; #define TCA_NAT_MAX (__TCA_NAT_MAX - 1) diff --git a/include/uapi/linux/tc_act/tc_pedit.h b/include/uapi/linux/tc_act/tc_pedit.h index 716cfabcd5b2..6389959a5157 100644 --- a/include/uapi/linux/tc_act/tc_pedit.h +++ b/include/uapi/linux/tc_act/tc_pedit.h @@ -10,6 +10,7 @@ enum { TCA_PEDIT_UNSPEC, TCA_PEDIT_TM, TCA_PEDIT_PARMS, + TCA_PEDIT_PAD, __TCA_PEDIT_MAX }; #define TCA_PEDIT_MAX (__TCA_PEDIT_MAX - 1) diff --git a/include/uapi/linux/tc_act/tc_skbedit.h b/include/uapi/linux/tc_act/tc_skbedit.h index 7a2e910a5f08..fecb5cc48c40 100644 --- a/include/uapi/linux/tc_act/tc_skbedit.h +++ b/include/uapi/linux/tc_act/tc_skbedit.h @@ -39,6 +39,7 @@ enum { TCA_SKBEDIT_PRIORITY, TCA_SKBEDIT_QUEUE_MAPPING, TCA_SKBEDIT_MARK, + TCA_SKBEDIT_PAD, __TCA_SKBEDIT_MAX }; #define TCA_SKBEDIT_MAX (__TCA_SKBEDIT_MAX - 1) diff --git a/include/uapi/linux/tc_act/tc_vlan.h b/include/uapi/linux/tc_act/tc_vlan.h index f7b8d448b960..31151ff6264f 100644 --- a/include/uapi/linux/tc_act/tc_vlan.h +++ b/include/uapi/linux/tc_act/tc_vlan.h @@ -28,6 +28,7 @@ enum { TCA_VLAN_PARMS, TCA_VLAN_PUSH_VLAN_ID, TCA_VLAN_PUSH_VLAN_PROTOCOL, + TCA_VLAN_PAD, __TCA_VLAN_MAX, }; #define TCA_VLAN_MAX (__TCA_VLAN_MAX - 1) diff --git a/net/core/gen_stats.c b/net/core/gen_stats.c index e640462ea8bf..f96ee8b9478d 100644 --- a/net/core/gen_stats.c +++ b/net/core/gen_stats.c @@ -25,9 +25,9 @@ static inline int -gnet_stats_copy(struct gnet_dump *d, int type, void *buf, int size) +gnet_stats_copy(struct gnet_dump *d, int type, void *buf, int size, int padattr) { - if (nla_put(d->skb, type, size, buf)) + if (nla_put_64bit(d->skb, type, size, buf, padattr)) goto nla_put_failure; return 0; @@ -59,7 +59,8 @@ nla_put_failure: */ int gnet_stats_start_copy_compat(struct sk_buff *skb, int type, int tc_stats_type, - int xstats_type, spinlock_t *lock, struct gnet_dump *d) + int xstats_type, spinlock_t *lock, + struct gnet_dump *d, int padattr) __acquires(lock) { memset(d, 0, sizeof(*d)); @@ -71,16 +72,17 @@ gnet_stats_start_copy_compat(struct sk_buff *skb, int type, int tc_stats_type, d->skb = skb; 
d->compat_tc_stats = tc_stats_type; d->compat_xstats = xstats_type; + d->padattr = padattr; if (d->tail) - return gnet_stats_copy(d, type, NULL, 0); + return gnet_stats_copy(d, type, NULL, 0, padattr); return 0; } EXPORT_SYMBOL(gnet_stats_start_copy_compat); /** - * gnet_stats_start_copy_compat - start dumping procedure in compatibility mode + * gnet_stats_start_copy - start dumping procedure in compatibility mode * @skb: socket buffer to put statistics TLVs into * @type: TLV type for top level statistic TLV * @lock: statistics lock @@ -94,9 +96,9 @@ EXPORT_SYMBOL(gnet_stats_start_copy_compat); */ int gnet_stats_start_copy(struct sk_buff *skb, int type, spinlock_t *lock, - struct gnet_dump *d) + struct gnet_dump *d, int padattr) { - return gnet_stats_start_copy_compat(skb, type, 0, 0, lock, d); + return gnet_stats_start_copy_compat(skb, type, 0, 0, lock, d, padattr); } EXPORT_SYMBOL(gnet_stats_start_copy); @@ -169,7 +171,8 @@ gnet_stats_copy_basic(struct gnet_dump *d, memset(&sb, 0, sizeof(sb)); sb.bytes = bstats.bytes; sb.packets = bstats.packets; - return gnet_stats_copy(d, TCA_STATS_BASIC, &sb, sizeof(sb)); + return gnet_stats_copy(d, TCA_STATS_BASIC, &sb, sizeof(sb), + TCA_STATS_PAD); } return 0; } @@ -208,11 +211,13 @@ gnet_stats_copy_rate_est(struct gnet_dump *d, } if (d->tail) { - res = gnet_stats_copy(d, TCA_STATS_RATE_EST, &est, sizeof(est)); + res = gnet_stats_copy(d, TCA_STATS_RATE_EST, &est, sizeof(est), + TCA_STATS_PAD); if (res < 0 || est.bps == r->bps) return res; /* emit 64bit stats only if needed */ - return gnet_stats_copy(d, TCA_STATS_RATE_EST64, r, sizeof(*r)); + return gnet_stats_copy(d, TCA_STATS_RATE_EST64, r, sizeof(*r), + TCA_STATS_PAD); } return 0; @@ -286,7 +291,8 @@ gnet_stats_copy_queue(struct gnet_dump *d, if (d->tail) return gnet_stats_copy(d, TCA_STATS_QUEUE, - &qstats, sizeof(qstats)); + &qstats, sizeof(qstats), + TCA_STATS_PAD); return 0; } @@ -316,7 +322,8 @@ gnet_stats_copy_app(struct gnet_dump *d, void *st, int len) } if (d->tail) - return gnet_stats_copy(d, TCA_STATS_APP, st, len); + return gnet_stats_copy(d, TCA_STATS_APP, st, len, + TCA_STATS_PAD); return 0; @@ -347,12 +354,12 @@ gnet_stats_finish_copy(struct gnet_dump *d) if (d->compat_tc_stats) if (gnet_stats_copy(d, d->compat_tc_stats, &d->tc_stats, - sizeof(d->tc_stats)) < 0) + sizeof(d->tc_stats), d->padattr) < 0) return -1; if (d->compat_xstats && d->xstats) { if (gnet_stats_copy(d, d->compat_xstats, d->xstats, - d->xstats_len) < 0) + d->xstats_len, d->padattr) < 0) return -1; } diff --git a/net/sched/act_api.c b/net/sched/act_api.c index 96066665e376..336774a535c3 100644 --- a/net/sched/act_api.c +++ b/net/sched/act_api.c @@ -657,12 +657,15 @@ int tcf_action_copy_stats(struct sk_buff *skb, struct tc_action *a, if (compat_mode) { if (a->type == TCA_OLD_COMPAT) err = gnet_stats_start_copy_compat(skb, 0, - TCA_STATS, TCA_XSTATS, &p->tcfc_lock, &d); + TCA_STATS, + TCA_XSTATS, + &p->tcfc_lock, &d, + TCA_PAD); else return 0; } else err = gnet_stats_start_copy(skb, TCA_ACT_STATS, - &p->tcfc_lock, &d); + &p->tcfc_lock, &d, TCA_ACT_PAD); if (err < 0) goto errout; diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c index 8c9f1f0459ab..4fd703362563 100644 --- a/net/sched/act_bpf.c +++ b/net/sched/act_bpf.c @@ -156,7 +156,8 @@ static int tcf_bpf_dump(struct sk_buff *skb, struct tc_action *act, tm.lastuse = jiffies_to_clock_t(jiffies - prog->tcf_tm.lastuse); tm.expires = jiffies_to_clock_t(prog->tcf_tm.expires); - if (nla_put(skb, TCA_ACT_BPF_TM, sizeof(tm), &tm)) + if (nla_put_64bit(skb, 
TCA_ACT_BPF_TM, sizeof(tm), &tm, + TCA_ACT_BPF_PAD)) goto nla_put_failure; return skb->len; diff --git a/net/sched/act_connmark.c b/net/sched/act_connmark.c index c0ed93ce2391..2ba700c765e0 100644 --- a/net/sched/act_connmark.c +++ b/net/sched/act_connmark.c @@ -163,7 +163,8 @@ static inline int tcf_connmark_dump(struct sk_buff *skb, struct tc_action *a, t.install = jiffies_to_clock_t(jiffies - ci->tcf_tm.install); t.lastuse = jiffies_to_clock_t(jiffies - ci->tcf_tm.lastuse); t.expires = jiffies_to_clock_t(ci->tcf_tm.expires); - if (nla_put(skb, TCA_CONNMARK_TM, sizeof(t), &t)) + if (nla_put_64bit(skb, TCA_CONNMARK_TM, sizeof(t), &t, + TCA_CONNMARK_PAD)) goto nla_put_failure; return skb->len; diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c index d22426cdebc0..28e934ed038a 100644 --- a/net/sched/act_csum.c +++ b/net/sched/act_csum.c @@ -549,7 +549,7 @@ static int tcf_csum_dump(struct sk_buff *skb, t.install = jiffies_to_clock_t(jiffies - p->tcf_tm.install); t.lastuse = jiffies_to_clock_t(jiffies - p->tcf_tm.lastuse); t.expires = jiffies_to_clock_t(p->tcf_tm.expires); - if (nla_put(skb, TCA_CSUM_TM, sizeof(t), &t)) + if (nla_put_64bit(skb, TCA_CSUM_TM, sizeof(t), &t, TCA_CSUM_PAD)) goto nla_put_failure; return skb->len; diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c index 887fc1f209ff..1a6e09fbb2a5 100644 --- a/net/sched/act_gact.c +++ b/net/sched/act_gact.c @@ -177,7 +177,7 @@ static int tcf_gact_dump(struct sk_buff *skb, struct tc_action *a, int bind, int t.install = jiffies_to_clock_t(jiffies - gact->tcf_tm.install); t.lastuse = jiffies_to_clock_t(jiffies - gact->tcf_tm.lastuse); t.expires = jiffies_to_clock_t(gact->tcf_tm.expires); - if (nla_put(skb, TCA_GACT_TM, sizeof(t), &t)) + if (nla_put_64bit(skb, TCA_GACT_TM, sizeof(t), &t, TCA_GACT_PAD)) goto nla_put_failure; return skb->len; diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c index c589a9ba506a..556f44c9c454 100644 --- a/net/sched/act_ife.c +++ b/net/sched/act_ife.c @@ -550,7 +550,7 @@ static int tcf_ife_dump(struct sk_buff *skb, struct tc_action *a, int bind, t.install = jiffies_to_clock_t(jiffies - ife->tcf_tm.install); t.lastuse = jiffies_to_clock_t(jiffies - ife->tcf_tm.lastuse); t.expires = jiffies_to_clock_t(ife->tcf_tm.expires); - if (nla_put(skb, TCA_IFE_TM, sizeof(t), &t)) + if (nla_put_64bit(skb, TCA_IFE_TM, sizeof(t), &t, TCA_IFE_PAD)) goto nla_put_failure; if (!is_zero_ether_addr(ife->eth_dst)) { diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c index 350e134cffb3..1464f6a09446 100644 --- a/net/sched/act_ipt.c +++ b/net/sched/act_ipt.c @@ -275,7 +275,7 @@ static int tcf_ipt_dump(struct sk_buff *skb, struct tc_action *a, int bind, int tm.install = jiffies_to_clock_t(jiffies - ipt->tcf_tm.install); tm.lastuse = jiffies_to_clock_t(jiffies - ipt->tcf_tm.lastuse); tm.expires = jiffies_to_clock_t(ipt->tcf_tm.expires); - if (nla_put(skb, TCA_IPT_TM, sizeof (tm), &tm)) + if (nla_put_64bit(skb, TCA_IPT_TM, sizeof(tm), &tm, TCA_IPT_PAD)) goto nla_put_failure; kfree(t); return skb->len; diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c index e8a760cf7775..dea57c1ec90c 100644 --- a/net/sched/act_mirred.c +++ b/net/sched/act_mirred.c @@ -214,7 +214,7 @@ static int tcf_mirred_dump(struct sk_buff *skb, struct tc_action *a, int bind, i t.install = jiffies_to_clock_t(jiffies - m->tcf_tm.install); t.lastuse = jiffies_to_clock_t(jiffies - m->tcf_tm.lastuse); t.expires = jiffies_to_clock_t(m->tcf_tm.expires); - if (nla_put(skb, TCA_MIRRED_TM, sizeof(t), &t)) + if (nla_put_64bit(skb, TCA_MIRRED_TM, 
sizeof(t), &t, TCA_MIRRED_PAD)) goto nla_put_failure; return skb->len; diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c index 0f65cdfbfb1d..c0a879f940de 100644 --- a/net/sched/act_nat.c +++ b/net/sched/act_nat.c @@ -267,7 +267,7 @@ static int tcf_nat_dump(struct sk_buff *skb, struct tc_action *a, t.install = jiffies_to_clock_t(jiffies - p->tcf_tm.install); t.lastuse = jiffies_to_clock_t(jiffies - p->tcf_tm.lastuse); t.expires = jiffies_to_clock_t(p->tcf_tm.expires); - if (nla_put(skb, TCA_NAT_TM, sizeof(t), &t)) + if (nla_put_64bit(skb, TCA_NAT_TM, sizeof(t), &t, TCA_NAT_PAD)) goto nla_put_failure; return skb->len; diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c index 429c3ab65142..c6e18f230af6 100644 --- a/net/sched/act_pedit.c +++ b/net/sched/act_pedit.c @@ -203,7 +203,7 @@ static int tcf_pedit_dump(struct sk_buff *skb, struct tc_action *a, t.install = jiffies_to_clock_t(jiffies - p->tcf_tm.install); t.lastuse = jiffies_to_clock_t(jiffies - p->tcf_tm.lastuse); t.expires = jiffies_to_clock_t(p->tcf_tm.expires); - if (nla_put(skb, TCA_PEDIT_TM, sizeof(t), &t)) + if (nla_put_64bit(skb, TCA_PEDIT_TM, sizeof(t), &t, TCA_PEDIT_PAD)) goto nla_put_failure; kfree(opt); return skb->len; diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c index 75b2be13fbcc..2057fd56d74c 100644 --- a/net/sched/act_simple.c +++ b/net/sched/act_simple.c @@ -155,7 +155,7 @@ static int tcf_simp_dump(struct sk_buff *skb, struct tc_action *a, t.install = jiffies_to_clock_t(jiffies - d->tcf_tm.install); t.lastuse = jiffies_to_clock_t(jiffies - d->tcf_tm.lastuse); t.expires = jiffies_to_clock_t(d->tcf_tm.expires); - if (nla_put(skb, TCA_DEF_TM, sizeof(t), &t)) + if (nla_put_64bit(skb, TCA_DEF_TM, sizeof(t), &t, TCA_DEF_PAD)) goto nla_put_failure; return skb->len; diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c index cfcdbdc00c9b..51b24998904f 100644 --- a/net/sched/act_skbedit.c +++ b/net/sched/act_skbedit.c @@ -167,7 +167,7 @@ static int tcf_skbedit_dump(struct sk_buff *skb, struct tc_action *a, t.install = jiffies_to_clock_t(jiffies - d->tcf_tm.install); t.lastuse = jiffies_to_clock_t(jiffies - d->tcf_tm.lastuse); t.expires = jiffies_to_clock_t(d->tcf_tm.expires); - if (nla_put(skb, TCA_SKBEDIT_TM, sizeof(t), &t)) + if (nla_put_64bit(skb, TCA_SKBEDIT_TM, sizeof(t), &t, TCA_SKBEDIT_PAD)) goto nla_put_failure; return skb->len; diff --git a/net/sched/act_vlan.c b/net/sched/act_vlan.c index bab8ae0cefc0..c1682ab9bc7e 100644 --- a/net/sched/act_vlan.c +++ b/net/sched/act_vlan.c @@ -175,7 +175,7 @@ static int tcf_vlan_dump(struct sk_buff *skb, struct tc_action *a, t.install = jiffies_to_clock_t(jiffies - v->tcf_tm.install); t.lastuse = jiffies_to_clock_t(jiffies - v->tcf_tm.lastuse); t.expires = jiffies_to_clock_t(v->tcf_tm.expires); - if (nla_put(skb, TCA_VLAN_TM, sizeof(t), &t)) + if (nla_put_64bit(skb, TCA_VLAN_TM, sizeof(t), &t, TCA_VLAN_PAD)) goto nla_put_failure; return skb->len; diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c index 563cdad76448..e64877a3c084 100644 --- a/net/sched/cls_u32.c +++ b/net/sched/cls_u32.c @@ -1140,9 +1140,10 @@ static int u32_dump(struct net *net, struct tcf_proto *tp, unsigned long fh, gpf->kcnts[i] += pf->kcnts[i]; } - if (nla_put(skb, TCA_U32_PCNT, - sizeof(struct tc_u32_pcnt) + n->sel.nkeys*sizeof(u64), - gpf)) { + if (nla_put_64bit(skb, TCA_U32_PCNT, + sizeof(struct tc_u32_pcnt) + + n->sel.nkeys * sizeof(u64), + gpf, TCA_U32_PAD)) { kfree(gpf); goto nla_put_failure; } diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index 
3b180ff72f79..64f71a2155f3 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c @@ -1365,7 +1365,8 @@ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid, goto nla_put_failure; if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS, - qdisc_root_sleeping_lock(q), &d) < 0) + qdisc_root_sleeping_lock(q), &d, + TCA_PAD) < 0) goto nla_put_failure; if (q->ops->dump_stats && q->ops->dump_stats(q, &d) < 0) @@ -1679,7 +1680,8 @@ static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q, goto nla_put_failure; if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS, - qdisc_root_sleeping_lock(q), &d) < 0) + qdisc_root_sleeping_lock(q), &d, + TCA_PAD) < 0) goto nla_put_failure; if (cl_ops->dump_stats && cl_ops->dump_stats(q, cl, &d) < 0) From fed2db99824334b3a7219da6b45d70f448449d7d Mon Sep 17 00:00:00 2001 From: Shannon Nelson Date: Tue, 12 Apr 2016 08:30:43 -0700 Subject: [PATCH 1059/1649] i40e: Specify AQ event opcode to wait for To add a little flexibility to the nvmupdate facility, this code adds the ability to specify an AQ event opcode to wait on after the Exec_AQ request. Change-ID: Iddbfd63c3de8df3edb9d3e90678b08989bc4946e Signed-off-by: Shannon Nelson Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_nvm.c | 49 ++++++++++++++++--- drivers/net/ethernet/intel/i40e/i40e_type.h | 1 + drivers/net/ethernet/intel/i40evf/i40e_type.h | 1 + 3 files changed, 44 insertions(+), 7 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c index f2cea3d25de3..954efe3118db 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c +++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c @@ -693,10 +693,10 @@ i40e_status i40e_nvmupd_command(struct i40e_hw *hw, /* early check for status command and debug msgs */ upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno); - i40e_debug(hw, I40E_DEBUG_NVM, "%s state %d nvm_release_on_hold %d cmd 0x%08x config 0x%08x offset 0x%08x data_size 0x%08x\n", + i40e_debug(hw, I40E_DEBUG_NVM, "%s state %d nvm_release_on_hold %d opc 0x%04x cmd 0x%08x config 0x%08x offset 0x%08x data_size 0x%08x\n", i40e_nvm_update_state_str[upd_cmd], hw->nvmupd_state, - hw->nvm_release_on_done, + hw->nvm_release_on_done, hw->nvm_wait_opcode, cmd->command, cmd->config, cmd->offset, cmd->data_size); if (upd_cmd == I40E_NVMUPD_INVALID) { @@ -710,7 +710,18 @@ i40e_status i40e_nvmupd_command(struct i40e_hw *hw, * going into the state machine */ if (upd_cmd == I40E_NVMUPD_STATUS) { + if (!cmd->data_size) { + *perrno = -EFAULT; + return I40E_ERR_BUF_TOO_SHORT; + } + bytes[0] = hw->nvmupd_state; + + if (cmd->data_size >= 4) { + bytes[1] = 0; + *((u16 *)&bytes[2]) = hw->nvm_wait_opcode; + } + return 0; } @@ -729,6 +740,14 @@ i40e_status i40e_nvmupd_command(struct i40e_hw *hw, case I40E_NVMUPD_STATE_INIT_WAIT: case I40E_NVMUPD_STATE_WRITE_WAIT: + /* if we need to stop waiting for an event, clear + * the wait info and return before doing anything else + */ + if (cmd->offset == 0xffff) { + i40e_nvmupd_check_wait_event(hw, hw->nvm_wait_opcode); + return 0; + } + status = I40E_ERR_NOT_READY; *perrno = -EBUSY; break; @@ -800,6 +819,7 @@ static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw, i40e_release_nvm(hw); } else { hw->nvm_release_on_done = true; + hw->nvm_wait_opcode = i40e_aqc_opc_nvm_erase; hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT; } } @@ -816,6 +836,7 @@ static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw, i40e_release_nvm(hw); } else { 
hw->nvm_release_on_done = true; + hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update; hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT; } } @@ -828,10 +849,12 @@ static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw, hw->aq.asq_last_status); } else { status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno); - if (status) + if (status) { i40e_release_nvm(hw); - else + } else { + hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update; hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT; + } } break; @@ -850,6 +873,7 @@ static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw, i40e_release_nvm(hw); } else { hw->nvm_release_on_done = true; + hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update; hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT; } } @@ -940,8 +964,10 @@ retry: switch (upd_cmd) { case I40E_NVMUPD_WRITE_CON: status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno); - if (!status) + if (!status) { + hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update; hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT; + } break; case I40E_NVMUPD_WRITE_LCB: @@ -954,6 +980,7 @@ retry: hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; } else { hw->nvm_release_on_done = true; + hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update; hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT; } break; @@ -967,6 +994,7 @@ retry: -EIO; hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; } else { + hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update; hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT; } break; @@ -981,6 +1009,7 @@ retry: hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; } else { hw->nvm_release_on_done = true; + hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update; hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT; } break; @@ -1036,14 +1065,14 @@ retry: **/ void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode) { - if (opcode == i40e_aqc_opc_nvm_erase || - opcode == i40e_aqc_opc_nvm_update) { + if (opcode == hw->nvm_wait_opcode) { i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: clearing wait on opcode 0x%04x\n", opcode); if (hw->nvm_release_on_done) { i40e_release_nvm(hw); hw->nvm_release_on_done = false; } + hw->nvm_wait_opcode = 0; switch (hw->nvmupd_state) { case I40E_NVMUPD_STATE_INIT_WAIT: @@ -1220,6 +1249,12 @@ static i40e_status i40e_nvmupd_exec_aq(struct i40e_hw *hw, *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status); } + /* should we wait for a followup event? 
*/ + if (cmd->offset) { + hw->nvm_wait_opcode = cmd->offset; + hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT; + } + return status; } diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h index 793036b259e5..bb57cd909c47 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_type.h +++ b/drivers/net/ethernet/intel/i40e/i40e_type.h @@ -550,6 +550,7 @@ struct i40e_hw { struct i40e_aq_desc nvm_wb_desc; struct i40e_virt_mem nvm_buff; bool nvm_release_on_done; + u16 nvm_wait_opcode; /* HMC info */ struct i40e_hmc_info hmc; /* HMC info struct */ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h index 4a78c18e0b7b..b72071363a8f 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_type.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_type.h @@ -523,6 +523,7 @@ struct i40e_hw { struct i40e_aq_desc nvm_wb_desc; struct i40e_virt_mem nvm_buff; bool nvm_release_on_done; + u16 nvm_wait_opcode; /* HMC info */ struct i40e_hmc_info hmc; /* HMC info struct */ From 43a3d9ba34c9ca313573201d3f45de5ab3494cec Mon Sep 17 00:00:00 2001 From: Mitch Williams Date: Tue, 12 Apr 2016 08:30:44 -0700 Subject: [PATCH 1060/1649] i40evf: Allow PF driver to configure RSS If the PF driver reports proper support, allow the PF driver to configure RSS on the behalf of the VF driver. This will allow for RSS support on future hardware without changes to the VF driver. Unfortunately, the old RSS code still needs to stay as the driver needs to be compatible with PF drivers that don't support this interface. But this change still simplifies the data structures a bunch and makes this code simpler to read and maintain. Change-ID: I0375aad40788ecdc0cb24d5cfeccf07804e69771 Signed-off-by: Mitch Williams Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40evf/i40evf.h | 30 +- .../ethernet/intel/i40evf/i40evf_ethtool.c | 121 +++---- .../net/ethernet/intel/i40evf/i40evf_main.c | 308 ++++++------------ .../ethernet/intel/i40evf/i40evf_virtchnl.c | 119 +++++++ 4 files changed, 294 insertions(+), 284 deletions(-) diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h index 017c83b6271f..63f7aae2c8ce 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf.h +++ b/drivers/net/ethernet/intel/i40evf/i40evf.h @@ -67,8 +67,6 @@ struct i40e_vsi { u16 rx_itr_setting; u16 tx_itr_setting; u16 qs_handle; - u8 *rss_hkey_user; /* User configured hash keys */ - u8 *rss_lut_user; /* User configured lookup table entries */ }; /* How many Rx Buffers do we bundle into one write to the hardware ? */ @@ -239,8 +237,13 @@ struct i40evf_adapter { #define I40EVF_FLAG_AQ_CONFIGURE_QUEUES BIT(6) #define I40EVF_FLAG_AQ_MAP_VECTORS BIT(7) #define I40EVF_FLAG_AQ_HANDLE_RESET BIT(8) -#define I40EVF_FLAG_AQ_CONFIGURE_RSS BIT(9) +#define I40EVF_FLAG_AQ_CONFIGURE_RSS BIT(9) /* direct AQ config */ #define I40EVF_FLAG_AQ_GET_CONFIG BIT(10) +/* Newer style, RSS done by the PF so we can ignore hardware vagaries. */ +#define I40EVF_FLAG_AQ_GET_HENA BIT(11) +#define I40EVF_FLAG_AQ_SET_HENA BIT(12) +#define I40EVF_FLAG_AQ_SET_RSS_KEY BIT(13) +#define I40EVF_FLAG_AQ_SET_RSS_LUT BIT(14) /* OS defined structs */ struct net_device *netdev; @@ -260,8 +263,14 @@ struct i40evf_adapter { (_a)->vf_res->vf_offload_flags & \ I40E_VIRTCHNL_VF_OFFLOAD_IWARP : \ 0) +/* RSS by the PF should be preferred over RSS via other methods. 
*/ +#define RSS_PF(_a) ((_a)->vf_res->vf_offload_flags & \ + I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF) #define RSS_AQ(_a) ((_a)->vf_res->vf_offload_flags & \ I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ) +#define RSS_REG(_a) (!((_a)->vf_res->vf_offload_flags & \ + (I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ | \ + I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF))) #define VLAN_ALLOWED(_a) ((_a)->vf_res->vf_offload_flags & \ I40E_VIRTCHNL_VF_OFFLOAD_VLAN) struct i40e_virtchnl_vf_resource *vf_res; /* incl. all VSIs */ @@ -273,6 +282,12 @@ struct i40evf_adapter { struct i40e_eth_stats current_stats; struct i40e_vsi vsi; u32 aq_wait_count; + /* RSS stuff */ + u64 hena; + u16 rss_key_size; + u16 rss_lut_size; + u8 *rss_key; + u8 *rss_lut; }; @@ -316,11 +331,12 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter); void i40evf_set_promiscuous(struct i40evf_adapter *adapter, int flags); void i40evf_request_stats(struct i40evf_adapter *adapter); void i40evf_request_reset(struct i40evf_adapter *adapter); +void i40evf_get_hena(struct i40evf_adapter *adapter); +void i40evf_set_hena(struct i40evf_adapter *adapter); +void i40evf_set_rss_key(struct i40evf_adapter *adapter); +void i40evf_set_rss_lut(struct i40evf_adapter *adapter); void i40evf_virtchnl_completion(struct i40evf_adapter *adapter, enum i40e_virtchnl_ops v_opcode, i40e_status v_retval, u8 *msg, u16 msglen); -int i40evf_config_rss(struct i40e_vsi *vsi, const u8 *seed, u8 *lut, - u16 lut_size); -int i40evf_get_rss(struct i40e_vsi *vsi, const u8 *seed, u8 *lut, - u16 lut_size); +int i40evf_config_rss(struct i40evf_adapter *adapter); #endif /* _I40EVF_H_ */ diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c index dd4430aae7fa..9f7657c68688 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c @@ -387,20 +387,16 @@ static int i40evf_set_coalesce(struct net_device *netdev, static int i40evf_get_rss_hash_opts(struct i40evf_adapter *adapter, struct ethtool_rxnfc *cmd) { - struct i40e_hw *hw = &adapter->hw; - u64 hena = (u64)rd32(hw, I40E_VFQF_HENA(0)) | - ((u64)rd32(hw, I40E_VFQF_HENA(1)) << 32); - /* We always hash on IP src and dest addresses */ cmd->data = RXH_IP_SRC | RXH_IP_DST; switch (cmd->flow_type) { case TCP_V4_FLOW: - if (hena & BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP)) + if (adapter->hena & BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP)) cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; break; case UDP_V4_FLOW: - if (hena & BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP)) + if (adapter->hena & BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP)) cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; break; @@ -412,11 +408,11 @@ static int i40evf_get_rss_hash_opts(struct i40evf_adapter *adapter, break; case TCP_V6_FLOW: - if (hena & BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP)) + if (adapter->hena & BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP)) cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; break; case UDP_V6_FLOW: - if (hena & BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP)) + if (adapter->hena & BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP)) cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; break; @@ -476,9 +472,6 @@ static int i40evf_set_rss_hash_opt(struct i40evf_adapter *adapter, struct i40e_hw *hw = &adapter->hw; u32 flags = adapter->vf_res->vf_offload_flags; - u64 hena = (u64)rd32(hw, I40E_VFQF_HENA(0)) | - ((u64)rd32(hw, I40E_VFQF_HENA(1)) << 32); - /* RSS does not support anything other than hashing * to queues on src and dst IPs and ports */ @@ -495,10 +488,11 @@ static int i40evf_set_rss_hash_opt(struct 
i40evf_adapter *adapter, case TCP_V4_FLOW: if (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { if (flags & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2) - hena |= + adapter->hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK); - hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP); + adapter->hena |= + BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP); } else { return -EINVAL; } @@ -506,10 +500,11 @@ static int i40evf_set_rss_hash_opt(struct i40evf_adapter *adapter, case TCP_V6_FLOW: if (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { if (flags & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2) - hena |= + adapter->hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK); - hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP); + adapter->hena |= + BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP); } else { return -EINVAL; } @@ -517,11 +512,12 @@ static int i40evf_set_rss_hash_opt(struct i40evf_adapter *adapter, case UDP_V4_FLOW: if (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { if (flags & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2) - hena |= + adapter->hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP); - hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | + adapter->hena |= + (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4)); } else { return -EINVAL; @@ -530,11 +526,12 @@ static int i40evf_set_rss_hash_opt(struct i40evf_adapter *adapter, case UDP_V6_FLOW: if (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { if (flags & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2) - hena |= + adapter->hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP); - hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | + adapter->hena |= + (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6)); } else { return -EINVAL; @@ -547,7 +544,7 @@ static int i40evf_set_rss_hash_opt(struct i40evf_adapter *adapter, if ((nfc->data & RXH_L4_B_0_1) || (nfc->data & RXH_L4_B_2_3)) return -EINVAL; - hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER); + adapter->hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER); break; case AH_ESP_V6_FLOW: case AH_V6_FLOW: @@ -556,23 +553,27 @@ static int i40evf_set_rss_hash_opt(struct i40evf_adapter *adapter, if ((nfc->data & RXH_L4_B_0_1) || (nfc->data & RXH_L4_B_2_3)) return -EINVAL; - hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER); + adapter->hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER); break; case IPV4_FLOW: - hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | - BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4)); + adapter->hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | + BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4)); break; case IPV6_FLOW: - hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | - BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6)); + adapter->hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | + BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6)); break; default: return -EINVAL; } - wr32(hw, I40E_VFQF_HENA(0), (u32)hena); - wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32)); - i40e_flush(hw); + if (RSS_PF(adapter)) { + adapter->aq_required = I40EVF_FLAG_AQ_SET_HENA; + } else { + wr32(hw, I40E_VFQF_HENA(0), (u32)adapter->hena); + wr32(hw, I40E_VFQF_HENA(1), (u32)(adapter->hena >> 32)); + i40e_flush(hw); + } return 0; } @@ -623,6 +624,19 @@ static void i40evf_get_channels(struct net_device *netdev, ch->combined_count = adapter->num_active_queues; } +/** + * i40evf_get_rxfh_key_size - get the RSS hash key size + * @netdev: network interface device structure + 
* + * Returns the table size. + **/ +static u32 i40evf_get_rxfh_key_size(struct net_device *netdev) +{ + struct i40evf_adapter *adapter = netdev_priv(netdev); + + return adapter->rss_key_size; +} + /** * i40evf_get_rxfh_indir_size - get the rx flow hash indirection table size * @netdev: network interface device structure @@ -631,7 +645,9 @@ static void i40evf_get_channels(struct net_device *netdev, **/ static u32 i40evf_get_rxfh_indir_size(struct net_device *netdev) { - return (I40E_VFQF_HLUT_MAX_INDEX + 1) * 4; + struct i40evf_adapter *adapter = netdev_priv(netdev); + + return adapter->rss_lut_size; } /** @@ -646,9 +662,6 @@ static int i40evf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc) { struct i40evf_adapter *adapter = netdev_priv(netdev); - struct i40e_vsi *vsi = &adapter->vsi; - u8 *seed = NULL, *lut; - int ret; u16 i; if (hfunc) @@ -656,24 +669,13 @@ static int i40evf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, if (!indir) return 0; - seed = key; - - lut = kzalloc(I40EVF_HLUT_ARRAY_SIZE, GFP_KERNEL); - if (!lut) - return -ENOMEM; - - ret = i40evf_get_rss(vsi, seed, lut, I40EVF_HLUT_ARRAY_SIZE); - if (ret) - goto out; + memcpy(key, adapter->rss_key, adapter->rss_key_size); /* Each 32 bits pointed by 'indir' is stored with a lut entry */ - for (i = 0; i < I40EVF_HLUT_ARRAY_SIZE; i++) - indir[i] = (u32)lut[i]; + for (i = 0; i < adapter->rss_lut_size; i++) + indir[i] = (u32)adapter->rss_lut[i]; -out: - kfree(lut); - - return ret; + return 0; } /** @@ -689,8 +691,6 @@ static int i40evf_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key, const u8 hfunc) { struct i40evf_adapter *adapter = netdev_priv(netdev); - struct i40e_vsi *vsi = &adapter->vsi; - u8 *seed = NULL; u16 i; /* We do not allow change in unsupported parameters */ @@ -701,28 +701,14 @@ static int i40evf_set_rxfh(struct net_device *netdev, const u32 *indir, return 0; if (key) { - if (!vsi->rss_hkey_user) { - vsi->rss_hkey_user = kzalloc(I40EVF_HKEY_ARRAY_SIZE, - GFP_KERNEL); - if (!vsi->rss_hkey_user) - return -ENOMEM; - } - memcpy(vsi->rss_hkey_user, key, I40EVF_HKEY_ARRAY_SIZE); - seed = vsi->rss_hkey_user; - } - if (!vsi->rss_lut_user) { - vsi->rss_lut_user = kzalloc(I40EVF_HLUT_ARRAY_SIZE, - GFP_KERNEL); - if (!vsi->rss_lut_user) - return -ENOMEM; + memcpy(adapter->rss_key, key, adapter->rss_key_size); } /* Each 32 bits pointed by 'indir' is stored with a lut entry */ - for (i = 0; i < I40EVF_HLUT_ARRAY_SIZE; i++) - vsi->rss_lut_user[i] = (u8)(indir[i]); + for (i = 0; i < adapter->rss_lut_size; i++) + adapter->rss_lut[i] = (u8)(indir[i]); - return i40evf_config_rss(vsi, seed, vsi->rss_lut_user, - I40EVF_HLUT_ARRAY_SIZE); + return i40evf_config_rss(adapter); } /** @@ -794,6 +780,7 @@ static const struct ethtool_ops i40evf_ethtool_ops = { .get_rxfh = i40evf_get_rxfh, .set_rxfh = i40evf_set_rxfh, .get_channels = i40evf_get_channels, + .get_rxfh_key_size = i40evf_get_rxfh_key_size, }; /** diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c index 806da2686623..af53159010ab 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c @@ -1224,24 +1224,18 @@ out: } /** - * i40e_config_rss_aq - Prepare for RSS using AQ commands - * @vsi: vsi structure - * @seed: RSS hash seed - * @lut: Lookup table - * @lut_size: Lookup table size + * i40e_config_rss_aq - Configure RSS keys and lut by using AQ commands + * @adapter: board private structure * * Return 0 on success, 
negative on failure **/ -static int i40evf_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed, - u8 *lut, u16 lut_size) +static int i40evf_config_rss_aq(struct i40evf_adapter *adapter) { - struct i40evf_adapter *adapter = vsi->back; + struct i40e_aqc_get_set_rss_key_data *rss_key = + (struct i40e_aqc_get_set_rss_key_data *)adapter->rss_key; struct i40e_hw *hw = &adapter->hw; int ret = 0; - if (!vsi->id) - return -EINVAL; - if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */ dev_err(&adapter->pdev->dev, "Cannot configure RSS, command %d pending\n", @@ -1249,198 +1243,82 @@ static int i40evf_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed, return -EBUSY; } - if (seed) { - struct i40e_aqc_get_set_rss_key_data *rss_key = - (struct i40e_aqc_get_set_rss_key_data *)seed; - ret = i40evf_aq_set_rss_key(hw, vsi->id, rss_key); - if (ret) { - dev_err(&adapter->pdev->dev, "Cannot set RSS key, err %s aq_err %s\n", - i40evf_stat_str(hw, ret), - i40evf_aq_str(hw, hw->aq.asq_last_status)); - return ret; - } + ret = i40evf_aq_set_rss_key(hw, adapter->vsi.id, rss_key); + if (ret) { + dev_err(&adapter->pdev->dev, "Cannot set RSS key, err %s aq_err %s\n", + i40evf_stat_str(hw, ret), + i40evf_aq_str(hw, hw->aq.asq_last_status)); + return ret; + } - if (lut) { - ret = i40evf_aq_set_rss_lut(hw, vsi->id, false, lut, lut_size); - if (ret) { - dev_err(&adapter->pdev->dev, - "Cannot set RSS lut, err %s aq_err %s\n", - i40evf_stat_str(hw, ret), - i40evf_aq_str(hw, hw->aq.asq_last_status)); - return ret; - } + ret = i40evf_aq_set_rss_lut(hw, adapter->vsi.id, false, + adapter->rss_lut, adapter->rss_lut_size); + if (ret) { + dev_err(&adapter->pdev->dev, "Cannot set RSS lut, err %s aq_err %s\n", + i40evf_stat_str(hw, ret), + i40evf_aq_str(hw, hw->aq.asq_last_status)); } return ret; + } /** * i40evf_config_rss_reg - Configure RSS keys and lut by writing registers - * @vsi: Pointer to vsi structure - * @seed: RSS hash seed - * @lut: Lookup table - * @lut_size: Lookup table size + * @adapter: board private structure * * Returns 0 on success, negative on failure **/ -static int i40evf_config_rss_reg(struct i40e_vsi *vsi, const u8 *seed, - const u8 *lut, u16 lut_size) +static int i40evf_config_rss_reg(struct i40evf_adapter *adapter) { - struct i40evf_adapter *adapter = vsi->back; struct i40e_hw *hw = &adapter->hw; + u32 *dw; u16 i; - if (seed) { - u32 *seed_dw = (u32 *)seed; + dw = (u32 *)adapter->rss_key; + for (i = 0; i <= adapter->rss_key_size / 4; i++) + wr32(hw, I40E_VFQF_HKEY(i), dw[i]); - for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++) - wr32(hw, I40E_VFQF_HKEY(i), seed_dw[i]); - } + dw = (u32 *)adapter->rss_lut; + for (i = 0; i <= adapter->rss_lut_size / 4; i++) + wr32(hw, I40E_VFQF_HLUT(i), dw[i]); - if (lut) { - u32 *lut_dw = (u32 *)lut; - - if (lut_size != I40EVF_HLUT_ARRAY_SIZE) - return -EINVAL; - - for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) - wr32(hw, I40E_VFQF_HLUT(i), lut_dw[i]); - } i40e_flush(hw); return 0; } -/** - * * i40evf_get_rss_aq - Get RSS keys and lut by using AQ commands - * @vsi: Pointer to vsi structure - * @seed: RSS hash seed - * @lut: Lookup table - * @lut_size: Lookup table size - * - * Return 0 on success, negative on failure - **/ -static int i40evf_get_rss_aq(struct i40e_vsi *vsi, const u8 *seed, - u8 *lut, u16 lut_size) -{ - struct i40evf_adapter *adapter = vsi->back; - struct i40e_hw *hw = &adapter->hw; - int ret = 0; - - if (seed) { - ret = i40evf_aq_get_rss_key(hw, vsi->id, - (struct i40e_aqc_get_set_rss_key_data *)seed); - 
if (ret) { - dev_err(&adapter->pdev->dev, - "Cannot get RSS key, err %s aq_err %s\n", - i40evf_stat_str(hw, ret), - i40evf_aq_str(hw, hw->aq.asq_last_status)); - return ret; - } - } - - if (lut) { - ret = i40evf_aq_get_rss_lut(hw, vsi->id, false, lut, lut_size); - if (ret) { - dev_err(&adapter->pdev->dev, - "Cannot get RSS lut, err %s aq_err %s\n", - i40evf_stat_str(hw, ret), - i40evf_aq_str(hw, hw->aq.asq_last_status)); - return ret; - } - } - - return ret; -} - -/** - * * i40evf_get_rss_reg - Get RSS keys and lut by reading registers - * @vsi: Pointer to vsi structure - * @seed: RSS hash seed - * @lut: Lookup table - * @lut_size: Lookup table size - * - * Returns 0 on success, negative on failure - **/ -static int i40evf_get_rss_reg(struct i40e_vsi *vsi, const u8 *seed, - const u8 *lut, u16 lut_size) -{ - struct i40evf_adapter *adapter = vsi->back; - struct i40e_hw *hw = &adapter->hw; - u16 i; - - if (seed) { - u32 *seed_dw = (u32 *)seed; - - for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++) - seed_dw[i] = rd32(hw, I40E_VFQF_HKEY(i)); - } - - if (lut) { - u32 *lut_dw = (u32 *)lut; - - if (lut_size != I40EVF_HLUT_ARRAY_SIZE) - return -EINVAL; - - for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) - lut_dw[i] = rd32(hw, I40E_VFQF_HLUT(i)); - } - - return 0; -} - /** * i40evf_config_rss - Configure RSS keys and lut - * @vsi: Pointer to vsi structure - * @seed: RSS hash seed - * @lut: Lookup table - * @lut_size: Lookup table size + * @adapter: board private structure * * Returns 0 on success, negative on failure **/ -int i40evf_config_rss(struct i40e_vsi *vsi, const u8 *seed, - u8 *lut, u16 lut_size) +int i40evf_config_rss(struct i40evf_adapter *adapter) { - struct i40evf_adapter *adapter = vsi->back; - if (RSS_AQ(adapter)) - return i40evf_config_rss_aq(vsi, seed, lut, lut_size); - else - return i40evf_config_rss_reg(vsi, seed, lut, lut_size); -} - -/** - * i40evf_get_rss - Get RSS keys and lut - * @vsi: Pointer to vsi structure - * @seed: RSS hash seed - * @lut: Lookup table - * @lut_size: Lookup table size - * - * Returns 0 on success, negative on failure - **/ -int i40evf_get_rss(struct i40e_vsi *vsi, const u8 *seed, u8 *lut, u16 lut_size) -{ - struct i40evf_adapter *adapter = vsi->back; - - if (RSS_AQ(adapter)) - return i40evf_get_rss_aq(vsi, seed, lut, lut_size); - else - return i40evf_get_rss_reg(vsi, seed, lut, lut_size); + if (RSS_PF(adapter)) { + adapter->aq_required |= I40EVF_FLAG_AQ_SET_RSS_LUT | + I40EVF_FLAG_AQ_SET_RSS_KEY; + return 0; + } else if (RSS_AQ(adapter)) { + return i40evf_config_rss_aq(adapter); + } else { + return i40evf_config_rss_reg(adapter); + } } /** * i40evf_fill_rss_lut - Fill the lut with default values - * @lut: Lookup table to be filled with - * @rss_table_size: Lookup table size - * @rss_size: Range of queue number for hashing + * @adapter: board private structure **/ -static void i40evf_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size) +static void i40evf_fill_rss_lut(struct i40evf_adapter *adapter) { u16 i; - for (i = 0; i < rss_table_size; i++) - lut[i] = i % rss_size; + for (i = 0; i < adapter->rss_lut_size; i++) + adapter->rss_lut[i] = i % adapter->num_active_queues; } /** @@ -1451,42 +1329,25 @@ static void i40evf_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size) **/ static int i40evf_init_rss(struct i40evf_adapter *adapter) { - struct i40e_vsi *vsi = &adapter->vsi; struct i40e_hw *hw = &adapter->hw; - u8 seed[I40EVF_HKEY_ARRAY_SIZE]; - u64 hena; - u8 *lut; int ret; - /* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */ - if 
(adapter->vf_res->vf_offload_flags & - I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2) - hena = I40E_DEFAULT_RSS_HENA_EXPANDED; - else - hena = I40E_DEFAULT_RSS_HENA; - wr32(hw, I40E_VFQF_HENA(0), (u32)hena); - wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32)); + if (!RSS_PF(adapter)) { + /* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */ + if (adapter->vf_res->vf_offload_flags & + I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2) + adapter->hena = I40E_DEFAULT_RSS_HENA_EXPANDED; + else + adapter->hena = I40E_DEFAULT_RSS_HENA; - lut = kzalloc(I40EVF_HLUT_ARRAY_SIZE, GFP_KERNEL); - if (!lut) - return -ENOMEM; + wr32(hw, I40E_VFQF_HENA(0), (u32)adapter->hena); + wr32(hw, I40E_VFQF_HENA(1), (u32)(adapter->hena >> 32)); + } - /* Use user configured lut if there is one, otherwise use default */ - if (vsi->rss_lut_user) - memcpy(lut, vsi->rss_lut_user, I40EVF_HLUT_ARRAY_SIZE); - else - i40evf_fill_rss_lut(lut, I40EVF_HLUT_ARRAY_SIZE, - adapter->num_active_queues); + i40evf_fill_rss_lut(adapter); - /* Use user configured hash key if there is one, otherwise - * user default. - */ - if (vsi->rss_hkey_user) - memcpy(seed, vsi->rss_hkey_user, I40EVF_HKEY_ARRAY_SIZE); - else - netdev_rss_key_fill((void *)seed, I40EVF_HKEY_ARRAY_SIZE); - ret = i40evf_config_rss(vsi, seed, lut, I40EVF_HLUT_ARRAY_SIZE); - kfree(lut); + netdev_rss_key_fill((void *)adapter->rss_key, adapter->rss_key_size); + ret = i40evf_config_rss(adapter); return ret; } @@ -1601,19 +1462,16 @@ err_set_interrupt: } /** - * i40evf_clear_rss_config_user - Clear user configurations of RSS - * @vsi: Pointer to VSI structure + * i40evf_free_rss - Free memory used by RSS structs + * @adapter: board private structure **/ -static void i40evf_clear_rss_config_user(struct i40e_vsi *vsi) +static void i40evf_free_rss(struct i40evf_adapter *adapter) { - if (!vsi) - return; + kfree(adapter->rss_key); + adapter->rss_key = NULL; - kfree(vsi->rss_hkey_user); - vsi->rss_hkey_user = NULL; - - kfree(vsi->rss_lut_user); - vsi->rss_lut_user = NULL; + kfree(adapter->rss_lut); + adapter->rss_lut = NULL; } /** @@ -1747,6 +1605,22 @@ static void i40evf_watchdog_task(struct work_struct *work) adapter->aq_required &= ~I40EVF_FLAG_AQ_CONFIGURE_RSS; goto watchdog_done; } + if (adapter->aq_required & I40EVF_FLAG_AQ_GET_HENA) { + i40evf_get_hena(adapter); + goto watchdog_done; + } + if (adapter->aq_required & I40EVF_FLAG_AQ_SET_HENA) { + i40evf_set_hena(adapter); + goto watchdog_done; + } + if (adapter->aq_required & I40EVF_FLAG_AQ_SET_RSS_KEY) { + i40evf_set_rss_key(adapter); + goto watchdog_done; + } + if (adapter->aq_required & I40EVF_FLAG_AQ_SET_RSS_LUT) { + i40evf_set_rss_lut(adapter); + goto watchdog_done; + } if (adapter->state == __I40EVF_RUNNING) i40evf_request_stats(adapter); @@ -2325,6 +2199,7 @@ int i40evf_process_config(struct i40evf_adapter *adapter) { struct i40e_virtchnl_vf_resource *vfres = adapter->vf_res; struct net_device *netdev = adapter->netdev; + struct i40e_vsi *vsi = &adapter->vsi; int i; /* got VF config message back from PF, now we can parse it */ @@ -2381,8 +2256,16 @@ int i40evf_process_config(struct i40evf_adapter *adapter) ITR_REG_TO_USEC(I40E_ITR_RX_DEF)); adapter->vsi.tx_itr_setting = (I40E_ITR_DYNAMIC | ITR_REG_TO_USEC(I40E_ITR_TX_DEF)); - adapter->vsi.netdev = adapter->netdev; - adapter->vsi.qs_handle = adapter->vsi_res->qset_handle; + vsi->netdev = adapter->netdev; + vsi->qs_handle = adapter->vsi_res->qset_handle; + if (vfres->vf_offload_flags & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF) { + adapter->rss_key_size = vfres->rss_key_size; + adapter->rss_lut_size = 
vfres->rss_lut_size; + } else { + adapter->rss_key_size = I40EVF_HKEY_ARRAY_SIZE; + adapter->rss_lut_size = I40EVF_HLUT_ARRAY_SIZE; + } + return 0; } @@ -2578,6 +2461,11 @@ static void i40evf_init_task(struct work_struct *work) set_bit(__I40E_DOWN, &adapter->vsi.state); i40evf_misc_irq_enable(adapter); + adapter->rss_key = kzalloc(adapter->rss_key_size, GFP_KERNEL); + adapter->rss_lut = kzalloc(adapter->rss_lut_size, GFP_KERNEL); + if (!adapter->rss_key || !adapter->rss_lut) + goto err_mem; + if (RSS_AQ(adapter)) { adapter->aq_required |= I40EVF_FLAG_AQ_CONFIGURE_RSS; mod_timer_pending(&adapter->watchdog_timer, jiffies + 1); @@ -2588,7 +2476,8 @@ static void i40evf_init_task(struct work_struct *work) restart: schedule_delayed_work(&adapter->init_task, msecs_to_jiffies(30)); return; - +err_mem: + i40evf_free_rss(adapter); err_register: i40evf_free_misc_irq(adapter); err_sw_init: @@ -2870,8 +2759,7 @@ static void i40evf_remove(struct pci_dev *pdev) flush_scheduled_work(); - /* Clear user configurations for RSS */ - i40evf_clear_rss_config_user(&adapter->vsi); + i40evf_free_rss(adapter); if (hw->aq.asq.count) i40evf_shutdown_adminq(hw); diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c index 488e738f76c6..e62c56b5a141 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c @@ -681,6 +681,115 @@ void i40evf_request_stats(struct i40evf_adapter *adapter) /* if the request failed, don't lock out others */ adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN; } + +/** + * i40evf_get_hena + * @adapter: adapter structure + * + * Request hash enable capabilities from PF + **/ +void i40evf_get_hena(struct i40evf_adapter *adapter) +{ + if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { + /* bail because we already have a command pending */ + dev_err(&adapter->pdev->dev, "Cannot get RSS hash capabilities, command %d pending\n", + adapter->current_op); + return; + } + adapter->current_op = I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS; + adapter->aq_required &= ~I40EVF_FLAG_AQ_GET_HENA; + i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS, + NULL, 0); +} + +/** + * i40evf_set_hena + * @adapter: adapter structure + * + * Request the PF to set our RSS hash capabilities + **/ +void i40evf_set_hena(struct i40evf_adapter *adapter) +{ + struct i40e_virtchnl_rss_hena vrh; + + if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { + /* bail because we already have a command pending */ + dev_err(&adapter->pdev->dev, "Cannot set RSS hash enable, command %d pending\n", + adapter->current_op); + return; + } + vrh.hena = adapter->hena; + adapter->current_op = I40E_VIRTCHNL_OP_SET_RSS_HENA; + adapter->aq_required &= ~I40EVF_FLAG_AQ_SET_HENA; + i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_SET_RSS_HENA, + (u8 *)&vrh, sizeof(vrh)); +} + +/** + * i40evf_set_rss_key + * @adapter: adapter structure + * + * Request the PF to set our RSS hash key + **/ +void i40evf_set_rss_key(struct i40evf_adapter *adapter) +{ + struct i40e_virtchnl_rss_key *vrk; + int len; + + if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { + /* bail because we already have a command pending */ + dev_err(&adapter->pdev->dev, "Cannot set RSS key, command %d pending\n", + adapter->current_op); + return; + } + len = sizeof(struct i40e_virtchnl_rss_key) + + (adapter->rss_key_size * sizeof(u8)) - 1; + vrk = kzalloc(len, GFP_KERNEL); + if (!vrk) + return; + vrk->vsi_id = adapter->vsi.id; + vrk->key_len = 
adapter->rss_key_size; + memcpy(vrk->key, adapter->rss_key, adapter->rss_key_size); + + adapter->current_op = I40E_VIRTCHNL_OP_CONFIG_RSS_KEY; + adapter->aq_required &= ~I40EVF_FLAG_AQ_SET_RSS_KEY; + i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_CONFIG_RSS_KEY, + (u8 *)vrk, len); + kfree(vrk); +} + +/** + * i40evf_set_rss_lut + * @adapter: adapter structure + * + * Request the PF to set our RSS lookup table + **/ +void i40evf_set_rss_lut(struct i40evf_adapter *adapter) +{ + struct i40e_virtchnl_rss_lut *vrl; + int len; + + if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { + /* bail because we already have a command pending */ + dev_err(&adapter->pdev->dev, "Cannot set RSS LUT, command %d pending\n", + adapter->current_op); + return; + } + len = sizeof(struct i40e_virtchnl_rss_lut) + + (adapter->rss_lut_size * sizeof(u8)) - 1; + vrl = kzalloc(len, GFP_KERNEL); + if (!vrl) + return; + vrl->vsi_id = adapter->vsi.id; + vrl->lut_entries = adapter->rss_lut_size; + memcpy(vrl->lut, adapter->rss_lut, adapter->rss_lut_size); + adapter->current_op = I40E_VIRTCHNL_OP_CONFIG_RSS_LUT; + adapter->aq_required &= ~I40EVF_FLAG_AQ_SET_RSS_LUT; + i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_CONFIG_RSS_LUT, + (u8 *)vrl, len); + kfree(vrl); +} + /** * i40evf_request_reset * @adapter: adapter structure @@ -820,6 +929,16 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter, if (v_opcode != adapter->current_op) return; break; + case I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS: { + struct i40e_virtchnl_rss_hena *vrh = + (struct i40e_virtchnl_rss_hena *)msg; + if (msglen == sizeof(*vrh)) + adapter->hena = vrh->hena; + else + dev_warn(&adapter->pdev->dev, + "Invalid message %d from PF\n", v_opcode); + } + break; default: if (v_opcode != adapter->current_op) dev_warn(&adapter->pdev->dev, "Expected response %d from PF, received %d\n", From 0de727383c46510f12932d32e4b66292854be508 Mon Sep 17 00:00:00 2001 From: Hariprasad Shenai Date: Tue, 26 Apr 2016 20:10:22 +0530 Subject: [PATCH 1061/1649] cxgb4: add new routine to get adapter info Add new routine to print out general adapter information (various version numbers, adapter name, part number, serial number, etc.) and remove redundant information dumped in the Port Information. Signed-off-by: Hariprasad Shenai Signed-off-by: David S. 
Miller --- drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 4 + .../net/ethernet/chelsio/cxgb4/cxgb4_main.c | 78 +++++++++++++++++-- drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 14 ++++ drivers/net/ethernet/chelsio/cxgb4/t4_hw.h | 7 ++ 4 files changed, 95 insertions(+), 8 deletions(-) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index 326d4009525e..459775884cad 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -324,7 +324,9 @@ struct adapter_params { unsigned int sf_fw_start; /* start of FW image in flash */ unsigned int fw_vers; + unsigned int bs_vers; /* bootstrap version */ unsigned int tp_vers; + unsigned int er_vers; /* expansion ROM version */ u8 api_vers[7]; unsigned short mtus[NMTUS]; @@ -731,6 +733,7 @@ struct adapter { u32 t4_bar0; struct pci_dev *pdev; struct device *pdev_dev; + const char *name; unsigned int mbox; unsigned int pf; unsigned int flags; @@ -1306,6 +1309,7 @@ int t4_fl_pkt_align(struct adapter *adap); unsigned int t4_flash_cfg_addr(struct adapter *adapter); int t4_check_fw_version(struct adapter *adap); int t4_get_fw_version(struct adapter *adapter, u32 *vers); +int t4_get_bs_version(struct adapter *adapter, u32 *vers); int t4_get_tp_version(struct adapter *adapter, u32 *vers); int t4_get_exprom_version(struct adapter *adapter, u32 *vers); int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info, diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index a1e329ec24cd..b8dc7921b258 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -3738,7 +3738,10 @@ static int adap_init0(struct adapter *adap) * is excessively mismatched relative to the driver.) */ t4_get_fw_version(adap, &adap->params.fw_vers); + t4_get_bs_version(adap, &adap->params.bs_vers); t4_get_tp_version(adap, &adap->params.tp_vers); + t4_get_exprom_version(adap, &adap->params.er_vers); + ret = t4_check_fw_version(adap); /* If firmware is too old (not supported by driver) force an update. */ if (ret) @@ -4652,6 +4655,68 @@ static void cxgb4_check_pcie_caps(struct adapter *adap) "suggested for optimal performance.\n"); } +/* Dump basic information about the adapter */ +static void print_adapter_info(struct adapter *adapter) +{ + /* Device information */ + dev_info(adapter->pdev_dev, "Chelsio %s rev %d\n", + adapter->params.vpd.id, + CHELSIO_CHIP_RELEASE(adapter->params.chip)); + dev_info(adapter->pdev_dev, "S/N: %s, P/N: %s\n", + adapter->params.vpd.sn, adapter->params.vpd.pn); + + /* Firmware Version */ + if (!adapter->params.fw_vers) + dev_warn(adapter->pdev_dev, "No firmware loaded\n"); + else + dev_info(adapter->pdev_dev, "Firmware version: %u.%u.%u.%u\n", + FW_HDR_FW_VER_MAJOR_G(adapter->params.fw_vers), + FW_HDR_FW_VER_MINOR_G(adapter->params.fw_vers), + FW_HDR_FW_VER_MICRO_G(adapter->params.fw_vers), + FW_HDR_FW_VER_BUILD_G(adapter->params.fw_vers)); + + /* Bootstrap Firmware Version. (Some adapters don't have Bootstrap + * Firmware, so dev_info() is more appropriate here.) 
+ */ + if (!adapter->params.bs_vers) + dev_info(adapter->pdev_dev, "No bootstrap loaded\n"); + else + dev_info(adapter->pdev_dev, "Bootstrap version: %u.%u.%u.%u\n", + FW_HDR_FW_VER_MAJOR_G(adapter->params.bs_vers), + FW_HDR_FW_VER_MINOR_G(adapter->params.bs_vers), + FW_HDR_FW_VER_MICRO_G(adapter->params.bs_vers), + FW_HDR_FW_VER_BUILD_G(adapter->params.bs_vers)); + + /* TP Microcode Version */ + if (!adapter->params.tp_vers) + dev_warn(adapter->pdev_dev, "No TP Microcode loaded\n"); + else + dev_info(adapter->pdev_dev, + "TP Microcode version: %u.%u.%u.%u\n", + FW_HDR_FW_VER_MAJOR_G(adapter->params.tp_vers), + FW_HDR_FW_VER_MINOR_G(adapter->params.tp_vers), + FW_HDR_FW_VER_MICRO_G(adapter->params.tp_vers), + FW_HDR_FW_VER_BUILD_G(adapter->params.tp_vers)); + + /* Expansion ROM version */ + if (!adapter->params.er_vers) + dev_info(adapter->pdev_dev, "No Expansion ROM loaded\n"); + else + dev_info(adapter->pdev_dev, + "Expansion ROM version: %u.%u.%u.%u\n", + FW_HDR_FW_VER_MAJOR_G(adapter->params.er_vers), + FW_HDR_FW_VER_MINOR_G(adapter->params.er_vers), + FW_HDR_FW_VER_MICRO_G(adapter->params.er_vers), + FW_HDR_FW_VER_BUILD_G(adapter->params.er_vers)); + + /* Software/Hardware configuration */ + dev_info(adapter->pdev_dev, "Configuration: %sNIC %s, %s capable\n", + is_offload(adapter) ? "R" : "", + ((adapter->flags & USING_MSIX) ? "MSI-X" : + (adapter->flags & USING_MSI) ? "MSI" : ""), + is_offload(adapter) ? "Offload" : "non-Offload"); +} + static void print_port_info(const struct net_device *dev) { char buf[80]; @@ -4679,14 +4744,8 @@ static void print_port_info(const struct net_device *dev) --bufp; sprintf(bufp, "BASE-%s", t4_get_port_type_description(pi->port_type)); - netdev_info(dev, "Chelsio %s rev %d %s %sNIC %s\n", - adap->params.vpd.id, - CHELSIO_CHIP_RELEASE(adap->params.chip), buf, - is_offload(adap) ? "R" : "", - (adap->flags & USING_MSIX) ? " MSI-X" : - (adap->flags & USING_MSI) ? " MSI" : ""); - netdev_info(dev, "S/N: %s, P/N: %s\n", - adap->params.vpd.sn, adap->params.vpd.pn); + netdev_info(dev, "%s: Chelsio %s (%s) %s\n", + dev->name, adap->params.vpd.id, adap->name, buf); } static void enable_pcie_relaxed_ordering(struct pci_dev *dev) @@ -4844,6 +4903,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) adapter->regs = regs; adapter->pdev = pdev; adapter->pdev_dev = &pdev->dev; + adapter->name = pci_name(pdev); adapter->mbox = func; adapter->pf = func; adapter->msg_enable = dflt_msg_enable; @@ -5074,6 +5134,8 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (is_offload(adapter)) attach_ulds(adapter); + print_adapter_info(adapter); + sriov: #ifdef CONFIG_PCI_IOV if (func < ARRAY_SIZE(num_vf) && num_vf[func] > 0) diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 71586a3e0f61..2ced24fc569d 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -2936,6 +2936,20 @@ int t4_get_fw_version(struct adapter *adapter, u32 *vers) vers, 0); } +/** + * t4_get_bs_version - read the firmware bootstrap version + * @adapter: the adapter + * @vers: where to place the version + * + * Reads the FW Bootstrap version from flash. 
+ */ +int t4_get_bs_version(struct adapter *adapter, u32 *vers) +{ + return t4_read_flash(adapter, FLASH_FWBOOTSTRAP_START + + offsetof(struct fw_hdr, fw_ver), 1, + vers, 0); +} + /** * t4_get_tp_version - read the TP microcode version * @adapter: the adapter diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h index 2fc60e83a7a1..7f59ca458431 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h @@ -220,6 +220,13 @@ enum { FLASH_FW_START = FLASH_START(FLASH_FW_START_SEC), FLASH_FW_MAX_SIZE = FLASH_MAX_SIZE(FLASH_FW_NSECS), + /* Location of bootstrap firmware image in FLASH. + */ + FLASH_FWBOOTSTRAP_START_SEC = 27, + FLASH_FWBOOTSTRAP_NSECS = 1, + FLASH_FWBOOTSTRAP_START = FLASH_START(FLASH_FWBOOTSTRAP_START_SEC), + FLASH_FWBOOTSTRAP_MAX_SIZE = FLASH_MAX_SIZE(FLASH_FWBOOTSTRAP_NSECS), + /* * iSCSI persistent/crash information. */ From ed98c85ee9a16373f73afeb5bcd7b37b65c2a62f Mon Sep 17 00:00:00 2001 From: Hariprasad Shenai Date: Tue, 26 Apr 2016 20:10:23 +0530 Subject: [PATCH 1062/1649] cxgb4: Add llseek operation for flash debugfs entry Signed-off-by: Hariprasad Shenai Signed-off-by: David S. Miller --- drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c index 0bb41e9b9b1c..9506c5cd11b9 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c @@ -1572,6 +1572,7 @@ static const struct file_operations flash_debugfs_fops = { .owner = THIS_MODULE, .open = mem_open, .read = flash_read, + .llseek = default_llseek, }; static inline void tcamxy2valmask(u64 x, u64 y, u8 *addr, u64 *mask) From fbe8077687330a55e87bc26745d4992991c101ec Mon Sep 17 00:00:00 2001 From: Hariprasad Shenai Date: Tue, 26 Apr 2016 20:10:24 +0530 Subject: [PATCH 1063/1649] cxgb4: Avoids race and deadlock while freeing tx descriptor There could be race between t4_eth_xmit() and t4_free_sge_resources() while freeing tx descriptors, take txq lock in t4_free_sge_resources(). We need to stop the xmit frame path which runs in bottom half context while unloading the driver using _bh variant of the lock. This is to prevent the deadlock between xmit and driver unload. Signed-off-by: Hariprasad Shenai Signed-off-by: David S. Miller --- drivers/net/ethernet/chelsio/cxgb4/sge.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index 6278e5a74b74..bad253beb8c8 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c @@ -3006,7 +3006,9 @@ void t4_free_sge_resources(struct adapter *adap) if (etq->q.desc) { t4_eth_eq_free(adap, adap->mbox, adap->pf, 0, etq->q.cntxt_id); + __netif_tx_lock_bh(etq->txq); free_tx_desc(adap, &etq->q, etq->q.in_use, true); + __netif_tx_unlock_bh(etq->txq); kfree(etq->q.sdesc); free_txq(adap, &etq->q); } From be81a2deb1134c47fadb10cb0bd6540caf6f32d7 Mon Sep 17 00:00:00 2001 From: Hariprasad Shenai Date: Tue, 26 Apr 2016 20:10:25 +0530 Subject: [PATCH 1064/1649] cxgb4: Properly decode port module type Decode and log port module error, unknown modules and unsupported modules. Signed-off-by: Hariprasad Shenai Signed-off-by: David S. 
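A quick way to picture the locking rule this fix enforces, before the diff that follows: the tx descriptor ring is touched both by the transmit path, which runs in softirq context under the netdev tx queue lock, and by the teardown path, so teardown must take the same lock, and the _bh variant so that a softirq cannot preempt it and self-deadlock. The user-space analogy below models only the mutual-exclusion half with a plain pthread mutex; it is not the driver's code and all names are made up.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t txq_lock = PTHREAD_MUTEX_INITIALIZER;
static int in_use;			/* descriptors currently queued */

/* stands in for the xmit path queuing descriptors */
static void *xmit_thread(void *arg)
{
	for (int i = 0; i < 1000; i++) {
		pthread_mutex_lock(&txq_lock);	/* ~ __netif_tx_lock_bh() */
		in_use++;
		pthread_mutex_unlock(&txq_lock);
	}
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, xmit_thread, NULL);

	/* stands in for the teardown path: free whatever is queued,
	 * under the same lock, so it cannot race with a concurrent xmit
	 */
	pthread_mutex_lock(&txq_lock);
	printf("freeing %d descriptors\n", in_use);
	in_use = 0;
	pthread_mutex_unlock(&txq_lock);

	pthread_join(t, NULL);
	return 0;
}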
Miller --- drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index b8dc7921b258..abc425bfc744 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -337,6 +337,17 @@ void t4_os_portmod_changed(const struct adapter *adap, int port_id) netdev_info(dev, "port module unplugged\n"); else if (pi->mod_type < ARRAY_SIZE(mod_str)) netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]); + else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED) + netdev_info(dev, "%s: unsupported port module inserted\n", + dev->name); + else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN) + netdev_info(dev, "%s: unknown port module inserted\n", + dev->name); + else if (pi->mod_type == FW_PORT_MOD_TYPE_ERROR) + netdev_info(dev, "%s: transceiver module error\n", dev->name); + else + netdev_info(dev, "%s: unknown module type %d inserted\n", + dev->name, pi->mod_type); } int dbfifo_int_thresh = 10; /* 10 == 640 entry threshold */ From c3e324e3d0662661f53c80fee28d70b6713f9dc8 Mon Sep 17 00:00:00 2001 From: Hariprasad Shenai Date: Tue, 26 Apr 2016 20:10:26 +0530 Subject: [PATCH 1065/1649] cxgb4: Refactor t4_port_init function Refactor t4_port_init() so that the core functionality is done by t4_init_portinfo() for a particular port. Also rename variables to sensible ones. Signed-off-by: Hariprasad Shenai Signed-off-by: David S. Miller --- drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 2 + drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 93 ++++++++++++---------- 2 files changed, 55 insertions(+), 40 deletions(-) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index 459775884cad..f69119bc5990 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -1333,6 +1333,8 @@ int t4_init_sge_params(struct adapter *adapter); int t4_init_tp_params(struct adapter *adap); int t4_filter_field_shift(const struct adapter *adap, int filter_sel); int t4_init_rss_mode(struct adapter *adap, int mbox); +int t4_init_portinfo(struct port_info *pi, int mbox, + int port, int pf, int vf, u8 mac[]); int t4_port_init(struct adapter *adap, int mbox, int pf, int vf); void t4_fatal_err(struct adapter *adapter); int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid, diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 2ced24fc569d..1ebbbb9323e3 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -7668,61 +7668,74 @@ int t4_init_rss_mode(struct adapter *adap, int mbox) return 0; } +/** + * t4_init_portinfo - allocate a virtual interface amd initialize port_info + * @pi: the port_info + * @mbox: mailbox to use for the FW command + * @port: physical port associated with the VI + * @pf: the PF owning the VI + * @vf: the VF owning the VI + * @mac: the MAC address of the VI + * + * Allocates a virtual interface for the given physical port. If @mac is + * not %NULL it contains the MAC address of the VI as assigned by FW. + * @mac should be large enough to hold an Ethernet address. + * Returns < 0 on error. 
+ */ +int t4_init_portinfo(struct port_info *pi, int mbox, + int port, int pf, int vf, u8 mac[]) +{ + int ret; + struct fw_port_cmd c; + unsigned int rss_size; + + memset(&c, 0, sizeof(c)); + c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) | + FW_CMD_REQUEST_F | FW_CMD_READ_F | + FW_PORT_CMD_PORTID_V(port)); + c.action_to_len16 = cpu_to_be32( + FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_GET_PORT_INFO) | + FW_LEN16(c)); + ret = t4_wr_mbox(pi->adapter, mbox, &c, sizeof(c), &c); + if (ret) + return ret; + + ret = t4_alloc_vi(pi->adapter, mbox, port, pf, vf, 1, mac, &rss_size); + if (ret < 0) + return ret; + + pi->viid = ret; + pi->tx_chan = port; + pi->lport = port; + pi->rss_size = rss_size; + + ret = be32_to_cpu(c.u.info.lstatus_to_modtype); + pi->mdio_addr = (ret & FW_PORT_CMD_MDIOCAP_F) ? + FW_PORT_CMD_MDIOADDR_G(ret) : -1; + pi->port_type = FW_PORT_CMD_PTYPE_G(ret); + pi->mod_type = FW_PORT_MOD_TYPE_NA; + + init_link_config(&pi->link_cfg, be16_to_cpu(c.u.info.pcap)); + return 0; +} + int t4_port_init(struct adapter *adap, int mbox, int pf, int vf) { u8 addr[6]; int ret, i, j = 0; - struct fw_port_cmd c; - struct fw_rss_vi_config_cmd rvc; - - memset(&c, 0, sizeof(c)); - memset(&rvc, 0, sizeof(rvc)); for_each_port(adap, i) { - unsigned int rss_size; - struct port_info *p = adap2pinfo(adap, i); + struct port_info *pi = adap2pinfo(adap, i); while ((adap->params.portvec & (1 << j)) == 0) j++; - c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) | - FW_CMD_REQUEST_F | FW_CMD_READ_F | - FW_PORT_CMD_PORTID_V(j)); - c.action_to_len16 = cpu_to_be32( - FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_GET_PORT_INFO) | - FW_LEN16(c)); - ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); + ret = t4_init_portinfo(pi, mbox, j, pf, vf, addr); if (ret) return ret; - ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size); - if (ret < 0) - return ret; - - p->viid = ret; - p->tx_chan = j; - p->lport = j; - p->rss_size = rss_size; memcpy(adap->port[i]->dev_addr, addr, ETH_ALEN); adap->port[i]->dev_port = j; - - ret = be32_to_cpu(c.u.info.lstatus_to_modtype); - p->mdio_addr = (ret & FW_PORT_CMD_MDIOCAP_F) ? - FW_PORT_CMD_MDIOADDR_G(ret) : -1; - p->port_type = FW_PORT_CMD_PTYPE_G(ret); - p->mod_type = FW_PORT_MOD_TYPE_NA; - - rvc.op_to_viid = - cpu_to_be32(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) | - FW_CMD_REQUEST_F | FW_CMD_READ_F | - FW_RSS_VI_CONFIG_CMD_VIID(p->viid)); - rvc.retval_len16 = cpu_to_be32(FW_LEN16(rvc)); - ret = t4_wr_mbox(adap, mbox, &rvc, sizeof(rvc), &rvc); - if (ret) - return ret; - p->rss_mode = be32_to_cpu(rvc.u.basicvirtual.defaultq_to_udpen); - - init_link_config(&p->link_cfg, be16_to_cpu(c.u.info.pcap)); j++; } return 0; From 134491fdc319037f37adc5f8ec51093e5cd5ada1 Mon Sep 17 00:00:00 2001 From: Hariprasad Shenai Date: Tue, 26 Apr 2016 20:10:27 +0530 Subject: [PATCH 1066/1649] cxgb4: DCB message handler needs to use correct portid to netdev mapping Signed-off-by: Hariprasad Shenai Signed-off-by: David S. 
Miller --- drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c | 2 +- drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 5 +++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c index 052c660aca80..6ee2ed30626b 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c @@ -253,7 +253,7 @@ void cxgb4_dcb_handle_fw_update(struct adapter *adap, { const union fw_port_dcb *fwdcb = &pcmd->u.dcb; int port = FW_PORT_CMD_PORTID_G(be32_to_cpu(pcmd->op_to_portid)); - struct net_device *dev = adap->port[port]; + struct net_device *dev = adap->port[adap->chan_map[port]]; struct port_info *pi = netdev_priv(dev); struct port_dcb_info *dcb = &pi->dcb; int dcb_type = pcmd->u.dcb.pgid.type; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index abc425bfc744..4f627f3edb98 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -515,7 +515,7 @@ EXPORT_SYMBOL(cxgb4_dcb_enabled); static void dcb_rpl(struct adapter *adap, const struct fw_port_cmd *pcmd) { int port = FW_PORT_CMD_PORTID_G(ntohl(pcmd->op_to_portid)); - struct net_device *dev = adap->port[port]; + struct net_device *dev = adap->port[adap->chan_map[port]]; int old_dcb_enabled = cxgb4_dcb_enabled(dev); int new_dcb_enabled; @@ -645,7 +645,8 @@ static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp, action == FW_PORT_ACTION_GET_PORT_INFO) { int port = FW_PORT_CMD_PORTID_G( be32_to_cpu(pcmd->op_to_portid)); - struct net_device *dev = q->adap->port[port]; + struct net_device *dev = + q->adap->port[q->adap->chan_map[port]]; int state_input = ((pcmd->u.info.dcbxdis_pkd & FW_PORT_CMD_DCBXDIS_F) ? CXGB4_DCB_INPUT_FW_DISABLED From 23853a0a9a7621922a21759eeed8c5bc09c71c54 Mon Sep 17 00:00:00 2001 From: Hariprasad Shenai Date: Tue, 26 Apr 2016 20:10:28 +0530 Subject: [PATCH 1067/1649] cxgb4: Don't assume FW_PORT_CMD reply is always port info msg The firmware can send a set of asynchronous replies through FW_PORT_CMD with DCBX information when that's negotiated with the Link Peer. The old code always assumed that a FW_PORT_CMD reply was always a Get Port Information message. This change conditionalizes the code to only handle the Get Port Information messages and throws a warning if we don't understand what we've been given. Also refactor t4_handle_fw_rpl() so that core functionality performed by t4_handle_get_port_info() for a specified port. Signed-off-by: Hariprasad Shenai Signed-off-by: David S. 
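Before the diff that follows, a stand-alone sketch of the dispatch shape this change introduces: check both the opcode and the decoded action before treating a port-command reply as a Get Port Information message. The shift, mask and command values below are illustrative placeholders, not the real FW_PORT_CMD field layout.

#include <stdint.h>
#include <stdio.h>

#define OP_PORT_CMD		0x1b	/* placeholder opcode value  */
#define ACTION_GET_PORT_INFO	0x03	/* placeholder action value  */
#define ACTION_SHIFT		16	/* placeholder bit position  */
#define ACTION_MASK		0xff

static void handle_reply(unsigned int opcode, uint32_t action_to_len16)
{
	unsigned int action = (action_to_len16 >> ACTION_SHIFT) & ACTION_MASK;

	if (opcode == OP_PORT_CMD && action == ACTION_GET_PORT_INFO)
		printf("get-port-info reply: decode link/module state\n");
	else
		printf("other reply: opcode %u action %u, do not decode\n",
		       opcode, action);
}

int main(void)
{
	handle_reply(OP_PORT_CMD, (uint32_t)ACTION_GET_PORT_INFO << ACTION_SHIFT);
	handle_reply(OP_PORT_CMD, (uint32_t)0x05 << ACTION_SHIFT);	/* e.g. a DCBX update */
	return 0;
}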
Miller --- drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 1 + drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 108 ++++++++++++++------- 2 files changed, 73 insertions(+), 36 deletions(-) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index f69119bc5990..911fe11d32c6 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -1470,6 +1470,7 @@ int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, unsigned int vf, unsigned int eqid); int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox); +void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl); int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl); void t4_db_full(struct adapter *adapter); void t4_db_dropped(struct adapter *adapter); diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 1ebbbb9323e3..cf3efbf4a37a 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -7103,52 +7103,88 @@ int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, } /** - * t4_handle_fw_rpl - process a FW reply message - * @adap: the adapter + * t4_handle_get_port_info - process a FW reply message + * @pi: the port info * @rpl: start of the FW message * - * Processes a FW message, such as link state change messages. + * Processes a GET_PORT_INFO FW reply message. + */ +void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl) +{ + const struct fw_port_cmd *p = (const void *)rpl; + struct adapter *adap = pi->adapter; + + /* link/module state change message */ + int speed = 0, fc = 0; + struct link_config *lc; + u32 stat = be32_to_cpu(p->u.info.lstatus_to_modtype); + int link_ok = (stat & FW_PORT_CMD_LSTATUS_F) != 0; + u32 mod = FW_PORT_CMD_MODTYPE_G(stat); + + if (stat & FW_PORT_CMD_RXPAUSE_F) + fc |= PAUSE_RX; + if (stat & FW_PORT_CMD_TXPAUSE_F) + fc |= PAUSE_TX; + if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100M)) + speed = 100; + else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_1G)) + speed = 1000; + else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G)) + speed = 10000; + else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G)) + speed = 40000; + + lc = &pi->link_cfg; + + if (mod != pi->mod_type) { + pi->mod_type = mod; + t4_os_portmod_changed(adap, pi->port_id); + } + if (link_ok != lc->link_ok || speed != lc->speed || + fc != lc->fc) { /* something changed */ + lc->link_ok = link_ok; + lc->speed = speed; + lc->fc = fc; + lc->supported = be16_to_cpu(p->u.info.pcap); + t4_os_link_changed(adap, pi->port_id, link_ok); + } +} + +/** + * t4_handle_fw_rpl - process a FW reply message + * @adap: the adapter + * @rpl: start of the FW message + * + * Processes a FW message, such as link state change messages. */ int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl) { u8 opcode = *(const u8 *)rpl; - if (opcode == FW_PORT_CMD) { /* link/module state change message */ - int speed = 0, fc = 0; - const struct fw_port_cmd *p = (void *)rpl; + /* This might be a port command ... this simplifies the following + * conditionals ... We can get away with pre-dereferencing + * action_to_len16 because it's in the first 16 bytes and all messages + * will be at least that long. 
+ */ + const struct fw_port_cmd *p = (const void *)rpl; + unsigned int action = + FW_PORT_CMD_ACTION_G(be32_to_cpu(p->action_to_len16)); + + if (opcode == FW_PORT_CMD && action == FW_PORT_ACTION_GET_PORT_INFO) { + int i; int chan = FW_PORT_CMD_PORTID_G(be32_to_cpu(p->op_to_portid)); - int port = adap->chan_map[chan]; - struct port_info *pi = adap2pinfo(adap, port); - struct link_config *lc = &pi->link_cfg; - u32 stat = be32_to_cpu(p->u.info.lstatus_to_modtype); - int link_ok = (stat & FW_PORT_CMD_LSTATUS_F) != 0; - u32 mod = FW_PORT_CMD_MODTYPE_G(stat); + struct port_info *pi = NULL; - if (stat & FW_PORT_CMD_RXPAUSE_F) - fc |= PAUSE_RX; - if (stat & FW_PORT_CMD_TXPAUSE_F) - fc |= PAUSE_TX; - if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100M)) - speed = 100; - else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_1G)) - speed = 1000; - else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G)) - speed = 10000; - else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G)) - speed = 40000; + for_each_port(adap, i) { + pi = adap2pinfo(adap, i); + if (pi->tx_chan == chan) + break; + } - if (link_ok != lc->link_ok || speed != lc->speed || - fc != lc->fc) { /* something changed */ - lc->link_ok = link_ok; - lc->speed = speed; - lc->fc = fc; - lc->supported = be16_to_cpu(p->u.info.pcap); - t4_os_link_changed(adap, port, link_ok); - } - if (mod != pi->mod_type) { - pi->mod_type = mod; - t4_os_portmod_changed(adap, port); - } + t4_handle_get_port_info(pi, rpl); + } else { + dev_warn(adap->pdev_dev, "Unknown firmware reply %d\n", opcode); + return -EINVAL; } return 0; } From ddc7740d9a7c7e61650309a81037f12d5cfad88e Mon Sep 17 00:00:00 2001 From: Hariprasad Shenai Date: Tue, 26 Apr 2016 20:10:29 +0530 Subject: [PATCH 1068/1649] cxgb4: Decode link down reason code obtained from firmware Signed-off-by: Hariprasad Shenai Signed-off-by: David S. Miller --- drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 1 + drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 34 +++++++++++++++++++ drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h | 5 +++ 3 files changed, 40 insertions(+) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index 911fe11d32c6..6af5242e6d21 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -396,6 +396,7 @@ struct link_config { unsigned char fc; /* actual link flow control */ unsigned char autoneg; /* autonegotiating? */ unsigned char link_ok; /* link up? */ + unsigned char link_down_rc; /* link down reason */ }; #define FW_LEN16(fw_struct) FW_CMD_LEN16_V(sizeof(fw_struct) / 16) diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index cf3efbf4a37a..7907d85efa4c 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -7102,6 +7102,32 @@ int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); } +/** + * t4_link_down_rc_str - return a string for a Link Down Reason Code + * @adap: the adapter + * @link_down_rc: Link Down Reason Code + * + * Returns a string representation of the Link Down Reason Code. 
+ */ +static const char *t4_link_down_rc_str(unsigned char link_down_rc) +{ + static const char * const reason[] = { + "Link Down", + "Remote Fault", + "Auto-negotiation Failure", + "Reserved", + "Insufficient Airflow", + "Unable To Determine Reason", + "No RX Signal Detected", + "Reserved", + }; + + if (link_down_rc >= ARRAY_SIZE(reason)) + return "Bad Reason Code"; + + return reason[link_down_rc]; +} + /** * t4_handle_get_port_info - process a FW reply message * @pi: the port info @@ -7142,6 +7168,14 @@ void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl) } if (link_ok != lc->link_ok || speed != lc->speed || fc != lc->fc) { /* something changed */ + if (!link_ok && lc->link_ok) { + unsigned char rc = FW_PORT_CMD_LINKDNRC_G(stat); + + lc->link_down_rc = rc; + dev_warn(adap->pdev_dev, + "Port %d link down, reason: %s\n", + pi->port_id, t4_link_down_rc_str(rc)); + } lc->link_ok = link_ok; lc->speed = speed; lc->fc = fc; diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h index 7ad6d4e75b2a..392d6644fdd8 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h @@ -2510,6 +2510,11 @@ struct fw_port_cmd { #define FW_PORT_CMD_PTYPE_G(x) \ (((x) >> FW_PORT_CMD_PTYPE_S) & FW_PORT_CMD_PTYPE_M) +#define FW_PORT_CMD_LINKDNRC_S 5 +#define FW_PORT_CMD_LINKDNRC_M 0x7 +#define FW_PORT_CMD_LINKDNRC_G(x) \ + (((x) >> FW_PORT_CMD_LINKDNRC_S) & FW_PORT_CMD_LINKDNRC_M) + #define FW_PORT_CMD_MODTYPE_S 0 #define FW_PORT_CMD_MODTYPE_M 0x1f #define FW_PORT_CMD_MODTYPE_V(x) ((x) << FW_PORT_CMD_MODTYPE_S) From bcd197c81f63afa4610e481ed353d1507ba401d0 Mon Sep 17 00:00:00 2001 From: Manish Chopra Date: Tue, 26 Apr 2016 10:56:08 -0400 Subject: [PATCH 1069/1649] qed: Add vport WFQ configuration APIs This patch adds relevant APIs needed to configure WFQ (Weighted fair queueing) values for the vports. WFQ configuration is used per vport basis when minimum bandwidth update/configuration is notified to the PF by the management firmware. Signed-off-by: Manish Chopra Signed-off-by: Yuval Mintz Signed-off-by: David S. 
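The patch below programs each vport's WFQ weight as vport_wfq = min_speed * QED_WFQ_UNIT / min_pf_rate (QED_WFQ_UNIT is 100) and spreads the leftover PF rate evenly over the vports that did not request an explicit minimum. A stand-alone sketch of that arithmetic, using arbitrary example rates rather than values read from the device:

#include <stdio.h>

#define QED_WFQ_UNIT 100

int main(void)
{
	unsigned int min_pf_rate = 10000;		/* PF min rate, Mbps */
	unsigned int req[4] = { 2000, 500, 0, 0 };	/* per-vport requests */
	unsigned int total_req = 0, non_requested = 0;

	for (int i = 0; i < 4; i++) {
		if (req[i])
			total_req += req[i];
		else
			non_requested++;
	}

	/* leftover bandwidth is split evenly over the vports that did not
	 * ask for an explicit minimum (mirrors qed_init_wfq_param())
	 */
	unsigned int left_per_vp = (min_pf_rate - total_req) / non_requested;

	for (int i = 0; i < 4; i++) {
		unsigned int speed = req[i] ? req[i] : left_per_vp;
		unsigned int wfq = speed * QED_WFQ_UNIT / min_pf_rate;

		printf("vport %d: min %u Mbps -> vport_wfq %u\n", i, speed, wfq);
	}
	return 0;
}

With a 10000 Mbps floor the weights come out as 20, 5, 37 and 37, i.e. roughly one weight unit per percent of min_pf_rate, which is why the validation in the patch rejects requests below one percent.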
Miller --- drivers/net/ethernet/qlogic/qed/qed.h | 11 + drivers/net/ethernet/qlogic/qed/qed_dev.c | 188 +++++++++++++++++- drivers/net/ethernet/qlogic/qed/qed_hsi.h | 2 + .../ethernet/qlogic/qed/qed_init_fw_funcs.c | 25 +++ .../net/ethernet/qlogic/qed/qed_reg_addr.h | 1 + 5 files changed, 223 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h index 33e2ed60c18f..cceac3272cce 100644 --- a/drivers/net/ethernet/qlogic/qed/qed.h +++ b/drivers/net/ethernet/qlogic/qed/qed.h @@ -32,6 +32,8 @@ extern const struct qed_common_ops qed_common_ops_pass; #define NAME_SIZE 16 #define VER_SIZE 16 +#define QED_WFQ_UNIT 100 + /* cau states */ enum qed_coalescing_mode { QED_COAL_MODE_DISABLE, @@ -237,6 +239,12 @@ struct qed_dmae_info { struct dmae_cmd *p_dmae_cmd; }; +struct qed_wfq_data { + /* when feature is configured for at least 1 vport */ + u32 min_speed; + bool configured; +}; + struct qed_qm_info { struct init_qm_pq_params *qm_pq_params; struct init_qm_vport_params *qm_vport_params; @@ -257,6 +265,7 @@ struct qed_qm_info { bool vport_wfq_en; u8 pf_wfq; u32 pf_rl; + struct qed_wfq_data *wfq_data; }; struct storm_stats { @@ -526,6 +535,8 @@ static inline u8 qed_concrete_to_sw_fid(struct qed_dev *cdev, #define PURE_LB_TC 8 +void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev, u32 min_pf_rate); + #define QED_LEADING_HWFN(dev) (&dev->hwfns[0]) /* Other Linux specific common definitions */ diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index bdae5a55afa4..28e0619a290e 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -105,6 +105,8 @@ static void qed_qm_info_free(struct qed_hwfn *p_hwfn) qm_info->qm_vport_params = NULL; kfree(qm_info->qm_port_params); qm_info->qm_port_params = NULL; + kfree(qm_info->wfq_data); + qm_info->wfq_data = NULL; } void qed_resc_free(struct qed_dev *cdev) @@ -175,6 +177,11 @@ static int qed_init_qm_info(struct qed_hwfn *p_hwfn) if (!qm_info->qm_port_params) goto alloc_err; + qm_info->wfq_data = kcalloc(num_vports, sizeof(*qm_info->wfq_data), + GFP_KERNEL); + if (!qm_info->wfq_data) + goto alloc_err; + vport_id = (u8)RESC_START(p_hwfn, QED_VPORT); /* First init per-TC PQs */ @@ -221,10 +228,7 @@ static int qed_init_qm_info(struct qed_hwfn *p_hwfn) alloc_err: DP_NOTICE(p_hwfn, "Failed to allocate memory for QM params\n"); - kfree(qm_info->qm_pq_params); - kfree(qm_info->qm_vport_params); - kfree(qm_info->qm_port_params); - + qed_qm_info_free(p_hwfn); return -ENOMEM; } @@ -1595,3 +1599,179 @@ int qed_fw_rss_eng(struct qed_hwfn *p_hwfn, return 0; } + +/* Calculate final WFQ values for all vports and configure them. 
+ * After this configuration each vport will have + * approx min rate = min_pf_rate * (vport_wfq / QED_WFQ_UNIT) + */ +static void qed_configure_wfq_for_all_vports(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 min_pf_rate) +{ + struct init_qm_vport_params *vport_params; + int i; + + vport_params = p_hwfn->qm_info.qm_vport_params; + + for (i = 0; i < p_hwfn->qm_info.num_vports; i++) { + u32 wfq_speed = p_hwfn->qm_info.wfq_data[i].min_speed; + + vport_params[i].vport_wfq = (wfq_speed * QED_WFQ_UNIT) / + min_pf_rate; + qed_init_vport_wfq(p_hwfn, p_ptt, + vport_params[i].first_tx_pq_id, + vport_params[i].vport_wfq); + } +} + +static void qed_init_wfq_default_param(struct qed_hwfn *p_hwfn, + u32 min_pf_rate) + +{ + int i; + + for (i = 0; i < p_hwfn->qm_info.num_vports; i++) + p_hwfn->qm_info.qm_vport_params[i].vport_wfq = 1; +} + +static void qed_disable_wfq_for_all_vports(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 min_pf_rate) +{ + struct init_qm_vport_params *vport_params; + int i; + + vport_params = p_hwfn->qm_info.qm_vport_params; + + for (i = 0; i < p_hwfn->qm_info.num_vports; i++) { + qed_init_wfq_default_param(p_hwfn, min_pf_rate); + qed_init_vport_wfq(p_hwfn, p_ptt, + vport_params[i].first_tx_pq_id, + vport_params[i].vport_wfq); + } +} + +/* This function performs several validations for WFQ + * configuration and required min rate for a given vport + * 1. req_rate must be greater than one percent of min_pf_rate. + * 2. req_rate should not cause other vports [not configured for WFQ explicitly] + * rates to get less than one percent of min_pf_rate. + * 3. total_req_min_rate [all vports min rate sum] shouldn't exceed min_pf_rate. + */ +static int qed_init_wfq_param(struct qed_hwfn *p_hwfn, + u16 vport_id, u32 req_rate, + u32 min_pf_rate) +{ + u32 total_req_min_rate = 0, total_left_rate = 0, left_rate_per_vp = 0; + int non_requested_count = 0, req_count = 0, i, num_vports; + + num_vports = p_hwfn->qm_info.num_vports; + + /* Accounting for the vports which are configured for WFQ explicitly */ + for (i = 0; i < num_vports; i++) { + u32 tmp_speed; + + if ((i != vport_id) && + p_hwfn->qm_info.wfq_data[i].configured) { + req_count++; + tmp_speed = p_hwfn->qm_info.wfq_data[i].min_speed; + total_req_min_rate += tmp_speed; + } + } + + /* Include current vport data as well */ + req_count++; + total_req_min_rate += req_rate; + non_requested_count = num_vports - req_count; + + if (req_rate < min_pf_rate / QED_WFQ_UNIT) { + DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, + "Vport [%d] - Requested rate[%d Mbps] is less than one percent of configured PF min rate[%d Mbps]\n", + vport_id, req_rate, min_pf_rate); + return -EINVAL; + } + + if (num_vports > QED_WFQ_UNIT) { + DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, + "Number of vports is greater than %d\n", + QED_WFQ_UNIT); + return -EINVAL; + } + + if (total_req_min_rate > min_pf_rate) { + DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, + "Total requested min rate for all vports[%d Mbps] is greater than configured PF min rate[%d Mbps]\n", + total_req_min_rate, min_pf_rate); + return -EINVAL; + } + + total_left_rate = min_pf_rate - total_req_min_rate; + + left_rate_per_vp = total_left_rate / non_requested_count; + if (left_rate_per_vp < min_pf_rate / QED_WFQ_UNIT) { + DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, + "Non WFQ configured vports rate [%d Mbps] is less than one percent of configured PF min rate[%d Mbps]\n", + left_rate_per_vp, min_pf_rate); + return -EINVAL; + } + + p_hwfn->qm_info.wfq_data[vport_id].min_speed = req_rate; + 
p_hwfn->qm_info.wfq_data[vport_id].configured = true; + + for (i = 0; i < num_vports; i++) { + if (p_hwfn->qm_info.wfq_data[i].configured) + continue; + + p_hwfn->qm_info.wfq_data[i].min_speed = left_rate_per_vp; + } + + return 0; +} + +static int __qed_configure_vp_wfq_on_link_change(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 min_pf_rate) +{ + bool use_wfq = false; + int rc = 0; + u16 i; + + /* Validate all pre configured vports for wfq */ + for (i = 0; i < p_hwfn->qm_info.num_vports; i++) { + u32 rate; + + if (!p_hwfn->qm_info.wfq_data[i].configured) + continue; + + rate = p_hwfn->qm_info.wfq_data[i].min_speed; + use_wfq = true; + + rc = qed_init_wfq_param(p_hwfn, i, rate, min_pf_rate); + if (rc) { + DP_NOTICE(p_hwfn, + "WFQ validation failed while configuring min rate\n"); + break; + } + } + + if (!rc && use_wfq) + qed_configure_wfq_for_all_vports(p_hwfn, p_ptt, min_pf_rate); + else + qed_disable_wfq_for_all_vports(p_hwfn, p_ptt, min_pf_rate); + + return rc; +} + +/* API to configure WFQ from mcp link change */ +void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev, u32 min_pf_rate) +{ + int i; + + for_each_hwfn(cdev, i) { + struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; + + __qed_configure_vp_wfq_on_link_change(p_hwfn, + p_hwfn->p_dpc_ptt, + min_pf_rate); + } +} diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h index 15e02ab9be5a..7d5ed0c17da7 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h @@ -5116,4 +5116,6 @@ struct hw_set_image { struct hw_set_info hw_sets[1]; }; +int qed_init_vport_wfq(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + u16 first_tx_pq_id[NUM_OF_TCS], u16 vport_wfq); #endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c index 1dd53248b984..e646987a3d41 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c +++ b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c @@ -732,6 +732,31 @@ int qed_init_pf_rl(struct qed_hwfn *p_hwfn, return 0; } +int qed_init_vport_wfq(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u16 first_tx_pq_id[NUM_OF_TCS], + u16 vport_wfq) +{ + u32 inc_val = QM_WFQ_INC_VAL(vport_wfq); + u8 tc; + + if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) { + DP_NOTICE(p_hwfn, "Invalid VPORT WFQ weight configuration"); + return -1; + } + + for (tc = 0; tc < NUM_OF_TCS; tc++) { + u16 vport_pq_id = first_tx_pq_id[tc]; + + if (vport_pq_id != QM_INVALID_PQ_ID) + qed_wr(p_hwfn, p_ptt, + QM_REG_WFQVPWEIGHT + vport_pq_id * 4, + inc_val); + } + + return 0; +} + int qed_init_vport_rl(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u8 vport_id, diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h index 55451a4dc587..d2f57301cb3e 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h +++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h @@ -458,4 +458,5 @@ #define PBF_REG_NGE_COMP_VER 0xd80524UL #define PRS_REG_NGE_COMP_VER 0x1f0878UL +#define QM_REG_WFQVPWEIGHT 0x2fa000UL #endif From 4b01e5192bd26ed4d0c3c271611cc74ae2c164f2 Mon Sep 17 00:00:00 2001 From: Manish Chopra Date: Tue, 26 Apr 2016 10:56:09 -0400 Subject: [PATCH 1070/1649] qed: Add PF max bandwidth configuration support This patch adds support for PF maximum bandwidth update or configuration notified by management firmware. Signed-off-by: Manish Chopra Signed-off-by: Yuval Mintz Signed-off-by: David S. 
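A stand-alone sketch of the max-bandwidth arithmetic the diff below adds: the PF rate limit becomes line_speed * max_bw / 100, and max_bw == 100 is treated as "no cap" by programming a high dummy limit (100000 Mbps) so Tx-switched traffic is not throttled. Example speeds only, not values read from the MFW.

#include <stdio.h>

static unsigned int pf_rate_limit(unsigned int line_speed, unsigned int max_bw)
{
	/* max_bw == 100 means no cap: use the high dummy limit */
	if (max_bw == 100)
		return 100000;

	return line_speed * max_bw / 100;	/* percentage of link speed */
}

int main(void)
{
	printf("40G link, max_bw 75  -> %u Mbps\n", pf_rate_limit(40000, 75));
	printf("40G link, max_bw 100 -> %u Mbps\n", pf_rate_limit(40000, 100));
	return 0;
}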
Miller --- drivers/net/ethernet/qlogic/qed/qed_dev.c | 68 ++++++++++- drivers/net/ethernet/qlogic/qed/qed_hsi.h | 2 +- drivers/net/ethernet/qlogic/qed/qed_mcp.c | 138 +++++++++++++--------- drivers/net/ethernet/qlogic/qed/qed_mcp.h | 14 ++- 4 files changed, 165 insertions(+), 57 deletions(-) diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index 28e0619a290e..4e99108d9427 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -579,7 +579,7 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn, p_hwfn->qm_info.pf_wfq = p_info->bandwidth_min; /* Update rate limit once we'll actually have a link */ - p_hwfn->qm_info.pf_rl = 100; + p_hwfn->qm_info.pf_rl = 100000; } qed_cxt_hw_init_pf(p_hwfn); @@ -1775,3 +1775,69 @@ void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev, u32 min_pf_rate) min_pf_rate); } } + +int __qed_configure_pf_max_bandwidth(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_mcp_link_state *p_link, + u8 max_bw) +{ + int rc = 0; + + p_hwfn->mcp_info->func_info.bandwidth_max = max_bw; + + if (!p_link->line_speed && (max_bw != 100)) + return rc; + + p_link->speed = (p_link->line_speed * max_bw) / 100; + p_hwfn->qm_info.pf_rl = p_link->speed; + + /* Since the limiter also affects Tx-switched traffic, we don't want it + * to limit such traffic in case there's no actual limit. + * In that case, set limit to imaginary high boundary. + */ + if (max_bw == 100) + p_hwfn->qm_info.pf_rl = 100000; + + rc = qed_init_pf_rl(p_hwfn, p_ptt, p_hwfn->rel_pf_id, + p_hwfn->qm_info.pf_rl); + + DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, + "Configured MAX bandwidth to be %08x Mb/sec\n", + p_link->speed); + + return rc; +} + +/* Main API to configure PF max bandwidth where bw range is [1 - 100] */ +int qed_configure_pf_max_bandwidth(struct qed_dev *cdev, u8 max_bw) +{ + int i, rc = -EINVAL; + + if (max_bw < 1 || max_bw > 100) { + DP_NOTICE(cdev, "PF max bw valid range is [1-100]\n"); + return rc; + } + + for_each_hwfn(cdev, i) { + struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; + struct qed_hwfn *p_lead = QED_LEADING_HWFN(cdev); + struct qed_mcp_link_state *p_link; + struct qed_ptt *p_ptt; + + p_link = &p_lead->mcp_info->link_output; + + p_ptt = qed_ptt_acquire(p_hwfn); + if (!p_ptt) + return -EBUSY; + + rc = __qed_configure_pf_max_bandwidth(p_hwfn, p_ptt, + p_link, max_bw); + + qed_ptt_release(p_hwfn, p_ptt); + + if (rc) + break; + } + + return rc; +} diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h index 7d5ed0c17da7..81cf62573ec4 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h @@ -3837,7 +3837,7 @@ struct public_drv_mb { #define DRV_MSG_CODE_SET_LLDP 0x24000000 #define DRV_MSG_CODE_SET_DCBX 0x25000000 - +#define DRV_MSG_CODE_BW_UPDATE_ACK 0x32000000 #define DRV_MSG_CODE_NIG_DRAIN 0x30000000 #define DRV_MSG_CODE_INITIATE_FLR 0x02000000 diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index b89c9a8e1655..578b09c2ae18 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c @@ -473,6 +473,7 @@ static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn, { struct qed_mcp_link_state *p_link; u32 status = 0; + u8 max_bw; p_link = &p_hwfn->mcp_info->link_output; memset(p_link, 0, sizeof(*p_link)); @@ -527,17 +528,15 @@ static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn, p_link->speed = 0; 
} + if (p_link->link_up && p_link->speed) + p_link->line_speed = p_link->speed; + else + p_link->line_speed = 0; + + max_bw = p_hwfn->mcp_info->func_info.bandwidth_max; + /* Correct speed according to bandwidth allocation */ - if (p_hwfn->mcp_info->func_info.bandwidth_max && p_link->speed) { - p_link->speed = p_link->speed * - p_hwfn->mcp_info->func_info.bandwidth_max / - 100; - qed_init_pf_rl(p_hwfn, p_ptt, p_hwfn->rel_pf_id, - p_link->speed); - DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, - "Configured MAX bandwidth to be %08x Mb/sec\n", - p_link->speed); - } + __qed_configure_pf_max_bandwidth(p_hwfn, p_ptt, p_link, max_bw); p_link->an = !!(status & LINK_STATUS_AUTO_NEGOTIATE_ENABLED); p_link->an_complete = !!(status & @@ -648,6 +647,76 @@ int qed_mcp_set_link(struct qed_hwfn *p_hwfn, return 0; } +static void qed_read_pf_bandwidth(struct qed_hwfn *p_hwfn, + struct public_func *p_shmem_info) +{ + struct qed_mcp_function_info *p_info; + + p_info = &p_hwfn->mcp_info->func_info; + + p_info->bandwidth_min = (p_shmem_info->config & + FUNC_MF_CFG_MIN_BW_MASK) >> + FUNC_MF_CFG_MIN_BW_SHIFT; + if (p_info->bandwidth_min < 1 || p_info->bandwidth_min > 100) { + DP_INFO(p_hwfn, + "bandwidth minimum out of bounds [%02x]. Set to 1\n", + p_info->bandwidth_min); + p_info->bandwidth_min = 1; + } + + p_info->bandwidth_max = (p_shmem_info->config & + FUNC_MF_CFG_MAX_BW_MASK) >> + FUNC_MF_CFG_MAX_BW_SHIFT; + if (p_info->bandwidth_max < 1 || p_info->bandwidth_max > 100) { + DP_INFO(p_hwfn, + "bandwidth maximum out of bounds [%02x]. Set to 100\n", + p_info->bandwidth_max); + p_info->bandwidth_max = 100; + } +} + +static u32 qed_mcp_get_shmem_func(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct public_func *p_data, + int pfid) +{ + u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base, + PUBLIC_FUNC); + u32 mfw_path_offsize = qed_rd(p_hwfn, p_ptt, addr); + u32 func_addr = SECTION_ADDR(mfw_path_offsize, pfid); + u32 i, size; + + memset(p_data, 0, sizeof(*p_data)); + + size = min_t(u32, sizeof(*p_data), + QED_SECTION_SIZE(mfw_path_offsize)); + for (i = 0; i < size / sizeof(u32); i++) + ((u32 *)p_data)[i] = qed_rd(p_hwfn, p_ptt, + func_addr + (i << 2)); + return size; +} + +static void qed_mcp_update_bw(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt) +{ + struct qed_mcp_function_info *p_info; + struct public_func shmem_info; + u32 resp = 0, param = 0; + + qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, + MCP_PF_ID(p_hwfn)); + + qed_read_pf_bandwidth(p_hwfn, &shmem_info); + + p_info = &p_hwfn->mcp_info->func_info; + + qed_configure_pf_max_bandwidth(p_hwfn->cdev, p_info->bandwidth_max); + + /* Acknowledge the MFW */ + qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BW_UPDATE_ACK, 0, &resp, + ¶m); +} + int qed_mcp_handle_events(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { @@ -679,6 +748,9 @@ int qed_mcp_handle_events(struct qed_hwfn *p_hwfn, case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE: qed_mcp_handle_transceiver_change(p_hwfn, p_ptt); break; + case MFW_DRV_MSG_BW_UPDATE: + qed_mcp_update_bw(p_hwfn, p_ptt); + break; default: DP_NOTICE(p_hwfn, "Unimplemented MFW message %d\n", i); rc = -EINVAL; @@ -758,28 +830,6 @@ int qed_mcp_get_media_type(struct qed_dev *cdev, return 0; } -static u32 qed_mcp_get_shmem_func(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - struct public_func *p_data, - int pfid) -{ - u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base, - PUBLIC_FUNC); - u32 mfw_path_offsize = qed_rd(p_hwfn, p_ptt, addr); - u32 func_addr = SECTION_ADDR(mfw_path_offsize, pfid); - u32 i, size; - - 
memset(p_data, 0, sizeof(*p_data)); - - size = min_t(u32, sizeof(*p_data), - QED_SECTION_SIZE(mfw_path_offsize)); - for (i = 0; i < size / sizeof(u32); i++) - ((u32 *)p_data)[i] = qed_rd(p_hwfn, p_ptt, - func_addr + (i << 2)); - - return size; -} - static int qed_mcp_get_shmem_proto(struct qed_hwfn *p_hwfn, struct public_func *p_info, @@ -818,26 +868,7 @@ int qed_mcp_fill_shmem_func_info(struct qed_hwfn *p_hwfn, return -EINVAL; } - - info->bandwidth_min = (shmem_info.config & - FUNC_MF_CFG_MIN_BW_MASK) >> - FUNC_MF_CFG_MIN_BW_SHIFT; - if (info->bandwidth_min < 1 || info->bandwidth_min > 100) { - DP_INFO(p_hwfn, - "bandwidth minimum out of bounds [%02x]. Set to 1\n", - info->bandwidth_min); - info->bandwidth_min = 1; - } - - info->bandwidth_max = (shmem_info.config & - FUNC_MF_CFG_MAX_BW_MASK) >> - FUNC_MF_CFG_MAX_BW_SHIFT; - if (info->bandwidth_max < 1 || info->bandwidth_max > 100) { - DP_INFO(p_hwfn, - "bandwidth maximum out of bounds [%02x]. Set to 100\n", - info->bandwidth_max); - info->bandwidth_max = 100; - } + qed_read_pf_bandwidth(p_hwfn, &shmem_info); if (shmem_info.mac_upper || shmem_info.mac_lower) { info->mac[0] = (u8)(shmem_info.mac_upper >> 8); @@ -938,9 +969,10 @@ qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn, p_drv_version = &union_data.drv_version; p_drv_version->version = p_ver->version; + for (i = 0; i < MCP_DRV_VER_STR_SIZE - 1; i += 4) { val = cpu_to_be32(p_ver->name[i]); - *(u32 *)&p_drv_version->name[i * sizeof(u32)] = val; + *(__be32 *)&p_drv_version->name[i * sizeof(u32)] = val; } memset(&mb_params, 0, sizeof(mb_params)); diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h index 50917a2131a5..29a51ada038c 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h @@ -40,7 +40,13 @@ struct qed_mcp_link_capabilities { struct qed_mcp_link_state { bool link_up; - u32 speed; /* In Mb/s */ + /* Actual link speed in Mb/s */ + u32 line_speed; + + /* PF max speed in Mb/s, deduced from line_speed + * according to PF max bandwidth configuration. + */ + u32 speed; bool full_duplex; bool an; @@ -388,5 +394,9 @@ int qed_mcp_reset(struct qed_hwfn *p_hwfn, * @return true iff MFW is running and mcp_info is initialized */ bool qed_mcp_is_init(struct qed_hwfn *p_hwfn); - +int qed_configure_pf_max_bandwidth(struct qed_dev *cdev, u8 max_bw); +int __qed_configure_pf_max_bandwidth(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_mcp_link_state *p_link, + u8 max_bw); #endif From a64b02d5301cc7da7ac33ae3b3531ab1262d196e Mon Sep 17 00:00:00 2001 From: Manish Chopra Date: Tue, 26 Apr 2016 10:56:10 -0400 Subject: [PATCH 1071/1649] qed: Add PF min bandwidth configuration support This patch adds support for PF minimum bandwidth update or configuration notified by management firmware. Signed-off-by: Manish Chopra Signed-off-by: Yuval Mintz Signed-off-by: David S. 
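The companion min-bandwidth diff below does the symmetric conversion: the raw percentage becomes the PF WFQ weight, the floor rate becomes line_speed * min_bw / 100, and the per-vport WFQ weights are then recomputed against that floor. A small sketch with example numbers, not driver code:

#include <stdio.h>

int main(void)
{
	unsigned int line_speed = 25000;	/* Mbps, example link speed     */
	unsigned int min_bw = 20;		/* percent, from management FW  */

	unsigned int pf_wfq = min_bw;			/* PF WFQ weight */
	unsigned int min_pf_rate = line_speed * min_bw / 100;

	printf("pf_wfq=%u, min_pf_rate=%u Mbps\n", pf_wfq, min_pf_rate);
	/* the vport weights are then recomputed against min_pf_rate,
	 * as qed_configure_vp_wfq_on_link_change() does in the patch
	 */
	return 0;
}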
Miller --- drivers/net/ethernet/qlogic/qed/qed_dev.c | 71 +++++++++++++++++++ drivers/net/ethernet/qlogic/qed/qed_hsi.h | 2 + .../ethernet/qlogic/qed/qed_init_fw_funcs.c | 15 ++++ drivers/net/ethernet/qlogic/qed/qed_mcp.c | 10 ++- drivers/net/ethernet/qlogic/qed/qed_mcp.h | 7 ++ .../net/ethernet/qlogic/qed/qed_reg_addr.h | 1 + 6 files changed, 104 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index 4e99108d9427..b500c86d7d06 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -220,9 +220,13 @@ static int qed_init_qm_info(struct qed_hwfn *p_hwfn) qm_info->start_vport = (u8)RESC_START(p_hwfn, QED_VPORT); + for (i = 0; i < qm_info->num_vports; i++) + qm_info->qm_vport_params[i].vport_wfq = 1; + qm_info->pf_wfq = 0; qm_info->pf_rl = 0; qm_info->vport_rl_en = 1; + qm_info->vport_wfq_en = 1; return 0; @@ -1841,3 +1845,70 @@ int qed_configure_pf_max_bandwidth(struct qed_dev *cdev, u8 max_bw) return rc; } + +int __qed_configure_pf_min_bandwidth(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_mcp_link_state *p_link, + u8 min_bw) +{ + int rc = 0; + + p_hwfn->mcp_info->func_info.bandwidth_min = min_bw; + p_hwfn->qm_info.pf_wfq = min_bw; + + if (!p_link->line_speed) + return rc; + + p_link->min_pf_rate = (p_link->line_speed * min_bw) / 100; + + rc = qed_init_pf_wfq(p_hwfn, p_ptt, p_hwfn->rel_pf_id, min_bw); + + DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, + "Configured MIN bandwidth to be %d Mb/sec\n", + p_link->min_pf_rate); + + return rc; +} + +/* Main API to configure PF min bandwidth where bw range is [1-100] */ +int qed_configure_pf_min_bandwidth(struct qed_dev *cdev, u8 min_bw) +{ + int i, rc = -EINVAL; + + if (min_bw < 1 || min_bw > 100) { + DP_NOTICE(cdev, "PF min bw valid range is [1-100]\n"); + return rc; + } + + for_each_hwfn(cdev, i) { + struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; + struct qed_hwfn *p_lead = QED_LEADING_HWFN(cdev); + struct qed_mcp_link_state *p_link; + struct qed_ptt *p_ptt; + + p_link = &p_lead->mcp_info->link_output; + + p_ptt = qed_ptt_acquire(p_hwfn); + if (!p_ptt) + return -EBUSY; + + rc = __qed_configure_pf_min_bandwidth(p_hwfn, p_ptt, + p_link, min_bw); + if (rc) { + qed_ptt_release(p_hwfn, p_ptt); + return rc; + } + + if (p_link->min_pf_rate) { + u32 min_rate = p_link->min_pf_rate; + + rc = __qed_configure_vp_wfq_on_link_change(p_hwfn, + p_ptt, + min_rate); + } + + qed_ptt_release(p_hwfn, p_ptt); + } + + return rc; +} diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h index 81cf62573ec4..5aa78a9ae17f 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h @@ -5116,6 +5116,8 @@ struct hw_set_image { struct hw_set_info hw_sets[1]; }; +int qed_init_pf_wfq(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + u8 pf_id, u16 pf_wfq); int qed_init_vport_wfq(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 first_tx_pq_id[NUM_OF_TCS], u16 vport_wfq); #endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c index e646987a3d41..e8a3b9da59b5 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c +++ b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c @@ -712,6 +712,21 @@ int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn, return 0; } +int qed_init_pf_wfq(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u8 pf_id, u16 pf_wfq) +{ + u32 inc_val = QM_WFQ_INC_VAL(pf_wfq); + 
+ if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) { + DP_NOTICE(p_hwfn, "Invalid PF WFQ weight configuration"); + return -1; + } + + qed_wr(p_hwfn, p_ptt, QM_REG_WFQPFWEIGHT + pf_id * 4, inc_val); + return 0; +} + int qed_init_pf_rl(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u8 pf_id, diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index 578b09c2ae18..cb46dbdf47dd 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c @@ -472,8 +472,8 @@ static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn, bool b_reset) { struct qed_mcp_link_state *p_link; + u8 max_bw, min_bw; u32 status = 0; - u8 max_bw; p_link = &p_hwfn->mcp_info->link_output; memset(p_link, 0, sizeof(*p_link)); @@ -534,10 +534,15 @@ static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn, p_link->line_speed = 0; max_bw = p_hwfn->mcp_info->func_info.bandwidth_max; + min_bw = p_hwfn->mcp_info->func_info.bandwidth_min; - /* Correct speed according to bandwidth allocation */ + /* Max bandwidth configuration */ __qed_configure_pf_max_bandwidth(p_hwfn, p_ptt, p_link, max_bw); + /* Min bandwidth configuration */ + __qed_configure_pf_min_bandwidth(p_hwfn, p_ptt, p_link, min_bw); + qed_configure_vp_wfq_on_link_change(p_hwfn->cdev, p_link->min_pf_rate); + p_link->an = !!(status & LINK_STATUS_AUTO_NEGOTIATE_ENABLED); p_link->an_complete = !!(status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE); @@ -710,6 +715,7 @@ static void qed_mcp_update_bw(struct qed_hwfn *p_hwfn, p_info = &p_hwfn->mcp_info->func_info; + qed_configure_pf_min_bandwidth(p_hwfn->cdev, p_info->bandwidth_min); qed_configure_pf_max_bandwidth(p_hwfn->cdev, p_info->bandwidth_max); /* Acknowledge the MFW */ diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h index 29a51ada038c..608bcb2403cb 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h @@ -40,6 +40,8 @@ struct qed_mcp_link_capabilities { struct qed_mcp_link_state { bool link_up; + u32 min_pf_rate; + /* Actual link speed in Mb/s */ u32 line_speed; @@ -394,9 +396,14 @@ int qed_mcp_reset(struct qed_hwfn *p_hwfn, * @return true iff MFW is running and mcp_info is initialized */ bool qed_mcp_is_init(struct qed_hwfn *p_hwfn); +int qed_configure_pf_min_bandwidth(struct qed_dev *cdev, u8 min_bw); int qed_configure_pf_max_bandwidth(struct qed_dev *cdev, u8 max_bw); int __qed_configure_pf_max_bandwidth(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_mcp_link_state *p_link, u8 max_bw); +int __qed_configure_pf_min_bandwidth(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_mcp_link_state *p_link, + u8 min_bw); #endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h index d2f57301cb3e..bf4d7ccd56bb 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h +++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h @@ -458,5 +458,6 @@ #define PBF_REG_NGE_COMP_VER 0xd80524UL #define PRS_REG_NGE_COMP_VER 0x1f0878UL +#define QM_REG_WFQPFWEIGHT 0x2f4e80UL #define QM_REG_WFQVPWEIGHT 0x2fa000UL #endif From 308379607548524b8d86dbf20134681024935e0b Mon Sep 17 00:00:00 2001 From: Francesco Ruggeri Date: Sat, 23 Apr 2016 15:03:32 -0700 Subject: [PATCH 1072/1649] macvlan: fix failure during registration v3 If macvlan_common_newlink fails in register_netdevice after macvlan_init then it decrements port->count twice, first in macvlan_uninit (from register_netdevice or 
rollback_registered) and then again in macvlan_common_newlink. A similar problem may exist in the ipvlan driver. This patch consolidates modifications to port->count into macvlan_init and macvlan_uninit (thanks to Eric Biederman for suggesting this approach). v3: remove macvtap specific bits. Signed-off-by: Francesco Ruggeri Acked-by: "Eric W. Biederman" Signed-off-by: David S. Miller --- drivers/net/macvlan.c | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index 2bcf1f321bea..cb01023eab41 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -795,6 +795,7 @@ static int macvlan_init(struct net_device *dev) { struct macvlan_dev *vlan = netdev_priv(dev); const struct net_device *lowerdev = vlan->lowerdev; + struct macvlan_port *port = vlan->port; dev->state = (dev->state & ~MACVLAN_STATE_MASK) | (lowerdev->state & MACVLAN_STATE_MASK); @@ -812,6 +813,8 @@ static int macvlan_init(struct net_device *dev) if (!vlan->pcpu_stats) return -ENOMEM; + port->count += 1; + return 0; } @@ -1312,10 +1315,9 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev, return err; } - port->count += 1; err = register_netdevice(dev); if (err < 0) - goto destroy_port; + return err; dev->priv_flags |= IFF_MACVLAN; err = netdev_upper_dev_link(lowerdev, dev); @@ -1330,10 +1332,6 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev, unregister_netdev: unregister_netdevice(dev); -destroy_port: - port->count -= 1; - if (!port->count) - macvlan_port_destroy(lowerdev); return err; } From e96c37f185529d1db4ebc021e4f56822d43945bb Mon Sep 17 00:00:00 2001 From: Francesco Ruggeri Date: Sat, 23 Apr 2016 15:04:31 -0700 Subject: [PATCH 1073/1649] macvtap: check minor when unregistering macvtap_device_event(NETDEV_UNREGISTER) should check vlan->minor to determine if it is being invoked in the context of a macvtap_newlink that failed, for example in this code sequence: macvtap_newlink macvlan_common_newlink register_netdevice call_netdevice_notifiers(NETDEV_REGISTER, dev) macvtap_device_event(NETDEV_REGISTER) minor = 0> rollback_registered(dev); rollback_registered_many call_netdevice_notifiers(NETDEV_UNREGISTER, dev); macvtap_device_event(NETDEV_UNREGISTER) Signed-off-by: Francesco Ruggeri Acked-by: "Eric W. Biederman" Signed-off-by: David S. Miller --- drivers/net/macvtap.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c index 95394edd1ed5..74cb15a2e032 100644 --- a/drivers/net/macvtap.c +++ b/drivers/net/macvtap.c @@ -1303,6 +1303,9 @@ static int macvtap_device_event(struct notifier_block *unused, } break; case NETDEV_UNREGISTER: + /* vlan->minor == 0 if NETDEV_REGISTER above failed */ + if (vlan->minor == 0) + break; devt = MKDEV(MAJOR(macvtap_major), vlan->minor); device_destroy(macvtap_class, devt); macvtap_free_minor(vlan); From f052f20a825a80c4662dafd3899dddafd4c8f036 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Sun, 24 Apr 2016 23:21:22 +0800 Subject: [PATCH 1074/1649] sctp: sctp_diag should fill RMEM_ALLOC with asoc->rmem_alloc when rcvbuf_policy is set For sctp assoc, when rcvbuf_policy is set, it will has it's own rmem_alloc, when we dump asoc info in sctp_diag, we should use that value on RMEM_ALLOC as well, just like WMEM_ALLOC. Signed-off-by: Xin Long Signed-off-by: David S. 
Miller --- net/sctp/sctp_diag.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/net/sctp/sctp_diag.c b/net/sctp/sctp_diag.c index 84829fff3bc9..8e3e769dc9ea 100644 --- a/net/sctp/sctp_diag.c +++ b/net/sctp/sctp_diag.c @@ -145,7 +145,11 @@ static int inet_sctp_diag_fill(struct sock *sk, struct sctp_association *asoc, else amt = sk_wmem_alloc_get(sk); mem[SK_MEMINFO_WMEM_ALLOC] = amt; - mem[SK_MEMINFO_RMEM_ALLOC] = sk_rmem_alloc_get(sk); + if (asoc && asoc->ep->rcvbuf_policy) + amt = atomic_read(&asoc->rmem_alloc); + else + amt = sk_rmem_alloc_get(sk); + mem[SK_MEMINFO_RMEM_ALLOC] = amt; mem[SK_MEMINFO_RCVBUF] = sk->sk_rcvbuf; mem[SK_MEMINFO_SNDBUF] = sk->sk_sndbuf; mem[SK_MEMINFO_FWD_ALLOC] = sk->sk_forward_alloc; From f796721040f933bb51b33f53899a382b4872b3e6 Mon Sep 17 00:00:00 2001 From: Sergei Shtylyov Date: Sun, 24 Apr 2016 19:11:07 +0300 Subject: [PATCH 1075/1649] sh_eth: get rid of the 2nd parameter to sh_eth_dev_init() sh_eth_dev_init() is now always called with 'true' as the 2nd argument, so that there's no more sense in having 2 parameters to this function... Signed-off-by: Sergei Shtylyov Reviewed-by: Simon Horman Signed-off-by: David S. Miller --- drivers/net/ethernet/renesas/sh_eth.c | 23 +++++++++-------------- 1 file changed, 9 insertions(+), 14 deletions(-) diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index ceea74cc2229..edf6356c7034 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -1229,7 +1229,7 @@ ring_free: return -ENOMEM; } -static int sh_eth_dev_init(struct net_device *ndev, bool start) +static int sh_eth_dev_init(struct net_device *ndev) { struct sh_eth_private *mdp = netdev_priv(ndev); int ret; @@ -1279,10 +1279,8 @@ static int sh_eth_dev_init(struct net_device *ndev, bool start) RFLR); sh_eth_modify(ndev, EESR, 0, 0); - if (start) { - mdp->irq_enabled = true; - sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR); - } + mdp->irq_enabled = true; + sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR); /* PAUSE Prohibition */ sh_eth_write(ndev, ECMR_ZPF | (mdp->duplex ? ECMR_DM : 0) | @@ -1295,8 +1293,7 @@ static int sh_eth_dev_init(struct net_device *ndev, bool start) sh_eth_write(ndev, mdp->cd->ecsr_value, ECSR); /* E-MAC Interrupt Enable register */ - if (start) - sh_eth_write(ndev, mdp->cd->ecsipr_value, ECSIPR); + sh_eth_write(ndev, mdp->cd->ecsipr_value, ECSIPR); /* Set MAC address */ update_mac_address(ndev); @@ -1309,10 +1306,8 @@ static int sh_eth_dev_init(struct net_device *ndev, bool start) if (mdp->cd->tpauser) sh_eth_write(ndev, TPAUSER_UNLIMITED, TPAUSER); - if (start) { - /* Setting the Rx mode will start the Rx process. */ - sh_eth_write(ndev, EDRRR_R, EDRRR); - } + /* Setting the Rx mode will start the Rx process. 
*/ + sh_eth_write(ndev, EDRRR_R, EDRRR); return ret; } @@ -2194,7 +2189,7 @@ static int sh_eth_set_ringparam(struct net_device *ndev, __func__); return ret; } - ret = sh_eth_dev_init(ndev, true); + ret = sh_eth_dev_init(ndev); if (ret < 0) { netdev_err(ndev, "%s: sh_eth_dev_init failed.\n", __func__); @@ -2246,7 +2241,7 @@ static int sh_eth_open(struct net_device *ndev) goto out_free_irq; /* device init */ - ret = sh_eth_dev_init(ndev, true); + ret = sh_eth_dev_init(ndev); if (ret) goto out_free_irq; @@ -2299,7 +2294,7 @@ static void sh_eth_tx_timeout(struct net_device *ndev) } /* device init */ - sh_eth_dev_init(ndev, true); + sh_eth_dev_init(ndev); netif_start_queue(ndev); } From b74766a0a0feeef5c779709cc5d109451c0d5b17 Mon Sep 17 00:00:00 2001 From: Sergei Shtylyov Date: Sun, 24 Apr 2016 20:25:23 +0300 Subject: [PATCH 1076/1649] phylib: don't return NULL from get_phy_device() Arnd Bergmann asked that get_phy_device() returns either NULL or the error value, not both on error. Do as he said, return ERR_PTR(-ENODEV) instead of NULL when the PHY ID registers read as all ones. Suggested-by: Arnd Bergmann Signed-off-by: Sergei Shtylyov Reviewed-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/phy/phy_device.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 10e39c2fbf81..e977ba931878 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -529,7 +529,7 @@ struct phy_device *get_phy_device(struct mii_bus *bus, int addr, bool is_c45) /* If the phy_id is mostly Fs, there is no device there */ if ((phy_id & 0x1fffffff) == 0x1fffffff) - return NULL; + return ERR_PTR(-ENODEV); return phy_device_create(bus, addr, phy_id, is_c45, &c45_ids); } From fb1116ab7cf55f9b022c2a2d40a0f0c4464eb201 Mon Sep 17 00:00:00 2001 From: Sergei Shtylyov Date: Sun, 24 Apr 2016 20:27:49 +0300 Subject: [PATCH 1077/1649] xgene: get_phy_device() doesn't return NULL anymore Now that get_phy_device() no longer returns NULL on error, we don't need to check for it... Signed-off-by: Sergei Shtylyov Reviewed-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/ethernet/apm/xgene/xgene_enet_hw.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c index 39e081a70f5b..457f74500242 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c @@ -824,7 +824,7 @@ static int xgene_mdiobus_register(struct xgene_enet_pdata *pdata, return -EINVAL; phy = get_phy_device(mdio, phy_id, false); - if (!phy || IS_ERR(phy)) + if (IS_ERR(phy)) return -EIO; ret = phy_device_register(phy); From 4914a584b1d66680532b20898cba1cf7d5ae63e4 Mon Sep 17 00:00:00 2001 From: Sergei Shtylyov Date: Sun, 24 Apr 2016 20:29:23 +0300 Subject: [PATCH 1078/1649] fixed_phy: get_phy_device() doesn't return NULL anymore Now that get_phy_device() no longer returns NULL on error, we don't need to check for it... Signed-off-by: Sergei Shtylyov Reviewed-by: Florian Fainelli Signed-off-by: David S. 
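This patch and the four follow-ups convert get_phy_device() and its callers to the usual ERR_PTR()/IS_ERR() convention, so a single IS_ERR() test covers every failure. The sketch below uses simplified stand-ins for the <linux/err.h> helpers in plain user-space C to show why the extra NULL checks become unnecessary; it is not the kernel's code.

#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO	4095

/* simplified stand-ins for the kernel helpers: small negative errno
 * values are encoded in the top of the pointer range
 */
static inline void *ERR_PTR(long error)	{ return (void *)error; }
static inline long PTR_ERR(const void *ptr)	{ return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* stand-in for get_phy_device(): no PHY at this address */
static void *get_phy(int addr)
{
	(void)addr;
	return ERR_PTR(-ENODEV);
}

int main(void)
{
	void *phy = get_phy(0);

	if (IS_ERR(phy))	/* no separate "phy == NULL" check needed */
		printf("probe failed: %ld\n", PTR_ERR(phy));
	return 0;
}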
Miller --- drivers/net/phy/fixed_phy.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/phy/fixed_phy.c b/drivers/net/phy/fixed_phy.c index fc07a8866020..9050f21e6f33 100644 --- a/drivers/net/phy/fixed_phy.c +++ b/drivers/net/phy/fixed_phy.c @@ -328,7 +328,7 @@ struct phy_device *fixed_phy_register(unsigned int irq, return ERR_PTR(ret); phy = get_phy_device(fmb->mii_bus, phy_addr, false); - if (!phy || IS_ERR(phy)) { + if (IS_ERR(phy)) { fixed_phy_del(phy_addr); return ERR_PTR(-EINVAL); } From 66c239e71adee694bc35f44387fb373942cfd957 Mon Sep 17 00:00:00 2001 From: Sergei Shtylyov Date: Sun, 24 Apr 2016 20:30:53 +0300 Subject: [PATCH 1079/1649] mdio_bus: get_phy_device() doesn't return NULL anymore Now that get_phy_device() no longer returns NULL on error, we don't need to check for it... Signed-off-by: Sergei Shtylyov Reviewed-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/phy/mdio_bus.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c index 751202a285a6..499003ee8055 100644 --- a/drivers/net/phy/mdio_bus.c +++ b/drivers/net/phy/mdio_bus.c @@ -419,7 +419,7 @@ struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr) int err; phydev = get_phy_device(bus, addr, false); - if (IS_ERR(phydev) || phydev == NULL) + if (IS_ERR(phydev)) return phydev; /* From af5840a968fcb175900e5b821f37363a089f5b75 Mon Sep 17 00:00:00 2001 From: Sergei Shtylyov Date: Sun, 24 Apr 2016 20:31:42 +0300 Subject: [PATCH 1080/1649] of_mdio: get_phy_device() doesn't return NULL anymore Now that get_phy_device() no longer returns NULL on error, we don't need to check for it... Signed-off-by: Sergei Shtylyov Reviewed-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/of/of_mdio.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c index 2c1e52e06102..b622b33dbf93 100644 --- a/drivers/of/of_mdio.c +++ b/drivers/of/of_mdio.c @@ -56,7 +56,7 @@ static void of_mdiobus_register_phy(struct mii_bus *mdio, phy = phy_device_create(mdio, addr, phy_id, 0, NULL); else phy = get_phy_device(mdio, addr, is_c45); - if (IS_ERR_OR_NULL(phy)) + if (IS_ERR(phy)) return; rc = irq_of_parse_and_map(child, 0); From 47975cd1022d12735f02046ef8ada83f8ad24087 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Sun, 24 Apr 2016 21:38:09 +0200 Subject: [PATCH 1081/1649] RDMA/nes: remove use of NETDEV_TX_LOCKED ndo_start_xmit never returns it to stack, but nes_nic_send helper used it if skb could not be queued to hardware. Switch to bool instead. Cc: Signed-off-by: Florian Westphal Signed-off-by: David S. 
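The underlying rule is that an skb handed to the transmit path is owned by the driver, so an internal send helper is better off reporting plain success or failure and leaving the netdev_tx_t return code to .ndo_start_xmit itself. A rough sketch of that split; every example_* name and the tx_free bookkeeping are invented for illustration:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

struct example_nic {
	unsigned int tx_free;	/* free slots in the hardware ring */
};

/* Returns true if the frame was queued, false if it had to be dropped;
 * the skb is consumed here either way.
 */
static bool example_nic_send(struct net_device *netdev, struct sk_buff *skb)
{
	struct example_nic *nic = netdev_priv(netdev);

	if (nic->tx_free == 0) {
		kfree_skb(skb);
		return false;
	}

	nic->tx_free--;
	/* ... map the buffer and write the hardware descriptor here ... */
	return true;
}

static netdev_tx_t example_start_xmit(struct sk_buff *skb,
				      struct net_device *netdev)
{
	if (!example_nic_send(netdev, skb))
		netdev->stats.tx_dropped++;

	return NETDEV_TX_OK;	/* never a transmit-status code from the helper */
}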
Miller --- drivers/infiniband/hw/nes/nes_nic.c | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c index 3ea9e055fdd3..b09a6db80201 100644 --- a/drivers/infiniband/hw/nes/nes_nic.c +++ b/drivers/infiniband/hw/nes/nes_nic.c @@ -356,7 +356,7 @@ static int nes_netdev_stop(struct net_device *netdev) /** * nes_nic_send */ -static int nes_nic_send(struct sk_buff *skb, struct net_device *netdev) +static bool nes_nic_send(struct sk_buff *skb, struct net_device *netdev) { struct nes_vnic *nesvnic = netdev_priv(netdev); struct nes_device *nesdev = nesvnic->nesdev; @@ -413,7 +413,7 @@ static int nes_nic_send(struct sk_buff *skb, struct net_device *netdev) netdev->name, skb_shinfo(skb)->nr_frags + 2, skb_headlen(skb)); kfree_skb(skb); nesvnic->tx_sw_dropped++; - return NETDEV_TX_LOCKED; + return false; } set_bit(nesnic->sq_head, nesnic->first_frag_overflow); bus_address = pci_map_single(nesdev->pcidev, skb->data + NES_FIRST_FRAG_SIZE, @@ -454,8 +454,7 @@ static int nes_nic_send(struct sk_buff *skb, struct net_device *netdev) set_wqe_32bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_MISC_IDX, wqe_misc); nesnic->sq_head++; nesnic->sq_head &= nesnic->sq_size - 1; - - return NETDEV_TX_OK; + return true; } @@ -673,13 +672,11 @@ tso_sq_no_longer_full: skb_linearize(skb); skb_set_transport_header(skb, hoffset); skb_set_network_header(skb, nhoffset); - send_rc = nes_nic_send(skb, netdev); - if (send_rc != NETDEV_TX_OK) + if (!nes_nic_send(skb, netdev)) return NETDEV_TX_OK; } } else { - send_rc = nes_nic_send(skb, netdev); - if (send_rc != NETDEV_TX_OK) + if (!nes_nic_send(skb, netdev)) return NETDEV_TX_OK; } From 353e3bd5a7081f23a9f015cbf172ec25b1412b93 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Sun, 24 Apr 2016 21:38:10 +0200 Subject: [PATCH 1082/1649] atl1c: remove private tx lock AFAICS this is safe: the lock is only used in the .ndo_start_xmit function and this driver does not set LLTX. Gets rid of TX_LOCKED return value, followup patches will remove it. Cc: Jay Cliburn Cc: Chris Snook Signed-off-by: Florian Westphal Signed-off-by: David S. 
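The safety argument is that a driver which does not advertise NETIF_F_LLTX already has its .ndo_start_xmit serialized per queue by the core xmit lock, so a private tx_lock cannot add any protection. A simplified sketch of what the core does around the driver hook; this is an illustration, not the literal net/core source, and error paths are omitted:

#include <linux/netdevice.h>

static netdev_tx_t example_core_xmit_one(struct sk_buff *skb,
					 struct net_device *dev,
					 struct netdev_queue *txq)
{
	netdev_tx_t ret = NETDEV_TX_BUSY;

	/* For a !NETIF_F_LLTX driver the core takes the per-queue xmit lock
	 * here, so .ndo_start_xmit never races with itself on this queue.
	 */
	HARD_TX_LOCK(dev, txq, smp_processor_id());
	if (!netif_xmit_frozen_or_stopped(txq))
		ret = netdev_start_xmit(skb, dev, txq, false);
	HARD_TX_UNLOCK(dev, txq);

	return ret;
}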
Miller --- drivers/net/ethernet/atheros/atl1c/atl1c.h | 3 +-- drivers/net/ethernet/atheros/atl1c/atl1c_main.c | 11 ----------- 2 files changed, 1 insertion(+), 13 deletions(-) diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c.h b/drivers/net/ethernet/atheros/atl1c/atl1c.h index b9203d928938..c46b489ce9b4 100644 --- a/drivers/net/ethernet/atheros/atl1c/atl1c.h +++ b/drivers/net/ethernet/atheros/atl1c/atl1c.h @@ -488,7 +488,7 @@ struct atl1c_tpd_ring { dma_addr_t dma; /* descriptor ring physical address */ u16 size; /* descriptor ring length in bytes */ u16 count; /* number of descriptors in the ring */ - u16 next_to_use; /* this is protectd by adapter->tx_lock */ + u16 next_to_use; atomic_t next_to_clean; struct atl1c_buffer *buffer_info; }; @@ -542,7 +542,6 @@ struct atl1c_adapter { u16 link_duplex; spinlock_t mdio_lock; - spinlock_t tx_lock; atomic_t irq_sem; struct work_struct common_task; diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c index d0084d4d1a9b..a3200ea6d765 100644 --- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c +++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c @@ -821,7 +821,6 @@ static int atl1c_sw_init(struct atl1c_adapter *adapter) atl1c_set_rxbufsize(adapter, adapter->netdev); atomic_set(&adapter->irq_sem, 1); spin_lock_init(&adapter->mdio_lock); - spin_lock_init(&adapter->tx_lock); set_bit(__AT_DOWN, &adapter->flags); return 0; @@ -2206,7 +2205,6 @@ static netdev_tx_t atl1c_xmit_frame(struct sk_buff *skb, struct net_device *netdev) { struct atl1c_adapter *adapter = netdev_priv(netdev); - unsigned long flags; u16 tpd_req = 1; struct atl1c_tpd_desc *tpd; enum atl1c_trans_queue type = atl1c_trans_normal; @@ -2217,16 +2215,10 @@ static netdev_tx_t atl1c_xmit_frame(struct sk_buff *skb, } tpd_req = atl1c_cal_tpd_req(skb); - if (!spin_trylock_irqsave(&adapter->tx_lock, flags)) { - if (netif_msg_pktdata(adapter)) - dev_info(&adapter->pdev->dev, "tx locked\n"); - return NETDEV_TX_LOCKED; - } if (atl1c_tpd_avail(adapter, type) < tpd_req) { /* no enough descriptor, just stop queue */ netif_stop_queue(netdev); - spin_unlock_irqrestore(&adapter->tx_lock, flags); return NETDEV_TX_BUSY; } @@ -2234,7 +2226,6 @@ static netdev_tx_t atl1c_xmit_frame(struct sk_buff *skb, /* do TSO and check sum */ if (atl1c_tso_csum(adapter, skb, &tpd, type) != 0) { - spin_unlock_irqrestore(&adapter->tx_lock, flags); dev_kfree_skb_any(skb); return NETDEV_TX_OK; } @@ -2257,12 +2248,10 @@ static netdev_tx_t atl1c_xmit_frame(struct sk_buff *skb, "tx-skb droppted due to dma error\n"); /* roll back tpd/buffer */ atl1c_tx_rollback(adapter, tpd, type); - spin_unlock_irqrestore(&adapter->tx_lock, flags); dev_kfree_skb_any(skb); } else { netdev_sent_queue(adapter->netdev, skb->len); atl1c_tx_queue(adapter, skb, tpd, type); - spin_unlock_irqrestore(&adapter->tx_lock, flags); } return NETDEV_TX_OK; From 4acff371f2e3f386422253c7fce0092a793ec1a4 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Sun, 24 Apr 2016 21:38:11 +0200 Subject: [PATCH 1083/1649] atle1: remove LLTX support and TX_UNLOCKED similar to atl1c: lock is only used in ndo_start_xmit, but we also advertised LLTX, so remove that as well and let core stack handle tx locking. Allows to remove the TX_LOCKED return value from the driver. Cc: Jay Cliburn Cc: Chris Snook Signed-off-by: Florian Westphal Signed-off-by: David S. 
Miller --- drivers/net/ethernet/atheros/atl1e/atl1e.h | 1 - drivers/net/ethernet/atheros/atl1e/atl1e_main.c | 12 +----------- 2 files changed, 1 insertion(+), 12 deletions(-) diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e.h b/drivers/net/ethernet/atheros/atl1e/atl1e.h index 0212dac7e23a..632bb843aed6 100644 --- a/drivers/net/ethernet/atheros/atl1e/atl1e.h +++ b/drivers/net/ethernet/atheros/atl1e/atl1e.h @@ -442,7 +442,6 @@ struct atl1e_adapter { u16 link_duplex; spinlock_t mdio_lock; - spinlock_t tx_lock; atomic_t irq_sem; struct work_struct reset_task; diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c index 59a03a193e83..974713b19ab6 100644 --- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c +++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c @@ -648,7 +648,6 @@ static int atl1e_sw_init(struct atl1e_adapter *adapter) atomic_set(&adapter->irq_sem, 1); spin_lock_init(&adapter->mdio_lock); - spin_lock_init(&adapter->tx_lock); set_bit(__AT_DOWN, &adapter->flags); @@ -1866,7 +1865,6 @@ static netdev_tx_t atl1e_xmit_frame(struct sk_buff *skb, struct net_device *netdev) { struct atl1e_adapter *adapter = netdev_priv(netdev); - unsigned long flags; u16 tpd_req = 1; struct atl1e_tpd_desc *tpd; @@ -1880,13 +1878,10 @@ static netdev_tx_t atl1e_xmit_frame(struct sk_buff *skb, return NETDEV_TX_OK; } tpd_req = atl1e_cal_tdp_req(skb); - if (!spin_trylock_irqsave(&adapter->tx_lock, flags)) - return NETDEV_TX_LOCKED; if (atl1e_tpd_avail(adapter) < tpd_req) { /* no enough descriptor, just stop queue */ netif_stop_queue(netdev); - spin_unlock_irqrestore(&adapter->tx_lock, flags); return NETDEV_TX_BUSY; } @@ -1910,7 +1905,6 @@ static netdev_tx_t atl1e_xmit_frame(struct sk_buff *skb, /* do TSO and check sum */ if (atl1e_tso_csum(adapter, skb, tpd) != 0) { - spin_unlock_irqrestore(&adapter->tx_lock, flags); dev_kfree_skb_any(skb); return NETDEV_TX_OK; } @@ -1921,10 +1915,7 @@ static netdev_tx_t atl1e_xmit_frame(struct sk_buff *skb, } atl1e_tx_queue(adapter, tpd_req, tpd); - - netdev->trans_start = jiffies; /* NETIF_F_LLTX driver :( */ out: - spin_unlock_irqrestore(&adapter->tx_lock, flags); return NETDEV_TX_OK; } @@ -2285,8 +2276,7 @@ static int atl1e_init_netdev(struct net_device *netdev, struct pci_dev *pdev) netdev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_HW_VLAN_CTAG_RX; - netdev->features = netdev->hw_features | NETIF_F_LLTX | - NETIF_F_HW_VLAN_CTAG_TX; + netdev->features = netdev->hw_features | NETIF_F_HW_VLAN_CTAG_TX; /* not enabled by default */ netdev->hw_features |= NETIF_F_RXALL | NETIF_F_RXFCS; return 0; From 926f27300100f4233c7665649f68fcf615f58d68 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Sun, 24 Apr 2016 21:38:12 +0200 Subject: [PATCH 1084/1649] drivers: net: use NETDEV_TX_OK instead of NETDEV_TX_LOCKED These drivers already call netif_stop_queue() so we should not be called unless tx space is available. Just free the skb and return TX_OK. Followup patch will remove NETDEV_TX_LOCKED from the kernel. Cc: linux-parisc@vger.kernel.org Cc: linux-hams@vger.kernel.org Cc: Thomas Sailer Signed-off-by: Florian Westphal Signed-off-by: David S. 
Miller --- drivers/net/ethernet/amd/7990.c | 8 +++++--- drivers/net/ethernet/amd/a2065.c | 7 +++---- drivers/net/ethernet/dec/tulip/de4x5.c | 7 +++++-- drivers/net/hamradio/baycom_epp.c | 6 ++++-- drivers/net/hamradio/hdlcdrv.c | 6 ++++-- 5 files changed, 21 insertions(+), 13 deletions(-) diff --git a/drivers/net/ethernet/amd/7990.c b/drivers/net/ethernet/amd/7990.c index 66d0b73c39c0..8e7575571531 100644 --- a/drivers/net/ethernet/amd/7990.c +++ b/drivers/net/ethernet/amd/7990.c @@ -543,11 +543,13 @@ int lance_start_xmit(struct sk_buff *skb, struct net_device *dev) static int outs; unsigned long flags; - if (!TX_BUFFS_AVAIL) - return NETDEV_TX_LOCKED; - netif_stop_queue(dev); + if (!TX_BUFFS_AVAIL) { + dev_consume_skb_any(skb); + return NETDEV_TX_OK; + } + skblen = skb->len; #ifdef DEBUG_DRIVER diff --git a/drivers/net/ethernet/amd/a2065.c b/drivers/net/ethernet/amd/a2065.c index 56139184b801..2a18d34d2610 100644 --- a/drivers/net/ethernet/amd/a2065.c +++ b/drivers/net/ethernet/amd/a2065.c @@ -547,10 +547,8 @@ static netdev_tx_t lance_start_xmit(struct sk_buff *skb, local_irq_save(flags); - if (!lance_tx_buffs_avail(lp)) { - local_irq_restore(flags); - return NETDEV_TX_LOCKED; - } + if (!lance_tx_buffs_avail(lp)) + goto out_free; #ifdef DEBUG /* dump the packet */ @@ -573,6 +571,7 @@ static netdev_tx_t lance_start_xmit(struct sk_buff *skb, /* Kick the lance: transmit now */ ll->rdp = LE_C0_INEA | LE_C0_TDMD; + out_free: dev_kfree_skb(skb); local_irq_restore(flags); diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c index 3acde3b9b767..d88fbab378aa 100644 --- a/drivers/net/ethernet/dec/tulip/de4x5.c +++ b/drivers/net/ethernet/dec/tulip/de4x5.c @@ -1465,7 +1465,7 @@ de4x5_queue_pkt(struct sk_buff *skb, struct net_device *dev) netif_stop_queue(dev); if (!lp->tx_enable) /* Cannot send for now */ - return NETDEV_TX_LOCKED; + goto tx_err; /* ** Clean out the TX ring asynchronously to interrupts - sometimes the @@ -1478,7 +1478,7 @@ de4x5_queue_pkt(struct sk_buff *skb, struct net_device *dev) /* Test if cache is already locked - requeue skb if so */ if (test_and_set_bit(0, (void *)&lp->cache.lock) && !lp->interrupt) - return NETDEV_TX_LOCKED; + goto tx_err; /* Transmit descriptor ring full or stale skb */ if (netif_queue_stopped(dev) || (u_long) lp->tx_skb[lp->tx_new] > 1) { @@ -1519,6 +1519,9 @@ de4x5_queue_pkt(struct sk_buff *skb, struct net_device *dev) lp->cache.lock = 0; return NETDEV_TX_OK; +tx_err: + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; } /* diff --git a/drivers/net/hamradio/baycom_epp.c b/drivers/net/hamradio/baycom_epp.c index 72c9f1f352b4..eb6663866c9f 100644 --- a/drivers/net/hamradio/baycom_epp.c +++ b/drivers/net/hamradio/baycom_epp.c @@ -780,8 +780,10 @@ static int baycom_send_packet(struct sk_buff *skb, struct net_device *dev) dev_kfree_skb(skb); return NETDEV_TX_OK; } - if (bc->skb) - return NETDEV_TX_LOCKED; + if (bc->skb) { + dev_kfree_skb(skb); + return NETDEV_TX_OK; + } /* strip KISS byte */ if (skb->len >= HDLCDRV_MAXFLEN+1 || skb->len < 3) { dev_kfree_skb(skb); diff --git a/drivers/net/hamradio/hdlcdrv.c b/drivers/net/hamradio/hdlcdrv.c index 49fe59b180a8..4bad0b894e9c 100644 --- a/drivers/net/hamradio/hdlcdrv.c +++ b/drivers/net/hamradio/hdlcdrv.c @@ -412,8 +412,10 @@ static netdev_tx_t hdlcdrv_send_packet(struct sk_buff *skb, dev_kfree_skb(skb); return NETDEV_TX_OK; } - if (sm->skb) - return NETDEV_TX_LOCKED; + if (sm->skb) { + dev_kfree_skb(skb); + return NETDEV_TX_OK; + } netif_stop_queue(dev); sm->skb = skb; return 
NETDEV_TX_OK; From a6086a893718db07ef9e7af5624ec27cb376ef0a Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Sun, 24 Apr 2016 21:38:13 +0200 Subject: [PATCH 1085/1649] drivers: net: remove NETDEV_TX_LOCKED replace the trylock by a full spin_lock and remove TX_LOCKED return value. Followup patch will remove TX_LOCKED from the kernel. Cc: Jon Mason Cc: Andy Gospodarek Signed-off-by: Florian Westphal Signed-off-by: David S. Miller --- drivers/net/ethernet/chelsio/cxgb/sge.c | 3 +-- drivers/net/ethernet/neterion/s2io.c | 9 +-------- drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c | 6 ++---- drivers/net/ethernet/tehuti/tehuti.c | 8 +------- drivers/net/rionet.c | 6 +----- 5 files changed, 6 insertions(+), 26 deletions(-) diff --git a/drivers/net/ethernet/chelsio/cxgb/sge.c b/drivers/net/ethernet/chelsio/cxgb/sge.c index 526ea74e82d9..86f467a2c485 100644 --- a/drivers/net/ethernet/chelsio/cxgb/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb/sge.c @@ -1664,8 +1664,7 @@ static int t1_sge_tx(struct sk_buff *skb, struct adapter *adapter, struct cmdQ *q = &sge->cmdQ[qid]; unsigned int credits, pidx, genbit, count, use_sched_skb = 0; - if (!spin_trylock(&q->lock)) - return NETDEV_TX_LOCKED; + spin_lock(&q->lock); reclaim_completed_tx(sge, q); diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c index 9ba975853ec6..2874dffe77de 100644 --- a/drivers/net/ethernet/neterion/s2io.c +++ b/drivers/net/ethernet/neterion/s2io.c @@ -4021,7 +4021,6 @@ static netdev_tx_t s2io_xmit(struct sk_buff *skb, struct net_device *dev) unsigned long flags = 0; u16 vlan_tag = 0; struct fifo_info *fifo = NULL; - int do_spin_lock = 1; int offload_type; int enable_per_list_interrupt = 0; struct config_param *config = &sp->config; @@ -4074,7 +4073,6 @@ static netdev_tx_t s2io_xmit(struct sk_buff *skb, struct net_device *dev) queue += sp->udp_fifo_idx; if (skb->len > 1024) enable_per_list_interrupt = 1; - do_spin_lock = 0; } } } @@ -4084,12 +4082,7 @@ static netdev_tx_t s2io_xmit(struct sk_buff *skb, struct net_device *dev) [skb->priority & (MAX_TX_FIFOS - 1)]; fifo = &mac_control->fifos[queue]; - if (do_spin_lock) - spin_lock_irqsave(&fifo->tx_lock, flags); - else { - if (unlikely(!spin_trylock_irqsave(&fifo->tx_lock, flags))) - return NETDEV_TX_LOCKED; - } + spin_lock_irqsave(&fifo->tx_lock, flags); if (sp->config.multiq) { if (__netif_subqueue_stopped(dev, fifo->fifo_no)) { diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c index 3b98b263bad0..4475dcc687a2 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c @@ -2137,10 +2137,8 @@ static int pch_gbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev) struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring; unsigned long flags; - if (!spin_trylock_irqsave(&tx_ring->tx_lock, flags)) { - /* Collision - tell upper layer to requeue */ - return NETDEV_TX_LOCKED; - } + spin_trylock_irqsave(&tx_ring->tx_lock, flags); + if (unlikely(!PCH_GBE_DESC_UNUSED(tx_ring))) { netif_stop_queue(netdev); spin_unlock_irqrestore(&tx_ring->tx_lock, flags); diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c index 14c9d1baa85c..2524a69db318 100644 --- a/drivers/net/ethernet/tehuti/tehuti.c +++ b/drivers/net/ethernet/tehuti/tehuti.c @@ -1610,7 +1610,6 @@ static inline int bdx_tx_space(struct bdx_priv *priv) * o NETDEV_TX_BUSY Cannot transmit packet, try later * Usually a bug, means 
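Where a driver keeps its own lock for other reasons, the conversion is to take it unconditionally and rely on the usual stop-queue/NETDEV_TX_BUSY flow control when the ring fills, rather than bailing out with NETDEV_TX_LOCKED on contention. A minimal sketch of the resulting pattern; struct example_priv and its fields are placeholders, not any particular driver:

#include <linux/netdevice.h>
#include <linux/spinlock.h>

struct example_priv {
	spinlock_t tx_lock;	/* still guards the descriptor ring */
	unsigned int tx_free;	/* free descriptors */
};

static netdev_tx_t example_start_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct example_priv *priv = netdev_priv(dev);
	unsigned long flags;

	/* Wait for the lock instead of returning NETDEV_TX_LOCKED. */
	spin_lock_irqsave(&priv->tx_lock, flags);

	if (unlikely(priv->tx_free == 0)) {
		netif_stop_queue(dev);
		spin_unlock_irqrestore(&priv->tx_lock, flags);
		return NETDEV_TX_BUSY;
	}

	priv->tx_free--;
	/* ... build and post the hardware descriptor here ... */
	spin_unlock_irqrestore(&priv->tx_lock, flags);

	return NETDEV_TX_OK;
}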
queue start/stop flow control is broken in * the driver. Note: the driver must NOT put the skb in its DMA ring. - * o NETDEV_TX_LOCKED Locking failed, please retry quickly. */ static netdev_tx_t bdx_tx_transmit(struct sk_buff *skb, struct net_device *ndev) @@ -1630,12 +1629,7 @@ static netdev_tx_t bdx_tx_transmit(struct sk_buff *skb, ENTER; local_irq_save(flags); - if (!spin_trylock(&priv->tx_lock)) { - local_irq_restore(flags); - DBG("%s[%s]: TX locked, returning NETDEV_TX_LOCKED\n", - BDX_DRV_NAME, ndev->name); - return NETDEV_TX_LOCKED; - } + spin_lock(&priv->tx_lock); /* build tx descriptor */ BDX_ASSERT(f->m.wptr >= f->m.memsz); /* started with valid wptr */ diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c index 9cfe6aeac84e..a31f4610b493 100644 --- a/drivers/net/rionet.c +++ b/drivers/net/rionet.c @@ -179,11 +179,7 @@ static int rionet_start_xmit(struct sk_buff *skb, struct net_device *ndev) unsigned long flags; int add_num = 1; - local_irq_save(flags); - if (!spin_trylock(&rnet->tx_lock)) { - local_irq_restore(flags); - return NETDEV_TX_LOCKED; - } + spin_lock_irqsave(&rnet->tx_lock, flags); if (is_multicast_ether_addr(eth->h_dest)) add_num = nets[rnet->mport->id].nact; From f0cdf76c103ffa34ca5ac87dcdef7edffc722cbf Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Sun, 24 Apr 2016 21:38:14 +0200 Subject: [PATCH 1086/1649] net: remove NETDEV_TX_LOCKED support No more users in the tree, remove NETDEV_TX_LOCKED support. Adds another hole in softnet_stats struct, but better than keeping the unused collision counter around. Signed-off-by: Florian Westphal Signed-off-by: David S. Miller --- Documentation/networking/netdev-features.txt | 10 +++--- Documentation/networking/netdevices.txt | 9 ++---- include/linux/netdevice.h | 3 -- net/core/net-procfs.c | 3 +- net/core/pktgen.c | 1 - net/sched/sch_generic.c | 32 -------------------- 6 files changed, 9 insertions(+), 49 deletions(-) diff --git a/Documentation/networking/netdev-features.txt b/Documentation/networking/netdev-features.txt index f310edec8a77..7413eb05223b 100644 --- a/Documentation/networking/netdev-features.txt +++ b/Documentation/networking/netdev-features.txt @@ -131,13 +131,11 @@ stack. Driver should not change behaviour based on them. * LLTX driver (deprecated for hardware drivers) -NETIF_F_LLTX should be set in drivers that implement their own locking in -transmit path or don't need locking at all (e.g. software tunnels). -In ndo_start_xmit, it is recommended to use a try_lock and return -NETDEV_TX_LOCKED when the spin lock fails. The locking should also properly -protect against other callbacks (the rules you need to find out). +NETIF_F_LLTX is meant to be used by drivers that don't need locking at all, +e.g. software tunnels. -Don't use it for new drivers. +This is also used in a few legacy drivers that implement their +own locking, don't use it for new (hardware) drivers. * netns-local device diff --git a/Documentation/networking/netdevices.txt b/Documentation/networking/netdevices.txt index 0b1cf6b2a592..7fec2061a334 100644 --- a/Documentation/networking/netdevices.txt +++ b/Documentation/networking/netdevices.txt @@ -69,10 +69,9 @@ ndo_start_xmit: When the driver sets NETIF_F_LLTX in dev->features this will be called without holding netif_tx_lock. In this case the driver - has to lock by itself when needed. It is recommended to use a try lock - for this and return NETDEV_TX_LOCKED when the spin lock fails. - The locking there should also properly protect against - set_rx_mode. 
Note that the use of NETIF_F_LLTX is deprecated. + has to lock by itself when needed. + The locking there should also properly protect against + set_rx_mode. WARNING: use of NETIF_F_LLTX is deprecated. Don't use it for new drivers. Context: Process with BHs disabled or BH (timer), @@ -83,8 +82,6 @@ ndo_start_xmit: o NETDEV_TX_BUSY Cannot transmit packet, try later Usually a bug, means queue start/stop flow control is broken in the driver. Note: the driver must NOT put the skb in its DMA ring. - o NETDEV_TX_LOCKED Locking failed, please retry quickly. - Only valid when NETIF_F_LLTX is set. ndo_tx_timeout: Synchronization: netif_tx_lock spinlock; all TX queues frozen. diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 1f6d5db471a2..18d8394f2e5d 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -106,7 +106,6 @@ enum netdev_tx { __NETDEV_TX_MIN = INT_MIN, /* make sure enum is signed */ NETDEV_TX_OK = 0x00, /* driver took care of packet */ NETDEV_TX_BUSY = 0x10, /* driver tx path was busy*/ - NETDEV_TX_LOCKED = 0x20, /* driver tx lock was already taken */ }; typedef enum netdev_tx netdev_tx_t; @@ -831,7 +830,6 @@ struct tc_to_netdev { * the queue before that can happen; it's for obsolete devices and weird * corner cases, but the stack really does a non-trivial amount * of useless work if you return NETDEV_TX_BUSY. - * (can also return NETDEV_TX_LOCKED iff NETIF_F_LLTX) * Required; cannot be NULL. * * netdev_features_t (*ndo_fix_features)(struct net_device *dev, @@ -2737,7 +2735,6 @@ struct softnet_data { /* stats */ unsigned int processed; unsigned int time_squeeze; - unsigned int cpu_collision; unsigned int received_rps; #ifdef CONFIG_RPS struct softnet_data *rps_ipi_list; diff --git a/net/core/net-procfs.c b/net/core/net-procfs.c index 2bf83299600a..14d09345f00d 100644 --- a/net/core/net-procfs.c +++ b/net/core/net-procfs.c @@ -162,7 +162,8 @@ static int softnet_seq_show(struct seq_file *seq, void *v) "%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x\n", sd->processed, sd->dropped, sd->time_squeeze, 0, 0, 0, 0, 0, /* was fastroute */ - sd->cpu_collision, sd->received_rps, flow_limit_count); + 0, /* was cpu_collision */ + sd->received_rps, flow_limit_count); return 0; } diff --git a/net/core/pktgen.c b/net/core/pktgen.c index 20999aa596dd..8604ae245960 100644 --- a/net/core/pktgen.c +++ b/net/core/pktgen.c @@ -3472,7 +3472,6 @@ xmit_more: pkt_dev->odevname, ret); pkt_dev->errors++; /* fallthru */ - case NETDEV_TX_LOCKED: case NETDEV_TX_BUSY: /* Retry it next time */ atomic_dec(&(pkt_dev->skb->users)); diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index 80742edea96f..9c7756237904 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c @@ -108,35 +108,6 @@ static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate, return skb; } -static inline int handle_dev_cpu_collision(struct sk_buff *skb, - struct netdev_queue *dev_queue, - struct Qdisc *q) -{ - int ret; - - if (unlikely(dev_queue->xmit_lock_owner == smp_processor_id())) { - /* - * Same CPU holding the lock. It may be a transient - * configuration error, when hard_start_xmit() recurses. We - * detect it by checking xmit owner and drop the packet when - * deadloop is detected. Return OK to try the next skb. - */ - kfree_skb_list(skb); - net_warn_ratelimited("Dead loop on netdevice %s, fix it urgently!\n", - dev_queue->dev->name); - ret = qdisc_qlen(q); - } else { - /* - * Another cpu is holding lock, requeue & delay xmits for - * some time. 
- */ - __this_cpu_inc(softnet_data.cpu_collision); - ret = dev_requeue_skb(skb, q); - } - - return ret; -} - /* * Transmit possibly several skbs, and handle the return status as * required. Holding the __QDISC___STATE_RUNNING bit guarantees that @@ -174,9 +145,6 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q, if (dev_xmit_complete(ret)) { /* Driver sent out skb successfully or skb was consumed */ ret = qdisc_qlen(q); - } else if (ret == NETDEV_TX_LOCKED) { - /* Driver try lock failed */ - ret = handle_dev_cpu_collision(skb, txq, q); } else { /* Driver returned NETDEV_TX_BUSY - requeue skb */ if (unlikely(ret != NETDEV_TX_BUSY)) From 4e095a9a6ee50ba8a9820a4991b6a2a27c67bdb4 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Tue, 26 Apr 2016 15:57:19 -0400 Subject: [PATCH 1087/1649] infiniband: nes: Kill unused variable in nes_netdev_start_xmit() Signed-off-by: David S. Miller --- drivers/infiniband/hw/nes/nes_nic.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c index b09a6db80201..99cef26e74b4 100644 --- a/drivers/infiniband/hw/nes/nes_nic.c +++ b/drivers/infiniband/hw/nes/nes_nic.c @@ -478,7 +478,6 @@ static int nes_netdev_start_xmit(struct sk_buff *skb, struct net_device *netdev) u32 tso_wqe_length; u32 curr_tcp_seq; u32 wqe_count=1; - u32 send_rc; struct iphdr *iph; __le16 *wqe_fragment_length; u32 nr_frags; From 269e6b3af3bfbc4e50b497924c2abb1c4cf3364e Mon Sep 17 00:00:00 2001 From: Gal Pressman Date: Sun, 24 Apr 2016 22:51:46 +0300 Subject: [PATCH 1088/1649] net/mlx5e: Report additional error statistics in get stats ndo Provide rtnl_link_stats64 with information regarding physical errors to be seen in ifconfig and ip tool. Signed-off-by: Gal Pressman Signed-off-by: Saeed Mahameed Signed-off-by: David S. 
Miller --- .../net/ethernet/mellanox/mlx5/core/en_main.c | 39 ++++++++++++++++--- 1 file changed, 33 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index d485d1e4e100..6270f8d539db 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -2074,18 +2074,45 @@ mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats) { struct mlx5e_priv *priv = netdev_priv(dev); struct mlx5e_vport_stats *vstats = &priv->stats.vport; + struct mlx5e_pport_stats *pstats = &priv->stats.pport; stats->rx_packets = vstats->rx_packets; stats->rx_bytes = vstats->rx_bytes; stats->tx_packets = vstats->tx_packets; stats->tx_bytes = vstats->tx_bytes; - stats->multicast = vstats->rx_multicast_packets + - vstats->tx_multicast_packets; - stats->tx_errors = vstats->tx_error_packets; - stats->rx_errors = vstats->rx_error_packets; + +#define PPCNT_GET_802_3_CTR(fld) \ + (MLX5_GET64(eth_802_3_cntrs_grp_data_layout, \ + pstats->IEEE_802_3_counters, fld##_high)) + +#define PPCNT_GET_2863_CTR(fld) \ + (MLX5_GET64(eth_2863_cntrs_grp_data_layout, \ + pstats->RFC_2863_counters, fld##_high)) + + stats->rx_dropped = priv->stats.qcnt.rx_out_of_buffer; stats->tx_dropped = vstats->tx_queue_dropped; - stats->rx_crc_errors = 0; - stats->rx_length_errors = 0; + + stats->rx_length_errors = + PPCNT_GET_802_3_CTR(a_in_range_length_errors) + + PPCNT_GET_802_3_CTR(a_out_of_range_length_field) + + PPCNT_GET_802_3_CTR(a_frame_too_long_errors); + stats->rx_crc_errors = + PPCNT_GET_802_3_CTR(a_frame_check_sequence_errors); + stats->rx_frame_errors = + PPCNT_GET_802_3_CTR(a_alignment_errors); + stats->tx_aborted_errors = + PPCNT_GET_2863_CTR(if_out_discards); + stats->tx_carrier_errors = + PPCNT_GET_802_3_CTR(a_symbol_error_during_carrier); + stats->rx_errors = stats->rx_length_errors + stats->rx_crc_errors + + stats->rx_frame_errors; + stats->tx_errors = stats->tx_aborted_errors + stats->tx_carrier_errors; + + /* vport multicast also counts packets that are dropped due to steering + * or rx out of buffer + */ + stats->multicast = vstats->rx_multicast_packets; + return stats; } From 9218b44dcc059e08e249f6f7614b8e391eb041d8 Mon Sep 17 00:00:00 2001 From: Gal Pressman Date: Sun, 24 Apr 2016 22:51:47 +0300 Subject: [PATCH 1089/1649] net/mlx5e: Statistics handling refactoring Redesign ethtool statistics handling and reporting in the driver: 1. Move counters to a separate file (en_stats.h). 2. Remove unnecessary dependencies between stats and strings. 3. Use counter descriptors which hold a name and offset for each counter, and will be used to decide which counters will be exposed. For example when adding a new software counter to ethtool, instead of: 1. Add to stats struct. 2. Add to strings struct in the same order. 3. Change macro defining number of software counters. The only thing needed is to link the new counter to a counter descriptor. VPort counters are a set of hardware traffic counters created automatically for each virtual port opened. PPort counters are a set of counters describing per physical port performance statistics. These counters are gathered from hardware register and divided to groups according to different protocols. Signed-off-by: Gal Pressman Signed-off-by: Saeed Mahameed Signed-off-by: David S. 
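The payoff is easiest to see with a hypothetical new software counter: it becomes one struct field plus one descriptor entry, and the ethtool string and value loops are driven by ARRAY_SIZE() of the descriptor table. A condensed sketch using the counter_desc, MLX5E_DECLARE_STAT and MLX5E_READ_CTR64_CPU helpers this patch introduces in en_stats.h; example_sw_stats, rx_example_drops and example_fill_stats are invented names:

struct example_sw_stats {
	u64 rx_packets;
	u64 rx_example_drops;	/* new counter: add the field ... */
};

static const struct counter_desc example_stats_desc[] = {
	{ MLX5E_DECLARE_STAT(struct example_sw_stats, rx_packets) },
	{ MLX5E_DECLARE_STAT(struct example_sw_stats, rx_example_drops) },
	/* ... plus one descriptor line; no separate string array or
	 * hand-maintained NUM_* macro to keep in sync
	 */
};

static void example_fill_stats(struct example_sw_stats *stats,
			       u64 *data, u8 *strings)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(example_stats_desc); i++) {
		strcpy(strings + i * ETH_GSTRING_LEN,
		       example_stats_desc[i].name);
		data[i] = MLX5E_READ_CTR64_CPU(stats, example_stats_desc, i);
	}
}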
Miller --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 240 +------------ .../ethernet/mellanox/mlx5/core/en_ethtool.c | 132 +++++--- .../net/ethernet/mellanox/mlx5/core/en_main.c | 230 +++++-------- .../ethernet/mellanox/mlx5/core/en_stats.h | 318 ++++++++++++++++++ include/linux/mlx5/device.h | 1 + 5 files changed, 482 insertions(+), 439 deletions(-) create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/en_stats.h diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 6e24e821a1d8..e903eff2574f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -46,6 +46,7 @@ #include #include "wq.h" #include "mlx5_core.h" +#include "en_stats.h" #define MLX5E_MAX_NUM_TC 8 @@ -148,245 +149,6 @@ struct mlx5e_umr_wqe { #define MLX5E_MIN_BW_ALLOC 1 /* Min percentage of BW allocation */ #endif -static const char vport_strings[][ETH_GSTRING_LEN] = { - /* vport statistics */ - "rx_packets", - "rx_bytes", - "tx_packets", - "tx_bytes", - "rx_error_packets", - "rx_error_bytes", - "tx_error_packets", - "tx_error_bytes", - "rx_unicast_packets", - "rx_unicast_bytes", - "tx_unicast_packets", - "tx_unicast_bytes", - "rx_multicast_packets", - "rx_multicast_bytes", - "tx_multicast_packets", - "tx_multicast_bytes", - "rx_broadcast_packets", - "rx_broadcast_bytes", - "tx_broadcast_packets", - "tx_broadcast_bytes", - - /* SW counters */ - "tso_packets", - "tso_bytes", - "tso_inner_packets", - "tso_inner_bytes", - "lro_packets", - "lro_bytes", - "rx_csum_good", - "rx_csum_none", - "rx_csum_sw", - "tx_csum_offload", - "tx_csum_inner", - "tx_queue_stopped", - "tx_queue_wake", - "tx_queue_dropped", - "rx_wqe_err", - "rx_mpwqe_filler", - "rx_mpwqe_frag", - "rx_buff_alloc_err", -}; - -struct mlx5e_vport_stats { - /* HW counters */ - u64 rx_packets; - u64 rx_bytes; - u64 tx_packets; - u64 tx_bytes; - u64 rx_error_packets; - u64 rx_error_bytes; - u64 tx_error_packets; - u64 tx_error_bytes; - u64 rx_unicast_packets; - u64 rx_unicast_bytes; - u64 tx_unicast_packets; - u64 tx_unicast_bytes; - u64 rx_multicast_packets; - u64 rx_multicast_bytes; - u64 tx_multicast_packets; - u64 tx_multicast_bytes; - u64 rx_broadcast_packets; - u64 rx_broadcast_bytes; - u64 tx_broadcast_packets; - u64 tx_broadcast_bytes; - - /* SW counters */ - u64 tso_packets; - u64 tso_bytes; - u64 tso_inner_packets; - u64 tso_inner_bytes; - u64 lro_packets; - u64 lro_bytes; - u64 rx_csum_good; - u64 rx_csum_none; - u64 rx_csum_sw; - u64 tx_csum_offload; - u64 tx_csum_inner; - u64 tx_queue_stopped; - u64 tx_queue_wake; - u64 tx_queue_dropped; - u64 rx_wqe_err; - u64 rx_mpwqe_filler; - u64 rx_mpwqe_frag; - u64 rx_buff_alloc_err; - -#define NUM_VPORT_COUNTERS 38 -}; - -static const char pport_strings[][ETH_GSTRING_LEN] = { - /* IEEE802.3 counters */ - "frames_tx", - "frames_rx", - "check_seq_err", - "alignment_err", - "octets_tx", - "octets_received", - "multicast_xmitted", - "broadcast_xmitted", - "multicast_rx", - "broadcast_rx", - "in_range_len_errors", - "out_of_range_len", - "too_long_errors", - "symbol_err", - "mac_control_tx", - "mac_control_rx", - "unsupported_op_rx", - "pause_ctrl_rx", - "pause_ctrl_tx", - - /* RFC2863 counters */ - "in_octets", - "in_ucast_pkts", - "in_discards", - "in_errors", - "in_unknown_protos", - "out_octets", - "out_ucast_pkts", - "out_discards", - "out_errors", - "in_multicast_pkts", - "in_broadcast_pkts", - "out_multicast_pkts", - "out_broadcast_pkts", - - /* RFC2819 counters */ - "drop_events", - "octets", - 
"pkts", - "broadcast_pkts", - "multicast_pkts", - "crc_align_errors", - "undersize_pkts", - "oversize_pkts", - "fragments", - "jabbers", - "collisions", - "p64octets", - "p65to127octets", - "p128to255octets", - "p256to511octets", - "p512to1023octets", - "p1024to1518octets", - "p1519to2047octets", - "p2048to4095octets", - "p4096to8191octets", - "p8192to10239octets", -}; - -#define NUM_IEEE_802_3_COUNTERS 19 -#define NUM_RFC_2863_COUNTERS 13 -#define NUM_RFC_2819_COUNTERS 21 -#define NUM_PPORT_COUNTERS (NUM_IEEE_802_3_COUNTERS + \ - NUM_RFC_2863_COUNTERS + \ - NUM_RFC_2819_COUNTERS) - -struct mlx5e_pport_stats { - __be64 IEEE_802_3_counters[NUM_IEEE_802_3_COUNTERS]; - __be64 RFC_2863_counters[NUM_RFC_2863_COUNTERS]; - __be64 RFC_2819_counters[NUM_RFC_2819_COUNTERS]; -}; - -static const char qcounter_stats_strings[][ETH_GSTRING_LEN] = { - "rx_out_of_buffer", -}; - -struct mlx5e_qcounter_stats { - u32 rx_out_of_buffer; -#define NUM_Q_COUNTERS 1 -}; - -static const char rq_stats_strings[][ETH_GSTRING_LEN] = { - "packets", - "bytes", - "csum_none", - "csum_sw", - "lro_packets", - "lro_bytes", - "wqe_err", - "mpwqe_filler", - "mpwqe_frag", - "buff_alloc_err", -}; - -struct mlx5e_rq_stats { - u64 packets; - u64 bytes; - u64 csum_none; - u64 csum_sw; - u64 lro_packets; - u64 lro_bytes; - u64 wqe_err; - u64 mpwqe_filler; - u64 mpwqe_frag; - u64 buff_alloc_err; -#define NUM_RQ_STATS 10 -}; - -static const char sq_stats_strings[][ETH_GSTRING_LEN] = { - "packets", - "bytes", - "tso_packets", - "tso_bytes", - "tso_inner_packets", - "tso_inner_bytes", - "csum_offload_inner", - "nop", - "csum_offload_none", - "stopped", - "wake", - "dropped", -}; - -struct mlx5e_sq_stats { - /* commonly accessed in data path */ - u64 packets; - u64 bytes; - u64 tso_packets; - u64 tso_bytes; - u64 tso_inner_packets; - u64 tso_inner_bytes; - u64 csum_offload_inner; - u64 nop; - /* less likely accessed in data path */ - u64 csum_offload_none; - u64 stopped; - u64 wake; - u64 dropped; -#define NUM_SQ_STATS 12 -}; - -struct mlx5e_stats { - struct mlx5e_vport_stats vport; - struct mlx5e_pport_stats pport; - struct mlx5e_qcounter_stats qcnt; -}; - struct mlx5e_params { u8 log_sq_size; u8 rq_wq_type; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index 4077856aab76..f1649d543475 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -166,6 +166,12 @@ static const struct { }; #define MLX5E_NUM_Q_CNTRS(priv) (NUM_Q_COUNTERS * (!!priv->q_counter)) +#define MLX5E_NUM_RQ_STATS(priv) \ + (NUM_RQ_STATS * priv->params.num_channels * \ + test_bit(MLX5E_STATE_OPENED, &priv->state)) +#define MLX5E_NUM_SQ_STATS(priv) \ + (NUM_SQ_STATS * priv->params.num_channels * priv->params.num_tc * \ + test_bit(MLX5E_STATE_OPENED, &priv->state)) static int mlx5e_get_sset_count(struct net_device *dev, int sset) { @@ -173,21 +179,68 @@ static int mlx5e_get_sset_count(struct net_device *dev, int sset) switch (sset) { case ETH_SS_STATS: - return NUM_VPORT_COUNTERS + NUM_PPORT_COUNTERS + + return NUM_SW_COUNTERS + MLX5E_NUM_Q_CNTRS(priv) + - priv->params.num_channels * NUM_RQ_STATS + - priv->params.num_channels * priv->params.num_tc * - NUM_SQ_STATS; + NUM_VPORT_COUNTERS + NUM_PPORT_COUNTERS + + MLX5E_NUM_RQ_STATS(priv) + + MLX5E_NUM_SQ_STATS(priv); /* fallthrough */ default: return -EOPNOTSUPP; } } +static void mlx5e_fill_stats_strings(struct mlx5e_priv *priv, uint8_t *data) +{ + int i, j, tc, idx = 0; + + /* SW 
counters */ + for (i = 0; i < NUM_SW_COUNTERS; i++) + strcpy(data + (idx++) * ETH_GSTRING_LEN, sw_stats_desc[i].name); + + /* Q counters */ + for (i = 0; i < MLX5E_NUM_Q_CNTRS(priv); i++) + strcpy(data + (idx++) * ETH_GSTRING_LEN, q_stats_desc[i].name); + + /* VPORT counters */ + for (i = 0; i < NUM_VPORT_COUNTERS; i++) + strcpy(data + (idx++) * ETH_GSTRING_LEN, + vport_stats_desc[i].name); + + /* PPORT counters */ + for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++) + strcpy(data + (idx++) * ETH_GSTRING_LEN, + pport_802_3_stats_desc[i].name); + + for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++) + strcpy(data + (idx++) * ETH_GSTRING_LEN, + pport_2863_stats_desc[i].name); + + for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++) + strcpy(data + (idx++) * ETH_GSTRING_LEN, + pport_2819_stats_desc[i].name); + + if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) + return; + + /* per channel counters */ + for (i = 0; i < priv->params.num_channels; i++) + for (j = 0; j < NUM_RQ_STATS; j++) + sprintf(data + (idx++) * ETH_GSTRING_LEN, "rx%d_%s", i, + rq_stats_desc[j].name); + + for (tc = 0; tc < priv->params.num_tc; tc++) + for (i = 0; i < priv->params.num_channels; i++) + for (j = 0; j < NUM_SQ_STATS; j++) + sprintf(data + (idx++) * ETH_GSTRING_LEN, + "tx%d_%s", + priv->channeltc_to_txq_map[i][tc], + sq_stats_desc[j].name); +} + static void mlx5e_get_strings(struct net_device *dev, uint32_t stringset, uint8_t *data) { - int i, j, tc, idx = 0; struct mlx5e_priv *priv = netdev_priv(dev); switch (stringset) { @@ -198,35 +251,7 @@ static void mlx5e_get_strings(struct net_device *dev, break; case ETH_SS_STATS: - /* VPORT counters */ - for (i = 0; i < NUM_VPORT_COUNTERS; i++) - strcpy(data + (idx++) * ETH_GSTRING_LEN, - vport_strings[i]); - - /* Q counters */ - for (i = 0; i < MLX5E_NUM_Q_CNTRS(priv); i++) - strcpy(data + (idx++) * ETH_GSTRING_LEN, - qcounter_stats_strings[i]); - - /* PPORT counters */ - for (i = 0; i < NUM_PPORT_COUNTERS; i++) - strcpy(data + (idx++) * ETH_GSTRING_LEN, - pport_strings[i]); - - /* per channel counters */ - for (i = 0; i < priv->params.num_channels; i++) - for (j = 0; j < NUM_RQ_STATS; j++) - sprintf(data + (idx++) * ETH_GSTRING_LEN, - "rx%d_%s", i, rq_stats_strings[j]); - - for (tc = 0; tc < priv->params.num_tc; tc++) - for (i = 0; i < priv->params.num_channels; i++) - for (j = 0; j < NUM_SQ_STATS; j++) - sprintf(data + - (idx++) * ETH_GSTRING_LEN, - "tx%d_%s", - priv->channeltc_to_txq_map[i][tc], - sq_stats_strings[j]); + mlx5e_fill_stats_strings(priv, data); break; } } @@ -245,28 +270,45 @@ static void mlx5e_get_ethtool_stats(struct net_device *dev, mlx5e_update_stats(priv); mutex_unlock(&priv->state_lock); - for (i = 0; i < NUM_VPORT_COUNTERS; i++) - data[idx++] = ((u64 *)&priv->stats.vport)[i]; + for (i = 0; i < NUM_SW_COUNTERS; i++) + data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.sw, + sw_stats_desc, i); for (i = 0; i < MLX5E_NUM_Q_CNTRS(priv); i++) - data[idx++] = ((u32 *)&priv->stats.qcnt)[i]; + data[idx++] = MLX5E_READ_CTR32_CPU(&priv->stats.qcnt, + q_stats_desc, i); - for (i = 0; i < NUM_PPORT_COUNTERS; i++) - data[idx++] = be64_to_cpu(((__be64 *)&priv->stats.pport)[i]); + for (i = 0; i < NUM_VPORT_COUNTERS; i++) + data[idx++] = MLX5E_READ_CTR64_BE(priv->stats.vport.query_vport_out, + vport_stats_desc, i); + + for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++) + data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.IEEE_802_3_counters, + pport_802_3_stats_desc, i); + + for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++) + data[idx++] = 
MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2863_counters, + pport_2863_stats_desc, i); + + for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++) + data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2819_counters, + pport_2819_stats_desc, i); + + if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) + return; /* per channel counters */ for (i = 0; i < priv->params.num_channels; i++) for (j = 0; j < NUM_RQ_STATS; j++) - data[idx++] = !test_bit(MLX5E_STATE_OPENED, - &priv->state) ? 0 : - ((u64 *)&priv->channel[i]->rq.stats)[j]; + data[idx++] = + MLX5E_READ_CTR64_CPU(&priv->channel[i]->rq.stats, + rq_stats_desc, j); for (tc = 0; tc < priv->params.num_tc; tc++) for (i = 0; i < priv->params.num_channels; i++) for (j = 0; j < NUM_SQ_STATS; j++) - data[idx++] = !test_bit(MLX5E_STATE_OPENED, - &priv->state) ? 0 : - ((u64 *)&priv->channel[i]->sq[tc].stats)[j]; + data[idx++] = MLX5E_READ_CTR64_CPU(&priv->channel[i]->sq[tc].stats, + sq_stats_desc, j); } static void mlx5e_get_ringparam(struct net_device *dev, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 6270f8d539db..0c532367ff13 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -91,96 +91,15 @@ static void mlx5e_update_carrier_work(struct work_struct *work) mutex_unlock(&priv->state_lock); } -static void mlx5e_update_pport_counters(struct mlx5e_priv *priv) +static void mlx5e_update_sw_counters(struct mlx5e_priv *priv) { - struct mlx5_core_dev *mdev = priv->mdev; - struct mlx5e_pport_stats *s = &priv->stats.pport; - u32 *in; - u32 *out; - int sz = MLX5_ST_SZ_BYTES(ppcnt_reg); - - in = mlx5_vzalloc(sz); - out = mlx5_vzalloc(sz); - if (!in || !out) - goto free_out; - - MLX5_SET(ppcnt_reg, in, local_port, 1); - - MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP); - mlx5_core_access_reg(mdev, in, sz, out, - sz, MLX5_REG_PPCNT, 0, 0); - memcpy(s->IEEE_802_3_counters, - MLX5_ADDR_OF(ppcnt_reg, out, counter_set), - sizeof(s->IEEE_802_3_counters)); - - MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2863_COUNTERS_GROUP); - mlx5_core_access_reg(mdev, in, sz, out, - sz, MLX5_REG_PPCNT, 0, 0); - memcpy(s->RFC_2863_counters, - MLX5_ADDR_OF(ppcnt_reg, out, counter_set), - sizeof(s->RFC_2863_counters)); - - MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP); - mlx5_core_access_reg(mdev, in, sz, out, - sz, MLX5_REG_PPCNT, 0, 0); - memcpy(s->RFC_2819_counters, - MLX5_ADDR_OF(ppcnt_reg, out, counter_set), - sizeof(s->RFC_2819_counters)); - -free_out: - kvfree(in); - kvfree(out); -} - -static void mlx5e_update_q_counter(struct mlx5e_priv *priv) -{ - struct mlx5e_qcounter_stats *qcnt = &priv->stats.qcnt; - - if (!priv->q_counter) - return; - - mlx5_core_query_out_of_buffer(priv->mdev, priv->q_counter, - &qcnt->rx_out_of_buffer); -} - -void mlx5e_update_stats(struct mlx5e_priv *priv) -{ - struct mlx5_core_dev *mdev = priv->mdev; - struct mlx5e_vport_stats *s = &priv->stats.vport; + struct mlx5e_sw_stats *s = &priv->stats.sw; struct mlx5e_rq_stats *rq_stats; struct mlx5e_sq_stats *sq_stats; - u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)]; - u32 *out; - int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out); - u64 tx_offload_none; + u64 tx_offload_none = 0; int i, j; - out = mlx5_vzalloc(outlen); - if (!out) - return; - - /* Collect firts the SW counters and then HW for consistency */ - s->rx_packets = 0; - s->rx_bytes = 0; - s->tx_packets = 0; - s->tx_bytes = 0; - s->tso_packets = 0; - s->tso_bytes = 0; - s->tso_inner_packets 
= 0; - s->tso_inner_bytes = 0; - s->tx_queue_stopped = 0; - s->tx_queue_wake = 0; - s->tx_queue_dropped = 0; - s->tx_csum_inner = 0; - tx_offload_none = 0; - s->lro_packets = 0; - s->lro_bytes = 0; - s->rx_csum_none = 0; - s->rx_csum_sw = 0; - s->rx_wqe_err = 0; - s->rx_mpwqe_filler = 0; - s->rx_mpwqe_frag = 0; - s->rx_buff_alloc_err = 0; + memset(s, 0, sizeof(*s)); for (i = 0; i < priv->params.num_channels; i++) { rq_stats = &priv->channel[i]->rq.stats; @@ -212,7 +131,19 @@ void mlx5e_update_stats(struct mlx5e_priv *priv) } } - /* HW counters */ + /* Update calculated offload counters */ + s->tx_csum_offload = s->tx_packets - tx_offload_none - s->tx_csum_inner; + s->rx_csum_good = s->rx_packets - s->rx_csum_none - + s->rx_csum_sw; +} + +static void mlx5e_update_vport_counters(struct mlx5e_priv *priv) +{ + int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out); + u32 *out = (u32 *)priv->stats.vport.query_vport_out; + u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)]; + struct mlx5_core_dev *mdev = priv->mdev; + memset(in, 0, sizeof(in)); MLX5_SET(query_vport_counter_in, in, opcode, @@ -222,58 +153,56 @@ void mlx5e_update_stats(struct mlx5e_priv *priv) memset(out, 0, outlen); - if (mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen)) + mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen); +} + +static void mlx5e_update_pport_counters(struct mlx5e_priv *priv) +{ + struct mlx5e_pport_stats *pstats = &priv->stats.pport; + struct mlx5_core_dev *mdev = priv->mdev; + int sz = MLX5_ST_SZ_BYTES(ppcnt_reg); + void *out; + u32 *in; + + in = mlx5_vzalloc(sz); + if (!in) goto free_out; -#define MLX5_GET_CTR(p, x) \ - MLX5_GET64(query_vport_counter_out, p, x) + MLX5_SET(ppcnt_reg, in, local_port, 1); - s->rx_error_packets = - MLX5_GET_CTR(out, received_errors.packets); - s->rx_error_bytes = - MLX5_GET_CTR(out, received_errors.octets); - s->tx_error_packets = - MLX5_GET_CTR(out, transmit_errors.packets); - s->tx_error_bytes = - MLX5_GET_CTR(out, transmit_errors.octets); + out = pstats->IEEE_802_3_counters; + MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP); + mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0); - s->rx_unicast_packets = - MLX5_GET_CTR(out, received_eth_unicast.packets); - s->rx_unicast_bytes = - MLX5_GET_CTR(out, received_eth_unicast.octets); - s->tx_unicast_packets = - MLX5_GET_CTR(out, transmitted_eth_unicast.packets); - s->tx_unicast_bytes = - MLX5_GET_CTR(out, transmitted_eth_unicast.octets); + out = pstats->RFC_2863_counters; + MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2863_COUNTERS_GROUP); + mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0); - s->rx_multicast_packets = - MLX5_GET_CTR(out, received_eth_multicast.packets); - s->rx_multicast_bytes = - MLX5_GET_CTR(out, received_eth_multicast.octets); - s->tx_multicast_packets = - MLX5_GET_CTR(out, transmitted_eth_multicast.packets); - s->tx_multicast_bytes = - MLX5_GET_CTR(out, transmitted_eth_multicast.octets); - - s->rx_broadcast_packets = - MLX5_GET_CTR(out, received_eth_broadcast.packets); - s->rx_broadcast_bytes = - MLX5_GET_CTR(out, received_eth_broadcast.octets); - s->tx_broadcast_packets = - MLX5_GET_CTR(out, transmitted_eth_broadcast.packets); - s->tx_broadcast_bytes = - MLX5_GET_CTR(out, transmitted_eth_broadcast.octets); - - /* Update calculated offload counters */ - s->tx_csum_offload = s->tx_packets - tx_offload_none - s->tx_csum_inner; - s->rx_csum_good = s->rx_packets - s->rx_csum_none - - s->rx_csum_sw; - - mlx5e_update_pport_counters(priv); - mlx5e_update_q_counter(priv); + out = 
pstats->RFC_2819_counters; + MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP); + mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0); free_out: - kvfree(out); + kvfree(in); +} + +static void mlx5e_update_q_counter(struct mlx5e_priv *priv) +{ + struct mlx5e_qcounter_stats *qcnt = &priv->stats.qcnt; + + if (!priv->q_counter) + return; + + mlx5_core_query_out_of_buffer(priv->mdev, priv->q_counter, + &qcnt->rx_out_of_buffer); +} + +void mlx5e_update_stats(struct mlx5e_priv *priv) +{ + mlx5e_update_sw_counters(priv); + mlx5e_update_q_counter(priv); + mlx5e_update_vport_counters(priv); + mlx5e_update_pport_counters(priv); } static void mlx5e_update_stats_work(struct work_struct *work) @@ -2073,37 +2002,28 @@ static struct rtnl_link_stats64 * mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats) { struct mlx5e_priv *priv = netdev_priv(dev); + struct mlx5e_sw_stats *sstats = &priv->stats.sw; struct mlx5e_vport_stats *vstats = &priv->stats.vport; struct mlx5e_pport_stats *pstats = &priv->stats.pport; - stats->rx_packets = vstats->rx_packets; - stats->rx_bytes = vstats->rx_bytes; - stats->tx_packets = vstats->tx_packets; - stats->tx_bytes = vstats->tx_bytes; - -#define PPCNT_GET_802_3_CTR(fld) \ - (MLX5_GET64(eth_802_3_cntrs_grp_data_layout, \ - pstats->IEEE_802_3_counters, fld##_high)) - -#define PPCNT_GET_2863_CTR(fld) \ - (MLX5_GET64(eth_2863_cntrs_grp_data_layout, \ - pstats->RFC_2863_counters, fld##_high)) + stats->rx_packets = sstats->rx_packets; + stats->rx_bytes = sstats->rx_bytes; + stats->tx_packets = sstats->tx_packets; + stats->tx_bytes = sstats->tx_bytes; stats->rx_dropped = priv->stats.qcnt.rx_out_of_buffer; - stats->tx_dropped = vstats->tx_queue_dropped; + stats->tx_dropped = sstats->tx_queue_dropped; stats->rx_length_errors = - PPCNT_GET_802_3_CTR(a_in_range_length_errors) + - PPCNT_GET_802_3_CTR(a_out_of_range_length_field) + - PPCNT_GET_802_3_CTR(a_frame_too_long_errors); + PPORT_802_3_GET(pstats, a_in_range_length_errors) + + PPORT_802_3_GET(pstats, a_out_of_range_length_field) + + PPORT_802_3_GET(pstats, a_frame_too_long_errors); stats->rx_crc_errors = - PPCNT_GET_802_3_CTR(a_frame_check_sequence_errors); - stats->rx_frame_errors = - PPCNT_GET_802_3_CTR(a_alignment_errors); - stats->tx_aborted_errors = - PPCNT_GET_2863_CTR(if_out_discards); + PPORT_802_3_GET(pstats, a_frame_check_sequence_errors); + stats->rx_frame_errors = PPORT_802_3_GET(pstats, a_alignment_errors); + stats->tx_aborted_errors = PPORT_2863_GET(pstats, if_out_discards); stats->tx_carrier_errors = - PPCNT_GET_802_3_CTR(a_symbol_error_during_carrier); + PPORT_802_3_GET(pstats, a_symbol_error_during_carrier); stats->rx_errors = stats->rx_length_errors + stats->rx_crc_errors + stats->rx_frame_errors; stats->tx_errors = stats->tx_aborted_errors + stats->tx_carrier_errors; @@ -2111,8 +2031,8 @@ mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats) /* vport multicast also counts packets that are dropped due to steering * or rx out of buffer */ - stats->multicast = vstats->rx_multicast_packets; - + stats->multicast = + VPORT_COUNTER_GET(vstats, received_eth_multicast.packets); return stats; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h new file mode 100644 index 000000000000..116320d8fc42 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h @@ -0,0 +1,318 @@ +/* + * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved. 
+ * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef __MLX5_EN_STATS_H__ +#define __MLX5_EN_STATS_H__ + +#define MLX5E_READ_CTR64_CPU(ptr, dsc, i) \ + (*(u64 *)((char *)ptr + dsc[i].offset)) +#define MLX5E_READ_CTR64_BE(ptr, dsc, i) \ + be64_to_cpu(*(__be64 *)((char *)ptr + dsc[i].offset)) +#define MLX5E_READ_CTR32_CPU(ptr, dsc, i) \ + (*(u32 *)((char *)ptr + dsc[i].offset)) +#define MLX5E_READ_CTR32_BE(ptr, dsc, i) \ + be64_to_cpu(*(__be32 *)((char *)ptr + dsc[i].offset)) + +#define MLX5E_DECLARE_STAT(type, fld) #fld, offsetof(type, fld) + +struct counter_desc { + char name[ETH_GSTRING_LEN]; + int offset; /* Byte offset */ +}; + +struct mlx5e_sw_stats { + u64 rx_packets; + u64 rx_bytes; + u64 tx_packets; + u64 tx_bytes; + u64 tso_packets; + u64 tso_bytes; + u64 tso_inner_packets; + u64 tso_inner_bytes; + u64 lro_packets; + u64 lro_bytes; + u64 rx_csum_good; + u64 rx_csum_none; + u64 rx_csum_sw; + u64 tx_csum_offload; + u64 tx_csum_inner; + u64 tx_queue_stopped; + u64 tx_queue_wake; + u64 tx_queue_dropped; + u64 rx_wqe_err; + u64 rx_mpwqe_filler; + u64 rx_mpwqe_frag; + u64 rx_buff_alloc_err; +}; + +static const struct counter_desc sw_stats_desc[] = { + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_packets) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_bytes) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_packets) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_bytes) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tso_packets) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tso_bytes) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tso_inner_packets) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tso_inner_bytes) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, lro_packets) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, lro_bytes) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_good) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_none) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_sw) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_offload) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_inner) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_stopped) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_wake) }, + { 
MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_dropped) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_wqe_err) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_frag) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_buff_alloc_err) }, +}; + +struct mlx5e_qcounter_stats { + u32 rx_out_of_buffer; +}; + +static const struct counter_desc q_stats_desc[] = { + { MLX5E_DECLARE_STAT(struct mlx5e_qcounter_stats, rx_out_of_buffer) }, +}; + +#define VPORT_COUNTER_OFF(c) MLX5_BYTE_OFF(query_vport_counter_out, c) +#define VPORT_COUNTER_GET(vstats, c) MLX5_GET64(query_vport_counter_out, \ + vstats->query_vport_out, c) + +struct mlx5e_vport_stats { + __be64 query_vport_out[MLX5_ST_SZ_QW(query_vport_counter_out)]; +}; + +static const struct counter_desc vport_stats_desc[] = { + { "rx_error_packets", VPORT_COUNTER_OFF(received_errors.packets) }, + { "rx_error_bytes", VPORT_COUNTER_OFF(received_errors.octets) }, + { "tx_error_packets", VPORT_COUNTER_OFF(transmit_errors.packets) }, + { "tx_error_bytes", VPORT_COUNTER_OFF(transmit_errors.octets) }, + { "rx_unicast_packets", + VPORT_COUNTER_OFF(received_eth_unicast.packets) }, + { "rx_unicast_bytes", VPORT_COUNTER_OFF(received_eth_unicast.octets) }, + { "tx_unicast_packets", + VPORT_COUNTER_OFF(transmitted_eth_unicast.packets) }, + { "tx_unicast_bytes", + VPORT_COUNTER_OFF(transmitted_eth_unicast.octets) }, + { "rx_multicast_packets", + VPORT_COUNTER_OFF(received_eth_multicast.packets) }, + { "rx_multicast_bytes", + VPORT_COUNTER_OFF(received_eth_multicast.octets) }, + { "tx_multicast_packets", + VPORT_COUNTER_OFF(transmitted_eth_multicast.packets) }, + { "tx_multicast_bytes", + VPORT_COUNTER_OFF(transmitted_eth_multicast.octets) }, + { "rx_broadcast_packets", + VPORT_COUNTER_OFF(received_eth_broadcast.packets) }, + { "rx_broadcast_bytes", + VPORT_COUNTER_OFF(received_eth_broadcast.octets) }, + { "tx_broadcast_packets", + VPORT_COUNTER_OFF(transmitted_eth_broadcast.packets) }, + { "tx_broadcast_bytes", + VPORT_COUNTER_OFF(transmitted_eth_broadcast.octets) }, +}; + +#define PPORT_802_3_OFF(c) \ + MLX5_BYTE_OFF(ppcnt_reg, \ + counter_set.eth_802_3_cntrs_grp_data_layout.c##_high) +#define PPORT_802_3_GET(pstats, c) \ + MLX5_GET64(ppcnt_reg, pstats->IEEE_802_3_counters, \ + counter_set.eth_802_3_cntrs_grp_data_layout.c##_high) +#define PPORT_2863_OFF(c) \ + MLX5_BYTE_OFF(ppcnt_reg, \ + counter_set.eth_2863_cntrs_grp_data_layout.c##_high) +#define PPORT_2863_GET(pstats, c) \ + MLX5_GET64(ppcnt_reg, pstats->RFC_2863_counters, \ + counter_set.eth_2863_cntrs_grp_data_layout.c##_high) +#define PPORT_2819_OFF(c) \ + MLX5_BYTE_OFF(ppcnt_reg, \ + counter_set.eth_2819_cntrs_grp_data_layout.c##_high) +#define PPORT_2819_GET(pstats, c) \ + MLX5_GET64(ppcnt_reg, pstats->RFC_2819_counters, \ + counter_set.eth_2819_cntrs_grp_data_layout.c##_high) + +struct mlx5e_pport_stats { + __be64 IEEE_802_3_counters[MLX5_ST_SZ_QW(ppcnt_reg)]; + __be64 RFC_2863_counters[MLX5_ST_SZ_QW(ppcnt_reg)]; + __be64 RFC_2819_counters[MLX5_ST_SZ_QW(ppcnt_reg)]; +}; + +static const struct counter_desc pport_802_3_stats_desc[] = { + { "frames_tx", PPORT_802_3_OFF(a_frames_transmitted_ok) }, + { "frames_rx", PPORT_802_3_OFF(a_frames_received_ok) }, + { "check_seq_err", PPORT_802_3_OFF(a_frame_check_sequence_errors) }, + { "alignment_err", PPORT_802_3_OFF(a_alignment_errors) }, + { "octets_tx", PPORT_802_3_OFF(a_octets_transmitted_ok) }, + { "octets_received", PPORT_802_3_OFF(a_octets_received_ok) }, + { 
"multicast_xmitted", PPORT_802_3_OFF(a_multicast_frames_xmitted_ok) }, + { "broadcast_xmitted", PPORT_802_3_OFF(a_broadcast_frames_xmitted_ok) }, + { "multicast_rx", PPORT_802_3_OFF(a_multicast_frames_received_ok) }, + { "broadcast_rx", PPORT_802_3_OFF(a_broadcast_frames_received_ok) }, + { "in_range_len_errors", PPORT_802_3_OFF(a_in_range_length_errors) }, + { "out_of_range_len", PPORT_802_3_OFF(a_out_of_range_length_field) }, + { "too_long_errors", PPORT_802_3_OFF(a_frame_too_long_errors) }, + { "symbol_err", PPORT_802_3_OFF(a_symbol_error_during_carrier) }, + { "mac_control_tx", PPORT_802_3_OFF(a_mac_control_frames_transmitted) }, + { "mac_control_rx", PPORT_802_3_OFF(a_mac_control_frames_received) }, + { "unsupported_op_rx", + PPORT_802_3_OFF(a_unsupported_opcodes_received) }, + { "pause_ctrl_rx", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_received) }, + { "pause_ctrl_tx", + PPORT_802_3_OFF(a_pause_mac_ctrl_frames_transmitted) }, +}; + +static const struct counter_desc pport_2863_stats_desc[] = { + { "in_octets", PPORT_2863_OFF(if_in_octets) }, + { "in_ucast_pkts", PPORT_2863_OFF(if_in_ucast_pkts) }, + { "in_discards", PPORT_2863_OFF(if_in_discards) }, + { "in_errors", PPORT_2863_OFF(if_in_errors) }, + { "in_unknown_protos", PPORT_2863_OFF(if_in_unknown_protos) }, + { "out_octets", PPORT_2863_OFF(if_out_octets) }, + { "out_ucast_pkts", PPORT_2863_OFF(if_out_ucast_pkts) }, + { "out_discards", PPORT_2863_OFF(if_out_discards) }, + { "out_errors", PPORT_2863_OFF(if_out_errors) }, + { "in_multicast_pkts", PPORT_2863_OFF(if_in_multicast_pkts) }, + { "in_broadcast_pkts", PPORT_2863_OFF(if_in_broadcast_pkts) }, + { "out_multicast_pkts", PPORT_2863_OFF(if_out_multicast_pkts) }, + { "out_broadcast_pkts", PPORT_2863_OFF(if_out_broadcast_pkts) }, +}; + +static const struct counter_desc pport_2819_stats_desc[] = { + { "drop_events", PPORT_2819_OFF(ether_stats_drop_events) }, + { "octets", PPORT_2819_OFF(ether_stats_octets) }, + { "pkts", PPORT_2819_OFF(ether_stats_pkts) }, + { "broadcast_pkts", PPORT_2819_OFF(ether_stats_broadcast_pkts) }, + { "multicast_pkts", PPORT_2819_OFF(ether_stats_multicast_pkts) }, + { "crc_align_errors", PPORT_2819_OFF(ether_stats_crc_align_errors) }, + { "undersize_pkts", PPORT_2819_OFF(ether_stats_undersize_pkts) }, + { "oversize_pkts", PPORT_2819_OFF(ether_stats_oversize_pkts) }, + { "fragments", PPORT_2819_OFF(ether_stats_fragments) }, + { "jabbers", PPORT_2819_OFF(ether_stats_jabbers) }, + { "collisions", PPORT_2819_OFF(ether_stats_collisions) }, + { "p64octets", PPORT_2819_OFF(ether_stats_pkts64octets) }, + { "p65to127octets", PPORT_2819_OFF(ether_stats_pkts65to127octets) }, + { "p128to255octets", PPORT_2819_OFF(ether_stats_pkts128to255octets) }, + { "p256to511octets", PPORT_2819_OFF(ether_stats_pkts256to511octets) }, + { "p512to1023octets", PPORT_2819_OFF(ether_stats_pkts512to1023octets) }, + { "p1024to1518octets", + PPORT_2819_OFF(ether_stats_pkts1024to1518octets) }, + { "p1519to2047octets", + PPORT_2819_OFF(ether_stats_pkts1519to2047octets) }, + { "p2048to4095octets", + PPORT_2819_OFF(ether_stats_pkts2048to4095octets) }, + { "p4096to8191octets", + PPORT_2819_OFF(ether_stats_pkts4096to8191octets) }, + { "p8192to10239octets", + PPORT_2819_OFF(ether_stats_pkts8192to10239octets) }, +}; + +struct mlx5e_rq_stats { + u64 packets; + u64 bytes; + u64 csum_none; + u64 csum_sw; + u64 lro_packets; + u64 lro_bytes; + u64 wqe_err; + u64 mpwqe_filler; + u64 mpwqe_frag; + u64 buff_alloc_err; +}; + +static const struct counter_desc rq_stats_desc[] = { + { MLX5E_DECLARE_STAT(struct 
mlx5e_rq_stats, packets) }, + { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, bytes) }, + { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, csum_none) }, + { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, csum_sw) }, + { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, lro_packets) }, + { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, lro_bytes) }, + { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, wqe_err) }, + { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, mpwqe_filler) }, + { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, mpwqe_frag) }, + { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, buff_alloc_err) }, +}; + +struct mlx5e_sq_stats { + /* commonly accessed in data path */ + u64 packets; + u64 bytes; + u64 tso_packets; + u64 tso_bytes; + u64 tso_inner_packets; + u64 tso_inner_bytes; + u64 csum_offload_inner; + u64 nop; + /* less likely accessed in data path */ + u64 csum_offload_none; + u64 stopped; + u64 wake; + u64 dropped; +}; + +static const struct counter_desc sq_stats_desc[] = { + { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, packets) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, bytes) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, tso_packets) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, tso_bytes) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, tso_inner_packets) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, tso_inner_bytes) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, csum_offload_inner) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, nop) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, csum_offload_none) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, stopped) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, wake) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, dropped) }, +}; + +#define NUM_SW_COUNTERS ARRAY_SIZE(sw_stats_desc) +#define NUM_Q_COUNTERS ARRAY_SIZE(q_stats_desc) +#define NUM_VPORT_COUNTERS ARRAY_SIZE(vport_stats_desc) +#define NUM_PPORT_802_3_COUNTERS ARRAY_SIZE(pport_802_3_stats_desc) +#define NUM_PPORT_2863_COUNTERS ARRAY_SIZE(pport_2863_stats_desc) +#define NUM_PPORT_2819_COUNTERS ARRAY_SIZE(pport_2819_stats_desc) +#define NUM_PPORT_COUNTERS (NUM_PPORT_802_3_COUNTERS + \ + NUM_PPORT_2863_COUNTERS + \ + NUM_PPORT_2819_COUNTERS) +#define NUM_RQ_STATS ARRAY_SIZE(rq_stats_desc) +#define NUM_SQ_STATS ARRAY_SIZE(sq_stats_desc) + +struct mlx5e_stats { + struct mlx5e_sw_stats sw; + struct mlx5e_qcounter_stats qcnt; + struct mlx5e_vport_stats vport; + struct mlx5e_pport_stats pport; +}; + +#endif /* __MLX5_EN_STATS_H__ */ diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index 03f8d719b680..8be44ca777ed 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -59,6 +59,7 @@ #define MLX5_FLD_SZ_BYTES(typ, fld) (__mlx5_bit_sz(typ, fld) / 8) #define MLX5_ST_SZ_BYTES(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 8) #define MLX5_ST_SZ_DW(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 32) +#define MLX5_ST_SZ_QW(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 64) #define MLX5_UN_SZ_BYTES(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 8) #define MLX5_UN_SZ_DW(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 32) #define MLX5_BYTE_OFF(typ, fld) (__mlx5_bit_off(typ, fld) / 8) From 8075cb72382bf854a3a95f74ea4f9d19ebe29fd5 Mon Sep 17 00:00:00 2001 From: Gal Pressman Date: Sun, 24 Apr 2016 22:51:48 +0300 Subject: [PATCH 1090/1649] net/mlx5e: Rename VPort counters VPort and software counters names are confusing and may be unclear, all VPort counters now have a prefix of rx/tx_vport_*. Signed-off-by: Gal Pressman Signed-off-by: Saeed Mahameed Signed-off-by: David S. 
Miller --- .../ethernet/mellanox/mlx5/core/en_stats.h | 35 ++++++++++--------- 1 file changed, 19 insertions(+), 16 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h index 116320d8fc42..4f3a08d7e8ed 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h @@ -115,32 +115,35 @@ struct mlx5e_vport_stats { }; static const struct counter_desc vport_stats_desc[] = { - { "rx_error_packets", VPORT_COUNTER_OFF(received_errors.packets) }, - { "rx_error_bytes", VPORT_COUNTER_OFF(received_errors.octets) }, - { "tx_error_packets", VPORT_COUNTER_OFF(transmit_errors.packets) }, - { "tx_error_bytes", VPORT_COUNTER_OFF(transmit_errors.octets) }, - { "rx_unicast_packets", + { "rx_vport_error_packets", + VPORT_COUNTER_OFF(received_errors.packets) }, + { "rx_vport_error_bytes", VPORT_COUNTER_OFF(received_errors.octets) }, + { "tx_vport_error_packets", + VPORT_COUNTER_OFF(transmit_errors.packets) }, + { "tx_vport_error_bytes", VPORT_COUNTER_OFF(transmit_errors.octets) }, + { "rx_vport_unicast_packets", VPORT_COUNTER_OFF(received_eth_unicast.packets) }, - { "rx_unicast_bytes", VPORT_COUNTER_OFF(received_eth_unicast.octets) }, - { "tx_unicast_packets", + { "rx_vport_unicast_bytes", + VPORT_COUNTER_OFF(received_eth_unicast.octets) }, + { "tx_vport_unicast_packets", VPORT_COUNTER_OFF(transmitted_eth_unicast.packets) }, - { "tx_unicast_bytes", + { "tx_vport_unicast_bytes", VPORT_COUNTER_OFF(transmitted_eth_unicast.octets) }, - { "rx_multicast_packets", + { "rx_vport_multicast_packets", VPORT_COUNTER_OFF(received_eth_multicast.packets) }, - { "rx_multicast_bytes", + { "rx_vport_multicast_bytes", VPORT_COUNTER_OFF(received_eth_multicast.octets) }, - { "tx_multicast_packets", + { "tx_vport_multicast_packets", VPORT_COUNTER_OFF(transmitted_eth_multicast.packets) }, - { "tx_multicast_bytes", + { "tx_vport_multicast_bytes", VPORT_COUNTER_OFF(transmitted_eth_multicast.octets) }, - { "rx_broadcast_packets", + { "rx_vport_broadcast_packets", VPORT_COUNTER_OFF(received_eth_broadcast.packets) }, - { "rx_broadcast_bytes", + { "rx_vport_broadcast_bytes", VPORT_COUNTER_OFF(received_eth_broadcast.octets) }, - { "tx_broadcast_packets", + { "tx_vport_broadcast_packets", VPORT_COUNTER_OFF(transmitted_eth_broadcast.packets) }, - { "tx_broadcast_bytes", + { "tx_vport_broadcast_bytes", VPORT_COUNTER_OFF(transmitted_eth_broadcast.octets) }, }; From cf678570d5a1022c4c4dbda7792f2a36f0b9fec0 Mon Sep 17 00:00:00 2001 From: Gal Pressman Date: Sun, 24 Apr 2016 22:51:49 +0300 Subject: [PATCH 1091/1649] net/mlx5e: Add per priority group to PPort counters Expose counters providing information for each priority level (PCP) through ethtool -S option and DCBNL. This includes rx/tx bytes, frames, and pause counters. Signed-off-by: Gal Pressman Signed-off-by: Saeed Mahameed Signed-off-by: David S. 
Miller --- .../ethernet/mellanox/mlx5/core/en_dcbnl.c | 6 +++ .../ethernet/mellanox/mlx5/core/en_ethtool.c | 51 +++++++++++++++++-- .../net/ethernet/mellanox/mlx5/core/en_main.c | 9 ++++ .../ethernet/mellanox/mlx5/core/en_stats.h | 31 ++++++++++- 4 files changed, 93 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c index 3036f279a8fd..b2db180ae2a5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c @@ -174,8 +174,14 @@ static int mlx5e_dcbnl_ieee_getpfc(struct net_device *dev, { struct mlx5e_priv *priv = netdev_priv(dev); struct mlx5_core_dev *mdev = priv->mdev; + struct mlx5e_pport_stats *pstats = &priv->stats.pport; + int i; pfc->pfc_cap = mlx5_max_tc(mdev) + 1; + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + pfc->requests[i] = PPORT_PER_PRIO_GET(pstats, i, tx_pause); + pfc->indications[i] = PPORT_PER_PRIO_GET(pstats, i, rx_pause); + } return mlx5_query_port_pfc(mdev, &pfc->pfc_en, NULL); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index f1649d543475..522d584bc05f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -165,6 +165,18 @@ static const struct { }, }; +static unsigned long mlx5e_query_pfc_combined(struct mlx5e_priv *priv) +{ + struct mlx5_core_dev *mdev = priv->mdev; + u8 pfc_en_tx; + u8 pfc_en_rx; + int err; + + err = mlx5_query_port_pfc(mdev, &pfc_en_tx, &pfc_en_rx); + + return err ? 0 : pfc_en_tx | pfc_en_rx; +} + #define MLX5E_NUM_Q_CNTRS(priv) (NUM_Q_COUNTERS * (!!priv->q_counter)) #define MLX5E_NUM_RQ_STATS(priv) \ (NUM_RQ_STATS * priv->params.num_channels * \ @@ -172,6 +184,7 @@ static const struct { #define MLX5E_NUM_SQ_STATS(priv) \ (NUM_SQ_STATS * priv->params.num_channels * priv->params.num_tc * \ test_bit(MLX5E_STATE_OPENED, &priv->state)) +#define MLX5E_NUM_PFC_COUNTERS(priv) hweight8(mlx5e_query_pfc_combined(priv)) static int mlx5e_get_sset_count(struct net_device *dev, int sset) { @@ -183,7 +196,8 @@ static int mlx5e_get_sset_count(struct net_device *dev, int sset) MLX5E_NUM_Q_CNTRS(priv) + NUM_VPORT_COUNTERS + NUM_PPORT_COUNTERS + MLX5E_NUM_RQ_STATS(priv) + - MLX5E_NUM_SQ_STATS(priv); + MLX5E_NUM_SQ_STATS(priv) + + MLX5E_NUM_PFC_COUNTERS(priv); /* fallthrough */ default: return -EOPNOTSUPP; @@ -192,7 +206,8 @@ static int mlx5e_get_sset_count(struct net_device *dev, int sset) static void mlx5e_fill_stats_strings(struct mlx5e_priv *priv, uint8_t *data) { - int i, j, tc, idx = 0; + int i, j, tc, prio, idx = 0; + unsigned long pfc_combined; /* SW counters */ for (i = 0; i < NUM_SW_COUNTERS; i++) @@ -220,6 +235,21 @@ static void mlx5e_fill_stats_strings(struct mlx5e_priv *priv, uint8_t *data) strcpy(data + (idx++) * ETH_GSTRING_LEN, pport_2819_stats_desc[i].name); + for (prio = 0; prio < NUM_PPORT_PRIO; prio++) { + for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++) + sprintf(data + (idx++) * ETH_GSTRING_LEN, "prio%d_%s", + prio, + pport_per_prio_traffic_stats_desc[i].name); + } + + pfc_combined = mlx5e_query_pfc_combined(priv); + for_each_set_bit(prio, &pfc_combined, NUM_PPORT_PRIO) { + for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) { + sprintf(data + (idx++) * ETH_GSTRING_LEN, "prio%d_%s", + prio, pport_per_prio_pfc_stats_desc[i].name); + } + } + if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) return; @@ -260,7 +290,8 @@ static void 
mlx5e_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data) { struct mlx5e_priv *priv = netdev_priv(dev); - int i, j, tc, idx = 0; + int i, j, tc, prio, idx = 0; + unsigned long pfc_combined; if (!data) return; @@ -294,6 +325,20 @@ static void mlx5e_get_ethtool_stats(struct net_device *dev, data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2819_counters, pport_2819_stats_desc, i); + for (prio = 0; prio < NUM_PPORT_PRIO; prio++) { + for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++) + data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[prio], + pport_per_prio_traffic_stats_desc, i); + } + + pfc_combined = mlx5e_query_pfc_combined(priv); + for_each_set_bit(prio, &pfc_combined, NUM_PPORT_PRIO) { + for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) { + data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[prio], + pport_per_prio_pfc_stats_desc, i); + } + } + if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) return; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 0c532367ff13..ef66ba65f5cd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -161,6 +161,7 @@ static void mlx5e_update_pport_counters(struct mlx5e_priv *priv) struct mlx5e_pport_stats *pstats = &priv->stats.pport; struct mlx5_core_dev *mdev = priv->mdev; int sz = MLX5_ST_SZ_BYTES(ppcnt_reg); + int prio; void *out; u32 *in; @@ -182,6 +183,14 @@ static void mlx5e_update_pport_counters(struct mlx5e_priv *priv) MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP); mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0); + MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_PRIORITY_COUNTERS_GROUP); + for (prio = 0; prio < NUM_PPORT_PRIO; prio++) { + out = pstats->per_prio_counters[prio]; + MLX5_SET(ppcnt_reg, in, prio_tc, prio); + mlx5_core_access_reg(mdev, in, sz, out, sz, + MLX5_REG_PPCNT, 0, 0); + } + free_out: kvfree(in); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h index 4f3a08d7e8ed..de27eeaa3bd2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h @@ -165,11 +165,19 @@ static const struct counter_desc vport_stats_desc[] = { #define PPORT_2819_GET(pstats, c) \ MLX5_GET64(ppcnt_reg, pstats->RFC_2819_counters, \ counter_set.eth_2819_cntrs_grp_data_layout.c##_high) +#define PPORT_PER_PRIO_OFF(c) \ + MLX5_BYTE_OFF(ppcnt_reg, \ + counter_set.eth_per_prio_grp_data_layout.c##_high) +#define PPORT_PER_PRIO_GET(pstats, prio, c) \ + MLX5_GET64(ppcnt_reg, pstats->per_prio_counters[prio], \ + counter_set.eth_per_prio_grp_data_layout.c##_high) +#define NUM_PPORT_PRIO 8 struct mlx5e_pport_stats { __be64 IEEE_802_3_counters[MLX5_ST_SZ_QW(ppcnt_reg)]; __be64 RFC_2863_counters[MLX5_ST_SZ_QW(ppcnt_reg)]; __be64 RFC_2819_counters[MLX5_ST_SZ_QW(ppcnt_reg)]; + __be64 per_prio_counters[NUM_PPORT_PRIO][MLX5_ST_SZ_QW(ppcnt_reg)]; }; static const struct counter_desc pport_802_3_stats_desc[] = { @@ -241,6 +249,21 @@ static const struct counter_desc pport_2819_stats_desc[] = { PPORT_2819_OFF(ether_stats_pkts8192to10239octets) }, }; +static const struct counter_desc pport_per_prio_traffic_stats_desc[] = { + { "rx_octets", PPORT_PER_PRIO_OFF(rx_octets) }, + { "rx_frames", PPORT_PER_PRIO_OFF(rx_frames) }, + { "tx_octets", PPORT_PER_PRIO_OFF(tx_octets) }, + { "tx_frames", PPORT_PER_PRIO_OFF(tx_frames) 
}, +}; + +static const struct counter_desc pport_per_prio_pfc_stats_desc[] = { + { "rx_pause", PPORT_PER_PRIO_OFF(rx_pause) }, + { "rx_pause_duration", PPORT_PER_PRIO_OFF(rx_pause_duration) }, + { "tx_pause", PPORT_PER_PRIO_OFF(tx_pause) }, + { "tx_pause_duration", PPORT_PER_PRIO_OFF(tx_pause_duration) }, + { "rx_pause_transition", PPORT_PER_PRIO_OFF(rx_pause_transition) }, +}; + struct mlx5e_rq_stats { u64 packets; u64 bytes; @@ -305,9 +328,15 @@ static const struct counter_desc sq_stats_desc[] = { #define NUM_PPORT_802_3_COUNTERS ARRAY_SIZE(pport_802_3_stats_desc) #define NUM_PPORT_2863_COUNTERS ARRAY_SIZE(pport_2863_stats_desc) #define NUM_PPORT_2819_COUNTERS ARRAY_SIZE(pport_2819_stats_desc) +#define NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS \ + ARRAY_SIZE(pport_per_prio_traffic_stats_desc) +#define NUM_PPORT_PER_PRIO_PFC_COUNTERS \ + ARRAY_SIZE(pport_per_prio_pfc_stats_desc) #define NUM_PPORT_COUNTERS (NUM_PPORT_802_3_COUNTERS + \ NUM_PPORT_2863_COUNTERS + \ - NUM_PPORT_2819_COUNTERS) + NUM_PPORT_2819_COUNTERS + \ + NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS * \ + NUM_PPORT_PRIO) #define NUM_RQ_STATS ARRAY_SIZE(rq_stats_desc) #define NUM_SQ_STATS ARRAY_SIZE(sq_stats_desc) From 121fcdc84d8240d4dfe1f737befd5814b12623ee Mon Sep 17 00:00:00 2001 From: Gal Pressman Date: Sun, 24 Apr 2016 22:51:50 +0300 Subject: [PATCH 1092/1649] net/mlx5e: Add link down events counter Expose link_down_events counter through ethtool -S. This counter is read from PPort statistics, then proccessed and stored as a special handling software counter. This counter is stored along software counters since it is the only PPort counter that it's size is not 64 bits. Signed-off-by: Gal Pressman Signed-off-by: Saeed Mahameed Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 10 +++++++++- drivers/net/ethernet/mellanox/mlx5/core/en_stats.h | 5 +++++ include/linux/mlx5/device.h | 1 + 3 files changed, 15 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index ef66ba65f5cd..61e261c3d247 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -135,6 +135,10 @@ static void mlx5e_update_sw_counters(struct mlx5e_priv *priv) s->tx_csum_offload = s->tx_packets - tx_offload_none - s->tx_csum_inner; s->rx_csum_good = s->rx_packets - s->rx_csum_none - s->rx_csum_sw; + + s->link_down_events = MLX5_GET(ppcnt_reg, + priv->stats.pport.phy_counters, + counter_set.phys_layer_cntrs.link_down_events); } static void mlx5e_update_vport_counters(struct mlx5e_priv *priv) @@ -183,6 +187,10 @@ static void mlx5e_update_pport_counters(struct mlx5e_priv *priv) MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP); mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0); + out = pstats->phy_counters; + MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP); + mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0); + MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_PRIORITY_COUNTERS_GROUP); for (prio = 0; prio < NUM_PPORT_PRIO; prio++) { out = pstats->per_prio_counters[prio]; @@ -208,10 +216,10 @@ static void mlx5e_update_q_counter(struct mlx5e_priv *priv) void mlx5e_update_stats(struct mlx5e_priv *priv) { - mlx5e_update_sw_counters(priv); mlx5e_update_q_counter(priv); mlx5e_update_vport_counters(priv); mlx5e_update_pport_counters(priv); + mlx5e_update_sw_counters(priv); } static void mlx5e_update_stats_work(struct work_struct *work) diff --git 
a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h index de27eeaa3bd2..7cd8cb44b2ab 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h @@ -71,6 +71,9 @@ struct mlx5e_sw_stats { u64 rx_mpwqe_filler; u64 rx_mpwqe_frag; u64 rx_buff_alloc_err; + + /* Special handling counters */ + u64 link_down_events; }; static const struct counter_desc sw_stats_desc[] = { @@ -96,6 +99,7 @@ static const struct counter_desc sw_stats_desc[] = { { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_frag) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_buff_alloc_err) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, link_down_events) }, }; struct mlx5e_qcounter_stats { @@ -178,6 +182,7 @@ struct mlx5e_pport_stats { __be64 RFC_2863_counters[MLX5_ST_SZ_QW(ppcnt_reg)]; __be64 RFC_2819_counters[MLX5_ST_SZ_QW(ppcnt_reg)]; __be64 per_prio_counters[NUM_PPORT_PRIO][MLX5_ST_SZ_QW(ppcnt_reg)]; + __be64 phy_counters[MLX5_ST_SZ_QW(ppcnt_reg)]; }; static const struct counter_desc pport_802_3_stats_desc[] = { diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index 8be44ca777ed..942bccacd7b7 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -1369,6 +1369,7 @@ enum { MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP = 0x5, MLX5_PER_PRIORITY_COUNTERS_GROUP = 0x10, MLX5_PER_TRAFFIC_CLASS_COUNTERS_GROUP = 0x11, + MLX5_PHYSICAL_LAYER_COUNTERS_GROUP = 0x12, MLX5_INFINIBAND_PORT_COUNTERS_GROUP = 0x20, }; From 0e405443e803a3ce9ba22f11be37e2a74f3fb9ad Mon Sep 17 00:00:00 2001 From: Gal Pressman Date: Sun, 24 Apr 2016 22:51:51 +0300 Subject: [PATCH 1093/1649] net/mlx5e: Improve set features ndo resiliency In current mlx5e ndo_set_features implementation, setting some features can success while others can fail. Today, we return one error code which doesn't reflect the current features status of the netdev at the end of the ndo callback. Set netdev->features with features which were successfully set in order to keep the current status in case of failure. For this purpose, define new Macro to set/unset specific feature in netdev->features. This patch introduces a mechanism that uses feature handlers for each feature. Set features will call a generic handler, which will then call a specific handler in his turn and update netdev->features according to it's return value. Each specific handler is responsible to perform driver specific actions, and updating params if needed. Signed-off-by: Gal Pressman Signed-off-by: Saeed Mahameed Signed-off-by: David S. 
Miller --- .../net/ethernet/mellanox/mlx5/core/en_main.c | 110 +++++++++++++----- 1 file changed, 82 insertions(+), 28 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 61e261c3d247..d82bc6b697f9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -2078,50 +2078,104 @@ static int mlx5e_set_mac(struct net_device *netdev, void *addr) return 0; } -static int mlx5e_set_features(struct net_device *netdev, - netdev_features_t features) +#define MLX5E_SET_FEATURE(netdev, feature, enable) \ + do { \ + if (enable) \ + netdev->features |= feature; \ + else \ + netdev->features &= ~feature; \ + } while (0) + +typedef int (*mlx5e_feature_handler)(struct net_device *netdev, bool enable); + +static int set_feature_lro(struct net_device *netdev, bool enable) { struct mlx5e_priv *priv = netdev_priv(netdev); - int err = 0; - netdev_features_t changes = features ^ netdev->features; + bool was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state); + int err; mutex_lock(&priv->state_lock); - if (changes & NETIF_F_LRO) { - bool was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state); + if (was_opened && (priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST)) + mlx5e_close_locked(priv->netdev); - if (was_opened && (priv->params.rq_wq_type == - MLX5_WQ_TYPE_LINKED_LIST)) - mlx5e_close_locked(priv->netdev); - - priv->params.lro_en = !!(features & NETIF_F_LRO); - err = mlx5e_modify_tirs_lro(priv); - if (err) - mlx5_core_warn(priv->mdev, "lro modify failed, %d\n", - err); - - if (was_opened && (priv->params.rq_wq_type == - MLX5_WQ_TYPE_LINKED_LIST)) - err = mlx5e_open_locked(priv->netdev); + priv->params.lro_en = enable; + err = mlx5e_modify_tirs_lro(priv); + if (err) { + netdev_err(netdev, "lro modify failed, %d\n", err); + priv->params.lro_en = !enable; } + if (was_opened && (priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST)) + mlx5e_open_locked(priv->netdev); + mutex_unlock(&priv->state_lock); - if (changes & NETIF_F_HW_VLAN_CTAG_FILTER) { - if (features & NETIF_F_HW_VLAN_CTAG_FILTER) - mlx5e_enable_vlan_filter(priv); - else - mlx5e_disable_vlan_filter(priv); - } + return err; +} - if ((changes & NETIF_F_HW_TC) && !(features & NETIF_F_HW_TC) && - mlx5e_tc_num_filters(priv)) { +static int set_feature_vlan_filter(struct net_device *netdev, bool enable) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + + if (enable) + mlx5e_enable_vlan_filter(priv); + else + mlx5e_disable_vlan_filter(priv); + + return 0; +} + +static int set_feature_tc_num_filters(struct net_device *netdev, bool enable) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + + if (!enable && mlx5e_tc_num_filters(priv)) { netdev_err(netdev, "Active offloaded tc filters, can't turn hw_tc_offload off\n"); return -EINVAL; } - return err; + return 0; +} + +static int mlx5e_handle_feature(struct net_device *netdev, + netdev_features_t wanted_features, + netdev_features_t feature, + mlx5e_feature_handler feature_handler) +{ + netdev_features_t changes = wanted_features ^ netdev->features; + bool enable = !!(wanted_features & feature); + int err; + + if (!(changes & feature)) + return 0; + + err = feature_handler(netdev, enable); + if (err) { + netdev_err(netdev, "%s feature 0x%llx failed err %d\n", + enable ? 
"Enable" : "Disable", feature, err); + return err; + } + + MLX5E_SET_FEATURE(netdev, feature, enable); + return 0; +} + +static int mlx5e_set_features(struct net_device *netdev, + netdev_features_t features) +{ + int err; + + err = mlx5e_handle_feature(netdev, features, NETIF_F_LRO, + set_feature_lro); + err |= mlx5e_handle_feature(netdev, features, + NETIF_F_HW_VLAN_CTAG_FILTER, + set_feature_vlan_filter); + err |= mlx5e_handle_feature(netdev, features, NETIF_F_HW_TC, + set_feature_tc_num_filters); + + return err ? -EINVAL : 0; } static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu) From 94cb1ebbafd509210887eea6ced55c40da7b4baa Mon Sep 17 00:00:00 2001 From: Eran Ben Elisha Date: Sun, 24 Apr 2016 22:51:52 +0300 Subject: [PATCH 1094/1649] net/mlx5e: Add support for RXALL netdev feature Introduce new access register named Ports Check Mask Register (PCMR) to control all HW checks on port. With this register, the driver can enable/disable Hardware FCS validation. When RXALL is enabled/disabled using ndo_set_features, enable/disable fcs check at HW. User can change HW configuration using rx-all flag at ethtool. Signed-off-by: Eran Ben Elisha Signed-off-by: Gal Pressman Signed-off-by: Saeed Mahameed Signed-off-by: David S. Miller --- .../net/ethernet/mellanox/mlx5/core/en_main.c | 20 ++++++++ .../net/ethernet/mellanox/mlx5/core/port.c | 49 +++++++++++++++++++ include/linux/mlx5/driver.h | 1 + include/linux/mlx5/port.h | 4 ++ 4 files changed, 74 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index d82bc6b697f9..ad0cb4aa593b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -2139,6 +2139,14 @@ static int set_feature_tc_num_filters(struct net_device *netdev, bool enable) return 0; } +static int set_feature_rx_all(struct net_device *netdev, bool enable) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + struct mlx5_core_dev *mdev = priv->mdev; + + return mlx5_set_port_fcs(mdev, !enable); +} + static int mlx5e_handle_feature(struct net_device *netdev, netdev_features_t wanted_features, netdev_features_t feature, @@ -2174,6 +2182,8 @@ static int mlx5e_set_features(struct net_device *netdev, set_feature_vlan_filter); err |= mlx5e_handle_feature(netdev, features, NETIF_F_HW_TC, set_feature_tc_num_filters); + err |= mlx5e_handle_feature(netdev, features, NETIF_F_RXALL, + set_feature_rx_all); return err ? 
-EINVAL : 0; } @@ -2564,6 +2574,8 @@ static void mlx5e_build_netdev(struct net_device *netdev) { struct mlx5e_priv *priv = netdev_priv(netdev); struct mlx5_core_dev *mdev = priv->mdev; + bool fcs_supported; + bool fcs_enabled; SET_NETDEV_DEV(netdev, &mdev->pdev->dev); @@ -2607,10 +2619,18 @@ static void mlx5e_build_netdev(struct net_device *netdev) netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL; } + mlx5_query_port_fcs(mdev, &fcs_supported, &fcs_enabled); + + if (fcs_supported) + netdev->hw_features |= NETIF_F_RXALL; + netdev->features = netdev->hw_features; if (!priv->params.lro_en) netdev->features &= ~NETIF_F_LRO; + if (fcs_enabled) + netdev->features &= ~NETIF_F_RXALL; + #define FT_CAP(f) MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.f) if (FT_CAP(flow_modify_en) && FT_CAP(modify_root) && diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c index ae378c575deb..c37740f30fbe 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c @@ -607,3 +607,52 @@ int mlx5_query_port_wol(struct mlx5_core_dev *mdev, u8 *wol_mode) return err; } EXPORT_SYMBOL_GPL(mlx5_query_port_wol); + +static int mlx5_query_ports_check(struct mlx5_core_dev *mdev, u32 *out, + int outlen) +{ + u32 in[MLX5_ST_SZ_DW(pcmr_reg)]; + + memset(in, 0, sizeof(in)); + MLX5_SET(pcmr_reg, in, local_port, 1); + + return mlx5_core_access_reg(mdev, in, sizeof(in), out, + outlen, MLX5_REG_PCMR, 0, 0); +} + +static int mlx5_set_ports_check(struct mlx5_core_dev *mdev, u32 *in, int inlen) +{ + u32 out[MLX5_ST_SZ_DW(pcmr_reg)]; + + return mlx5_core_access_reg(mdev, in, inlen, out, + sizeof(out), MLX5_REG_PCMR, 0, 1); +} + +int mlx5_set_port_fcs(struct mlx5_core_dev *mdev, u8 enable) +{ + u32 in[MLX5_ST_SZ_DW(pcmr_reg)]; + + memset(in, 0, sizeof(in)); + MLX5_SET(pcmr_reg, in, local_port, 1); + MLX5_SET(pcmr_reg, in, fcs_chk, enable); + + return mlx5_set_ports_check(mdev, in, sizeof(in)); +} + +void mlx5_query_port_fcs(struct mlx5_core_dev *mdev, bool *supported, + bool *enabled) +{ + u32 out[MLX5_ST_SZ_DW(pcmr_reg)]; + /* Default values for FW which do not support MLX5_REG_PCMR */ + *supported = false; + *enabled = true; + + if (!MLX5_CAP_GEN(mdev, ports_check)) + return; + + if (mlx5_query_ports_check(mdev, out, sizeof(out))) + return; + + *supported = !!(MLX5_GET(pcmr_reg, out, fcs_cap)); + *enabled = !!(MLX5_GET(pcmr_reg, out, fcs_chk)); +} diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index dcd5ac8d3b14..497a4dbd91b0 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -112,6 +112,7 @@ enum { MLX5_REG_PMPE = 0x5010, MLX5_REG_PELC = 0x500e, MLX5_REG_PVLC = 0x500f, + MLX5_REG_PCMR = 0x5041, MLX5_REG_PMLP = 0, /* TBD */ MLX5_REG_NODE_DESC = 0x6001, MLX5_REG_HOST_ENDIANNESS = 0x7004, diff --git a/include/linux/mlx5/port.h b/include/linux/mlx5/port.h index a1d145abd4eb..577e953d0aa7 100644 --- a/include/linux/mlx5/port.h +++ b/include/linux/mlx5/port.h @@ -84,4 +84,8 @@ int mlx5_query_port_ets_rate_limit(struct mlx5_core_dev *mdev, int mlx5_set_port_wol(struct mlx5_core_dev *mdev, u8 wol_mode); int mlx5_query_port_wol(struct mlx5_core_dev *mdev, u8 *wol_mode); +int mlx5_set_port_fcs(struct mlx5_core_dev *mdev, u8 enable); +void mlx5_query_port_fcs(struct mlx5_core_dev *mdev, bool *supported, + bool *enabled); + #endif /* __MLX5_PORT_H__ */ From da54d24ec3ef736de04c61a01653776a9750334f Mon Sep 17 00:00:00 2001 From: Gal Pressman Date: Sun, 24 Apr 2016 22:51:53 +0300 
Subject: [PATCH 1095/1649] net/mlx5e: Add ethtool support for interface identify (LED blinking) Add the needed hardware command and mlx5_ifc structs for managing LED control. Add set_phys_id ethtool callback to support ethtool -p flag. Signed-off-by: Gal Pressman Signed-off-by: Eugenia Emantayev Signed-off-by: Saeed Mahameed Signed-off-by: David S. Miller --- .../ethernet/mellanox/mlx5/core/en_ethtool.c | 25 +++++++++++++++++++ .../net/ethernet/mellanox/mlx5/core/port.c | 13 ++++++++++ include/linux/mlx5/driver.h | 1 + include/linux/mlx5/port.h | 6 +++++ 4 files changed, 45 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index 522d584bc05f..a2c444ec191b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -1135,6 +1135,30 @@ static int mlx5e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) return mlx5_set_port_wol(mdev, mlx5_wol_mode); } +static int mlx5e_set_phys_id(struct net_device *dev, + enum ethtool_phys_id_state state) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + struct mlx5_core_dev *mdev = priv->mdev; + u16 beacon_duration; + + if (!MLX5_CAP_GEN(mdev, beacon_led)) + return -EOPNOTSUPP; + + switch (state) { + case ETHTOOL_ID_ACTIVE: + beacon_duration = MLX5_BEACON_DURATION_INF; + break; + case ETHTOOL_ID_INACTIVE: + beacon_duration = MLX5_BEACON_DURATION_OFF; + break; + default: + return -EOPNOTSUPP; + } + + return mlx5_set_port_beacon(mdev, beacon_duration); +} + const struct ethtool_ops mlx5e_ethtool_ops = { .get_drvinfo = mlx5e_get_drvinfo, .get_link = ethtool_op_get_link, @@ -1159,6 +1183,7 @@ const struct ethtool_ops mlx5e_ethtool_ops = { .get_pauseparam = mlx5e_get_pauseparam, .set_pauseparam = mlx5e_set_pauseparam, .get_ts_info = mlx5e_get_ts_info, + .set_phys_id = mlx5e_set_phys_id, .get_wol = mlx5e_get_wol, .set_wol = mlx5e_set_wol, }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c index c37740f30fbe..446549f63b5c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c @@ -115,6 +115,19 @@ int mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys, } EXPORT_SYMBOL_GPL(mlx5_query_port_ptys); +int mlx5_set_port_beacon(struct mlx5_core_dev *dev, u16 beacon_duration) +{ + u32 out[MLX5_ST_SZ_DW(mlcr_reg)]; + u32 in[MLX5_ST_SZ_DW(mlcr_reg)]; + + memset(in, 0, sizeof(in)); + MLX5_SET(mlcr_reg, in, local_port, 1); + MLX5_SET(mlcr_reg, in, beacon_duration, beacon_duration); + + return mlx5_core_access_reg(dev, in, sizeof(in), out, + sizeof(out), MLX5_REG_MLCR, 0, 1); +} + int mlx5_query_port_proto_cap(struct mlx5_core_dev *dev, u32 *proto_cap, int proto_mask) { diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 497a4dbd91b0..2e8758d1b19e 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -116,6 +116,7 @@ enum { MLX5_REG_PMLP = 0, /* TBD */ MLX5_REG_NODE_DESC = 0x6001, MLX5_REG_HOST_ENDIANNESS = 0x7004, + MLX5_REG_MLCR = 0x902b, }; enum { diff --git a/include/linux/mlx5/port.h b/include/linux/mlx5/port.h index 577e953d0aa7..a364ab1737a0 100644 --- a/include/linux/mlx5/port.h +++ b/include/linux/mlx5/port.h @@ -35,6 +35,11 @@ #include +enum mlx5_beacon_duration { + MLX5_BEACON_DURATION_OFF = 0x0, + MLX5_BEACON_DURATION_INF = 0xffff, +}; + int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps); int 
mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys, int ptys_size, int proto_mask, u8 local_port); @@ -53,6 +58,7 @@ int mlx5_set_port_admin_status(struct mlx5_core_dev *dev, enum mlx5_port_status status); int mlx5_query_port_admin_status(struct mlx5_core_dev *dev, enum mlx5_port_status *status); +int mlx5_set_port_beacon(struct mlx5_core_dev *dev, u16 beacon_duration); int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu, u8 port); void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, int *max_mtu, u8 port); From bb64143eee8c036a89b31daa4e9bf8360a8bded1 Mon Sep 17 00:00:00 2001 From: Gal Pressman Date: Sun, 24 Apr 2016 22:51:54 +0300 Subject: [PATCH 1096/1649] net/mlx5e: Add ethtool support for dump module EEPROM Add query MCIA, PMLP registers infrastructure and commands. Add ethtool support for get_module_info() and get_module_eeprom() callbacks. Signed-off-by: Gal Pressman Signed-off-by: Saeed Mahameed Signed-off-by: David S. Miller --- .../ethernet/mellanox/mlx5/core/en_ethtool.c | 80 +++++++++++++++++++ .../net/ethernet/mellanox/mlx5/core/port.c | 76 ++++++++++++++++++ include/linux/mlx5/driver.h | 3 +- include/linux/mlx5/port.h | 15 ++++ 4 files changed, 173 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index a2c444ec191b..0518c8658507 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -1159,6 +1159,84 @@ static int mlx5e_set_phys_id(struct net_device *dev, return mlx5_set_port_beacon(mdev, beacon_duration); } +static int mlx5e_get_module_info(struct net_device *netdev, + struct ethtool_modinfo *modinfo) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + struct mlx5_core_dev *dev = priv->mdev; + int size_read = 0; + u8 data[4]; + + size_read = mlx5_query_module_eeprom(dev, 0, 2, data); + if (size_read < 2) + return -EIO; + + /* data[0] = identifier byte */ + switch (data[0]) { + case MLX5_MODULE_ID_QSFP: + modinfo->type = ETH_MODULE_SFF_8436; + modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN; + break; + case MLX5_MODULE_ID_QSFP_PLUS: + case MLX5_MODULE_ID_QSFP28: + /* data[1] = revision id */ + if (data[0] == MLX5_MODULE_ID_QSFP28 || data[1] >= 0x3) { + modinfo->type = ETH_MODULE_SFF_8636; + modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN; + } else { + modinfo->type = ETH_MODULE_SFF_8436; + modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN; + } + break; + case MLX5_MODULE_ID_SFP: + modinfo->type = ETH_MODULE_SFF_8472; + modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; + break; + default: + netdev_err(priv->netdev, "%s: cable type not recognized:0x%x\n", + __func__, data[0]); + return -EINVAL; + } + + return 0; +} + +static int mlx5e_get_module_eeprom(struct net_device *netdev, + struct ethtool_eeprom *ee, + u8 *data) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + struct mlx5_core_dev *mdev = priv->mdev; + int offset = ee->offset; + int size_read; + int i = 0; + + if (!ee->len) + return -EINVAL; + + memset(data, 0, ee->len); + + while (i < ee->len) { + size_read = mlx5_query_module_eeprom(mdev, offset, ee->len - i, + data + i); + + if (!size_read) + /* Done reading */ + return 0; + + if (size_read < 0) { + netdev_err(priv->netdev, "%s: mlx5_query_eeprom failed:0x%x\n", + __func__, size_read); + return 0; + } + + i += size_read; + offset += size_read; + } + + return 0; +} + const struct ethtool_ops mlx5e_ethtool_ops = { .get_drvinfo = mlx5e_get_drvinfo, .get_link = ethtool_op_get_link, @@ 
-1186,4 +1264,6 @@ const struct ethtool_ops mlx5e_ethtool_ops = { .set_phys_id = mlx5e_set_phys_id, .get_wol = mlx5e_get_wol, .set_wol = mlx5e_set_wol, + .get_module_info = mlx5e_get_module_info, + .get_module_eeprom = mlx5e_get_module_eeprom, }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c index 446549f63b5c..4cb2a44510fa 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c @@ -310,6 +310,82 @@ void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, int *oper_mtu, } EXPORT_SYMBOL_GPL(mlx5_query_port_oper_mtu); +static int mlx5_query_module_num(struct mlx5_core_dev *dev, int *module_num) +{ + u32 out[MLX5_ST_SZ_DW(pmlp_reg)]; + u32 in[MLX5_ST_SZ_DW(pmlp_reg)]; + int module_mapping; + int err; + + memset(in, 0, sizeof(in)); + + MLX5_SET(pmlp_reg, in, local_port, 1); + + err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out), + MLX5_REG_PMLP, 0, 0); + if (err) + return err; + + module_mapping = MLX5_GET(pmlp_reg, out, lane0_module_mapping); + *module_num = module_mapping & MLX5_EEPROM_IDENTIFIER_BYTE_MASK; + + return 0; +} + +int mlx5_query_module_eeprom(struct mlx5_core_dev *dev, + u16 offset, u16 size, u8 *data) +{ + u32 out[MLX5_ST_SZ_DW(mcia_reg)]; + u32 in[MLX5_ST_SZ_DW(mcia_reg)]; + int module_num; + u16 i2c_addr; + int status; + int err; + void *ptr = MLX5_ADDR_OF(mcia_reg, out, dword_0); + + err = mlx5_query_module_num(dev, &module_num); + if (err) + return err; + + memset(in, 0, sizeof(in)); + size = min_t(int, size, MLX5_EEPROM_MAX_BYTES); + + if (offset < MLX5_EEPROM_PAGE_LENGTH && + offset + size > MLX5_EEPROM_PAGE_LENGTH) + /* Cross pages read, read until offset 256 in low page */ + size -= offset + size - MLX5_EEPROM_PAGE_LENGTH; + + i2c_addr = MLX5_I2C_ADDR_LOW; + if (offset >= MLX5_EEPROM_PAGE_LENGTH) { + i2c_addr = MLX5_I2C_ADDR_HIGH; + offset -= MLX5_EEPROM_PAGE_LENGTH; + } + + MLX5_SET(mcia_reg, in, l, 0); + MLX5_SET(mcia_reg, in, module, module_num); + MLX5_SET(mcia_reg, in, i2c_device_address, i2c_addr); + MLX5_SET(mcia_reg, in, page_number, 0); + MLX5_SET(mcia_reg, in, device_address, offset); + MLX5_SET(mcia_reg, in, size, size); + + err = mlx5_core_access_reg(dev, in, sizeof(in), out, + sizeof(out), MLX5_REG_MCIA, 0, 0); + if (err) + return err; + + status = MLX5_GET(mcia_reg, out, status); + if (status) { + mlx5_core_err(dev, "query_mcia_reg failed: status: 0x%x\n", + status); + return -EIO; + } + + memcpy(data, ptr, size); + + return size; +} +EXPORT_SYMBOL_GPL(mlx5_query_module_eeprom); + static int mlx5_query_port_pvlc(struct mlx5_core_dev *dev, u32 *pvlc, int pvlc_size, u8 local_port) { diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 2e8758d1b19e..1a170672c656 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -113,9 +113,10 @@ enum { MLX5_REG_PELC = 0x500e, MLX5_REG_PVLC = 0x500f, MLX5_REG_PCMR = 0x5041, - MLX5_REG_PMLP = 0, /* TBD */ + MLX5_REG_PMLP = 0x5002, MLX5_REG_NODE_DESC = 0x6001, MLX5_REG_HOST_ENDIANNESS = 0x7004, + MLX5_REG_MCIA = 0x9014, MLX5_REG_MLCR = 0x902b, }; diff --git a/include/linux/mlx5/port.h b/include/linux/mlx5/port.h index a364ab1737a0..7391eb833253 100644 --- a/include/linux/mlx5/port.h +++ b/include/linux/mlx5/port.h @@ -40,6 +40,19 @@ enum mlx5_beacon_duration { MLX5_BEACON_DURATION_INF = 0xffff, }; +enum mlx5_module_id { + MLX5_MODULE_ID_SFP = 0x3, + MLX5_MODULE_ID_QSFP = 0xC, + MLX5_MODULE_ID_QSFP_PLUS = 0xD, + MLX5_MODULE_ID_QSFP28 = 0x11, +}; + 
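/*
 * Illustrative aside (not part of this patch): the constants added below
 * drive the paging logic in mlx5_query_module_eeprom() above -- a read is
 * clamped to MLX5_EEPROM_MAX_BYTES, truncated so it does not cross the
 * 256-byte page boundary, and the offset selects which I2C device address
 * is used.  A hypothetical stand-alone helper showing just that address
 * selection:
 */
static inline void mlx5_eeprom_split_offset(u16 offset, u16 *i2c_addr,
					    u16 *device_addr)
{
	if (offset < MLX5_EEPROM_PAGE_LENGTH) {
		*i2c_addr = MLX5_I2C_ADDR_LOW;		/* 0x50, lower page */
		*device_addr = offset;
	} else {
		*i2c_addr = MLX5_I2C_ADDR_HIGH;		/* 0x51, upper area */
		*device_addr = offset - MLX5_EEPROM_PAGE_LENGTH;
	}
}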
+#define MLX5_EEPROM_MAX_BYTES 32 +#define MLX5_EEPROM_IDENTIFIER_BYTE_MASK 0x000000ff +#define MLX5_I2C_ADDR_LOW 0x50 +#define MLX5_I2C_ADDR_HIGH 0x51 +#define MLX5_EEPROM_PAGE_LENGTH 256 + int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps); int mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys, int ptys_size, int proto_mask, u8 local_port); @@ -93,5 +106,7 @@ int mlx5_query_port_wol(struct mlx5_core_dev *mdev, u8 *wol_mode); int mlx5_set_port_fcs(struct mlx5_core_dev *mdev, u8 enable); void mlx5_query_port_fcs(struct mlx5_core_dev *mdev, bool *supported, bool *enabled); +int mlx5_query_module_eeprom(struct mlx5_core_dev *dev, + u16 offset, u16 size, u8 *data); #endif /* __MLX5_PORT_H__ */ From 363501145e3faa650193722fe7047b767ed87172 Mon Sep 17 00:00:00 2001 From: Gal Pressman Date: Sun, 24 Apr 2016 22:51:55 +0300 Subject: [PATCH 1097/1649] net/mlx5e: Add ethtool support for rxvlan-offload (vlan stripping) Use ethtool -K rxvlan to enable/disable C-TAG vlan stripping by hardware. Signed-off-by: Gal Pressman Signed-off-by: Saeed Mahameed Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 3 + .../net/ethernet/mellanox/mlx5/core/en_main.c | 74 ++++++++++++++++++- include/linux/mlx5/driver.h | 4 + 3 files changed, 78 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index e903eff2574f..8abc289ac1fb 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -166,6 +166,7 @@ struct mlx5e_params { u8 rss_hfunc; u8 toeplitz_hash_key[40]; u32 indirection_rqt[MLX5E_INDIR_RQT_SIZE]; + bool vlan_strip_disable; #ifdef CONFIG_MLX5_CORE_EN_DCB struct ieee_ets ets; #endif @@ -575,6 +576,8 @@ int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto, void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv); void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv); +int mlx5e_modify_rqs_vsd(struct mlx5e_priv *priv, bool vsd); + int mlx5e_redirect_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix); void mlx5e_build_tir_ctx_hash(void *tirc, struct mlx5e_priv *priv); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index ad0cb4aa593b..6c9c10c131a0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -388,6 +388,7 @@ static int mlx5e_enable_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param) MLX5_SET(rqc, rqc, cqn, rq->cq.mcq.cqn); MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST); MLX5_SET(rqc, rqc, flush_in_error_en, 1); + MLX5_SET(rqc, rqc, vsd, priv->params.vlan_strip_disable); MLX5_SET(wq, wq, log_wq_pg_sz, rq->wq_ctrl.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT); MLX5_SET64(wq, wq, dbr_addr, rq->wq_ctrl.db.dma); @@ -402,7 +403,8 @@ static int mlx5e_enable_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param) return err; } -static int mlx5e_modify_rq(struct mlx5e_rq *rq, int curr_state, int next_state) +static int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state, + int next_state) { struct mlx5e_channel *c = rq->channel; struct mlx5e_priv *priv = c->priv; @@ -430,6 +432,36 @@ static int mlx5e_modify_rq(struct mlx5e_rq *rq, int curr_state, int next_state) return err; } +static int mlx5e_modify_rq_vsd(struct mlx5e_rq *rq, bool vsd) +{ + struct mlx5e_channel *c = rq->channel; + struct mlx5e_priv *priv = c->priv; + struct mlx5_core_dev *mdev = priv->mdev; 
+ + void *in; + void *rqc; + int inlen; + int err; + + inlen = MLX5_ST_SZ_BYTES(modify_rq_in); + in = mlx5_vzalloc(inlen); + if (!in) + return -ENOMEM; + + rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx); + + MLX5_SET(modify_rq_in, in, rq_state, MLX5_RQC_STATE_RDY); + MLX5_SET64(modify_rq_in, in, modify_bitmask, MLX5_RQ_BITMASK_VSD); + MLX5_SET(rqc, rqc, vsd, vsd); + MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RDY); + + err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen); + + kvfree(in); + + return err; +} + static void mlx5e_disable_rq(struct mlx5e_rq *rq) { mlx5_core_destroy_rq(rq->priv->mdev, rq->rqn); @@ -468,7 +500,7 @@ static int mlx5e_open_rq(struct mlx5e_channel *c, if (err) goto err_destroy_rq; - err = mlx5e_modify_rq(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY); + err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY); if (err) goto err_disable_rq; @@ -493,7 +525,7 @@ static void mlx5e_close_rq(struct mlx5e_rq *rq) clear_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state); napi_synchronize(&rq->channel->napi); /* prevent mlx5e_post_rx_wqes */ - mlx5e_modify_rq(rq, MLX5_RQC_STATE_RDY, MLX5_RQC_STATE_ERR); + mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RDY, MLX5_RQC_STATE_ERR); while (!mlx5_wq_ll_is_empty(&rq->wq)) msleep(20); @@ -1963,6 +1995,23 @@ static void mlx5e_destroy_tirs(struct mlx5e_priv *priv) mlx5e_destroy_tir(priv, i); } +int mlx5e_modify_rqs_vsd(struct mlx5e_priv *priv, bool vsd) +{ + int err = 0; + int i; + + if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) + return 0; + + for (i = 0; i < priv->params.num_channels; i++) { + err = mlx5e_modify_rq_vsd(&priv->channel[i]->rq, vsd); + if (err) + return err; + } + + return 0; +} + static int mlx5e_setup_tc(struct net_device *netdev, u8 tc) { struct mlx5e_priv *priv = netdev_priv(netdev); @@ -2147,6 +2196,23 @@ static int set_feature_rx_all(struct net_device *netdev, bool enable) return mlx5_set_port_fcs(mdev, !enable); } +static int set_feature_rx_vlan(struct net_device *netdev, bool enable) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + int err; + + mutex_lock(&priv->state_lock); + + priv->params.vlan_strip_disable = !enable; + err = mlx5e_modify_rqs_vsd(priv, !enable); + if (err) + priv->params.vlan_strip_disable = enable; + + mutex_unlock(&priv->state_lock); + + return err; +} + static int mlx5e_handle_feature(struct net_device *netdev, netdev_features_t wanted_features, netdev_features_t feature, @@ -2184,6 +2250,8 @@ static int mlx5e_set_features(struct net_device *netdev, set_feature_tc_num_filters); err |= mlx5e_handle_feature(netdev, features, NETIF_F_RXALL, set_feature_rx_all); + err |= mlx5e_handle_feature(netdev, features, NETIF_F_HW_VLAN_CTAG_RX, + set_feature_rx_vlan); return err ? -EINVAL : 0; } diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 1a170672c656..2cc5e9fd5913 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -45,6 +45,10 @@ #include #include +enum { + MLX5_RQ_BITMASK_VSD = 1 << 1, +}; + enum { MLX5_BOARD_ID_LEN = 64, MLX5_MAX_NAME_LEN = 16, From 1b223dd391622fde05e03829d813c3c6cc998685 Mon Sep 17 00:00:00 2001 From: Saeed Mahameed Date: Sun, 24 Apr 2016 22:51:56 +0300 Subject: [PATCH 1098/1649] net/mlx5e: Fix checksum handling for non-stripped vlan packets Now as rx-vlan offload can be disabled, packets can be received with vlan tag not stripped, which means is_first_ethertype_ip will return false, for that we need to check if the hardware reported csum OK so we will report CHECKSUM_UNNECESSARY for those packets. 
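The ordering matters: LRO frames are trusted outright, frames whose first ethertype is IP keep the CHECKSUM_COMPLETE path, and only the remaining frames (for example VLAN-tagged or tunneled ones) fall back to the CQE status bits. A minimal stand-alone C sketch of that decision order, using stand-in flags rather than the real struct mlx5_cqe64 fields and mlx5e helpers (an illustration only, not the driver code in the diff below):

enum rx_csum_result { RX_CSUM_NONE, RX_CSUM_UNNECESSARY, RX_CSUM_COMPLETE };

struct rx_csum_hints {
	int lro;			/* frame was built by hardware LRO */
	int first_ethertype_ip;		/* no VLAN tag in front of the IP header */
	int l3_ok;			/* stand-in for cqe->hds_ip_ext & CQE_L3_OK */
	int l4_ok;			/* stand-in for cqe->hds_ip_ext & CQE_L4_OK */
	int tunneled;			/* stand-in for cqe_is_tunneled(cqe) */
};

/* Mirrors the fallback order described above. */
static enum rx_csum_result classify_rx_csum(const struct rx_csum_hints *h,
					    int *csum_level)
{
	if (h->lro)
		return RX_CSUM_UNNECESSARY;

	if (h->first_ethertype_ip)
		return RX_CSUM_COMPLETE;	/* skb->csum taken from cqe->check_sum */

	if (h->l3_ok && h->l4_ok) {
		if (h->tunneled)
			*csum_level = 1;	/* inner checksum verified as well */
		return RX_CSUM_UNNECESSARY;	/* covers non-stripped VLAN packets */
	}

	return RX_CSUM_NONE;			/* let the stack checksum in software */
}
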
Signed-off-by: Saeed Mahameed Signed-off-by: David S. Miller --- .../net/ethernet/mellanox/mlx5/core/en_main.c | 1 + .../net/ethernet/mellanox/mlx5/core/en_rx.c | 20 +++++++++++++----- .../ethernet/mellanox/mlx5/core/en_stats.h | 8 +++++-- include/linux/mlx5/device.h | 21 ++++++++++++++----- 4 files changed, 38 insertions(+), 12 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 6c9c10c131a0..5bad17d37d7b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -109,6 +109,7 @@ static void mlx5e_update_sw_counters(struct mlx5e_priv *priv) s->lro_bytes += rq_stats->lro_bytes; s->rx_csum_none += rq_stats->csum_none; s->rx_csum_sw += rq_stats->csum_sw; + s->rx_csum_inner += rq_stats->csum_inner; s->rx_wqe_err += rq_stats->wqe_err; s->rx_mpwqe_filler += rq_stats->mpwqe_filler; s->rx_mpwqe_frag += rq_stats->mpwqe_frag; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 918b7c7fd74f..23adfe2fcba9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -543,16 +543,26 @@ static inline void mlx5e_handle_csum(struct net_device *netdev, if (lro) { skb->ip_summed = CHECKSUM_UNNECESSARY; - } else if (likely(is_first_ethertype_ip(skb))) { + return; + } + + if (is_first_ethertype_ip(skb)) { skb->ip_summed = CHECKSUM_COMPLETE; skb->csum = csum_unfold((__force __sum16)cqe->check_sum); rq->stats.csum_sw++; - } else { - goto csum_none; + return; } - return; - + if (likely((cqe->hds_ip_ext & CQE_L3_OK) && + (cqe->hds_ip_ext & CQE_L4_OK))) { + skb->ip_summed = CHECKSUM_UNNECESSARY; + if (cqe_is_tunneled(cqe)) { + skb->csum_level = 1; + skb->encapsulation = 1; + rq->stats.csum_inner++; + } + return; + } csum_none: skb->ip_summed = CHECKSUM_NONE; rq->stats.csum_none++; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h index 7cd8cb44b2ab..115752b53d85 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h @@ -62,6 +62,7 @@ struct mlx5e_sw_stats { u64 rx_csum_good; u64 rx_csum_none; u64 rx_csum_sw; + u64 rx_csum_inner; u64 tx_csum_offload; u64 tx_csum_inner; u64 tx_queue_stopped; @@ -90,6 +91,7 @@ static const struct counter_desc sw_stats_desc[] = { { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_good) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_none) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_sw) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_inner) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_offload) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_inner) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_stopped) }, @@ -272,8 +274,9 @@ static const struct counter_desc pport_per_prio_pfc_stats_desc[] = { struct mlx5e_rq_stats { u64 packets; u64 bytes; - u64 csum_none; u64 csum_sw; + u64 csum_inner; + u64 csum_none; u64 lro_packets; u64 lro_bytes; u64 wqe_err; @@ -285,8 +288,9 @@ struct mlx5e_rq_stats { static const struct counter_desc rq_stats_desc[] = { { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, packets) }, { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, bytes) }, - { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, csum_none) }, { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, csum_sw) }, + { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, csum_inner) }, + { 
MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, csum_none) }, { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, lro_packets) }, { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, lro_bytes) }, { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, wqe_err) }, diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index 942bccacd7b7..6bd429b53b77 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -645,8 +645,9 @@ struct mlx5_err_cqe { }; struct mlx5_cqe64 { - u8 rsvd0[2]; - __be16 wqe_id; + u8 outer_l3_tunneled; + u8 rsvd0; + __be16 wqe_id; u8 lro_tcppsh_abort_dupack; u8 lro_min_ttl; __be16 lro_tcp_win; @@ -659,7 +660,7 @@ struct mlx5_cqe64 { __be16 slid; __be32 flags_rqpn; u8 hds_ip_ext; - u8 l4_hdr_type_etc; + u8 l4_l3_hdr_type; __be16 vlan_info; __be32 srqn; /* [31:24]: lro_num_seg, [23:0]: srqn */ __be32 imm_inval_pkey; @@ -680,12 +681,22 @@ static inline int get_cqe_lro_tcppsh(struct mlx5_cqe64 *cqe) static inline u8 get_cqe_l4_hdr_type(struct mlx5_cqe64 *cqe) { - return (cqe->l4_hdr_type_etc >> 4) & 0x7; + return (cqe->l4_l3_hdr_type >> 4) & 0x7; +} + +static inline u8 get_cqe_l3_hdr_type(struct mlx5_cqe64 *cqe) +{ + return (cqe->l4_l3_hdr_type >> 2) & 0x3; +} + +static inline u8 cqe_is_tunneled(struct mlx5_cqe64 *cqe) +{ + return cqe->outer_l3_tunneled & 0x1; } static inline int cqe_has_vlan(struct mlx5_cqe64 *cqe) { - return !!(cqe->l4_hdr_type_etc & 0x1); + return !!(cqe->l4_l3_hdr_type & 0x1); } static inline u64 get_cqe_ts(struct mlx5_cqe64 *cqe) From 97717edc69eabdcc8b1859af75a363790c3e9cb6 Mon Sep 17 00:00:00 2001 From: Sergei Shtylyov Date: Sun, 24 Apr 2016 23:45:23 +0300 Subject: [PATCH 1099/1649] sh_eth: use EDMR_SRST_GETHER in sh_eth_check_reset() sh_eth_check_reset() uses a bare number where EDMR_SRST_GETHER would fit, i.e. the receive/trasmit software reset bits that comprise EDMR_SRST_GETHER read as 1 while the corresponding reset is in progress and thus, when both are 0, the reset is complete. Signed-off-by: Sergei Shtylyov Reviewed-by: Simon Horman Signed-off-by: David S. Miller --- drivers/net/ethernet/renesas/sh_eth.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index edf6356c7034..3d7a40af5aab 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -899,7 +899,7 @@ static int sh_eth_check_reset(struct net_device *ndev) int cnt = 100; while (cnt > 0) { - if (!(sh_eth_read(ndev, EDMR) & 0x3)) + if (!(sh_eth_read(ndev, EDMR) & EDMR_SRST_GETHER)) break; mdelay(1); cnt--; From ec65cfce508885e433445036ab46f4ec91d32c5c Mon Sep 17 00:00:00 2001 From: Sergei Shtylyov Date: Sun, 24 Apr 2016 23:46:15 +0300 Subject: [PATCH 1100/1649] sh_eth: rename ARSTR register bit The Renesas RZ/A1H manual names the software reset bit in the software reset register (ARSTR) ARST which makes a bit more sense than the ARSTR_ARSTR name used now by the driver -- rename the latter to ARSTR_ARST. Signed-off-by: Sergei Shtylyov Reviewed-by: Simon Horman Signed-off-by: David S. 
Miller --- drivers/net/ethernet/renesas/sh_eth.c | 6 +++--- drivers/net/ethernet/renesas/sh_eth.h | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 3d7a40af5aab..07e29638299f 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -482,7 +482,7 @@ static void sh_eth_chip_reset(struct net_device *ndev) struct sh_eth_private *mdp = netdev_priv(ndev); /* reset device */ - sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR); + sh_eth_tsu_write(mdp, ARSTR_ARST, ARSTR); mdelay(1); } @@ -540,7 +540,7 @@ static void sh_eth_chip_reset_r8a7740(struct net_device *ndev) struct sh_eth_private *mdp = netdev_priv(ndev); /* reset device */ - sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR); + sh_eth_tsu_write(mdp, ARSTR_ARST, ARSTR); mdelay(1); sh_eth_select_mii(ndev); @@ -735,7 +735,7 @@ static void sh_eth_chip_reset_giga(struct net_device *ndev) } /* reset device */ - iowrite32(ARSTR_ARSTR, (void *)(SH_GIGA_ETH_BASE + 0x1800)); + iowrite32(ARSTR_ARST, (void *)(SH_GIGA_ETH_BASE + 0x1800)); mdelay(1); /* restore MAHR and MALR */ diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h index 8fa4ef3a7fdd..c62380e34a1d 100644 --- a/drivers/net/ethernet/renesas/sh_eth.h +++ b/drivers/net/ethernet/renesas/sh_eth.h @@ -394,7 +394,7 @@ enum RPADIR_BIT { #define DEFAULT_FDR_INIT 0x00000707 /* ARSTR */ -enum ARSTR_BIT { ARSTR_ARSTR = 0x00000001, }; +enum ARSTR_BIT { ARSTR_ARST = 0x00000001, }; /* TSU_FWEN0 */ enum TSU_FWEN0_BIT { From 0e7dd0c9c3cde901f79f04260aa706bbfdc0c67e Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Tue, 26 Apr 2016 23:14:30 +0200 Subject: [PATCH 1101/1649] pch_gbe: fix bogus trylock conversion Should have converted 'if (trylock)' to 'lock'. Fixes: a6086a893718db ("drivers: net: remove NETDEV_TX_LOCKED") Signed-off-by: Florian Westphal Signed-off-by: David S. Miller --- drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c index 4475dcc687a2..ca4add749410 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c @@ -2137,7 +2137,7 @@ static int pch_gbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev) struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring; unsigned long flags; - spin_trylock_irqsave(&tx_ring->tx_lock, flags); + spin_lock_irqsave(&tx_ring->tx_lock, flags); if (unlikely(!PCH_GBE_DESC_UNUSED(tx_ring))) { netif_stop_queue(netdev); From d686b920abb7136e0575ec974cd5a24f51a7a549 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Tue, 26 Apr 2016 09:54:11 +0200 Subject: [PATCH 1102/1649] nl80211: use nla_put_u64_64bit() for the remaining u64 attributes Nicolas converted most users, but didn't realize some were generated by macros. Convert those over as well. Signed-off-by: Johannes Berg Acked-by: Nicolas Dichtel Signed-off-by: Johannes Berg --- include/uapi/linux/nl80211.h | 4 ++++ net/wireless/nl80211.c | 36 ++++++++++++++++++++++-------------- 2 files changed, 26 insertions(+), 14 deletions(-) diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index f958a7173eb4..e23d78685a01 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -2517,6 +2517,7 @@ enum nl80211_sta_bss_param { * attributes carrying the actual values. 
* @NL80211_STA_INFO_RX_DURATION: aggregate PPDU duration for all frames * received from the station (u64, usec) + * @NL80211_STA_INFO_PAD: attribute used for padding for 64-bit alignment * @__NL80211_STA_INFO_AFTER_LAST: internal * @NL80211_STA_INFO_MAX: highest possible station info attribute */ @@ -2554,6 +2555,7 @@ enum nl80211_sta_info { NL80211_STA_INFO_BEACON_SIGNAL_AVG, NL80211_STA_INFO_TID_STATS, NL80211_STA_INFO_RX_DURATION, + NL80211_STA_INFO_PAD, /* keep last */ __NL80211_STA_INFO_AFTER_LAST, @@ -2570,6 +2572,7 @@ enum nl80211_sta_info { * transmitted MSDUs (not counting the first attempt; u64) * @NL80211_TID_STATS_TX_MSDU_FAILED: number of failed transmitted * MSDUs (u64) + * @NL80211_TID_STATS_PAD: attribute used for padding for 64-bit alignment * @NUM_NL80211_TID_STATS: number of attributes here * @NL80211_TID_STATS_MAX: highest numbered attribute here */ @@ -2579,6 +2582,7 @@ enum nl80211_tid_stats { NL80211_TID_STATS_TX_MSDU, NL80211_TID_STATS_TX_MSDU_RETRIES, NL80211_TID_STATS_TX_MSDU_FAILED, + NL80211_TID_STATS_PAD, /* keep last */ NUM_NL80211_TID_STATS, diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 5b0d2c8c2165..9bc84a2ddd34 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -3755,11 +3755,18 @@ static int nl80211_send_station(struct sk_buff *msg, u32 cmd, u32 portid, goto nla_put_failure; #define PUT_SINFO(attr, memb, type) do { \ + BUILD_BUG_ON(sizeof(type) == sizeof(u64)); \ if (sinfo->filled & (1ULL << NL80211_STA_INFO_ ## attr) && \ nla_put_ ## type(msg, NL80211_STA_INFO_ ## attr, \ sinfo->memb)) \ goto nla_put_failure; \ } while (0) +#define PUT_SINFO_U64(attr, memb) do { \ + if (sinfo->filled & (1ULL << NL80211_STA_INFO_ ## attr) && \ + nla_put_u64_64bit(msg, NL80211_STA_INFO_ ## attr, \ + sinfo->memb, NL80211_STA_INFO_PAD)) \ + goto nla_put_failure; \ + } while (0) PUT_SINFO(CONNECTED_TIME, connected_time, u32); PUT_SINFO(INACTIVE_TIME, inactive_time, u32); @@ -3776,12 +3783,12 @@ static int nl80211_send_station(struct sk_buff *msg, u32 cmd, u32 portid, (u32)sinfo->tx_bytes)) goto nla_put_failure; - PUT_SINFO(RX_BYTES64, rx_bytes, u64); - PUT_SINFO(TX_BYTES64, tx_bytes, u64); + PUT_SINFO_U64(RX_BYTES64, rx_bytes); + PUT_SINFO_U64(TX_BYTES64, tx_bytes); PUT_SINFO(LLID, llid, u16); PUT_SINFO(PLID, plid, u16); PUT_SINFO(PLINK_STATE, plink_state, u8); - PUT_SINFO(RX_DURATION, rx_duration, u64); + PUT_SINFO_U64(RX_DURATION, rx_duration); switch (rdev->wiphy.signal_type) { case CFG80211_SIGNAL_TYPE_MBM: @@ -3849,12 +3856,13 @@ static int nl80211_send_station(struct sk_buff *msg, u32 cmd, u32 portid, &sinfo->sta_flags)) goto nla_put_failure; - PUT_SINFO(T_OFFSET, t_offset, u64); - PUT_SINFO(RX_DROP_MISC, rx_dropped_misc, u64); - PUT_SINFO(BEACON_RX, rx_beacon, u64); + PUT_SINFO_U64(T_OFFSET, t_offset); + PUT_SINFO_U64(RX_DROP_MISC, rx_dropped_misc); + PUT_SINFO_U64(BEACON_RX, rx_beacon); PUT_SINFO(BEACON_SIGNAL_AVG, rx_beacon_signal_avg, u8); #undef PUT_SINFO +#undef PUT_SINFO_U64 if (sinfo->filled & BIT(NL80211_STA_INFO_TID_STATS)) { struct nlattr *tidsattr; @@ -3877,19 +3885,19 @@ static int nl80211_send_station(struct sk_buff *msg, u32 cmd, u32 portid, if (!tidattr) goto nla_put_failure; -#define PUT_TIDVAL(attr, memb, type) do { \ +#define PUT_TIDVAL_U64(attr, memb) do { \ if (tidstats->filled & BIT(NL80211_TID_STATS_ ## attr) && \ - nla_put_ ## type(msg, NL80211_TID_STATS_ ## attr, \ - tidstats->memb)) \ + nla_put_u64_64bit(msg, NL80211_TID_STATS_ ## attr, \ + tidstats->memb, NL80211_TID_STATS_PAD)) \ goto nla_put_failure; \ } 
while (0) - PUT_TIDVAL(RX_MSDU, rx_msdu, u64); - PUT_TIDVAL(TX_MSDU, tx_msdu, u64); - PUT_TIDVAL(TX_MSDU_RETRIES, tx_msdu_retries, u64); - PUT_TIDVAL(TX_MSDU_FAILED, tx_msdu_failed, u64); + PUT_TIDVAL_U64(RX_MSDU, rx_msdu); + PUT_TIDVAL_U64(TX_MSDU, tx_msdu); + PUT_TIDVAL_U64(TX_MSDU_RETRIES, tx_msdu_retries); + PUT_TIDVAL_U64(TX_MSDU_FAILED, tx_msdu_failed); -#undef PUT_TIDVAL +#undef PUT_TIDVAL_U64 nla_nest_end(msg, tidattr); } From bb28c28ee10559c5bc5b5f48c2d6f6f2f6bd5586 Mon Sep 17 00:00:00 2001 From: Marty Faltesek Date: Wed, 20 Apr 2016 00:19:35 -0400 Subject: [PATCH 1103/1649] mwifiex: bridged packets cause wmm_tx_pending counter to go negative When a packet is queued from the bridge, wmm_tx_pending is not incremented, but when the packet is dequeued the counter is decremented. Signed-off-by: Marty Faltesek Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/uap_txrx.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/net/wireless/marvell/mwifiex/uap_txrx.c b/drivers/net/wireless/marvell/mwifiex/uap_txrx.c index c95b61dc87c2..666e91af59d7 100644 --- a/drivers/net/wireless/marvell/mwifiex/uap_txrx.c +++ b/drivers/net/wireless/marvell/mwifiex/uap_txrx.c @@ -102,6 +102,7 @@ static void mwifiex_uap_queue_bridged_pkt(struct mwifiex_private *priv, int hdr_chop; struct ethhdr *p_ethhdr; struct mwifiex_sta_node *src_node; + int index; uap_rx_pd = (struct uap_rxpd *)(skb->data); rx_pkt_hdr = (void *)uap_rx_pd + le16_to_cpu(uap_rx_pd->rx_pkt_offset); @@ -208,6 +209,9 @@ static void mwifiex_uap_queue_bridged_pkt(struct mwifiex_private *priv, } __net_timestamp(skb); + + index = mwifiex_1d_to_wmm_queue[skb->priority]; + atomic_inc(&priv->wmm_tx_pending[index]); mwifiex_wmm_add_buf_txqueue(priv, skb); atomic_inc(&adapter->tx_pending); atomic_inc(&adapter->pending_bridged_pkts); From b977d305ad20e05212c0ded2a7ef90e411edc1b3 Mon Sep 17 00:00:00 2001 From: Marty Faltesek Date: Wed, 20 Apr 2016 00:20:52 -0400 Subject: [PATCH 1104/1649] mwifiex: fw download does not release sdio bus during failure Signed-off-by: Marty Faltesek Reviewed-by: Julian Calaby Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/sdio.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c index cbd9dcd88b98..099722e1f867 100644 --- a/drivers/net/wireless/marvell/mwifiex/sdio.c +++ b/drivers/net/wireless/marvell/mwifiex/sdio.c @@ -1103,13 +1103,12 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter, offset += txlen; } while (true); - sdio_release_host(card->func); - mwifiex_dbg(adapter, MSG, "info: FW download over, size %d bytes\n", offset); ret = 0; done: + sdio_release_host(card->func); kfree(fwbuf); return ret; } From 8d666302df95ab585f5cc7b2fb779991f8797eb5 Mon Sep 17 00:00:00 2001 From: Marty Faltesek Date: Wed, 20 Apr 2016 00:22:01 -0400 Subject: [PATCH 1105/1649] mwifiex: transmit packet stats incorrect. tx_packets counter is incremented for aggregated packets, when it had already been incremented for the aggregated packet's constituent parts. Removing the extra count. 
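In condensed form the reordering in mwifiex_write_data_complete() is sketched below; this is an illustration of the intent only, the actual hunk follows after the tags. Aggregated frames now bail out before the counters are touched, because each constituent MSDU was already counted when it completed on its own:

    if (tx_info->flags & MWIFIEX_BUF_FLAG_BRIDGED_PKT)
        atomic_dec_return(&adapter->pending_bridged_pkts);

    if (tx_info->flags & MWIFIEX_BUF_FLAG_AGGR_PKT)
        goto done;                      /* constituents already counted */

    if (!status) {
        priv->stats.tx_packets++;       /* counted once per MSDU */
        priv->stats.tx_bytes += tx_info->pkt_len;
    }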
Signed-off-by: Marty Faltesek Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/txrx.c | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/drivers/net/wireless/marvell/mwifiex/txrx.c b/drivers/net/wireless/marvell/mwifiex/txrx.c index bf6182b646a5..abdd0cf710bf 100644 --- a/drivers/net/wireless/marvell/mwifiex/txrx.c +++ b/drivers/net/wireless/marvell/mwifiex/txrx.c @@ -297,6 +297,13 @@ int mwifiex_write_data_complete(struct mwifiex_adapter *adapter, goto done; mwifiex_set_trans_start(priv->netdev); + + if (tx_info->flags & MWIFIEX_BUF_FLAG_BRIDGED_PKT) + atomic_dec_return(&adapter->pending_bridged_pkts); + + if (tx_info->flags & MWIFIEX_BUF_FLAG_AGGR_PKT) + goto done; + if (!status) { priv->stats.tx_packets++; priv->stats.tx_bytes += tx_info->pkt_len; @@ -306,12 +313,6 @@ int mwifiex_write_data_complete(struct mwifiex_adapter *adapter, priv->stats.tx_errors++; } - if (tx_info->flags & MWIFIEX_BUF_FLAG_BRIDGED_PKT) - atomic_dec_return(&adapter->pending_bridged_pkts); - - if (tx_info->flags & MWIFIEX_BUF_FLAG_AGGR_PKT) - goto done; - if (aggr) /* For skb_aggr, do not wake up tx queue */ goto done; From 3f210e2f1269ffe83188db2f1f5839da907fc722 Mon Sep 17 00:00:00 2001 From: Amitkumar Karwar Date: Thu, 21 Apr 2016 08:07:54 -0700 Subject: [PATCH 1106/1649] mwifiex: fix coding style Redundant space in case statement is removed. Signed-off-by: Amitkumar Karwar Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/cfg80211.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c index 6db202fa7157..369ea06eca52 100644 --- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c +++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c @@ -3388,7 +3388,7 @@ static int mwifiex_cfg80211_resume(struct wiphy *wiphy) break; case CONTROL_FRAME_MATCHED: break; - case MANAGEMENT_FRAME_MATCHED: + case MANAGEMENT_FRAME_MATCHED: break; case GTK_REKEY_FAILURE: if (wiphy->wowlan_config->gtk_rekey_failure) From df2288623ee0ce77743d76ed8ca816e92246e77b Mon Sep 17 00:00:00 2001 From: Amitkumar Karwar Date: Thu, 21 Apr 2016 08:07:55 -0700 Subject: [PATCH 1107/1649] mwifiex: report wowlan wakeup reasons correctly It's been observed that wakeup on GTK rekey failure wasn't reported to cfg80211. This patch corrects the check so that all valid wakeup reasons are reported. 
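Reduced to its essentials, the fix replaces the fragile "hs_wakeup_reason between 1 and 7" range check with a flag that is cleared only in the default case; the sketch below condenses the logic (only one case shown), the real hunk follows after the tags:

    bool report_wakeup_reason = true;

    switch (wakeup_reason.hs_wakeup_reason) {
    case GTK_REKEY_FAILURE:
        if (wiphy->wowlan_config->gtk_rekey_failure)
            wakeup_report.gtk_rekey_failure = true;
        break;
    default:
        report_wakeup_reason = false;   /* nothing to report */
        break;
    }

    if (report_wakeup_reason)
        cfg80211_report_wowlan_wakeup(&priv->wdev, &wakeup_report,
                                      GFP_KERNEL);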
Signed-off-by: Amitkumar Karwar Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/cfg80211.c | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c index 369ea06eca52..734cf67b79c1 100644 --- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c +++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c @@ -3344,6 +3344,7 @@ static int mwifiex_cfg80211_resume(struct wiphy *wiphy) struct mwifiex_ds_wakeup_reason wakeup_reason; struct cfg80211_wowlan_wakeup wakeup_report; int i; + bool report_wakeup_reason = true; for (i = 0; i < adapter->priv_num; i++) { priv = adapter->priv[i]; @@ -3386,20 +3387,16 @@ static int mwifiex_cfg80211_resume(struct wiphy *wiphy) if (wiphy->wowlan_config->n_patterns) wakeup_report.pattern_idx = 1; break; - case CONTROL_FRAME_MATCHED: - break; - case MANAGEMENT_FRAME_MATCHED: - break; case GTK_REKEY_FAILURE: if (wiphy->wowlan_config->gtk_rekey_failure) wakeup_report.gtk_rekey_failure = true; break; default: + report_wakeup_reason = false; break; } - if ((wakeup_reason.hs_wakeup_reason > 0) && - (wakeup_reason.hs_wakeup_reason <= 7)) + if (report_wakeup_reason) cfg80211_report_wowlan_wakeup(&priv->wdev, &wakeup_report, GFP_KERNEL); From d286af9bf40ab88f2035aef293f6ecae4187c6bb Mon Sep 17 00:00:00 2001 From: Amitkumar Karwar Date: Thu, 21 Apr 2016 08:07:56 -0700 Subject: [PATCH 1108/1649] mwifiex: avoid querying wakeup reason when wowlan is disabled In cfg80211 resume handler, we query wakeup reason from firmware and report to cfg80211. if wowlan is disabled, connection is already terminated during suspend. We don't need to query wakeup reason in this case. Signed-off-by: Amitkumar Karwar Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/cfg80211.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c index 734cf67b79c1..ff948a922222 100644 --- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c +++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c @@ -3355,6 +3355,9 @@ static int mwifiex_cfg80211_resume(struct wiphy *wiphy) } } + if (!wiphy->wowlan_config) + goto done; + priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA); mwifiex_get_wakeup_reason(priv, HostCmd_ACT_GEN_GET, MWIFIEX_SYNC_CMD, &wakeup_reason); @@ -3400,6 +3403,7 @@ static int mwifiex_cfg80211_resume(struct wiphy *wiphy) cfg80211_report_wowlan_wakeup(&priv->wdev, &wakeup_report, GFP_KERNEL); +done: if (adapter->nd_info) { for (i = 0 ; i < adapter->nd_info->n_matches ; i++) kfree(adapter->nd_info->matches[i]); From 1b499cb72f26bbf44f2fa158c2d1487730aae96a Mon Sep 17 00:00:00 2001 From: Amitkumar Karwar Date: Sun, 24 Apr 2016 23:49:51 -0700 Subject: [PATCH 1109/1649] mwifiex: disable channel filtering feature in firmware As 2.4Ghz channels are overlapping, sometimes AP responds to probe request even if it's operating on neighbouring channel. Currently firmware drops those scan entries, as current channel doesn't match with APs channel. This patch enables MWIFIEX_DISABLE_CHAN_FILT flag in scan command to disable the feature so that better scan results will be received in 2.4Ghz band. 
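At the channel-list level the change is a one-liner applied in both scan setup paths: the filter-disable bit is now set for every scanned channel instead of only when a filtered scan was requested. Condensed from the two hunks below:

    scan_chan_list[chan_idx].chan_scan_mode_bitmap |=
                    MWIFIEX_DISABLE_CHAN_FILT;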
Signed-off-by: Amitkumar Karwar Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/scan.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/drivers/net/wireless/marvell/mwifiex/scan.c b/drivers/net/wireless/marvell/mwifiex/scan.c index 36cc9cca95fc..bc5e52cebce1 100644 --- a/drivers/net/wireless/marvell/mwifiex/scan.c +++ b/drivers/net/wireless/marvell/mwifiex/scan.c @@ -498,11 +498,13 @@ mwifiex_scan_create_channel_list(struct mwifiex_private *priv, &= ~MWIFIEX_PASSIVE_SCAN; scan_chan_list[chan_idx].chan_number = (u32) ch->hw_value; + + scan_chan_list[chan_idx].chan_scan_mode_bitmap + |= MWIFIEX_DISABLE_CHAN_FILT; + if (filtered_scan) { scan_chan_list[chan_idx].max_scan_time = cpu_to_le16(adapter->specific_scan_time); - scan_chan_list[chan_idx].chan_scan_mode_bitmap - |= MWIFIEX_DISABLE_CHAN_FILT; } chan_idx++; } @@ -1060,9 +1062,8 @@ mwifiex_config_scan(struct mwifiex_private *priv, scan_chan_list[chan_idx].chan_scan_mode_bitmap &= ~MWIFIEX_PASSIVE_SCAN; - if (*filtered_scan) - scan_chan_list[chan_idx].chan_scan_mode_bitmap - |= MWIFIEX_DISABLE_CHAN_FILT; + scan_chan_list[chan_idx].chan_scan_mode_bitmap + |= MWIFIEX_DISABLE_CHAN_FILT; if (user_scan_in->chan_list[chan_idx].scan_time) { scan_dur = (u16) user_scan_in-> From 9d3f65b0c2ddb926340af96eb89ad9be589865c0 Mon Sep 17 00:00:00 2001 From: Amitkumar Karwar Date: Sun, 24 Apr 2016 23:49:52 -0700 Subject: [PATCH 1110/1649] mwifiex: increase dwell time for active scan It's been observed that sometimes AP's probe response is received after scan duration gets completed for the channel. This happens especially when wildcard scan is performed along with specific SSID scan. We will increase the time from 30 msecs to 40 msecs. Signed-off-by: Amitkumar Karwar Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/main.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h index 4c742a597cb0..0207af00be42 100644 --- a/drivers/net/wireless/marvell/mwifiex/main.h +++ b/drivers/net/wireless/marvell/mwifiex/main.h @@ -111,8 +111,8 @@ enum { #define SCAN_BEACON_ENTRY_PAD 6 #define MWIFIEX_PASSIVE_SCAN_CHAN_TIME 110 -#define MWIFIEX_ACTIVE_SCAN_CHAN_TIME 30 -#define MWIFIEX_SPECIFIC_SCAN_CHAN_TIME 30 +#define MWIFIEX_ACTIVE_SCAN_CHAN_TIME 40 +#define MWIFIEX_SPECIFIC_SCAN_CHAN_TIME 40 #define MWIFIEX_DEF_SCAN_CHAN_GAP_TIME 50 #define SCAN_RSSI(RSSI) (0x100 - ((u8)(RSSI))) From 570d8e9398011a63590c281a36cdce311196608e Mon Sep 17 00:00:00 2001 From: Nicolas Dichtel Date: Wed, 27 Apr 2016 17:53:08 +0200 Subject: [PATCH 1111/1649] taskstats: fix nl parsing in accounting/getdelays.c The type TASKSTATS_TYPE_NULL should always be ignored. When jumping to the next attribute, only the length of the current attribute should be added, not the length of all nested attributes. This last bug was not visible before commit 80df554275c2, because the kernel didn't put more than two nested attributes. Fixes: a3baf649ca9c ("[PATCH] per-task-delay-accounting: documentation") Fixes: 80df554275c2 ("taskstats: use the libnl API to align nlattr on 64-bit") Signed-off-by: Nicolas Dichtel Signed-off-by: David S. 
Miller --- Documentation/accounting/getdelays.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/Documentation/accounting/getdelays.c b/Documentation/accounting/getdelays.c index 7785fb5eb93f..b5ca536e56a8 100644 --- a/Documentation/accounting/getdelays.c +++ b/Documentation/accounting/getdelays.c @@ -505,6 +505,8 @@ int main(int argc, char *argv[]) if (!loop) goto done; break; + case TASKSTATS_TYPE_NULL: + break; default: fprintf(stderr, "Unknown nested" " nla_type %d\n", @@ -512,7 +514,8 @@ int main(int argc, char *argv[]) break; } len2 += NLA_ALIGN(na->nla_len); - na = (struct nlattr *) ((char *) na + len2); + na = (struct nlattr *)((char *)na + + NLA_ALIGN(na->nla_len)); } break; From 8c14586fc320acfed8a0048eb21d1f2e2856fc36 Mon Sep 17 00:00:00 2001 From: David Ahern Date: Sun, 24 Apr 2016 21:26:04 -0700 Subject: [PATCH 1112/1649] net: ipv6: Use passed in table for nexthop lookups Similar to 3bfd847203c6 ("net: Use passed in table for nexthop lookups") for IPv4, if the route spec contains a table id use that to lookup the next hop first and fall back to a full lookup if it fails (per the fix 4c9bcd117918b ("net: Fix nexthop lookups")). Example: root@kenny:~# ip -6 ro ls table red local 2100:1::1 dev lo proto none metric 0 pref medium 2100:1::/120 dev eth1 proto kernel metric 256 pref medium local 2100:2::1 dev lo proto none metric 0 pref medium 2100:2::/120 dev eth2 proto kernel metric 256 pref medium local fe80::e0:f9ff:fe09:3cac dev lo proto none metric 0 pref medium local fe80::e0:f9ff:fe1c:b974 dev lo proto none metric 0 pref medium fe80::/64 dev eth1 proto kernel metric 256 pref medium fe80::/64 dev eth2 proto kernel metric 256 pref medium ff00::/8 dev red metric 256 pref medium ff00::/8 dev eth1 metric 256 pref medium ff00::/8 dev eth2 metric 256 pref medium unreachable default dev lo metric 240 error -113 pref medium root@kenny:~# ip -6 ro add table red 2100:3::/64 via 2100:1::64 RTNETLINK answers: No route to host Route add fails even though 2100:1::64 is a reachable next hop: root@kenny:~# ping6 -I red 2100:1::64 ping6: Warning: source address might be selected on device other than red. PING 2100:1::64(2100:1::64) from 2100:1::1 red: 56 data bytes 64 bytes from 2100:1::64: icmp_seq=1 ttl=64 time=1.33 ms With this patch: root@kenny:~# ip -6 ro add table red 2100:3::/64 via 2100:1::64 root@kenny:~# ip -6 ro ls table red local 2100:1::1 dev lo proto none metric 0 pref medium 2100:1::/120 dev eth1 proto kernel metric 256 pref medium local 2100:2::1 dev lo proto none metric 0 pref medium 2100:2::/120 dev eth2 proto kernel metric 256 pref medium 2100:3::/64 via 2100:1::64 dev eth1 metric 1024 pref medium local fe80::e0:f9ff:fe09:3cac dev lo proto none metric 0 pref medium local fe80::e0:f9ff:fe1c:b974 dev lo proto none metric 0 pref medium fe80::/64 dev eth1 proto kernel metric 256 pref medium fe80::/64 dev eth2 proto kernel metric 256 pref medium ff00::/8 dev red metric 256 pref medium ff00::/8 dev eth1 metric 256 pref medium ff00::/8 dev eth2 metric 256 pref medium unreachable default dev lo metric 240 error -113 pref medium Signed-off-by: David Ahern Signed-off-by: David S. 
Miller --- net/ipv6/route.c | 40 ++++++++++++++++++++++++++++++++++++++-- 1 file changed, 38 insertions(+), 2 deletions(-) diff --git a/net/ipv6/route.c b/net/ipv6/route.c index d916d6ab9ad2..af46e19205f5 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -1769,6 +1769,37 @@ static int ip6_convert_metrics(struct mx6_config *mxc, return -EINVAL; } +static struct rt6_info *ip6_nh_lookup_table(struct net *net, + struct fib6_config *cfg, + const struct in6_addr *gw_addr) +{ + struct flowi6 fl6 = { + .flowi6_oif = cfg->fc_ifindex, + .daddr = *gw_addr, + .saddr = cfg->fc_prefsrc, + }; + struct fib6_table *table; + struct rt6_info *rt; + int flags = 0; + + table = fib6_get_table(net, cfg->fc_table); + if (!table) + return NULL; + + if (!ipv6_addr_any(&cfg->fc_prefsrc)) + flags |= RT6_LOOKUP_F_HAS_SADDR; + + rt = ip6_pol_route(net, table, cfg->fc_ifindex, &fl6, flags); + + /* if table lookup failed, fall back to full lookup */ + if (rt == net->ipv6.ip6_null_entry) { + ip6_rt_put(rt); + rt = NULL; + } + + return rt; +} + static struct rt6_info *ip6_route_info_create(struct fib6_config *cfg) { struct net *net = cfg->fc_nlinfo.nl_net; @@ -1940,7 +1971,7 @@ static struct rt6_info *ip6_route_info_create(struct fib6_config *cfg) rt->rt6i_gateway = *gw_addr; if (gwa_type != (IPV6_ADDR_LINKLOCAL|IPV6_ADDR_UNICAST)) { - struct rt6_info *grt; + struct rt6_info *grt = NULL; /* IPv6 strictly inhibits using not link-local addresses as nexthop address. @@ -1952,7 +1983,12 @@ static struct rt6_info *ip6_route_info_create(struct fib6_config *cfg) if (!(gwa_type & IPV6_ADDR_UNICAST)) goto out; - grt = rt6_lookup(net, gw_addr, NULL, cfg->fc_ifindex, 1); + if (cfg->fc_table) + grt = ip6_nh_lookup_table(net, cfg, gw_addr); + + if (!grt) + grt = rt6_lookup(net, gw_addr, NULL, + cfg->fc_ifindex, 1); err = -EHOSTUNREACH; if (!grt) From 68a1c5a777f8e94d3bb0b0e287443492316ea5b5 Mon Sep 17 00:00:00 2001 From: Michal Kosiarz Date: Tue, 12 Apr 2016 08:30:46 -0700 Subject: [PATCH 1113/1649] i40e: Add device capability which defines if update is available Add device capability which defines if update is available and security check is needed during update process. 
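The new capability ID is decoded into two booleans while parsing the discover-capabilities response (see the i40e_parse_discover_capabilities() hunk below). How a caller consumes them is outside this patch; a hypothetical check, reusing the field names from the struct added below, could look like:

    /* hypothetical consumer, for illustration only */
    if (hw->func_caps.update_disabled)
        return -EPERM;          /* firmware reports NVM updates disabled */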
Change-ID: I380787c878275e1df18b39198df3ee3666342282 Signed-off-by: Michal Kosiarz Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h | 1 + drivers/net/ethernet/intel/i40e/i40e_common.c | 6 ++++++ drivers/net/ethernet/intel/i40e/i40e_type.h | 5 +++++ drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h | 1 + drivers/net/ethernet/intel/i40evf/i40e_type.h | 5 +++++ 5 files changed, 18 insertions(+) diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h index 8d5c65ab6267..5179b3b25acb 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h @@ -429,6 +429,7 @@ struct i40e_aqc_list_capabilities_element_resp { #define I40E_AQ_CAP_ID_SDP 0x0062 #define I40E_AQ_CAP_ID_MDIO 0x0063 #define I40E_AQ_CAP_ID_WSR_PROT 0x0064 +#define I40E_AQ_CAP_ID_NVM_MGMT 0x0080 #define I40E_AQ_CAP_ID_FLEX10 0x00F1 #define I40E_AQ_CAP_ID_CEM 0x00F2 diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c index f3c1d8890cbb..34e86f55b2c0 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c @@ -3138,6 +3138,12 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff, p->wr_csr_prot = (u64)number; p->wr_csr_prot |= (u64)logical_id << 32; break; + case I40E_AQ_CAP_ID_NVM_MGMT: + if (number & I40E_NVM_MGMT_SEC_REV_DISABLED) + p->sec_rev_disabled = true; + if (number & I40E_NVM_MGMT_UPDATE_DISABLED) + p->update_disabled = true; + break; default: break; } diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h index bb57cd909c47..8aa14aacdd35 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_type.h +++ b/drivers/net/ethernet/intel/i40e/i40e_type.h @@ -275,6 +275,11 @@ struct i40e_hw_capabilities { #define I40E_FLEX10_STATUS_DCC_ERROR 0x1 #define I40E_FLEX10_STATUS_VC_MODE 0x2 + bool sec_rev_disabled; + bool update_disabled; +#define I40E_NVM_MGMT_SEC_REV_DISABLED 0x1 +#define I40E_NVM_MGMT_UPDATE_DISABLED 0x2 + bool mgmt_cem; bool ieee_1588; bool iwarp; diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h index aad8d6277110..1bcb8cf89801 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h @@ -426,6 +426,7 @@ struct i40e_aqc_list_capabilities_element_resp { #define I40E_AQ_CAP_ID_SDP 0x0062 #define I40E_AQ_CAP_ID_MDIO 0x0063 #define I40E_AQ_CAP_ID_WSR_PROT 0x0064 +#define I40E_AQ_CAP_ID_NVM_MGMT 0x0080 #define I40E_AQ_CAP_ID_FLEX10 0x00F1 #define I40E_AQ_CAP_ID_CEM 0x00F2 diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h index b72071363a8f..bfc97c2f22bb 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_type.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_type.h @@ -258,6 +258,11 @@ struct i40e_hw_capabilities { #define I40E_FLEX10_STATUS_DCC_ERROR 0x1 #define I40E_FLEX10_STATUS_VC_MODE 0x2 + bool sec_rev_disabled; + bool update_disabled; +#define I40E_NVM_MGMT_SEC_REV_DISABLED 0x1 +#define I40E_NVM_MGMT_UPDATE_DISABLED 0x2 + bool mgmt_cem; bool ieee_1588; bool iwarp; From bccf474435b668312e9fc8bd9586c2e256b66841 Mon Sep 17 00:00:00 2001 From: Kamil Krawczyk Date: Tue, 12 Apr 2016 08:30:47 -0700 Subject: [PATCH 1114/1649] i40e: Add DeviceID for X722 QSFP+ Change-ID: 
I1370fbc7774e815ac1ad56561e97488e829592fc Signed-off-by: Kamil Krawczyk Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_common.c | 1 + drivers/net/ethernet/intel/i40e/i40e_devids.h | 1 + drivers/net/ethernet/intel/i40evf/i40e_common.c | 1 + drivers/net/ethernet/intel/i40evf/i40e_devids.h | 1 + 4 files changed, 4 insertions(+) diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c index 34e86f55b2c0..1db4790423f1 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c @@ -61,6 +61,7 @@ static i40e_status i40e_set_mac_type(struct i40e_hw *hw) case I40E_DEV_ID_1G_BASE_T_X722: case I40E_DEV_ID_10G_BASE_T_X722: case I40E_DEV_ID_SFP_I_X722: + case I40E_DEV_ID_QSFP_I_X722: hw->mac.type = I40E_MAC_X722; break; default: diff --git a/drivers/net/ethernet/intel/i40e/i40e_devids.h b/drivers/net/ethernet/intel/i40e/i40e_devids.h index dd4457d29e98..d701861c6e1e 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_devids.h +++ b/drivers/net/ethernet/intel/i40e/i40e_devids.h @@ -45,6 +45,7 @@ #define I40E_DEV_ID_1G_BASE_T_X722 0x37D1 #define I40E_DEV_ID_10G_BASE_T_X722 0x37D2 #define I40E_DEV_ID_SFP_I_X722 0x37D3 +#define I40E_DEV_ID_QSFP_I_X722 0x37D4 #define i40e_is_40G_device(d) ((d) == I40E_DEV_ID_QSFP_A || \ (d) == I40E_DEV_ID_QSFP_B || \ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_common.c b/drivers/net/ethernet/intel/i40evf/i40e_common.c index 4db0c0326185..8f64204000fb 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_common.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_common.c @@ -59,6 +59,7 @@ i40e_status i40e_set_mac_type(struct i40e_hw *hw) case I40E_DEV_ID_1G_BASE_T_X722: case I40E_DEV_ID_10G_BASE_T_X722: case I40E_DEV_ID_SFP_I_X722: + case I40E_DEV_ID_QSFP_I_X722: hw->mac.type = I40E_MAC_X722; break; case I40E_DEV_ID_X722_VF: diff --git a/drivers/net/ethernet/intel/i40evf/i40e_devids.h b/drivers/net/ethernet/intel/i40evf/i40e_devids.h index 70235706915e..d34972bab09c 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_devids.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_devids.h @@ -45,6 +45,7 @@ #define I40E_DEV_ID_1G_BASE_T_X722 0x37D1 #define I40E_DEV_ID_10G_BASE_T_X722 0x37D2 #define I40E_DEV_ID_SFP_I_X722 0x37D3 +#define I40E_DEV_ID_QSFP_I_X722 0x37D4 #define I40E_DEV_ID_X722_VF 0x37CD #define I40E_DEV_ID_X722_VF_HV 0x37D9 From db0772782f6b99adbd8548e1d3830fe019c9f250 Mon Sep 17 00:00:00 2001 From: Greg Rose Date: Tue, 12 Apr 2016 08:30:48 -0700 Subject: [PATCH 1115/1649] i40e: Remove zero check A mirror rule ID may be zero so do not return invalid parameter when the user passes in a zero value for a rule ID. 
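The effect on i40e_aq_delete_mirrorrule() is easiest to see in condensed form (the full hunk follows): the "!rule_id -> I40E_ERR_PARAM" rejection is dropped, so a rule ID of 0 is accepted, and the remaining parameter checks apply only to the INGRESS VLAN rule type:

    /* condensed; see the hunk below for the real code */
    if (rule_type == I40E_AQC_MIRROR_RULE_TYPE_VLAN) {
        /* count and mr_list must still be valid for VLAN mirroring */
    }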
Change-ID: I261b8c24725ce2c6ed32f859da81093dfcbe2970 Signed-off-by: Greg Rose Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_common.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c index 1db4790423f1..25872f2012c5 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c @@ -2668,10 +2668,7 @@ i40e_status i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid, u16 *rules_used, u16 *rules_free) { /* Rule ID has to be valid except rule_type: INGRESS VLAN mirroring */ - if (rule_type != I40E_AQC_MIRROR_RULE_TYPE_VLAN) { - if (!rule_id) - return I40E_ERR_PARAM; - } else { + if (rule_type == I40E_AQC_MIRROR_RULE_TYPE_VLAN) { /* count and mr_list shall be valid for rule_type INGRESS VLAN * mirroring. For other rule_type, count and rule_type should * not matter. From a149f2c323b62bc6cff91d874d853250250e8497 Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Tue, 12 Apr 2016 08:30:49 -0700 Subject: [PATCH 1116/1649] i40e/i40evf: Only offload VLAN tag if enabled The driver was offloading the VLAN tag into the skb any time there was a VLAN tag and the hardware stripping was enabled. Just check to make sure it's enabled before put_tag. Change-Id: Ife95290c06edd9a616393b38679923938b382241 Signed-off-by: Jesse Brandeburg Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_txrx.c | 3 ++- drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 6e44cf118843..285efe955c64 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -1370,7 +1370,8 @@ static void i40e_receive_skb(struct i40e_ring *rx_ring, { struct i40e_q_vector *q_vector = rx_ring->q_vector; - if (vlan_tag & VLAN_VID_MASK) + if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) && + (vlan_tag & VLAN_VID_MASK)) __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag); napi_gro_receive(&q_vector->napi, skb); diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c index f101895ecf4a..4633235ee70b 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c @@ -842,7 +842,8 @@ static void i40e_receive_skb(struct i40e_ring *rx_ring, { struct i40e_q_vector *q_vector = rx_ring->q_vector; - if (vlan_tag & VLAN_VID_MASK) + if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) && + (vlan_tag & VLAN_VID_MASK)) __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag); napi_gro_receive(&q_vector->napi, skb); From 6c41a7606967803702d0b5c2ba57acce71ec8d9d Mon Sep 17 00:00:00 2001 From: Greg Rose Date: Tue, 12 Apr 2016 08:30:50 -0700 Subject: [PATCH 1117/1649] i40e: Add promiscuous on VLAN support NFV use cases require the ability to steer packets to VSIs by VLAN tag alone while being in promiscuous mode for multicast and unicast MAC addresses. These two new functions support that ability. 
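Both helpers added below wrap the set_vsi_promiscuous_modes admin-queue command and take the VSI SEID, an enable flag and the VLAN tag to match. A call site looks roughly like the sketch here; the VLAN ID 100 and the surrounding hw/vsi/pf variables are placeholders for illustration only (patch 1118 further down adds the real callers in the VF virtchnl path):

    enum i40e_status_code ret;

    /* enable unicast promiscuous reception for VLAN 100 on this VSI */
    ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw, vsi->seid, true, 100, NULL);
    if (ret)
        dev_err(&pf->pdev->dev, "enabling uc promisc on VLAN failed: %s\n",
            i40e_stat_str(hw, ret));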
Signed-off-by: Greg Rose Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_common.c | 70 +++++++++++++++++++ .../net/ethernet/intel/i40e/i40e_prototype.h | 8 +++ 2 files changed, 78 insertions(+) diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c index 25872f2012c5..0e8552b2fba0 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c @@ -2038,6 +2038,76 @@ i40e_status i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw, return status; } +/** + * i40e_aq_set_vsi_mc_promisc_on_vlan + * @hw: pointer to the hw struct + * @seid: vsi number + * @enable: set MAC L2 layer unicast promiscuous enable/disable for a given VLAN + * @vid: The VLAN tag filter - capture any multicast packet with this VLAN tag + * @cmd_details: pointer to command details structure or NULL + **/ +enum i40e_status_code i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw, + u16 seid, bool enable, + u16 vid, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_set_vsi_promiscuous_modes *cmd = + (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; + enum i40e_status_code status; + u16 flags = 0; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_set_vsi_promiscuous_modes); + + if (enable) + flags |= I40E_AQC_SET_VSI_PROMISC_MULTICAST; + + cmd->promiscuous_flags = cpu_to_le16(flags); + cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_MULTICAST); + cmd->seid = cpu_to_le16(seid); + cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** + * i40e_aq_set_vsi_uc_promisc_on_vlan + * @hw: pointer to the hw struct + * @seid: vsi number + * @enable: set MAC L2 layer unicast promiscuous enable/disable for a given VLAN + * @vid: The VLAN tag filter - capture any unicast packet with this VLAN tag + * @cmd_details: pointer to command details structure or NULL + **/ +enum i40e_status_code i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw, + u16 seid, bool enable, + u16 vid, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_set_vsi_promiscuous_modes *cmd = + (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; + enum i40e_status_code status; + u16 flags = 0; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_set_vsi_promiscuous_modes); + + if (enable) + flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST; + + cmd->promiscuous_flags = cpu_to_le16(flags); + cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST); + cmd->seid = cpu_to_le16(seid); + cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + /** * i40e_aq_set_vsi_broadcast * @hw: pointer to the hw struct diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h index 134035f53f2c..8afb2375ec9f 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h +++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h @@ -133,6 +133,14 @@ i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw, u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details); i40e_status i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw, u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code 
i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw, + u16 seid, bool enable, + u16 vid, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw, + u16 seid, bool enable, + u16 vid, + struct i40e_asq_cmd_details *cmd_details); i40e_status i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw, u16 seid, bool enable, struct i40e_asq_cmd_details *cmd_details); From 5676a8b9cd9a1c9822cdb3d88109f449eb2126c1 Mon Sep 17 00:00:00 2001 From: Anjali Singhai Jain Date: Tue, 12 Apr 2016 08:30:51 -0700 Subject: [PATCH 1118/1649] i40e: Add VF promiscuous mode driver support Add infrastructure for Network Function Virtualization VLAN tagged packet steering feature. Change-Id: I9b873d8fcc253858e6baba65ac68ec5b9363944e Signed-off-by: Anjali Singhai Jain Signed-off-by: Greg Rose Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- .../ethernet/intel/i40e/i40e_virtchnl_pf.c | 153 +++++++++++++++++- .../ethernet/intel/i40e/i40e_virtchnl_pf.h | 2 + 2 files changed, 149 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index c3645886670e..f47b0e8d02bb 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -1426,6 +1426,25 @@ static void i40e_vc_reset_vf_msg(struct i40e_vf *vf) i40e_reset_vf(vf, false); } +/** + * i40e_getnum_vf_vsi_vlan_filters + * @vsi: pointer to the vsi + * + * called to get the number of VLANs offloaded on this VF + **/ +static inline int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi) +{ + struct i40e_mac_filter *f; + int num_vlans = 0; + + list_for_each_entry(f, &vsi->mac_filter_list, list) { + if (f->vlan >= 0 && f->vlan <= I40E_MAX_VLANID) + num_vlans++; + } + + return num_vlans; +} + /** * i40e_vc_config_promiscuous_mode_msg * @vf: pointer to the VF info @@ -1442,22 +1461,122 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, (struct i40e_virtchnl_promisc_info *)msg; struct i40e_pf *pf = vf->pf; struct i40e_hw *hw = &pf->hw; - struct i40e_vsi *vsi; + struct i40e_mac_filter *f; + i40e_status aq_ret = 0; bool allmulti = false; - i40e_status aq_ret; + struct i40e_vsi *vsi; + bool alluni = false; + int aq_err = 0; vsi = i40e_find_vsi_from_id(pf, info->vsi_id); if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) || !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) || - !i40e_vc_isvalid_vsi_id(vf, info->vsi_id) || - (vsi->type != I40E_VSI_FCOE)) { + !i40e_vc_isvalid_vsi_id(vf, info->vsi_id)) { + dev_err(&pf->pdev->dev, + "VF %d doesn't meet requirements to enter promiscuous mode\n", + vf->vf_id); aq_ret = I40E_ERR_PARAM; goto error_param; } + /* Multicast promiscuous handling*/ if (info->flags & I40E_FLAG_VF_MULTICAST_PROMISC) allmulti = true; - aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid, - allmulti, NULL); + + if (vf->port_vlan_id) { + aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw, vsi->seid, + allmulti, + vf->port_vlan_id, + NULL); + } else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) { + list_for_each_entry(f, &vsi->mac_filter_list, list) { + if (f->vlan >= 0 && f->vlan <= I40E_MAX_VLANID) + aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan + (hw, + vsi->seid, + allmulti, + f->vlan, + NULL); + aq_err = pf->hw.aq.asq_last_status; + if (aq_ret) { + dev_err(&pf->pdev->dev, + "Could not add VLAN %d to multicast promiscuous domain err %s aq_err %s\n", + f->vlan, + i40e_stat_str(&pf->hw, aq_ret), + i40e_aq_str(&pf->hw, 
aq_err)); + break; + } + } + } else { + aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid, + allmulti, NULL); + aq_err = pf->hw.aq.asq_last_status; + if (aq_ret) { + dev_err(&pf->pdev->dev, + "VF %d failed to set multicast promiscuous mode err %s aq_err %s\n", + vf->vf_id, + i40e_stat_str(&pf->hw, aq_ret), + i40e_aq_str(&pf->hw, aq_err)); + goto error_param_int; + } + } + + if (!aq_ret) { + dev_info(&pf->pdev->dev, + "VF %d successfully set multicast promiscuous mode\n", + vf->vf_id); + if (allmulti) + set_bit(I40E_VF_STAT_MC_PROMISC, &vf->vf_states); + else + clear_bit(I40E_VF_STAT_MC_PROMISC, &vf->vf_states); + } + + if (info->flags & I40E_FLAG_VF_UNICAST_PROMISC) + alluni = true; + if (vf->port_vlan_id) { + aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw, vsi->seid, + alluni, + vf->port_vlan_id, + NULL); + } else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) { + list_for_each_entry(f, &vsi->mac_filter_list, list) { + aq_ret = 0; + if (f->vlan >= 0 && f->vlan <= I40E_MAX_VLANID) + aq_ret = + i40e_aq_set_vsi_uc_promisc_on_vlan(hw, + vsi->seid, + alluni, + f->vlan, + NULL); + aq_err = pf->hw.aq.asq_last_status; + if (aq_ret) + dev_err(&pf->pdev->dev, + "Could not add VLAN %d to Unicast promiscuous domain err %s aq_err %s\n", + f->vlan, + i40e_stat_str(&pf->hw, aq_ret), + i40e_aq_str(&pf->hw, aq_err)); + } + } else { + aq_ret = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid, + allmulti, NULL); + aq_err = pf->hw.aq.asq_last_status; + if (aq_ret) + dev_err(&pf->pdev->dev, + "VF %d failed to set unicast promiscuous mode %8.8x err %s aq_err %s\n", + vf->vf_id, info->flags, + i40e_stat_str(&pf->hw, aq_ret), + i40e_aq_str(&pf->hw, aq_err)); + } + +error_param_int: + if (!aq_ret) { + dev_info(&pf->pdev->dev, + "VF %d successfully set unicast promiscuous mode\n", + vf->vf_id); + if (alluni) + set_bit(I40E_VF_STAT_UC_PROMISC, &vf->vf_states); + else + clear_bit(I40E_VF_STAT_UC_PROMISC, &vf->vf_states); + } error_param: /* send the response to the VF */ @@ -1919,6 +2038,17 @@ static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) /* add new VLAN filter */ int ret = i40e_vsi_add_vlan(vsi, vfl->vlan_id[i]); + if (test_bit(I40E_VF_STAT_UC_PROMISC, &vf->vf_states)) + i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid, + true, + vfl->vlan_id[i], + NULL); + if (test_bit(I40E_VF_STAT_MC_PROMISC, &vf->vf_states)) + i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid, + true, + vfl->vlan_id[i], + NULL); + if (ret) dev_err(&pf->pdev->dev, "Unable to add VLAN filter %d for VF %d, error %d\n", @@ -1971,6 +2101,17 @@ static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) for (i = 0; i < vfl->num_elements; i++) { int ret = i40e_vsi_kill_vlan(vsi, vfl->vlan_id[i]); + if (test_bit(I40E_VF_STAT_UC_PROMISC, &vf->vf_states)) + i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid, + false, + vfl->vlan_id[i], + NULL); + if (test_bit(I40E_VF_STAT_MC_PROMISC, &vf->vf_states)) + i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid, + false, + vfl->vlan_id[i], + NULL); + if (ret) dev_err(&pf->pdev->dev, "Unable to delete VLAN filter %d for VF %d, error %d\n", diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h index 838cbd2299a4..8cbf57988607 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h @@ -61,6 +61,8 @@ enum i40e_vf_states { I40E_VF_STAT_IWARPENA, I40E_VF_STAT_FCOEENA, I40E_VF_STAT_DISABLED, + I40E_VF_STAT_MC_PROMISC, + 
I40E_VF_STAT_UC_PROMISC, }; /* VF capabilities */ From 47d3483988f649739ad8d6462eaa1723e5d077c3 Mon Sep 17 00:00:00 2001 From: Anjali Singhai Jain Date: Tue, 12 Apr 2016 08:30:52 -0700 Subject: [PATCH 1119/1649] i40evf: Add driver support for promiscuous mode Add necessary Linux Ethernet driver support for promiscuous mode operation. Add a flag so the VF knows it is in promiscuous mode and two state flags to discreetly track multicast and unicast promiscuous states. Change-Id: Ib2f2dc7a7582304fec90fc917ebb7ded21ba1de4 Signed-off-by: Anjali Singhai Jain Signed-off-by: Greg Rose Signed-off-by: Jesse Brandeburg Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- .../ethernet/intel/i40e/i40e_virtchnl_pf.c | 14 +++++++------- drivers/net/ethernet/intel/i40evf/i40evf.h | 3 +++ .../net/ethernet/intel/i40evf/i40evf_main.c | 19 +++++++++++++++++++ .../ethernet/intel/i40evf/i40evf_virtchnl.c | 11 +++++++++++ 4 files changed, 40 insertions(+), 7 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index f47b0e8d02bb..c226c2dad247 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -1489,13 +1489,13 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, NULL); } else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) { list_for_each_entry(f, &vsi->mac_filter_list, list) { - if (f->vlan >= 0 && f->vlan <= I40E_MAX_VLANID) - aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan - (hw, - vsi->seid, - allmulti, - f->vlan, - NULL); + if (f->vlan < 0 || f->vlan > I40E_MAX_VLANID) + continue; + aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw, + vsi->seid, + allmulti, + f->vlan, + NULL); aq_err = pf->hw.aq.asq_last_status; if (aq_ret) { dev_err(&pf->pdev->dev, diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h index 63f7aae2c8ce..25afabf999d0 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf.h +++ b/drivers/net/ethernet/intel/i40evf/i40evf.h @@ -220,6 +220,7 @@ struct i40evf_adapter { #define I40EVF_FLAG_WB_ON_ITR_CAPABLE BIT(11) #define I40EVF_FLAG_OUTER_UDP_CSUM_CAPABLE BIT(12) #define I40EVF_FLAG_ADDR_SET_BY_PF BIT(13) +#define I40EVF_FLAG_PROMISC_ON BIT(15) /* duplicates for common code */ #define I40E_FLAG_FDIR_ATR_ENABLED 0 #define I40E_FLAG_DCB_ENABLED 0 @@ -244,6 +245,8 @@ struct i40evf_adapter { #define I40EVF_FLAG_AQ_SET_HENA BIT(12) #define I40EVF_FLAG_AQ_SET_RSS_KEY BIT(13) #define I40EVF_FLAG_AQ_SET_RSS_LUT BIT(14) +#define I40EVF_FLAG_AQ_REQUEST_PROMISC BIT(15) +#define I40EVF_FLAG_AQ_RELEASE_PROMISC BIT(16) /* OS defined structs */ struct net_device *netdev; diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c index af53159010ab..d1c4afdd9435 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c @@ -943,6 +943,14 @@ static void i40evf_set_rx_mode(struct net_device *netdev) bottom_of_search_loop: continue; } + + if (netdev->flags & IFF_PROMISC && + !(adapter->flags & I40EVF_FLAG_PROMISC_ON)) + adapter->aq_required |= I40EVF_FLAG_AQ_REQUEST_PROMISC; + else if (!(netdev->flags & IFF_PROMISC) && + adapter->flags & I40EVF_FLAG_PROMISC_ON) + adapter->aq_required |= I40EVF_FLAG_AQ_RELEASE_PROMISC; + clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section); } @@ -1622,6 +1630,17 @@ static void i40evf_watchdog_task(struct work_struct *work) goto watchdog_done; } + if 
(adapter->aq_required & I40EVF_FLAG_AQ_REQUEST_PROMISC) { + i40evf_set_promiscuous(adapter, I40E_FLAG_VF_UNICAST_PROMISC | + I40E_FLAG_VF_MULTICAST_PROMISC); + goto watchdog_done; + } + + if (adapter->aq_required & I40EVF_FLAG_AQ_RELEASE_PROMISC) { + i40evf_set_promiscuous(adapter, 0); + goto watchdog_done; + } + if (adapter->state == __I40EVF_RUNNING) i40evf_request_stats(adapter); watchdog_done: diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c index e62c56b5a141..ba7fbc0608a6 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c @@ -652,6 +652,17 @@ void i40evf_set_promiscuous(struct i40evf_adapter *adapter, int flags) adapter->current_op); return; } + + if (flags) { + adapter->flags |= I40EVF_FLAG_PROMISC_ON; + adapter->aq_required &= ~I40EVF_FLAG_AQ_REQUEST_PROMISC; + dev_info(&adapter->pdev->dev, "Entering promiscuous mode\n"); + } else { + adapter->flags &= ~I40EVF_FLAG_PROMISC_ON; + adapter->aq_required &= ~I40EVF_FLAG_AQ_RELEASE_PROMISC; + dev_info(&adapter->pdev->dev, "Leaving promiscuous mode\n"); + } + adapter->current_op = I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE; vpi.vsi_id = adapter->vsi_res->vsi_id; vpi.flags = flags; From 35ef7d689d7d54ab345b179e50c749fe3a2529eb Mon Sep 17 00:00:00 2001 From: Akinobu Mita Date: Wed, 27 Apr 2016 05:43:48 +0900 Subject: [PATCH 1120/1649] net: w5100: support W5500 This adds support for W5500 chip. W5500 has similar register and memory organization with W5100 and W5200. There are a few important differences listed below but it is still possible to share common code with W5100 and W5200. * W5500 register and memory are organized by multiple blocks. Each one is selected by 16bits offset address and 5bits block select bits. But the existing register access operations take u16 address. This change extends the addess by u32 address and put offset address to lower 16bits and block select bits to upper 16bits. This change also adds the offset addresses for socket register and TX/RX memory blocks to the driver private data structure in order to reduce conditional switches for each chip. * W5500 has the different register offset for socket interrupt mask register. Newly added internal functions w5100_enable_intr() and w5100_disable_intr() take care of the diffrence. * W5500 has the different register offset for retry time-value register. But this register is only used to verify that the reset value is correctly read at initialization. So move the verification to w5100_hw_reset() which already does different things for different chips. Signed-off-by: Akinobu Mita Cc: Mike Sinkovsky Cc: David S. Miller Signed-off-by: David S. 
Miller --- drivers/net/ethernet/wiznet/Kconfig | 2 +- drivers/net/ethernet/wiznet/w5100-spi.c | 192 +++++++++++++++++-- drivers/net/ethernet/wiznet/w5100.c | 243 +++++++++++++++++------- drivers/net/ethernet/wiznet/w5100.h | 13 +- 4 files changed, 365 insertions(+), 85 deletions(-) diff --git a/drivers/net/ethernet/wiznet/Kconfig b/drivers/net/ethernet/wiznet/Kconfig index f3385a1999a2..1981e88c18dc 100644 --- a/drivers/net/ethernet/wiznet/Kconfig +++ b/drivers/net/ethernet/wiznet/Kconfig @@ -70,7 +70,7 @@ config WIZNET_BUS_ANY endchoice config WIZNET_W5100_SPI - tristate "WIZnet W5100/W5200 Ethernet support for SPI mode" + tristate "WIZnet W5100/W5200/W5500 Ethernet support for SPI mode" depends on WIZNET_BUS_ANY && WIZNET_W5100 depends on SPI ---help--- diff --git a/drivers/net/ethernet/wiznet/w5100-spi.c b/drivers/net/ethernet/wiznet/w5100-spi.c index 598a7b00fdb9..b868e458d0b5 100644 --- a/drivers/net/ethernet/wiznet/w5100-spi.c +++ b/drivers/net/ethernet/wiznet/w5100-spi.c @@ -1,5 +1,5 @@ /* - * Ethernet driver for the WIZnet W5100/W5200 chip. + * Ethernet driver for the WIZnet W5100/W5200/W5500 chip. * * Copyright (C) 2016 Akinobu Mita * @@ -8,6 +8,7 @@ * Datasheet: * http://www.wiznet.co.kr/wp-content/uploads/wiznethome/Chip/W5100/Document/W5100_Datasheet_v1.2.6.pdf * http://wiznethome.cafe24.com/wp-content/uploads/wiznethome/Chip/W5200/Documents/W5200_DS_V140E.pdf + * http://wizwiki.net/wiki/lib/exe/fetch.php?media=products:w5500:w5500_ds_v106e_141230.pdf */ #include @@ -21,7 +22,7 @@ #define W5100_SPI_WRITE_OPCODE 0xf0 #define W5100_SPI_READ_OPCODE 0x0f -static int w5100_spi_read(struct net_device *ndev, u16 addr) +static int w5100_spi_read(struct net_device *ndev, u32 addr) { struct spi_device *spi = to_spi_device(ndev->dev.parent); u8 cmd[3] = { W5100_SPI_READ_OPCODE, addr >> 8, addr & 0xff }; @@ -33,7 +34,7 @@ static int w5100_spi_read(struct net_device *ndev, u16 addr) return ret ? ret : data; } -static int w5100_spi_write(struct net_device *ndev, u16 addr, u8 data) +static int w5100_spi_write(struct net_device *ndev, u32 addr, u8 data) { struct spi_device *spi = to_spi_device(ndev->dev.parent); u8 cmd[4] = { W5100_SPI_WRITE_OPCODE, addr >> 8, addr & 0xff, data}; @@ -41,7 +42,7 @@ static int w5100_spi_write(struct net_device *ndev, u16 addr, u8 data) return spi_write_then_read(spi, cmd, sizeof(cmd), NULL, 0); } -static int w5100_spi_read16(struct net_device *ndev, u16 addr) +static int w5100_spi_read16(struct net_device *ndev, u32 addr) { u16 data; int ret; @@ -55,7 +56,7 @@ static int w5100_spi_read16(struct net_device *ndev, u16 addr) return ret < 0 ? 
ret : data | ret; } -static int w5100_spi_write16(struct net_device *ndev, u16 addr, u16 data) +static int w5100_spi_write16(struct net_device *ndev, u32 addr, u16 data) { int ret; @@ -66,7 +67,7 @@ static int w5100_spi_write16(struct net_device *ndev, u16 addr, u16 data) return w5100_spi_write(ndev, addr + 1, data & 0xff); } -static int w5100_spi_readbulk(struct net_device *ndev, u16 addr, u8 *buf, +static int w5100_spi_readbulk(struct net_device *ndev, u32 addr, u8 *buf, int len) { int i; @@ -82,7 +83,7 @@ static int w5100_spi_readbulk(struct net_device *ndev, u16 addr, u8 *buf, return 0; } -static int w5100_spi_writebulk(struct net_device *ndev, u16 addr, const u8 *buf, +static int w5100_spi_writebulk(struct net_device *ndev, u32 addr, const u8 *buf, int len) { int i; @@ -134,7 +135,7 @@ static int w5200_spi_init(struct net_device *ndev) return 0; } -static int w5200_spi_read(struct net_device *ndev, u16 addr) +static int w5200_spi_read(struct net_device *ndev, u32 addr) { struct spi_device *spi = to_spi_device(ndev->dev.parent); u8 cmd[4] = { addr >> 8, addr & 0xff, 0, 1 }; @@ -146,7 +147,7 @@ static int w5200_spi_read(struct net_device *ndev, u16 addr) return ret ? ret : data; } -static int w5200_spi_write(struct net_device *ndev, u16 addr, u8 data) +static int w5200_spi_write(struct net_device *ndev, u32 addr, u8 data) { struct spi_device *spi = to_spi_device(ndev->dev.parent); u8 cmd[5] = { addr >> 8, addr & 0xff, W5200_SPI_WRITE_OPCODE, 1, data }; @@ -154,7 +155,7 @@ static int w5200_spi_write(struct net_device *ndev, u16 addr, u8 data) return spi_write_then_read(spi, cmd, sizeof(cmd), NULL, 0); } -static int w5200_spi_read16(struct net_device *ndev, u16 addr) +static int w5200_spi_read16(struct net_device *ndev, u32 addr) { struct spi_device *spi = to_spi_device(ndev->dev.parent); u8 cmd[4] = { addr >> 8, addr & 0xff, 0, 2 }; @@ -166,7 +167,7 @@ static int w5200_spi_read16(struct net_device *ndev, u16 addr) return ret ? ret : be16_to_cpu(data); } -static int w5200_spi_write16(struct net_device *ndev, u16 addr, u16 data) +static int w5200_spi_write16(struct net_device *ndev, u32 addr, u16 data) { struct spi_device *spi = to_spi_device(ndev->dev.parent); u8 cmd[6] = { @@ -178,7 +179,7 @@ static int w5200_spi_write16(struct net_device *ndev, u16 addr, u16 data) return spi_write_then_read(spi, cmd, sizeof(cmd), NULL, 0); } -static int w5200_spi_readbulk(struct net_device *ndev, u16 addr, u8 *buf, +static int w5200_spi_readbulk(struct net_device *ndev, u32 addr, u8 *buf, int len) { struct spi_device *spi = to_spi_device(ndev->dev.parent); @@ -208,7 +209,7 @@ static int w5200_spi_readbulk(struct net_device *ndev, u16 addr, u8 *buf, return ret; } -static int w5200_spi_writebulk(struct net_device *ndev, u16 addr, const u8 *buf, +static int w5200_spi_writebulk(struct net_device *ndev, u32 addr, const u8 *buf, int len) { struct spi_device *spi = to_spi_device(ndev->dev.parent); @@ -250,6 +251,164 @@ static const struct w5100_ops w5200_ops = { .init = w5200_spi_init, }; +#define W5500_SPI_BLOCK_SELECT(addr) (((addr) >> 16) & 0x1f) +#define W5500_SPI_READ_CONTROL(addr) (W5500_SPI_BLOCK_SELECT(addr) << 3) +#define W5500_SPI_WRITE_CONTROL(addr) \ + ((W5500_SPI_BLOCK_SELECT(addr) << 3) | BIT(2)) + +struct w5500_spi_priv { + /* Serialize access to cmd_buf */ + struct mutex cmd_lock; + + /* DMA (thus cache coherency maintenance) requires the + * transfer buffers to live in their own cache lines. 
+ */ + u8 cmd_buf[3] ____cacheline_aligned; +}; + +static struct w5500_spi_priv *w5500_spi_priv(struct net_device *ndev) +{ + return w5100_ops_priv(ndev); +} + +static int w5500_spi_init(struct net_device *ndev) +{ + struct w5500_spi_priv *spi_priv = w5500_spi_priv(ndev); + + mutex_init(&spi_priv->cmd_lock); + + return 0; +} + +static int w5500_spi_read(struct net_device *ndev, u32 addr) +{ + struct spi_device *spi = to_spi_device(ndev->dev.parent); + u8 cmd[3] = { + addr >> 8, + addr, + W5500_SPI_READ_CONTROL(addr) + }; + u8 data; + int ret; + + ret = spi_write_then_read(spi, cmd, sizeof(cmd), &data, 1); + + return ret ? ret : data; +} + +static int w5500_spi_write(struct net_device *ndev, u32 addr, u8 data) +{ + struct spi_device *spi = to_spi_device(ndev->dev.parent); + u8 cmd[4] = { + addr >> 8, + addr, + W5500_SPI_WRITE_CONTROL(addr), + data + }; + + return spi_write_then_read(spi, cmd, sizeof(cmd), NULL, 0); +} + +static int w5500_spi_read16(struct net_device *ndev, u32 addr) +{ + struct spi_device *spi = to_spi_device(ndev->dev.parent); + u8 cmd[3] = { + addr >> 8, + addr, + W5500_SPI_READ_CONTROL(addr) + }; + __be16 data; + int ret; + + ret = spi_write_then_read(spi, cmd, sizeof(cmd), &data, sizeof(data)); + + return ret ? ret : be16_to_cpu(data); +} + +static int w5500_spi_write16(struct net_device *ndev, u32 addr, u16 data) +{ + struct spi_device *spi = to_spi_device(ndev->dev.parent); + u8 cmd[5] = { + addr >> 8, + addr, + W5500_SPI_WRITE_CONTROL(addr), + data >> 8, + data + }; + + return spi_write_then_read(spi, cmd, sizeof(cmd), NULL, 0); +} + +static int w5500_spi_readbulk(struct net_device *ndev, u32 addr, u8 *buf, + int len) +{ + struct spi_device *spi = to_spi_device(ndev->dev.parent); + struct w5500_spi_priv *spi_priv = w5500_spi_priv(ndev); + struct spi_transfer xfer[] = { + { + .tx_buf = spi_priv->cmd_buf, + .len = sizeof(spi_priv->cmd_buf), + }, + { + .rx_buf = buf, + .len = len, + }, + }; + int ret; + + mutex_lock(&spi_priv->cmd_lock); + + spi_priv->cmd_buf[0] = addr >> 8; + spi_priv->cmd_buf[1] = addr; + spi_priv->cmd_buf[2] = W5500_SPI_READ_CONTROL(addr); + ret = spi_sync_transfer(spi, xfer, ARRAY_SIZE(xfer)); + + mutex_unlock(&spi_priv->cmd_lock); + + return ret; +} + +static int w5500_spi_writebulk(struct net_device *ndev, u32 addr, const u8 *buf, + int len) +{ + struct spi_device *spi = to_spi_device(ndev->dev.parent); + struct w5500_spi_priv *spi_priv = w5500_spi_priv(ndev); + struct spi_transfer xfer[] = { + { + .tx_buf = spi_priv->cmd_buf, + .len = sizeof(spi_priv->cmd_buf), + }, + { + .tx_buf = buf, + .len = len, + }, + }; + int ret; + + mutex_lock(&spi_priv->cmd_lock); + + spi_priv->cmd_buf[0] = addr >> 8; + spi_priv->cmd_buf[1] = addr; + spi_priv->cmd_buf[2] = W5500_SPI_WRITE_CONTROL(addr); + ret = spi_sync_transfer(spi, xfer, ARRAY_SIZE(xfer)); + + mutex_unlock(&spi_priv->cmd_lock); + + return ret; +} + +static const struct w5100_ops w5500_ops = { + .may_sleep = true, + .chip_id = W5500, + .read = w5500_spi_read, + .write = w5500_spi_write, + .read16 = w5500_spi_read16, + .write16 = w5500_spi_write16, + .readbulk = w5500_spi_readbulk, + .writebulk = w5500_spi_writebulk, + .init = w5500_spi_init, +}; + static int w5100_spi_probe(struct spi_device *spi) { const struct spi_device_id *id = spi_get_device_id(spi); @@ -265,6 +424,10 @@ static int w5100_spi_probe(struct spi_device *spi) ops = &w5200_ops; priv_size = sizeof(struct w5200_spi_priv); break; + case W5500: + ops = &w5500_ops; + priv_size = sizeof(struct w5500_spi_priv); + break; default: return 
-EINVAL; } @@ -280,6 +443,7 @@ static int w5100_spi_remove(struct spi_device *spi) static const struct spi_device_id w5100_spi_ids[] = { { "w5100", W5100 }, { "w5200", W5200 }, + { "w5500", W5500 }, {} }; MODULE_DEVICE_TABLE(spi, w5100_spi_ids); @@ -295,6 +459,6 @@ static struct spi_driver w5100_spi_driver = { }; module_spi_driver(w5100_spi_driver); -MODULE_DESCRIPTION("WIZnet W5100/W5200 Ethernet driver for SPI mode"); +MODULE_DESCRIPTION("WIZnet W5100/W5200/W5500 Ethernet driver for SPI mode"); MODULE_AUTHOR("Akinobu Mita "); MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c index 09149c9ebeff..8ed0c7735ee3 100644 --- a/drivers/net/ethernet/wiznet/w5100.c +++ b/drivers/net/ethernet/wiznet/w5100.c @@ -38,7 +38,7 @@ MODULE_ALIAS("platform:"DRV_NAME); MODULE_LICENSE("GPL"); /* - * W5100 and W5100 common registers + * W5100/W5200/W5500 common registers */ #define W5100_COMMON_REGS 0x0000 #define W5100_MR 0x0000 /* Mode Register */ @@ -48,10 +48,6 @@ MODULE_LICENSE("GPL"); #define MR_IND 0x01 /* Indirect mode */ #define W5100_SHAR 0x0009 /* Source MAC address */ #define W5100_IR 0x0015 /* Interrupt Register */ -#define W5100_IMR 0x0016 /* Interrupt Mask Register */ -#define IR_S0 0x01 /* S0 interrupt */ -#define W5100_RTR 0x0017 /* Retry Time-value Register */ -#define RTR_DEFAULT 2000 /* =0x07d0 (2000) */ #define W5100_COMMON_REGS_LEN 0x0040 #define W5100_Sn_MR 0x0000 /* Sn Mode Register */ @@ -64,7 +60,7 @@ MODULE_LICENSE("GPL"); #define W5100_Sn_RX_RSR 0x0026 /* Sn Receive free memory size */ #define W5100_Sn_RX_RD 0x0028 /* Sn Receive memory read pointer */ -#define S0_REGS(priv) (is_w5200(priv) ? W5200_S0_REGS : W5100_S0_REGS) +#define S0_REGS(priv) ((priv)->s0_regs) #define W5100_S0_MR(priv) (S0_REGS(priv) + W5100_Sn_MR) #define S0_MR_MACRAW 0x04 /* MAC RAW mode (promiscuous) */ @@ -88,7 +84,15 @@ MODULE_LICENSE("GPL"); #define W5100_S0_REGS_LEN 0x0040 /* - * W5100 specific registers + * W5100 and W5200 common registers + */ +#define W5100_IMR 0x0016 /* Interrupt Mask Register */ +#define IR_S0 0x01 /* S0 interrupt */ +#define W5100_RTR 0x0017 /* Retry Time-value Register */ +#define RTR_DEFAULT 2000 /* =0x07d0 (2000) */ + +/* + * W5100 specific register and memory */ #define W5100_RMSR 0x001a /* Receive Memory Size */ #define W5100_TMSR 0x001b /* Transmit Memory Size */ @@ -101,25 +105,57 @@ MODULE_LICENSE("GPL"); #define W5100_RX_MEM_SIZE 0x2000 /* - * W5200 specific registers + * W5200 specific register and memory */ #define W5200_S0_REGS 0x4000 #define W5200_Sn_RXMEM_SIZE(n) (0x401e + (n) * 0x0100) /* Sn RX Memory Size */ #define W5200_Sn_TXMEM_SIZE(n) (0x401f + (n) * 0x0100) /* Sn TX Memory Size */ -#define W5200_S0_IMR 0x402c /* S0 Interrupt Mask Register */ #define W5200_TX_MEM_START 0x8000 #define W5200_TX_MEM_SIZE 0x4000 #define W5200_RX_MEM_START 0xc000 #define W5200_RX_MEM_SIZE 0x4000 +/* + * W5500 specific register and memory + * + * W5500 register and memory are organized by multiple blocks. Each one is + * selected by 16bits offset address and 5bits block select bits. So we + * encode it into 32bits address. 
(lower 16bits is offset address and + * upper 16bits is block select bits) + */ +#define W5500_SIMR 0x0018 /* Socket Interrupt Mask Register */ +#define W5500_RTR 0x0019 /* Retry Time-value Register */ + +#define W5500_S0_REGS 0x10000 + +#define W5500_Sn_RXMEM_SIZE(n) \ + (0x1001e + (n) * 0x40000) /* Sn RX Memory Size */ +#define W5500_Sn_TXMEM_SIZE(n) \ + (0x1001f + (n) * 0x40000) /* Sn TX Memory Size */ + +#define W5500_TX_MEM_START 0x20000 +#define W5500_TX_MEM_SIZE 0x04000 +#define W5500_RX_MEM_START 0x30000 +#define W5500_RX_MEM_SIZE 0x04000 + /* * Device driver private data structure */ struct w5100_priv { const struct w5100_ops *ops; + + /* Socket 0 register offset address */ + u32 s0_regs; + /* Socket 0 TX buffer offset address and size */ + u32 s0_tx_buf; + u16 s0_tx_buf_size; + /* Socket 0 RX buffer offset address and size */ + u32 s0_rx_buf; + u16 s0_rx_buf_size; + int irq; int link_irq; int link_gpio; @@ -172,12 +208,12 @@ static inline void __iomem *w5100_mmio(struct net_device *ndev) * * 0x8000 bytes are required for memory space. */ -static inline int w5100_read_direct(struct net_device *ndev, u16 addr) +static inline int w5100_read_direct(struct net_device *ndev, u32 addr) { return ioread8(w5100_mmio(ndev) + (addr << CONFIG_WIZNET_BUS_SHIFT)); } -static inline int __w5100_write_direct(struct net_device *ndev, u16 addr, +static inline int __w5100_write_direct(struct net_device *ndev, u32 addr, u8 data) { iowrite8(data, w5100_mmio(ndev) + (addr << CONFIG_WIZNET_BUS_SHIFT)); @@ -185,7 +221,7 @@ static inline int __w5100_write_direct(struct net_device *ndev, u16 addr, return 0; } -static inline int w5100_write_direct(struct net_device *ndev, u16 addr, u8 data) +static inline int w5100_write_direct(struct net_device *ndev, u32 addr, u8 data) { __w5100_write_direct(ndev, addr, data); mmiowb(); @@ -193,7 +229,7 @@ static inline int w5100_write_direct(struct net_device *ndev, u16 addr, u8 data) return 0; } -static int w5100_read16_direct(struct net_device *ndev, u16 addr) +static int w5100_read16_direct(struct net_device *ndev, u32 addr) { u16 data; data = w5100_read_direct(ndev, addr) << 8; @@ -201,7 +237,7 @@ static int w5100_read16_direct(struct net_device *ndev, u16 addr) return data; } -static int w5100_write16_direct(struct net_device *ndev, u16 addr, u16 data) +static int w5100_write16_direct(struct net_device *ndev, u32 addr, u16 data) { __w5100_write_direct(ndev, addr, data >> 8); __w5100_write_direct(ndev, addr + 1, data); @@ -210,7 +246,7 @@ static int w5100_write16_direct(struct net_device *ndev, u16 addr, u16 data) return 0; } -static int w5100_readbulk_direct(struct net_device *ndev, u16 addr, u8 *buf, +static int w5100_readbulk_direct(struct net_device *ndev, u32 addr, u8 *buf, int len) { int i; @@ -221,7 +257,7 @@ static int w5100_readbulk_direct(struct net_device *ndev, u16 addr, u8 *buf, return 0; } -static int w5100_writebulk_direct(struct net_device *ndev, u16 addr, +static int w5100_writebulk_direct(struct net_device *ndev, u32 addr, const u8 *buf, int len) { int i; @@ -275,7 +311,7 @@ static const struct w5100_ops w5100_mmio_direct_ops = { #define W5100_IDM_AR 0x01 /* Indirect Mode Address Register */ #define W5100_IDM_DR 0x03 /* Indirect Mode Data Register */ -static int w5100_read_indirect(struct net_device *ndev, u16 addr) +static int w5100_read_indirect(struct net_device *ndev, u32 addr) { struct w5100_mmio_priv *mmio_priv = w5100_mmio_priv(ndev); unsigned long flags; @@ -289,7 +325,7 @@ static int w5100_read_indirect(struct net_device *ndev, u16 addr) 
return data; } -static int w5100_write_indirect(struct net_device *ndev, u16 addr, u8 data) +static int w5100_write_indirect(struct net_device *ndev, u32 addr, u8 data) { struct w5100_mmio_priv *mmio_priv = w5100_mmio_priv(ndev); unsigned long flags; @@ -302,7 +338,7 @@ static int w5100_write_indirect(struct net_device *ndev, u16 addr, u8 data) return 0; } -static int w5100_read16_indirect(struct net_device *ndev, u16 addr) +static int w5100_read16_indirect(struct net_device *ndev, u32 addr) { struct w5100_mmio_priv *mmio_priv = w5100_mmio_priv(ndev); unsigned long flags; @@ -317,7 +353,7 @@ static int w5100_read16_indirect(struct net_device *ndev, u16 addr) return data; } -static int w5100_write16_indirect(struct net_device *ndev, u16 addr, u16 data) +static int w5100_write16_indirect(struct net_device *ndev, u32 addr, u16 data) { struct w5100_mmio_priv *mmio_priv = w5100_mmio_priv(ndev); unsigned long flags; @@ -331,7 +367,7 @@ static int w5100_write16_indirect(struct net_device *ndev, u16 addr, u16 data) return 0; } -static int w5100_readbulk_indirect(struct net_device *ndev, u16 addr, u8 *buf, +static int w5100_readbulk_indirect(struct net_device *ndev, u32 addr, u8 *buf, int len) { struct w5100_mmio_priv *mmio_priv = w5100_mmio_priv(ndev); @@ -350,7 +386,7 @@ static int w5100_readbulk_indirect(struct net_device *ndev, u16 addr, u8 *buf, return 0; } -static int w5100_writebulk_indirect(struct net_device *ndev, u16 addr, +static int w5100_writebulk_indirect(struct net_device *ndev, u32 addr, const u8 *buf, int len) { struct w5100_mmio_priv *mmio_priv = w5100_mmio_priv(ndev); @@ -392,32 +428,32 @@ static const struct w5100_ops w5100_mmio_indirect_ops = { #if defined(CONFIG_WIZNET_BUS_DIRECT) -static int w5100_read(struct w5100_priv *priv, u16 addr) +static int w5100_read(struct w5100_priv *priv, u32 addr) { return w5100_read_direct(priv->ndev, addr); } -static int w5100_write(struct w5100_priv *priv, u16 addr, u8 data) +static int w5100_write(struct w5100_priv *priv, u32 addr, u8 data) { return w5100_write_direct(priv->ndev, addr, data); } -static int w5100_read16(struct w5100_priv *priv, u16 addr) +static int w5100_read16(struct w5100_priv *priv, u32 addr) { return w5100_read16_direct(priv->ndev, addr); } -static int w5100_write16(struct w5100_priv *priv, u16 addr, u16 data) +static int w5100_write16(struct w5100_priv *priv, u32 addr, u16 data) { return w5100_write16_direct(priv->ndev, addr, data); } -static int w5100_readbulk(struct w5100_priv *priv, u16 addr, u8 *buf, int len) +static int w5100_readbulk(struct w5100_priv *priv, u32 addr, u8 *buf, int len) { return w5100_readbulk_direct(priv->ndev, addr, buf, len); } -static int w5100_writebulk(struct w5100_priv *priv, u16 addr, const u8 *buf, +static int w5100_writebulk(struct w5100_priv *priv, u32 addr, const u8 *buf, int len) { return w5100_writebulk_direct(priv->ndev, addr, buf, len); @@ -425,32 +461,32 @@ static int w5100_writebulk(struct w5100_priv *priv, u16 addr, const u8 *buf, #elif defined(CONFIG_WIZNET_BUS_INDIRECT) -static int w5100_read(struct w5100_priv *priv, u16 addr) +static int w5100_read(struct w5100_priv *priv, u32 addr) { return w5100_read_indirect(priv->ndev, addr); } -static int w5100_write(struct w5100_priv *priv, u16 addr, u8 data) +static int w5100_write(struct w5100_priv *priv, u32 addr, u8 data) { return w5100_write_indirect(priv->ndev, addr, data); } -static int w5100_read16(struct w5100_priv *priv, u16 addr) +static int w5100_read16(struct w5100_priv *priv, u32 addr) { return 
w5100_read16_indirect(priv->ndev, addr); } -static int w5100_write16(struct w5100_priv *priv, u16 addr, u16 data) +static int w5100_write16(struct w5100_priv *priv, u32 addr, u16 data) { return w5100_write16_indirect(priv->ndev, addr, data); } -static int w5100_readbulk(struct w5100_priv *priv, u16 addr, u8 *buf, int len) +static int w5100_readbulk(struct w5100_priv *priv, u32 addr, u8 *buf, int len) { return w5100_readbulk_indirect(priv->ndev, addr, buf, len); } -static int w5100_writebulk(struct w5100_priv *priv, u16 addr, const u8 *buf, +static int w5100_writebulk(struct w5100_priv *priv, u32 addr, const u8 *buf, int len) { return w5100_writebulk_indirect(priv->ndev, addr, buf, len); @@ -458,32 +494,32 @@ static int w5100_writebulk(struct w5100_priv *priv, u16 addr, const u8 *buf, #else /* CONFIG_WIZNET_BUS_ANY */ -static int w5100_read(struct w5100_priv *priv, u16 addr) +static int w5100_read(struct w5100_priv *priv, u32 addr) { return priv->ops->read(priv->ndev, addr); } -static int w5100_write(struct w5100_priv *priv, u16 addr, u8 data) +static int w5100_write(struct w5100_priv *priv, u32 addr, u8 data) { return priv->ops->write(priv->ndev, addr, data); } -static int w5100_read16(struct w5100_priv *priv, u16 addr) +static int w5100_read16(struct w5100_priv *priv, u32 addr) { return priv->ops->read16(priv->ndev, addr); } -static int w5100_write16(struct w5100_priv *priv, u16 addr, u16 data) +static int w5100_write16(struct w5100_priv *priv, u32 addr, u16 data) { return priv->ops->write16(priv->ndev, addr, data); } -static int w5100_readbulk(struct w5100_priv *priv, u16 addr, u8 *buf, int len) +static int w5100_readbulk(struct w5100_priv *priv, u32 addr, u8 *buf, int len) { return priv->ops->readbulk(priv->ndev, addr, buf, len); } -static int w5100_writebulk(struct w5100_priv *priv, u16 addr, const u8 *buf, +static int w5100_writebulk(struct w5100_priv *priv, u32 addr, const u8 *buf, int len) { return priv->ops->writebulk(priv->ndev, addr, buf, len); @@ -493,13 +529,11 @@ static int w5100_writebulk(struct w5100_priv *priv, u16 addr, const u8 *buf, static int w5100_readbuf(struct w5100_priv *priv, u16 offset, u8 *buf, int len) { - u16 addr; + u32 addr; int remain = 0; int ret; - const u16 mem_start = - is_w5200(priv) ? W5200_RX_MEM_START : W5100_RX_MEM_START; - const u16 mem_size = - is_w5200(priv) ? W5200_RX_MEM_SIZE : W5100_RX_MEM_SIZE; + const u32 mem_start = priv->s0_rx_buf; + const u16 mem_size = priv->s0_rx_buf_size; offset %= mem_size; addr = mem_start + offset; @@ -519,13 +553,11 @@ static int w5100_readbuf(struct w5100_priv *priv, u16 offset, u8 *buf, int len) static int w5100_writebuf(struct w5100_priv *priv, u16 offset, const u8 *buf, int len) { - u16 addr; + u32 addr; int ret; int remain = 0; - const u16 mem_start = - is_w5200(priv) ? W5200_TX_MEM_START : W5100_TX_MEM_START; - const u16 mem_size = - is_w5200(priv) ? 
W5200_TX_MEM_SIZE : W5100_TX_MEM_SIZE; + const u32 mem_start = priv->s0_tx_buf; + const u16 mem_size = priv->s0_tx_buf_size; offset %= mem_size; addr = mem_start + offset; @@ -578,6 +610,28 @@ static void w5100_write_macaddr(struct w5100_priv *priv) w5100_writebulk(priv, W5100_SHAR, ndev->dev_addr, ETH_ALEN); } +static void w5100_socket_intr_mask(struct w5100_priv *priv, u8 mask) +{ + u32 imr; + + if (priv->ops->chip_id == W5500) + imr = W5500_SIMR; + else + imr = W5100_IMR; + + w5100_write(priv, imr, mask); +} + +static void w5100_enable_intr(struct w5100_priv *priv) +{ + w5100_socket_intr_mask(priv, IR_S0); +} + +static void w5100_disable_intr(struct w5100_priv *priv) +{ + w5100_socket_intr_mask(priv, 0); +} + static void w5100_memory_configure(struct w5100_priv *priv) { /* Configure 16K of internal memory @@ -603,17 +657,52 @@ static void w5200_memory_configure(struct w5100_priv *priv) } } -static void w5100_hw_reset(struct w5100_priv *priv) +static void w5500_memory_configure(struct w5100_priv *priv) { + int i; + + /* Configure internal RX memory as 16K RX buffer and + * internal TX memory as 16K TX buffer + */ + w5100_write(priv, W5500_Sn_RXMEM_SIZE(0), 0x10); + w5100_write(priv, W5500_Sn_TXMEM_SIZE(0), 0x10); + + for (i = 1; i < 8; i++) { + w5100_write(priv, W5500_Sn_RXMEM_SIZE(i), 0); + w5100_write(priv, W5500_Sn_TXMEM_SIZE(i), 0); + } +} + +static int w5100_hw_reset(struct w5100_priv *priv) +{ + u32 rtr; + w5100_reset(priv); - w5100_write(priv, W5100_IMR, 0); + w5100_disable_intr(priv); w5100_write_macaddr(priv); - if (is_w5200(priv)) - w5200_memory_configure(priv); - else + switch (priv->ops->chip_id) { + case W5100: w5100_memory_configure(priv); + rtr = W5100_RTR; + break; + case W5200: + w5200_memory_configure(priv); + rtr = W5100_RTR; + break; + case W5500: + w5500_memory_configure(priv); + rtr = W5500_RTR; + break; + default: + return -EINVAL; + } + + if (w5100_read16(priv, rtr) != RTR_DEFAULT) + return -ENODEV; + + return 0; } static void w5100_hw_start(struct w5100_priv *priv) @@ -621,12 +710,12 @@ static void w5100_hw_start(struct w5100_priv *priv) w5100_write(priv, W5100_S0_MR(priv), priv->promisc ? 
S0_MR_MACRAW : S0_MR_MACRAW_MF); w5100_command(priv, S0_CR_OPEN); - w5100_write(priv, W5100_IMR, IR_S0); + w5100_enable_intr(priv); } static void w5100_hw_close(struct w5100_priv *priv) { - w5100_write(priv, W5100_IMR, 0); + w5100_disable_intr(priv); w5100_command(priv, S0_CR_CLOSE); } @@ -805,7 +894,7 @@ static void w5100_rx_work(struct work_struct *work) while ((skb = w5100_rx_skb(priv->ndev))) netif_rx_ni(skb); - w5100_write(priv, W5100_IMR, IR_S0); + w5100_enable_intr(priv); } static int w5100_napi_poll(struct napi_struct *napi, int budget) @@ -824,7 +913,7 @@ static int w5100_napi_poll(struct napi_struct *napi, int budget) if (rx_count < budget) { napi_complete(napi); - w5100_write(priv, W5100_IMR, IR_S0); + w5100_enable_intr(priv); } return rx_count; @@ -846,7 +935,7 @@ static irqreturn_t w5100_interrupt(int irq, void *ndev_instance) } if (ir & S0_IR_RECV) { - w5100_write(priv, W5100_IMR, 0); + w5100_disable_intr(priv); if (priv->ops->may_sleep) queue_work(priv->xfer_wq, &priv->rx_work); @@ -1014,6 +1103,34 @@ int w5100_probe(struct device *dev, const struct w5100_ops *ops, SET_NETDEV_DEV(ndev, dev); dev_set_drvdata(dev, ndev); priv = netdev_priv(ndev); + + switch (ops->chip_id) { + case W5100: + priv->s0_regs = W5100_S0_REGS; + priv->s0_tx_buf = W5100_TX_MEM_START; + priv->s0_tx_buf_size = W5100_TX_MEM_SIZE; + priv->s0_rx_buf = W5100_RX_MEM_START; + priv->s0_rx_buf_size = W5100_RX_MEM_SIZE; + break; + case W5200: + priv->s0_regs = W5200_S0_REGS; + priv->s0_tx_buf = W5200_TX_MEM_START; + priv->s0_tx_buf_size = W5200_TX_MEM_SIZE; + priv->s0_rx_buf = W5200_RX_MEM_START; + priv->s0_rx_buf_size = W5200_RX_MEM_SIZE; + break; + case W5500: + priv->s0_regs = W5500_S0_REGS; + priv->s0_tx_buf = W5500_TX_MEM_START; + priv->s0_tx_buf_size = W5500_TX_MEM_SIZE; + priv->s0_rx_buf = W5500_RX_MEM_START; + priv->s0_rx_buf_size = W5500_RX_MEM_SIZE; + break; + default: + err = -EINVAL; + goto err_register; + } + priv->ndev = ndev; priv->ops = ops; priv->irq = irq; @@ -1055,11 +1172,9 @@ int w5100_probe(struct device *dev, const struct w5100_ops *ops, goto err_hw; } - w5100_hw_reset(priv); - if (w5100_read16(priv, W5100_RTR) != RTR_DEFAULT) { - err = -ENODEV; + err = w5100_hw_reset(priv); + if (err) goto err_hw; - } if (ops->may_sleep) { err = request_threaded_irq(priv->irq, NULL, w5100_interrupt, diff --git a/drivers/net/ethernet/wiznet/w5100.h b/drivers/net/ethernet/wiznet/w5100.h index 9b1fa23b46fe..f8a16fad807b 100644 --- a/drivers/net/ethernet/wiznet/w5100.h +++ b/drivers/net/ethernet/wiznet/w5100.h @@ -10,17 +10,18 @@ enum { W5100, W5200, + W5500, }; struct w5100_ops { bool may_sleep; int chip_id; - int (*read)(struct net_device *ndev, u16 addr); - int (*write)(struct net_device *ndev, u16 addr, u8 data); - int (*read16)(struct net_device *ndev, u16 addr); - int (*write16)(struct net_device *ndev, u16 addr, u16 data); - int (*readbulk)(struct net_device *ndev, u16 addr, u8 *buf, int len); - int (*writebulk)(struct net_device *ndev, u16 addr, const u8 *buf, + int (*read)(struct net_device *ndev, u32 addr); + int (*write)(struct net_device *ndev, u32 addr, u8 data); + int (*read16)(struct net_device *ndev, u32 addr); + int (*write16)(struct net_device *ndev, u32 addr, u16 data); + int (*readbulk)(struct net_device *ndev, u32 addr, u8 *buf, int len); + int (*writebulk)(struct net_device *ndev, u32 addr, const u8 *buf, int len); int (*reset)(struct net_device *ndev); int (*init)(struct net_device *ndev); From 501e7ef569f4ea2dc7e50773cf6a5d757c94f9b4 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: 
Tue, 26 Apr 2016 15:30:07 -0700 Subject: [PATCH 1121/1649] net-rfs: fix false sharing accessing sd->input_queue_head sd->input_queue_head is incremented for each processed packet in process_backlog(), and read from other cpus performing Out Of Order avoidance in get_rps_cpu() Moving this field in a separate cache line keeps it mostly hot for the cpu in process_backlog(), as other cpus will only read it. In a stress test, process_backlog() was consuming 6.80 % of cpu cycles, and the patch reduced the cost to 0.65 % Signed-off-by: Eric Dumazet Acked-by: Tom Herbert Signed-off-by: David S. Miller --- include/linux/netdevice.h | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 18d8394f2e5d..934ca866562d 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -2747,11 +2747,15 @@ struct softnet_data { struct sk_buff *completion_queue; #ifdef CONFIG_RPS - /* Elements below can be accessed between CPUs for RPS */ + /* input_queue_head should be written by cpu owning this struct, + * and only read by other cpus. Worth using a cache line. + */ + unsigned int input_queue_head ____cacheline_aligned_in_smp; + + /* Elements below can be accessed between CPUs for RPS/RFS */ struct call_single_data csd ____cacheline_aligned_in_smp; struct softnet_data *rps_ipi_next; unsigned int cpu; - unsigned int input_queue_head; unsigned int input_queue_tail; #endif unsigned int dropped; From 6aef70a851ac77967992340faaff33f44598f60a Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Wed, 27 Apr 2016 16:44:27 -0700 Subject: [PATCH 1122/1649] net: snmp: kill various STATS_USER() helpers In the old days (before linux-3.0), SNMP counters were duplicated, one for user context, and one for BH context. After commit 8f0ea0fe3a03 ("snmp: reduce percpu needs by 50%") we have a single copy, and what really matters is preemption being enabled or disabled, since we use this_cpu_inc() or __this_cpu_inc() respectively. We therefore kill SNMP_INC_STATS_USER(), SNMP_ADD_STATS_USER(), NET_INC_STATS_USER(), NET_ADD_STATS_USER(), SCTP_INC_STATS_USER(), SNMP_INC_STATS64_USER(), SNMP_ADD_STATS64_USER(), TCP_ADD_STATS_USER(), UDP_INC_STATS_USER(), UDP6_INC_STATS_USER(), and XFRM_INC_STATS_USER() Following patches will rename __BH helpers to make clear their usage is not tied to BH being disabled. Signed-off-by: Eric Dumazet Signed-off-by: David S. 
Miller --- include/net/ip.h | 2 -- include/net/sctp/sctp.h | 1 - include/net/snmp.h | 22 +++++------------- include/net/tcp.h | 9 ++++---- include/net/udp.h | 14 ++++++------ include/net/xfrm.h | 2 -- net/ipv4/tcp.c | 12 +++++----- net/ipv4/udp.c | 24 ++++++++++---------- net/ipv6/udp.c | 49 ++++++++++++++++++++--------------------- net/sctp/chunk.c | 2 +- 10 files changed, 59 insertions(+), 78 deletions(-) diff --git a/include/net/ip.h b/include/net/ip.h index 93725e546758..ae0e85d018e8 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -194,10 +194,8 @@ void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb, #define IP_UPD_PO_STATS_BH(net, field, val) SNMP_UPD_PO_STATS64_BH((net)->mib.ip_statistics, field, val) #define NET_INC_STATS(net, field) SNMP_INC_STATS((net)->mib.net_statistics, field) #define NET_INC_STATS_BH(net, field) SNMP_INC_STATS_BH((net)->mib.net_statistics, field) -#define NET_INC_STATS_USER(net, field) SNMP_INC_STATS_USER((net)->mib.net_statistics, field) #define NET_ADD_STATS(net, field, adnd) SNMP_ADD_STATS((net)->mib.net_statistics, field, adnd) #define NET_ADD_STATS_BH(net, field, adnd) SNMP_ADD_STATS_BH((net)->mib.net_statistics, field, adnd) -#define NET_ADD_STATS_USER(net, field, adnd) SNMP_ADD_STATS_USER((net)->mib.net_statistics, field, adnd) u64 snmp_get_cpu_field(void __percpu *mib, int cpu, int offct); unsigned long snmp_fold_field(void __percpu *mib, int offt); diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h index 3f1c0ff7d4b6..5a2c4c3307a7 100644 --- a/include/net/sctp/sctp.h +++ b/include/net/sctp/sctp.h @@ -207,7 +207,6 @@ extern int sysctl_sctp_wmem[3]; /* SCTP SNMP MIB stats handlers */ #define SCTP_INC_STATS(net, field) SNMP_INC_STATS((net)->sctp.sctp_statistics, field) #define SCTP_INC_STATS_BH(net, field) SNMP_INC_STATS_BH((net)->sctp.sctp_statistics, field) -#define SCTP_INC_STATS_USER(net, field) SNMP_INC_STATS_USER((net)->sctp.sctp_statistics, field) #define SCTP_DEC_STATS(net, field) SNMP_DEC_STATS((net)->sctp.sctp_statistics, field) /* sctp mib definitions */ diff --git a/include/net/snmp.h b/include/net/snmp.h index 35512ac6dcfb..56239fc05c51 100644 --- a/include/net/snmp.h +++ b/include/net/snmp.h @@ -126,9 +126,6 @@ struct linux_xfrm_mib { #define SNMP_INC_STATS_BH(mib, field) \ __this_cpu_inc(mib->mibs[field]) -#define SNMP_INC_STATS_USER(mib, field) \ - this_cpu_inc(mib->mibs[field]) - #define SNMP_INC_STATS_ATOMIC_LONG(mib, field) \ atomic_long_inc(&mib->mibs[field]) @@ -141,9 +138,6 @@ struct linux_xfrm_mib { #define SNMP_ADD_STATS_BH(mib, field, addend) \ __this_cpu_add(mib->mibs[field], addend) -#define SNMP_ADD_STATS_USER(mib, field, addend) \ - this_cpu_add(mib->mibs[field], addend) - #define SNMP_ADD_STATS(mib, field, addend) \ this_cpu_add(mib->mibs[field], addend) #define SNMP_UPD_PO_STATS(mib, basefield, addend) \ @@ -170,18 +164,14 @@ struct linux_xfrm_mib { u64_stats_update_end(&ptr->syncp); \ } while (0) -#define SNMP_ADD_STATS64_USER(mib, field, addend) \ +#define SNMP_ADD_STATS64(mib, field, addend) \ do { \ - local_bh_disable(); \ + preempt_disable(); \ SNMP_ADD_STATS64_BH(mib, field, addend); \ - local_bh_enable(); \ + preempt_enable(); \ } while (0) -#define SNMP_ADD_STATS64(mib, field, addend) \ - SNMP_ADD_STATS64_USER(mib, field, addend) - #define SNMP_INC_STATS64_BH(mib, field) SNMP_ADD_STATS64_BH(mib, field, 1) -#define SNMP_INC_STATS64_USER(mib, field) SNMP_ADD_STATS64_USER(mib, field, 1) #define SNMP_INC_STATS64(mib, field) SNMP_ADD_STATS64(mib, field, 1) #define 
SNMP_UPD_PO_STATS64_BH(mib, basefield, addend) \ do { \ @@ -194,17 +184,15 @@ struct linux_xfrm_mib { } while (0) #define SNMP_UPD_PO_STATS64(mib, basefield, addend) \ do { \ - local_bh_disable(); \ + preempt_disable(); \ SNMP_UPD_PO_STATS64_BH(mib, basefield, addend); \ - local_bh_enable(); \ + preempt_enable(); \ } while (0) #else #define SNMP_INC_STATS64_BH(mib, field) SNMP_INC_STATS_BH(mib, field) -#define SNMP_INC_STATS64_USER(mib, field) SNMP_INC_STATS_USER(mib, field) #define SNMP_INC_STATS64(mib, field) SNMP_INC_STATS(mib, field) #define SNMP_DEC_STATS64(mib, field) SNMP_DEC_STATS(mib, field) #define SNMP_ADD_STATS64_BH(mib, field, addend) SNMP_ADD_STATS_BH(mib, field, addend) -#define SNMP_ADD_STATS64_USER(mib, field, addend) SNMP_ADD_STATS_USER(mib, field, addend) #define SNMP_ADD_STATS64(mib, field, addend) SNMP_ADD_STATS(mib, field, addend) #define SNMP_UPD_PO_STATS64(mib, basefield, addend) SNMP_UPD_PO_STATS(mib, basefield, addend) #define SNMP_UPD_PO_STATS64_BH(mib, basefield, addend) SNMP_UPD_PO_STATS_BH(mib, basefield, addend) diff --git a/include/net/tcp.h b/include/net/tcp.h index 7f2553da10d1..cfe15f712164 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -334,7 +334,6 @@ extern struct proto tcp_prot; #define TCP_INC_STATS(net, field) SNMP_INC_STATS((net)->mib.tcp_statistics, field) #define TCP_INC_STATS_BH(net, field) SNMP_INC_STATS_BH((net)->mib.tcp_statistics, field) #define TCP_DEC_STATS(net, field) SNMP_DEC_STATS((net)->mib.tcp_statistics, field) -#define TCP_ADD_STATS_USER(net, field, val) SNMP_ADD_STATS_USER((net)->mib.tcp_statistics, field, val) #define TCP_ADD_STATS(net, field, val) SNMP_ADD_STATS((net)->mib.tcp_statistics, field, val) void tcp_tasklet_init(void); @@ -1298,10 +1297,10 @@ bool tcp_oow_rate_limited(struct net *net, const struct sk_buff *skb, static inline void tcp_mib_init(struct net *net) { /* See RFC 2012 */ - TCP_ADD_STATS_USER(net, TCP_MIB_RTOALGORITHM, 1); - TCP_ADD_STATS_USER(net, TCP_MIB_RTOMIN, TCP_RTO_MIN*1000/HZ); - TCP_ADD_STATS_USER(net, TCP_MIB_RTOMAX, TCP_RTO_MAX*1000/HZ); - TCP_ADD_STATS_USER(net, TCP_MIB_MAXCONN, -1); + TCP_ADD_STATS(net, TCP_MIB_RTOALGORITHM, 1); + TCP_ADD_STATS(net, TCP_MIB_RTOMIN, TCP_RTO_MIN*1000/HZ); + TCP_ADD_STATS(net, TCP_MIB_RTOMAX, TCP_RTO_MAX*1000/HZ); + TCP_ADD_STATS(net, TCP_MIB_MAXCONN, -1); } /* from STCP */ diff --git a/include/net/udp.h b/include/net/udp.h index 3c5a65e0946d..2f37f689d85a 100644 --- a/include/net/udp.h +++ b/include/net/udp.h @@ -289,20 +289,20 @@ struct sock *udp6_lib_lookup_skb(struct sk_buff *skb, /* * SNMP statistics for UDP and UDP-Lite */ -#define UDP_INC_STATS_USER(net, field, is_udplite) do { \ - if (is_udplite) SNMP_INC_STATS_USER((net)->mib.udplite_statistics, field); \ - else SNMP_INC_STATS_USER((net)->mib.udp_statistics, field); } while(0) +#define UDP_INC_STATS(net, field, is_udplite) do { \ + if (is_udplite) SNMP_INC_STATS((net)->mib.udplite_statistics, field); \ + else SNMP_INC_STATS((net)->mib.udp_statistics, field); } while(0) #define UDP_INC_STATS_BH(net, field, is_udplite) do { \ if (is_udplite) SNMP_INC_STATS_BH((net)->mib.udplite_statistics, field); \ else SNMP_INC_STATS_BH((net)->mib.udp_statistics, field); } while(0) -#define UDP6_INC_STATS_BH(net, field, is_udplite) do { \ +#define UDP6_INC_STATS_BH(net, field, is_udplite) do { \ if (is_udplite) SNMP_INC_STATS_BH((net)->mib.udplite_stats_in6, field);\ else SNMP_INC_STATS_BH((net)->mib.udp_stats_in6, field); \ } while(0) -#define UDP6_INC_STATS_USER(net, field, __lite) do { \ - if (__lite) 
SNMP_INC_STATS_USER((net)->mib.udplite_stats_in6, field); \ - else SNMP_INC_STATS_USER((net)->mib.udp_stats_in6, field); \ +#define UDP6_INC_STATS(net, field, __lite) do { \ + if (__lite) SNMP_INC_STATS((net)->mib.udplite_stats_in6, field); \ + else SNMP_INC_STATS((net)->mib.udp_stats_in6, field); \ } while(0) #if IS_ENABLED(CONFIG_IPV6) diff --git a/include/net/xfrm.h b/include/net/xfrm.h index d6f6e5006ee9..dab9e1b82963 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -46,11 +46,9 @@ #ifdef CONFIG_XFRM_STATISTICS #define XFRM_INC_STATS(net, field) SNMP_INC_STATS((net)->mib.xfrm_statistics, field) #define XFRM_INC_STATS_BH(net, field) SNMP_INC_STATS_BH((net)->mib.xfrm_statistics, field) -#define XFRM_INC_STATS_USER(net, field) SNMP_INC_STATS_USER((net)-mib.xfrm_statistics, field) #else #define XFRM_INC_STATS(net, field) ((void)(net)) #define XFRM_INC_STATS_BH(net, field) ((void)(net)) -#define XFRM_INC_STATS_USER(net, field) ((void)(net)) #endif diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 4d73858991af..55ef55ac9e38 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -1443,7 +1443,7 @@ static void tcp_prequeue_process(struct sock *sk) struct sk_buff *skb; struct tcp_sock *tp = tcp_sk(sk); - NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPPREQUEUED); + NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPPREQUEUED); /* RX process wants to run with disabled BHs, though it is not * necessary */ @@ -1777,7 +1777,7 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock, chunk = len - tp->ucopy.len; if (chunk != 0) { - NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMBACKLOG, chunk); + NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMBACKLOG, chunk); len -= chunk; copied += chunk; } @@ -1789,7 +1789,7 @@ do_prequeue: chunk = len - tp->ucopy.len; if (chunk != 0) { - NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk); + NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk); len -= chunk; copied += chunk; } @@ -1875,7 +1875,7 @@ skip_copy: tcp_prequeue_process(sk); if (copied > 0 && (chunk = len - tp->ucopy.len) != 0) { - NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk); + NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk); len -= chunk; copied += chunk; } @@ -2065,13 +2065,13 @@ void tcp_close(struct sock *sk, long timeout) sk->sk_prot->disconnect(sk, 0); } else if (data_was_unread) { /* Unread data was tossed, zap the connection. */ - NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPABORTONCLOSE); + NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONCLOSE); tcp_set_state(sk, TCP_CLOSE); tcp_send_active_reset(sk, sk->sk_allocation); } else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) { /* Check zero linger _after_ checking for unread data. */ sk->sk_prot->disconnect(sk, 0); - NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPABORTONDATA); + NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONDATA); } else if (tcp_close_state(sk)) { /* We FIN if the application ate all the data before * zapping the connection. 
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 76ea0a8be090..00f5de9a155e 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -882,13 +882,13 @@ send: err = ip_send_skb(sock_net(sk), skb); if (err) { if (err == -ENOBUFS && !inet->recverr) { - UDP_INC_STATS_USER(sock_net(sk), - UDP_MIB_SNDBUFERRORS, is_udplite); + UDP_INC_STATS(sock_net(sk), + UDP_MIB_SNDBUFERRORS, is_udplite); err = 0; } } else - UDP_INC_STATS_USER(sock_net(sk), - UDP_MIB_OUTDATAGRAMS, is_udplite); + UDP_INC_STATS(sock_net(sk), + UDP_MIB_OUTDATAGRAMS, is_udplite); return err; } @@ -1157,8 +1157,8 @@ out: * seems like overkill. */ if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) { - UDP_INC_STATS_USER(sock_net(sk), - UDP_MIB_SNDBUFERRORS, is_udplite); + UDP_INC_STATS(sock_net(sk), + UDP_MIB_SNDBUFERRORS, is_udplite); } return err; @@ -1352,16 +1352,16 @@ try_again: trace_kfree_skb(skb, udp_recvmsg); if (!peeked) { atomic_inc(&sk->sk_drops); - UDP_INC_STATS_USER(sock_net(sk), - UDP_MIB_INERRORS, is_udplite); + UDP_INC_STATS(sock_net(sk), + UDP_MIB_INERRORS, is_udplite); } skb_free_datagram_locked(sk, skb); return err; } if (!peeked) - UDP_INC_STATS_USER(sock_net(sk), - UDP_MIB_INDATAGRAMS, is_udplite); + UDP_INC_STATS(sock_net(sk), + UDP_MIB_INDATAGRAMS, is_udplite); sock_recv_ts_and_drops(msg, sk, skb); @@ -1386,8 +1386,8 @@ try_again: csum_copy_err: slow = lock_sock_fast(sk); if (!skb_kill_datagram(sk, skb, flags)) { - UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite); - UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_INERRORS, is_udplite); + UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite); + UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite); } unlock_sock_fast(sk, slow); diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 8d8b2cd8ec5b..baa56ca41a31 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -423,24 +423,22 @@ try_again: if (!peeked) { atomic_inc(&sk->sk_drops); if (is_udp4) - UDP_INC_STATS_USER(sock_net(sk), - UDP_MIB_INERRORS, - is_udplite); + UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, + is_udplite); else - UDP6_INC_STATS_USER(sock_net(sk), - UDP_MIB_INERRORS, - is_udplite); + UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, + is_udplite); } skb_free_datagram_locked(sk, skb); return err; } if (!peeked) { if (is_udp4) - UDP_INC_STATS_USER(sock_net(sk), - UDP_MIB_INDATAGRAMS, is_udplite); + UDP_INC_STATS(sock_net(sk), UDP_MIB_INDATAGRAMS, + is_udplite); else - UDP6_INC_STATS_USER(sock_net(sk), - UDP_MIB_INDATAGRAMS, is_udplite); + UDP6_INC_STATS(sock_net(sk), UDP_MIB_INDATAGRAMS, + is_udplite); } sock_recv_ts_and_drops(msg, sk, skb); @@ -487,15 +485,15 @@ csum_copy_err: slow = lock_sock_fast(sk); if (!skb_kill_datagram(sk, skb, flags)) { if (is_udp4) { - UDP_INC_STATS_USER(sock_net(sk), - UDP_MIB_CSUMERRORS, is_udplite); - UDP_INC_STATS_USER(sock_net(sk), - UDP_MIB_INERRORS, is_udplite); + UDP_INC_STATS(sock_net(sk), + UDP_MIB_CSUMERRORS, is_udplite); + UDP_INC_STATS(sock_net(sk), + UDP_MIB_INERRORS, is_udplite); } else { - UDP6_INC_STATS_USER(sock_net(sk), - UDP_MIB_CSUMERRORS, is_udplite); - UDP6_INC_STATS_USER(sock_net(sk), - UDP_MIB_INERRORS, is_udplite); + UDP6_INC_STATS(sock_net(sk), + UDP_MIB_CSUMERRORS, is_udplite); + UDP6_INC_STATS(sock_net(sk), + UDP_MIB_INERRORS, is_udplite); } } unlock_sock_fast(sk, slow); @@ -1015,13 +1013,14 @@ send: err = ip6_send_skb(skb); if (err) { if (err == -ENOBUFS && !inet6_sk(sk)->recverr) { - UDP6_INC_STATS_USER(sock_net(sk), - UDP_MIB_SNDBUFERRORS, is_udplite); + UDP6_INC_STATS(sock_net(sk), + UDP_MIB_SNDBUFERRORS, 
is_udplite); err = 0; } - } else - UDP6_INC_STATS_USER(sock_net(sk), - UDP_MIB_OUTDATAGRAMS, is_udplite); + } else { + UDP6_INC_STATS(sock_net(sk), + UDP_MIB_OUTDATAGRAMS, is_udplite); + } return err; } @@ -1342,8 +1341,8 @@ out: * seems like overkill. */ if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) { - UDP6_INC_STATS_USER(sock_net(sk), - UDP_MIB_SNDBUFERRORS, is_udplite); + UDP6_INC_STATS(sock_net(sk), + UDP_MIB_SNDBUFERRORS, is_udplite); } return err; diff --git a/net/sctp/chunk.c b/net/sctp/chunk.c index 958ef5f33f4b..1eb94bf18ef4 100644 --- a/net/sctp/chunk.c +++ b/net/sctp/chunk.c @@ -239,7 +239,7 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc, offset = 0; if ((whole > 1) || (whole && over)) - SCTP_INC_STATS_USER(sock_net(asoc->base.sk), SCTP_MIB_FRAGUSRMSGS); + SCTP_INC_STATS(sock_net(asoc->base.sk), SCTP_MIB_FRAGUSRMSGS); /* Create chunks for all the full sized DATA chunks. */ for (i = 0, len = first_len; i < whole; i++) { From aa62d76b6e1a7c927a9e0ca39de8a93b751f3b8c Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Wed, 27 Apr 2016 16:44:28 -0700 Subject: [PATCH 1123/1649] dccp: rename DCCP_INC_STATS_BH() Rename DCCP_INC_STATS_BH() to __DCCP_INC_STATS() Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- net/dccp/dccp.h | 6 +++--- net/dccp/input.c | 2 +- net/dccp/ipv4.c | 8 ++++---- net/dccp/ipv6.c | 8 ++++---- net/dccp/minisocks.c | 2 +- net/dccp/options.c | 2 +- net/dccp/timer.c | 4 ++-- 7 files changed, 16 insertions(+), 16 deletions(-) diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h index b0e28d24e1a7..a4c6e2fed91c 100644 --- a/net/dccp/dccp.h +++ b/net/dccp/dccp.h @@ -198,9 +198,9 @@ struct dccp_mib { }; DECLARE_SNMP_STAT(struct dccp_mib, dccp_statistics); -#define DCCP_INC_STATS(field) SNMP_INC_STATS(dccp_statistics, field) -#define DCCP_INC_STATS_BH(field) SNMP_INC_STATS_BH(dccp_statistics, field) -#define DCCP_DEC_STATS(field) SNMP_DEC_STATS(dccp_statistics, field) +#define DCCP_INC_STATS(field) SNMP_INC_STATS(dccp_statistics, field) +#define __DCCP_INC_STATS(field) SNMP_INC_STATS_BH(dccp_statistics, field) +#define DCCP_DEC_STATS(field) SNMP_DEC_STATS(dccp_statistics, field) /* * Checksumming routines diff --git a/net/dccp/input.c b/net/dccp/input.c index 3bd14e885396..2437ecc13b82 100644 --- a/net/dccp/input.c +++ b/net/dccp/input.c @@ -359,7 +359,7 @@ send_sync: goto discard; } - DCCP_INC_STATS_BH(DCCP_MIB_INERRS); + __DCCP_INC_STATS(DCCP_MIB_INERRS); discard: __kfree_skb(skb); return 0; diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c index f6d183f8f332..4b78067669d6 100644 --- a/net/dccp/ipv4.c +++ b/net/dccp/ipv4.c @@ -318,7 +318,7 @@ static void dccp_v4_err(struct sk_buff *skb, u32 info) case DCCP_REQUESTING: case DCCP_RESPOND: if (!sock_owned_by_user(sk)) { - DCCP_INC_STATS_BH(DCCP_MIB_ATTEMPTFAILS); + __DCCP_INC_STATS(DCCP_MIB_ATTEMPTFAILS); sk->sk_err = err; sk->sk_error_report(sk); @@ -533,8 +533,8 @@ static void dccp_v4_ctl_send_reset(const struct sock *sk, struct sk_buff *rxskb) bh_unlock_sock(ctl_sk); if (net_xmit_eval(err) == 0) { - DCCP_INC_STATS_BH(DCCP_MIB_OUTSEGS); - DCCP_INC_STATS_BH(DCCP_MIB_OUTRSTS); + __DCCP_INC_STATS(DCCP_MIB_OUTSEGS); + __DCCP_INC_STATS(DCCP_MIB_OUTRSTS); } out: dst_release(dst); @@ -637,7 +637,7 @@ int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb) drop_and_free: reqsk_free(req); drop: - DCCP_INC_STATS_BH(DCCP_MIB_ATTEMPTFAILS); + __DCCP_INC_STATS(DCCP_MIB_ATTEMPTFAILS); return -1; } EXPORT_SYMBOL_GPL(dccp_v4_conn_request); diff --git a/net/dccp/ipv6.c 
b/net/dccp/ipv6.c index 8ceb3cebcad4..e175b8fe1a87 100644 --- a/net/dccp/ipv6.c +++ b/net/dccp/ipv6.c @@ -156,7 +156,7 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, case DCCP_RESPOND: /* Cannot happen. It can, it SYNs are crossed. --ANK */ if (!sock_owned_by_user(sk)) { - DCCP_INC_STATS_BH(DCCP_MIB_ATTEMPTFAILS); + __DCCP_INC_STATS(DCCP_MIB_ATTEMPTFAILS); sk->sk_err = err; /* * Wake people up to see the error @@ -277,8 +277,8 @@ static void dccp_v6_ctl_send_reset(const struct sock *sk, struct sk_buff *rxskb) if (!IS_ERR(dst)) { skb_dst_set(skb, dst); ip6_xmit(ctl_sk, skb, &fl6, NULL, 0); - DCCP_INC_STATS_BH(DCCP_MIB_OUTSEGS); - DCCP_INC_STATS_BH(DCCP_MIB_OUTRSTS); + __DCCP_INC_STATS(DCCP_MIB_OUTSEGS); + __DCCP_INC_STATS(DCCP_MIB_OUTRSTS); return; } @@ -378,7 +378,7 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb) drop_and_free: reqsk_free(req); drop: - DCCP_INC_STATS_BH(DCCP_MIB_ATTEMPTFAILS); + __DCCP_INC_STATS(DCCP_MIB_ATTEMPTFAILS); return -1; } diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c index 1994f8af646b..53eddf99e4f6 100644 --- a/net/dccp/minisocks.c +++ b/net/dccp/minisocks.c @@ -127,7 +127,7 @@ struct sock *dccp_create_openreq_child(const struct sock *sk, } dccp_init_xmit_timers(newsk); - DCCP_INC_STATS_BH(DCCP_MIB_PASSIVEOPENS); + __DCCP_INC_STATS(DCCP_MIB_PASSIVEOPENS); } return newsk; } diff --git a/net/dccp/options.c b/net/dccp/options.c index 9bce31886bda..b82b7ee9a1d2 100644 --- a/net/dccp/options.c +++ b/net/dccp/options.c @@ -253,7 +253,7 @@ out_nonsensical_length: return 0; out_invalid_option: - DCCP_INC_STATS_BH(DCCP_MIB_INVALIDOPT); + __DCCP_INC_STATS(DCCP_MIB_INVALIDOPT); rc = DCCP_RESET_CODE_OPTION_ERROR; out_featneg_failed: DCCP_WARN("DCCP(%p): Option %d (len=%d) error=%u\n", sk, opt, len, rc); diff --git a/net/dccp/timer.c b/net/dccp/timer.c index 3ef7acef3ce8..4ff22c24ff14 100644 --- a/net/dccp/timer.c +++ b/net/dccp/timer.c @@ -28,7 +28,7 @@ static void dccp_write_err(struct sock *sk) dccp_send_reset(sk, DCCP_RESET_CODE_ABORTED); dccp_done(sk); - DCCP_INC_STATS_BH(DCCP_MIB_ABORTONTIMEOUT); + __DCCP_INC_STATS(DCCP_MIB_ABORTONTIMEOUT); } /* A write timeout has occurred. Process the after effects. */ @@ -100,7 +100,7 @@ static void dccp_retransmit_timer(struct sock *sk) * total number of retransmissions of clones of original packets. */ if (icsk->icsk_retransmits == 0) - DCCP_INC_STATS_BH(DCCP_MIB_TIMEOUTS); + __DCCP_INC_STATS(DCCP_MIB_TIMEOUTS); if (dccp_retransmit_skb(sk) != 0) { /* From 5d3848bc33b7d13fc97b5b6e0dccde2d0755bfd5 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Wed, 27 Apr 2016 16:44:29 -0700 Subject: [PATCH 1124/1649] net: rename ICMP_INC_STATS_BH() Rename ICMP_INC_STATS_BH() to __ICMP_INC_STATS() Signed-off-by: Eric Dumazet Signed-off-by: David S. 
Miller --- include/net/icmp.h | 2 +- net/dccp/ipv4.c | 4 ++-- net/ipv4/icmp.c | 16 ++++++++-------- net/ipv4/tcp_ipv4.c | 2 +- net/ipv4/udp.c | 2 +- net/sctp/input.c | 2 +- 6 files changed, 14 insertions(+), 14 deletions(-) diff --git a/include/net/icmp.h b/include/net/icmp.h index 970028e13382..5a60ce819078 100644 --- a/include/net/icmp.h +++ b/include/net/icmp.h @@ -30,7 +30,7 @@ struct icmp_err { extern const struct icmp_err icmp_err_convert[]; #define ICMP_INC_STATS(net, field) SNMP_INC_STATS((net)->mib.icmp_statistics, field) -#define ICMP_INC_STATS_BH(net, field) SNMP_INC_STATS_BH((net)->mib.icmp_statistics, field) +#define __ICMP_INC_STATS(net, field) SNMP_INC_STATS_BH((net)->mib.icmp_statistics, field) #define ICMPMSGOUT_INC_STATS(net, field) SNMP_INC_STATS_ATOMIC_LONG((net)->mib.icmpmsg_statistics, field+256) #define ICMPMSGIN_INC_STATS_BH(net, field) SNMP_INC_STATS_ATOMIC_LONG((net)->mib.icmpmsg_statistics, field) diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c index 4b78067669d6..14e30584e59d 100644 --- a/net/dccp/ipv4.c +++ b/net/dccp/ipv4.c @@ -247,7 +247,7 @@ static void dccp_v4_err(struct sk_buff *skb, u32 info) if (skb->len < offset + sizeof(*dh) || skb->len < offset + __dccp_basic_hdr_len(dh)) { - ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS); + __ICMP_INC_STATS(net, ICMP_MIB_INERRORS); return; } @@ -256,7 +256,7 @@ static void dccp_v4_err(struct sk_buff *skb, u32 info) iph->saddr, ntohs(dh->dccph_sport), inet_iif(skb)); if (!sk) { - ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS); + __ICMP_INC_STATS(net, ICMP_MIB_INERRORS); return; } diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c index 6333489771ed..995fef9c5099 100644 --- a/net/ipv4/icmp.c +++ b/net/ipv4/icmp.c @@ -363,7 +363,7 @@ static void icmp_push_reply(struct icmp_bxm *icmp_param, icmp_param->data_len+icmp_param->head_len, icmp_param->head_len, ipc, rt, MSG_DONTWAIT) < 0) { - ICMP_INC_STATS_BH(sock_net(sk), ICMP_MIB_OUTERRORS); + __ICMP_INC_STATS(sock_net(sk), ICMP_MIB_OUTERRORS); ip_flush_pending_frames(sk); } else if ((skb = skb_peek(&sk->sk_write_queue)) != NULL) { struct icmphdr *icmph = icmp_hdr(skb); @@ -744,7 +744,7 @@ static void icmp_socket_deliver(struct sk_buff *skb, u32 info) * avoid additional coding at protocol handlers. 
*/ if (!pskb_may_pull(skb, iph->ihl * 4 + 8)) { - ICMP_INC_STATS_BH(dev_net(skb->dev), ICMP_MIB_INERRORS); + __ICMP_INC_STATS(dev_net(skb->dev), ICMP_MIB_INERRORS); return; } @@ -865,7 +865,7 @@ static bool icmp_unreach(struct sk_buff *skb) out: return true; out_err: - ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS); + __ICMP_INC_STATS(net, ICMP_MIB_INERRORS); return false; } @@ -877,7 +877,7 @@ out_err: static bool icmp_redirect(struct sk_buff *skb) { if (skb->len < sizeof(struct iphdr)) { - ICMP_INC_STATS_BH(dev_net(skb->dev), ICMP_MIB_INERRORS); + __ICMP_INC_STATS(dev_net(skb->dev), ICMP_MIB_INERRORS); return false; } @@ -956,7 +956,7 @@ static bool icmp_timestamp(struct sk_buff *skb) return true; out_err: - ICMP_INC_STATS_BH(dev_net(skb_dst(skb)->dev), ICMP_MIB_INERRORS); + __ICMP_INC_STATS(dev_net(skb_dst(skb)->dev), ICMP_MIB_INERRORS); return false; } @@ -996,7 +996,7 @@ int icmp_rcv(struct sk_buff *skb) skb_set_network_header(skb, nh); } - ICMP_INC_STATS_BH(net, ICMP_MIB_INMSGS); + __ICMP_INC_STATS(net, ICMP_MIB_INMSGS); if (skb_checksum_simple_validate(skb)) goto csum_error; @@ -1052,9 +1052,9 @@ drop: kfree_skb(skb); return 0; csum_error: - ICMP_INC_STATS_BH(net, ICMP_MIB_CSUMERRORS); + __ICMP_INC_STATS(net, ICMP_MIB_CSUMERRORS); error: - ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS); + __ICMP_INC_STATS(net, ICMP_MIB_INERRORS); goto drop; } diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index d2a5763e5abc..ebd8f3b9e61b 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -372,7 +372,7 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info) th->dest, iph->saddr, ntohs(th->source), inet_iif(icmp_skb)); if (!sk) { - ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS); + __ICMP_INC_STATS(net, ICMP_MIB_INERRORS); return; } if (sk->sk_state == TCP_TIME_WAIT) { diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 00f5de9a155e..6b004b838966 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -688,7 +688,7 @@ void __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable) iph->saddr, uh->source, skb->dev->ifindex, udptable, NULL); if (!sk) { - ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS); + __ICMP_INC_STATS(net, ICMP_MIB_INERRORS); return; /* No socket for error */ } diff --git a/net/sctp/input.c b/net/sctp/input.c index 00b8445364e3..f8eca792dbcf 100644 --- a/net/sctp/input.c +++ b/net/sctp/input.c @@ -589,7 +589,7 @@ void sctp_v4_err(struct sk_buff *skb, __u32 info) skb->network_header = saveip; skb->transport_header = savesctp; if (!sk) { - ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS); + __ICMP_INC_STATS(net, ICMP_MIB_INERRORS); return; } /* Warning: The sock lock is held. Remember to call From 02c223470c3cc30e5ff90217abea761679553ac3 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Wed, 27 Apr 2016 16:44:30 -0700 Subject: [PATCH 1125/1649] net: udp: rename UDP_INC_STATS_BH() Rename UDP_INC_STATS_BH() to __UDP_INC_STATS(), and UDP6_INC_STATS_BH() to __UDP6_INC_STATS() Signed-off-by: Eric Dumazet Signed-off-by: David S. 
Miller --- include/net/udp.h | 12 +++++------ net/ipv4/udp.c | 46 +++++++++++++++++++++---------------------- net/ipv6/udp.c | 38 +++++++++++++++++------------------ net/rxrpc/ar-input.c | 4 ++-- net/sunrpc/xprtsock.c | 4 ++-- 5 files changed, 52 insertions(+), 52 deletions(-) diff --git a/include/net/udp.h b/include/net/udp.h index 2f37f689d85a..bf6a7c29cf6a 100644 --- a/include/net/udp.h +++ b/include/net/udp.h @@ -292,11 +292,11 @@ struct sock *udp6_lib_lookup_skb(struct sk_buff *skb, #define UDP_INC_STATS(net, field, is_udplite) do { \ if (is_udplite) SNMP_INC_STATS((net)->mib.udplite_statistics, field); \ else SNMP_INC_STATS((net)->mib.udp_statistics, field); } while(0) -#define UDP_INC_STATS_BH(net, field, is_udplite) do { \ +#define __UDP_INC_STATS(net, field, is_udplite) do { \ if (is_udplite) SNMP_INC_STATS_BH((net)->mib.udplite_statistics, field); \ else SNMP_INC_STATS_BH((net)->mib.udp_statistics, field); } while(0) -#define UDP6_INC_STATS_BH(net, field, is_udplite) do { \ +#define __UDP6_INC_STATS(net, field, is_udplite) do { \ if (is_udplite) SNMP_INC_STATS_BH((net)->mib.udplite_stats_in6, field);\ else SNMP_INC_STATS_BH((net)->mib.udp_stats_in6, field); \ } while(0) @@ -306,15 +306,15 @@ struct sock *udp6_lib_lookup_skb(struct sk_buff *skb, } while(0) #if IS_ENABLED(CONFIG_IPV6) -#define UDPX_INC_STATS_BH(sk, field) \ +#define __UDPX_INC_STATS(sk, field) \ do { \ if ((sk)->sk_family == AF_INET) \ - UDP_INC_STATS_BH(sock_net(sk), field, 0); \ + __UDP_INC_STATS(sock_net(sk), field, 0); \ else \ - UDP6_INC_STATS_BH(sock_net(sk), field, 0); \ + __UDP6_INC_STATS(sock_net(sk), field, 0); \ } while (0) #else -#define UDPX_INC_STATS_BH(sk, field) UDP_INC_STATS_BH(sock_net(sk), field, 0) +#define __UDPX_INC_STATS(sk, field) __UDP_INC_STATS(sock_net(sk), field, 0) #endif /* /proc */ diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 6b004b838966..093284c5c03b 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -1242,10 +1242,10 @@ static unsigned int first_packet_length(struct sock *sk) spin_lock_bh(&rcvq->lock); while ((skb = skb_peek(rcvq)) != NULL && udp_lib_checksum_complete(skb)) { - UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, - IS_UDPLITE(sk)); - UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, - IS_UDPLITE(sk)); + __UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, + IS_UDPLITE(sk)); + __UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, + IS_UDPLITE(sk)); atomic_inc(&sk->sk_drops); __skb_unlink(skb, rcvq); __skb_queue_tail(&list_kill, skb); @@ -1514,9 +1514,9 @@ static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) /* Note that an ENOMEM error is charged twice */ if (rc == -ENOMEM) - UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS, - is_udplite); - UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite); + __UDP_INC_STATS(sock_net(sk), UDP_MIB_RCVBUFERRORS, + is_udplite); + __UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite); kfree_skb(skb); trace_udp_fail_queue_rcv_skb(rc, sk); return -1; @@ -1580,9 +1580,9 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) ret = encap_rcv(sk, skb); if (ret <= 0) { - UDP_INC_STATS_BH(sock_net(sk), - UDP_MIB_INDATAGRAMS, - is_udplite); + __UDP_INC_STATS(sock_net(sk), + UDP_MIB_INDATAGRAMS, + is_udplite); return -ret; } } @@ -1633,8 +1633,8 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) udp_csum_pull_header(skb); if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) { - UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS, - is_udplite); + __UDP_INC_STATS(sock_net(sk), UDP_MIB_RCVBUFERRORS, + 
is_udplite); goto drop; } @@ -1653,9 +1653,9 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) return rc; csum_error: - UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite); + __UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite); drop: - UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite); + __UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite); atomic_inc(&sk->sk_drops); kfree_skb(skb); return -1; @@ -1715,10 +1715,10 @@ start_lookup: if (unlikely(!nskb)) { atomic_inc(&sk->sk_drops); - UDP_INC_STATS_BH(net, UDP_MIB_RCVBUFERRORS, - IS_UDPLITE(sk)); - UDP_INC_STATS_BH(net, UDP_MIB_INERRORS, - IS_UDPLITE(sk)); + __UDP_INC_STATS(net, UDP_MIB_RCVBUFERRORS, + IS_UDPLITE(sk)); + __UDP_INC_STATS(net, UDP_MIB_INERRORS, + IS_UDPLITE(sk)); continue; } if (udp_queue_rcv_skb(sk, nskb) > 0) @@ -1736,8 +1736,8 @@ start_lookup: consume_skb(skb); } else { kfree_skb(skb); - UDP_INC_STATS_BH(net, UDP_MIB_IGNOREDMULTI, - proto == IPPROTO_UDPLITE); + __UDP_INC_STATS(net, UDP_MIB_IGNOREDMULTI, + proto == IPPROTO_UDPLITE); } return 0; } @@ -1851,7 +1851,7 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, if (udp_lib_checksum_complete(skb)) goto csum_error; - UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE); + __UDP_INC_STATS(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE); icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0); /* @@ -1878,9 +1878,9 @@ csum_error: proto == IPPROTO_UDPLITE ? "Lite" : "", &saddr, ntohs(uh->source), &daddr, ntohs(uh->dest), ulen); - UDP_INC_STATS_BH(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE); + __UDP_INC_STATS(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE); drop: - UDP_INC_STATS_BH(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE); + __UDP_INC_STATS(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE); kfree_skb(skb); return 0; } diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index baa56ca41a31..1243d22e2b1d 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -570,9 +570,9 @@ static int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) /* Note that an ENOMEM error is charged twice */ if (rc == -ENOMEM) - UDP6_INC_STATS_BH(sock_net(sk), - UDP_MIB_RCVBUFERRORS, is_udplite); - UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite); + __UDP6_INC_STATS(sock_net(sk), + UDP_MIB_RCVBUFERRORS, is_udplite); + __UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite); kfree_skb(skb); return -1; } @@ -628,9 +628,9 @@ int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) ret = encap_rcv(sk, skb); if (ret <= 0) { - UDP_INC_STATS_BH(sock_net(sk), - UDP_MIB_INDATAGRAMS, - is_udplite); + __UDP_INC_STATS(sock_net(sk), + UDP_MIB_INDATAGRAMS, + is_udplite); return -ret; } } @@ -664,8 +664,8 @@ int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) udp_csum_pull_header(skb); if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) { - UDP6_INC_STATS_BH(sock_net(sk), - UDP_MIB_RCVBUFERRORS, is_udplite); + __UDP6_INC_STATS(sock_net(sk), + UDP_MIB_RCVBUFERRORS, is_udplite); goto drop; } @@ -684,9 +684,9 @@ int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) return rc; csum_error: - UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite); + __UDP6_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite); drop: - UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite); + __UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite); atomic_inc(&sk->sk_drops); kfree_skb(skb); return -1; @@ -769,10 +769,10 @@ start_lookup: nskb = skb_clone(skb, GFP_ATOMIC); if (unlikely(!nskb)) 
{ atomic_inc(&sk->sk_drops); - UDP6_INC_STATS_BH(net, UDP_MIB_RCVBUFERRORS, - IS_UDPLITE(sk)); - UDP6_INC_STATS_BH(net, UDP_MIB_INERRORS, - IS_UDPLITE(sk)); + __UDP6_INC_STATS(net, UDP_MIB_RCVBUFERRORS, + IS_UDPLITE(sk)); + __UDP6_INC_STATS(net, UDP_MIB_INERRORS, + IS_UDPLITE(sk)); continue; } @@ -791,8 +791,8 @@ start_lookup: consume_skb(skb); } else { kfree_skb(skb); - UDP6_INC_STATS_BH(net, UDP_MIB_IGNOREDMULTI, - proto == IPPROTO_UDPLITE); + __UDP6_INC_STATS(net, UDP_MIB_IGNOREDMULTI, + proto == IPPROTO_UDPLITE); } return 0; } @@ -885,7 +885,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, if (udp_lib_checksum_complete(skb)) goto csum_error; - UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE); + __UDP6_INC_STATS(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE); icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0); kfree_skb(skb); @@ -899,9 +899,9 @@ short_packet: daddr, ntohs(uh->dest)); goto discard; csum_error: - UDP6_INC_STATS_BH(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE); + __UDP6_INC_STATS(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE); discard: - UDP6_INC_STATS_BH(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE); + __UDP6_INC_STATS(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE); kfree_skb(skb); return 0; } diff --git a/net/rxrpc/ar-input.c b/net/rxrpc/ar-input.c index 01e038146b7c..6ff97412a0bb 100644 --- a/net/rxrpc/ar-input.c +++ b/net/rxrpc/ar-input.c @@ -698,12 +698,12 @@ void rxrpc_data_ready(struct sock *sk) if (skb_checksum_complete(skb)) { rxrpc_free_skb(skb); rxrpc_put_local(local); - UDP_INC_STATS_BH(&init_net, UDP_MIB_INERRORS, 0); + __UDP_INC_STATS(&init_net, UDP_MIB_INERRORS, 0); _leave(" [CSUM failed]"); return; } - UDP_INC_STATS_BH(&init_net, UDP_MIB_INDATAGRAMS, 0); + __UDP_INC_STATS(&init_net, UDP_MIB_INDATAGRAMS, 0); /* The socket buffer we have is owned by UDP, with UDP's data all over * it, but we really want our own data there. diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index d0756ac5c0f2..a6c68dc086af 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -1018,11 +1018,11 @@ static void xs_udp_data_read_skb(struct rpc_xprt *xprt, /* Suck it into the iovec, verify checksum if not done by hw. */ if (csum_partial_copy_to_xdr(&rovr->rq_private_buf, skb)) { - UDPX_INC_STATS_BH(sk, UDP_MIB_INERRORS); + __UDPX_INC_STATS(sk, UDP_MIB_INERRORS); goto out_unlock; } - UDPX_INC_STATS_BH(sk, UDP_MIB_INDATAGRAMS); + __UDPX_INC_STATS(sk, UDP_MIB_INDATAGRAMS); xprt_adjust_cwnd(xprt, task, copied); xprt_complete_rqst(task, copied); From b540f9d702f0eedf4f2dc49472f4cf40d053d5b1 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Wed, 27 Apr 2016 16:44:31 -0700 Subject: [PATCH 1126/1649] net: xfrm: kill XFRM_INC_STATS_BH() Not used anymore. Signed-off-by: Eric Dumazet Signed-off-by: David S. 
Miller
---
 include/net/xfrm.h | 2 --
 1 file changed, 2 deletions(-)

diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index dab9e1b82963..adfebd6f243c 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -45,10 +45,8 @@
 #ifdef CONFIG_XFRM_STATISTICS
 #define XFRM_INC_STATS(net, field) SNMP_INC_STATS((net)->mib.xfrm_statistics, field)
-#define XFRM_INC_STATS_BH(net, field) SNMP_INC_STATS_BH((net)->mib.xfrm_statistics, field)
 #else
 #define XFRM_INC_STATS(net, field) ((void)(net))
-#define XFRM_INC_STATS_BH(net, field) ((void)(net))
 #endif

From 90bbcc608369a1b46089b0f5aa22b8ea31ffa12e Mon Sep 17 00:00:00 2001
From: Eric Dumazet
Date: Wed, 27 Apr 2016 16:44:32 -0700
Subject: [PATCH 1127/1649] net: tcp: rename TCP_INC_STATS_BH

Rename TCP_INC_STATS_BH() to __TCP_INC_STATS()

Signed-off-by: Eric Dumazet
Signed-off-by: David S. Miller
---
 include/net/tcp.h | 2 +-
 net/ipv4/tcp.c | 2 +-
 net/ipv4/tcp_input.c | 8 ++++----
 net/ipv4/tcp_ipv4.c | 16 ++++++++--------
 net/ipv4/tcp_minisocks.c | 4 ++--
 net/ipv4/tcp_output.c | 4 ++--
 net/ipv6/tcp_ipv6.c | 14 +++++++-------
 7 files changed, 25 insertions(+), 25 deletions(-)

diff --git a/include/net/tcp.h b/include/net/tcp.h
index cfe15f712164..939ebd5320a9 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -332,7 +332,7 @@ bool tcp_check_oom(struct sock *sk, int shift);
 extern struct proto tcp_prot;
 #define TCP_INC_STATS(net, field) SNMP_INC_STATS((net)->mib.tcp_statistics, field)
-#define TCP_INC_STATS_BH(net, field) SNMP_INC_STATS_BH((net)->mib.tcp_statistics, field)
+#define __TCP_INC_STATS(net, field) SNMP_INC_STATS_BH((net)->mib.tcp_statistics, field)
 #define TCP_DEC_STATS(net, field) SNMP_DEC_STATS((net)->mib.tcp_statistics, field)
 #define TCP_ADD_STATS(net, field, val) SNMP_ADD_STATS((net)->mib.tcp_statistics, field, val)

diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 55ef55ac9e38..96833433c2c3 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -3091,7 +3091,7 @@ void tcp_done(struct sock *sk)
 struct request_sock *req = tcp_sk(sk)->fastopen_rsk;
 if (sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV)
- TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
+ __TCP_INC_STATS(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
 tcp_set_state(sk, TCP_CLOSE);
 tcp_clear_xmit_timers(sk);

diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 967520dbe0bf..dad8d93262ed 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -5233,7 +5233,7 @@ static bool tcp_validate_incoming(struct sock *sk, struct sk_buff *skb,
 if (th->syn) {
 syn_challenge:
 if (syn_inerr)
- TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
+ __TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSYNCHALLENGE);
 tcp_send_challenge_ack(sk, skb);
 goto discard;
@@ -5349,7 +5349,7 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
 tcp_data_snd_check(sk);
 return;
 } else { /* Header too small */
- TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
+ __TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
 goto discard;
 }
 } else {
@@ -5456,8 +5456,8 @@ step5:
 return;
 csum_error:
- TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS);
- TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
+ __TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
+ __TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
 discard:
 tcp_drop(sk, skb);

diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index ebd8f3b9e61b..378e92d41c6c 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -697,8 +697,8 @@ static void tcp_v4_send_reset(const struct sock *sk, struct
sk_buff *skb) ip_hdr(skb)->saddr, ip_hdr(skb)->daddr, &arg, arg.iov[0].iov_len); - TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS); - TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS); + __TCP_INC_STATS(net, TCP_MIB_OUTSEGS); + __TCP_INC_STATS(net, TCP_MIB_OUTRSTS); #ifdef CONFIG_TCP_MD5SIG out: @@ -779,7 +779,7 @@ static void tcp_v4_send_ack(struct net *net, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr, &arg, arg.iov[0].iov_len); - TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS); + __TCP_INC_STATS(net, TCP_MIB_OUTSEGS); } static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb) @@ -1432,8 +1432,8 @@ discard: return 0; csum_err: - TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS); - TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS); + __TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS); + __TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS); goto discard; } EXPORT_SYMBOL(tcp_v4_do_rcv); @@ -1547,7 +1547,7 @@ int tcp_v4_rcv(struct sk_buff *skb) goto discard_it; /* Count it even if it's bad */ - TCP_INC_STATS_BH(net, TCP_MIB_INSEGS); + __TCP_INC_STATS(net, TCP_MIB_INSEGS); if (!pskb_may_pull(skb, sizeof(struct tcphdr))) goto discard_it; @@ -1679,9 +1679,9 @@ no_tcp_socket: if (tcp_checksum_complete(skb)) { csum_error: - TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS); + __TCP_INC_STATS(net, TCP_MIB_CSUMERRORS); bad_packet: - TCP_INC_STATS_BH(net, TCP_MIB_INERRS); + __TCP_INC_STATS(net, TCP_MIB_INERRS); } else { tcp_v4_send_reset(NULL, skb); } diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c index 4c53e7c86586..0be6bfeab553 100644 --- a/net/ipv4/tcp_minisocks.c +++ b/net/ipv4/tcp_minisocks.c @@ -545,7 +545,7 @@ struct sock *tcp_create_openreq_child(const struct sock *sk, newtp->rack.mstamp.v64 = 0; newtp->rack.advanced = 0; - TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_PASSIVEOPENS); + __TCP_INC_STATS(sock_net(sk), TCP_MIB_PASSIVEOPENS); } return newsk; } @@ -729,7 +729,7 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb, * "fourth, check the SYN bit" */ if (flg & (TCP_FLAG_RST|TCP_FLAG_SYN)) { - TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_ATTEMPTFAILS); + __TCP_INC_STATS(sock_net(sk), TCP_MIB_ATTEMPTFAILS); goto embryonic_reset; } diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 9d3b4b364652..c48baf734e8c 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -3042,7 +3042,7 @@ struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst, th->window = htons(min(req->rsk_rcv_wnd, 65535U)); tcp_options_write((__be32 *)(th + 1), NULL, &opts); th->doff = (tcp_header_size >> 2); - TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_OUTSEGS); + __TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTSEGS); #ifdef CONFIG_TCP_MD5SIG /* Okay, we have all we need - do the md5 hash if needed */ @@ -3540,7 +3540,7 @@ int tcp_rtx_synack(const struct sock *sk, struct request_sock *req) tcp_rsk(req)->txhash = net_tx_rndhash(); res = af_ops->send_synack(sk, NULL, &fl, req, NULL, TCP_SYNACK_NORMAL); if (!res) { - TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS); + __TCP_INC_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS); NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSYNRETRANS); } return res; diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 800265c7fd3f..52ca8fac7429 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -825,9 +825,9 @@ static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 if (!IS_ERR(dst)) { skb_dst_set(buff, dst); ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass); - TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS); + __TCP_INC_STATS(net, TCP_MIB_OUTSEGS); if (rst) - 
TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS); + __TCP_INC_STATS(net, TCP_MIB_OUTRSTS); return; } @@ -1276,8 +1276,8 @@ discard: kfree_skb(skb); return 0; csum_err: - TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS); - TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS); + __TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS); + __TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS); goto discard; @@ -1359,7 +1359,7 @@ static int tcp_v6_rcv(struct sk_buff *skb) /* * Count it even if it's bad. */ - TCP_INC_STATS_BH(net, TCP_MIB_INSEGS); + __TCP_INC_STATS(net, TCP_MIB_INSEGS); if (!pskb_may_pull(skb, sizeof(struct tcphdr))) goto discard_it; @@ -1472,9 +1472,9 @@ no_tcp_socket: if (tcp_checksum_complete(skb)) { csum_error: - TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS); + __TCP_INC_STATS(net, TCP_MIB_CSUMERRORS); bad_packet: - TCP_INC_STATS_BH(net, TCP_MIB_INERRS); + __TCP_INC_STATS(net, TCP_MIB_INERRS); } else { tcp_v6_send_reset(NULL, skb); } From 214d3f1f87e17357c422dad844f03be7b9d65ce7 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Wed, 27 Apr 2016 16:44:33 -0700 Subject: [PATCH 1128/1649] net: icmp: rename ICMPMSGIN_INC_STATS_BH() Remove misleading _BH suffix. Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- include/net/icmp.h | 2 +- net/ipv4/icmp.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/include/net/icmp.h b/include/net/icmp.h index 5a60ce819078..25edb740c648 100644 --- a/include/net/icmp.h +++ b/include/net/icmp.h @@ -32,7 +32,7 @@ extern const struct icmp_err icmp_err_convert[]; #define ICMP_INC_STATS(net, field) SNMP_INC_STATS((net)->mib.icmp_statistics, field) #define __ICMP_INC_STATS(net, field) SNMP_INC_STATS_BH((net)->mib.icmp_statistics, field) #define ICMPMSGOUT_INC_STATS(net, field) SNMP_INC_STATS_ATOMIC_LONG((net)->mib.icmpmsg_statistics, field+256) -#define ICMPMSGIN_INC_STATS_BH(net, field) SNMP_INC_STATS_ATOMIC_LONG((net)->mib.icmpmsg_statistics, field) +#define ICMPMSGIN_INC_STATS(net, field) SNMP_INC_STATS_ATOMIC_LONG((net)->mib.icmpmsg_statistics, field) struct dst_entry; struct net_proto_family; diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c index 995fef9c5099..38abe70e595f 100644 --- a/net/ipv4/icmp.c +++ b/net/ipv4/icmp.c @@ -1006,7 +1006,7 @@ int icmp_rcv(struct sk_buff *skb) icmph = icmp_hdr(skb); - ICMPMSGIN_INC_STATS_BH(net, icmph->type); + ICMPMSGIN_INC_STATS(net, icmph->type); /* * 18 is the highest 'known' ICMP type. Anything else is a mystery * From 08e3baef65e2e9481637a1e8fb06089ca70be707 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Wed, 27 Apr 2016 16:44:34 -0700 Subject: [PATCH 1129/1649] net: sctp: rename SCTP_INC_STATS_BH() Rename SCTP_INC_STATS_BH() to __SCTP_INC_STATS() Signed-off-by: Eric Dumazet Signed-off-by: David S. 
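/*
 * The ICMPMSGIN_INC_STATS_BH() rename in the icmp patch above is the
 * odd one out in this series: the icmpmsg counters are updated through
 * SNMP_INC_STATS_ATOMIC_LONG(), which is assumed to be a plain atomic
 * increment of the MIB slot, roughly:
 *
 *	#define SNMP_INC_STATS_ATOMIC_LONG(mib, field)	\
 *		atomic_long_inc(&(mib)->mibs[field])
 *
 * An atomic increment is safe from any context, so neither a _BH
 * suffix nor a __ prefix conveys anything there; the suffix is simply
 * dropped and callers keep the ICMPMSGIN_INC_STATS() spelling.
 */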
Miller --- include/net/sctp/sctp.h | 2 +- net/sctp/input.c | 12 ++++++------ 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h index 5a2c4c3307a7..5607c009f738 100644 --- a/include/net/sctp/sctp.h +++ b/include/net/sctp/sctp.h @@ -206,7 +206,7 @@ extern int sysctl_sctp_wmem[3]; /* SCTP SNMP MIB stats handlers */ #define SCTP_INC_STATS(net, field) SNMP_INC_STATS((net)->sctp.sctp_statistics, field) -#define SCTP_INC_STATS_BH(net, field) SNMP_INC_STATS_BH((net)->sctp.sctp_statistics, field) +#define __SCTP_INC_STATS(net, field) SNMP_INC_STATS_BH((net)->sctp.sctp_statistics, field) #define SCTP_DEC_STATS(net, field) SNMP_DEC_STATS((net)->sctp.sctp_statistics, field) /* sctp mib definitions */ diff --git a/net/sctp/input.c b/net/sctp/input.c index f8eca792dbcf..12332fc3eb44 100644 --- a/net/sctp/input.c +++ b/net/sctp/input.c @@ -84,7 +84,7 @@ static inline int sctp_rcv_checksum(struct net *net, struct sk_buff *skb) if (val != cmp) { /* CRC failure, dump it. */ - SCTP_INC_STATS_BH(net, SCTP_MIB_CHECKSUMERRORS); + __SCTP_INC_STATS(net, SCTP_MIB_CHECKSUMERRORS); return -1; } return 0; @@ -122,7 +122,7 @@ int sctp_rcv(struct sk_buff *skb) if (skb->pkt_type != PACKET_HOST) goto discard_it; - SCTP_INC_STATS_BH(net, SCTP_MIB_INSCTPPACKS); + __SCTP_INC_STATS(net, SCTP_MIB_INSCTPPACKS); if (skb_linearize(skb)) goto discard_it; @@ -208,7 +208,7 @@ int sctp_rcv(struct sk_buff *skb) */ if (!asoc) { if (sctp_rcv_ootb(skb)) { - SCTP_INC_STATS_BH(net, SCTP_MIB_OUTOFBLUES); + __SCTP_INC_STATS(net, SCTP_MIB_OUTOFBLUES); goto discard_release; } } @@ -264,9 +264,9 @@ int sctp_rcv(struct sk_buff *skb) skb = NULL; /* sctp_chunk_free already freed the skb */ goto discard_release; } - SCTP_INC_STATS_BH(net, SCTP_MIB_IN_PKT_BACKLOG); + __SCTP_INC_STATS(net, SCTP_MIB_IN_PKT_BACKLOG); } else { - SCTP_INC_STATS_BH(net, SCTP_MIB_IN_PKT_SOFTIRQ); + __SCTP_INC_STATS(net, SCTP_MIB_IN_PKT_SOFTIRQ); sctp_inq_push(&chunk->rcvr->inqueue, chunk); } @@ -281,7 +281,7 @@ int sctp_rcv(struct sk_buff *skb) return 0; discard_it: - SCTP_INC_STATS_BH(net, SCTP_MIB_IN_PKT_DISCARDS); + __SCTP_INC_STATS(net, SCTP_MIB_IN_PKT_DISCARDS); kfree_skb(skb); return 0; From b45386efa2ec4533196a24d397ec5f9f0a42abc4 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Wed, 27 Apr 2016 16:44:35 -0700 Subject: [PATCH 1130/1649] net: rename IP_INC_STATS_BH() Rename IP_INC_STATS_BH() to __IP_INC_STATS(), to better express this is used in non preemptible context. Signed-off-by: Eric Dumazet Signed-off-by: David S. 
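/*
 * The commit message above states the rationale for the whole series:
 * the old _BH suffix read as "bottom halves", while what the macro
 * really encodes is "the caller is already in non-preemptible
 * (typically softirq) context, so the cheaper non-atomic per-cpu
 * update can be used".  The double-underscore prefix follows the usual
 * kernel convention for such unchecked fast-path variants.  A
 * simplified sketch of the assumed SNMP layer underneath (not a quote
 * of include/net/snmp.h; the 64-bit counter variants are more
 * involved):
 *
 *	#define SNMP_INC_STATS(mib, field)	this_cpu_inc((mib)->mibs[field])
 *	#define SNMP_INC_STATS_BH(mib, field)	__this_cpu_inc((mib)->mibs[field])
 *
 * so IP_INC_STATS() stays usable from process context, while
 * __IP_INC_STATS() documents that the caller already has preemption
 * (or BH) disabled.
 */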
Miller --- include/net/ip.h | 2 +- net/bridge/br_netfilter_hooks.c | 6 +++--- net/dccp/ipv4.c | 2 +- net/ipv4/inet_connection_sock.c | 4 ++-- net/ipv4/ip_forward.c | 4 ++-- net/ipv4/ip_fragment.c | 14 +++++++------- net/ipv4/ip_input.c | 20 ++++++++++---------- net/ipv4/route.c | 6 +++--- 8 files changed, 29 insertions(+), 29 deletions(-) diff --git a/include/net/ip.h b/include/net/ip.h index ae0e85d018e8..0be0af3017ba 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -187,7 +187,7 @@ void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb, unsigned int len); #define IP_INC_STATS(net, field) SNMP_INC_STATS64((net)->mib.ip_statistics, field) -#define IP_INC_STATS_BH(net, field) SNMP_INC_STATS64_BH((net)->mib.ip_statistics, field) +#define __IP_INC_STATS(net, field) SNMP_INC_STATS64_BH((net)->mib.ip_statistics, field) #define IP_ADD_STATS(net, field, val) SNMP_ADD_STATS64((net)->mib.ip_statistics, field, val) #define IP_ADD_STATS_BH(net, field, val) SNMP_ADD_STATS64_BH((net)->mib.ip_statistics, field, val) #define IP_UPD_PO_STATS(net, field, val) SNMP_UPD_PO_STATS64((net)->mib.ip_statistics, field, val) diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c index 44114a94c576..2d25979273a6 100644 --- a/net/bridge/br_netfilter_hooks.c +++ b/net/bridge/br_netfilter_hooks.c @@ -217,13 +217,13 @@ static int br_validate_ipv4(struct net *net, struct sk_buff *skb) len = ntohs(iph->tot_len); if (skb->len < len) { - IP_INC_STATS_BH(net, IPSTATS_MIB_INTRUNCATEDPKTS); + __IP_INC_STATS(net, IPSTATS_MIB_INTRUNCATEDPKTS); goto drop; } else if (len < (iph->ihl*4)) goto inhdr_error; if (pskb_trim_rcsum(skb, len)) { - IP_INC_STATS_BH(net, IPSTATS_MIB_INDISCARDS); + __IP_INC_STATS(net, IPSTATS_MIB_INDISCARDS); goto drop; } @@ -236,7 +236,7 @@ static int br_validate_ipv4(struct net *net, struct sk_buff *skb) return 0; inhdr_error: - IP_INC_STATS_BH(net, IPSTATS_MIB_INHDRERRORS); + __IP_INC_STATS(net, IPSTATS_MIB_INHDRERRORS); drop: return -1; } diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c index 14e30584e59d..a9c75e79ba99 100644 --- a/net/dccp/ipv4.c +++ b/net/dccp/ipv4.c @@ -462,7 +462,7 @@ static struct dst_entry* dccp_v4_route_skb(struct net *net, struct sock *sk, security_skb_classify_flow(skb, flowi4_to_flowi(&fl4)); rt = ip_route_output_flow(net, &fl4, sk); if (IS_ERR(rt)) { - IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES); + __IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES); return NULL; } diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index ab69da2d2a77..7ce112aa3a7b 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c @@ -427,7 +427,7 @@ struct dst_entry *inet_csk_route_req(const struct sock *sk, route_err: ip_rt_put(rt); no_route: - IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES); + __IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES); return NULL; } EXPORT_SYMBOL_GPL(inet_csk_route_req); @@ -466,7 +466,7 @@ route_err: ip_rt_put(rt); no_route: rcu_read_unlock(); - IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES); + __IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES); return NULL; } EXPORT_SYMBOL_GPL(inet_csk_route_child_sock); diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c index af18f1e4889e..42fbd59b0ba8 100644 --- a/net/ipv4/ip_forward.c +++ b/net/ipv4/ip_forward.c @@ -65,7 +65,7 @@ static int ip_forward_finish(struct net *net, struct sock *sk, struct sk_buff *s { struct ip_options *opt = &(IPCB(skb)->opt); - IP_INC_STATS_BH(net, IPSTATS_MIB_OUTFORWDATAGRAMS); + __IP_INC_STATS(net, IPSTATS_MIB_OUTFORWDATAGRAMS); 
IP_ADD_STATS_BH(net, IPSTATS_MIB_OUTOCTETS, skb->len); if (unlikely(opt->optlen)) @@ -157,7 +157,7 @@ sr_failed: too_many_hops: /* Tell the sender its packet died... */ - IP_INC_STATS_BH(net, IPSTATS_MIB_INHDRERRORS); + __IP_INC_STATS(net, IPSTATS_MIB_INHDRERRORS); icmp_send(skb, ICMP_TIME_EXCEEDED, ICMP_EXC_TTL, 0); drop: kfree_skb(skb); diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c index efbd47d1a531..bbe7f72db9c1 100644 --- a/net/ipv4/ip_fragment.c +++ b/net/ipv4/ip_fragment.c @@ -204,14 +204,14 @@ static void ip_expire(unsigned long arg) goto out; ipq_kill(qp); - IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS); + __IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS); if (!inet_frag_evicting(&qp->q)) { struct sk_buff *head = qp->q.fragments; const struct iphdr *iph; int err; - IP_INC_STATS_BH(net, IPSTATS_MIB_REASMTIMEOUT); + __IP_INC_STATS(net, IPSTATS_MIB_REASMTIMEOUT); if (!(qp->q.flags & INET_FRAG_FIRST_IN) || !qp->q.fragments) goto out; @@ -291,7 +291,7 @@ static int ip_frag_too_far(struct ipq *qp) struct net *net; net = container_of(qp->q.net, struct net, ipv4.frags); - IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS); + __IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS); } return rc; @@ -635,7 +635,7 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev, ip_send_check(iph); - IP_INC_STATS_BH(net, IPSTATS_MIB_REASMOKS); + __IP_INC_STATS(net, IPSTATS_MIB_REASMOKS); qp->q.fragments = NULL; qp->q.fragments_tail = NULL; return 0; @@ -647,7 +647,7 @@ out_nomem: out_oversize: net_info_ratelimited("Oversized IP packet from %pI4\n", &qp->saddr); out_fail: - IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS); + __IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS); return err; } @@ -658,7 +658,7 @@ int ip_defrag(struct net *net, struct sk_buff *skb, u32 user) int vif = l3mdev_master_ifindex_rcu(dev); struct ipq *qp; - IP_INC_STATS_BH(net, IPSTATS_MIB_REASMREQDS); + __IP_INC_STATS(net, IPSTATS_MIB_REASMREQDS); skb_orphan(skb); /* Lookup (or create) queue header */ @@ -675,7 +675,7 @@ int ip_defrag(struct net *net, struct sk_buff *skb, u32 user) return ret; } - IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS); + __IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS); kfree_skb(skb); return -ENOMEM; } diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c index e3d782746d9d..cca6729cd6ee 100644 --- a/net/ipv4/ip_input.c +++ b/net/ipv4/ip_input.c @@ -218,17 +218,17 @@ static int ip_local_deliver_finish(struct net *net, struct sock *sk, struct sk_b protocol = -ret; goto resubmit; } - IP_INC_STATS_BH(net, IPSTATS_MIB_INDELIVERS); + __IP_INC_STATS(net, IPSTATS_MIB_INDELIVERS); } else { if (!raw) { if (xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) { - IP_INC_STATS_BH(net, IPSTATS_MIB_INUNKNOWNPROTOS); + __IP_INC_STATS(net, IPSTATS_MIB_INUNKNOWNPROTOS); icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PROT_UNREACH, 0); } kfree_skb(skb); } else { - IP_INC_STATS_BH(net, IPSTATS_MIB_INDELIVERS); + __IP_INC_STATS(net, IPSTATS_MIB_INDELIVERS); consume_skb(skb); } } @@ -273,7 +273,7 @@ static inline bool ip_rcv_options(struct sk_buff *skb) --ANK (980813) */ if (skb_cow(skb, skb_headroom(skb))) { - IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INDISCARDS); + __IP_INC_STATS(dev_net(dev), IPSTATS_MIB_INDISCARDS); goto drop; } @@ -282,7 +282,7 @@ static inline bool ip_rcv_options(struct sk_buff *skb) opt->optlen = iph->ihl*4 - sizeof(struct iphdr); if (ip_options_compile(dev_net(dev), opt, skb)) { - IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INHDRERRORS); + __IP_INC_STATS(dev_net(dev), IPSTATS_MIB_INHDRERRORS); goto drop; } @@ -413,7 +413,7 @@ int 
ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, skb = skb_share_check(skb, GFP_ATOMIC); if (!skb) { - IP_INC_STATS_BH(net, IPSTATS_MIB_INDISCARDS); + __IP_INC_STATS(net, IPSTATS_MIB_INDISCARDS); goto out; } @@ -453,7 +453,7 @@ int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, len = ntohs(iph->tot_len); if (skb->len < len) { - IP_INC_STATS_BH(net, IPSTATS_MIB_INTRUNCATEDPKTS); + __IP_INC_STATS(net, IPSTATS_MIB_INTRUNCATEDPKTS); goto drop; } else if (len < (iph->ihl*4)) goto inhdr_error; @@ -463,7 +463,7 @@ int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, * Note this now means skb->len holds ntohs(iph->tot_len). */ if (pskb_trim_rcsum(skb, len)) { - IP_INC_STATS_BH(net, IPSTATS_MIB_INDISCARDS); + __IP_INC_STATS(net, IPSTATS_MIB_INDISCARDS); goto drop; } @@ -480,9 +480,9 @@ int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, ip_rcv_finish); csum_error: - IP_INC_STATS_BH(net, IPSTATS_MIB_CSUMERRORS); + __IP_INC_STATS(net, IPSTATS_MIB_CSUMERRORS); inhdr_error: - IP_INC_STATS_BH(net, IPSTATS_MIB_INHDRERRORS); + __IP_INC_STATS(net, IPSTATS_MIB_INHDRERRORS); drop: kfree_skb(skb); out: diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 60398a9370e7..8c8c655bb2c4 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -915,11 +915,11 @@ static int ip_error(struct sk_buff *skb) if (!IN_DEV_FORWARD(in_dev)) { switch (rt->dst.error) { case EHOSTUNREACH: - IP_INC_STATS_BH(net, IPSTATS_MIB_INADDRERRORS); + __IP_INC_STATS(net, IPSTATS_MIB_INADDRERRORS); break; case ENETUNREACH: - IP_INC_STATS_BH(net, IPSTATS_MIB_INNOROUTES); + __IP_INC_STATS(net, IPSTATS_MIB_INNOROUTES); break; } goto out; @@ -934,7 +934,7 @@ static int ip_error(struct sk_buff *skb) break; case ENETUNREACH: code = ICMP_NET_UNREACH; - IP_INC_STATS_BH(net, IPSTATS_MIB_INNOROUTES); + __IP_INC_STATS(net, IPSTATS_MIB_INNOROUTES); break; case EACCES: code = ICMP_PKT_FILTERED; From a16292a0f0e0cef40ed51685dfde12b3002959b5 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Wed, 27 Apr 2016 16:44:36 -0700 Subject: [PATCH 1131/1649] net: rename ICMP6_INC_STATS_BH() Rename ICMP6_INC_STATS_BH() to __ICMP6_INC_STATS() Signed-off-by: Eric Dumazet Signed-off-by: David S. 
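/*
 * For IPv6 the rename is caller-facing only: as the include/net/ipv6.h
 * hunk below shows, ICMP6_INC_STATS() and __ICMP6_INC_STATS() both
 * expand through _DEVINCATOMIC(net, icmpv6, <modifier>, idev, field),
 * with "" or "_BH" passed as the modifier and token-pasted onto the
 * underlying SNMP helper.  Very roughly (illustrative only; the real
 * helper also bumps the per-device counters and checks idev):
 *
 *	#define _DEVINCATOMIC(net, statname, mod, idev, field)	\
 *		SNMP_INC_STATS##mod((net)->mib.statname##_statistics, field)
 *
 * The _BH spelling therefore survives inside the helper and can only
 * disappear once the SNMP_*_BH primitives are themselves renamed.
 */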
Miller --- include/net/ipv6.h | 2 +- net/dccp/ipv6.c | 8 ++++---- net/ipv6/icmp.c | 10 +++++----- net/ipv6/tcp_ipv6.c | 4 ++-- net/ipv6/udp.c | 4 ++-- net/sctp/ipv6.c | 2 +- 6 files changed, 15 insertions(+), 15 deletions(-) diff --git a/include/net/ipv6.h b/include/net/ipv6.h index e93e947d04ff..a620fc56e2f5 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h @@ -179,7 +179,7 @@ extern int sysctl_mld_qrv; _DEVUPD(net, ipv6, 64_BH, idev, field, val) #define ICMP6_INC_STATS(net, idev, field) \ _DEVINCATOMIC(net, icmpv6, , idev, field) -#define ICMP6_INC_STATS_BH(net, idev, field) \ +#define __ICMP6_INC_STATS(net, idev, field) \ _DEVINCATOMIC(net, icmpv6, _BH, idev, field) #define ICMP6MSGOUT_INC_STATS(net, idev, field) \ diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c index e175b8fe1a87..323c6b595e31 100644 --- a/net/dccp/ipv6.c +++ b/net/dccp/ipv6.c @@ -80,8 +80,8 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, if (skb->len < offset + sizeof(*dh) || skb->len < offset + __dccp_basic_hdr_len(dh)) { - ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev), - ICMP6_MIB_INERRORS); + __ICMP6_INC_STATS(net, __in6_dev_get(skb->dev), + ICMP6_MIB_INERRORS); return; } @@ -91,8 +91,8 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, inet6_iif(skb)); if (!sk) { - ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev), - ICMP6_MIB_INERRORS); + __ICMP6_INC_STATS(net, __in6_dev_get(skb->dev), + ICMP6_MIB_INERRORS); return; } diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c index 6b573ebe49de..823a1fc576e3 100644 --- a/net/ipv6/icmp.c +++ b/net/ipv6/icmp.c @@ -622,7 +622,7 @@ static void icmpv6_echo_reply(struct sk_buff *skb) np->dontfrag, &sockc_unused); if (err) { - ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTERRORS); + __ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTERRORS); ip6_flush_pending_frames(sk); } else { err = icmpv6_push_pending_frames(sk, &fl6, &tmp_hdr, @@ -674,7 +674,7 @@ void icmpv6_notify(struct sk_buff *skb, u8 type, u8 code, __be32 info) return; out: - ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev), ICMP6_MIB_INERRORS); + __ICMP6_INC_STATS(net, __in6_dev_get(skb->dev), ICMP6_MIB_INERRORS); } /* @@ -710,7 +710,7 @@ static int icmpv6_rcv(struct sk_buff *skb) skb_set_network_header(skb, nh); } - ICMP6_INC_STATS_BH(dev_net(dev), idev, ICMP6_MIB_INMSGS); + __ICMP6_INC_STATS(dev_net(dev), idev, ICMP6_MIB_INMSGS); saddr = &ipv6_hdr(skb)->saddr; daddr = &ipv6_hdr(skb)->daddr; @@ -812,9 +812,9 @@ static int icmpv6_rcv(struct sk_buff *skb) return 0; csum_error: - ICMP6_INC_STATS_BH(dev_net(dev), idev, ICMP6_MIB_CSUMERRORS); + __ICMP6_INC_STATS(dev_net(dev), idev, ICMP6_MIB_CSUMERRORS); discard_it: - ICMP6_INC_STATS_BH(dev_net(dev), idev, ICMP6_MIB_INERRORS); + __ICMP6_INC_STATS(dev_net(dev), idev, ICMP6_MIB_INERRORS); drop_no_count: kfree_skb(skb); return 0; diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 52ca8fac7429..78c45c027acc 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -336,8 +336,8 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, skb->dev->ifindex); if (!sk) { - ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev), - ICMP6_MIB_INERRORS); + __ICMP6_INC_STATS(net, __in6_dev_get(skb->dev), + ICMP6_MIB_INERRORS); return; } diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 1243d22e2b1d..1ba5a74ac18f 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -521,8 +521,8 @@ void __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt, sk = __udp6_lib_lookup(net, daddr, uh->dest, saddr, uh->source, 
inet6_iif(skb), udptable, skb); if (!sk) { - ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev), - ICMP6_MIB_INERRORS); + __ICMP6_INC_STATS(net, __in6_dev_get(skb->dev), + ICMP6_MIB_INERRORS); return; } diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c index ce46f1c7f133..0657d18a85bf 100644 --- a/net/sctp/ipv6.c +++ b/net/sctp/ipv6.c @@ -162,7 +162,7 @@ static void sctp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, skb->network_header = saveip; skb->transport_header = savesctp; if (!sk) { - ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_INERRORS); + __ICMP6_INC_STATS(net, idev, ICMP6_MIB_INERRORS); goto out; } From 98f619957ec2717fea09b398957e130e4bf4b30c Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Wed, 27 Apr 2016 16:44:37 -0700 Subject: [PATCH 1132/1649] net: rename IP_ADD_STATS_BH() Rename IP_ADD_STATS_BH() to __IP_ADD_STATS() Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- include/net/ip.h | 2 +- net/ipv4/ip_forward.c | 2 +- net/ipv4/ip_input.c | 6 +++--- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/include/net/ip.h b/include/net/ip.h index 0be0af3017ba..0df4809bc68a 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -189,7 +189,7 @@ void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb, #define IP_INC_STATS(net, field) SNMP_INC_STATS64((net)->mib.ip_statistics, field) #define __IP_INC_STATS(net, field) SNMP_INC_STATS64_BH((net)->mib.ip_statistics, field) #define IP_ADD_STATS(net, field, val) SNMP_ADD_STATS64((net)->mib.ip_statistics, field, val) -#define IP_ADD_STATS_BH(net, field, val) SNMP_ADD_STATS64_BH((net)->mib.ip_statistics, field, val) +#define __IP_ADD_STATS(net, field, val) SNMP_ADD_STATS64_BH((net)->mib.ip_statistics, field, val) #define IP_UPD_PO_STATS(net, field, val) SNMP_UPD_PO_STATS64((net)->mib.ip_statistics, field, val) #define IP_UPD_PO_STATS_BH(net, field, val) SNMP_UPD_PO_STATS64_BH((net)->mib.ip_statistics, field, val) #define NET_INC_STATS(net, field) SNMP_INC_STATS((net)->mib.net_statistics, field) diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c index 42fbd59b0ba8..cbfb1808fcc4 100644 --- a/net/ipv4/ip_forward.c +++ b/net/ipv4/ip_forward.c @@ -66,7 +66,7 @@ static int ip_forward_finish(struct net *net, struct sock *sk, struct sk_buff *s struct ip_options *opt = &(IPCB(skb)->opt); __IP_INC_STATS(net, IPSTATS_MIB_OUTFORWDATAGRAMS); - IP_ADD_STATS_BH(net, IPSTATS_MIB_OUTOCTETS, skb->len); + __IP_ADD_STATS(net, IPSTATS_MIB_OUTOCTETS, skb->len); if (unlikely(opt->optlen)) ip_forward_options(skb); diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c index cca6729cd6ee..11f34e421270 100644 --- a/net/ipv4/ip_input.c +++ b/net/ipv4/ip_input.c @@ -439,9 +439,9 @@ int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, BUILD_BUG_ON(IPSTATS_MIB_ECT1PKTS != IPSTATS_MIB_NOECTPKTS + INET_ECN_ECT_1); BUILD_BUG_ON(IPSTATS_MIB_ECT0PKTS != IPSTATS_MIB_NOECTPKTS + INET_ECN_ECT_0); BUILD_BUG_ON(IPSTATS_MIB_CEPKTS != IPSTATS_MIB_NOECTPKTS + INET_ECN_CE); - IP_ADD_STATS_BH(net, - IPSTATS_MIB_NOECTPKTS + (iph->tos & INET_ECN_MASK), - max_t(unsigned short, 1, skb_shinfo(skb)->gso_segs)); + __IP_ADD_STATS(net, + IPSTATS_MIB_NOECTPKTS + (iph->tos & INET_ECN_MASK), + max_t(unsigned short, 1, skb_shinfo(skb)->gso_segs)); if (!pskb_may_pull(skb, iph->ihl*4)) goto inhdr_error; From b15084ec7d4c89000242d69b5f57b4d138bad1b9 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Wed, 27 Apr 2016 16:44:38 -0700 Subject: [PATCH 1133/1649] net: rename IP_UPD_PO_STATS_BH() Rename IP_UPD_PO_STATS_BH() to 
__IP_UPD_PO_STATS() Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- include/net/ip.h | 2 +- net/ipv4/ip_input.c | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/include/net/ip.h b/include/net/ip.h index 0df4809bc68a..55f5de50a564 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -191,7 +191,7 @@ void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb, #define IP_ADD_STATS(net, field, val) SNMP_ADD_STATS64((net)->mib.ip_statistics, field, val) #define __IP_ADD_STATS(net, field, val) SNMP_ADD_STATS64_BH((net)->mib.ip_statistics, field, val) #define IP_UPD_PO_STATS(net, field, val) SNMP_UPD_PO_STATS64((net)->mib.ip_statistics, field, val) -#define IP_UPD_PO_STATS_BH(net, field, val) SNMP_UPD_PO_STATS64_BH((net)->mib.ip_statistics, field, val) +#define __IP_UPD_PO_STATS(net, field, val) SNMP_UPD_PO_STATS64_BH((net)->mib.ip_statistics, field, val) #define NET_INC_STATS(net, field) SNMP_INC_STATS((net)->mib.net_statistics, field) #define NET_INC_STATS_BH(net, field) SNMP_INC_STATS_BH((net)->mib.net_statistics, field) #define NET_ADD_STATS(net, field, adnd) SNMP_ADD_STATS((net)->mib.net_statistics, field, adnd) diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c index 11f34e421270..8fda63d78435 100644 --- a/net/ipv4/ip_input.c +++ b/net/ipv4/ip_input.c @@ -358,9 +358,9 @@ static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb) rt = skb_rtable(skb); if (rt->rt_type == RTN_MULTICAST) { - IP_UPD_PO_STATS_BH(net, IPSTATS_MIB_INMCAST, skb->len); + __IP_UPD_PO_STATS(net, IPSTATS_MIB_INMCAST, skb->len); } else if (rt->rt_type == RTN_BROADCAST) { - IP_UPD_PO_STATS_BH(net, IPSTATS_MIB_INBCAST, skb->len); + __IP_UPD_PO_STATS(net, IPSTATS_MIB_INBCAST, skb->len); } else if (skb->pkt_type == PACKET_BROADCAST || skb->pkt_type == PACKET_MULTICAST) { struct in_device *in_dev = __in_dev_get_rcu(skb->dev); @@ -409,7 +409,7 @@ int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, net = dev_net(dev); - IP_UPD_PO_STATS_BH(net, IPSTATS_MIB_IN, skb->len); + __IP_UPD_PO_STATS(net, IPSTATS_MIB_IN, skb->len); skb = skb_share_check(skb, GFP_ATOMIC); if (!skb) { From 02a1d6e7a6bb025a77da77012190e1efc1970f1c Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Wed, 27 Apr 2016 16:44:39 -0700 Subject: [PATCH 1134/1649] net: rename NET_{ADD|INC}_STATS_BH() Rename NET_INC_STATS_BH() to __NET_INC_STATS() and NET_ADD_STATS_BH() to __NET_ADD_STATS() Signed-off-by: Eric Dumazet Signed-off-by: David S. 
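/*
 * This patch converts both flavours of the netstat helpers:
 * __NET_INC_STATS() counts single events, __NET_ADD_STATS() folds in a
 * batch size.  The two call shapes after conversion, taken from the
 * hunks below (the busy-poll path adds the number of packets one poll
 * round delivered; both call sites run with BH disabled):
 *
 *	__NET_INC_STATS(net, LINUX_MIB_TCPBACKLOGDROP);
 *	__NET_ADD_STATS(sock_net(sk), LINUX_MIB_BUSYPOLLRXPACKETS, rc);
 *
 * Code that can also run in process context keeps the plain
 * NET_INC_STATS()/NET_ADD_STATS() spellings, which this patch leaves
 * untouched.
 */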
Miller --- include/net/ip.h | 4 +- include/net/tcp.h | 4 +- net/core/dev.c | 4 +- net/dccp/ipv4.c | 10 ++-- net/dccp/ipv6.c | 8 +-- net/dccp/timer.c | 4 +- net/ipv4/arp.c | 2 +- net/ipv4/inet_hashtables.c | 2 +- net/ipv4/inet_timewait_sock.c | 4 +- net/ipv4/ip_input.c | 2 +- net/ipv4/syncookies.c | 4 +- net/ipv4/tcp.c | 4 +- net/ipv4/tcp_cdg.c | 20 +++---- net/ipv4/tcp_cubic.c | 20 +++---- net/ipv4/tcp_fastopen.c | 14 ++--- net/ipv4/tcp_input.c | 100 ++++++++++++++++++---------------- net/ipv4/tcp_ipv4.c | 22 ++++---- net/ipv4/tcp_minisocks.c | 10 ++-- net/ipv4/tcp_output.c | 14 ++--- net/ipv4/tcp_recovery.c | 4 +- net/ipv4/tcp_timer.c | 22 ++++---- net/ipv6/inet6_hashtables.c | 2 +- net/ipv6/syncookies.c | 4 +- net/ipv6/tcp_ipv6.c | 16 +++--- net/sctp/input.c | 2 +- 25 files changed, 153 insertions(+), 149 deletions(-) diff --git a/include/net/ip.h b/include/net/ip.h index 55f5de50a564..fb3b766ca1c7 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -193,9 +193,9 @@ void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb, #define IP_UPD_PO_STATS(net, field, val) SNMP_UPD_PO_STATS64((net)->mib.ip_statistics, field, val) #define __IP_UPD_PO_STATS(net, field, val) SNMP_UPD_PO_STATS64_BH((net)->mib.ip_statistics, field, val) #define NET_INC_STATS(net, field) SNMP_INC_STATS((net)->mib.net_statistics, field) -#define NET_INC_STATS_BH(net, field) SNMP_INC_STATS_BH((net)->mib.net_statistics, field) +#define __NET_INC_STATS(net, field) SNMP_INC_STATS_BH((net)->mib.net_statistics, field) #define NET_ADD_STATS(net, field, adnd) SNMP_ADD_STATS((net)->mib.net_statistics, field, adnd) -#define NET_ADD_STATS_BH(net, field, adnd) SNMP_ADD_STATS_BH((net)->mib.net_statistics, field, adnd) +#define __NET_ADD_STATS(net, field, adnd) SNMP_ADD_STATS_BH((net)->mib.net_statistics, field, adnd) u64 snmp_get_cpu_field(void __percpu *mib, int cpu, int offct); unsigned long snmp_fold_field(void __percpu *mib, int offt); diff --git a/include/net/tcp.h b/include/net/tcp.h index 939ebd5320a9..ff8b4265cb2b 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -1743,7 +1743,7 @@ static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops, __u16 *mss) { tcp_synq_overflow(sk); - NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESSENT); + __NET_INC_STATS(sock_net(sk), LINUX_MIB_SYNCOOKIESSENT); return ops->cookie_init_seq(skb, mss); } #else @@ -1852,7 +1852,7 @@ static inline void tcp_segs_in(struct tcp_sock *tp, const struct sk_buff *skb) static inline void tcp_listendrop(const struct sock *sk) { atomic_inc(&((struct sock *)sk)->sk_drops); - NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS); + __NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENDROPS); } #endif /* _TCP_H */ diff --git a/net/core/dev.c b/net/core/dev.c index 6324bc9267f7..e96a3bc2c634 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -4982,8 +4982,8 @@ bool sk_busy_loop(struct sock *sk, int nonblock) netpoll_poll_unlock(have); } if (rc > 0) - NET_ADD_STATS_BH(sock_net(sk), - LINUX_MIB_BUSYPOLLRXPACKETS, rc); + __NET_ADD_STATS(sock_net(sk), + LINUX_MIB_BUSYPOLLRXPACKETS, rc); local_bh_enable(); if (rc == LL_FLUSH_FAILED) diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c index a9c75e79ba99..a8164272e0f4 100644 --- a/net/dccp/ipv4.c +++ b/net/dccp/ipv4.c @@ -205,7 +205,7 @@ void dccp_req_err(struct sock *sk, u64 seq) * socket here. 
*/ if (!between48(seq, dccp_rsk(req)->dreq_iss, dccp_rsk(req)->dreq_gss)) { - NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS); + __NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS); } else { /* * Still in RESPOND, just remove it silently. @@ -273,7 +273,7 @@ static void dccp_v4_err(struct sk_buff *skb, u32 info) * servers this needs to be solved differently. */ if (sock_owned_by_user(sk)) - NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS); + __NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS); if (sk->sk_state == DCCP_CLOSED) goto out; @@ -281,7 +281,7 @@ static void dccp_v4_err(struct sk_buff *skb, u32 info) dp = dccp_sk(sk); if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_LISTEN) && !between48(seq, dp->dccps_awl, dp->dccps_awh)) { - NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS); + __NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS); goto out; } @@ -431,11 +431,11 @@ struct sock *dccp_v4_request_recv_sock(const struct sock *sk, return newsk; exit_overflow: - NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS); + __NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS); exit_nonewsk: dst_release(dst); exit: - NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS); + __NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENDROPS); return NULL; put_and_exit: inet_csk_prepare_forced_close(newsk); diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c index 323c6b595e31..0f4eb4ea57a5 100644 --- a/net/dccp/ipv6.c +++ b/net/dccp/ipv6.c @@ -106,7 +106,7 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, bh_lock_sock(sk); if (sock_owned_by_user(sk)) - NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS); + __NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS); if (sk->sk_state == DCCP_CLOSED) goto out; @@ -114,7 +114,7 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, dp = dccp_sk(sk); if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_LISTEN) && !between48(seq, dp->dccps_awl, dp->dccps_awh)) { - NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS); + __NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS); goto out; } @@ -527,11 +527,11 @@ static struct sock *dccp_v6_request_recv_sock(const struct sock *sk, return newsk; out_overflow: - NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS); + __NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS); out_nonewsk: dst_release(dst); out: - NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS); + __NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENDROPS); return NULL; } diff --git a/net/dccp/timer.c b/net/dccp/timer.c index 4ff22c24ff14..3a2c34027758 100644 --- a/net/dccp/timer.c +++ b/net/dccp/timer.c @@ -179,7 +179,7 @@ static void dccp_delack_timer(unsigned long data) if (sock_owned_by_user(sk)) { /* Try again later. 
*/ icsk->icsk_ack.blocked = 1; - NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED); + __NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED); sk_reset_timer(sk, &icsk->icsk_delack_timer, jiffies + TCP_DELACK_MIN); goto out; @@ -209,7 +209,7 @@ static void dccp_delack_timer(unsigned long data) icsk->icsk_ack.ato = TCP_ATO_MIN; } dccp_send_ack(sk); - NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKS); + __NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKS); } out: bh_unlock_sock(sk); diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c index c34c7544d1db..89a8cac4726a 100644 --- a/net/ipv4/arp.c +++ b/net/ipv4/arp.c @@ -436,7 +436,7 @@ static int arp_filter(__be32 sip, __be32 tip, struct net_device *dev) if (IS_ERR(rt)) return 1; if (rt->dst.dev != dev) { - NET_INC_STATS_BH(net, LINUX_MIB_ARPFILTER); + __NET_INC_STATS(net, LINUX_MIB_ARPFILTER); flag = 1; } ip_rt_put(rt); diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c index b76b0d7e59c1..3177211ab651 100644 --- a/net/ipv4/inet_hashtables.c +++ b/net/ipv4/inet_hashtables.c @@ -360,7 +360,7 @@ static int __inet_check_established(struct inet_timewait_death_row *death_row, __sk_nulls_add_node_rcu(sk, &head->chain); if (tw) { sk_nulls_del_node_init_rcu((struct sock *)tw); - NET_INC_STATS_BH(net, LINUX_MIB_TIMEWAITRECYCLED); + __NET_INC_STATS(net, LINUX_MIB_TIMEWAITRECYCLED); } spin_unlock(lock); sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c index c67f9bd7699c..99ee5c4a9b68 100644 --- a/net/ipv4/inet_timewait_sock.c +++ b/net/ipv4/inet_timewait_sock.c @@ -147,9 +147,9 @@ static void tw_timer_handler(unsigned long data) struct inet_timewait_sock *tw = (struct inet_timewait_sock *)data; if (tw->tw_kill) - NET_INC_STATS_BH(twsk_net(tw), LINUX_MIB_TIMEWAITKILLED); + __NET_INC_STATS(twsk_net(tw), LINUX_MIB_TIMEWAITKILLED); else - NET_INC_STATS_BH(twsk_net(tw), LINUX_MIB_TIMEWAITED); + __NET_INC_STATS(twsk_net(tw), LINUX_MIB_TIMEWAITED); inet_twsk_kill(tw); } diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c index 8fda63d78435..751c0658e194 100644 --- a/net/ipv4/ip_input.c +++ b/net/ipv4/ip_input.c @@ -337,7 +337,7 @@ static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb) iph->tos, skb->dev); if (unlikely(err)) { if (err == -EXDEV) - NET_INC_STATS_BH(net, LINUX_MIB_IPRPFILTER); + __NET_INC_STATS(net, LINUX_MIB_IPRPFILTER); goto drop; } } diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c index 4c04f09338e3..e3c4043c27de 100644 --- a/net/ipv4/syncookies.c +++ b/net/ipv4/syncookies.c @@ -312,11 +312,11 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb) mss = __cookie_v4_check(ip_hdr(skb), th, cookie); if (mss == 0) { - NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESFAILED); + __NET_INC_STATS(sock_net(sk), LINUX_MIB_SYNCOOKIESFAILED); goto out; } - NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESRECV); + __NET_INC_STATS(sock_net(sk), LINUX_MIB_SYNCOOKIESRECV); /* check for timestamp cookie support */ memset(&tcp_opt, 0, sizeof(tcp_opt)); diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 96833433c2c3..040f35e7efe0 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -2148,7 +2148,7 @@ adjudge_to_death: if (tp->linger2 < 0) { tcp_set_state(sk, TCP_CLOSE); tcp_send_active_reset(sk, GFP_ATOMIC); - NET_INC_STATS_BH(sock_net(sk), + __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONLINGER); } else { const int tmo = tcp_fin_time(sk); @@ -2167,7 +2167,7 @@ adjudge_to_death: if (tcp_check_oom(sk, 
0)) { tcp_set_state(sk, TCP_CLOSE); tcp_send_active_reset(sk, GFP_ATOMIC); - NET_INC_STATS_BH(sock_net(sk), + __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONMEMORY); } } diff --git a/net/ipv4/tcp_cdg.c b/net/ipv4/tcp_cdg.c index 167b6a3e1b98..3c00208c37f4 100644 --- a/net/ipv4/tcp_cdg.c +++ b/net/ipv4/tcp_cdg.c @@ -155,11 +155,11 @@ static void tcp_cdg_hystart_update(struct sock *sk) ca->last_ack = now_us; if (after(now_us, ca->round_start + base_owd)) { - NET_INC_STATS_BH(sock_net(sk), - LINUX_MIB_TCPHYSTARTTRAINDETECT); - NET_ADD_STATS_BH(sock_net(sk), - LINUX_MIB_TCPHYSTARTTRAINCWND, - tp->snd_cwnd); + __NET_INC_STATS(sock_net(sk), + LINUX_MIB_TCPHYSTARTTRAINDETECT); + __NET_ADD_STATS(sock_net(sk), + LINUX_MIB_TCPHYSTARTTRAINCWND, + tp->snd_cwnd); tp->snd_ssthresh = tp->snd_cwnd; return; } @@ -174,11 +174,11 @@ static void tcp_cdg_hystart_update(struct sock *sk) 125U); if (ca->rtt.min > thresh) { - NET_INC_STATS_BH(sock_net(sk), - LINUX_MIB_TCPHYSTARTDELAYDETECT); - NET_ADD_STATS_BH(sock_net(sk), - LINUX_MIB_TCPHYSTARTDELAYCWND, - tp->snd_cwnd); + __NET_INC_STATS(sock_net(sk), + LINUX_MIB_TCPHYSTARTDELAYDETECT); + __NET_ADD_STATS(sock_net(sk), + LINUX_MIB_TCPHYSTARTDELAYCWND, + tp->snd_cwnd); tp->snd_ssthresh = tp->snd_cwnd; } } diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c index 448c2615fece..59155af9de5d 100644 --- a/net/ipv4/tcp_cubic.c +++ b/net/ipv4/tcp_cubic.c @@ -402,11 +402,11 @@ static void hystart_update(struct sock *sk, u32 delay) ca->last_ack = now; if ((s32)(now - ca->round_start) > ca->delay_min >> 4) { ca->found |= HYSTART_ACK_TRAIN; - NET_INC_STATS_BH(sock_net(sk), - LINUX_MIB_TCPHYSTARTTRAINDETECT); - NET_ADD_STATS_BH(sock_net(sk), - LINUX_MIB_TCPHYSTARTTRAINCWND, - tp->snd_cwnd); + __NET_INC_STATS(sock_net(sk), + LINUX_MIB_TCPHYSTARTTRAINDETECT); + __NET_ADD_STATS(sock_net(sk), + LINUX_MIB_TCPHYSTARTTRAINCWND, + tp->snd_cwnd); tp->snd_ssthresh = tp->snd_cwnd; } } @@ -423,11 +423,11 @@ static void hystart_update(struct sock *sk, u32 delay) if (ca->curr_rtt > ca->delay_min + HYSTART_DELAY_THRESH(ca->delay_min >> 3)) { ca->found |= HYSTART_DELAY; - NET_INC_STATS_BH(sock_net(sk), - LINUX_MIB_TCPHYSTARTDELAYDETECT); - NET_ADD_STATS_BH(sock_net(sk), - LINUX_MIB_TCPHYSTARTDELAYCWND, - tp->snd_cwnd); + __NET_INC_STATS(sock_net(sk), + LINUX_MIB_TCPHYSTARTDELAYDETECT); + __NET_ADD_STATS(sock_net(sk), + LINUX_MIB_TCPHYSTARTDELAYCWND, + tp->snd_cwnd); tp->snd_ssthresh = tp->snd_cwnd; } } diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c index cffd8f9ed1a9..a1498d507e42 100644 --- a/net/ipv4/tcp_fastopen.c +++ b/net/ipv4/tcp_fastopen.c @@ -256,8 +256,8 @@ static bool tcp_fastopen_queue_check(struct sock *sk) req1 = fastopenq->rskq_rst_head; if (!req1 || time_after(req1->rsk_timer.expires, jiffies)) { spin_unlock(&fastopenq->lock); - NET_INC_STATS_BH(sock_net(sk), - LINUX_MIB_TCPFASTOPENLISTENOVERFLOW); + __NET_INC_STATS(sock_net(sk), + LINUX_MIB_TCPFASTOPENLISTENOVERFLOW); return false; } fastopenq->rskq_rst_head = req1->dl_next; @@ -282,7 +282,7 @@ struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb, struct sock *child; if (foc->len == 0) /* Client requests a cookie */ - NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPFASTOPENCOOKIEREQD); + __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENCOOKIEREQD); if (!((sysctl_tcp_fastopen & TFO_SERVER_ENABLE) && (syn_data || foc->len >= 0) && @@ -311,13 +311,13 @@ fastopen: child = tcp_fastopen_create_child(sk, skb, dst, req); if (child) { foc->len = -1; - NET_INC_STATS_BH(sock_net(sk), - 
LINUX_MIB_TCPFASTOPENPASSIVE); + __NET_INC_STATS(sock_net(sk), + LINUX_MIB_TCPFASTOPENPASSIVE); return child; } - NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPFASTOPENPASSIVEFAIL); + __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENPASSIVEFAIL); } else if (foc->len > 0) /* Client presents an invalid cookie */ - NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPFASTOPENPASSIVEFAIL); + __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENPASSIVEFAIL); valid_foc.exp = foc->exp; *foc = valid_foc; diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index dad8d93262ed..0d5239c283cb 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -869,7 +869,7 @@ static void tcp_update_reordering(struct sock *sk, const int metric, else mib_idx = LINUX_MIB_TCPSACKREORDER; - NET_INC_STATS_BH(sock_net(sk), mib_idx); + __NET_INC_STATS(sock_net(sk), mib_idx); #if FASTRETRANS_DEBUG > 1 pr_debug("Disorder%d %d %u f%u s%u rr%d\n", tp->rx_opt.sack_ok, inet_csk(sk)->icsk_ca_state, @@ -1062,7 +1062,7 @@ static bool tcp_check_dsack(struct sock *sk, const struct sk_buff *ack_skb, if (before(start_seq_0, TCP_SKB_CB(ack_skb)->ack_seq)) { dup_sack = true; tcp_dsack_seen(tp); - NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDSACKRECV); + __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDSACKRECV); } else if (num_sacks > 1) { u32 end_seq_1 = get_unaligned_be32(&sp[1].end_seq); u32 start_seq_1 = get_unaligned_be32(&sp[1].start_seq); @@ -1071,7 +1071,7 @@ static bool tcp_check_dsack(struct sock *sk, const struct sk_buff *ack_skb, !before(start_seq_0, start_seq_1)) { dup_sack = true; tcp_dsack_seen(tp); - NET_INC_STATS_BH(sock_net(sk), + __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDSACKOFORECV); } } @@ -1289,7 +1289,7 @@ static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *skb, if (skb->len > 0) { BUG_ON(!tcp_skb_pcount(skb)); - NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SACKSHIFTED); + __NET_INC_STATS(sock_net(sk), LINUX_MIB_SACKSHIFTED); return false; } @@ -1313,7 +1313,7 @@ static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *skb, tcp_unlink_write_queue(skb, sk); sk_wmem_free_skb(sk, skb); - NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SACKMERGED); + __NET_INC_STATS(sock_net(sk), LINUX_MIB_SACKMERGED); return true; } @@ -1469,7 +1469,7 @@ noop: return skb; fallback: - NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SACKSHIFTFALLBACK); + __NET_INC_STATS(sock_net(sk), LINUX_MIB_SACKSHIFTFALLBACK); return NULL; } @@ -1657,7 +1657,7 @@ tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb, mib_idx = LINUX_MIB_TCPSACKDISCARD; } - NET_INC_STATS_BH(sock_net(sk), mib_idx); + __NET_INC_STATS(sock_net(sk), mib_idx); if (i == 0) first_sack_index = -1; continue; @@ -1909,7 +1909,7 @@ void tcp_enter_loss(struct sock *sk) skb = tcp_write_queue_head(sk); is_reneg = skb && (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED); if (is_reneg) { - NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSACKRENEGING); + __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSACKRENEGING); tp->sacked_out = 0; tp->fackets_out = 0; } @@ -2395,7 +2395,7 @@ static bool tcp_try_undo_recovery(struct sock *sk) else mib_idx = LINUX_MIB_TCPFULLUNDO; - NET_INC_STATS_BH(sock_net(sk), mib_idx); + __NET_INC_STATS(sock_net(sk), mib_idx); } if (tp->snd_una == tp->high_seq && tcp_is_reno(tp)) { /* Hold old state until something *above* high_seq @@ -2417,7 +2417,7 @@ static bool tcp_try_undo_dsack(struct sock *sk) if (tp->undo_marker && !tp->undo_retrans) { DBGUNDO(sk, "D-SACK"); tcp_undo_cwnd_reduction(sk, false); - NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDSACKUNDO); + 
__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDSACKUNDO); return true; } return false; @@ -2432,10 +2432,10 @@ static bool tcp_try_undo_loss(struct sock *sk, bool frto_undo) tcp_undo_cwnd_reduction(sk, true); DBGUNDO(sk, "partial loss"); - NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSSUNDO); + __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPLOSSUNDO); if (frto_undo) - NET_INC_STATS_BH(sock_net(sk), - LINUX_MIB_TCPSPURIOUSRTOS); + __NET_INC_STATS(sock_net(sk), + LINUX_MIB_TCPSPURIOUSRTOS); inet_csk(sk)->icsk_retransmits = 0; if (frto_undo || tcp_is_sack(tp)) tcp_set_ca_state(sk, TCP_CA_Open); @@ -2559,7 +2559,7 @@ static void tcp_mtup_probe_failed(struct sock *sk) icsk->icsk_mtup.search_high = icsk->icsk_mtup.probe_size - 1; icsk->icsk_mtup.probe_size = 0; - NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMTUPFAIL); + __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMTUPFAIL); } static void tcp_mtup_probe_success(struct sock *sk) @@ -2579,7 +2579,7 @@ static void tcp_mtup_probe_success(struct sock *sk) icsk->icsk_mtup.search_low = icsk->icsk_mtup.probe_size; icsk->icsk_mtup.probe_size = 0; tcp_sync_mss(sk, icsk->icsk_pmtu_cookie); - NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMTUPSUCCESS); + __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMTUPSUCCESS); } /* Do a simple retransmit without using the backoff mechanisms in @@ -2643,7 +2643,7 @@ static void tcp_enter_recovery(struct sock *sk, bool ece_ack) else mib_idx = LINUX_MIB_TCPSACKRECOVERY; - NET_INC_STATS_BH(sock_net(sk), mib_idx); + __NET_INC_STATS(sock_net(sk), mib_idx); tp->prior_ssthresh = 0; tcp_init_undo(tp); @@ -2736,7 +2736,7 @@ static bool tcp_try_undo_partial(struct sock *sk, const int acked) DBGUNDO(sk, "partial recovery"); tcp_undo_cwnd_reduction(sk, true); - NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPPARTIALUNDO); + __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPPARTIALUNDO); tcp_try_keep_open(sk); return true; } @@ -3431,7 +3431,7 @@ bool tcp_oow_rate_limited(struct net *net, const struct sk_buff *skb, s32 elapsed = (s32)(tcp_time_stamp - *last_oow_ack_time); if (0 <= elapsed && elapsed < sysctl_tcp_invalid_ratelimit) { - NET_INC_STATS_BH(net, mib_idx); + __NET_INC_STATS(net, mib_idx); return true; /* rate-limited: don't send yet! 
*/ } } @@ -3464,7 +3464,7 @@ static void tcp_send_challenge_ack(struct sock *sk, const struct sk_buff *skb) challenge_count = 0; } if (++challenge_count <= sysctl_tcp_challenge_ack_limit) { - NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPCHALLENGEACK); + __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPCHALLENGEACK); tcp_send_ack(sk); } } @@ -3513,8 +3513,8 @@ static void tcp_process_tlp_ack(struct sock *sk, u32 ack, int flag) tcp_set_ca_state(sk, TCP_CA_CWR); tcp_end_cwnd_reduction(sk); tcp_try_keep_open(sk); - NET_INC_STATS_BH(sock_net(sk), - LINUX_MIB_TCPLOSSPROBERECOVERY); + __NET_INC_STATS(sock_net(sk), + LINUX_MIB_TCPLOSSPROBERECOVERY); } else if (!(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP | FLAG_DATA_SACKED))) { /* Pure dupack: original and TLP probe arrived; no loss */ @@ -3618,14 +3618,14 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) tcp_in_ack_event(sk, CA_ACK_WIN_UPDATE); - NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPACKS); + __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPHPACKS); } else { u32 ack_ev_flags = CA_ACK_SLOWPATH; if (ack_seq != TCP_SKB_CB(skb)->end_seq) flag |= FLAG_DATA; else - NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPPUREACKS); + __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPPUREACKS); flag |= tcp_ack_update_window(sk, skb, ack, ack_seq); @@ -4128,7 +4128,7 @@ static void tcp_dsack_set(struct sock *sk, u32 seq, u32 end_seq) else mib_idx = LINUX_MIB_TCPDSACKOFOSENT; - NET_INC_STATS_BH(sock_net(sk), mib_idx); + __NET_INC_STATS(sock_net(sk), mib_idx); tp->rx_opt.dsack = 1; tp->duplicate_sack[0].start_seq = seq; @@ -4152,7 +4152,7 @@ static void tcp_send_dupack(struct sock *sk, const struct sk_buff *skb) if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq && before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) { - NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOST); + __NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOST); tcp_enter_quickack_mode(sk); if (tcp_is_sack(tp) && sysctl_tcp_dsack) { @@ -4302,7 +4302,7 @@ static bool tcp_try_coalesce(struct sock *sk, atomic_add(delta, &sk->sk_rmem_alloc); sk_mem_charge(sk, delta); - NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRCVCOALESCE); + __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRCVCOALESCE); TCP_SKB_CB(to)->end_seq = TCP_SKB_CB(from)->end_seq; TCP_SKB_CB(to)->ack_seq = TCP_SKB_CB(from)->ack_seq; TCP_SKB_CB(to)->tcp_flags |= TCP_SKB_CB(from)->tcp_flags; @@ -4390,7 +4390,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb) tcp_ecn_check_ce(tp, skb); if (unlikely(tcp_try_rmem_schedule(sk, skb, skb->truesize))) { - NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFODROP); + __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFODROP); tcp_drop(sk, skb); return; } @@ -4399,7 +4399,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb) tp->pred_flags = 0; inet_csk_schedule_ack(sk); - NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFOQUEUE); + __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFOQUEUE); SOCK_DEBUG(sk, "out of order segment: rcv_next %X seq %X - %X\n", tp->rcv_nxt, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq); @@ -4454,7 +4454,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb) if (skb1 && before(seq, TCP_SKB_CB(skb1)->end_seq)) { if (!after(end_seq, TCP_SKB_CB(skb1)->end_seq)) { /* All the bits are present. Drop. 
*/ - NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFOMERGE); + __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFOMERGE); tcp_drop(sk, skb); skb = NULL; tcp_dsack_set(sk, seq, end_seq); @@ -4493,7 +4493,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb) __skb_unlink(skb1, &tp->out_of_order_queue); tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq, TCP_SKB_CB(skb1)->end_seq); - NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFOMERGE); + __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFOMERGE); tcp_drop(sk, skb1); } @@ -4658,7 +4658,7 @@ queue_and_out: if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) { /* A retransmit, 2nd most common case. Force an immediate ack. */ - NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOST); + __NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOST); tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq); out_of_window: @@ -4704,7 +4704,7 @@ static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb, __skb_unlink(skb, list); __kfree_skb(skb); - NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRCVCOLLAPSED); + __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRCVCOLLAPSED); return next; } @@ -4863,7 +4863,7 @@ static bool tcp_prune_ofo_queue(struct sock *sk) bool res = false; if (!skb_queue_empty(&tp->out_of_order_queue)) { - NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_OFOPRUNED); + __NET_INC_STATS(sock_net(sk), LINUX_MIB_OFOPRUNED); __skb_queue_purge(&tp->out_of_order_queue); /* Reset SACK state. A conforming SACK implementation will @@ -4892,7 +4892,7 @@ static int tcp_prune_queue(struct sock *sk) SOCK_DEBUG(sk, "prune_queue: c=%x\n", tp->copied_seq); - NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PRUNECALLED); + __NET_INC_STATS(sock_net(sk), LINUX_MIB_PRUNECALLED); if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) tcp_clamp_window(sk); @@ -4922,7 +4922,7 @@ static int tcp_prune_queue(struct sock *sk) * drop receive data on the floor. It will get retransmitted * and hopefully then we'll have sufficient space. */ - NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_RCVPRUNED); + __NET_INC_STATS(sock_net(sk), LINUX_MIB_RCVPRUNED); /* Massive buffer overcommit. 
*/ tp->pred_flags = 0; @@ -5181,7 +5181,7 @@ static bool tcp_validate_incoming(struct sock *sk, struct sk_buff *skb, if (tcp_fast_parse_options(skb, th, tp) && tp->rx_opt.saw_tstamp && tcp_paws_discard(sk, skb)) { if (!th->rst) { - NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED); + __NET_INC_STATS(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED); if (!tcp_oow_rate_limited(sock_net(sk), skb, LINUX_MIB_TCPACKSKIPPEDPAWS, &tp->last_oow_ack_time)) @@ -5234,7 +5234,7 @@ static bool tcp_validate_incoming(struct sock *sk, struct sk_buff *skb, syn_challenge: if (syn_inerr) __TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS); - NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSYNCHALLENGE); + __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSYNCHALLENGE); tcp_send_challenge_ack(sk, skb); goto discard; } @@ -5377,7 +5377,8 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb, __skb_pull(skb, tcp_header_len); tcp_rcv_nxt_update(tp, TCP_SKB_CB(skb)->end_seq); - NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPHITSTOUSER); + __NET_INC_STATS(sock_net(sk), + LINUX_MIB_TCPHPHITSTOUSER); eaten = 1; } } @@ -5399,7 +5400,7 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb, tcp_rcv_rtt_measure_ts(sk, skb); - NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPHITS); + __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPHPHITS); /* Bulk data transfer: receiver */ eaten = tcp_queue_rcv(sk, skb, tcp_header_len, @@ -5549,12 +5550,14 @@ static bool tcp_rcv_fastopen_synack(struct sock *sk, struct sk_buff *synack, break; } tcp_rearm_rto(sk); - NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPFASTOPENACTIVEFAIL); + __NET_INC_STATS(sock_net(sk), + LINUX_MIB_TCPFASTOPENACTIVEFAIL); return true; } tp->syn_data_acked = tp->syn_data; if (tp->syn_data_acked) - NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPFASTOPENACTIVE); + __NET_INC_STATS(sock_net(sk), + LINUX_MIB_TCPFASTOPENACTIVE); tcp_fastopen_add_skb(sk, synack); @@ -5589,7 +5592,8 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb, if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr && !between(tp->rx_opt.rcv_tsecr, tp->retrans_stamp, tcp_time_stamp)) { - NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSACTIVEREJECTED); + __NET_INC_STATS(sock_net(sk), + LINUX_MIB_PAWSACTIVEREJECTED); goto reset_and_undo; } @@ -5958,7 +5962,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb) (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq && after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt))) { tcp_done(sk); - NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONDATA); + __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONDATA); return 1; } @@ -6015,7 +6019,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb) if (sk->sk_shutdown & RCV_SHUTDOWN) { if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq && after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt)) { - NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONDATA); + __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONDATA); tcp_reset(sk); return 1; } @@ -6153,10 +6157,10 @@ static bool tcp_syn_flood_action(const struct sock *sk, if (net->ipv4.sysctl_tcp_syncookies) { msg = "Sending cookies"; want_cookie = true; - NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES); + __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES); } else #endif - NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDROP); + __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPREQQFULLDROP); if (!queue->synflood_warned && net->ipv4.sysctl_tcp_syncookies != 2 && @@ -6217,7 +6221,7 @@ int 
tcp_conn_request(struct request_sock_ops *rsk_ops, * timeout. */ if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) { - NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS); + __NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS); goto drop; } @@ -6264,7 +6268,7 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops, if (dst && strict && !tcp_peer_is_proven(req, dst, true, tmp_opt.saw_tstamp)) { - NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED); + __NET_INC_STATS(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED); goto drop_and_release; } } diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 378e92d41c6c..510f7a3c758b 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -320,7 +320,7 @@ void tcp_req_err(struct sock *sk, u32 seq, bool abort) * an established socket here. */ if (seq != tcp_rsk(req)->snt_isn) { - NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS); + __NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS); } else if (abort) { /* * Still in SYN_RECV, just remove it silently. @@ -396,13 +396,13 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info) */ if (sock_owned_by_user(sk)) { if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED)) - NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS); + __NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS); } if (sk->sk_state == TCP_CLOSE) goto out; if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) { - NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP); + __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP); goto out; } @@ -413,7 +413,7 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info) snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una; if (sk->sk_state != TCP_LISTEN && !between(seq, snd_una, tp->snd_nxt)) { - NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS); + __NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS); goto out; } @@ -1151,12 +1151,12 @@ static bool tcp_v4_inbound_md5_hash(const struct sock *sk, return false; if (hash_expected && !hash_location) { - NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND); + __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND); return true; } if (!hash_expected && hash_location) { - NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED); + __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED); return true; } @@ -1342,7 +1342,7 @@ struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb, return newsk; exit_overflow: - NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS); + __NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS); exit_nonewsk: dst_release(dst); exit: @@ -1513,8 +1513,8 @@ bool tcp_prequeue(struct sock *sk, struct sk_buff *skb) while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) { sk_backlog_rcv(sk, skb1); - NET_INC_STATS_BH(sock_net(sk), - LINUX_MIB_TCPPREQUEUEDROPPED); + __NET_INC_STATS(sock_net(sk), + LINUX_MIB_TCPPREQUEUEDROPPED); } tp->ucopy.memory = 0; @@ -1629,7 +1629,7 @@ process: } } if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) { - NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP); + __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP); goto discard_and_relse; } @@ -1662,7 +1662,7 @@ process: } else if (unlikely(sk_add_backlog(sk, skb, sk->sk_rcvbuf + sk->sk_sndbuf))) { bh_unlock_sock(sk); - NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP); + __NET_INC_STATS(net, LINUX_MIB_TCPBACKLOGDROP); goto discard_and_relse; } bh_unlock_sock(sk); diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c index 0be6bfeab553..ffbfecdae471 100644 --- a/net/ipv4/tcp_minisocks.c +++ 
b/net/ipv4/tcp_minisocks.c @@ -235,7 +235,7 @@ kill: } if (paws_reject) - NET_INC_STATS_BH(twsk_net(tw), LINUX_MIB_PAWSESTABREJECTED); + __NET_INC_STATS(twsk_net(tw), LINUX_MIB_PAWSESTABREJECTED); if (!th->rst) { /* In this case we must reset the TIMEWAIT timer. @@ -337,7 +337,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo) * socket up. We've got bigger problems than * non-graceful socket closings. */ - NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPTIMEWAITOVERFLOW); + __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPTIMEWAITOVERFLOW); } tcp_update_metrics(sk); @@ -710,7 +710,7 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb, &tcp_rsk(req)->last_oow_ack_time)) req->rsk_ops->send_ack(sk, skb, req); if (paws_reject) - NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED); + __NET_INC_STATS(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED); return NULL; } @@ -752,7 +752,7 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb, if (req->num_timeout < inet_csk(sk)->icsk_accept_queue.rskq_defer_accept && TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) { inet_rsk(req)->acked = 1; - NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDEFERACCEPTDROP); + __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDEFERACCEPTDROP); return NULL; } @@ -791,7 +791,7 @@ embryonic_reset: } if (!fastopen) { inet_csk_reqsk_queue_drop(sk, req); - NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS); + __NET_INC_STATS(sock_net(sk), LINUX_MIB_EMBRYONICRSTS); } return NULL; } diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index c48baf734e8c..b1b2045ac3a9 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -2212,8 +2212,8 @@ static bool skb_still_in_host_queue(const struct sock *sk, const struct sk_buff *skb) { if (unlikely(skb_fclone_busy(sk, skb))) { - NET_INC_STATS_BH(sock_net(sk), - LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES); + __NET_INC_STATS(sock_net(sk), + LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES); return true; } return false; @@ -2275,7 +2275,7 @@ void tcp_send_loss_probe(struct sock *sk) tp->tlp_high_seq = tp->snd_nxt; probe_sent: - NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSSPROBES); + __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPLOSSPROBES); /* Reset s.t. tcp_rearm_rto will restart timer from now */ inet_csk(sk)->icsk_pending = 0; rearm_timer: @@ -2656,7 +2656,7 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs) /* Update global TCP statistics. 
*/ TCP_ADD_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS, segs); if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN) - NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSYNRETRANS); + __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSYNRETRANS); tp->total_retrans += segs; } return err; @@ -2681,7 +2681,7 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs) tp->retrans_stamp = tcp_skb_timestamp(skb); } else if (err != -EBUSY) { - NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL); + __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL); } if (tp->undo_retrans < 0) @@ -2805,7 +2805,7 @@ begin_fwd: if (tcp_retransmit_skb(sk, skb, segs)) return; - NET_INC_STATS_BH(sock_net(sk), mib_idx); + __NET_INC_STATS(sock_net(sk), mib_idx); if (tcp_in_cwnd_reduction(sk)) tp->prr_out += tcp_skb_pcount(skb); @@ -3541,7 +3541,7 @@ int tcp_rtx_synack(const struct sock *sk, struct request_sock *req) res = af_ops->send_synack(sk, NULL, &fl, req, NULL, TCP_SYNACK_NORMAL); if (!res) { __TCP_INC_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS); - NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSYNRETRANS); + __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSYNRETRANS); } return res; } diff --git a/net/ipv4/tcp_recovery.c b/net/ipv4/tcp_recovery.c index 5353085fd0b2..e0d0afaf15be 100644 --- a/net/ipv4/tcp_recovery.c +++ b/net/ipv4/tcp_recovery.c @@ -65,8 +65,8 @@ int tcp_rack_mark_lost(struct sock *sk) if (scb->sacked & TCPCB_SACKED_RETRANS) { scb->sacked &= ~TCPCB_SACKED_RETRANS; tp->retrans_out -= tcp_skb_pcount(skb); - NET_INC_STATS_BH(sock_net(sk), - LINUX_MIB_TCPLOSTRETRANSMIT); + __NET_INC_STATS(sock_net(sk), + LINUX_MIB_TCPLOSTRETRANSMIT); } } else if (!(scb->sacked & TCPCB_RETRANS)) { /* Original data are sent sequentially so stop early diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c index 373b03e78aaa..35f643d8ffbb 100644 --- a/net/ipv4/tcp_timer.c +++ b/net/ipv4/tcp_timer.c @@ -30,7 +30,7 @@ static void tcp_write_err(struct sock *sk) sk->sk_error_report(sk); tcp_done(sk); - NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONTIMEOUT); + __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONTIMEOUT); } /* Do not allow orphaned sockets to eat all our resources. @@ -68,7 +68,7 @@ static int tcp_out_of_resources(struct sock *sk, bool do_reset) if (do_reset) tcp_send_active_reset(sk, GFP_ATOMIC); tcp_done(sk); - NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONMEMORY); + __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONMEMORY); return 1; } return 0; @@ -162,8 +162,8 @@ static int tcp_write_timeout(struct sock *sk) if (tp->syn_fastopen || tp->syn_data) tcp_fastopen_cache_set(sk, 0, NULL, true, 0); if (tp->syn_data && icsk->icsk_retransmits == 1) - NET_INC_STATS_BH(sock_net(sk), - LINUX_MIB_TCPFASTOPENACTIVEFAIL); + __NET_INC_STATS(sock_net(sk), + LINUX_MIB_TCPFASTOPENACTIVEFAIL); } retry_until = icsk->icsk_syn_retries ? 
: net->ipv4.sysctl_tcp_syn_retries; syn_set = true; @@ -178,8 +178,8 @@ static int tcp_write_timeout(struct sock *sk) tp->bytes_acked <= tp->rx_opt.mss_clamp) { tcp_fastopen_cache_set(sk, 0, NULL, true, 0); if (icsk->icsk_retransmits == net->ipv4.sysctl_tcp_retries1) - NET_INC_STATS_BH(sock_net(sk), - LINUX_MIB_TCPFASTOPENACTIVEFAIL); + __NET_INC_STATS(sock_net(sk), + LINUX_MIB_TCPFASTOPENACTIVEFAIL); } /* Black hole detection */ tcp_mtu_probing(icsk, sk); @@ -228,7 +228,7 @@ void tcp_delack_timer_handler(struct sock *sk) if (!skb_queue_empty(&tp->ucopy.prequeue)) { struct sk_buff *skb; - NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSCHEDULERFAILED); + __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSCHEDULERFAILED); while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) sk_backlog_rcv(sk, skb); @@ -248,7 +248,7 @@ void tcp_delack_timer_handler(struct sock *sk) icsk->icsk_ack.ato = TCP_ATO_MIN; } tcp_send_ack(sk); - NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKS); + __NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKS); } out: @@ -265,7 +265,7 @@ static void tcp_delack_timer(unsigned long data) tcp_delack_timer_handler(sk); } else { inet_csk(sk)->icsk_ack.blocked = 1; - NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED); + __NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED); /* deleguate our work to tcp_release_cb() */ if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED, &tcp_sk(sk)->tsq_flags)) sock_hold(sk); @@ -431,7 +431,7 @@ void tcp_retransmit_timer(struct sock *sk) } else { mib_idx = LINUX_MIB_TCPTIMEOUTS; } - NET_INC_STATS_BH(sock_net(sk), mib_idx); + __NET_INC_STATS(sock_net(sk), mib_idx); } tcp_enter_loss(sk); @@ -549,7 +549,7 @@ void tcp_syn_ack_timeout(const struct request_sock *req) { struct net *net = read_pnet(&inet_rsk(req)->ireq_net); - NET_INC_STATS_BH(net, LINUX_MIB_TCPTIMEOUTS); + __NET_INC_STATS(net, LINUX_MIB_TCPTIMEOUTS); } EXPORT_SYMBOL(tcp_syn_ack_timeout); diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c index f1678388fb0d..00cf28ad4565 100644 --- a/net/ipv6/inet6_hashtables.c +++ b/net/ipv6/inet6_hashtables.c @@ -222,7 +222,7 @@ static int __inet6_check_established(struct inet_timewait_death_row *death_row, __sk_nulls_add_node_rcu(sk, &head->chain); if (tw) { sk_nulls_del_node_init_rcu((struct sock *)tw); - NET_INC_STATS_BH(net, LINUX_MIB_TIMEWAITRECYCLED); + __NET_INC_STATS(net, LINUX_MIB_TIMEWAITRECYCLED); } spin_unlock(lock); sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c index aab91fa86c5e..59c483937aec 100644 --- a/net/ipv6/syncookies.c +++ b/net/ipv6/syncookies.c @@ -155,11 +155,11 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb) mss = __cookie_v6_check(ipv6_hdr(skb), th, cookie); if (mss == 0) { - NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESFAILED); + __NET_INC_STATS(sock_net(sk), LINUX_MIB_SYNCOOKIESFAILED); goto out; } - NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESRECV); + __NET_INC_STATS(sock_net(sk), LINUX_MIB_SYNCOOKIESRECV); /* check for timestamp cookie support */ memset(&tcp_opt, 0, sizeof(tcp_opt)); diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 78c45c027acc..52914714b923 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -352,13 +352,13 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, bh_lock_sock(sk); if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG) - NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS); + __NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS); if 
(sk->sk_state == TCP_CLOSE) goto out; if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) { - NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP); + __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP); goto out; } @@ -368,7 +368,7 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una; if (sk->sk_state != TCP_LISTEN && !between(seq, snd_una, tp->snd_nxt)) { - NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS); + __NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS); goto out; } @@ -649,12 +649,12 @@ static bool tcp_v6_inbound_md5_hash(const struct sock *sk, return false; if (hash_expected && !hash_location) { - NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND); + __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND); return true; } if (!hash_expected && hash_location) { - NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED); + __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED); return true; } @@ -1165,7 +1165,7 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff * return newsk; out_overflow: - NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS); + __NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS); out_nonewsk: dst_release(dst); out: @@ -1421,7 +1421,7 @@ process: } } if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) { - NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP); + __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP); goto discard_and_relse; } @@ -1454,7 +1454,7 @@ process: } else if (unlikely(sk_add_backlog(sk, skb, sk->sk_rcvbuf + sk->sk_sndbuf))) { bh_unlock_sock(sk); - NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP); + __NET_INC_STATS(net, LINUX_MIB_TCPBACKLOGDROP); goto discard_and_relse; } bh_unlock_sock(sk); diff --git a/net/sctp/input.c b/net/sctp/input.c index 12332fc3eb44..a701527a9480 100644 --- a/net/sctp/input.c +++ b/net/sctp/input.c @@ -532,7 +532,7 @@ struct sock *sctp_err_lookup(struct net *net, int family, struct sk_buff *skb, * servers this needs to be solved differently. */ if (sock_owned_by_user(sk)) - NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS); + __NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS); *app = asoc; *tpp = transport; From 1d0155035918aa44e634941ac05721536b461d7c Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Wed, 27 Apr 2016 16:44:40 -0700 Subject: [PATCH 1135/1649] ipv6: rename IP6_INC_STATS_BH() Rename IP6_INC_STATS_BH() to __IP6_INC_STATS() and IP6_ADD_STATS_BH() to __IP6_ADD_STATS() Signed-off-by: Eric Dumazet Signed-off-by: David S. 
Miller --- include/net/ipv6.h | 4 +-- net/bridge/br_netfilter_ipv6.c | 10 +++--- net/ipv6/exthdrs.c | 66 +++++++++++++++++----------------- net/ipv6/ip6_input.c | 28 +++++++-------- net/ipv6/ip6_output.c | 34 +++++++++--------- net/ipv6/ip6mr.c | 8 ++--- net/ipv6/reassembly.c | 32 ++++++++--------- 7 files changed, 91 insertions(+), 91 deletions(-) diff --git a/include/net/ipv6.h b/include/net/ipv6.h index a620fc56e2f5..aba8760dd108 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h @@ -167,11 +167,11 @@ extern int sysctl_mld_qrv; #define IP6_INC_STATS(net, idev,field) \ _DEVINC(net, ipv6, 64, idev, field) -#define IP6_INC_STATS_BH(net, idev,field) \ +#define __IP6_INC_STATS(net, idev,field) \ _DEVINC(net, ipv6, 64_BH, idev, field) #define IP6_ADD_STATS(net, idev,field,val) \ _DEVADD(net, ipv6, 64, idev, field, val) -#define IP6_ADD_STATS_BH(net, idev,field,val) \ +#define __IP6_ADD_STATS(net, idev,field,val) \ _DEVADD(net, ipv6, 64_BH, idev, field, val) #define IP6_UPD_PO_STATS(net, idev,field,val) \ _DEVUPD(net, ipv6, 64, idev, field, val) diff --git a/net/bridge/br_netfilter_ipv6.c b/net/bridge/br_netfilter_ipv6.c index d61f56efc8dc..5e59a8457e7b 100644 --- a/net/bridge/br_netfilter_ipv6.c +++ b/net/bridge/br_netfilter_ipv6.c @@ -122,13 +122,13 @@ int br_validate_ipv6(struct net *net, struct sk_buff *skb) if (pkt_len || hdr->nexthdr != NEXTHDR_HOP) { if (pkt_len + ip6h_len > skb->len) { - IP6_INC_STATS_BH(net, idev, - IPSTATS_MIB_INTRUNCATEDPKTS); + __IP6_INC_STATS(net, idev, + IPSTATS_MIB_INTRUNCATEDPKTS); goto drop; } if (pskb_trim_rcsum(skb, pkt_len + ip6h_len)) { - IP6_INC_STATS_BH(net, idev, - IPSTATS_MIB_INDISCARDS); + __IP6_INC_STATS(net, idev, + IPSTATS_MIB_INDISCARDS); goto drop; } } @@ -142,7 +142,7 @@ int br_validate_ipv6(struct net *net, struct sk_buff *skb) return 0; inhdr_error: - IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_INHDRERRORS); + __IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS); drop: return -1; } diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c index ea7c4d64a00a..8de5dd7aaa05 100644 --- a/net/ipv6/exthdrs.c +++ b/net/ipv6/exthdrs.c @@ -258,8 +258,8 @@ static int ipv6_destopt_rcv(struct sk_buff *skb) if (!pskb_may_pull(skb, skb_transport_offset(skb) + 8) || !pskb_may_pull(skb, (skb_transport_offset(skb) + ((skb_transport_header(skb)[1] + 1) << 3)))) { - IP6_INC_STATS_BH(dev_net(dst->dev), ip6_dst_idev(dst), - IPSTATS_MIB_INHDRERRORS); + __IP6_INC_STATS(dev_net(dst->dev), ip6_dst_idev(dst), + IPSTATS_MIB_INHDRERRORS); kfree_skb(skb); return -1; } @@ -280,8 +280,8 @@ static int ipv6_destopt_rcv(struct sk_buff *skb) return 1; } - IP6_INC_STATS_BH(dev_net(dst->dev), - ip6_dst_idev(dst), IPSTATS_MIB_INHDRERRORS); + __IP6_INC_STATS(dev_net(dst->dev), + ip6_dst_idev(dst), IPSTATS_MIB_INHDRERRORS); return -1; } @@ -309,8 +309,8 @@ static int ipv6_rthdr_rcv(struct sk_buff *skb) if (!pskb_may_pull(skb, skb_transport_offset(skb) + 8) || !pskb_may_pull(skb, (skb_transport_offset(skb) + ((skb_transport_header(skb)[1] + 1) << 3)))) { - IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), - IPSTATS_MIB_INHDRERRORS); + __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), + IPSTATS_MIB_INHDRERRORS); kfree_skb(skb); return -1; } @@ -319,8 +319,8 @@ static int ipv6_rthdr_rcv(struct sk_buff *skb) if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr) || skb->pkt_type != PACKET_HOST) { - IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), - IPSTATS_MIB_INADDRERRORS); + __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), + IPSTATS_MIB_INADDRERRORS); kfree_skb(skb); return -1; } @@ -334,8 
+334,8 @@ looped_back: * processed by own */ if (!addr) { - IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), - IPSTATS_MIB_INADDRERRORS); + __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), + IPSTATS_MIB_INADDRERRORS); kfree_skb(skb); return -1; } @@ -360,8 +360,8 @@ looped_back: goto unknown_rh; /* Silently discard invalid RTH type 2 */ if (hdr->hdrlen != 2 || hdr->segments_left != 1) { - IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), - IPSTATS_MIB_INHDRERRORS); + __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), + IPSTATS_MIB_INHDRERRORS); kfree_skb(skb); return -1; } @@ -379,8 +379,8 @@ looped_back: n = hdr->hdrlen >> 1; if (hdr->segments_left > n) { - IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), - IPSTATS_MIB_INHDRERRORS); + __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), + IPSTATS_MIB_INHDRERRORS); icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, ((&hdr->segments_left) - skb_network_header(skb))); @@ -393,8 +393,8 @@ looped_back: if (skb_cloned(skb)) { /* the copy is a forwarded packet */ if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) { - IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), - IPSTATS_MIB_OUTDISCARDS); + __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), + IPSTATS_MIB_OUTDISCARDS); kfree_skb(skb); return -1; } @@ -416,14 +416,14 @@ looped_back: if (xfrm6_input_addr(skb, (xfrm_address_t *)addr, (xfrm_address_t *)&ipv6_hdr(skb)->saddr, IPPROTO_ROUTING) < 0) { - IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), - IPSTATS_MIB_INADDRERRORS); + __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), + IPSTATS_MIB_INADDRERRORS); kfree_skb(skb); return -1; } if (!ipv6_chk_home_addr(dev_net(skb_dst(skb)->dev), addr)) { - IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), - IPSTATS_MIB_INADDRERRORS); + __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), + IPSTATS_MIB_INADDRERRORS); kfree_skb(skb); return -1; } @@ -434,8 +434,8 @@ looped_back: } if (ipv6_addr_is_multicast(addr)) { - IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), - IPSTATS_MIB_INADDRERRORS); + __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), + IPSTATS_MIB_INADDRERRORS); kfree_skb(skb); return -1; } @@ -454,8 +454,8 @@ looped_back: if (skb_dst(skb)->dev->flags&IFF_LOOPBACK) { if (ipv6_hdr(skb)->hop_limit <= 1) { - IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), - IPSTATS_MIB_INHDRERRORS); + __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), + IPSTATS_MIB_INHDRERRORS); icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT, 0); kfree_skb(skb); @@ -470,7 +470,7 @@ looped_back: return -1; unknown_rh: - IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_INHDRERRORS); + __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_INHDRERRORS); icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, (&hdr->type) - skb_network_header(skb)); return -1; @@ -568,28 +568,28 @@ static bool ipv6_hop_jumbo(struct sk_buff *skb, int optoff) if (nh[optoff + 1] != 4 || (optoff & 3) != 2) { net_dbg_ratelimited("ipv6_hop_jumbo: wrong jumbo opt length/alignment %d\n", nh[optoff+1]); - IP6_INC_STATS_BH(net, ipv6_skb_idev(skb), - IPSTATS_MIB_INHDRERRORS); + __IP6_INC_STATS(net, ipv6_skb_idev(skb), + IPSTATS_MIB_INHDRERRORS); goto drop; } pkt_len = ntohl(*(__be32 *)(nh + optoff + 2)); if (pkt_len <= IPV6_MAXPLEN) { - IP6_INC_STATS_BH(net, ipv6_skb_idev(skb), - IPSTATS_MIB_INHDRERRORS); + __IP6_INC_STATS(net, ipv6_skb_idev(skb), + IPSTATS_MIB_INHDRERRORS); icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, optoff+2); return false; } if (ipv6_hdr(skb)->payload_len) { - IP6_INC_STATS_BH(net, ipv6_skb_idev(skb), - IPSTATS_MIB_INHDRERRORS); + __IP6_INC_STATS(net, 
ipv6_skb_idev(skb), + IPSTATS_MIB_INHDRERRORS); icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, optoff); return false; } if (pkt_len > skb->len - sizeof(struct ipv6hdr)) { - IP6_INC_STATS_BH(net, ipv6_skb_idev(skb), - IPSTATS_MIB_INTRUNCATEDPKTS); + __IP6_INC_STATS(net, ipv6_skb_idev(skb), + IPSTATS_MIB_INTRUNCATEDPKTS); goto drop; } diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c index c05c425c2389..218bb906c620 100644 --- a/net/ipv6/ip6_input.c +++ b/net/ipv6/ip6_input.c @@ -82,7 +82,7 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL || !idev || unlikely(idev->cnf.disable_ipv6)) { - IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_INDISCARDS); + __IP6_INC_STATS(net, idev, IPSTATS_MIB_INDISCARDS); goto drop; } @@ -109,10 +109,10 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt if (hdr->version != 6) goto err; - IP6_ADD_STATS_BH(net, idev, - IPSTATS_MIB_NOECTPKTS + + __IP6_ADD_STATS(net, idev, + IPSTATS_MIB_NOECTPKTS + (ipv6_get_dsfield(hdr) & INET_ECN_MASK), - max_t(unsigned short, 1, skb_shinfo(skb)->gso_segs)); + max_t(unsigned short, 1, skb_shinfo(skb)->gso_segs)); /* * RFC4291 2.5.3 * A packet received on an interface with a destination address @@ -169,12 +169,12 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt /* pkt_len may be zero if Jumbo payload option is present */ if (pkt_len || hdr->nexthdr != NEXTHDR_HOP) { if (pkt_len + sizeof(struct ipv6hdr) > skb->len) { - IP6_INC_STATS_BH(net, - idev, IPSTATS_MIB_INTRUNCATEDPKTS); + __IP6_INC_STATS(net, + idev, IPSTATS_MIB_INTRUNCATEDPKTS); goto drop; } if (pskb_trim_rcsum(skb, pkt_len + sizeof(struct ipv6hdr))) { - IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_INHDRERRORS); + __IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS); goto drop; } hdr = ipv6_hdr(skb); @@ -182,7 +182,7 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt if (hdr->nexthdr == NEXTHDR_HOP) { if (ipv6_parse_hopopts(skb) < 0) { - IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_INHDRERRORS); + __IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS); rcu_read_unlock(); return NET_RX_DROP; } @@ -197,7 +197,7 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt net, NULL, skb, dev, NULL, ip6_rcv_finish); err: - IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_INHDRERRORS); + __IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS); drop: rcu_read_unlock(); kfree_skb(skb); @@ -259,18 +259,18 @@ resubmit: if (ret > 0) goto resubmit; else if (ret == 0) - IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_INDELIVERS); + __IP6_INC_STATS(net, idev, IPSTATS_MIB_INDELIVERS); } else { if (!raw) { if (xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) { - IP6_INC_STATS_BH(net, idev, - IPSTATS_MIB_INUNKNOWNPROTOS); + __IP6_INC_STATS(net, idev, + IPSTATS_MIB_INUNKNOWNPROTOS); icmpv6_send(skb, ICMPV6_PARAMPROB, ICMPV6_UNK_NEXTHDR, nhoff); } kfree_skb(skb); } else { - IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_INDELIVERS); + __IP6_INC_STATS(net, idev, IPSTATS_MIB_INDELIVERS); consume_skb(skb); } } @@ -278,7 +278,7 @@ resubmit: return 0; discard: - IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_INDISCARDS); + __IP6_INC_STATS(net, idev, IPSTATS_MIB_INDISCARDS); rcu_read_unlock(); kfree_skb(skb); return 0; diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 171518e3ca21..2b3ffc582d16 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -395,8 +395,8 @@ int ip6_forward(struct sk_buff *skb) goto drop; if 
(!xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) { - IP6_INC_STATS_BH(net, ip6_dst_idev(dst), - IPSTATS_MIB_INDISCARDS); + __IP6_INC_STATS(net, ip6_dst_idev(dst), + IPSTATS_MIB_INDISCARDS); goto drop; } @@ -427,8 +427,8 @@ int ip6_forward(struct sk_buff *skb) /* Force OUTPUT device used as source address */ skb->dev = dst->dev; icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT, 0); - IP6_INC_STATS_BH(net, ip6_dst_idev(dst), - IPSTATS_MIB_INHDRERRORS); + __IP6_INC_STATS(net, ip6_dst_idev(dst), + IPSTATS_MIB_INHDRERRORS); kfree_skb(skb); return -ETIMEDOUT; @@ -441,15 +441,15 @@ int ip6_forward(struct sk_buff *skb) if (proxied > 0) return ip6_input(skb); else if (proxied < 0) { - IP6_INC_STATS_BH(net, ip6_dst_idev(dst), - IPSTATS_MIB_INDISCARDS); + __IP6_INC_STATS(net, ip6_dst_idev(dst), + IPSTATS_MIB_INDISCARDS); goto drop; } } if (!xfrm6_route_forward(skb)) { - IP6_INC_STATS_BH(net, ip6_dst_idev(dst), - IPSTATS_MIB_INDISCARDS); + __IP6_INC_STATS(net, ip6_dst_idev(dst), + IPSTATS_MIB_INDISCARDS); goto drop; } dst = skb_dst(skb); @@ -505,17 +505,17 @@ int ip6_forward(struct sk_buff *skb) /* Again, force OUTPUT device used as source address */ skb->dev = dst->dev; icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); - IP6_INC_STATS_BH(net, ip6_dst_idev(dst), - IPSTATS_MIB_INTOOBIGERRORS); - IP6_INC_STATS_BH(net, ip6_dst_idev(dst), - IPSTATS_MIB_FRAGFAILS); + __IP6_INC_STATS(net, ip6_dst_idev(dst), + IPSTATS_MIB_INTOOBIGERRORS); + __IP6_INC_STATS(net, ip6_dst_idev(dst), + IPSTATS_MIB_FRAGFAILS); kfree_skb(skb); return -EMSGSIZE; } if (skb_cow(skb, dst->dev->hard_header_len)) { - IP6_INC_STATS_BH(net, ip6_dst_idev(dst), - IPSTATS_MIB_OUTDISCARDS); + __IP6_INC_STATS(net, ip6_dst_idev(dst), + IPSTATS_MIB_OUTDISCARDS); goto drop; } @@ -525,14 +525,14 @@ int ip6_forward(struct sk_buff *skb) hdr->hop_limit--; - IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS); - IP6_ADD_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTOCTETS, skb->len); + __IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS); + __IP6_ADD_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTOCTETS, skb->len); return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD, net, NULL, skb, skb->dev, dst->dev, ip6_forward_finish); error: - IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_INADDRERRORS); + __IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_INADDRERRORS); drop: kfree_skb(skb); return -EINVAL; diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c index bf678324fd52..f2e2013f8346 100644 --- a/net/ipv6/ip6mr.c +++ b/net/ipv6/ip6mr.c @@ -1984,10 +1984,10 @@ int ip6mr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg) static inline int ip6mr_forward2_finish(struct net *net, struct sock *sk, struct sk_buff *skb) { - IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), - IPSTATS_MIB_OUTFORWDATAGRAMS); - IP6_ADD_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), - IPSTATS_MIB_OUTOCTETS, skb->len); + __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), + IPSTATS_MIB_OUTFORWDATAGRAMS); + __IP6_ADD_STATS(net, ip6_dst_idev(skb_dst(skb)), + IPSTATS_MIB_OUTOCTETS, skb->len); return dst_output(net, sk, skb); } diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c index e2ea31175ef9..2160d5d009cb 100644 --- a/net/ipv6/reassembly.c +++ b/net/ipv6/reassembly.c @@ -145,12 +145,12 @@ void ip6_expire_frag_queue(struct net *net, struct frag_queue *fq, if (!dev) goto out_rcu_unlock; - IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS); + __IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS); if 
(inet_frag_evicting(&fq->q)) goto out_rcu_unlock; - IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMTIMEOUT); + __IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMTIMEOUT); /* Don't send error if the first segment did not arrive. */ if (!(fq->q.flags & INET_FRAG_FIRST_IN) || !fq->q.fragments) @@ -223,8 +223,8 @@ static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb, ((u8 *)(fhdr + 1) - (u8 *)(ipv6_hdr(skb) + 1))); if ((unsigned int)end > IPV6_MAXPLEN) { - IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), - IPSTATS_MIB_INHDRERRORS); + __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), + IPSTATS_MIB_INHDRERRORS); icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, ((u8 *)&fhdr->frag_off - skb_network_header(skb))); @@ -258,8 +258,8 @@ static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb, /* RFC2460 says always send parameter problem in * this case. -DaveM */ - IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), - IPSTATS_MIB_INHDRERRORS); + __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), + IPSTATS_MIB_INHDRERRORS); icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, offsetof(struct ipv6hdr, payload_len)); return -1; @@ -361,8 +361,8 @@ found: discard_fq: inet_frag_kill(&fq->q, &ip6_frags); err: - IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), - IPSTATS_MIB_REASMFAILS); + __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), + IPSTATS_MIB_REASMFAILS); kfree_skb(skb); return -1; } @@ -500,7 +500,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev, skb_network_header_len(head)); rcu_read_lock(); - IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMOKS); + __IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMOKS); rcu_read_unlock(); fq->q.fragments = NULL; fq->q.fragments_tail = NULL; @@ -513,7 +513,7 @@ out_oom: net_dbg_ratelimited("ip6_frag_reasm: no memory for reassembly\n"); out_fail: rcu_read_lock(); - IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS); + __IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS); rcu_read_unlock(); return -1; } @@ -528,7 +528,7 @@ static int ipv6_frag_rcv(struct sk_buff *skb) if (IP6CB(skb)->flags & IP6SKB_FRAGMENTED) goto fail_hdr; - IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMREQDS); + __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMREQDS); /* Jumbo payload inhibits frag. 
header */ if (hdr->payload_len == 0) @@ -544,8 +544,8 @@ static int ipv6_frag_rcv(struct sk_buff *skb) if (!(fhdr->frag_off & htons(0xFFF9))) { /* It is not a fragmented frame */ skb->transport_header += sizeof(struct frag_hdr); - IP6_INC_STATS_BH(net, - ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMOKS); + __IP6_INC_STATS(net, + ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMOKS); IP6CB(skb)->nhoff = (u8 *)fhdr - skb_network_header(skb); IP6CB(skb)->flags |= IP6SKB_FRAGMENTED; @@ -566,13 +566,13 @@ static int ipv6_frag_rcv(struct sk_buff *skb) return ret; } - IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMFAILS); + __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMFAILS); kfree_skb(skb); return -1; fail_hdr: - IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), - IPSTATS_MIB_INHDRERRORS); + __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), + IPSTATS_MIB_INHDRERRORS); icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, skb_network_header_len(skb)); return -1; } From c2005eb01044e82498209ee4ee43be604da3ef2a Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Wed, 27 Apr 2016 16:44:41 -0700 Subject: [PATCH 1136/1649] ipv6: rename IP6_UPD_PO_STATS_BH() Rename IP6_UPD_PO_STATS_BH() to __IP6_UPD_PO_STATS() Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- include/net/ipv6.h | 2 +- net/ipv6/ip6_input.c | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/include/net/ipv6.h b/include/net/ipv6.h index aba8760dd108..9f3b53f2819b 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h @@ -175,7 +175,7 @@ extern int sysctl_mld_qrv; _DEVADD(net, ipv6, 64_BH, idev, field, val) #define IP6_UPD_PO_STATS(net, idev,field,val) \ _DEVUPD(net, ipv6, 64, idev, field, val) -#define IP6_UPD_PO_STATS_BH(net, idev,field,val) \ +#define __IP6_UPD_PO_STATS(net, idev,field,val) \ _DEVUPD(net, ipv6, 64_BH, idev, field, val) #define ICMP6_INC_STATS(net, idev, field) \ _DEVINCATOMIC(net, icmpv6, , idev, field) diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c index 218bb906c620..6ed56012005d 100644 --- a/net/ipv6/ip6_input.c +++ b/net/ipv6/ip6_input.c @@ -78,7 +78,7 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt idev = __in6_dev_get(skb->dev); - IP6_UPD_PO_STATS_BH(net, idev, IPSTATS_MIB_IN, skb->len); + __IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_IN, skb->len); if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL || !idev || unlikely(idev->cnf.disable_ipv6)) { @@ -297,7 +297,7 @@ int ip6_mc_input(struct sk_buff *skb) const struct ipv6hdr *hdr; bool deliver; - IP6_UPD_PO_STATS_BH(dev_net(skb_dst(skb)->dev), + __IP6_UPD_PO_STATS(dev_net(skb_dst(skb)->dev), ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_INMCAST, skb->len); From f3832ed2c27e7ad13300791db4089a7d4304f500 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Wed, 27 Apr 2016 16:44:42 -0700 Subject: [PATCH 1137/1649] ipv6: kill ICMP6MSGIN_INC_STATS_BH() IPv6 ICMP stats are atomics anyway. Signed-off-by: Eric Dumazet Signed-off-by: David S. 
Miller --- include/net/ipv6.h | 4 +--- net/ipv6/icmp.c | 2 +- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/include/net/ipv6.h b/include/net/ipv6.h index 9f3b53f2819b..64ce3670d40a 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h @@ -184,9 +184,7 @@ extern int sysctl_mld_qrv; #define ICMP6MSGOUT_INC_STATS(net, idev, field) \ _DEVINC_ATOMIC_ATOMIC(net, icmpv6msg, idev, field +256) -#define ICMP6MSGOUT_INC_STATS_BH(net, idev, field) \ - _DEVINC_ATOMIC_ATOMIC(net, icmpv6msg, idev, field +256) -#define ICMP6MSGIN_INC_STATS_BH(net, idev, field) \ +#define ICMP6MSGIN_INC_STATS(net, idev, field) \ _DEVINC_ATOMIC_ATOMIC(net, icmpv6msg, idev, field) struct ip6_ra_chain { diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c index 823a1fc576e3..23b9a4cc418e 100644 --- a/net/ipv6/icmp.c +++ b/net/ipv6/icmp.c @@ -728,7 +728,7 @@ static int icmpv6_rcv(struct sk_buff *skb) type = hdr->icmp6_type; - ICMP6MSGIN_INC_STATS_BH(dev_net(dev), idev, type); + ICMP6MSGIN_INC_STATS(dev_net(dev), idev, type); switch (type) { case ICMPV6_ECHO_REQUEST: From 13415e46c5915e2dac089de516369005fbc045f9 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Wed, 27 Apr 2016 16:44:43 -0700 Subject: [PATCH 1138/1649] net: snmp: kill STATS_BH macros There is nothing related to BH in SNMP counters anymore, since linux-3.0. Rename helpers to use __ prefix instead of _BH prefix, for contexts where preemption is disabled. This more closely matches convention used to update percpu variables. Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- include/net/icmp.h | 2 +- include/net/ip.h | 10 +++++----- include/net/ipv6.h | 36 ++++++++++++++++++------------------ include/net/sctp/sctp.h | 6 +++--- include/net/snmp.h | 24 ++++++++++++------------ include/net/tcp.h | 2 +- include/net/udp.h | 8 ++++---- net/dccp/dccp.h | 2 +- 8 files changed, 45 insertions(+), 45 deletions(-) diff --git a/include/net/icmp.h b/include/net/icmp.h index 25edb740c648..3ef2743a8eec 100644 --- a/include/net/icmp.h +++ b/include/net/icmp.h @@ -30,7 +30,7 @@ struct icmp_err { extern const struct icmp_err icmp_err_convert[]; #define ICMP_INC_STATS(net, field) SNMP_INC_STATS((net)->mib.icmp_statistics, field) -#define __ICMP_INC_STATS(net, field) SNMP_INC_STATS_BH((net)->mib.icmp_statistics, field) +#define __ICMP_INC_STATS(net, field) __SNMP_INC_STATS((net)->mib.icmp_statistics, field) #define ICMPMSGOUT_INC_STATS(net, field) SNMP_INC_STATS_ATOMIC_LONG((net)->mib.icmpmsg_statistics, field+256) #define ICMPMSGIN_INC_STATS(net, field) SNMP_INC_STATS_ATOMIC_LONG((net)->mib.icmpmsg_statistics, field) diff --git a/include/net/ip.h b/include/net/ip.h index fb3b766ca1c7..247ac82e9cf2 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -187,15 +187,15 @@ void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb, unsigned int len); #define IP_INC_STATS(net, field) SNMP_INC_STATS64((net)->mib.ip_statistics, field) -#define __IP_INC_STATS(net, field) SNMP_INC_STATS64_BH((net)->mib.ip_statistics, field) +#define __IP_INC_STATS(net, field) __SNMP_INC_STATS64((net)->mib.ip_statistics, field) #define IP_ADD_STATS(net, field, val) SNMP_ADD_STATS64((net)->mib.ip_statistics, field, val) -#define __IP_ADD_STATS(net, field, val) SNMP_ADD_STATS64_BH((net)->mib.ip_statistics, field, val) +#define __IP_ADD_STATS(net, field, val) __SNMP_ADD_STATS64((net)->mib.ip_statistics, field, val) #define IP_UPD_PO_STATS(net, field, val) SNMP_UPD_PO_STATS64((net)->mib.ip_statistics, field, val) -#define __IP_UPD_PO_STATS(net, field, val) 
SNMP_UPD_PO_STATS64_BH((net)->mib.ip_statistics, field, val) +#define __IP_UPD_PO_STATS(net, field, val) __SNMP_UPD_PO_STATS64((net)->mib.ip_statistics, field, val) #define NET_INC_STATS(net, field) SNMP_INC_STATS((net)->mib.net_statistics, field) -#define __NET_INC_STATS(net, field) SNMP_INC_STATS_BH((net)->mib.net_statistics, field) +#define __NET_INC_STATS(net, field) __SNMP_INC_STATS((net)->mib.net_statistics, field) #define NET_ADD_STATS(net, field, adnd) SNMP_ADD_STATS((net)->mib.net_statistics, field, adnd) -#define __NET_ADD_STATS(net, field, adnd) SNMP_ADD_STATS_BH((net)->mib.net_statistics, field, adnd) +#define __NET_ADD_STATS(net, field, adnd) __SNMP_ADD_STATS((net)->mib.net_statistics, field, adnd) u64 snmp_get_cpu_field(void __percpu *mib, int cpu, int offct); unsigned long snmp_fold_field(void __percpu *mib, int offt); diff --git a/include/net/ipv6.h b/include/net/ipv6.h index 64ce3670d40a..415213da5be3 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h @@ -121,21 +121,21 @@ struct frag_hdr { extern int sysctl_mld_max_msf; extern int sysctl_mld_qrv; -#define _DEVINC(net, statname, modifier, idev, field) \ +#define _DEVINC(net, statname, mod, idev, field) \ ({ \ struct inet6_dev *_idev = (idev); \ if (likely(_idev != NULL)) \ - SNMP_INC_STATS##modifier((_idev)->stats.statname, (field)); \ - SNMP_INC_STATS##modifier((net)->mib.statname##_statistics, (field));\ + mod##SNMP_INC_STATS64((_idev)->stats.statname, (field));\ + mod##SNMP_INC_STATS64((net)->mib.statname##_statistics, (field));\ }) /* per device counters are atomic_long_t */ -#define _DEVINCATOMIC(net, statname, modifier, idev, field) \ +#define _DEVINCATOMIC(net, statname, mod, idev, field) \ ({ \ struct inet6_dev *_idev = (idev); \ if (likely(_idev != NULL)) \ SNMP_INC_STATS_ATOMIC_LONG((_idev)->stats.statname##dev, (field)); \ - SNMP_INC_STATS##modifier((net)->mib.statname##_statistics, (field));\ + mod##SNMP_INC_STATS((net)->mib.statname##_statistics, (field));\ }) /* per device and per net counters are atomic_long_t */ @@ -147,40 +147,40 @@ extern int sysctl_mld_qrv; SNMP_INC_STATS_ATOMIC_LONG((net)->mib.statname##_statistics, (field));\ }) -#define _DEVADD(net, statname, modifier, idev, field, val) \ +#define _DEVADD(net, statname, mod, idev, field, val) \ ({ \ struct inet6_dev *_idev = (idev); \ if (likely(_idev != NULL)) \ - SNMP_ADD_STATS##modifier((_idev)->stats.statname, (field), (val)); \ - SNMP_ADD_STATS##modifier((net)->mib.statname##_statistics, (field), (val));\ + mod##SNMP_ADD_STATS((_idev)->stats.statname, (field), (val)); \ + mod##SNMP_ADD_STATS((net)->mib.statname##_statistics, (field), (val));\ }) -#define _DEVUPD(net, statname, modifier, idev, field, val) \ +#define _DEVUPD(net, statname, mod, idev, field, val) \ ({ \ struct inet6_dev *_idev = (idev); \ if (likely(_idev != NULL)) \ - SNMP_UPD_PO_STATS##modifier((_idev)->stats.statname, field, (val)); \ - SNMP_UPD_PO_STATS##modifier((net)->mib.statname##_statistics, field, (val));\ + mod##SNMP_UPD_PO_STATS((_idev)->stats.statname, field, (val)); \ + mod##SNMP_UPD_PO_STATS((net)->mib.statname##_statistics, field, (val));\ }) /* MIBs */ #define IP6_INC_STATS(net, idev,field) \ - _DEVINC(net, ipv6, 64, idev, field) + _DEVINC(net, ipv6, , idev, field) #define __IP6_INC_STATS(net, idev,field) \ - _DEVINC(net, ipv6, 64_BH, idev, field) + _DEVINC(net, ipv6, __, idev, field) #define IP6_ADD_STATS(net, idev,field,val) \ - _DEVADD(net, ipv6, 64, idev, field, val) + _DEVADD(net, ipv6, , idev, field, val) #define __IP6_ADD_STATS(net, idev,field,val) \ - 
_DEVADD(net, ipv6, 64_BH, idev, field, val) + _DEVADD(net, ipv6, __, idev, field, val) #define IP6_UPD_PO_STATS(net, idev,field,val) \ - _DEVUPD(net, ipv6, 64, idev, field, val) + _DEVUPD(net, ipv6, , idev, field, val) #define __IP6_UPD_PO_STATS(net, idev,field,val) \ - _DEVUPD(net, ipv6, 64_BH, idev, field, val) + _DEVUPD(net, ipv6, __, idev, field, val) #define ICMP6_INC_STATS(net, idev, field) \ _DEVINCATOMIC(net, icmpv6, , idev, field) #define __ICMP6_INC_STATS(net, idev, field) \ - _DEVINCATOMIC(net, icmpv6, _BH, idev, field) + _DEVINCATOMIC(net, icmpv6, __, idev, field) #define ICMP6MSGOUT_INC_STATS(net, idev, field) \ _DEVINC_ATOMIC_ATOMIC(net, icmpv6msg, idev, field +256) diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h index 5607c009f738..b392ac8382f2 100644 --- a/include/net/sctp/sctp.h +++ b/include/net/sctp/sctp.h @@ -205,9 +205,9 @@ extern int sysctl_sctp_wmem[3]; */ /* SCTP SNMP MIB stats handlers */ -#define SCTP_INC_STATS(net, field) SNMP_INC_STATS((net)->sctp.sctp_statistics, field) -#define __SCTP_INC_STATS(net, field) SNMP_INC_STATS_BH((net)->sctp.sctp_statistics, field) -#define SCTP_DEC_STATS(net, field) SNMP_DEC_STATS((net)->sctp.sctp_statistics, field) +#define SCTP_INC_STATS(net, field) SNMP_INC_STATS((net)->sctp.sctp_statistics, field) +#define __SCTP_INC_STATS(net, field) __SNMP_INC_STATS((net)->sctp.sctp_statistics, field) +#define SCTP_DEC_STATS(net, field) SNMP_DEC_STATS((net)->sctp.sctp_statistics, field) /* sctp mib definitions */ enum { diff --git a/include/net/snmp.h b/include/net/snmp.h index 56239fc05c51..6bdd255b2250 100644 --- a/include/net/snmp.h +++ b/include/net/snmp.h @@ -123,7 +123,7 @@ struct linux_xfrm_mib { #define DECLARE_SNMP_STAT(type, name) \ extern __typeof__(type) __percpu *name -#define SNMP_INC_STATS_BH(mib, field) \ +#define __SNMP_INC_STATS(mib, field) \ __this_cpu_inc(mib->mibs[field]) #define SNMP_INC_STATS_ATOMIC_LONG(mib, field) \ @@ -135,7 +135,7 @@ struct linux_xfrm_mib { #define SNMP_DEC_STATS(mib, field) \ this_cpu_dec(mib->mibs[field]) -#define SNMP_ADD_STATS_BH(mib, field, addend) \ +#define __SNMP_ADD_STATS(mib, field, addend) \ __this_cpu_add(mib->mibs[field], addend) #define SNMP_ADD_STATS(mib, field, addend) \ @@ -146,7 +146,7 @@ struct linux_xfrm_mib { this_cpu_inc(ptr[basefield##PKTS]); \ this_cpu_add(ptr[basefield##OCTETS], addend); \ } while (0) -#define SNMP_UPD_PO_STATS_BH(mib, basefield, addend) \ +#define __SNMP_UPD_PO_STATS(mib, basefield, addend) \ do { \ __typeof__((mib->mibs) + 0) ptr = mib->mibs; \ __this_cpu_inc(ptr[basefield##PKTS]); \ @@ -156,7 +156,7 @@ struct linux_xfrm_mib { #if BITS_PER_LONG==32 -#define SNMP_ADD_STATS64_BH(mib, field, addend) \ +#define __SNMP_ADD_STATS64(mib, field, addend) \ do { \ __typeof__(*mib) *ptr = raw_cpu_ptr(mib); \ u64_stats_update_begin(&ptr->syncp); \ @@ -164,16 +164,16 @@ struct linux_xfrm_mib { u64_stats_update_end(&ptr->syncp); \ } while (0) -#define SNMP_ADD_STATS64(mib, field, addend) \ +#define SNMP_ADD_STATS64(mib, field, addend) \ do { \ preempt_disable(); \ - SNMP_ADD_STATS64_BH(mib, field, addend); \ + __SNMP_ADD_STATS64(mib, field, addend); \ preempt_enable(); \ } while (0) -#define SNMP_INC_STATS64_BH(mib, field) SNMP_ADD_STATS64_BH(mib, field, 1) +#define __SNMP_INC_STATS64(mib, field) SNMP_ADD_STATS64(mib, field, 1) #define SNMP_INC_STATS64(mib, field) SNMP_ADD_STATS64(mib, field, 1) -#define SNMP_UPD_PO_STATS64_BH(mib, basefield, addend) \ +#define __SNMP_UPD_PO_STATS64(mib, basefield, addend) \ do { \ __typeof__(*mib) *ptr; \ ptr = 
raw_cpu_ptr((mib)); \ @@ -185,17 +185,17 @@ struct linux_xfrm_mib { #define SNMP_UPD_PO_STATS64(mib, basefield, addend) \ do { \ preempt_disable(); \ - SNMP_UPD_PO_STATS64_BH(mib, basefield, addend); \ + __SNMP_UPD_PO_STATS64(mib, basefield, addend); \ preempt_enable(); \ } while (0) #else -#define SNMP_INC_STATS64_BH(mib, field) SNMP_INC_STATS_BH(mib, field) +#define __SNMP_INC_STATS64(mib, field) __SNMP_INC_STATS(mib, field) #define SNMP_INC_STATS64(mib, field) SNMP_INC_STATS(mib, field) #define SNMP_DEC_STATS64(mib, field) SNMP_DEC_STATS(mib, field) -#define SNMP_ADD_STATS64_BH(mib, field, addend) SNMP_ADD_STATS_BH(mib, field, addend) +#define __SNMP_ADD_STATS64(mib, field, addend) __SNMP_ADD_STATS(mib, field, addend) #define SNMP_ADD_STATS64(mib, field, addend) SNMP_ADD_STATS(mib, field, addend) #define SNMP_UPD_PO_STATS64(mib, basefield, addend) SNMP_UPD_PO_STATS(mib, basefield, addend) -#define SNMP_UPD_PO_STATS64_BH(mib, basefield, addend) SNMP_UPD_PO_STATS_BH(mib, basefield, addend) +#define __SNMP_UPD_PO_STATS64(mib, basefield, addend) __SNMP_UPD_PO_STATS(mib, basefield, addend) #endif #endif diff --git a/include/net/tcp.h b/include/net/tcp.h index ff8b4265cb2b..992f317c1abe 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -332,7 +332,7 @@ bool tcp_check_oom(struct sock *sk, int shift); extern struct proto tcp_prot; #define TCP_INC_STATS(net, field) SNMP_INC_STATS((net)->mib.tcp_statistics, field) -#define __TCP_INC_STATS(net, field) SNMP_INC_STATS_BH((net)->mib.tcp_statistics, field) +#define __TCP_INC_STATS(net, field) __SNMP_INC_STATS((net)->mib.tcp_statistics, field) #define TCP_DEC_STATS(net, field) SNMP_DEC_STATS((net)->mib.tcp_statistics, field) #define TCP_ADD_STATS(net, field, val) SNMP_ADD_STATS((net)->mib.tcp_statistics, field, val) diff --git a/include/net/udp.h b/include/net/udp.h index bf6a7c29cf6a..ae07f375370d 100644 --- a/include/net/udp.h +++ b/include/net/udp.h @@ -293,12 +293,12 @@ struct sock *udp6_lib_lookup_skb(struct sk_buff *skb, if (is_udplite) SNMP_INC_STATS((net)->mib.udplite_statistics, field); \ else SNMP_INC_STATS((net)->mib.udp_statistics, field); } while(0) #define __UDP_INC_STATS(net, field, is_udplite) do { \ - if (is_udplite) SNMP_INC_STATS_BH((net)->mib.udplite_statistics, field); \ - else SNMP_INC_STATS_BH((net)->mib.udp_statistics, field); } while(0) + if (is_udplite) __SNMP_INC_STATS((net)->mib.udplite_statistics, field); \ + else __SNMP_INC_STATS((net)->mib.udp_statistics, field); } while(0) #define __UDP6_INC_STATS(net, field, is_udplite) do { \ - if (is_udplite) SNMP_INC_STATS_BH((net)->mib.udplite_stats_in6, field);\ - else SNMP_INC_STATS_BH((net)->mib.udp_stats_in6, field); \ + if (is_udplite) __SNMP_INC_STATS((net)->mib.udplite_stats_in6, field);\ + else __SNMP_INC_STATS((net)->mib.udp_stats_in6, field); \ } while(0) #define UDP6_INC_STATS(net, field, __lite) do { \ if (__lite) SNMP_INC_STATS((net)->mib.udplite_stats_in6, field); \ diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h index a4c6e2fed91c..0c55ffb859bf 100644 --- a/net/dccp/dccp.h +++ b/net/dccp/dccp.h @@ -199,7 +199,7 @@ struct dccp_mib { DECLARE_SNMP_STAT(struct dccp_mib, dccp_statistics); #define DCCP_INC_STATS(field) SNMP_INC_STATS(dccp_statistics, field) -#define __DCCP_INC_STATS(field) SNMP_INC_STATS_BH(dccp_statistics, field) +#define __DCCP_INC_STATS(field) __SNMP_INC_STATS(dccp_statistics, field) #define DCCP_DEC_STATS(field) SNMP_DEC_STATS(dccp_statistics, field) /* From 9317bb69824ec8d078b0b786b6971aedb0af3d4f Mon Sep 17 00:00:00 2001 From: Eric Dumazet 
Date: Mon, 25 Apr 2016 10:39:32 -0700 Subject: [PATCH 1139/1649] net: SOCKWQ_ASYNC_NOSPACE optimizations SOCKWQ_ASYNC_NOSPACE is tested in sock_wake_async() so that a SIGIO signal is sent when needed. tcp_sendmsg() clears the bit. tcp_poll() sets the bit when stream is not writeable. We can avoid two atomic operations by first checking if socket is actually interested in the FASYNC business (most sockets in real applications do not use AIO, but select()/poll()/epoll()) This also removes one cache line miss to access sk->sk_wq->flags in tcp_sendmsg() Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- include/net/sock.h | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/include/net/sock.h b/include/net/sock.h index d63b8494124e..0f48aad9f8e8 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -1940,11 +1940,17 @@ static inline unsigned long sock_wspace(struct sock *sk) */ static inline void sk_set_bit(int nr, struct sock *sk) { + if (nr == SOCKWQ_ASYNC_NOSPACE && !sock_flag(sk, SOCK_FASYNC)) + return; + set_bit(nr, &sk->sk_wq_raw->flags); } static inline void sk_clear_bit(int nr, struct sock *sk) { + if (nr == SOCKWQ_ASYNC_NOSPACE && !sock_flag(sk, SOCK_FASYNC)) + return; + clear_bit(nr, &sk->sk_wq_raw->flags); } From 4be735225f7cd040ca81c18740e7b672021bafeb Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Mon, 25 Apr 2016 10:39:34 -0700 Subject: [PATCH 1140/1649] net: SOCKWQ_ASYNC_WAITDATA optimizations SOCKWQ_ASYNC_WAITDATA is set/cleared in sk_wait_data() and equivalent functions, so that sock_wake_async() can send a SIGIO only when necessary. Since these atomic operations are really not needed unless socket expressed interest in FASYNC, we can omit them in most cases. Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- include/net/sock.h | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/include/net/sock.h b/include/net/sock.h index 0f48aad9f8e8..3df778ccaa82 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -1940,7 +1940,8 @@ static inline unsigned long sock_wspace(struct sock *sk) */ static inline void sk_set_bit(int nr, struct sock *sk) { - if (nr == SOCKWQ_ASYNC_NOSPACE && !sock_flag(sk, SOCK_FASYNC)) + if ((nr == SOCKWQ_ASYNC_NOSPACE || nr == SOCKWQ_ASYNC_WAITDATA) && + !sock_flag(sk, SOCK_FASYNC)) return; set_bit(nr, &sk->sk_wq_raw->flags); @@ -1948,7 +1949,8 @@ static inline void sk_set_bit(int nr, struct sock *sk) static inline void sk_clear_bit(int nr, struct sock *sk) { - if (nr == SOCKWQ_ASYNC_NOSPACE && !sock_flag(sk, SOCK_FASYNC)) + if ((nr == SOCKWQ_ASYNC_NOSPACE || nr == SOCKWQ_ASYNC_WAITDATA) && + !sock_flag(sk, SOCK_FASYNC)) return; clear_bit(nr, &sk->sk_wq_raw->flags); From ba7863f4d3bfe1698e0a92934cbc9c3021f4448d Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Thu, 28 Apr 2016 06:33:24 -0700 Subject: [PATCH 1141/1649] net: snmp: fix 64bit stats on 32bit arches I accidentally replaced BH disabling by preemption disabling in SNMP_ADD_STATS64() and SNMP_UPD_PO_STATS64() on 32bit builds. For 64bit stats on 32bit arch, we really need to disable BH, since the "struct u64_stats_sync syncp" might be manipulated both from process and BH contexts. Fixes: 6aef70a851ac ("net: snmp: kill various STATS_USER() helpers") Reported-by: Nicolas Dichtel Tested-by: Nicolas Dichtel Signed-off-by: Eric Dumazet Signed-off-by: David S. 
Miller --- include/net/snmp.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/include/net/snmp.h b/include/net/snmp.h index 6bdd255b2250..c9228ad7ee91 100644 --- a/include/net/snmp.h +++ b/include/net/snmp.h @@ -166,9 +166,9 @@ struct linux_xfrm_mib { #define SNMP_ADD_STATS64(mib, field, addend) \ do { \ - preempt_disable(); \ + local_bh_disable(); \ __SNMP_ADD_STATS64(mib, field, addend); \ - preempt_enable(); \ + local_bh_enable(); \ } while (0) #define __SNMP_INC_STATS64(mib, field) SNMP_ADD_STATS64(mib, field, 1) @@ -184,9 +184,9 @@ struct linux_xfrm_mib { } while (0) #define SNMP_UPD_PO_STATS64(mib, basefield, addend) \ do { \ - preempt_disable(); \ + local_bh_disable(); \ __SNMP_UPD_PO_STATS64(mib, basefield, addend); \ - preempt_enable(); \ + local_bh_enable(); \ } while (0) #else #define __SNMP_INC_STATS64(mib, field) __SNMP_INC_STATS(mib, field) From 863c1fd9814618eefba02218f8fadf8a430c2a17 Mon Sep 17 00:00:00 2001 From: Soheil Hassas Yeganeh Date: Wed, 27 Apr 2016 23:39:00 -0400 Subject: [PATCH 1142/1649] tcp: remove an unnecessary check in tcp_tx_timestamp Remove the redundant check for sk->sk_tsflags in tcp_tx_timestamp. tcp_tx_timestamp() receives the tsflags as a parameter. As a result the "sk->sk_tsflags || tsflags" is redundant, since tsflags already includes sk->sk_tsflags plus overrides from control messages. Signed-off-by: Soheil Hassas Yeganeh Acked-by: Eric Dumazet Signed-off-by: David S. Miller --- net/ipv4/tcp.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 040f35e7efe0..53890a730ff4 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -430,7 +430,7 @@ EXPORT_SYMBOL(tcp_init_sock); static void tcp_tx_timestamp(struct sock *sk, u16 tsflags, struct sk_buff *skb) { - if (sk->sk_tsflags || tsflags) { + if (tsflags) { struct skb_shared_info *shinfo = skb_shinfo(skb); struct tcp_skb_cb *tcb = TCP_SKB_CB(skb); From 0a2cf20c3fb62ad4717276b5303bf831f7b29d54 Mon Sep 17 00:00:00 2001 From: Soheil Hassas Yeganeh Date: Wed, 27 Apr 2016 23:39:01 -0400 Subject: [PATCH 1143/1649] tcp: remove SKBTX_ACK_TSTAMP since it is redundant The SKBTX_ACK_TSTAMP flag is set in skb_shinfo->tx_flags when the timestamp of the TCP acknowledgement should be reported on error queue. Since accessing skb_shinfo is likely to incur a cache-line miss at the time of receiving the ack, the txstamp_ack bit was added in tcp_skb_cb, which is set iff the SKBTX_ACK_TSTAMP flag is set for an skb. This makes SKBTX_ACK_TSTAMP flag redundant. Remove the SKBTX_ACK_TSTAMP and instead use the txstamp_ack bit everywhere. Note that this frees one bit in shinfo->tx_flags. Signed-off-by: Soheil Hassas Yeganeh Acked-by: Martin KaFai Lau Suggested-by: Willem de Bruijn Acked-by: Eric Dumazet Signed-off-by: David S. 
Miller --- include/linux/skbuff.h | 6 +----- net/ipv4/tcp.c | 5 +++-- net/ipv4/tcp_input.c | 3 +-- net/ipv4/tcp_output.c | 17 +++++++++++------ net/socket.c | 3 --- 5 files changed, 16 insertions(+), 18 deletions(-) diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index a1ce63979ad8..c84a5a1078c5 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -382,14 +382,10 @@ enum { /* generate software time stamp when entering packet scheduling */ SKBTX_SCHED_TSTAMP = 1 << 6, - - /* generate software timestamp on peer data acknowledgment */ - SKBTX_ACK_TSTAMP = 1 << 7, }; #define SKBTX_ANY_SW_TSTAMP (SKBTX_SW_TSTAMP | \ - SKBTX_SCHED_TSTAMP | \ - SKBTX_ACK_TSTAMP) + SKBTX_SCHED_TSTAMP) #define SKBTX_ANY_TSTAMP (SKBTX_HW_TSTAMP | SKBTX_ANY_SW_TSTAMP) /* diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 53890a730ff4..91993782a947 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -435,9 +435,10 @@ static void tcp_tx_timestamp(struct sock *sk, u16 tsflags, struct sk_buff *skb) struct tcp_skb_cb *tcb = TCP_SKB_CB(skb); sock_tx_timestamp(sk, tsflags, &shinfo->tx_flags); - if (shinfo->tx_flags & SKBTX_ANY_TSTAMP) + if (tsflags & SOF_TIMESTAMPING_TX_ACK) + tcb->txstamp_ack = 1; + if (tsflags & SOF_TIMESTAMPING_TX_RECORD_MASK) shinfo->tskey = TCP_SKB_CB(skb)->seq + skb->len - 1; - tcb->txstamp_ack = !!(shinfo->tx_flags & SKBTX_ACK_TSTAMP); } } diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 0d5239c283cb..70c370b93762 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -3087,8 +3087,7 @@ static void tcp_ack_tstamp(struct sock *sk, struct sk_buff *skb, return; shinfo = skb_shinfo(skb); - if ((shinfo->tx_flags & SKBTX_ACK_TSTAMP) && - !before(shinfo->tskey, prior_snd_una) && + if (!before(shinfo->tskey, prior_snd_una) && before(shinfo->tskey, tcp_sk(sk)->snd_una)) __skb_tstamp_tx(skb, NULL, sk, SCM_TSTAMP_ACK); } diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index b1b2045ac3a9..b3a31b4df57c 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -1111,11 +1111,17 @@ static void tcp_adjust_pcount(struct sock *sk, const struct sk_buff *skb, int de tcp_verify_left_out(tp); } +static bool tcp_has_tx_tstamp(const struct sk_buff *skb) +{ + return TCP_SKB_CB(skb)->txstamp_ack || + (skb_shinfo(skb)->tx_flags & SKBTX_ANY_TSTAMP); +} + static void tcp_fragment_tstamp(struct sk_buff *skb, struct sk_buff *skb2) { struct skb_shared_info *shinfo = skb_shinfo(skb); - if (unlikely(shinfo->tx_flags & SKBTX_ANY_TSTAMP) && + if (unlikely(tcp_has_tx_tstamp(skb)) && !before(shinfo->tskey, TCP_SKB_CB(skb2)->seq)) { struct skb_shared_info *shinfo2 = skb_shinfo(skb2); u8 tsflags = shinfo->tx_flags & SKBTX_ANY_TSTAMP; @@ -2446,13 +2452,12 @@ u32 __tcp_select_window(struct sock *sk) void tcp_skb_collapse_tstamp(struct sk_buff *skb, const struct sk_buff *next_skb) { - const struct skb_shared_info *next_shinfo = skb_shinfo(next_skb); - u8 tsflags = next_shinfo->tx_flags & SKBTX_ANY_TSTAMP; - - if (unlikely(tsflags)) { + if (unlikely(tcp_has_tx_tstamp(next_skb))) { + const struct skb_shared_info *next_shinfo = + skb_shinfo(next_skb); struct skb_shared_info *shinfo = skb_shinfo(skb); - shinfo->tx_flags |= tsflags; + shinfo->tx_flags |= next_shinfo->tx_flags & SKBTX_ANY_TSTAMP; shinfo->tskey = next_shinfo->tskey; TCP_SKB_CB(skb)->txstamp_ack |= TCP_SKB_CB(next_skb)->txstamp_ack; diff --git a/net/socket.c b/net/socket.c index 5dbb0bbe12a7..7789d79609dd 100644 --- a/net/socket.c +++ b/net/socket.c @@ -600,9 +600,6 @@ void __sock_tx_timestamp(__u16 tsflags, __u8 
*tx_flags) if (tsflags & SOF_TIMESTAMPING_TX_SCHED) flags |= SKBTX_SCHED_TSTAMP; - if (tsflags & SOF_TIMESTAMPING_TX_ACK) - flags |= SKBTX_ACK_TSTAMP; - *tx_flags = flags; } EXPORT_SYMBOL(__sock_tx_timestamp); From c134ecb87817ce70fd62b2dc48bb079c44fc08df Mon Sep 17 00:00:00 2001 From: Martin KaFai Lau Date: Mon, 25 Apr 2016 14:44:48 -0700 Subject: [PATCH 1144/1649] tcp: Make use of MSG_EOR in tcp_sendmsg This patch adds an eor bit to the TCP_SKB_CB. When MSG_EOR is passed to tcp_sendmsg, the eor bit will be set at the skb containing the last byte of the userland's msg. The eor bit will prevent data from appending to that skb in the future. The change in do_tcp_sendpages is to honor the eor set during the previous tcp_sendmsg(MSG_EOR) call. This patch handles the tcp_sendmsg case. The followup patches will handle other skb coalescing and fragment cases. One potential use case is to use MSG_EOR with SOF_TIMESTAMPING_TX_ACK to get a more accurate TCP ack timestamping on application protocol with multiple outgoing response messages (e.g. HTTP2). Packetdrill script for testing: ~~~~~~ +0 `sysctl -q -w net.ipv4.tcp_min_tso_segs=10` +0 `sysctl -q -w net.ipv4.tcp_no_metrics_save=1` +0 socket(..., SOCK_STREAM, IPPROTO_TCP) = 3 +0 setsockopt(3, SOL_SOCKET, SO_REUSEADDR, [1], 4) = 0 +0 bind(3, ..., ...) = 0 +0 listen(3, 1) = 0 0.100 < S 0:0(0) win 32792 0.100 > S. 0:0(0) ack 1 0.200 < . 1:1(0) ack 1 win 257 0.200 accept(3, ..., ...) = 4 +0 setsockopt(4, SOL_TCP, TCP_NODELAY, [1], 4) = 0 0.200 write(4, ..., 14600) = 14600 0.200 sendto(4, ..., 730, MSG_EOR, ..., ...) = 730 0.200 sendto(4, ..., 730, MSG_EOR, ..., ...) = 730 0.200 > . 1:7301(7300) ack 1 0.200 > P. 7301:14601(7300) ack 1 0.300 < . 1:1(0) ack 14601 win 257 0.300 > P. 14601:15331(730) ack 1 0.300 > P. 15331:16061(730) ack 1 0.400 < . 1:1(0) ack 16061 win 257 0.400 close(4) = 0 0.400 > F. 16061:16061(0) ack 1 0.400 < F. 1:1(0) ack 16062 win 257 0.400 > . 16062:16062(0) ack 2 Signed-off-by: Martin KaFai Lau Cc: Eric Dumazet Cc: Neal Cardwell Cc: Soheil Hassas Yeganeh Cc: Willem de Bruijn Cc: Yuchung Cheng Suggested-by: Eric Dumazet Acked-by: Eric Dumazet Acked-by: Soheil Hassas Yeganeh Signed-off-by: David S. Miller --- include/net/tcp.h | 8 +++++++- net/ipv4/tcp.c | 7 +++++-- 2 files changed, 12 insertions(+), 3 deletions(-) diff --git a/include/net/tcp.h b/include/net/tcp.h index 992f317c1abe..24ec80483805 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -761,7 +761,8 @@ struct tcp_skb_cb { __u8 ip_dsfield; /* IPv4 tos or IPv6 dsfield */ __u8 txstamp_ack:1, /* Record TX timestamp for ack? */ - unused:7; + eor:1, /* Is skb MSG_EOR marked? 
*/ + unused:6; __u32 ack_seq; /* Sequence number ACK'd */ union { struct inet_skb_parm h4; @@ -808,6 +809,11 @@ static inline int tcp_skb_mss(const struct sk_buff *skb) return TCP_SKB_CB(skb)->tcp_gso_size; } +static inline bool tcp_skb_can_collapse_to(const struct sk_buff *skb) +{ + return likely(!TCP_SKB_CB(skb)->eor); +} + /* Events passed to congestion control interface */ enum tcp_ca_event { CA_EVENT_TX_START, /* first transmit when no packets in flight */ diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 91993782a947..cb4d1cabb42c 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -909,7 +909,8 @@ static ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset, int copy, i; bool can_coalesce; - if (!tcp_send_head(sk) || (copy = size_goal - skb->len) <= 0) { + if (!tcp_send_head(sk) || (copy = size_goal - skb->len) <= 0 || + !tcp_skb_can_collapse_to(skb)) { new_segment: if (!sk_stream_memory_free(sk)) goto wait_for_sndbuf; @@ -1157,7 +1158,7 @@ int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) copy = max - skb->len; } - if (copy <= 0) { + if (copy <= 0 || !tcp_skb_can_collapse_to(skb)) { new_segment: /* Allocate new segment. If the interface is SG, * allocate skb fitting to single page. @@ -1251,6 +1252,8 @@ new_segment: copied += copy; if (!msg_data_left(msg)) { tcp_tx_timestamp(sk, sockc.tsflags, skb); + if (unlikely(flags & MSG_EOR)) + TCP_SKB_CB(skb)->eor = 1; goto out; } From a643b5d41c95164c14b111d19c05d6913bedb80b Mon Sep 17 00:00:00 2001 From: Martin KaFai Lau Date: Mon, 25 Apr 2016 14:44:49 -0700 Subject: [PATCH 1145/1649] tcp: Handle eor bit when coalescing skb This patch: 1. Prevent next_skb from coalescing to the prev_skb if TCP_SKB_CB(prev_skb)->eor is set 2. Update the TCP_SKB_CB(prev_skb)->eor if coalescing is allowed Packetdrill script for testing: ~~~~~~ +0 `sysctl -q -w net.ipv4.tcp_min_tso_segs=10` +0 `sysctl -q -w net.ipv4.tcp_no_metrics_save=1` +0 socket(..., SOCK_STREAM, IPPROTO_TCP) = 3 +0 setsockopt(3, SOL_SOCKET, SO_REUSEADDR, [1], 4) = 0 +0 bind(3, ..., ...) = 0 +0 listen(3, 1) = 0 0.100 < S 0:0(0) win 32792 0.100 > S. 0:0(0) ack 1 0.200 < . 1:1(0) ack 1 win 257 0.200 accept(3, ..., ...) = 4 +0 setsockopt(4, SOL_TCP, TCP_NODELAY, [1], 4) = 0 0.200 sendto(4, ..., 730, MSG_EOR, ..., ...) = 730 0.200 sendto(4, ..., 730, MSG_EOR, ..., ...) = 730 0.200 write(4, ..., 11680) = 11680 0.200 > P. 1:731(730) ack 1 0.200 > P. 731:1461(730) ack 1 0.200 > . 1461:8761(7300) ack 1 0.200 > P. 8761:13141(4380) ack 1 0.300 < . 1:1(0) ack 1 win 257 0.300 > P. 1:731(730) ack 1 0.300 > P. 731:1461(730) ack 1 0.400 < . 1:1(0) ack 13141 win 257 0.400 close(4) = 0 0.400 > F. 13141:13141(0) ack 1 0.500 < F. 1:1(0) ack 13142 win 257 0.500 > . 13142:13142(0) ack 2 Signed-off-by: Martin KaFai Lau Cc: Eric Dumazet Cc: Neal Cardwell Cc: Soheil Hassas Yeganeh Cc: Willem de Bruijn Cc: Yuchung Cheng Acked-by: Eric Dumazet Acked-by: Soheil Hassas Yeganeh Signed-off-by: David S. 
Miller --- net/ipv4/tcp_input.c | 4 ++++ net/ipv4/tcp_output.c | 4 ++++ 2 files changed, 8 insertions(+) diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 70c370b93762..1fb19c91e091 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -1303,6 +1303,7 @@ static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *skb, } TCP_SKB_CB(prev)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags; + TCP_SKB_CB(prev)->eor = TCP_SKB_CB(skb)->eor; if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) TCP_SKB_CB(prev)->end_seq++; @@ -1368,6 +1369,9 @@ static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb, if ((TCP_SKB_CB(prev)->sacked & TCPCB_TAGBITS) != TCPCB_SACKED_ACKED) goto fallback; + if (!tcp_skb_can_collapse_to(prev)) + goto fallback; + in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq) && !before(end_seq, TCP_SKB_CB(skb)->end_seq); diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index b3a31b4df57c..77c6cbb897e6 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -2499,6 +2499,7 @@ static void tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb) * packet counting does not break. */ TCP_SKB_CB(skb)->sacked |= TCP_SKB_CB(next_skb)->sacked & TCPCB_EVER_RETRANS; + TCP_SKB_CB(skb)->eor = TCP_SKB_CB(next_skb)->eor; /* changed transmit queue under us so clear hints */ tcp_clear_retrans_hints_partial(tp); @@ -2550,6 +2551,9 @@ static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *to, if (!tcp_can_collapse(sk, skb)) break; + if (!tcp_skb_can_collapse_to(to)) + break; + space -= skb->len; if (first) { From a166140e810e74682f3ca248ef3879177b5c1315 Mon Sep 17 00:00:00 2001 From: Martin KaFai Lau Date: Mon, 25 Apr 2016 14:44:50 -0700 Subject: [PATCH 1146/1649] tcp: Handle eor bit when fragmenting a skb When fragmenting a skb, the next_skb should carry the eor from prev_skb. The eor of prev_skb should also be reset. Packetdrill script for testing: ~~~~~~ +0 `sysctl -q -w net.ipv4.tcp_min_tso_segs=10` +0 `sysctl -q -w net.ipv4.tcp_no_metrics_save=1` +0 socket(..., SOCK_STREAM, IPPROTO_TCP) = 3 +0 setsockopt(3, SOL_SOCKET, SO_REUSEADDR, [1], 4) = 0 +0 bind(3, ..., ...) = 0 +0 listen(3, 1) = 0 0.100 < S 0:0(0) win 32792 0.100 > S. 0:0(0) ack 1 0.200 < . 1:1(0) ack 1 win 257 0.200 accept(3, ..., ...) = 4 +0 setsockopt(4, SOL_TCP, TCP_NODELAY, [1], 4) = 0 0.200 sendto(4, ..., 15330, MSG_EOR, ..., ...) = 15330 0.200 sendto(4, ..., 730, 0, ..., ...) = 730 0.200 > . 1:7301(7300) ack 1 0.200 > . 7301:14601(7300) ack 1 0.300 < . 1:1(0) ack 14601 win 257 0.300 > P. 14601:15331(730) ack 1 0.300 > P. 15331:16061(730) ack 1 0.400 < . 1:1(0) ack 16061 win 257 0.400 close(4) = 0 0.400 > F. 16061:16061(0) ack 1 0.400 < F. 1:1(0) ack 16062 win 257 0.400 > . 16062:16062(0) ack 2 Signed-off-by: Martin KaFai Lau Cc: Eric Dumazet Cc: Neal Cardwell Cc: Soheil Hassas Yeganeh Cc: Willem de Bruijn Cc: Yuchung Cheng Acked-by: Eric Dumazet Acked-by: Soheil Hassas Yeganeh Signed-off-by: David S. Miller --- net/ipv4/tcp_output.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 77c6cbb897e6..1a487ff95d4c 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -1134,6 +1134,12 @@ static void tcp_fragment_tstamp(struct sk_buff *skb, struct sk_buff *skb2) } } +static void tcp_skb_fragment_eor(struct sk_buff *skb, struct sk_buff *skb2) +{ + TCP_SKB_CB(skb2)->eor = TCP_SKB_CB(skb)->eor; + TCP_SKB_CB(skb)->eor = 0; +} + /* Function to create two new TCP segments. 
Shrinks the given segment * to the specified size and appends a new segment with the rest of the * packet to the list. This won't be called frequently, I hope. @@ -1179,6 +1185,7 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, TCP_SKB_CB(skb)->tcp_flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH); TCP_SKB_CB(buff)->tcp_flags = flags; TCP_SKB_CB(buff)->sacked = TCP_SKB_CB(skb)->sacked; + tcp_skb_fragment_eor(skb, buff); if (!skb_shinfo(skb)->nr_frags && skb->ip_summed != CHECKSUM_PARTIAL) { /* Copy and checksum data tail into the new buffer. */ @@ -1739,6 +1746,8 @@ static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len, /* This packet was never sent out yet, so no SACK bits. */ TCP_SKB_CB(buff)->sacked = 0; + tcp_skb_fragment_eor(skb, buff); + buff->ip_summed = skb->ip_summed = CHECKSUM_PARTIAL; skb_split(skb, buff, len); tcp_fragment_tstamp(skb, buff); From 3df97ba83019d524c012fd43d3216d4cc3005955 Mon Sep 17 00:00:00 2001 From: Jason Wang Date: Mon, 25 Apr 2016 23:13:42 -0400 Subject: [PATCH 1147/1649] tuntap: calculate rps hash only when needed There's no need to calculate rps hash if it was not enabled. So this patch export rps_needed and check it before trying to get rps hash. Tests (using pktgen to inject packets to guest) shows this can improve pps about 13% (when rps is disabled). Before: ~1150000 pps After: ~1300000 pps Cc: Michael S. Tsirkin Signed-off-by: Jason Wang ---- Changes from V1: - Fix build when CONFIG_RPS is not set Signed-off-by: David S. Miller --- drivers/net/tun.c | 4 +++- net/core/dev.c | 1 + 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 42992dcbdda8..425e983bab93 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -833,7 +833,8 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev) if (txq >= numqueues) goto drop; - if (numqueues == 1) { +#ifdef CONFIG_RPS + if (numqueues == 1 && static_key_false(&rps_needed)) { /* Select queue was not called for the skbuff, so we extract the * RPS hash and save it into the flow_table here. */ @@ -848,6 +849,7 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev) tun_flow_save_rps_rxhash(e, rxhash); } } +#endif tun_debug(KERN_INFO, tun, "tun_net_xmit %d\n", skb->len); diff --git a/net/core/dev.c b/net/core/dev.c index e96a3bc2c634..c2f3d5dbde56 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -3469,6 +3469,7 @@ u32 rps_cpu_mask __read_mostly; EXPORT_SYMBOL(rps_cpu_mask); struct static_key rps_needed __read_mostly; +EXPORT_SYMBOL(rps_needed); static struct rps_dev_flow * set_rps_cpu(struct net_device *dev, struct sk_buff *skb, From 6b87663fbe4a366e558d6566b3e6b6bc227da5a1 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Tue, 26 Apr 2016 17:52:33 +0200 Subject: [PATCH 1148/1649] net/mlx5e: avoid stack overflow in mlx5e_open_channels struct mlx5e_channel_param is a large structure that is allocated on the stack of mlx5e_open_channels, and with a recent change it has grown beyond the warning size for the maximum stack that a single function should use: mellanox/mlx5/core/en_main.c: In function 'mlx5e_open_channels': mellanox/mlx5/core/en_main.c:1325:1: error: the frame size of 1072 bytes is larger than 1024 bytes [-Werror=frame-larger-than=] The function is already using dynamic allocation and is not in a fast path, so the easiest workaround is to use another kzalloc for allocating the channel parameters. 
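As an aside for readers unfamiliar with the pattern, the sketch below shows the general shape of moving a large structure off the kernel stack in a slow path. It is only an illustration under assumed names (big_param, open_thing and do_configure are hypothetical), not the mlx5e code itself; the actual change is in the diff that follows. Note that kzalloc() also zeroes the allocation, which is why the explicit memset() of the on-stack structure can be dropped.

/* Illustrative only: hypothetical structure and helpers. */
struct big_param {
	u8 buf[1200];			/* large enough to trip -Wframe-larger-than= */
};

static int open_thing(struct device *dev)
{
	struct big_param *p;
	int err;

	p = kzalloc(sizeof(*p), GFP_KERNEL);	/* heap instead of stack */
	if (!p)
		return -ENOMEM;

	err = do_configure(dev, p);		/* hypothetical slow-path setup */

	kfree(p);
	return err;
}
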
Signed-off-by: Arnd Bergmann Fixes: d3c9bc2743dc ("net/mlx5e: Added ICO SQs") Acked-by: Saeed Mahameed Signed-off-by: David S. Miller --- .../net/ethernet/mellanox/mlx5/core/en_main.c | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 8484ac4c6ceb..8b0bd42a9762 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -1245,13 +1245,10 @@ static void mlx5e_build_icosq_param(struct mlx5e_priv *priv, param->icosq = true; } -static void mlx5e_build_channel_param(struct mlx5e_priv *priv, - struct mlx5e_channel_param *cparam) +static void mlx5e_build_channel_param(struct mlx5e_priv *priv, struct mlx5e_channel_param *cparam) { u8 icosq_log_wq_sz = MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE; - memset(cparam, 0, sizeof(*cparam)); - mlx5e_build_rq_param(priv, &cparam->rq); mlx5e_build_sq_param(priv, &cparam->sq); mlx5e_build_icosq_param(priv, &cparam->icosq, icosq_log_wq_sz); @@ -1262,7 +1259,7 @@ static void mlx5e_build_channel_param(struct mlx5e_priv *priv, static int mlx5e_open_channels(struct mlx5e_priv *priv) { - struct mlx5e_channel_param cparam; + struct mlx5e_channel_param *cparam; int nch = priv->params.num_channels; int err = -ENOMEM; int i; @@ -1274,12 +1271,15 @@ static int mlx5e_open_channels(struct mlx5e_priv *priv) priv->txq_to_sq_map = kcalloc(nch * priv->params.num_tc, sizeof(struct mlx5e_sq *), GFP_KERNEL); - if (!priv->channel || !priv->txq_to_sq_map) + cparam = kzalloc(sizeof(struct mlx5e_channel_param), GFP_KERNEL); + + if (!priv->channel || !priv->txq_to_sq_map || !cparam) goto err_free_txq_to_sq_map; - mlx5e_build_channel_param(priv, &cparam); + mlx5e_build_channel_param(priv, cparam); + for (i = 0; i < nch; i++) { - err = mlx5e_open_channel(priv, i, &cparam, &priv->channel[i]); + err = mlx5e_open_channel(priv, i, cparam, &priv->channel[i]); if (err) goto err_close_channels; } @@ -1290,6 +1290,7 @@ static int mlx5e_open_channels(struct mlx5e_priv *priv) goto err_close_channels; } + kfree(cparam); return 0; err_close_channels: @@ -1299,6 +1300,7 @@ err_close_channels: err_free_txq_to_sq_map: kfree(priv->txq_to_sq_map); kfree(priv->channel); + kfree(cparam); return err; } From b43586576e54609f7970096478cf4113de18a4db Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Wed, 27 Apr 2016 11:05:28 +0300 Subject: [PATCH 1149/1649] tipc: remove an unnecessary NULL check This is never called with a NULL "buf" and anyway, we dereference 's' on the lines before so it would Oops before we reach the check. Signed-off-by: Dan Carpenter Acked-by: Ying Xue Signed-off-by: David S. Miller --- net/tipc/subscr.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c index 79de588c7bd6..0dd02244e21d 100644 --- a/net/tipc/subscr.c +++ b/net/tipc/subscr.c @@ -326,8 +326,7 @@ static void tipc_subscrb_rcv_cb(struct net *net, int conid, return tipc_subscrp_cancel(s, subscriber); } - if (s) - tipc_subscrp_subscribe(net, s, subscriber, swap); + tipc_subscrp_subscribe(net, s, subscriber, swap); } /* Handle one request to establish a new subscriber */ From b43e7199a9061562e28c72192a1d07e00ec4e97f Mon Sep 17 00:00:00 2001 From: Michal Kazior Date: Wed, 27 Apr 2016 12:59:13 +0200 Subject: [PATCH 1150/1649] fq: split out backlog update logic mac80211 (which will be the first user of the fq.h) recently started to support software A-MSDU aggregation. 
It glues skbuffs together into a single one so the backlog accounting needs to be more fine-grained. To avoid backlog sorting logic duplication split it up for re-use. Signed-off-by: Michal Kazior Signed-off-by: David S. Miller --- include/net/fq_impl.h | 40 ++++++++++++++++++++++++---------------- 1 file changed, 24 insertions(+), 16 deletions(-) diff --git a/include/net/fq_impl.h b/include/net/fq_impl.h index 02eab7c51adb..163f3ed0f05a 100644 --- a/include/net/fq_impl.h +++ b/include/net/fq_impl.h @@ -120,25 +120,12 @@ static struct fq_flow *fq_flow_classify(struct fq *fq, return flow; } -static void fq_tin_enqueue(struct fq *fq, - struct fq_tin *tin, - struct sk_buff *skb, - fq_skb_free_t free_func, - fq_flow_get_default_t get_default_func) +static void fq_recalc_backlog(struct fq *fq, + struct fq_tin *tin, + struct fq_flow *flow) { - struct fq_flow *flow; struct fq_flow *i; - lockdep_assert_held(&fq->lock); - - flow = fq_flow_classify(fq, tin, skb, get_default_func); - - flow->tin = tin; - flow->backlog += skb->len; - tin->backlog_bytes += skb->len; - tin->backlog_packets++; - fq->backlog++; - if (list_empty(&flow->backlogchain)) list_add_tail(&flow->backlogchain, &fq->backlogs); @@ -149,6 +136,27 @@ static void fq_tin_enqueue(struct fq *fq, break; list_move(&flow->backlogchain, &i->backlogchain); +} + +static void fq_tin_enqueue(struct fq *fq, + struct fq_tin *tin, + struct sk_buff *skb, + fq_skb_free_t free_func, + fq_flow_get_default_t get_default_func) +{ + struct fq_flow *flow; + + lockdep_assert_held(&fq->lock); + + flow = fq_flow_classify(fq, tin, skb, get_default_func); + + flow->tin = tin; + flow->backlog += skb->len; + tin->backlog_bytes += skb->len; + tin->backlog_packets++; + fq->backlog++; + + fq_recalc_backlog(fq, tin, flow); if (list_empty(&flow->flowchain)) { flow->deficit = fq->quantum; From 0cef6a4c34b56a9a6894f2dad2fad4be789990e1 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Wed, 27 Apr 2016 10:12:25 -0700 Subject: [PATCH 1151/1649] tcp: give prequeue mode some care TCP prequeue goal is to defer processing of incoming packets to user space thread currently blocked in a recvmsg() system call. Intent is to spend less time processing these packets on behalf of softirq handler, as softirq handler is unfair to normal process scheduler decisions, as it might interrupt threads that do not even use networking. Current prequeue implementation has following issues : 1) It only checks size of the prequeue against sk_rcvbuf It was fine 15 years ago when sk_rcvbuf was in the 64KB vicinity. But we now have ~8MB values to cope with modern networking needs. We have to add sk_rmem_alloc in the equation, since out of order packets can definitely use up to sk_rcvbuf memory themselves. 2) Even with a fixed memory truesize check, prequeue can be filled by thousands of packets. When prequeue needs to be flushed, either from sofirq context (in tcp_prequeue() or timer code), or process context (in tcp_prequeue_process()), this adds a latency spike which is often not desirable. I added a fixed limit of 32 packets, as this translated to a max flush time of 60 us on my test hosts. Also note that all packets in prequeue are not accounted for tcp_mem, since they are not charged against sk_forward_alloc at this point. This is probably not a big deal. 
Note that this might increase LINUX_MIB_TCPPREQUEUEDROPPED counts, which is misnamed, as packets are not dropped at all, but rather pushed to the stack (where they can be either consumed or dropped) Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- net/ipv4/tcp_ipv4.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 510f7a3c758b..87b173b563b0 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -1506,16 +1506,16 @@ bool tcp_prequeue(struct sock *sk, struct sk_buff *skb) __skb_queue_tail(&tp->ucopy.prequeue, skb); tp->ucopy.memory += skb->truesize; - if (tp->ucopy.memory > sk->sk_rcvbuf) { + if (skb_queue_len(&tp->ucopy.prequeue) >= 32 || + tp->ucopy.memory + atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf) { struct sk_buff *skb1; BUG_ON(sock_owned_by_user(sk)); + __NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPPREQUEUEDROPPED, + skb_queue_len(&tp->ucopy.prequeue)); - while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) { + while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) sk_backlog_rcv(sk, skb1); - __NET_INC_STATS(sock_net(sk), - LINUX_MIB_TCPPREQUEUEDROPPED); - } tp->ucopy.memory = 0; } else if (skb_queue_len(&tp->ucopy.prequeue) == 1) { From badf3ada60ab8f76f9488dc8f5c0c57f70682f5a Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Wed, 27 Apr 2016 11:45:14 -0700 Subject: [PATCH 1152/1649] net: dsa: Provide CPU port statistics to master netdev This patch overloads the DSA master netdev, aka CPU Ethernet MAC to also include switch-side statistics, which is useful for debugging purposes, when the switch is not properly connected to the Ethernet MAC (duplex mismatch, (RG)MII electrical issues etc.). We accomplish this by retaining the original copy of the master netdev's ethtool_ops, and just overload the 3 operations we care about: get_sset_count, get_strings and get_ethtool_stats so as to intercept these calls and call into the original master_netdev ethtool_ops, plus our own. We take this approach as opposed to providing a set of DSA helper functions that would retrive the CPU port's statistics, because the entire purpose of DSA is to allow unmodified Ethernet MAC drivers to be used as CPU conduit interfaces, therefore, statistics overlay in such drivers would simply not scale. The new ethtool -S output would therefore look like this now: statistics p<2 digits cpu port number>_ Signed-off-by: Florian Fainelli Signed-off-by: David S. Miller --- include/net/dsa.h | 5 +++ net/dsa/slave.c | 88 +++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 93 insertions(+) diff --git a/include/net/dsa.h b/include/net/dsa.h index 2d280aba97e2..8e86af87c84f 100644 --- a/include/net/dsa.h +++ b/include/net/dsa.h @@ -110,6 +110,11 @@ struct dsa_switch_tree { struct net_device *orig_dev); enum dsa_tag_protocol tag_protocol; + /* + * Original copy of the master netdev ethtool_ops + */ + struct ethtool_ops master_ethtool_ops; + /* * The switch and port to which the CPU is attached. 
*/ diff --git a/net/dsa/slave.c b/net/dsa/slave.c index 3b6750f5e68b..5ea8a40c8d33 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c @@ -666,6 +666,78 @@ static void dsa_slave_get_strings(struct net_device *dev, } } +static void dsa_cpu_port_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *stats, + uint64_t *data) +{ + struct dsa_switch_tree *dst = dev->dsa_ptr; + struct dsa_switch *ds = dst->ds[0]; + s8 cpu_port = dst->cpu_port; + int count = 0; + + if (dst->master_ethtool_ops.get_sset_count) { + count = dst->master_ethtool_ops.get_sset_count(dev, + ETH_SS_STATS); + dst->master_ethtool_ops.get_ethtool_stats(dev, stats, data); + } + + if (ds->drv->get_ethtool_stats) + ds->drv->get_ethtool_stats(ds, cpu_port, data + count); +} + +static int dsa_cpu_port_get_sset_count(struct net_device *dev, int sset) +{ + struct dsa_switch_tree *dst = dev->dsa_ptr; + struct dsa_switch *ds = dst->ds[0]; + int count = 0; + + if (dst->master_ethtool_ops.get_sset_count) + count += dst->master_ethtool_ops.get_sset_count(dev, sset); + + if (sset == ETH_SS_STATS && ds->drv->get_sset_count) + count += ds->drv->get_sset_count(ds); + + return count; +} + +static void dsa_cpu_port_get_strings(struct net_device *dev, + uint32_t stringset, uint8_t *data) +{ + struct dsa_switch_tree *dst = dev->dsa_ptr; + struct dsa_switch *ds = dst->ds[0]; + s8 cpu_port = dst->cpu_port; + int len = ETH_GSTRING_LEN; + int mcount = 0, count; + unsigned int i; + uint8_t pfx[4]; + uint8_t *ndata; + + snprintf(pfx, sizeof(pfx), "p%.2d", cpu_port); + /* We do not want to be NULL-terminated, since this is a prefix */ + pfx[sizeof(pfx) - 1] = '_'; + + if (dst->master_ethtool_ops.get_sset_count) { + mcount = dst->master_ethtool_ops.get_sset_count(dev, + ETH_SS_STATS); + dst->master_ethtool_ops.get_strings(dev, stringset, data); + } + + if (stringset == ETH_SS_STATS && ds->drv->get_strings) { + ndata = data + mcount * len; + /* This function copies ETH_GSTRINGS_LEN bytes, we will mangle + * the output after to prepend our CPU port prefix we + * constructed earlier + */ + ds->drv->get_strings(ds, cpu_port, ndata); + count = ds->drv->get_sset_count(ds); + for (i = 0; i < count; i++) { + memmove(ndata + (i * len + sizeof(pfx)), + ndata + i * len, len - sizeof(pfx)); + memcpy(ndata + i * len, pfx, sizeof(pfx)); + } + } +} + static void dsa_slave_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, uint64_t *data) @@ -821,6 +893,8 @@ static const struct ethtool_ops dsa_slave_ethtool_ops = { .get_eee = dsa_slave_get_eee, }; +static struct ethtool_ops dsa_cpu_port_ethtool_ops; + static const struct net_device_ops dsa_slave_netdev_ops = { .ndo_open = dsa_slave_open, .ndo_stop = dsa_slave_close, @@ -1038,6 +1112,7 @@ int dsa_slave_create(struct dsa_switch *ds, struct device *parent, int port, char *name) { struct net_device *master = ds->dst->master_netdev; + struct dsa_switch_tree *dst = ds->dst; struct net_device *slave_dev; struct dsa_slave_priv *p; int ret; @@ -1049,6 +1124,19 @@ int dsa_slave_create(struct dsa_switch *ds, struct device *parent, slave_dev->features = master->vlan_features; slave_dev->ethtool_ops = &dsa_slave_ethtool_ops; + if (master->ethtool_ops != &dsa_cpu_port_ethtool_ops) { + memcpy(&dst->master_ethtool_ops, master->ethtool_ops, + sizeof(struct ethtool_ops)); + memcpy(&dsa_cpu_port_ethtool_ops, &dst->master_ethtool_ops, + sizeof(struct ethtool_ops)); + dsa_cpu_port_ethtool_ops.get_sset_count = + dsa_cpu_port_get_sset_count; + dsa_cpu_port_ethtool_ops.get_ethtool_stats = + 
dsa_cpu_port_get_ethtool_stats; + dsa_cpu_port_ethtool_ops.get_strings = + dsa_cpu_port_get_strings; + master->ethtool_ops = &dsa_cpu_port_ethtool_ops; + } eth_hw_addr_inherit(slave_dev, master); slave_dev->priv_flags |= IFF_NO_QUEUE; slave_dev->netdev_ops = &dsa_slave_netdev_ops; From 222e4d0b13c674b28a562d67c270367d45d0a53d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?fran=C3=A7ois=20romieu?= Date: Wed, 27 Apr 2016 23:29:44 +0200 Subject: [PATCH 1153/1649] pch_gbe: replace private tx ring lock with common netif_tx_lock pch_gbe_tx_ring.tx_lock is only used in the hard_xmit handler and in the transmit completion reaper called from NAPI context. Compile-tested only. Potential victims Cced. Someone more knowledgeable may check if pch_gbe_tx_queue could have some use for a mmiowb. Signed-off-by: Francois Romieu Cc: Darren Hart Cc: Andy Cress Cc: bryan@fossetcon.org Signed-off-by: David S. Miller --- drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h | 2 -- drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c | 10 ++-------- 2 files changed, 2 insertions(+), 10 deletions(-) diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h index 2a55d6d53ee6..8d710a3b4db0 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h @@ -481,7 +481,6 @@ struct pch_gbe_buffer { /** * struct pch_gbe_tx_ring - tx ring information - * @tx_lock: spinlock structs * @desc: pointer to the descriptor ring memory * @dma: physical address of the descriptor ring * @size: length of descriptor ring in bytes @@ -491,7 +490,6 @@ struct pch_gbe_buffer { * @buffer_info: array of buffer information structs */ struct pch_gbe_tx_ring { - spinlock_t tx_lock; struct pch_gbe_tx_desc *desc; dma_addr_t dma; unsigned int size; diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c index ca4add749410..3cd87a41ac92 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c @@ -1640,7 +1640,7 @@ pch_gbe_clean_tx(struct pch_gbe_adapter *adapter, cleaned_count); if (cleaned_count > 0) { /*skip this if nothing cleaned*/ /* Recover from running out of Tx resources in xmit_frame */ - spin_lock(&tx_ring->tx_lock); + netif_tx_lock(adapter->netdev); if (unlikely(cleaned && (netif_queue_stopped(adapter->netdev)))) { netif_wake_queue(adapter->netdev); @@ -1652,7 +1652,7 @@ pch_gbe_clean_tx(struct pch_gbe_adapter *adapter, netdev_dbg(adapter->netdev, "next_to_clean : %d\n", tx_ring->next_to_clean); - spin_unlock(&tx_ring->tx_lock); + netif_tx_unlock(adapter->netdev); } return cleaned; } @@ -1805,7 +1805,6 @@ int pch_gbe_setup_tx_resources(struct pch_gbe_adapter *adapter, tx_ring->next_to_use = 0; tx_ring->next_to_clean = 0; - spin_lock_init(&tx_ring->tx_lock); for (desNo = 0; desNo < tx_ring->count; desNo++) { tx_desc = PCH_GBE_TX_DESC(*tx_ring, desNo); @@ -2135,13 +2134,9 @@ static int pch_gbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev) { struct pch_gbe_adapter *adapter = netdev_priv(netdev); struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring; - unsigned long flags; - - spin_lock_irqsave(&tx_ring->tx_lock, flags); if (unlikely(!PCH_GBE_DESC_UNUSED(tx_ring))) { netif_stop_queue(netdev); - spin_unlock_irqrestore(&tx_ring->tx_lock, flags); netdev_dbg(netdev, "Return : BUSY next_to use : 0x%08x next_to clean : 0x%08x\n", tx_ring->next_to_use, tx_ring->next_to_clean); @@ -2150,7 +2145,6 @@ static int 
pch_gbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev) /* CRC,ITAG no support */ pch_gbe_tx_queue(adapter, tx_ring, skb); - spin_unlock_irqrestore(&tx_ring->tx_lock, flags); return NETDEV_TX_OK; } From 494e8489db50157637d146ee377991ed6f0018f1 Mon Sep 17 00:00:00 2001 From: Mahesh Bandewar Date: Wed, 27 Apr 2016 14:59:27 -0700 Subject: [PATCH 1154/1649] ipvlan: Fix failure path in dev registration during link creation When newlink creation fails at device-registration, the port->count is decremented twice. Francesco Ruggeri (fruggeri@arista.com) found this issue in Macvlan and the same exists in IPvlan driver too. While fixing this issue I noticed another issue of missing unregister in case of failure, so adding it to the fix which is similar to the macvlan fix by Francesco in commit 308379607548 ("macvlan: fix failure during registration v3") Reported-by: Francesco Ruggeri Signed-off-by: Mahesh Bandewar CC: Eric Dumazet CC: Eric W. Biederman Signed-off-by: David S. Miller --- drivers/net/ipvlan/ipvlan_main.c | 19 ++++++++----------- 1 file changed, 8 insertions(+), 11 deletions(-) diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c index 57941d3f4227..1c4d395fbd49 100644 --- a/drivers/net/ipvlan/ipvlan_main.c +++ b/drivers/net/ipvlan/ipvlan_main.c @@ -113,6 +113,7 @@ static int ipvlan_init(struct net_device *dev) { struct ipvl_dev *ipvlan = netdev_priv(dev); const struct net_device *phy_dev = ipvlan->phy_dev; + struct ipvl_port *port = ipvlan->port; dev->state = (dev->state & ~IPVLAN_STATE_MASK) | (phy_dev->state & IPVLAN_STATE_MASK); @@ -128,6 +129,8 @@ static int ipvlan_init(struct net_device *dev) if (!ipvlan->pcpu_stats) return -ENOMEM; + port->count += 1; + return 0; } @@ -481,27 +484,21 @@ static int ipvlan_link_new(struct net *src_net, struct net_device *dev, dev->priv_flags |= IFF_IPVLAN_SLAVE; - port->count += 1; err = register_netdevice(dev); if (err < 0) - goto ipvlan_destroy_port; + return err; err = netdev_upper_dev_link(phy_dev, dev); - if (err) - goto ipvlan_destroy_port; + if (err) { + unregister_netdevice(dev); + return err; + } list_add_tail_rcu(&ipvlan->pnode, &port->ipvlans); ipvlan_set_port_mode(port, mode); netif_stacked_transfer_operstate(phy_dev, dev); return 0; - -ipvlan_destroy_port: - port->count -= 1; - if (!port->count) - ipvlan_port_destroy(phy_dev); - - return err; } static void ipvlan_link_delete(struct net_device *dev, struct list_head *head) From 92b4423e3a0bc5d43ecde4bcad871f8b5ba04efd Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Fri, 29 Apr 2016 10:39:34 +0200 Subject: [PATCH 1155/1649] netfilter: fix IS_ERR_VALUE usage This is a forward-port of the original patch from Andrzej Hajda, he said: "IS_ERR_VALUE should be used only with unsigned long type. Otherwise it can work incorrectly. To achieve this function xt_percpu_counter_alloc is modified to return unsigned long, and its result is assigned to temporary variable to perform error checking, before assigning to .pcnt field. The patch follows conclusion from discussion on LKML [1][2]. [1]: http://permalink.gmane.org/gmane.linux.kernel/2120927 [2]: http://permalink.gmane.org/gmane.linux.kernel/2150581" Original patch from Andrzej is here: http://patchwork.ozlabs.org/patch/582970/ This patch has clashed with input validation fixes for x_tables. 
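For readers following the IS_ERR_VALUE() discussion, the sketch below restates the calling convention this patch establishes: check the allocation result through an unsigned long temporary (the only type IS_ERR_VALUE() is meant for) before storing it into the wider counter field. The helper shown is an illustrative wrapper, not code from the patch; the real call sites are in the arp_tables/ip_tables/ip6_tables hunks below.

/* Illustrative wrapper around the pattern used in the diff below. */
static int alloc_entry_counter(struct xt_counters *counters)
{
	unsigned long pcnt;

	pcnt = xt_percpu_counter_alloc();
	if (IS_ERR_VALUE(pcnt))		/* safe: pcnt is unsigned long */
		return -ENOMEM;

	counters->pcnt = pcnt;		/* the field itself may be wider (u64) */
	return 0;
}
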
Signed-off-by: Pablo Neira Ayuso --- include/linux/netfilter/x_tables.h | 6 +++--- net/ipv4/netfilter/arp_tables.c | 6 ++++-- net/ipv4/netfilter/ip_tables.c | 6 ++++-- net/ipv6/netfilter/ip6_tables.c | 6 ++++-- 4 files changed, 15 insertions(+), 9 deletions(-) diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h index 4dd9306c9d56..dc4f58a3cdcc 100644 --- a/include/linux/netfilter/x_tables.h +++ b/include/linux/netfilter/x_tables.h @@ -380,16 +380,16 @@ static inline unsigned long ifname_compare_aligned(const char *_a, * allows us to return 0 for single core systems without forcing * callers to deal with SMP vs. NONSMP issues. */ -static inline u64 xt_percpu_counter_alloc(void) +static inline unsigned long xt_percpu_counter_alloc(void) { if (nr_cpu_ids > 1) { void __percpu *res = __alloc_percpu(sizeof(struct xt_counters), sizeof(struct xt_counters)); if (res == NULL) - return (u64) -ENOMEM; + return -ENOMEM; - return (u64) (__force unsigned long) res; + return (__force unsigned long) res; } return 0; diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c index 60f5161abcb4..3355ed72051d 100644 --- a/net/ipv4/netfilter/arp_tables.c +++ b/net/ipv4/netfilter/arp_tables.c @@ -513,11 +513,13 @@ find_check_entry(struct arpt_entry *e, const char *name, unsigned int size) { struct xt_entry_target *t; struct xt_target *target; + unsigned long pcnt; int ret; - e->counters.pcnt = xt_percpu_counter_alloc(); - if (IS_ERR_VALUE(e->counters.pcnt)) + pcnt = xt_percpu_counter_alloc(); + if (IS_ERR_VALUE(pcnt)) return -ENOMEM; + e->counters.pcnt = pcnt; t = arpt_get_target(e); target = xt_request_find_target(NFPROTO_ARP, t->u.user.name, diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c index 735d1ee8c1ab..21ccc19e1e6f 100644 --- a/net/ipv4/netfilter/ip_tables.c +++ b/net/ipv4/netfilter/ip_tables.c @@ -656,10 +656,12 @@ find_check_entry(struct ipt_entry *e, struct net *net, const char *name, unsigned int j; struct xt_mtchk_param mtpar; struct xt_entry_match *ematch; + unsigned long pcnt; - e->counters.pcnt = xt_percpu_counter_alloc(); - if (IS_ERR_VALUE(e->counters.pcnt)) + pcnt = xt_percpu_counter_alloc(); + if (IS_ERR_VALUE(pcnt)) return -ENOMEM; + e->counters.pcnt = pcnt; j = 0; mtpar.net = net; diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c index 73e606c719ef..17874e83a950 100644 --- a/net/ipv6/netfilter/ip6_tables.c +++ b/net/ipv6/netfilter/ip6_tables.c @@ -669,10 +669,12 @@ find_check_entry(struct ip6t_entry *e, struct net *net, const char *name, unsigned int j; struct xt_mtchk_param mtpar; struct xt_entry_match *ematch; + unsigned long pcnt; - e->counters.pcnt = xt_percpu_counter_alloc(); - if (IS_ERR_VALUE(e->counters.pcnt)) + pcnt = xt_percpu_counter_alloc(); + if (IS_ERR_VALUE(pcnt)) return -ENOMEM; + e->counters.pcnt = pcnt; j = 0; mtpar.net = net; From 1ffdfac99f36555b66c5b9d979c855a9c1a4503b Mon Sep 17 00:00:00 2001 From: "Yisen.Zhuang\\(Zhuangyuzeng\\)" Date: Thu, 28 Apr 2016 15:09:01 +0800 Subject: [PATCH 1156/1649] net: hns: remove cpld-ctrl-reg and add cell in the cpld-syscon property Because cpld-ctrl-reg property is offset base on cpld-syscon property, we make it as a cell in the cpld-syscon property. Signed-off-by: Yisen Zhuang Signed-off-by: David S. 
Miller --- .../net/ethernet/hisilicon/hns/hns_dsaf_mac.c | 26 ++++++++++--------- 1 file changed, 14 insertions(+), 12 deletions(-) diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c index 1c8fdd316ca0..210ba8974a8b 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c @@ -647,6 +647,7 @@ static int hns_mac_get_info(struct hns_mac_cb *mac_cb) { struct device_node *np = mac_cb->dev->of_node; struct regmap *syscon; + struct of_phandle_args cpld_args; u32 ret; mac_cb->link = false; @@ -713,22 +714,23 @@ static int hns_mac_get_info(struct hns_mac_cb *mac_cb) mac_cb->mac_id); } - syscon = syscon_node_to_regmap( - of_parse_phandle(to_of_node(mac_cb->fw_port), - "cpld-syscon", 0)); - if (IS_ERR_OR_NULL(syscon)) { - dev_dbg(mac_cb->dev, "no cpld-syscon found!\n"); + ret = of_parse_phandle_with_fixed_args(to_of_node(mac_cb->fw_port), + "cpld-syscon", 1, 0, &cpld_args); + if (ret) { + dev_dbg(mac_cb->dev, "mac%d no cpld-syscon found.\n", + mac_cb->mac_id); mac_cb->cpld_ctrl = NULL; } else { - mac_cb->cpld_ctrl = syscon; - ret = fwnode_property_read_u32(mac_cb->fw_port, - "cpld-ctrl-reg", - &mac_cb->cpld_ctrl_reg); - if (ret) { - dev_err(mac_cb->dev, "get cpld-ctrl-reg fail!\n"); - return ret; + syscon = syscon_node_to_regmap(cpld_args.np); + if (IS_ERR_OR_NULL(syscon)) { + dev_dbg(mac_cb->dev, "no cpld-syscon found!\n"); + mac_cb->cpld_ctrl = NULL; + } else { + mac_cb->cpld_ctrl = syscon; + mac_cb->cpld_ctrl_reg = cpld_args.args[0]; } } + return 0; } From 0211b8fb5dd750357b3113a7c40b879c4a055a98 Mon Sep 17 00:00:00 2001 From: "Yisen.Zhuang\\(Zhuangyuzeng\\)" Date: Thu, 28 Apr 2016 15:09:02 +0800 Subject: [PATCH 1157/1649] net: hns: change port-id property to reg property in dsaf port node Indexes should generally be avoided. So we use reg rather than port-id to index ports. Signed-off-by: Yisen Zhuang Signed-off-by: David S. Miller --- drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c index 210ba8974a8b..611581fccf2a 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c @@ -834,15 +834,15 @@ int hns_mac_init(struct dsaf_device *dsaf_dev) struct fwnode_handle *child; device_for_each_child_node(dsaf_dev->dev, child) { - ret = fwnode_property_read_u32(child, "port-id", &port_id); + ret = fwnode_property_read_u32(child, "reg", &port_id); if (ret) { dev_err(dsaf_dev->dev, - "get port-id fail, ret=%d!\n", ret); + "get reg fail, ret=%d!\n", ret); return ret; } if (port_id >= max_port_num) { dev_err(dsaf_dev->dev, - "port-id(%u) out of range!\n", port_id); + "reg(%u) out of range!\n", port_id); return -EINVAL; } mac_cb = devm_kzalloc(dsaf_dev->dev, sizeof(*mac_cb), From a1ecde2c6f00825e3a6d90dc774cddc18cb0e247 Mon Sep 17 00:00:00 2001 From: "Yisen.Zhuang\\(Zhuangyuzeng\\)" Date: Thu, 28 Apr 2016 15:09:03 +0800 Subject: [PATCH 1158/1649] Documentation: Bindings: Update DT binding for hns dsaf node This patch changes property port-id to reg in dsaf port node, removes property cpld-ctrl-reg, and fixes some typos. Signed-off-by: Yisen Zhuang Signed-off-by: David S. 
Miller --- .../bindings/net/hisilicon-hns-dsaf.txt | 28 +++++++++---------- 1 file changed, 13 insertions(+), 15 deletions(-) diff --git a/Documentation/devicetree/bindings/net/hisilicon-hns-dsaf.txt b/Documentation/devicetree/bindings/net/hisilicon-hns-dsaf.txt index 5ccd4f002a67..d4b7f2e49984 100644 --- a/Documentation/devicetree/bindings/net/hisilicon-hns-dsaf.txt +++ b/Documentation/devicetree/bindings/net/hisilicon-hns-dsaf.txt @@ -13,10 +13,10 @@ Required properties: - interrupts: should contain the DSA Fabric and rcb interrupt. - reg: specifies base physical address(es) and size of the device registers. The first region is external interface control register base and size(optional, - only be used when subctrl-syscon is not exists). It is recommended using + only used when subctrl-syscon does not exist). It is recommended using subctrl-syscon rather than this address. - The second region is SerDes base register and size(optional, only be used when - serdes-syscon in port node is not exists. It is recommended using + The second region is SerDes base register and size(optional, only used when + serdes-syscon in port node does not exist). It is recommended using serdes-syscon rather than this address. The third region is the PPE register base and size. The fourth region is dsa fabric base register and size. It is not required for @@ -24,8 +24,8 @@ Required properties: - reg-names: may be ppe-base and(or) dsaf-base. It is used to find the corresponding reg's index. -- phy-handle: phy handle of physicl port, 0 if not any phy device. It is optional - attribute. If port node is exists, phy-handle in each port node will be used. +- phy-handle: phy handle of physical port, 0 if not any phy device. It is optional + attribute. If port node exists, phy-handle in each port node will be used. see ethernet.txt [1]. - subctrl-syscon: is syscon handle for external interface control register. - reset-field-offset: is offset of reset field. Its value depends on the hardware @@ -35,14 +35,12 @@ Required properties: - port: subnodes of dsaf. A dsaf node may contain several port nodes(Depending on mode of dsaf). Port node contain some attributes listed below: -- port-id: is physical port index in one dsaf. -- phy-handle: phy handle of physicl port. It is not required if there isn't +- reg: is physical port index in one dsaf. +- phy-handle: phy handle of physical port. It is not required if there isn't phy device. see ethernet.txt [1]. - serdes-syscon: is syscon handle for SerDes register. -- cpld-syscon: is syscon handle for cpld register. It is not required if there - isn't cpld device. -- cpld-ctrl-reg: is cpld register offset. It is not required if there isn't - cpld-syscon. +- cpld-syscon: is syscon handle + register offset pair for cpld register. It is + not required if there isn't cpld device. - port-rst-offset: is offset of reset field for each port in dsaf. Its value depends on the hardware user manual. - port-mode-offset: is offset of port mode field for each port in dsaf. 
Its @@ -72,14 +70,14 @@ dsaf0: dsa@c7000000 { desc-num = <1024>; dma-coherent; - prot@0 { - port-id = 0; + port@0 { + reg = 0; phy-handle = <&phy0>; serdes-syscon = <&serdes>; }; - prot@1 { - port-id = 1; + port@1 { + reg = 1; serdes-syscon = <&serdes>; }; }; From ea991027efcb2acf801633190bc2f35ad1eb78c2 Mon Sep 17 00:00:00 2001 From: "Yisen.Zhuang\\(Zhuangyuzeng\\)" Date: Thu, 28 Apr 2016 15:09:04 +0800 Subject: [PATCH 1159/1649] dts: hisi: update hns dst for changing property port-id to reg Indexes should generally be avoided. This patch changes property port-id to reg in dsaf port node. Signed-off-by: Yisen Zhuang Signed-off-by: David S. Miller --- arch/arm64/boot/dts/hisilicon/hip05_hns.dtsi | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/arch/arm64/boot/dts/hisilicon/hip05_hns.dtsi b/arch/arm64/boot/dts/hisilicon/hip05_hns.dtsi index 7d625141c917..b6a130c2e5a4 100644 --- a/arch/arm64/boot/dts/hisilicon/hip05_hns.dtsi +++ b/arch/arm64/boot/dts/hisilicon/hip05_hns.dtsi @@ -24,6 +24,8 @@ soc0: soc@000000000 { }; dsaf0: dsa@c7000000 { + #address-cells = <1>; + #size-cells = <0>; compatible = "hisilicon,hns-dsaf-v1"; mode = "6port-16rss"; interrupt-parent = <&mbigen_dsa>; @@ -124,20 +126,20 @@ soc0: soc@000000000 { dma-coherent; port@0 { - port-id = <0>; + reg = <0>; serdes-syscon = <&serdes_ctrl0>; }; port@1 { - port-id = <1>; + reg = <1>; serdes-syscon = <&serdes_ctrl0>; }; port@4 { - port-id = <4>; + reg = <4>; phy-handle = <&soc0_phy0>; serdes-syscon = <&serdes_ctrl1>; }; port@5 { - port-id = <5>; + reg = <5>; phy-handle = <&soc0_phy1>; serdes-syscon = <&serdes_ctrl1>; }; From 7f080c3f2ff091c095248f670bb34308f141ff7a Mon Sep 17 00:00:00 2001 From: Hariprasad Shenai Date: Thu, 28 Apr 2016 13:23:18 +0530 Subject: [PATCH 1160/1649] cxgb4: Add support to enable logging of firmware mailbox commands Add new /sys/kernel/debug/ support to dump a firmware mailbox command issued and replies for debugging purpose. Based on original work by Casey Leedom Signed-off-by: Hariprasad Shenai Signed-off-by: David S. Miller --- drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 32 ++++++ .../ethernet/chelsio/cxgb4/cxgb4_debugfs.c | 99 +++++++++++++++++++ .../net/ethernet/chelsio/cxgb4/cxgb4_main.c | 12 +++ drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 69 +++++++++---- 4 files changed, 192 insertions(+), 20 deletions(-) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index 6af5242e6d21..b4fceb92479f 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -359,6 +359,34 @@ struct sge_idma_monitor_state { unsigned int idma_warn[2]; /* time to warning in HZ */ }; +/* Firmware Mailbox Command/Reply log. All values are in Host-Endian format. + * The access and execute times are signed in order to accommodate negative + * error returns. + */ +struct mbox_cmd { + u64 cmd[MBOX_LEN / 8]; /* a Firmware Mailbox Command/Reply */ + u64 timestamp; /* OS-dependent timestamp */ + u32 seqno; /* sequence number */ + s16 access; /* time (ms) to access mailbox */ + s16 execute; /* time (ms) to execute */ +}; + +struct mbox_cmd_log { + unsigned int size; /* number of entries in the log */ + unsigned int cursor; /* next position in the log to write */ + u32 seqno; /* next sequence number */ + /* variable length mailbox command log starts here */ +}; + +/* Given a pointer to a Firmware Mailbox Command Log and a log entry index, + * return a pointer to the specified entry. 
+ */ +static inline struct mbox_cmd *mbox_cmd_log_entry(struct mbox_cmd_log *log, + unsigned int entry_idx) +{ + return &((struct mbox_cmd *)&(log)[1])[entry_idx]; +} + #include "t4fw_api.h" #define FW_VERSION(chip) ( \ @@ -780,6 +808,10 @@ struct adapter { struct work_struct db_drop_task; bool tid_release_task_busy; + /* support for mailbox command/reply logging */ +#define T4_OS_LOG_MBOX_CMDS 256 + struct mbox_cmd_log *mbox_log; + struct dentry *debugfs_root; bool use_bd; /* Use SGE Back Door intfc for reading SGE Contexts */ bool trace_rss; /* 1 implies that different RSS flit per filter is diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c index 9506c5cd11b9..91fb50850fff 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c @@ -1152,6 +1152,104 @@ static const struct file_operations devlog_fops = { .release = seq_release_private }; +/* Show Firmware Mailbox Command/Reply Log + * + * Note that we don't do any locking when dumping the Firmware Mailbox Log so + * it's possible that we can catch things during a log update and therefore + * see partially corrupted log entries. But it's probably Good Enough(tm). + * If we ever decide that we want to make sure that we're dumping a coherent + * log, we'd need to perform locking in the mailbox logging and in + * mboxlog_open() where we'd need to grab the entire mailbox log in one go + * like we do for the Firmware Device Log. + */ +static int mboxlog_show(struct seq_file *seq, void *v) +{ + struct adapter *adapter = seq->private; + struct mbox_cmd_log *log = adapter->mbox_log; + struct mbox_cmd *entry; + int entry_idx, i; + + if (v == SEQ_START_TOKEN) { + seq_printf(seq, + "%10s %15s %5s %5s %s\n", + "Seq#", "Tstamp", "Atime", "Etime", + "Command/Reply"); + return 0; + } + + entry_idx = log->cursor + ((uintptr_t)v - 2); + if (entry_idx >= log->size) + entry_idx -= log->size; + entry = mbox_cmd_log_entry(log, entry_idx); + + /* skip over unused entries */ + if (entry->timestamp == 0) + return 0; + + seq_printf(seq, "%10u %15llu %5d %5d", + entry->seqno, entry->timestamp, + entry->access, entry->execute); + for (i = 0; i < MBOX_LEN / 8; i++) { + u64 flit = entry->cmd[i]; + u32 hi = (u32)(flit >> 32); + u32 lo = (u32)flit; + + seq_printf(seq, " %08x %08x", hi, lo); + } + seq_puts(seq, "\n"); + return 0; +} + +static inline void *mboxlog_get_idx(struct seq_file *seq, loff_t pos) +{ + struct adapter *adapter = seq->private; + struct mbox_cmd_log *log = adapter->mbox_log; + + return ((pos <= log->size) ? (void *)(uintptr_t)(pos + 1) : NULL); +} + +static void *mboxlog_start(struct seq_file *seq, loff_t *pos) +{ + return *pos ? 
mboxlog_get_idx(seq, *pos) : SEQ_START_TOKEN; +} + +static void *mboxlog_next(struct seq_file *seq, void *v, loff_t *pos) +{ + ++*pos; + return mboxlog_get_idx(seq, *pos); +} + +static void mboxlog_stop(struct seq_file *seq, void *v) +{ +} + +static const struct seq_operations mboxlog_seq_ops = { + .start = mboxlog_start, + .next = mboxlog_next, + .stop = mboxlog_stop, + .show = mboxlog_show +}; + +static int mboxlog_open(struct inode *inode, struct file *file) +{ + int res = seq_open(file, &mboxlog_seq_ops); + + if (!res) { + struct seq_file *seq = file->private_data; + + seq->private = inode->i_private; + } + return res; +} + +static const struct file_operations mboxlog_fops = { + .owner = THIS_MODULE, + .open = mboxlog_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release, +}; + static int mbox_show(struct seq_file *seq, void *v) { static const char * const owner[] = { "none", "FW", "driver", @@ -3129,6 +3227,7 @@ int t4_setup_debugfs(struct adapter *adap) { "cim_qcfg", &cim_qcfg_fops, S_IRUSR, 0 }, { "clk", &clk_debugfs_fops, S_IRUSR, 0 }, { "devlog", &devlog_fops, S_IRUSR, 0 }, + { "mboxlog", &mboxlog_fops, S_IRUSR, 0 }, { "mbox0", &mbox_debugfs_fops, S_IRUSR | S_IWUSR, 0 }, { "mbox1", &mbox_debugfs_fops, S_IRUSR | S_IWUSR, 1 }, { "mbox2", &mbox_debugfs_fops, S_IRUSR | S_IWUSR, 2 }, diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 4f627f3edb98..d7f40436f319 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -4909,6 +4909,16 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) goto out_free_adapter; } + adapter->mbox_log = kzalloc(sizeof(*adapter->mbox_log) + + (sizeof(struct mbox_cmd) * + T4_OS_LOG_MBOX_CMDS), + GFP_KERNEL); + if (!adapter->mbox_log) { + err = -ENOMEM; + goto out_free_adapter; + } + adapter->mbox_log->size = T4_OS_LOG_MBOX_CMDS; + /* PCI device has been enabled */ adapter->flags |= DEV_ENABLED; @@ -5167,6 +5177,7 @@ sriov: if (adapter->workq) destroy_workqueue(adapter->workq); + kfree(adapter->mbox_log); kfree(adapter); out_unmap_bar0: iounmap(regs); @@ -5233,6 +5244,7 @@ static void remove_one(struct pci_dev *pdev) adapter->flags &= ~DEV_ENABLED; } pci_release_regions(pdev); + kfree(adapter->mbox_log); synchronize_rcu(); kfree(adapter); } else diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 7907d85efa4c..49bcbf16c9ca 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -224,18 +224,34 @@ static void fw_asrt(struct adapter *adap, u32 mbox_addr) be32_to_cpu(asrt.u.assert.x), be32_to_cpu(asrt.u.assert.y)); } -static void dump_mbox(struct adapter *adap, int mbox, u32 data_reg) +/** + * t4_record_mbox - record a Firmware Mailbox Command/Reply in the log + * @adapter: the adapter + * @cmd: the Firmware Mailbox Command or Reply + * @size: command length in bytes + * @access: the time (ms) needed to access the Firmware Mailbox + * @execute: the time (ms) the command spent being executed + */ +static void t4_record_mbox(struct adapter *adapter, + const __be64 *cmd, unsigned int size, + int access, int execute) { - dev_err(adap->pdev_dev, - "mbox %d: %llx %llx %llx %llx %llx %llx %llx %llx\n", mbox, - (unsigned long long)t4_read_reg64(adap, data_reg), - (unsigned long long)t4_read_reg64(adap, data_reg + 8), - (unsigned long long)t4_read_reg64(adap, data_reg + 16), - (unsigned long 
long)t4_read_reg64(adap, data_reg + 24), - (unsigned long long)t4_read_reg64(adap, data_reg + 32), - (unsigned long long)t4_read_reg64(adap, data_reg + 40), - (unsigned long long)t4_read_reg64(adap, data_reg + 48), - (unsigned long long)t4_read_reg64(adap, data_reg + 56)); + struct mbox_cmd_log *log = adapter->mbox_log; + struct mbox_cmd *entry; + int i; + + entry = mbox_cmd_log_entry(log, log->cursor++); + if (log->cursor == log->size) + log->cursor = 0; + + for (i = 0; i < size / 8; i++) + entry->cmd[i] = be64_to_cpu(cmd[i]); + while (i < MBOX_LEN / 8) + entry->cmd[i++] = 0; + entry->timestamp = jiffies; + entry->seqno = log->seqno++; + entry->access = access; + entry->execute = execute; } /** @@ -268,12 +284,15 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd, 1, 1, 3, 5, 10, 10, 20, 50, 100, 200 }; + u16 access = 0; + u16 execute = 0; u32 v; u64 res; - int i, ms, delay_idx; + int i, ms, delay_idx, ret; const __be64 *p = cmd; u32 data_reg = PF_REG(mbox, CIM_PF_MAILBOX_DATA_A); u32 ctl_reg = PF_REG(mbox, CIM_PF_MAILBOX_CTRL_A); + __be64 cmd_rpl[MBOX_LEN / 8]; if ((size & 15) || size > MBOX_LEN) return -EINVAL; @@ -289,9 +308,14 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd, for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++) v = MBOWNER_G(t4_read_reg(adap, ctl_reg)); - if (v != MBOX_OWNER_DRV) - return v ? -EBUSY : -ETIMEDOUT; + if (v != MBOX_OWNER_DRV) { + ret = (v == MBOX_OWNER_FW) ? -EBUSY : -ETIMEDOUT; + t4_record_mbox(adap, cmd, MBOX_LEN, access, ret); + return ret; + } + /* Copy in the new mailbox command and send it on its way ... */ + t4_record_mbox(adap, cmd, MBOX_LEN, access, 0); for (i = 0; i < size; i += 8) t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p++)); @@ -317,26 +341,31 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd, continue; } - res = t4_read_reg64(adap, data_reg); + get_mbox_rpl(adap, cmd_rpl, MBOX_LEN / 8, data_reg); + res = be64_to_cpu(cmd_rpl[0]); + if (FW_CMD_OP_G(res >> 32) == FW_DEBUG_CMD) { fw_asrt(adap, data_reg); res = FW_CMD_RETVAL_V(EIO); } else if (rpl) { - get_mbox_rpl(adap, rpl, size / 8, data_reg); + memcpy(rpl, cmd_rpl, size); } - if (FW_CMD_RETVAL_G((int)res)) - dump_mbox(adap, mbox, data_reg); t4_write_reg(adap, ctl_reg, 0); + + execute = i + ms; + t4_record_mbox(adap, cmd_rpl, + MBOX_LEN, access, execute); return -FW_CMD_RETVAL_G((int)res); } } - dump_mbox(adap, mbox, data_reg); + ret = -ETIMEDOUT; + t4_record_mbox(adap, cmd, MBOX_LEN, access, ret); dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n", *(const u8 *)cmd, mbox); t4_report_fw_error(adap); - return -ETIMEDOUT; + return ret; } int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size, From ae7b7576220560617e903910c6307e13ec93b279 Mon Sep 17 00:00:00 2001 From: Hariprasad Shenai Date: Thu, 28 Apr 2016 13:23:19 +0530 Subject: [PATCH 1161/1649] cxgb4vf: Add support to enable logging of firmware mailbox commands for VF Add new /sys/kernel/debug/ support to dump firmware mailbox commands and replies for debugging purpose. Based on original work by Casey Leedom Signed-off-by: Hariprasad Shenai Signed-off-by: David S. 
Miller --- .../net/ethernet/chelsio/cxgb4vf/adapter.h | 4 + .../ethernet/chelsio/cxgb4vf/cxgb4vf_main.c | 112 ++++++++++++++++++ .../ethernet/chelsio/cxgb4vf/t4vf_common.h | 29 +++++ .../net/ethernet/chelsio/cxgb4vf/t4vf_hw.c | 72 +++++++---- 4 files changed, 192 insertions(+), 25 deletions(-) diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h index 4a707c32d76f..734dd776c22f 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h +++ b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h @@ -387,6 +387,10 @@ struct adapter { /* various locks */ spinlock_t stats_lock; + /* support for mailbox command/reply logging */ +#define T4VF_OS_LOG_MBOX_CMDS 256 + struct mbox_cmd_log *mbox_log; + /* list of MAC addresses in MPS Hash */ struct list_head mac_hlist; }; diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c index 730fec73d5a6..04fc6f6d1e25 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c @@ -1703,6 +1703,105 @@ static const struct ethtool_ops cxgb4vf_ethtool_ops = { * ================================================ */ +/* + * Show Firmware Mailbox Command/Reply Log + * + * Note that we don't do any locking when dumping the Firmware Mailbox Log so + * it's possible that we can catch things during a log update and therefore + * see partially corrupted log entries. But i9t's probably Good Enough(tm). + * If we ever decide that we want to make sure that we're dumping a coherent + * log, we'd need to perform locking in the mailbox logging and in + * mboxlog_open() where we'd need to grab the entire mailbox log in one go + * like we do for the Firmware Device Log. But as stated above, meh ... + */ +static int mboxlog_show(struct seq_file *seq, void *v) +{ + struct adapter *adapter = seq->private; + struct mbox_cmd_log *log = adapter->mbox_log; + struct mbox_cmd *entry; + int entry_idx, i; + + if (v == SEQ_START_TOKEN) { + seq_printf(seq, + "%10s %15s %5s %5s %s\n", + "Seq#", "Tstamp", "Atime", "Etime", + "Command/Reply"); + return 0; + } + + entry_idx = log->cursor + ((uintptr_t)v - 2); + if (entry_idx >= log->size) + entry_idx -= log->size; + entry = mbox_cmd_log_entry(log, entry_idx); + + /* skip over unused entries */ + if (entry->timestamp == 0) + return 0; + + seq_printf(seq, "%10u %15llu %5d %5d", + entry->seqno, entry->timestamp, + entry->access, entry->execute); + for (i = 0; i < MBOX_LEN / 8; i++) { + u64 flit = entry->cmd[i]; + u32 hi = (u32)(flit >> 32); + u32 lo = (u32)flit; + + seq_printf(seq, " %08x %08x", hi, lo); + } + seq_puts(seq, "\n"); + return 0; +} + +static inline void *mboxlog_get_idx(struct seq_file *seq, loff_t pos) +{ + struct adapter *adapter = seq->private; + struct mbox_cmd_log *log = adapter->mbox_log; + + return ((pos <= log->size) ? (void *)(uintptr_t)(pos + 1) : NULL); +} + +static void *mboxlog_start(struct seq_file *seq, loff_t *pos) +{ + return *pos ? 
mboxlog_get_idx(seq, *pos) : SEQ_START_TOKEN; +} + +static void *mboxlog_next(struct seq_file *seq, void *v, loff_t *pos) +{ + ++*pos; + return mboxlog_get_idx(seq, *pos); +} + +static void mboxlog_stop(struct seq_file *seq, void *v) +{ +} + +static const struct seq_operations mboxlog_seq_ops = { + .start = mboxlog_start, + .next = mboxlog_next, + .stop = mboxlog_stop, + .show = mboxlog_show +}; + +static int mboxlog_open(struct inode *inode, struct file *file) +{ + int res = seq_open(file, &mboxlog_seq_ops); + + if (!res) { + struct seq_file *seq = file->private_data; + + seq->private = inode->i_private; + } + return res; +} + +static const struct file_operations mboxlog_fops = { + .owner = THIS_MODULE, + .open = mboxlog_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release, +}; + /* * Show SGE Queue Set information. We display QPL Queues Sets per line. */ @@ -2122,6 +2221,7 @@ struct cxgb4vf_debugfs_entry { }; static struct cxgb4vf_debugfs_entry debugfs_files[] = { + { "mboxlog", S_IRUGO, &mboxlog_fops }, { "sge_qinfo", S_IRUGO, &sge_qinfo_debugfs_fops }, { "sge_qstats", S_IRUGO, &sge_qstats_proc_fops }, { "resources", S_IRUGO, &resources_proc_fops }, @@ -2664,6 +2764,16 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev, adapter->pdev = pdev; adapter->pdev_dev = &pdev->dev; + adapter->mbox_log = kzalloc(sizeof(*adapter->mbox_log) + + (sizeof(struct mbox_cmd) * + T4VF_OS_LOG_MBOX_CMDS), + GFP_KERNEL); + if (!adapter->mbox_log) { + err = -ENOMEM; + goto err_free_adapter; + } + adapter->mbox_log->size = T4VF_OS_LOG_MBOX_CMDS; + /* * Initialize SMP data synchronization resources. */ @@ -2913,6 +3023,7 @@ err_unmap_bar0: iounmap(adapter->regs); err_free_adapter: + kfree(adapter->mbox_log); kfree(adapter); err_release_regions: @@ -2982,6 +3093,7 @@ static void cxgb4vf_pci_remove(struct pci_dev *pdev) iounmap(adapter->regs); if (!is_t4(adapter->params.chip)) iounmap(adapter->bar2); + kfree(adapter->mbox_log); kfree(adapter); } diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h index 9b40a85cc1e4..438374a05791 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h +++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h @@ -36,6 +36,7 @@ #ifndef __T4VF_COMMON_H__ #define __T4VF_COMMON_H__ +#include "../cxgb4/t4_hw.h" #include "../cxgb4/t4fw_api.h" #define CHELSIO_CHIP_CODE(version, revision) (((version) << 4) | (revision)) @@ -227,6 +228,34 @@ struct adapter_params { u8 nports; /* # of Ethernet "ports" */ }; +/* Firmware Mailbox Command/Reply log. All values are in Host-Endian format. + * The access and execute times are signed in order to accommodate negative + * error returns. + */ +struct mbox_cmd { + u64 cmd[MBOX_LEN / 8]; /* a Firmware Mailbox Command/Reply */ + u64 timestamp; /* OS-dependent timestamp */ + u32 seqno; /* sequence number */ + s16 access; /* time (ms) to access mailbox */ + s16 execute; /* time (ms) to execute */ +}; + +struct mbox_cmd_log { + unsigned int size; /* number of entries in the log */ + unsigned int cursor; /* next position in the log to write */ + u32 seqno; /* next sequence number */ + /* variable length mailbox command log starts here */ +}; + +/* Given a pointer to a Firmware Mailbox Command Log and a log entry index, + * return a pointer to the specified entry. 
+ */ +static inline struct mbox_cmd *mbox_cmd_log_entry(struct mbox_cmd_log *log, + unsigned int entry_idx) +{ + return &((struct mbox_cmd *)&(log)[1])[entry_idx]; +} + #include "adapter.h" #ifndef PCI_VENDOR_ID_CHELSIO diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c index fed83d88fc4e..955ff7c61f1b 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c @@ -76,21 +76,33 @@ static void get_mbox_rpl(struct adapter *adapter, __be64 *rpl, int size, *rpl++ = cpu_to_be64(t4_read_reg64(adapter, mbox_data)); } -/* - * Dump contents of mailbox with a leading tag. +/** + * t4vf_record_mbox - record a Firmware Mailbox Command/Reply in the log + * @adapter: the adapter + * @cmd: the Firmware Mailbox Command or Reply + * @size: command length in bytes + * @access: the time (ms) needed to access the Firmware Mailbox + * @execute: the time (ms) the command spent being executed */ -static void dump_mbox(struct adapter *adapter, const char *tag, u32 mbox_data) +static void t4vf_record_mbox(struct adapter *adapter, const __be64 *cmd, + int size, int access, int execute) { - dev_err(adapter->pdev_dev, - "mbox %s: %llx %llx %llx %llx %llx %llx %llx %llx\n", tag, - (unsigned long long)t4_read_reg64(adapter, mbox_data + 0), - (unsigned long long)t4_read_reg64(adapter, mbox_data + 8), - (unsigned long long)t4_read_reg64(adapter, mbox_data + 16), - (unsigned long long)t4_read_reg64(adapter, mbox_data + 24), - (unsigned long long)t4_read_reg64(adapter, mbox_data + 32), - (unsigned long long)t4_read_reg64(adapter, mbox_data + 40), - (unsigned long long)t4_read_reg64(adapter, mbox_data + 48), - (unsigned long long)t4_read_reg64(adapter, mbox_data + 56)); + struct mbox_cmd_log *log = adapter->mbox_log; + struct mbox_cmd *entry; + int i; + + entry = mbox_cmd_log_entry(log, log->cursor++); + if (log->cursor == log->size) + log->cursor = 0; + + for (i = 0; i < size / 8; i++) + entry->cmd[i] = be64_to_cpu(cmd[i]); + while (i < MBOX_LEN / 8) + entry->cmd[i++] = 0; + entry->timestamp = jiffies; + entry->seqno = log->seqno++; + entry->access = access; + entry->execute = execute; } /** @@ -120,10 +132,13 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size, 1, 1, 3, 5, 10, 10, 20, 50, 100 }; + u16 access = 0, execute = 0; u32 v, mbox_data; - int i, ms, delay_idx; + int i, ms, delay_idx, ret; const __be64 *p; u32 mbox_ctl = T4VF_CIM_BASE_ADDR + CIM_VF_EXT_MAILBOX_CTRL; + u32 cmd_op = FW_CMD_OP_G(be32_to_cpu(((struct fw_cmd_hdr *)cmd)->hi)); + __be64 cmd_rpl[MBOX_LEN / 8]; /* In T6, mailbox size is changed to 128 bytes to avoid * invalidating the entire prefetch buffer. @@ -148,8 +163,11 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size, v = MBOWNER_G(t4_read_reg(adapter, mbox_ctl)); for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++) v = MBOWNER_G(t4_read_reg(adapter, mbox_ctl)); - if (v != MBOX_OWNER_DRV) - return v == MBOX_OWNER_FW ? -EBUSY : -ETIMEDOUT; + if (v != MBOX_OWNER_DRV) { + ret = (v == MBOX_OWNER_FW) ? -EBUSY : -ETIMEDOUT; + t4vf_record_mbox(adapter, cmd, size, access, ret); + return ret; + } /* * Write the command array into the Mailbox Data register array and @@ -164,6 +182,8 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size, * Data registers before doing the write to the VF Mailbox Control * register. 
*/ + if (cmd_op != FW_VI_STATS_CMD) + t4vf_record_mbox(adapter, cmd, size, access, 0); for (i = 0, p = cmd; i < size; i += 8) t4_write_reg64(adapter, mbox_data + i, be64_to_cpu(*p++)); t4_read_reg(adapter, mbox_data); /* flush write */ @@ -209,31 +229,33 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size, * We return the (negated) firmware command return * code (this depends on FW_SUCCESS == 0). */ + get_mbox_rpl(adapter, cmd_rpl, size, mbox_data); /* return value in low-order little-endian word */ - v = t4_read_reg(adapter, mbox_data); - if (FW_CMD_RETVAL_G(v)) - dump_mbox(adapter, "FW Error", mbox_data); + v = be64_to_cpu(cmd_rpl[0]); if (rpl) { /* request bit in high-order BE word */ WARN_ON((be32_to_cpu(*(const __be32 *)cmd) & FW_CMD_REQUEST_F) == 0); - get_mbox_rpl(adapter, rpl, size, mbox_data); + memcpy(rpl, cmd_rpl, size); WARN_ON((be32_to_cpu(*(__be32 *)rpl) & FW_CMD_REQUEST_F) != 0); } t4_write_reg(adapter, mbox_ctl, MBOWNER_V(MBOX_OWNER_NONE)); + execute = i + ms; + if (cmd_op != FW_VI_STATS_CMD) + t4vf_record_mbox(adapter, cmd_rpl, size, access, + execute); return -FW_CMD_RETVAL_G(v); } } - /* - * We timed out. Return the error ... - */ - dump_mbox(adapter, "FW Timeout", mbox_data); - return -ETIMEDOUT; + /* We timed out. Return the error ... */ + ret = -ETIMEDOUT; + t4vf_record_mbox(adapter, cmd, size, access, ret); + return ret; } #define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\ From 6ccfba75d3137efc3b665a337b946fd6df1162b9 Mon Sep 17 00:00:00 2001 From: Jesper Dangaard Brouer Date: Thu, 28 Apr 2016 14:20:53 +0200 Subject: [PATCH 1162/1649] samples/bpf: add back functionality to redefine LLC command It is practical to be-able-to redefine the location of the LLVM command 'llc', because not all distros have a LLVM version with bpf target support. Thus, it is sometimes required to compile LLVM from source, and sometimes it is not desired to overwrite the distros default LLVM version. This feature was removed with 128d1514be35 ("samples/bpf: Use llc in PATH, rather than a hardcoded value"). Add this features back. Note that it is possible to redefine the LLC on the make command like: make samples/bpf/ LLC=~/git/llvm/build/bin/llc Fixes: 128d1514be35 ("samples/bpf: Use llc in PATH, rather than a hardcoded value") Signed-off-by: Jesper Dangaard Brouer Acked-by: Alexei Starovoitov Acked-by: Naveen N. Rao Signed-off-by: David S. Miller --- samples/bpf/Makefile | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile index 744dd7a16144..5bae9536f100 100644 --- a/samples/bpf/Makefile +++ b/samples/bpf/Makefile @@ -81,10 +81,14 @@ HOSTLOADLIBES_spintest += -lelf HOSTLOADLIBES_map_perf_test += -lelf -lrt HOSTLOADLIBES_test_overhead += -lelf -lrt +# Allows pointing LLC to a LLVM backend with bpf support, redefine on cmdline: +# make samples/bpf/ LLC=~/git/llvm/build/bin/llc +LLC ?= llc + # asm/sysreg.h - inline assembly used by it is incompatible with llvm. # But, there is no easy way to fix it, so just exclude it since it is # useless for BPF samples. 
$(obj)/%.o: $(src)/%.c clang $(NOSTDINC_FLAGS) $(LINUXINCLUDE) $(EXTRA_CFLAGS) \ -D__KERNEL__ -D__ASM_SYSREG_H -Wno-unused-value -Wno-pointer-sign \ - -O2 -emit-llvm -c $< -o -| llc -march=bpf -filetype=obj -o $@ + -O2 -emit-llvm -c $< -o -| $(LLC) -march=bpf -filetype=obj -o $@ From 7b01dd5793394ee2ef47c328b28c30f5c01107c9 Mon Sep 17 00:00:00 2001 From: Jesper Dangaard Brouer Date: Thu, 28 Apr 2016 14:20:58 +0200 Subject: [PATCH 1163/1649] samples/bpf: Makefile verify LLVM compiler avail and bpf target is supported Make compiling samples/bpf more user friendly, by detecting if LLVM compiler tool 'llc' is available, and also detect if the 'bpf' target is available in this version of LLVM. Signed-off-by: Jesper Dangaard Brouer Acked-by: Alexei Starovoitov Signed-off-by: David S. Miller --- samples/bpf/Makefile | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile index 5bae9536f100..45859c99f573 100644 --- a/samples/bpf/Makefile +++ b/samples/bpf/Makefile @@ -85,6 +85,24 @@ HOSTLOADLIBES_test_overhead += -lelf -lrt # make samples/bpf/ LLC=~/git/llvm/build/bin/llc LLC ?= llc +# Verify LLVM compiler is available and bpf target is supported +.PHONY: verify_cmd_llc verify_target_bpf + +verify_cmd_llc: + @if ! (which "${LLC}" > /dev/null 2>&1); then \ + echo "*** ERROR: Cannot find LLVM tool 'llc' (${LLC})" ;\ + exit 1; \ + else true; fi + +verify_target_bpf: verify_cmd_llc + @if ! (${LLC} -march=bpf -mattr=help > /dev/null 2>&1); then \ + echo "*** ERROR: LLVM (${LLC}) does not support 'bpf' target" ;\ + echo " NOTICE: LLVM version >= 3.7.1 required" ;\ + exit 2; \ + else true; fi + +$(src)/*.c: verify_target_bpf + # asm/sysreg.h - inline assembly used by it is incompatible with llvm. # But, there is no easy way to fix it, so just exclude it since it is # useless for BPF samples. From 1c97566d515de2ef66873e30288b150f0154f3b3 Mon Sep 17 00:00:00 2001 From: Jesper Dangaard Brouer Date: Thu, 28 Apr 2016 14:21:04 +0200 Subject: [PATCH 1164/1649] samples/bpf: add a README file to get users started Getting started with using examples in samples/bpf/ is not straightforward. There are several dependencies, and specific versions of these dependencies. Just compiling the example tool is also slightly obscure, e.g. one need to call make like: make samples/bpf/ Do notice the "/" slash after the directory name. Signed-off-by: Jesper Dangaard Brouer Acked-by: Naveen N. Rao Acked-by: Alexei Starovoitov Signed-off-by: David S. Miller --- samples/bpf/README.rst | 63 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 63 insertions(+) create mode 100644 samples/bpf/README.rst diff --git a/samples/bpf/README.rst b/samples/bpf/README.rst new file mode 100644 index 000000000000..6f133f3f0075 --- /dev/null +++ b/samples/bpf/README.rst @@ -0,0 +1,63 @@ +eBPF sample programs +==================== + +This directory contains a mini eBPF library, test stubs, verifier +test-suite and examples for using eBPF. + +Build dependencies +================== + +Compiling requires having installed: + * clang >= version 3.4.0 + * llvm >= version 3.7.1 + +Note that LLVM's tool 'llc' must support target 'bpf', list version +and supported targets with command: ``llc --version`` + +Kernel headers +-------------- + +There are usually dependencies to header files of the current kernel. 
+To avoid installing devel kernel headers system wide, as a normal +user, simply call:: + + make headers_install + +This will create a local "usr/include" directory in the git/build top +level directory, which the make system automatically picks up first. + +Compiling +========= + +For building the BPF samples, issue the below command from the kernel +top level directory:: + + make samples/bpf/ + +Do notice the "/" slash after the directory name. + +Manually compiling LLVM with 'bpf' support +------------------------------------------ + +Since version 3.7.0, LLVM adds a proper LLVM backend target for the +BPF bytecode architecture. + +By default llvm will build all non-experimental backends including bpf. +To generate a smaller llc binary one can use:: + + -DLLVM_TARGETS_TO_BUILD="BPF" + +Quick sniplet for manually compiling LLVM and clang +(build dependencies are cmake and gcc-c++):: + + $ git clone http://llvm.org/git/llvm.git + $ cd llvm/tools + $ git clone --depth 1 http://llvm.org/git/clang.git + $ cd ..; mkdir build; cd build + $ cmake .. -DLLVM_TARGETS_TO_BUILD="BPF;X86" + $ make -j $(getconf _NPROCESSORS_ONLN) + +It is also possible to point make to the newly compiled 'llc' command +via redefining LLC on the make command line:: + + make samples/bpf/ LLC=~/git/llvm/build/bin/llc From b62a796c109ca0be3e49de620a8ea8248412446d Mon Sep 17 00:00:00 2001 From: Jesper Dangaard Brouer Date: Thu, 28 Apr 2016 14:21:09 +0200 Subject: [PATCH 1165/1649] samples/bpf: allow make to be run from samples/bpf/ directory It is not intuitive that 'make' must be run from the top level directory with argument "samples/bpf/" to compile these eBPF samples. Introduce a kbuild make file trick that allows make to be run from the "samples/bpf/" directory itself. It basically changes to the top level directory and calls "make samples/bpf/" with the "/" slash after the directory name. Also add a clean target that only cleans this directory, by taking advantage of the kbuild external module setting M=$PWD. Signed-off-by: Jesper Dangaard Brouer Acked-by: Alexei Starovoitov Signed-off-by: David S. Miller --- samples/bpf/Makefile | 8 ++++++++ samples/bpf/README.rst | 3 +++ 2 files changed, 11 insertions(+) diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile index 45859c99f573..dd63521832d8 100644 --- a/samples/bpf/Makefile +++ b/samples/bpf/Makefile @@ -85,6 +85,14 @@ HOSTLOADLIBES_test_overhead += -lelf -lrt # make samples/bpf/ LLC=~/git/llvm/build/bin/llc LLC ?= llc +# Trick to allow make to be run from this directory +all: + $(MAKE) -C ../../ $$PWD/ + +clean: + $(MAKE) -C ../../ M=$$PWD clean + @rm -f *~ + # Verify LLVM compiler is available and bpf target is supported .PHONY: verify_cmd_llc verify_target_bpf diff --git a/samples/bpf/README.rst b/samples/bpf/README.rst index 6f133f3f0075..e36687d900c8 100644 --- a/samples/bpf/README.rst +++ b/samples/bpf/README.rst @@ -36,6 +36,9 @@ top level directory:: Do notice the "/" slash after the directory name. +It is also possible to call make from this directory. This will just +hide the invocation of make as above with the appended "/". + Manually compiling LLVM with 'bpf' support ------------------------------------------ From bdefbbf2ecff6efcd253767179a60961aebed9dc Mon Sep 17 00:00:00 2001 From: Jesper Dangaard Brouer Date: Thu, 28 Apr 2016 14:21:14 +0200 Subject: [PATCH 1166/1649] samples/bpf: like LLC also verify and allow redefining CLANG command Users are likely to manually compile both LLVM 'llc' and 'clang' tools. 
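For illustration only (this block is not part of any patch in the series): the restricted-C programs that these samples/bpf Makefile rules compile look roughly like the minimal sketch below. It assumes the SEC() section macro provided by samples/bpf/bpf_helpers.h in this era of the tree; the program name, section name and file name are invented here.

    #include <uapi/linux/bpf.h>
    #include "bpf_helpers.h"

    /* Minimal socket filter. The return value of a socket filter is the
     * number of bytes of the packet to pass up to the socket, so returning
     * 0 keeps nothing; such samples usually exist only for their side
     * effects, e.g. updating BPF maps that user space then reads.
     */
    SEC("socket_example")
    int bpf_prog_example(struct __sk_buff *skb)
    {
            return 0;
    }

    char _license[] SEC("license") = "GPL";

A file like this (say prog_kern.c) is built through the kernel build system so that the uapi include paths resolve, via the $(obj)/%.o rule shown earlier: clang emits LLVM bitcode which is piped into $(LLC) -march=bpf -filetype=obj.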
Thus, also allow redefining CLANG and verify command exist. Makefile implementation wise, the target that verify the command have been generalized. Signed-off-by: Jesper Dangaard Brouer Acked-by: Alexei Starovoitov Signed-off-by: David S. Miller --- samples/bpf/Makefile | 25 ++++++++++++++----------- samples/bpf/README.rst | 6 +++--- 2 files changed, 17 insertions(+), 14 deletions(-) diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile index dd63521832d8..66897e61232c 100644 --- a/samples/bpf/Makefile +++ b/samples/bpf/Makefile @@ -81,9 +81,10 @@ HOSTLOADLIBES_spintest += -lelf HOSTLOADLIBES_map_perf_test += -lelf -lrt HOSTLOADLIBES_test_overhead += -lelf -lrt -# Allows pointing LLC to a LLVM backend with bpf support, redefine on cmdline: -# make samples/bpf/ LLC=~/git/llvm/build/bin/llc +# Allows pointing LLC/CLANG to a LLVM backend with bpf support, redefine on cmdline: +# make samples/bpf/ LLC=~/git/llvm/build/bin/llc CLANG=~/git/llvm/build/bin/clang LLC ?= llc +CLANG ?= clang # Trick to allow make to be run from this directory all: @@ -93,16 +94,18 @@ clean: $(MAKE) -C ../../ M=$$PWD clean @rm -f *~ -# Verify LLVM compiler is available and bpf target is supported -.PHONY: verify_cmd_llc verify_target_bpf +# Verify LLVM compiler tools are available and bpf target is supported by llc +.PHONY: verify_cmds verify_target_bpf $(CLANG) $(LLC) -verify_cmd_llc: - @if ! (which "${LLC}" > /dev/null 2>&1); then \ - echo "*** ERROR: Cannot find LLVM tool 'llc' (${LLC})" ;\ - exit 1; \ - else true; fi +verify_cmds: $(CLANG) $(LLC) + @for TOOL in $^ ; do \ + if ! (which -- "$${TOOL}" > /dev/null 2>&1); then \ + echo "*** ERROR: Cannot find LLVM tool $${TOOL}" ;\ + exit 1; \ + else true; fi; \ + done -verify_target_bpf: verify_cmd_llc +verify_target_bpf: verify_cmds @if ! (${LLC} -march=bpf -mattr=help > /dev/null 2>&1); then \ echo "*** ERROR: LLVM (${LLC}) does not support 'bpf' target" ;\ echo " NOTICE: LLVM version >= 3.7.1 required" ;\ @@ -115,6 +118,6 @@ $(src)/*.c: verify_target_bpf # But, there is no easy way to fix it, so just exclude it since it is # useless for BPF samples. $(obj)/%.o: $(src)/%.c - clang $(NOSTDINC_FLAGS) $(LINUXINCLUDE) $(EXTRA_CFLAGS) \ + $(CLANG) $(NOSTDINC_FLAGS) $(LINUXINCLUDE) $(EXTRA_CFLAGS) \ -D__KERNEL__ -D__ASM_SYSREG_H -Wno-unused-value -Wno-pointer-sign \ -O2 -emit-llvm -c $< -o -| $(LLC) -march=bpf -filetype=obj -o $@ diff --git a/samples/bpf/README.rst b/samples/bpf/README.rst index e36687d900c8..a43eae3f0551 100644 --- a/samples/bpf/README.rst +++ b/samples/bpf/README.rst @@ -60,7 +60,7 @@ Quick sniplet for manually compiling LLVM and clang $ cmake .. 
-DLLVM_TARGETS_TO_BUILD="BPF;X86" $ make -j $(getconf _NPROCESSORS_ONLN) -It is also possible to point make to the newly compiled 'llc' command -via redefining LLC on the make command line:: +It is also possible to point make to the newly compiled 'llc' or +'clang' command via redefining LLC or CLANG on the make command line:: - make samples/bpf/ LLC=~/git/llvm/build/bin/llc + make samples/bpf/ LLC=~/git/llvm/build/bin/llc CLANG=~/git/llvm/build/bin/clang From 65226ef8ea167c773de4c5b31a0fd3cb3a08f5b4 Mon Sep 17 00:00:00 2001 From: Jiri Benc Date: Thu, 28 Apr 2016 16:36:30 +0200 Subject: [PATCH 1167/1649] vxlan: fix initialization with custom link parameters Commit 0c867c9bf84c ("vxlan: move Ethernet initialization to a separate function") changed initialization order and as an unintended result, when the user specifies additional link parameters (such as IFLA_ADDRESS) while creating vxlan interface, those are overwritten by vxlan_ether_setup later. It's necessary to call ether_setup from withing the ->setup callback. That way, the correct parameters are set by rtnl_create_link later. This is done also for VXLAN-GPE, as we don't know the interface type yet at that point, and changed to the correct interface type later. Fixes: 0c867c9bf84c ("vxlan: move Ethernet initialization to a separate function") Reported-by: Nicolas Dichtel Signed-off-by: Jiri Benc Tested-by: Nicolas Dichtel Signed-off-by: David S. Miller --- drivers/net/vxlan.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 6fb93b57a724..2668e528dee4 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -2557,6 +2557,9 @@ static void vxlan_setup(struct net_device *dev) struct vxlan_dev *vxlan = netdev_priv(dev); unsigned int h; + eth_hw_addr_random(dev); + ether_setup(dev); + dev->destructor = free_netdev; SET_NETDEV_DEVTYPE(dev, &vxlan_type); @@ -2592,8 +2595,6 @@ static void vxlan_setup(struct net_device *dev) static void vxlan_ether_setup(struct net_device *dev) { - eth_hw_addr_random(dev); - ether_setup(dev); dev->priv_flags &= ~IFF_TX_SKB_SHARING; dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; dev->netdev_ops = &vxlan_netdev_ether_ops; @@ -2601,11 +2602,10 @@ static void vxlan_ether_setup(struct net_device *dev) static void vxlan_raw_setup(struct net_device *dev) { + dev->header_ops = NULL; dev->type = ARPHRD_NONE; dev->hard_header_len = 0; dev->addr_len = 0; - dev->mtu = ETH_DATA_LEN; - dev->tx_queue_len = 1000; dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST; dev->netdev_ops = &vxlan_netdev_raw_ops; } From ac1f74a7fc197660f26b59395b3fd2468f59f5dc Mon Sep 17 00:00:00 2001 From: Alexandre TORGUE Date: Thu, 28 Apr 2016 15:56:45 +0200 Subject: [PATCH 1168/1649] net: ethernet: stmmac: update MDIO support for GMAC4 On new GMAC4 IP, MAC_MDIO_address register has been updated, and bitmaps changed. This patch takes into account those changes. Signed-off-by: Alexandre TORGUE Signed-off-by: David S. 
Miller --- .../net/ethernet/stmicro/stmmac/stmmac_mdio.c | 102 +++++++++++++++++- 1 file changed, 98 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c index 06704ca6f9ca..3f83c369f56c 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c @@ -37,6 +37,18 @@ #define MII_BUSY 0x00000001 #define MII_WRITE 0x00000002 +/* GMAC4 defines */ +#define MII_GMAC4_GOC_SHIFT 2 +#define MII_GMAC4_WRITE (1 << MII_GMAC4_GOC_SHIFT) +#define MII_GMAC4_READ (3 << MII_GMAC4_GOC_SHIFT) + +#define MII_PHY_ADDR_GMAC4_SHIFT 21 +#define MII_PHY_ADDR_GMAC4_MASK GENMASK(25, 21) +#define MII_PHY_REG_GMAC4_SHIFT 16 +#define MII_PHY_REG_GMAC4_MASK GENMASK(20, 16) +#define MII_CSR_CLK_GMAC4_SHIFT 8 +#define MII_CSR_CLK_GMAC4_MASK GENMASK(11, 8) + static int stmmac_mdio_busy_wait(void __iomem *ioaddr, unsigned int mii_addr) { unsigned long curr; @@ -123,6 +135,80 @@ static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg, return stmmac_mdio_busy_wait(priv->ioaddr, mii_address); } +/** + * stmmac_mdio_read_gmac4 + * @bus: points to the mii_bus structure + * @phyaddr: MII addr reg bits 25-21 + * @phyreg: MII addr reg bits 20-16 + * Description: it reads data from the MII register of GMAC4 from within + * the phy device. + */ +static int stmmac_mdio_read_gmac4(struct mii_bus *bus, int phyaddr, int phyreg) +{ + struct net_device *ndev = bus->priv; + struct stmmac_priv *priv = netdev_priv(ndev); + unsigned int mii_address = priv->hw->mii.addr; + unsigned int mii_data = priv->hw->mii.data; + int data; + u32 value = (((phyaddr << MII_PHY_ADDR_GMAC4_SHIFT) & + (MII_PHY_ADDR_GMAC4_MASK)) | + ((phyreg << MII_PHY_REG_GMAC4_SHIFT) & + (MII_PHY_REG_GMAC4_MASK))) | MII_GMAC4_READ; + + value |= MII_BUSY | ((priv->clk_csr & MII_CSR_CLK_GMAC4_MASK) + << MII_CSR_CLK_GMAC4_SHIFT); + + if (stmmac_mdio_busy_wait(priv->ioaddr, mii_address)) + return -EBUSY; + + writel(value, priv->ioaddr + mii_address); + + if (stmmac_mdio_busy_wait(priv->ioaddr, mii_address)) + return -EBUSY; + + /* Read the data from the MII data register */ + data = (int)readl(priv->ioaddr + mii_data); + + return data; +} + +/** + * stmmac_mdio_write_gmac4 + * @bus: points to the mii_bus structure + * @phyaddr: MII addr reg bits 25-21 + * @phyreg: MII addr reg bits 20-16 + * @phydata: phy data + * Description: it writes the data into the MII register of GMAC4 from within + * the device. 
+ */ +static int stmmac_mdio_write_gmac4(struct mii_bus *bus, int phyaddr, int phyreg, + u16 phydata) +{ + struct net_device *ndev = bus->priv; + struct stmmac_priv *priv = netdev_priv(ndev); + unsigned int mii_address = priv->hw->mii.addr; + unsigned int mii_data = priv->hw->mii.data; + + u32 value = (((phyaddr << MII_PHY_ADDR_GMAC4_SHIFT) & + (MII_PHY_ADDR_GMAC4_MASK)) | + ((phyreg << MII_PHY_REG_GMAC4_SHIFT) & + (MII_PHY_REG_GMAC4_MASK))) | MII_GMAC4_WRITE; + + value |= MII_BUSY | ((priv->clk_csr & MII_CSR_CLK_GMAC4_MASK) + << MII_CSR_CLK_GMAC4_SHIFT); + + /* Wait until any existing MII operation is complete */ + if (stmmac_mdio_busy_wait(priv->ioaddr, mii_address)) + return -EBUSY; + + /* Set the MII address register to write */ + writel(phydata, priv->ioaddr + mii_data); + writel(value, priv->ioaddr + mii_address); + + /* Wait until any existing MII operation is complete */ + return stmmac_mdio_busy_wait(priv->ioaddr, mii_address); +} + /** * stmmac_mdio_reset * @bus: points to the mii_bus structure @@ -180,9 +266,11 @@ int stmmac_mdio_reset(struct mii_bus *bus) /* This is a workaround for problems with the STE101P PHY. * It doesn't complete its reset until at least one clock cycle - * on MDC, so perform a dummy mdio read. + * on MDC, so perform a dummy mdio read. To be upadted for GMAC4 + * if needed. */ - writel(0, priv->ioaddr + mii_address); + if (!priv->plat->has_gmac4) + writel(0, priv->ioaddr + mii_address); #endif return 0; } @@ -217,8 +305,14 @@ int stmmac_mdio_register(struct net_device *ndev) #endif new_bus->name = "stmmac"; - new_bus->read = &stmmac_mdio_read; - new_bus->write = &stmmac_mdio_write; + if (priv->plat->has_gmac4) { + new_bus->read = &stmmac_mdio_read_gmac4; + new_bus->write = &stmmac_mdio_write_gmac4; + } else { + new_bus->read = &stmmac_mdio_read; + new_bus->write = &stmmac_mdio_write; + } + new_bus->reset = &stmmac_mdio_reset; snprintf(new_bus->id, MII_BUS_ID_SIZE, "%s-%x", new_bus->name, priv->plat->bus_id); From 7d9f0b48746d37e4381efc02da27535a0a1bac43 Mon Sep 17 00:00:00 2001 From: Guillaume Nault Date: Thu, 28 Apr 2016 17:55:28 +0200 Subject: [PATCH 1169/1649] ppp: define reusable device creation functions Move PPP device initialisation and registration out of ppp_create_interface(). This prepares code for device registration with rtnetlink. While there, simplify the prototype of ppp_create_interface(): * Since ppp_dev_configure() takes care of setting file->private_data, there's no need to return a ppp structure to ppp_unattached_ioctl() anymore. * The unit parameter is made read/write so that ppp_create_interface() can tell which unit number has been assigned. Signed-off-by: Guillaume Nault Signed-off-by: David S. 
Miller --- drivers/net/ppp/ppp_generic.c | 206 +++++++++++++++++++--------------- 1 file changed, 118 insertions(+), 88 deletions(-) diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c index f572b31a2b20..59077c86ba0e 100644 --- a/drivers/net/ppp/ppp_generic.c +++ b/drivers/net/ppp/ppp_generic.c @@ -183,6 +183,11 @@ struct channel { #endif /* CONFIG_PPP_MULTILINK */ }; +struct ppp_config { + struct file *file; + s32 unit; +}; + /* * SMP locking issues: * Both the ppp.rlock and ppp.wlock locks protect the ppp.channels @@ -269,8 +274,7 @@ static void ppp_ccp_peek(struct ppp *ppp, struct sk_buff *skb, int inbound); static void ppp_ccp_closed(struct ppp *ppp); static struct compressor *find_compressor(int type); static void ppp_get_stats(struct ppp *ppp, struct ppp_stats *st); -static struct ppp *ppp_create_interface(struct net *net, int unit, - struct file *file, int *retp); +static int ppp_create_interface(struct net *net, struct file *file, int *unit); static void init_ppp_file(struct ppp_file *pf, int kind); static void ppp_destroy_interface(struct ppp *ppp); static struct ppp *ppp_find_unit(struct ppp_net *pn, int unit); @@ -853,12 +857,12 @@ static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf, /* Create a new ppp unit */ if (get_user(unit, p)) break; - ppp = ppp_create_interface(net, unit, file, &err); - if (!ppp) + err = ppp_create_interface(net, file, &unit); + if (err < 0) break; - file->private_data = &ppp->file; + err = -EFAULT; - if (put_user(ppp->file.index, p)) + if (put_user(unit, p)) break; err = 0; break; @@ -960,6 +964,94 @@ static struct pernet_operations ppp_net_ops = { .size = sizeof(struct ppp_net), }; +static int ppp_unit_register(struct ppp *ppp, int unit) +{ + struct ppp_net *pn = ppp_pernet(ppp->ppp_net); + int ret; + + mutex_lock(&pn->all_ppp_mutex); + + if (unit < 0) { + ret = unit_get(&pn->units_idr, ppp); + if (ret < 0) + goto err; + } else { + /* Caller asked for a specific unit number. Fail with -EEXIST + * if unavailable. For backward compatibility, return -EEXIST + * too if idr allocation fails; this makes pppd retry without + * requesting a specific unit number. 
+ */ + if (unit_find(&pn->units_idr, unit)) { + ret = -EEXIST; + goto err; + } + ret = unit_set(&pn->units_idr, ppp, unit); + if (ret < 0) { + /* Rewrite error for backward compatibility */ + ret = -EEXIST; + goto err; + } + } + ppp->file.index = ret; + + snprintf(ppp->dev->name, IFNAMSIZ, "ppp%i", ppp->file.index); + + ret = register_netdevice(ppp->dev); + if (ret < 0) + goto err_unit; + + atomic_inc(&ppp_unit_count); + + mutex_unlock(&pn->all_ppp_mutex); + + return 0; + +err_unit: + unit_put(&pn->units_idr, ppp->file.index); +err: + mutex_unlock(&pn->all_ppp_mutex); + + return ret; +} + +static int ppp_dev_configure(struct net *src_net, struct net_device *dev, + const struct ppp_config *conf) +{ + struct ppp *ppp = netdev_priv(dev); + int indx; + int err; + + ppp->dev = dev; + ppp->ppp_net = src_net; + ppp->mru = PPP_MRU; + ppp->owner = conf->file; + + init_ppp_file(&ppp->file, INTERFACE); + ppp->file.hdrlen = PPP_HDRLEN - 2; /* don't count proto bytes */ + + for (indx = 0; indx < NUM_NP; ++indx) + ppp->npmode[indx] = NPMODE_PASS; + INIT_LIST_HEAD(&ppp->channels); + spin_lock_init(&ppp->rlock); + spin_lock_init(&ppp->wlock); +#ifdef CONFIG_PPP_MULTILINK + ppp->minseq = -1; + skb_queue_head_init(&ppp->mrq); +#endif /* CONFIG_PPP_MULTILINK */ +#ifdef CONFIG_PPP_FILTER + ppp->pass_filter = NULL; + ppp->active_filter = NULL; +#endif /* CONFIG_PPP_FILTER */ + + err = ppp_unit_register(ppp, conf->unit); + if (err < 0) + return err; + + conf->file->private_data = &ppp->file; + + return 0; +} + #define PPP_MAJOR 108 /* Called at boot time if ppp is compiled into the kernel, @@ -2732,102 +2824,40 @@ ppp_get_stats(struct ppp *ppp, struct ppp_stats *st) * or if there is already a unit with the requested number. * unit == -1 means allocate a new number. */ -static struct ppp *ppp_create_interface(struct net *net, int unit, - struct file *file, int *retp) +static int ppp_create_interface(struct net *net, struct file *file, int *unit) { + struct ppp_config conf = { + .file = file, + .unit = *unit, + }; + struct net_device *dev; struct ppp *ppp; - struct ppp_net *pn; - struct net_device *dev = NULL; - int ret = -ENOMEM; - int i; + int err; dev = alloc_netdev(sizeof(struct ppp), "", NET_NAME_ENUM, ppp_setup); - if (!dev) - goto out1; - - pn = ppp_pernet(net); - - ppp = netdev_priv(dev); - ppp->dev = dev; - ppp->mru = PPP_MRU; - init_ppp_file(&ppp->file, INTERFACE); - ppp->file.hdrlen = PPP_HDRLEN - 2; /* don't count proto bytes */ - ppp->owner = file; - for (i = 0; i < NUM_NP; ++i) - ppp->npmode[i] = NPMODE_PASS; - INIT_LIST_HEAD(&ppp->channels); - spin_lock_init(&ppp->rlock); - spin_lock_init(&ppp->wlock); -#ifdef CONFIG_PPP_MULTILINK - ppp->minseq = -1; - skb_queue_head_init(&ppp->mrq); -#endif /* CONFIG_PPP_MULTILINK */ -#ifdef CONFIG_PPP_FILTER - ppp->pass_filter = NULL; - ppp->active_filter = NULL; -#endif /* CONFIG_PPP_FILTER */ - - /* - * drum roll: don't forget to set - * the net device is belong to - */ + if (!dev) { + err = -ENOMEM; + goto err; + } dev_net_set(dev, net); rtnl_lock(); - mutex_lock(&pn->all_ppp_mutex); - if (unit < 0) { - unit = unit_get(&pn->units_idr, ppp); - if (unit < 0) { - ret = unit; - goto out2; - } - } else { - ret = -EEXIST; - if (unit_find(&pn->units_idr, unit)) - goto out2; /* unit already exists */ - /* - * if caller need a specified unit number - * lets try to satisfy him, otherwise -- - * he should better ask us for new unit number - * - * NOTE: yes I know that returning EEXIST it's not - * fair but at least pppd will ask us to allocate - * new unit in this case 
so user is happy :) - */ - unit = unit_set(&pn->units_idr, ppp, unit); - if (unit < 0) - goto out2; - } + err = ppp_dev_configure(net, dev, &conf); + if (err < 0) + goto err_dev; + ppp = netdev_priv(dev); + *unit = ppp->file.index; - /* Initialize the new ppp unit */ - ppp->file.index = unit; - sprintf(dev->name, "ppp%d", unit); - - ret = register_netdevice(dev); - if (ret != 0) { - unit_put(&pn->units_idr, unit); - netdev_err(ppp->dev, "PPP: couldn't register device %s (%d)\n", - dev->name, ret); - goto out2; - } - - ppp->ppp_net = net; - - atomic_inc(&ppp_unit_count); - mutex_unlock(&pn->all_ppp_mutex); rtnl_unlock(); - *retp = 0; - return ppp; + return 0; -out2: - mutex_unlock(&pn->all_ppp_mutex); +err_dev: rtnl_unlock(); free_netdev(dev); -out1: - *retp = ret; - return NULL; +err: + return err; } /* From 96d934c70db6e1bc135600c57da1285eaf7efb26 Mon Sep 17 00:00:00 2001 From: Guillaume Nault Date: Thu, 28 Apr 2016 17:55:30 +0200 Subject: [PATCH 1170/1649] ppp: add rtnetlink device creation support Define PPP device handler for use with rtnetlink. The only PPP specific attribute is IFLA_PPP_DEV_FD. It is mandatory and contains the file descriptor of the associated /dev/ppp instance (the file descriptor which would have been used for ioctl(PPPIOCNEWUNIT) in the ioctl-based API). The PPP device is removed when this file descriptor is released (same behaviour as with ioctl based PPP devices). PPP devices created with the rtnetlink API behave like the ones created with ioctl(PPPIOCNEWUNIT). In particular existing ioctls work the same way, no matter how the PPP device was created. The rtnl callbacks are also assigned to ioctl based PPP devices. This way, rtnl messages have the same effect on any PPP devices. The immediate effect is that all PPP devices, even ioctl-based ones, can now be removed with "ip link del". A minor difference still exists between ioctl and rtnl based PPP interfaces: in the device name, the number following the "ppp" prefix corresponds to the PPP unit number for ioctl based devices, while it is just an unrelated incrementing index for rtnl ones. Signed-off-by: Guillaume Nault Signed-off-by: David S. 
Miller --- drivers/net/ppp/ppp_generic.c | 115 +++++++++++++++++++++++++++++++++- include/uapi/linux/if_link.h | 8 +++ 2 files changed, 120 insertions(+), 3 deletions(-) diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c index 59077c86ba0e..8dedafa1a95d 100644 --- a/drivers/net/ppp/ppp_generic.c +++ b/drivers/net/ppp/ppp_generic.c @@ -46,6 +46,7 @@ #include #include #include +#include #include #include #include @@ -186,6 +187,7 @@ struct channel { struct ppp_config { struct file *file; s32 unit; + bool ifname_is_set; }; /* @@ -286,6 +288,7 @@ static int unit_get(struct idr *p, void *ptr); static int unit_set(struct idr *p, void *ptr, int n); static void unit_put(struct idr *p, int n); static void *unit_find(struct idr *p, int n); +static void ppp_setup(struct net_device *dev); static const struct net_device_ops ppp_netdev_ops; @@ -964,7 +967,7 @@ static struct pernet_operations ppp_net_ops = { .size = sizeof(struct ppp_net), }; -static int ppp_unit_register(struct ppp *ppp, int unit) +static int ppp_unit_register(struct ppp *ppp, int unit, bool ifname_is_set) { struct ppp_net *pn = ppp_pernet(ppp->ppp_net); int ret; @@ -994,7 +997,8 @@ static int ppp_unit_register(struct ppp *ppp, int unit) } ppp->file.index = ret; - snprintf(ppp->dev->name, IFNAMSIZ, "ppp%i", ppp->file.index); + if (!ifname_is_set) + snprintf(ppp->dev->name, IFNAMSIZ, "ppp%i", ppp->file.index); ret = register_netdevice(ppp->dev); if (ret < 0) @@ -1043,7 +1047,7 @@ static int ppp_dev_configure(struct net *src_net, struct net_device *dev, ppp->active_filter = NULL; #endif /* CONFIG_PPP_FILTER */ - err = ppp_unit_register(ppp, conf->unit); + err = ppp_unit_register(ppp, conf->unit, conf->ifname_is_set); if (err < 0) return err; @@ -1052,6 +1056,99 @@ static int ppp_dev_configure(struct net *src_net, struct net_device *dev, return 0; } +static const struct nla_policy ppp_nl_policy[IFLA_PPP_MAX + 1] = { + [IFLA_PPP_DEV_FD] = { .type = NLA_S32 }, +}; + +static int ppp_nl_validate(struct nlattr *tb[], struct nlattr *data[]) +{ + if (!data) + return -EINVAL; + + if (!data[IFLA_PPP_DEV_FD]) + return -EINVAL; + if (nla_get_s32(data[IFLA_PPP_DEV_FD]) < 0) + return -EBADF; + + return 0; +} + +static int ppp_nl_newlink(struct net *src_net, struct net_device *dev, + struct nlattr *tb[], struct nlattr *data[]) +{ + struct ppp_config conf = { + .unit = -1, + .ifname_is_set = true, + }; + struct file *file; + int err; + + file = fget(nla_get_s32(data[IFLA_PPP_DEV_FD])); + if (!file) + return -EBADF; + + /* rtnl_lock is already held here, but ppp_create_interface() locks + * ppp_mutex before holding rtnl_lock. Using mutex_trylock() avoids + * possible deadlock due to lock order inversion, at the cost of + * pushing the problem back to userspace. 
+ */ + if (!mutex_trylock(&ppp_mutex)) { + err = -EBUSY; + goto out; + } + + if (file->f_op != &ppp_device_fops || file->private_data) { + err = -EBADF; + goto out_unlock; + } + + conf.file = file; + err = ppp_dev_configure(src_net, dev, &conf); + +out_unlock: + mutex_unlock(&ppp_mutex); +out: + fput(file); + + return err; +} + +static void ppp_nl_dellink(struct net_device *dev, struct list_head *head) +{ + unregister_netdevice_queue(dev, head); +} + +static size_t ppp_nl_get_size(const struct net_device *dev) +{ + return 0; +} + +static int ppp_nl_fill_info(struct sk_buff *skb, const struct net_device *dev) +{ + return 0; +} + +static struct net *ppp_nl_get_link_net(const struct net_device *dev) +{ + struct ppp *ppp = netdev_priv(dev); + + return ppp->ppp_net; +} + +static struct rtnl_link_ops ppp_link_ops __read_mostly = { + .kind = "ppp", + .maxtype = IFLA_PPP_MAX, + .policy = ppp_nl_policy, + .priv_size = sizeof(struct ppp), + .setup = ppp_setup, + .validate = ppp_nl_validate, + .newlink = ppp_nl_newlink, + .dellink = ppp_nl_dellink, + .get_size = ppp_nl_get_size, + .fill_info = ppp_nl_fill_info, + .get_link_net = ppp_nl_get_link_net, +}; + #define PPP_MAJOR 108 /* Called at boot time if ppp is compiled into the kernel, @@ -1080,11 +1177,19 @@ static int __init ppp_init(void) goto out_chrdev; } + err = rtnl_link_register(&ppp_link_ops); + if (err) { + pr_err("failed to register rtnetlink PPP handler\n"); + goto out_class; + } + /* not a big deal if we fail here :-) */ device_create(ppp_class, NULL, MKDEV(PPP_MAJOR, 0), NULL, "ppp"); return 0; +out_class: + class_destroy(ppp_class); out_chrdev: unregister_chrdev(PPP_MAJOR, "ppp"); out_net: @@ -2829,6 +2934,7 @@ static int ppp_create_interface(struct net *net, struct file *file, int *unit) struct ppp_config conf = { .file = file, .unit = *unit, + .ifname_is_set = false, }; struct net_device *dev; struct ppp *ppp; @@ -2840,6 +2946,7 @@ static int ppp_create_interface(struct net *net, struct file *file, int *unit) goto err; } dev_net_set(dev, net); + dev->rtnl_link_ops = &ppp_link_ops; rtnl_lock(); @@ -3046,6 +3153,7 @@ static void __exit ppp_cleanup(void) /* should never happen */ if (atomic_read(&ppp_unit_count) || atomic_read(&channel_count)) pr_err("PPP: removing module but units remain!\n"); + rtnl_link_unregister(&ppp_link_ops); unregister_chrdev(PPP_MAJOR, "ppp"); device_destroy(ppp_class, MKDEV(PPP_MAJOR, 0)); class_destroy(ppp_class); @@ -3104,4 +3212,5 @@ EXPORT_SYMBOL(ppp_register_compressor); EXPORT_SYMBOL(ppp_unregister_compressor); MODULE_LICENSE("GPL"); MODULE_ALIAS_CHARDEV(PPP_MAJOR, 0); +MODULE_ALIAS_RTNL_LINK("ppp"); MODULE_ALIAS("devname:ppp"); diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index d82de331bb6b..3e80974566bb 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h @@ -520,6 +520,14 @@ enum { }; #define IFLA_GENEVE_MAX (__IFLA_GENEVE_MAX - 1) +/* PPP section */ +enum { + IFLA_PPP_UNSPEC, + IFLA_PPP_DEV_FD, + __IFLA_PPP_MAX +}; +#define IFLA_PPP_MAX (__IFLA_PPP_MAX - 1) + /* Bonding section */ enum { From f4b05d27ec6b032ca504591e2a157b058b6f172f Mon Sep 17 00:00:00 2001 From: Nikolay Aleksandrov Date: Thu, 28 Apr 2016 17:59:28 +0200 Subject: [PATCH 1171/1649] net: constify is_skb_forwardable's arguments is_skb_forwardable is not supposed to change anything so constify its arguments Signed-off-by: Nikolay Aleksandrov Signed-off-by: David S. 
Miller --- include/linux/netdevice.h | 3 ++- net/core/dev.c | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 934ca866562d..52914a854386 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -3264,7 +3264,8 @@ struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, struct netdev_queue *txq, int *ret); int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb); int dev_forward_skb(struct net_device *dev, struct sk_buff *skb); -bool is_skb_forwardable(struct net_device *dev, struct sk_buff *skb); +bool is_skb_forwardable(const struct net_device *dev, + const struct sk_buff *skb); extern int netdev_budget; diff --git a/net/core/dev.c b/net/core/dev.c index c2f3d5dbde56..d91dfbec0fc6 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -1741,7 +1741,7 @@ static inline void net_timestamp_set(struct sk_buff *skb) __net_timestamp(SKB); \ } \ -bool is_skb_forwardable(struct net_device *dev, struct sk_buff *skb) +bool is_skb_forwardable(const struct net_device *dev, const struct sk_buff *skb) { unsigned int len; From 2957a28a0e874aea0f734e2897b27f669502a970 Mon Sep 17 00:00:00 2001 From: Michael Heimpold Date: Thu, 28 Apr 2016 22:06:14 +0200 Subject: [PATCH 1172/1649] net: ethernet: enc28j60: support half-duplex SPI controllers The current spi_read_buf function fails on SPI host masters which are only half-duplex capable. Splitting the Tx and Rx part solves this issue. Tested on Raspberry Pi (full duplex) and I2SE Duckbill (half duplex). Signed-off-by: Michael Heimpold Signed-off-by: David S. Miller --- drivers/net/ethernet/microchip/enc28j60.c | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/drivers/net/ethernet/microchip/enc28j60.c b/drivers/net/ethernet/microchip/enc28j60.c index 86ea17e7ba7b..b723622fe94a 100644 --- a/drivers/net/ethernet/microchip/enc28j60.c +++ b/drivers/net/ethernet/microchip/enc28j60.c @@ -89,22 +89,26 @@ spi_read_buf(struct enc28j60_net *priv, int len, u8 *data) { u8 *rx_buf = priv->spi_transfer_buf + 4; u8 *tx_buf = priv->spi_transfer_buf; - struct spi_transfer t = { + struct spi_transfer tx = { .tx_buf = tx_buf, + .len = SPI_OPLEN, + }; + struct spi_transfer rx = { .rx_buf = rx_buf, - .len = SPI_OPLEN + len, + .len = len, }; struct spi_message msg; int ret; tx_buf[0] = ENC28J60_READ_BUF_MEM; - tx_buf[1] = tx_buf[2] = tx_buf[3] = 0; /* don't care */ spi_message_init(&msg); - spi_message_add_tail(&t, &msg); + spi_message_add_tail(&tx, &msg); + spi_message_add_tail(&rx, &msg); + ret = spi_sync(priv->spi, &msg); if (ret == 0) { - memcpy(data, &rx_buf[SPI_OPLEN], len); + memcpy(data, rx_buf, len); ret = msg.status; } if (ret && netif_msg_drv(priv)) From 2dd355a007e44960ec049c75920ddb6778fec9ee Mon Sep 17 00:00:00 2001 From: Michael Heimpold Date: Thu, 28 Apr 2016 22:06:15 +0200 Subject: [PATCH 1173/1649] net: ethernet: enc28j60: add device tree support The following patch adds the required match table for device tree support (and while at, fix the indent). It's also possible to specify the MAC address in the DT blob. Also add the corresponding binding documentation file. Signed-off-by: Michael Heimpold Signed-off-by: David S. 
Miller --- .../bindings/net/microchip,enc28j60.txt | 59 +++++++++++++++++++ drivers/net/ethernet/microchip/enc28j60.c | 20 ++++++- 2 files changed, 76 insertions(+), 3 deletions(-) create mode 100644 Documentation/devicetree/bindings/net/microchip,enc28j60.txt diff --git a/Documentation/devicetree/bindings/net/microchip,enc28j60.txt b/Documentation/devicetree/bindings/net/microchip,enc28j60.txt new file mode 100644 index 000000000000..1dc3bc75539d --- /dev/null +++ b/Documentation/devicetree/bindings/net/microchip,enc28j60.txt @@ -0,0 +1,59 @@ +* Microchip ENC28J60 + +This is a standalone 10 MBit ethernet controller with SPI interface. + +For each device connected to a SPI bus, define a child node within +the SPI master node. + +Required properties: +- compatible: Should be "microchip,enc28j60" +- reg: Specify the SPI chip select the ENC28J60 is wired to +- interrupt-parent: Specify the phandle of the source interrupt, see interrupt + binding documentation for details. Usually this is the GPIO bank + the interrupt line is wired to. +- interrupts: Specify the interrupt index within the interrupt controller (referred + to above in interrupt-parent) and interrupt type. The ENC28J60 natively + generates falling edge interrupts, however, additional board logic + might invert the signal. +- pinctrl-names: List of assigned state names, see pinctrl binding documentation. +- pinctrl-0: List of phandles to configure the GPIO pin used as interrupt line, + see also generic and your platform specific pinctrl binding + documentation. + +Optional properties: +- spi-max-frequency: Maximum frequency of the SPI bus when accessing the ENC28J60. + According to the ENC28J80 datasheet, the chip allows a maximum of 20 MHz, however, + board designs may need to limit this value. +- local-mac-address: See ethernet.txt in the same directory. 
+ + +Example (for NXP i.MX28 with pin control stuff for GPIO irq): + + ssp2: ssp@80014000 { + compatible = "fsl,imx28-spi"; + pinctrl-names = "default"; + pinctrl-0 = <&spi2_pins_b &spi2_sck_cfg>; + status = "okay"; + + enc28j60: ethernet@0 { + compatible = "microchip,enc28j60"; + pinctrl-names = "default"; + pinctrl-0 = <&enc28j60_pins>; + reg = <0>; + interrupt-parent = <&gpio3>; + interrupts = <3 IRQ_TYPE_EDGE_FALLING>; + spi-max-frequency = <12000000>; + }; + }; + + pinctrl@80018000 { + enc28j60_pins: enc28j60_pins@0 { + reg = <0>; + fsl,pinmux-ids = < + MX28_PAD_AUART0_RTS__GPIO_3_3 /* Interrupt */ + >; + fsl,drive-strength = ; + fsl,voltage = ; + fsl,pull-up = ; + }; + }; diff --git a/drivers/net/ethernet/microchip/enc28j60.c b/drivers/net/ethernet/microchip/enc28j60.c index b723622fe94a..7066954c39d6 100644 --- a/drivers/net/ethernet/microchip/enc28j60.c +++ b/drivers/net/ethernet/microchip/enc28j60.c @@ -28,11 +28,12 @@ #include #include #include +#include #include "enc28j60_hw.h" #define DRV_NAME "enc28j60" -#define DRV_VERSION "1.01" +#define DRV_VERSION "1.02" #define SPI_OPLEN 1 @@ -1548,6 +1549,7 @@ static int enc28j60_probe(struct spi_device *spi) { struct net_device *dev; struct enc28j60_net *priv; + const void *macaddr; int ret = 0; if (netif_msg_drv(&debug)) @@ -1579,7 +1581,12 @@ static int enc28j60_probe(struct spi_device *spi) ret = -EIO; goto error_irq; } - eth_hw_addr_random(dev); + + macaddr = of_get_mac_address(spi->dev.of_node); + if (macaddr) + ether_addr_copy(dev->dev_addr, macaddr); + else + eth_hw_addr_random(dev); enc28j60_set_hw_macaddr(dev); /* Board setup must set the relevant edge trigger type; @@ -1634,9 +1641,16 @@ static int enc28j60_remove(struct spi_device *spi) return 0; } +static const struct of_device_id enc28j60_dt_ids[] = { + { .compatible = "microchip,enc28j60" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, enc28j60_dt_ids); + static struct spi_driver enc28j60_driver = { .driver = { - .name = DRV_NAME, + .name = DRV_NAME, + .of_match_table = enc28j60_dt_ids, }, .probe = enc28j60_probe, .remove = enc28j60_remove, From 01a14098d3374e1b931d605da666300ee248d3d9 Mon Sep 17 00:00:00 2001 From: Matthew Finlay Date: Fri, 29 Apr 2016 01:36:31 +0300 Subject: [PATCH 1174/1649] net/mlx5e: Call vxlan_get_rx_port() with rtnl lock Hold the rtnl lock when calling vxlan_get_rx_port(). Fixes: b7aade15485a ("vxlan: break dependency with netdev drivers") Signed-off-by: Matthew Finlay Reported-by: Alexander Duyck Signed-off-by: Saeed Mahameed Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 8b0bd42a9762..48825cb69e7e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -2938,8 +2938,11 @@ static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev) goto err_tc_cleanup; } - if (mlx5e_vxlan_allowed(mdev)) + if (mlx5e_vxlan_allowed(mdev)) { + rtnl_lock(); vxlan_get_rx_port(netdev); + rtnl_unlock(); + } mlx5e_enable_async_events(priv); schedule_work(&priv->set_rx_mode_work); From 1da366964ec907fccdea0339d81c2e9ef3d803f8 Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Fri, 29 Apr 2016 01:36:32 +0300 Subject: [PATCH 1175/1649] net/mlx5e: Direct TIR per RQ Introduce new TIRs for direct access per RQ. 
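The description and diff that follow are dense, so here is a condensed, non-authoritative sketch of the RQT setup this patch performs; it reuses the mlx5e_create_rqt() helper and the priv fields added by the patch, drops error unwinding, and is not the literal driver code.

    /* One RSS RQT with MLX5E_INDIR_RQT_SIZE entries backs the indirect
     * (per-traffic-type) TIRs, while one single-entry RQT per channel
     * backs that channel's direct TIR, so a steering rule can target
     * exactly one RQ.
     */
    static int sketch_create_rqts(struct mlx5e_priv *priv)
    {
            int nch = mlx5e_get_max_num_channels(priv->mdev);
            int err, ix;

            err = mlx5e_create_rqt(priv, MLX5E_INDIR_RQT_SIZE, 0,
                                   &priv->indir_rqtn);
            if (err)
                    return err;

            for (ix = 0; ix < nch; ix++) {
                    err = mlx5e_create_rqt(priv, 1 /* size */, ix,
                                           &priv->direct_tir[ix].rqtn);
                    if (err)
                            return err; /* the real code also destroys the
                                         * RQTs created so far
                                         */
            }
            return 0;
    }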
Now we have 2 available kinds of TIRs: - indirect TIR per traffic type, each points to one RQT (RSS RQT) same as before. - New direct TIR per RQ, each points to RQT with a size of one that forwards packets to that RQ only. Driver will open max channels (num cores) direct TIRs by default, they will be filled with the actual RQs once channels are allocated. Needed for downstream aRFS and ethtool direct steering functionalities. Signed-off-by: Tariq Toukan Signed-off-by: Saeed Mahameed Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 21 +- .../ethernet/mellanox/mlx5/core/en_ethtool.c | 9 +- .../net/ethernet/mellanox/mlx5/core/en_fs.c | 4 +- .../net/ethernet/mellanox/mlx5/core/en_main.c | 303 ++++++++++-------- 4 files changed, 195 insertions(+), 142 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index bbc01a49db02..5c8e98c277dc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -385,14 +385,7 @@ enum mlx5e_traffic_types { MLX5E_TT_IPV6, MLX5E_TT_ANY, MLX5E_NUM_TT, -}; - -#define IS_HASHING_TT(tt) (tt != MLX5E_TT_ANY) - -enum mlx5e_rqt_ix { - MLX5E_INDIRECTION_RQT, - MLX5E_SINGLE_RQ_RQT, - MLX5E_NUM_RQT, + MLX5E_NUM_INDIR_TIRS = MLX5E_TT_ANY, }; struct mlx5e_eth_addr_info { @@ -453,6 +446,11 @@ struct mlx5e_flow_tables { struct mlx5e_flow_table main; }; +struct mlx5e_direct_tir { + u32 tirn; + u32 rqtn; +}; + struct mlx5e_priv { /* priv data path fields - start */ struct mlx5e_sq **txq_to_sq_map; @@ -470,8 +468,9 @@ struct mlx5e_priv { struct mlx5e_channel **channel; u32 tisn[MLX5E_MAX_NUM_TC]; - u32 rqtn[MLX5E_NUM_RQT]; - u32 tirn[MLX5E_NUM_TT]; + u32 indir_rqtn; + u32 indir_tirn[MLX5E_NUM_INDIR_TIRS]; + struct mlx5e_direct_tir direct_tir[MLX5E_MAX_NUM_CHANNELS]; struct mlx5e_flow_tables fts; struct mlx5e_eth_addr_db eth_addr; @@ -578,7 +577,7 @@ void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv); int mlx5e_modify_rqs_vsd(struct mlx5e_priv *priv, bool vsd); -int mlx5e_redirect_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix); +int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz, int ix); void mlx5e_build_tir_ctx_hash(void *tirc, struct mlx5e_priv *priv); int mlx5e_open_locked(struct net_device *netdev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index a06958a925f7..498d40784ae9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -826,9 +826,8 @@ static void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in, int inlen) MLX5_SET(modify_tir_in, in, bitmask.hash, 1); mlx5e_build_tir_ctx_hash(tirc, priv); - for (i = 0; i < MLX5E_NUM_TT; i++) - if (IS_HASHING_TT(i)) - mlx5_core_modify_tir(mdev, priv->tirn[i], in, inlen); + for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++) + mlx5_core_modify_tir(mdev, priv->indir_tirn[i], in, inlen); } static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir, @@ -850,9 +849,11 @@ static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir, mutex_lock(&priv->state_lock); if (indir) { + u32 rqtn = priv->indir_rqtn; + memcpy(priv->params.indirection_rqt, indir, sizeof(priv->params.indirection_rqt)); - mlx5e_redirect_rqt(priv, MLX5E_INDIRECTION_RQT); + mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, 0); } if (key) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c 
b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c index d00a24203410..4df49e660587 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c @@ -247,7 +247,7 @@ static int __mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv, outer_headers.dmac_47_16); u8 *mv_dmac = MLX5_ADDR_OF(fte_match_param, mv, outer_headers.dmac_47_16); - u32 *tirn = priv->tirn; + u32 *tirn = priv->indir_tirn; u32 tt_vec; int err = 0; @@ -274,7 +274,7 @@ static int __mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv, if (tt_vec & BIT(MLX5E_TT_ANY)) { rule_p = &ai->ft_rule[MLX5E_TT_ANY]; - dest.tir_num = tirn[MLX5E_TT_ANY]; + dest.tir_num = priv->direct_tir[0].tirn; *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv, MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, MLX5_FS_DEFAULT_FLOW_TAG, &dest); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 48825cb69e7e..04ad659f54ed 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -1340,48 +1340,36 @@ static void mlx5e_fill_indir_rqt_rqns(struct mlx5e_priv *priv, void *rqtc) for (i = 0; i < MLX5E_INDIR_RQT_SIZE; i++) { int ix = i; + u32 rqn; if (priv->params.rss_hfunc == ETH_RSS_HASH_XOR) ix = mlx5e_bits_invert(i, MLX5E_LOG_INDIR_RQT_SIZE); ix = priv->params.indirection_rqt[ix]; - MLX5_SET(rqtc, rqtc, rq_num[i], - test_bit(MLX5E_STATE_OPENED, &priv->state) ? - priv->channel[ix]->rq.rqn : - priv->drop_rq.rqn); + rqn = test_bit(MLX5E_STATE_OPENED, &priv->state) ? + priv->channel[ix]->rq.rqn : + priv->drop_rq.rqn; + MLX5_SET(rqtc, rqtc, rq_num[i], rqn); } } -static void mlx5e_fill_rqt_rqns(struct mlx5e_priv *priv, void *rqtc, - enum mlx5e_rqt_ix rqt_ix) +static void mlx5e_fill_direct_rqt_rqn(struct mlx5e_priv *priv, void *rqtc, + int ix) { + u32 rqn = test_bit(MLX5E_STATE_OPENED, &priv->state) ? + priv->channel[ix]->rq.rqn : + priv->drop_rq.rqn; - switch (rqt_ix) { - case MLX5E_INDIRECTION_RQT: - mlx5e_fill_indir_rqt_rqns(priv, rqtc); - - break; - - default: /* MLX5E_SINGLE_RQ_RQT */ - MLX5_SET(rqtc, rqtc, rq_num[0], - test_bit(MLX5E_STATE_OPENED, &priv->state) ? - priv->channel[0]->rq.rqn : - priv->drop_rq.rqn); - - break; - } + MLX5_SET(rqtc, rqtc, rq_num[0], rqn); } -static int mlx5e_create_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix) +static int mlx5e_create_rqt(struct mlx5e_priv *priv, int sz, int ix, u32 *rqtn) { struct mlx5_core_dev *mdev = priv->mdev; - u32 *in; void *rqtc; int inlen; - int sz; int err; - - sz = (rqt_ix == MLX5E_SINGLE_RQ_RQT) ? 
1 : MLX5E_INDIR_RQT_SIZE; + u32 *in; inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz; in = mlx5_vzalloc(inlen); @@ -1393,26 +1381,73 @@ static int mlx5e_create_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix) MLX5_SET(rqtc, rqtc, rqt_actual_size, sz); MLX5_SET(rqtc, rqtc, rqt_max_size, sz); - mlx5e_fill_rqt_rqns(priv, rqtc, rqt_ix); + if (sz > 1) /* RSS */ + mlx5e_fill_indir_rqt_rqns(priv, rqtc); + else + mlx5e_fill_direct_rqt_rqn(priv, rqtc, ix); - err = mlx5_core_create_rqt(mdev, in, inlen, &priv->rqtn[rqt_ix]); + err = mlx5_core_create_rqt(mdev, in, inlen, rqtn); kvfree(in); + return err; +} + +static void mlx5e_destroy_rqt(struct mlx5e_priv *priv, u32 rqtn) +{ + mlx5_core_destroy_rqt(priv->mdev, rqtn); +} + +static int mlx5e_create_rqts(struct mlx5e_priv *priv) +{ + int nch = mlx5e_get_max_num_channels(priv->mdev); + u32 *rqtn; + int err; + int ix; + + /* Indirect RQT */ + rqtn = &priv->indir_rqtn; + err = mlx5e_create_rqt(priv, MLX5E_INDIR_RQT_SIZE, 0, rqtn); + if (err) + return err; + + /* Direct RQTs */ + for (ix = 0; ix < nch; ix++) { + rqtn = &priv->direct_tir[ix].rqtn; + err = mlx5e_create_rqt(priv, 1 /*size */, ix, rqtn); + if (err) + goto err_destroy_rqts; + } + + return 0; + +err_destroy_rqts: + for (ix--; ix >= 0; ix--) + mlx5e_destroy_rqt(priv, priv->direct_tir[ix].rqtn); + + mlx5e_destroy_rqt(priv, priv->indir_rqtn); return err; } -int mlx5e_redirect_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix) +static void mlx5e_destroy_rqts(struct mlx5e_priv *priv) +{ + int nch = mlx5e_get_max_num_channels(priv->mdev); + int i; + + for (i = 0; i < nch; i++) + mlx5e_destroy_rqt(priv, priv->direct_tir[i].rqtn); + + mlx5e_destroy_rqt(priv, priv->indir_rqtn); +} + +int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz, int ix) { struct mlx5_core_dev *mdev = priv->mdev; - u32 *in; void *rqtc; int inlen; - int sz; + u32 *in; int err; - sz = (rqt_ix == MLX5E_SINGLE_RQ_RQT) ? 
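mlx5e_create_rqts() above creates the indirect RQT first and then one direct RQT per channel, rolling the already-created ones back in reverse order if any step fails. The fragment below isolates that rollback idiom; the create/destroy stubs are hypothetical and only the control flow mirrors the driver.

/*
 * Hedged illustration of the partial-failure rollback used by
 * mlx5e_create_rqts(): on error, only the objects that were actually
 * created are destroyed, in reverse creation order.
 */
#include <stdio.h>

#define NUM_CHANNELS 8

static int create_rqt(int ix)            /* stub: pretend channel 5 fails */
{
        return ix == 5 ? -1 : 0;
}

static void destroy_rqt(int ix)
{
        printf("destroying rqt %d\n", ix);
}

static int create_direct_rqts(void)
{
        int ix, err;

        for (ix = 0; ix < NUM_CHANNELS; ix++) {
                err = create_rqt(ix);
                if (err)
                        goto err_destroy_rqts;
        }
        return 0;

err_destroy_rqts:
        for (ix--; ix >= 0; ix--)        /* unwinds 4, 3, 2, 1, 0 only */
                destroy_rqt(ix);
        return err;
}

int main(void)
{
        return create_direct_rqts() ? 1 : 0;
}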
1 : MLX5E_INDIR_RQT_SIZE; - inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) + sizeof(u32) * sz; in = mlx5_vzalloc(inlen); if (!in) @@ -1421,27 +1456,31 @@ int mlx5e_redirect_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix) rqtc = MLX5_ADDR_OF(modify_rqt_in, in, ctx); MLX5_SET(rqtc, rqtc, rqt_actual_size, sz); - - mlx5e_fill_rqt_rqns(priv, rqtc, rqt_ix); + if (sz > 1) /* RSS */ + mlx5e_fill_indir_rqt_rqns(priv, rqtc); + else + mlx5e_fill_direct_rqt_rqn(priv, rqtc, ix); MLX5_SET(modify_rqt_in, in, bitmask.rqn_list, 1); - err = mlx5_core_modify_rqt(mdev, priv->rqtn[rqt_ix], in, inlen); + err = mlx5_core_modify_rqt(mdev, rqtn, in, inlen); kvfree(in); return err; } -static void mlx5e_destroy_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix) -{ - mlx5_core_destroy_rqt(priv->mdev, priv->rqtn[rqt_ix]); -} - static void mlx5e_redirect_rqts(struct mlx5e_priv *priv) { - mlx5e_redirect_rqt(priv, MLX5E_INDIRECTION_RQT); - mlx5e_redirect_rqt(priv, MLX5E_SINGLE_RQ_RQT); + u32 rqtn; + int ix; + + rqtn = priv->indir_rqtn; + mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, 0); + for (ix = 0; ix < priv->params.num_channels; ix++) { + rqtn = priv->direct_tir[ix].rqtn; + mlx5e_redirect_rqt(priv, rqtn, 1, ix); + } } static void mlx5e_build_tir_ctx_lro(void *tirc, struct mlx5e_priv *priv) @@ -1486,6 +1525,7 @@ static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv) int inlen; int err; int tt; + int ix; inlen = MLX5_ST_SZ_BYTES(modify_tir_in); in = mlx5_vzalloc(inlen); @@ -1497,23 +1537,32 @@ static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv) mlx5e_build_tir_ctx_lro(tirc, priv); - for (tt = 0; tt < MLX5E_NUM_TT; tt++) { - err = mlx5_core_modify_tir(mdev, priv->tirn[tt], in, inlen); + for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) { + err = mlx5_core_modify_tir(mdev, priv->indir_tirn[tt], in, + inlen); if (err) - break; + goto free_in; } + for (ix = 0; ix < mlx5e_get_max_num_channels(mdev); ix++) { + err = mlx5_core_modify_tir(mdev, priv->direct_tir[ix].tirn, + in, inlen); + if (err) + goto free_in; + } + +free_in: kvfree(in); return err; } -static int mlx5e_refresh_tir_self_loopback_enable(struct mlx5_core_dev *mdev, - u32 tirn) +static int mlx5e_refresh_tirs_self_loopback_enable(struct mlx5e_priv *priv) { void *in; int inlen; int err; + int i; inlen = MLX5_ST_SZ_BYTES(modify_tir_in); in = mlx5_vzalloc(inlen); @@ -1522,25 +1571,23 @@ static int mlx5e_refresh_tir_self_loopback_enable(struct mlx5_core_dev *mdev, MLX5_SET(modify_tir_in, in, bitmask.self_lb_en, 1); - err = mlx5_core_modify_tir(mdev, tirn, in, inlen); - - kvfree(in); - - return err; -} - -static int mlx5e_refresh_tirs_self_loopback_enable(struct mlx5e_priv *priv) -{ - int err; - int i; - - for (i = 0; i < MLX5E_NUM_TT; i++) { - err = mlx5e_refresh_tir_self_loopback_enable(priv->mdev, - priv->tirn[i]); + for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++) { + err = mlx5_core_modify_tir(priv->mdev, priv->indir_tirn[i], in, + inlen); if (err) return err; } + for (i = 0; i < priv->params.num_channels; i++) { + err = mlx5_core_modify_tir(priv->mdev, + priv->direct_tir[i].tirn, in, + inlen); + if (err) + return err; + } + + kvfree(in); + return 0; } @@ -1851,7 +1898,8 @@ static void mlx5e_destroy_tises(struct mlx5e_priv *priv) mlx5e_destroy_tis(priv, tc); } -static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt) +static void mlx5e_build_indir_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, + enum mlx5e_traffic_types tt) { void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer); @@ -1872,19 +1920,8 @@ static void 
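With the split into indirect and direct TIRs, the hunks above make every global TIR update (LRO settings, the self-loopback refresh) walk both arrays with the same prepared command buffer. The helper below is a simplified, non-driver model of that "apply to every TIR" pattern; the callback and the counts are placeholders, not the driver's values.

/*
 * Simplified model of iterating every TIR the driver owns: first the
 * per-traffic-type indirect TIRs, then the per-channel direct TIRs.
 * modify_tir() stands in for mlx5_core_modify_tir() and just records
 * which TIR numbers were touched.
 */
#include <stdio.h>

#define NUM_INDIR_TIRS 10                /* placeholder count */
#define NUM_CHANNELS   4                 /* placeholder count */

static int modify_tir(unsigned int tirn)
{
        printf("modify tir %u\n", tirn);
        return 0;                        /* pretend the command succeeded */
}

static int modify_all_tirs(const unsigned int *indir_tirn,
                           const unsigned int *direct_tirn)
{
        int err;

        for (int tt = 0; tt < NUM_INDIR_TIRS; tt++) {
                err = modify_tir(indir_tirn[tt]);
                if (err)
                        return err;
        }
        for (int ix = 0; ix < NUM_CHANNELS; ix++) {
                err = modify_tir(direct_tirn[ix]);
                if (err)
                        return err;
        }
        return 0;
}

int main(void)
{
        unsigned int indir_tirn[NUM_INDIR_TIRS];
        unsigned int direct_tirn[NUM_CHANNELS];

        for (int i = 0; i < NUM_INDIR_TIRS; i++)
                indir_tirn[i] = 100 + i;
        for (int i = 0; i < NUM_CHANNELS; i++)
                direct_tirn[i] = 200 + i;

        return modify_all_tirs(indir_tirn, direct_tirn) ? 1 : 0;
}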
mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt) mlx5e_build_tir_ctx_lro(tirc, priv); MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT); - - switch (tt) { - case MLX5E_TT_ANY: - MLX5_SET(tirc, tirc, indirect_table, - priv->rqtn[MLX5E_SINGLE_RQ_RQT]); - MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_INVERTED_XOR8); - break; - default: - MLX5_SET(tirc, tirc, indirect_table, - priv->rqtn[MLX5E_INDIRECTION_RQT]); - mlx5e_build_tir_ctx_hash(tirc, priv); - break; - } + MLX5_SET(tirc, tirc, indirect_table, priv->indir_rqtn); + mlx5e_build_tir_ctx_hash(tirc, priv); switch (tt) { case MLX5E_TT_IPV4_TCP: @@ -1964,64 +2001,90 @@ static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt) MLX5_SET(rx_hash_field_select, hfso, selected_fields, MLX5_HASH_IP); break; + default: + WARN_ONCE(true, + "mlx5e_build_indir_tir_ctx: bad traffic type!\n"); } } -static int mlx5e_create_tir(struct mlx5e_priv *priv, int tt) +static void mlx5e_build_direct_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, + u32 rqtn) { - struct mlx5_core_dev *mdev = priv->mdev; - u32 *in; + MLX5_SET(tirc, tirc, transport_domain, priv->tdn); + + mlx5e_build_tir_ctx_lro(tirc, priv); + + MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT); + MLX5_SET(tirc, tirc, indirect_table, rqtn); + MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_INVERTED_XOR8); +} + +static int mlx5e_create_tirs(struct mlx5e_priv *priv) +{ + int nch = mlx5e_get_max_num_channels(priv->mdev); void *tirc; int inlen; + u32 *tirn; int err; + u32 *in; + int ix; + int tt; inlen = MLX5_ST_SZ_BYTES(create_tir_in); in = mlx5_vzalloc(inlen); if (!in) return -ENOMEM; - tirc = MLX5_ADDR_OF(create_tir_in, in, ctx); + /* indirect tirs */ + for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) { + memset(in, 0, inlen); + tirn = &priv->indir_tirn[tt]; + tirc = MLX5_ADDR_OF(create_tir_in, in, ctx); + mlx5e_build_indir_tir_ctx(priv, tirc, tt); + err = mlx5_core_create_tir(priv->mdev, in, inlen, tirn); + if (err) + goto err_destroy_tirs; + } - mlx5e_build_tir_ctx(priv, tirc, tt); + /* direct tirs */ + for (ix = 0; ix < nch; ix++) { + memset(in, 0, inlen); + tirn = &priv->direct_tir[ix].tirn; + tirc = MLX5_ADDR_OF(create_tir_in, in, ctx); + mlx5e_build_direct_tir_ctx(priv, tirc, + priv->direct_tir[ix].rqtn); + err = mlx5_core_create_tir(priv->mdev, in, inlen, tirn); + if (err) + goto err_destroy_ch_tirs; + } - err = mlx5_core_create_tir(mdev, in, inlen, &priv->tirn[tt]); + kvfree(in); + + return 0; + +err_destroy_ch_tirs: + for (ix--; ix >= 0; ix--) + mlx5_core_destroy_tir(priv->mdev, priv->direct_tir[ix].tirn); + +err_destroy_tirs: + for (tt--; tt >= 0; tt--) + mlx5_core_destroy_tir(priv->mdev, priv->indir_tirn[tt]); kvfree(in); return err; } -static void mlx5e_destroy_tir(struct mlx5e_priv *priv, int tt) -{ - mlx5_core_destroy_tir(priv->mdev, priv->tirn[tt]); -} - -static int mlx5e_create_tirs(struct mlx5e_priv *priv) -{ - int err; - int i; - - for (i = 0; i < MLX5E_NUM_TT; i++) { - err = mlx5e_create_tir(priv, i); - if (err) - goto err_destroy_tirs; - } - - return 0; - -err_destroy_tirs: - for (i--; i >= 0; i--) - mlx5e_destroy_tir(priv, i); - - return err; -} - static void mlx5e_destroy_tirs(struct mlx5e_priv *priv) { + int nch = mlx5e_get_max_num_channels(priv->mdev); int i; - for (i = 0; i < MLX5E_NUM_TT; i++) - mlx5e_destroy_tir(priv, i); + for (i = 0; i < nch; i++) + mlx5_core_destroy_tir(priv->mdev, priv->direct_tir[i].tirn); + + for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++) + mlx5_core_destroy_tir(priv->mdev, priv->indir_tirn[i]); } int 
mlx5e_modify_rqs_vsd(struct mlx5e_priv *priv, bool vsd) @@ -2894,22 +2957,16 @@ static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev) goto err_destroy_tises; } - err = mlx5e_create_rqt(priv, MLX5E_INDIRECTION_RQT); + err = mlx5e_create_rqts(priv); if (err) { - mlx5_core_warn(mdev, "create rqt(INDIR) failed, %d\n", err); + mlx5_core_warn(mdev, "create rqts failed, %d\n", err); goto err_close_drop_rq; } - err = mlx5e_create_rqt(priv, MLX5E_SINGLE_RQ_RQT); - if (err) { - mlx5_core_warn(mdev, "create rqt(SINGLE) failed, %d\n", err); - goto err_destroy_rqt_indir; - } - err = mlx5e_create_tirs(priv); if (err) { mlx5_core_warn(mdev, "create tirs failed, %d\n", err); - goto err_destroy_rqt_single; + goto err_destroy_rqts; } err = mlx5e_create_flow_tables(priv); @@ -2959,11 +3016,8 @@ err_dealloc_q_counters: err_destroy_tirs: mlx5e_destroy_tirs(priv); -err_destroy_rqt_single: - mlx5e_destroy_rqt(priv, MLX5E_SINGLE_RQ_RQT); - -err_destroy_rqt_indir: - mlx5e_destroy_rqt(priv, MLX5E_INDIRECTION_RQT); +err_destroy_rqts: + mlx5e_destroy_rqts(priv); err_close_drop_rq: mlx5e_close_drop_rq(priv); @@ -3017,8 +3071,7 @@ static void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, void *vpriv) mlx5e_destroy_q_counter(priv); mlx5e_destroy_flow_tables(priv); mlx5e_destroy_tirs(priv); - mlx5e_destroy_rqt(priv, MLX5E_SINGLE_RQ_RQT); - mlx5e_destroy_rqt(priv, MLX5E_INDIRECTION_RQT); + mlx5e_destroy_rqts(priv); mlx5e_close_drop_rq(priv); mlx5e_destroy_tises(priv); mlx5_core_destroy_mkey(priv->mdev, &priv->umr_mkey); From d745098cedb3f5c6a554796d4a3a505abd4ebaa6 Mon Sep 17 00:00:00 2001 From: Maor Gottlieb Date: Fri, 29 Apr 2016 01:36:33 +0300 Subject: [PATCH 1176/1649] net/mlx5: Introduce modify flow rule destination This API is used for modifying the flow rule destination. This is needed for modifying the pointed flow table by the traffic type classifier rules to point on the aRFS tables. Signed-off-by: Maor Gottlieb Signed-off-by: Saeed Mahameed Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 4 ++-- include/linux/mlx5/fs.h | 3 +++ 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 89cce97d46c6..bb2c1cd35fd7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -615,8 +615,8 @@ static int update_root_ft_create(struct mlx5_flow_table *ft, struct fs_prio return err; } -static int mlx5_modify_rule_destination(struct mlx5_flow_rule *rule, - struct mlx5_flow_destination *dest) +int mlx5_modify_rule_destination(struct mlx5_flow_rule *rule, + struct mlx5_flow_destination *dest) { struct mlx5_flow_table *ft; struct mlx5_flow_group *fg; diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h index 8dec5508d93d..28a5b662ab6a 100644 --- a/include/linux/mlx5/fs.h +++ b/include/linux/mlx5/fs.h @@ -113,4 +113,7 @@ mlx5_add_flow_rule(struct mlx5_flow_table *ft, struct mlx5_flow_destination *dest); void mlx5_del_flow_rule(struct mlx5_flow_rule *fr); +int mlx5_modify_rule_destination(struct mlx5_flow_rule *rule, + struct mlx5_flow_destination *dest); + #endif From a257b94a18f7eb60bbe9b5fd415d208ac71d49ea Mon Sep 17 00:00:00 2001 From: Maor Gottlieb Date: Fri, 29 Apr 2016 01:36:34 +0300 Subject: [PATCH 1177/1649] net/mlx5: Set number of allowed levels in priority Refactors the flow steering namespace creation, by changing the name num_fts to num_levels. 
When new flow table is created, the driver assign new level to this flow table therefore the meaning is equivalent. Since downstream patches will introduce the ability to create more than one flow table per level, the name num_fts is no longer accurate. Signed-off-by: Maor Gottlieb Signed-off-by: Saeed Mahameed Signed-off-by: David S. Miller --- .../net/ethernet/mellanox/mlx5/core/fs_core.c | 61 ++++++++++--------- .../net/ethernet/mellanox/mlx5/core/fs_core.h | 2 +- 2 files changed, 33 insertions(+), 30 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index bb2c1cd35fd7..cfb35c334cd1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -40,18 +40,18 @@ #define INIT_TREE_NODE_ARRAY_SIZE(...) (sizeof((struct init_tree_node[]){__VA_ARGS__}) /\ sizeof(struct init_tree_node)) -#define ADD_PRIO(num_prios_val, min_level_val, max_ft_val, caps_val,\ +#define ADD_PRIO(num_prios_val, min_level_val, num_levels_val, caps_val,\ ...) {.type = FS_TYPE_PRIO,\ .min_ft_level = min_level_val,\ - .max_ft = max_ft_val,\ + .num_levels = num_levels_val,\ .num_leaf_prios = num_prios_val,\ .caps = caps_val,\ .children = (struct init_tree_node[]) {__VA_ARGS__},\ .ar_size = INIT_TREE_NODE_ARRAY_SIZE(__VA_ARGS__) \ } -#define ADD_MULTIPLE_PRIO(num_prios_val, max_ft_val, ...)\ - ADD_PRIO(num_prios_val, 0, max_ft_val, {},\ +#define ADD_MULTIPLE_PRIO(num_prios_val, num_levels_val, ...)\ + ADD_PRIO(num_prios_val, 0, num_levels_val, {},\ __VA_ARGS__)\ #define ADD_NS(...) {.type = FS_TYPE_NAMESPACE,\ @@ -67,17 +67,18 @@ #define FS_REQUIRED_CAPS(...) {.arr_sz = INIT_CAPS_ARRAY_SIZE(__VA_ARGS__), \ .caps = (long[]) {__VA_ARGS__} } -#define LEFTOVERS_MAX_FT 1 +#define LEFTOVERS_NUM_LEVELS 1 #define LEFTOVERS_NUM_PRIOS 1 -#define BY_PASS_PRIO_MAX_FT 1 -#define BY_PASS_MIN_LEVEL (KENREL_MIN_LEVEL + MLX5_BY_PASS_NUM_PRIOS +\ - LEFTOVERS_MAX_FT) -#define KERNEL_MAX_FT 3 +#define BY_PASS_PRIO_NUM_LEVELS 1 +#define BY_PASS_MIN_LEVEL (KERNEL_MIN_LEVEL + MLX5_BY_PASS_NUM_PRIOS +\ + LEFTOVERS_NUM_PRIOS) + +#define KERNEL_NUM_LEVELS 3 #define KERNEL_NUM_PRIOS 2 -#define KENREL_MIN_LEVEL 2 +#define KERNEL_MIN_LEVEL 2 -#define ANCHOR_MAX_FT 1 +#define ANCHOR_NUM_LEVELS 1 #define ANCHOR_NUM_PRIOS 1 #define ANCHOR_MIN_LEVEL (BY_PASS_MIN_LEVEL + 1) struct node_caps { @@ -92,7 +93,7 @@ static struct init_tree_node { int min_ft_level; int num_leaf_prios; int prio; - int max_ft; + int num_levels; } root_fs = { .type = FS_TYPE_NAMESPACE, .ar_size = 4, @@ -102,17 +103,19 @@ static struct init_tree_node { FS_CAP(flow_table_properties_nic_receive.modify_root), FS_CAP(flow_table_properties_nic_receive.identified_miss_table_mode), FS_CAP(flow_table_properties_nic_receive.flow_table_modify)), - ADD_NS(ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS, BY_PASS_PRIO_MAX_FT))), - ADD_PRIO(0, KENREL_MIN_LEVEL, 0, {}, - ADD_NS(ADD_MULTIPLE_PRIO(KERNEL_NUM_PRIOS, KERNEL_MAX_FT))), + ADD_NS(ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS, + BY_PASS_PRIO_NUM_LEVELS))), + ADD_PRIO(0, KERNEL_MIN_LEVEL, 0, {}, + ADD_NS(ADD_MULTIPLE_PRIO(KERNEL_NUM_PRIOS, + KERNEL_NUM_LEVELS))), ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0, FS_REQUIRED_CAPS(FS_CAP(flow_table_properties_nic_receive.flow_modify_en), FS_CAP(flow_table_properties_nic_receive.modify_root), FS_CAP(flow_table_properties_nic_receive.identified_miss_table_mode), FS_CAP(flow_table_properties_nic_receive.flow_table_modify)), - ADD_NS(ADD_MULTIPLE_PRIO(LEFTOVERS_NUM_PRIOS, 
LEFTOVERS_MAX_FT))), + ADD_NS(ADD_MULTIPLE_PRIO(LEFTOVERS_NUM_PRIOS, LEFTOVERS_NUM_LEVELS))), ADD_PRIO(0, ANCHOR_MIN_LEVEL, 0, {}, - ADD_NS(ADD_MULTIPLE_PRIO(ANCHOR_NUM_PRIOS, ANCHOR_MAX_FT))), + ADD_NS(ADD_MULTIPLE_PRIO(ANCHOR_NUM_PRIOS, ANCHOR_NUM_LEVELS))), } }; @@ -716,7 +719,7 @@ struct mlx5_flow_table *mlx5_create_flow_table(struct mlx5_flow_namespace *ns, err = -EINVAL; goto unlock_root; } - if (fs_prio->num_ft == fs_prio->max_ft) { + if (fs_prio->num_ft == fs_prio->num_levels) { err = -ENOSPC; goto unlock_root; } @@ -1311,7 +1314,7 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev, EXPORT_SYMBOL(mlx5_get_flow_namespace); static struct fs_prio *fs_create_prio(struct mlx5_flow_namespace *ns, - unsigned prio, int max_ft) + unsigned int prio, int num_levels) { struct fs_prio *fs_prio; @@ -1322,7 +1325,7 @@ static struct fs_prio *fs_create_prio(struct mlx5_flow_namespace *ns, fs_prio->node.type = FS_TYPE_PRIO; tree_init_node(&fs_prio->node, 1, NULL); tree_add_node(&fs_prio->node, &ns->node); - fs_prio->max_ft = max_ft; + fs_prio->num_levels = num_levels; fs_prio->prio = prio; list_add_tail(&fs_prio->node.list, &ns->node.children); @@ -1360,7 +1363,7 @@ static int create_leaf_prios(struct mlx5_flow_namespace *ns, struct init_tree_no int i; for (i = 0; i < prio_metadata->num_leaf_prios; i++) { - fs_prio = fs_create_prio(ns, i, prio_metadata->max_ft); + fs_prio = fs_create_prio(ns, i, prio_metadata->num_levels); if (IS_ERR(fs_prio)) return PTR_ERR(fs_prio); } @@ -1406,7 +1409,7 @@ static int init_root_tree_recursive(struct mlx5_core_dev *dev, fs_get_obj(fs_ns, fs_parent_node); if (init_node->num_leaf_prios) return create_leaf_prios(fs_ns, init_node); - fs_prio = fs_create_prio(fs_ns, index, init_node->max_ft); + fs_prio = fs_create_prio(fs_ns, index, init_node->num_levels); if (IS_ERR(fs_prio)) return PTR_ERR(fs_prio); base = &fs_prio->node; @@ -1479,9 +1482,9 @@ static int set_prio_attrs_in_ns(struct mlx5_flow_namespace *ns, int acc_level) struct fs_prio *prio; fs_for_each_prio(prio, ns) { - /* This updates prio start_level and max_ft */ + /* This updates prio start_level and num_levels */ set_prio_attrs_in_prio(prio, acc_level); - acc_level += prio->max_ft; + acc_level += prio->num_levels; } return acc_level; } @@ -1493,11 +1496,11 @@ static void set_prio_attrs_in_prio(struct fs_prio *prio, int acc_level) prio->start_level = acc_level; fs_for_each_ns(ns, prio) - /* This updates start_level and max_ft of ns's priority descendants */ + /* This updates start_level and num_levels of ns's priority descendants */ acc_level_ns = set_prio_attrs_in_ns(ns, acc_level); - if (!prio->max_ft) - prio->max_ft = acc_level_ns - prio->start_level; - WARN_ON(prio->max_ft < acc_level_ns - prio->start_level); + if (!prio->num_levels) + prio->num_levels = acc_level_ns - prio->start_level; + WARN_ON(prio->num_levels < acc_level_ns - prio->start_level); } static void set_prio_attrs(struct mlx5_flow_root_namespace *root_ns) @@ -1508,7 +1511,7 @@ static void set_prio_attrs(struct mlx5_flow_root_namespace *root_ns) fs_for_each_prio(prio, ns) { set_prio_attrs_in_prio(prio, start_level); - start_level += prio->max_ft; + start_level += prio->num_levels; } } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h index f37a6248a27b..d607e564f454 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h @@ -107,7 +107,7 @@ struct fs_fte { /* Type of children is 
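set_prio_attrs() above hands every priority a contiguous block of levels by accumulating num_levels into each priority's start_level. The sketch below flattens the namespace tree into a plain array to show just that bookkeeping; the recursion through namespaces is omitted and the per-priority level counts are only loosely modelled on the constants in this patch.

/*
 * Flattened model of set_prio_attrs(): walk the priorities in order and
 * give each one a contiguous range of levels. The real code recurses
 * through namespaces; the level counts here are illustrative only.
 */
#include <stdio.h>

struct prio {
        const char *name;
        int num_levels;
        int start_level;
};

static void set_prio_attrs(struct prio *prios, int nprios)
{
        int acc_level = 0;

        for (int i = 0; i < nprios; i++) {
                prios[i].start_level = acc_level;
                acc_level += prios[i].num_levels;
        }
}

int main(void)
{
        struct prio prios[] = {
                { "bypass",    1, 0 },
                { "kernel",    3, 0 },
                { "leftovers", 1, 0 },
                { "anchor",    1, 0 },
        };
        int n = sizeof(prios) / sizeof(prios[0]);

        set_prio_attrs(prios, n);
        for (int i = 0; i < n; i++)
                printf("%-9s levels %d..%d\n", prios[i].name,
                       prios[i].start_level,
                       prios[i].start_level + prios[i].num_levels - 1);
        return 0;
}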
mlx5_flow_table/namespace */ struct fs_prio { struct fs_node node; - unsigned int max_ft; + unsigned int num_levels; unsigned int start_level; unsigned int prio; unsigned int num_ft; From d63cd28608bb563d52e62990fa01c016e8dbdb75 Mon Sep 17 00:00:00 2001 From: Maor Gottlieb Date: Fri, 29 Apr 2016 01:36:35 +0300 Subject: [PATCH 1178/1649] net/mlx5: Add user chosen levels when allocating flow tables Currently, consumers of the flow steering infrastructure can't choose their own flow table levels and are limited to one flow table per level. This just waste levels. Instead, we introduce here the possibility to use multiple flow tables in a level. The user is free to connect these flow tables, while following the rule (FTEs in FT of level x could only point to FTs of level y where y > x). In addition this patch switch the order of the create/destroy flow tables of the NIC(vlan and main). Signed-off-by: Maor Gottlieb Signed-off-by: Saeed Mahameed Signed-off-by: David S. Miller --- drivers/infiniband/hw/mlx5/main.c | 3 +- .../net/ethernet/mellanox/mlx5/core/en_fs.c | 30 +++++---- .../net/ethernet/mellanox/mlx5/core/en_tc.c | 3 +- .../net/ethernet/mellanox/mlx5/core/eswitch.c | 2 +- .../net/ethernet/mellanox/mlx5/core/fs_core.c | 66 ++++++++++++------- include/linux/mlx5/fs.h | 6 +- 6 files changed, 70 insertions(+), 40 deletions(-) diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 99eb1c1a3b7b..3ff663c35bac 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -1438,7 +1438,8 @@ static struct mlx5_ib_flow_prio *get_flow_table(struct mlx5_ib_dev *dev, if (!ft) { ft = mlx5_create_auto_grouped_flow_table(ns, priority, num_entries, - num_groups); + num_groups, + 0); if (!IS_ERR(ft)) { prio->refcount = 0; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c index 4df49e660587..d61171ae0168 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c @@ -37,6 +37,11 @@ #include #include "en.h" +enum { + MLX5E_VLAN_FT_LEVEL = 0, + MLX5E_MAIN_FT_LEVEL +}; + #define MLX5_SET_CFG(p, f, v) MLX5_SET(create_flow_group_in, p, f, v) enum { @@ -1041,7 +1046,8 @@ static int mlx5e_create_main_flow_table(struct mlx5e_priv *priv) int err; ft->num_groups = 0; - ft->t = mlx5_create_flow_table(priv->fts.ns, 1, MLX5E_MAIN_TABLE_SIZE); + ft->t = mlx5_create_flow_table(priv->fts.ns, 1, MLX5E_MAIN_TABLE_SIZE, + MLX5E_MAIN_FT_LEVEL); if (IS_ERR(ft->t)) { err = PTR_ERR(ft->t); @@ -1150,7 +1156,8 @@ static int mlx5e_create_vlan_flow_table(struct mlx5e_priv *priv) int err; ft->num_groups = 0; - ft->t = mlx5_create_flow_table(priv->fts.ns, 1, MLX5E_VLAN_TABLE_SIZE); + ft->t = mlx5_create_flow_table(priv->fts.ns, 1, MLX5E_VLAN_TABLE_SIZE, + MLX5E_VLAN_FT_LEVEL); if (IS_ERR(ft->t)) { err = PTR_ERR(ft->t); @@ -1167,11 +1174,16 @@ static int mlx5e_create_vlan_flow_table(struct mlx5e_priv *priv) if (err) goto err_free_g; + err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0); + if (err) + goto err_destroy_vlan_flow_groups; + return 0; +err_destroy_vlan_flow_groups: + mlx5e_destroy_groups(ft); err_free_g: kfree(ft->g); - err_destroy_vlan_flow_table: mlx5_destroy_flow_table(ft->t); ft->t = NULL; @@ -1194,15 +1206,11 @@ int mlx5e_create_flow_tables(struct mlx5e_priv *priv) if (!priv->fts.ns) return -EINVAL; - err = mlx5e_create_vlan_flow_table(priv); + err = mlx5e_create_main_flow_table(priv); if (err) return err; - err = 
mlx5e_create_main_flow_table(priv); - if (err) - goto err_destroy_vlan_flow_table; - - err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0); + err = mlx5e_create_vlan_flow_table(priv); if (err) goto err_destroy_main_flow_table; @@ -1210,8 +1218,6 @@ int mlx5e_create_flow_tables(struct mlx5e_priv *priv) err_destroy_main_flow_table: mlx5e_destroy_main_flow_table(priv); -err_destroy_vlan_flow_table: - mlx5e_destroy_vlan_flow_table(priv); return err; } @@ -1219,6 +1225,6 @@ err_destroy_vlan_flow_table: void mlx5e_destroy_flow_tables(struct mlx5e_priv *priv) { mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0); - mlx5e_destroy_main_flow_table(priv); mlx5e_destroy_vlan_flow_table(priv); + mlx5e_destroy_main_flow_table(priv); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index b3de09f13425..2137387c043d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -64,7 +64,8 @@ static struct mlx5_flow_rule *mlx5e_tc_add_flow(struct mlx5e_priv *priv, priv->fts.tc.t = mlx5_create_auto_grouped_flow_table(priv->fts.ns, 0, MLX5E_TC_FLOW_TABLE_NUM_ENTRIES, - MLX5E_TC_FLOW_TABLE_NUM_GROUPS); + MLX5E_TC_FLOW_TABLE_NUM_GROUPS, + 0); if (IS_ERR(priv->fts.tc.t)) { netdev_err(priv->netdev, "Failed to create tc offload table\n"); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index bc3d9f8a75c1..ff91bb5e1c43 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -401,7 +401,7 @@ static int esw_create_fdb_table(struct mlx5_eswitch *esw, int nvports) memset(flow_group_in, 0, inlen); table_size = BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size)); - fdb = mlx5_create_flow_table(root_ns, 0, table_size); + fdb = mlx5_create_flow_table(root_ns, 0, table_size, 0); if (IS_ERR_OR_NULL(fdb)) { err = PTR_ERR(fdb); esw_warn(dev, "Failed to create FDB Table err %d\n", err); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index cfb35c334cd1..ca55d7e30e5c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -225,19 +225,6 @@ static struct fs_prio *find_prio(struct mlx5_flow_namespace *ns, return NULL; } -static unsigned int find_next_free_level(struct fs_prio *prio) -{ - if (!list_empty(&prio->node.children)) { - struct mlx5_flow_table *ft; - - ft = list_last_entry(&prio->node.children, - struct mlx5_flow_table, - node.list); - return ft->level + 1; - } - return prio->start_level; -} - static bool masked_memcmp(void *mask, void *val1, void *val2, size_t size) { unsigned int i; @@ -696,9 +683,23 @@ static int connect_flow_table(struct mlx5_core_dev *dev, struct mlx5_flow_table return err; } +static void list_add_flow_table(struct mlx5_flow_table *ft, + struct fs_prio *prio) +{ + struct list_head *prev = &prio->node.children; + struct mlx5_flow_table *iter; + + fs_for_each_ft(iter, prio) { + if (iter->level > ft->level) + break; + prev = &iter->node.list; + } + list_add(&ft->node.list, prev); +} + struct mlx5_flow_table *mlx5_create_flow_table(struct mlx5_flow_namespace *ns, - int prio, - int max_fte) + int prio, int max_fte, + u32 level) { struct mlx5_flow_table *next_ft = NULL; struct mlx5_flow_table *ft; @@ -719,12 +720,15 @@ struct mlx5_flow_table *mlx5_create_flow_table(struct mlx5_flow_namespace *ns, err = 
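The level rule quoted in the commit message above (a rule in a table at level x may only forward to a table at level y where y > x) is what dest_is_valid(), added a little further down in this patch, checks before a rule is installed. The reduced restatement below keeps only the fields the check needs and is not the driver code; table names and levels are example values.

/*
 * Reduced restatement of the dest_is_valid() level check: forwarding to
 * a flow table is only allowed when the destination table sits at a
 * strictly higher level than the table holding the rule.
 */
#include <stdbool.h>
#include <stdio.h>

struct flow_table {
        unsigned int level;
};

enum dest_type { DEST_FLOW_TABLE, DEST_TIR };

struct flow_destination {
        enum dest_type type;
        const struct flow_table *ft;
};

static bool dest_is_valid(const struct flow_destination *dest,
                          bool fwd_dest_action,
                          const struct flow_table *ft)
{
        if (!fwd_dest_action)
                return true;             /* e.g. drop rules need no dest   */

        if (!dest)
                return false;
        if (dest->type == DEST_FLOW_TABLE && dest->ft->level <= ft->level)
                return false;            /* would forward sideways or back */
        return true;
}

int main(void)
{
        struct flow_table vlan = { .level = 0 };
        struct flow_table main_ft = { .level = 1 };
        struct flow_destination to_main = { DEST_FLOW_TABLE, &main_ft };
        struct flow_destination to_vlan = { DEST_FLOW_TABLE, &vlan };

        printf("vlan -> main: %s\n",
               dest_is_valid(&to_main, true, &vlan) ? "ok" : "rejected");
        printf("main -> vlan: %s\n",
               dest_is_valid(&to_vlan, true, &main_ft) ? "ok" : "rejected");
        return 0;
}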
-EINVAL; goto unlock_root; } - if (fs_prio->num_ft == fs_prio->num_levels) { + if (level >= fs_prio->num_levels) { err = -ENOSPC; goto unlock_root; } - - ft = alloc_flow_table(find_next_free_level(fs_prio), + /* The level is related to the + * priority level range. + */ + level += fs_prio->start_level; + ft = alloc_flow_table(level, roundup_pow_of_two(max_fte), root->table_type); if (!ft) { @@ -745,7 +749,7 @@ struct mlx5_flow_table *mlx5_create_flow_table(struct mlx5_flow_namespace *ns, goto destroy_ft; lock_ref_node(&fs_prio->node); tree_add_node(&ft->node, &fs_prio->node); - list_add_tail(&ft->node.list, &fs_prio->node.children); + list_add_flow_table(ft, fs_prio); fs_prio->num_ft++; unlock_ref_node(&fs_prio->node); mutex_unlock(&root->chain_lock); @@ -762,14 +766,15 @@ unlock_root: struct mlx5_flow_table *mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns, int prio, int num_flow_table_entries, - int max_num_groups) + int max_num_groups, + u32 level) { struct mlx5_flow_table *ft; if (max_num_groups > num_flow_table_entries) return ERR_PTR(-EINVAL); - ft = mlx5_create_flow_table(ns, prio, num_flow_table_entries); + ft = mlx5_create_flow_table(ns, prio, num_flow_table_entries, level); if (IS_ERR(ft)) return ft; @@ -1068,6 +1073,20 @@ unlock_fg: return rule; } +static bool dest_is_valid(struct mlx5_flow_destination *dest, + u32 action, + struct mlx5_flow_table *ft) +{ + if (!(action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST)) + return true; + + if (!dest || ((dest->type == + MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE) && + (dest->ft->level <= ft->level))) + return false; + return true; +} + static struct mlx5_flow_rule * _mlx5_add_flow_rule(struct mlx5_flow_table *ft, u8 match_criteria_enable, @@ -1080,7 +1099,7 @@ _mlx5_add_flow_rule(struct mlx5_flow_table *ft, struct mlx5_flow_group *g; struct mlx5_flow_rule *rule; - if ((action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) && !dest) + if (!dest_is_valid(dest, action, ft)) return ERR_PTR(-EINVAL); nested_lock_ref_node(&ft->node, FS_MUTEX_GRANDPARENT); @@ -1517,6 +1536,7 @@ static void set_prio_attrs(struct mlx5_flow_root_namespace *root_ns) #define ANCHOR_PRIO 0 #define ANCHOR_SIZE 1 +#define ANCHOR_LEVEL 0 static int create_anchor_flow_table(struct mlx5_core_dev *dev) { @@ -1526,7 +1546,7 @@ static int create_anchor_flow_table(struct mlx5_core_dev ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ANCHOR); if (!ns) return -EINVAL; - ft = mlx5_create_flow_table(ns, ANCHOR_PRIO, ANCHOR_SIZE); + ft = mlx5_create_flow_table(ns, ANCHOR_PRIO, ANCHOR_SIZE, ANCHOR_LEVEL); if (IS_ERR(ft)) { mlx5_core_err(dev, "Failed to create last anchor flow table"); return PTR_ERR(ft); diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h index 28a5b662ab6a..165ff4f9cc6a 100644 --- a/include/linux/mlx5/fs.h +++ b/include/linux/mlx5/fs.h @@ -82,12 +82,14 @@ struct mlx5_flow_table * mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns, int prio, int num_flow_table_entries, - int max_num_groups); + int max_num_groups, + u32 level); struct mlx5_flow_table * mlx5_create_flow_table(struct mlx5_flow_namespace *ns, int prio, - int num_flow_table_entries); + int num_flow_table_entries, + u32 level); int mlx5_destroy_flow_table(struct mlx5_flow_table *ft); /* inbox should be set with the following values: From 13de6c106cdd68e43b6c282c17c110e8f7905872 Mon Sep 17 00:00:00 2001 From: Maor Gottlieb Date: Fri, 29 Apr 2016 01:36:36 +0300 Subject: [PATCH 1179/1649] net/mlx5: Support different attributes for priorities in namespace Currently, namespace 
could be initialized only with priorities with the same attributes. Add support to initialize namespace with priorities with different attributes(e.g. different number of levels). Signed-off-by: Maor Gottlieb Signed-off-by: Saeed Mahameed Signed-off-by: David S. Miller --- .../net/ethernet/mellanox/mlx5/core/fs_core.c | 31 ++++++++++++------- 1 file changed, 19 insertions(+), 12 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index ca55d7e30e5c..2b822933557d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -74,9 +74,10 @@ #define BY_PASS_MIN_LEVEL (KERNEL_MIN_LEVEL + MLX5_BY_PASS_NUM_PRIOS +\ LEFTOVERS_NUM_PRIOS) -#define KERNEL_NUM_LEVELS 3 -#define KERNEL_NUM_PRIOS 2 -#define KERNEL_MIN_LEVEL 2 +#define KERNEL_NIC_PRIO_NUM_LEVELS 2 +#define KERNEL_NIC_NUM_PRIOS 1 +/* One more level for tc */ +#define KERNEL_MIN_LEVEL (KERNEL_NIC_PRIO_NUM_LEVELS + 1) #define ANCHOR_NUM_LEVELS 1 #define ANCHOR_NUM_PRIOS 1 @@ -106,8 +107,9 @@ static struct init_tree_node { ADD_NS(ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS, BY_PASS_PRIO_NUM_LEVELS))), ADD_PRIO(0, KERNEL_MIN_LEVEL, 0, {}, - ADD_NS(ADD_MULTIPLE_PRIO(KERNEL_NUM_PRIOS, - KERNEL_NUM_LEVELS))), + ADD_NS(ADD_MULTIPLE_PRIO(1, 1), + ADD_MULTIPLE_PRIO(KERNEL_NIC_NUM_PRIOS, + KERNEL_NIC_PRIO_NUM_LEVELS))), ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0, FS_REQUIRED_CAPS(FS_CAP(flow_table_properties_nic_receive.flow_modify_en), FS_CAP(flow_table_properties_nic_receive.modify_root), @@ -1375,14 +1377,14 @@ static struct mlx5_flow_namespace *fs_create_namespace(struct fs_prio *prio) return ns; } -static int create_leaf_prios(struct mlx5_flow_namespace *ns, struct init_tree_node - *prio_metadata) +static int create_leaf_prios(struct mlx5_flow_namespace *ns, int prio, + struct init_tree_node *prio_metadata) { struct fs_prio *fs_prio; int i; for (i = 0; i < prio_metadata->num_leaf_prios; i++) { - fs_prio = fs_create_prio(ns, i, prio_metadata->num_levels); + fs_prio = fs_create_prio(ns, prio++, prio_metadata->num_levels); if (IS_ERR(fs_prio)) return PTR_ERR(fs_prio); } @@ -1409,7 +1411,7 @@ static int init_root_tree_recursive(struct mlx5_core_dev *dev, struct init_tree_node *init_node, struct fs_node *fs_parent_node, struct init_tree_node *init_parent_node, - int index) + int prio) { int max_ft_level = MLX5_CAP_FLOWTABLE(dev, flow_table_properties_nic_receive. 
@@ -1427,8 +1429,8 @@ static int init_root_tree_recursive(struct mlx5_core_dev *dev, fs_get_obj(fs_ns, fs_parent_node); if (init_node->num_leaf_prios) - return create_leaf_prios(fs_ns, init_node); - fs_prio = fs_create_prio(fs_ns, index, init_node->num_levels); + return create_leaf_prios(fs_ns, prio, init_node); + fs_prio = fs_create_prio(fs_ns, prio, init_node->num_levels); if (IS_ERR(fs_prio)) return PTR_ERR(fs_prio); base = &fs_prio->node; @@ -1441,11 +1443,16 @@ static int init_root_tree_recursive(struct mlx5_core_dev *dev, } else { return -EINVAL; } + prio = 0; for (i = 0; i < init_node->ar_size; i++) { err = init_root_tree_recursive(dev, &init_node->children[i], - base, init_node, i); + base, init_node, prio); if (err) return err; + if (init_node->children[i].type == FS_TYPE_PRIO && + init_node->children[i].num_leaf_prios) { + prio += init_node->children[i].num_leaf_prios; + } } return 0; From acff797cd187402d73cf9f290531a41250613294 Mon Sep 17 00:00:00 2001 From: Maor Gottlieb Date: Fri, 29 Apr 2016 01:36:37 +0300 Subject: [PATCH 1180/1649] net/mlx5e: Refactor mlx5e flow steering structs Slightly refactor and re-order the flow steering structs, tables and data-bases for better self-containment and flexibility to add more future steering phases (tables/rules/data bases) e.g: aRFS. Changes: 1. Move the vlan DB and address DB into their table structs. 2. Rename steering table structs to unique format: mlx5e_*_table, e.g: mlx5e_vlan_table. Signed-off-by: Maor Gottlieb Signed-off-by: Saeed Mahameed Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 73 +++---- .../net/ethernet/mellanox/mlx5/core/en_fs.c | 186 +++++++++--------- .../net/ethernet/mellanox/mlx5/core/en_main.c | 8 +- .../net/ethernet/mellanox/mlx5/core/en_tc.c | 45 ++--- .../net/ethernet/mellanox/mlx5/core/en_tc.h | 2 +- 5 files changed, 160 insertions(+), 154 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 5c8e98c277dc..02b964402156 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -388,6 +388,17 @@ enum mlx5e_traffic_types { MLX5E_NUM_INDIR_TIRS = MLX5E_TT_ANY, }; +enum { + MLX5E_STATE_ASYNC_EVENTS_ENABLE, + MLX5E_STATE_OPENED, + MLX5E_STATE_DESTROYING, +}; + +struct mlx5e_vxlan_db { + spinlock_t lock; /* protect vxlan table */ + struct radix_tree_root tree; +}; + struct mlx5e_eth_addr_info { u8 addr[ETH_ALEN + 2]; u32 tt_vec; @@ -396,7 +407,14 @@ struct mlx5e_eth_addr_info { #define MLX5E_ETH_ADDR_HASH_SIZE (1 << BITS_PER_BYTE) -struct mlx5e_eth_addr_db { +struct mlx5e_flow_table { + int num_groups; + struct mlx5_flow_table *t; + struct mlx5_flow_group **g; +}; + +struct mlx5e_main_table { + struct mlx5e_flow_table ft; struct hlist_head netdev_uc[MLX5E_ETH_ADDR_HASH_SIZE]; struct hlist_head netdev_mc[MLX5E_ETH_ADDR_HASH_SIZE]; struct mlx5e_eth_addr_info broadcast; @@ -407,13 +425,15 @@ struct mlx5e_eth_addr_db { bool promisc_enabled; }; -enum { - MLX5E_STATE_ASYNC_EVENTS_ENABLE, - MLX5E_STATE_OPENED, - MLX5E_STATE_DESTROYING, +struct mlx5e_tc_table { + struct mlx5_flow_table *t; + + struct rhashtable_params ht_params; + struct rhashtable ht; }; -struct mlx5e_vlan_db { +struct mlx5e_vlan_table { + struct mlx5e_flow_table ft; unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; struct mlx5_flow_rule *active_vlans_rule[VLAN_N_VID]; struct mlx5_flow_rule *untagged_rule; @@ -421,29 +441,11 @@ struct mlx5e_vlan_db { bool filter_disabled; }; -struct 
mlx5e_vxlan_db { - spinlock_t lock; /* protect vxlan table */ - struct radix_tree_root tree; -}; - -struct mlx5e_flow_table { - int num_groups; - struct mlx5_flow_table *t; - struct mlx5_flow_group **g; -}; - -struct mlx5e_tc_flow_table { - struct mlx5_flow_table *t; - - struct rhashtable_params ht_params; - struct rhashtable ht; -}; - -struct mlx5e_flow_tables { - struct mlx5_flow_namespace *ns; - struct mlx5e_tc_flow_table tc; - struct mlx5e_flow_table vlan; - struct mlx5e_flow_table main; +struct mlx5e_flow_steering { + struct mlx5_flow_namespace *ns; + struct mlx5e_tc_table tc; + struct mlx5e_vlan_table vlan; + struct mlx5e_main_table main; }; struct mlx5e_direct_tir { @@ -451,6 +453,11 @@ struct mlx5e_direct_tir { u32 rqtn; }; +enum { + MLX5E_TC_PRIO = 0, + MLX5E_NIC_PRIO +}; + struct mlx5e_priv { /* priv data path fields - start */ struct mlx5e_sq **txq_to_sq_map; @@ -472,9 +479,7 @@ struct mlx5e_priv { u32 indir_tirn[MLX5E_NUM_INDIR_TIRS]; struct mlx5e_direct_tir direct_tir[MLX5E_MAX_NUM_CHANNELS]; - struct mlx5e_flow_tables fts; - struct mlx5e_eth_addr_db eth_addr; - struct mlx5e_vlan_db vlan; + struct mlx5e_flow_steering fs; struct mlx5e_vxlan_db vxlan; struct mlx5e_params params; @@ -556,8 +561,8 @@ struct mlx5_cqe64 *mlx5e_get_cqe(struct mlx5e_cq *cq); void mlx5e_update_stats(struct mlx5e_priv *priv); -int mlx5e_create_flow_tables(struct mlx5e_priv *priv); -void mlx5e_destroy_flow_tables(struct mlx5e_priv *priv); +int mlx5e_create_flow_steering(struct mlx5e_priv *priv); +void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv); void mlx5e_init_eth_addr(struct mlx5e_priv *priv); void mlx5e_set_rx_mode_work(struct work_struct *work); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c index d61171ae0168..3ee35b094c82 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c @@ -247,7 +247,7 @@ static int __mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv, struct mlx5_flow_destination dest; u8 match_criteria_enable = 0; struct mlx5_flow_rule **rule_p; - struct mlx5_flow_table *ft = priv->fts.main.t; + struct mlx5_flow_table *ft = priv->fs.main.ft.t; u8 *mc_dmac = MLX5_ADDR_OF(fte_match_param, mc, outer_headers.dmac_47_16); u8 *mv_dmac = MLX5_ADDR_OF(fte_match_param, mv, @@ -477,7 +477,7 @@ static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv) int i; list_size = 0; - for_each_set_bit(vlan, priv->vlan.active_vlans, VLAN_N_VID) + for_each_set_bit(vlan, priv->fs.vlan.active_vlans, VLAN_N_VID) list_size++; max_list_size = 1 << MLX5_CAP_GEN(priv->mdev, log_max_vlan_list); @@ -494,7 +494,7 @@ static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv) return -ENOMEM; i = 0; - for_each_set_bit(vlan, priv->vlan.active_vlans, VLAN_N_VID) { + for_each_set_bit(vlan, priv->fs.vlan.active_vlans, VLAN_N_VID) { if (i >= list_size) break; vlans[i++] = vlan; @@ -519,28 +519,28 @@ static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv, enum mlx5e_vlan_rule_type rule_type, u16 vid, u32 *mc, u32 *mv) { - struct mlx5_flow_table *ft = priv->fts.vlan.t; + struct mlx5_flow_table *ft = priv->fs.vlan.ft.t; struct mlx5_flow_destination dest; u8 match_criteria_enable = 0; struct mlx5_flow_rule **rule_p; int err = 0; dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; - dest.ft = priv->fts.main.t; + dest.ft = priv->fs.main.ft.t; match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.vlan_tag); switch (rule_type) { case 
MLX5E_VLAN_RULE_TYPE_UNTAGGED: - rule_p = &priv->vlan.untagged_rule; + rule_p = &priv->fs.vlan.untagged_rule; break; case MLX5E_VLAN_RULE_TYPE_ANY_VID: - rule_p = &priv->vlan.any_vlan_rule; + rule_p = &priv->fs.vlan.any_vlan_rule; MLX5_SET(fte_match_param, mv, outer_headers.vlan_tag, 1); break; default: /* MLX5E_VLAN_RULE_TYPE_MATCH_VID */ - rule_p = &priv->vlan.active_vlans_rule[vid]; + rule_p = &priv->fs.vlan.active_vlans_rule[vid]; MLX5_SET(fte_match_param, mv, outer_headers.vlan_tag, 1); MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid); MLX5_SET(fte_match_param, mv, outer_headers.first_vid, vid); @@ -594,22 +594,22 @@ static void mlx5e_del_vlan_rule(struct mlx5e_priv *priv, { switch (rule_type) { case MLX5E_VLAN_RULE_TYPE_UNTAGGED: - if (priv->vlan.untagged_rule) { - mlx5_del_flow_rule(priv->vlan.untagged_rule); - priv->vlan.untagged_rule = NULL; + if (priv->fs.vlan.untagged_rule) { + mlx5_del_flow_rule(priv->fs.vlan.untagged_rule); + priv->fs.vlan.untagged_rule = NULL; } break; case MLX5E_VLAN_RULE_TYPE_ANY_VID: - if (priv->vlan.any_vlan_rule) { - mlx5_del_flow_rule(priv->vlan.any_vlan_rule); - priv->vlan.any_vlan_rule = NULL; + if (priv->fs.vlan.any_vlan_rule) { + mlx5_del_flow_rule(priv->fs.vlan.any_vlan_rule); + priv->fs.vlan.any_vlan_rule = NULL; } break; case MLX5E_VLAN_RULE_TYPE_MATCH_VID: mlx5e_vport_context_update_vlans(priv); - if (priv->vlan.active_vlans_rule[vid]) { - mlx5_del_flow_rule(priv->vlan.active_vlans_rule[vid]); - priv->vlan.active_vlans_rule[vid] = NULL; + if (priv->fs.vlan.active_vlans_rule[vid]) { + mlx5_del_flow_rule(priv->fs.vlan.active_vlans_rule[vid]); + priv->fs.vlan.active_vlans_rule[vid] = NULL; } mlx5e_vport_context_update_vlans(priv); break; @@ -618,10 +618,10 @@ static void mlx5e_del_vlan_rule(struct mlx5e_priv *priv, void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv) { - if (!priv->vlan.filter_disabled) + if (!priv->fs.vlan.filter_disabled) return; - priv->vlan.filter_disabled = false; + priv->fs.vlan.filter_disabled = false; if (priv->netdev->flags & IFF_PROMISC) return; mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0); @@ -629,10 +629,10 @@ void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv) void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv) { - if (priv->vlan.filter_disabled) + if (priv->fs.vlan.filter_disabled) return; - priv->vlan.filter_disabled = true; + priv->fs.vlan.filter_disabled = true; if (priv->netdev->flags & IFF_PROMISC) return; mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0); @@ -643,7 +643,7 @@ int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto, { struct mlx5e_priv *priv = netdev_priv(dev); - set_bit(vid, priv->vlan.active_vlans); + set_bit(vid, priv->fs.vlan.active_vlans); return mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid); } @@ -653,7 +653,7 @@ int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto, { struct mlx5e_priv *priv = netdev_priv(dev); - clear_bit(vid, priv->vlan.active_vlans); + clear_bit(vid, priv->fs.vlan.active_vlans); mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid); @@ -687,14 +687,14 @@ static void mlx5e_sync_netdev_addr(struct mlx5e_priv *priv) netif_addr_lock_bh(netdev); - mlx5e_add_eth_addr_to_hash(priv->eth_addr.netdev_uc, + mlx5e_add_eth_addr_to_hash(priv->fs.main.netdev_uc, priv->netdev->dev_addr); netdev_for_each_uc_addr(ha, netdev) - mlx5e_add_eth_addr_to_hash(priv->eth_addr.netdev_uc, ha->addr); + mlx5e_add_eth_addr_to_hash(priv->fs.main.netdev_uc, ha->addr); 
netdev_for_each_mc_addr(ha, netdev) - mlx5e_add_eth_addr_to_hash(priv->eth_addr.netdev_mc, ha->addr); + mlx5e_add_eth_addr_to_hash(priv->fs.main.netdev_mc, ha->addr); netif_addr_unlock_bh(netdev); } @@ -710,11 +710,11 @@ static void mlx5e_fill_addr_array(struct mlx5e_priv *priv, int list_type, int i = 0; int hi; - addr_list = is_uc ? priv->eth_addr.netdev_uc : priv->eth_addr.netdev_mc; + addr_list = is_uc ? priv->fs.main.netdev_uc : priv->fs.main.netdev_mc; if (is_uc) /* Make sure our own address is pushed first */ ether_addr_copy(addr_array[i++], ndev->dev_addr); - else if (priv->eth_addr.broadcast_enabled) + else if (priv->fs.main.broadcast_enabled) ether_addr_copy(addr_array[i++], ndev->broadcast); mlx5e_for_each_hash_node(hn, tmp, addr_list, hi) { @@ -739,12 +739,12 @@ static void mlx5e_vport_context_update_addr_list(struct mlx5e_priv *priv, int err; int hi; - size = is_uc ? 0 : (priv->eth_addr.broadcast_enabled ? 1 : 0); + size = is_uc ? 0 : (priv->fs.main.broadcast_enabled ? 1 : 0); max_size = is_uc ? 1 << MLX5_CAP_GEN(priv->mdev, log_max_current_uc_list) : 1 << MLX5_CAP_GEN(priv->mdev, log_max_current_mc_list); - addr_list = is_uc ? priv->eth_addr.netdev_uc : priv->eth_addr.netdev_mc; + addr_list = is_uc ? priv->fs.main.netdev_uc : priv->fs.main.netdev_mc; mlx5e_for_each_hash_node(hn, tmp, addr_list, hi) size++; @@ -775,13 +775,13 @@ out: static void mlx5e_vport_context_update(struct mlx5e_priv *priv) { - struct mlx5e_eth_addr_db *ea = &priv->eth_addr; + struct mlx5e_main_table *main_table = &priv->fs.main; mlx5e_vport_context_update_addr_list(priv, MLX5_NVPRT_LIST_TYPE_UC); mlx5e_vport_context_update_addr_list(priv, MLX5_NVPRT_LIST_TYPE_MC); mlx5_modify_nic_vport_promisc(priv->mdev, 0, - ea->allmulti_enabled, - ea->promisc_enabled); + main_table->allmulti_enabled, + main_table->promisc_enabled); } static void mlx5e_apply_netdev_addr(struct mlx5e_priv *priv) @@ -790,10 +790,10 @@ static void mlx5e_apply_netdev_addr(struct mlx5e_priv *priv) struct hlist_node *tmp; int i; - mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_uc, i) + mlx5e_for_each_hash_node(hn, tmp, priv->fs.main.netdev_uc, i) mlx5e_execute_action(priv, hn); - mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_mc, i) + mlx5e_for_each_hash_node(hn, tmp, priv->fs.main.netdev_mc, i) mlx5e_execute_action(priv, hn); } @@ -803,9 +803,9 @@ static void mlx5e_handle_netdev_addr(struct mlx5e_priv *priv) struct hlist_node *tmp; int i; - mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_uc, i) + mlx5e_for_each_hash_node(hn, tmp, priv->fs.main.netdev_uc, i) hn->action = MLX5E_ACTION_DEL; - mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_mc, i) + mlx5e_for_each_hash_node(hn, tmp, priv->fs.main.netdev_mc, i) hn->action = MLX5E_ACTION_DEL; if (!test_bit(MLX5E_STATE_DESTROYING, &priv->state)) @@ -819,7 +819,7 @@ void mlx5e_set_rx_mode_work(struct work_struct *work) struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv, set_rx_mode_work); - struct mlx5e_eth_addr_db *ea = &priv->eth_addr; + struct mlx5e_main_table *main_table = &priv->fs.main; struct net_device *ndev = priv->netdev; bool rx_mode_enable = !test_bit(MLX5E_STATE_DESTROYING, &priv->state); @@ -827,40 +827,40 @@ void mlx5e_set_rx_mode_work(struct work_struct *work) bool allmulti_enabled = rx_mode_enable && (ndev->flags & IFF_ALLMULTI); bool broadcast_enabled = rx_mode_enable; - bool enable_promisc = !ea->promisc_enabled && promisc_enabled; - bool disable_promisc = ea->promisc_enabled && !promisc_enabled; - bool enable_allmulti = 
!ea->allmulti_enabled && allmulti_enabled; - bool disable_allmulti = ea->allmulti_enabled && !allmulti_enabled; - bool enable_broadcast = !ea->broadcast_enabled && broadcast_enabled; - bool disable_broadcast = ea->broadcast_enabled && !broadcast_enabled; + bool enable_promisc = !main_table->promisc_enabled && promisc_enabled; + bool disable_promisc = main_table->promisc_enabled && !promisc_enabled; + bool enable_allmulti = !main_table->allmulti_enabled && allmulti_enabled; + bool disable_allmulti = main_table->allmulti_enabled && !allmulti_enabled; + bool enable_broadcast = !main_table->broadcast_enabled && broadcast_enabled; + bool disable_broadcast = main_table->broadcast_enabled && !broadcast_enabled; if (enable_promisc) { - mlx5e_add_eth_addr_rule(priv, &ea->promisc, MLX5E_PROMISC); - if (!priv->vlan.filter_disabled) + mlx5e_add_eth_addr_rule(priv, &main_table->promisc, MLX5E_PROMISC); + if (!priv->fs.vlan.filter_disabled) mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0); } if (enable_allmulti) - mlx5e_add_eth_addr_rule(priv, &ea->allmulti, MLX5E_ALLMULTI); + mlx5e_add_eth_addr_rule(priv, &main_table->allmulti, MLX5E_ALLMULTI); if (enable_broadcast) - mlx5e_add_eth_addr_rule(priv, &ea->broadcast, MLX5E_FULLMATCH); + mlx5e_add_eth_addr_rule(priv, &main_table->broadcast, MLX5E_FULLMATCH); mlx5e_handle_netdev_addr(priv); if (disable_broadcast) - mlx5e_del_eth_addr_from_flow_table(priv, &ea->broadcast); + mlx5e_del_eth_addr_from_flow_table(priv, &main_table->broadcast); if (disable_allmulti) - mlx5e_del_eth_addr_from_flow_table(priv, &ea->allmulti); + mlx5e_del_eth_addr_from_flow_table(priv, &main_table->allmulti); if (disable_promisc) { - if (!priv->vlan.filter_disabled) + if (!priv->fs.vlan.filter_disabled) mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0); - mlx5e_del_eth_addr_from_flow_table(priv, &ea->promisc); + mlx5e_del_eth_addr_from_flow_table(priv, &main_table->promisc); } - ea->promisc_enabled = promisc_enabled; - ea->allmulti_enabled = allmulti_enabled; - ea->broadcast_enabled = broadcast_enabled; + main_table->promisc_enabled = promisc_enabled; + main_table->allmulti_enabled = allmulti_enabled; + main_table->broadcast_enabled = broadcast_enabled; mlx5e_vport_context_update(priv); } @@ -879,7 +879,7 @@ static void mlx5e_destroy_groups(struct mlx5e_flow_table *ft) void mlx5e_init_eth_addr(struct mlx5e_priv *priv) { - ether_addr_copy(priv->eth_addr.broadcast.addr, priv->netdev->broadcast); + ether_addr_copy(priv->fs.main.broadcast.addr, priv->netdev->broadcast); } #define MLX5E_MAIN_GROUP0_SIZE BIT(3) @@ -901,8 +901,8 @@ void mlx5e_init_eth_addr(struct mlx5e_priv *priv) MLX5E_MAIN_GROUP7_SIZE +\ MLX5E_MAIN_GROUP8_SIZE) -static int __mlx5e_create_main_groups(struct mlx5e_flow_table *ft, u32 *in, - int inlen) +static int __mlx5e_create_main_table_groups(struct mlx5e_flow_table *ft, u32 *in, + int inlen) { u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria); u8 *dmac = MLX5_ADDR_OF(create_flow_group_in, in, @@ -1024,7 +1024,7 @@ err_destroy_groups: return err; } -static int mlx5e_create_main_groups(struct mlx5e_flow_table *ft) +static int mlx5e_create_main_table_groups(struct mlx5e_flow_table *ft) { u32 *in; int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); @@ -1034,20 +1034,20 @@ static int mlx5e_create_main_groups(struct mlx5e_flow_table *ft) if (!in) return -ENOMEM; - err = __mlx5e_create_main_groups(ft, in, inlen); + err = __mlx5e_create_main_table_groups(ft, in, inlen); kvfree(in); return err; } -static int mlx5e_create_main_flow_table(struct 
mlx5e_priv *priv) +static int mlx5e_create_main_table(struct mlx5e_priv *priv) { - struct mlx5e_flow_table *ft = &priv->fts.main; + struct mlx5e_flow_table *ft = &priv->fs.main.ft; int err; ft->num_groups = 0; - ft->t = mlx5_create_flow_table(priv->fts.ns, 1, MLX5E_MAIN_TABLE_SIZE, - MLX5E_MAIN_FT_LEVEL); + ft->t = mlx5_create_flow_table(priv->fs.ns, MLX5E_NIC_PRIO, + MLX5E_MAIN_TABLE_SIZE, MLX5E_MAIN_FT_LEVEL); if (IS_ERR(ft->t)) { err = PTR_ERR(ft->t); @@ -1057,10 +1057,10 @@ static int mlx5e_create_main_flow_table(struct mlx5e_priv *priv) ft->g = kcalloc(MLX5E_NUM_MAIN_GROUPS, sizeof(*ft->g), GFP_KERNEL); if (!ft->g) { err = -ENOMEM; - goto err_destroy_main_flow_table; + goto err_destroy_main_table; } - err = mlx5e_create_main_groups(ft); + err = mlx5e_create_main_table_groups(ft); if (err) goto err_free_g; return 0; @@ -1068,7 +1068,7 @@ static int mlx5e_create_main_flow_table(struct mlx5e_priv *priv) err_free_g: kfree(ft->g); -err_destroy_main_flow_table: +err_destroy_main_table: mlx5_destroy_flow_table(ft->t); ft->t = NULL; @@ -1083,9 +1083,9 @@ static void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft) ft->t = NULL; } -static void mlx5e_destroy_main_flow_table(struct mlx5e_priv *priv) +static void mlx5e_destroy_main_table(struct mlx5e_priv *priv) { - mlx5e_destroy_flow_table(&priv->fts.main); + mlx5e_destroy_flow_table(&priv->fs.main.ft); } #define MLX5E_NUM_VLAN_GROUPS 2 @@ -1094,8 +1094,8 @@ static void mlx5e_destroy_main_flow_table(struct mlx5e_priv *priv) #define MLX5E_VLAN_TABLE_SIZE (MLX5E_VLAN_GROUP0_SIZE +\ MLX5E_VLAN_GROUP1_SIZE) -static int __mlx5e_create_vlan_groups(struct mlx5e_flow_table *ft, u32 *in, - int inlen) +static int __mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft, u32 *in, + int inlen) { int err; int ix = 0; @@ -1134,7 +1134,7 @@ err_destroy_groups: return err; } -static int mlx5e_create_vlan_groups(struct mlx5e_flow_table *ft) +static int mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft) { u32 *in; int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); @@ -1144,20 +1144,20 @@ static int mlx5e_create_vlan_groups(struct mlx5e_flow_table *ft) if (!in) return -ENOMEM; - err = __mlx5e_create_vlan_groups(ft, in, inlen); + err = __mlx5e_create_vlan_table_groups(ft, in, inlen); kvfree(in); return err; } -static int mlx5e_create_vlan_flow_table(struct mlx5e_priv *priv) +static int mlx5e_create_vlan_table(struct mlx5e_priv *priv) { - struct mlx5e_flow_table *ft = &priv->fts.vlan; + struct mlx5e_flow_table *ft = &priv->fs.vlan.ft; int err; ft->num_groups = 0; - ft->t = mlx5_create_flow_table(priv->fts.ns, 1, MLX5E_VLAN_TABLE_SIZE, - MLX5E_VLAN_FT_LEVEL); + ft->t = mlx5_create_flow_table(priv->fs.ns, MLX5E_NIC_PRIO, + MLX5E_VLAN_TABLE_SIZE, MLX5E_VLAN_FT_LEVEL); if (IS_ERR(ft->t)) { err = PTR_ERR(ft->t); @@ -1167,10 +1167,10 @@ static int mlx5e_create_vlan_flow_table(struct mlx5e_priv *priv) ft->g = kcalloc(MLX5E_NUM_VLAN_GROUPS, sizeof(*ft->g), GFP_KERNEL); if (!ft->g) { err = -ENOMEM; - goto err_destroy_vlan_flow_table; + goto err_destroy_vlan_table; } - err = mlx5e_create_vlan_groups(ft); + err = mlx5e_create_vlan_table_groups(ft); if (err) goto err_free_g; @@ -1184,47 +1184,47 @@ err_destroy_vlan_flow_groups: mlx5e_destroy_groups(ft); err_free_g: kfree(ft->g); -err_destroy_vlan_flow_table: +err_destroy_vlan_table: mlx5_destroy_flow_table(ft->t); ft->t = NULL; return err; } -static void mlx5e_destroy_vlan_flow_table(struct mlx5e_priv *priv) +static void mlx5e_destroy_vlan_table(struct mlx5e_priv *priv) { - 
mlx5e_destroy_flow_table(&priv->fts.vlan); + mlx5e_destroy_flow_table(&priv->fs.vlan.ft); } -int mlx5e_create_flow_tables(struct mlx5e_priv *priv) +int mlx5e_create_flow_steering(struct mlx5e_priv *priv) { int err; - priv->fts.ns = mlx5_get_flow_namespace(priv->mdev, + priv->fs.ns = mlx5_get_flow_namespace(priv->mdev, MLX5_FLOW_NAMESPACE_KERNEL); - if (!priv->fts.ns) + if (!priv->fs.ns) return -EINVAL; - err = mlx5e_create_main_flow_table(priv); + err = mlx5e_create_main_table(priv); if (err) return err; - err = mlx5e_create_vlan_flow_table(priv); + err = mlx5e_create_vlan_table(priv); if (err) - goto err_destroy_main_flow_table; + goto err_destroy_main_table; return 0; -err_destroy_main_flow_table: - mlx5e_destroy_main_flow_table(priv); +err_destroy_main_table: + mlx5e_destroy_main_table(priv); return err; } -void mlx5e_destroy_flow_tables(struct mlx5e_priv *priv) +void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv) { mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0); - mlx5e_destroy_vlan_flow_table(priv); - mlx5e_destroy_main_flow_table(priv); + mlx5e_destroy_vlan_table(priv); + mlx5e_destroy_main_table(priv); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 04ad659f54ed..953c8afd8559 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -2969,9 +2969,9 @@ static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev) goto err_destroy_rqts; } - err = mlx5e_create_flow_tables(priv); + err = mlx5e_create_flow_steering(priv); if (err) { - mlx5_core_warn(mdev, "create flow tables failed, %d\n", err); + mlx5_core_warn(mdev, "create flow steering failed, %d\n", err); goto err_destroy_tirs; } @@ -3011,7 +3011,7 @@ err_tc_cleanup: err_dealloc_q_counters: mlx5e_destroy_q_counter(priv); - mlx5e_destroy_flow_tables(priv); + mlx5e_destroy_flow_steering(priv); err_destroy_tirs: mlx5e_destroy_tirs(priv); @@ -3069,7 +3069,7 @@ static void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, void *vpriv) mlx5e_tc_cleanup(priv); mlx5e_vxlan_cleanup(priv); mlx5e_destroy_q_counter(priv); - mlx5e_destroy_flow_tables(priv); + mlx5e_destroy_flow_steering(priv); mlx5e_destroy_tirs(priv); mlx5e_destroy_rqts(priv); mlx5e_close_drop_rq(priv); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 2137387c043d..ef017c0decdc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -46,8 +46,8 @@ struct mlx5e_tc_flow { struct mlx5_flow_rule *rule; }; -#define MLX5E_TC_FLOW_TABLE_NUM_ENTRIES 1024 -#define MLX5E_TC_FLOW_TABLE_NUM_GROUPS 4 +#define MLX5E_TC_TABLE_NUM_ENTRIES 1024 +#define MLX5E_TC_TABLE_NUM_GROUPS 4 static struct mlx5_flow_rule *mlx5e_tc_add_flow(struct mlx5e_priv *priv, u32 *match_c, u32 *match_v, @@ -55,34 +55,35 @@ static struct mlx5_flow_rule *mlx5e_tc_add_flow(struct mlx5e_priv *priv, { struct mlx5_flow_destination dest = { .type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE, - {.ft = priv->fts.vlan.t}, + {.ft = priv->fs.vlan.ft.t}, }; struct mlx5_flow_rule *rule; bool table_created = false; - if (IS_ERR_OR_NULL(priv->fts.tc.t)) { - priv->fts.tc.t = - mlx5_create_auto_grouped_flow_table(priv->fts.ns, 0, - MLX5E_TC_FLOW_TABLE_NUM_ENTRIES, - MLX5E_TC_FLOW_TABLE_NUM_GROUPS, + if (IS_ERR_OR_NULL(priv->fs.tc.t)) { + priv->fs.tc.t = + mlx5_create_auto_grouped_flow_table(priv->fs.ns, + MLX5E_TC_PRIO, + MLX5E_TC_TABLE_NUM_ENTRIES, + 
MLX5E_TC_TABLE_NUM_GROUPS, 0); - if (IS_ERR(priv->fts.tc.t)) { + if (IS_ERR(priv->fs.tc.t)) { netdev_err(priv->netdev, "Failed to create tc offload table\n"); - return ERR_CAST(priv->fts.tc.t); + return ERR_CAST(priv->fs.tc.t); } table_created = true; } - rule = mlx5_add_flow_rule(priv->fts.tc.t, MLX5_MATCH_OUTER_HEADERS, + rule = mlx5_add_flow_rule(priv->fs.tc.t, MLX5_MATCH_OUTER_HEADERS, match_c, match_v, action, flow_tag, action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST ? &dest : NULL); if (IS_ERR(rule) && table_created) { - mlx5_destroy_flow_table(priv->fts.tc.t); - priv->fts.tc.t = NULL; + mlx5_destroy_flow_table(priv->fs.tc.t); + priv->fs.tc.t = NULL; } return rule; @@ -94,8 +95,8 @@ static void mlx5e_tc_del_flow(struct mlx5e_priv *priv, mlx5_del_flow_rule(rule); if (!mlx5e_tc_num_filters(priv)) { - mlx5_destroy_flow_table(priv->fts.tc.t); - priv->fts.tc.t = NULL; + mlx5_destroy_flow_table(priv->fs.tc.t); + priv->fs.tc.t = NULL; } } @@ -311,7 +312,7 @@ static int parse_tc_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol, struct tc_cls_flower_offload *f) { - struct mlx5e_tc_flow_table *tc = &priv->fts.tc; + struct mlx5e_tc_table *tc = &priv->fs.tc; u32 *match_c; u32 *match_v; int err = 0; @@ -377,7 +378,7 @@ int mlx5e_delete_flower(struct mlx5e_priv *priv, struct tc_cls_flower_offload *f) { struct mlx5e_tc_flow *flow; - struct mlx5e_tc_flow_table *tc = &priv->fts.tc; + struct mlx5e_tc_table *tc = &priv->fs.tc; flow = rhashtable_lookup_fast(&tc->ht, &f->cookie, tc->ht_params); @@ -402,7 +403,7 @@ static const struct rhashtable_params mlx5e_tc_flow_ht_params = { int mlx5e_tc_init(struct mlx5e_priv *priv) { - struct mlx5e_tc_flow_table *tc = &priv->fts.tc; + struct mlx5e_tc_table *tc = &priv->fs.tc; tc->ht_params = mlx5e_tc_flow_ht_params; return rhashtable_init(&tc->ht, &tc->ht_params); @@ -419,12 +420,12 @@ static void _mlx5e_tc_del_flow(void *ptr, void *arg) void mlx5e_tc_cleanup(struct mlx5e_priv *priv) { - struct mlx5e_tc_flow_table *tc = &priv->fts.tc; + struct mlx5e_tc_table *tc = &priv->fs.tc; rhashtable_free_and_destroy(&tc->ht, _mlx5e_tc_del_flow, priv); - if (!IS_ERR_OR_NULL(priv->fts.tc.t)) { - mlx5_destroy_flow_table(priv->fts.tc.t); - priv->fts.tc.t = NULL; + if (!IS_ERR_OR_NULL(tc->t)) { + mlx5_destroy_flow_table(tc->t); + tc->t = NULL; } } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h index d677428dc10f..a4f17b974d62 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h @@ -45,7 +45,7 @@ int mlx5e_delete_flower(struct mlx5e_priv *priv, static inline int mlx5e_tc_num_filters(struct mlx5e_priv *priv) { - return atomic_read(&priv->fts.tc.ht.nelems); + return atomic_read(&priv->fs.tc.ht.nelems); } #endif /* __MLX5_EN_TC_H__ */ From 33cfaaa8f36ffbee9ad259264334325b5449f5fe Mon Sep 17 00:00:00 2001 From: Maor Gottlieb Date: Fri, 29 Apr 2016 01:36:38 +0300 Subject: [PATCH 1181/1649] net/mlx5e: Split the main flow steering table Currently, the main flow table is used for two purposes: One is to do mac filtering and the other is to classify the packet l3-l4 header in order to steer the packet to the right RSS TIR. This design is very complex, for each configured mac address we have to add eleven rules (rule for each traffic type), the same if the device is put to promiscuous/allmulti mode. This scheme isn't scalable for future features like aRFS. 
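For reference, the eleven per-MAC rules follow from the traffic-type vector the
removed mlx5e_get_tt_vec() builds for a unicast address (MLX5E_UC); a sketch
only, with an illustrative constant name, the bits themselves are from the old
code:

	/* one steering rule per set bit, for every configured MAC */
	static const u32 uc_tt_vec =
		BIT(MLX5E_TT_IPV4_TCP)       | BIT(MLX5E_TT_IPV6_TCP)       |
		BIT(MLX5E_TT_IPV4_UDP)       | BIT(MLX5E_TT_IPV6_UDP)       |
		BIT(MLX5E_TT_IPV4_IPSEC_AH)  | BIT(MLX5E_TT_IPV6_IPSEC_AH)  |
		BIT(MLX5E_TT_IPV4_IPSEC_ESP) | BIT(MLX5E_TT_IPV6_IPSEC_ESP) |
		BIT(MLX5E_TT_IPV4)           | BIT(MLX5E_TT_IPV6)           |
		BIT(MLX5E_TT_ANY);	/* 11 traffic types */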
In order to simplify it, the main flow table is split to two flow tables: 1. l2 table - filter the packet dmac address, if there is a match we forward to the ttc flow table. 2. TTC (Traffic Type Classifier) table - classify the traffic type of the packet and steer the packet to the right TIR. In this new design, when new mac address is added, the driver adds only one flow rule instead of eleven. Signed-off-by: Maor Gottlieb Signed-off-by: Saeed Mahameed Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 42 +- .../net/ethernet/mellanox/mlx5/core/en_fs.c | 1123 ++++++++--------- .../net/ethernet/mellanox/mlx5/core/en_main.c | 2 +- .../net/ethernet/mellanox/mlx5/core/fs_core.c | 2 +- 4 files changed, 534 insertions(+), 635 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 02b964402156..2c9879c011b1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -399,31 +399,18 @@ struct mlx5e_vxlan_db { struct radix_tree_root tree; }; -struct mlx5e_eth_addr_info { +struct mlx5e_l2_rule { u8 addr[ETH_ALEN + 2]; - u32 tt_vec; - struct mlx5_flow_rule *ft_rule[MLX5E_NUM_TT]; + struct mlx5_flow_rule *rule; }; -#define MLX5E_ETH_ADDR_HASH_SIZE (1 << BITS_PER_BYTE) - struct mlx5e_flow_table { int num_groups; struct mlx5_flow_table *t; struct mlx5_flow_group **g; }; -struct mlx5e_main_table { - struct mlx5e_flow_table ft; - struct hlist_head netdev_uc[MLX5E_ETH_ADDR_HASH_SIZE]; - struct hlist_head netdev_mc[MLX5E_ETH_ADDR_HASH_SIZE]; - struct mlx5e_eth_addr_info broadcast; - struct mlx5e_eth_addr_info allmulti; - struct mlx5e_eth_addr_info promisc; - bool broadcast_enabled; - bool allmulti_enabled; - bool promisc_enabled; -}; +#define MLX5E_L2_ADDR_HASH_SIZE BIT(BITS_PER_BYTE) struct mlx5e_tc_table { struct mlx5_flow_table *t; @@ -441,11 +428,30 @@ struct mlx5e_vlan_table { bool filter_disabled; }; +struct mlx5e_l2_table { + struct mlx5e_flow_table ft; + struct hlist_head netdev_uc[MLX5E_L2_ADDR_HASH_SIZE]; + struct hlist_head netdev_mc[MLX5E_L2_ADDR_HASH_SIZE]; + struct mlx5e_l2_rule broadcast; + struct mlx5e_l2_rule allmulti; + struct mlx5e_l2_rule promisc; + bool broadcast_enabled; + bool allmulti_enabled; + bool promisc_enabled; +}; + +/* L3/L4 traffic type classifier */ +struct mlx5e_ttc_table { + struct mlx5e_flow_table ft; + struct mlx5_flow_rule *rules[MLX5E_NUM_TT]; +}; + struct mlx5e_flow_steering { struct mlx5_flow_namespace *ns; struct mlx5e_tc_table tc; struct mlx5e_vlan_table vlan; - struct mlx5e_main_table main; + struct mlx5e_l2_table l2; + struct mlx5e_ttc_table ttc; }; struct mlx5e_direct_tir { @@ -563,7 +569,7 @@ void mlx5e_update_stats(struct mlx5e_priv *priv); int mlx5e_create_flow_steering(struct mlx5e_priv *priv); void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv); -void mlx5e_init_eth_addr(struct mlx5e_priv *priv); +void mlx5e_init_l2_addr(struct mlx5e_priv *priv); void mlx5e_set_rx_mode_work(struct work_struct *work); void mlx5e_fill_hwstamp(struct mlx5e_tstamp *clock, u64 timestamp, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c index 3ee35b094c82..6e353b3a1422 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c @@ -37,9 +37,16 @@ #include #include "en.h" +static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv, + struct mlx5e_l2_rule *ai, int type); +static void 
mlx5e_del_l2_flow_rule(struct mlx5e_priv *priv, + struct mlx5e_l2_rule *ai); + +/* NIC prio FTS */ enum { MLX5E_VLAN_FT_LEVEL = 0, - MLX5E_MAIN_FT_LEVEL + MLX5E_L2_FT_LEVEL, + MLX5E_TTC_FT_LEVEL }; #define MLX5_SET_CFG(p, f, v) MLX5_SET(create_flow_group_in, p, f, v) @@ -63,21 +70,21 @@ enum { MLX5E_ACTION_DEL = 2, }; -struct mlx5e_eth_addr_hash_node { +struct mlx5e_l2_hash_node { struct hlist_node hlist; u8 action; - struct mlx5e_eth_addr_info ai; + struct mlx5e_l2_rule ai; }; -static inline int mlx5e_hash_eth_addr(u8 *addr) +static inline int mlx5e_hash_l2(u8 *addr) { return addr[5]; } -static void mlx5e_add_eth_addr_to_hash(struct hlist_head *hash, u8 *addr) +static void mlx5e_add_l2_to_hash(struct hlist_head *hash, u8 *addr) { - struct mlx5e_eth_addr_hash_node *hn; - int ix = mlx5e_hash_eth_addr(addr); + struct mlx5e_l2_hash_node *hn; + int ix = mlx5e_hash_l2(addr); int found = 0; hlist_for_each_entry(hn, &hash[ix], hlist) @@ -101,371 +108,12 @@ static void mlx5e_add_eth_addr_to_hash(struct hlist_head *hash, u8 *addr) hlist_add_head(&hn->hlist, &hash[ix]); } -static void mlx5e_del_eth_addr_from_hash(struct mlx5e_eth_addr_hash_node *hn) +static void mlx5e_del_l2_from_hash(struct mlx5e_l2_hash_node *hn) { hlist_del(&hn->hlist); kfree(hn); } -static void mlx5e_del_eth_addr_from_flow_table(struct mlx5e_priv *priv, - struct mlx5e_eth_addr_info *ai) -{ - if (ai->tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_ESP)) - mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6_IPSEC_ESP]); - - if (ai->tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_ESP)) - mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4_IPSEC_ESP]); - - if (ai->tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_AH)) - mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6_IPSEC_AH]); - - if (ai->tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_AH)) - mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4_IPSEC_AH]); - - if (ai->tt_vec & BIT(MLX5E_TT_IPV6_TCP)) - mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6_TCP]); - - if (ai->tt_vec & BIT(MLX5E_TT_IPV4_TCP)) - mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4_TCP]); - - if (ai->tt_vec & BIT(MLX5E_TT_IPV6_UDP)) - mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6_UDP]); - - if (ai->tt_vec & BIT(MLX5E_TT_IPV4_UDP)) - mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4_UDP]); - - if (ai->tt_vec & BIT(MLX5E_TT_IPV6)) - mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6]); - - if (ai->tt_vec & BIT(MLX5E_TT_IPV4)) - mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4]); - - if (ai->tt_vec & BIT(MLX5E_TT_ANY)) - mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_ANY]); -} - -static int mlx5e_get_eth_addr_type(u8 *addr) -{ - if (is_unicast_ether_addr(addr)) - return MLX5E_UC; - - if ((addr[0] == 0x01) && - (addr[1] == 0x00) && - (addr[2] == 0x5e) && - !(addr[3] & 0x80)) - return MLX5E_MC_IPV4; - - if ((addr[0] == 0x33) && - (addr[1] == 0x33)) - return MLX5E_MC_IPV6; - - return MLX5E_MC_OTHER; -} - -static u32 mlx5e_get_tt_vec(struct mlx5e_eth_addr_info *ai, int type) -{ - int eth_addr_type; - u32 ret; - - switch (type) { - case MLX5E_FULLMATCH: - eth_addr_type = mlx5e_get_eth_addr_type(ai->addr); - switch (eth_addr_type) { - case MLX5E_UC: - ret = - BIT(MLX5E_TT_IPV4_TCP) | - BIT(MLX5E_TT_IPV6_TCP) | - BIT(MLX5E_TT_IPV4_UDP) | - BIT(MLX5E_TT_IPV6_UDP) | - BIT(MLX5E_TT_IPV4_IPSEC_AH) | - BIT(MLX5E_TT_IPV6_IPSEC_AH) | - BIT(MLX5E_TT_IPV4_IPSEC_ESP) | - BIT(MLX5E_TT_IPV6_IPSEC_ESP) | - BIT(MLX5E_TT_IPV4) | - BIT(MLX5E_TT_IPV6) | - BIT(MLX5E_TT_ANY) | - 0; - break; - - case MLX5E_MC_IPV4: - ret = - BIT(MLX5E_TT_IPV4_UDP) | - BIT(MLX5E_TT_IPV4) | - 0; - break; - - case MLX5E_MC_IPV6: - ret = - BIT(MLX5E_TT_IPV6_UDP) | - 
BIT(MLX5E_TT_IPV6) | - 0; - break; - - case MLX5E_MC_OTHER: - ret = - BIT(MLX5E_TT_ANY) | - 0; - break; - } - - break; - - case MLX5E_ALLMULTI: - ret = - BIT(MLX5E_TT_IPV4_UDP) | - BIT(MLX5E_TT_IPV6_UDP) | - BIT(MLX5E_TT_IPV4) | - BIT(MLX5E_TT_IPV6) | - BIT(MLX5E_TT_ANY) | - 0; - break; - - default: /* MLX5E_PROMISC */ - ret = - BIT(MLX5E_TT_IPV4_TCP) | - BIT(MLX5E_TT_IPV6_TCP) | - BIT(MLX5E_TT_IPV4_UDP) | - BIT(MLX5E_TT_IPV6_UDP) | - BIT(MLX5E_TT_IPV4_IPSEC_AH) | - BIT(MLX5E_TT_IPV6_IPSEC_AH) | - BIT(MLX5E_TT_IPV4_IPSEC_ESP) | - BIT(MLX5E_TT_IPV6_IPSEC_ESP) | - BIT(MLX5E_TT_IPV4) | - BIT(MLX5E_TT_IPV6) | - BIT(MLX5E_TT_ANY) | - 0; - break; - } - - return ret; -} - -static int __mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv, - struct mlx5e_eth_addr_info *ai, - int type, u32 *mc, u32 *mv) -{ - struct mlx5_flow_destination dest; - u8 match_criteria_enable = 0; - struct mlx5_flow_rule **rule_p; - struct mlx5_flow_table *ft = priv->fs.main.ft.t; - u8 *mc_dmac = MLX5_ADDR_OF(fte_match_param, mc, - outer_headers.dmac_47_16); - u8 *mv_dmac = MLX5_ADDR_OF(fte_match_param, mv, - outer_headers.dmac_47_16); - u32 *tirn = priv->indir_tirn; - u32 tt_vec; - int err = 0; - - dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR; - - switch (type) { - case MLX5E_FULLMATCH: - match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; - eth_broadcast_addr(mc_dmac); - ether_addr_copy(mv_dmac, ai->addr); - break; - - case MLX5E_ALLMULTI: - match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; - mc_dmac[0] = 0x01; - mv_dmac[0] = 0x01; - break; - - case MLX5E_PROMISC: - break; - } - - tt_vec = mlx5e_get_tt_vec(ai, type); - - if (tt_vec & BIT(MLX5E_TT_ANY)) { - rule_p = &ai->ft_rule[MLX5E_TT_ANY]; - dest.tir_num = priv->direct_tir[0].tirn; - *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv, - MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, - MLX5_FS_DEFAULT_FLOW_TAG, &dest); - if (IS_ERR_OR_NULL(*rule_p)) - goto err_del_ai; - ai->tt_vec |= BIT(MLX5E_TT_ANY); - } - - match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; - MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype); - - if (tt_vec & BIT(MLX5E_TT_IPV4)) { - rule_p = &ai->ft_rule[MLX5E_TT_IPV4]; - dest.tir_num = tirn[MLX5E_TT_IPV4]; - MLX5_SET(fte_match_param, mv, outer_headers.ethertype, - ETH_P_IP); - *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv, - MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, - MLX5_FS_DEFAULT_FLOW_TAG, &dest); - if (IS_ERR_OR_NULL(*rule_p)) - goto err_del_ai; - ai->tt_vec |= BIT(MLX5E_TT_IPV4); - } - - if (tt_vec & BIT(MLX5E_TT_IPV6)) { - rule_p = &ai->ft_rule[MLX5E_TT_IPV6]; - dest.tir_num = tirn[MLX5E_TT_IPV6]; - MLX5_SET(fte_match_param, mv, outer_headers.ethertype, - ETH_P_IPV6); - *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv, - MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, - MLX5_FS_DEFAULT_FLOW_TAG, &dest); - if (IS_ERR_OR_NULL(*rule_p)) - goto err_del_ai; - ai->tt_vec |= BIT(MLX5E_TT_IPV6); - } - - MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol); - MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_UDP); - - if (tt_vec & BIT(MLX5E_TT_IPV4_UDP)) { - rule_p = &ai->ft_rule[MLX5E_TT_IPV4_UDP]; - dest.tir_num = tirn[MLX5E_TT_IPV4_UDP]; - MLX5_SET(fte_match_param, mv, outer_headers.ethertype, - ETH_P_IP); - *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv, - MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, - MLX5_FS_DEFAULT_FLOW_TAG, &dest); - if (IS_ERR_OR_NULL(*rule_p)) - goto err_del_ai; - ai->tt_vec |= BIT(MLX5E_TT_IPV4_UDP); - } - - if (tt_vec & BIT(MLX5E_TT_IPV6_UDP)) { - rule_p = 
&ai->ft_rule[MLX5E_TT_IPV6_UDP]; - dest.tir_num = tirn[MLX5E_TT_IPV6_UDP]; - MLX5_SET(fte_match_param, mv, outer_headers.ethertype, - ETH_P_IPV6); - *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv, - MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, - MLX5_FS_DEFAULT_FLOW_TAG, &dest); - if (IS_ERR_OR_NULL(*rule_p)) - goto err_del_ai; - ai->tt_vec |= BIT(MLX5E_TT_IPV6_UDP); - } - - MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_TCP); - - if (tt_vec & BIT(MLX5E_TT_IPV4_TCP)) { - rule_p = &ai->ft_rule[MLX5E_TT_IPV4_TCP]; - dest.tir_num = tirn[MLX5E_TT_IPV4_TCP]; - MLX5_SET(fte_match_param, mv, outer_headers.ethertype, - ETH_P_IP); - *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv, - MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, - MLX5_FS_DEFAULT_FLOW_TAG, &dest); - if (IS_ERR_OR_NULL(*rule_p)) - goto err_del_ai; - ai->tt_vec |= BIT(MLX5E_TT_IPV4_TCP); - } - - if (tt_vec & BIT(MLX5E_TT_IPV6_TCP)) { - rule_p = &ai->ft_rule[MLX5E_TT_IPV6_TCP]; - dest.tir_num = tirn[MLX5E_TT_IPV6_TCP]; - MLX5_SET(fte_match_param, mv, outer_headers.ethertype, - ETH_P_IPV6); - *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv, - MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, - MLX5_FS_DEFAULT_FLOW_TAG, &dest); - if (IS_ERR_OR_NULL(*rule_p)) - goto err_del_ai; - - ai->tt_vec |= BIT(MLX5E_TT_IPV6_TCP); - } - - MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_AH); - - if (tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_AH)) { - rule_p = &ai->ft_rule[MLX5E_TT_IPV4_IPSEC_AH]; - dest.tir_num = tirn[MLX5E_TT_IPV4_IPSEC_AH]; - MLX5_SET(fte_match_param, mv, outer_headers.ethertype, - ETH_P_IP); - *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv, - MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, - MLX5_FS_DEFAULT_FLOW_TAG, &dest); - if (IS_ERR_OR_NULL(*rule_p)) - goto err_del_ai; - ai->tt_vec |= BIT(MLX5E_TT_IPV4_IPSEC_AH); - } - - if (tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_AH)) { - rule_p = &ai->ft_rule[MLX5E_TT_IPV6_IPSEC_AH]; - dest.tir_num = tirn[MLX5E_TT_IPV6_IPSEC_AH]; - MLX5_SET(fte_match_param, mv, outer_headers.ethertype, - ETH_P_IPV6); - *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv, - MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, - MLX5_FS_DEFAULT_FLOW_TAG, &dest); - if (IS_ERR_OR_NULL(*rule_p)) - goto err_del_ai; - ai->tt_vec |= BIT(MLX5E_TT_IPV6_IPSEC_AH); - } - - MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_ESP); - - if (tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_ESP)) { - rule_p = &ai->ft_rule[MLX5E_TT_IPV4_IPSEC_ESP]; - dest.tir_num = tirn[MLX5E_TT_IPV4_IPSEC_ESP]; - MLX5_SET(fte_match_param, mv, outer_headers.ethertype, - ETH_P_IP); - *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv, - MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, - MLX5_FS_DEFAULT_FLOW_TAG, &dest); - if (IS_ERR_OR_NULL(*rule_p)) - goto err_del_ai; - ai->tt_vec |= BIT(MLX5E_TT_IPV4_IPSEC_ESP); - } - - if (tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_ESP)) { - rule_p = &ai->ft_rule[MLX5E_TT_IPV6_IPSEC_ESP]; - dest.tir_num = tirn[MLX5E_TT_IPV6_IPSEC_ESP]; - MLX5_SET(fte_match_param, mv, outer_headers.ethertype, - ETH_P_IPV6); - *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv, - MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, - MLX5_FS_DEFAULT_FLOW_TAG, &dest); - if (IS_ERR_OR_NULL(*rule_p)) - goto err_del_ai; - ai->tt_vec |= BIT(MLX5E_TT_IPV6_IPSEC_ESP); - } - - return 0; - -err_del_ai: - err = PTR_ERR(*rule_p); - *rule_p = NULL; - mlx5e_del_eth_addr_from_flow_table(priv, ai); - - return err; -} - -static int mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv, - struct mlx5e_eth_addr_info *ai, int type) -{ - u32 
*match_criteria; - u32 *match_value; - int err = 0; - - match_value = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param)); - match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param)); - if (!match_value || !match_criteria) { - netdev_err(priv->netdev, "%s: alloc failed\n", __func__); - err = -ENOMEM; - goto add_eth_addr_rule_out; - } - - err = __mlx5e_add_eth_addr_rule(priv, ai, type, match_criteria, - match_value); - -add_eth_addr_rule_out: - kvfree(match_criteria); - kvfree(match_value); - - return err; -} - static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv) { struct net_device *ndev = priv->netdev; @@ -526,7 +174,7 @@ static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv, int err = 0; dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; - dest.ft = priv->fs.main.ft.t; + dest.ft = priv->fs.l2.ft.t; match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.vlan_tag); @@ -661,21 +309,21 @@ int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto, } #define mlx5e_for_each_hash_node(hn, tmp, hash, i) \ - for (i = 0; i < MLX5E_ETH_ADDR_HASH_SIZE; i++) \ + for (i = 0; i < MLX5E_L2_ADDR_HASH_SIZE; i++) \ hlist_for_each_entry_safe(hn, tmp, &hash[i], hlist) -static void mlx5e_execute_action(struct mlx5e_priv *priv, - struct mlx5e_eth_addr_hash_node *hn) +static void mlx5e_execute_l2_action(struct mlx5e_priv *priv, + struct mlx5e_l2_hash_node *hn) { switch (hn->action) { case MLX5E_ACTION_ADD: - mlx5e_add_eth_addr_rule(priv, &hn->ai, MLX5E_FULLMATCH); + mlx5e_add_l2_flow_rule(priv, &hn->ai, MLX5E_FULLMATCH); hn->action = MLX5E_ACTION_NONE; break; case MLX5E_ACTION_DEL: - mlx5e_del_eth_addr_from_flow_table(priv, &hn->ai); - mlx5e_del_eth_addr_from_hash(hn); + mlx5e_del_l2_flow_rule(priv, &hn->ai); + mlx5e_del_l2_from_hash(hn); break; } } @@ -687,14 +335,14 @@ static void mlx5e_sync_netdev_addr(struct mlx5e_priv *priv) netif_addr_lock_bh(netdev); - mlx5e_add_eth_addr_to_hash(priv->fs.main.netdev_uc, - priv->netdev->dev_addr); + mlx5e_add_l2_to_hash(priv->fs.l2.netdev_uc, + priv->netdev->dev_addr); netdev_for_each_uc_addr(ha, netdev) - mlx5e_add_eth_addr_to_hash(priv->fs.main.netdev_uc, ha->addr); + mlx5e_add_l2_to_hash(priv->fs.l2.netdev_uc, ha->addr); netdev_for_each_mc_addr(ha, netdev) - mlx5e_add_eth_addr_to_hash(priv->fs.main.netdev_mc, ha->addr); + mlx5e_add_l2_to_hash(priv->fs.l2.netdev_mc, ha->addr); netif_addr_unlock_bh(netdev); } @@ -704,17 +352,17 @@ static void mlx5e_fill_addr_array(struct mlx5e_priv *priv, int list_type, { bool is_uc = (list_type == MLX5_NVPRT_LIST_TYPE_UC); struct net_device *ndev = priv->netdev; - struct mlx5e_eth_addr_hash_node *hn; + struct mlx5e_l2_hash_node *hn; struct hlist_head *addr_list; struct hlist_node *tmp; int i = 0; int hi; - addr_list = is_uc ? priv->fs.main.netdev_uc : priv->fs.main.netdev_mc; + addr_list = is_uc ? 
priv->fs.l2.netdev_uc : priv->fs.l2.netdev_mc; if (is_uc) /* Make sure our own address is pushed first */ ether_addr_copy(addr_array[i++], ndev->dev_addr); - else if (priv->fs.main.broadcast_enabled) + else if (priv->fs.l2.broadcast_enabled) ether_addr_copy(addr_array[i++], ndev->broadcast); mlx5e_for_each_hash_node(hn, tmp, addr_list, hi) { @@ -730,7 +378,7 @@ static void mlx5e_vport_context_update_addr_list(struct mlx5e_priv *priv, int list_type) { bool is_uc = (list_type == MLX5_NVPRT_LIST_TYPE_UC); - struct mlx5e_eth_addr_hash_node *hn; + struct mlx5e_l2_hash_node *hn; u8 (*addr_array)[ETH_ALEN] = NULL; struct hlist_head *addr_list; struct hlist_node *tmp; @@ -739,12 +387,12 @@ static void mlx5e_vport_context_update_addr_list(struct mlx5e_priv *priv, int err; int hi; - size = is_uc ? 0 : (priv->fs.main.broadcast_enabled ? 1 : 0); + size = is_uc ? 0 : (priv->fs.l2.broadcast_enabled ? 1 : 0); max_size = is_uc ? 1 << MLX5_CAP_GEN(priv->mdev, log_max_current_uc_list) : 1 << MLX5_CAP_GEN(priv->mdev, log_max_current_mc_list); - addr_list = is_uc ? priv->fs.main.netdev_uc : priv->fs.main.netdev_mc; + addr_list = is_uc ? priv->fs.l2.netdev_uc : priv->fs.l2.netdev_mc; mlx5e_for_each_hash_node(hn, tmp, addr_list, hi) size++; @@ -775,37 +423,37 @@ out: static void mlx5e_vport_context_update(struct mlx5e_priv *priv) { - struct mlx5e_main_table *main_table = &priv->fs.main; + struct mlx5e_l2_table *ea = &priv->fs.l2; mlx5e_vport_context_update_addr_list(priv, MLX5_NVPRT_LIST_TYPE_UC); mlx5e_vport_context_update_addr_list(priv, MLX5_NVPRT_LIST_TYPE_MC); mlx5_modify_nic_vport_promisc(priv->mdev, 0, - main_table->allmulti_enabled, - main_table->promisc_enabled); + ea->allmulti_enabled, + ea->promisc_enabled); } static void mlx5e_apply_netdev_addr(struct mlx5e_priv *priv) { - struct mlx5e_eth_addr_hash_node *hn; + struct mlx5e_l2_hash_node *hn; struct hlist_node *tmp; int i; - mlx5e_for_each_hash_node(hn, tmp, priv->fs.main.netdev_uc, i) - mlx5e_execute_action(priv, hn); + mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_uc, i) + mlx5e_execute_l2_action(priv, hn); - mlx5e_for_each_hash_node(hn, tmp, priv->fs.main.netdev_mc, i) - mlx5e_execute_action(priv, hn); + mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_mc, i) + mlx5e_execute_l2_action(priv, hn); } static void mlx5e_handle_netdev_addr(struct mlx5e_priv *priv) { - struct mlx5e_eth_addr_hash_node *hn; + struct mlx5e_l2_hash_node *hn; struct hlist_node *tmp; int i; - mlx5e_for_each_hash_node(hn, tmp, priv->fs.main.netdev_uc, i) + mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_uc, i) hn->action = MLX5E_ACTION_DEL; - mlx5e_for_each_hash_node(hn, tmp, priv->fs.main.netdev_mc, i) + mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_mc, i) hn->action = MLX5E_ACTION_DEL; if (!test_bit(MLX5E_STATE_DESTROYING, &priv->state)) @@ -819,7 +467,7 @@ void mlx5e_set_rx_mode_work(struct work_struct *work) struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv, set_rx_mode_work); - struct mlx5e_main_table *main_table = &priv->fs.main; + struct mlx5e_l2_table *ea = &priv->fs.l2; struct net_device *ndev = priv->netdev; bool rx_mode_enable = !test_bit(MLX5E_STATE_DESTROYING, &priv->state); @@ -827,40 +475,40 @@ void mlx5e_set_rx_mode_work(struct work_struct *work) bool allmulti_enabled = rx_mode_enable && (ndev->flags & IFF_ALLMULTI); bool broadcast_enabled = rx_mode_enable; - bool enable_promisc = !main_table->promisc_enabled && promisc_enabled; - bool disable_promisc = main_table->promisc_enabled && !promisc_enabled; - bool enable_allmulti = 
!main_table->allmulti_enabled && allmulti_enabled; - bool disable_allmulti = main_table->allmulti_enabled && !allmulti_enabled; - bool enable_broadcast = !main_table->broadcast_enabled && broadcast_enabled; - bool disable_broadcast = main_table->broadcast_enabled && !broadcast_enabled; + bool enable_promisc = !ea->promisc_enabled && promisc_enabled; + bool disable_promisc = ea->promisc_enabled && !promisc_enabled; + bool enable_allmulti = !ea->allmulti_enabled && allmulti_enabled; + bool disable_allmulti = ea->allmulti_enabled && !allmulti_enabled; + bool enable_broadcast = !ea->broadcast_enabled && broadcast_enabled; + bool disable_broadcast = ea->broadcast_enabled && !broadcast_enabled; if (enable_promisc) { - mlx5e_add_eth_addr_rule(priv, &main_table->promisc, MLX5E_PROMISC); + mlx5e_add_l2_flow_rule(priv, &ea->promisc, MLX5E_PROMISC); if (!priv->fs.vlan.filter_disabled) mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0); } if (enable_allmulti) - mlx5e_add_eth_addr_rule(priv, &main_table->allmulti, MLX5E_ALLMULTI); + mlx5e_add_l2_flow_rule(priv, &ea->allmulti, MLX5E_ALLMULTI); if (enable_broadcast) - mlx5e_add_eth_addr_rule(priv, &main_table->broadcast, MLX5E_FULLMATCH); + mlx5e_add_l2_flow_rule(priv, &ea->broadcast, MLX5E_FULLMATCH); mlx5e_handle_netdev_addr(priv); if (disable_broadcast) - mlx5e_del_eth_addr_from_flow_table(priv, &main_table->broadcast); + mlx5e_del_l2_flow_rule(priv, &ea->broadcast); if (disable_allmulti) - mlx5e_del_eth_addr_from_flow_table(priv, &main_table->allmulti); + mlx5e_del_l2_flow_rule(priv, &ea->allmulti); if (disable_promisc) { if (!priv->fs.vlan.filter_disabled) mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0); - mlx5e_del_eth_addr_from_flow_table(priv, &main_table->promisc); + mlx5e_del_l2_flow_rule(priv, &ea->promisc); } - main_table->promisc_enabled = promisc_enabled; - main_table->allmulti_enabled = allmulti_enabled; - main_table->broadcast_enabled = broadcast_enabled; + ea->promisc_enabled = promisc_enabled; + ea->allmulti_enabled = allmulti_enabled; + ea->broadcast_enabled = broadcast_enabled; mlx5e_vport_context_update(priv); } @@ -877,202 +525,9 @@ static void mlx5e_destroy_groups(struct mlx5e_flow_table *ft) ft->num_groups = 0; } -void mlx5e_init_eth_addr(struct mlx5e_priv *priv) +void mlx5e_init_l2_addr(struct mlx5e_priv *priv) { - ether_addr_copy(priv->fs.main.broadcast.addr, priv->netdev->broadcast); -} - -#define MLX5E_MAIN_GROUP0_SIZE BIT(3) -#define MLX5E_MAIN_GROUP1_SIZE BIT(1) -#define MLX5E_MAIN_GROUP2_SIZE BIT(0) -#define MLX5E_MAIN_GROUP3_SIZE BIT(14) -#define MLX5E_MAIN_GROUP4_SIZE BIT(13) -#define MLX5E_MAIN_GROUP5_SIZE BIT(11) -#define MLX5E_MAIN_GROUP6_SIZE BIT(2) -#define MLX5E_MAIN_GROUP7_SIZE BIT(1) -#define MLX5E_MAIN_GROUP8_SIZE BIT(0) -#define MLX5E_MAIN_TABLE_SIZE (MLX5E_MAIN_GROUP0_SIZE +\ - MLX5E_MAIN_GROUP1_SIZE +\ - MLX5E_MAIN_GROUP2_SIZE +\ - MLX5E_MAIN_GROUP3_SIZE +\ - MLX5E_MAIN_GROUP4_SIZE +\ - MLX5E_MAIN_GROUP5_SIZE +\ - MLX5E_MAIN_GROUP6_SIZE +\ - MLX5E_MAIN_GROUP7_SIZE +\ - MLX5E_MAIN_GROUP8_SIZE) - -static int __mlx5e_create_main_table_groups(struct mlx5e_flow_table *ft, u32 *in, - int inlen) -{ - u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria); - u8 *dmac = MLX5_ADDR_OF(create_flow_group_in, in, - match_criteria.outer_headers.dmac_47_16); - int err; - int ix = 0; - - memset(in, 0, inlen); - MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); - MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype); - MLX5_SET_TO_ONES(fte_match_param, mc, 
outer_headers.ip_protocol); - MLX5_SET_CFG(in, start_flow_index, ix); - ix += MLX5E_MAIN_GROUP0_SIZE; - MLX5_SET_CFG(in, end_flow_index, ix - 1); - ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); - if (IS_ERR(ft->g[ft->num_groups])) - goto err_destroy_groups; - ft->num_groups++; - - memset(in, 0, inlen); - MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); - MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype); - MLX5_SET_CFG(in, start_flow_index, ix); - ix += MLX5E_MAIN_GROUP1_SIZE; - MLX5_SET_CFG(in, end_flow_index, ix - 1); - ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); - if (IS_ERR(ft->g[ft->num_groups])) - goto err_destroy_groups; - ft->num_groups++; - - memset(in, 0, inlen); - MLX5_SET_CFG(in, start_flow_index, ix); - ix += MLX5E_MAIN_GROUP2_SIZE; - MLX5_SET_CFG(in, end_flow_index, ix - 1); - ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); - if (IS_ERR(ft->g[ft->num_groups])) - goto err_destroy_groups; - ft->num_groups++; - - memset(in, 0, inlen); - MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); - MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype); - MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol); - eth_broadcast_addr(dmac); - MLX5_SET_CFG(in, start_flow_index, ix); - ix += MLX5E_MAIN_GROUP3_SIZE; - MLX5_SET_CFG(in, end_flow_index, ix - 1); - ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); - if (IS_ERR(ft->g[ft->num_groups])) - goto err_destroy_groups; - ft->num_groups++; - - memset(in, 0, inlen); - MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); - MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype); - eth_broadcast_addr(dmac); - MLX5_SET_CFG(in, start_flow_index, ix); - ix += MLX5E_MAIN_GROUP4_SIZE; - MLX5_SET_CFG(in, end_flow_index, ix - 1); - ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); - if (IS_ERR(ft->g[ft->num_groups])) - goto err_destroy_groups; - ft->num_groups++; - - memset(in, 0, inlen); - MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); - eth_broadcast_addr(dmac); - MLX5_SET_CFG(in, start_flow_index, ix); - ix += MLX5E_MAIN_GROUP5_SIZE; - MLX5_SET_CFG(in, end_flow_index, ix - 1); - ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); - if (IS_ERR(ft->g[ft->num_groups])) - goto err_destroy_groups; - ft->num_groups++; - - memset(in, 0, inlen); - MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); - MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype); - MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol); - dmac[0] = 0x01; - MLX5_SET_CFG(in, start_flow_index, ix); - ix += MLX5E_MAIN_GROUP6_SIZE; - MLX5_SET_CFG(in, end_flow_index, ix - 1); - ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); - if (IS_ERR(ft->g[ft->num_groups])) - goto err_destroy_groups; - ft->num_groups++; - - memset(in, 0, inlen); - MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); - MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype); - dmac[0] = 0x01; - MLX5_SET_CFG(in, start_flow_index, ix); - ix += MLX5E_MAIN_GROUP7_SIZE; - MLX5_SET_CFG(in, end_flow_index, ix - 1); - ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); - if (IS_ERR(ft->g[ft->num_groups])) - goto err_destroy_groups; - ft->num_groups++; - - memset(in, 0, inlen); - MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); - dmac[0] = 0x01; - MLX5_SET_CFG(in, start_flow_index, ix); - ix += MLX5E_MAIN_GROUP8_SIZE; - MLX5_SET_CFG(in, end_flow_index, ix - 1); - 
ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); - if (IS_ERR(ft->g[ft->num_groups])) - goto err_destroy_groups; - ft->num_groups++; - - return 0; - -err_destroy_groups: - err = PTR_ERR(ft->g[ft->num_groups]); - ft->g[ft->num_groups] = NULL; - mlx5e_destroy_groups(ft); - - return err; -} - -static int mlx5e_create_main_table_groups(struct mlx5e_flow_table *ft) -{ - u32 *in; - int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); - int err; - - in = mlx5_vzalloc(inlen); - if (!in) - return -ENOMEM; - - err = __mlx5e_create_main_table_groups(ft, in, inlen); - - kvfree(in); - return err; -} - -static int mlx5e_create_main_table(struct mlx5e_priv *priv) -{ - struct mlx5e_flow_table *ft = &priv->fs.main.ft; - int err; - - ft->num_groups = 0; - ft->t = mlx5_create_flow_table(priv->fs.ns, MLX5E_NIC_PRIO, - MLX5E_MAIN_TABLE_SIZE, MLX5E_MAIN_FT_LEVEL); - - if (IS_ERR(ft->t)) { - err = PTR_ERR(ft->t); - ft->t = NULL; - return err; - } - ft->g = kcalloc(MLX5E_NUM_MAIN_GROUPS, sizeof(*ft->g), GFP_KERNEL); - if (!ft->g) { - err = -ENOMEM; - goto err_destroy_main_table; - } - - err = mlx5e_create_main_table_groups(ft); - if (err) - goto err_free_g; - return 0; - -err_free_g: - kfree(ft->g); - -err_destroy_main_table: - mlx5_destroy_flow_table(ft->t); - ft->t = NULL; - - return err; + ether_addr_copy(priv->fs.l2.broadcast.addr, priv->netdev->broadcast); } static void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft) @@ -1083,9 +538,431 @@ static void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft) ft->t = NULL; } -static void mlx5e_destroy_main_table(struct mlx5e_priv *priv) +static void mlx5e_cleanup_ttc_rules(struct mlx5e_ttc_table *ttc) { - mlx5e_destroy_flow_table(&priv->fs.main.ft); + int i; + + for (i = 0; i < MLX5E_NUM_TT; i++) { + if (!IS_ERR_OR_NULL(ttc->rules[i])) { + mlx5_del_flow_rule(ttc->rules[i]); + ttc->rules[i] = NULL; + } + } +} + +static struct { + u16 etype; + u8 proto; +} ttc_rules[] = { + [MLX5E_TT_IPV4_TCP] = { + .etype = ETH_P_IP, + .proto = IPPROTO_TCP, + }, + [MLX5E_TT_IPV6_TCP] = { + .etype = ETH_P_IPV6, + .proto = IPPROTO_TCP, + }, + [MLX5E_TT_IPV4_UDP] = { + .etype = ETH_P_IP, + .proto = IPPROTO_UDP, + }, + [MLX5E_TT_IPV6_UDP] = { + .etype = ETH_P_IPV6, + .proto = IPPROTO_UDP, + }, + [MLX5E_TT_IPV4_IPSEC_AH] = { + .etype = ETH_P_IP, + .proto = IPPROTO_AH, + }, + [MLX5E_TT_IPV6_IPSEC_AH] = { + .etype = ETH_P_IPV6, + .proto = IPPROTO_AH, + }, + [MLX5E_TT_IPV4_IPSEC_ESP] = { + .etype = ETH_P_IP, + .proto = IPPROTO_ESP, + }, + [MLX5E_TT_IPV6_IPSEC_ESP] = { + .etype = ETH_P_IPV6, + .proto = IPPROTO_ESP, + }, + [MLX5E_TT_IPV4] = { + .etype = ETH_P_IP, + .proto = 0, + }, + [MLX5E_TT_IPV6] = { + .etype = ETH_P_IPV6, + .proto = 0, + }, + [MLX5E_TT_ANY] = { + .etype = 0, + .proto = 0, + }, +}; + +static struct mlx5_flow_rule *mlx5e_generate_ttc_rule(struct mlx5e_priv *priv, + struct mlx5_flow_table *ft, + struct mlx5_flow_destination *dest, + u16 etype, + u8 proto) +{ + struct mlx5_flow_rule *rule; + u8 match_criteria_enable = 0; + u32 *match_criteria; + u32 *match_value; + int err = 0; + + match_value = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param)); + match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param)); + if (!match_value || !match_criteria) { + netdev_err(priv->netdev, "%s: alloc failed\n", __func__); + err = -ENOMEM; + goto out; + } + + if (proto) { + match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; + MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.ip_protocol); + MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol, 
proto); + } + if (etype) { + match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; + MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.ethertype); + MLX5_SET(fte_match_param, match_value, outer_headers.ethertype, etype); + } + + rule = mlx5_add_flow_rule(ft, match_criteria_enable, + match_criteria, match_value, + MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, + MLX5_FS_DEFAULT_FLOW_TAG, + dest); + if (IS_ERR(rule)) { + err = PTR_ERR(rule); + netdev_err(priv->netdev, "%s: add rule failed\n", __func__); + } +out: + kvfree(match_criteria); + kvfree(match_value); + return err ? ERR_PTR(err) : rule; +} + +static int mlx5e_generate_ttc_table_rules(struct mlx5e_priv *priv) +{ + struct mlx5_flow_destination dest; + struct mlx5e_ttc_table *ttc; + struct mlx5_flow_rule **rules; + struct mlx5_flow_table *ft; + int tt; + int err; + + ttc = &priv->fs.ttc; + ft = ttc->ft.t; + rules = ttc->rules; + + dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR; + for (tt = 0; tt < MLX5E_NUM_TT; tt++) { + if (tt == MLX5E_TT_ANY) + dest.tir_num = priv->direct_tir[0].tirn; + else + dest.tir_num = priv->indir_tirn[tt]; + rules[tt] = mlx5e_generate_ttc_rule(priv, ft, &dest, + ttc_rules[tt].etype, + ttc_rules[tt].proto); + if (IS_ERR(rules[tt])) + goto del_rules; + } + + return 0; + +del_rules: + err = PTR_ERR(rules[tt]); + rules[tt] = NULL; + mlx5e_cleanup_ttc_rules(ttc); + return err; +} + +#define MLX5E_TTC_NUM_GROUPS 3 +#define MLX5E_TTC_GROUP1_SIZE BIT(3) +#define MLX5E_TTC_GROUP2_SIZE BIT(1) +#define MLX5E_TTC_GROUP3_SIZE BIT(0) +#define MLX5E_TTC_TABLE_SIZE (MLX5E_TTC_GROUP1_SIZE +\ + MLX5E_TTC_GROUP2_SIZE +\ + MLX5E_TTC_GROUP3_SIZE) +static int mlx5e_create_ttc_table_groups(struct mlx5e_ttc_table *ttc) +{ + int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); + struct mlx5e_flow_table *ft = &ttc->ft; + int ix = 0; + u32 *in; + int err; + u8 *mc; + + ft->g = kcalloc(MLX5E_TTC_NUM_GROUPS, + sizeof(*ft->g), GFP_KERNEL); + if (!ft->g) + return -ENOMEM; + in = mlx5_vzalloc(inlen); + if (!in) { + kfree(ft->g); + return -ENOMEM; + } + + /* L4 Group */ + mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria); + MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol); + MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype); + MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); + MLX5_SET_CFG(in, start_flow_index, ix); + ix += MLX5E_TTC_GROUP1_SIZE; + MLX5_SET_CFG(in, end_flow_index, ix - 1); + ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); + if (IS_ERR(ft->g[ft->num_groups])) + goto err; + ft->num_groups++; + + /* L3 Group */ + MLX5_SET(fte_match_param, mc, outer_headers.ip_protocol, 0); + MLX5_SET_CFG(in, start_flow_index, ix); + ix += MLX5E_TTC_GROUP2_SIZE; + MLX5_SET_CFG(in, end_flow_index, ix - 1); + ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); + if (IS_ERR(ft->g[ft->num_groups])) + goto err; + ft->num_groups++; + + /* Any Group */ + memset(in, 0, inlen); + MLX5_SET_CFG(in, start_flow_index, ix); + ix += MLX5E_TTC_GROUP3_SIZE; + MLX5_SET_CFG(in, end_flow_index, ix - 1); + ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); + if (IS_ERR(ft->g[ft->num_groups])) + goto err; + ft->num_groups++; + + kvfree(in); + return 0; + +err: + err = PTR_ERR(ft->g[ft->num_groups]); + ft->g[ft->num_groups] = NULL; + kvfree(in); + + return err; +} + +static void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv) +{ + struct mlx5e_ttc_table *ttc = &priv->fs.ttc; + + mlx5e_cleanup_ttc_rules(ttc); + mlx5e_destroy_flow_table(&ttc->ft); +} + +static int mlx5e_create_ttc_table(struct 
mlx5e_priv *priv) +{ + struct mlx5e_ttc_table *ttc = &priv->fs.ttc; + struct mlx5e_flow_table *ft = &ttc->ft; + int err; + + ft->t = mlx5_create_flow_table(priv->fs.ns, MLX5E_NIC_PRIO, + MLX5E_TTC_TABLE_SIZE, MLX5E_TTC_FT_LEVEL); + if (IS_ERR(ft->t)) { + err = PTR_ERR(ft->t); + ft->t = NULL; + return err; + } + + err = mlx5e_create_ttc_table_groups(ttc); + if (err) + goto err; + + err = mlx5e_generate_ttc_table_rules(priv); + if (err) + goto err; + + return 0; +err: + mlx5e_destroy_flow_table(ft); + return err; +} + +static void mlx5e_del_l2_flow_rule(struct mlx5e_priv *priv, + struct mlx5e_l2_rule *ai) +{ + if (!IS_ERR_OR_NULL(ai->rule)) { + mlx5_del_flow_rule(ai->rule); + ai->rule = NULL; + } +} + +static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv, + struct mlx5e_l2_rule *ai, int type) +{ + struct mlx5_flow_table *ft = priv->fs.l2.ft.t; + struct mlx5_flow_destination dest; + u8 match_criteria_enable = 0; + u32 *match_criteria; + u32 *match_value; + int err = 0; + u8 *mc_dmac; + u8 *mv_dmac; + + match_value = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param)); + match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param)); + if (!match_value || !match_criteria) { + netdev_err(priv->netdev, "%s: alloc failed\n", __func__); + err = -ENOMEM; + goto add_l2_rule_out; + } + + mc_dmac = MLX5_ADDR_OF(fte_match_param, match_criteria, + outer_headers.dmac_47_16); + mv_dmac = MLX5_ADDR_OF(fte_match_param, match_value, + outer_headers.dmac_47_16); + + dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; + dest.ft = priv->fs.ttc.ft.t; + + switch (type) { + case MLX5E_FULLMATCH: + match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; + eth_broadcast_addr(mc_dmac); + ether_addr_copy(mv_dmac, ai->addr); + break; + + case MLX5E_ALLMULTI: + match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; + mc_dmac[0] = 0x01; + mv_dmac[0] = 0x01; + break; + + case MLX5E_PROMISC: + break; + } + + ai->rule = mlx5_add_flow_rule(ft, match_criteria_enable, match_criteria, + match_value, + MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, + MLX5_FS_DEFAULT_FLOW_TAG, &dest); + if (IS_ERR(ai->rule)) { + netdev_err(priv->netdev, "%s: add l2 rule(mac:%pM) failed\n", + __func__, mv_dmac); + err = PTR_ERR(ai->rule); + ai->rule = NULL; + } + +add_l2_rule_out: + kvfree(match_criteria); + kvfree(match_value); + + return err; +} + +#define MLX5E_NUM_L2_GROUPS 3 +#define MLX5E_L2_GROUP1_SIZE BIT(0) +#define MLX5E_L2_GROUP2_SIZE BIT(15) +#define MLX5E_L2_GROUP3_SIZE BIT(0) +#define MLX5E_L2_TABLE_SIZE (MLX5E_L2_GROUP1_SIZE +\ + MLX5E_L2_GROUP2_SIZE +\ + MLX5E_L2_GROUP3_SIZE) +static int mlx5e_create_l2_table_groups(struct mlx5e_l2_table *l2_table) +{ + int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); + struct mlx5e_flow_table *ft = &l2_table->ft; + int ix = 0; + u8 *mc_dmac; + u32 *in; + int err; + u8 *mc; + + ft->g = kcalloc(MLX5E_NUM_L2_GROUPS, sizeof(*ft->g), GFP_KERNEL); + if (!ft->g) + return -ENOMEM; + in = mlx5_vzalloc(inlen); + if (!in) { + kfree(ft->g); + return -ENOMEM; + } + + mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria); + mc_dmac = MLX5_ADDR_OF(fte_match_param, mc, + outer_headers.dmac_47_16); + /* Flow Group for promiscuous */ + MLX5_SET_CFG(in, start_flow_index, ix); + ix += MLX5E_L2_GROUP1_SIZE; + MLX5_SET_CFG(in, end_flow_index, ix - 1); + ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); + if (IS_ERR(ft->g[ft->num_groups])) + goto err_destroy_groups; + ft->num_groups++; + + /* Flow Group for full match */ + eth_broadcast_addr(mc_dmac); + MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); + 
MLX5_SET_CFG(in, start_flow_index, ix); + ix += MLX5E_L2_GROUP2_SIZE; + MLX5_SET_CFG(in, end_flow_index, ix - 1); + ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); + if (IS_ERR(ft->g[ft->num_groups])) + goto err_destroy_groups; + ft->num_groups++; + + /* Flow Group for allmulti */ + eth_zero_addr(mc_dmac); + mc_dmac[0] = 0x01; + MLX5_SET_CFG(in, start_flow_index, ix); + ix += MLX5E_L2_GROUP3_SIZE; + MLX5_SET_CFG(in, end_flow_index, ix - 1); + ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); + if (IS_ERR(ft->g[ft->num_groups])) + goto err_destroy_groups; + ft->num_groups++; + + kvfree(in); + return 0; + +err_destroy_groups: + err = PTR_ERR(ft->g[ft->num_groups]); + ft->g[ft->num_groups] = NULL; + mlx5e_destroy_groups(ft); + kvfree(in); + + return err; +} + +static void mlx5e_destroy_l2_table(struct mlx5e_priv *priv) +{ + mlx5e_destroy_flow_table(&priv->fs.l2.ft); +} + +static int mlx5e_create_l2_table(struct mlx5e_priv *priv) +{ + struct mlx5e_l2_table *l2_table = &priv->fs.l2; + struct mlx5e_flow_table *ft = &l2_table->ft; + int err; + + ft->num_groups = 0; + ft->t = mlx5_create_flow_table(priv->fs.ns, MLX5E_NIC_PRIO, + MLX5E_L2_TABLE_SIZE, MLX5E_L2_FT_LEVEL); + + if (IS_ERR(ft->t)) { + err = PTR_ERR(ft->t); + ft->t = NULL; + return err; + } + + err = mlx5e_create_l2_table_groups(l2_table); + if (err) + goto err_destroy_flow_table; + + return 0; + +err_destroy_flow_table: + mlx5_destroy_flow_table(ft->t); + ft->t = NULL; + + return err; } #define MLX5E_NUM_VLAN_GROUPS 2 @@ -1206,18 +1083,33 @@ int mlx5e_create_flow_steering(struct mlx5e_priv *priv) if (!priv->fs.ns) return -EINVAL; - err = mlx5e_create_main_table(priv); - if (err) + err = mlx5e_create_ttc_table(priv); + if (err) { + netdev_err(priv->netdev, "Failed to create ttc table, err=%d\n", + err); return err; + } + + err = mlx5e_create_l2_table(priv); + if (err) { + netdev_err(priv->netdev, "Failed to create l2 table, err=%d\n", + err); + goto err_destroy_ttc_table; + } err = mlx5e_create_vlan_table(priv); - if (err) - goto err_destroy_main_table; + if (err) { + netdev_err(priv->netdev, "Failed to create vlan table, err=%d\n", + err); + goto err_destroy_l2_table; + } return 0; -err_destroy_main_table: - mlx5e_destroy_main_table(priv); +err_destroy_l2_table: + mlx5e_destroy_l2_table(priv); +err_destroy_ttc_table: + mlx5e_destroy_ttc_table(priv); return err; } @@ -1226,5 +1118,6 @@ void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv) { mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0); mlx5e_destroy_vlan_table(priv); - mlx5e_destroy_main_table(priv); + mlx5e_destroy_l2_table(priv); + mlx5e_destroy_ttc_table(priv); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 953c8afd8559..518192e59779 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -2977,7 +2977,7 @@ static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev) mlx5e_create_q_counter(priv); - mlx5e_init_eth_addr(priv); + mlx5e_init_l2_addr(priv); mlx5e_vxlan_init(priv); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 2b822933557d..83fe8643d89a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -74,7 +74,7 @@ #define BY_PASS_MIN_LEVEL (KERNEL_MIN_LEVEL + MLX5_BY_PASS_NUM_PRIOS +\ LEFTOVERS_NUM_PRIOS) -#define KERNEL_NIC_PRIO_NUM_LEVELS 2 +#define 
KERNEL_NIC_PRIO_NUM_LEVELS 3 #define KERNEL_NIC_NUM_PRIOS 1 /* One more level for tc */ #define KERNEL_MIN_LEVEL (KERNEL_NIC_PRIO_NUM_LEVELS + 1) From 5a7b27eb9cf3986f487469b57a3a41286d2e7100 Mon Sep 17 00:00:00 2001 From: Maor Gottlieb Date: Fri, 29 Apr 2016 01:36:39 +0300 Subject: [PATCH 1182/1649] net/mlx5: Initializing CPU reverse mapping Allocating CPU rmap and add entry for each IRQ. CPU rmap is used in aRFS to get the RX queue number of the RX completion interrupts. Signed-off-by: Maor Gottlieb Signed-off-by: Saeed Mahameed Signed-off-by: David S. Miller --- .../net/ethernet/mellanox/mlx5/core/en_main.c | 3 +++ drivers/net/ethernet/mellanox/mlx5/core/main.c | 18 ++++++++++++++++++ include/linux/mlx5/driver.h | 3 +++ 3 files changed, 24 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 518192e59779..8fee224fb98c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -1691,6 +1691,9 @@ int mlx5e_open_locked(struct net_device *netdev) mlx5e_redirect_rqts(priv); mlx5e_update_carrier(priv); mlx5e_timestamp_init(priv); +#ifdef CONFIG_RFS_ACCEL + priv->netdev->rx_cpu_rmap = priv->mdev->rmap; +#endif schedule_delayed_work(&priv->update_stats_work, 0); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 6892746fd10d..6feef7fb9d6a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -48,6 +48,9 @@ #include #include #include +#ifdef CONFIG_RFS_ACCEL +#include +#endif #include "mlx5_core.h" #include "fs_core.h" #ifdef CONFIG_MLX5_CORE_EN @@ -665,6 +668,12 @@ static void free_comp_eqs(struct mlx5_core_dev *dev) struct mlx5_eq_table *table = &dev->priv.eq_table; struct mlx5_eq *eq, *n; +#ifdef CONFIG_RFS_ACCEL + if (dev->rmap) { + free_irq_cpu_rmap(dev->rmap); + dev->rmap = NULL; + } +#endif spin_lock(&table->lock); list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) { list_del(&eq->list); @@ -691,6 +700,11 @@ static int alloc_comp_eqs(struct mlx5_core_dev *dev) INIT_LIST_HEAD(&table->comp_eqs_list); ncomp_vec = table->num_comp_vectors; nent = MLX5_COMP_EQ_SIZE; +#ifdef CONFIG_RFS_ACCEL + dev->rmap = alloc_irq_cpu_rmap(ncomp_vec); + if (!dev->rmap) + return -ENOMEM; +#endif for (i = 0; i < ncomp_vec; i++) { eq = kzalloc(sizeof(*eq), GFP_KERNEL); if (!eq) { @@ -698,6 +712,10 @@ static int alloc_comp_eqs(struct mlx5_core_dev *dev) goto clean; } +#ifdef CONFIG_RFS_ACCEL + irq_cpu_rmap_add(dev->rmap, + dev->priv.msix_arr[i + MLX5_EQ_VEC_COMP_BASE].vector); +#endif snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_comp%d", i); err = mlx5_create_map_eq(dev, eq, i + MLX5_EQ_VEC_COMP_BASE, nent, 0, diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 96a428dcac9f..d5529449ef47 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -560,6 +560,9 @@ struct mlx5_core_dev { struct mlx5_profile *profile; atomic_t num_qps; u32 issi; +#ifdef CONFIG_RFS_ACCEL + struct cpu_rmap *rmap; +#endif }; struct mlx5_db { From 1cabe6b0965ec067ac60e8f182f16d479a3b9a5c Mon Sep 17 00:00:00 2001 From: Maor Gottlieb Date: Fri, 29 Apr 2016 01:36:40 +0300 Subject: [PATCH 1183/1649] net/mlx5e: Create aRFS flow tables Create the following four flow tables for aRFS usage: 1. IPv4 TCP - filtering 4-tuple of IPv4 TCP packets. 2. IPv6 TCP - filtering 4-tuple of IPv6 TCP packets. 3. 
IPv4 UDP - filtering 4-tuple of IPv4 UDP packets. 4. IPv6 UDP - filtering 4-tuple of IPv6 UDP packets. Each flow table has two flow groups: one for the 4-tuple filtering (full match) and the other contains * rule for miss rule. Full match rule means a hit for aRFS and packet will be forwarded to the dedicated RQ/Core, miss rule packets will be forwarded to default RSS hashing. Signed-off-by: Maor Gottlieb Signed-off-by: Saeed Mahameed Signed-off-by: David S. Miller --- .../net/ethernet/mellanox/mlx5/core/Makefile | 1 + drivers/net/ethernet/mellanox/mlx5/core/en.h | 41 +++ .../net/ethernet/mellanox/mlx5/core/en_arfs.c | 251 ++++++++++++++++++ .../net/ethernet/mellanox/mlx5/core/en_fs.c | 23 +- .../net/ethernet/mellanox/mlx5/core/en_main.c | 8 +- .../net/ethernet/mellanox/mlx5/core/fs_core.c | 3 +- 6 files changed, 313 insertions(+), 14 deletions(-) create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile index 4fc45ee0c5d1..679e18ffb3a6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile +++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile @@ -9,3 +9,4 @@ mlx5_core-$(CONFIG_MLX5_CORE_EN) += wq.o eswitch.o \ en_txrx.o en_clock.o vxlan.o en_tc.o mlx5_core-$(CONFIG_MLX5_CORE_EN_DCB) += en_dcbnl.o +mlx5_core-$(CONFIG_RFS_ACCEL) += en_arfs.o diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 2c9879c011b1..999e05826490 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -48,6 +48,8 @@ #include "mlx5_core.h" #include "en_stats.h" +#define MLX5_SET_CFG(p, f, v) MLX5_SET(create_flow_group_in, p, f, v) + #define MLX5E_MAX_NUM_TC 8 #define MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE 0x6 @@ -446,12 +448,38 @@ struct mlx5e_ttc_table { struct mlx5_flow_rule *rules[MLX5E_NUM_TT]; }; +struct arfs_table { + struct mlx5e_flow_table ft; + struct mlx5_flow_rule *default_rule; +}; + +enum arfs_type { + ARFS_IPV4_TCP, + ARFS_IPV6_TCP, + ARFS_IPV4_UDP, + ARFS_IPV6_UDP, + ARFS_NUM_TYPES, +}; + +struct mlx5e_arfs_tables { + struct arfs_table arfs_tables[ARFS_NUM_TYPES]; +}; + +/* NIC prio FTS */ +enum { + MLX5E_VLAN_FT_LEVEL = 0, + MLX5E_L2_FT_LEVEL, + MLX5E_TTC_FT_LEVEL, + MLX5E_ARFS_FT_LEVEL +}; + struct mlx5e_flow_steering { struct mlx5_flow_namespace *ns; struct mlx5e_tc_table tc; struct mlx5e_vlan_table vlan; struct mlx5e_l2_table l2; struct mlx5e_ttc_table ttc; + struct mlx5e_arfs_tables arfs; }; struct mlx5e_direct_tir { @@ -570,6 +598,7 @@ void mlx5e_update_stats(struct mlx5e_priv *priv); int mlx5e_create_flow_steering(struct mlx5e_priv *priv); void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv); void mlx5e_init_l2_addr(struct mlx5e_priv *priv); +void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft); void mlx5e_set_rx_mode_work(struct work_struct *work); void mlx5e_fill_hwstamp(struct mlx5e_tstamp *clock, u64 timestamp, @@ -646,6 +675,18 @@ extern const struct dcbnl_rtnl_ops mlx5e_dcbnl_ops; int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets); #endif +#ifndef CONFIG_RFS_ACCEL +static inline int mlx5e_arfs_create_tables(struct mlx5e_priv *priv) +{ + return 0; +} + +static inline void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv) {} +#else +int mlx5e_arfs_create_tables(struct mlx5e_priv *priv); +void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv); +#endif + u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev); #endif 
/* __MLX5_EN_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c new file mode 100644 index 000000000000..cd504197855b --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c @@ -0,0 +1,251 @@ +/* + * Copyright (c) 2016, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include "en.h" +#include + +static void arfs_destroy_table(struct arfs_table *arfs_t) +{ + mlx5_del_flow_rule(arfs_t->default_rule); + mlx5e_destroy_flow_table(&arfs_t->ft); +} + +void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv) +{ + int i; + + if (!(priv->netdev->hw_features & NETIF_F_NTUPLE)) + return; + for (i = 0; i < ARFS_NUM_TYPES; i++) { + if (!IS_ERR_OR_NULL(priv->fs.arfs.arfs_tables[i].ft.t)) + arfs_destroy_table(&priv->fs.arfs.arfs_tables[i]); + } +} + +static int arfs_add_default_rule(struct mlx5e_priv *priv, + enum arfs_type type) +{ + struct arfs_table *arfs_t = &priv->fs.arfs.arfs_tables[type]; + struct mlx5_flow_destination dest; + u8 match_criteria_enable = 0; + u32 *tirn = priv->indir_tirn; + u32 *match_criteria; + u32 *match_value; + int err = 0; + + match_value = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param)); + match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param)); + if (!match_value || !match_criteria) { + netdev_err(priv->netdev, "%s: alloc failed\n", __func__); + err = -ENOMEM; + goto out; + } + + dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR; + switch (type) { + case ARFS_IPV4_TCP: + dest.tir_num = tirn[MLX5E_TT_IPV4_TCP]; + break; + case ARFS_IPV4_UDP: + dest.tir_num = tirn[MLX5E_TT_IPV4_UDP]; + break; + case ARFS_IPV6_TCP: + dest.tir_num = tirn[MLX5E_TT_IPV6_TCP]; + break; + case ARFS_IPV6_UDP: + dest.tir_num = tirn[MLX5E_TT_IPV6_UDP]; + break; + default: + err = -EINVAL; + goto out; + } + + arfs_t->default_rule = mlx5_add_flow_rule(arfs_t->ft.t, match_criteria_enable, + match_criteria, match_value, + MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, + MLX5_FS_DEFAULT_FLOW_TAG, + &dest); + if (IS_ERR(arfs_t->default_rule)) { + err = PTR_ERR(arfs_t->default_rule); + arfs_t->default_rule = NULL; + netdev_err(priv->netdev, "%s: add rule failed, arfs type=%d\n", + __func__, type); + } +out: + 
kvfree(match_criteria); + kvfree(match_value); + return err; +} + +#define MLX5E_ARFS_NUM_GROUPS 2 +#define MLX5E_ARFS_GROUP1_SIZE BIT(12) +#define MLX5E_ARFS_GROUP2_SIZE BIT(0) +#define MLX5E_ARFS_TABLE_SIZE (MLX5E_ARFS_GROUP1_SIZE +\ + MLX5E_ARFS_GROUP2_SIZE) +static int arfs_create_groups(struct mlx5e_flow_table *ft, + enum arfs_type type) +{ + int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); + void *outer_headers_c; + int ix = 0; + u32 *in; + int err; + u8 *mc; + + ft->g = kcalloc(MLX5E_ARFS_NUM_GROUPS, + sizeof(*ft->g), GFP_KERNEL); + in = mlx5_vzalloc(inlen); + if (!in || !ft->g) { + kvfree(ft->g); + kvfree(in); + return -ENOMEM; + } + + mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria); + outer_headers_c = MLX5_ADDR_OF(fte_match_param, mc, + outer_headers); + MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, ethertype); + switch (type) { + case ARFS_IPV4_TCP: + case ARFS_IPV6_TCP: + MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, tcp_dport); + MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, tcp_sport); + break; + case ARFS_IPV4_UDP: + case ARFS_IPV6_UDP: + MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, udp_dport); + MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, udp_sport); + break; + default: + err = -EINVAL; + goto out; + } + + switch (type) { + case ARFS_IPV4_TCP: + case ARFS_IPV4_UDP: + MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, + src_ipv4_src_ipv6.ipv4_layout.ipv4); + MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, + dst_ipv4_dst_ipv6.ipv4_layout.ipv4); + break; + case ARFS_IPV6_TCP: + case ARFS_IPV6_UDP: + memset(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c, + src_ipv4_src_ipv6.ipv6_layout.ipv6), + 0xff, 16); + memset(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c, + dst_ipv4_dst_ipv6.ipv6_layout.ipv6), + 0xff, 16); + break; + default: + err = -EINVAL; + goto out; + } + + MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); + MLX5_SET_CFG(in, start_flow_index, ix); + ix += MLX5E_ARFS_GROUP1_SIZE; + MLX5_SET_CFG(in, end_flow_index, ix - 1); + ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); + if (IS_ERR(ft->g[ft->num_groups])) + goto err; + ft->num_groups++; + + memset(in, 0, inlen); + MLX5_SET_CFG(in, start_flow_index, ix); + ix += MLX5E_ARFS_GROUP2_SIZE; + MLX5_SET_CFG(in, end_flow_index, ix - 1); + ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); + if (IS_ERR(ft->g[ft->num_groups])) + goto err; + ft->num_groups++; + + kvfree(in); + return 0; + +err: + err = PTR_ERR(ft->g[ft->num_groups]); + ft->g[ft->num_groups] = NULL; +out: + kvfree(in); + + return err; +} + +static int arfs_create_table(struct mlx5e_priv *priv, + enum arfs_type type) +{ + struct mlx5e_arfs_tables *arfs = &priv->fs.arfs; + struct mlx5e_flow_table *ft = &arfs->arfs_tables[type].ft; + int err; + + ft->t = mlx5_create_flow_table(priv->fs.ns, MLX5E_NIC_PRIO, + MLX5E_ARFS_TABLE_SIZE, MLX5E_ARFS_FT_LEVEL); + if (IS_ERR(ft->t)) { + err = PTR_ERR(ft->t); + ft->t = NULL; + return err; + } + + err = arfs_create_groups(ft, type); + if (err) + goto err; + + err = arfs_add_default_rule(priv, type); + if (err) + goto err; + + return 0; +err: + mlx5e_destroy_flow_table(ft); + return err; +} + +int mlx5e_arfs_create_tables(struct mlx5e_priv *priv) +{ + int err = 0; + int i; + + if (!(priv->netdev->hw_features & NETIF_F_NTUPLE)) + return 0; + + for (i = 0; i < ARFS_NUM_TYPES; i++) { + err = arfs_create_table(priv, i); + if (err) + goto err; + } + return 0; +err: + mlx5e_arfs_destroy_tables(priv); + 
return err; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c index 6e353b3a1422..b32740092854 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c @@ -42,15 +42,6 @@ static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv, static void mlx5e_del_l2_flow_rule(struct mlx5e_priv *priv, struct mlx5e_l2_rule *ai); -/* NIC prio FTS */ -enum { - MLX5E_VLAN_FT_LEVEL = 0, - MLX5E_L2_FT_LEVEL, - MLX5E_TTC_FT_LEVEL -}; - -#define MLX5_SET_CFG(p, f, v) MLX5_SET(create_flow_group_in, p, f, v) - enum { MLX5E_FULLMATCH = 0, MLX5E_ALLMULTI = 1, @@ -530,7 +521,7 @@ void mlx5e_init_l2_addr(struct mlx5e_priv *priv) ether_addr_copy(priv->fs.l2.broadcast.addr, priv->netdev->broadcast); } -static void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft) +void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft) { mlx5e_destroy_groups(ft); kfree(ft->g); @@ -1083,11 +1074,18 @@ int mlx5e_create_flow_steering(struct mlx5e_priv *priv) if (!priv->fs.ns) return -EINVAL; + err = mlx5e_arfs_create_tables(priv); + if (err) { + netdev_err(priv->netdev, "Failed to create arfs tables, err=%d\n", + err); + priv->netdev->hw_features &= ~NETIF_F_NTUPLE; + } + err = mlx5e_create_ttc_table(priv); if (err) { netdev_err(priv->netdev, "Failed to create ttc table, err=%d\n", err); - return err; + goto err_destroy_arfs_tables; } err = mlx5e_create_l2_table(priv); @@ -1110,6 +1108,8 @@ err_destroy_l2_table: mlx5e_destroy_l2_table(priv); err_destroy_ttc_table: mlx5e_destroy_ttc_table(priv); +err_destroy_arfs_tables: + mlx5e_arfs_destroy_tables(priv); return err; } @@ -1120,4 +1120,5 @@ void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv) mlx5e_destroy_vlan_table(priv); mlx5e_destroy_l2_table(priv); mlx5e_destroy_ttc_table(priv); + mlx5e_arfs_destroy_tables(priv); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 8fee224fb98c..20167b9403b6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -2803,8 +2803,12 @@ static void mlx5e_build_netdev(struct net_device *netdev) if (FT_CAP(flow_modify_en) && FT_CAP(modify_root) && FT_CAP(identified_miss_table_mode) && - FT_CAP(flow_table_modify)) - priv->netdev->hw_features |= NETIF_F_HW_TC; + FT_CAP(flow_table_modify)) { + netdev->hw_features |= NETIF_F_HW_TC; +#ifdef CONFIG_RFS_ACCEL + netdev->hw_features |= NETIF_F_NTUPLE; +#endif + } netdev->features |= NETIF_F_HIGHDMA; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 83fe8643d89a..4d78d5a48af3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -74,7 +74,8 @@ #define BY_PASS_MIN_LEVEL (KERNEL_MIN_LEVEL + MLX5_BY_PASS_NUM_PRIOS +\ LEFTOVERS_NUM_PRIOS) -#define KERNEL_NIC_PRIO_NUM_LEVELS 3 +/* Vlan, mac, ttc, aRFS */ +#define KERNEL_NIC_PRIO_NUM_LEVELS 4 #define KERNEL_NIC_NUM_PRIOS 1 /* One more level for tc */ #define KERNEL_MIN_LEVEL (KERNEL_NIC_PRIO_NUM_LEVELS + 1) From 18c908e477dcc94ede69323a6b876b5d8cfb40ff Mon Sep 17 00:00:00 2001 From: Maor Gottlieb Date: Fri, 29 Apr 2016 01:36:41 +0300 Subject: [PATCH 1184/1649] net/mlx5e: Add accelerated RFS support Implement ndo_rx_flow_steer ndo. A new flow steering rule will be composed from the skb 4-tuple and added to the hardware aRFS flow table. 
Each rule is stored in an internal hash table, if such skb 4-tuple rule already exists we update the corresponding hardware steering rule with the new destination. For garbage collection rps_may_expire_flow will be invoked for a limited amount of old rules upon any ndo_rx_flow_steer invocation. Signed-off-by: Maor Gottlieb Signed-off-by: Saeed Mahameed Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 10 + .../net/ethernet/mellanox/mlx5/core/en_arfs.c | 427 +++++++++++++++++- 2 files changed, 436 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 999e05826490..21c38419ad89 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -448,9 +448,12 @@ struct mlx5e_ttc_table { struct mlx5_flow_rule *rules[MLX5E_NUM_TT]; }; +#define ARFS_HASH_SHIFT BITS_PER_BYTE +#define ARFS_HASH_SIZE BIT(BITS_PER_BYTE) struct arfs_table { struct mlx5e_flow_table ft; struct mlx5_flow_rule *default_rule; + struct hlist_head rules_hash[ARFS_HASH_SIZE]; }; enum arfs_type { @@ -463,6 +466,11 @@ enum arfs_type { struct mlx5e_arfs_tables { struct arfs_table arfs_tables[ARFS_NUM_TYPES]; + /* Protect aRFS rules list */ + spinlock_t arfs_lock; + struct list_head rules; + int last_filter_id; + struct workqueue_struct *wq; }; /* NIC prio FTS */ @@ -685,6 +693,8 @@ static inline void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv) {} #else int mlx5e_arfs_create_tables(struct mlx5e_priv *priv); void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv); +int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, + u16 rxq_index, u32 flow_id); #endif u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c index cd504197855b..e54fbc16f34d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c @@ -30,8 +30,47 @@ * SOFTWARE. 
*/ -#include "en.h" +#include #include +#include +#include +#include "en.h" + +struct arfs_tuple { + __be16 etype; + u8 ip_proto; + union { + __be32 src_ipv4; + struct in6_addr src_ipv6; + }; + union { + __be32 dst_ipv4; + struct in6_addr dst_ipv6; + }; + __be16 src_port; + __be16 dst_port; +}; + +struct arfs_rule { + struct mlx5e_priv *priv; + struct work_struct arfs_work; + struct mlx5_flow_rule *rule; + struct hlist_node hlist; + int rxq; + /* Flow ID passed to ndo_rx_flow_steer */ + int flow_id; + /* Filter ID returned by ndo_rx_flow_steer */ + int filter_id; + struct arfs_tuple tuple; +}; + +#define mlx5e_for_each_arfs_rule(hn, tmp, arfs_tables, i, j) \ + for (i = 0; i < ARFS_NUM_TYPES; i++) \ + mlx5e_for_each_hash_arfs_rule(hn, tmp, arfs_tables[i].rules_hash, j) + +#define mlx5e_for_each_hash_arfs_rule(hn, tmp, hash, j) \ + for (j = 0; j < ARFS_HASH_SIZE; j++) \ + hlist_for_each_entry_safe(hn, tmp, &hash[j], hlist) static void arfs_destroy_table(struct arfs_table *arfs_t) { @@ -39,12 +78,17 @@ static void arfs_destroy_table(struct arfs_table *arfs_t) mlx5e_destroy_flow_table(&arfs_t->ft); } +static void arfs_del_rules(struct mlx5e_priv *priv); + void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv) { int i; if (!(priv->netdev->hw_features & NETIF_F_NTUPLE)) return; + + arfs_del_rules(priv); + destroy_workqueue(priv->fs.arfs.wq); for (i = 0; i < ARFS_NUM_TYPES; i++) { if (!IS_ERR_OR_NULL(priv->fs.arfs.arfs_tables[i].ft.t)) arfs_destroy_table(&priv->fs.arfs.arfs_tables[i]); @@ -239,6 +283,12 @@ int mlx5e_arfs_create_tables(struct mlx5e_priv *priv) if (!(priv->netdev->hw_features & NETIF_F_NTUPLE)) return 0; + spin_lock_init(&priv->fs.arfs.arfs_lock); + INIT_LIST_HEAD(&priv->fs.arfs.rules); + priv->fs.arfs.wq = create_singlethread_workqueue("mlx5e_arfs"); + if (!priv->fs.arfs.wq) + return -ENOMEM; + for (i = 0; i < ARFS_NUM_TYPES; i++) { err = arfs_create_table(priv, i); if (err) @@ -249,3 +299,378 @@ err: mlx5e_arfs_destroy_tables(priv); return err; } + +#define MLX5E_ARFS_EXPIRY_QUOTA 60 + +static void arfs_may_expire_flow(struct mlx5e_priv *priv) +{ + struct arfs_rule *arfs_rule; + struct hlist_node *htmp; + int quota = 0; + int i; + int j; + + HLIST_HEAD(del_list); + spin_lock_bh(&priv->fs.arfs.arfs_lock); + mlx5e_for_each_arfs_rule(arfs_rule, htmp, priv->fs.arfs.arfs_tables, i, j) { + if (quota++ > MLX5E_ARFS_EXPIRY_QUOTA) + break; + if (!work_pending(&arfs_rule->arfs_work) && + rps_may_expire_flow(priv->netdev, + arfs_rule->rxq, arfs_rule->flow_id, + arfs_rule->filter_id)) { + hlist_del_init(&arfs_rule->hlist); + hlist_add_head(&arfs_rule->hlist, &del_list); + } + } + spin_unlock_bh(&priv->fs.arfs.arfs_lock); + hlist_for_each_entry_safe(arfs_rule, htmp, &del_list, hlist) { + if (arfs_rule->rule) + mlx5_del_flow_rule(arfs_rule->rule); + hlist_del(&arfs_rule->hlist); + kfree(arfs_rule); + } +} + +static void arfs_del_rules(struct mlx5e_priv *priv) +{ + struct hlist_node *htmp; + struct arfs_rule *rule; + int i; + int j; + + HLIST_HEAD(del_list); + spin_lock_bh(&priv->fs.arfs.arfs_lock); + mlx5e_for_each_arfs_rule(rule, htmp, priv->fs.arfs.arfs_tables, i, j) { + hlist_del_init(&rule->hlist); + hlist_add_head(&rule->hlist, &del_list); + } + spin_unlock_bh(&priv->fs.arfs.arfs_lock); + + hlist_for_each_entry_safe(rule, htmp, &del_list, hlist) { + cancel_work_sync(&rule->arfs_work); + if (rule->rule) + mlx5_del_flow_rule(rule->rule); + hlist_del(&rule->hlist); + kfree(rule); + } +} + +static struct hlist_head * +arfs_hash_bucket(struct arfs_table *arfs_t, __be16 src_port, + __be16 
dst_port) +{ + unsigned long l; + int bucket_idx; + + l = (__force unsigned long)src_port | + ((__force unsigned long)dst_port << 2); + + bucket_idx = hash_long(l, ARFS_HASH_SHIFT); + + return &arfs_t->rules_hash[bucket_idx]; +} + +static u8 arfs_get_ip_proto(const struct sk_buff *skb) +{ + return (skb->protocol == htons(ETH_P_IP)) ? + ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr; +} + +static struct arfs_table *arfs_get_table(struct mlx5e_arfs_tables *arfs, + u8 ip_proto, __be16 etype) +{ + if (etype == htons(ETH_P_IP) && ip_proto == IPPROTO_TCP) + return &arfs->arfs_tables[ARFS_IPV4_TCP]; + if (etype == htons(ETH_P_IP) && ip_proto == IPPROTO_UDP) + return &arfs->arfs_tables[ARFS_IPV4_UDP]; + if (etype == htons(ETH_P_IPV6) && ip_proto == IPPROTO_TCP) + return &arfs->arfs_tables[ARFS_IPV6_TCP]; + if (etype == htons(ETH_P_IPV6) && ip_proto == IPPROTO_UDP) + return &arfs->arfs_tables[ARFS_IPV6_UDP]; + + return NULL; +} + +static struct mlx5_flow_rule *arfs_add_rule(struct mlx5e_priv *priv, + struct arfs_rule *arfs_rule) +{ + struct mlx5e_arfs_tables *arfs = &priv->fs.arfs; + struct arfs_tuple *tuple = &arfs_rule->tuple; + struct mlx5_flow_rule *rule = NULL; + struct mlx5_flow_destination dest; + struct arfs_table *arfs_table; + u8 match_criteria_enable = 0; + struct mlx5_flow_table *ft; + u32 *match_criteria; + u32 *match_value; + int err = 0; + + match_value = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param)); + match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param)); + if (!match_value || !match_criteria) { + netdev_err(priv->netdev, "%s: alloc failed\n", __func__); + err = -ENOMEM; + goto out; + } + match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; + MLX5_SET_TO_ONES(fte_match_param, match_criteria, + outer_headers.ethertype); + MLX5_SET(fte_match_param, match_value, outer_headers.ethertype, + ntohs(tuple->etype)); + arfs_table = arfs_get_table(arfs, tuple->ip_proto, tuple->etype); + if (!arfs_table) { + err = -EINVAL; + goto out; + } + + ft = arfs_table->ft.t; + if (tuple->ip_proto == IPPROTO_TCP) { + MLX5_SET_TO_ONES(fte_match_param, match_criteria, + outer_headers.tcp_dport); + MLX5_SET_TO_ONES(fte_match_param, match_criteria, + outer_headers.tcp_sport); + MLX5_SET(fte_match_param, match_value, outer_headers.tcp_dport, + ntohs(tuple->dst_port)); + MLX5_SET(fte_match_param, match_value, outer_headers.tcp_sport, + ntohs(tuple->src_port)); + } else { + MLX5_SET_TO_ONES(fte_match_param, match_criteria, + outer_headers.udp_dport); + MLX5_SET_TO_ONES(fte_match_param, match_criteria, + outer_headers.udp_sport); + MLX5_SET(fte_match_param, match_value, outer_headers.udp_dport, + ntohs(tuple->dst_port)); + MLX5_SET(fte_match_param, match_value, outer_headers.udp_sport, + ntohs(tuple->src_port)); + } + if (tuple->etype == htons(ETH_P_IP)) { + memcpy(MLX5_ADDR_OF(fte_match_param, match_value, + outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4), + &tuple->src_ipv4, + 4); + memcpy(MLX5_ADDR_OF(fte_match_param, match_value, + outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4), + &tuple->dst_ipv4, + 4); + MLX5_SET_TO_ONES(fte_match_param, match_criteria, + outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4); + MLX5_SET_TO_ONES(fte_match_param, match_criteria, + outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4); + } else { + memcpy(MLX5_ADDR_OF(fte_match_param, match_value, + outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6), + &tuple->src_ipv6, + 16); + memcpy(MLX5_ADDR_OF(fte_match_param, match_value, + outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6), + &tuple->dst_ipv6, + 16); + 
memset(MLX5_ADDR_OF(fte_match_param, match_criteria, + outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6), + 0xff, + 16); + memset(MLX5_ADDR_OF(fte_match_param, match_criteria, + outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6), + 0xff, + 16); + } + dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR; + dest.tir_num = priv->direct_tir[arfs_rule->rxq].tirn; + rule = mlx5_add_flow_rule(ft, match_criteria_enable, match_criteria, + match_value, MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, + MLX5_FS_DEFAULT_FLOW_TAG, + &dest); + if (IS_ERR(rule)) { + err = PTR_ERR(rule); + netdev_err(priv->netdev, "%s: add rule(filter id=%d, rq idx=%d) failed, err=%d\n", + __func__, arfs_rule->filter_id, arfs_rule->rxq, err); + } + +out: + kvfree(match_criteria); + kvfree(match_value); + return err ? ERR_PTR(err) : rule; +} + +static void arfs_modify_rule_rq(struct mlx5e_priv *priv, + struct mlx5_flow_rule *rule, u16 rxq) +{ + struct mlx5_flow_destination dst; + int err = 0; + + dst.type = MLX5_FLOW_DESTINATION_TYPE_TIR; + dst.tir_num = priv->direct_tir[rxq].tirn; + err = mlx5_modify_rule_destination(rule, &dst); + if (err) + netdev_warn(priv->netdev, + "Failed to modfiy aRFS rule destination to rq=%d\n", rxq); +} + +static void arfs_handle_work(struct work_struct *work) +{ + struct arfs_rule *arfs_rule = container_of(work, + struct arfs_rule, + arfs_work); + struct mlx5e_priv *priv = arfs_rule->priv; + struct mlx5_flow_rule *rule; + + mutex_lock(&priv->state_lock); + if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) { + spin_lock_bh(&priv->fs.arfs.arfs_lock); + hlist_del(&arfs_rule->hlist); + spin_unlock_bh(&priv->fs.arfs.arfs_lock); + + mutex_unlock(&priv->state_lock); + kfree(arfs_rule); + goto out; + } + mutex_unlock(&priv->state_lock); + + if (!arfs_rule->rule) { + rule = arfs_add_rule(priv, arfs_rule); + if (IS_ERR(rule)) + goto out; + arfs_rule->rule = rule; + } else { + arfs_modify_rule_rq(priv, arfs_rule->rule, + arfs_rule->rxq); + } +out: + arfs_may_expire_flow(priv); +} + +/* return L4 destination port from ip4/6 packets */ +static __be16 arfs_get_dst_port(const struct sk_buff *skb) +{ + char *transport_header; + + transport_header = skb_transport_header(skb); + if (arfs_get_ip_proto(skb) == IPPROTO_TCP) + return ((struct tcphdr *)transport_header)->dest; + return ((struct udphdr *)transport_header)->dest; +} + +/* return L4 source port from ip4/6 packets */ +static __be16 arfs_get_src_port(const struct sk_buff *skb) +{ + char *transport_header; + + transport_header = skb_transport_header(skb); + if (arfs_get_ip_proto(skb) == IPPROTO_TCP) + return ((struct tcphdr *)transport_header)->source; + return ((struct udphdr *)transport_header)->source; +} + +static struct arfs_rule *arfs_alloc_rule(struct mlx5e_priv *priv, + struct arfs_table *arfs_t, + const struct sk_buff *skb, + u16 rxq, u32 flow_id) +{ + struct arfs_rule *rule; + struct arfs_tuple *tuple; + + rule = kzalloc(sizeof(*rule), GFP_ATOMIC); + if (!rule) + return NULL; + + rule->priv = priv; + rule->rxq = rxq; + INIT_WORK(&rule->arfs_work, arfs_handle_work); + + tuple = &rule->tuple; + tuple->etype = skb->protocol; + if (tuple->etype == htons(ETH_P_IP)) { + tuple->src_ipv4 = ip_hdr(skb)->saddr; + tuple->dst_ipv4 = ip_hdr(skb)->daddr; + } else { + memcpy(&tuple->src_ipv6, &ipv6_hdr(skb)->saddr, + sizeof(struct in6_addr)); + memcpy(&tuple->dst_ipv6, &ipv6_hdr(skb)->daddr, + sizeof(struct in6_addr)); + } + tuple->ip_proto = arfs_get_ip_proto(skb); + tuple->src_port = arfs_get_src_port(skb); + tuple->dst_port = arfs_get_dst_port(skb); + + rule->flow_id = flow_id; + 
rule->filter_id = priv->fs.arfs.last_filter_id++ % RPS_NO_FILTER; + + hlist_add_head(&rule->hlist, + arfs_hash_bucket(arfs_t, tuple->src_port, + tuple->dst_port)); + return rule; +} + +static bool arfs_cmp_ips(struct arfs_tuple *tuple, + const struct sk_buff *skb) +{ + if (tuple->etype == htons(ETH_P_IP) && + tuple->src_ipv4 == ip_hdr(skb)->saddr && + tuple->dst_ipv4 == ip_hdr(skb)->daddr) + return true; + if (tuple->etype == htons(ETH_P_IPV6) && + (!memcmp(&tuple->src_ipv6, &ipv6_hdr(skb)->saddr, + sizeof(struct in6_addr))) && + (!memcmp(&tuple->dst_ipv6, &ipv6_hdr(skb)->daddr, + sizeof(struct in6_addr)))) + return true; + return false; +} + +static struct arfs_rule *arfs_find_rule(struct arfs_table *arfs_t, + const struct sk_buff *skb) +{ + struct arfs_rule *arfs_rule; + struct hlist_head *head; + __be16 src_port = arfs_get_src_port(skb); + __be16 dst_port = arfs_get_dst_port(skb); + + head = arfs_hash_bucket(arfs_t, src_port, dst_port); + hlist_for_each_entry(arfs_rule, head, hlist) { + if (arfs_rule->tuple.src_port == src_port && + arfs_rule->tuple.dst_port == dst_port && + arfs_cmp_ips(&arfs_rule->tuple, skb)) { + return arfs_rule; + } + } + + return NULL; +} + +int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, + u16 rxq_index, u32 flow_id) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + struct mlx5e_arfs_tables *arfs = &priv->fs.arfs; + struct arfs_table *arfs_t; + struct arfs_rule *arfs_rule; + + if (skb->protocol != htons(ETH_P_IP) && + skb->protocol != htons(ETH_P_IPV6)) + return -EPROTONOSUPPORT; + + arfs_t = arfs_get_table(arfs, arfs_get_ip_proto(skb), skb->protocol); + if (!arfs_t) + return -EPROTONOSUPPORT; + + spin_lock_bh(&arfs->arfs_lock); + arfs_rule = arfs_find_rule(arfs_t, skb); + if (arfs_rule) { + if (arfs_rule->rxq == rxq_index) { + spin_unlock_bh(&arfs->arfs_lock); + return arfs_rule->filter_id; + } + arfs_rule->rxq = rxq_index; + } else { + arfs_rule = arfs_alloc_rule(priv, arfs_t, skb, + rxq_index, flow_id); + if (!arfs_rule) { + spin_unlock_bh(&arfs->arfs_lock); + return -ENOMEM; + } + } + queue_work(priv->fs.arfs.wq, &arfs_rule->arfs_work); + spin_unlock_bh(&arfs->arfs_lock); + return arfs_rule->filter_id; +} From 45bf454ae88414e80b80979ebb2c22bd66ea7d1b Mon Sep 17 00:00:00 2001 From: Maor Gottlieb Date: Fri, 29 Apr 2016 01:36:42 +0300 Subject: [PATCH 1185/1649] net/mlx5e: Enabling aRFS mechanism Accelerated RFS requires that ntuple filtering is enabled via ethtool and driver supports ndo_rx_flow_steer. When the ntuple filtering is enabled, we modify the l3_l4 ttc rules to point on the aRFS flow tables and when the filtering is disabled, we modify the l3_l4 ttc rules to point on the RSS TIRs. Signed-off-by: Maor Gottlieb Signed-off-by: Saeed Mahameed Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 12 +++ .../net/ethernet/mellanox/mlx5/core/en_arfs.c | 77 ++++++++++++++++++- .../ethernet/mellanox/mlx5/core/en_ethtool.c | 15 ++++ .../net/ethernet/mellanox/mlx5/core/en_main.c | 25 ++++++ 4 files changed, 127 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 21c38419ad89..34523c48444e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -690,9 +690,21 @@ static inline int mlx5e_arfs_create_tables(struct mlx5e_priv *priv) } static inline void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv) {} + +static inline int mlx5e_arfs_enable(struct mlx5e_priv *priv) +{ + return -ENOTSUPP; +} + +static inline int mlx5e_arfs_disable(struct mlx5e_priv *priv) +{ + return -ENOTSUPP; +} #else int mlx5e_arfs_create_tables(struct mlx5e_priv *priv); void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv); +int mlx5e_arfs_enable(struct mlx5e_priv *priv); +int mlx5e_arfs_disable(struct mlx5e_priv *priv); int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, u16 rxq_index, u32 flow_id); #endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c index e54fbc16f34d..b4ae0fe15878 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c @@ -72,14 +72,87 @@ struct arfs_rule { for (j = 0; j < ARFS_HASH_SIZE; j++) \ hlist_for_each_entry_safe(hn, tmp, &hash[j], hlist) +static enum mlx5e_traffic_types arfs_get_tt(enum arfs_type type) +{ + switch (type) { + case ARFS_IPV4_TCP: + return MLX5E_TT_IPV4_TCP; + case ARFS_IPV4_UDP: + return MLX5E_TT_IPV4_UDP; + case ARFS_IPV6_TCP: + return MLX5E_TT_IPV6_TCP; + case ARFS_IPV6_UDP: + return MLX5E_TT_IPV6_UDP; + default: + return -EINVAL; + } +} + +static int arfs_disable(struct mlx5e_priv *priv) +{ + struct mlx5_flow_destination dest; + u32 *tirn = priv->indir_tirn; + int err = 0; + int tt; + int i; + + dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR; + for (i = 0; i < ARFS_NUM_TYPES; i++) { + dest.tir_num = tirn[i]; + tt = arfs_get_tt(i); + /* Modify ttc rules destination to bypass the aRFS tables*/ + err = mlx5_modify_rule_destination(priv->fs.ttc.rules[tt], + &dest); + if (err) { + netdev_err(priv->netdev, + "%s: modify ttc destination failed\n", + __func__); + return err; + } + } + return 0; +} + +static void arfs_del_rules(struct mlx5e_priv *priv); + +int mlx5e_arfs_disable(struct mlx5e_priv *priv) +{ + arfs_del_rules(priv); + + return arfs_disable(priv); +} + +int mlx5e_arfs_enable(struct mlx5e_priv *priv) +{ + struct mlx5_flow_destination dest; + int err = 0; + int tt; + int i; + + dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; + for (i = 0; i < ARFS_NUM_TYPES; i++) { + dest.ft = priv->fs.arfs.arfs_tables[i].ft.t; + tt = arfs_get_tt(i); + /* Modify ttc rules destination to point on the aRFS FTs */ + err = mlx5_modify_rule_destination(priv->fs.ttc.rules[tt], + &dest); + if (err) { + netdev_err(priv->netdev, + "%s: modify ttc destination failed err=%d\n", + __func__, err); + arfs_disable(priv); + return err; + } + } + return 0; +} + static void arfs_destroy_table(struct arfs_table *arfs_t) { mlx5_del_flow_rule(arfs_t->default_rule); mlx5e_destroy_flow_table(&arfs_t->ft); } -static void arfs_del_rules(struct mlx5e_priv *priv); - void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv) { int i; diff --git 
a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index 498d40784ae9..534d99e2f9c8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -456,6 +456,7 @@ static int mlx5e_set_channels(struct net_device *dev, struct mlx5e_priv *priv = netdev_priv(dev); int ncv = mlx5e_get_max_num_channels(priv->mdev); unsigned int count = ch->combined_count; + bool arfs_enabled; bool was_opened; int err = 0; @@ -484,13 +485,27 @@ static int mlx5e_set_channels(struct net_device *dev, if (was_opened) mlx5e_close_locked(dev); + arfs_enabled = dev->features & NETIF_F_NTUPLE; + if (arfs_enabled) + mlx5e_arfs_disable(priv); + priv->params.num_channels = count; mlx5e_build_default_indir_rqt(priv->mdev, priv->params.indirection_rqt, MLX5E_INDIR_RQT_SIZE, count); if (was_opened) err = mlx5e_open_locked(dev); + if (err) + goto out; + if (arfs_enabled) { + err = mlx5e_arfs_enable(priv); + if (err) + netdev_err(dev, "%s: mlx5e_arfs_enable failed: %d\n", + __func__, err); + } + +out: mutex_unlock(&priv->state_lock); return err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 20167b9403b6..4ccfc1ac62c5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -2308,6 +2308,21 @@ static int set_feature_rx_vlan(struct net_device *netdev, bool enable) return err; } +#ifdef CONFIG_RFS_ACCEL +static int set_feature_arfs(struct net_device *netdev, bool enable) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + int err; + + if (enable) + err = mlx5e_arfs_enable(priv); + else + err = mlx5e_arfs_disable(priv); + + return err; +} +#endif + static int mlx5e_handle_feature(struct net_device *netdev, netdev_features_t wanted_features, netdev_features_t feature, @@ -2347,6 +2362,10 @@ static int mlx5e_set_features(struct net_device *netdev, set_feature_rx_all); err |= mlx5e_handle_feature(netdev, features, NETIF_F_HW_VLAN_CTAG_RX, set_feature_rx_vlan); +#ifdef CONFIG_RFS_ACCEL + err |= mlx5e_handle_feature(netdev, features, NETIF_F_NTUPLE, + set_feature_arfs); +#endif return err ? -EINVAL : 0; } @@ -2562,6 +2581,9 @@ static const struct net_device_ops mlx5e_netdev_ops_basic = { .ndo_set_features = mlx5e_set_features, .ndo_change_mtu = mlx5e_change_mtu, .ndo_do_ioctl = mlx5e_ioctl, +#ifdef CONFIG_RFS_ACCEL + .ndo_rx_flow_steer = mlx5e_rx_flow_steer, +#endif }; static const struct net_device_ops mlx5e_netdev_ops_sriov = { @@ -2581,6 +2603,9 @@ static const struct net_device_ops mlx5e_netdev_ops_sriov = { .ndo_add_vxlan_port = mlx5e_add_vxlan_port, .ndo_del_vxlan_port = mlx5e_del_vxlan_port, .ndo_features_check = mlx5e_features_check, +#ifdef CONFIG_RFS_ACCEL + .ndo_rx_flow_steer = mlx5e_rx_flow_steer, +#endif .ndo_set_vf_mac = mlx5e_set_vf_mac, .ndo_set_vf_vlan = mlx5e_set_vf_vlan, .ndo_get_vf_config = mlx5e_get_vf_config, From 692fb0a75e816d2676f222c9db33e91f46ea1e03 Mon Sep 17 00:00:00 2001 From: Anjali Singhai Jain Date: Wed, 13 Apr 2016 03:08:21 -0700 Subject: [PATCH 1186/1649] i40e: Change the default for VFs to be not privileged Make sure a VF is not trusted/privileged until its explicitly set for trust through the new NDO op interface. 
Change-Id: I476385c290d2b4901d8fceb29de43546accdc499 Signed-off-by: Anjali Singhai Jain Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- .../ethernet/intel/i40e/i40e_virtchnl_pf.c | 20 +++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index c226c2dad247..4c365d7f2ed1 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -860,7 +860,11 @@ static int i40e_alloc_vf_res(struct i40e_vf *vf) if (ret) goto error_alloc; total_queue_pairs += pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs; - set_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps); + + if (vf->trusted) + set_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps); + else + clear_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps); /* store the total qps number for the runtime * VF req validation @@ -1847,15 +1851,17 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf, u8 *macaddr) dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n", macaddr); ret = I40E_ERR_INVALID_MAC_ADDR; } else if (vf->pf_set_mac && !is_multicast_ether_addr(macaddr) && + !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) && !ether_addr_equal(macaddr, vf->default_lan_addr.addr)) { /* If the host VMM administrator has set the VF MAC address * administratively via the ndo_set_vf_mac command then deny * permission to the VF to add or delete unicast MAC addresses. + * Unless the VF is privileged and then it can do whatever. * The VF may request to set the MAC address filter already * assigned to it so do not return an error in that case. */ dev_err(&pf->pdev->dev, - "VF attempting to override administratively set MAC address\nPlease reload the VF driver to resume normal operation\n"); + "VF attempting to override administratively set MAC address, reload the VF driver to resume normal operation\n"); ret = -EPERM; } return ret; @@ -1880,7 +1886,6 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) int i; if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) || - !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) || !i40e_vc_isvalid_vsi_id(vf, vsi_id)) { ret = I40E_ERR_PARAM; goto error_param; @@ -1954,7 +1959,6 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) int i; if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) || - !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) || !i40e_vc_isvalid_vsi_id(vf, vsi_id)) { ret = I40E_ERR_PARAM; goto error_param; @@ -2207,7 +2211,6 @@ static int i40e_vc_config_rss_key(struct i40e_vf *vf, u8 *msg, u16 msglen) i40e_status aq_ret = 0; if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) || - !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) || !i40e_vc_isvalid_vsi_id(vf, vsi_id) || (vrk->key_len != I40E_HKEY_ARRAY_SIZE)) { aq_ret = I40E_ERR_PARAM; @@ -2240,7 +2243,6 @@ static int i40e_vc_config_rss_lut(struct i40e_vf *vf, u8 *msg, u16 msglen) i40e_status aq_ret = 0; if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) || - !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) || !i40e_vc_isvalid_vsi_id(vf, vsi_id) || (vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE)) { aq_ret = I40E_ERR_PARAM; @@ -2270,8 +2272,7 @@ static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg, u16 msglen) i40e_status aq_ret = 0; int len = 0; - if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) || - !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) { + if 
(!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) { aq_ret = I40E_ERR_PARAM; goto err; } @@ -2307,8 +2308,7 @@ static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg, u16 msglen) struct i40e_hw *hw = &pf->hw; i40e_status aq_ret = 0; - if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) || - !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) { + if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) { aq_ret = I40E_ERR_PARAM; goto err; } From 71c08eac2e88b01ecbfba1b1a485a748a4632727 Mon Sep 17 00:00:00 2001 From: Michael Thalmeier Date: Mon, 11 Apr 2016 16:36:02 +0200 Subject: [PATCH 1187/1649] nfc: pn533: Add device tree documentation for i2c phy Add pn533-i2c phy devicetree documentation Signed-off-by: Michael Thalmeier Signed-off-by: Samuel Ortiz --- .../devicetree/bindings/net/nfc/pn533-i2c.txt | 31 +++++++++++++++++++ 1 file changed, 31 insertions(+) create mode 100644 Documentation/devicetree/bindings/net/nfc/pn533-i2c.txt diff --git a/Documentation/devicetree/bindings/net/nfc/pn533-i2c.txt b/Documentation/devicetree/bindings/net/nfc/pn533-i2c.txt new file mode 100644 index 000000000000..1aea822d4530 --- /dev/null +++ b/Documentation/devicetree/bindings/net/nfc/pn533-i2c.txt @@ -0,0 +1,31 @@ +* NXP Semiconductors PN532 NFC Controller + +Required properties: +- compatible: Should be "nxp,pn532-i2c" or "nxp,pn533-i2c". +- clock-frequency: I²C work frequency. +- reg: address on the bus +- interrupt-parent: phandle for the interrupt gpio controller +- interrupts: GPIO interrupt to which the chip is connected + +Optional SoC Specific Properties: +- pinctrl-names: Contains only one value - "default". +- pintctrl-0: Specifies the pin control groups used for this controller. + +Example (for ARM-based BeagleBone with PN532 on I2C2): + +&i2c2 { + + status = "okay"; + + pn532: pn532@24 { + + compatible = "nxp,pn532-i2c"; + + reg = <0x24>; + clock-frequency = <400000>; + + interrupt-parent = <&gpio1>; + interrupts = <17 IRQ_TYPE_EDGE_FALLING>; + + }; +}; From dfeb87df484d609aadef810dbb819830f5f9052a Mon Sep 17 00:00:00 2001 From: Krzysztof Kozlowski Date: Wed, 30 Mar 2016 09:51:04 +0900 Subject: [PATCH 1188/1649] nfc: Drop owner assignment from i2c_driver i2c_driver does not need to set an owner because i2c_register_driver() will set it. 
Signed-off-by: Krzysztof Kozlowski Signed-off-by: Samuel Ortiz --- drivers/nfc/nxp-nci/i2c.c | 1 - drivers/nfc/pn544/i2c.c | 1 - drivers/nfc/st-nci/i2c.c | 1 - drivers/nfc/st21nfca/i2c.c | 1 - 4 files changed, 4 deletions(-) diff --git a/drivers/nfc/nxp-nci/i2c.c b/drivers/nfc/nxp-nci/i2c.c index 11520f472f98..36099e557730 100644 --- a/drivers/nfc/nxp-nci/i2c.c +++ b/drivers/nfc/nxp-nci/i2c.c @@ -418,7 +418,6 @@ MODULE_DEVICE_TABLE(acpi, acpi_id); static struct i2c_driver nxp_nci_i2c_driver = { .driver = { .name = NXP_NCI_I2C_DRIVER_NAME, - .owner = THIS_MODULE, .acpi_match_table = ACPI_PTR(acpi_id), .of_match_table = of_match_ptr(of_nxp_nci_i2c_match), }, diff --git a/drivers/nfc/pn544/i2c.c b/drivers/nfc/pn544/i2c.c index 45d0e667d7ae..f837c39a8017 100644 --- a/drivers/nfc/pn544/i2c.c +++ b/drivers/nfc/pn544/i2c.c @@ -1106,7 +1106,6 @@ MODULE_DEVICE_TABLE(of, of_pn544_i2c_match); static struct i2c_driver pn544_hci_i2c_driver = { .driver = { .name = PN544_HCI_I2C_DRIVER_NAME, - .owner = THIS_MODULE, .of_match_table = of_match_ptr(of_pn544_i2c_match), .acpi_match_table = ACPI_PTR(pn544_hci_i2c_acpi_match), }, diff --git a/drivers/nfc/st-nci/i2c.c b/drivers/nfc/st-nci/i2c.c index 8a56b5c6e4c4..925dbeef74db 100644 --- a/drivers/nfc/st-nci/i2c.c +++ b/drivers/nfc/st-nci/i2c.c @@ -416,7 +416,6 @@ MODULE_DEVICE_TABLE(of, of_st_nci_i2c_match); static struct i2c_driver st_nci_i2c_driver = { .driver = { - .owner = THIS_MODULE, .name = ST_NCI_I2C_DRIVER_NAME, .of_match_table = of_match_ptr(of_st_nci_i2c_match), .acpi_match_table = ACPI_PTR(st_nci_i2c_acpi_match), diff --git a/drivers/nfc/st21nfca/i2c.c b/drivers/nfc/st21nfca/i2c.c index 1f44a151d206..640b4de05793 100644 --- a/drivers/nfc/st21nfca/i2c.c +++ b/drivers/nfc/st21nfca/i2c.c @@ -721,7 +721,6 @@ MODULE_DEVICE_TABLE(of, of_st21nfca_i2c_match); static struct i2c_driver st21nfca_hci_i2c_driver = { .driver = { - .owner = THIS_MODULE, .name = ST21NFCA_HCI_I2C_DRIVER_NAME, .of_match_table = of_match_ptr(of_st21nfca_i2c_match), .acpi_match_table = ACPI_PTR(st21nfca_hci_i2c_acpi_match), From a77f4f70fd34ac7b67581fe5b89ddc1c9ac20d39 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Thu, 28 Apr 2016 16:24:12 -0700 Subject: [PATCH 1189/1649] of: of_mdio: Check if MDIO bus controller is available Add a check whether the 'struct device_node' pointer passed to of_mdiobus_register() is an available (aka enabled) node in the Device Tree. Rationale for doing this are cases where an Ethernet MAC provides a MDIO bus controller and node, and an additional Ethernet MAC might be connecting its PHY/switches to that first MDIO bus controller, while still embedding one internally which is therefore marked as "disabled". Instead of sprinkling checks like these in callers of of_mdiobus_register(), do this in a central location. Reviewed-by: Andrew Lunn Signed-off-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/of/of_mdio.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c index b622b33dbf93..e051e1b57609 100644 --- a/drivers/of/of_mdio.c +++ b/drivers/of/of_mdio.c @@ -209,6 +209,10 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np) bool scanphys = false; int addr, rc; + /* Do not continue if the node is disabled */ + if (!of_device_is_available(np)) + return -ENODEV; + /* Mask out all PHYs from auto probing. 
Instead the PHYs listed in * the device tree are populated after the bus has been registered */ mdio->phy_mask = ~0; From def22c47d749c5ff8011831a8232b951f223963e Mon Sep 17 00:00:00 2001 From: Jon Paul Maloy Date: Thu, 28 Apr 2016 20:16:08 -0400 Subject: [PATCH 1190/1649] tipc: set 'active' state correctly for first established link When we are displaying statistics for the first link established between two peers, it will always be presented as STANDBY although it in reality is ACTIVE. This happens because we forget to set the 'active' flag in the link instance at the moment it is established. Although this is a bug, it only has impact on the presentation view of the link, not on its actual functionality. Signed-off-by: Jon Maloy Signed-off-by: David S. Miller --- net/tipc/node.c | 1 + 1 file changed, 1 insertion(+) diff --git a/net/tipc/node.c b/net/tipc/node.c index 68d9f7b8485c..c29915688230 100644 --- a/net/tipc/node.c +++ b/net/tipc/node.c @@ -554,6 +554,7 @@ static void __tipc_node_link_up(struct tipc_node *n, int bearer_id, *slot1 = bearer_id; tipc_node_fsm_evt(n, SELF_ESTABL_CONTACT_EVT); n->action_flags |= TIPC_NOTIFY_NODE_UP; + tipc_link_set_active(nl, true); tipc_bcast_add_peer(n->net, nl, xmitq); return; } From 5f527ba962e277963b08f4c7f12fbeeac3f34e3c Mon Sep 17 00:00:00 2001 From: Anjali Singhai Jain Date: Wed, 13 Apr 2016 03:08:22 -0700 Subject: [PATCH 1191/1649] i40e: Limit the number of MAC and VLAN addresses that can be added for VFs If the VF is privileged/trusted it can do as it may please including but not limited to hogging resources and playing unfair. But if the VF is not privileged/trusted it still can add some number (8) of MAC and VLAN addresses. Other restrictions with respect to Port VLAN and normal VLAN still apply to not privileged/trusted VF. 
Change-Id: I3a9529201b184c8873e1ad2e300aff468c9e6296 Signed-off-by: Anjali Singhai Jain Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- .../ethernet/intel/i40e/i40e_virtchnl_pf.c | 25 +++++++++++++++++-- .../ethernet/intel/i40e/i40e_virtchnl_pf.h | 3 +++ 2 files changed, 26 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 4c365d7f2ed1..a8a65e030611 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -1831,6 +1831,10 @@ error_param: (u8 *)&stats, sizeof(stats)); } +/* If the VF is not trusted restrict the number of MAC/VLAN it can program */ +#define I40E_VC_MAX_MAC_ADDR_PER_VF 8 +#define I40E_VC_MAX_VLAN_PER_VF 8 + /** * i40e_check_vf_permission * @vf: pointer to the VF info @@ -1863,6 +1867,11 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf, u8 *macaddr) dev_err(&pf->pdev->dev, "VF attempting to override administratively set MAC address, reload the VF driver to resume normal operation\n"); ret = -EPERM; + } else if ((vf->num_mac >= I40E_VC_MAX_MAC_ADDR_PER_VF) && + !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) { + dev_err(&pf->pdev->dev, + "VF is not trusted, switch the VF to trusted to add more functionality\n"); + ret = -EPERM; } return ret; } @@ -1924,6 +1933,8 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) ret = I40E_ERR_PARAM; spin_unlock_bh(&vsi->mac_filter_list_lock); goto error_param; + } else { + vf->num_mac++; } } spin_unlock_bh(&vsi->mac_filter_list_lock); @@ -1982,6 +1993,8 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) ret = I40E_ERR_INVALID_MAC_ADDR; spin_unlock_bh(&vsi->mac_filter_list_lock); goto error_param; + } else { + vf->num_mac--; } spin_unlock_bh(&vsi->mac_filter_list_lock); @@ -2016,8 +2029,13 @@ static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) i40e_status aq_ret = 0; int i; + if ((vf->num_vlan >= I40E_VC_MAX_VLAN_PER_VF) && + !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) { + dev_err(&pf->pdev->dev, + "VF is not trusted, switch the VF to trusted to add more VLAN addresses\n"); + goto error_param; + } if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) || - !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) || !i40e_vc_isvalid_vsi_id(vf, vsi_id)) { aq_ret = I40E_ERR_PARAM; goto error_param; @@ -2041,6 +2059,8 @@ static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) for (i = 0; i < vfl->num_elements; i++) { /* add new VLAN filter */ int ret = i40e_vsi_add_vlan(vsi, vfl->vlan_id[i]); + if (!ret) + vf->num_vlan++; if (test_bit(I40E_VF_STAT_UC_PROMISC, &vf->vf_states)) i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid, @@ -2083,7 +2103,6 @@ static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) int i; if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) || - !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) || !i40e_vc_isvalid_vsi_id(vf, vsi_id)) { aq_ret = I40E_ERR_PARAM; goto error_param; @@ -2104,6 +2123,8 @@ static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) for (i = 0; i < vfl->num_elements; i++) { int ret = i40e_vsi_kill_vlan(vsi, vfl->vlan_id[i]); + if (!ret) + vf->num_vlan--; if (test_bit(I40E_VF_STAT_UC_PROMISC, &vf->vf_states)) i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid, diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h 
b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h index 8cbf57988607..bf54873d9c04 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h @@ -111,6 +111,9 @@ struct i40e_vf { bool link_forced; bool link_up; /* only valid if VF link is forced */ bool spoofchk; + u16 num_mac; + u16 num_vlan; + /* RDMA Client */ struct i40e_virtchnl_iwarp_qvlist_info *qvlist_info; }; From a856b5cb83fea40f28634a0220d7587d42e529d2 Mon Sep 17 00:00:00 2001 From: Anjali Singhai Jain Date: Wed, 13 Apr 2016 03:08:23 -0700 Subject: [PATCH 1192/1649] i40e: Prevent falling to promiscuous if the VF is not trusted With this change a non trusted VF can never fall to promiscuous mode when there is no room for a MAC/VLAN filter. Change-Id: I8a155aa25c0bcdc6093414920c9ade4ee0bd20e8 Signed-off-by: Anjali Singhai Jain Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_main.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 39b3b56d3a9f..a45748e52ef6 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -2098,6 +2098,12 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) } } + /* if the VF is not trusted do not do promisc */ + if ((vsi->type == I40E_VSI_SRIOV) && !pf->vf[vsi->vf_id].trusted) { + clear_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state); + goto out; + } + /* check for changes in promiscuous modes */ if (changed_flags & IFF_ALLMULTI) { bool cur_multipromisc; From 2b79c58d80921acb7f784c340fd7532c4a8b8d95 Mon Sep 17 00:00:00 2001 From: Neerav Parikh Date: Sun, 1 May 2016 14:29:53 -0700 Subject: [PATCH 1193/1649] i40e: Remove HMC AQ API implementation Remove the code that implements the HMC AQ APIs and call these APIs. This is done because these are obsolete APIs and are not supported by firmware. Change-ID: I5d771d8f37c3e16e7b0a972ff9b27e75aa2d05d4 Signed-off-by: Neerav Parikh Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_adminq.c | 4 --- .../net/ethernet/intel/i40e/i40e_adminq_cmd.h | 25 ---------------- drivers/net/ethernet/intel/i40e/i40e_common.c | 30 ------------------- .../net/ethernet/intel/i40e/i40e_prototype.h | 4 --- .../ethernet/intel/i40evf/i40e_adminq_cmd.h | 25 ---------------- 5 files changed, 88 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c index 43bb4139d896..738b42a44f20 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c @@ -617,10 +617,6 @@ i40e_status i40e_init_adminq(struct i40e_hw *hw) hw->nvm_release_on_done = false; hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; - ret_code = i40e_aq_set_hmc_resource_profile(hw, - I40E_HMC_PROFILE_DEFAULT, - 0, - NULL); ret_code = 0; /* success! 
*/ diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h index 5179b3b25acb..48c0c004fcc1 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h @@ -205,10 +205,6 @@ enum i40e_admin_queue_opc { i40e_aqc_opc_resume_port_tx = 0x041C, i40e_aqc_opc_configure_partition_bw = 0x041D, - /* hmc */ - i40e_aqc_opc_query_hmc_resource_profile = 0x0500, - i40e_aqc_opc_set_hmc_resource_profile = 0x0501, - /* phy commands*/ i40e_aqc_opc_get_phy_abilities = 0x0600, i40e_aqc_opc_set_phy_config = 0x0601, @@ -1586,27 +1582,6 @@ struct i40e_aqc_configure_partition_bw_data { I40E_CHECK_STRUCT_LEN(0x22, i40e_aqc_configure_partition_bw_data); -/* Get and set the active HMC resource profile and status. - * (direct 0x0500) and (direct 0x0501) - */ -struct i40e_aq_get_set_hmc_resource_profile { - u8 pm_profile; - u8 pe_vf_enabled; - u8 reserved[14]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aq_get_set_hmc_resource_profile); - -enum i40e_aq_hmc_profile { - /* I40E_HMC_PROFILE_NO_CHANGE = 0, reserved */ - I40E_HMC_PROFILE_DEFAULT = 1, - I40E_HMC_PROFILE_FAVOR_VF = 2, - I40E_HMC_PROFILE_EQUAL = 3, -}; - -#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_PM_MASK 0xF -#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_COUNT_MASK 0x3F - /* Get PHY Abilities (indirect 0x0600) uses the generic indirect struct */ /* set in param0 for get phy abilities to report qualified modules */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c index 0e8552b2fba0..4a934e14574d 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c @@ -2854,36 +2854,6 @@ i40e_status i40e_aq_debug_write_register(struct i40e_hw *hw, return status; } -/** - * i40e_aq_set_hmc_resource_profile - * @hw: pointer to the hw struct - * @profile: type of profile the HMC is to be set as - * @pe_vf_enabled_count: the number of PE enabled VFs the system has - * @cmd_details: pointer to command details structure or NULL - * - * set the HMC profile of the device. 
- **/ -i40e_status i40e_aq_set_hmc_resource_profile(struct i40e_hw *hw, - enum i40e_aq_hmc_profile profile, - u8 pe_vf_enabled_count, - struct i40e_asq_cmd_details *cmd_details) -{ - struct i40e_aq_desc desc; - struct i40e_aq_get_set_hmc_resource_profile *cmd = - (struct i40e_aq_get_set_hmc_resource_profile *)&desc.params.raw; - i40e_status status; - - i40e_fill_default_direct_cmd_desc(&desc, - i40e_aqc_opc_set_hmc_resource_profile); - - cmd->pm_profile = (u8)profile; - cmd->pe_vf_enabled = pe_vf_enabled_count; - - status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); - - return status; -} - /** * i40e_aq_request_resource * @hw: pointer to the hw struct diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h index 8afb2375ec9f..4c8977c805df 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h +++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h @@ -236,10 +236,6 @@ i40e_status i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw, struct i40e_asq_cmd_details *cmd_details); i40e_status i40e_aq_dcb_updated(struct i40e_hw *hw, struct i40e_asq_cmd_details *cmd_details); -i40e_status i40e_aq_set_hmc_resource_profile(struct i40e_hw *hw, - enum i40e_aq_hmc_profile profile, - u8 pe_vf_enabled_count, - struct i40e_asq_cmd_details *cmd_details); i40e_status i40e_aq_config_switch_comp_bw_limit(struct i40e_hw *hw, u16 seid, u16 credit, u8 max_bw, struct i40e_asq_cmd_details *cmd_details); diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h index 1bcb8cf89801..180ae57c51c5 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h @@ -205,10 +205,6 @@ enum i40e_admin_queue_opc { i40e_aqc_opc_resume_port_tx = 0x041C, i40e_aqc_opc_configure_partition_bw = 0x041D, - /* hmc */ - i40e_aqc_opc_query_hmc_resource_profile = 0x0500, - i40e_aqc_opc_set_hmc_resource_profile = 0x0501, - /* phy commands*/ i40e_aqc_opc_get_phy_abilities = 0x0600, i40e_aqc_opc_set_phy_config = 0x0601, @@ -1583,27 +1579,6 @@ struct i40e_aqc_configure_partition_bw_data { I40E_CHECK_STRUCT_LEN(0x22, i40e_aqc_configure_partition_bw_data); -/* Get and set the active HMC resource profile and status. - * (direct 0x0500) and (direct 0x0501) - */ -struct i40e_aq_get_set_hmc_resource_profile { - u8 pm_profile; - u8 pe_vf_enabled; - u8 reserved[14]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aq_get_set_hmc_resource_profile); - -enum i40e_aq_hmc_profile { - /* I40E_HMC_PROFILE_NO_CHANGE = 0, reserved */ - I40E_HMC_PROFILE_DEFAULT = 1, - I40E_HMC_PROFILE_FAVOR_VF = 2, - I40E_HMC_PROFILE_EQUAL = 3, -}; - -#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_PM_MASK 0xF -#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_COUNT_MASK 0x3F - /* Get PHY Abilities (indirect 0x0600) uses the generic indirect struct */ /* set in param0 for get phy abilities to report qualified modules */ From b29699b39924db73993734a605665cf4bebef9ea Mon Sep 17 00:00:00 2001 From: Carolyn Wyborny Date: Wed, 13 Apr 2016 03:08:26 -0700 Subject: [PATCH 1194/1649] i40evf: RSS Hash Option parameters This patch syncs the VF code for the changes made to the PF for the RSS hash tuple settings. Since the VF still cannot change the RSS hash settings, change the code to make this clear to the user. Previously, the default settings were returned in this function. However, the default can be changed by the PF so this does not make sense anymore. 
Change-Id: I085eaf005fc7978b440d2a1bf2b2dd7cadaff39b Signed-off-by: Carolyn Wyborny Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- .../ethernet/intel/i40evf/i40evf_ethtool.c | 201 +----------------- 1 file changed, 2 insertions(+), 199 deletions(-) diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c index 9f7657c68688..5a48ee07688f 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c @@ -377,59 +377,6 @@ static int i40evf_set_coalesce(struct net_device *netdev, return 0; } -/** - * i40e_get_rss_hash_opts - Get RSS hash Input Set for each flow type - * @adapter: board private structure - * @cmd: ethtool rxnfc command - * - * Returns Success if the flow is supported, else Invalid Input. - **/ -static int i40evf_get_rss_hash_opts(struct i40evf_adapter *adapter, - struct ethtool_rxnfc *cmd) -{ - /* We always hash on IP src and dest addresses */ - cmd->data = RXH_IP_SRC | RXH_IP_DST; - - switch (cmd->flow_type) { - case TCP_V4_FLOW: - if (adapter->hena & BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP)) - cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; - break; - case UDP_V4_FLOW: - if (adapter->hena & BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP)) - cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; - break; - - case SCTP_V4_FLOW: - case AH_ESP_V4_FLOW: - case AH_V4_FLOW: - case ESP_V4_FLOW: - case IPV4_FLOW: - break; - - case TCP_V6_FLOW: - if (adapter->hena & BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP)) - cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; - break; - case UDP_V6_FLOW: - if (adapter->hena & BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP)) - cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; - break; - - case SCTP_V6_FLOW: - case AH_ESP_V6_FLOW: - case AH_V6_FLOW: - case ESP_V6_FLOW: - case IPV6_FLOW: - break; - default: - cmd->data = 0; - return -EINVAL; - } - - return 0; -} - /** * i40evf_get_rxnfc - command to get RX flow classification rules * @netdev: network interface device structure @@ -450,7 +397,8 @@ static int i40evf_get_rxnfc(struct net_device *netdev, ret = 0; break; case ETHTOOL_GRXFH: - ret = i40evf_get_rss_hash_opts(adapter, cmd); + netdev_info(netdev, + "RSS hash info is not available to vf, use pf.\n"); break; default: break; @@ -458,150 +406,6 @@ static int i40evf_get_rxnfc(struct net_device *netdev, return ret; } - -/** - * i40evf_set_rss_hash_opt - Enable/Disable flow types for RSS hash - * @adapter: board private structure - * @cmd: ethtool rxnfc command - * - * Returns Success if the flow input set is supported. 
- **/ -static int i40evf_set_rss_hash_opt(struct i40evf_adapter *adapter, - struct ethtool_rxnfc *nfc) -{ - struct i40e_hw *hw = &adapter->hw; - u32 flags = adapter->vf_res->vf_offload_flags; - - /* RSS does not support anything other than hashing - * to queues on src and dst IPs and ports - */ - if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST | - RXH_L4_B_0_1 | RXH_L4_B_2_3)) - return -EINVAL; - - /* We need at least the IP SRC and DEST fields for hashing */ - if (!(nfc->data & RXH_IP_SRC) || - !(nfc->data & RXH_IP_DST)) - return -EINVAL; - - switch (nfc->flow_type) { - case TCP_V4_FLOW: - if (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { - if (flags & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2) - adapter->hena |= - BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK); - - adapter->hena |= - BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP); - } else { - return -EINVAL; - } - break; - case TCP_V6_FLOW: - if (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { - if (flags & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2) - adapter->hena |= - BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK); - - adapter->hena |= - BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP); - } else { - return -EINVAL; - } - break; - case UDP_V4_FLOW: - if (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { - if (flags & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2) - adapter->hena |= - BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | - BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP); - - adapter->hena |= - (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | - BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4)); - } else { - return -EINVAL; - } - break; - case UDP_V6_FLOW: - if (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { - if (flags & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2) - adapter->hena |= - BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | - BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP); - - adapter->hena |= - (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | - BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6)); - } else { - return -EINVAL; - } - break; - case AH_ESP_V4_FLOW: - case AH_V4_FLOW: - case ESP_V4_FLOW: - case SCTP_V4_FLOW: - if ((nfc->data & RXH_L4_B_0_1) || - (nfc->data & RXH_L4_B_2_3)) - return -EINVAL; - adapter->hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER); - break; - case AH_ESP_V6_FLOW: - case AH_V6_FLOW: - case ESP_V6_FLOW: - case SCTP_V6_FLOW: - if ((nfc->data & RXH_L4_B_0_1) || - (nfc->data & RXH_L4_B_2_3)) - return -EINVAL; - adapter->hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER); - break; - case IPV4_FLOW: - adapter->hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | - BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4)); - break; - case IPV6_FLOW: - adapter->hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | - BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6)); - break; - default: - return -EINVAL; - } - - if (RSS_PF(adapter)) { - adapter->aq_required = I40EVF_FLAG_AQ_SET_HENA; - } else { - wr32(hw, I40E_VFQF_HENA(0), (u32)adapter->hena); - wr32(hw, I40E_VFQF_HENA(1), (u32)(adapter->hena >> 32)); - i40e_flush(hw); - } - - return 0; -} - -/** - * i40evf_set_rxnfc - command to set RX flow classification rules - * @netdev: network interface device structure - * @cmd: ethtool rxnfc command - * - * Returns Success if the command is supported. 
- **/ -static int i40evf_set_rxnfc(struct net_device *netdev, - struct ethtool_rxnfc *cmd) -{ - struct i40evf_adapter *adapter = netdev_priv(netdev); - int ret = -EOPNOTSUPP; - - switch (cmd->cmd) { - case ETHTOOL_SRXFH: - ret = i40evf_set_rss_hash_opt(adapter, cmd); - break; - default: - break; - } - - return ret; -} - /** * i40evf_get_channels: get the number of channels supported by the device * @netdev: network interface device structure @@ -775,7 +579,6 @@ static const struct ethtool_ops i40evf_ethtool_ops = { .get_coalesce = i40evf_get_coalesce, .set_coalesce = i40evf_set_coalesce, .get_rxnfc = i40evf_get_rxnfc, - .set_rxnfc = i40evf_set_rxnfc, .get_rxfh_indir_size = i40evf_get_rxfh_indir_size, .get_rxfh = i40evf_get_rxfh, .set_rxfh = i40evf_set_rxfh, From 3ed439c56e0d82fd08275fff2c21278f92ed8ec5 Mon Sep 17 00:00:00 2001 From: Catherine Sullivan Date: Wed, 13 Apr 2016 03:08:27 -0700 Subject: [PATCH 1195/1649] i40e: Fix uninitialized variable We have an uninitialized variable warning for valid_len for one case in validate_vf_mesg. To fix this, just initialize it to 0 at the top of the function and remove all of the now redundant assignments to 0 in the individual cases. Change-Id: Iacbd97f4c521ed8d662eef803a598d8707708cfd Signed-off-by: Catherine Sullivan Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index a8a65e030611..a534fe67251a 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -2356,7 +2356,7 @@ static int i40e_vc_validate_vf_msg(struct i40e_vf *vf, u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen) { bool err_msg_format = false; - int valid_len; + int valid_len = 0; /* Check if VF is disabled. */ if (test_bit(I40E_VF_STAT_DISABLED, &vf->vf_states)) @@ -2368,13 +2368,10 @@ static int i40e_vc_validate_vf_msg(struct i40e_vf *vf, u32 v_opcode, valid_len = sizeof(struct i40e_virtchnl_version_info); break; case I40E_VIRTCHNL_OP_RESET_VF: - valid_len = 0; break; case I40E_VIRTCHNL_OP_GET_VF_RESOURCES: if (VF_IS_V11(vf)) valid_len = sizeof(u32); - else - valid_len = 0; break; case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE: valid_len = sizeof(struct i40e_virtchnl_txq_info); @@ -2489,7 +2486,6 @@ static int i40e_vc_validate_vf_msg(struct i40e_vf *vf, u32 v_opcode, } break; case I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS: - valid_len = 0; break; case I40E_VIRTCHNL_OP_SET_RSS_HENA: valid_len = sizeof(struct i40e_virtchnl_rss_hena); From cdc3d93257e162dd12906a6e0207436c2d5c6873 Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Wed, 13 Apr 2016 03:08:28 -0700 Subject: [PATCH 1196/1649] i40e: PTP - avoid aggregate return warnings Aggregate return warnings are when struct types are returned and must be copied to the lvalue with a struct copy by the compiler. This fixes warnings of type aggregate-return from gcc with W=2. 
Change-Id: I896b1bf514544bf0faeb458869d79914b9f1b168 Signed-off-by: Jesse Brandeburg Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_ptp.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c index 565ca7c835bc..a1b878abd5b0 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c @@ -158,9 +158,10 @@ static int i40e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb) static int i40e_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) { struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps); - struct timespec64 now, then = ns_to_timespec64(delta); + struct timespec64 now, then; unsigned long flags; + then = ns_to_timespec64(delta); spin_lock_irqsave(&pf->tmreg_lock, flags); i40e_ptp_read(pf, &now); From a1b5a24fccc83430bb1fa6e0f9925fb8328abd34 Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Wed, 13 Apr 2016 03:08:29 -0700 Subject: [PATCH 1197/1649] i40e: Use consistent type for vf_id The driver was all over the place using signed or unsigned types for vf_id, when it should always be signed. This fixes warnings of type unsafe comparisons from gcc with W=2. Change-Id: I2cb681f83d0f68ca124d2e4131e4ac0d9f8a6b22 Signed-off-by: Jesse Brandeburg Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e.h | 2 +- drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 13 +++++++------ drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h | 4 ++-- 3 files changed, 10 insertions(+), 9 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index e312adf64260..00c473874f01 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -554,7 +554,7 @@ struct i40e_vsi { u16 num_queue_pairs; /* Used tx and rx pairs */ u16 num_desc; enum i40e_vsi_type type; /* VSI type, e.g., LAN, FCoE, etc */ - u16 vf_id; /* Virtual function ID for SRIOV VSIs */ + s16 vf_id; /* Virtual function ID for SRIOV VSIs */ struct i40e_tc_configuration tc_config; struct i40e_aqc_vsi_properties_data info; diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index a534fe67251a..6b9db7983693 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -48,7 +48,7 @@ static void i40e_vc_vf_broadcast(struct i40e_pf *pf, int i; for (i = 0; i < pf->num_alloc_vfs; i++, vf++) { - int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id; + int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id; /* Not all vfs are enabled so skip the ones that are not */ if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states) && !test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) @@ -74,7 +74,7 @@ static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf) struct i40e_pf *pf = vf->pf; struct i40e_hw *hw = &pf->hw; struct i40e_link_status *ls = &pf->hw.phy.link_info; - int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id; + int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id; pfe.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE; pfe.severity = I40E_PF_EVENT_SEVERITY_INFO; @@ -141,7 +141,7 @@ void i40e_vc_notify_vf_reset(struct i40e_vf *vf) !test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) return; - abs_vf_id = vf->vf_id + vf->pf->hw.func_caps.vf_base_id; + abs_vf_id = vf->vf_id + (int)vf->pf->hw.func_caps.vf_base_id; pfe.event = 
I40E_VIRTCHNL_EVENT_RESET_IMPENDING; pfe.severity = I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM; @@ -2516,11 +2516,11 @@ static int i40e_vc_validate_vf_msg(struct i40e_vf *vf, u32 v_opcode, * called from the common aeq/arq handler to * process request from VF **/ -int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode, +int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen) { struct i40e_hw *hw = &pf->hw; - unsigned int local_vf_id = vf_id - hw->func_caps.vf_base_id; + int local_vf_id = vf_id - (s16)hw->func_caps.vf_base_id; struct i40e_vf *vf; int ret; @@ -2622,9 +2622,10 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode, **/ int i40e_vc_process_vflr_event(struct i40e_pf *pf) { - u32 reg, reg_idx, bit_idx, vf_id; struct i40e_hw *hw = &pf->hw; + u32 reg, reg_idx, bit_idx; struct i40e_vf *vf; + int vf_id; if (!test_bit(__I40E_VFLR_EVENT_PENDING, &pf->state)) return 0; diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h index bf54873d9c04..875174141451 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h @@ -77,7 +77,7 @@ struct i40e_vf { struct i40e_pf *pf; /* VF id in the PF space */ - u16 vf_id; + s16 vf_id; /* all VF vsis connect to the same parent */ enum i40e_switch_element_types parent_type; struct i40e_virtchnl_version_info vf_ver; @@ -121,7 +121,7 @@ struct i40e_vf { void i40e_free_vfs(struct i40e_pf *pf); int i40e_pci_sriov_configure(struct pci_dev *dev, int num_vfs); int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs); -int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode, +int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen); int i40e_vc_process_vflr_event(struct i40e_pf *pf); void i40e_reset_vf(struct i40e_vf *vf, bool flr); From de38fef610f4e72fdf506bb84ddb05b46f4bf653 Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Wed, 13 Apr 2016 03:08:30 -0700 Subject: [PATCH 1198/1649] i40e: Drop extra copy of function i40e_release_rx_desc was in two files, but was only used and needed in txrx.c. Get rid of the extra copy. Change-Id: I86e18239aa03531fc198b6c052847475084a9200 Signed-off-by: Jesse Brandeburg Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_main.c | 18 ------------------ 1 file changed, 18 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index a45748e52ef6..d6b1b98717bc 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -396,24 +396,6 @@ static void i40e_tx_timeout(struct net_device *netdev) pf->tx_timeout_recovery_level++; } -/** - * i40e_release_rx_desc - Store the new tail and head values - * @rx_ring: ring to bump - * @val: new head index - **/ -static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val) -{ - rx_ring->next_to_use = val; - - /* Force memory writes to complete before letting h/w - * know there are new descriptors to fetch. (Only - * applicable for weak-ordered memory model archs, - * such as IA-64). 
- */ - wmb(); - writel(val, rx_ring->tail); -} - /** * i40e_get_vsi_stats_struct - Get System Network Statistics * @vsi: the VSI we care about From a3aa5036cf3798c18fe22041fbfbac01642657e0 Mon Sep 17 00:00:00 2001 From: Catherine Sullivan Date: Wed, 13 Apr 2016 03:08:31 -0700 Subject: [PATCH 1199/1649] i40e: Update device ids for X722 Add a device ID for X722. Change-Id: I574f2345ab341de98a6a1c212d0603af853e48b0 Signed-off-by: Catherine Sullivan Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_main.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index d6b1b98717bc..cab639bad152 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -91,6 +91,7 @@ static const struct pci_device_id i40e_pci_tbl[] = { {PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_X722), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_X722), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_I_X722), 0}, + {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_I_X722), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2_A), 0}, /* required last entry */ From 5a6fc256e7b64b3073688cc72fae357612cf31c6 Mon Sep 17 00:00:00 2001 From: Harshitha Ramamurthy Date: Wed, 13 Apr 2016 03:08:32 -0700 Subject: [PATCH 1200/1649] i40e/i40evf : Bump driver version from 1.5.5 to 1.5.10 Signed-off-by: Harshitha Ramamurthy Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_main.c | 2 +- drivers/net/ethernet/intel/i40evf/i40evf_main.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index cab639bad152..19a2d3033f7f 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -46,7 +46,7 @@ static const char i40e_driver_string[] = #define DRV_VERSION_MAJOR 1 #define DRV_VERSION_MINOR 5 -#define DRV_VERSION_BUILD 5 +#define DRV_VERSION_BUILD 10 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ __stringify(DRV_VERSION_MINOR) "." \ __stringify(DRV_VERSION_BUILD) DRV_KERN diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c index d1c4afdd9435..b9b1dd831a16 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c @@ -38,7 +38,7 @@ static const char i40evf_driver_string[] = #define DRV_VERSION_MAJOR 1 #define DRV_VERSION_MINOR 5 -#define DRV_VERSION_BUILD 5 +#define DRV_VERSION_BUILD 10 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ __stringify(DRV_VERSION_MINOR) "." \ __stringify(DRV_VERSION_BUILD) \ From dcb57456e73f204beff12e4532aaf573d1115114 Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Wed, 13 Apr 2016 16:08:25 -0700 Subject: [PATCH 1201/1649] i40e/i40evf: fix I40E_MASK signed shift overflow warnings GCC 6 has a new warning which will display when you attempt to left shift a signed value beyond the storage size of the type. I40E_MASK generates a mask value for 32bit registers. Properly typecast the mask value and place the values in parenthesis to prevent macro expansion issues. 
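
As a stand-alone illustration of the same fix (hypothetical names: REG_MASK_OLD and REG_MASK_NEW stand in for the old and new I40E_MASK, uint32_t for the kernel's u32), the sketch below shows why both the unsigned cast and the extra parentheses matter: shifting into bit 31 happens on an unsigned 32-bit value, and a compound mask argument is no longer torn apart by operator precedence.

#include <stdint.h>
#include <stdio.h>

/* old form: bare signed shift, unparenthesized arguments;
 * REG_MASK_OLD(0x1 | 0x2, 4) would expand to 0x1 | (0x2 << 4) == 0x21,
 * and REG_MASK_OLD(0x1, 31) shifts a signed int into its sign bit
 */
#define REG_MASK_OLD(mask, shift)	(mask << shift)

/* patched form: cast to the 32-bit register width, parenthesize both args */
#define REG_MASK_NEW(mask, shift)	((uint32_t)(mask) << (shift))

int main(void)
{
	/* shifting into bit 31 is done on an unsigned 32-bit value */
	uint32_t valid = REG_MASK_NEW(0x1, 31);		/* 0x80000000 */

	/* a compound mask argument expands as intended: (0x1 | 0x2) << 4 */
	uint32_t field = REG_MASK_NEW(0x1 | 0x2, 4);	/* 0x30 */

	printf("%#x %#x\n", (unsigned int)valid, (unsigned int)field);
	return 0;
}
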
Signed-off-by: Jacob Keller Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_type.h | 2 +- drivers/net/ethernet/intel/i40evf/i40e_type.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h index 8aa14aacdd35..bd5f13bef83c 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_type.h +++ b/drivers/net/ethernet/intel/i40e/i40e_type.h @@ -36,7 +36,7 @@ #include "i40e_devids.h" /* I40E_MASK is a macro used on 32 bit registers */ -#define I40E_MASK(mask, shift) (mask << shift) +#define I40E_MASK(mask, shift) ((u32)(mask) << (shift)) #define I40E_MAX_VSI_QP 16 #define I40E_MAX_VF_VSI 3 diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h index bfc97c2f22bb..97f96e0d9c4c 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_type.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_type.h @@ -36,7 +36,7 @@ #include "i40e_devids.h" /* I40E_MASK is a macro used on 32 bit registers */ -#define I40E_MASK(mask, shift) (mask << shift) +#define I40E_MASK(mask, shift) ((u32)(mask) << (shift)) #define I40E_MAX_VSI_QP 16 #define I40E_MAX_VF_VSI 3 From 2101bac2d4c26208fa0d1d9ffd8b83ad0199d61a Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Wed, 13 Apr 2016 16:08:26 -0700 Subject: [PATCH 1202/1649] i40e: make use of BIT() macro to prevent left shift of signed values Signed-off-by: Jacob Keller Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- .../net/ethernet/intel/i40e/i40e_adminq_cmd.h | 53 +++++++++---------- 1 file changed, 25 insertions(+), 28 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h index 48c0c004fcc1..eacbe7430b48 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h @@ -78,17 +78,17 @@ struct i40e_aq_desc { #define I40E_AQ_FLAG_EI_SHIFT 14 #define I40E_AQ_FLAG_FE_SHIFT 15 -#define I40E_AQ_FLAG_DD (1 << I40E_AQ_FLAG_DD_SHIFT) /* 0x1 */ -#define I40E_AQ_FLAG_CMP (1 << I40E_AQ_FLAG_CMP_SHIFT) /* 0x2 */ -#define I40E_AQ_FLAG_ERR (1 << I40E_AQ_FLAG_ERR_SHIFT) /* 0x4 */ -#define I40E_AQ_FLAG_VFE (1 << I40E_AQ_FLAG_VFE_SHIFT) /* 0x8 */ -#define I40E_AQ_FLAG_LB (1 << I40E_AQ_FLAG_LB_SHIFT) /* 0x200 */ -#define I40E_AQ_FLAG_RD (1 << I40E_AQ_FLAG_RD_SHIFT) /* 0x400 */ -#define I40E_AQ_FLAG_VFC (1 << I40E_AQ_FLAG_VFC_SHIFT) /* 0x800 */ -#define I40E_AQ_FLAG_BUF (1 << I40E_AQ_FLAG_BUF_SHIFT) /* 0x1000 */ -#define I40E_AQ_FLAG_SI (1 << I40E_AQ_FLAG_SI_SHIFT) /* 0x2000 */ -#define I40E_AQ_FLAG_EI (1 << I40E_AQ_FLAG_EI_SHIFT) /* 0x4000 */ -#define I40E_AQ_FLAG_FE (1 << I40E_AQ_FLAG_FE_SHIFT) /* 0x8000 */ +#define I40E_AQ_FLAG_DD BIT(I40E_AQ_FLAG_DD_SHIFT) /* 0x1 */ +#define I40E_AQ_FLAG_CMP BIT(I40E_AQ_FLAG_CMP_SHIFT) /* 0x2 */ +#define I40E_AQ_FLAG_ERR BIT(I40E_AQ_FLAG_ERR_SHIFT) /* 0x4 */ +#define I40E_AQ_FLAG_VFE BIT(I40E_AQ_FLAG_VFE_SHIFT) /* 0x8 */ +#define I40E_AQ_FLAG_LB BIT(I40E_AQ_FLAG_LB_SHIFT) /* 0x200 */ +#define I40E_AQ_FLAG_RD BIT(I40E_AQ_FLAG_RD_SHIFT) /* 0x400 */ +#define I40E_AQ_FLAG_VFC BIT(I40E_AQ_FLAG_VFC_SHIFT) /* 0x800 */ +#define I40E_AQ_FLAG_BUF BIT(I40E_AQ_FLAG_BUF_SHIFT) /* 0x1000 */ +#define I40E_AQ_FLAG_SI BIT(I40E_AQ_FLAG_SI_SHIFT) /* 0x2000 */ +#define I40E_AQ_FLAG_EI BIT(I40E_AQ_FLAG_EI_SHIFT) /* 0x4000 */ +#define I40E_AQ_FLAG_FE BIT(I40E_AQ_FLAG_FE_SHIFT) /* 0x8000 */ /* error codes */ enum i40e_admin_queue_err { @@ -1628,11 +1628,11 @@ 
enum i40e_aq_phy_type { enum i40e_aq_link_speed { I40E_LINK_SPEED_UNKNOWN = 0, - I40E_LINK_SPEED_100MB = (1 << I40E_LINK_SPEED_100MB_SHIFT), - I40E_LINK_SPEED_1GB = (1 << I40E_LINK_SPEED_1000MB_SHIFT), - I40E_LINK_SPEED_10GB = (1 << I40E_LINK_SPEED_10GB_SHIFT), - I40E_LINK_SPEED_40GB = (1 << I40E_LINK_SPEED_40GB_SHIFT), - I40E_LINK_SPEED_20GB = (1 << I40E_LINK_SPEED_20GB_SHIFT) + I40E_LINK_SPEED_100MB = BIT(I40E_LINK_SPEED_100MB_SHIFT), + I40E_LINK_SPEED_1GB = BIT(I40E_LINK_SPEED_1000MB_SHIFT), + I40E_LINK_SPEED_10GB = BIT(I40E_LINK_SPEED_10GB_SHIFT), + I40E_LINK_SPEED_40GB = BIT(I40E_LINK_SPEED_40GB_SHIFT), + I40E_LINK_SPEED_20GB = BIT(I40E_LINK_SPEED_20GB_SHIFT) }; struct i40e_aqc_module_desc { @@ -1903,9 +1903,9 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_write); /* Used for 0x0704 as well as for 0x0705 commands */ #define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT 1 #define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_MASK \ - (1 << I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT) + BIT(I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT) #define I40E_AQ_ANVM_FEATURE 0 -#define I40E_AQ_ANVM_IMMEDIATE_FIELD (1 << FEATURE_OR_IMMEDIATE_SHIFT) +#define I40E_AQ_ANVM_IMMEDIATE_FIELD BIT(FEATURE_OR_IMMEDIATE_SHIFT) struct i40e_aqc_nvm_config_data_feature { __le16 feature_id; #define I40E_AQ_ANVM_FEATURE_OPTION_OEM_ONLY 0x01 @@ -2202,13 +2202,11 @@ I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_get_cee_dcb_cfg_resp); */ struct i40e_aqc_lldp_set_local_mib { #define SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT 0 -#define SET_LOCAL_MIB_AC_TYPE_DCBX_MASK (1 << SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT) -#define SET_LOCAL_MIB_AC_TYPE_DCBX_MASK (1 << \ - SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT) +#define SET_LOCAL_MIB_AC_TYPE_DCBX_MASK BIT(SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT) #define SET_LOCAL_MIB_AC_TYPE_LOCAL_MIB 0x0 #define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_SHIFT (1) -#define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_MASK (1 << \ - SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_SHIFT) +#define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_MASK \ + BIT(SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_SHIFT) #define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS 0x1 u8 type; u8 reserved0; @@ -2226,7 +2224,7 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_set_local_mib); struct i40e_aqc_lldp_stop_start_specific_agent { #define I40E_AQC_START_SPECIFIC_AGENT_SHIFT 0 #define I40E_AQC_START_SPECIFIC_AGENT_MASK \ - (1 << I40E_AQC_START_SPECIFIC_AGENT_SHIFT) + BIT(I40E_AQC_START_SPECIFIC_AGENT_SHIFT) u8 command; u8 reserved[15]; }; @@ -2279,7 +2277,7 @@ struct i40e_aqc_del_udp_tunnel_completion { I40E_CHECK_CMD_LENGTH(i40e_aqc_del_udp_tunnel_completion); struct i40e_aqc_get_set_rss_key { -#define I40E_AQC_SET_RSS_KEY_VSI_VALID (0x1 << 15) +#define I40E_AQC_SET_RSS_KEY_VSI_VALID BIT(15) #define I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT 0 #define I40E_AQC_SET_RSS_KEY_VSI_ID_MASK (0x3FF << \ I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT) @@ -2299,14 +2297,13 @@ struct i40e_aqc_get_set_rss_key_data { I40E_CHECK_STRUCT_LEN(0x34, i40e_aqc_get_set_rss_key_data); struct i40e_aqc_get_set_rss_lut { -#define I40E_AQC_SET_RSS_LUT_VSI_VALID (0x1 << 15) +#define I40E_AQC_SET_RSS_LUT_VSI_VALID BIT(15) #define I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT 0 #define I40E_AQC_SET_RSS_LUT_VSI_ID_MASK (0x3FF << \ I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT) __le16 vsi_id; #define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT 0 -#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK (0x1 << \ - I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) +#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK BIT(I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) #define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI 0 #define 
I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF 1 From ae63bff0d7f333677e7ec532e9c315c74a510403 Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Wed, 13 Apr 2016 16:08:27 -0700 Subject: [PATCH 1203/1649] i40evf: make use of BIT() macro to avoid signed left shift Signed-off-by: Jacob Keller Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- .../ethernet/intel/i40evf/i40e_adminq_cmd.h | 44 +++++++++---------- 1 file changed, 22 insertions(+), 22 deletions(-) diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h index 180ae57c51c5..3114dcfa1724 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h @@ -78,17 +78,17 @@ struct i40e_aq_desc { #define I40E_AQ_FLAG_EI_SHIFT 14 #define I40E_AQ_FLAG_FE_SHIFT 15 -#define I40E_AQ_FLAG_DD (1 << I40E_AQ_FLAG_DD_SHIFT) /* 0x1 */ -#define I40E_AQ_FLAG_CMP (1 << I40E_AQ_FLAG_CMP_SHIFT) /* 0x2 */ -#define I40E_AQ_FLAG_ERR (1 << I40E_AQ_FLAG_ERR_SHIFT) /* 0x4 */ -#define I40E_AQ_FLAG_VFE (1 << I40E_AQ_FLAG_VFE_SHIFT) /* 0x8 */ -#define I40E_AQ_FLAG_LB (1 << I40E_AQ_FLAG_LB_SHIFT) /* 0x200 */ -#define I40E_AQ_FLAG_RD (1 << I40E_AQ_FLAG_RD_SHIFT) /* 0x400 */ -#define I40E_AQ_FLAG_VFC (1 << I40E_AQ_FLAG_VFC_SHIFT) /* 0x800 */ -#define I40E_AQ_FLAG_BUF (1 << I40E_AQ_FLAG_BUF_SHIFT) /* 0x1000 */ -#define I40E_AQ_FLAG_SI (1 << I40E_AQ_FLAG_SI_SHIFT) /* 0x2000 */ -#define I40E_AQ_FLAG_EI (1 << I40E_AQ_FLAG_EI_SHIFT) /* 0x4000 */ -#define I40E_AQ_FLAG_FE (1 << I40E_AQ_FLAG_FE_SHIFT) /* 0x8000 */ +#define I40E_AQ_FLAG_DD BIT(I40E_AQ_FLAG_DD_SHIFT) /* 0x1 */ +#define I40E_AQ_FLAG_CMP BIT(I40E_AQ_FLAG_CMP_SHIFT) /* 0x2 */ +#define I40E_AQ_FLAG_ERR BIT(I40E_AQ_FLAG_ERR_SHIFT) /* 0x4 */ +#define I40E_AQ_FLAG_VFE BIT(I40E_AQ_FLAG_VFE_SHIFT) /* 0x8 */ +#define I40E_AQ_FLAG_LB BIT(I40E_AQ_FLAG_LB_SHIFT) /* 0x200 */ +#define I40E_AQ_FLAG_RD BIT(I40E_AQ_FLAG_RD_SHIFT) /* 0x400 */ +#define I40E_AQ_FLAG_VFC BIT(I40E_AQ_FLAG_VFC_SHIFT) /* 0x800 */ +#define I40E_AQ_FLAG_BUF BIT(I40E_AQ_FLAG_BUF_SHIFT) /* 0x1000 */ +#define I40E_AQ_FLAG_SI BIT(I40E_AQ_FLAG_SI_SHIFT) /* 0x2000 */ +#define I40E_AQ_FLAG_EI BIT(I40E_AQ_FLAG_EI_SHIFT) /* 0x4000 */ +#define I40E_AQ_FLAG_FE BIT(I40E_AQ_FLAG_FE_SHIFT) /* 0x8000 */ /* error codes */ enum i40e_admin_queue_err { @@ -1625,11 +1625,11 @@ enum i40e_aq_phy_type { enum i40e_aq_link_speed { I40E_LINK_SPEED_UNKNOWN = 0, - I40E_LINK_SPEED_100MB = (1 << I40E_LINK_SPEED_100MB_SHIFT), - I40E_LINK_SPEED_1GB = (1 << I40E_LINK_SPEED_1000MB_SHIFT), - I40E_LINK_SPEED_10GB = (1 << I40E_LINK_SPEED_10GB_SHIFT), - I40E_LINK_SPEED_40GB = (1 << I40E_LINK_SPEED_40GB_SHIFT), - I40E_LINK_SPEED_20GB = (1 << I40E_LINK_SPEED_20GB_SHIFT) + I40E_LINK_SPEED_100MB = BIT(I40E_LINK_SPEED_100MB_SHIFT), + I40E_LINK_SPEED_1GB = BIT(I40E_LINK_SPEED_1000MB_SHIFT), + I40E_LINK_SPEED_10GB = BIT(I40E_LINK_SPEED_10GB_SHIFT), + I40E_LINK_SPEED_40GB = BIT(I40E_LINK_SPEED_40GB_SHIFT), + I40E_LINK_SPEED_20GB = BIT(I40E_LINK_SPEED_20GB_SHIFT) }; struct i40e_aqc_module_desc { @@ -1900,9 +1900,9 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_write); /* Used for 0x0704 as well as for 0x0705 commands */ #define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT 1 #define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_MASK \ - (1 << I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT) + BIT(I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT) #define I40E_AQ_ANVM_FEATURE 0 -#define I40E_AQ_ANVM_IMMEDIATE_FIELD (1 << FEATURE_OR_IMMEDIATE_SHIFT) +#define I40E_AQ_ANVM_IMMEDIATE_FIELD BIT(FEATURE_OR_IMMEDIATE_SHIFT) 
struct i40e_aqc_nvm_config_data_feature { __le16 feature_id; #define I40E_AQ_ANVM_FEATURE_OPTION_OEM_ONLY 0x01 @@ -2171,7 +2171,7 @@ struct i40e_aqc_del_udp_tunnel_completion { I40E_CHECK_CMD_LENGTH(i40e_aqc_del_udp_tunnel_completion); struct i40e_aqc_get_set_rss_key { -#define I40E_AQC_SET_RSS_KEY_VSI_VALID (0x1 << 15) +#define I40E_AQC_SET_RSS_KEY_VSI_VALID BIT(15) #define I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT 0 #define I40E_AQC_SET_RSS_KEY_VSI_ID_MASK (0x3FF << \ I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT) @@ -2191,14 +2191,14 @@ struct i40e_aqc_get_set_rss_key_data { I40E_CHECK_STRUCT_LEN(0x34, i40e_aqc_get_set_rss_key_data); struct i40e_aqc_get_set_rss_lut { -#define I40E_AQC_SET_RSS_LUT_VSI_VALID (0x1 << 15) +#define I40E_AQC_SET_RSS_LUT_VSI_VALID BIT(15) #define I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT 0 #define I40E_AQC_SET_RSS_LUT_VSI_ID_MASK (0x3FF << \ I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT) __le16 vsi_id; #define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT 0 -#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK (0x1 << \ - I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) +#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK \ + BIT(I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) #define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI 0 #define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF 1 From 1c7b4a23d12f63a2864c2a67ad96e74d0dbbf39c Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Thu, 14 Apr 2016 17:19:25 -0400 Subject: [PATCH 1204/1649] i40e/i40evf: Add support for GSO partial with UDP_TUNNEL_CSUM and GRE_CSUM This patch makes it so that i40e and i40evf can use GSO_PARTIAL to support segmentation for frames with checksums enabled in outer headers. As a result we can now send data over these types of tunnels at over 20Gb/s versus the 12Gb/s that was previously possible on my system. The advantage with the i40e parts is that this offload is mostly transparent as the hardware still deals with the inner and/or outer IPv4 headers so the IP ID is still incrementing for both when this offload is performed. 
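
For reference, the tx-path checks added below boil down to one condition: only ask the hardware to insert the outer UDP checksum when the stack has not also requested GSO partial. A minimal stand-alone sketch of that condition follows; the flag values and the helper name are hypothetical, while the real code tests skb_shinfo(skb)->gso_type against SKB_GSO_PARTIAL and SKB_GSO_UDP_TUNNEL_CSUM as shown in the diff.

#include <stdio.h>

/* hypothetical flag values; the kernel's SKB_GSO_* bits differ */
#define GSO_UDP_TUNNEL_CSUM	(1u << 0)
#define GSO_PARTIAL		(1u << 1)

/* mirrors the test added in i40e_tso()/i40e_tx_enable_csum(): the device
 * is only asked to insert the outer UDP checksum when GSO partial is not
 * in use for this skb
 */
static int need_outer_udp_csum_offload(unsigned int gso_type)
{
	return !(gso_type & GSO_PARTIAL) &&
	       (gso_type & GSO_UDP_TUNNEL_CSUM);
}

int main(void)
{
	printf("%d\n", need_outer_udp_csum_offload(GSO_UDP_TUNNEL_CSUM));			/* 1 */
	printf("%d\n", need_outer_udp_csum_offload(GSO_UDP_TUNNEL_CSUM | GSO_PARTIAL));	/* 0 */
	return 0;
}
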
Signed-off-by: Alexander Duyck Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_main.c | 10 ++++++++-- drivers/net/ethernet/intel/i40e/i40e_txrx.c | 7 ++++++- drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 7 ++++++- drivers/net/ethernet/intel/i40evf/i40evf_main.c | 10 ++++++++-- 4 files changed, 28 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 19a2d3033f7f..8e6c0f2487d7 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -9130,20 +9130,25 @@ static int i40e_config_netdev(struct i40e_vsi *vsi) NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_GSO_GRE | + NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_IPIP | NETIF_F_GSO_SIT | NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_UDP_TUNNEL_CSUM | + NETIF_F_GSO_PARTIAL | NETIF_F_SCTP_CRC | NETIF_F_RXHASH | NETIF_F_RXCSUM | 0; if (!(pf->flags & I40E_FLAG_OUTER_UDP_CSUM_CAPABLE)) - netdev->hw_enc_features ^= NETIF_F_GSO_UDP_TUNNEL_CSUM; + netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM; + + netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM; /* record features VLANs can make use of */ - netdev->vlan_features |= netdev->hw_enc_features; + netdev->vlan_features |= netdev->hw_enc_features | + NETIF_F_TSO_MANGLEID; if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) netdev->hw_features |= NETIF_F_NTUPLE; @@ -9153,6 +9158,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi) NETIF_F_HW_VLAN_CTAG_RX; netdev->features |= netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER; + netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID; if (vsi->type == I40E_VSI_MAIN) { SET_NETDEV_DEV(netdev, &pf->pdev->dev); diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 285efe955c64..2765d7efdd9c 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -2301,11 +2301,15 @@ static int i40e_tso(struct sk_buff *skb, u8 *hdr_len, u64 *cd_type_cmd_tso_mss) } if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE | + SKB_GSO_GRE_CSUM | SKB_GSO_IPIP | SKB_GSO_SIT | SKB_GSO_UDP_TUNNEL | SKB_GSO_UDP_TUNNEL_CSUM)) { - if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM) { + if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) && + (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) { + l4.udp->len = 0; + /* determine offset of outer transport header */ l4_offset = l4.hdr - skb->data; @@ -2482,6 +2486,7 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, /* indicate if we need to offload outer UDP header */ if ((*tx_flags & I40E_TX_FLAGS_TSO) && + !(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) && (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) tunnel |= I40E_TXD_CTX_QW0_L4T_CS_MASK; diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c index 4633235ee70b..ede8dfc189bc 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c @@ -1566,11 +1566,15 @@ static int i40e_tso(struct sk_buff *skb, u8 *hdr_len, u64 *cd_type_cmd_tso_mss) } if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE | + SKB_GSO_GRE_CSUM | SKB_GSO_IPIP | SKB_GSO_SIT | SKB_GSO_UDP_TUNNEL | SKB_GSO_UDP_TUNNEL_CSUM)) { - if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM) { + if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) && + (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) { + l4.udp->len = 0; + /* determine offset of outer 
transport header */ l4_offset = l4.hdr - skb->data; @@ -1705,6 +1709,7 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, /* indicate if we need to offload outer UDP header */ if ((*tx_flags & I40E_TX_FLAGS_TSO) && + !(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) && (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) tunnel |= I40E_TXD_CTX_QW0_L4T_CS_MASK; diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c index b9b1dd831a16..9f0bd7acc22a 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c @@ -2240,20 +2240,25 @@ int i40evf_process_config(struct i40evf_adapter *adapter) NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_GSO_GRE | + NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_IPIP | NETIF_F_GSO_SIT | NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_UDP_TUNNEL_CSUM | + NETIF_F_GSO_PARTIAL | NETIF_F_SCTP_CRC | NETIF_F_RXHASH | NETIF_F_RXCSUM | 0; if (!(adapter->flags & I40EVF_FLAG_OUTER_UDP_CSUM_CAPABLE)) - netdev->hw_enc_features ^= NETIF_F_GSO_UDP_TUNNEL_CSUM; + netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM; + + netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM; /* record features VLANs can make use of */ - netdev->vlan_features |= netdev->hw_enc_features; + netdev->vlan_features |= netdev->hw_enc_features | + NETIF_F_TSO_MANGLEID; /* Write features and hw_features separately to avoid polluting * with, or dropping, features that are set when we registgered. @@ -2261,6 +2266,7 @@ int i40evf_process_config(struct i40evf_adapter *adapter) netdev->hw_features |= netdev->hw_enc_features; netdev->features |= netdev->hw_enc_features | I40EVF_VLAN_FEATURES; + netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID; /* disable VLAN features if not supported */ if (!(vfres->vf_offload_flags & I40E_VIRTCHNL_VF_OFFLOAD_VLAN)) From 70e927b98bb632e0c987818835aacd6787ebe107 Mon Sep 17 00:00:00 2001 From: Marek Vasut Date: Mon, 2 May 2016 02:47:31 +0200 Subject: [PATCH 1205/1649] mdio_bus: Fix MDIO bus scanning in __mdiobus_register() Since commit b74766a0a0fe ("phylib: don't return NULL from get_phy_device()") in linux-next, phy_get_device() will return ERR_PTR(-ENODEV) instead of NULL if the PHY device ID is all ones. This causes problem with stmmac driver and likely some other drivers which call mdiobus_register(). I triggered this bug on SoCFPGA MCVEVK board with linux-next 20160427 and 20160428. In case of the stmmac, if there is no PHY node specified in the DT for the stmmac block, the stmmac driver ( drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c function stmmac_mdio_register() ) will call mdiobus_register() , which will register the MDIO bus and probe for the PHY. The mdiobus_register() resp. __mdiobus_register() iterates over all of the addresses on the MDIO bus and calls mdiobus_scan() for each of them, which invokes get_phy_device(). Before the aforementioned patch, the mdiobus_scan() would return NULL if no PHY was found on a given address and mdiobus_register() would continue and try the next PHY address. Now, mdiobus_scan() returns ERR_PTR(-ENODEV), which is caught by the 'if (IS_ERR(phydev))' condition and the loop exits immediately if the PHY address does not contain PHY. Repair this by explicitly checking for the ERR_PTR(-ENODEV) and if this error comes around, continue with the next PHY address. Signed-off-by: Marek Vasut Cc: Arnd Bergmann Cc: David S. Miller Cc: Dinh Nguyen Cc: Florian Fainelli Cc: Sergei Shtylyov Acked-by: Florian Fainelli Signed-off-by: David S. 
Miller --- drivers/net/phy/mdio_bus.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c index 499003ee8055..388f9922647b 100644 --- a/drivers/net/phy/mdio_bus.c +++ b/drivers/net/phy/mdio_bus.c @@ -333,7 +333,7 @@ int __mdiobus_register(struct mii_bus *bus, struct module *owner) struct phy_device *phydev; phydev = mdiobus_scan(bus, i); - if (IS_ERR(phydev)) { + if (IS_ERR(phydev) && (PTR_ERR(phydev) != -ENODEV)) { err = PTR_ERR(phydev); goto error; } From 0970f5b3665933f5f0d069607c78fb10bd918b62 Mon Sep 17 00:00:00 2001 From: Marcelo Ricardo Leitner Date: Fri, 29 Apr 2016 14:17:08 -0300 Subject: [PATCH 1206/1649] sctp: signal sk_data_ready earlier on data chunks reception Dave Miller pointed out that fb586f25300f ("sctp: delay calls to sk_data_ready() as much as possible") may insert latency specially if the receiving application is running on another CPU and that it would be better if we signalled as early as possible. This patch thus basically inverts the logic on fb586f25300f and signals it as early as possible, similar to what we had before. Fixes: fb586f25300f ("sctp: delay calls to sk_data_ready() as much as possible") Reported-by: Dave Miller Signed-off-by: Marcelo Ricardo Leitner Signed-off-by: David S. Miller --- include/net/sctp/structs.h | 2 +- net/sctp/sm_sideeffect.c | 7 +++---- net/sctp/ulpqueue.c | 25 ++++++++++++++++--------- 3 files changed, 20 insertions(+), 14 deletions(-) diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index 558bae3cbe0d..16b013a6191c 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h @@ -218,7 +218,7 @@ struct sctp_sock { frag_interleave:1, recvrcvinfo:1, recvnxtinfo:1, - pending_data_ready:1; + data_ready_signalled:1; atomic_t pd_mode; /* Receive to here while partial delivery is in effect. */ diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c index e8f0112f9b28..aa3712259368 100644 --- a/net/sctp/sm_sideeffect.c +++ b/net/sctp/sm_sideeffect.c @@ -1741,10 +1741,9 @@ out: } else if (local_cork) error = sctp_outq_uncork(&asoc->outqueue, gfp); - if (sp->pending_data_ready) { - sk->sk_data_ready(sk); - sp->pending_data_ready = 0; - } + if (sp->data_ready_signalled) + sp->data_ready_signalled = 0; + return error; nomem: error = -ENOMEM; diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c index ec12a8920e5f..ec166d2bd2d9 100644 --- a/net/sctp/ulpqueue.c +++ b/net/sctp/ulpqueue.c @@ -194,6 +194,7 @@ static int sctp_ulpq_clear_pd(struct sctp_ulpq *ulpq) int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event) { struct sock *sk = ulpq->asoc->base.sk; + struct sctp_sock *sp = sctp_sk(sk); struct sk_buff_head *queue, *skb_list; struct sk_buff *skb = sctp_event2skb(event); int clear_pd = 0; @@ -211,7 +212,7 @@ int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event) sk_incoming_cpu_update(sk); } /* Check if the user wishes to receive this event. */ - if (!sctp_ulpevent_is_enabled(event, &sctp_sk(sk)->subscribe)) + if (!sctp_ulpevent_is_enabled(event, &sp->subscribe)) goto out_free; /* If we are in partial delivery mode, post to the lobby until @@ -219,7 +220,7 @@ int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event) * the association the cause of the partial delivery. 
*/ - if (atomic_read(&sctp_sk(sk)->pd_mode) == 0) { + if (atomic_read(&sp->pd_mode) == 0) { queue = &sk->sk_receive_queue; } else { if (ulpq->pd_mode) { @@ -231,7 +232,7 @@ int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event) if ((event->msg_flags & MSG_NOTIFICATION) || (SCTP_DATA_NOT_FRAG == (event->msg_flags & SCTP_DATA_FRAG_MASK))) - queue = &sctp_sk(sk)->pd_lobby; + queue = &sp->pd_lobby; else { clear_pd = event->msg_flags & MSG_EOR; queue = &sk->sk_receive_queue; @@ -242,10 +243,10 @@ int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event) * can queue this to the receive queue instead * of the lobby. */ - if (sctp_sk(sk)->frag_interleave) + if (sp->frag_interleave) queue = &sk->sk_receive_queue; else - queue = &sctp_sk(sk)->pd_lobby; + queue = &sp->pd_lobby; } } @@ -264,8 +265,10 @@ int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event) if (clear_pd) sctp_ulpq_clear_pd(ulpq); - if (queue == &sk->sk_receive_queue) - sctp_sk(sk)->pending_data_ready = 1; + if (queue == &sk->sk_receive_queue && !sp->data_ready_signalled) { + sp->data_ready_signalled = 1; + sk->sk_data_ready(sk); + } return 1; out_free: @@ -1126,11 +1129,13 @@ void sctp_ulpq_abort_pd(struct sctp_ulpq *ulpq, gfp_t gfp) { struct sctp_ulpevent *ev = NULL; struct sock *sk; + struct sctp_sock *sp; if (!ulpq->pd_mode) return; sk = ulpq->asoc->base.sk; + sp = sctp_sk(sk); if (sctp_ulpevent_type_enabled(SCTP_PARTIAL_DELIVERY_EVENT, &sctp_sk(sk)->subscribe)) ev = sctp_ulpevent_make_pdapi(ulpq->asoc, @@ -1140,6 +1145,8 @@ void sctp_ulpq_abort_pd(struct sctp_ulpq *ulpq, gfp_t gfp) __skb_queue_tail(&sk->sk_receive_queue, sctp_event2skb(ev)); /* If there is data waiting, send it up the socket now. */ - if (sctp_ulpq_clear_pd(ulpq) || ev) - sctp_sk(sk)->pending_data_ready = 1; + if ((sctp_ulpq_clear_pd(ulpq) || ev) && !sp->data_ready_signalled) { + sp->data_ready_signalled = 1; + sk->sk_data_ready(sk); + } } From 158bc065f29c9be0919d18aefab320161936b3a8 Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Thu, 28 Apr 2016 21:24:06 -0400 Subject: [PATCH 1207/1649] net: dsa: mv88e6xxx: replace ds with ps where possible The dsa_switch structure ds is actually needed in very few places, mostly during setup of the switch. The private structure ps is however needed nearly everywhere. Pass ps, not ds internally. [vd: rebased Andrew's patch.] Signed-off-by: Andrew Lunn Signed-off-by: Vivien Didelot Signed-off-by: David S. Miller --- drivers/net/dsa/mv88e6123.c | 14 +- drivers/net/dsa/mv88e6131.c | 22 +- drivers/net/dsa/mv88e6171.c | 14 +- drivers/net/dsa/mv88e6352.c | 24 +- drivers/net/dsa/mv88e6xxx.c | 917 ++++++++++++++++++------------------ drivers/net/dsa/mv88e6xxx.h | 14 +- 6 files changed, 511 insertions(+), 494 deletions(-) diff --git a/drivers/net/dsa/mv88e6123.c b/drivers/net/dsa/mv88e6123.c index 534ebc84de84..5535a42a6113 100644 --- a/drivers/net/dsa/mv88e6123.c +++ b/drivers/net/dsa/mv88e6123.c @@ -50,6 +50,7 @@ static const char *mv88e6123_drv_probe(struct device *dsa_dev, static int mv88e6123_setup_global(struct dsa_switch *ds) { + struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); u32 upstream_port = dsa_upstream_port(ds); int ret; u32 reg; @@ -62,7 +63,7 @@ static int mv88e6123_setup_global(struct dsa_switch *ds) * external PHYs to poll), don't discard packets with * excessive collisions, and mask all interrupt sources. 
*/ - ret = mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_CONTROL, 0x0000); + ret = mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_CONTROL, 0x0000); if (ret) return ret; @@ -73,26 +74,29 @@ static int mv88e6123_setup_global(struct dsa_switch *ds) reg = upstream_port << GLOBAL_MONITOR_CONTROL_INGRESS_SHIFT | upstream_port << GLOBAL_MONITOR_CONTROL_EGRESS_SHIFT | upstream_port << GLOBAL_MONITOR_CONTROL_ARP_SHIFT; - ret = mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_MONITOR_CONTROL, reg); + ret = mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_MONITOR_CONTROL, reg); if (ret) return ret; /* Disable remote management for now, and set the switch's * DSA device number. */ - return mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_CONTROL_2, + return mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_CONTROL_2, ds->index & 0x1f); } static int mv88e6123_setup(struct dsa_switch *ds) { + struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); int ret; - ret = mv88e6xxx_setup_common(ds); + ps->ds = ds; + + ret = mv88e6xxx_setup_common(ps); if (ret < 0) return ret; - ret = mv88e6xxx_switch_reset(ds, false); + ret = mv88e6xxx_switch_reset(ps, false); if (ret < 0) return ret; diff --git a/drivers/net/dsa/mv88e6131.c b/drivers/net/dsa/mv88e6131.c index c3eb9a884cfd..357ab794d720 100644 --- a/drivers/net/dsa/mv88e6131.c +++ b/drivers/net/dsa/mv88e6131.c @@ -56,6 +56,7 @@ static const char *mv88e6131_drv_probe(struct device *dsa_dev, static int mv88e6131_setup_global(struct dsa_switch *ds) { + struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); u32 upstream_port = dsa_upstream_port(ds); int ret; u32 reg; @@ -69,14 +70,14 @@ static int mv88e6131_setup_global(struct dsa_switch *ds) * to arbitrate between packet queues, set the maximum frame * size to 1632, and mask all interrupt sources. */ - ret = mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_CONTROL, + ret = mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_CONTROL, GLOBAL_CONTROL_PPU_ENABLE | GLOBAL_CONTROL_MAX_FRAME_1632); if (ret) return ret; /* Set the VLAN ethertype to 0x8100. */ - ret = mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_CORE_TAG_TYPE, 0x8100); + ret = mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_CORE_TAG_TYPE, 0x8100); if (ret) return ret; @@ -87,7 +88,7 @@ static int mv88e6131_setup_global(struct dsa_switch *ds) reg = upstream_port << GLOBAL_MONITOR_CONTROL_INGRESS_SHIFT | upstream_port << GLOBAL_MONITOR_CONTROL_EGRESS_SHIFT | GLOBAL_MONITOR_CONTROL_ARP_DISABLED; - ret = mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_MONITOR_CONTROL, reg); + ret = mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_MONITOR_CONTROL, reg); if (ret) return ret; @@ -96,11 +97,11 @@ static int mv88e6131_setup_global(struct dsa_switch *ds) * DSA device number. */ if (ds->dst->pd->nr_chips > 1) - ret = mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_CONTROL_2, + ret = mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_CONTROL_2, GLOBAL_CONTROL_2_MULTIPLE_CASCADE | (ds->index & 0x1f)); else - ret = mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_CONTROL_2, + ret = mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_CONTROL_2, GLOBAL_CONTROL_2_NO_CASCADE | (ds->index & 0x1f)); if (ret) @@ -109,7 +110,7 @@ static int mv88e6131_setup_global(struct dsa_switch *ds) /* Force the priority of IGMP/MLD snoop frames and ARP frames * to the highest setting. 
*/ - return mv88e6xxx_reg_write(ds, REG_GLOBAL2, GLOBAL2_PRIO_OVERRIDE, + return mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_PRIO_OVERRIDE, GLOBAL2_PRIO_OVERRIDE_FORCE_SNOOP | 7 << GLOBAL2_PRIO_OVERRIDE_SNOOP_SHIFT | GLOBAL2_PRIO_OVERRIDE_FORCE_ARP | @@ -118,15 +119,18 @@ static int mv88e6131_setup_global(struct dsa_switch *ds) static int mv88e6131_setup(struct dsa_switch *ds) { + struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); int ret; - ret = mv88e6xxx_setup_common(ds); + ps->ds = ds; + + ret = mv88e6xxx_setup_common(ps); if (ret < 0) return ret; - mv88e6xxx_ppu_state_init(ds); + mv88e6xxx_ppu_state_init(ps); - ret = mv88e6xxx_switch_reset(ds, false); + ret = mv88e6xxx_switch_reset(ps, false); if (ret < 0) return ret; diff --git a/drivers/net/dsa/mv88e6171.c b/drivers/net/dsa/mv88e6171.c index 841ffe14ef75..f75164dc3bd6 100644 --- a/drivers/net/dsa/mv88e6171.c +++ b/drivers/net/dsa/mv88e6171.c @@ -56,6 +56,7 @@ static const char *mv88e6171_drv_probe(struct device *dsa_dev, static int mv88e6171_setup_global(struct dsa_switch *ds) { + struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); u32 upstream_port = dsa_upstream_port(ds); int ret; u32 reg; @@ -67,7 +68,7 @@ static int mv88e6171_setup_global(struct dsa_switch *ds) /* Discard packets with excessive collisions, mask all * interrupt sources, enable PPU. */ - ret = mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_CONTROL, + ret = mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_CONTROL, GLOBAL_CONTROL_PPU_ENABLE | GLOBAL_CONTROL_DISCARD_EXCESS); if (ret) @@ -81,26 +82,29 @@ static int mv88e6171_setup_global(struct dsa_switch *ds) upstream_port << GLOBAL_MONITOR_CONTROL_EGRESS_SHIFT | upstream_port << GLOBAL_MONITOR_CONTROL_ARP_SHIFT | upstream_port << GLOBAL_MONITOR_CONTROL_MIRROR_SHIFT; - ret = mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_MONITOR_CONTROL, reg); + ret = mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_MONITOR_CONTROL, reg); if (ret) return ret; /* Disable remote management for now, and set the switch's * DSA device number. */ - return mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_CONTROL_2, + return mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_CONTROL_2, ds->index & 0x1f); } static int mv88e6171_setup(struct dsa_switch *ds) { + struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); int ret; - ret = mv88e6xxx_setup_common(ds); + ps->ds = ds; + + ret = mv88e6xxx_setup_common(ps); if (ret < 0) return ret; - ret = mv88e6xxx_switch_reset(ds, true); + ret = mv88e6xxx_switch_reset(ps, true); if (ret < 0) return ret; diff --git a/drivers/net/dsa/mv88e6352.c b/drivers/net/dsa/mv88e6352.c index 4afc24df56b8..c622a1d58480 100644 --- a/drivers/net/dsa/mv88e6352.c +++ b/drivers/net/dsa/mv88e6352.c @@ -73,6 +73,7 @@ static const char *mv88e6352_drv_probe(struct device *dsa_dev, static int mv88e6352_setup_global(struct dsa_switch *ds) { + struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); u32 upstream_port = dsa_upstream_port(ds); int ret; u32 reg; @@ -84,7 +85,7 @@ static int mv88e6352_setup_global(struct dsa_switch *ds) /* Discard packets with excessive collisions, * mask all interrupt sources, enable PPU (bit 14, undocumented). 
*/ - ret = mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_CONTROL, + ret = mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_CONTROL, GLOBAL_CONTROL_PPU_ENABLE | GLOBAL_CONTROL_DISCARD_EXCESS); if (ret) @@ -97,14 +98,14 @@ static int mv88e6352_setup_global(struct dsa_switch *ds) reg = upstream_port << GLOBAL_MONITOR_CONTROL_INGRESS_SHIFT | upstream_port << GLOBAL_MONITOR_CONTROL_EGRESS_SHIFT | upstream_port << GLOBAL_MONITOR_CONTROL_ARP_SHIFT; - ret = mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_MONITOR_CONTROL, reg); + ret = mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_MONITOR_CONTROL, reg); if (ret) return ret; /* Disable remote management for now, and set the switch's * DSA device number. */ - return mv88e6xxx_reg_write(ds, REG_GLOBAL, 0x1c, ds->index & 0x1f); + return mv88e6xxx_reg_write(ps, REG_GLOBAL, 0x1c, ds->index & 0x1f); } static int mv88e6352_setup(struct dsa_switch *ds) @@ -112,13 +113,15 @@ static int mv88e6352_setup(struct dsa_switch *ds) struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); int ret; - ret = mv88e6xxx_setup_common(ds); + ps->ds = ds; + + ret = mv88e6xxx_setup_common(ps); if (ret < 0) return ret; mutex_init(&ps->eeprom_mutex); - ret = mv88e6xxx_switch_reset(ds, true); + ret = mv88e6xxx_switch_reset(ps, true); if (ret < 0) return ret; @@ -136,7 +139,7 @@ static int mv88e6352_read_eeprom_word(struct dsa_switch *ds, int addr) mutex_lock(&ps->eeprom_mutex); - ret = mv88e6xxx_reg_write(ds, REG_GLOBAL2, GLOBAL2_EEPROM_OP, + ret = mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_EEPROM_OP, GLOBAL2_EEPROM_OP_READ | (addr & GLOBAL2_EEPROM_OP_ADDR_MASK)); if (ret < 0) @@ -146,7 +149,7 @@ static int mv88e6352_read_eeprom_word(struct dsa_switch *ds, int addr) if (ret < 0) goto error; - ret = mv88e6xxx_reg_read(ds, REG_GLOBAL2, GLOBAL2_EEPROM_DATA); + ret = mv88e6xxx_reg_read(ps, REG_GLOBAL2, GLOBAL2_EEPROM_DATA); error: mutex_unlock(&ps->eeprom_mutex); return ret; @@ -217,9 +220,10 @@ static int mv88e6352_get_eeprom(struct dsa_switch *ds, static int mv88e6352_eeprom_is_readonly(struct dsa_switch *ds) { + struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); int ret; - ret = mv88e6xxx_reg_read(ds, REG_GLOBAL2, GLOBAL2_EEPROM_OP); + ret = mv88e6xxx_reg_read(ps, REG_GLOBAL2, GLOBAL2_EEPROM_OP); if (ret < 0) return ret; @@ -237,11 +241,11 @@ static int mv88e6352_write_eeprom_word(struct dsa_switch *ds, int addr, mutex_lock(&ps->eeprom_mutex); - ret = mv88e6xxx_reg_write(ds, REG_GLOBAL2, GLOBAL2_EEPROM_DATA, data); + ret = mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_EEPROM_DATA, data); if (ret < 0) goto error; - ret = mv88e6xxx_reg_write(ds, REG_GLOBAL2, GLOBAL2_EEPROM_OP, + ret = mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_EEPROM_OP, GLOBAL2_EEPROM_OP_WRITE | (addr & GLOBAL2_EEPROM_OP_ADDR_MASK)); if (ret < 0) diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c index 028f92f2f375..61150af37bc7 100644 --- a/drivers/net/dsa/mv88e6xxx.c +++ b/drivers/net/dsa/mv88e6xxx.c @@ -25,12 +25,10 @@ #include #include "mv88e6xxx.h" -static void assert_smi_lock(struct dsa_switch *ds) +static void assert_smi_lock(struct mv88e6xxx_priv_state *ps) { - struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); - if (unlikely(!mutex_is_locked(&ps->smi_mutex))) { - dev_err(ds->master_dev, "SMI lock not held!\n"); + dev_err(ps->dev, "SMI lock not held!\n"); dump_stack(); } } @@ -92,30 +90,29 @@ static int __mv88e6xxx_reg_read(struct mii_bus *bus, int sw_addr, int addr, return ret & 0xffff; } -static int _mv88e6xxx_reg_read(struct dsa_switch *ds, int addr, int reg) +static int _mv88e6xxx_reg_read(struct 
mv88e6xxx_priv_state *ps, + int addr, int reg) { - struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); int ret; - assert_smi_lock(ds); + assert_smi_lock(ps); ret = __mv88e6xxx_reg_read(ps->bus, ps->sw_addr, addr, reg); if (ret < 0) return ret; - dev_dbg(ds->master_dev, "<- addr: 0x%.2x reg: 0x%.2x val: 0x%.4x\n", + dev_dbg(ps->dev, "<- addr: 0x%.2x reg: 0x%.2x val: 0x%.4x\n", addr, reg, ret); return ret; } -int mv88e6xxx_reg_read(struct dsa_switch *ds, int addr, int reg) +int mv88e6xxx_reg_read(struct mv88e6xxx_priv_state *ps, int addr, int reg) { - struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); int ret; mutex_lock(&ps->smi_mutex); - ret = _mv88e6xxx_reg_read(ds, addr, reg); + ret = _mv88e6xxx_reg_read(ps, addr, reg); mutex_unlock(&ps->smi_mutex); return ret; @@ -153,26 +150,24 @@ static int __mv88e6xxx_reg_write(struct mii_bus *bus, int sw_addr, int addr, return 0; } -static int _mv88e6xxx_reg_write(struct dsa_switch *ds, int addr, int reg, - u16 val) +static int _mv88e6xxx_reg_write(struct mv88e6xxx_priv_state *ps, int addr, + int reg, u16 val) { - struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); + assert_smi_lock(ps); - assert_smi_lock(ds); - - dev_dbg(ds->master_dev, "-> addr: 0x%.2x reg: 0x%.2x val: 0x%.4x\n", + dev_dbg(ps->dev, "-> addr: 0x%.2x reg: 0x%.2x val: 0x%.4x\n", addr, reg, val); return __mv88e6xxx_reg_write(ps->bus, ps->sw_addr, addr, reg, val); } -int mv88e6xxx_reg_write(struct dsa_switch *ds, int addr, int reg, u16 val) +int mv88e6xxx_reg_write(struct mv88e6xxx_priv_state *ps, int addr, + int reg, u16 val) { - struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); int ret; mutex_lock(&ps->smi_mutex); - ret = _mv88e6xxx_reg_write(ds, addr, reg, val); + ret = _mv88e6xxx_reg_write(ps, addr, reg, val); mutex_unlock(&ps->smi_mutex); return ret; @@ -180,24 +175,26 @@ int mv88e6xxx_reg_write(struct dsa_switch *ds, int addr, int reg, u16 val) int mv88e6xxx_set_addr_direct(struct dsa_switch *ds, u8 *addr) { + struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); int err; - err = mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_MAC_01, + err = mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_MAC_01, (addr[0] << 8) | addr[1]); if (err) return err; - err = mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_MAC_23, + err = mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_MAC_23, (addr[2] << 8) | addr[3]); if (err) return err; - return mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_MAC_45, + return mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_MAC_45, (addr[4] << 8) | addr[5]); } int mv88e6xxx_set_addr_indirect(struct dsa_switch *ds, u8 *addr) { + struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); int ret; int i; @@ -205,7 +202,7 @@ int mv88e6xxx_set_addr_indirect(struct dsa_switch *ds, u8 *addr) int j; /* Write the MAC address byte. */ - ret = mv88e6xxx_reg_write(ds, REG_GLOBAL2, GLOBAL2_SWITCH_MAC, + ret = mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_SWITCH_MAC, GLOBAL2_SWITCH_MAC_BUSY | (i << 8) | addr[i]); if (ret) @@ -213,7 +210,7 @@ int mv88e6xxx_set_addr_indirect(struct dsa_switch *ds, u8 *addr) /* Wait for the write to complete. 
*/ for (j = 0; j < 16; j++) { - ret = mv88e6xxx_reg_read(ds, REG_GLOBAL2, + ret = mv88e6xxx_reg_read(ps, REG_GLOBAL2, GLOBAL2_SWITCH_MAC); if (ret < 0) return ret; @@ -228,39 +225,40 @@ int mv88e6xxx_set_addr_indirect(struct dsa_switch *ds, u8 *addr) return 0; } -static int _mv88e6xxx_phy_read(struct dsa_switch *ds, int addr, int regnum) +static int _mv88e6xxx_phy_read(struct mv88e6xxx_priv_state *ps, int addr, + int regnum) { if (addr >= 0) - return _mv88e6xxx_reg_read(ds, addr, regnum); + return _mv88e6xxx_reg_read(ps, addr, regnum); return 0xffff; } -static int _mv88e6xxx_phy_write(struct dsa_switch *ds, int addr, int regnum, - u16 val) +static int _mv88e6xxx_phy_write(struct mv88e6xxx_priv_state *ps, int addr, + int regnum, u16 val) { if (addr >= 0) - return _mv88e6xxx_reg_write(ds, addr, regnum, val); + return _mv88e6xxx_reg_write(ps, addr, regnum, val); return 0; } #ifdef CONFIG_NET_DSA_MV88E6XXX_NEED_PPU -static int mv88e6xxx_ppu_disable(struct dsa_switch *ds) +static int mv88e6xxx_ppu_disable(struct mv88e6xxx_priv_state *ps) { int ret; unsigned long timeout; - ret = mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_CONTROL); + ret = mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_CONTROL); if (ret < 0) return ret; - ret = mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_CONTROL, + ret = mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_CONTROL, ret & ~GLOBAL_CONTROL_PPU_ENABLE); if (ret) return ret; timeout = jiffies + 1 * HZ; while (time_before(jiffies, timeout)) { - ret = mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_STATUS); + ret = mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_STATUS); if (ret < 0) return ret; @@ -273,23 +271,23 @@ static int mv88e6xxx_ppu_disable(struct dsa_switch *ds) return -ETIMEDOUT; } -static int mv88e6xxx_ppu_enable(struct dsa_switch *ds) +static int mv88e6xxx_ppu_enable(struct mv88e6xxx_priv_state *ps) { int ret, err; unsigned long timeout; - ret = mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_CONTROL); + ret = mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_CONTROL); if (ret < 0) return ret; - err = mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_CONTROL, + err = mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_CONTROL, ret | GLOBAL_CONTROL_PPU_ENABLE); if (err) return err; timeout = jiffies + 1 * HZ; while (time_before(jiffies, timeout)) { - ret = mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_STATUS); + ret = mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_STATUS); if (ret < 0) return ret; @@ -308,9 +306,7 @@ static void mv88e6xxx_ppu_reenable_work(struct work_struct *ugly) ps = container_of(ugly, struct mv88e6xxx_priv_state, ppu_work); if (mutex_trylock(&ps->ppu_mutex)) { - struct dsa_switch *ds = ps->ds; - - if (mv88e6xxx_ppu_enable(ds) == 0) + if (mv88e6xxx_ppu_enable(ps) == 0) ps->ppu_disabled = 0; mutex_unlock(&ps->ppu_mutex); } @@ -323,9 +319,8 @@ static void mv88e6xxx_ppu_reenable_timer(unsigned long _ps) schedule_work(&ps->ppu_work); } -static int mv88e6xxx_ppu_access_get(struct dsa_switch *ds) +static int mv88e6xxx_ppu_access_get(struct mv88e6xxx_priv_state *ps) { - struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); int ret; mutex_lock(&ps->ppu_mutex); @@ -336,7 +331,7 @@ static int mv88e6xxx_ppu_access_get(struct dsa_switch *ds) * it. 
*/ if (!ps->ppu_disabled) { - ret = mv88e6xxx_ppu_disable(ds); + ret = mv88e6xxx_ppu_disable(ps); if (ret < 0) { mutex_unlock(&ps->ppu_mutex); return ret; @@ -350,19 +345,15 @@ static int mv88e6xxx_ppu_access_get(struct dsa_switch *ds) return ret; } -static void mv88e6xxx_ppu_access_put(struct dsa_switch *ds) +static void mv88e6xxx_ppu_access_put(struct mv88e6xxx_priv_state *ps) { - struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); - /* Schedule a timer to re-enable the PHY polling unit. */ mod_timer(&ps->ppu_timer, jiffies + msecs_to_jiffies(10)); mutex_unlock(&ps->ppu_mutex); } -void mv88e6xxx_ppu_state_init(struct dsa_switch *ds) +void mv88e6xxx_ppu_state_init(struct mv88e6xxx_priv_state *ps) { - struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); - mutex_init(&ps->ppu_mutex); INIT_WORK(&ps->ppu_work, mv88e6xxx_ppu_reenable_work); init_timer(&ps->ppu_timer); @@ -372,12 +363,13 @@ void mv88e6xxx_ppu_state_init(struct dsa_switch *ds) int mv88e6xxx_phy_read_ppu(struct dsa_switch *ds, int addr, int regnum) { + struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); int ret; - ret = mv88e6xxx_ppu_access_get(ds); + ret = mv88e6xxx_ppu_access_get(ps); if (ret >= 0) { - ret = mv88e6xxx_reg_read(ds, addr, regnum); - mv88e6xxx_ppu_access_put(ds); + ret = mv88e6xxx_reg_read(ps, addr, regnum); + mv88e6xxx_ppu_access_put(ps); } return ret; @@ -386,96 +378,79 @@ int mv88e6xxx_phy_read_ppu(struct dsa_switch *ds, int addr, int regnum) int mv88e6xxx_phy_write_ppu(struct dsa_switch *ds, int addr, int regnum, u16 val) { + struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); int ret; - ret = mv88e6xxx_ppu_access_get(ds); + ret = mv88e6xxx_ppu_access_get(ps); if (ret >= 0) { - ret = mv88e6xxx_reg_write(ds, addr, regnum, val); - mv88e6xxx_ppu_access_put(ds); + ret = mv88e6xxx_reg_write(ps, addr, regnum, val); + mv88e6xxx_ppu_access_put(ps); } return ret; } #endif -static bool mv88e6xxx_6065_family(struct dsa_switch *ds) +static bool mv88e6xxx_6065_family(struct mv88e6xxx_priv_state *ps) { - struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); - return ps->info->family == MV88E6XXX_FAMILY_6065; } -static bool mv88e6xxx_6095_family(struct dsa_switch *ds) +static bool mv88e6xxx_6095_family(struct mv88e6xxx_priv_state *ps) { - struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); - return ps->info->family == MV88E6XXX_FAMILY_6095; } -static bool mv88e6xxx_6097_family(struct dsa_switch *ds) +static bool mv88e6xxx_6097_family(struct mv88e6xxx_priv_state *ps) { - struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); - return ps->info->family == MV88E6XXX_FAMILY_6097; } -static bool mv88e6xxx_6165_family(struct dsa_switch *ds) +static bool mv88e6xxx_6165_family(struct mv88e6xxx_priv_state *ps) { - struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); - return ps->info->family == MV88E6XXX_FAMILY_6165; } -static bool mv88e6xxx_6185_family(struct dsa_switch *ds) +static bool mv88e6xxx_6185_family(struct mv88e6xxx_priv_state *ps) { - struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); - return ps->info->family == MV88E6XXX_FAMILY_6185; } -static bool mv88e6xxx_6320_family(struct dsa_switch *ds) +static bool mv88e6xxx_6320_family(struct mv88e6xxx_priv_state *ps) { - struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); - return ps->info->family == MV88E6XXX_FAMILY_6320; } -static bool mv88e6xxx_6351_family(struct dsa_switch *ds) +static bool mv88e6xxx_6351_family(struct mv88e6xxx_priv_state *ps) { - struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); - return ps->info->family == MV88E6XXX_FAMILY_6351; } -static bool mv88e6xxx_6352_family(struct 
dsa_switch *ds) +static bool mv88e6xxx_6352_family(struct mv88e6xxx_priv_state *ps) { - struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); - return ps->info->family == MV88E6XXX_FAMILY_6352; } -static unsigned int mv88e6xxx_num_databases(struct dsa_switch *ds) +static unsigned int mv88e6xxx_num_databases(struct mv88e6xxx_priv_state *ps) { - struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); - return ps->info->num_databases; } -static bool mv88e6xxx_has_fid_reg(struct dsa_switch *ds) +static bool mv88e6xxx_has_fid_reg(struct mv88e6xxx_priv_state *ps) { /* Does the device have dedicated FID registers for ATU and VTU ops? */ - if (mv88e6xxx_6097_family(ds) || mv88e6xxx_6165_family(ds) || - mv88e6xxx_6351_family(ds) || mv88e6xxx_6352_family(ds)) + if (mv88e6xxx_6097_family(ps) || mv88e6xxx_6165_family(ps) || + mv88e6xxx_6351_family(ps) || mv88e6xxx_6352_family(ps)) return true; return false; } -static bool mv88e6xxx_has_stu(struct dsa_switch *ds) +static bool mv88e6xxx_has_stu(struct mv88e6xxx_priv_state *ps) { /* Does the device have STU and dedicated SID registers for VTU ops? */ - if (mv88e6xxx_6097_family(ds) || mv88e6xxx_6165_family(ds) || - mv88e6xxx_6351_family(ds) || mv88e6xxx_6352_family(ds)) + if (mv88e6xxx_6097_family(ps) || mv88e6xxx_6165_family(ps) || + mv88e6xxx_6351_family(ps) || mv88e6xxx_6352_family(ps)) return true; return false; @@ -497,7 +472,7 @@ void mv88e6xxx_adjust_link(struct dsa_switch *ds, int port, mutex_lock(&ps->smi_mutex); - ret = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_PCS_CTRL); + ret = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_PCS_CTRL); if (ret < 0) goto out; @@ -511,7 +486,7 @@ void mv88e6xxx_adjust_link(struct dsa_switch *ds, int port, if (phydev->link) reg |= PORT_PCS_CTRL_LINK_UP; - if (mv88e6xxx_6065_family(ds) && phydev->speed > SPEED_100) + if (mv88e6xxx_6065_family(ps) && phydev->speed > SPEED_100) goto out; switch (phydev->speed) { @@ -533,7 +508,7 @@ void mv88e6xxx_adjust_link(struct dsa_switch *ds, int port, if (phydev->duplex == DUPLEX_FULL) reg |= PORT_PCS_CTRL_DUPLEX_FULL; - if ((mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds)) && + if ((mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps)) && (port >= ps->info->num_ports - 2)) { if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) reg |= PORT_PCS_CTRL_RGMII_DELAY_RXCLK; @@ -543,19 +518,19 @@ void mv88e6xxx_adjust_link(struct dsa_switch *ds, int port, reg |= (PORT_PCS_CTRL_RGMII_DELAY_RXCLK | PORT_PCS_CTRL_RGMII_DELAY_TXCLK); } - _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_PCS_CTRL, reg); + _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_PCS_CTRL, reg); out: mutex_unlock(&ps->smi_mutex); } -static int _mv88e6xxx_stats_wait(struct dsa_switch *ds) +static int _mv88e6xxx_stats_wait(struct mv88e6xxx_priv_state *ps) { int ret; int i; for (i = 0; i < 10; i++) { - ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_STATS_OP); + ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_STATS_OP); if ((ret & GLOBAL_STATS_OP_BUSY) == 0) return 0; } @@ -563,52 +538,54 @@ static int _mv88e6xxx_stats_wait(struct dsa_switch *ds) return -ETIMEDOUT; } -static int _mv88e6xxx_stats_snapshot(struct dsa_switch *ds, int port) +static int _mv88e6xxx_stats_snapshot(struct mv88e6xxx_priv_state *ps, + int port) { int ret; - if (mv88e6xxx_6320_family(ds) || mv88e6xxx_6352_family(ds)) + if (mv88e6xxx_6320_family(ps) || mv88e6xxx_6352_family(ps)) port = (port + 1) << 5; /* Snapshot the hardware statistics counters for this port. 
*/ - ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_STATS_OP, + ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_STATS_OP, GLOBAL_STATS_OP_CAPTURE_PORT | GLOBAL_STATS_OP_HIST_RX_TX | port); if (ret < 0) return ret; /* Wait for the snapshotting to complete. */ - ret = _mv88e6xxx_stats_wait(ds); + ret = _mv88e6xxx_stats_wait(ps); if (ret < 0) return ret; return 0; } -static void _mv88e6xxx_stats_read(struct dsa_switch *ds, int stat, u32 *val) +static void _mv88e6xxx_stats_read(struct mv88e6xxx_priv_state *ps, + int stat, u32 *val) { u32 _val; int ret; *val = 0; - ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_STATS_OP, + ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_STATS_OP, GLOBAL_STATS_OP_READ_CAPTURED | GLOBAL_STATS_OP_HIST_RX_TX | stat); if (ret < 0) return; - ret = _mv88e6xxx_stats_wait(ds); + ret = _mv88e6xxx_stats_wait(ps); if (ret < 0) return; - ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_STATS_COUNTER_32); + ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_STATS_COUNTER_32); if (ret < 0) return; _val = ret << 16; - ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_STATS_COUNTER_01); + ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_STATS_COUNTER_01); if (ret < 0) return; @@ -677,26 +654,26 @@ static struct mv88e6xxx_hw_stat mv88e6xxx_hw_stats[] = { { "out_management", 4, 0x1f | GLOBAL_STATS_OP_BANK_1, BANK1, }, }; -static bool mv88e6xxx_has_stat(struct dsa_switch *ds, +static bool mv88e6xxx_has_stat(struct mv88e6xxx_priv_state *ps, struct mv88e6xxx_hw_stat *stat) { switch (stat->type) { case BANK0: return true; case BANK1: - return mv88e6xxx_6320_family(ds); + return mv88e6xxx_6320_family(ps); case PORT: - return mv88e6xxx_6095_family(ds) || - mv88e6xxx_6185_family(ds) || - mv88e6xxx_6097_family(ds) || - mv88e6xxx_6165_family(ds) || - mv88e6xxx_6351_family(ds) || - mv88e6xxx_6352_family(ds); + return mv88e6xxx_6095_family(ps) || + mv88e6xxx_6185_family(ps) || + mv88e6xxx_6097_family(ps) || + mv88e6xxx_6165_family(ps) || + mv88e6xxx_6351_family(ps) || + mv88e6xxx_6352_family(ps); } return false; } -static uint64_t _mv88e6xxx_get_ethtool_stat(struct dsa_switch *ds, +static uint64_t _mv88e6xxx_get_ethtool_stat(struct mv88e6xxx_priv_state *ps, struct mv88e6xxx_hw_stat *s, int port) { @@ -707,13 +684,13 @@ static uint64_t _mv88e6xxx_get_ethtool_stat(struct dsa_switch *ds, switch (s->type) { case PORT: - ret = _mv88e6xxx_reg_read(ds, REG_PORT(port), s->reg); + ret = _mv88e6xxx_reg_read(ps, REG_PORT(port), s->reg); if (ret < 0) return UINT64_MAX; low = ret; if (s->sizeof_stat == 4) { - ret = _mv88e6xxx_reg_read(ds, REG_PORT(port), + ret = _mv88e6xxx_reg_read(ps, REG_PORT(port), s->reg + 1); if (ret < 0) return UINT64_MAX; @@ -722,9 +699,9 @@ static uint64_t _mv88e6xxx_get_ethtool_stat(struct dsa_switch *ds, break; case BANK0: case BANK1: - _mv88e6xxx_stats_read(ds, s->reg, &low); + _mv88e6xxx_stats_read(ps, s->reg, &low); if (s->sizeof_stat == 8) - _mv88e6xxx_stats_read(ds, s->reg + 1, &high); + _mv88e6xxx_stats_read(ps, s->reg + 1, &high); } value = (((u64)high) << 16) | low; return value; @@ -732,12 +709,13 @@ static uint64_t _mv88e6xxx_get_ethtool_stat(struct dsa_switch *ds, void mv88e6xxx_get_strings(struct dsa_switch *ds, int port, uint8_t *data) { + struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); struct mv88e6xxx_hw_stat *stat; int i, j; for (i = 0, j = 0; i < ARRAY_SIZE(mv88e6xxx_hw_stats); i++) { stat = &mv88e6xxx_hw_stats[i]; - if (mv88e6xxx_has_stat(ds, stat)) { + if (mv88e6xxx_has_stat(ps, stat)) { memcpy(data + j * ETH_GSTRING_LEN, stat->string, 
ETH_GSTRING_LEN); j++; @@ -747,12 +725,13 @@ void mv88e6xxx_get_strings(struct dsa_switch *ds, int port, uint8_t *data) int mv88e6xxx_get_sset_count(struct dsa_switch *ds) { + struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); struct mv88e6xxx_hw_stat *stat; int i, j; for (i = 0, j = 0; i < ARRAY_SIZE(mv88e6xxx_hw_stats); i++) { stat = &mv88e6xxx_hw_stats[i]; - if (mv88e6xxx_has_stat(ds, stat)) + if (mv88e6xxx_has_stat(ps, stat)) j++; } return j; @@ -769,15 +748,15 @@ mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds, mutex_lock(&ps->smi_mutex); - ret = _mv88e6xxx_stats_snapshot(ds, port); + ret = _mv88e6xxx_stats_snapshot(ps, port); if (ret < 0) { mutex_unlock(&ps->smi_mutex); return; } for (i = 0, j = 0; i < ARRAY_SIZE(mv88e6xxx_hw_stats); i++) { stat = &mv88e6xxx_hw_stats[i]; - if (mv88e6xxx_has_stat(ds, stat)) { - data[j] = _mv88e6xxx_get_ethtool_stat(ds, stat, port); + if (mv88e6xxx_has_stat(ps, stat)) { + data[j] = _mv88e6xxx_get_ethtool_stat(ps, stat, port); j++; } } @@ -793,6 +772,7 @@ int mv88e6xxx_get_regs_len(struct dsa_switch *ds, int port) void mv88e6xxx_get_regs(struct dsa_switch *ds, int port, struct ethtool_regs *regs, void *_p) { + struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); u16 *p = _p; int i; @@ -803,13 +783,13 @@ void mv88e6xxx_get_regs(struct dsa_switch *ds, int port, for (i = 0; i < 32; i++) { int ret; - ret = mv88e6xxx_reg_read(ds, REG_PORT(port), i); + ret = mv88e6xxx_reg_read(ps, REG_PORT(port), i); if (ret >= 0) p[i] = ret; } } -static int _mv88e6xxx_wait(struct dsa_switch *ds, int reg, int offset, +static int _mv88e6xxx_wait(struct mv88e6xxx_priv_state *ps, int reg, int offset, u16 mask) { unsigned long timeout = jiffies + HZ / 10; @@ -817,7 +797,7 @@ static int _mv88e6xxx_wait(struct dsa_switch *ds, int reg, int offset, while (time_before(jiffies, timeout)) { int ret; - ret = _mv88e6xxx_reg_read(ds, reg, offset); + ret = _mv88e6xxx_reg_read(ps, reg, offset); if (ret < 0) return ret; if (!(ret & mask)) @@ -828,74 +808,80 @@ static int _mv88e6xxx_wait(struct dsa_switch *ds, int reg, int offset, return -ETIMEDOUT; } -static int mv88e6xxx_wait(struct dsa_switch *ds, int reg, int offset, u16 mask) +static int mv88e6xxx_wait(struct mv88e6xxx_priv_state *ps, int reg, + int offset, u16 mask) { - struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); int ret; mutex_lock(&ps->smi_mutex); - ret = _mv88e6xxx_wait(ds, reg, offset, mask); + ret = _mv88e6xxx_wait(ps, reg, offset, mask); mutex_unlock(&ps->smi_mutex); return ret; } -static int _mv88e6xxx_phy_wait(struct dsa_switch *ds) +static int _mv88e6xxx_phy_wait(struct mv88e6xxx_priv_state *ps) { - return _mv88e6xxx_wait(ds, REG_GLOBAL2, GLOBAL2_SMI_OP, + return _mv88e6xxx_wait(ps, REG_GLOBAL2, GLOBAL2_SMI_OP, GLOBAL2_SMI_OP_BUSY); } int mv88e6xxx_eeprom_load_wait(struct dsa_switch *ds) { - return mv88e6xxx_wait(ds, REG_GLOBAL2, GLOBAL2_EEPROM_OP, + struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); + + return mv88e6xxx_wait(ps, REG_GLOBAL2, GLOBAL2_EEPROM_OP, GLOBAL2_EEPROM_OP_LOAD); } int mv88e6xxx_eeprom_busy_wait(struct dsa_switch *ds) { - return mv88e6xxx_wait(ds, REG_GLOBAL2, GLOBAL2_EEPROM_OP, + struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); + + return mv88e6xxx_wait(ps, REG_GLOBAL2, GLOBAL2_EEPROM_OP, GLOBAL2_EEPROM_OP_BUSY); } -static int _mv88e6xxx_atu_wait(struct dsa_switch *ds) +static int _mv88e6xxx_atu_wait(struct mv88e6xxx_priv_state *ps) { - return _mv88e6xxx_wait(ds, REG_GLOBAL, GLOBAL_ATU_OP, + return _mv88e6xxx_wait(ps, REG_GLOBAL, GLOBAL_ATU_OP, GLOBAL_ATU_OP_BUSY); } -static int 
_mv88e6xxx_phy_read_indirect(struct dsa_switch *ds, int addr, - int regnum) +static int _mv88e6xxx_phy_read_indirect(struct mv88e6xxx_priv_state *ps, + int addr, int regnum) { int ret; - ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL2, GLOBAL2_SMI_OP, + ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_SMI_OP, GLOBAL2_SMI_OP_22_READ | (addr << 5) | regnum); if (ret < 0) return ret; - ret = _mv88e6xxx_phy_wait(ds); + ret = _mv88e6xxx_phy_wait(ps); if (ret < 0) return ret; - return _mv88e6xxx_reg_read(ds, REG_GLOBAL2, GLOBAL2_SMI_DATA); + ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL2, GLOBAL2_SMI_DATA); + + return ret; } -static int _mv88e6xxx_phy_write_indirect(struct dsa_switch *ds, int addr, - int regnum, u16 val) +static int _mv88e6xxx_phy_write_indirect(struct mv88e6xxx_priv_state *ps, + int addr, int regnum, u16 val) { int ret; - ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL2, GLOBAL2_SMI_DATA, val); + ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_SMI_DATA, val); if (ret < 0) return ret; - ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL2, GLOBAL2_SMI_OP, + ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_SMI_OP, GLOBAL2_SMI_OP_22_WRITE | (addr << 5) | regnum); - return _mv88e6xxx_phy_wait(ds); + return _mv88e6xxx_phy_wait(ps); } int mv88e6xxx_get_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e) @@ -905,14 +891,14 @@ int mv88e6xxx_get_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e) mutex_lock(&ps->smi_mutex); - reg = _mv88e6xxx_phy_read_indirect(ds, port, 16); + reg = _mv88e6xxx_phy_read_indirect(ps, port, 16); if (reg < 0) goto out; e->eee_enabled = !!(reg & 0x0200); e->tx_lpi_enabled = !!(reg & 0x0100); - reg = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_STATUS); + reg = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_STATUS); if (reg < 0) goto out; @@ -933,7 +919,7 @@ int mv88e6xxx_set_eee(struct dsa_switch *ds, int port, mutex_lock(&ps->smi_mutex); - ret = _mv88e6xxx_phy_read_indirect(ds, port, 16); + ret = _mv88e6xxx_phy_read_indirect(ps, port, 16); if (ret < 0) goto out; @@ -943,28 +929,28 @@ int mv88e6xxx_set_eee(struct dsa_switch *ds, int port, if (e->tx_lpi_enabled) reg |= 0x0100; - ret = _mv88e6xxx_phy_write_indirect(ds, port, 16, reg); + ret = _mv88e6xxx_phy_write_indirect(ps, port, 16, reg); out: mutex_unlock(&ps->smi_mutex); return ret; } -static int _mv88e6xxx_atu_cmd(struct dsa_switch *ds, u16 fid, u16 cmd) +static int _mv88e6xxx_atu_cmd(struct mv88e6xxx_priv_state *ps, u16 fid, u16 cmd) { int ret; - if (mv88e6xxx_has_fid_reg(ds)) { - ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_FID, fid); + if (mv88e6xxx_has_fid_reg(ps)) { + ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_ATU_FID, fid); if (ret < 0) return ret; - } else if (mv88e6xxx_num_databases(ds) == 256) { + } else if (mv88e6xxx_num_databases(ps) == 256) { /* ATU DBNum[7:4] are located in ATU Control 15:12 */ - ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_ATU_CONTROL); + ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_ATU_CONTROL); if (ret < 0) return ret; - ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_CONTROL, + ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_ATU_CONTROL, (ret & 0xfff) | ((fid << 8) & 0xf000)); if (ret < 0) @@ -974,14 +960,14 @@ static int _mv88e6xxx_atu_cmd(struct dsa_switch *ds, u16 fid, u16 cmd) cmd |= fid & 0xf; } - ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_OP, cmd); + ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_ATU_OP, cmd); if (ret < 0) return ret; - return _mv88e6xxx_atu_wait(ds); + return _mv88e6xxx_atu_wait(ps); } -static int 
_mv88e6xxx_atu_data_write(struct dsa_switch *ds, +static int _mv88e6xxx_atu_data_write(struct mv88e6xxx_priv_state *ps, struct mv88e6xxx_atu_entry *entry) { u16 data = entry->state & GLOBAL_ATU_DATA_STATE_MASK; @@ -1001,21 +987,21 @@ static int _mv88e6xxx_atu_data_write(struct dsa_switch *ds, data |= (entry->portv_trunkid << shift) & mask; } - return _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_DATA, data); + return _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_ATU_DATA, data); } -static int _mv88e6xxx_atu_flush_move(struct dsa_switch *ds, +static int _mv88e6xxx_atu_flush_move(struct mv88e6xxx_priv_state *ps, struct mv88e6xxx_atu_entry *entry, bool static_too) { int op; int err; - err = _mv88e6xxx_atu_wait(ds); + err = _mv88e6xxx_atu_wait(ps); if (err) return err; - err = _mv88e6xxx_atu_data_write(ds, entry); + err = _mv88e6xxx_atu_data_write(ps, entry); if (err) return err; @@ -1027,21 +1013,22 @@ static int _mv88e6xxx_atu_flush_move(struct dsa_switch *ds, GLOBAL_ATU_OP_FLUSH_MOVE_NON_STATIC; } - return _mv88e6xxx_atu_cmd(ds, entry->fid, op); + return _mv88e6xxx_atu_cmd(ps, entry->fid, op); } -static int _mv88e6xxx_atu_flush(struct dsa_switch *ds, u16 fid, bool static_too) +static int _mv88e6xxx_atu_flush(struct mv88e6xxx_priv_state *ps, + u16 fid, bool static_too) { struct mv88e6xxx_atu_entry entry = { .fid = fid, .state = 0, /* EntryState bits must be 0 */ }; - return _mv88e6xxx_atu_flush_move(ds, &entry, static_too); + return _mv88e6xxx_atu_flush_move(ps, &entry, static_too); } -static int _mv88e6xxx_atu_move(struct dsa_switch *ds, u16 fid, int from_port, - int to_port, bool static_too) +static int _mv88e6xxx_atu_move(struct mv88e6xxx_priv_state *ps, u16 fid, + int from_port, int to_port, bool static_too) { struct mv88e6xxx_atu_entry entry = { .trunk = false, @@ -1055,14 +1042,14 @@ static int _mv88e6xxx_atu_move(struct dsa_switch *ds, u16 fid, int from_port, entry.portv_trunkid = (to_port & 0x0f) << 4; entry.portv_trunkid |= from_port & 0x0f; - return _mv88e6xxx_atu_flush_move(ds, &entry, static_too); + return _mv88e6xxx_atu_flush_move(ps, &entry, static_too); } -static int _mv88e6xxx_atu_remove(struct dsa_switch *ds, u16 fid, int port, - bool static_too) +static int _mv88e6xxx_atu_remove(struct mv88e6xxx_priv_state *ps, u16 fid, + int port, bool static_too) { /* Destination port 0xF means remove the entries */ - return _mv88e6xxx_atu_move(ds, fid, port, 0x0f, static_too); + return _mv88e6xxx_atu_move(ps, fid, port, 0x0f, static_too); } static const char * const mv88e6xxx_port_state_names[] = { @@ -1072,12 +1059,14 @@ static const char * const mv88e6xxx_port_state_names[] = { [PORT_CONTROL_STATE_FORWARDING] = "Forwarding", }; -static int _mv88e6xxx_port_state(struct dsa_switch *ds, int port, u8 state) +static int _mv88e6xxx_port_state(struct mv88e6xxx_priv_state *ps, int port, + u8 state) { + struct dsa_switch *ds = ps->ds; int reg, ret = 0; u8 oldstate; - reg = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_CONTROL); + reg = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_CONTROL); if (reg < 0) return reg; @@ -1092,13 +1081,13 @@ static int _mv88e6xxx_port_state(struct dsa_switch *ds, int port, u8 state) oldstate == PORT_CONTROL_STATE_FORWARDING) && (state == PORT_CONTROL_STATE_DISABLED || state == PORT_CONTROL_STATE_BLOCKING)) { - ret = _mv88e6xxx_atu_remove(ds, 0, port, false); + ret = _mv88e6xxx_atu_remove(ps, 0, port, false); if (ret) return ret; } reg = (reg & ~PORT_CONTROL_STATE_MASK) | state; - ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_CONTROL, + ret = 
_mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_CONTROL, reg); if (ret) return ret; @@ -1111,11 +1100,12 @@ static int _mv88e6xxx_port_state(struct dsa_switch *ds, int port, u8 state) return ret; } -static int _mv88e6xxx_port_based_vlan_map(struct dsa_switch *ds, int port) +static int _mv88e6xxx_port_based_vlan_map(struct mv88e6xxx_priv_state *ps, + int port) { - struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); struct net_device *bridge = ps->ports[port].bridge_dev; const u16 mask = (1 << ps->info->num_ports) - 1; + struct dsa_switch *ds = ps->ds; u16 output_ports = 0; int reg; int i; @@ -1138,14 +1128,14 @@ static int _mv88e6xxx_port_based_vlan_map(struct dsa_switch *ds, int port) /* prevent frames from going back out of the port they came in on */ output_ports &= ~BIT(port); - reg = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_BASE_VLAN); + reg = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_BASE_VLAN); if (reg < 0) return reg; reg &= ~mask; reg |= output_ports & mask; - return _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_BASE_VLAN, reg); + return _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_BASE_VLAN, reg); } void mv88e6xxx_port_stp_state_set(struct dsa_switch *ds, int port, u8 state) @@ -1178,13 +1168,14 @@ void mv88e6xxx_port_stp_state_set(struct dsa_switch *ds, int port, u8 state) schedule_work(&ps->bridge_work); } -static int _mv88e6xxx_port_pvid(struct dsa_switch *ds, int port, u16 *new, - u16 *old) +static int _mv88e6xxx_port_pvid(struct mv88e6xxx_priv_state *ps, int port, + u16 *new, u16 *old) { + struct dsa_switch *ds = ps->ds; u16 pvid; int ret; - ret = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_DEFAULT_VLAN); + ret = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_DEFAULT_VLAN); if (ret < 0) return ret; @@ -1194,7 +1185,7 @@ static int _mv88e6xxx_port_pvid(struct dsa_switch *ds, int port, u16 *new, ret &= ~PORT_DEFAULT_VLAN_MASK; ret |= *new & PORT_DEFAULT_VLAN_MASK; - ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), + ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_DEFAULT_VLAN, ret); if (ret < 0) return ret; @@ -1209,55 +1200,56 @@ static int _mv88e6xxx_port_pvid(struct dsa_switch *ds, int port, u16 *new, return 0; } -static int _mv88e6xxx_port_pvid_get(struct dsa_switch *ds, int port, u16 *pvid) +static int _mv88e6xxx_port_pvid_get(struct mv88e6xxx_priv_state *ps, + int port, u16 *pvid) { - return _mv88e6xxx_port_pvid(ds, port, NULL, pvid); + return _mv88e6xxx_port_pvid(ps, port, NULL, pvid); } -static int _mv88e6xxx_port_pvid_set(struct dsa_switch *ds, int port, u16 pvid) +static int _mv88e6xxx_port_pvid_set(struct mv88e6xxx_priv_state *ps, + int port, u16 pvid) { - return _mv88e6xxx_port_pvid(ds, port, &pvid, NULL); + return _mv88e6xxx_port_pvid(ps, port, &pvid, NULL); } -static int _mv88e6xxx_vtu_wait(struct dsa_switch *ds) +static int _mv88e6xxx_vtu_wait(struct mv88e6xxx_priv_state *ps) { - return _mv88e6xxx_wait(ds, REG_GLOBAL, GLOBAL_VTU_OP, + return _mv88e6xxx_wait(ps, REG_GLOBAL, GLOBAL_VTU_OP, GLOBAL_VTU_OP_BUSY); } -static int _mv88e6xxx_vtu_cmd(struct dsa_switch *ds, u16 op) +static int _mv88e6xxx_vtu_cmd(struct mv88e6xxx_priv_state *ps, u16 op) { int ret; - ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_OP, op); + ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_VTU_OP, op); if (ret < 0) return ret; - return _mv88e6xxx_vtu_wait(ds); + return _mv88e6xxx_vtu_wait(ps); } -static int _mv88e6xxx_vtu_stu_flush(struct dsa_switch *ds) +static int _mv88e6xxx_vtu_stu_flush(struct mv88e6xxx_priv_state *ps) { int ret; - ret = _mv88e6xxx_vtu_wait(ds); + ret = 
_mv88e6xxx_vtu_wait(ps); if (ret < 0) return ret; - return _mv88e6xxx_vtu_cmd(ds, GLOBAL_VTU_OP_FLUSH_ALL); + return _mv88e6xxx_vtu_cmd(ps, GLOBAL_VTU_OP_FLUSH_ALL); } -static int _mv88e6xxx_vtu_stu_data_read(struct dsa_switch *ds, +static int _mv88e6xxx_vtu_stu_data_read(struct mv88e6xxx_priv_state *ps, struct mv88e6xxx_vtu_stu_entry *entry, unsigned int nibble_offset) { - struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); u16 regs[3]; int i; int ret; for (i = 0; i < 3; ++i) { - ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, + ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_VTU_DATA_0_3 + i); if (ret < 0) return ret; @@ -1275,11 +1267,10 @@ static int _mv88e6xxx_vtu_stu_data_read(struct dsa_switch *ds, return 0; } -static int _mv88e6xxx_vtu_stu_data_write(struct dsa_switch *ds, +static int _mv88e6xxx_vtu_stu_data_write(struct mv88e6xxx_priv_state *ps, struct mv88e6xxx_vtu_stu_entry *entry, unsigned int nibble_offset) { - struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); u16 regs[3] = { 0 }; int i; int ret; @@ -1292,7 +1283,7 @@ static int _mv88e6xxx_vtu_stu_data_write(struct dsa_switch *ds, } for (i = 0; i < 3; ++i) { - ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, + ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_VTU_DATA_0_3 + i, regs[i]); if (ret < 0) return ret; @@ -1301,27 +1292,27 @@ static int _mv88e6xxx_vtu_stu_data_write(struct dsa_switch *ds, return 0; } -static int _mv88e6xxx_vtu_vid_write(struct dsa_switch *ds, u16 vid) +static int _mv88e6xxx_vtu_vid_write(struct mv88e6xxx_priv_state *ps, u16 vid) { - return _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_VID, + return _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_VTU_VID, vid & GLOBAL_VTU_VID_MASK); } -static int _mv88e6xxx_vtu_getnext(struct dsa_switch *ds, +static int _mv88e6xxx_vtu_getnext(struct mv88e6xxx_priv_state *ps, struct mv88e6xxx_vtu_stu_entry *entry) { struct mv88e6xxx_vtu_stu_entry next = { 0 }; int ret; - ret = _mv88e6xxx_vtu_wait(ds); + ret = _mv88e6xxx_vtu_wait(ps); if (ret < 0) return ret; - ret = _mv88e6xxx_vtu_cmd(ds, GLOBAL_VTU_OP_VTU_GET_NEXT); + ret = _mv88e6xxx_vtu_cmd(ps, GLOBAL_VTU_OP_VTU_GET_NEXT); if (ret < 0) return ret; - ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_VTU_VID); + ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_VTU_VID); if (ret < 0) return ret; @@ -1329,22 +1320,22 @@ static int _mv88e6xxx_vtu_getnext(struct dsa_switch *ds, next.valid = !!(ret & GLOBAL_VTU_VID_VALID); if (next.valid) { - ret = _mv88e6xxx_vtu_stu_data_read(ds, &next, 0); + ret = _mv88e6xxx_vtu_stu_data_read(ps, &next, 0); if (ret < 0) return ret; - if (mv88e6xxx_has_fid_reg(ds)) { - ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, + if (mv88e6xxx_has_fid_reg(ps)) { + ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_VTU_FID); if (ret < 0) return ret; next.fid = ret & GLOBAL_VTU_FID_MASK; - } else if (mv88e6xxx_num_databases(ds) == 256) { + } else if (mv88e6xxx_num_databases(ps) == 256) { /* VTU DBNum[7:4] are located in VTU Operation 11:8, and * VTU DBNum[3:0] are located in VTU Operation 3:0 */ - ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, + ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_VTU_OP); if (ret < 0) return ret; @@ -1353,8 +1344,8 @@ static int _mv88e6xxx_vtu_getnext(struct dsa_switch *ds, next.fid |= ret & 0xf; } - if (mv88e6xxx_has_stu(ds)) { - ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, + if (mv88e6xxx_has_stu(ps)) { + ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_VTU_SID); if (ret < 0) return ret; @@ -1378,16 +1369,16 @@ int mv88e6xxx_port_vlan_dump(struct dsa_switch *ds, int port, mutex_lock(&ps->smi_mutex); - 
err = _mv88e6xxx_port_pvid_get(ds, port, &pvid); + err = _mv88e6xxx_port_pvid_get(ps, port, &pvid); if (err) goto unlock; - err = _mv88e6xxx_vtu_vid_write(ds, GLOBAL_VTU_VID_MASK); + err = _mv88e6xxx_vtu_vid_write(ps, GLOBAL_VTU_VID_MASK); if (err) goto unlock; do { - err = _mv88e6xxx_vtu_getnext(ds, &next); + err = _mv88e6xxx_vtu_getnext(ps, &next); if (err) break; @@ -1418,14 +1409,14 @@ unlock: return err; } -static int _mv88e6xxx_vtu_loadpurge(struct dsa_switch *ds, +static int _mv88e6xxx_vtu_loadpurge(struct mv88e6xxx_priv_state *ps, struct mv88e6xxx_vtu_stu_entry *entry) { u16 op = GLOBAL_VTU_OP_VTU_LOAD_PURGE; u16 reg = 0; int ret; - ret = _mv88e6xxx_vtu_wait(ds); + ret = _mv88e6xxx_vtu_wait(ps); if (ret < 0) return ret; @@ -1433,23 +1424,23 @@ static int _mv88e6xxx_vtu_loadpurge(struct dsa_switch *ds, goto loadpurge; /* Write port member tags */ - ret = _mv88e6xxx_vtu_stu_data_write(ds, entry, 0); + ret = _mv88e6xxx_vtu_stu_data_write(ps, entry, 0); if (ret < 0) return ret; - if (mv88e6xxx_has_stu(ds)) { + if (mv88e6xxx_has_stu(ps)) { reg = entry->sid & GLOBAL_VTU_SID_MASK; - ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_SID, reg); + ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_VTU_SID, reg); if (ret < 0) return ret; } - if (mv88e6xxx_has_fid_reg(ds)) { + if (mv88e6xxx_has_fid_reg(ps)) { reg = entry->fid & GLOBAL_VTU_FID_MASK; - ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_FID, reg); + ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_VTU_FID, reg); if (ret < 0) return ret; - } else if (mv88e6xxx_num_databases(ds) == 256) { + } else if (mv88e6xxx_num_databases(ps) == 256) { /* VTU DBNum[7:4] are located in VTU Operation 11:8, and * VTU DBNum[3:0] are located in VTU Operation 3:0 */ @@ -1460,46 +1451,46 @@ static int _mv88e6xxx_vtu_loadpurge(struct dsa_switch *ds, reg = GLOBAL_VTU_VID_VALID; loadpurge: reg |= entry->vid & GLOBAL_VTU_VID_MASK; - ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_VID, reg); + ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_VTU_VID, reg); if (ret < 0) return ret; - return _mv88e6xxx_vtu_cmd(ds, op); + return _mv88e6xxx_vtu_cmd(ps, op); } -static int _mv88e6xxx_stu_getnext(struct dsa_switch *ds, u8 sid, +static int _mv88e6xxx_stu_getnext(struct mv88e6xxx_priv_state *ps, u8 sid, struct mv88e6xxx_vtu_stu_entry *entry) { struct mv88e6xxx_vtu_stu_entry next = { 0 }; int ret; - ret = _mv88e6xxx_vtu_wait(ds); + ret = _mv88e6xxx_vtu_wait(ps); if (ret < 0) return ret; - ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_SID, + ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_VTU_SID, sid & GLOBAL_VTU_SID_MASK); if (ret < 0) return ret; - ret = _mv88e6xxx_vtu_cmd(ds, GLOBAL_VTU_OP_STU_GET_NEXT); + ret = _mv88e6xxx_vtu_cmd(ps, GLOBAL_VTU_OP_STU_GET_NEXT); if (ret < 0) return ret; - ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_VTU_SID); + ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_VTU_SID); if (ret < 0) return ret; next.sid = ret & GLOBAL_VTU_SID_MASK; - ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_VTU_VID); + ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_VTU_VID); if (ret < 0) return ret; next.valid = !!(ret & GLOBAL_VTU_VID_VALID); if (next.valid) { - ret = _mv88e6xxx_vtu_stu_data_read(ds, &next, 2); + ret = _mv88e6xxx_vtu_stu_data_read(ps, &next, 2); if (ret < 0) return ret; } @@ -1508,13 +1499,13 @@ static int _mv88e6xxx_stu_getnext(struct dsa_switch *ds, u8 sid, return 0; } -static int _mv88e6xxx_stu_loadpurge(struct dsa_switch *ds, +static int _mv88e6xxx_stu_loadpurge(struct mv88e6xxx_priv_state *ps, struct 
mv88e6xxx_vtu_stu_entry *entry) { u16 reg = 0; int ret; - ret = _mv88e6xxx_vtu_wait(ds); + ret = _mv88e6xxx_vtu_wait(ps); if (ret < 0) return ret; @@ -1522,40 +1513,41 @@ static int _mv88e6xxx_stu_loadpurge(struct dsa_switch *ds, goto loadpurge; /* Write port states */ - ret = _mv88e6xxx_vtu_stu_data_write(ds, entry, 2); + ret = _mv88e6xxx_vtu_stu_data_write(ps, entry, 2); if (ret < 0) return ret; reg = GLOBAL_VTU_VID_VALID; loadpurge: - ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_VID, reg); + ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_VTU_VID, reg); if (ret < 0) return ret; reg = entry->sid & GLOBAL_VTU_SID_MASK; - ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_SID, reg); + ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_VTU_SID, reg); if (ret < 0) return ret; - return _mv88e6xxx_vtu_cmd(ds, GLOBAL_VTU_OP_STU_LOAD_PURGE); + return _mv88e6xxx_vtu_cmd(ps, GLOBAL_VTU_OP_STU_LOAD_PURGE); } -static int _mv88e6xxx_port_fid(struct dsa_switch *ds, int port, u16 *new, - u16 *old) +static int _mv88e6xxx_port_fid(struct mv88e6xxx_priv_state *ps, int port, + u16 *new, u16 *old) { + struct dsa_switch *ds = ps->ds; u16 upper_mask; u16 fid; int ret; - if (mv88e6xxx_num_databases(ds) == 4096) + if (mv88e6xxx_num_databases(ps) == 4096) upper_mask = 0xff; - else if (mv88e6xxx_num_databases(ds) == 256) + else if (mv88e6xxx_num_databases(ps) == 256) upper_mask = 0xf; else return -EOPNOTSUPP; /* Port's default FID bits 3:0 are located in reg 0x06, offset 12 */ - ret = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_BASE_VLAN); + ret = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_BASE_VLAN); if (ret < 0) return ret; @@ -1565,14 +1557,14 @@ static int _mv88e6xxx_port_fid(struct dsa_switch *ds, int port, u16 *new, ret &= ~PORT_BASE_VLAN_FID_3_0_MASK; ret |= (*new << 12) & PORT_BASE_VLAN_FID_3_0_MASK; - ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_BASE_VLAN, + ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_BASE_VLAN, ret); if (ret < 0) return ret; } /* Port's default FID bits 11:4 are located in reg 0x05, offset 0 */ - ret = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_CONTROL_1); + ret = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_CONTROL_1); if (ret < 0) return ret; @@ -1582,7 +1574,7 @@ static int _mv88e6xxx_port_fid(struct dsa_switch *ds, int port, u16 *new, ret &= ~upper_mask; ret |= (*new >> 4) & upper_mask; - ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_CONTROL_1, + ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_CONTROL_1, ret); if (ret < 0) return ret; @@ -1596,19 +1588,20 @@ static int _mv88e6xxx_port_fid(struct dsa_switch *ds, int port, u16 *new, return 0; } -static int _mv88e6xxx_port_fid_get(struct dsa_switch *ds, int port, u16 *fid) +static int _mv88e6xxx_port_fid_get(struct mv88e6xxx_priv_state *ps, + int port, u16 *fid) { - return _mv88e6xxx_port_fid(ds, port, NULL, fid); + return _mv88e6xxx_port_fid(ps, port, NULL, fid); } -static int _mv88e6xxx_port_fid_set(struct dsa_switch *ds, int port, u16 fid) +static int _mv88e6xxx_port_fid_set(struct mv88e6xxx_priv_state *ps, + int port, u16 fid) { - return _mv88e6xxx_port_fid(ds, port, &fid, NULL); + return _mv88e6xxx_port_fid(ps, port, &fid, NULL); } -static int _mv88e6xxx_fid_new(struct dsa_switch *ds, u16 *fid) +static int _mv88e6xxx_fid_new(struct mv88e6xxx_priv_state *ps, u16 *fid) { - struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); DECLARE_BITMAP(fid_bitmap, MV88E6XXX_N_FID); struct mv88e6xxx_vtu_stu_entry vlan; int i, err; @@ -1617,7 +1610,7 @@ static int _mv88e6xxx_fid_new(struct dsa_switch *ds, 
u16 *fid) /* Set every FID bit used by the (un)bridged ports */ for (i = 0; i < ps->info->num_ports; ++i) { - err = _mv88e6xxx_port_fid_get(ds, i, fid); + err = _mv88e6xxx_port_fid_get(ps, i, fid); if (err) return err; @@ -1625,12 +1618,12 @@ static int _mv88e6xxx_fid_new(struct dsa_switch *ds, u16 *fid) } /* Set every FID bit used by the VLAN entries */ - err = _mv88e6xxx_vtu_vid_write(ds, GLOBAL_VTU_VID_MASK); + err = _mv88e6xxx_vtu_vid_write(ps, GLOBAL_VTU_VID_MASK); if (err) return err; do { - err = _mv88e6xxx_vtu_getnext(ds, &vlan); + err = _mv88e6xxx_vtu_getnext(ps, &vlan); if (err) return err; @@ -1644,24 +1637,24 @@ static int _mv88e6xxx_fid_new(struct dsa_switch *ds, u16 *fid) * databases are not needed. Return the next positive available. */ *fid = find_next_zero_bit(fid_bitmap, MV88E6XXX_N_FID, 1); - if (unlikely(*fid >= mv88e6xxx_num_databases(ds))) + if (unlikely(*fid >= mv88e6xxx_num_databases(ps))) return -ENOSPC; /* Clear the database */ - return _mv88e6xxx_atu_flush(ds, *fid, true); + return _mv88e6xxx_atu_flush(ps, *fid, true); } -static int _mv88e6xxx_vtu_new(struct dsa_switch *ds, u16 vid, +static int _mv88e6xxx_vtu_new(struct mv88e6xxx_priv_state *ps, u16 vid, struct mv88e6xxx_vtu_stu_entry *entry) { - struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); + struct dsa_switch *ds = ps->ds; struct mv88e6xxx_vtu_stu_entry vlan = { .valid = true, .vid = vid, }; int i, err; - err = _mv88e6xxx_fid_new(ds, &vlan.fid); + err = _mv88e6xxx_fid_new(ps, &vlan.fid); if (err) return err; @@ -1671,8 +1664,8 @@ static int _mv88e6xxx_vtu_new(struct dsa_switch *ds, u16 vid, ? GLOBAL_VTU_DATA_MEMBER_TAG_UNMODIFIED : GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER; - if (mv88e6xxx_6097_family(ds) || mv88e6xxx_6165_family(ds) || - mv88e6xxx_6351_family(ds) || mv88e6xxx_6352_family(ds)) { + if (mv88e6xxx_6097_family(ps) || mv88e6xxx_6165_family(ps) || + mv88e6xxx_6351_family(ps) || mv88e6xxx_6352_family(ps)) { struct mv88e6xxx_vtu_stu_entry vstp; /* Adding a VTU entry requires a valid STU entry. As VSTP is not @@ -1680,7 +1673,7 @@ static int _mv88e6xxx_vtu_new(struct dsa_switch *ds, u16 vid, * entries. Thus, validate the SID 0. */ vlan.sid = 0; - err = _mv88e6xxx_stu_getnext(ds, GLOBAL_VTU_SID_MASK, &vstp); + err = _mv88e6xxx_stu_getnext(ps, GLOBAL_VTU_SID_MASK, &vstp); if (err) return err; @@ -1689,7 +1682,7 @@ static int _mv88e6xxx_vtu_new(struct dsa_switch *ds, u16 vid, vstp.valid = true; vstp.sid = vlan.sid; - err = _mv88e6xxx_stu_loadpurge(ds, &vstp); + err = _mv88e6xxx_stu_loadpurge(ps, &vstp); if (err) return err; } @@ -1699,7 +1692,7 @@ static int _mv88e6xxx_vtu_new(struct dsa_switch *ds, u16 vid, return 0; } -static int _mv88e6xxx_vtu_get(struct dsa_switch *ds, u16 vid, +static int _mv88e6xxx_vtu_get(struct mv88e6xxx_priv_state *ps, u16 vid, struct mv88e6xxx_vtu_stu_entry *entry, bool creat) { int err; @@ -1707,11 +1700,11 @@ static int _mv88e6xxx_vtu_get(struct dsa_switch *ds, u16 vid, if (!vid) return -EINVAL; - err = _mv88e6xxx_vtu_vid_write(ds, vid - 1); + err = _mv88e6xxx_vtu_vid_write(ps, vid - 1); if (err) return err; - err = _mv88e6xxx_vtu_getnext(ds, entry); + err = _mv88e6xxx_vtu_getnext(ps, entry); if (err) return err; @@ -1722,7 +1715,7 @@ static int _mv88e6xxx_vtu_get(struct dsa_switch *ds, u16 vid, * -EOPNOTSUPP to inform bridge about an eventual software VLAN. 
*/ - err = _mv88e6xxx_vtu_new(ds, vid, entry); + err = _mv88e6xxx_vtu_new(ps, vid, entry); } return err; @@ -1740,12 +1733,12 @@ static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port, mutex_lock(&ps->smi_mutex); - err = _mv88e6xxx_vtu_vid_write(ds, vid_begin - 1); + err = _mv88e6xxx_vtu_vid_write(ps, vid_begin - 1); if (err) goto unlock; do { - err = _mv88e6xxx_vtu_getnext(ds, &vlan); + err = _mv88e6xxx_vtu_getnext(ps, &vlan); if (err) goto unlock; @@ -1799,7 +1792,7 @@ int mv88e6xxx_port_vlan_filtering(struct dsa_switch *ds, int port, mutex_lock(&ps->smi_mutex); - ret = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_CONTROL_2); + ret = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_CONTROL_2); if (ret < 0) goto unlock; @@ -1809,7 +1802,7 @@ int mv88e6xxx_port_vlan_filtering(struct dsa_switch *ds, int port, ret &= ~PORT_CONTROL_2_8021Q_MASK; ret |= new & PORT_CONTROL_2_8021Q_MASK; - ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_CONTROL_2, + ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_CONTROL_2, ret); if (ret < 0) goto unlock; @@ -1846,13 +1839,13 @@ int mv88e6xxx_port_vlan_prepare(struct dsa_switch *ds, int port, return 0; } -static int _mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port, u16 vid, - bool untagged) +static int _mv88e6xxx_port_vlan_add(struct mv88e6xxx_priv_state *ps, int port, + u16 vid, bool untagged) { struct mv88e6xxx_vtu_stu_entry vlan; int err; - err = _mv88e6xxx_vtu_get(ds, vid, &vlan, true); + err = _mv88e6xxx_vtu_get(ps, vid, &vlan, true); if (err) return err; @@ -1860,7 +1853,7 @@ static int _mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port, u16 vid, GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED : GLOBAL_VTU_DATA_MEMBER_TAG_TAGGED; - return _mv88e6xxx_vtu_loadpurge(ds, &vlan); + return _mv88e6xxx_vtu_loadpurge(ps, &vlan); } void mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port, @@ -1875,24 +1868,25 @@ void mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port, mutex_lock(&ps->smi_mutex); for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) - if (_mv88e6xxx_port_vlan_add(ds, port, vid, untagged)) + if (_mv88e6xxx_port_vlan_add(ps, port, vid, untagged)) netdev_err(ds->ports[port], "failed to add VLAN %d%c\n", vid, untagged ? 
'u' : 't'); - if (pvid && _mv88e6xxx_port_pvid_set(ds, port, vlan->vid_end)) + if (pvid && _mv88e6xxx_port_pvid_set(ps, port, vlan->vid_end)) netdev_err(ds->ports[port], "failed to set PVID %d\n", vlan->vid_end); mutex_unlock(&ps->smi_mutex); } -static int _mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port, u16 vid) +static int _mv88e6xxx_port_vlan_del(struct mv88e6xxx_priv_state *ps, + int port, u16 vid) { - struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); + struct dsa_switch *ds = ps->ds; struct mv88e6xxx_vtu_stu_entry vlan; int i, err; - err = _mv88e6xxx_vtu_get(ds, vid, &vlan, false); + err = _mv88e6xxx_vtu_get(ps, vid, &vlan, false); if (err) return err; @@ -1914,11 +1908,11 @@ static int _mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port, u16 vid) } } - err = _mv88e6xxx_vtu_loadpurge(ds, &vlan); + err = _mv88e6xxx_vtu_loadpurge(ps, &vlan); if (err) return err; - return _mv88e6xxx_atu_remove(ds, vlan.fid, port, false); + return _mv88e6xxx_atu_remove(ps, vlan.fid, port, false); } int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port, @@ -1930,17 +1924,17 @@ int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port, mutex_lock(&ps->smi_mutex); - err = _mv88e6xxx_port_pvid_get(ds, port, &pvid); + err = _mv88e6xxx_port_pvid_get(ps, port, &pvid); if (err) goto unlock; for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) { - err = _mv88e6xxx_port_vlan_del(ds, port, vid); + err = _mv88e6xxx_port_vlan_del(ps, port, vid); if (err) goto unlock; if (vid == pvid) { - err = _mv88e6xxx_port_pvid_set(ds, port, 0); + err = _mv88e6xxx_port_pvid_set(ps, port, 0); if (err) goto unlock; } @@ -1952,14 +1946,14 @@ unlock: return err; } -static int _mv88e6xxx_atu_mac_write(struct dsa_switch *ds, +static int _mv88e6xxx_atu_mac_write(struct mv88e6xxx_priv_state *ps, const unsigned char *addr) { int i, ret; for (i = 0; i < 3; i++) { ret = _mv88e6xxx_reg_write( - ds, REG_GLOBAL, GLOBAL_ATU_MAC_01 + i, + ps, REG_GLOBAL, GLOBAL_ATU_MAC_01 + i, (addr[i * 2] << 8) | addr[i * 2 + 1]); if (ret < 0) return ret; @@ -1968,12 +1962,13 @@ static int _mv88e6xxx_atu_mac_write(struct dsa_switch *ds, return 0; } -static int _mv88e6xxx_atu_mac_read(struct dsa_switch *ds, unsigned char *addr) +static int _mv88e6xxx_atu_mac_read(struct mv88e6xxx_priv_state *ps, + unsigned char *addr) { int i, ret; for (i = 0; i < 3; i++) { - ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, + ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_ATU_MAC_01 + i); if (ret < 0) return ret; @@ -1984,27 +1979,27 @@ static int _mv88e6xxx_atu_mac_read(struct dsa_switch *ds, unsigned char *addr) return 0; } -static int _mv88e6xxx_atu_load(struct dsa_switch *ds, +static int _mv88e6xxx_atu_load(struct mv88e6xxx_priv_state *ps, struct mv88e6xxx_atu_entry *entry) { int ret; - ret = _mv88e6xxx_atu_wait(ds); + ret = _mv88e6xxx_atu_wait(ps); if (ret < 0) return ret; - ret = _mv88e6xxx_atu_mac_write(ds, entry->mac); + ret = _mv88e6xxx_atu_mac_write(ps, entry->mac); if (ret < 0) return ret; - ret = _mv88e6xxx_atu_data_write(ds, entry); + ret = _mv88e6xxx_atu_data_write(ps, entry); if (ret < 0) return ret; - return _mv88e6xxx_atu_cmd(ds, entry->fid, GLOBAL_ATU_OP_LOAD_DB); + return _mv88e6xxx_atu_cmd(ps, entry->fid, GLOBAL_ATU_OP_LOAD_DB); } -static int _mv88e6xxx_port_fdb_load(struct dsa_switch *ds, int port, +static int _mv88e6xxx_port_fdb_load(struct mv88e6xxx_priv_state *ps, int port, const unsigned char *addr, u16 vid, u8 state) { @@ -2014,9 +2009,9 @@ static int _mv88e6xxx_port_fdb_load(struct dsa_switch *ds, int port, /* Null VLAN ID 
corresponds to the port private database */ if (vid == 0) - err = _mv88e6xxx_port_fid_get(ds, port, &vlan.fid); + err = _mv88e6xxx_port_fid_get(ps, port, &vlan.fid); else - err = _mv88e6xxx_vtu_get(ds, vid, &vlan, false); + err = _mv88e6xxx_vtu_get(ps, vid, &vlan, false); if (err) return err; @@ -2028,7 +2023,7 @@ static int _mv88e6xxx_port_fdb_load(struct dsa_switch *ds, int port, entry.portv_trunkid = BIT(port); } - return _mv88e6xxx_atu_load(ds, &entry); + return _mv88e6xxx_atu_load(ps, &entry); } int mv88e6xxx_port_fdb_prepare(struct dsa_switch *ds, int port, @@ -2051,7 +2046,7 @@ void mv88e6xxx_port_fdb_add(struct dsa_switch *ds, int port, struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); mutex_lock(&ps->smi_mutex); - if (_mv88e6xxx_port_fdb_load(ds, port, fdb->addr, fdb->vid, state)) + if (_mv88e6xxx_port_fdb_load(ps, port, fdb->addr, fdb->vid, state)) netdev_err(ds->ports[port], "failed to load MAC address\n"); mutex_unlock(&ps->smi_mutex); } @@ -2063,14 +2058,14 @@ int mv88e6xxx_port_fdb_del(struct dsa_switch *ds, int port, int ret; mutex_lock(&ps->smi_mutex); - ret = _mv88e6xxx_port_fdb_load(ds, port, fdb->addr, fdb->vid, + ret = _mv88e6xxx_port_fdb_load(ps, port, fdb->addr, fdb->vid, GLOBAL_ATU_DATA_STATE_UNUSED); mutex_unlock(&ps->smi_mutex); return ret; } -static int _mv88e6xxx_atu_getnext(struct dsa_switch *ds, u16 fid, +static int _mv88e6xxx_atu_getnext(struct mv88e6xxx_priv_state *ps, u16 fid, struct mv88e6xxx_atu_entry *entry) { struct mv88e6xxx_atu_entry next = { 0 }; @@ -2078,19 +2073,19 @@ static int _mv88e6xxx_atu_getnext(struct dsa_switch *ds, u16 fid, next.fid = fid; - ret = _mv88e6xxx_atu_wait(ds); + ret = _mv88e6xxx_atu_wait(ps); if (ret < 0) return ret; - ret = _mv88e6xxx_atu_cmd(ds, fid, GLOBAL_ATU_OP_GET_NEXT_DB); + ret = _mv88e6xxx_atu_cmd(ps, fid, GLOBAL_ATU_OP_GET_NEXT_DB); if (ret < 0) return ret; - ret = _mv88e6xxx_atu_mac_read(ds, next.mac); + ret = _mv88e6xxx_atu_mac_read(ps, next.mac); if (ret < 0) return ret; - ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_ATU_DATA); + ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_ATU_DATA); if (ret < 0) return ret; @@ -2115,8 +2110,8 @@ static int _mv88e6xxx_atu_getnext(struct dsa_switch *ds, u16 fid, return 0; } -static int _mv88e6xxx_port_fdb_dump_one(struct dsa_switch *ds, u16 fid, u16 vid, - int port, +static int _mv88e6xxx_port_fdb_dump_one(struct mv88e6xxx_priv_state *ps, + u16 fid, u16 vid, int port, struct switchdev_obj_port_fdb *fdb, int (*cb)(struct switchdev_obj *obj)) { @@ -2125,12 +2120,12 @@ static int _mv88e6xxx_port_fdb_dump_one(struct dsa_switch *ds, u16 fid, u16 vid, }; int err; - err = _mv88e6xxx_atu_mac_write(ds, addr.mac); + err = _mv88e6xxx_atu_mac_write(ps, addr.mac); if (err) return err; do { - err = _mv88e6xxx_atu_getnext(ds, fid, &addr); + err = _mv88e6xxx_atu_getnext(ps, fid, &addr); if (err) break; @@ -2170,28 +2165,28 @@ int mv88e6xxx_port_fdb_dump(struct dsa_switch *ds, int port, mutex_lock(&ps->smi_mutex); /* Dump port's default Filtering Information Database (VLAN ID 0) */ - err = _mv88e6xxx_port_fid_get(ds, port, &fid); + err = _mv88e6xxx_port_fid_get(ps, port, &fid); if (err) goto unlock; - err = _mv88e6xxx_port_fdb_dump_one(ds, fid, 0, port, fdb, cb); + err = _mv88e6xxx_port_fdb_dump_one(ps, fid, 0, port, fdb, cb); if (err) goto unlock; /* Dump VLANs' Filtering Information Databases */ - err = _mv88e6xxx_vtu_vid_write(ds, vlan.vid); + err = _mv88e6xxx_vtu_vid_write(ps, vlan.vid); if (err) goto unlock; do { - err = _mv88e6xxx_vtu_getnext(ds, &vlan); + err = 
_mv88e6xxx_vtu_getnext(ps, &vlan); if (err) break; if (!vlan.valid) break; - err = _mv88e6xxx_port_fdb_dump_one(ds, vlan.fid, vlan.vid, port, + err = _mv88e6xxx_port_fdb_dump_one(ps, vlan.fid, vlan.vid, port, fdb, cb); if (err) break; @@ -2216,7 +2211,7 @@ int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port, for (i = 0; i < ps->info->num_ports; ++i) { if (ps->ports[i].bridge_dev == bridge) { - err = _mv88e6xxx_port_based_vlan_map(ds, i); + err = _mv88e6xxx_port_based_vlan_map(ps, i); if (err) break; } @@ -2240,7 +2235,7 @@ void mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port) for (i = 0; i < ps->info->num_ports; ++i) if (i == port || ps->ports[i].bridge_dev == bridge) - if (_mv88e6xxx_port_based_vlan_map(ds, i)) + if (_mv88e6xxx_port_based_vlan_map(ps, i)) netdev_warn(ds->ports[i], "failed to remap\n"); mutex_unlock(&ps->smi_mutex); @@ -2259,57 +2254,58 @@ static void mv88e6xxx_bridge_work(struct work_struct *work) for (port = 0; port < ps->info->num_ports; ++port) if (test_and_clear_bit(port, ps->port_state_update_mask) && - _mv88e6xxx_port_state(ds, port, ps->ports[port].state)) - netdev_warn(ds->ports[port], "failed to update state to %s\n", + _mv88e6xxx_port_state(ps, port, ps->ports[port].state)) + netdev_warn(ds->ports[port], + "failed to update state to %s\n", mv88e6xxx_port_state_names[ps->ports[port].state]); mutex_unlock(&ps->smi_mutex); } -static int _mv88e6xxx_phy_page_write(struct dsa_switch *ds, int port, int page, - int reg, int val) +static int _mv88e6xxx_phy_page_write(struct mv88e6xxx_priv_state *ps, + int port, int page, int reg, int val) { int ret; - ret = _mv88e6xxx_phy_write_indirect(ds, port, 0x16, page); + ret = _mv88e6xxx_phy_write_indirect(ps, port, 0x16, page); if (ret < 0) goto restore_page_0; - ret = _mv88e6xxx_phy_write_indirect(ds, port, reg, val); + ret = _mv88e6xxx_phy_write_indirect(ps, port, reg, val); restore_page_0: - _mv88e6xxx_phy_write_indirect(ds, port, 0x16, 0x0); + _mv88e6xxx_phy_write_indirect(ps, port, 0x16, 0x0); return ret; } -static int _mv88e6xxx_phy_page_read(struct dsa_switch *ds, int port, int page, - int reg) +static int _mv88e6xxx_phy_page_read(struct mv88e6xxx_priv_state *ps, + int port, int page, int reg) { int ret; - ret = _mv88e6xxx_phy_write_indirect(ds, port, 0x16, page); + ret = _mv88e6xxx_phy_write_indirect(ps, port, 0x16, page); if (ret < 0) goto restore_page_0; - ret = _mv88e6xxx_phy_read_indirect(ds, port, reg); + ret = _mv88e6xxx_phy_read_indirect(ps, port, reg); restore_page_0: - _mv88e6xxx_phy_write_indirect(ds, port, 0x16, 0x0); + _mv88e6xxx_phy_write_indirect(ps, port, 0x16, 0x0); return ret; } -static int mv88e6xxx_power_on_serdes(struct dsa_switch *ds) +static int mv88e6xxx_power_on_serdes(struct mv88e6xxx_priv_state *ps) { int ret; - ret = _mv88e6xxx_phy_page_read(ds, REG_FIBER_SERDES, PAGE_FIBER_SERDES, + ret = _mv88e6xxx_phy_page_read(ps, REG_FIBER_SERDES, PAGE_FIBER_SERDES, MII_BMCR); if (ret < 0) return ret; if (ret & BMCR_PDOWN) { ret &= ~BMCR_PDOWN; - ret = _mv88e6xxx_phy_page_write(ds, REG_FIBER_SERDES, + ret = _mv88e6xxx_phy_page_write(ps, REG_FIBER_SERDES, PAGE_FIBER_SERDES, MII_BMCR, ret); } @@ -2325,24 +2321,24 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port) mutex_lock(&ps->smi_mutex); - if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) || - mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) || - mv88e6xxx_6185_family(ds) || mv88e6xxx_6095_family(ds) || - mv88e6xxx_6065_family(ds) || mv88e6xxx_6320_family(ds)) { + if (mv88e6xxx_6352_family(ps) || 
mv88e6xxx_6351_family(ps) || + mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) || + mv88e6xxx_6185_family(ps) || mv88e6xxx_6095_family(ps) || + mv88e6xxx_6065_family(ps) || mv88e6xxx_6320_family(ps)) { /* MAC Forcing register: don't force link, speed, * duplex or flow control state to any particular * values on physical ports, but force the CPU port * and all DSA ports to their maximum bandwidth and * full duplex. */ - reg = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_PCS_CTRL); + reg = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_PCS_CTRL); if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)) { reg &= ~PORT_PCS_CTRL_UNFORCED; reg |= PORT_PCS_CTRL_FORCE_LINK | PORT_PCS_CTRL_LINK_UP | PORT_PCS_CTRL_DUPLEX_FULL | PORT_PCS_CTRL_FORCE_DUPLEX; - if (mv88e6xxx_6065_family(ds)) + if (mv88e6xxx_6065_family(ps)) reg |= PORT_PCS_CTRL_100; else reg |= PORT_PCS_CTRL_1000; @@ -2350,7 +2346,7 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port) reg |= PORT_PCS_CTRL_UNFORCED; } - ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), + ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_PCS_CTRL, reg); if (ret) goto abort; @@ -2371,19 +2367,19 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port) * forwarding of unknown unicasts and multicasts. */ reg = 0; - if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) || - mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) || - mv88e6xxx_6095_family(ds) || mv88e6xxx_6065_family(ds) || - mv88e6xxx_6185_family(ds) || mv88e6xxx_6320_family(ds)) + if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) || + mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) || + mv88e6xxx_6095_family(ps) || mv88e6xxx_6065_family(ps) || + mv88e6xxx_6185_family(ps) || mv88e6xxx_6320_family(ps)) reg = PORT_CONTROL_IGMP_MLD_SNOOP | PORT_CONTROL_USE_TAG | PORT_CONTROL_USE_IP | PORT_CONTROL_STATE_FORWARDING; if (dsa_is_cpu_port(ds, port)) { - if (mv88e6xxx_6095_family(ds) || mv88e6xxx_6185_family(ds)) + if (mv88e6xxx_6095_family(ps) || mv88e6xxx_6185_family(ps)) reg |= PORT_CONTROL_DSA_TAG; - if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) || - mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) || - mv88e6xxx_6320_family(ds)) { + if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) || + mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) || + mv88e6xxx_6320_family(ps)) { if (ds->dst->tag_protocol == DSA_TAG_PROTO_EDSA) reg |= PORT_CONTROL_FRAME_ETHER_TYPE_DSA; else @@ -2392,20 +2388,20 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port) PORT_CONTROL_FORWARD_UNKNOWN_MC; } - if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) || - mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) || - mv88e6xxx_6095_family(ds) || mv88e6xxx_6065_family(ds) || - mv88e6xxx_6185_family(ds) || mv88e6xxx_6320_family(ds)) { + if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) || + mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) || + mv88e6xxx_6095_family(ps) || mv88e6xxx_6065_family(ps) || + mv88e6xxx_6185_family(ps) || mv88e6xxx_6320_family(ps)) { if (ds->dst->tag_protocol == DSA_TAG_PROTO_EDSA) reg |= PORT_CONTROL_EGRESS_ADD_TAG; } } if (dsa_is_dsa_port(ds, port)) { - if (mv88e6xxx_6095_family(ds) || mv88e6xxx_6185_family(ds)) + if (mv88e6xxx_6095_family(ps) || mv88e6xxx_6185_family(ps)) reg |= PORT_CONTROL_DSA_TAG; - if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) || - mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) || - mv88e6xxx_6320_family(ds)) { + if 
(mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) || + mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) || + mv88e6xxx_6320_family(ps)) { reg |= PORT_CONTROL_FRAME_MODE_DSA; } @@ -2414,7 +2410,7 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port) PORT_CONTROL_FORWARD_UNKNOWN_MC; } if (reg) { - ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), + ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_CONTROL, reg); if (ret) goto abort; @@ -2423,15 +2419,15 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port) /* If this port is connected to a SerDes, make sure the SerDes is not * powered down. */ - if (mv88e6xxx_6352_family(ds)) { - ret = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_STATUS); + if (mv88e6xxx_6352_family(ps)) { + ret = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_STATUS); if (ret < 0) goto abort; ret &= PORT_STATUS_CMODE_MASK; if ((ret == PORT_STATUS_CMODE_100BASE_X) || (ret == PORT_STATUS_CMODE_1000BASE_X) || (ret == PORT_STATUS_CMODE_SGMII)) { - ret = mv88e6xxx_power_on_serdes(ds); + ret = mv88e6xxx_power_on_serdes(ps); if (ret < 0) goto abort; } @@ -2444,17 +2440,17 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port) * copy of all transmitted/received frames on this port to the CPU. */ reg = 0; - if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) || - mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) || - mv88e6xxx_6095_family(ds) || mv88e6xxx_6320_family(ds) || - mv88e6xxx_6185_family(ds)) + if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) || + mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) || + mv88e6xxx_6095_family(ps) || mv88e6xxx_6320_family(ps) || + mv88e6xxx_6185_family(ps)) reg = PORT_CONTROL_2_MAP_DA; - if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) || - mv88e6xxx_6165_family(ds) || mv88e6xxx_6320_family(ds)) + if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) || + mv88e6xxx_6165_family(ps) || mv88e6xxx_6320_family(ps)) reg |= PORT_CONTROL_2_JUMBO_10240; - if (mv88e6xxx_6095_family(ds) || mv88e6xxx_6185_family(ds)) { + if (mv88e6xxx_6095_family(ps) || mv88e6xxx_6185_family(ps)) { /* Set the upstream port this port should use */ reg |= dsa_upstream_port(ds); /* enable forwarding of unknown multicast addresses to @@ -2467,7 +2463,7 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port) reg |= PORT_CONTROL_2_8021Q_DISABLED; if (reg) { - ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), + ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_CONTROL_2, reg); if (ret) goto abort; @@ -2483,24 +2479,24 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port) if (dsa_is_cpu_port(ds, port)) reg = 0; - ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_ASSOC_VECTOR, reg); + ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_ASSOC_VECTOR, reg); if (ret) goto abort; /* Egress rate control 2: disable egress rate control. */ - ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_RATE_CONTROL_2, + ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_RATE_CONTROL_2, 0x0000); if (ret) goto abort; - if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) || - mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) || - mv88e6xxx_6320_family(ds)) { + if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) || + mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) || + mv88e6xxx_6320_family(ps)) { /* Do not limit the period of time that this port can * be paused for by the remote end or the period of * time that this port can pause the remote end. 
*/ - ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), + ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_PAUSE_CTRL, 0x0000); if (ret) goto abort; @@ -2509,12 +2505,12 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port) * address database entries that this port is allowed * to use. */ - ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), + ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_ATU_CONTROL, 0x0000); /* Priority Override: disable DA, SA and VTU priority * override. */ - ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), + ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_PRI_OVERRIDE, 0x0000); if (ret) goto abort; @@ -2522,14 +2518,14 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port) /* Port Ethertype: use the Ethertype DSA Ethertype * value. */ - ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), + ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_ETH_TYPE, ETH_P_EDSA); if (ret) goto abort; /* Tag Remap: use an identity 802.1p prio -> switch * prio mapping. */ - ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), + ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_TAG_REGMAP_0123, 0x3210); if (ret) goto abort; @@ -2537,18 +2533,18 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port) /* Tag Remap 2: use an identity 802.1p prio -> switch * prio mapping. */ - ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), + ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_TAG_REGMAP_4567, 0x7654); if (ret) goto abort; } - if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) || - mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) || - mv88e6xxx_6185_family(ds) || mv88e6xxx_6095_family(ds) || - mv88e6xxx_6320_family(ds)) { + if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) || + mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) || + mv88e6xxx_6185_family(ps) || mv88e6xxx_6095_family(ps) || + mv88e6xxx_6320_family(ps)) { /* Rate Control: disable ingress rate limiting. */ - ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), + ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_RATE_CONTROL, 0x0001); if (ret) goto abort; @@ -2557,7 +2553,7 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port) /* Port Control 1: disable trunking, disable sending * learning messages to this port. */ - ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_CONTROL_1, 0x0000); + ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_CONTROL_1, 0x0000); if (ret) goto abort; @@ -2565,18 +2561,18 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port) * database, and allow bidirectional communication between the * CPU and DSA port(s), and the other ports. */ - ret = _mv88e6xxx_port_fid_set(ds, port, 0); + ret = _mv88e6xxx_port_fid_set(ps, port, 0); if (ret) goto abort; - ret = _mv88e6xxx_port_based_vlan_map(ds, port); + ret = _mv88e6xxx_port_based_vlan_map(ps, port); if (ret) goto abort; /* Default VLAN ID and priority: don't set a default VLAN * ID, and set the default packet priority to zero. 
*/ - ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_DEFAULT_VLAN, + ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_DEFAULT_VLAN, 0x0000); abort: mutex_unlock(&ps->smi_mutex); @@ -2597,11 +2593,8 @@ int mv88e6xxx_setup_ports(struct dsa_switch *ds) return 0; } -int mv88e6xxx_setup_common(struct dsa_switch *ds) +int mv88e6xxx_setup_common(struct mv88e6xxx_priv_state *ps) { - struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); - - ps->ds = ds; mutex_init(&ps->smi_mutex); INIT_WORK(&ps->bridge_work, mv88e6xxx_bridge_work); @@ -2620,46 +2613,46 @@ int mv88e6xxx_setup_global(struct dsa_switch *ds) * enable address learn messages to be sent to all message * ports. */ - err = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_CONTROL, + err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_ATU_CONTROL, 0x0140 | GLOBAL_ATU_CONTROL_LEARN2ALL); if (err) goto unlock; /* Configure the IP ToS mapping registers. */ - err = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_IP_PRI_0, 0x0000); + err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IP_PRI_0, 0x0000); if (err) goto unlock; - err = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_IP_PRI_1, 0x0000); + err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IP_PRI_1, 0x0000); if (err) goto unlock; - err = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_IP_PRI_2, 0x5555); + err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IP_PRI_2, 0x5555); if (err) goto unlock; - err = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_IP_PRI_3, 0x5555); + err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IP_PRI_3, 0x5555); if (err) goto unlock; - err = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_IP_PRI_4, 0xaaaa); + err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IP_PRI_4, 0xaaaa); if (err) goto unlock; - err = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_IP_PRI_5, 0xaaaa); + err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IP_PRI_5, 0xaaaa); if (err) goto unlock; - err = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_IP_PRI_6, 0xffff); + err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IP_PRI_6, 0xffff); if (err) goto unlock; - err = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_IP_PRI_7, 0xffff); + err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IP_PRI_7, 0xffff); if (err) goto unlock; /* Configure the IEEE 802.1p priority mapping register. */ - err = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_IEEE_PRI, 0xfa41); + err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IEEE_PRI, 0xfa41); if (err) goto unlock; /* Send all frames with destination addresses matching * 01:80:c2:00:00:0x to the CPU port. */ - err = _mv88e6xxx_reg_write(ds, REG_GLOBAL2, GLOBAL2_MGMT_EN_0X, 0xffff); + err = _mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_MGMT_EN_0X, 0xffff); if (err) goto unlock; @@ -2668,7 +2661,7 @@ int mv88e6xxx_setup_global(struct dsa_switch *ds) * highest, and send all special multicast frames to the CPU * port at the highest priority. */ - err = _mv88e6xxx_reg_write(ds, REG_GLOBAL2, GLOBAL2_SWITCH_MGMT, + err = _mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_SWITCH_MGMT, 0x7 | GLOBAL2_SWITCH_MGMT_RSVD2CPU | 0x70 | GLOBAL2_SWITCH_MGMT_FORCE_FLOW_CTRL_PRI); if (err) @@ -2683,7 +2676,7 @@ int mv88e6xxx_setup_global(struct dsa_switch *ds) nexthop = ds->pd->rtable[i] & 0x1f; err = _mv88e6xxx_reg_write( - ds, REG_GLOBAL2, + ps, REG_GLOBAL2, GLOBAL2_DEVICE_MAPPING, GLOBAL2_DEVICE_MAPPING_UPDATE | (i << GLOBAL2_DEVICE_MAPPING_TARGET_SHIFT) | nexthop); @@ -2693,7 +2686,7 @@ int mv88e6xxx_setup_global(struct dsa_switch *ds) /* Clear all trunk masks. 
*/ for (i = 0; i < 8; i++) { - err = _mv88e6xxx_reg_write(ds, REG_GLOBAL2, GLOBAL2_TRUNK_MASK, + err = _mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_TRUNK_MASK, 0x8000 | (i << GLOBAL2_TRUNK_MASK_NUM_SHIFT) | ((1 << ps->info->num_ports) - 1)); @@ -2704,7 +2697,7 @@ int mv88e6xxx_setup_global(struct dsa_switch *ds) /* Clear all trunk mappings. */ for (i = 0; i < 16; i++) { err = _mv88e6xxx_reg_write( - ds, REG_GLOBAL2, + ps, REG_GLOBAL2, GLOBAL2_TRUNK_MAPPING, GLOBAL2_TRUNK_MAPPING_UPDATE | (i << GLOBAL2_TRUNK_MAPPING_ID_SHIFT)); @@ -2712,13 +2705,13 @@ int mv88e6xxx_setup_global(struct dsa_switch *ds) goto unlock; } - if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) || - mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) || - mv88e6xxx_6320_family(ds)) { + if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) || + mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) || + mv88e6xxx_6320_family(ps)) { /* Send all frames with destination addresses matching * 01:80:c2:00:00:2x to the CPU port. */ - err = _mv88e6xxx_reg_write(ds, REG_GLOBAL2, + err = _mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_MGMT_EN_2X, 0xffff); if (err) goto unlock; @@ -2726,14 +2719,14 @@ int mv88e6xxx_setup_global(struct dsa_switch *ds) /* Initialise cross-chip port VLAN table to reset * defaults. */ - err = _mv88e6xxx_reg_write(ds, REG_GLOBAL2, + err = _mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_PVT_ADDR, 0x9000); if (err) goto unlock; /* Clear the priority override table. */ for (i = 0; i < 16; i++) { - err = _mv88e6xxx_reg_write(ds, REG_GLOBAL2, + err = _mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_PRIO_OVERRIDE, 0x8000 | (i << 8)); if (err) @@ -2741,16 +2734,16 @@ int mv88e6xxx_setup_global(struct dsa_switch *ds) } } - if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) || - mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) || - mv88e6xxx_6185_family(ds) || mv88e6xxx_6095_family(ds) || - mv88e6xxx_6320_family(ds)) { + if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) || + mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) || + mv88e6xxx_6185_family(ps) || mv88e6xxx_6095_family(ps) || + mv88e6xxx_6320_family(ps)) { /* Disable ingress rate limiting by resetting all * ingress rate limit registers to their initial * state. */ for (i = 0; i < ps->info->num_ports; i++) { - err = _mv88e6xxx_reg_write(ds, REG_GLOBAL2, + err = _mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_INGRESS_OP, 0x9000 | (i << 8)); if (err) @@ -2759,34 +2752,33 @@ int mv88e6xxx_setup_global(struct dsa_switch *ds) } /* Clear the statistics counters for all ports */ - err = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_STATS_OP, + err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_STATS_OP, GLOBAL_STATS_OP_FLUSH_ALL); if (err) goto unlock; /* Wait for the flush to complete. */ - err = _mv88e6xxx_stats_wait(ds); + err = _mv88e6xxx_stats_wait(ps); if (err < 0) goto unlock; /* Clear all ATU entries */ - err = _mv88e6xxx_atu_flush(ds, 0, true); + err = _mv88e6xxx_atu_flush(ps, 0, true); if (err < 0) goto unlock; /* Clear all the VTU and STU entries */ - err = _mv88e6xxx_vtu_stu_flush(ds); + err = _mv88e6xxx_vtu_stu_flush(ps); unlock: mutex_unlock(&ps->smi_mutex); return err; } -int mv88e6xxx_switch_reset(struct dsa_switch *ds, bool ppu_active) +int mv88e6xxx_switch_reset(struct mv88e6xxx_priv_state *ps, bool ppu_active) { - struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); u16 is_reset = (ppu_active ? 
0x8800 : 0xc800); - struct gpio_desc *gpiod = ds->pd->reset; + struct gpio_desc *gpiod = ps->ds->pd->reset; unsigned long timeout; int ret; int i; @@ -2795,11 +2787,11 @@ int mv88e6xxx_switch_reset(struct dsa_switch *ds, bool ppu_active) /* Set all ports to the disabled state. */ for (i = 0; i < ps->info->num_ports; i++) { - ret = _mv88e6xxx_reg_read(ds, REG_PORT(i), PORT_CONTROL); + ret = _mv88e6xxx_reg_read(ps, REG_PORT(i), PORT_CONTROL); if (ret < 0) goto unlock; - ret = _mv88e6xxx_reg_write(ds, REG_PORT(i), PORT_CONTROL, + ret = _mv88e6xxx_reg_write(ps, REG_PORT(i), PORT_CONTROL, ret & 0xfffc); if (ret) goto unlock; @@ -2821,16 +2813,16 @@ int mv88e6xxx_switch_reset(struct dsa_switch *ds, bool ppu_active) * through global registers 0x18 and 0x19. */ if (ppu_active) - ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, 0x04, 0xc000); + ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, 0x04, 0xc000); else - ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, 0x04, 0xc400); + ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, 0x04, 0xc400); if (ret) goto unlock; /* Wait up to one second for reset to complete. */ timeout = jiffies + 1 * HZ; while (time_before(jiffies, timeout)) { - ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, 0x00); + ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, 0x00); if (ret < 0) goto unlock; @@ -2854,7 +2846,7 @@ int mv88e6xxx_phy_page_read(struct dsa_switch *ds, int port, int page, int reg) int ret; mutex_lock(&ps->smi_mutex); - ret = _mv88e6xxx_phy_page_read(ds, port, page, reg); + ret = _mv88e6xxx_phy_page_read(ps, port, page, reg); mutex_unlock(&ps->smi_mutex); return ret; @@ -2867,16 +2859,15 @@ int mv88e6xxx_phy_page_write(struct dsa_switch *ds, int port, int page, int ret; mutex_lock(&ps->smi_mutex); - ret = _mv88e6xxx_phy_page_write(ds, port, page, reg, val); + ret = _mv88e6xxx_phy_page_write(ps, port, page, reg, val); mutex_unlock(&ps->smi_mutex); return ret; } -static int mv88e6xxx_port_to_phy_addr(struct dsa_switch *ds, int port) +static int mv88e6xxx_port_to_phy_addr(struct mv88e6xxx_priv_state *ps, + int port) { - struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); - if (port >= 0 && port < ps->info->num_ports) return port; return -EINVAL; @@ -2886,14 +2877,14 @@ int mv88e6xxx_phy_read(struct dsa_switch *ds, int port, int regnum) { struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); - int addr = mv88e6xxx_port_to_phy_addr(ds, port); + int addr = mv88e6xxx_port_to_phy_addr(ps, port); int ret; if (addr < 0) - return addr; + return 0xffff; mutex_lock(&ps->smi_mutex); - ret = _mv88e6xxx_phy_read(ds, addr, regnum); + ret = _mv88e6xxx_phy_read(ps, addr, regnum); mutex_unlock(&ps->smi_mutex); return ret; } @@ -2902,14 +2893,14 @@ int mv88e6xxx_phy_write(struct dsa_switch *ds, int port, int regnum, u16 val) { struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); - int addr = mv88e6xxx_port_to_phy_addr(ds, port); + int addr = mv88e6xxx_port_to_phy_addr(ps, port); int ret; if (addr < 0) - return addr; + return 0xffff; mutex_lock(&ps->smi_mutex); - ret = _mv88e6xxx_phy_write(ds, addr, regnum, val); + ret = _mv88e6xxx_phy_write(ps, addr, regnum, val); mutex_unlock(&ps->smi_mutex); return ret; } @@ -2918,14 +2909,14 @@ int mv88e6xxx_phy_read_indirect(struct dsa_switch *ds, int port, int regnum) { struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); - int addr = mv88e6xxx_port_to_phy_addr(ds, port); + int addr = mv88e6xxx_port_to_phy_addr(ps, port); int ret; if (addr < 0) - return addr; + return 0xffff; mutex_lock(&ps->smi_mutex); - ret = _mv88e6xxx_phy_read_indirect(ds, addr, regnum); + ret = 
_mv88e6xxx_phy_read_indirect(ps, addr, regnum); mutex_unlock(&ps->smi_mutex); return ret; } @@ -2935,14 +2926,14 @@ mv88e6xxx_phy_write_indirect(struct dsa_switch *ds, int port, int regnum, u16 val) { struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); - int addr = mv88e6xxx_port_to_phy_addr(ds, port); + int addr = mv88e6xxx_port_to_phy_addr(ps, port); int ret; if (addr < 0) return addr; mutex_lock(&ps->smi_mutex); - ret = _mv88e6xxx_phy_write_indirect(ds, addr, regnum, val); + ret = _mv88e6xxx_phy_write_indirect(ps, addr, regnum, val); mutex_unlock(&ps->smi_mutex); return ret; } @@ -2959,44 +2950,45 @@ static int mv88e61xx_get_temp(struct dsa_switch *ds, int *temp) mutex_lock(&ps->smi_mutex); - ret = _mv88e6xxx_phy_write(ds, 0x0, 0x16, 0x6); + ret = _mv88e6xxx_phy_write(ps, 0x0, 0x16, 0x6); if (ret < 0) goto error; /* Enable temperature sensor */ - ret = _mv88e6xxx_phy_read(ds, 0x0, 0x1a); + ret = _mv88e6xxx_phy_read(ps, 0x0, 0x1a); if (ret < 0) goto error; - ret = _mv88e6xxx_phy_write(ds, 0x0, 0x1a, ret | (1 << 5)); + ret = _mv88e6xxx_phy_write(ps, 0x0, 0x1a, ret | (1 << 5)); if (ret < 0) goto error; /* Wait for temperature to stabilize */ usleep_range(10000, 12000); - val = _mv88e6xxx_phy_read(ds, 0x0, 0x1a); + val = _mv88e6xxx_phy_read(ps, 0x0, 0x1a); if (val < 0) { ret = val; goto error; } /* Disable temperature sensor */ - ret = _mv88e6xxx_phy_write(ds, 0x0, 0x1a, ret & ~(1 << 5)); + ret = _mv88e6xxx_phy_write(ps, 0x0, 0x1a, ret & ~(1 << 5)); if (ret < 0) goto error; *temp = ((val & 0x1f) - 5) * 5; error: - _mv88e6xxx_phy_write(ds, 0x0, 0x16, 0x0); + _mv88e6xxx_phy_write(ps, 0x0, 0x16, 0x0); mutex_unlock(&ps->smi_mutex); return ret; } static int mv88e63xx_get_temp(struct dsa_switch *ds, int *temp) { - int phy = mv88e6xxx_6320_family(ds) ? 3 : 0; + struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); + int phy = mv88e6xxx_6320_family(ps) ? 3 : 0; int ret; *temp = 0; @@ -3012,7 +3004,9 @@ static int mv88e63xx_get_temp(struct dsa_switch *ds, int *temp) int mv88e6xxx_get_temp(struct dsa_switch *ds, int *temp) { - if (mv88e6xxx_6320_family(ds) || mv88e6xxx_6352_family(ds)) + struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); + + if (mv88e6xxx_6320_family(ps) || mv88e6xxx_6352_family(ps)) return mv88e63xx_get_temp(ds, temp); return mv88e61xx_get_temp(ds, temp); @@ -3020,10 +3014,11 @@ int mv88e6xxx_get_temp(struct dsa_switch *ds, int *temp) int mv88e6xxx_get_temp_limit(struct dsa_switch *ds, int *temp) { - int phy = mv88e6xxx_6320_family(ds) ? 3 : 0; + struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); + int phy = mv88e6xxx_6320_family(ps) ? 3 : 0; int ret; - if (!mv88e6xxx_6320_family(ds) && !mv88e6xxx_6352_family(ds)) + if (!mv88e6xxx_6320_family(ps) && !mv88e6xxx_6352_family(ps)) return -EOPNOTSUPP; *temp = 0; @@ -3039,10 +3034,11 @@ int mv88e6xxx_get_temp_limit(struct dsa_switch *ds, int *temp) int mv88e6xxx_set_temp_limit(struct dsa_switch *ds, int temp) { - int phy = mv88e6xxx_6320_family(ds) ? 3 : 0; + struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); + int phy = mv88e6xxx_6320_family(ps) ? 3 : 0; int ret; - if (!mv88e6xxx_6320_family(ds) && !mv88e6xxx_6352_family(ds)) + if (!mv88e6xxx_6320_family(ps) && !mv88e6xxx_6352_family(ps)) return -EOPNOTSUPP; ret = mv88e6xxx_phy_page_read(ds, phy, 6, 26); @@ -3055,10 +3051,11 @@ int mv88e6xxx_set_temp_limit(struct dsa_switch *ds, int temp) int mv88e6xxx_get_temp_alarm(struct dsa_switch *ds, bool *alarm) { - int phy = mv88e6xxx_6320_family(ds) ? 3 : 0; + struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); + int phy = mv88e6xxx_6320_family(ps) ? 
3 : 0; int ret; - if (!mv88e6xxx_6320_family(ds) && !mv88e6xxx_6352_family(ds)) + if (!mv88e6xxx_6320_family(ps) && !mv88e6xxx_6352_family(ps)) return -EOPNOTSUPP; *alarm = false; diff --git a/drivers/net/dsa/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx.h index 0dbe2d1779dd..4f455d219859 100644 --- a/drivers/net/dsa/mv88e6xxx.h +++ b/drivers/net/dsa/mv88e6xxx.h @@ -388,6 +388,9 @@ struct mv88e6xxx_priv_state { /* The dsa_switch this private structure is related to */ struct dsa_switch *ds; + /* The device this structure is associated to */ + struct device *dev; + /* When using multi-chip addressing, this mutex protects * access to the indirect access registers. (In single-chip * mode, this mutex is effectively useless.) @@ -446,17 +449,18 @@ struct mv88e6xxx_hw_stat { enum stat_type type; }; -int mv88e6xxx_switch_reset(struct dsa_switch *ds, bool ppu_active); +int mv88e6xxx_switch_reset(struct mv88e6xxx_priv_state *ps, bool ppu_active); const char *mv88e6xxx_drv_probe(struct device *dsa_dev, struct device *host_dev, int sw_addr, void **priv, const struct mv88e6xxx_info *table, unsigned int num); int mv88e6xxx_setup_ports(struct dsa_switch *ds); -int mv88e6xxx_setup_common(struct dsa_switch *ds); +int mv88e6xxx_setup_common(struct mv88e6xxx_priv_state *ps); int mv88e6xxx_setup_global(struct dsa_switch *ds); -int mv88e6xxx_reg_read(struct dsa_switch *ds, int addr, int reg); -int mv88e6xxx_reg_write(struct dsa_switch *ds, int addr, int reg, u16 val); +int mv88e6xxx_reg_read(struct mv88e6xxx_priv_state *ps, int addr, int reg); +int mv88e6xxx_reg_write(struct mv88e6xxx_priv_state *ps, int addr, + int reg, u16 val); int mv88e6xxx_set_addr_direct(struct dsa_switch *ds, u8 *addr); int mv88e6xxx_set_addr_indirect(struct dsa_switch *ds, u8 *addr); int mv88e6xxx_phy_read(struct dsa_switch *ds, int port, int regnum); @@ -464,7 +468,7 @@ int mv88e6xxx_phy_write(struct dsa_switch *ds, int port, int regnum, u16 val); int mv88e6xxx_phy_read_indirect(struct dsa_switch *ds, int port, int regnum); int mv88e6xxx_phy_write_indirect(struct dsa_switch *ds, int port, int regnum, u16 val); -void mv88e6xxx_ppu_state_init(struct dsa_switch *ds); +void mv88e6xxx_ppu_state_init(struct mv88e6xxx_priv_state *ps); int mv88e6xxx_phy_read_ppu(struct dsa_switch *ds, int addr, int regnum); int mv88e6xxx_phy_write_ppu(struct dsa_switch *ds, int addr, int regnum, u16 val); From 03dc76ca1ee5d02401d5a22ed7ddf15b5e9dfe76 Mon Sep 17 00:00:00 2001 From: Sudarsana Reddy Kalluru Date: Thu, 28 Apr 2016 20:20:52 -0400 Subject: [PATCH 1208/1649] qed: add infrastructure for device self tests. This patch adds the functionality and APIs needed for selftests. It adds the ability to configure the link-mode which is required for the implementation of loopback tests. It adds the APIs for clock test, register test, interrupt test and memory test. Signed-off-by: Sudarsana Reddy Kalluru Signed-off-by: Yuval Mintz Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/qed/Makefile | 3 +- drivers/net/ethernet/qlogic/qed/qed_hsi.h | 13 ++++ drivers/net/ethernet/qlogic/qed/qed_main.c | 28 +++++++ drivers/net/ethernet/qlogic/qed/qed_mcp.c | 42 ++++++++++ drivers/net/ethernet/qlogic/qed/qed_mcp.h | 22 ++++++ .../net/ethernet/qlogic/qed/qed_selftest.c | 76 +++++++++++++++++++ .../net/ethernet/qlogic/qed/qed_selftest.h | 40 ++++++++++ drivers/net/ethernet/qlogic/qed/qed_sp.h | 10 +++ .../net/ethernet/qlogic/qed/qed_sp_commands.c | 21 +++++ include/linux/qed/qed_if.h | 47 ++++++++++++ 10 files changed, 301 insertions(+), 1 deletion(-) create mode 100644 drivers/net/ethernet/qlogic/qed/qed_selftest.c create mode 100644 drivers/net/ethernet/qlogic/qed/qed_selftest.h diff --git a/drivers/net/ethernet/qlogic/qed/Makefile b/drivers/net/ethernet/qlogic/qed/Makefile index 5c2fd57236fe..aafa6692e62f 100644 --- a/drivers/net/ethernet/qlogic/qed/Makefile +++ b/drivers/net/ethernet/qlogic/qed/Makefile @@ -1,4 +1,5 @@ obj-$(CONFIG_QED) := qed.o qed-y := qed_cxt.o qed_dev.o qed_hw.o qed_init_fw_funcs.o qed_init_ops.o \ - qed_int.o qed_main.o qed_mcp.o qed_sp_commands.o qed_spq.o qed_l2.o + qed_int.o qed_main.o qed_mcp.o qed_sp_commands.o qed_spq.o qed_l2.o \ + qed_selftest.o diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h index 5aa78a9ae17f..c4fae71bed11 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h @@ -3857,6 +3857,7 @@ struct public_drv_mb { #define DRV_MSG_CODE_PHY_CORE_WRITE 0x000e0000 #define DRV_MSG_CODE_SET_VERSION 0x000f0000 +#define DRV_MSG_CODE_BIST_TEST 0x001e0000 #define DRV_MSG_CODE_SET_LED_MODE 0x00200000 #define DRV_MSG_SEQ_NUMBER_MASK 0x0000ffff @@ -3914,6 +3915,18 @@ struct public_drv_mb { #define DRV_MB_PARAM_SET_LED_MODE_ON 0x1 #define DRV_MB_PARAM_SET_LED_MODE_OFF 0x2 +#define DRV_MB_PARAM_BIST_UNKNOWN_TEST 0 +#define DRV_MB_PARAM_BIST_REGISTER_TEST 1 +#define DRV_MB_PARAM_BIST_CLOCK_TEST 2 + +#define DRV_MB_PARAM_BIST_RC_UNKNOWN 0 +#define DRV_MB_PARAM_BIST_RC_PASSED 1 +#define DRV_MB_PARAM_BIST_RC_FAILED 2 +#define DRV_MB_PARAM_BIST_RC_INVALID_PARAMETER 3 + +#define DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT 0 +#define DRV_MB_PARAM_BIST_TEST_INDEX_MASK 0x000000FF + u32 fw_mb_header; #define FW_MSG_CODE_MASK 0xffff0000 #define FW_MSG_CODE_DRV_LOAD_ENGINE 0x10100000 diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index 1918b83f0a97..1b758bdec587 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -28,6 +28,7 @@ #include "qed_dev_api.h" #include "qed_mcp.h" #include "qed_hw.h" +#include "qed_selftest.h" static char version[] = "QLogic FastLinQ 4xxxx Core Module qed " DRV_MODULE_VERSION "\n"; @@ -976,6 +977,25 @@ static int qed_set_link(struct qed_dev *cdev, else link_params->pause.forced_tx = false; } + if (params->override_flags & QED_LINK_OVERRIDE_LOOPBACK_MODE) { + switch (params->loopback_mode) { + case QED_LINK_LOOPBACK_INT_PHY: + link_params->loopback_mode = PMM_LOOPBACK_INT_PHY; + break; + case QED_LINK_LOOPBACK_EXT_PHY: + link_params->loopback_mode = PMM_LOOPBACK_EXT_PHY; + break; + case QED_LINK_LOOPBACK_EXT: + link_params->loopback_mode = PMM_LOOPBACK_EXT; + break; + case QED_LINK_LOOPBACK_MAC: + link_params->loopback_mode = PMM_LOOPBACK_MAC; + break; + default: + link_params->loopback_mode = PMM_LOOPBACK_NONE; + break; + } + } rc = qed_mcp_set_link(hwfn, ptt, params->link_up); @@ -1182,7 +1202,15 @@ 
static int qed_set_led(struct qed_dev *cdev, enum qed_led_mode mode) return status; } +struct qed_selftest_ops qed_selftest_ops_pass = { + .selftest_memory = &qed_selftest_memory, + .selftest_interrupt = &qed_selftest_interrupt, + .selftest_register = &qed_selftest_register, + .selftest_clock = &qed_selftest_clock, +}; + const struct qed_common_ops qed_common_ops_pass = { + .selftest = &qed_selftest_ops_pass, .probe = &qed_probe, .remove = &qed_remove, .set_power_state = &qed_set_power_state, diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index cb46dbdf47dd..2f8309d772c8 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c @@ -1017,3 +1017,45 @@ int qed_mcp_set_led(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, return rc; } + +int qed_mcp_bist_register_test(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + u32 drv_mb_param = 0, rsp, param; + int rc = 0; + + drv_mb_param = (DRV_MB_PARAM_BIST_REGISTER_TEST << + DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT); + + rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST, + drv_mb_param, &rsp, ¶m); + + if (rc) + return rc; + + if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) || + (param != DRV_MB_PARAM_BIST_RC_PASSED)) + rc = -EAGAIN; + + return rc; +} + +int qed_mcp_bist_clock_test(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + u32 drv_mb_param, rsp, param; + int rc = 0; + + drv_mb_param = (DRV_MB_PARAM_BIST_CLOCK_TEST << + DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT); + + rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST, + drv_mb_param, &rsp, ¶m); + + if (rc) + return rc; + + if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) || + (param != DRV_MB_PARAM_BIST_RC_PASSED)) + rc = -EAGAIN; + + return rc; +} diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h index 608bcb2403cb..5f218eed0541 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h @@ -245,6 +245,28 @@ int qed_mcp_set_led(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, enum qed_led_mode mode); +/** + * @brief Bist register test + * + * @param p_hwfn - hw function + * @param p_ptt - PTT required for register access + * + * @return int - 0 - operation was successful. + */ +int qed_mcp_bist_register_test(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt); + +/** + * @brief Bist clock test + * + * @param p_hwfn - hw function + * @param p_ptt - PTT required for register access + * + * @return int - 0 - operation was successful. 
+ */ +int qed_mcp_bist_clock_test(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt); + /* Using hwfn number (and not pf_num) is required since in CMT mode, * same pf_num may be used by two different hwfn * TODO - this shouldn't really be in .h file, but until all fields diff --git a/drivers/net/ethernet/qlogic/qed/qed_selftest.c b/drivers/net/ethernet/qlogic/qed/qed_selftest.c new file mode 100644 index 000000000000..a342bfe4280d --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_selftest.c @@ -0,0 +1,76 @@ +#include "qed.h" +#include "qed_dev_api.h" +#include "qed_mcp.h" +#include "qed_sp.h" + +int qed_selftest_memory(struct qed_dev *cdev) +{ + int rc = 0, i; + + for_each_hwfn(cdev, i) { + rc = qed_sp_heartbeat_ramrod(&cdev->hwfns[i]); + if (rc) + return rc; + } + + return rc; +} + +int qed_selftest_interrupt(struct qed_dev *cdev) +{ + int rc = 0, i; + + for_each_hwfn(cdev, i) { + rc = qed_sp_heartbeat_ramrod(&cdev->hwfns[i]); + if (rc) + return rc; + } + + return rc; +} + +int qed_selftest_register(struct qed_dev *cdev) +{ + struct qed_hwfn *p_hwfn; + struct qed_ptt *p_ptt; + int rc = 0, i; + + /* although performed by MCP, this test is per engine */ + for_each_hwfn(cdev, i) { + p_hwfn = &cdev->hwfns[i]; + p_ptt = qed_ptt_acquire(p_hwfn); + if (!p_ptt) { + DP_ERR(p_hwfn, "failed to acquire ptt\n"); + return -EBUSY; + } + rc = qed_mcp_bist_register_test(p_hwfn, p_ptt); + qed_ptt_release(p_hwfn, p_ptt); + if (rc) + break; + } + + return rc; +} + +int qed_selftest_clock(struct qed_dev *cdev) +{ + struct qed_hwfn *p_hwfn; + struct qed_ptt *p_ptt; + int rc = 0, i; + + /* although performed by MCP, this test is per engine */ + for_each_hwfn(cdev, i) { + p_hwfn = &cdev->hwfns[i]; + p_ptt = qed_ptt_acquire(p_hwfn); + if (!p_ptt) { + DP_ERR(p_hwfn, "failed to acquire ptt\n"); + return -EBUSY; + } + rc = qed_mcp_bist_clock_test(p_hwfn, p_ptt); + qed_ptt_release(p_hwfn, p_ptt); + if (rc) + break; + } + + return rc; +} diff --git a/drivers/net/ethernet/qlogic/qed/qed_selftest.h b/drivers/net/ethernet/qlogic/qed/qed_selftest.h new file mode 100644 index 000000000000..50eb0b49950f --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_selftest.h @@ -0,0 +1,40 @@ +#ifndef _QED_SELFTEST_API_H +#define _QED_SELFTEST_API_H +#include + +/** + * @brief qed_selftest_memory - Perform memory test + * + * @param cdev + * + * @return int + */ +int qed_selftest_memory(struct qed_dev *cdev); + +/** + * @brief qed_selftest_interrupt - Perform interrupt test + * + * @param cdev + * + * @return int + */ +int qed_selftest_interrupt(struct qed_dev *cdev); + +/** + * @brief qed_selftest_register - Perform register test + * + * @param cdev + * + * @return int + */ +int qed_selftest_register(struct qed_dev *cdev); + +/** + * @brief qed_selftest_clock - Perform clock test + * + * @param cdev + * + * @return int + */ +int qed_selftest_clock(struct qed_dev *cdev); +#endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h index 4b91cb32f317..eec137f40895 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h @@ -369,4 +369,14 @@ int qed_sp_pf_update_tunn_cfg(struct qed_hwfn *p_hwfn, struct qed_tunn_update_params *p_tunn, enum spq_mode comp_mode, struct qed_spq_comp_cb *p_comp_data); +/** + * @brief qed_sp_heartbeat_ramrod - Send empty Ramrod + * + * @param p_hwfn + * + * @return int + */ + +int qed_sp_heartbeat_ramrod(struct qed_hwfn *p_hwfn); + #endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c 
b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c index 7ccd96e5802b..9f9bc10d0f6c 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c @@ -428,3 +428,24 @@ int qed_sp_pf_stop(struct qed_hwfn *p_hwfn) return qed_spq_post(p_hwfn, p_ent, NULL); } + +int qed_sp_heartbeat_ramrod(struct qed_hwfn *p_hwfn) +{ + struct qed_spq_entry *p_ent = NULL; + struct qed_sp_init_data init_data; + int rc; + + /* Get SPQ entry */ + memset(&init_data, 0, sizeof(init_data)); + init_data.cid = qed_spq_get_cid(p_hwfn); + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.comp_mode = QED_SPQ_MODE_EBLOCK; + + rc = qed_sp_init_request(p_hwfn, &p_ent, + COMMON_RAMROD_EMPTY, PROTOCOLID_COMMON, + &init_data); + if (rc) + return rc; + + return qed_spq_post(p_hwfn, p_ent, NULL); +} diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index e5de42b62976..d72c832a9397 100644 --- a/include/linux/qed/qed_if.h +++ b/include/linux/qed/qed_if.h @@ -110,6 +110,7 @@ struct qed_link_params { #define QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS BIT(1) #define QED_LINK_OVERRIDE_SPEED_FORCED_SPEED BIT(2) #define QED_LINK_OVERRIDE_PAUSE_CONFIG BIT(3) +#define QED_LINK_OVERRIDE_LOOPBACK_MODE BIT(4) u32 override_flags; bool autoneg; u32 adv_speeds; @@ -118,6 +119,12 @@ struct qed_link_params { #define QED_LINK_PAUSE_RX_ENABLE BIT(1) #define QED_LINK_PAUSE_TX_ENABLE BIT(2) u32 pause_config; +#define QED_LINK_LOOPBACK_NONE BIT(0) +#define QED_LINK_LOOPBACK_INT_PHY BIT(1) +#define QED_LINK_LOOPBACK_EXT_PHY BIT(2) +#define QED_LINK_LOOPBACK_EXT BIT(3) +#define QED_LINK_LOOPBACK_MAC BIT(4) + u32 loopback_mode; }; struct qed_link_output { @@ -158,7 +165,47 @@ struct qed_common_cb_ops { struct qed_link_output *link); }; +struct qed_selftest_ops { +/** + * @brief selftest_interrupt - Perform interrupt test + * + * @param cdev + * + * @return 0 on success, error otherwise. + */ + int (*selftest_interrupt)(struct qed_dev *cdev); + +/** + * @brief selftest_memory - Perform memory test + * + * @param cdev + * + * @return 0 on success, error otherwise. + */ + int (*selftest_memory)(struct qed_dev *cdev); + +/** + * @brief selftest_register - Perform register test + * + * @param cdev + * + * @return 0 on success, error otherwise. + */ + int (*selftest_register)(struct qed_dev *cdev); + +/** + * @brief selftest_clock - Perform clock test + * + * @param cdev + * + * @return 0 on success, error otherwise. + */ + int (*selftest_clock)(struct qed_dev *cdev); +}; + struct qed_common_ops { + struct qed_selftest_ops *selftest; + struct qed_dev* (*probe)(struct pci_dev *dev, enum qed_protocol protocol, u32 dp_module, u8 dp_level); From 3044a02eeb61d6fde77ea5140651bfc54afe524c Mon Sep 17 00:00:00 2001 From: Sudarsana Reddy Kalluru Date: Thu, 28 Apr 2016 20:20:53 -0400 Subject: [PATCH 1209/1649] qede: add support for selftests. This patch adds the qede ethtool support for the following tests: - interrupt test - memory test - register test - clock test Signed-off-by: Sudarsana Reddy Kalluru Signed-off-by: Yuval Mintz Signed-off-by: David S. 
Miller --- .../net/ethernet/qlogic/qede/qede_ethtool.c | 56 ++++++++++++++++++- 1 file changed, 55 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c index f1dd25ac5552..e25a05ba0249 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c @@ -125,6 +125,21 @@ static const char qede_private_arr[QEDE_PRI_FLAG_LEN][ETH_GSTRING_LEN] = { "Coupled-Function", }; +enum qede_ethtool_tests { + QEDE_ETHTOOL_INTERRUPT_TEST, + QEDE_ETHTOOL_MEMORY_TEST, + QEDE_ETHTOOL_REGISTER_TEST, + QEDE_ETHTOOL_CLOCK_TEST, + QEDE_ETHTOOL_TEST_MAX +}; + +static const char qede_tests_str_arr[QEDE_ETHTOOL_TEST_MAX][ETH_GSTRING_LEN] = { + "Interrupt (online)\t", + "Memory (online)\t\t", + "Register (online)\t", + "Clock (online)\t\t", +}; + static void qede_get_strings_stats(struct qede_dev *edev, u8 *buf) { int i, j, k; @@ -152,6 +167,10 @@ static void qede_get_strings(struct net_device *dev, u32 stringset, u8 *buf) memcpy(buf, qede_private_arr, ETH_GSTRING_LEN * QEDE_PRI_FLAG_LEN); break; + case ETH_SS_TEST: + memcpy(buf, qede_tests_str_arr, + ETH_GSTRING_LEN * QEDE_ETHTOOL_TEST_MAX); + break; default: DP_VERBOSE(edev, QED_MSG_DEBUG, "Unsupported stringset 0x%08x\n", stringset); @@ -192,7 +211,8 @@ static int qede_get_sset_count(struct net_device *dev, int stringset) return num_stats + QEDE_NUM_RQSTATS; case ETH_SS_PRIV_FLAGS: return QEDE_PRI_FLAG_LEN; - + case ETH_SS_TEST: + return QEDE_ETHTOOL_TEST_MAX; default: DP_VERBOSE(edev, QED_MSG_DEBUG, "Unsupported stringset 0x%08x\n", stringset); @@ -827,6 +847,39 @@ static int qede_set_rxfh(struct net_device *dev, const u32 *indir, return 0; } +static void qede_self_test(struct net_device *dev, + struct ethtool_test *etest, u64 *buf) +{ + struct qede_dev *edev = netdev_priv(dev); + + DP_VERBOSE(edev, QED_MSG_DEBUG, + "Self-test command parameters: offline = %d, external_lb = %d\n", + (etest->flags & ETH_TEST_FL_OFFLINE), + (etest->flags & ETH_TEST_FL_EXTERNAL_LB) >> 2); + + memset(buf, 0, sizeof(u64) * QEDE_ETHTOOL_TEST_MAX); + + if (edev->ops->common->selftest->selftest_interrupt(edev->cdev)) { + buf[QEDE_ETHTOOL_INTERRUPT_TEST] = 1; + etest->flags |= ETH_TEST_FL_FAILED; + } + + if (edev->ops->common->selftest->selftest_memory(edev->cdev)) { + buf[QEDE_ETHTOOL_MEMORY_TEST] = 1; + etest->flags |= ETH_TEST_FL_FAILED; + } + + if (edev->ops->common->selftest->selftest_register(edev->cdev)) { + buf[QEDE_ETHTOOL_REGISTER_TEST] = 1; + etest->flags |= ETH_TEST_FL_FAILED; + } + + if (edev->ops->common->selftest->selftest_clock(edev->cdev)) { + buf[QEDE_ETHTOOL_CLOCK_TEST] = 1; + etest->flags |= ETH_TEST_FL_FAILED; + } +} + static const struct ethtool_ops qede_ethtool_ops = { .get_settings = qede_get_settings, .set_settings = qede_set_settings, @@ -852,6 +905,7 @@ static const struct ethtool_ops qede_ethtool_ops = { .set_rxfh = qede_set_rxfh, .get_channels = qede_get_channels, .set_channels = qede_set_channels, + .self_test = qede_self_test, }; void qede_set_ethtool_ops(struct net_device *dev) From 16f46bf054f8bb12c002c0ba64fc9ff17a61bf8f Mon Sep 17 00:00:00 2001 From: Sudarsana Reddy Kalluru Date: Thu, 28 Apr 2016 20:20:54 -0400 Subject: [PATCH 1210/1649] qede: add implementation for internal loopback test. This patch adds the qede implementation for internal loopback test. Signed-off-by: Sudarsana Reddy Kalluru Signed-off-by: Yuval Mintz Signed-off-by: Manish Chopra Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/qede/qede.h | 4 + .../net/ethernet/qlogic/qede/qede_ethtool.c | 234 ++++++++++++++++++ drivers/net/ethernet/qlogic/qede/qede_main.c | 8 +- 3 files changed, 242 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h index a687e7a1dc8d..ff3ac0caad5b 100644 --- a/drivers/net/ethernet/qlogic/qede/qede.h +++ b/drivers/net/ethernet/qlogic/qede/qede.h @@ -308,6 +308,10 @@ void qede_reload(struct qede_dev *edev, union qede_reload_args *args); int qede_change_mtu(struct net_device *dev, int new_mtu); void qede_fill_by_demand_stats(struct qede_dev *edev); +bool qede_has_rx_work(struct qede_rx_queue *rxq); +int qede_txq_has_work(struct qede_tx_queue *txq); +void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, struct qede_dev *edev, + u8 count); #define RX_RING_SIZE_POW 13 #define RX_RING_SIZE ((u16)BIT(RX_RING_SIZE_POW)) diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c index e25a05ba0249..0d04f163ae45 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c @@ -9,6 +9,7 @@ #include #include #include +#include #include #include #include @@ -27,6 +28,9 @@ #define QEDE_RQSTAT_STRING(stat_name) (#stat_name) #define QEDE_RQSTAT(stat_name) \ {QEDE_RQSTAT_OFFSET(stat_name), QEDE_RQSTAT_STRING(stat_name)} + +#define QEDE_SELFTEST_POLL_COUNT 100 + static const struct { u64 offset; char string[ETH_GSTRING_LEN]; @@ -126,6 +130,7 @@ static const char qede_private_arr[QEDE_PRI_FLAG_LEN][ETH_GSTRING_LEN] = { }; enum qede_ethtool_tests { + QEDE_ETHTOOL_INT_LOOPBACK, QEDE_ETHTOOL_INTERRUPT_TEST, QEDE_ETHTOOL_MEMORY_TEST, QEDE_ETHTOOL_REGISTER_TEST, @@ -134,6 +139,7 @@ enum qede_ethtool_tests { }; static const char qede_tests_str_arr[QEDE_ETHTOOL_TEST_MAX][ETH_GSTRING_LEN] = { + "Internal loopback (offline)", "Interrupt (online)\t", "Memory (online)\t\t", "Register (online)\t", @@ -847,6 +853,226 @@ static int qede_set_rxfh(struct net_device *dev, const u32 *indir, return 0; } +/* This function enables the interrupt generation and the NAPI on the device */ +static void qede_netif_start(struct qede_dev *edev) +{ + int i; + + if (!netif_running(edev->ndev)) + return; + + for_each_rss(i) { + /* Update and reenable interrupts */ + qed_sb_ack(edev->fp_array[i].sb_info, IGU_INT_ENABLE, 1); + napi_enable(&edev->fp_array[i].napi); + } +} + +/* This function disables the NAPI and the interrupt generation on the device */ +static void qede_netif_stop(struct qede_dev *edev) +{ + int i; + + for_each_rss(i) { + napi_disable(&edev->fp_array[i].napi); + /* Disable interrupts */ + qed_sb_ack(edev->fp_array[i].sb_info, IGU_INT_DISABLE, 0); + } +} + +static int qede_selftest_transmit_traffic(struct qede_dev *edev, + struct sk_buff *skb) +{ + struct qede_tx_queue *txq = &edev->fp_array[0].txqs[0]; + struct eth_tx_1st_bd *first_bd; + dma_addr_t mapping; + int i, idx, val; + + /* Fill the entry in the SW ring and the BDs in the FW ring */ + idx = txq->sw_tx_prod & NUM_TX_BDS_MAX; + txq->sw_tx_ring[idx].skb = skb; + first_bd = qed_chain_produce(&txq->tx_pbl); + memset(first_bd, 0, sizeof(*first_bd)); + val = 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT; + first_bd->data.bd_flags.bitfields = val; + + /* Map skb linear data for DMA and set in the first BD */ + mapping = dma_map_single(&edev->pdev->dev, skb->data, + skb_headlen(skb), DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) { + 
DP_NOTICE(edev, "SKB mapping failed\n"); + return -ENOMEM; + } + BD_SET_UNMAP_ADDR_LEN(first_bd, mapping, skb_headlen(skb)); + + /* update the first BD with the actual num BDs */ + first_bd->data.nbds = 1; + txq->sw_tx_prod++; + /* 'next page' entries are counted in the producer value */ + val = cpu_to_le16(qed_chain_get_prod_idx(&txq->tx_pbl)); + txq->tx_db.data.bd_prod = val; + + /* wmb makes sure that the BDs data is updated before updating the + * producer, otherwise FW may read old data from the BDs. + */ + wmb(); + barrier(); + writel(txq->tx_db.raw, txq->doorbell_addr); + + /* mmiowb is needed to synchronize doorbell writes from more than one + * processor. It guarantees that the write arrives to the device before + * the queue lock is released and another start_xmit is called (possibly + * on another CPU). Without this barrier, the next doorbell can bypass + * this doorbell. This is applicable to IA64/Altix systems. + */ + mmiowb(); + + for (i = 0; i < QEDE_SELFTEST_POLL_COUNT; i++) { + if (qede_txq_has_work(txq)) + break; + usleep_range(100, 200); + } + + if (!qede_txq_has_work(txq)) { + DP_NOTICE(edev, "Tx completion didn't happen\n"); + return -1; + } + + first_bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl); + dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd), + BD_UNMAP_LEN(first_bd), DMA_TO_DEVICE); + txq->sw_tx_cons++; + txq->sw_tx_ring[idx].skb = NULL; + + return 0; +} + +static int qede_selftest_receive_traffic(struct qede_dev *edev) +{ + struct qede_rx_queue *rxq = edev->fp_array[0].rxq; + u16 hw_comp_cons, sw_comp_cons, sw_rx_index, len; + struct eth_fast_path_rx_reg_cqe *fp_cqe; + struct sw_rx_data *sw_rx_data; + union eth_rx_cqe *cqe; + u8 *data_ptr; + int i; + + /* The packet is expected to receive on rx-queue 0 even though RSS is + * enabled. This is because the queue 0 is configured as the default + * queue and that the loopback traffic is not IP. + */ + for (i = 0; i < QEDE_SELFTEST_POLL_COUNT; i++) { + if (qede_has_rx_work(rxq)) + break; + usleep_range(100, 200); + } + + if (!qede_has_rx_work(rxq)) { + DP_NOTICE(edev, "Failed to receive the traffic\n"); + return -1; + } + + hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr); + sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring); + + /* Memory barrier to prevent the CPU from doing speculative reads of CQE + * / BD before reading hw_comp_cons. If the CQE is read before it is + * written by FW, then FW writes CQE and SB, and then the CPU reads the + * hw_comp_cons, it will use an old CQE. 
+ */ + rmb(); + + /* Get the CQE from the completion ring */ + cqe = (union eth_rx_cqe *)qed_chain_consume(&rxq->rx_comp_ring); + + /* Get the data from the SW ring */ + sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS_MAX; + sw_rx_data = &rxq->sw_rx_ring[sw_rx_index]; + fp_cqe = &cqe->fast_path_regular; + len = le16_to_cpu(fp_cqe->len_on_first_bd); + data_ptr = (u8 *)(page_address(sw_rx_data->data) + + fp_cqe->placement_offset + sw_rx_data->page_offset); + for (i = ETH_HLEN; i < len; i++) + if (data_ptr[i] != (unsigned char)(i & 0xff)) { + DP_NOTICE(edev, "Loopback test failed\n"); + qede_recycle_rx_bd_ring(rxq, edev, 1); + return -1; + } + + qede_recycle_rx_bd_ring(rxq, edev, 1); + + return 0; +} + +static int qede_selftest_run_loopback(struct qede_dev *edev, u32 loopback_mode) +{ + struct qed_link_params link_params; + struct sk_buff *skb = NULL; + int rc = 0, i; + u32 pkt_size; + u8 *packet; + + if (!netif_running(edev->ndev)) { + DP_NOTICE(edev, "Interface is down\n"); + return -EINVAL; + } + + qede_netif_stop(edev); + + /* Bring up the link in Loopback mode */ + memset(&link_params, 0, sizeof(link_params)); + link_params.link_up = true; + link_params.override_flags = QED_LINK_OVERRIDE_LOOPBACK_MODE; + link_params.loopback_mode = loopback_mode; + edev->ops->common->set_link(edev->cdev, &link_params); + + /* Wait for loopback configuration to apply */ + msleep_interruptible(500); + + /* prepare the loopback packet */ + pkt_size = edev->ndev->mtu + ETH_HLEN; + + skb = netdev_alloc_skb(edev->ndev, pkt_size); + if (!skb) { + DP_INFO(edev, "Can't allocate skb\n"); + rc = -ENOMEM; + goto test_loopback_exit; + } + packet = skb_put(skb, pkt_size); + ether_addr_copy(packet, edev->ndev->dev_addr); + ether_addr_copy(packet + ETH_ALEN, edev->ndev->dev_addr); + memset(packet + (2 * ETH_ALEN), 0x77, (ETH_HLEN - (2 * ETH_ALEN))); + for (i = ETH_HLEN; i < pkt_size; i++) + packet[i] = (unsigned char)(i & 0xff); + + rc = qede_selftest_transmit_traffic(edev, skb); + if (rc) + goto test_loopback_exit; + + rc = qede_selftest_receive_traffic(edev); + if (rc) + goto test_loopback_exit; + + DP_VERBOSE(edev, NETIF_MSG_RX_STATUS, "Loopback test successful\n"); + +test_loopback_exit: + dev_kfree_skb(skb); + + /* Bring up the link in Normal mode */ + memset(&link_params, 0, sizeof(link_params)); + link_params.link_up = true; + link_params.override_flags = QED_LINK_OVERRIDE_LOOPBACK_MODE; + link_params.loopback_mode = QED_LINK_LOOPBACK_NONE; + edev->ops->common->set_link(edev->cdev, &link_params); + + /* Wait for loopback configuration to apply */ + msleep_interruptible(500); + + qede_netif_start(edev); + + return rc; +} + static void qede_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf) { @@ -859,6 +1085,14 @@ static void qede_self_test(struct net_device *dev, memset(buf, 0, sizeof(u64) * QEDE_ETHTOOL_TEST_MAX); + if (etest->flags & ETH_TEST_FL_OFFLINE) { + if (qede_selftest_run_loopback(edev, + QED_LINK_LOOPBACK_INT_PHY)) { + buf[QEDE_ETHTOOL_INT_LOOPBACK] = 1; + etest->flags |= ETH_TEST_FL_FAILED; + } + } + if (edev->ops->common->selftest->selftest_interrupt(edev->cdev)) { buf[QEDE_ETHTOOL_INTERRUPT_TEST] = 1; etest->flags |= ETH_TEST_FL_FAILED; diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index 1e3ee49bae24..82d85ccc9ed1 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -668,7 +668,7 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb, return NETDEV_TX_OK; } -static int 
qede_txq_has_work(struct qede_tx_queue *txq) +int qede_txq_has_work(struct qede_tx_queue *txq) { u16 hw_bd_cons; @@ -751,7 +751,7 @@ static int qede_tx_int(struct qede_dev *edev, return 0; } -static bool qede_has_rx_work(struct qede_rx_queue *rxq) +bool qede_has_rx_work(struct qede_rx_queue *rxq) { u16 hw_comp_cons, sw_comp_cons; @@ -806,8 +806,8 @@ static inline void qede_reuse_page(struct qede_dev *edev, /* In case of allocation failures reuse buffers * from consumer index to produce buffers for firmware */ -static void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, - struct qede_dev *edev, u8 count) +void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, + struct qede_dev *edev, u8 count) { struct sw_rx_data *curr_cons; From 0065d1c5acdb60ee2c0e54585a29243718465bb7 Mon Sep 17 00:00:00 2001 From: Xinming Hu Date: Tue, 26 Apr 2016 06:57:26 -0700 Subject: [PATCH 1211/1649] dt: bindings: add MARVELL's bt-sd8xxx wireless device Add device tree binding documentation for MARVELL's bluetooth sdio (sd8897 and sd8997) chip. Signed-off-by: Xinming Hu Signed-off-by: Amitkumar Karwar Acked-by: Rob Herring Signed-off-by: Marcel Holtmann --- Documentation/devicetree/bindings/btmrvl.txt | 29 ---------- .../bindings/net/marvell-bt-sd8xxx.txt | 56 +++++++++++++++++++ 2 files changed, 56 insertions(+), 29 deletions(-) delete mode 100644 Documentation/devicetree/bindings/btmrvl.txt create mode 100644 Documentation/devicetree/bindings/net/marvell-bt-sd8xxx.txt diff --git a/Documentation/devicetree/bindings/btmrvl.txt b/Documentation/devicetree/bindings/btmrvl.txt deleted file mode 100644 index 58f964bb0a52..000000000000 --- a/Documentation/devicetree/bindings/btmrvl.txt +++ /dev/null @@ -1,29 +0,0 @@ -btmrvl ------- - -Required properties: - - - compatible : must be "btmrvl,cfgdata" - -Optional properties: - - - btmrvl,cal-data : Calibration data downloaded to the device during - initialization. This is an array of 28 values(u8). - - - btmrvl,gpio-gap : gpio and gap (in msecs) combination to be - configured. - -Example: - -GPIO pin 13 is configured as a wakeup source and GAP is set to 100 msecs -in below example. - -btmrvl { - compatible = "btmrvl,cfgdata"; - - btmrvl,cal-data = /bits/ 8 < - 0x37 0x01 0x1c 0x00 0xff 0xff 0xff 0xff 0x01 0x7f 0x04 0x02 - 0x00 0x00 0xba 0xce 0xc0 0xc6 0x2d 0x00 0x00 0x00 0x00 0x00 - 0x00 0x00 0xf0 0x00>; - btmrvl,gpio-gap = <0x0d64>; -}; diff --git a/Documentation/devicetree/bindings/net/marvell-bt-sd8xxx.txt b/Documentation/devicetree/bindings/net/marvell-bt-sd8xxx.txt new file mode 100644 index 000000000000..14aa6cf58201 --- /dev/null +++ b/Documentation/devicetree/bindings/net/marvell-bt-sd8xxx.txt @@ -0,0 +1,56 @@ +Marvell 8897/8997 (sd8897/sd8997) bluetooth SDIO devices +------ + +Required properties: + + - compatible : should be one of the following: + * "marvell,sd8897-bt" + * "marvell,sd8997-bt" + +Optional properties: + + - marvell,cal-data: Calibration data downloaded to the device during + initialization. This is an array of 28 values(u8). + + - marvell,wakeup-pin: It represents wakeup pin number of the bluetooth chip. + firmware will use the pin to wakeup host system. + - marvell,wakeup-gap-ms: wakeup gap represents wakeup latency of the host + platform. The value will be configured to firmware. This + is needed to work chip's sleep feature as expected. + - interrupt-parent: phandle of the parent interrupt controller + - interrupts : interrupt pin number to the cpu. Driver will request an irq based + on this interrupt number. 
During system suspend, the irq will be + enabled so that the bluetooth chip can wakeup host platform under + certain condition. During system resume, the irq will be disabled + to make sure unnecessary interrupt is not received. + +Example: + +IRQ pin 119 is used as system wakeup source interrupt. +wakeup pin 13 and gap 100ms are configured so that firmware can wakeup host +using this device side pin and wakeup latency. +calibration data is also available in below example. + +&mmc3 { + status = "okay"; + vmmc-supply = <&wlan_en_reg>; + bus-width = <4>; + cap-power-off-card; + keep-power-in-suspend; + + #address-cells = <1>; + #size-cells = <0>; + btmrvl: bluetooth@2 { + compatible = "marvell,sd8897-bt"; + reg = <2>; + interrupt-parent = <&pio>; + interrupts = <119 IRQ_TYPE_LEVEL_LOW>; + + marvell,cal-data = /bits/ 8 < + 0x37 0x01 0x1c 0x00 0xff 0xff 0xff 0xff 0x01 0x7f 0x04 0x02 + 0x00 0x00 0xba 0xce 0xc0 0xc6 0x2d 0x00 0x00 0x00 0x00 0x00 + 0x00 0x00 0xf0 0x00>; + marvell,wakeup-pin = <0x0d>; + marvell,wakeup-gap-ms = <0x64>; + }; +}; From bb7f4f0bcee6844632d7366d6abff4b9996ad454 Mon Sep 17 00:00:00 2001 From: Xinming Hu Date: Tue, 26 Apr 2016 06:57:27 -0700 Subject: [PATCH 1212/1649] btmrvl: add platform specific wakeup interrupt support On some arm-based platforms, we need to configure platform specific parameters by device tree node and also define our node as a child node of parent SDIO host controller. This patch parses these parameters from device tree. It includes calibration data download to firmware, wakeup pin configured to firmware, and soc specific wake up gpio, which will be set as wakeup interrupt pin. Signed-off-by: Xinming Hu Signed-off-by: Amitkumar Karwar Signed-off-by: Marcel Holtmann --- drivers/bluetooth/btmrvl_drv.h | 11 +++++ drivers/bluetooth/btmrvl_main.c | 35 ++++++++------- drivers/bluetooth/btmrvl_sdio.c | 79 +++++++++++++++++++++++++++++++++ drivers/bluetooth/btmrvl_sdio.h | 6 +++ 4 files changed, 116 insertions(+), 15 deletions(-) diff --git a/drivers/bluetooth/btmrvl_drv.h b/drivers/bluetooth/btmrvl_drv.h index 05904732e6f1..f742384b53f7 100644 --- a/drivers/bluetooth/btmrvl_drv.h +++ b/drivers/bluetooth/btmrvl_drv.h @@ -23,6 +23,17 @@ #include #include #include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include #define BTM_HEADER_LEN 4 #define BTM_UPLD_SIZE 2312 diff --git a/drivers/bluetooth/btmrvl_main.c b/drivers/bluetooth/btmrvl_main.c index f25a825a693f..7ad8d61c0c61 100644 --- a/drivers/bluetooth/btmrvl_main.c +++ b/drivers/bluetooth/btmrvl_main.c @@ -510,34 +510,39 @@ static int btmrvl_download_cal_data(struct btmrvl_private *priv, static int btmrvl_check_device_tree(struct btmrvl_private *priv) { struct device_node *dt_node; + struct btmrvl_sdio_card *card = priv->btmrvl_dev.card; u8 cal_data[BT_CAL_HDR_LEN + BT_CAL_DATA_SIZE]; - int ret; - u32 val; + int ret = 0; + u16 gpio, gap; - for_each_compatible_node(dt_node, NULL, "btmrvl,cfgdata") { - ret = of_property_read_u32(dt_node, "btmrvl,gpio-gap", &val); - if (!ret) - priv->btmrvl_dev.gpio_gap = val; + if (card->plt_of_node) { + dt_node = card->plt_of_node; + ret = of_property_read_u16(dt_node, "marvell,wakeup-pin", + &gpio); + if (ret) + gpio = (priv->btmrvl_dev.gpio_gap & 0xff00) >> 8; - ret = of_property_read_u8_array(dt_node, "btmrvl,cal-data", + ret = of_property_read_u16(dt_node, "marvell,wakeup-gap-ms", + &gap); + if (ret) + gap = (u8)(priv->btmrvl_dev.gpio_gap & 0x00ff); + + priv->btmrvl_dev.gpio_gap = (gpio << 8) + gap; + + ret = 
of_property_read_u8_array(dt_node, "marvell,cal-data", cal_data + BT_CAL_HDR_LEN, BT_CAL_DATA_SIZE); - if (ret) { - of_node_put(dt_node); + if (ret) return ret; - } BT_DBG("Use cal data from device tree"); ret = btmrvl_download_cal_data(priv, cal_data, BT_CAL_DATA_SIZE); - if (ret) { + if (ret) BT_ERR("Fail to download calibrate data"); - of_node_put(dt_node); - return ret; - } } - return 0; + return ret; } static int btmrvl_setup(struct hci_dev *hdev) diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c index c6ef248de5e4..f425ddf91a24 100644 --- a/drivers/bluetooth/btmrvl_sdio.c +++ b/drivers/bluetooth/btmrvl_sdio.c @@ -52,6 +52,68 @@ static struct memory_type_mapping mem_type_mapping_tbl[] = { {"EXTLAST", NULL, 0, 0xFE}, }; +static const struct of_device_id btmrvl_sdio_of_match_table[] = { + { .compatible = "marvell,sd8897-bt" }, + { .compatible = "marvell,sd8997-bt" }, + { } +}; + +static irqreturn_t btmrvl_wake_irq_bt(int irq, void *priv) +{ + struct btmrvl_plt_wake_cfg *cfg = priv; + + if (cfg->irq_bt >= 0) { + pr_info("%s: wake by bt", __func__); + cfg->wake_by_bt = true; + disable_irq_nosync(irq); + } + + return IRQ_HANDLED; +} + +/* This function parses device tree node using mmc subnode devicetree API. + * The device node is saved in card->plt_of_node. + * If the device tree node exists and includes interrupts attributes, this + * function will request platform specific wakeup interrupt. + */ +static int btmrvl_sdio_probe_of(struct device *dev, + struct btmrvl_sdio_card *card) +{ + struct btmrvl_plt_wake_cfg *cfg; + int ret; + + if (!dev->of_node || + !of_match_node(btmrvl_sdio_of_match_table, dev->of_node)) { + pr_err("sdio platform data not available"); + return -1; + } + + card->plt_of_node = dev->of_node; + + card->plt_wake_cfg = devm_kzalloc(dev, sizeof(*card->plt_wake_cfg), + GFP_KERNEL); + cfg = card->plt_wake_cfg; + if (cfg && card->plt_of_node) { + cfg->irq_bt = irq_of_parse_and_map(card->plt_of_node, 0); + if (!cfg->irq_bt) { + dev_err(dev, "fail to parse irq_bt from device tree"); + } else { + ret = devm_request_irq(dev, cfg->irq_bt, + btmrvl_wake_irq_bt, + IRQF_TRIGGER_LOW, + "bt_wake", cfg); + if (ret) { + dev_err(dev, + "Failed to request irq_bt %d (%d)\n", + cfg->irq_bt, ret); + } + disable_irq(cfg->irq_bt); + } + } + + return 0; +} + /* The btmrvl_sdio_remove() callback function is called * when user removes this module from kernel space or ejects * the card from the slot. 
The driver handles these 2 cases @@ -1464,6 +1526,9 @@ static int btmrvl_sdio_probe(struct sdio_func *func, btmrvl_sdio_enable_host_int(card); + /* Device tree node parsing and platform specific configuration*/ + btmrvl_sdio_probe_of(&func->dev, card); + priv = btmrvl_add_card(card); if (!priv) { BT_ERR("Initializing card failed!"); @@ -1544,6 +1609,13 @@ static int btmrvl_sdio_suspend(struct device *dev) return 0; } + /* Enable platform specific wakeup interrupt */ + if (card->plt_wake_cfg && card->plt_wake_cfg->irq_bt >= 0) { + card->plt_wake_cfg->wake_by_bt = false; + enable_irq(card->plt_wake_cfg->irq_bt); + enable_irq_wake(card->plt_wake_cfg->irq_bt); + } + priv = card->priv; priv->adapter->is_suspending = true; hcidev = priv->btmrvl_dev.hcidev; @@ -1606,6 +1678,13 @@ static int btmrvl_sdio_resume(struct device *dev) BT_DBG("%s: SDIO resume", hcidev->name); hci_resume_dev(hcidev); + /* Disable platform specific wakeup interrupt */ + if (card->plt_wake_cfg && card->plt_wake_cfg->irq_bt >= 0) { + disable_irq_wake(card->plt_wake_cfg->irq_bt); + if (!card->plt_wake_cfg->wake_by_bt) + disable_irq(card->plt_wake_cfg->irq_bt); + } + return 0; } diff --git a/drivers/bluetooth/btmrvl_sdio.h b/drivers/bluetooth/btmrvl_sdio.h index 1a3bd064c442..3a522d23ee6e 100644 --- a/drivers/bluetooth/btmrvl_sdio.h +++ b/drivers/bluetooth/btmrvl_sdio.h @@ -62,6 +62,10 @@ #define FIRMWARE_READY 0xfedc +struct btmrvl_plt_wake_cfg { + int irq_bt; + bool wake_by_bt; +}; struct btmrvl_sdio_card_reg { u8 cfg; @@ -97,6 +101,8 @@ struct btmrvl_sdio_card { u16 sd_blksz_fw_dl; u8 rx_unit; struct btmrvl_private *priv; + struct device_node *plt_of_node; + struct btmrvl_plt_wake_cfg *plt_wake_cfg; }; struct btmrvl_sdio_device { From 32b9ccbc3522811c0e483637b85ae25f5491296f Mon Sep 17 00:00:00 2001 From: Loic Poulain Date: Thu, 28 Apr 2016 18:48:25 +0200 Subject: [PATCH 1213/1649] Bluetooth: hci_intel: Fix null gpio desc pointer dereference gpiod_get_optional can return either ERR_PTR or NULL pointer. NULL case is not tested and then dereferenced later in desc_to_gpio. Fix this by using non optional version which returns ERR_PTR in any error case (this is not an optional gpio). Use the same non optional version for the host-wake gpio. 
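As an illustration of the API difference (a minimal, hypothetical helper, not code from this patch; the function name is made up): devm_gpiod_get_optional() returns NULL when the GPIO is simply not described and ERR_PTR() only on real errors, so both cases must be checked before the descriptor reaches something like desc_to_gpio(); devm_gpiod_get() folds the missing-GPIO case into ERR_PTR(), which is the behaviour wanted for a mandatory line such as "reset".

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/gpio/consumer.h>

/* Sketch only: how a mandatory reset GPIO is fetched after the change. */
static int example_get_reset_gpio(struct device *dev, struct gpio_desc **out)
{
        struct gpio_desc *desc;

        /* Before the fix: devm_gpiod_get_optional() may return NULL when the
         * GPIO is absent; only IS_ERR() was checked, so the NULL descriptor
         * could later be dereferenced (e.g. by desc_to_gpio()).
         *
         * After the fix: devm_gpiod_get() treats a missing GPIO as an error
         * and returns either a valid descriptor or ERR_PTR(), so a single
         * IS_ERR() check is enough.
         */
        desc = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
        if (IS_ERR(desc))
                return PTR_ERR(desc);

        *out = desc;
        return 0;
}
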
Fixes: 765ea3abd116 ("Bluetooth: hci_intel: Retrieve host-wake IRQ") Signed-off-by: Loic Poulain Signed-off-by: Marcel Holtmann --- drivers/bluetooth/hci_intel.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/drivers/bluetooth/hci_intel.c b/drivers/bluetooth/hci_intel.c index 91d605147b10..f6f2b01a1fea 100644 --- a/drivers/bluetooth/hci_intel.c +++ b/drivers/bluetooth/hci_intel.c @@ -1210,8 +1210,7 @@ static int intel_probe(struct platform_device *pdev) idev->pdev = pdev; - idev->reset = devm_gpiod_get_optional(&pdev->dev, "reset", - GPIOD_OUT_LOW); + idev->reset = devm_gpiod_get(&pdev->dev, "reset", GPIOD_OUT_LOW); if (IS_ERR(idev->reset)) { dev_err(&pdev->dev, "Unable to retrieve gpio\n"); return PTR_ERR(idev->reset); @@ -1223,8 +1222,7 @@ static int intel_probe(struct platform_device *pdev) dev_err(&pdev->dev, "No IRQ, falling back to gpio-irq\n"); - host_wake = devm_gpiod_get_optional(&pdev->dev, "host-wake", - GPIOD_IN); + host_wake = devm_gpiod_get(&pdev->dev, "host-wake", GPIOD_IN); if (IS_ERR(host_wake)) { dev_err(&pdev->dev, "Unable to retrieve IRQ\n"); goto no_irq; From 2a37daa634416a617bb8e9032626ab491004e7da Mon Sep 17 00:00:00 2001 From: Iyappan Subramanian Date: Fri, 29 Apr 2016 11:10:13 -0700 Subject: [PATCH 1214/1649] drivers: net: xgene: Get channel number from device binding This patch gets ethernet to CPU channel (prefetch buffer number) from the newly added 'channel' property, thus decoupling Linux driver from resource management. Signed-off-by: Iyappan Subramanian Signed-off-by: David S. Miller --- drivers/net/ethernet/apm/xgene/xgene_enet_main.c | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c index 8d4c1ad2fc60..409152b21191 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c @@ -973,6 +973,17 @@ static enum xgene_ring_owner xgene_derive_ring_owner(struct xgene_enet_pdata *p) return owner; } +static u8 xgene_start_cpu_bufnum(struct xgene_enet_pdata *pdata) +{ + struct device *dev = &pdata->pdev->dev; + u32 cpu_bufnum; + int ret; + + ret = device_property_read_u32(dev, "channel", &cpu_bufnum); + + return (!ret) ? cpu_bufnum : pdata->cpu_bufnum; +} + static int xgene_enet_create_desc_rings(struct net_device *ndev) { struct xgene_enet_pdata *pdata = netdev_priv(ndev); @@ -981,13 +992,15 @@ static int xgene_enet_create_desc_rings(struct net_device *ndev) struct xgene_enet_desc_ring *buf_pool = NULL; enum xgene_ring_owner owner; dma_addr_t dma_exp_bufs; - u8 cpu_bufnum = pdata->cpu_bufnum; + u8 cpu_bufnum; u8 eth_bufnum = pdata->eth_bufnum; u8 bp_bufnum = pdata->bp_bufnum; u16 ring_num = pdata->ring_num; u16 ring_id; int i, ret, size; + cpu_bufnum = xgene_start_cpu_bufnum(pdata); + for (i = 0; i < pdata->rxq_cnt; i++) { /* allocate rx descriptor ring */ owner = xgene_derive_ring_owner(pdata); From 4cac949f59a133df11d88bc3d1512786507b02bf Mon Sep 17 00:00:00 2001 From: Iyappan Subramanian Date: Fri, 29 Apr 2016 11:10:14 -0700 Subject: [PATCH 1215/1649] Documentation: dtb: xgene: Add channel property Signed-off-by: Iyappan Subramanian Signed-off-by: David S. 
Miller --- Documentation/devicetree/bindings/net/apm-xgene-enet.txt | 2 ++ 1 file changed, 2 insertions(+) diff --git a/Documentation/devicetree/bindings/net/apm-xgene-enet.txt b/Documentation/devicetree/bindings/net/apm-xgene-enet.txt index 078060a97f95..05f705e32a4a 100644 --- a/Documentation/devicetree/bindings/net/apm-xgene-enet.txt +++ b/Documentation/devicetree/bindings/net/apm-xgene-enet.txt @@ -18,6 +18,8 @@ Required properties for all the ethernet interfaces: - First is the Rx interrupt. This irq is mandatory. - Second is the Tx completion interrupt. This is supported only on SGMII based 1GbE and 10GbE interfaces. +- channel: Ethernet to CPU, start channel (prefetch buffer) number + - Must map to the first irq and irqs must be sequential - port-id: Port number (0 or 1) - clocks: Reference to the clock entry. - local-mac-address: MAC address assigned to this device From 6619ac5a440961536c167dbc10596a26e9288915 Mon Sep 17 00:00:00 2001 From: Iyappan Subramanian Date: Fri, 29 Apr 2016 11:10:15 -0700 Subject: [PATCH 1216/1649] dtb: xgene: Add channel property Added 'channel' property, describing ethernet to CPU channel number. Signed-off-by: Iyappan Subramanian Signed-off-by: David S. Miller --- arch/arm64/boot/dts/apm/apm-shadowcat.dtsi | 1 + arch/arm64/boot/dts/apm/apm-storm.dtsi | 1 + 2 files changed, 2 insertions(+) diff --git a/arch/arm64/boot/dts/apm/apm-shadowcat.dtsi b/arch/arm64/boot/dts/apm/apm-shadowcat.dtsi index a055a5d443b7..ba0487751524 100644 --- a/arch/arm64/boot/dts/apm/apm-shadowcat.dtsi +++ b/arch/arm64/boot/dts/apm/apm-shadowcat.dtsi @@ -653,6 +653,7 @@ <0 113 4>, <0 114 4>, <0 115 4>; + channel = <12>; port-id = <1>; dma-coherent; clocks = <&xge1clk 0>; diff --git a/arch/arm64/boot/dts/apm/apm-storm.dtsi b/arch/arm64/boot/dts/apm/apm-storm.dtsi index ae4a173df493..5147d7698924 100644 --- a/arch/arm64/boot/dts/apm/apm-storm.dtsi +++ b/arch/arm64/boot/dts/apm/apm-storm.dtsi @@ -993,6 +993,7 @@ <0x0 0x65 0x4>, <0x0 0x66 0x4>, <0x0 0x67 0x4>; + channel = <0>; dma-coherent; clocks = <&xge0clk 0>; /* mac address will be overwritten by the bootloader */ From 866daf6eaae36b414764c4830ed197da9801a361 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Tue, 26 Apr 2016 09:35:42 +0200 Subject: [PATCH 1217/1649] wext: remove a/b/g/n from SIOCGIWNAME Since a/b/g/n no longer exist as spec amendements and VHT (ex 802.11ac) wasn't handled at all, it's better to just remove the amendment strings to avoid confusion. 
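For context, a minimal user-space sketch of the visible effect (assumptions: an interface named "wlan0" and a kernel carrying this change; the program is illustrative and not part of the patch). With the amendment suffixes removed, SIOCGIWNAME always reports plain "IEEE 802.11".

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <unistd.h>
#include <linux/wireless.h>

int main(void)
{
        struct iwreq wrq;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        if (fd < 0) {
                perror("socket");
                return 1;
        }

        memset(&wrq, 0, sizeof(wrq));
        strncpy(wrq.ifr_ifrn.ifrn_name, "wlan0", IFNAMSIZ - 1);

        if (ioctl(fd, SIOCGIWNAME, &wrq) < 0) {
                perror("SIOCGIWNAME");
                close(fd);
                return 1;
        }

        /* With this patch the reported name is always "IEEE 802.11". */
        printf("%s\n", wrq.u.name);
        close(fd);
        return 0;
}
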
Signed-off-by: Johannes Berg Reviewed-by: Luca Coelho Signed-off-by: Johannes Berg --- net/wireless/wext-compat.c | 35 ----------------------------------- 1 file changed, 35 deletions(-) diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c index 4c89f0ca61ba..9f27221c8913 100644 --- a/net/wireless/wext-compat.c +++ b/net/wireless/wext-compat.c @@ -25,42 +25,7 @@ int cfg80211_wext_giwname(struct net_device *dev, struct iw_request_info *info, char *name, char *extra) { - struct wireless_dev *wdev = dev->ieee80211_ptr; - struct ieee80211_supported_band *sband; - bool is_ht = false, is_a = false, is_b = false, is_g = false; - - if (!wdev) - return -EOPNOTSUPP; - - sband = wdev->wiphy->bands[NL80211_BAND_5GHZ]; - if (sband) { - is_a = true; - is_ht |= sband->ht_cap.ht_supported; - } - - sband = wdev->wiphy->bands[NL80211_BAND_2GHZ]; - if (sband) { - int i; - /* Check for mandatory rates */ - for (i = 0; i < sband->n_bitrates; i++) { - if (sband->bitrates[i].bitrate == 10) - is_b = true; - if (sband->bitrates[i].bitrate == 60) - is_g = true; - } - is_ht |= sband->ht_cap.ht_supported; - } - strcpy(name, "IEEE 802.11"); - if (is_a) - strcat(name, "a"); - if (is_b) - strcat(name, "b"); - if (is_g) - strcat(name, "g"); - if (is_ht) - strcat(name, "n"); - return 0; } EXPORT_WEXT_HANDLER(cfg80211_wext_giwname); From c10d9310edf5aa4a676991139d1a43ec7d87e56b Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 29 Apr 2016 14:16:47 -0700 Subject: [PATCH 1218/1649] tcp: do not assume TCP code is non preemptible We want to to make TCP stack preemptible, as draining prequeue and backlog queues can take lot of time. Many SNMP updates were assuming that BH (and preemption) was disabled. Need to convert some __NET_INC_STATS() calls to NET_INC_STATS() and some __TCP_INC_STATS() to TCP_INC_STATS() Before using this_cpu_ptr(net->ipv4.tcp_sk) in tcp_v4_send_reset() and tcp_v4_send_ack(), we add an explicit preempt disabled section. Signed-off-by: Eric Dumazet Acked-by: Soheil Hassas Yeganeh Signed-off-by: David S. 
Miller --- net/ipv4/tcp.c | 2 +- net/ipv4/tcp_cdg.c | 20 ++++----- net/ipv4/tcp_cubic.c | 20 ++++----- net/ipv4/tcp_fastopen.c | 12 ++--- net/ipv4/tcp_input.c | 96 ++++++++++++++++++++-------------------- net/ipv4/tcp_ipv4.c | 14 +++--- net/ipv4/tcp_minisocks.c | 2 +- net/ipv4/tcp_output.c | 11 +++-- net/ipv4/tcp_recovery.c | 4 +- net/ipv4/tcp_timer.c | 10 +++-- net/ipv6/tcp_ipv6.c | 12 ++--- 11 files changed, 104 insertions(+), 99 deletions(-) diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index cb4d1cabb42c..b24c6ed4a04f 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -3095,7 +3095,7 @@ void tcp_done(struct sock *sk) struct request_sock *req = tcp_sk(sk)->fastopen_rsk; if (sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV) - __TCP_INC_STATS(sock_net(sk), TCP_MIB_ATTEMPTFAILS); + TCP_INC_STATS(sock_net(sk), TCP_MIB_ATTEMPTFAILS); tcp_set_state(sk, TCP_CLOSE); tcp_clear_xmit_timers(sk); diff --git a/net/ipv4/tcp_cdg.c b/net/ipv4/tcp_cdg.c index 3c00208c37f4..ccce8a55f1e1 100644 --- a/net/ipv4/tcp_cdg.c +++ b/net/ipv4/tcp_cdg.c @@ -155,11 +155,11 @@ static void tcp_cdg_hystart_update(struct sock *sk) ca->last_ack = now_us; if (after(now_us, ca->round_start + base_owd)) { - __NET_INC_STATS(sock_net(sk), - LINUX_MIB_TCPHYSTARTTRAINDETECT); - __NET_ADD_STATS(sock_net(sk), - LINUX_MIB_TCPHYSTARTTRAINCWND, - tp->snd_cwnd); + NET_INC_STATS(sock_net(sk), + LINUX_MIB_TCPHYSTARTTRAINDETECT); + NET_ADD_STATS(sock_net(sk), + LINUX_MIB_TCPHYSTARTTRAINCWND, + tp->snd_cwnd); tp->snd_ssthresh = tp->snd_cwnd; return; } @@ -174,11 +174,11 @@ static void tcp_cdg_hystart_update(struct sock *sk) 125U); if (ca->rtt.min > thresh) { - __NET_INC_STATS(sock_net(sk), - LINUX_MIB_TCPHYSTARTDELAYDETECT); - __NET_ADD_STATS(sock_net(sk), - LINUX_MIB_TCPHYSTARTDELAYCWND, - tp->snd_cwnd); + NET_INC_STATS(sock_net(sk), + LINUX_MIB_TCPHYSTARTDELAYDETECT); + NET_ADD_STATS(sock_net(sk), + LINUX_MIB_TCPHYSTARTDELAYCWND, + tp->snd_cwnd); tp->snd_ssthresh = tp->snd_cwnd; } } diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c index 59155af9de5d..0ce946e395e1 100644 --- a/net/ipv4/tcp_cubic.c +++ b/net/ipv4/tcp_cubic.c @@ -402,11 +402,11 @@ static void hystart_update(struct sock *sk, u32 delay) ca->last_ack = now; if ((s32)(now - ca->round_start) > ca->delay_min >> 4) { ca->found |= HYSTART_ACK_TRAIN; - __NET_INC_STATS(sock_net(sk), - LINUX_MIB_TCPHYSTARTTRAINDETECT); - __NET_ADD_STATS(sock_net(sk), - LINUX_MIB_TCPHYSTARTTRAINCWND, - tp->snd_cwnd); + NET_INC_STATS(sock_net(sk), + LINUX_MIB_TCPHYSTARTTRAINDETECT); + NET_ADD_STATS(sock_net(sk), + LINUX_MIB_TCPHYSTARTTRAINCWND, + tp->snd_cwnd); tp->snd_ssthresh = tp->snd_cwnd; } } @@ -423,11 +423,11 @@ static void hystart_update(struct sock *sk, u32 delay) if (ca->curr_rtt > ca->delay_min + HYSTART_DELAY_THRESH(ca->delay_min >> 3)) { ca->found |= HYSTART_DELAY; - __NET_INC_STATS(sock_net(sk), - LINUX_MIB_TCPHYSTARTDELAYDETECT); - __NET_ADD_STATS(sock_net(sk), - LINUX_MIB_TCPHYSTARTDELAYCWND, - tp->snd_cwnd); + NET_INC_STATS(sock_net(sk), + LINUX_MIB_TCPHYSTARTDELAYDETECT); + NET_ADD_STATS(sock_net(sk), + LINUX_MIB_TCPHYSTARTDELAYCWND, + tp->snd_cwnd); tp->snd_ssthresh = tp->snd_cwnd; } } diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c index a1498d507e42..54d9f9b0120f 100644 --- a/net/ipv4/tcp_fastopen.c +++ b/net/ipv4/tcp_fastopen.c @@ -255,9 +255,9 @@ static bool tcp_fastopen_queue_check(struct sock *sk) spin_lock(&fastopenq->lock); req1 = fastopenq->rskq_rst_head; if (!req1 || time_after(req1->rsk_timer.expires, jiffies)) { - 
spin_unlock(&fastopenq->lock); __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENLISTENOVERFLOW); + spin_unlock(&fastopenq->lock); return false; } fastopenq->rskq_rst_head = req1->dl_next; @@ -282,7 +282,7 @@ struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb, struct sock *child; if (foc->len == 0) /* Client requests a cookie */ - __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENCOOKIEREQD); + NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENCOOKIEREQD); if (!((sysctl_tcp_fastopen & TFO_SERVER_ENABLE) && (syn_data || foc->len >= 0) && @@ -311,13 +311,13 @@ fastopen: child = tcp_fastopen_create_child(sk, skb, dst, req); if (child) { foc->len = -1; - __NET_INC_STATS(sock_net(sk), - LINUX_MIB_TCPFASTOPENPASSIVE); + NET_INC_STATS(sock_net(sk), + LINUX_MIB_TCPFASTOPENPASSIVE); return child; } - __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENPASSIVEFAIL); + NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENPASSIVEFAIL); } else if (foc->len > 0) /* Client presents an invalid cookie */ - __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENPASSIVEFAIL); + NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENPASSIVEFAIL); valid_foc.exp = foc->exp; *foc = valid_foc; diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 1fb19c91e091..ac85fb42a5a2 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -869,7 +869,7 @@ static void tcp_update_reordering(struct sock *sk, const int metric, else mib_idx = LINUX_MIB_TCPSACKREORDER; - __NET_INC_STATS(sock_net(sk), mib_idx); + NET_INC_STATS(sock_net(sk), mib_idx); #if FASTRETRANS_DEBUG > 1 pr_debug("Disorder%d %d %u f%u s%u rr%d\n", tp->rx_opt.sack_ok, inet_csk(sk)->icsk_ca_state, @@ -1062,7 +1062,7 @@ static bool tcp_check_dsack(struct sock *sk, const struct sk_buff *ack_skb, if (before(start_seq_0, TCP_SKB_CB(ack_skb)->ack_seq)) { dup_sack = true; tcp_dsack_seen(tp); - __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDSACKRECV); + NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDSACKRECV); } else if (num_sacks > 1) { u32 end_seq_1 = get_unaligned_be32(&sp[1].end_seq); u32 start_seq_1 = get_unaligned_be32(&sp[1].start_seq); @@ -1071,7 +1071,7 @@ static bool tcp_check_dsack(struct sock *sk, const struct sk_buff *ack_skb, !before(start_seq_0, start_seq_1)) { dup_sack = true; tcp_dsack_seen(tp); - __NET_INC_STATS(sock_net(sk), + NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDSACKOFORECV); } } @@ -1289,7 +1289,7 @@ static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *skb, if (skb->len > 0) { BUG_ON(!tcp_skb_pcount(skb)); - __NET_INC_STATS(sock_net(sk), LINUX_MIB_SACKSHIFTED); + NET_INC_STATS(sock_net(sk), LINUX_MIB_SACKSHIFTED); return false; } @@ -1314,7 +1314,7 @@ static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *skb, tcp_unlink_write_queue(skb, sk); sk_wmem_free_skb(sk, skb); - __NET_INC_STATS(sock_net(sk), LINUX_MIB_SACKMERGED); + NET_INC_STATS(sock_net(sk), LINUX_MIB_SACKMERGED); return true; } @@ -1473,7 +1473,7 @@ noop: return skb; fallback: - __NET_INC_STATS(sock_net(sk), LINUX_MIB_SACKSHIFTFALLBACK); + NET_INC_STATS(sock_net(sk), LINUX_MIB_SACKSHIFTFALLBACK); return NULL; } @@ -1661,7 +1661,7 @@ tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb, mib_idx = LINUX_MIB_TCPSACKDISCARD; } - __NET_INC_STATS(sock_net(sk), mib_idx); + NET_INC_STATS(sock_net(sk), mib_idx); if (i == 0) first_sack_index = -1; continue; @@ -1913,7 +1913,7 @@ void tcp_enter_loss(struct sock *sk) skb = tcp_write_queue_head(sk); is_reneg = skb && (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED); if (is_reneg) { - 
__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSACKRENEGING); + NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSACKRENEGING); tp->sacked_out = 0; tp->fackets_out = 0; } @@ -2399,7 +2399,7 @@ static bool tcp_try_undo_recovery(struct sock *sk) else mib_idx = LINUX_MIB_TCPFULLUNDO; - __NET_INC_STATS(sock_net(sk), mib_idx); + NET_INC_STATS(sock_net(sk), mib_idx); } if (tp->snd_una == tp->high_seq && tcp_is_reno(tp)) { /* Hold old state until something *above* high_seq @@ -2421,7 +2421,7 @@ static bool tcp_try_undo_dsack(struct sock *sk) if (tp->undo_marker && !tp->undo_retrans) { DBGUNDO(sk, "D-SACK"); tcp_undo_cwnd_reduction(sk, false); - __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDSACKUNDO); + NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDSACKUNDO); return true; } return false; @@ -2436,9 +2436,9 @@ static bool tcp_try_undo_loss(struct sock *sk, bool frto_undo) tcp_undo_cwnd_reduction(sk, true); DBGUNDO(sk, "partial loss"); - __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPLOSSUNDO); + NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPLOSSUNDO); if (frto_undo) - __NET_INC_STATS(sock_net(sk), + NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSPURIOUSRTOS); inet_csk(sk)->icsk_retransmits = 0; if (frto_undo || tcp_is_sack(tp)) @@ -2563,7 +2563,7 @@ static void tcp_mtup_probe_failed(struct sock *sk) icsk->icsk_mtup.search_high = icsk->icsk_mtup.probe_size - 1; icsk->icsk_mtup.probe_size = 0; - __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMTUPFAIL); + NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMTUPFAIL); } static void tcp_mtup_probe_success(struct sock *sk) @@ -2583,7 +2583,7 @@ static void tcp_mtup_probe_success(struct sock *sk) icsk->icsk_mtup.search_low = icsk->icsk_mtup.probe_size; icsk->icsk_mtup.probe_size = 0; tcp_sync_mss(sk, icsk->icsk_pmtu_cookie); - __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMTUPSUCCESS); + NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMTUPSUCCESS); } /* Do a simple retransmit without using the backoff mechanisms in @@ -2647,7 +2647,7 @@ static void tcp_enter_recovery(struct sock *sk, bool ece_ack) else mib_idx = LINUX_MIB_TCPSACKRECOVERY; - __NET_INC_STATS(sock_net(sk), mib_idx); + NET_INC_STATS(sock_net(sk), mib_idx); tp->prior_ssthresh = 0; tcp_init_undo(tp); @@ -2740,7 +2740,7 @@ static bool tcp_try_undo_partial(struct sock *sk, const int acked) DBGUNDO(sk, "partial recovery"); tcp_undo_cwnd_reduction(sk, true); - __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPPARTIALUNDO); + NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPPARTIALUNDO); tcp_try_keep_open(sk); return true; } @@ -3434,7 +3434,7 @@ bool tcp_oow_rate_limited(struct net *net, const struct sk_buff *skb, s32 elapsed = (s32)(tcp_time_stamp - *last_oow_ack_time); if (0 <= elapsed && elapsed < sysctl_tcp_invalid_ratelimit) { - __NET_INC_STATS(net, mib_idx); + NET_INC_STATS(net, mib_idx); return true; /* rate-limited: don't send yet! 
*/ } } @@ -3467,7 +3467,7 @@ static void tcp_send_challenge_ack(struct sock *sk, const struct sk_buff *skb) challenge_count = 0; } if (++challenge_count <= sysctl_tcp_challenge_ack_limit) { - __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPCHALLENGEACK); + NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPCHALLENGEACK); tcp_send_ack(sk); } } @@ -3516,7 +3516,7 @@ static void tcp_process_tlp_ack(struct sock *sk, u32 ack, int flag) tcp_set_ca_state(sk, TCP_CA_CWR); tcp_end_cwnd_reduction(sk); tcp_try_keep_open(sk); - __NET_INC_STATS(sock_net(sk), + NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPLOSSPROBERECOVERY); } else if (!(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP | FLAG_DATA_SACKED))) { @@ -3621,14 +3621,14 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) tcp_in_ack_event(sk, CA_ACK_WIN_UPDATE); - __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPHPACKS); + NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPHPACKS); } else { u32 ack_ev_flags = CA_ACK_SLOWPATH; if (ack_seq != TCP_SKB_CB(skb)->end_seq) flag |= FLAG_DATA; else - __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPPUREACKS); + NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPPUREACKS); flag |= tcp_ack_update_window(sk, skb, ack, ack_seq); @@ -4131,7 +4131,7 @@ static void tcp_dsack_set(struct sock *sk, u32 seq, u32 end_seq) else mib_idx = LINUX_MIB_TCPDSACKOFOSENT; - __NET_INC_STATS(sock_net(sk), mib_idx); + NET_INC_STATS(sock_net(sk), mib_idx); tp->rx_opt.dsack = 1; tp->duplicate_sack[0].start_seq = seq; @@ -4155,7 +4155,7 @@ static void tcp_send_dupack(struct sock *sk, const struct sk_buff *skb) if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq && before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) { - __NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOST); + NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOST); tcp_enter_quickack_mode(sk); if (tcp_is_sack(tp) && sysctl_tcp_dsack) { @@ -4305,7 +4305,7 @@ static bool tcp_try_coalesce(struct sock *sk, atomic_add(delta, &sk->sk_rmem_alloc); sk_mem_charge(sk, delta); - __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRCVCOALESCE); + NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRCVCOALESCE); TCP_SKB_CB(to)->end_seq = TCP_SKB_CB(from)->end_seq; TCP_SKB_CB(to)->ack_seq = TCP_SKB_CB(from)->ack_seq; TCP_SKB_CB(to)->tcp_flags |= TCP_SKB_CB(from)->tcp_flags; @@ -4393,7 +4393,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb) tcp_ecn_check_ce(tp, skb); if (unlikely(tcp_try_rmem_schedule(sk, skb, skb->truesize))) { - __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFODROP); + NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFODROP); tcp_drop(sk, skb); return; } @@ -4402,7 +4402,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb) tp->pred_flags = 0; inet_csk_schedule_ack(sk); - __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFOQUEUE); + NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFOQUEUE); SOCK_DEBUG(sk, "out of order segment: rcv_next %X seq %X - %X\n", tp->rcv_nxt, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq); @@ -4457,7 +4457,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb) if (skb1 && before(seq, TCP_SKB_CB(skb1)->end_seq)) { if (!after(end_seq, TCP_SKB_CB(skb1)->end_seq)) { /* All the bits are present. Drop. 
*/ - __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFOMERGE); + NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFOMERGE); tcp_drop(sk, skb); skb = NULL; tcp_dsack_set(sk, seq, end_seq); @@ -4496,7 +4496,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb) __skb_unlink(skb1, &tp->out_of_order_queue); tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq, TCP_SKB_CB(skb1)->end_seq); - __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFOMERGE); + NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFOMERGE); tcp_drop(sk, skb1); } @@ -4661,7 +4661,7 @@ queue_and_out: if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) { /* A retransmit, 2nd most common case. Force an immediate ack. */ - __NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOST); + NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOST); tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq); out_of_window: @@ -4707,7 +4707,7 @@ static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb, __skb_unlink(skb, list); __kfree_skb(skb); - __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRCVCOLLAPSED); + NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRCVCOLLAPSED); return next; } @@ -4866,7 +4866,7 @@ static bool tcp_prune_ofo_queue(struct sock *sk) bool res = false; if (!skb_queue_empty(&tp->out_of_order_queue)) { - __NET_INC_STATS(sock_net(sk), LINUX_MIB_OFOPRUNED); + NET_INC_STATS(sock_net(sk), LINUX_MIB_OFOPRUNED); __skb_queue_purge(&tp->out_of_order_queue); /* Reset SACK state. A conforming SACK implementation will @@ -4895,7 +4895,7 @@ static int tcp_prune_queue(struct sock *sk) SOCK_DEBUG(sk, "prune_queue: c=%x\n", tp->copied_seq); - __NET_INC_STATS(sock_net(sk), LINUX_MIB_PRUNECALLED); + NET_INC_STATS(sock_net(sk), LINUX_MIB_PRUNECALLED); if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) tcp_clamp_window(sk); @@ -4925,7 +4925,7 @@ static int tcp_prune_queue(struct sock *sk) * drop receive data on the floor. It will get retransmitted * and hopefully then we'll have sufficient space. */ - __NET_INC_STATS(sock_net(sk), LINUX_MIB_RCVPRUNED); + NET_INC_STATS(sock_net(sk), LINUX_MIB_RCVPRUNED); /* Massive buffer overcommit. 
*/ tp->pred_flags = 0; @@ -5184,7 +5184,7 @@ static bool tcp_validate_incoming(struct sock *sk, struct sk_buff *skb, if (tcp_fast_parse_options(skb, th, tp) && tp->rx_opt.saw_tstamp && tcp_paws_discard(sk, skb)) { if (!th->rst) { - __NET_INC_STATS(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED); + NET_INC_STATS(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED); if (!tcp_oow_rate_limited(sock_net(sk), skb, LINUX_MIB_TCPACKSKIPPEDPAWS, &tp->last_oow_ack_time)) @@ -5236,8 +5236,8 @@ static bool tcp_validate_incoming(struct sock *sk, struct sk_buff *skb, if (th->syn) { syn_challenge: if (syn_inerr) - __TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS); - __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSYNCHALLENGE); + TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS); + NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSYNCHALLENGE); tcp_send_challenge_ack(sk, skb); goto discard; } @@ -5352,7 +5352,7 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb, tcp_data_snd_check(sk); return; } else { /* Header too small */ - __TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS); + TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS); goto discard; } } else { @@ -5380,7 +5380,7 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb, __skb_pull(skb, tcp_header_len); tcp_rcv_nxt_update(tp, TCP_SKB_CB(skb)->end_seq); - __NET_INC_STATS(sock_net(sk), + NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPHPHITSTOUSER); eaten = 1; } @@ -5403,7 +5403,7 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb, tcp_rcv_rtt_measure_ts(sk, skb); - __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPHPHITS); + NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPHPHITS); /* Bulk data transfer: receiver */ eaten = tcp_queue_rcv(sk, skb, tcp_header_len, @@ -5460,8 +5460,8 @@ step5: return; csum_error: - __TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS); - __TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS); + TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS); + TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS); discard: tcp_drop(sk, skb); @@ -5553,13 +5553,13 @@ static bool tcp_rcv_fastopen_synack(struct sock *sk, struct sk_buff *synack, break; } tcp_rearm_rto(sk); - __NET_INC_STATS(sock_net(sk), + NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENACTIVEFAIL); return true; } tp->syn_data_acked = tp->syn_data; if (tp->syn_data_acked) - __NET_INC_STATS(sock_net(sk), + NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENACTIVE); tcp_fastopen_add_skb(sk, synack); @@ -5595,7 +5595,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb, if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr && !between(tp->rx_opt.rcv_tsecr, tp->retrans_stamp, tcp_time_stamp)) { - __NET_INC_STATS(sock_net(sk), + NET_INC_STATS(sock_net(sk), LINUX_MIB_PAWSACTIVEREJECTED); goto reset_and_undo; } @@ -5965,7 +5965,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb) (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq && after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt))) { tcp_done(sk); - __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONDATA); + NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONDATA); return 1; } @@ -6022,7 +6022,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb) if (sk->sk_shutdown & RCV_SHUTDOWN) { if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq && after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt)) { - __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONDATA); + NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONDATA); tcp_reset(sk); return 1; } @@ -6224,7 +6224,7 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops, * timeout. 
*/ if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) { - __NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS); + NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS); goto drop; } @@ -6271,7 +6271,7 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops, if (dst && strict && !tcp_peer_is_proven(req, dst, true, tmp_opt.saw_tstamp)) { - __NET_INC_STATS(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED); + NET_INC_STATS(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED); goto drop_and_release; } } diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 87b173b563b0..761bc492c5e3 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -692,6 +692,7 @@ static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb) offsetof(struct inet_timewait_sock, tw_bound_dev_if)); arg.tos = ip_hdr(skb)->tos; + preempt_disable(); ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk), skb, &TCP_SKB_CB(skb)->header.h4.opt, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr, @@ -699,6 +700,7 @@ static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb) __TCP_INC_STATS(net, TCP_MIB_OUTSEGS); __TCP_INC_STATS(net, TCP_MIB_OUTRSTS); + preempt_enable(); #ifdef CONFIG_TCP_MD5SIG out: @@ -774,12 +776,14 @@ static void tcp_v4_send_ack(struct net *net, if (oif) arg.bound_dev_if = oif; arg.tos = tos; + preempt_disable(); ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk), skb, &TCP_SKB_CB(skb)->header.h4.opt, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr, &arg, arg.iov[0].iov_len); __TCP_INC_STATS(net, TCP_MIB_OUTSEGS); + preempt_enable(); } static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb) @@ -1151,12 +1155,12 @@ static bool tcp_v4_inbound_md5_hash(const struct sock *sk, return false; if (hash_expected && !hash_location) { - __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND); + NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND); return true; } if (!hash_expected && hash_location) { - __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED); + NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED); return true; } @@ -1342,7 +1346,7 @@ struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb, return newsk; exit_overflow: - __NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS); + NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS); exit_nonewsk: dst_release(dst); exit: @@ -1432,8 +1436,8 @@ discard: return 0; csum_err: - __TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS); - __TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS); + TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS); + TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS); goto discard; } EXPORT_SYMBOL(tcp_v4_do_rcv); diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c index ffbfecdae471..4b95ec4ed2c8 100644 --- a/net/ipv4/tcp_minisocks.c +++ b/net/ipv4/tcp_minisocks.c @@ -337,7 +337,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo) * socket up. We've got bigger problems than * non-graceful socket closings. */ - __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPTIMEWAITOVERFLOW); + NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPTIMEWAITOVERFLOW); } tcp_update_metrics(sk); diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 1a487ff95d4c..25d527922b18 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -2221,14 +2221,13 @@ bool tcp_schedule_loss_probe(struct sock *sk) /* Thanks to skb fast clones, we can detect if a prior transmit of * a packet is still in a qdisc or driver queue. * In this case, there is very little point doing a retransmit ! 
- * Note: This is called from BH context only. */ static bool skb_still_in_host_queue(const struct sock *sk, const struct sk_buff *skb) { if (unlikely(skb_fclone_busy(sk, skb))) { - __NET_INC_STATS(sock_net(sk), - LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES); + NET_INC_STATS(sock_net(sk), + LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES); return true; } return false; @@ -2290,7 +2289,7 @@ void tcp_send_loss_probe(struct sock *sk) tp->tlp_high_seq = tp->snd_nxt; probe_sent: - __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPLOSSPROBES); + NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPLOSSPROBES); /* Reset s.t. tcp_rearm_rto will restart timer from now */ inet_csk(sk)->icsk_pending = 0; rearm_timer: @@ -2699,7 +2698,7 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs) tp->retrans_stamp = tcp_skb_timestamp(skb); } else if (err != -EBUSY) { - __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL); + NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL); } if (tp->undo_retrans < 0) @@ -2823,7 +2822,7 @@ begin_fwd: if (tcp_retransmit_skb(sk, skb, segs)) return; - __NET_INC_STATS(sock_net(sk), mib_idx); + NET_INC_STATS(sock_net(sk), mib_idx); if (tcp_in_cwnd_reduction(sk)) tp->prr_out += tcp_skb_pcount(skb); diff --git a/net/ipv4/tcp_recovery.c b/net/ipv4/tcp_recovery.c index e0d0afaf15be..e36df4fcfeba 100644 --- a/net/ipv4/tcp_recovery.c +++ b/net/ipv4/tcp_recovery.c @@ -65,8 +65,8 @@ int tcp_rack_mark_lost(struct sock *sk) if (scb->sacked & TCPCB_SACKED_RETRANS) { scb->sacked &= ~TCPCB_SACKED_RETRANS; tp->retrans_out -= tcp_skb_pcount(skb); - __NET_INC_STATS(sock_net(sk), - LINUX_MIB_TCPLOSTRETRANSMIT); + NET_INC_STATS(sock_net(sk), + LINUX_MIB_TCPLOSTRETRANSMIT); } } else if (!(scb->sacked & TCPCB_RETRANS)) { /* Original data are sent sequentially so stop early diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c index 35f643d8ffbb..debdd8b33e69 100644 --- a/net/ipv4/tcp_timer.c +++ b/net/ipv4/tcp_timer.c @@ -162,8 +162,8 @@ static int tcp_write_timeout(struct sock *sk) if (tp->syn_fastopen || tp->syn_data) tcp_fastopen_cache_set(sk, 0, NULL, true, 0); if (tp->syn_data && icsk->icsk_retransmits == 1) - __NET_INC_STATS(sock_net(sk), - LINUX_MIB_TCPFASTOPENACTIVEFAIL); + NET_INC_STATS(sock_net(sk), + LINUX_MIB_TCPFASTOPENACTIVEFAIL); } retry_until = icsk->icsk_syn_retries ? 
: net->ipv4.sysctl_tcp_syn_retries; syn_set = true; @@ -178,8 +178,8 @@ static int tcp_write_timeout(struct sock *sk) tp->bytes_acked <= tp->rx_opt.mss_clamp) { tcp_fastopen_cache_set(sk, 0, NULL, true, 0); if (icsk->icsk_retransmits == net->ipv4.sysctl_tcp_retries1) - __NET_INC_STATS(sock_net(sk), - LINUX_MIB_TCPFASTOPENACTIVEFAIL); + NET_INC_STATS(sock_net(sk), + LINUX_MIB_TCPFASTOPENACTIVEFAIL); } /* Black hole detection */ tcp_mtu_probing(icsk, sk); @@ -209,6 +209,7 @@ static int tcp_write_timeout(struct sock *sk) return 0; } +/* Called with BH disabled */ void tcp_delack_timer_handler(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); @@ -493,6 +494,7 @@ out_reset_timer: out:; } +/* Called with BH disabled */ void tcp_write_timer_handler(struct sock *sk) { struct inet_connection_sock *icsk = inet_csk(sk); diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 52914714b923..7bdc9c9c231b 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -649,12 +649,12 @@ static bool tcp_v6_inbound_md5_hash(const struct sock *sk, return false; if (hash_expected && !hash_location) { - __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND); + NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND); return true; } if (!hash_expected && hash_location) { - __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED); + NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED); return true; } @@ -825,9 +825,9 @@ static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 if (!IS_ERR(dst)) { skb_dst_set(buff, dst); ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass); - __TCP_INC_STATS(net, TCP_MIB_OUTSEGS); + TCP_INC_STATS(net, TCP_MIB_OUTSEGS); if (rst) - __TCP_INC_STATS(net, TCP_MIB_OUTRSTS); + TCP_INC_STATS(net, TCP_MIB_OUTRSTS); return; } @@ -1276,8 +1276,8 @@ discard: kfree_skb(skb); return 0; csum_err: - __TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS); - __TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS); + TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS); + TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS); goto discard; From fb3477c0f45aad5dfb2de559949872770e6cd431 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 29 Apr 2016 14:16:48 -0700 Subject: [PATCH 1219/1649] tcp: do not block bh during prequeue processing AFAIK, nothing in current TCP stack absolutely wants BH being disabled once socket is owned by a thread running in process context. As mentioned in my prior patch ("tcp: give prequeue mode some care"), processing a batch of packets might take time, better not block BH at all. Signed-off-by: Eric Dumazet Acked-by: Soheil Hassas Yeganeh Acked-by: Alexei Starovoitov Signed-off-by: David S. Miller --- net/ipv4/tcp.c | 4 ---- net/ipv4/tcp_input.c | 30 ++---------------------------- 2 files changed, 2 insertions(+), 32 deletions(-) diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index b24c6ed4a04f..4787f86ae64c 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -1449,12 +1449,8 @@ static void tcp_prequeue_process(struct sock *sk) NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPPREQUEUED); - /* RX process wants to run with disabled BHs, though it is not - * necessary */ - local_bh_disable(); while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) sk_backlog_rcv(sk, skb); - local_bh_enable(); /* Clear memory counter. 
*/ tp->ucopy.memory = 0; diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index ac85fb42a5a2..6171f92be090 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -4611,14 +4611,12 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb) __set_current_state(TASK_RUNNING); - local_bh_enable(); if (!skb_copy_datagram_msg(skb, 0, tp->ucopy.msg, chunk)) { tp->ucopy.len -= chunk; tp->copied_seq += chunk; eaten = (chunk == skb->len); tcp_rcv_space_adjust(sk); } - local_bh_disable(); } if (eaten <= 0) { @@ -5134,7 +5132,6 @@ static int tcp_copy_to_iovec(struct sock *sk, struct sk_buff *skb, int hlen) int chunk = skb->len - hlen; int err; - local_bh_enable(); if (skb_csum_unnecessary(skb)) err = skb_copy_datagram_msg(skb, hlen, tp->ucopy.msg, chunk); else @@ -5146,32 +5143,9 @@ static int tcp_copy_to_iovec(struct sock *sk, struct sk_buff *skb, int hlen) tcp_rcv_space_adjust(sk); } - local_bh_disable(); return err; } -static __sum16 __tcp_checksum_complete_user(struct sock *sk, - struct sk_buff *skb) -{ - __sum16 result; - - if (sock_owned_by_user(sk)) { - local_bh_enable(); - result = __tcp_checksum_complete(skb); - local_bh_disable(); - } else { - result = __tcp_checksum_complete(skb); - } - return result; -} - -static inline bool tcp_checksum_complete_user(struct sock *sk, - struct sk_buff *skb) -{ - return !skb_csum_unnecessary(skb) && - __tcp_checksum_complete_user(sk, skb); -} - /* Does PAWS and seqno based validation of an incoming segment, flags will * play significant role here. */ @@ -5386,7 +5360,7 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb, } } if (!eaten) { - if (tcp_checksum_complete_user(sk, skb)) + if (tcp_checksum_complete(skb)) goto csum_error; if ((int)skb->truesize > sk->sk_forward_alloc) @@ -5430,7 +5404,7 @@ no_ack: } slow_path: - if (len < (th->doff << 2) || tcp_checksum_complete_user(sk, skb)) + if (len < (th->doff << 2) || tcp_checksum_complete(skb)) goto csum_error; if (!th->ack && !th->rst && !th->syn) From 7309f8821fd65e8272ce82e852532b02967812da Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 29 Apr 2016 14:16:49 -0700 Subject: [PATCH 1220/1649] dccp: do not assume DCCP code is non preemptible DCCP uses the generic backlog code, and this will soon be changed to not disable BH when protocol is called back. Signed-off-by: Eric Dumazet Acked-by: Soheil Hassas Yeganeh Signed-off-by: David S. 
Miller --- net/dccp/input.c | 2 +- net/dccp/ipv4.c | 4 ++-- net/dccp/ipv6.c | 4 ++-- net/dccp/options.c | 2 +- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/net/dccp/input.c b/net/dccp/input.c index 2437ecc13b82..ba347184bda9 100644 --- a/net/dccp/input.c +++ b/net/dccp/input.c @@ -359,7 +359,7 @@ send_sync: goto discard; } - __DCCP_INC_STATS(DCCP_MIB_INERRS); + DCCP_INC_STATS(DCCP_MIB_INERRS); discard: __kfree_skb(skb); return 0; diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c index a8164272e0f4..5c7e413a3ae4 100644 --- a/net/dccp/ipv4.c +++ b/net/dccp/ipv4.c @@ -533,8 +533,8 @@ static void dccp_v4_ctl_send_reset(const struct sock *sk, struct sk_buff *rxskb) bh_unlock_sock(ctl_sk); if (net_xmit_eval(err) == 0) { - __DCCP_INC_STATS(DCCP_MIB_OUTSEGS); - __DCCP_INC_STATS(DCCP_MIB_OUTRSTS); + DCCP_INC_STATS(DCCP_MIB_OUTSEGS); + DCCP_INC_STATS(DCCP_MIB_OUTRSTS); } out: dst_release(dst); diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c index 0f4eb4ea57a5..d176f4e66369 100644 --- a/net/dccp/ipv6.c +++ b/net/dccp/ipv6.c @@ -277,8 +277,8 @@ static void dccp_v6_ctl_send_reset(const struct sock *sk, struct sk_buff *rxskb) if (!IS_ERR(dst)) { skb_dst_set(skb, dst); ip6_xmit(ctl_sk, skb, &fl6, NULL, 0); - __DCCP_INC_STATS(DCCP_MIB_OUTSEGS); - __DCCP_INC_STATS(DCCP_MIB_OUTRSTS); + DCCP_INC_STATS(DCCP_MIB_OUTSEGS); + DCCP_INC_STATS(DCCP_MIB_OUTRSTS); return; } diff --git a/net/dccp/options.c b/net/dccp/options.c index b82b7ee9a1d2..74d29c56c367 100644 --- a/net/dccp/options.c +++ b/net/dccp/options.c @@ -253,7 +253,7 @@ out_nonsensical_length: return 0; out_invalid_option: - __DCCP_INC_STATS(DCCP_MIB_INVALIDOPT); + DCCP_INC_STATS(DCCP_MIB_INVALIDOPT); rc = DCCP_RESET_CODE_OPTION_ERROR; out_featneg_failed: DCCP_WARN("DCCP(%p): Option %d (len=%d) error=%u\n", sk, opt, len, rc); From e61da9e259ef887bd516ab08dfdf1c1261017e8e Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 29 Apr 2016 14:16:50 -0700 Subject: [PATCH 1221/1649] udp: prepare for non BH masking at backlog processing UDP uses the generic socket backlog code, and this will soon be changed to not disable BH when protocol is called back. We need to use appropriate SNMP accessors. Signed-off-by: Eric Dumazet Acked-by: Soheil Hassas Yeganeh Signed-off-by: David S. 
Miller --- net/ipv4/udp.c | 4 ++-- net/ipv6/udp.c | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 093284c5c03b..f67f52ba4809 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -1514,9 +1514,9 @@ static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) /* Note that an ENOMEM error is charged twice */ if (rc == -ENOMEM) - __UDP_INC_STATS(sock_net(sk), UDP_MIB_RCVBUFERRORS, + UDP_INC_STATS(sock_net(sk), UDP_MIB_RCVBUFERRORS, is_udplite); - __UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite); + UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite); kfree_skb(skb); trace_udp_fail_queue_rcv_skb(rc, sk); return -1; diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 1ba5a74ac18f..f911c63f79e6 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -570,9 +570,9 @@ static int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) /* Note that an ENOMEM error is charged twice */ if (rc == -ENOMEM) - __UDP6_INC_STATS(sock_net(sk), + UDP6_INC_STATS(sock_net(sk), UDP_MIB_RCVBUFERRORS, is_udplite); - __UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite); + UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite); kfree_skb(skb); return -1; } From 860fbbc343bf05a71b31555579ff4878194be01b Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 29 Apr 2016 14:16:51 -0700 Subject: [PATCH 1222/1649] sctp: prepare for socket backlog behavior change sctp_inq_push() will soon be called without BH being blocked when generic socket code flushes the socket backlog. It is very possible SCTP can be converted to not rely on BH, but this needs to be done by SCTP experts. Signed-off-by: Eric Dumazet Acked-by: Marcelo Ricardo Leitner Signed-off-by: David S. Miller --- net/sctp/inqueue.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/net/sctp/inqueue.c b/net/sctp/inqueue.c index b335ffcef0b9..9d87bba0ff1d 100644 --- a/net/sctp/inqueue.c +++ b/net/sctp/inqueue.c @@ -89,10 +89,12 @@ void sctp_inq_push(struct sctp_inq *q, struct sctp_chunk *chunk) * Eventually, we should clean up inqueue to not rely * on the BH related data structures. */ + local_bh_disable(); list_add_tail(&chunk->list, &q->in_chunk_list); if (chunk->asoc) chunk->asoc->stats.ipackets++; q->immediate.func(&q->immediate); + local_bh_enable(); } /* Peek at the next chunk on the inqeue. */ From 5413d1babe8f10de13d72496c12b862eef8ba613 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 29 Apr 2016 14:16:52 -0700 Subject: [PATCH 1223/1649] net: do not block BH while processing socket backlog Socket backlog processing is a major latency source. With current TCP socket sk_rcvbuf limits, I have sampled __release_sock() holding cpu for more than 5 ms, and packets being dropped by the NIC once ring buffer is filled. All users are now ready to be called from process context, we can unblock BH and let interrupts be serviced faster. cond_resched_softirq() could be removed, as it has no more user. Signed-off-by: Eric Dumazet Acked-by: Soheil Hassas Yeganeh Acked-by: Alexei Starovoitov Signed-off-by: David S. 
Miller --- net/core/sock.c | 22 ++++++++-------------- 1 file changed, 8 insertions(+), 14 deletions(-) diff --git a/net/core/sock.c b/net/core/sock.c index e16a5db853c6..70744dbb6c3f 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -2019,33 +2019,27 @@ static void __release_sock(struct sock *sk) __releases(&sk->sk_lock.slock) __acquires(&sk->sk_lock.slock) { - struct sk_buff *skb = sk->sk_backlog.head; + struct sk_buff *skb, *next; - do { + while ((skb = sk->sk_backlog.head) != NULL) { sk->sk_backlog.head = sk->sk_backlog.tail = NULL; - bh_unlock_sock(sk); + + spin_unlock_bh(&sk->sk_lock.slock); do { - struct sk_buff *next = skb->next; - + next = skb->next; prefetch(next); WARN_ON_ONCE(skb_dst_is_noref(skb)); skb->next = NULL; sk_backlog_rcv(sk, skb); - /* - * We are in process context here with softirqs - * disabled, use cond_resched_softirq() to preempt. - * This is safe to do because we've taken the backlog - * queue private: - */ - cond_resched_softirq(); + cond_resched(); skb = next; } while (skb != NULL); - bh_lock_sock(sk); - } while ((skb = sk->sk_backlog.head) != NULL); + spin_lock_bh(&sk->sk_lock.slock); + } /* * Doing the zeroing here guarantee we can not loop forever From d41a69f1d390fa3f2546498103cdcd78b30676ff Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 29 Apr 2016 14:16:53 -0700 Subject: [PATCH 1224/1649] tcp: make tcp_sendmsg() aware of socket backlog Large sendmsg()/write() hold socket lock for the duration of the call, unless sk->sk_sndbuf limit is hit. This is bad because incoming packets are parked into socket backlog for a long time. Critical decisions like fast retransmit might be delayed. Receivers have to maintain a big out of order queue with additional cpu overhead, and also possible stalls in TX once windows are full. Bidirectional flows are particularly hurt since the backlog can become quite big if the copy from user space triggers IO (page faults) Some applications learnt to use sendmsg() (or sendmmsg()) with small chunks to avoid this issue. Kernel should know better, right ? Add a generic sk_flush_backlog() helper and use it right before a new skb is allocated. Typically we put 64KB of payload per skb (unless MSG_EOR is requested) and checking socket backlog every 64KB gives good results. As a matter of fact, tests with TSO/GSO disabled give very nice results, as we manage to keep a small write queue and smaller perceived rtt. Note that sk_flush_backlog() maintains socket ownership, so is not equivalent to a {release_sock(sk); lock_sock(sk);}, to ensure implicit atomicity rules that sendmsg() was giving to (possibly buggy) applications. In this simple implementation, I chose to not call tcp_release_cb(), but we might consider this later. Signed-off-by: Eric Dumazet Cc: Alexei Starovoitov Cc: Marcelo Ricardo Leitner Acked-by: Soheil Hassas Yeganeh Signed-off-by: David S. 
Miller --- include/net/sock.h | 11 +++++++++++ net/core/sock.c | 7 +++++++ net/ipv4/tcp.c | 8 ++++++-- 3 files changed, 24 insertions(+), 2 deletions(-) diff --git a/include/net/sock.h b/include/net/sock.h index 3df778ccaa82..1dbb1f9f7c1b 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -926,6 +926,17 @@ void sk_stream_kill_queues(struct sock *sk); void sk_set_memalloc(struct sock *sk); void sk_clear_memalloc(struct sock *sk); +void __sk_flush_backlog(struct sock *sk); + +static inline bool sk_flush_backlog(struct sock *sk) +{ + if (unlikely(READ_ONCE(sk->sk_backlog.tail))) { + __sk_flush_backlog(sk); + return true; + } + return false; +} + int sk_wait_data(struct sock *sk, long *timeo, const struct sk_buff *skb); struct request_sock_ops; diff --git a/net/core/sock.c b/net/core/sock.c index 70744dbb6c3f..f615e9391170 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -2048,6 +2048,13 @@ static void __release_sock(struct sock *sk) sk->sk_backlog.len = 0; } +void __sk_flush_backlog(struct sock *sk) +{ + spin_lock_bh(&sk->sk_lock.slock); + __release_sock(sk); + spin_unlock_bh(&sk->sk_lock.slock); +} + /** * sk_wait_data - wait for data to arrive at sk_receive_queue * @sk: sock to wait on diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 4787f86ae64c..b945c2b046c5 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -1136,11 +1136,12 @@ int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) /* This should be in poll */ sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk); - mss_now = tcp_send_mss(sk, &size_goal, flags); - /* Ok commence sending. */ copied = 0; +restart: + mss_now = tcp_send_mss(sk, &size_goal, flags); + err = -EPIPE; if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) goto out_err; @@ -1166,6 +1167,9 @@ new_segment: if (!sk_stream_memory_free(sk)) goto wait_for_sndbuf; + if (sk_flush_backlog(sk)) + goto restart; + skb = sk_stream_alloc_skb(sk, select_size(sk, sg), sk->sk_allocation, From 0d3c703a9d1723c7707e0680019ac8ff5922db42 Mon Sep 17 00:00:00 2001 From: Tom Herbert Date: Fri, 29 Apr 2016 17:12:15 -0700 Subject: [PATCH 1225/1649] ipv6: Cleanup IPv6 tunnel receive path Some basic changes to make IPv6 tunnel receive path look more like IPv4 path: - Make ip6_tnl_rcv non-static so that GREv6 and others can call it - Make ip6_tnl_rcv look like ip_tunnel_rcv - Switch to gro_cells_receive - Make ip6_tnl_rcv non-static and export it Signed-off-by: Tom Herbert Signed-off-by: David S. 
Miller --- include/net/ip6_tunnel.h | 4 + net/ipv6/ip6_tunnel.c | 222 ++++++++++++++++++++++++++------------- 2 files changed, 151 insertions(+), 75 deletions(-) diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h index 499a707765ea..eab3a9b19ae0 100644 --- a/include/net/ip6_tunnel.h +++ b/include/net/ip6_tunnel.h @@ -42,6 +42,7 @@ struct ip6_tnl { struct __ip6_tnl_parm parms; /* tunnel configuration parameters */ struct flowi fl; /* flowi template for xmit */ struct dst_cache dst_cache; /* cached dst */ + struct gro_cells gro_cells; int err_count; unsigned long err_time; @@ -63,6 +64,9 @@ struct ipv6_tlv_tnl_enc_lim { int ip6_tnl_rcv_ctl(struct ip6_tnl *t, const struct in6_addr *laddr, const struct in6_addr *raddr); +int ip6_tnl_rcv(struct ip6_tnl *tunnel, struct sk_buff *skb, + const struct tnl_ptk_info *tpi, struct metadata_dst *tun_dst, + bool log_ecn_error); int ip6_tnl_xmit_ctl(struct ip6_tnl *t, const struct in6_addr *laddr, const struct in6_addr *raddr); __u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw); diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index 1f20345cbc97..94ed065eff7f 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c @@ -238,6 +238,7 @@ static void ip6_dev_free(struct net_device *dev) { struct ip6_tnl *t = netdev_priv(dev); + gro_cells_destroy(&t->gro_cells); dst_cache_destroy(&t->dst_cache); free_percpu(dev->tstats); free_netdev(dev); @@ -753,97 +754,157 @@ int ip6_tnl_rcv_ctl(struct ip6_tnl *t, } EXPORT_SYMBOL_GPL(ip6_tnl_rcv_ctl); -/** - * ip6_tnl_rcv - decapsulate IPv6 packet and retransmit it locally - * @skb: received socket buffer - * @protocol: ethernet protocol ID - * @dscp_ecn_decapsulate: the function to decapsulate DSCP code and ECN - * - * Return: 0 - **/ +static int __ip6_tnl_rcv(struct ip6_tnl *tunnel, struct sk_buff *skb, + const struct tnl_ptk_info *tpi, + struct metadata_dst *tun_dst, + int (*dscp_ecn_decapsulate)(const struct ip6_tnl *t, + const struct ipv6hdr *ipv6h, + struct sk_buff *skb), + bool log_ecn_err) +{ + struct pcpu_sw_netstats *tstats; + const struct ipv6hdr *ipv6h = ipv6_hdr(skb); + int err; -static int ip6_tnl_rcv(struct sk_buff *skb, __u16 protocol, - __u8 ipproto, - int (*dscp_ecn_decapsulate)(const struct ip6_tnl *t, - const struct ipv6hdr *ipv6h, - struct sk_buff *skb)) + if ((!(tpi->flags & TUNNEL_CSUM) && + (tunnel->parms.i_flags & TUNNEL_CSUM)) || + ((tpi->flags & TUNNEL_CSUM) && + !(tunnel->parms.i_flags & TUNNEL_CSUM))) { + tunnel->dev->stats.rx_crc_errors++; + tunnel->dev->stats.rx_errors++; + goto drop; + } + + if (tunnel->parms.i_flags & TUNNEL_SEQ) { + if (!(tpi->flags & TUNNEL_SEQ) || + (tunnel->i_seqno && + (s32)(ntohl(tpi->seq) - tunnel->i_seqno) < 0)) { + tunnel->dev->stats.rx_fifo_errors++; + tunnel->dev->stats.rx_errors++; + goto drop; + } + tunnel->i_seqno = ntohl(tpi->seq) + 1; + } + + skb->protocol = tpi->proto; + + /* Warning: All skb pointers will be invalidated! 
*/ + if (tunnel->dev->type == ARPHRD_ETHER) { + if (!pskb_may_pull(skb, ETH_HLEN)) { + tunnel->dev->stats.rx_length_errors++; + tunnel->dev->stats.rx_errors++; + goto drop; + } + + ipv6h = ipv6_hdr(skb); + skb->protocol = eth_type_trans(skb, tunnel->dev); + skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN); + } else { + skb->dev = tunnel->dev; + } + + skb_reset_network_header(skb); + memset(skb->cb, 0, sizeof(struct inet6_skb_parm)); + + __skb_tunnel_rx(skb, tunnel->dev, tunnel->net); + + err = dscp_ecn_decapsulate(tunnel, ipv6h, skb); + if (unlikely(err)) { + if (log_ecn_err) + net_info_ratelimited("non-ECT from %pI6 with DS=%#x\n", + &ipv6h->saddr, + ipv6_get_dsfield(ipv6h)); + if (err > 1) { + ++tunnel->dev->stats.rx_frame_errors; + ++tunnel->dev->stats.rx_errors; + goto drop; + } + } + + tstats = this_cpu_ptr(tunnel->dev->tstats); + u64_stats_update_begin(&tstats->syncp); + tstats->rx_packets++; + tstats->rx_bytes += skb->len; + u64_stats_update_end(&tstats->syncp); + + skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(tunnel->dev))); + + gro_cells_receive(&tunnel->gro_cells, skb); + return 0; + +drop: + kfree_skb(skb); + return 0; +} + +int ip6_tnl_rcv(struct ip6_tnl *t, struct sk_buff *skb, + const struct tnl_ptk_info *tpi, + struct metadata_dst *tun_dst, + bool log_ecn_err) +{ + return __ip6_tnl_rcv(t, skb, tpi, NULL, ip6ip6_dscp_ecn_decapsulate, + log_ecn_err); +} +EXPORT_SYMBOL(ip6_tnl_rcv); + +static const struct tnl_ptk_info tpi_v6 = { + /* no tunnel info required for ipxip6. */ + .proto = htons(ETH_P_IPV6), +}; + +static const struct tnl_ptk_info tpi_v4 = { + /* no tunnel info required for ipxip6. */ + .proto = htons(ETH_P_IP), +}; + +static int ipxip6_rcv(struct sk_buff *skb, u8 ipproto, + const struct tnl_ptk_info *tpi, + int (*dscp_ecn_decapsulate)(const struct ip6_tnl *t, + const struct ipv6hdr *ipv6h, + struct sk_buff *skb)) { struct ip6_tnl *t; const struct ipv6hdr *ipv6h = ipv6_hdr(skb); - u8 tproto; - int err; + int ret = -1; rcu_read_lock(); t = ip6_tnl_lookup(dev_net(skb->dev), &ipv6h->saddr, &ipv6h->daddr); + if (t) { - struct pcpu_sw_netstats *tstats; + u8 tproto = ACCESS_ONCE(t->parms.proto); - tproto = ACCESS_ONCE(t->parms.proto); - if (tproto != ipproto && tproto != 0) { - rcu_read_unlock(); - goto discard; - } - - if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) { - rcu_read_unlock(); - goto discard; - } - - if (!ip6_tnl_rcv_ctl(t, &ipv6h->daddr, &ipv6h->saddr)) { - t->dev->stats.rx_dropped++; - rcu_read_unlock(); - goto discard; - } - skb->mac_header = skb->network_header; - skb_reset_network_header(skb); - skb->protocol = htons(protocol); - memset(skb->cb, 0, sizeof(struct inet6_skb_parm)); - - __skb_tunnel_rx(skb, t->dev, t->net); - - err = dscp_ecn_decapsulate(t, ipv6h, skb); - if (unlikely(err)) { - if (log_ecn_error) - net_info_ratelimited("non-ECT from %pI6 with dsfield=%#x\n", - &ipv6h->saddr, - ipv6_get_dsfield(ipv6h)); - if (err > 1) { - ++t->dev->stats.rx_frame_errors; - ++t->dev->stats.rx_errors; - rcu_read_unlock(); - goto discard; - } - } - - tstats = this_cpu_ptr(t->dev->tstats); - u64_stats_update_begin(&tstats->syncp); - tstats->rx_packets++; - tstats->rx_bytes += skb->len; - u64_stats_update_end(&tstats->syncp); - - netif_rx(skb); - - rcu_read_unlock(); - return 0; + if (tproto != ipproto && tproto != 0) + goto drop; + if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) + goto drop; + if (!ip6_tnl_rcv_ctl(t, &ipv6h->daddr, &ipv6h->saddr)) + goto drop; + if (iptunnel_pull_header(skb, 0, tpi->proto, false)) + goto drop; + ret = __ip6_tnl_rcv(t, 
skb, tpi, NULL, dscp_ecn_decapsulate, + log_ecn_error); } - rcu_read_unlock(); - return 1; -discard: + rcu_read_unlock(); + + return ret; + +drop: + rcu_read_unlock(); kfree_skb(skb); return 0; } static int ip4ip6_rcv(struct sk_buff *skb) { - return ip6_tnl_rcv(skb, ETH_P_IP, IPPROTO_IPIP, - ip4ip6_dscp_ecn_decapsulate); + return ipxip6_rcv(skb, IPPROTO_IP, &tpi_v4, + ip4ip6_dscp_ecn_decapsulate); } static int ip6ip6_rcv(struct sk_buff *skb) { - return ip6_tnl_rcv(skb, ETH_P_IPV6, IPPROTO_IPV6, - ip6ip6_dscp_ecn_decapsulate); + return ipxip6_rcv(skb, IPPROTO_IPV6, &tpi_v6, + ip6ip6_dscp_ecn_decapsulate); } struct ipv6_tel_txoption { @@ -1370,6 +1431,8 @@ ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) struct net *net = t->net; struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id); + memset(&p1, 0, sizeof(p1)); + switch (cmd) { case SIOCGETTUNNEL: if (dev == ip6n->fb_tnl_dev) { @@ -1549,13 +1612,22 @@ ip6_tnl_dev_init_gen(struct net_device *dev) return -ENOMEM; ret = dst_cache_init(&t->dst_cache, GFP_KERNEL); - if (ret) { - free_percpu(dev->tstats); - dev->tstats = NULL; - return ret; - } + if (ret) + goto free_stats; + + ret = gro_cells_init(&t->gro_cells, dev); + if (ret) + goto destroy_dst; return 0; + +destroy_dst: + dst_cache_destroy(&t->dst_cache); +free_stats: + free_percpu(dev->tstats); + dev->tstats = NULL; + + return ret; } /** From 95f5c64c3c13a609e137d35c4b452519e0b954df Mon Sep 17 00:00:00 2001 From: Tom Herbert Date: Fri, 29 Apr 2016 17:12:16 -0700 Subject: [PATCH 1226/1649] gre: Move utility functions to common headers Several of the GRE functions defined in net/ipv4/ip_gre.c are usable for IPv6 GRE implementation (that is they are protocol agnostic). These include: - GRE flag handling functions are move to gre.h - GRE build_header is moved to gre.h and renamed gre_build_header - parse_gre_header is moved to gre_demux.c and renamed gre_parse_header - iptunnel_pull_header is taken out of gre_parse_header. This is now done by caller. The header length is returned from gre_parse_header in an int* argument. Signed-off-by: Tom Herbert Signed-off-by: David S. 
Miller --- include/net/gre.h | 60 +++++++++++++++++ net/ipv4/gre_demux.c | 64 +++++++++++++++++++ net/ipv4/ip_gre.c | 149 ++++++------------------------------------- 3 files changed, 144 insertions(+), 129 deletions(-) diff --git a/include/net/gre.h b/include/net/gre.h index 97eafdc47eea..39591584ec92 100644 --- a/include/net/gre.h +++ b/include/net/gre.h @@ -25,4 +25,64 @@ int gre_del_protocol(const struct gre_protocol *proto, u8 version); struct net_device *gretap_fb_dev_create(struct net *net, const char *name, u8 name_assign_type); +int gre_parse_header(struct sk_buff *skb, struct tnl_ptk_info *tpi, + bool *csum_err, int *hdr_len); + +static inline int gre_calc_hlen(__be16 o_flags) +{ + int addend = 4; + + if (o_flags & TUNNEL_CSUM) + addend += 4; + if (o_flags & TUNNEL_KEY) + addend += 4; + if (o_flags & TUNNEL_SEQ) + addend += 4; + return addend; +} + +static inline __be16 gre_flags_to_tnl_flags(__be16 flags) +{ + __be16 tflags = 0; + + if (flags & GRE_CSUM) + tflags |= TUNNEL_CSUM; + if (flags & GRE_ROUTING) + tflags |= TUNNEL_ROUTING; + if (flags & GRE_KEY) + tflags |= TUNNEL_KEY; + if (flags & GRE_SEQ) + tflags |= TUNNEL_SEQ; + if (flags & GRE_STRICT) + tflags |= TUNNEL_STRICT; + if (flags & GRE_REC) + tflags |= TUNNEL_REC; + if (flags & GRE_VERSION) + tflags |= TUNNEL_VERSION; + + return tflags; +} + +static inline __be16 gre_tnl_flags_to_gre_flags(__be16 tflags) +{ + __be16 flags = 0; + + if (tflags & TUNNEL_CSUM) + flags |= GRE_CSUM; + if (tflags & TUNNEL_ROUTING) + flags |= GRE_ROUTING; + if (tflags & TUNNEL_KEY) + flags |= GRE_KEY; + if (tflags & TUNNEL_SEQ) + flags |= GRE_SEQ; + if (tflags & TUNNEL_STRICT) + flags |= GRE_STRICT; + if (tflags & TUNNEL_REC) + flags |= GRE_REC; + if (tflags & TUNNEL_VERSION) + flags |= GRE_VERSION; + + return flags; +} + #endif diff --git a/net/ipv4/gre_demux.c b/net/ipv4/gre_demux.c index d9c552a721fc..371674801e84 100644 --- a/net/ipv4/gre_demux.c +++ b/net/ipv4/gre_demux.c @@ -60,6 +60,70 @@ int gre_del_protocol(const struct gre_protocol *proto, u8 version) } EXPORT_SYMBOL_GPL(gre_del_protocol); +int gre_parse_header(struct sk_buff *skb, struct tnl_ptk_info *tpi, + bool *csum_err, int *ret_hdr_len) +{ + const struct gre_base_hdr *greh; + __be32 *options; + int hdr_len; + + if (unlikely(!pskb_may_pull(skb, sizeof(struct gre_base_hdr)))) + return -EINVAL; + + greh = (struct gre_base_hdr *)skb_transport_header(skb); + if (unlikely(greh->flags & (GRE_VERSION | GRE_ROUTING))) + return -EINVAL; + + tpi->flags = gre_flags_to_tnl_flags(greh->flags); + hdr_len = gre_calc_hlen(tpi->flags); + + if (!pskb_may_pull(skb, hdr_len)) + return -EINVAL; + + greh = (struct gre_base_hdr *)skb_transport_header(skb); + tpi->proto = greh->protocol; + + options = (__be32 *)(greh + 1); + if (greh->flags & GRE_CSUM) { + if (skb_checksum_simple_validate(skb)) { + *csum_err = true; + return -EINVAL; + } + + skb_checksum_try_convert(skb, IPPROTO_GRE, 0, + null_compute_pseudo); + options++; + } + + if (greh->flags & GRE_KEY) { + tpi->key = *options; + options++; + } else { + tpi->key = 0; + } + if (unlikely(greh->flags & GRE_SEQ)) { + tpi->seq = *options; + options++; + } else { + tpi->seq = 0; + } + /* WCCP version 1 and 2 protocol decoding. 
+ * - Change protocol to IP + * - When dealing with WCCPv2, Skip extra 4 bytes in GRE header + */ + if (greh->flags == 0 && tpi->proto == htons(ETH_P_WCCP)) { + tpi->proto = htons(ETH_P_IP); + if ((*(u8 *)options & 0xF0) != 0x40) { + hdr_len += 4; + if (!pskb_may_pull(skb, hdr_len)) + return -EINVAL; + } + } + *ret_hdr_len = hdr_len; + return 0; +} +EXPORT_SYMBOL(gre_parse_header); + static int gre_rcv(struct sk_buff *skb) { const struct gre_protocol *proto; diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index eedd829a2f87..f6db3d6a4d4d 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c @@ -122,125 +122,6 @@ static int ipgre_tunnel_init(struct net_device *dev); static int ipgre_net_id __read_mostly; static int gre_tap_net_id __read_mostly; -static int ip_gre_calc_hlen(__be16 o_flags) -{ - int addend = 4; - - if (o_flags & TUNNEL_CSUM) - addend += 4; - if (o_flags & TUNNEL_KEY) - addend += 4; - if (o_flags & TUNNEL_SEQ) - addend += 4; - return addend; -} - -static __be16 gre_flags_to_tnl_flags(__be16 flags) -{ - __be16 tflags = 0; - - if (flags & GRE_CSUM) - tflags |= TUNNEL_CSUM; - if (flags & GRE_ROUTING) - tflags |= TUNNEL_ROUTING; - if (flags & GRE_KEY) - tflags |= TUNNEL_KEY; - if (flags & GRE_SEQ) - tflags |= TUNNEL_SEQ; - if (flags & GRE_STRICT) - tflags |= TUNNEL_STRICT; - if (flags & GRE_REC) - tflags |= TUNNEL_REC; - if (flags & GRE_VERSION) - tflags |= TUNNEL_VERSION; - - return tflags; -} - -static __be16 tnl_flags_to_gre_flags(__be16 tflags) -{ - __be16 flags = 0; - - if (tflags & TUNNEL_CSUM) - flags |= GRE_CSUM; - if (tflags & TUNNEL_ROUTING) - flags |= GRE_ROUTING; - if (tflags & TUNNEL_KEY) - flags |= GRE_KEY; - if (tflags & TUNNEL_SEQ) - flags |= GRE_SEQ; - if (tflags & TUNNEL_STRICT) - flags |= GRE_STRICT; - if (tflags & TUNNEL_REC) - flags |= GRE_REC; - if (tflags & TUNNEL_VERSION) - flags |= GRE_VERSION; - - return flags; -} - -static int parse_gre_header(struct sk_buff *skb, struct tnl_ptk_info *tpi, - bool *csum_err) -{ - const struct gre_base_hdr *greh; - __be32 *options; - int hdr_len; - - if (unlikely(!pskb_may_pull(skb, sizeof(struct gre_base_hdr)))) - return -EINVAL; - - greh = (struct gre_base_hdr *)skb_transport_header(skb); - if (unlikely(greh->flags & (GRE_VERSION | GRE_ROUTING))) - return -EINVAL; - - tpi->flags = gre_flags_to_tnl_flags(greh->flags); - hdr_len = ip_gre_calc_hlen(tpi->flags); - - if (!pskb_may_pull(skb, hdr_len)) - return -EINVAL; - - greh = (struct gre_base_hdr *)skb_transport_header(skb); - tpi->proto = greh->protocol; - - options = (__be32 *)(greh + 1); - if (greh->flags & GRE_CSUM) { - if (skb_checksum_simple_validate(skb)) { - *csum_err = true; - return -EINVAL; - } - - skb_checksum_try_convert(skb, IPPROTO_GRE, 0, - null_compute_pseudo); - options++; - } - - if (greh->flags & GRE_KEY) { - tpi->key = *options; - options++; - } else { - tpi->key = 0; - } - if (unlikely(greh->flags & GRE_SEQ)) { - tpi->seq = *options; - options++; - } else { - tpi->seq = 0; - } - /* WCCP version 1 and 2 protocol decoding. 
- * - Change protocol to IP - * - When dealing with WCCPv2, Skip extra 4 bytes in GRE header - */ - if (greh->flags == 0 && tpi->proto == htons(ETH_P_WCCP)) { - tpi->proto = htons(ETH_P_IP); - if ((*(u8 *)options & 0xF0) != 0x40) { - hdr_len += 4; - if (!pskb_may_pull(skb, hdr_len)) - return -EINVAL; - } - } - return iptunnel_pull_header(skb, hdr_len, tpi->proto, false); -} - static void ipgre_err(struct sk_buff *skb, u32 info, const struct tnl_ptk_info *tpi) { @@ -340,12 +221,16 @@ static void gre_err(struct sk_buff *skb, u32 info) const int code = icmp_hdr(skb)->code; struct tnl_ptk_info tpi; bool csum_err = false; + int hdr_len; - if (parse_gre_header(skb, &tpi, &csum_err)) { + if (gre_parse_header(skb, &tpi, &csum_err, &hdr_len)) { if (!csum_err) /* ignore csum errors. */ return; } + if (iptunnel_pull_header(skb, hdr_len, tpi.proto, false)) + return; + if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) { ipv4_update_pmtu(skb, dev_net(skb->dev), info, skb->dev->ifindex, 0, IPPROTO_GRE, 0); @@ -419,6 +304,7 @@ static int gre_rcv(struct sk_buff *skb) { struct tnl_ptk_info tpi; bool csum_err = false; + int hdr_len; #ifdef CONFIG_NET_IPGRE_BROADCAST if (ipv4_is_multicast(ip_hdr(skb)->daddr)) { @@ -428,7 +314,10 @@ static int gre_rcv(struct sk_buff *skb) } #endif - if (parse_gre_header(skb, &tpi, &csum_err) < 0) + if (gre_parse_header(skb, &tpi, &csum_err, &hdr_len) < 0) + goto drop; + + if (iptunnel_pull_header(skb, hdr_len, tpi.proto, false)) goto drop; if (ipgre_rcv(skb, &tpi) == PACKET_RCVD) @@ -460,7 +349,7 @@ static void build_header(struct sk_buff *skb, int hdr_len, __be16 flags, skb_reset_transport_header(skb); greh = (struct gre_base_hdr *)skb->data; - greh->flags = tnl_flags_to_gre_flags(flags); + greh->flags = gre_tnl_flags_to_gre_flags(flags); greh->protocol = proto; if (flags & (TUNNEL_KEY | TUNNEL_CSUM | TUNNEL_SEQ)) { @@ -552,7 +441,7 @@ static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev) fl.saddr); } - tunnel_hlen = ip_gre_calc_hlen(key->tun_flags); + tunnel_hlen = gre_calc_hlen(key->tun_flags); min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len + tunnel_hlen + sizeof(struct iphdr); @@ -694,8 +583,8 @@ static int ipgre_tunnel_ioctl(struct net_device *dev, if (err) return err; - p.i_flags = tnl_flags_to_gre_flags(p.i_flags); - p.o_flags = tnl_flags_to_gre_flags(p.o_flags); + p.i_flags = gre_tnl_flags_to_gre_flags(p.i_flags); + p.o_flags = gre_tnl_flags_to_gre_flags(p.o_flags); if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p))) return -EFAULT; @@ -739,7 +628,7 @@ static int ipgre_header(struct sk_buff *skb, struct net_device *dev, iph = (struct iphdr *)skb_push(skb, t->hlen + sizeof(*iph)); greh = (struct gre_base_hdr *)(iph+1); - greh->flags = tnl_flags_to_gre_flags(t->parms.o_flags); + greh->flags = gre_tnl_flags_to_gre_flags(t->parms.o_flags); greh->protocol = htons(type); memcpy(iph, &t->parms.iph, sizeof(struct iphdr)); @@ -840,7 +729,7 @@ static void __gre_tunnel_init(struct net_device *dev) int t_hlen; tunnel = netdev_priv(dev); - tunnel->tun_hlen = ip_gre_calc_hlen(tunnel->parms.o_flags); + tunnel->tun_hlen = gre_calc_hlen(tunnel->parms.o_flags); tunnel->parms.iph.protocol = IPPROTO_GRE; tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen; @@ -1155,8 +1044,10 @@ static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev) struct ip_tunnel_parm *p = &t->parms; if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) || - nla_put_be16(skb, IFLA_GRE_IFLAGS, tnl_flags_to_gre_flags(p->i_flags)) || - nla_put_be16(skb, 
IFLA_GRE_OFLAGS, tnl_flags_to_gre_flags(p->o_flags)) || + nla_put_be16(skb, IFLA_GRE_IFLAGS, + gre_tnl_flags_to_gre_flags(p->i_flags)) || + nla_put_be16(skb, IFLA_GRE_OFLAGS, + gre_tnl_flags_to_gre_flags(p->o_flags)) || nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) || nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) || nla_put_in_addr(skb, IFLA_GRE_LOCAL, p->iph.saddr) || From 308edfdf1563f78e93ebda9aee608279de1c5898 Mon Sep 17 00:00:00 2001 From: Tom Herbert Date: Fri, 29 Apr 2016 17:12:17 -0700 Subject: [PATCH 1227/1649] gre6: Cleanup GREv6 receive path, call common GRE functions - Create gre_rcv function. This calls gre_parse_header and ip6gre_rcv. - Call ip6_tnl_rcv. Doing this and using gre_parse_header eliminates most of the code in ip6gre_rcv. Signed-off-by: Tom Herbert Signed-off-by: David S. Miller --- net/ipv6/ip6_gre.c | 150 +++++++++------------------------------------ 1 file changed, 28 insertions(+), 122 deletions(-) diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index ca5a2c5675c5..9b33745761ca 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c @@ -54,6 +54,7 @@ #include #include #include +#include static bool log_ecn_error = true; @@ -443,137 +444,40 @@ static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt, t->err_time = jiffies; } -static int ip6gre_rcv(struct sk_buff *skb) +static int ip6gre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi) { const struct ipv6hdr *ipv6h; - u8 *h; - __be16 flags; - __sum16 csum = 0; - __be32 key = 0; - u32 seqno = 0; struct ip6_tnl *tunnel; - int offset = 4; - __be16 gre_proto; - int err; - - if (!pskb_may_pull(skb, sizeof(struct in6_addr))) - goto drop; ipv6h = ipv6_hdr(skb); - h = skb->data; - flags = *(__be16 *)h; - - if (flags&(GRE_CSUM|GRE_KEY|GRE_ROUTING|GRE_SEQ|GRE_VERSION)) { - /* - Version must be 0. - - We do not support routing headers. - */ - if (flags&(GRE_VERSION|GRE_ROUTING)) - goto drop; - - if (flags&GRE_CSUM) { - csum = skb_checksum_simple_validate(skb); - offset += 4; - } - if (flags&GRE_KEY) { - key = *(__be32 *)(h + offset); - offset += 4; - } - if (flags&GRE_SEQ) { - seqno = ntohl(*(__be32 *)(h + offset)); - offset += 4; - } - } - - gre_proto = *(__be16 *)(h + 2); - tunnel = ip6gre_tunnel_lookup(skb->dev, - &ipv6h->saddr, &ipv6h->daddr, key, - gre_proto); + &ipv6h->saddr, &ipv6h->daddr, tpi->key, + tpi->proto); if (tunnel) { - struct pcpu_sw_netstats *tstats; + ip6_tnl_rcv(tunnel, skb, tpi, NULL, false); - if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) - goto drop; - - if (!ip6_tnl_rcv_ctl(tunnel, &ipv6h->daddr, &ipv6h->saddr)) { - tunnel->dev->stats.rx_dropped++; - goto drop; - } - - skb->protocol = gre_proto; - /* WCCP version 1 and 2 protocol decoding. 
- * - Change protocol to IPv6 - * - When dealing with WCCPv2, Skip extra 4 bytes in GRE header - */ - if (flags == 0 && gre_proto == htons(ETH_P_WCCP)) { - skb->protocol = htons(ETH_P_IPV6); - if ((*(h + offset) & 0xF0) != 0x40) - offset += 4; - } - - skb->mac_header = skb->network_header; - __pskb_pull(skb, offset); - skb_postpull_rcsum(skb, skb_transport_header(skb), offset); - - if (((flags&GRE_CSUM) && csum) || - (!(flags&GRE_CSUM) && tunnel->parms.i_flags&GRE_CSUM)) { - tunnel->dev->stats.rx_crc_errors++; - tunnel->dev->stats.rx_errors++; - goto drop; - } - if (tunnel->parms.i_flags&GRE_SEQ) { - if (!(flags&GRE_SEQ) || - (tunnel->i_seqno && - (s32)(seqno - tunnel->i_seqno) < 0)) { - tunnel->dev->stats.rx_fifo_errors++; - tunnel->dev->stats.rx_errors++; - goto drop; - } - tunnel->i_seqno = seqno + 1; - } - - /* Warning: All skb pointers will be invalidated! */ - if (tunnel->dev->type == ARPHRD_ETHER) { - if (!pskb_may_pull(skb, ETH_HLEN)) { - tunnel->dev->stats.rx_length_errors++; - tunnel->dev->stats.rx_errors++; - goto drop; - } - - ipv6h = ipv6_hdr(skb); - skb->protocol = eth_type_trans(skb, tunnel->dev); - skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN); - } - - __skb_tunnel_rx(skb, tunnel->dev, tunnel->net); - - skb_reset_network_header(skb); - - err = IP6_ECN_decapsulate(ipv6h, skb); - if (unlikely(err)) { - if (log_ecn_error) - net_info_ratelimited("non-ECT from %pI6 with dsfield=%#x\n", - &ipv6h->saddr, - ipv6_get_dsfield(ipv6h)); - if (err > 1) { - ++tunnel->dev->stats.rx_frame_errors; - ++tunnel->dev->stats.rx_errors; - goto drop; - } - } - - tstats = this_cpu_ptr(tunnel->dev->tstats); - u64_stats_update_begin(&tstats->syncp); - tstats->rx_packets++; - tstats->rx_bytes += skb->len; - u64_stats_update_end(&tstats->syncp); - - netif_rx(skb); - - return 0; + return PACKET_RCVD; } - icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0); + return PACKET_REJECT; +} + +static int gre_rcv(struct sk_buff *skb) +{ + struct tnl_ptk_info tpi; + bool csum_err = false; + int hdr_len; + + if (gre_parse_header(skb, &tpi, &csum_err, &hdr_len) < 0) + goto drop; + + if (iptunnel_pull_header(skb, hdr_len, tpi.proto, false)) + goto drop; + + if (ip6gre_rcv(skb, &tpi) == PACKET_RCVD) + return 0; + + icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0); drop: kfree_skb(skb); return 0; @@ -1075,6 +979,8 @@ static int ip6gre_tunnel_ioctl(struct net_device *dev, struct net *net = t->net; struct ip6gre_net *ign = net_generic(net, ip6gre_net_id); + memset(&p1, 0, sizeof(p1)); + switch (cmd) { case SIOCGETTUNNEL: if (dev == ign->fb_tunnel_dev) { @@ -1318,7 +1224,7 @@ static void ip6gre_fb_tunnel_init(struct net_device *dev) static struct inet6_protocol ip6gre_protocol __read_mostly = { - .handler = ip6gre_rcv, + .handler = gre_rcv, .err_handler = ip6gre_err, .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL, }; From 8eb30be0352d09165e94a41fef1c7b994dca0714 Mon Sep 17 00:00:00 2001 From: Tom Herbert Date: Fri, 29 Apr 2016 17:12:18 -0700 Subject: [PATCH 1228/1649] ipv6: Create ip6_tnl_xmit This patch renames ip6_tnl_xmit2 to ip6_tnl_xmit and exports it. Other users like GRE will be able to call this. The original ip6_tnl_xmit function is renamed to ip6_tnl_start_xmit (this is an ndo_start_xmit function). Signed-off-by: Tom Herbert Signed-off-by: David S. 
Miller --- include/net/ip6_tunnel.h | 2 ++ net/ipv6/ip6_tunnel.c | 47 +++++++++++++++++++++++++--------------- 2 files changed, 32 insertions(+), 17 deletions(-) diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h index eab3a9b19ae0..835491bd5636 100644 --- a/include/net/ip6_tunnel.h +++ b/include/net/ip6_tunnel.h @@ -69,6 +69,8 @@ int ip6_tnl_rcv(struct ip6_tnl *tunnel, struct sk_buff *skb, bool log_ecn_error); int ip6_tnl_xmit_ctl(struct ip6_tnl *t, const struct in6_addr *laddr, const struct in6_addr *raddr); +int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield, + struct flowi6 *fl6, int encap_limit, __u32 *pmtu, __u8 proto); __u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw); __u32 ip6_tnl_get_cap(struct ip6_tnl *t, const struct in6_addr *laddr, const struct in6_addr *raddr); diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index 94ed065eff7f..b1f31d2b17cd 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c @@ -979,13 +979,14 @@ int ip6_tnl_xmit_ctl(struct ip6_tnl *t, EXPORT_SYMBOL_GPL(ip6_tnl_xmit_ctl); /** - * ip6_tnl_xmit2 - encapsulate packet and send + * ip6_tnl_xmit - encapsulate packet and send * @skb: the outgoing socket buffer * @dev: the outgoing tunnel device * @dsfield: dscp code for outer header - * @fl: flow of tunneled packet + * @fl6: flow of tunneled packet * @encap_limit: encapsulation limit * @pmtu: Path MTU is stored if packet is too big + * @proto: next header value * * Description: * Build new header and do some sanity checks on the packet before sending @@ -997,12 +998,9 @@ EXPORT_SYMBOL_GPL(ip6_tnl_xmit_ctl); * %-EMSGSIZE message too big. return mtu in this case. **/ -static int ip6_tnl_xmit2(struct sk_buff *skb, - struct net_device *dev, - __u8 dsfield, - struct flowi6 *fl6, - int encap_limit, - __u32 *pmtu) +int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield, + struct flowi6 *fl6, int encap_limit, __u32 *pmtu, + __u8 proto) { struct ip6_tnl *t = netdev_priv(dev); struct net *net = t->net; @@ -1013,7 +1011,6 @@ static int ip6_tnl_xmit2(struct sk_buff *skb, struct net_device *tdev; int mtu; unsigned int max_headroom = sizeof(struct ipv6hdr); - u8 proto; int err = -1; /* NBMA tunnel */ @@ -1075,12 +1072,23 @@ static int ip6_tnl_xmit2(struct sk_buff *skb, mtu = IPV6_MIN_MTU; if (skb_dst(skb)) skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu); - if (skb->len > mtu) { + if (skb->len > mtu && !skb_is_gso(skb)) { *pmtu = mtu; err = -EMSGSIZE; goto tx_err_dst_release; } + if (t->err_count > 0) { + if (time_before(jiffies, + t->err_time + IP6TUNNEL_ERR_TIMEO)) { + t->err_count--; + + dst_link_failure(skb); + } else { + t->err_count = 0; + } + } + skb_scrub_packet(skb, !net_eq(t->net, dev_net(dev))); /* @@ -1108,7 +1116,6 @@ static int ip6_tnl_xmit2(struct sk_buff *skb, skb->transport_header = skb->network_header; - proto = fl6->flowi6_proto; if (encap_limit >= 0) { init_tel_txopt(&opt, encap_limit); ipv6_push_nfrag_opts(skb, &opt.ops, &proto, NULL); @@ -1119,6 +1126,11 @@ static int ip6_tnl_xmit2(struct sk_buff *skb, skb->encapsulation = 1; } + max_headroom = LL_RESERVED_SPACE(dst->dev) + sizeof(struct ipv6hdr) + + dst->header_len; + if (max_headroom > dev->needed_headroom) + dev->needed_headroom = max_headroom; + skb_push(skb, sizeof(struct ipv6hdr)); skb_reset_network_header(skb); ipv6h = ipv6_hdr(skb); @@ -1137,6 +1149,7 @@ tx_err_dst_release: dst_release(dst); return err; } +EXPORT_SYMBOL(ip6_tnl_xmit); static inline int ip4ip6_tnl_xmit(struct sk_buff *skb, struct 
net_device *dev) @@ -1160,7 +1173,6 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) encap_limit = t->parms.encap_limit; memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6)); - fl6.flowi6_proto = IPPROTO_IPIP; dsfield = ipv4_get_dsfield(iph); @@ -1170,7 +1182,8 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK) fl6.flowi6_mark = skb->mark; - err = ip6_tnl_xmit2(skb, dev, dsfield, &fl6, encap_limit, &mtu); + err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu, + IPPROTO_IPIP); if (err != 0) { /* XXX: send ICMP error even if DF is not set. */ if (err == -EMSGSIZE) @@ -1214,7 +1227,6 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) encap_limit = t->parms.encap_limit; memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6)); - fl6.flowi6_proto = IPPROTO_IPV6; dsfield = ipv6_get_dsfield(ipv6h); if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS) @@ -1224,7 +1236,8 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK) fl6.flowi6_mark = skb->mark; - err = ip6_tnl_xmit2(skb, dev, dsfield, &fl6, encap_limit, &mtu); + err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu, + IPPROTO_IPV6); if (err != 0) { if (err == -EMSGSIZE) icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); @@ -1235,7 +1248,7 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) } static netdev_tx_t -ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) +ip6_tnl_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct ip6_tnl *t = netdev_priv(dev); struct net_device_stats *stats = &t->dev->stats; @@ -1556,7 +1569,7 @@ EXPORT_SYMBOL(ip6_tnl_get_iflink); static const struct net_device_ops ip6_tnl_netdev_ops = { .ndo_init = ip6_tnl_dev_init, .ndo_uninit = ip6_tnl_dev_uninit, - .ndo_start_xmit = ip6_tnl_xmit, + .ndo_start_xmit = ip6_tnl_start_xmit, .ndo_do_ioctl = ip6_tnl_ioctl, .ndo_change_mtu = ip6_tnl_change_mtu, .ndo_get_stats = ip6_get_stats, From 182a352d2d5e0b435f7856c0cc23d467dcec55ef Mon Sep 17 00:00:00 2001 From: Tom Herbert Date: Fri, 29 Apr 2016 17:12:19 -0700 Subject: [PATCH 1229/1649] gre: Create common functions for transmit Create common functions for both IPv4 and IPv6 GRE in transmit. These are put into gre.h. Common functions are for: - GRE checksum calculation. Move gre_checksum to gre.h. - Building a GRE header. Move GRE build_header and rename gre_build_header. Signed-off-by: Tom Herbert Signed-off-by: David S. 
Miller --- include/net/gre.h | 44 +++++++++++++++++++++++++++++++++++++++ net/ipv4/ip_gre.c | 52 +++++------------------------------------------ 2 files changed, 49 insertions(+), 47 deletions(-) diff --git a/include/net/gre.h b/include/net/gre.h index 39591584ec92..29e37322c06e 100644 --- a/include/net/gre.h +++ b/include/net/gre.h @@ -85,4 +85,48 @@ static inline __be16 gre_tnl_flags_to_gre_flags(__be16 tflags) return flags; } +static inline __sum16 gre_checksum(struct sk_buff *skb) +{ + __wsum csum; + + if (skb->ip_summed == CHECKSUM_PARTIAL) + csum = lco_csum(skb); + else + csum = skb_checksum(skb, 0, skb->len, 0); + return csum_fold(csum); +} + +static inline void gre_build_header(struct sk_buff *skb, int hdr_len, + __be16 flags, __be16 proto, + __be32 key, __be32 seq) +{ + struct gre_base_hdr *greh; + + skb_push(skb, hdr_len); + + skb_reset_transport_header(skb); + greh = (struct gre_base_hdr *)skb->data; + greh->flags = gre_tnl_flags_to_gre_flags(flags); + greh->protocol = proto; + + if (flags & (TUNNEL_KEY | TUNNEL_CSUM | TUNNEL_SEQ)) { + __be32 *ptr = (__be32 *)(((u8 *)greh) + hdr_len - 4); + + if (flags & TUNNEL_SEQ) { + *ptr = seq; + ptr--; + } + if (flags & TUNNEL_KEY) { + *ptr = key; + ptr--; + } + if (flags & TUNNEL_CSUM && + !(skb_shinfo(skb)->gso_type & + (SKB_GSO_GRE | SKB_GSO_GRE_CSUM))) { + *ptr = 0; + *(__sum16 *)ptr = gre_checksum(skb); + } + } +} + #endif diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index f6db3d6a4d4d..2480d79b0e37 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c @@ -329,49 +329,6 @@ drop: return 0; } -static __sum16 gre_checksum(struct sk_buff *skb) -{ - __wsum csum; - - if (skb->ip_summed == CHECKSUM_PARTIAL) - csum = lco_csum(skb); - else - csum = skb_checksum(skb, 0, skb->len, 0); - return csum_fold(csum); -} - -static void build_header(struct sk_buff *skb, int hdr_len, __be16 flags, - __be16 proto, __be32 key, __be32 seq) -{ - struct gre_base_hdr *greh; - - skb_push(skb, hdr_len); - - skb_reset_transport_header(skb); - greh = (struct gre_base_hdr *)skb->data; - greh->flags = gre_tnl_flags_to_gre_flags(flags); - greh->protocol = proto; - - if (flags & (TUNNEL_KEY | TUNNEL_CSUM | TUNNEL_SEQ)) { - __be32 *ptr = (__be32 *)(((u8 *)greh) + hdr_len - 4); - - if (flags & TUNNEL_SEQ) { - *ptr = seq; - ptr--; - } - if (flags & TUNNEL_KEY) { - *ptr = key; - ptr--; - } - if (flags & TUNNEL_CSUM && - !(skb_shinfo(skb)->gso_type & - (SKB_GSO_GRE | SKB_GSO_GRE_CSUM))) { - *ptr = 0; - *(__sum16 *)ptr = gre_checksum(skb); - } - } -} - static void __gre_xmit(struct sk_buff *skb, struct net_device *dev, const struct iphdr *tnl_params, __be16 proto) @@ -382,8 +339,9 @@ static void __gre_xmit(struct sk_buff *skb, struct net_device *dev, tunnel->o_seqno++; /* Push GRE header. */ - build_header(skb, tunnel->tun_hlen, tunnel->parms.o_flags, - proto, tunnel->parms.o_key, htonl(tunnel->o_seqno)); + gre_build_header(skb, tunnel->tun_hlen, + tunnel->parms.o_flags, proto, tunnel->parms.o_key, + htonl(tunnel->o_seqno)); skb_set_inner_protocol(skb, proto); ip_tunnel_xmit(skb, dev, tnl_params, tnl_params->protocol); @@ -460,8 +418,8 @@ static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev) goto err_free_rt; flags = tun_info->key.tun_flags & (TUNNEL_CSUM | TUNNEL_KEY); - build_header(skb, tunnel_hlen, flags, htons(ETH_P_TEB), - tunnel_id_to_key(tun_info->key.tun_id), 0); + gre_build_header(skb, tunnel_hlen, flags, htons(ETH_P_TEB), + tunnel_id_to_key(tun_info->key.tun_id), 0); df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? 
htons(IP_DF) : 0; From 79ecb90e65f33d1941ac1f8e43eec34ec3bdbad8 Mon Sep 17 00:00:00 2001 From: Tom Herbert Date: Fri, 29 Apr 2016 17:12:20 -0700 Subject: [PATCH 1230/1649] ipv6: Generic tunnel cleanup A few generic changes to generalize tunnels in IPv6: - Export ip6_tnl_change_mtu so that it can be called by ip6_gre - Add tun_hlen to ip6_tnl structure. Signed-off-by: Tom Herbert Signed-off-by: David S. Miller --- include/net/ip6_tunnel.h | 5 ++++- net/ipv6/ip6_tunnel.c | 7 +++++-- 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h index 835491bd5636..fb9e0153f4f2 100644 --- a/include/net/ip6_tunnel.h +++ b/include/net/ip6_tunnel.h @@ -50,8 +50,10 @@ struct ip6_tnl { /* These fields used only by GRE */ __u32 i_seqno; /* The last seen seqno */ __u32 o_seqno; /* The last output seqno */ - int hlen; /* Precalculated GRE header length */ + int hlen; /* tun_hlen + encap_hlen */ + int tun_hlen; /* Precalculated header length */ int mlink; + }; /* Tunnel encapsulation limit destination sub-option */ @@ -76,6 +78,7 @@ __u32 ip6_tnl_get_cap(struct ip6_tnl *t, const struct in6_addr *laddr, const struct in6_addr *raddr); struct net *ip6_tnl_get_link_net(const struct net_device *dev); int ip6_tnl_get_iflink(const struct net_device *dev); +int ip6_tnl_change_mtu(struct net_device *dev, int new_mtu); #ifdef CONFIG_INET static inline void ip6tunnel_xmit(struct sock *sk, struct sk_buff *skb, diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index b1f31d2b17cd..ade55af6ace6 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c @@ -1540,8 +1540,7 @@ ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) * %-EINVAL if mtu too small **/ -static int -ip6_tnl_change_mtu(struct net_device *dev, int new_mtu) +int ip6_tnl_change_mtu(struct net_device *dev, int new_mtu) { struct ip6_tnl *tnl = netdev_priv(dev); @@ -1557,6 +1556,7 @@ ip6_tnl_change_mtu(struct net_device *dev, int new_mtu) dev->mtu = new_mtu; return 0; } +EXPORT_SYMBOL(ip6_tnl_change_mtu); int ip6_tnl_get_iflink(const struct net_device *dev) { @@ -1632,6 +1632,9 @@ ip6_tnl_dev_init_gen(struct net_device *dev) if (ret) goto destroy_dst; + t->hlen = 0; + t->tun_hlen = 0; + return 0; destroy_dst: From b05229f442288210f2b1f5b4b2a9d71836e90686 Mon Sep 17 00:00:00 2001 From: Tom Herbert Date: Fri, 29 Apr 2016 17:12:21 -0700 Subject: [PATCH 1231/1649] gre6: Cleanup GREv6 transmit path, call common GRE functions Changes in GREv6 transmit path: - Call gre_checksum, remove gre6_checksum - Rename ip6gre_xmit2 to __gre6_xmit - Call gre_build_header utility function - Call ip6_tnl_xmit common function - Call ip6_tnl_change_mtu, eliminate ip6gre_tunnel_change_mtu Signed-off-by: Tom Herbert Signed-off-by: David S. 
Miller --- net/ipv6/ip6_gre.c | 252 +++++++++------------------------------------ 1 file changed, 50 insertions(+), 202 deletions(-) diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index 9b33745761ca..10127741a60d 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c @@ -488,199 +488,40 @@ struct ipv6_tel_txoption { __u8 dst_opt[8]; }; -static void init_tel_txopt(struct ipv6_tel_txoption *opt, __u8 encap_limit) +static int gre_handle_offloads(struct sk_buff *skb, bool csum) { - memset(opt, 0, sizeof(struct ipv6_tel_txoption)); - - opt->dst_opt[2] = IPV6_TLV_TNL_ENCAP_LIMIT; - opt->dst_opt[3] = 1; - opt->dst_opt[4] = encap_limit; - opt->dst_opt[5] = IPV6_TLV_PADN; - opt->dst_opt[6] = 1; - - opt->ops.dst0opt = (struct ipv6_opt_hdr *) opt->dst_opt; - opt->ops.opt_nflen = 8; + return iptunnel_handle_offloads(skb, + csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE); } -static __sum16 gre6_checksum(struct sk_buff *skb) -{ - __wsum csum; - - if (skb->ip_summed == CHECKSUM_PARTIAL) - csum = lco_csum(skb); - else - csum = skb_checksum(skb, sizeof(struct ipv6hdr), - skb->len - sizeof(struct ipv6hdr), 0); - return csum_fold(csum); -} - -static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb, - struct net_device *dev, - __u8 dsfield, - struct flowi6 *fl6, - int encap_limit, - __u32 *pmtu) +static netdev_tx_t __gre6_xmit(struct sk_buff *skb, + struct net_device *dev, __u8 dsfield, + struct flowi6 *fl6, int encap_limit, + __u32 *pmtu, __be16 proto) { struct ip6_tnl *tunnel = netdev_priv(dev); - struct net *net = tunnel->net; - struct net_device *tdev; /* Device to other host */ - struct ipv6hdr *ipv6h; /* Our new IP header */ - unsigned int min_headroom = 0; /* The extra header space needed */ - int gre_hlen; - struct ipv6_tel_txoption opt; - int mtu; - struct dst_entry *dst = NULL, *ndst = NULL; - struct net_device_stats *stats = &tunnel->dev->stats; - int err = -1; - u8 proto; - __be16 protocol; + __be16 protocol = (dev->type == ARPHRD_ETHER) ? + htons(ETH_P_TEB) : proto; if (dev->type == ARPHRD_ETHER) IPCB(skb)->flags = 0; - if (dev->header_ops && dev->type == ARPHRD_IP6GRE) { - gre_hlen = 0; - ipv6h = (struct ipv6hdr *)skb->data; - fl6->daddr = ipv6h->daddr; - } else { - gre_hlen = tunnel->hlen; + if (dev->header_ops && dev->type == ARPHRD_IP6GRE) + fl6->daddr = ((struct ipv6hdr *)skb->data)->daddr; + else fl6->daddr = tunnel->parms.raddr; - } - if (!fl6->flowi6_mark) - dst = dst_cache_get(&tunnel->dst_cache); + if (tunnel->parms.o_flags & TUNNEL_SEQ) + tunnel->o_seqno++; - if (!dst) { - dst = ip6_route_output(net, NULL, fl6); + /* Push GRE header. 
*/ + gre_build_header(skb, tunnel->tun_hlen, tunnel->parms.o_flags, + protocol, tunnel->parms.o_key, htonl(tunnel->o_seqno)); - if (dst->error) - goto tx_err_link_failure; - dst = xfrm_lookup(net, dst, flowi6_to_flowi(fl6), NULL, 0); - if (IS_ERR(dst)) { - err = PTR_ERR(dst); - dst = NULL; - goto tx_err_link_failure; - } - ndst = dst; - } + skb_set_inner_protocol(skb, proto); - tdev = dst->dev; - - if (tdev == dev) { - stats->collisions++; - net_warn_ratelimited("%s: Local routing loop detected!\n", - tunnel->parms.name); - goto tx_err_dst_release; - } - - mtu = dst_mtu(dst) - sizeof(*ipv6h); - if (encap_limit >= 0) { - min_headroom += 8; - mtu -= 8; - } - if (mtu < IPV6_MIN_MTU) - mtu = IPV6_MIN_MTU; - if (skb_dst(skb)) - skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu); - if (skb->len > mtu && !skb_is_gso(skb)) { - *pmtu = mtu; - err = -EMSGSIZE; - goto tx_err_dst_release; - } - - if (tunnel->err_count > 0) { - if (time_before(jiffies, - tunnel->err_time + IP6TUNNEL_ERR_TIMEO)) { - tunnel->err_count--; - - dst_link_failure(skb); - } else - tunnel->err_count = 0; - } - - skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(dev))); - - min_headroom += LL_RESERVED_SPACE(tdev) + gre_hlen + dst->header_len; - - if (skb_headroom(skb) < min_headroom || skb_header_cloned(skb)) { - int head_delta = SKB_DATA_ALIGN(min_headroom - - skb_headroom(skb) + - 16); - - err = pskb_expand_head(skb, max_t(int, head_delta, 0), - 0, GFP_ATOMIC); - if (min_headroom > dev->needed_headroom) - dev->needed_headroom = min_headroom; - if (unlikely(err)) - goto tx_err_dst_release; - } - - if (!fl6->flowi6_mark && ndst) - dst_cache_set_ip6(&tunnel->dst_cache, ndst, &fl6->saddr); - skb_dst_set(skb, dst); - - proto = NEXTHDR_GRE; - if (encap_limit >= 0) { - init_tel_txopt(&opt, encap_limit); - ipv6_push_nfrag_opts(skb, &opt.ops, &proto, NULL); - } - - err = iptunnel_handle_offloads(skb, - (tunnel->parms.o_flags & GRE_CSUM) ? - SKB_GSO_GRE_CSUM : SKB_GSO_GRE); - if (err) - goto tx_err_dst_release; - - skb_push(skb, gre_hlen); - skb_reset_network_header(skb); - skb_set_transport_header(skb, sizeof(*ipv6h)); - - /* - * Push down and install the IP header. - */ - ipv6h = ipv6_hdr(skb); - ip6_flow_hdr(ipv6h, INET_ECN_encapsulate(0, dsfield), - ip6_make_flowlabel(net, skb, fl6->flowlabel, true, fl6)); - ipv6h->hop_limit = tunnel->parms.hop_limit; - ipv6h->nexthdr = proto; - ipv6h->saddr = fl6->saddr; - ipv6h->daddr = fl6->daddr; - - ((__be16 *)(ipv6h + 1))[0] = tunnel->parms.o_flags; - protocol = (dev->type == ARPHRD_ETHER) ? 
- htons(ETH_P_TEB) : skb->protocol; - ((__be16 *)(ipv6h + 1))[1] = protocol; - - if (tunnel->parms.o_flags&(GRE_KEY|GRE_CSUM|GRE_SEQ)) { - __be32 *ptr = (__be32 *)(((u8 *)ipv6h) + tunnel->hlen - 4); - - if (tunnel->parms.o_flags&GRE_SEQ) { - ++tunnel->o_seqno; - *ptr = htonl(tunnel->o_seqno); - ptr--; - } - if (tunnel->parms.o_flags&GRE_KEY) { - *ptr = tunnel->parms.o_key; - ptr--; - } - if ((tunnel->parms.o_flags & GRE_CSUM) && - !(skb_shinfo(skb)->gso_type & - (SKB_GSO_GRE | SKB_GSO_GRE_CSUM))) { - *ptr = 0; - *(__sum16 *)ptr = gre6_checksum(skb); - } - } - - skb_set_inner_protocol(skb, protocol); - - ip6tunnel_xmit(NULL, skb, dev); - return 0; -tx_err_link_failure: - stats->tx_carrier_errors++; - dst_link_failure(skb); -tx_err_dst_release: - dst_release(dst); - return err; + return ip6_tnl_xmit(skb, dev, dsfield, fl6, encap_limit, pmtu, + NEXTHDR_GRE); } static inline int ip6gre_xmit_ipv4(struct sk_buff *skb, struct net_device *dev) @@ -699,7 +540,6 @@ static inline int ip6gre_xmit_ipv4(struct sk_buff *skb, struct net_device *dev) encap_limit = t->parms.encap_limit; memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6)); - fl6.flowi6_proto = IPPROTO_GRE; dsfield = ipv4_get_dsfield(iph); @@ -709,7 +549,12 @@ static inline int ip6gre_xmit_ipv4(struct sk_buff *skb, struct net_device *dev) if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK) fl6.flowi6_mark = skb->mark; - err = ip6gre_xmit2(skb, dev, dsfield, &fl6, encap_limit, &mtu); + err = gre_handle_offloads(skb, !!(t->parms.o_flags & TUNNEL_CSUM)); + if (err) + return -1; + + err = __gre6_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu, + skb->protocol); if (err != 0) { /* XXX: send ICMP error even if DF is not set. */ if (err == -EMSGSIZE) @@ -749,7 +594,6 @@ static inline int ip6gre_xmit_ipv6(struct sk_buff *skb, struct net_device *dev) encap_limit = t->parms.encap_limit; memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6)); - fl6.flowi6_proto = IPPROTO_GRE; dsfield = ipv6_get_dsfield(ipv6h); if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS) @@ -759,7 +603,11 @@ static inline int ip6gre_xmit_ipv6(struct sk_buff *skb, struct net_device *dev) if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK) fl6.flowi6_mark = skb->mark; - err = ip6gre_xmit2(skb, dev, dsfield, &fl6, encap_limit, &mtu); + if (gre_handle_offloads(skb, !!(t->parms.o_flags & TUNNEL_CSUM))) + return -1; + + err = __gre6_xmit(skb, dev, dsfield, &fl6, encap_limit, + &mtu, skb->protocol); if (err != 0) { if (err == -EMSGSIZE) icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); @@ -803,7 +651,11 @@ static int ip6gre_xmit_other(struct sk_buff *skb, struct net_device *dev) memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6)); fl6.flowi6_proto = skb->protocol; - err = ip6gre_xmit2(skb, dev, 0, &fl6, encap_limit, &mtu); + err = gre_handle_offloads(skb, !!(t->parms.o_flags & TUNNEL_CSUM)); + if (err) + return err; + + err = __gre6_xmit(skb, dev, 0, &fl6, encap_limit, &mtu, skb->protocol); return err; } @@ -1080,15 +932,6 @@ done: return err; } -static int ip6gre_tunnel_change_mtu(struct net_device *dev, int new_mtu) -{ - if (new_mtu < 68 || - new_mtu > 0xFFF8 - dev->hard_header_len) - return -EINVAL; - dev->mtu = new_mtu; - return 0; -} - static int ip6gre_header(struct sk_buff *skb, struct net_device *dev, unsigned short type, const void *daddr, const void *saddr, unsigned int len) @@ -1132,7 +975,7 @@ static const struct net_device_ops ip6gre_netdev_ops = { .ndo_uninit = ip6gre_tunnel_uninit, .ndo_start_xmit = ip6gre_tunnel_xmit, .ndo_do_ioctl = ip6gre_tunnel_ioctl, - .ndo_change_mtu = ip6gre_tunnel_change_mtu, + .ndo_change_mtu = 
ip6_tnl_change_mtu, .ndo_get_stats64 = ip_tunnel_get_stats64, .ndo_get_iflink = ip6_tnl_get_iflink, }; @@ -1148,17 +991,11 @@ static void ip6gre_dev_free(struct net_device *dev) static void ip6gre_tunnel_setup(struct net_device *dev) { - struct ip6_tnl *t; - dev->netdev_ops = &ip6gre_netdev_ops; dev->destructor = ip6gre_dev_free; dev->type = ARPHRD_IP6GRE; - dev->hard_header_len = LL_MAX_HEADER + sizeof(struct ipv6hdr) + 4; - dev->mtu = ETH_DATA_LEN - sizeof(struct ipv6hdr) - 4; - t = netdev_priv(dev); - if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) - dev->mtu -= 8; + dev->flags |= IFF_NOARP; dev->addr_len = sizeof(struct in6_addr); netif_keep_dst(dev); @@ -1168,6 +1005,7 @@ static int ip6gre_tunnel_init_common(struct net_device *dev) { struct ip6_tnl *tunnel; int ret; + int t_hlen; tunnel = netdev_priv(dev); @@ -1186,6 +1024,16 @@ static int ip6gre_tunnel_init_common(struct net_device *dev) return ret; } + tunnel->tun_hlen = gre_calc_hlen(tunnel->parms.o_flags); + + t_hlen = tunnel->hlen + sizeof(struct ipv6hdr); + + dev->needed_headroom = LL_MAX_HEADER + t_hlen + 4; + dev->mtu = ETH_DATA_LEN - t_hlen - 4; + + if (!(tunnel->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) + dev->mtu -= 8; + return 0; } @@ -1420,7 +1268,7 @@ static const struct net_device_ops ip6gre_tap_netdev_ops = { .ndo_start_xmit = ip6gre_tunnel_xmit, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, - .ndo_change_mtu = ip6gre_tunnel_change_mtu, + .ndo_change_mtu = ip6_tnl_change_mtu, .ndo_get_stats64 = ip_tunnel_get_stats64, .ndo_get_iflink = ip6_tnl_get_iflink, }; From e8872a25a05efcf0a133ca7ed6511fe9f908dc41 Mon Sep 17 00:00:00 2001 From: Nikolay Aleksandrov Date: Sat, 30 Apr 2016 10:25:26 +0200 Subject: [PATCH 1232/1649] net: rtnetlink: allow rtnl_fill_statsinfo to save private state counter The new prividx argument allows the current dumping device to save a private state counter which would enable it to continue dumping from where it left off. And the idxattr is used to save the current idx user so multiple prividx using attributes can be requested at the same time as suggested by Roopa Prabhu. Signed-off-by: Nikolay Aleksandrov Signed-off-by: David S. 
Miller --- net/core/rtnetlink.c | 46 ++++++++++++++++++++++++++++++-------------- 1 file changed, 32 insertions(+), 14 deletions(-) diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 5503dfe6a050..de529a20cd18 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -3444,13 +3444,21 @@ out: return err; } +static bool stats_attr_valid(unsigned int mask, int attrid, int idxattr) +{ + return (mask & IFLA_STATS_FILTER_BIT(attrid)) && + (!idxattr || idxattr == attrid); +} + static int rtnl_fill_statsinfo(struct sk_buff *skb, struct net_device *dev, int type, u32 pid, u32 seq, u32 change, - unsigned int flags, unsigned int filter_mask) + unsigned int flags, unsigned int filter_mask, + int *idxattr, int *prividx) { struct if_stats_msg *ifsm; struct nlmsghdr *nlh; struct nlattr *attr; + int s_prividx = *prividx; ASSERT_RTNL(); @@ -3462,7 +3470,7 @@ static int rtnl_fill_statsinfo(struct sk_buff *skb, struct net_device *dev, ifsm->ifindex = dev->ifindex; ifsm->filter_mask = filter_mask; - if (filter_mask & IFLA_STATS_FILTER_BIT(IFLA_STATS_LINK_64)) { + if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_64, *idxattr)) { struct rtnl_link_stats64 *sp; attr = nla_reserve_64bit(skb, IFLA_STATS_LINK_64, @@ -3480,7 +3488,11 @@ static int rtnl_fill_statsinfo(struct sk_buff *skb, struct net_device *dev, return 0; nla_put_failure: - nlmsg_cancel(skb, nlh); + /* not a multi message or no progress mean a real error */ + if (!(flags & NLM_F_MULTI) || s_prividx == *prividx) + nlmsg_cancel(skb, nlh); + else + nlmsg_end(skb, nlh); return -EMSGSIZE; } @@ -3494,7 +3506,7 @@ static size_t if_nlmsg_stats_size(const struct net_device *dev, { size_t size = 0; - if (filter_mask & IFLA_STATS_FILTER_BIT(IFLA_STATS_LINK_64)) + if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_64, 0)) size += nla_total_size_64bit(sizeof(struct rtnl_link_stats64)); return size; @@ -3503,8 +3515,9 @@ static size_t if_nlmsg_stats_size(const struct net_device *dev, static int rtnl_stats_get(struct sk_buff *skb, struct nlmsghdr *nlh) { struct net *net = sock_net(skb->sk); - struct if_stats_msg *ifsm; struct net_device *dev = NULL; + int idxattr = 0, prividx = 0; + struct if_stats_msg *ifsm; struct sk_buff *nskb; u32 filter_mask; int err; @@ -3528,7 +3541,7 @@ static int rtnl_stats_get(struct sk_buff *skb, struct nlmsghdr *nlh) err = rtnl_fill_statsinfo(nskb, dev, RTM_NEWSTATS, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0, - 0, filter_mask); + 0, filter_mask, &idxattr, &prividx); if (err < 0) { /* -EMSGSIZE implies BUG in if_nlmsg_stats_size */ WARN_ON(err == -EMSGSIZE); @@ -3542,18 +3555,19 @@ static int rtnl_stats_get(struct sk_buff *skb, struct nlmsghdr *nlh) static int rtnl_stats_dump(struct sk_buff *skb, struct netlink_callback *cb) { + int h, s_h, err, s_idx, s_idxattr, s_prividx; struct net *net = sock_net(skb->sk); - struct if_stats_msg *ifsm; - int h, s_h; - int idx = 0, s_idx; - struct net_device *dev; - struct hlist_head *head; unsigned int flags = NLM_F_MULTI; + struct if_stats_msg *ifsm; + struct hlist_head *head; + struct net_device *dev; u32 filter_mask = 0; - int err; + int idx = 0; s_h = cb->args[0]; s_idx = cb->args[1]; + s_idxattr = cb->args[2]; + s_prividx = cb->args[3]; cb->seq = net->dev_base_seq; @@ -3571,7 +3585,8 @@ static int rtnl_stats_dump(struct sk_buff *skb, struct netlink_callback *cb) err = rtnl_fill_statsinfo(skb, dev, RTM_NEWSTATS, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, 0, - flags, filter_mask); + flags, filter_mask, + &s_idxattr, &s_prividx); /* If we ran out of room on the first message, 
* we're in trouble */ @@ -3579,13 +3594,16 @@ static int rtnl_stats_dump(struct sk_buff *skb, struct netlink_callback *cb) if (err < 0) goto out; - + s_prividx = 0; + s_idxattr = 0; nl_dump_check_consistent(cb, nlmsg_hdr(skb)); cont: idx++; } } out: + cb->args[3] = s_prividx; + cb->args[2] = s_idxattr; cb->args[1] = idx; cb->args[0] = h; From 97a47facf3468fb6ebd697324fc2a7245755c417 Mon Sep 17 00:00:00 2001 From: Nikolay Aleksandrov Date: Sat, 30 Apr 2016 10:25:27 +0200 Subject: [PATCH 1233/1649] net: rtnetlink: add linkxstats callbacks and attribute Add callbacks to calculate the size and fill link extended statistics which can be split into multiple messages and are dumped via the new rtnl stats API (RTM_GETSTATS) with the IFLA_STATS_LINK_XSTATS attribute. Also add that attribute to the idx mask check since it is expected to be able to save state and resume dumping (e.g. future bridge per-vlan stats will be dumped via this attribute and callbacks). Each link type should nest its private attributes under the per-link type attribute. This allows to have any number of separated private attributes and to avoid one call to get the dev link type. Signed-off-by: Nikolay Aleksandrov Signed-off-by: David S. Miller --- include/net/rtnetlink.h | 7 +++++++ include/uapi/linux/if_link.h | 12 ++++++++++++ net/core/rtnetlink.c | 30 ++++++++++++++++++++++++++++++ 3 files changed, 49 insertions(+) diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h index 2f87c1ba13de..006a7b81d758 100644 --- a/include/net/rtnetlink.h +++ b/include/net/rtnetlink.h @@ -47,6 +47,9 @@ static inline int rtnl_msg_family(const struct nlmsghdr *nlh) * @get_num_rx_queues: Function to determine number of receive queues * to create when creating a new device. * @get_link_net: Function to get the i/o netns of the device + * @get_linkxstats_size: Function to calculate the required room for + * dumping device-specific extended link stats + * @fill_linkxstats: Function to dump device-specific extended link stats */ struct rtnl_link_ops { struct list_head list; @@ -95,6 +98,10 @@ struct rtnl_link_ops { const struct net_device *dev, const struct net_device *slave_dev); struct net *(*get_link_net)(const struct net_device *dev); + size_t (*get_linkxstats_size)(const struct net_device *dev); + int (*fill_linkxstats)(struct sk_buff *skb, + const struct net_device *dev, + int *prividx); }; int __rtnl_link_register(struct rtnl_link_ops *ops); diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index 3e80974566bb..2bfdb9c58342 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h @@ -810,6 +810,7 @@ struct if_stats_msg { enum { IFLA_STATS_UNSPEC, /* also used as 64bit pad attribute */ IFLA_STATS_LINK_64, + IFLA_STATS_LINK_XSTATS, __IFLA_STATS_MAX, }; @@ -817,4 +818,15 @@ enum { #define IFLA_STATS_FILTER_BIT(ATTR) (1 << (ATTR - 1)) +/* These are embedded into IFLA_STATS_LINK_XSTATS: + * [IFLA_STATS_LINK_XSTATS] + * -> [LINK_XSTATS_TYPE_xxx] + * -> [rtnl link type specific attributes] + */ +enum { + LINK_XSTATS_TYPE_UNSPEC, + __LINK_XSTATS_TYPE_MAX +}; +#define LINK_XSTATS_TYPE_MAX (__LINK_XSTATS_TYPE_MAX - 1) + #endif /* _UAPI_LINUX_IF_LINK_H */ diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index de529a20cd18..d471f097c739 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -3483,6 +3483,26 @@ static int rtnl_fill_statsinfo(struct sk_buff *skb, struct net_device *dev, dev_get_stats(dev, sp); } + if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS, *idxattr)) { + 
const struct rtnl_link_ops *ops = dev->rtnl_link_ops; + + if (ops && ops->fill_linkxstats) { + int err; + + *idxattr = IFLA_STATS_LINK_XSTATS; + attr = nla_nest_start(skb, + IFLA_STATS_LINK_XSTATS); + if (!attr) + goto nla_put_failure; + + err = ops->fill_linkxstats(skb, dev, prividx); + nla_nest_end(skb, attr); + if (err) + goto nla_put_failure; + *idxattr = 0; + } + } + nlmsg_end(skb, nlh); return 0; @@ -3509,6 +3529,16 @@ static size_t if_nlmsg_stats_size(const struct net_device *dev, if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_64, 0)) size += nla_total_size_64bit(sizeof(struct rtnl_link_stats64)); + if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS, 0)) { + const struct rtnl_link_ops *ops = dev->rtnl_link_ops; + + if (ops && ops->get_linkxstats_size) { + size += nla_total_size(ops->get_linkxstats_size(dev)); + /* for IFLA_STATS_LINK_XSTATS */ + size += nla_total_size(0); + } + } + return size; } From 6dada9b10a0818ba72c249526a742c8c41274a73 Mon Sep 17 00:00:00 2001 From: Nikolay Aleksandrov Date: Sat, 30 Apr 2016 10:25:28 +0200 Subject: [PATCH 1234/1649] bridge: vlan: learn to count Add support for per-VLAN Tx/Rx statistics. Every global vlan context gets allocated a per-cpu stats which is then set in each per-port vlan context for quick access. The br_allowed_ingress() common function is used to account for Rx packets and the br_handle_vlan() common function is used to account for Tx packets. Stats accounting is performed only if the bridge-wide vlan_stats_enabled option is set either via sysfs or netlink. A struct hole between vlan_enabled and vlan_proto is used for the new option so it is in the same cache line. Currently it is binary (on/off) but it is intentionally restricted to exactly 0 and 1 since other values will be used in the future for different purposes (e.g. per-port stats). Signed-off-by: Nikolay Aleksandrov Signed-off-by: David S. 
Miller --- include/uapi/linux/if_link.h | 1 + net/bridge/br_netlink.c | 13 +++++- net/bridge/br_private.h | 13 +++++- net/bridge/br_sysfs_br.c | 17 ++++++++ net/bridge/br_vlan.c | 82 ++++++++++++++++++++++++++++++------ 5 files changed, 110 insertions(+), 16 deletions(-) diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index 2bfdb9c58342..95f77113388f 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h @@ -272,6 +272,7 @@ enum { IFLA_BR_NF_CALL_ARPTABLES, IFLA_BR_VLAN_DEFAULT_PVID, IFLA_BR_PAD, + IFLA_BR_VLAN_STATS_ENABLED, __IFLA_BR_MAX, }; diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c index 6bae1125e36d..7fba1f018bc9 100644 --- a/net/bridge/br_netlink.c +++ b/net/bridge/br_netlink.c @@ -850,6 +850,7 @@ static const struct nla_policy br_policy[IFLA_BR_MAX + 1] = { [IFLA_BR_NF_CALL_IP6TABLES] = { .type = NLA_U8 }, [IFLA_BR_NF_CALL_ARPTABLES] = { .type = NLA_U8 }, [IFLA_BR_VLAN_DEFAULT_PVID] = { .type = NLA_U16 }, + [IFLA_BR_VLAN_STATS_ENABLED] = { .type = NLA_U8 }, }; static int br_changelink(struct net_device *brdev, struct nlattr *tb[], @@ -921,6 +922,14 @@ static int br_changelink(struct net_device *brdev, struct nlattr *tb[], if (err) return err; } + + if (data[IFLA_BR_VLAN_STATS_ENABLED]) { + __u8 vlan_stats = nla_get_u8(data[IFLA_BR_VLAN_STATS_ENABLED]); + + err = br_vlan_set_stats(br, vlan_stats); + if (err) + return err; + } #endif if (data[IFLA_BR_GROUP_FWD_MASK]) { @@ -1082,6 +1091,7 @@ static size_t br_get_size(const struct net_device *brdev) #ifdef CONFIG_BRIDGE_VLAN_FILTERING nla_total_size(sizeof(__be16)) + /* IFLA_BR_VLAN_PROTOCOL */ nla_total_size(sizeof(u16)) + /* IFLA_BR_VLAN_DEFAULT_PVID */ + nla_total_size(sizeof(u8)) + /* IFLA_BR_VLAN_STATS_ENABLED */ #endif nla_total_size(sizeof(u16)) + /* IFLA_BR_GROUP_FWD_MASK */ nla_total_size(sizeof(struct ifla_bridge_id)) + /* IFLA_BR_ROOT_ID */ @@ -1167,7 +1177,8 @@ static int br_fill_info(struct sk_buff *skb, const struct net_device *brdev) #ifdef CONFIG_BRIDGE_VLAN_FILTERING if (nla_put_be16(skb, IFLA_BR_VLAN_PROTOCOL, br->vlan_proto) || - nla_put_u16(skb, IFLA_BR_VLAN_DEFAULT_PVID, br->default_pvid)) + nla_put_u16(skb, IFLA_BR_VLAN_DEFAULT_PVID, br->default_pvid) || + nla_put_u8(skb, IFLA_BR_VLAN_STATS_ENABLED, br->vlan_stats_enabled)) return -EMSGSIZE; #endif #ifdef CONFIG_BRIDGE_IGMP_SNOOPING diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index d9da857182ef..12b6d82dbd68 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h @@ -77,12 +77,21 @@ struct bridge_mcast_querier { }; #endif +struct br_vlan_stats { + u64 rx_bytes; + u64 rx_packets; + u64 tx_bytes; + u64 tx_packets; + struct u64_stats_sync syncp; +}; + /** * struct net_bridge_vlan - per-vlan entry * * @vnode: rhashtable member * @vid: VLAN id * @flags: bridge vlan flags + * @stats: per-cpu VLAN statistics * @br: if MASTER flag set, this points to a bridge struct * @port: if MASTER flag unset, this points to a port struct * @refcnt: if MASTER flag set, this is bumped for each port referencing it @@ -100,6 +109,7 @@ struct net_bridge_vlan { struct rhash_head vnode; u16 vid; u16 flags; + struct br_vlan_stats __percpu *stats; union { struct net_bridge *br; struct net_bridge_port *port; @@ -342,6 +352,7 @@ struct net_bridge #ifdef CONFIG_BRIDGE_VLAN_FILTERING struct net_bridge_vlan_group __rcu *vlgrp; u8 vlan_enabled; + u8 vlan_stats_enabled; __be16 vlan_proto; u16 default_pvid; #endif @@ -691,6 +702,7 @@ int __br_vlan_filter_toggle(struct net_bridge *br, unsigned long val); int 
br_vlan_filter_toggle(struct net_bridge *br, unsigned long val); int __br_vlan_set_proto(struct net_bridge *br, __be16 proto); int br_vlan_set_proto(struct net_bridge *br, unsigned long val); +int br_vlan_set_stats(struct net_bridge *br, unsigned long val); int br_vlan_init(struct net_bridge *br); int br_vlan_set_default_pvid(struct net_bridge *br, unsigned long val); int __br_vlan_set_default_pvid(struct net_bridge *br, u16 pvid); @@ -880,7 +892,6 @@ static inline struct net_bridge_vlan_group *nbp_vlan_group_rcu( { return NULL; } - #endif struct nf_br_ops { diff --git a/net/bridge/br_sysfs_br.c b/net/bridge/br_sysfs_br.c index 70bddfd0f3e9..beb47071e38d 100644 --- a/net/bridge/br_sysfs_br.c +++ b/net/bridge/br_sysfs_br.c @@ -731,6 +731,22 @@ static ssize_t default_pvid_store(struct device *d, return store_bridge_parm(d, buf, len, br_vlan_set_default_pvid); } static DEVICE_ATTR_RW(default_pvid); + +static ssize_t vlan_stats_enabled_show(struct device *d, + struct device_attribute *attr, + char *buf) +{ + struct net_bridge *br = to_bridge(d); + return sprintf(buf, "%u\n", br->vlan_stats_enabled); +} + +static ssize_t vlan_stats_enabled_store(struct device *d, + struct device_attribute *attr, + const char *buf, size_t len) +{ + return store_bridge_parm(d, buf, len, br_vlan_set_stats); +} +static DEVICE_ATTR_RW(vlan_stats_enabled); #endif static struct attribute *bridge_attrs[] = { @@ -778,6 +794,7 @@ static struct attribute *bridge_attrs[] = { &dev_attr_vlan_filtering.attr, &dev_attr_vlan_protocol.attr, &dev_attr_default_pvid.attr, + &dev_attr_vlan_stats_enabled.attr, #endif NULL }; diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c index e001152d6ad1..065c35351356 100644 --- a/net/bridge/br_vlan.c +++ b/net/bridge/br_vlan.c @@ -162,6 +162,17 @@ static struct net_bridge_vlan *br_vlan_get_master(struct net_bridge *br, u16 vid return masterv; } +static void br_master_vlan_rcu_free(struct rcu_head *rcu) +{ + struct net_bridge_vlan *v; + + v = container_of(rcu, struct net_bridge_vlan, rcu); + WARN_ON(!br_vlan_is_master(v)); + free_percpu(v->stats); + v->stats = NULL; + kfree(v); +} + static void br_vlan_put_master(struct net_bridge_vlan *masterv) { struct net_bridge_vlan_group *vg; @@ -174,7 +185,7 @@ static void br_vlan_put_master(struct net_bridge_vlan *masterv) rhashtable_remove_fast(&vg->vlan_hash, &masterv->vnode, br_vlan_rht_params); __vlan_del_list(masterv); - kfree_rcu(masterv, rcu); + call_rcu(&masterv->rcu, br_master_vlan_rcu_free); } } @@ -230,6 +241,7 @@ static int __vlan_add(struct net_bridge_vlan *v, u16 flags) if (!masterv) goto out_filt; v->brvlan = masterv; + v->stats = masterv->stats; } /* Add the dev mac and count the vlan only if it's usable */ @@ -329,6 +341,7 @@ struct sk_buff *br_handle_vlan(struct net_bridge *br, struct net_bridge_vlan_group *vg, struct sk_buff *skb) { + struct br_vlan_stats *stats; struct net_bridge_vlan *v; u16 vid; @@ -355,18 +368,27 @@ struct sk_buff *br_handle_vlan(struct net_bridge *br, return NULL; } } + if (br->vlan_stats_enabled) { + stats = this_cpu_ptr(v->stats); + u64_stats_update_begin(&stats->syncp); + stats->tx_bytes += skb->len; + stats->tx_packets++; + u64_stats_update_end(&stats->syncp); + } + if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED) skb->vlan_tci = 0; - out: return skb; } /* Called under RCU */ -static bool __allowed_ingress(struct net_bridge_vlan_group *vg, __be16 proto, +static bool __allowed_ingress(const struct net_bridge *br, + struct net_bridge_vlan_group *vg, struct sk_buff *skb, u16 *vid) { - const struct net_bridge_vlan 
*v; + struct br_vlan_stats *stats; + struct net_bridge_vlan *v; bool tagged; BR_INPUT_SKB_CB(skb)->vlan_filtered = true; @@ -375,7 +397,7 @@ static bool __allowed_ingress(struct net_bridge_vlan_group *vg, __be16 proto, * HW accelerated vlan tag. */ if (unlikely(!skb_vlan_tag_present(skb) && - skb->protocol == proto)) { + skb->protocol == br->vlan_proto)) { skb = skb_vlan_untag(skb); if (unlikely(!skb)) return false; @@ -383,7 +405,7 @@ static bool __allowed_ingress(struct net_bridge_vlan_group *vg, __be16 proto, if (!br_vlan_get_tag(skb, vid)) { /* Tagged frame */ - if (skb->vlan_proto != proto) { + if (skb->vlan_proto != br->vlan_proto) { /* Protocol-mismatch, empty out vlan_tci for new tag */ skb_push(skb, ETH_HLEN); skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto, @@ -419,7 +441,7 @@ static bool __allowed_ingress(struct net_bridge_vlan_group *vg, __be16 proto, *vid = pvid; if (likely(!tagged)) /* Untagged Frame. */ - __vlan_hwaccel_put_tag(skb, proto, pvid); + __vlan_hwaccel_put_tag(skb, br->vlan_proto, pvid); else /* Priority-tagged Frame. * At this point, We know that skb->vlan_tci had @@ -428,13 +450,24 @@ static bool __allowed_ingress(struct net_bridge_vlan_group *vg, __be16 proto, */ skb->vlan_tci |= pvid; - return true; + /* if stats are disabled we can avoid the lookup */ + if (!br->vlan_stats_enabled) + return true; + } + v = br_vlan_find(vg, *vid); + if (!v || !br_vlan_should_use(v)) + goto drop; + + if (br->vlan_stats_enabled) { + stats = this_cpu_ptr(v->stats); + u64_stats_update_begin(&stats->syncp); + stats->rx_bytes += skb->len; + stats->rx_packets++; + u64_stats_update_end(&stats->syncp); } - /* Frame had a valid vlan tag. See if vlan is allowed */ - v = br_vlan_find(vg, *vid); - if (v && br_vlan_should_use(v)) - return true; + return true; + drop: kfree_skb(skb); return false; @@ -452,7 +485,7 @@ bool br_allowed_ingress(const struct net_bridge *br, return true; } - return __allowed_ingress(vg, br->vlan_proto, skb, vid); + return __allowed_ingress(br, vg, skb, vid); } /* Called under RCU. */ @@ -542,6 +575,11 @@ int br_vlan_add(struct net_bridge *br, u16 vid, u16 flags) if (!vlan) return -ENOMEM; + vlan->stats = netdev_alloc_pcpu_stats(struct br_vlan_stats); + if (!vlan->stats) { + kfree(vlan); + return -ENOMEM; + } vlan->vid = vid; vlan->flags = flags | BRIDGE_VLAN_INFO_MASTER; vlan->flags &= ~BRIDGE_VLAN_INFO_PVID; @@ -549,8 +587,10 @@ int br_vlan_add(struct net_bridge *br, u16 vid, u16 flags) if (flags & BRIDGE_VLAN_INFO_BRENTRY) atomic_set(&vlan->refcnt, 1); ret = __vlan_add(vlan, flags); - if (ret) + if (ret) { + free_percpu(vlan->stats); kfree(vlan); + } return ret; } @@ -711,6 +751,20 @@ int br_vlan_set_proto(struct net_bridge *br, unsigned long val) return __br_vlan_set_proto(br, htons(val)); } +int br_vlan_set_stats(struct net_bridge *br, unsigned long val) +{ + switch (val) { + case 0: + case 1: + br->vlan_stats_enabled = val; + break; + default: + return -EINVAL; + } + + return 0; +} + static bool vlan_default_pvid(struct net_bridge_vlan_group *vg, u16 vid) { struct net_bridge_vlan *v; From a60c090361ea211625c27052dbbc11c5222e20e4 Mon Sep 17 00:00:00 2001 From: Nikolay Aleksandrov Date: Sat, 30 Apr 2016 10:25:29 +0200 Subject: [PATCH 1235/1649] bridge: netlink: export per-vlan stats Add a new LINK_XSTATS_TYPE_BRIDGE attribute and implement the RTM_GETSTATS callbacks for IFLA_STATS_LINK_XSTATS (fill_linkxstats and get_linkxstats_size) in order to export the per-vlan stats. 
The paddings were added because soon these fields will be needed for per-port per-vlan stats (or something else if someone beats me to it) so avoiding at least a few more netlink attributes. Signed-off-by: Nikolay Aleksandrov Signed-off-by: David S. Miller --- include/uapi/linux/if_bridge.h | 18 ++++++++++ include/uapi/linux/if_link.h | 1 + net/bridge/br_netlink.c | 65 ++++++++++++++++++++++++++++++++++ net/bridge/br_private.h | 7 ++++ net/bridge/br_vlan.c | 27 ++++++++++++++ 5 files changed, 118 insertions(+) diff --git a/include/uapi/linux/if_bridge.h b/include/uapi/linux/if_bridge.h index 0536eefff9bf..397d503fdedb 100644 --- a/include/uapi/linux/if_bridge.h +++ b/include/uapi/linux/if_bridge.h @@ -134,6 +134,16 @@ struct bridge_vlan_info { __u16 vid; }; +struct bridge_vlan_xstats { + __u64 rx_bytes; + __u64 rx_packets; + __u64 tx_bytes; + __u64 tx_packets; + __u16 vid; + __u16 pad1; + __u32 pad2; +}; + /* Bridge multicast database attributes * [MDBA_MDB] = { * [MDBA_MDB_ENTRY] = { @@ -233,4 +243,12 @@ enum { }; #define MDBA_SET_ENTRY_MAX (__MDBA_SET_ENTRY_MAX - 1) +/* Embedded inside LINK_XSTATS_TYPE_BRIDGE */ +enum { + BRIDGE_XSTATS_UNSPEC, + BRIDGE_XSTATS_VLAN, + __BRIDGE_XSTATS_MAX +}; +#define BRIDGE_XSTATS_MAX (__BRIDGE_XSTATS_MAX - 1) + #endif /* _UAPI_LINUX_IF_BRIDGE_H */ diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index 95f77113388f..d2d7fd4ba5f5 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h @@ -826,6 +826,7 @@ enum { */ enum { LINK_XSTATS_TYPE_UNSPEC, + LINK_XSTATS_TYPE_BRIDGE, __LINK_XSTATS_TYPE_MAX }; #define LINK_XSTATS_TYPE_MAX (__LINK_XSTATS_TYPE_MAX - 1) diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c index 7fba1f018bc9..a5343c7232bf 100644 --- a/net/bridge/br_netlink.c +++ b/net/bridge/br_netlink.c @@ -1234,6 +1234,69 @@ static int br_fill_info(struct sk_buff *skb, const struct net_device *brdev) return 0; } +static size_t br_get_linkxstats_size(const struct net_device *dev) +{ + struct net_bridge *br = netdev_priv(dev); + struct net_bridge_vlan_group *vg; + struct net_bridge_vlan *v; + int numvls = 0; + + vg = br_vlan_group(br); + if (!vg) + return 0; + + /* we need to count all, even placeholder entries */ + list_for_each_entry(v, &vg->vlan_list, vlist) + numvls++; + + /* account for the vlans and the link xstats type nest attribute */ + return numvls * nla_total_size(sizeof(struct bridge_vlan_xstats)) + + nla_total_size(0); +} + +static int br_fill_linkxstats(struct sk_buff *skb, const struct net_device *dev, + int *prividx) +{ + struct net_bridge *br = netdev_priv(dev); + struct net_bridge_vlan_group *vg; + struct net_bridge_vlan *v; + struct nlattr *nest; + int vl_idx = 0; + + vg = br_vlan_group(br); + if (!vg) + goto out; + nest = nla_nest_start(skb, LINK_XSTATS_TYPE_BRIDGE); + if (!nest) + return -EMSGSIZE; + list_for_each_entry(v, &vg->vlan_list, vlist) { + struct bridge_vlan_xstats vxi; + struct br_vlan_stats stats; + + if (vl_idx++ < *prividx) + continue; + memset(&vxi, 0, sizeof(vxi)); + vxi.vid = v->vid; + br_vlan_get_stats(v, &stats); + vxi.rx_bytes = stats.rx_bytes; + vxi.rx_packets = stats.rx_packets; + vxi.tx_bytes = stats.tx_bytes; + vxi.tx_packets = stats.tx_packets; + + if (nla_put(skb, BRIDGE_XSTATS_VLAN, sizeof(vxi), &vxi)) + goto nla_put_failure; + } + nla_nest_end(skb, nest); + *prividx = 0; +out: + return 0; + +nla_put_failure: + nla_nest_end(skb, nest); + *prividx = vl_idx; + + return -EMSGSIZE; +} static struct rtnl_af_ops br_af_ops __read_mostly = { .family = AF_BRIDGE, 
@@ -1252,6 +1315,8 @@ struct rtnl_link_ops br_link_ops __read_mostly = { .dellink = br_dev_delete, .get_size = br_get_size, .fill_info = br_fill_info, + .fill_linkxstats = br_fill_linkxstats, + .get_linkxstats_size = br_get_linkxstats_size, .slave_maxtype = IFLA_BRPORT_MAX, .slave_policy = br_port_policy, diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index 12b6d82dbd68..c7fb5d7a7218 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h @@ -711,6 +711,8 @@ int nbp_vlan_delete(struct net_bridge_port *port, u16 vid); void nbp_vlan_flush(struct net_bridge_port *port); int nbp_vlan_init(struct net_bridge_port *port); int nbp_get_num_vlan_infos(struct net_bridge_port *p, u32 filter_mask); +void br_vlan_get_stats(const struct net_bridge_vlan *v, + struct br_vlan_stats *stats); static inline struct net_bridge_vlan_group *br_vlan_group( const struct net_bridge *br) @@ -892,6 +894,11 @@ static inline struct net_bridge_vlan_group *nbp_vlan_group_rcu( { return NULL; } + +static inline void br_vlan_get_stats(const struct net_bridge_vlan *v, + struct br_vlan_stats *stats) +{ +} #endif struct nf_br_ops { diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c index 065c35351356..b6de4f457161 100644 --- a/net/bridge/br_vlan.c +++ b/net/bridge/br_vlan.c @@ -1054,3 +1054,30 @@ void nbp_vlan_flush(struct net_bridge_port *port) synchronize_rcu(); __vlan_group_free(vg); } + +void br_vlan_get_stats(const struct net_bridge_vlan *v, + struct br_vlan_stats *stats) +{ + int i; + + memset(stats, 0, sizeof(*stats)); + for_each_possible_cpu(i) { + u64 rxpackets, rxbytes, txpackets, txbytes; + struct br_vlan_stats *cpu_stats; + unsigned int start; + + cpu_stats = per_cpu_ptr(v->stats, i); + do { + start = u64_stats_fetch_begin_irq(&cpu_stats->syncp); + rxpackets = cpu_stats->rx_packets; + rxbytes = cpu_stats->rx_bytes; + txbytes = cpu_stats->tx_bytes; + txpackets = cpu_stats->tx_packets; + } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start)); + + stats->rx_packets += rxpackets; + stats->rx_bytes += rxbytes; + stats->tx_bytes += txbytes; + stats->tx_packets += txpackets; + } +} From c0ef079ca791ef9e057ac748051425a768c9e192 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Tue, 3 May 2016 03:29:09 +0200 Subject: [PATCH 1236/1649] netdevice: shrink size of struct netdev_queue - trans_timeout is incremented when tx queue timed out (tx watchdog). - tx_maxrate is set via sysfs Moving tx_maxrate to read-mostly part shrinks the struct by 64 bytes. While at it, also move trans_timeout (it is out-of-place in the 'write-mostly' part). Signed-off-by: Florian Westphal Acked-by: Eric Dumazet Signed-off-by: David S. 
Miller --- include/linux/netdevice.h | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 52914a854386..f2182594160e 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -569,6 +569,12 @@ struct netdev_queue { #if defined(CONFIG_XPS) && defined(CONFIG_NUMA) int numa_node; #endif + unsigned long tx_maxrate; + /* + * Number of TX timeouts for this queue + * (/sys/class/net/DEV/Q/trans_timeout) + */ + unsigned long trans_timeout; /* * write-mostly part */ @@ -579,18 +585,11 @@ struct netdev_queue { */ unsigned long trans_start; - /* - * Number of TX timeouts for this queue - * (/sys/class/net/DEV/Q/trans_timeout) - */ - unsigned long trans_timeout; - unsigned long state; #ifdef CONFIG_BQL struct dql dql; #endif - unsigned long tx_maxrate; } ____cacheline_aligned_in_smp; static inline int netdev_queue_numa_node_read(const struct netdev_queue *q) From 9580bf2edb402b3afaf9c5a4efb6953f993ef52e Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Sat, 30 Apr 2016 10:19:29 -0700 Subject: [PATCH 1237/1649] net: relax expensive skb_unclone() in iptunnel_handle_offloads() Locally generated TCP GSO packets having to go through a GRE/SIT/IPIP tunnel have to go through an expensive skb_unclone() Reallocating skb->head is a lot of work. Test should really check if a 'real clone' of the packet was done. TCP does not care if the original gso_type is changed while the packet travels in the stack. This adds skb_header_unclone() which is a variant of skb_clone() using skb_header_cloned() check instead of skb_cloned(). This variant can probably be used from other points. Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- include/linux/skbuff.h | 10 ++++++++++ net/ipv4/ip_tunnel_core.c | 2 +- 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index c84a5a1078c5..c413c588a24f 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -1325,6 +1325,16 @@ static inline int skb_header_cloned(const struct sk_buff *skb) return dataref != 1; } +static inline int skb_header_unclone(struct sk_buff *skb, gfp_t pri) +{ + might_sleep_if(gfpflags_allow_blocking(pri)); + + if (skb_header_cloned(skb)) + return pskb_expand_head(skb, 0, 0, pri); + + return 0; +} + /** * skb_header_release - release reference to header * @skb: buffer to operate on diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c index 786fa7ca28e0..9118b0e640ba 100644 --- a/net/ipv4/ip_tunnel_core.c +++ b/net/ipv4/ip_tunnel_core.c @@ -157,7 +157,7 @@ int iptunnel_handle_offloads(struct sk_buff *skb, } if (skb_is_gso(skb)) { - err = skb_unclone(skb, GFP_ATOMIC); + err = skb_header_unclone(skb, GFP_ATOMIC); if (unlikely(err)) return err; skb_shinfo(skb)->gso_type |= gso_type_mask; From 094e43d50d7e421e9036d72a5a1d93c250cdbf0a Mon Sep 17 00:00:00 2001 From: Kazuya Mizuguchi Date: Mon, 2 May 2016 00:19:51 +0900 Subject: [PATCH 1238/1649] ravb: Remove rx buffer ALIGN Aligning the reception data size is not required. Signed-off-by: Kazuya Mizuguchi Signed-off-by: Yoshihiro Kaneko Tested-by: Simon Horman Signed-off-by: David S. 
Miller --- drivers/net/ethernet/renesas/ravb_main.c | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index 238b56feb1ce..34066e0649f5 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -246,10 +246,9 @@ static void ravb_ring_format(struct net_device *ndev, int q) for (i = 0; i < priv->num_rx_ring[q]; i++) { /* RX descriptor */ rx_desc = &priv->rx_ring[q][i]; - /* The size of the buffer should be on 16-byte boundary. */ - rx_desc->ds_cc = cpu_to_le16(ALIGN(PKT_BUF_SZ, 16)); + rx_desc->ds_cc = cpu_to_le16(PKT_BUF_SZ); dma_addr = dma_map_single(ndev->dev.parent, priv->rx_skb[q][i]->data, - ALIGN(PKT_BUF_SZ, 16), + PKT_BUF_SZ, DMA_FROM_DEVICE); /* We just set the data size to 0 for a failed mapping which * should prevent DMA from happening... @@ -558,7 +557,7 @@ static bool ravb_rx(struct net_device *ndev, int *quota, int q) skb = priv->rx_skb[q][entry]; priv->rx_skb[q][entry] = NULL; dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr), - ALIGN(PKT_BUF_SZ, 16), + PKT_BUF_SZ, DMA_FROM_DEVICE); get_ts &= (q == RAVB_NC) ? RAVB_RXTSTAMP_TYPE_V2_L2_EVENT : @@ -588,8 +587,7 @@ static bool ravb_rx(struct net_device *ndev, int *quota, int q) for (; priv->cur_rx[q] - priv->dirty_rx[q] > 0; priv->dirty_rx[q]++) { entry = priv->dirty_rx[q] % priv->num_rx_ring[q]; desc = &priv->rx_ring[q][entry]; - /* The size of the buffer should be on 16-byte boundary. */ - desc->ds_cc = cpu_to_le16(ALIGN(PKT_BUF_SZ, 16)); + desc->ds_cc = cpu_to_le16(PKT_BUF_SZ); if (!priv->rx_skb[q][entry]) { skb = netdev_alloc_skb(ndev, From 9d18562a227874289fda8ca5d117d8f503f1dcca Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Sun, 1 May 2016 16:47:26 -0700 Subject: [PATCH 1239/1649] fq_codel: add batch ability to fq_codel_drop() In presence of inelastic flows and stress, we can call fq_codel_drop() for every packet entering fq_codel qdisc. fq_codel_drop() is quite expensive, as it does a linear scan of 4 KB of memory to find a fat flow. Once found, it drops the oldest packet of this flow. Instead of dropping a single packet, try to drop 50% of the backlog of this fat flow, with a configurable limit of 64 packets per round. TCA_FQ_CODEL_DROP_BATCH_SIZE is the new attribute to make this limit configurable. With this strategy the 4 KB search is amortized to a single cache line per drop [1], so fq_codel_drop() no longer appears at the top of kernel profile in presence of few inelastic flows. [1] Assuming a 64byte cache line, and 1024 buckets Signed-off-by: Eric Dumazet Reported-by: Dave Taht Cc: Jonathan Morton Acked-by: Jesper Dangaard Brouer Acked-by: Dave Taht Signed-off-by: David S. 
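To make the amortization in [1] concrete, a quick back-of-the-envelope check under the stated assumptions (1024 buckets, 64-byte cache lines, 4-byte u32 backlog counters): one call to fq_codel_drop() scans q->backlogs[], i.e. 1024 * 4 = 4096 bytes = 64 cache lines. When that single scan is followed by dropping a batch of up to 64 packets from the fat flow instead of just one, the scan cost per dropped packet becomes 4096 / 64 = 64 bytes, about one cache line, which is why the function stops dominating the profile under stress. The batch size defaults to 64 (set in fq_codel_init() in the hunk below) and is tunable through the new TCA_FQ_CODEL_DROP_BATCH_SIZE attribute.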
Miller --- include/uapi/linux/pkt_sched.h | 1 + net/sched/sch_fq_codel.c | 66 +++++++++++++++++++++++----------- 2 files changed, 47 insertions(+), 20 deletions(-) diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h index 1c78c7454c7c..a11afecd4482 100644 --- a/include/uapi/linux/pkt_sched.h +++ b/include/uapi/linux/pkt_sched.h @@ -718,6 +718,7 @@ enum { TCA_FQ_CODEL_FLOWS, TCA_FQ_CODEL_QUANTUM, TCA_FQ_CODEL_CE_THRESHOLD, + TCA_FQ_CODEL_DROP_BATCH_SIZE, __TCA_FQ_CODEL_MAX }; diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c index a5e420b3d4ab..e7b42b0d5145 100644 --- a/net/sched/sch_fq_codel.c +++ b/net/sched/sch_fq_codel.c @@ -59,6 +59,7 @@ struct fq_codel_sched_data { u32 flows_cnt; /* number of flows */ u32 perturbation; /* hash perturbation */ u32 quantum; /* psched_mtu(qdisc_dev(sch)); */ + u32 drop_batch_size; struct codel_params cparams; struct codel_stats cstats; u32 drop_overlimit; @@ -135,17 +136,20 @@ static inline void flow_queue_add(struct fq_codel_flow *flow, skb->next = NULL; } -static unsigned int fq_codel_drop(struct Qdisc *sch) +static unsigned int fq_codel_drop(struct Qdisc *sch, unsigned int max_packets) { struct fq_codel_sched_data *q = qdisc_priv(sch); struct sk_buff *skb; unsigned int maxbacklog = 0, idx = 0, i, len; struct fq_codel_flow *flow; + unsigned int threshold; - /* Queue is full! Find the fat flow and drop packet from it. + /* Queue is full! Find the fat flow and drop packet(s) from it. * This might sound expensive, but with 1024 flows, we scan * 4KB of memory, and we dont need to handle a complex tree * in fast path (packet queue/enqueue) with many cache misses. + * In stress mode, we'll try to drop 64 packets from the flow, + * amortizing this linear lookup to one cache line per drop. */ for (i = 0; i < q->flows_cnt; i++) { if (q->backlogs[i] > maxbacklog) { @@ -153,15 +157,24 @@ static unsigned int fq_codel_drop(struct Qdisc *sch) idx = i; } } + + /* Our goal is to drop half of this fat flow backlog */ + threshold = maxbacklog >> 1; + flow = &q->flows[idx]; - skb = dequeue_head(flow); - len = qdisc_pkt_len(skb); + len = 0; + i = 0; + do { + skb = dequeue_head(flow); + len += qdisc_pkt_len(skb); + kfree_skb(skb); + } while (++i < max_packets && len < threshold); + + flow->dropped += i; q->backlogs[idx] -= len; - sch->q.qlen--; - qdisc_qstats_drop(sch); - qdisc_qstats_backlog_dec(sch, skb); - kfree_skb(skb); - flow->dropped++; + sch->qstats.drops += i; + sch->qstats.backlog -= len; + sch->q.qlen -= i; return idx; } @@ -170,14 +183,14 @@ static unsigned int fq_codel_qdisc_drop(struct Qdisc *sch) unsigned int prev_backlog; prev_backlog = sch->qstats.backlog; - fq_codel_drop(sch); + fq_codel_drop(sch, 1U); return prev_backlog - sch->qstats.backlog; } static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch) { struct fq_codel_sched_data *q = qdisc_priv(sch); - unsigned int idx, prev_backlog; + unsigned int idx, prev_backlog, prev_qlen; struct fq_codel_flow *flow; int uninitialized_var(ret); @@ -206,16 +219,22 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch) return NET_XMIT_SUCCESS; prev_backlog = sch->qstats.backlog; - q->drop_overlimit++; - /* Return Congestion Notification only if we dropped a packet - * from this flow. 
- */ - if (fq_codel_drop(sch) == idx) - return NET_XMIT_CN; + prev_qlen = sch->q.qlen; - /* As we dropped a packet, better let upper stack know this */ - qdisc_tree_reduce_backlog(sch, 1, prev_backlog - sch->qstats.backlog); - return NET_XMIT_SUCCESS; + /* fq_codel_drop() is quite expensive, as it performs a linear search + * in q->backlogs[] to find a fat flow. + * So instead of dropping a single packet, drop half of its backlog + * with a 64 packets limit to not add a too big cpu spike here. + */ + ret = fq_codel_drop(sch, q->drop_batch_size); + + q->drop_overlimit += prev_qlen - sch->q.qlen; + + /* As we dropped packet(s), better let upper stack know this */ + qdisc_tree_reduce_backlog(sch, prev_qlen - sch->q.qlen, + prev_backlog - sch->qstats.backlog); + + return ret == idx ? NET_XMIT_CN : NET_XMIT_SUCCESS; } /* This is the specific function called from codel_dequeue() @@ -335,6 +354,7 @@ static const struct nla_policy fq_codel_policy[TCA_FQ_CODEL_MAX + 1] = { [TCA_FQ_CODEL_FLOWS] = { .type = NLA_U32 }, [TCA_FQ_CODEL_QUANTUM] = { .type = NLA_U32 }, [TCA_FQ_CODEL_CE_THRESHOLD] = { .type = NLA_U32 }, + [TCA_FQ_CODEL_DROP_BATCH_SIZE] = { .type = NLA_U32 }, }; static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt) @@ -386,6 +406,9 @@ static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt) if (tb[TCA_FQ_CODEL_QUANTUM]) q->quantum = max(256U, nla_get_u32(tb[TCA_FQ_CODEL_QUANTUM])); + if (tb[TCA_FQ_CODEL_DROP_BATCH_SIZE]) + q->drop_batch_size = min(1U, nla_get_u32(tb[TCA_FQ_CODEL_DROP_BATCH_SIZE])); + while (sch->q.qlen > sch->limit) { struct sk_buff *skb = fq_codel_dequeue(sch); @@ -431,6 +454,7 @@ static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt) sch->limit = 10*1024; q->flows_cnt = 1024; + q->drop_batch_size = 64; q->quantum = psched_mtu(qdisc_dev(sch)); q->perturbation = prandom_u32(); INIT_LIST_HEAD(&q->new_flows); @@ -489,6 +513,8 @@ static int fq_codel_dump(struct Qdisc *sch, struct sk_buff *skb) q->cparams.ecn) || nla_put_u32(skb, TCA_FQ_CODEL_QUANTUM, q->quantum) || + nla_put_u32(skb, TCA_FQ_CODEL_DROP_BATCH_SIZE, + q->drop_batch_size) || nla_put_u32(skb, TCA_FQ_CODEL_FLOWS, q->flows_cnt)) goto nla_put_failure; From b555a3d1e91cfa75eb37680c0c86a32d18215729 Mon Sep 17 00:00:00 2001 From: Julia Lawall Date: Sun, 1 May 2016 14:36:28 +0200 Subject: [PATCH 1240/1649] drivers: net: xgene: constify xgene_cle_ops structure The xgene_cle_ops structure is never modified, so declare it as const. Done with the help of Coccinelle. Signed-off-by: Julia Lawall Acked-by: Iyappan Subramanian Signed-off-by: David S. 
Miller --- drivers/net/ethernet/apm/xgene/xgene_enet_cle.c | 2 +- drivers/net/ethernet/apm/xgene/xgene_enet_cle.h | 2 +- drivers/net/ethernet/apm/xgene/xgene_enet_main.h | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_cle.c b/drivers/net/ethernet/apm/xgene/xgene_enet_cle.c index b212488606da..64792880e940 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_cle.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_cle.c @@ -729,6 +729,6 @@ static int xgene_enet_cle_init(struct xgene_enet_pdata *pdata) return xgene_cle_setup_ptree(pdata, enet_cle); } -struct xgene_cle_ops xgene_cle3in_ops = { +const struct xgene_cle_ops xgene_cle3in_ops = { .cle_init = xgene_enet_cle_init, }; diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_cle.h b/drivers/net/ethernet/apm/xgene/xgene_enet_cle.h index 29a17abdd828..13e829ab9053 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_cle.h +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_cle.h @@ -290,6 +290,6 @@ struct xgene_enet_cle { u32 jump_bytes; }; -extern struct xgene_cle_ops xgene_cle3in_ops; +extern const struct xgene_cle_ops xgene_cle3in_ops; #endif /* __XGENE_ENET_CLE_H__ */ diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h index 175d18890c7a..0a2887b96a42 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h @@ -191,7 +191,7 @@ struct xgene_enet_pdata { const struct xgene_mac_ops *mac_ops; const struct xgene_port_ops *port_ops; struct xgene_ring_ops *ring_ops; - struct xgene_cle_ops *cle_ops; + const struct xgene_cle_ops *cle_ops; struct delayed_work link_work; u32 port_id; u8 cpu_bufnum; From 56130915bbe31656c80f7493d28536693f8de0e2 Mon Sep 17 00:00:00 2001 From: Julia Lawall Date: Sun, 1 May 2016 14:49:15 +0200 Subject: [PATCH 1241/1649] VSOCK: constify vsock_transport structure The vsock_transport structure is never modified, so declare it as const. Done with the help of Coccinelle. Signed-off-by: Julia Lawall Signed-off-by: David S. Miller --- net/vmw_vsock/vmci_transport.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/vmw_vsock/vmci_transport.c b/net/vmw_vsock/vmci_transport.c index 56214736fe88..4120b7a538be 100644 --- a/net/vmw_vsock/vmci_transport.c +++ b/net/vmw_vsock/vmci_transport.c @@ -2051,7 +2051,7 @@ static u32 vmci_transport_get_local_cid(void) return vmci_get_context_id(); } -static struct vsock_transport vmci_transport = { +static const struct vsock_transport vmci_transport = { .init = vmci_transport_socket_init, .destruct = vmci_transport_destruct, .release = vmci_transport_release, From 0d1bcdc74fb9cefcdc74e9d4579e63bd0fa5dc9a Mon Sep 17 00:00:00 2001 From: Philippe Reynes Date: Sun, 1 May 2016 17:08:08 +0200 Subject: [PATCH 1242/1649] net: ethernet: gianfar: move to new ethtool api {get|set}_link_ksettings The ethtool api {get|set}_settings is deprecated. We move the gianfar driver to new api {get|set}_link_ksettings. Signed-off-by: Philippe Reynes Signed-off-by: David S. 
Miller --- .../net/ethernet/freescale/gianfar_ethtool.c | 25 ++++++------------- 1 file changed, 8 insertions(+), 17 deletions(-) diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c index 4b0ee855edd7..2c45c80d9b03 100644 --- a/drivers/net/ethernet/freescale/gianfar_ethtool.c +++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c @@ -185,7 +185,8 @@ static void gfar_gdrvinfo(struct net_device *dev, } -static int gfar_ssettings(struct net_device *dev, struct ethtool_cmd *cmd) +static int gfar_set_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd) { struct gfar_private *priv = netdev_priv(dev); struct phy_device *phydev = priv->phydev; @@ -193,29 +194,19 @@ static int gfar_ssettings(struct net_device *dev, struct ethtool_cmd *cmd) if (NULL == phydev) return -ENODEV; - return phy_ethtool_sset(phydev, cmd); + return phy_ethtool_ksettings_set(phydev, cmd); } - -/* Return the current settings in the ethtool_cmd structure */ -static int gfar_gsettings(struct net_device *dev, struct ethtool_cmd *cmd) +static int gfar_get_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) { struct gfar_private *priv = netdev_priv(dev); struct phy_device *phydev = priv->phydev; - struct gfar_priv_rx_q *rx_queue = NULL; - struct gfar_priv_tx_q *tx_queue = NULL; if (NULL == phydev) return -ENODEV; - tx_queue = priv->tx_queue[0]; - rx_queue = priv->rx_queue[0]; - /* etsec-1.7 and older versions have only one txic - * and rxic regs although they support multiple queues */ - cmd->maxtxpkt = get_icft_value(tx_queue->txic); - cmd->maxrxpkt = get_icft_value(rx_queue->rxic); - - return phy_ethtool_gset(phydev, cmd); + return phy_ethtool_ksettings_get(phydev, cmd); } /* Return the length of the register structure */ @@ -1565,8 +1556,6 @@ static int gfar_get_ts_info(struct net_device *dev, } const struct ethtool_ops gfar_ethtool_ops = { - .get_settings = gfar_gsettings, - .set_settings = gfar_ssettings, .get_drvinfo = gfar_gdrvinfo, .get_regs_len = gfar_reglen, .get_regs = gfar_get_regs, @@ -1589,4 +1578,6 @@ const struct ethtool_ops gfar_ethtool_ops = { .set_rxnfc = gfar_set_nfc, .get_rxnfc = gfar_get_nfc, .get_ts_info = gfar_get_ts_info, + .get_link_ksettings = gfar_get_ksettings, + .set_link_ksettings = gfar_set_ksettings, }; From 5e74bf2d9529e4a272f745d5f3e8f88113a987a0 Mon Sep 17 00:00:00 2001 From: Philippe Reynes Date: Sun, 1 May 2016 17:08:09 +0200 Subject: [PATCH 1243/1649] net: ethernet: ucc: move to new ethtool api {get|set}_link_ksettings The ethtool api {get|set}_settings is deprecated. We move the ucc driver to new api {get|set}_link_ksettings. Signed-off-by: Philippe Reynes Signed-off-by: David S. 
Miller --- .../net/ethernet/freescale/ucc_geth_ethtool.c | 17 +++++++---------- 1 file changed, 7 insertions(+), 10 deletions(-) diff --git a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c index 89714f5e0dfc..812a968a78e9 100644 --- a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c +++ b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c @@ -105,23 +105,20 @@ static const char rx_fw_stat_gstrings[][ETH_GSTRING_LEN] = { #define UEC_RX_FW_STATS_LEN ARRAY_SIZE(rx_fw_stat_gstrings) static int -uec_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) +uec_get_ksettings(struct net_device *netdev, struct ethtool_link_ksettings *cmd) { struct ucc_geth_private *ugeth = netdev_priv(netdev); struct phy_device *phydev = ugeth->phydev; - struct ucc_geth_info *ug_info = ugeth->ug_info; if (!phydev) return -ENODEV; - ecmd->maxtxpkt = 1; - ecmd->maxrxpkt = ug_info->interruptcoalescingmaxvalue[0]; - - return phy_ethtool_gset(phydev, ecmd); + return phy_ethtool_ksettings_get(phydev, cmd); } static int -uec_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) +uec_set_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *cmd) { struct ucc_geth_private *ugeth = netdev_priv(netdev); struct phy_device *phydev = ugeth->phydev; @@ -129,7 +126,7 @@ uec_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) if (!phydev) return -ENODEV; - return phy_ethtool_sset(phydev, ecmd); + return phy_ethtool_ksettings_set(phydev, cmd); } static void @@ -392,8 +389,6 @@ static int uec_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) #endif /* CONFIG_PM */ static const struct ethtool_ops uec_ethtool_ops = { - .get_settings = uec_get_settings, - .set_settings = uec_set_settings, .get_drvinfo = uec_get_drvinfo, .get_regs_len = uec_get_regs_len, .get_regs = uec_get_regs, @@ -411,6 +406,8 @@ static const struct ethtool_ops uec_ethtool_ops = { .get_wol = uec_get_wol, .set_wol = uec_set_wol, .get_ts_info = ethtool_op_get_ts_info, + .get_link_ksettings = uec_get_ksettings, + .set_link_ksettings = uec_set_ksettings, }; void uec_set_ethtool_ops(struct net_device *netdev) From a10cdae0a6ff400c5724967753ec02ef6291ffad Mon Sep 17 00:00:00 2001 From: Philippe Reynes Date: Sun, 1 May 2016 17:08:10 +0200 Subject: [PATCH 1244/1649] net: ethernet: fs-enet: move to new ethtool api {get|set}_link_ksettings The ethtool api {get|set}_settings is deprecated. We move the fs-enet driver to new api {get|set}_link_ksettings. Signed-off-by: Philippe Reynes Signed-off-by: David S. 
Miller --- .../ethernet/freescale/fs_enet/fs_enet-main.c | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c index 48a9c176e0d1..da90b5ad6e36 100644 --- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c +++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c @@ -847,24 +847,28 @@ static void fs_get_regs(struct net_device *dev, struct ethtool_regs *regs, regs->version = 0; } -static int fs_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +static int fs_get_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) { struct fs_enet_private *fep = netdev_priv(dev); + struct phy_device *phydev = fep->phydev; if (!fep->phydev) return -ENODEV; - return phy_ethtool_gset(fep->phydev, cmd); + return phy_ethtool_ksettings_get(phydev, cmd); } -static int fs_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +static int fs_set_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd) { struct fs_enet_private *fep = netdev_priv(dev); + struct phy_device *phydev = fep->phydev; if (!fep->phydev) return -ENODEV; - return phy_ethtool_sset(fep->phydev, cmd); + return phy_ethtool_ksettings_set(phydev, cmd); } static int fs_nway_reset(struct net_device *dev) @@ -887,14 +891,14 @@ static void fs_set_msglevel(struct net_device *dev, u32 value) static const struct ethtool_ops fs_ethtool_ops = { .get_drvinfo = fs_get_drvinfo, .get_regs_len = fs_get_regs_len, - .get_settings = fs_get_settings, - .set_settings = fs_set_settings, .nway_reset = fs_nway_reset, .get_link = ethtool_op_get_link, .get_msglevel = fs_get_msglevel, .set_msglevel = fs_set_msglevel, .get_regs = fs_get_regs, .get_ts_info = ethtool_op_get_ts_info, + .get_link_ksettings = fs_get_ksettings, + .set_link_ksettings = fs_set_ksettings, }; static int fs_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) From e03179fe78d5b39dbf3e8b0b50f7c406514b15c7 Mon Sep 17 00:00:00 2001 From: Philippe Reynes Date: Sun, 1 May 2016 17:08:11 +0200 Subject: [PATCH 1245/1649] net: ethernet: fec_mpc52xx: move to new ethtool api {get|set}_link_ksettings The ethtool api {get|set}_settings is deprecated. We move the fec_mpc52xx driver to new api {get|set}_link_ksettings. Signed-off-by: Philippe Reynes Signed-off-by: David S. 
Miller --- drivers/net/ethernet/freescale/fec_mpc52xx.c | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c index 25553ee857b4..f44471485d00 100644 --- a/drivers/net/ethernet/freescale/fec_mpc52xx.c +++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c @@ -763,24 +763,28 @@ static void mpc52xx_fec_reset(struct net_device *dev) /* ethtool interface */ -static int mpc52xx_fec_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +static int mpc52xx_fec_get_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) { struct mpc52xx_fec_priv *priv = netdev_priv(dev); + struct phy_device *phydev = priv->phydev; if (!priv->phydev) return -ENODEV; - return phy_ethtool_gset(priv->phydev, cmd); + return phy_ethtool_ksettings_get(phydev, cmd); } -static int mpc52xx_fec_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +static int mpc52xx_fec_set_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd) { struct mpc52xx_fec_priv *priv = netdev_priv(dev); + struct phy_device *phydev = priv->phydev; if (!priv->phydev) return -ENODEV; - return phy_ethtool_sset(priv->phydev, cmd); + return phy_ethtool_ksettings_set(phydev, cmd); } static u32 mpc52xx_fec_get_msglevel(struct net_device *dev) @@ -796,12 +800,12 @@ static void mpc52xx_fec_set_msglevel(struct net_device *dev, u32 level) } static const struct ethtool_ops mpc52xx_fec_ethtool_ops = { - .get_settings = mpc52xx_fec_get_settings, - .set_settings = mpc52xx_fec_set_settings, .get_link = ethtool_op_get_link, .get_msglevel = mpc52xx_fec_get_msglevel, .set_msglevel = mpc52xx_fec_set_msglevel, .get_ts_info = ethtool_op_get_ts_info, + .get_link_ksettings = mpc52xx_fec_get_ksettings, + .set_link_ksettings = mpc52xx_fec_set_ksettings, }; From 1584f41f66ff32db40be3814f16ab305128b7194 Mon Sep 17 00:00:00 2001 From: Sven Eckelmann Date: Fri, 1 Apr 2016 15:17:35 +0200 Subject: [PATCH 1246/1649] MAINTAINERS: Mark BATMAN ADVANCED mailing list as moderated The mailing list of b.a.t.m.a.n@lists.open-mesh.org is moderated for non-subscribers and non-whitelisted addresses. Such mails will be delayed but the sender will not be informed about the moderation. Signed-off-by: Sven Eckelmann Signed-off-by: Antonio Quartulli --- MAINTAINERS | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/MAINTAINERS b/MAINTAINERS index ab008013cfec..22688419873f 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -2203,7 +2203,7 @@ BATMAN ADVANCED M: Marek Lindner M: Simon Wunderlich M: Antonio Quartulli -L: b.a.t.m.a.n@lists.open-mesh.org +L: b.a.t.m.a.n@lists.open-mesh.org (moderated for non-subscribers) W: https://www.open-mesh.org/ Q: https://patchwork.open-mesh.org/project/batman/list/ S: Maintained From 286ddfb03dc04f0afa254b51c30d5a7392dc920f Mon Sep 17 00:00:00 2001 From: Sven Eckelmann Date: Fri, 1 Apr 2016 15:17:36 +0200 Subject: [PATCH 1247/1649] MAINTAINERS: Add BATMAN ADVANCED documentation files The sysfs ABI documentation files and the batman-adv.txt are maintained by the BATMAN ADVANCED maintainers and patches for them should therefore be sent to them. 
Signed-off-by: Sven Eckelmann Signed-off-by: Antonio Quartulli --- MAINTAINERS | 3 +++ 1 file changed, 3 insertions(+) diff --git a/MAINTAINERS b/MAINTAINERS index 22688419873f..8f32094f6922 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -2207,6 +2207,9 @@ L: b.a.t.m.a.n@lists.open-mesh.org (moderated for non-subscribers) W: https://www.open-mesh.org/ Q: https://patchwork.open-mesh.org/project/batman/list/ S: Maintained +F: Documentation/ABI/testing/sysfs-class-net-batman-adv +F: Documentation/ABI/testing/sysfs-class-net-mesh +F: Documentation/networking/batman-adv.txt F: net/batman-adv/ BAYCOM/HDLCDRV DRIVERS FOR AX.25 From 565489df244626c7c1a07a5e093d7f2b20b550cc Mon Sep 17 00:00:00 2001 From: Simon Wunderlich Date: Fri, 15 Apr 2016 12:14:39 +0200 Subject: [PATCH 1248/1649] batman-adv: Start new development cycle Signed-off-by: Simon Wunderlich Signed-off-by: Antonio Quartulli --- net/batman-adv/main.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h index db4533631834..38e5587675cc 100644 --- a/net/batman-adv/main.h +++ b/net/batman-adv/main.h @@ -24,7 +24,7 @@ #define BATADV_DRIVER_DEVICE "batman-adv" #ifndef BATADV_SOURCE_VERSION -#define BATADV_SOURCE_VERSION "2016.1" +#define BATADV_SOURCE_VERSION "2016.2" #endif /* B.A.T.M.A.N. parameters */ From 925a6f379036c58cd62a1492f83263ebe021a49d Mon Sep 17 00:00:00 2001 From: Antonio Quartulli Date: Sat, 12 Mar 2016 10:30:18 +0100 Subject: [PATCH 1249/1649] batman-adv: use static string for table headers Use a static string when showing table headers rather then a nonsense parametric one with fixed arguments. It is easier to grep and it does not need to be recomputed at runtime each time. Reported-by: Joe Perches Signed-off-by: Antonio Quartulli [sven@narfation.org: fix conflicts with current version] Signed-off-by: Sven Eckelmann Signed-off-by: Marek Lindner --- net/batman-adv/bat_iv_ogm.c | 8 +++----- net/batman-adv/bat_v.c | 9 ++++----- net/batman-adv/bridge_loop_avoidance.c | 7 +++---- net/batman-adv/distributed-arp-table.c | 4 ++-- net/batman-adv/translation-table.c | 9 ++++----- 5 files changed, 16 insertions(+), 21 deletions(-) diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c index cb2d1b9b0340..682fcaec56e6 100644 --- a/net/batman-adv/bat_iv_ogm.c +++ b/net/batman-adv/bat_iv_ogm.c @@ -1829,9 +1829,8 @@ static void batadv_iv_ogm_orig_print(struct batadv_priv *bat_priv, int batman_count = 0; u32 i; - seq_printf(seq, " %-15s %s (%s/%i) %17s [%10s]: %20s ...\n", - "Originator", "last-seen", "#", BATADV_TQ_MAX_VALUE, - "Nexthop", "outgoingIF", "Potential nexthops"); + seq_puts(seq, + " Originator last-seen (#/255) Nexthop [outgoingIF]: Potential nexthops ...\n"); for (i = 0; i < hash->size; i++) { head = &hash->table[i]; @@ -1911,8 +1910,7 @@ static void batadv_iv_neigh_print(struct batadv_priv *bat_priv, struct batadv_hard_iface *hard_iface; int batman_count = 0; - seq_printf(seq, " %10s %-13s %s\n", - "IF", "Neighbor", "last-seen"); + seq_puts(seq, " IF Neighbor last-seen\n"); rcu_read_lock(); list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) { diff --git a/net/batman-adv/bat_v.c b/net/batman-adv/bat_v.c index 3315b9a598af..246f9e959849 100644 --- a/net/batman-adv/bat_v.c +++ b/net/batman-adv/bat_v.c @@ -151,8 +151,8 @@ static void batadv_v_neigh_print(struct batadv_priv *bat_priv, struct batadv_hard_iface *hard_iface; int batman_count = 0; - seq_printf(seq, " %-15s %s (%11s) [%10s]\n", "Neighbor", - "last-seen", "throughput", "IF"); + 
seq_puts(seq, + " Neighbor last-seen ( throughput) [ IF]\n"); rcu_read_lock(); list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) { @@ -191,9 +191,8 @@ static void batadv_v_orig_print(struct batadv_priv *bat_priv, int batman_count = 0; u32 i; - seq_printf(seq, " %-15s %s (%11s) %17s [%10s]: %20s ...\n", - "Originator", "last-seen", "throughput", "Nexthop", - "outgoingIF", "Potential nexthops"); + seq_puts(seq, + " Originator last-seen ( throughput) Nexthop [outgoingIF]: Potential nexthops ...\n"); for (i = 0; i < hash->size; i++) { head = &hash->table[i]; diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c index 0a6c8b824a00..56bc971e404b 100644 --- a/net/batman-adv/bridge_loop_avoidance.c +++ b/net/batman-adv/bridge_loop_avoidance.c @@ -1815,8 +1815,8 @@ int batadv_bla_claim_table_seq_print_text(struct seq_file *seq, void *offset) "Claims announced for the mesh %s (orig %pM, group id %#.4x)\n", net_dev->name, primary_addr, ntohs(bat_priv->bla.claim_dest.group)); - seq_printf(seq, " %-17s %-5s %-17s [o] (%-6s)\n", - "Client", "VID", "Originator", "CRC"); + seq_puts(seq, + " Client VID Originator [o] (CRC )\n"); for (i = 0; i < hash->size; i++) { head = &hash->table[i]; @@ -1873,8 +1873,7 @@ int batadv_bla_backbone_table_seq_print_text(struct seq_file *seq, void *offset) "Backbones announced for the mesh %s (orig %pM, group id %#.4x)\n", net_dev->name, primary_addr, ntohs(bat_priv->bla.claim_dest.group)); - seq_printf(seq, " %-17s %-5s %-9s (%-6s)\n", - "Originator", "VID", "last seen", "CRC"); + seq_puts(seq, " Originator VID last seen (CRC )\n"); for (i = 0; i < hash->size; i++) { head = &hash->table[i]; diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c index e96d7c745b4a..ce574e9cef3b 100644 --- a/net/batman-adv/distributed-arp-table.c +++ b/net/batman-adv/distributed-arp-table.c @@ -814,8 +814,8 @@ int batadv_dat_cache_seq_print_text(struct seq_file *seq, void *offset) goto out; seq_printf(seq, "Distributed ARP Table (%s):\n", net_dev->name); - seq_printf(seq, " %-7s %-9s %4s %11s\n", "IPv4", - "MAC", "VID", "last-seen"); + seq_puts(seq, + " IPv4 MAC VID last-seen\n"); for (i = 0; i < hash->size; i++) { head = &hash->table[i]; diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c index 0b43e86328a5..29fd62839fac 100644 --- a/net/batman-adv/translation-table.c +++ b/net/batman-adv/translation-table.c @@ -1008,8 +1008,8 @@ int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset) seq_printf(seq, "Locally retrieved addresses (from %s) announced via TT (TTVN: %u):\n", net_dev->name, (u8)atomic_read(&bat_priv->tt.vn)); - seq_printf(seq, " %-13s %s %-8s %-9s (%-10s)\n", "Client", "VID", - "Flags", "Last seen", "CRC"); + seq_puts(seq, + " Client VID Flags Last seen (CRC )\n"); for (i = 0; i < hash->size; i++) { head = &hash->table[i]; @@ -1706,9 +1706,8 @@ int batadv_tt_global_seq_print_text(struct seq_file *seq, void *offset) seq_printf(seq, "Globally announced TT entries received via the mesh %s\n", net_dev->name); - seq_printf(seq, " %-13s %s %s %-15s %s (%-10s) %s\n", - "Client", "VID", "(TTVN)", "Originator", "(Curr TTVN)", - "CRC", "Flags"); + seq_puts(seq, + " Client VID (TTVN) Originator (Curr TTVN) (CRC ) Flags\n"); for (i = 0; i < hash->size; i++) { head = &hash->table[i]; From fb1f23eab6a9cd7d94d47b66f56df77b370a3954 Mon Sep 17 00:00:00 2001 From: Geliang Tang Date: Fri, 18 Dec 2015 23:33:31 +0800 Subject: [PATCH 1250/1649] batman-adv: use 
list_for_each_entry_safe Use list_for_each_entry_safe() instead of list_for_each_safe() to simplify the code. Signed-off-by: Geliang Tang Acked-by: Antonio Quartulli Reviewed-by: Sven Eckelmann Signed-off-by: Marek Lindner Signed-off-by: Antonio Quartulli --- net/batman-adv/icmp_socket.c | 22 +++++++++------------- 1 file changed, 9 insertions(+), 13 deletions(-) diff --git a/net/batman-adv/icmp_socket.c b/net/batman-adv/icmp_socket.c index 14d0013b387e..8a5889d134bc 100644 --- a/net/batman-adv/icmp_socket.c +++ b/net/batman-adv/icmp_socket.c @@ -104,25 +104,21 @@ static int batadv_socket_open(struct inode *inode, struct file *file) static int batadv_socket_release(struct inode *inode, struct file *file) { - struct batadv_socket_client *socket_client = file->private_data; - struct batadv_socket_packet *socket_packet; - struct list_head *list_pos, *list_pos_tmp; + struct batadv_socket_client *client = file->private_data; + struct batadv_socket_packet *packet, *tmp; - spin_lock_bh(&socket_client->lock); + spin_lock_bh(&client->lock); /* for all packets in the queue ... */ - list_for_each_safe(list_pos, list_pos_tmp, &socket_client->queue_list) { - socket_packet = list_entry(list_pos, - struct batadv_socket_packet, list); - - list_del(list_pos); - kfree(socket_packet); + list_for_each_entry_safe(packet, tmp, &client->queue_list, list) { + list_del(&packet->list); + kfree(packet); } - batadv_socket_client_hash[socket_client->index] = NULL; - spin_unlock_bh(&socket_client->lock); + batadv_socket_client_hash[client->index] = NULL; + spin_unlock_bh(&client->lock); - kfree(socket_client); + kfree(client); module_put(THIS_MODULE); return 0; From 4ba4bc0f74d32b201cecc33e153a4a18d5c5db1d Mon Sep 17 00:00:00 2001 From: Geliang Tang Date: Mon, 28 Dec 2015 23:43:37 +0800 Subject: [PATCH 1251/1649] batman-adv: use to_delayed_work Use to_delayed_work() instead of open-coding it. 
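As a minimal illustration of the helper this patch switches to (example_ctx and example_work are made-up names, not from batman-adv; the sketch assumes <linux/workqueue.h> in a kernel build):

#include <linux/workqueue.h>

struct example_ctx {
	struct delayed_work dwork;
	/* ... other per-object state ... */
};

static void example_work(struct work_struct *work)
{
	/* open-coded form this kind of patch replaces:
	 *   struct delayed_work *dwork =
	 *           container_of(work, struct delayed_work, work);
	 */
	struct delayed_work *dwork = to_delayed_work(work);
	struct example_ctx *ctx = container_of(dwork, struct example_ctx, dwork);

	/* do the deferred work, then re-arm if it is periodic */
	schedule_delayed_work(&ctx->dwork, HZ);
}

to_delayed_work() is just that container_of() wrapped in a typed helper, so the conversion is purely mechanical.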
Signed-off-by: Geliang Tang Reviewed-by: Sven Eckelmann Signed-off-by: Marek Lindner Signed-off-by: Antonio Quartulli --- net/batman-adv/bridge_loop_avoidance.c | 2 +- net/batman-adv/distributed-arp-table.c | 2 +- net/batman-adv/network-coding.c | 2 +- net/batman-adv/originator.c | 2 +- net/batman-adv/send.c | 4 ++-- net/batman-adv/translation-table.c | 2 +- 6 files changed, 7 insertions(+), 7 deletions(-) diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c index 56bc971e404b..cad8cb3a88f2 100644 --- a/net/batman-adv/bridge_loop_avoidance.c +++ b/net/batman-adv/bridge_loop_avoidance.c @@ -1303,7 +1303,7 @@ static void batadv_bla_periodic_work(struct work_struct *work) struct batadv_hard_iface *primary_if; int i; - delayed_work = container_of(work, struct delayed_work, work); + delayed_work = to_delayed_work(work); priv_bla = container_of(delayed_work, struct batadv_priv_bla, work); bat_priv = container_of(priv_bla, struct batadv_priv, bla); primary_if = batadv_primary_if_get_selected(bat_priv); diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c index ce574e9cef3b..33f273e5354b 100644 --- a/net/batman-adv/distributed-arp-table.c +++ b/net/batman-adv/distributed-arp-table.c @@ -152,7 +152,7 @@ static void batadv_dat_purge(struct work_struct *work) struct batadv_priv_dat *priv_dat; struct batadv_priv *bat_priv; - delayed_work = container_of(work, struct delayed_work, work); + delayed_work = to_delayed_work(work); priv_dat = container_of(delayed_work, struct batadv_priv_dat, work); bat_priv = container_of(priv_dat, struct batadv_priv, dat); diff --git a/net/batman-adv/network-coding.c b/net/batman-adv/network-coding.c index b41719b6487a..0d3bf4368e9b 100644 --- a/net/batman-adv/network-coding.c +++ b/net/batman-adv/network-coding.c @@ -714,7 +714,7 @@ static void batadv_nc_worker(struct work_struct *work) struct batadv_priv *bat_priv; unsigned long timeout; - delayed_work = container_of(work, struct delayed_work, work); + delayed_work = to_delayed_work(work); priv_nc = container_of(delayed_work, struct batadv_priv_nc, work); bat_priv = container_of(priv_nc, struct batadv_priv, nc); diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c index e4cbb0753e37..5b802f0dc24b 100644 --- a/net/batman-adv/originator.c +++ b/net/batman-adv/originator.c @@ -1222,7 +1222,7 @@ static void batadv_purge_orig(struct work_struct *work) struct delayed_work *delayed_work; struct batadv_priv *bat_priv; - delayed_work = container_of(work, struct delayed_work, work); + delayed_work = to_delayed_work(work); bat_priv = container_of(delayed_work, struct batadv_priv, orig_work); _batadv_purge_orig(bat_priv); queue_delayed_work(batadv_event_workqueue, diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c index 3ce06e0a91b1..20076b4c5e1d 100644 --- a/net/batman-adv/send.c +++ b/net/batman-adv/send.c @@ -552,7 +552,7 @@ static void batadv_send_outstanding_bcast_packet(struct work_struct *work) struct net_device *soft_iface; struct batadv_priv *bat_priv; - delayed_work = container_of(work, struct delayed_work, work); + delayed_work = to_delayed_work(work); forw_packet = container_of(delayed_work, struct batadv_forw_packet, delayed_work); soft_iface = forw_packet->if_incoming->soft_iface; @@ -604,7 +604,7 @@ void batadv_send_outstanding_bat_ogm_packet(struct work_struct *work) struct batadv_forw_packet *forw_packet; struct batadv_priv *bat_priv; - delayed_work = container_of(work, struct delayed_work, work); + delayed_work 
= to_delayed_work(work); forw_packet = container_of(delayed_work, struct batadv_forw_packet, delayed_work); bat_priv = netdev_priv(forw_packet->if_incoming->soft_iface); diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c index 29fd62839fac..d44ce84626c5 100644 --- a/net/batman-adv/translation-table.c +++ b/net/batman-adv/translation-table.c @@ -3226,7 +3226,7 @@ static void batadv_tt_purge(struct work_struct *work) struct batadv_priv_tt *priv_tt; struct batadv_priv *bat_priv; - delayed_work = container_of(work, struct delayed_work, work); + delayed_work = to_delayed_work(work); priv_tt = container_of(delayed_work, struct batadv_priv_tt, work); bat_priv = container_of(priv_tt, struct batadv_priv, tt); From 6d030de89f1beb85ce8e6b71f4fbfef8cabe37cf Mon Sep 17 00:00:00 2001 From: Antonio Quartulli Date: Fri, 11 Mar 2016 16:36:19 +0100 Subject: [PATCH 1252/1649] batman-adv: fix wrong names in kerneldoc Signed-off-by: Antonio Quartulli [sven@narfation.org: Fix additional names] Signed-off-by: Sven Eckelmann Signed-off-by: Marek Lindner --- net/batman-adv/bridge_loop_avoidance.c | 2 +- net/batman-adv/distributed-arp-table.c | 2 +- net/batman-adv/icmp_socket.c | 2 +- net/batman-adv/main.h | 3 ++- net/batman-adv/multicast.c | 11 ++++++----- net/batman-adv/originator.c | 2 +- net/batman-adv/packet.h | 2 +- net/batman-adv/soft-interface.c | 2 +- 8 files changed, 14 insertions(+), 12 deletions(-) diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c index cad8cb3a88f2..20b2fd9b3d72 100644 --- a/net/batman-adv/bridge_loop_avoidance.c +++ b/net/batman-adv/bridge_loop_avoidance.c @@ -1575,7 +1575,7 @@ int batadv_bla_is_backbone_gw(struct sk_buff *skb, } /** - * batadv_bla_init - free all bla structures + * batadv_bla_free - free all bla structures * @bat_priv: the bat priv with all the soft interface information * * for softinterface free or module unload diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c index 33f273e5354b..f0548b4f66f4 100644 --- a/net/batman-adv/distributed-arp-table.c +++ b/net/batman-adv/distributed-arp-table.c @@ -717,7 +717,7 @@ void batadv_dat_status_update(struct net_device *net_dev) } /** - * batadv_gw_tvlv_ogm_handler_v1 - process incoming dat tvlv container + * batadv_dat_tvlv_ogm_handler_v1 - process incoming dat tvlv container * @bat_priv: the bat priv with all the soft interface information * @orig: the orig_node of the ogm * @flags: flags indicating the tvlv state (see batadv_tvlv_handler_flags) diff --git a/net/batman-adv/icmp_socket.c b/net/batman-adv/icmp_socket.c index 8a5889d134bc..777aea10cd8f 100644 --- a/net/batman-adv/icmp_socket.c +++ b/net/batman-adv/icmp_socket.c @@ -333,7 +333,7 @@ err: } /** - * batadv_socket_receive_packet - schedule an icmp packet to be sent to + * batadv_socket_add_packet - schedule an icmp packet to be sent to * userspace on an icmp socket. 
* @socket_client: the socket this packet belongs to * @icmph: pointer to the header of the icmp packet diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h index 38e5587675cc..07a6042d0ad6 100644 --- a/net/batman-adv/main.h +++ b/net/batman-adv/main.h @@ -296,7 +296,8 @@ static inline bool batadv_compare_eth(const void *data1, const void *data2) } /** - * has_timed_out - compares current time (jiffies) and timestamp + timeout + * batadv_has_timed_out - compares current time (jiffies) and timestamp + + * timeout * @timestamp: base value to compare with (in jiffies) * @timeout: added to base value before comparing (in milliseconds) * diff --git a/net/batman-adv/multicast.c b/net/batman-adv/multicast.c index 8caa2c72efa3..c32f24fafe67 100644 --- a/net/batman-adv/multicast.c +++ b/net/batman-adv/multicast.c @@ -394,7 +394,8 @@ static int batadv_mcast_forw_mode_check(struct batadv_priv *bat_priv, } /** - * batadv_mcast_want_all_ip_count - count nodes with unspecific mcast interest + * batadv_mcast_forw_want_all_ip_count - count nodes with unspecific mcast + * interest * @bat_priv: the bat priv with all the soft interface information * @ethhdr: ethernet header of a packet * @@ -433,7 +434,7 @@ batadv_mcast_forw_tt_node_get(struct batadv_priv *bat_priv, } /** - * batadv_mcast_want_forw_ipv4_node_get - get a node with an ipv4 flag + * batadv_mcast_forw_ipv4_node_get - get a node with an ipv4 flag * @bat_priv: the bat priv with all the soft interface information * * Return: an orig_node which has the BATADV_MCAST_WANT_ALL_IPV4 flag set and @@ -460,7 +461,7 @@ batadv_mcast_forw_ipv4_node_get(struct batadv_priv *bat_priv) } /** - * batadv_mcast_want_forw_ipv6_node_get - get a node with an ipv6 flag + * batadv_mcast_forw_ipv6_node_get - get a node with an ipv6 flag * @bat_priv: the bat priv with all the soft interface information * * Return: an orig_node which has the BATADV_MCAST_WANT_ALL_IPV6 flag set @@ -487,7 +488,7 @@ batadv_mcast_forw_ipv6_node_get(struct batadv_priv *bat_priv) } /** - * batadv_mcast_want_forw_ip_node_get - get a node with an ipv4/ipv6 flag + * batadv_mcast_forw_ip_node_get - get a node with an ipv4/ipv6 flag * @bat_priv: the bat priv with all the soft interface information * @ethhdr: an ethernet header to determine the protocol family from * @@ -511,7 +512,7 @@ batadv_mcast_forw_ip_node_get(struct batadv_priv *bat_priv, } /** - * batadv_mcast_want_forw_unsnoop_node_get - get a node with an unsnoopable flag + * batadv_mcast_forw_unsnoop_node_get - get a node with an unsnoopable flag * @bat_priv: the bat priv with all the soft interface information * * Return: an orig_node which has the BATADV_MCAST_WANT_ALL_UNSNOOPABLES flag diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c index 5b802f0dc24b..155c1dd36c17 100644 --- a/net/batman-adv/originator.c +++ b/net/batman-adv/originator.c @@ -289,7 +289,7 @@ void batadv_neigh_node_put(struct batadv_neigh_node *neigh_node) } /** - * batadv_orig_node_get_router - router to the originator depending on iface + * batadv_orig_router_get - router to the originator depending on iface * @orig_node: the orig node for the router * @if_outgoing: the interface where the payload packet has been received or * the OGM should be sent to diff --git a/net/batman-adv/packet.h b/net/batman-adv/packet.h index 8a8d7ca1a5cf..0796dfdfbb60 100644 --- a/net/batman-adv/packet.h +++ b/net/batman-adv/packet.h @@ -501,7 +501,7 @@ struct batadv_coded_packet { #pragma pack() /** - * struct batadv_unicast_tvlv - generic unicast packet with tvlv 
payload + * struct batadv_unicast_tvlv_packet - generic unicast packet with tvlv payload * @packet_type: batman-adv packet type, part of the general header * @version: batman-adv protocol version, part of the genereal header * @ttl: time to live for this packet, part of the genereal header diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c index 0710379491bf..e158235ada06 100644 --- a/net/batman-adv/soft-interface.c +++ b/net/batman-adv/soft-interface.c @@ -539,7 +539,7 @@ struct batadv_softif_vlan *batadv_softif_vlan_get(struct batadv_priv *bat_priv, } /** - * batadv_create_vlan - allocate the needed resources for a new vlan + * batadv_softif_create_vlan - allocate the needed resources for a new vlan * @bat_priv: the bat priv with all the soft interface information * @vid: the VLAN identifier * From d3abce780dbb731ddb918bf3ba8bb60681f19e42 Mon Sep 17 00:00:00 2001 From: Sven Eckelmann Date: Wed, 9 Mar 2016 22:22:51 +0100 Subject: [PATCH 1253/1649] batman-adv: Fix checkpatch warning about 'unsigned' type checkpatch.pl warns about the use of 'unsigned' as a short form for 'unsigned int'. Signed-off-by: Sven Eckelmann Signed-off-by: Marek Lindner Signed-off-by: Antonio Quartulli --- net/batman-adv/fragmentation.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c index e6956d0746a2..65536db1bff7 100644 --- a/net/batman-adv/fragmentation.c +++ b/net/batman-adv/fragmentation.c @@ -407,8 +407,8 @@ static struct sk_buff *batadv_frag_create(struct sk_buff *skb, unsigned int mtu) { struct sk_buff *skb_fragment; - unsigned header_size = sizeof(*frag_head); - unsigned fragment_size = mtu - header_size; + unsigned int header_size = sizeof(*frag_head); + unsigned int fragment_size = mtu - header_size; skb_fragment = netdev_alloc_skb(NULL, mtu + ETH_HLEN); if (!skb_fragment) @@ -444,15 +444,15 @@ bool batadv_frag_send_packet(struct sk_buff *skb, struct batadv_hard_iface *primary_if = NULL; struct batadv_frag_packet frag_header; struct sk_buff *skb_fragment; - unsigned mtu = neigh_node->if_incoming->net_dev->mtu; - unsigned header_size = sizeof(frag_header); - unsigned max_fragment_size, max_packet_size; + unsigned int mtu = neigh_node->if_incoming->net_dev->mtu; + unsigned int header_size = sizeof(frag_header); + unsigned int max_fragment_size, max_packet_size; bool ret = false; /* To avoid merge and refragmentation at next-hops we never send * fragments larger than BATADV_FRAG_MAX_FRAG_SIZE */ - mtu = min_t(unsigned, mtu, BATADV_FRAG_MAX_FRAG_SIZE); + mtu = min_t(unsigned int, mtu, BATADV_FRAG_MAX_FRAG_SIZE); max_fragment_size = mtu - header_size; max_packet_size = max_fragment_size * BATADV_FRAG_MAX_FRAGMENTS; From 98a5b1d88c2b03be3ce8c0c0034523779dcc384f Mon Sep 17 00:00:00 2001 From: Sven Eckelmann Date: Mon, 22 Feb 2016 18:55:32 +0100 Subject: [PATCH 1254/1649] batman-adv: Fix kerneldoc for batadv_compare_claim Signed-off-by: Sven Eckelmann Signed-off-by: Marek Lindner Signed-off-by: Antonio Quartulli --- net/batman-adv/bridge_loop_avoidance.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c index 20b2fd9b3d72..60d33232bd10 100644 --- a/net/batman-adv/bridge_loop_avoidance.c +++ b/net/batman-adv/bridge_loop_avoidance.c @@ -120,7 +120,7 @@ static int batadv_compare_backbone_gw(const struct hlist_node *node, } /** - * batadv_compare_backbone_gw - compare address and vid of two claims + 
* batadv_compare_claim - compare address and vid of two claims * @node: list node of the first entry to compare * @data2: pointer to the second claims * From f298cb94d6cb9c103c377a370d78dc51689819f6 Mon Sep 17 00:00:00 2001 From: Sven Eckelmann Date: Sun, 28 Feb 2016 11:38:50 +0100 Subject: [PATCH 1255/1649] batman-adv: Add kernel-doc for batadv_interface_rx Signed-off-by: Sven Eckelmann Signed-off-by: Marek Lindner Signed-off-by: Antonio Quartulli --- net/batman-adv/soft-interface.c | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c index e158235ada06..d78c560852d7 100644 --- a/net/batman-adv/soft-interface.c +++ b/net/batman-adv/soft-interface.c @@ -381,6 +381,24 @@ end: return NETDEV_TX_OK; } +/** + * batadv_interface_rx - receive ethernet frame on local batman-adv interface + * @soft_iface: local interface which will receive the ethernet frame + * @skb: ethernet frame for @soft_iface + * @recv_if: interface on which the batman-adv packet was received + * @hdr_size: size of already parsed batman-adv header + * @orig_node: originator from which the batman-adv packet was sent + * + * Sends a ethernet frame to the receive path of the local @soft_iface. + * skb->data has still point to the batman-adv header with the size @hdr_size. + * The caller has to have parsed this header already and made sure that at least + * @hdr_size bytes are still available for pull in @skb. + * + * The packet may still get dropped. This can happen when the encapsulated + * ethernet frame is invalid or contains again an batman-adv packet. Also + * unicast packets will be dropped directly when it was sent between two + * isolated clients. + */ void batadv_interface_rx(struct net_device *soft_iface, struct sk_buff *skb, struct batadv_hard_iface *recv_if, int hdr_size, struct batadv_orig_node *orig_node) From 6fc77a548679c6736909da0636da0cf4d759ed64 Mon Sep 17 00:00:00 2001 From: Sven Eckelmann Date: Sat, 5 Mar 2016 15:56:01 +0100 Subject: [PATCH 1256/1649] batman-adv: Fix function names on new line starting with '*' Some really long function names in batman-adv require a newline between return type and the function name. This has led to some lines starting with *batadv_... This * belongs to the return type and thus should be on the same line as the return type. Signed-off-by: Sven Eckelmann Signed-off-by: Marek Lindner Signed-off-by: Antonio Quartulli --- net/batman-adv/bridge_loop_avoidance.c | 6 +++--- net/batman-adv/main.c | 8 ++++---- net/batman-adv/network-coding.c | 18 +++++++++--------- 3 files changed, 16 insertions(+), 16 deletions(-) diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c index 60d33232bd10..2c9aa671a49b 100644 --- a/net/batman-adv/bridge_loop_avoidance.c +++ b/net/batman-adv/bridge_loop_avoidance.c @@ -200,9 +200,9 @@ static void batadv_claim_put(struct batadv_bla_claim *claim) * * Return: claim if found or NULL otherwise. 
*/ -static struct batadv_bla_claim -*batadv_claim_hash_find(struct batadv_priv *bat_priv, - struct batadv_bla_claim *data) +static struct batadv_bla_claim * +batadv_claim_hash_find(struct batadv_priv *bat_priv, + struct batadv_bla_claim *data) { struct batadv_hashtable *hash = bat_priv->bla.claim_hash; struct hlist_head *head; diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c index d64ddb961979..78c05a91ae6f 100644 --- a/net/batman-adv/main.c +++ b/net/batman-adv/main.c @@ -663,8 +663,8 @@ static void batadv_tvlv_handler_put(struct batadv_tvlv_handler *tvlv_handler) * * Return: tvlv handler if found or NULL otherwise. */ -static struct batadv_tvlv_handler -*batadv_tvlv_handler_get(struct batadv_priv *bat_priv, u8 type, u8 version) +static struct batadv_tvlv_handler * +batadv_tvlv_handler_get(struct batadv_priv *bat_priv, u8 type, u8 version) { struct batadv_tvlv_handler *tvlv_handler_tmp, *tvlv_handler = NULL; @@ -722,8 +722,8 @@ static void batadv_tvlv_container_put(struct batadv_tvlv_container *tvlv) * * Return: tvlv container if found or NULL otherwise. */ -static struct batadv_tvlv_container -*batadv_tvlv_container_get(struct batadv_priv *bat_priv, u8 type, u8 version) +static struct batadv_tvlv_container * +batadv_tvlv_container_get(struct batadv_priv *bat_priv, u8 type, u8 version) { struct batadv_tvlv_container *tvlv_tmp, *tvlv = NULL; diff --git a/net/batman-adv/network-coding.c b/net/batman-adv/network-coding.c index 0d3bf4368e9b..1da8e0e1b18f 100644 --- a/net/batman-adv/network-coding.c +++ b/net/batman-adv/network-coding.c @@ -793,10 +793,10 @@ static bool batadv_can_nc_with_orig(struct batadv_priv *bat_priv, * * Return: the nc_node if found, NULL otherwise. */ -static struct batadv_nc_node -*batadv_nc_find_nc_node(struct batadv_orig_node *orig_node, - struct batadv_orig_node *orig_neigh_node, - bool in_coding) +static struct batadv_nc_node * +batadv_nc_find_nc_node(struct batadv_orig_node *orig_node, + struct batadv_orig_node *orig_neigh_node, + bool in_coding) { struct batadv_nc_node *nc_node, *nc_node_out = NULL; struct list_head *list; @@ -835,11 +835,11 @@ static struct batadv_nc_node * * Return: the nc_node if found or created, NULL in case of an error. */ -static struct batadv_nc_node -*batadv_nc_get_nc_node(struct batadv_priv *bat_priv, - struct batadv_orig_node *orig_node, - struct batadv_orig_node *orig_neigh_node, - bool in_coding) +static struct batadv_nc_node * +batadv_nc_get_nc_node(struct batadv_priv *bat_priv, + struct batadv_orig_node *orig_node, + struct batadv_orig_node *orig_neigh_node, + bool in_coding) { struct batadv_nc_node *nc_node; spinlock_t *lock; /* Used to lock list selected by "int in_coding" */ From 121bdca0d476c1018fa4b3c06ac008b7979acbd3 Mon Sep 17 00:00:00 2001 From: Simon Wunderlich Date: Fri, 11 Mar 2016 14:01:11 +0100 Subject: [PATCH 1257/1649] batman-adv: fix debuginfo macro style issue Structure initialization within the macros should follow the general coding style used in the kernel: put the initialization of the first variable and the closing brace on a separate line. 
Reported-by: Antonio Quartulli Signed-off-by: Simon Wunderlich [sven@narfation.org: fix conflicts with current version] Signed-off-by: Sven Eckelmann Signed-off-by: Marek Lindner Signed-off-by: Antonio Quartulli --- net/batman-adv/debugfs.c | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/net/batman-adv/debugfs.c b/net/batman-adv/debugfs.c index 48253cf8341b..aa315da83429 100644 --- a/net/batman-adv/debugfs.c +++ b/net/batman-adv/debugfs.c @@ -365,14 +365,17 @@ static int batadv_nc_nodes_open(struct inode *inode, struct file *file) #define BATADV_DEBUGINFO(_name, _mode, _open) \ struct batadv_debuginfo batadv_debuginfo_##_name = { \ - .attr = { .name = __stringify(_name), \ - .mode = _mode, }, \ - .fops = { .owner = THIS_MODULE, \ - .open = _open, \ - .read = seq_read, \ - .llseek = seq_lseek, \ - .release = single_release, \ - } \ + .attr = { \ + .name = __stringify(_name), \ + .mode = _mode, \ + }, \ + .fops = { \ + .owner = THIS_MODULE, \ + .open = _open, \ + .read = seq_read, \ + .llseek = seq_lseek, \ + .release = single_release, \ + }, \ } /* the following attributes are general and therefore they will be directly From efcc9d3069c982786f380bca8480a1aa2fd2721d Mon Sep 17 00:00:00 2001 From: Simon Wunderlich Date: Mon, 1 Feb 2016 15:21:37 +0100 Subject: [PATCH 1258/1649] batman-adv: move and restructure batadv_v_ogm_forward To match our code better to the protocol description of B.A.T.M.A.N. V, move batadv_v_ogm_forward() out into batadv_v_ogm_process_per_outif() and move all checks directly deciding whether the OGM should be forwarded into batadv_v_ogm_forward(). Signed-off-by: Simon Wunderlich Signed-off-by: Marek Lindner Signed-off-by: Antonio Quartulli --- net/batman-adv/bat_v_ogm.c | 110 +++++++++++++++++++++---------------- 1 file changed, 63 insertions(+), 47 deletions(-) diff --git a/net/batman-adv/bat_v_ogm.c b/net/batman-adv/bat_v_ogm.c index d9bcbe6e7d65..07c999734fba 100644 --- a/net/batman-adv/bat_v_ogm.c +++ b/net/batman-adv/bat_v_ogm.c @@ -347,10 +347,12 @@ static u32 batadv_v_forward_penalty(struct batadv_priv *bat_priv, } /** - * batadv_v_ogm_forward - forward an OGM to the given outgoing interface + * batadv_v_ogm_forward - check conditions and forward an OGM to the given + * outgoing interface * @bat_priv: the bat priv with all the soft interface information * @ogm_received: previously received OGM to be forwarded - * @throughput: throughput to announce, may vary per outgoing interface + * @orig_node: the originator which has been updated + * @neigh_node: the neigh_node through with the OGM has been received * @if_incoming: the interface on which this OGM was received on * @if_outgoing: the interface to which the OGM has to be forwarded to * @@ -359,28 +361,57 @@ static u32 batadv_v_forward_penalty(struct batadv_priv *bat_priv, */ static void batadv_v_ogm_forward(struct batadv_priv *bat_priv, const struct batadv_ogm2_packet *ogm_received, - u32 throughput, + struct batadv_orig_node *orig_node, + struct batadv_neigh_node *neigh_node, struct batadv_hard_iface *if_incoming, struct batadv_hard_iface *if_outgoing) { + struct batadv_neigh_ifinfo *neigh_ifinfo = NULL; + struct batadv_orig_ifinfo *orig_ifinfo = NULL; + struct batadv_neigh_node *router = NULL; struct batadv_ogm2_packet *ogm_forward; unsigned char *skb_buff; struct sk_buff *skb; size_t packet_len; u16 tvlv_len; + /* only forward for specific interfaces, not for the default one. 
*/ + if (if_outgoing == BATADV_IF_DEFAULT) + goto out; + + orig_ifinfo = batadv_orig_ifinfo_new(orig_node, if_outgoing); + if (!orig_ifinfo) + goto out; + + /* acquire possibly updated router */ + router = batadv_orig_router_get(orig_node, if_outgoing); + + /* strict rule: forward packets coming from the best next hop only */ + if (neigh_node != router) + goto out; + + /* don't forward the same seqno twice on one interface */ + if (orig_ifinfo->last_seqno_forwarded == ntohl(ogm_received->seqno)) + goto out; + + orig_ifinfo->last_seqno_forwarded = ntohl(ogm_received->seqno); + if (ogm_received->ttl <= 1) { batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "ttl exceeded\n"); - return; + goto out; } + neigh_ifinfo = batadv_neigh_ifinfo_get(neigh_node, if_outgoing); + if (!neigh_ifinfo) + goto out; + tvlv_len = ntohs(ogm_received->tvlv_len); packet_len = BATADV_OGM2_HLEN + tvlv_len; skb = netdev_alloc_skb_ip_align(if_outgoing->net_dev, ETH_HLEN + packet_len); if (!skb) - return; + goto out; skb_reserve(skb, ETH_HLEN); skb_buff = skb_put(skb, packet_len); @@ -388,15 +419,23 @@ static void batadv_v_ogm_forward(struct batadv_priv *bat_priv, /* apply forward penalty */ ogm_forward = (struct batadv_ogm2_packet *)skb_buff; - ogm_forward->throughput = htonl(throughput); + ogm_forward->throughput = htonl(neigh_ifinfo->bat_v.throughput); ogm_forward->ttl--; batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "Forwarding OGM2 packet on %s: throughput %u, ttl %u, received via %s\n", - if_outgoing->net_dev->name, throughput, ogm_forward->ttl, - if_incoming->net_dev->name); + if_outgoing->net_dev->name, ntohl(ogm_forward->throughput), + ogm_forward->ttl, if_incoming->net_dev->name); batadv_v_ogm_send_to_if(skb, if_outgoing); + +out: + if (orig_ifinfo) + batadv_orig_ifinfo_put(orig_ifinfo); + if (router) + batadv_neigh_node_put(router); + if (neigh_ifinfo) + batadv_neigh_ifinfo_put(neigh_ifinfo); } /** @@ -493,8 +532,10 @@ out: * @neigh_node: the neigh_node through with the OGM has been received * @if_incoming: the interface where this packet was received * @if_outgoing: the interface for which the packet should be considered + * + * Return: true if the packet should be forwarded, false otherwise */ -static void batadv_v_ogm_route_update(struct batadv_priv *bat_priv, +static bool batadv_v_ogm_route_update(struct batadv_priv *bat_priv, const struct ethhdr *ethhdr, const struct batadv_ogm2_packet *ogm2, struct batadv_orig_node *orig_node, @@ -503,14 +544,9 @@ static void batadv_v_ogm_route_update(struct batadv_priv *bat_priv, struct batadv_hard_iface *if_outgoing) { struct batadv_neigh_node *router = NULL; - struct batadv_neigh_ifinfo *neigh_ifinfo = NULL; struct batadv_orig_node *orig_neigh_node = NULL; - struct batadv_orig_ifinfo *orig_ifinfo = NULL; struct batadv_neigh_node *orig_neigh_router = NULL; - - neigh_ifinfo = batadv_neigh_ifinfo_get(neigh_node, if_outgoing); - if (!neigh_ifinfo) - goto out; + bool forward = false; orig_neigh_node = batadv_v_ogm_orig_get(bat_priv, ethhdr->h_source); if (!orig_neigh_node) @@ -529,47 +565,20 @@ static void batadv_v_ogm_route_update(struct batadv_priv *bat_priv, goto out; } - if (router) - batadv_neigh_node_put(router); - /* Update routes, and check if the OGM is from the best next hop */ batadv_v_ogm_orig_update(bat_priv, orig_node, neigh_node, ogm2, if_outgoing); - orig_ifinfo = batadv_orig_ifinfo_new(orig_node, if_outgoing); - if (!orig_ifinfo) - goto out; - - /* don't forward the same seqno twice on one interface */ - if (orig_ifinfo->last_seqno_forwarded == ntohl(ogm2->seqno)) - goto 
out; - - /* acquire possibly updated router */ - router = batadv_orig_router_get(orig_node, if_outgoing); - - /* strict rule: forward packets coming from the best next hop only */ - if (neigh_node != router) - goto out; - - /* only forward for specific interface, not for the default one. */ - if (if_outgoing != BATADV_IF_DEFAULT) { - orig_ifinfo->last_seqno_forwarded = ntohl(ogm2->seqno); - batadv_v_ogm_forward(bat_priv, ogm2, - neigh_ifinfo->bat_v.throughput, - if_incoming, if_outgoing); - } - + forward = true; out: - if (orig_ifinfo) - batadv_orig_ifinfo_put(orig_ifinfo); if (router) batadv_neigh_node_put(router); if (orig_neigh_router) batadv_neigh_node_put(orig_neigh_router); if (orig_neigh_node) batadv_orig_node_put(orig_neigh_node); - if (neigh_ifinfo) - batadv_neigh_ifinfo_put(neigh_ifinfo); + + return forward; } /** @@ -592,6 +601,7 @@ batadv_v_ogm_process_per_outif(struct batadv_priv *bat_priv, struct batadv_hard_iface *if_outgoing) { int seqno_age; + bool forward; /* first, update the metric with according sanity checks */ seqno_age = batadv_v_ogm_metric_update(bat_priv, ogm2, orig_node, @@ -610,8 +620,14 @@ batadv_v_ogm_process_per_outif(struct batadv_priv *bat_priv, ntohs(ogm2->tvlv_len)); /* if the metric update went through, update routes if needed */ - batadv_v_ogm_route_update(bat_priv, ethhdr, ogm2, orig_node, - neigh_node, if_incoming, if_outgoing); + forward = batadv_v_ogm_route_update(bat_priv, ethhdr, ogm2, orig_node, + neigh_node, if_incoming, + if_outgoing); + + /* if the routes have been processed correctly, check and forward */ + if (forward) + batadv_v_ogm_forward(bat_priv, ogm2, orig_node, neigh_node, + if_incoming, if_outgoing); } /** From 86de37c1fb16d71402773a3da05abe6cc7346f94 Mon Sep 17 00:00:00 2001 From: Simon Wunderlich Date: Mon, 1 Feb 2016 15:21:38 +0100 Subject: [PATCH 1259/1649] batman-adv: Merge batadv_v_ogm_orig_update into batadv_v_ogm_route_update Since batadv_v_ogm_orig_update() was only called from one place and the calling function became very short, merge these two functions together. This should also reflect the protocol description of B.A.T.M.A.N. V better. 
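The routing decision that is being folded into batadv_v_ogm_route_update() boils down to a single rule: keep the current best next hop unless the candidate neighbour either reports a strictly better throughput or is BATADV_OGM_MAX_ORIGDIFF sequence numbers ahead of it. As an illustrative sketch only (the helper name is invented here; u32/s32 and BATADV_OGM_MAX_ORIGDIFF come from the kernel and batman-adv headers):

    /* Return true if the route should switch from the current router to
     * the candidate neighbour, mirroring the merged check below.
     */
    static bool batadv_v_ogm_prefer_neigh(u32 neigh_last_seqno,
                                          u32 router_last_seqno,
                                          u32 neigh_throughput,
                                          u32 router_throughput)
    {
        s32 seq_diff = neigh_last_seqno - router_last_seqno;

        /* the router's information is too old: switch regardless */
        if (seq_diff >= BATADV_OGM_MAX_ORIGDIFF)
            return true;

        /* otherwise only switch for a strictly better throughput */
        return neigh_throughput > router_throughput;
    }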
Signed-off-by: Simon Wunderlich Signed-off-by: Marek Lindner Signed-off-by: Antonio Quartulli --- net/batman-adv/bat_v_ogm.c | 117 +++++++++++++++---------------------- 1 file changed, 46 insertions(+), 71 deletions(-) diff --git a/net/batman-adv/bat_v_ogm.c b/net/batman-adv/bat_v_ogm.c index 07c999734fba..4155fa57cf6d 100644 --- a/net/batman-adv/bat_v_ogm.c +++ b/net/batman-adv/bat_v_ogm.c @@ -233,73 +233,6 @@ void batadv_v_ogm_primary_iface_set(struct batadv_hard_iface *primary_iface) ether_addr_copy(ogm_packet->orig, primary_iface->net_dev->dev_addr); } -/** - * batadv_v_ogm_orig_update - update the originator status based on the received - * OGM - * @bat_priv: the bat priv with all the soft interface information - * @orig_node: the originator to update - * @neigh_node: the neighbour the OGM has been received from (to update) - * @ogm2: the received OGM - * @if_outgoing: the interface where this OGM is going to be forwarded through - */ -static void -batadv_v_ogm_orig_update(struct batadv_priv *bat_priv, - struct batadv_orig_node *orig_node, - struct batadv_neigh_node *neigh_node, - const struct batadv_ogm2_packet *ogm2, - struct batadv_hard_iface *if_outgoing) -{ - struct batadv_neigh_ifinfo *router_ifinfo = NULL, *neigh_ifinfo = NULL; - struct batadv_neigh_node *router = NULL; - s32 neigh_seq_diff; - u32 neigh_last_seqno; - u32 router_last_seqno; - u32 router_throughput, neigh_throughput; - - batadv_dbg(BATADV_DBG_BATMAN, bat_priv, - "Searching and updating originator entry of received packet\n"); - - /* if this neighbor already is our next hop there is nothing - * to change - */ - router = batadv_orig_router_get(orig_node, if_outgoing); - if (router == neigh_node) - goto out; - - /* don't consider neighbours with worse throughput. - * also switch route if this seqno is BATADV_V_MAX_ORIGDIFF newer than - * the last received seqno from our best next hop. - */ - if (router) { - router_ifinfo = batadv_neigh_ifinfo_get(router, if_outgoing); - neigh_ifinfo = batadv_neigh_ifinfo_get(neigh_node, if_outgoing); - - /* if these are not allocated, something is wrong. */ - if (!router_ifinfo || !neigh_ifinfo) - goto out; - - neigh_last_seqno = neigh_ifinfo->bat_v.last_seqno; - router_last_seqno = router_ifinfo->bat_v.last_seqno; - neigh_seq_diff = neigh_last_seqno - router_last_seqno; - router_throughput = router_ifinfo->bat_v.throughput; - neigh_throughput = neigh_ifinfo->bat_v.throughput; - - if ((neigh_seq_diff < BATADV_OGM_MAX_ORIGDIFF) && - (router_throughput >= neigh_throughput)) - goto out; - } - - batadv_update_route(bat_priv, orig_node, if_outgoing, neigh_node); - -out: - if (router_ifinfo) - batadv_neigh_ifinfo_put(router_ifinfo); - if (neigh_ifinfo) - batadv_neigh_ifinfo_put(neigh_ifinfo); - if (router) - batadv_neigh_node_put(router); -} - /** * batadv_v_forward_penalty - apply a penalty to the throughput metric forwarded * with B.A.T.M.A.N. 
V OGMs @@ -546,6 +479,11 @@ static bool batadv_v_ogm_route_update(struct batadv_priv *bat_priv, struct batadv_neigh_node *router = NULL; struct batadv_orig_node *orig_neigh_node = NULL; struct batadv_neigh_node *orig_neigh_router = NULL; + struct batadv_neigh_ifinfo *router_ifinfo = NULL, *neigh_ifinfo = NULL; + u32 router_throughput, neigh_throughput; + u32 router_last_seqno; + u32 neigh_last_seqno; + s32 neigh_seq_diff; bool forward = false; orig_neigh_node = batadv_v_ogm_orig_get(bat_priv, ethhdr->h_source); @@ -565,11 +503,44 @@ static bool batadv_v_ogm_route_update(struct batadv_priv *bat_priv, goto out; } - /* Update routes, and check if the OGM is from the best next hop */ - batadv_v_ogm_orig_update(bat_priv, orig_node, neigh_node, ogm2, - if_outgoing); - + /* Mark the OGM to be considered for forwarding, and update routes + * if needed. + */ forward = true; + + batadv_dbg(BATADV_DBG_BATMAN, bat_priv, + "Searching and updating originator entry of received packet\n"); + + /* if this neighbor already is our next hop there is nothing + * to change + */ + if (router == neigh_node) + goto out; + + /* don't consider neighbours with worse throughput. + * also switch route if this seqno is BATADV_V_MAX_ORIGDIFF newer than + * the last received seqno from our best next hop. + */ + if (router) { + router_ifinfo = batadv_neigh_ifinfo_get(router, if_outgoing); + neigh_ifinfo = batadv_neigh_ifinfo_get(neigh_node, if_outgoing); + + /* if these are not allocated, something is wrong. */ + if (!router_ifinfo || !neigh_ifinfo) + goto out; + + neigh_last_seqno = neigh_ifinfo->bat_v.last_seqno; + router_last_seqno = router_ifinfo->bat_v.last_seqno; + neigh_seq_diff = neigh_last_seqno - router_last_seqno; + router_throughput = router_ifinfo->bat_v.throughput; + neigh_throughput = neigh_ifinfo->bat_v.throughput; + + if ((neigh_seq_diff < BATADV_OGM_MAX_ORIGDIFF) && + (router_throughput >= neigh_throughput)) + goto out; + } + + batadv_update_route(bat_priv, orig_node, if_outgoing, neigh_node); out: if (router) batadv_neigh_node_put(router); @@ -577,6 +548,10 @@ out: batadv_neigh_node_put(orig_neigh_router); if (orig_neigh_node) batadv_orig_node_put(orig_neigh_node); + if (router_ifinfo) + batadv_neigh_ifinfo_put(router_ifinfo); + if (neigh_ifinfo) + batadv_neigh_ifinfo_put(neigh_ifinfo); return forward; } From 64ae74455371a40bc9f9c8325eb4c37f2978c95f Mon Sep 17 00:00:00 2001 From: Sven Eckelmann Date: Mon, 22 Feb 2016 22:56:34 +0100 Subject: [PATCH 1260/1649] batman-adv: Split batadv_iv_ogm_orig_del_if function batadv_iv_ogm_orig_del_if handles two different buffers bcast_own and bcast_own_sum which should be resized. The error handling two for allocating these buffers causes the complexity of this function. This can be avoided completely when the function is split into a main function handling the locking, freeing and call of the subfunctions. The subfunction can then independently handle the resize of the buffers. This also allows to easily reuse the old buffer (which always is larger) in case a smaller buffer could not be allocated without increasing the code complexity. 
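Both new helpers follow the same pattern: try to allocate a smaller buffer, fall back to shrinking the old (larger) buffer in place when the allocation fails, and free the old buffer only if a new one was actually used. A minimal userspace sketch of that pattern, using plain malloc()/memmove() instead of kmalloc_array() and an invented function name:

    #include <stdlib.h>
    #include <string.h>

    /* Drop entry 'del' from an array of 'count' elements of 'size' bytes
     * each.  Never fails: if a smaller buffer cannot be allocated, the
     * old, larger buffer is shrunk in place and reused.
     */
    static void *drop_array_entry(void *old, size_t count, size_t size,
                                  size_t del)
    {
        void *dst = malloc((count - 1) * size);

        if (!dst)
            dst = old;    /* reuse the old, larger buffer */

        /* part before the removed entry */
        memmove(dst, old, del * size);
        /* part after the removed entry */
        memmove((char *)dst + del * size,
                (const char *)old + (del + 1) * size,
                (count - 1 - del) * size);

        if (dst != old)
            free(old);

        return dst;
    }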
Signed-off-by: Sven Eckelmann Signed-off-by: Marek Lindner Signed-off-by: Antonio Quartulli --- net/batman-adv/bat_iv_ogm.c | 131 +++++++++++++++++++++++------------- 1 file changed, 84 insertions(+), 47 deletions(-) diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c index 682fcaec56e6..8c1710bba803 100644 --- a/net/batman-adv/bat_iv_ogm.c +++ b/net/batman-adv/bat_iv_ogm.c @@ -32,6 +32,7 @@ #include #include #include +#include #include #include #include @@ -174,6 +175,79 @@ unlock: return ret; } +/** + * batadv_iv_ogm_drop_bcast_own_entry - drop section of bcast_own + * @orig_node: the orig_node that has to be changed + * @max_if_num: the current amount of interfaces + * @del_if_num: the index of the interface being removed + */ +static void +batadv_iv_ogm_drop_bcast_own_entry(struct batadv_orig_node *orig_node, + int max_if_num, int del_if_num) +{ + size_t chunk_size; + size_t if_offset; + void *data_ptr; + + lockdep_assert_held(&orig_node->bat_iv.ogm_cnt_lock); + + chunk_size = sizeof(unsigned long) * BATADV_NUM_WORDS; + data_ptr = kmalloc_array(max_if_num, chunk_size, GFP_ATOMIC); + if (!data_ptr) + /* use old buffer when new one could not be allocated */ + data_ptr = orig_node->bat_iv.bcast_own; + + /* copy first part */ + memmove(data_ptr, orig_node->bat_iv.bcast_own, del_if_num * chunk_size); + + /* copy second part */ + if_offset = (del_if_num + 1) * chunk_size; + memmove((char *)data_ptr + del_if_num * chunk_size, + (uint8_t *)orig_node->bat_iv.bcast_own + if_offset, + (max_if_num - del_if_num) * chunk_size); + + /* bcast_own was shrunk down in new buffer; free old one */ + if (orig_node->bat_iv.bcast_own != data_ptr) { + kfree(orig_node->bat_iv.bcast_own); + orig_node->bat_iv.bcast_own = data_ptr; + } +} + +/** + * batadv_iv_ogm_drop_bcast_own_sum_entry - drop section of bcast_own_sum + * @orig_node: the orig_node that has to be changed + * @max_if_num: the current amount of interfaces + * @del_if_num: the index of the interface being removed + */ +static void +batadv_iv_ogm_drop_bcast_own_sum_entry(struct batadv_orig_node *orig_node, + int max_if_num, int del_if_num) +{ + size_t if_offset; + void *data_ptr; + + lockdep_assert_held(&orig_node->bat_iv.ogm_cnt_lock); + + data_ptr = kmalloc_array(max_if_num, sizeof(u8), GFP_ATOMIC); + if (!data_ptr) + /* use old buffer when new one could not be allocated */ + data_ptr = orig_node->bat_iv.bcast_own_sum; + + memmove(data_ptr, orig_node->bat_iv.bcast_own_sum, + del_if_num * sizeof(u8)); + + if_offset = (del_if_num + 1) * sizeof(u8); + memmove((char *)data_ptr + del_if_num * sizeof(u8), + orig_node->bat_iv.bcast_own_sum + if_offset, + (max_if_num - del_if_num) * sizeof(u8)); + + /* bcast_own_sum was shrunk down in new buffer; free old one */ + if (orig_node->bat_iv.bcast_own_sum != data_ptr) { + kfree(orig_node->bat_iv.bcast_own_sum); + orig_node->bat_iv.bcast_own_sum = data_ptr; + } +} + /** * batadv_iv_ogm_orig_del_if - change the private structures of the orig_node to * exclude the removed interface @@ -186,60 +260,23 @@ unlock: static int batadv_iv_ogm_orig_del_if(struct batadv_orig_node *orig_node, int max_if_num, int del_if_num) { - int ret = -ENOMEM; - size_t chunk_size, if_offset; - void *data_ptr = NULL; - spin_lock_bh(&orig_node->bat_iv.ogm_cnt_lock); - /* last interface was removed */ - if (max_if_num == 0) - goto free_bcast_own; - - chunk_size = sizeof(unsigned long) * BATADV_NUM_WORDS; - data_ptr = kmalloc_array(max_if_num, chunk_size, GFP_ATOMIC); - if (!data_ptr) - goto unlock; - - /* copy first part */ 
- memcpy(data_ptr, orig_node->bat_iv.bcast_own, del_if_num * chunk_size); - - /* copy second part */ - if_offset = (del_if_num + 1) * chunk_size; - memcpy((char *)data_ptr + del_if_num * chunk_size, - (uint8_t *)orig_node->bat_iv.bcast_own + if_offset, - (max_if_num - del_if_num) * chunk_size); - -free_bcast_own: - kfree(orig_node->bat_iv.bcast_own); - orig_node->bat_iv.bcast_own = data_ptr; - - if (max_if_num == 0) - goto free_own_sum; - - data_ptr = kmalloc_array(max_if_num, sizeof(u8), GFP_ATOMIC); - if (!data_ptr) { + if (max_if_num == 0) { kfree(orig_node->bat_iv.bcast_own); - goto unlock; + kfree(orig_node->bat_iv.bcast_own_sum); + orig_node->bat_iv.bcast_own = NULL; + orig_node->bat_iv.bcast_own_sum = NULL; + } else { + batadv_iv_ogm_drop_bcast_own_entry(orig_node, max_if_num, + del_if_num); + batadv_iv_ogm_drop_bcast_own_sum_entry(orig_node, max_if_num, + del_if_num); } - memcpy(data_ptr, orig_node->bat_iv.bcast_own_sum, - del_if_num * sizeof(u8)); - - if_offset = (del_if_num + 1) * sizeof(u8); - memcpy((char *)data_ptr + del_if_num * sizeof(u8), - orig_node->bat_iv.bcast_own_sum + if_offset, - (max_if_num - del_if_num) * sizeof(u8)); - -free_own_sum: - kfree(orig_node->bat_iv.bcast_own_sum); - orig_node->bat_iv.bcast_own_sum = data_ptr; - - ret = 0; -unlock: spin_unlock_bh(&orig_node->bat_iv.ogm_cnt_lock); - return ret; + return 0; } /** From f4e7bd81b1630018952187e5bd731755a6536a61 Mon Sep 17 00:00:00 2001 From: Joachim Eastwood Date: Sun, 1 May 2016 22:58:19 +0200 Subject: [PATCH 1261/1649] stmmac: let remove/resume/suspend functions take device pointer Change stmmac_remove/resume/suspend to take a device pointer so they can be used directly by drivers that doesn't need to perform anything device specific. This lets us remove the PCI pm functions and later simplifiy the platform drivers. Signed-off-by: Joachim Eastwood Tested-by: Marek Vasut Signed-off-by: David S. 
Miller --- drivers/net/ethernet/stmicro/stmmac/stmmac.h | 6 ++--- .../net/ethernet/stmicro/stmmac/stmmac_main.c | 15 +++++++----- .../net/ethernet/stmicro/stmmac/stmmac_pci.c | 24 ++----------------- .../ethernet/stmicro/stmmac/stmmac_platform.c | 6 ++--- 4 files changed, 17 insertions(+), 34 deletions(-) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h index ff6750621ff7..59ae6088cd22 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h @@ -148,9 +148,9 @@ void stmmac_set_ethtool_ops(struct net_device *netdev); int stmmac_ptp_register(struct stmmac_priv *priv); void stmmac_ptp_unregister(struct stmmac_priv *priv); -int stmmac_resume(struct net_device *ndev); -int stmmac_suspend(struct net_device *ndev); -int stmmac_dvr_remove(struct net_device *ndev); +int stmmac_resume(struct device *dev); +int stmmac_suspend(struct device *dev); +int stmmac_dvr_remove(struct device *dev); int stmmac_dvr_probe(struct device *device, struct plat_stmmacenet_data *plat_dat, struct stmmac_resources *res); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index b87edb72e80a..fd5ab7bfdb76 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -3350,12 +3350,13 @@ EXPORT_SYMBOL_GPL(stmmac_dvr_probe); /** * stmmac_dvr_remove - * @ndev: net device pointer + * @dev: device pointer * Description: this function resets the TX/RX processes, disables the MAC RX/TX * changes the link status, releases the DMA descriptor rings. */ -int stmmac_dvr_remove(struct net_device *ndev) +int stmmac_dvr_remove(struct device *dev) { + struct net_device *ndev = dev_get_drvdata(dev); struct stmmac_priv *priv = netdev_priv(ndev); pr_info("%s:\n\tremoving driver", __func__); @@ -3381,13 +3382,14 @@ EXPORT_SYMBOL_GPL(stmmac_dvr_remove); /** * stmmac_suspend - suspend callback - * @ndev: net device pointer + * @dev: device pointer * Description: this is the function to suspend the device and it is called * by the platform driver to stop the network queue, release the resources, * program the PMT register (for WoL), clean and release driver resources. */ -int stmmac_suspend(struct net_device *ndev) +int stmmac_suspend(struct device *dev) { + struct net_device *ndev = dev_get_drvdata(dev); struct stmmac_priv *priv = netdev_priv(ndev); unsigned long flags; @@ -3430,12 +3432,13 @@ EXPORT_SYMBOL_GPL(stmmac_suspend); /** * stmmac_resume - resume callback - * @ndev: net device pointer + * @dev: device pointer * Description: when resume this function is invoked to setup the DMA and CORE * in a usable state. 
*/ -int stmmac_resume(struct net_device *ndev) +int stmmac_resume(struct device *dev) { + struct net_device *ndev = dev_get_drvdata(dev); struct stmmac_priv *priv = netdev_priv(ndev); unsigned long flags; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c index ae4388735b7f..56c8a2342c14 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c @@ -231,30 +231,10 @@ static int stmmac_pci_probe(struct pci_dev *pdev, */ static void stmmac_pci_remove(struct pci_dev *pdev) { - struct net_device *ndev = pci_get_drvdata(pdev); - - stmmac_dvr_remove(ndev); + stmmac_dvr_remove(&pdev->dev); } -#ifdef CONFIG_PM_SLEEP -static int stmmac_pci_suspend(struct device *dev) -{ - struct pci_dev *pdev = to_pci_dev(dev); - struct net_device *ndev = pci_get_drvdata(pdev); - - return stmmac_suspend(ndev); -} - -static int stmmac_pci_resume(struct device *dev) -{ - struct pci_dev *pdev = to_pci_dev(dev); - struct net_device *ndev = pci_get_drvdata(pdev); - - return stmmac_resume(ndev); -} -#endif - -static SIMPLE_DEV_PM_OPS(stmmac_pm_ops, stmmac_pci_suspend, stmmac_pci_resume); +static SIMPLE_DEV_PM_OPS(stmmac_pm_ops, stmmac_suspend, stmmac_resume); #define STMMAC_VENDOR_ID 0x700 #define STMMAC_QUARK_ID 0x0937 diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index effaa4ff5ab7..409db913b117 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -386,7 +386,7 @@ int stmmac_pltfr_remove(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); struct stmmac_priv *priv = netdev_priv(ndev); - int ret = stmmac_dvr_remove(ndev); + int ret = stmmac_dvr_remove(&pdev->dev); if (priv->plat->exit) priv->plat->exit(pdev, priv->plat->bsp_priv); @@ -410,7 +410,7 @@ static int stmmac_pltfr_suspend(struct device *dev) struct stmmac_priv *priv = netdev_priv(ndev); struct platform_device *pdev = to_platform_device(dev); - ret = stmmac_suspend(ndev); + ret = stmmac_suspend(dev); if (priv->plat->exit) priv->plat->exit(pdev, priv->plat->bsp_priv); @@ -433,7 +433,7 @@ static int stmmac_pltfr_resume(struct device *dev) if (priv->plat->init) priv->plat->init(pdev, priv->plat->bsp_priv); - return stmmac_resume(ndev); + return stmmac_resume(dev); } #endif /* CONFIG_PM_SLEEP */ From 56868deece922283b367d08f1d647726d5b307d9 Mon Sep 17 00:00:00 2001 From: Joachim Eastwood Date: Sun, 1 May 2016 22:58:20 +0200 Subject: [PATCH 1262/1649] stmmac: dwmac-socfpga: add PM ops and resume function Implement the needed PM callbacks in the driver instead of relying on the init/exit hooks in stmmac_platform. This gives the driver more flexibility in how the code is organized. Eventually the init/exit callbacks will be deprecated in favor of the standard PM callbacks and driver remove function. Signed-off-by: Joachim Eastwood Tested-by: Marek Vasut Signed-off-by: David S. 
Miller --- .../ethernet/stmicro/stmmac/dwmac-socfpga.c | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c index 784eb53361b5..789013a78295 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c @@ -261,7 +261,6 @@ static int socfpga_dwmac_probe(struct platform_device *pdev) } plat_dat->bsp_priv = dwmac; - plat_dat->init = socfpga_dwmac_init; plat_dat->fix_mac_speed = socfpga_dwmac_fix_mac_speed; ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); @@ -271,6 +270,21 @@ static int socfpga_dwmac_probe(struct platform_device *pdev) return ret; } +#ifdef CONFIG_PM_SLEEP +static int socfpga_dwmac_resume(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct net_device *ndev = dev_get_drvdata(dev); + struct stmmac_priv *priv = netdev_priv(ndev); + + socfpga_dwmac_init(pdev, priv->plat->bsp_priv); + + return stmmac_resume(dev); +} +#endif /* CONFIG_PM_SLEEP */ + +SIMPLE_DEV_PM_OPS(socfpga_dwmac_pm_ops, stmmac_suspend, socfpga_dwmac_resume); + static const struct of_device_id socfpga_dwmac_match[] = { { .compatible = "altr,socfpga-stmmac" }, { } @@ -282,7 +296,7 @@ static struct platform_driver socfpga_dwmac_driver = { .remove = stmmac_pltfr_remove, .driver = { .name = "socfpga-dwmac", - .pm = &stmmac_pltfr_pm_ops, + .pm = &socfpga_dwmac_pm_ops, .of_match_table = socfpga_dwmac_match, }, }; From 70cb136f7730830aa1134925a941e3ad96e3a846 Mon Sep 17 00:00:00 2001 From: Joachim Eastwood Date: Sun, 1 May 2016 22:58:21 +0200 Subject: [PATCH 1263/1649] stmmac: dwmac-socfpga: keep a copy of stmmac_rst in driver priv data The dwmac-socfpga driver needs to control the reset usually managed by the core driver to set the PHY mode. Take a copy of the reset handle from core priv data so it can be used by the driver later. This also allow us to move reset handling into socfpga_dwmac_setup() where the code that needs it is located. Signed-off-by: Joachim Eastwood Tested-by: Marek Vasut Signed-off-by: David S. 
Miller --- .../ethernet/stmicro/stmmac/dwmac-socfpga.c | 33 ++++++++++++------- 1 file changed, 22 insertions(+), 11 deletions(-) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c index 789013a78295..ba0b7934cc95 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c @@ -49,6 +49,7 @@ struct socfpga_dwmac { u32 reg_shift; struct device *dev; struct regmap *sys_mgr_base_addr; + struct reset_control *stmmac_rst; void __iomem *splitter_base; bool f2h_ptp_ref_clk; }; @@ -164,6 +165,10 @@ static int socfpga_dwmac_setup(struct socfpga_dwmac *dwmac) if (dwmac->splitter_base) val = SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_GMII_MII; + /* Assert reset to the enet controller before changing the phy mode */ + if (dwmac->stmmac_rst) + reset_control_assert(dwmac->stmmac_rst); + regmap_read(sys_mgr_base_addr, reg_offset, &ctrl); ctrl &= ~(SYSMGR_EMACGRP_CTRL_PHYSEL_MASK << reg_shift); ctrl |= val << reg_shift; @@ -181,6 +186,12 @@ static int socfpga_dwmac_setup(struct socfpga_dwmac *dwmac) regmap_write(sys_mgr_base_addr, reg_offset, ctrl); + /* Deassert reset for the phy configuration to be sampled by + * the enet controller, and operation to start in requested mode + */ + if (dwmac->stmmac_rst) + reset_control_deassert(dwmac->stmmac_rst); + return 0; } @@ -198,21 +209,11 @@ static int socfpga_dwmac_init(struct platform_device *pdev, void *priv) if (!stpriv) return -EINVAL; - /* Assert reset to the enet controller before changing the phy mode */ - if (stpriv->stmmac_rst) - reset_control_assert(stpriv->stmmac_rst); - /* Setup the phy mode in the system manager registers according to * devicetree configuration */ ret = socfpga_dwmac_setup(dwmac); - /* Deassert reset for the phy configuration to be sampled by - * the enet controller, and operation to start in requested mode - */ - if (stpriv->stmmac_rst) - reset_control_deassert(stpriv->stmmac_rst); - /* Before the enet controller is suspended, the phy is suspended. * This causes the phy clock to be gated. The enet controller is * resumed before the phy, so the clock is still gated "off" when @@ -264,8 +265,18 @@ static int socfpga_dwmac_probe(struct platform_device *pdev) plat_dat->fix_mac_speed = socfpga_dwmac_fix_mac_speed; ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); - if (!ret) + if (!ret) { + struct net_device *ndev = platform_get_drvdata(pdev); + struct stmmac_priv *stpriv = netdev_priv(ndev); + + /* The socfpga driver needs to control the stmmac reset to + * set the phy mode. Create a copy of the core reset handel + * so it can be used by the driver later. + */ + dwmac->stmmac_rst = stpriv->stmmac_rst; + ret = socfpga_dwmac_init(pdev, dwmac); + } return ret; } From 537372472415b7b75856561d23891a0bf3477ea4 Mon Sep 17 00:00:00 2001 From: Joachim Eastwood Date: Sun, 1 May 2016 22:58:22 +0200 Subject: [PATCH 1264/1649] stmmac: dwmac-socfpga: call phy_resume() only in resume callback Calling phy_resume() should only be need during driver resume to workaround a hardware errata. Signed-off-by: Joachim Eastwood Tested-by: Marek Vasut Signed-off-by: David S. 
Miller --- .../ethernet/stmicro/stmmac/dwmac-socfpga.c | 50 +++++++------------ 1 file changed, 19 insertions(+), 31 deletions(-) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c index ba0b7934cc95..ba49d8c14958 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c @@ -198,41 +198,11 @@ static int socfpga_dwmac_setup(struct socfpga_dwmac *dwmac) static int socfpga_dwmac_init(struct platform_device *pdev, void *priv) { struct socfpga_dwmac *dwmac = priv; - struct net_device *ndev = platform_get_drvdata(pdev); - struct stmmac_priv *stpriv = NULL; - int ret = 0; - - if (!ndev) - return -EINVAL; - - stpriv = netdev_priv(ndev); - if (!stpriv) - return -EINVAL; /* Setup the phy mode in the system manager registers according to * devicetree configuration */ - ret = socfpga_dwmac_setup(dwmac); - - /* Before the enet controller is suspended, the phy is suspended. - * This causes the phy clock to be gated. The enet controller is - * resumed before the phy, so the clock is still gated "off" when - * the enet controller is resumed. This code makes sure the phy - * is "resumed" before reinitializing the enet controller since - * the enet controller depends on an active phy clock to complete - * a DMA reset. A DMA reset will "time out" if executed - * with no phy clock input on the Synopsys enet controller. - * Verified through Synopsys Case #8000711656. - * - * Note that the phy clock is also gated when the phy is isolated. - * Phy "suspend" and "isolate" controls are located in phy basic - * control register 0, and can be modified by the phy driver - * framework. - */ - if (stpriv->phydev) - phy_resume(stpriv->phydev); - - return ret; + return socfpga_dwmac_setup(dwmac); } static int socfpga_dwmac_probe(struct platform_device *pdev) @@ -290,6 +260,24 @@ static int socfpga_dwmac_resume(struct device *dev) socfpga_dwmac_init(pdev, priv->plat->bsp_priv); + /* Before the enet controller is suspended, the phy is suspended. + * This causes the phy clock to be gated. The enet controller is + * resumed before the phy, so the clock is still gated "off" when + * the enet controller is resumed. This code makes sure the phy + * is "resumed" before reinitializing the enet controller since + * the enet controller depends on an active phy clock to complete + * a DMA reset. A DMA reset will "time out" if executed + * with no phy clock input on the Synopsys enet controller. + * Verified through Synopsys Case #8000711656. + * + * Note that the phy clock is also gated when the phy is isolated. + * Phy "suspend" and "isolate" controls are located in phy basic + * control register 0, and can be modified by the phy driver + * framework. + */ + if (priv->phydev) + phy_resume(priv->phydev); + return stmmac_resume(dev); } #endif /* CONFIG_PM_SLEEP */ From 0f400a87dc59be9b485ffa820ca9ed904cacfcbb Mon Sep 17 00:00:00 2001 From: Joachim Eastwood Date: Sun, 1 May 2016 22:58:23 +0200 Subject: [PATCH 1265/1649] stmmac: dwmac-socfpga: kill init() and rename setup() to set_phy_mode() Remove old init callback which now contains only a call to socfpga_dwmac_setup(). Also rename socfpga_dwmac_setup() to indicate what the function really does. Signed-off-by: Joachim Eastwood Tested-by: Marek Vasut Signed-off-by: David S. 
Miller --- .../net/ethernet/stmicro/stmmac/dwmac-socfpga.c | 17 +++-------------- 1 file changed, 3 insertions(+), 14 deletions(-) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c index ba49d8c14958..cd9764a6a36f 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c @@ -136,7 +136,7 @@ static int socfpga_dwmac_parse_data(struct socfpga_dwmac *dwmac, struct device * return 0; } -static int socfpga_dwmac_setup(struct socfpga_dwmac *dwmac) +static int socfpga_dwmac_set_phy_mode(struct socfpga_dwmac *dwmac) { struct regmap *sys_mgr_base_addr = dwmac->sys_mgr_base_addr; int phymode = dwmac->interface; @@ -195,16 +195,6 @@ static int socfpga_dwmac_setup(struct socfpga_dwmac *dwmac) return 0; } -static int socfpga_dwmac_init(struct platform_device *pdev, void *priv) -{ - struct socfpga_dwmac *dwmac = priv; - - /* Setup the phy mode in the system manager registers according to - * devicetree configuration - */ - return socfpga_dwmac_setup(dwmac); -} - static int socfpga_dwmac_probe(struct platform_device *pdev) { struct plat_stmmacenet_data *plat_dat; @@ -245,7 +235,7 @@ static int socfpga_dwmac_probe(struct platform_device *pdev) */ dwmac->stmmac_rst = stpriv->stmmac_rst; - ret = socfpga_dwmac_init(pdev, dwmac); + ret = socfpga_dwmac_set_phy_mode(dwmac); } return ret; @@ -254,11 +244,10 @@ static int socfpga_dwmac_probe(struct platform_device *pdev) #ifdef CONFIG_PM_SLEEP static int socfpga_dwmac_resume(struct device *dev) { - struct platform_device *pdev = to_platform_device(dev); struct net_device *ndev = dev_get_drvdata(dev); struct stmmac_priv *priv = netdev_priv(ndev); - socfpga_dwmac_init(pdev, priv->plat->bsp_priv); + socfpga_dwmac_set_phy_mode(priv->plat->bsp_priv); /* Before the enet controller is suspended, the phy is suspended. * This causes the phy clock to be gated. The enet controller is From c0f31a05f5b405b67e08079d218ac42b3f0a1786 Mon Sep 17 00:00:00 2001 From: Manish Chopra Date: Mon, 2 May 2016 06:16:04 -0400 Subject: [PATCH 1266/1649] qed: Apply tunnel configurations after PF start Configure and enable various tunnels on the adapter after PF start. This change was missed as a part of 'commit 464f664501816ef5fbbc00b8de96f4ae5a1c9325 ("qed: Add infrastructure support for tunneling")' Signed-off-by: Manish Chopra Signed-off-by: Yuval Mintz Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed_sp_commands.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c index 9f9bc10d0f6c..e1e2344b1906 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c @@ -362,7 +362,15 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn, sb, sb_index, p_ramrod->outer_tag); - return qed_spq_post(p_hwfn, p_ent, NULL); + rc = qed_spq_post(p_hwfn, p_ent, NULL); + + if (p_tunn) { + qed_set_hw_tunn_mode(p_hwfn, p_hwfn->p_main_ptt, + p_tunn->tunn_mode); + p_hwfn->cdev->tunn_mode = p_tunn->tunn_mode; + } + + return rc; } /* Set pf update ramrod command params */ From 8caf115c726e87526c4a1cbd8ba616d6a022ccd8 Mon Sep 17 00:00:00 2001 From: Oliver Neukum Date: Mon, 2 May 2016 13:06:12 +0200 Subject: [PATCH 1267/1649] brcm80211: correct speed testing Allow for SS+ USB Signed-off-by: Oliver Neukum Signed-off-by: David S. 
Miller --- drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c index aa0b2a192faa..98b15a9a2779 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c @@ -1368,7 +1368,9 @@ brcmf_usb_probe(struct usb_interface *intf, const struct usb_device_id *id) devinfo->ifnum = desc->bInterfaceNumber; - if (usb->speed == USB_SPEED_SUPER) + if (usb->speed == USB_SPEED_SUPER_PLUS) + brcmf_dbg(USB, "Broadcom super speed plus USB WLAN interface detected\n"); + else if (usb->speed == USB_SPEED_SUPER) brcmf_dbg(USB, "Broadcom super speed USB WLAN interface detected\n"); else if (usb->speed == USB_SPEED_HIGH) brcmf_dbg(USB, "Broadcom high speed USB WLAN interface detected\n"); From ea0798423c60a1d34c75e5fedae009aee0a8de5f Mon Sep 17 00:00:00 2001 From: Oliver Neukum Date: Mon, 2 May 2016 13:06:13 +0200 Subject: [PATCH 1268/1649] usbnet: correct speed testing Allow for SS+ USB Signed-off-by: Oliver Neukum Signed-off-by: David S. Miller --- drivers/net/usb/usbnet.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index 10798128c03f..4837854fd43c 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c @@ -356,6 +356,7 @@ void usbnet_update_max_qlen(struct usbnet *dev) dev->tx_qlen = MAX_QUEUE_MEMORY / dev->hard_mtu; break; case USB_SPEED_SUPER: + case USB_SPEED_SUPER_PLUS: /* * Not take default 5ms qlen for super speed HC to * save memory, and iperf tests show 2.5ms qlen can From 2b84af94a3932b1dcb716d1898edb18b7325dbea Mon Sep 17 00:00:00 2001 From: Oliver Neukum Date: Mon, 2 May 2016 13:06:14 +0200 Subject: [PATCH 1269/1649] rtl8152: correct speed testing Allow for SS+ USB Signed-off-by: Oliver Neukum Signed-off-by: David S. Miller --- drivers/net/usb/r8152.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index d1f78c2c97aa..3f9f6ed3eec4 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -3366,7 +3366,7 @@ static void r8153_init(struct r8152 *tp) ocp_write_word(tp, MCU_TYPE_PLA, PLA_LED_FEATURE, ocp_data); ocp_data = FIFO_EMPTY_1FB | ROK_EXIT_LPM; - if (tp->version == RTL_VER_04 && tp->udev->speed != USB_SPEED_SUPER) + if (tp->version == RTL_VER_04 && tp->udev->speed < USB_SPEED_SUPER) ocp_data |= LPM_TIMER_500MS; else ocp_data |= LPM_TIMER_500US; @@ -4211,6 +4211,7 @@ static int rtl8152_probe(struct usb_interface *intf, switch (udev->speed) { case USB_SPEED_SUPER: + case USB_SPEED_SUPER_PLUS: tp->coalesce = COALESCE_SUPER; break; case USB_SPEED_HIGH: From 7c8bcfb1255fe9d929c227d67bdcd84430fd200b Mon Sep 17 00:00:00 2001 From: Jon Paul Maloy Date: Mon, 2 May 2016 11:58:45 -0400 Subject: [PATCH 1270/1649] tipc: re-enable compensation for socket receive buffer double counting In the refactoring commit d570d86497ee ("tipc: enqueue arrived buffers in socket in separate function") we did by accident replace the test if (sk->sk_backlog.len == 0) atomic_set(&tsk->dupl_rcvcnt, 0); with if (sk->sk_backlog.len) atomic_set(&tsk->dupl_rcvcnt, 0); This effectively disables the compensation we have for the double receive buffer accounting that occurs temporarily when buffers are moved from the backlog to the socket receive queue. 
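With the condition inverted back, the intended sequence in tipc_sk_enqueue() reads as follows (simplified excerpt of the code as it looks after this fix; see the hunk below):

    /* dupl_rcvcnt compensates for bytes that are momentarily counted both
     * in the backlog and in the receive queue, so it may only be cleared
     * once the backlog has fully drained.
     */
    dcnt = &tipc_sk(sk)->dupl_rcvcnt;
    if (!sk->sk_backlog.len)
        atomic_set(dcnt, 0);
    lim = rcvbuf_limit(sk, skb) + atomic_read(dcnt);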
Until now, this has gone unnoticed because of the large receive buffer limits we are applying, but becomes indispensable when we reduce this buffer limit later in this series. We now fix this by inverting the mentioned condition. Acked-by: Ying Xue Signed-off-by: Jon Maloy Signed-off-by: David S. Miller --- net/tipc/socket.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/tipc/socket.c b/net/tipc/socket.c index 3eeb50a27b89..d37a9401e182 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c @@ -1748,7 +1748,7 @@ static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk, /* Try backlog, compensating for double-counted bytes */ dcnt = &tipc_sk(sk)->dupl_rcvcnt; - if (sk->sk_backlog.len) + if (!sk->sk_backlog.len) atomic_set(dcnt, 0); lim = rcvbuf_limit(sk, skb) + atomic_read(dcnt); if (likely(!sk_add_backlog(sk, skb, lim))) From 60020e1857042387cdcd4cd6680a9e5496213379 Mon Sep 17 00:00:00 2001 From: Jon Paul Maloy Date: Mon, 2 May 2016 11:58:46 -0400 Subject: [PATCH 1271/1649] tipc: propagate peer node capabilities to socket layer During neighbor discovery, nodes advertise their capabilities as a bit map in a dedicated 16-bit field in the discovery message header. This bit map has so far only be stored in the node structure on the peer nodes, but we now see the need to keep a copy even in the socket structure. This commit adds this functionality. Acked-by: Ying Xue Signed-off-by: Jon Maloy Signed-off-by: David S. Miller --- net/tipc/node.c | 21 +++++++++++++++++++-- net/tipc/node.h | 1 + net/tipc/socket.c | 2 ++ 3 files changed, 22 insertions(+), 2 deletions(-) diff --git a/net/tipc/node.c b/net/tipc/node.c index c29915688230..29cc85319327 100644 --- a/net/tipc/node.c +++ b/net/tipc/node.c @@ -1,7 +1,7 @@ /* * net/tipc/node.c: TIPC node management routines * - * Copyright (c) 2000-2006, 2012-2015, Ericsson AB + * Copyright (c) 2000-2006, 2012-2016, Ericsson AB * Copyright (c) 2005-2006, 2010-2014, Wind River Systems * All rights reserved. * @@ -191,6 +191,20 @@ int tipc_node_get_mtu(struct net *net, u32 addr, u32 sel) tipc_node_put(n); return mtu; } + +u16 tipc_node_get_capabilities(struct net *net, u32 addr) +{ + struct tipc_node *n; + u16 caps; + + n = tipc_node_find(net, addr); + if (unlikely(!n)) + return TIPC_NODE_CAPABILITIES; + caps = n->capabilities; + tipc_node_put(n); + return caps; +} + /* * A trivial power-of-two bitmask technique is used for speed, since this * operation is done for every incoming TIPC packet. 
The number of hash table @@ -304,8 +318,11 @@ struct tipc_node *tipc_node_create(struct net *net, u32 addr, u16 capabilities) spin_lock_bh(&tn->node_list_lock); n = tipc_node_find(net, addr); - if (n) + if (n) { + /* Same node may come back with new capabilities */ + n->capabilities = capabilities; goto exit; + } n = kzalloc(sizeof(*n), GFP_ATOMIC); if (!n) { pr_warn("Node creation failed, no memory\n"); diff --git a/net/tipc/node.h b/net/tipc/node.h index f39d9d06e8bb..18237684ffc4 100644 --- a/net/tipc/node.h +++ b/net/tipc/node.h @@ -70,6 +70,7 @@ void tipc_node_broadcast(struct net *net, struct sk_buff *skb); int tipc_node_add_conn(struct net *net, u32 dnode, u32 port, u32 peer_port); void tipc_node_remove_conn(struct net *net, u32 dnode, u32 port); int tipc_node_get_mtu(struct net *net, u32 addr, u32 sel); +u16 tipc_node_get_capabilities(struct net *net, u32 addr); int tipc_nl_node_dump(struct sk_buff *skb, struct netlink_callback *cb); int tipc_nl_node_dump_link(struct sk_buff *skb, struct netlink_callback *cb); int tipc_nl_node_reset_link_stats(struct sk_buff *skb, struct genl_info *info); diff --git a/net/tipc/socket.c b/net/tipc/socket.c index d37a9401e182..94bd28639855 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c @@ -98,6 +98,7 @@ struct tipc_sock { bool link_cong; uint sent_unacked; uint rcv_unacked; + u16 peer_caps; struct sockaddr_tipc remote; struct rhash_head node; struct rcu_head rcu; @@ -1118,6 +1119,7 @@ static void tipc_sk_finish_conn(struct tipc_sock *tsk, u32 peer_port, sk_reset_timer(sk, &sk->sk_timer, jiffies + tsk->probing_intv); tipc_node_add_conn(net, peer_node, tsk->portid, peer_port); tsk->max_pkt = tipc_node_get_mtu(net, peer_node, tsk->portid); + tsk->peer_caps = tipc_node_get_capabilities(net, peer_node); } /** From 10724cc7bb7832b482df049c20fd824d928c5eaa Mon Sep 17 00:00:00 2001 From: Jon Paul Maloy Date: Mon, 2 May 2016 11:58:47 -0400 Subject: [PATCH 1272/1649] tipc: redesign connection-level flow control There are two flow control mechanisms in TIPC; one at link level that handles network congestion, burst control, and retransmission, and one at connection level which' only remaining task is to prevent overflow in the receiving socket buffer. In TIPC, the latter task has to be solved end-to-end because messages can not be thrown away once they have been accepted and delivered upwards from the link layer, i.e, we can never permit the receive buffer to overflow. Currently, this algorithm is message based. A counter in the receiving socket keeps track of number of consumed messages, and sends a dedicated acknowledge message back to the sender for each 256 consumed message. A counter at the sending end keeps track of the sent, not yet acknowledged messages, and blocks the sender if this number ever reaches 512 unacknowledged messages. When the missing acknowledge arrives, the socket is then woken up for renewed transmission. This works well for keeping the message flow running, as it almost never happens that a sender socket is blocked this way. A problem with the current mechanism is that it potentially is very memory consuming. Since we don't distinguish between small and large messages, we have to dimension the socket receive buffer according to a worst-case of both. I.e., the window size must be chosen large enough to sustain a reasonable throughput even for the smallest messages, while we must still consider a scenario where all messages are of maximum size. 
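To make the new dimensioning concrete, the block-based accounting introduced by this patch (and described in the following paragraphs) can be sketched as a small standalone program; the constants and helper logic mirror FLOWCTL_BLK_SZ, RCVBUF_DEF, tsk_adv_blocks() and tsk_inc() from the diff below, while the program itself is illustrative only:

    #include <stdio.h>

    #define FLOWCTL_BLK_SZ  1024
    #define RCVBUF_DEF      (FLOWCTL_BLK_SZ * 1024 * 2)     /* 2 MB */

    /* advertisable window in 1k blocks, relying on truesize(len)/len
     * being less than 4 for len >= FLOWCTL_BLK_SZ
     */
    static unsigned int adv_blocks(unsigned int rcvbuf)
    {
        return rcvbuf / FLOWCTL_BLK_SZ / 4;
    }

    /* blocks consumed by one message of 'len' bytes, rounded upwards */
    static unsigned int blocks_per_msg(unsigned int len)
    {
        return len / FLOWCTL_BLK_SZ + 1;
    }

    int main(void)
    {
        printf("window on the default 2 MB buffer: %u blocks\n",
               adv_blocks(RCVBUF_DEF));         /* 512 */
        printf("a 66000 byte message consumes %u blocks\n",
               blocks_per_msg(66000));          /* 65 */
        return 0;
    }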
Hence, the current fix window size of 512 messages and a maximum message size of 66k results in a receive buffer of 66 MB when truesize(66k) = 131k is taken into account. It is possible to do much better. This commit introduces an algorithm where we instead use 1024-byte blocks as base unit. This unit, always rounded upwards from the actual message size, is used when we advertise windows as well as when we count and acknowledge transmitted data. The advertised window is based on the configured receive buffer size in such a way that even the worst-case truesize/msgsize ratio always is covered. Since the smallest possible message size (from a flow control viewpoint) now is 1024 bytes, we can safely assume this ratio to be less than four, which is the value we are now using. This way, we have been able to reduce the default receive buffer size from 66 MB to 2 MB with maintained performance. In order to keep this solution backwards compatible, we introduce a new capability bit in the discovery protocol, and use this throughout the message sending/reception path to always select the right unit. Acked-by: Ying Xue Signed-off-by: Jon Maloy Signed-off-by: David S. Miller --- net/tipc/core.c | 8 +-- net/tipc/msg.h | 14 ++++- net/tipc/node.h | 5 +- net/tipc/socket.c | 140 ++++++++++++++++++++++++++++++---------------- net/tipc/socket.h | 17 ++++-- 5 files changed, 122 insertions(+), 62 deletions(-) diff --git a/net/tipc/core.c b/net/tipc/core.c index e2bdb07a49a2..fe1b062c4f18 100644 --- a/net/tipc/core.c +++ b/net/tipc/core.c @@ -112,11 +112,9 @@ static int __init tipc_init(void) pr_info("Activated (version " TIPC_MOD_VER ")\n"); - sysctl_tipc_rmem[0] = TIPC_CONN_OVERLOAD_LIMIT >> 4 << - TIPC_LOW_IMPORTANCE; - sysctl_tipc_rmem[1] = TIPC_CONN_OVERLOAD_LIMIT >> 4 << - TIPC_CRITICAL_IMPORTANCE; - sysctl_tipc_rmem[2] = TIPC_CONN_OVERLOAD_LIMIT; + sysctl_tipc_rmem[0] = RCVBUF_MIN; + sysctl_tipc_rmem[1] = RCVBUF_DEF; + sysctl_tipc_rmem[2] = RCVBUF_MAX; err = tipc_netlink_start(); if (err) diff --git a/net/tipc/msg.h b/net/tipc/msg.h index 58bf51541813..024da8af91f0 100644 --- a/net/tipc/msg.h +++ b/net/tipc/msg.h @@ -743,16 +743,26 @@ static inline void msg_set_msgcnt(struct tipc_msg *m, u16 n) msg_set_bits(m, 9, 16, 0xffff, n); } -static inline u32 msg_bcast_tag(struct tipc_msg *m) +static inline u32 msg_conn_ack(struct tipc_msg *m) { return msg_bits(m, 9, 16, 0xffff); } -static inline void msg_set_bcast_tag(struct tipc_msg *m, u32 n) +static inline void msg_set_conn_ack(struct tipc_msg *m, u32 n) { msg_set_bits(m, 9, 16, 0xffff, n); } +static inline u32 msg_adv_win(struct tipc_msg *m) +{ + return msg_bits(m, 9, 0, 0xffff); +} + +static inline void msg_set_adv_win(struct tipc_msg *m, u32 n) +{ + msg_set_bits(m, 9, 0, 0xffff, n); +} + static inline u32 msg_max_pkt(struct tipc_msg *m) { return msg_bits(m, 9, 16, 0xffff) * 4; diff --git a/net/tipc/node.h b/net/tipc/node.h index 18237684ffc4..8264b3d97dc4 100644 --- a/net/tipc/node.h +++ b/net/tipc/node.h @@ -45,10 +45,11 @@ /* Optional capabilities supported by this code version */ enum { - TIPC_BCAST_SYNCH = (1 << 1) + TIPC_BCAST_SYNCH = (1 << 1), + TIPC_BLOCK_FLOWCTL = (2 << 1) }; -#define TIPC_NODE_CAPABILITIES TIPC_BCAST_SYNCH +#define TIPC_NODE_CAPABILITIES (TIPC_BCAST_SYNCH | TIPC_BLOCK_FLOWCTL) #define INVALID_BEARER_ID -1 void tipc_node_stop(struct net *net); diff --git a/net/tipc/socket.c b/net/tipc/socket.c index 94bd28639855..12628890c219 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c @@ -96,9 +96,11 @@ struct tipc_sock { uint 
conn_timeout; atomic_t dupl_rcvcnt; bool link_cong; - uint sent_unacked; - uint rcv_unacked; + u16 snt_unacked; + u16 snd_win; u16 peer_caps; + u16 rcv_unacked; + u16 rcv_win; struct sockaddr_tipc remote; struct rhash_head node; struct rcu_head rcu; @@ -228,9 +230,29 @@ static struct tipc_sock *tipc_sk(const struct sock *sk) return container_of(sk, struct tipc_sock, sk); } -static int tsk_conn_cong(struct tipc_sock *tsk) +static bool tsk_conn_cong(struct tipc_sock *tsk) { - return tsk->sent_unacked >= TIPC_FLOWCTRL_WIN; + return tsk->snt_unacked >= tsk->snd_win; +} + +/* tsk_blocks(): translate a buffer size in bytes to number of + * advertisable blocks, taking into account the ratio truesize(len)/len + * We can trust that this ratio is always < 4 for len >= FLOWCTL_BLK_SZ + */ +static u16 tsk_adv_blocks(int len) +{ + return len / FLOWCTL_BLK_SZ / 4; +} + +/* tsk_inc(): increment counter for sent or received data + * - If block based flow control is not supported by peer we + * fall back to message based ditto, incrementing the counter + */ +static u16 tsk_inc(struct tipc_sock *tsk, int msglen) +{ + if (likely(tsk->peer_caps & TIPC_BLOCK_FLOWCTL)) + return ((msglen / FLOWCTL_BLK_SZ) + 1); + return 1; } /** @@ -378,9 +400,12 @@ static int tipc_sk_create(struct net *net, struct socket *sock, sk->sk_write_space = tipc_write_space; sk->sk_destruct = tipc_sock_destruct; tsk->conn_timeout = CONN_TIMEOUT_DEFAULT; - tsk->sent_unacked = 0; atomic_set(&tsk->dupl_rcvcnt, 0); + /* Start out with safe limits until we receive an advertised window */ + tsk->snd_win = tsk_adv_blocks(RCVBUF_MIN); + tsk->rcv_win = tsk->snd_win; + if (sock->state == SS_READY) { tsk_set_unreturnable(tsk, true); if (sock->type == SOCK_DGRAM) @@ -776,7 +801,7 @@ static void tipc_sk_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb) struct sock *sk = &tsk->sk; struct tipc_msg *hdr = buf_msg(skb); int mtyp = msg_type(hdr); - int conn_cong; + bool conn_cong; /* Ignore if connection cannot be validated: */ if (!tsk_peer_msg(tsk, hdr)) @@ -790,7 +815,9 @@ static void tipc_sk_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb) return; } else if (mtyp == CONN_ACK) { conn_cong = tsk_conn_cong(tsk); - tsk->sent_unacked -= msg_msgcnt(hdr); + tsk->snt_unacked -= msg_conn_ack(hdr); + if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL) + tsk->snd_win = msg_adv_win(hdr); if (conn_cong) sk->sk_write_space(sk); } else if (mtyp != CONN_PROBE_REPLY) { @@ -1021,12 +1048,14 @@ static int __tipc_send_stream(struct socket *sock, struct msghdr *m, size_t dsz) u32 dnode; uint mtu, send, sent = 0; struct iov_iter save; + int hlen = MIN_H_SIZE; /* Handle implied connection establishment */ if (unlikely(dest)) { rc = __tipc_sendmsg(sock, m, dsz); + hlen = msg_hdr_sz(mhdr); if (dsz && (dsz == rc)) - tsk->sent_unacked = 1; + tsk->snt_unacked = tsk_inc(tsk, dsz + hlen); return rc; } if (dsz > (uint)INT_MAX) @@ -1055,7 +1084,7 @@ next: if (likely(!tsk_conn_cong(tsk))) { rc = tipc_node_xmit(net, &pktchain, dnode, portid); if (likely(!rc)) { - tsk->sent_unacked++; + tsk->snt_unacked += tsk_inc(tsk, send + hlen); sent += send; if (sent == dsz) return dsz; @@ -1120,6 +1149,12 @@ static void tipc_sk_finish_conn(struct tipc_sock *tsk, u32 peer_port, tipc_node_add_conn(net, peer_node, tsk->portid, peer_port); tsk->max_pkt = tipc_node_get_mtu(net, peer_node, tsk->portid); tsk->peer_caps = tipc_node_get_capabilities(net, peer_node); + if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL) + return; + + /* Fall back to message based flow control */ + tsk->rcv_win = FLOWCTL_MSG_WIN; + 
tsk->snd_win = FLOWCTL_MSG_WIN; } /** @@ -1216,7 +1251,7 @@ static int tipc_sk_anc_data_recv(struct msghdr *m, struct tipc_msg *msg, return 0; } -static void tipc_sk_send_ack(struct tipc_sock *tsk, uint ack) +static void tipc_sk_send_ack(struct tipc_sock *tsk) { struct net *net = sock_net(&tsk->sk); struct sk_buff *skb = NULL; @@ -1232,7 +1267,14 @@ static void tipc_sk_send_ack(struct tipc_sock *tsk, uint ack) if (!skb) return; msg = buf_msg(skb); - msg_set_msgcnt(msg, ack); + msg_set_conn_ack(msg, tsk->rcv_unacked); + tsk->rcv_unacked = 0; + + /* Adjust to and advertize the correct window limit */ + if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL) { + tsk->rcv_win = tsk_adv_blocks(tsk->sk.sk_rcvbuf); + msg_set_adv_win(msg, tsk->rcv_win); + } tipc_node_xmit_skb(net, skb, dnode, msg_link_selector(msg)); } @@ -1290,7 +1332,7 @@ static int tipc_recvmsg(struct socket *sock, struct msghdr *m, size_t buf_len, long timeo; unsigned int sz; u32 err; - int res; + int res, hlen; /* Catch invalid receive requests */ if (unlikely(!buf_len)) @@ -1315,6 +1357,7 @@ restart: buf = skb_peek(&sk->sk_receive_queue); msg = buf_msg(buf); sz = msg_data_sz(msg); + hlen = msg_hdr_sz(msg); err = msg_errcode(msg); /* Discard an empty non-errored message & try again */ @@ -1337,7 +1380,7 @@ restart: sz = buf_len; m->msg_flags |= MSG_TRUNC; } - res = skb_copy_datagram_msg(buf, msg_hdr_sz(msg), m, sz); + res = skb_copy_datagram_msg(buf, hlen, m, sz); if (res) goto exit; res = sz; @@ -1349,15 +1392,15 @@ restart: res = -ECONNRESET; } - /* Consume received message (optional) */ - if (likely(!(flags & MSG_PEEK))) { - if ((sock->state != SS_READY) && - (++tsk->rcv_unacked >= TIPC_CONNACK_INTV)) { - tipc_sk_send_ack(tsk, tsk->rcv_unacked); - tsk->rcv_unacked = 0; - } - tsk_advance_rx_queue(sk); + if (unlikely(flags & MSG_PEEK)) + goto exit; + + if (likely(sock->state != SS_READY)) { + tsk->rcv_unacked += tsk_inc(tsk, hlen + sz); + if (unlikely(tsk->rcv_unacked >= (tsk->rcv_win / 4))) + tipc_sk_send_ack(tsk); } + tsk_advance_rx_queue(sk); exit: release_sock(sk); return res; @@ -1386,7 +1429,7 @@ static int tipc_recv_stream(struct socket *sock, struct msghdr *m, int sz_to_copy, target, needed; int sz_copied = 0; u32 err; - int res = 0; + int res = 0, hlen; /* Catch invalid receive attempts */ if (unlikely(!buf_len)) @@ -1412,6 +1455,7 @@ restart: buf = skb_peek(&sk->sk_receive_queue); msg = buf_msg(buf); sz = msg_data_sz(msg); + hlen = msg_hdr_sz(msg); err = msg_errcode(msg); /* Discard an empty non-errored message & try again */ @@ -1436,8 +1480,7 @@ restart: needed = (buf_len - sz_copied); sz_to_copy = (sz <= needed) ? 
sz : needed; - res = skb_copy_datagram_msg(buf, msg_hdr_sz(msg) + offset, - m, sz_to_copy); + res = skb_copy_datagram_msg(buf, hlen + offset, m, sz_to_copy); if (res) goto exit; @@ -1459,20 +1502,18 @@ restart: res = -ECONNRESET; } - /* Consume received message (optional) */ - if (likely(!(flags & MSG_PEEK))) { - if (unlikely(++tsk->rcv_unacked >= TIPC_CONNACK_INTV)) { - tipc_sk_send_ack(tsk, tsk->rcv_unacked); - tsk->rcv_unacked = 0; - } - tsk_advance_rx_queue(sk); - } + if (unlikely(flags & MSG_PEEK)) + goto exit; + + tsk->rcv_unacked += tsk_inc(tsk, hlen + sz); + if (unlikely(tsk->rcv_unacked >= (tsk->rcv_win / 4))) + tipc_sk_send_ack(tsk); + tsk_advance_rx_queue(sk); /* Loop around if more data is required */ if ((sz_copied < buf_len) && /* didn't get all requested data */ (!skb_queue_empty(&sk->sk_receive_queue) || (sz_copied < target)) && /* and more is ready or required */ - (!(flags & MSG_PEEK)) && /* and aren't just peeking at data */ (!err)) /* and haven't reached a FIN */ goto restart; @@ -1604,30 +1645,33 @@ static bool filter_connect(struct tipc_sock *tsk, struct sk_buff *skb) /** * rcvbuf_limit - get proper overload limit of socket receive queue * @sk: socket - * @buf: message + * @skb: message * - * For all connection oriented messages, irrespective of importance, - * the default overload value (i.e. 67MB) is set as limit. + * For connection oriented messages, irrespective of importance, + * default queue limit is 2 MB. * - * For all connectionless messages, by default new queue limits are - * as belows: + * For connectionless messages, queue limits are based on message + * importance as follows: * - * TIPC_LOW_IMPORTANCE (4 MB) - * TIPC_MEDIUM_IMPORTANCE (8 MB) - * TIPC_HIGH_IMPORTANCE (16 MB) - * TIPC_CRITICAL_IMPORTANCE (32 MB) + * TIPC_LOW_IMPORTANCE (2 MB) + * TIPC_MEDIUM_IMPORTANCE (4 MB) + * TIPC_HIGH_IMPORTANCE (8 MB) + * TIPC_CRITICAL_IMPORTANCE (16 MB) * * Returns overload limit according to corresponding message importance */ -static unsigned int rcvbuf_limit(struct sock *sk, struct sk_buff *buf) +static unsigned int rcvbuf_limit(struct sock *sk, struct sk_buff *skb) { - struct tipc_msg *msg = buf_msg(buf); + struct tipc_sock *tsk = tipc_sk(sk); + struct tipc_msg *hdr = buf_msg(skb); - if (msg_connected(msg)) - return sysctl_tipc_rmem[2]; + if (unlikely(!msg_connected(hdr))) + return sk->sk_rcvbuf << msg_importance(hdr); - return sk->sk_rcvbuf >> TIPC_CRITICAL_IMPORTANCE << - msg_importance(msg); + if (likely(tsk->peer_caps & TIPC_BLOCK_FLOWCTL)) + return sk->sk_rcvbuf; + + return FLOWCTL_MSG_LIM; } /** diff --git a/net/tipc/socket.h b/net/tipc/socket.h index 4241f22069dc..06fb5944cf76 100644 --- a/net/tipc/socket.h +++ b/net/tipc/socket.h @@ -1,6 +1,6 @@ /* net/tipc/socket.h: Include file for TIPC socket code * - * Copyright (c) 2014-2015, Ericsson AB + * Copyright (c) 2014-2016, Ericsson AB * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -38,10 +38,17 @@ #include #include -#define TIPC_CONNACK_INTV 256 -#define TIPC_FLOWCTRL_WIN (TIPC_CONNACK_INTV * 2) -#define TIPC_CONN_OVERLOAD_LIMIT ((TIPC_FLOWCTRL_WIN * 2 + 1) * \ - SKB_TRUESIZE(TIPC_MAX_USER_MSG_SIZE)) +/* Compatibility values for deprecated message based flow control */ +#define FLOWCTL_MSG_WIN 512 +#define FLOWCTL_MSG_LIM ((FLOWCTL_MSG_WIN * 2 + 1) * SKB_TRUESIZE(MAX_MSG_SIZE)) + +#define FLOWCTL_BLK_SZ 1024 + +/* Socket receive buffer sizes */ +#define RCVBUF_MIN (FLOWCTL_BLK_SZ * 512) +#define RCVBUF_DEF (FLOWCTL_BLK_SZ * 1024 * 2) +#define RCVBUF_MAX (FLOWCTL_BLK_SZ * 1024 * 16) + int tipc_socket_init(void); void tipc_socket_stop(void); void tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq); From 1d2077ac0165c0d173a2255e37cf4dc5033d92c7 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Mon, 2 May 2016 10:56:27 -0700 Subject: [PATCH 1273/1649] net: add __sock_wfree() helper Hosts sending lot of ACK packets exhibit high sock_wfree() cost because of cache line miss to test SOCK_USE_WRITE_QUEUE We could move this flag close to sk_wmem_alloc but it is better to perform the atomic_sub_and_test() on a clean cache line, as it avoid one extra bus transaction. skb_orphan_partial() can also have a fast track for packets that either are TCP acks, or already went through another skb_orphan_partial() Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- include/net/sock.h | 1 + net/core/sock.c | 24 ++++++++++++++++++++++++ net/ipv4/tcp_output.c | 2 +- 3 files changed, 26 insertions(+), 1 deletion(-) diff --git a/include/net/sock.h b/include/net/sock.h index 1dbb1f9f7c1b..45f5b492c658 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -1445,6 +1445,7 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority); struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force, gfp_t priority); +void __sock_wfree(struct sk_buff *skb); void sock_wfree(struct sk_buff *skb); void skb_orphan_partial(struct sk_buff *skb); void sock_rfree(struct sk_buff *skb); diff --git a/net/core/sock.c b/net/core/sock.c index f615e9391170..08bf97eceeb3 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -1655,6 +1655,17 @@ void sock_wfree(struct sk_buff *skb) } EXPORT_SYMBOL(sock_wfree); +/* This variant of sock_wfree() is used by TCP, + * since it sets SOCK_USE_WRITE_QUEUE. + */ +void __sock_wfree(struct sk_buff *skb) +{ + struct sock *sk = skb->sk; + + if (atomic_sub_and_test(skb->truesize, &sk->sk_wmem_alloc)) + __sk_free(sk); +} + void skb_set_owner_w(struct sk_buff *skb, struct sock *sk) { skb_orphan(skb); @@ -1677,8 +1688,21 @@ void skb_set_owner_w(struct sk_buff *skb, struct sock *sk) } EXPORT_SYMBOL(skb_set_owner_w); +/* This helper is used by netem, as it can hold packets in its + * delay queue. We want to allow the owner socket to send more + * packets, as if they were already TX completed by a typical driver. + * But we also want to keep skb->sk set because some packet schedulers + * rely on it (sch_fq for example). So we set skb->truesize to a small + * amount (1) and decrease sk_wmem_alloc accordingly. + */ void skb_orphan_partial(struct sk_buff *skb) { + /* If this skb is a TCP pure ACK or already went here, + * we have nothing to do. 2 is already a very small truesize. 
+ */ + if (skb->truesize <= 2) + return; + /* TCP stack sets skb->ooo_okay based on sk_wmem_alloc, * so we do not completely orphan skb, but transfert all * accounted bytes but one, to avoid unexpected reorders. diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 25d527922b18..8daefd8b1b49 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -949,7 +949,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, skb_orphan(skb); skb->sk = sk; - skb->destructor = skb_is_tcp_pure_ack(skb) ? sock_wfree : tcp_wfree; + skb->destructor = skb_is_tcp_pure_ack(skb) ? __sock_wfree : tcp_wfree; skb_set_hash_from_sk(skb, sk); atomic_add(skb->truesize, &sk->sk_wmem_alloc); From 26879da58711aa604a1b866cbeedd7e0f78f90ad Mon Sep 17 00:00:00 2001 From: Wei Wang Date: Mon, 2 May 2016 21:40:07 -0700 Subject: [PATCH 1274/1649] ipv6: add new struct ipcm6_cookie In the sendmsg function of UDP, raw, ICMP and l2tp sockets, we use local variables like hlimits, tclass, opt and dontfrag and pass them to corresponding functions like ip6_make_skb, ip6_append_data and xxx_push_pending_frames. This is not a good practice and makes it hard to add new parameters. This fix introduces a new struct ipcm6_cookie similar to ipcm_cookie in ipv4 and include the above mentioned variables. And we only pass the pointer to this structure to corresponding functions. This makes it easier to add new parameters in the future and makes the function cleaner. Signed-off-by: Wei Wang Signed-off-by: David S. Miller --- include/net/ipv6.h | 18 +++++++++++------ include/net/transp_v6.h | 3 +-- net/ipv6/datagram.c | 13 ++++++------- net/ipv6/icmp.c | 28 +++++++++++++++------------ net/ipv6/ip6_flowlabel.c | 6 +++--- net/ipv6/ip6_output.c | 42 +++++++++++++++++++--------------------- net/ipv6/ipv6_sockglue.c | 6 +++--- net/ipv6/ping.c | 12 +++++++----- net/ipv6/raw.c | 33 +++++++++++++++++-------------- net/ipv6/udp.c | 38 ++++++++++++++++++------------------ net/l2tp/l2tp_ip6.c | 33 +++++++++++++++++-------------- 11 files changed, 123 insertions(+), 109 deletions(-) diff --git a/include/net/ipv6.h b/include/net/ipv6.h index 415213da5be3..11a045281948 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h @@ -251,6 +251,13 @@ struct ipv6_fl_socklist { struct rcu_head rcu; }; +struct ipcm6_cookie { + __s16 hlimit; + __s16 tclass; + __s8 dontfrag; + struct ipv6_txoptions *opt; +}; + static inline struct ipv6_txoptions *txopt_get(const struct ipv6_pinfo *np) { struct ipv6_txoptions *opt; @@ -863,9 +870,9 @@ int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr); int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb), - void *from, int length, int transhdrlen, int hlimit, - int tclass, struct ipv6_txoptions *opt, struct flowi6 *fl6, - struct rt6_info *rt, unsigned int flags, int dontfrag, + void *from, int length, int transhdrlen, + struct ipcm6_cookie *ipc6, struct flowi6 *fl6, + struct rt6_info *rt, unsigned int flags, const struct sockcm_cookie *sockc); int ip6_push_pending_frames(struct sock *sk); @@ -881,9 +888,8 @@ struct sk_buff *ip6_make_skb(struct sock *sk, int getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb), void *from, int length, int transhdrlen, - int hlimit, int tclass, struct ipv6_txoptions *opt, - struct flowi6 *fl6, struct rt6_info *rt, - unsigned int flags, int dontfrag, + struct ipcm6_cookie *ipc6, struct flowi6 *fl6, + struct rt6_info *rt, unsigned int flags, const struct 
sockcm_cookie *sockc); static inline struct sk_buff *ip6_finish_skb(struct sock *sk) diff --git a/include/net/transp_v6.h b/include/net/transp_v6.h index 2b1c3450ab20..276f9760ab56 100644 --- a/include/net/transp_v6.h +++ b/include/net/transp_v6.h @@ -41,8 +41,7 @@ void ip6_datagram_recv_specific_ctl(struct sock *sk, struct msghdr *msg, struct sk_buff *skb); int ip6_datagram_send_ctl(struct net *net, struct sock *sk, struct msghdr *msg, - struct flowi6 *fl6, struct ipv6_txoptions *opt, - int *hlimit, int *tclass, int *dontfrag, + struct flowi6 *fl6, struct ipcm6_cookie *ipc6, struct sockcm_cookie *sockc); void ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp, diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c index ea9ee5cce5cf..00d0c2903173 100644 --- a/net/ipv6/datagram.c +++ b/net/ipv6/datagram.c @@ -727,14 +727,13 @@ EXPORT_SYMBOL_GPL(ip6_datagram_recv_ctl); int ip6_datagram_send_ctl(struct net *net, struct sock *sk, struct msghdr *msg, struct flowi6 *fl6, - struct ipv6_txoptions *opt, - int *hlimit, int *tclass, int *dontfrag, - struct sockcm_cookie *sockc) + struct ipcm6_cookie *ipc6, struct sockcm_cookie *sockc) { struct in6_pktinfo *src_info; struct cmsghdr *cmsg; struct ipv6_rt_hdr *rthdr; struct ipv6_opt_hdr *hdr; + struct ipv6_txoptions *opt = ipc6->opt; int len; int err = 0; @@ -953,8 +952,8 @@ int ip6_datagram_send_ctl(struct net *net, struct sock *sk, goto exit_f; } - *hlimit = *(int *)CMSG_DATA(cmsg); - if (*hlimit < -1 || *hlimit > 0xff) { + ipc6->hlimit = *(int *)CMSG_DATA(cmsg); + if (ipc6->hlimit < -1 || ipc6->hlimit > 0xff) { err = -EINVAL; goto exit_f; } @@ -974,7 +973,7 @@ int ip6_datagram_send_ctl(struct net *net, struct sock *sk, goto exit_f; err = 0; - *tclass = tc; + ipc6->tclass = tc; break; } @@ -992,7 +991,7 @@ int ip6_datagram_send_ctl(struct net *net, struct sock *sk, goto exit_f; err = 0; - *dontfrag = df; + ipc6->dontfrag = df; break; } diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c index 23b9a4cc418e..9554b99a8508 100644 --- a/net/ipv6/icmp.c +++ b/net/ipv6/icmp.c @@ -401,10 +401,10 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info) struct flowi6 fl6; struct icmpv6_msg msg; struct sockcm_cookie sockc_unused = {0}; + struct ipcm6_cookie ipc6; int iif = 0; int addr_type = 0; int len; - int hlimit; int err = 0; u32 mark = IP6_REPLY_MARK(net, skb->mark); @@ -507,7 +507,10 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info) if (IS_ERR(dst)) goto out; - hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst); + ipc6.hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst); + ipc6.tclass = np->tclass; + ipc6.dontfrag = np->dontfrag; + ipc6.opt = NULL; msg.skb = skb; msg.offset = skb_network_offset(skb); @@ -526,9 +529,9 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info) err = ip6_append_data(sk, icmpv6_getfrag, &msg, len + sizeof(struct icmp6hdr), - sizeof(struct icmp6hdr), hlimit, - np->tclass, NULL, &fl6, (struct rt6_info *)dst, - MSG_DONTWAIT, np->dontfrag, &sockc_unused); + sizeof(struct icmp6hdr), + &ipc6, &fl6, (struct rt6_info *)dst, + MSG_DONTWAIT, &sockc_unused); if (err) { ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTERRORS); ip6_flush_pending_frames(sk); @@ -563,9 +566,8 @@ static void icmpv6_echo_reply(struct sk_buff *skb) struct flowi6 fl6; struct icmpv6_msg msg; struct dst_entry *dst; + struct ipcm6_cookie ipc6; int err = 0; - int hlimit; - u8 tclass; u32 mark = IP6_REPLY_MARK(net, skb->mark); struct sockcm_cookie sockc_unused = {0}; @@ -607,19 +609,21 @@ static void 
icmpv6_echo_reply(struct sk_buff *skb) if (IS_ERR(dst)) goto out; - hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst); - idev = __in6_dev_get(skb->dev); msg.skb = skb; msg.offset = 0; msg.type = ICMPV6_ECHO_REPLY; - tclass = ipv6_get_dsfield(ipv6_hdr(skb)); + ipc6.hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst); + ipc6.tclass = ipv6_get_dsfield(ipv6_hdr(skb)); + ipc6.dontfrag = np->dontfrag; + ipc6.opt = NULL; + err = ip6_append_data(sk, icmpv6_getfrag, &msg, skb->len + sizeof(struct icmp6hdr), - sizeof(struct icmp6hdr), hlimit, tclass, NULL, &fl6, + sizeof(struct icmp6hdr), &ipc6, &fl6, (struct rt6_info *)dst, MSG_DONTWAIT, - np->dontfrag, &sockc_unused); + &sockc_unused); if (err) { __ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTERRORS); diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c index 35d3ddc328f8..b912f0dbaf72 100644 --- a/net/ipv6/ip6_flowlabel.c +++ b/net/ipv6/ip6_flowlabel.c @@ -373,7 +373,7 @@ fl_create(struct net *net, struct sock *sk, struct in6_flowlabel_req *freq, struct msghdr msg; struct flowi6 flowi6; struct sockcm_cookie sockc_junk; - int junk; + struct ipcm6_cookie ipc6; err = -ENOMEM; fl->opt = kmalloc(sizeof(*fl->opt) + olen, GFP_KERNEL); @@ -390,8 +390,8 @@ fl_create(struct net *net, struct sock *sk, struct in6_flowlabel_req *freq, msg.msg_control = (void *)(fl->opt+1); memset(&flowi6, 0, sizeof(flowi6)); - err = ip6_datagram_send_ctl(net, sk, &msg, &flowi6, fl->opt, - &junk, &junk, &junk, &sockc_junk); + ipc6.opt = fl->opt; + err = ip6_datagram_send_ctl(net, sk, &msg, &flowi6, &ipc6, &sockc_junk); if (err) goto done; err = -EINVAL; diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 2b3ffc582d16..cbf127ae7c67 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -1182,12 +1182,12 @@ static void ip6_append_data_mtu(unsigned int *mtu, } static int ip6_setup_cork(struct sock *sk, struct inet_cork_full *cork, - struct inet6_cork *v6_cork, - int hlimit, int tclass, struct ipv6_txoptions *opt, + struct inet6_cork *v6_cork, struct ipcm6_cookie *ipc6, struct rt6_info *rt, struct flowi6 *fl6) { struct ipv6_pinfo *np = inet6_sk(sk); unsigned int mtu; + struct ipv6_txoptions *opt = ipc6->opt; /* * setup for corking @@ -1229,8 +1229,8 @@ static int ip6_setup_cork(struct sock *sk, struct inet_cork_full *cork, dst_hold(&rt->dst); cork->base.dst = &rt->dst; cork->fl.u.ip6 = *fl6; - v6_cork->hop_limit = hlimit; - v6_cork->tclass = tclass; + v6_cork->hop_limit = ipc6->hlimit; + v6_cork->tclass = ipc6->tclass; if (rt->dst.flags & DST_XFRM_TUNNEL) mtu = np->pmtudisc >= IPV6_PMTUDISC_PROBE ? 
rt->dst.dev->mtu : dst_mtu(&rt->dst); @@ -1258,7 +1258,7 @@ static int __ip6_append_data(struct sock *sk, int getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb), void *from, int length, int transhdrlen, - unsigned int flags, int dontfrag, + unsigned int flags, struct ipcm6_cookie *ipc6, const struct sockcm_cookie *sockc) { struct sk_buff *skb, *skb_prev = NULL; @@ -1298,7 +1298,7 @@ static int __ip6_append_data(struct sock *sk, sizeof(struct frag_hdr) : 0) + rt->rt6i_nfheader_len; - if (cork->length + length > mtu - headersize && dontfrag && + if (cork->length + length > mtu - headersize && ipc6->dontfrag && (sk->sk_protocol == IPPROTO_UDP || sk->sk_protocol == IPPROTO_RAW)) { ipv6_local_rxpmtu(sk, fl6, mtu - headersize + @@ -1564,9 +1564,9 @@ error: int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb), - void *from, int length, int transhdrlen, int hlimit, - int tclass, struct ipv6_txoptions *opt, struct flowi6 *fl6, - struct rt6_info *rt, unsigned int flags, int dontfrag, + void *from, int length, int transhdrlen, + struct ipcm6_cookie *ipc6, struct flowi6 *fl6, + struct rt6_info *rt, unsigned int flags, const struct sockcm_cookie *sockc) { struct inet_sock *inet = inet_sk(sk); @@ -1580,12 +1580,12 @@ int ip6_append_data(struct sock *sk, /* * setup for corking */ - err = ip6_setup_cork(sk, &inet->cork, &np->cork, hlimit, - tclass, opt, rt, fl6); + err = ip6_setup_cork(sk, &inet->cork, &np->cork, + ipc6, rt, fl6); if (err) return err; - exthdrlen = (opt ? opt->opt_flen : 0); + exthdrlen = (ipc6->opt ? ipc6->opt->opt_flen : 0); length += exthdrlen; transhdrlen += exthdrlen; } else { @@ -1595,8 +1595,7 @@ int ip6_append_data(struct sock *sk, return __ip6_append_data(sk, fl6, &sk->sk_write_queue, &inet->cork.base, &np->cork, sk_page_frag(sk), getfrag, - from, length, transhdrlen, flags, dontfrag, - sockc); + from, length, transhdrlen, flags, ipc6, sockc); } EXPORT_SYMBOL_GPL(ip6_append_data); @@ -1752,15 +1751,14 @@ struct sk_buff *ip6_make_skb(struct sock *sk, int getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb), void *from, int length, int transhdrlen, - int hlimit, int tclass, - struct ipv6_txoptions *opt, struct flowi6 *fl6, + struct ipcm6_cookie *ipc6, struct flowi6 *fl6, struct rt6_info *rt, unsigned int flags, - int dontfrag, const struct sockcm_cookie *sockc) + const struct sockcm_cookie *sockc) { struct inet_cork_full cork; struct inet6_cork v6_cork; struct sk_buff_head queue; - int exthdrlen = (opt ? opt->opt_flen : 0); + int exthdrlen = (ipc6->opt ? 
ipc6->opt->opt_flen : 0); int err; if (flags & MSG_PROBE) @@ -1772,17 +1770,17 @@ struct sk_buff *ip6_make_skb(struct sock *sk, cork.base.addr = 0; cork.base.opt = NULL; v6_cork.opt = NULL; - err = ip6_setup_cork(sk, &cork, &v6_cork, hlimit, tclass, opt, rt, fl6); + err = ip6_setup_cork(sk, &cork, &v6_cork, ipc6, rt, fl6); if (err) return ERR_PTR(err); - if (dontfrag < 0) - dontfrag = inet6_sk(sk)->dontfrag; + if (ipc6->dontfrag < 0) + ipc6->dontfrag = inet6_sk(sk)->dontfrag; err = __ip6_append_data(sk, fl6, &queue, &cork.base, &v6_cork, ¤t->task_frag, getfrag, from, length + exthdrlen, transhdrlen + exthdrlen, - flags, dontfrag, sockc); + flags, ipc6, sockc); if (err) { __ip6_flush_pending_frames(sk, &queue, &cork, &v6_cork); return ERR_PTR(err); diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c index 4ff4b29894eb..a9895e15ee9c 100644 --- a/net/ipv6/ipv6_sockglue.c +++ b/net/ipv6/ipv6_sockglue.c @@ -473,7 +473,7 @@ sticky_done: struct msghdr msg; struct flowi6 fl6; struct sockcm_cookie sockc_junk; - int junk; + struct ipcm6_cookie ipc6; memset(&fl6, 0, sizeof(fl6)); fl6.flowi6_oif = sk->sk_bound_dev_if; @@ -503,9 +503,9 @@ sticky_done: msg.msg_controllen = optlen; msg.msg_control = (void *)(opt+1); + ipc6.opt = opt; - retv = ip6_datagram_send_ctl(net, sk, &msg, &fl6, opt, &junk, - &junk, &junk, &sockc_junk); + retv = ip6_datagram_send_ctl(net, sk, &msg, &fl6, &ipc6, &sockc_junk); if (retv) goto done; update: diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c index da1cff79e447..3ee3e444a66b 100644 --- a/net/ipv6/ping.c +++ b/net/ipv6/ping.c @@ -58,11 +58,11 @@ static int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) int iif = 0; struct flowi6 fl6; int err; - int hlimit; struct dst_entry *dst; struct rt6_info *rt; struct pingfakehdr pfh; struct sockcm_cookie junk = {0}; + struct ipcm6_cookie ipc6; pr_debug("ping_v6_sendmsg(sk=%p,sk->num=%u)\n", inet, inet->inet_num); @@ -139,13 +139,15 @@ static int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) pfh.wcheck = 0; pfh.family = AF_INET6; - hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst); + ipc6.hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst); + ipc6.tclass = np->tclass; + ipc6.dontfrag = np->dontfrag; + ipc6.opt = NULL; lock_sock(sk); err = ip6_append_data(sk, ping_getfrag, &pfh, len, - 0, hlimit, - np->tclass, NULL, &fl6, rt, - MSG_DONTWAIT, np->dontfrag, &junk); + 0, &ipc6, &fl6, rt, + MSG_DONTWAIT, &junk); if (err) { ICMP6_INC_STATS(sock_net(sk), rt->rt6i_idev, diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c index b07ce21983aa..896350df6423 100644 --- a/net/ipv6/raw.c +++ b/net/ipv6/raw.c @@ -746,10 +746,8 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) struct raw6_frag_vec rfv; struct flowi6 fl6; struct sockcm_cookie sockc; + struct ipcm6_cookie ipc6; int addr_len = msg->msg_namelen; - int hlimit = -1; - int tclass = -1; - int dontfrag = -1; u16 proto; int err; @@ -770,6 +768,11 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) fl6.flowi6_mark = sk->sk_mark; + ipc6.hlimit = -1; + ipc6.tclass = -1; + ipc6.dontfrag = -1; + ipc6.opt = NULL; + if (sin6) { if (addr_len < SIN6_LEN_RFC2133) return -EINVAL; @@ -827,10 +830,9 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) opt = &opt_space; memset(opt, 0, sizeof(struct ipv6_txoptions)); opt->tot_len = sizeof(struct ipv6_txoptions); + ipc6.opt = opt; - err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt, - &hlimit, &tclass, &dontfrag, - &sockc); + err = 
ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, &ipc6, &sockc); if (err < 0) { fl6_sock_release(flowlabel); return err; @@ -846,7 +848,7 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) if (!opt) { opt = txopt_get(np); opt_to_free = opt; - } + } if (flowlabel) opt = fl6_merge_options(&opt_space, flowlabel, opt); opt = ipv6_fixup_options(&opt_space, opt); @@ -881,14 +883,14 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) err = PTR_ERR(dst); goto out; } - if (hlimit < 0) - hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst); + if (ipc6.hlimit < 0) + ipc6.hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst); - if (tclass < 0) - tclass = np->tclass; + if (ipc6.tclass < 0) + ipc6.tclass = np->tclass; - if (dontfrag < 0) - dontfrag = np->dontfrag; + if (ipc6.dontfrag < 0) + ipc6.dontfrag = np->dontfrag; if (msg->msg_flags&MSG_CONFIRM) goto do_confirm; @@ -897,10 +899,11 @@ back_from_confirm: if (inet->hdrincl) err = rawv6_send_hdrinc(sk, msg, len, &fl6, &dst, msg->msg_flags); else { + ipc6.opt = opt; lock_sock(sk); err = ip6_append_data(sk, raw6_getfrag, &rfv, - len, 0, hlimit, tclass, opt, &fl6, (struct rt6_info *)dst, - msg->msg_flags, dontfrag, &sockc); + len, 0, &ipc6, &fl6, (struct rt6_info *)dst, + msg->msg_flags, &sockc); if (err) ip6_flush_pending_frames(sk); diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index f911c63f79e6..aca06094110f 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -1064,11 +1064,9 @@ int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) struct ip6_flowlabel *flowlabel = NULL; struct flowi6 fl6; struct dst_entry *dst; + struct ipcm6_cookie ipc6; int addr_len = msg->msg_namelen; int ulen = len; - int hlimit = -1; - int tclass = -1; - int dontfrag = -1; int corkreq = up->corkflag || msg->msg_flags&MSG_MORE; int err; int connected = 0; @@ -1076,6 +1074,10 @@ int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) int (*getfrag)(void *, char *, int, int, int, struct sk_buff *); struct sockcm_cookie sockc; + ipc6.hlimit = -1; + ipc6.tclass = -1; + ipc6.dontfrag = -1; + /* destination address check */ if (sin6) { if (addr_len < offsetof(struct sockaddr, sa_data)) @@ -1200,10 +1202,9 @@ do_udp_sendmsg: opt = &opt_space; memset(opt, 0, sizeof(struct ipv6_txoptions)); opt->tot_len = sizeof(*opt); + ipc6.opt = opt; - err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt, - &hlimit, &tclass, &dontfrag, - &sockc); + err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, &ipc6, &sockc); if (err < 0) { fl6_sock_release(flowlabel); return err; @@ -1224,6 +1225,7 @@ do_udp_sendmsg: if (flowlabel) opt = fl6_merge_options(&opt_space, flowlabel, opt); opt = ipv6_fixup_options(&opt_space, opt); + ipc6.opt = opt; fl6.flowi6_proto = sk->sk_protocol; if (!ipv6_addr_any(daddr)) @@ -1253,11 +1255,11 @@ do_udp_sendmsg: goto out; } - if (hlimit < 0) - hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst); + if (ipc6.hlimit < 0) + ipc6.hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst); - if (tclass < 0) - tclass = np->tclass; + if (ipc6.tclass < 0) + ipc6.tclass = np->tclass; if (msg->msg_flags&MSG_CONFIRM) goto do_confirm; @@ -1268,9 +1270,9 @@ back_from_confirm: struct sk_buff *skb; skb = ip6_make_skb(sk, getfrag, msg, ulen, - sizeof(struct udphdr), hlimit, tclass, opt, + sizeof(struct udphdr), &ipc6, &fl6, (struct rt6_info *)dst, - msg->msg_flags, dontfrag, &sockc); + msg->msg_flags, &sockc); err = PTR_ERR(skb); if (!IS_ERR_OR_NULL(skb)) err = udp_v6_send_skb(skb, &fl6); @@ -1291,14 +1293,12 @@ back_from_confirm: up->pending = 
AF_INET6; do_append_data: - if (dontfrag < 0) - dontfrag = np->dontfrag; + if (ipc6.dontfrag < 0) + ipc6.dontfrag = np->dontfrag; up->len += ulen; - err = ip6_append_data(sk, getfrag, msg, ulen, - sizeof(struct udphdr), hlimit, tclass, opt, &fl6, - (struct rt6_info *)dst, - corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags, dontfrag, - &sockc); + err = ip6_append_data(sk, getfrag, msg, ulen, sizeof(struct udphdr), + &ipc6, &fl6, (struct rt6_info *)dst, + corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags, &sockc); if (err) udp_v6_flush_pending_frames(sk); else if (!corkreq) diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c index 46e07267e503..c6f5df1bed12 100644 --- a/net/l2tp/l2tp_ip6.c +++ b/net/l2tp/l2tp_ip6.c @@ -495,10 +495,8 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) struct dst_entry *dst = NULL; struct flowi6 fl6; struct sockcm_cookie sockc_unused = {0}; + struct ipcm6_cookie ipc6; int addr_len = msg->msg_namelen; - int hlimit = -1; - int tclass = -1; - int dontfrag = -1; int transhdrlen = 4; /* zero session-id */ int ulen = len + transhdrlen; int err; @@ -520,6 +518,10 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) fl6.flowi6_mark = sk->sk_mark; + ipc6.hlimit = -1; + ipc6.tclass = -1; + ipc6.dontfrag = -1; + if (lsa) { if (addr_len < SIN6_LEN_RFC2133) return -EINVAL; @@ -564,11 +566,11 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) opt = &opt_space; memset(opt, 0, sizeof(struct ipv6_txoptions)); opt->tot_len = sizeof(struct ipv6_txoptions); + ipc6.opt = opt; - err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt, - &hlimit, &tclass, &dontfrag, - &sockc_unused); - if (err < 0) { + err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, &ipc6, + &sockc_unused); + if (err < 0) { fl6_sock_release(flowlabel); return err; } @@ -588,6 +590,7 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) if (flowlabel) opt = fl6_merge_options(&opt_space, flowlabel, opt); opt = ipv6_fixup_options(&opt_space, opt); + ipc6.opt = opt; fl6.flowi6_proto = sk->sk_protocol; if (!ipv6_addr_any(daddr)) @@ -612,14 +615,14 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) goto out; } - if (hlimit < 0) - hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst); + if (ipc6.hlimit < 0) + ipc6.hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst); - if (tclass < 0) - tclass = np->tclass; + if (ipc6.tclass < 0) + ipc6.tclass = np->tclass; - if (dontfrag < 0) - dontfrag = np->dontfrag; + if (ipc6.dontfrag < 0) + ipc6.dontfrag = np->dontfrag; if (msg->msg_flags & MSG_CONFIRM) goto do_confirm; @@ -627,9 +630,9 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) back_from_confirm: lock_sock(sk); err = ip6_append_data(sk, ip_generic_getfrag, msg, - ulen, transhdrlen, hlimit, tclass, opt, + ulen, transhdrlen, &ipc6, &fl6, (struct rt6_info *)dst, - msg->msg_flags, dontfrag, &sockc_unused); + msg->msg_flags, &sockc_unused); if (err) ip6_flush_pending_frames(sk); else if (!(msg->msg_flags & MSG_MORE)) From b58afe6d6d3a53af165d5946f12c4b08c95acd58 Mon Sep 17 00:00:00 2001 From: Christophe Ricard Date: Sat, 30 Apr 2016 09:12:34 +0200 Subject: [PATCH 1275/1649] nfc: st21nfca: Fix static checker warning Fix static checker warning: drivers/nfc/st21nfca/i2c.c:530 st21nfca_hci_i2c_acpi_request_resources() error: 'gpiod_ena' dereferencing possible ERR_PTR() Fix so that if no enable gpio can be retrieved an -ENODEV is returned. 
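As an illustration only (this snippet is not part of the patch), the intended probe-time handling is to treat an ERR_PTR() from devm_gpiod_get_index() as a hard failure before the descriptor is ever converted:

	gpiod_ena = devm_gpiod_get_index(dev, ST21NFCA_GPIO_NAME_EN, 1,
					 GPIOD_OUT_LOW);
	if (IS_ERR(gpiod_ena)) {
		nfc_err(dev, "Unable to get ENABLE GPIO\n");
		return -ENODEV;
	}
	phy->gpio_ena = desc_to_gpio(gpiod_ena);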
Reported-by: Dan Carpenter Fixes: dfa8070d7f64 ("nfc: st21nfca: Add support for acpi probing for i2c device.") Cc: stable@vger.kernel.org Signed-off-by: Christophe Ricard Signed-off-by: Samuel Ortiz --- drivers/nfc/st21nfca/i2c.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/nfc/st21nfca/i2c.c b/drivers/nfc/st21nfca/i2c.c index 640b4de05793..36645dcbe775 100644 --- a/drivers/nfc/st21nfca/i2c.c +++ b/drivers/nfc/st21nfca/i2c.c @@ -524,8 +524,10 @@ static int st21nfca_hci_i2c_acpi_request_resources(struct i2c_client *client) /* Get EN GPIO from ACPI */ gpiod_ena = devm_gpiod_get_index(dev, ST21NFCA_GPIO_NAME_EN, 1, GPIOD_OUT_LOW); - if (!IS_ERR(gpiod_ena)) - phy->gpio_ena = desc_to_gpio(gpiod_ena); + if (!IS_ERR(gpiod_ena)) { + nfc_err(dev, "Unable to get ENABLE GPIO\n"); + return -ENODEV; + } phy->gpio_ena = desc_to_gpio(gpiod_ena); From c1fc9136c8b353e853065c2a8a736cdcf6081dcd Mon Sep 17 00:00:00 2001 From: Christophe Ricard Date: Sat, 30 Apr 2016 09:12:35 +0200 Subject: [PATCH 1276/1649] nfc: st-nci: i2c: Change ST_NCI_GPIO_NAME_RESET to match DT Since commit 10cf4899f8af ("gpiolib: tighten up ACPI legacy gpio lookups") If _DSD properties are available in an ACPI node, we are not allowed to fallback to _CRS data to retrieve gpio properties. This was causing us to fail if uicc-present and/or ese-present are defined. To be consistent with devicetree change ST_NCI_GPIO_NAME_RESET content to reset so that acpi_find_gpio in drivers/gpio/gpiolib.c will look for reset-gpios. In the mean time the ACPI table needs to be fixed as follow: Device (NFC1) { Name (_ADR, Zero) // _ADR: Address Name (_HID, "SMO2101") // _HID: Hardware ID Name (_CID, "SMO2101") // _CID: Compatible ID Name (_DDN, "SMO NFC") // _DDN: DOS Device Name Name (_UID, One) // _UID: Unique ID Method (_CRS, 0, NotSerialized) // _CRS: Current Resource Settings { Name (SBUF, ResourceTemplate () { I2cSerialBus (0x0008, ControllerInitiated, 400000, AddressingMode7Bit, "\\_SB.I2C7", 0x00, ResourceConsumer, ,) GpioInt (Edge, ActiveHigh, ExclusiveAndWake, PullNone, 0x0000, "\\_SB.GPO2", 0x00, ResourceConsumer, ,) { // Pin list 0x0001 } GpioIo (Exclusive, PullDefault, 0x0000, 0x0000, IoRestrictionOutputOnly, "\\_SB.GPO2", 0x00, ResourceConsumer, ,) { // Pin list 0x0002, } }) Name (_DSD, Package (0x02) { ToUUID ("daffd814-6eba-4d8c-8a91-bc9bbf4aa301") /* Device Properties for _DSD */, Package (0x03) { Package (0x02) { "uicc-present", 1 }, Package (0x02) { "ese-present", 1 }, Package (0x02) { "reset-gpios", Package(0x04) { ^NFC1, 1, 0, 0} }, } }) Return (SBUF) /* \_SB_.I2C7.NFC1._CRS.SBUF */ } Method (_STA, 0, NotSerialized) // _STA: Status { Return (0x0F) } } Signed-off-by: Christophe Ricard Signed-off-by: Samuel Ortiz --- drivers/nfc/st-nci/i2c.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/nfc/st-nci/i2c.c b/drivers/nfc/st-nci/i2c.c index 925dbeef74db..6645972377af 100644 --- a/drivers/nfc/st-nci/i2c.c +++ b/drivers/nfc/st-nci/i2c.c @@ -42,7 +42,7 @@ #define ST_NCI_I2C_DRIVER_NAME "st_nci_i2c" -#define ST_NCI_GPIO_NAME_RESET "clf_reset" +#define ST_NCI_GPIO_NAME_RESET "reset" struct st_nci_i2c_phy { struct i2c_client *i2c_dev; From de72dbc52c4141a6e3f31796499908d00816ad2e Mon Sep 17 00:00:00 2001 From: Christophe Ricard Date: Sat, 30 Apr 2016 09:12:36 +0200 Subject: [PATCH 1277/1649] nfc: st-nci: spi: Change ST_NCI_GPIO_NAME_RESET to match DT Since commit 10cf4899f8af ("gpiolib: tighten up ACPI legacy gpio lookups") If _DSD properties are available in an ACPI node, we are not 
allowed to fallback to _CRS data to retrieve gpio properties. This was causing us to fail if uicc-present and/or ese-present are defined. To be consistent with devicetree change ST_NCI_GPIO_NAME_RESET content to reset so that acpi_find_gpio in drivers/gpio/gpiolib.c will look for reset-gpios. In the mean time the ACPI table needs to be fixed as follow (Tested on Minnowboard Max): Device (NFC1) { Name (_ADR, Zero) // _ADR: Address Name (_HID, "SMO2101") // _HID: Hardware ID Name (_CID, "SMO2101") // _CID: Compatible ID Name (_DDN, "SMO NFC") // _DDN: DOS Device Name Name (_UID, One) // _UID: Unique ID Method (_CRS, 0, NotSerialized) // _CRS: Current Resource Settings { Name (SBUF, ResourceTemplate () { SpiSerialBus (0, PolarityLow, FourWireMode, 8, ControllerInitiated, 4000000, ClockPolarityLow, ClockPhaseFirst, "\\_SB.SPI1", 0x00, ResourceConsumer, ,) GpioInt (Edge, ActiveHigh, ExclusiveAndWake, PullNone, 0x0000, "\\_SB.GPO2", 0x00, ResourceConsumer, ,) { // Pin list 0x0001 } GpioIo (Exclusive, PullDefault, 0x0000, 0x0000, IoRestrictionOutputOnly, "\\_SB.GPO2", 0x00, ResourceConsumer, ,) { // Pin list 0x0002, } }) Name (_DSD, Package (0x02) { ToUUID ("daffd814-6eba-4d8c-8a91-bc9bbf4aa301") /* Device Properties for _DSD */, Package (0x03) { Package (0x02) { "uicc-present", 1 }, Package (0x02) { "ese-present", 1 }, Package (0x02) { "reset-gpios", Package(0x04) { ^NFC1, 1, 0, 0} }, } }) Return (SBUF) /* \_SB_.SPI1.NFC1._CRS.SBUF */ } Method (_STA, 0, NotSerialized) // _STA: Status { Return (0x0F) } } Signed-off-by: Christophe Ricard Signed-off-by: Samuel Ortiz --- drivers/nfc/st-nci/spi.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/nfc/st-nci/spi.c b/drivers/nfc/st-nci/spi.c index 821dfa950fa8..e015015c5dc9 100644 --- a/drivers/nfc/st-nci/spi.c +++ b/drivers/nfc/st-nci/spi.c @@ -43,7 +43,7 @@ #define ST_NCI_SPI_DRIVER_NAME "st_nci_spi" -#define ST_NCI_GPIO_NAME_RESET "clf_reset" +#define ST_NCI_GPIO_NAME_RESET "reset" struct st_nci_spi_phy { struct spi_device *spi_dev; From 2a196975058f94e184464b13b52d86752bccbbd4 Mon Sep 17 00:00:00 2001 From: Christophe Ricard Date: Sat, 30 Apr 2016 09:12:37 +0200 Subject: [PATCH 1278/1649] nfc: st21nfca: i2c: Change ST21NFCA_GPIO_NAME_RESET to match DT Since commit 10cf4899f8af ("gpiolib: tighten up ACPI legacy gpio lookups") If _DSD properties are available in an ACPI node, we are not allowed to fallback to _CRS data to retrieve gpio properties. This was causing us to fail if uicc-present and/or ese-present are defined. To be consistent with devicetree change ST_NCI_GPIO_NAME_RESET content to reset so that acpi_find_gpio in drivers/gpio/gpiolib.c will look for reset-gpios. 
In the mean time the ACPI table needs to be fixed as follow: Device (NFC1) { Name (_ADR, Zero) // _ADR: Address Name (_HID, "SMO2100") // _HID: Hardware ID Name (_CID, "SMO2100") // _CID: Compatible ID Name (_DDN, "SMO NFC") // _DDN: DOS Device Name Name (_UID, One) // _UID: Unique ID Method (_CRS, 0, NotSerialized) // _CRS: Current Resource Settings { Name (SBUF, ResourceTemplate () { I2cSerialBus (0x0008, ControllerInitiated, 400000, AddressingMode7Bit, "\\_SB.I2C7", 0x00, ResourceConsumer, ,) GpioInt (Edge, ActiveHigh, ExclusiveAndWake, PullNone, 0x0000, "\\_SB.GPO2", 0x00, ResourceConsumer, ,) { // Pin list 0x0001 } GpioIo (Exclusive, PullDefault, 0x0000, 0x0000, IoRestrictionOutputOnly, "\\_SB.GPO2", 0x00, ResourceConsumer, ,) { // Pin list 0x0002, } }) Name (_DSD, Package (0x02) { ToUUID ("daffd814-6eba-4d8c-8a91-bc9bbf4aa301") /* Device Properties for _DSD */, Package (0x03) { Package (0x02) { "uicc-present", 1 }, Package (0x02) { "ese-present", 1 }, Package (0x02) { "enable-gpios", Package(0x04) { ^NFC1, 1, 0, 0} }, } }) Return (SBUF) /* \_SB_.I2C7.NFC1._CRS.SBUF */ } Method (_STA, 0, NotSerialized) // _STA: Status { Return (0x0F) } } Signed-off-by: Christophe Ricard Signed-off-by: Samuel Ortiz --- drivers/nfc/st21nfca/i2c.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/nfc/st21nfca/i2c.c b/drivers/nfc/st21nfca/i2c.c index 36645dcbe775..179c0b8edf8d 100644 --- a/drivers/nfc/st21nfca/i2c.c +++ b/drivers/nfc/st21nfca/i2c.c @@ -62,7 +62,7 @@ #define ST21NFCA_HCI_I2C_DRIVER_NAME "st21nfca_hci_i2c" -#define ST21NFCA_GPIO_NAME_EN "clf_enable" +#define ST21NFCA_GPIO_NAME_EN "enable" struct st21nfca_i2c_phy { struct i2c_client *i2c_dev; From bd9d523257832ecf999acb0c312f1ee1a22ed4b0 Mon Sep 17 00:00:00 2001 From: Christophe Ricard Date: Sat, 30 Apr 2016 09:12:38 +0200 Subject: [PATCH 1279/1649] nfc: st21nfca: set is_ese_present and is_uicc_present properly When they're present, set is_ese_present and set is_uicc_present to the value describe in their package description. So far is_ese_present and is_uicc_present was set to true if their property was present. 
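A minimal sketch of the new behaviour (illustrative only, not lifted verbatim from the diffs below), assuming the usual device property helpers:

	u8 tmp;

	/* absence of the property, or an explicit 0 value, now means "not present" */
	phy->se_status.is_ese_present = false;
	if (!device_property_read_u8(dev, "ese-present", &tmp))
		phy->se_status.is_ese_present = !!tmp;

The same pattern applies to "uicc-present".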
Signed-off-by: Christophe Ricard Signed-off-by: Samuel Ortiz --- drivers/nfc/st21nfca/i2c.c | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/drivers/nfc/st21nfca/i2c.c b/drivers/nfc/st21nfca/i2c.c index 179c0b8edf8d..a3153a30be5d 100644 --- a/drivers/nfc/st21nfca/i2c.c +++ b/drivers/nfc/st21nfca/i2c.c @@ -510,6 +510,7 @@ static int st21nfca_hci_i2c_acpi_request_resources(struct i2c_client *client) const struct acpi_device_id *id; struct gpio_desc *gpiod_ena; struct device *dev; + u8 tmp; if (!client) return -EINVAL; @@ -533,10 +534,18 @@ static int st21nfca_hci_i2c_acpi_request_resources(struct i2c_client *client) phy->irq_polarity = irq_get_trigger_type(client->irq); - phy->se_status.is_ese_present = - device_property_present(dev, "ese-present"); - phy->se_status.is_uicc_present = - device_property_present(dev, "uicc-present"); + phy->se_status.is_ese_present = false; + phy->se_status.is_uicc_present = false; + + if (device_property_present(dev, "ese-present")) { + device_property_read_u8(dev, "ese-present", &tmp); + phy->se_status.is_ese_present = tmp; + } + + if (device_property_present(dev, "uicc-present")) { + device_property_read_u8(dev, "uicc-present", &tmp); + phy->se_status.is_uicc_present = tmp; + } return 0; } From 27420fec40e22cb3ff04bcef654110eb2ef37620 Mon Sep 17 00:00:00 2001 From: Christophe Ricard Date: Sat, 30 Apr 2016 09:12:39 +0200 Subject: [PATCH 1280/1649] nfc: st-nci: set is_ese_present and is_uicc_present properly When they're present, set is_ese_present and set is_uicc_present to the value describe in their package description. So far is_ese_present and is_uicc_present was set to true if their property was present. Signed-off-by: Christophe Ricard Signed-off-by: Samuel Ortiz --- drivers/nfc/st-nci/i2c.c | 17 +++++++++++++---- drivers/nfc/st-nci/spi.c | 17 +++++++++++++---- 2 files changed, 26 insertions(+), 8 deletions(-) diff --git a/drivers/nfc/st-nci/i2c.c b/drivers/nfc/st-nci/i2c.c index 6645972377af..a6a1977e57c5 100644 --- a/drivers/nfc/st-nci/i2c.c +++ b/drivers/nfc/st-nci/i2c.c @@ -214,6 +214,7 @@ static int st_nci_i2c_acpi_request_resources(struct i2c_client *client) const struct acpi_device_id *id; struct gpio_desc *gpiod_reset; struct device *dev; + u8 tmp; if (!client) return -EINVAL; @@ -237,10 +238,18 @@ static int st_nci_i2c_acpi_request_resources(struct i2c_client *client) phy->irq_polarity = irq_get_trigger_type(client->irq); - phy->se_status.is_ese_present = - device_property_present(dev, "ese-present"); - phy->se_status.is_uicc_present = - device_property_present(dev, "uicc-present"); + phy->se_status.is_ese_present = false; + phy->se_status.is_uicc_present = false; + + if (device_property_present(dev, "ese-present")) { + device_property_read_u8(dev, "ese-present", &tmp); + phy->se_status.is_ese_present = tmp; + } + + if (device_property_present(dev, "uicc-present")) { + device_property_read_u8(dev, "uicc-present", &tmp); + phy->se_status.is_uicc_present = tmp; + } return 0; } diff --git a/drivers/nfc/st-nci/spi.c b/drivers/nfc/st-nci/spi.c index e015015c5dc9..51a863f0a987 100644 --- a/drivers/nfc/st-nci/spi.c +++ b/drivers/nfc/st-nci/spi.c @@ -229,6 +229,7 @@ static int st_nci_spi_acpi_request_resources(struct spi_device *spi_dev) const struct acpi_device_id *id; struct gpio_desc *gpiod_reset; struct device *dev; + u8 tmp; if (!spi_dev) return -EINVAL; @@ -252,10 +253,18 @@ static int st_nci_spi_acpi_request_resources(struct spi_device *spi_dev) phy->irq_polarity = irq_get_trigger_type(spi_dev->irq); - 
phy->se_status.is_ese_present = - device_property_present(dev, "ese-present"); - phy->se_status.is_uicc_present = - device_property_present(dev, "uicc-present"); + phy->se_status.is_ese_present = false; + phy->se_status.is_uicc_present = false; + + if (device_property_present(dev, "ese-present")) { + device_property_read_u8(dev, "ese-present", &tmp); + tmp = phy->se_status.is_ese_present; + } + + if (device_property_present(dev, "uicc-present")) { + device_property_read_u8(dev, "uicc-present", &tmp); + tmp = phy->se_status.is_uicc_present; + } return 0; } From d35cb20b411673820219e08c57392d18668f6217 Mon Sep 17 00:00:00 2001 From: Christophe Ricard Date: Sat, 30 Apr 2016 09:12:40 +0200 Subject: [PATCH 1281/1649] nfc: st21nfca: Simplify white list building Simplify white list Building Signed-off-by: Christophe Ricard Signed-off-by: Samuel Ortiz --- drivers/nfc/st21nfca/core.c | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) diff --git a/drivers/nfc/st21nfca/core.c b/drivers/nfc/st21nfca/core.c index dd8b150fbffa..25ab279606c2 100644 --- a/drivers/nfc/st21nfca/core.c +++ b/drivers/nfc/st21nfca/core.c @@ -262,17 +262,10 @@ static int st21nfca_hci_ready(struct nfc_hci_dev *hdev) int wl_size = 0; int r; - if (info->se_status->is_ese_present && - info->se_status->is_uicc_present) { + if (info->se_status->is_uicc_present) white_list[wl_size++] = NFC_HCI_UICC_HOST_ID; + if (info->se_status->is_ese_present) white_list[wl_size++] = ST21NFCA_ESE_HOST_ID; - } else if (!info->se_status->is_ese_present && - info->se_status->is_uicc_present) { - white_list[wl_size++] = NFC_HCI_UICC_HOST_ID; - } else if (info->se_status->is_ese_present && - !info->se_status->is_uicc_present) { - white_list[wl_size++] = ST21NFCA_ESE_HOST_ID; - } if (wl_size) { r = nfc_hci_set_param(hdev, NFC_HCI_ADMIN_GATE, From cde4856e612111ce91258b947051a08703cd1499 Mon Sep 17 00:00:00 2001 From: Christophe Ricard Date: Sat, 30 Apr 2016 09:12:41 +0200 Subject: [PATCH 1282/1649] nfc: st-nci: Simplify white list building Simplify white list Building Signed-off-by: Christophe Ricard Signed-off-by: Samuel Ortiz --- drivers/nfc/st-nci/se.c | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) diff --git a/drivers/nfc/st-nci/se.c b/drivers/nfc/st-nci/se.c index a53e5df803eb..79efccd13172 100644 --- a/drivers/nfc/st-nci/se.c +++ b/drivers/nfc/st-nci/se.c @@ -629,17 +629,10 @@ int st_nci_discover_se(struct nci_dev *ndev) if (test_bit(ST_NCI_FACTORY_MODE, &info->flags)) return 0; - if (info->se_info.se_status->is_ese_present && - info->se_info.se_status->is_uicc_present) { + if (info->se_info.se_status->is_uicc_present) white_list[wl_size++] = ST_NCI_UICC_HOST_ID; + if (info->se_info.se_status->is_ese_present) white_list[wl_size++] = ST_NCI_ESE_HOST_ID; - } else if (!info->se_info.se_status->is_ese_present && - info->se_info.se_status->is_uicc_present) { - white_list[wl_size++] = ST_NCI_UICC_HOST_ID; - } else if (info->se_info.se_status->is_ese_present && - !info->se_info.se_status->is_uicc_present) { - white_list[wl_size++] = ST_NCI_ESE_HOST_ID; - } if (wl_size) { r = nci_hci_set_param(ndev, NCI_HCI_ADMIN_GATE, From 0209e79d540440962913054fae499ad1892a4f15 Mon Sep 17 00:00:00 2001 From: Christophe Ricard Date: Sat, 30 Apr 2016 09:12:42 +0200 Subject: [PATCH 1283/1649] nfc: st-nci: A APDU_READER_GATE pipe is unexpected on a UICC An APDU_READER_GATE pipe is not expected on a UICC. Be more explicit so that an other secure element form factor (SD card) does not prompt this message. 
Signed-off-by: Christophe Ricard Signed-off-by: Samuel Ortiz --- drivers/nfc/st-nci/se.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/nfc/st-nci/se.c b/drivers/nfc/st-nci/se.c index 79efccd13172..edb6ee12a372 100644 --- a/drivers/nfc/st-nci/se.c +++ b/drivers/nfc/st-nci/se.c @@ -222,7 +222,7 @@ int st_nci_hci_load_session(struct nci_dev *ndev) */ dm_pipe_info = (struct st_nci_pipe_info *)skb_pipe_info->data; if (dm_pipe_info->dst_gate_id == ST_NCI_APDU_READER_GATE && - dm_pipe_info->src_host_id != ST_NCI_ESE_HOST_ID) { + dm_pipe_info->src_host_id == ST_NCI_UICC_HOST_ID) { pr_err("Unexpected apdu_reader pipe on host %x\n", dm_pipe_info->src_host_id); kfree_skb(skb_pipe_info); From 7cb6ab590d7d203b53284f4f211d4a8a89a125fc Mon Sep 17 00:00:00 2001 From: Christophe Ricard Date: Sat, 30 Apr 2016 09:12:43 +0200 Subject: [PATCH 1284/1649] nfc: st21nfca: A APDU_READER_GATE pipe is unexpected on a UICC An APDU_READER_GATE pipe is not expected on a UICC. Be more explicit so that an other secure element form factor (SD card) does not prompt this message. Signed-off-by: Christophe Ricard Signed-off-by: Samuel Ortiz --- drivers/nfc/st21nfca/core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/nfc/st21nfca/core.c b/drivers/nfc/st21nfca/core.c index 25ab279606c2..dacb9166081b 100644 --- a/drivers/nfc/st21nfca/core.c +++ b/drivers/nfc/st21nfca/core.c @@ -176,7 +176,7 @@ static int st21nfca_hci_load_session(struct nfc_hci_dev *hdev) */ info = (struct st21nfca_pipe_info *) skb_pipe_info->data; if (info->dst_gate_id == ST21NFCA_APDU_READER_GATE && - info->src_host_id != ST21NFCA_ESE_HOST_ID) { + info->src_host_id == NFC_HCI_UICC_HOST_ID) { pr_err("Unexpected apdu_reader pipe on host %x\n", info->src_host_id); kfree_skb(skb_pipe_info); From 070718a499cea9e6d7e6404788490574dfb71388 Mon Sep 17 00:00:00 2001 From: Christophe Ricard Date: Sat, 30 Apr 2016 09:12:44 +0200 Subject: [PATCH 1285/1649] NFC: st21nfca: Drop two useless checks in ACPI probe path When st21nfca_hci_i2c_acpi_request_resources() gets called we already know that the entries in ->acpi_match_table have matched ACPI ID of the device. In addition I2C client pointer cannot be NULL in any case (otherwise I2C core would not call ->probe() for the driver in the first place). Drop the two useless checks from the driver. 
Signed-off-by: Christophe Ricard Signed-off-by: Samuel Ortiz --- drivers/nfc/st21nfca/i2c.c | 13 +------------ 1 file changed, 1 insertion(+), 12 deletions(-) diff --git a/drivers/nfc/st21nfca/i2c.c b/drivers/nfc/st21nfca/i2c.c index a3153a30be5d..5a82f553906c 100644 --- a/drivers/nfc/st21nfca/i2c.c +++ b/drivers/nfc/st21nfca/i2c.c @@ -507,21 +507,10 @@ static struct nfc_phy_ops i2c_phy_ops = { static int st21nfca_hci_i2c_acpi_request_resources(struct i2c_client *client) { struct st21nfca_i2c_phy *phy = i2c_get_clientdata(client); - const struct acpi_device_id *id; struct gpio_desc *gpiod_ena; - struct device *dev; + struct device *dev = &client->dev; u8 tmp; - if (!client) - return -EINVAL; - - dev = &client->dev; - - /* Match the struct device against a given list of ACPI IDs */ - id = acpi_match_device(dev->driver->acpi_match_table, dev); - if (!id) - return -ENODEV; - /* Get EN GPIO from ACPI */ gpiod_ena = devm_gpiod_get_index(dev, ST21NFCA_GPIO_NAME_EN, 1, GPIOD_OUT_LOW); From 4ac52a0fd066cd33b12d79b09e30fd8bdb52f8d8 Mon Sep 17 00:00:00 2001 From: Christophe Ricard Date: Sat, 30 Apr 2016 09:12:45 +0200 Subject: [PATCH 1286/1649] NFC: st-nci: i2c: Drop two useless checks in ACPI probe path When st_nci_i2c_acpi_request_resources() gets called we already know that the entries in ->acpi_match_table have matched ACPI ID of the device. In addition I2C client pointer cannot be NULL in any case (otherwise I2C core would not call ->probe() for the driver in the first place). Drop the two useless checks from the driver. Signed-off-by: Christophe Ricard Signed-off-by: Samuel Ortiz --- drivers/nfc/st-nci/i2c.c | 13 +------------ 1 file changed, 1 insertion(+), 12 deletions(-) diff --git a/drivers/nfc/st-nci/i2c.c b/drivers/nfc/st-nci/i2c.c index a6a1977e57c5..9dfae0efa922 100644 --- a/drivers/nfc/st-nci/i2c.c +++ b/drivers/nfc/st-nci/i2c.c @@ -211,21 +211,10 @@ static struct nfc_phy_ops i2c_phy_ops = { static int st_nci_i2c_acpi_request_resources(struct i2c_client *client) { struct st_nci_i2c_phy *phy = i2c_get_clientdata(client); - const struct acpi_device_id *id; struct gpio_desc *gpiod_reset; - struct device *dev; + struct device *dev = &client->dev; u8 tmp; - if (!client) - return -EINVAL; - - dev = &client->dev; - - /* Match the struct device against a given list of ACPI IDs */ - id = acpi_match_device(dev->driver->acpi_match_table, dev); - if (!id) - return -ENODEV; - /* Get RESET GPIO from ACPI */ gpiod_reset = devm_gpiod_get_index(dev, ST_NCI_GPIO_NAME_RESET, 1, GPIOD_OUT_HIGH); From 1f34b20404443717a7ec77b447d86578f0478550 Mon Sep 17 00:00:00 2001 From: Christophe Ricard Date: Sat, 30 Apr 2016 09:12:46 +0200 Subject: [PATCH 1287/1649] NFC: st-nci: spi: Drop two useless checks in ACPI probe path When st_nci_spi_acpi_request_resources() gets called we already know that the entries in ->acpi_match_table have matched ACPI ID of the device. In addition spi_device pointer cannot be NULL in any case (otherwise SPI core would not call ->probe() for the driver in the first place). Drop the two useless checks from the driver. 
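For context, a condensed sketch of why the lookup is redundant (symbol names are illustrative, abridged rather than copied from the driver): the driver registers an ACPI match table, so the bus core only calls ->probe(), and in turn this resource-request helper, after one of those IDs has already matched:

	static const struct acpi_device_id st_nci_spi_acpi_match[] = {
		{ "SMO2101", 0 },
		{ }
	};

	static struct spi_driver st_nci_spi_driver = {
		.driver = {
			.name = ST_NCI_SPI_DRIVER_NAME,
			.acpi_match_table = ACPI_PTR(st_nci_spi_acpi_match),
		},
		.probe = st_nci_spi_probe,	/* reached only after a successful match */
	};

The same reasoning covers the I2C variants touched by the neighbouring patches.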
Signed-off-by: Christophe Ricard Signed-off-by: Samuel Ortiz --- drivers/nfc/st-nci/spi.c | 13 +------------ 1 file changed, 1 insertion(+), 12 deletions(-) diff --git a/drivers/nfc/st-nci/spi.c b/drivers/nfc/st-nci/spi.c index 51a863f0a987..89e341eba3eb 100644 --- a/drivers/nfc/st-nci/spi.c +++ b/drivers/nfc/st-nci/spi.c @@ -226,21 +226,10 @@ static struct nfc_phy_ops spi_phy_ops = { static int st_nci_spi_acpi_request_resources(struct spi_device *spi_dev) { struct st_nci_spi_phy *phy = spi_get_drvdata(spi_dev); - const struct acpi_device_id *id; struct gpio_desc *gpiod_reset; - struct device *dev; + struct device *dev = &spi_dev->dev; u8 tmp; - if (!spi_dev) - return -EINVAL; - - dev = &spi_dev->dev; - - /* Match the struct device against a given list of ACPI IDs */ - id = acpi_match_device(dev->driver->acpi_match_table, dev); - if (!id) - return -ENODEV; - /* Get RESET GPIO from ACPI */ gpiod_reset = devm_gpiod_get_index(dev, ST_NCI_GPIO_NAME_RESET, 1, GPIOD_OUT_HIGH); From c50e8fef7bb5e4a77609e4120940458e419d463f Mon Sep 17 00:00:00 2001 From: Christophe Ricard Date: Sat, 30 Apr 2016 09:12:47 +0200 Subject: [PATCH 1288/1649] nfc: st-nci: Remove redundant ST_NCI_HCI_HOST_ID_ESE from st-nci.h ST_NCI_HCI_HOST_ID_ESE is already having an equivalent in se.c (ST_NCI_ESE_HOST_ID). Remove and replace where relevant. Signed-off-by: Christophe Ricard Signed-off-by: Samuel Ortiz --- drivers/nfc/st-nci/se.c | 4 ++-- drivers/nfc/st-nci/st-nci.h | 1 - 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/drivers/nfc/st-nci/se.c b/drivers/nfc/st-nci/se.c index edb6ee12a372..e7f25f4e3dc3 100644 --- a/drivers/nfc/st-nci/se.c +++ b/drivers/nfc/st-nci/se.c @@ -520,7 +520,7 @@ int st_nci_enable_se(struct nci_dev *ndev, u32 se_idx) * Same for eSE. */ r = st_nci_control_se(ndev, se_idx, ST_NCI_SE_MODE_ON); - if (r == ST_NCI_HCI_HOST_ID_ESE) { + if (r == ST_NCI_ESE_HOST_ID) { st_nci_se_get_atr(ndev); r = nci_hci_send_event(ndev, ST_NCI_APDU_READER_GATE, ST_NCI_EVT_SE_SOFT_RESET, NULL, 0); @@ -665,7 +665,7 @@ int st_nci_se_io(struct nci_dev *ndev, u32 se_idx, pr_debug("\n"); switch (se_idx) { - case ST_NCI_HCI_HOST_ID_ESE: + case ST_NCI_ESE_HOST_ID: info->se_info.cb = cb; info->se_info.cb_context = cb_context; mod_timer(&info->se_info.bwi_timer, jiffies + diff --git a/drivers/nfc/st-nci/st-nci.h b/drivers/nfc/st-nci/st-nci.h index 8b9f77b0249c..8783f9594d65 100644 --- a/drivers/nfc/st-nci/st-nci.h +++ b/drivers/nfc/st-nci/st-nci.h @@ -32,7 +32,6 @@ * sequence of at most 32 characters. */ #define ST_NCI_ESE_MAX_LENGTH 33 -#define ST_NCI_HCI_HOST_ID_ESE 0xc0 #define ST_NCI_DEVICE_MGNT_GATE 0x01 From 99adc394f2a4a16763bccbaa150b9d598b18c58f Mon Sep 17 00:00:00 2001 From: Christophe Ricard Date: Sat, 30 Apr 2016 09:12:48 +0200 Subject: [PATCH 1289/1649] nfc: st21nfca: Remove duplicated ST21NFCA_ESE_HOST_ID from se.c ST21NFCA_ESE_HOST_ID is already defined in st21nfca.h. 
Signed-off-by: Christophe Ricard Signed-off-by: Samuel Ortiz --- drivers/nfc/st21nfca/se.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/nfc/st21nfca/se.c b/drivers/nfc/st21nfca/se.c index bd56a16e4007..3a98563d4a12 100644 --- a/drivers/nfc/st21nfca/se.c +++ b/drivers/nfc/st21nfca/se.c @@ -32,8 +32,6 @@ #define ST21NFCA_EVT_CONNECTIVITY 0x10 #define ST21NFCA_EVT_TRANSACTION 0x12 -#define ST21NFCA_ESE_HOST_ID 0xc0 - #define ST21NFCA_SE_TO_HOT_PLUG 1000 /* Connectivity pipe only */ #define ST21NFCA_SE_COUNT_PIPE_UICC 0x01 From 18836029d8c074ac1846167ba702ac528e0a0ad7 Mon Sep 17 00:00:00 2001 From: Christophe Ricard Date: Sat, 30 Apr 2016 09:12:49 +0200 Subject: [PATCH 1290/1649] nfc: nci: Fix nci_core_conn_create to allowing empty destination NCI_CORE_CONN_CREATE may not have any destination type parameter. Signed-off-by: Christophe Ricard Signed-off-by: Samuel Ortiz --- net/nfc/nci/core.c | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c index fbb7a2b57b44..031ac0f9538c 100644 --- a/net/nfc/nci/core.c +++ b/net/nfc/nci/core.c @@ -610,9 +610,6 @@ int nci_core_conn_create(struct nci_dev *ndev, u8 destination_type, struct nci_core_conn_create_cmd *cmd; struct core_conn_create_data data; - if (!number_destination_params) - return -EINVAL; - data.length = params_len + sizeof(struct nci_core_conn_create_cmd); cmd = kzalloc(data.length, GFP_KERNEL); if (!cmd) @@ -620,17 +617,20 @@ int nci_core_conn_create(struct nci_dev *ndev, u8 destination_type, cmd->destination_type = destination_type; cmd->number_destination_params = number_destination_params; - memcpy(cmd->params, params, params_len); data.cmd = cmd; - if (params->length > 0) - ndev->cur_id = params->value[DEST_SPEC_PARAMS_ID_INDEX]; - else + if (params) { + memcpy(cmd->params, params, params_len); + if (params->length > 0) + ndev->cur_id = params->value[DEST_SPEC_PARAMS_ID_INDEX]; + else + ndev->cur_id = 0; + } else { ndev->cur_id = 0; + } - r = __nci_request(ndev, nci_core_conn_create_req, - (unsigned long)&data, + r = __nci_request(ndev, nci_core_conn_create_req, (unsigned long)&data, msecs_to_jiffies(NCI_CMD_TIMEOUT)); kfree(cmd); return r; From de5ea8517c2ae40785fe5d0a2d02fc71bef1761b Mon Sep 17 00:00:00 2001 From: Christophe Ricard Date: Sat, 30 Apr 2016 09:12:50 +0200 Subject: [PATCH 1291/1649] nfc: nci: Fix nci_core_conn_close nci_core_conn_close was not retrieving a conn_info using the correct connection id. 
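Condensed from the diff below (illustrative ordering only): the CORE_CONN_CLOSE response carries just a status, with no connection handle of its own, so the requester records the id up front and the response handler reads it back:

	/* request side */
	ndev->cur_conn_id = conn_id;
	__nci_request(ndev, nci_core_conn_close_req, conn_id,
		      msecs_to_jiffies(NCI_CMD_TIMEOUT));

	/* response side */
	conn_info = nci_get_conn_info_by_conn_id(ndev, ndev->cur_conn_id);
	if (conn_info) {
		list_del(&conn_info->list);
		devm_kfree(&ndev->nfc_dev->dev, conn_info);
	}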
Signed-off-by: Christophe Ricard Signed-off-by: Samuel Ortiz --- net/nfc/nci/core.c | 1 + net/nfc/nci/rsp.c | 3 ++- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c index 031ac0f9538c..0884f1444817 100644 --- a/net/nfc/nci/core.c +++ b/net/nfc/nci/core.c @@ -646,6 +646,7 @@ static void nci_core_conn_close_req(struct nci_dev *ndev, unsigned long opt) int nci_core_conn_close(struct nci_dev *ndev, u8 conn_id) { + ndev->cur_conn_id = conn_id; return __nci_request(ndev, nci_core_conn_close_req, conn_id, msecs_to_jiffies(NCI_CMD_TIMEOUT)); } diff --git a/net/nfc/nci/rsp.c b/net/nfc/nci/rsp.c index 9b6eb913d801..69fe163b7350 100644 --- a/net/nfc/nci/rsp.c +++ b/net/nfc/nci/rsp.c @@ -271,7 +271,8 @@ static void nci_core_conn_close_rsp_packet(struct nci_dev *ndev, pr_debug("status 0x%x\n", status); if (status == NCI_STATUS_OK) { - conn_info = nci_get_conn_info_by_conn_id(ndev, ndev->cur_id); + conn_info = nci_get_conn_info_by_conn_id(ndev, + ndev->cur_conn_id); if (conn_info) { list_del(&conn_info->list); devm_kfree(&ndev->nfc_dev->dev, conn_info); From 9b8d1a4cf2aa819d606b4e423a6523fc0d4460a2 Mon Sep 17 00:00:00 2001 From: Christophe Ricard Date: Sat, 30 Apr 2016 09:12:51 +0200 Subject: [PATCH 1292/1649] nfc: nci: Add an additional parameter to identify a connection id According to NCI specification, destination type and destination specific parameters shall uniquely identify a single destination for the Logical Connection. Signed-off-by: Christophe Ricard Signed-off-by: Samuel Ortiz --- drivers/nfc/fdp/fdp.c | 3 ++- drivers/nfc/st-nci/se.c | 6 ++++-- include/net/nfc/nci_core.h | 15 ++++++++++++--- net/nfc/nci/core.c | 25 ++++++++++++++++++------- net/nfc/nci/ntf.c | 2 +- net/nfc/nci/rsp.c | 20 +++++++++++++++++--- 6 files changed, 54 insertions(+), 17 deletions(-) diff --git a/drivers/nfc/fdp/fdp.c b/drivers/nfc/fdp/fdp.c index ccb07a1b153d..e44a7a2f4061 100644 --- a/drivers/nfc/fdp/fdp.c +++ b/drivers/nfc/fdp/fdp.c @@ -102,7 +102,8 @@ static int fdp_nci_create_conn(struct nci_dev *ndev) if (r) return r; - return nci_get_conn_info_by_id(ndev, 0); + return nci_get_conn_info_by_dest_type_params(ndev, + FDP_PATCH_CONN_DEST, NULL); } static inline int fdp_nci_get_versions(struct nci_dev *ndev) diff --git a/drivers/nfc/st-nci/se.c b/drivers/nfc/st-nci/se.c index e7f25f4e3dc3..420f019cc42f 100644 --- a/drivers/nfc/st-nci/se.c +++ b/drivers/nfc/st-nci/se.c @@ -600,10 +600,12 @@ static int st_nci_hci_network_init(struct nci_dev *ndev) * HCI will be used here only for proprietary commands. 
*/ if (test_bit(ST_NCI_FACTORY_MODE, &info->flags)) - r = nci_nfcee_mode_set(ndev, ndev->hci_dev->conn_info->id, + r = nci_nfcee_mode_set(ndev, + ndev->hci_dev->conn_info->dest_params->id, NCI_NFCEE_DISABLE); else - r = nci_nfcee_mode_set(ndev, ndev->hci_dev->conn_info->id, + r = nci_nfcee_mode_set(ndev, + ndev->hci_dev->conn_info->dest_params->id, NCI_NFCEE_ENABLE); free_dest_params: diff --git a/include/net/nfc/nci_core.h b/include/net/nfc/nci_core.h index 57ce24fb0047..ebb50d286ef6 100644 --- a/include/net/nfc/nci_core.h +++ b/include/net/nfc/nci_core.h @@ -109,7 +109,13 @@ struct nci_ops { struct nci_conn_info { struct list_head list; - __u8 id; /* can be an RF Discovery ID or an NFCEE ID */ + /* NCI specification 4.4.2 Connection Creation + * The combination of destination type and destination specific + * parameters shall uniquely identify a single destination for the + * Logical Connection + */ + struct dest_spec_params *dest_params; + __u8 dest_type; __u8 conn_id; __u8 max_pkt_payload_len; @@ -260,7 +266,9 @@ struct nci_dev { __u32 manufact_specific_info; /* Save RF Discovery ID or NFCEE ID under conn_create */ - __u8 cur_id; + struct dest_spec_params cur_params; + /* Save destination type under conn_create */ + __u8 cur_dest_type; /* stored during nci_data_exchange */ struct sk_buff *rx_data_reassembly; @@ -378,7 +386,8 @@ void nci_clear_target_list(struct nci_dev *ndev); void nci_req_complete(struct nci_dev *ndev, int result); struct nci_conn_info *nci_get_conn_info_by_conn_id(struct nci_dev *ndev, int conn_id); -int nci_get_conn_info_by_id(struct nci_dev *ndev, u8 id); +int nci_get_conn_info_by_dest_type_params(struct nci_dev *ndev, u8 dest_type, + struct dest_spec_params *params); /* ----- NCI status code ----- */ int nci_to_errno(__u8 code); diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c index 0884f1444817..74f2d54df4fc 100644 --- a/net/nfc/nci/core.c +++ b/net/nfc/nci/core.c @@ -64,18 +64,26 @@ struct nci_conn_info *nci_get_conn_info_by_conn_id(struct nci_dev *ndev, return NULL; } -int nci_get_conn_info_by_id(struct nci_dev *ndev, u8 id) +int nci_get_conn_info_by_dest_type_params(struct nci_dev *ndev, u8 dest_type, + struct dest_spec_params *params) { struct nci_conn_info *conn_info; list_for_each_entry(conn_info, &ndev->conn_info_list, list) { - if (conn_info->id == id) - return conn_info->conn_id; + if (conn_info->dest_type == dest_type) { + if (!params) + return conn_info->conn_id; + if (conn_info) { + if (params->id == conn_info->dest_params->id && + params->protocol == conn_info->dest_params->protocol) + return conn_info->conn_id; + } + } } return -EINVAL; } -EXPORT_SYMBOL(nci_get_conn_info_by_id); +EXPORT_SYMBOL(nci_get_conn_info_by_dest_type_params); /* ---- NCI requests ---- */ @@ -623,12 +631,15 @@ int nci_core_conn_create(struct nci_dev *ndev, u8 destination_type, if (params) { memcpy(cmd->params, params, params_len); if (params->length > 0) - ndev->cur_id = params->value[DEST_SPEC_PARAMS_ID_INDEX]; + memcpy(&ndev->cur_params, + ¶ms->value[DEST_SPEC_PARAMS_ID_INDEX], + sizeof(struct dest_spec_params)); else - ndev->cur_id = 0; + ndev->cur_params.id = 0; } else { - ndev->cur_id = 0; + ndev->cur_params.id = 0; } + ndev->cur_dest_type = destination_type; r = __nci_request(ndev, nci_core_conn_create_req, (unsigned long)&data, msecs_to_jiffies(NCI_CMD_TIMEOUT)); diff --git a/net/nfc/nci/ntf.c b/net/nfc/nci/ntf.c index 2ada2b39e355..1e8c1a12aaec 100644 --- a/net/nfc/nci/ntf.c +++ b/net/nfc/nci/ntf.c @@ -734,7 +734,7 @@ static void 
nci_nfcee_discover_ntf_packet(struct nci_dev *ndev, * “HCI Access”, even if the HCI Network contains multiple NFCEEs. */ ndev->hci_dev->nfcee_id = nfcee_ntf->nfcee_id; - ndev->cur_id = nfcee_ntf->nfcee_id; + ndev->cur_params.id = nfcee_ntf->nfcee_id; nci_req_complete(ndev, status); } diff --git a/net/nfc/nci/rsp.c b/net/nfc/nci/rsp.c index 69fe163b7350..e3bbf1937d0e 100644 --- a/net/nfc/nci/rsp.c +++ b/net/nfc/nci/rsp.c @@ -226,7 +226,7 @@ static void nci_core_conn_create_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb) { __u8 status = skb->data[0]; - struct nci_conn_info *conn_info; + struct nci_conn_info *conn_info = NULL; struct nci_core_conn_create_rsp *rsp; pr_debug("status 0x%x\n", status); @@ -241,7 +241,17 @@ static void nci_core_conn_create_rsp_packet(struct nci_dev *ndev, goto exit; } - conn_info->id = ndev->cur_id; + conn_info->dest_params = devm_kzalloc(&ndev->nfc_dev->dev, + sizeof(struct dest_spec_params), + GFP_KERNEL); + if (!conn_info->dest_params) { + status = NCI_STATUS_REJECTED; + goto free_conn_info; + } + + conn_info->dest_type = ndev->cur_dest_type; + conn_info->dest_params->id = ndev->cur_params.id; + conn_info->dest_params->protocol = ndev->cur_params.protocol; conn_info->conn_id = rsp->conn_id; /* Note: data_exchange_cb and data_exchange_cb_context need to @@ -251,7 +261,7 @@ static void nci_core_conn_create_rsp_packet(struct nci_dev *ndev, INIT_LIST_HEAD(&conn_info->list); list_add(&conn_info->list, &ndev->conn_info_list); - if (ndev->cur_id == ndev->hci_dev->nfcee_id) + if (ndev->cur_params.id == ndev->hci_dev->nfcee_id) ndev->hci_dev->conn_info = conn_info; conn_info->conn_id = rsp->conn_id; @@ -259,7 +269,11 @@ static void nci_core_conn_create_rsp_packet(struct nci_dev *ndev, atomic_set(&conn_info->credits_cnt, rsp->credits_cnt); } +free_conn_info: + if (status == NCI_STATUS_REJECTED) + devm_kfree(&ndev->nfc_dev->dev, conn_info); exit: + nci_req_complete(ndev, status); } From 1c53855f6be2e7da270e86cae381745ee6105eab Mon Sep 17 00:00:00 2001 From: Christophe Ricard Date: Sat, 30 Apr 2016 09:12:52 +0200 Subject: [PATCH 1293/1649] nfc: nci: Add nci_nfcc_loopback to the nci core For test purpose, provide the generic nci loopback function. 
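A hypothetical caller, only to show how the helper added below is meant to be used (the function name and the echo check are assumptions, not part of the patch):

	static int nci_loopback_selftest(struct nci_dev *ndev)
	{
		static const u8 pattern[] = { 0x01, 0x02, 0x03, 0x04 };
		struct sk_buff *resp = NULL;
		int r;

		r = nci_nfcc_loopback(ndev, (void *)pattern, sizeof(pattern), &resp);
		if (r)
			return r;

		/* the NFCC is expected to echo the payload back on the loopback conn */
		if (!resp || resp->len < sizeof(pattern) ||
		    memcmp(resp->data, pattern, sizeof(pattern)))
			r = -EIO;

		kfree_skb(resp);
		return r;
	}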
Signed-off-by: Christophe Ricard Signed-off-by: Samuel Ortiz --- include/net/nfc/nci_core.h | 2 + net/nfc/nci/core.c | 77 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 79 insertions(+) diff --git a/include/net/nfc/nci_core.h b/include/net/nfc/nci_core.h index ebb50d286ef6..87499b6b35d6 100644 --- a/include/net/nfc/nci_core.h +++ b/include/net/nfc/nci_core.h @@ -306,6 +306,8 @@ int nci_core_conn_create(struct nci_dev *ndev, u8 destination_type, size_t params_len, struct core_conn_create_dest_spec_params *params); int nci_core_conn_close(struct nci_dev *ndev, u8 conn_id); +int nci_nfcc_loopback(struct nci_dev *ndev, void *data, size_t data_len, + struct sk_buff **resp); struct nci_hci_dev *nci_hci_allocate(struct nci_dev *ndev); int nci_hci_send_event(struct nci_dev *ndev, u8 gate, u8 event, diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c index 74f2d54df4fc..61fff422424f 100644 --- a/net/nfc/nci/core.c +++ b/net/nfc/nci/core.c @@ -400,6 +400,83 @@ int nci_core_init(struct nci_dev *ndev) } EXPORT_SYMBOL(nci_core_init); +struct nci_loopback_data { + u8 conn_id; + struct sk_buff *data; +}; + +static void nci_send_data_req(struct nci_dev *ndev, unsigned long opt) +{ + struct nci_loopback_data *data = (struct nci_loopback_data *)opt; + + nci_send_data(ndev, data->conn_id, data->data); +} + +static void nci_nfcc_loopback_cb(void *context, struct sk_buff *skb, int err) +{ + struct nci_dev *ndev = (struct nci_dev *)context; + struct nci_conn_info *conn_info; + + conn_info = nci_get_conn_info_by_conn_id(ndev, ndev->cur_conn_id); + if (!conn_info) { + nci_req_complete(ndev, NCI_STATUS_REJECTED); + return; + } + + conn_info->rx_skb = skb; + + nci_req_complete(ndev, NCI_STATUS_OK); +} + +int nci_nfcc_loopback(struct nci_dev *ndev, void *data, size_t data_len, + struct sk_buff **resp) +{ + int r; + struct nci_loopback_data loopback_data; + struct nci_conn_info *conn_info; + struct sk_buff *skb; + int conn_id = nci_get_conn_info_by_dest_type_params(ndev, + NCI_DESTINATION_NFCC_LOOPBACK, NULL); + + if (conn_id < 0) { + r = nci_core_conn_create(ndev, NCI_DESTINATION_NFCC_LOOPBACK, + 0, 0, NULL); + if (r != NCI_STATUS_OK) + return r; + + conn_id = nci_get_conn_info_by_dest_type_params(ndev, + NCI_DESTINATION_NFCC_LOOPBACK, + NULL); + } + + conn_info = nci_get_conn_info_by_conn_id(ndev, conn_id); + if (!conn_info) + return -EPROTO; + + /* store cb and context to be used on receiving data */ + conn_info->data_exchange_cb = nci_nfcc_loopback_cb; + conn_info->data_exchange_cb_context = ndev; + + skb = nci_skb_alloc(ndev, NCI_DATA_HDR_SIZE + data_len, GFP_KERNEL); + if (!skb) + return -ENOMEM; + + skb_reserve(skb, NCI_DATA_HDR_SIZE); + memcpy(skb_put(skb, data_len), data, data_len); + + loopback_data.conn_id = conn_id; + loopback_data.data = skb; + + ndev->cur_conn_id = conn_id; + r = nci_request(ndev, nci_send_data_req, (unsigned long)&loopback_data, + msecs_to_jiffies(NCI_DATA_TIMEOUT)); + if (r == NCI_STATUS_OK && resp) + *resp = conn_info->rx_skb; + + return r; +} +EXPORT_SYMBOL(nci_nfcc_loopback); + static int nci_open_device(struct nci_dev *ndev) { int rc = 0; From 3aacd7fe552b093fc24a8082e16467eb26c2fa32 Mon Sep 17 00:00:00 2001 From: Christophe Ricard Date: Sat, 30 Apr 2016 09:12:53 +0200 Subject: [PATCH 1294/1649] nfc: st-nci: Move loopback usage from HCI to NCI NCI provides possible way to run loopback testing has done over HCI. 
For us it offers many advantages: - It simplifies the code: No more need for a vendor_cmds structure - Loopback over HCI may not be supported in future st-nci firmware Signed-off-by: Christophe Ricard Signed-off-by: Samuel Ortiz --- drivers/nfc/st-nci/se.c | 5 --- drivers/nfc/st-nci/st-nci.h | 13 ++----- drivers/nfc/st-nci/vendor_cmds.c | 62 +++++++------------------------- 3 files changed, 14 insertions(+), 66 deletions(-) diff --git a/drivers/nfc/st-nci/se.c b/drivers/nfc/st-nci/se.c index 420f019cc42f..56f2112e0cd8 100644 --- a/drivers/nfc/st-nci/se.c +++ b/drivers/nfc/st-nci/se.c @@ -113,8 +113,6 @@ static struct nci_hci_gate st_nci_gates[] = { {NCI_HCI_IDENTITY_MGMT_GATE, NCI_HCI_INVALID_PIPE, ST_NCI_HOST_CONTROLLER_ID}, - {NCI_HCI_LOOPBACK_GATE, NCI_HCI_INVALID_PIPE, - ST_NCI_HOST_CONTROLLER_ID}, /* Secure element pipes are created by secure element host */ {ST_NCI_CONNECTIVITY_GATE, NCI_HCI_DO_NOT_OPEN_PIPE, @@ -385,9 +383,6 @@ void st_nci_hci_event_received(struct nci_dev *ndev, u8 pipe, case ST_NCI_CONNECTIVITY_GATE: st_nci_hci_connectivity_event_received(ndev, host, event, skb); break; - case NCI_HCI_LOOPBACK_GATE: - st_nci_hci_loopback_event_received(ndev, event, skb); - break; } } EXPORT_SYMBOL_GPL(st_nci_hci_event_received); diff --git a/drivers/nfc/st-nci/st-nci.h b/drivers/nfc/st-nci/st-nci.h index 8783f9594d65..afaf138b7e1b 100644 --- a/drivers/nfc/st-nci/st-nci.h +++ b/drivers/nfc/st-nci/st-nci.h @@ -92,8 +92,7 @@ struct st_nci_se_info { * white list). * @HCI_DM_FIELD_GENERATOR: Allow to generate different kind of RF * technology. When using this command to anti-collision is done. - * @HCI_LOOPBACK: Allow to echo a command and test the Dh to CLF - * connectivity. + * @LOOPBACK: Allow to echo a command and test the Dh to CLF connectivity. * @HCI_DM_VDC_MEASUREMENT_VALUE: Allow to measure the field applied on the * CLF antenna. A value between 0 and 0x0f is returned. 0 is maximum. * @HCI_DM_FWUPD_START: Allow to put CLF into firmware update mode. 
It is a @@ -115,7 +114,7 @@ enum nfc_vendor_cmds { HCI_DM_RESET, HCI_GET_PARAM, HCI_DM_FIELD_GENERATOR, - HCI_LOOPBACK, + LOOPBACK, HCI_DM_FWUPD_START, HCI_DM_FWUPD_END, HCI_DM_VDC_MEASUREMENT_VALUE, @@ -123,17 +122,11 @@ enum nfc_vendor_cmds { MANUFACTURER_SPECIFIC, }; -struct st_nci_vendor_info { - struct completion req_completion; - struct sk_buff *rx_skb; -}; - struct st_nci_info { struct llt_ndlc *ndlc; unsigned long flags; struct st_nci_se_info se_info; - struct st_nci_vendor_info vendor_info; }; void st_nci_remove(struct nci_dev *ndev); @@ -155,8 +148,6 @@ void st_nci_hci_event_received(struct nci_dev *ndev, u8 pipe, void st_nci_hci_cmd_received(struct nci_dev *ndev, u8 pipe, u8 cmd, struct sk_buff *skb); -void st_nci_hci_loopback_event_received(struct nci_dev *ndev, u8 event, - struct sk_buff *skb); int st_nci_vendor_cmds_init(struct nci_dev *ndev); #endif /* __LOCAL_ST_NCI_H_ */ diff --git a/drivers/nfc/st-nci/vendor_cmds.c b/drivers/nfc/st-nci/vendor_cmds.c index b5debce4ae0b..1a836c77c268 100644 --- a/drivers/nfc/st-nci/vendor_cmds.c +++ b/drivers/nfc/st-nci/vendor_cmds.c @@ -333,62 +333,28 @@ exit: return r; } -void st_nci_hci_loopback_event_received(struct nci_dev *ndev, u8 event, - struct sk_buff *skb) -{ - struct st_nci_info *info = nci_get_drvdata(ndev); - - switch (event) { - case ST_NCI_EVT_POST_DATA: - info->vendor_info.rx_skb = skb; - break; - default: - nfc_err(&ndev->nfc_dev->dev, "Unexpected event on loopback gate\n"); - } - complete(&info->vendor_info.req_completion); -} -EXPORT_SYMBOL(st_nci_hci_loopback_event_received); - -static int st_nci_hci_loopback(struct nfc_dev *dev, void *data, - size_t data_len) +static int st_nci_loopback(struct nfc_dev *dev, void *data, + size_t data_len) { int r; - struct sk_buff *msg; + struct sk_buff *msg, *skb; struct nci_dev *ndev = nfc_get_drvdata(dev); - struct st_nci_info *info = nci_get_drvdata(ndev); if (data_len <= 0) return -EPROTO; - reinit_completion(&info->vendor_info.req_completion); - info->vendor_info.rx_skb = NULL; + r = nci_nfcc_loopback(ndev, data, data_len, &skb); + if (r < 0) + return r; - r = nci_hci_send_event(ndev, NCI_HCI_LOOPBACK_GATE, - ST_NCI_EVT_POST_DATA, data, data_len); - if (r != data_len) { - r = -EPROTO; - goto exit; - } - - wait_for_completion_interruptible(&info->vendor_info.req_completion); - - if (!info->vendor_info.rx_skb || - info->vendor_info.rx_skb->len != data_len) { - r = -EPROTO; - goto exit; - } - - msg = nfc_vendor_cmd_alloc_reply_skb(ndev->nfc_dev, - ST_NCI_VENDOR_OUI, - HCI_LOOPBACK, - info->vendor_info.rx_skb->len); + msg = nfc_vendor_cmd_alloc_reply_skb(dev, ST_NCI_VENDOR_OUI, + LOOPBACK, skb->len); if (!msg) { r = -ENOMEM; goto free_skb; } - if (nla_put(msg, NFC_ATTR_VENDOR_DATA, info->vendor_info.rx_skb->len, - info->vendor_info.rx_skb->data)) { + if (nla_put(msg, NFC_ATTR_VENDOR_DATA, skb->len, skb->data)) { kfree_skb(msg); r = -ENOBUFS; goto free_skb; @@ -396,8 +362,7 @@ static int st_nci_hci_loopback(struct nfc_dev *dev, void *data, r = nfc_vendor_cmd_reply(msg); free_skb: - kfree_skb(info->vendor_info.rx_skb); -exit: + kfree_skb(skb); return r; } @@ -485,8 +450,8 @@ static struct nfc_vendor_cmd st_nci_vendor_cmds[] = { }, { .vendor_id = ST_NCI_VENDOR_OUI, - .subcmd = HCI_LOOPBACK, - .doit = st_nci_hci_loopback, + .subcmd = LOOPBACK, + .doit = st_nci_loopback, }, { .vendor_id = ST_NCI_VENDOR_OUI, @@ -507,9 +472,6 @@ static struct nfc_vendor_cmd st_nci_vendor_cmds[] = { int st_nci_vendor_cmds_init(struct nci_dev *ndev) { - struct st_nci_info *info = nci_get_drvdata(ndev); - - 
init_completion(&info->vendor_info.req_completion); return nfc_set_vendor_cmds(ndev->nfc_dev, st_nci_vendor_cmds, sizeof(st_nci_vendor_cmds)); } From 229d2850815fd64b4e084fbf4c40218301fffc9c Mon Sep 17 00:00:00 2001 From: Sridhar Samudrala Date: Mon, 2 May 2016 03:33:42 -0700 Subject: [PATCH 1295/1649] net_sched: act_mirred: add helper inlines to access tcf_mirred info Signed-off-by: Sridhar Samudrala Signed-off-by: Jeff Kirsher --- include/net/tc_act/tc_mirred.h | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/include/net/tc_act/tc_mirred.h b/include/net/tc_act/tc_mirred.h index dae96bae1c19..e891835eb74e 100644 --- a/include/net/tc_act/tc_mirred.h +++ b/include/net/tc_act/tc_mirred.h @@ -2,6 +2,7 @@ #define __NET_TC_MIR_H #include +#include struct tcf_mirred { struct tcf_common common; @@ -14,4 +15,18 @@ struct tcf_mirred { #define to_mirred(a) \ container_of(a->priv, struct tcf_mirred, common) +static inline bool is_tcf_mirred_redirect(const struct tc_action *a) +{ +#ifdef CONFIG_NET_CLS_ACT + if (a->ops && a->ops->type == TCA_ACT_MIRRED) + return to_mirred(a)->tcfm_eaction == TCA_EGRESS_REDIR; +#endif + return false; +} + +static inline int tcf_mirred_ifindex(const struct tc_action *a) +{ + return to_mirred(a)->tcfm_ifindex; +} + #endif /* __NET_TC_MIR_H */ From 947f8a4552325458b9cda200238292a6930828a8 Mon Sep 17 00:00:00 2001 From: Sridhar Samudrala Date: Tue, 5 Apr 2016 10:39:07 -0700 Subject: [PATCH 1296/1649] ixgbe: Add support for redirect action to cls_u32 offloads This patch enables 'redirect' to a SRIOV VF or a offloaded macvlan device queue via tc 'mirred' action. Verified with the following script that creates SRIOV VFs, offloaded macvlan and adds tc u32 filters with redirect action to the associated netdevs. # add ingress qdisc. tc qdisc add dev p4p1 ingress # enable hw tc offload. ethtool -K p4p1 hw-tc-offload on # create 4 sriov VFs and bring up the first one. echo 4 > /sys/class/net/p4p1/device/sriov_numvfs sleep 1 ip link set p4p1 up ip link set p4p1_0 up # create a offloaded macvlan device and bring it up. 
ethtool -K p4p1 l2-fwd-offload on ip link add link p4p1 name mvlan_1 type macvlan ip link set mvlan_1 up # add u32 filter with action to redirect to VF netdev tc filter add dev p4p1 parent ffff: protocol ip prio 99 \ handle 800:0:1 u32 ht 800: \ match ip src 192.168.1.3/32 \ action mirred egress redirect dev p4p1_0 # add u32 filter with action to redirect to macvlan netdev tc filter add dev p4p1 parent ffff: protocol ip prio 99 \ handle 800:0:2 u32 ht 800: \ match ip src 192.168.2.3/32 \ action mirred egress redirect dev mvlan_1 Signed-off-by: Sridhar Samudrala Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 96 ++++++++++++++++--- 1 file changed, 83 insertions(+), 13 deletions(-) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 0ef4a15bb23e..5aa22ac49934 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -53,6 +53,7 @@ #include #include #include +#include #include "ixgbe.h" #include "ixgbe_common.h" @@ -8319,6 +8320,85 @@ static int ixgbe_configure_clsu32_del_hnode(struct ixgbe_adapter *adapter, return 0; } +#ifdef CONFIG_NET_CLS_ACT +static int handle_redirect_action(struct ixgbe_adapter *adapter, int ifindex, + u8 *queue, u64 *action) +{ + unsigned int num_vfs = adapter->num_vfs, vf; + struct net_device *upper; + struct list_head *iter; + + /* redirect to a SRIOV VF */ + for (vf = 0; vf < num_vfs; ++vf) { + upper = pci_get_drvdata(adapter->vfinfo[vf].vfdev); + if (upper->ifindex == ifindex) { + if (adapter->num_rx_pools > 1) + *queue = vf * 2; + else + *queue = vf * adapter->num_rx_queues_per_pool; + + *action = vf + 1; + *action <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF; + return 0; + } + } + + /* redirect to a offloaded macvlan netdev */ + netdev_for_each_all_upper_dev_rcu(adapter->netdev, upper, iter) { + if (netif_is_macvlan(upper)) { + struct macvlan_dev *dfwd = netdev_priv(upper); + struct ixgbe_fwd_adapter *vadapter = dfwd->fwd_priv; + + if (vadapter && vadapter->netdev->ifindex == ifindex) { + *queue = adapter->rx_ring[vadapter->rx_base_queue]->reg_idx; + *action = *queue; + return 0; + } + } + } + + return -EINVAL; +} + +static int parse_tc_actions(struct ixgbe_adapter *adapter, + struct tcf_exts *exts, u64 *action, u8 *queue) +{ + const struct tc_action *a; + int err; + + if (tc_no_actions(exts)) + return -EINVAL; + + tc_for_each_action(a, exts) { + + /* Drop action */ + if (is_tcf_gact_shot(a)) { + *action = IXGBE_FDIR_DROP_QUEUE; + *queue = IXGBE_FDIR_DROP_QUEUE; + return 0; + } + + /* Redirect to a VF or a offloaded macvlan */ + if (is_tcf_mirred_redirect(a)) { + int ifindex = tcf_mirred_ifindex(a); + + err = handle_redirect_action(adapter, ifindex, queue, + action); + if (err == 0) + return err; + } + } + + return -EINVAL; +} +#else +static int parse_tc_actions(struct ixgbe_adapter *adapter, + struct tcf_exts *exts, u64 *action, u8 *queue) +{ + return -EINVAL; +} +#endif /* CONFIG_NET_CLS_ACT */ + static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter, __be16 protocol, struct tc_cls_u32_offload *cls) @@ -8328,9 +8408,6 @@ static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter, struct ixgbe_mat_field *field_ptr; struct ixgbe_fdir_filter *input; union ixgbe_atr_input mask; -#ifdef CONFIG_NET_CLS_ACT - const struct tc_action *a; -#endif int i, err = 0; u8 queue; u32 uhtid, link_uhtid; @@ -8432,18 +8509,11 @@ static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter, if 
(input->filter.formatted.flow_type == IXGBE_ATR_FLOW_TYPE_IPV4) mask.formatted.flow_type &= IXGBE_ATR_L4TYPE_IPV6_MASK; -#ifdef CONFIG_NET_CLS_ACT - if (list_empty(&cls->knode.exts->actions)) + err = parse_tc_actions(adapter, cls->knode.exts, &input->action, + &queue); + if (err < 0) goto err_out; - list_for_each_entry(a, &cls->knode.exts->actions, list) { - if (!is_tcf_gact_shot(a)) - goto err_out; - } -#endif - - input->action = IXGBE_FDIR_DROP_QUEUE; - queue = IXGBE_FDIR_DROP_QUEUE; input->sw_idx = loc; spin_lock(&adapter->fdir_perfect_lock); From 1cdaaf5405ba910275fca720cab7f24a48fbdb14 Mon Sep 17 00:00:00 2001 From: Amritha Nambiar Date: Thu, 14 Apr 2016 19:08:53 -0400 Subject: [PATCH 1297/1649] ixgbe: Match on multiple headers for cls_u32 offloads Adds support to set filters with multiple header fields (L3,L4)to match on. This is achieved in the following order: 1. Create a leaf hash table for the next header. 2. Create a link to the leaf hash table from the base hash table with matches on next header type and current header fields. 3. Add filter in leaf hash table with match on next header fields and action. Verified with the following filters : Match TCP and DIP: handle 1: u32 divisor 1 u32 ht 800: order 1 link 1: \ offset at 0 mask 0f00 shift 6 plus 0 eat \ match ip protocol 6 ff match ip dst 10.0.0.1/32 match tcp src 28 ffff action drop Delete the filter: Match on DIP, SIP, UDP (SPort, DPort): handle 2: u32 divisor 1 u32 ht 800: order 2 link 2: \ offset at 0 mask 0f00 shift 6 plus 0 eat \ match ip dst 15.0.0.2/32 match ip protocol 17 ff \ match ip src 15.0.0.1/32 match udp src 30 ffff match udp dst 32 ffff action drop Signed-off-by: Amritha Nambiar Acked-by: Sridhar Samudrala Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe.h | 2 +- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 222 ++++++++++++------ .../net/ethernet/intel/ixgbe/ixgbe_model.h | 6 + 3 files changed, 159 insertions(+), 71 deletions(-) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index 781c8787ab66..515c3dc64d4f 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -792,7 +792,7 @@ struct ixgbe_adapter { unsigned long fwd_bitmask; /* Bitmask indicating in use pools */ #define IXGBE_MAX_LINK_HANDLE 10 - struct ixgbe_mat_field *jump_tables[IXGBE_MAX_LINK_HANDLE]; + struct ixgbe_jump_table *jump_tables[IXGBE_MAX_LINK_HANDLE]; unsigned long tables; /* maximum number of RETA entries among all devices supported by ixgbe diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 5aa22ac49934..3d895b600451 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -5574,6 +5574,7 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter) unsigned int rss, fdir; u32 fwsm; u16 device_caps; + int i; #ifdef CONFIG_IXGBE_DCB int j; struct tc_configuration *tc; @@ -5609,7 +5610,14 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter) #endif /* IXGBE_FCOE */ /* initialize static ixgbe jump table entries */ - adapter->jump_tables[0] = ixgbe_ipv4_fields; + adapter->jump_tables[0] = kzalloc(sizeof(*adapter->jump_tables[0]), + GFP_KERNEL); + if (!adapter->jump_tables[0]) + return -ENOMEM; + adapter->jump_tables[0]->mat = ixgbe_ipv4_fields; + + for (i = 1; i < IXGBE_MAX_LINK_HANDLE; i++) + adapter->jump_tables[i] = NULL; adapter->mac_table = kzalloc(sizeof(struct 
ixgbe_mac_addr) * hw->mac.num_rar_entries, @@ -8399,6 +8407,55 @@ static int parse_tc_actions(struct ixgbe_adapter *adapter, } #endif /* CONFIG_NET_CLS_ACT */ +static int ixgbe_clsu32_build_input(struct ixgbe_fdir_filter *input, + union ixgbe_atr_input *mask, + struct tc_cls_u32_offload *cls, + struct ixgbe_mat_field *field_ptr, + struct ixgbe_nexthdr *nexthdr) +{ + int i, j, off; + __be32 val, m; + bool found_entry = false, found_jump_field = false; + + for (i = 0; i < cls->knode.sel->nkeys; i++) { + off = cls->knode.sel->keys[i].off; + val = cls->knode.sel->keys[i].val; + m = cls->knode.sel->keys[i].mask; + + for (j = 0; field_ptr[j].val; j++) { + if (field_ptr[j].off == off) { + field_ptr[j].val(input, mask, val, m); + input->filter.formatted.flow_type |= + field_ptr[j].type; + found_entry = true; + break; + } + } + if (nexthdr) { + if (nexthdr->off == cls->knode.sel->keys[i].off && + nexthdr->val == cls->knode.sel->keys[i].val && + nexthdr->mask == cls->knode.sel->keys[i].mask) + found_jump_field = true; + else + continue; + } + } + + if (nexthdr && !found_jump_field) + return -EINVAL; + + if (!found_entry) + return 0; + + mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK | + IXGBE_ATR_L4TYPE_MASK; + + if (input->filter.formatted.flow_type == IXGBE_ATR_FLOW_TYPE_IPV4) + mask->formatted.flow_type &= IXGBE_ATR_L4TYPE_IPV6_MASK; + + return 0; +} + static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter, __be16 protocol, struct tc_cls_u32_offload *cls) @@ -8406,13 +8463,13 @@ static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter, u32 loc = cls->knode.handle & 0xfffff; struct ixgbe_hw *hw = &adapter->hw; struct ixgbe_mat_field *field_ptr; - struct ixgbe_fdir_filter *input; - union ixgbe_atr_input mask; - int i, err = 0; + struct ixgbe_fdir_filter *input = NULL; + union ixgbe_atr_input *mask = NULL; + struct ixgbe_jump_table *jump = NULL; + int i, err = -EINVAL; u8 queue; u32 uhtid, link_uhtid; - memset(&mask, 0, sizeof(union ixgbe_atr_input)); uhtid = TC_U32_USERHTID(cls->knode.handle); link_uhtid = TC_U32_USERHTID(cls->knode.link_handle); @@ -8424,39 +8481,11 @@ static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter, * headers when needed. */ if (protocol != htons(ETH_P_IP)) - return -EINVAL; - - if (link_uhtid) { - struct ixgbe_nexthdr *nexthdr = ixgbe_ipv4_jumps; - - if (link_uhtid >= IXGBE_MAX_LINK_HANDLE) - return -EINVAL; - - if (!test_bit(link_uhtid - 1, &adapter->tables)) - return -EINVAL; - - for (i = 0; nexthdr[i].jump; i++) { - if (nexthdr[i].o != cls->knode.sel->offoff || - nexthdr[i].s != cls->knode.sel->offshift || - nexthdr[i].m != cls->knode.sel->offmask || - /* do not support multiple key jumps its just mad */ - cls->knode.sel->nkeys > 1) - return -EINVAL; - - if (nexthdr[i].off == cls->knode.sel->keys[0].off && - nexthdr[i].val == cls->knode.sel->keys[0].val && - nexthdr[i].mask == cls->knode.sel->keys[0].mask) { - adapter->jump_tables[link_uhtid] = - nexthdr[i].jump; - break; - } - } - return 0; - } + return err; if (loc >= ((1024 << adapter->fdir_pballoc) - 2)) { e_err(drv, "Location out of range\n"); - return -EINVAL; + return err; } /* cls u32 is a graph starting at root node 0x800. The driver tracks @@ -8467,47 +8496,85 @@ static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter, * this function _should_ be generic try not to hardcode values here. 
*/ if (uhtid == 0x800) { - field_ptr = adapter->jump_tables[0]; + field_ptr = (adapter->jump_tables[0])->mat; } else { if (uhtid >= IXGBE_MAX_LINK_HANDLE) - return -EINVAL; - - field_ptr = adapter->jump_tables[uhtid]; + return err; + if (!adapter->jump_tables[uhtid]) + return err; + field_ptr = (adapter->jump_tables[uhtid])->mat; } if (!field_ptr) - return -EINVAL; + return err; + + /* At this point we know the field_ptr is valid and need to either + * build cls_u32 link or attach filter. Because adding a link to + * a handle that does not exist is invalid and the same for adding + * rules to handles that don't exist. + */ + + if (link_uhtid) { + struct ixgbe_nexthdr *nexthdr = ixgbe_ipv4_jumps; + + if (link_uhtid >= IXGBE_MAX_LINK_HANDLE) + return err; + + if (!test_bit(link_uhtid - 1, &adapter->tables)) + return err; + + for (i = 0; nexthdr[i].jump; i++) { + if (nexthdr[i].o != cls->knode.sel->offoff || + nexthdr[i].s != cls->knode.sel->offshift || + nexthdr[i].m != cls->knode.sel->offmask) + return err; + + jump = kzalloc(sizeof(*jump), GFP_KERNEL); + if (!jump) + return -ENOMEM; + input = kzalloc(sizeof(*input), GFP_KERNEL); + if (!input) { + err = -ENOMEM; + goto free_jump; + } + mask = kzalloc(sizeof(*mask), GFP_KERNEL); + if (!mask) { + err = -ENOMEM; + goto free_input; + } + jump->input = input; + jump->mask = mask; + err = ixgbe_clsu32_build_input(input, mask, cls, + field_ptr, &nexthdr[i]); + if (!err) { + jump->mat = nexthdr[i].jump; + adapter->jump_tables[link_uhtid] = jump; + break; + } + } + return 0; + } input = kzalloc(sizeof(*input), GFP_KERNEL); if (!input) return -ENOMEM; - - for (i = 0; i < cls->knode.sel->nkeys; i++) { - int off = cls->knode.sel->keys[i].off; - __be32 val = cls->knode.sel->keys[i].val; - __be32 m = cls->knode.sel->keys[i].mask; - bool found_entry = false; - int j; - - for (j = 0; field_ptr[j].val; j++) { - if (field_ptr[j].off == off) { - field_ptr[j].val(input, &mask, val, m); - input->filter.formatted.flow_type |= - field_ptr[j].type; - found_entry = true; - break; - } - } - - if (!found_entry) - goto err_out; + mask = kzalloc(sizeof(*mask), GFP_KERNEL); + if (!mask) { + err = -ENOMEM; + goto free_input; } - mask.formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK | - IXGBE_ATR_L4TYPE_MASK; - - if (input->filter.formatted.flow_type == IXGBE_ATR_FLOW_TYPE_IPV4) - mask.formatted.flow_type &= IXGBE_ATR_L4TYPE_IPV6_MASK; + if ((uhtid != 0x800) && (adapter->jump_tables[uhtid])) { + if ((adapter->jump_tables[uhtid])->input) + memcpy(input, (adapter->jump_tables[uhtid])->input, + sizeof(*input)); + if ((adapter->jump_tables[uhtid])->mask) + memcpy(mask, (adapter->jump_tables[uhtid])->mask, + sizeof(*mask)); + } + err = ixgbe_clsu32_build_input(input, mask, cls, field_ptr, NULL); + if (err) + goto err_out; err = parse_tc_actions(adapter, cls->knode.exts, &input->action, &queue); @@ -8519,28 +8586,33 @@ static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter, spin_lock(&adapter->fdir_perfect_lock); if (hlist_empty(&adapter->fdir_filter_list)) { - memcpy(&adapter->fdir_mask, &mask, sizeof(mask)); - err = ixgbe_fdir_set_input_mask_82599(hw, &mask); + memcpy(&adapter->fdir_mask, mask, sizeof(*mask)); + err = ixgbe_fdir_set_input_mask_82599(hw, mask); if (err) goto err_out_w_lock; - } else if (memcmp(&adapter->fdir_mask, &mask, sizeof(mask))) { + } else if (memcmp(&adapter->fdir_mask, mask, sizeof(*mask))) { err = -EINVAL; goto err_out_w_lock; } - ixgbe_atr_compute_perfect_hash_82599(&input->filter, &mask); + 
ixgbe_atr_compute_perfect_hash_82599(&input->filter, mask); err = ixgbe_fdir_write_perfect_filter_82599(hw, &input->filter, input->sw_idx, queue); if (!err) ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx); spin_unlock(&adapter->fdir_perfect_lock); + kfree(mask); return err; err_out_w_lock: spin_unlock(&adapter->fdir_perfect_lock); err_out: + kfree(mask); +free_input: kfree(input); - return -EINVAL; +free_jump: + kfree(jump); + return err; } static int __ixgbe_setup_tc(struct net_device *dev, u32 handle, __be16 proto, @@ -9612,6 +9684,7 @@ err_sw_init: ixgbe_disable_sriov(adapter); adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP; iounmap(adapter->io_addr); + kfree(adapter->jump_tables[0]); kfree(adapter->mac_table); err_ioremap: disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state); @@ -9640,6 +9713,7 @@ static void ixgbe_remove(struct pci_dev *pdev) struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); struct net_device *netdev; bool disable_dev; + int i; /* if !adapter then we already cleaned up in probe */ if (!adapter) @@ -9689,6 +9763,14 @@ static void ixgbe_remove(struct pci_dev *pdev) e_dev_info("complete\n"); + for (i = 0; i < IXGBE_MAX_LINK_HANDLE; i++) { + if (adapter->jump_tables[i]) { + kfree(adapter->jump_tables[i]->input); + kfree(adapter->jump_tables[i]->mask); + } + kfree(adapter->jump_tables[i]); + } + kfree(adapter->mac_table); disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state); free_netdev(netdev); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h index 60adde55a8c3..a8bed3d887f7 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h @@ -38,6 +38,12 @@ struct ixgbe_mat_field { unsigned int type; }; +struct ixgbe_jump_table { + struct ixgbe_mat_field *mat; + struct ixgbe_fdir_filter *input; + union ixgbe_atr_input *mask; +}; + static inline int ixgbe_mat_prgm_sip(struct ixgbe_fdir_filter *input, union ixgbe_atr_input *mask, u32 val, u32 m) From b4363fbd8df2be23439e15a53b4040897228c481 Mon Sep 17 00:00:00 2001 From: KY Srinivasan Date: Tue, 19 Apr 2016 19:17:56 -0700 Subject: [PATCH 1298/1649] ixgbevf: Add the device ID's presented while running on Hyper-V Intel SR-IOV cards present different ID when running on Hyper-V. Add the device IDs presented while running on Hyper-V. Signed-off-by: K. Y. Srinivasan Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbevf/defines.h | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/net/ethernet/intel/ixgbevf/defines.h b/drivers/net/ethernet/intel/ixgbevf/defines.h index 74901f7ef391..ae09d60e7b67 100644 --- a/drivers/net/ethernet/intel/ixgbevf/defines.h +++ b/drivers/net/ethernet/intel/ixgbevf/defines.h @@ -33,6 +33,11 @@ #define IXGBE_DEV_ID_X550_VF 0x1565 #define IXGBE_DEV_ID_X550EM_X_VF 0x15A8 +#define IXGBE_DEV_ID_82599_VF_HV 0x152E +#define IXGBE_DEV_ID_X540_VF_HV 0x1530 +#define IXGBE_DEV_ID_X550_VF_HV 0x1564 +#define IXGBE_DEV_ID_X550EM_X_VF_HV 0x15A9 + #define IXGBE_VF_IRQ_CLEAR_MASK 7 #define IXGBE_VF_MAX_TX_QUEUES 8 #define IXGBE_VF_MAX_RX_QUEUES 8 From c6d45171d706c2b5efa3d5ee7a8260c14b6367c0 Mon Sep 17 00:00:00 2001 From: KY Srinivasan Date: Tue, 19 Apr 2016 19:17:57 -0700 Subject: [PATCH 1299/1649] ixgbevf: Support Windows hosts (Hyper-V) On Hyper-V, the VF/PF communication is a via software mediated path as opposed to the hardware mailbox. Make the necessary adjustments to support Hyper-V. Signed-off-by: K. Y. 
Srinivasan Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbevf/ixgbevf.h | 11 + .../net/ethernet/intel/ixgbevf/ixgbevf_main.c | 31 ++- drivers/net/ethernet/intel/ixgbevf/mbx.c | 11 + drivers/net/ethernet/intel/ixgbevf/vf.c | 215 ++++++++++++++++++ drivers/net/ethernet/intel/ixgbevf/vf.h | 2 + 5 files changed, 263 insertions(+), 7 deletions(-) diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h index aa28c4fb1a43..f2bafa6cb395 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h @@ -450,9 +450,13 @@ enum ixbgevf_state_t { enum ixgbevf_boards { board_82599_vf, + board_82599_vf_hv, board_X540_vf, + board_X540_vf_hv, board_X550_vf, + board_X550_vf_hv, board_X550EM_x_vf, + board_X550EM_x_vf_hv, }; enum ixgbevf_xcast_modes { @@ -467,6 +471,12 @@ extern const struct ixgbevf_info ixgbevf_X550_vf_info; extern const struct ixgbevf_info ixgbevf_X550EM_x_vf_info; extern const struct ixgbe_mbx_operations ixgbevf_mbx_ops; +extern const struct ixgbevf_info ixgbevf_82599_vf_hv_info; +extern const struct ixgbevf_info ixgbevf_X540_vf_hv_info; +extern const struct ixgbevf_info ixgbevf_X550_vf_hv_info; +extern const struct ixgbevf_info ixgbevf_X550EM_x_vf_hv_info; +extern const struct ixgbe_mbx_operations ixgbevf_hv_mbx_ops; + /* needed by ethtool.c */ extern const char ixgbevf_driver_name[]; extern const char ixgbevf_driver_version[]; @@ -484,6 +494,7 @@ void ixgbevf_free_rx_resources(struct ixgbevf_ring *); void ixgbevf_free_tx_resources(struct ixgbevf_ring *); void ixgbevf_update_stats(struct ixgbevf_adapter *adapter); int ethtool_ioctl(struct ifreq *ifr); +bool ixgbevf_on_hyperv(struct ixgbe_hw *hw); extern void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector); diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 319e25f29883..f39c8cb35839 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -62,10 +62,14 @@ static char ixgbevf_copyright[] = "Copyright (c) 2009 - 2015 Intel Corporation."; static const struct ixgbevf_info *ixgbevf_info_tbl[] = { - [board_82599_vf] = &ixgbevf_82599_vf_info, - [board_X540_vf] = &ixgbevf_X540_vf_info, - [board_X550_vf] = &ixgbevf_X550_vf_info, - [board_X550EM_x_vf] = &ixgbevf_X550EM_x_vf_info, + [board_82599_vf] = &ixgbevf_82599_vf_info, + [board_82599_vf_hv] = &ixgbevf_82599_vf_hv_info, + [board_X540_vf] = &ixgbevf_X540_vf_info, + [board_X540_vf_hv] = &ixgbevf_X540_vf_hv_info, + [board_X550_vf] = &ixgbevf_X550_vf_info, + [board_X550_vf_hv] = &ixgbevf_X550_vf_hv_info, + [board_X550EM_x_vf] = &ixgbevf_X550EM_x_vf_info, + [board_X550EM_x_vf_hv] = &ixgbevf_X550EM_x_vf_hv_info, }; /* ixgbevf_pci_tbl - PCI Device ID Table @@ -78,9 +82,13 @@ static const struct ixgbevf_info *ixgbevf_info_tbl[] = { */ static const struct pci_device_id ixgbevf_pci_tbl[] = { {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF), board_82599_vf }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF_HV), board_82599_vf_hv }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF), board_X540_vf }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF_HV), board_X540_vf_hv }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550_VF), board_X550_vf }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550_VF_HV), board_X550_vf_hv }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_VF), board_X550EM_x_vf }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_VF_HV), board_X550EM_x_vf_hv}, /* required last 
entry */ {0, } }; @@ -1795,7 +1803,10 @@ static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter) ixgbevf_setup_vfmrqc(adapter); /* notify the PF of our intent to use this size of frame */ - ixgbevf_rlpml_set_vf(hw, netdev->mtu + ETH_HLEN + ETH_FCS_LEN); + if (!ixgbevf_on_hyperv(hw)) + ixgbevf_rlpml_set_vf(hw, netdev->mtu + ETH_HLEN + ETH_FCS_LEN); + else + ixgbevf_hv_rlpml_set_vf(hw, netdev->mtu + ETH_HLEN + ETH_FCS_LEN); /* Setup the HW Rx Head and Tail Descriptor Pointers and * the Base and Length of the Rx Descriptor Ring @@ -2056,7 +2067,10 @@ static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter) spin_lock_bh(&adapter->mbx_lock); while (api[idx] != ixgbe_mbox_api_unknown) { - err = hw->mac.ops.negotiate_api_version(hw, api[idx]); + if (!ixgbevf_on_hyperv(hw)) + err = hw->mac.ops.negotiate_api_version(hw, api[idx]); + else + err = ixgbevf_hv_negotiate_api_version(hw, api[idx]); if (!err) break; idx++; @@ -3740,7 +3754,10 @@ static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu) netdev->mtu = new_mtu; /* notify the PF of our intent to use this size of frame */ - ixgbevf_rlpml_set_vf(hw, max_frame); + if (!ixgbevf_on_hyperv(hw)) + ixgbevf_rlpml_set_vf(hw, max_frame); + else + ixgbevf_hv_rlpml_set_vf(hw, max_frame); return 0; } diff --git a/drivers/net/ethernet/intel/ixgbevf/mbx.c b/drivers/net/ethernet/intel/ixgbevf/mbx.c index dc68fea4894b..61a80da8b6f0 100644 --- a/drivers/net/ethernet/intel/ixgbevf/mbx.c +++ b/drivers/net/ethernet/intel/ixgbevf/mbx.c @@ -346,3 +346,14 @@ const struct ixgbe_mbx_operations ixgbevf_mbx_ops = { .check_for_rst = ixgbevf_check_for_rst_vf, }; +/* Mailbox operations when running on Hyper-V. + * On Hyper-V, PF/VF communication is not through the + * hardware mailbox; this communication is through + * a software mediated path. + * Most mail box operations are noop while running on + * Hyper-V. + */ +const struct ixgbe_mbx_operations ixgbevf_hv_mbx_ops = { + .init_params = ixgbevf_init_mbx_params_vf, + .check_for_rst = ixgbevf_check_for_rst_vf, +}; diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c index 987ad69d4918..8a4eb08a3d19 100644 --- a/drivers/net/ethernet/intel/ixgbevf/vf.c +++ b/drivers/net/ethernet/intel/ixgbevf/vf.c @@ -27,6 +27,12 @@ #include "vf.h" #include "ixgbevf.h" +/* On Hyper-V, to reset, we need to read from this offset + * from the PCI config space. This is the mechanism used on + * Hyper-V to support PF/VF communication. + */ +#define IXGBE_HV_RESET_OFFSET 0x201 + /** * ixgbevf_start_hw_vf - Prepare hardware for Tx/Rx * @hw: pointer to hardware structure @@ -125,6 +131,27 @@ static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw) return 0; } +/** + * Hyper-V variant; the VF/PF communication is through the PCI + * config space. 
+ */ +static s32 ixgbevf_hv_reset_hw_vf(struct ixgbe_hw *hw) +{ +#if IS_ENABLED(CONFIG_PCI_MMCONFIG) + struct ixgbevf_adapter *adapter = hw->back; + int i; + + for (i = 0; i < 6; i++) + pci_read_config_byte(adapter->pdev, + (i + IXGBE_HV_RESET_OFFSET), + &hw->mac.perm_addr[i]); + return 0; +#else + pr_err("PCI_MMCONFIG needs to be enabled for Hyper-V\n"); + return -EOPNOTSUPP; +#endif +} + /** * ixgbevf_stop_hw_vf - Generic stop Tx/Rx units * @hw: pointer to hardware structure @@ -258,6 +285,11 @@ static s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr) return ret_val; } +static s32 ixgbevf_hv_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr) +{ + return -EOPNOTSUPP; +} + /** * ixgbevf_get_reta_locked - get the RSS redirection table (RETA) contents. * @adapter: pointer to the port handle @@ -416,6 +448,26 @@ static s32 ixgbevf_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr, return ret_val; } +/** + * ixgbevf_hv_set_rar_vf - set device MAC address Hyper-V variant + * @hw: pointer to hardware structure + * @index: Receive address register to write + * @addr: Address to put into receive address register + * @vmdq: Unused in this implementation + * + * We don't really allow setting the device MAC address. However, + * if the address being set is the permanent MAC address we will + * permit that. + **/ +static s32 ixgbevf_hv_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr, + u32 vmdq) +{ + if (ether_addr_equal(addr, hw->mac.perm_addr)) + return 0; + + return -EOPNOTSUPP; +} + static void ixgbevf_write_msg_read_ack(struct ixgbe_hw *hw, u32 *msg, u16 size) { @@ -472,6 +524,15 @@ static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw, return 0; } +/** + * Hyper-V variant - just a stub. + */ +static s32 ixgbevf_hv_update_mc_addr_list_vf(struct ixgbe_hw *hw, + struct net_device *netdev) +{ + return -EOPNOTSUPP; +} + /** * ixgbevf_update_xcast_mode - Update Multicast mode * @hw: pointer to the HW structure @@ -512,6 +573,16 @@ static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, return 0; } +/** + * Hyper-V variant - just a stub. + */ +static s32 ixgbevf_hv_update_xcast_mode(struct ixgbe_hw *hw, + struct net_device *netdev, + int xcast_mode) +{ + return -EOPNOTSUPP; +} + /** * ixgbevf_set_vfta_vf - Set/Unset VLAN filter table address * @hw: pointer to the HW structure @@ -550,6 +621,15 @@ mbx_err: return err; } +/** + * Hyper-V variant - just a stub. + */ +static s32 ixgbevf_hv_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind, + bool vlan_on) +{ + return -EOPNOTSUPP; +} + /** * ixgbevf_setup_mac_link_vf - Setup MAC link settings * @hw: pointer to hardware structure @@ -655,6 +735,67 @@ out: return ret_val; } +/** + * Hyper-V variant; there is no mailbox communication. 
+ */ +static s32 ixgbevf_hv_check_mac_link_vf(struct ixgbe_hw *hw, + ixgbe_link_speed *speed, + bool *link_up, + bool autoneg_wait_to_complete) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + struct ixgbe_mac_info *mac = &hw->mac; + u32 links_reg; + + /* If we were hit with a reset drop the link */ + if (!mbx->ops.check_for_rst(hw) || !mbx->timeout) + mac->get_link_status = true; + + if (!mac->get_link_status) + goto out; + + /* if link status is down no point in checking to see if pf is up */ + links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS); + if (!(links_reg & IXGBE_LINKS_UP)) + goto out; + + /* for SFP+ modules and DA cables on 82599 it can take up to 500usecs + * before the link status is correct + */ + if (mac->type == ixgbe_mac_82599_vf) { + int i; + + for (i = 0; i < 5; i++) { + udelay(100); + links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS); + + if (!(links_reg & IXGBE_LINKS_UP)) + goto out; + } + } + + switch (links_reg & IXGBE_LINKS_SPEED_82599) { + case IXGBE_LINKS_SPEED_10G_82599: + *speed = IXGBE_LINK_SPEED_10GB_FULL; + break; + case IXGBE_LINKS_SPEED_1G_82599: + *speed = IXGBE_LINK_SPEED_1GB_FULL; + break; + case IXGBE_LINKS_SPEED_100_82599: + *speed = IXGBE_LINK_SPEED_100_FULL; + break; + } + + /* if we passed all the tests above then the link is up and we no + * longer need to check for link + */ + mac->get_link_status = false; + +out: + *link_up = !mac->get_link_status; + return 0; +} + /** * ixgbevf_rlpml_set_vf - Set the maximum receive packet length * @hw: pointer to the HW structure @@ -669,6 +810,25 @@ void ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size) ixgbevf_write_msg_read_ack(hw, msgbuf, 2); } +/** + * ixgbevf_hv_rlpml_set_vf - Set the maximum receive packet length + * @hw: pointer to the HW structure + * @max_size: value to assign to max frame size + * Hyper-V variant. + **/ +void ixgbevf_hv_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size) +{ + u32 reg; + + /* If we are on Hyper-V, we implement this functionality + * differently. + */ + reg = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(0)); + /* CRC == 4 */ + reg |= ((max_size + 4) | IXGBE_RXDCTL_RLPML_EN); + IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(0), reg); +} + /** * ixgbevf_negotiate_api_version_vf - Negotiate supported API version * @hw: pointer to the HW structure @@ -703,6 +863,21 @@ static int ixgbevf_negotiate_api_version_vf(struct ixgbe_hw *hw, int api) return err; } +/** + * ixgbevf_hv_negotiate_api_version - Negotiate supported API version + * @hw: pointer to the HW structure + * @api: integer containing requested API version + * Hyper-V version - only ixgbe_mbox_api_10 supported. 
+ **/ +int ixgbevf_hv_negotiate_api_version(struct ixgbe_hw *hw, int api) +{ + /* Hyper-V only supports api version ixgbe_mbox_api_10 */ + if (api != ixgbe_mbox_api_10) + return IXGBE_ERR_INVALID_ARGUMENT; + + return 0; +} + int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs, unsigned int *default_tc) { @@ -777,22 +952,62 @@ static const struct ixgbe_mac_operations ixgbevf_mac_ops = { .set_vfta = ixgbevf_set_vfta_vf, }; +static const struct ixgbe_mac_operations ixgbevf_hv_mac_ops = { + .init_hw = ixgbevf_init_hw_vf, + .reset_hw = ixgbevf_hv_reset_hw_vf, + .start_hw = ixgbevf_start_hw_vf, + .get_mac_addr = ixgbevf_get_mac_addr_vf, + .stop_adapter = ixgbevf_stop_hw_vf, + .setup_link = ixgbevf_setup_mac_link_vf, + .check_link = ixgbevf_hv_check_mac_link_vf, + .set_rar = ixgbevf_hv_set_rar_vf, + .update_mc_addr_list = ixgbevf_hv_update_mc_addr_list_vf, + .update_xcast_mode = ixgbevf_hv_update_xcast_mode, + .set_uc_addr = ixgbevf_hv_set_uc_addr_vf, + .set_vfta = ixgbevf_hv_set_vfta_vf, +}; + const struct ixgbevf_info ixgbevf_82599_vf_info = { .mac = ixgbe_mac_82599_vf, .mac_ops = &ixgbevf_mac_ops, }; +const struct ixgbevf_info ixgbevf_82599_vf_hv_info = { + .mac = ixgbe_mac_82599_vf, + .mac_ops = &ixgbevf_hv_mac_ops, +}; + const struct ixgbevf_info ixgbevf_X540_vf_info = { .mac = ixgbe_mac_X540_vf, .mac_ops = &ixgbevf_mac_ops, }; +const struct ixgbevf_info ixgbevf_X540_vf_hv_info = { + .mac = ixgbe_mac_X540_vf, + .mac_ops = &ixgbevf_hv_mac_ops, +}; + const struct ixgbevf_info ixgbevf_X550_vf_info = { .mac = ixgbe_mac_X550_vf, .mac_ops = &ixgbevf_mac_ops, }; +const struct ixgbevf_info ixgbevf_X550_vf_hv_info = { + .mac = ixgbe_mac_X550_vf, + .mac_ops = &ixgbevf_hv_mac_ops, +}; + const struct ixgbevf_info ixgbevf_X550EM_x_vf_info = { .mac = ixgbe_mac_X550EM_x_vf, .mac_ops = &ixgbevf_mac_ops, }; + +const struct ixgbevf_info ixgbevf_X550EM_x_vf_hv_info = { + .mac = ixgbe_mac_X550EM_x_vf, + .mac_ops = &ixgbevf_hv_mac_ops, +}; + +bool ixgbevf_on_hyperv(struct ixgbe_hw *hw) +{ + return hw->mbx.ops.check_for_msg == NULL; +} diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h index 8e623f9327ae..f7c017058f83 100644 --- a/drivers/net/ethernet/intel/ixgbevf/vf.h +++ b/drivers/net/ethernet/intel/ixgbevf/vf.h @@ -209,6 +209,8 @@ static inline u32 ixgbe_read_reg_array(struct ixgbe_hw *hw, u32 reg, #define IXGBE_READ_REG_ARRAY(h, r, o) ixgbe_read_reg_array(h, r, o) void ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size); +void ixgbevf_hv_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size); +int ixgbevf_hv_negotiate_api_version(struct ixgbe_hw *hw, int api); int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs, unsigned int *default_tc); int ixgbevf_get_reta_locked(struct ixgbe_hw *hw, u32 *reta, int num_rx_queues); From 00103a6ce31624cb91cba9d1f991409f67453d73 Mon Sep 17 00:00:00 2001 From: Emil Tantilov Date: Thu, 21 Apr 2016 11:37:06 -0700 Subject: [PATCH 1300/1649] ixgbe: add WoL support for some 82599 subdevice IDs We had some 82599 subdevice IDs missing from the list of parts that support WoL. 
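Reduced to a sketch, the 82599 SFP branch of ixgbe_wol_supported() looks like this after the patch (subdevice IDs taken from the hunk below; a few intermediate cases and the surrounding device-ID switch are omitted):

	switch (subdevice_id) {
	case IXGBE_SUBDEV_ID_82599_560FLR:
	case IXGBE_SUBDEV_ID_82599_LOM_SNAP6:
	case IXGBE_SUBDEV_ID_82599_SFP_WOL0:
	case IXGBE_SUBDEV_ID_82599_SFP_2OCP:
		/* these parts only report WoL on the first port */
		if (hw->bus.func != 0)
			break;
		/* otherwise fall through to the common cases */
	case IXGBE_SUBDEV_ID_82599_SFP:
	case IXGBE_SUBDEV_ID_82599_RNDC:
	case IXGBE_SUBDEV_ID_82599_ECNA_DP:
	case IXGBE_SUBDEV_ID_82599_SFP_1OCP:
	case IXGBE_SUBDEV_ID_82599_SFP_LOM_OEM1:
	case IXGBE_SUBDEV_ID_82599_SFP_LOM_OEM2:
		is_wol_supported = 1;
		break;
	}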
Reported-by: Neil Horman Signed-off-by: Emil Tantilov Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 8 ++++++-- drivers/net/ethernet/intel/ixgbe/ixgbe_type.h | 6 +++++- 2 files changed, 11 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 3d895b600451..77c798fe86ba 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -9204,8 +9204,10 @@ int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id, case IXGBE_DEV_ID_82599_SFP: /* Only these subdevices could supports WOL */ switch (subdevice_id) { - case IXGBE_SUBDEV_ID_82599_SFP_WOL0: case IXGBE_SUBDEV_ID_82599_560FLR: + case IXGBE_SUBDEV_ID_82599_LOM_SNAP6: + case IXGBE_SUBDEV_ID_82599_SFP_WOL0: + case IXGBE_SUBDEV_ID_82599_SFP_2OCP: /* only support first port */ if (hw->bus.func != 0) break; @@ -9213,7 +9215,9 @@ int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id, case IXGBE_SUBDEV_ID_82599_SFP: case IXGBE_SUBDEV_ID_82599_RNDC: case IXGBE_SUBDEV_ID_82599_ECNA_DP: - case IXGBE_SUBDEV_ID_82599_LOM_SFP: + case IXGBE_SUBDEV_ID_82599_SFP_1OCP: + case IXGBE_SUBDEV_ID_82599_SFP_LOM_OEM1: + case IXGBE_SUBDEV_ID_82599_SFP_LOM_OEM2: is_wol_supported = 1; break; } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h index 7af451460374..71082be3dedd 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h @@ -59,8 +59,12 @@ #define IXGBE_SUBDEV_ID_82599_RNDC 0x1F72 #define IXGBE_SUBDEV_ID_82599_560FLR 0x17D0 #define IXGBE_SUBDEV_ID_82599_SP_560FLR 0x211B +#define IXGBE_SUBDEV_ID_82599_LOM_SNAP6 0x2159 +#define IXGBE_SUBDEV_ID_82599_SFP_1OCP 0x000D +#define IXGBE_SUBDEV_ID_82599_SFP_2OCP 0x0008 +#define IXGBE_SUBDEV_ID_82599_SFP_LOM_OEM1 0x8976 +#define IXGBE_SUBDEV_ID_82599_SFP_LOM_OEM2 0x06EE #define IXGBE_SUBDEV_ID_82599_ECNA_DP 0x0470 -#define IXGBE_SUBDEV_ID_82599_LOM_SFP 0x8976 #define IXGBE_DEV_ID_82599_SFP_EM 0x1507 #define IXGBE_DEV_ID_82599_SFP_SF2 0x154D #define IXGBE_DEV_ID_82599EN_SFP 0x1557 From 740234f070eadd010f7b129c6592767db68f16ba Mon Sep 17 00:00:00 2001 From: Emil Tantilov Date: Thu, 21 Apr 2016 11:37:12 -0700 Subject: [PATCH 1301/1649] ixgbe: check EEPROM for WOL support for X540 and above This change aims to simplify the logic we use to determine WOL support by reading the EEPROM bits for MACs X540 and newer. Also some cleanups in ixgbe_wol_supported() - changed return type to bool and removed redundant return variable by simply using return after the checks. 
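Pulled out of the hunk below as a sketch, the new generic path for X540 and newer consults the EEPROM capability word instead of a per-device ID list:

	/* inside ixgbe_wol_supported(); wol_cap is the WoL field of the
	 * EEPROM device capabilities word
	 */
	u16 wol_cap = adapter->eeprom_cap & IXGBE_DEVICE_CAPS_WOL_MASK;

	if (hw->mac.type == ixgbe_mac_82598EB)
		return false;			/* 82598 never supports WoL */

	if (hw->mac.type >= ixgbe_mac_X540) {
		if (wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0_1 ||
		    (wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0 &&
		     hw->bus.func == 0))
			return true;		/* both ports, or port 0 only */
	}

	/* 82599 parts still fall back to the device/subdevice ID switch */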
Signed-off-by: Emil Tantilov Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe.h | 4 +- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 49 +++++++++---------- 2 files changed, 25 insertions(+), 28 deletions(-) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index 515c3dc64d4f..e216a3d7f259 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -895,8 +895,8 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *, struct ixgbe_ring *); void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter, struct ixgbe_ring *); void ixgbe_update_stats(struct ixgbe_adapter *adapter); int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter); -int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id, - u16 subdevice_id); +bool ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id, + u16 subdevice_id); #ifdef CONFIG_PCI_IOV void ixgbe_full_sync_mac_table(struct ixgbe_adapter *adapter); #endif diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 77c798fe86ba..403b3cc0462c 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -9185,7 +9185,7 @@ static inline int ixgbe_enumerate_functions(struct ixgbe_adapter *adapter) /** * ixgbe_wol_supported - Check whether device supports WoL - * @hw: hw specific details + * @adapter: the adapter private structure * @device_id: the device ID * @subdev_id: the subsystem device ID * @@ -9193,13 +9193,25 @@ static inline int ixgbe_enumerate_functions(struct ixgbe_adapter *adapter) * which devices have WoL support * **/ -int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id, - u16 subdevice_id) +bool ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id, + u16 subdevice_id) { struct ixgbe_hw *hw = &adapter->hw; u16 wol_cap = adapter->eeprom_cap & IXGBE_DEVICE_CAPS_WOL_MASK; - int is_wol_supported = 0; + /* WOL not supported on 82598 */ + if (hw->mac.type == ixgbe_mac_82598EB) + return false; + + /* check eeprom to see if WOL is enabled for X540 and newer */ + if (hw->mac.type >= ixgbe_mac_X540) { + if ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0_1) || + ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0) && + (hw->bus.func == 0))) + return true; + } + + /* WOL is determined based on device IDs for 82599 MACs */ switch (device_id) { case IXGBE_DEV_ID_82599_SFP: /* Only these subdevices could supports WOL */ @@ -9218,43 +9230,28 @@ int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id, case IXGBE_SUBDEV_ID_82599_SFP_1OCP: case IXGBE_SUBDEV_ID_82599_SFP_LOM_OEM1: case IXGBE_SUBDEV_ID_82599_SFP_LOM_OEM2: - is_wol_supported = 1; - break; + return true; } break; case IXGBE_DEV_ID_82599EN_SFP: - /* Only this subdevice supports WOL */ + /* Only these subdevices support WOL */ switch (subdevice_id) { case IXGBE_SUBDEV_ID_82599EN_SFP_OCP1: - is_wol_supported = 1; - break; + return true; } break; case IXGBE_DEV_ID_82599_COMBO_BACKPLANE: /* All except this subdevice support WOL */ if (subdevice_id != IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ) - is_wol_supported = 1; + return true; break; case IXGBE_DEV_ID_82599_KX4: - is_wol_supported = 1; - break; - case IXGBE_DEV_ID_X540T: - case IXGBE_DEV_ID_X540T1: - case IXGBE_DEV_ID_X550T: - case IXGBE_DEV_ID_X550T1: - case IXGBE_DEV_ID_X550EM_X_KX4: - case IXGBE_DEV_ID_X550EM_X_KR: - case IXGBE_DEV_ID_X550EM_X_10G_T: - /* check eeprom to see 
if enabled wol */ - if ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0_1) || - ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0) && - (hw->bus.func == 0))) { - is_wol_supported = 1; - } + return true; + default: break; } - return is_wol_supported; + return false; } /** From 61ff59d81c3cf9c346a008032ad974d6e79ae1d9 Mon Sep 17 00:00:00 2001 From: Preethi Banala Date: Thu, 21 Apr 2016 11:39:29 -0700 Subject: [PATCH 1302/1649] ixgbe: Remove duplicate and unused device ID definitions Remove duplicate and unused device ID definitions. Signed-off-by: Preethi Banala Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe_type.h | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h index 71082be3dedd..1912f7e105e4 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h @@ -93,12 +93,8 @@ #define IXGBE_DEV_ID_X550EM_A_SFP 0x15CE /* VF Device IDs */ -#define IXGBE_DEV_ID_X550_VF_HV 0x1564 -#define IXGBE_DEV_ID_X550_VF 0x1565 -#define IXGBE_DEV_ID_X550EM_X_VF 0x15A8 -#define IXGBE_DEV_ID_X550EM_X_VF_HV 0x15A9 -#define IXGBE_DEV_ID_82599_VF 0x10ED -#define IXGBE_DEV_ID_X540_VF 0x1515 +#define IXGBE_DEV_ID_82599_VF 0x10ED +#define IXGBE_DEV_ID_X540_VF 0x1515 #define IXGBE_DEV_ID_X550_VF 0x1565 #define IXGBE_DEV_ID_X550EM_X_VF 0x15A8 #define IXGBE_DEV_ID_X550EM_A_VF 0x15C5 From 4c4f8023be8c8f900e79a622b92bfc778db1f2ec Mon Sep 17 00:00:00 2001 From: Preethi Banala Date: Thu, 21 Apr 2016 11:40:24 -0700 Subject: [PATCH 1303/1649] ixgbe: Return 64 bit stats values The code was ignoring higher 32 bits of stats registers. This patch correctly fills out 64 bit value in two 32 bit words. 
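As a sketch of the pattern applied for each wide counter in ixgbe_get_regs() (the value is cached in a local here for clarity; the hunk below calls IXGBE_GET_STAT() twice instead):

	u64 val = IXGBE_GET_STAT(adapter, gorc);

	regs_buff[942] = (u32)val;		/* low 32 bits */
	regs_buff[943] = (u32)(val >> 32);	/* high 32 bits */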
Signed-off-by: Preethi Banala Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index d3efcb4fecce..a696192eae20 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -720,8 +720,10 @@ static void ixgbe_get_regs(struct net_device *netdev, regs_buff[939] = IXGBE_GET_STAT(adapter, bprc); regs_buff[940] = IXGBE_GET_STAT(adapter, mprc); regs_buff[941] = IXGBE_GET_STAT(adapter, gptc); - regs_buff[942] = IXGBE_GET_STAT(adapter, gorc); - regs_buff[944] = IXGBE_GET_STAT(adapter, gotc); + regs_buff[942] = (u32)IXGBE_GET_STAT(adapter, gorc); + regs_buff[943] = (u32)(IXGBE_GET_STAT(adapter, gorc) >> 32); + regs_buff[944] = (u32)IXGBE_GET_STAT(adapter, gotc); + regs_buff[945] = (u32)(IXGBE_GET_STAT(adapter, gotc) >> 32); for (i = 0; i < 8; i++) regs_buff[946 + i] = IXGBE_GET_STAT(adapter, rnbc[i]); regs_buff[954] = IXGBE_GET_STAT(adapter, ruc); @@ -731,7 +733,8 @@ static void ixgbe_get_regs(struct net_device *netdev, regs_buff[958] = IXGBE_GET_STAT(adapter, mngprc); regs_buff[959] = IXGBE_GET_STAT(adapter, mngpdc); regs_buff[960] = IXGBE_GET_STAT(adapter, mngptc); - regs_buff[961] = IXGBE_GET_STAT(adapter, tor); + regs_buff[961] = (u32)IXGBE_GET_STAT(adapter, tor); + regs_buff[962] = (u32)(IXGBE_GET_STAT(adapter, tor) >> 32); regs_buff[963] = IXGBE_GET_STAT(adapter, tpr); regs_buff[964] = IXGBE_GET_STAT(adapter, tpt); regs_buff[965] = IXGBE_GET_STAT(adapter, ptc64); From 45a88dfcd806ca8774180026891b136ef10fd844 Mon Sep 17 00:00:00 2001 From: Preethi Banala Date: Thu, 21 Apr 2016 11:40:35 -0700 Subject: [PATCH 1304/1649] ixgbe: Revise populating few registers and macro definitions Revise populating few registers in ixgbe_get_regs() and macro definitions. 
Before applying patch: $ du -k objs/drivers/net/ethernet/intel/ixgbe/ixgbe.ko 8572 objs/drivers/net/ethernet/intel/ixgbe/ixgbe.ko After applying patch: $ du -k objs/drivers/net/ethernet/intel/ixgbe/ixgbe.ko 8568 objs/drivers/net/ethernet/intel/ixgbe/ixgbe.ko Signed-off-by: Preethi Banala Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- .../net/ethernet/intel/ixgbe/ixgbe_ethtool.c | 18 ++++++------------ drivers/net/ethernet/intel/ixgbe/ixgbe_type.h | 10 ++-------- 2 files changed, 8 insertions(+), 20 deletions(-) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index a696192eae20..59b771b9b354 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -533,10 +533,8 @@ static void ixgbe_get_regs(struct net_device *netdev, /* Flow Control */ regs_buff[30] = IXGBE_READ_REG(hw, IXGBE_PFCTOP); - regs_buff[31] = IXGBE_READ_REG(hw, IXGBE_FCTTV(0)); - regs_buff[32] = IXGBE_READ_REG(hw, IXGBE_FCTTV(1)); - regs_buff[33] = IXGBE_READ_REG(hw, IXGBE_FCTTV(2)); - regs_buff[34] = IXGBE_READ_REG(hw, IXGBE_FCTTV(3)); + for (i = 0; i < 4; i++) + regs_buff[31 + i] = IXGBE_READ_REG(hw, IXGBE_FCTTV(i)); for (i = 0; i < 8; i++) { switch (hw->mac.type) { case ixgbe_mac_82598EB: @@ -806,15 +804,11 @@ static void ixgbe_get_regs(struct net_device *netdev, regs_buff[1096 + i] = IXGBE_READ_REG(hw, IXGBE_TIC_DW(i)); regs_buff[1100] = IXGBE_READ_REG(hw, IXGBE_TDPROBE); regs_buff[1101] = IXGBE_READ_REG(hw, IXGBE_TXBUFCTRL); - regs_buff[1102] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA0); - regs_buff[1103] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA1); - regs_buff[1104] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA2); - regs_buff[1105] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA3); + for (i = 0; i < 4; i++) + regs_buff[1102 + i] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA(i)); regs_buff[1106] = IXGBE_READ_REG(hw, IXGBE_RXBUFCTRL); - regs_buff[1107] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA0); - regs_buff[1108] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA1); - regs_buff[1109] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA2); - regs_buff[1110] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA3); + for (i = 0; i < 4; i++) + regs_buff[1107 + i] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA(i)); for (i = 0; i < 8; i++) regs_buff[1111 + i] = IXGBE_READ_REG(hw, IXGBE_PCIE_DIAG(i)); regs_buff[1119] = IXGBE_READ_REG(hw, IXGBE_RFVAL); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h index 1912f7e105e4..cd6c1dd41bf7 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h @@ -1060,15 +1060,9 @@ struct ixgbe_thermal_sensor_data { #define IXGBE_TIC_DW2(_i) (0x082B0 + ((_i) * 4)) #define IXGBE_TDPROBE 0x07F20 #define IXGBE_TXBUFCTRL 0x0C600 -#define IXGBE_TXBUFDATA0 0x0C610 -#define IXGBE_TXBUFDATA1 0x0C614 -#define IXGBE_TXBUFDATA2 0x0C618 -#define IXGBE_TXBUFDATA3 0x0C61C +#define IXGBE_TXBUFDATA(_i) (0x0C610 + ((_i) * 4)) /* 4 of these (0-3) */ #define IXGBE_RXBUFCTRL 0x03600 -#define IXGBE_RXBUFDATA0 0x03610 -#define IXGBE_RXBUFDATA1 0x03614 -#define IXGBE_RXBUFDATA2 0x03618 -#define IXGBE_RXBUFDATA3 0x0361C +#define IXGBE_RXBUFDATA(_i) (0x03610 + ((_i) * 4)) /* 4 of these (0-3) */ #define IXGBE_PCIE_DIAG(_i) (0x11090 + ((_i) * 4)) /* 8 of these */ #define IXGBE_RFVAL 0x050A4 #define IXGBE_MDFTC1 0x042B8 From 33b0eb15961393d8c60e7c4bddd23da53cd1c2e4 Mon Sep 17 00:00:00 2001 From: Babu Moger Date: Thu, 21 Apr 2016 15:56:49 -0700 Subject: [PATCH 1305/1649] ixgbevf: Change 
the relaxed order settings in VF driver for sparc We noticed performance issues with VF interface on sparc compared to PF. Setting the RX to IXGBE_DCA_RXCTRL_DATA_WRO_EN brings it on par with PF. Also this matches the default sparc setting in PF driver. Signed-off-by: Babu Moger Acked-by: Sowmini Varadhan Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index f39c8cb35839..14d4729e7519 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -1760,9 +1760,15 @@ static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter, IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(reg_idx), ring->count * sizeof(union ixgbe_adv_rx_desc)); +#ifndef CONFIG_SPARC /* enable relaxed ordering */ IXGBE_WRITE_REG(hw, IXGBE_VFDCA_RXCTRL(reg_idx), IXGBE_DCA_RXCTRL_DESC_RRO_EN); +#else + IXGBE_WRITE_REG(hw, IXGBE_VFDCA_RXCTRL(reg_idx), + IXGBE_DCA_RXCTRL_DESC_RRO_EN | + IXGBE_DCA_RXCTRL_DATA_WRO_EN); +#endif /* reset head and tail pointers */ IXGBE_WRITE_REG(hw, IXGBE_VFRDH(reg_idx), 0); From 2f8214fe6811a246265629d81af2313695c63f4d Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Fri, 22 Apr 2016 13:18:26 -0400 Subject: [PATCH 1306/1649] ixgbevf: Use mac_ops instead of trying to identify NIC type This change makes it so that we can just use function pointers instead of having to identify if a given VF is running on a Linux or Windows PF. By doing this we can avoid having to pull too much information out of the lower layers and can instead just make use of the mac_ops pointers since they should differ between the two types of VFs anyway.
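
The function-pointer approach described in the message above can be illustrated with a minimal standalone C sketch. This is not the ixgbevf code: the demo_* names and the printf back-ends are invented for illustration. The point is that the back-end (mailbox-talking PF vs. Hyper-V PF) is selected once, after which callers only invoke the ops pointer and never branch on the PF type again.

#include <stdio.h>

/* Hypothetical ops table: one callback per back-end-specific operation. */
struct demo_mac_ops {
        void (*set_rlpml)(int max_frame);
};

/* Illustrative stand-in for the mailbox-based (regular PF) back-end. */
static void demo_set_rlpml_mbx(int max_frame)
{
        printf("mailbox message: set max frame size to %d\n", max_frame);
}

/* Illustrative stand-in for the register-based (Hyper-V PF) back-end. */
static void demo_set_rlpml_hv(int max_frame)
{
        printf("direct register write: set max frame size to %d\n", max_frame);
}

static const struct demo_mac_ops demo_mbx_ops = { .set_rlpml = demo_set_rlpml_mbx };
static const struct demo_mac_ops demo_hv_ops = { .set_rlpml = demo_set_rlpml_hv };

int main(void)
{
        int on_hyperv = 0; /* decided once, e.g. at probe time */
        const struct demo_mac_ops *ops = on_hyperv ? &demo_hv_ops : &demo_mbx_ops;

        /* Callers no longer ask "which kind of PF is this?" per operation. */
        ops->set_rlpml(1522);
        return 0;
}

In the patch itself that role is played by the two ixgbe_mac_operations tables, which is what allows the ixgbevf_on_hyperv() checks to be deleted in the diff that follows.
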
Signed-off-by: Alexander Duyck Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbevf/ixgbevf.h | 1 - .../net/ethernet/intel/ixgbevf/ixgbevf_main.c | 15 +++----------- drivers/net/ethernet/intel/ixgbevf/vf.c | 20 +++++++++---------- drivers/net/ethernet/intel/ixgbevf/vf.h | 4 +--- 4 files changed, 13 insertions(+), 27 deletions(-) diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h index f2bafa6cb395..d5944c391cbb 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h @@ -494,7 +494,6 @@ void ixgbevf_free_rx_resources(struct ixgbevf_ring *); void ixgbevf_free_tx_resources(struct ixgbevf_ring *); void ixgbevf_update_stats(struct ixgbevf_adapter *adapter); int ethtool_ioctl(struct ifreq *ifr); -bool ixgbevf_on_hyperv(struct ixgbe_hw *hw); extern void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector); diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 14d4729e7519..ba17a6193034 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -1809,10 +1809,7 @@ static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter) ixgbevf_setup_vfmrqc(adapter); /* notify the PF of our intent to use this size of frame */ - if (!ixgbevf_on_hyperv(hw)) - ixgbevf_rlpml_set_vf(hw, netdev->mtu + ETH_HLEN + ETH_FCS_LEN); - else - ixgbevf_hv_rlpml_set_vf(hw, netdev->mtu + ETH_HLEN + ETH_FCS_LEN); + hw->mac.ops.set_rlpml(hw, netdev->mtu + ETH_HLEN + ETH_FCS_LEN); /* Setup the HW Rx Head and Tail Descriptor Pointers and * the Base and Length of the Rx Descriptor Ring @@ -2073,10 +2070,7 @@ static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter) spin_lock_bh(&adapter->mbx_lock); while (api[idx] != ixgbe_mbox_api_unknown) { - if (!ixgbevf_on_hyperv(hw)) - err = hw->mac.ops.negotiate_api_version(hw, api[idx]); - else - err = ixgbevf_hv_negotiate_api_version(hw, api[idx]); + err = hw->mac.ops.negotiate_api_version(hw, api[idx]); if (!err) break; idx++; @@ -3760,10 +3754,7 @@ static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu) netdev->mtu = new_mtu; /* notify the PF of our intent to use this size of frame */ - if (!ixgbevf_on_hyperv(hw)) - ixgbevf_rlpml_set_vf(hw, max_frame); - else - ixgbevf_hv_rlpml_set_vf(hw, max_frame); + hw->mac.ops.set_rlpml(hw, max_frame); return 0; } diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c index 8a4eb08a3d19..f66055aa0674 100644 --- a/drivers/net/ethernet/intel/ixgbevf/vf.c +++ b/drivers/net/ethernet/intel/ixgbevf/vf.c @@ -797,11 +797,11 @@ out: } /** - * ixgbevf_rlpml_set_vf - Set the maximum receive packet length + * ixgbevf_set_rlpml_vf - Set the maximum receive packet length * @hw: pointer to the HW structure * @max_size: value to assign to max frame size **/ -void ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size) +static void ixgbevf_set_rlpml_vf(struct ixgbe_hw *hw, u16 max_size) { u32 msgbuf[2]; @@ -811,12 +811,12 @@ void ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size) } /** - * ixgbevf_hv_rlpml_set_vf - Set the maximum receive packet length + * ixgbevf_hv_set_rlpml_vf - Set the maximum receive packet length * @hw: pointer to the HW structure * @max_size: value to assign to max frame size * Hyper-V variant. 
**/ -void ixgbevf_hv_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size) +static void ixgbevf_hv_set_rlpml_vf(struct ixgbe_hw *hw, u16 max_size) { u32 reg; @@ -864,12 +864,12 @@ static int ixgbevf_negotiate_api_version_vf(struct ixgbe_hw *hw, int api) } /** - * ixgbevf_hv_negotiate_api_version - Negotiate supported API version + * ixgbevf_hv_negotiate_api_version_vf - Negotiate supported API version * @hw: pointer to the HW structure * @api: integer containing requested API version * Hyper-V version - only ixgbe_mbox_api_10 supported. **/ -int ixgbevf_hv_negotiate_api_version(struct ixgbe_hw *hw, int api) +static int ixgbevf_hv_negotiate_api_version_vf(struct ixgbe_hw *hw, int api) { /* Hyper-V only supports api version ixgbe_mbox_api_10 */ if (api != ixgbe_mbox_api_10) @@ -950,6 +950,7 @@ static const struct ixgbe_mac_operations ixgbevf_mac_ops = { .update_xcast_mode = ixgbevf_update_xcast_mode, .set_uc_addr = ixgbevf_set_uc_addr_vf, .set_vfta = ixgbevf_set_vfta_vf, + .set_rlpml = ixgbevf_set_rlpml_vf, }; static const struct ixgbe_mac_operations ixgbevf_hv_mac_ops = { @@ -960,11 +961,13 @@ static const struct ixgbe_mac_operations ixgbevf_hv_mac_ops = { .stop_adapter = ixgbevf_stop_hw_vf, .setup_link = ixgbevf_setup_mac_link_vf, .check_link = ixgbevf_hv_check_mac_link_vf, + .negotiate_api_version = ixgbevf_hv_negotiate_api_version_vf, .set_rar = ixgbevf_hv_set_rar_vf, .update_mc_addr_list = ixgbevf_hv_update_mc_addr_list_vf, .update_xcast_mode = ixgbevf_hv_update_xcast_mode, .set_uc_addr = ixgbevf_hv_set_uc_addr_vf, .set_vfta = ixgbevf_hv_set_vfta_vf, + .set_rlpml = ixgbevf_hv_set_rlpml_vf, }; const struct ixgbevf_info ixgbevf_82599_vf_info = { @@ -1006,8 +1009,3 @@ const struct ixgbevf_info ixgbevf_X550EM_x_vf_hv_info = { .mac = ixgbe_mac_X550EM_x_vf, .mac_ops = &ixgbevf_hv_mac_ops, }; - -bool ixgbevf_on_hyperv(struct ixgbe_hw *hw) -{ - return hw->mbx.ops.check_for_msg == NULL; -} diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h index f7c017058f83..6770f101aa45 100644 --- a/drivers/net/ethernet/intel/ixgbevf/vf.h +++ b/drivers/net/ethernet/intel/ixgbevf/vf.h @@ -69,6 +69,7 @@ struct ixgbe_mac_operations { s32 (*disable_mc)(struct ixgbe_hw *); s32 (*clear_vfta)(struct ixgbe_hw *); s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool); + void (*set_rlpml)(struct ixgbe_hw *, u16); }; enum ixgbe_mac_type { @@ -208,9 +209,6 @@ static inline u32 ixgbe_read_reg_array(struct ixgbe_hw *hw, u32 reg, #define IXGBE_READ_REG_ARRAY(h, r, o) ixgbe_read_reg_array(h, r, o) -void ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size); -void ixgbevf_hv_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size); -int ixgbevf_hv_negotiate_api_version(struct ixgbe_hw *hw, int api); int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs, unsigned int *default_tc); int ixgbevf_get_reta_locked(struct ixgbe_hw *hw, u32 *reta, int num_rx_queues); From 8829009d2fd5683ed29418420b4883cf4782c85c Mon Sep 17 00:00:00 2001 From: Usha Ketineni Date: Tue, 26 Apr 2016 05:00:26 -0700 Subject: [PATCH 1307/1649] ixgbe: Disable DCB and FCoE for X550EM_x and x550em_a This patch adds IXGBE_FLAG_DCB_CAPABLE flag that is set for all MACs other than X550EM_x and x550em_a. DCB and FCoE is disabled for these MACS. DCB initialization code is moved to a separate function. 
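
The capability-flag scheme described in the message above can be sketched in a few lines of standalone C. The flag bits, enum values and helpers here are invented for illustration and are not the real ixgbe definitions: capabilities are assumed by default, stripped per MAC generation during software init, and later setup paths test only the flag rather than the MAC type.

#include <stdio.h>

/* Hypothetical capability bits; not the real ixgbe flag values. */
#define DEMO_FLAG_DCB_CAPABLE   (1u << 0)
#define DEMO_FLAG_FCOE_CAPABLE  (1u << 1)

enum demo_mac { DEMO_MAC_82599, DEMO_MAC_X540, DEMO_MAC_X550EM_X };

struct demo_adapter {
        enum demo_mac mac;
        unsigned int flags;
};

static void demo_sw_init(struct demo_adapter *ad)
{
        /* Assume every capability first... */
        ad->flags = DEMO_FLAG_DCB_CAPABLE | DEMO_FLAG_FCOE_CAPABLE;

        /* ...then strip what this MAC generation does not support. */
        if (ad->mac == DEMO_MAC_X550EM_X)
                ad->flags &= ~(DEMO_FLAG_DCB_CAPABLE | DEMO_FLAG_FCOE_CAPABLE);
}

static void demo_register_dcb(const struct demo_adapter *ad)
{
        /* Setup paths check the flag, not the MAC type. */
        if (ad->flags & DEMO_FLAG_DCB_CAPABLE)
                printf("registering DCB ops\n");
        else
                printf("DCB stays disabled on this MAC\n");
}

int main(void)
{
        struct demo_adapter ad = { .mac = DEMO_MAC_X550EM_X };

        demo_sw_init(&ad);
        demo_register_dcb(&ad);
        return 0;
}

In the driver the equivalent test gates both the DCB setup moved into the new ixgbe_init_dcb() helper and the assignment of netdev->dcbnl_ops in the diff that follows.
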
Signed-off-by: Usha Ketineni Tested-by: Ronald Bynoe Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe.h | 1 + drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 111 +++++++++++------- drivers/net/ethernet/intel/ixgbe/ixgbe_type.h | 1 + 3 files changed, 71 insertions(+), 42 deletions(-) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index e216a3d7f259..9f2db1855412 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -644,6 +644,7 @@ struct ixgbe_adapter { #define IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE BIT(24) #define IXGBE_FLAG_RX_HWTSTAMP_ENABLED BIT(25) #define IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER BIT(26) +#define IXGBE_FLAG_DCB_CAPABLE BIT(27) u32 flags2; #define IXGBE_FLAG2_RSC_CAPABLE BIT(0) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 403b3cc0462c..2f6984279d92 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -5559,6 +5559,58 @@ static void ixgbe_tx_timeout(struct net_device *netdev) ixgbe_tx_timeout_reset(adapter); } +#ifdef CONFIG_IXGBE_DCB +static void ixgbe_init_dcb(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + struct tc_configuration *tc; + int j; + + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + case ixgbe_mac_82599EB: + adapter->dcb_cfg.num_tcs.pg_tcs = MAX_TRAFFIC_CLASS; + adapter->dcb_cfg.num_tcs.pfc_tcs = MAX_TRAFFIC_CLASS; + break; + case ixgbe_mac_X540: + case ixgbe_mac_X550: + adapter->dcb_cfg.num_tcs.pg_tcs = X540_TRAFFIC_CLASS; + adapter->dcb_cfg.num_tcs.pfc_tcs = X540_TRAFFIC_CLASS; + break; + case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: + default: + adapter->dcb_cfg.num_tcs.pg_tcs = DEF_TRAFFIC_CLASS; + adapter->dcb_cfg.num_tcs.pfc_tcs = DEF_TRAFFIC_CLASS; + break; + } + + /* Configure DCB traffic classes */ + for (j = 0; j < MAX_TRAFFIC_CLASS; j++) { + tc = &adapter->dcb_cfg.tc_config[j]; + tc->path[DCB_TX_CONFIG].bwg_id = 0; + tc->path[DCB_TX_CONFIG].bwg_percent = 12 + (j & 1); + tc->path[DCB_RX_CONFIG].bwg_id = 0; + tc->path[DCB_RX_CONFIG].bwg_percent = 12 + (j & 1); + tc->dcb_pfc = pfc_disabled; + } + + /* Initialize default user to priority mapping, UPx->TC0 */ + tc = &adapter->dcb_cfg.tc_config[0]; + tc->path[DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF; + tc->path[DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF; + + adapter->dcb_cfg.bw_percentage[DCB_TX_CONFIG][0] = 100; + adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100; + adapter->dcb_cfg.pfc_mode_enable = false; + adapter->dcb_set_bitmap = 0x00; + if (adapter->flags & IXGBE_FLAG_DCB_CAPABLE) + adapter->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE; + memcpy(&adapter->temp_dcb_cfg, &adapter->dcb_cfg, + sizeof(adapter->temp_dcb_cfg)); +} +#endif + /** * ixgbe_sw_init - Initialize general software structures (struct ixgbe_adapter) * @adapter: board private structure to initialize @@ -5575,10 +5627,6 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter) u32 fwsm; u16 device_caps; int i; -#ifdef CONFIG_IXGBE_DCB - int j; - struct tc_configuration *tc; -#endif /* PCI config space info */ @@ -5600,6 +5648,10 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter) #ifdef CONFIG_IXGBE_DCA adapter->flags |= IXGBE_FLAG_DCA_CAPABLE; #endif +#ifdef CONFIG_IXGBE_DCB + adapter->flags |= IXGBE_FLAG_DCB_CAPABLE; + adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED; +#endif #ifdef IXGBE_FCOE adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE; adapter->flags 
&= ~IXGBE_FLAG_FCOE_ENABLED; @@ -5656,6 +5708,16 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter) break; case ixgbe_mac_X550EM_x: case ixgbe_mac_x550em_a: +#ifdef CONFIG_IXGBE_DCB + adapter->flags &= ~IXGBE_FLAG_DCB_CAPABLE; +#endif +#ifdef IXGBE_FCOE + adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE; +#ifdef CONFIG_IXGBE_DCB + adapter->fcoe.up = 0; +#endif /* IXGBE_DCB */ +#endif /* IXGBE_FCOE */ + /* Fall Through */ case ixgbe_mac_X550: #ifdef CONFIG_IXGBE_DCA adapter->flags &= ~IXGBE_FLAG_DCA_CAPABLE; @@ -5677,43 +5739,7 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter) spin_lock_init(&adapter->fdir_perfect_lock); #ifdef CONFIG_IXGBE_DCB - switch (hw->mac.type) { - case ixgbe_mac_X540: - case ixgbe_mac_X550: - case ixgbe_mac_X550EM_x: - case ixgbe_mac_x550em_a: - adapter->dcb_cfg.num_tcs.pg_tcs = X540_TRAFFIC_CLASS; - adapter->dcb_cfg.num_tcs.pfc_tcs = X540_TRAFFIC_CLASS; - break; - default: - adapter->dcb_cfg.num_tcs.pg_tcs = MAX_TRAFFIC_CLASS; - adapter->dcb_cfg.num_tcs.pfc_tcs = MAX_TRAFFIC_CLASS; - break; - } - - /* Configure DCB traffic classes */ - for (j = 0; j < MAX_TRAFFIC_CLASS; j++) { - tc = &adapter->dcb_cfg.tc_config[j]; - tc->path[DCB_TX_CONFIG].bwg_id = 0; - tc->path[DCB_TX_CONFIG].bwg_percent = 12 + (j & 1); - tc->path[DCB_RX_CONFIG].bwg_id = 0; - tc->path[DCB_RX_CONFIG].bwg_percent = 12 + (j & 1); - tc->dcb_pfc = pfc_disabled; - } - - /* Initialize default user to priority mapping, UPx->TC0 */ - tc = &adapter->dcb_cfg.tc_config[0]; - tc->path[DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF; - tc->path[DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF; - - adapter->dcb_cfg.bw_percentage[DCB_TX_CONFIG][0] = 100; - adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100; - adapter->dcb_cfg.pfc_mode_enable = false; - adapter->dcb_set_bitmap = 0x00; - adapter->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE; - memcpy(&adapter->temp_dcb_cfg, &adapter->dcb_cfg, - sizeof(adapter->temp_dcb_cfg)); - + ixgbe_init_dcb(adapter); #endif /* default flow control settings */ @@ -9495,7 +9521,8 @@ skip_sriov: netdev->priv_flags |= IFF_SUPP_NOFCS; #ifdef CONFIG_IXGBE_DCB - netdev->dcbnl_ops = &dcbnl_ops; + if (adapter->flags & IXGBE_FLAG_DCB_CAPABLE) + netdev->dcbnl_ops = &dcbnl_ops; #endif #ifdef IXGBE_FCOE diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h index cd6c1dd41bf7..da3d8358fee0 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h @@ -548,6 +548,7 @@ struct ixgbe_thermal_sensor_data { /* DCB registers */ #define MAX_TRAFFIC_CLASS 8 #define X540_TRAFFIC_CLASS 4 +#define DEF_TRAFFIC_CLASS 1 #define IXGBE_RMCS 0x03D00 #define IXGBE_DPMCS 0x07F40 #define IXGBE_PDPMCS 0x0CD00 From 8b44a8a09de335a2fa7e39bb27e9fb50ff6d52ba Mon Sep 17 00:00:00 2001 From: Tony Nguyen Date: Wed, 27 Apr 2016 14:14:14 -0700 Subject: [PATCH 1308/1649] ixgbevf: Remove unused parameter ixgbevf_update_xcast_mode() is not using the netdev parameter; removing it since it's unnecessary. 
Signed-off-by: Tony Nguyen Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 2 +- drivers/net/ethernet/intel/ixgbevf/vf.c | 8 ++------ drivers/net/ethernet/intel/ixgbevf/vf.h | 2 +- 3 files changed, 4 insertions(+), 8 deletions(-) diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index ba17a6193034..5e348b125090 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -1922,7 +1922,7 @@ static void ixgbevf_set_rx_mode(struct net_device *netdev) spin_lock_bh(&adapter->mbx_lock); - hw->mac.ops.update_xcast_mode(hw, netdev, xcast_mode); + hw->mac.ops.update_xcast_mode(hw, xcast_mode); /* reprogram multicast list */ hw->mac.ops.update_mc_addr_list(hw, netdev); diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c index f66055aa0674..e670d3b19c3c 100644 --- a/drivers/net/ethernet/intel/ixgbevf/vf.c +++ b/drivers/net/ethernet/intel/ixgbevf/vf.c @@ -536,13 +536,11 @@ static s32 ixgbevf_hv_update_mc_addr_list_vf(struct ixgbe_hw *hw, /** * ixgbevf_update_xcast_mode - Update Multicast mode * @hw: pointer to the HW structure - * @netdev: pointer to net device structure * @xcast_mode: new multicast mode * * Updates the Multicast Mode of VF. **/ -static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, - struct net_device *netdev, int xcast_mode) +static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode) { struct ixgbe_mbx_info *mbx = &hw->mbx; u32 msgbuf[2]; @@ -576,9 +574,7 @@ static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, /** * Hyper-V variant - just a stub. */ -static s32 ixgbevf_hv_update_xcast_mode(struct ixgbe_hw *hw, - struct net_device *netdev, - int xcast_mode) +static s32 ixgbevf_hv_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode) { return -EOPNOTSUPP; } diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h index 6770f101aa45..2cac610f32ba 100644 --- a/drivers/net/ethernet/intel/ixgbevf/vf.h +++ b/drivers/net/ethernet/intel/ixgbevf/vf.h @@ -64,7 +64,7 @@ struct ixgbe_mac_operations { s32 (*set_uc_addr)(struct ixgbe_hw *, u32, u8 *); s32 (*init_rx_addrs)(struct ixgbe_hw *); s32 (*update_mc_addr_list)(struct ixgbe_hw *, struct net_device *); - s32 (*update_xcast_mode)(struct ixgbe_hw *, struct net_device *, int); + s32 (*update_xcast_mode)(struct ixgbe_hw *, int); s32 (*enable_mc)(struct ixgbe_hw *); s32 (*disable_mc)(struct ixgbe_hw *); s32 (*clear_vfta)(struct ixgbe_hw *); From d4011239f46ac6e407af61e3f74d1e3874fc9394 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Mon, 2 May 2016 21:49:25 -0700 Subject: [PATCH 1309/1649] tcp: guarantee forward progress in tcp_sendmsg() Under high rx pressure, it is possible tcp_sendmsg() never has a chance to allocate an skb and loop forever as sk_flush_backlog() would always return true. Fix this by calling sk_flush_backlog() only if one skb had been allocated and filled before last backlog check. Fixes: d41a69f1d390 ("tcp: make tcp_sendmsg() aware of socket backlog") Signed-off-by: Eric Dumazet Acked-by: Soheil Hassas Yeganeh Signed-off-by: David S. 
Miller --- net/ipv4/tcp.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index b945c2b046c5..5c7ed147449c 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -1084,6 +1084,7 @@ int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) struct sockcm_cookie sockc; int flags, err, copied = 0; int mss_now = 0, size_goal, copied_syn = 0; + bool process_backlog = false; bool sg; long timeo; @@ -1167,9 +1168,10 @@ new_segment: if (!sk_stream_memory_free(sk)) goto wait_for_sndbuf; - if (sk_flush_backlog(sk)) + if (process_backlog && sk_flush_backlog(sk)) { + process_backlog = false; goto restart; - + } skb = sk_stream_alloc_skb(sk, select_size(sk, sg), sk->sk_allocation, @@ -1177,6 +1179,7 @@ new_segment: if (!skb) goto wait_for_memory; + process_backlog = true; /* * Check whether we can use HW checksum. */ From f132ae7c46370c981412a68ccec9f2145812a9b6 Mon Sep 17 00:00:00 2001 From: Jiri Benc Date: Tue, 3 May 2016 15:00:21 +0200 Subject: [PATCH 1310/1649] gre: change gre_parse_header to return the header length It's easier for gre_parse_header to return the header length instead of filing it into a parameter. That way, the callers that don't care about the header length can just check whether the returned value is lower than zero. In gre_err, the tunnel header must not be pulled. See commit b7f8fe251e46 ("gre: do not pull header in ICMP error processing") for details. This patch reduces the conflict between the mentioned commit and commit 95f5c64c3c13 ("gre: Move utility functions to common headers"). Signed-off-by: Jiri Benc Acked-by: Tom Herbert Signed-off-by: David S. Miller --- include/net/gre.h | 2 +- net/ipv4/gre_demux.c | 6 +++--- net/ipv4/ip_gre.c | 9 +++------ net/ipv6/ip6_gre.c | 3 ++- 4 files changed, 9 insertions(+), 11 deletions(-) diff --git a/include/net/gre.h b/include/net/gre.h index 29e37322c06e..a14093c70eab 100644 --- a/include/net/gre.h +++ b/include/net/gre.h @@ -26,7 +26,7 @@ int gre_del_protocol(const struct gre_protocol *proto, u8 version); struct net_device *gretap_fb_dev_create(struct net *net, const char *name, u8 name_assign_type); int gre_parse_header(struct sk_buff *skb, struct tnl_ptk_info *tpi, - bool *csum_err, int *hdr_len); + bool *csum_err); static inline int gre_calc_hlen(__be16 o_flags) { diff --git a/net/ipv4/gre_demux.c b/net/ipv4/gre_demux.c index 371674801e84..a41e73ab1369 100644 --- a/net/ipv4/gre_demux.c +++ b/net/ipv4/gre_demux.c @@ -60,8 +60,9 @@ int gre_del_protocol(const struct gre_protocol *proto, u8 version) } EXPORT_SYMBOL_GPL(gre_del_protocol); +/* Fills in tpi and returns header length to be pulled. */ int gre_parse_header(struct sk_buff *skb, struct tnl_ptk_info *tpi, - bool *csum_err, int *ret_hdr_len) + bool *csum_err) { const struct gre_base_hdr *greh; __be32 *options; @@ -119,8 +120,7 @@ int gre_parse_header(struct sk_buff *skb, struct tnl_ptk_info *tpi, return -EINVAL; } } - *ret_hdr_len = hdr_len; - return 0; + return hdr_len; } EXPORT_SYMBOL(gre_parse_header); diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index b99213c46aac..8260a707b9b8 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c @@ -221,16 +221,12 @@ static void gre_err(struct sk_buff *skb, u32 info) const int code = icmp_hdr(skb)->code; struct tnl_ptk_info tpi; bool csum_err = false; - int hdr_len; - if (gre_parse_header(skb, &tpi, &csum_err, &hdr_len)) { + if (gre_parse_header(skb, &tpi, &csum_err) < 0) { if (!csum_err) /* ignore csum errors. 
*/ return; } - if (iptunnel_pull_header(skb, hdr_len, tpi.proto, false)) - return; - if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) { ipv4_update_pmtu(skb, dev_net(skb->dev), info, skb->dev->ifindex, 0, IPPROTO_GRE, 0); @@ -314,7 +310,8 @@ static int gre_rcv(struct sk_buff *skb) } #endif - if (gre_parse_header(skb, &tpi, &csum_err, &hdr_len) < 0) + hdr_len = gre_parse_header(skb, &tpi, &csum_err); + if (hdr_len < 0) goto drop; if (iptunnel_pull_header(skb, hdr_len, tpi.proto, false)) diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index 10127741a60d..47b671a46dc4 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c @@ -468,7 +468,8 @@ static int gre_rcv(struct sk_buff *skb) bool csum_err = false; int hdr_len; - if (gre_parse_header(skb, &tpi, &csum_err, &hdr_len) < 0) + hdr_len = gre_parse_header(skb, &tpi, &csum_err); + if (hdr_len < 0) goto drop; if (iptunnel_pull_header(skb, hdr_len, tpi.proto, false)) From d7fb5a80492169cd737d51042f4ee03b09be0ef6 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Mon, 2 May 2016 09:38:12 -0700 Subject: [PATCH 1311/1649] gso: Do not perform partial GSO if number of partial segments is 1 or less In the event that the number of partial segments is equal to 1 we don't really need to perform partial segmentation offload. As such we should skip multiplying the MSS and instead just clear the partial_segs value since it will not provide any gain to advertise the frame as being GSO when it is a single frame. Signed-off-by: Alexander Duyck Signed-off-by: David S. Miller --- net/core/skbuff.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 7a1d48983f81..b8dd2d2e2256 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -3101,7 +3101,10 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb, */ if (features & NETIF_F_GSO_PARTIAL) { partial_segs = len / mss; - mss *= partial_segs; + if (partial_segs > 1) + mss *= partial_segs; + else + partial_segs = 0; } headroom = skb_headroom(head_skb); From 36c983824b6f17b93258153ff5b05c33c34e44ba Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Mon, 2 May 2016 09:38:18 -0700 Subject: [PATCH 1312/1649] gso: Only allow GSO_PARTIAL if we can checksum the inner protocol This patch addresses a possible issue that can occur if we get into any odd corner cases where we support TSO for a given protocol but not the checksum or scatter-gather offload. There are few drivers floating around that setup their tunnels this way and by enforcing the checksum piece we can avoid mangling any frames. Signed-off-by: Alexander Duyck Signed-off-by: David S. Miller --- net/core/skbuff.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/net/core/skbuff.c b/net/core/skbuff.c index b8dd2d2e2256..5586be93632f 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -3080,8 +3080,7 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb, unsigned int headroom; unsigned int len = head_skb->len; __be16 proto; - bool csum; - int sg = !!(features & NETIF_F_SG); + bool csum, sg; int nfrags = skb_shinfo(head_skb)->nr_frags; int err = -ENOMEM; int i = 0; @@ -3093,13 +3092,14 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb, if (unlikely(!proto)) return ERR_PTR(-EINVAL); + sg = !!(features & NETIF_F_SG); csum = !!can_checksum_protocol(features, proto); /* GSO partial only requires that we trim off any excess that * doesn't fit into an MSS sized block, so take care of that * now. 
*/ - if (features & NETIF_F_GSO_PARTIAL) { + if (sg && csum && (features & NETIF_F_GSO_PARTIAL)) { partial_segs = len / mss; if (partial_segs > 1) mss *= partial_segs; From b1dc497b28ad053d1f6d5b5cb186af9564e4d7f1 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Mon, 2 May 2016 09:38:24 -0700 Subject: [PATCH 1313/1649] net: Fix netdev_fix_features so that TSO_MANGLEID is only available with TSO This change makes it so that we will strip the TSO_MANGLEID bit if TSO is not present. This way we will also handle ECN correctly of TSO is not present. Signed-off-by: Alexander Duyck Signed-off-by: David S. Miller --- net/core/dev.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/net/core/dev.c b/net/core/dev.c index 673d1f118bfb..e98ba63fe280 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -6721,6 +6721,10 @@ static netdev_features_t netdev_fix_features(struct net_device *dev, features &= ~NETIF_F_TSO6; } + /* TSO with IPv4 ID mangling requires IPv4 TSO be enabled */ + if ((features & NETIF_F_TSO_MANGLEID) && !(features & NETIF_F_TSO)) + features &= ~NETIF_F_TSO_MANGLEID; + /* TSO ECN requires that TSO is present as well. */ if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN) features &= ~NETIF_F_TSO_ECN; From 3c9346b24001523a763c28478b49064589d1c8ab Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Mon, 2 May 2016 09:38:30 -0700 Subject: [PATCH 1314/1649] net/mlx4_en: Add support for UDP tunnel segmentation with outer checksum offload This patch assumes that the mlx4 hardware will ignore existing IPv4/v6 header fields for length and checksum as well as the length and checksum fields for outer UDP headers. Signed-off-by: Alexander Duyck Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 8bd143dda95d..bce37cbfde24 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -2358,7 +2358,9 @@ out: /* set offloads */ priv->dev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM | - NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL; + NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_GSO_UDP_TUNNEL_CSUM | + NETIF_F_GSO_PARTIAL; } static void mlx4_en_del_vxlan_offloads(struct work_struct *work) @@ -2368,7 +2370,9 @@ static void mlx4_en_del_vxlan_offloads(struct work_struct *work) vxlan_del_task); /* unset offloads */ priv->dev->hw_enc_features &= ~(NETIF_F_IP_CSUM | NETIF_F_RXCSUM | - NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL); + NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_GSO_UDP_TUNNEL_CSUM | + NETIF_F_GSO_PARTIAL); ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port, VXLAN_STEER_BY_OUTER_MAC, 0); @@ -2992,8 +2996,13 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, } if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) { - dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL; - dev->features |= NETIF_F_GSO_UDP_TUNNEL; + dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_GSO_UDP_TUNNEL_CSUM | + NETIF_F_GSO_PARTIAL; + dev->features |= NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_GSO_UDP_TUNNEL_CSUM | + NETIF_F_GSO_PARTIAL; + dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM; } mdev->pndev[port] = dev; From 09067122db3b7fb9cd329fcc16cee12e8f6babcf Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Mon, 2 May 2016 09:38:37 -0700 Subject: [PATCH 1315/1649] net/mlx4_en: Add support for inner IPv6 
checksum offloads and TSO >From what I can tell the ConnectX-3 will support an inner IPv6 checksum and segmentation offload, however it cannot support outer IPv6 headers. This assumption is based on the fact that I could see the checksum being offloaded for inner header on IPv4 tunnels, but not on IPv6 tunnels. For this reason I am adding the feature to the hw_enc_features and adding an extra check to the features_check call that will disable GSO and checksum offload in the case that the encapsulated frame has an outer IP version of that is not 4. The check in mlx4_en_features_check could be removed if at some point in the future a fix is found that allows the hardware to offload segmentation/checksum on tunnels with an outer IPv6 header. Signed-off-by: Alexander Duyck Signed-off-by: David S. Miller --- .../net/ethernet/mellanox/mlx4/en_netdev.c | 25 +++++++++++++++---- drivers/net/ethernet/mellanox/mlx4/en_tx.c | 15 +++++++++-- 2 files changed, 33 insertions(+), 7 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index bce37cbfde24..6f28ac58251c 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -2357,8 +2357,10 @@ out: } /* set offloads */ - priv->dev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM | - NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL | + priv->dev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | + NETIF_F_RXCSUM | + NETIF_F_TSO | NETIF_F_TSO6 | + NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_PARTIAL; } @@ -2369,8 +2371,10 @@ static void mlx4_en_del_vxlan_offloads(struct work_struct *work) struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv, vxlan_del_task); /* unset offloads */ - priv->dev->hw_enc_features &= ~(NETIF_F_IP_CSUM | NETIF_F_RXCSUM | - NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL | + priv->dev->hw_enc_features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | + NETIF_F_RXCSUM | + NETIF_F_TSO | NETIF_F_TSO6 | + NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_PARTIAL); @@ -2431,7 +2435,18 @@ static netdev_features_t mlx4_en_features_check(struct sk_buff *skb, netdev_features_t features) { features = vlan_features_check(skb, features); - return vxlan_features_check(skb, features); + features = vxlan_features_check(skb, features); + + /* The ConnectX-3 doesn't support outer IPv6 checksums but it does + * support inner IPv6 checksums and segmentation so we need to + * strip that feature if this is an IPv6 encapsulated frame. + */ + if (skb->encapsulation && + (skb->ip_summed == CHECKSUM_PARTIAL) && + (ip_hdr(skb)->version != 4)) + features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); + + return features; } #endif diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c index a386f047c1af..0f206a95429c 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c @@ -41,6 +41,7 @@ #include #include #include +#include #include #include "mlx4_en.h" @@ -920,8 +921,18 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) tx_ind, fragptr); if (skb->encapsulation) { - struct iphdr *ipv4 = (struct iphdr *)skb_inner_network_header(skb); - if (ipv4->protocol == IPPROTO_TCP || ipv4->protocol == IPPROTO_UDP) + union { + struct iphdr *v4; + struct ipv6hdr *v6; + unsigned char *hdr; + } ip; + u8 proto; + + ip.hdr = skb_inner_network_header(skb); + proto = (ip.v4->version == 4) ? 
ip.v4->protocol : + ip.v6->nexthdr; + + if (proto == IPPROTO_TCP || proto == IPPROTO_UDP) op_own |= cpu_to_be32(MLX4_WQE_CTRL_IIP | MLX4_WQE_CTRL_ILP); else op_own |= cpu_to_be32(MLX4_WQE_CTRL_IIP); From b49663c8fb4908cc548afd27aa950c07f4e421d5 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Mon, 2 May 2016 09:38:43 -0700 Subject: [PATCH 1316/1649] net/mlx5e: Add support for UDP tunnel segmentation with outer checksum offload This patch assumes that the mlx5 hardware will ignore existing IPv4/v6 header fields for length and checksum as well as the length and checksum fields for outer UDP headers. Signed-off-by: Alexander Duyck Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 7dfb73aa8e41..23883be2b0b8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -2802,13 +2802,18 @@ static void mlx5e_build_netdev(struct net_device *netdev) netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER; if (mlx5e_vxlan_allowed(mdev)) { - netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL; + netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_GSO_UDP_TUNNEL_CSUM | + NETIF_F_GSO_PARTIAL; netdev->hw_enc_features |= NETIF_F_IP_CSUM; netdev->hw_enc_features |= NETIF_F_RXCSUM; netdev->hw_enc_features |= NETIF_F_TSO; netdev->hw_enc_features |= NETIF_F_TSO6; netdev->hw_enc_features |= NETIF_F_RXHASH; netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL; + netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM | + NETIF_F_GSO_PARTIAL; + netdev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM; } mlx5_query_port_fcs(mdev, &fcs_supported, &fcs_enabled); From f3ed653cd4191ce42b27b2fb524418832f7d6c2d Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Mon, 2 May 2016 09:38:49 -0700 Subject: [PATCH 1317/1649] net/mlx5e: Fix IPv6 tunnel checksum offload The mlx5 driver exposes support for TSO6 but not IPv6 csum for hardware encapsulated tunnels. This leads to issues as it triggers warnings in skb_checksum_help as it ends up being called as we report supporting the segmentation but not the checksumming for IPv6 frames. This patch corrects that and drops 2 features that don't actually need to be supported in hw_enc_features since they are Rx features and don't actually impact anything by being present in hw_enc_features. Signed-off-by: Alexander Duyck Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 23883be2b0b8..b435c7b36cfb 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -2806,10 +2806,9 @@ static void mlx5e_build_netdev(struct net_device *netdev) NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_PARTIAL; netdev->hw_enc_features |= NETIF_F_IP_CSUM; - netdev->hw_enc_features |= NETIF_F_RXCSUM; + netdev->hw_enc_features |= NETIF_F_IPV6_CSUM; netdev->hw_enc_features |= NETIF_F_TSO; netdev->hw_enc_features |= NETIF_F_TSO6; - netdev->hw_enc_features |= NETIF_F_RXHASH; netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL; netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_PARTIAL; From 152971ee75fddbc43fb6cf7e3ada96c1324df2af Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Mon, 2 May 2016 09:38:55 -0700 Subject: [PATCH 1318/1649] bnxt: Add support for segmentation of tunnels with outer checksums This patch assumes that the bnxt hardware will ignore existing IPv4/v6 header fields for length and checksum as well as the length and checksum fields for outer UDP and GRE headers. I have been told by Michael Chan that this is working. Though this might be somewhat redundant for IPv6 as they are forcing the checksum to be computed for all IPv6 frames that are offloaded. A follow-up patch may be necessary in order to fix this as it is essentially mangling the outer IPv6 headers to add a checksum where none was requested. Signed-off-by: Alexander Duyck Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 1199c2b4bf20..fd85b6dd4a6e 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -6219,14 +6219,19 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE | NETIF_F_GSO_IPIP | NETIF_F_GSO_SIT | - NETIF_F_RXHASH | + NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM | + NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH | NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO; dev->hw_enc_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE | - NETIF_F_GSO_IPIP | NETIF_F_GSO_SIT; + NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM | + NETIF_F_GSO_IPIP | NETIF_F_GSO_SIT | + NETIF_F_GSO_PARTIAL; + dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM | + NETIF_F_GSO_GRE_CSUM; dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA; dev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX; From ecc9120e054b08024b096d8a3a96f81df5fc906c Mon Sep 17 00:00:00 2001 From: Christian Lamparter Date: Tue, 3 May 2016 14:08:30 +0200 Subject: [PATCH 1319/1649] drivers: net: emac: add Atheros AR8035 phy initialization code This patch adds the phy initialization code for Qualcomm Atheros AR8035 phy. This configuration is found in the Cisco Meraki MR24. Signed-off-by: Christian Lamparter Signed-off-by: David S. 
Miller --- drivers/net/ethernet/ibm/emac/phy.c | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/drivers/net/ethernet/ibm/emac/phy.c b/drivers/net/ethernet/ibm/emac/phy.c index d3b9d103353e..5b88cc690c22 100644 --- a/drivers/net/ethernet/ibm/emac/phy.c +++ b/drivers/net/ethernet/ibm/emac/phy.c @@ -470,12 +470,38 @@ static struct mii_phy_def m88e1112_phy_def = { .ops = &m88e1112_phy_ops, }; +static int ar8035_init(struct mii_phy *phy) +{ + phy_write(phy, 0x1d, 0x5); /* Address debug register 5 */ + phy_write(phy, 0x1e, 0x2d47); /* Value copied from u-boot */ + phy_write(phy, 0x1d, 0xb); /* Address hib ctrl */ + phy_write(phy, 0x1e, 0xbc20); /* Value copied from u-boot */ + + return 0; +} + +static struct mii_phy_ops ar8035_phy_ops = { + .init = ar8035_init, + .setup_aneg = genmii_setup_aneg, + .setup_forced = genmii_setup_forced, + .poll_link = genmii_poll_link, + .read_link = genmii_read_link, +}; + +static struct mii_phy_def ar8035_phy_def = { + .phy_id = 0x004dd070, + .phy_id_mask = 0xfffffff0, + .name = "Atheros 8035 Gigabit Ethernet", + .ops = &ar8035_phy_ops, +}; + static struct mii_phy_def *mii_phy_table[] = { &et1011c_phy_def, &cis8201_phy_def, &bcm5248_phy_def, &m88e1111_phy_def, &m88e1112_phy_def, + &ar8035_phy_def, &genmii_phy_def, NULL }; From 5a20f5cfd7276862a8d21d75f83359840ed6e662 Mon Sep 17 00:00:00 2001 From: Hariprasad Shenai Date: Tue, 3 May 2016 18:58:01 +0530 Subject: [PATCH 1320/1649] cxgb4: Don't sleep when mbox cmd is issued from interrupt context When link goes down, from the interrupt handler DCB priority for the Tx queues needs to be unset. We issue mbox command to unset the Tx queue priority with negative timeout. In t4_wr_mbox_meat_timeout() do not sleep when negative timeout is passed, since it is called from interrupt context. Signed-off-by: Hariprasad Shenai Signed-off-by: David S. Miller --- drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 49bcbf16c9ca..59f5e0b40286 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -304,6 +304,12 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd, if (adap->pdev->error_state != pci_channel_io_normal) return -EIO; + /* If we have a negative timeout, that implies that we can't sleep. */ + if (timeout < 0) { + sleep_ok = false; + timeout = -timeout; + } + v = MBOWNER_G(t4_read_reg(adap, ctl_reg)); for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++) v = MBOWNER_G(t4_read_reg(adap, ctl_reg)); From f358738b1d64935b54430613691a6ef8c1f6d450 Mon Sep 17 00:00:00 2001 From: Hariprasad Shenai Date: Tue, 3 May 2016 18:58:02 +0530 Subject: [PATCH 1321/1649] cxgb4: Check for firmware errors in the mailbox command loop Check for firmware errors in the mailbox command loop and report them differently rather than simply timing out when the firmware goes belly up. Signed-off-by: Hariprasad Shenai Signed-off-by: David S. 
Miller --- drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 59f5e0b40286..a63addb4e72c 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -293,6 +293,7 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd, u32 data_reg = PF_REG(mbox, CIM_PF_MAILBOX_DATA_A); u32 ctl_reg = PF_REG(mbox, CIM_PF_MAILBOX_CTRL_A); __be64 cmd_rpl[MBOX_LEN / 8]; + u32 pcie_fw; if ((size & 15) || size > MBOX_LEN) return -EINVAL; @@ -331,7 +332,10 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd, delay_idx = 0; ms = delay[0]; - for (i = 0; i < timeout; i += ms) { + for (i = 0; + !((pcie_fw = t4_read_reg(adap, PCIE_FW_A)) & PCIE_FW_ERR_F) && + i < timeout; + i += ms) { if (sleep_ok) { ms = delay[delay_idx]; /* last element may repeat */ if (delay_idx < ARRAY_SIZE(delay) - 1) @@ -366,7 +370,7 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd, } } - ret = -ETIMEDOUT; + ret = (pcie_fw & PCIE_FW_ERR_F) ? -ENXIO : -ETIMEDOUT; t4_record_mbox(adap, cmd, MBOX_LEN, access, ret); dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n", *(const u8 *)cmd, mbox); From fbc4a69b562e38d6946bc8bd7e97d30fe0acd5d0 Mon Sep 17 00:00:00 2001 From: Maor Gottlieb Date: Tue, 3 May 2016 17:13:53 +0300 Subject: [PATCH 1322/1649] net/mlx5e: Fix aRFS compilation dependency en_arfs.o should be compiled only if both CONFIG_MLX5_CORE_EN and CONFIG_RFS_ACCEL are enabled. en_arfs calls to rps_may_expire_flow which is compiled only if CONFIG_RFS_ACCEL is defined. Move en_arfs.o compilation dependency to be under CONFIG_MLX5_CORE_EN and wrap the en_arfs.c content with ifdef of CONFIG_RFS_ACCEL. Fixes: 1cabe6b0965e ('net/mlx5e: Create aRFS flow tables') Signed-off-by: Maor Gottlieb Reported-by: Alexei Starovoitov Signed-off-by: Saeed Mahameed Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx5/core/Makefile | 3 +-- drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c | 3 +++ 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile index 679e18ffb3a6..b531d4f3c00b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile +++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile @@ -6,7 +6,6 @@ mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \ mlx5_core-$(CONFIG_MLX5_CORE_EN) += wq.o eswitch.o \ en_main.o en_fs.o en_ethtool.o en_tx.o en_rx.o \ - en_txrx.o en_clock.o vxlan.o en_tc.o + en_txrx.o en_clock.o vxlan.o en_tc.o en_arfs.o mlx5_core-$(CONFIG_MLX5_CORE_EN_DCB) += en_dcbnl.o -mlx5_core-$(CONFIG_RFS_ACCEL) += en_arfs.o diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c index b4ae0fe15878..3515e78ba68f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c @@ -30,6 +30,8 @@ * SOFTWARE. 
*/ +#ifdef CONFIG_RFS_ACCEL + #include #include #include @@ -747,3 +749,4 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, spin_unlock_bh(&arfs->arfs_lock); return arfs_rule->filter_id; } +#endif From efdc810ba39dae0ccce9cb9c1c84ff9b0157ca43 Mon Sep 17 00:00:00 2001 From: Mohamad Haj Yahia Date: Tue, 3 May 2016 17:13:54 +0300 Subject: [PATCH 1323/1649] net/mlx5: Flow steering, Add vport ACL support Update the relevant flow steering device structs and commands to support vport. Update the flow steering core API to receive vport number. Add ingress and egress ACL flow table name spaces. Add ACL flow table support: * ACL (Access Control List) flow table is a table that contains only allow/drop steering rules. * We have two types of ACL flow tables - ingress and egress. * ACLs handle traffic sent from/to E-Switch FDB table, Ingress refers to traffic sent from Vport to E-Switch and Egress refers to traffic sent from E-Switch to vport. * Ingress ACL flow table allow/drop rules is checked against traffic sent from VF. * Egress ACL flow table allow/drop rules is checked against traffic sent to VF. Signed-off-by: Mohamad Haj Yahia Signed-off-by: Saeed Mahameed Signed-off-by: David S. Miller --- .../net/ethernet/mellanox/mlx5/core/eswitch.c | 2 +- .../net/ethernet/mellanox/mlx5/core/fs_cmd.c | 33 +++++++ .../net/ethernet/mellanox/mlx5/core/fs_cmd.h | 1 + .../net/ethernet/mellanox/mlx5/core/fs_core.c | 85 +++++++++++++++++-- .../net/ethernet/mellanox/mlx5/core/fs_core.h | 7 +- .../ethernet/mellanox/mlx5/core/mlx5_core.h | 2 + include/linux/mlx5/device.h | 12 +++ include/linux/mlx5/driver.h | 2 + include/linux/mlx5/fs.h | 7 ++ 9 files changed, 142 insertions(+), 9 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index ff91bb5e1c43..dd066199d172 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -845,7 +845,7 @@ void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw) int mlx5_eswitch_init(struct mlx5_core_dev *dev) { int l2_table_size = 1 << MLX5_CAP_GEN(dev, log_max_l2_table); - int total_vports = 1 + pci_sriov_get_totalvfs(dev->pdev); + int total_vports = MLX5_TOTAL_VPORTS(dev); struct mlx5_eswitch *esw; int vport_num; int err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c index f46f1db0fc00..9797768891ee 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c @@ -50,6 +50,10 @@ int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev, MLX5_CMD_OP_SET_FLOW_TABLE_ROOT); MLX5_SET(set_flow_table_root_in, in, table_type, ft->type); MLX5_SET(set_flow_table_root_in, in, table_id, ft->id); + if (ft->vport) { + MLX5_SET(set_flow_table_root_in, in, vport_number, ft->vport); + MLX5_SET(set_flow_table_root_in, in, other_vport, 1); + } memset(out, 0, sizeof(out)); return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, @@ -57,6 +61,7 @@ int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev, } int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev, + u16 vport, enum fs_flow_table_type type, unsigned int level, unsigned int log_size, struct mlx5_flow_table *next_ft, unsigned int *table_id) @@ -77,6 +82,10 @@ int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev, MLX5_SET(create_flow_table_in, in, table_type, type); MLX5_SET(create_flow_table_in, in, level, level); MLX5_SET(create_flow_table_in, in, 
log_size, log_size); + if (vport) { + MLX5_SET(create_flow_table_in, in, vport_number, vport); + MLX5_SET(create_flow_table_in, in, other_vport, 1); + } memset(out, 0, sizeof(out)); err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, @@ -101,6 +110,10 @@ int mlx5_cmd_destroy_flow_table(struct mlx5_core_dev *dev, MLX5_CMD_OP_DESTROY_FLOW_TABLE); MLX5_SET(destroy_flow_table_in, in, table_type, ft->type); MLX5_SET(destroy_flow_table_in, in, table_id, ft->id); + if (ft->vport) { + MLX5_SET(destroy_flow_table_in, in, vport_number, ft->vport); + MLX5_SET(destroy_flow_table_in, in, other_vport, 1); + } return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out)); @@ -120,6 +133,10 @@ int mlx5_cmd_modify_flow_table(struct mlx5_core_dev *dev, MLX5_CMD_OP_MODIFY_FLOW_TABLE); MLX5_SET(modify_flow_table_in, in, table_type, ft->type); MLX5_SET(modify_flow_table_in, in, table_id, ft->id); + if (ft->vport) { + MLX5_SET(modify_flow_table_in, in, vport_number, ft->vport); + MLX5_SET(modify_flow_table_in, in, other_vport, 1); + } MLX5_SET(modify_flow_table_in, in, modify_field_select, MLX5_MODIFY_FLOW_TABLE_MISS_TABLE_ID); if (next_ft) { @@ -148,6 +165,10 @@ int mlx5_cmd_create_flow_group(struct mlx5_core_dev *dev, MLX5_CMD_OP_CREATE_FLOW_GROUP); MLX5_SET(create_flow_group_in, in, table_type, ft->type); MLX5_SET(create_flow_group_in, in, table_id, ft->id); + if (ft->vport) { + MLX5_SET(create_flow_group_in, in, vport_number, ft->vport); + MLX5_SET(create_flow_group_in, in, other_vport, 1); + } err = mlx5_cmd_exec_check_status(dev, in, inlen, out, @@ -174,6 +195,10 @@ int mlx5_cmd_destroy_flow_group(struct mlx5_core_dev *dev, MLX5_SET(destroy_flow_group_in, in, table_type, ft->type); MLX5_SET(destroy_flow_group_in, in, table_id, ft->id); MLX5_SET(destroy_flow_group_in, in, group_id, group_id); + if (ft->vport) { + MLX5_SET(destroy_flow_group_in, in, vport_number, ft->vport); + MLX5_SET(destroy_flow_group_in, in, other_vport, 1); + } return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out)); @@ -207,6 +232,10 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev, MLX5_SET(set_fte_in, in, table_type, ft->type); MLX5_SET(set_fte_in, in, table_id, ft->id); MLX5_SET(set_fte_in, in, flow_index, fte->index); + if (ft->vport) { + MLX5_SET(set_fte_in, in, vport_number, ft->vport); + MLX5_SET(set_fte_in, in, other_vport, 1); + } in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context); MLX5_SET(flow_context, in_flow_context, group_id, group_id); @@ -285,6 +314,10 @@ int mlx5_cmd_delete_fte(struct mlx5_core_dev *dev, MLX5_SET(delete_fte_in, in, table_type, ft->type); MLX5_SET(delete_fte_in, in, table_id, ft->id); MLX5_SET(delete_fte_in, in, flow_index, index); + if (ft->vport) { + MLX5_SET(delete_fte_in, in, vport_number, ft->vport); + MLX5_SET(delete_fte_in, in, other_vport, 1); + } err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out)); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h index 9814d4784803..c97b4a03eeed 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h @@ -34,6 +34,7 @@ #define _MLX5_FS_CMD_ int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev, + u16 vport, enum fs_flow_table_type type, unsigned int level, unsigned int log_size, struct mlx5_flow_table *next_ft, unsigned int *table_id); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 
4d78d5a48af3..659a6980cda2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -457,7 +457,7 @@ static struct mlx5_flow_group *alloc_flow_group(u32 *create_fg_in) return fg; } -static struct mlx5_flow_table *alloc_flow_table(int level, int max_fte, +static struct mlx5_flow_table *alloc_flow_table(int level, u16 vport, int max_fte, enum fs_flow_table_type table_type) { struct mlx5_flow_table *ft; @@ -469,6 +469,7 @@ static struct mlx5_flow_table *alloc_flow_table(int level, int max_fte, ft->level = level; ft->node.type = FS_TYPE_FLOW_TABLE; ft->type = table_type; + ft->vport = vport; ft->max_fte = max_fte; INIT_LIST_HEAD(&ft->fwd_rules); mutex_init(&ft->lock); @@ -700,9 +701,9 @@ static void list_add_flow_table(struct mlx5_flow_table *ft, list_add(&ft->node.list, prev); } -struct mlx5_flow_table *mlx5_create_flow_table(struct mlx5_flow_namespace *ns, - int prio, int max_fte, - u32 level) +static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespace *ns, + u16 vport, int prio, + int max_fte, u32 level) { struct mlx5_flow_table *next_ft = NULL; struct mlx5_flow_table *ft; @@ -732,6 +733,7 @@ struct mlx5_flow_table *mlx5_create_flow_table(struct mlx5_flow_namespace *ns, */ level += fs_prio->start_level; ft = alloc_flow_table(level, + vport, roundup_pow_of_two(max_fte), root->table_type); if (!ft) { @@ -742,7 +744,7 @@ struct mlx5_flow_table *mlx5_create_flow_table(struct mlx5_flow_namespace *ns, tree_init_node(&ft->node, 1, del_flow_table); log_table_sz = ilog2(ft->max_fte); next_ft = find_next_chained_ft(fs_prio); - err = mlx5_cmd_create_flow_table(root->dev, ft->type, ft->level, + err = mlx5_cmd_create_flow_table(root->dev, ft->vport, ft->type, ft->level, log_table_sz, next_ft, &ft->id); if (err) goto free_ft; @@ -766,6 +768,20 @@ unlock_root: return ERR_PTR(err); } +struct mlx5_flow_table *mlx5_create_flow_table(struct mlx5_flow_namespace *ns, + int prio, int max_fte, + u32 level) +{ + return __mlx5_create_flow_table(ns, 0, prio, max_fte, level); +} + +struct mlx5_flow_table *mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns, + int prio, int max_fte, + u32 level, u16 vport) +{ + return __mlx5_create_flow_table(ns, vport, prio, max_fte, level); +} + struct mlx5_flow_table *mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns, int prio, int num_flow_table_entries, @@ -1319,6 +1335,16 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev, return &dev->priv.fdb_root_ns->ns; else return NULL; + case MLX5_FLOW_NAMESPACE_ESW_EGRESS: + if (dev->priv.esw_egress_root_ns) + return &dev->priv.esw_egress_root_ns->ns; + else + return NULL; + case MLX5_FLOW_NAMESPACE_ESW_INGRESS: + if (dev->priv.esw_ingress_root_ns) + return &dev->priv.esw_ingress_root_ns->ns; + else + return NULL; default: return NULL; } @@ -1699,6 +1725,8 @@ void mlx5_cleanup_fs(struct mlx5_core_dev *dev) { cleanup_root_ns(dev); cleanup_single_prio_root_ns(dev, dev->priv.fdb_root_ns); + cleanup_single_prio_root_ns(dev, dev->priv.esw_egress_root_ns); + cleanup_single_prio_root_ns(dev, dev->priv.esw_ingress_root_ns); } static int init_fdb_root_ns(struct mlx5_core_dev *dev) @@ -1719,6 +1747,38 @@ static int init_fdb_root_ns(struct mlx5_core_dev *dev) } } +static int init_egress_acl_root_ns(struct mlx5_core_dev *dev) +{ + struct fs_prio *prio; + + dev->priv.esw_egress_root_ns = create_root_ns(dev, FS_FT_ESW_EGRESS_ACL); + if (!dev->priv.esw_egress_root_ns) + return -ENOMEM; + + /* create 1 prio*/ + 
prio = fs_create_prio(&dev->priv.esw_egress_root_ns->ns, 0, MLX5_TOTAL_VPORTS(dev)); + if (IS_ERR(prio)) + return PTR_ERR(prio); + else + return 0; +} + +static int init_ingress_acl_root_ns(struct mlx5_core_dev *dev) +{ + struct fs_prio *prio; + + dev->priv.esw_ingress_root_ns = create_root_ns(dev, FS_FT_ESW_INGRESS_ACL); + if (!dev->priv.esw_ingress_root_ns) + return -ENOMEM; + + /* create 1 prio*/ + prio = fs_create_prio(&dev->priv.esw_ingress_root_ns->ns, 0, MLX5_TOTAL_VPORTS(dev)); + if (IS_ERR(prio)) + return PTR_ERR(prio); + else + return 0; +} + int mlx5_init_fs(struct mlx5_core_dev *dev) { int err = 0; @@ -1731,8 +1791,21 @@ int mlx5_init_fs(struct mlx5_core_dev *dev) if (MLX5_CAP_GEN(dev, eswitch_flow_table)) { err = init_fdb_root_ns(dev); if (err) - cleanup_root_ns(dev); + goto err; + } + if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support)) { + err = init_egress_acl_root_ns(dev); + if (err) + goto err; + } + if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support)) { + err = init_ingress_acl_root_ns(dev); + if (err) + goto err; } + return 0; +err: + mlx5_cleanup_fs(dev); return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h index d607e564f454..8e76cc505f5a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h @@ -45,8 +45,10 @@ enum fs_node_type { }; enum fs_flow_table_type { - FS_FT_NIC_RX = 0x0, - FS_FT_FDB = 0X4, + FS_FT_NIC_RX = 0x0, + FS_FT_ESW_EGRESS_ACL = 0x2, + FS_FT_ESW_INGRESS_ACL = 0x3, + FS_FT_FDB = 0X4, }; enum fs_fte_status { @@ -79,6 +81,7 @@ struct mlx5_flow_rule { struct mlx5_flow_table { struct fs_node node; u32 id; + u16 vport; unsigned int max_fte; unsigned int level; enum fs_flow_table_type type; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h index 0b0b226c789e..482604bd051c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h @@ -42,6 +42,8 @@ #define DRIVER_VERSION "3.0-1" #define DRIVER_RELDATE "January 2015" +#define MLX5_TOTAL_VPORTS(mdev) (1 + pci_sriov_get_totalvfs(mdev->pdev)) + extern int mlx5_core_debug_mask; #define mlx5_core_dbg(__dev, format, ...) 
\ diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index 8fecd6d6f814..ee0d5a937f02 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -1349,6 +1349,18 @@ enum mlx5_cap_type { #define MLX5_CAP_ESW_FLOWTABLE_FDB_MAX(mdev, cap) \ MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_nic_esw_fdb.cap) +#define MLX5_CAP_ESW_EGRESS_ACL(mdev, cap) \ + MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_esw_acl_egress.cap) + +#define MLX5_CAP_ESW_EGRESS_ACL_MAX(mdev, cap) \ + MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_esw_acl_egress.cap) + +#define MLX5_CAP_ESW_INGRESS_ACL(mdev, cap) \ + MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_esw_acl_ingress.cap) + +#define MLX5_CAP_ESW_INGRESS_ACL_MAX(mdev, cap) \ + MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_esw_acl_ingress.cap) + #define MLX5_CAP_ESW(mdev, cap) \ MLX5_GET(e_switch_cap, \ mdev->hca_caps_cur[MLX5_CAP_ESWITCH], cap) diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index d5529449ef47..9613143f0561 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -518,6 +518,8 @@ struct mlx5_priv { unsigned long pci_dev_data; struct mlx5_flow_root_namespace *root_ns; struct mlx5_flow_root_namespace *fdb_root_ns; + struct mlx5_flow_root_namespace *esw_egress_root_ns; + struct mlx5_flow_root_namespace *esw_ingress_root_ns; }; enum mlx5_device_state { diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h index 165ff4f9cc6a..6467569ad76e 100644 --- a/include/linux/mlx5/fs.h +++ b/include/linux/mlx5/fs.h @@ -58,6 +58,8 @@ enum mlx5_flow_namespace_type { MLX5_FLOW_NAMESPACE_LEFTOVERS, MLX5_FLOW_NAMESPACE_ANCHOR, MLX5_FLOW_NAMESPACE_FDB, + MLX5_FLOW_NAMESPACE_ESW_EGRESS, + MLX5_FLOW_NAMESPACE_ESW_INGRESS, }; struct mlx5_flow_table; @@ -90,6 +92,11 @@ mlx5_create_flow_table(struct mlx5_flow_namespace *ns, int prio, int num_flow_table_entries, u32 level); +struct mlx5_flow_table * +mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns, + int prio, + int num_flow_table_entries, + u32 level, u16 vport); int mlx5_destroy_flow_table(struct mlx5_flow_table *ft); /* inbox should be set with the following values: From 831cae1daead92c4b9c3e149c6bc14853902c204 Mon Sep 17 00:00:00 2001 From: Mohamad Haj Yahia Date: Tue, 3 May 2016 17:13:55 +0300 Subject: [PATCH 1324/1649] net/mlx5: E-Switch, Replace vport spin lock with synchronize_irq() Vport spin lock can be replaced with synchronize_irq() in the right place, this will remove the need of locking inside irq context. Locking in esw_enable_vport is not required since vport events are yet to be enabled, and at esw_disable_vport it is sufficient to synchronize_irq() to guarantee no further vport events handlers will be scheduled. Signed-off-by: Mohamad Haj Yahia Signed-off-by: Saeed Mahameed Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 11 ++--------- drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 5 ----- 2 files changed, 2 insertions(+), 14 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index dd066199d172..f01903a99993 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -713,7 +713,6 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num, int enable_events) { struct mlx5_vport *vport = &esw->vports[vport_num]; - unsigned long flags; WARN_ON(vport->enabled); @@ -727,9 +726,7 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num, vport->enabled_events = enable_events; esw_vport_change_handler(&vport->vport_change_handler); - spin_lock_irqsave(&vport->lock, flags); vport->enabled = true; - spin_unlock_irqrestore(&vport->lock, flags); arm_vport_context_events_cmd(esw->dev, vport_num, enable_events); @@ -761,17 +758,16 @@ static void esw_cleanup_vport(struct mlx5_eswitch *esw, u16 vport_num) static void esw_disable_vport(struct mlx5_eswitch *esw, int vport_num) { struct mlx5_vport *vport = &esw->vports[vport_num]; - unsigned long flags; if (!vport->enabled) return; esw_debug(esw->dev, "Disabling vport(%d)\n", vport_num); /* Mark this vport as disabled to discard new events */ - spin_lock_irqsave(&vport->lock, flags); vport->enabled = false; vport->enabled_events = 0; - spin_unlock_irqrestore(&vport->lock, flags); + + synchronize_irq(mlx5_get_msix_vec(esw->dev, MLX5_EQ_VEC_ASYNC)); mlx5_modify_vport_admin_state(esw->dev, MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT, @@ -894,7 +890,6 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev) vport->dev = dev; INIT_WORK(&vport->vport_change_handler, esw_vport_change_handler); - spin_lock_init(&vport->lock); } esw->total_vports = total_vports; @@ -942,10 +937,8 @@ void mlx5_eswitch_vport_event(struct mlx5_eswitch *esw, struct mlx5_eqe *eqe) } vport = &esw->vports[vport_num]; - spin_lock(&vport->lock); if (vport->enabled) queue_work(esw->work_queue, &vport->vport_change_handler); - spin_unlock(&vport->lock); } /* Vport Administration */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index 3416a428f70f..ba434513b2c1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ -95,11 +95,6 @@ struct mlx5_vport { struct hlist_head mc_list[MLX5_L2_ADDR_HASH_SIZE]; struct work_struct vport_change_handler; - /* This spinlock protects access to vport data, between - * "esw_vport_disable" and ongoing interrupt "mlx5_eswitch_vport_event" - * once vport marked as disabled new interrupts are discarded. - */ - spinlock_t lock; /* vport events sync */ bool enabled; u16 enabled_events; }; From 761e205b559be52852d85e0db4a034c9f57965f9 Mon Sep 17 00:00:00 2001 From: Mohamad Haj Yahia Date: Tue, 3 May 2016 17:13:56 +0300 Subject: [PATCH 1325/1649] net/mlx5: E-Switch, Fix error flow memory leak Fix memory leak in case query nic vport command failed. Fixes: 81848731ff40 ('net/mlx5: E-Switch, Add SR-IOV (FDB) support') Signed-off-by: Mohamad Haj Yahia Signed-off-by: Saeed Mahameed Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index f01903a99993..c975ff593dfc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -646,7 +646,7 @@ static void esw_update_vport_addr_list(struct mlx5_eswitch *esw, err = mlx5_query_nic_vport_mac_list(esw->dev, vport_num, list_type, mac_list, &size); if (err) - return; + goto out; esw_debug(esw->dev, "vport[%d] context update %s list size (%d)\n", vport_num, is_uc ? "UC" : "MC", size); @@ -674,6 +674,7 @@ static void esw_update_vport_addr_list(struct mlx5_eswitch *esw, addr->vport = vport_num; addr->action = MLX5_ACTION_ADD; } +out: kfree(mac_list); } From 5742df0f7dbe54728145bf1136540c09c7fcb0d1 Mon Sep 17 00:00:00 2001 From: Mohamad Haj Yahia Date: Tue, 3 May 2016 17:13:57 +0300 Subject: [PATCH 1326/1649] net/mlx5: E-Switch, Introduce VST vport ingress/egress ACLs Create egress/ingress ACLs per VF vport at vport enable. Ingress ACL: - one flow group that allows only untagged traffic with an smac equal to the original mac (anti-spoofing). - one flow group that allows only untagged traffic. - one flow group that allows only traffic with an smac equal to the original mac (anti-spoofing). (note: only one of the above groups has an active rule) - a star rule drops all other traffic; in VST mode this drops any tagged traffic coming from the VF. Egress ACL: - one flow group that allows only traffic tagged with the VST vlan id. - a star rule drops all other traffic. By default no rules are generated, unless VST is explicitly requested. Signed-off-by: Mohamad Haj Yahia Signed-off-by: Saeed Mahameed Signed-off-by: David S. Miller --- .../net/ethernet/mellanox/mlx5/core/eswitch.c | 258 ++++++++++++++++++ .../net/ethernet/mellanox/mlx5/core/eswitch.h | 18 ++ 2 files changed, 276 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index c975ff593dfc..f1a0f1845058 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -710,6 +710,248 @@ static void esw_vport_change_handler(struct work_struct *work) vport->enabled_events); } +static void esw_vport_enable_egress_acl(struct mlx5_eswitch *esw, + struct mlx5_vport *vport) +{ + int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); + struct mlx5_flow_group *vlan_grp = NULL; + struct mlx5_flow_group *drop_grp = NULL; + struct mlx5_core_dev *dev = esw->dev; + struct mlx5_flow_namespace *root_ns; + struct mlx5_flow_table *acl; + void *match_criteria; + u32 *flow_group_in; + /* The egress acl table contains 2 rules: + * 1)Allow traffic with vlan_tag=vst_vlan_id + * 2)Drop all other traffic.
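(Editor's illustration, not driver code: a tiny self-contained C model of the decision this two-entry egress ACL implements once the VST rules from a later patch in this series are installed; all names below are invented.)

#include <stdbool.h>
#include <stdio.h>

/* entry 0: allow frames tagged with the VST vlan id (allowed-vlans group);
 * entry 1: catch-all drop (drop group)
 */
static bool egress_acl_allows(bool tagged, unsigned int vid, unsigned int vst_vid)
{
	if (tagged && vid == vst_vid)
		return true;
	return false;
}

int main(void)
{
	/* expected output: 1 0 0 */
	printf("%d %d %d\n",
	       egress_acl_allows(true, 100, 100),
	       egress_acl_allows(true, 200, 100),
	       egress_acl_allows(false, 0, 100));
	return 0;
}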
+ */ + int table_size = 2; + int err = 0; + + if (!MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support)) + return; + + esw_debug(dev, "Create vport[%d] egress ACL log_max_size(%d)\n", + vport->vport, MLX5_CAP_ESW_EGRESS_ACL(dev, log_max_ft_size)); + + root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_EGRESS); + if (!root_ns) { + esw_warn(dev, "Failed to get E-Switch egress flow namespace\n"); + return; + } + + flow_group_in = mlx5_vzalloc(inlen); + if (!flow_group_in) + return; + + acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport); + if (IS_ERR_OR_NULL(acl)) { + err = PTR_ERR(acl); + esw_warn(dev, "Failed to create E-Switch vport[%d] egress flow Table, err(%d)\n", + vport->vport, err); + goto out; + } + + MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); + match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria); + MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.vlan_tag); + MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.first_vid); + MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0); + MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0); + + vlan_grp = mlx5_create_flow_group(acl, flow_group_in); + if (IS_ERR_OR_NULL(vlan_grp)) { + err = PTR_ERR(vlan_grp); + esw_warn(dev, "Failed to create E-Switch vport[%d] egress allowed vlans flow group, err(%d)\n", + vport->vport, err); + goto out; + } + + memset(flow_group_in, 0, inlen); + MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1); + MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1); + drop_grp = mlx5_create_flow_group(acl, flow_group_in); + if (IS_ERR_OR_NULL(drop_grp)) { + err = PTR_ERR(drop_grp); + esw_warn(dev, "Failed to create E-Switch vport[%d] egress drop flow group, err(%d)\n", + vport->vport, err); + goto out; + } + + vport->egress.acl = acl; + vport->egress.drop_grp = drop_grp; + vport->egress.allowed_vlans_grp = vlan_grp; +out: + kfree(flow_group_in); + if (err && !IS_ERR_OR_NULL(vlan_grp)) + mlx5_destroy_flow_group(vlan_grp); + if (err && !IS_ERR_OR_NULL(acl)) + mlx5_destroy_flow_table(acl); +} + +static void esw_vport_disable_egress_acl(struct mlx5_eswitch *esw, + struct mlx5_vport *vport) +{ + if (IS_ERR_OR_NULL(vport->egress.acl)) + return; + + esw_debug(esw->dev, "Destroy vport[%d] E-Switch egress ACL\n", vport->vport); + + mlx5_destroy_flow_group(vport->egress.allowed_vlans_grp); + mlx5_destroy_flow_group(vport->egress.drop_grp); + mlx5_destroy_flow_table(vport->egress.acl); + vport->egress.allowed_vlans_grp = NULL; + vport->egress.drop_grp = NULL; + vport->egress.acl = NULL; +} + +static void esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw, + struct mlx5_vport *vport) +{ + int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); + struct mlx5_core_dev *dev = esw->dev; + struct mlx5_flow_namespace *root_ns; + struct mlx5_flow_table *acl; + struct mlx5_flow_group *g; + void *match_criteria; + u32 *flow_group_in; + /* The ingress acl table contains 4 groups + * (2 active rules at the same time - + * 1 allow rule from one of the first 3 groups. + * 1 drop rule from the last group): + * 1)Allow untagged traffic with smac=original mac. + * 2)Allow untagged traffic. + * 3)Allow traffic with smac=original mac. + * 4)Drop all other traffic. 
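(Editor's annotation, not part of the patch: the later VST/spoofchk patches in this series install at most one allow rule per vport; this maps each mode to the group that holds it.)

/*
 * spoofchk && vst   -> group 0 (FTE 0): untagged + smac == original mac
 * !spoofchk && vst  -> group 1 (FTE 1): untagged only
 * spoofchk && !vst  -> group 2 (FTE 2): smac == original mac only
 * !spoofchk && !vst -> no allow rule; the ACL stays empty (and, after the
 *                      "on demand" patch, is not created at all)
 * Group 3 (FTE 3) holds the catch-all drop rule whenever the ACL is in use.
 */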
+ */ + int table_size = 4; + int err = 0; + + if (!MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support)) + return; + + esw_debug(dev, "Create vport[%d] ingress ACL log_max_size(%d)\n", + vport->vport, MLX5_CAP_ESW_INGRESS_ACL(dev, log_max_ft_size)); + + root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS); + if (!root_ns) { + esw_warn(dev, "Failed to get E-Switch ingress flow namespace\n"); + return; + } + + flow_group_in = mlx5_vzalloc(inlen); + if (!flow_group_in) + return; + + acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport); + if (IS_ERR_OR_NULL(acl)) { + err = PTR_ERR(acl); + esw_warn(dev, "Failed to create E-Switch vport[%d] ingress flow Table, err(%d)\n", + vport->vport, err); + goto out; + } + vport->ingress.acl = acl; + + match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria); + + MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); + MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.vlan_tag); + MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_47_16); + MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_15_0); + MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0); + MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0); + + g = mlx5_create_flow_group(acl, flow_group_in); + if (IS_ERR_OR_NULL(g)) { + err = PTR_ERR(g); + esw_warn(dev, "Failed to create E-Switch vport[%d] ingress untagged spoofchk flow group, err(%d)\n", + vport->vport, err); + goto out; + } + vport->ingress.allow_untagged_spoofchk_grp = g; + + memset(flow_group_in, 0, inlen); + MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); + MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.vlan_tag); + MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1); + MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1); + + g = mlx5_create_flow_group(acl, flow_group_in); + if (IS_ERR_OR_NULL(g)) { + err = PTR_ERR(g); + esw_warn(dev, "Failed to create E-Switch vport[%d] ingress untagged flow group, err(%d)\n", + vport->vport, err); + goto out; + } + vport->ingress.allow_untagged_only_grp = g; + + memset(flow_group_in, 0, inlen); + MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); + MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_47_16); + MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_15_0); + MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 2); + MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 2); + + g = mlx5_create_flow_group(acl, flow_group_in); + if (IS_ERR_OR_NULL(g)) { + err = PTR_ERR(g); + esw_warn(dev, "Failed to create E-Switch vport[%d] ingress spoofchk flow group, err(%d)\n", + vport->vport, err); + goto out; + } + vport->ingress.allow_spoofchk_only_grp = g; + + memset(flow_group_in, 0, inlen); + MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 3); + MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 3); + + g = mlx5_create_flow_group(acl, flow_group_in); + if (IS_ERR_OR_NULL(g)) { + err = PTR_ERR(g); + esw_warn(dev, "Failed to create E-Switch vport[%d] ingress drop flow group, err(%d)\n", + vport->vport, err); + goto out; + } + vport->ingress.drop_grp = g; + +out: + if (err) { + if (!IS_ERR_OR_NULL(vport->ingress.allow_spoofchk_only_grp)) + mlx5_destroy_flow_group( + vport->ingress.allow_spoofchk_only_grp); + if 
(!IS_ERR_OR_NULL(vport->ingress.allow_untagged_only_grp)) + mlx5_destroy_flow_group( + vport->ingress.allow_untagged_only_grp); + if (!IS_ERR_OR_NULL(vport->ingress.allow_untagged_spoofchk_grp)) + mlx5_destroy_flow_group( + vport->ingress.allow_untagged_spoofchk_grp); + if (!IS_ERR_OR_NULL(vport->ingress.acl)) + mlx5_destroy_flow_table(vport->ingress.acl); + } + + kfree(flow_group_in); +} + +static void esw_vport_disable_ingress_acl(struct mlx5_eswitch *esw, + struct mlx5_vport *vport) +{ + if (IS_ERR_OR_NULL(vport->ingress.acl)) + return; + + esw_debug(esw->dev, "Destroy vport[%d] E-Switch ingress ACL\n", vport->vport); + + mlx5_destroy_flow_group(vport->ingress.allow_spoofchk_only_grp); + mlx5_destroy_flow_group(vport->ingress.allow_untagged_only_grp); + mlx5_destroy_flow_group(vport->ingress.allow_untagged_spoofchk_grp); + mlx5_destroy_flow_group(vport->ingress.drop_grp); + mlx5_destroy_flow_table(vport->ingress.acl); + vport->ingress.acl = NULL; + vport->ingress.drop_grp = NULL; + vport->ingress.allow_spoofchk_only_grp = NULL; + vport->ingress.allow_untagged_only_grp = NULL; + vport->ingress.allow_untagged_spoofchk_grp = NULL; +} + static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num, int enable_events) { @@ -718,6 +960,12 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num, WARN_ON(vport->enabled); esw_debug(esw->dev, "Enabling VPORT(%d)\n", vport_num); + + if (vport_num) { /* Only VFs need ACLs for VST and spoofchk filtering */ + esw_vport_enable_ingress_acl(esw, vport); + esw_vport_enable_egress_acl(esw, vport); + } + mlx5_modify_vport_admin_state(esw->dev, MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT, vport_num, @@ -780,6 +1028,10 @@ static void esw_disable_vport(struct mlx5_eswitch *esw, int vport_num) arm_vport_context_events_cmd(esw->dev, vport->vport, 0); /* We don't assume VFs will cleanup after themselves */ esw_cleanup_vport(esw, vport_num); + if (vport_num) { + esw_vport_disable_egress_acl(esw, vport); + esw_vport_disable_ingress_acl(esw, vport); + } esw->enabled_vports--; } @@ -799,6 +1051,12 @@ int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs) return -ENOTSUPP; } + if (!MLX5_CAP_ESW_INGRESS_ACL(esw->dev, ft_support)) + esw_warn(esw->dev, "E-Switch ingress ACL is not supported by FW\n"); + + if (!MLX5_CAP_ESW_EGRESS_ACL(esw->dev, ft_support)) + esw_warn(esw->dev, "E-Switch engress ACL is not supported by FW\n"); + esw_info(esw->dev, "E-Switch enable SRIOV: nvfs(%d)\n", nvfs); esw_disable_vport(esw, 0); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index ba434513b2c1..e6972074c3f7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ -88,6 +88,21 @@ struct l2addr_node { kfree(ptr); \ }) +struct vport_ingress { + struct mlx5_flow_table *acl; + struct mlx5_flow_group *allow_untagged_spoofchk_grp; + struct mlx5_flow_group *allow_spoofchk_only_grp; + struct mlx5_flow_group *allow_untagged_only_grp; + struct mlx5_flow_group *drop_grp; + +}; + +struct vport_egress { + struct mlx5_flow_table *acl; + struct mlx5_flow_group *allowed_vlans_grp; + struct mlx5_flow_group *drop_grp; +}; + struct mlx5_vport { struct mlx5_core_dev *dev; int vport; @@ -95,6 +110,9 @@ struct mlx5_vport { struct hlist_head mc_list[MLX5_L2_ADDR_HASH_SIZE]; struct work_struct vport_change_handler; + struct vport_ingress ingress; + struct vport_egress egress; + bool enabled; u16 enabled_events; }; From 
dfcb1ed3c3315902e33da0fc5b0ae4c6d5086a23 Mon Sep 17 00:00:00 2001 From: Mohamad Haj Yahia Date: Tue, 3 May 2016 17:13:58 +0300 Subject: [PATCH 1327/1649] net/mlx5: E-Switch, Vport ingress/egress ACLs rules for VST mode Configure ingress and egress vport ACL rules according to vlan and qos admin parameters. Ingress ACL flow table rules: 1) drop any tagged packet sent from the VF 2) allow other traffic (default behavior) Egress ACL flow table rules: 1) allow only tagged traffic with vlan_tag=vst_vid. 2) drop other traffic. Signed-off-by: Mohamad Haj Yahia Signed-off-by: Saeed Mahameed Signed-off-by: David S. Miller --- .../net/ethernet/mellanox/mlx5/core/eswitch.c | 180 +++++++++++++++++- .../net/ethernet/mellanox/mlx5/core/eswitch.h | 11 +- 2 files changed, 189 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index f1a0f1845058..1e075ed50e5b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -789,6 +789,19 @@ out: mlx5_destroy_flow_table(acl); } +static void esw_vport_cleanup_egress_rules(struct mlx5_eswitch *esw, + struct mlx5_vport *vport) +{ + if (!IS_ERR_OR_NULL(vport->egress.allowed_vlan)) + mlx5_del_flow_rule(vport->egress.allowed_vlan); + + if (!IS_ERR_OR_NULL(vport->egress.drop_rule)) + mlx5_del_flow_rule(vport->egress.drop_rule); + + vport->egress.allowed_vlan = NULL; + vport->egress.drop_rule = NULL; +} + static void esw_vport_disable_egress_acl(struct mlx5_eswitch *esw, struct mlx5_vport *vport) { @@ -797,6 +810,7 @@ static void esw_vport_disable_egress_acl(struct mlx5_eswitch *esw, esw_debug(esw->dev, "Destroy vport[%d] E-Switch egress ACL\n", vport->vport); + esw_vport_cleanup_egress_rules(esw, vport); mlx5_destroy_flow_group(vport->egress.allowed_vlans_grp); mlx5_destroy_flow_group(vport->egress.drop_grp); mlx5_destroy_flow_table(vport->egress.acl); @@ -932,6 +946,14 @@ out: kfree(flow_group_in); } +static void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw, + struct mlx5_vport *vport) +{ + if (!IS_ERR_OR_NULL(vport->ingress.drop_rule)) + mlx5_del_flow_rule(vport->ingress.drop_rule); + vport->ingress.drop_rule = NULL; +} + static void esw_vport_disable_ingress_acl(struct mlx5_eswitch *esw, struct mlx5_vport *vport) { @@ -940,6 +962,7 @@ static void esw_vport_disable_ingress_acl(struct mlx5_eswitch *esw, esw_debug(esw->dev, "Destroy vport[%d] E-Switch ingress ACL\n", vport->vport); + esw_vport_cleanup_ingress_rules(esw, vport); mlx5_destroy_flow_group(vport->ingress.allow_spoofchk_only_grp); mlx5_destroy_flow_group(vport->ingress.allow_untagged_only_grp); mlx5_destroy_flow_group(vport->ingress.allow_untagged_spoofchk_grp); @@ -952,11 +975,139 @@ static void esw_vport_disable_ingress_acl(struct mlx5_eswitch *esw, vport->ingress.allow_untagged_spoofchk_grp = NULL; } +static int esw_vport_ingress_config(struct mlx5_eswitch *esw, + struct mlx5_vport *vport) +{ + u32 *match_v; + u32 *match_c; + int err = 0; + + if (IS_ERR_OR_NULL(vport->ingress.acl)) { + esw_warn(esw->dev, + "vport[%d] configure ingress rules failed, ingress acl is not initialized!\n", + vport->vport); + return -EPERM; + } + + esw_vport_cleanup_ingress_rules(esw, vport); + + if (!vport->vlan && !vport->qos) + return 0; + + esw_debug(esw->dev, + "vport[%d] configure ingress rules, vlan(%d) qos(%d)\n", + vport->vport, vport->vlan, vport->qos); + + match_v = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL); + match_c = 
kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL); + if (!match_v || !match_c) { + err = -ENOMEM; + esw_warn(esw->dev, "vport[%d] configure ingress rules failed, err(%d)\n", + vport->vport, err); + goto out; + } + MLX5_SET_TO_ONES(fte_match_param, match_c, outer_headers.vlan_tag); + MLX5_SET_TO_ONES(fte_match_param, match_v, outer_headers.vlan_tag); + + vport->ingress.drop_rule = + mlx5_add_flow_rule(vport->ingress.acl, + MLX5_MATCH_OUTER_HEADERS, + match_c, + match_v, + MLX5_FLOW_CONTEXT_ACTION_DROP, + 0, NULL); + if (IS_ERR_OR_NULL(vport->ingress.drop_rule)) { + err = PTR_ERR(vport->ingress.drop_rule); + pr_warn("vport[%d] configure ingress rules, err(%d)\n", + vport->vport, err); + vport->ingress.drop_rule = NULL; + } +out: + kfree(match_v); + kfree(match_c); + return err; +} + +static int esw_vport_egress_config(struct mlx5_eswitch *esw, + struct mlx5_vport *vport) +{ + u32 *match_v; + u32 *match_c; + int err = 0; + + if (IS_ERR_OR_NULL(vport->egress.acl)) { + esw_warn(esw->dev, "vport[%d] configure rgress rules failed, egress acl is not initialized!\n", + vport->vport); + return -EPERM; + } + + esw_vport_cleanup_egress_rules(esw, vport); + + if (!vport->vlan && !vport->qos) + return 0; + + esw_debug(esw->dev, + "vport[%d] configure egress rules, vlan(%d) qos(%d)\n", + vport->vport, vport->vlan, vport->qos); + + match_v = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL); + match_c = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL); + if (!match_v || !match_c) { + err = -ENOMEM; + esw_warn(esw->dev, "vport[%d] configure egress rules failed, err(%d)\n", + vport->vport, err); + goto out; + } + + /* Allowed vlan rule */ + MLX5_SET_TO_ONES(fte_match_param, match_c, outer_headers.vlan_tag); + MLX5_SET_TO_ONES(fte_match_param, match_v, outer_headers.vlan_tag); + MLX5_SET_TO_ONES(fte_match_param, match_c, outer_headers.first_vid); + MLX5_SET(fte_match_param, match_v, outer_headers.first_vid, vport->vlan); + + vport->egress.allowed_vlan = + mlx5_add_flow_rule(vport->egress.acl, + MLX5_MATCH_OUTER_HEADERS, + match_c, + match_v, + MLX5_FLOW_CONTEXT_ACTION_ALLOW, + 0, NULL); + if (IS_ERR_OR_NULL(vport->egress.allowed_vlan)) { + err = PTR_ERR(vport->egress.allowed_vlan); + pr_warn("vport[%d] configure egress allowed vlan rule failed, err(%d)\n", + vport->vport, err); + vport->egress.allowed_vlan = NULL; + goto out; + } + + /* Drop others rule (star rule) */ + memset(match_c, 0, MLX5_ST_SZ_BYTES(fte_match_param)); + memset(match_v, 0, MLX5_ST_SZ_BYTES(fte_match_param)); + vport->egress.drop_rule = + mlx5_add_flow_rule(vport->egress.acl, + 0, + match_c, + match_v, + MLX5_FLOW_CONTEXT_ACTION_DROP, + 0, NULL); + if (IS_ERR_OR_NULL(vport->egress.drop_rule)) { + err = PTR_ERR(vport->egress.drop_rule); + pr_warn("vport[%d] configure egress drop rule failed, err(%d)\n", + vport->vport, err); + vport->egress.drop_rule = NULL; + } +out: + kfree(match_v); + kfree(match_c); + return err; +} + static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num, int enable_events) { struct mlx5_vport *vport = &esw->vports[vport_num]; + mutex_lock(&esw->state_lock); WARN_ON(vport->enabled); esw_debug(esw->dev, "Enabling VPORT(%d)\n", vport_num); @@ -964,6 +1115,8 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num, if (vport_num) { /* Only VFs need ACLs for VST and spoofchk filtering */ esw_vport_enable_ingress_acl(esw, vport); esw_vport_enable_egress_acl(esw, vport); + esw_vport_ingress_config(esw, vport); + esw_vport_egress_config(esw, vport); } 
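(Editor's sketch of the configuration path, not part of the patch; the ip(8) command line is illustrative, the driver functions are the ones shown in this series.)

/*
 *   ip link set dev <pf_netdev> vf 0 vlan 100 qos 3
 *     -> .ndo_set_vf_vlan = mlx5e_set_vf_vlan()
 *       -> mlx5_eswitch_set_vport_vlan(esw, 0 + 1, 100, 3)   (VF n maps to vport n + 1)
 *         -> modify_esw_vport_cvlan()      program cvlan insert/strip for the vport
 *         -> esw_vport_ingress_config()    drop tagged frames coming from the VF
 *         -> esw_vport_egress_config()     allow only vid 100 towards the VF
 */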
mlx5_modify_vport_admin_state(esw->dev, @@ -981,6 +1134,7 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num, esw->enabled_vports++; esw_debug(esw->dev, "Enabled VPORT(%d)\n", vport_num); + mutex_unlock(&esw->state_lock); } static void esw_cleanup_vport(struct mlx5_eswitch *esw, u16 vport_num) @@ -1026,6 +1180,7 @@ static void esw_disable_vport(struct mlx5_eswitch *esw, int vport_num) flush_workqueue(esw->work_queue); /* Disable events from this vport */ arm_vport_context_events_cmd(esw->dev, vport->vport, 0); + mutex_lock(&esw->state_lock); /* We don't assume VFs will cleanup after themselves */ esw_cleanup_vport(esw, vport_num); if (vport_num) { @@ -1033,6 +1188,7 @@ static void esw_disable_vport(struct mlx5_eswitch *esw, int vport_num) esw_vport_disable_ingress_acl(esw, vport); } esw->enabled_vports--; + mutex_unlock(&esw->state_lock); } /* Public E-Switch API */ @@ -1142,6 +1298,8 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev) goto abort; } + mutex_init(&esw->state_lock); + for (vport_num = 0; vport_num < total_vports; vport_num++) { struct mlx5_vport *vport = &esw->vports[vport_num]; @@ -1268,6 +1426,8 @@ int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw, int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw, int vport, u16 vlan, u8 qos) { + struct mlx5_vport *evport; + int err = 0; int set = 0; if (!ESW_ALLOWED(esw)) @@ -1278,7 +1438,25 @@ int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw, if (vlan || qos) set = 1; - return modify_esw_vport_cvlan(esw->dev, vport, vlan, qos, set); + evport = &esw->vports[vport]; + + err = modify_esw_vport_cvlan(esw->dev, vport, vlan, qos, set); + if (err) + return err; + + mutex_lock(&esw->state_lock); + evport->vlan = vlan; + evport->qos = qos; + if (evport->enabled) { + err = esw_vport_ingress_config(esw, evport); + if (err) + goto out; + err = esw_vport_egress_config(esw, evport); + } + +out: + mutex_unlock(&esw->state_lock); + return err; } int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index e6972074c3f7..30d55ace4786 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ -94,13 +94,16 @@ struct vport_ingress { struct mlx5_flow_group *allow_spoofchk_only_grp; struct mlx5_flow_group *allow_untagged_only_grp; struct mlx5_flow_group *drop_grp; - + struct mlx5_flow_rule *allow_rule; + struct mlx5_flow_rule *drop_rule; }; struct vport_egress { struct mlx5_flow_table *acl; struct mlx5_flow_group *allowed_vlans_grp; struct mlx5_flow_group *drop_grp; + struct mlx5_flow_rule *allowed_vlan; + struct mlx5_flow_rule *drop_rule; }; struct mlx5_vport { @@ -113,6 +116,8 @@ struct mlx5_vport { struct vport_ingress ingress; struct vport_egress egress; + u16 vlan; + u8 qos; bool enabled; u16 enabled_events; }; @@ -137,6 +142,10 @@ struct mlx5_eswitch { struct mlx5_vport *vports; int total_vports; int enabled_vports; + /* Synchronize between vport change events + * and async SRIOV admin state changes + */ + struct mutex state_lock; }; /* E-Switch API */ From f942380c12394002efe0ca0be023e0f6fafbf29b Mon Sep 17 00:00:00 2001 From: Mohamad Haj Yahia Date: Tue, 3 May 2016 17:13:59 +0300 Subject: [PATCH 1328/1649] net/mlx5: E-Switch, Vport ingress/egress ACLs rules for spoofchk Configure ingress and egress vport ACL rules according to spoofchk admin parameters. Ingress ACL flow table rules: if (!spoofchk && !vst) allow all traffic. 
else : 1) one of the following rules : * if (spoofchk && vst) allow only untagged traffic with smac=original mac sent from the VF. * if (spoofchk && !vst) allow only traffic with smac=original mac sent from the VF. * if (!spoofchk && vst) allow only untagged traffic. 2) drop all traffic that didn't hit #1. Add support for set vf spoofchk ndo. Add non zero mac validation in case of spoofchk to set mac ndo: when setting new mac we need to validate that the new mac is not zero while the spoofchk is on because it is illegal combination. Signed-off-by: Mohamad Haj Yahia Signed-off-by: Saeed Mahameed Signed-off-by: David S. Miller --- .../net/ethernet/mellanox/mlx5/core/en_main.c | 9 ++ .../net/ethernet/mellanox/mlx5/core/eswitch.c | 112 +++++++++++++++++- .../net/ethernet/mellanox/mlx5/core/eswitch.h | 3 + 3 files changed, 118 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index b435c7b36cfb..5d0911315189 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -2438,6 +2438,14 @@ static int mlx5e_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos) vlan, qos); } +static int mlx5e_set_vf_spoofchk(struct net_device *dev, int vf, bool setting) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + struct mlx5_core_dev *mdev = priv->mdev; + + return mlx5_eswitch_set_vport_spoofchk(mdev->priv.eswitch, vf + 1, setting); +} + static int mlx5_vport_link2ifla(u8 esw_link) { switch (esw_link) { @@ -2607,6 +2615,7 @@ static const struct net_device_ops mlx5e_netdev_ops_sriov = { #endif .ndo_set_vf_mac = mlx5e_set_vf_mac, .ndo_set_vf_vlan = mlx5e_set_vf_vlan, + .ndo_set_vf_spoofchk = mlx5e_set_vf_spoofchk, .ndo_get_vf_config = mlx5e_get_vf_config, .ndo_set_vf_link_state = mlx5e_set_vf_link_state, .ndo_get_vf_stats = mlx5e_get_vf_stats, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index 1e075ed50e5b..17d093cd6fc8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -951,7 +951,12 @@ static void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw, { if (!IS_ERR_OR_NULL(vport->ingress.drop_rule)) mlx5_del_flow_rule(vport->ingress.drop_rule); + + if (!IS_ERR_OR_NULL(vport->ingress.allow_rule)) + mlx5_del_flow_rule(vport->ingress.allow_rule); + vport->ingress.drop_rule = NULL; + vport->ingress.allow_rule = NULL; } static void esw_vport_disable_ingress_acl(struct mlx5_eswitch *esw, @@ -978,9 +983,11 @@ static void esw_vport_disable_ingress_acl(struct mlx5_eswitch *esw, static int esw_vport_ingress_config(struct mlx5_eswitch *esw, struct mlx5_vport *vport) { + u8 smac[ETH_ALEN]; u32 *match_v; u32 *match_c; int err = 0; + u8 *smac_v; if (IS_ERR_OR_NULL(vport->ingress.acl)) { esw_warn(esw->dev, @@ -989,9 +996,26 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw, return -EPERM; } + if (vport->spoofchk) { + err = mlx5_query_nic_vport_mac_address(esw->dev, vport->vport, smac); + if (err) { + esw_warn(esw->dev, + "vport[%d] configure ingress rules failed, query smac failed, err(%d)\n", + vport->vport, err); + return err; + } + + if (!is_valid_ether_addr(smac)) { + mlx5_core_warn(esw->dev, + "vport[%d] configure ingress rules failed, illegal mac with spoofchk\n", + vport->vport); + return -EPERM; + } + } + esw_vport_cleanup_ingress_rules(esw, vport); - if (!vport->vlan && !vport->qos) + if 
(!vport->vlan && !vport->qos && !vport->spoofchk) return 0; esw_debug(esw->dev, @@ -1006,23 +1030,55 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw, vport->vport, err); goto out; } - MLX5_SET_TO_ONES(fte_match_param, match_c, outer_headers.vlan_tag); - MLX5_SET_TO_ONES(fte_match_param, match_v, outer_headers.vlan_tag); - vport->ingress.drop_rule = + if (vport->vlan || vport->qos) + MLX5_SET_TO_ONES(fte_match_param, match_c, outer_headers.vlan_tag); + + if (vport->spoofchk) { + MLX5_SET_TO_ONES(fte_match_param, match_c, outer_headers.smac_47_16); + MLX5_SET_TO_ONES(fte_match_param, match_c, outer_headers.smac_15_0); + smac_v = MLX5_ADDR_OF(fte_match_param, + match_v, + outer_headers.smac_47_16); + ether_addr_copy(smac_v, smac); + } + + vport->ingress.allow_rule = mlx5_add_flow_rule(vport->ingress.acl, MLX5_MATCH_OUTER_HEADERS, match_c, match_v, + MLX5_FLOW_CONTEXT_ACTION_ALLOW, + 0, NULL); + if (IS_ERR_OR_NULL(vport->ingress.allow_rule)) { + err = PTR_ERR(vport->ingress.allow_rule); + pr_warn("vport[%d] configure ingress allow rule, err(%d)\n", + vport->vport, err); + vport->ingress.allow_rule = NULL; + goto out; + } + + memset(match_c, 0, MLX5_ST_SZ_BYTES(fte_match_param)); + memset(match_v, 0, MLX5_ST_SZ_BYTES(fte_match_param)); + vport->ingress.drop_rule = + mlx5_add_flow_rule(vport->ingress.acl, + 0, + match_c, + match_v, MLX5_FLOW_CONTEXT_ACTION_DROP, 0, NULL); if (IS_ERR_OR_NULL(vport->ingress.drop_rule)) { err = PTR_ERR(vport->ingress.drop_rule); - pr_warn("vport[%d] configure ingress rules, err(%d)\n", + pr_warn("vport[%d] configure ingress drop rule, err(%d)\n", vport->vport, err); vport->ingress.drop_rule = NULL; + goto out; } + out: + if (err) + esw_vport_cleanup_ingress_rules(esw, vport); + kfree(match_v); kfree(match_c); return err; @@ -1367,12 +1423,22 @@ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw, int vport, u8 mac[ETH_ALEN]) { int err = 0; + struct mlx5_vport *evport; if (!ESW_ALLOWED(esw)) return -EPERM; if (!LEGAL_VPORT(esw, vport)) return -EINVAL; + evport = &esw->vports[vport]; + + if (evport->spoofchk && !is_valid_ether_addr(mac)) { + mlx5_core_warn(esw->dev, + "MAC invalidation is not allowed when spoofchk is on, vport(%d)\n", + vport); + return -EPERM; + } + err = mlx5_modify_nic_vport_mac_address(esw->dev, vport, mac); if (err) { mlx5_core_warn(esw->dev, @@ -1381,6 +1447,11 @@ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw, return err; } + mutex_lock(&esw->state_lock); + if (evport->enabled) + err = esw_vport_ingress_config(esw, evport); + mutex_unlock(&esw->state_lock); + return err; } @@ -1400,6 +1471,7 @@ int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw, int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw, int vport, struct ifla_vf_info *ivi) { + struct mlx5_vport *evport; u16 vlan; u8 qos; @@ -1408,6 +1480,8 @@ int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw, if (!LEGAL_VPORT(esw, vport)) return -EINVAL; + evport = &esw->vports[vport]; + memset(ivi, 0, sizeof(*ivi)); ivi->vf = vport - 1; @@ -1418,7 +1492,7 @@ int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw, query_esw_vport_cvlan(esw->dev, vport, &vlan, &qos); ivi->vlan = vlan; ivi->qos = qos; - ivi->spoofchk = 0; + ivi->spoofchk = evport->spoofchk; return 0; } @@ -1459,6 +1533,32 @@ out: return err; } +int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw, + int vport, bool spoofchk) +{ + struct mlx5_vport *evport; + bool pschk; + int err = 0; + + if (!ESW_ALLOWED(esw)) + return -EPERM; + if (!LEGAL_VPORT(esw, vport)) + return 
-EINVAL; + + evport = &esw->vports[vport]; + + mutex_lock(&esw->state_lock); + pschk = evport->spoofchk; + evport->spoofchk = spoofchk; + if (evport->enabled) + err = esw_vport_ingress_config(esw, evport); + if (err) + evport->spoofchk = pschk; + mutex_unlock(&esw->state_lock); + + return err; +} + int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw, int vport, struct ifla_vf_stats *vf_stats) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index 30d55ace4786..2f979c9bcb93 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ -118,6 +118,7 @@ struct mlx5_vport { u16 vlan; u8 qos; + bool spoofchk; bool enabled; u16 enabled_events; }; @@ -160,6 +161,8 @@ int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw, int vport, int link_state); int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw, int vport, u16 vlan, u8 qos); +int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw, + int vport, bool spoofchk); int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw, int vport, struct ifla_vf_info *ivi); int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw, From 01f51f2247250eb69d2fd345c498eedbb078bb56 Mon Sep 17 00:00:00 2001 From: Mohamad Haj Yahia Date: Tue, 3 May 2016 17:14:00 +0300 Subject: [PATCH 1329/1649] net/mlx5: E-Switch, Enable/disable ACL tables on demand Enable ingress/egress ACL tables only when we need to configure ACL rules. Disable ingress/egress ACL tables once all ACL rules are removed. All VF outgoing/incoming traffic need to go through the ingress/egress ACL tables. Adding/Removing these tables on demand will save unnecessary hops in the flow steering when the ACL tables are empty. Signed-off-by: Mohamad Haj Yahia Signed-off-by: Saeed Mahameed Signed-off-by: David S. 
Miller --- .../net/ethernet/mellanox/mlx5/core/eswitch.c | 33 ++++++++----------- 1 file changed, 14 insertions(+), 19 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index 17d093cd6fc8..48c891982d7d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -728,7 +728,8 @@ static void esw_vport_enable_egress_acl(struct mlx5_eswitch *esw, int table_size = 2; int err = 0; - if (!MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support)) + if (!MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support) || + !IS_ERR_OR_NULL(vport->egress.acl)) return; esw_debug(dev, "Create vport[%d] egress ACL log_max_size(%d)\n", @@ -841,7 +842,8 @@ static void esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw, int table_size = 4; int err = 0; - if (!MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support)) + if (!MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support) || + !IS_ERR_OR_NULL(vport->ingress.acl)) return; esw_debug(dev, "Create vport[%d] ingress ACL log_max_size(%d)\n", @@ -989,13 +991,6 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw, int err = 0; u8 *smac_v; - if (IS_ERR_OR_NULL(vport->ingress.acl)) { - esw_warn(esw->dev, - "vport[%d] configure ingress rules failed, ingress acl is not initialized!\n", - vport->vport); - return -EPERM; - } - if (vport->spoofchk) { err = mlx5_query_nic_vport_mac_address(esw->dev, vport->vport, smac); if (err) { @@ -1015,8 +1010,12 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw, esw_vport_cleanup_ingress_rules(esw, vport); - if (!vport->vlan && !vport->qos && !vport->spoofchk) + if (!vport->vlan && !vport->qos && !vport->spoofchk) { + esw_vport_disable_ingress_acl(esw, vport); return 0; + } + + esw_vport_enable_ingress_acl(esw, vport); esw_debug(esw->dev, "vport[%d] configure ingress rules, vlan(%d) qos(%d)\n", @@ -1091,16 +1090,14 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw, u32 *match_c; int err = 0; - if (IS_ERR_OR_NULL(vport->egress.acl)) { - esw_warn(esw->dev, "vport[%d] configure rgress rules failed, egress acl is not initialized!\n", - vport->vport); - return -EPERM; - } - esw_vport_cleanup_egress_rules(esw, vport); - if (!vport->vlan && !vport->qos) + if (!vport->vlan && !vport->qos) { + esw_vport_disable_egress_acl(esw, vport); return 0; + } + + esw_vport_enable_egress_acl(esw, vport); esw_debug(esw->dev, "vport[%d] configure egress rules, vlan(%d) qos(%d)\n", @@ -1169,8 +1166,6 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num, esw_debug(esw->dev, "Enabling VPORT(%d)\n", vport_num); if (vport_num) { /* Only VFs need ACLs for VST and spoofchk filtering */ - esw_vport_enable_ingress_acl(esw, vport); - esw_vport_enable_egress_acl(esw, vport); esw_vport_ingress_config(esw, vport); esw_vport_egress_config(esw, vport); } From 586cfa7f1d58a7d480e548d4a9bef7f542a03257 Mon Sep 17 00:00:00 2001 From: Mohamad Haj Yahia Date: Tue, 3 May 2016 17:14:01 +0300 Subject: [PATCH 1330/1649] net/mlx5: E-Switch, Use vport event handler for vport cleanup Remove the usage of explicit cleanup function and use existing vport change handler. Calling vport change handler while vport is disabled will cleanup the vport resources. Signed-off-by: Mohamad Haj Yahia Signed-off-by: Saeed Mahameed Signed-off-by: David S. 
Miller --- .../net/ethernet/mellanox/mlx5/core/eswitch.c | 33 +++++-------------- 1 file changed, 9 insertions(+), 24 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index 48c891982d7d..37b4be9445fc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -643,6 +643,9 @@ static void esw_update_vport_addr_list(struct mlx5_eswitch *esw, addr->action = MLX5_ACTION_DEL; } + if (!vport->enabled) + goto out; + err = mlx5_query_nic_vport_mac_list(esw->dev, vport_num, list_type, mac_list, &size); if (err) @@ -1188,27 +1191,6 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num, mutex_unlock(&esw->state_lock); } -static void esw_cleanup_vport(struct mlx5_eswitch *esw, u16 vport_num) -{ - struct mlx5_vport *vport = &esw->vports[vport_num]; - struct l2addr_node *node; - struct vport_addr *addr; - struct hlist_node *tmp; - int hi; - - for_each_l2hash_node(node, tmp, vport->uc_list, hi) { - addr = container_of(node, struct vport_addr, node); - addr->action = MLX5_ACTION_DEL; - } - esw_apply_vport_addr_list(esw, vport_num, MLX5_NVPRT_LIST_TYPE_UC); - - for_each_l2hash_node(node, tmp, vport->mc_list, hi) { - addr = container_of(node, struct vport_addr, node); - addr->action = MLX5_ACTION_DEL; - } - esw_apply_vport_addr_list(esw, vport_num, MLX5_NVPRT_LIST_TYPE_MC); -} - static void esw_disable_vport(struct mlx5_eswitch *esw, int vport_num) { struct mlx5_vport *vport = &esw->vports[vport_num]; @@ -1219,7 +1201,6 @@ static void esw_disable_vport(struct mlx5_eswitch *esw, int vport_num) esw_debug(esw->dev, "Disabling vport(%d)\n", vport_num); /* Mark this vport as disabled to discard new events */ vport->enabled = false; - vport->enabled_events = 0; synchronize_irq(mlx5_get_msix_vec(esw->dev, MLX5_EQ_VEC_ASYNC)); @@ -1232,8 +1213,12 @@ static void esw_disable_vport(struct mlx5_eswitch *esw, int vport_num) /* Disable events from this vport */ arm_vport_context_events_cmd(esw->dev, vport->vport, 0); mutex_lock(&esw->state_lock); - /* We don't assume VFs will cleanup after themselves */ - esw_cleanup_vport(esw, vport_num); + /* We don't assume VFs will cleanup after themselves. + * Calling vport change handler while vport is disabled will cleanup + * the vport resources. + */ + esw_vport_change_handler(&vport->vport_change_handler); + vport->enabled_events = 0; if (vport_num) { esw_vport_disable_egress_acl(esw, vport); esw_vport_disable_ingress_acl(esw, vport); From 78a9199b7105fa0e843065fed0cca05a998a129b Mon Sep 17 00:00:00 2001 From: Mohamad Haj Yahia Date: Tue, 3 May 2016 17:14:02 +0300 Subject: [PATCH 1331/1649] net/mlx5: E-Switch, Add promiscuous and allmulti FDB flowtable groups Add promiscuous and allmulti steering groups in FDB table. Besides the full match L2 steering rules group, we added two more groups to catch the "miss" rules traffic: * Allmulti group: One rule that forwards any mcast traffic coming from either uplink or VFs/PF vports * Promisc group: One rule that forwards all unmatched traffic coming from uplink. Needed for downstream privileged VF promisc and allmulti support. Signed-off-by: Mohamad Haj Yahia Signed-off-by: Saeed Mahameed Signed-off-by: David S. 
Miller --- .../net/ethernet/mellanox/mlx5/core/eswitch.c | 91 ++++++++++++++++--- .../net/ethernet/mellanox/mlx5/core/eswitch.h | 2 + 2 files changed, 79 insertions(+), 14 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index 37b4be9445fc..6c72562bf7fb 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -323,15 +323,17 @@ static void del_l2_table_entry(struct mlx5_core_dev *dev, u32 index) /* E-Switch FDB */ static struct mlx5_flow_rule * -esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u8 mac[ETH_ALEN], u32 vport) +__esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, + u8 mac_c[ETH_ALEN], u8 mac_v[ETH_ALEN]) { - int match_header = MLX5_MATCH_OUTER_HEADERS; - struct mlx5_flow_destination dest; + int match_header = (is_zero_ether_addr(mac_c) ? 0 : + MLX5_MATCH_OUTER_HEADERS); struct mlx5_flow_rule *flow_rule = NULL; + struct mlx5_flow_destination dest; + u8 *dmac_v = NULL; + u8 *dmac_c = NULL; u32 *match_v; u32 *match_c; - u8 *dmac_v; - u8 *dmac_c; match_v = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL); match_c = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL); @@ -339,14 +341,16 @@ esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u8 mac[ETH_ALEN], u32 vport) pr_warn("FDB: Failed to alloc match parameters\n"); goto out; } + dmac_v = MLX5_ADDR_OF(fte_match_param, match_v, outer_headers.dmac_47_16); dmac_c = MLX5_ADDR_OF(fte_match_param, match_c, outer_headers.dmac_47_16); - ether_addr_copy(dmac_v, mac); - /* Match criteria mask */ - memset(dmac_c, 0xff, 6); + if (match_header == MLX5_MATCH_OUTER_HEADERS) { + ether_addr_copy(dmac_v, mac_v); + ether_addr_copy(dmac_c, mac_c); + } dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT; dest.vport_num = vport; @@ -373,6 +377,15 @@ out: return flow_rule; } +static struct mlx5_flow_rule * +esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u8 mac[ETH_ALEN], u32 vport) +{ + u8 mac_c[ETH_ALEN]; + + eth_broadcast_addr(mac_c); + return __esw_fdb_set_vport_rule(esw, vport, mac_c, mac); +} + static int esw_create_fdb_table(struct mlx5_eswitch *esw, int nvports) { int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); @@ -407,28 +420,74 @@ static int esw_create_fdb_table(struct mlx5_eswitch *esw, int nvports) esw_warn(dev, "Failed to create FDB Table err %d\n", err); goto out; } + esw->fdb_table.fdb = fdb; + /* Addresses group : Full match unicast/multicast addresses */ MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria); dmac = MLX5_ADDR_OF(fte_match_param, match_criteria, outer_headers.dmac_47_16); MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0); - MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 1); + /* Preserve 2 entries for allmulti and promisc rules*/ + MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 3); eth_broadcast_addr(dmac); - g = mlx5_create_flow_group(fdb, flow_group_in); if (IS_ERR_OR_NULL(g)) { err = PTR_ERR(g); esw_warn(dev, "Failed to create flow group err(%d)\n", err); goto out; } - esw->fdb_table.addr_grp = g; - esw->fdb_table.fdb = fdb; + + /* Allmulti group : One rule that forwards any mcast traffic */ + MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, + MLX5_MATCH_OUTER_HEADERS); + MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, table_size - 
2); + MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 2); + eth_zero_addr(dmac); + dmac[0] = 0x01; + g = mlx5_create_flow_group(fdb, flow_group_in); + if (IS_ERR_OR_NULL(g)) { + err = PTR_ERR(g); + esw_warn(dev, "Failed to create allmulti flow group err(%d)\n", err); + goto out; + } + esw->fdb_table.allmulti_grp = g; + + /* Promiscuous group : + * One rule that forward all unmatched traffic from previous groups + */ + eth_zero_addr(dmac); + MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, + MLX5_MATCH_MISC_PARAMETERS); + MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port); + MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, table_size - 1); + MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 1); + g = mlx5_create_flow_group(fdb, flow_group_in); + if (IS_ERR_OR_NULL(g)) { + err = PTR_ERR(g); + esw_warn(dev, "Failed to create promisc flow group err(%d)\n", err); + goto out; + } + esw->fdb_table.promisc_grp = g; + out: + if (err) { + if (!IS_ERR_OR_NULL(esw->fdb_table.allmulti_grp)) { + mlx5_destroy_flow_group(esw->fdb_table.allmulti_grp); + esw->fdb_table.allmulti_grp = NULL; + } + if (!IS_ERR_OR_NULL(esw->fdb_table.addr_grp)) { + mlx5_destroy_flow_group(esw->fdb_table.addr_grp); + esw->fdb_table.addr_grp = NULL; + } + if (!IS_ERR_OR_NULL(esw->fdb_table.fdb)) { + mlx5_destroy_flow_table(esw->fdb_table.fdb); + esw->fdb_table.fdb = NULL; + } + } + kfree(flow_group_in); - if (err && !IS_ERR_OR_NULL(fdb)) - mlx5_destroy_flow_table(fdb); return err; } @@ -438,10 +497,14 @@ static void esw_destroy_fdb_table(struct mlx5_eswitch *esw) return; esw_debug(esw->dev, "Destroy FDB Table\n"); + mlx5_destroy_flow_group(esw->fdb_table.promisc_grp); + mlx5_destroy_flow_group(esw->fdb_table.allmulti_grp); mlx5_destroy_flow_group(esw->fdb_table.addr_grp); mlx5_destroy_flow_table(esw->fdb_table.fdb); esw->fdb_table.fdb = NULL; esw->fdb_table.addr_grp = NULL; + esw->fdb_table.allmulti_grp = NULL; + esw->fdb_table.promisc_grp = NULL; } /* E-Switch vport UC/MC lists management */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index 2f979c9bcb93..36e87cbad5fb 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ -132,6 +132,8 @@ struct mlx5_l2_table { struct mlx5_eswitch_fdb { void *fdb; struct mlx5_flow_group *addr_grp; + struct mlx5_flow_group *allmulti_grp; + struct mlx5_flow_group *promisc_grp; }; struct mlx5_eswitch { From a35f71f27a614aff106cc89b86168962bce2725f Mon Sep 17 00:00:00 2001 From: Mohamad Haj Yahia Date: Tue, 3 May 2016 17:14:03 +0300 Subject: [PATCH 1332/1649] net/mlx5: E-Switch, Implement promiscuous rx modes vf request handling Add promisc_change as a trigger to vport context change event. Add set vport promisc/allmulti functions to add vport to promiscuous flowtable rules. Upon promisc/allmulti rx mode vf request add the vport to the relevant promiscuous group (Allmulti/Promisc group) so the relevant traffic will be forwarded to it. Upon allmulti vf request add the vport to each existing multicast fdb rule. Upon adding/removing mcast address from a vport, update all other allmulti vports. Signed-off-by: Mohamad Haj Yahia Signed-off-by: Saeed Mahameed Signed-off-by: David S. 
Miller --- .../net/ethernet/mellanox/mlx5/core/eswitch.c | 269 +++++++++++++++++- .../net/ethernet/mellanox/mlx5/core/eswitch.h | 4 + 2 files changed, 267 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index 6c72562bf7fb..ad4bc985cc43 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -77,16 +77,20 @@ struct vport_addr { u8 action; u32 vport; struct mlx5_flow_rule *flow_rule; /* SRIOV only */ + /* A flag indicating that mac was added due to mc promiscuous vport */ + bool mc_promisc; }; enum { UC_ADDR_CHANGE = BIT(0), MC_ADDR_CHANGE = BIT(1), + PROMISC_CHANGE = BIT(3), }; /* Vport context events */ #define SRIOV_VPORT_EVENTS (UC_ADDR_CHANGE | \ - MC_ADDR_CHANGE) + MC_ADDR_CHANGE | \ + PROMISC_CHANGE) static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport, u32 events_mask) @@ -116,6 +120,9 @@ static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport, if (events_mask & MC_ADDR_CHANGE) MLX5_SET(nic_vport_context, nic_vport_ctx, event_on_mc_address_change, 1); + if (events_mask & PROMISC_CHANGE) + MLX5_SET(nic_vport_context, nic_vport_ctx, + event_on_promisc_change, 1); err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); if (err) @@ -323,18 +330,22 @@ static void del_l2_table_entry(struct mlx5_core_dev *dev, u32 index) /* E-Switch FDB */ static struct mlx5_flow_rule * -__esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, +__esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule, u8 mac_c[ETH_ALEN], u8 mac_v[ETH_ALEN]) { int match_header = (is_zero_ether_addr(mac_c) ? 0 : MLX5_MATCH_OUTER_HEADERS); struct mlx5_flow_rule *flow_rule = NULL; struct mlx5_flow_destination dest; + void *mv_misc = NULL; + void *mc_misc = NULL; u8 *dmac_v = NULL; u8 *dmac_c = NULL; u32 *match_v; u32 *match_c; + if (rx_rule) + match_header |= MLX5_MATCH_MISC_PARAMETERS; match_v = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL); match_c = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL); if (!match_v || !match_c) { @@ -347,11 +358,18 @@ __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, dmac_c = MLX5_ADDR_OF(fte_match_param, match_c, outer_headers.dmac_47_16); - if (match_header == MLX5_MATCH_OUTER_HEADERS) { + if (match_header & MLX5_MATCH_OUTER_HEADERS) { ether_addr_copy(dmac_v, mac_v); ether_addr_copy(dmac_c, mac_c); } + if (match_header & MLX5_MATCH_MISC_PARAMETERS) { + mv_misc = MLX5_ADDR_OF(fte_match_param, match_v, misc_parameters); + mc_misc = MLX5_ADDR_OF(fte_match_param, match_c, misc_parameters); + MLX5_SET(fte_match_set_misc, mv_misc, source_port, UPLINK_VPORT); + MLX5_SET_TO_ONES(fte_match_set_misc, mc_misc, source_port); + } + dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT; dest.vport_num = vport; @@ -383,7 +401,31 @@ esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u8 mac[ETH_ALEN], u32 vport) u8 mac_c[ETH_ALEN]; eth_broadcast_addr(mac_c); - return __esw_fdb_set_vport_rule(esw, vport, mac_c, mac); + return __esw_fdb_set_vport_rule(esw, vport, false, mac_c, mac); +} + +static struct mlx5_flow_rule * +esw_fdb_set_vport_allmulti_rule(struct mlx5_eswitch *esw, u32 vport) +{ + u8 mac_c[ETH_ALEN]; + u8 mac_v[ETH_ALEN]; + + eth_zero_addr(mac_c); + eth_zero_addr(mac_v); + mac_c[0] = 0x01; + mac_v[0] = 0x01; + return __esw_fdb_set_vport_rule(esw, vport, false, mac_c, mac_v); +} + +static struct mlx5_flow_rule * +esw_fdb_set_vport_promisc_rule(struct 
mlx5_eswitch *esw, u32 vport) +{ + u8 mac_c[ETH_ALEN]; + u8 mac_v[ETH_ALEN]; + + eth_zero_addr(mac_c); + eth_zero_addr(mac_v); + return __esw_fdb_set_vport_rule(esw, vport, true, mac_c, mac_v); } static int esw_create_fdb_table(struct mlx5_eswitch *esw, int nvports) @@ -574,6 +616,52 @@ static int esw_del_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr) return 0; } +static void update_allmulti_vports(struct mlx5_eswitch *esw, + struct vport_addr *vaddr, + struct esw_mc_addr *esw_mc) +{ + u8 *mac = vaddr->node.addr; + u32 vport_idx = 0; + + for (vport_idx = 0; vport_idx < esw->total_vports; vport_idx++) { + struct mlx5_vport *vport = &esw->vports[vport_idx]; + struct hlist_head *vport_hash = vport->mc_list; + struct vport_addr *iter_vaddr = + l2addr_hash_find(vport_hash, + mac, + struct vport_addr); + if (IS_ERR_OR_NULL(vport->allmulti_rule) || + vaddr->vport == vport_idx) + continue; + switch (vaddr->action) { + case MLX5_ACTION_ADD: + if (iter_vaddr) + continue; + iter_vaddr = l2addr_hash_add(vport_hash, mac, + struct vport_addr, + GFP_KERNEL); + if (!iter_vaddr) { + esw_warn(esw->dev, + "ALL-MULTI: Failed to add MAC(%pM) to vport[%d] DB\n", + mac, vport_idx); + continue; + } + iter_vaddr->vport = vport_idx; + iter_vaddr->flow_rule = + esw_fdb_set_vport_rule(esw, + mac, + vport_idx); + break; + case MLX5_ACTION_DEL: + if (!iter_vaddr) + continue; + mlx5_del_flow_rule(iter_vaddr->flow_rule); + l2addr_hash_del(iter_vaddr); + break; + } + } +} + static int esw_add_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr) { struct hlist_head *hash = esw->mc_table; @@ -594,8 +682,17 @@ static int esw_add_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr) esw_mc->uplink_rule = /* Forward MC MAC to Uplink */ esw_fdb_set_vport_rule(esw, mac, UPLINK_VPORT); + + /* Add this multicast mac to all the mc promiscuous vports */ + update_allmulti_vports(esw, vaddr, esw_mc); + add: - esw_mc->refcnt++; + /* If the multicast mac is added as a result of mc promiscuous vport, + * don't increment the multicast ref count + */ + if (!vaddr->mc_promisc) + esw_mc->refcnt++; + /* Forward MC MAC to vport */ vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, vport); esw_debug(esw->dev, @@ -631,9 +728,15 @@ static int esw_del_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr) mlx5_del_flow_rule(vaddr->flow_rule); vaddr->flow_rule = NULL; - if (--esw_mc->refcnt) + /* If the multicast mac is added as a result of mc promiscuous vport, + * don't decrement the multicast ref count. + */ + if (vaddr->mc_promisc || (--esw_mc->refcnt > 0)) return 0; + /* Remove this multicast mac from all the mc promiscuous vports */ + update_allmulti_vports(esw, vaddr, esw_mc); + if (esw_mc->uplink_rule) mlx5_del_flow_rule(esw_mc->uplink_rule); @@ -726,6 +829,24 @@ static void esw_update_vport_addr_list(struct mlx5_eswitch *esw, addr = l2addr_hash_find(hash, mac_list[i], struct vport_addr); if (addr) { addr->action = MLX5_ACTION_NONE; + /* If this mac was previously added because of allmulti + * promiscuous rx mode, its now converted to be original + * vport mac. 
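(Editor's annotation of the reference-counting rule applied at this point, based on the esw_add_mc_addr()/esw_del_mc_addr() changes earlier in this patch; not part of the original code.)

/*
 * esw_mc->refcnt counts only vports that explicitly listed this multicast
 * address in their vport context. Entries installed on behalf of an
 * allmulti vport (mc_promisc) take no reference; when such a vport later
 * lists the address itself, the entry is converted here: a reference is
 * taken and mc_promisc is cleared, so the uplink rule now lives until the
 * last explicit user removes the address.
 */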
+ */ + if (addr->mc_promisc) { + struct esw_mc_addr *esw_mc = + l2addr_hash_find(esw->mc_table, + mac_list[i], + struct esw_mc_addr); + if (!esw_mc) { + esw_warn(esw->dev, + "Failed to MAC(%pM) in mcast DB\n", + mac_list[i]); + continue; + } + esw_mc->refcnt++; + addr->mc_promisc = false; + } continue; } @@ -744,6 +865,115 @@ out: kfree(mac_list); } +/* Sync vport UC/MC list from vport context + * Must be called after esw_update_vport_addr_list + */ +static void esw_update_vport_mc_promisc(struct mlx5_eswitch *esw, u32 vport_num) +{ + struct mlx5_vport *vport = &esw->vports[vport_num]; + struct l2addr_node *node; + struct vport_addr *addr; + struct hlist_head *hash; + struct hlist_node *tmp; + int hi; + + hash = vport->mc_list; + + for_each_l2hash_node(node, tmp, esw->mc_table, hi) { + u8 *mac = node->addr; + + addr = l2addr_hash_find(hash, mac, struct vport_addr); + if (addr) { + if (addr->action == MLX5_ACTION_DEL) + addr->action = MLX5_ACTION_NONE; + continue; + } + addr = l2addr_hash_add(hash, mac, struct vport_addr, + GFP_KERNEL); + if (!addr) { + esw_warn(esw->dev, + "Failed to add allmulti MAC(%pM) to vport[%d] DB\n", + mac, vport_num); + continue; + } + addr->vport = vport_num; + addr->action = MLX5_ACTION_ADD; + addr->mc_promisc = true; + } +} + +/* Apply vport rx mode to HW FDB table */ +static void esw_apply_vport_rx_mode(struct mlx5_eswitch *esw, u32 vport_num, + bool promisc, bool mc_promisc) +{ + struct esw_mc_addr *allmulti_addr = esw->mc_promisc; + struct mlx5_vport *vport = &esw->vports[vport_num]; + + if (IS_ERR_OR_NULL(vport->allmulti_rule) != mc_promisc) + goto promisc; + + if (mc_promisc) { + vport->allmulti_rule = + esw_fdb_set_vport_allmulti_rule(esw, vport_num); + if (!allmulti_addr->uplink_rule) + allmulti_addr->uplink_rule = + esw_fdb_set_vport_allmulti_rule(esw, + UPLINK_VPORT); + allmulti_addr->refcnt++; + } else if (vport->allmulti_rule) { + mlx5_del_flow_rule(vport->allmulti_rule); + vport->allmulti_rule = NULL; + + if (--allmulti_addr->refcnt > 0) + goto promisc; + + if (allmulti_addr->uplink_rule) + mlx5_del_flow_rule(allmulti_addr->uplink_rule); + allmulti_addr->uplink_rule = NULL; + } + +promisc: + if (IS_ERR_OR_NULL(vport->promisc_rule) != promisc) + return; + + if (promisc) { + vport->promisc_rule = esw_fdb_set_vport_promisc_rule(esw, + vport_num); + } else if (vport->promisc_rule) { + mlx5_del_flow_rule(vport->promisc_rule); + vport->promisc_rule = NULL; + } +} + +/* Sync vport rx mode from vport context */ +static void esw_update_vport_rx_mode(struct mlx5_eswitch *esw, u32 vport_num) +{ + struct mlx5_vport *vport = &esw->vports[vport_num]; + int promisc_all = 0; + int promisc_uc = 0; + int promisc_mc = 0; + int err; + + err = mlx5_query_nic_vport_promisc(esw->dev, + vport_num, + &promisc_uc, + &promisc_mc, + &promisc_all); + if (err) + return; + esw_debug(esw->dev, "vport[%d] context update rx mode promisc_all=%d, all_multi=%d\n", + vport_num, promisc_all, promisc_mc); + + if (!vport->trusted || !vport->enabled) { + promisc_uc = 0; + promisc_mc = 0; + promisc_all = 0; + } + + esw_apply_vport_rx_mode(esw, vport_num, promisc_all, + (promisc_all || promisc_mc)); +} + static void esw_vport_change_handler(struct work_struct *work) { struct mlx5_vport *vport = @@ -766,6 +996,15 @@ static void esw_vport_change_handler(struct work_struct *work) if (vport->enabled_events & MC_ADDR_CHANGE) { esw_update_vport_addr_list(esw, vport->vport, MLX5_NVPRT_LIST_TYPE_MC); + } + + if (vport->enabled_events & PROMISC_CHANGE) { + esw_update_vport_rx_mode(esw, vport->vport); 
+ if (!IS_ERR_OR_NULL(vport->allmulti_rule)) + esw_update_vport_mc_promisc(esw, vport->vport); + } + + if (vport->enabled_events & (PROMISC_CHANGE | MC_ADDR_CHANGE)) { esw_apply_vport_addr_list(esw, vport->vport, MLX5_NVPRT_LIST_TYPE_MC); } @@ -1247,6 +1486,9 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num, vport->enabled = true; + /* only PF is trusted by default */ + vport->trusted = (vport_num) ? false : true; + arm_vport_context_events_cmd(esw->dev, vport_num, enable_events); esw->enabled_vports++; @@ -1334,6 +1576,7 @@ abort: void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw) { + struct esw_mc_addr *mc_promisc; int i; if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager) || @@ -1343,9 +1586,14 @@ void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw) esw_info(esw->dev, "disable SRIOV: active vports(%d)\n", esw->enabled_vports); + mc_promisc = esw->mc_promisc; + for (i = 0; i < esw->total_vports; i++) esw_disable_vport(esw, i); + if (mc_promisc && mc_promisc->uplink_rule) + mlx5_del_flow_rule(mc_promisc->uplink_rule); + esw_destroy_fdb_table(esw); /* VPORT 0 (PF) must be enabled back with non-sriov configuration */ @@ -1356,6 +1604,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev) { int l2_table_size = 1 << MLX5_CAP_GEN(dev, log_max_l2_table); int total_vports = MLX5_TOTAL_VPORTS(dev); + struct esw_mc_addr *mc_promisc; struct mlx5_eswitch *esw; int vport_num; int err; @@ -1384,6 +1633,13 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev) } esw->l2_table.size = l2_table_size; + mc_promisc = kzalloc(sizeof(*mc_promisc), GFP_KERNEL); + if (!mc_promisc) { + err = -ENOMEM; + goto abort; + } + esw->mc_promisc = mc_promisc; + esw->work_queue = create_singlethread_workqueue("mlx5_esw_wq"); if (!esw->work_queue) { err = -ENOMEM; @@ -1436,6 +1692,7 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) esw->dev->priv.eswitch = NULL; destroy_workqueue(esw->work_queue); kfree(esw->l2_table.bitmap); + kfree(esw->mc_promisc); kfree(esw->vports); kfree(esw); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index 36e87cbad5fb..a39f18e3bd18 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ -111,6 +111,8 @@ struct mlx5_vport { int vport; struct hlist_head uc_list[MLX5_L2_ADDR_HASH_SIZE]; struct hlist_head mc_list[MLX5_L2_ADDR_HASH_SIZE]; + struct mlx5_flow_rule *promisc_rule; + struct mlx5_flow_rule *allmulti_rule; struct work_struct vport_change_handler; struct vport_ingress ingress; @@ -119,6 +121,7 @@ struct mlx5_vport { u16 vlan; u8 qos; bool spoofchk; + bool trusted; bool enabled; u16 enabled_events; }; @@ -149,6 +152,7 @@ struct mlx5_eswitch { * and async SRIOV admin state changes */ struct mutex state_lock; + struct esw_mc_addr *mc_promisc; }; /* E-Switch API */ From 1edc57e2b3d3bf8672bb1553dbd541cc94f54937 Mon Sep 17 00:00:00 2001 From: Mohamad Haj Yahia Date: Tue, 3 May 2016 17:14:04 +0300 Subject: [PATCH 1333/1649] net/mlx5: E-Switch, Implement trust vf ndo - Add support to configure trusted vf attribute through trust_vf_ndo. - Upon VF trust setting change we update vport context to refresh allmulti/promisc or any trusted vf attributes that we didn't trust the VF for before. - Lock the eswitch state lock on vport event in order to synchronise the vport context updates , this will prevent contention with vport trust setting change which will trigger vport mac list update. 
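For reference, the trust attribute is driven from userspace via iproute2, e.g.:

    ip link set <pf-netdev> vf 0 trust on

(<pf-netdev> is a placeholder for the PF net device.) The networking core forwards this to the driver's new .ndo_set_vf_trust callback (mlx5e_set_vf_trust), and the eswitch handler applies it by refreshing the vport context under the eswitch state lock.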
Signed-off-by: Mohamad Haj Yahia Signed-off-by: Saeed Mahameed Signed-off-by: David S. Miller --- .../net/ethernet/mellanox/mlx5/core/en_main.c | 8 ++++ .../net/ethernet/mellanox/mlx5/core/eswitch.c | 40 ++++++++++++++++--- .../net/ethernet/mellanox/mlx5/core/eswitch.h | 2 + 3 files changed, 45 insertions(+), 5 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 5d0911315189..1c70e518b5c5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -2446,6 +2446,13 @@ static int mlx5e_set_vf_spoofchk(struct net_device *dev, int vf, bool setting) return mlx5_eswitch_set_vport_spoofchk(mdev->priv.eswitch, vf + 1, setting); } +static int mlx5e_set_vf_trust(struct net_device *dev, int vf, bool setting) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + struct mlx5_core_dev *mdev = priv->mdev; + + return mlx5_eswitch_set_vport_trust(mdev->priv.eswitch, vf + 1, setting); +} static int mlx5_vport_link2ifla(u8 esw_link) { switch (esw_link) { @@ -2616,6 +2623,7 @@ static const struct net_device_ops mlx5e_netdev_ops_sriov = { .ndo_set_vf_mac = mlx5e_set_vf_mac, .ndo_set_vf_vlan = mlx5e_set_vf_vlan, .ndo_set_vf_spoofchk = mlx5e_set_vf_spoofchk, + .ndo_set_vf_trust = mlx5e_set_vf_trust, .ndo_get_vf_config = mlx5e_get_vf_config, .ndo_set_vf_link_state = mlx5e_set_vf_link_state, .ndo_get_vf_stats = mlx5e_get_vf_stats, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index ad4bc985cc43..b84a6918a700 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -974,10 +974,8 @@ static void esw_update_vport_rx_mode(struct mlx5_eswitch *esw, u32 vport_num) (promisc_all || promisc_mc)); } -static void esw_vport_change_handler(struct work_struct *work) +static void esw_vport_change_handle_locked(struct mlx5_vport *vport) { - struct mlx5_vport *vport = - container_of(work, struct mlx5_vport, vport_change_handler); struct mlx5_core_dev *dev = vport->dev; struct mlx5_eswitch *esw = dev->priv.eswitch; u8 mac[ETH_ALEN]; @@ -1015,6 +1013,17 @@ static void esw_vport_change_handler(struct work_struct *work) vport->enabled_events); } +static void esw_vport_change_handler(struct work_struct *work) +{ + struct mlx5_vport *vport = + container_of(work, struct mlx5_vport, vport_change_handler); + struct mlx5_eswitch *esw = vport->dev->priv.eswitch; + + mutex_lock(&esw->state_lock); + esw_vport_change_handle_locked(vport); + mutex_unlock(&esw->state_lock); +} + static void esw_vport_enable_egress_acl(struct mlx5_eswitch *esw, struct mlx5_vport *vport) { @@ -1482,7 +1491,7 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num, /* Sync with current vport context */ vport->enabled_events = enable_events; - esw_vport_change_handler(&vport->vport_change_handler); + esw_vport_change_handle_locked(vport); vport->enabled = true; @@ -1522,7 +1531,7 @@ static void esw_disable_vport(struct mlx5_eswitch *esw, int vport_num) * Calling vport change handler while vport is disabled will cleanup * the vport resources. 
*/ - esw_vport_change_handler(&vport->vport_change_handler); + esw_vport_change_handle_locked(vport); vport->enabled_events = 0; if (vport_num) { esw_vport_disable_egress_acl(esw, vport); @@ -1859,6 +1868,27 @@ int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw, return err; } +int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw, + int vport, bool setting) +{ + struct mlx5_vport *evport; + + if (!ESW_ALLOWED(esw)) + return -EPERM; + if (!LEGAL_VPORT(esw, vport)) + return -EINVAL; + + evport = &esw->vports[vport]; + + mutex_lock(&esw->state_lock); + evport->trusted = setting; + if (evport->enabled) + esw_vport_change_handle_locked(evport); + mutex_unlock(&esw->state_lock); + + return 0; +} + int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw, int vport, struct ifla_vf_stats *vf_stats) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index a39f18e3bd18..fd6800256d4a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ -169,6 +169,8 @@ int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw, int vport, u16 vlan, u8 qos); int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw, int vport, bool spoofchk); +int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw, + int vport_num, bool setting); int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw, int vport, struct ifla_vf_info *ivi); int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw, From 00b2034029840ddad255352c46db0ae21342ce56 Mon Sep 17 00:00:00 2001 From: Jiri Benc Date: Tue, 3 May 2016 17:10:06 +0200 Subject: [PATCH 1334/1649] gre: remove superfluous pskb_may_pull The call to gre_parse_header is either followed by iptunnel_pull_header, or in the case of ICMP error path, the actual header is not accessed at all. In the first case, iptunnel_pull_header will call pskb_may_pull anyway and it's pointless to do it twice. The only difference is what call will fail with what error code but the net effect is still the same in all call sites. In the second case, pskb_may_pull is pointless, as skb->data is at the outer IP header and not at the GRE header. Signed-off-by: Jiri Benc Signed-off-by: David S. Miller --- net/ipv4/gre_demux.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/net/ipv4/gre_demux.c b/net/ipv4/gre_demux.c index a41e73ab1369..d78e2eefc0f7 100644 --- a/net/ipv4/gre_demux.c +++ b/net/ipv4/gre_demux.c @@ -114,11 +114,8 @@ int gre_parse_header(struct sk_buff *skb, struct tnl_ptk_info *tpi, */ if (greh->flags == 0 && tpi->proto == htons(ETH_P_WCCP)) { tpi->proto = htons(ETH_P_IP); - if ((*(u8 *)options & 0xF0) != 0x40) { + if ((*(u8 *)options & 0xF0) != 0x40) hdr_len += 4; - if (!pskb_may_pull(skb, hdr_len)) - return -EINVAL; - } } return hdr_len; } From 244a797bdcf1b74567fa59d7e72d89ed0ee1ffd9 Mon Sep 17 00:00:00 2001 From: Jiri Benc Date: Tue, 3 May 2016 17:10:07 +0200 Subject: [PATCH 1335/1649] gre: move iptunnel_pull_header down to ipgre_rcv This will allow to make the pull dependent on the tunnel type. Signed-off-by: Jiri Benc Signed-off-by: David S. 
Miller --- net/ipv4/ip_gre.c | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index 8260a707b9b8..8f377dad5489 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c @@ -260,7 +260,8 @@ static __be32 tunnel_id_to_key(__be64 x) #endif } -static int ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi) +static int ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi, + int hdr_len) { struct net *net = dev_net(skb->dev); struct metadata_dst *tun_dst = NULL; @@ -278,6 +279,9 @@ static int ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi) iph->saddr, iph->daddr, tpi->key); if (tunnel) { + if (iptunnel_pull_header(skb, hdr_len, tpi->proto, false) < 0) + goto drop; + skb_pop_mac_header(skb); if (tunnel->collect_md) { __be16 flags; @@ -294,6 +298,10 @@ static int ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi) return PACKET_RCVD; } return PACKET_REJECT; + +drop: + kfree_skb(skb); + return PACKET_RCVD; } static int gre_rcv(struct sk_buff *skb) @@ -314,10 +322,7 @@ static int gre_rcv(struct sk_buff *skb) if (hdr_len < 0) goto drop; - if (iptunnel_pull_header(skb, hdr_len, tpi.proto, false)) - goto drop; - - if (ipgre_rcv(skb, &tpi) == PACKET_RCVD) + if (ipgre_rcv(skb, &tpi, hdr_len) == PACKET_RCVD) return 0; icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0); From 125372faa4feb15e86f410c1adabbca9186d9c4a Mon Sep 17 00:00:00 2001 From: Jiri Benc Date: Tue, 3 May 2016 17:10:08 +0200 Subject: [PATCH 1336/1649] gre: receive also TEB packets for lwtunnels For ipgre interfaces in collect metadata mode, receive also traffic with encapsulated Ethernet headers. The lwtunnel users are supposed to sort this out correctly. This allows to have mixed Ethernet + L3-only traffic on the same lwtunnel interface. This is the same way as VXLAN-GPE behaves. To keep backwards compatibility and prevent any surprises, gretap interfaces have priority in receiving packets with Ethernet headers. Signed-off-by: Jiri Benc Signed-off-by: David S. 
Miller --- include/net/ip_tunnels.h | 1 + net/ipv4/ip_gre.c | 39 ++++++++++++++++++++++++++++----------- 2 files changed, 29 insertions(+), 11 deletions(-) diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h index 6d790910ebdf..d916b4315903 100644 --- a/include/net/ip_tunnels.h +++ b/include/net/ip_tunnels.h @@ -160,6 +160,7 @@ struct tnl_ptk_info { #define PACKET_RCVD 0 #define PACKET_REJECT 1 +#define PACKET_NEXT 2 #define IP_TNL_HASH_BITS 7 #define IP_TNL_HASH_SIZE (1 << IP_TNL_HASH_BITS) diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index 8f377dad5489..2b267e71ebf5 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c @@ -260,26 +260,20 @@ static __be32 tunnel_id_to_key(__be64 x) #endif } -static int ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi, - int hdr_len) +static int __ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi, + struct ip_tunnel_net *itn, int hdr_len, bool raw_proto) { - struct net *net = dev_net(skb->dev); struct metadata_dst *tun_dst = NULL; - struct ip_tunnel_net *itn; const struct iphdr *iph; struct ip_tunnel *tunnel; - if (tpi->proto == htons(ETH_P_TEB)) - itn = net_generic(net, gre_tap_net_id); - else - itn = net_generic(net, ipgre_net_id); - iph = ip_hdr(skb); tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags, iph->saddr, iph->daddr, tpi->key); if (tunnel) { - if (iptunnel_pull_header(skb, hdr_len, tpi->proto, false) < 0) + if (__iptunnel_pull_header(skb, hdr_len, tpi->proto, + raw_proto, false) < 0) goto drop; skb_pop_mac_header(skb); @@ -297,13 +291,36 @@ static int ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi, ip_tunnel_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error); return PACKET_RCVD; } - return PACKET_REJECT; + return PACKET_NEXT; drop: kfree_skb(skb); return PACKET_RCVD; } +static int ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi, + int hdr_len) +{ + struct net *net = dev_net(skb->dev); + struct ip_tunnel_net *itn; + int res; + + if (tpi->proto == htons(ETH_P_TEB)) + itn = net_generic(net, gre_tap_net_id); + else + itn = net_generic(net, ipgre_net_id); + + res = __ipgre_rcv(skb, tpi, itn, hdr_len, false); + if (res == PACKET_NEXT && tpi->proto == htons(ETH_P_TEB)) { + /* ipgre tunnels in collect metadata mode should receive + * also ETH_P_TEB traffic. + */ + itn = net_generic(net, ipgre_net_id); + res = __ipgre_rcv(skb, tpi, itn, hdr_len, true); + } + return res; +} + static int gre_rcv(struct sk_buff *skb) { struct tnl_ptk_info tpi; From 8bf42e9e51cce73874252e5b8fb938bb09bf0ce4 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Tue, 3 May 2016 17:19:57 +0200 Subject: [PATCH 1337/1649] gre6: add Kconfig dependency for NET_IPGRE_DEMUX The ipv6 gre implementation was cleaned up to share more code with the ipv4 version, but it can be enabled even when NET_IPGRE_DEMUX is disabled, resulting in a link error: net/built-in.o: In function `gre_rcv': :(.text+0x17f5d0): undefined reference to `gre_parse_header' ERROR: "gre_parse_header" [net/ipv6/ip6_gre.ko] undefined! This adds a Kconfig dependency to prevent that now invalid configuration. Signed-off-by: Arnd Bergmann Fixes: 308edfdf1563 ("gre6: Cleanup GREv6 receive path, call common GRE functions") Acked-by: Tom Herbert Signed-off-by: David S. 
Miller --- net/ipv6/Kconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/net/ipv6/Kconfig b/net/ipv6/Kconfig index 11e875ffd7ac..3f8411328de5 100644 --- a/net/ipv6/Kconfig +++ b/net/ipv6/Kconfig @@ -218,6 +218,7 @@ config IPV6_GRE tristate "IPv6: GRE tunnel" select IPV6_TUNNEL select NET_IP_TUNNEL + depends on NET_IPGRE_DEMUX ---help--- Tunneling means encapsulating data of one protocol type within another protocol and sending it over a channel that understands the From a6e5472dc3d99201d0f59dd4d1faf0dcf7d978c3 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Tue, 3 May 2016 18:53:15 +0200 Subject: [PATCH 1338/1649] dmfe: kill DEVICE define use net_device directly. Compile tested, objdiff shows no changes. Signed-off-by: Florian Westphal Signed-off-by: David S. Miller --- drivers/net/ethernet/dec/tulip/dmfe.c | 39 +++++++++++++-------------- 1 file changed, 18 insertions(+), 21 deletions(-) diff --git a/drivers/net/ethernet/dec/tulip/dmfe.c b/drivers/net/ethernet/dec/tulip/dmfe.c index afd8e78e024e..42c759ef8ff0 100644 --- a/drivers/net/ethernet/dec/tulip/dmfe.c +++ b/drivers/net/ethernet/dec/tulip/dmfe.c @@ -192,9 +192,6 @@ (__CHK_IO_SIZE(((pci_dev)->device << 16) | (pci_dev)->vendor, \ (pci_dev)->revision)) -/* Sten Check */ -#define DEVICE net_device - /* Structure/enum declaration ------------------------------- */ struct tx_desc { __le32 tdes0, tdes1, tdes2, tdes3; /* Data for the card */ @@ -313,10 +310,10 @@ static u8 SF_mode; /* Special Function: 1:VLAN, 2:RX Flow Control /* function declaration ------------------------------------- */ -static int dmfe_open(struct DEVICE *); -static netdev_tx_t dmfe_start_xmit(struct sk_buff *, struct DEVICE *); -static int dmfe_stop(struct DEVICE *); -static void dmfe_set_filter_mode(struct DEVICE *); +static int dmfe_open(struct net_device *); +static netdev_tx_t dmfe_start_xmit(struct sk_buff *, struct net_device *); +static int dmfe_stop(struct net_device *); +static void dmfe_set_filter_mode(struct net_device *); static const struct ethtool_ops netdev_ethtool_ops; static u16 read_srom_word(void __iomem *, int); static irqreturn_t dmfe_interrupt(int , void *); @@ -326,8 +323,8 @@ static void poll_dmfe (struct net_device *dev); static void dmfe_descriptor_init(struct net_device *); static void allocate_rx_buffer(struct net_device *); static void update_cr6(u32, void __iomem *); -static void send_filter_frame(struct DEVICE *); -static void dm9132_id_table(struct DEVICE *); +static void send_filter_frame(struct net_device *); +static void dm9132_id_table(struct net_device *); static u16 dmfe_phy_read(void __iomem *, u8, u8, u32); static void dmfe_phy_write(void __iomem *, u8, u8, u16, u32); static void dmfe_phy_write_1bit(void __iomem *, u32); @@ -336,12 +333,12 @@ static u8 dmfe_sense_speed(struct dmfe_board_info *); static void dmfe_process_mode(struct dmfe_board_info *); static void dmfe_timer(unsigned long); static inline u32 cal_CRC(unsigned char *, unsigned int, u8); -static void dmfe_rx_packet(struct DEVICE *, struct dmfe_board_info *); -static void dmfe_free_tx_pkt(struct DEVICE *, struct dmfe_board_info *); +static void dmfe_rx_packet(struct net_device *, struct dmfe_board_info *); +static void dmfe_free_tx_pkt(struct net_device *, struct dmfe_board_info *); static void dmfe_reuse_skb(struct dmfe_board_info *, struct sk_buff *); -static void dmfe_dynamic_reset(struct DEVICE *); +static void dmfe_dynamic_reset(struct net_device *); static void dmfe_free_rxbuffer(struct dmfe_board_info *); -static void dmfe_init_dm910x(struct 
DEVICE *); +static void dmfe_init_dm910x(struct net_device *); static void dmfe_parse_srom(struct dmfe_board_info *); static void dmfe_program_DM9801(struct dmfe_board_info *, int); static void dmfe_program_DM9802(struct dmfe_board_info *); @@ -558,7 +555,7 @@ static void dmfe_remove_one(struct pci_dev *pdev) * The interface is opened whenever "ifconfig" actives it. */ -static int dmfe_open(struct DEVICE *dev) +static int dmfe_open(struct net_device *dev) { struct dmfe_board_info *db = netdev_priv(dev); const int irq = db->pdev->irq; @@ -617,7 +614,7 @@ static int dmfe_open(struct DEVICE *dev) * Enable Tx/Rx machine */ -static void dmfe_init_dm910x(struct DEVICE *dev) +static void dmfe_init_dm910x(struct net_device *dev) { struct dmfe_board_info *db = netdev_priv(dev); void __iomem *ioaddr = db->ioaddr; @@ -684,7 +681,7 @@ static void dmfe_init_dm910x(struct DEVICE *dev) */ static netdev_tx_t dmfe_start_xmit(struct sk_buff *skb, - struct DEVICE *dev) + struct net_device *dev) { struct dmfe_board_info *db = netdev_priv(dev); void __iomem *ioaddr = db->ioaddr; @@ -754,7 +751,7 @@ static netdev_tx_t dmfe_start_xmit(struct sk_buff *skb, * The interface is stopped when it is brought. */ -static int dmfe_stop(struct DEVICE *dev) +static int dmfe_stop(struct net_device *dev) { struct dmfe_board_info *db = netdev_priv(dev); void __iomem *ioaddr = db->ioaddr; @@ -798,7 +795,7 @@ static int dmfe_stop(struct DEVICE *dev) static irqreturn_t dmfe_interrupt(int irq, void *dev_id) { - struct DEVICE *dev = dev_id; + struct net_device *dev = dev_id; struct dmfe_board_info *db = netdev_priv(dev); void __iomem *ioaddr = db->ioaddr; unsigned long flags; @@ -879,7 +876,7 @@ static void poll_dmfe (struct net_device *dev) * Free TX resource after TX complete */ -static void dmfe_free_tx_pkt(struct DEVICE *dev, struct dmfe_board_info * db) +static void dmfe_free_tx_pkt(struct net_device *dev, struct dmfe_board_info *db) { struct tx_desc *txptr; void __iomem *ioaddr = db->ioaddr; @@ -961,7 +958,7 @@ static inline u32 cal_CRC(unsigned char * Data, unsigned int Len, u8 flag) * Receive the come packet and pass to upper layer */ -static void dmfe_rx_packet(struct DEVICE *dev, struct dmfe_board_info * db) +static void dmfe_rx_packet(struct net_device *dev, struct dmfe_board_info *db) { struct rx_desc *rxptr; struct sk_buff *skb, *newskb; @@ -1052,7 +1049,7 @@ static void dmfe_rx_packet(struct DEVICE *dev, struct dmfe_board_info * db) * Set DM910X multicast address */ -static void dmfe_set_filter_mode(struct DEVICE * dev) +static void dmfe_set_filter_mode(struct net_device *dev) { struct dmfe_board_info *db = netdev_priv(dev); unsigned long flags; From 4d0e965732db6f7cce78e6b8f5d3073249004c3a Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Tue, 3 May 2016 16:30:59 +0200 Subject: [PATCH 1339/1649] drivers: replace dev->trans_start accesses with dev_trans_start a trans_start struct member exists twice: - in struct net_device (legacy) - in struct netdev_queue Instead of open-coding dev->trans_start usage to obtain the current trans_start value, use dev_trans_start() instead. This is not exactly the same, as dev_trans_start also considers the trans_start values of the netdev queues owned by the device and provides the most recent one. For legacy devices this doesn't matter as dev_trans_start can cope with netdev trans_start values of 0 (they are ignored). This is a prerequisite to eventual removal of dev->trans_start. Cc: linux-rdma@vger.kernel.org Signed-off-by: Florian Westphal Signed-off-by: David S. 
Miller --- drivers/infiniband/ulp/ipoib/ipoib_main.c | 2 +- drivers/net/ethernet/intel/e1000e/netdev.c | 2 +- drivers/net/ethernet/intel/igb/igb_main.c | 2 +- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 2 +- drivers/net/ethernet/microchip/encx24j600.c | 2 +- drivers/net/ethernet/qualcomm/qca_spi.c | 2 +- drivers/net/fjes/fjes_main.c | 2 +- drivers/net/hamradio/mkiss.c | 2 +- drivers/staging/rtl8192e/rtllib_softmac.c | 2 +- drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c | 2 +- 10 files changed, 10 insertions(+), 10 deletions(-) diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index 80807d6e5c4c..b940ef1c19c7 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c @@ -1036,7 +1036,7 @@ static void ipoib_timeout(struct net_device *dev) struct ipoib_dev_priv *priv = netdev_priv(dev); ipoib_warn(priv, "transmit timeout: latency %d msecs\n", - jiffies_to_msecs(jiffies - dev->trans_start)); + jiffies_to_msecs(jiffies - dev_trans_start(dev))); ipoib_warn(priv, "queue stopped %d, tx_head %u, tx_tail %u\n", netif_queue_stopped(dev), priv->tx_head, priv->tx_tail); diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index a7f16c35ebcd..269087cb7b96 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -242,7 +242,7 @@ static void e1000e_dump(struct e1000_adapter *adapter) dev_info(&adapter->pdev->dev, "Net device Info\n"); pr_info("Device Name state trans_start last_rx\n"); pr_info("%-15s %016lX %016lX %016lX\n", netdev->name, - netdev->state, netdev->trans_start, netdev->last_rx); + netdev->state, dev_trans_start(netdev), netdev->last_rx); } /* Print Registers */ diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 8e96c35307fb..7460bdbe2e49 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -383,7 +383,7 @@ static void igb_dump(struct igb_adapter *adapter) dev_info(&adapter->pdev->dev, "Net device Info\n"); pr_info("Device Name state trans_start last_rx\n"); pr_info("%-15s %016lX %016lX %016lX\n", netdev->name, - netdev->state, netdev->trans_start, netdev->last_rx); + netdev->state, dev_trans_start(netdev), netdev->last_rx); } /* Print Registers */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 0ef4a15bb23e..18dcfc577ba9 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -608,7 +608,7 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter) pr_info("%-15s %016lX %016lX %016lX\n", netdev->name, netdev->state, - netdev->trans_start, + dev_trans_start(netdev), netdev->last_rx); } diff --git a/drivers/net/ethernet/microchip/encx24j600.c b/drivers/net/ethernet/microchip/encx24j600.c index 7df318346b05..707283bb62ba 100644 --- a/drivers/net/ethernet/microchip/encx24j600.c +++ b/drivers/net/ethernet/microchip/encx24j600.c @@ -890,7 +890,7 @@ static void encx24j600_tx_timeout(struct net_device *dev) struct encx24j600_priv *priv = netdev_priv(dev); netif_err(priv, tx_err, dev, "TX timeout at %ld, latency %ld\n", - jiffies, jiffies - dev->trans_start); + jiffies, jiffies - dev_trans_start(dev)); dev->stats.tx_errors++; netif_wake_queue(dev); diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c index 1ef03939d25f..82f3c8811400 
100644 --- a/drivers/net/ethernet/qualcomm/qca_spi.c +++ b/drivers/net/ethernet/qualcomm/qca_spi.c @@ -734,7 +734,7 @@ qcaspi_netdev_tx_timeout(struct net_device *dev) struct qcaspi *qca = netdev_priv(dev); netdev_info(qca->net_dev, "Transmit timeout at %ld, latency %ld\n", - jiffies, jiffies - dev->trans_start); + jiffies, jiffies - dev_trans_start(dev)); qca->net_dev->stats.tx_errors++; /* Trigger tx queue flush and QCA7000 reset */ qca->sync = QCASPI_SYNC_UNKNOWN; diff --git a/drivers/net/fjes/fjes_main.c b/drivers/net/fjes/fjes_main.c index bb7e90368f8f..7ad3d04314c5 100644 --- a/drivers/net/fjes/fjes_main.c +++ b/drivers/net/fjes/fjes_main.c @@ -471,7 +471,7 @@ static void fjes_tx_stall_task(struct work_struct *work) int i; if (((long)jiffies - - (long)(netdev->trans_start)) > FJES_TX_TX_STALL_TIMEOUT) { + dev_trans_start(netdev)) > FJES_TX_TX_STALL_TIMEOUT) { netif_wake_queue(netdev); return; } diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c index 85828f153445..c685937e1de3 100644 --- a/drivers/net/hamradio/mkiss.c +++ b/drivers/net/hamradio/mkiss.c @@ -542,7 +542,7 @@ static netdev_tx_t ax_xmit(struct sk_buff *skb, struct net_device *dev) * May be we must check transmitter timeout here ? * 14 Oct 1994 Dmitry Gorodchanin. */ - if (time_before(jiffies, dev->trans_start + 20 * HZ)) { + if (time_before(jiffies, dev_trans_start(dev) + 20 * HZ)) { /* 20 sec timeout not reached */ return NETDEV_TX_BUSY; } diff --git a/drivers/staging/rtl8192e/rtllib_softmac.c b/drivers/staging/rtl8192e/rtllib_softmac.c index cfab715495ad..62154e3f4463 100644 --- a/drivers/staging/rtl8192e/rtllib_softmac.c +++ b/drivers/staging/rtl8192e/rtllib_softmac.c @@ -1991,7 +1991,7 @@ static short rtllib_sta_ps_sleep(struct rtllib_device *ieee, u64 *time) return 2; if (!time_after(jiffies, - ieee->dev->trans_start + msecs_to_jiffies(timeout))) + dev_trans_start(ieee->dev) + msecs_to_jiffies(timeout))) return 0; if (!time_after(jiffies, ieee->last_rx_ps_time + msecs_to_jiffies(timeout))) diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c index ae1274cfb392..de714501c996 100644 --- a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c +++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c @@ -1737,7 +1737,7 @@ static short ieee80211_sta_ps_sleep(struct ieee80211_device *ieee, u32 *time_h, return 2; if(!time_after(jiffies, - ieee->dev->trans_start + msecs_to_jiffies(timeout))) + dev_trans_start(ieee->dev) + msecs_to_jiffies(timeout))) return 0; if(!time_after(jiffies, From ba162f8eed61a7e71e26455ce1cff5b5898a3579 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Tue, 3 May 2016 16:31:00 +0200 Subject: [PATCH 1340/1649] netdevice: add helper to update trans_start trans_start exists twice: - as member of net_device (legacy) - as member of netdev_queue In order to get rid of the legacy case, add a helper for the dev->trans_update (this patch), then convert spots that do dev->trans_start = jiffies to use this helper (next patch). This would then allow us to change the helper so that it updates the trans_stamp of netdev queue 0 instead. Signed-off-by: Florian Westphal Signed-off-by: David S. 
Miller --- include/linux/netdevice.h | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index bcf012637d10..f53412cccbaa 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -3481,6 +3481,12 @@ static inline void txq_trans_update(struct netdev_queue *txq) txq->trans_start = jiffies; } +/* legacy drivers only, netdev_start_xmit() sets txq->trans_start */ +static inline void netif_trans_update(struct net_device *dev) +{ + dev->trans_start = jiffies; +} + /** * netif_tx_lock - grab network device transmit lock * @dev: network device From 860e9538a9482bb84589f7d0718a7e6d0a944d58 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Tue, 3 May 2016 16:33:13 +0200 Subject: [PATCH 1341/1649] treewide: replace dev->trans_start update with helper Replace all trans_start updates with netif_trans_update helper. change was done via spatch: struct net_device *d; @@ - d->trans_start = jiffies + netif_trans_update(d) Compile tested only. Cc: user-mode-linux-devel@lists.sourceforge.net Cc: linux-xtensa@linux-xtensa.org Cc: linux1394-devel@lists.sourceforge.net Cc: linux-rdma@vger.kernel.org Cc: netdev@vger.kernel.org Cc: MPT-FusionLinux.pdl@broadcom.com Cc: linux-scsi@vger.kernel.org Cc: linux-can@vger.kernel.org Cc: linux-parisc@vger.kernel.org Cc: linux-omap@vger.kernel.org Cc: linux-hams@vger.kernel.org Cc: linux-usb@vger.kernel.org Cc: linux-wireless@vger.kernel.org Cc: linux-s390@vger.kernel.org Cc: devel@driverdev.osuosl.org Cc: b.a.t.m.a.n@lists.open-mesh.org Cc: linux-bluetooth@vger.kernel.org Signed-off-by: Florian Westphal Acked-by: Felipe Balbi Acked-by: Mugunthan V N Acked-by: Antonio Quartulli Signed-off-by: David S. Miller --- arch/um/drivers/net_kern.c | 4 ++-- arch/xtensa/platforms/iss/network.c | 2 +- drivers/char/pcmcia/synclink_cs.c | 4 ++-- drivers/firewire/net.c | 2 +- drivers/infiniband/hw/nes/nes_nic.c | 2 +- drivers/infiniband/ulp/ipoib/ipoib_cm.c | 2 +- drivers/infiniband/ulp/ipoib/ipoib_ib.c | 2 +- drivers/isdn/hysdn/hysdn_net.c | 2 +- drivers/isdn/i4l/isdn_net.c | 4 ++-- drivers/isdn/i4l/isdn_x25iface.c | 2 +- drivers/message/fusion/mptlan.c | 2 +- drivers/net/appletalk/cops.c | 2 +- drivers/net/can/mscan/mscan.c | 4 ++-- drivers/net/can/usb/ems_usb.c | 4 ++-- drivers/net/can/usb/esd_usb2.c | 4 ++-- drivers/net/can/usb/peak_usb/pcan_usb_core.c | 4 ++-- drivers/net/cris/eth_v10.c | 2 +- drivers/net/ethernet/3com/3c509.c | 2 +- drivers/net/ethernet/3com/3c515.c | 2 +- drivers/net/ethernet/3com/3c574_cs.c | 2 +- drivers/net/ethernet/3com/3c589_cs.c | 2 +- drivers/net/ethernet/3com/3c59x.c | 2 +- drivers/net/ethernet/8390/axnet_cs.c | 6 +++--- drivers/net/ethernet/8390/lib8390.c | 4 ++-- drivers/net/ethernet/adaptec/starfire.c | 2 +- drivers/net/ethernet/adi/bfin_mac.c | 2 +- drivers/net/ethernet/agere/et131x.c | 4 ++-- drivers/net/ethernet/allwinner/sun4i-emac.c | 6 +++--- drivers/net/ethernet/amd/7990.c | 4 ++-- drivers/net/ethernet/amd/a2065.c | 2 +- drivers/net/ethernet/amd/atarilance.c | 2 +- drivers/net/ethernet/amd/au1000_eth.c | 2 +- drivers/net/ethernet/amd/declance.c | 2 +- drivers/net/ethernet/amd/lance.c | 2 +- drivers/net/ethernet/amd/ni65.c | 4 ++-- drivers/net/ethernet/amd/nmclan_cs.c | 2 +- drivers/net/ethernet/amd/pcnet32.c | 4 ++-- drivers/net/ethernet/amd/sunlance.c | 2 +- drivers/net/ethernet/atheros/alx/main.c | 2 +- drivers/net/ethernet/broadcom/bcmsysport.c | 2 +- drivers/net/ethernet/broadcom/genet/bcmgenet.c | 2 +- drivers/net/ethernet/broadcom/sb1250-mac.c | 2 +- 
drivers/net/ethernet/broadcom/tg3.c | 2 +- drivers/net/ethernet/cavium/liquidio/lio_main.c | 4 ++-- drivers/net/ethernet/cavium/octeon/octeon_mgmt.c | 2 +- drivers/net/ethernet/cavium/thunder/nicvf_main.c | 2 +- drivers/net/ethernet/chelsio/cxgb4vf/sge.c | 2 +- drivers/net/ethernet/davicom/dm9000.c | 4 ++-- drivers/net/ethernet/dec/tulip/de4x5.c | 4 ++-- drivers/net/ethernet/dec/tulip/dmfe.c | 6 +++--- drivers/net/ethernet/dec/tulip/pnic.c | 6 +++--- drivers/net/ethernet/dec/tulip/tulip_core.c | 2 +- drivers/net/ethernet/dec/tulip/uli526x.c | 4 ++-- drivers/net/ethernet/dec/tulip/winbond-840.c | 2 +- drivers/net/ethernet/dlink/dl2k.c | 2 +- drivers/net/ethernet/dlink/sundance.c | 2 +- drivers/net/ethernet/fealnx.c | 2 +- drivers/net/ethernet/freescale/gianfar.c | 2 +- drivers/net/ethernet/fujitsu/fmvj18x_cs.c | 2 +- drivers/net/ethernet/hisilicon/hix5hd2_gmac.c | 2 +- drivers/net/ethernet/hisilicon/hns/hns_enet.c | 6 +++--- drivers/net/ethernet/hp/hp100.c | 2 +- drivers/net/ethernet/i825xx/82596.c | 2 +- drivers/net/ethernet/i825xx/lib82596.c | 2 +- drivers/net/ethernet/i825xx/sun3_82586.c | 4 ++-- drivers/net/ethernet/ibm/emac/core.c | 4 ++-- drivers/net/ethernet/intel/fm10k/fm10k_pci.c | 2 +- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 2 +- drivers/net/ethernet/korina.c | 8 ++++---- drivers/net/ethernet/lantiq_etop.c | 4 ++-- drivers/net/ethernet/marvell/pxa168_eth.c | 2 +- drivers/net/ethernet/marvell/sky2.c | 2 +- drivers/net/ethernet/micrel/ksz884x.c | 4 ++-- drivers/net/ethernet/microchip/encx24j600.c | 2 +- drivers/net/ethernet/moxa/moxart_ether.c | 2 +- drivers/net/ethernet/natsemi/natsemi.c | 2 +- drivers/net/ethernet/natsemi/sonic.c | 2 +- drivers/net/ethernet/nuvoton/w90p910_ether.c | 4 ++-- drivers/net/ethernet/packetengines/hamachi.c | 2 +- drivers/net/ethernet/packetengines/yellowfin.c | 2 +- drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c | 2 +- drivers/net/ethernet/qualcomm/qca_spi.c | 2 +- drivers/net/ethernet/realtek/atp.c | 2 +- drivers/net/ethernet/seeq/sgiseeq.c | 4 ++-- drivers/net/ethernet/sgi/meth.c | 4 ++-- drivers/net/ethernet/sis/sis900.c | 2 +- drivers/net/ethernet/smsc/epic100.c | 2 +- drivers/net/ethernet/smsc/smc911x.c | 6 +++--- drivers/net/ethernet/smsc/smc9194.c | 4 ++-- drivers/net/ethernet/smsc/smc91c92_cs.c | 4 ++-- drivers/net/ethernet/smsc/smc91x.c | 4 ++-- drivers/net/ethernet/sun/niu.c | 2 +- drivers/net/ethernet/sun/sungem.c | 2 +- drivers/net/ethernet/synopsys/dwc_eth_qos.c | 4 ++-- drivers/net/ethernet/tehuti/tehuti.c | 2 +- drivers/net/ethernet/ti/cpsw.c | 2 +- drivers/net/ethernet/ti/netcp_core.c | 4 ++-- drivers/net/ethernet/ti/tlan.c | 2 +- drivers/net/ethernet/tile/tilepro.c | 2 +- drivers/net/ethernet/toshiba/spider_net.c | 2 +- drivers/net/ethernet/via/via-rhine.c | 2 +- drivers/net/ethernet/wiznet/w5100.c | 2 +- drivers/net/ethernet/wiznet/w5300.c | 2 +- drivers/net/ethernet/xilinx/ll_temac_main.c | 2 +- drivers/net/ethernet/xilinx/xilinx_axienet_main.c | 2 +- drivers/net/ethernet/xilinx/xilinx_emaclite.c | 4 ++-- drivers/net/ethernet/xircom/xirc2ps_cs.c | 2 +- drivers/net/fjes/fjes_main.c | 2 +- drivers/net/hamradio/mkiss.c | 2 +- drivers/net/hamradio/scc.c | 2 +- drivers/net/hamradio/yam.c | 2 +- drivers/net/irda/ali-ircc.c | 8 ++++---- drivers/net/irda/bfin_sir.c | 2 +- drivers/net/irda/irda-usb.c | 4 ++-- drivers/net/irda/nsc-ircc.c | 8 ++++---- drivers/net/irda/smsc-ircc2.c | 2 +- drivers/net/irda/stir4200.c | 2 +- drivers/net/irda/via-ircc.c | 8 ++++---- drivers/net/slip/slip.c | 2 +- drivers/net/usb/catc.c | 4 ++-- 
drivers/net/usb/kaweth.c | 2 +- drivers/net/usb/lan78xx.c | 4 ++-- drivers/net/usb/pegasus.c | 2 +- drivers/net/usb/rtl8150.c | 4 ++-- drivers/net/usb/usbnet.c | 4 ++-- drivers/net/wan/cosa.c | 2 +- drivers/net/wan/farsync.c | 6 +++--- drivers/net/wan/lmc/lmc_main.c | 2 +- drivers/net/wan/sbni.c | 8 ++++---- drivers/net/wimax/i2400m/netdev.c | 2 +- drivers/net/wireless/cisco/airo.c | 6 +++--- drivers/net/wireless/intel/ipw2x00/ipw2100.c | 2 +- drivers/net/wireless/intel/ipw2x00/ipw2200.c | 6 +++--- drivers/net/wireless/intersil/hostap/hostap_hw.c | 2 +- drivers/net/wireless/intersil/orinoco/main.c | 2 +- drivers/net/wireless/intersil/orinoco/orinoco_usb.c | 2 +- drivers/net/wireless/marvell/mwifiex/init.c | 2 +- drivers/net/wireless/wl3501_cs.c | 2 +- drivers/net/wireless/zydas/zd1201.c | 2 +- drivers/s390/net/ctcm_main.c | 6 +++--- drivers/s390/net/ctcm_mpc.c | 2 +- drivers/s390/net/netiucv.c | 2 +- drivers/staging/rtl8192e/rtl8192e/rtl_core.c | 2 +- drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c | 6 +++--- drivers/staging/rtl8192u/r8192U_core.c | 4 ++-- drivers/staging/wlan-ng/p80211netdev.c | 2 +- drivers/tty/n_gsm.c | 2 +- drivers/tty/synclink.c | 4 ++-- drivers/tty/synclink_gt.c | 4 ++-- drivers/tty/synclinkmp.c | 4 ++-- drivers/usb/gadget/function/u_ether.c | 2 +- net/atm/lec.c | 4 ++-- net/batman-adv/soft-interface.c | 2 +- net/bluetooth/bnep/netdev.c | 2 +- net/irda/irlan/irlan_eth.c | 2 +- net/sched/sch_generic.c | 2 +- 156 files changed, 232 insertions(+), 232 deletions(-) diff --git a/arch/um/drivers/net_kern.c b/arch/um/drivers/net_kern.c index 9ef669d24bb2..2cd5b6874c7b 100644 --- a/arch/um/drivers/net_kern.c +++ b/arch/um/drivers/net_kern.c @@ -223,7 +223,7 @@ static int uml_net_start_xmit(struct sk_buff *skb, struct net_device *dev) if (len == skb->len) { dev->stats.tx_packets++; dev->stats.tx_bytes += skb->len; - dev->trans_start = jiffies; + netif_trans_update(dev); netif_start_queue(dev); /* this is normally done in the interrupt when tx finishes */ @@ -252,7 +252,7 @@ static void uml_net_set_multicast_list(struct net_device *dev) static void uml_net_tx_timeout(struct net_device *dev) { - dev->trans_start = jiffies; + netif_trans_update(dev); netif_wake_queue(dev); } diff --git a/arch/xtensa/platforms/iss/network.c b/arch/xtensa/platforms/iss/network.c index 976a38594537..66a5d15a9e0e 100644 --- a/arch/xtensa/platforms/iss/network.c +++ b/arch/xtensa/platforms/iss/network.c @@ -428,7 +428,7 @@ static int iss_net_start_xmit(struct sk_buff *skb, struct net_device *dev) if (len == skb->len) { lp->stats.tx_packets++; lp->stats.tx_bytes += skb->len; - dev->trans_start = jiffies; + netif_trans_update(dev); netif_start_queue(dev); /* this is normally done in the interrupt when tx finishes */ diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c index 22c27652e46a..e524e8302da6 100644 --- a/drivers/char/pcmcia/synclink_cs.c +++ b/drivers/char/pcmcia/synclink_cs.c @@ -3969,7 +3969,7 @@ static netdev_tx_t hdlcdev_xmit(struct sk_buff *skb, dev_kfree_skb(skb); /* save start time for transmit timeout detection */ - dev->trans_start = jiffies; + netif_trans_update(dev); /* start hardware transmitter if necessary */ spin_lock_irqsave(&info->lock, flags); @@ -4032,7 +4032,7 @@ static int hdlcdev_open(struct net_device *dev) tty_kref_put(tty); /* enable network layer transmit */ - dev->trans_start = jiffies; + netif_trans_update(dev); netif_start_queue(dev); /* inform generic HDLC layer of current DCD status */ diff --git a/drivers/firewire/net.c 
b/drivers/firewire/net.c index f4ea80d602f7..309311b1faae 100644 --- a/drivers/firewire/net.c +++ b/drivers/firewire/net.c @@ -1023,7 +1023,7 @@ static int fwnet_send_packet(struct fwnet_packet_task *ptask) spin_unlock_irqrestore(&dev->lock, flags); - dev->netdev->trans_start = jiffies; + netif_trans_update(dev->netdev); out: if (free) fwnet_free_ptask(ptask); diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c index 77630cad7f81..2b27d1351cf7 100644 --- a/drivers/infiniband/hw/nes/nes_nic.c +++ b/drivers/infiniband/hw/nes/nes_nic.c @@ -682,7 +682,7 @@ tso_sq_no_longer_full: nes_write32(nesdev->regs+NES_WQE_ALLOC, (wqe_count << 24) | (1 << 23) | nesvnic->nic.qp_id); - netdev->trans_start = jiffies; + netif_trans_update(netdev); return NETDEV_TX_OK; } diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c index c8ed53562c9b..b2f42835d76d 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c @@ -766,7 +766,7 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_ ipoib_dma_unmap_tx(priv, tx_req); dev_kfree_skb_any(skb); } else { - dev->trans_start = jiffies; + netif_trans_update(dev); ++tx->tx_head; if (++priv->tx_outstanding == ipoib_sendq_size) { diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c index f0e55e47eb54..3643d559ba31 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c @@ -637,7 +637,7 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb, if (netif_queue_stopped(dev)) netif_wake_queue(dev); } else { - dev->trans_start = jiffies; + netif_trans_update(dev); address->last_send = priv->tx_head; ++priv->tx_head; diff --git a/drivers/isdn/hysdn/hysdn_net.c b/drivers/isdn/hysdn/hysdn_net.c index a0efb4cefa1c..5609deee7cd3 100644 --- a/drivers/isdn/hysdn/hysdn_net.c +++ b/drivers/isdn/hysdn/hysdn_net.c @@ -127,7 +127,7 @@ net_send_packet(struct sk_buff *skb, struct net_device *dev) if (lp->in_idx >= MAX_SKB_BUFFERS) lp->in_idx = 0; /* wrap around */ lp->sk_count++; /* adjust counter */ - dev->trans_start = jiffies; + netif_trans_update(dev); /* If we just used up the very last entry in the * TX ring on this device, tell the queueing diff --git a/drivers/isdn/i4l/isdn_net.c b/drivers/isdn/i4l/isdn_net.c index aa5dd5668528..c151c6daa67e 100644 --- a/drivers/isdn/i4l/isdn_net.c +++ b/drivers/isdn/i4l/isdn_net.c @@ -1153,7 +1153,7 @@ static void isdn_net_tx_timeout(struct net_device *ndev) * ever called --KG */ } - ndev->trans_start = jiffies; + netif_trans_update(ndev); netif_wake_queue(ndev); } @@ -1291,7 +1291,7 @@ isdn_net_start_xmit(struct sk_buff *skb, struct net_device *ndev) } } else { /* Device is connected to an ISDN channel */ - ndev->trans_start = jiffies; + netif_trans_update(ndev); if (!lp->dialstate) { /* ISDN connection is established, try sending */ int ret; diff --git a/drivers/isdn/i4l/isdn_x25iface.c b/drivers/isdn/i4l/isdn_x25iface.c index e2d4e58230f5..0c5d8de41b23 100644 --- a/drivers/isdn/i4l/isdn_x25iface.c +++ b/drivers/isdn/i4l/isdn_x25iface.c @@ -278,7 +278,7 @@ static int isdn_x25iface_xmit(struct concap_proto *cprot, struct sk_buff *skb) case X25_IFACE_DATA: if (*state == WAN_CONNECTED) { skb_pull(skb, 1); - cprot->net_dev->trans_start = jiffies; + netif_trans_update(cprot->net_dev); ret = (cprot->dops->data_req(cprot, skb)); /* prepare for future retransmissions */ if (ret) skb_push(skb, 1); diff --git 
a/drivers/message/fusion/mptlan.c b/drivers/message/fusion/mptlan.c index cbe96072a6cc..6955c9e22d57 100644 --- a/drivers/message/fusion/mptlan.c +++ b/drivers/message/fusion/mptlan.c @@ -791,7 +791,7 @@ mpt_lan_sdu_send (struct sk_buff *skb, struct net_device *dev) pSimple->Address.High = 0; mpt_put_msg_frame (LanCtx, mpt_dev, mf); - dev->trans_start = jiffies; + netif_trans_update(dev); dioprintk((KERN_INFO MYNAM ": %s/%s: Sending packet. FlagsLength = %08x.\n", IOC_AND_NETDEV_NAMES_s_s(dev), diff --git a/drivers/net/appletalk/cops.c b/drivers/net/appletalk/cops.c index 7f2a032c354c..1b2e9217ec78 100644 --- a/drivers/net/appletalk/cops.c +++ b/drivers/net/appletalk/cops.c @@ -861,7 +861,7 @@ static void cops_timeout(struct net_device *dev) } printk(KERN_WARNING "%s: Transmit timed out.\n", dev->name); cops_jumpstart(dev); /* Restart the card. */ - dev->trans_start = jiffies; /* prevent tx timeout */ + netif_trans_update(dev); /* prevent tx timeout */ netif_wake_queue(dev); } diff --git a/drivers/net/can/mscan/mscan.c b/drivers/net/can/mscan/mscan.c index e36b7400d5cc..acb708fc1463 100644 --- a/drivers/net/can/mscan/mscan.c +++ b/drivers/net/can/mscan/mscan.c @@ -276,7 +276,7 @@ static netdev_tx_t mscan_start_xmit(struct sk_buff *skb, struct net_device *dev) out_8(®s->cantflg, 1 << buf_id); if (!test_bit(F_TX_PROGRESS, &priv->flags)) - dev->trans_start = jiffies; + netif_trans_update(dev); list_add_tail(&priv->tx_queue[buf_id].list, &priv->tx_head); @@ -469,7 +469,7 @@ static irqreturn_t mscan_isr(int irq, void *dev_id) clear_bit(F_TX_PROGRESS, &priv->flags); priv->cur_pri = 0; } else { - dev->trans_start = jiffies; + netif_trans_update(dev); } if (!test_bit(F_TX_WAIT_ALL, &priv->flags)) diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c index 3400fd1cada7..71f0e791355b 100644 --- a/drivers/net/can/usb/ems_usb.c +++ b/drivers/net/can/usb/ems_usb.c @@ -521,7 +521,7 @@ static void ems_usb_write_bulk_callback(struct urb *urb) if (urb->status) netdev_info(netdev, "Tx URB aborted (%d)\n", urb->status); - netdev->trans_start = jiffies; + netif_trans_update(netdev); /* transmission complete interrupt */ netdev->stats.tx_packets++; @@ -835,7 +835,7 @@ static netdev_tx_t ems_usb_start_xmit(struct sk_buff *skb, struct net_device *ne stats->tx_dropped++; } } else { - netdev->trans_start = jiffies; + netif_trans_update(netdev); /* Slow down tx path */ if (atomic_read(&dev->active_tx_urbs) >= MAX_TX_URBS || diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c index 113e64fcd73b..784a9002fbb9 100644 --- a/drivers/net/can/usb/esd_usb2.c +++ b/drivers/net/can/usb/esd_usb2.c @@ -480,7 +480,7 @@ static void esd_usb2_write_bulk_callback(struct urb *urb) if (urb->status) netdev_info(netdev, "Tx URB aborted (%d)\n", urb->status); - netdev->trans_start = jiffies; + netif_trans_update(netdev); } static ssize_t show_firmware(struct device *d, @@ -820,7 +820,7 @@ static netdev_tx_t esd_usb2_start_xmit(struct sk_buff *skb, goto releasebuf; } - netdev->trans_start = jiffies; + netif_trans_update(netdev); /* * Release our reference to this URB, the USB core will eventually free diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c index 5a2e341a6d1e..bfb91d8fa460 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c @@ -274,7 +274,7 @@ static void peak_usb_write_bulk_callback(struct urb *urb) netdev->stats.tx_bytes += context->data_len; /* prevent tx timeout */ - 
netdev->trans_start = jiffies; + netif_trans_update(netdev); break; default: @@ -373,7 +373,7 @@ static netdev_tx_t peak_usb_ndo_start_xmit(struct sk_buff *skb, stats->tx_dropped++; } } else { - netdev->trans_start = jiffies; + netif_trans_update(netdev); /* slow down tx path */ if (atomic_read(&dev->active_tx_urbs) >= PCAN_USB_MAX_TX_URBS) diff --git a/drivers/net/cris/eth_v10.c b/drivers/net/cris/eth_v10.c index 64c016a99af8..221f5f011ff9 100644 --- a/drivers/net/cris/eth_v10.c +++ b/drivers/net/cris/eth_v10.c @@ -1106,7 +1106,7 @@ e100_send_packet(struct sk_buff *skb, struct net_device *dev) myNextTxDesc->skb = skb; - dev->trans_start = jiffies; /* NETIF_F_LLTX driver :( */ + netif_trans_update(dev); /* NETIF_F_LLTX driver :( */ e100_hardware_send_packet(np, buf, skb->len); diff --git a/drivers/net/ethernet/3com/3c509.c b/drivers/net/ethernet/3com/3c509.c index 7677c745fb30..91ada52f776b 100644 --- a/drivers/net/ethernet/3com/3c509.c +++ b/drivers/net/ethernet/3com/3c509.c @@ -699,7 +699,7 @@ el3_tx_timeout (struct net_device *dev) dev->name, inb(ioaddr + TX_STATUS), inw(ioaddr + EL3_STATUS), inw(ioaddr + TX_FREE)); dev->stats.tx_errors++; - dev->trans_start = jiffies; /* prevent tx timeout */ + netif_trans_update(dev); /* prevent tx timeout */ /* Issue TX_RESET and TX_START commands. */ outw(TxReset, ioaddr + EL3_CMD); outw(TxEnable, ioaddr + EL3_CMD); diff --git a/drivers/net/ethernet/3com/3c515.c b/drivers/net/ethernet/3com/3c515.c index 942fb0d5aace..b26e038b4a0e 100644 --- a/drivers/net/ethernet/3com/3c515.c +++ b/drivers/net/ethernet/3com/3c515.c @@ -992,7 +992,7 @@ static void corkscrew_timeout(struct net_device *dev) if (!(inw(ioaddr + EL3_STATUS) & CmdInProgress)) break; outw(TxEnable, ioaddr + EL3_CMD); - dev->trans_start = jiffies; /* prevent tx timeout */ + netif_trans_update(dev); /* prevent tx timeout */ dev->stats.tx_errors++; dev->stats.tx_dropped++; netif_wake_queue(dev); diff --git a/drivers/net/ethernet/3com/3c574_cs.c b/drivers/net/ethernet/3com/3c574_cs.c index b9948f00c5e9..b88afd759307 100644 --- a/drivers/net/ethernet/3com/3c574_cs.c +++ b/drivers/net/ethernet/3com/3c574_cs.c @@ -700,7 +700,7 @@ static void el3_tx_timeout(struct net_device *dev) netdev_notice(dev, "Transmit timed out!\n"); dump_status(dev); dev->stats.tx_errors++; - dev->trans_start = jiffies; /* prevent tx timeout */ + netif_trans_update(dev); /* prevent tx timeout */ /* Issue TX_RESET and TX_START commands. */ tc574_wait_for_completion(dev, TxReset); outw(TxEnable, ioaddr + EL3_CMD); diff --git a/drivers/net/ethernet/3com/3c589_cs.c b/drivers/net/ethernet/3com/3c589_cs.c index c5a320507556..71396e4b87e3 100644 --- a/drivers/net/ethernet/3com/3c589_cs.c +++ b/drivers/net/ethernet/3com/3c589_cs.c @@ -534,7 +534,7 @@ static void el3_tx_timeout(struct net_device *dev) netdev_warn(dev, "Transmit timed out!\n"); dump_status(dev); dev->stats.tx_errors++; - dev->trans_start = jiffies; /* prevent tx timeout */ + netif_trans_update(dev); /* prevent tx timeout */ /* Issue TX_RESET and TX_START commands. 
*/ tc589_wait_for_completion(dev, TxReset); outw(TxEnable, ioaddr + EL3_CMD); diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c index d81fceddbe0e..25c55ab05c7d 100644 --- a/drivers/net/ethernet/3com/3c59x.c +++ b/drivers/net/ethernet/3com/3c59x.c @@ -1944,7 +1944,7 @@ static void vortex_tx_timeout(struct net_device *dev) } /* Issue Tx Enable */ iowrite16(TxEnable, ioaddr + EL3_CMD); - dev->trans_start = jiffies; /* prevent tx timeout */ + netif_trans_update(dev); /* prevent tx timeout */ } /* diff --git a/drivers/net/ethernet/8390/axnet_cs.c b/drivers/net/ethernet/8390/axnet_cs.c index ec6eac1f8c95..4ea717d68c95 100644 --- a/drivers/net/ethernet/8390/axnet_cs.c +++ b/drivers/net/ethernet/8390/axnet_cs.c @@ -1041,7 +1041,7 @@ static netdev_tx_t axnet_start_xmit(struct sk_buff *skb, { ei_local->txing = 1; NS8390_trigger_send(dev, send_length, output_page); - dev->trans_start = jiffies; + netif_trans_update(dev); if (output_page == ei_local->tx_start_page) { ei_local->tx1 = -1; @@ -1270,7 +1270,7 @@ static void ei_tx_intr(struct net_device *dev) { ei_local->txing = 1; NS8390_trigger_send(dev, ei_local->tx2, ei_local->tx_start_page + 6); - dev->trans_start = jiffies; + netif_trans_update(dev); ei_local->tx2 = -1, ei_local->lasttx = 2; } @@ -1287,7 +1287,7 @@ static void ei_tx_intr(struct net_device *dev) { ei_local->txing = 1; NS8390_trigger_send(dev, ei_local->tx1, ei_local->tx_start_page); - dev->trans_start = jiffies; + netif_trans_update(dev); ei_local->tx1 = -1; ei_local->lasttx = 1; } diff --git a/drivers/net/ethernet/8390/lib8390.c b/drivers/net/ethernet/8390/lib8390.c index b96e8852b2d1..60f8e2c8e726 100644 --- a/drivers/net/ethernet/8390/lib8390.c +++ b/drivers/net/ethernet/8390/lib8390.c @@ -596,7 +596,7 @@ static void ei_tx_intr(struct net_device *dev) if (ei_local->tx2 > 0) { ei_local->txing = 1; NS8390_trigger_send(dev, ei_local->tx2, ei_local->tx_start_page + 6); - dev->trans_start = jiffies; + netif_trans_update(dev); ei_local->tx2 = -1, ei_local->lasttx = 2; } else @@ -609,7 +609,7 @@ static void ei_tx_intr(struct net_device *dev) if (ei_local->tx1 > 0) { ei_local->txing = 1; NS8390_trigger_send(dev, ei_local->tx1, ei_local->tx_start_page); - dev->trans_start = jiffies; + netif_trans_update(dev); ei_local->tx1 = -1; ei_local->lasttx = 1; } else diff --git a/drivers/net/ethernet/adaptec/starfire.c b/drivers/net/ethernet/adaptec/starfire.c index ac7288240d55..1d1069641d81 100644 --- a/drivers/net/ethernet/adaptec/starfire.c +++ b/drivers/net/ethernet/adaptec/starfire.c @@ -1129,7 +1129,7 @@ static void tx_timeout(struct net_device *dev) /* Trigger an immediate transmit demand. 
*/ - dev->trans_start = jiffies; /* prevent tx timeout */ + netif_trans_update(dev); /* prevent tx timeout */ dev->stats.tx_errors++; netif_wake_queue(dev); } diff --git a/drivers/net/ethernet/adi/bfin_mac.c b/drivers/net/ethernet/adi/bfin_mac.c index 74139cb7f849..3d2245fdc283 100644 --- a/drivers/net/ethernet/adi/bfin_mac.c +++ b/drivers/net/ethernet/adi/bfin_mac.c @@ -1430,7 +1430,7 @@ static void bfin_mac_timeout(struct net_device *dev) bfin_mac_enable(lp->phydev); /* We can accept TX packets again */ - dev->trans_start = jiffies; /* prevent tx timeout */ + netif_trans_update(dev); /* prevent tx timeout */ } static void bfin_mac_multicast_hash(struct net_device *dev) diff --git a/drivers/net/ethernet/agere/et131x.c b/drivers/net/ethernet/agere/et131x.c index 0907ab6ff309..30defe6c81f2 100644 --- a/drivers/net/ethernet/agere/et131x.c +++ b/drivers/net/ethernet/agere/et131x.c @@ -3349,7 +3349,7 @@ static void et131x_down(struct net_device *netdev) struct et131x_adapter *adapter = netdev_priv(netdev); /* Save the timestamp for the TX watchdog, prevent a timeout */ - netdev->trans_start = jiffies; + netif_trans_update(netdev); phy_stop(adapter->phydev); et131x_disable_txrx(netdev); @@ -3816,7 +3816,7 @@ static netdev_tx_t et131x_tx(struct sk_buff *skb, struct net_device *netdev) netif_stop_queue(netdev); /* Save the timestamp for the TX timeout watchdog */ - netdev->trans_start = jiffies; + netif_trans_update(netdev); /* TCB is not available */ if (tx_ring->used >= NUM_TCB) diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c index 8d50314ac3eb..de2c4bf5fac4 100644 --- a/drivers/net/ethernet/allwinner/sun4i-emac.c +++ b/drivers/net/ethernet/allwinner/sun4i-emac.c @@ -428,7 +428,7 @@ static void emac_timeout(struct net_device *dev) emac_reset(db); emac_init_device(dev); /* We can accept TX packets again */ - dev->trans_start = jiffies; + netif_trans_update(dev); netif_wake_queue(dev); /* Restore previous register address */ @@ -468,7 +468,7 @@ static int emac_start_xmit(struct sk_buff *skb, struct net_device *dev) db->membase + EMAC_TX_CTL0_REG); /* save the time stamp */ - dev->trans_start = jiffies; + netif_trans_update(dev); } else if (channel == 1) { /* set TX len */ writel(skb->len, db->membase + EMAC_TX_PL1_REG); @@ -477,7 +477,7 @@ static int emac_start_xmit(struct sk_buff *skb, struct net_device *dev) db->membase + EMAC_TX_CTL1_REG); /* save the time stamp */ - dev->trans_start = jiffies; + netif_trans_update(dev); } if ((db->tx_fifo_stat & 3) == 3) { diff --git a/drivers/net/ethernet/amd/7990.c b/drivers/net/ethernet/amd/7990.c index 8e7575571531..dcf2a1f3643d 100644 --- a/drivers/net/ethernet/amd/7990.c +++ b/drivers/net/ethernet/amd/7990.c @@ -260,7 +260,7 @@ static int lance_reset(struct net_device *dev) load_csrs(lp); lance_init_ring(dev); - dev->trans_start = jiffies; /* prevent tx timeout */ + netif_trans_update(dev); /* prevent tx timeout */ status = init_restart_lance(lp); #ifdef DEBUG_DRIVER printk("Lance restart=%d\n", status); @@ -530,7 +530,7 @@ void lance_tx_timeout(struct net_device *dev) { printk("lance_tx_timeout\n"); lance_reset(dev); - dev->trans_start = jiffies; /* prevent tx timeout */ + netif_trans_update(dev); /* prevent tx timeout */ netif_wake_queue(dev); } EXPORT_SYMBOL_GPL(lance_tx_timeout); diff --git a/drivers/net/ethernet/amd/a2065.c b/drivers/net/ethernet/amd/a2065.c index 2a18d34d2610..a83cd1c4ce1d 100644 --- a/drivers/net/ethernet/amd/a2065.c +++ b/drivers/net/ethernet/amd/a2065.c @@ -512,7 +512,7 @@ 
static inline int lance_reset(struct net_device *dev) load_csrs(lp); lance_init_ring(dev); - dev->trans_start = jiffies; /* prevent tx timeout */ + netif_trans_update(dev); /* prevent tx timeout */ netif_start_queue(dev); status = init_restart_lance(lp); diff --git a/drivers/net/ethernet/amd/atarilance.c b/drivers/net/ethernet/amd/atarilance.c index b10964e8cb54..d2bc8e5dcd23 100644 --- a/drivers/net/ethernet/amd/atarilance.c +++ b/drivers/net/ethernet/amd/atarilance.c @@ -764,7 +764,7 @@ static void lance_tx_timeout (struct net_device *dev) /* lance_restart, essentially */ lance_init_ring(dev); REGA( CSR0 ) = CSR0_INEA | CSR0_INIT | CSR0_STRT; - dev->trans_start = jiffies; /* prevent tx timeout */ + netif_trans_update(dev); /* prevent tx timeout */ netif_wake_queue(dev); } diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c index d3977d032b48..9af309e017fd 100644 --- a/drivers/net/ethernet/amd/au1000_eth.c +++ b/drivers/net/ethernet/amd/au1000_eth.c @@ -1074,7 +1074,7 @@ static void au1000_tx_timeout(struct net_device *dev) netdev_err(dev, "au1000_tx_timeout: dev=%p\n", dev); au1000_reset_mac(dev); au1000_init(dev); - dev->trans_start = jiffies; /* prevent tx timeout */ + netif_trans_update(dev); /* prevent tx timeout */ netif_wake_queue(dev); } diff --git a/drivers/net/ethernet/amd/declance.c b/drivers/net/ethernet/amd/declance.c index b584b78237df..b799c7ac899b 100644 --- a/drivers/net/ethernet/amd/declance.c +++ b/drivers/net/ethernet/amd/declance.c @@ -877,7 +877,7 @@ static inline int lance_reset(struct net_device *dev) lance_init_ring(dev); load_csrs(lp); - dev->trans_start = jiffies; /* prevent tx timeout */ + netif_trans_update(dev); /* prevent tx timeout */ status = init_restart_lance(lp); return status; } diff --git a/drivers/net/ethernet/amd/lance.c b/drivers/net/ethernet/amd/lance.c index 3a7ebfdda57d..abb1ba228b26 100644 --- a/drivers/net/ethernet/amd/lance.c +++ b/drivers/net/ethernet/amd/lance.c @@ -943,7 +943,7 @@ static void lance_tx_timeout (struct net_device *dev) #endif lance_restart (dev, 0x0043, 1); - dev->trans_start = jiffies; /* prevent tx timeout */ + netif_trans_update(dev); /* prevent tx timeout */ netif_wake_queue (dev); } diff --git a/drivers/net/ethernet/amd/ni65.c b/drivers/net/ethernet/amd/ni65.c index 1cf33addd15e..cda53db75f17 100644 --- a/drivers/net/ethernet/amd/ni65.c +++ b/drivers/net/ethernet/amd/ni65.c @@ -782,7 +782,7 @@ static void ni65_stop_start(struct net_device *dev,struct priv *p) if(!p->lock) if (p->tmdnum || !p->xmit_queued) netif_wake_queue(dev); - dev->trans_start = jiffies; /* prevent tx timeout */ + netif_trans_update(dev); /* prevent tx timeout */ } else writedatareg(CSR0_STRT | csr0); @@ -1148,7 +1148,7 @@ static void ni65_timeout(struct net_device *dev) printk("%02x ",p->tmdhead[i].u.s.status); printk("\n"); ni65_lance_reinit(dev); - dev->trans_start = jiffies; /* prevent tx timeout */ + netif_trans_update(dev); /* prevent tx timeout */ netif_wake_queue(dev); } diff --git a/drivers/net/ethernet/amd/nmclan_cs.c b/drivers/net/ethernet/amd/nmclan_cs.c index 27245efe9f50..2807e181647b 100644 --- a/drivers/net/ethernet/amd/nmclan_cs.c +++ b/drivers/net/ethernet/amd/nmclan_cs.c @@ -851,7 +851,7 @@ static void mace_tx_timeout(struct net_device *dev) #else /* #if RESET_ON_TIMEOUT */ pr_cont("NOT resetting card\n"); #endif /* #if RESET_ON_TIMEOUT */ - dev->trans_start = jiffies; /* prevent tx timeout */ + netif_trans_update(dev); /* prevent tx timeout */ netif_wake_queue(dev); } diff --git 
a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c index 7ccebae9cb48..c22bf52d3320 100644 --- a/drivers/net/ethernet/amd/pcnet32.c +++ b/drivers/net/ethernet/amd/pcnet32.c @@ -448,7 +448,7 @@ static void pcnet32_netif_stop(struct net_device *dev) { struct pcnet32_private *lp = netdev_priv(dev); - dev->trans_start = jiffies; /* prevent tx timeout */ + netif_trans_update(dev); /* prevent tx timeout */ napi_disable(&lp->napi); netif_tx_disable(dev); } @@ -2426,7 +2426,7 @@ static void pcnet32_tx_timeout(struct net_device *dev) } pcnet32_restart(dev, CSR0_NORMAL); - dev->trans_start = jiffies; /* prevent tx timeout */ + netif_trans_update(dev); /* prevent tx timeout */ netif_wake_queue(dev); spin_unlock_irqrestore(&lp->lock, flags); diff --git a/drivers/net/ethernet/amd/sunlance.c b/drivers/net/ethernet/amd/sunlance.c index 7847638bdd22..9b56b40259dc 100644 --- a/drivers/net/ethernet/amd/sunlance.c +++ b/drivers/net/ethernet/amd/sunlance.c @@ -997,7 +997,7 @@ static int lance_reset(struct net_device *dev) } lp->init_ring(dev); load_csrs(lp); - dev->trans_start = jiffies; /* prevent tx timeout */ + netif_trans_update(dev); /* prevent tx timeout */ status = init_restart_lance(lp); return status; } diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c index 55b118e876fd..9fe8b5e310d1 100644 --- a/drivers/net/ethernet/atheros/alx/main.c +++ b/drivers/net/ethernet/atheros/alx/main.c @@ -745,7 +745,7 @@ static netdev_features_t alx_fix_features(struct net_device *netdev, static void alx_netif_stop(struct alx_priv *alx) { - alx->dev->trans_start = jiffies; + netif_trans_update(alx->dev); if (netif_carrier_ok(alx->dev)) { netif_carrier_off(alx->dev); netif_tx_disable(alx->dev); diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index 30b0c2895a56..543bf38105c9 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -1117,7 +1117,7 @@ static void bcm_sysport_tx_timeout(struct net_device *dev) { netdev_warn(dev, "transmit timeout!\n"); - dev->trans_start = jiffies; + netif_trans_update(dev); dev->stats.tx_errors++; netif_tx_wake_all_queues(dev); diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index fbff226369ac..541456398dfb 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -3059,7 +3059,7 @@ static void bcmgenet_timeout(struct net_device *dev) bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR); bcmgenet_intrl2_1_writel(priv, int1_enable, INTRL2_CPU_MASK_CLEAR); - dev->trans_start = jiffies; + netif_trans_update(dev); dev->stats.tx_errors++; diff --git a/drivers/net/ethernet/broadcom/sb1250-mac.c b/drivers/net/ethernet/broadcom/sb1250-mac.c index eacc559679bf..f1b81187a201 100644 --- a/drivers/net/ethernet/broadcom/sb1250-mac.c +++ b/drivers/net/ethernet/broadcom/sb1250-mac.c @@ -2462,7 +2462,7 @@ static void sbmac_tx_timeout (struct net_device *dev) spin_lock_irqsave(&sc->sbm_lock, flags); - dev->trans_start = jiffies; /* prevent tx timeout */ + netif_trans_update(dev); /* prevent tx timeout */ dev->stats.tx_errors++; spin_unlock_irqrestore(&sc->sbm_lock, flags); diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 3010080cfeee..ff300f7cf529 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -7383,7 +7383,7 @@ static 
void tg3_napi_fini(struct tg3 *tp) static inline void tg3_netif_stop(struct tg3 *tp) { - tp->dev->trans_start = jiffies; /* prevent tx timeout */ + netif_trans_update(tp->dev); /* prevent tx timeout */ tg3_napi_disable(tp); netif_carrier_off(tp->dev); netif_tx_disable(tp->dev); diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index 34d269cd5579..8de79ae63231 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -2899,7 +2899,7 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev) if (status == IQ_SEND_STOP) stop_q(lio->netdev, q_idx); - netdev->trans_start = jiffies; + netif_trans_update(netdev); stats->tx_done++; stats->tx_tot_bytes += skb->len; @@ -2928,7 +2928,7 @@ static void liquidio_tx_timeout(struct net_device *netdev) netif_info(lio, tx_err, lio->netdev, "Transmit timeout tx_dropped:%ld, waking up queues now!!\n", netdev->stats.tx_dropped); - netdev->trans_start = jiffies; + netif_trans_update(netdev); txqs_wake(netdev); } diff --git a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c index c177c7cec13b..388cd799d9ed 100644 --- a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c +++ b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c @@ -1320,7 +1320,7 @@ static int octeon_mgmt_xmit(struct sk_buff *skb, struct net_device *netdev) /* Ring the bell. */ cvmx_write_csr(p->mix + MIX_ORING2, 1); - netdev->trans_start = jiffies; + netif_trans_update(netdev); rv = NETDEV_TX_OK; out: octeon_mgmt_update_tx_stats(netdev); diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c index bfee298fc02a..a19e73f11d73 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c @@ -1442,7 +1442,7 @@ static void nicvf_reset_task(struct work_struct *work) nicvf_stop(nic->netdev); nicvf_open(nic->netdev); - nic->netdev->trans_start = jiffies; + netif_trans_update(nic->netdev); } static int nicvf_config_loopback(struct nicvf *nic, diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c index 1ccd282949a5..1bb57d3fbbe8 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c @@ -1448,7 +1448,7 @@ int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev) * the new TX descriptors and return success. */ txq_advance(&txq->q, ndesc); - dev->trans_start = jiffies; + netif_trans_update(dev); ring_tx_db(adapter, &txq->q, ndesc); return NETDEV_TX_OK; diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c index 48d91941408d..9e061307975f 100644 --- a/drivers/net/ethernet/davicom/dm9000.c +++ b/drivers/net/ethernet/davicom/dm9000.c @@ -966,7 +966,7 @@ dm9000_init_dm9000(struct net_device *dev) /* Init Driver variable */ db->tx_pkt_cnt = 0; db->queue_pkt_len = 0; - dev->trans_start = jiffies; + netif_trans_update(dev); } /* Our watchdog timed out. 
Called by the networking layer */ @@ -985,7 +985,7 @@ static void dm9000_timeout(struct net_device *dev) dm9000_init_dm9000(dev); dm9000_unmask_interrupts(db); /* We can accept TX packets again */ - dev->trans_start = jiffies; /* prevent tx timeout */ + netif_trans_update(dev); /* prevent tx timeout */ netif_wake_queue(dev); /* Restore previous register address */ diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c index d88fbab378aa..cbe84972ff7a 100644 --- a/drivers/net/ethernet/dec/tulip/de4x5.c +++ b/drivers/net/ethernet/dec/tulip/de4x5.c @@ -1336,7 +1336,7 @@ de4x5_open(struct net_device *dev) } lp->interrupt = UNMASK_INTERRUPTS; - dev->trans_start = jiffies; /* prevent tx timeout */ + netif_trans_update(dev); /* prevent tx timeout */ START_DE4X5; @@ -1935,7 +1935,7 @@ set_multicast_list(struct net_device *dev) lp->tx_new = (lp->tx_new + 1) % lp->txRingSize; outl(POLL_DEMAND, DE4X5_TPD); /* Start the TX */ - dev->trans_start = jiffies; /* prevent tx timeout */ + netif_trans_update(dev); /* prevent tx timeout */ } } } diff --git a/drivers/net/ethernet/dec/tulip/dmfe.c b/drivers/net/ethernet/dec/tulip/dmfe.c index 42c759ef8ff0..8ed0fd8b1dda 100644 --- a/drivers/net/ethernet/dec/tulip/dmfe.c +++ b/drivers/net/ethernet/dec/tulip/dmfe.c @@ -725,7 +725,7 @@ static netdev_tx_t dmfe_start_xmit(struct sk_buff *skb, txptr->tdes0 = cpu_to_le32(0x80000000); /* Set owner bit */ db->tx_packet_cnt++; /* Ready to send */ dw32(DCR1, 0x1); /* Issue Tx polling */ - dev->trans_start = jiffies; /* saved time stamp */ + netif_trans_update(dev); /* saved time stamp */ } else { db->tx_queue_cnt++; /* queue TX packet */ dw32(DCR1, 0x1); /* Issue Tx polling */ @@ -931,7 +931,7 @@ static void dmfe_free_tx_pkt(struct net_device *dev, struct dmfe_board_info *db) db->tx_packet_cnt++; /* Ready to send */ db->tx_queue_cnt--; dw32(DCR1, 0x1); /* Issue Tx polling */ - dev->trans_start = jiffies; /* saved time stamp */ + netif_trans_update(dev); /* saved time stamp */ } /* Resource available check */ @@ -1542,7 +1542,7 @@ static void send_filter_frame(struct net_device *dev) update_cr6(db->cr6_data | 0x2000, ioaddr); dw32(DCR1, 0x1); /* Issue Tx polling */ update_cr6(db->cr6_data, ioaddr); - dev->trans_start = jiffies; + netif_trans_update(dev); } else db->tx_queue_cnt++; /* Put in TX queue */ } diff --git a/drivers/net/ethernet/dec/tulip/pnic.c b/drivers/net/ethernet/dec/tulip/pnic.c index 5364563c4378..7bcccf5cac7a 100644 --- a/drivers/net/ethernet/dec/tulip/pnic.c +++ b/drivers/net/ethernet/dec/tulip/pnic.c @@ -44,7 +44,7 @@ void pnic_do_nway(struct net_device *dev) tp->csr6 = new_csr6; /* Restart Tx */ tulip_restart_rxtx(tp); - dev->trans_start = jiffies; + netif_trans_update(dev); } } } @@ -70,7 +70,7 @@ void pnic_lnk_change(struct net_device *dev, int csr5) iowrite32(tp->csr6, ioaddr + CSR6); iowrite32(0x30, ioaddr + CSR12); iowrite32(0x0201F078, ioaddr + 0xB8); /* Turn on autonegotiation. 
*/ - dev->trans_start = jiffies; + netif_trans_update(dev); } } else if (ioread32(ioaddr + CSR5) & TPLnkPass) { if (tulip_media_cap[dev->if_port] & MediaIsMII) { @@ -147,7 +147,7 @@ void pnic_timer(unsigned long data) tp->csr6 = new_csr6; /* Restart Tx */ tulip_restart_rxtx(tp); - dev->trans_start = jiffies; + netif_trans_update(dev); if (tulip_debug > 1) dev_info(&dev->dev, "Changing PNIC configuration to %s %s-duplex, CSR6 %08x\n", diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c index 94d0eebef129..bbde90bc74fe 100644 --- a/drivers/net/ethernet/dec/tulip/tulip_core.c +++ b/drivers/net/ethernet/dec/tulip/tulip_core.c @@ -605,7 +605,7 @@ static void tulip_tx_timeout(struct net_device *dev) out_unlock: spin_unlock_irqrestore (&tp->lock, flags); - dev->trans_start = jiffies; /* prevent tx timeout */ + netif_trans_update(dev); /* prevent tx timeout */ netif_wake_queue (dev); } diff --git a/drivers/net/ethernet/dec/tulip/uli526x.c b/drivers/net/ethernet/dec/tulip/uli526x.c index 447d09272ab7..e750b5ddc0fb 100644 --- a/drivers/net/ethernet/dec/tulip/uli526x.c +++ b/drivers/net/ethernet/dec/tulip/uli526x.c @@ -636,7 +636,7 @@ static netdev_tx_t uli526x_start_xmit(struct sk_buff *skb, txptr->tdes0 = cpu_to_le32(0x80000000); /* Set owner bit */ db->tx_packet_cnt++; /* Ready to send */ uw32(DCR1, 0x1); /* Issue Tx polling */ - dev->trans_start = jiffies; /* saved time stamp */ + netif_trans_update(dev); /* saved time stamp */ } /* Tx resource check */ @@ -1431,7 +1431,7 @@ static void send_filter_frame(struct net_device *dev, int mc_cnt) update_cr6(db->cr6_data | 0x2000, ioaddr); uw32(DCR1, 0x1); /* Issue Tx polling */ update_cr6(db->cr6_data, ioaddr); - dev->trans_start = jiffies; + netif_trans_update(dev); } else netdev_err(dev, "No Tx resource - Send_filter_frame!\n"); } diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c index 3c0e4d5c5fef..1f62b9423851 100644 --- a/drivers/net/ethernet/dec/tulip/winbond-840.c +++ b/drivers/net/ethernet/dec/tulip/winbond-840.c @@ -966,7 +966,7 @@ static void tx_timeout(struct net_device *dev) enable_irq(irq); netif_wake_queue(dev); - dev->trans_start = jiffies; /* prevent tx timeout */ + netif_trans_update(dev); /* prevent tx timeout */ np->stats.tx_errors++; } diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c index f92b6d948398..78f144696d6b 100644 --- a/drivers/net/ethernet/dlink/dl2k.c +++ b/drivers/net/ethernet/dlink/dl2k.c @@ -706,7 +706,7 @@ rio_tx_timeout (struct net_device *dev) dev->name, dr32(TxStatus)); rio_free_tx(dev, 0); dev->if_port = 0; - dev->trans_start = jiffies; /* prevent tx timeout */ + netif_trans_update(dev); /* prevent tx timeout */ } static netdev_tx_t diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c index a28a2e583f0f..58c6338a839e 100644 --- a/drivers/net/ethernet/dlink/sundance.c +++ b/drivers/net/ethernet/dlink/sundance.c @@ -1011,7 +1011,7 @@ static void tx_timeout(struct net_device *dev) dev->if_port = 0; - dev->trans_start = jiffies; /* prevent tx timeout */ + netif_trans_update(dev); /* prevent tx timeout */ dev->stats.tx_errors++; if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) { netif_wake_queue(dev); diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c index b1b9ebafb354..c08bd763172a 100644 --- a/drivers/net/ethernet/fealnx.c +++ b/drivers/net/ethernet/fealnx.c @@ -1227,7 +1227,7 @@ static void fealnx_tx_timeout(struct 
net_device *dev) spin_unlock_irqrestore(&np->lock, flags); - dev->trans_start = jiffies; /* prevent tx timeout */ + netif_trans_update(dev); /* prevent tx timeout */ dev->stats.tx_errors++; netif_wake_queue(dev); /* or .._start_.. ?? */ } diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index d2f917af539f..a5800413f917 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -2076,7 +2076,7 @@ void gfar_start(struct gfar_private *priv) gfar_ints_enable(priv); - priv->ndev->trans_start = jiffies; /* prevent tx timeout */ + netif_trans_update(priv->ndev); /* prevent tx timeout */ } static void free_grp_irqs(struct gfar_priv_grp *grp) diff --git a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c index 678f5018d0be..399cfd217288 100644 --- a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c +++ b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c @@ -746,7 +746,7 @@ static irqreturn_t fjn_interrupt(int dummy, void *dev_id) lp->sent = lp->tx_queue ; lp->tx_queue = 0; lp->tx_queue_len = 0; - dev->trans_start = jiffies; + netif_trans_update(dev); } else { lp->tx_started = 0; } diff --git a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c index e51892d518ff..b9f2ea59308a 100644 --- a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c +++ b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c @@ -636,7 +636,7 @@ static int hix5hd2_net_xmit(struct sk_buff *skb, struct net_device *dev) pos = dma_ring_incr(pos, TX_DESC_NUM); writel_relaxed(dma_byte(pos), priv->base + TX_BQ_WR_ADDR); - dev->trans_start = jiffies; + netif_trans_update(dev); dev->stats.tx_packets++; dev->stats.tx_bytes += skb->len; netdev_sent_queue(dev, skb->len); diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c index e47aff250b15..e621636e69b9 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c @@ -1275,7 +1275,7 @@ void hns_nic_net_reinit(struct net_device *netdev) { struct hns_nic_priv *priv = netdev_priv(netdev); - priv->netdev->trans_start = jiffies; + netif_trans_update(priv->netdev); while (test_and_set_bit(NIC_STATE_REINITING, &priv->state)) usleep_range(1000, 2000); @@ -1376,7 +1376,7 @@ static netdev_tx_t hns_nic_net_xmit(struct sk_buff *skb, ret = hns_nic_net_xmit_hw(ndev, skb, &tx_ring_data(priv, skb->queue_mapping)); if (ret == NETDEV_TX_OK) { - ndev->trans_start = jiffies; + netif_trans_update(ndev); ndev->stats.tx_bytes += skb->len; ndev->stats.tx_packets++; } @@ -1648,7 +1648,7 @@ static void hns_nic_reset_subtask(struct hns_nic_priv *priv) rtnl_lock(); /* put off any impending NetWatchDogTimeout */ - priv->netdev->trans_start = jiffies; + netif_trans_update(priv->netdev); if (type == HNAE_PORT_DEBUG) { hns_nic_net_reinit(priv->netdev); diff --git a/drivers/net/ethernet/hp/hp100.c b/drivers/net/ethernet/hp/hp100.c index 3daf2d4a7ca0..631dbc7b4dbb 100644 --- a/drivers/net/ethernet/hp/hp100.c +++ b/drivers/net/ethernet/hp/hp100.c @@ -1102,7 +1102,7 @@ static int hp100_open(struct net_device *dev) return -EAGAIN; } - dev->trans_start = jiffies; /* prevent tx timeout */ + netif_trans_update(dev); /* prevent tx timeout */ netif_start_queue(dev); lp->lan_type = hp100_sense_lan(dev); diff --git a/drivers/net/ethernet/i825xx/82596.c b/drivers/net/ethernet/i825xx/82596.c index 7ce6379fd1a3..befb4ac3e2b0 100644 --- a/drivers/net/ethernet/i825xx/82596.c +++ 
b/drivers/net/ethernet/i825xx/82596.c @@ -1042,7 +1042,7 @@ static void i596_tx_timeout (struct net_device *dev) lp->last_restart = dev->stats.tx_packets; } - dev->trans_start = jiffies; /* prevent tx timeout */ + netif_trans_update(dev); /* prevent tx timeout */ netif_wake_queue (dev); } diff --git a/drivers/net/ethernet/i825xx/lib82596.c b/drivers/net/ethernet/i825xx/lib82596.c index c984998b34a0..3dbc53c21baa 100644 --- a/drivers/net/ethernet/i825xx/lib82596.c +++ b/drivers/net/ethernet/i825xx/lib82596.c @@ -960,7 +960,7 @@ static void i596_tx_timeout (struct net_device *dev) lp->last_restart = dev->stats.tx_packets; } - dev->trans_start = jiffies; /* prevent tx timeout */ + netif_trans_update(dev); /* prevent tx timeout */ netif_wake_queue (dev); } diff --git a/drivers/net/ethernet/i825xx/sun3_82586.c b/drivers/net/ethernet/i825xx/sun3_82586.c index 353f57f675d0..21c84cc9c871 100644 --- a/drivers/net/ethernet/i825xx/sun3_82586.c +++ b/drivers/net/ethernet/i825xx/sun3_82586.c @@ -983,7 +983,7 @@ static void sun3_82586_timeout(struct net_device *dev) p->scb->cmd_cuc = CUC_START; sun3_attn586(); WAIT_4_SCB_CMD(); - dev->trans_start = jiffies; /* prevent tx timeout */ + netif_trans_update(dev); /* prevent tx timeout */ return 0; } #endif @@ -996,7 +996,7 @@ static void sun3_82586_timeout(struct net_device *dev) sun3_82586_close(dev); sun3_82586_open(dev); } - dev->trans_start = jiffies; /* prevent tx timeout */ + netif_trans_update(dev); /* prevent tx timeout */ } /****************************************************** diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c index 5d7db6c01c46..4c9771d57d6e 100644 --- a/drivers/net/ethernet/ibm/emac/core.c +++ b/drivers/net/ethernet/ibm/emac/core.c @@ -301,7 +301,7 @@ static inline void emac_netif_stop(struct emac_instance *dev) dev->no_mcast = 1; netif_addr_unlock(dev->ndev); netif_tx_unlock_bh(dev->ndev); - dev->ndev->trans_start = jiffies; /* prevent tx timeout */ + netif_trans_update(dev->ndev); /* prevent tx timeout */ mal_poll_disable(dev->mal, &dev->commac); netif_tx_disable(dev->ndev); } @@ -1377,7 +1377,7 @@ static inline int emac_xmit_finish(struct emac_instance *dev, int len) DBG2(dev, "stopped TX queue" NL); } - ndev->trans_start = jiffies; + netif_trans_update(ndev); ++dev->stats.tx_packets; dev->stats.tx_bytes += len; diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c index 206a466999ed..e05aca9bef0e 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c @@ -145,7 +145,7 @@ static void fm10k_reinit(struct fm10k_intfc *interface) WARN_ON(in_interrupt()); /* put off any impending NetWatchDogTimeout */ - netdev->trans_start = jiffies; + netif_trans_update(netdev); while (test_and_set_bit(__FM10K_RESETTING, &interface->state)) usleep_range(1000, 2000); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 18dcfc577ba9..5f3d239310c6 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -5287,7 +5287,7 @@ void ixgbe_reinit_locked(struct ixgbe_adapter *adapter) { WARN_ON(in_interrupt()); /* put off any impending NetWatchDogTimeout */ - adapter->netdev->trans_start = jiffies; + netif_trans_update(adapter->netdev); while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state)) usleep_range(1000, 2000); diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c index 
d74f5f4e5782..1799fe1415df 100644 --- a/drivers/net/ethernet/korina.c +++ b/drivers/net/ethernet/korina.c @@ -152,7 +152,7 @@ static inline void korina_abort_dma(struct net_device *dev, writel(0x10, &ch->dmac); while (!(readl(&ch->dmas) & DMA_STAT_HALT)) - dev->trans_start = jiffies; + netif_trans_update(dev); writel(0, &ch->dmas); } @@ -283,7 +283,7 @@ static int korina_send_packet(struct sk_buff *skb, struct net_device *dev) } dma_cache_wback((u32) td, sizeof(*td)); - dev->trans_start = jiffies; + netif_trans_update(dev); spin_unlock_irqrestore(&lp->lock, flags); return NETDEV_TX_OK; @@ -622,7 +622,7 @@ korina_tx_dma_interrupt(int irq, void *dev_id) &(lp->tx_dma_regs->dmandptr)); lp->tx_chain_status = desc_empty; lp->tx_chain_head = lp->tx_chain_tail; - dev->trans_start = jiffies; + netif_trans_update(dev); } if (dmas & DMA_STAT_ERR) printk(KERN_ERR "%s: DMA error\n", dev->name); @@ -811,7 +811,7 @@ static int korina_init(struct net_device *dev) /* reset ethernet logic */ writel(0, &lp->eth_regs->ethintfc); while ((readl(&lp->eth_regs->ethintfc) & ETH_INT_FC_RIP)) - dev->trans_start = jiffies; + netif_trans_update(dev); /* Enable Ethernet Interface */ writel(ETH_INT_FC_EN, &lp->eth_regs->ethintfc); diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c index b630ef1e9646..dc82b1b19574 100644 --- a/drivers/net/ethernet/lantiq_etop.c +++ b/drivers/net/ethernet/lantiq_etop.c @@ -519,7 +519,7 @@ ltq_etop_tx(struct sk_buff *skb, struct net_device *dev) byte_offset = CPHYSADDR(skb->data) % 16; ch->skb[ch->dma.desc] = skb; - dev->trans_start = jiffies; + netif_trans_update(dev); spin_lock_irqsave(&priv->lock, flags); desc->addr = ((unsigned int) dma_map_single(NULL, skb->data, len, @@ -657,7 +657,7 @@ ltq_etop_tx_timeout(struct net_device *dev) err = ltq_etop_hw_init(dev); if (err) goto err_hw; - dev->trans_start = jiffies; + netif_trans_update(dev); netif_wake_queue(dev); return; diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c index c442f6ad15ff..15cf50d7e316 100644 --- a/drivers/net/ethernet/marvell/pxa168_eth.c +++ b/drivers/net/ethernet/marvell/pxa168_eth.c @@ -1297,7 +1297,7 @@ static int pxa168_eth_start_xmit(struct sk_buff *skb, struct net_device *dev) stats->tx_bytes += length; stats->tx_packets++; - dev->trans_start = jiffies; + netif_trans_update(dev); if (pep->tx_ring_size - pep->tx_desc_count <= 1) { /* We handled the current skb, but now we are out of space.*/ netif_stop_queue(dev); diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c index ec0a22119e09..467138b423d3 100644 --- a/drivers/net/ethernet/marvell/sky2.c +++ b/drivers/net/ethernet/marvell/sky2.c @@ -2418,7 +2418,7 @@ static int sky2_change_mtu(struct net_device *dev, int new_mtu) sky2_write32(hw, B0_IMSK, 0); sky2_read32(hw, B0_IMSK); - dev->trans_start = jiffies; /* prevent tx timeout */ + netif_trans_update(dev); /* prevent tx timeout */ napi_disable(&hw->napi); netif_tx_disable(dev); diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c index 75dc46c5fca2..280e761d3a97 100644 --- a/drivers/net/ethernet/micrel/ksz884x.c +++ b/drivers/net/ethernet/micrel/ksz884x.c @@ -4790,7 +4790,7 @@ static void transmit_cleanup(struct dev_info *hw_priv, int normal) /* Notify the network subsystem that the packet has been sent. 
*/ if (dev) - dev->trans_start = jiffies; + netif_trans_update(dev); } /** @@ -4965,7 +4965,7 @@ static void netdev_tx_timeout(struct net_device *dev) hw_ena_intr(hw); } - dev->trans_start = jiffies; + netif_trans_update(dev); netif_wake_queue(dev); } diff --git a/drivers/net/ethernet/microchip/encx24j600.c b/drivers/net/ethernet/microchip/encx24j600.c index 707283bb62ba..42e34076d2de 100644 --- a/drivers/net/ethernet/microchip/encx24j600.c +++ b/drivers/net/ethernet/microchip/encx24j600.c @@ -874,7 +874,7 @@ static netdev_tx_t encx24j600_tx(struct sk_buff *skb, struct net_device *dev) netif_stop_queue(dev); /* save the timestamp */ - dev->trans_start = jiffies; + netif_trans_update(dev); /* Remember the skb for deferred processing */ priv->tx_skb = skb; diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c index 3e67f451f2ab..4367dd6879a2 100644 --- a/drivers/net/ethernet/moxa/moxart_ether.c +++ b/drivers/net/ethernet/moxa/moxart_ether.c @@ -376,7 +376,7 @@ static int moxart_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev) priv->tx_head = TX_NEXT(tx_head); - ndev->trans_start = jiffies; + netif_trans_update(ndev); ret = NETDEV_TX_OK; out_unlock: spin_unlock_irq(&priv->txlock); diff --git a/drivers/net/ethernet/natsemi/natsemi.c b/drivers/net/ethernet/natsemi/natsemi.c index 122c2ee3dfe2..ed89029ff75b 100644 --- a/drivers/net/ethernet/natsemi/natsemi.c +++ b/drivers/net/ethernet/natsemi/natsemi.c @@ -1904,7 +1904,7 @@ static void ns_tx_timeout(struct net_device *dev) spin_unlock_irq(&np->lock); enable_irq(irq); - dev->trans_start = jiffies; /* prevent tx timeout */ + netif_trans_update(dev); /* prevent tx timeout */ dev->stats.tx_errors++; netif_wake_queue(dev); } diff --git a/drivers/net/ethernet/natsemi/sonic.c b/drivers/net/ethernet/natsemi/sonic.c index 1bd419dbda6d..612c7a44b26c 100644 --- a/drivers/net/ethernet/natsemi/sonic.c +++ b/drivers/net/ethernet/natsemi/sonic.c @@ -174,7 +174,7 @@ static void sonic_tx_timeout(struct net_device *dev) /* Try to restart the adaptor. */ sonic_init(dev); lp->stats.tx_errors++; - dev->trans_start = jiffies; /* prevent tx timeout */ + netif_trans_update(dev); /* prevent tx timeout */ netif_wake_queue(dev); } diff --git a/drivers/net/ethernet/nuvoton/w90p910_ether.c b/drivers/net/ethernet/nuvoton/w90p910_ether.c index 52d9a94aebb9..87b7b814778b 100644 --- a/drivers/net/ethernet/nuvoton/w90p910_ether.c +++ b/drivers/net/ethernet/nuvoton/w90p910_ether.c @@ -476,7 +476,7 @@ static void w90p910_reset_mac(struct net_device *dev) w90p910_init_desc(dev); - dev->trans_start = jiffies; /* prevent tx timeout */ + netif_trans_update(dev); /* prevent tx timeout */ ether->cur_tx = 0x0; ether->finish_tx = 0x0; ether->cur_rx = 0x0; @@ -490,7 +490,7 @@ static void w90p910_reset_mac(struct net_device *dev) w90p910_trigger_tx(dev); w90p910_trigger_rx(dev); - dev->trans_start = jiffies; /* prevent tx timeout */ + netif_trans_update(dev); /* prevent tx timeout */ if (netif_queue_stopped(dev)) netif_wake_queue(dev); diff --git a/drivers/net/ethernet/packetengines/hamachi.c b/drivers/net/ethernet/packetengines/hamachi.c index 13d88a6025c8..91be2f02ef1c 100644 --- a/drivers/net/ethernet/packetengines/hamachi.c +++ b/drivers/net/ethernet/packetengines/hamachi.c @@ -1144,7 +1144,7 @@ static void hamachi_tx_timeout(struct net_device *dev) hmp->rx_ring[RX_RING_SIZE-1].status_n_length |= cpu_to_le32(DescEndRing); /* Trigger an immediate transmit demand. 
*/ - dev->trans_start = jiffies; /* prevent tx timeout */ + netif_trans_update(dev); /* prevent tx timeout */ dev->stats.tx_errors++; /* Restart the chip's Tx/Rx processes . */ diff --git a/drivers/net/ethernet/packetengines/yellowfin.c b/drivers/net/ethernet/packetengines/yellowfin.c index fa2db41e02f8..fb1d1031b091 100644 --- a/drivers/net/ethernet/packetengines/yellowfin.c +++ b/drivers/net/ethernet/packetengines/yellowfin.c @@ -714,7 +714,7 @@ static void yellowfin_tx_timeout(struct net_device *dev) if (yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE) netif_wake_queue (dev); /* Typical path */ - dev->trans_start = jiffies; /* prevent tx timeout */ + netif_trans_update(dev); /* prevent tx timeout */ dev->stats.tx_errors++; } diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c index fd362b6923f4..cad37af1517d 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c @@ -2285,7 +2285,7 @@ static void netxen_tx_timeout_task(struct work_struct *work) goto request_reset; } } - adapter->netdev->trans_start = jiffies; + netif_trans_update(adapter->netdev); rtnl_unlock(); return; diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c index 82f3c8811400..6e2add979471 100644 --- a/drivers/net/ethernet/qualcomm/qca_spi.c +++ b/drivers/net/ethernet/qualcomm/qca_spi.c @@ -719,7 +719,7 @@ qcaspi_netdev_xmit(struct sk_buff *skb, struct net_device *dev) qca->stats.ring_full++; } - dev->trans_start = jiffies; + netif_trans_update(dev); if (qca->spi_thread && qca->spi_thread->state != TASK_RUNNING) diff --git a/drivers/net/ethernet/realtek/atp.c b/drivers/net/ethernet/realtek/atp.c index d77d60ea8202..5cb96785fb63 100644 --- a/drivers/net/ethernet/realtek/atp.c +++ b/drivers/net/ethernet/realtek/atp.c @@ -544,7 +544,7 @@ static void tx_timeout(struct net_device *dev) dev->stats.tx_errors++; /* Try to restart the adapter. 
*/ hardware_init(dev); - dev->trans_start = jiffies; /* prevent tx timeout */ + netif_trans_update(dev); /* prevent tx timeout */ netif_wake_queue(dev); dev->stats.tx_errors++; } diff --git a/drivers/net/ethernet/seeq/sgiseeq.c b/drivers/net/ethernet/seeq/sgiseeq.c index ca7336605748..c2bd5378ffda 100644 --- a/drivers/net/ethernet/seeq/sgiseeq.c +++ b/drivers/net/ethernet/seeq/sgiseeq.c @@ -572,7 +572,7 @@ static inline int sgiseeq_reset(struct net_device *dev) if (err) return err; - dev->trans_start = jiffies; /* prevent tx timeout */ + netif_trans_update(dev); /* prevent tx timeout */ netif_wake_queue(dev); return 0; @@ -648,7 +648,7 @@ static void timeout(struct net_device *dev) printk(KERN_NOTICE "%s: transmit timed out, resetting\n", dev->name); sgiseeq_reset(dev); - dev->trans_start = jiffies; /* prevent tx timeout */ + netif_trans_update(dev); /* prevent tx timeout */ netif_wake_queue(dev); } diff --git a/drivers/net/ethernet/sgi/meth.c b/drivers/net/ethernet/sgi/meth.c index 5eac523b4b0c..aaa80f13859b 100644 --- a/drivers/net/ethernet/sgi/meth.c +++ b/drivers/net/ethernet/sgi/meth.c @@ -708,7 +708,7 @@ static int meth_tx(struct sk_buff *skb, struct net_device *dev) mace->eth.dma_ctrl = priv->dma_ctrl; meth_add_to_tx_ring(priv, skb); - dev->trans_start = jiffies; /* save the timestamp */ + netif_trans_update(dev); /* save the timestamp */ /* If TX ring is full, tell the upper layer to stop sending packets */ if (meth_tx_full(dev)) { @@ -756,7 +756,7 @@ static void meth_tx_timeout(struct net_device *dev) /* Enable interrupt */ spin_unlock_irqrestore(&priv->meth_lock, flags); - dev->trans_start = jiffies; /* prevent tx timeout */ + netif_trans_update(dev); /* prevent tx timeout */ netif_wake_queue(dev); } diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c index fd812d2e5e1c..95001ee408ab 100644 --- a/drivers/net/ethernet/sis/sis900.c +++ b/drivers/net/ethernet/sis/sis900.c @@ -1575,7 +1575,7 @@ static void sis900_tx_timeout(struct net_device *net_dev) spin_unlock_irqrestore(&sis_priv->lock, flags); - net_dev->trans_start = jiffies; /* prevent tx timeout */ + netif_trans_update(net_dev); /* prevent tx timeout */ /* load Transmit Descriptor Register */ sw32(txdp, sis_priv->tx_ring_dma); diff --git a/drivers/net/ethernet/smsc/epic100.c b/drivers/net/ethernet/smsc/epic100.c index 443f1da9fc9e..7186b89269ad 100644 --- a/drivers/net/ethernet/smsc/epic100.c +++ b/drivers/net/ethernet/smsc/epic100.c @@ -889,7 +889,7 @@ static void epic_tx_timeout(struct net_device *dev) ew32(COMMAND, TxQueued); } - dev->trans_start = jiffies; /* prevent tx timeout */ + netif_trans_update(dev); /* prevent tx timeout */ dev->stats.tx_errors++; if (!ep->tx_full) netif_wake_queue(dev); diff --git a/drivers/net/ethernet/smsc/smc911x.c b/drivers/net/ethernet/smsc/smc911x.c index a733868a43aa..cb49c9654f0a 100644 --- a/drivers/net/ethernet/smsc/smc911x.c +++ b/drivers/net/ethernet/smsc/smc911x.c @@ -499,7 +499,7 @@ static void smc911x_hardware_send_pkt(struct net_device *dev) /* DMA complete IRQ will free buffer and set jiffies */ #else SMC_PUSH_DATA(lp, buf, len); - dev->trans_start = jiffies; + netif_trans_update(dev); dev_kfree_skb_irq(skb); #endif if (!lp->tx_throttle) { @@ -1189,7 +1189,7 @@ smc911x_tx_dma_irq(void *data) DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, dev, "TX DMA irq handler\n"); BUG_ON(skb == NULL); dma_unmap_single(NULL, tx_dmabuf, tx_dmalen, DMA_TO_DEVICE); - dev->trans_start = jiffies; + netif_trans_update(dev); dev_kfree_skb_irq(skb); lp->current_tx_skb = NULL; if 
(lp->pending_tx_skb != NULL) @@ -1283,7 +1283,7 @@ static void smc911x_timeout(struct net_device *dev) schedule_work(&lp->phy_configure); /* We can accept TX packets again */ - dev->trans_start = jiffies; /* prevent tx timeout */ + netif_trans_update(dev); /* prevent tx timeout */ netif_wake_queue(dev); } diff --git a/drivers/net/ethernet/smsc/smc9194.c b/drivers/net/ethernet/smsc/smc9194.c index 664f596971b5..d496888b85d3 100644 --- a/drivers/net/ethernet/smsc/smc9194.c +++ b/drivers/net/ethernet/smsc/smc9194.c @@ -663,7 +663,7 @@ static void smc_hardware_send_packet( struct net_device * dev ) lp->saved_skb = NULL; dev_kfree_skb_any (skb); - dev->trans_start = jiffies; + netif_trans_update(dev); /* we can send another packet */ netif_wake_queue(dev); @@ -1104,7 +1104,7 @@ static void smc_timeout(struct net_device *dev) /* "kick" the adaptor */ smc_reset( dev->base_addr ); smc_enable( dev->base_addr ); - dev->trans_start = jiffies; /* prevent tx timeout */ + netif_trans_update(dev); /* prevent tx timeout */ /* clear anything saved */ ((struct smc_local *)netdev_priv(dev))->saved_skb = NULL; netif_wake_queue(dev); diff --git a/drivers/net/ethernet/smsc/smc91c92_cs.c b/drivers/net/ethernet/smsc/smc91c92_cs.c index 3449893aea8d..db3c696d7002 100644 --- a/drivers/net/ethernet/smsc/smc91c92_cs.c +++ b/drivers/net/ethernet/smsc/smc91c92_cs.c @@ -1172,7 +1172,7 @@ static void smc_hardware_send_packet(struct net_device * dev) smc->saved_skb = NULL; dev_kfree_skb_irq(skb); - dev->trans_start = jiffies; + netif_trans_update(dev); netif_start_queue(dev); } @@ -1187,7 +1187,7 @@ static void smc_tx_timeout(struct net_device *dev) inw(ioaddr)&0xff, inw(ioaddr + 2)); dev->stats.tx_errors++; smc_reset(dev); - dev->trans_start = jiffies; /* prevent tx timeout */ + netif_trans_update(dev); /* prevent tx timeout */ smc->saved_skb = NULL; netif_wake_queue(dev); } diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c index c5ed27c54724..18ac52ded696 100644 --- a/drivers/net/ethernet/smsc/smc91x.c +++ b/drivers/net/ethernet/smsc/smc91x.c @@ -619,7 +619,7 @@ static void smc_hardware_send_pkt(unsigned long data) SMC_SET_MMU_CMD(lp, MC_ENQUEUE); smc_special_unlock(&lp->lock, flags); - dev->trans_start = jiffies; + netif_trans_update(dev); dev->stats.tx_packets++; dev->stats.tx_bytes += len; @@ -1364,7 +1364,7 @@ static void smc_timeout(struct net_device *dev) schedule_work(&lp->phy_configure); /* We can accept TX packets again */ - dev->trans_start = jiffies; /* prevent tx timeout */ + netif_trans_update(dev); /* prevent tx timeout */ netif_wake_queue(dev); } diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c index 9cc45649f477..a2371aa14a49 100644 --- a/drivers/net/ethernet/sun/niu.c +++ b/drivers/net/ethernet/sun/niu.c @@ -6431,7 +6431,7 @@ static int niu_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) static void niu_netif_stop(struct niu *np) { - np->dev->trans_start = jiffies; /* prevent tx timeout */ + netif_trans_update(np->dev); /* prevent tx timeout */ niu_disable_napi(np); diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c index 2437227712dc..d6ad0fbd054e 100644 --- a/drivers/net/ethernet/sun/sungem.c +++ b/drivers/net/ethernet/sun/sungem.c @@ -226,7 +226,7 @@ static void gem_put_cell(struct gem *gp) static inline void gem_netif_stop(struct gem *gp) { - gp->dev->trans_start = jiffies; /* prevent tx timeout */ + netif_trans_update(gp->dev); /* prevent tx timeout */ napi_disable(&gp->napi); 
netif_tx_disable(gp->dev); } diff --git a/drivers/net/ethernet/synopsys/dwc_eth_qos.c b/drivers/net/ethernet/synopsys/dwc_eth_qos.c index af11ed1e0bcc..158213cd6cdd 100644 --- a/drivers/net/ethernet/synopsys/dwc_eth_qos.c +++ b/drivers/net/ethernet/synopsys/dwc_eth_qos.c @@ -949,7 +949,7 @@ static void dwceqos_adjust_link(struct net_device *ndev) if (status_change) { if (phydev->link) { - lp->ndev->trans_start = jiffies; + netif_trans_update(lp->ndev); dwceqos_link_up(lp); } else { dwceqos_link_down(lp); @@ -2203,7 +2203,7 @@ static int dwceqos_start_xmit(struct sk_buff *skb, struct net_device *ndev) netdev_sent_queue(ndev, skb->len); spin_unlock_bh(&lp->tx_lock); - ndev->trans_start = jiffies; + netif_trans_update(ndev); return 0; tx_error: diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c index 2524a69db318..7452b5f9d024 100644 --- a/drivers/net/ethernet/tehuti/tehuti.c +++ b/drivers/net/ethernet/tehuti/tehuti.c @@ -1701,7 +1701,7 @@ static netdev_tx_t bdx_tx_transmit(struct sk_buff *skb, #endif #ifdef BDX_LLTX - ndev->trans_start = jiffies; /* NETIF_F_LLTX driver :( */ + netif_trans_update(ndev); /* NETIF_F_LLTX driver :( */ #endif ndev->stats.tx_packets++; ndev->stats.tx_bytes += skb->len; diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 68577ee2e64a..4b08a2f52b3e 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -1389,7 +1389,7 @@ static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb, struct cpsw_priv *priv = netdev_priv(ndev); int ret; - ndev->trans_start = jiffies; + netif_trans_update(ndev); if (skb_padto(skb, CPSW_MIN_PACKET_SIZE)) { cpsw_err(priv, tx_err, "packet pad failed\n"); diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c index 1d0942c53120..32516661f180 100644 --- a/drivers/net/ethernet/ti/netcp_core.c +++ b/drivers/net/ethernet/ti/netcp_core.c @@ -1272,7 +1272,7 @@ static int netcp_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev) if (ret) goto drop; - ndev->trans_start = jiffies; + netif_trans_update(ndev); /* Check Tx pool count & stop subqueue if needed */ desc_count = knav_pool_count(netcp->tx_pool); @@ -1788,7 +1788,7 @@ static void netcp_ndo_tx_timeout(struct net_device *ndev) dev_err(netcp->ndev_dev, "transmit timed out tx descs(%d)\n", descs); netcp_process_tx_compl_packets(netcp, netcp->tx_pool_size); - ndev->trans_start = jiffies; + netif_trans_update(ndev); netif_tx_wake_all_queues(ndev); } diff --git a/drivers/net/ethernet/ti/tlan.c b/drivers/net/ethernet/ti/tlan.c index a274cd49afe9..561703317312 100644 --- a/drivers/net/ethernet/ti/tlan.c +++ b/drivers/net/ethernet/ti/tlan.c @@ -1007,7 +1007,7 @@ static void tlan_tx_timeout(struct net_device *dev) tlan_reset_lists(dev); tlan_read_and_clear_stats(dev, TLAN_IGNORE); tlan_reset_adapter(dev); - dev->trans_start = jiffies; /* prevent tx timeout */ + netif_trans_update(dev); /* prevent tx timeout */ netif_wake_queue(dev); } diff --git a/drivers/net/ethernet/tile/tilepro.c b/drivers/net/ethernet/tile/tilepro.c index 298e059d0498..0bb98bc70c9d 100644 --- a/drivers/net/ethernet/tile/tilepro.c +++ b/drivers/net/ethernet/tile/tilepro.c @@ -1883,7 +1883,7 @@ static int tile_net_tx(struct sk_buff *skb, struct net_device *dev) /* Save the timestamp. 
*/ - dev->trans_start = jiffies; + netif_trans_update(dev); #ifdef TILE_NET_PARANOIA diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c index 67610270d171..36a6e8b54d94 100644 --- a/drivers/net/ethernet/toshiba/spider_net.c +++ b/drivers/net/ethernet/toshiba/spider_net.c @@ -705,7 +705,7 @@ spider_net_prepare_tx_descr(struct spider_net_card *card, wmb(); descr->prev->hwdescr->next_descr_addr = descr->bus_addr; - card->netdev->trans_start = jiffies; /* set netdev watchdog timer */ + netif_trans_update(card->netdev); /* set netdev watchdog timer */ return 0; } diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c index 2b7550c43f78..9d14731cdcb1 100644 --- a/drivers/net/ethernet/via/via-rhine.c +++ b/drivers/net/ethernet/via/via-rhine.c @@ -1758,7 +1758,7 @@ static void rhine_reset_task(struct work_struct *work) spin_unlock_bh(&rp->lock); - dev->trans_start = jiffies; /* prevent tx timeout */ + netif_trans_update(dev); /* prevent tx timeout */ dev->stats.tx_errors++; netif_wake_queue(dev); diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c index 8ed0c7735ee3..ec1889ce38a3 100644 --- a/drivers/net/ethernet/wiznet/w5100.c +++ b/drivers/net/ethernet/wiznet/w5100.c @@ -782,7 +782,7 @@ static void w5100_restart(struct net_device *ndev) w5100_hw_reset(priv); w5100_hw_start(priv); ndev->stats.tx_errors++; - ndev->trans_start = jiffies; + netif_trans_update(ndev); netif_wake_queue(ndev); } diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c index 8da7b930ff59..0b37ce9f28f1 100644 --- a/drivers/net/ethernet/wiznet/w5300.c +++ b/drivers/net/ethernet/wiznet/w5300.c @@ -362,7 +362,7 @@ static void w5300_tx_timeout(struct net_device *ndev) w5300_hw_reset(priv); w5300_hw_start(priv); ndev->stats.tx_errors++; - ndev->trans_start = jiffies; + netif_trans_update(ndev); netif_wake_queue(ndev); } diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c index 5a1068df7038..739708712022 100644 --- a/drivers/net/ethernet/xilinx/ll_temac_main.c +++ b/drivers/net/ethernet/xilinx/ll_temac_main.c @@ -584,7 +584,7 @@ static void temac_device_reset(struct net_device *ndev) dev_err(&ndev->dev, "Error setting TEMAC options\n"); /* Init Driver variable */ - ndev->trans_start = jiffies; /* prevent tx timeout */ + netif_trans_update(ndev); /* prevent tx timeout */ } static void temac_adjust_link(struct net_device *ndev) diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c index 4684644703cc..8c7f5be51e62 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c @@ -508,7 +508,7 @@ static void axienet_device_reset(struct net_device *ndev) axienet_set_multicast_list(ndev); axienet_setoptions(ndev, lp->options); - ndev->trans_start = jiffies; + netif_trans_update(ndev); } /** diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c index e324b3092380..3cee84a24815 100644 --- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c +++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c @@ -531,7 +531,7 @@ static void xemaclite_tx_timeout(struct net_device *dev) } /* To exclude tx timeout */ - dev->trans_start = jiffies; /* prevent tx timeout */ + netif_trans_update(dev); /* prevent tx timeout */ /* We're all ready to go. 
Start the queue */ netif_wake_queue(dev); @@ -563,7 +563,7 @@ static void xemaclite_tx_handler(struct net_device *dev) dev->stats.tx_bytes += lp->deferred_skb->len; dev_kfree_skb_irq(lp->deferred_skb); lp->deferred_skb = NULL; - dev->trans_start = jiffies; /* prevent tx timeout */ + netif_trans_update(dev); /* prevent tx timeout */ netif_wake_queue(dev); } } diff --git a/drivers/net/ethernet/xircom/xirc2ps_cs.c b/drivers/net/ethernet/xircom/xirc2ps_cs.c index d56f8693202b..7b44968e02e6 100644 --- a/drivers/net/ethernet/xircom/xirc2ps_cs.c +++ b/drivers/net/ethernet/xircom/xirc2ps_cs.c @@ -1199,7 +1199,7 @@ xirc2ps_tx_timeout_task(struct work_struct *work) struct net_device *dev = local->dev; /* reset the card */ do_reset(dev,1); - dev->trans_start = jiffies; /* prevent tx timeout */ + netif_trans_update(dev); /* prevent tx timeout */ netif_wake_queue(dev); } diff --git a/drivers/net/fjes/fjes_main.c b/drivers/net/fjes/fjes_main.c index 7ad3d04314c5..f4e69261a3ce 100644 --- a/drivers/net/fjes/fjes_main.c +++ b/drivers/net/fjes/fjes_main.c @@ -718,7 +718,7 @@ fjes_xmit_frame(struct sk_buff *skb, struct net_device *netdev) ret = NETDEV_TX_OK; } else { - netdev->trans_start = jiffies; + netif_trans_update(netdev); netif_tx_stop_queue(cur_queue); if (!work_pending(&adapter->tx_stall_task)) diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c index c685937e1de3..1dfe2304daa7 100644 --- a/drivers/net/hamradio/mkiss.c +++ b/drivers/net/hamradio/mkiss.c @@ -519,7 +519,7 @@ static void ax_encaps(struct net_device *dev, unsigned char *icp, int len) dev->stats.tx_packets++; dev->stats.tx_bytes += actual; - ax->dev->trans_start = jiffies; + netif_trans_update(ax->dev); ax->xleft = count - actual; ax->xhead = ax->xbuff + actual; } diff --git a/drivers/net/hamradio/scc.c b/drivers/net/hamradio/scc.c index ce88df33fe17..b8083161ef46 100644 --- a/drivers/net/hamradio/scc.c +++ b/drivers/net/hamradio/scc.c @@ -1669,7 +1669,7 @@ static netdev_tx_t scc_net_tx(struct sk_buff *skb, struct net_device *dev) dev_kfree_skb(skb_del); } skb_queue_tail(&scc->tx_queue, skb); - dev->trans_start = jiffies; + netif_trans_update(dev); /* diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c index 1a4729c36aa4..aaff07c10058 100644 --- a/drivers/net/hamradio/yam.c +++ b/drivers/net/hamradio/yam.c @@ -601,7 +601,7 @@ static netdev_tx_t yam_send_packet(struct sk_buff *skb, return ax25_ip_xmit(skb); skb_queue_tail(&yp->send_queue, skb); - dev->trans_start = jiffies; + netif_trans_update(dev); return NETDEV_TX_OK; } diff --git a/drivers/net/irda/ali-ircc.c b/drivers/net/irda/ali-ircc.c index 64bb44d5d867..c285eafd3f1c 100644 --- a/drivers/net/irda/ali-ircc.c +++ b/drivers/net/irda/ali-ircc.c @@ -1427,7 +1427,7 @@ static netdev_tx_t ali_ircc_fir_hard_xmit(struct sk_buff *skb, /* Check for empty frame */ if (!skb->len) { ali_ircc_change_speed(self, speed); - dev->trans_start = jiffies; + netif_trans_update(dev); spin_unlock_irqrestore(&self->lock, flags); dev_kfree_skb(skb); return NETDEV_TX_OK; @@ -1533,7 +1533,7 @@ static netdev_tx_t ali_ircc_fir_hard_xmit(struct sk_buff *skb, /* Restore bank register */ switch_bank(iobase, BANK0); - dev->trans_start = jiffies; + netif_trans_update(dev); spin_unlock_irqrestore(&self->lock, flags); dev_kfree_skb(skb); @@ -1946,7 +1946,7 @@ static netdev_tx_t ali_ircc_sir_hard_xmit(struct sk_buff *skb, /* Check for empty frame */ if (!skb->len) { ali_ircc_change_speed(self, speed); - dev->trans_start = jiffies; + netif_trans_update(dev); 
spin_unlock_irqrestore(&self->lock, flags); dev_kfree_skb(skb); return NETDEV_TX_OK; @@ -1966,7 +1966,7 @@ static netdev_tx_t ali_ircc_sir_hard_xmit(struct sk_buff *skb, /* Turn on transmit finished interrupt. Will fire immediately! */ outb(UART_IER_THRI, iobase+UART_IER); - dev->trans_start = jiffies; + netif_trans_update(dev); spin_unlock_irqrestore(&self->lock, flags); dev_kfree_skb(skb); diff --git a/drivers/net/irda/bfin_sir.c b/drivers/net/irda/bfin_sir.c index 303c4bd26e17..be5bb0b7f29c 100644 --- a/drivers/net/irda/bfin_sir.c +++ b/drivers/net/irda/bfin_sir.c @@ -531,7 +531,7 @@ static void bfin_sir_send_work(struct work_struct *work) bfin_sir_dma_tx_chars(dev); #endif bfin_sir_enable_tx(port); - dev->trans_start = jiffies; + netif_trans_update(dev); } static int bfin_sir_hard_xmit(struct sk_buff *skb, struct net_device *dev) diff --git a/drivers/net/irda/irda-usb.c b/drivers/net/irda/irda-usb.c index 25f21968fa5c..a198946bc54f 100644 --- a/drivers/net/irda/irda-usb.c +++ b/drivers/net/irda/irda-usb.c @@ -429,7 +429,7 @@ static netdev_tx_t irda_usb_hard_xmit(struct sk_buff *skb, * do an extra memcpy and increment packet counters... * Jean II */ irda_usb_change_speed_xbofs(self); - netdev->trans_start = jiffies; + netif_trans_update(netdev); /* Will netif_wake_queue() in callback */ goto drop; } @@ -526,7 +526,7 @@ static netdev_tx_t irda_usb_hard_xmit(struct sk_buff *skb, netdev->stats.tx_packets++; netdev->stats.tx_bytes += skb->len; - netdev->trans_start = jiffies; + netif_trans_update(netdev); } spin_unlock_irqrestore(&self->lock, flags); diff --git a/drivers/net/irda/nsc-ircc.c b/drivers/net/irda/nsc-ircc.c index dc0dbd8dd0b5..9ef13d8ed813 100644 --- a/drivers/net/irda/nsc-ircc.c +++ b/drivers/net/irda/nsc-ircc.c @@ -1399,7 +1399,7 @@ static netdev_tx_t nsc_ircc_hard_xmit_sir(struct sk_buff *skb, * to make sure packets gets through the * proper xmit handler - Jean II */ } - dev->trans_start = jiffies; + netif_trans_update(dev); spin_unlock_irqrestore(&self->lock, flags); dev_kfree_skb(skb); return NETDEV_TX_OK; @@ -1424,7 +1424,7 @@ static netdev_tx_t nsc_ircc_hard_xmit_sir(struct sk_buff *skb, /* Restore bank register */ outb(bank, iobase+BSR); - dev->trans_start = jiffies; + netif_trans_update(dev); spin_unlock_irqrestore(&self->lock, flags); dev_kfree_skb(skb); @@ -1470,7 +1470,7 @@ static netdev_tx_t nsc_ircc_hard_xmit_fir(struct sk_buff *skb, * the speed change has been done. 
* Jean II */ } - dev->trans_start = jiffies; + netif_trans_update(dev); spin_unlock_irqrestore(&self->lock, flags); dev_kfree_skb(skb); return NETDEV_TX_OK; @@ -1553,7 +1553,7 @@ static netdev_tx_t nsc_ircc_hard_xmit_fir(struct sk_buff *skb, /* Restore bank register */ outb(bank, iobase+BSR); - dev->trans_start = jiffies; + netif_trans_update(dev); spin_unlock_irqrestore(&self->lock, flags); dev_kfree_skb(skb); diff --git a/drivers/net/irda/smsc-ircc2.c b/drivers/net/irda/smsc-ircc2.c index b455ffe8850c..dcf92ba80872 100644 --- a/drivers/net/irda/smsc-ircc2.c +++ b/drivers/net/irda/smsc-ircc2.c @@ -862,7 +862,7 @@ static void smsc_ircc_timeout(struct net_device *dev) spin_lock_irqsave(&self->lock, flags); smsc_ircc_sir_start(self); smsc_ircc_change_speed(self, self->io.speed); - dev->trans_start = jiffies; /* prevent tx timeout */ + netif_trans_update(dev); /* prevent tx timeout */ netif_wake_queue(dev); spin_unlock_irqrestore(&self->lock, flags); } diff --git a/drivers/net/irda/stir4200.c b/drivers/net/irda/stir4200.c index 83cc48a01802..42da094b68dd 100644 --- a/drivers/net/irda/stir4200.c +++ b/drivers/net/irda/stir4200.c @@ -718,7 +718,7 @@ static void stir_send(struct stir_cb *stir, struct sk_buff *skb) stir->netdev->stats.tx_packets++; stir->netdev->stats.tx_bytes += skb->len; - stir->netdev->trans_start = jiffies; + netif_trans_update(stir->netdev); pr_debug("send %d (%d)\n", skb->len, wraplen); if (usb_bulk_msg(stir->usbdev, usb_sndbulkpipe(stir->usbdev, 1), diff --git a/drivers/net/irda/via-ircc.c b/drivers/net/irda/via-ircc.c index 6960d4cd3cae..ca4442a9d631 100644 --- a/drivers/net/irda/via-ircc.c +++ b/drivers/net/irda/via-ircc.c @@ -774,7 +774,7 @@ static netdev_tx_t via_ircc_hard_xmit_sir(struct sk_buff *skb, /* Check for empty frame */ if (!skb->len) { via_ircc_change_speed(self, speed); - dev->trans_start = jiffies; + netif_trans_update(dev); dev_kfree_skb(skb); return NETDEV_TX_OK; } else @@ -821,7 +821,7 @@ static netdev_tx_t via_ircc_hard_xmit_sir(struct sk_buff *skb, RXStart(iobase, OFF); TXStart(iobase, ON); - dev->trans_start = jiffies; + netif_trans_update(dev); spin_unlock_irqrestore(&self->lock, flags); dev_kfree_skb(skb); return NETDEV_TX_OK; @@ -849,7 +849,7 @@ static netdev_tx_t via_ircc_hard_xmit_fir(struct sk_buff *skb, if ((speed != self->io.speed) && (speed != -1)) { if (!skb->len) { via_ircc_change_speed(self, speed); - dev->trans_start = jiffies; + netif_trans_update(dev); dev_kfree_skb(skb); return NETDEV_TX_OK; } else @@ -869,7 +869,7 @@ static netdev_tx_t via_ircc_hard_xmit_fir(struct sk_buff *skb, via_ircc_dma_xmit(self, iobase); //F01 } //F01 if (self->tx_fifo.free < (MAX_TX_WINDOW -1 )) netif_wake_queue(self->netdev); - dev->trans_start = jiffies; + netif_trans_update(dev); dev_kfree_skb(skb); spin_unlock_irqrestore(&self->lock, flags); return NETDEV_TX_OK; diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c index a17d86a57734..9ed6d1c1ee45 100644 --- a/drivers/net/slip/slip.c +++ b/drivers/net/slip/slip.c @@ -407,7 +407,7 @@ static void sl_encaps(struct slip *sl, unsigned char *icp, int len) set_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags); actual = sl->tty->ops->write(sl->tty, sl->xbuff, count); #ifdef SL_CHECK_TRANSMIT - sl->dev->trans_start = jiffies; + netif_trans_update(sl->dev); #endif sl->xleft = count - actual; sl->xhead = sl->xbuff + actual; diff --git a/drivers/net/usb/catc.c b/drivers/net/usb/catc.c index 4e2b26a88b15..d9ca05d3ac8e 100644 --- a/drivers/net/usb/catc.c +++ b/drivers/net/usb/catc.c @@ -376,7 +376,7 @@ static int 
catc_tx_run(struct catc *catc) catc->tx_idx = !catc->tx_idx; catc->tx_ptr = 0; - catc->netdev->trans_start = jiffies; + netif_trans_update(catc->netdev); return status; } @@ -389,7 +389,7 @@ static void catc_tx_done(struct urb *urb) if (status == -ECONNRESET) { dev_dbg(&urb->dev->dev, "Tx Reset.\n"); urb->status = 0; - catc->netdev->trans_start = jiffies; + netif_trans_update(catc->netdev); catc->netdev->stats.tx_errors++; clear_bit(TX_RUNNING, &catc->flags); netif_wake_queue(catc->netdev); diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c index f64b25c221e8..770212baaf05 100644 --- a/drivers/net/usb/kaweth.c +++ b/drivers/net/usb/kaweth.c @@ -938,7 +938,7 @@ static void kaweth_tx_timeout(struct net_device *net) dev_warn(&net->dev, "%s: Tx timed out. Resetting.\n", net->name); kaweth->stats.tx_errors++; - net->trans_start = jiffies; + netif_trans_update(net); usb_unlink_urb(kaweth->tx_urb); } diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c index f64778ad9753..6a9d474b08b2 100644 --- a/drivers/net/usb/lan78xx.c +++ b/drivers/net/usb/lan78xx.c @@ -3045,7 +3045,7 @@ gso_skb: ret = usb_submit_urb(urb, GFP_ATOMIC); switch (ret) { case 0: - dev->net->trans_start = jiffies; + netif_trans_update(dev->net); lan78xx_queue_skb(&dev->txq, skb, tx_start); if (skb_queue_len(&dev->txq) >= dev->tx_qlen) netif_stop_queue(dev->net); @@ -3729,7 +3729,7 @@ int lan78xx_resume(struct usb_interface *intf) usb_free_urb(res); usb_autopm_put_interface_async(dev->intf); } else { - dev->net->trans_start = jiffies; + netif_trans_update(dev->net); lan78xx_queue_skb(&dev->txq, skb, tx_start); } } diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c index 82129eef7774..36cd7f016a8d 100644 --- a/drivers/net/usb/pegasus.c +++ b/drivers/net/usb/pegasus.c @@ -615,7 +615,7 @@ static void write_bulk_callback(struct urb *urb) break; } - net->trans_start = jiffies; /* prevent tx timeout */ + netif_trans_update(net); /* prevent tx timeout */ netif_wake_queue(net); } diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c index d37b7dce2d40..7c72bfac89d0 100644 --- a/drivers/net/usb/rtl8150.c +++ b/drivers/net/usb/rtl8150.c @@ -451,7 +451,7 @@ static void write_bulk_callback(struct urb *urb) if (status) dev_info(&urb->dev->dev, "%s: Tx status %d\n", dev->netdev->name, status); - dev->netdev->trans_start = jiffies; + netif_trans_update(dev->netdev); netif_wake_queue(dev->netdev); } @@ -694,7 +694,7 @@ static netdev_tx_t rtl8150_start_xmit(struct sk_buff *skb, } else { netdev->stats.tx_packets++; netdev->stats.tx_bytes += skb->len; - netdev->trans_start = jiffies; + netif_trans_update(netdev); } return NETDEV_TX_OK; diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index 4837854fd43c..61ba46404937 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c @@ -1416,7 +1416,7 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb, "tx: submit urb err %d\n", retval); break; case 0: - net->trans_start = jiffies; + netif_trans_update(net); __usbnet_queue_skb(&dev->txq, skb, tx_start); if (dev->txq.qlen >= TX_QLEN (dev)) netif_stop_queue (net); @@ -1845,7 +1845,7 @@ int usbnet_resume (struct usb_interface *intf) usb_free_urb(res); usb_autopm_put_interface_async(dev->intf); } else { - dev->net->trans_start = jiffies; + netif_trans_update(dev->net); __skb_queue_tail(&dev->txq, skb); } } diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c index 848ea6a399f2..b87fe0a01c69 100644 --- a/drivers/net/wan/cosa.c +++ b/drivers/net/wan/cosa.c @@ -739,7 +739,7 
@@ static char *cosa_net_setup_rx(struct channel_data *chan, int size) chan->netdev->stats.rx_dropped++; return NULL; } - chan->netdev->trans_start = jiffies; + netif_trans_update(chan->netdev); return skb_put(chan->rx_skb, size); } diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c index 69b994f3b8c5..3c9cbf908ec7 100644 --- a/drivers/net/wan/farsync.c +++ b/drivers/net/wan/farsync.c @@ -831,7 +831,7 @@ fst_tx_dma_complete(struct fst_card_info *card, struct fst_port_info *port, DMA_OWN | TX_STP | TX_ENP); dev->stats.tx_packets++; dev->stats.tx_bytes += len; - dev->trans_start = jiffies; + netif_trans_update(dev); } /* @@ -1389,7 +1389,7 @@ do_bottom_half_tx(struct fst_card_info *card) DMA_OWN | TX_STP | TX_ENP); dev->stats.tx_packets++; dev->stats.tx_bytes += skb->len; - dev->trans_start = jiffies; + netif_trans_update(dev); } else { /* Or do it through dma */ memcpy(card->tx_dma_handle_host, @@ -2258,7 +2258,7 @@ fst_tx_timeout(struct net_device *dev) card->card_no, port->index); fst_issue_cmd(port, ABORTTX); - dev->trans_start = jiffies; + netif_trans_update(dev); netif_wake_queue(dev); port->start = 0; } diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c index bb33b242ab48..299140c04556 100644 --- a/drivers/net/wan/lmc/lmc_main.c +++ b/drivers/net/wan/lmc/lmc_main.c @@ -2105,7 +2105,7 @@ static void lmc_driver_timeout(struct net_device *dev) sc->lmc_device->stats.tx_errors++; sc->extra_stats.tx_ProcTimeout++; /* -baz */ - dev->trans_start = jiffies; /* prevent tx timeout */ + netif_trans_update(dev); /* prevent tx timeout */ bug_out: diff --git a/drivers/net/wan/sbni.c b/drivers/net/wan/sbni.c index 8fef8d83436d..d98c7e57137d 100644 --- a/drivers/net/wan/sbni.c +++ b/drivers/net/wan/sbni.c @@ -860,9 +860,9 @@ prepare_to_send( struct sk_buff *skb, struct net_device *dev ) outb( inb( dev->base_addr + CSR0 ) | TR_REQ, dev->base_addr + CSR0 ); #ifdef CONFIG_SBNI_MULTILINE - nl->master->trans_start = jiffies; + netif_trans_update(nl->master); #else - dev->trans_start = jiffies; + netif_trans_update(dev); #endif } @@ -889,10 +889,10 @@ drop_xmit_queue( struct net_device *dev ) nl->state &= ~(FL_WAIT_ACK | FL_NEED_RESEND); #ifdef CONFIG_SBNI_MULTILINE netif_start_queue( nl->master ); - nl->master->trans_start = jiffies; + netif_trans_update(nl->master); #else netif_start_queue( dev ); - dev->trans_start = jiffies; + netif_trans_update(dev); #endif } diff --git a/drivers/net/wimax/i2400m/netdev.c b/drivers/net/wimax/i2400m/netdev.c index a9970f1af976..bb74f4b9a02f 100644 --- a/drivers/net/wimax/i2400m/netdev.c +++ b/drivers/net/wimax/i2400m/netdev.c @@ -334,7 +334,7 @@ int i2400m_net_tx(struct i2400m *i2400m, struct net_device *net_dev, d_fnstart(3, dev, "(i2400m %p net_dev %p skb %p)\n", i2400m, net_dev, skb); /* FIXME: check eth hdr, only IPv4 is routed by the device as of now */ - net_dev->trans_start = jiffies; + netif_trans_update(net_dev); i2400m_tx_prep_header(skb); d_printf(3, dev, "NETTX: skb %p sending %d bytes to radio\n", skb, skb->len); diff --git a/drivers/net/wireless/cisco/airo.c b/drivers/net/wireless/cisco/airo.c index 4bd9e2b97e86..55456f750229 100644 --- a/drivers/net/wireless/cisco/airo.c +++ b/drivers/net/wireless/cisco/airo.c @@ -2026,7 +2026,7 @@ static int mpi_send_packet (struct net_device *dev) } else { *payloadLen = cpu_to_le16(len - sizeof(etherHead)); - dev->trans_start = jiffies; + netif_trans_update(dev); /* copy data into airo dma buffer */ memcpy(sendbuf, buffer, len); @@ -2107,7 +2107,7 @@ static void 
airo_end_xmit(struct net_device *dev) { i = 0; if ( status == SUCCESS ) { - dev->trans_start = jiffies; + netif_trans_update(dev); for (; i < MAX_FIDS / 2 && (priv->fids[i] & 0xffff0000); i++); } else { priv->fids[fid] &= 0xffff; @@ -2174,7 +2174,7 @@ static void airo_end_xmit11(struct net_device *dev) { i = MAX_FIDS / 2; if ( status == SUCCESS ) { - dev->trans_start = jiffies; + netif_trans_update(dev); for (; i < MAX_FIDS && (priv->fids[i] & 0xffff0000); i++); } else { priv->fids[fid] &= 0xffff; diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2100.c b/drivers/net/wireless/intel/ipw2x00/ipw2100.c index e1e42ed6c412..bfa542c8d6f1 100644 --- a/drivers/net/wireless/intel/ipw2x00/ipw2100.c +++ b/drivers/net/wireless/intel/ipw2x00/ipw2100.c @@ -2954,7 +2954,7 @@ static int __ipw2100_tx_process(struct ipw2100_priv *priv) /* A packet was processed by the hardware, so update the * watchdog */ - priv->net_dev->trans_start = jiffies; + netif_trans_update(priv->net_dev); break; diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2200.c b/drivers/net/wireless/intel/ipw2x00/ipw2200.c index dac13cf42e9f..5adb7cefb2fe 100644 --- a/drivers/net/wireless/intel/ipw2x00/ipw2200.c +++ b/drivers/net/wireless/intel/ipw2x00/ipw2200.c @@ -7707,7 +7707,7 @@ static void ipw_handle_data_packet(struct ipw_priv *priv, struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data; /* We received data from the HW, so stop the watchdog */ - dev->trans_start = jiffies; + netif_trans_update(dev); /* We only process data packets if the * interface is open */ @@ -7770,7 +7770,7 @@ static void ipw_handle_data_packet_monitor(struct ipw_priv *priv, unsigned short len = le16_to_cpu(pkt->u.frame.length); /* We received data from the HW, so stop the watchdog */ - dev->trans_start = jiffies; + netif_trans_update(dev); /* We only process data packets if the * interface is open */ @@ -7952,7 +7952,7 @@ static void ipw_handle_promiscuous_rx(struct ipw_priv *priv, return; /* We received data from the HW, so stop the watchdog */ - dev->trans_start = jiffies; + netif_trans_update(dev); if (unlikely((len + IPW_RX_FRAME_SIZE) > skb_tailroom(rxb->skb))) { dev->stats.rx_errors++; diff --git a/drivers/net/wireless/intersil/hostap/hostap_hw.c b/drivers/net/wireless/intersil/hostap/hostap_hw.c index 515aa3f993f3..a8a9bd8e176a 100644 --- a/drivers/net/wireless/intersil/hostap/hostap_hw.c +++ b/drivers/net/wireless/intersil/hostap/hostap_hw.c @@ -1794,7 +1794,7 @@ static int prism2_transmit(struct net_device *dev, int idx) netif_wake_queue(dev); return -1; } - dev->trans_start = jiffies; + netif_trans_update(dev); /* Since we did not wait for command completion, the card continues * to process on the background and we will finish handling when diff --git a/drivers/net/wireless/intersil/orinoco/main.c b/drivers/net/wireless/intersil/orinoco/main.c index 7b5c554323c7..7afe2004e930 100644 --- a/drivers/net/wireless/intersil/orinoco/main.c +++ b/drivers/net/wireless/intersil/orinoco/main.c @@ -1794,7 +1794,7 @@ void orinoco_reset(struct work_struct *work) printk(KERN_ERR "%s: orinoco_reset: Error %d reenabling card\n", dev->name, err); } else - dev->trans_start = jiffies; + netif_trans_update(dev); } orinoco_unlock_irq(priv); diff --git a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c index f2cd513d54b2..56f109bc8394 100644 --- a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c +++ b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c @@ -1275,7 +1275,7 @@ static netdev_tx_t 
ezusb_xmit(struct sk_buff *skb, struct net_device *dev) goto busy; } - dev->trans_start = jiffies; + netif_trans_update(dev); stats->tx_bytes += skb->len; goto ok; diff --git a/drivers/net/wireless/marvell/mwifiex/init.c b/drivers/net/wireless/marvell/mwifiex/init.c index 517653b3adab..78c532f0d286 100644 --- a/drivers/net/wireless/marvell/mwifiex/init.c +++ b/drivers/net/wireless/marvell/mwifiex/init.c @@ -317,7 +317,7 @@ void mwifiex_set_trans_start(struct net_device *dev) for (i = 0; i < dev->num_tx_queues; i++) netdev_get_tx_queue(dev, i)->trans_start = jiffies; - dev->trans_start = jiffies; + netif_trans_update(dev); } /* diff --git a/drivers/net/wireless/wl3501_cs.c b/drivers/net/wireless/wl3501_cs.c index 99de07d14939..13fd734b61ec 100644 --- a/drivers/net/wireless/wl3501_cs.c +++ b/drivers/net/wireless/wl3501_cs.c @@ -1287,7 +1287,7 @@ static void wl3501_tx_timeout(struct net_device *dev) printk(KERN_ERR "%s: Error %d resetting card on Tx timeout!\n", dev->name, rc); else { - dev->trans_start = jiffies; /* prevent tx timeout */ + netif_trans_update(dev); /* prevent tx timeout */ netif_wake_queue(dev); } } diff --git a/drivers/net/wireless/zydas/zd1201.c b/drivers/net/wireless/zydas/zd1201.c index 6f5c793a7855..dea049b2556f 100644 --- a/drivers/net/wireless/zydas/zd1201.c +++ b/drivers/net/wireless/zydas/zd1201.c @@ -845,7 +845,7 @@ static void zd1201_tx_timeout(struct net_device *dev) usb_unlink_urb(zd->tx_urb); dev->stats.tx_errors++; /* Restart the timeout to quiet the watchdog: */ - dev->trans_start = jiffies; /* prevent tx timeout */ + netif_trans_update(dev); /* prevent tx timeout */ } static int zd1201_set_mac_address(struct net_device *dev, void *p) diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c index c3e22523faf3..ad17fc5883f6 100644 --- a/drivers/s390/net/ctcm_main.c +++ b/drivers/s390/net/ctcm_main.c @@ -642,7 +642,7 @@ static void ctcmpc_send_sweep_req(struct channel *rch) kfree(header); - dev->trans_start = jiffies; + netif_trans_update(dev); skb_queue_tail(&ch->sweep_queue, sweep_skb); fsm_addtimer(&ch->sweep_timer, 100, CTC_EVENT_RSWEEP_TIMER, ch); @@ -911,7 +911,7 @@ static int ctcm_tx(struct sk_buff *skb, struct net_device *dev) if (ctcm_test_and_set_busy(dev)) return NETDEV_TX_BUSY; - dev->trans_start = jiffies; + netif_trans_update(dev); if (ctcm_transmit_skb(priv->channel[CTCM_WRITE], skb) != 0) return NETDEV_TX_BUSY; return NETDEV_TX_OK; @@ -994,7 +994,7 @@ static int ctcmpc_tx(struct sk_buff *skb, struct net_device *dev) goto done; } - dev->trans_start = jiffies; + netif_trans_update(dev); if (ctcmpc_transmit_skb(priv->channel[CTCM_WRITE], skb) != 0) { CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR, "%s(%s): device error - dropped", diff --git a/drivers/s390/net/ctcm_mpc.c b/drivers/s390/net/ctcm_mpc.c index edf16bfba8ee..c103fc7efe9f 100644 --- a/drivers/s390/net/ctcm_mpc.c +++ b/drivers/s390/net/ctcm_mpc.c @@ -671,7 +671,7 @@ static void ctcmpc_send_sweep_resp(struct channel *rch) kfree(header); - dev->trans_start = jiffies; + netif_trans_update(dev); skb_queue_tail(&ch->sweep_queue, sweep_skb); fsm_addtimer(&ch->sweep_timer, 100, CTC_EVENT_RSWEEP_TIMER, ch); diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c index 0ba3a2f81750..b0e8ffdf864b 100644 --- a/drivers/s390/net/netiucv.c +++ b/drivers/s390/net/netiucv.c @@ -1407,7 +1407,7 @@ static int netiucv_tx(struct sk_buff *skb, struct net_device *dev) IUCV_DBF_TEXT(data, 2, "EBUSY from netiucv_tx\n"); return NETDEV_TX_BUSY; } - dev->trans_start = jiffies; + 
netif_trans_update(dev); rc = netiucv_transmit_skb(privptr->conn, skb); netiucv_clear_busy(dev); return rc ? NETDEV_TX_BUSY : NETDEV_TX_OK; diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c index 9b7cc7dc7cb8..13a5ddc2bea5 100644 --- a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c +++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c @@ -1792,7 +1792,7 @@ static short _rtl92e_tx(struct net_device *dev, struct sk_buff *skb) __skb_queue_tail(&ring->queue, skb); pdesc->OWN = 1; spin_unlock_irqrestore(&priv->irq_th_lock, flags); - dev->trans_start = jiffies; + netif_trans_update(dev); rtl92e_writew(dev, TPPoll, 0x01 << tcb_desc->queue_index); return 0; diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c index de714501c996..d705595766a9 100644 --- a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c +++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c @@ -249,7 +249,7 @@ inline void softmac_mgmt_xmit(struct sk_buff *skb, struct ieee80211_device *ieee ieee->seq_ctrl[0]++; /* avoid watchdog triggers */ - ieee->dev->trans_start = jiffies; + netif_trans_update(ieee->dev); ieee->softmac_data_hard_start_xmit(skb,ieee->dev,ieee->basic_rate); //dev_kfree_skb_any(skb);//edit by thomas } @@ -302,7 +302,7 @@ inline void softmac_ps_mgmt_xmit(struct sk_buff *skb, struct ieee80211_device *i ieee->seq_ctrl[0]++; /* avoid watchdog triggers */ - ieee->dev->trans_start = jiffies; + netif_trans_update(ieee->dev); ieee->softmac_data_hard_start_xmit(skb,ieee->dev,ieee->basic_rate); }else{ @@ -2205,7 +2205,7 @@ static void ieee80211_resume_tx(struct ieee80211_device *ieee) ieee->dev, ieee->rate); //(i+1)tx_pending.txb->nr_frags); ieee->stats.tx_packets++; - ieee->dev->trans_start = jiffies; + netif_trans_update(ieee->dev); } } diff --git a/drivers/staging/rtl8192u/r8192U_core.c b/drivers/staging/rtl8192u/r8192U_core.c index 849a95ef723c..4af0140c6ead 100644 --- a/drivers/staging/rtl8192u/r8192U_core.c +++ b/drivers/staging/rtl8192u/r8192U_core.c @@ -1108,7 +1108,7 @@ static void rtl8192_tx_isr(struct urb *tx_urb) if (tcb_desc->queue_index != TXCMD_QUEUE) { if (tx_urb->status == 0) { - dev->trans_start = jiffies; + netif_trans_update(dev); priv->stats.txoktotal++; priv->ieee80211->LinkDetectInfo.NumTxOkInPeriod++; priv->stats.txbytesunicast += @@ -1715,7 +1715,7 @@ short rtl8192_tx(struct net_device *dev, struct sk_buff *skb) return -1; } } - dev->trans_start = jiffies; + netif_trans_update(dev); atomic_inc(&priv->tx_pending[tcb_desc->queue_index]); return 0; } diff --git a/drivers/staging/wlan-ng/p80211netdev.c b/drivers/staging/wlan-ng/p80211netdev.c index 88255ce2871b..1f9dfba5dbb3 100644 --- a/drivers/staging/wlan-ng/p80211netdev.c +++ b/drivers/staging/wlan-ng/p80211netdev.c @@ -393,7 +393,7 @@ static int p80211knetdev_hard_start_xmit(struct sk_buff *skb, goto failed; } - netdev->trans_start = jiffies; + netif_trans_update(netdev); netdev->stats.tx_packets++; /* count only the packet payload */ diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c index c01620780f5b..0c27a00ab42d 100644 --- a/drivers/tty/n_gsm.c +++ b/drivers/tty/n_gsm.c @@ -2662,7 +2662,7 @@ static int gsm_mux_net_start_xmit(struct sk_buff *skb, STATS(net).tx_bytes += skb->len; gsm_dlci_data_kick(dlci); /* And tell the kernel when the last transmit started. 
*/ - net->trans_start = jiffies; + netif_trans_update(net); muxnet_put(mux_net); return NETDEV_TX_OK; } diff --git a/drivers/tty/synclink.c b/drivers/tty/synclink.c index f5476e270734..c8c760151094 100644 --- a/drivers/tty/synclink.c +++ b/drivers/tty/synclink.c @@ -7708,7 +7708,7 @@ static netdev_tx_t hdlcdev_xmit(struct sk_buff *skb, dev_kfree_skb(skb); /* save start time for transmit timeout detection */ - dev->trans_start = jiffies; + netif_trans_update(dev); /* start hardware transmitter if necessary */ spin_lock_irqsave(&info->irq_spinlock,flags); @@ -7764,7 +7764,7 @@ static int hdlcdev_open(struct net_device *dev) mgsl_program_hw(info); /* enable network layer transmit */ - dev->trans_start = jiffies; + netif_trans_update(dev); netif_start_queue(dev); /* inform generic HDLC layer of current DCD status */ diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c index c0a2f5a1b1c2..d5b6471bece4 100644 --- a/drivers/tty/synclink_gt.c +++ b/drivers/tty/synclink_gt.c @@ -1493,7 +1493,7 @@ static netdev_tx_t hdlcdev_xmit(struct sk_buff *skb, dev->stats.tx_bytes += skb->len; /* save start time for transmit timeout detection */ - dev->trans_start = jiffies; + netif_trans_update(dev); spin_lock_irqsave(&info->lock, flags); tx_load(info, skb->data, skb->len); @@ -1552,7 +1552,7 @@ static int hdlcdev_open(struct net_device *dev) program_hw(info); /* enable network layer transmit */ - dev->trans_start = jiffies; + netif_trans_update(dev); netif_start_queue(dev); /* inform generic HDLC layer of current DCD status */ diff --git a/drivers/tty/synclinkmp.c b/drivers/tty/synclinkmp.c index 90da0c712262..3f8968543af0 100644 --- a/drivers/tty/synclinkmp.c +++ b/drivers/tty/synclinkmp.c @@ -1612,7 +1612,7 @@ static netdev_tx_t hdlcdev_xmit(struct sk_buff *skb, dev_kfree_skb(skb); /* save start time for transmit timeout detection */ - dev->trans_start = jiffies; + netif_trans_update(dev); /* start hardware transmitter if necessary */ spin_lock_irqsave(&info->lock,flags); @@ -1668,7 +1668,7 @@ static int hdlcdev_open(struct net_device *dev) program_hw(info); /* enable network layer transmit */ - dev->trans_start = jiffies; + netif_trans_update(dev); netif_start_queue(dev); /* inform generic HDLC layer of current DCD status */ diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c index 637809e3bd0d..a3f7e7c55ebb 100644 --- a/drivers/usb/gadget/function/u_ether.c +++ b/drivers/usb/gadget/function/u_ether.c @@ -597,7 +597,7 @@ static netdev_tx_t eth_start_xmit(struct sk_buff *skb, DBG(dev, "tx queue err %d\n", retval); break; case 0: - net->trans_start = jiffies; + netif_trans_update(net); atomic_inc(&dev->tx_qlen); } diff --git a/net/atm/lec.c b/net/atm/lec.c index cd3b37989057..e574a7e9db6f 100644 --- a/net/atm/lec.c +++ b/net/atm/lec.c @@ -194,7 +194,7 @@ lec_send(struct atm_vcc *vcc, struct sk_buff *skb) static void lec_tx_timeout(struct net_device *dev) { pr_info("%s\n", dev->name); - dev->trans_start = jiffies; + netif_trans_update(dev); netif_wake_queue(dev); } @@ -324,7 +324,7 @@ static netdev_tx_t lec_start_xmit(struct sk_buff *skb, out: if (entry) lec_arp_put(entry); - dev->trans_start = jiffies; + netif_trans_update(dev); return NETDEV_TX_OK; } diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c index 8a136b6a1ff0..dcb16c33cd8b 100644 --- a/net/batman-adv/soft-interface.c +++ b/net/batman-adv/soft-interface.c @@ -208,7 +208,7 @@ static int batadv_interface_tx(struct sk_buff *skb, if (atomic_read(&bat_priv->mesh_state) != 
BATADV_MESH_ACTIVE) goto dropped; - soft_iface->trans_start = jiffies; + netif_trans_update(soft_iface); vid = batadv_get_vid(skb, 0); ethhdr = eth_hdr(skb); diff --git a/net/bluetooth/bnep/netdev.c b/net/bluetooth/bnep/netdev.c index 6ceb5d36a32b..f4fcb4a9d5c1 100644 --- a/net/bluetooth/bnep/netdev.c +++ b/net/bluetooth/bnep/netdev.c @@ -188,7 +188,7 @@ static netdev_tx_t bnep_net_xmit(struct sk_buff *skb, * So we have to queue them and wake up session thread which is sleeping * on the sk_sleep(sk). */ - dev->trans_start = jiffies; + netif_trans_update(dev); skb_queue_tail(&sk->sk_write_queue, skb); wake_up_interruptible(sk_sleep(sk)); diff --git a/net/irda/irlan/irlan_eth.c b/net/irda/irlan/irlan_eth.c index fcfbe579434a..d8b7267280c3 100644 --- a/net/irda/irlan/irlan_eth.c +++ b/net/irda/irlan/irlan_eth.c @@ -181,7 +181,7 @@ static netdev_tx_t irlan_eth_xmit(struct sk_buff *skb, skb = new_skb; } - dev->trans_start = jiffies; + netif_trans_update(dev); len = skb->len; /* Now queue the packet in the transport layer */ diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index 9c7756237904..70182cfe119c 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c @@ -775,7 +775,7 @@ void dev_activate(struct net_device *dev) transition_one_qdisc(dev, dev_ingress_queue(dev), NULL); if (need_watchdog) { - dev->trans_start = jiffies; + netif_trans_update(dev); dev_watchdog_up(dev); } } From 9b36627acecd5792e81daf1a3bff8eab39ed45fb Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Tue, 3 May 2016 16:33:14 +0200 Subject: [PATCH 1342/1649] net: remove dev->trans_start previous patches removed all direct accesses to dev->trans_start, so change the netif_trans_update helper to update trans_start of netdev queue 0 instead and then remove trans_start from struct net_device. AFAICS a lot of the netif_trans_update() invocations are now useless because they occur in ndo_start_xmit and driver doesn't set LLTX (i.e. stack already took care of the update). As I can't test any of them it seems better to just leave them alone. Signed-off-by: Florian Westphal Signed-off-by: David S. Miller --- drivers/net/ethernet/intel/i40e/i40e_main.c | 2 +- include/linux/netdevice.h | 15 +++++---------- net/sched/sch_generic.c | 10 +++------- 3 files changed, 9 insertions(+), 18 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 8e6c0f2487d7..f6da6b76e678 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -328,7 +328,7 @@ static void i40e_tx_timeout(struct net_device *netdev) unsigned long trans_start; q = netdev_get_tx_queue(netdev, i); - trans_start = q->trans_start ? 
: netdev->trans_start; + trans_start = q->trans_start; if (netif_xmit_stopped(q) && time_after(jiffies, (trans_start + netdev->watchdog_timeo))) { diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index f53412cccbaa..63580e6d0df4 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -581,7 +581,7 @@ struct netdev_queue { spinlock_t _xmit_lock ____cacheline_aligned_in_smp; int xmit_lock_owner; /* - * please use this field instead of dev->trans_start + * Time (in jiffies) of last Tx */ unsigned long trans_start; @@ -1545,7 +1545,6 @@ enum netdev_priv_flags { * * @offload_fwd_mark: Offload device fwding mark * - * @trans_start: Time (in jiffies) of last Tx * @watchdog_timeo: Represents the timeout that is used by * the watchdog (see dev_watchdog()) * @watchdog_timer: List of timers @@ -1794,13 +1793,6 @@ struct net_device { #endif /* These may be needed for future network-power-down code. */ - - /* - * trans_start here is expensive for high speed devices on SMP, - * please use netdev_queue->trans_start instead. - */ - unsigned long trans_start; - struct timer_list watchdog_timer; int __percpu *pcpu_refcnt; @@ -3484,7 +3476,10 @@ static inline void txq_trans_update(struct netdev_queue *txq) /* legacy drivers only, netdev_start_xmit() sets txq->trans_start */ static inline void netif_trans_update(struct net_device *dev) { - dev->trans_start = jiffies; + struct netdev_queue *txq = netdev_get_tx_queue(dev, 0); + + if (txq->trans_start != jiffies) + txq->trans_start = jiffies; } /** diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index 70182cfe119c..269dd71b3828 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c @@ -227,13 +227,12 @@ unsigned long dev_trans_start(struct net_device *dev) if (is_vlan_dev(dev)) dev = vlan_dev_real_dev(dev); - res = dev->trans_start; - for (i = 0; i < dev->num_tx_queues; i++) { + res = netdev_get_tx_queue(dev, 0)->trans_start; + for (i = 1; i < dev->num_tx_queues; i++) { val = netdev_get_tx_queue(dev, i)->trans_start; if (val && time_after(val, res)) res = val; } - dev->trans_start = res; return res; } @@ -256,10 +255,7 @@ static void dev_watchdog(unsigned long arg) struct netdev_queue *txq; txq = netdev_get_tx_queue(dev, i); - /* - * old device drivers set dev->trans_start - */ - trans_start = txq->trans_start ? : dev->trans_start; + trans_start = txq->trans_start; if (netif_xmit_stopped(txq) && time_after(jiffies, (trans_start + dev->watchdog_timeo))) { From e98a3aabf85f60b80c6ef93e509d38144ca434b1 Mon Sep 17 00:00:00 2001 From: Sergei Shtylyov Date: Tue, 3 May 2016 23:14:41 +0300 Subject: [PATCH 1343/1649] mdio_bus: don't return NULL from mdiobus_scan() I've finally noticed that mdiobus_scan() also returns either NULL or error value on failure. Return ERR_PTR(-ENODEV) instead of NULL since this is the error value already filtered out by the callers that want to ignore the MDIO address scan failure... Signed-off-by: Sergei Shtylyov Signed-off-by: David S. 
Miller --- drivers/net/phy/mdio_bus.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c index 388f9922647b..09deef4bed09 100644 --- a/drivers/net/phy/mdio_bus.c +++ b/drivers/net/phy/mdio_bus.c @@ -431,7 +431,7 @@ struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr) err = phy_device_register(phydev); if (err) { phy_device_free(phydev); - return NULL; + return ERR_PTR(-ENODEV); } return phydev; From 46cc6e4976e3d9058490f20d93bc7805f7f2d81e Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Tue, 3 May 2016 16:56:03 -0700 Subject: [PATCH 1344/1649] tcp: fix lockdep splat in tcp_snd_una_update() tcp_snd_una_update() and tcp_rcv_nxt_update() call u64_stats_update_begin() either from process context or BH handler. This triggers a lockdep splat on 32bit & SMP builds. We could add u64_stats_update_begin_bh() variant but this would slow down 32bit builds with useless local_disable_bh() and local_enable_bh() pairs, since we own the socket lock at this point. I add sock_owned_by_me() helper to have proper lockdep support even on 64bit builds, and new u64_stats_update_begin_raw() and u64_stats_update_end_raw methods. Fixes: c10d9310edf5 ("tcp: do not assume TCP code is non preemptible") Reported-by: Fabio Estevam Diagnosed-by: Francois Romieu Signed-off-by: Eric Dumazet Tested-by: Fabio Estevam Signed-off-by: David S. Miller --- include/linux/u64_stats_sync.h | 14 ++++++++++++++ include/net/sock.h | 7 ++++++- net/ipv4/tcp_input.c | 10 ++++++---- 3 files changed, 26 insertions(+), 5 deletions(-) diff --git a/include/linux/u64_stats_sync.h b/include/linux/u64_stats_sync.h index df89c9bcba7d..d3a2bb712af3 100644 --- a/include/linux/u64_stats_sync.h +++ b/include/linux/u64_stats_sync.h @@ -89,6 +89,20 @@ static inline void u64_stats_update_end(struct u64_stats_sync *syncp) #endif } +static inline void u64_stats_update_begin_raw(struct u64_stats_sync *syncp) +{ +#if BITS_PER_LONG==32 && defined(CONFIG_SMP) + raw_write_seqcount_begin(&syncp->seq); +#endif +} + +static inline void u64_stats_update_end_raw(struct u64_stats_sync *syncp) +{ +#if BITS_PER_LONG==32 && defined(CONFIG_SMP) + raw_write_seqcount_end(&syncp->seq); +#endif +} + static inline unsigned int u64_stats_fetch_begin(const struct u64_stats_sync *syncp) { #if BITS_PER_LONG==32 && defined(CONFIG_SMP) diff --git a/include/net/sock.h b/include/net/sock.h index 45f5b492c658..c9c8b19df27c 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -1421,11 +1421,16 @@ static inline void unlock_sock_fast(struct sock *sk, bool slow) * accesses from user process context. 
*/ -static inline bool sock_owned_by_user(const struct sock *sk) +static inline void sock_owned_by_me(const struct sock *sk) { #ifdef CONFIG_LOCKDEP WARN_ON_ONCE(!lockdep_sock_is_held(sk) && debug_locks); #endif +} + +static inline bool sock_owned_by_user(const struct sock *sk) +{ + sock_owned_by_me(sk); return sk->sk_lock.owned; } diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 6171f92be090..a914e0607895 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -3355,9 +3355,10 @@ static void tcp_snd_una_update(struct tcp_sock *tp, u32 ack) { u32 delta = ack - tp->snd_una; - u64_stats_update_begin(&tp->syncp); + sock_owned_by_me((struct sock *)tp); + u64_stats_update_begin_raw(&tp->syncp); tp->bytes_acked += delta; - u64_stats_update_end(&tp->syncp); + u64_stats_update_end_raw(&tp->syncp); tp->snd_una = ack; } @@ -3366,9 +3367,10 @@ static void tcp_rcv_nxt_update(struct tcp_sock *tp, u32 seq) { u32 delta = seq - tp->rcv_nxt; - u64_stats_update_begin(&tp->syncp); + sock_owned_by_me((struct sock *)tp); + u64_stats_update_begin_raw(&tp->syncp); tp->bytes_received += delta; - u64_stats_update_end(&tp->syncp); + u64_stats_update_end_raw(&tp->syncp); tp->rcv_nxt = seq; } From 614bdd4d6e61d260d82945f5f52a5dc288f64783 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Tue, 3 May 2016 17:10:50 -0700 Subject: [PATCH 1345/1649] tcp: must block bh in __inet_twsk_hashdance() __inet_twsk_hashdance() might be called from process context, better block BH before acquiring bind hash and established locks Fixes: c10d9310edf5 ("tcp: do not assume TCP code is non preemptible") Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- net/ipv4/inet_timewait_sock.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c index 99ee5c4a9b68..206581674806 100644 --- a/net/ipv4/inet_timewait_sock.c +++ b/net/ipv4/inet_timewait_sock.c @@ -94,7 +94,7 @@ static void inet_twsk_add_bind_node(struct inet_timewait_sock *tw, } /* - * Enter the time wait state. This is called with locally disabled BH. + * Enter the time wait state. * Essentially we whip up a timewait bucket, copy the relevant info into it * from the SK, and mess with hash chains and list linkage. */ @@ -112,7 +112,7 @@ void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk, */ bhead = &hashinfo->bhash[inet_bhashfn(twsk_net(tw), inet->inet_num, hashinfo->bhash_size)]; - spin_lock(&bhead->lock); + spin_lock_bh(&bhead->lock); tw->tw_tb = icsk->icsk_bind_hash; WARN_ON(!icsk->icsk_bind_hash); inet_twsk_add_bind_node(tw, &tw->tw_tb->owners); @@ -138,7 +138,7 @@ void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk, if (__sk_nulls_del_node_init_rcu(sk)) sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1); - spin_unlock(lock); + spin_unlock_bh(lock); } EXPORT_SYMBOL_GPL(__inet_twsk_hashdance); From 58ef6a3f64bd837e107a2bbf0571574850a32b8c Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Wed, 4 May 2016 09:21:02 +0300 Subject: [PATCH 1346/1649] usbnet/smsc75xx: silence uninitialized variable warning If the fn() calls fail then "buf" is uninitialized. Just return early in that situation. Signed-off-by: Dan Carpenter Signed-off-by: David S. 
Miller --- drivers/net/usb/smsc75xx.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c index c369db99c005..9af9799935db 100644 --- a/drivers/net/usb/smsc75xx.c +++ b/drivers/net/usb/smsc75xx.c @@ -99,9 +99,11 @@ static int __must_check __smsc75xx_read_reg(struct usbnet *dev, u32 index, ret = fn(dev, USB_VENDOR_REQUEST_READ_REGISTER, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0, index, &buf, 4); - if (unlikely(ret < 0)) + if (unlikely(ret < 0)) { netdev_warn(dev->net, "Failed to read reg index 0x%08x: %d\n", index, ret); + return ret; + } le32_to_cpus(&buf); *data = buf; From 5a36b68b670ebdf19eaedd4f859810293a3d1dc1 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Wed, 4 May 2016 09:22:01 +0300 Subject: [PATCH 1347/1649] usbnet: smsc95xx: silence an uninitialized variable warning If the call to fn() fails then "buf" is uninitialized. Just return the error code in that case. Signed-off-by: Dan Carpenter Signed-off-by: David S. Miller --- drivers/net/usb/smsc95xx.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c index 2edc2bc6d1b9..d9d2806a47b1 100644 --- a/drivers/net/usb/smsc95xx.c +++ b/drivers/net/usb/smsc95xx.c @@ -92,9 +92,11 @@ static int __must_check __smsc95xx_read_reg(struct usbnet *dev, u32 index, ret = fn(dev, USB_VENDOR_REQUEST_READ_REGISTER, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0, index, &buf, 4); - if (unlikely(ret < 0)) + if (unlikely(ret < 0)) { netdev_warn(dev->net, "Failed to read reg index 0x%08x: %d\n", index, ret); + return ret; + } le32_to_cpus(&buf); *data = buf; From 5c2a9644d05e98b3c06b073351cd363ff91b22e8 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Wed, 4 May 2016 22:51:47 +0200 Subject: [PATCH 1348/1649] bonding: update documentation section after dev->trans_start removal Drivers that use LLTX need to update trans_start of the netdev_queue. (Most drivers don't use LLTX; stack does this update if .ndo_start_xmit returned TX_OK). Signed-off-by: Florian Westphal Signed-off-by: David S. Miller --- Documentation/networking/bonding.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Documentation/networking/bonding.txt b/Documentation/networking/bonding.txt index 334b49ef02d1..57f52cdce32e 100644 --- a/Documentation/networking/bonding.txt +++ b/Documentation/networking/bonding.txt @@ -1880,8 +1880,8 @@ or more peers on the local network. The ARP monitor relies on the device driver itself to verify that traffic is flowing. In particular, the driver must keep up to -date the last receive time, dev->last_rx, and transmit start time, -dev->trans_start. If these are not updated by the driver, then the +date the last receive time, dev->last_rx. Drivers that use NETIF_F_LLTX +flag must also update netdev_queue->trans_start. If they do not, then the ARP monitor will immediately fail any slaves using that driver, and those slaves will stay down. If networking monitoring (tcpdump, etc) shows the ARP requests and replies on the network, then it may be that From 3e66bab33f2a32630616d335dbf4965896eb159f Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Wed, 4 May 2016 22:56:07 +0200 Subject: [PATCH 1349/1649] drivers: fix dev->trans_start removal fallout kbuild test robot reported a build failure on s390. While at it, also fix missing conversion in the tilera driver. 
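For driver authors hitting the same fallout, a minimal sketch of the replacement pattern (the "foo" driver and its timeout callback are made up for illustration; dev_trans_start() and netif_trans_update() are the real helpers used by the conversions in this series):

    #include <linux/netdevice.h>

    static void foo_tx_timeout(struct net_device *dev)
    {
            /* dev_trans_start() returns the newest trans_start across the TX queues */
            unsigned long start = dev_trans_start(dev);

            netdev_warn(dev, "TX timed out, %lu jiffies since last start\n",
                        jiffies - start);

            /* ... reset the hardware here ... */

            netif_trans_update(dev);        /* refresh queue 0's trans_start to quiet the watchdog */
            netif_wake_queue(dev);
    }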
Fixes: 9b36627acecd5792 ("net: remove dev->trans_start") Reported-by: kbuild test robot Signed-off-by: Florian Westphal Signed-off-by: David S. Miller --- drivers/net/ethernet/tile/tilepro.c | 2 +- drivers/s390/net/qeth_core_main.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/tile/tilepro.c b/drivers/net/ethernet/tile/tilepro.c index 0bb98bc70c9d..922a443e3415 100644 --- a/drivers/net/ethernet/tile/tilepro.c +++ b/drivers/net/ethernet/tile/tilepro.c @@ -2026,7 +2026,7 @@ static void tile_net_tx_timeout(struct net_device *dev) { PDEBUG("tile_net_tx_timeout()\n"); PDEBUG("Transmit timeout at %ld, latency %ld\n", jiffies, - jiffies - dev->trans_start); + jiffies - dev_trans_start(dev)); /* XXX: ISSUE: This doesn't seem useful for us. */ netif_wake_queue(dev); diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 787153764120..b7b74776e2ff 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -3481,7 +3481,7 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index, } } - queue->card->dev->trans_start = jiffies; + netif_trans_update(queue->card->dev); if (queue->card->options.performance_stats) { queue->card->perf_stats.outbound_do_qdio_cnt++; queue->card->perf_stats.outbound_do_qdio_start_time = From 777c6ae57ebd432b59950b3e28bf01676018d1b2 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Wed, 4 May 2016 15:27:29 -0700 Subject: [PATCH 1350/1649] tcp: two more missing bh disable percpu_counter only have protection against preemption. TCP stack uses them possibly from BH, so we need BH protection in contexts that could be run in process context Fixes: c10d9310edf5 ("tcp: do not assume TCP code is non preemptible") Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- net/ipv4/inet_connection_sock.c | 2 ++ net/ipv4/tcp_ipv4.c | 2 ++ 2 files changed, 4 insertions(+) diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index 7ce112aa3a7b..fa8c39804bdb 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c @@ -706,7 +706,9 @@ void inet_csk_destroy_sock(struct sock *sk) sk_refcnt_debug_release(sk); + local_bh_disable(); percpu_counter_dec(sk->sk_prot->orphan_count); + local_bh_enable(); sock_put(sk); } EXPORT_SYMBOL(inet_csk_destroy_sock); diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 761bc492c5e3..a7ab9472d645 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -1839,7 +1839,9 @@ void tcp_v4_destroy_sock(struct sock *sk) tcp_free_fastopen_req(tp); tcp_saved_syn_free(tp); + local_bh_disable(); sk_sockets_allocated_dec(sk); + local_bh_enable(); if (mem_cgroup_sockets_enabled && sk->sk_memcg) sock_release_memcg(sk); From 035cd6ba53eff060760c4f4d11339fcc916a967c Mon Sep 17 00:00:00 2001 From: Jeff Kirsher Date: Wed, 4 May 2016 15:49:39 -0700 Subject: [PATCH 1351/1649] MAINTAINERS: Cleanup Intel Wired LAN maintainers list With the recent "retirements" and other changes, make the maintainers list a lot less confusing and a bit more straight forward. Signed-off-by: Jeff Kirsher Acked-by: Jesse Brandeburg Acked-by: Shannon Nelson Signed-off-by: David S. 
Miller --- MAINTAINERS | 7 ------- 1 file changed, 7 deletions(-) diff --git a/MAINTAINERS b/MAINTAINERS index 2b74fde7bcc2..e425912ff933 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -5748,13 +5748,6 @@ F: drivers/char/hw_random/ixp4xx-rng.c INTEL ETHERNET DRIVERS M: Jeff Kirsher -R: Jesse Brandeburg -R: Shannon Nelson -R: Carolyn Wyborny -R: Don Skidmore -R: Bruce Allan -R: John Ronciak -R: Mitch Williams L: intel-wired-lan@lists.osuosl.org (moderated for non-subscribers) W: http://www.intel.com/support/feedback.htm W: http://e1000.sourceforge.net/ From 1ad8f48df6f683f186b03b51381419ac4aec73d3 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Tue, 26 Apr 2016 11:59:53 +0200 Subject: [PATCH 1352/1649] netfilter: nftables: add connlabel set support Conntrack labels are currently sized depending on the iptables ruleset, i.e. if we're asked to test or set bits 1, 2, and 65 then we would allocate enough room to store at least bit 65. However, with nft, the input is just a register with arbitrary runtime content. We therefore ask for the upper ceiling we currently have, which is enough room to store 128 bits. Alternatively, we could alter nf_connlabel_replace to increase net->ct.label_words at run time, but since 128 bits is not that big we'd only save sizeof(long) so it doesn't seem worth it for now. This follows a similar approach that xtables 'connlabel' match uses, so when user inputs ct label set bar then we will set the bit used by the 'bar' label and leave the rest alone. This is done by passing the sreg content to nf_connlabels_replace as both value and mask argument. Labels (bits) already set thus cannot be re-set to zero, but this is not supported by xtables connlabel match either. Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nft_ct.c | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/net/netfilter/nft_ct.c b/net/netfilter/nft_ct.c index 25998facefd0..137e308d5b24 100644 --- a/net/netfilter/nft_ct.c +++ b/net/netfilter/nft_ct.c @@ -197,6 +197,14 @@ static void nft_ct_set_eval(const struct nft_expr *expr, nf_conntrack_event_cache(IPCT_MARK, ct); } break; +#endif +#ifdef CONFIG_NF_CONNTRACK_LABELS + case NFT_CT_LABELS: + nf_connlabels_replace(ct, + ®s->data[priv->sreg], + ®s->data[priv->sreg], + NF_CT_LABELS_MAX_SIZE / sizeof(u32)); + break; #endif default: break; @@ -364,6 +372,16 @@ static int nft_ct_set_init(const struct nft_ctx *ctx, case NFT_CT_MARK: len = FIELD_SIZEOF(struct nf_conn, mark); break; +#endif +#ifdef CONFIG_NF_CONNTRACK_LABELS + case NFT_CT_LABELS: + if (tb[NFTA_CT_DIRECTION]) + return -EINVAL; + len = NF_CT_LABELS_MAX_SIZE; + err = nf_connlabels_get(ctx->net, (len * BITS_PER_BYTE) - 1); + if (err) + return err; + break; #endif default: return -EOPNOTSUPP; @@ -384,6 +402,18 @@ static int nft_ct_set_init(const struct nft_ctx *ctx, static void nft_ct_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr) { + struct nft_ct *priv = nft_expr_priv(expr); + + switch (priv->key) { +#ifdef CONFIG_NF_CONNTRACK_LABELS + case NFT_CT_LABELS: + nf_connlabels_put(ctx->net); + break; +#endif + default: + break; + } + nft_ct_l3proto_module_put(ctx->afi->family); } From 2cf1234807bdd4ae5d3096a63c8fd5d4d5cad0ef Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Thu, 28 Apr 2016 19:13:40 +0200 Subject: [PATCH 1353/1649] netfilter: conntrack: keep BH enabled during lookup No need to disable BH here anymore: stats are switched to _ATOMIC variant (== this_cpu_inc()), which nowadays generates same code as 
the non _ATOMIC NF_STAT, at least on x86. Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nf_conntrack_core.c | 25 ++++++++----------------- 1 file changed, 8 insertions(+), 17 deletions(-) diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index 1fd0ff1030c2..1b63359d2bb8 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -472,18 +472,13 @@ ____nf_conntrack_find(struct net *net, const struct nf_conntrack_zone *zone, struct hlist_nulls_node *n; unsigned int bucket = hash_bucket(hash, net); - /* Disable BHs the entire time since we normally need to disable them - * at least once for the stats anyway. - */ - local_bh_disable(); begin: hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[bucket], hnnode) { if (nf_ct_key_equal(h, tuple, zone)) { - NF_CT_STAT_INC(net, found); - local_bh_enable(); + NF_CT_STAT_INC_ATOMIC(net, found); return h; } - NF_CT_STAT_INC(net, searched); + NF_CT_STAT_INC_ATOMIC(net, searched); } /* * if the nulls value we got at the end of this lookup is @@ -491,10 +486,9 @@ begin: * We probably met an item that was moved to another chain. */ if (get_nulls_value(n) != bucket) { - NF_CT_STAT_INC(net, search_restart); + NF_CT_STAT_INC_ATOMIC(net, search_restart); goto begin; } - local_bh_enable(); return NULL; } @@ -735,22 +729,19 @@ nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple, zone = nf_ct_zone(ignored_conntrack); hash = hash_conntrack(net, tuple); - /* Disable BHs the entire time since we need to disable them at - * least once for the stats anyway. - */ - rcu_read_lock_bh(); + rcu_read_lock(); hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash], hnnode) { ct = nf_ct_tuplehash_to_ctrack(h); if (ct != ignored_conntrack && nf_ct_tuple_equal(tuple, &h->tuple) && nf_ct_zone_equal(ct, zone, NF_CT_DIRECTION(h))) { - NF_CT_STAT_INC(net, found); - rcu_read_unlock_bh(); + NF_CT_STAT_INC_ATOMIC(net, found); + rcu_read_unlock(); return 1; } - NF_CT_STAT_INC(net, searched); + NF_CT_STAT_INC_ATOMIC(net, searched); } - rcu_read_unlock_bh(); + rcu_read_unlock(); return 0; } From 5e3c61f981756361e7dc74e2c673121028449e35 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Thu, 28 Apr 2016 19:13:41 +0200 Subject: [PATCH 1354/1649] netfilter: conntrack: fix lookup race during hash resize When resizing the conntrack hash table at runtime via echo 42 > /sys/module/nf_conntrack/parameters/hashsize, we are racing with the conntrack lookup path -- reads can happen in parallel and nothing prevents readers from observing a the newly allocated hash but the old size (or vice versa). So access to hash[bucket] can trigger OOB read access in case the table got expanded and we saw the new size but the old hash pointer (or it got shrunk and we got new hash ptr but the size of the old and larger table): kasan: GPF could be caused by NULL-ptr deref or user memory access general protection fault: 0000 [#1] SMP KASAN CPU: 0 PID: 3 Comm: ksoftirqd/0 Not tainted 4.6.0-rc2+ #107 [..] Call Trace: [] ? nf_conntrack_tuple_taken+0x12a/0xe90 [] ? nf_ct_invert_tuplepr+0x221/0x3a0 [] get_unique_tuple+0xfb3/0x2760 Use generation counter to obtain the address/length of the same table. Also add a synchronize_net before freeing the old hash. 
AFAICS, without it we might access ct_hash[bucket] after ct_hash has been freed, provided that lockless reader got delayed by another event: CPU1 CPU2 seq_begin seq_retry resize occurs free oldhash for_each(oldhash[size]) Note that resize is only supported in init_netns, it took over 2 minutes of constant resizing+flooding to produce the warning, so this isn't a big problem in practice. Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nf_conntrack_core.c | 24 +++++++++++++++++++----- 1 file changed, 19 insertions(+), 5 deletions(-) diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index 1b63359d2bb8..29fa08b3ab82 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -469,11 +469,18 @@ ____nf_conntrack_find(struct net *net, const struct nf_conntrack_zone *zone, const struct nf_conntrack_tuple *tuple, u32 hash) { struct nf_conntrack_tuple_hash *h; + struct hlist_nulls_head *ct_hash; struct hlist_nulls_node *n; - unsigned int bucket = hash_bucket(hash, net); + unsigned int bucket, sequence; begin: - hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[bucket], hnnode) { + do { + sequence = read_seqcount_begin(&nf_conntrack_generation); + bucket = hash_bucket(hash, net); + ct_hash = net->ct.hash; + } while (read_seqcount_retry(&nf_conntrack_generation, sequence)); + + hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[bucket], hnnode) { if (nf_ct_key_equal(h, tuple, zone)) { NF_CT_STAT_INC_ATOMIC(net, found); return h; @@ -722,15 +729,21 @@ nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple, struct net *net = nf_ct_net(ignored_conntrack); const struct nf_conntrack_zone *zone; struct nf_conntrack_tuple_hash *h; + struct hlist_nulls_head *ct_hash; + unsigned int hash, sequence; struct hlist_nulls_node *n; struct nf_conn *ct; - unsigned int hash; zone = nf_ct_zone(ignored_conntrack); - hash = hash_conntrack(net, tuple); rcu_read_lock(); - hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash], hnnode) { + do { + sequence = read_seqcount_begin(&nf_conntrack_generation); + hash = hash_conntrack(net, tuple); + ct_hash = net->ct.hash; + } while (read_seqcount_retry(&nf_conntrack_generation, sequence)); + + hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[hash], hnnode) { ct = nf_ct_tuplehash_to_ctrack(h); if (ct != ignored_conntrack && nf_ct_tuple_equal(tuple, &h->tuple) && @@ -1607,6 +1620,7 @@ int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp) nf_conntrack_all_unlock(); local_bh_enable(); + synchronize_net(); nf_ct_free_hashtable(old_hash, old_size); return 0; } From 88b68bc5237c84c6ff6f78568653780869a94a95 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Thu, 28 Apr 2016 19:13:42 +0200 Subject: [PATCH 1355/1649] netfilter: conntrack: don't attempt to iterate over empty table Once we place all conntracks into same table iteration becomes more costly because the table contains conntracks that we are not interested in (belonging to other netns). So don't bother scanning if the current namespace has no entries. 
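The reader side boils down to the retry loop below (distilled from the hunk that follows): the hash pointer and the table size are only used together once both were read under the same generation count, so a concurrent resize forces a retry instead of handing the lookup a mismatched pair.

    do {
            sequence = read_seqcount_begin(&nf_conntrack_generation);
            bucket   = hash_bucket(hash, net);
            ct_hash  = net->ct.hash;
    } while (read_seqcount_retry(&nf_conntrack_generation, sequence));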
Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nf_conntrack_core.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index 29fa08b3ab82..f2e75a54408b 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -1428,6 +1428,9 @@ void nf_ct_iterate_cleanup(struct net *net, might_sleep(); + if (atomic_read(&net->ct.count) == 0) + return; + while ((ct = get_next_corpse(net, iter, data, &bucket)) != NULL) { /* Time to push up daises... */ if (del_timer(&ct->timeout)) From 868043485ecb7cda503af0dfb9e2804e0260196a Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Thu, 28 Apr 2016 19:13:43 +0200 Subject: [PATCH 1356/1649] netfilter: conntrack: use nf_ct_key_equal() in more places This prepares for upcoming change that places all conntracks into a single, global table. For this to work we will need to also compare net pointer during lookup. To avoid open-coding such check use the nf_ct_key_equal helper and then later extend it to also consider net_eq. Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nf_conntrack_core.c | 29 +++++++++++------------------ 1 file changed, 11 insertions(+), 18 deletions(-) diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index f2e75a54408b..3b9c3023192e 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -572,16 +572,13 @@ nf_conntrack_hash_check_insert(struct nf_conn *ct) /* See if there's one in the list already, including reverse */ hlist_nulls_for_each_entry(h, n, &net->ct.hash[hash], hnnode) - if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple, - &h->tuple) && - nf_ct_zone_equal(nf_ct_tuplehash_to_ctrack(h), zone, - NF_CT_DIRECTION(h))) + if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple, + zone)) goto out; + hlist_nulls_for_each_entry(h, n, &net->ct.hash[reply_hash], hnnode) - if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_REPLY].tuple, - &h->tuple) && - nf_ct_zone_equal(nf_ct_tuplehash_to_ctrack(h), zone, - NF_CT_DIRECTION(h))) + if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_REPLY].tuple, + zone)) goto out; add_timer(&ct->timeout); @@ -665,16 +662,13 @@ __nf_conntrack_confirm(struct sk_buff *skb) NAT could have grabbed it without realizing, since we're not in the hash. If there is, we lost race. 
*/ hlist_nulls_for_each_entry(h, n, &net->ct.hash[hash], hnnode) - if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple, - &h->tuple) && - nf_ct_zone_equal(nf_ct_tuplehash_to_ctrack(h), zone, - NF_CT_DIRECTION(h))) + if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple, + zone)) goto out; + hlist_nulls_for_each_entry(h, n, &net->ct.hash[reply_hash], hnnode) - if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_REPLY].tuple, - &h->tuple) && - nf_ct_zone_equal(nf_ct_tuplehash_to_ctrack(h), zone, - NF_CT_DIRECTION(h))) + if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_REPLY].tuple, + zone)) goto out; /* Timer relative to confirmation time, not original @@ -746,8 +740,7 @@ nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple, hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[hash], hnnode) { ct = nf_ct_tuplehash_to_ctrack(h); if (ct != ignored_conntrack && - nf_ct_tuple_equal(tuple, &h->tuple) && - nf_ct_zone_equal(ct, zone, NF_CT_DIRECTION(h))) { + nf_ct_key_equal(h, tuple, zone)) { NF_CT_STAT_INC_ATOMIC(net, found); rcu_read_unlock(); return 1; From 245cfdcaba2e7e4ee16b12af547ead37f9c501cd Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Thu, 28 Apr 2016 19:13:44 +0200 Subject: [PATCH 1357/1649] netfilter: conntrack: small refactoring of conntrack seq_printf The iteration process is lockless, so we test if the conntrack object is eligible for printing (e.g. is AF_INET) after obtaining the reference count. Once we put all conntracks into same hash table we might see more entries that need to be skipped. So add a helper and first perform the test in a lockless fashion for fast skip. Once we obtain the reference count, just repeat the check. Note that this refactoring also includes a missing check for unconfirmed conntrack entries due to slab rcu object re-usage, so they need to be skipped since they are not part of the listing. Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- .../nf_conntrack_l3proto_ipv4_compat.c | 24 +++++++++++++++---- 1 file changed, 19 insertions(+), 5 deletions(-) diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c index f0dfe92a00d6..483cf7952e54 100644 --- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c +++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c @@ -114,6 +114,19 @@ static inline void ct_show_secctx(struct seq_file *s, const struct nf_conn *ct) } #endif +static bool ct_seq_should_skip(const struct nf_conn *ct, + const struct nf_conntrack_tuple_hash *hash) +{ + /* we only want to print DIR_ORIGINAL */ + if (NF_CT_DIRECTION(hash)) + return true; + + if (nf_ct_l3num(ct) != AF_INET) + return true; + + return false; +} + static int ct_seq_show(struct seq_file *s, void *v) { struct nf_conntrack_tuple_hash *hash = v; @@ -123,14 +136,15 @@ static int ct_seq_show(struct seq_file *s, void *v) int ret = 0; NF_CT_ASSERT(ct); + if (ct_seq_should_skip(ct, hash)) + return 0; + if (unlikely(!atomic_inc_not_zero(&ct->ct_general.use))) return 0; - - /* we only want to print DIR_ORIGINAL */ - if (NF_CT_DIRECTION(hash)) - goto release; - if (nf_ct_l3num(ct) != AF_INET) + /* check if we raced w. 
object reuse */ + if (!nf_ct_is_confirmed(ct) || + ct_seq_should_skip(ct, hash)) goto release; l3proto = __nf_ct_l3proto_find(nf_ct_l3num(ct)); From e0c7d47221883966d930fa7335b3ca295bc316b2 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Thu, 28 Apr 2016 19:13:45 +0200 Subject: [PATCH 1358/1649] netfilter: conntrack: check netns when comparing conntrack objects Once we place all conntracks in the same hash table we must also compare the netns pointer to skip conntracks that belong to a different namespace. Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- .../nf_conntrack_l3proto_ipv4_compat.c | 8 +++++-- net/netfilter/nf_conntrack_core.c | 23 +++++++++++-------- net/netfilter/nf_conntrack_netlink.c | 3 +++ 3 files changed, 22 insertions(+), 12 deletions(-) diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c index 483cf7952e54..171aba15c952 100644 --- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c +++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c @@ -115,6 +115,7 @@ static inline void ct_show_secctx(struct seq_file *s, const struct nf_conn *ct) #endif static bool ct_seq_should_skip(const struct nf_conn *ct, + const struct net *net, const struct nf_conntrack_tuple_hash *hash) { /* we only want to print DIR_ORIGINAL */ @@ -124,6 +125,9 @@ static bool ct_seq_should_skip(const struct nf_conn *ct, if (nf_ct_l3num(ct) != AF_INET) return true; + if (!net_eq(nf_ct_net(ct), net)) + return true; + return false; } @@ -136,7 +140,7 @@ static int ct_seq_show(struct seq_file *s, void *v) int ret = 0; NF_CT_ASSERT(ct); - if (ct_seq_should_skip(ct, hash)) + if (ct_seq_should_skip(ct, seq_file_net(s), hash)) return 0; if (unlikely(!atomic_inc_not_zero(&ct->ct_general.use))) @@ -144,7 +148,7 @@ static int ct_seq_show(struct seq_file *s, void *v) /* check if we raced w. 
object reuse */ if (!nf_ct_is_confirmed(ct) || - ct_seq_should_skip(ct, hash)) + ct_seq_should_skip(ct, seq_file_net(s), hash)) goto release; l3proto = __nf_ct_l3proto_find(nf_ct_l3num(ct)); diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index 3b9c3023192e..10ae2eef1e40 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -447,7 +447,8 @@ static void death_by_timeout(unsigned long ul_conntrack) static inline bool nf_ct_key_equal(struct nf_conntrack_tuple_hash *h, const struct nf_conntrack_tuple *tuple, - const struct nf_conntrack_zone *zone) + const struct nf_conntrack_zone *zone, + const struct net *net) { struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h); @@ -456,7 +457,8 @@ nf_ct_key_equal(struct nf_conntrack_tuple_hash *h, */ return nf_ct_tuple_equal(tuple, &h->tuple) && nf_ct_zone_equal(ct, zone, NF_CT_DIRECTION(h)) && - nf_ct_is_confirmed(ct); + nf_ct_is_confirmed(ct) && + net_eq(net, nf_ct_net(ct)); } /* @@ -481,7 +483,7 @@ begin: } while (read_seqcount_retry(&nf_conntrack_generation, sequence)); hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[bucket], hnnode) { - if (nf_ct_key_equal(h, tuple, zone)) { + if (nf_ct_key_equal(h, tuple, zone, net)) { NF_CT_STAT_INC_ATOMIC(net, found); return h; } @@ -517,7 +519,7 @@ begin: !atomic_inc_not_zero(&ct->ct_general.use))) h = NULL; else { - if (unlikely(!nf_ct_key_equal(h, tuple, zone))) { + if (unlikely(!nf_ct_key_equal(h, tuple, zone, net))) { nf_ct_put(ct); goto begin; } @@ -573,12 +575,12 @@ nf_conntrack_hash_check_insert(struct nf_conn *ct) /* See if there's one in the list already, including reverse */ hlist_nulls_for_each_entry(h, n, &net->ct.hash[hash], hnnode) if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple, - zone)) + zone, net)) goto out; hlist_nulls_for_each_entry(h, n, &net->ct.hash[reply_hash], hnnode) if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_REPLY].tuple, - zone)) + zone, net)) goto out; add_timer(&ct->timeout); @@ -663,12 +665,12 @@ __nf_conntrack_confirm(struct sk_buff *skb) not in the hash. If there is, we lost race. 
*/ hlist_nulls_for_each_entry(h, n, &net->ct.hash[hash], hnnode) if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple, - zone)) + zone, net)) goto out; hlist_nulls_for_each_entry(h, n, &net->ct.hash[reply_hash], hnnode) if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_REPLY].tuple, - zone)) + zone, net)) goto out; /* Timer relative to confirmation time, not original @@ -740,7 +742,7 @@ nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple, hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[hash], hnnode) { ct = nf_ct_tuplehash_to_ctrack(h); if (ct != ignored_conntrack && - nf_ct_key_equal(h, tuple, zone)) { + nf_ct_key_equal(h, tuple, zone, net)) { NF_CT_STAT_INC_ATOMIC(net, found); rcu_read_unlock(); return 1; @@ -1383,7 +1385,8 @@ get_next_corpse(struct net *net, int (*iter)(struct nf_conn *i, void *data), if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL) continue; ct = nf_ct_tuplehash_to_ctrack(h); - if (iter(ct, data)) + if (net_eq(nf_ct_net(ct), net) && + iter(ct, data)) goto found; } } diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index 294a8e28cec4..f6bbcb23749e 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c @@ -837,6 +837,9 @@ restart: if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL) continue; ct = nf_ct_tuplehash_to_ctrack(h); + if (!net_eq(net, nf_ct_net(ct))) + continue; + /* Dump entries of a given L3 protocol number. * If it is not specified, ie. l3proto == 0, * then dump everything. */ From 1b8c8a9f648c809c01a44114d7535ac8ca4c5ba3 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Tue, 3 May 2016 00:25:58 +0200 Subject: [PATCH 1359/1649] netfilter: conntrack: make netns address part of hash Once we place all conntracks into a global hash table we want them to be spread across entire hash table, even if namespaces have overlapping ip addresses. Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nf_conntrack_core.c | 35 ++++++++++++++++--------------- 1 file changed, 18 insertions(+), 17 deletions(-) diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index 10ae2eef1e40..ebafa7736f0a 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -54,6 +54,7 @@ #include #include #include +#include #define NF_CONNTRACK_VERSION "0.5.0" @@ -144,9 +145,11 @@ EXPORT_PER_CPU_SYMBOL(nf_conntrack_untracked); static unsigned int nf_conntrack_hash_rnd __read_mostly; -static u32 hash_conntrack_raw(const struct nf_conntrack_tuple *tuple) +static u32 hash_conntrack_raw(const struct nf_conntrack_tuple *tuple, + const struct net *net) { unsigned int n; + u32 seed; get_random_once(&nf_conntrack_hash_rnd, sizeof(nf_conntrack_hash_rnd)); @@ -154,32 +157,29 @@ static u32 hash_conntrack_raw(const struct nf_conntrack_tuple *tuple) * destination ports (which is a multiple of 4) and treat the last * three bytes manually. 
*/ + seed = nf_conntrack_hash_rnd ^ net_hash_mix(net); n = (sizeof(tuple->src) + sizeof(tuple->dst.u3)) / sizeof(u32); - return jhash2((u32 *)tuple, n, nf_conntrack_hash_rnd ^ + return jhash2((u32 *)tuple, n, seed ^ (((__force __u16)tuple->dst.u.all << 16) | tuple->dst.protonum)); } -static u32 __hash_bucket(u32 hash, unsigned int size) -{ - return reciprocal_scale(hash, size); -} - static u32 hash_bucket(u32 hash, const struct net *net) { - return __hash_bucket(hash, net->ct.htable_size); + return reciprocal_scale(hash, net->ct.htable_size); } -static u_int32_t __hash_conntrack(const struct nf_conntrack_tuple *tuple, - unsigned int size) +static u32 __hash_conntrack(const struct net *net, + const struct nf_conntrack_tuple *tuple, + unsigned int size) { - return __hash_bucket(hash_conntrack_raw(tuple), size); + return reciprocal_scale(hash_conntrack_raw(tuple, net), size); } -static inline u_int32_t hash_conntrack(const struct net *net, - const struct nf_conntrack_tuple *tuple) +static u32 hash_conntrack(const struct net *net, + const struct nf_conntrack_tuple *tuple) { - return __hash_conntrack(tuple, net->ct.htable_size); + return __hash_conntrack(net, tuple, net->ct.htable_size); } bool @@ -535,7 +535,7 @@ nf_conntrack_find_get(struct net *net, const struct nf_conntrack_zone *zone, const struct nf_conntrack_tuple *tuple) { return __nf_conntrack_find_get(net, zone, tuple, - hash_conntrack_raw(tuple)); + hash_conntrack_raw(tuple, net)); } EXPORT_SYMBOL_GPL(nf_conntrack_find_get); @@ -1041,7 +1041,7 @@ resolve_normal_ct(struct net *net, struct nf_conn *tmpl, /* look for tuple match */ zone = nf_ct_zone_tmpl(tmpl, skb, &tmp); - hash = hash_conntrack_raw(&tuple); + hash = hash_conntrack_raw(&tuple, net); h = __nf_conntrack_find_get(net, zone, &tuple, hash); if (!h) { h = init_conntrack(net, tmpl, &tuple, l3proto, l4proto, @@ -1605,7 +1605,8 @@ int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp) struct nf_conntrack_tuple_hash, hnnode); ct = nf_ct_tuplehash_to_ctrack(h); hlist_nulls_del_rcu(&h->hnnode); - bucket = __hash_conntrack(&h->tuple, hashsize); + bucket = __hash_conntrack(nf_ct_net(ct), + &h->tuple, hashsize); hlist_nulls_add_head_rcu(&h->hnnode, &hash[bucket]); } } From 56d52d4892d0e478a005b99ed10d0a7f488ea8c1 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Mon, 2 May 2016 18:39:55 +0200 Subject: [PATCH 1360/1649] netfilter: conntrack: use a single hashtable for all namespaces We already include netns address in the hash and compare the netns pointers during lookup, so even if namespaces have overlapping addresses entries will be spread across the table. Assuming 64k bucket size, this change saves 0.5 mbyte per namespace on a 64bit system. NAT bysrc and expectation hashes are still per namespace; they will be changed too, soon. A future patch will also make the conntrack object slab cache global again.
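The effect of the two changes above (mixing net_hash_mix() into the hash seed, then sharing one table across all namespaces) can be illustrated with a minimal userspace sketch. This is not kernel code: the tuple layout, the FNV-style hash and the literal per-namespace cookies below are stand-ins for nf_conntrack_tuple, jhash2() and net_hash_mix(); only the seeding and bucket-scaling pattern mirrors the patch.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Toy stand-in for struct nf_conntrack_tuple: all fields u32 so the
 * struct can be hashed byte-for-byte without padding surprises. */
struct toy_tuple {
	uint32_t src_ip;
	uint32_t dst_ip;
	uint32_t src_port;
	uint32_t dst_port;
	uint32_t proto;
};

/* Seeded FNV-1a, standing in for the kernel's jhash2(). */
static uint32_t toy_hash(const void *data, size_t len, uint32_t seed)
{
	const uint8_t *p = data;
	uint32_t h = 2166136261u ^ seed;
	size_t i;

	for (i = 0; i < len; i++) {
		h ^= p[i];
		h *= 16777619u;
	}
	return h;
}

/* Mirrors the idea above: seed = global random value ^ per-netns mix,
 * then scale the hash down to a bucket index (reciprocal_scale() in
 * the kernel, a plain modulo here). */
static uint32_t toy_bucket(const struct toy_tuple *t, uint32_t global_rnd,
			   uint32_t netns_mix, uint32_t htable_size)
{
	return toy_hash(t, sizeof(*t), global_rnd ^ netns_mix) % htable_size;
}

int main(void)
{
	struct toy_tuple t = { 0x0a000001, 0x0a000002, 1234, 80, 6 };
	uint32_t rnd = 0xdeadbeef; /* stands in for nf_conntrack_hash_rnd */

	/* The same 5-tuple seen in two namespaces lands in different
	 * buckets, so a single global table stays well spread even when
	 * namespaces use overlapping addresses. */
	printf("netns A -> bucket %u\n", (unsigned)toy_bucket(&t, rnd, 0x1111, 65536));
	printf("netns B -> bucket %u\n", (unsigned)toy_bucket(&t, rnd, 0x2222, 65536));
	return 0;
}

Running the sketch prints two different bucket indices for the same tuple, which is why the shared table stays evenly loaded across namespaces.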
Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- include/net/netfilter/nf_conntrack_core.h | 1 + include/net/netns/conntrack.h | 2 - .../netfilter/nf_conntrack_l3proto_ipv4.c | 2 +- .../nf_conntrack_l3proto_ipv4_compat.c | 10 +-- net/netfilter/nf_conntrack_core.c | 80 +++++++++---------- net/netfilter/nf_conntrack_helper.c | 6 +- net/netfilter/nf_conntrack_netlink.c | 8 +- net/netfilter/nf_conntrack_standalone.c | 13 ++- net/netfilter/nf_nat_core.c | 2 +- net/netfilter/nfnetlink_cttimeout.c | 6 +- 10 files changed, 62 insertions(+), 68 deletions(-) diff --git a/include/net/netfilter/nf_conntrack_core.h b/include/net/netfilter/nf_conntrack_core.h index 62e17d1319ff..3e2f3328945c 100644 --- a/include/net/netfilter/nf_conntrack_core.h +++ b/include/net/netfilter/nf_conntrack_core.h @@ -81,6 +81,7 @@ print_tuple(struct seq_file *s, const struct nf_conntrack_tuple *tuple, #define CONNTRACK_LOCKS 1024 +extern struct hlist_nulls_head *nf_conntrack_hash; extern spinlock_t nf_conntrack_locks[CONNTRACK_LOCKS]; void nf_conntrack_lock(spinlock_t *lock); diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h index b052785b1590..251c435ee330 100644 --- a/include/net/netns/conntrack.h +++ b/include/net/netns/conntrack.h @@ -93,9 +93,7 @@ struct netns_ct { int sysctl_tstamp; int sysctl_checksum; - unsigned int htable_size; struct kmem_cache *nf_conntrack_cachep; - struct hlist_nulls_head *hash; struct hlist_head *expect_hash; struct ct_pcpu __percpu *pcpu_lists; struct ip_conntrack_stat __percpu *stat; diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c index e3c46e8e2762..ae1a71a97132 100644 --- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c +++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c @@ -360,7 +360,7 @@ static int ipv4_init_net(struct net *net) in->ctl_table[0].data = &nf_conntrack_max; in->ctl_table[1].data = &net->ct.count; - in->ctl_table[2].data = &net->ct.htable_size; + in->ctl_table[2].data = &nf_conntrack_htable_size; in->ctl_table[3].data = &net->ct.sysctl_checksum; in->ctl_table[4].data = &net->ct.sysctl_log_invalid; #endif diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c index 171aba15c952..f8fc7ab201c9 100644 --- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c +++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c @@ -31,15 +31,14 @@ struct ct_iter_state { static struct hlist_nulls_node *ct_get_first(struct seq_file *seq) { - struct net *net = seq_file_net(seq); struct ct_iter_state *st = seq->private; struct hlist_nulls_node *n; for (st->bucket = 0; - st->bucket < net->ct.htable_size; + st->bucket < nf_conntrack_htable_size; st->bucket++) { n = rcu_dereference( - hlist_nulls_first_rcu(&net->ct.hash[st->bucket])); + hlist_nulls_first_rcu(&nf_conntrack_hash[st->bucket])); if (!is_a_nulls(n)) return n; } @@ -49,17 +48,16 @@ static struct hlist_nulls_node *ct_get_first(struct seq_file *seq) static struct hlist_nulls_node *ct_get_next(struct seq_file *seq, struct hlist_nulls_node *head) { - struct net *net = seq_file_net(seq); struct ct_iter_state *st = seq->private; head = rcu_dereference(hlist_nulls_next_rcu(head)); while (is_a_nulls(head)) { if (likely(get_nulls_value(head) == st->bucket)) { - if (++st->bucket >= net->ct.htable_size) + if (++st->bucket >= nf_conntrack_htable_size) return NULL; } head = rcu_dereference( - hlist_nulls_first_rcu(&net->ct.hash[st->bucket])); + 
hlist_nulls_first_rcu(&nf_conntrack_hash[st->bucket])); } return head; } diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index ebafa7736f0a..4c906e73e872 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -69,6 +69,9 @@ EXPORT_SYMBOL_GPL(nf_conntrack_locks); __cacheline_aligned_in_smp DEFINE_SPINLOCK(nf_conntrack_expect_lock); EXPORT_SYMBOL_GPL(nf_conntrack_expect_lock); +struct hlist_nulls_head *nf_conntrack_hash __read_mostly; +EXPORT_SYMBOL_GPL(nf_conntrack_hash); + static __read_mostly spinlock_t nf_conntrack_locks_all_lock; static __read_mostly seqcount_t nf_conntrack_generation; static __read_mostly bool nf_conntrack_locks_all; @@ -164,9 +167,9 @@ static u32 hash_conntrack_raw(const struct nf_conntrack_tuple *tuple, tuple->dst.protonum)); } -static u32 hash_bucket(u32 hash, const struct net *net) +static u32 scale_hash(u32 hash) { - return reciprocal_scale(hash, net->ct.htable_size); + return reciprocal_scale(hash, nf_conntrack_htable_size); } static u32 __hash_conntrack(const struct net *net, @@ -179,7 +182,7 @@ static u32 __hash_conntrack(const struct net *net, static u32 hash_conntrack(const struct net *net, const struct nf_conntrack_tuple *tuple) { - return __hash_conntrack(net, tuple, net->ct.htable_size); + return scale_hash(hash_conntrack_raw(tuple, net)); } bool @@ -478,8 +481,8 @@ ____nf_conntrack_find(struct net *net, const struct nf_conntrack_zone *zone, begin: do { sequence = read_seqcount_begin(&nf_conntrack_generation); - bucket = hash_bucket(hash, net); - ct_hash = net->ct.hash; + bucket = scale_hash(hash); + ct_hash = nf_conntrack_hash; } while (read_seqcount_retry(&nf_conntrack_generation, sequence)); hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[bucket], hnnode) { @@ -543,12 +546,10 @@ static void __nf_conntrack_hash_insert(struct nf_conn *ct, unsigned int hash, unsigned int reply_hash) { - struct net *net = nf_ct_net(ct); - hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode, - &net->ct.hash[hash]); + &nf_conntrack_hash[hash]); hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode, - &net->ct.hash[reply_hash]); + &nf_conntrack_hash[reply_hash]); } int @@ -573,12 +574,12 @@ nf_conntrack_hash_check_insert(struct nf_conn *ct) } while (nf_conntrack_double_lock(net, hash, reply_hash, sequence)); /* See if there's one in the list already, including reverse */ - hlist_nulls_for_each_entry(h, n, &net->ct.hash[hash], hnnode) + hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[hash], hnnode) if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple, zone, net)) goto out; - hlist_nulls_for_each_entry(h, n, &net->ct.hash[reply_hash], hnnode) + hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[reply_hash], hnnode) if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_REPLY].tuple, zone, net)) goto out; @@ -633,7 +634,7 @@ __nf_conntrack_confirm(struct sk_buff *skb) sequence = read_seqcount_begin(&nf_conntrack_generation); /* reuse the hash saved before */ hash = *(unsigned long *)&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev; - hash = hash_bucket(hash, net); + hash = scale_hash(hash); reply_hash = hash_conntrack(net, &ct->tuplehash[IP_CT_DIR_REPLY].tuple); @@ -663,12 +664,12 @@ __nf_conntrack_confirm(struct sk_buff *skb) /* See if there's one in the list already, including reverse: NAT could have grabbed it without realizing, since we're not in the hash. If there is, we lost race. 
*/ - hlist_nulls_for_each_entry(h, n, &net->ct.hash[hash], hnnode) + hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[hash], hnnode) if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple, zone, net)) goto out; - hlist_nulls_for_each_entry(h, n, &net->ct.hash[reply_hash], hnnode) + hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[reply_hash], hnnode) if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_REPLY].tuple, zone, net)) goto out; @@ -736,7 +737,7 @@ nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple, do { sequence = read_seqcount_begin(&nf_conntrack_generation); hash = hash_conntrack(net, tuple); - ct_hash = net->ct.hash; + ct_hash = nf_conntrack_hash; } while (read_seqcount_retry(&nf_conntrack_generation, sequence)); hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[hash], hnnode) { @@ -773,16 +774,16 @@ static noinline int early_drop(struct net *net, unsigned int _hash) local_bh_disable(); restart: sequence = read_seqcount_begin(&nf_conntrack_generation); - hash = hash_bucket(_hash, net); - for (; i < net->ct.htable_size; i++) { + hash = scale_hash(_hash); + for (; i < nf_conntrack_htable_size; i++) { lockp = &nf_conntrack_locks[hash % CONNTRACK_LOCKS]; nf_conntrack_lock(lockp); if (read_seqcount_retry(&nf_conntrack_generation, sequence)) { spin_unlock(lockp); goto restart; } - hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash], - hnnode) { + hlist_nulls_for_each_entry_rcu(h, n, &nf_conntrack_hash[hash], + hnnode) { tmp = nf_ct_tuplehash_to_ctrack(h); if (!test_bit(IPS_ASSURED_BIT, &tmp->status) && !nf_ct_is_dying(tmp) && @@ -793,7 +794,7 @@ restart: cnt++; } - hash = (hash + 1) % net->ct.htable_size; + hash = (hash + 1) % nf_conntrack_htable_size; spin_unlock(lockp); if (ct || cnt >= NF_CT_EVICTION_RANGE) @@ -1376,12 +1377,12 @@ get_next_corpse(struct net *net, int (*iter)(struct nf_conn *i, void *data), int cpu; spinlock_t *lockp; - for (; *bucket < net->ct.htable_size; (*bucket)++) { + for (; *bucket < nf_conntrack_htable_size; (*bucket)++) { lockp = &nf_conntrack_locks[*bucket % CONNTRACK_LOCKS]; local_bh_disable(); nf_conntrack_lock(lockp); - if (*bucket < net->ct.htable_size) { - hlist_nulls_for_each_entry(h, n, &net->ct.hash[*bucket], hnnode) { + if (*bucket < nf_conntrack_htable_size) { + hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[*bucket], hnnode) { if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL) continue; ct = nf_ct_tuplehash_to_ctrack(h); @@ -1478,6 +1479,8 @@ void nf_conntrack_cleanup_end(void) while (untrack_refs() > 0) schedule(); + nf_ct_free_hashtable(nf_conntrack_hash, nf_conntrack_htable_size); + #ifdef CONFIG_NF_CONNTRACK_ZONES nf_ct_extend_unregister(&nf_ct_zone_extend); #endif @@ -1528,7 +1531,6 @@ i_see_dead_people: } list_for_each_entry(net, net_exit_list, exit_list) { - nf_ct_free_hashtable(net->ct.hash, net->ct.htable_size); nf_conntrack_proto_pernet_fini(net); nf_conntrack_helper_pernet_fini(net); nf_conntrack_ecache_pernet_fini(net); @@ -1599,10 +1601,10 @@ int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp) * though since that required taking the locks. 
*/ - for (i = 0; i < init_net.ct.htable_size; i++) { - while (!hlist_nulls_empty(&init_net.ct.hash[i])) { - h = hlist_nulls_entry(init_net.ct.hash[i].first, - struct nf_conntrack_tuple_hash, hnnode); + for (i = 0; i < nf_conntrack_htable_size; i++) { + while (!hlist_nulls_empty(&nf_conntrack_hash[i])) { + h = hlist_nulls_entry(nf_conntrack_hash[i].first, + struct nf_conntrack_tuple_hash, hnnode); ct = nf_ct_tuplehash_to_ctrack(h); hlist_nulls_del_rcu(&h->hnnode); bucket = __hash_conntrack(nf_ct_net(ct), @@ -1610,11 +1612,11 @@ int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp) hlist_nulls_add_head_rcu(&h->hnnode, &hash[bucket]); } } - old_size = init_net.ct.htable_size; - old_hash = init_net.ct.hash; + old_size = nf_conntrack_htable_size; + old_hash = nf_conntrack_hash; - init_net.ct.htable_size = nf_conntrack_htable_size = hashsize; - init_net.ct.hash = hash; + nf_conntrack_hash = hash; + nf_conntrack_htable_size = hashsize; write_seqcount_end(&nf_conntrack_generation); nf_conntrack_all_unlock(); @@ -1670,6 +1672,11 @@ int nf_conntrack_init_start(void) * entries. */ max_factor = 4; } + + nf_conntrack_hash = nf_ct_alloc_hashtable(&nf_conntrack_htable_size, 1); + if (!nf_conntrack_hash) + return -ENOMEM; + nf_conntrack_max = max_factor * nf_conntrack_htable_size; printk(KERN_INFO "nf_conntrack version %s (%u buckets, %d max)\n", @@ -1748,6 +1755,7 @@ err_tstamp: err_acct: nf_conntrack_expect_fini(); err_expect: + nf_ct_free_hashtable(nf_conntrack_hash, nf_conntrack_htable_size); return ret; } @@ -1800,12 +1808,6 @@ int nf_conntrack_init_net(struct net *net) goto err_cache; } - net->ct.htable_size = nf_conntrack_htable_size; - net->ct.hash = nf_ct_alloc_hashtable(&net->ct.htable_size, 1); - if (!net->ct.hash) { - printk(KERN_ERR "Unable to create nf_conntrack_hash\n"); - goto err_hash; - } ret = nf_conntrack_expect_pernet_init(net); if (ret < 0) goto err_expect; @@ -1837,8 +1839,6 @@ err_tstamp: err_acct: nf_conntrack_expect_pernet_fini(net); err_expect: - nf_ct_free_hashtable(net->ct.hash, net->ct.htable_size); -err_hash: kmem_cache_destroy(net->ct.nf_conntrack_cachep); err_cache: kfree(net->ct.slabname); diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c index 498bf74f154d..cb48e6adba2c 100644 --- a/net/netfilter/nf_conntrack_helper.c +++ b/net/netfilter/nf_conntrack_helper.c @@ -424,10 +424,10 @@ static void __nf_conntrack_helper_unregister(struct nf_conntrack_helper *me, spin_unlock_bh(&pcpu->lock); } local_bh_disable(); - for (i = 0; i < net->ct.htable_size; i++) { + for (i = 0; i < nf_conntrack_htable_size; i++) { nf_conntrack_lock(&nf_conntrack_locks[i % CONNTRACK_LOCKS]); - if (i < net->ct.htable_size) { - hlist_nulls_for_each_entry(h, nn, &net->ct.hash[i], hnnode) + if (i < nf_conntrack_htable_size) { + hlist_nulls_for_each_entry(h, nn, &nf_conntrack_hash[i], hnnode) unhelp(h, me); } spin_unlock(&nf_conntrack_locks[i % CONNTRACK_LOCKS]); diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index f6bbcb23749e..e00f178c48b0 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c @@ -824,16 +824,16 @@ ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb) last = (struct nf_conn *)cb->args[1]; local_bh_disable(); - for (; cb->args[0] < net->ct.htable_size; cb->args[0]++) { + for (; cb->args[0] < nf_conntrack_htable_size; cb->args[0]++) { restart: lockp = &nf_conntrack_locks[cb->args[0] % CONNTRACK_LOCKS]; nf_conntrack_lock(lockp); - if 
(cb->args[0] >= net->ct.htable_size) { + if (cb->args[0] >= nf_conntrack_htable_size) { spin_unlock(lockp); goto out; } - hlist_nulls_for_each_entry(h, n, &net->ct.hash[cb->args[0]], - hnnode) { + hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[cb->args[0]], + hnnode) { if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL) continue; ct = nf_ct_tuplehash_to_ctrack(h); diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c index 0f1a45bcacb2..f87e84ebcec3 100644 --- a/net/netfilter/nf_conntrack_standalone.c +++ b/net/netfilter/nf_conntrack_standalone.c @@ -54,14 +54,13 @@ struct ct_iter_state { static struct hlist_nulls_node *ct_get_first(struct seq_file *seq) { - struct net *net = seq_file_net(seq); struct ct_iter_state *st = seq->private; struct hlist_nulls_node *n; for (st->bucket = 0; - st->bucket < net->ct.htable_size; + st->bucket < nf_conntrack_htable_size; st->bucket++) { - n = rcu_dereference(hlist_nulls_first_rcu(&net->ct.hash[st->bucket])); + n = rcu_dereference(hlist_nulls_first_rcu(&nf_conntrack_hash[st->bucket])); if (!is_a_nulls(n)) return n; } @@ -71,18 +70,17 @@ static struct hlist_nulls_node *ct_get_first(struct seq_file *seq) static struct hlist_nulls_node *ct_get_next(struct seq_file *seq, struct hlist_nulls_node *head) { - struct net *net = seq_file_net(seq); struct ct_iter_state *st = seq->private; head = rcu_dereference(hlist_nulls_next_rcu(head)); while (is_a_nulls(head)) { if (likely(get_nulls_value(head) == st->bucket)) { - if (++st->bucket >= net->ct.htable_size) + if (++st->bucket >= nf_conntrack_htable_size) return NULL; } head = rcu_dereference( hlist_nulls_first_rcu( - &net->ct.hash[st->bucket])); + &nf_conntrack_hash[st->bucket])); } return head; } @@ -458,7 +456,7 @@ static struct ctl_table nf_ct_sysctl_table[] = { }, { .procname = "nf_conntrack_buckets", - .data = &init_net.ct.htable_size, + .data = &nf_conntrack_htable_size, .maxlen = sizeof(unsigned int), .mode = 0444, .proc_handler = proc_dointvec, @@ -512,7 +510,6 @@ static int nf_conntrack_standalone_init_sysctl(struct net *net) goto out_kmemdup; table[1].data = &net->ct.count; - table[2].data = &net->ct.htable_size; table[3].data = &net->ct.sysctl_checksum; table[4].data = &net->ct.sysctl_log_invalid; diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c index 3d522715a167..d74e7167499d 100644 --- a/net/netfilter/nf_nat_core.c +++ b/net/netfilter/nf_nat_core.c @@ -824,7 +824,7 @@ nfnetlink_parse_nat_setup(struct nf_conn *ct, static int __net_init nf_nat_net_init(struct net *net) { /* Leave them the same for the moment. 
*/ - net->ct.nat_htable_size = net->ct.htable_size; + net->ct.nat_htable_size = nf_conntrack_htable_size; net->ct.nat_bysource = nf_ct_alloc_hashtable(&net->ct.nat_htable_size, 0); if (!net->ct.nat_bysource) return -ENOMEM; diff --git a/net/netfilter/nfnetlink_cttimeout.c b/net/netfilter/nfnetlink_cttimeout.c index 2671b9deb103..3c84f14326f5 100644 --- a/net/netfilter/nfnetlink_cttimeout.c +++ b/net/netfilter/nfnetlink_cttimeout.c @@ -306,10 +306,10 @@ static void ctnl_untimeout(struct net *net, struct ctnl_timeout *timeout) int i; local_bh_disable(); - for (i = 0; i < net->ct.htable_size; i++) { + for (i = 0; i < nf_conntrack_htable_size; i++) { nf_conntrack_lock(&nf_conntrack_locks[i % CONNTRACK_LOCKS]); - if (i < net->ct.htable_size) { - hlist_nulls_for_each_entry(h, nn, &net->ct.hash[i], hnnode) + if (i < nf_conntrack_htable_size) { + hlist_nulls_for_each_entry(h, nn, &nf_conntrack_hash[i], hnnode) untimeout(h, timeout); } spin_unlock(&nf_conntrack_locks[i % CONNTRACK_LOCKS]); From 3e86638e9a0be8bcf7db007909d8307b8b9f8e3b Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Mon, 2 May 2016 18:40:14 +0200 Subject: [PATCH 1361/1649] netfilter: conntrack: consider ct netns in early_drop logic When iterating, skip conntrack entries living in a different netns. We could ignore netns and kill some other non-assured one, but it has two problems: - a netns can kill non-assured conntracks in other namespace - we would start to 'over-subscribe' the affected/overlimit netns. Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nf_conntrack_core.c | 43 ++++++++++++++++++------------- 1 file changed, 25 insertions(+), 18 deletions(-) diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index 4c906e73e872..e3787cf33427 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -764,18 +764,20 @@ static noinline int early_drop(struct net *net, unsigned int _hash) { /* Use oldest entry, which is roughly LRU */ struct nf_conntrack_tuple_hash *h; - struct nf_conn *ct = NULL, *tmp; + struct nf_conn *tmp; struct hlist_nulls_node *n; - unsigned int i = 0, cnt = 0; - int dropped = 0; - unsigned int hash, sequence; + unsigned int i, hash, sequence; + struct nf_conn *ct = NULL; spinlock_t *lockp; + bool ret = false; + + i = 0; local_bh_disable(); restart: sequence = read_seqcount_begin(&nf_conntrack_generation); - hash = scale_hash(_hash); - for (; i < nf_conntrack_htable_size; i++) { + for (; i < NF_CT_EVICTION_RANGE; i++) { + hash = scale_hash(_hash++); lockp = &nf_conntrack_locks[hash % CONNTRACK_LOCKS]; nf_conntrack_lock(lockp); if (read_seqcount_retry(&nf_conntrack_generation, sequence)) { @@ -785,35 +787,40 @@ restart: hlist_nulls_for_each_entry_rcu(h, n, &nf_conntrack_hash[hash], hnnode) { tmp = nf_ct_tuplehash_to_ctrack(h); - if (!test_bit(IPS_ASSURED_BIT, &tmp->status) && - !nf_ct_is_dying(tmp) && - atomic_inc_not_zero(&tmp->ct_general.use)) { + + if (test_bit(IPS_ASSURED_BIT, &tmp->status) || + !net_eq(nf_ct_net(tmp), net) || + nf_ct_is_dying(tmp)) + continue; + + if (atomic_inc_not_zero(&tmp->ct_general.use)) { ct = tmp; break; } - cnt++; } - hash = (hash + 1) % nf_conntrack_htable_size; spin_unlock(lockp); - - if (ct || cnt >= NF_CT_EVICTION_RANGE) + if (ct) break; - } + local_bh_enable(); if (!ct) - return dropped; + return false; - if (del_timer(&ct->timeout)) { + /* kill only if in same netns -- might have moved due to + * SLAB_DESTROY_BY_RCU rules + */ + if (net_eq(nf_ct_net(ct), net) && 
del_timer(&ct->timeout)) { if (nf_ct_delete(ct, 0, 0)) { - dropped = 1; NF_CT_STAT_INC_ATOMIC(net, early_drop); + ret = true; } } + nf_ct_put(ct); - return dropped; + return ret; } static struct nf_conn * From 4b4ceb9dbf6a549682edff9fc5f04c204da50ab9 Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Sun, 1 May 2016 00:34:37 +0200 Subject: [PATCH 1362/1649] netfilter: conntrack: __nf_ct_l4proto_find() always returns valid pointer Remove unnecessary check for non-nul pointer in destroy_conntrack() given that __nf_ct_l4proto_find() returns the generic protocol tracker if the protocol is not supported. Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nf_conntrack_core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index e3787cf33427..f72ede19354c 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -363,7 +363,7 @@ destroy_conntrack(struct nf_conntrack *nfct) } rcu_read_lock(); l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct)); - if (l4proto && l4proto->destroy) + if (l4proto->destroy) l4proto->destroy(ct); rcu_read_unlock(); From ba76738c032ec0af3acbecd85c429c6a5c9e5e5e Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Mon, 2 May 2016 21:28:57 +0200 Subject: [PATCH 1363/1649] netfilter: conntrack: introduce nf_ct_acct_update() Introduce a helper function to update conntrack counters. __nf_ct_kill_acct() was unnecessarily subtracting skb_network_offset() that is expected to be zero from the ipv4/ipv6 hooks. Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nf_conntrack_core.c | 42 ++++++++++++++----------------- 1 file changed, 19 insertions(+), 23 deletions(-) diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index f72ede19354c..25e0c2677a12 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -602,6 +602,21 @@ out: } EXPORT_SYMBOL_GPL(nf_conntrack_hash_check_insert); +static inline void nf_ct_acct_update(struct nf_conn *ct, + enum ip_conntrack_info ctinfo, + unsigned int len) +{ + struct nf_conn_acct *acct; + + acct = nf_conn_acct_find(ct); + if (acct) { + struct nf_conn_counter *counter = acct->counter; + + atomic64_inc(&counter[CTINFO2DIR(ctinfo)].packets); + atomic64_add(len, &counter[CTINFO2DIR(ctinfo)].bytes); + } +} + /* Confirm a connection given skb; places it in hash table */ int __nf_conntrack_confirm(struct sk_buff *skb) @@ -1258,17 +1273,8 @@ void __nf_ct_refresh_acct(struct nf_conn *ct, } acct: - if (do_acct) { - struct nf_conn_acct *acct; - - acct = nf_conn_acct_find(ct); - if (acct) { - struct nf_conn_counter *counter = acct->counter; - - atomic64_inc(&counter[CTINFO2DIR(ctinfo)].packets); - atomic64_add(skb->len, &counter[CTINFO2DIR(ctinfo)].bytes); - } - } + if (do_acct) + nf_ct_acct_update(ct, ctinfo, skb->len); } EXPORT_SYMBOL_GPL(__nf_ct_refresh_acct); @@ -1277,18 +1283,8 @@ bool __nf_ct_kill_acct(struct nf_conn *ct, const struct sk_buff *skb, int do_acct) { - if (do_acct) { - struct nf_conn_acct *acct; - - acct = nf_conn_acct_find(ct); - if (acct) { - struct nf_conn_counter *counter = acct->counter; - - atomic64_inc(&counter[CTINFO2DIR(ctinfo)].packets); - atomic64_add(skb->len - skb_network_offset(skb), - &counter[CTINFO2DIR(ctinfo)].bytes); - } - } + if (do_acct) + nf_ct_acct_update(ct, ctinfo, skb->len); if (del_timer(&ct->timeout)) { ct->timeout.function((unsigned long)ct); From 71d8c47fc653711c41bc3282e5b0e605b3727956 Mon Sep 17 00:00:00 
2001 From: Pablo Neira Ayuso Date: Sun, 1 May 2016 00:28:40 +0200 Subject: [PATCH 1364/1649] netfilter: conntrack: introduce clash resolution on insertion race This patch introduces nf_ct_resolve_clash() to resolve race condition on conntrack insertions. This is particularly a problem for connection-less protocols such as UDP, with no initial handshake. Two or more packets may race to insert the entry resulting in packet drops. Another problematic scenario are packets enqueued to userspace via NFQUEUE after the raw table, that make it easier to trigger this race. To resolve this, the idea is to reset the conntrack entry to the one that won race. Packet and bytes counters are also merged. The 'insert_failed' stats still accounts for this situation, after this patch, the drop counter is bumped whenever we drop packets, so we can watch for unresolved clashes. Signed-off-by: Pablo Neira Ayuso --- include/net/netfilter/nf_conntrack_l4proto.h | 3 ++ net/netfilter/nf_conntrack_core.c | 53 ++++++++++++++++++-- net/netfilter/nf_conntrack_proto_udp.c | 2 + net/netfilter/nf_conntrack_proto_udplite.c | 2 + 4 files changed, 57 insertions(+), 3 deletions(-) diff --git a/include/net/netfilter/nf_conntrack_l4proto.h b/include/net/netfilter/nf_conntrack_l4proto.h index 956d8a6ac069..1a5fb36f165f 100644 --- a/include/net/netfilter/nf_conntrack_l4proto.h +++ b/include/net/netfilter/nf_conntrack_l4proto.h @@ -23,6 +23,9 @@ struct nf_conntrack_l4proto { /* L4 Protocol number. */ u_int8_t l4proto; + /* Resolve clashes on insertion races. */ + bool allow_clash; + /* Try to fill in the third arg: dataoff is offset past network protocol hdr. Return true if possible. */ bool (*pkt_to_tuple)(const struct sk_buff *skb, unsigned int dataoff, diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index 25e0c2677a12..f58a70410c69 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -617,6 +617,48 @@ static inline void nf_ct_acct_update(struct nf_conn *ct, } } +static void nf_ct_acct_merge(struct nf_conn *ct, enum ip_conntrack_info ctinfo, + const struct nf_conn *loser_ct) +{ + struct nf_conn_acct *acct; + + acct = nf_conn_acct_find(loser_ct); + if (acct) { + struct nf_conn_counter *counter = acct->counter; + enum ip_conntrack_info ctinfo; + unsigned int bytes; + + /* u32 should be fine since we must have seen one packet. */ + bytes = atomic64_read(&counter[CTINFO2DIR(ctinfo)].bytes); + nf_ct_acct_update(ct, ctinfo, bytes); + } +} + +/* Resolve race on insertion if this protocol allows this. */ +static int nf_ct_resolve_clash(struct net *net, struct sk_buff *skb, + enum ip_conntrack_info ctinfo, + struct nf_conntrack_tuple_hash *h) +{ + /* This is the conntrack entry already in hashes that won race. */ + struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h); + struct nf_conntrack_l4proto *l4proto; + + l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct)); + if (l4proto->allow_clash && + !nf_ct_is_dying(ct) && + atomic_inc_not_zero(&ct->ct_general.use)) { + nf_ct_acct_merge(ct, ctinfo, (struct nf_conn *)skb->nfct); + nf_conntrack_put(skb->nfct); + /* Assign conntrack already in hashes to this skbuff. Don't + * modify skb->nfctinfo to ensure consistent stateful filtering. 
+ */ + skb->nfct = &ct->ct_general; + return NF_ACCEPT; + } + NF_CT_STAT_INC(net, drop); + return NF_DROP; +} + /* Confirm a connection given skb; places it in hash table */ int __nf_conntrack_confirm(struct sk_buff *skb) @@ -631,6 +673,7 @@ __nf_conntrack_confirm(struct sk_buff *skb) enum ip_conntrack_info ctinfo; struct net *net; unsigned int sequence; + int ret = NF_DROP; ct = nf_ct_get(skb, &ctinfo); net = nf_ct_net(ct); @@ -673,8 +716,10 @@ __nf_conntrack_confirm(struct sk_buff *skb) */ nf_ct_del_from_dying_or_unconfirmed_list(ct); - if (unlikely(nf_ct_is_dying(ct))) - goto out; + if (unlikely(nf_ct_is_dying(ct))) { + nf_ct_add_to_dying_list(ct); + goto dying; + } /* See if there's one in the list already, including reverse: NAT could have grabbed it without realizing, since we're @@ -725,10 +770,12 @@ __nf_conntrack_confirm(struct sk_buff *skb) out: nf_ct_add_to_dying_list(ct); + ret = nf_ct_resolve_clash(net, skb, ctinfo, h); +dying: nf_conntrack_double_unlock(hash, reply_hash); NF_CT_STAT_INC(net, insert_failed); local_bh_enable(); - return NF_DROP; + return ret; } EXPORT_SYMBOL_GPL(__nf_conntrack_confirm); diff --git a/net/netfilter/nf_conntrack_proto_udp.c b/net/netfilter/nf_conntrack_proto_udp.c index 478f92f834b6..4fd040575ffe 100644 --- a/net/netfilter/nf_conntrack_proto_udp.c +++ b/net/netfilter/nf_conntrack_proto_udp.c @@ -309,6 +309,7 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_udp4 __read_mostly = .l3proto = PF_INET, .l4proto = IPPROTO_UDP, .name = "udp", + .allow_clash = true, .pkt_to_tuple = udp_pkt_to_tuple, .invert_tuple = udp_invert_tuple, .print_tuple = udp_print_tuple, @@ -341,6 +342,7 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_udp6 __read_mostly = .l3proto = PF_INET6, .l4proto = IPPROTO_UDP, .name = "udp", + .allow_clash = true, .pkt_to_tuple = udp_pkt_to_tuple, .invert_tuple = udp_invert_tuple, .print_tuple = udp_print_tuple, diff --git a/net/netfilter/nf_conntrack_proto_udplite.c b/net/netfilter/nf_conntrack_proto_udplite.c index 1ac8ee13a873..9d692f5adb94 100644 --- a/net/netfilter/nf_conntrack_proto_udplite.c +++ b/net/netfilter/nf_conntrack_proto_udplite.c @@ -274,6 +274,7 @@ static struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite4 __read_mostly = .l3proto = PF_INET, .l4proto = IPPROTO_UDPLITE, .name = "udplite", + .allow_clash = true, .pkt_to_tuple = udplite_pkt_to_tuple, .invert_tuple = udplite_invert_tuple, .print_tuple = udplite_print_tuple, @@ -306,6 +307,7 @@ static struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite6 __read_mostly = .l3proto = PF_INET6, .l4proto = IPPROTO_UDPLITE, .name = "udplite", + .allow_clash = true, .pkt_to_tuple = udplite_pkt_to_tuple, .invert_tuple = udplite_invert_tuple, .print_tuple = udplite_print_tuple, From 3b78155b1b3688dbe910fecdc3e003f431b46630 Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Tue, 3 May 2016 11:13:29 +0200 Subject: [PATCH 1365/1649] openvswitch: __nf_ct_l{3,4}proto_find() always return a valid pointer If the protocol is not natively supported, this assigns generic protocol tracker so we can always assume a valid pointer after these calls. 
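The cleanup above leans on lookups that cannot return NULL: a protocol without a native tracker gets the generic one instead, so callers may dereference the result directly. Below is a standalone sketch of that fallback pattern; the names, protocol numbers and registry layout are illustrative only and not the kernel API.

#include <stdio.h>

/* Toy protocol tracker: a name plus one callback. */
struct toy_l4proto {
	const char *name;
	int (*handle)(int pkt);
};

static int generic_handle(int pkt) { (void)pkt; return 0; }
static int udp_handle(int pkt)     { (void)pkt; return 1; }

static const struct toy_l4proto generic_proto = { "generic", generic_handle };
static const struct toy_l4proto udp_proto     = { "udp", udp_handle };

/* The lookup never returns NULL: unknown protocol numbers fall back to
 * the generic tracker, so callers need no NULL check. */
static const struct toy_l4proto *toy_l4proto_find(unsigned int protonum)
{
	switch (protonum) {
	case 17:			/* IPPROTO_UDP */
		return &udp_proto;
	default:
		return &generic_proto;
	}
}

int main(void)
{
	/* Mirrors the ovs_ct_find_existing() cleanup: dereference the
	 * result directly, no "Can't get l4proto" error path needed. */
	printf("proto 17  -> %s\n", toy_l4proto_find(17)->name);
	printf("proto 132 -> %s\n", toy_l4proto_find(132)->name);
	return 0;
}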
Signed-off-by: Pablo Neira Ayuso Acked-by: Jarno Rajahalme Acked-by: Joe Stringer --- net/openvswitch/conntrack.c | 8 -------- 1 file changed, 8 deletions(-) diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c index 9741a76c7405..9f0bc49fa969 100644 --- a/net/openvswitch/conntrack.c +++ b/net/openvswitch/conntrack.c @@ -439,20 +439,12 @@ ovs_ct_find_existing(struct net *net, const struct nf_conntrack_zone *zone, u8 protonum; l3proto = __nf_ct_l3proto_find(l3num); - if (!l3proto) { - pr_debug("ovs_ct_find_existing: Can't get l3proto\n"); - return NULL; - } if (l3proto->get_l4proto(skb, skb_network_offset(skb), &dataoff, &protonum) <= 0) { pr_debug("ovs_ct_find_existing: Can't get protonum\n"); return NULL; } l4proto = __nf_ct_l4proto_find(l3num, protonum); - if (!l4proto) { - pr_debug("ovs_ct_find_existing: Can't get l4proto\n"); - return NULL; - } if (!nf_ct_get_tuple(skb, skb_network_offset(skb), dataoff, l3num, protonum, net, &tuple, l3proto, l4proto)) { pr_debug("ovs_ct_find_existing: Can't get tuple\n"); From d7cdf81657776ca1aa8377fd84d02fd8774db483 Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Tue, 3 May 2016 13:54:23 +0200 Subject: [PATCH 1366/1649] netfilter: x_tables: get rid of old and inconsistent debugging The dprintf() and duprintf() functions are enabled at compile time, these days we have better runtime debugging through pr_debug() and static keys. On top of this, this debugging is so old that I don't expect anyone using this anymore, so let's get rid of this. IP_NF_ASSERT() is still left in place, although this needs that NETFILTER_DEBUG is enabled, I think these assertions provide useful context information when reading the code. Note that ARP_NF_ASSERT() has been removed as there is no user of this. Kill also DEBUG_ALLOW_ALL and a couple of pr_error() and pr_debug() spots that are inconsistently placed in the code. Signed-off-by: Pablo Neira Ayuso --- net/ipv4/netfilter/arp_tables.c | 217 +++++----------------------- net/ipv4/netfilter/ip_tables.c | 244 ++++++-------------------------- net/ipv6/netfilter/ip6_tables.c | 229 ++++++------------------------ 3 files changed, 117 insertions(+), 573 deletions(-) diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c index 3355ed72051d..2033f929aa66 100644 --- a/net/ipv4/netfilter/arp_tables.c +++ b/net/ipv4/netfilter/arp_tables.c @@ -34,27 +34,6 @@ MODULE_LICENSE("GPL"); MODULE_AUTHOR("David S. Miller "); MODULE_DESCRIPTION("arptables core"); -/*#define DEBUG_ARP_TABLES*/ -/*#define DEBUG_ARP_TABLES_USER*/ - -#ifdef DEBUG_ARP_TABLES -#define dprintf(format, args...) pr_debug(format, ## args) -#else -#define dprintf(format, args...) -#endif - -#ifdef DEBUG_ARP_TABLES_USER -#define duprintf(format, args...) pr_debug(format, ## args) -#else -#define duprintf(format, args...) 
-#endif - -#ifdef CONFIG_NETFILTER_DEBUG -#define ARP_NF_ASSERT(x) WARN_ON(!(x)) -#else -#define ARP_NF_ASSERT(x) -#endif - void *arpt_alloc_initial_table(const struct xt_table *info) { return xt_alloc_initial_table(arpt, ARPT); @@ -113,36 +92,20 @@ static inline int arp_packet_match(const struct arphdr *arphdr, #define FWINV(bool, invflg) ((bool) ^ !!(arpinfo->invflags & (invflg))) if (FWINV((arphdr->ar_op & arpinfo->arpop_mask) != arpinfo->arpop, - ARPT_INV_ARPOP)) { - dprintf("ARP operation field mismatch.\n"); - dprintf("ar_op: %04x info->arpop: %04x info->arpop_mask: %04x\n", - arphdr->ar_op, arpinfo->arpop, arpinfo->arpop_mask); + ARPT_INV_ARPOP)) return 0; - } if (FWINV((arphdr->ar_hrd & arpinfo->arhrd_mask) != arpinfo->arhrd, - ARPT_INV_ARPHRD)) { - dprintf("ARP hardware address format mismatch.\n"); - dprintf("ar_hrd: %04x info->arhrd: %04x info->arhrd_mask: %04x\n", - arphdr->ar_hrd, arpinfo->arhrd, arpinfo->arhrd_mask); + ARPT_INV_ARPHRD)) return 0; - } if (FWINV((arphdr->ar_pro & arpinfo->arpro_mask) != arpinfo->arpro, - ARPT_INV_ARPPRO)) { - dprintf("ARP protocol address format mismatch.\n"); - dprintf("ar_pro: %04x info->arpro: %04x info->arpro_mask: %04x\n", - arphdr->ar_pro, arpinfo->arpro, arpinfo->arpro_mask); + ARPT_INV_ARPPRO)) return 0; - } if (FWINV((arphdr->ar_hln & arpinfo->arhln_mask) != arpinfo->arhln, - ARPT_INV_ARPHLN)) { - dprintf("ARP hardware address length mismatch.\n"); - dprintf("ar_hln: %02x info->arhln: %02x info->arhln_mask: %02x\n", - arphdr->ar_hln, arpinfo->arhln, arpinfo->arhln_mask); + ARPT_INV_ARPHLN)) return 0; - } src_devaddr = arpptr; arpptr += dev->addr_len; @@ -155,49 +118,25 @@ static inline int arp_packet_match(const struct arphdr *arphdr, if (FWINV(arp_devaddr_compare(&arpinfo->src_devaddr, src_devaddr, dev->addr_len), ARPT_INV_SRCDEVADDR) || FWINV(arp_devaddr_compare(&arpinfo->tgt_devaddr, tgt_devaddr, dev->addr_len), - ARPT_INV_TGTDEVADDR)) { - dprintf("Source or target device address mismatch.\n"); - + ARPT_INV_TGTDEVADDR)) return 0; - } if (FWINV((src_ipaddr & arpinfo->smsk.s_addr) != arpinfo->src.s_addr, ARPT_INV_SRCIP) || FWINV(((tgt_ipaddr & arpinfo->tmsk.s_addr) != arpinfo->tgt.s_addr), - ARPT_INV_TGTIP)) { - dprintf("Source or target IP address mismatch.\n"); - - dprintf("SRC: %pI4. Mask: %pI4. Target: %pI4.%s\n", - &src_ipaddr, - &arpinfo->smsk.s_addr, - &arpinfo->src.s_addr, - arpinfo->invflags & ARPT_INV_SRCIP ? " (INV)" : ""); - dprintf("TGT: %pI4 Mask: %pI4 Target: %pI4.%s\n", - &tgt_ipaddr, - &arpinfo->tmsk.s_addr, - &arpinfo->tgt.s_addr, - arpinfo->invflags & ARPT_INV_TGTIP ? " (INV)" : ""); + ARPT_INV_TGTIP)) return 0; - } /* Look for ifname matches. */ ret = ifname_compare(indev, arpinfo->iniface, arpinfo->iniface_mask); - if (FWINV(ret != 0, ARPT_INV_VIA_IN)) { - dprintf("VIA in mismatch (%s vs %s).%s\n", - indev, arpinfo->iniface, - arpinfo->invflags & ARPT_INV_VIA_IN ? " (INV)" : ""); + if (FWINV(ret != 0, ARPT_INV_VIA_IN)) return 0; - } ret = ifname_compare(outdev, arpinfo->outiface, arpinfo->outiface_mask); - if (FWINV(ret != 0, ARPT_INV_VIA_OUT)) { - dprintf("VIA out mismatch (%s vs %s).%s\n", - outdev, arpinfo->outiface, - arpinfo->invflags & ARPT_INV_VIA_OUT ? 
" (INV)" : ""); + if (FWINV(ret != 0, ARPT_INV_VIA_OUT)) return 0; - } return 1; #undef FWINV @@ -205,16 +144,10 @@ static inline int arp_packet_match(const struct arphdr *arphdr, static inline int arp_checkentry(const struct arpt_arp *arp) { - if (arp->flags & ~ARPT_F_MASK) { - duprintf("Unknown flag bits set: %08X\n", - arp->flags & ~ARPT_F_MASK); + if (arp->flags & ~ARPT_F_MASK) return 0; - } - if (arp->invflags & ~ARPT_INV_MASK) { - duprintf("Unknown invflag bits set: %08X\n", - arp->invflags & ~ARPT_INV_MASK); + if (arp->invflags & ~ARPT_INV_MASK) return 0; - } return 1; } @@ -406,11 +339,9 @@ static int mark_source_chains(const struct xt_table_info *newinfo, = (void *)arpt_get_target_c(e); int visited = e->comefrom & (1 << hook); - if (e->comefrom & (1 << NF_ARP_NUMHOOKS)) { - pr_notice("arptables: loop hook %u pos %u %08X.\n", - hook, pos, e->comefrom); + if (e->comefrom & (1 << NF_ARP_NUMHOOKS)) return 0; - } + e->comefrom |= ((1 << hook) | (1 << NF_ARP_NUMHOOKS)); @@ -423,12 +354,8 @@ static int mark_source_chains(const struct xt_table_info *newinfo, if ((strcmp(t->target.u.user.name, XT_STANDARD_TARGET) == 0) && - t->verdict < -NF_MAX_VERDICT - 1) { - duprintf("mark_source_chains: bad " - "negative verdict (%i)\n", - t->verdict); + t->verdict < -NF_MAX_VERDICT - 1) return 0; - } /* Return: backtrack through the last * big jump. @@ -462,8 +389,6 @@ static int mark_source_chains(const struct xt_table_info *newinfo, XT_STANDARD_TARGET) == 0 && newpos >= 0) { /* This a jump; chase it. */ - duprintf("Jump rule %u -> %u\n", - pos, newpos); e = (struct arpt_entry *) (entry0 + newpos); if (!find_jump_target(newinfo, e)) @@ -480,8 +405,7 @@ static int mark_source_chains(const struct xt_table_info *newinfo, pos = newpos; } } -next: - duprintf("Finished chain %u\n", hook); +next: ; } return 1; } @@ -489,7 +413,6 @@ next: static inline int check_target(struct arpt_entry *e, const char *name) { struct xt_entry_target *t = arpt_get_target(e); - int ret; struct xt_tgchk_param par = { .table = name, .entryinfo = e, @@ -499,13 +422,7 @@ static inline int check_target(struct arpt_entry *e, const char *name) .family = NFPROTO_ARP, }; - ret = xt_check_target(&par, t->u.target_size - sizeof(*t), 0, false); - if (ret < 0) { - duprintf("arp_tables: check failed for `%s'.\n", - t->u.kernel.target->name); - return ret; - } - return 0; + return xt_check_target(&par, t->u.target_size - sizeof(*t), 0, false); } static inline int @@ -525,7 +442,6 @@ find_check_entry(struct arpt_entry *e, const char *name, unsigned int size) target = xt_request_find_target(NFPROTO_ARP, t->u.user.name, t->u.user.revision); if (IS_ERR(target)) { - duprintf("find_check_entry: `%s' not found\n", t->u.user.name); ret = PTR_ERR(target); goto out; } @@ -571,17 +487,12 @@ static inline int check_entry_size_and_hooks(struct arpt_entry *e, if ((unsigned long)e % __alignof__(struct arpt_entry) != 0 || (unsigned char *)e + sizeof(struct arpt_entry) >= limit || - (unsigned char *)e + e->next_offset > limit) { - duprintf("Bad offset %p\n", e); + (unsigned char *)e + e->next_offset > limit) return -EINVAL; - } if (e->next_offset - < sizeof(struct arpt_entry) + sizeof(struct xt_entry_target)) { - duprintf("checking: element %p size %u\n", - e, e->next_offset); + < sizeof(struct arpt_entry) + sizeof(struct xt_entry_target)) return -EINVAL; - } if (!arp_checkentry(&e->arp)) return -EINVAL; @@ -598,12 +509,9 @@ static inline int check_entry_size_and_hooks(struct arpt_entry *e, if ((unsigned char *)e - base == hook_entries[h]) 
newinfo->hook_entry[h] = hook_entries[h]; if ((unsigned char *)e - base == underflows[h]) { - if (!check_underflow(e)) { - pr_debug("Underflows must be unconditional and " - "use the STANDARD target with " - "ACCEPT/DROP\n"); + if (!check_underflow(e)) return -EINVAL; - } + newinfo->underflow[h] = underflows[h]; } } @@ -648,7 +556,6 @@ static int translate_table(struct xt_table_info *newinfo, void *entry0, newinfo->underflow[i] = 0xFFFFFFFF; } - duprintf("translate_table: size %u\n", newinfo->size); i = 0; /* Walk through entries, checking offsets. */ @@ -665,31 +572,21 @@ static int translate_table(struct xt_table_info *newinfo, void *entry0, XT_ERROR_TARGET) == 0) ++newinfo->stacksize; } - duprintf("translate_table: ARPT_ENTRY_ITERATE gives %d\n", ret); if (ret != 0) return ret; - if (i != repl->num_entries) { - duprintf("translate_table: %u not %u entries\n", - i, repl->num_entries); + if (i != repl->num_entries) return -EINVAL; - } /* Check hooks all assigned */ for (i = 0; i < NF_ARP_NUMHOOKS; i++) { /* Only hooks which are valid */ if (!(repl->valid_hooks & (1 << i))) continue; - if (newinfo->hook_entry[i] == 0xFFFFFFFF) { - duprintf("Invalid hook entry %u %u\n", - i, repl->hook_entry[i]); + if (newinfo->hook_entry[i] == 0xFFFFFFFF) return -EINVAL; - } - if (newinfo->underflow[i] == 0xFFFFFFFF) { - duprintf("Invalid underflow %u %u\n", - i, repl->underflow[i]); + if (newinfo->underflow[i] == 0xFFFFFFFF) return -EINVAL; - } } if (!mark_source_chains(newinfo, repl->valid_hooks, entry0)) @@ -897,11 +794,8 @@ static int get_info(struct net *net, void __user *user, struct xt_table *t; int ret; - if (*len != sizeof(struct arpt_getinfo)) { - duprintf("length %u != %Zu\n", *len, - sizeof(struct arpt_getinfo)); + if (*len != sizeof(struct arpt_getinfo)) return -EINVAL; - } if (copy_from_user(name, user, sizeof(name)) != 0) return -EFAULT; @@ -957,33 +851,25 @@ static int get_entries(struct net *net, struct arpt_get_entries __user *uptr, struct arpt_get_entries get; struct xt_table *t; - if (*len < sizeof(get)) { - duprintf("get_entries: %u < %Zu\n", *len, sizeof(get)); + if (*len < sizeof(get)) return -EINVAL; - } if (copy_from_user(&get, uptr, sizeof(get)) != 0) return -EFAULT; - if (*len != sizeof(struct arpt_get_entries) + get.size) { - duprintf("get_entries: %u != %Zu\n", *len, - sizeof(struct arpt_get_entries) + get.size); + if (*len != sizeof(struct arpt_get_entries) + get.size) return -EINVAL; - } + get.name[sizeof(get.name) - 1] = '\0'; t = xt_find_table_lock(net, NFPROTO_ARP, get.name); if (!IS_ERR_OR_NULL(t)) { const struct xt_table_info *private = t->private; - duprintf("t->private->number = %u\n", - private->number); if (get.size == private->size) ret = copy_entries_to_user(private->size, t, uptr->entrytable); - else { - duprintf("get_entries: I've got %u not %u!\n", - private->size, get.size); + else ret = -EAGAIN; - } + module_put(t->me); xt_table_unlock(t); } else @@ -1021,8 +907,6 @@ static int __do_replace(struct net *net, const char *name, /* You lied! 
*/ if (valid_hooks != t->valid_hooks) { - duprintf("Valid hook crap: %08X vs %08X\n", - valid_hooks, t->valid_hooks); ret = -EINVAL; goto put_module; } @@ -1032,8 +916,6 @@ static int __do_replace(struct net *net, const char *name, goto put_module; /* Update module usage count based on number of rules */ - duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n", - oldinfo->number, oldinfo->initial_entries, newinfo->number); if ((oldinfo->number > oldinfo->initial_entries) || (newinfo->number <= oldinfo->initial_entries)) module_put(t->me); @@ -1103,8 +985,6 @@ static int do_replace(struct net *net, const void __user *user, if (ret != 0) goto free_newinfo; - duprintf("arp_tables: Translated table\n"); - ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo, tmp.num_counters, tmp.counters); if (ret) @@ -1202,20 +1082,14 @@ check_compat_entry_size_and_hooks(struct compat_arpt_entry *e, unsigned int entry_offset; int ret, off; - duprintf("check_compat_entry_size_and_hooks %p\n", e); if ((unsigned long)e % __alignof__(struct compat_arpt_entry) != 0 || (unsigned char *)e + sizeof(struct compat_arpt_entry) >= limit || - (unsigned char *)e + e->next_offset > limit) { - duprintf("Bad offset %p, limit = %p\n", e, limit); + (unsigned char *)e + e->next_offset > limit) return -EINVAL; - } if (e->next_offset < sizeof(struct compat_arpt_entry) + - sizeof(struct compat_xt_entry_target)) { - duprintf("checking: element %p size %u\n", - e, e->next_offset); + sizeof(struct compat_xt_entry_target)) return -EINVAL; - } if (!arp_checkentry(&e->arp)) return -EINVAL; @@ -1232,8 +1106,6 @@ check_compat_entry_size_and_hooks(struct compat_arpt_entry *e, target = xt_request_find_target(NFPROTO_ARP, t->u.user.name, t->u.user.revision); if (IS_ERR(target)) { - duprintf("check_compat_entry_size_and_hooks: `%s' not found\n", - t->u.user.name); ret = PTR_ERR(target); goto out; } @@ -1303,7 +1175,6 @@ static int translate_compat_table(struct xt_table_info **pinfo, size = compatr->size; info->number = compatr->num_entries; - duprintf("translate_compat_table: size %u\n", info->size); j = 0; xt_compat_lock(NFPROTO_ARP); xt_compat_init_offsets(NFPROTO_ARP, compatr->num_entries); @@ -1318,11 +1189,8 @@ static int translate_compat_table(struct xt_table_info **pinfo, } ret = -EINVAL; - if (j != compatr->num_entries) { - duprintf("translate_compat_table: %u not %u entries\n", - j, compatr->num_entries); + if (j != compatr->num_entries) goto out_unlock; - } ret = -ENOMEM; newinfo = xt_alloc_table_info(size); @@ -1413,8 +1281,6 @@ static int compat_do_replace(struct net *net, void __user *user, if (ret != 0) goto free_newinfo; - duprintf("compat_do_replace: Translated table\n"); - ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo, tmp.num_counters, compat_ptr(tmp.counters)); if (ret) @@ -1447,7 +1313,6 @@ static int compat_do_arpt_set_ctl(struct sock *sk, int cmd, void __user *user, break; default: - duprintf("do_arpt_set_ctl: unknown request %i\n", cmd); ret = -EINVAL; } @@ -1530,17 +1395,13 @@ static int compat_get_entries(struct net *net, struct compat_arpt_get_entries get; struct xt_table *t; - if (*len < sizeof(get)) { - duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get)); + if (*len < sizeof(get)) return -EINVAL; - } if (copy_from_user(&get, uptr, sizeof(get)) != 0) return -EFAULT; - if (*len != sizeof(struct compat_arpt_get_entries) + get.size) { - duprintf("compat_get_entries: %u != %zu\n", - *len, sizeof(get) + get.size); + if (*len != sizeof(struct compat_arpt_get_entries) + get.size) return 
-EINVAL; - } + get.name[sizeof(get.name) - 1] = '\0'; xt_compat_lock(NFPROTO_ARP); @@ -1549,16 +1410,13 @@ static int compat_get_entries(struct net *net, const struct xt_table_info *private = t->private; struct xt_table_info info; - duprintf("t->private->number = %u\n", private->number); ret = compat_table_info(private, &info); if (!ret && get.size == info.size) { ret = compat_copy_entries_to_user(private->size, t, uptr->entrytable); - } else if (!ret) { - duprintf("compat_get_entries: I've got %u not %u!\n", - private->size, get.size); + } else if (!ret) ret = -EAGAIN; - } + xt_compat_flush_offsets(NFPROTO_ARP); module_put(t->me); xt_table_unlock(t); @@ -1610,7 +1468,6 @@ static int do_arpt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned break; default: - duprintf("do_arpt_set_ctl: unknown request %i\n", cmd); ret = -EINVAL; } @@ -1653,7 +1510,6 @@ static int do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len } default: - duprintf("do_arpt_get_ctl: unknown request %i\n", cmd); ret = -EINVAL; } @@ -1698,7 +1554,6 @@ int arpt_register_table(struct net *net, memcpy(loc_cpu_entry, repl->entries, repl->size); ret = translate_table(newinfo, loc_cpu_entry, repl); - duprintf("arpt_register_table: translate table gives %d\n", ret); if (ret != 0) goto out_free; diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c index 21ccc19e1e6f..54906e0e8e0c 100644 --- a/net/ipv4/netfilter/ip_tables.c +++ b/net/ipv4/netfilter/ip_tables.c @@ -35,34 +35,12 @@ MODULE_LICENSE("GPL"); MODULE_AUTHOR("Netfilter Core Team "); MODULE_DESCRIPTION("IPv4 packet filter"); -/*#define DEBUG_IP_FIREWALL*/ -/*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */ -/*#define DEBUG_IP_FIREWALL_USER*/ - -#ifdef DEBUG_IP_FIREWALL -#define dprintf(format, args...) pr_info(format , ## args) -#else -#define dprintf(format, args...) -#endif - -#ifdef DEBUG_IP_FIREWALL_USER -#define duprintf(format, args...) pr_info(format , ## args) -#else -#define duprintf(format, args...) -#endif - #ifdef CONFIG_NETFILTER_DEBUG #define IP_NF_ASSERT(x) WARN_ON(!(x)) #else #define IP_NF_ASSERT(x) #endif -#if 0 -/* All the better to debug you with... */ -#define static -#define inline -#endif - void *ipt_alloc_initial_table(const struct xt_table *info) { return xt_alloc_initial_table(ipt, IPT); @@ -85,52 +63,28 @@ ip_packet_match(const struct iphdr *ip, if (FWINV((ip->saddr&ipinfo->smsk.s_addr) != ipinfo->src.s_addr, IPT_INV_SRCIP) || FWINV((ip->daddr&ipinfo->dmsk.s_addr) != ipinfo->dst.s_addr, - IPT_INV_DSTIP)) { - dprintf("Source or dest mismatch.\n"); - - dprintf("SRC: %pI4. Mask: %pI4. Target: %pI4.%s\n", - &ip->saddr, &ipinfo->smsk.s_addr, &ipinfo->src.s_addr, - ipinfo->invflags & IPT_INV_SRCIP ? " (INV)" : ""); - dprintf("DST: %pI4 Mask: %pI4 Target: %pI4.%s\n", - &ip->daddr, &ipinfo->dmsk.s_addr, &ipinfo->dst.s_addr, - ipinfo->invflags & IPT_INV_DSTIP ? " (INV)" : ""); + IPT_INV_DSTIP)) return false; - } ret = ifname_compare_aligned(indev, ipinfo->iniface, ipinfo->iniface_mask); - if (FWINV(ret != 0, IPT_INV_VIA_IN)) { - dprintf("VIA in mismatch (%s vs %s).%s\n", - indev, ipinfo->iniface, - ipinfo->invflags & IPT_INV_VIA_IN ? " (INV)" : ""); + if (FWINV(ret != 0, IPT_INV_VIA_IN)) return false; - } ret = ifname_compare_aligned(outdev, ipinfo->outiface, ipinfo->outiface_mask); - if (FWINV(ret != 0, IPT_INV_VIA_OUT)) { - dprintf("VIA out mismatch (%s vs %s).%s\n", - outdev, ipinfo->outiface, - ipinfo->invflags & IPT_INV_VIA_OUT ? 
" (INV)" : ""); + if (FWINV(ret != 0, IPT_INV_VIA_OUT)) return false; - } /* Check specific protocol */ if (ipinfo->proto && - FWINV(ip->protocol != ipinfo->proto, IPT_INV_PROTO)) { - dprintf("Packet protocol %hi does not match %hi.%s\n", - ip->protocol, ipinfo->proto, - ipinfo->invflags & IPT_INV_PROTO ? " (INV)" : ""); + FWINV(ip->protocol != ipinfo->proto, IPT_INV_PROTO)) return false; - } /* If we have a fragment rule but the packet is not a fragment * then we return zero */ - if (FWINV((ipinfo->flags&IPT_F_FRAG) && !isfrag, IPT_INV_FRAG)) { - dprintf("Fragment rule but not fragment.%s\n", - ipinfo->invflags & IPT_INV_FRAG ? " (INV)" : ""); + if (FWINV((ipinfo->flags&IPT_F_FRAG) && !isfrag, IPT_INV_FRAG)) return false; - } return true; } @@ -138,16 +92,10 @@ ip_packet_match(const struct iphdr *ip, static bool ip_checkentry(const struct ipt_ip *ip) { - if (ip->flags & ~IPT_F_MASK) { - duprintf("Unknown flag bits set: %08X\n", - ip->flags & ~IPT_F_MASK); + if (ip->flags & ~IPT_F_MASK) return false; - } - if (ip->invflags & ~IPT_INV_MASK) { - duprintf("Unknown invflag bits set: %08X\n", - ip->invflags & ~IPT_INV_MASK); + if (ip->invflags & ~IPT_INV_MASK) return false; - } return true; } @@ -346,10 +294,6 @@ ipt_do_table(struct sk_buff *skb, e = get_entry(table_base, private->hook_entry[hook]); - pr_debug("Entering %s(hook %u), UF %p\n", - table->name, hook, - get_entry(table_base, private->underflow[hook])); - do { const struct xt_entry_target *t; const struct xt_entry_match *ematch; @@ -396,22 +340,15 @@ ipt_do_table(struct sk_buff *skb, if (stackidx == 0) { e = get_entry(table_base, private->underflow[hook]); - pr_debug("Underflow (this is normal) " - "to %p\n", e); } else { e = jumpstack[--stackidx]; - pr_debug("Pulled %p out from pos %u\n", - e, stackidx); e = ipt_next_entry(e); } continue; } if (table_base + v != ipt_next_entry(e) && - !(e->ip.flags & IPT_F_GOTO)) { + !(e->ip.flags & IPT_F_GOTO)) jumpstack[stackidx++] = e; - pr_debug("Pushed %p into pos %u\n", - e, stackidx - 1); - } e = get_entry(table_base, v); continue; @@ -429,18 +366,13 @@ ipt_do_table(struct sk_buff *skb, /* Verdict */ break; } while (!acpar.hotdrop); - pr_debug("Exiting %s; sp at %u\n", __func__, stackidx); xt_write_recseq_end(addend); local_bh_enable(); -#ifdef DEBUG_ALLOW_ALL - return NF_ACCEPT; -#else if (acpar.hotdrop) return NF_DROP; else return verdict; -#endif } static bool find_jump_target(const struct xt_table_info *t, @@ -480,11 +412,9 @@ mark_source_chains(const struct xt_table_info *newinfo, = (void *)ipt_get_target_c(e); int visited = e->comefrom & (1 << hook); - if (e->comefrom & (1 << NF_INET_NUMHOOKS)) { - pr_err("iptables: loop hook %u pos %u %08X.\n", - hook, pos, e->comefrom); + if (e->comefrom & (1 << NF_INET_NUMHOOKS)) return 0; - } + e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS)); /* Unconditional return/END. */ @@ -496,26 +426,13 @@ mark_source_chains(const struct xt_table_info *newinfo, if ((strcmp(t->target.u.user.name, XT_STANDARD_TARGET) == 0) && - t->verdict < -NF_MAX_VERDICT - 1) { - duprintf("mark_source_chains: bad " - "negative verdict (%i)\n", - t->verdict); + t->verdict < -NF_MAX_VERDICT - 1) return 0; - } /* Return: backtrack through the last big jump. 
*/ do { e->comefrom ^= (1<comefrom - & (1 << NF_INET_NUMHOOKS)) { - duprintf("Back unset " - "on hook %u " - "rule %u\n", - hook, pos); - } -#endif oldpos = pos; pos = e->counters.pcnt; e->counters.pcnt = 0; @@ -543,8 +460,6 @@ mark_source_chains(const struct xt_table_info *newinfo, XT_STANDARD_TARGET) == 0 && newpos >= 0) { /* This a jump; chase it. */ - duprintf("Jump rule %u -> %u\n", - pos, newpos); e = (struct ipt_entry *) (entry0 + newpos); if (!find_jump_target(newinfo, e)) @@ -561,8 +476,7 @@ mark_source_chains(const struct xt_table_info *newinfo, pos = newpos; } } -next: - duprintf("Finished chain %u\n", hook); +next: ; } return 1; } @@ -584,18 +498,12 @@ static int check_match(struct xt_entry_match *m, struct xt_mtchk_param *par) { const struct ipt_ip *ip = par->entryinfo; - int ret; par->match = m->u.kernel.match; par->matchinfo = m->data; - ret = xt_check_match(par, m->u.match_size - sizeof(*m), - ip->proto, ip->invflags & IPT_INV_PROTO); - if (ret < 0) { - duprintf("check failed for `%s'.\n", par->match->name); - return ret; - } - return 0; + return xt_check_match(par, m->u.match_size - sizeof(*m), + ip->proto, ip->invflags & IPT_INV_PROTO); } static int @@ -606,10 +514,8 @@ find_check_match(struct xt_entry_match *m, struct xt_mtchk_param *par) match = xt_request_find_match(NFPROTO_IPV4, m->u.user.name, m->u.user.revision); - if (IS_ERR(match)) { - duprintf("find_check_match: `%s' not found\n", m->u.user.name); + if (IS_ERR(match)) return PTR_ERR(match); - } m->u.kernel.match = match; ret = check_match(m, par); @@ -634,16 +540,9 @@ static int check_target(struct ipt_entry *e, struct net *net, const char *name) .hook_mask = e->comefrom, .family = NFPROTO_IPV4, }; - int ret; - ret = xt_check_target(&par, t->u.target_size - sizeof(*t), - e->ip.proto, e->ip.invflags & IPT_INV_PROTO); - if (ret < 0) { - duprintf("check failed for `%s'.\n", - t->u.kernel.target->name); - return ret; - } - return 0; + return xt_check_target(&par, t->u.target_size - sizeof(*t), + e->ip.proto, e->ip.invflags & IPT_INV_PROTO); } static int @@ -680,7 +579,6 @@ find_check_entry(struct ipt_entry *e, struct net *net, const char *name, target = xt_request_find_target(NFPROTO_IPV4, t->u.user.name, t->u.user.revision); if (IS_ERR(target)) { - duprintf("find_check_entry: `%s' not found\n", t->u.user.name); ret = PTR_ERR(target); goto cleanup_matches; } @@ -734,17 +632,12 @@ check_entry_size_and_hooks(struct ipt_entry *e, if ((unsigned long)e % __alignof__(struct ipt_entry) != 0 || (unsigned char *)e + sizeof(struct ipt_entry) >= limit || - (unsigned char *)e + e->next_offset > limit) { - duprintf("Bad offset %p\n", e); + (unsigned char *)e + e->next_offset > limit) return -EINVAL; - } if (e->next_offset - < sizeof(struct ipt_entry) + sizeof(struct xt_entry_target)) { - duprintf("checking: element %p size %u\n", - e, e->next_offset); + < sizeof(struct ipt_entry) + sizeof(struct xt_entry_target)) return -EINVAL; - } if (!ip_checkentry(&e->ip)) return -EINVAL; @@ -761,12 +654,9 @@ check_entry_size_and_hooks(struct ipt_entry *e, if ((unsigned char *)e - base == hook_entries[h]) newinfo->hook_entry[h] = hook_entries[h]; if ((unsigned char *)e - base == underflows[h]) { - if (!check_underflow(e)) { - pr_debug("Underflows must be unconditional and " - "use the STANDARD target with " - "ACCEPT/DROP\n"); + if (!check_underflow(e)) return -EINVAL; - } + newinfo->underflow[h] = underflows[h]; } } @@ -818,7 +708,6 @@ translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0, newinfo->underflow[i] = 
0xFFFFFFFF; } - duprintf("translate_table: size %u\n", newinfo->size); i = 0; /* Walk through entries, checking offsets. */ xt_entry_foreach(iter, entry0, newinfo->size) { @@ -835,27 +724,18 @@ translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0, ++newinfo->stacksize; } - if (i != repl->num_entries) { - duprintf("translate_table: %u not %u entries\n", - i, repl->num_entries); + if (i != repl->num_entries) return -EINVAL; - } /* Check hooks all assigned */ for (i = 0; i < NF_INET_NUMHOOKS; i++) { /* Only hooks which are valid */ if (!(repl->valid_hooks & (1 << i))) continue; - if (newinfo->hook_entry[i] == 0xFFFFFFFF) { - duprintf("Invalid hook entry %u %u\n", - i, repl->hook_entry[i]); + if (newinfo->hook_entry[i] == 0xFFFFFFFF) return -EINVAL; - } - if (newinfo->underflow[i] == 0xFFFFFFFF) { - duprintf("Invalid underflow %u %u\n", - i, repl->underflow[i]); + if (newinfo->underflow[i] == 0xFFFFFFFF) return -EINVAL; - } } if (!mark_source_chains(newinfo, repl->valid_hooks, entry0)) @@ -1083,11 +963,8 @@ static int get_info(struct net *net, void __user *user, struct xt_table *t; int ret; - if (*len != sizeof(struct ipt_getinfo)) { - duprintf("length %u != %zu\n", *len, - sizeof(struct ipt_getinfo)); + if (*len != sizeof(struct ipt_getinfo)) return -EINVAL; - } if (copy_from_user(name, user, sizeof(name)) != 0) return -EFAULT; @@ -1145,31 +1022,23 @@ get_entries(struct net *net, struct ipt_get_entries __user *uptr, struct ipt_get_entries get; struct xt_table *t; - if (*len < sizeof(get)) { - duprintf("get_entries: %u < %zu\n", *len, sizeof(get)); + if (*len < sizeof(get)) return -EINVAL; - } if (copy_from_user(&get, uptr, sizeof(get)) != 0) return -EFAULT; - if (*len != sizeof(struct ipt_get_entries) + get.size) { - duprintf("get_entries: %u != %zu\n", - *len, sizeof(get) + get.size); + if (*len != sizeof(struct ipt_get_entries) + get.size) return -EINVAL; - } get.name[sizeof(get.name) - 1] = '\0'; t = xt_find_table_lock(net, AF_INET, get.name); if (!IS_ERR_OR_NULL(t)) { const struct xt_table_info *private = t->private; - duprintf("t->private->number = %u\n", private->number); if (get.size == private->size) ret = copy_entries_to_user(private->size, t, uptr->entrytable); - else { - duprintf("get_entries: I've got %u not %u!\n", - private->size, get.size); + else ret = -EAGAIN; - } + module_put(t->me); xt_table_unlock(t); } else @@ -1205,8 +1074,6 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks, /* You lied! 
*/ if (valid_hooks != t->valid_hooks) { - duprintf("Valid hook crap: %08X vs %08X\n", - valid_hooks, t->valid_hooks); ret = -EINVAL; goto put_module; } @@ -1216,8 +1083,6 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks, goto put_module; /* Update module usage count based on number of rules */ - duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n", - oldinfo->number, oldinfo->initial_entries, newinfo->number); if ((oldinfo->number > oldinfo->initial_entries) || (newinfo->number <= oldinfo->initial_entries)) module_put(t->me); @@ -1286,8 +1151,6 @@ do_replace(struct net *net, const void __user *user, unsigned int len) if (ret != 0) goto free_newinfo; - duprintf("Translated table\n"); - ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo, tmp.num_counters, tmp.counters); if (ret) @@ -1413,11 +1276,9 @@ compat_find_calc_match(struct xt_entry_match *m, match = xt_request_find_match(NFPROTO_IPV4, m->u.user.name, m->u.user.revision); - if (IS_ERR(match)) { - duprintf("compat_check_calc_match: `%s' not found\n", - m->u.user.name); + if (IS_ERR(match)) return PTR_ERR(match); - } + m->u.kernel.match = match; *size += xt_compat_match_offset(match); return 0; @@ -1449,20 +1310,14 @@ check_compat_entry_size_and_hooks(struct compat_ipt_entry *e, unsigned int j; int ret, off; - duprintf("check_compat_entry_size_and_hooks %p\n", e); if ((unsigned long)e % __alignof__(struct compat_ipt_entry) != 0 || (unsigned char *)e + sizeof(struct compat_ipt_entry) >= limit || - (unsigned char *)e + e->next_offset > limit) { - duprintf("Bad offset %p, limit = %p\n", e, limit); + (unsigned char *)e + e->next_offset > limit) return -EINVAL; - } if (e->next_offset < sizeof(struct compat_ipt_entry) + - sizeof(struct compat_xt_entry_target)) { - duprintf("checking: element %p size %u\n", - e, e->next_offset); + sizeof(struct compat_xt_entry_target)) return -EINVAL; - } if (!ip_checkentry(&e->ip)) return -EINVAL; @@ -1486,8 +1341,6 @@ check_compat_entry_size_and_hooks(struct compat_ipt_entry *e, target = xt_request_find_target(NFPROTO_IPV4, t->u.user.name, t->u.user.revision); if (IS_ERR(target)) { - duprintf("check_compat_entry_size_and_hooks: `%s' not found\n", - t->u.user.name); ret = PTR_ERR(target); goto release_matches; } @@ -1569,7 +1422,6 @@ translate_compat_table(struct net *net, size = compatr->size; info->number = compatr->num_entries; - duprintf("translate_compat_table: size %u\n", info->size); j = 0; xt_compat_lock(AF_INET); xt_compat_init_offsets(AF_INET, compatr->num_entries); @@ -1584,11 +1436,8 @@ translate_compat_table(struct net *net, } ret = -EINVAL; - if (j != compatr->num_entries) { - duprintf("translate_compat_table: %u not %u entries\n", - j, compatr->num_entries); + if (j != compatr->num_entries) goto out_unlock; - } ret = -ENOMEM; newinfo = xt_alloc_table_info(size); @@ -1685,8 +1534,6 @@ compat_do_replace(struct net *net, void __user *user, unsigned int len) if (ret != 0) goto free_newinfo; - duprintf("compat_do_replace: Translated table\n"); - ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo, tmp.num_counters, compat_ptr(tmp.counters)); if (ret) @@ -1720,7 +1567,6 @@ compat_do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user, break; default: - duprintf("do_ipt_set_ctl: unknown request %i\n", cmd); ret = -EINVAL; } @@ -1770,19 +1616,15 @@ compat_get_entries(struct net *net, struct compat_ipt_get_entries __user *uptr, struct compat_ipt_get_entries get; struct xt_table *t; - if (*len < sizeof(get)) { - duprintf("compat_get_entries: %u < %zu\n", 
*len, sizeof(get)); + if (*len < sizeof(get)) return -EINVAL; - } if (copy_from_user(&get, uptr, sizeof(get)) != 0) return -EFAULT; - if (*len != sizeof(struct compat_ipt_get_entries) + get.size) { - duprintf("compat_get_entries: %u != %zu\n", - *len, sizeof(get) + get.size); + if (*len != sizeof(struct compat_ipt_get_entries) + get.size) return -EINVAL; - } + get.name[sizeof(get.name) - 1] = '\0'; xt_compat_lock(AF_INET); @@ -1790,16 +1632,13 @@ compat_get_entries(struct net *net, struct compat_ipt_get_entries __user *uptr, if (!IS_ERR_OR_NULL(t)) { const struct xt_table_info *private = t->private; struct xt_table_info info; - duprintf("t->private->number = %u\n", private->number); ret = compat_table_info(private, &info); - if (!ret && get.size == info.size) { + if (!ret && get.size == info.size) ret = compat_copy_entries_to_user(private->size, t, uptr->entrytable); - } else if (!ret) { - duprintf("compat_get_entries: I've got %u not %u!\n", - private->size, get.size); + else if (!ret) ret = -EAGAIN; - } + xt_compat_flush_offsets(AF_INET); module_put(t->me); xt_table_unlock(t); @@ -1852,7 +1691,6 @@ do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len) break; default: - duprintf("do_ipt_set_ctl: unknown request %i\n", cmd); ret = -EINVAL; } @@ -1904,7 +1742,6 @@ do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) } default: - duprintf("do_ipt_get_ctl: unknown request %i\n", cmd); ret = -EINVAL; } @@ -2006,7 +1843,6 @@ icmp_match(const struct sk_buff *skb, struct xt_action_param *par) /* We've been asked to examine this packet, and we * can't. Hence, no choice but to drop. */ - duprintf("Dropping evil ICMP tinygram.\n"); par->hotdrop = true; return false; } diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c index 17874e83a950..63e06c3dd319 100644 --- a/net/ipv6/netfilter/ip6_tables.c +++ b/net/ipv6/netfilter/ip6_tables.c @@ -39,34 +39,12 @@ MODULE_LICENSE("GPL"); MODULE_AUTHOR("Netfilter Core Team "); MODULE_DESCRIPTION("IPv6 packet filter"); -/*#define DEBUG_IP_FIREWALL*/ -/*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */ -/*#define DEBUG_IP_FIREWALL_USER*/ - -#ifdef DEBUG_IP_FIREWALL -#define dprintf(format, args...) pr_info(format , ## args) -#else -#define dprintf(format, args...) -#endif - -#ifdef DEBUG_IP_FIREWALL_USER -#define duprintf(format, args...) pr_info(format , ## args) -#else -#define duprintf(format, args...) -#endif - #ifdef CONFIG_NETFILTER_DEBUG #define IP_NF_ASSERT(x) WARN_ON(!(x)) #else #define IP_NF_ASSERT(x) #endif -#if 0 -/* All the better to debug you with... */ -#define static -#define inline -#endif - void *ip6t_alloc_initial_table(const struct xt_table *info) { return xt_alloc_initial_table(ip6t, IP6T); @@ -100,35 +78,18 @@ ip6_packet_match(const struct sk_buff *skb, if (FWINV(ipv6_masked_addr_cmp(&ipv6->saddr, &ip6info->smsk, &ip6info->src), IP6T_INV_SRCIP) || FWINV(ipv6_masked_addr_cmp(&ipv6->daddr, &ip6info->dmsk, - &ip6info->dst), IP6T_INV_DSTIP)) { - dprintf("Source or dest mismatch.\n"); -/* - dprintf("SRC: %u. Mask: %u. Target: %u.%s\n", ip->saddr, - ipinfo->smsk.s_addr, ipinfo->src.s_addr, - ipinfo->invflags & IP6T_INV_SRCIP ? " (INV)" : ""); - dprintf("DST: %u. Mask: %u. Target: %u.%s\n", ip->daddr, - ipinfo->dmsk.s_addr, ipinfo->dst.s_addr, - ipinfo->invflags & IP6T_INV_DSTIP ? 
" (INV)" : "");*/ + &ip6info->dst), IP6T_INV_DSTIP)) return false; - } ret = ifname_compare_aligned(indev, ip6info->iniface, ip6info->iniface_mask); - if (FWINV(ret != 0, IP6T_INV_VIA_IN)) { - dprintf("VIA in mismatch (%s vs %s).%s\n", - indev, ip6info->iniface, - ip6info->invflags & IP6T_INV_VIA_IN ? " (INV)" : ""); + if (FWINV(ret != 0, IP6T_INV_VIA_IN)) return false; - } ret = ifname_compare_aligned(outdev, ip6info->outiface, ip6info->outiface_mask); - if (FWINV(ret != 0, IP6T_INV_VIA_OUT)) { - dprintf("VIA out mismatch (%s vs %s).%s\n", - outdev, ip6info->outiface, - ip6info->invflags & IP6T_INV_VIA_OUT ? " (INV)" : ""); + if (FWINV(ret != 0, IP6T_INV_VIA_OUT)) return false; - } /* ... might want to do something with class and flowlabel here ... */ @@ -145,11 +106,6 @@ ip6_packet_match(const struct sk_buff *skb, } *fragoff = _frag_off; - dprintf("Packet protocol %hi ?= %s%hi.\n", - protohdr, - ip6info->invflags & IP6T_INV_PROTO ? "!":"", - ip6info->proto); - if (ip6info->proto == protohdr) { if (ip6info->invflags & IP6T_INV_PROTO) return false; @@ -169,16 +125,11 @@ ip6_packet_match(const struct sk_buff *skb, static bool ip6_checkentry(const struct ip6t_ip6 *ipv6) { - if (ipv6->flags & ~IP6T_F_MASK) { - duprintf("Unknown flag bits set: %08X\n", - ipv6->flags & ~IP6T_F_MASK); + if (ipv6->flags & ~IP6T_F_MASK) return false; - } - if (ipv6->invflags & ~IP6T_INV_MASK) { - duprintf("Unknown invflag bits set: %08X\n", - ipv6->invflags & ~IP6T_INV_MASK); + if (ipv6->invflags & ~IP6T_INV_MASK) return false; - } + return true; } @@ -446,13 +397,9 @@ ip6t_do_table(struct sk_buff *skb, xt_write_recseq_end(addend); local_bh_enable(); -#ifdef DEBUG_ALLOW_ALL - return NF_ACCEPT; -#else if (acpar.hotdrop) return NF_DROP; else return verdict; -#endif } static bool find_jump_target(const struct xt_table_info *t, @@ -492,11 +439,9 @@ mark_source_chains(const struct xt_table_info *newinfo, = (void *)ip6t_get_target_c(e); int visited = e->comefrom & (1 << hook); - if (e->comefrom & (1 << NF_INET_NUMHOOKS)) { - pr_err("iptables: loop hook %u pos %u %08X.\n", - hook, pos, e->comefrom); + if (e->comefrom & (1 << NF_INET_NUMHOOKS)) return 0; - } + e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS)); /* Unconditional return/END. */ @@ -508,26 +453,13 @@ mark_source_chains(const struct xt_table_info *newinfo, if ((strcmp(t->target.u.user.name, XT_STANDARD_TARGET) == 0) && - t->verdict < -NF_MAX_VERDICT - 1) { - duprintf("mark_source_chains: bad " - "negative verdict (%i)\n", - t->verdict); + t->verdict < -NF_MAX_VERDICT - 1) return 0; - } /* Return: backtrack through the last big jump. */ do { e->comefrom ^= (1<comefrom - & (1 << NF_INET_NUMHOOKS)) { - duprintf("Back unset " - "on hook %u " - "rule %u\n", - hook, pos); - } -#endif oldpos = pos; pos = e->counters.pcnt; e->counters.pcnt = 0; @@ -555,8 +487,6 @@ mark_source_chains(const struct xt_table_info *newinfo, XT_STANDARD_TARGET) == 0 && newpos >= 0) { /* This a jump; chase it. 
*/ - duprintf("Jump rule %u -> %u\n", - pos, newpos); e = (struct ip6t_entry *) (entry0 + newpos); if (!find_jump_target(newinfo, e)) @@ -573,8 +503,7 @@ mark_source_chains(const struct xt_table_info *newinfo, pos = newpos; } } -next: - duprintf("Finished chain %u\n", hook); +next: ; } return 1; } @@ -595,19 +524,12 @@ static void cleanup_match(struct xt_entry_match *m, struct net *net) static int check_match(struct xt_entry_match *m, struct xt_mtchk_param *par) { const struct ip6t_ip6 *ipv6 = par->entryinfo; - int ret; par->match = m->u.kernel.match; par->matchinfo = m->data; - ret = xt_check_match(par, m->u.match_size - sizeof(*m), - ipv6->proto, ipv6->invflags & IP6T_INV_PROTO); - if (ret < 0) { - duprintf("ip_tables: check failed for `%s'.\n", - par.match->name); - return ret; - } - return 0; + return xt_check_match(par, m->u.match_size - sizeof(*m), + ipv6->proto, ipv6->invflags & IP6T_INV_PROTO); } static int @@ -618,10 +540,9 @@ find_check_match(struct xt_entry_match *m, struct xt_mtchk_param *par) match = xt_request_find_match(NFPROTO_IPV6, m->u.user.name, m->u.user.revision); - if (IS_ERR(match)) { - duprintf("find_check_match: `%s' not found\n", m->u.user.name); + if (IS_ERR(match)) return PTR_ERR(match); - } + m->u.kernel.match = match; ret = check_match(m, par); @@ -646,17 +567,11 @@ static int check_target(struct ip6t_entry *e, struct net *net, const char *name) .hook_mask = e->comefrom, .family = NFPROTO_IPV6, }; - int ret; t = ip6t_get_target(e); - ret = xt_check_target(&par, t->u.target_size - sizeof(*t), - e->ipv6.proto, e->ipv6.invflags & IP6T_INV_PROTO); - if (ret < 0) { - duprintf("ip_tables: check failed for `%s'.\n", - t->u.kernel.target->name); - return ret; - } - return 0; + return xt_check_target(&par, t->u.target_size - sizeof(*t), + e->ipv6.proto, + e->ipv6.invflags & IP6T_INV_PROTO); } static int @@ -693,7 +608,6 @@ find_check_entry(struct ip6t_entry *e, struct net *net, const char *name, target = xt_request_find_target(NFPROTO_IPV6, t->u.user.name, t->u.user.revision); if (IS_ERR(target)) { - duprintf("find_check_entry: `%s' not found\n", t->u.user.name); ret = PTR_ERR(target); goto cleanup_matches; } @@ -746,17 +660,12 @@ check_entry_size_and_hooks(struct ip6t_entry *e, if ((unsigned long)e % __alignof__(struct ip6t_entry) != 0 || (unsigned char *)e + sizeof(struct ip6t_entry) >= limit || - (unsigned char *)e + e->next_offset > limit) { - duprintf("Bad offset %p\n", e); + (unsigned char *)e + e->next_offset > limit) return -EINVAL; - } if (e->next_offset - < sizeof(struct ip6t_entry) + sizeof(struct xt_entry_target)) { - duprintf("checking: element %p size %u\n", - e, e->next_offset); + < sizeof(struct ip6t_entry) + sizeof(struct xt_entry_target)) return -EINVAL; - } if (!ip6_checkentry(&e->ipv6)) return -EINVAL; @@ -773,12 +682,9 @@ check_entry_size_and_hooks(struct ip6t_entry *e, if ((unsigned char *)e - base == hook_entries[h]) newinfo->hook_entry[h] = hook_entries[h]; if ((unsigned char *)e - base == underflows[h]) { - if (!check_underflow(e)) { - pr_debug("Underflows must be unconditional and " - "use the STANDARD target with " - "ACCEPT/DROP\n"); + if (!check_underflow(e)) return -EINVAL; - } + newinfo->underflow[h] = underflows[h]; } } @@ -830,7 +736,6 @@ translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0, newinfo->underflow[i] = 0xFFFFFFFF; } - duprintf("translate_table: size %u\n", newinfo->size); i = 0; /* Walk through entries, checking offsets. 
*/ xt_entry_foreach(iter, entry0, newinfo->size) { @@ -847,27 +752,18 @@ translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0, ++newinfo->stacksize; } - if (i != repl->num_entries) { - duprintf("translate_table: %u not %u entries\n", - i, repl->num_entries); + if (i != repl->num_entries) return -EINVAL; - } /* Check hooks all assigned */ for (i = 0; i < NF_INET_NUMHOOKS; i++) { /* Only hooks which are valid */ if (!(repl->valid_hooks & (1 << i))) continue; - if (newinfo->hook_entry[i] == 0xFFFFFFFF) { - duprintf("Invalid hook entry %u %u\n", - i, repl->hook_entry[i]); + if (newinfo->hook_entry[i] == 0xFFFFFFFF) return -EINVAL; - } - if (newinfo->underflow[i] == 0xFFFFFFFF) { - duprintf("Invalid underflow %u %u\n", - i, repl->underflow[i]); + if (newinfo->underflow[i] == 0xFFFFFFFF) return -EINVAL; - } } if (!mark_source_chains(newinfo, repl->valid_hooks, entry0)) @@ -1095,11 +991,8 @@ static int get_info(struct net *net, void __user *user, struct xt_table *t; int ret; - if (*len != sizeof(struct ip6t_getinfo)) { - duprintf("length %u != %zu\n", *len, - sizeof(struct ip6t_getinfo)); + if (*len != sizeof(struct ip6t_getinfo)) return -EINVAL; - } if (copy_from_user(name, user, sizeof(name)) != 0) return -EFAULT; @@ -1157,31 +1050,24 @@ get_entries(struct net *net, struct ip6t_get_entries __user *uptr, struct ip6t_get_entries get; struct xt_table *t; - if (*len < sizeof(get)) { - duprintf("get_entries: %u < %zu\n", *len, sizeof(get)); + if (*len < sizeof(get)) return -EINVAL; - } if (copy_from_user(&get, uptr, sizeof(get)) != 0) return -EFAULT; - if (*len != sizeof(struct ip6t_get_entries) + get.size) { - duprintf("get_entries: %u != %zu\n", - *len, sizeof(get) + get.size); + if (*len != sizeof(struct ip6t_get_entries) + get.size) return -EINVAL; - } + get.name[sizeof(get.name) - 1] = '\0'; t = xt_find_table_lock(net, AF_INET6, get.name); if (!IS_ERR_OR_NULL(t)) { struct xt_table_info *private = t->private; - duprintf("t->private->number = %u\n", private->number); if (get.size == private->size) ret = copy_entries_to_user(private->size, t, uptr->entrytable); - else { - duprintf("get_entries: I've got %u not %u!\n", - private->size, get.size); + else ret = -EAGAIN; - } + module_put(t->me); xt_table_unlock(t); } else @@ -1217,8 +1103,6 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks, /* You lied! 
*/ if (valid_hooks != t->valid_hooks) { - duprintf("Valid hook crap: %08X vs %08X\n", - valid_hooks, t->valid_hooks); ret = -EINVAL; goto put_module; } @@ -1228,8 +1112,6 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks, goto put_module; /* Update module usage count based on number of rules */ - duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n", - oldinfo->number, oldinfo->initial_entries, newinfo->number); if ((oldinfo->number > oldinfo->initial_entries) || (newinfo->number <= oldinfo->initial_entries)) module_put(t->me); @@ -1298,8 +1180,6 @@ do_replace(struct net *net, const void __user *user, unsigned int len) if (ret != 0) goto free_newinfo; - duprintf("ip_tables: Translated table\n"); - ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo, tmp.num_counters, tmp.counters); if (ret) @@ -1424,11 +1304,9 @@ compat_find_calc_match(struct xt_entry_match *m, match = xt_request_find_match(NFPROTO_IPV6, m->u.user.name, m->u.user.revision); - if (IS_ERR(match)) { - duprintf("compat_check_calc_match: `%s' not found\n", - m->u.user.name); + if (IS_ERR(match)) return PTR_ERR(match); - } + m->u.kernel.match = match; *size += xt_compat_match_offset(match); return 0; @@ -1460,20 +1338,14 @@ check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e, unsigned int j; int ret, off; - duprintf("check_compat_entry_size_and_hooks %p\n", e); if ((unsigned long)e % __alignof__(struct compat_ip6t_entry) != 0 || (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit || - (unsigned char *)e + e->next_offset > limit) { - duprintf("Bad offset %p, limit = %p\n", e, limit); + (unsigned char *)e + e->next_offset > limit) return -EINVAL; - } if (e->next_offset < sizeof(struct compat_ip6t_entry) + - sizeof(struct compat_xt_entry_target)) { - duprintf("checking: element %p size %u\n", - e, e->next_offset); + sizeof(struct compat_xt_entry_target)) return -EINVAL; - } if (!ip6_checkentry(&e->ipv6)) return -EINVAL; @@ -1497,8 +1369,6 @@ check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e, target = xt_request_find_target(NFPROTO_IPV6, t->u.user.name, t->u.user.revision); if (IS_ERR(target)) { - duprintf("check_compat_entry_size_and_hooks: `%s' not found\n", - t->u.user.name); ret = PTR_ERR(target); goto release_matches; } @@ -1577,7 +1447,6 @@ translate_compat_table(struct net *net, size = compatr->size; info->number = compatr->num_entries; - duprintf("translate_compat_table: size %u\n", info->size); j = 0; xt_compat_lock(AF_INET6); xt_compat_init_offsets(AF_INET6, compatr->num_entries); @@ -1592,11 +1461,8 @@ translate_compat_table(struct net *net, } ret = -EINVAL; - if (j != compatr->num_entries) { - duprintf("translate_compat_table: %u not %u entries\n", - j, compatr->num_entries); + if (j != compatr->num_entries) goto out_unlock; - } ret = -ENOMEM; newinfo = xt_alloc_table_info(size); @@ -1687,8 +1553,6 @@ compat_do_replace(struct net *net, void __user *user, unsigned int len) if (ret != 0) goto free_newinfo; - duprintf("compat_do_replace: Translated table\n"); - ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo, tmp.num_counters, compat_ptr(tmp.counters)); if (ret) @@ -1722,7 +1586,6 @@ compat_do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user, break; default: - duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd); ret = -EINVAL; } @@ -1772,19 +1635,15 @@ compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr, struct compat_ip6t_get_entries get; struct xt_table *t; - if (*len < sizeof(get)) { - 
duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get)); + if (*len < sizeof(get)) return -EINVAL; - } if (copy_from_user(&get, uptr, sizeof(get)) != 0) return -EFAULT; - if (*len != sizeof(struct compat_ip6t_get_entries) + get.size) { - duprintf("compat_get_entries: %u != %zu\n", - *len, sizeof(get) + get.size); + if (*len != sizeof(struct compat_ip6t_get_entries) + get.size) return -EINVAL; - } + get.name[sizeof(get.name) - 1] = '\0'; xt_compat_lock(AF_INET6); @@ -1792,16 +1651,13 @@ compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr, if (!IS_ERR_OR_NULL(t)) { const struct xt_table_info *private = t->private; struct xt_table_info info; - duprintf("t->private->number = %u\n", private->number); ret = compat_table_info(private, &info); - if (!ret && get.size == info.size) { + if (!ret && get.size == info.size) ret = compat_copy_entries_to_user(private->size, t, uptr->entrytable); - } else if (!ret) { - duprintf("compat_get_entries: I've got %u not %u!\n", - private->size, get.size); + else if (!ret) ret = -EAGAIN; - } + xt_compat_flush_offsets(AF_INET6); module_put(t->me); xt_table_unlock(t); @@ -1854,7 +1710,6 @@ do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len) break; default: - duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd); ret = -EINVAL; } @@ -1906,7 +1761,6 @@ do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) } default: - duprintf("do_ip6t_get_ctl: unknown request %i\n", cmd); ret = -EINVAL; } @@ -2008,7 +1862,6 @@ icmp6_match(const struct sk_buff *skb, struct xt_action_param *par) /* We've been asked to examine this packet, and we * can't. Hence, no choice but to drop. */ - duprintf("Dropping evil ICMP tinygram.\n"); par->hotdrop = true; return false; } From cb39ad8b8ef224c544074962780bf763077d6141 Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Wed, 4 May 2016 17:49:53 +0200 Subject: [PATCH 1367/1649] netfilter: nf_tables: allow set names up to 32 bytes Currently, we support set names of up to 16 bytes, get this aligned with the maximum length we can use in ipset to make it easier when considering migration to nf_tables. 
Signed-off-by: Pablo Neira Ayuso --- include/net/netfilter/nf_tables.h | 2 +- include/uapi/linux/netfilter/nf_tables.h | 1 + net/netfilter/nf_tables_api.c | 6 +++--- 3 files changed, 5 insertions(+), 4 deletions(-) diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h index f6b1daf2e698..092235458691 100644 --- a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h @@ -303,7 +303,7 @@ void nft_unregister_set(struct nft_set_ops *ops); struct nft_set { struct list_head list; struct list_head bindings; - char name[IFNAMSIZ]; + char name[NFT_SET_MAXNAMELEN]; u32 ktype; u32 dtype; u32 size; diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h index 660231363bb5..6a4dbe04f09e 100644 --- a/include/uapi/linux/netfilter/nf_tables.h +++ b/include/uapi/linux/netfilter/nf_tables.h @@ -3,6 +3,7 @@ #define NFT_TABLE_MAXNAMELEN 32 #define NFT_CHAIN_MAXNAMELEN 32 +#define NFT_SET_MAXNAMELEN 32 #define NFT_USERDATA_MAXLEN 256 /** diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 73c8fad0b8ef..4d292b933b5c 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -2317,7 +2317,7 @@ nft_select_set_ops(const struct nlattr * const nla[], static const struct nla_policy nft_set_policy[NFTA_SET_MAX + 1] = { [NFTA_SET_TABLE] = { .type = NLA_STRING }, [NFTA_SET_NAME] = { .type = NLA_STRING, - .len = IFNAMSIZ - 1 }, + .len = NFT_SET_MAXNAMELEN - 1 }, [NFTA_SET_FLAGS] = { .type = NLA_U32 }, [NFTA_SET_KEY_TYPE] = { .type = NLA_U32 }, [NFTA_SET_KEY_LEN] = { .type = NLA_U32 }, @@ -2401,7 +2401,7 @@ static int nf_tables_set_alloc_name(struct nft_ctx *ctx, struct nft_set *set, unsigned long *inuse; unsigned int n = 0, min = 0; - p = strnchr(name, IFNAMSIZ, '%'); + p = strnchr(name, NFT_SET_MAXNAMELEN, '%'); if (p != NULL) { if (p[1] != 'd' || strchr(p + 2, '%')) return -EINVAL; @@ -2696,7 +2696,7 @@ static int nf_tables_newset(struct net *net, struct sock *nlsk, struct nft_table *table; struct nft_set *set; struct nft_ctx ctx; - char name[IFNAMSIZ]; + char name[NFT_SET_MAXNAMELEN]; unsigned int size; bool create; u64 timeout; From f8a952cb40407f3c127cab8ec77f1261f1e424b4 Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Mon, 18 Apr 2016 11:33:41 -0700 Subject: [PATCH 1368/1649] i40e/i40evf: Refactor tunnel interpretation Refactor the interpretation of a tunnel. This removes some code and lets us start using the hardware's parsing. Signed-off-by: Jesse Brandeburg Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_txrx.c | 13 ++++++------- drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 13 ++++++------- 2 files changed, 12 insertions(+), 14 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 2765d7efdd9c..dab733c5343d 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -1392,7 +1392,7 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi, u16 rx_ptype) { struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(rx_ptype); - bool ipv4, ipv6, ipv4_tunnel, ipv6_tunnel; + bool ipv4, ipv6, tunnel = false; skb->ip_summed = CHECKSUM_NONE; @@ -1441,14 +1441,13 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi, * doesn't make it a hard requirement so if we have validated the * inner checksum report CHECKSUM_UNNECESSARY. 
*/ - - ipv4_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT4_MAC_PAY3) && - (rx_ptype <= I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4); - ipv6_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT6_MAC_PAY3) && - (rx_ptype <= I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4); + if (decoded.inner_prot & (I40E_RX_PTYPE_INNER_PROT_TCP | + I40E_RX_PTYPE_INNER_PROT_UDP | + I40E_RX_PTYPE_INNER_PROT_SCTP)) + tunnel = true; skb->ip_summed = CHECKSUM_UNNECESSARY; - skb->csum_level = ipv4_tunnel || ipv6_tunnel; + skb->csum_level = tunnel ? 1 : 0; return; diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c index ede8dfc189bc..a37a3f34ed4f 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c @@ -864,7 +864,7 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi, u16 rx_ptype) { struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(rx_ptype); - bool ipv4, ipv6, ipv4_tunnel, ipv6_tunnel; + bool ipv4, ipv6, tunnel = false; skb->ip_summed = CHECKSUM_NONE; @@ -913,14 +913,13 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi, * doesn't make it a hard requirement so if we have validated the * inner checksum report CHECKSUM_UNNECESSARY. */ - - ipv4_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT4_MAC_PAY3) && - (rx_ptype <= I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4); - ipv6_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT6_MAC_PAY3) && - (rx_ptype <= I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4); + if (decoded.inner_prot & (I40E_RX_PTYPE_INNER_PROT_TCP | + I40E_RX_PTYPE_INNER_PROT_UDP | + I40E_RX_PTYPE_INNER_PROT_SCTP)) + tunnel = true; skb->ip_summed = CHECKSUM_UNNECESSARY; - skb->csum_level = ipv4_tunnel || ipv6_tunnel; + skb->csum_level = tunnel ? 1 : 0; return; From b32bfa17246d836125958e39996a674653e899a5 Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Mon, 18 Apr 2016 11:33:42 -0700 Subject: [PATCH 1369/1649] i40e: Drop packet split receive routine As part of preparation for the rx-refactor, remove the packet split receive routine and ancillary code. 
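In single-buffer terms, the only Rx layout left after this removal is the one set up further down in i40e_vsi_configure_rx(): no separate header buffer and one full-frame buffer per descriptor. A hedged sketch with a hypothetical struct and helper (not driver code):

    /* Hypothetical sketch, not i40e code: the only Rx buffer layout left
     * once the packet-split ("PS") path is removed.
     */
    struct rx_buf_cfg {
            unsigned int hdr_len;   /* header-split buffer; always 0 now */
            unsigned int buf_len;   /* single data buffer per descriptor */
    };

    static struct rx_buf_cfg rx_single_buffer_cfg(unsigned int max_frame)
    {
            struct rx_buf_cfg cfg = {
                    .hdr_len = 0,
                    .buf_len = max_frame,
            };
            return cfg;
    }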
Signed-off-by: Jesse Brandeburg Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e.h | 3 - .../net/ethernet/intel/i40e/i40e_debugfs.c | 4 +- .../net/ethernet/intel/i40e/i40e_ethtool.c | 19 -- drivers/net/ethernet/intel/i40e/i40e_main.c | 49 +--- drivers/net/ethernet/intel/i40e/i40e_txrx.c | 245 +----------------- drivers/net/ethernet/intel/i40e/i40e_txrx.h | 7 - 6 files changed, 10 insertions(+), 317 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index 00c473874f01..ea6a69a1f1d7 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -101,7 +101,6 @@ #define I40E_PRIV_FLAGS_LINKPOLL_FLAG BIT(1) #define I40E_PRIV_FLAGS_FD_ATR BIT(2) #define I40E_PRIV_FLAGS_VEB_STATS BIT(3) -#define I40E_PRIV_FLAGS_PS BIT(4) #define I40E_PRIV_FLAGS_HW_ATR_EVICT BIT(5) #define I40E_NVM_VERSION_LO_SHIFT 0 @@ -320,8 +319,6 @@ struct i40e_pf { #define I40E_FLAG_RX_CSUM_ENABLED BIT_ULL(1) #define I40E_FLAG_MSI_ENABLED BIT_ULL(2) #define I40E_FLAG_MSIX_ENABLED BIT_ULL(3) -#define I40E_FLAG_RX_1BUF_ENABLED BIT_ULL(4) -#define I40E_FLAG_RX_PS_ENABLED BIT_ULL(5) #define I40E_FLAG_RSS_ENABLED BIT_ULL(6) #define I40E_FLAG_VMDQ_ENABLED BIT_ULL(7) #define I40E_FLAG_FDIR_REQUIRES_REINIT BIT_ULL(8) diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c index 83dccf1792e7..f119a747f5d9 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c +++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c @@ -273,8 +273,8 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid) rx_ring->rx_buf_len, rx_ring->dtype); dev_info(&pf->pdev->dev, - " rx_rings[%i]: hsplit = %d, next_to_use = %d, next_to_clean = %d, ring_active = %i\n", - i, ring_is_ps_enabled(rx_ring), + " rx_rings[%i]: next_to_use = %d, next_to_clean = %d, ring_active = %i\n", + i, rx_ring->next_to_use, rx_ring->next_to_clean, rx_ring->ring_active); diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 8e56c43c4104..858e1699b87c 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -2829,8 +2829,6 @@ static u32 i40e_get_priv_flags(struct net_device *dev) I40E_PRIV_FLAGS_FD_ATR : 0; ret_flags |= pf->flags & I40E_FLAG_VEB_STATS_ENABLED ? I40E_PRIV_FLAGS_VEB_STATS : 0; - ret_flags |= pf->flags & I40E_FLAG_RX_PS_ENABLED ? - I40E_PRIV_FLAGS_PS : 0; ret_flags |= pf->auto_disable_flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE ? 0 : I40E_PRIV_FLAGS_HW_ATR_EVICT; @@ -2851,23 +2849,6 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags) /* NOTE: MFP is not settable */ - /* allow the user to control the method of receive - * buffer DMA, whether the packet is split at header - * boundaries into two separate buffers. In some cases - * one routine or the other will perform better. 
- */ - if ((flags & I40E_PRIV_FLAGS_PS) && - !(pf->flags & I40E_FLAG_RX_PS_ENABLED)) { - pf->flags |= I40E_FLAG_RX_PS_ENABLED; - pf->flags &= ~I40E_FLAG_RX_1BUF_ENABLED; - reset_required = true; - } else if (!(flags & I40E_PRIV_FLAGS_PS) && - (pf->flags & I40E_FLAG_RX_PS_ENABLED)) { - pf->flags &= ~I40E_FLAG_RX_PS_ENABLED; - pf->flags |= I40E_FLAG_RX_1BUF_ENABLED; - reset_required = true; - } - if (flags & I40E_PRIV_FLAGS_LINKPOLL_FLAG) pf->flags |= I40E_FLAG_LINK_POLLING_ENABLED; else diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index f6da6b76e678..84e8d4e05924 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -2871,18 +2871,9 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring) } rx_ctx.dtype = vsi->dtype; - if (vsi->dtype) { - set_ring_ps_enabled(ring); - rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2 | - I40E_RX_SPLIT_IP | - I40E_RX_SPLIT_TCP_UDP | - I40E_RX_SPLIT_SCTP; - } else { - rx_ctx.hsplit_0 = 0; - } + rx_ctx.hsplit_0 = 0; - rx_ctx.rxmax = min_t(u16, vsi->max_frame, - (chain_len * ring->rx_buf_len)); + rx_ctx.rxmax = min_t(u16, vsi->max_frame, chain_len * ring->rx_buf_len); if (hw->revision_id == 0) rx_ctx.lrxqthresh = 0; else @@ -2919,12 +2910,7 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring) ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q); writel(0, ring->tail); - if (ring_is_ps_enabled(ring)) { - i40e_alloc_rx_headers(ring); - i40e_alloc_rx_buffers_ps(ring, I40E_DESC_UNUSED(ring)); - } else { - i40e_alloc_rx_buffers_1buf(ring, I40E_DESC_UNUSED(ring)); - } + i40e_alloc_rx_buffers_1buf(ring, I40E_DESC_UNUSED(ring)); return 0; } @@ -2963,25 +2949,9 @@ static int i40e_vsi_configure_rx(struct i40e_vsi *vsi) else vsi->max_frame = I40E_RXBUFFER_2048; - /* figure out correct receive buffer length */ - switch (vsi->back->flags & (I40E_FLAG_RX_1BUF_ENABLED | - I40E_FLAG_RX_PS_ENABLED)) { - case I40E_FLAG_RX_1BUF_ENABLED: - vsi->rx_hdr_len = 0; - vsi->rx_buf_len = vsi->max_frame; - vsi->dtype = I40E_RX_DTYPE_NO_SPLIT; - break; - case I40E_FLAG_RX_PS_ENABLED: - vsi->rx_hdr_len = I40E_RX_HDR_SIZE; - vsi->rx_buf_len = I40E_RXBUFFER_2048; - vsi->dtype = I40E_RX_DTYPE_HEADER_SPLIT; - break; - default: - vsi->rx_hdr_len = I40E_RX_HDR_SIZE; - vsi->rx_buf_len = I40E_RXBUFFER_2048; - vsi->dtype = I40E_RX_DTYPE_SPLIT_ALWAYS; - break; - } + vsi->rx_hdr_len = 0; + vsi->rx_buf_len = vsi->max_frame; + vsi->dtype = I40E_RX_DTYPE_NO_SPLIT; #ifdef I40E_FCOE /* setup rx buffer for FCoE */ @@ -8460,11 +8430,6 @@ static int i40e_sw_init(struct i40e_pf *pf) I40E_FLAG_MSI_ENABLED | I40E_FLAG_MSIX_ENABLED; - if (iommu_present(&pci_bus_type)) - pf->flags |= I40E_FLAG_RX_PS_ENABLED; - else - pf->flags |= I40E_FLAG_RX_1BUF_ENABLED; - /* Set default ITR */ pf->rx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_RX_DEF; pf->tx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_TX_DEF; @@ -10699,7 +10664,7 @@ static void i40e_print_features(struct i40e_pf *pf) i += snprintf(&buf[i], REMAIN(i), " VSIs: %d QP: %d RX: %s", pf->hw.func_caps.num_vsis, pf->vsi[pf->lan_vsi]->num_queue_pairs, - pf->flags & I40E_FLAG_RX_PS_ENABLED ? 
"PS" : "1BUF"); + "1BUF"); if (pf->flags & I40E_FLAG_RSS_ENABLED) i += snprintf(&buf[i], REMAIN(i), " RSS"); diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index dab733c5343d..450ecdd589db 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -1032,22 +1032,6 @@ void i40e_clean_rx_ring(struct i40e_ring *rx_ring) if (!rx_ring->rx_bi) return; - if (ring_is_ps_enabled(rx_ring)) { - int bufsz = ALIGN(rx_ring->rx_hdr_len, 256) * rx_ring->count; - - rx_bi = &rx_ring->rx_bi[0]; - if (rx_bi->hdr_buf) { - dma_free_coherent(dev, - bufsz, - rx_bi->hdr_buf, - rx_bi->dma); - for (i = 0; i < rx_ring->count; i++) { - rx_bi = &rx_ring->rx_bi[i]; - rx_bi->dma = 0; - rx_bi->hdr_buf = NULL; - } - } - } /* Free all the Rx ring sk_buffs */ for (i = 0; i < rx_ring->count; i++) { rx_bi = &rx_ring->rx_bi[i]; @@ -1502,230 +1486,6 @@ static inline void i40e_rx_hash(struct i40e_ring *ring, } } -/** - * i40e_clean_rx_irq_ps - Reclaim resources after receive; packet split - * @rx_ring: rx ring to clean - * @budget: how many cleans we're allowed - * - * Returns true if there's any budget left (e.g. the clean is finished) - **/ -static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, const int budget) -{ - unsigned int total_rx_bytes = 0, total_rx_packets = 0; - u16 rx_packet_len, rx_header_len, rx_sph, rx_hbo; - u16 cleaned_count = I40E_DESC_UNUSED(rx_ring); - struct i40e_vsi *vsi = rx_ring->vsi; - u16 i = rx_ring->next_to_clean; - union i40e_rx_desc *rx_desc; - u32 rx_error, rx_status; - bool failure = false; - u8 rx_ptype; - u64 qword; - u32 copysize; - - if (budget <= 0) - return 0; - - do { - struct i40e_rx_buffer *rx_bi; - struct sk_buff *skb; - u16 vlan_tag; - /* return some buffers to hardware, one at a time is too slow */ - if (cleaned_count >= I40E_RX_BUFFER_WRITE) { - failure = failure || - i40e_alloc_rx_buffers_ps(rx_ring, - cleaned_count); - cleaned_count = 0; - } - - i = rx_ring->next_to_clean; - rx_desc = I40E_RX_DESC(rx_ring, i); - qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); - rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >> - I40E_RXD_QW1_STATUS_SHIFT; - - if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT))) - break; - - /* This memory barrier is needed to keep us from reading - * any other fields out of the rx_desc until we know the - * DD bit is set. 
- */ - dma_rmb(); - /* sync header buffer for reading */ - dma_sync_single_range_for_cpu(rx_ring->dev, - rx_ring->rx_bi[0].dma, - i * rx_ring->rx_hdr_len, - rx_ring->rx_hdr_len, - DMA_FROM_DEVICE); - if (i40e_rx_is_programming_status(qword)) { - i40e_clean_programming_status(rx_ring, rx_desc); - I40E_RX_INCREMENT(rx_ring, i); - continue; - } - rx_bi = &rx_ring->rx_bi[i]; - skb = rx_bi->skb; - if (likely(!skb)) { - skb = __netdev_alloc_skb_ip_align(rx_ring->netdev, - rx_ring->rx_hdr_len, - GFP_ATOMIC | - __GFP_NOWARN); - if (!skb) { - rx_ring->rx_stats.alloc_buff_failed++; - failure = true; - break; - } - - /* initialize queue mapping */ - skb_record_rx_queue(skb, rx_ring->queue_index); - /* we are reusing so sync this buffer for CPU use */ - dma_sync_single_range_for_cpu(rx_ring->dev, - rx_ring->rx_bi[0].dma, - i * rx_ring->rx_hdr_len, - rx_ring->rx_hdr_len, - DMA_FROM_DEVICE); - } - rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >> - I40E_RXD_QW1_LENGTH_PBUF_SHIFT; - rx_header_len = (qword & I40E_RXD_QW1_LENGTH_HBUF_MASK) >> - I40E_RXD_QW1_LENGTH_HBUF_SHIFT; - rx_sph = (qword & I40E_RXD_QW1_LENGTH_SPH_MASK) >> - I40E_RXD_QW1_LENGTH_SPH_SHIFT; - - rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >> - I40E_RXD_QW1_ERROR_SHIFT; - rx_hbo = rx_error & BIT(I40E_RX_DESC_ERROR_HBO_SHIFT); - rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT); - - rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> - I40E_RXD_QW1_PTYPE_SHIFT; - /* sync half-page for reading */ - dma_sync_single_range_for_cpu(rx_ring->dev, - rx_bi->page_dma, - rx_bi->page_offset, - PAGE_SIZE / 2, - DMA_FROM_DEVICE); - prefetch(page_address(rx_bi->page) + rx_bi->page_offset); - rx_bi->skb = NULL; - cleaned_count++; - copysize = 0; - if (rx_hbo || rx_sph) { - int len; - - if (rx_hbo) - len = I40E_RX_HDR_SIZE; - else - len = rx_header_len; - memcpy(__skb_put(skb, len), rx_bi->hdr_buf, len); - } else if (skb->len == 0) { - int len; - unsigned char *va = page_address(rx_bi->page) + - rx_bi->page_offset; - - len = min(rx_packet_len, rx_ring->rx_hdr_len); - memcpy(__skb_put(skb, len), va, len); - copysize = len; - rx_packet_len -= len; - } - /* Get the rest of the data if this was a header split */ - if (rx_packet_len) { - skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, - rx_bi->page, - rx_bi->page_offset + copysize, - rx_packet_len, I40E_RXBUFFER_2048); - - /* If the page count is more than 2, then both halves - * of the page are used and we need to free it. Do it - * here instead of in the alloc code. Otherwise one - * of the half-pages might be released between now and - * then, and we wouldn't know which one to use. - * Don't call get_page and free_page since those are - * both expensive atomic operations that just change - * the refcount in opposite directions. Just give the - * page to the stack; he can have our refcount. - */ - if (page_count(rx_bi->page) > 2) { - dma_unmap_page(rx_ring->dev, - rx_bi->page_dma, - PAGE_SIZE, - DMA_FROM_DEVICE); - rx_bi->page = NULL; - rx_bi->page_dma = 0; - rx_ring->rx_stats.realloc_count++; - } else { - get_page(rx_bi->page); - /* switch to the other half-page here; the - * allocation code programs the right addr - * into HW. If we haven't used this half-page, - * the address won't be changed, and HW can - * just use it next time through. 
- */ - rx_bi->page_offset ^= PAGE_SIZE / 2; - } - - } - I40E_RX_INCREMENT(rx_ring, i); - - if (unlikely( - !(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) { - struct i40e_rx_buffer *next_buffer; - - next_buffer = &rx_ring->rx_bi[i]; - next_buffer->skb = skb; - rx_ring->rx_stats.non_eop_descs++; - continue; - } - - /* ERR_MASK will only have valid bits if EOP set */ - if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) { - dev_kfree_skb_any(skb); - continue; - } - - i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype); - - if (unlikely(rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK)) { - i40e_ptp_rx_hwtstamp(vsi->back, skb, (rx_status & - I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >> - I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT); - rx_ring->last_rx_timestamp = jiffies; - } - - /* probably a little skewed due to removing CRC */ - total_rx_bytes += skb->len; - total_rx_packets++; - - skb->protocol = eth_type_trans(skb, rx_ring->netdev); - - i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype); - - vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT) - ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) - : 0; -#ifdef I40E_FCOE - if (unlikely( - i40e_rx_is_fcoe(rx_ptype) && - !i40e_fcoe_handle_offload(rx_ring, rx_desc, skb))) { - dev_kfree_skb_any(skb); - continue; - } -#endif - i40e_receive_skb(rx_ring, skb, vlan_tag); - - rx_desc->wb.qword1.status_error_len = 0; - - } while (likely(total_rx_packets < budget)); - - u64_stats_update_begin(&rx_ring->syncp); - rx_ring->stats.packets += total_rx_packets; - rx_ring->stats.bytes += total_rx_bytes; - u64_stats_update_end(&rx_ring->syncp); - rx_ring->q_vector->rx.total_packets += total_rx_packets; - rx_ring->q_vector->rx.total_bytes += total_rx_bytes; - - return failure ? budget : total_rx_packets; -} - /** * i40e_clean_rx_irq_1buf - Reclaim resources after receive; single buffer * @rx_ring: rx ring to clean @@ -2001,10 +1761,7 @@ int i40e_napi_poll(struct napi_struct *napi, int budget) i40e_for_each_ring(ring, q_vector->rx) { int cleaned; - if (ring_is_ps_enabled(ring)) - cleaned = i40e_clean_rx_irq_ps(ring, budget_per_ring); - else - cleaned = i40e_clean_rx_irq_1buf(ring, budget_per_ring); + cleaned = i40e_clean_rx_irq_1buf(ring, budget_per_ring); work_done += cleaned; /* if we clean as many as budgeted, we must not be done */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h index 6b2b1913527d..5a2d0fef4d37 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h @@ -245,16 +245,9 @@ struct i40e_rx_queue_stats { enum i40e_ring_state_t { __I40E_TX_FDIR_INIT_DONE, __I40E_TX_XPS_INIT_DONE, - __I40E_RX_PS_ENABLED, __I40E_RX_16BYTE_DESC_ENABLED, }; -#define ring_is_ps_enabled(ring) \ - test_bit(__I40E_RX_PS_ENABLED, &(ring)->state) -#define set_ring_ps_enabled(ring) \ - set_bit(__I40E_RX_PS_ENABLED, &(ring)->state) -#define clear_ring_ps_enabled(ring) \ - clear_bit(__I40E_RX_PS_ENABLED, &(ring)->state) #define ring_is_16byte_desc_enabled(ring) \ test_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state) #define set_ring_16byte_desc_enabled(ring) \ From 04b3b779816502549e5f4bfaf5df90204ce2fe0e Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Mon, 18 Apr 2016 11:33:43 -0700 Subject: [PATCH 1370/1649] i40e/i40evf: Remove reference to ring->dtype As part of the rx-refactor, the dtype variable in the i40e_ring struct is no longer used, so remove it. 
Signed-off-by: Jesse Brandeburg Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_debugfs.c | 8 ++------ drivers/net/ethernet/intel/i40e/i40e_txrx.h | 1 - drivers/net/ethernet/intel/i40evf/i40e_txrx.h | 1 - 3 files changed, 2 insertions(+), 8 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c index f119a747f5d9..c0a01e0eb181 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c +++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c @@ -268,10 +268,9 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid) rx_ring->queue_index, rx_ring->reg_idx); dev_info(&pf->pdev->dev, - " rx_rings[%i]: rx_hdr_len = %d, rx_buf_len = %d, dtype = %d\n", + " rx_rings[%i]: rx_hdr_len = %d, rx_buf_len = %d\n", i, rx_ring->rx_hdr_len, - rx_ring->rx_buf_len, - rx_ring->dtype); + rx_ring->rx_buf_len); dev_info(&pf->pdev->dev, " rx_rings[%i]: next_to_use = %d, next_to_clean = %d, ring_active = %i\n", i, @@ -325,9 +324,6 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid) i, tx_ring->state, tx_ring->queue_index, tx_ring->reg_idx); - dev_info(&pf->pdev->dev, - " tx_rings[%i]: dtype = %d\n", - i, tx_ring->dtype); dev_info(&pf->pdev->dev, " tx_rings[%i]: next_to_use = %d, next_to_clean = %d, ring_active = %i\n", i, diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h index 5a2d0fef4d37..03e21d95c4f1 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h @@ -282,7 +282,6 @@ struct i40e_ring { u16 reg_idx; /* HW register index of the ring */ u16 rx_hdr_len; u16 rx_buf_len; - u8 dtype; #define I40E_RX_DTYPE_NO_SPLIT 0 #define I40E_RX_DTYPE_HEADER_SPLIT 1 #define I40E_RX_DTYPE_SPLIT_ALWAYS 2 diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h index 54b52e8f7097..3b3f9764c235 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h @@ -280,7 +280,6 @@ struct i40e_ring { u16 reg_idx; /* HW register index of the ring */ u16 rx_hdr_len; u16 rx_buf_len; - u8 dtype; #define I40E_RX_DTYPE_NO_SPLIT 0 #define I40E_RX_DTYPE_HEADER_SPLIT 1 #define I40E_RX_DTYPE_SPLIT_ALWAYS 2 From 73898db0430125606c86c798c0627aefef9af9ed Mon Sep 17 00:00:00 2001 From: Haggai Abramovsky Date: Wed, 4 May 2016 14:50:15 +0300 Subject: [PATCH 1371/1649] net/mlx4: Avoid wrong virtual mappings The dma_alloc_coherent() function returns a virtual address which can be used for coherent access to the underlying memory. On some architectures, like arm64, undefined behavior results if this memory is also accessed via virtual mappings that are not coherent. Because of their undefined nature, operations like virt_to_page() return garbage when passed virtual addresses obtained from dma_alloc_coherent(). Any subsequent mappings via vmap() of the garbage page values are unusable and result in bad things like bus errors (synchronous aborts in ARM64 speak). The mlx4 driver contains code that does the equivalent of vmap(virt_to_page(dma_alloc_coherent())); this results in an oops when the device is opened. Prevent the Ethernet driver from running this problematic code by forcing it to allocate contiguous memory. As for the InfiniBand driver, it first tries to allocate contiguous memory and falls back to fragmented memory if that allocation fails.
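To make the broken pattern concrete, here is a hedged sketch (illustration only, not mlx4 code; the function name is invented) of the equivalent sequence: virt_to_page() is only meaningful for linear-map addresses, so the page derived from the coherent buffer can be garbage, and it is the later vmap() of that page that yields the unusable mapping and the oops described above.

    #include <linux/dma-mapping.h>
    #include <linux/mm.h>
    #include <linux/vmalloc.h>

    /* Illustration of the anti-pattern this patch removes. */
    static void *broken_coherent_remap(struct device *dev, size_t size,
                                       dma_addr_t *dma)
    {
            void *cpu_addr = dma_alloc_coherent(dev, size, dma, GFP_KERNEL);
            struct page *page;

            if (!cpu_addr)
                    return NULL;

            page = virt_to_page(cpu_addr);  /* undefined for coherent memory */
            return vmap(&page, 1, VM_MAP, PAGE_KERNEL); /* may oops on access */
    }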
Signed-off-by: Haggai Abramovsky Signed-off-by: Yishai Hadas Reported-by: David Daney Tested-by: Sinan Kaya Signed-off-by: David S. Miller --- drivers/infiniband/hw/mlx4/qp.c | 27 ++++-- drivers/net/ethernet/mellanox/mlx4/alloc.c | 97 ++++++++----------- drivers/net/ethernet/mellanox/mlx4/en_cq.c | 9 +- .../net/ethernet/mellanox/mlx4/en_netdev.c | 2 +- .../net/ethernet/mellanox/mlx4/en_resources.c | 31 ------ drivers/net/ethernet/mellanox/mlx4/en_rx.c | 11 +-- drivers/net/ethernet/mellanox/mlx4/en_tx.c | 14 +-- drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 2 - include/linux/mlx4/device.h | 4 +- 9 files changed, 70 insertions(+), 127 deletions(-) diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c index fd97534762b8..81b0e1fbec1d 100644 --- a/drivers/infiniband/hw/mlx4/qp.c +++ b/drivers/infiniband/hw/mlx4/qp.c @@ -419,7 +419,8 @@ static int set_rq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap, } static int set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap, - enum mlx4_ib_qp_type type, struct mlx4_ib_qp *qp) + enum mlx4_ib_qp_type type, struct mlx4_ib_qp *qp, + bool shrink_wqe) { int s; @@ -477,7 +478,7 @@ static int set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap, * We set WQE size to at least 64 bytes, this way stamping * invalidates each WQE. */ - if (dev->dev->caps.fw_ver >= MLX4_FW_VER_WQE_CTRL_NEC && + if (shrink_wqe && dev->dev->caps.fw_ver >= MLX4_FW_VER_WQE_CTRL_NEC && qp->sq_signal_bits && BITS_PER_LONG == 64 && type != MLX4_IB_QPT_SMI && type != MLX4_IB_QPT_GSI && !(type & (MLX4_IB_QPT_PROXY_SMI_OWNER | MLX4_IB_QPT_PROXY_SMI | @@ -642,6 +643,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd, { int qpn; int err; + struct ib_qp_cap backup_cap; struct mlx4_ib_sqp *sqp; struct mlx4_ib_qp *qp; enum mlx4_ib_qp_type qp_type = (enum mlx4_ib_qp_type) init_attr->qp_type; @@ -775,7 +777,9 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd, goto err; } - err = set_kernel_sq_size(dev, &init_attr->cap, qp_type, qp); + memcpy(&backup_cap, &init_attr->cap, sizeof(backup_cap)); + err = set_kernel_sq_size(dev, &init_attr->cap, + qp_type, qp, true); if (err) goto err; @@ -787,9 +791,20 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd, *qp->db.db = 0; } - if (mlx4_buf_alloc(dev->dev, qp->buf_size, PAGE_SIZE * 2, &qp->buf, gfp)) { - err = -ENOMEM; - goto err_db; + if (mlx4_buf_alloc(dev->dev, qp->buf_size, qp->buf_size, + &qp->buf, gfp)) { + memcpy(&init_attr->cap, &backup_cap, + sizeof(backup_cap)); + err = set_kernel_sq_size(dev, &init_attr->cap, qp_type, + qp, false); + if (err) + goto err_db; + + if (mlx4_buf_alloc(dev->dev, qp->buf_size, + PAGE_SIZE * 2, &qp->buf, gfp)) { + err = -ENOMEM; + goto err_db; + } } err = mlx4_mtt_init(dev->dev, qp->buf.npages, qp->buf.page_shift, diff --git a/drivers/net/ethernet/mellanox/mlx4/alloc.c b/drivers/net/ethernet/mellanox/mlx4/alloc.c index 0c51c69f802f..249a4584401a 100644 --- a/drivers/net/ethernet/mellanox/mlx4/alloc.c +++ b/drivers/net/ethernet/mellanox/mlx4/alloc.c @@ -576,41 +576,48 @@ out: return res; } -/* - * Handling for queue buffers -- we allocate a bunch of memory and - * register it in a memory region at HCA virtual address 0. If the - * requested size is > max_direct, we split the allocation into - * multiple pages, so we don't require too much contiguous memory. 
- */ -int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct, - struct mlx4_buf *buf, gfp_t gfp) +static int mlx4_buf_direct_alloc(struct mlx4_dev *dev, int size, + struct mlx4_buf *buf, gfp_t gfp) { dma_addr_t t; + buf->nbufs = 1; + buf->npages = 1; + buf->page_shift = get_order(size) + PAGE_SHIFT; + buf->direct.buf = + dma_zalloc_coherent(&dev->persist->pdev->dev, + size, &t, gfp); + if (!buf->direct.buf) + return -ENOMEM; + + buf->direct.map = t; + + while (t & ((1 << buf->page_shift) - 1)) { + --buf->page_shift; + buf->npages *= 2; + } + + return 0; +} + +/* Handling for queue buffers -- we allocate a bunch of memory and + * register it in a memory region at HCA virtual address 0. If the + * requested size is > max_direct, we split the allocation into + * multiple pages, so we don't require too much contiguous memory. + */ +int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct, + struct mlx4_buf *buf, gfp_t gfp) +{ if (size <= max_direct) { - buf->nbufs = 1; - buf->npages = 1; - buf->page_shift = get_order(size) + PAGE_SHIFT; - buf->direct.buf = dma_alloc_coherent(&dev->persist->pdev->dev, - size, &t, gfp); - if (!buf->direct.buf) - return -ENOMEM; - - buf->direct.map = t; - - while (t & ((1 << buf->page_shift) - 1)) { - --buf->page_shift; - buf->npages *= 2; - } - - memset(buf->direct.buf, 0, size); + return mlx4_buf_direct_alloc(dev, size, buf, gfp); } else { + dma_addr_t t; int i; - buf->direct.buf = NULL; - buf->nbufs = (size + PAGE_SIZE - 1) / PAGE_SIZE; - buf->npages = buf->nbufs; + buf->direct.buf = NULL; + buf->nbufs = (size + PAGE_SIZE - 1) / PAGE_SIZE; + buf->npages = buf->nbufs; buf->page_shift = PAGE_SHIFT; buf->page_list = kcalloc(buf->nbufs, sizeof(*buf->page_list), gfp); @@ -619,28 +626,12 @@ int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct, for (i = 0; i < buf->nbufs; ++i) { buf->page_list[i].buf = - dma_alloc_coherent(&dev->persist->pdev->dev, - PAGE_SIZE, - &t, gfp); + dma_zalloc_coherent(&dev->persist->pdev->dev, + PAGE_SIZE, &t, gfp); if (!buf->page_list[i].buf) goto err_free; buf->page_list[i].map = t; - - memset(buf->page_list[i].buf, 0, PAGE_SIZE); - } - - if (BITS_PER_LONG == 64) { - struct page **pages; - pages = kmalloc(sizeof *pages * buf->nbufs, gfp); - if (!pages) - goto err_free; - for (i = 0; i < buf->nbufs; ++i) - pages[i] = virt_to_page(buf->page_list[i].buf); - buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP, PAGE_KERNEL); - kfree(pages); - if (!buf->direct.buf) - goto err_free; } } @@ -655,15 +646,11 @@ EXPORT_SYMBOL_GPL(mlx4_buf_alloc); void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf) { - int i; - - if (buf->nbufs == 1) + if (buf->nbufs == 1) { dma_free_coherent(&dev->persist->pdev->dev, size, - buf->direct.buf, - buf->direct.map); - else { - if (BITS_PER_LONG == 64) - vunmap(buf->direct.buf); + buf->direct.buf, buf->direct.map); + } else { + int i; for (i = 0; i < buf->nbufs; ++i) if (buf->page_list[i].buf) @@ -789,7 +776,7 @@ void mlx4_db_free(struct mlx4_dev *dev, struct mlx4_db *db) EXPORT_SYMBOL_GPL(mlx4_db_free); int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres, - int size, int max_direct) + int size) { int err; @@ -799,7 +786,7 @@ int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres, *wqres->db.db = 0; - err = mlx4_buf_alloc(dev, size, max_direct, &wqres->buf, GFP_KERNEL); + err = mlx4_buf_direct_alloc(dev, size, &wqres->buf, GFP_KERNEL); if (err) goto err_db; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c 
b/drivers/net/ethernet/mellanox/mlx4/en_cq.c index af975a2b74c6..132cea655920 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c @@ -73,22 +73,16 @@ int mlx4_en_create_cq(struct mlx4_en_priv *priv, */ set_dev_node(&mdev->dev->persist->pdev->dev, node); err = mlx4_alloc_hwq_res(mdev->dev, &cq->wqres, - cq->buf_size, 2 * PAGE_SIZE); + cq->buf_size); set_dev_node(&mdev->dev->persist->pdev->dev, mdev->dev->numa_node); if (err) goto err_cq; - err = mlx4_en_map_buffer(&cq->wqres.buf); - if (err) - goto err_res; - cq->buf = (struct mlx4_cqe *)cq->wqres.buf.direct.buf; *pcq = cq; return 0; -err_res: - mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size); err_cq: kfree(cq); *pcq = NULL; @@ -177,7 +171,6 @@ void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq) struct mlx4_en_dev *mdev = priv->mdev; struct mlx4_en_cq *cq = *pcq; - mlx4_en_unmap_buffer(&cq->wqres.buf); mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size); if (mlx4_is_eq_vector_valid(mdev->dev, priv->port, cq->vector) && cq->is_tx == RX) diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 6f28ac58251c..92e0624f4cf0 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -2928,7 +2928,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, /* Allocate page for receive rings */ err = mlx4_alloc_hwq_res(mdev->dev, &priv->res, - MLX4_EN_PAGE_SIZE, MLX4_EN_PAGE_SIZE); + MLX4_EN_PAGE_SIZE); if (err) { en_err(priv, "Failed to allocate page for rx qps\n"); goto out; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_resources.c b/drivers/net/ethernet/mellanox/mlx4/en_resources.c index 02e925d6f734..a6b0db0e0383 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_resources.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_resources.c @@ -107,37 +107,6 @@ int mlx4_en_change_mcast_lb(struct mlx4_en_priv *priv, struct mlx4_qp *qp, return ret; } -int mlx4_en_map_buffer(struct mlx4_buf *buf) -{ - struct page **pages; - int i; - - if (BITS_PER_LONG == 64 || buf->nbufs == 1) - return 0; - - pages = kmalloc(sizeof *pages * buf->nbufs, GFP_KERNEL); - if (!pages) - return -ENOMEM; - - for (i = 0; i < buf->nbufs; ++i) - pages[i] = virt_to_page(buf->page_list[i].buf); - - buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP, PAGE_KERNEL); - kfree(pages); - if (!buf->direct.buf) - return -ENOMEM; - - return 0; -} - -void mlx4_en_unmap_buffer(struct mlx4_buf *buf) -{ - if (BITS_PER_LONG == 64 || buf->nbufs == 1) - return; - - vunmap(buf->direct.buf); -} - void mlx4_en_sqp_event(struct mlx4_qp *qp, enum mlx4_event event) { return; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c index b723e3bcab39..8ef6875b6cf9 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c @@ -394,17 +394,11 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv, /* Allocate HW buffers on provided NUMA node */ set_dev_node(&mdev->dev->persist->pdev->dev, node); - err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, - ring->buf_size, 2 * PAGE_SIZE); + err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size); set_dev_node(&mdev->dev->persist->pdev->dev, mdev->dev->numa_node); if (err) goto err_info; - err = mlx4_en_map_buffer(&ring->wqres.buf); - if (err) { - en_err(priv, "Failed to map RX buffer\n"); - goto err_hwq; - } ring->buf = ring->wqres.buf.direct.buf; 
ring->hwtstamp_rx_filter = priv->hwtstamp_config.rx_filter; @@ -412,8 +406,6 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv, *pring = ring; return 0; -err_hwq: - mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size); err_info: vfree(ring->rx_info); ring->rx_info = NULL; @@ -517,7 +509,6 @@ void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv, struct mlx4_en_dev *mdev = priv->mdev; struct mlx4_en_rx_ring *ring = *pring; - mlx4_en_unmap_buffer(&ring->wqres.buf); mlx4_free_hwq_res(mdev->dev, &ring->wqres, size * stride + TXBB_SIZE); vfree(ring->rx_info); ring->rx_info = NULL; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c index 0f206a95429c..f6e61570cb2c 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c @@ -94,20 +94,13 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, /* Allocate HW buffers on provided NUMA node */ set_dev_node(&mdev->dev->persist->pdev->dev, node); - err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size, - 2 * PAGE_SIZE); + err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size); set_dev_node(&mdev->dev->persist->pdev->dev, mdev->dev->numa_node); if (err) { en_err(priv, "Failed allocating hwq resources\n"); goto err_bounce; } - err = mlx4_en_map_buffer(&ring->wqres.buf); - if (err) { - en_err(priv, "Failed to map TX buffer\n"); - goto err_hwq_res; - } - ring->buf = ring->wqres.buf.direct.buf; en_dbg(DRV, priv, "Allocated TX ring (addr:%p) - buf:%p size:%d buf_size:%d dma:%llx\n", @@ -118,7 +111,7 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, MLX4_RESERVE_ETH_BF_QP); if (err) { en_err(priv, "failed reserving qp for TX ring\n"); - goto err_map; + goto err_hwq_res; } err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->qp, GFP_KERNEL); @@ -155,8 +148,6 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, err_reserve: mlx4_qp_release_range(mdev->dev, ring->qpn, 1); -err_map: - mlx4_en_unmap_buffer(&ring->wqres.buf); err_hwq_res: mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size); err_bounce: @@ -183,7 +174,6 @@ void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv, mlx4_qp_remove(mdev->dev, &ring->qp); mlx4_qp_free(mdev->dev, &ring->qp); mlx4_qp_release_range(priv->mdev->dev, ring->qpn, 1); - mlx4_en_unmap_buffer(&ring->wqres.buf); mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size); kfree(ring->bounce_buf); ring->bounce_buf = NULL; diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index 63b1aeae2c03..cc84e09f324a 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h @@ -672,8 +672,6 @@ void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride, int is_tx, int rss, int qpn, int cqn, int user_prio, struct mlx4_qp_context *context); void mlx4_en_sqp_event(struct mlx4_qp *qp, enum mlx4_event event); -int mlx4_en_map_buffer(struct mlx4_buf *buf); -void mlx4_en_unmap_buffer(struct mlx4_buf *buf); int mlx4_en_change_mcast_lb(struct mlx4_en_priv *priv, struct mlx4_qp *qp, int loopback); void mlx4_en_calc_rx_buf(struct net_device *dev); diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index d1f904c8b2cb..80dec87a94f8 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h @@ -1058,7 +1058,7 @@ int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct, void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf); static 
inline void *mlx4_buf_offset(struct mlx4_buf *buf, int offset) { - if (BITS_PER_LONG == 64 || buf->nbufs == 1) + if (buf->nbufs == 1) return buf->direct.buf + offset; else return buf->page_list[offset >> PAGE_SHIFT].buf + @@ -1098,7 +1098,7 @@ int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order, void mlx4_db_free(struct mlx4_dev *dev, struct mlx4_db *db); int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres, - int size, int max_direct); + int size); void mlx4_free_hwq_res(struct mlx4_dev *mdev, struct mlx4_hwq_resources *wqres, int size); From 1a557afc4dd59b85a5cae2be6d351eaeb31d2664 Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Wed, 20 Apr 2016 19:43:37 -0700 Subject: [PATCH 1372/1649] i40e: Refactor receive routine This is part 1 of the Rx refactor series, just including changes to i40e. This refactor aligns the receive routine with the one in ixgbe which was highly optimized. This reduces the code we have to maintain and allows for (hopefully) more readable and maintainable RX hot path. In order to do this: - consolidate the receive path into a single function that doesn't use packet split but *does* use pages for Rx buffers. - remove the old _1buf routine - consolidate several routines into helper functions - remove ethtool control over packet split Change-ID: I5ca100721de65992aa0114f8b4bac844b84758e0 Signed-off-by: Jesse Brandeburg Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e.h | 1 - .../net/ethernet/intel/i40e/i40e_debugfs.c | 9 +- .../net/ethernet/intel/i40e/i40e_ethtool.c | 1 - drivers/net/ethernet/intel/i40e/i40e_main.c | 16 +- drivers/net/ethernet/intel/i40e/i40e_txrx.c | 792 +++++++++++------- drivers/net/ethernet/intel/i40e/i40e_txrx.h | 37 +- 6 files changed, 542 insertions(+), 314 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index ea6a69a1f1d7..ebf423bd5ccb 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -531,7 +531,6 @@ struct i40e_vsi { u8 *rss_lut_user; /* User configured lookup table entries */ u16 max_frame; - u16 rx_hdr_len; u16 rx_buf_len; u8 dtype; diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c index c0a01e0eb181..8ae30f7f1839 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c +++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c @@ -268,9 +268,8 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid) rx_ring->queue_index, rx_ring->reg_idx); dev_info(&pf->pdev->dev, - " rx_rings[%i]: rx_hdr_len = %d, rx_buf_len = %d\n", - i, rx_ring->rx_hdr_len, - rx_ring->rx_buf_len); + " rx_rings[%i]: rx_buf_len = %d\n", + i, rx_ring->rx_buf_len); dev_info(&pf->pdev->dev, " rx_rings[%i]: next_to_use = %d, next_to_clean = %d, ring_active = %i\n", i, @@ -361,8 +360,8 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid) " work_limit = %d\n", vsi->work_limit); dev_info(&pf->pdev->dev, - " max_frame = %d, rx_hdr_len = %d, rx_buf_len = %d dtype = %d\n", - vsi->max_frame, vsi->rx_hdr_len, vsi->rx_buf_len, vsi->dtype); + " max_frame = %d, rx_buf_len = %d dtype = %d\n", + vsi->max_frame, vsi->rx_buf_len, vsi->dtype); dev_info(&pf->pdev->dev, " num_q_vectors = %i, base_vector = %i\n", vsi->num_q_vectors, vsi->base_vector); diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 858e1699b87c..6fd730ac23a1 100644 --- 
a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -235,7 +235,6 @@ static const char i40e_priv_flags_strings[][ETH_GSTRING_LEN] = { "LinkPolling", "flow-director-atr", "veb-stats", - "packet-split", "hw-atr-eviction", }; diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 84e8d4e05924..e46611122179 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -2855,10 +2855,8 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring) memset(&rx_ctx, 0, sizeof(rx_ctx)); ring->rx_buf_len = vsi->rx_buf_len; - ring->rx_hdr_len = vsi->rx_hdr_len; rx_ctx.dbuff = ring->rx_buf_len >> I40E_RXQ_CTX_DBUFF_SHIFT; - rx_ctx.hbuff = ring->rx_hdr_len >> I40E_RXQ_CTX_HBUFF_SHIFT; rx_ctx.base = (ring->dma / 128); rx_ctx.qlen = ring->count; @@ -2910,7 +2908,7 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring) ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q); writel(0, ring->tail); - i40e_alloc_rx_buffers_1buf(ring, I40E_DESC_UNUSED(ring)); + i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring)); return 0; } @@ -2949,15 +2947,13 @@ static int i40e_vsi_configure_rx(struct i40e_vsi *vsi) else vsi->max_frame = I40E_RXBUFFER_2048; - vsi->rx_hdr_len = 0; - vsi->rx_buf_len = vsi->max_frame; + vsi->rx_buf_len = I40E_RXBUFFER_2048; vsi->dtype = I40E_RX_DTYPE_NO_SPLIT; #ifdef I40E_FCOE /* setup rx buffer for FCoE */ if ((vsi->type == I40E_VSI_FCOE) && (vsi->back->flags & I40E_FLAG_FCOE_ENABLED)) { - vsi->rx_hdr_len = 0; vsi->rx_buf_len = I40E_RXBUFFER_3072; vsi->max_frame = I40E_RXBUFFER_3072; vsi->dtype = I40E_RX_DTYPE_NO_SPLIT; @@ -2965,8 +2961,6 @@ static int i40e_vsi_configure_rx(struct i40e_vsi *vsi) #endif /* I40E_FCOE */ /* round up for the chip's needs */ - vsi->rx_hdr_len = ALIGN(vsi->rx_hdr_len, - BIT_ULL(I40E_RXQ_CTX_HBUFF_SHIFT)); vsi->rx_buf_len = ALIGN(vsi->rx_buf_len, BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT)); @@ -10661,11 +10655,9 @@ static void i40e_print_features(struct i40e_pf *pf) #ifdef CONFIG_PCI_IOV i += snprintf(&buf[i], REMAIN(i), " VFs: %d", pf->num_req_vfs); #endif - i += snprintf(&buf[i], REMAIN(i), " VSIs: %d QP: %d RX: %s", + i += snprintf(&buf[i], REMAIN(i), " VSIs: %d QP: %d", pf->hw.func_caps.num_vsis, - pf->vsi[pf->lan_vsi]->num_queue_pairs, - "1BUF"); - + pf->vsi[pf->lan_vsi]->num_queue_pairs); if (pf->flags & I40E_FLAG_RSS_ENABLED) i += snprintf(&buf[i], REMAIN(i), " RSS"); if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 450ecdd589db..b0edffe88492 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -1024,7 +1024,6 @@ err: void i40e_clean_rx_ring(struct i40e_ring *rx_ring) { struct device *dev = rx_ring->dev; - struct i40e_rx_buffer *rx_bi; unsigned long bi_size; u16 i; @@ -1034,30 +1033,20 @@ void i40e_clean_rx_ring(struct i40e_ring *rx_ring) /* Free all the Rx ring sk_buffs */ for (i = 0; i < rx_ring->count; i++) { - rx_bi = &rx_ring->rx_bi[i]; - if (rx_bi->dma) { - dma_unmap_single(dev, - rx_bi->dma, - rx_ring->rx_buf_len, - DMA_FROM_DEVICE); - rx_bi->dma = 0; - } + struct i40e_rx_buffer *rx_bi = &rx_ring->rx_bi[i]; + if (rx_bi->skb) { dev_kfree_skb(rx_bi->skb); rx_bi->skb = NULL; } - if (rx_bi->page) { - if (rx_bi->page_dma) { - dma_unmap_page(dev, - rx_bi->page_dma, - PAGE_SIZE, - DMA_FROM_DEVICE); - rx_bi->page_dma = 0; - } - __free_page(rx_bi->page); - 
rx_bi->page = NULL; - rx_bi->page_offset = 0; - } + if (!rx_bi->page) + continue; + + dma_unmap_page(dev, rx_bi->dma, PAGE_SIZE, DMA_FROM_DEVICE); + __free_pages(rx_bi->page, 0); + + rx_bi->page = NULL; + rx_bi->page_offset = 0; } bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count; @@ -1066,6 +1055,7 @@ void i40e_clean_rx_ring(struct i40e_ring *rx_ring) /* Zero out the descriptor ring */ memset(rx_ring->desc, 0, rx_ring->size); + rx_ring->next_to_alloc = 0; rx_ring->next_to_clean = 0; rx_ring->next_to_use = 0; } @@ -1089,37 +1079,6 @@ void i40e_free_rx_resources(struct i40e_ring *rx_ring) } } -/** - * i40e_alloc_rx_headers - allocate rx header buffers - * @rx_ring: ring to alloc buffers - * - * Allocate rx header buffers for the entire ring. As these are static, - * this is only called when setting up a new ring. - **/ -void i40e_alloc_rx_headers(struct i40e_ring *rx_ring) -{ - struct device *dev = rx_ring->dev; - struct i40e_rx_buffer *rx_bi; - dma_addr_t dma; - void *buffer; - int buf_size; - int i; - - if (rx_ring->rx_bi[0].hdr_buf) - return; - /* Make sure the buffers don't cross cache line boundaries. */ - buf_size = ALIGN(rx_ring->rx_hdr_len, 256); - buffer = dma_alloc_coherent(dev, buf_size * rx_ring->count, - &dma, GFP_KERNEL); - if (!buffer) - return; - for (i = 0; i < rx_ring->count; i++) { - rx_bi = &rx_ring->rx_bi[i]; - rx_bi->dma = dma + (i * buf_size); - rx_bi->hdr_buf = buffer + (i * buf_size); - } -} - /** * i40e_setup_rx_descriptors - Allocate Rx descriptors * @rx_ring: Rx descriptor ring (for a specific queue) to setup @@ -1141,9 +1100,7 @@ int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring) u64_stats_init(&rx_ring->syncp); /* Round up to nearest 4K */ - rx_ring->size = ring_is_16byte_desc_enabled(rx_ring) - ? rx_ring->count * sizeof(union i40e_16byte_rx_desc) - : rx_ring->count * sizeof(union i40e_32byte_rx_desc); + rx_ring->size = rx_ring->count * sizeof(union i40e_32byte_rx_desc); rx_ring->size = ALIGN(rx_ring->size, 4096); rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, &rx_ring->dma, GFP_KERNEL); @@ -1154,6 +1111,7 @@ int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring) goto err; } + rx_ring->next_to_alloc = 0; rx_ring->next_to_clean = 0; rx_ring->next_to_use = 0; @@ -1172,6 +1130,10 @@ err: static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val) { rx_ring->next_to_use = val; + + /* update next to alloc since we have filled the ring */ + rx_ring->next_to_alloc = val; + /* Force memory writes to complete before letting h/w * know there are new descriptors to fetch. (Only * applicable for weak-ordered memory model archs, @@ -1182,164 +1144,48 @@ static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val) } /** - * i40e_alloc_rx_buffers_ps - Replace used receive buffers; packet split - * @rx_ring: ring to place buffers on - * @cleaned_count: number of buffers to replace + * i40e_alloc_mapped_page - recycle or make a new page + * @rx_ring: ring to use + * @bi: rx_buffer struct to modify * - * Returns true if any errors on allocation + * Returns true if the page was successfully allocated or + * reused. 
**/ -bool i40e_alloc_rx_buffers_ps(struct i40e_ring *rx_ring, u16 cleaned_count) +static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring, + struct i40e_rx_buffer *bi) { - u16 i = rx_ring->next_to_use; - union i40e_rx_desc *rx_desc; - struct i40e_rx_buffer *bi; - const int current_node = numa_node_id(); + struct page *page = bi->page; + dma_addr_t dma; - /* do nothing if no valid netdev defined */ - if (!rx_ring->netdev || !cleaned_count) - return false; - - while (cleaned_count--) { - rx_desc = I40E_RX_DESC(rx_ring, i); - bi = &rx_ring->rx_bi[i]; - - if (bi->skb) /* desc is in use */ - goto no_buffers; - - /* If we've been moved to a different NUMA node, release the - * page so we can get a new one on the current node. - */ - if (bi->page && page_to_nid(bi->page) != current_node) { - dma_unmap_page(rx_ring->dev, - bi->page_dma, - PAGE_SIZE, - DMA_FROM_DEVICE); - __free_page(bi->page); - bi->page = NULL; - bi->page_dma = 0; - rx_ring->rx_stats.realloc_count++; - } else if (bi->page) { - rx_ring->rx_stats.page_reuse_count++; - } - - if (!bi->page) { - bi->page = alloc_page(GFP_ATOMIC); - if (!bi->page) { - rx_ring->rx_stats.alloc_page_failed++; - goto no_buffers; - } - bi->page_dma = dma_map_page(rx_ring->dev, - bi->page, - 0, - PAGE_SIZE, - DMA_FROM_DEVICE); - if (dma_mapping_error(rx_ring->dev, bi->page_dma)) { - rx_ring->rx_stats.alloc_page_failed++; - __free_page(bi->page); - bi->page = NULL; - bi->page_dma = 0; - bi->page_offset = 0; - goto no_buffers; - } - bi->page_offset = 0; - } - - /* Refresh the desc even if buffer_addrs didn't change - * because each write-back erases this info. - */ - rx_desc->read.pkt_addr = - cpu_to_le64(bi->page_dma + bi->page_offset); - rx_desc->read.hdr_addr = cpu_to_le64(bi->dma); - i++; - if (i == rx_ring->count) - i = 0; + /* since we are recycling buffers we should seldom need to alloc */ + if (likely(page)) { + rx_ring->rx_stats.page_reuse_count++; + return true; } - if (rx_ring->next_to_use != i) - i40e_release_rx_desc(rx_ring, i); - - return false; - -no_buffers: - if (rx_ring->next_to_use != i) - i40e_release_rx_desc(rx_ring, i); - - /* make sure to come back via polling to try again after - * allocation failure - */ - return true; -} - -/** - * i40e_alloc_rx_buffers_1buf - Replace used receive buffers; single buffer - * @rx_ring: ring to place buffers on - * @cleaned_count: number of buffers to replace - * - * Returns true if any errors on allocation - **/ -bool i40e_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count) -{ - u16 i = rx_ring->next_to_use; - union i40e_rx_desc *rx_desc; - struct i40e_rx_buffer *bi; - struct sk_buff *skb; - - /* do nothing if no valid netdev defined */ - if (!rx_ring->netdev || !cleaned_count) + /* alloc new page for storage */ + page = dev_alloc_page(); + if (unlikely(!page)) { + rx_ring->rx_stats.alloc_page_failed++; return false; - - while (cleaned_count--) { - rx_desc = I40E_RX_DESC(rx_ring, i); - bi = &rx_ring->rx_bi[i]; - skb = bi->skb; - - if (!skb) { - skb = __netdev_alloc_skb_ip_align(rx_ring->netdev, - rx_ring->rx_buf_len, - GFP_ATOMIC | - __GFP_NOWARN); - if (!skb) { - rx_ring->rx_stats.alloc_buff_failed++; - goto no_buffers; - } - /* initialize queue mapping */ - skb_record_rx_queue(skb, rx_ring->queue_index); - bi->skb = skb; - } - - if (!bi->dma) { - bi->dma = dma_map_single(rx_ring->dev, - skb->data, - rx_ring->rx_buf_len, - DMA_FROM_DEVICE); - if (dma_mapping_error(rx_ring->dev, bi->dma)) { - rx_ring->rx_stats.alloc_buff_failed++; - bi->dma = 0; - dev_kfree_skb(bi->skb); - bi->skb = 
NULL; - goto no_buffers; - } - } - - rx_desc->read.pkt_addr = cpu_to_le64(bi->dma); - rx_desc->read.hdr_addr = 0; - i++; - if (i == rx_ring->count) - i = 0; } - if (rx_ring->next_to_use != i) - i40e_release_rx_desc(rx_ring, i); + /* map page for use */ + dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE); - return false; - -no_buffers: - if (rx_ring->next_to_use != i) - i40e_release_rx_desc(rx_ring, i); - - /* make sure to come back via polling to try again after - * allocation failure + /* if mapping failed free memory back to system since + * there isn't much point in holding memory we can't use */ + if (dma_mapping_error(rx_ring->dev, dma)) { + __free_pages(page, 0); + rx_ring->rx_stats.alloc_page_failed++; + return false; + } + + bi->dma = dma; + bi->page = page; + bi->page_offset = 0; + return true; } @@ -1361,25 +1207,96 @@ static void i40e_receive_skb(struct i40e_ring *rx_ring, napi_gro_receive(&q_vector->napi, skb); } +/** + * i40e_alloc_rx_buffers - Replace used receive buffers + * @rx_ring: ring to place buffers on + * @cleaned_count: number of buffers to replace + * + * Returns false if all allocations were successful, true if any fail + **/ +bool i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count) +{ + u16 ntu = rx_ring->next_to_use; + union i40e_rx_desc *rx_desc; + struct i40e_rx_buffer *bi; + + /* do nothing if no valid netdev defined */ + if (!rx_ring->netdev || !cleaned_count) + return false; + + rx_desc = I40E_RX_DESC(rx_ring, ntu); + bi = &rx_ring->rx_bi[ntu]; + + do { + if (!i40e_alloc_mapped_page(rx_ring, bi)) + goto no_buffers; + + /* Refresh the desc even if buffer_addrs didn't change + * because each write-back erases this info. + */ + rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset); + rx_desc->read.hdr_addr = 0; + + rx_desc++; + bi++; + ntu++; + if (unlikely(ntu == rx_ring->count)) { + rx_desc = I40E_RX_DESC(rx_ring, 0); + bi = rx_ring->rx_bi; + ntu = 0; + } + + /* clear the status bits for the next_to_use descriptor */ + rx_desc->wb.qword1.status_error_len = 0; + + cleaned_count--; + } while (cleaned_count); + + if (rx_ring->next_to_use != ntu) + i40e_release_rx_desc(rx_ring, ntu); + + return false; + +no_buffers: + if (rx_ring->next_to_use != ntu) + i40e_release_rx_desc(rx_ring, ntu); + + /* make sure to come back via polling to try again after + * allocation failure + */ + return true; +} + /** * i40e_rx_checksum - Indicate in skb if hw indicated a good cksum * @vsi: the VSI we care about * @skb: skb currently being received and modified - * @rx_status: status value of last descriptor in packet - * @rx_error: error value of last descriptor in packet - * @rx_ptype: ptype value of last descriptor in packet + * @rx_desc: the receive descriptor + * + * skb->protocol must be set before this function is called **/ static inline void i40e_rx_checksum(struct i40e_vsi *vsi, struct sk_buff *skb, - u32 rx_status, - u32 rx_error, - u16 rx_ptype) + union i40e_rx_desc *rx_desc) { - struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(rx_ptype); + struct i40e_rx_ptype_decoded decoded; bool ipv4, ipv6, tunnel = false; + u32 rx_error, rx_status; + u8 ptype; + u64 qword; + + qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); + ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT; + rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >> + I40E_RXD_QW1_ERROR_SHIFT; + rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >> + I40E_RXD_QW1_STATUS_SHIFT; + decoded = decode_rx_desc_ptype(ptype); skb->ip_summed = 
CHECKSUM_NONE; + skb_checksum_none_assert(skb); + /* Rx csum enabled and ip headers found? */ if (!(vsi->netdev->features & NETIF_F_RXCSUM)) return; @@ -1445,7 +1362,7 @@ checksum_fail: * * Returns a hash type to be used by skb_set_hash **/ -static inline enum pkt_hash_types i40e_ptype_to_htype(u8 ptype) +static inline int i40e_ptype_to_htype(u8 ptype) { struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype); @@ -1473,7 +1390,7 @@ static inline void i40e_rx_hash(struct i40e_ring *ring, u8 rx_ptype) { u32 hash; - const __le64 rss_mask = + const __le64 rss_mask = cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH << I40E_RX_DESC_STATUS_FLTSTAT_SHIFT); @@ -1487,114 +1404,419 @@ static inline void i40e_rx_hash(struct i40e_ring *ring, } /** - * i40e_clean_rx_irq_1buf - Reclaim resources after receive; single buffer - * @rx_ring: rx ring to clean - * @budget: how many cleans we're allowed + * i40e_process_skb_fields - Populate skb header fields from Rx descriptor + * @rx_ring: rx descriptor ring packet is being transacted on + * @rx_desc: pointer to the EOP Rx descriptor + * @skb: pointer to current skb being populated + * @rx_ptype: the packet type decoded by hardware * - * Returns number of packets cleaned + * This function checks the ring, descriptor, and packet information in + * order to populate the hash, checksum, VLAN, protocol, and + * other fields within the skb. **/ -static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget) +static inline +void i40e_process_skb_fields(struct i40e_ring *rx_ring, + union i40e_rx_desc *rx_desc, struct sk_buff *skb, + u8 rx_ptype) +{ + u64 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); + u32 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >> + I40E_RXD_QW1_STATUS_SHIFT; + u32 rsyn = (rx_status & I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >> + I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT; + + if (unlikely(rsyn)) { + i40e_ptp_rx_hwtstamp(rx_ring->vsi->back, skb, rsyn); + rx_ring->last_rx_timestamp = jiffies; + } + + i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype); + + /* modifies the skb - consumes the enet header */ + skb->protocol = eth_type_trans(skb, rx_ring->netdev); + + i40e_rx_checksum(rx_ring->vsi, skb, rx_desc); + + skb_record_rx_queue(skb, rx_ring->queue_index); +} + +/** + * i40e_pull_tail - i40e specific version of skb_pull_tail + * @rx_ring: rx descriptor ring packet is being transacted on + * @skb: pointer to current skb being adjusted + * + * This function is an i40e specific version of __pskb_pull_tail. The + * main difference between this version and the original function is that + * this function can make several assumptions about the state of things + * that allow for significant optimizations versus the standard function. + * As a result we can do things like drop a frag and maintain an accurate + * truesize for the skb. + */ +static void i40e_pull_tail(struct i40e_ring *rx_ring, struct sk_buff *skb) +{ + struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; + unsigned char *va; + unsigned int pull_len; + + /* it is valid to use page_address instead of kmap since we are + * working with pages allocated out of the lomem pool per + * alloc_page(GFP_ATOMIC) + */ + va = skb_frag_address(frag); + + /* we need the header to contain the greater of either ETH_HLEN or + * 60 bytes if the skb->len is less than 60 for skb_pad. 
+ */ + pull_len = eth_get_headlen(va, I40E_RX_HDR_SIZE); + + /* align pull length to size of long to optimize memcpy performance */ + skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long))); + + /* update all of the pointers */ + skb_frag_size_sub(frag, pull_len); + frag->page_offset += pull_len; + skb->data_len -= pull_len; + skb->tail += pull_len; +} + +/** + * i40e_cleanup_headers - Correct empty headers + * @rx_ring: rx descriptor ring packet is being transacted on + * @skb: pointer to current skb being fixed + * + * Also address the case where we are pulling data in on pages only + * and as such no data is present in the skb header. + * + * In addition if skb is not at least 60 bytes we need to pad it so that + * it is large enough to qualify as a valid Ethernet frame. + * + * Returns true if an error was encountered and skb was freed. + **/ +static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb) +{ + /* place header in linear portion of buffer */ + if (skb_is_nonlinear(skb)) + i40e_pull_tail(rx_ring, skb); + + /* if eth_skb_pad returns an error the skb was freed */ + if (eth_skb_pad(skb)) + return true; + + return false; +} + +/** + * i40e_reuse_rx_page - page flip buffer and store it back on the ring + * @rx_ring: rx descriptor ring to store buffers on + * @old_buff: donor buffer to have page reused + * + * Synchronizes page for reuse by the adapter + **/ +static void i40e_reuse_rx_page(struct i40e_ring *rx_ring, + struct i40e_rx_buffer *old_buff) +{ + struct i40e_rx_buffer *new_buff; + u16 nta = rx_ring->next_to_alloc; + + new_buff = &rx_ring->rx_bi[nta]; + + /* update, and store next to alloc */ + nta++; + rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; + + /* transfer page from old buffer to new buffer */ + *new_buff = *old_buff; +} + +/** + * i40e_page_is_reserved - check if reuse is possible + * @page: page struct to check + */ +static inline bool i40e_page_is_reserved(struct page *page) +{ + return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page); +} + +/** + * i40e_add_rx_frag - Add contents of Rx buffer to sk_buff + * @rx_ring: rx descriptor ring to transact packets on + * @rx_buffer: buffer containing page to add + * @rx_desc: descriptor containing length of buffer written by hardware + * @skb: sk_buff to place the data into + * + * This function will add the data contained in rx_buffer->page to the skb. + * This is done either through a direct copy if the data in the buffer is + * less than the skb header size, otherwise it will just attach the page as + * a frag to the skb. + * + * The function will then update the page offset if necessary and return + * true if the buffer can be reused by the adapter. + **/ +static bool i40e_add_rx_frag(struct i40e_ring *rx_ring, + struct i40e_rx_buffer *rx_buffer, + union i40e_rx_desc *rx_desc, + struct sk_buff *skb) +{ + struct page *page = rx_buffer->page; + u64 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); + unsigned int size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >> + I40E_RXD_QW1_LENGTH_PBUF_SHIFT; +#if (PAGE_SIZE < 8192) + unsigned int truesize = I40E_RXBUFFER_2048; +#else + unsigned int truesize = ALIGN(size, L1_CACHE_BYTES); + unsigned int last_offset = PAGE_SIZE - I40E_RXBUFFER_2048; +#endif + + /* will the data fit in the skb we allocated? 
if so, just + * copy it as it is pretty small anyway + */ + if ((size <= I40E_RX_HDR_SIZE) && !skb_is_nonlinear(skb)) { + unsigned char *va = page_address(page) + rx_buffer->page_offset; + + memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long))); + + /* page is not reserved, we can reuse buffer as-is */ + if (likely(!i40e_page_is_reserved(page))) + return true; + + /* this page cannot be reused so discard it */ + __free_pages(page, 0); + return false; + } + + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, + rx_buffer->page_offset, size, truesize); + + /* avoid re-using remote pages */ + if (unlikely(i40e_page_is_reserved(page))) + return false; + +#if (PAGE_SIZE < 8192) + /* if we are only owner of page we can reuse it */ + if (unlikely(page_count(page) != 1)) + return false; + + /* flip page offset to other buffer */ + rx_buffer->page_offset ^= truesize; +#else + /* move offset up to the next cache line */ + rx_buffer->page_offset += truesize; + + if (rx_buffer->page_offset > last_offset) + return false; +#endif + + /* Even if we own the page, we are not allowed to use atomic_set() + * This would break get_page_unless_zero() users. + */ + get_page(rx_buffer->page); + + return true; +} + +/** + * i40e_fetch_rx_buffer - Allocate skb and populate it + * @rx_ring: rx descriptor ring to transact packets on + * @rx_desc: descriptor containing info written by hardware + * + * This function allocates an skb on the fly, and populates it with the page + * data from the current receive descriptor, taking care to set up the skb + * correctly, as well as handling calling the page recycle function if + * necessary. + */ +static inline +struct sk_buff *i40e_fetch_rx_buffer(struct i40e_ring *rx_ring, + union i40e_rx_desc *rx_desc) +{ + struct i40e_rx_buffer *rx_buffer; + struct sk_buff *skb; + struct page *page; + + rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean]; + page = rx_buffer->page; + prefetchw(page); + + skb = rx_buffer->skb; + + if (likely(!skb)) { + void *page_addr = page_address(page) + rx_buffer->page_offset; + + /* prefetch first cache line of first page */ + prefetch(page_addr); +#if L1_CACHE_BYTES < 128 + prefetch(page_addr + L1_CACHE_BYTES); +#endif + + /* allocate a skb to store the frags */ + skb = __napi_alloc_skb(&rx_ring->q_vector->napi, + I40E_RX_HDR_SIZE, + GFP_ATOMIC | __GFP_NOWARN); + if (unlikely(!skb)) { + rx_ring->rx_stats.alloc_buff_failed++; + return NULL; + } + + /* we will be copying header into skb->data in + * pskb_may_pull so it is in our interest to prefetch + * it now to avoid a possible cache miss + */ + prefetchw(skb->data); + } else { + rx_buffer->skb = NULL; + } + + /* we are reusing so sync this buffer for CPU use */ + dma_sync_single_range_for_cpu(rx_ring->dev, + rx_buffer->dma, + rx_buffer->page_offset, + I40E_RXBUFFER_2048, + DMA_FROM_DEVICE); + + /* pull page into skb */ + if (i40e_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) { + /* hand second half of page back to the ring */ + i40e_reuse_rx_page(rx_ring, rx_buffer); + rx_ring->rx_stats.page_reuse_count++; + } else { + /* we are not reusing the buffer so unmap it */ + dma_unmap_page(rx_ring->dev, rx_buffer->dma, PAGE_SIZE, + DMA_FROM_DEVICE); + } + + /* clear contents of buffer_info */ + rx_buffer->page = NULL; + + return skb; +} + +/** + * i40e_is_non_eop - process handling of non-EOP buffers + * @rx_ring: Rx ring being processed + * @rx_desc: Rx descriptor for current buffer + * @skb: Current socket buffer containing buffer in progress + * + * This function updates next to clean. 
If the buffer is an EOP buffer + * this function exits returning false, otherwise it will place the + * sk_buff in the next buffer to be chained and return true indicating + * that this is in fact a non-EOP buffer. + **/ +static bool i40e_is_non_eop(struct i40e_ring *rx_ring, + union i40e_rx_desc *rx_desc, + struct sk_buff *skb) +{ + u32 ntc = rx_ring->next_to_clean + 1; + + /* fetch, update, and store next to clean */ + ntc = (ntc < rx_ring->count) ? ntc : 0; + rx_ring->next_to_clean = ntc; + + prefetch(I40E_RX_DESC(rx_ring, ntc)); + +#define staterrlen rx_desc->wb.qword1.status_error_len + if (unlikely(i40e_rx_is_programming_status(le64_to_cpu(staterrlen)))) { + i40e_clean_programming_status(rx_ring, rx_desc); + rx_ring->rx_bi[ntc].skb = skb; + return true; + } + /* if we are the last buffer then there is nothing else to do */ +#define I40E_RXD_EOF BIT(I40E_RX_DESC_STATUS_EOF_SHIFT) + if (likely(i40e_test_staterr(rx_desc, I40E_RXD_EOF))) + return false; + + /* place skb in next buffer to be received */ + rx_ring->rx_bi[ntc].skb = skb; + rx_ring->rx_stats.non_eop_descs++; + + return true; +} + +/** + * i40e_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf + * @rx_ring: rx descriptor ring to transact packets on + * @budget: Total limit on number of packets to process + * + * This function provides a "bounce buffer" approach to Rx interrupt + * processing. The advantage to this is that on systems that have + * expensive overhead for IOMMU access this provides a means of avoiding + * it by maintaining the mapping of the page to the system. + * + * Returns amount of work completed + **/ +static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) { unsigned int total_rx_bytes = 0, total_rx_packets = 0; u16 cleaned_count = I40E_DESC_UNUSED(rx_ring); - struct i40e_vsi *vsi = rx_ring->vsi; - union i40e_rx_desc *rx_desc; - u32 rx_error, rx_status; - u16 rx_packet_len; bool failure = false; - u8 rx_ptype; - u64 qword; - u16 i; - do { - struct i40e_rx_buffer *rx_bi; + while (likely(total_rx_packets < budget)) { + union i40e_rx_desc *rx_desc; struct sk_buff *skb; + u32 rx_status; u16 vlan_tag; + u8 rx_ptype; + u64 qword; + /* return some buffers to hardware, one at a time is too slow */ if (cleaned_count >= I40E_RX_BUFFER_WRITE) { failure = failure || - i40e_alloc_rx_buffers_1buf(rx_ring, - cleaned_count); + i40e_alloc_rx_buffers(rx_ring, cleaned_count); cleaned_count = 0; } - i = rx_ring->next_to_clean; - rx_desc = I40E_RX_DESC(rx_ring, i); + rx_desc = I40E_RX_DESC(rx_ring, rx_ring->next_to_clean); + qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); + rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> + I40E_RXD_QW1_PTYPE_SHIFT; rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >> - I40E_RXD_QW1_STATUS_SHIFT; + I40E_RXD_QW1_STATUS_SHIFT; if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT))) break; + /* status_error_len will always be zero for unused descriptors + * because it's cleared in cleanup, and overlaps with hdr_addr + * which is always zero because packet split isn't used, if the + * hardware wrote DD then it will be non-zero + */ + if (!rx_desc->wb.qword1.status_error_len) + break; + /* This memory barrier is needed to keep us from reading * any other fields out of the rx_desc until we know the * DD bit is set. 
*/ dma_rmb(); - if (i40e_rx_is_programming_status(qword)) { - i40e_clean_programming_status(rx_ring, rx_desc); - I40E_RX_INCREMENT(rx_ring, i); - continue; - } - rx_bi = &rx_ring->rx_bi[i]; - skb = rx_bi->skb; - prefetch(skb->data); + skb = i40e_fetch_rx_buffer(rx_ring, rx_desc); + if (!skb) + break; - rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >> - I40E_RXD_QW1_LENGTH_PBUF_SHIFT; - - rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >> - I40E_RXD_QW1_ERROR_SHIFT; - rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT); - - rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> - I40E_RXD_QW1_PTYPE_SHIFT; - rx_bi->skb = NULL; cleaned_count++; - /* Get the header and possibly the whole packet - * If this is an skb from previous receive dma will be 0 - */ - skb_put(skb, rx_packet_len); - dma_unmap_single(rx_ring->dev, rx_bi->dma, rx_ring->rx_buf_len, - DMA_FROM_DEVICE); - rx_bi->dma = 0; - - I40E_RX_INCREMENT(rx_ring, i); - - if (unlikely( - !(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) { - rx_ring->rx_stats.non_eop_descs++; + if (i40e_is_non_eop(rx_ring, rx_desc, skb)) continue; - } - /* ERR_MASK will only have valid bits if EOP set */ - if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) { + /* ERR_MASK will only have valid bits if EOP set, and + * what we are doing here is actually checking + * I40E_RX_DESC_ERROR_RXE_SHIFT, since it is the zeroth bit in + * the error field + */ + if (unlikely(i40e_test_staterr(rx_desc, BIT(I40E_RXD_QW1_ERROR_SHIFT)))) { dev_kfree_skb_any(skb); continue; } - i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype); - if (unlikely(rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK)) { - i40e_ptp_rx_hwtstamp(vsi->back, skb, (rx_status & - I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >> - I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT); - rx_ring->last_rx_timestamp = jiffies; - } + if (i40e_cleanup_headers(rx_ring, skb)) + continue; /* probably a little skewed due to removing CRC */ total_rx_bytes += skb->len; - total_rx_packets++; - skb->protocol = eth_type_trans(skb, rx_ring->netdev); + /* populate checksum, VLAN, and protocol */ + i40e_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype); - i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype); - - vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT) - ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) - : 0; #ifdef I40E_FCOE if (unlikely( i40e_rx_is_fcoe(rx_ptype) && @@ -1603,10 +1825,15 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget) continue; } #endif + + vlan_tag = (qword & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) ? + le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0; + i40e_receive_skb(rx_ring, skb, vlan_tag); - rx_desc->wb.qword1.status_error_len = 0; - } while (likely(total_rx_packets < budget)); + /* update budget accounting */ + total_rx_packets++; + } u64_stats_update_begin(&rx_ring->syncp); rx_ring->stats.packets += total_rx_packets; @@ -1615,6 +1842,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget) rx_ring->q_vector->rx.total_packets += total_rx_packets; rx_ring->q_vector->rx.total_bytes += total_rx_bytes; + /* guarantee a trip back through this routine if there was a failure */ return failure ? 
budget : total_rx_packets; } @@ -1759,9 +1987,7 @@ int i40e_napi_poll(struct napi_struct *napi, int budget) budget_per_ring = max(budget/q_vector->num_ringpairs, 1); i40e_for_each_ring(ring, q_vector->rx) { - int cleaned; - - cleaned = i40e_clean_rx_irq_1buf(ring, budget_per_ring); + int cleaned = i40e_clean_rx_irq(ring, budget_per_ring); work_done += cleaned; /* if we clean as many as budgeted, we must not be done */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h index 03e21d95c4f1..37643e6b3afd 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h @@ -102,8 +102,8 @@ enum i40e_dyn_idx_t { (((pf)->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) ? \ I40E_DEFAULT_RSS_HENA_EXPANDED : I40E_DEFAULT_RSS_HENA) -/* Supported Rx Buffer Sizes */ -#define I40E_RXBUFFER_512 512 /* Used for packet split */ +/* Supported Rx Buffer Sizes (a multiple of 128) */ +#define I40E_RXBUFFER_256 256 #define I40E_RXBUFFER_2048 2048 #define I40E_RXBUFFER_3072 3072 /* For FCoE MTU of 2158 */ #define I40E_RXBUFFER_4096 4096 @@ -114,9 +114,28 @@ enum i40e_dyn_idx_t { * reserve 2 more, and skb_shared_info adds an additional 384 bytes more, * this adds up to 512 bytes of extra data meaning the smallest allocation * we could have is 1K. - * i.e. RXBUFFER_512 --> size-1024 slab + * i.e. RXBUFFER_256 --> 960 byte skb (size-1024 slab) + * i.e. RXBUFFER_512 --> 1216 byte skb (size-2048 slab) */ -#define I40E_RX_HDR_SIZE I40E_RXBUFFER_512 +#define I40E_RX_HDR_SIZE I40E_RXBUFFER_256 +#define i40e_rx_desc i40e_32byte_rx_desc + +/** + * i40e_test_staterr - tests bits in Rx descriptor status and error fields + * @rx_desc: pointer to receive descriptor (in le64 format) + * @stat_err_bits: value to mask + * + * This function does some fast chicanery in order to return the + * value of the mask which is really only used for boolean tests. + * The status_error_len doesn't need to be shifted because it begins + * at offset zero. + */ +static inline bool i40e_test_staterr(union i40e_rx_desc *rx_desc, + const u64 stat_err_bits) +{ + return !!(rx_desc->wb.qword1.status_error_len & + cpu_to_le64(stat_err_bits)); +} /* How many Rx Buffers do we bundle into one write to the hardware ? 
*/ #define I40E_RX_BUFFER_WRITE 16 /* Must be power of 2 */ @@ -142,8 +161,6 @@ enum i40e_dyn_idx_t { prefetch((n)); \ } while (0) -#define i40e_rx_desc i40e_32byte_rx_desc - #define I40E_MAX_BUFFER_TXD 8 #define I40E_MIN_TX_LEN 17 @@ -213,10 +230,8 @@ struct i40e_tx_buffer { struct i40e_rx_buffer { struct sk_buff *skb; - void *hdr_buf; dma_addr_t dma; struct page *page; - dma_addr_t page_dma; unsigned int page_offset; }; @@ -280,7 +295,6 @@ struct i40e_ring { u16 count; /* Number of descriptors */ u16 reg_idx; /* HW register index of the ring */ - u16 rx_hdr_len; u16 rx_buf_len; #define I40E_RX_DTYPE_NO_SPLIT 0 #define I40E_RX_DTYPE_HEADER_SPLIT 1 @@ -322,6 +336,7 @@ struct i40e_ring { struct i40e_q_vector *q_vector; /* Backreference to associated vector */ struct rcu_head rcu; /* to avoid race on free */ + u16 next_to_alloc; } ____cacheline_internodealigned_in_smp; enum i40e_latency_range { @@ -345,9 +360,7 @@ struct i40e_ring_container { #define i40e_for_each_ring(pos, head) \ for (pos = (head).ring; pos != NULL; pos = pos->next) -bool i40e_alloc_rx_buffers_ps(struct i40e_ring *rxr, u16 cleaned_count); -bool i40e_alloc_rx_buffers_1buf(struct i40e_ring *rxr, u16 cleaned_count); -void i40e_alloc_rx_headers(struct i40e_ring *rxr); +bool i40e_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count); netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev); void i40e_clean_tx_ring(struct i40e_ring *tx_ring); void i40e_clean_rx_ring(struct i40e_ring *rx_ring); From 19b85e677df44b954c23a47395edf5a6c379771b Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Mon, 18 Apr 2016 11:33:45 -0700 Subject: [PATCH 1373/1649] i40evf: Drop packet split receive routine As part of preparation for the rx-refactor, remove the packet split receive routine and ancillary code. Some of the split related context set up code stays in i40e_virtchnl_pf.c in case an older VF driver tries to load and still wants to use packet split. 
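As a rough standalone model (plain C with simplified, made-up types; not the driver's structures or exact flow), the sketch below summarizes the single-buffer, page-recycling refill scheme that replaces packet split in this series: each descriptor points at half of a page, hdr_addr is always written as zero because there is no header buffer any more, and a page is reused by flipping page_offset between the two halves for as long as the driver is its only owner.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define MODEL_PAGE_SIZE	4096
#define MODEL_BUF_LEN	2048	/* mirrors I40E_RXBUFFER_2048 */

struct model_rx_buffer {
	void *page;			/* backing "page" (plain malloc here) */
	unsigned int page_offset;	/* 0 or 2048 */
	int pagecnt;			/* stands in for page_count() */
};

struct model_rx_desc {
	uint64_t pkt_addr;		/* address of the half-page data buffer */
	uint64_t hdr_addr;		/* always 0: no header split */
};

/* Recycle the existing page if there is one, otherwise allocate a new one. */
static bool model_alloc_mapped_page(struct model_rx_buffer *bi)
{
	if (bi->page)			/* recycling path, the common case */
		return true;

	bi->page = aligned_alloc(MODEL_PAGE_SIZE, MODEL_PAGE_SIZE);
	if (!bi->page)
		return false;
	bi->page_offset = 0;
	bi->pagecnt = 1;
	return true;
}

/* Refill one descriptor the way the refactored refill loop does. */
static bool model_refill_desc(struct model_rx_desc *desc,
			      struct model_rx_buffer *bi)
{
	if (!model_alloc_mapped_page(bi))
		return false;

	desc->pkt_addr = (uint64_t)(uintptr_t)bi->page + bi->page_offset;
	desc->hdr_addr = 0;
	return true;
}

/* Once the packet is handed to the stack, flip to the other half of the
 * page if we still own it exclusively; otherwise drop it so the next
 * refill allocates a fresh page.
 */
static void model_reuse_or_drop(struct model_rx_buffer *bi)
{
	if (bi->pagecnt == 1) {
		bi->page_offset ^= MODEL_BUF_LEN;
	} else {
		free(bi->page);
		bi->page = NULL;
	}
}

int main(void)
{
	struct model_rx_buffer bi = { 0 };
	struct model_rx_desc desc;
	int i;

	for (i = 0; i < 4; i++) {
		if (!model_refill_desc(&desc, &bi))
			return 1;
		printf("refill %d uses page offset %u\n", i, bi.page_offset);
		model_reuse_or_drop(&bi);
	}
	free(bi.page);
	return 0;
}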
Signed-off-by: Jesse Brandeburg Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- .../ethernet/intel/i40e/i40e_virtchnl_pf.c | 2 +- drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 21 +------------- drivers/net/ethernet/intel/i40evf/i40e_txrx.h | 7 ----- drivers/net/ethernet/intel/i40evf/i40evf.h | 2 -- .../ethernet/intel/i40evf/i40evf_ethtool.c | 14 ---------- .../net/ethernet/intel/i40evf/i40evf_main.c | 28 +------------------ .../ethernet/intel/i40evf/i40evf_virtchnl.c | 4 --- 7 files changed, 3 insertions(+), 75 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 6b9db7983693..36aa33af45c0 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -590,7 +590,7 @@ static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_id, } rx_ctx.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT; - /* set splitalways mode 10b */ + /* set split mode 10b */ rx_ctx.dtype = I40E_RX_DTYPE_HEADER_SPLIT; } diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c index a37a3f34ed4f..61d4a7a8e0be 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c @@ -504,22 +504,6 @@ void i40evf_clean_rx_ring(struct i40e_ring *rx_ring) if (!rx_ring->rx_bi) return; - if (ring_is_ps_enabled(rx_ring)) { - int bufsz = ALIGN(rx_ring->rx_hdr_len, 256) * rx_ring->count; - - rx_bi = &rx_ring->rx_bi[0]; - if (rx_bi->hdr_buf) { - dma_free_coherent(dev, - bufsz, - rx_bi->hdr_buf, - rx_bi->dma); - for (i = 0; i < rx_ring->count; i++) { - rx_bi = &rx_ring->rx_bi[i]; - rx_bi->dma = 0; - rx_bi->hdr_buf = NULL; - } - } - } /* Free all the Rx ring sk_buffs */ for (i = 0; i < rx_ring->count; i++) { rx_bi = &rx_ring->rx_bi[i]; @@ -1435,10 +1419,7 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget) i40e_for_each_ring(ring, q_vector->rx) { int cleaned; - if (ring_is_ps_enabled(ring)) - cleaned = i40e_clean_rx_irq_ps(ring, budget_per_ring); - else - cleaned = i40e_clean_rx_irq_1buf(ring, budget_per_ring); + cleaned = i40e_clean_rx_irq_1buf(ring, budget_per_ring); work_done += cleaned; /* if we clean as many as budgeted, we must not be done */ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h index 3b3f9764c235..f24a97edbd4c 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h @@ -244,16 +244,9 @@ struct i40e_rx_queue_stats { enum i40e_ring_state_t { __I40E_TX_FDIR_INIT_DONE, __I40E_TX_XPS_INIT_DONE, - __I40E_RX_PS_ENABLED, __I40E_RX_16BYTE_DESC_ENABLED, }; -#define ring_is_ps_enabled(ring) \ - test_bit(__I40E_RX_PS_ENABLED, &(ring)->state) -#define set_ring_ps_enabled(ring) \ - set_bit(__I40E_RX_PS_ENABLED, &(ring)->state) -#define clear_ring_ps_enabled(ring) \ - clear_bit(__I40E_RX_PS_ENABLED, &(ring)->state) #define ring_is_16byte_desc_enabled(ring) \ test_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state) #define set_ring_16byte_desc_enabled(ring) \ diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h index 25afabf999d0..83ccc58894e5 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf.h +++ b/drivers/net/ethernet/intel/i40evf/i40evf.h @@ -209,8 +209,6 @@ struct i40evf_adapter { u32 flags; #define I40EVF_FLAG_RX_CSUM_ENABLED BIT(0) #define I40EVF_FLAG_RX_1BUF_CAPABLE BIT(1) -#define 
I40EVF_FLAG_RX_PS_CAPABLE BIT(2) -#define I40EVF_FLAG_RX_PS_ENABLED BIT(3) #define I40EVF_FLAG_IMIR_ENABLED BIT(5) #define I40EVF_FLAG_MQ_CAPABLE BIT(6) #define I40EVF_FLAG_NEED_LINK_UPDATE BIT(7) diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c index 5a48ee07688f..e972ebcb1ac1 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c @@ -527,12 +527,8 @@ static int i40evf_set_rxfh(struct net_device *netdev, const u32 *indir, **/ static u32 i40evf_get_priv_flags(struct net_device *dev) { - struct i40evf_adapter *adapter = netdev_priv(dev); u32 ret_flags = 0; - ret_flags |= adapter->flags & I40EVF_FLAG_RX_PS_ENABLED ? - I40EVF_PRIV_FLAGS_PS : 0; - return ret_flags; } @@ -546,16 +542,6 @@ static int i40evf_set_priv_flags(struct net_device *dev, u32 flags) struct i40evf_adapter *adapter = netdev_priv(dev); bool reset_required = false; - if ((flags & I40EVF_PRIV_FLAGS_PS) && - !(adapter->flags & I40EVF_FLAG_RX_PS_ENABLED)) { - adapter->flags |= I40EVF_FLAG_RX_PS_ENABLED; - reset_required = true; - } else if (!(flags & I40EVF_PRIV_FLAGS_PS) && - (adapter->flags & I40EVF_FLAG_RX_PS_ENABLED)) { - adapter->flags &= ~I40EVF_FLAG_RX_PS_ENABLED; - reset_required = true; - } - /* if needed, issue reset to cause things to take effect */ if (reset_required) i40evf_schedule_reset(adapter); diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c index 9f0bd7acc22a..52408bc103d6 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c @@ -641,28 +641,11 @@ static void i40evf_configure_tx(struct i40evf_adapter *adapter) static void i40evf_configure_rx(struct i40evf_adapter *adapter) { struct i40e_hw *hw = &adapter->hw; - struct net_device *netdev = adapter->netdev; - int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; int i; - int rx_buf_len; - - - /* Set the RX buffer length according to the mode */ - if (adapter->flags & I40EVF_FLAG_RX_PS_ENABLED || - netdev->mtu <= ETH_DATA_LEN) - rx_buf_len = I40EVF_RXBUFFER_2048; - else - rx_buf_len = ALIGN(max_frame, 1024); for (i = 0; i < adapter->num_active_queues; i++) { adapter->rx_rings[i].tail = hw->hw_addr + I40E_QRX_TAIL1(i); - adapter->rx_rings[i].rx_buf_len = rx_buf_len; - if (adapter->flags & I40EVF_FLAG_RX_PS_ENABLED) { - set_ring_ps_enabled(&adapter->rx_rings[i]); - adapter->rx_rings[i].rx_hdr_len = I40E_RX_HDR_SIZE; - } else { - clear_ring_ps_enabled(&adapter->rx_rings[i]); - } + adapter->rx_rings[i].rx_buf_len = I40EVF_RXBUFFER_2048; } } @@ -1007,12 +990,7 @@ static void i40evf_configure(struct i40evf_adapter *adapter) for (i = 0; i < adapter->num_active_queues; i++) { struct i40e_ring *ring = &adapter->rx_rings[i]; - if (adapter->flags & I40EVF_FLAG_RX_PS_ENABLED) { - i40evf_alloc_rx_headers(ring); - i40evf_alloc_rx_buffers_ps(ring, ring->count); - } else { i40evf_alloc_rx_buffers_1buf(ring, ring->count); - } ring->next_to_use = ring->count - 1; writel(ring->next_to_use, ring->tail); } @@ -2424,10 +2402,6 @@ static void i40evf_init_task(struct work_struct *work) adapter->flags |= I40EVF_FLAG_RX_CSUM_ENABLED; adapter->flags |= I40EVF_FLAG_RX_1BUF_CAPABLE; - adapter->flags |= I40EVF_FLAG_RX_PS_CAPABLE; - - /* Default to single buffer rx, can be changed through ethtool. 
*/ - adapter->flags &= ~I40EVF_FLAG_RX_PS_ENABLED; netdev->netdev_ops = &i40evf_netdev_ops; i40evf_set_ethtool_ops(netdev); diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c index ba7fbc0608a6..c5d33a2cea87 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c @@ -270,10 +270,6 @@ void i40evf_configure_queues(struct i40evf_adapter *adapter) vqpi->rxq.max_pkt_size = adapter->netdev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN; vqpi->rxq.databuffer_size = adapter->rx_rings[i].rx_buf_len; - if (adapter->flags & I40EVF_FLAG_RX_PS_ENABLED) { - vqpi->rxq.splithdr_enabled = true; - vqpi->rxq.hdr_size = I40E_RX_HDR_SIZE; - } vqpi++; } From ab9ad98eb5f95b86490cc4c2ddbde5a0bc9bd5c6 Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Mon, 18 Apr 2016 11:33:46 -0700 Subject: [PATCH 1374/1649] i40evf: refactor receive routine This is part 2 of the Rx refactor series, just including changes to i40evf. This refactor aligns the receive routine with the one in ixgbe which was highly optimized. This reduces the code we have to maintain and allows for (hopefully) more readable and maintainable RX hot path. In order to do this: - consolidate the receive path into a single function that doesn't use packet split but *does* use pages for Rx buffers. - remove the old _1buf routine - consolidate several routines into helper functions - remove VF ethtool control over packet split - remove priv_flags interface since it is unused Signed-off-by: Jesse Brandeburg Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 954 +++++++++--------- drivers/net/ethernet/intel/i40evf/i40e_txrx.h | 37 +- drivers/net/ethernet/intel/i40evf/i40evf.h | 5 - .../ethernet/intel/i40evf/i40evf_ethtool.c | 51 - .../net/ethernet/intel/i40evf/i40evf_main.c | 3 +- 5 files changed, 509 insertions(+), 541 deletions(-) diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c index 61d4a7a8e0be..fd7dae46c5d8 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c @@ -496,7 +496,6 @@ err: void i40evf_clean_rx_ring(struct i40e_ring *rx_ring) { struct device *dev = rx_ring->dev; - struct i40e_rx_buffer *rx_bi; unsigned long bi_size; u16 i; @@ -506,30 +505,20 @@ void i40evf_clean_rx_ring(struct i40e_ring *rx_ring) /* Free all the Rx ring sk_buffs */ for (i = 0; i < rx_ring->count; i++) { - rx_bi = &rx_ring->rx_bi[i]; - if (rx_bi->dma) { - dma_unmap_single(dev, - rx_bi->dma, - rx_ring->rx_buf_len, - DMA_FROM_DEVICE); - rx_bi->dma = 0; - } + struct i40e_rx_buffer *rx_bi = &rx_ring->rx_bi[i]; + if (rx_bi->skb) { dev_kfree_skb(rx_bi->skb); rx_bi->skb = NULL; } - if (rx_bi->page) { - if (rx_bi->page_dma) { - dma_unmap_page(dev, - rx_bi->page_dma, - PAGE_SIZE, - DMA_FROM_DEVICE); - rx_bi->page_dma = 0; - } - __free_page(rx_bi->page); - rx_bi->page = NULL; - rx_bi->page_offset = 0; - } + if (!rx_bi->page) + continue; + + dma_unmap_page(dev, rx_bi->dma, PAGE_SIZE, DMA_FROM_DEVICE); + __free_pages(rx_bi->page, 0); + + rx_bi->page = NULL; + rx_bi->page_offset = 0; } bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count; @@ -538,6 +527,7 @@ void i40evf_clean_rx_ring(struct i40e_ring *rx_ring) /* Zero out the descriptor ring */ memset(rx_ring->desc, 0, rx_ring->size); + rx_ring->next_to_alloc = 0; rx_ring->next_to_clean = 0; rx_ring->next_to_use = 0; } @@ -561,37 +551,6 @@ 
void i40evf_free_rx_resources(struct i40e_ring *rx_ring) } } -/** - * i40evf_alloc_rx_headers - allocate rx header buffers - * @rx_ring: ring to alloc buffers - * - * Allocate rx header buffers for the entire ring. As these are static, - * this is only called when setting up a new ring. - **/ -void i40evf_alloc_rx_headers(struct i40e_ring *rx_ring) -{ - struct device *dev = rx_ring->dev; - struct i40e_rx_buffer *rx_bi; - dma_addr_t dma; - void *buffer; - int buf_size; - int i; - - if (rx_ring->rx_bi[0].hdr_buf) - return; - /* Make sure the buffers don't cross cache line boundaries. */ - buf_size = ALIGN(rx_ring->rx_hdr_len, 256); - buffer = dma_alloc_coherent(dev, buf_size * rx_ring->count, - &dma, GFP_KERNEL); - if (!buffer) - return; - for (i = 0; i < rx_ring->count; i++) { - rx_bi = &rx_ring->rx_bi[i]; - rx_bi->dma = dma + (i * buf_size); - rx_bi->hdr_buf = buffer + (i * buf_size); - } -} - /** * i40evf_setup_rx_descriptors - Allocate Rx descriptors * @rx_ring: Rx descriptor ring (for a specific queue) to setup @@ -613,9 +572,7 @@ int i40evf_setup_rx_descriptors(struct i40e_ring *rx_ring) u64_stats_init(&rx_ring->syncp); /* Round up to nearest 4K */ - rx_ring->size = ring_is_16byte_desc_enabled(rx_ring) - ? rx_ring->count * sizeof(union i40e_16byte_rx_desc) - : rx_ring->count * sizeof(union i40e_32byte_rx_desc); + rx_ring->size = rx_ring->count * sizeof(union i40e_32byte_rx_desc); rx_ring->size = ALIGN(rx_ring->size, 4096); rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, &rx_ring->dma, GFP_KERNEL); @@ -626,6 +583,7 @@ int i40evf_setup_rx_descriptors(struct i40e_ring *rx_ring) goto err; } + rx_ring->next_to_alloc = 0; rx_ring->next_to_clean = 0; rx_ring->next_to_use = 0; @@ -644,6 +602,10 @@ err: static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val) { rx_ring->next_to_use = val; + + /* update next to alloc since we have filled the ring */ + rx_ring->next_to_alloc = val; + /* Force memory writes to complete before letting h/w * know there are new descriptors to fetch. (Only * applicable for weak-ordered memory model archs, @@ -654,164 +616,48 @@ static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val) } /** - * i40evf_alloc_rx_buffers_ps - Replace used receive buffers; packet split - * @rx_ring: ring to place buffers on - * @cleaned_count: number of buffers to replace + * i40e_alloc_mapped_page - recycle or make a new page + * @rx_ring: ring to use + * @bi: rx_buffer struct to modify * - * Returns true if any errors on allocation + * Returns true if the page was successfully allocated or + * reused. **/ -bool i40evf_alloc_rx_buffers_ps(struct i40e_ring *rx_ring, u16 cleaned_count) +static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring, + struct i40e_rx_buffer *bi) { - u16 i = rx_ring->next_to_use; - union i40e_rx_desc *rx_desc; - struct i40e_rx_buffer *bi; - const int current_node = numa_node_id(); + struct page *page = bi->page; + dma_addr_t dma; - /* do nothing if no valid netdev defined */ - if (!rx_ring->netdev || !cleaned_count) - return false; - - while (cleaned_count--) { - rx_desc = I40E_RX_DESC(rx_ring, i); - bi = &rx_ring->rx_bi[i]; - - if (bi->skb) /* desc is in use */ - goto no_buffers; - - /* If we've been moved to a different NUMA node, release the - * page so we can get a new one on the current node. 
- */ - if (bi->page && page_to_nid(bi->page) != current_node) { - dma_unmap_page(rx_ring->dev, - bi->page_dma, - PAGE_SIZE, - DMA_FROM_DEVICE); - __free_page(bi->page); - bi->page = NULL; - bi->page_dma = 0; - rx_ring->rx_stats.realloc_count++; - } else if (bi->page) { - rx_ring->rx_stats.page_reuse_count++; - } - - if (!bi->page) { - bi->page = alloc_page(GFP_ATOMIC); - if (!bi->page) { - rx_ring->rx_stats.alloc_page_failed++; - goto no_buffers; - } - bi->page_dma = dma_map_page(rx_ring->dev, - bi->page, - 0, - PAGE_SIZE, - DMA_FROM_DEVICE); - if (dma_mapping_error(rx_ring->dev, bi->page_dma)) { - rx_ring->rx_stats.alloc_page_failed++; - __free_page(bi->page); - bi->page = NULL; - bi->page_dma = 0; - bi->page_offset = 0; - goto no_buffers; - } - bi->page_offset = 0; - } - - /* Refresh the desc even if buffer_addrs didn't change - * because each write-back erases this info. - */ - rx_desc->read.pkt_addr = - cpu_to_le64(bi->page_dma + bi->page_offset); - rx_desc->read.hdr_addr = cpu_to_le64(bi->dma); - i++; - if (i == rx_ring->count) - i = 0; + /* since we are recycling buffers we should seldom need to alloc */ + if (likely(page)) { + rx_ring->rx_stats.page_reuse_count++; + return true; } - if (rx_ring->next_to_use != i) - i40e_release_rx_desc(rx_ring, i); - - return false; - -no_buffers: - if (rx_ring->next_to_use != i) - i40e_release_rx_desc(rx_ring, i); - - /* make sure to come back via polling to try again after - * allocation failure - */ - return true; -} - -/** - * i40evf_alloc_rx_buffers_1buf - Replace used receive buffers; single buffer - * @rx_ring: ring to place buffers on - * @cleaned_count: number of buffers to replace - * - * Returns true if any errors on allocation - **/ -bool i40evf_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count) -{ - u16 i = rx_ring->next_to_use; - union i40e_rx_desc *rx_desc; - struct i40e_rx_buffer *bi; - struct sk_buff *skb; - - /* do nothing if no valid netdev defined */ - if (!rx_ring->netdev || !cleaned_count) + /* alloc new page for storage */ + page = dev_alloc_page(); + if (unlikely(!page)) { + rx_ring->rx_stats.alloc_page_failed++; return false; - - while (cleaned_count--) { - rx_desc = I40E_RX_DESC(rx_ring, i); - bi = &rx_ring->rx_bi[i]; - skb = bi->skb; - - if (!skb) { - skb = __netdev_alloc_skb_ip_align(rx_ring->netdev, - rx_ring->rx_buf_len, - GFP_ATOMIC | - __GFP_NOWARN); - if (!skb) { - rx_ring->rx_stats.alloc_buff_failed++; - goto no_buffers; - } - /* initialize queue mapping */ - skb_record_rx_queue(skb, rx_ring->queue_index); - bi->skb = skb; - } - - if (!bi->dma) { - bi->dma = dma_map_single(rx_ring->dev, - skb->data, - rx_ring->rx_buf_len, - DMA_FROM_DEVICE); - if (dma_mapping_error(rx_ring->dev, bi->dma)) { - rx_ring->rx_stats.alloc_buff_failed++; - bi->dma = 0; - dev_kfree_skb(bi->skb); - bi->skb = NULL; - goto no_buffers; - } - } - - rx_desc->read.pkt_addr = cpu_to_le64(bi->dma); - rx_desc->read.hdr_addr = 0; - i++; - if (i == rx_ring->count) - i = 0; } - if (rx_ring->next_to_use != i) - i40e_release_rx_desc(rx_ring, i); + /* map page for use */ + dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE); - return false; - -no_buffers: - if (rx_ring->next_to_use != i) - i40e_release_rx_desc(rx_ring, i); - - /* make sure to come back via polling to try again after - * allocation failure + /* if mapping failed free memory back to system since + * there isn't much point in holding memory we can't use */ + if (dma_mapping_error(rx_ring->dev, dma)) { + __free_pages(page, 0); + 
rx_ring->rx_stats.alloc_page_failed++; + return false; + } + + bi->dma = dma; + bi->page = page; + bi->page_offset = 0; + return true; } @@ -833,25 +679,96 @@ static void i40e_receive_skb(struct i40e_ring *rx_ring, napi_gro_receive(&q_vector->napi, skb); } +/** + * i40evf_alloc_rx_buffers - Replace used receive buffers + * @rx_ring: ring to place buffers on + * @cleaned_count: number of buffers to replace + * + * Returns false if all allocations were successful, true if any fail + **/ +bool i40evf_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count) +{ + u16 ntu = rx_ring->next_to_use; + union i40e_rx_desc *rx_desc; + struct i40e_rx_buffer *bi; + + /* do nothing if no valid netdev defined */ + if (!rx_ring->netdev || !cleaned_count) + return false; + + rx_desc = I40E_RX_DESC(rx_ring, ntu); + bi = &rx_ring->rx_bi[ntu]; + + do { + if (!i40e_alloc_mapped_page(rx_ring, bi)) + goto no_buffers; + + /* Refresh the desc even if buffer_addrs didn't change + * because each write-back erases this info. + */ + rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset); + rx_desc->read.hdr_addr = 0; + + rx_desc++; + bi++; + ntu++; + if (unlikely(ntu == rx_ring->count)) { + rx_desc = I40E_RX_DESC(rx_ring, 0); + bi = rx_ring->rx_bi; + ntu = 0; + } + + /* clear the status bits for the next_to_use descriptor */ + rx_desc->wb.qword1.status_error_len = 0; + + cleaned_count--; + } while (cleaned_count); + + if (rx_ring->next_to_use != ntu) + i40e_release_rx_desc(rx_ring, ntu); + + return false; + +no_buffers: + if (rx_ring->next_to_use != ntu) + i40e_release_rx_desc(rx_ring, ntu); + + /* make sure to come back via polling to try again after + * allocation failure + */ + return true; +} + /** * i40e_rx_checksum - Indicate in skb if hw indicated a good cksum * @vsi: the VSI we care about * @skb: skb currently being received and modified - * @rx_status: status value of last descriptor in packet - * @rx_error: error value of last descriptor in packet - * @rx_ptype: ptype value of last descriptor in packet + * @rx_desc: the receive descriptor + * + * skb->protocol must be set before this function is called **/ static inline void i40e_rx_checksum(struct i40e_vsi *vsi, struct sk_buff *skb, - u32 rx_status, - u32 rx_error, - u16 rx_ptype) + union i40e_rx_desc *rx_desc) { - struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(rx_ptype); + struct i40e_rx_ptype_decoded decoded; bool ipv4, ipv6, tunnel = false; + u32 rx_error, rx_status; + u8 ptype; + u64 qword; + + qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); + ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT; + rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >> + I40E_RXD_QW1_ERROR_SHIFT; + rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >> + I40E_RXD_QW1_STATUS_SHIFT; + decoded = decode_rx_desc_ptype(ptype); skb->ip_summed = CHECKSUM_NONE; + skb_checksum_none_assert(skb); + /* Rx csum enabled and ip headers found? 
*/ if (!(vsi->netdev->features & NETIF_F_RXCSUM)) return; @@ -917,7 +834,7 @@ checksum_fail: * * Returns a hash type to be used by skb_set_hash **/ -static inline enum pkt_hash_types i40e_ptype_to_htype(u8 ptype) +static inline int i40e_ptype_to_htype(u8 ptype) { struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype); @@ -945,7 +862,7 @@ static inline void i40e_rx_hash(struct i40e_ring *ring, u8 rx_ptype) { u32 hash; - const __le64 rss_mask = + const __le64 rss_mask = cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH << I40E_RX_DESC_STATUS_FLTSTAT_SHIFT); @@ -959,315 +876,411 @@ static inline void i40e_rx_hash(struct i40e_ring *ring, } /** - * i40e_clean_rx_irq_ps - Reclaim resources after receive; packet split - * @rx_ring: rx ring to clean - * @budget: how many cleans we're allowed + * i40evf_process_skb_fields - Populate skb header fields from Rx descriptor + * @rx_ring: rx descriptor ring packet is being transacted on + * @rx_desc: pointer to the EOP Rx descriptor + * @skb: pointer to current skb being populated + * @rx_ptype: the packet type decoded by hardware * - * Returns true if there's any budget left (e.g. the clean is finished) + * This function checks the ring, descriptor, and packet information in + * order to populate the hash, checksum, VLAN, protocol, and + * other fields within the skb. **/ -static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, const int budget) +static inline +void i40evf_process_skb_fields(struct i40e_ring *rx_ring, + union i40e_rx_desc *rx_desc, struct sk_buff *skb, + u8 rx_ptype) { - unsigned int total_rx_bytes = 0, total_rx_packets = 0; - u16 rx_packet_len, rx_header_len, rx_sph, rx_hbo; - u16 cleaned_count = I40E_DESC_UNUSED(rx_ring); - struct i40e_vsi *vsi = rx_ring->vsi; - u16 i = rx_ring->next_to_clean; - union i40e_rx_desc *rx_desc; - u32 rx_error, rx_status; - bool failure = false; - u8 rx_ptype; - u64 qword; - u32 copysize; + i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype); - do { - struct i40e_rx_buffer *rx_bi; - struct sk_buff *skb; - u16 vlan_tag; - /* return some buffers to hardware, one at a time is too slow */ - if (cleaned_count >= I40E_RX_BUFFER_WRITE) { - failure = failure || - i40evf_alloc_rx_buffers_ps(rx_ring, - cleaned_count); - cleaned_count = 0; - } + /* modifies the skb - consumes the enet header */ + skb->protocol = eth_type_trans(skb, rx_ring->netdev); - i = rx_ring->next_to_clean; - rx_desc = I40E_RX_DESC(rx_ring, i); - qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); - rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >> - I40E_RXD_QW1_STATUS_SHIFT; + i40e_rx_checksum(rx_ring->vsi, skb, rx_desc); - if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT))) - break; - - /* This memory barrier is needed to keep us from reading - * any other fields out of the rx_desc until we know the - * DD bit is set. 
- */ - dma_rmb(); - /* sync header buffer for reading */ - dma_sync_single_range_for_cpu(rx_ring->dev, - rx_ring->rx_bi[0].dma, - i * rx_ring->rx_hdr_len, - rx_ring->rx_hdr_len, - DMA_FROM_DEVICE); - rx_bi = &rx_ring->rx_bi[i]; - skb = rx_bi->skb; - if (likely(!skb)) { - skb = __netdev_alloc_skb_ip_align(rx_ring->netdev, - rx_ring->rx_hdr_len, - GFP_ATOMIC | - __GFP_NOWARN); - if (!skb) { - rx_ring->rx_stats.alloc_buff_failed++; - failure = true; - break; - } - - /* initialize queue mapping */ - skb_record_rx_queue(skb, rx_ring->queue_index); - /* we are reusing so sync this buffer for CPU use */ - dma_sync_single_range_for_cpu(rx_ring->dev, - rx_ring->rx_bi[0].dma, - i * rx_ring->rx_hdr_len, - rx_ring->rx_hdr_len, - DMA_FROM_DEVICE); - } - rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >> - I40E_RXD_QW1_LENGTH_PBUF_SHIFT; - rx_header_len = (qword & I40E_RXD_QW1_LENGTH_HBUF_MASK) >> - I40E_RXD_QW1_LENGTH_HBUF_SHIFT; - rx_sph = (qword & I40E_RXD_QW1_LENGTH_SPH_MASK) >> - I40E_RXD_QW1_LENGTH_SPH_SHIFT; - - rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >> - I40E_RXD_QW1_ERROR_SHIFT; - rx_hbo = rx_error & BIT(I40E_RX_DESC_ERROR_HBO_SHIFT); - rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT); - - rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> - I40E_RXD_QW1_PTYPE_SHIFT; - /* sync half-page for reading */ - dma_sync_single_range_for_cpu(rx_ring->dev, - rx_bi->page_dma, - rx_bi->page_offset, - PAGE_SIZE / 2, - DMA_FROM_DEVICE); - prefetch(page_address(rx_bi->page) + rx_bi->page_offset); - rx_bi->skb = NULL; - cleaned_count++; - copysize = 0; - if (rx_hbo || rx_sph) { - int len; - - if (rx_hbo) - len = I40E_RX_HDR_SIZE; - else - len = rx_header_len; - memcpy(__skb_put(skb, len), rx_bi->hdr_buf, len); - } else if (skb->len == 0) { - int len; - unsigned char *va = page_address(rx_bi->page) + - rx_bi->page_offset; - - len = min(rx_packet_len, rx_ring->rx_hdr_len); - memcpy(__skb_put(skb, len), va, len); - copysize = len; - rx_packet_len -= len; - } - /* Get the rest of the data if this was a header split */ - if (rx_packet_len) { - skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, - rx_bi->page, - rx_bi->page_offset + copysize, - rx_packet_len, I40E_RXBUFFER_2048); - - /* If the page count is more than 2, then both halves - * of the page are used and we need to free it. Do it - * here instead of in the alloc code. Otherwise one - * of the half-pages might be released between now and - * then, and we wouldn't know which one to use. - * Don't call get_page and free_page since those are - * both expensive atomic operations that just change - * the refcount in opposite directions. Just give the - * page to the stack; he can have our refcount. - */ - if (page_count(rx_bi->page) > 2) { - dma_unmap_page(rx_ring->dev, - rx_bi->page_dma, - PAGE_SIZE, - DMA_FROM_DEVICE); - rx_bi->page = NULL; - rx_bi->page_dma = 0; - rx_ring->rx_stats.realloc_count++; - } else { - get_page(rx_bi->page); - /* switch to the other half-page here; the - * allocation code programs the right addr - * into HW. If we haven't used this half-page, - * the address won't be changed, and HW can - * just use it next time through. 
- */ - rx_bi->page_offset ^= PAGE_SIZE / 2; - } - - } - I40E_RX_INCREMENT(rx_ring, i); - - if (unlikely( - !(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) { - struct i40e_rx_buffer *next_buffer; - - next_buffer = &rx_ring->rx_bi[i]; - next_buffer->skb = skb; - rx_ring->rx_stats.non_eop_descs++; - continue; - } - - /* ERR_MASK will only have valid bits if EOP set */ - if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) { - dev_kfree_skb_any(skb); - continue; - } - - i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype); - - /* probably a little skewed due to removing CRC */ - total_rx_bytes += skb->len; - total_rx_packets++; - - skb->protocol = eth_type_trans(skb, rx_ring->netdev); - - i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype); - - vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT) - ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) - : 0; -#ifdef I40E_FCOE - if (unlikely( - i40e_rx_is_fcoe(rx_ptype) && - !i40e_fcoe_handle_offload(rx_ring, rx_desc, skb))) { - dev_kfree_skb_any(skb); - continue; - } -#endif - i40e_receive_skb(rx_ring, skb, vlan_tag); - - rx_desc->wb.qword1.status_error_len = 0; - - } while (likely(total_rx_packets < budget)); - - u64_stats_update_begin(&rx_ring->syncp); - rx_ring->stats.packets += total_rx_packets; - rx_ring->stats.bytes += total_rx_bytes; - u64_stats_update_end(&rx_ring->syncp); - rx_ring->q_vector->rx.total_packets += total_rx_packets; - rx_ring->q_vector->rx.total_bytes += total_rx_bytes; - - return failure ? budget : total_rx_packets; + skb_record_rx_queue(skb, rx_ring->queue_index); } /** - * i40e_clean_rx_irq_1buf - Reclaim resources after receive; single buffer - * @rx_ring: rx ring to clean - * @budget: how many cleans we're allowed + * i40e_pull_tail - i40e specific version of skb_pull_tail + * @rx_ring: rx descriptor ring packet is being transacted on + * @skb: pointer to current skb being adjusted * - * Returns number of packets cleaned + * This function is an i40e specific version of __pskb_pull_tail. The + * main difference between this version and the original function is that + * this function can make several assumptions about the state of things + * that allow for significant optimizations versus the standard function. + * As a result we can do things like drop a frag and maintain an accurate + * truesize for the skb. + */ +static void i40e_pull_tail(struct i40e_ring *rx_ring, struct sk_buff *skb) +{ + struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; + unsigned char *va; + unsigned int pull_len; + + /* it is valid to use page_address instead of kmap since we are + * working with pages allocated out of the lomem pool per + * alloc_page(GFP_ATOMIC) + */ + va = skb_frag_address(frag); + + /* we need the header to contain the greater of either ETH_HLEN or + * 60 bytes if the skb->len is less than 60 for skb_pad. + */ + pull_len = eth_get_headlen(va, I40E_RX_HDR_SIZE); + + /* align pull length to size of long to optimize memcpy performance */ + skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long))); + + /* update all of the pointers */ + skb_frag_size_sub(frag, pull_len); + frag->page_offset += pull_len; + skb->data_len -= pull_len; + skb->tail += pull_len; +} + +/** + * i40e_cleanup_headers - Correct empty headers + * @rx_ring: rx descriptor ring packet is being transacted on + * @skb: pointer to current skb being fixed + * + * Also address the case where we are pulling data in on pages only + * and as such no data is present in the skb header. 
+ * + * In addition if skb is not at least 60 bytes we need to pad it so that + * it is large enough to qualify as a valid Ethernet frame. + * + * Returns true if an error was encountered and skb was freed. **/ -static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget) +static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb) +{ + /* place header in linear portion of buffer */ + if (skb_is_nonlinear(skb)) + i40e_pull_tail(rx_ring, skb); + + /* if eth_skb_pad returns an error the skb was freed */ + if (eth_skb_pad(skb)) + return true; + + return false; +} + +/** + * i40e_reuse_rx_page - page flip buffer and store it back on the ring + * @rx_ring: rx descriptor ring to store buffers on + * @old_buff: donor buffer to have page reused + * + * Synchronizes page for reuse by the adapter + **/ +static void i40e_reuse_rx_page(struct i40e_ring *rx_ring, + struct i40e_rx_buffer *old_buff) +{ + struct i40e_rx_buffer *new_buff; + u16 nta = rx_ring->next_to_alloc; + + new_buff = &rx_ring->rx_bi[nta]; + + /* update, and store next to alloc */ + nta++; + rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; + + /* transfer page from old buffer to new buffer */ + *new_buff = *old_buff; +} + +/** + * i40e_page_is_reserved - check if reuse is possible + * @page: page struct to check + */ +static inline bool i40e_page_is_reserved(struct page *page) +{ + return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page); +} + +/** + * i40e_add_rx_frag - Add contents of Rx buffer to sk_buff + * @rx_ring: rx descriptor ring to transact packets on + * @rx_buffer: buffer containing page to add + * @rx_desc: descriptor containing length of buffer written by hardware + * @skb: sk_buff to place the data into + * + * This function will add the data contained in rx_buffer->page to the skb. + * This is done either through a direct copy if the data in the buffer is + * less than the skb header size, otherwise it will just attach the page as + * a frag to the skb. + * + * The function will then update the page offset if necessary and return + * true if the buffer can be reused by the adapter. + **/ +static bool i40e_add_rx_frag(struct i40e_ring *rx_ring, + struct i40e_rx_buffer *rx_buffer, + union i40e_rx_desc *rx_desc, + struct sk_buff *skb) +{ + struct page *page = rx_buffer->page; + u64 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); + unsigned int size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >> + I40E_RXD_QW1_LENGTH_PBUF_SHIFT; +#if (PAGE_SIZE < 8192) + unsigned int truesize = I40E_RXBUFFER_2048; +#else + unsigned int truesize = ALIGN(size, L1_CACHE_BYTES); + unsigned int last_offset = PAGE_SIZE - I40E_RXBUFFER_2048; +#endif + + /* will the data fit in the skb we allocated? 
if so, just + * copy it as it is pretty small anyway + */ + if ((size <= I40E_RX_HDR_SIZE) && !skb_is_nonlinear(skb)) { + unsigned char *va = page_address(page) + rx_buffer->page_offset; + + memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long))); + + /* page is not reserved, we can reuse buffer as-is */ + if (likely(!i40e_page_is_reserved(page))) + return true; + + /* this page cannot be reused so discard it */ + __free_pages(page, 0); + return false; + } + + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, + rx_buffer->page_offset, size, truesize); + + /* avoid re-using remote pages */ + if (unlikely(i40e_page_is_reserved(page))) + return false; + +#if (PAGE_SIZE < 8192) + /* if we are only owner of page we can reuse it */ + if (unlikely(page_count(page) != 1)) + return false; + + /* flip page offset to other buffer */ + rx_buffer->page_offset ^= truesize; +#else + /* move offset up to the next cache line */ + rx_buffer->page_offset += truesize; + + if (rx_buffer->page_offset > last_offset) + return false; +#endif + + /* Even if we own the page, we are not allowed to use atomic_set() + * This would break get_page_unless_zero() users. + */ + get_page(rx_buffer->page); + + return true; +} + +/** + * i40evf_fetch_rx_buffer - Allocate skb and populate it + * @rx_ring: rx descriptor ring to transact packets on + * @rx_desc: descriptor containing info written by hardware + * + * This function allocates an skb on the fly, and populates it with the page + * data from the current receive descriptor, taking care to set up the skb + * correctly, as well as handling calling the page recycle function if + * necessary. + */ +static inline +struct sk_buff *i40evf_fetch_rx_buffer(struct i40e_ring *rx_ring, + union i40e_rx_desc *rx_desc) +{ + struct i40e_rx_buffer *rx_buffer; + struct sk_buff *skb; + struct page *page; + + rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean]; + page = rx_buffer->page; + prefetchw(page); + + skb = rx_buffer->skb; + + if (likely(!skb)) { + void *page_addr = page_address(page) + rx_buffer->page_offset; + + /* prefetch first cache line of first page */ + prefetch(page_addr); +#if L1_CACHE_BYTES < 128 + prefetch(page_addr + L1_CACHE_BYTES); +#endif + + /* allocate a skb to store the frags */ + skb = __napi_alloc_skb(&rx_ring->q_vector->napi, + I40E_RX_HDR_SIZE, + GFP_ATOMIC | __GFP_NOWARN); + if (unlikely(!skb)) { + rx_ring->rx_stats.alloc_buff_failed++; + return NULL; + } + + /* we will be copying header into skb->data in + * pskb_may_pull so it is in our interest to prefetch + * it now to avoid a possible cache miss + */ + prefetchw(skb->data); + } else { + rx_buffer->skb = NULL; + } + + /* we are reusing so sync this buffer for CPU use */ + dma_sync_single_range_for_cpu(rx_ring->dev, + rx_buffer->dma, + rx_buffer->page_offset, + I40E_RXBUFFER_2048, + DMA_FROM_DEVICE); + + /* pull page into skb */ + if (i40e_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) { + /* hand second half of page back to the ring */ + i40e_reuse_rx_page(rx_ring, rx_buffer); + rx_ring->rx_stats.page_reuse_count++; + } else { + /* we are not reusing the buffer so unmap it */ + dma_unmap_page(rx_ring->dev, rx_buffer->dma, PAGE_SIZE, + DMA_FROM_DEVICE); + } + + /* clear contents of buffer_info */ + rx_buffer->page = NULL; + + return skb; +} + +/** + * i40e_is_non_eop - process handling of non-EOP buffers + * @rx_ring: Rx ring being processed + * @rx_desc: Rx descriptor for current buffer + * @skb: Current socket buffer containing buffer in progress + * + * This function updates next to clean. 
If the buffer is an EOP buffer + * this function exits returning false, otherwise it will place the + * sk_buff in the next buffer to be chained and return true indicating + * that this is in fact a non-EOP buffer. + **/ +static bool i40e_is_non_eop(struct i40e_ring *rx_ring, + union i40e_rx_desc *rx_desc, + struct sk_buff *skb) +{ + u32 ntc = rx_ring->next_to_clean + 1; + + /* fetch, update, and store next to clean */ + ntc = (ntc < rx_ring->count) ? ntc : 0; + rx_ring->next_to_clean = ntc; + + prefetch(I40E_RX_DESC(rx_ring, ntc)); + + /* if we are the last buffer then there is nothing else to do */ +#define I40E_RXD_EOF BIT(I40E_RX_DESC_STATUS_EOF_SHIFT) + if (likely(i40e_test_staterr(rx_desc, I40E_RXD_EOF))) + return false; + + /* place skb in next buffer to be received */ + rx_ring->rx_bi[ntc].skb = skb; + rx_ring->rx_stats.non_eop_descs++; + + return true; +} + +/** + * i40e_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf + * @rx_ring: rx descriptor ring to transact packets on + * @budget: Total limit on number of packets to process + * + * This function provides a "bounce buffer" approach to Rx interrupt + * processing. The advantage to this is that on systems that have + * expensive overhead for IOMMU access this provides a means of avoiding + * it by maintaining the mapping of the page to the system. + * + * Returns amount of work completed + **/ +static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) { unsigned int total_rx_bytes = 0, total_rx_packets = 0; u16 cleaned_count = I40E_DESC_UNUSED(rx_ring); - struct i40e_vsi *vsi = rx_ring->vsi; - union i40e_rx_desc *rx_desc; - u32 rx_error, rx_status; - u16 rx_packet_len; bool failure = false; - u8 rx_ptype; - u64 qword; - u16 i; - do { - struct i40e_rx_buffer *rx_bi; + while (likely(total_rx_packets < budget)) { + union i40e_rx_desc *rx_desc; struct sk_buff *skb; + u32 rx_status; u16 vlan_tag; + u8 rx_ptype; + u64 qword; + /* return some buffers to hardware, one at a time is too slow */ if (cleaned_count >= I40E_RX_BUFFER_WRITE) { failure = failure || - i40evf_alloc_rx_buffers_1buf(rx_ring, - cleaned_count); + i40evf_alloc_rx_buffers(rx_ring, cleaned_count); cleaned_count = 0; } - i = rx_ring->next_to_clean; - rx_desc = I40E_RX_DESC(rx_ring, i); + rx_desc = I40E_RX_DESC(rx_ring, rx_ring->next_to_clean); + qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); + rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> + I40E_RXD_QW1_PTYPE_SHIFT; rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >> - I40E_RXD_QW1_STATUS_SHIFT; + I40E_RXD_QW1_STATUS_SHIFT; if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT))) break; + /* status_error_len will always be zero for unused descriptors + * because it's cleared in cleanup, and overlaps with hdr_addr + * which is always zero because packet split isn't used, if the + * hardware wrote DD then it will be non-zero + */ + if (!rx_desc->wb.qword1.status_error_len) + break; + /* This memory barrier is needed to keep us from reading * any other fields out of the rx_desc until we know the * DD bit is set. 
*/ dma_rmb(); - rx_bi = &rx_ring->rx_bi[i]; - skb = rx_bi->skb; - prefetch(skb->data); + skb = i40evf_fetch_rx_buffer(rx_ring, rx_desc); + if (!skb) + break; - rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >> - I40E_RXD_QW1_LENGTH_PBUF_SHIFT; - - rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >> - I40E_RXD_QW1_ERROR_SHIFT; - rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT); - - rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> - I40E_RXD_QW1_PTYPE_SHIFT; - rx_bi->skb = NULL; cleaned_count++; - /* Get the header and possibly the whole packet - * If this is an skb from previous receive dma will be 0 - */ - skb_put(skb, rx_packet_len); - dma_unmap_single(rx_ring->dev, rx_bi->dma, rx_ring->rx_buf_len, - DMA_FROM_DEVICE); - rx_bi->dma = 0; - - I40E_RX_INCREMENT(rx_ring, i); - - if (unlikely( - !(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) { - rx_ring->rx_stats.non_eop_descs++; + if (i40e_is_non_eop(rx_ring, rx_desc, skb)) continue; - } - /* ERR_MASK will only have valid bits if EOP set */ - if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) { + /* ERR_MASK will only have valid bits if EOP set, and + * what we are doing here is actually checking + * I40E_RX_DESC_ERROR_RXE_SHIFT, since it is the zeroth bit in + * the error field + */ + if (unlikely(i40e_test_staterr(rx_desc, BIT(I40E_RXD_QW1_ERROR_SHIFT)))) { dev_kfree_skb_any(skb); continue; } - i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype); + if (i40e_cleanup_headers(rx_ring, skb)) + continue; + /* probably a little skewed due to removing CRC */ total_rx_bytes += skb->len; - total_rx_packets++; - skb->protocol = eth_type_trans(skb, rx_ring->netdev); + /* populate checksum, VLAN, and protocol */ + i40evf_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype); - i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype); - vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT) - ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) - : 0; + vlan_tag = (qword & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) ? + le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0; + i40e_receive_skb(rx_ring, skb, vlan_tag); - rx_desc->wb.qword1.status_error_len = 0; - } while (likely(total_rx_packets < budget)); + /* update budget accounting */ + total_rx_packets++; + } u64_stats_update_begin(&rx_ring->syncp); rx_ring->stats.packets += total_rx_packets; @@ -1276,6 +1289,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget) rx_ring->q_vector->rx.total_packets += total_rx_packets; rx_ring->q_vector->rx.total_bytes += total_rx_bytes; + /* guarantee a trip back through this routine if there was a failure */ return failure ? budget : total_rx_packets; } @@ -1417,9 +1431,7 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget) budget_per_ring = max(budget/q_vector->num_ringpairs, 1); i40e_for_each_ring(ring, q_vector->rx) { - int cleaned; - - cleaned = i40e_clean_rx_irq_1buf(ring, budget_per_ring); + int cleaned = i40e_clean_rx_irq(ring, budget_per_ring); work_done += cleaned; /* if we clean as many as budgeted, we must not be done */ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h index f24a97edbd4c..4ba302e8a2df 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h @@ -102,8 +102,8 @@ enum i40e_dyn_idx_t { (((pf)->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) ? 
\ I40E_DEFAULT_RSS_HENA_EXPANDED : I40E_DEFAULT_RSS_HENA) -/* Supported Rx Buffer Sizes */ -#define I40E_RXBUFFER_512 512 /* Used for packet split */ +/* Supported Rx Buffer Sizes (a multiple of 128) */ +#define I40E_RXBUFFER_256 256 #define I40E_RXBUFFER_2048 2048 #define I40E_RXBUFFER_3072 3072 /* For FCoE MTU of 2158 */ #define I40E_RXBUFFER_4096 4096 @@ -114,9 +114,28 @@ enum i40e_dyn_idx_t { * reserve 2 more, and skb_shared_info adds an additional 384 bytes more, * this adds up to 512 bytes of extra data meaning the smallest allocation * we could have is 1K. - * i.e. RXBUFFER_512 --> size-1024 slab + * i.e. RXBUFFER_256 --> 960 byte skb (size-1024 slab) + * i.e. RXBUFFER_512 --> 1216 byte skb (size-2048 slab) */ -#define I40E_RX_HDR_SIZE I40E_RXBUFFER_512 +#define I40E_RX_HDR_SIZE I40E_RXBUFFER_256 +#define i40e_rx_desc i40e_32byte_rx_desc + +/** + * i40e_test_staterr - tests bits in Rx descriptor status and error fields + * @rx_desc: pointer to receive descriptor (in le64 format) + * @stat_err_bits: value to mask + * + * This function does some fast chicanery in order to return the + * value of the mask which is really only used for boolean tests. + * The status_error_len doesn't need to be shifted because it begins + * at offset zero. + */ +static inline bool i40e_test_staterr(union i40e_rx_desc *rx_desc, + const u64 stat_err_bits) +{ + return !!(rx_desc->wb.qword1.status_error_len & + cpu_to_le64(stat_err_bits)); +} /* How many Rx Buffers do we bundle into one write to the hardware ? */ #define I40E_RX_BUFFER_WRITE 16 /* Must be power of 2 */ @@ -142,8 +161,6 @@ enum i40e_dyn_idx_t { prefetch((n)); \ } while (0) -#define i40e_rx_desc i40e_32byte_rx_desc - #define I40E_MAX_BUFFER_TXD 8 #define I40E_MIN_TX_LEN 17 @@ -212,10 +229,8 @@ struct i40e_tx_buffer { struct i40e_rx_buffer { struct sk_buff *skb; - void *hdr_buf; dma_addr_t dma; struct page *page; - dma_addr_t page_dma; unsigned int page_offset; }; @@ -271,7 +286,6 @@ struct i40e_ring { u16 count; /* Number of descriptors */ u16 reg_idx; /* HW register index of the ring */ - u16 rx_hdr_len; u16 rx_buf_len; #define I40E_RX_DTYPE_NO_SPLIT 0 #define I40E_RX_DTYPE_HEADER_SPLIT 1 @@ -311,6 +325,7 @@ struct i40e_ring { struct i40e_q_vector *q_vector; /* Backreference to associated vector */ struct rcu_head rcu; /* to avoid race on free */ + u16 next_to_alloc; } ____cacheline_internodealigned_in_smp; enum i40e_latency_range { @@ -334,9 +349,7 @@ struct i40e_ring_container { #define i40e_for_each_ring(pos, head) \ for (pos = (head).ring; pos != NULL; pos = pos->next) -bool i40evf_alloc_rx_buffers_ps(struct i40e_ring *rxr, u16 cleaned_count); -bool i40evf_alloc_rx_buffers_1buf(struct i40e_ring *rxr, u16 cleaned_count); -void i40evf_alloc_rx_headers(struct i40e_ring *rxr); +bool i40evf_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count); netdev_tx_t i40evf_xmit_frame(struct sk_buff *skb, struct net_device *netdev); void i40evf_clean_tx_ring(struct i40e_ring *tx_ring); void i40evf_clean_rx_ring(struct i40e_ring *rx_ring); diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h index 83ccc58894e5..fa044a904208 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf.h +++ b/drivers/net/ethernet/intel/i40evf/i40evf.h @@ -80,9 +80,6 @@ struct i40e_vsi { #define I40EVF_REQ_DESCRIPTOR_MULTIPLE 32 /* Supported Rx Buffer Sizes */ -#define I40EVF_RXBUFFER_64 64 /* Used for packet split */ -#define I40EVF_RXBUFFER_128 128 /* Used for packet split */ -#define I40EVF_RXBUFFER_256 256 /* Used for packet 
split */ #define I40EVF_RXBUFFER_2048 2048 #define I40EVF_MAX_RXBUFFER 16384 /* largest size for single descriptor */ #define I40EVF_MAX_AQ_BUF_SIZE 4096 @@ -208,7 +205,6 @@ struct i40evf_adapter { u32 flags; #define I40EVF_FLAG_RX_CSUM_ENABLED BIT(0) -#define I40EVF_FLAG_RX_1BUF_CAPABLE BIT(1) #define I40EVF_FLAG_IMIR_ENABLED BIT(5) #define I40EVF_FLAG_MQ_CAPABLE BIT(6) #define I40EVF_FLAG_NEED_LINK_UPDATE BIT(7) @@ -293,7 +289,6 @@ struct i40evf_adapter { /* Ethtool Private Flags */ -#define I40EVF_PRIV_FLAGS_PS BIT(0) /* needed by i40evf_ethtool.c */ extern char i40evf_driver_name[]; diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c index e972ebcb1ac1..c9c202f6c521 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c @@ -63,12 +63,6 @@ static const struct i40evf_stats i40evf_gstrings_stats[] = { #define I40EVF_STATS_LEN(_dev) \ (I40EVF_GLOBAL_STATS_LEN + I40EVF_QUEUE_STATS_LEN(_dev)) -static const char i40evf_priv_flags_strings[][ETH_GSTRING_LEN] = { - "packet-split", -}; - -#define I40EVF_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40evf_priv_flags_strings) - /** * i40evf_get_settings - Get Link Speed and Duplex settings * @netdev: network interface device structure @@ -103,8 +97,6 @@ static int i40evf_get_sset_count(struct net_device *netdev, int sset) { if (sset == ETH_SS_STATS) return I40EVF_STATS_LEN(netdev); - else if (sset == ETH_SS_PRIV_FLAGS) - return I40EVF_PRIV_FLAGS_STR_LEN; else return -EINVAL; } @@ -170,12 +162,6 @@ static void i40evf_get_strings(struct net_device *netdev, u32 sset, u8 *data) snprintf(p, ETH_GSTRING_LEN, "rx-%u.bytes", i); p += ETH_GSTRING_LEN; } - } else if (sset == ETH_SS_PRIV_FLAGS) { - for (i = 0; i < I40EVF_PRIV_FLAGS_STR_LEN; i++) { - memcpy(data, i40evf_priv_flags_strings[i], - ETH_GSTRING_LEN); - data += ETH_GSTRING_LEN; - } } } @@ -225,7 +211,6 @@ static void i40evf_get_drvinfo(struct net_device *netdev, strlcpy(drvinfo->version, i40evf_driver_version, 32); strlcpy(drvinfo->fw_version, "N/A", 4); strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), 32); - drvinfo->n_priv_flags = I40EVF_PRIV_FLAGS_STR_LEN; } /** @@ -515,40 +500,6 @@ static int i40evf_set_rxfh(struct net_device *netdev, const u32 *indir, return i40evf_config_rss(adapter); } -/** - * i40evf_get_priv_flags - report device private flags - * @dev: network interface device structure - * - * The get string set count and the string set should be matched for each - * flag returned. Add new strings for each flag to the i40e_priv_flags_strings - * array. - * - * Returns a u32 bitmap of flags. 
- **/ -static u32 i40evf_get_priv_flags(struct net_device *dev) -{ - u32 ret_flags = 0; - - return ret_flags; -} - -/** - * i40evf_set_priv_flags - set private flags - * @dev: network interface device structure - * @flags: bit flags to be set - **/ -static int i40evf_set_priv_flags(struct net_device *dev, u32 flags) -{ - struct i40evf_adapter *adapter = netdev_priv(dev); - bool reset_required = false; - - /* if needed, issue reset to cause things to take effect */ - if (reset_required) - i40evf_schedule_reset(adapter); - - return 0; -} - static const struct ethtool_ops i40evf_ethtool_ops = { .get_settings = i40evf_get_settings, .get_drvinfo = i40evf_get_drvinfo, @@ -558,8 +509,6 @@ static const struct ethtool_ops i40evf_ethtool_ops = { .get_strings = i40evf_get_strings, .get_ethtool_stats = i40evf_get_ethtool_stats, .get_sset_count = i40evf_get_sset_count, - .get_priv_flags = i40evf_get_priv_flags, - .set_priv_flags = i40evf_set_priv_flags, .get_msglevel = i40evf_get_msglevel, .set_msglevel = i40evf_set_msglevel, .get_coalesce = i40evf_get_coalesce, diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c index 52408bc103d6..870bad8adeba 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c @@ -990,7 +990,7 @@ static void i40evf_configure(struct i40evf_adapter *adapter) for (i = 0; i < adapter->num_active_queues; i++) { struct i40e_ring *ring = &adapter->rx_rings[i]; - i40evf_alloc_rx_buffers_1buf(ring, ring->count); + i40evf_alloc_rx_buffers(ring, ring->count); ring->next_to_use = ring->count - 1; writel(ring->next_to_use, ring->tail); } @@ -2401,7 +2401,6 @@ static void i40evf_init_task(struct work_struct *work) adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN; adapter->flags |= I40EVF_FLAG_RX_CSUM_ENABLED; - adapter->flags |= I40EVF_FLAG_RX_1BUF_CAPABLE; netdev->netdev_ops = &i40evf_netdev_ops; i40evf_set_ethtool_ops(netdev); From bec60fc42b285344b027c87444c7fd6caade0ceb Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Mon, 18 Apr 2016 11:33:47 -0700 Subject: [PATCH 1375/1649] i40e/i40evf: Remove unused hardware receive descriptor code The hardware supports a 16 byte descriptor for receive, but the driver was never using it in production. There was no performance benefit to the real driver of 16 byte descriptors, so drop a whole lot of complexity while getting rid of the code. Also since the previous patch made us use no-split mode all the time, drop any support in the driver for any other value in dtype and assume it is always zero (aka no-split). Hooray for code removal! Change-ID: I2257e902e4dad84a07b94db6d2e6f4ce69b27bc0 Signed-off-by: Jesse Brandeburg Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e.h | 7 +----- .../net/ethernet/intel/i40e/i40e_debugfs.c | 16 +------------ drivers/net/ethernet/intel/i40e/i40e_main.c | 18 ++++---------- drivers/net/ethernet/intel/i40e/i40e_txrx.h | 24 ++++++++----------- drivers/net/ethernet/intel/i40evf/i40e_txrx.h | 24 ++++++++----------- 5 files changed, 27 insertions(+), 62 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index ebf423bd5ccb..2a6a5d3dd874 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -122,10 +122,7 @@ #define XSTRINGIFY(bar) STRINGIFY(bar) #define I40E_RX_DESC(R, i) \ - ((ring_is_16byte_desc_enabled(R)) \ - ? 
(union i40e_32byte_rx_desc *) \ - (&(((union i40e_16byte_rx_desc *)((R)->desc))[i])) \ - : (&(((union i40e_32byte_rx_desc *)((R)->desc))[i]))) + (&(((union i40e_32byte_rx_desc *)((R)->desc))[i])) #define I40E_TX_DESC(R, i) \ (&(((struct i40e_tx_desc *)((R)->desc))[i])) #define I40E_TX_CTXTDESC(R, i) \ @@ -327,7 +324,6 @@ struct i40e_pf { #ifdef I40E_FCOE #define I40E_FLAG_FCOE_ENABLED BIT_ULL(11) #endif /* I40E_FCOE */ -#define I40E_FLAG_16BYTE_RX_DESC_ENABLED BIT_ULL(13) #define I40E_FLAG_CLEAN_ADMINQ BIT_ULL(14) #define I40E_FLAG_FILTER_SYNC BIT_ULL(15) #define I40E_FLAG_SERVICE_CLIENT_REQUESTED BIT_ULL(16) @@ -532,7 +528,6 @@ struct i40e_vsi { u16 max_frame; u16 rx_buf_len; - u8 dtype; /* List of q_vectors allocated to this VSI */ struct i40e_q_vector **q_vectors; diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c index 8ae30f7f1839..e6af8c8d7019 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c +++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c @@ -361,7 +361,7 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid) vsi->work_limit); dev_info(&pf->pdev->dev, " max_frame = %d, rx_buf_len = %d dtype = %d\n", - vsi->max_frame, vsi->rx_buf_len, vsi->dtype); + vsi->max_frame, vsi->rx_buf_len, 0); dev_info(&pf->pdev->dev, " num_q_vectors = %i, base_vector = %i\n", vsi->num_q_vectors, vsi->base_vector); @@ -586,13 +586,6 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n, " d[%03x] = 0x%016llx 0x%016llx\n", i, txd->buffer_addr, txd->cmd_type_offset_bsz); - } else if (sizeof(union i40e_rx_desc) == - sizeof(union i40e_16byte_rx_desc)) { - rxd = I40E_RX_DESC(ring, i); - dev_info(&pf->pdev->dev, - " d[%03x] = 0x%016llx 0x%016llx\n", - i, rxd->read.pkt_addr, - rxd->read.hdr_addr); } else { rxd = I40E_RX_DESC(ring, i); dev_info(&pf->pdev->dev, @@ -614,13 +607,6 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n, "vsi = %02i tx ring = %02i d[%03x] = 0x%016llx 0x%016llx\n", vsi_seid, ring_id, desc_n, txd->buffer_addr, txd->cmd_type_offset_bsz); - } else if (sizeof(union i40e_rx_desc) == - sizeof(union i40e_16byte_rx_desc)) { - rxd = I40E_RX_DESC(ring, desc_n); - dev_info(&pf->pdev->dev, - "vsi = %02i rx ring = %02i d[%03x] = 0x%016llx 0x%016llx\n", - vsi_seid, ring_id, desc_n, - rxd->read.pkt_addr, rxd->read.hdr_addr); } else { rxd = I40E_RX_DESC(ring, desc_n); dev_info(&pf->pdev->dev, diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index e46611122179..46a3a674c635 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -2861,14 +2861,12 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring) rx_ctx.base = (ring->dma / 128); rx_ctx.qlen = ring->count; - if (vsi->back->flags & I40E_FLAG_16BYTE_RX_DESC_ENABLED) { - set_ring_16byte_desc_enabled(ring); - rx_ctx.dsize = 0; - } else { - rx_ctx.dsize = 1; - } + /* use 32 byte descriptors */ + rx_ctx.dsize = 1; - rx_ctx.dtype = vsi->dtype; + /* descriptor type is always zero + * rx_ctx.dtype = 0; + */ rx_ctx.hsplit_0 = 0; rx_ctx.rxmax = min_t(u16, vsi->max_frame, chain_len * ring->rx_buf_len); @@ -2948,7 +2946,6 @@ static int i40e_vsi_configure_rx(struct i40e_vsi *vsi) vsi->max_frame = I40E_RXBUFFER_2048; vsi->rx_buf_len = I40E_RXBUFFER_2048; - vsi->dtype = I40E_RX_DTYPE_NO_SPLIT; #ifdef I40E_FCOE /* setup rx buffer for FCoE */ @@ -2956,7 +2953,6 @@ static int i40e_vsi_configure_rx(struct i40e_vsi *vsi) 
(vsi->back->flags & I40E_FLAG_FCOE_ENABLED)) { vsi->rx_buf_len = I40E_RXBUFFER_3072; vsi->max_frame = I40E_RXBUFFER_3072; - vsi->dtype = I40E_RX_DTYPE_NO_SPLIT; } #endif /* I40E_FCOE */ @@ -7476,10 +7472,6 @@ static int i40e_alloc_rings(struct i40e_vsi *vsi) rx_ring->count = vsi->num_desc; rx_ring->size = 0; rx_ring->dcb_tc = 0; - if (pf->flags & I40E_FLAG_16BYTE_RX_DESC_ENABLED) - set_ring_16byte_desc_enabled(rx_ring); - else - clear_ring_16byte_desc_enabled(rx_ring); rx_ring->rx_itr_setting = pf->rx_itr_default; vsi->rx_rings[i] = rx_ring; } diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h index 37643e6b3afd..b78c810d1835 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h @@ -260,15 +260,18 @@ struct i40e_rx_queue_stats { enum i40e_ring_state_t { __I40E_TX_FDIR_INIT_DONE, __I40E_TX_XPS_INIT_DONE, - __I40E_RX_16BYTE_DESC_ENABLED, }; -#define ring_is_16byte_desc_enabled(ring) \ - test_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state) -#define set_ring_16byte_desc_enabled(ring) \ - set_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state) -#define clear_ring_16byte_desc_enabled(ring) \ - clear_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state) +/* some useful defines for virtchannel interface, which + * is the only remaining user of header split + */ +#define I40E_RX_DTYPE_NO_SPLIT 0 +#define I40E_RX_DTYPE_HEADER_SPLIT 1 +#define I40E_RX_DTYPE_SPLIT_ALWAYS 2 +#define I40E_RX_SPLIT_L2 0x1 +#define I40E_RX_SPLIT_IP 0x2 +#define I40E_RX_SPLIT_TCP_UDP 0x4 +#define I40E_RX_SPLIT_SCTP 0x8 /* struct that defines a descriptor ring, associated with a VSI */ struct i40e_ring { @@ -296,13 +299,6 @@ struct i40e_ring { u16 count; /* Number of descriptors */ u16 reg_idx; /* HW register index of the ring */ u16 rx_buf_len; -#define I40E_RX_DTYPE_NO_SPLIT 0 -#define I40E_RX_DTYPE_HEADER_SPLIT 1 -#define I40E_RX_DTYPE_SPLIT_ALWAYS 2 -#define I40E_RX_SPLIT_L2 0x1 -#define I40E_RX_SPLIT_IP 0x2 -#define I40E_RX_SPLIT_TCP_UDP 0x4 -#define I40E_RX_SPLIT_SCTP 0x8 /* used in interrupt processing */ u16 next_to_use; diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h index 4ba302e8a2df..0112277e5882 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h @@ -259,15 +259,18 @@ struct i40e_rx_queue_stats { enum i40e_ring_state_t { __I40E_TX_FDIR_INIT_DONE, __I40E_TX_XPS_INIT_DONE, - __I40E_RX_16BYTE_DESC_ENABLED, }; -#define ring_is_16byte_desc_enabled(ring) \ - test_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state) -#define set_ring_16byte_desc_enabled(ring) \ - set_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state) -#define clear_ring_16byte_desc_enabled(ring) \ - clear_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state) +/* some useful defines for virtchannel interface, which + * is the only remaining user of header split + */ +#define I40E_RX_DTYPE_NO_SPLIT 0 +#define I40E_RX_DTYPE_HEADER_SPLIT 1 +#define I40E_RX_DTYPE_SPLIT_ALWAYS 2 +#define I40E_RX_SPLIT_L2 0x1 +#define I40E_RX_SPLIT_IP 0x2 +#define I40E_RX_SPLIT_TCP_UDP 0x4 +#define I40E_RX_SPLIT_SCTP 0x8 /* struct that defines a descriptor ring, associated with a VSI */ struct i40e_ring { @@ -287,13 +290,6 @@ struct i40e_ring { u16 count; /* Number of descriptors */ u16 reg_idx; /* HW register index of the ring */ u16 rx_buf_len; -#define I40E_RX_DTYPE_NO_SPLIT 0 -#define I40E_RX_DTYPE_HEADER_SPLIT 1 -#define I40E_RX_DTYPE_SPLIT_ALWAYS 2 
-#define I40E_RX_SPLIT_L2 0x1 -#define I40E_RX_SPLIT_IP 0x2 -#define I40E_RX_SPLIT_TCP_UDP 0x4 -#define I40E_RX_SPLIT_SCTP 0x8 /* used in interrupt processing */ u16 next_to_use; From b163098ea1eece88a8834952dcbade1f17378731 Mon Sep 17 00:00:00 2001 From: Mitch Williams Date: Mon, 18 Apr 2016 11:33:48 -0700 Subject: [PATCH 1376/1649] i40evf: Allocate Rx buffers properly Allocate the correct number of RX buffers, and don't fiddle with next_to_use. The common RX code handles all of this. This fixes a memory leak of one page each time the driver is opened. Change-Id: Id06eca353086e084921f047acad28c14745684ee Signed-off-by: Mitch Williams Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40evf/i40evf_main.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c index 870bad8adeba..b548dbe78cd3 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c @@ -990,9 +990,7 @@ static void i40evf_configure(struct i40evf_adapter *adapter) for (i = 0; i < adapter->num_active_queues; i++) { struct i40e_ring *ring = &adapter->rx_rings[i]; - i40evf_alloc_rx_buffers(ring, ring->count); - ring->next_to_use = ring->count - 1; - writel(ring->next_to_use, ring->tail); + i40evf_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring)); } } @@ -2768,7 +2766,6 @@ static void i40evf_remove(struct pci_dev *pdev) iounmap(hw->hw_addr); pci_release_regions(pdev); - i40evf_free_all_tx_resources(adapter); i40evf_free_all_rx_resources(adapter); i40evf_free_queues(adapter); From 147e81ec7568933a51fe58b64244383e929870fb Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Mon, 18 Apr 2016 11:33:49 -0700 Subject: [PATCH 1377/1649] i40e: Test memory before ethtool alloc succeeds When testing on systems with very limited amounts of RAM, a bug was found where, while changing the number of descriptors using ethtool, the driver didn't test the limits of system memory before permanently assuming it would be able to get receive buffer memory. Work around this issue by pre-allocation of the receive buffer memory, in the "ghost" ring, which is then used during reinit using the new ring length. 
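As an illustration of the "probe before commit" flow described above, the sketch below condenses the i40e_set_ringparam() hunk that follows. The helper name i40e_try_new_rx_count() is invented for illustration; it assumes the caller has allocated a scratch rx_rings[] array and, as the real patch does with a local faketail, points each clone's tail at a dummy location so the allocation path has somewhere to write.

/* Illustrative sketch only, not a drop-in replacement for the hunk below. */
static int i40e_try_new_rx_count(struct i40e_vsi *vsi,
				 struct i40e_ring *rx_rings,
				 u16 new_rx_count)
{
	int i, err;

	for (i = 0; i < vsi->num_queue_pairs; i++) {
		/* clone the live ring, then force fresh allocations */
		rx_rings[i] = *vsi->rx_rings[i];
		rx_rings[i].count = new_rx_count;
		rx_rings[i].desc = NULL;
		rx_rings[i].rx_bi = NULL;

		err = i40e_setup_rx_descriptors(&rx_rings[i]);
		if (!err)
			/* probe system memory now, before tearing down the
			 * rings that are actually carrying traffic
			 */
			err = i40e_alloc_rx_buffers(&rx_rings[i],
					I40E_DESC_UNUSED(&rx_rings[i]));
		if (err) {
			/* unwind everything built so far, including the
			 * partially initialized ring at index i
			 */
			do {
				i40e_free_rx_resources(&rx_rings[i]);
			} while (i--);
			return err;
		}
	}

	return 0;
}

Only after this succeeds for every queue pair does the driver free the old rings and struct-copy the clones into place, which is why an allocation failure on a memory-constrained system no longer leaves the interface without usable Rx resources.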
Change-Id: I92d7a5fb59a6c884b2efdd1ec652845f101c3359 Signed-off-by: Jesse Brandeburg Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- .../net/ethernet/intel/i40e/i40e_ethtool.c | 34 +++++++++++++++++-- 1 file changed, 31 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 6fd730ac23a1..51a994d85870 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -1274,6 +1274,13 @@ static int i40e_set_ringparam(struct net_device *netdev, } for (i = 0; i < vsi->num_queue_pairs; i++) { + /* this is to allow wr32 to have something to write to + * during early allocation of Rx buffers + */ + u32 __iomem faketail = 0; + struct i40e_ring *ring; + u16 unused; + /* clone ring and setup updated count */ rx_rings[i] = *vsi->rx_rings[i]; rx_rings[i].count = new_rx_count; @@ -1282,12 +1289,22 @@ static int i40e_set_ringparam(struct net_device *netdev, */ rx_rings[i].desc = NULL; rx_rings[i].rx_bi = NULL; + rx_rings[i].tail = (u8 __iomem *)&faketail; err = i40e_setup_rx_descriptors(&rx_rings[i]); + if (err) + goto rx_unwind; + + /* now allocate the Rx buffers to make sure the OS + * has enough memory, any failure here means abort + */ + ring = &rx_rings[i]; + unused = I40E_DESC_UNUSED(ring); + err = i40e_alloc_rx_buffers(ring, unused); +rx_unwind: if (err) { - while (i) { - i--; + do { i40e_free_rx_resources(&rx_rings[i]); - } + } while (i--); kfree(rx_rings); rx_rings = NULL; @@ -1313,6 +1330,17 @@ static int i40e_set_ringparam(struct net_device *netdev, if (rx_rings) { for (i = 0; i < vsi->num_queue_pairs; i++) { i40e_free_rx_resources(vsi->rx_rings[i]); + /* get the real tail offset */ + rx_rings[i].tail = vsi->rx_rings[i]->tail; + /* this is to fake out the allocation routine + * into thinking it has to realloc everything + * but the recycling logic will let us re-use + * the buffers allocated above + */ + rx_rings[i].next_to_use = 0; + rx_rings[i].next_to_clean = 0; + rx_rings[i].next_to_alloc = 0; + /* do a struct copy */ *vsi->rx_rings[i] = rx_rings[i]; } kfree(rx_rings); From ce927db48721235b3a08142b6d9f9a906e4e626f Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Fri, 29 Apr 2016 19:44:05 +0200 Subject: [PATCH 1378/1649] i40e: fix misleading indentation Newly added code in i40e_vc_config_promiscuous_mode_msg() is indented in a way that gcc rightly complains about: drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c: In function 'i40e_vc_config_promiscuous_mode_msg': drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c:1543:4: error: this 'if' clause does not guard... [-Werror=misleading-indentation] if (f->vlan >= 0 && f->vlan <= I40E_MAX_VLANID) ^~ drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c:1550:5: note: ...this statement, but the latter is misleadingly indented as if it is guarded by the 'if' aq_err = pf->hw.aq.asq_last_status; From the context, it looks like the aq_err assignment was meant to be inside of the conditional expression, so I'm adding the appropriate curly braces now. 
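For readers who have not met -Wmisleading-indentation before, the generic shape of the warning is shown below. This is illustrative C only, not the driver source; issue_command() and read_last_status() are placeholder names.

/* gcc warns because the second statement is indented as though the if
 * guards it, yet it executes unconditionally:
 */
if (ret >= 0)
	ret = issue_command();
	err = read_last_status();	/* always runs - misleading */

/* adding braces makes the intended grouping explicit and silences the
 * warning:
 */
if (ret >= 0) {
	ret = issue_command();
	err = read_last_status();
}

The hunk below applies the same fix: the aq_err read moves inside the braces that now guard the i40e_aq_set_vsi_uc_promisc_on_vlan() call.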
Signed-off-by: Arnd Bergmann Fixes: 5676a8b9cd9a ("i40e: Add VF promiscuous mode driver support") Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 36aa33af45c0..a9b04e72df82 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -1544,7 +1544,7 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, } else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) { list_for_each_entry(f, &vsi->mac_filter_list, list) { aq_ret = 0; - if (f->vlan >= 0 && f->vlan <= I40E_MAX_VLANID) + if (f->vlan >= 0 && f->vlan <= I40E_MAX_VLANID) { aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw, vsi->seid, @@ -1552,6 +1552,7 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, f->vlan, NULL); aq_err = pf->hw.aq.asq_last_status; + } if (aq_ret) dev_err(&pf->pdev->dev, "Could not add VLAN %d to Unicast promiscuous domain err %s aq_err %s\n", From 3949c4ac8cfa8ab3518a326c72eff1a2ff489bb9 Mon Sep 17 00:00:00 2001 From: Julia Lawall Date: Sun, 1 May 2016 14:07:23 +0200 Subject: [PATCH 1379/1649] i40e: constify i40e_client_ops structure The i40e_client_ops structure is never modified, so declare it as const. Done with the help of Coccinelle. Signed-off-by: Julia Lawall Reviewed-by: Leon Romanovsky Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/infiniband/hw/i40iw/i40iw_main.c | 2 +- drivers/net/ethernet/intel/i40e/i40e_client.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/infiniband/hw/i40iw/i40iw_main.c b/drivers/infiniband/hw/i40iw/i40iw_main.c index 90e5af21737e..e41fae2422ab 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_main.c +++ b/drivers/infiniband/hw/i40iw/i40iw_main.c @@ -1863,7 +1863,7 @@ static enum i40iw_status_code i40iw_virtchnl_send(struct i40iw_sc_dev *dev, } /* client interface functions */ -static struct i40e_client_ops i40e_ops = { +static const struct i40e_client_ops i40e_ops = { .open = i40iw_open, .close = i40iw_close, .l2_param_change = i40iw_l2param_change, diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.h b/drivers/net/ethernet/intel/i40e/i40e_client.h index bf6b453d93a1..a4601d97fb24 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_client.h +++ b/drivers/net/ethernet/intel/i40e/i40e_client.h @@ -217,7 +217,7 @@ struct i40e_client { #define I40E_CLIENT_FLAGS_LAUNCH_ON_PROBE BIT(0) #define I40E_TX_FLAGS_NOTIFY_OTHER_EVENTS BIT(2) enum i40e_client_type type; - struct i40e_client_ops *ops; /* client ops provided by the client */ + const struct i40e_client_ops *ops; /* client ops provided by the client */ }; static inline bool i40e_client_is_registered(struct i40e_client *client) From 698e2a8dca98e4de32f3f630e6d9cd93753c52e1 Mon Sep 17 00:00:00 2001 From: Marco Angaroni Date: Tue, 26 Apr 2016 21:20:22 +0200 Subject: [PATCH 1380/1649] ipvs: make drop_entry protection effective for SIP-pe DoS protection policy that deletes connections to avoid out of memory is currently not effective for SIP-pe plus OPS-mode for two reasons: 1) connection templates (holding SIP call-id) are always skipped in ip_vs_random_dropentry() 2) in_pkts counter (used by drop_entry algorithm) is not incremented for connection templates This patch addresses such problems with the following changes: a) connection templates associated (via their dest) 
to virtual-services configured in OPS mode are included in ip_vs_random_dropentry() monitoring. This applies to SIP-pe over UDP (which requires OPS mode), but is more general principle: when OPS is controlled by templates memory can be used only by templates themselves, since OPS conns are deleted after packet is forwarded. b) OPS connections, if controlled by a template, cause increment of in_pkts counter of their template. This is already happening but only in case director is in master-slave mode (see ip_vs_sync_conn()). Signed-off-by: Marco Angaroni Acked-by: Julian Anastasov Signed-off-by: Simon Horman --- net/netfilter/ipvs/ip_vs_conn.c | 22 +++++++++++++++++++--- net/netfilter/ipvs/ip_vs_core.c | 8 +++++++- 2 files changed, 26 insertions(+), 4 deletions(-) diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c index 292365ffa4f0..2cb3c626cd43 100644 --- a/net/netfilter/ipvs/ip_vs_conn.c +++ b/net/netfilter/ipvs/ip_vs_conn.c @@ -1261,6 +1261,16 @@ static inline int todrop_entry(struct ip_vs_conn *cp) return 1; } +static inline bool ip_vs_conn_ops_mode(struct ip_vs_conn *cp) +{ + struct ip_vs_service *svc; + + if (!cp->dest) + return false; + svc = rcu_dereference(cp->dest->svc); + return svc && (svc->flags & IP_VS_SVC_F_ONEPACKET); +} + /* Called from keventd and must protect itself from softirqs */ void ip_vs_random_dropentry(struct netns_ipvs *ipvs) { @@ -1275,11 +1285,16 @@ void ip_vs_random_dropentry(struct netns_ipvs *ipvs) unsigned int hash = prandom_u32() & ip_vs_conn_tab_mask; hlist_for_each_entry_rcu(cp, &ip_vs_conn_tab[hash], c_list) { - if (cp->flags & IP_VS_CONN_F_TEMPLATE) - /* connection template */ - continue; if (cp->ipvs != ipvs) continue; + if (cp->flags & IP_VS_CONN_F_TEMPLATE) { + if (atomic_read(&cp->n_control) || + !ip_vs_conn_ops_mode(cp)) + continue; + else + /* connection template of OPS */ + goto try_drop; + } if (cp->protocol == IPPROTO_TCP) { switch(cp->state) { case IP_VS_TCP_S_SYN_RECV: @@ -1307,6 +1322,7 @@ void ip_vs_random_dropentry(struct netns_ipvs *ipvs) continue; } } else { +try_drop: if (!todrop_entry(cp)) continue; } diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c index f3bac2e9a25a..1207f20d24e4 100644 --- a/net/netfilter/ipvs/ip_vs_core.c +++ b/net/netfilter/ipvs/ip_vs_core.c @@ -612,7 +612,10 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb, ret = cp->packet_xmit(skb, cp, pd->pp, iph); /* do not touch skb anymore */ - atomic_inc(&cp->in_pkts); + if ((cp->flags & IP_VS_CONN_F_ONE_PACKET) && cp->control) + atomic_inc(&cp->control->in_pkts); + else + atomic_inc(&cp->in_pkts); ip_vs_conn_put(cp); return ret; } @@ -1991,6 +1994,9 @@ ip_vs_in(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, int if (ipvs->sync_state & IP_VS_STATE_MASTER) ip_vs_sync_conn(ipvs, cp, pkts); + else if ((cp->flags & IP_VS_CONN_F_ONE_PACKET) && cp->control) + /* increment is done inside ip_vs_sync_conn too */ + atomic_inc(&cp->control->in_pkts); ip_vs_conn_put(cp); return ret; From 03d7dc5cdfe6fd4e5bd04cfc2be7ae259f956428 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Fri, 6 May 2016 00:51:47 +0200 Subject: [PATCH 1381/1649] netfilter: conntrack: check netns when walking expect hash Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- .../nf_conntrack_l3proto_ipv4_compat.c | 3 +++ net/netfilter/nf_conntrack_expect.c | 19 +++++++++++++++---- net/netfilter/nf_conntrack_netlink.c | 12 ++++++++++++ 3 files changed, 30 insertions(+), 4 deletions(-) diff --git 
a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c index f8fc7ab201c9..2b4c729fcf8d 100644 --- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c +++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c @@ -301,6 +301,9 @@ static int exp_seq_show(struct seq_file *s, void *v) exp = hlist_entry(n, struct nf_conntrack_expect, hnode); + if (!net_eq(nf_ct_net(exp->master), seq_file_net(s))) + return 0; + if (exp->tuple.src.l3num != AF_INET) return 0; diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c index c2f7c4f475b1..da95d740e60b 100644 --- a/net/netfilter/nf_conntrack_expect.c +++ b/net/netfilter/nf_conntrack_expect.c @@ -86,6 +86,17 @@ static unsigned int nf_ct_expect_dst_hash(const struct nf_conntrack_tuple *tuple return reciprocal_scale(hash, nf_ct_expect_hsize); } +static bool +nf_ct_exp_equal(const struct nf_conntrack_tuple *tuple, + const struct nf_conntrack_expect *i, + const struct nf_conntrack_zone *zone, + const struct net *net) +{ + return nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask) && + net_eq(net, nf_ct_net(i->master)) && + nf_ct_zone_equal_any(i->master, zone); +} + struct nf_conntrack_expect * __nf_ct_expect_find(struct net *net, const struct nf_conntrack_zone *zone, @@ -99,8 +110,7 @@ __nf_ct_expect_find(struct net *net, h = nf_ct_expect_dst_hash(tuple); hlist_for_each_entry_rcu(i, &net->ct.expect_hash[h], hnode) { - if (nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask) && - nf_ct_zone_equal_any(i->master, zone)) + if (nf_ct_exp_equal(tuple, i, zone, net)) return i; } return NULL; @@ -141,8 +151,7 @@ nf_ct_find_expectation(struct net *net, h = nf_ct_expect_dst_hash(tuple); hlist_for_each_entry(i, &net->ct.expect_hash[h], hnode) { if (!(i->flags & NF_CT_EXPECT_INACTIVE) && - nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask) && - nf_ct_zone_equal_any(i->master, zone)) { + nf_ct_exp_equal(tuple, i, zone, net)) { exp = i; break; } @@ -222,6 +231,7 @@ static inline int expect_clash(const struct nf_conntrack_expect *a, } return nf_ct_tuple_mask_cmp(&a->tuple, &b->tuple, &intersect_mask) && + net_eq(nf_ct_net(a->master), nf_ct_net(b->master)) && nf_ct_zone_equal_any(a->master, nf_ct_zone(b->master)); } @@ -231,6 +241,7 @@ static inline int expect_matches(const struct nf_conntrack_expect *a, return a->master == b->master && a->class == b->class && nf_ct_tuple_equal(&a->tuple, &b->tuple) && nf_ct_tuple_mask_equal(&a->mask, &b->mask) && + net_eq(nf_ct_net(a->master), nf_ct_net(b->master)) && nf_ct_zone_equal_any(a->master, nf_ct_zone(b->master)); } diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index e00f178c48b0..5dfb84d86143 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c @@ -2636,6 +2636,10 @@ restart: hnode) { if (l3proto && exp->tuple.src.l3num != l3proto) continue; + + if (!net_eq(nf_ct_net(exp->master), net)) + continue; + if (cb->args[1]) { if (exp != last) continue; @@ -2888,6 +2892,10 @@ static int ctnetlink_del_expect(struct net *net, struct sock *ctnl, hlist_for_each_entry_safe(exp, next, &net->ct.expect_hash[i], hnode) { + + if (!net_eq(nf_ct_exp_net(exp), net)) + continue; + m_help = nfct_help(exp->master); if (!strcmp(m_help->helper->name, name) && del_timer(&exp->timeout)) { @@ -2906,6 +2914,10 @@ static int ctnetlink_del_expect(struct net *net, struct sock *ctnl, hlist_for_each_entry_safe(exp, next, &net->ct.expect_hash[i], hnode) { + + if (!net_eq(nf_ct_exp_net(exp), net)) + 
continue; + if (del_timer(&exp->timeout)) { nf_ct_unlink_expect_report(exp, NETLINK_CB(skb).portid, From a9a083c3878f28e9d368f6dfb1a79a6f04ad8123 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Fri, 6 May 2016 00:51:48 +0200 Subject: [PATCH 1382/1649] netfilter: conntrack: make netns address part of expect hash Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nf_conntrack_expect.c | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c index da95d740e60b..130f1be8db26 100644 --- a/net/netfilter/nf_conntrack_expect.c +++ b/net/netfilter/nf_conntrack_expect.c @@ -24,6 +24,7 @@ #include #include #include +#include #include #include @@ -73,15 +74,17 @@ static void nf_ct_expectation_timed_out(unsigned long ul_expect) nf_ct_expect_put(exp); } -static unsigned int nf_ct_expect_dst_hash(const struct nf_conntrack_tuple *tuple) +static unsigned int nf_ct_expect_dst_hash(const struct net *n, const struct nf_conntrack_tuple *tuple) { - unsigned int hash; + unsigned int hash, seed; get_random_once(&nf_ct_expect_hashrnd, sizeof(nf_ct_expect_hashrnd)); + seed = nf_ct_expect_hashrnd ^ net_hash_mix(n); + hash = jhash2(tuple->dst.u3.all, ARRAY_SIZE(tuple->dst.u3.all), (((tuple->dst.protonum ^ tuple->src.l3num) << 16) | - (__force __u16)tuple->dst.u.all) ^ nf_ct_expect_hashrnd); + (__force __u16)tuple->dst.u.all) ^ seed); return reciprocal_scale(hash, nf_ct_expect_hsize); } @@ -108,7 +111,7 @@ __nf_ct_expect_find(struct net *net, if (!net->ct.expect_count) return NULL; - h = nf_ct_expect_dst_hash(tuple); + h = nf_ct_expect_dst_hash(net, tuple); hlist_for_each_entry_rcu(i, &net->ct.expect_hash[h], hnode) { if (nf_ct_exp_equal(tuple, i, zone, net)) return i; @@ -148,7 +151,7 @@ nf_ct_find_expectation(struct net *net, if (!net->ct.expect_count) return NULL; - h = nf_ct_expect_dst_hash(tuple); + h = nf_ct_expect_dst_hash(net, tuple); hlist_for_each_entry(i, &net->ct.expect_hash[h], hnode) { if (!(i->flags & NF_CT_EXPECT_INACTIVE) && nf_ct_exp_equal(tuple, i, zone, net)) { @@ -352,7 +355,7 @@ static int nf_ct_expect_insert(struct nf_conntrack_expect *exp) struct nf_conn_help *master_help = nfct_help(exp->master); struct nf_conntrack_helper *helper; struct net *net = nf_ct_exp_net(exp); - unsigned int h = nf_ct_expect_dst_hash(&exp->tuple); + unsigned int h = nf_ct_expect_dst_hash(net, &exp->tuple); /* two references : one for hash insert, one for the timer */ atomic_add(2, &exp->use); @@ -411,7 +414,7 @@ static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect) ret = -ESHUTDOWN; goto out; } - h = nf_ct_expect_dst_hash(&expect->tuple); + h = nf_ct_expect_dst_hash(net, &expect->tuple); hlist_for_each_entry_safe(i, next, &net->ct.expect_hash[h], hnode) { if (expect_matches(i, expect)) { if (del_timer(&i->timeout)) { From 0a93aaedc46af2c5feecfb1066d98bfb491ec0b8 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Fri, 6 May 2016 00:51:49 +0200 Subject: [PATCH 1383/1649] netfilter: conntrack: use a single expectation table for all namespaces We already include netns address in the hash and compare the netns pointers during lookup, so even if namespaces have overlapping addresses entries will be spread across the expectation table. 
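In other words, correctness comes from the lookup comparing the owning namespace, and good distribution comes from mixing a per-namespace value into the hash seed. A minimal standalone sketch of that pattern, using hypothetical names rather than the kernel's real structures:

#include <stddef.h>
#include <stdint.h>

struct ns;					/* stand-in for struct net */

struct exp {
	struct exp *next;
	uint32_t tuple;				/* stand-in for the masked tuple */
	const struct ns *owner;			/* namespace that created the entry */
};

#define HSIZE 256
static struct exp *table[HSIZE];		/* one table shared by every namespace */

static unsigned int bucket(const struct ns *ns, uint32_t tuple)
{
	/* mix a per-namespace value into the seed (the patch uses
	 * nf_ct_expect_hashrnd ^ net_hash_mix(net)); the pointer cast
	 * here is illustrative only
	 */
	uint32_t seed = (uint32_t)(uintptr_t)ns;

	return (tuple ^ seed) % HSIZE;
}

static struct exp *lookup(const struct ns *ns, uint32_t tuple)
{
	struct exp *e;

	for (e = table[bucket(ns, tuple)]; e != NULL; e = e->next)
		/* comparing the owning namespace keeps entries private even
		 * when two namespaces land in the same bucket
		 */
		if (e->tuple == tuple && e->owner == ns)
			return e;
	return NULL;
}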
Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- include/net/netfilter/nf_conntrack_expect.h | 1 + include/net/netns/conntrack.h | 1 - .../nf_conntrack_l3proto_ipv4_compat.c | 6 +-- net/netfilter/nf_conntrack_expect.c | 42 ++++++++----------- net/netfilter/nf_conntrack_helper.c | 2 +- net/netfilter/nf_conntrack_netlink.c | 6 +-- 6 files changed, 25 insertions(+), 33 deletions(-) diff --git a/include/net/netfilter/nf_conntrack_expect.h b/include/net/netfilter/nf_conntrack_expect.h index dce56f09ac9a..5ed33ea4718e 100644 --- a/include/net/netfilter/nf_conntrack_expect.h +++ b/include/net/netfilter/nf_conntrack_expect.h @@ -10,6 +10,7 @@ extern unsigned int nf_ct_expect_hsize; extern unsigned int nf_ct_expect_max; +extern struct hlist_head *nf_ct_expect_hash; struct nf_conntrack_expect { /* Conntrack expectation list member */ diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h index 251c435ee330..2811ddcc1a3d 100644 --- a/include/net/netns/conntrack.h +++ b/include/net/netns/conntrack.h @@ -94,7 +94,6 @@ struct netns_ct { int sysctl_checksum; struct kmem_cache *nf_conntrack_cachep; - struct hlist_head *expect_hash; struct ct_pcpu __percpu *pcpu_lists; struct ip_conntrack_stat __percpu *stat; struct nf_ct_event_notifier __rcu *nf_conntrack_event_cb; diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c index 2b4c729fcf8d..c6f3c406f707 100644 --- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c +++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c @@ -236,13 +236,12 @@ struct ct_expect_iter_state { static struct hlist_node *ct_expect_get_first(struct seq_file *seq) { - struct net *net = seq_file_net(seq); struct ct_expect_iter_state *st = seq->private; struct hlist_node *n; for (st->bucket = 0; st->bucket < nf_ct_expect_hsize; st->bucket++) { n = rcu_dereference( - hlist_first_rcu(&net->ct.expect_hash[st->bucket])); + hlist_first_rcu(&nf_ct_expect_hash[st->bucket])); if (n) return n; } @@ -252,7 +251,6 @@ static struct hlist_node *ct_expect_get_first(struct seq_file *seq) static struct hlist_node *ct_expect_get_next(struct seq_file *seq, struct hlist_node *head) { - struct net *net = seq_file_net(seq); struct ct_expect_iter_state *st = seq->private; head = rcu_dereference(hlist_next_rcu(head)); @@ -260,7 +258,7 @@ static struct hlist_node *ct_expect_get_next(struct seq_file *seq, if (++st->bucket >= nf_ct_expect_hsize) return NULL; head = rcu_dereference( - hlist_first_rcu(&net->ct.expect_hash[st->bucket])); + hlist_first_rcu(&nf_ct_expect_hash[st->bucket])); } return head; } diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c index 130f1be8db26..9e3693128313 100644 --- a/net/netfilter/nf_conntrack_expect.c +++ b/net/netfilter/nf_conntrack_expect.c @@ -36,6 +36,9 @@ unsigned int nf_ct_expect_hsize __read_mostly; EXPORT_SYMBOL_GPL(nf_ct_expect_hsize); +struct hlist_head *nf_ct_expect_hash __read_mostly; +EXPORT_SYMBOL_GPL(nf_ct_expect_hash); + unsigned int nf_ct_expect_max __read_mostly; static struct kmem_cache *nf_ct_expect_cachep __read_mostly; @@ -112,7 +115,7 @@ __nf_ct_expect_find(struct net *net, return NULL; h = nf_ct_expect_dst_hash(net, tuple); - hlist_for_each_entry_rcu(i, &net->ct.expect_hash[h], hnode) { + hlist_for_each_entry_rcu(i, &nf_ct_expect_hash[h], hnode) { if (nf_ct_exp_equal(tuple, i, zone, net)) return i; } @@ -152,7 +155,7 @@ nf_ct_find_expectation(struct net *net, return NULL; h = nf_ct_expect_dst_hash(net, 
tuple); - hlist_for_each_entry(i, &net->ct.expect_hash[h], hnode) { + hlist_for_each_entry(i, &nf_ct_expect_hash[h], hnode) { if (!(i->flags & NF_CT_EXPECT_INACTIVE) && nf_ct_exp_equal(tuple, i, zone, net)) { exp = i; @@ -363,7 +366,7 @@ static int nf_ct_expect_insert(struct nf_conntrack_expect *exp) hlist_add_head(&exp->lnode, &master_help->expectations); master_help->expecting[exp->class]++; - hlist_add_head_rcu(&exp->hnode, &net->ct.expect_hash[h]); + hlist_add_head_rcu(&exp->hnode, &nf_ct_expect_hash[h]); net->ct.expect_count++; setup_timer(&exp->timeout, nf_ct_expectation_timed_out, @@ -415,7 +418,7 @@ static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect) goto out; } h = nf_ct_expect_dst_hash(net, &expect->tuple); - hlist_for_each_entry_safe(i, next, &net->ct.expect_hash[h], hnode) { + hlist_for_each_entry_safe(i, next, &nf_ct_expect_hash[h], hnode) { if (expect_matches(i, expect)) { if (del_timer(&i->timeout)) { nf_ct_unlink_expect(i); @@ -481,12 +484,11 @@ struct ct_expect_iter_state { static struct hlist_node *ct_expect_get_first(struct seq_file *seq) { - struct net *net = seq_file_net(seq); struct ct_expect_iter_state *st = seq->private; struct hlist_node *n; for (st->bucket = 0; st->bucket < nf_ct_expect_hsize; st->bucket++) { - n = rcu_dereference(hlist_first_rcu(&net->ct.expect_hash[st->bucket])); + n = rcu_dereference(hlist_first_rcu(&nf_ct_expect_hash[st->bucket])); if (n) return n; } @@ -496,14 +498,13 @@ static struct hlist_node *ct_expect_get_first(struct seq_file *seq) static struct hlist_node *ct_expect_get_next(struct seq_file *seq, struct hlist_node *head) { - struct net *net = seq_file_net(seq); struct ct_expect_iter_state *st = seq->private; head = rcu_dereference(hlist_next_rcu(head)); while (head == NULL) { if (++st->bucket >= nf_ct_expect_hsize) return NULL; - head = rcu_dereference(hlist_first_rcu(&net->ct.expect_hash[st->bucket])); + head = rcu_dereference(hlist_first_rcu(&nf_ct_expect_hash[st->bucket])); } return head; } @@ -636,28 +637,13 @@ module_param_named(expect_hashsize, nf_ct_expect_hsize, uint, 0400); int nf_conntrack_expect_pernet_init(struct net *net) { - int err = -ENOMEM; - net->ct.expect_count = 0; - net->ct.expect_hash = nf_ct_alloc_hashtable(&nf_ct_expect_hsize, 0); - if (net->ct.expect_hash == NULL) - goto err1; - - err = exp_proc_init(net); - if (err < 0) - goto err2; - - return 0; -err2: - nf_ct_free_hashtable(net->ct.expect_hash, nf_ct_expect_hsize); -err1: - return err; + return exp_proc_init(net); } void nf_conntrack_expect_pernet_fini(struct net *net) { exp_proc_remove(net); - nf_ct_free_hashtable(net->ct.expect_hash, nf_ct_expect_hsize); } int nf_conntrack_expect_init(void) @@ -673,6 +659,13 @@ int nf_conntrack_expect_init(void) 0, 0, NULL); if (!nf_ct_expect_cachep) return -ENOMEM; + + nf_ct_expect_hash = nf_ct_alloc_hashtable(&nf_ct_expect_hsize, 0); + if (!nf_ct_expect_hash) { + kmem_cache_destroy(nf_ct_expect_cachep); + return -ENOMEM; + } + return 0; } @@ -680,4 +673,5 @@ void nf_conntrack_expect_fini(void) { rcu_barrier(); /* Wait for call_rcu() before destroy */ kmem_cache_destroy(nf_ct_expect_cachep); + nf_ct_free_hashtable(nf_ct_expect_hash, nf_ct_expect_hsize); } diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c index cb48e6adba2c..f703adb7e5f7 100644 --- a/net/netfilter/nf_conntrack_helper.c +++ b/net/netfilter/nf_conntrack_helper.c @@ -400,7 +400,7 @@ static void __nf_conntrack_helper_unregister(struct nf_conntrack_helper *me, spin_lock_bh(&nf_conntrack_expect_lock); 
for (i = 0; i < nf_ct_expect_hsize; i++) { hlist_for_each_entry_safe(exp, next, - &net->ct.expect_hash[i], hnode) { + &nf_ct_expect_hash[i], hnode) { struct nf_conn_help *help = nfct_help(exp->master); if ((rcu_dereference_protected( help->helper, diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index 5dfb84d86143..a18d1ceabad5 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c @@ -2632,7 +2632,7 @@ ctnetlink_exp_dump_table(struct sk_buff *skb, struct netlink_callback *cb) last = (struct nf_conntrack_expect *)cb->args[1]; for (; cb->args[0] < nf_ct_expect_hsize; cb->args[0]++) { restart: - hlist_for_each_entry(exp, &net->ct.expect_hash[cb->args[0]], + hlist_for_each_entry(exp, &nf_ct_expect_hash[cb->args[0]], hnode) { if (l3proto && exp->tuple.src.l3num != l3proto) continue; @@ -2890,7 +2890,7 @@ static int ctnetlink_del_expect(struct net *net, struct sock *ctnl, spin_lock_bh(&nf_conntrack_expect_lock); for (i = 0; i < nf_ct_expect_hsize; i++) { hlist_for_each_entry_safe(exp, next, - &net->ct.expect_hash[i], + &nf_ct_expect_hash[i], hnode) { if (!net_eq(nf_ct_exp_net(exp), net)) @@ -2912,7 +2912,7 @@ static int ctnetlink_del_expect(struct net *net, struct sock *ctnl, spin_lock_bh(&nf_conntrack_expect_lock); for (i = 0; i < nf_ct_expect_hsize; i++) { hlist_for_each_entry_safe(exp, next, - &net->ct.expect_hash[i], + &nf_ct_expect_hash[i], hnode) { if (!net_eq(nf_ct_exp_net(exp), net)) From f37bd0cced37189fc5dd34a85c90710700fd38d5 Mon Sep 17 00:00:00 2001 From: Jon Maxwell Date: Thu, 5 May 2016 09:55:51 +1000 Subject: [PATCH 1384/1649] cnic: call cp->stop_hw() in cnic_start_hw() on allocation failure MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We recently had a system crash in the cnic module. Vmcore analysis confirmed that "ip link up" was executed which failed due to an allocation failure because of memory fragmentation. Further analysis revealed that the cnic irq vector was still allocated after the "ip link up" that failed. When "ip link down" was executed it called free_msi_irqs() which crashed the system because the cnic irq was still in use. PANIC: "kernel BUG at drivers/pci/msi.c:411!" The code execution was: cnic_netdev_event() if (event == NETDEV_UP) { . . if (!cnic_start_hw(dev)) cnic_start_hw() calls cnic_cm_open() which failed with -ENOMEM cnic_start_hw() then took the err1 path: err1: cp->free_resc(dev); <---- frees resources but not irq vector pci_dev_put(dev->pcidev); return err; } This returns control back to cnic_netdev_event() but now the cnic irq vector is still allocated even though cnic_cm_open() failed. The next "ip link down" will trigger the crash. The cnic_start_hw() routine is not handling the allocation failure correctly. Fix this by checking whether the CNIC_DRV_STATE_HANDLES_IRQ flag is set, indicating that the hardware has been started in cnic_start_hw(). If it has, then call cp->stop_hw() which frees the cnic irq vector and cnic resources. Otherwise just maintain the previous behaviour and free cnic resources. I reproduced this by injecting an ENOMEM error into cnic_cm_alloc_mem()'s return code. # ip link set dev enpX down # ip link set dev enpX up <--- hits the allocation failure # ip link set dev enpX down <--- crashes here With this patch I confirmed there was no crash in the reproducer. Signed-off-by: Jon Maxwell Signed-off-by: David S.
Miller --- drivers/net/ethernet/broadcom/cnic.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c index b69dc58faeab..b1d2ac818710 100644 --- a/drivers/net/ethernet/broadcom/cnic.c +++ b/drivers/net/ethernet/broadcom/cnic.c @@ -5350,7 +5350,10 @@ static int cnic_start_hw(struct cnic_dev *dev) return 0; err1: - cp->free_resc(dev); + if (ethdev->drv_state & CNIC_DRV_STATE_HANDLES_IRQ) + cp->stop_hw(dev); + else + cp->free_resc(dev); pci_dev_put(dev->pcidev); return err; } From b3b4663c973bf11ef19243fa4f1a544cbdc2fa8e Mon Sep 17 00:00:00 2001 From: David Ahern Date: Wed, 4 May 2016 21:46:12 -0700 Subject: [PATCH 1385/1649] net: vrf: Create FIB tables on link create Tables have to exist for VRFs to function. Ensure they exist when VRF device is created. Signed-off-by: David Ahern Signed-off-by: David S. Miller --- drivers/net/vrf.c | 13 +++++++++++-- net/ipv4/fib_frontend.c | 1 + net/ipv6/ip6_fib.c | 1 + 3 files changed, 13 insertions(+), 2 deletions(-) diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c index 8a8f1e58b415..4b2461ae5d3b 100644 --- a/drivers/net/vrf.c +++ b/drivers/net/vrf.c @@ -364,17 +364,23 @@ static int vrf_rt6_create(struct net_device *dev) { struct net_vrf *vrf = netdev_priv(dev); struct net *net = dev_net(dev); + struct fib6_table *rt6i_table; struct rt6_info *rt6; int rc = -ENOMEM; + rt6i_table = fib6_new_table(net, vrf->tb_id); + if (!rt6i_table) + goto out; + rt6 = ip6_dst_alloc(net, dev, DST_HOST | DST_NOPOLICY | DST_NOXFRM | DST_NOCACHE); if (!rt6) goto out; - rt6->dst.output = vrf_output6; - rt6->rt6i_table = fib6_get_table(net, vrf->tb_id); dst_hold(&rt6->dst); + + rt6->rt6i_table = rt6i_table; + rt6->dst.output = vrf_output6; vrf->rt6 = rt6; rc = 0; out: @@ -462,6 +468,9 @@ static struct rtable *vrf_rtable_create(struct net_device *dev) struct net_vrf *vrf = netdev_priv(dev); struct rtable *rth; + if (!fib_new_table(dev_net(dev), vrf->tb_id)) + return NULL; + rth = rt_dst_alloc(dev, 0, RTN_UNICAST, 1, 1, 0); if (rth) { rth->dst.output = vrf_output; diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c index 63566ec54794..ef2ebeb89d0f 100644 --- a/net/ipv4/fib_frontend.c +++ b/net/ipv4/fib_frontend.c @@ -110,6 +110,7 @@ struct fib_table *fib_new_table(struct net *net, u32 id) hlist_add_head_rcu(&tb->tb_hlist, &net->ipv4.fib_table_hash[h]); return tb; } +EXPORT_SYMBOL_GPL(fib_new_table); /* caller must hold either rtnl or rcu read lock */ struct fib_table *fib_get_table(struct net *net, u32 id) diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c index ea071fad67a0..1bcef2369d64 100644 --- a/net/ipv6/ip6_fib.c +++ b/net/ipv6/ip6_fib.c @@ -240,6 +240,7 @@ struct fib6_table *fib6_new_table(struct net *net, u32 id) return tb; } +EXPORT_SYMBOL_GPL(fib6_new_table); struct fib6_table *fib6_get_table(struct net *net, u32 id) { From a0af53b511423cca93900066512379e21586d7dd Mon Sep 17 00:00:00 2001 From: Tedd Ho-Jeong An Date: Fri, 6 May 2016 11:53:46 -0700 Subject: [PATCH 1386/1649] Bluetooth: Add support for Intel Bluetooth device 8265 [8087:0a2b] This patch adds support for Intel Bluetooth device 8265 also known as Windstorm Peak (WsP). T: Bus=01 Lev=01 Prnt=01 Port=01 Cnt=02 Dev#= 6 Spd=12 MxCh= 0 D: Ver= 2.00 Cls=e0(wlcon) Sub=01 Prot=01 MxPS=64 #Cfgs= 1 P: Vendor=8087 ProdID=0a2b Rev= 0.10 C:* #Ifs= 2 Cfg#= 1 Atr=e0 MxPwr=100mA I:* If#= 0 Alt= 0 #EPs= 3 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb E: Ad=81(I) Atr=03(Int.) 
MxPS= 64 Ivl=1ms E: Ad=02(O) Atr=02(Bulk) MxPS= 64 Ivl=0ms E: Ad=82(I) Atr=02(Bulk) MxPS= 64 Ivl=0ms I:* If#= 1 Alt= 0 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb E: Ad=03(O) Atr=01(Isoc) MxPS= 0 Ivl=1ms E: Ad=83(I) Atr=01(Isoc) MxPS= 0 Ivl=1ms I: If#= 1 Alt= 1 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb E: Ad=03(O) Atr=01(Isoc) MxPS= 9 Ivl=1ms E: Ad=83(I) Atr=01(Isoc) MxPS= 9 Ivl=1ms I: If#= 1 Alt= 2 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb E: Ad=03(O) Atr=01(Isoc) MxPS= 17 Ivl=1ms E: Ad=83(I) Atr=01(Isoc) MxPS= 17 Ivl=1ms I: If#= 1 Alt= 3 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb E: Ad=03(O) Atr=01(Isoc) MxPS= 25 Ivl=1ms E: Ad=83(I) Atr=01(Isoc) MxPS= 25 Ivl=1ms I: If#= 1 Alt= 4 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb E: Ad=03(O) Atr=01(Isoc) MxPS= 33 Ivl=1ms E: Ad=83(I) Atr=01(Isoc) MxPS= 33 Ivl=1ms I: If#= 1 Alt= 5 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb E: Ad=03(O) Atr=01(Isoc) MxPS= 49 Ivl=1ms E: Ad=83(I) Atr=01(Isoc) MxPS= 49 Ivl=1ms Signed-off-by: Tedd Ho-Jeong An Signed-off-by: Marcel Holtmann --- drivers/bluetooth/btusb.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index 0d4e372e426d..6aae9590511a 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -2001,12 +2001,13 @@ static int btusb_setup_intel_new(struct hci_dev *hdev) return -EINVAL; } - /* At the moment only the hardware variant iBT 3.0 (LnP/SfP) is - * supported by this firmware loading method. This check has been - * put in place to ensure correct forward compatibility options - * when newer hardware variants come along. + /* At the moment the iBT 3.0 hardware variants 0x0b (LnP/SfP) + * and 0x0c (WsP) are supported by this firmware loading method. + * + * This check has been put in place to ensure correct forward + * compatibility options when newer hardware variants come along. */ - if (ver.hw_variant != 0x0b) { + if (ver.hw_variant != 0x0b && ver.hw_variant != 0x0c) { BT_ERR("%s: Unsupported Intel hardware variant (%u)", hdev->name, ver.hw_variant); return -EINVAL; From 1a0dc1ac1d2928e25739ee82d7e04423b01da563 Mon Sep 17 00:00:00 2001 From: Alexei Starovoitov Date: Thu, 5 May 2016 19:49:09 -0700 Subject: [PATCH 1387/1649] bpf: cleanup verifier code cleanup verifier code and prepare it for addition of "pointer to packet" logic Signed-off-by: Alexei Starovoitov Acked-by: Daniel Borkmann Signed-off-by: David S. 
Miller --- kernel/bpf/verifier.c | 100 ++++++++++++++++++++++-------------------- 1 file changed, 53 insertions(+), 47 deletions(-) diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 63554b6d4e25..afeb62808902 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -249,28 +249,30 @@ static const char * const reg_type_str[] = { [CONST_IMM] = "imm", }; -static void print_verifier_state(struct verifier_env *env) +static void print_verifier_state(struct verifier_state *state) { + struct reg_state *reg; enum bpf_reg_type t; int i; for (i = 0; i < MAX_BPF_REG; i++) { - t = env->cur_state.regs[i].type; + reg = &state->regs[i]; + t = reg->type; if (t == NOT_INIT) continue; verbose(" R%d=%s", i, reg_type_str[t]); if (t == CONST_IMM || t == PTR_TO_STACK) - verbose("%ld", env->cur_state.regs[i].imm); + verbose("%ld", reg->imm); else if (t == CONST_PTR_TO_MAP || t == PTR_TO_MAP_VALUE || t == PTR_TO_MAP_VALUE_OR_NULL) verbose("(ks=%d,vs=%d)", - env->cur_state.regs[i].map_ptr->key_size, - env->cur_state.regs[i].map_ptr->value_size); + reg->map_ptr->key_size, + reg->map_ptr->value_size); } for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) { - if (env->cur_state.stack_slot_type[i] == STACK_SPILL) + if (state->stack_slot_type[i] == STACK_SPILL) verbose(" fp%d=%s", -MAX_BPF_STACK + i, - reg_type_str[env->cur_state.spilled_regs[i / BPF_REG_SIZE].type]); + reg_type_str[state->spilled_regs[i / BPF_REG_SIZE].type]); } verbose("\n"); } @@ -686,10 +688,11 @@ static int check_mem_access(struct verifier_env *env, u32 regno, int off, int value_regno) { struct verifier_state *state = &env->cur_state; + struct reg_state *reg = &state->regs[regno]; int size, err = 0; - if (state->regs[regno].type == PTR_TO_STACK) - off += state->regs[regno].imm; + if (reg->type == PTR_TO_STACK) + off += reg->imm; size = bpf_size_to_bytes(bpf_size); if (size < 0) @@ -700,7 +703,7 @@ static int check_mem_access(struct verifier_env *env, u32 regno, int off, return -EACCES; } - if (state->regs[regno].type == PTR_TO_MAP_VALUE) { + if (reg->type == PTR_TO_MAP_VALUE) { if (t == BPF_WRITE && value_regno >= 0 && is_pointer_value(env, value_regno)) { verbose("R%d leaks addr into map\n", value_regno); @@ -710,7 +713,7 @@ static int check_mem_access(struct verifier_env *env, u32 regno, int off, if (!err && t == BPF_READ && value_regno >= 0) mark_reg_unknown_value(state->regs, value_regno); - } else if (state->regs[regno].type == PTR_TO_CTX) { + } else if (reg->type == PTR_TO_CTX) { if (t == BPF_WRITE && value_regno >= 0 && is_pointer_value(env, value_regno)) { verbose("R%d leaks addr into ctx\n", value_regno); @@ -720,8 +723,7 @@ static int check_mem_access(struct verifier_env *env, u32 regno, int off, if (!err && t == BPF_READ && value_regno >= 0) mark_reg_unknown_value(state->regs, value_regno); - } else if (state->regs[regno].type == FRAME_PTR || - state->regs[regno].type == PTR_TO_STACK) { + } else if (reg->type == FRAME_PTR || reg->type == PTR_TO_STACK) { if (off >= 0 || off < -MAX_BPF_STACK) { verbose("invalid stack off=%d size=%d\n", off, size); return -EACCES; @@ -739,7 +741,7 @@ static int check_mem_access(struct verifier_env *env, u32 regno, int off, } } else { verbose("R%d invalid mem access '%s'\n", - regno, reg_type_str[state->regs[regno].type]); + regno, reg_type_str[reg->type]); return -EACCES; } return err; @@ -1104,7 +1106,7 @@ static int check_call(struct verifier_env *env, int func_id) /* check validity of 32-bit and 64-bit arithmetic operations */ static int check_alu_op(struct verifier_env *env, struct 
bpf_insn *insn) { - struct reg_state *regs = env->cur_state.regs; + struct reg_state *regs = env->cur_state.regs, *dst_reg; u8 opcode = BPF_OP(insn->code); int err; @@ -1193,8 +1195,6 @@ static int check_alu_op(struct verifier_env *env, struct bpf_insn *insn) } else { /* all other ALU ops: and, sub, xor, add, ... */ - bool stack_relative = false; - if (BPF_SRC(insn->code) == BPF_X) { if (insn->imm != 0 || insn->off != 0) { verbose("BPF_ALU uses reserved fields\n"); @@ -1232,11 +1232,19 @@ static int check_alu_op(struct verifier_env *env, struct bpf_insn *insn) } } + /* check dest operand */ + err = check_reg_arg(regs, insn->dst_reg, DST_OP_NO_MARK); + if (err) + return err; + + dst_reg = ®s[insn->dst_reg]; + /* pattern match 'bpf_add Rx, imm' instruction */ if (opcode == BPF_ADD && BPF_CLASS(insn->code) == BPF_ALU64 && - regs[insn->dst_reg].type == FRAME_PTR && - BPF_SRC(insn->code) == BPF_K) { - stack_relative = true; + dst_reg->type == FRAME_PTR && BPF_SRC(insn->code) == BPF_K) { + dst_reg->type = PTR_TO_STACK; + dst_reg->imm = insn->imm; + return 0; } else if (is_pointer_value(env, insn->dst_reg)) { verbose("R%d pointer arithmetic prohibited\n", insn->dst_reg); @@ -1248,15 +1256,8 @@ static int check_alu_op(struct verifier_env *env, struct bpf_insn *insn) return -EACCES; } - /* check dest operand */ - err = check_reg_arg(regs, insn->dst_reg, DST_OP); - if (err) - return err; - - if (stack_relative) { - regs[insn->dst_reg].type = PTR_TO_STACK; - regs[insn->dst_reg].imm = insn->imm; - } + /* mark dest operand */ + mark_reg_unknown_value(regs, insn->dst_reg); } return 0; @@ -1265,7 +1266,7 @@ static int check_alu_op(struct verifier_env *env, struct bpf_insn *insn) static int check_cond_jmp_op(struct verifier_env *env, struct bpf_insn *insn, int *insn_idx) { - struct reg_state *regs = env->cur_state.regs; + struct reg_state *regs = env->cur_state.regs, *dst_reg; struct verifier_state *other_branch; u8 opcode = BPF_OP(insn->code); int err; @@ -1303,11 +1304,12 @@ static int check_cond_jmp_op(struct verifier_env *env, if (err) return err; + dst_reg = ®s[insn->dst_reg]; + /* detect if R == 0 where R was initialized to zero earlier */ if (BPF_SRC(insn->code) == BPF_K && (opcode == BPF_JEQ || opcode == BPF_JNE) && - regs[insn->dst_reg].type == CONST_IMM && - regs[insn->dst_reg].imm == insn->imm) { + dst_reg->type == CONST_IMM && dst_reg->imm == insn->imm) { if (opcode == BPF_JEQ) { /* if (imm == imm) goto pc+off; * only follow the goto, ignore fall-through @@ -1329,9 +1331,8 @@ static int check_cond_jmp_op(struct verifier_env *env, /* detect if R == 0 where R is returned value from bpf_map_lookup_elem() */ if (BPF_SRC(insn->code) == BPF_K && - insn->imm == 0 && (opcode == BPF_JEQ || - opcode == BPF_JNE) && - regs[insn->dst_reg].type == PTR_TO_MAP_VALUE_OR_NULL) { + insn->imm == 0 && (opcode == BPF_JEQ || opcode == BPF_JNE) && + dst_reg->type == PTR_TO_MAP_VALUE_OR_NULL) { if (opcode == BPF_JEQ) { /* next fallthrough insn can access memory via * this register @@ -1366,7 +1367,7 @@ static int check_cond_jmp_op(struct verifier_env *env, } } if (log_level) - print_verifier_state(env); + print_verifier_state(&env->cur_state); return 0; } @@ -1444,14 +1445,14 @@ static int check_ld_abs(struct verifier_env *env, struct bpf_insn *insn) int i, err; if (!may_access_skb(env->prog->type)) { - verbose("BPF_LD_ABS|IND instructions not allowed for this program type\n"); + verbose("BPF_LD_[ABS|IND] instructions not allowed for this program type\n"); return -EINVAL; } if (insn->dst_reg != BPF_REG_0 || insn->off 
!= 0 || BPF_SIZE(insn->code) == BPF_DW || (mode == BPF_ABS && insn->src_reg != BPF_REG_0)) { - verbose("BPF_LD_ABS uses reserved fields\n"); + verbose("BPF_LD_[ABS|IND] uses reserved fields\n"); return -EINVAL; } @@ -1712,17 +1713,21 @@ err_free: */ static bool states_equal(struct verifier_state *old, struct verifier_state *cur) { + struct reg_state *rold, *rcur; int i; for (i = 0; i < MAX_BPF_REG; i++) { - if (memcmp(&old->regs[i], &cur->regs[i], - sizeof(old->regs[0])) != 0) { - if (old->regs[i].type == NOT_INIT || - (old->regs[i].type == UNKNOWN_VALUE && - cur->regs[i].type != NOT_INIT)) - continue; - return false; - } + rold = &old->regs[i]; + rcur = &cur->regs[i]; + + if (memcmp(rold, rcur, sizeof(*rold)) == 0) + continue; + + if (rold->type == NOT_INIT || + (rold->type == UNKNOWN_VALUE && rcur->type != NOT_INIT)) + continue; + + return false; } for (i = 0; i < MAX_BPF_STACK; i++) { @@ -1844,7 +1849,7 @@ static int do_check(struct verifier_env *env) if (log_level && do_print_state) { verbose("\nfrom %d to %d:", prev_insn_idx, insn_idx); - print_verifier_state(env); + print_verifier_state(&env->cur_state); do_print_state = false; } @@ -2056,6 +2061,7 @@ process_bpf_exit: insn_idx++; } + verbose("processed %d insns\n", insn_processed); return 0; } From 969bf05eb3cedd5a8d4b7c346a85c2ede87a6d6d Mon Sep 17 00:00:00 2001 From: Alexei Starovoitov Date: Thu, 5 May 2016 19:49:10 -0700 Subject: [PATCH 1388/1649] bpf: direct packet access Extended BPF carried over two instructions from classic to access packet data: LD_ABS and LD_IND. They're highly optimized in JITs, but due to their design they have to do length check for every access. When BPF is processing 20M packets per second single LD_ABS after JIT is consuming 3% cpu. Hence the need to optimize it further by amortizing the cost of 'off < skb_headlen' over multiple packet accesses. One option is to introduce two new eBPF instructions LD_ABS_DW and LD_IND_DW with similar usage as skb_header_pointer(). The kernel part for interpreter and x64 JIT was implemented in [1], but such new insns behave like old ld_abs and abort the program with 'return 0' if access is beyond linear data. Such hidden control flow is hard to workaround plus changing JITs and rolling out new llvm is incovenient. Therefore allow cls_bpf/act_bpf program access skb->data directly: int bpf_prog(struct __sk_buff *skb) { struct iphdr *ip; if (skb->data + sizeof(struct iphdr) + ETH_HLEN > skb->data_end) /* packet too small */ return 0; ip = skb->data + ETH_HLEN; /* access IP header fields with direct loads */ if (ip->version != 4 || ip->saddr == 0x7f000001) return 1; [...] } This solution avoids introduction of new instructions. llvm stays the same and all JITs stay the same, but verifier has to work extra hard to prove safety of the above program. For XDP the direct store instructions can be allowed as well. The skb->data is NET_IP_ALIGNED, so for common cases the verifier can check the alignment. The complex packet parsers where packet pointer is adjusted incrementally cannot be tracked for alignment, so allow byte access in such cases and misaligned access on architectures that define efficient_unaligned_access [1] https://git.kernel.org/cgit/linux/kernel/git/ast/bpf.git/?h=ld_abs_dw Signed-off-by: Alexei Starovoitov Acked-by: Daniel Borkmann Signed-off-by: David S. 
Miller --- include/uapi/linux/bpf.h | 2 + kernel/bpf/core.c | 5 + kernel/bpf/verifier.c | 441 ++++++++++++++++++++++++++++++++++++++- 3 files changed, 440 insertions(+), 8 deletions(-) diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index b7b0fb1292e7..406459b935a2 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -370,6 +370,8 @@ struct __sk_buff { __u32 cb[5]; __u32 hash; __u32 tc_classid; + __u32 data; + __u32 data_end; }; struct bpf_tunnel_key { diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index e4248fe79513..d781b077431f 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -794,6 +794,11 @@ void __weak bpf_int_jit_compile(struct bpf_prog *prog) { } +bool __weak bpf_helper_changes_skb_data(void *func) +{ + return false; +} + /* To execute LD_ABS/LD_IND instructions __bpf_prog_run() may call * skb_copy_bits(), so provide a weak definition of it for NET-less config. */ diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index afeb62808902..6338c61fc2a1 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -1,4 +1,5 @@ /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com + * Copyright (c) 2016 Facebook * * This program is free software; you can redistribute it and/or * modify it under the terms of version 2 of the GNU General Public @@ -136,13 +137,32 @@ enum bpf_reg_type { FRAME_PTR, /* reg == frame_pointer */ PTR_TO_STACK, /* reg == frame_pointer + imm */ CONST_IMM, /* constant integer value */ + + /* PTR_TO_PACKET represents: + * skb->data + * skb->data + imm + * skb->data + (u16) var + * skb->data + (u16) var + imm + * if (range > 0) then [ptr, ptr + range - off) is safe to access + * if (id > 0) means that some 'var' was added + * if (off > 0) menas that 'imm' was added + */ + PTR_TO_PACKET, + PTR_TO_PACKET_END, /* skb->data + headlen */ }; struct reg_state { enum bpf_reg_type type; union { - /* valid when type == CONST_IMM | PTR_TO_STACK */ - long imm; + /* valid when type == CONST_IMM | PTR_TO_STACK | UNKNOWN_VALUE */ + s64 imm; + + /* valid when type == PTR_TO_PACKET* */ + struct { + u32 id; + u16 off; + u16 range; + }; /* valid when type == CONST_PTR_TO_MAP | PTR_TO_MAP_VALUE | * PTR_TO_MAP_VALUE_OR_NULL @@ -247,6 +267,8 @@ static const char * const reg_type_str[] = { [FRAME_PTR] = "fp", [PTR_TO_STACK] = "fp", [CONST_IMM] = "imm", + [PTR_TO_PACKET] = "pkt", + [PTR_TO_PACKET_END] = "pkt_end", }; static void print_verifier_state(struct verifier_state *state) @@ -262,7 +284,12 @@ static void print_verifier_state(struct verifier_state *state) continue; verbose(" R%d=%s", i, reg_type_str[t]); if (t == CONST_IMM || t == PTR_TO_STACK) - verbose("%ld", reg->imm); + verbose("%lld", reg->imm); + else if (t == PTR_TO_PACKET) + verbose("(id=%d,off=%d,r=%d)", + reg->id, reg->off, reg->range); + else if (t == UNKNOWN_VALUE && reg->imm) + verbose("%lld", reg->imm); else if (t == CONST_PTR_TO_MAP || t == PTR_TO_MAP_VALUE || t == PTR_TO_MAP_VALUE_OR_NULL) verbose("(ks=%d,vs=%d)", @@ -548,6 +575,8 @@ static bool is_spillable_regtype(enum bpf_reg_type type) case PTR_TO_MAP_VALUE_OR_NULL: case PTR_TO_STACK: case PTR_TO_CTX: + case PTR_TO_PACKET: + case PTR_TO_PACKET_END: case FRAME_PTR: case CONST_PTR_TO_MAP: return true; @@ -647,6 +676,27 @@ static int check_map_access(struct verifier_env *env, u32 regno, int off, return 0; } +#define MAX_PACKET_OFF 0xffff + +static int check_packet_access(struct verifier_env *env, u32 regno, int off, + int size) +{ + struct reg_state *regs = env->cur_state.regs; + struct reg_state *reg = 
®s[regno]; + int linear_size = (int) reg->range - (int) reg->off; + + if (linear_size < 0 || linear_size >= MAX_PACKET_OFF) { + verbose("verifier bug\n"); + return -EFAULT; + } + if (off < 0 || off + size > linear_size) { + verbose("invalid access to packet, off=%d size=%d, allowed=%d\n", + off, size, linear_size); + return -EACCES; + } + return 0; +} + /* check access to 'struct bpf_context' fields */ static int check_ctx_access(struct verifier_env *env, int off, int size, enum bpf_access_type t) @@ -677,6 +727,45 @@ static bool is_pointer_value(struct verifier_env *env, int regno) } } +static int check_ptr_alignment(struct verifier_env *env, struct reg_state *reg, + int off, int size) +{ + if (reg->type != PTR_TO_PACKET) { + if (off % size != 0) { + verbose("misaligned access off %d size %d\n", off, size); + return -EACCES; + } else { + return 0; + } + } + + switch (env->prog->type) { + case BPF_PROG_TYPE_SCHED_CLS: + case BPF_PROG_TYPE_SCHED_ACT: + break; + default: + verbose("verifier is misconfigured\n"); + return -EACCES; + } + + if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) + /* misaligned access to packet is ok on x86,arm,arm64 */ + return 0; + + if (reg->id && size != 1) { + verbose("Unknown packet alignment. Only byte-sized access allowed\n"); + return -EACCES; + } + + /* skb->data is NET_IP_ALIGN-ed */ + if ((NET_IP_ALIGN + reg->off + off) % size != 0) { + verbose("misaligned packet access off %d+%d+%d size %d\n", + NET_IP_ALIGN, reg->off, off, size); + return -EACCES; + } + return 0; +} + /* check whether memory at (regno + off) is accessible for t = (read | write) * if t==write, value_regno is a register which value is stored into memory * if t==read, value_regno is a register which will receive the value from memory @@ -698,10 +787,9 @@ static int check_mem_access(struct verifier_env *env, u32 regno, int off, if (size < 0) return size; - if (off % size != 0) { - verbose("misaligned access off %d size %d\n", off, size); - return -EACCES; - } + err = check_ptr_alignment(env, reg, off, size); + if (err) + return err; if (reg->type == PTR_TO_MAP_VALUE) { if (t == BPF_WRITE && value_regno >= 0 && @@ -720,8 +808,16 @@ static int check_mem_access(struct verifier_env *env, u32 regno, int off, return -EACCES; } err = check_ctx_access(env, off, size, t); - if (!err && t == BPF_READ && value_regno >= 0) + if (!err && t == BPF_READ && value_regno >= 0) { mark_reg_unknown_value(state->regs, value_regno); + if (off == offsetof(struct __sk_buff, data) && + env->allow_ptr_leaks) + /* note that reg.[id|off|range] == 0 */ + state->regs[value_regno].type = PTR_TO_PACKET; + else if (off == offsetof(struct __sk_buff, data_end) && + env->allow_ptr_leaks) + state->regs[value_regno].type = PTR_TO_PACKET_END; + } } else if (reg->type == FRAME_PTR || reg->type == PTR_TO_STACK) { if (off >= 0 || off < -MAX_BPF_STACK) { @@ -739,11 +835,28 @@ static int check_mem_access(struct verifier_env *env, u32 regno, int off, } else { err = check_stack_read(state, off, size, value_regno); } + } else if (state->regs[regno].type == PTR_TO_PACKET) { + if (t == BPF_WRITE) { + verbose("cannot write into packet\n"); + return -EACCES; + } + err = check_packet_access(env, regno, off, size); + if (!err && t == BPF_READ && value_regno >= 0) + mark_reg_unknown_value(state->regs, value_regno); } else { verbose("R%d invalid mem access '%s'\n", regno, reg_type_str[reg->type]); return -EACCES; } + + if (!err && size <= 2 && value_regno >= 0 && env->allow_ptr_leaks && + state->regs[value_regno].type == UNKNOWN_VALUE) { + 
/* 1 or 2 byte load zero-extends, determine the number of + * zero upper bits. Not doing it fo 4 byte load, since + * such values cannot be added to ptr_to_packet anyway. + */ + state->regs[value_regno].imm = 64 - size * 8; + } return err; } @@ -1001,6 +1114,29 @@ static int check_raw_mode(const struct bpf_func_proto *fn) return count > 1 ? -EINVAL : 0; } +static void clear_all_pkt_pointers(struct verifier_env *env) +{ + struct verifier_state *state = &env->cur_state; + struct reg_state *regs = state->regs, *reg; + int i; + + for (i = 0; i < MAX_BPF_REG; i++) + if (regs[i].type == PTR_TO_PACKET || + regs[i].type == PTR_TO_PACKET_END) + mark_reg_unknown_value(regs, i); + + for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) { + if (state->stack_slot_type[i] != STACK_SPILL) + continue; + reg = &state->spilled_regs[i / BPF_REG_SIZE]; + if (reg->type != PTR_TO_PACKET && + reg->type != PTR_TO_PACKET_END) + continue; + reg->type = UNKNOWN_VALUE; + reg->imm = 0; + } +} + static int check_call(struct verifier_env *env, int func_id) { struct verifier_state *state = &env->cur_state; @@ -1008,6 +1144,7 @@ static int check_call(struct verifier_env *env, int func_id) struct reg_state *regs = state->regs; struct reg_state *reg; struct bpf_call_arg_meta meta; + bool changes_data; int i, err; /* find function prototype */ @@ -1030,6 +1167,8 @@ static int check_call(struct verifier_env *env, int func_id) return -EINVAL; } + changes_data = bpf_helper_changes_skb_data(fn->func); + memset(&meta, 0, sizeof(meta)); /* We only support one arg being in raw mode at the moment, which @@ -1100,6 +1239,189 @@ static int check_call(struct verifier_env *env, int func_id) if (err) return err; + if (changes_data) + clear_all_pkt_pointers(env); + return 0; +} + +static int check_packet_ptr_add(struct verifier_env *env, struct bpf_insn *insn) +{ + struct reg_state *regs = env->cur_state.regs; + struct reg_state *dst_reg = ®s[insn->dst_reg]; + struct reg_state *src_reg = ®s[insn->src_reg]; + s32 imm; + + if (BPF_SRC(insn->code) == BPF_K) { + /* pkt_ptr += imm */ + imm = insn->imm; + +add_imm: + if (imm <= 0) { + verbose("addition of negative constant to packet pointer is not allowed\n"); + return -EACCES; + } + if (imm >= MAX_PACKET_OFF || + imm + dst_reg->off >= MAX_PACKET_OFF) { + verbose("constant %d is too large to add to packet pointer\n", + imm); + return -EACCES; + } + /* a constant was added to pkt_ptr. 
+ * Remember it while keeping the same 'id' + */ + dst_reg->off += imm; + } else { + if (src_reg->type == CONST_IMM) { + /* pkt_ptr += reg where reg is known constant */ + imm = src_reg->imm; + goto add_imm; + } + /* disallow pkt_ptr += reg + * if reg is not uknown_value with guaranteed zero upper bits + * otherwise pkt_ptr may overflow and addition will become + * subtraction which is not allowed + */ + if (src_reg->type != UNKNOWN_VALUE) { + verbose("cannot add '%s' to ptr_to_packet\n", + reg_type_str[src_reg->type]); + return -EACCES; + } + if (src_reg->imm < 48) { + verbose("cannot add integer value with %lld upper zero bits to ptr_to_packet\n", + src_reg->imm); + return -EACCES; + } + /* dst_reg stays as pkt_ptr type and since some positive + * integer value was added to the pointer, increment its 'id' + */ + dst_reg->id++; + + /* something was added to pkt_ptr, set range and off to zero */ + dst_reg->off = 0; + dst_reg->range = 0; + } + return 0; +} + +static int evaluate_reg_alu(struct verifier_env *env, struct bpf_insn *insn) +{ + struct reg_state *regs = env->cur_state.regs; + struct reg_state *dst_reg = ®s[insn->dst_reg]; + u8 opcode = BPF_OP(insn->code); + s64 imm_log2; + + /* for type == UNKNOWN_VALUE: + * imm > 0 -> number of zero upper bits + * imm == 0 -> don't track which is the same as all bits can be non-zero + */ + + if (BPF_SRC(insn->code) == BPF_X) { + struct reg_state *src_reg = ®s[insn->src_reg]; + + if (src_reg->type == UNKNOWN_VALUE && src_reg->imm > 0 && + dst_reg->imm && opcode == BPF_ADD) { + /* dreg += sreg + * where both have zero upper bits. Adding them + * can only result making one more bit non-zero + * in the larger value. + * Ex. 0xffff (imm=48) + 1 (imm=63) = 0x10000 (imm=47) + * 0xffff (imm=48) + 0xffff = 0x1fffe (imm=47) + */ + dst_reg->imm = min(dst_reg->imm, src_reg->imm); + dst_reg->imm--; + return 0; + } + if (src_reg->type == CONST_IMM && src_reg->imm > 0 && + dst_reg->imm && opcode == BPF_ADD) { + /* dreg += sreg + * where dreg has zero upper bits and sreg is const. + * Adding them can only result making one more bit + * non-zero in the larger value. + */ + imm_log2 = __ilog2_u64((long long)src_reg->imm); + dst_reg->imm = min(dst_reg->imm, 63 - imm_log2); + dst_reg->imm--; + return 0; + } + /* all other cases non supported yet, just mark dst_reg */ + dst_reg->imm = 0; + return 0; + } + + /* sign extend 32-bit imm into 64-bit to make sure that + * negative values occupy bit 63. Note ilog2() would have + * been incorrect, since sizeof(insn->imm) == 4 + */ + imm_log2 = __ilog2_u64((long long)insn->imm); + + if (dst_reg->imm && opcode == BPF_LSH) { + /* reg <<= imm + * if reg was a result of 2 byte load, then its imm == 48 + * which means that upper 48 bits are zero and shifting this reg + * left by 4 would mean that upper 44 bits are still zero + */ + dst_reg->imm -= insn->imm; + } else if (dst_reg->imm && opcode == BPF_MUL) { + /* reg *= imm + * if multiplying by 14 subtract 4 + * This is conservative calculation of upper zero bits. 
+ * It's not trying to special case insn->imm == 1 or 0 cases + */ + dst_reg->imm -= imm_log2 + 1; + } else if (opcode == BPF_AND) { + /* reg &= imm */ + dst_reg->imm = 63 - imm_log2; + } else if (dst_reg->imm && opcode == BPF_ADD) { + /* reg += imm */ + dst_reg->imm = min(dst_reg->imm, 63 - imm_log2); + dst_reg->imm--; + } else if (opcode == BPF_RSH) { + /* reg >>= imm + * which means that after right shift, upper bits will be zero + * note that verifier already checked that + * 0 <= imm < 64 for shift insn + */ + dst_reg->imm += insn->imm; + if (unlikely(dst_reg->imm > 64)) + /* some dumb code did: + * r2 = *(u32 *)mem; + * r2 >>= 32; + * and all bits are zero now */ + dst_reg->imm = 64; + } else { + /* all other alu ops, means that we don't know what will + * happen to the value, mark it with unknown number of zero bits + */ + dst_reg->imm = 0; + } + + if (dst_reg->imm < 0) { + /* all 64 bits of the register can contain non-zero bits + * and such value cannot be added to ptr_to_packet, since it + * may overflow, mark it as unknown to avoid further eval + */ + dst_reg->imm = 0; + } + return 0; +} + +static int evaluate_reg_imm_alu(struct verifier_env *env, struct bpf_insn *insn) +{ + struct reg_state *regs = env->cur_state.regs; + struct reg_state *dst_reg = ®s[insn->dst_reg]; + struct reg_state *src_reg = ®s[insn->src_reg]; + u8 opcode = BPF_OP(insn->code); + + /* dst_reg->type == CONST_IMM here, simulate execution of 'add' insn. + * Don't care about overflow or negative values, just add them + */ + if (opcode == BPF_ADD && BPF_SRC(insn->code) == BPF_K) + dst_reg->imm += insn->imm; + else if (opcode == BPF_ADD && BPF_SRC(insn->code) == BPF_X && + src_reg->type == CONST_IMM) + dst_reg->imm += src_reg->imm; + else + mark_reg_unknown_value(regs, insn->dst_reg); return 0; } @@ -1245,6 +1567,21 @@ static int check_alu_op(struct verifier_env *env, struct bpf_insn *insn) dst_reg->type = PTR_TO_STACK; dst_reg->imm = insn->imm; return 0; + } else if (opcode == BPF_ADD && + BPF_CLASS(insn->code) == BPF_ALU64 && + dst_reg->type == PTR_TO_PACKET) { + /* ptr_to_packet += K|X */ + return check_packet_ptr_add(env, insn); + } else if (BPF_CLASS(insn->code) == BPF_ALU64 && + dst_reg->type == UNKNOWN_VALUE && + env->allow_ptr_leaks) { + /* unknown += K|X */ + return evaluate_reg_alu(env, insn); + } else if (BPF_CLASS(insn->code) == BPF_ALU64 && + dst_reg->type == CONST_IMM && + env->allow_ptr_leaks) { + /* reg_imm += K|X */ + return evaluate_reg_imm_alu(env, insn); } else if (is_pointer_value(env, insn->dst_reg)) { verbose("R%d pointer arithmetic prohibited\n", insn->dst_reg); @@ -1263,6 +1600,34 @@ static int check_alu_op(struct verifier_env *env, struct bpf_insn *insn) return 0; } +static void find_good_pkt_pointers(struct verifier_env *env, + struct reg_state *dst_reg) +{ + struct verifier_state *state = &env->cur_state; + struct reg_state *regs = state->regs, *reg; + int i; + /* r2 = r3; + * r2 += 8 + * if (r2 > pkt_end) goto somewhere + * r2 == dst_reg, pkt_end == src_reg, + * r2=pkt(id=n,off=8,r=0) + * r3=pkt(id=n,off=0,r=0) + * find register r3 and mark its range as r3=pkt(id=n,off=0,r=8) + * so that range of bytes [r3, r3 + 8) is safe to access + */ + for (i = 0; i < MAX_BPF_REG; i++) + if (regs[i].type == PTR_TO_PACKET && regs[i].id == dst_reg->id) + regs[i].range = dst_reg->off; + + for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) { + if (state->stack_slot_type[i] != STACK_SPILL) + continue; + reg = &state->spilled_regs[i / BPF_REG_SIZE]; + if (reg->type == PTR_TO_PACKET && reg->id == 
dst_reg->id) + reg->range = dst_reg->off; + } +} + static int check_cond_jmp_op(struct verifier_env *env, struct bpf_insn *insn, int *insn_idx) { @@ -1346,6 +1711,10 @@ static int check_cond_jmp_op(struct verifier_env *env, regs[insn->dst_reg].type = CONST_IMM; regs[insn->dst_reg].imm = 0; } + } else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JGT && + dst_reg->type == PTR_TO_PACKET && + regs[insn->src_reg].type == PTR_TO_PACKET_END) { + find_good_pkt_pointers(env, dst_reg); } else if (is_pointer_value(env, insn->dst_reg)) { verbose("R%d pointer comparison prohibited\n", insn->dst_reg); return -EACCES; @@ -1685,6 +2054,58 @@ err_free: return ret; } +/* the following conditions reduce the number of explored insns + * from ~140k to ~80k for ultra large programs that use a lot of ptr_to_packet + */ +static bool compare_ptrs_to_packet(struct reg_state *old, struct reg_state *cur) +{ + if (old->id != cur->id) + return false; + + /* old ptr_to_packet is more conservative, since it allows smaller + * range. Ex: + * old(off=0,r=10) is equal to cur(off=0,r=20), because + * old(off=0,r=10) means that with range=10 the verifier proceeded + * further and found no issues with the program. Now we're in the same + * spot with cur(off=0,r=20), so we're safe too, since anything further + * will only be looking at most 10 bytes after this pointer. + */ + if (old->off == cur->off && old->range < cur->range) + return true; + + /* old(off=20,r=10) is equal to cur(off=22,re=22 or 5 or 0) + * since both cannot be used for packet access and safe(old) + * pointer has smaller off that could be used for further + * 'if (ptr > data_end)' check + * Ex: + * old(off=20,r=10) and cur(off=22,r=22) and cur(off=22,r=0) mean + * that we cannot access the packet. + * The safe range is: + * [ptr, ptr + range - off) + * so whenever off >=range, it means no safe bytes from this pointer. + * When comparing old->off <= cur->off, it means that older code + * went with smaller offset and that offset was later + * used to figure out the safe range after 'if (ptr > data_end)' check + * Say, 'old' state was explored like: + * ... R3(off=0, r=0) + * R4 = R3 + 20 + * ... now R4(off=20,r=0) <-- here + * if (R4 > data_end) + * ... R4(off=20,r=20), R3(off=0,r=20) and R3 can be used to access. + * ... the code further went all the way to bpf_exit. + * Now the 'cur' state at the mark 'here' has R4(off=30,r=0). + * old_R4(off=20,r=0) equal to cur_R4(off=30,r=0), since if the verifier + * goes further, such cur_R4 will give larger safe packet range after + * 'if (R4 > data_end)' and all further insn were already good with r=20, + * so they will be good with r=30 and we can prune the search. 
+ */ + if (old->off <= cur->off && + old->off >= old->range && cur->off >= cur->range) + return true; + + return false; +} + /* compare two verifier states * * all states stored in state_list are known to be valid, since @@ -1727,6 +2148,10 @@ static bool states_equal(struct verifier_state *old, struct verifier_state *cur) (rold->type == UNKNOWN_VALUE && rcur->type != NOT_INIT)) continue; + if (rold->type == PTR_TO_PACKET && rcur->type == PTR_TO_PACKET && + compare_ptrs_to_packet(rold, rcur)) + continue; + return false; } From 735b433397ea2f97d59240cbe4ea770aa7c88eef Mon Sep 17 00:00:00 2001 From: Alexei Starovoitov Date: Thu, 5 May 2016 19:49:11 -0700 Subject: [PATCH 1389/1649] bpf: improve verifier state equivalence since UNKNOWN_VALUE type is weaker than CONST_IMM we can un-teach verifier its recognition of constants in conditional branches without affecting safety. Ex: if (reg == 123) { .. here verifier was marking reg->type as CONST_IMM instead keep reg as UNKNOWN_VALUE } Two verifier states with UNKNOWN_VALUE are equivalent, whereas CONST_IMM_X != CONST_IMM_Y, since CONST_IMM is used for stack range verification and other cases. So help search pruning by marking registers as UNKNOWN_VALUE where possible instead of CONST_IMM. Signed-off-by: Alexei Starovoitov Acked-by: Daniel Borkmann Signed-off-by: David S. Miller --- kernel/bpf/verifier.c | 23 +++-------------------- 1 file changed, 3 insertions(+), 20 deletions(-) diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 6338c61fc2a1..84bff68cf80e 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -1704,12 +1704,11 @@ static int check_cond_jmp_op(struct verifier_env *env, */ regs[insn->dst_reg].type = PTR_TO_MAP_VALUE; /* branch targer cannot access it, since reg == 0 */ - other_branch->regs[insn->dst_reg].type = CONST_IMM; - other_branch->regs[insn->dst_reg].imm = 0; + mark_reg_unknown_value(other_branch->regs, + insn->dst_reg); } else { other_branch->regs[insn->dst_reg].type = PTR_TO_MAP_VALUE; - regs[insn->dst_reg].type = CONST_IMM; - regs[insn->dst_reg].imm = 0; + mark_reg_unknown_value(regs, insn->dst_reg); } } else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JGT && dst_reg->type == PTR_TO_PACKET && @@ -1718,22 +1717,6 @@ static int check_cond_jmp_op(struct verifier_env *env, } else if (is_pointer_value(env, insn->dst_reg)) { verbose("R%d pointer comparison prohibited\n", insn->dst_reg); return -EACCES; - } else if (BPF_SRC(insn->code) == BPF_K && - (opcode == BPF_JEQ || opcode == BPF_JNE)) { - - if (opcode == BPF_JEQ) { - /* detect if (R == imm) goto - * and in the target state recognize that R = imm - */ - other_branch->regs[insn->dst_reg].type = CONST_IMM; - other_branch->regs[insn->dst_reg].imm = insn->imm; - } else { - /* detect if (R != imm) goto - * and in the fall-through state recognize that R = imm - */ - regs[insn->dst_reg].type = CONST_IMM; - regs[insn->dst_reg].imm = insn->imm; - } } if (log_level) print_verifier_state(&env->cur_state); From db58ba45920255e967cc1d62a430cebd634b5046 Mon Sep 17 00:00:00 2001 From: Alexei Starovoitov Date: Thu, 5 May 2016 19:49:12 -0700 Subject: [PATCH 1390/1649] bpf: wire in data and data_end for cls_act_bpf allow cls_bpf and act_bpf programs access skb->data and skb->data_end pointers. The bpf helpers that change skb->data need to update data_end pointer as well. The verifier checks that programs always reload data, data_end pointers after calls to such bpf helpers. 
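In restricted C the expected pattern looks roughly like the sketch below; it assumes the usual samples/bpf helper header for the SEC() macro and the bpf_skb_store_bytes() declaration, with that helper standing in for any call that may reallocate the linear data.

#include <uapi/linux/bpf.h>
#include <uapi/linux/if_ether.h>
#include "bpf_helpers.h"

SEC("classifier")
int reload_after_helper(struct __sk_buff *skb)
{
	void *data = (void *)(long)skb->data;
	void *data_end = (void *)(long)skb->data_end;
	struct ethhdr *eth = data;
	char byte = 0;

	if (data + sizeof(*eth) > data_end)	/* required before any direct load */
		return 0;
	if (eth->h_proto == 0)			/* direct packet read */
		return 0;

	/* this helper may reallocate the skb head; the verifier forgets
	 * every packet pointer derived before the call
	 */
	bpf_skb_store_bytes(skb, 0, &byte, sizeof(byte), 0);

	/* reload and re-check before touching the packet again */
	data = (void *)(long)skb->data;
	data_end = (void *)(long)skb->data_end;
	eth = data;
	if (data + sizeof(*eth) > data_end)
		return 0;

	return eth->h_proto == 0;
}

char _license[] SEC("license") = "GPL";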
We cannot add 'data_end' pointer to struct qdisc_skb_cb directly, since it's embedded as-is by infiniband ipoib, so wrapper struct is needed. Signed-off-by: Alexei Starovoitov Acked-by: Daniel Borkmann Signed-off-by: David S. Miller --- include/linux/filter.h | 16 +++++++++++++ net/core/filter.c | 51 +++++++++++++++++++++++++++++++++++++----- net/sched/act_bpf.c | 2 ++ net/sched/cls_bpf.c | 2 ++ 4 files changed, 65 insertions(+), 6 deletions(-) diff --git a/include/linux/filter.h b/include/linux/filter.h index 43aa1f8855c7..ec1411c89105 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -352,6 +352,22 @@ struct sk_filter { #define BPF_SKB_CB_LEN QDISC_CB_PRIV_LEN +struct bpf_skb_data_end { + struct qdisc_skb_cb qdisc_cb; + void *data_end; +}; + +/* compute the linear packet data range [data, data_end) which + * will be accessed by cls_bpf and act_bpf programs + */ +static inline void bpf_compute_data_end(struct sk_buff *skb) +{ + struct bpf_skb_data_end *cb = (struct bpf_skb_data_end *)skb->cb; + + BUILD_BUG_ON(sizeof(*cb) > FIELD_SIZEOF(struct sk_buff, cb)); + cb->data_end = skb->data + skb_headlen(skb); +} + static inline u8 *bpf_skb_cb(struct sk_buff *skb) { /* eBPF programs may read/write skb->cb[] area to transfer meta diff --git a/net/core/filter.c b/net/core/filter.c index 218e5de8c402..71c2a1f473ad 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -1344,6 +1344,21 @@ struct bpf_scratchpad { static DEFINE_PER_CPU(struct bpf_scratchpad, bpf_sp); +static inline int bpf_try_make_writable(struct sk_buff *skb, + unsigned int write_len) +{ + int err; + + if (!skb_cloned(skb)) + return 0; + if (skb_clone_writable(skb, write_len)) + return 0; + err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); + if (!err) + bpf_compute_data_end(skb); + return err; +} + static u64 bpf_skb_store_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 flags) { struct bpf_scratchpad *sp = this_cpu_ptr(&bpf_sp); @@ -1366,7 +1381,7 @@ static u64 bpf_skb_store_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 flags) */ if (unlikely((u32) offset > 0xffff || len > sizeof(sp->buff))) return -EFAULT; - if (unlikely(skb_try_make_writable(skb, offset + len))) + if (unlikely(bpf_try_make_writable(skb, offset + len))) return -EFAULT; ptr = skb_header_pointer(skb, offset, len, sp->buff); @@ -1444,7 +1459,7 @@ static u64 bpf_l3_csum_replace(u64 r1, u64 r2, u64 from, u64 to, u64 flags) return -EINVAL; if (unlikely((u32) offset > 0xffff)) return -EFAULT; - if (unlikely(skb_try_make_writable(skb, offset + sizeof(sum)))) + if (unlikely(bpf_try_make_writable(skb, offset + sizeof(sum)))) return -EFAULT; ptr = skb_header_pointer(skb, offset, sizeof(sum), &sum); @@ -1499,7 +1514,7 @@ static u64 bpf_l4_csum_replace(u64 r1, u64 r2, u64 from, u64 to, u64 flags) return -EINVAL; if (unlikely((u32) offset > 0xffff)) return -EFAULT; - if (unlikely(skb_try_make_writable(skb, offset + sizeof(sum)))) + if (unlikely(bpf_try_make_writable(skb, offset + sizeof(sum)))) return -EFAULT; ptr = skb_header_pointer(skb, offset, sizeof(sum), &sum); @@ -1699,12 +1714,15 @@ static u64 bpf_skb_vlan_push(u64 r1, u64 r2, u64 vlan_tci, u64 r4, u64 r5) { struct sk_buff *skb = (struct sk_buff *) (long) r1; __be16 vlan_proto = (__force __be16) r2; + int ret; if (unlikely(vlan_proto != htons(ETH_P_8021Q) && vlan_proto != htons(ETH_P_8021AD))) vlan_proto = htons(ETH_P_8021Q); - return skb_vlan_push(skb, vlan_proto, vlan_tci); + ret = skb_vlan_push(skb, vlan_proto, vlan_tci); + bpf_compute_data_end(skb); + return ret; } const struct bpf_func_proto 
bpf_skb_vlan_push_proto = { @@ -1720,8 +1738,11 @@ EXPORT_SYMBOL_GPL(bpf_skb_vlan_push_proto); static u64 bpf_skb_vlan_pop(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5) { struct sk_buff *skb = (struct sk_buff *) (long) r1; + int ret; - return skb_vlan_pop(skb); + ret = skb_vlan_pop(skb); + bpf_compute_data_end(skb); + return ret; } const struct bpf_func_proto bpf_skb_vlan_pop_proto = { @@ -2066,8 +2087,12 @@ static bool __is_valid_access(int off, int size, enum bpf_access_type type) static bool sk_filter_is_valid_access(int off, int size, enum bpf_access_type type) { - if (off == offsetof(struct __sk_buff, tc_classid)) + switch (off) { + case offsetof(struct __sk_buff, tc_classid): + case offsetof(struct __sk_buff, data): + case offsetof(struct __sk_buff, data_end): return false; + } if (type == BPF_WRITE) { switch (off) { @@ -2215,6 +2240,20 @@ static u32 bpf_net_convert_ctx_access(enum bpf_access_type type, int dst_reg, *insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg, ctx_off); break; + case offsetof(struct __sk_buff, data): + *insn++ = BPF_LDX_MEM(bytes_to_bpf_size(FIELD_SIZEOF(struct sk_buff, data)), + dst_reg, src_reg, + offsetof(struct sk_buff, data)); + break; + + case offsetof(struct __sk_buff, data_end): + ctx_off -= offsetof(struct __sk_buff, data_end); + ctx_off += offsetof(struct sk_buff, cb); + ctx_off += offsetof(struct bpf_skb_data_end, data_end); + *insn++ = BPF_LDX_MEM(bytes_to_bpf_size(sizeof(void *)), + dst_reg, src_reg, ctx_off); + break; + case offsetof(struct __sk_buff, tc_index): #ifdef CONFIG_NET_SCHED BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, tc_index) != 2); diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c index 4fd703362563..c7123e01c2ca 100644 --- a/net/sched/act_bpf.c +++ b/net/sched/act_bpf.c @@ -53,9 +53,11 @@ static int tcf_bpf(struct sk_buff *skb, const struct tc_action *act, filter = rcu_dereference(prog->filter); if (at_ingress) { __skb_push(skb, skb->mac_len); + bpf_compute_data_end(skb); filter_res = BPF_PROG_RUN(filter, skb); __skb_pull(skb, skb->mac_len); } else { + bpf_compute_data_end(skb); filter_res = BPF_PROG_RUN(filter, skb); } rcu_read_unlock(); diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c index 425fe6a0eda3..7b342c779da7 100644 --- a/net/sched/cls_bpf.c +++ b/net/sched/cls_bpf.c @@ -96,9 +96,11 @@ static int cls_bpf_classify(struct sk_buff *skb, const struct tcf_proto *tp, if (at_ingress) { /* It is safe to push/pull even if skb_shared() */ __skb_push(skb, skb->mac_len); + bpf_compute_data_end(skb); filter_res = BPF_PROG_RUN(prog->filter, skb); __skb_pull(skb, skb->mac_len); } else { + bpf_compute_data_end(skb); filter_res = BPF_PROG_RUN(prog->filter, skb); } From f9c8d19d6c7c15a59963f80ec47e68808914abd4 Mon Sep 17 00:00:00 2001 From: Alexei Starovoitov Date: Thu, 5 May 2016 19:49:13 -0700 Subject: [PATCH 1391/1649] bpf: add documentation for 'direct packet access' explain how verifier checks safety of packet access and update email addresses. Signed-off-by: Alexei Starovoitov Acked-by: Daniel Borkmann Signed-off-by: David S. Miller --- Documentation/networking/filter.txt | 85 ++++++++++++++++++++++++++++- 1 file changed, 83 insertions(+), 2 deletions(-) diff --git a/Documentation/networking/filter.txt b/Documentation/networking/filter.txt index 96da119a47e7..6aef0b5f3bc7 100644 --- a/Documentation/networking/filter.txt +++ b/Documentation/networking/filter.txt @@ -1095,6 +1095,87 @@ all use cases. 
See details of eBPF verifier in kernel/bpf/verifier.c +Direct packet access +-------------------- +In cls_bpf and act_bpf programs the verifier allows direct access to the packet +data via skb->data and skb->data_end pointers. +Ex: +1: r4 = *(u32 *)(r1 +80) /* load skb->data_end */ +2: r3 = *(u32 *)(r1 +76) /* load skb->data */ +3: r5 = r3 +4: r5 += 14 +5: if r5 > r4 goto pc+16 +R1=ctx R3=pkt(id=0,off=0,r=14) R4=pkt_end R5=pkt(id=0,off=14,r=14) R10=fp +6: r0 = *(u16 *)(r3 +12) /* access 12 and 13 bytes of the packet */ + +this 2byte load from the packet is safe to do, since the program author +did check 'if (skb->data + 14 > skb->data_end) goto err' at insn #5 which +means that in the fall-through case the register R3 (which points to skb->data) +has at least 14 directly accessible bytes. The verifier marks it +as R3=pkt(id=0,off=0,r=14). +id=0 means that no additional variables were added to the register. +off=0 means that no additional constants were added. +r=14 is the range of safe access which means that bytes [R3, R3 + 14) are ok. +Note that R5 is marked as R5=pkt(id=0,off=14,r=14). It also points +to the packet data, but constant 14 was added to the register, so +it now points to 'skb->data + 14' and accessible range is [R5, R5 + 14 - 14) +which is zero bytes. + +More complex packet access may look like: + R0=imm1 R1=ctx R3=pkt(id=0,off=0,r=14) R4=pkt_end R5=pkt(id=0,off=14,r=14) R10=fp + 6: r0 = *(u8 *)(r3 +7) /* load 7th byte from the packet */ + 7: r4 = *(u8 *)(r3 +12) + 8: r4 *= 14 + 9: r3 = *(u32 *)(r1 +76) /* load skb->data */ +10: r3 += r4 +11: r2 = r1 +12: r2 <<= 48 +13: r2 >>= 48 +14: r3 += r2 +15: r2 = r3 +16: r2 += 8 +17: r1 = *(u32 *)(r1 +80) /* load skb->data_end */ +18: if r2 > r1 goto pc+2 + R0=inv56 R1=pkt_end R2=pkt(id=2,off=8,r=8) R3=pkt(id=2,off=0,r=8) R4=inv52 R5=pkt(id=0,off=14,r=14) R10=fp +19: r1 = *(u8 *)(r3 +4) +The state of the register R3 is R3=pkt(id=2,off=0,r=8) +id=2 means that two 'r3 += rX' instructions were seen, so r3 points to some +offset within a packet and since the program author did +'if (r3 + 8 > r1) goto err' at insn #18, the safe range is [R3, R3 + 8). +The verifier only allows 'add' operation on packet registers. Any other +operation will set the register state to 'unknown_value' and it won't be +available for direct packet access. +Operation 'r3 += rX' may overflow and become less than original skb->data, +therefore the verifier has to prevent that. So it tracks the number of +upper zero bits in all 'uknown_value' registers, so when it sees +'r3 += rX' instruction and rX is more than 16-bit value, it will error as: +"cannot add integer value with N upper zero bits to ptr_to_packet" +Ex. after insn 'r4 = *(u8 *)(r3 +12)' (insn #7 above) the state of r4 is +R4=inv56 which means that upper 56 bits on the register are guaranteed +to be zero. After insn 'r4 *= 14' the state becomes R4=inv52, since +multiplying 8-bit value by constant 14 will keep upper 52 bits as zero. +Similarly 'r2 >>= 48' will make R2=inv48, since the shift is not sign +extending. This logic is implemented in evaluate_reg_alu() function. 
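As a rough restricted-C sketch of the variable-offset case above (a simplified variant that drops the extra 16-bit index register for brevity; this sketch is not part of the patch or the samples, the section and function names are made up, and the usual samples/bpf "bpf_helpers.h" SEC() macro is assumed), the access pattern the verifier is reasoning about looks like:

#include <linux/bpf.h>
#include "bpf_helpers.h"        /* SEC() macro as used in samples/bpf (assumed) */

SEC("classifier")
int var_off_sketch(struct __sk_buff *skb)   /* illustrative only, not upstream code */
{
        void *data = (void *)(long)skb->data;
        void *data_end = (void *)(long)skb->data_end;
        __u64 idx;
        void *p;

        if (data + 14 > data_end)       /* proves the range [data, data + 14) */
                return 0;

        idx = *(__u8 *)(data + 12);     /* inv56: upper 56 bits known zero */
        idx *= 14;                      /* inv52 after multiply by 14 */

        p = data + idx;                 /* packet pointer with a new id */
        if (p + 8 > data_end)           /* proves the range [p, p + 8) */
                return 0;

        return *(__u8 *)(p + 4);        /* access stays within the proven range */
}

The compiler may schedule the loads and checks differently than the hand-written
assembly above, but a bounds check dominating every access is what the verifier
ultimately requires.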
+ +The end result is that bpf program author can access packet directly +using normal C code as: + void *data = (void *)(long)skb->data; + void *data_end = (void *)(long)skb->data_end; + struct eth_hdr *eth = data; + struct iphdr *iph = data + sizeof(*eth); + struct udphdr *udp = data + sizeof(*eth) + sizeof(*iph); + + if (data + sizeof(*eth) + sizeof(*iph) + sizeof(*udp) > data_end) + return 0; + if (eth->h_proto != htons(ETH_P_IP)) + return 0; + if (iph->protocol != IPPROTO_UDP || iph->ihl != 5) + return 0; + if (udp->dest == 53 || udp->source == 9) + ...; +which makes such programs easier to write comparing to LD_ABS insn +and significantly faster. + eBPF maps --------- 'maps' is a generic storage of different types for sharing data between kernel @@ -1293,5 +1374,5 @@ to give potential BPF hackers or security auditors a better overview of the underlying architecture. Jay Schulist -Daniel Borkmann -Alexei Starovoitov +Daniel Borkmann +Alexei Starovoitov From 65d472fb007dd73ef28f70078f43f86bb6cc67d0 Mon Sep 17 00:00:00 2001 From: Alexei Starovoitov Date: Thu, 5 May 2016 19:49:14 -0700 Subject: [PATCH 1392/1649] samples/bpf: add 'pointer to packet' tests parse_simple.c - packet parser exapmle with single length check that filters out udp packets for port 9 parse_varlen.c - variable length parser that understand multiple vlan headers, ipip, ipip6 and ip options to filter out udp or tcp packets on port 9. The packet is parsed layer by layer with multitple length checks. parse_ldabs.c - classic style of packet parsing using LD_ABS instruction. Same functionality as parse_simple. simple = 24.1Mpps per core varlen = 22.7Mpps ldabs = 21.4Mpps Parser with LD_ABS instructions is slower than full direct access parser which does more packet accesses and checks. These examples demonstrate the choice bpf program authors can make between flexibility of the parser vs speed. Signed-off-by: Alexei Starovoitov Signed-off-by: David S. Miller --- samples/bpf/Makefile | 2 + samples/bpf/parse_ldabs.c | 41 ++++++++++ samples/bpf/parse_simple.c | 48 +++++++++++ samples/bpf/parse_varlen.c | 153 ++++++++++++++++++++++++++++++++++++ samples/bpf/test_cls_bpf.sh | 37 +++++++++ 5 files changed, 281 insertions(+) create mode 100644 samples/bpf/parse_ldabs.c create mode 100644 samples/bpf/parse_simple.c create mode 100644 samples/bpf/parse_varlen.c create mode 100755 samples/bpf/test_cls_bpf.sh diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile index 66897e61232c..0bf2478cb7df 100644 --- a/samples/bpf/Makefile +++ b/samples/bpf/Makefile @@ -60,6 +60,7 @@ always += spintest_kern.o always += map_perf_test_kern.o always += test_overhead_tp_kern.o always += test_overhead_kprobe_kern.o +always += parse_varlen.o parse_simple.o parse_ldabs.o HOSTCFLAGS += -I$(objtree)/usr/include @@ -120,4 +121,5 @@ $(src)/*.c: verify_target_bpf $(obj)/%.o: $(src)/%.c $(CLANG) $(NOSTDINC_FLAGS) $(LINUXINCLUDE) $(EXTRA_CFLAGS) \ -D__KERNEL__ -D__ASM_SYSREG_H -Wno-unused-value -Wno-pointer-sign \ + -Wno-compare-distinct-pointer-types \ -O2 -emit-llvm -c $< -o -| $(LLC) -march=bpf -filetype=obj -o $@ diff --git a/samples/bpf/parse_ldabs.c b/samples/bpf/parse_ldabs.c new file mode 100644 index 000000000000..d17550198d06 --- /dev/null +++ b/samples/bpf/parse_ldabs.c @@ -0,0 +1,41 @@ +/* Copyright (c) 2016 Facebook + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. 
+ */ +#include +#include +#include +#include +#include +#include +#include "bpf_helpers.h" + +#define DEFAULT_PKTGEN_UDP_PORT 9 +#define IP_MF 0x2000 +#define IP_OFFSET 0x1FFF + +static inline int ip_is_fragment(struct __sk_buff *ctx, __u64 nhoff) +{ + return load_half(ctx, nhoff + offsetof(struct iphdr, frag_off)) + & (IP_MF | IP_OFFSET); +} + +SEC("ldabs") +int handle_ingress(struct __sk_buff *skb) +{ + __u64 troff = ETH_HLEN + sizeof(struct iphdr); + + if (load_half(skb, offsetof(struct ethhdr, h_proto)) != ETH_P_IP) + return 0; + if (load_byte(skb, ETH_HLEN + offsetof(struct iphdr, protocol)) != IPPROTO_UDP || + load_byte(skb, ETH_HLEN) != 0x45) + return 0; + if (ip_is_fragment(skb, ETH_HLEN)) + return 0; + if (load_half(skb, troff + offsetof(struct udphdr, dest)) == DEFAULT_PKTGEN_UDP_PORT) + return TC_ACT_SHOT; + return 0; +} +char _license[] SEC("license") = "GPL"; diff --git a/samples/bpf/parse_simple.c b/samples/bpf/parse_simple.c new file mode 100644 index 000000000000..cf2511c33905 --- /dev/null +++ b/samples/bpf/parse_simple.c @@ -0,0 +1,48 @@ +/* Copyright (c) 2016 Facebook + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. + */ +#include +#include +#include +#include +#include +#include +#include +#include "bpf_helpers.h" + +#define DEFAULT_PKTGEN_UDP_PORT 9 + +/* copy of 'struct ethhdr' without __packed */ +struct eth_hdr { + unsigned char h_dest[ETH_ALEN]; + unsigned char h_source[ETH_ALEN]; + unsigned short h_proto; +}; + +SEC("simple") +int handle_ingress(struct __sk_buff *skb) +{ + void *data = (void *)(long)skb->data; + struct eth_hdr *eth = data; + struct iphdr *iph = data + sizeof(*eth); + struct udphdr *udp = data + sizeof(*eth) + sizeof(*iph); + void *data_end = (void *)(long)skb->data_end; + + /* single length check */ + if (data + sizeof(*eth) + sizeof(*iph) + sizeof(*udp) > data_end) + return 0; + + if (eth->h_proto != htons(ETH_P_IP)) + return 0; + if (iph->protocol != IPPROTO_UDP || iph->ihl != 5) + return 0; + if (ip_is_fragment(iph)) + return 0; + if (udp->dest == htons(DEFAULT_PKTGEN_UDP_PORT)) + return TC_ACT_SHOT; + return 0; +} +char _license[] SEC("license") = "GPL"; diff --git a/samples/bpf/parse_varlen.c b/samples/bpf/parse_varlen.c new file mode 100644 index 000000000000..edab34dce79b --- /dev/null +++ b/samples/bpf/parse_varlen.c @@ -0,0 +1,153 @@ +/* Copyright (c) 2016 Facebook + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. 
+ */ +#include +#include +#include +#include +#include +#include +#include +#include +#include "bpf_helpers.h" + +#define DEFAULT_PKTGEN_UDP_PORT 9 +#define DEBUG 0 + +static int tcp(void *data, uint64_t tp_off, void *data_end) +{ + struct tcphdr *tcp = data + tp_off; + + if (tcp + 1 > data_end) + return 0; + if (tcp->dest == htons(80) || tcp->source == htons(80)) + return TC_ACT_SHOT; + return 0; +} + +static int udp(void *data, uint64_t tp_off, void *data_end) +{ + struct udphdr *udp = data + tp_off; + + if (udp + 1 > data_end) + return 0; + if (udp->dest == htons(DEFAULT_PKTGEN_UDP_PORT) || + udp->source == htons(DEFAULT_PKTGEN_UDP_PORT)) { + if (DEBUG) { + char fmt[] = "udp port 9 indeed\n"; + + bpf_trace_printk(fmt, sizeof(fmt)); + } + return TC_ACT_SHOT; + } + return 0; +} + +static int parse_ipv4(void *data, uint64_t nh_off, void *data_end) +{ + struct iphdr *iph; + uint64_t ihl_len; + + iph = data + nh_off; + if (iph + 1 > data_end) + return 0; + + if (ip_is_fragment(iph)) + return 0; + ihl_len = iph->ihl * 4; + + if (iph->protocol == IPPROTO_IPIP) { + iph = data + nh_off + ihl_len; + if (iph + 1 > data_end) + return 0; + ihl_len += iph->ihl * 4; + } + + if (iph->protocol == IPPROTO_TCP) + return tcp(data, nh_off + ihl_len, data_end); + else if (iph->protocol == IPPROTO_UDP) + return udp(data, nh_off + ihl_len, data_end); + return 0; +} + +static int parse_ipv6(void *data, uint64_t nh_off, void *data_end) +{ + struct ipv6hdr *ip6h; + struct iphdr *iph; + uint64_t ihl_len = sizeof(struct ipv6hdr); + uint64_t nexthdr; + + ip6h = data + nh_off; + if (ip6h + 1 > data_end) + return 0; + + nexthdr = ip6h->nexthdr; + + if (nexthdr == IPPROTO_IPIP) { + iph = data + nh_off + ihl_len; + if (iph + 1 > data_end) + return 0; + ihl_len += iph->ihl * 4; + nexthdr = iph->protocol; + } else if (nexthdr == IPPROTO_IPV6) { + ip6h = data + nh_off + ihl_len; + if (ip6h + 1 > data_end) + return 0; + ihl_len += sizeof(struct ipv6hdr); + nexthdr = ip6h->nexthdr; + } + + if (nexthdr == IPPROTO_TCP) + return tcp(data, nh_off + ihl_len, data_end); + else if (nexthdr == IPPROTO_UDP) + return udp(data, nh_off + ihl_len, data_end); + return 0; +} + +struct vlan_hdr { + uint16_t h_vlan_TCI; + uint16_t h_vlan_encapsulated_proto; +}; + +SEC("varlen") +int handle_ingress(struct __sk_buff *skb) +{ + void *data = (void *)(long)skb->data; + struct ethhdr *eth = data; + void *data_end = (void *)(long)skb->data_end; + uint64_t h_proto, nh_off; + + nh_off = sizeof(*eth); + if (data + nh_off > data_end) + return 0; + + h_proto = eth->h_proto; + + if (h_proto == ETH_P_8021Q || h_proto == ETH_P_8021AD) { + struct vlan_hdr *vhdr; + + vhdr = data + nh_off; + nh_off += sizeof(struct vlan_hdr); + if (data + nh_off > data_end) + return 0; + h_proto = vhdr->h_vlan_encapsulated_proto; + } + if (h_proto == ETH_P_8021Q || h_proto == ETH_P_8021AD) { + struct vlan_hdr *vhdr; + + vhdr = data + nh_off; + nh_off += sizeof(struct vlan_hdr); + if (data + nh_off > data_end) + return 0; + h_proto = vhdr->h_vlan_encapsulated_proto; + } + if (h_proto == htons(ETH_P_IP)) + return parse_ipv4(data, nh_off, data_end); + else if (h_proto == htons(ETH_P_IPV6)) + return parse_ipv6(data, nh_off, data_end); + return 0; +} +char _license[] SEC("license") = "GPL"; diff --git a/samples/bpf/test_cls_bpf.sh b/samples/bpf/test_cls_bpf.sh new file mode 100755 index 000000000000..0365d5ee512c --- /dev/null +++ b/samples/bpf/test_cls_bpf.sh @@ -0,0 +1,37 @@ +#!/bin/bash + +function pktgen { + ../pktgen/pktgen_bench_xmit_mode_netif_receive.sh -i $IFC -s 64 \ + 
-m 90:e2:ba:ff:ff:ff -d 192.168.0.1 -t 4 + local dropped=`tc -s qdisc show dev $IFC | tail -3 | awk '/drop/{print $7}'` + if [ "$dropped" == "0," ]; then + echo "FAIL" + else + echo "Successfully filtered " $dropped " packets" + fi +} + +function test { + echo -n "Loading bpf program '$2'... " + tc qdisc add dev $IFC clsact + tc filter add dev $IFC ingress bpf da obj $1 sec $2 + local status=$? + if [ $status -ne 0 ]; then + echo "FAIL" + else + echo "ok" + pktgen + fi + tc qdisc del dev $IFC clsact +} + +IFC=test_veth + +ip link add name $IFC type veth peer name pair_$IFC +ip link set $IFC up +ip link set pair_$IFC up + +test ./parse_simple.o simple +test ./parse_varlen.o varlen +test ./parse_ldabs.o ldabs +ip link del dev $IFC From 883e44e4de71c023d3d74e02f35ca462c67d07dc Mon Sep 17 00:00:00 2001 From: Alexei Starovoitov Date: Thu, 5 May 2016 19:49:15 -0700 Subject: [PATCH 1393/1649] samples/bpf: add verifier tests add few tests for "pointer to packet" logic of the verifier Signed-off-by: Alexei Starovoitov Signed-off-by: David S. Miller --- samples/bpf/test_verifier.c | 80 +++++++++++++++++++++++++++++++++++++ 1 file changed, 80 insertions(+) diff --git a/samples/bpf/test_verifier.c b/samples/bpf/test_verifier.c index 9eba8d1d9dcc..fe2fcec98c1f 100644 --- a/samples/bpf/test_verifier.c +++ b/samples/bpf/test_verifier.c @@ -1448,6 +1448,86 @@ static struct bpf_test tests[] = { .result = ACCEPT, .prog_type = BPF_PROG_TYPE_SCHED_CLS, }, + { + "pkt: test1", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct __sk_buff, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct __sk_buff, data_end)), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), + BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + }, + { + "pkt: test2", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1, + offsetof(struct __sk_buff, data_end)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct __sk_buff, data)), + BPF_MOV64_REG(BPF_REG_5, BPF_REG_3), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14), + BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_4, 15), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 7), + BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_3, 12), + BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 14), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct __sk_buff, data)), + BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_4), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_1), + BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 48), + BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 48), + BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_2), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_3), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8), + BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1, + offsetof(struct __sk_buff, data_end)), + BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1), + BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_3, 4), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + }, + { + "pkt: test3", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct __sk_buff, data)), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_context access off=76", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_SOCKET_FILTER, + }, + { + "pkt: test4", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct __sk_buff, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, 
BPF_REG_1, + offsetof(struct __sk_buff, data_end)), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), + BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), + BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "cannot write", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + }, }; static int probe_filter_length(struct bpf_insn *fp) From 47dcc20a39d06585bf3cb9fb381f0e81c20002c3 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 6 May 2016 09:46:18 -0700 Subject: [PATCH 1394/1649] ipv4: tcp: ip_send_unicast_reply() is not BH safe I forgot that ip_send_unicast_reply() is not BH safe (yet). Disabling preemption before calling it was not a good move. Fixes: c10d9310edf5 ("tcp: do not assume TCP code is non preemptible") Signed-off-by: Eric Dumazet Reported-by: Andres Lagar-Cavilla Signed-off-by: David S. Miller --- net/ipv4/tcp_ipv4.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index a7ab9472d645..8219d0d8dc83 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -692,7 +692,7 @@ static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb) offsetof(struct inet_timewait_sock, tw_bound_dev_if)); arg.tos = ip_hdr(skb)->tos; - preempt_disable(); + local_bh_disable(); ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk), skb, &TCP_SKB_CB(skb)->header.h4.opt, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr, @@ -700,7 +700,7 @@ static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb) __TCP_INC_STATS(net, TCP_MIB_OUTSEGS); __TCP_INC_STATS(net, TCP_MIB_OUTRSTS); - preempt_enable(); + local_bh_enable(); #ifdef CONFIG_TCP_MD5SIG out: @@ -776,14 +776,14 @@ static void tcp_v4_send_ack(struct net *net, if (oif) arg.bound_dev_if = oif; arg.tos = tos; - preempt_disable(); + local_bh_disable(); ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk), skb, &TCP_SKB_CB(skb)->header.h4.opt, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr, &arg, arg.iov[0].iov_len); __TCP_INC_STATS(net, TCP_MIB_OUTSEGS); - preempt_enable(); + local_bh_enable(); } static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb) From 17af2bce88d31e65ed73d638bb752d2e13c66ced Mon Sep 17 00:00:00 2001 From: Marc Angel Date: Thu, 5 May 2016 12:14:26 +0200 Subject: [PATCH 1395/1649] macvtap: add namespace support to the sysfs device class When creating macvtaps that are expected to have the same ifindex in different network namespaces, only the first one will succeed. The others will fail with a sysfs_warn_dup warning due to them trying to create the following sysfs link (with 'NN' the ifindex of macvtapX): /sys/class/macvtap/tapNN -> /sys/devices/virtual/net/macvtapX/tapNN This is reproducible by running the following commands: ip netns add ns1 ip netns add ns2 ip link add veth0 type veth peer name veth1 ip link set veth0 netns ns1 ip link set veth1 netns ns2 ip netns exec ns1 ip l add link veth0 macvtap0 type macvtap ip netns exec ns2 ip l add link veth1 macvtap1 type macvtap The last command will fail with "RTNETLINK answers: File exists" (along with the kernel warning) but retrying it will work because the ifindex was incremented. The 'net' device class is isolated between network namespaces so each one has its own hierarchy of net devices. This isn't the case for the 'macvtap' device class. 
The problem occurs half-way through the netdev registration, when `macvtap_device_event` is called-back to create the 'tapNN' macvtap class device under the 'macvtapX' net class device. This patch adds namespace support to the 'macvtap' device class so that /sys/class/macvtap is no longer shared between net namespaces. However, making the macvtap sysfs class namespace-aware has the side effect of changing /sys/devices/virtual/net/macvtapX/tapNN into /sys/devices/virtual/net/macvtapX/macvtap/tapNN. This is due to Commit 24b1442 ("Driver-core: Always create class directories for classses that support namespaces") and the fact that class devices supporting namespaces are really not supposed to be placed directly under other class devices. To avoid breaking userland, a tapNN symlink pointing to macvtap/tapNN is created inside the macvtapX directory. Signed-off-by: Marc Angel Signed-off-by: David S. Miller --- drivers/net/macvtap.c | 36 ++++++++++++++++++++++++++---------- 1 file changed, 26 insertions(+), 10 deletions(-) diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c index 74cb15a2e032..22b85b097cbc 100644 --- a/drivers/net/macvtap.c +++ b/drivers/net/macvtap.c @@ -129,7 +129,18 @@ static DEFINE_MUTEX(minor_lock); static DEFINE_IDR(minor_idr); #define GOODCOPY_LEN 128 -static struct class *macvtap_class; +static const void *macvtap_net_namespace(struct device *d) +{ + struct net_device *dev = to_net_dev(d->parent); + return dev_net(dev); +} + +static struct class macvtap_class = { + .name = "macvtap", + .owner = THIS_MODULE, + .ns_type = &net_ns_type_operations, + .namespace = macvtap_net_namespace, +}; static struct cdev macvtap_cdev; static const struct proto_ops macvtap_socket_ops; @@ -1278,10 +1289,12 @@ static int macvtap_device_event(struct notifier_block *unused, struct device *classdev; dev_t devt; int err; + char tap_name[IFNAMSIZ]; if (dev->rtnl_link_ops != &macvtap_link_ops) return NOTIFY_DONE; + snprintf(tap_name, IFNAMSIZ, "tap%d", dev->ifindex); vlan = netdev_priv(dev); switch (event) { @@ -1295,19 +1308,24 @@ static int macvtap_device_event(struct notifier_block *unused, return notifier_from_errno(err); devt = MKDEV(MAJOR(macvtap_major), vlan->minor); - classdev = device_create(macvtap_class, &dev->dev, devt, - dev, "tap%d", dev->ifindex); + classdev = device_create(&macvtap_class, &dev->dev, devt, + dev, tap_name); if (IS_ERR(classdev)) { macvtap_free_minor(vlan); return notifier_from_errno(PTR_ERR(classdev)); } + err = sysfs_create_link(&dev->dev.kobj, &classdev->kobj, + tap_name); + if (err) + return notifier_from_errno(err); break; case NETDEV_UNREGISTER: /* vlan->minor == 0 if NETDEV_REGISTER above failed */ if (vlan->minor == 0) break; + sysfs_remove_link(&dev->dev.kobj, tap_name); devt = MKDEV(MAJOR(macvtap_major), vlan->minor); - device_destroy(macvtap_class, devt); + device_destroy(&macvtap_class, devt); macvtap_free_minor(vlan); break; } @@ -1333,11 +1351,9 @@ static int macvtap_init(void) if (err) goto out2; - macvtap_class = class_create(THIS_MODULE, "macvtap"); - if (IS_ERR(macvtap_class)) { - err = PTR_ERR(macvtap_class); + err = class_register(&macvtap_class); + if (err) goto out3; - } err = register_netdevice_notifier(&macvtap_notifier_block); if (err) @@ -1352,7 +1368,7 @@ static int macvtap_init(void) out5: unregister_netdevice_notifier(&macvtap_notifier_block); out4: - class_unregister(macvtap_class); + class_unregister(&macvtap_class); out3: cdev_del(&macvtap_cdev); out2: @@ -1366,7 +1382,7 @@ static void macvtap_exit(void) { 
rtnl_link_unregister(&macvtap_link_ops); unregister_netdevice_notifier(&macvtap_notifier_block); - class_unregister(macvtap_class); + class_unregister(&macvtap_class); cdev_del(&macvtap_cdev); unregister_chrdev_region(macvtap_major, MACVTAP_NUM_DEVS); idr_destroy(&minor_idr); From 5113bfdbc63845402eec7f419204d22283c9cd4c Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Fri, 6 May 2016 22:20:59 +0200 Subject: [PATCH 1396/1649] mlxsw: spectrum: Fix ordering in mlxsw_sp_fini Fixes: 0f433fa0ec ("mlxsw: spectrum_buffers: Implement shared buffer configuration") Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 681afe1a3802..79cdd81d55ab 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -2449,8 +2449,8 @@ static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core) { struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); - mlxsw_sp_buffers_fini(mlxsw_sp); mlxsw_sp_switchdev_fini(mlxsw_sp); + mlxsw_sp_buffers_fini(mlxsw_sp); mlxsw_sp_traps_fini(mlxsw_sp); mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE); mlxsw_sp_ports_remove(mlxsw_sp); From 218d48e701f08a71df57c410e596e30b3285bd25 Mon Sep 17 00:00:00 2001 From: Hariprasad Shenai Date: Thu, 5 May 2016 11:05:39 +0530 Subject: [PATCH 1397/1649] cxgb4: Reset dcb state machine and tx queue prio only if dcb is enabled When cxgb4 is enabled with CONFIG_CHELSIO_T4_DCB set, VI enable command gets called with DCB enabled. But when we have a back to back setup with DCB enabled on one side and non-DCB on the Peer side. Firmware doesn't send any DCB_L2_CFG, and DCB priority is never set for Tx queue. But driver resets the queue priority and state machine whenever there is a link down, this patch fixes it by adding a check to reset only if cxgb4_dcb_enabled() returns true. Signed-off-by: Hariprasad Shenai Signed-off-by: David S. 
Miller --- .../net/ethernet/chelsio/cxgb4/cxgb4_main.c | 38 ++++++++++--------- 1 file changed, 20 insertions(+), 18 deletions(-) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index d7f40436f319..477db477b133 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -304,6 +304,22 @@ static void dcb_tx_queue_prio_enable(struct net_device *dev, int enable) } #endif /* CONFIG_CHELSIO_T4_DCB */ +int cxgb4_dcb_enabled(const struct net_device *dev) +{ +#ifdef CONFIG_CHELSIO_T4_DCB + struct port_info *pi = netdev_priv(dev); + + if (!pi->dcb.enabled) + return 0; + + return ((pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED) || + (pi->dcb.state == CXGB4_DCB_STATE_HOST)); +#else + return 0; +#endif +} +EXPORT_SYMBOL(cxgb4_dcb_enabled); + void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat) { struct net_device *dev = adapter->port[port_id]; @@ -314,8 +330,10 @@ void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat) netif_carrier_on(dev); else { #ifdef CONFIG_CHELSIO_T4_DCB - cxgb4_dcb_state_init(dev); - dcb_tx_queue_prio_enable(dev, false); + if (cxgb4_dcb_enabled(dev)) { + cxgb4_dcb_state_init(dev); + dcb_tx_queue_prio_enable(dev, false); + } #endif /* CONFIG_CHELSIO_T4_DCB */ netif_carrier_off(dev); } @@ -494,22 +512,6 @@ static int link_start(struct net_device *dev) return ret; } -int cxgb4_dcb_enabled(const struct net_device *dev) -{ -#ifdef CONFIG_CHELSIO_T4_DCB - struct port_info *pi = netdev_priv(dev); - - if (!pi->dcb.enabled) - return 0; - - return ((pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED) || - (pi->dcb.state == CXGB4_DCB_STATE_HOST)); -#else - return 0; -#endif -} -EXPORT_SYMBOL(cxgb4_dcb_enabled); - #ifdef CONFIG_CHELSIO_T4_DCB /* Handle a Data Center Bridging update message from the firmware. */ static void dcb_rpl(struct adapter *adap, const struct fw_port_cmd *pcmd) From 43315f31adc2bf3b35e04dcf2372c3bb08014ed1 Mon Sep 17 00:00:00 2001 From: Bjorn Andersson Date: Fri, 6 May 2016 07:09:07 -0700 Subject: [PATCH 1398/1649] soc: qcom: smd: Introduce compile stubs Introduce compile stubs for the SMD API, allowing consumers to be compile tested. Acked-by: Andy Gross Signed-off-by: Bjorn Andersson Signed-off-by: David S. 
Miller --- include/linux/soc/qcom/smd.h | 28 +++++++++++++++++++++++++++- 1 file changed, 27 insertions(+), 1 deletion(-) diff --git a/include/linux/soc/qcom/smd.h b/include/linux/soc/qcom/smd.h index d0cb6d189a0a..46a984f5e3a3 100644 --- a/include/linux/soc/qcom/smd.h +++ b/include/linux/soc/qcom/smd.h @@ -45,13 +45,39 @@ struct qcom_smd_driver { int (*callback)(struct qcom_smd_device *, const void *, size_t); }; +#if IS_ENABLED(CONFIG_QCOM_SMD) + int qcom_smd_driver_register(struct qcom_smd_driver *drv); void qcom_smd_driver_unregister(struct qcom_smd_driver *drv); +int qcom_smd_send(struct qcom_smd_channel *channel, const void *data, int len); + +#else + +static inline int qcom_smd_driver_register(struct qcom_smd_driver *drv) +{ + return -ENXIO; +} + +static inline void qcom_smd_driver_unregister(struct qcom_smd_driver *drv) +{ + /* This shouldn't be possible */ + WARN_ON(1); +} + +static inline int qcom_smd_send(struct qcom_smd_channel *channel, + const void *data, int len) +{ + /* This shouldn't be possible */ + WARN_ON(1); + return -ENXIO; +} + +#endif + #define module_qcom_smd_driver(__smd_driver) \ module_driver(__smd_driver, qcom_smd_driver_register, \ qcom_smd_driver_unregister) -int qcom_smd_send(struct qcom_smd_channel *channel, const void *data, int len); #endif From bdabad3e363d825ddf9679dd431cca0b2c30f881 Mon Sep 17 00:00:00 2001 From: Courtney Cavin Date: Fri, 6 May 2016 07:09:08 -0700 Subject: [PATCH 1399/1649] net: Add Qualcomm IPC router Add an implementation of Qualcomm's IPC router protocol, used to communicate with service providing remote processors. Signed-off-by: Courtney Cavin Signed-off-by: Bjorn Andersson [bjorn: Cope with 0 being a valid node id and implement RTM_NEWADDR] Signed-off-by: Bjorn Andersson Signed-off-by: David S. Miller --- include/linux/socket.h | 4 +- include/uapi/linux/qrtr.h | 12 + net/Kconfig | 1 + net/Makefile | 1 + net/qrtr/Kconfig | 24 + net/qrtr/Makefile | 2 + net/qrtr/qrtr.c | 1007 +++++++++++++++++++++++++++++++++++++ net/qrtr/qrtr.h | 31 ++ net/qrtr/smd.c | 117 +++++ 9 files changed, 1198 insertions(+), 1 deletion(-) create mode 100644 include/uapi/linux/qrtr.h create mode 100644 net/qrtr/Kconfig create mode 100644 net/qrtr/Makefile create mode 100644 net/qrtr/qrtr.c create mode 100644 net/qrtr/qrtr.h create mode 100644 net/qrtr/smd.c diff --git a/include/linux/socket.h b/include/linux/socket.h index 73bf6c6a833b..b5cc5a6d7011 100644 --- a/include/linux/socket.h +++ b/include/linux/socket.h @@ -201,8 +201,9 @@ struct ucred { #define AF_NFC 39 /* NFC sockets */ #define AF_VSOCK 40 /* vSockets */ #define AF_KCM 41 /* Kernel Connection Multiplexor*/ +#define AF_QIPCRTR 42 /* Qualcomm IPC Router */ -#define AF_MAX 42 /* For now.. */ +#define AF_MAX 43 /* For now.. */ /* Protocol families, same as address families. */ #define PF_UNSPEC AF_UNSPEC @@ -249,6 +250,7 @@ struct ucred { #define PF_NFC AF_NFC #define PF_VSOCK AF_VSOCK #define PF_KCM AF_KCM +#define PF_QIPCRTR AF_QIPCRTR #define PF_MAX AF_MAX /* Maximum queue length specifiable by listen. 
*/ diff --git a/include/uapi/linux/qrtr.h b/include/uapi/linux/qrtr.h new file mode 100644 index 000000000000..66c0748d26e2 --- /dev/null +++ b/include/uapi/linux/qrtr.h @@ -0,0 +1,12 @@ +#ifndef _LINUX_QRTR_H +#define _LINUX_QRTR_H + +#include + +struct sockaddr_qrtr { + __kernel_sa_family_t sq_family; + __u32 sq_node; + __u32 sq_port; +}; + +#endif /* _LINUX_QRTR_H */ diff --git a/net/Kconfig b/net/Kconfig index a8934d8c8fda..b841c42e5c9b 100644 --- a/net/Kconfig +++ b/net/Kconfig @@ -236,6 +236,7 @@ source "net/mpls/Kconfig" source "net/hsr/Kconfig" source "net/switchdev/Kconfig" source "net/l3mdev/Kconfig" +source "net/qrtr/Kconfig" config RPS bool diff --git a/net/Makefile b/net/Makefile index 81d14119eab5..bdd14553a774 100644 --- a/net/Makefile +++ b/net/Makefile @@ -78,3 +78,4 @@ endif ifneq ($(CONFIG_NET_L3_MASTER_DEV),) obj-y += l3mdev/ endif +obj-$(CONFIG_QRTR) += qrtr/ diff --git a/net/qrtr/Kconfig b/net/qrtr/Kconfig new file mode 100644 index 000000000000..673fd1f86ebe --- /dev/null +++ b/net/qrtr/Kconfig @@ -0,0 +1,24 @@ +# Qualcomm IPC Router configuration +# + +config QRTR + tristate "Qualcomm IPC Router support" + depends on ARCH_QCOM || COMPILE_TEST + ---help--- + Say Y if you intend to use Qualcomm IPC router protocol. The + protocol is used to communicate with services provided by other + hardware blocks in the system. + + In order to do service lookups, a userspace daemon is required to + maintain a service listing. + +if QRTR + +config QRTR_SMD + tristate "SMD IPC Router channels" + depends on QCOM_SMD || COMPILE_TEST + ---help--- + Say Y here to support SMD based ipcrouter channels. SMD is the + most common transport for IPC Router. + +endif # QRTR diff --git a/net/qrtr/Makefile b/net/qrtr/Makefile new file mode 100644 index 000000000000..6c00dc623b7e --- /dev/null +++ b/net/qrtr/Makefile @@ -0,0 +1,2 @@ +obj-$(CONFIG_QRTR) := qrtr.o +obj-$(CONFIG_QRTR_SMD) += smd.o diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c new file mode 100644 index 000000000000..c985ecbe9bd6 --- /dev/null +++ b/net/qrtr/qrtr.c @@ -0,0 +1,1007 @@ +/* + * Copyright (c) 2015, Sony Mobile Communications Inc. + * Copyright (c) 2013, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ +#include +#include +#include +#include /* For TIOCINQ/OUTQ */ + +#include + +#include "qrtr.h" + +#define QRTR_PROTO_VER 1 + +/* auto-bind range */ +#define QRTR_MIN_EPH_SOCKET 0x4000 +#define QRTR_MAX_EPH_SOCKET 0x7fff + +enum qrtr_pkt_type { + QRTR_TYPE_DATA = 1, + QRTR_TYPE_HELLO = 2, + QRTR_TYPE_BYE = 3, + QRTR_TYPE_NEW_SERVER = 4, + QRTR_TYPE_DEL_SERVER = 5, + QRTR_TYPE_DEL_CLIENT = 6, + QRTR_TYPE_RESUME_TX = 7, + QRTR_TYPE_EXIT = 8, + QRTR_TYPE_PING = 9, +}; + +/** + * struct qrtr_hdr - (I|R)PCrouter packet header + * @version: protocol version + * @type: packet type; one of QRTR_TYPE_* + * @src_node_id: source node + * @src_port_id: source port + * @confirm_rx: boolean; whether a resume-tx packet should be send in reply + * @size: length of packet, excluding this header + * @dst_node_id: destination node + * @dst_port_id: destination port + */ +struct qrtr_hdr { + __le32 version; + __le32 type; + __le32 src_node_id; + __le32 src_port_id; + __le32 confirm_rx; + __le32 size; + __le32 dst_node_id; + __le32 dst_port_id; +} __packed; + +#define QRTR_HDR_SIZE sizeof(struct qrtr_hdr) +#define QRTR_NODE_BCAST ((unsigned int)-1) +#define QRTR_PORT_CTRL ((unsigned int)-2) + +struct qrtr_sock { + /* WARNING: sk must be the first member */ + struct sock sk; + struct sockaddr_qrtr us; + struct sockaddr_qrtr peer; +}; + +static inline struct qrtr_sock *qrtr_sk(struct sock *sk) +{ + BUILD_BUG_ON(offsetof(struct qrtr_sock, sk) != 0); + return container_of(sk, struct qrtr_sock, sk); +} + +static unsigned int qrtr_local_nid = -1; + +/* for node ids */ +static RADIX_TREE(qrtr_nodes, GFP_KERNEL); +/* broadcast list */ +static LIST_HEAD(qrtr_all_nodes); +/* lock for qrtr_nodes, qrtr_all_nodes and node reference */ +static DEFINE_MUTEX(qrtr_node_lock); + +/* local port allocation management */ +static DEFINE_IDR(qrtr_ports); +static DEFINE_MUTEX(qrtr_port_lock); + +/** + * struct qrtr_node - endpoint node + * @ep_lock: lock for endpoint management and callbacks + * @ep: endpoint + * @ref: reference count for node + * @nid: node id + * @rx_queue: receive queue + * @work: scheduled work struct for recv work + * @item: list item for broadcast list + */ +struct qrtr_node { + struct mutex ep_lock; + struct qrtr_endpoint *ep; + struct kref ref; + unsigned int nid; + + struct sk_buff_head rx_queue; + struct work_struct work; + struct list_head item; +}; + +/* Release node resources and free the node. + * + * Do not call directly, use qrtr_node_release. To be used with + * kref_put_mutex. As such, the node mutex is expected to be locked on call. + */ +static void __qrtr_node_release(struct kref *kref) +{ + struct qrtr_node *node = container_of(kref, struct qrtr_node, ref); + + if (node->nid != QRTR_EP_NID_AUTO) + radix_tree_delete(&qrtr_nodes, node->nid); + + list_del(&node->item); + mutex_unlock(&qrtr_node_lock); + + skb_queue_purge(&node->rx_queue); + kfree(node); +} + +/* Increment reference to node. */ +static struct qrtr_node *qrtr_node_acquire(struct qrtr_node *node) +{ + if (node) + kref_get(&node->ref); + return node; +} + +/* Decrement reference to node and release as necessary. */ +static void qrtr_node_release(struct qrtr_node *node) +{ + if (!node) + return; + kref_put_mutex(&node->ref, __qrtr_node_release, &qrtr_node_lock); +} + +/* Pass an outgoing packet socket buffer to the endpoint driver. 
*/ +static int qrtr_node_enqueue(struct qrtr_node *node, struct sk_buff *skb) +{ + int rc = -ENODEV; + + mutex_lock(&node->ep_lock); + if (node->ep) + rc = node->ep->xmit(node->ep, skb); + else + kfree_skb(skb); + mutex_unlock(&node->ep_lock); + + return rc; +} + +/* Lookup node by id. + * + * callers must release with qrtr_node_release() + */ +static struct qrtr_node *qrtr_node_lookup(unsigned int nid) +{ + struct qrtr_node *node; + + mutex_lock(&qrtr_node_lock); + node = radix_tree_lookup(&qrtr_nodes, nid); + node = qrtr_node_acquire(node); + mutex_unlock(&qrtr_node_lock); + + return node; +} + +/* Assign node id to node. + * + * This is mostly useful for automatic node id assignment, based on + * the source id in the incoming packet. + */ +static void qrtr_node_assign(struct qrtr_node *node, unsigned int nid) +{ + if (node->nid != QRTR_EP_NID_AUTO || nid == QRTR_EP_NID_AUTO) + return; + + mutex_lock(&qrtr_node_lock); + radix_tree_insert(&qrtr_nodes, nid, node); + node->nid = nid; + mutex_unlock(&qrtr_node_lock); +} + +/** + * qrtr_endpoint_post() - post incoming data + * @ep: endpoint handle + * @data: data pointer + * @len: size of data in bytes + * + * Return: 0 on success; negative error code on failure + */ +int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len) +{ + struct qrtr_node *node = ep->node; + const struct qrtr_hdr *phdr = data; + struct sk_buff *skb; + unsigned int psize; + unsigned int size; + unsigned int type; + unsigned int ver; + unsigned int dst; + + if (len < QRTR_HDR_SIZE || len & 3) + return -EINVAL; + + ver = le32_to_cpu(phdr->version); + size = le32_to_cpu(phdr->size); + type = le32_to_cpu(phdr->type); + dst = le32_to_cpu(phdr->dst_port_id); + + psize = (size + 3) & ~3; + + if (ver != QRTR_PROTO_VER) + return -EINVAL; + + if (len != psize + QRTR_HDR_SIZE) + return -EINVAL; + + if (dst != QRTR_PORT_CTRL && type != QRTR_TYPE_DATA) + return -EINVAL; + + skb = netdev_alloc_skb(NULL, len); + if (!skb) + return -ENOMEM; + + skb_reset_transport_header(skb); + memcpy(skb_put(skb, len), data, len); + + skb_queue_tail(&node->rx_queue, skb); + schedule_work(&node->work); + + return 0; +} +EXPORT_SYMBOL_GPL(qrtr_endpoint_post); + +/* Allocate and construct a resume-tx packet. */ +static struct sk_buff *qrtr_alloc_resume_tx(u32 src_node, + u32 dst_node, u32 port) +{ + const int pkt_len = 20; + struct qrtr_hdr *hdr; + struct sk_buff *skb; + u32 *buf; + + skb = alloc_skb(QRTR_HDR_SIZE + pkt_len, GFP_KERNEL); + if (!skb) + return NULL; + skb_reset_transport_header(skb); + + hdr = (struct qrtr_hdr *)skb_put(skb, QRTR_HDR_SIZE); + hdr->version = cpu_to_le32(QRTR_PROTO_VER); + hdr->type = cpu_to_le32(QRTR_TYPE_RESUME_TX); + hdr->src_node_id = cpu_to_le32(src_node); + hdr->src_port_id = cpu_to_le32(QRTR_PORT_CTRL); + hdr->confirm_rx = cpu_to_le32(0); + hdr->size = cpu_to_le32(pkt_len); + hdr->dst_node_id = cpu_to_le32(dst_node); + hdr->dst_port_id = cpu_to_le32(QRTR_PORT_CTRL); + + buf = (u32 *)skb_put(skb, pkt_len); + memset(buf, 0, pkt_len); + buf[0] = cpu_to_le32(QRTR_TYPE_RESUME_TX); + buf[1] = cpu_to_le32(src_node); + buf[2] = cpu_to_le32(port); + + return skb; +} + +static struct qrtr_sock *qrtr_port_lookup(int port); +static void qrtr_port_put(struct qrtr_sock *ipc); + +/* Handle and route a received packet. + * + * This will auto-reply with resume-tx packet as necessary. 
+ */ +static void qrtr_node_rx_work(struct work_struct *work) +{ + struct qrtr_node *node = container_of(work, struct qrtr_node, work); + struct sk_buff *skb; + + while ((skb = skb_dequeue(&node->rx_queue)) != NULL) { + const struct qrtr_hdr *phdr; + u32 dst_node, dst_port; + struct qrtr_sock *ipc; + u32 src_node; + int confirm; + + phdr = (const struct qrtr_hdr *)skb_transport_header(skb); + src_node = le32_to_cpu(phdr->src_node_id); + dst_node = le32_to_cpu(phdr->dst_node_id); + dst_port = le32_to_cpu(phdr->dst_port_id); + confirm = !!phdr->confirm_rx; + + qrtr_node_assign(node, src_node); + + ipc = qrtr_port_lookup(dst_port); + if (!ipc) { + kfree_skb(skb); + } else { + if (sock_queue_rcv_skb(&ipc->sk, skb)) + kfree_skb(skb); + + qrtr_port_put(ipc); + } + + if (confirm) { + skb = qrtr_alloc_resume_tx(dst_node, node->nid, dst_port); + if (!skb) + break; + if (qrtr_node_enqueue(node, skb)) + break; + } + } +} + +/** + * qrtr_endpoint_register() - register a new endpoint + * @ep: endpoint to register + * @nid: desired node id; may be QRTR_EP_NID_AUTO for auto-assignment + * Return: 0 on success; negative error code on failure + * + * The specified endpoint must have the xmit function pointer set on call. + */ +int qrtr_endpoint_register(struct qrtr_endpoint *ep, unsigned int nid) +{ + struct qrtr_node *node; + + if (!ep || !ep->xmit) + return -EINVAL; + + node = kzalloc(sizeof(*node), GFP_KERNEL); + if (!node) + return -ENOMEM; + + INIT_WORK(&node->work, qrtr_node_rx_work); + kref_init(&node->ref); + mutex_init(&node->ep_lock); + skb_queue_head_init(&node->rx_queue); + node->nid = QRTR_EP_NID_AUTO; + node->ep = ep; + + qrtr_node_assign(node, nid); + + mutex_lock(&qrtr_node_lock); + list_add(&node->item, &qrtr_all_nodes); + mutex_unlock(&qrtr_node_lock); + ep->node = node; + + return 0; +} +EXPORT_SYMBOL_GPL(qrtr_endpoint_register); + +/** + * qrtr_endpoint_unregister - unregister endpoint + * @ep: endpoint to unregister + */ +void qrtr_endpoint_unregister(struct qrtr_endpoint *ep) +{ + struct qrtr_node *node = ep->node; + + mutex_lock(&node->ep_lock); + node->ep = NULL; + mutex_unlock(&node->ep_lock); + + qrtr_node_release(node); + ep->node = NULL; +} +EXPORT_SYMBOL_GPL(qrtr_endpoint_unregister); + +/* Lookup socket by port. + * + * Callers must release with qrtr_port_put() + */ +static struct qrtr_sock *qrtr_port_lookup(int port) +{ + struct qrtr_sock *ipc; + + if (port == QRTR_PORT_CTRL) + port = 0; + + mutex_lock(&qrtr_port_lock); + ipc = idr_find(&qrtr_ports, port); + if (ipc) + sock_hold(&ipc->sk); + mutex_unlock(&qrtr_port_lock); + + return ipc; +} + +/* Release acquired socket. */ +static void qrtr_port_put(struct qrtr_sock *ipc) +{ + sock_put(&ipc->sk); +} + +/* Remove port assignment. */ +static void qrtr_port_remove(struct qrtr_sock *ipc) +{ + int port = ipc->us.sq_port; + + if (port == QRTR_PORT_CTRL) + port = 0; + + __sock_put(&ipc->sk); + + mutex_lock(&qrtr_port_lock); + idr_remove(&qrtr_ports, port); + mutex_unlock(&qrtr_port_lock); +} + +/* Assign port number to socket. + * + * Specify port in the integer pointed to by port, and it will be adjusted + * on return as necesssary. 
+ * + * Port may be: + * 0: Assign ephemeral port in [QRTR_MIN_EPH_SOCKET, QRTR_MAX_EPH_SOCKET] + * QRTR_MIN_EPH_SOCKET: Specified; available to all + */ +static int qrtr_port_assign(struct qrtr_sock *ipc, int *port) +{ + int rc; + + mutex_lock(&qrtr_port_lock); + if (!*port) { + rc = idr_alloc(&qrtr_ports, ipc, + QRTR_MIN_EPH_SOCKET, QRTR_MAX_EPH_SOCKET + 1, + GFP_ATOMIC); + if (rc >= 0) + *port = rc; + } else if (*port < QRTR_MIN_EPH_SOCKET && !capable(CAP_NET_ADMIN)) { + rc = -EACCES; + } else if (*port == QRTR_PORT_CTRL) { + rc = idr_alloc(&qrtr_ports, ipc, 0, 1, GFP_ATOMIC); + } else { + rc = idr_alloc(&qrtr_ports, ipc, *port, *port + 1, GFP_ATOMIC); + if (rc >= 0) + *port = rc; + } + mutex_unlock(&qrtr_port_lock); + + if (rc == -ENOSPC) + return -EADDRINUSE; + else if (rc < 0) + return rc; + + sock_hold(&ipc->sk); + + return 0; +} + +/* Bind socket to address. + * + * Socket should be locked upon call. + */ +static int __qrtr_bind(struct socket *sock, + const struct sockaddr_qrtr *addr, int zapped) +{ + struct qrtr_sock *ipc = qrtr_sk(sock->sk); + struct sock *sk = sock->sk; + int port; + int rc; + + /* rebinding ok */ + if (!zapped && addr->sq_port == ipc->us.sq_port) + return 0; + + port = addr->sq_port; + rc = qrtr_port_assign(ipc, &port); + if (rc) + return rc; + + /* unbind previous, if any */ + if (!zapped) + qrtr_port_remove(ipc); + ipc->us.sq_port = port; + + sock_reset_flag(sk, SOCK_ZAPPED); + + return 0; +} + +/* Auto bind to an ephemeral port. */ +static int qrtr_autobind(struct socket *sock) +{ + struct sock *sk = sock->sk; + struct sockaddr_qrtr addr; + + if (!sock_flag(sk, SOCK_ZAPPED)) + return 0; + + addr.sq_family = AF_QIPCRTR; + addr.sq_node = qrtr_local_nid; + addr.sq_port = 0; + + return __qrtr_bind(sock, &addr, 1); +} + +/* Bind socket to specified sockaddr. */ +static int qrtr_bind(struct socket *sock, struct sockaddr *saddr, int len) +{ + DECLARE_SOCKADDR(struct sockaddr_qrtr *, addr, saddr); + struct qrtr_sock *ipc = qrtr_sk(sock->sk); + struct sock *sk = sock->sk; + int rc; + + if (len < sizeof(*addr) || addr->sq_family != AF_QIPCRTR) + return -EINVAL; + + if (addr->sq_node != ipc->us.sq_node) + return -EINVAL; + + lock_sock(sk); + rc = __qrtr_bind(sock, addr, sock_flag(sk, SOCK_ZAPPED)); + release_sock(sk); + + return rc; +} + +/* Queue packet to local peer socket. */ +static int qrtr_local_enqueue(struct qrtr_node *node, struct sk_buff *skb) +{ + const struct qrtr_hdr *phdr; + struct qrtr_sock *ipc; + + phdr = (const struct qrtr_hdr *)skb_transport_header(skb); + + ipc = qrtr_port_lookup(le32_to_cpu(phdr->dst_port_id)); + if (!ipc || &ipc->sk == skb->sk) { /* do not send to self */ + kfree_skb(skb); + return -ENODEV; + } + + if (sock_queue_rcv_skb(&ipc->sk, skb)) { + qrtr_port_put(ipc); + kfree_skb(skb); + return -ENOSPC; + } + + qrtr_port_put(ipc); + + return 0; +} + +/* Queue packet for broadcast. 
*/ +static int qrtr_bcast_enqueue(struct qrtr_node *node, struct sk_buff *skb) +{ + struct sk_buff *skbn; + + mutex_lock(&qrtr_node_lock); + list_for_each_entry(node, &qrtr_all_nodes, item) { + skbn = skb_clone(skb, GFP_KERNEL); + if (!skbn) + break; + skb_set_owner_w(skbn, skb->sk); + qrtr_node_enqueue(node, skbn); + } + mutex_unlock(&qrtr_node_lock); + + qrtr_local_enqueue(node, skb); + + return 0; +} + +static int qrtr_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) +{ + DECLARE_SOCKADDR(struct sockaddr_qrtr *, addr, msg->msg_name); + int (*enqueue_fn)(struct qrtr_node *, struct sk_buff *); + struct qrtr_sock *ipc = qrtr_sk(sock->sk); + struct sock *sk = sock->sk; + struct qrtr_node *node; + struct qrtr_hdr *hdr; + struct sk_buff *skb; + size_t plen; + int rc; + + if (msg->msg_flags & ~(MSG_DONTWAIT)) + return -EINVAL; + + if (len > 65535) + return -EMSGSIZE; + + lock_sock(sk); + + if (addr) { + if (msg->msg_namelen < sizeof(*addr)) { + release_sock(sk); + return -EINVAL; + } + + if (addr->sq_family != AF_QIPCRTR) { + release_sock(sk); + return -EINVAL; + } + + rc = qrtr_autobind(sock); + if (rc) { + release_sock(sk); + return rc; + } + } else if (sk->sk_state == TCP_ESTABLISHED) { + addr = &ipc->peer; + } else { + release_sock(sk); + return -ENOTCONN; + } + + node = NULL; + if (addr->sq_node == QRTR_NODE_BCAST) { + enqueue_fn = qrtr_bcast_enqueue; + } else if (addr->sq_node == ipc->us.sq_node) { + enqueue_fn = qrtr_local_enqueue; + } else { + enqueue_fn = qrtr_node_enqueue; + node = qrtr_node_lookup(addr->sq_node); + if (!node) { + release_sock(sk); + return -ECONNRESET; + } + } + + plen = (len + 3) & ~3; + skb = sock_alloc_send_skb(sk, plen + QRTR_HDR_SIZE, + msg->msg_flags & MSG_DONTWAIT, &rc); + if (!skb) + goto out_node; + + skb_reset_transport_header(skb); + skb_put(skb, len + QRTR_HDR_SIZE); + + hdr = (struct qrtr_hdr *)skb_transport_header(skb); + hdr->version = cpu_to_le32(QRTR_PROTO_VER); + hdr->src_node_id = cpu_to_le32(ipc->us.sq_node); + hdr->src_port_id = cpu_to_le32(ipc->us.sq_port); + hdr->confirm_rx = cpu_to_le32(0); + hdr->size = cpu_to_le32(len); + hdr->dst_node_id = cpu_to_le32(addr->sq_node); + hdr->dst_port_id = cpu_to_le32(addr->sq_port); + + rc = skb_copy_datagram_from_iter(skb, QRTR_HDR_SIZE, + &msg->msg_iter, len); + if (rc) { + kfree_skb(skb); + goto out_node; + } + + if (plen != len) { + skb_pad(skb, plen - len); + skb_put(skb, plen - len); + } + + if (ipc->us.sq_port == QRTR_PORT_CTRL) { + if (len < 4) { + rc = -EINVAL; + kfree_skb(skb); + goto out_node; + } + + /* control messages already require the type as 'command' */ + skb_copy_bits(skb, QRTR_HDR_SIZE, &hdr->type, 4); + } else { + hdr->type = cpu_to_le32(QRTR_TYPE_DATA); + } + + rc = enqueue_fn(node, skb); + if (rc >= 0) + rc = len; + +out_node: + qrtr_node_release(node); + release_sock(sk); + + return rc; +} + +static int qrtr_recvmsg(struct socket *sock, struct msghdr *msg, + size_t size, int flags) +{ + DECLARE_SOCKADDR(struct sockaddr_qrtr *, addr, msg->msg_name); + const struct qrtr_hdr *phdr; + struct sock *sk = sock->sk; + struct sk_buff *skb; + int copied, rc; + + lock_sock(sk); + + if (sock_flag(sk, SOCK_ZAPPED)) { + release_sock(sk); + return -EADDRNOTAVAIL; + } + + skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, + flags & MSG_DONTWAIT, &rc); + if (!skb) { + release_sock(sk); + return rc; + } + + phdr = (const struct qrtr_hdr *)skb_transport_header(skb); + copied = le32_to_cpu(phdr->size); + if (copied > size) { + copied = size; + msg->msg_flags |= MSG_TRUNC; + } + + rc = 
skb_copy_datagram_msg(skb, QRTR_HDR_SIZE, msg, copied); + if (rc < 0) + goto out; + rc = copied; + + if (addr) { + addr->sq_family = AF_QIPCRTR; + addr->sq_node = le32_to_cpu(phdr->src_node_id); + addr->sq_port = le32_to_cpu(phdr->src_port_id); + msg->msg_namelen = sizeof(*addr); + } + +out: + skb_free_datagram(sk, skb); + release_sock(sk); + + return rc; +} + +static int qrtr_connect(struct socket *sock, struct sockaddr *saddr, + int len, int flags) +{ + DECLARE_SOCKADDR(struct sockaddr_qrtr *, addr, saddr); + struct qrtr_sock *ipc = qrtr_sk(sock->sk); + struct sock *sk = sock->sk; + int rc; + + if (len < sizeof(*addr) || addr->sq_family != AF_QIPCRTR) + return -EINVAL; + + lock_sock(sk); + + sk->sk_state = TCP_CLOSE; + sock->state = SS_UNCONNECTED; + + rc = qrtr_autobind(sock); + if (rc) { + release_sock(sk); + return rc; + } + + ipc->peer = *addr; + sock->state = SS_CONNECTED; + sk->sk_state = TCP_ESTABLISHED; + + release_sock(sk); + + return 0; +} + +static int qrtr_getname(struct socket *sock, struct sockaddr *saddr, + int *len, int peer) +{ + struct qrtr_sock *ipc = qrtr_sk(sock->sk); + struct sockaddr_qrtr qaddr; + struct sock *sk = sock->sk; + + lock_sock(sk); + if (peer) { + if (sk->sk_state != TCP_ESTABLISHED) { + release_sock(sk); + return -ENOTCONN; + } + + qaddr = ipc->peer; + } else { + qaddr = ipc->us; + } + release_sock(sk); + + *len = sizeof(qaddr); + qaddr.sq_family = AF_QIPCRTR; + + memcpy(saddr, &qaddr, sizeof(qaddr)); + + return 0; +} + +static int qrtr_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) +{ + void __user *argp = (void __user *)arg; + struct qrtr_sock *ipc = qrtr_sk(sock->sk); + struct sock *sk = sock->sk; + struct sockaddr_qrtr *sq; + struct sk_buff *skb; + struct ifreq ifr; + long len = 0; + int rc = 0; + + lock_sock(sk); + + switch (cmd) { + case TIOCOUTQ: + len = sk->sk_sndbuf - sk_wmem_alloc_get(sk); + if (len < 0) + len = 0; + rc = put_user(len, (int __user *)argp); + break; + case TIOCINQ: + skb = skb_peek(&sk->sk_receive_queue); + if (skb) + len = skb->len - QRTR_HDR_SIZE; + rc = put_user(len, (int __user *)argp); + break; + case SIOCGIFADDR: + if (copy_from_user(&ifr, argp, sizeof(ifr))) { + rc = -EFAULT; + break; + } + + sq = (struct sockaddr_qrtr *)&ifr.ifr_addr; + *sq = ipc->us; + if (copy_to_user(argp, &ifr, sizeof(ifr))) { + rc = -EFAULT; + break; + } + break; + case SIOCGSTAMP: + rc = sock_get_timestamp(sk, argp); + break; + case SIOCADDRT: + case SIOCDELRT: + case SIOCSIFADDR: + case SIOCGIFDSTADDR: + case SIOCSIFDSTADDR: + case SIOCGIFBRDADDR: + case SIOCSIFBRDADDR: + case SIOCGIFNETMASK: + case SIOCSIFNETMASK: + rc = -EINVAL; + break; + default: + rc = -ENOIOCTLCMD; + break; + } + + release_sock(sk); + + return rc; +} + +static int qrtr_release(struct socket *sock) +{ + struct sock *sk = sock->sk; + struct qrtr_sock *ipc; + + if (!sk) + return 0; + + lock_sock(sk); + + ipc = qrtr_sk(sk); + sk->sk_shutdown = SHUTDOWN_MASK; + if (!sock_flag(sk, SOCK_DEAD)) + sk->sk_state_change(sk); + + sock_set_flag(sk, SOCK_DEAD); + sock->sk = NULL; + + if (!sock_flag(sk, SOCK_ZAPPED)) + qrtr_port_remove(ipc); + + skb_queue_purge(&sk->sk_receive_queue); + + release_sock(sk); + sock_put(sk); + + return 0; +} + +static const struct proto_ops qrtr_proto_ops = { + .owner = THIS_MODULE, + .family = AF_QIPCRTR, + .bind = qrtr_bind, + .connect = qrtr_connect, + .socketpair = sock_no_socketpair, + .accept = sock_no_accept, + .listen = sock_no_listen, + .sendmsg = qrtr_sendmsg, + .recvmsg = qrtr_recvmsg, + .getname = qrtr_getname, + .ioctl = 
qrtr_ioctl, + .poll = datagram_poll, + .shutdown = sock_no_shutdown, + .setsockopt = sock_no_setsockopt, + .getsockopt = sock_no_getsockopt, + .release = qrtr_release, + .mmap = sock_no_mmap, + .sendpage = sock_no_sendpage, +}; + +static struct proto qrtr_proto = { + .name = "QIPCRTR", + .owner = THIS_MODULE, + .obj_size = sizeof(struct qrtr_sock), +}; + +static int qrtr_create(struct net *net, struct socket *sock, + int protocol, int kern) +{ + struct qrtr_sock *ipc; + struct sock *sk; + + if (sock->type != SOCK_DGRAM) + return -EPROTOTYPE; + + sk = sk_alloc(net, AF_QIPCRTR, GFP_KERNEL, &qrtr_proto, kern); + if (!sk) + return -ENOMEM; + + sock_set_flag(sk, SOCK_ZAPPED); + + sock_init_data(sock, sk); + sock->ops = &qrtr_proto_ops; + + ipc = qrtr_sk(sk); + ipc->us.sq_family = AF_QIPCRTR; + ipc->us.sq_node = qrtr_local_nid; + ipc->us.sq_port = 0; + + return 0; +} + +static const struct nla_policy qrtr_policy[IFA_MAX + 1] = { + [IFA_LOCAL] = { .type = NLA_U32 }, +}; + +static int qrtr_addr_doit(struct sk_buff *skb, struct nlmsghdr *nlh) +{ + struct nlattr *tb[IFA_MAX + 1]; + struct ifaddrmsg *ifm; + int rc; + + if (!netlink_capable(skb, CAP_NET_ADMIN)) + return -EPERM; + + if (!netlink_capable(skb, CAP_SYS_ADMIN)) + return -EPERM; + + ASSERT_RTNL(); + + rc = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, qrtr_policy); + if (rc < 0) + return rc; + + ifm = nlmsg_data(nlh); + if (!tb[IFA_LOCAL]) + return -EINVAL; + + qrtr_local_nid = nla_get_u32(tb[IFA_LOCAL]); + return 0; +} + +static const struct net_proto_family qrtr_family = { + .owner = THIS_MODULE, + .family = AF_QIPCRTR, + .create = qrtr_create, +}; + +static int __init qrtr_proto_init(void) +{ + int rc; + + rc = proto_register(&qrtr_proto, 1); + if (rc) + return rc; + + rc = sock_register(&qrtr_family); + if (rc) { + proto_unregister(&qrtr_proto); + return rc; + } + + rtnl_register(PF_QIPCRTR, RTM_NEWADDR, qrtr_addr_doit, NULL, NULL); + + return 0; +} +module_init(qrtr_proto_init); + +static void __exit qrtr_proto_fini(void) +{ + rtnl_unregister(PF_QIPCRTR, RTM_NEWADDR); + sock_unregister(qrtr_family.family); + proto_unregister(&qrtr_proto); +} +module_exit(qrtr_proto_fini); + +MODULE_DESCRIPTION("Qualcomm IPC-router driver"); +MODULE_LICENSE("GPL v2"); diff --git a/net/qrtr/qrtr.h b/net/qrtr/qrtr.h new file mode 100644 index 000000000000..2b848718f8fe --- /dev/null +++ b/net/qrtr/qrtr.h @@ -0,0 +1,31 @@ +#ifndef __QRTR_H_ +#define __QRTR_H_ + +#include + +struct sk_buff; + +/* endpoint node id auto assignment */ +#define QRTR_EP_NID_AUTO (-1) + +/** + * struct qrtr_endpoint - endpoint handle + * @xmit: Callback for outgoing packets + * + * The socket buffer passed to the xmit function becomes owned by the endpoint + * driver. As such, when the driver is done with the buffer, it should + * call kfree_skb() on failure, or consume_skb() on success. + */ +struct qrtr_endpoint { + int (*xmit)(struct qrtr_endpoint *ep, struct sk_buff *skb); + /* private: not for endpoint use */ + struct qrtr_node *node; +}; + +int qrtr_endpoint_register(struct qrtr_endpoint *ep, unsigned int nid); + +void qrtr_endpoint_unregister(struct qrtr_endpoint *ep); + +int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len); + +#endif diff --git a/net/qrtr/smd.c b/net/qrtr/smd.c new file mode 100644 index 000000000000..84ebce73aa23 --- /dev/null +++ b/net/qrtr/smd.c @@ -0,0 +1,117 @@ +/* + * Copyright (c) 2015, Sony Mobile Communications Inc. + * Copyright (c) 2013, The Linux Foundation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include + +#include "qrtr.h" + +struct qrtr_smd_dev { + struct qrtr_endpoint ep; + struct qcom_smd_channel *channel; +}; + +/* from smd to qrtr */ +static int qcom_smd_qrtr_callback(struct qcom_smd_device *sdev, + const void *data, size_t len) +{ + struct qrtr_smd_dev *qdev = dev_get_drvdata(&sdev->dev); + int rc; + + if (!qdev) + return -EAGAIN; + + rc = qrtr_endpoint_post(&qdev->ep, data, len); + if (rc == -EINVAL) { + dev_err(&sdev->dev, "invalid ipcrouter packet\n"); + /* return 0 to let smd drop the packet */ + rc = 0; + } + + return rc; +} + +/* from qrtr to smd */ +static int qcom_smd_qrtr_send(struct qrtr_endpoint *ep, struct sk_buff *skb) +{ + struct qrtr_smd_dev *qdev = container_of(ep, struct qrtr_smd_dev, ep); + int rc; + + rc = skb_linearize(skb); + if (rc) + goto out; + + rc = qcom_smd_send(qdev->channel, skb->data, skb->len); + +out: + if (rc) + kfree_skb(skb); + else + consume_skb(skb); + return rc; +} + +static int qcom_smd_qrtr_probe(struct qcom_smd_device *sdev) +{ + struct qrtr_smd_dev *qdev; + int rc; + + qdev = devm_kzalloc(&sdev->dev, sizeof(*qdev), GFP_KERNEL); + if (!qdev) + return -ENOMEM; + + qdev->channel = sdev->channel; + qdev->ep.xmit = qcom_smd_qrtr_send; + + rc = qrtr_endpoint_register(&qdev->ep, QRTR_EP_NID_AUTO); + if (rc) + return rc; + + dev_set_drvdata(&sdev->dev, qdev); + + dev_dbg(&sdev->dev, "Qualcomm SMD QRTR driver probed\n"); + + return 0; +} + +static void qcom_smd_qrtr_remove(struct qcom_smd_device *sdev) +{ + struct qrtr_smd_dev *qdev = dev_get_drvdata(&sdev->dev); + + qrtr_endpoint_unregister(&qdev->ep); + + dev_set_drvdata(&sdev->dev, NULL); +} + +static const struct qcom_smd_id qcom_smd_qrtr_smd_match[] = { + { "IPCRTR" }, + {} +}; + +static struct qcom_smd_driver qcom_smd_qrtr_driver = { + .probe = qcom_smd_qrtr_probe, + .remove = qcom_smd_qrtr_remove, + .callback = qcom_smd_qrtr_callback, + .smd_match_table = qcom_smd_qrtr_smd_match, + .driver = { + .name = "qcom_smd_qrtr", + .owner = THIS_MODULE, + }, +}; + +module_qcom_smd_driver(qcom_smd_qrtr_driver); + +MODULE_DESCRIPTION("Qualcomm IPC-Router SMD interface driver"); +MODULE_LICENSE("GPL v2"); From 95b58430abe74f5e50970c57d27380bd5b8be324 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 6 May 2016 08:55:12 -0700 Subject: [PATCH 1400/1649] fq_codel: add memory limitation per queue MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit On small embedded routers, one wants to control maximal amount of memory used by fq_codel, instead of controlling number of packets or bytes, since GRO/TSO make these not practical. Assuming skb->truesize is accurate, we have to keep track of skb->truesize sum for skbs in queue. This patch adds a new TCA_FQ_CODEL_MEMORY_LIMIT attribute. I chose a default value of 32 MBytes, which looks reasonable even for heavy duty usages. 
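In outline, the enqueue-side accounting this introduces looks as follows (a simplified sketch of the fq_codel_enqueue() hunk further below, not a drop-in replacement; only the memory-limit logic is shown):

	/* Sketch: keep a running sum of skb->truesize and treat the queue as
	 * full when either the packet limit or the memory budget is exceeded.
	 */
	q->memory_usage += skb->truesize;
	memory_limited = q->memory_usage > q->memory_limit;

	if (++sch->q.qlen <= sch->limit && !memory_limited)
		return NET_XMIT_SUCCESS;

	/* over budget: drop a batch from the fattest flow (fq_codel_drop(),
	 * which also subtracts the dropped skbs' truesize from memory_usage)
	 * and account the drops to drop_overlimit / drop_overmemory
	 */
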
(Prior fq_codel users should not be hurt when they upgrade their kernels) Two fields are added to tc_fq_codel_qd_stats to report : - Current memory usage - Number of drops caused by memory limits # tc qd replace dev eth1 root est 1sec 4sec fq_codel memory_limit 4M .. # tc -s -d qd sh dev eth1 qdisc fq_codel 8008: root refcnt 257 limit 10240p flows 1024 quantum 1514 target 5.0ms interval 100.0ms memory_limit 4Mb ecn Sent 2083566791363 bytes 1376214889 pkt (dropped 4994406, overlimits 0 requeues 21705223) rate 9841Mbit 812549pps backlog 3906120b 376p requeues 21705223 maxpacket 68130 drop_overlimit 4994406 new_flow_count 28855414 ecn_mark 0 memory_used 4190048 drop_overmemory 4994406 new_flows_len 1 old_flows_len 177 Signed-off-by: Eric Dumazet Cc: Jesper Dangaard Brouer Cc: Dave Täht Cc: Sebastian Möller Signed-off-by: David S. Miller --- include/uapi/linux/pkt_sched.h | 3 +++ net/sched/sch_fq_codel.c | 27 ++++++++++++++++++++++++--- 2 files changed, 27 insertions(+), 3 deletions(-) diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h index a11afecd4482..2382eed50278 100644 --- a/include/uapi/linux/pkt_sched.h +++ b/include/uapi/linux/pkt_sched.h @@ -719,6 +719,7 @@ enum { TCA_FQ_CODEL_QUANTUM, TCA_FQ_CODEL_CE_THRESHOLD, TCA_FQ_CODEL_DROP_BATCH_SIZE, + TCA_FQ_CODEL_MEMORY_LIMIT, __TCA_FQ_CODEL_MAX }; @@ -743,6 +744,8 @@ struct tc_fq_codel_qd_stats { __u32 new_flows_len; /* count of flows in new list */ __u32 old_flows_len; /* count of flows in old list */ __u32 ce_mark; /* packets above ce_threshold */ + __u32 memory_usage; /* in bytes */ + __u32 drop_overmemory; }; struct tc_fq_codel_cl_stats { diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c index e7b42b0d5145..bb8bd9314629 100644 --- a/net/sched/sch_fq_codel.c +++ b/net/sched/sch_fq_codel.c @@ -60,8 +60,11 @@ struct fq_codel_sched_data { u32 perturbation; /* hash perturbation */ u32 quantum; /* psched_mtu(qdisc_dev(sch)); */ u32 drop_batch_size; + u32 memory_limit; struct codel_params cparams; struct codel_stats cstats; + u32 memory_usage; + u32 drop_overmemory; u32 drop_overlimit; u32 new_flow_count; @@ -143,6 +146,7 @@ static unsigned int fq_codel_drop(struct Qdisc *sch, unsigned int max_packets) unsigned int maxbacklog = 0, idx = 0, i, len; struct fq_codel_flow *flow; unsigned int threshold; + unsigned int mem = 0; /* Queue is full! Find the fat flow and drop packet(s) from it. 
* This might sound expensive, but with 1024 flows, we scan @@ -167,11 +171,13 @@ static unsigned int fq_codel_drop(struct Qdisc *sch, unsigned int max_packets) do { skb = dequeue_head(flow); len += qdisc_pkt_len(skb); + mem += skb->truesize; kfree_skb(skb); } while (++i < max_packets && len < threshold); flow->dropped += i; q->backlogs[idx] -= len; + q->memory_usage -= mem; sch->qstats.drops += i; sch->qstats.backlog -= len; sch->q.qlen -= i; @@ -193,6 +199,7 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch) unsigned int idx, prev_backlog, prev_qlen; struct fq_codel_flow *flow; int uninitialized_var(ret); + bool memory_limited; idx = fq_codel_classify(skb, sch, &ret); if (idx == 0) { @@ -215,7 +222,9 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch) flow->deficit = q->quantum; flow->dropped = 0; } - if (++sch->q.qlen <= sch->limit) + q->memory_usage += skb->truesize; + memory_limited = q->memory_usage > q->memory_limit; + if (++sch->q.qlen <= sch->limit && !memory_limited) return NET_XMIT_SUCCESS; prev_backlog = sch->qstats.backlog; @@ -229,7 +238,8 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch) ret = fq_codel_drop(sch, q->drop_batch_size); q->drop_overlimit += prev_qlen - sch->q.qlen; - + if (memory_limited) + q->drop_overmemory += prev_qlen - sch->q.qlen; /* As we dropped packet(s), better let upper stack know this */ qdisc_tree_reduce_backlog(sch, prev_qlen - sch->q.qlen, prev_backlog - sch->qstats.backlog); @@ -308,6 +318,7 @@ begin: list_del_init(&flow->flowchain); goto begin; } + q->memory_usage -= skb->truesize; qdisc_bstats_update(sch, skb); flow->deficit -= qdisc_pkt_len(skb); /* We cant call qdisc_tree_reduce_backlog() if our qlen is 0, @@ -355,6 +366,7 @@ static const struct nla_policy fq_codel_policy[TCA_FQ_CODEL_MAX + 1] = { [TCA_FQ_CODEL_QUANTUM] = { .type = NLA_U32 }, [TCA_FQ_CODEL_CE_THRESHOLD] = { .type = NLA_U32 }, [TCA_FQ_CODEL_DROP_BATCH_SIZE] = { .type = NLA_U32 }, + [TCA_FQ_CODEL_MEMORY_LIMIT] = { .type = NLA_U32 }, }; static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt) @@ -409,7 +421,11 @@ static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt) if (tb[TCA_FQ_CODEL_DROP_BATCH_SIZE]) q->drop_batch_size = min(1U, nla_get_u32(tb[TCA_FQ_CODEL_DROP_BATCH_SIZE])); - while (sch->q.qlen > sch->limit) { + if (tb[TCA_FQ_CODEL_MEMORY_LIMIT]) + q->memory_limit = min(1U << 31, nla_get_u32(tb[TCA_FQ_CODEL_MEMORY_LIMIT])); + + while (sch->q.qlen > sch->limit || + q->memory_usage > q->memory_limit) { struct sk_buff *skb = fq_codel_dequeue(sch); q->cstats.drop_len += qdisc_pkt_len(skb); @@ -454,6 +470,7 @@ static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt) sch->limit = 10*1024; q->flows_cnt = 1024; + q->memory_limit = 32 << 20; /* 32 MBytes */ q->drop_batch_size = 64; q->quantum = psched_mtu(qdisc_dev(sch)); q->perturbation = prandom_u32(); @@ -515,6 +532,8 @@ static int fq_codel_dump(struct Qdisc *sch, struct sk_buff *skb) q->quantum) || nla_put_u32(skb, TCA_FQ_CODEL_DROP_BATCH_SIZE, q->drop_batch_size) || + nla_put_u32(skb, TCA_FQ_CODEL_MEMORY_LIMIT, + q->memory_limit) || nla_put_u32(skb, TCA_FQ_CODEL_FLOWS, q->flows_cnt)) goto nla_put_failure; @@ -543,6 +562,8 @@ static int fq_codel_dump_stats(struct Qdisc *sch, struct gnet_dump *d) st.qdisc_stats.ecn_mark = q->cstats.ecn_mark; st.qdisc_stats.new_flow_count = q->new_flow_count; st.qdisc_stats.ce_mark = q->cstats.ce_mark; + st.qdisc_stats.memory_usage = q->memory_usage; + st.qdisc_stats.drop_overmemory = q->drop_overmemory; 
list_for_each(pos, &q->new_flows) st.qdisc_stats.new_flows_len++; From acf87a3f5794f58743996c1b627b91622df6dd1d Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Fri, 6 May 2016 20:55:23 +0300 Subject: [PATCH 1401/1649] ISDN: eicon: replace custom hex_asc_lo() / hex_pack_byte() Instead of custom approach re-use generic helpers to convert byte to hex format. Signed-off-by: Andy Shevchenko Signed-off-by: David S. Miller --- drivers/isdn/hardware/eicon/message.c | 21 +++++++-------------- 1 file changed, 7 insertions(+), 14 deletions(-) diff --git a/drivers/isdn/hardware/eicon/message.c b/drivers/isdn/hardware/eicon/message.c index d7c286656a25..1a1d99704fe6 100644 --- a/drivers/isdn/hardware/eicon/message.c +++ b/drivers/isdn/hardware/eicon/message.c @@ -1147,8 +1147,6 @@ static byte test_c_ind_mask_bit(PLCI *plci, word b) static void dump_c_ind_mask(PLCI *plci) { - static char hex_digit_table[0x10] = - {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f'}; word i, j, k; dword d; char *p; @@ -1165,7 +1163,7 @@ static void dump_c_ind_mask(PLCI *plci) d = plci->c_ind_mask_table[i + j]; for (k = 0; k < 8; k++) { - *(--p) = hex_digit_table[d & 0xf]; + *(--p) = hex_asc_lo(d); d >>= 4; } } @@ -10507,7 +10505,6 @@ static void mixer_set_bchannel_id(PLCI *plci, byte *chi) static void mixer_calculate_coefs(DIVA_CAPI_ADAPTER *a) { - static char hex_digit_table[0x10] = {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f'}; word n, i, j; char *p; char hex_line[2 * MIXER_MAX_DUMP_CHANNELS + MIXER_MAX_DUMP_CHANNELS / 8 + 4]; @@ -10690,13 +10687,13 @@ static void mixer_calculate_coefs(DIVA_CAPI_ADAPTER *a) n = li_total_channels; if (n > MIXER_MAX_DUMP_CHANNELS) n = MIXER_MAX_DUMP_CHANNELS; + p = hex_line; for (j = 0; j < n; j++) { if ((j & 0x7) == 0) *(p++) = ' '; - *(p++) = hex_digit_table[li_config_table[j].curchnl >> 4]; - *(p++) = hex_digit_table[li_config_table[j].curchnl & 0xf]; + p = hex_byte_pack(p, li_config_table[j].curchnl); } *p = '\0'; dbug(1, dprintf("[%06lx] CURRENT %s", @@ -10706,8 +10703,7 @@ static void mixer_calculate_coefs(DIVA_CAPI_ADAPTER *a) { if ((j & 0x7) == 0) *(p++) = ' '; - *(p++) = hex_digit_table[li_config_table[j].channel >> 4]; - *(p++) = hex_digit_table[li_config_table[j].channel & 0xf]; + p = hex_byte_pack(p, li_config_table[j].channel); } *p = '\0'; dbug(1, dprintf("[%06lx] CHANNEL %s", @@ -10717,8 +10713,7 @@ static void mixer_calculate_coefs(DIVA_CAPI_ADAPTER *a) { if ((j & 0x7) == 0) *(p++) = ' '; - *(p++) = hex_digit_table[li_config_table[j].chflags >> 4]; - *(p++) = hex_digit_table[li_config_table[j].chflags & 0xf]; + p = hex_byte_pack(p, li_config_table[j].chflags); } *p = '\0'; dbug(1, dprintf("[%06lx] CHFLAG %s", @@ -10730,8 +10725,7 @@ static void mixer_calculate_coefs(DIVA_CAPI_ADAPTER *a) { if ((j & 0x7) == 0) *(p++) = ' '; - *(p++) = hex_digit_table[li_config_table[i].flag_table[j] >> 4]; - *(p++) = hex_digit_table[li_config_table[i].flag_table[j] & 0xf]; + p = hex_byte_pack(p, li_config_table[i].flag_table[j]); } *p = '\0'; dbug(1, dprintf("[%06lx] FLAG[%02x]%s", @@ -10744,8 +10738,7 @@ static void mixer_calculate_coefs(DIVA_CAPI_ADAPTER *a) { if ((j & 0x7) == 0) *(p++) = ' '; - *(p++) = hex_digit_table[li_config_table[i].coef_table[j] >> 4]; - *(p++) = hex_digit_table[li_config_table[i].coef_table[j] & 0xf]; + p = hex_byte_pack(p, li_config_table[i].coef_table[j]); } *p = '\0'; dbug(1, dprintf("[%06lx] COEF[%02x]%s", From 8a3a4c6e7b343f1b648b63e55700243e98bfc892 Mon Sep 17 00:00:00 2001 From: Eric 
Dumazet Date: Fri, 6 May 2016 15:55:50 -0700 Subject: [PATCH 1402/1649] net: make sch_handle_ingress() drop monitor ready TC_ACT_STOLEN is used when ingress traffic is mirred/redirected to say ifb. Packet is not dropped, but consumed. Only TC_ACT_SHOT is a clear indication something went wrong. Signed-off-by: Eric Dumazet Cc: Jamal Hadi Salim Acked-by: Alexei Starovoitov Acked-by: Jamal Hadi Salim Signed-off-by: David S. Miller --- net/core/dev.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/net/core/dev.c b/net/core/dev.c index e98ba63fe280..c7490339315c 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -3956,9 +3956,11 @@ sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret, break; case TC_ACT_SHOT: qdisc_qstats_cpu_drop(cl->q); + kfree_skb(skb); + return NULL; case TC_ACT_STOLEN: case TC_ACT_QUEUED: - kfree_skb(skb); + consume_skb(skb); return NULL; case TC_ACT_REDIRECT: /* skb_mac_header check was done by cls/act_bpf, so From 7d945796754a0394b0c5c35d8f80a4a805c7dbb9 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 6 May 2016 18:19:59 -0700 Subject: [PATCH 1403/1649] ifb: support more features When using ifb+netem on ingress on SIT/IPIP/GRE traffic, GRO packets are not properly processed. Segmentation should not be forced, since ifb is already adding quite a performance hit. Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- drivers/net/ifb.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c index cc56fac3c3f8..66c0eeafcb5d 100644 --- a/drivers/net/ifb.c +++ b/drivers/net/ifb.c @@ -196,6 +196,7 @@ static const struct net_device_ops ifb_netdev_ops = { #define IFB_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | NETIF_F_FRAGLIST | \ NETIF_F_TSO_ECN | NETIF_F_TSO | NETIF_F_TSO6 | \ + NETIF_F_GSO_ENCAP_ALL | \ NETIF_F_HIGHDMA | NETIF_F_HW_VLAN_CTAG_TX | \ NETIF_F_HW_VLAN_STAG_TX) @@ -224,6 +225,8 @@ static void ifb_setup(struct net_device *dev) dev->tx_queue_len = TX_Q_LIMIT; dev->features |= IFB_FEATURES; + dev->hw_features |= dev->features; + dev->hw_enc_features |= dev->features; dev->vlan_features |= IFB_FEATURES & ~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX); From b75803d52a2ce1f6cbaf7ae0ae40a369210070cf Mon Sep 17 00:00:00 2001 From: Lawrence Brakmo Date: Fri, 6 May 2016 20:35:35 -0700 Subject: [PATCH 1404/1649] tcp: refactor struct tcp_skb_cb Refactor tcp_skb_cb to create two overlaping areas to store state for incoming or outgoing skbs based on comments by Neal Cardwell to tcp_nv patch: AFAICT this patch would not require an increase in the size of sk_buff cb[] if it were to take advantage of the fact that the tcp_skb_cb header.h4 and header.h6 fields are only used in the packet reception code path, and this in_flight field is only used on the transmit side. Signed-off-by: Lawrence Brakmo Acked-by: Yuchung Cheng Signed-off-by: David S. 
Miller --- include/net/tcp.h | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/include/net/tcp.h b/include/net/tcp.h index 24ec80483805..4775a1bba7f7 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -765,11 +765,16 @@ struct tcp_skb_cb { unused:6; __u32 ack_seq; /* Sequence number ACK'd */ union { - struct inet_skb_parm h4; + struct { + /* There is space for up to 20 bytes */ + } tx; /* only used for outgoing skbs */ + union { + struct inet_skb_parm h4; #if IS_ENABLED(CONFIG_IPV6) - struct inet6_skb_parm h6; + struct inet6_skb_parm h6; #endif - } header; /* For incoming frames */ + } header; /* For incoming skbs */ + }; }; #define TCP_SKB_CB(__skb) ((struct tcp_skb_cb *)&((__skb)->cb[0])) From 59efcbaf434964f324928619fcd0a6f506134753 Mon Sep 17 00:00:00 2001 From: Sergei Shtylyov Date: Sat, 7 May 2016 22:53:40 +0300 Subject: [PATCH 1405/1649] pxa168_eth: mdiobus_scan() doesn't return NULL anymore Now that mdiobus_scan() doesn't return NULL on failure anymore, this driver no longer needs to check for it... Signed-off-by: Sergei Shtylyov Signed-off-by: David S. Miller --- drivers/net/ethernet/marvell/pxa168_eth.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c index 15cf50d7e316..89d0d835352e 100644 --- a/drivers/net/ethernet/marvell/pxa168_eth.c +++ b/drivers/net/ethernet/marvell/pxa168_eth.c @@ -981,8 +981,6 @@ static int pxa168_init_phy(struct net_device *dev) pep->phy = mdiobus_scan(pep->smi_bus, pep->phy_addr); if (IS_ERR(pep->phy)) return PTR_ERR(pep->phy); - if (!pep->phy) - return -ENODEV; err = phy_connect_direct(dev, pep->phy, pxa168_eth_adjust_link, pep->phy_intf); From 7927092253da598331542bdedb8fd5612f161f80 Mon Sep 17 00:00:00 2001 From: Sergei Shtylyov Date: Sun, 8 May 2016 00:08:05 +0300 Subject: [PATCH 1406/1649] sh_eth: call sh_eth_tsu_write() from sh_eth_chip_reset_giga() sh_eth_chip_reset_giga() doesn't really need to use direct iowrite32() when writing to the ARSTR register, it can use sh_eth_tsu_write() as all other chip_reset() methods. Signed-off-by: Sergei Shtylyov Signed-off-by: David S. Miller --- drivers/net/ethernet/renesas/sh_eth.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 07e29638299f..23678e7cce6b 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -725,8 +725,9 @@ static struct sh_eth_cpu_data sh7757_data = { #define GIGA_MAHR(port) (SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c0) static void sh_eth_chip_reset_giga(struct net_device *ndev) { - int i; + struct sh_eth_private *mdp = netdev_priv(ndev); u32 mahr[2], malr[2]; + int i; /* save MAHR and MALR */ for (i = 0; i < 2; i++) { @@ -735,7 +736,7 @@ static void sh_eth_chip_reset_giga(struct net_device *ndev) } /* reset device */ - iowrite32(ARSTR_ARST, (void *)(SH_GIGA_ETH_BASE + 0x1800)); + sh_eth_tsu_write(mdp, ARSTR_ARST, ARSTR); mdelay(1); /* restore MAHR and MALR */ From c66b2581123cd1527b6a084f39e9271cb02673b7 Mon Sep 17 00:00:00 2001 From: Sergei Shtylyov Date: Sat, 7 May 2016 14:09:01 -0700 Subject: [PATCH 1407/1649] sh_eth: reuse sh_eth_chip_reset() All the chip_reset() methods repeat the code writing to the ARSTR register and delaying for 1 ms, so that we can reuse sh_eth_chip_reset() twice. Signed-off-by: Sergei Shtylyov Signed-off-by: David S. 
Miller --- drivers/net/ethernet/renesas/sh_eth.c | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 23678e7cce6b..04cd39f66cc9 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -537,11 +537,7 @@ static struct sh_eth_cpu_data r7s72100_data = { static void sh_eth_chip_reset_r8a7740(struct net_device *ndev) { - struct sh_eth_private *mdp = netdev_priv(ndev); - - /* reset device */ - sh_eth_tsu_write(mdp, ARSTR_ARST, ARSTR); - mdelay(1); + sh_eth_chip_reset(ndev); sh_eth_select_mii(ndev); } @@ -725,7 +721,6 @@ static struct sh_eth_cpu_data sh7757_data = { #define GIGA_MAHR(port) (SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c0) static void sh_eth_chip_reset_giga(struct net_device *ndev) { - struct sh_eth_private *mdp = netdev_priv(ndev); u32 mahr[2], malr[2]; int i; @@ -735,9 +730,7 @@ static void sh_eth_chip_reset_giga(struct net_device *ndev) mahr[i] = ioread32((void *)GIGA_MAHR(i)); } - /* reset device */ - sh_eth_tsu_write(mdp, ARSTR_ARST, ARSTR); - mdelay(1); + sh_eth_chip_reset(ndev); /* restore MAHR and MALR */ for (i = 0; i < 2; i++) { From 3e51a3356cb2a1a35c07607f93d79484b1496bbf Mon Sep 17 00:00:00 2001 From: Alexander Gerasiov Date: Fri, 1 Apr 2016 19:16:45 +0300 Subject: [PATCH 1408/1649] can: sja1000: Fix error location forwarding According to SJA1000 documentation the location of error is available regardless of an error type. Therefore it should always be forwarded to SocketCAN. Signed-off-by: Nikita Edward Baruzdin Signed-off-by: Alexander GQ Gerasiov Acked-by: Oliver Hartkopp Signed-off-by: Marc Kleine-Budde --- drivers/net/can/sja1000/sja1000.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c index 8dda3b703d39..9f107798f904 100644 --- a/drivers/net/can/sja1000/sja1000.c +++ b/drivers/net/can/sja1000/sja1000.c @@ -438,6 +438,7 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status) cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; + /* set error type */ switch (ecc & ECC_MASK) { case ECC_BIT: cf->data[2] |= CAN_ERR_PROT_BIT; @@ -449,9 +450,12 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status) cf->data[2] |= CAN_ERR_PROT_STUFF; break; default: - cf->data[3] = ecc & ECC_SEG; break; } + + /* set error location */ + cf->data[3] = ecc & ECC_SEG; + /* Error occurred during transmission? */ if ((ecc & ECC_DIR) == 0) cf->data[2] |= CAN_ERR_PROT_TX; From 056a7201ac7a659880b43c2e62a5aa85ca06bebf Mon Sep 17 00:00:00 2001 From: Nikita Edward Baruzdin Date: Wed, 6 Apr 2016 16:04:32 +0300 Subject: [PATCH 1409/1649] can: sja1000: plx_pci: Add support for Marathon CAN-bus-PCIe card This patch adds support for the Marathon CAN-bus-PCIe card to the sja1000 driver. 
For more information see: http://can.marathon.ru/page/devices/can-bus-pcie Signed-off-by: Nikita Edward Baruzdin Signed-off-by: Marc Kleine-Budde --- drivers/net/can/sja1000/plx_pci.c | 64 +++++++++++++++++++++++++++---- 1 file changed, 56 insertions(+), 8 deletions(-) diff --git a/drivers/net/can/sja1000/plx_pci.c b/drivers/net/can/sja1000/plx_pci.c index 8836a7485c81..3eb7430dffbf 100644 --- a/drivers/net/can/sja1000/plx_pci.c +++ b/drivers/net/can/sja1000/plx_pci.c @@ -39,6 +39,7 @@ MODULE_DESCRIPTION("Socket-CAN driver for PLX90xx PCI-bridge cards with " MODULE_SUPPORTED_DEVICE("Adlink PCI-7841/cPCI-7841, " "Adlink PCI-7841/cPCI-7841 SE, " "Marathon CAN-bus-PCI, " + "Marathon CAN-bus-PCIe, " "TEWS TECHNOLOGIES TPMC810, " "esd CAN-PCI/CPCI/PCI104/200, " "esd CAN-PCI/PMC/266, " @@ -133,6 +134,7 @@ struct plx_pci_card { #define IXXAT_PCI_SUB_SYS_ID 0x2540 #define MARATHON_PCI_DEVICE_ID 0x2715 +#define MARATHON_PCIE_DEVICE_ID 0x3432 #define TEWS_PCI_VENDOR_ID 0x1498 #define TEWS_PCI_DEVICE_ID_TMPC810 0x032A @@ -141,8 +143,9 @@ struct plx_pci_card { #define CTI_PCI_DEVICE_ID_CRG001 0x0900 static void plx_pci_reset_common(struct pci_dev *pdev); -static void plx_pci_reset_marathon(struct pci_dev *pdev); static void plx9056_pci_reset_common(struct pci_dev *pdev); +static void plx_pci_reset_marathon_pci(struct pci_dev *pdev); +static void plx_pci_reset_marathon_pcie(struct pci_dev *pdev); struct plx_pci_channel_map { u32 bar; @@ -215,14 +218,22 @@ static struct plx_pci_card_info plx_pci_card_info_ixxat = { /* based on PLX9050 */ }; -static struct plx_pci_card_info plx_pci_card_info_marathon = { +static struct plx_pci_card_info plx_pci_card_info_marathon_pci = { "Marathon CAN-bus-PCI", 2, PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR, {0, 0x00, 0x00}, { {2, 0x00, 0x00}, {4, 0x00, 0x00} }, - &plx_pci_reset_marathon + &plx_pci_reset_marathon_pci /* based on PLX9052 */ }; +static struct plx_pci_card_info plx_pci_card_info_marathon_pcie = { + "Marathon CAN-bus-PCIe", 2, + PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR, + {0, 0x00, 0x00}, { {2, 0x00, 0x00}, {3, 0x80, 0x00} }, + &plx_pci_reset_marathon_pcie + /* based on PEX8311 */ +}; + static struct plx_pci_card_info plx_pci_card_info_tews = { "TEWS TECHNOLOGIES TPMC810", 2, PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR, @@ -316,7 +327,14 @@ static const struct pci_device_id plx_pci_tbl[] = { PCI_VENDOR_ID_PLX, MARATHON_PCI_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, - (kernel_ulong_t)&plx_pci_card_info_marathon + (kernel_ulong_t)&plx_pci_card_info_marathon_pci + }, + { + /* Marathon CAN-bus-PCIe card */ + PCI_VENDOR_ID_PLX, MARATHON_PCIE_DEVICE_ID, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + (kernel_ulong_t)&plx_pci_card_info_marathon_pcie }, { /* TEWS TECHNOLOGIES TPMC810 card */ @@ -437,8 +455,8 @@ static void plx9056_pci_reset_common(struct pci_dev *pdev) iowrite32(cntrl, card->conf_addr + PLX9056_CNTRL); }; -/* Special reset function for Marathon card */ -static void plx_pci_reset_marathon(struct pci_dev *pdev) +/* Special reset function for Marathon CAN-bus-PCI card */ +static void plx_pci_reset_marathon_pci(struct pci_dev *pdev) { void __iomem *reset_addr; int i; @@ -460,6 +478,34 @@ static void plx_pci_reset_marathon(struct pci_dev *pdev) } } +/* Special reset function for Marathon CAN-bus-PCIe card */ +static void plx_pci_reset_marathon_pcie(struct pci_dev *pdev) +{ + void __iomem *addr; + void __iomem *reset_addr; + int i; + + plx9056_pci_reset_common(pdev); + + for (i = 0; i < 2; i++) { + struct plx_pci_channel_map *chan_map = + 
&plx_pci_card_info_marathon_pcie.chan_map_tbl[i]; + addr = pci_iomap(pdev, chan_map->bar, chan_map->size); + if (!addr) { + dev_err(&pdev->dev, "Failed to remap reset " + "space %d (BAR%d)\n", i, chan_map->bar); + } else { + /* reset the SJA1000 chip */ + #define MARATHON_PCIE_RESET_OFFSET 32 + reset_addr = addr + chan_map->offset + + MARATHON_PCIE_RESET_OFFSET; + iowrite8(0x1, reset_addr); + udelay(100); + pci_iounmap(pdev, addr); + } + } +} + static void plx_pci_del_card(struct pci_dev *pdev) { struct plx_pci_card *card = pci_get_drvdata(pdev); @@ -486,7 +532,8 @@ static void plx_pci_del_card(struct pci_dev *pdev) * Disable interrupts from PCI-card and disable local * interrupts */ - if (pdev->device != PCI_DEVICE_ID_PLX_9056) + if (pdev->device != PCI_DEVICE_ID_PLX_9056 && + pdev->device != MARATHON_PCIE_DEVICE_ID) iowrite32(0x0, card->conf_addr + PLX_INTCSR); else iowrite32(0x0, card->conf_addr + PLX9056_INTCSR); @@ -619,7 +666,8 @@ static int plx_pci_add_card(struct pci_dev *pdev, * Enable interrupts from PCI-card (PLX90xx) and enable Local_1, * Local_2 interrupts from the SJA1000 chips */ - if (pdev->device != PCI_DEVICE_ID_PLX_9056) { + if (pdev->device != PCI_DEVICE_ID_PLX_9056 && + pdev->device != MARATHON_PCIE_DEVICE_ID) { val = ioread32(card->conf_addr + PLX_INTCSR); if (pdev->subsystem_vendor == PCI_VENDOR_ID_ESDGMBH) val |= PLX_LINT1_EN | PLX_PCI_INT_EN; From b6fd3aba6041922e115bd8e10539b8545f4120ac Mon Sep 17 00:00:00 2001 From: Amitoj Kaur Chawla Date: Fri, 8 Apr 2016 21:02:10 +0530 Subject: [PATCH 1410/1649] can: mcp251x: Replace create_freezable_workqueue with alloc_workqueue Replace scheduled to be removed create_freezable_workqueue with alloc_workqueue. priv->wq should be explicitly set as freezable to ensure it is frozen in the suspend sequence and work items are drained so that no new work item starts execution until thawed. Thus, use of WQ_FREEZABLE flag here is required. WQ_MEM_RECLAIM flag has been set here to ensure forward progress regardless of memory pressure. The order of execution is not important so set @max_active as 0. Signed-off-by: Amitoj Kaur Chawla Acked-by: Tejun Heo Signed-off-by: Marc Kleine-Budde --- drivers/net/can/spi/mcp251x.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c index 74a7dfecee27..cf36d26ef002 100644 --- a/drivers/net/can/spi/mcp251x.c +++ b/drivers/net/can/spi/mcp251x.c @@ -961,7 +961,8 @@ static int mcp251x_open(struct net_device *net) goto open_unlock; } - priv->wq = create_freezable_workqueue("mcp251x_wq"); + priv->wq = alloc_workqueue("mcp251x_wq", WQ_FREEZABLE | WQ_MEM_RECLAIM, + 0); INIT_WORK(&priv->tx_work, mcp251x_tx_work_handler); INIT_WORK(&priv->restart_work, mcp251x_restart_work_handler); From bb208f144cf3f59d8f89a09a80efd04389718907 Mon Sep 17 00:00:00 2001 From: Oliver Hartkopp Date: Mon, 21 Mar 2016 20:18:21 +0100 Subject: [PATCH 1411/1649] can: fix handling of unmodifiable configuration options As described in 'can: m_can: tag current CAN FD controllers as non-ISO' (6cfda7fbebe) it is possible to define fixed configuration options by setting the according bit in 'ctrlmode' and clear it in 'ctrlmode_supported'. This leads to the incovenience that the fixed configuration bits can not be passed by netlink even when they have the correct values (e.g. non-ISO, FD). This patch fixes that issue and not only allows fixed set bit values to be set again but now requires(!) to provide these fixed values at configuration time. 
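As an illustration, a hypothetical CANFD-only driver would use the new helper roughly like this (only can_set_static_ctrlmode() is real, added by this patch in include/linux/can/dev.h; the probe function, priv layout and option choices here are placeholders, and the m_can hunk below shows the actual in-tree conversion):

static int example_canfd_only_probe(struct platform_device *pdev)
{
	struct net_device *ndev;
	struct can_priv *priv;

	ndev = alloc_candev(sizeof(struct can_priv), 1);
	if (!ndev)
		return -ENOMEM;
	priv = netdev_priv(ndev);

	/* FD operation is fixed in hardware: record it as a static option so
	 * netlink must (and now may) pass it; the helper also bumps the MTU
	 * to CANFD_MTU
	 */
	can_set_static_ctrlmode(ndev, CAN_CTRLMODE_FD);

	/* only the genuinely switchable options stay in ctrlmode_supported */
	priv->ctrlmode_supported = CAN_CTRLMODE_LISTENONLY;

	return register_candev(ndev);
}
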
A valid CAN FD configuration consists of a nominal/arbitration bittiming, a data bittiming and a control mode with CAN_CTRLMODE_FD set - which is now enforced by a new can_validate() function. This fix additionally removed the inconsistency that was prohibiting the support of 'CANFD-only' controller drivers, like the RCar CAN FD. For this reason a new helper can_set_static_ctrlmode() has been introduced to provide a proper interface to handle static enabled CAN controller options. Reported-by: Ramesh Shanmugasundaram Signed-off-by: Oliver Hartkopp Reviewed-by: Ramesh Shanmugasundaram Cc: # >= 3.18 Signed-off-by: Marc Kleine-Budde --- drivers/net/can/dev.c | 56 ++++++++++++++++++++++++++++++++--- drivers/net/can/m_can/m_can.c | 2 +- include/linux/can/dev.h | 22 ++++++++++++-- 3 files changed, 73 insertions(+), 7 deletions(-) diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c index 141c2a42d7ed..910c12e2638e 100644 --- a/drivers/net/can/dev.c +++ b/drivers/net/can/dev.c @@ -696,11 +696,17 @@ int can_change_mtu(struct net_device *dev, int new_mtu) /* allow change of MTU according to the CANFD ability of the device */ switch (new_mtu) { case CAN_MTU: + /* 'CANFD-only' controllers can not switch to CAN_MTU */ + if (priv->ctrlmode_static & CAN_CTRLMODE_FD) + return -EINVAL; + priv->ctrlmode &= ~CAN_CTRLMODE_FD; break; case CANFD_MTU: - if (!(priv->ctrlmode_supported & CAN_CTRLMODE_FD)) + /* check for potential CANFD ability */ + if (!(priv->ctrlmode_supported & CAN_CTRLMODE_FD) && + !(priv->ctrlmode_static & CAN_CTRLMODE_FD)) return -EINVAL; priv->ctrlmode |= CAN_CTRLMODE_FD; @@ -782,6 +788,35 @@ static const struct nla_policy can_policy[IFLA_CAN_MAX + 1] = { = { .len = sizeof(struct can_bittiming_const) }, }; +static int can_validate(struct nlattr *tb[], struct nlattr *data[]) +{ + bool is_can_fd = false; + + /* Make sure that valid CAN FD configurations always consist of + * - nominal/arbitration bittiming + * - data bittiming + * - control mode with CAN_CTRLMODE_FD set + */ + + if (data[IFLA_CAN_CTRLMODE]) { + struct can_ctrlmode *cm = nla_data(data[IFLA_CAN_CTRLMODE]); + + is_can_fd = cm->flags & cm->mask & CAN_CTRLMODE_FD; + } + + if (is_can_fd) { + if (!data[IFLA_CAN_BITTIMING] || !data[IFLA_CAN_DATA_BITTIMING]) + return -EOPNOTSUPP; + } + + if (data[IFLA_CAN_DATA_BITTIMING]) { + if (!is_can_fd || !data[IFLA_CAN_BITTIMING]) + return -EOPNOTSUPP; + } + + return 0; +} + static int can_changelink(struct net_device *dev, struct nlattr *tb[], struct nlattr *data[]) { @@ -813,19 +848,31 @@ static int can_changelink(struct net_device *dev, if (data[IFLA_CAN_CTRLMODE]) { struct can_ctrlmode *cm; + u32 ctrlstatic; + u32 maskedflags; /* Do not allow changing controller mode while running */ if (dev->flags & IFF_UP) return -EBUSY; cm = nla_data(data[IFLA_CAN_CTRLMODE]); + ctrlstatic = priv->ctrlmode_static; + maskedflags = cm->flags & cm->mask; - /* check whether changed bits are allowed to be modified */ - if (cm->mask & ~priv->ctrlmode_supported) + /* check whether provided bits are allowed to be passed */ + if (cm->mask & ~(priv->ctrlmode_supported | ctrlstatic)) + return -EOPNOTSUPP; + + /* do not check for static fd-non-iso if 'fd' is disabled */ + if (!(maskedflags & CAN_CTRLMODE_FD)) + ctrlstatic &= ~CAN_CTRLMODE_FD_NON_ISO; + + /* make sure static options are provided by configuration */ + if ((maskedflags & ctrlstatic) != ctrlstatic) return -EOPNOTSUPP; /* clear bits to be modified and copy the flag values */ priv->ctrlmode &= ~cm->mask; - priv->ctrlmode |= (cm->flags & cm->mask); + 
priv->ctrlmode |= maskedflags; /* CAN_CTRLMODE_FD can only be set when driver supports FD */ if (priv->ctrlmode & CAN_CTRLMODE_FD) @@ -966,6 +1013,7 @@ static struct rtnl_link_ops can_link_ops __read_mostly = { .maxtype = IFLA_CAN_MAX, .policy = can_policy, .setup = can_setup, + .validate = can_validate, .newlink = can_newlink, .changelink = can_changelink, .get_size = can_get_size, diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c index 39cf911f7a1e..195f15edb32e 100644 --- a/drivers/net/can/m_can/m_can.c +++ b/drivers/net/can/m_can/m_can.c @@ -955,7 +955,7 @@ static struct net_device *alloc_m_can_dev(void) priv->can.do_get_berr_counter = m_can_get_berr_counter; /* CAN_CTRLMODE_FD_NON_ISO is fixed with M_CAN IP v3.0.1 */ - priv->can.ctrlmode = CAN_CTRLMODE_FD_NON_ISO; + can_set_static_ctrlmode(dev, CAN_CTRLMODE_FD_NON_ISO); /* CAN_CTRLMODE_FD_NON_ISO can not be changed with M_CAN IP v3.0.1 */ priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK | diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h index 735f9f8c4e43..5261751f6bd4 100644 --- a/include/linux/can/dev.h +++ b/include/linux/can/dev.h @@ -40,8 +40,11 @@ struct can_priv { struct can_clock clock; enum can_state state; - u32 ctrlmode; - u32 ctrlmode_supported; + + /* CAN controller features - see include/uapi/linux/can/netlink.h */ + u32 ctrlmode; /* current options setting */ + u32 ctrlmode_supported; /* options that can be modified by netlink */ + u32 ctrlmode_static; /* static enabled options for driver/hardware */ int restart_ms; struct timer_list restart_timer; @@ -108,6 +111,21 @@ static inline bool can_is_canfd_skb(const struct sk_buff *skb) return skb->len == CANFD_MTU; } +/* helper to define static CAN controller features at device creation time */ +static inline void can_set_static_ctrlmode(struct net_device *dev, + u32 static_mode) +{ + struct can_priv *priv = netdev_priv(dev); + + /* alloc_candev() succeeded => netdev_priv() is valid at this point */ + priv->ctrlmode = static_mode; + priv->ctrlmode_static = static_mode; + + /* override MTU which was set by default in can_setup()? */ + if (static_mode & CAN_CTRLMODE_FD) + dev->mtu = CANFD_MTU; +} + /* get data length from can_dlc with sanitized can_dlc */ u8 can_dlc2len(u8 can_dlc); From b16e368ed6193d8d1774b4cc874d7892d55c4780 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andreas=20Gr=C3=B6ger?= Date: Fri, 6 May 2016 10:04:37 +0200 Subject: [PATCH 1412/1649] can: janz-ican3: error handling for CAL/CANopen firmware MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit My patch of May 2015 was missing the changed handling of error indications. With CAL/CANopen firmware the NMTS-SlaveEventIndication must be used instead of CAN-EventIndication. An appropriate slave node must be configured to report the errors. In our department (about 15 development systems with Janz ICAN3- modules with firmware 1.48, my system also with firmware ICANOS 1.35) we use the driver with this patch for about one year: no known problems. 
Signed-off-by: Andreas Gröger Signed-off-by: Marc Kleine-Budde --- drivers/net/can/janz-ican3.c | 104 ++++++++++++++++++++++++++++++++--- 1 file changed, 95 insertions(+), 9 deletions(-) diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c index 5d04f5464faf..f13bb8d9bb84 100644 --- a/drivers/net/can/janz-ican3.c +++ b/drivers/net/can/janz-ican3.c @@ -84,6 +84,7 @@ #define MSG_COFFREQ 0x42 #define MSG_CONREQ 0x43 #define MSG_CCONFREQ 0x47 +#define MSG_NMTS 0xb0 #define MSG_LMTS 0xb4 /* @@ -130,6 +131,22 @@ #define ICAN3_CAN_DLC_MASK 0x0f +/* Janz ICAN3 NMTS subtypes */ +#define NMTS_CREATE_NODE_REQ 0x0 +#define NMTS_SLAVE_STATE_IND 0x8 +#define NMTS_SLAVE_EVENT_IND 0x9 + +/* Janz ICAN3 LMTS subtypes */ +#define LMTS_BUSON_REQ 0x0 +#define LMTS_BUSOFF_REQ 0x1 +#define LMTS_CAN_CONF_REQ 0x2 + +/* Janz ICAN3 NMTS Event indications */ +#define NE_LOCAL_OCCURRED 0x3 +#define NE_LOCAL_RESOLVED 0x2 +#define NE_REMOTE_OCCURRED 0xc +#define NE_REMOTE_RESOLVED 0x8 + /* * SJA1000 Status and Error Register Definitions * @@ -800,21 +817,41 @@ static int ican3_set_bus_state(struct ican3_dev *mod, bool on) return ican3_send_msg(mod, &msg); } else if (mod->fwtype == ICAN3_FWTYPE_CAL_CANOPEN) { + /* bittiming + can-on/off request */ memset(&msg, 0, sizeof(msg)); msg.spec = MSG_LMTS; if (on) { msg.len = cpu_to_le16(4); - msg.data[0] = 0; + msg.data[0] = LMTS_BUSON_REQ; msg.data[1] = 0; msg.data[2] = btr0; msg.data[3] = btr1; } else { msg.len = cpu_to_le16(2); - msg.data[0] = 1; + msg.data[0] = LMTS_BUSOFF_REQ; msg.data[1] = 0; } + res = ican3_send_msg(mod, &msg); + if (res) + return res; - return ican3_send_msg(mod, &msg); + if (on) { + /* create NMT Slave Node for error processing + * class 2 (with error capability, see CiA/DS203-1) + * id 1 + * name locnod1 (must be exactly 7 bytes) + */ + memset(&msg, 0, sizeof(msg)); + msg.spec = MSG_NMTS; + msg.len = cpu_to_le16(11); + msg.data[0] = NMTS_CREATE_NODE_REQ; + msg.data[1] = 0; + msg.data[2] = 2; /* node class */ + msg.data[3] = 1; /* node id */ + strcpy(msg.data + 4, "locnod1"); /* node name */ + return ican3_send_msg(mod, &msg); + } + return 0; } return -ENOTSUPP; } @@ -849,12 +886,23 @@ static int ican3_set_buserror(struct ican3_dev *mod, u8 quota) { struct ican3_msg msg; - memset(&msg, 0, sizeof(msg)); - msg.spec = MSG_CCONFREQ; - msg.len = cpu_to_le16(2); - msg.data[0] = 0x00; - msg.data[1] = quota; - + if (mod->fwtype == ICAN3_FWTYPE_ICANOS) { + memset(&msg, 0, sizeof(msg)); + msg.spec = MSG_CCONFREQ; + msg.len = cpu_to_le16(2); + msg.data[0] = 0x00; + msg.data[1] = quota; + } else if (mod->fwtype == ICAN3_FWTYPE_CAL_CANOPEN) { + memset(&msg, 0, sizeof(msg)); + msg.spec = MSG_LMTS; + msg.len = cpu_to_le16(4); + msg.data[0] = LMTS_CAN_CONF_REQ; + msg.data[1] = 0x00; + msg.data[2] = 0x00; + msg.data[3] = quota; + } else { + return -ENOTSUPP; + } return ican3_send_msg(mod, &msg); } @@ -1150,6 +1198,41 @@ static void ican3_handle_inquiry(struct ican3_dev *mod, struct ican3_msg *msg) } } +/* Handle NMTS Slave Event Indication Messages from the firmware */ +static void ican3_handle_nmtsind(struct ican3_dev *mod, struct ican3_msg *msg) +{ + u16 subspec; + + subspec = msg->data[0] + msg->data[1] * 0x100; + if (subspec == NMTS_SLAVE_EVENT_IND) { + switch (msg->data[2]) { + case NE_LOCAL_OCCURRED: + case NE_LOCAL_RESOLVED: + /* now follows the same message as Raw ICANOS CEVTIND + * shift the data at the same place and call this method + */ + le16_add_cpu(&msg->len, -3); + memmove(msg->data, msg->data + 3, le16_to_cpu(msg->len)); + 
ican3_handle_cevtind(mod, msg); + break; + case NE_REMOTE_OCCURRED: + case NE_REMOTE_RESOLVED: + /* should not occurre, ignore */ + break; + default: + netdev_warn(mod->ndev, "unknown NMTS event indication %x\n", + msg->data[2]); + break; + } + } else if (subspec == NMTS_SLAVE_STATE_IND) { + /* ignore state indications */ + } else { + netdev_warn(mod->ndev, "unhandled NMTS indication %x\n", + subspec); + return; + } +} + static void ican3_handle_unknown_message(struct ican3_dev *mod, struct ican3_msg *msg) { @@ -1179,6 +1262,9 @@ static void ican3_handle_message(struct ican3_dev *mod, struct ican3_msg *msg) case MSG_INQUIRY: ican3_handle_inquiry(mod, msg); break; + case MSG_NMTS: + ican3_handle_nmtsind(mod, msg); + break; default: ican3_handle_unknown_message(mod, msg); break; From 2fe6c943147c5bb0550f5c3e941de3824986330e Mon Sep 17 00:00:00 2001 From: Maximilian Schneider Date: Mon, 25 Apr 2016 08:54:19 +0000 Subject: [PATCH 1413/1649] can: gs_usb: modify the usb device table to use only the first usb interface Modified the USB device table to use only the first USB interface, as is the case with GS USB devices. This allows other GS USB compatible devices to be more flexible with their remaining interfaces. Signed-off-by: Maximilian Schneider Signed-off-by: Marc Kleine-Budde --- drivers/net/can/usb/gs_usb.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c index cbc99d5649af..1556d4286235 100644 --- a/drivers/net/can/usb/gs_usb.c +++ b/drivers/net/can/usb/gs_usb.c @@ -950,7 +950,8 @@ static void gs_usb_disconnect(struct usb_interface *intf) } static const struct usb_device_id gs_usb_table[] = { - {USB_DEVICE(USB_GSUSB_1_VENDOR_ID, USB_GSUSB_1_PRODUCT_ID)}, + { USB_DEVICE_INTERFACE_NUMBER(USB_GSUSB_1_VENDOR_ID, + USB_GSUSB_1_PRODUCT_ID, 0) }, {} /* Terminating entry */ }; From 496c798db0b81af67572a2052ea30504c863235f Mon Sep 17 00:00:00 2001 From: Marek Vasut Date: Sun, 8 May 2016 00:34:11 +0200 Subject: [PATCH 1414/1649] can: ifi: Start NAPI poll on bus warning too Start the NAPI polling in case the bus warning interrupt happens, since it is the poll function which checks and reports the warning. 
Signed-off-by: Marek Vasut Cc: Marc Kleine-Budde Cc: Mark Rutland Cc: Oliver Hartkopp Cc: Wolfgang Grandegger Signed-off-by: Marc Kleine-Budde --- drivers/net/can/ifi_canfd/ifi_canfd.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/net/can/ifi_canfd/ifi_canfd.c b/drivers/net/can/ifi_canfd/ifi_canfd.c index a1bd54ffd31e..1ad05f1ab942 100644 --- a/drivers/net/can/ifi_canfd/ifi_canfd.c +++ b/drivers/net/can/ifi_canfd/ifi_canfd.c @@ -497,7 +497,8 @@ static irqreturn_t ifi_canfd_isr(int irq, void *dev_id) struct ifi_canfd_priv *priv = netdev_priv(ndev); struct net_device_stats *stats = &ndev->stats; const u32 rx_irq_mask = IFI_CANFD_INTERRUPT_RXFIFO_NEMPTY | - IFI_CANFD_INTERRUPT_RXFIFO_NEMPTY_PER; + IFI_CANFD_INTERRUPT_RXFIFO_NEMPTY_PER | + IFI_CANFD_INTERRUPT_ERROR_WARNING; const u32 tx_irq_mask = IFI_CANFD_INTERRUPT_TXFIFO_EMPTY | IFI_CANFD_INTERRUPT_TXFIFO_REMOVE; const u32 clr_irq_mask = ~(IFI_CANFD_INTERRUPT_SET_IRQ | @@ -513,7 +514,7 @@ static irqreturn_t ifi_canfd_isr(int irq, void *dev_id) /* Clear all pending interrupts but ErrWarn */ writel(clr_irq_mask, priv->base + IFI_CANFD_INTERRUPT); - /* RX IRQ, start NAPI */ + /* RX IRQ or bus warning, start NAPI */ if (isr & rx_irq_mask) { ifi_canfd_irq_enable(ndev, 0); napi_schedule(&priv->napi); From be1861320a9ec599b6862ebb71db9bd3ad897150 Mon Sep 17 00:00:00 2001 From: Marek Vasut Date: Sun, 8 May 2016 00:34:12 +0200 Subject: [PATCH 1415/1649] can: ifi: Update timing configuration code The updated documentation regarding the IFI CANFD core from April 2016 adds more details regarding the timing calculation. There is no longer any distinction in the timing calculation between CANFD and CAN2.0, but instead there are two timing modes -- 4_12_6_6 and 7_9_8_8 -- where the numbers mean the width in bits of the SJW/Prescaler/TimeA/TimeB fields. The code uses 7_9_8_8 mode, which allows more fine-grained control over the timing. 
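For reference, in 7_9_8_8 mode the TIME/FTIME register is packed with TimeB at bit 0 (8 bits), TimeA at bit 8 (8 bits), the prescaler at bit 16 (9 bits) and SJW at bit 25 (7 bits), which lines up with the raised bittiming limits in the hunks below (tseg max 256, brp_max 512, sjw_max 128). A condensed sketch of the packing, assuming the caller has already applied the driver's bias (e.g. brp - 2):

static u32 ifi_pack_time_7_9_8_8(u32 brp, u32 sjw, u32 tseg1, u32 tseg2)
{
	return (tseg2 << IFI_CANFD_TIME_TIMEB_OFF) |	/* bits  0..7  */
	       (tseg1 << IFI_CANFD_TIME_TIMEA_OFF) |	/* bits  8..15 */
	       (brp << IFI_CANFD_TIME_PRESCALE_OFF) |	/* bits 16..24 */
	       (sjw << IFI_CANFD_TIME_SJW_OFF_7_9_8_8);	/* bits 25..31 */
}
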
Signed-off-by: Marek Vasut Cc: Marc Kleine-Budde Cc: Mark Rutland Cc: Oliver Hartkopp Cc: Wolfgang Grandegger Signed-off-by: Marc Kleine-Budde --- drivers/net/can/ifi_canfd/ifi_canfd.c | 54 +++++++++++---------------- 1 file changed, 21 insertions(+), 33 deletions(-) diff --git a/drivers/net/can/ifi_canfd/ifi_canfd.c b/drivers/net/can/ifi_canfd/ifi_canfd.c index 1ad05f1ab942..b9efd6ec04c9 100644 --- a/drivers/net/can/ifi_canfd/ifi_canfd.c +++ b/drivers/net/can/ifi_canfd/ifi_canfd.c @@ -34,6 +34,7 @@ #define IFI_CANFD_STCMD_LOOPBACK BIT(18) #define IFI_CANFD_STCMD_DISABLE_CANFD BIT(24) #define IFI_CANFD_STCMD_ENABLE_ISO BIT(25) +#define IFI_CANFD_STCMD_ENABLE_7_9_8_8_TIMING BIT(26) #define IFI_CANFD_STCMD_NORMAL_MODE ((u32)BIT(31)) #define IFI_CANFD_RXSTCMD 0x4 @@ -71,12 +72,12 @@ #define IFI_CANFD_TIME_TIMEB_OFF 0 #define IFI_CANFD_TIME_TIMEA_OFF 8 #define IFI_CANFD_TIME_PRESCALE_OFF 16 -#define IFI_CANFD_TIME_SJW_OFF_ISO 25 -#define IFI_CANFD_TIME_SJW_OFF_BOSCH 28 -#define IFI_CANFD_TIME_SET_SJW_BOSCH BIT(6) -#define IFI_CANFD_TIME_SET_TIMEB_BOSCH BIT(7) -#define IFI_CANFD_TIME_SET_PRESC_BOSCH BIT(14) -#define IFI_CANFD_TIME_SET_TIMEA_BOSCH BIT(15) +#define IFI_CANFD_TIME_SJW_OFF_7_9_8_8 25 +#define IFI_CANFD_TIME_SJW_OFF_4_12_6_6 28 +#define IFI_CANFD_TIME_SET_SJW_4_12_6_6 BIT(6) +#define IFI_CANFD_TIME_SET_TIMEB_4_12_6_6 BIT(7) +#define IFI_CANFD_TIME_SET_PRESC_4_12_6_6 BIT(14) +#define IFI_CANFD_TIME_SET_TIMEA_4_12_6_6 BIT(15) #define IFI_CANFD_TDELAY 0x1c @@ -534,24 +535,24 @@ static irqreturn_t ifi_canfd_isr(int irq, void *dev_id) static const struct can_bittiming_const ifi_canfd_bittiming_const = { .name = KBUILD_MODNAME, .tseg1_min = 1, /* Time segment 1 = prop_seg + phase_seg1 */ - .tseg1_max = 64, + .tseg1_max = 256, .tseg2_min = 2, /* Time segment 2 = phase_seg2 */ - .tseg2_max = 64, - .sjw_max = 16, + .tseg2_max = 256, + .sjw_max = 128, .brp_min = 2, - .brp_max = 256, + .brp_max = 512, .brp_inc = 1, }; static const struct can_bittiming_const ifi_canfd_data_bittiming_const = { .name = KBUILD_MODNAME, .tseg1_min = 1, /* Time segment 1 = prop_seg + phase_seg1 */ - .tseg1_max = 64, + .tseg1_max = 256, .tseg2_min = 2, /* Time segment 2 = phase_seg2 */ - .tseg2_max = 64, - .sjw_max = 16, + .tseg2_max = 256, + .sjw_max = 128, .brp_min = 2, - .brp_max = 256, + .brp_max = 512, .brp_inc = 1, }; @@ -561,19 +562,6 @@ static void ifi_canfd_set_bittiming(struct net_device *ndev) const struct can_bittiming *bt = &priv->can.bittiming; const struct can_bittiming *dbt = &priv->can.data_bittiming; u16 brp, sjw, tseg1, tseg2; - u32 noniso_arg = 0; - u32 time_off; - - if ((priv->can.ctrlmode & CAN_CTRLMODE_FD) && - !(priv->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO)) { - time_off = IFI_CANFD_TIME_SJW_OFF_ISO; - } else { - noniso_arg = IFI_CANFD_TIME_SET_TIMEB_BOSCH | - IFI_CANFD_TIME_SET_TIMEA_BOSCH | - IFI_CANFD_TIME_SET_PRESC_BOSCH | - IFI_CANFD_TIME_SET_SJW_BOSCH; - time_off = IFI_CANFD_TIME_SJW_OFF_BOSCH; - } /* Configure bit timing */ brp = bt->brp - 2; @@ -583,8 +571,7 @@ static void ifi_canfd_set_bittiming(struct net_device *ndev) writel((tseg2 << IFI_CANFD_TIME_TIMEB_OFF) | (tseg1 << IFI_CANFD_TIME_TIMEA_OFF) | (brp << IFI_CANFD_TIME_PRESCALE_OFF) | - (sjw << time_off) | - noniso_arg, + (sjw << IFI_CANFD_TIME_SJW_OFF_7_9_8_8), priv->base + IFI_CANFD_TIME); /* Configure data bit timing */ @@ -595,8 +582,7 @@ static void ifi_canfd_set_bittiming(struct net_device *ndev) writel((tseg2 << IFI_CANFD_TIME_TIMEB_OFF) | (tseg1 << IFI_CANFD_TIME_TIMEA_OFF) | (brp << IFI_CANFD_TIME_PRESCALE_OFF) | - (sjw 
<< time_off) | - noniso_arg, + (sjw << IFI_CANFD_TIME_SJW_OFF_7_9_8_8), priv->base + IFI_CANFD_FTIME); } @@ -641,7 +627,8 @@ static void ifi_canfd_start(struct net_device *ndev) /* Reset the IP */ writel(IFI_CANFD_STCMD_HARDRESET, priv->base + IFI_CANFD_STCMD); - writel(0, priv->base + IFI_CANFD_STCMD); + writel(IFI_CANFD_STCMD_ENABLE_7_9_8_8_TIMING, + priv->base + IFI_CANFD_STCMD); ifi_canfd_set_bittiming(ndev); ifi_canfd_set_filters(ndev); @@ -660,7 +647,8 @@ static void ifi_canfd_start(struct net_device *ndev) writel((u32)(~IFI_CANFD_INTERRUPT_SET_IRQ), priv->base + IFI_CANFD_INTERRUPT); - stcmd = IFI_CANFD_STCMD_ENABLE | IFI_CANFD_STCMD_NORMAL_MODE; + stcmd = IFI_CANFD_STCMD_ENABLE | IFI_CANFD_STCMD_NORMAL_MODE | + IFI_CANFD_STCMD_ENABLE_7_9_8_8_TIMING; if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) stcmd |= IFI_CANFD_STCMD_BUSMONITOR; From 478ad12c9f5a2f5a1d39fc898b098992980d3abc Mon Sep 17 00:00:00 2001 From: Marek Vasut Date: Sun, 8 May 2016 00:34:13 +0200 Subject: [PATCH 1416/1649] can: ifi: Unify timing constants There is no distinction between bittiming constants for the slow and fast part of the CANFD operation on this controller, so just use one single bittiming constant set. Signed-off-by: Marek Vasut Cc: Marc Kleine-Budde Cc: Mark Rutland Cc: Oliver Hartkopp Cc: Wolfgang Grandegger Signed-off-by: Marc Kleine-Budde --- drivers/net/can/ifi_canfd/ifi_canfd.c | 14 +------------- 1 file changed, 1 insertion(+), 13 deletions(-) diff --git a/drivers/net/can/ifi_canfd/ifi_canfd.c b/drivers/net/can/ifi_canfd/ifi_canfd.c index b9efd6ec04c9..30dc3b676627 100644 --- a/drivers/net/can/ifi_canfd/ifi_canfd.c +++ b/drivers/net/can/ifi_canfd/ifi_canfd.c @@ -544,18 +544,6 @@ static const struct can_bittiming_const ifi_canfd_bittiming_const = { .brp_inc = 1, }; -static const struct can_bittiming_const ifi_canfd_data_bittiming_const = { - .name = KBUILD_MODNAME, - .tseg1_min = 1, /* Time segment 1 = prop_seg + phase_seg1 */ - .tseg1_max = 256, - .tseg2_min = 2, /* Time segment 2 = phase_seg2 */ - .tseg2_max = 256, - .sjw_max = 128, - .brp_min = 2, - .brp_max = 512, - .brp_inc = 1, -}; - static void ifi_canfd_set_bittiming(struct net_device *ndev) { struct ifi_canfd_priv *priv = netdev_priv(ndev); @@ -866,7 +854,7 @@ static int ifi_canfd_plat_probe(struct platform_device *pdev) priv->can.clock.freq = readl(addr + IFI_CANFD_CANCLOCK); priv->can.bittiming_const = &ifi_canfd_bittiming_const; - priv->can.data_bittiming_const = &ifi_canfd_data_bittiming_const; + priv->can.data_bittiming_const = &ifi_canfd_bittiming_const; priv->can.do_set_mode = ifi_canfd_set_mode; priv->can.do_get_berr_counter = ifi_canfd_get_berr_counter; From ca79408986bf250c22f3130c333787ec44c4d103 Mon Sep 17 00:00:00 2001 From: Marek Vasut Date: Sun, 8 May 2016 00:34:14 +0200 Subject: [PATCH 1417/1649] can: ifi: Treat CAN_CTRLMODE_FD_NON_ISO correctly The CAN_CTRLMODE_FD flag is set for both ISO and BOSCH CANFD mode, while the CAN_CTRLMODE_FD_NON_ISO is additional flag which is only set for CANFD-BOSCH mode. Fix the handling of the flags to reflect this. 
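Restated as a small decision table (this mirrors the corrected ifi_canfd_start() hunk below; stcmd is the controller start command register value):

	/*
	 *   ctrlmode flags               resulting STCMD bits
	 *   --------------------------   ----------------------------------
	 *   neither FD nor FD_NON_ISO    DISABLE_CANFD  (classic CAN 2.0)
	 *   FD only                      ENABLE_ISO     (ISO CAN FD)
	 *   FD | FD_NON_ISO              neither bit    (Bosch/non-ISO CAN FD)
	 */
	if ((priv->can.ctrlmode & CAN_CTRLMODE_FD) &&
	    !(priv->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO))
		stcmd |= IFI_CANFD_STCMD_ENABLE_ISO;

	if (!(priv->can.ctrlmode & CAN_CTRLMODE_FD))
		stcmd |= IFI_CANFD_STCMD_DISABLE_CANFD;
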
Signed-off-by: Marek Vasut Cc: Marc Kleine-Budde Cc: Mark Rutland Cc: Oliver Hartkopp Cc: Wolfgang Grandegger Reviewed-by: Oliver Hartkopp Signed-off-by: Marc Kleine-Budde --- drivers/net/can/ifi_canfd/ifi_canfd.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/net/can/ifi_canfd/ifi_canfd.c b/drivers/net/can/ifi_canfd/ifi_canfd.c index 30dc3b676627..5bd95dd5cb88 100644 --- a/drivers/net/can/ifi_canfd/ifi_canfd.c +++ b/drivers/net/can/ifi_canfd/ifi_canfd.c @@ -644,10 +644,11 @@ static void ifi_canfd_start(struct net_device *ndev) if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) stcmd |= IFI_CANFD_STCMD_LOOPBACK; - if (priv->can.ctrlmode & CAN_CTRLMODE_FD) + if ((priv->can.ctrlmode & CAN_CTRLMODE_FD) && + !(priv->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO)) stcmd |= IFI_CANFD_STCMD_ENABLE_ISO; - if (!(priv->can.ctrlmode & (CAN_CTRLMODE_FD | CAN_CTRLMODE_FD_NON_ISO))) + if (!(priv->can.ctrlmode & CAN_CTRLMODE_FD)) stcmd |= IFI_CANFD_STCMD_DISABLE_CANFD; priv->can.state = CAN_STATE_ERROR_ACTIVE; From 1acd80fb982728b8bf467184d5ec4c5a77a5601b Mon Sep 17 00:00:00 2001 From: Marek Vasut Date: Sun, 8 May 2016 00:34:15 +0200 Subject: [PATCH 1418/1649] can: ifi: Increment TX counters only on real transmission Only increment the TX counters in the irq handler if a CAN message was sent. The current code incremented the counters also if the TX FIFO empty interrupt happened, which is incorrect. Signed-off-by: Marek Vasut Cc: Marc Kleine-Budde Cc: Mark Rutland Cc: Oliver Hartkopp Cc: Wolfgang Grandegger Signed-off-by: Marc Kleine-Budde --- drivers/net/can/ifi_canfd/ifi_canfd.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/net/can/ifi_canfd/ifi_canfd.c b/drivers/net/can/ifi_canfd/ifi_canfd.c index 5bd95dd5cb88..ba6cd43e1bc6 100644 --- a/drivers/net/can/ifi_canfd/ifi_canfd.c +++ b/drivers/net/can/ifi_canfd/ifi_canfd.c @@ -522,13 +522,15 @@ static irqreturn_t ifi_canfd_isr(int irq, void *dev_id) } /* TX IRQ */ - if (isr & tx_irq_mask) { + if (isr & IFI_CANFD_INTERRUPT_TXFIFO_REMOVE) { stats->tx_bytes += can_get_echo_skb(ndev, 0); stats->tx_packets++; can_led_event(ndev, CAN_LED_EVENT_TX); - netif_wake_queue(ndev); } + if (isr & tx_irq_mask) + netif_wake_queue(ndev); + return IRQ_HANDLED; } From 5bbd655a8bd000579d135ddf30660f759db89996 Mon Sep 17 00:00:00 2001 From: Marek Vasut Date: Sun, 8 May 2016 00:34:16 +0200 Subject: [PATCH 1419/1649] can: ifi: Add more detailed error reporting The updated specification for the IFI CANFD core contains description of more detailed error reporting capability of the core. Implement support for this detailed error reporting. 
Signed-off-by: Marek Vasut Signed-off-by: Marc Kleine-Budde --- drivers/net/can/ifi_canfd/ifi_canfd.c | 113 ++++++++++++++++++++++++-- 1 file changed, 107 insertions(+), 6 deletions(-) diff --git a/drivers/net/can/ifi_canfd/ifi_canfd.c b/drivers/net/can/ifi_canfd/ifi_canfd.c index ba6cd43e1bc6..2d1d22eec750 100644 --- a/drivers/net/can/ifi_canfd/ifi_canfd.c +++ b/drivers/net/can/ifi_canfd/ifi_canfd.c @@ -52,7 +52,8 @@ #define IFI_CANFD_TXSTCMD_OVERFLOW BIT(13) #define IFI_CANFD_INTERRUPT 0xc -#define IFI_CANFD_INTERRUPT_ERROR_WARNING ((u32)BIT(1)) +#define IFI_CANFD_INTERRUPT_ERROR_WARNING BIT(1) +#define IFI_CANFD_INTERRUPT_ERROR_COUNTER BIT(10) #define IFI_CANFD_INTERRUPT_TXFIFO_EMPTY BIT(16) #define IFI_CANFD_INTERRUPT_TXFIFO_REMOVE BIT(22) #define IFI_CANFD_INTERRUPT_RXFIFO_NEMPTY BIT(24) @@ -103,7 +104,26 @@ #define IFI_CANFD_RES1 0x40 -#define IFI_CANFD_RES2 0x44 +#define IFI_CANFD_ERROR_CTR 0x44 +#define IFI_CANFD_ERROR_CTR_UNLOCK_MAGIC 0x21302899 +#define IFI_CANFD_ERROR_CTR_OVERLOAD_FIRST BIT(0) +#define IFI_CANFD_ERROR_CTR_ACK_ERROR_FIRST BIT(1) +#define IFI_CANFD_ERROR_CTR_BIT0_ERROR_FIRST BIT(2) +#define IFI_CANFD_ERROR_CTR_BIT1_ERROR_FIRST BIT(3) +#define IFI_CANFD_ERROR_CTR_STUFF_ERROR_FIRST BIT(4) +#define IFI_CANFD_ERROR_CTR_CRC_ERROR_FIRST BIT(5) +#define IFI_CANFD_ERROR_CTR_FORM_ERROR_FIRST BIT(6) +#define IFI_CANFD_ERROR_CTR_OVERLOAD_ALL BIT(8) +#define IFI_CANFD_ERROR_CTR_ACK_ERROR_ALL BIT(9) +#define IFI_CANFD_ERROR_CTR_BIT0_ERROR_ALL BIT(10) +#define IFI_CANFD_ERROR_CTR_BIT1_ERROR_ALL BIT(11) +#define IFI_CANFD_ERROR_CTR_STUFF_ERROR_ALL BIT(12) +#define IFI_CANFD_ERROR_CTR_CRC_ERROR_ALL BIT(13) +#define IFI_CANFD_ERROR_CTR_FORM_ERROR_ALL BIT(14) +#define IFI_CANFD_ERROR_CTR_BITPOSITION_OFFSET 16 +#define IFI_CANFD_ERROR_CTR_BITPOSITION_MASK 0xff +#define IFI_CANFD_ERROR_CTR_ER_RESET BIT(30) +#define IFI_CANFD_ERROR_CTR_ER_ENABLE ((u32)BIT(31)) #define IFI_CANFD_PAR 0x48 @@ -197,6 +217,8 @@ static void ifi_canfd_irq_enable(struct net_device *ndev, bool enable) if (enable) { enirq = IFI_CANFD_IRQMASK_TXFIFO_EMPTY | IFI_CANFD_IRQMASK_RXFIFO_NEMPTY; + if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) + enirq |= IFI_CANFD_INTERRUPT_ERROR_COUNTER; } writel(IFI_CANFD_IRQMASK_SET_ERR | @@ -335,6 +357,68 @@ static int ifi_canfd_handle_lost_msg(struct net_device *ndev) return 1; } +static int ifi_canfd_handle_lec_err(struct net_device *ndev, const u32 errctr) +{ + struct ifi_canfd_priv *priv = netdev_priv(ndev); + struct net_device_stats *stats = &ndev->stats; + struct can_frame *cf; + struct sk_buff *skb; + const u32 errmask = IFI_CANFD_ERROR_CTR_OVERLOAD_FIRST | + IFI_CANFD_ERROR_CTR_ACK_ERROR_FIRST | + IFI_CANFD_ERROR_CTR_BIT0_ERROR_FIRST | + IFI_CANFD_ERROR_CTR_BIT1_ERROR_FIRST | + IFI_CANFD_ERROR_CTR_STUFF_ERROR_FIRST | + IFI_CANFD_ERROR_CTR_CRC_ERROR_FIRST | + IFI_CANFD_ERROR_CTR_FORM_ERROR_FIRST; + + if (!(errctr & errmask)) /* No error happened. */ + return 0; + + priv->can.can_stats.bus_error++; + stats->rx_errors++; + + /* Propagate the error condition to the CAN stack. */ + skb = alloc_can_err_skb(ndev, &cf); + if (unlikely(!skb)) + return 0; + + /* Read the error counter register and check for new errors. 
*/ + cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; + + if (errctr & IFI_CANFD_ERROR_CTR_OVERLOAD_FIRST) + cf->data[2] |= CAN_ERR_PROT_OVERLOAD; + + if (errctr & IFI_CANFD_ERROR_CTR_ACK_ERROR_FIRST) + cf->data[3] = CAN_ERR_PROT_LOC_ACK; + + if (errctr & IFI_CANFD_ERROR_CTR_BIT0_ERROR_FIRST) + cf->data[2] |= CAN_ERR_PROT_BIT0; + + if (errctr & IFI_CANFD_ERROR_CTR_BIT1_ERROR_FIRST) + cf->data[2] |= CAN_ERR_PROT_BIT1; + + if (errctr & IFI_CANFD_ERROR_CTR_STUFF_ERROR_FIRST) + cf->data[2] |= CAN_ERR_PROT_STUFF; + + if (errctr & IFI_CANFD_ERROR_CTR_CRC_ERROR_FIRST) + cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ; + + if (errctr & IFI_CANFD_ERROR_CTR_FORM_ERROR_FIRST) + cf->data[2] |= CAN_ERR_PROT_FORM; + + /* Reset the error counter, ack the IRQ and re-enable the counter. */ + writel(IFI_CANFD_ERROR_CTR_ER_RESET, priv->base + IFI_CANFD_ERROR_CTR); + writel(IFI_CANFD_INTERRUPT_ERROR_COUNTER, + priv->base + IFI_CANFD_INTERRUPT); + writel(IFI_CANFD_ERROR_CTR_ER_ENABLE, priv->base + IFI_CANFD_ERROR_CTR); + + stats->rx_packets++; + stats->rx_bytes += cf->can_dlc; + netif_receive_skb(skb); + + return 1; +} + static int ifi_canfd_get_berr_counter(const struct net_device *ndev, struct can_berr_counter *bec) { @@ -470,6 +554,7 @@ static int ifi_canfd_poll(struct napi_struct *napi, int quota) u32 stcmd = readl(priv->base + IFI_CANFD_STCMD); u32 rxstcmd = readl(priv->base + IFI_CANFD_STCMD); + u32 errctr = readl(priv->base + IFI_CANFD_ERROR_CTR); /* Handle bus state changes */ if ((stcmd & stcmd_state_mask) || @@ -480,6 +565,10 @@ static int ifi_canfd_poll(struct napi_struct *napi, int quota) if (rxstcmd & IFI_CANFD_RXSTCMD_OVERFLOW) work_done += ifi_canfd_handle_lost_msg(ndev); + /* Handle lec errors on the bus */ + if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) + work_done += ifi_canfd_handle_lec_err(ndev, errctr); + /* Handle normal messages on RX */ if (!(rxstcmd & IFI_CANFD_RXSTCMD_EMPTY)) work_done += ifi_canfd_do_rx_poll(ndev, quota - work_done); @@ -499,11 +588,12 @@ static irqreturn_t ifi_canfd_isr(int irq, void *dev_id) struct net_device_stats *stats = &ndev->stats; const u32 rx_irq_mask = IFI_CANFD_INTERRUPT_RXFIFO_NEMPTY | IFI_CANFD_INTERRUPT_RXFIFO_NEMPTY_PER | - IFI_CANFD_INTERRUPT_ERROR_WARNING; + IFI_CANFD_INTERRUPT_ERROR_WARNING | + IFI_CANFD_INTERRUPT_ERROR_COUNTER; const u32 tx_irq_mask = IFI_CANFD_INTERRUPT_TXFIFO_EMPTY | IFI_CANFD_INTERRUPT_TXFIFO_REMOVE; - const u32 clr_irq_mask = ~(IFI_CANFD_INTERRUPT_SET_IRQ | - IFI_CANFD_INTERRUPT_ERROR_WARNING); + const u32 clr_irq_mask = ~((u32)(IFI_CANFD_INTERRUPT_SET_IRQ | + IFI_CANFD_INTERRUPT_ERROR_WARNING)); u32 isr; isr = readl(priv->base + IFI_CANFD_INTERRUPT); @@ -657,6 +747,12 @@ static void ifi_canfd_start(struct net_device *ndev) ifi_canfd_irq_enable(ndev, 1); + /* Unlock, reset and enable the error counter. */ + writel(IFI_CANFD_ERROR_CTR_UNLOCK_MAGIC, + priv->base + IFI_CANFD_ERROR_CTR); + writel(IFI_CANFD_ERROR_CTR_ER_RESET, priv->base + IFI_CANFD_ERROR_CTR); + writel(IFI_CANFD_ERROR_CTR_ER_ENABLE, priv->base + IFI_CANFD_ERROR_CTR); + /* Enable controller */ writel(stcmd, priv->base + IFI_CANFD_STCMD); } @@ -665,6 +761,10 @@ static void ifi_canfd_stop(struct net_device *ndev) { struct ifi_canfd_priv *priv = netdev_priv(ndev); + /* Reset and disable the error counter. 
*/ + writel(IFI_CANFD_ERROR_CTR_ER_RESET, priv->base + IFI_CANFD_ERROR_CTR); + writel(0, priv->base + IFI_CANFD_ERROR_CTR); + /* Reset the IP */ writel(IFI_CANFD_STCMD_HARDRESET, priv->base + IFI_CANFD_STCMD); @@ -868,7 +968,8 @@ static int ifi_canfd_plat_probe(struct platform_device *pdev) priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK | CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_FD | - CAN_CTRLMODE_FD_NON_ISO; + CAN_CTRLMODE_FD_NON_ISO | + CAN_CTRLMODE_BERR_REPORTING; platform_set_drvdata(pdev, ndev); SET_NETDEV_DEV(ndev, dev); From 464c38556e06723b4c77d36fecff140b8527bc59 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Mon, 9 May 2016 16:24:30 +0200 Subject: [PATCH 1420/1649] netfilter: conntrack: make netns address part of nat bysrc hash Will be needed soon when we place all in the same hash table. Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nf_nat_core.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c index d74e7167499d..069912c370b5 100644 --- a/net/netfilter/nf_nat_core.c +++ b/net/netfilter/nf_nat_core.c @@ -118,7 +118,7 @@ EXPORT_SYMBOL(nf_xfrm_me_harder); /* We keep an extra hash for each conntrack, for fast searching. */ static inline unsigned int -hash_by_src(const struct net *net, const struct nf_conntrack_tuple *tuple) +hash_by_src(const struct net *n, const struct nf_conntrack_tuple *tuple) { unsigned int hash; @@ -126,9 +126,9 @@ hash_by_src(const struct net *net, const struct nf_conntrack_tuple *tuple) /* Original src, to ensure we map it consistently if poss. */ hash = jhash2((u32 *)&tuple->src, sizeof(tuple->src) / sizeof(u32), - tuple->dst.protonum ^ nf_nat_hash_rnd); + tuple->dst.protonum ^ nf_nat_hash_rnd ^ net_hash_mix(n)); - return reciprocal_scale(hash, net->ct.nat_htable_size); + return reciprocal_scale(hash, n->ct.nat_htable_size); } /* Is this tuple already taken? (not by us) */ From a76ae1c85576b4b833a506925417d746bc839302 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Mon, 9 May 2016 16:24:31 +0200 Subject: [PATCH 1421/1649] netfilter: conntrack: use a single nat bysource table for all namespaces We already include netns address in the hash, so we only need to use net_eq in find_appropriate_src and can then put all entries into same table. 
Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- include/net/netns/conntrack.h | 4 ---- net/netfilter/nf_nat_core.c | 33 +++++++++++++++++---------------- 2 files changed, 17 insertions(+), 20 deletions(-) diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h index 2811ddcc1a3d..1e751bf176fa 100644 --- a/include/net/netns/conntrack.h +++ b/include/net/netns/conntrack.h @@ -103,9 +103,5 @@ struct netns_ct { unsigned int labels_used; u8 label_words; #endif -#ifdef CONFIG_NF_NAT_NEEDED - struct hlist_head *nat_bysource; - unsigned int nat_htable_size; -#endif }; #endif diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c index 069912c370b5..6877a396f8fc 100644 --- a/net/netfilter/nf_nat_core.c +++ b/net/netfilter/nf_nat_core.c @@ -37,6 +37,9 @@ static const struct nf_nat_l3proto __rcu *nf_nat_l3protos[NFPROTO_NUMPROTO] __read_mostly; static const struct nf_nat_l4proto __rcu **nf_nat_l4protos[NFPROTO_NUMPROTO] __read_mostly; + +static struct hlist_head *nf_nat_bysource __read_mostly; +static unsigned int nf_nat_htable_size __read_mostly; static unsigned int nf_nat_hash_rnd __read_mostly; inline const struct nf_nat_l3proto * @@ -128,7 +131,7 @@ hash_by_src(const struct net *n, const struct nf_conntrack_tuple *tuple) hash = jhash2((u32 *)&tuple->src, sizeof(tuple->src) / sizeof(u32), tuple->dst.protonum ^ nf_nat_hash_rnd ^ net_hash_mix(n)); - return reciprocal_scale(hash, n->ct.nat_htable_size); + return reciprocal_scale(hash, nf_nat_htable_size); } /* Is this tuple already taken? (not by us) */ @@ -198,9 +201,10 @@ find_appropriate_src(struct net *net, const struct nf_conn_nat *nat; const struct nf_conn *ct; - hlist_for_each_entry_rcu(nat, &net->ct.nat_bysource[h], bysource) { + hlist_for_each_entry_rcu(nat, &nf_nat_bysource[h], bysource) { ct = nat->ct; if (same_src(ct, tuple) && + net_eq(net, nf_ct_net(ct)) && nf_ct_zone_equal(ct, zone, IP_CT_DIR_ORIGINAL)) { /* Copy source part from reply tuple. */ nf_ct_invert_tuplepr(result, @@ -433,7 +437,7 @@ nf_nat_setup_info(struct nf_conn *ct, nat = nfct_nat(ct); nat->ct = ct; hlist_add_head_rcu(&nat->bysource, - &net->ct.nat_bysource[srchash]); + &nf_nat_bysource[srchash]); spin_unlock_bh(&nf_nat_lock); } @@ -821,27 +825,14 @@ nfnetlink_parse_nat_setup(struct nf_conn *ct, } #endif -static int __net_init nf_nat_net_init(struct net *net) -{ - /* Leave them the same for the moment. */ - net->ct.nat_htable_size = nf_conntrack_htable_size; - net->ct.nat_bysource = nf_ct_alloc_hashtable(&net->ct.nat_htable_size, 0); - if (!net->ct.nat_bysource) - return -ENOMEM; - return 0; -} - static void __net_exit nf_nat_net_exit(struct net *net) { struct nf_nat_proto_clean clean = {}; nf_ct_iterate_cleanup(net, nf_nat_proto_clean, &clean, 0, 0); - synchronize_rcu(); - nf_ct_free_hashtable(net->ct.nat_bysource, net->ct.nat_htable_size); } static struct pernet_operations nf_nat_net_ops = { - .init = nf_nat_net_init, .exit = nf_nat_net_exit, }; @@ -854,8 +845,16 @@ static int __init nf_nat_init(void) { int ret; + /* Leave them the same for the moment. 
*/ + nf_nat_htable_size = nf_conntrack_htable_size; + + nf_nat_bysource = nf_ct_alloc_hashtable(&nf_nat_htable_size, 0); + if (!nf_nat_bysource) + return -ENOMEM; + ret = nf_ct_extend_register(&nat_extend); if (ret < 0) { + nf_ct_free_hashtable(nf_nat_bysource, nf_nat_htable_size); printk(KERN_ERR "nf_nat_core: Unable to register extension\n"); return ret; } @@ -879,6 +878,7 @@ static int __init nf_nat_init(void) return 0; cleanup_extend: + nf_ct_free_hashtable(nf_nat_bysource, nf_nat_htable_size); nf_ct_extend_unregister(&nat_extend); return ret; } @@ -897,6 +897,7 @@ static void __exit nf_nat_cleanup(void) for (i = 0; i < NFPROTO_NUMPROTO; i++) kfree(nf_nat_l4protos[i]); synchronize_net(); + nf_ct_free_hashtable(nf_nat_bysource, nf_nat_htable_size); } MODULE_LICENSE("GPL"); From 0c5366b3a8c77fd6d67b763c5a76dfdc314e7726 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Mon, 9 May 2016 16:24:32 +0200 Subject: [PATCH 1422/1649] netfilter: conntrack: use single slab cache An earlier patch changed lookup side to also net_eq() namespaces after obtaining a reference on the conntrack, so a single kmemcache can be used. Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- include/net/netns/conntrack.h | 2 -- net/netfilter/nf_conntrack_core.c | 36 ++++++++++++------------------- 2 files changed, 14 insertions(+), 24 deletions(-) diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h index 1e751bf176fa..38b1a80517f0 100644 --- a/include/net/netns/conntrack.h +++ b/include/net/netns/conntrack.h @@ -84,7 +84,6 @@ struct netns_ct { struct ctl_table_header *event_sysctl_header; struct ctl_table_header *helper_sysctl_header; #endif - char *slabname; unsigned int sysctl_log_invalid; /* Log invalid packets */ int sysctl_events; int sysctl_acct; @@ -93,7 +92,6 @@ struct netns_ct { int sysctl_tstamp; int sysctl_checksum; - struct kmem_cache *nf_conntrack_cachep; struct ct_pcpu __percpu *pcpu_lists; struct ip_conntrack_stat __percpu *stat; struct nf_ct_event_notifier __rcu *nf_conntrack_event_cb; diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index f58a70410c69..0cd29365004f 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -72,6 +72,7 @@ EXPORT_SYMBOL_GPL(nf_conntrack_expect_lock); struct hlist_nulls_head *nf_conntrack_hash __read_mostly; EXPORT_SYMBOL_GPL(nf_conntrack_hash); +static __read_mostly struct kmem_cache *nf_conntrack_cachep; static __read_mostly spinlock_t nf_conntrack_locks_all_lock; static __read_mostly seqcount_t nf_conntrack_generation; static __read_mostly bool nf_conntrack_locks_all; @@ -910,7 +911,7 @@ __nf_conntrack_alloc(struct net *net, * Do not use kmem_cache_zalloc(), as this cache uses * SLAB_DESTROY_BY_RCU. 
*/ - ct = kmem_cache_alloc(net->ct.nf_conntrack_cachep, gfp); + ct = kmem_cache_alloc(nf_conntrack_cachep, gfp); if (ct == NULL) goto out; @@ -937,7 +938,7 @@ __nf_conntrack_alloc(struct net *net, atomic_set(&ct->ct_general.use, 0); return ct; out_free: - kmem_cache_free(net->ct.nf_conntrack_cachep, ct); + kmem_cache_free(nf_conntrack_cachep, ct); out: atomic_dec(&net->ct.count); return ERR_PTR(-ENOMEM); @@ -964,7 +965,7 @@ void nf_conntrack_free(struct nf_conn *ct) nf_ct_ext_destroy(ct); nf_ct_ext_free(ct); - kmem_cache_free(net->ct.nf_conntrack_cachep, ct); + kmem_cache_free(nf_conntrack_cachep, ct); smp_mb__before_atomic(); atomic_dec(&net->ct.count); } @@ -1587,8 +1588,6 @@ i_see_dead_people: nf_conntrack_tstamp_pernet_fini(net); nf_conntrack_acct_pernet_fini(net); nf_conntrack_expect_pernet_fini(net); - kmem_cache_destroy(net->ct.nf_conntrack_cachep); - kfree(net->ct.slabname); free_percpu(net->ct.stat); free_percpu(net->ct.pcpu_lists); } @@ -1693,7 +1692,8 @@ EXPORT_SYMBOL_GPL(nf_ct_untracked_status_or); int nf_conntrack_init_start(void) { int max_factor = 8; - int i, ret, cpu; + int ret = -ENOMEM; + int i, cpu; seqcount_init(&nf_conntrack_generation); @@ -1729,6 +1729,12 @@ int nf_conntrack_init_start(void) nf_conntrack_max = max_factor * nf_conntrack_htable_size; + nf_conntrack_cachep = kmem_cache_create("nf_conntrack", + sizeof(struct nf_conn), 0, + SLAB_DESTROY_BY_RCU, NULL); + if (!nf_conntrack_cachep) + goto err_cachep; + printk(KERN_INFO "nf_conntrack version %s (%u buckets, %d max)\n", NF_CONNTRACK_VERSION, nf_conntrack_htable_size, nf_conntrack_max); @@ -1805,6 +1811,8 @@ err_tstamp: err_acct: nf_conntrack_expect_fini(); err_expect: + kmem_cache_destroy(nf_conntrack_cachep); +err_cachep: nf_ct_free_hashtable(nf_conntrack_hash, nf_conntrack_htable_size); return ret; } @@ -1846,18 +1854,6 @@ int nf_conntrack_init_net(struct net *net) if (!net->ct.stat) goto err_pcpu_lists; - net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%p", net); - if (!net->ct.slabname) - goto err_slabname; - - net->ct.nf_conntrack_cachep = kmem_cache_create(net->ct.slabname, - sizeof(struct nf_conn), 0, - SLAB_DESTROY_BY_RCU, NULL); - if (!net->ct.nf_conntrack_cachep) { - printk(KERN_ERR "Unable to create nf_conn slab cache\n"); - goto err_cache; - } - ret = nf_conntrack_expect_pernet_init(net); if (ret < 0) goto err_expect; @@ -1889,10 +1885,6 @@ err_tstamp: err_acct: nf_conntrack_expect_pernet_fini(net); err_expect: - kmem_cache_destroy(net->ct.nf_conntrack_cachep); -err_cache: - kfree(net->ct.slabname); -err_slabname: free_percpu(net->ct.stat); err_pcpu_lists: free_percpu(net->ct.pcpu_lists); From b5058d7a308035233db18032edc17135cb17ae27 Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Mon, 9 May 2016 13:22:38 -0400 Subject: [PATCH 1423/1649] net: dsa: mv88e6xxx: add flags to info Add a flags bitmap to the info structure in order to identify features supported or not by the different switch models. Signed-off-by: Vivien Didelot Signed-off-by: David S. 
Miller --- drivers/net/dsa/mv88e6123.c | 3 +++ drivers/net/dsa/mv88e6131.c | 4 ++++ drivers/net/dsa/mv88e6171.c | 4 ++++ drivers/net/dsa/mv88e6352.c | 6 ++++++ drivers/net/dsa/mv88e6xxx.h | 21 +++++++++++++++++++++ 5 files changed, 38 insertions(+) diff --git a/drivers/net/dsa/mv88e6123.c b/drivers/net/dsa/mv88e6123.c index 5535a42a6113..ab5885b68a07 100644 --- a/drivers/net/dsa/mv88e6123.c +++ b/drivers/net/dsa/mv88e6123.c @@ -24,18 +24,21 @@ static const struct mv88e6xxx_info mv88e6123_table[] = { .name = "Marvell 88E6123", .num_databases = 4096, .num_ports = 3, + .flags = MV88E6XXX_FLAGS_FAMILY_6165, }, { .prod_num = PORT_SWITCH_ID_PROD_NUM_6161, .family = MV88E6XXX_FAMILY_6165, .name = "Marvell 88E6161", .num_databases = 4096, .num_ports = 6, + .flags = MV88E6XXX_FLAGS_FAMILY_6165, }, { .prod_num = PORT_SWITCH_ID_PROD_NUM_6165, .family = MV88E6XXX_FAMILY_6165, .name = "Marvell 88E6165", .num_databases = 4096, .num_ports = 6, + .flags = MV88E6XXX_FLAGS_FAMILY_6165, } }; diff --git a/drivers/net/dsa/mv88e6131.c b/drivers/net/dsa/mv88e6131.c index 357ab794d720..d4773204935b 100644 --- a/drivers/net/dsa/mv88e6131.c +++ b/drivers/net/dsa/mv88e6131.c @@ -24,24 +24,28 @@ static const struct mv88e6xxx_info mv88e6131_table[] = { .name = "Marvell 88E6095/88E6095F", .num_databases = 256, .num_ports = 11, + .flags = MV88E6XXX_FLAGS_FAMILY_6095, }, { .prod_num = PORT_SWITCH_ID_PROD_NUM_6085, .family = MV88E6XXX_FAMILY_6097, .name = "Marvell 88E6085", .num_databases = 4096, .num_ports = 10, + .flags = MV88E6XXX_FLAGS_FAMILY_6097, }, { .prod_num = PORT_SWITCH_ID_PROD_NUM_6131, .family = MV88E6XXX_FAMILY_6185, .name = "Marvell 88E6131", .num_databases = 256, .num_ports = 8, + .flags = MV88E6XXX_FLAGS_FAMILY_6185, }, { .prod_num = PORT_SWITCH_ID_PROD_NUM_6185, .family = MV88E6XXX_FAMILY_6185, .name = "Marvell 88E6185", .num_databases = 256, .num_ports = 10, + .flags = MV88E6XXX_FLAGS_FAMILY_6185, } }; diff --git a/drivers/net/dsa/mv88e6171.c b/drivers/net/dsa/mv88e6171.c index f75164dc3bd6..e64cbeed2cdf 100644 --- a/drivers/net/dsa/mv88e6171.c +++ b/drivers/net/dsa/mv88e6171.c @@ -24,24 +24,28 @@ static const struct mv88e6xxx_info mv88e6171_table[] = { .name = "Marvell 88E6171", .num_databases = 4096, .num_ports = 7, + .flags = MV88E6XXX_FLAGS_FAMILY_6351, }, { .prod_num = PORT_SWITCH_ID_PROD_NUM_6175, .family = MV88E6XXX_FAMILY_6351, .name = "Marvell 88E6175", .num_databases = 4096, .num_ports = 7, + .flags = MV88E6XXX_FLAGS_FAMILY_6351, }, { .prod_num = PORT_SWITCH_ID_PROD_NUM_6350, .family = MV88E6XXX_FAMILY_6351, .name = "Marvell 88E6350", .num_databases = 4096, .num_ports = 7, + .flags = MV88E6XXX_FLAGS_FAMILY_6351, }, { .prod_num = PORT_SWITCH_ID_PROD_NUM_6351, .family = MV88E6XXX_FAMILY_6351, .name = "Marvell 88E6351", .num_databases = 4096, .num_ports = 7, + .flags = MV88E6XXX_FLAGS_FAMILY_6351, } }; diff --git a/drivers/net/dsa/mv88e6352.c b/drivers/net/dsa/mv88e6352.c index c622a1d58480..c61f0f4da6f4 100644 --- a/drivers/net/dsa/mv88e6352.c +++ b/drivers/net/dsa/mv88e6352.c @@ -29,36 +29,42 @@ static const struct mv88e6xxx_info mv88e6352_table[] = { .name = "Marvell 88E6320", .num_databases = 4096, .num_ports = 7, + .flags = MV88E6XXX_FLAGS_FAMILY_6320, }, { .prod_num = PORT_SWITCH_ID_PROD_NUM_6321, .family = MV88E6XXX_FAMILY_6320, .name = "Marvell 88E6321", .num_databases = 4096, .num_ports = 7, + .flags = MV88E6XXX_FLAGS_FAMILY_6320, }, { .prod_num = PORT_SWITCH_ID_PROD_NUM_6172, .family = MV88E6XXX_FAMILY_6352, .name = "Marvell 88E6172", .num_databases = 4096, .num_ports = 7, + .flags = 
MV88E6XXX_FLAGS_FAMILY_6352, }, { .prod_num = PORT_SWITCH_ID_PROD_NUM_6176, .family = MV88E6XXX_FAMILY_6352, .name = "Marvell 88E6176", .num_databases = 4096, .num_ports = 7, + .flags = MV88E6XXX_FLAGS_FAMILY_6352, }, { .prod_num = PORT_SWITCH_ID_PROD_NUM_6240, .family = MV88E6XXX_FAMILY_6352, .name = "Marvell 88E6240", .num_databases = 4096, .num_ports = 7, + .flags = MV88E6XXX_FLAGS_FAMILY_6352, }, { .prod_num = PORT_SWITCH_ID_PROD_NUM_6352, .family = MV88E6XXX_FAMILY_6352, .name = "Marvell 88E6352", .num_databases = 4096, .num_ports = 7, + .flags = MV88E6XXX_FLAGS_FAMILY_6352, } }; diff --git a/drivers/net/dsa/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx.h index 4f455d219859..c67b72af9af1 100644 --- a/drivers/net/dsa/mv88e6xxx.h +++ b/drivers/net/dsa/mv88e6xxx.h @@ -350,12 +350,27 @@ enum mv88e6xxx_family { MV88E6XXX_FAMILY_6352, /* 6172 6176 6240 6352 */ }; +#define MV88E6XXX_FLAGS_FAMILY_6095 0 + +#define MV88E6XXX_FLAGS_FAMILY_6097 0 + +#define MV88E6XXX_FLAGS_FAMILY_6165 0 + +#define MV88E6XXX_FLAGS_FAMILY_6185 0 + +#define MV88E6XXX_FLAGS_FAMILY_6320 0 + +#define MV88E6XXX_FLAGS_FAMILY_6351 0 + +#define MV88E6XXX_FLAGS_FAMILY_6352 0 + struct mv88e6xxx_info { enum mv88e6xxx_family family; u16 prod_num; const char *name; unsigned int num_databases; unsigned int num_ports; + unsigned long flags; }; struct mv88e6xxx_atu_entry { @@ -449,6 +464,12 @@ struct mv88e6xxx_hw_stat { enum stat_type type; }; +static inline bool mv88e6xxx_has(struct mv88e6xxx_priv_state *ps, + unsigned long flags) +{ + return (ps->info->flags & flags) == flags; +} + int mv88e6xxx_switch_reset(struct mv88e6xxx_priv_state *ps, bool ppu_active); const char *mv88e6xxx_drv_probe(struct device *dsa_dev, struct device *host_dev, int sw_addr, void **priv, From 8c9983a2249269f9b0f22bf070bf856ec1ff58d7 Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Mon, 9 May 2016 13:22:39 -0400 Subject: [PATCH 1424/1649] net: dsa: mv88e6xxx: factorize PHY access with PPU Add a MV88E6XXX_FLAG_PPU flag to describe switch models with a PHY Polling Unit. This allows to merge PPU specific PHY access code in the share code. Make the mv88e6xxx_ppu_disable and mv88e6xxx_phy_{read,write}_ppu functions use unlocked register accesses in order to call them in mv88e6xxx_phy_{read,write} in a locked context. Since the PPU code is shared, also remove NET_DSA_MV88E6XXX_NEED_PPU. Signed-off-by: Vivien Didelot Signed-off-by: David S. Miller --- drivers/net/dsa/Kconfig | 5 ----- drivers/net/dsa/mv88e6131.c | 39 ++---------------------------------- drivers/net/dsa/mv88e6xxx.c | 40 +++++++++++++++++++++++-------------- drivers/net/dsa/mv88e6xxx.h | 25 ++++++++++++++--------- 4 files changed, 43 insertions(+), 66 deletions(-) diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig index 90ba003d8fdf..4aaadced6b81 100644 --- a/drivers/net/dsa/Kconfig +++ b/drivers/net/dsa/Kconfig @@ -13,15 +13,10 @@ config NET_DSA_MV88E6060 This enables support for the Marvell 88E6060 ethernet switch chip. 
-config NET_DSA_MV88E6XXX_NEED_PPU - bool - default n - config NET_DSA_MV88E6131 tristate "Marvell 88E6085/6095/6095F/6131 ethernet switch chip support" depends on NET_DSA select NET_DSA_MV88E6XXX - select NET_DSA_MV88E6XXX_NEED_PPU select NET_DSA_TAG_DSA ---help--- This enables support for the Marvell 88E6085/6095/6095F/6131 diff --git a/drivers/net/dsa/mv88e6131.c b/drivers/net/dsa/mv88e6131.c index d4773204935b..9d21d69de08a 100644 --- a/drivers/net/dsa/mv88e6131.c +++ b/drivers/net/dsa/mv88e6131.c @@ -132,8 +132,6 @@ static int mv88e6131_setup(struct dsa_switch *ds) if (ret < 0) return ret; - mv88e6xxx_ppu_state_init(ps); - ret = mv88e6xxx_switch_reset(ps, false); if (ret < 0) return ret; @@ -145,46 +143,13 @@ static int mv88e6131_setup(struct dsa_switch *ds) return mv88e6xxx_setup_ports(ds); } -static int mv88e6131_port_to_phy_addr(struct dsa_switch *ds, int port) -{ - struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); - - if (port >= 0 && port < ps->info->num_ports) - return port; - - return -EINVAL; -} - -static int -mv88e6131_phy_read(struct dsa_switch *ds, int port, int regnum) -{ - int addr = mv88e6131_port_to_phy_addr(ds, port); - - if (addr < 0) - return addr; - - return mv88e6xxx_phy_read_ppu(ds, addr, regnum); -} - -static int -mv88e6131_phy_write(struct dsa_switch *ds, - int port, int regnum, u16 val) -{ - int addr = mv88e6131_port_to_phy_addr(ds, port); - - if (addr < 0) - return addr; - - return mv88e6xxx_phy_write_ppu(ds, addr, regnum, val); -} - struct dsa_switch_driver mv88e6131_switch_driver = { .tag_protocol = DSA_TAG_PROTO_DSA, .probe = mv88e6131_drv_probe, .setup = mv88e6131_setup, .set_addr = mv88e6xxx_set_addr_direct, - .phy_read = mv88e6131_phy_read, - .phy_write = mv88e6131_phy_write, + .phy_read = mv88e6xxx_phy_read, + .phy_write = mv88e6xxx_phy_write, .get_strings = mv88e6xxx_get_strings, .get_ethtool_stats = mv88e6xxx_get_ethtool_stats, .get_sset_count = mv88e6xxx_get_sset_count, diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c index 470cfc783baa..a28b46c33e13 100644 --- a/drivers/net/dsa/mv88e6xxx.c +++ b/drivers/net/dsa/mv88e6xxx.c @@ -241,24 +241,23 @@ static int _mv88e6xxx_phy_write(struct mv88e6xxx_priv_state *ps, int addr, return 0; } -#ifdef CONFIG_NET_DSA_MV88E6XXX_NEED_PPU static int mv88e6xxx_ppu_disable(struct mv88e6xxx_priv_state *ps) { int ret; unsigned long timeout; - ret = mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_CONTROL); + ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_CONTROL); if (ret < 0) return ret; - ret = mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_CONTROL, - ret & ~GLOBAL_CONTROL_PPU_ENABLE); + ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_CONTROL, + ret & ~GLOBAL_CONTROL_PPU_ENABLE); if (ret) return ret; timeout = jiffies + 1 * HZ; while (time_before(jiffies, timeout)) { - ret = mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_STATUS); + ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_STATUS); if (ret < 0) return ret; @@ -361,35 +360,33 @@ void mv88e6xxx_ppu_state_init(struct mv88e6xxx_priv_state *ps) ps->ppu_timer.function = mv88e6xxx_ppu_reenable_timer; } -int mv88e6xxx_phy_read_ppu(struct dsa_switch *ds, int addr, int regnum) +static int mv88e6xxx_phy_read_ppu(struct mv88e6xxx_priv_state *ps, int addr, + int regnum) { - struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); int ret; ret = mv88e6xxx_ppu_access_get(ps); if (ret >= 0) { - ret = mv88e6xxx_reg_read(ps, addr, regnum); + ret = _mv88e6xxx_reg_read(ps, addr, regnum); mv88e6xxx_ppu_access_put(ps); } return ret; } -int mv88e6xxx_phy_write_ppu(struct dsa_switch 
*ds, int addr, - int regnum, u16 val) +static int mv88e6xxx_phy_write_ppu(struct mv88e6xxx_priv_state *ps, int addr, + int regnum, u16 val) { - struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); int ret; ret = mv88e6xxx_ppu_access_get(ps); if (ret >= 0) { - ret = mv88e6xxx_reg_write(ps, addr, regnum, val); + ret = _mv88e6xxx_reg_write(ps, addr, regnum, val); mv88e6xxx_ppu_access_put(ps); } return ret; } -#endif static bool mv88e6xxx_6065_family(struct mv88e6xxx_priv_state *ps) { @@ -2599,6 +2596,9 @@ int mv88e6xxx_setup_common(struct mv88e6xxx_priv_state *ps) INIT_WORK(&ps->bridge_work, mv88e6xxx_bridge_work); + if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_PPU)) + mv88e6xxx_ppu_state_init(ps); + return 0; } @@ -2884,7 +2884,12 @@ mv88e6xxx_phy_read(struct dsa_switch *ds, int port, int regnum) return 0xffff; mutex_lock(&ps->smi_mutex); - ret = _mv88e6xxx_phy_read(ps, addr, regnum); + + if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_PPU)) + ret = mv88e6xxx_phy_read_ppu(ps, addr, regnum); + else + ret = _mv88e6xxx_phy_read(ps, addr, regnum); + mutex_unlock(&ps->smi_mutex); return ret; } @@ -2900,7 +2905,12 @@ mv88e6xxx_phy_write(struct dsa_switch *ds, int port, int regnum, u16 val) return 0xffff; mutex_lock(&ps->smi_mutex); - ret = _mv88e6xxx_phy_write(ps, addr, regnum, val); + + if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_PPU)) + ret = mv88e6xxx_phy_write_ppu(ps, addr, regnum, val); + else + ret = _mv88e6xxx_phy_write(ps, addr, regnum, val); + mutex_unlock(&ps->smi_mutex); return ret; } diff --git a/drivers/net/dsa/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx.h index c67b72af9af1..52ca24efec64 100644 --- a/drivers/net/dsa/mv88e6xxx.h +++ b/drivers/net/dsa/mv88e6xxx.h @@ -350,13 +350,26 @@ enum mv88e6xxx_family { MV88E6XXX_FAMILY_6352, /* 6172 6176 6240 6352 */ }; -#define MV88E6XXX_FLAGS_FAMILY_6095 0 +enum mv88e6xxx_cap { + /* PHY Polling Unit. + * See GLOBAL_CONTROL_PPU_ENABLE and GLOBAL_STATUS_PPU_POLLING. + */ + MV88E6XXX_CAP_PPU, +}; -#define MV88E6XXX_FLAGS_FAMILY_6097 0 +/* Bitmask of capabilities */ +#define MV88E6XXX_FLAG_PPU BIT(MV88E6XXX_CAP_PPU) + +#define MV88E6XXX_FLAGS_FAMILY_6095 \ + MV88E6XXX_FLAG_PPU + +#define MV88E6XXX_FLAGS_FAMILY_6097 \ + MV88E6XXX_FLAG_PPU #define MV88E6XXX_FLAGS_FAMILY_6165 0 -#define MV88E6XXX_FLAGS_FAMILY_6185 0 +#define MV88E6XXX_FLAGS_FAMILY_6185 \ + MV88E6XXX_FLAG_PPU #define MV88E6XXX_FLAGS_FAMILY_6320 0 @@ -418,7 +431,6 @@ struct mv88e6xxx_priv_state { struct mii_bus *bus; int sw_addr; -#ifdef CONFIG_NET_DSA_MV88E6XXX_NEED_PPU /* Handles automatic disabling and re-enabling of the PHY * polling unit. */ @@ -426,7 +438,6 @@ struct mv88e6xxx_priv_state { int ppu_disabled; struct work_struct ppu_work; struct timer_list ppu_timer; -#endif /* This mutex serialises access to the statistics unit. * Hold this mutex over snapshot + dump sequences. 
@@ -489,10 +500,6 @@ int mv88e6xxx_phy_write(struct dsa_switch *ds, int port, int regnum, u16 val); int mv88e6xxx_phy_read_indirect(struct dsa_switch *ds, int port, int regnum); int mv88e6xxx_phy_write_indirect(struct dsa_switch *ds, int port, int regnum, u16 val); -void mv88e6xxx_ppu_state_init(struct mv88e6xxx_priv_state *ps); -int mv88e6xxx_phy_read_ppu(struct dsa_switch *ds, int addr, int regnum); -int mv88e6xxx_phy_write_ppu(struct dsa_switch *ds, int addr, - int regnum, u16 val); void mv88e6xxx_get_strings(struct dsa_switch *ds, int port, uint8_t *data); void mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data); From 6d5834a1adefd6199bbd7c8b2ba3a131f38e161e Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Mon, 9 May 2016 13:22:40 -0400 Subject: [PATCH 1425/1649] net: dsa: mv88e6xxx: factorize PHY indirect access Some switch has dedicated SMI PHY Command and Data registers, used to indirectly access the PHYs, instead of direct access. Identify these switch models and make mv88e6xxx_phy_{read,write} generic enough to support every models. Signed-off-by: Vivien Didelot Signed-off-by: David S. Miller --- drivers/net/dsa/mv88e6171.c | 4 ++-- drivers/net/dsa/mv88e6352.c | 4 ++-- drivers/net/dsa/mv88e6xxx.c | 37 ++++--------------------------------- drivers/net/dsa/mv88e6xxx.h | 22 +++++++++++++--------- 4 files changed, 21 insertions(+), 46 deletions(-) diff --git a/drivers/net/dsa/mv88e6171.c b/drivers/net/dsa/mv88e6171.c index e64cbeed2cdf..b190647d2a15 100644 --- a/drivers/net/dsa/mv88e6171.c +++ b/drivers/net/dsa/mv88e6171.c @@ -124,8 +124,8 @@ struct dsa_switch_driver mv88e6171_switch_driver = { .probe = mv88e6171_drv_probe, .setup = mv88e6171_setup, .set_addr = mv88e6xxx_set_addr_indirect, - .phy_read = mv88e6xxx_phy_read_indirect, - .phy_write = mv88e6xxx_phy_write_indirect, + .phy_read = mv88e6xxx_phy_read, + .phy_write = mv88e6xxx_phy_write, .get_strings = mv88e6xxx_get_strings, .get_ethtool_stats = mv88e6xxx_get_ethtool_stats, .get_sset_count = mv88e6xxx_get_sset_count, diff --git a/drivers/net/dsa/mv88e6352.c b/drivers/net/dsa/mv88e6352.c index c61f0f4da6f4..6fa7c02f9027 100644 --- a/drivers/net/dsa/mv88e6352.c +++ b/drivers/net/dsa/mv88e6352.c @@ -344,8 +344,8 @@ struct dsa_switch_driver mv88e6352_switch_driver = { .probe = mv88e6352_drv_probe, .setup = mv88e6352_setup, .set_addr = mv88e6xxx_set_addr_indirect, - .phy_read = mv88e6xxx_phy_read_indirect, - .phy_write = mv88e6xxx_phy_write_indirect, + .phy_read = mv88e6xxx_phy_read, + .phy_write = mv88e6xxx_phy_write, .get_strings = mv88e6xxx_get_strings, .get_ethtool_stats = mv88e6xxx_get_ethtool_stats, .get_sset_count = mv88e6xxx_get_sset_count, diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c index a28b46c33e13..2c8c5e1d16bc 100644 --- a/drivers/net/dsa/mv88e6xxx.c +++ b/drivers/net/dsa/mv88e6xxx.c @@ -2887,6 +2887,8 @@ mv88e6xxx_phy_read(struct dsa_switch *ds, int port, int regnum) if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_PPU)) ret = mv88e6xxx_phy_read_ppu(ps, addr, regnum); + else if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_SMI_PHY)) + ret = _mv88e6xxx_phy_read_indirect(ps, addr, regnum); else ret = _mv88e6xxx_phy_read(ps, addr, regnum); @@ -2908,6 +2910,8 @@ mv88e6xxx_phy_write(struct dsa_switch *ds, int port, int regnum, u16 val) if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_PPU)) ret = mv88e6xxx_phy_write_ppu(ps, addr, regnum, val); + else if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_SMI_PHY)) + ret = _mv88e6xxx_phy_write_indirect(ps, addr, regnum, val); else ret = _mv88e6xxx_phy_write(ps, addr, 
regnum, val); @@ -2915,39 +2919,6 @@ mv88e6xxx_phy_write(struct dsa_switch *ds, int port, int regnum, u16 val) return ret; } -int -mv88e6xxx_phy_read_indirect(struct dsa_switch *ds, int port, int regnum) -{ - struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); - int addr = mv88e6xxx_port_to_phy_addr(ps, port); - int ret; - - if (addr < 0) - return 0xffff; - - mutex_lock(&ps->smi_mutex); - ret = _mv88e6xxx_phy_read_indirect(ps, addr, regnum); - mutex_unlock(&ps->smi_mutex); - return ret; -} - -int -mv88e6xxx_phy_write_indirect(struct dsa_switch *ds, int port, int regnum, - u16 val) -{ - struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); - int addr = mv88e6xxx_port_to_phy_addr(ps, port); - int ret; - - if (addr < 0) - return addr; - - mutex_lock(&ps->smi_mutex); - ret = _mv88e6xxx_phy_write_indirect(ps, addr, regnum, val); - mutex_unlock(&ps->smi_mutex); - return ret; -} - #ifdef CONFIG_NET_DSA_HWMON static int mv88e61xx_get_temp(struct dsa_switch *ds, int *temp) diff --git a/drivers/net/dsa/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx.h index 52ca24efec64..597257123ca7 100644 --- a/drivers/net/dsa/mv88e6xxx.h +++ b/drivers/net/dsa/mv88e6xxx.h @@ -355,10 +355,17 @@ enum mv88e6xxx_cap { * See GLOBAL_CONTROL_PPU_ENABLE and GLOBAL_STATUS_PPU_POLLING. */ MV88E6XXX_CAP_PPU, + + /* SMI PHY Command and Data registers. + * This requires an indirect access to PHY registers through + * GLOBAL2_SMI_OP, otherwise direct access to PHY registers is done. + */ + MV88E6XXX_CAP_SMI_PHY, }; /* Bitmask of capabilities */ #define MV88E6XXX_FLAG_PPU BIT(MV88E6XXX_CAP_PPU) +#define MV88E6XXX_FLAG_SMI_PHY BIT(MV88E6XXX_CAP_SMI_PHY) #define MV88E6XXX_FLAGS_FAMILY_6095 \ MV88E6XXX_FLAG_PPU @@ -371,11 +378,14 @@ enum mv88e6xxx_cap { #define MV88E6XXX_FLAGS_FAMILY_6185 \ MV88E6XXX_FLAG_PPU -#define MV88E6XXX_FLAGS_FAMILY_6320 0 +#define MV88E6XXX_FLAGS_FAMILY_6320 \ + MV88E6XXX_FLAG_SMI_PHY -#define MV88E6XXX_FLAGS_FAMILY_6351 0 +#define MV88E6XXX_FLAGS_FAMILY_6351 \ + MV88E6XXX_FLAG_SMI_PHY -#define MV88E6XXX_FLAGS_FAMILY_6352 0 +#define MV88E6XXX_FLAGS_FAMILY_6352 \ + MV88E6XXX_FLAG_SMI_PHY struct mv88e6xxx_info { enum mv88e6xxx_family family; @@ -497,9 +507,6 @@ int mv88e6xxx_set_addr_direct(struct dsa_switch *ds, u8 *addr); int mv88e6xxx_set_addr_indirect(struct dsa_switch *ds, u8 *addr); int mv88e6xxx_phy_read(struct dsa_switch *ds, int port, int regnum); int mv88e6xxx_phy_write(struct dsa_switch *ds, int port, int regnum, u16 val); -int mv88e6xxx_phy_read_indirect(struct dsa_switch *ds, int port, int regnum); -int mv88e6xxx_phy_write_indirect(struct dsa_switch *ds, int port, int regnum, - u16 val); void mv88e6xxx_get_strings(struct dsa_switch *ds, int port, uint8_t *data); void mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data); @@ -516,9 +523,6 @@ int mv88e6xxx_set_temp_limit(struct dsa_switch *ds, int temp); int mv88e6xxx_get_temp_alarm(struct dsa_switch *ds, bool *alarm); int mv88e6xxx_eeprom_load_wait(struct dsa_switch *ds); int mv88e6xxx_eeprom_busy_wait(struct dsa_switch *ds); -int mv88e6xxx_phy_read_indirect(struct dsa_switch *ds, int addr, int regnum); -int mv88e6xxx_phy_write_indirect(struct dsa_switch *ds, int addr, int regnum, - u16 val); int mv88e6xxx_get_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e); int mv88e6xxx_set_eee(struct dsa_switch *ds, int port, struct phy_device *phydev, struct ethtool_eee *e); From d24645bebce2b13b3c5c49ff392cfb7f3efe0d76 Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Mon, 9 May 2016 13:22:41 -0400 Subject: [PATCH 1426/1649] net: dsa: 
mv88e6xxx: factorize EEPROM access Add a MV88E6XXX_FLAG_EEPROM flag to describe switch models featuring an EEPROM and distribute the EEPROM access routines to all models. Signed-off-by: Vivien Didelot Signed-off-by: David S. Miller --- drivers/net/dsa/mv88e6123.c | 2 + drivers/net/dsa/mv88e6131.c | 2 + drivers/net/dsa/mv88e6171.c | 2 + drivers/net/dsa/mv88e6352.c | 207 +--------------------------------- drivers/net/dsa/mv88e6xxx.c | 216 +++++++++++++++++++++++++++++++++++- drivers/net/dsa/mv88e6xxx.h | 18 ++- 6 files changed, 236 insertions(+), 211 deletions(-) diff --git a/drivers/net/dsa/mv88e6123.c b/drivers/net/dsa/mv88e6123.c index ab5885b68a07..8330a8e34bff 100644 --- a/drivers/net/dsa/mv88e6123.c +++ b/drivers/net/dsa/mv88e6123.c @@ -124,6 +124,8 @@ struct dsa_switch_driver mv88e6123_switch_driver = { #ifdef CONFIG_NET_DSA_HWMON .get_temp = mv88e6xxx_get_temp, #endif + .get_eeprom = mv88e6xxx_get_eeprom, + .set_eeprom = mv88e6xxx_set_eeprom, .get_regs_len = mv88e6xxx_get_regs_len, .get_regs = mv88e6xxx_get_regs, }; diff --git a/drivers/net/dsa/mv88e6131.c b/drivers/net/dsa/mv88e6131.c index 9d21d69de08a..ab8c507b8f8c 100644 --- a/drivers/net/dsa/mv88e6131.c +++ b/drivers/net/dsa/mv88e6131.c @@ -153,6 +153,8 @@ struct dsa_switch_driver mv88e6131_switch_driver = { .get_strings = mv88e6xxx_get_strings, .get_ethtool_stats = mv88e6xxx_get_ethtool_stats, .get_sset_count = mv88e6xxx_get_sset_count, + .get_eeprom = mv88e6xxx_get_eeprom, + .set_eeprom = mv88e6xxx_set_eeprom, .adjust_link = mv88e6xxx_adjust_link, .port_bridge_join = mv88e6xxx_port_bridge_join, .port_bridge_leave = mv88e6xxx_port_bridge_leave, diff --git a/drivers/net/dsa/mv88e6171.c b/drivers/net/dsa/mv88e6171.c index b190647d2a15..a7afbaa87618 100644 --- a/drivers/net/dsa/mv88e6171.c +++ b/drivers/net/dsa/mv88e6171.c @@ -133,6 +133,8 @@ struct dsa_switch_driver mv88e6171_switch_driver = { #ifdef CONFIG_NET_DSA_HWMON .get_temp = mv88e6xxx_get_temp, #endif + .get_eeprom = mv88e6xxx_get_eeprom, + .set_eeprom = mv88e6xxx_set_eeprom, .get_regs_len = mv88e6xxx_get_regs_len, .get_regs = mv88e6xxx_get_regs, .port_bridge_join = mv88e6xxx_port_bridge_join, diff --git a/drivers/net/dsa/mv88e6352.c b/drivers/net/dsa/mv88e6352.c index 6fa7c02f9027..3bb271e16035 100644 --- a/drivers/net/dsa/mv88e6352.c +++ b/drivers/net/dsa/mv88e6352.c @@ -125,8 +125,6 @@ static int mv88e6352_setup(struct dsa_switch *ds) if (ret < 0) return ret; - mutex_init(&ps->eeprom_mutex); - ret = mv88e6xxx_switch_reset(ps, true); if (ret < 0) return ret; @@ -138,207 +136,6 @@ static int mv88e6352_setup(struct dsa_switch *ds) return mv88e6xxx_setup_ports(ds); } -static int mv88e6352_read_eeprom_word(struct dsa_switch *ds, int addr) -{ - struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); - int ret; - - mutex_lock(&ps->eeprom_mutex); - - ret = mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_EEPROM_OP, - GLOBAL2_EEPROM_OP_READ | - (addr & GLOBAL2_EEPROM_OP_ADDR_MASK)); - if (ret < 0) - goto error; - - ret = mv88e6xxx_eeprom_busy_wait(ds); - if (ret < 0) - goto error; - - ret = mv88e6xxx_reg_read(ps, REG_GLOBAL2, GLOBAL2_EEPROM_DATA); -error: - mutex_unlock(&ps->eeprom_mutex); - return ret; -} - -static int mv88e6352_get_eeprom(struct dsa_switch *ds, - struct ethtool_eeprom *eeprom, u8 *data) -{ - int offset; - int len; - int ret; - - offset = eeprom->offset; - len = eeprom->len; - eeprom->len = 0; - - eeprom->magic = 0xc3ec4951; - - ret = mv88e6xxx_eeprom_load_wait(ds); - if (ret < 0) - return ret; - - if (offset & 1) { - int word; - - word = mv88e6352_read_eeprom_word(ds, 
offset >> 1); - if (word < 0) - return word; - - *data++ = (word >> 8) & 0xff; - - offset++; - len--; - eeprom->len++; - } - - while (len >= 2) { - int word; - - word = mv88e6352_read_eeprom_word(ds, offset >> 1); - if (word < 0) - return word; - - *data++ = word & 0xff; - *data++ = (word >> 8) & 0xff; - - offset += 2; - len -= 2; - eeprom->len += 2; - } - - if (len) { - int word; - - word = mv88e6352_read_eeprom_word(ds, offset >> 1); - if (word < 0) - return word; - - *data++ = word & 0xff; - - offset++; - len--; - eeprom->len++; - } - - return 0; -} - -static int mv88e6352_eeprom_is_readonly(struct dsa_switch *ds) -{ - struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); - int ret; - - ret = mv88e6xxx_reg_read(ps, REG_GLOBAL2, GLOBAL2_EEPROM_OP); - if (ret < 0) - return ret; - - if (!(ret & GLOBAL2_EEPROM_OP_WRITE_EN)) - return -EROFS; - - return 0; -} - -static int mv88e6352_write_eeprom_word(struct dsa_switch *ds, int addr, - u16 data) -{ - struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); - int ret; - - mutex_lock(&ps->eeprom_mutex); - - ret = mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_EEPROM_DATA, data); - if (ret < 0) - goto error; - - ret = mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_EEPROM_OP, - GLOBAL2_EEPROM_OP_WRITE | - (addr & GLOBAL2_EEPROM_OP_ADDR_MASK)); - if (ret < 0) - goto error; - - ret = mv88e6xxx_eeprom_busy_wait(ds); -error: - mutex_unlock(&ps->eeprom_mutex); - return ret; -} - -static int mv88e6352_set_eeprom(struct dsa_switch *ds, - struct ethtool_eeprom *eeprom, u8 *data) -{ - int offset; - int ret; - int len; - - if (eeprom->magic != 0xc3ec4951) - return -EINVAL; - - ret = mv88e6352_eeprom_is_readonly(ds); - if (ret) - return ret; - - offset = eeprom->offset; - len = eeprom->len; - eeprom->len = 0; - - ret = mv88e6xxx_eeprom_load_wait(ds); - if (ret < 0) - return ret; - - if (offset & 1) { - int word; - - word = mv88e6352_read_eeprom_word(ds, offset >> 1); - if (word < 0) - return word; - - word = (*data++ << 8) | (word & 0xff); - - ret = mv88e6352_write_eeprom_word(ds, offset >> 1, word); - if (ret < 0) - return ret; - - offset++; - len--; - eeprom->len++; - } - - while (len >= 2) { - int word; - - word = *data++; - word |= *data++ << 8; - - ret = mv88e6352_write_eeprom_word(ds, offset >> 1, word); - if (ret < 0) - return ret; - - offset += 2; - len -= 2; - eeprom->len += 2; - } - - if (len) { - int word; - - word = mv88e6352_read_eeprom_word(ds, offset >> 1); - if (word < 0) - return word; - - word = (word & 0xff00) | *data++; - - ret = mv88e6352_write_eeprom_word(ds, offset >> 1, word); - if (ret < 0) - return ret; - - offset++; - len--; - eeprom->len++; - } - - return 0; -} - struct dsa_switch_driver mv88e6352_switch_driver = { .tag_protocol = DSA_TAG_PROTO_EDSA, .probe = mv88e6352_drv_probe, @@ -358,8 +155,8 @@ struct dsa_switch_driver mv88e6352_switch_driver = { .set_temp_limit = mv88e6xxx_set_temp_limit, .get_temp_alarm = mv88e6xxx_get_temp_alarm, #endif - .get_eeprom = mv88e6352_get_eeprom, - .set_eeprom = mv88e6352_set_eeprom, + .get_eeprom = mv88e6xxx_get_eeprom, + .set_eeprom = mv88e6xxx_set_eeprom, .get_regs_len = mv88e6xxx_get_regs_len, .get_regs = mv88e6xxx_get_regs, .port_bridge_join = mv88e6xxx_port_bridge_join, diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c index 2c8c5e1d16bc..d277350069d0 100644 --- a/drivers/net/dsa/mv88e6xxx.c +++ b/drivers/net/dsa/mv88e6xxx.c @@ -823,7 +823,7 @@ static int _mv88e6xxx_phy_wait(struct mv88e6xxx_priv_state *ps) GLOBAL2_SMI_OP_BUSY); } -int mv88e6xxx_eeprom_load_wait(struct dsa_switch 
*ds) +static int mv88e6xxx_eeprom_load_wait(struct dsa_switch *ds) { struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); @@ -831,7 +831,7 @@ int mv88e6xxx_eeprom_load_wait(struct dsa_switch *ds) GLOBAL2_EEPROM_OP_LOAD); } -int mv88e6xxx_eeprom_busy_wait(struct dsa_switch *ds) +static int mv88e6xxx_eeprom_busy_wait(struct dsa_switch *ds) { struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); @@ -839,6 +839,215 @@ int mv88e6xxx_eeprom_busy_wait(struct dsa_switch *ds) GLOBAL2_EEPROM_OP_BUSY); } +static int mv88e6xxx_read_eeprom_word(struct dsa_switch *ds, int addr) +{ + struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); + int ret; + + mutex_lock(&ps->eeprom_mutex); + + ret = mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_EEPROM_OP, + GLOBAL2_EEPROM_OP_READ | + (addr & GLOBAL2_EEPROM_OP_ADDR_MASK)); + if (ret < 0) + goto error; + + ret = mv88e6xxx_eeprom_busy_wait(ds); + if (ret < 0) + goto error; + + ret = mv88e6xxx_reg_read(ps, REG_GLOBAL2, GLOBAL2_EEPROM_DATA); +error: + mutex_unlock(&ps->eeprom_mutex); + return ret; +} + +int mv88e6xxx_get_eeprom(struct dsa_switch *ds, struct ethtool_eeprom *eeprom, + u8 *data) +{ + struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); + int offset; + int len; + int ret; + + if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_EEPROM)) + return -EOPNOTSUPP; + + offset = eeprom->offset; + len = eeprom->len; + eeprom->len = 0; + + eeprom->magic = 0xc3ec4951; + + ret = mv88e6xxx_eeprom_load_wait(ds); + if (ret < 0) + return ret; + + if (offset & 1) { + int word; + + word = mv88e6xxx_read_eeprom_word(ds, offset >> 1); + if (word < 0) + return word; + + *data++ = (word >> 8) & 0xff; + + offset++; + len--; + eeprom->len++; + } + + while (len >= 2) { + int word; + + word = mv88e6xxx_read_eeprom_word(ds, offset >> 1); + if (word < 0) + return word; + + *data++ = word & 0xff; + *data++ = (word >> 8) & 0xff; + + offset += 2; + len -= 2; + eeprom->len += 2; + } + + if (len) { + int word; + + word = mv88e6xxx_read_eeprom_word(ds, offset >> 1); + if (word < 0) + return word; + + *data++ = word & 0xff; + + offset++; + len--; + eeprom->len++; + } + + return 0; +} + +static int mv88e6xxx_eeprom_is_readonly(struct dsa_switch *ds) +{ + struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); + int ret; + + ret = mv88e6xxx_reg_read(ps, REG_GLOBAL2, GLOBAL2_EEPROM_OP); + if (ret < 0) + return ret; + + if (!(ret & GLOBAL2_EEPROM_OP_WRITE_EN)) + return -EROFS; + + return 0; +} + +static int mv88e6xxx_write_eeprom_word(struct dsa_switch *ds, int addr, + u16 data) +{ + struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); + int ret; + + mutex_lock(&ps->eeprom_mutex); + + ret = mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_EEPROM_DATA, data); + if (ret < 0) + goto error; + + ret = mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_EEPROM_OP, + GLOBAL2_EEPROM_OP_WRITE | + (addr & GLOBAL2_EEPROM_OP_ADDR_MASK)); + if (ret < 0) + goto error; + + ret = mv88e6xxx_eeprom_busy_wait(ds); +error: + mutex_unlock(&ps->eeprom_mutex); + return ret; +} + +int mv88e6xxx_set_eeprom(struct dsa_switch *ds, struct ethtool_eeprom *eeprom, + u8 *data) +{ + struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); + int offset; + int ret; + int len; + + if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_EEPROM)) + return -EOPNOTSUPP; + + if (eeprom->magic != 0xc3ec4951) + return -EINVAL; + + ret = mv88e6xxx_eeprom_is_readonly(ds); + if (ret) + return ret; + + offset = eeprom->offset; + len = eeprom->len; + eeprom->len = 0; + + ret = mv88e6xxx_eeprom_load_wait(ds); + if (ret < 0) + return ret; + + if (offset & 1) { + int word; + + word = mv88e6xxx_read_eeprom_word(ds, 
offset >> 1); + if (word < 0) + return word; + + word = (*data++ << 8) | (word & 0xff); + + ret = mv88e6xxx_write_eeprom_word(ds, offset >> 1, word); + if (ret < 0) + return ret; + + offset++; + len--; + eeprom->len++; + } + + while (len >= 2) { + int word; + + word = *data++; + word |= *data++ << 8; + + ret = mv88e6xxx_write_eeprom_word(ds, offset >> 1, word); + if (ret < 0) + return ret; + + offset += 2; + len -= 2; + eeprom->len += 2; + } + + if (len) { + int word; + + word = mv88e6xxx_read_eeprom_word(ds, offset >> 1); + if (word < 0) + return word; + + word = (word & 0xff00) | *data++; + + ret = mv88e6xxx_write_eeprom_word(ds, offset >> 1, word); + if (ret < 0) + return ret; + + offset++; + len--; + eeprom->len++; + } + + return 0; +} + static int _mv88e6xxx_atu_wait(struct mv88e6xxx_priv_state *ps) { return _mv88e6xxx_wait(ps, REG_GLOBAL, GLOBAL_ATU_OP, @@ -2596,6 +2805,9 @@ int mv88e6xxx_setup_common(struct mv88e6xxx_priv_state *ps) INIT_WORK(&ps->bridge_work, mv88e6xxx_bridge_work); + if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_EEPROM)) + mutex_init(&ps->eeprom_mutex); + if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_PPU)) mv88e6xxx_ppu_state_init(ps); diff --git a/drivers/net/dsa/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx.h index 597257123ca7..0181f6775bfc 100644 --- a/drivers/net/dsa/mv88e6xxx.h +++ b/drivers/net/dsa/mv88e6xxx.h @@ -351,6 +351,11 @@ enum mv88e6xxx_family { }; enum mv88e6xxx_cap { + /* EEPROM Command and Data registers. + * See GLOBAL2_EEPROM_OP and GLOBAL2_EEPROM_DATA. + */ + MV88E6XXX_CAP_EEPROM, + /* PHY Polling Unit. * See GLOBAL_CONTROL_PPU_ENABLE and GLOBAL_STATUS_PPU_POLLING. */ @@ -364,6 +369,7 @@ enum mv88e6xxx_cap { }; /* Bitmask of capabilities */ +#define MV88E6XXX_FLAG_EEPROM BIT(MV88E6XXX_CAP_EEPROM) #define MV88E6XXX_FLAG_PPU BIT(MV88E6XXX_CAP_PPU) #define MV88E6XXX_FLAG_SMI_PHY BIT(MV88E6XXX_CAP_SMI_PHY) @@ -379,13 +385,15 @@ enum mv88e6xxx_cap { MV88E6XXX_FLAG_PPU #define MV88E6XXX_FLAGS_FAMILY_6320 \ - MV88E6XXX_FLAG_SMI_PHY + (MV88E6XXX_FLAG_EEPROM | \ + MV88E6XXX_FLAG_SMI_PHY) #define MV88E6XXX_FLAGS_FAMILY_6351 \ MV88E6XXX_FLAG_SMI_PHY #define MV88E6XXX_FLAGS_FAMILY_6352 \ - MV88E6XXX_FLAG_SMI_PHY + (MV88E6XXX_FLAG_EEPROM | \ + MV88E6XXX_FLAG_SMI_PHY) struct mv88e6xxx_info { enum mv88e6xxx_family family; @@ -521,8 +529,10 @@ int mv88e6xxx_get_temp(struct dsa_switch *ds, int *temp); int mv88e6xxx_get_temp_limit(struct dsa_switch *ds, int *temp); int mv88e6xxx_set_temp_limit(struct dsa_switch *ds, int temp); int mv88e6xxx_get_temp_alarm(struct dsa_switch *ds, bool *alarm); -int mv88e6xxx_eeprom_load_wait(struct dsa_switch *ds); -int mv88e6xxx_eeprom_busy_wait(struct dsa_switch *ds); +int mv88e6xxx_get_eeprom(struct dsa_switch *ds, struct ethtool_eeprom *eeprom, + u8 *data); +int mv88e6xxx_set_eeprom(struct dsa_switch *ds, struct ethtool_eeprom *eeprom, + u8 *data); int mv88e6xxx_get_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e); int mv88e6xxx_set_eee(struct dsa_switch *ds, int port, struct phy_device *phydev, struct ethtool_eee *e); From 6594f615792a52ccb66c07000ade917e8c8f62fd Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Mon, 9 May 2016 13:22:42 -0400 Subject: [PATCH 1427/1649] net: dsa: mv88e6xxx: factorize temperature access Add MV88E6XXX_FLAG_TEMP and MV88E6XXX_FLAG_TEMP_LIMIT flags to describe switch models featuring a temperature access. Use them to centralize the access to the temperature feature. Signed-off-by: Vivien Didelot Signed-off-by: David S. 
Miller --- drivers/net/dsa/mv88e6123.c | 3 +++ drivers/net/dsa/mv88e6131.c | 6 ++++++ drivers/net/dsa/mv88e6171.c | 3 +++ drivers/net/dsa/mv88e6xxx.c | 9 ++++++--- drivers/net/dsa/mv88e6xxx.h | 22 ++++++++++++++++++---- 5 files changed, 36 insertions(+), 7 deletions(-) diff --git a/drivers/net/dsa/mv88e6123.c b/drivers/net/dsa/mv88e6123.c index 8330a8e34bff..e234bdbd9b42 100644 --- a/drivers/net/dsa/mv88e6123.c +++ b/drivers/net/dsa/mv88e6123.c @@ -123,6 +123,9 @@ struct dsa_switch_driver mv88e6123_switch_driver = { .adjust_link = mv88e6xxx_adjust_link, #ifdef CONFIG_NET_DSA_HWMON .get_temp = mv88e6xxx_get_temp, + .get_temp_limit = mv88e6xxx_get_temp_limit, + .set_temp_limit = mv88e6xxx_set_temp_limit, + .get_temp_alarm = mv88e6xxx_get_temp_alarm, #endif .get_eeprom = mv88e6xxx_get_eeprom, .set_eeprom = mv88e6xxx_set_eeprom, diff --git a/drivers/net/dsa/mv88e6131.c b/drivers/net/dsa/mv88e6131.c index ab8c507b8f8c..089f9c05ea38 100644 --- a/drivers/net/dsa/mv88e6131.c +++ b/drivers/net/dsa/mv88e6131.c @@ -155,6 +155,12 @@ struct dsa_switch_driver mv88e6131_switch_driver = { .get_sset_count = mv88e6xxx_get_sset_count, .get_eeprom = mv88e6xxx_get_eeprom, .set_eeprom = mv88e6xxx_set_eeprom, +#ifdef CONFIG_NET_DSA_HWMON + .get_temp = mv88e6xxx_get_temp, + .get_temp_limit = mv88e6xxx_get_temp_limit, + .set_temp_limit = mv88e6xxx_set_temp_limit, + .get_temp_alarm = mv88e6xxx_get_temp_alarm, +#endif .adjust_link = mv88e6xxx_adjust_link, .port_bridge_join = mv88e6xxx_port_bridge_join, .port_bridge_leave = mv88e6xxx_port_bridge_leave, diff --git a/drivers/net/dsa/mv88e6171.c b/drivers/net/dsa/mv88e6171.c index a7afbaa87618..588b48625af7 100644 --- a/drivers/net/dsa/mv88e6171.c +++ b/drivers/net/dsa/mv88e6171.c @@ -132,6 +132,9 @@ struct dsa_switch_driver mv88e6171_switch_driver = { .adjust_link = mv88e6xxx_adjust_link, #ifdef CONFIG_NET_DSA_HWMON .get_temp = mv88e6xxx_get_temp, + .get_temp_limit = mv88e6xxx_get_temp_limit, + .set_temp_limit = mv88e6xxx_set_temp_limit, + .get_temp_alarm = mv88e6xxx_get_temp_alarm, #endif .get_eeprom = mv88e6xxx_get_eeprom, .set_eeprom = mv88e6xxx_set_eeprom, diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c index d277350069d0..24aea900af35 100644 --- a/drivers/net/dsa/mv88e6xxx.c +++ b/drivers/net/dsa/mv88e6xxx.c @@ -3199,6 +3199,9 @@ int mv88e6xxx_get_temp(struct dsa_switch *ds, int *temp) { struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); + if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_TEMP)) + return -EOPNOTSUPP; + if (mv88e6xxx_6320_family(ps) || mv88e6xxx_6352_family(ps)) return mv88e63xx_get_temp(ds, temp); @@ -3211,7 +3214,7 @@ int mv88e6xxx_get_temp_limit(struct dsa_switch *ds, int *temp) int phy = mv88e6xxx_6320_family(ps) ? 3 : 0; int ret; - if (!mv88e6xxx_6320_family(ps) && !mv88e6xxx_6352_family(ps)) + if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_TEMP_LIMIT)) return -EOPNOTSUPP; *temp = 0; @@ -3231,7 +3234,7 @@ int mv88e6xxx_set_temp_limit(struct dsa_switch *ds, int temp) int phy = mv88e6xxx_6320_family(ps) ? 3 : 0; int ret; - if (!mv88e6xxx_6320_family(ps) && !mv88e6xxx_6352_family(ps)) + if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_TEMP_LIMIT)) return -EOPNOTSUPP; ret = mv88e6xxx_phy_page_read(ds, phy, 6, 26); @@ -3248,7 +3251,7 @@ int mv88e6xxx_get_temp_alarm(struct dsa_switch *ds, bool *alarm) int phy = mv88e6xxx_6320_family(ps) ? 
3 : 0; int ret; - if (!mv88e6xxx_6320_family(ps) && !mv88e6xxx_6352_family(ps)) + if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_TEMP_LIMIT)) return -EOPNOTSUPP; *alarm = false; diff --git a/drivers/net/dsa/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx.h index 0181f6775bfc..9ddb6d04389e 100644 --- a/drivers/net/dsa/mv88e6xxx.h +++ b/drivers/net/dsa/mv88e6xxx.h @@ -366,12 +366,20 @@ enum mv88e6xxx_cap { * GLOBAL2_SMI_OP, otherwise direct access to PHY registers is done. */ MV88E6XXX_CAP_SMI_PHY, + + /* Internal temperature sensor. + * Available from any enabled port's PHY register 26, page 6. + */ + MV88E6XXX_CAP_TEMP, + MV88E6XXX_CAP_TEMP_LIMIT, }; /* Bitmask of capabilities */ #define MV88E6XXX_FLAG_EEPROM BIT(MV88E6XXX_CAP_EEPROM) #define MV88E6XXX_FLAG_PPU BIT(MV88E6XXX_CAP_PPU) #define MV88E6XXX_FLAG_SMI_PHY BIT(MV88E6XXX_CAP_SMI_PHY) +#define MV88E6XXX_FLAG_TEMP BIT(MV88E6XXX_CAP_TEMP) +#define MV88E6XXX_FLAG_TEMP_LIMIT BIT(MV88E6XXX_CAP_TEMP_LIMIT) #define MV88E6XXX_FLAGS_FAMILY_6095 \ MV88E6XXX_FLAG_PPU @@ -379,21 +387,27 @@ enum mv88e6xxx_cap { #define MV88E6XXX_FLAGS_FAMILY_6097 \ MV88E6XXX_FLAG_PPU -#define MV88E6XXX_FLAGS_FAMILY_6165 0 +#define MV88E6XXX_FLAGS_FAMILY_6165 \ + MV88E6XXX_FLAG_TEMP #define MV88E6XXX_FLAGS_FAMILY_6185 \ MV88E6XXX_FLAG_PPU #define MV88E6XXX_FLAGS_FAMILY_6320 \ (MV88E6XXX_FLAG_EEPROM | \ - MV88E6XXX_FLAG_SMI_PHY) + MV88E6XXX_FLAG_SMI_PHY | \ + MV88E6XXX_FLAG_TEMP | \ + MV88E6XXX_FLAG_TEMP_LIMIT) #define MV88E6XXX_FLAGS_FAMILY_6351 \ - MV88E6XXX_FLAG_SMI_PHY + (MV88E6XXX_FLAG_SMI_PHY | \ + MV88E6XXX_FLAG_TEMP) #define MV88E6XXX_FLAGS_FAMILY_6352 \ (MV88E6XXX_FLAG_EEPROM | \ - MV88E6XXX_FLAG_SMI_PHY) + MV88E6XXX_FLAG_SMI_PHY | \ + MV88E6XXX_FLAG_TEMP | \ + MV88E6XXX_FLAG_TEMP_LIMIT) struct mv88e6xxx_info { enum mv88e6xxx_family family; From 1d13a06e00bdcde27d4d88e011841ff0924b3dde Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Mon, 9 May 2016 13:22:43 -0400 Subject: [PATCH 1428/1649] net: dsa: mv88e6xxx: factorize MAC address setting Some switch models have a dedicated register for Switch MAC/WoF/WoL. This register, when present, is used to indirectly set the switch MAC address, instead of a direct write to 3 global registers. Identify this feature and share a common mv88e6xxx_set_addr function. Signed-off-by: Vivien Didelot Signed-off-by: David S. 
Miller --- drivers/net/dsa/mv88e6123.c | 2 +- drivers/net/dsa/mv88e6131.c | 2 +- drivers/net/dsa/mv88e6171.c | 2 +- drivers/net/dsa/mv88e6352.c | 2 +- drivers/net/dsa/mv88e6xxx.c | 14 ++++++++++++-- drivers/net/dsa/mv88e6xxx.h | 17 ++++++++++++++--- 6 files changed, 30 insertions(+), 9 deletions(-) diff --git a/drivers/net/dsa/mv88e6123.c b/drivers/net/dsa/mv88e6123.c index e234bdbd9b42..c349fb7ec3f2 100644 --- a/drivers/net/dsa/mv88e6123.c +++ b/drivers/net/dsa/mv88e6123.c @@ -114,7 +114,7 @@ struct dsa_switch_driver mv88e6123_switch_driver = { .tag_protocol = DSA_TAG_PROTO_EDSA, .probe = mv88e6123_drv_probe, .setup = mv88e6123_setup, - .set_addr = mv88e6xxx_set_addr_indirect, + .set_addr = mv88e6xxx_set_addr, .phy_read = mv88e6xxx_phy_read, .phy_write = mv88e6xxx_phy_write, .get_strings = mv88e6xxx_get_strings, diff --git a/drivers/net/dsa/mv88e6131.c b/drivers/net/dsa/mv88e6131.c index 089f9c05ea38..1e040c6d663a 100644 --- a/drivers/net/dsa/mv88e6131.c +++ b/drivers/net/dsa/mv88e6131.c @@ -147,7 +147,7 @@ struct dsa_switch_driver mv88e6131_switch_driver = { .tag_protocol = DSA_TAG_PROTO_DSA, .probe = mv88e6131_drv_probe, .setup = mv88e6131_setup, - .set_addr = mv88e6xxx_set_addr_direct, + .set_addr = mv88e6xxx_set_addr, .phy_read = mv88e6xxx_phy_read, .phy_write = mv88e6xxx_phy_write, .get_strings = mv88e6xxx_get_strings, diff --git a/drivers/net/dsa/mv88e6171.c b/drivers/net/dsa/mv88e6171.c index 588b48625af7..f9b20e05b895 100644 --- a/drivers/net/dsa/mv88e6171.c +++ b/drivers/net/dsa/mv88e6171.c @@ -123,7 +123,7 @@ struct dsa_switch_driver mv88e6171_switch_driver = { .tag_protocol = DSA_TAG_PROTO_EDSA, .probe = mv88e6171_drv_probe, .setup = mv88e6171_setup, - .set_addr = mv88e6xxx_set_addr_indirect, + .set_addr = mv88e6xxx_set_addr, .phy_read = mv88e6xxx_phy_read, .phy_write = mv88e6xxx_phy_write, .get_strings = mv88e6xxx_get_strings, diff --git a/drivers/net/dsa/mv88e6352.c b/drivers/net/dsa/mv88e6352.c index 3bb271e16035..d03c14a7ad1f 100644 --- a/drivers/net/dsa/mv88e6352.c +++ b/drivers/net/dsa/mv88e6352.c @@ -140,7 +140,7 @@ struct dsa_switch_driver mv88e6352_switch_driver = { .tag_protocol = DSA_TAG_PROTO_EDSA, .probe = mv88e6352_drv_probe, .setup = mv88e6352_setup, - .set_addr = mv88e6xxx_set_addr_indirect, + .set_addr = mv88e6xxx_set_addr, .phy_read = mv88e6xxx_phy_read, .phy_write = mv88e6xxx_phy_write, .get_strings = mv88e6xxx_get_strings, diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c index 24aea900af35..4f0e047538d2 100644 --- a/drivers/net/dsa/mv88e6xxx.c +++ b/drivers/net/dsa/mv88e6xxx.c @@ -173,7 +173,7 @@ int mv88e6xxx_reg_write(struct mv88e6xxx_priv_state *ps, int addr, return ret; } -int mv88e6xxx_set_addr_direct(struct dsa_switch *ds, u8 *addr) +static int mv88e6xxx_set_addr_direct(struct dsa_switch *ds, u8 *addr) { struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); int err; @@ -192,7 +192,7 @@ int mv88e6xxx_set_addr_direct(struct dsa_switch *ds, u8 *addr) (addr[4] << 8) | addr[5]); } -int mv88e6xxx_set_addr_indirect(struct dsa_switch *ds, u8 *addr) +static int mv88e6xxx_set_addr_indirect(struct dsa_switch *ds, u8 *addr) { struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); int ret; @@ -225,6 +225,16 @@ int mv88e6xxx_set_addr_indirect(struct dsa_switch *ds, u8 *addr) return 0; } +int mv88e6xxx_set_addr(struct dsa_switch *ds, u8 *addr) +{ + struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); + + if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_SWITCH_MAC)) + return mv88e6xxx_set_addr_indirect(ds, addr); + else + return mv88e6xxx_set_addr_direct(ds, addr); +} + 
static int _mv88e6xxx_phy_read(struct mv88e6xxx_priv_state *ps, int addr, int regnum) { diff --git a/drivers/net/dsa/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx.h index 9ddb6d04389e..517e95fbd10e 100644 --- a/drivers/net/dsa/mv88e6xxx.h +++ b/drivers/net/dsa/mv88e6xxx.h @@ -367,6 +367,13 @@ enum mv88e6xxx_cap { */ MV88E6XXX_CAP_SMI_PHY, + /* Switch MAC/WoL/WoF register. + * This requires an indirect access to set the switch MAC address + * through GLOBAL2_SWITCH_MAC, otherwise GLOBAL_MAC_01, GLOBAL_MAC_23, + * and GLOBAL_MAC_45 are used with a direct access. + */ + MV88E6XXX_CAP_SWITCH_MAC_WOL_WOF, + /* Internal temperature sensor. * Available from any enabled port's PHY register 26, page 6. */ @@ -378,6 +385,7 @@ enum mv88e6xxx_cap { #define MV88E6XXX_FLAG_EEPROM BIT(MV88E6XXX_CAP_EEPROM) #define MV88E6XXX_FLAG_PPU BIT(MV88E6XXX_CAP_PPU) #define MV88E6XXX_FLAG_SMI_PHY BIT(MV88E6XXX_CAP_SMI_PHY) +#define MV88E6XXX_FLAG_SWITCH_MAC BIT(MV88E6XXX_CAP_SWITCH_MAC_WOL_WOF) #define MV88E6XXX_FLAG_TEMP BIT(MV88E6XXX_CAP_TEMP) #define MV88E6XXX_FLAG_TEMP_LIMIT BIT(MV88E6XXX_CAP_TEMP_LIMIT) @@ -388,7 +396,8 @@ enum mv88e6xxx_cap { MV88E6XXX_FLAG_PPU #define MV88E6XXX_FLAGS_FAMILY_6165 \ - MV88E6XXX_FLAG_TEMP + (MV88E6XXX_FLAG_SWITCH_MAC | \ + MV88E6XXX_FLAG_TEMP) #define MV88E6XXX_FLAGS_FAMILY_6185 \ MV88E6XXX_FLAG_PPU @@ -396,16 +405,19 @@ enum mv88e6xxx_cap { #define MV88E6XXX_FLAGS_FAMILY_6320 \ (MV88E6XXX_FLAG_EEPROM | \ MV88E6XXX_FLAG_SMI_PHY | \ + MV88E6XXX_FLAG_SWITCH_MAC | \ MV88E6XXX_FLAG_TEMP | \ MV88E6XXX_FLAG_TEMP_LIMIT) #define MV88E6XXX_FLAGS_FAMILY_6351 \ (MV88E6XXX_FLAG_SMI_PHY | \ + MV88E6XXX_FLAG_SWITCH_MAC | \ MV88E6XXX_FLAG_TEMP) #define MV88E6XXX_FLAGS_FAMILY_6352 \ (MV88E6XXX_FLAG_EEPROM | \ MV88E6XXX_FLAG_SMI_PHY | \ + MV88E6XXX_FLAG_SWITCH_MAC | \ MV88E6XXX_FLAG_TEMP | \ MV88E6XXX_FLAG_TEMP_LIMIT) @@ -525,8 +537,7 @@ int mv88e6xxx_setup_global(struct dsa_switch *ds); int mv88e6xxx_reg_read(struct mv88e6xxx_priv_state *ps, int addr, int reg); int mv88e6xxx_reg_write(struct mv88e6xxx_priv_state *ps, int addr, int reg, u16 val); -int mv88e6xxx_set_addr_direct(struct dsa_switch *ds, u8 *addr); -int mv88e6xxx_set_addr_indirect(struct dsa_switch *ds, u8 *addr); +int mv88e6xxx_set_addr(struct dsa_switch *ds, u8 *addr); int mv88e6xxx_phy_read(struct dsa_switch *ds, int port, int regnum); int mv88e6xxx_phy_write(struct dsa_switch *ds, int port, int regnum, u16 val); void mv88e6xxx_get_strings(struct dsa_switch *ds, int port, uint8_t *data); From aadbdb8a0da6c38e8370fb7cd860f38b266c6037 Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Mon, 9 May 2016 13:22:44 -0400 Subject: [PATCH 1429/1649] net: dsa: mv88e6xxx: factorize EEE access Add a MV88E6XXX_FLAG_EEE flag to describe switch models featuring Energy Efficient Ethernet. Use it to conditionally support such access in the common code. Signed-off-by: Vivien Didelot Signed-off-by: David S. 
Miller --- drivers/net/dsa/mv88e6123.c | 2 ++ drivers/net/dsa/mv88e6131.c | 2 ++ drivers/net/dsa/mv88e6171.c | 2 ++ drivers/net/dsa/mv88e6xxx.c | 6 ++++++ drivers/net/dsa/mv88e6xxx.h | 11 +++++++++-- 5 files changed, 21 insertions(+), 2 deletions(-) diff --git a/drivers/net/dsa/mv88e6123.c b/drivers/net/dsa/mv88e6123.c index c349fb7ec3f2..1ad7bcd1d421 100644 --- a/drivers/net/dsa/mv88e6123.c +++ b/drivers/net/dsa/mv88e6123.c @@ -117,6 +117,8 @@ struct dsa_switch_driver mv88e6123_switch_driver = { .set_addr = mv88e6xxx_set_addr, .phy_read = mv88e6xxx_phy_read, .phy_write = mv88e6xxx_phy_write, + .set_eee = mv88e6xxx_set_eee, + .get_eee = mv88e6xxx_get_eee, .get_strings = mv88e6xxx_get_strings, .get_ethtool_stats = mv88e6xxx_get_ethtool_stats, .get_sset_count = mv88e6xxx_get_sset_count, diff --git a/drivers/net/dsa/mv88e6131.c b/drivers/net/dsa/mv88e6131.c index 1e040c6d663a..432d3c487691 100644 --- a/drivers/net/dsa/mv88e6131.c +++ b/drivers/net/dsa/mv88e6131.c @@ -150,6 +150,8 @@ struct dsa_switch_driver mv88e6131_switch_driver = { .set_addr = mv88e6xxx_set_addr, .phy_read = mv88e6xxx_phy_read, .phy_write = mv88e6xxx_phy_write, + .set_eee = mv88e6xxx_set_eee, + .get_eee = mv88e6xxx_get_eee, .get_strings = mv88e6xxx_get_strings, .get_ethtool_stats = mv88e6xxx_get_ethtool_stats, .get_sset_count = mv88e6xxx_get_sset_count, diff --git a/drivers/net/dsa/mv88e6171.c b/drivers/net/dsa/mv88e6171.c index f9b20e05b895..a98e7d3c0c64 100644 --- a/drivers/net/dsa/mv88e6171.c +++ b/drivers/net/dsa/mv88e6171.c @@ -126,6 +126,8 @@ struct dsa_switch_driver mv88e6171_switch_driver = { .set_addr = mv88e6xxx_set_addr, .phy_read = mv88e6xxx_phy_read, .phy_write = mv88e6xxx_phy_write, + .set_eee = mv88e6xxx_set_eee, + .get_eee = mv88e6xxx_get_eee, .get_strings = mv88e6xxx_get_strings, .get_ethtool_stats = mv88e6xxx_get_ethtool_stats, .get_sset_count = mv88e6xxx_get_sset_count, diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c index 4f0e047538d2..6aac58b8b78b 100644 --- a/drivers/net/dsa/mv88e6xxx.c +++ b/drivers/net/dsa/mv88e6xxx.c @@ -1105,6 +1105,9 @@ int mv88e6xxx_get_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e) struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); int reg; + if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_EEE)) + return -EOPNOTSUPP; + mutex_lock(&ps->smi_mutex); reg = _mv88e6xxx_phy_read_indirect(ps, port, 16); @@ -1133,6 +1136,9 @@ int mv88e6xxx_set_eee(struct dsa_switch *ds, int port, int reg; int ret; + if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_EEE)) + return -EOPNOTSUPP; + mutex_lock(&ps->smi_mutex); ret = _mv88e6xxx_phy_read_indirect(ps, port, 16); diff --git a/drivers/net/dsa/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx.h index 517e95fbd10e..b99e0905992a 100644 --- a/drivers/net/dsa/mv88e6xxx.h +++ b/drivers/net/dsa/mv88e6xxx.h @@ -351,6 +351,10 @@ enum mv88e6xxx_family { }; enum mv88e6xxx_cap { + /* Energy Efficient Ethernet. + */ + MV88E6XXX_CAP_EEE, + /* EEPROM Command and Data registers. * See GLOBAL2_EEPROM_OP and GLOBAL2_EEPROM_DATA. 
*/ @@ -382,6 +386,7 @@ enum mv88e6xxx_cap { }; /* Bitmask of capabilities */ +#define MV88E6XXX_FLAG_EEE BIT(MV88E6XXX_CAP_EEE) #define MV88E6XXX_FLAG_EEPROM BIT(MV88E6XXX_CAP_EEPROM) #define MV88E6XXX_FLAG_PPU BIT(MV88E6XXX_CAP_PPU) #define MV88E6XXX_FLAG_SMI_PHY BIT(MV88E6XXX_CAP_SMI_PHY) @@ -403,7 +408,8 @@ enum mv88e6xxx_cap { MV88E6XXX_FLAG_PPU #define MV88E6XXX_FLAGS_FAMILY_6320 \ - (MV88E6XXX_FLAG_EEPROM | \ + (MV88E6XXX_FLAG_EEE | \ + MV88E6XXX_FLAG_EEPROM | \ MV88E6XXX_FLAG_SMI_PHY | \ MV88E6XXX_FLAG_SWITCH_MAC | \ MV88E6XXX_FLAG_TEMP | \ @@ -415,7 +421,8 @@ enum mv88e6xxx_cap { MV88E6XXX_FLAG_TEMP) #define MV88E6XXX_FLAGS_FAMILY_6352 \ - (MV88E6XXX_FLAG_EEPROM | \ + (MV88E6XXX_FLAG_EEE | \ + MV88E6XXX_FLAG_EEPROM | \ MV88E6XXX_FLAG_SMI_PHY | \ MV88E6XXX_FLAG_SWITCH_MAC | \ MV88E6XXX_FLAG_TEMP | \ From 2306251341bda39f1c3260bb96479db4dff2fe95 Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Mon, 9 May 2016 13:22:45 -0400 Subject: [PATCH 1430/1649] net: dsa: mv88e6131: add registers access Only 6131 was not supporting the port registers access yet. Assume such support and use the unlock access routines in the meantime. Signed-off-by: Vivien Didelot Signed-off-by: David S. Miller --- drivers/net/dsa/mv88e6131.c | 2 ++ drivers/net/dsa/mv88e6xxx.c | 6 +++++- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/drivers/net/dsa/mv88e6131.c b/drivers/net/dsa/mv88e6131.c index 432d3c487691..3fb06af74db3 100644 --- a/drivers/net/dsa/mv88e6131.c +++ b/drivers/net/dsa/mv88e6131.c @@ -157,6 +157,8 @@ struct dsa_switch_driver mv88e6131_switch_driver = { .get_sset_count = mv88e6xxx_get_sset_count, .get_eeprom = mv88e6xxx_get_eeprom, .set_eeprom = mv88e6xxx_set_eeprom, + .get_regs_len = mv88e6xxx_get_regs_len, + .get_regs = mv88e6xxx_get_regs, #ifdef CONFIG_NET_DSA_HWMON .get_temp = mv88e6xxx_get_temp, .get_temp_limit = mv88e6xxx_get_temp_limit, diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c index 6aac58b8b78b..c28ad83ee74d 100644 --- a/drivers/net/dsa/mv88e6xxx.c +++ b/drivers/net/dsa/mv88e6xxx.c @@ -787,13 +787,17 @@ void mv88e6xxx_get_regs(struct dsa_switch *ds, int port, memset(p, 0xff, 32 * sizeof(u16)); + mutex_lock(&ps->smi_mutex); + for (i = 0; i < 32; i++) { int ret; - ret = mv88e6xxx_reg_read(ps, REG_PORT(port), i); + ret = _mv88e6xxx_reg_read(ps, REG_PORT(port), i); if (ret >= 0) p[i] = ret; } + + mutex_unlock(&ps->smi_mutex); } static int _mv88e6xxx_wait(struct mv88e6xxx_priv_state *ps, int reg, int offset, From 936f234a9624dbce9f723cbb24f135c60f76c148 Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Mon, 9 May 2016 13:22:46 -0400 Subject: [PATCH 1431/1649] net: dsa: mv88e6xxx: factorize bridge support Add MV88E6XXX_FLAG_PORTSTATE and MV88E6XXX_FLAG_VLANTABLE flags to identify switch models with required 802.1D operations. Signed-off-by: Vivien Didelot Signed-off-by: David S. 
Miller --- drivers/net/dsa/mv88e6123.c | 3 +++ drivers/net/dsa/mv88e6131.c | 1 + drivers/net/dsa/mv88e6xxx.c | 9 +++++++++ drivers/net/dsa/mv88e6xxx.h | 38 +++++++++++++++++++++++++++++-------- 4 files changed, 43 insertions(+), 8 deletions(-) diff --git a/drivers/net/dsa/mv88e6123.c b/drivers/net/dsa/mv88e6123.c index 1ad7bcd1d421..81cd4a906038 100644 --- a/drivers/net/dsa/mv88e6123.c +++ b/drivers/net/dsa/mv88e6123.c @@ -133,6 +133,9 @@ struct dsa_switch_driver mv88e6123_switch_driver = { .set_eeprom = mv88e6xxx_set_eeprom, .get_regs_len = mv88e6xxx_get_regs_len, .get_regs = mv88e6xxx_get_regs, + .port_bridge_join = mv88e6xxx_port_bridge_join, + .port_bridge_leave = mv88e6xxx_port_bridge_leave, + .port_stp_state_set = mv88e6xxx_port_stp_state_set, }; MODULE_ALIAS("platform:mv88e6123"); diff --git a/drivers/net/dsa/mv88e6131.c b/drivers/net/dsa/mv88e6131.c index 3fb06af74db3..5d252445e543 100644 --- a/drivers/net/dsa/mv88e6131.c +++ b/drivers/net/dsa/mv88e6131.c @@ -168,6 +168,7 @@ struct dsa_switch_driver mv88e6131_switch_driver = { .adjust_link = mv88e6xxx_adjust_link, .port_bridge_join = mv88e6xxx_port_bridge_join, .port_bridge_leave = mv88e6xxx_port_bridge_leave, + .port_stp_state_set = mv88e6xxx_port_stp_state_set, .port_vlan_filtering = mv88e6xxx_port_vlan_filtering, .port_vlan_prepare = mv88e6xxx_port_vlan_prepare, .port_vlan_add = mv88e6xxx_port_vlan_add, diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c index c28ad83ee74d..f02738eaf541 100644 --- a/drivers/net/dsa/mv88e6xxx.c +++ b/drivers/net/dsa/mv88e6xxx.c @@ -1369,6 +1369,9 @@ void mv88e6xxx_port_stp_state_set(struct dsa_switch *ds, int port, u8 state) struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); int stp_state; + if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_PORTSTATE)) + return; + switch (state) { case BR_STATE_DISABLED: stp_state = PORT_CONTROL_STATE_DISABLED; @@ -2430,6 +2433,9 @@ int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port, struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); int i, err = 0; + if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_VLANTABLE)) + return -EOPNOTSUPP; + mutex_lock(&ps->smi_mutex); /* Assign the bridge and remap each port's VLANTable */ @@ -2454,6 +2460,9 @@ void mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port) struct net_device *bridge = ps->ports[port].bridge_dev; int i; + if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_VLANTABLE)) + return; + mutex_lock(&ps->smi_mutex); /* Unassign the bridge and remap each port's VLANTable */ diff --git a/drivers/net/dsa/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx.h index b99e0905992a..d15e0b3dffd3 100644 --- a/drivers/net/dsa/mv88e6xxx.h +++ b/drivers/net/dsa/mv88e6xxx.h @@ -360,6 +360,11 @@ enum mv88e6xxx_cap { */ MV88E6XXX_CAP_EEPROM, + /* Port State Filtering for 802.1D Spanning Tree. + * See PORT_CONTROL_STATE_* values in the PORT_CONTROL register. + */ + MV88E6XXX_CAP_PORTSTATE, + /* PHY Polling Unit. * See GLOBAL_CONTROL_PPU_ENABLE and GLOBAL_STATUS_PPU_POLLING. */ @@ -383,50 +388,67 @@ enum mv88e6xxx_cap { */ MV88E6XXX_CAP_TEMP, MV88E6XXX_CAP_TEMP_LIMIT, + + /* In-chip Port Based VLANs. + * Each port VLANTable register (see PORT_BASE_VLAN) is used to restrict + * the output (or egress) ports to which it is allowed to send frames. 
+ */ + MV88E6XXX_CAP_VLANTABLE, }; /* Bitmask of capabilities */ #define MV88E6XXX_FLAG_EEE BIT(MV88E6XXX_CAP_EEE) #define MV88E6XXX_FLAG_EEPROM BIT(MV88E6XXX_CAP_EEPROM) +#define MV88E6XXX_FLAG_PORTSTATE BIT(MV88E6XXX_CAP_PORTSTATE) #define MV88E6XXX_FLAG_PPU BIT(MV88E6XXX_CAP_PPU) #define MV88E6XXX_FLAG_SMI_PHY BIT(MV88E6XXX_CAP_SMI_PHY) #define MV88E6XXX_FLAG_SWITCH_MAC BIT(MV88E6XXX_CAP_SWITCH_MAC_WOL_WOF) #define MV88E6XXX_FLAG_TEMP BIT(MV88E6XXX_CAP_TEMP) #define MV88E6XXX_FLAG_TEMP_LIMIT BIT(MV88E6XXX_CAP_TEMP_LIMIT) +#define MV88E6XXX_FLAG_VLANTABLE BIT(MV88E6XXX_CAP_VLANTABLE) #define MV88E6XXX_FLAGS_FAMILY_6095 \ - MV88E6XXX_FLAG_PPU + (MV88E6XXX_FLAG_PPU | \ + MV88E6XXX_FLAG_VLANTABLE) #define MV88E6XXX_FLAGS_FAMILY_6097 \ - MV88E6XXX_FLAG_PPU + (MV88E6XXX_FLAG_PPU | \ + MV88E6XXX_FLAG_VLANTABLE) #define MV88E6XXX_FLAGS_FAMILY_6165 \ (MV88E6XXX_FLAG_SWITCH_MAC | \ MV88E6XXX_FLAG_TEMP) #define MV88E6XXX_FLAGS_FAMILY_6185 \ - MV88E6XXX_FLAG_PPU + (MV88E6XXX_FLAG_PPU | \ + MV88E6XXX_FLAG_VLANTABLE) #define MV88E6XXX_FLAGS_FAMILY_6320 \ (MV88E6XXX_FLAG_EEE | \ MV88E6XXX_FLAG_EEPROM | \ + MV88E6XXX_FLAG_PORTSTATE | \ MV88E6XXX_FLAG_SMI_PHY | \ MV88E6XXX_FLAG_SWITCH_MAC | \ MV88E6XXX_FLAG_TEMP | \ - MV88E6XXX_FLAG_TEMP_LIMIT) + MV88E6XXX_FLAG_TEMP_LIMIT | \ + MV88E6XXX_FLAG_VLANTABLE) #define MV88E6XXX_FLAGS_FAMILY_6351 \ - (MV88E6XXX_FLAG_SMI_PHY | \ + (MV88E6XXX_FLAG_PORTSTATE | \ + MV88E6XXX_FLAG_SMI_PHY | \ MV88E6XXX_FLAG_SWITCH_MAC | \ - MV88E6XXX_FLAG_TEMP) + MV88E6XXX_FLAG_TEMP | \ + MV88E6XXX_FLAG_VLANTABLE) #define MV88E6XXX_FLAGS_FAMILY_6352 \ (MV88E6XXX_FLAG_EEE | \ MV88E6XXX_FLAG_EEPROM | \ - MV88E6XXX_FLAG_SMI_PHY | \ + MV88E6XXX_FLAG_PORTSTATE | \ + MV88E6XXX_FLAG_SMI_PHY | \ MV88E6XXX_FLAG_SWITCH_MAC | \ MV88E6XXX_FLAG_TEMP | \ - MV88E6XXX_FLAG_TEMP_LIMIT) + MV88E6XXX_FLAG_TEMP_LIMIT | \ + MV88E6XXX_FLAG_VLANTABLE) struct mv88e6xxx_info { enum mv88e6xxx_family family; From 54d77b5b6ac92c76ee7dd360d8b7b0dfabf9f5f0 Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Mon, 9 May 2016 13:22:47 -0400 Subject: [PATCH 1432/1649] net: dsa: mv88e6xxx: factorize VTU access Add a MV88E6XXX_FLAG_VTU flag to indentify switch models with a VLAN Table Unit. Signed-off-by: Vivien Didelot Signed-off-by: David S. 
Miller --- drivers/net/dsa/mv88e6123.c | 5 +++++ drivers/net/dsa/mv88e6xxx.c | 16 ++++++++++++++++ drivers/net/dsa/mv88e6xxx.h | 24 ++++++++++++++++++------ 3 files changed, 39 insertions(+), 6 deletions(-) diff --git a/drivers/net/dsa/mv88e6123.c b/drivers/net/dsa/mv88e6123.c index 81cd4a906038..da5aa9c0a471 100644 --- a/drivers/net/dsa/mv88e6123.c +++ b/drivers/net/dsa/mv88e6123.c @@ -136,6 +136,11 @@ struct dsa_switch_driver mv88e6123_switch_driver = { .port_bridge_join = mv88e6xxx_port_bridge_join, .port_bridge_leave = mv88e6xxx_port_bridge_leave, .port_stp_state_set = mv88e6xxx_port_stp_state_set, + .port_vlan_filtering = mv88e6xxx_port_vlan_filtering, + .port_vlan_prepare = mv88e6xxx_port_vlan_prepare, + .port_vlan_add = mv88e6xxx_port_vlan_add, + .port_vlan_del = mv88e6xxx_port_vlan_del, + .port_vlan_dump = mv88e6xxx_port_vlan_dump, }; MODULE_ALIAS("platform:mv88e6123"); diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c index f02738eaf541..6c472cfa1df9 100644 --- a/drivers/net/dsa/mv88e6xxx.c +++ b/drivers/net/dsa/mv88e6xxx.c @@ -1596,6 +1596,9 @@ int mv88e6xxx_port_vlan_dump(struct dsa_switch *ds, int port, u16 pvid; int err; + if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_VTU)) + return -EOPNOTSUPP; + mutex_lock(&ps->smi_mutex); err = _mv88e6xxx_port_pvid_get(ps, port, &pvid); @@ -2019,6 +2022,9 @@ int mv88e6xxx_port_vlan_filtering(struct dsa_switch *ds, int port, PORT_CONTROL_2_8021Q_DISABLED; int ret; + if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_VTU)) + return -EOPNOTSUPP; + mutex_lock(&ps->smi_mutex); ret = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_CONTROL_2); @@ -2052,8 +2058,12 @@ int mv88e6xxx_port_vlan_prepare(struct dsa_switch *ds, int port, const struct switchdev_obj_port_vlan *vlan, struct switchdev_trans *trans) { + struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); int err; + if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_VTU)) + return -EOPNOTSUPP; + /* If the requested port doesn't belong to the same bridge as the VLAN * members, do not support it (yet) and fallback to software VLAN. */ @@ -2094,6 +2104,9 @@ void mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port, bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID; u16 vid; + if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_VTU)) + return; + mutex_lock(&ps->smi_mutex); for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) @@ -2151,6 +2164,9 @@ int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port, u16 pvid, vid; int err = 0; + if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_VTU)) + return -EOPNOTSUPP; + mutex_lock(&ps->smi_mutex); err = _mv88e6xxx_port_pvid_get(ps, port, &pvid); diff --git a/drivers/net/dsa/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx.h index d15e0b3dffd3..4f21206ac4de 100644 --- a/drivers/net/dsa/mv88e6xxx.h +++ b/drivers/net/dsa/mv88e6xxx.h @@ -394,6 +394,11 @@ enum mv88e6xxx_cap { * the output (or egress) ports to which it is allowed to send frames. */ MV88E6XXX_CAP_VLANTABLE, + + /* VLAN Table Unit. + * The VTU is used to program 802.1Q VLANs. See GLOBAL_VTU_OP. 
+ */ + MV88E6XXX_CAP_VTU, }; /* Bitmask of capabilities */ @@ -406,14 +411,17 @@ enum mv88e6xxx_cap { #define MV88E6XXX_FLAG_TEMP BIT(MV88E6XXX_CAP_TEMP) #define MV88E6XXX_FLAG_TEMP_LIMIT BIT(MV88E6XXX_CAP_TEMP_LIMIT) #define MV88E6XXX_FLAG_VLANTABLE BIT(MV88E6XXX_CAP_VLANTABLE) +#define MV88E6XXX_FLAG_VTU BIT(MV88E6XXX_CAP_VTU) #define MV88E6XXX_FLAGS_FAMILY_6095 \ (MV88E6XXX_FLAG_PPU | \ - MV88E6XXX_FLAG_VLANTABLE) + MV88E6XXX_FLAG_VLANTABLE | \ + MV88E6XXX_FLAG_VTU) #define MV88E6XXX_FLAGS_FAMILY_6097 \ (MV88E6XXX_FLAG_PPU | \ - MV88E6XXX_FLAG_VLANTABLE) + MV88E6XXX_FLAG_VLANTABLE | \ + MV88E6XXX_FLAG_VTU) #define MV88E6XXX_FLAGS_FAMILY_6165 \ (MV88E6XXX_FLAG_SWITCH_MAC | \ @@ -421,7 +429,8 @@ enum mv88e6xxx_cap { #define MV88E6XXX_FLAGS_FAMILY_6185 \ (MV88E6XXX_FLAG_PPU | \ - MV88E6XXX_FLAG_VLANTABLE) + MV88E6XXX_FLAG_VLANTABLE | \ + MV88E6XXX_FLAG_VTU) #define MV88E6XXX_FLAGS_FAMILY_6320 \ (MV88E6XXX_FLAG_EEE | \ @@ -431,14 +440,16 @@ enum mv88e6xxx_cap { MV88E6XXX_FLAG_SWITCH_MAC | \ MV88E6XXX_FLAG_TEMP | \ MV88E6XXX_FLAG_TEMP_LIMIT | \ - MV88E6XXX_FLAG_VLANTABLE) + MV88E6XXX_FLAG_VLANTABLE | \ + MV88E6XXX_FLAG_VTU) #define MV88E6XXX_FLAGS_FAMILY_6351 \ (MV88E6XXX_FLAG_PORTSTATE | \ MV88E6XXX_FLAG_SMI_PHY | \ MV88E6XXX_FLAG_SWITCH_MAC | \ MV88E6XXX_FLAG_TEMP | \ - MV88E6XXX_FLAG_VLANTABLE) + MV88E6XXX_FLAG_VLANTABLE | \ + MV88E6XXX_FLAG_VTU) #define MV88E6XXX_FLAGS_FAMILY_6352 \ (MV88E6XXX_FLAG_EEE | \ @@ -448,7 +459,8 @@ enum mv88e6xxx_cap { MV88E6XXX_FLAG_SWITCH_MAC | \ MV88E6XXX_FLAG_TEMP | \ MV88E6XXX_FLAG_TEMP_LIMIT | \ - MV88E6XXX_FLAG_VLANTABLE) + MV88E6XXX_FLAG_VLANTABLE | \ + MV88E6XXX_FLAG_VTU) struct mv88e6xxx_info { enum mv88e6xxx_family family; From 2672f82548de2be29adcf5ef3c99fdaa1e5ace32 Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Mon, 9 May 2016 13:22:48 -0400 Subject: [PATCH 1433/1649] net: dsa: mv88e6xxx: factorize ATU access Add a MV88E6XXX_FLAG_ATU flag to identify switch models with an Address Translation Unit. Signed-off-by: Vivien Didelot Signed-off-by: David S. Miller --- drivers/net/dsa/mv88e6123.c | 4 ++++ drivers/net/dsa/mv88e6xxx.c | 14 ++++++++++++++ drivers/net/dsa/mv88e6xxx.h | 24 ++++++++++++++++++------ 3 files changed, 36 insertions(+), 6 deletions(-) diff --git a/drivers/net/dsa/mv88e6123.c b/drivers/net/dsa/mv88e6123.c index da5aa9c0a471..45cce4fa5f37 100644 --- a/drivers/net/dsa/mv88e6123.c +++ b/drivers/net/dsa/mv88e6123.c @@ -141,6 +141,10 @@ struct dsa_switch_driver mv88e6123_switch_driver = { .port_vlan_add = mv88e6xxx_port_vlan_add, .port_vlan_del = mv88e6xxx_port_vlan_del, .port_vlan_dump = mv88e6xxx_port_vlan_dump, + .port_fdb_prepare = mv88e6xxx_port_fdb_prepare, + .port_fdb_add = mv88e6xxx_port_fdb_add, + .port_fdb_del = mv88e6xxx_port_fdb_del, + .port_fdb_dump = mv88e6xxx_port_fdb_dump, }; MODULE_ALIAS("platform:mv88e6123"); diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c index 6c472cfa1df9..037244b95e79 100644 --- a/drivers/net/dsa/mv88e6xxx.c +++ b/drivers/net/dsa/mv88e6xxx.c @@ -2275,6 +2275,11 @@ int mv88e6xxx_port_fdb_prepare(struct dsa_switch *ds, int port, const struct switchdev_obj_port_fdb *fdb, struct switchdev_trans *trans) { + struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); + + if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_ATU)) + return -EOPNOTSUPP; + /* We don't need any dynamic resource from the kernel (yet), * so skip the prepare phase. 
*/ @@ -2290,6 +2295,9 @@ void mv88e6xxx_port_fdb_add(struct dsa_switch *ds, int port, GLOBAL_ATU_DATA_STATE_UC_STATIC; struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); + if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_ATU)) + return; + mutex_lock(&ps->smi_mutex); if (_mv88e6xxx_port_fdb_load(ps, port, fdb->addr, fdb->vid, state)) netdev_err(ds->ports[port], "failed to load MAC address\n"); @@ -2302,6 +2310,9 @@ int mv88e6xxx_port_fdb_del(struct dsa_switch *ds, int port, struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); int ret; + if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_ATU)) + return -EOPNOTSUPP; + mutex_lock(&ps->smi_mutex); ret = _mv88e6xxx_port_fdb_load(ps, port, fdb->addr, fdb->vid, GLOBAL_ATU_DATA_STATE_UNUSED); @@ -2407,6 +2418,9 @@ int mv88e6xxx_port_fdb_dump(struct dsa_switch *ds, int port, u16 fid; int err; + if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_ATU)) + return -EOPNOTSUPP; + mutex_lock(&ps->smi_mutex); /* Dump port's default Filtering Information Database (VLAN ID 0) */ diff --git a/drivers/net/dsa/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx.h index 4f21206ac4de..192292f21c8a 100644 --- a/drivers/net/dsa/mv88e6xxx.h +++ b/drivers/net/dsa/mv88e6xxx.h @@ -351,6 +351,11 @@ enum mv88e6xxx_family { }; enum mv88e6xxx_cap { + /* Address Translation Unit. + * The ATU is used to lookup and learn MAC addresses. See GLOBAL_ATU_OP. + */ + MV88E6XXX_CAP_ATU, + /* Energy Efficient Ethernet. */ MV88E6XXX_CAP_EEE, @@ -402,6 +407,7 @@ enum mv88e6xxx_cap { }; /* Bitmask of capabilities */ +#define MV88E6XXX_FLAG_ATU BIT(MV88E6XXX_CAP_ATU) #define MV88E6XXX_FLAG_EEE BIT(MV88E6XXX_CAP_EEE) #define MV88E6XXX_FLAG_EEPROM BIT(MV88E6XXX_CAP_EEPROM) #define MV88E6XXX_FLAG_PORTSTATE BIT(MV88E6XXX_CAP_PORTSTATE) @@ -414,12 +420,14 @@ enum mv88e6xxx_cap { #define MV88E6XXX_FLAG_VTU BIT(MV88E6XXX_CAP_VTU) #define MV88E6XXX_FLAGS_FAMILY_6095 \ - (MV88E6XXX_FLAG_PPU | \ + (MV88E6XXX_FLAG_ATU | \ + MV88E6XXX_FLAG_PPU | \ MV88E6XXX_FLAG_VLANTABLE | \ MV88E6XXX_FLAG_VTU) #define MV88E6XXX_FLAGS_FAMILY_6097 \ - (MV88E6XXX_FLAG_PPU | \ + (MV88E6XXX_FLAG_ATU | \ + MV88E6XXX_FLAG_PPU | \ MV88E6XXX_FLAG_VLANTABLE | \ MV88E6XXX_FLAG_VTU) @@ -428,12 +436,14 @@ enum mv88e6xxx_cap { MV88E6XXX_FLAG_TEMP) #define MV88E6XXX_FLAGS_FAMILY_6185 \ - (MV88E6XXX_FLAG_PPU | \ + (MV88E6XXX_FLAG_ATU | \ + MV88E6XXX_FLAG_PPU | \ MV88E6XXX_FLAG_VLANTABLE | \ MV88E6XXX_FLAG_VTU) #define MV88E6XXX_FLAGS_FAMILY_6320 \ - (MV88E6XXX_FLAG_EEE | \ + (MV88E6XXX_FLAG_ATU | \ + MV88E6XXX_FLAG_EEE | \ MV88E6XXX_FLAG_EEPROM | \ MV88E6XXX_FLAG_PORTSTATE | \ MV88E6XXX_FLAG_SMI_PHY | \ @@ -444,7 +454,8 @@ enum mv88e6xxx_cap { MV88E6XXX_FLAG_VTU) #define MV88E6XXX_FLAGS_FAMILY_6351 \ - (MV88E6XXX_FLAG_PORTSTATE | \ + (MV88E6XXX_FLAG_ATU | \ + MV88E6XXX_FLAG_PORTSTATE | \ MV88E6XXX_FLAG_SMI_PHY | \ MV88E6XXX_FLAG_SWITCH_MAC | \ MV88E6XXX_FLAG_TEMP | \ @@ -452,7 +463,8 @@ enum mv88e6xxx_cap { MV88E6XXX_FLAG_VTU) #define MV88E6XXX_FLAGS_FAMILY_6352 \ - (MV88E6XXX_FLAG_EEE | \ + (MV88E6XXX_FLAG_ATU | \ + MV88E6XXX_FLAG_EEE | \ MV88E6XXX_FLAG_EEPROM | \ MV88E6XXX_FLAG_PORTSTATE | \ MV88E6XXX_FLAG_SMI_PHY | \ From 552238b59487eaac1477bdb7b0c4c652f29cbc86 Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Mon, 9 May 2016 13:22:49 -0400 Subject: [PATCH 1434/1649] net: dsa: mv88e6xxx: factorize switch reset Add a MV88E6XXX_FLAG_PPU_ACTIVE flag to describe how to reset the switch, and merge the reset call to the common setup code. Signed-off-by: Vivien Didelot Signed-off-by: David S. 
Miller --- drivers/net/dsa/mv88e6123.c | 4 -- drivers/net/dsa/mv88e6131.c | 4 -- drivers/net/dsa/mv88e6171.c | 4 -- drivers/net/dsa/mv88e6352.c | 4 -- drivers/net/dsa/mv88e6xxx.c | 137 +++++++++++++++++++----------------- drivers/net/dsa/mv88e6xxx.h | 6 +- 6 files changed, 76 insertions(+), 83 deletions(-) diff --git a/drivers/net/dsa/mv88e6123.c b/drivers/net/dsa/mv88e6123.c index 45cce4fa5f37..fadec7a0e6b5 100644 --- a/drivers/net/dsa/mv88e6123.c +++ b/drivers/net/dsa/mv88e6123.c @@ -99,10 +99,6 @@ static int mv88e6123_setup(struct dsa_switch *ds) if (ret < 0) return ret; - ret = mv88e6xxx_switch_reset(ps, false); - if (ret < 0) - return ret; - ret = mv88e6123_setup_global(ds); if (ret < 0) return ret; diff --git a/drivers/net/dsa/mv88e6131.c b/drivers/net/dsa/mv88e6131.c index 5d252445e543..25ed82372df5 100644 --- a/drivers/net/dsa/mv88e6131.c +++ b/drivers/net/dsa/mv88e6131.c @@ -132,10 +132,6 @@ static int mv88e6131_setup(struct dsa_switch *ds) if (ret < 0) return ret; - ret = mv88e6xxx_switch_reset(ps, false); - if (ret < 0) - return ret; - ret = mv88e6131_setup_global(ds); if (ret < 0) return ret; diff --git a/drivers/net/dsa/mv88e6171.c b/drivers/net/dsa/mv88e6171.c index a98e7d3c0c64..caaa4b66abc1 100644 --- a/drivers/net/dsa/mv88e6171.c +++ b/drivers/net/dsa/mv88e6171.c @@ -108,10 +108,6 @@ static int mv88e6171_setup(struct dsa_switch *ds) if (ret < 0) return ret; - ret = mv88e6xxx_switch_reset(ps, true); - if (ret < 0) - return ret; - ret = mv88e6171_setup_global(ds); if (ret < 0) return ret; diff --git a/drivers/net/dsa/mv88e6352.c b/drivers/net/dsa/mv88e6352.c index d03c14a7ad1f..470789a3f8ec 100644 --- a/drivers/net/dsa/mv88e6352.c +++ b/drivers/net/dsa/mv88e6352.c @@ -125,10 +125,6 @@ static int mv88e6352_setup(struct dsa_switch *ds) if (ret < 0) return ret; - ret = mv88e6xxx_switch_reset(ps, true); - if (ret < 0) - return ret; - ret = mv88e6352_setup_global(ds); if (ret < 0) return ret; diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c index 037244b95e79..b631a5d07ba0 100644 --- a/drivers/net/dsa/mv88e6xxx.c +++ b/drivers/net/dsa/mv88e6xxx.c @@ -2559,6 +2559,68 @@ restore_page_0: return ret; } +static int mv88e6xxx_switch_reset(struct mv88e6xxx_priv_state *ps) +{ + bool ppu_active = mv88e6xxx_has(ps, MV88E6XXX_FLAG_PPU_ACTIVE); + u16 is_reset = (ppu_active ? 0x8800 : 0xc800); + struct gpio_desc *gpiod = ps->ds->pd->reset; + unsigned long timeout; + int ret; + int i; + + /* Set all ports to the disabled state. */ + for (i = 0; i < ps->info->num_ports; i++) { + ret = _mv88e6xxx_reg_read(ps, REG_PORT(i), PORT_CONTROL); + if (ret < 0) + return ret; + + ret = _mv88e6xxx_reg_write(ps, REG_PORT(i), PORT_CONTROL, + ret & 0xfffc); + if (ret) + return ret; + } + + /* Wait for transmit queues to drain. */ + usleep_range(2000, 4000); + + /* If there is a gpio connected to the reset pin, toggle it */ + if (gpiod) { + gpiod_set_value_cansleep(gpiod, 1); + usleep_range(10000, 20000); + gpiod_set_value_cansleep(gpiod, 0); + usleep_range(10000, 20000); + } + + /* Reset the switch. Keep the PPU active if requested. The PPU + * needs to be active to support indirect phy register access + * through global registers 0x18 and 0x19. + */ + if (ppu_active) + ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, 0x04, 0xc000); + else + ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, 0x04, 0xc400); + if (ret) + return ret; + + /* Wait up to one second for reset to complete. 
*/ + timeout = jiffies + 1 * HZ; + while (time_before(jiffies, timeout)) { + ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, 0x00); + if (ret < 0) + return ret; + + if ((ret & is_reset) == is_reset) + break; + usleep_range(1000, 2000); + } + if (time_after(jiffies, timeout)) + ret = -ETIMEDOUT; + else + ret = 0; + + return ret; +} + static int mv88e6xxx_power_on_serdes(struct mv88e6xxx_priv_state *ps) { int ret; @@ -2860,6 +2922,8 @@ int mv88e6xxx_setup_ports(struct dsa_switch *ds) int mv88e6xxx_setup_common(struct mv88e6xxx_priv_state *ps) { + int err; + mutex_init(&ps->smi_mutex); INIT_WORK(&ps->bridge_work, mv88e6xxx_bridge_work); @@ -2870,7 +2934,13 @@ int mv88e6xxx_setup_common(struct mv88e6xxx_priv_state *ps) if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_PPU)) mv88e6xxx_ppu_state_init(ps); - return 0; + mutex_lock(&ps->smi_mutex); + + err = mv88e6xxx_switch_reset(ps); + + mutex_unlock(&ps->smi_mutex); + + return err; } int mv88e6xxx_setup_global(struct dsa_switch *ds) @@ -3046,71 +3116,6 @@ unlock: return err; } -int mv88e6xxx_switch_reset(struct mv88e6xxx_priv_state *ps, bool ppu_active) -{ - u16 is_reset = (ppu_active ? 0x8800 : 0xc800); - struct gpio_desc *gpiod = ps->ds->pd->reset; - unsigned long timeout; - int ret; - int i; - - mutex_lock(&ps->smi_mutex); - - /* Set all ports to the disabled state. */ - for (i = 0; i < ps->info->num_ports; i++) { - ret = _mv88e6xxx_reg_read(ps, REG_PORT(i), PORT_CONTROL); - if (ret < 0) - goto unlock; - - ret = _mv88e6xxx_reg_write(ps, REG_PORT(i), PORT_CONTROL, - ret & 0xfffc); - if (ret) - goto unlock; - } - - /* Wait for transmit queues to drain. */ - usleep_range(2000, 4000); - - /* If there is a gpio connected to the reset pin, toggle it */ - if (gpiod) { - gpiod_set_value_cansleep(gpiod, 1); - usleep_range(10000, 20000); - gpiod_set_value_cansleep(gpiod, 0); - usleep_range(10000, 20000); - } - - /* Reset the switch. Keep the PPU active if requested. The PPU - * needs to be active to support indirect phy register access - * through global registers 0x18 and 0x19. - */ - if (ppu_active) - ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, 0x04, 0xc000); - else - ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, 0x04, 0xc400); - if (ret) - goto unlock; - - /* Wait up to one second for reset to complete. */ - timeout = jiffies + 1 * HZ; - while (time_before(jiffies, timeout)) { - ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, 0x00); - if (ret < 0) - goto unlock; - - if ((ret & is_reset) == is_reset) - break; - usleep_range(1000, 2000); - } - if (time_after(jiffies, timeout)) - ret = -ETIMEDOUT; - else - ret = 0; -unlock: - mutex_unlock(&ps->smi_mutex); - - return ret; -} - int mv88e6xxx_phy_page_read(struct dsa_switch *ds, int port, int page, int reg) { struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); diff --git a/drivers/net/dsa/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx.h index 192292f21c8a..efd6ebde8eb4 100644 --- a/drivers/net/dsa/mv88e6xxx.h +++ b/drivers/net/dsa/mv88e6xxx.h @@ -374,6 +374,7 @@ enum mv88e6xxx_cap { * See GLOBAL_CONTROL_PPU_ENABLE and GLOBAL_STATUS_PPU_POLLING. */ MV88E6XXX_CAP_PPU, + MV88E6XXX_CAP_PPU_ACTIVE, /* SMI PHY Command and Data registers. 
* This requires an indirect access to PHY registers through @@ -412,6 +413,7 @@ enum mv88e6xxx_cap { #define MV88E6XXX_FLAG_EEPROM BIT(MV88E6XXX_CAP_EEPROM) #define MV88E6XXX_FLAG_PORTSTATE BIT(MV88E6XXX_CAP_PORTSTATE) #define MV88E6XXX_FLAG_PPU BIT(MV88E6XXX_CAP_PPU) +#define MV88E6XXX_FLAG_PPU_ACTIVE BIT(MV88E6XXX_CAP_PPU_ACTIVE) #define MV88E6XXX_FLAG_SMI_PHY BIT(MV88E6XXX_CAP_SMI_PHY) #define MV88E6XXX_FLAG_SWITCH_MAC BIT(MV88E6XXX_CAP_SWITCH_MAC_WOL_WOF) #define MV88E6XXX_FLAG_TEMP BIT(MV88E6XXX_CAP_TEMP) @@ -446,6 +448,7 @@ enum mv88e6xxx_cap { MV88E6XXX_FLAG_EEE | \ MV88E6XXX_FLAG_EEPROM | \ MV88E6XXX_FLAG_PORTSTATE | \ + MV88E6XXX_FLAG_PPU_ACTIVE | \ MV88E6XXX_FLAG_SMI_PHY | \ MV88E6XXX_FLAG_SWITCH_MAC | \ MV88E6XXX_FLAG_TEMP | \ @@ -456,6 +459,7 @@ enum mv88e6xxx_cap { #define MV88E6XXX_FLAGS_FAMILY_6351 \ (MV88E6XXX_FLAG_ATU | \ MV88E6XXX_FLAG_PORTSTATE | \ + MV88E6XXX_FLAG_PPU_ACTIVE | \ MV88E6XXX_FLAG_SMI_PHY | \ MV88E6XXX_FLAG_SWITCH_MAC | \ MV88E6XXX_FLAG_TEMP | \ @@ -467,6 +471,7 @@ enum mv88e6xxx_cap { MV88E6XXX_FLAG_EEE | \ MV88E6XXX_FLAG_EEPROM | \ MV88E6XXX_FLAG_PORTSTATE | \ + MV88E6XXX_FLAG_PPU_ACTIVE | \ MV88E6XXX_FLAG_SMI_PHY | \ MV88E6XXX_FLAG_SWITCH_MAC | \ MV88E6XXX_FLAG_TEMP | \ @@ -578,7 +583,6 @@ static inline bool mv88e6xxx_has(struct mv88e6xxx_priv_state *ps, return (ps->info->flags & flags) == flags; } -int mv88e6xxx_switch_reset(struct mv88e6xxx_priv_state *ps, bool ppu_active); const char *mv88e6xxx_drv_probe(struct device *dsa_dev, struct device *host_dev, int sw_addr, void **priv, const struct mv88e6xxx_info *table, From 08a012619a0349b8e02797bdfe57051fe7df3d3b Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Mon, 9 May 2016 13:22:50 -0400 Subject: [PATCH 1435/1649] net: dsa: mv88e6xxx: factorize global setup Every driver is calling mv88e6xxx_setup_global after mv88e6xxx_setup_common. Call the former in the latter. Signed-off-by: Vivien Didelot Signed-off-by: David S. Miller --- drivers/net/dsa/mv88e6123.c | 4 - drivers/net/dsa/mv88e6131.c | 4 - drivers/net/dsa/mv88e6171.c | 4 - drivers/net/dsa/mv88e6352.c | 4 - drivers/net/dsa/mv88e6xxx.c | 341 ++++++++++++++++++------------------ drivers/net/dsa/mv88e6xxx.h | 1 - 6 files changed, 172 insertions(+), 186 deletions(-) diff --git a/drivers/net/dsa/mv88e6123.c b/drivers/net/dsa/mv88e6123.c index fadec7a0e6b5..d74695ac0be6 100644 --- a/drivers/net/dsa/mv88e6123.c +++ b/drivers/net/dsa/mv88e6123.c @@ -58,10 +58,6 @@ static int mv88e6123_setup_global(struct dsa_switch *ds) int ret; u32 reg; - ret = mv88e6xxx_setup_global(ds); - if (ret) - return ret; - /* Disable the PHY polling unit (since there won't be any * external PHYs to poll), don't discard packets with * excessive collisions, and mask all interrupt sources. 
diff --git a/drivers/net/dsa/mv88e6131.c b/drivers/net/dsa/mv88e6131.c index 25ed82372df5..e22ca7b7fa51 100644 --- a/drivers/net/dsa/mv88e6131.c +++ b/drivers/net/dsa/mv88e6131.c @@ -65,10 +65,6 @@ static int mv88e6131_setup_global(struct dsa_switch *ds) int ret; u32 reg; - ret = mv88e6xxx_setup_global(ds); - if (ret) - return ret; - /* Enable the PHY polling unit, don't discard packets with * excessive collisions, use a weighted fair queueing scheme * to arbitrate between packet queues, set the maximum frame diff --git a/drivers/net/dsa/mv88e6171.c b/drivers/net/dsa/mv88e6171.c index caaa4b66abc1..4bbf2e1a90aa 100644 --- a/drivers/net/dsa/mv88e6171.c +++ b/drivers/net/dsa/mv88e6171.c @@ -65,10 +65,6 @@ static int mv88e6171_setup_global(struct dsa_switch *ds) int ret; u32 reg; - ret = mv88e6xxx_setup_global(ds); - if (ret) - return ret; - /* Discard packets with excessive collisions, mask all * interrupt sources, enable PPU. */ diff --git a/drivers/net/dsa/mv88e6352.c b/drivers/net/dsa/mv88e6352.c index 470789a3f8ec..3e0be872df95 100644 --- a/drivers/net/dsa/mv88e6352.c +++ b/drivers/net/dsa/mv88e6352.c @@ -84,10 +84,6 @@ static int mv88e6352_setup_global(struct dsa_switch *ds) int ret; u32 reg; - ret = mv88e6xxx_setup_global(ds); - if (ret) - return ret; - /* Discard packets with excessive collisions, * mask all interrupt sources, enable PPU (bit 14, undocumented). */ diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c index b631a5d07ba0..32b36a8fb446 100644 --- a/drivers/net/dsa/mv88e6xxx.c +++ b/drivers/net/dsa/mv88e6xxx.c @@ -2920,6 +2920,177 @@ int mv88e6xxx_setup_ports(struct dsa_switch *ds) return 0; } +static int mv88e6xxx_setup_global(struct mv88e6xxx_priv_state *ps) +{ + int err; + int i; + + /* Set the default address aging time to 5 minutes, and + * enable address learn messages to be sent to all message + * ports. + */ + err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_ATU_CONTROL, + 0x0140 | GLOBAL_ATU_CONTROL_LEARN2ALL); + if (err) + return err; + + /* Configure the IP ToS mapping registers. */ + err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IP_PRI_0, 0x0000); + if (err) + return err; + err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IP_PRI_1, 0x0000); + if (err) + return err; + err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IP_PRI_2, 0x5555); + if (err) + return err; + err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IP_PRI_3, 0x5555); + if (err) + return err; + err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IP_PRI_4, 0xaaaa); + if (err) + return err; + err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IP_PRI_5, 0xaaaa); + if (err) + return err; + err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IP_PRI_6, 0xffff); + if (err) + return err; + err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IP_PRI_7, 0xffff); + if (err) + return err; + + /* Configure the IEEE 802.1p priority mapping register. */ + err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IEEE_PRI, 0xfa41); + if (err) + return err; + + /* Send all frames with destination addresses matching + * 01:80:c2:00:00:0x to the CPU port. + */ + err = _mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_MGMT_EN_0X, 0xffff); + if (err) + return err; + + /* Ignore removed tag data on doubly tagged packets, disable + * flow control messages, force flow control priority to the + * highest, and send all special multicast frames to the CPU + * port at the highest priority. 
+ */ + err = _mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_SWITCH_MGMT, + 0x7 | GLOBAL2_SWITCH_MGMT_RSVD2CPU | 0x70 | + GLOBAL2_SWITCH_MGMT_FORCE_FLOW_CTRL_PRI); + if (err) + return err; + + /* Program the DSA routing table. */ + for (i = 0; i < 32; i++) { + int nexthop = 0x1f; + + if (ps->ds->pd->rtable && + i != ps->ds->index && i < ps->ds->dst->pd->nr_chips) + nexthop = ps->ds->pd->rtable[i] & 0x1f; + + err = _mv88e6xxx_reg_write( + ps, REG_GLOBAL2, + GLOBAL2_DEVICE_MAPPING, + GLOBAL2_DEVICE_MAPPING_UPDATE | + (i << GLOBAL2_DEVICE_MAPPING_TARGET_SHIFT) | nexthop); + if (err) + return err; + } + + /* Clear all trunk masks. */ + for (i = 0; i < 8; i++) { + err = _mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_TRUNK_MASK, + 0x8000 | + (i << GLOBAL2_TRUNK_MASK_NUM_SHIFT) | + ((1 << ps->info->num_ports) - 1)); + if (err) + return err; + } + + /* Clear all trunk mappings. */ + for (i = 0; i < 16; i++) { + err = _mv88e6xxx_reg_write( + ps, REG_GLOBAL2, + GLOBAL2_TRUNK_MAPPING, + GLOBAL2_TRUNK_MAPPING_UPDATE | + (i << GLOBAL2_TRUNK_MAPPING_ID_SHIFT)); + if (err) + return err; + } + + if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) || + mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) || + mv88e6xxx_6320_family(ps)) { + /* Send all frames with destination addresses matching + * 01:80:c2:00:00:2x to the CPU port. + */ + err = _mv88e6xxx_reg_write(ps, REG_GLOBAL2, + GLOBAL2_MGMT_EN_2X, 0xffff); + if (err) + return err; + + /* Initialise cross-chip port VLAN table to reset + * defaults. + */ + err = _mv88e6xxx_reg_write(ps, REG_GLOBAL2, + GLOBAL2_PVT_ADDR, 0x9000); + if (err) + return err; + + /* Clear the priority override table. */ + for (i = 0; i < 16; i++) { + err = _mv88e6xxx_reg_write(ps, REG_GLOBAL2, + GLOBAL2_PRIO_OVERRIDE, + 0x8000 | (i << 8)); + if (err) + return err; + } + } + + if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) || + mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) || + mv88e6xxx_6185_family(ps) || mv88e6xxx_6095_family(ps) || + mv88e6xxx_6320_family(ps)) { + /* Disable ingress rate limiting by resetting all + * ingress rate limit registers to their initial + * state. + */ + for (i = 0; i < ps->info->num_ports; i++) { + err = _mv88e6xxx_reg_write(ps, REG_GLOBAL2, + GLOBAL2_INGRESS_OP, + 0x9000 | (i << 8)); + if (err) + return err; + } + } + + /* Clear the statistics counters for all ports */ + err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_STATS_OP, + GLOBAL_STATS_OP_FLUSH_ALL); + if (err) + return err; + + /* Wait for the flush to complete. */ + err = _mv88e6xxx_stats_wait(ps); + if (err) + return err; + + /* Clear all ATU entries */ + err = _mv88e6xxx_atu_flush(ps, 0, true); + if (err) + return err; + + /* Clear all the VTU and STU entries */ + err = _mv88e6xxx_vtu_stu_flush(ps); + if (err < 0) + return err; + + return err; +} + int mv88e6xxx_setup_common(struct mv88e6xxx_priv_state *ps) { int err; @@ -2937,179 +3108,11 @@ int mv88e6xxx_setup_common(struct mv88e6xxx_priv_state *ps) mutex_lock(&ps->smi_mutex); err = mv88e6xxx_switch_reset(ps); - - mutex_unlock(&ps->smi_mutex); - - return err; -} - -int mv88e6xxx_setup_global(struct dsa_switch *ds) -{ - struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); - int err; - int i; - - mutex_lock(&ps->smi_mutex); - /* Set the default address aging time to 5 minutes, and - * enable address learn messages to be sent to all message - * ports. 
- */ - err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_ATU_CONTROL, - 0x0140 | GLOBAL_ATU_CONTROL_LEARN2ALL); if (err) goto unlock; - /* Configure the IP ToS mapping registers. */ - err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IP_PRI_0, 0x0000); - if (err) - goto unlock; - err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IP_PRI_1, 0x0000); - if (err) - goto unlock; - err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IP_PRI_2, 0x5555); - if (err) - goto unlock; - err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IP_PRI_3, 0x5555); - if (err) - goto unlock; - err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IP_PRI_4, 0xaaaa); - if (err) - goto unlock; - err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IP_PRI_5, 0xaaaa); - if (err) - goto unlock; - err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IP_PRI_6, 0xffff); - if (err) - goto unlock; - err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IP_PRI_7, 0xffff); - if (err) - goto unlock; + err = mv88e6xxx_setup_global(ps); - /* Configure the IEEE 802.1p priority mapping register. */ - err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IEEE_PRI, 0xfa41); - if (err) - goto unlock; - - /* Send all frames with destination addresses matching - * 01:80:c2:00:00:0x to the CPU port. - */ - err = _mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_MGMT_EN_0X, 0xffff); - if (err) - goto unlock; - - /* Ignore removed tag data on doubly tagged packets, disable - * flow control messages, force flow control priority to the - * highest, and send all special multicast frames to the CPU - * port at the highest priority. - */ - err = _mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_SWITCH_MGMT, - 0x7 | GLOBAL2_SWITCH_MGMT_RSVD2CPU | 0x70 | - GLOBAL2_SWITCH_MGMT_FORCE_FLOW_CTRL_PRI); - if (err) - goto unlock; - - /* Program the DSA routing table. */ - for (i = 0; i < 32; i++) { - int nexthop = 0x1f; - - if (ds->pd->rtable && - i != ds->index && i < ds->dst->pd->nr_chips) - nexthop = ds->pd->rtable[i] & 0x1f; - - err = _mv88e6xxx_reg_write( - ps, REG_GLOBAL2, - GLOBAL2_DEVICE_MAPPING, - GLOBAL2_DEVICE_MAPPING_UPDATE | - (i << GLOBAL2_DEVICE_MAPPING_TARGET_SHIFT) | nexthop); - if (err) - goto unlock; - } - - /* Clear all trunk masks. */ - for (i = 0; i < 8; i++) { - err = _mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_TRUNK_MASK, - 0x8000 | - (i << GLOBAL2_TRUNK_MASK_NUM_SHIFT) | - ((1 << ps->info->num_ports) - 1)); - if (err) - goto unlock; - } - - /* Clear all trunk mappings. */ - for (i = 0; i < 16; i++) { - err = _mv88e6xxx_reg_write( - ps, REG_GLOBAL2, - GLOBAL2_TRUNK_MAPPING, - GLOBAL2_TRUNK_MAPPING_UPDATE | - (i << GLOBAL2_TRUNK_MAPPING_ID_SHIFT)); - if (err) - goto unlock; - } - - if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) || - mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) || - mv88e6xxx_6320_family(ps)) { - /* Send all frames with destination addresses matching - * 01:80:c2:00:00:2x to the CPU port. - */ - err = _mv88e6xxx_reg_write(ps, REG_GLOBAL2, - GLOBAL2_MGMT_EN_2X, 0xffff); - if (err) - goto unlock; - - /* Initialise cross-chip port VLAN table to reset - * defaults. - */ - err = _mv88e6xxx_reg_write(ps, REG_GLOBAL2, - GLOBAL2_PVT_ADDR, 0x9000); - if (err) - goto unlock; - - /* Clear the priority override table. 
*/ - for (i = 0; i < 16; i++) { - err = _mv88e6xxx_reg_write(ps, REG_GLOBAL2, - GLOBAL2_PRIO_OVERRIDE, - 0x8000 | (i << 8)); - if (err) - goto unlock; - } - } - - if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) || - mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) || - mv88e6xxx_6185_family(ps) || mv88e6xxx_6095_family(ps) || - mv88e6xxx_6320_family(ps)) { - /* Disable ingress rate limiting by resetting all - * ingress rate limit registers to their initial - * state. - */ - for (i = 0; i < ps->info->num_ports; i++) { - err = _mv88e6xxx_reg_write(ps, REG_GLOBAL2, - GLOBAL2_INGRESS_OP, - 0x9000 | (i << 8)); - if (err) - goto unlock; - } - } - - /* Clear the statistics counters for all ports */ - err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_STATS_OP, - GLOBAL_STATS_OP_FLUSH_ALL); - if (err) - goto unlock; - - /* Wait for the flush to complete. */ - err = _mv88e6xxx_stats_wait(ps); - if (err < 0) - goto unlock; - - /* Clear all ATU entries */ - err = _mv88e6xxx_atu_flush(ps, 0, true); - if (err < 0) - goto unlock; - - /* Clear all the VTU and STU entries */ - err = _mv88e6xxx_vtu_stu_flush(ps); unlock: mutex_unlock(&ps->smi_mutex); diff --git a/drivers/net/dsa/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx.h index efd6ebde8eb4..62f6fc9510aa 100644 --- a/drivers/net/dsa/mv88e6xxx.h +++ b/drivers/net/dsa/mv88e6xxx.h @@ -590,7 +590,6 @@ const char *mv88e6xxx_drv_probe(struct device *dsa_dev, struct device *host_dev, int mv88e6xxx_setup_ports(struct dsa_switch *ds); int mv88e6xxx_setup_common(struct mv88e6xxx_priv_state *ps); -int mv88e6xxx_setup_global(struct dsa_switch *ds); int mv88e6xxx_reg_read(struct mv88e6xxx_priv_state *ps, int addr, int reg); int mv88e6xxx_reg_write(struct mv88e6xxx_priv_state *ps, int addr, int reg, u16 val); From 119477bd987cbaf29af10b9cb1b731547906787e Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Mon, 9 May 2016 13:22:51 -0400 Subject: [PATCH 1436/1649] net: dsa: mv88e6xxx: factorize GLOBAL_CONTROL setup All switch models configure the GLOBAL_CONTROL register with slightly differences. Discarding packets with excessive collisions (GLOBAL_CONTROL_DISCARD_EXCESS) is specific to 6352 and similar switches, and setting a maximum frame size (GLOBAL_CONTROL_MAX_FRAME_1632) is specific to 6185 and similar switches. As we are centralizing the chips setup, skip these settings and don't discard any frames yet, until we found out that such discarding by the hardware is necessary. Assume a common setup to enable the PHY Polling Unit if present, don't discard any packets, and mask all interrupt sources. Tested on 88E6352 and 88E6185. Signed-off-by: Vivien Didelot Signed-off-by: David S. Miller --- drivers/net/dsa/mv88e6123.c | 8 -------- drivers/net/dsa/mv88e6131.c | 11 ----------- drivers/net/dsa/mv88e6171.c | 9 --------- drivers/net/dsa/mv88e6352.c | 9 --------- drivers/net/dsa/mv88e6xxx.c | 13 +++++++++++++ 5 files changed, 13 insertions(+), 37 deletions(-) diff --git a/drivers/net/dsa/mv88e6123.c b/drivers/net/dsa/mv88e6123.c index d74695ac0be6..1cd30ac19c1a 100644 --- a/drivers/net/dsa/mv88e6123.c +++ b/drivers/net/dsa/mv88e6123.c @@ -58,14 +58,6 @@ static int mv88e6123_setup_global(struct dsa_switch *ds) int ret; u32 reg; - /* Disable the PHY polling unit (since there won't be any - * external PHYs to poll), don't discard packets with - * excessive collisions, and mask all interrupt sources. 
- */ - ret = mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_CONTROL, 0x0000); - if (ret) - return ret; - /* Configure the upstream port, and configure the upstream * port as the port to which ingress and egress monitor frames * are to be sent. diff --git a/drivers/net/dsa/mv88e6131.c b/drivers/net/dsa/mv88e6131.c index e22ca7b7fa51..d05fc7980e0c 100644 --- a/drivers/net/dsa/mv88e6131.c +++ b/drivers/net/dsa/mv88e6131.c @@ -65,17 +65,6 @@ static int mv88e6131_setup_global(struct dsa_switch *ds) int ret; u32 reg; - /* Enable the PHY polling unit, don't discard packets with - * excessive collisions, use a weighted fair queueing scheme - * to arbitrate between packet queues, set the maximum frame - * size to 1632, and mask all interrupt sources. - */ - ret = mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_CONTROL, - GLOBAL_CONTROL_PPU_ENABLE | - GLOBAL_CONTROL_MAX_FRAME_1632); - if (ret) - return ret; - /* Set the VLAN ethertype to 0x8100. */ ret = mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_CORE_TAG_TYPE, 0x8100); if (ret) diff --git a/drivers/net/dsa/mv88e6171.c b/drivers/net/dsa/mv88e6171.c index 4bbf2e1a90aa..6c8554ce24e6 100644 --- a/drivers/net/dsa/mv88e6171.c +++ b/drivers/net/dsa/mv88e6171.c @@ -65,15 +65,6 @@ static int mv88e6171_setup_global(struct dsa_switch *ds) int ret; u32 reg; - /* Discard packets with excessive collisions, mask all - * interrupt sources, enable PPU. - */ - ret = mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_CONTROL, - GLOBAL_CONTROL_PPU_ENABLE | - GLOBAL_CONTROL_DISCARD_EXCESS); - if (ret) - return ret; - /* Configure the upstream port, and configure the upstream * port as the port to which ingress and egress monitor frames * are to be sent. diff --git a/drivers/net/dsa/mv88e6352.c b/drivers/net/dsa/mv88e6352.c index 3e0be872df95..a27616c00ad6 100644 --- a/drivers/net/dsa/mv88e6352.c +++ b/drivers/net/dsa/mv88e6352.c @@ -84,15 +84,6 @@ static int mv88e6352_setup_global(struct dsa_switch *ds) int ret; u32 reg; - /* Discard packets with excessive collisions, - * mask all interrupt sources, enable PPU (bit 14, undocumented). - */ - ret = mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_CONTROL, - GLOBAL_CONTROL_PPU_ENABLE | - GLOBAL_CONTROL_DISCARD_EXCESS); - if (ret) - return ret; - /* Configure the upstream port, and configure the upstream * port as the port to which ingress and egress monitor frames * are to be sent. diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c index 32b36a8fb446..f1cd66073bf7 100644 --- a/drivers/net/dsa/mv88e6xxx.c +++ b/drivers/net/dsa/mv88e6xxx.c @@ -2922,9 +2922,22 @@ int mv88e6xxx_setup_ports(struct dsa_switch *ds) static int mv88e6xxx_setup_global(struct mv88e6xxx_priv_state *ps) { + u16 reg; int err; int i; + /* Enable the PHY Polling Unit if present, don't discard any packets, + * and mask all interrupt sources. + */ + reg = 0; + if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_PPU) || + mv88e6xxx_has(ps, MV88E6XXX_FLAG_PPU_ACTIVE)) + reg |= GLOBAL_CONTROL_PPU_ENABLE; + + err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_CONTROL, reg); + if (err) + return err; + /* Set the default address aging time to 5 minutes, and * enable address learn messages to be sent to all message * ports. From 709643aa626ec8f0bd678be79187beefffae32c7 Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Mon, 9 May 2016 13:22:52 -0400 Subject: [PATCH 1437/1649] net: dsa: mv88e6131: drop VLAN Ethertype setup The 6131 switch models have a Core Tag Type register. Their setup code is setting it to 0x8100, which is the reset default. 
Drop this specific part which is correctly configured on reset anyway. Signed-off-by: Vivien Didelot Signed-off-by: David S. Miller --- drivers/net/dsa/mv88e6131.c | 5 ----- 1 file changed, 5 deletions(-) diff --git a/drivers/net/dsa/mv88e6131.c b/drivers/net/dsa/mv88e6131.c index d05fc7980e0c..1c3b245cd110 100644 --- a/drivers/net/dsa/mv88e6131.c +++ b/drivers/net/dsa/mv88e6131.c @@ -65,11 +65,6 @@ static int mv88e6131_setup_global(struct dsa_switch *ds) int ret; u32 reg; - /* Set the VLAN ethertype to 0x8100. */ - ret = mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_CORE_TAG_TYPE, 0x8100); - if (ret) - return ret; - /* Disable ARP mirroring, and configure the upstream port as * the port to which ingress and egress monitor frames are to * be sent. From b0745e87943b40734e6c341dcf47a6ec80ee4346 Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Mon, 9 May 2016 13:22:53 -0400 Subject: [PATCH 1438/1649] net: dsa: mv88e6xxx: factorize GLOBAL_MONITOR_CONTROL setup All switch drivers configure the GLOBAL_MONITOR_CONTROL register with slightly changes. Assume the setup of the upstream port, and configure it as the port to which ingress and egress and ARP monitor frames are to be sent. Signed-off-by: Vivien Didelot Signed-off-by: David S. Miller --- drivers/net/dsa/mv88e6123.c | 14 -------------- drivers/net/dsa/mv88e6131.c | 13 ------------- drivers/net/dsa/mv88e6171.c | 15 --------------- drivers/net/dsa/mv88e6352.c | 14 -------------- drivers/net/dsa/mv88e6xxx.c | 12 ++++++++++++ 5 files changed, 12 insertions(+), 56 deletions(-) diff --git a/drivers/net/dsa/mv88e6123.c b/drivers/net/dsa/mv88e6123.c index 1cd30ac19c1a..5df06d8c3ed2 100644 --- a/drivers/net/dsa/mv88e6123.c +++ b/drivers/net/dsa/mv88e6123.c @@ -54,20 +54,6 @@ static const char *mv88e6123_drv_probe(struct device *dsa_dev, static int mv88e6123_setup_global(struct dsa_switch *ds) { struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); - u32 upstream_port = dsa_upstream_port(ds); - int ret; - u32 reg; - - /* Configure the upstream port, and configure the upstream - * port as the port to which ingress and egress monitor frames - * are to be sent. - */ - reg = upstream_port << GLOBAL_MONITOR_CONTROL_INGRESS_SHIFT | - upstream_port << GLOBAL_MONITOR_CONTROL_EGRESS_SHIFT | - upstream_port << GLOBAL_MONITOR_CONTROL_ARP_SHIFT; - ret = mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_MONITOR_CONTROL, reg); - if (ret) - return ret; /* Disable remote management for now, and set the switch's * DSA device number. diff --git a/drivers/net/dsa/mv88e6131.c b/drivers/net/dsa/mv88e6131.c index 1c3b245cd110..c8e3974163dc 100644 --- a/drivers/net/dsa/mv88e6131.c +++ b/drivers/net/dsa/mv88e6131.c @@ -61,20 +61,7 @@ static const char *mv88e6131_drv_probe(struct device *dsa_dev, static int mv88e6131_setup_global(struct dsa_switch *ds) { struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); - u32 upstream_port = dsa_upstream_port(ds); int ret; - u32 reg; - - /* Disable ARP mirroring, and configure the upstream port as - * the port to which ingress and egress monitor frames are to - * be sent. 
- */ - reg = upstream_port << GLOBAL_MONITOR_CONTROL_INGRESS_SHIFT | - upstream_port << GLOBAL_MONITOR_CONTROL_EGRESS_SHIFT | - GLOBAL_MONITOR_CONTROL_ARP_DISABLED; - ret = mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_MONITOR_CONTROL, reg); - if (ret) - return ret; /* Disable cascade port functionality unless this device * is used in a cascade configuration, and set the switch's diff --git a/drivers/net/dsa/mv88e6171.c b/drivers/net/dsa/mv88e6171.c index 6c8554ce24e6..a848aefb4c74 100644 --- a/drivers/net/dsa/mv88e6171.c +++ b/drivers/net/dsa/mv88e6171.c @@ -61,21 +61,6 @@ static const char *mv88e6171_drv_probe(struct device *dsa_dev, static int mv88e6171_setup_global(struct dsa_switch *ds) { struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); - u32 upstream_port = dsa_upstream_port(ds); - int ret; - u32 reg; - - /* Configure the upstream port, and configure the upstream - * port as the port to which ingress and egress monitor frames - * are to be sent. - */ - reg = upstream_port << GLOBAL_MONITOR_CONTROL_INGRESS_SHIFT | - upstream_port << GLOBAL_MONITOR_CONTROL_EGRESS_SHIFT | - upstream_port << GLOBAL_MONITOR_CONTROL_ARP_SHIFT | - upstream_port << GLOBAL_MONITOR_CONTROL_MIRROR_SHIFT; - ret = mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_MONITOR_CONTROL, reg); - if (ret) - return ret; /* Disable remote management for now, and set the switch's * DSA device number. diff --git a/drivers/net/dsa/mv88e6352.c b/drivers/net/dsa/mv88e6352.c index a27616c00ad6..e0988706c882 100644 --- a/drivers/net/dsa/mv88e6352.c +++ b/drivers/net/dsa/mv88e6352.c @@ -80,20 +80,6 @@ static const char *mv88e6352_drv_probe(struct device *dsa_dev, static int mv88e6352_setup_global(struct dsa_switch *ds) { struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); - u32 upstream_port = dsa_upstream_port(ds); - int ret; - u32 reg; - - /* Configure the upstream port, and configure the upstream - * port as the port to which ingress and egress monitor frames - * are to be sent. - */ - reg = upstream_port << GLOBAL_MONITOR_CONTROL_INGRESS_SHIFT | - upstream_port << GLOBAL_MONITOR_CONTROL_EGRESS_SHIFT | - upstream_port << GLOBAL_MONITOR_CONTROL_ARP_SHIFT; - ret = mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_MONITOR_CONTROL, reg); - if (ret) - return ret; /* Disable remote management for now, and set the switch's * DSA device number. diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c index f1cd66073bf7..27551c1f1cd0 100644 --- a/drivers/net/dsa/mv88e6xxx.c +++ b/drivers/net/dsa/mv88e6xxx.c @@ -2922,6 +2922,8 @@ int mv88e6xxx_setup_ports(struct dsa_switch *ds) static int mv88e6xxx_setup_global(struct mv88e6xxx_priv_state *ps) { + struct dsa_switch *ds = ps->ds; + u32 upstream_port = dsa_upstream_port(ds); u16 reg; int err; int i; @@ -2938,6 +2940,16 @@ static int mv88e6xxx_setup_global(struct mv88e6xxx_priv_state *ps) if (err) return err; + /* Configure the upstream port, and configure it as the port to which + * ingress and egress and ARP monitor frames are to be sent. + */ + reg = upstream_port << GLOBAL_MONITOR_CONTROL_INGRESS_SHIFT | + upstream_port << GLOBAL_MONITOR_CONTROL_EGRESS_SHIFT | + upstream_port << GLOBAL_MONITOR_CONTROL_ARP_SHIFT; + err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_MONITOR_CONTROL, reg); + if (err) + return err; + /* Set the default address aging time to 5 minutes, and * enable address learn messages to be sent to all message * ports. 
From 50484ff4d11c1eca0059f3b3d407ecec0f1b83b0 Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Mon, 9 May 2016 13:22:54 -0400 Subject: [PATCH 1439/1649] net: dsa: mv88e6xxx: factorize GLOBAL_CONTROL_2 setup All switch models setup the GLOBAL_CONTROL_2 register with slightly differences. Since the cascade mode is valid even in a single chip setup, factorize such configuration. Signed-off-by: Vivien Didelot Signed-off-by: David S. Miller --- drivers/net/dsa/mv88e6123.c | 15 --------------- drivers/net/dsa/mv88e6131.c | 16 ---------------- drivers/net/dsa/mv88e6171.c | 15 --------------- drivers/net/dsa/mv88e6352.c | 14 -------------- drivers/net/dsa/mv88e6xxx.c | 7 +++++++ 5 files changed, 7 insertions(+), 60 deletions(-) diff --git a/drivers/net/dsa/mv88e6123.c b/drivers/net/dsa/mv88e6123.c index 5df06d8c3ed2..8f3a7c55c178 100644 --- a/drivers/net/dsa/mv88e6123.c +++ b/drivers/net/dsa/mv88e6123.c @@ -51,17 +51,6 @@ static const char *mv88e6123_drv_probe(struct device *dsa_dev, ARRAY_SIZE(mv88e6123_table)); } -static int mv88e6123_setup_global(struct dsa_switch *ds) -{ - struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); - - /* Disable remote management for now, and set the switch's - * DSA device number. - */ - return mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_CONTROL_2, - ds->index & 0x1f); -} - static int mv88e6123_setup(struct dsa_switch *ds) { struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); @@ -73,10 +62,6 @@ static int mv88e6123_setup(struct dsa_switch *ds) if (ret < 0) return ret; - ret = mv88e6123_setup_global(ds); - if (ret < 0) - return ret; - return mv88e6xxx_setup_ports(ds); } diff --git a/drivers/net/dsa/mv88e6131.c b/drivers/net/dsa/mv88e6131.c index c8e3974163dc..b6ca07b9b938 100644 --- a/drivers/net/dsa/mv88e6131.c +++ b/drivers/net/dsa/mv88e6131.c @@ -61,22 +61,6 @@ static const char *mv88e6131_drv_probe(struct device *dsa_dev, static int mv88e6131_setup_global(struct dsa_switch *ds) { struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); - int ret; - - /* Disable cascade port functionality unless this device - * is used in a cascade configuration, and set the switch's - * DSA device number. - */ - if (ds->dst->pd->nr_chips > 1) - ret = mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_CONTROL_2, - GLOBAL_CONTROL_2_MULTIPLE_CASCADE | - (ds->index & 0x1f)); - else - ret = mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_CONTROL_2, - GLOBAL_CONTROL_2_NO_CASCADE | - (ds->index & 0x1f)); - if (ret) - return ret; /* Force the priority of IGMP/MLD snoop frames and ARP frames * to the highest setting. diff --git a/drivers/net/dsa/mv88e6171.c b/drivers/net/dsa/mv88e6171.c index a848aefb4c74..83678adfd97c 100644 --- a/drivers/net/dsa/mv88e6171.c +++ b/drivers/net/dsa/mv88e6171.c @@ -58,17 +58,6 @@ static const char *mv88e6171_drv_probe(struct device *dsa_dev, ARRAY_SIZE(mv88e6171_table)); } -static int mv88e6171_setup_global(struct dsa_switch *ds) -{ - struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); - - /* Disable remote management for now, and set the switch's - * DSA device number. 
- */ - return mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_CONTROL_2, - ds->index & 0x1f); -} - static int mv88e6171_setup(struct dsa_switch *ds) { struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); @@ -80,10 +69,6 @@ static int mv88e6171_setup(struct dsa_switch *ds) if (ret < 0) return ret; - ret = mv88e6171_setup_global(ds); - if (ret < 0) - return ret; - return mv88e6xxx_setup_ports(ds); } diff --git a/drivers/net/dsa/mv88e6352.c b/drivers/net/dsa/mv88e6352.c index e0988706c882..81810ddcc47e 100644 --- a/drivers/net/dsa/mv88e6352.c +++ b/drivers/net/dsa/mv88e6352.c @@ -77,16 +77,6 @@ static const char *mv88e6352_drv_probe(struct device *dsa_dev, ARRAY_SIZE(mv88e6352_table)); } -static int mv88e6352_setup_global(struct dsa_switch *ds) -{ - struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); - - /* Disable remote management for now, and set the switch's - * DSA device number. - */ - return mv88e6xxx_reg_write(ps, REG_GLOBAL, 0x1c, ds->index & 0x1f); -} - static int mv88e6352_setup(struct dsa_switch *ds) { struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); @@ -98,10 +88,6 @@ static int mv88e6352_setup(struct dsa_switch *ds) if (ret < 0) return ret; - ret = mv88e6352_setup_global(ds); - if (ret < 0) - return ret; - return mv88e6xxx_setup_ports(ds); } diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c index 27551c1f1cd0..d8bb4c8e005f 100644 --- a/drivers/net/dsa/mv88e6xxx.c +++ b/drivers/net/dsa/mv88e6xxx.c @@ -2950,6 +2950,13 @@ static int mv88e6xxx_setup_global(struct mv88e6xxx_priv_state *ps) if (err) return err; + /* Disable remote management, and set the switch's DSA device number. */ + err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_CONTROL_2, + GLOBAL_CONTROL_2_MULTIPLE_CASCADE | + (ds->index & 0x1f)); + if (err) + return err; + /* Set the default address aging time to 5 minutes, and * enable address learn messages to be sent to all message * ports. From 8698fd9595c3b90a76c878159328ac6ebd923963 Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Mon, 9 May 2016 13:22:55 -0400 Subject: [PATCH 1440/1649] net: dsa: mv88e6131: drop frames priorities setup 6131 is the only driver which setups the priority of IGMP/MLD snoop frames and ARP frames to the highest setting. Drop such change until we figure out a common configuration for all switch models. Signed-off-by: Vivien Didelot Signed-off-by: David S. Miller --- drivers/net/dsa/mv88e6131.c | 18 ------------------ 1 file changed, 18 deletions(-) diff --git a/drivers/net/dsa/mv88e6131.c b/drivers/net/dsa/mv88e6131.c index b6ca07b9b938..da2832726672 100644 --- a/drivers/net/dsa/mv88e6131.c +++ b/drivers/net/dsa/mv88e6131.c @@ -58,20 +58,6 @@ static const char *mv88e6131_drv_probe(struct device *dsa_dev, ARRAY_SIZE(mv88e6131_table)); } -static int mv88e6131_setup_global(struct dsa_switch *ds) -{ - struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); - - /* Force the priority of IGMP/MLD snoop frames and ARP frames - * to the highest setting. 
- */ - return mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_PRIO_OVERRIDE, - GLOBAL2_PRIO_OVERRIDE_FORCE_SNOOP | - 7 << GLOBAL2_PRIO_OVERRIDE_SNOOP_SHIFT | - GLOBAL2_PRIO_OVERRIDE_FORCE_ARP | - 7 << GLOBAL2_PRIO_OVERRIDE_ARP_SHIFT); -} - static int mv88e6131_setup(struct dsa_switch *ds) { struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); @@ -83,10 +69,6 @@ static int mv88e6131_setup(struct dsa_switch *ds) if (ret < 0) return ret; - ret = mv88e6131_setup_global(ds); - if (ret < 0) - return ret; - return mv88e6xxx_setup_ports(ds); } From a1a6a4d1f76aab009e6e0b1003b9c7bca3991e9c Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Mon, 9 May 2016 13:22:56 -0400 Subject: [PATCH 1441/1649] net: dsa: mv88e6xxx: factorize switch setup Provide a shared mv88e6xxx_setup function to the drivers. Signed-off-by: Vivien Didelot Signed-off-by: David S. Miller --- drivers/net/dsa/mv88e6123.c | 16 +-------- drivers/net/dsa/mv88e6131.c | 16 +-------- drivers/net/dsa/mv88e6171.c | 16 +-------- drivers/net/dsa/mv88e6352.c | 16 +-------- drivers/net/dsa/mv88e6xxx.c | 69 ++++++++++++++++++------------------- drivers/net/dsa/mv88e6xxx.h | 3 +- 6 files changed, 38 insertions(+), 98 deletions(-) diff --git a/drivers/net/dsa/mv88e6123.c b/drivers/net/dsa/mv88e6123.c index 8f3a7c55c178..2bc407b5632d 100644 --- a/drivers/net/dsa/mv88e6123.c +++ b/drivers/net/dsa/mv88e6123.c @@ -51,24 +51,10 @@ static const char *mv88e6123_drv_probe(struct device *dsa_dev, ARRAY_SIZE(mv88e6123_table)); } -static int mv88e6123_setup(struct dsa_switch *ds) -{ - struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); - int ret; - - ps->ds = ds; - - ret = mv88e6xxx_setup_common(ps); - if (ret < 0) - return ret; - - return mv88e6xxx_setup_ports(ds); -} - struct dsa_switch_driver mv88e6123_switch_driver = { .tag_protocol = DSA_TAG_PROTO_EDSA, .probe = mv88e6123_drv_probe, - .setup = mv88e6123_setup, + .setup = mv88e6xxx_setup, .set_addr = mv88e6xxx_set_addr, .phy_read = mv88e6xxx_phy_read, .phy_write = mv88e6xxx_phy_write, diff --git a/drivers/net/dsa/mv88e6131.c b/drivers/net/dsa/mv88e6131.c index da2832726672..22952be7f4de 100644 --- a/drivers/net/dsa/mv88e6131.c +++ b/drivers/net/dsa/mv88e6131.c @@ -58,24 +58,10 @@ static const char *mv88e6131_drv_probe(struct device *dsa_dev, ARRAY_SIZE(mv88e6131_table)); } -static int mv88e6131_setup(struct dsa_switch *ds) -{ - struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); - int ret; - - ps->ds = ds; - - ret = mv88e6xxx_setup_common(ps); - if (ret < 0) - return ret; - - return mv88e6xxx_setup_ports(ds); -} - struct dsa_switch_driver mv88e6131_switch_driver = { .tag_protocol = DSA_TAG_PROTO_DSA, .probe = mv88e6131_drv_probe, - .setup = mv88e6131_setup, + .setup = mv88e6xxx_setup, .set_addr = mv88e6xxx_set_addr, .phy_read = mv88e6xxx_phy_read, .phy_write = mv88e6xxx_phy_write, diff --git a/drivers/net/dsa/mv88e6171.c b/drivers/net/dsa/mv88e6171.c index 83678adfd97c..4bf517a86acb 100644 --- a/drivers/net/dsa/mv88e6171.c +++ b/drivers/net/dsa/mv88e6171.c @@ -58,24 +58,10 @@ static const char *mv88e6171_drv_probe(struct device *dsa_dev, ARRAY_SIZE(mv88e6171_table)); } -static int mv88e6171_setup(struct dsa_switch *ds) -{ - struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); - int ret; - - ps->ds = ds; - - ret = mv88e6xxx_setup_common(ps); - if (ret < 0) - return ret; - - return mv88e6xxx_setup_ports(ds); -} - struct dsa_switch_driver mv88e6171_switch_driver = { .tag_protocol = DSA_TAG_PROTO_EDSA, .probe = mv88e6171_drv_probe, - .setup = mv88e6171_setup, + .setup = mv88e6xxx_setup, .set_addr = mv88e6xxx_set_addr, 
.phy_read = mv88e6xxx_phy_read, .phy_write = mv88e6xxx_phy_write, diff --git a/drivers/net/dsa/mv88e6352.c b/drivers/net/dsa/mv88e6352.c index 81810ddcc47e..d65a90dca0b4 100644 --- a/drivers/net/dsa/mv88e6352.c +++ b/drivers/net/dsa/mv88e6352.c @@ -77,24 +77,10 @@ static const char *mv88e6352_drv_probe(struct device *dsa_dev, ARRAY_SIZE(mv88e6352_table)); } -static int mv88e6352_setup(struct dsa_switch *ds) -{ - struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); - int ret; - - ps->ds = ds; - - ret = mv88e6xxx_setup_common(ps); - if (ret < 0) - return ret; - - return mv88e6xxx_setup_ports(ds); -} - struct dsa_switch_driver mv88e6352_switch_driver = { .tag_protocol = DSA_TAG_PROTO_EDSA, .probe = mv88e6352_drv_probe, - .setup = mv88e6352_setup, + .setup = mv88e6xxx_setup, .set_addr = mv88e6xxx_set_addr, .phy_read = mv88e6xxx_phy_read, .phy_write = mv88e6xxx_phy_write, diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c index d8bb4c8e005f..7ea30502d221 100644 --- a/drivers/net/dsa/mv88e6xxx.c +++ b/drivers/net/dsa/mv88e6xxx.c @@ -2640,14 +2640,12 @@ static int mv88e6xxx_power_on_serdes(struct mv88e6xxx_priv_state *ps) return ret; } -static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port) +static int mv88e6xxx_setup_port(struct mv88e6xxx_priv_state *ps, int port) { - struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); + struct dsa_switch *ds = ps->ds; int ret; u16 reg; - mutex_lock(&ps->smi_mutex); - if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) || mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) || mv88e6xxx_6185_family(ps) || mv88e6xxx_6095_family(ps) || @@ -2676,7 +2674,7 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port) ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_PCS_CTRL, reg); if (ret) - goto abort; + return ret; } /* Port Control: disable Drop-on-Unlock, disable Drop-on-Lock, @@ -2740,7 +2738,7 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port) ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_CONTROL, reg); if (ret) - goto abort; + return ret; } /* If this port is connected to a SerDes, make sure the SerDes is not @@ -2749,14 +2747,14 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port) if (mv88e6xxx_6352_family(ps)) { ret = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_STATUS); if (ret < 0) - goto abort; + return ret; ret &= PORT_STATUS_CMODE_MASK; if ((ret == PORT_STATUS_CMODE_100BASE_X) || (ret == PORT_STATUS_CMODE_1000BASE_X) || (ret == PORT_STATUS_CMODE_SGMII)) { ret = mv88e6xxx_power_on_serdes(ps); if (ret < 0) - goto abort; + return ret; } } @@ -2793,7 +2791,7 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port) ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_CONTROL_2, reg); if (ret) - goto abort; + return ret; } /* Port Association Vector: when learning source addresses @@ -2808,13 +2806,13 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port) ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_ASSOC_VECTOR, reg); if (ret) - goto abort; + return ret; /* Egress rate control 2: disable egress rate control. 
*/ ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_RATE_CONTROL_2, 0x0000); if (ret) - goto abort; + return ret; if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) || mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) || @@ -2826,7 +2824,7 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port) ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_PAUSE_CTRL, 0x0000); if (ret) - goto abort; + return ret; /* Port ATU control: disable limiting the number of * address database entries that this port is allowed @@ -2840,7 +2838,7 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port) ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_PRI_OVERRIDE, 0x0000); if (ret) - goto abort; + return ret; /* Port Ethertype: use the Ethertype DSA Ethertype * value. @@ -2848,14 +2846,14 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port) ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_ETH_TYPE, ETH_P_EDSA); if (ret) - goto abort; + return ret; /* Tag Remap: use an identity 802.1p prio -> switch * prio mapping. */ ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_TAG_REGMAP_0123, 0x3210); if (ret) - goto abort; + return ret; /* Tag Remap 2: use an identity 802.1p prio -> switch * prio mapping. @@ -2863,7 +2861,7 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port) ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_TAG_REGMAP_4567, 0x7654); if (ret) - goto abort; + return ret; } if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) || @@ -2874,7 +2872,7 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port) ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_RATE_CONTROL, 0x0001); if (ret) - goto abort; + return ret; } /* Port Control 1: disable trunking, disable sending @@ -2882,7 +2880,7 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port) */ ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_CONTROL_1, 0x0000); if (ret) - goto abort; + return ret; /* Port based VLAN map: give each port the same default address * database, and allow bidirectional communication between the @@ -2890,33 +2888,20 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port) */ ret = _mv88e6xxx_port_fid_set(ps, port, 0); if (ret) - goto abort; + return ret; ret = _mv88e6xxx_port_based_vlan_map(ps, port); if (ret) - goto abort; + return ret; /* Default VLAN ID and priority: don't set a default VLAN * ID, and set the default packet priority to zero. 
*/ ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_DEFAULT_VLAN, 0x0000); -abort: - mutex_unlock(&ps->smi_mutex); - return ret; -} + if (ret) + return ret; -int mv88e6xxx_setup_ports(struct dsa_switch *ds) -{ - struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); - int ret; - int i; - - for (i = 0; i < ps->info->num_ports; i++) { - ret = mv88e6xxx_setup_port(ds, i); - if (ret < 0) - return ret; - } return 0; } @@ -3123,9 +3108,13 @@ static int mv88e6xxx_setup_global(struct mv88e6xxx_priv_state *ps) return err; } -int mv88e6xxx_setup_common(struct mv88e6xxx_priv_state *ps) +int mv88e6xxx_setup(struct dsa_switch *ds) { + struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); int err; + int i; + + ps->ds = ds; mutex_init(&ps->smi_mutex); @@ -3144,6 +3133,14 @@ int mv88e6xxx_setup_common(struct mv88e6xxx_priv_state *ps) goto unlock; err = mv88e6xxx_setup_global(ps); + if (err) + goto unlock; + + for (i = 0; i < ps->info->num_ports; i++) { + err = mv88e6xxx_setup_port(ps, i); + if (err) + goto unlock; + } unlock: mutex_unlock(&ps->smi_mutex); diff --git a/drivers/net/dsa/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx.h index 62f6fc9510aa..a131827cb26d 100644 --- a/drivers/net/dsa/mv88e6xxx.h +++ b/drivers/net/dsa/mv88e6xxx.h @@ -588,8 +588,7 @@ const char *mv88e6xxx_drv_probe(struct device *dsa_dev, struct device *host_dev, const struct mv88e6xxx_info *table, unsigned int num); -int mv88e6xxx_setup_ports(struct dsa_switch *ds); -int mv88e6xxx_setup_common(struct mv88e6xxx_priv_state *ps); +int mv88e6xxx_setup(struct dsa_switch *ds); int mv88e6xxx_reg_read(struct mv88e6xxx_priv_state *ps, int addr, int reg); int mv88e6xxx_reg_write(struct mv88e6xxx_priv_state *ps, int addr, int reg, u16 val); From b9729e53ade9be2637b46ac98fd85c7eac1b77c8 Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Mon, 9 May 2016 13:22:57 -0400 Subject: [PATCH 1442/1649] net: dsa: mv88e6131: use EDSA tag protocol 6131 is the only driver to set the tag protocol to DSA_TAG_PROTO_DSA. Since it works fine with DSA_TAG_PROTO_EDSA, change its value, like all other mv88e6xxx drivers. Signed-off-by: Vivien Didelot Signed-off-by: David S. Miller --- drivers/net/dsa/Kconfig | 2 +- drivers/net/dsa/mv88e6131.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig index 4aaadced6b81..7e01dce09904 100644 --- a/drivers/net/dsa/Kconfig +++ b/drivers/net/dsa/Kconfig @@ -17,7 +17,7 @@ config NET_DSA_MV88E6131 tristate "Marvell 88E6085/6095/6095F/6131 ethernet switch chip support" depends on NET_DSA select NET_DSA_MV88E6XXX - select NET_DSA_TAG_DSA + select NET_DSA_TAG_EDSA ---help--- This enables support for the Marvell 88E6085/6095/6095F/6131 ethernet switch chips. 
diff --git a/drivers/net/dsa/mv88e6131.c b/drivers/net/dsa/mv88e6131.c index 22952be7f4de..bbad199b50aa 100644 --- a/drivers/net/dsa/mv88e6131.c +++ b/drivers/net/dsa/mv88e6131.c @@ -59,7 +59,7 @@ static const char *mv88e6131_drv_probe(struct device *dsa_dev, } struct dsa_switch_driver mv88e6131_switch_driver = { - .tag_protocol = DSA_TAG_PROTO_DSA, + .tag_protocol = DSA_TAG_PROTO_EDSA, .probe = mv88e6131_drv_probe, .setup = mv88e6xxx_setup, .set_addr = mv88e6xxx_set_addr, From f81ec90fe9cbf512f3c632130a37c6d353fa94ea Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Mon, 9 May 2016 13:22:58 -0400 Subject: [PATCH 1443/1649] net: dsa: mv88e6xxx: factorize the switch driver Now that all drivers support the same set of functions and the same setup code, drop every model-specific DSA switch driver and replace them with a common mv88e6xxx driver. This merges the info tables into one, removes the function exports, the model-specific files, and update the defconfigs. Signed-off-by: Vivien Didelot Signed-off-by: David S. Miller --- arch/arm/configs/multi_v5_defconfig | 5 +- arch/arm/configs/mvebu_v7_defconfig | 2 +- arch/arm/configs/orion5x_defconfig | 3 +- arch/tile/configs/tilegx_defconfig | 3 +- arch/tile/configs/tilepro_defconfig | 3 +- drivers/net/dsa/Kconfig | 40 +--- drivers/net/dsa/Makefile | 15 +- drivers/net/dsa/mv88e6123.c | 93 -------- drivers/net/dsa/mv88e6131.c | 101 -------- drivers/net/dsa/mv88e6171.c | 101 -------- drivers/net/dsa/mv88e6352.c | 121 ---------- drivers/net/dsa/mv88e6xxx.c | 354 +++++++++++++++++++++------- drivers/net/dsa/mv88e6xxx.h | 91 ++----- 13 files changed, 302 insertions(+), 630 deletions(-) delete mode 100644 drivers/net/dsa/mv88e6123.c delete mode 100644 drivers/net/dsa/mv88e6131.c delete mode 100644 drivers/net/dsa/mv88e6171.c delete mode 100644 drivers/net/dsa/mv88e6352.c diff --git a/arch/arm/configs/multi_v5_defconfig b/arch/arm/configs/multi_v5_defconfig index e11d99d529ee..690352d3ba4c 100644 --- a/arch/arm/configs/multi_v5_defconfig +++ b/arch/arm/configs/multi_v5_defconfig @@ -91,10 +91,7 @@ CONFIG_SATA_AHCI=y CONFIG_SATA_MV=y CONFIG_NETDEVICES=y CONFIG_NET_DSA_MV88E6060=y -CONFIG_NET_DSA_MV88E6131=y -CONFIG_NET_DSA_MV88E6123=y -CONFIG_NET_DSA_MV88E6171=y -CONFIG_NET_DSA_MV88E6352=y +CONFIG_NET_DSA_MV88E6XXX=y CONFIG_MV643XX_ETH=y CONFIG_R8169=y CONFIG_MARVELL_PHY=y diff --git a/arch/arm/configs/mvebu_v7_defconfig b/arch/arm/configs/mvebu_v7_defconfig index dc5797a2efab..6492407efd7e 100644 --- a/arch/arm/configs/mvebu_v7_defconfig +++ b/arch/arm/configs/mvebu_v7_defconfig @@ -66,7 +66,7 @@ CONFIG_SATA_AHCI=y CONFIG_AHCI_MVEBU=y CONFIG_SATA_MV=y CONFIG_NETDEVICES=y -CONFIG_NET_DSA_MV88E6171=y +CONFIG_NET_DSA_MV88E6XXX=y CONFIG_MV643XX_ETH=y CONFIG_MVNETA=y CONFIG_MVPP2=y diff --git a/arch/arm/configs/orion5x_defconfig b/arch/arm/configs/orion5x_defconfig index 6a5bc27538f1..27a70a7a50f6 100644 --- a/arch/arm/configs/orion5x_defconfig +++ b/arch/arm/configs/orion5x_defconfig @@ -85,8 +85,7 @@ CONFIG_ATA=y CONFIG_SATA_MV=y CONFIG_NETDEVICES=y CONFIG_MII=y -CONFIG_NET_DSA_MV88E6131=y -CONFIG_NET_DSA_MV88E6123=y +CONFIG_NET_DSA_MV88E6XXX=y CONFIG_MV643XX_ETH=y CONFIG_MARVELL_PHY=y # CONFIG_INPUT_MOUSEDEV is not set diff --git a/arch/tile/configs/tilegx_defconfig b/arch/tile/configs/tilegx_defconfig index 3f3dfb8b150a..718905557f7e 100644 --- a/arch/tile/configs/tilegx_defconfig +++ b/arch/tile/configs/tilegx_defconfig @@ -221,8 +221,7 @@ CONFIG_NETCONSOLE_DYNAMIC=y CONFIG_TUN=y CONFIG_VETH=m CONFIG_NET_DSA_MV88E6060=y -CONFIG_NET_DSA_MV88E6131=y 
-CONFIG_NET_DSA_MV88E6123=y +CONFIG_NET_DSA_MV88E6XXX=y CONFIG_SKY2=y CONFIG_PTP_1588_CLOCK_TILEGX=y # CONFIG_WLAN is not set diff --git a/arch/tile/configs/tilepro_defconfig b/arch/tile/configs/tilepro_defconfig index ef9e27eb2f50..dc85468afd5e 100644 --- a/arch/tile/configs/tilepro_defconfig +++ b/arch/tile/configs/tilepro_defconfig @@ -340,8 +340,7 @@ CONFIG_NETCONSOLE_DYNAMIC=y CONFIG_TUN=y CONFIG_VETH=m CONFIG_NET_DSA_MV88E6060=y -CONFIG_NET_DSA_MV88E6131=y -CONFIG_NET_DSA_MV88E6123=y +CONFIG_NET_DSA_MV88E6XXX=y # CONFIG_NET_VENDOR_3COM is not set CONFIG_E1000E=y # CONFIG_WLAN is not set diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig index 7e01dce09904..200663c43ce9 100644 --- a/drivers/net/dsa/Kconfig +++ b/drivers/net/dsa/Kconfig @@ -1,10 +1,6 @@ menu "Distributed Switch Architecture drivers" depends on HAVE_NET_DSA -config NET_DSA_MV88E6XXX - tristate - default n - config NET_DSA_MV88E6060 tristate "Marvell 88E6060 ethernet switch chip support" depends on NET_DSA @@ -13,41 +9,13 @@ config NET_DSA_MV88E6060 This enables support for the Marvell 88E6060 ethernet switch chip. -config NET_DSA_MV88E6131 - tristate "Marvell 88E6085/6095/6095F/6131 ethernet switch chip support" +config NET_DSA_MV88E6XXX + tristate "Marvell 88E6xxx Ethernet switch chip support" depends on NET_DSA - select NET_DSA_MV88E6XXX select NET_DSA_TAG_EDSA ---help--- - This enables support for the Marvell 88E6085/6095/6095F/6131 - ethernet switch chips. - -config NET_DSA_MV88E6123 - tristate "Marvell 88E6123/6161/6165 ethernet switch chip support" - depends on NET_DSA - select NET_DSA_MV88E6XXX - select NET_DSA_TAG_EDSA - ---help--- - This enables support for the Marvell 88E6123/6161/6165 - ethernet switch chips. - -config NET_DSA_MV88E6171 - tristate "Marvell 88E6171/6175/6350/6351 ethernet switch chip support" - depends on NET_DSA - select NET_DSA_MV88E6XXX - select NET_DSA_TAG_EDSA - ---help--- - This enables support for the Marvell 88E6171/6175/6350/6351 - ethernet switches chips. - -config NET_DSA_MV88E6352 - tristate "Marvell 88E6172/6176/6320/6321/6352 ethernet switch chip support" - depends on NET_DSA - select NET_DSA_MV88E6XXX - select NET_DSA_TAG_EDSA - ---help--- - This enables support for the Marvell 88E6172, 88E6176, 88E6320, - 88E6321 and 88E6352 ethernet switch chips. + This enables support for most of the Marvell 88E6xxx models of + Ethernet switch chips, except 88E6060. 
config NET_DSA_BCM_SF2 tristate "Broadcom Starfighter 2 Ethernet switch support" diff --git a/drivers/net/dsa/Makefile b/drivers/net/dsa/Makefile index a6e09939be65..76b751dd9efd 100644 --- a/drivers/net/dsa/Makefile +++ b/drivers/net/dsa/Makefile @@ -1,16 +1,3 @@ obj-$(CONFIG_NET_DSA_MV88E6060) += mv88e6060.o -obj-$(CONFIG_NET_DSA_MV88E6XXX) += mv88e6xxx_drv.o -mv88e6xxx_drv-y += mv88e6xxx.o -ifdef CONFIG_NET_DSA_MV88E6123 -mv88e6xxx_drv-y += mv88e6123.o -endif -ifdef CONFIG_NET_DSA_MV88E6131 -mv88e6xxx_drv-y += mv88e6131.o -endif -ifdef CONFIG_NET_DSA_MV88E6352 -mv88e6xxx_drv-y += mv88e6352.o -endif -ifdef CONFIG_NET_DSA_MV88E6171 -mv88e6xxx_drv-y += mv88e6171.o -endif +obj-$(CONFIG_NET_DSA_MV88E6XXX) += mv88e6xxx.o obj-$(CONFIG_NET_DSA_BCM_SF2) += bcm_sf2.o diff --git a/drivers/net/dsa/mv88e6123.c b/drivers/net/dsa/mv88e6123.c deleted file mode 100644 index 2bc407b5632d..000000000000 --- a/drivers/net/dsa/mv88e6123.c +++ /dev/null @@ -1,93 +0,0 @@ -/* - * net/dsa/mv88e6123_61_65.c - Marvell 88e6123/6161/6165 switch chip support - * Copyright (c) 2008-2009 Marvell Semiconductor - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - */ - -#include -#include -#include -#include -#include -#include -#include -#include "mv88e6xxx.h" - -static const struct mv88e6xxx_info mv88e6123_table[] = { - { - .prod_num = PORT_SWITCH_ID_PROD_NUM_6123, - .family = MV88E6XXX_FAMILY_6165, - .name = "Marvell 88E6123", - .num_databases = 4096, - .num_ports = 3, - .flags = MV88E6XXX_FLAGS_FAMILY_6165, - }, { - .prod_num = PORT_SWITCH_ID_PROD_NUM_6161, - .family = MV88E6XXX_FAMILY_6165, - .name = "Marvell 88E6161", - .num_databases = 4096, - .num_ports = 6, - .flags = MV88E6XXX_FLAGS_FAMILY_6165, - }, { - .prod_num = PORT_SWITCH_ID_PROD_NUM_6165, - .family = MV88E6XXX_FAMILY_6165, - .name = "Marvell 88E6165", - .num_databases = 4096, - .num_ports = 6, - .flags = MV88E6XXX_FLAGS_FAMILY_6165, - } -}; - -static const char *mv88e6123_drv_probe(struct device *dsa_dev, - struct device *host_dev, int sw_addr, - void **priv) -{ - return mv88e6xxx_drv_probe(dsa_dev, host_dev, sw_addr, priv, - mv88e6123_table, - ARRAY_SIZE(mv88e6123_table)); -} - -struct dsa_switch_driver mv88e6123_switch_driver = { - .tag_protocol = DSA_TAG_PROTO_EDSA, - .probe = mv88e6123_drv_probe, - .setup = mv88e6xxx_setup, - .set_addr = mv88e6xxx_set_addr, - .phy_read = mv88e6xxx_phy_read, - .phy_write = mv88e6xxx_phy_write, - .set_eee = mv88e6xxx_set_eee, - .get_eee = mv88e6xxx_get_eee, - .get_strings = mv88e6xxx_get_strings, - .get_ethtool_stats = mv88e6xxx_get_ethtool_stats, - .get_sset_count = mv88e6xxx_get_sset_count, - .adjust_link = mv88e6xxx_adjust_link, -#ifdef CONFIG_NET_DSA_HWMON - .get_temp = mv88e6xxx_get_temp, - .get_temp_limit = mv88e6xxx_get_temp_limit, - .set_temp_limit = mv88e6xxx_set_temp_limit, - .get_temp_alarm = mv88e6xxx_get_temp_alarm, -#endif - .get_eeprom = mv88e6xxx_get_eeprom, - .set_eeprom = mv88e6xxx_set_eeprom, - .get_regs_len = mv88e6xxx_get_regs_len, - .get_regs = mv88e6xxx_get_regs, - .port_bridge_join = mv88e6xxx_port_bridge_join, - .port_bridge_leave = mv88e6xxx_port_bridge_leave, - .port_stp_state_set = mv88e6xxx_port_stp_state_set, - .port_vlan_filtering = mv88e6xxx_port_vlan_filtering, - .port_vlan_prepare = mv88e6xxx_port_vlan_prepare, - .port_vlan_add = mv88e6xxx_port_vlan_add, - .port_vlan_del = 
mv88e6xxx_port_vlan_del, - .port_vlan_dump = mv88e6xxx_port_vlan_dump, - .port_fdb_prepare = mv88e6xxx_port_fdb_prepare, - .port_fdb_add = mv88e6xxx_port_fdb_add, - .port_fdb_del = mv88e6xxx_port_fdb_del, - .port_fdb_dump = mv88e6xxx_port_fdb_dump, -}; - -MODULE_ALIAS("platform:mv88e6123"); -MODULE_ALIAS("platform:mv88e6161"); -MODULE_ALIAS("platform:mv88e6165"); diff --git a/drivers/net/dsa/mv88e6131.c b/drivers/net/dsa/mv88e6131.c deleted file mode 100644 index bbad199b50aa..000000000000 --- a/drivers/net/dsa/mv88e6131.c +++ /dev/null @@ -1,101 +0,0 @@ -/* - * net/dsa/mv88e6131.c - Marvell 88e6095/6095f/6131 switch chip support - * Copyright (c) 2008-2009 Marvell Semiconductor - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - */ - -#include -#include -#include -#include -#include -#include -#include -#include "mv88e6xxx.h" - -static const struct mv88e6xxx_info mv88e6131_table[] = { - { - .prod_num = PORT_SWITCH_ID_PROD_NUM_6095, - .family = MV88E6XXX_FAMILY_6095, - .name = "Marvell 88E6095/88E6095F", - .num_databases = 256, - .num_ports = 11, - .flags = MV88E6XXX_FLAGS_FAMILY_6095, - }, { - .prod_num = PORT_SWITCH_ID_PROD_NUM_6085, - .family = MV88E6XXX_FAMILY_6097, - .name = "Marvell 88E6085", - .num_databases = 4096, - .num_ports = 10, - .flags = MV88E6XXX_FLAGS_FAMILY_6097, - }, { - .prod_num = PORT_SWITCH_ID_PROD_NUM_6131, - .family = MV88E6XXX_FAMILY_6185, - .name = "Marvell 88E6131", - .num_databases = 256, - .num_ports = 8, - .flags = MV88E6XXX_FLAGS_FAMILY_6185, - }, { - .prod_num = PORT_SWITCH_ID_PROD_NUM_6185, - .family = MV88E6XXX_FAMILY_6185, - .name = "Marvell 88E6185", - .num_databases = 256, - .num_ports = 10, - .flags = MV88E6XXX_FLAGS_FAMILY_6185, - } -}; - -static const char *mv88e6131_drv_probe(struct device *dsa_dev, - struct device *host_dev, int sw_addr, - void **priv) -{ - return mv88e6xxx_drv_probe(dsa_dev, host_dev, sw_addr, priv, - mv88e6131_table, - ARRAY_SIZE(mv88e6131_table)); -} - -struct dsa_switch_driver mv88e6131_switch_driver = { - .tag_protocol = DSA_TAG_PROTO_EDSA, - .probe = mv88e6131_drv_probe, - .setup = mv88e6xxx_setup, - .set_addr = mv88e6xxx_set_addr, - .phy_read = mv88e6xxx_phy_read, - .phy_write = mv88e6xxx_phy_write, - .set_eee = mv88e6xxx_set_eee, - .get_eee = mv88e6xxx_get_eee, - .get_strings = mv88e6xxx_get_strings, - .get_ethtool_stats = mv88e6xxx_get_ethtool_stats, - .get_sset_count = mv88e6xxx_get_sset_count, - .get_eeprom = mv88e6xxx_get_eeprom, - .set_eeprom = mv88e6xxx_set_eeprom, - .get_regs_len = mv88e6xxx_get_regs_len, - .get_regs = mv88e6xxx_get_regs, -#ifdef CONFIG_NET_DSA_HWMON - .get_temp = mv88e6xxx_get_temp, - .get_temp_limit = mv88e6xxx_get_temp_limit, - .set_temp_limit = mv88e6xxx_set_temp_limit, - .get_temp_alarm = mv88e6xxx_get_temp_alarm, -#endif - .adjust_link = mv88e6xxx_adjust_link, - .port_bridge_join = mv88e6xxx_port_bridge_join, - .port_bridge_leave = mv88e6xxx_port_bridge_leave, - .port_stp_state_set = mv88e6xxx_port_stp_state_set, - .port_vlan_filtering = mv88e6xxx_port_vlan_filtering, - .port_vlan_prepare = mv88e6xxx_port_vlan_prepare, - .port_vlan_add = mv88e6xxx_port_vlan_add, - .port_vlan_del = mv88e6xxx_port_vlan_del, - .port_vlan_dump = mv88e6xxx_port_vlan_dump, - .port_fdb_prepare = mv88e6xxx_port_fdb_prepare, - .port_fdb_add = mv88e6xxx_port_fdb_add, - .port_fdb_del = mv88e6xxx_port_fdb_del, - 
.port_fdb_dump = mv88e6xxx_port_fdb_dump, -}; - -MODULE_ALIAS("platform:mv88e6085"); -MODULE_ALIAS("platform:mv88e6095"); -MODULE_ALIAS("platform:mv88e6095f"); -MODULE_ALIAS("platform:mv88e6131"); diff --git a/drivers/net/dsa/mv88e6171.c b/drivers/net/dsa/mv88e6171.c deleted file mode 100644 index 4bf517a86acb..000000000000 --- a/drivers/net/dsa/mv88e6171.c +++ /dev/null @@ -1,101 +0,0 @@ -/* net/dsa/mv88e6171.c - Marvell 88e6171 switch chip support - * Copyright (c) 2008-2009 Marvell Semiconductor - * Copyright (c) 2014 Claudio Leite - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - */ - -#include -#include -#include -#include -#include -#include -#include -#include "mv88e6xxx.h" - -static const struct mv88e6xxx_info mv88e6171_table[] = { - { - .prod_num = PORT_SWITCH_ID_PROD_NUM_6171, - .family = MV88E6XXX_FAMILY_6351, - .name = "Marvell 88E6171", - .num_databases = 4096, - .num_ports = 7, - .flags = MV88E6XXX_FLAGS_FAMILY_6351, - }, { - .prod_num = PORT_SWITCH_ID_PROD_NUM_6175, - .family = MV88E6XXX_FAMILY_6351, - .name = "Marvell 88E6175", - .num_databases = 4096, - .num_ports = 7, - .flags = MV88E6XXX_FLAGS_FAMILY_6351, - }, { - .prod_num = PORT_SWITCH_ID_PROD_NUM_6350, - .family = MV88E6XXX_FAMILY_6351, - .name = "Marvell 88E6350", - .num_databases = 4096, - .num_ports = 7, - .flags = MV88E6XXX_FLAGS_FAMILY_6351, - }, { - .prod_num = PORT_SWITCH_ID_PROD_NUM_6351, - .family = MV88E6XXX_FAMILY_6351, - .name = "Marvell 88E6351", - .num_databases = 4096, - .num_ports = 7, - .flags = MV88E6XXX_FLAGS_FAMILY_6351, - } -}; - -static const char *mv88e6171_drv_probe(struct device *dsa_dev, - struct device *host_dev, int sw_addr, - void **priv) -{ - return mv88e6xxx_drv_probe(dsa_dev, host_dev, sw_addr, priv, - mv88e6171_table, - ARRAY_SIZE(mv88e6171_table)); -} - -struct dsa_switch_driver mv88e6171_switch_driver = { - .tag_protocol = DSA_TAG_PROTO_EDSA, - .probe = mv88e6171_drv_probe, - .setup = mv88e6xxx_setup, - .set_addr = mv88e6xxx_set_addr, - .phy_read = mv88e6xxx_phy_read, - .phy_write = mv88e6xxx_phy_write, - .set_eee = mv88e6xxx_set_eee, - .get_eee = mv88e6xxx_get_eee, - .get_strings = mv88e6xxx_get_strings, - .get_ethtool_stats = mv88e6xxx_get_ethtool_stats, - .get_sset_count = mv88e6xxx_get_sset_count, - .adjust_link = mv88e6xxx_adjust_link, -#ifdef CONFIG_NET_DSA_HWMON - .get_temp = mv88e6xxx_get_temp, - .get_temp_limit = mv88e6xxx_get_temp_limit, - .set_temp_limit = mv88e6xxx_set_temp_limit, - .get_temp_alarm = mv88e6xxx_get_temp_alarm, -#endif - .get_eeprom = mv88e6xxx_get_eeprom, - .set_eeprom = mv88e6xxx_set_eeprom, - .get_regs_len = mv88e6xxx_get_regs_len, - .get_regs = mv88e6xxx_get_regs, - .port_bridge_join = mv88e6xxx_port_bridge_join, - .port_bridge_leave = mv88e6xxx_port_bridge_leave, - .port_stp_state_set = mv88e6xxx_port_stp_state_set, - .port_vlan_filtering = mv88e6xxx_port_vlan_filtering, - .port_vlan_prepare = mv88e6xxx_port_vlan_prepare, - .port_vlan_add = mv88e6xxx_port_vlan_add, - .port_vlan_del = mv88e6xxx_port_vlan_del, - .port_vlan_dump = mv88e6xxx_port_vlan_dump, - .port_fdb_prepare = mv88e6xxx_port_fdb_prepare, - .port_fdb_add = mv88e6xxx_port_fdb_add, - .port_fdb_del = mv88e6xxx_port_fdb_del, - .port_fdb_dump = mv88e6xxx_port_fdb_dump, -}; - -MODULE_ALIAS("platform:mv88e6171"); -MODULE_ALIAS("platform:mv88e6175"); -MODULE_ALIAS("platform:mv88e6350"); 
-MODULE_ALIAS("platform:mv88e6351"); diff --git a/drivers/net/dsa/mv88e6352.c b/drivers/net/dsa/mv88e6352.c deleted file mode 100644 index d65a90dca0b4..000000000000 --- a/drivers/net/dsa/mv88e6352.c +++ /dev/null @@ -1,121 +0,0 @@ -/* - * net/dsa/mv88e6352.c - Marvell 88e6352 switch chip support - * - * Copyright (c) 2014 Guenter Roeck - * - * Derived from mv88e6123_61_65.c - * Copyright (c) 2008-2009 Marvell Semiconductor - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include "mv88e6xxx.h" - -static const struct mv88e6xxx_info mv88e6352_table[] = { - { - .prod_num = PORT_SWITCH_ID_PROD_NUM_6320, - .family = MV88E6XXX_FAMILY_6320, - .name = "Marvell 88E6320", - .num_databases = 4096, - .num_ports = 7, - .flags = MV88E6XXX_FLAGS_FAMILY_6320, - }, { - .prod_num = PORT_SWITCH_ID_PROD_NUM_6321, - .family = MV88E6XXX_FAMILY_6320, - .name = "Marvell 88E6321", - .num_databases = 4096, - .num_ports = 7, - .flags = MV88E6XXX_FLAGS_FAMILY_6320, - }, { - .prod_num = PORT_SWITCH_ID_PROD_NUM_6172, - .family = MV88E6XXX_FAMILY_6352, - .name = "Marvell 88E6172", - .num_databases = 4096, - .num_ports = 7, - .flags = MV88E6XXX_FLAGS_FAMILY_6352, - }, { - .prod_num = PORT_SWITCH_ID_PROD_NUM_6176, - .family = MV88E6XXX_FAMILY_6352, - .name = "Marvell 88E6176", - .num_databases = 4096, - .num_ports = 7, - .flags = MV88E6XXX_FLAGS_FAMILY_6352, - }, { - .prod_num = PORT_SWITCH_ID_PROD_NUM_6240, - .family = MV88E6XXX_FAMILY_6352, - .name = "Marvell 88E6240", - .num_databases = 4096, - .num_ports = 7, - .flags = MV88E6XXX_FLAGS_FAMILY_6352, - }, { - .prod_num = PORT_SWITCH_ID_PROD_NUM_6352, - .family = MV88E6XXX_FAMILY_6352, - .name = "Marvell 88E6352", - .num_databases = 4096, - .num_ports = 7, - .flags = MV88E6XXX_FLAGS_FAMILY_6352, - } -}; - -static const char *mv88e6352_drv_probe(struct device *dsa_dev, - struct device *host_dev, int sw_addr, - void **priv) -{ - return mv88e6xxx_drv_probe(dsa_dev, host_dev, sw_addr, priv, - mv88e6352_table, - ARRAY_SIZE(mv88e6352_table)); -} - -struct dsa_switch_driver mv88e6352_switch_driver = { - .tag_protocol = DSA_TAG_PROTO_EDSA, - .probe = mv88e6352_drv_probe, - .setup = mv88e6xxx_setup, - .set_addr = mv88e6xxx_set_addr, - .phy_read = mv88e6xxx_phy_read, - .phy_write = mv88e6xxx_phy_write, - .get_strings = mv88e6xxx_get_strings, - .get_ethtool_stats = mv88e6xxx_get_ethtool_stats, - .get_sset_count = mv88e6xxx_get_sset_count, - .adjust_link = mv88e6xxx_adjust_link, - .set_eee = mv88e6xxx_set_eee, - .get_eee = mv88e6xxx_get_eee, -#ifdef CONFIG_NET_DSA_HWMON - .get_temp = mv88e6xxx_get_temp, - .get_temp_limit = mv88e6xxx_get_temp_limit, - .set_temp_limit = mv88e6xxx_set_temp_limit, - .get_temp_alarm = mv88e6xxx_get_temp_alarm, -#endif - .get_eeprom = mv88e6xxx_get_eeprom, - .set_eeprom = mv88e6xxx_set_eeprom, - .get_regs_len = mv88e6xxx_get_regs_len, - .get_regs = mv88e6xxx_get_regs, - .port_bridge_join = mv88e6xxx_port_bridge_join, - .port_bridge_leave = mv88e6xxx_port_bridge_leave, - .port_stp_state_set = mv88e6xxx_port_stp_state_set, - .port_vlan_filtering = mv88e6xxx_port_vlan_filtering, - .port_vlan_prepare = mv88e6xxx_port_vlan_prepare, - .port_vlan_add = mv88e6xxx_port_vlan_add, - .port_vlan_del = mv88e6xxx_port_vlan_del, - .port_vlan_dump = mv88e6xxx_port_vlan_dump, - 
.port_fdb_prepare = mv88e6xxx_port_fdb_prepare, - .port_fdb_add = mv88e6xxx_port_fdb_add, - .port_fdb_del = mv88e6xxx_port_fdb_del, - .port_fdb_dump = mv88e6xxx_port_fdb_dump, -}; - -MODULE_ALIAS("platform:mv88e6172"); -MODULE_ALIAS("platform:mv88e6176"); -MODULE_ALIAS("platform:mv88e6320"); -MODULE_ALIAS("platform:mv88e6321"); -MODULE_ALIAS("platform:mv88e6352"); diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c index 7ea30502d221..1e5ca8e0f48e 100644 --- a/drivers/net/dsa/mv88e6xxx.c +++ b/drivers/net/dsa/mv88e6xxx.c @@ -467,8 +467,8 @@ static bool mv88e6xxx_has_stu(struct mv88e6xxx_priv_state *ps) * phy. However, in the case of a fixed link phy, we force the port * settings from the fixed link settings. */ -void mv88e6xxx_adjust_link(struct dsa_switch *ds, int port, - struct phy_device *phydev) +static void mv88e6xxx_adjust_link(struct dsa_switch *ds, int port, + struct phy_device *phydev) { struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); u32 reg; @@ -714,7 +714,8 @@ static uint64_t _mv88e6xxx_get_ethtool_stat(struct mv88e6xxx_priv_state *ps, return value; } -void mv88e6xxx_get_strings(struct dsa_switch *ds, int port, uint8_t *data) +static void mv88e6xxx_get_strings(struct dsa_switch *ds, int port, + uint8_t *data) { struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); struct mv88e6xxx_hw_stat *stat; @@ -730,7 +731,7 @@ void mv88e6xxx_get_strings(struct dsa_switch *ds, int port, uint8_t *data) } } -int mv88e6xxx_get_sset_count(struct dsa_switch *ds) +static int mv88e6xxx_get_sset_count(struct dsa_switch *ds) { struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); struct mv88e6xxx_hw_stat *stat; @@ -744,9 +745,8 @@ int mv88e6xxx_get_sset_count(struct dsa_switch *ds) return j; } -void -mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds, - int port, uint64_t *data) +static void mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds, int port, + uint64_t *data) { struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); struct mv88e6xxx_hw_stat *stat; @@ -771,13 +771,13 @@ mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds, mutex_unlock(&ps->smi_mutex); } -int mv88e6xxx_get_regs_len(struct dsa_switch *ds, int port) +static int mv88e6xxx_get_regs_len(struct dsa_switch *ds, int port) { return 32 * sizeof(u16); } -void mv88e6xxx_get_regs(struct dsa_switch *ds, int port, - struct ethtool_regs *regs, void *_p) +static void mv88e6xxx_get_regs(struct dsa_switch *ds, int port, + struct ethtool_regs *regs, void *_p) { struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); u16 *p = _p; @@ -876,8 +876,8 @@ error: return ret; } -int mv88e6xxx_get_eeprom(struct dsa_switch *ds, struct ethtool_eeprom *eeprom, - u8 *data) +static int mv88e6xxx_get_eeprom(struct dsa_switch *ds, + struct ethtool_eeprom *eeprom, u8 *data) { struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); int offset; @@ -982,8 +982,8 @@ error: return ret; } -int mv88e6xxx_set_eeprom(struct dsa_switch *ds, struct ethtool_eeprom *eeprom, - u8 *data) +static int mv88e6xxx_set_eeprom(struct dsa_switch *ds, + struct ethtool_eeprom *eeprom, u8 *data) { struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); int offset; @@ -1104,7 +1104,8 @@ static int _mv88e6xxx_phy_write_indirect(struct mv88e6xxx_priv_state *ps, return _mv88e6xxx_phy_wait(ps); } -int mv88e6xxx_get_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e) +static int mv88e6xxx_get_eee(struct dsa_switch *ds, int port, + struct ethtool_eee *e) { struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); int reg; @@ -1133,8 +1134,8 @@ out: return reg; } -int mv88e6xxx_set_eee(struct dsa_switch *ds, 
int port, - struct phy_device *phydev, struct ethtool_eee *e) +static int mv88e6xxx_set_eee(struct dsa_switch *ds, int port, + struct phy_device *phydev, struct ethtool_eee *e) { struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); int reg; @@ -1364,7 +1365,8 @@ static int _mv88e6xxx_port_based_vlan_map(struct mv88e6xxx_priv_state *ps, return _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_BASE_VLAN, reg); } -void mv88e6xxx_port_stp_state_set(struct dsa_switch *ds, int port, u8 state) +static void mv88e6xxx_port_stp_state_set(struct dsa_switch *ds, int port, + u8 state) { struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); int stp_state; @@ -1587,9 +1589,9 @@ static int _mv88e6xxx_vtu_getnext(struct mv88e6xxx_priv_state *ps, return 0; } -int mv88e6xxx_port_vlan_dump(struct dsa_switch *ds, int port, - struct switchdev_obj_port_vlan *vlan, - int (*cb)(struct switchdev_obj *obj)) +static int mv88e6xxx_port_vlan_dump(struct dsa_switch *ds, int port, + struct switchdev_obj_port_vlan *vlan, + int (*cb)(struct switchdev_obj *obj)) { struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); struct mv88e6xxx_vtu_stu_entry next; @@ -2014,8 +2016,8 @@ static const char * const mv88e6xxx_port_8021q_mode_names[] = { [PORT_CONTROL_2_8021Q_SECURE] = "Secure", }; -int mv88e6xxx_port_vlan_filtering(struct dsa_switch *ds, int port, - bool vlan_filtering) +static int mv88e6xxx_port_vlan_filtering(struct dsa_switch *ds, int port, + bool vlan_filtering) { struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); u16 old, new = vlan_filtering ? PORT_CONTROL_2_8021Q_SECURE : @@ -2054,9 +2056,9 @@ unlock: return ret; } -int mv88e6xxx_port_vlan_prepare(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_vlan *vlan, - struct switchdev_trans *trans) +static int mv88e6xxx_port_vlan_prepare(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_vlan *vlan, + struct switchdev_trans *trans) { struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); int err; @@ -2095,9 +2097,9 @@ static int _mv88e6xxx_port_vlan_add(struct mv88e6xxx_priv_state *ps, int port, return _mv88e6xxx_vtu_loadpurge(ps, &vlan); } -void mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_vlan *vlan, - struct switchdev_trans *trans) +static void mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_vlan *vlan, + struct switchdev_trans *trans) { struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; @@ -2157,8 +2159,8 @@ static int _mv88e6xxx_port_vlan_del(struct mv88e6xxx_priv_state *ps, return _mv88e6xxx_atu_remove(ps, vlan.fid, port, false); } -int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_vlan *vlan) +static int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_vlan *vlan) { struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); u16 pvid, vid; @@ -2271,9 +2273,9 @@ static int _mv88e6xxx_port_fdb_load(struct mv88e6xxx_priv_state *ps, int port, return _mv88e6xxx_atu_load(ps, &entry); } -int mv88e6xxx_port_fdb_prepare(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_fdb *fdb, - struct switchdev_trans *trans) +static int mv88e6xxx_port_fdb_prepare(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_fdb *fdb, + struct switchdev_trans *trans) { struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); @@ -2286,9 +2288,9 @@ int mv88e6xxx_port_fdb_prepare(struct dsa_switch *ds, int port, return 0; } -void 
mv88e6xxx_port_fdb_add(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_fdb *fdb, - struct switchdev_trans *trans) +static void mv88e6xxx_port_fdb_add(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_fdb *fdb, + struct switchdev_trans *trans) { int state = is_multicast_ether_addr(fdb->addr) ? GLOBAL_ATU_DATA_STATE_MC_STATIC : @@ -2304,8 +2306,8 @@ void mv88e6xxx_port_fdb_add(struct dsa_switch *ds, int port, mutex_unlock(&ps->smi_mutex); } -int mv88e6xxx_port_fdb_del(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_fdb *fdb) +static int mv88e6xxx_port_fdb_del(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_fdb *fdb) { struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); int ret; @@ -2407,9 +2409,9 @@ static int _mv88e6xxx_port_fdb_dump_one(struct mv88e6xxx_priv_state *ps, return err; } -int mv88e6xxx_port_fdb_dump(struct dsa_switch *ds, int port, - struct switchdev_obj_port_fdb *fdb, - int (*cb)(struct switchdev_obj *obj)) +static int mv88e6xxx_port_fdb_dump(struct dsa_switch *ds, int port, + struct switchdev_obj_port_fdb *fdb, + int (*cb)(struct switchdev_obj *obj)) { struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); struct mv88e6xxx_vtu_stu_entry vlan = { @@ -2457,8 +2459,8 @@ unlock: return err; } -int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port, - struct net_device *bridge) +static int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port, + struct net_device *bridge) { struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); int i, err = 0; @@ -2484,7 +2486,7 @@ int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port, return err; } -void mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port) +static void mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port) { struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); struct net_device *bridge = ps->ports[port].bridge_dev; @@ -3108,7 +3110,7 @@ static int mv88e6xxx_setup_global(struct mv88e6xxx_priv_state *ps) return err; } -int mv88e6xxx_setup(struct dsa_switch *ds) +static int mv88e6xxx_setup(struct dsa_switch *ds) { struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); int err; @@ -3181,8 +3183,7 @@ static int mv88e6xxx_port_to_phy_addr(struct mv88e6xxx_priv_state *ps, return -EINVAL; } -int -mv88e6xxx_phy_read(struct dsa_switch *ds, int port, int regnum) +static int mv88e6xxx_phy_read(struct dsa_switch *ds, int port, int regnum) { struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); int addr = mv88e6xxx_port_to_phy_addr(ps, port); @@ -3204,8 +3205,8 @@ mv88e6xxx_phy_read(struct dsa_switch *ds, int port, int regnum) return ret; } -int -mv88e6xxx_phy_write(struct dsa_switch *ds, int port, int regnum, u16 val) +static int mv88e6xxx_phy_write(struct dsa_switch *ds, int port, int regnum, + u16 val) { struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); int addr = mv88e6xxx_port_to_phy_addr(ps, port); @@ -3291,7 +3292,7 @@ static int mv88e63xx_get_temp(struct dsa_switch *ds, int *temp) return 0; } -int mv88e6xxx_get_temp(struct dsa_switch *ds, int *temp) +static int mv88e6xxx_get_temp(struct dsa_switch *ds, int *temp) { struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); @@ -3304,7 +3305,7 @@ int mv88e6xxx_get_temp(struct dsa_switch *ds, int *temp) return mv88e61xx_get_temp(ds, temp); } -int mv88e6xxx_get_temp_limit(struct dsa_switch *ds, int *temp) +static int mv88e6xxx_get_temp_limit(struct dsa_switch *ds, int *temp) { struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); int phy = mv88e6xxx_6320_family(ps) ? 
3 : 0; @@ -3324,7 +3325,7 @@ int mv88e6xxx_get_temp_limit(struct dsa_switch *ds, int *temp) return 0; } -int mv88e6xxx_set_temp_limit(struct dsa_switch *ds, int temp) +static int mv88e6xxx_set_temp_limit(struct dsa_switch *ds, int temp) { struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); int phy = mv88e6xxx_6320_family(ps) ? 3 : 0; @@ -3341,7 +3342,7 @@ int mv88e6xxx_set_temp_limit(struct dsa_switch *ds, int temp) (ret & 0xe0ff) | (temp << 8)); } -int mv88e6xxx_get_temp_alarm(struct dsa_switch *ds, bool *alarm) +static int mv88e6xxx_get_temp_alarm(struct dsa_switch *ds, bool *alarm) { struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); int phy = mv88e6xxx_6320_family(ps) ? 3 : 0; @@ -3362,6 +3363,161 @@ int mv88e6xxx_get_temp_alarm(struct dsa_switch *ds, bool *alarm) } #endif /* CONFIG_NET_DSA_HWMON */ +static const struct mv88e6xxx_info mv88e6xxx_table[] = { + [MV88E6085] = { + .prod_num = PORT_SWITCH_ID_PROD_NUM_6085, + .family = MV88E6XXX_FAMILY_6097, + .name = "Marvell 88E6085", + .num_databases = 4096, + .num_ports = 10, + .flags = MV88E6XXX_FLAGS_FAMILY_6097, + }, + + [MV88E6095] = { + .prod_num = PORT_SWITCH_ID_PROD_NUM_6095, + .family = MV88E6XXX_FAMILY_6095, + .name = "Marvell 88E6095/88E6095F", + .num_databases = 256, + .num_ports = 11, + .flags = MV88E6XXX_FLAGS_FAMILY_6095, + }, + + [MV88E6123] = { + .prod_num = PORT_SWITCH_ID_PROD_NUM_6123, + .family = MV88E6XXX_FAMILY_6165, + .name = "Marvell 88E6123", + .num_databases = 4096, + .num_ports = 3, + .flags = MV88E6XXX_FLAGS_FAMILY_6165, + }, + + [MV88E6131] = { + .prod_num = PORT_SWITCH_ID_PROD_NUM_6131, + .family = MV88E6XXX_FAMILY_6185, + .name = "Marvell 88E6131", + .num_databases = 256, + .num_ports = 8, + .flags = MV88E6XXX_FLAGS_FAMILY_6185, + }, + + [MV88E6161] = { + .prod_num = PORT_SWITCH_ID_PROD_NUM_6161, + .family = MV88E6XXX_FAMILY_6165, + .name = "Marvell 88E6161", + .num_databases = 4096, + .num_ports = 6, + .flags = MV88E6XXX_FLAGS_FAMILY_6165, + }, + + [MV88E6165] = { + .prod_num = PORT_SWITCH_ID_PROD_NUM_6165, + .family = MV88E6XXX_FAMILY_6165, + .name = "Marvell 88E6165", + .num_databases = 4096, + .num_ports = 6, + .flags = MV88E6XXX_FLAGS_FAMILY_6165, + }, + + [MV88E6171] = { + .prod_num = PORT_SWITCH_ID_PROD_NUM_6171, + .family = MV88E6XXX_FAMILY_6351, + .name = "Marvell 88E6171", + .num_databases = 4096, + .num_ports = 7, + .flags = MV88E6XXX_FLAGS_FAMILY_6351, + }, + + [MV88E6172] = { + .prod_num = PORT_SWITCH_ID_PROD_NUM_6172, + .family = MV88E6XXX_FAMILY_6352, + .name = "Marvell 88E6172", + .num_databases = 4096, + .num_ports = 7, + .flags = MV88E6XXX_FLAGS_FAMILY_6352, + }, + + [MV88E6175] = { + .prod_num = PORT_SWITCH_ID_PROD_NUM_6175, + .family = MV88E6XXX_FAMILY_6351, + .name = "Marvell 88E6175", + .num_databases = 4096, + .num_ports = 7, + .flags = MV88E6XXX_FLAGS_FAMILY_6351, + }, + + [MV88E6176] = { + .prod_num = PORT_SWITCH_ID_PROD_NUM_6176, + .family = MV88E6XXX_FAMILY_6352, + .name = "Marvell 88E6176", + .num_databases = 4096, + .num_ports = 7, + .flags = MV88E6XXX_FLAGS_FAMILY_6352, + }, + + [MV88E6185] = { + .prod_num = PORT_SWITCH_ID_PROD_NUM_6185, + .family = MV88E6XXX_FAMILY_6185, + .name = "Marvell 88E6185", + .num_databases = 256, + .num_ports = 10, + .flags = MV88E6XXX_FLAGS_FAMILY_6185, + }, + + [MV88E6240] = { + .prod_num = PORT_SWITCH_ID_PROD_NUM_6240, + .family = MV88E6XXX_FAMILY_6352, + .name = "Marvell 88E6240", + .num_databases = 4096, + .num_ports = 7, + .flags = MV88E6XXX_FLAGS_FAMILY_6352, + }, + + [MV88E6320] = { + .prod_num = PORT_SWITCH_ID_PROD_NUM_6320, + .family = 
MV88E6XXX_FAMILY_6320, + .name = "Marvell 88E6320", + .num_databases = 4096, + .num_ports = 7, + .flags = MV88E6XXX_FLAGS_FAMILY_6320, + }, + + [MV88E6321] = { + .prod_num = PORT_SWITCH_ID_PROD_NUM_6321, + .family = MV88E6XXX_FAMILY_6320, + .name = "Marvell 88E6321", + .num_databases = 4096, + .num_ports = 7, + .flags = MV88E6XXX_FLAGS_FAMILY_6320, + }, + + [MV88E6350] = { + .prod_num = PORT_SWITCH_ID_PROD_NUM_6350, + .family = MV88E6XXX_FAMILY_6351, + .name = "Marvell 88E6350", + .num_databases = 4096, + .num_ports = 7, + .flags = MV88E6XXX_FLAGS_FAMILY_6351, + }, + + [MV88E6351] = { + .prod_num = PORT_SWITCH_ID_PROD_NUM_6351, + .family = MV88E6XXX_FAMILY_6351, + .name = "Marvell 88E6351", + .num_databases = 4096, + .num_ports = 7, + .flags = MV88E6XXX_FLAGS_FAMILY_6351, + }, + + [MV88E6352] = { + .prod_num = PORT_SWITCH_ID_PROD_NUM_6352, + .family = MV88E6XXX_FAMILY_6352, + .name = "Marvell 88E6352", + .num_databases = 4096, + .num_ports = 7, + .flags = MV88E6XXX_FLAGS_FAMILY_6352, + }, +}; + static const struct mv88e6xxx_info * mv88e6xxx_lookup_info(unsigned int prod_num, const struct mv88e6xxx_info *table, unsigned int num) @@ -3375,10 +3531,9 @@ mv88e6xxx_lookup_info(unsigned int prod_num, const struct mv88e6xxx_info *table, return NULL; } -const char *mv88e6xxx_drv_probe(struct device *dsa_dev, struct device *host_dev, - int sw_addr, void **priv, - const struct mv88e6xxx_info *table, - unsigned int num) +static const char *mv88e6xxx_probe(struct device *dsa_dev, + struct device *host_dev, int sw_addr, + void **priv) { const struct mv88e6xxx_info *info; struct mv88e6xxx_priv_state *ps; @@ -3397,7 +3552,8 @@ const char *mv88e6xxx_drv_probe(struct device *dsa_dev, struct device *host_dev, prod_num = (id & 0xfff0) >> 4; rev = id & 0x000f; - info = mv88e6xxx_lookup_info(prod_num, table, num); + info = mv88e6xxx_lookup_info(prod_num, mv88e6xxx_table, + ARRAY_SIZE(mv88e6xxx_table)); if (!info) return NULL; @@ -3419,41 +3575,73 @@ const char *mv88e6xxx_drv_probe(struct device *dsa_dev, struct device *host_dev, return name; } +struct dsa_switch_driver mv88e6xxx_switch_driver = { + .tag_protocol = DSA_TAG_PROTO_EDSA, + .probe = mv88e6xxx_probe, + .setup = mv88e6xxx_setup, + .set_addr = mv88e6xxx_set_addr, + .phy_read = mv88e6xxx_phy_read, + .phy_write = mv88e6xxx_phy_write, + .adjust_link = mv88e6xxx_adjust_link, + .get_strings = mv88e6xxx_get_strings, + .get_ethtool_stats = mv88e6xxx_get_ethtool_stats, + .get_sset_count = mv88e6xxx_get_sset_count, + .set_eee = mv88e6xxx_set_eee, + .get_eee = mv88e6xxx_get_eee, +#ifdef CONFIG_NET_DSA_HWMON + .get_temp = mv88e6xxx_get_temp, + .get_temp_limit = mv88e6xxx_get_temp_limit, + .set_temp_limit = mv88e6xxx_set_temp_limit, + .get_temp_alarm = mv88e6xxx_get_temp_alarm, +#endif + .get_eeprom = mv88e6xxx_get_eeprom, + .set_eeprom = mv88e6xxx_set_eeprom, + .get_regs_len = mv88e6xxx_get_regs_len, + .get_regs = mv88e6xxx_get_regs, + .port_bridge_join = mv88e6xxx_port_bridge_join, + .port_bridge_leave = mv88e6xxx_port_bridge_leave, + .port_stp_state_set = mv88e6xxx_port_stp_state_set, + .port_vlan_filtering = mv88e6xxx_port_vlan_filtering, + .port_vlan_prepare = mv88e6xxx_port_vlan_prepare, + .port_vlan_add = mv88e6xxx_port_vlan_add, + .port_vlan_del = mv88e6xxx_port_vlan_del, + .port_vlan_dump = mv88e6xxx_port_vlan_dump, + .port_fdb_prepare = mv88e6xxx_port_fdb_prepare, + .port_fdb_add = mv88e6xxx_port_fdb_add, + .port_fdb_del = mv88e6xxx_port_fdb_del, + .port_fdb_dump = mv88e6xxx_port_fdb_dump, +}; + static int __init mv88e6xxx_init(void) { -#if 
IS_ENABLED(CONFIG_NET_DSA_MV88E6131) - register_switch_driver(&mv88e6131_switch_driver); -#endif -#if IS_ENABLED(CONFIG_NET_DSA_MV88E6123) - register_switch_driver(&mv88e6123_switch_driver); -#endif -#if IS_ENABLED(CONFIG_NET_DSA_MV88E6352) - register_switch_driver(&mv88e6352_switch_driver); -#endif -#if IS_ENABLED(CONFIG_NET_DSA_MV88E6171) - register_switch_driver(&mv88e6171_switch_driver); -#endif + register_switch_driver(&mv88e6xxx_switch_driver); + return 0; } module_init(mv88e6xxx_init); static void __exit mv88e6xxx_cleanup(void) { -#if IS_ENABLED(CONFIG_NET_DSA_MV88E6171) - unregister_switch_driver(&mv88e6171_switch_driver); -#endif -#if IS_ENABLED(CONFIG_NET_DSA_MV88E6352) - unregister_switch_driver(&mv88e6352_switch_driver); -#endif -#if IS_ENABLED(CONFIG_NET_DSA_MV88E6123) - unregister_switch_driver(&mv88e6123_switch_driver); -#endif -#if IS_ENABLED(CONFIG_NET_DSA_MV88E6131) - unregister_switch_driver(&mv88e6131_switch_driver); -#endif + unregister_switch_driver(&mv88e6xxx_switch_driver); } module_exit(mv88e6xxx_cleanup); +MODULE_ALIAS("platform:mv88e6085"); +MODULE_ALIAS("platform:mv88e6095"); +MODULE_ALIAS("platform:mv88e6095f"); +MODULE_ALIAS("platform:mv88e6123"); +MODULE_ALIAS("platform:mv88e6131"); +MODULE_ALIAS("platform:mv88e6161"); +MODULE_ALIAS("platform:mv88e6165"); +MODULE_ALIAS("platform:mv88e6171"); +MODULE_ALIAS("platform:mv88e6172"); +MODULE_ALIAS("platform:mv88e6175"); +MODULE_ALIAS("platform:mv88e6176"); +MODULE_ALIAS("platform:mv88e6320"); +MODULE_ALIAS("platform:mv88e6321"); +MODULE_ALIAS("platform:mv88e6350"); +MODULE_ALIAS("platform:mv88e6351"); +MODULE_ALIAS("platform:mv88e6352"); MODULE_AUTHOR("Lennert Buytenhek "); MODULE_DESCRIPTION("Driver for Marvell 88E6XXX ethernet switch chips"); MODULE_LICENSE("GPL"); diff --git a/drivers/net/dsa/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx.h index a131827cb26d..ca69a93a42a0 100644 --- a/drivers/net/dsa/mv88e6xxx.h +++ b/drivers/net/dsa/mv88e6xxx.h @@ -338,6 +338,27 @@ #define MV88E6XXX_N_FID 4096 +/* List of supported models */ +enum mv88e6xxx_model { + MV88E6085, + MV88E6095, + MV88E6123, + MV88E6131, + MV88E6161, + MV88E6165, + MV88E6171, + MV88E6172, + MV88E6175, + MV88E6176, + MV88E6185, + MV88E6240, + MV88E6320, + MV88E6321, + MV88E6350, + MV88E6351, + MV88E6352, +}; + enum mv88e6xxx_family { MV88E6XXX_FAMILY_NONE, MV88E6XXX_FAMILY_6065, /* 6031 6035 6061 6065 */ @@ -583,74 +604,4 @@ static inline bool mv88e6xxx_has(struct mv88e6xxx_priv_state *ps, return (ps->info->flags & flags) == flags; } -const char *mv88e6xxx_drv_probe(struct device *dsa_dev, struct device *host_dev, - int sw_addr, void **priv, - const struct mv88e6xxx_info *table, - unsigned int num); - -int mv88e6xxx_setup(struct dsa_switch *ds); -int mv88e6xxx_reg_read(struct mv88e6xxx_priv_state *ps, int addr, int reg); -int mv88e6xxx_reg_write(struct mv88e6xxx_priv_state *ps, int addr, - int reg, u16 val); -int mv88e6xxx_set_addr(struct dsa_switch *ds, u8 *addr); -int mv88e6xxx_phy_read(struct dsa_switch *ds, int port, int regnum); -int mv88e6xxx_phy_write(struct dsa_switch *ds, int port, int regnum, u16 val); -void mv88e6xxx_get_strings(struct dsa_switch *ds, int port, uint8_t *data); -void mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds, int port, - uint64_t *data); -int mv88e6xxx_get_sset_count(struct dsa_switch *ds); -int mv88e6xxx_get_sset_count_basic(struct dsa_switch *ds); -void mv88e6xxx_adjust_link(struct dsa_switch *ds, int port, - struct phy_device *phydev); -int mv88e6xxx_get_regs_len(struct dsa_switch *ds, int port); -void 
mv88e6xxx_get_regs(struct dsa_switch *ds, int port, - struct ethtool_regs *regs, void *_p); -int mv88e6xxx_get_temp(struct dsa_switch *ds, int *temp); -int mv88e6xxx_get_temp_limit(struct dsa_switch *ds, int *temp); -int mv88e6xxx_set_temp_limit(struct dsa_switch *ds, int temp); -int mv88e6xxx_get_temp_alarm(struct dsa_switch *ds, bool *alarm); -int mv88e6xxx_get_eeprom(struct dsa_switch *ds, struct ethtool_eeprom *eeprom, - u8 *data); -int mv88e6xxx_set_eeprom(struct dsa_switch *ds, struct ethtool_eeprom *eeprom, - u8 *data); -int mv88e6xxx_get_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e); -int mv88e6xxx_set_eee(struct dsa_switch *ds, int port, - struct phy_device *phydev, struct ethtool_eee *e); -int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port, - struct net_device *bridge); -void mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port); -void mv88e6xxx_port_stp_state_set(struct dsa_switch *ds, int port, u8 state); -int mv88e6xxx_port_vlan_filtering(struct dsa_switch *ds, int port, - bool vlan_filtering); -int mv88e6xxx_port_vlan_prepare(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_vlan *vlan, - struct switchdev_trans *trans); -void mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_vlan *vlan, - struct switchdev_trans *trans); -int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_vlan *vlan); -int mv88e6xxx_port_vlan_dump(struct dsa_switch *ds, int port, - struct switchdev_obj_port_vlan *vlan, - int (*cb)(struct switchdev_obj *obj)); -int mv88e6xxx_port_fdb_prepare(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_fdb *fdb, - struct switchdev_trans *trans); -void mv88e6xxx_port_fdb_add(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_fdb *fdb, - struct switchdev_trans *trans); -int mv88e6xxx_port_fdb_del(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_fdb *fdb); -int mv88e6xxx_port_fdb_dump(struct dsa_switch *ds, int port, - struct switchdev_obj_port_fdb *fdb, - int (*cb)(struct switchdev_obj *obj)); -int mv88e6xxx_phy_page_read(struct dsa_switch *ds, int port, int page, int reg); -int mv88e6xxx_phy_page_write(struct dsa_switch *ds, int port, int page, - int reg, int val); - -extern struct dsa_switch_driver mv88e6131_switch_driver; -extern struct dsa_switch_driver mv88e6123_switch_driver; -extern struct dsa_switch_driver mv88e6352_switch_driver; -extern struct dsa_switch_driver mv88e6171_switch_driver; - #endif From 79f09fa79cefdd9df40c9c590cc8dda544ebff26 Mon Sep 17 00:00:00 2001 From: Michael Thalmeier Date: Thu, 21 Apr 2016 16:43:49 +0200 Subject: [PATCH 1444/1649] NFC: pn533: i2c: free irq on driver remove The requested irq needs to be freed when removing the driver, otherwise a following driver load fails to request the irq. 
Signed-off-by: Michael Thalmeier Signed-off-by: Samuel Ortiz --- drivers/nfc/pn533/i2c.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/nfc/pn533/i2c.c b/drivers/nfc/pn533/i2c.c index 9679aa52c381..1a622e1c0a73 100644 --- a/drivers/nfc/pn533/i2c.c +++ b/drivers/nfc/pn533/i2c.c @@ -236,6 +236,8 @@ static int pn533_i2c_remove(struct i2c_client *client) pn533_unregister_device(phy->priv); + free_irq(client->irq, phy); + return 0; } From b16931b13c67f06ca74cc8d02797de480dea348b Mon Sep 17 00:00:00 2001 From: Michael Thalmeier Date: Thu, 21 Apr 2016 16:43:50 +0200 Subject: [PATCH 1445/1649] NFC: pn533: fix order of initialization Correctly call nfc_set_parent_dev before nfc_register_device. Otherwise the driver will OOPS when being removed. Signed-off-by: Michael Thalmeier Signed-off-by: Samuel Ortiz --- drivers/nfc/pn533/i2c.c | 3 ++- drivers/nfc/pn533/pn533.c | 4 +++- drivers/nfc/pn533/pn533.h | 3 ++- drivers/nfc/pn533/usb.c | 3 +-- 4 files changed, 8 insertions(+), 5 deletions(-) diff --git a/drivers/nfc/pn533/i2c.c b/drivers/nfc/pn533/i2c.c index 1a622e1c0a73..0141f19ac5a7 100644 --- a/drivers/nfc/pn533/i2c.c +++ b/drivers/nfc/pn533/i2c.c @@ -211,7 +211,8 @@ static int pn533_i2c_probe(struct i2c_client *client, PN533_NO_TYPE_B_PROTOCOLS, PN533_PROTO_REQ_ACK_RESP, phy, &i2c_phy_ops, NULL, - &phy->i2c_dev->dev); + &phy->i2c_dev->dev, + &client->dev); if (IS_ERR(priv)) { r = PTR_ERR(priv); diff --git a/drivers/nfc/pn533/pn533.c b/drivers/nfc/pn533/pn533.c index ee9e8f1195fa..d82eecd8daad 100644 --- a/drivers/nfc/pn533/pn533.c +++ b/drivers/nfc/pn533/pn533.c @@ -2554,7 +2554,8 @@ struct pn533 *pn533_register_device(u32 device_type, void *phy, struct pn533_phy_ops *phy_ops, struct pn533_frame_ops *fops, - struct device *dev) + struct device *dev, + struct device *parent) { struct pn533_fw_version fw_ver; struct pn533 *priv; @@ -2617,6 +2618,7 @@ struct pn533 *pn533_register_device(u32 device_type, goto destroy_wq; } + nfc_set_parent_dev(priv->nfc_dev, parent); nfc_set_drvdata(priv->nfc_dev, priv); rc = nfc_register_device(priv->nfc_dev); diff --git a/drivers/nfc/pn533/pn533.h b/drivers/nfc/pn533/pn533.h index ba604f6d93f9..553c7d171fd1 100644 --- a/drivers/nfc/pn533/pn533.h +++ b/drivers/nfc/pn533/pn533.h @@ -228,7 +228,8 @@ struct pn533 *pn533_register_device(u32 device_type, void *phy, struct pn533_phy_ops *phy_ops, struct pn533_frame_ops *fops, - struct device *dev); + struct device *dev, + struct device *parent); void pn533_unregister_device(struct pn533 *priv); void pn533_recv_frame(struct pn533 *dev, struct sk_buff *skb, int status); diff --git a/drivers/nfc/pn533/usb.c b/drivers/nfc/pn533/usb.c index 4f73cbf8ccef..8ca060324b6a 100644 --- a/drivers/nfc/pn533/usb.c +++ b/drivers/nfc/pn533/usb.c @@ -536,7 +536,7 @@ static int pn533_usb_probe(struct usb_interface *interface, priv = pn533_register_device(id->driver_info, protocols, protocol_type, phy, &usb_phy_ops, fops, - &phy->udev->dev); + &phy->udev->dev, &interface->dev); if (IS_ERR(priv)) { rc = PTR_ERR(priv); @@ -544,7 +544,6 @@ static int pn533_usb_probe(struct usb_interface *interface, } phy->priv = priv; - nfc_set_parent_dev(priv->nfc_dev, &interface->dev); usb_set_intfdata(interface, phy); From 30f98489f54e027001cc9b27c59840975871de65 Mon Sep 17 00:00:00 2001 From: Michael Thalmeier Date: Thu, 21 Apr 2016 16:43:51 +0200 Subject: [PATCH 1446/1649] NFC: pn533: i2c: do not call pn533_recv_frame with aborted commands When a command gets aborted the pn533 core does not need any RX frames that may be received until a new 
frame is sent. Signed-off-by: Michael Thalmeier Signed-off-by: Samuel Ortiz --- drivers/nfc/pn533/i2c.c | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/drivers/nfc/pn533/i2c.c b/drivers/nfc/pn533/i2c.c index 0141f19ac5a7..1dc89248e58e 100644 --- a/drivers/nfc/pn533/i2c.c +++ b/drivers/nfc/pn533/i2c.c @@ -39,6 +39,8 @@ struct pn533_i2c_phy { struct i2c_client *i2c_dev; struct pn533 *priv; + bool aborted; + int hard_fault; /* * < 0 if hardware error occurred (e.g. i2c err) * and prevents normal operation. @@ -71,6 +73,8 @@ static int pn533_i2c_send_frame(struct pn533 *dev, if (phy->priv == NULL) phy->priv = dev; + phy->aborted = false; + print_hex_dump_debug("PN533_i2c TX: ", DUMP_PREFIX_NONE, 16, 1, out->data, out->len, false); @@ -93,13 +97,15 @@ static int pn533_i2c_send_frame(struct pn533 *dev, static void pn533_i2c_abort_cmd(struct pn533 *dev, gfp_t flags) { + struct pn533_i2c_phy *phy = dev->phy; + + phy->aborted = true; + /* An ack will cancel the last issued command */ pn533_i2c_send_ack(dev, flags); /* schedule cmd_complete_work to finish current command execution */ - if (dev->cmd != NULL) - dev->cmd->status = -ENOENT; - queue_work(dev->wq, &dev->cmd_complete_work); + pn533_recv_frame(phy->priv, NULL, -ENOENT); } static int pn533_i2c_read(struct pn533_i2c_phy *phy, struct sk_buff **skb) @@ -164,7 +170,8 @@ static irqreturn_t pn533_i2c_irq_thread_fn(int irq, void *data) return IRQ_HANDLED; } - pn533_recv_frame(phy->priv, skb, 0); + if (!phy->aborted) + pn533_recv_frame(phy->priv, skb, 0); return IRQ_HANDLED; } From c952f915ce8567120ffdf12998ad0c945fbc93ac Mon Sep 17 00:00:00 2001 From: Michael Thalmeier Date: Thu, 21 Apr 2016 16:43:52 +0200 Subject: [PATCH 1447/1649] NFC: pn533: reset poll modulation list before calling targets_found We need to reset the poll modulation list before calling nfc_targets_found because otherwise userspace could run before the modulation list is cleared and then get a "Cannot activate target while polling" error upon calling activate_target. Signed-off-by: Michael Thalmeier Signed-off-by: Samuel Ortiz --- drivers/nfc/pn533/pn533.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/nfc/pn533/pn533.c b/drivers/nfc/pn533/pn533.c index d82eecd8daad..745181ea693b 100644 --- a/drivers/nfc/pn533/pn533.c +++ b/drivers/nfc/pn533/pn533.c @@ -865,6 +865,7 @@ static int pn533_target_found_type_b(struct nfc_target *nfc_tgt, u8 *tgt_data, return 0; } +static void pn533_poll_reset_mod_list(struct pn533 *dev); static int pn533_target_found(struct pn533 *dev, u8 tg, u8 *tgdata, int tgdata_len) { @@ -914,6 +915,7 @@ static int pn533_target_found(struct pn533 *dev, u8 tg, u8 *tgdata, dev->tgt_available_prots = nfc_tgt.supported_protocols; + pn533_poll_reset_mod_list(dev); nfc_targets_found(dev->nfc_dev, &nfc_tgt, 1); return 0; @@ -980,10 +982,8 @@ static int pn533_start_poll_complete(struct pn533 *dev, struct sk_buff *resp) rc = pn533_target_found(dev, tg, tgdata, tgdata_len); /* We must stop the poll after a valid target found */ - if (rc == 0) { - pn533_poll_reset_mod_list(dev); + if (rc == 0) return 0; - } } return -EAGAIN; From b31d5103c33280738188c51e226224dff4401c7b Mon Sep 17 00:00:00 2001 From: Michael Thalmeier Date: Thu, 21 Apr 2016 16:43:53 +0200 Subject: [PATCH 1448/1649] NFC: pn533: handle interrupted commands in pn533_recv_frame When pn533_recv_frame is called from within abort_command context the current dev->cmd is not guaranteed to be set. 
Additionally on receiving an error status we can omit frame checking and simply schedule the workqueue. Signed-off-by: Michael Thalmeier Signed-off-by: Samuel Ortiz --- drivers/nfc/pn533/pn533.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/drivers/nfc/pn533/pn533.c b/drivers/nfc/pn533/pn533.c index 745181ea693b..d9c55830b2b2 100644 --- a/drivers/nfc/pn533/pn533.c +++ b/drivers/nfc/pn533/pn533.c @@ -2016,8 +2016,16 @@ _error: */ void pn533_recv_frame(struct pn533 *dev, struct sk_buff *skb, int status) { + if (!dev->cmd) + goto sched_wq; + dev->cmd->status = status; + if (status != 0) { + dev_dbg(dev->dev, "%s: Error received: %d\n", __func__, status); + goto sched_wq; + } + if (skb == NULL) { pr_err("NULL Frame -> link is dead\n"); goto sched_wq; From 1de1d449c6cf701e09d2941d042b28328118ad1a Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Mon, 9 May 2016 22:05:13 -0400 Subject: [PATCH 1449/1649] mlx5: Fix merge errors. I accidently let Arnd's VXLAN dependency changes slip into net-next, they are only appropriate for net. Also the flow steering structural changes to mlx5e_priv got scrambled during the merge resolution as well. Fix that all up. Reported-by: Stephen Rothwell Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx5/core/Kconfig | 7 ------- drivers/net/ethernet/mellanox/mlx5/core/Makefile | 3 +-- drivers/net/ethernet/mellanox/mlx5/core/en.h | 5 ----- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 3 --- drivers/net/ethernet/mellanox/mlx5/core/vxlan.h | 8 +------- 5 files changed, 2 insertions(+), 24 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig index f5c3b9465d8d..1cf722eba607 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig +++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig @@ -31,10 +31,3 @@ config MLX5_CORE_EN_DCB This flag is depended on the kernel's DCB support. If unsure, set to Y - -config MLX5_CORE_EN_VXLAN - bool "VXLAN offloads Support" - default y - depends on MLX5_CORE_EN && VXLAN && !(MLX5_CORE=y && VXLAN=m) - ---help--- - Say Y here if you want to use VXLAN offloads in the driver. 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile index e4a5b37b90ab..b531d4f3c00b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile +++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile @@ -6,7 +6,6 @@ mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \ mlx5_core-$(CONFIG_MLX5_CORE_EN) += wq.o eswitch.o \ en_main.o en_fs.o en_ethtool.o en_tx.o en_rx.o \ - en_txrx.o en_clock.o en_tc.o en_arfs.o + en_txrx.o en_clock.o vxlan.o en_tc.o en_arfs.o -mlx5_core-$(CONFIG_MLX5_CORE_EN_VXLAN) += vxlan.o mlx5_core-$(CONFIG_MLX5_CORE_EN_DCB) += en_dcbnl.o diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 7aea32e085b3..bfa5daaaf5aa 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -522,12 +522,7 @@ struct mlx5e_priv { struct mlx5e_direct_tir direct_tir[MLX5E_MAX_NUM_CHANNELS]; struct mlx5e_flow_steering fs; - struct mlx5e_flow_tables fts; - struct mlx5e_eth_addr_db eth_addr; - struct mlx5e_vlan_db vlan; -#ifdef CONFIG_MLX5_CORE_EN_VXLAN struct mlx5e_vxlan_db vxlan; -#endif struct mlx5e_params params; struct workqueue_struct *wq; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index b60a1bc6f457..1c70e518b5c5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -2509,7 +2509,6 @@ static int mlx5e_get_vf_stats(struct net_device *dev, vf_stats); } -#if IS_ENABLED(CONFIG_MLX5_CORE_EN_VXLAN) static void mlx5e_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family, __be16 port) { @@ -2581,7 +2580,6 @@ static netdev_features_t mlx5e_features_check(struct sk_buff *skb, return features; } -#endif static const struct net_device_ops mlx5e_netdev_ops_basic = { .ndo_open = mlx5e_open, @@ -2616,7 +2614,6 @@ static const struct net_device_ops mlx5e_netdev_ops_sriov = { .ndo_set_features = mlx5e_set_features, .ndo_change_mtu = mlx5e_change_mtu, .ndo_do_ioctl = mlx5e_ioctl, -#ifdef CONFIG_MLX5_CORE_EN_VXLAN .ndo_add_vxlan_port = mlx5e_add_vxlan_port, .ndo_del_vxlan_port = mlx5e_del_vxlan_port, .ndo_features_check = mlx5e_features_check, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h index 217ac530a514..5def12c048e3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h @@ -48,18 +48,12 @@ struct mlx5e_vxlan_work { static inline bool mlx5e_vxlan_allowed(struct mlx5_core_dev *mdev) { - return IS_ENABLED(CONFIG_MLX5_CORE_EN_VXLAN) && - (MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan) && + return (MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan) && mlx5_core_is_pf(mdev)); } -#ifdef CONFIG_MLX5_CORE_EN_VXLAN void mlx5e_vxlan_init(struct mlx5e_priv *priv); void mlx5e_vxlan_cleanup(struct mlx5e_priv *priv); -#else -static inline void mlx5e_vxlan_init(struct mlx5e_priv *priv) {} -static inline void mlx5e_vxlan_cleanup(struct mlx5e_priv *priv) {} -#endif void mlx5e_vxlan_queue_work(struct mlx5e_priv *priv, sa_family_t sa_family, u16 port, int add); From 27c0f739a009971aaf43ee71d9b150a56ee57012 Mon Sep 17 00:00:00 2001 From: Taku Izumi Date: Tue, 10 May 2016 10:28:49 +0900 Subject: [PATCH 1450/1649] fjes: Fix unnecessary spinlock_irqsave commit-bd5a256 introduces a deadlock bug in fjes_change_mtu(). This spin_lock_irqsave() is obviously unnecessary. 
This patch eliminates unnecessary spin_lock_irqsave() in fjes_change_mtu() Signed-off-by: Taku Izumi Signed-off-by: David S. Miller --- drivers/net/fjes/fjes_main.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/net/fjes/fjes_main.c b/drivers/net/fjes/fjes_main.c index f4e69261a3ce..86c331bb5eb3 100644 --- a/drivers/net/fjes/fjes_main.c +++ b/drivers/net/fjes/fjes_main.c @@ -819,7 +819,6 @@ static int fjes_change_mtu(struct net_device *netdev, int new_mtu) netdev->mtu = new_mtu; if (running) { - spin_lock_irqsave(&hw->rx_status_lock, flags); for (epidx = 0; epidx < hw->max_epid; epidx++) { if (epidx == hw->my_epid) continue; From 4a65896f94fa82370041823837cd75aac1186b54 Mon Sep 17 00:00:00 2001 From: David Ahern Date: Sat, 7 May 2016 16:48:59 -0700 Subject: [PATCH 1451/1649] net: l3mdev: Move get_saddr and rt6_dst Move l3mdev_rt6_dst_by_oif and l3mdev_get_saddr to l3mdev.c. Collapse l3mdev_get_rt6_dst into l3mdev_rt6_dst_by_oif since it is the only user and keep the l3mdev_get_rt6_dst name for consistency with other hooks. A follow-on patch adds more code to these functions making them long for inlined functions. Signed-off-by: David Ahern Signed-off-by: David S. Miller --- include/net/l3mdev.h | 56 +++----------------------------------------- net/ipv6/route.c | 2 +- net/l3mdev/l3mdev.c | 54 ++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 58 insertions(+), 54 deletions(-) diff --git a/include/net/l3mdev.h b/include/net/l3mdev.h index c43a9c73de5e..78872bd1dc2c 100644 --- a/include/net/l3mdev.h +++ b/include/net/l3mdev.h @@ -130,52 +130,9 @@ static inline bool netif_index_is_l3_master(struct net *net, int ifindex) return rc; } -static inline int l3mdev_get_saddr(struct net *net, int ifindex, - struct flowi4 *fl4) -{ - struct net_device *dev; - int rc = 0; +int l3mdev_get_saddr(struct net *net, int ifindex, struct flowi4 *fl4); - if (ifindex) { - - rcu_read_lock(); - - dev = dev_get_by_index_rcu(net, ifindex); - if (dev && netif_is_l3_master(dev) && - dev->l3mdev_ops->l3mdev_get_saddr) { - rc = dev->l3mdev_ops->l3mdev_get_saddr(dev, fl4); - } - - rcu_read_unlock(); - } - - return rc; -} - -static inline struct dst_entry *l3mdev_get_rt6_dst(const struct net_device *dev, - const struct flowi6 *fl6) -{ - if (netif_is_l3_master(dev) && dev->l3mdev_ops->l3mdev_get_rt6_dst) - return dev->l3mdev_ops->l3mdev_get_rt6_dst(dev, fl6); - - return NULL; -} - -static inline -struct dst_entry *l3mdev_rt6_dst_by_oif(struct net *net, - const struct flowi6 *fl6) -{ - struct dst_entry *dst = NULL; - struct net_device *dev; - - dev = dev_get_by_index(net, fl6->flowi6_oif); - if (dev) { - dst = l3mdev_get_rt6_dst(dev, fl6); - dev_put(dev); - } - - return dst; -} +struct dst_entry *l3mdev_get_rt6_dst(struct net *net, const struct flowi6 *fl6); #else @@ -233,14 +190,7 @@ static inline int l3mdev_get_saddr(struct net *net, int ifindex, } static inline -struct dst_entry *l3mdev_get_rt6_dst(const struct net_device *dev, - const struct flowi6 *fl6) -{ - return NULL; -} -static inline -struct dst_entry *l3mdev_rt6_dst_by_oif(struct net *net, - const struct flowi6 *fl6) +struct dst_entry *l3mdev_get_rt6_dst(struct net *net, const struct flowi6 *fl6) { return NULL; } diff --git a/net/ipv6/route.c b/net/ipv6/route.c index af46e19205f5..c42fa1deb152 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -1190,7 +1190,7 @@ struct dst_entry *ip6_route_output_flags(struct net *net, const struct sock *sk, struct dst_entry *dst; bool any_src; - dst = l3mdev_rt6_dst_by_oif(net, fl6); + dst = 
l3mdev_get_rt6_dst(net, fl6); if (dst) return dst; diff --git a/net/l3mdev/l3mdev.c b/net/l3mdev/l3mdev.c index e925037fa0df..898d01e0f87b 100644 --- a/net/l3mdev/l3mdev.c +++ b/net/l3mdev/l3mdev.c @@ -97,3 +97,57 @@ u32 l3mdev_fib_table_by_index(struct net *net, int ifindex) return tb_id; } EXPORT_SYMBOL_GPL(l3mdev_fib_table_by_index); + +/** + * l3mdev_get_rt6_dst - IPv6 route lookup based on flow. Returns + * cached route for L3 master device if relevant + * to flow + * @net: network namespace for device index lookup + * @fl6: IPv6 flow struct for lookup + */ + +struct dst_entry *l3mdev_get_rt6_dst(struct net *net, + const struct flowi6 *fl6) +{ + struct dst_entry *dst = NULL; + struct net_device *dev; + + dev = dev_get_by_index(net, fl6->flowi6_oif); + if (dev) { + if (netif_is_l3_master(dev) && + dev->l3mdev_ops->l3mdev_get_rt6_dst) + dst = dev->l3mdev_ops->l3mdev_get_rt6_dst(dev, fl6); + dev_put(dev); + } + + return dst; +} +EXPORT_SYMBOL_GPL(l3mdev_get_rt6_dst); + +/** + * l3mdev_get_saddr - get source address for a flow based on an interface + * enslaved to an L3 master device + * @net: network namespace for device index lookup + * @ifindex: Interface index + * @fl4: IPv4 flow struct + */ + +int l3mdev_get_saddr(struct net *net, int ifindex, struct flowi4 *fl4) +{ + struct net_device *dev; + int rc = 0; + + if (ifindex) { + rcu_read_lock(); + + dev = dev_get_by_index_rcu(net, ifindex); + if (dev && netif_is_l3_master(dev) && + dev->l3mdev_ops->l3mdev_get_saddr) + rc = dev->l3mdev_ops->l3mdev_get_saddr(dev, fl4); + + rcu_read_unlock(); + } + + return rc; +} +EXPORT_SYMBOL_GPL(l3mdev_get_saddr); From 1ff23beebdd315fe4d16070c08c065e89d7debb3 Mon Sep 17 00:00:00 2001 From: David Ahern Date: Sat, 7 May 2016 16:49:00 -0700 Subject: [PATCH 1452/1649] net: l3mdev: Allow send on enslaved interface Allow udp and raw sockets to send by oif that is an enslaved interface versus the l3mdev/VRF device. For example, this allows BFD to use ifindex from IP_PKTINFO on a receive to send a response without the need to convert to the VRF index. It also allows ping and ping6 to work when specifying an enslaved interface (e.g., ping -I swp1 ) which is a natural use case. Signed-off-by: David Ahern Signed-off-by: David S. Miller --- drivers/net/vrf.c | 2 ++ net/ipv4/route.c | 4 ++++ net/l3mdev/l3mdev.c | 17 +++++++++++++---- 3 files changed, 19 insertions(+), 4 deletions(-) diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c index 4b2461ae5d3b..c8db55aa8280 100644 --- a/drivers/net/vrf.c +++ b/drivers/net/vrf.c @@ -648,6 +648,8 @@ static int vrf_get_saddr(struct net_device *dev, struct flowi4 *fl4) fl4->flowi4_flags |= FLOWI_FLAG_SKIP_NH_OIF; fl4->flowi4_iif = LOOPBACK_IFINDEX; + /* make sure oif is set to VRF device for lookup */ + fl4->flowi4_oif = dev->ifindex; fl4->flowi4_tos = tos & IPTOS_RT_MASK; fl4->flowi4_scope = ((tos & RTO_ONLINK) ? 
RT_SCOPE_LINK : RT_SCOPE_UNIVERSE); diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 8c8c655bb2c4..a1f2830d8110 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -2146,6 +2146,7 @@ struct rtable *__ip_route_output_key_hash(struct net *net, struct flowi4 *fl4, unsigned int flags = 0; struct fib_result res; struct rtable *rth; + int master_idx; int orig_oif; int err = -ENETUNREACH; @@ -2155,6 +2156,9 @@ struct rtable *__ip_route_output_key_hash(struct net *net, struct flowi4 *fl4, orig_oif = fl4->flowi4_oif; + master_idx = l3mdev_master_ifindex_by_index(net, fl4->flowi4_oif); + if (master_idx) + fl4->flowi4_oif = master_idx; fl4->flowi4_iif = LOOPBACK_IFINDEX; fl4->flowi4_tos = tos & IPTOS_RT_MASK; fl4->flowi4_scope = ((tos & RTO_ONLINK) ? diff --git a/net/l3mdev/l3mdev.c b/net/l3mdev/l3mdev.c index 898d01e0f87b..6651a78e100c 100644 --- a/net/l3mdev/l3mdev.c +++ b/net/l3mdev/l3mdev.c @@ -112,12 +112,18 @@ struct dst_entry *l3mdev_get_rt6_dst(struct net *net, struct dst_entry *dst = NULL; struct net_device *dev; - dev = dev_get_by_index(net, fl6->flowi6_oif); - if (dev) { - if (netif_is_l3_master(dev) && + if (fl6->flowi6_oif) { + rcu_read_lock(); + + dev = dev_get_by_index_rcu(net, fl6->flowi6_oif); + if (dev && netif_is_l3_slave(dev)) + dev = netdev_master_upper_dev_get_rcu(dev); + + if (dev && netif_is_l3_master(dev) && dev->l3mdev_ops->l3mdev_get_rt6_dst) dst = dev->l3mdev_ops->l3mdev_get_rt6_dst(dev, fl6); - dev_put(dev); + + rcu_read_unlock(); } return dst; @@ -141,6 +147,9 @@ int l3mdev_get_saddr(struct net *net, int ifindex, struct flowi4 *fl4) rcu_read_lock(); dev = dev_get_by_index_rcu(net, ifindex); + if (dev && netif_is_l3_slave(dev)) + dev = netdev_master_upper_dev_get_rcu(dev); + if (dev && netif_is_l3_master(dev) && dev->l3mdev_ops->l3mdev_get_saddr) rc = dev->l3mdev_ops->l3mdev_get_saddr(dev, fl4); From bfca2eba2adaa9501656f503559a971de6927fa8 Mon Sep 17 00:00:00 2001 From: Joachim Eastwood Date: Sun, 8 May 2016 13:47:23 +0200 Subject: [PATCH 1453/1649] stmmac: dwmac-socfpga: make socfpga_dwmac_pm_ops static Fix the following sparse warning: drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c:274:1: warning: symbol 'socfpga_dwmac_pm_ops' was not declared. Should it be static? Signed-off-by: Joachim Eastwood Signed-off-by: David S. Miller --- drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c index cd9764a6a36f..f13499fa1f58 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c @@ -271,7 +271,8 @@ static int socfpga_dwmac_resume(struct device *dev) } #endif /* CONFIG_PM_SLEEP */ -SIMPLE_DEV_PM_OPS(socfpga_dwmac_pm_ops, stmmac_suspend, socfpga_dwmac_resume); +static SIMPLE_DEV_PM_OPS(socfpga_dwmac_pm_ops, stmmac_suspend, + socfpga_dwmac_resume); static const struct of_device_id socfpga_dwmac_match[] = { { .compatible = "altr,socfpga-stmmac" }, From db2ec95d1ba419b766aae7b1d7c0271f3c9cd9f4 Mon Sep 17 00:00:00 2001 From: Tom Herbert Date: Mon, 9 May 2016 17:12:08 -0700 Subject: [PATCH 1454/1649] ip6_gre: Fix MTU setting In ip6gre_tnl_link_config set t->tun_len and t->hlen correctly for the configuration. For hard_header_len and mtu calculation include IPv6 header and encapsulation overhead. In ip6gre_tunnel_init_common set t->tun_len and t->hlen correctly for the configuration. 
Revert to setting hard_header_len instead of needed_headroom. Tested: ./ip link add name tun8 type ip6gretap remote \ 2401:db00:20:911a:face:0:27:0 local \ 2401:db00:20:911a:face:0:25:0 ttl 225 Gives MTU of 1434. That is equal to 1500 - 40 - 14 - 4 - 8. ./ip link add name tun8 type ip6gretap remote \ 2401:db00:20:911a:face:0:27:0 local \ 2401:db00:20:911a:face:0:25:0 ttl 225 okey 123 Gives MTU of 1430. That is equal to 1500 - 40 - 14 - 4 - 8 - 4. Signed-off-by: Tom Herbert Signed-off-by: David S. Miller --- net/ipv6/ip6_gre.c | 29 +++++++++++++---------------- 1 file changed, 13 insertions(+), 16 deletions(-) diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index 47b671a46dc4..6d0aa94cf59a 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c @@ -700,7 +700,7 @@ static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu) struct net_device *dev = t->dev; struct __ip6_tnl_parm *p = &t->parms; struct flowi6 *fl6 = &t->fl.u.ip6; - int addend = sizeof(struct ipv6hdr) + 4; + int t_hlen; if (dev->type != ARPHRD_ETHER) { memcpy(dev->dev_addr, &p->laddr, sizeof(struct in6_addr)); @@ -727,16 +727,11 @@ static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu) else dev->flags &= ~IFF_POINTOPOINT; - /* Precalculate GRE options length */ - if (t->parms.o_flags&(GRE_CSUM|GRE_KEY|GRE_SEQ)) { - if (t->parms.o_flags&GRE_CSUM) - addend += 4; - if (t->parms.o_flags&GRE_KEY) - addend += 4; - if (t->parms.o_flags&GRE_SEQ) - addend += 4; - } - t->hlen = addend; + t->tun_hlen = gre_calc_hlen(t->parms.o_flags); + + t->hlen = t->tun_hlen; + + t_hlen = t->hlen + sizeof(struct ipv6hdr); if (p->flags & IP6_TNL_F_CAP_XMIT) { int strict = (ipv6_addr_type(&p->raddr) & @@ -750,10 +745,11 @@ static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu) return; if (rt->dst.dev) { - dev->hard_header_len = rt->dst.dev->hard_header_len + addend; + dev->hard_header_len = rt->dst.dev->hard_header_len + + t_hlen; if (set_mtu) { - dev->mtu = rt->dst.dev->mtu - addend; + dev->mtu = rt->dst.dev->mtu - t_hlen; if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) dev->mtu -= 8; if (dev->type == ARPHRD_ETHER) @@ -1027,11 +1023,12 @@ static int ip6gre_tunnel_init_common(struct net_device *dev) tunnel->tun_hlen = gre_calc_hlen(tunnel->parms.o_flags); + tunnel->hlen = tunnel->tun_hlen; + t_hlen = tunnel->hlen + sizeof(struct ipv6hdr); - dev->needed_headroom = LL_MAX_HEADER + t_hlen + 4; - dev->mtu = ETH_DATA_LEN - t_hlen - 4; - + dev->hard_header_len = LL_MAX_HEADER + t_hlen; + dev->mtu = ETH_DATA_LEN - t_hlen; if (!(tunnel->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) dev->mtu -= 8; From f41fe3c2acc9e40304ac1dae0f243ef27fe85dee Mon Sep 17 00:00:00 2001 From: Tom Herbert Date: Mon, 9 May 2016 17:12:09 -0700 Subject: [PATCH 1455/1649] gre6: Fix flag translations GRE for IPv6 does not properly translate for GRE flags to tunnel flags and vice versa. This patch fixes that. Signed-off-by: Tom Herbert Signed-off-by: David S. 
Miller --- net/ipv6/ip6_gre.c | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index 6d0aa94cf59a..509fb9273771 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c @@ -795,8 +795,8 @@ static void ip6gre_tnl_parm_from_user(struct __ip6_tnl_parm *p, p->link = u->link; p->i_key = u->i_key; p->o_key = u->o_key; - p->i_flags = u->i_flags; - p->o_flags = u->o_flags; + p->i_flags = gre_flags_to_tnl_flags(u->i_flags); + p->o_flags = gre_flags_to_tnl_flags(u->o_flags); memcpy(p->name, u->name, sizeof(u->name)); } @@ -813,8 +813,8 @@ static void ip6gre_tnl_parm_to_user(struct ip6_tnl_parm2 *u, u->link = p->link; u->i_key = p->i_key; u->o_key = p->o_key; - u->i_flags = p->i_flags; - u->o_flags = p->o_flags; + u->i_flags = gre_tnl_flags_to_gre_flags(p->i_flags); + u->o_flags = gre_tnl_flags_to_gre_flags(p->o_flags); memcpy(u->name, p->name, sizeof(u->name)); } @@ -1214,10 +1214,12 @@ static void ip6gre_netlink_parms(struct nlattr *data[], parms->link = nla_get_u32(data[IFLA_GRE_LINK]); if (data[IFLA_GRE_IFLAGS]) - parms->i_flags = nla_get_be16(data[IFLA_GRE_IFLAGS]); + parms->i_flags = gre_flags_to_tnl_flags( + nla_get_be16(data[IFLA_GRE_IFLAGS])); if (data[IFLA_GRE_OFLAGS]) - parms->o_flags = nla_get_be16(data[IFLA_GRE_OFLAGS]); + parms->o_flags = gre_flags_to_tnl_flags( + nla_get_be16(data[IFLA_GRE_OFLAGS])); if (data[IFLA_GRE_IKEY]) parms->i_key = nla_get_be32(data[IFLA_GRE_IKEY]); @@ -1409,8 +1411,10 @@ static int ip6gre_fill_info(struct sk_buff *skb, const struct net_device *dev) struct __ip6_tnl_parm *p = &t->parms; if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) || - nla_put_be16(skb, IFLA_GRE_IFLAGS, p->i_flags) || - nla_put_be16(skb, IFLA_GRE_OFLAGS, p->o_flags) || + nla_put_be16(skb, IFLA_GRE_IFLAGS, + gre_tnl_flags_to_gre_flags(p->i_flags)) || + nla_put_be16(skb, IFLA_GRE_OFLAGS, + gre_tnl_flags_to_gre_flags(p->o_flags)) || nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) || nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) || nla_put_in6_addr(skb, IFLA_GRE_LOCAL, &p->laddr) || From d27bff9ca294ac76548ee97db5d92f39e4752a01 Mon Sep 17 00:00:00 2001 From: Tom Herbert Date: Mon, 9 May 2016 17:12:10 -0700 Subject: [PATCH 1456/1649] ip6_gre: Set inner protocol correctly in __gre6_xmit Need to use adjusted protocol value for setting inner protocol. Signed-off-by: Tom Herbert Signed-off-by: David S. Miller --- net/ipv6/ip6_gre.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index 509fb9273771..ec209f4d3312 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c @@ -519,7 +519,7 @@ static netdev_tx_t __gre6_xmit(struct sk_buff *skb, gre_build_header(skb, tunnel->tun_hlen, tunnel->parms.o_flags, protocol, tunnel->parms.o_key, htonl(tunnel->o_seqno)); - skb_set_inner_protocol(skb, proto); + skb_set_inner_protocol(skb, protocol); return ip6_tnl_xmit(skb, dev, dsfield, fl6, encap_limit, pmtu, NEXTHDR_GRE); From 4b4a0c91438c2471d7ab9504c0096ffc47d2389b Mon Sep 17 00:00:00 2001 From: Tom Herbert Date: Mon, 9 May 2016 17:12:11 -0700 Subject: [PATCH 1457/1649] ip6: Don't set transport header in IPv6 tunneling We only need to reset network header here. Signed-off-by: Tom Herbert Signed-off-by: David S. 
Miller --- net/ipv6/ip6_tunnel.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index ade55af6ace6..50af7061ecdb 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c @@ -1114,8 +1114,6 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield, dst_cache_set_ip6(&t->dst_cache, ndst, &fl6->saddr); skb_dst_set(skb, dst); - skb->transport_header = skb->network_header; - if (encap_limit >= 0) { init_tel_txopt(&opt, encap_limit); ipv6_push_nfrag_opts(skb, &opt.ops, &proto, NULL); From b45bd1d787969d3ae5662a56ed431cd13e3b8b92 Mon Sep 17 00:00:00 2001 From: Tom Herbert Date: Mon, 9 May 2016 17:12:12 -0700 Subject: [PATCH 1458/1649] ip6_gre: Use correct flags for reading TUNNEL_SEQ Fix two spots where o_flags in a tunnel are being compared to GRE_SEQ instead of TUNNEL_SEQ. Signed-off-by: Tom Herbert Signed-off-by: David S. Miller --- net/ipv6/ip6_gre.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index ec209f4d3312..ee62ec469ab3 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c @@ -343,7 +343,7 @@ static struct ip6_tnl *ip6gre_tunnel_locate(struct net *net, goto failed_free; /* Can use a lockless transmit, unless we generate output sequences */ - if (!(nt->parms.o_flags & GRE_SEQ)) + if (!(nt->parms.o_flags & TUNNEL_SEQ)) dev->features |= NETIF_F_LLTX; dev_hold(dev); @@ -1314,7 +1314,7 @@ static int ip6gre_newlink(struct net *src_net, struct net_device *dev, dev->features |= GRE6_FEATURES; dev->hw_features |= GRE6_FEATURES; - if (!(nt->parms.o_flags & GRE_SEQ)) { + if (!(nt->parms.o_flags & TUNNEL_SEQ)) { /* TCP segmentation offload is not supported when we * generate output sequences. */ From c047c3b1af6214b447e353527e394fa3f3e86397 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Mon, 9 May 2016 21:47:23 +0200 Subject: [PATCH 1459/1649] netfilter: conntrack: remove uninitialized shadow variable A recent commit introduced an unconditional use of an uninitialized variable, as reported in this gcc warning: net/netfilter/nf_conntrack_core.c: In function '__nf_conntrack_confirm': net/netfilter/nf_conntrack_core.c:632:33: error: 'ctinfo' may be used uninitialized in this function [-Werror=maybe-uninitialized] bytes = atomic64_read(&counter[CTINFO2DIR(ctinfo)].bytes); ^ net/netfilter/nf_conntrack_core.c:628:26: note: 'ctinfo' was declared here enum ip_conntrack_info ctinfo; The problem is that a local variable shadows the function parameter. This removes the local variable, which looks like what Pablo originally intended. Signed-off-by: Arnd Bergmann Fixes: 71d8c47fc653 ("netfilter: conntrack: introduce clash resolution on insertion race") Acked-by: Pablo Neira Ayuso Signed-off-by: David S. Miller --- net/netfilter/nf_conntrack_core.c | 1 - 1 file changed, 1 deletion(-) diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index 0cd29365004f..566c64e3ec50 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -626,7 +626,6 @@ static void nf_ct_acct_merge(struct nf_conn *ct, enum ip_conntrack_info ctinfo, acct = nf_conn_acct_find(loser_ct); if (acct) { struct nf_conn_counter *counter = acct->counter; - enum ip_conntrack_info ctinfo; unsigned int bytes; /* u32 should be fine since we must have seen one packet. 
*/ From 6535db56d5453555b7a40230024225d9ec700585 Mon Sep 17 00:00:00 2001 From: Sven Eckelmann Date: Sun, 28 Feb 2016 11:38:51 +0100 Subject: [PATCH 1460/1649] batman-adv: Remove unused parameter recv_if of batadv_interface_rx Signed-off-by: Sven Eckelmann Signed-off-by: Marek Lindner Signed-off-by: Antonio Quartulli --- net/batman-adv/routing.c | 5 ++--- net/batman-adv/soft-interface.c | 5 ++--- net/batman-adv/soft-interface.h | 4 ++-- 3 files changed, 6 insertions(+), 8 deletions(-) diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c index b781bf753250..2ecfca246be4 100644 --- a/net/batman-adv/routing.c +++ b/net/batman-adv/routing.c @@ -912,7 +912,7 @@ int batadv_recv_unicast_packet(struct sk_buff *skb, hdr_size)) goto rx_success; - batadv_interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size, + batadv_interface_rx(recv_if->soft_iface, skb, hdr_size, orig_node); rx_success: @@ -1122,8 +1122,7 @@ int batadv_recv_bcast_packet(struct sk_buff *skb, goto rx_success; /* broadcast for me */ - batadv_interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size, - orig_node); + batadv_interface_rx(recv_if->soft_iface, skb, hdr_size, orig_node); rx_success: ret = NET_RX_SUCCESS; diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c index dfb4d56120b6..dc9a61a5122d 100644 --- a/net/batman-adv/soft-interface.c +++ b/net/batman-adv/soft-interface.c @@ -385,7 +385,6 @@ end: * batadv_interface_rx - receive ethernet frame on local batman-adv interface * @soft_iface: local interface which will receive the ethernet frame * @skb: ethernet frame for @soft_iface - * @recv_if: interface on which the batman-adv packet was received * @hdr_size: size of already parsed batman-adv header * @orig_node: originator from which the batman-adv packet was sent * @@ -400,8 +399,8 @@ end: * isolated clients. */ void batadv_interface_rx(struct net_device *soft_iface, - struct sk_buff *skb, struct batadv_hard_iface *recv_if, - int hdr_size, struct batadv_orig_node *orig_node) + struct sk_buff *skb, int hdr_size, + struct batadv_orig_node *orig_node) { struct batadv_bcast_packet *batadv_bcast_packet; struct batadv_priv *bat_priv = netdev_priv(soft_iface); diff --git a/net/batman-adv/soft-interface.h b/net/batman-adv/soft-interface.h index 9ae265703d23..5942da3d03d5 100644 --- a/net/batman-adv/soft-interface.h +++ b/net/batman-adv/soft-interface.h @@ -27,8 +27,8 @@ struct sk_buff; int batadv_skb_head_push(struct sk_buff *skb, unsigned int len); void batadv_interface_rx(struct net_device *soft_iface, - struct sk_buff *skb, struct batadv_hard_iface *recv_if, - int hdr_size, struct batadv_orig_node *orig_node); + struct sk_buff *skb, int hdr_size, + struct batadv_orig_node *orig_node); struct net_device *batadv_softif_create(const char *name); void batadv_softif_destroy_sysfs(struct net_device *soft_iface); int batadv_softif_is_valid(const struct net_device *net_dev); From 7142fc107274a0ebfd31e995de61e71a1e84770f Mon Sep 17 00:00:00 2001 From: Sven Eckelmann Date: Sun, 28 Feb 2016 11:38:52 +0100 Subject: [PATCH 1461/1649] batman-adv: Remove hdr_size skb size check in batadv_interface_rx The callers of batadv_interface_rx have to make sure that enough data can be pulled from the skb when they read the batman-adv header. The only two functions using it are either calling pskb_may_pull with hdr_size directly (batadv_recv_bcast_packet) or indirectly via batadv_check_unicast_packet (batadv_recv_unicast_packet). 
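For context on why the length check can live only in the callers: pskb_may_pull() ensures that at least the requested number of bytes are available in the skb's linear data area (pulling them in from fragments if necessary), so any helper that runs strictly after a successful check may read that header without validating it again. The fragment below is a generic illustration of that caller/callee contract using invented example_* names; it is not batman-adv code.

	#include <linux/netdevice.h>
	#include <linux/skbuff.h>

	/* consumes the encapsulation header; relies on the caller's length check */
	static void example_deliver(struct net_device *soft_iface,
				    struct sk_buff *skb, int hdr_size)
	{
		skb_pull_rcsum(skb, hdr_size);
		skb_reset_mac_header(skb);
		/* ... hand the inner frame to the stack ... */
	}

	static int example_recv(struct net_device *soft_iface,
				struct sk_buff *skb, int hdr_size)
	{
		/* caller-side guarantee: hdr_size bytes are linear and readable */
		if (unlikely(!pskb_may_pull(skb, hdr_size))) {
			kfree_skb(skb);
			return NET_RX_DROP;
		}

		/* safe: the header is present, no second check needed downstream */
		example_deliver(soft_iface, skb, hdr_size);

		return NET_RX_SUCCESS;
	}
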
Reported-by: Marek Lindner Signed-off-by: Sven Eckelmann Signed-off-by: Marek Lindner Signed-off-by: Antonio Quartulli --- net/batman-adv/soft-interface.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c index dc9a61a5122d..d72f88707736 100644 --- a/net/batman-adv/soft-interface.c +++ b/net/batman-adv/soft-interface.c @@ -413,10 +413,6 @@ void batadv_interface_rx(struct net_device *soft_iface, batadv_bcast_packet = (struct batadv_bcast_packet *)skb->data; is_bcast = (batadv_bcast_packet->packet_type == BATADV_BCAST); - /* check if enough space is available for pulling, and pull */ - if (!pskb_may_pull(skb, hdr_size)) - goto dropped; - skb_pull_rcsum(skb, hdr_size); skb_reset_mac_header(skb); From 0d21cdaa9bbf5efae95cfb6346d26ff6e61f8896 Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Tue, 1 Mar 2016 22:19:05 +0100 Subject: [PATCH 1462/1649] batman-adv: NETIF_F_NETNS_LOCAL feature to prevent netns moves The batX soft interface should not be moved between network name spaces. This is similar to bridges, bonds, tunnels, which are not allowed to move between network namespaces. Suggested-by: Daniel Ehlers Signed-off-by: Andrew Lunn Acked-by: Antonio Quartulli Reviewed-by: Sven Eckelmann Signed-off-by: Marek Lindner Signed-off-by: Antonio Quartulli --- net/batman-adv/soft-interface.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c index d72f88707736..66dd0aac480a 100644 --- a/net/batman-adv/soft-interface.c +++ b/net/batman-adv/soft-interface.c @@ -972,7 +972,7 @@ static void batadv_softif_init_early(struct net_device *dev) dev->netdev_ops = &batadv_netdev_ops; dev->destructor = batadv_softif_free; - dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; + dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_NETNS_LOCAL; dev->priv_flags |= IFF_NO_QUEUE; /* can't call min_mtu, because the needed variables From 2cd45a0671d9e37ab20e844fc4c84717a38b7f52 Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Thu, 21 Apr 2016 12:57:27 +0200 Subject: [PATCH 1463/1649] batman-adv: Create batman soft interfaces within correct netns. When creating a soft interface, create it in the same netns as the hard interface. Replace all references to init_net with the correct name space for the interface being manipulated. 
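As background for the init_net replacement in this patch: dev_net() returns the namespace a device currently lives in, and dev_net_set() places a not-yet-registered netdev into a chosen namespace before register_netdevice(). The sketch below shows that pattern schematically with invented example_* names; it is not the actual batman-adv implementation, which also installs its rtnl_link_ops and real netdev_ops and performs further setup.

	#include <linux/etherdevice.h>
	#include <linux/netdevice.h>
	#include <net/net_namespace.h>

	struct example_priv {
		int placeholder;	/* per-interface private data would go here */
	};

	static const struct net_device_ops example_netdev_ops = {
		/* a real driver fills in ndo_open, ndo_start_xmit, ... */
	};

	static void example_setup(struct net_device *dev)
	{
		ether_setup(dev);
		dev->netdev_ops = &example_netdev_ops;
	}

	/* caller holds RTNL; @net is dev_net(hard_iface->net_dev), not init_net */
	static struct net_device *example_softif_create(struct net *net,
							const char *name)
	{
		struct net_device *soft_iface;
		int ret;

		soft_iface = alloc_netdev(sizeof(struct example_priv), name,
					  NET_NAME_UNKNOWN, example_setup);
		if (!soft_iface)
			return NULL;

		/* put the new device in the hard interface's namespace */
		dev_net_set(soft_iface, net);

		ret = register_netdevice(soft_iface);
		if (ret < 0) {
			free_netdev(soft_iface);
			return NULL;
		}

		return soft_iface;
	}
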
Suggested-by: Daniel Ehlers Signed-off-by: Andrew Lunn Acked-by: Antonio Quartulli Signed-off-by: Sven Eckelmann Signed-off-by: Marek Lindner Signed-off-by: Antonio Quartulli --- net/batman-adv/hard-interface.c | 10 +++++----- net/batman-adv/hard-interface.h | 3 ++- net/batman-adv/soft-interface.c | 7 +++++-- net/batman-adv/soft-interface.h | 3 ++- net/batman-adv/sysfs.c | 3 ++- net/batman-adv/translation-table.c | 4 ++-- 6 files changed, 18 insertions(+), 12 deletions(-) diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c index 0a7deaf2670a..f0e1899e5b6b 100644 --- a/net/batman-adv/hard-interface.c +++ b/net/batman-adv/hard-interface.c @@ -36,7 +36,6 @@ #include #include #include -#include #include "bridge_loop_avoidance.h" #include "debugfs.h" @@ -121,6 +120,7 @@ static bool batadv_mutual_parents(const struct net_device *dev1, static bool batadv_is_on_batman_iface(const struct net_device *net_dev) { struct net_device *parent_dev; + struct net *net = dev_net(net_dev); bool ret; /* check if this is a batman-adv mesh interface */ @@ -133,7 +133,7 @@ static bool batadv_is_on_batman_iface(const struct net_device *net_dev) return false; /* recurse over the parent device */ - parent_dev = __dev_get_by_index(&init_net, dev_get_iflink(net_dev)); + parent_dev = __dev_get_by_index(net, dev_get_iflink(net_dev)); /* if we got a NULL parent_dev there is something broken.. */ if (WARN(!parent_dev, "Cannot find parent device")) return false; @@ -456,7 +456,7 @@ static int batadv_master_del_slave(struct batadv_hard_iface *slave, } int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface, - const char *iface_name) + struct net *net, const char *iface_name) { struct batadv_priv *bat_priv; struct net_device *soft_iface, *master; @@ -470,10 +470,10 @@ int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface, if (!kref_get_unless_zero(&hard_iface->refcount)) goto out; - soft_iface = dev_get_by_name(&init_net, iface_name); + soft_iface = dev_get_by_name(net, iface_name); if (!soft_iface) { - soft_iface = batadv_softif_create(iface_name); + soft_iface = batadv_softif_create(net, iface_name); if (!soft_iface) { ret = -ENOMEM; diff --git a/net/batman-adv/hard-interface.h b/net/batman-adv/hard-interface.h index d74f1983f33e..a76724d369bf 100644 --- a/net/batman-adv/hard-interface.h +++ b/net/batman-adv/hard-interface.h @@ -28,6 +28,7 @@ #include struct net_device; +struct net; enum batadv_hard_if_state { BATADV_IF_NOT_IN_USE, @@ -55,7 +56,7 @@ bool batadv_is_wifi_iface(int ifindex); struct batadv_hard_iface* batadv_hardif_get_by_netdev(const struct net_device *net_dev); int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface, - const char *iface_name); + struct net *net, const char *iface_name); void batadv_hardif_disable_interface(struct batadv_hard_iface *hard_iface, enum batadv_hard_if_cleanup autodel); void batadv_hardif_remove_interfaces(void); diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c index 66dd0aac480a..04866c9b860a 100644 --- a/net/batman-adv/soft-interface.c +++ b/net/batman-adv/soft-interface.c @@ -885,13 +885,14 @@ static int batadv_softif_slave_add(struct net_device *dev, struct net_device *slave_dev) { struct batadv_hard_iface *hard_iface; + struct net *net = dev_net(dev); int ret = -EINVAL; hard_iface = batadv_hardif_get_by_netdev(slave_dev); if (!hard_iface || hard_iface->soft_iface) goto out; - ret = batadv_hardif_enable_interface(hard_iface, dev->name); + ret = 
batadv_hardif_enable_interface(hard_iface, net, dev->name); out: if (hard_iface) @@ -988,7 +989,7 @@ static void batadv_softif_init_early(struct net_device *dev) memset(priv, 0, sizeof(*priv)); } -struct net_device *batadv_softif_create(const char *name) +struct net_device *batadv_softif_create(struct net *net, const char *name) { struct net_device *soft_iface; int ret; @@ -998,6 +999,8 @@ struct net_device *batadv_softif_create(const char *name) if (!soft_iface) return NULL; + dev_net_set(soft_iface, net); + soft_iface->rtnl_link_ops = &batadv_link_ops; ret = register_netdevice(soft_iface); diff --git a/net/batman-adv/soft-interface.h b/net/batman-adv/soft-interface.h index 5942da3d03d5..b0966342a986 100644 --- a/net/batman-adv/soft-interface.h +++ b/net/batman-adv/soft-interface.h @@ -23,13 +23,14 @@ #include struct net_device; +struct net; struct sk_buff; int batadv_skb_head_push(struct sk_buff *skb, unsigned int len); void batadv_interface_rx(struct net_device *soft_iface, struct sk_buff *skb, int hdr_size, struct batadv_orig_node *orig_node); -struct net_device *batadv_softif_create(const char *name); +struct net_device *batadv_softif_create(struct net *net, const char *name); void batadv_softif_destroy_sysfs(struct net_device *soft_iface); int batadv_softif_is_valid(const struct net_device *net_dev); extern struct rtnl_link_ops batadv_link_ops; diff --git a/net/batman-adv/sysfs.c b/net/batman-adv/sysfs.c index e7cf51333a36..6b1e54f3250a 100644 --- a/net/batman-adv/sysfs.c +++ b/net/batman-adv/sysfs.c @@ -830,6 +830,7 @@ static ssize_t batadv_store_mesh_iface(struct kobject *kobj, size_t count) { struct net_device *net_dev = batadv_kobj_to_netdev(kobj); + struct net *net = dev_net(net_dev); struct batadv_hard_iface *hard_iface; int status_tmp = -1; int ret = count; @@ -873,7 +874,7 @@ static ssize_t batadv_store_mesh_iface(struct kobject *kobj, batadv_hardif_disable_interface(hard_iface, BATADV_IF_CLEANUP_AUTO); - ret = batadv_hardif_enable_interface(hard_iface, buff); + ret = batadv_hardif_enable_interface(hard_iface, net, buff); unlock: rtnl_unlock(); diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c index 942b3aa00bed..6ea6e9bf9a8c 100644 --- a/net/batman-adv/translation-table.c +++ b/net/batman-adv/translation-table.c @@ -43,7 +43,6 @@ #include #include #include -#include #include "bridge_loop_avoidance.h" #include "hard-interface.h" @@ -585,6 +584,7 @@ bool batadv_tt_local_add(struct net_device *soft_iface, const u8 *addr, struct batadv_priv *bat_priv = netdev_priv(soft_iface); struct batadv_tt_local_entry *tt_local; struct batadv_tt_global_entry *tt_global = NULL; + struct net *net = dev_net(soft_iface); struct batadv_softif_vlan *vlan; struct net_device *in_dev = NULL; struct hlist_head *head; @@ -596,7 +596,7 @@ bool batadv_tt_local_add(struct net_device *soft_iface, const u8 *addr, u32 match_mark; if (ifindex != BATADV_NULL_IFINDEX) - in_dev = dev_get_by_index(&init_net, ifindex); + in_dev = dev_get_by_index(net, ifindex); tt_local = batadv_tt_local_hash_find(bat_priv, addr, vid); From cd9c7bfbbae81bbe75de585ac57b1097e86cf109 Mon Sep 17 00:00:00 2001 From: Simon Wunderlich Date: Sat, 12 Mar 2016 10:49:33 +0100 Subject: [PATCH 1464/1649] batman-adv: add detection for complex bridge loops There are network setups where the current bridge loop avoidance can't detect bridge loops. 
The minimal setup affected would consist of two LANs and two separate meshes, connected in a ring like that: A...(mesh1)...B | | (LAN1) (LAN2) | | C...(mesh2)...D Since both the meshes and backbones are separate, the bridge loop avoidance has not enough information to detect and avoid the loop in this case. Even if these scenarios can't be fixed easily, these kind of loops can be detected. This patch implements a periodic check (running every 60 seconds for now) which sends a broadcast frame with a random MAC address on each backbone VLAN. If a broadcast frame with the same MAC address is received shortly after on the mesh, we know that there must be a loop and report that incident as well as throw an uevent to let others handle that problem. Signed-off-by: Simon Wunderlich [sven@narfation.org: fix conflicts with current version] Signed-off-by: Sven Eckelmann Signed-off-by: Marek Lindner Signed-off-by: Antonio Quartulli --- net/batman-adv/bridge_loop_avoidance.c | 139 +++++++++++++++++++++++++ net/batman-adv/main.h | 4 + net/batman-adv/packet.h | 1 + net/batman-adv/sysfs.c | 6 +- net/batman-adv/types.h | 8 ++ 5 files changed, 156 insertions(+), 2 deletions(-) diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c index 2c9aa671a49b..5064ae5e9b34 100644 --- a/net/batman-adv/bridge_loop_avoidance.c +++ b/net/batman-adv/bridge_loop_avoidance.c @@ -50,6 +50,7 @@ #include "hash.h" #include "originator.h" #include "packet.h" +#include "sysfs.h" #include "translation-table.h" static const u8 batadv_announce_mac[4] = {0x43, 0x05, 0x43, 0x05}; @@ -407,6 +408,14 @@ static void batadv_bla_send_claim(struct batadv_priv *bat_priv, u8 *mac, ethhdr->h_source, ethhdr->h_dest, BATADV_PRINT_VID(vid)); break; + case BATADV_CLAIM_TYPE_LOOPDETECT: + ether_addr_copy(ethhdr->h_source, mac); + batadv_dbg(BATADV_DBG_BLA, bat_priv, + "bla_send_claim(): LOOPDETECT of %pM to %pM on vid %d\n", + ethhdr->h_source, ethhdr->h_dest, + BATADV_PRINT_VID(vid)); + + break; } if (vid & BATADV_VLAN_HAS_TAG) @@ -426,6 +435,36 @@ out: batadv_hardif_put(primary_if); } +/** + * batadv_bla_loopdetect_report - worker for reporting the loop + * @work: work queue item + * + * Throws an uevent, as the loopdetect check function can't do that itself + * since the kernel may sleep while throwing uevents. 
+ */ +static void batadv_bla_loopdetect_report(struct work_struct *work) +{ + struct batadv_bla_backbone_gw *backbone_gw; + struct batadv_priv *bat_priv; + char vid_str[6] = { '\0' }; + + backbone_gw = container_of(work, struct batadv_bla_backbone_gw, + report_work); + bat_priv = backbone_gw->bat_priv; + + batadv_info(bat_priv->soft_iface, + "Possible loop on VLAN %d detected which can't be handled by BLA - please check your network setup!\n", + BATADV_PRINT_VID(backbone_gw->vid)); + snprintf(vid_str, sizeof(vid_str), "%d", + BATADV_PRINT_VID(backbone_gw->vid)); + vid_str[sizeof(vid_str) - 1] = 0; + + batadv_throw_uevent(bat_priv, BATADV_UEV_BLA, BATADV_UEV_LOOPDETECT, + vid_str); + + batadv_backbone_gw_put(backbone_gw); +} + /** * batadv_bla_get_backbone_gw - finds or creates a backbone gateway * @bat_priv: the bat priv with all the soft interface information @@ -464,6 +503,7 @@ batadv_bla_get_backbone_gw(struct batadv_priv *bat_priv, u8 *orig, atomic_set(&entry->request_sent, 0); atomic_set(&entry->wait_periods, 0); ether_addr_copy(entry->orig, orig); + INIT_WORK(&entry->report_work, batadv_bla_loopdetect_report); /* one for the hash, one for returning */ kref_init(&entry->refcount); @@ -1060,6 +1100,10 @@ static int batadv_bla_process_claim(struct batadv_priv *bat_priv, if (vlan_depth > 1) return 1; + /* Let the loopdetect frames on the mesh in any case. */ + if (bla_dst->type == BATADV_CLAIM_TYPE_LOOPDETECT) + return 0; + /* check if it is a claim frame. */ ret = batadv_check_claim_group(bat_priv, primary_if, hw_src, hw_dst, ethhdr); @@ -1264,6 +1308,26 @@ void batadv_bla_update_orig_address(struct batadv_priv *bat_priv, } } +/** + * batadv_bla_send_loopdetect - send a loopdetect frame + * @bat_priv: the bat priv with all the soft interface information + * @backbone_gw: the backbone gateway for which a loop should be detected + * + * To detect loops that the bridge loop avoidance can't handle, send a loop + * detection packet on the backbone. Unlike other BLA frames, this frame will + * be allowed on the mesh by other nodes. If it is received on the mesh, this + * indicates that there is a loop. + */ +static void +batadv_bla_send_loopdetect(struct batadv_priv *bat_priv, + struct batadv_bla_backbone_gw *backbone_gw) +{ + batadv_dbg(BATADV_DBG_BLA, bat_priv, "Send loopdetect frame for vid %d\n", + backbone_gw->vid); + batadv_bla_send_claim(bat_priv, bat_priv->bla.loopdetect_addr, + backbone_gw->vid, BATADV_CLAIM_TYPE_LOOPDETECT); +} + /** * batadv_bla_status_update - purge bla interfaces if necessary * @net_dev: the soft interface net device @@ -1301,6 +1365,7 @@ static void batadv_bla_periodic_work(struct work_struct *work) struct batadv_bla_backbone_gw *backbone_gw; struct batadv_hashtable *hash; struct batadv_hard_iface *primary_if; + bool send_loopdetect = false; int i; delayed_work = to_delayed_work(work); @@ -1316,6 +1381,22 @@ static void batadv_bla_periodic_work(struct work_struct *work) if (!atomic_read(&bat_priv->bridge_loop_avoidance)) goto out; + if (atomic_dec_and_test(&bat_priv->bla.loopdetect_next)) { + /* set a new random mac address for the next bridge loop + * detection frames. Set the locally administered bit to avoid + * collisions with users mac addresses. 
+ */ + random_ether_addr(bat_priv->bla.loopdetect_addr); + bat_priv->bla.loopdetect_addr[0] = 0xba; + bat_priv->bla.loopdetect_addr[1] = 0xbe; + bat_priv->bla.loopdetect_lasttime = jiffies; + atomic_set(&bat_priv->bla.loopdetect_next, + BATADV_BLA_LOOPDETECT_PERIODS); + + /* mark for sending loop detect on all VLANs */ + send_loopdetect = true; + } + hash = bat_priv->bla.backbone_hash; if (!hash) goto out; @@ -1332,6 +1413,9 @@ static void batadv_bla_periodic_work(struct work_struct *work) backbone_gw->lasttime = jiffies; batadv_bla_send_announce(bat_priv, backbone_gw); + if (send_loopdetect) + batadv_bla_send_loopdetect(bat_priv, + backbone_gw); /* request_sent is only set after creation to avoid * problems when we are not yet known as backbone gw @@ -1405,6 +1489,9 @@ int batadv_bla_init(struct batadv_priv *bat_priv) bat_priv->bla.bcast_duplist[i].entrytime = entrytime; bat_priv->bla.bcast_duplist_curr = 0; + atomic_set(&bat_priv->bla.loopdetect_next, + BATADV_BLA_LOOPDETECT_PERIODS); + if (bat_priv->bla.claim_hash) return 0; @@ -1601,6 +1688,55 @@ void batadv_bla_free(struct batadv_priv *bat_priv) batadv_hardif_put(primary_if); } +/** + * batadv_bla_loopdetect_check - check and handle a detected loop + * @bat_priv: the bat priv with all the soft interface information + * @skb: the packet to check + * @primary_if: interface where the request came on + * @vid: the VLAN ID of the frame + * + * Checks if this packet is a loop detect frame which has been sent by us, + * throw an uevent and log the event if that is the case. + * + * Return: true if it is a loop detect frame which is to be dropped, false + * otherwise. + */ +static bool +batadv_bla_loopdetect_check(struct batadv_priv *bat_priv, struct sk_buff *skb, + struct batadv_hard_iface *primary_if, + unsigned short vid) +{ + struct batadv_bla_backbone_gw *backbone_gw; + struct ethhdr *ethhdr; + + ethhdr = eth_hdr(skb); + + /* Only check for the MAC address and skip more checks here for + * performance reasons - this function is on the hotpath, after all. + */ + if (!batadv_compare_eth(ethhdr->h_source, + bat_priv->bla.loopdetect_addr)) + return false; + + /* If the packet came too late, don't forward it on the mesh + * but don't consider that as loop. It might be a coincidence. + */ + if (batadv_has_timed_out(bat_priv->bla.loopdetect_lasttime, + BATADV_BLA_LOOPDETECT_TIMEOUT)) + return true; + + backbone_gw = batadv_bla_get_backbone_gw(bat_priv, + primary_if->net_dev->dev_addr, + vid, true); + if (unlikely(!backbone_gw)) + return true; + + queue_work(batadv_event_workqueue, &backbone_gw->report_work); + /* backbone_gw is unreferenced in the report work function function */ + + return true; +} + /** * batadv_bla_rx - check packets coming from the mesh. 
* @bat_priv: the bat priv with all the soft interface information @@ -1634,6 +1770,9 @@ int batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb, if (!atomic_read(&bat_priv->bridge_loop_avoidance)) goto allow; + if (batadv_bla_loopdetect_check(bat_priv, skb, primary_if, vid)) + goto handled; + if (unlikely(atomic_read(&bat_priv->bla.num_requests))) /* don't allow broadcasts while requests are in flight */ if (is_multicast_ether_addr(ethhdr->h_dest) && is_bcast) diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h index 07a6042d0ad6..090c6f0f2398 100644 --- a/net/batman-adv/main.h +++ b/net/batman-adv/main.h @@ -120,6 +120,8 @@ #define BATADV_BLA_BACKBONE_TIMEOUT (BATADV_BLA_PERIOD_LENGTH * 6) #define BATADV_BLA_CLAIM_TIMEOUT (BATADV_BLA_PERIOD_LENGTH * 10) #define BATADV_BLA_WAIT_PERIODS 3 +#define BATADV_BLA_LOOPDETECT_PERIODS 6 +#define BATADV_BLA_LOOPDETECT_TIMEOUT 3000 /* 3 seconds */ #define BATADV_DUPLIST_SIZE 16 #define BATADV_DUPLIST_TIMEOUT 500 /* 500 ms */ @@ -142,10 +144,12 @@ enum batadv_uev_action { BATADV_UEV_ADD = 0, BATADV_UEV_DEL, BATADV_UEV_CHANGE, + BATADV_UEV_LOOPDETECT, }; enum batadv_uev_type { BATADV_UEV_GW = 0, + BATADV_UEV_BLA, }; #define BATADV_GW_THRESHOLD 50 diff --git a/net/batman-adv/packet.h b/net/batman-adv/packet.h index 0796dfdfbb60..372128ddb474 100644 --- a/net/batman-adv/packet.h +++ b/net/batman-adv/packet.h @@ -175,6 +175,7 @@ enum batadv_bla_claimframe { BATADV_CLAIM_TYPE_UNCLAIM = 0x01, BATADV_CLAIM_TYPE_ANNOUNCE = 0x02, BATADV_CLAIM_TYPE_REQUEST = 0x03, + BATADV_CLAIM_TYPE_LOOPDETECT = 0x04, }; /** diff --git a/net/batman-adv/sysfs.c b/net/batman-adv/sysfs.c index 6b1e54f3250a..414b2074165f 100644 --- a/net/batman-adv/sysfs.c +++ b/net/batman-adv/sysfs.c @@ -116,11 +116,13 @@ batadv_kobj_to_vlan(struct batadv_priv *bat_priv, struct kobject *obj) static char *batadv_uev_action_str[] = { "add", "del", - "change" + "change", + "loopdetect", }; static char *batadv_uev_type_str[] = { - "gw" + "gw", + "bla", }; /* Use this, if you have customized show and store functions for vlan attrs */ diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h index 1e47fbe8bb7b..6a577f4f8ba7 100644 --- a/net/batman-adv/types.h +++ b/net/batman-adv/types.h @@ -657,6 +657,9 @@ struct batadv_priv_tt { * @num_requests: number of bla requests in flight * @claim_hash: hash table containing mesh nodes this host has claimed * @backbone_hash: hash table containing all detected backbone gateways + * @loopdetect_addr: MAC address used for own loopdetection frames + * @loopdetect_lasttime: time when the loopdetection frames were sent + * @loopdetect_next: how many periods to wait for the next loopdetect process * @bcast_duplist: recently received broadcast packets array (for broadcast * duplicate suppression) * @bcast_duplist_curr: index of last broadcast packet added to bcast_duplist @@ -668,6 +671,9 @@ struct batadv_priv_bla { atomic_t num_requests; struct batadv_hashtable *claim_hash; struct batadv_hashtable *backbone_hash; + u8 loopdetect_addr[ETH_ALEN]; + unsigned long loopdetect_lasttime; + atomic_t loopdetect_next; struct batadv_bcast_duplist_entry bcast_duplist[BATADV_DUPLIST_SIZE]; int bcast_duplist_curr; /* protects bcast_duplist & bcast_duplist_curr */ @@ -1012,6 +1018,7 @@ struct batadv_socket_packet { * resolved * @crc: crc16 checksum over all claims * @crc_lock: lock protecting crc + * @report_work: work struct for reporting detected loops * @refcount: number of contexts the object is used * @rcu: struct used for freeing in an RCU-safe manner */ 
@@ -1025,6 +1032,7 @@ struct batadv_bla_backbone_gw { atomic_t request_sent; u16 crc; spinlock_t crc_lock; /* protects crc */ + struct work_struct report_work; struct kref refcount; struct rcu_head rcu; }; From 273534468f050744b32054f84a1e20ee6b5bd329 Mon Sep 17 00:00:00 2001 From: Sven Eckelmann Date: Sat, 5 Mar 2016 16:09:16 +0100 Subject: [PATCH 1465/1649] batman-adv: Check hard_iface refcnt before calling function The batadv_hardif_list list is checked in many situations and the items in this list are given to specialized functions to modify the routing behavior. At the moment each of these called functions has to check itself whether the received batadv_hard_iface has a refcount > 0 before it can increase the reference counter and use it in other objects. This can easily lead to problems because it is not easily visible where all callers of a function got the batadv_hard_iface object from and whether they already hold a valid reference. Checking the reference counter directly before calling a subfunction with a pointer from the batadv_hardif_list avoids this problem. Signed-off-by: Sven Eckelmann Signed-off-by: Marek Lindner Signed-off-by: Antonio Quartulli --- net/batman-adv/bat_iv_ogm.c | 11 +++++++++++ net/batman-adv/bat_v_ogm.c | 14 +++++++++++++- net/batman-adv/originator.c | 5 +++++ net/batman-adv/send.c | 6 ++++++ 4 files changed, 35 insertions(+), 1 deletion(-) diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c index 8c1710bba803..57e9962c7090 100644 --- a/net/batman-adv/bat_iv_ogm.c +++ b/net/batman-adv/bat_iv_ogm.c @@ -987,9 +987,15 @@ static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface) list_for_each_entry_rcu(tmp_hard_iface, &batadv_hardif_list, list) { if (tmp_hard_iface->soft_iface != hard_iface->soft_iface) continue; + + if (!kref_get_unless_zero(&tmp_hard_iface->refcount)) + continue; + batadv_iv_ogm_queue_add(bat_priv, *ogm_buff, *ogm_buff_len, hard_iface, tmp_hard_iface, 1, send_time); + + batadv_hardif_put(tmp_hard_iface); } rcu_read_unlock(); @@ -1767,8 +1773,13 @@ static void batadv_iv_ogm_process(const struct sk_buff *skb, int ogm_offset, if (hard_iface->soft_iface != bat_priv->soft_iface) continue; + if (!kref_get_unless_zero(&hard_iface->refcount)) + continue; + batadv_iv_ogm_process_per_outif(skb, ogm_offset, orig_node, if_incoming, hard_iface); + + batadv_hardif_put(hard_iface); } rcu_read_unlock(); diff --git a/net/batman-adv/bat_v_ogm.c b/net/batman-adv/bat_v_ogm.c index 4155fa57cf6d..473ebb9a0e73 100644 --- a/net/batman-adv/bat_v_ogm.c +++ b/net/batman-adv/bat_v_ogm.c @@ -26,6 +26,7 @@ #include #include #include +#include #include #include #include @@ -176,6 +177,9 @@ static void batadv_v_ogm_send(struct work_struct *work) if (hard_iface->soft_iface != bat_priv->soft_iface) continue; + if (!kref_get_unless_zero(&hard_iface->refcount)) + continue; + batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "Sending own OGM2 packet (originator %pM, seqno %u, throughput %u, TTL %d) on interface %s [%pM]\n", ogm_packet->orig, ntohl(ogm_packet->seqno), @@ -185,10 +189,13 @@ static void batadv_v_ogm_send(struct work_struct *work) /* this skb gets consumed by batadv_v_ogm_send_to_if() */ skb_tmp = skb_clone(skb, GFP_ATOMIC); - if (!skb_tmp) + if (!skb_tmp) { + batadv_hardif_put(hard_iface); break; + } batadv_v_ogm_send_to_if(skb_tmp, hard_iface); + batadv_hardif_put(hard_iface); } rcu_read_unlock(); @@ -704,9 +711,14 @@ static void batadv_v_ogm_process(const struct sk_buff *skb, int ogm_offset, if (hard_iface->soft_iface != 
bat_priv->soft_iface) continue; + if (!kref_get_unless_zero(&hard_iface->refcount)) + continue; + batadv_v_ogm_process_per_outif(bat_priv, ethhdr, ogm_packet, orig_node, neigh_node, if_incoming, hard_iface); + + batadv_hardif_put(hard_iface); } rcu_read_unlock(); out: diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c index f885a41d06d5..2ed2cc89a669 100644 --- a/net/batman-adv/originator.c +++ b/net/batman-adv/originator.c @@ -1160,6 +1160,9 @@ static bool batadv_purge_orig_node(struct batadv_priv *bat_priv, if (hard_iface->soft_iface != bat_priv->soft_iface) continue; + if (!kref_get_unless_zero(&hard_iface->refcount)) + continue; + best_neigh_node = batadv_find_best_neighbor(bat_priv, orig_node, hard_iface); @@ -1167,6 +1170,8 @@ static bool batadv_purge_orig_node(struct batadv_priv *bat_priv, best_neigh_node); if (best_neigh_node) batadv_neigh_node_put(best_neigh_node); + + batadv_hardif_put(hard_iface); } rcu_read_unlock(); diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c index 99ea9001cf8a..f2f125684ed9 100644 --- a/net/batman-adv/send.c +++ b/net/batman-adv/send.c @@ -26,6 +26,7 @@ #include #include #include +#include #include #include #include @@ -577,10 +578,15 @@ static void batadv_send_outstanding_bcast_packet(struct work_struct *work) if (forw_packet->num_packets >= hard_iface->num_bcasts) continue; + if (!kref_get_unless_zero(&hard_iface->refcount)) + continue; + /* send a copy of the saved skb */ skb1 = skb_clone(forw_packet->skb, GFP_ATOMIC); if (skb1) batadv_send_broadcast_skb(skb1, hard_iface); + + batadv_hardif_put(hard_iface); } rcu_read_unlock(); From 4fe56e60ac1be4d103f64743d0a36fd31a70657c Mon Sep 17 00:00:00 2001 From: Sven Eckelmann Date: Sat, 5 Mar 2016 16:09:17 +0100 Subject: [PATCH 1466/1649] batman-adv: Check hard_iface refcnt when receiving skb The receive function may start processing an incoming packet while the hard_iface is shut down in a different context. All called functions called with the batadv_hard_iface object belonging to the incoming interface would have to check whether the reference counter is still > 0. This is rather error-prone because this check can be forgotten easily. Instead check the reference counter when receiving the object to make sure that all called functions have a valid reference. Signed-off-by: Sven Eckelmann Signed-off-by: Marek Lindner Signed-off-by: Antonio Quartulli --- net/batman-adv/main.c | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c index 78c05a91ae6f..c8d8bc78a518 100644 --- a/net/batman-adv/main.c +++ b/net/batman-adv/main.c @@ -401,11 +401,19 @@ int batadv_batman_skb_recv(struct sk_buff *skb, struct net_device *dev, hard_iface = container_of(ptype, struct batadv_hard_iface, batman_adv_ptype); + + /* Prevent processing a packet received on an interface which is getting + * shut down otherwise the packet may trigger de-reference errors + * further down in the receive path. 
+ */ + if (!kref_get_unless_zero(&hard_iface->refcount)) + goto err_out; + skb = skb_share_check(skb, GFP_ATOMIC); /* skb was released by skb_share_check() */ if (!skb) - goto err_out; + goto err_put; /* packet should hold at least type and version */ if (unlikely(!pskb_may_pull(skb, 2))) @@ -448,6 +456,8 @@ int batadv_batman_skb_recv(struct sk_buff *skb, struct net_device *dev, if (ret == NET_RX_DROP) kfree_skb(skb); + batadv_hardif_put(hard_iface); + /* return NET_RX_SUCCESS in any case as we * most probably dropped the packet for * routing-logical reasons. @@ -456,6 +466,8 @@ int batadv_batman_skb_recv(struct sk_buff *skb, struct net_device *dev, err_free: kfree_skb(skb); +err_put: + batadv_hardif_put(hard_iface); err_out: return NET_RX_DROP; } From d7d6de9530db7e385a05e1ae6cde642a617e6c89 Mon Sep 17 00:00:00 2001 From: Sven Eckelmann Date: Sat, 5 Mar 2016 16:09:18 +0100 Subject: [PATCH 1467/1649] batman-adv: Increase hard_iface refcnt for ptype The hard_iface is referenced in the packet_type for batman-adv. Increase the refcounter of the hard_interface so that there is an explicit reference for it in case this functionality gets refactored and the currently used implicit reference is removed. Signed-off-by: Sven Eckelmann Signed-off-by: Marek Lindner Signed-off-by: Antonio Quartulli --- net/batman-adv/hard-interface.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c index f0e1899e5b6b..d3d37f3f99cf 100644 --- a/net/batman-adv/hard-interface.c +++ b/net/batman-adv/hard-interface.c @@ -522,6 +522,7 @@ int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface, goto err_upper; } + kref_get(&hard_iface->refcount); hard_iface->batman_adv_ptype.type = ethertype; hard_iface->batman_adv_ptype.func = batadv_batman_skb_recv; hard_iface->batman_adv_ptype.dev = hard_iface->net_dev; @@ -583,6 +584,7 @@ void batadv_hardif_disable_interface(struct batadv_hard_iface *hard_iface, batadv_info(hard_iface->soft_iface, "Removing interface: %s\n", hard_iface->net_dev->name); dev_remove_pack(&hard_iface->batman_adv_ptype); + batadv_hardif_put(hard_iface); bat_priv->num_ifaces--; batadv_orig_hash_del_if(hard_iface, bat_priv->num_ifaces); From c9dad805e9f4fd3978f22c970bae49eaa00b46dd Mon Sep 17 00:00:00 2001 From: Sven Eckelmann Date: Sat, 5 Mar 2016 16:09:20 +0100 Subject: [PATCH 1468/1649] batman-adv: Use kref_get for batadv_tvlv_container_get batadv_tvlv_container_get requires that tvlv.container_list_lock is held by the caller. It is therefore not possible that an item in tvlv.container_list has a reference counter of 0 and is still in the list. The kref_get function instead WARNs (with debug information) when the reference counter would still be 0. This makes a bug in batman-adv better visible because kref_get_unless_zero would have ignored this problem.
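The rule of thumb behind this and the following kref conversions can be sketched with a small, self-contained example. The foo structure, list and helpers below are hypothetical (not batman-adv code); they only illustrate why plain kref_get() is the right call when a lock already guarantees that the list still owns a reference:

#include <linux/kref.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct foo {
	struct list_head list;		/* entry in foo_list, owns one reference */
	struct kref refcount;
};

static LIST_HEAD(foo_list);
static DEFINE_SPINLOCK(foo_list_lock);

static void foo_release(struct kref *ref)
{
	struct foo *foo = container_of(ref, struct foo, refcount);

	kfree(foo);
}

/* Caller must hold foo_list_lock, just like batadv_tvlv_container_get()
 * expects tvlv.container_list_lock to be held.
 */
static struct foo *foo_get_first(void)
{
	struct foo *foo;

	lockdep_assert_held(&foo_list_lock);

	foo = list_first_entry_or_null(&foo_list, struct foo, list);
	if (!foo)
		return NULL;

	/* The list holds a reference as long as the lock is held, so the
	 * counter cannot be 0 here. kref_get() WARNs loudly if this
	 * invariant is ever broken; kref_get_unless_zero() would just
	 * skip the entry and hide the bug.
	 */
	kref_get(&foo->refcount);

	return foo;
}

static void foo_put(struct foo *foo)
{
	kref_put(&foo->refcount, foo_release);
}

batadv_tvlv_container_get() is exactly this situation: container_list_lock is held, so every tvlv_tmp still on the list must have a non-zero counter.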
Signed-off-by: Sven Eckelmann Signed-off-by: Marek Lindner Signed-off-by: Antonio Quartulli --- net/batman-adv/main.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c index c8d8bc78a518..5f2974bd1227 100644 --- a/net/batman-adv/main.c +++ b/net/batman-adv/main.c @@ -748,9 +748,7 @@ batadv_tvlv_container_get(struct batadv_priv *bat_priv, u8 type, u8 version) if (tvlv_tmp->tvlv_hdr.version != version) continue; - if (!kref_get_unless_zero(&tvlv_tmp->refcount)) - continue; - + kref_get(&tvlv_tmp->refcount); tvlv = tvlv_tmp; break; } From 0de32ceee156787429035c974316f4e5098cf722 Mon Sep 17 00:00:00 2001 From: Sven Eckelmann Date: Sat, 5 Mar 2016 16:09:21 +0100 Subject: [PATCH 1469/1649] batman-adv: Use kref_get for batadv_nc_get_nc_node batadv_nc_get_nc_node requires that the caller already has a valid reference for orig_neigh_node. It is therefore not possible that it has a reference counter of 0 and was still given to this function. The kref_get function instead WARNs (with debug information) when the reference counter would still be 0. This makes a bug in batman-adv better visible because kref_get_unless_zero would have ignored this problem. Signed-off-by: Sven Eckelmann Signed-off-by: Marek Lindner Signed-off-by: Antonio Quartulli --- net/batman-adv/network-coding.c | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/net/batman-adv/network-coding.c b/net/batman-adv/network-coding.c index 1da8e0e1b18f..953dff1ad43b 100644 --- a/net/batman-adv/network-coding.c +++ b/net/batman-adv/network-coding.c @@ -856,8 +856,7 @@ batadv_nc_get_nc_node(struct batadv_priv *bat_priv, if (!nc_node) return NULL; - if (!kref_get_unless_zero(&orig_neigh_node->refcount)) - goto free; + kref_get(&orig_neigh_node->refcount); /* Initialize nc_node */ INIT_LIST_HEAD(&nc_node->list); @@ -884,10 +883,6 @@ batadv_nc_get_nc_node(struct batadv_priv *bat_priv, spin_unlock_bh(lock); return nc_node; - -free: - kfree(nc_node); - return NULL; } /** From a08d497d6718d579e496a801115aecc1c4fbb770 Mon Sep 17 00:00:00 2001 From: Sven Eckelmann Date: Sat, 5 Mar 2016 16:09:22 +0100 Subject: [PATCH 1470/1649] batman-adv: Use kref_get for batadv_gw_select batadv_gw_select requires that the caller already has a valid reference for new_gw_node. It is therefore not possible that it has a reference counter of 0 and was still given to this function. The kref_get function instead WARNs (with debug information) when the reference counter would still be 0. This makes a bug in batman-adv better visible because kref_get_unless_zero would have ignored this problem.
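batadv_gw_select() follows a select-and-publish pattern that can be sketched roughly as below. The bar structure and names are made up for illustration: the caller's own reference keeps the object alive across the call, and the function takes an additional reference only for the pointer it publishes.

#include <linux/kref.h>
#include <linux/lockdep.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct bar {
	struct kref refcount;
	struct rcu_head rcu;
};

static struct bar __rcu *cur_bar;
static DEFINE_SPINLOCK(cur_bar_lock);

static void bar_release(struct kref *ref)
{
	struct bar *bar = container_of(ref, struct bar, refcount);

	kfree_rcu(bar, rcu);
}

/* new_bar is protected by the caller's reference, so its counter is
 * known to be > 0 and plain kref_get() is correct.
 */
static void bar_select(struct bar *new_bar)
{
	struct bar *old_bar;

	spin_lock_bh(&cur_bar_lock);

	if (new_bar)
		kref_get(&new_bar->refcount);	/* reference for cur_bar */

	old_bar = rcu_dereference_protected(cur_bar,
					    lockdep_is_held(&cur_bar_lock));
	rcu_assign_pointer(cur_bar, new_bar);

	spin_unlock_bh(&cur_bar_lock);

	/* drop the reference that cur_bar held on the previous object */
	if (old_bar)
		kref_put(&old_bar->refcount, bar_release);
}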
Signed-off-by: Sven Eckelmann Signed-off-by: Marek Lindner Signed-off-by: Antonio Quartulli --- net/batman-adv/gateway_client.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c index c59aff5ccac8..bb1c4f37716e 100644 --- a/net/batman-adv/gateway_client.c +++ b/net/batman-adv/gateway_client.c @@ -135,8 +135,8 @@ static void batadv_gw_select(struct batadv_priv *bat_priv, spin_lock_bh(&bat_priv->gw.list_lock); - if (new_gw_node && !kref_get_unless_zero(&new_gw_node->refcount)) - new_gw_node = NULL; + if (new_gw_node) + kref_get(&new_gw_node->refcount); curr_gw_node = rcu_dereference_protected(bat_priv->gw.curr_gw, 1); rcu_assign_pointer(bat_priv->gw.curr_gw, new_gw_node); From c3ba37a778ecab4f8ddb117a2ceff3e13184a7db Mon Sep 17 00:00:00 2001 From: Sven Eckelmann Date: Sat, 5 Mar 2016 16:09:23 +0100 Subject: [PATCH 1471/1649] batman-adv: Use kref_get for batadv_gw_node_add batadv_gw_node_add requires that the caller already has a valid reference for orig_node. It is therefore not possible that it has a reference counter of 0 and was still given to this function. The kref_get function instead WARNs (with debug information) when the reference counter would still be 0. This makes a bug in batman-adv better visible because kref_get_unless_zero would have ignored this problem. Signed-off-by: Sven Eckelmann Signed-off-by: Marek Lindner Signed-off-by: Antonio Quartulli --- net/batman-adv/gateway_client.c | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c index bb1c4f37716e..5839c569f769 100644 --- a/net/batman-adv/gateway_client.c +++ b/net/batman-adv/gateway_client.c @@ -440,15 +440,11 @@ static void batadv_gw_node_add(struct batadv_priv *bat_priv, if (gateway->bandwidth_down == 0) return; - if (!kref_get_unless_zero(&orig_node->refcount)) - return; - gw_node = kzalloc(sizeof(*gw_node), GFP_ATOMIC); - if (!gw_node) { - batadv_orig_node_put(orig_node); + if (!gw_node) return; - } + kref_get(&orig_node->refcount); INIT_HLIST_NODE(&gw_node->list); gw_node->orig_node = orig_node; gw_node->bandwidth_down = ntohl(gateway->bandwidth_down); From 17a8691502c9d2d792cfea7253b17382279ffb3e Mon Sep 17 00:00:00 2001 From: Sven Eckelmann Date: Mon, 11 Apr 2016 13:06:40 +0200 Subject: [PATCH 1472/1649] batman-adv: Use kref_get for hard_iface subfunctions The callers of the functions using batadv_hard_iface objects already make sure that they hold a valid reference. The subfunctions don't have to check whether the reference counter is > 0 because this was checked by the callers. The kref_get function instead WARNs (with debug information) when the reference counter would still be 0. This makes a bug in batman-adv better visible because kref_get_unless_zero would have ignored this problem.
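The constructor pattern used by these subfunctions after the change can be sketched as follows; iface and neigh are hypothetical stand-ins for batadv_hard_iface and batadv_neigh_node. Because the caller guarantees a live reference, the new reference is taken only after the allocation succeeded, right where the pointer is stored, and the error path needs no unwinding:

#include <linux/gfp.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct iface {
	struct kref refcount;
};

struct neigh {
	struct iface *if_incoming;
	struct kref refcount;
};

static struct neigh *neigh_new(struct iface *iface)
{
	struct neigh *neigh;

	neigh = kzalloc(sizeof(*neigh), GFP_ATOMIC);
	if (!neigh)
		return NULL;

	kref_init(&neigh->refcount);

	/* the caller holds a reference on iface, so the counter is > 0 */
	kref_get(&iface->refcount);
	neigh->if_incoming = iface;

	return neigh;
}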
Signed-off-by: Sven Eckelmann Signed-off-by: Marek Lindner Signed-off-by: Antonio Quartulli --- net/batman-adv/bat_iv_ogm.c | 14 +++----------- net/batman-adv/hard-interface.c | 7 +++---- net/batman-adv/originator.c | 30 +++++++----------------------- 3 files changed, 13 insertions(+), 38 deletions(-) diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c index 57e9962c7090..eb3435de54b5 100644 --- a/net/batman-adv/bat_iv_ogm.c +++ b/net/batman-adv/bat_iv_ogm.c @@ -681,18 +681,12 @@ static void batadv_iv_ogm_aggregate_new(const unsigned char *packet_buff, unsigned char *skb_buff; unsigned int skb_size; - if (!kref_get_unless_zero(&if_incoming->refcount)) - return; - - if (!kref_get_unless_zero(&if_outgoing->refcount)) - goto out_free_incoming; - /* own packet should always be scheduled */ if (!own_packet) { if (!batadv_atomic_dec_not_zero(&bat_priv->batman_queue_left)) { batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "batman packet queue full\n"); - goto out_free_outgoing; + return; } } @@ -718,6 +712,8 @@ static void batadv_iv_ogm_aggregate_new(const unsigned char *packet_buff, forw_packet_aggr->packet_len = packet_len; memcpy(skb_buff, packet_buff, packet_len); + kref_get(&if_incoming->refcount); + kref_get(&if_outgoing->refcount); forw_packet_aggr->own = own_packet; forw_packet_aggr->if_incoming = if_incoming; forw_packet_aggr->if_outgoing = if_outgoing; @@ -747,10 +743,6 @@ out_free_forw_packet: out_nomem: if (!own_packet) atomic_inc(&bat_priv->batman_queue_left); -out_free_outgoing: - batadv_hardif_put(if_outgoing); -out_free_incoming: - batadv_hardif_put(if_incoming); } /* aggregate a new packet into the existing ogm packet */ diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c index d3d37f3f99cf..7c1d8d7ac548 100644 --- a/net/batman-adv/hard-interface.c +++ b/net/batman-adv/hard-interface.c @@ -236,8 +236,8 @@ static void batadv_primary_if_select(struct batadv_priv *bat_priv, ASSERT_RTNL(); - if (new_hard_iface && !kref_get_unless_zero(&new_hard_iface->refcount)) - new_hard_iface = NULL; + if (new_hard_iface) + kref_get(&new_hard_iface->refcount); curr_hard_iface = rcu_dereference_protected(bat_priv->primary_if, 1); rcu_assign_pointer(bat_priv->primary_if, new_hard_iface); @@ -467,8 +467,7 @@ int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface, if (hard_iface->if_status != BATADV_IF_NOT_IN_USE) goto out; - if (!kref_get_unless_zero(&hard_iface->refcount)) - goto out; + kref_get(&hard_iface->refcount); soft_iface = dev_get_by_name(net, iface_name); diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c index 2ed2cc89a669..04fa139911c3 100644 --- a/net/batman-adv/originator.c +++ b/net/batman-adv/originator.c @@ -374,12 +374,8 @@ batadv_orig_ifinfo_new(struct batadv_orig_node *orig_node, if (!orig_ifinfo) goto out; - if (if_outgoing != BATADV_IF_DEFAULT && - !kref_get_unless_zero(&if_outgoing->refcount)) { - kfree(orig_ifinfo); - orig_ifinfo = NULL; - goto out; - } + if (if_outgoing != BATADV_IF_DEFAULT) + kref_get(&if_outgoing->refcount); reset_time = jiffies - 1; reset_time -= msecs_to_jiffies(BATADV_RESET_PROTECTION_MS); @@ -455,11 +451,8 @@ batadv_neigh_ifinfo_new(struct batadv_neigh_node *neigh, if (!neigh_ifinfo) goto out; - if (if_outgoing && !kref_get_unless_zero(&if_outgoing->refcount)) { - kfree(neigh_ifinfo); - neigh_ifinfo = NULL; - goto out; - } + if (if_outgoing) + kref_get(&if_outgoing->refcount); INIT_HLIST_NODE(&neigh_ifinfo->list); kref_init(&neigh_ifinfo->refcount); @@ -532,15 +525,11 @@ 
batadv_hardif_neigh_create(struct batadv_hard_iface *hard_iface, if (hardif_neigh) goto out; - if (!kref_get_unless_zero(&hard_iface->refcount)) - goto out; - hardif_neigh = kzalloc(sizeof(*hardif_neigh), GFP_ATOMIC); - if (!hardif_neigh) { - batadv_hardif_put(hard_iface); + if (!hardif_neigh) goto out; - } + kref_get(&hard_iface->refcount); INIT_HLIST_NODE(&hardif_neigh->list); ether_addr_copy(hardif_neigh->addr, neigh_addr); hardif_neigh->if_incoming = hard_iface; @@ -643,16 +632,11 @@ batadv_neigh_node_new(struct batadv_orig_node *orig_node, if (!neigh_node) goto out; - if (!kref_get_unless_zero(&hard_iface->refcount)) { - kfree(neigh_node); - neigh_node = NULL; - goto out; - } - INIT_HLIST_NODE(&neigh_node->list); INIT_HLIST_HEAD(&neigh_node->ifinfo_list); spin_lock_init(&neigh_node->ifinfo_lock); + kref_get(&hard_iface->refcount); ether_addr_copy(neigh_node->addr, neigh_addr); neigh_node->if_incoming = hard_iface; neigh_node->orig_node = orig_node; From f0b94ebccd2b924237ca7a101da3db70c3a8f0f2 Mon Sep 17 00:00:00 2001 From: Sven Eckelmann Date: Sat, 5 Mar 2016 19:05:24 +0100 Subject: [PATCH 1473/1649] batman-adv: Use kref_get for _batadv_update_route _batadv_update_route requires that the caller already has a valid reference for neigh_node. It is therefore not possible that it has a reference counter of 0 and was still given to this function. The kref_get function instead WARNs (with debug information) when the reference counter would still be 0. This makes a bug in batman-adv better visible because kref_get_unless_zero would have ignored this problem. Signed-off-by: Sven Eckelmann Signed-off-by: Marek Lindner Signed-off-by: Antonio Quartulli --- net/batman-adv/routing.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c index 2ecfca246be4..b494e435686f 100644 --- a/net/batman-adv/routing.c +++ b/net/batman-adv/routing.c @@ -100,10 +100,6 @@ static void _batadv_update_route(struct batadv_priv *bat_priv, if (curr_router) batadv_neigh_node_put(curr_router); - /* increase refcount of new best neighbor */ - if (neigh_node && !kref_get_unless_zero(&neigh_node->refcount)) - neigh_node = NULL; - spin_lock_bh(&orig_node->neigh_list_lock); /* curr_router used earlier may not be the current orig_ifinfo->router * anymore because it was dereferenced outside of the neigh_list_lock @@ -114,6 +110,10 @@ static void _batadv_update_route(struct batadv_priv *bat_priv, */ curr_router = rcu_dereference_protected(orig_ifinfo->router, true); + /* increase refcount of new best neighbor */ + if (neigh_node) + kref_get(&neigh_node->refcount); + rcu_assign_pointer(orig_ifinfo->router, neigh_node); spin_unlock_bh(&orig_node->neigh_list_lock); batadv_orig_ifinfo_put(orig_ifinfo); From 4b426b108ac82b27f5af40df7da05a2501fd2aca Mon Sep 17 00:00:00 2001 From: Sven Eckelmann Date: Mon, 22 Feb 2016 21:02:39 +0100 Subject: [PATCH 1474/1649] batman-adv: Use bool as return type for boolean functions It is easier to understand that the returned value of a specific function doesn't have to be 0 when the function was successful if the actual return type is bool. This is especially true when all surrounding functions with return type int use negative values to return the error code.
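As a rough illustration of the convention this patch moves to (the helpers below are hypothetical, not part of the patch): predicate-style functions return bool and use true/false, while functions that can actually fail keep int and negative error codes.

#include <linux/errno.h>
#include <linux/if_ether.h>
#include <linux/types.h>

/* predicate: answers a yes/no question, so bool is the natural type */
static bool frame_is_short(unsigned int len)
{
	return len < ETH_HLEN;
}

/* fallible operation: keeps int and the negative errno convention */
static int frame_send(const u8 *data, unsigned int len)
{
	if (!data || frame_is_short(len))
		return -EINVAL;

	/* ... hand the frame to the lower layer ... */
	return 0;
}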
Reported-by: Nicholas Krause Signed-off-by: Sven Eckelmann Signed-off-by: Marek Lindner Signed-off-by: Antonio Quartulli --- net/batman-adv/bat_iv_ogm.c | 23 ++-- net/batman-adv/bitarray.c | 16 +-- net/batman-adv/bitarray.h | 15 ++- net/batman-adv/bridge_loop_avoidance.c | 175 +++++++++++++------------ net/batman-adv/bridge_loop_avoidance.h | 43 +++--- net/batman-adv/debugfs.c | 2 +- net/batman-adv/distributed-arp-table.c | 6 +- net/batman-adv/hard-interface.c | 15 +-- net/batman-adv/hash.h | 6 +- net/batman-adv/main.h | 2 +- net/batman-adv/network-coding.c | 12 +- net/batman-adv/originator.c | 4 +- net/batman-adv/originator.h | 2 +- net/batman-adv/routing.c | 37 +++--- net/batman-adv/routing.h | 6 +- net/batman-adv/soft-interface.c | 6 +- net/batman-adv/soft-interface.h | 3 +- net/batman-adv/translation-table.c | 31 ++--- 18 files changed, 205 insertions(+), 199 deletions(-) diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c index eb3435de54b5..7f98a9d39883 100644 --- a/net/batman-adv/bat_iv_ogm.c +++ b/net/batman-adv/bat_iv_ogm.c @@ -1168,13 +1168,13 @@ out: * @if_incoming: interface where the packet was received * @if_outgoing: interface for which the retransmission should be considered * - * Return: 1 if the link can be considered bidirectional, 0 otherwise + * Return: true if the link can be considered bidirectional, false otherwise */ -static int batadv_iv_ogm_calc_tq(struct batadv_orig_node *orig_node, - struct batadv_orig_node *orig_neigh_node, - struct batadv_ogm_packet *batadv_ogm_packet, - struct batadv_hard_iface *if_incoming, - struct batadv_hard_iface *if_outgoing) +static bool batadv_iv_ogm_calc_tq(struct batadv_orig_node *orig_node, + struct batadv_orig_node *orig_neigh_node, + struct batadv_ogm_packet *batadv_ogm_packet, + struct batadv_hard_iface *if_incoming, + struct batadv_hard_iface *if_outgoing) { struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface); struct batadv_neigh_node *neigh_node = NULL, *tmp_neigh_node; @@ -1182,9 +1182,10 @@ static int batadv_iv_ogm_calc_tq(struct batadv_orig_node *orig_node, u8 total_count; u8 orig_eq_count, neigh_rq_count, neigh_rq_inv, tq_own; unsigned int neigh_rq_inv_cube, neigh_rq_max_cube; - int tq_asym_penalty, inv_asym_penalty, if_num, ret = 0; + int tq_asym_penalty, inv_asym_penalty, if_num; unsigned int combined_tq; int tq_iface_penalty; + bool ret = false; /* find corresponding one hop neighbor */ rcu_read_lock(); @@ -1296,7 +1297,7 @@ static int batadv_iv_ogm_calc_tq(struct batadv_orig_node *orig_node, * consider it bidirectional */ if (batadv_ogm_packet->tq >= BATADV_TQ_TOTAL_BIDRECT_LIMIT) - ret = 1; + ret = true; out: if (neigh_node) @@ -1325,9 +1326,9 @@ batadv_iv_ogm_update_seqnos(const struct ethhdr *ethhdr, struct batadv_orig_ifinfo *orig_ifinfo = NULL; struct batadv_neigh_node *neigh_node; struct batadv_neigh_ifinfo *neigh_ifinfo; - int is_dup; + bool is_dup; s32 seq_diff; - int need_update = 0; + bool need_update = false; int set_mark; enum batadv_dup_status ret = BATADV_NO_DUP; u32 seqno = ntohl(batadv_ogm_packet->seqno); @@ -1437,7 +1438,7 @@ batadv_iv_ogm_process_per_outif(const struct sk_buff *skb, int ogm_offset, struct sk_buff *skb_priv; struct ethhdr *ethhdr; u8 *prev_sender; - int is_bidirect; + bool is_bidirect; /* create a private copy of the skb, as some functions change tq value * and/or flags. 
diff --git a/net/batman-adv/bitarray.c b/net/batman-adv/bitarray.c index b56bb000a0ab..a0c7913837a5 100644 --- a/net/batman-adv/bitarray.c +++ b/net/batman-adv/bitarray.c @@ -38,11 +38,11 @@ static void batadv_bitmap_shift_left(unsigned long *seq_bits, s32 n) * the last sequence number * @set_mark: whether this packet should be marked in seq_bits * - * Return: 1 if the window was moved (either new or very old), - * 0 if the window was not moved/shifted. + * Return: true if the window was moved (either new or very old), + * false if the window was not moved/shifted. */ -int batadv_bit_get_packet(void *priv, unsigned long *seq_bits, s32 seq_num_diff, - int set_mark) +bool batadv_bit_get_packet(void *priv, unsigned long *seq_bits, + s32 seq_num_diff, int set_mark) { struct batadv_priv *bat_priv = priv; @@ -52,7 +52,7 @@ int batadv_bit_get_packet(void *priv, unsigned long *seq_bits, s32 seq_num_diff, if (seq_num_diff <= 0 && seq_num_diff > -BATADV_TQ_LOCAL_WINDOW_SIZE) { if (set_mark) batadv_set_bit(seq_bits, -seq_num_diff); - return 0; + return false; } /* sequence number is slightly newer, so we shift the window and @@ -63,7 +63,7 @@ int batadv_bit_get_packet(void *priv, unsigned long *seq_bits, s32 seq_num_diff, if (set_mark) batadv_set_bit(seq_bits, 0); - return 1; + return true; } /* sequence number is much newer, probably missed a lot of packets */ @@ -75,7 +75,7 @@ int batadv_bit_get_packet(void *priv, unsigned long *seq_bits, s32 seq_num_diff, bitmap_zero(seq_bits, BATADV_TQ_LOCAL_WINDOW_SIZE); if (set_mark) batadv_set_bit(seq_bits, 0); - return 1; + return true; } /* received a much older packet. The other host either restarted @@ -94,5 +94,5 @@ int batadv_bit_get_packet(void *priv, unsigned long *seq_bits, s32 seq_num_diff, if (set_mark) batadv_set_bit(seq_bits, 0); - return 1; + return true; } diff --git a/net/batman-adv/bitarray.h b/net/batman-adv/bitarray.h index 3e41bb80eb81..0e6e9d09078c 100644 --- a/net/batman-adv/bitarray.h +++ b/net/batman-adv/bitarray.h @@ -22,6 +22,7 @@ #include #include +#include #include /** @@ -31,17 +32,17 @@ * @last_seqno: latest sequence number in seq_bits * @curr_seqno: sequence number to test for * - * Return: 1 if the corresponding bit in the given seq_bits indicates true - * and curr_seqno is within range of last_seqno. Otherwise returns 0. + * Return: true if the corresponding bit in the given seq_bits indicates true + * and curr_seqno is within range of last_seqno. Otherwise returns false. 
*/ -static inline int batadv_test_bit(const unsigned long *seq_bits, - u32 last_seqno, u32 curr_seqno) +static inline bool batadv_test_bit(const unsigned long *seq_bits, + u32 last_seqno, u32 curr_seqno) { s32 diff; diff = last_seqno - curr_seqno; if (diff < 0 || diff >= BATADV_TQ_LOCAL_WINDOW_SIZE) - return 0; + return false; return test_bit(diff, seq_bits) != 0; } @@ -55,7 +56,7 @@ static inline void batadv_set_bit(unsigned long *seq_bits, s32 n) set_bit(n, seq_bits); /* turn the position on */ } -int batadv_bit_get_packet(void *priv, unsigned long *seq_bits, s32 seq_num_diff, - int set_mark); +bool batadv_bit_get_packet(void *priv, unsigned long *seq_bits, + s32 seq_num_diff, int set_mark); #endif /* _NET_BATMAN_ADV_BITARRAY_H_ */ diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c index 5064ae5e9b34..748a9ead7ce5 100644 --- a/net/batman-adv/bridge_loop_avoidance.c +++ b/net/batman-adv/bridge_loop_avoidance.c @@ -101,10 +101,10 @@ static inline u32 batadv_choose_backbone_gw(const void *data, u32 size) * @node: list node of the first entry to compare * @data2: pointer to the second backbone gateway * - * Return: 1 if the backbones have the same data, 0 otherwise + * Return: true if the backbones have the same data, false otherwise */ -static int batadv_compare_backbone_gw(const struct hlist_node *node, - const void *data2) +static bool batadv_compare_backbone_gw(const struct hlist_node *node, + const void *data2) { const void *data1 = container_of(node, struct batadv_bla_backbone_gw, hash_entry); @@ -112,12 +112,12 @@ static int batadv_compare_backbone_gw(const struct hlist_node *node, const struct batadv_bla_backbone_gw *gw2 = data2; if (!batadv_compare_eth(gw1->orig, gw2->orig)) - return 0; + return false; if (gw1->vid != gw2->vid) - return 0; + return false; - return 1; + return true; } /** @@ -125,10 +125,10 @@ static int batadv_compare_backbone_gw(const struct hlist_node *node, * @node: list node of the first entry to compare * @data2: pointer to the second claims * - * Return: 1 if the claim have the same data, 0 otherwise + * Return: true if the claim have the same data, 0 otherwise */ -static int batadv_compare_claim(const struct hlist_node *node, - const void *data2) +static bool batadv_compare_claim(const struct hlist_node *node, + const void *data2) { const void *data1 = container_of(node, struct batadv_bla_claim, hash_entry); @@ -136,12 +136,12 @@ static int batadv_compare_claim(const struct hlist_node *node, const struct batadv_bla_claim *cl2 = data2; if (!batadv_compare_eth(cl1->addr, cl2->addr)) - return 0; + return false; if (cl1->vid != cl2->vid) - return 0; + return false; - return 1; + return true; } /** @@ -775,22 +775,22 @@ static void batadv_bla_del_claim(struct batadv_priv *bat_priv, * @backbone_addr: originator address of the sender (Ethernet source MAC) * @vid: the VLAN ID of the frame * - * Return: 1 if handled + * Return: true if handled */ -static int batadv_handle_announce(struct batadv_priv *bat_priv, u8 *an_addr, - u8 *backbone_addr, unsigned short vid) +static bool batadv_handle_announce(struct batadv_priv *bat_priv, u8 *an_addr, + u8 *backbone_addr, unsigned short vid) { struct batadv_bla_backbone_gw *backbone_gw; u16 backbone_crc, crc; if (memcmp(an_addr, batadv_announce_mac, 4) != 0) - return 0; + return false; backbone_gw = batadv_bla_get_backbone_gw(bat_priv, backbone_addr, vid, false); if (unlikely(!backbone_gw)) - return 1; + return true; /* handle as ANNOUNCE frame */ backbone_gw->lasttime = jiffies; @@ 
-823,7 +823,7 @@ static int batadv_handle_announce(struct batadv_priv *bat_priv, u8 *an_addr, } batadv_backbone_gw_put(backbone_gw); - return 1; + return true; } /** @@ -834,29 +834,29 @@ static int batadv_handle_announce(struct batadv_priv *bat_priv, u8 *an_addr, * @ethhdr: ethernet header of a packet * @vid: the VLAN ID of the frame * - * Return: 1 if handled + * Return: true if handled */ -static int batadv_handle_request(struct batadv_priv *bat_priv, - struct batadv_hard_iface *primary_if, - u8 *backbone_addr, struct ethhdr *ethhdr, - unsigned short vid) +static bool batadv_handle_request(struct batadv_priv *bat_priv, + struct batadv_hard_iface *primary_if, + u8 *backbone_addr, struct ethhdr *ethhdr, + unsigned short vid) { /* check for REQUEST frame */ if (!batadv_compare_eth(backbone_addr, ethhdr->h_dest)) - return 0; + return false; /* sanity check, this should not happen on a normal switch, * we ignore it in this case. */ if (!batadv_compare_eth(ethhdr->h_dest, primary_if->net_dev->dev_addr)) - return 1; + return true; batadv_dbg(BATADV_DBG_BLA, bat_priv, "handle_request(): REQUEST vid %d (sent by %pM)...\n", BATADV_PRINT_VID(vid), ethhdr->h_source); batadv_bla_answer_request(bat_priv, primary_if, vid); - return 1; + return true; } /** @@ -867,12 +867,12 @@ static int batadv_handle_request(struct batadv_priv *bat_priv, * @claim_addr: Client to be unclaimed (ARP sender HW MAC) * @vid: the VLAN ID of the frame * - * Return: 1 if handled + * Return: true if handled */ -static int batadv_handle_unclaim(struct batadv_priv *bat_priv, - struct batadv_hard_iface *primary_if, - u8 *backbone_addr, u8 *claim_addr, - unsigned short vid) +static bool batadv_handle_unclaim(struct batadv_priv *bat_priv, + struct batadv_hard_iface *primary_if, + u8 *backbone_addr, u8 *claim_addr, + unsigned short vid) { struct batadv_bla_backbone_gw *backbone_gw; @@ -885,7 +885,7 @@ static int batadv_handle_unclaim(struct batadv_priv *bat_priv, backbone_gw = batadv_backbone_hash_find(bat_priv, backbone_addr, vid); if (!backbone_gw) - return 1; + return true; /* this must be an UNCLAIM frame */ batadv_dbg(BATADV_DBG_BLA, bat_priv, @@ -894,7 +894,7 @@ static int batadv_handle_unclaim(struct batadv_priv *bat_priv, batadv_bla_del_claim(bat_priv, claim_addr, vid); batadv_backbone_gw_put(backbone_gw); - return 1; + return true; } /** @@ -905,12 +905,12 @@ static int batadv_handle_unclaim(struct batadv_priv *bat_priv, * @claim_addr: client mac address to be claimed (ARP sender HW MAC) * @vid: the VLAN ID of the frame * - * Return: 1 if handled + * Return: true if handled */ -static int batadv_handle_claim(struct batadv_priv *bat_priv, - struct batadv_hard_iface *primary_if, - u8 *backbone_addr, u8 *claim_addr, - unsigned short vid) +static bool batadv_handle_claim(struct batadv_priv *bat_priv, + struct batadv_hard_iface *primary_if, + u8 *backbone_addr, u8 *claim_addr, + unsigned short vid) { struct batadv_bla_backbone_gw *backbone_gw; @@ -920,7 +920,7 @@ static int batadv_handle_claim(struct batadv_priv *bat_priv, false); if (unlikely(!backbone_gw)) - return 1; + return true; /* this must be a CLAIM frame */ batadv_bla_add_claim(bat_priv, claim_addr, vid, backbone_gw); @@ -931,7 +931,7 @@ static int batadv_handle_claim(struct batadv_priv *bat_priv, /* TODO: we could call something like tt_local_del() here. 
*/ batadv_backbone_gw_put(backbone_gw); - return 1; + return true; } /** @@ -1015,12 +1015,12 @@ static int batadv_check_claim_group(struct batadv_priv *bat_priv, * @primary_if: the primary hard interface of this batman soft interface * @skb: the frame to be checked * - * Return: 1 if it was a claim frame, otherwise return 0 to + * Return: true if it was a claim frame, otherwise return false to * tell the callee that it can use the frame on its own. */ -static int batadv_bla_process_claim(struct batadv_priv *bat_priv, - struct batadv_hard_iface *primary_if, - struct sk_buff *skb) +static bool batadv_bla_process_claim(struct batadv_priv *bat_priv, + struct batadv_hard_iface *primary_if, + struct sk_buff *skb) { struct batadv_bla_claim_dst *bla_dst, *bla_dst_own; u8 *hw_src, *hw_dst; @@ -1051,7 +1051,7 @@ static int batadv_bla_process_claim(struct batadv_priv *bat_priv, vhdr = skb_header_pointer(skb, headlen, VLAN_HLEN, &vhdr_buf); if (!vhdr) - return 0; + return false; proto = vhdr->h_vlan_encapsulated_proto; headlen += VLAN_HLEN; @@ -1060,12 +1060,12 @@ static int batadv_bla_process_claim(struct batadv_priv *bat_priv, } if (proto != htons(ETH_P_ARP)) - return 0; /* not a claim frame */ + return false; /* not a claim frame */ /* this must be a ARP frame. check if it is a claim. */ if (unlikely(!pskb_may_pull(skb, headlen + arp_hdr_len(skb->dev)))) - return 0; + return false; /* pskb_may_pull() may have modified the pointers, get ethhdr again */ ethhdr = eth_hdr(skb); @@ -1075,13 +1075,13 @@ static int batadv_bla_process_claim(struct batadv_priv *bat_priv, * IP information */ if (arphdr->ar_hrd != htons(ARPHRD_ETHER)) - return 0; + return false; if (arphdr->ar_pro != htons(ETH_P_IP)) - return 0; + return false; if (arphdr->ar_hln != ETH_ALEN) - return 0; + return false; if (arphdr->ar_pln != 4) - return 0; + return false; hw_src = (u8 *)arphdr + sizeof(struct arphdr); hw_dst = hw_src + ETH_ALEN + 4; @@ -1091,14 +1091,14 @@ static int batadv_bla_process_claim(struct batadv_priv *bat_priv, /* check if it is a claim frame in general */ if (memcmp(bla_dst->magic, bla_dst_own->magic, sizeof(bla_dst->magic)) != 0) - return 0; + return false; /* check if there is a claim frame encapsulated deeper in (QinQ) and * drop that, as this is not supported by BLA but should also not be * sent via the mesh. */ if (vlan_depth > 1) - return 1; + return true; /* Let the loopdetect frames on the mesh in any case. */ if (bla_dst->type == BATADV_CLAIM_TYPE_LOOPDETECT) @@ -1114,7 +1114,7 @@ static int batadv_bla_process_claim(struct batadv_priv *bat_priv, hw_dst); if (ret < 2) - return ret; + return !!ret; /* become a backbone gw ourselves on this vlan if not happened yet */ batadv_bla_update_own_backbone_gw(bat_priv, primary_if, vid); @@ -1124,30 +1124,30 @@ static int batadv_bla_process_claim(struct batadv_priv *bat_priv, case BATADV_CLAIM_TYPE_CLAIM: if (batadv_handle_claim(bat_priv, primary_if, hw_src, ethhdr->h_source, vid)) - return 1; + return true; break; case BATADV_CLAIM_TYPE_UNCLAIM: if (batadv_handle_unclaim(bat_priv, primary_if, ethhdr->h_source, hw_src, vid)) - return 1; + return true; break; case BATADV_CLAIM_TYPE_ANNOUNCE: if (batadv_handle_announce(bat_priv, hw_src, ethhdr->h_source, vid)) - return 1; + return true; break; case BATADV_CLAIM_TYPE_REQUEST: if (batadv_handle_request(bat_priv, primary_if, hw_src, ethhdr, vid)) - return 1; + return true; break; } batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla_process_claim(): ERROR - this looks like a claim frame, but is useless. 
eth src %pM on vid %d ...(hw_src %pM, hw_dst %pM)\n", ethhdr->h_source, BATADV_PRINT_VID(vid), hw_src, hw_dst); - return 1; + return true; } /** @@ -1529,15 +1529,16 @@ int batadv_bla_init(struct batadv_priv *bat_priv) * sent by another host, drop it. We allow equal packets from * the same host however as this might be intended. * - * Return: 1 if a packet is in the duplicate list, 0 otherwise. + * Return: true if a packet is in the duplicate list, false otherwise. */ -int batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv, - struct sk_buff *skb) +bool batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv, + struct sk_buff *skb) { - int i, curr, ret = 0; + int i, curr; __be32 crc; struct batadv_bcast_packet *bcast_packet; struct batadv_bcast_duplist_entry *entry; + bool ret = false; bcast_packet = (struct batadv_bcast_packet *)skb->data; @@ -1565,9 +1566,9 @@ int batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv, continue; /* this entry seems to match: same crc, not too old, - * and from another gw. therefore return 1 to forbid it. + * and from another gw. therefore return true to forbid it. */ - ret = 1; + ret = true; goto out; } /* not found, add a new entry (overwrite the oldest entry) @@ -1633,21 +1634,21 @@ bool batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, u8 *orig, * @orig_node: the orig_node of the frame * @hdr_size: maximum length of the frame * - * Return: 1 if the orig_node is also a gateway on the soft interface, otherwise - * it returns 0. + * Return: true if the orig_node is also a gateway on the soft interface, + * otherwise it returns false. */ -int batadv_bla_is_backbone_gw(struct sk_buff *skb, - struct batadv_orig_node *orig_node, int hdr_size) +bool batadv_bla_is_backbone_gw(struct sk_buff *skb, + struct batadv_orig_node *orig_node, int hdr_size) { struct batadv_bla_backbone_gw *backbone_gw; unsigned short vid; if (!atomic_read(&orig_node->bat_priv->bridge_loop_avoidance)) - return 0; + return false; /* first, find out the vid. */ if (!pskb_may_pull(skb, hdr_size + ETH_HLEN)) - return 0; + return false; vid = batadv_get_vid(skb, hdr_size); @@ -1655,10 +1656,10 @@ int batadv_bla_is_backbone_gw(struct sk_buff *skb, backbone_gw = batadv_backbone_hash_find(orig_node->bat_priv, orig_node->orig, vid); if (!backbone_gw) - return 0; + return false; batadv_backbone_gw_put(backbone_gw); - return 1; + return true; } /** @@ -1750,16 +1751,16 @@ batadv_bla_loopdetect_check(struct batadv_priv *bat_priv, struct sk_buff *skb, * * in these cases, the skb is further handled by this function * - * Return: 1 if handled, otherwise it returns 0 and the caller shall further - * process the skb. + * Return: true if handled, otherwise it returns false and the caller shall + * further process the skb. */ -int batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb, - unsigned short vid, bool is_bcast) +bool batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb, + unsigned short vid, bool is_bcast) { struct ethhdr *ethhdr; struct batadv_bla_claim search_claim, *claim = NULL; struct batadv_hard_iface *primary_if; - int ret; + bool ret; ethhdr = eth_hdr(skb); @@ -1821,12 +1822,12 @@ int batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb, } allow: batadv_bla_update_own_backbone_gw(bat_priv, primary_if, vid); - ret = 0; + ret = false; goto out; handled: kfree_skb(skb); - ret = 1; + ret = true; out: if (primary_if) @@ -1850,16 +1851,16 @@ out: * * This call might reallocate skb data. 
* - * Return: 1 if handled, otherwise it returns 0 and the caller shall further - * process the skb. + * Return: true if handled, otherwise it returns false and the caller shall + * further process the skb. */ -int batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb, - unsigned short vid) +bool batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb, + unsigned short vid) { struct ethhdr *ethhdr; struct batadv_bla_claim search_claim, *claim = NULL; struct batadv_hard_iface *primary_if; - int ret = 0; + bool ret = false; primary_if = batadv_primary_if_get_selected(bat_priv); if (!primary_if) @@ -1913,10 +1914,10 @@ int batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb, } allow: batadv_bla_update_own_backbone_gw(bat_priv, primary_if, vid); - ret = 0; + ret = false; goto out; handled: - ret = 1; + ret = true; out: if (primary_if) batadv_hardif_put(primary_if); diff --git a/net/batman-adv/bridge_loop_avoidance.h b/net/batman-adv/bridge_loop_avoidance.h index 579f0fa6fe6a..0f01daeb359e 100644 --- a/net/batman-adv/bridge_loop_avoidance.h +++ b/net/batman-adv/bridge_loop_avoidance.h @@ -27,19 +27,20 @@ struct seq_file; struct sk_buff; #ifdef CONFIG_BATMAN_ADV_BLA -int batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb, - unsigned short vid, bool is_bcast); -int batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb, - unsigned short vid); -int batadv_bla_is_backbone_gw(struct sk_buff *skb, - struct batadv_orig_node *orig_node, int hdr_size); +bool batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb, + unsigned short vid, bool is_bcast); +bool batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb, + unsigned short vid); +bool batadv_bla_is_backbone_gw(struct sk_buff *skb, + struct batadv_orig_node *orig_node, + int hdr_size); int batadv_bla_claim_table_seq_print_text(struct seq_file *seq, void *offset); int batadv_bla_backbone_table_seq_print_text(struct seq_file *seq, void *offset); bool batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, u8 *orig, unsigned short vid); -int batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv, - struct sk_buff *skb); +bool batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv, + struct sk_buff *skb); void batadv_bla_update_orig_address(struct batadv_priv *bat_priv, struct batadv_hard_iface *primary_if, struct batadv_hard_iface *oldif); @@ -50,24 +51,24 @@ void batadv_bla_free(struct batadv_priv *bat_priv); #define BATADV_BLA_CRC_INIT 0 #else /* ifdef CONFIG_BATMAN_ADV_BLA */ -static inline int batadv_bla_rx(struct batadv_priv *bat_priv, - struct sk_buff *skb, unsigned short vid, - bool is_bcast) +static inline bool batadv_bla_rx(struct batadv_priv *bat_priv, + struct sk_buff *skb, unsigned short vid, + bool is_bcast) { - return 0; + return false; } -static inline int batadv_bla_tx(struct batadv_priv *bat_priv, - struct sk_buff *skb, unsigned short vid) +static inline bool batadv_bla_tx(struct batadv_priv *bat_priv, + struct sk_buff *skb, unsigned short vid) { - return 0; + return false; } -static inline int batadv_bla_is_backbone_gw(struct sk_buff *skb, - struct batadv_orig_node *orig_node, - int hdr_size) +static inline bool batadv_bla_is_backbone_gw(struct sk_buff *skb, + struct batadv_orig_node *orig_node, + int hdr_size) { - return 0; + return false; } static inline int batadv_bla_claim_table_seq_print_text(struct seq_file *seq, @@ -88,11 +89,11 @@ static inline bool batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, return false; } -static inline int 
+static inline bool batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv, struct sk_buff *skb) { - return 0; + return false; } static inline void diff --git a/net/batman-adv/debugfs.c b/net/batman-adv/debugfs.c index aa315da83429..952900466d88 100644 --- a/net/batman-adv/debugfs.c +++ b/net/batman-adv/debugfs.c @@ -134,7 +134,7 @@ static int batadv_log_release(struct inode *inode, struct file *file) return 0; } -static int batadv_log_empty(struct batadv_priv_debug_log *debug_log) +static bool batadv_log_empty(struct batadv_priv_debug_log *debug_log) { return !(debug_log->log_start - debug_log->log_end); } diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c index 67f44f5d630b..278800a99c69 100644 --- a/net/batman-adv/distributed-arp-table.c +++ b/net/batman-adv/distributed-arp-table.c @@ -165,14 +165,14 @@ static void batadv_dat_purge(struct work_struct *work) * @node: node in the local table * @data2: second object to compare the node to * - * Return: 1 if the two entries are the same, 0 otherwise. + * Return: true if the two entries are the same, false otherwise. */ -static int batadv_compare_dat(const struct hlist_node *node, const void *data2) +static bool batadv_compare_dat(const struct hlist_node *node, const void *data2) { const void *data1 = container_of(node, struct batadv_dat_entry, hash_entry); - return memcmp(data1, data2, sizeof(__be32)) == 0 ? 1 : 0; + return memcmp(data1, data2, sizeof(__be32)) == 0; } /** diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c index 7c1d8d7ac548..8c2f39962fa5 100644 --- a/net/batman-adv/hard-interface.c +++ b/net/batman-adv/hard-interface.c @@ -146,22 +146,22 @@ static bool batadv_is_on_batman_iface(const struct net_device *net_dev) return ret; } -static int batadv_is_valid_iface(const struct net_device *net_dev) +static bool batadv_is_valid_iface(const struct net_device *net_dev) { if (net_dev->flags & IFF_LOOPBACK) - return 0; + return false; if (net_dev->type != ARPHRD_ETHER) - return 0; + return false; if (net_dev->addr_len != ETH_ALEN) - return 0; + return false; /* no batman over batman */ if (batadv_is_on_batman_iface(net_dev)) - return 0; + return false; - return 1; + return true; } /** @@ -653,8 +653,7 @@ batadv_hardif_add_interface(struct net_device *net_dev) ASSERT_RTNL(); - ret = batadv_is_valid_iface(net_dev); - if (ret != 1) + if (!batadv_is_valid_iface(net_dev)) goto out; dev_hold(net_dev); diff --git a/net/batman-adv/hash.h b/net/batman-adv/hash.h index 9bb57b87447c..cbbf87075f06 100644 --- a/net/batman-adv/hash.h +++ b/net/batman-adv/hash.h @@ -32,10 +32,10 @@ struct lock_class_key; /* callback to a compare function. 
should compare 2 element datas for their * keys * - * Return: 0 if same and not 0 if not same + * Return: true if same and false if not same */ -typedef int (*batadv_hashdata_compare_cb)(const struct hlist_node *, - const void *); +typedef bool (*batadv_hashdata_compare_cb)(const struct hlist_node *, + const void *); /* the hashfunction * diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h index 090c6f0f2398..76925266deed 100644 --- a/net/batman-adv/main.h +++ b/net/batman-adv/main.h @@ -292,7 +292,7 @@ static inline void _batadv_dbg(int type __always_unused, * * note: can't use ether_addr_equal() as it requires aligned memory * - * Return: 1 if they are the same ethernet addr + * Return: true if they are the same ethernet addr */ static inline bool batadv_compare_eth(const void *data1, const void *data2) { diff --git a/net/batman-adv/network-coding.c b/net/batman-adv/network-coding.c index 953dff1ad43b..df5ae9c7e507 100644 --- a/net/batman-adv/network-coding.c +++ b/net/batman-adv/network-coding.c @@ -510,10 +510,10 @@ static u32 batadv_nc_hash_choose(const void *data, u32 size) * @node: node in the local table * @data2: second object to compare the node to * - * Return: 1 if the two entry are the same, 0 otherwise + * Return: true if the two entry are the same, false otherwise */ -static int batadv_nc_hash_compare(const struct hlist_node *node, - const void *data2) +static bool batadv_nc_hash_compare(const struct hlist_node *node, + const void *data2) { const struct batadv_nc_path *nc_path1, *nc_path2; @@ -523,13 +523,13 @@ static int batadv_nc_hash_compare(const struct hlist_node *node, /* Return 1 if the two keys are identical */ if (memcmp(nc_path1->prev_hop, nc_path2->prev_hop, sizeof(nc_path1->prev_hop)) != 0) - return 0; + return false; if (memcmp(nc_path1->next_hop, nc_path2->next_hop, sizeof(nc_path1->next_hop)) != 0) - return 0; + return false; - return 1; + return true; } /** diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c index 04fa139911c3..1ff4ee473966 100644 --- a/net/batman-adv/originator.c +++ b/net/batman-adv/originator.c @@ -54,9 +54,9 @@ static void batadv_purge_orig(struct work_struct *work); * @node: node in the local table * @data2: second object to compare the node to * - * Return: 1 if they are the same originator + * Return: true if they are the same originator */ -int batadv_compare_orig(const struct hlist_node *node, const void *data2) +bool batadv_compare_orig(const struct hlist_node *node, const void *data2) { const void *data1 = container_of(node, struct batadv_orig_node, hash_entry); diff --git a/net/batman-adv/originator.h b/net/batman-adv/originator.h index 4e8b67f11051..64a8951e5844 100644 --- a/net/batman-adv/originator.h +++ b/net/batman-adv/originator.h @@ -33,7 +33,7 @@ struct seq_file; -int batadv_compare_orig(const struct hlist_node *node, const void *data2); +bool batadv_compare_orig(const struct hlist_node *node, const void *data2); int batadv_originator_init(struct batadv_priv *bat_priv); void batadv_originator_free(struct batadv_priv *bat_priv); void batadv_purge_orig_ref(struct batadv_priv *bat_priv); diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c index b494e435686f..ae850f2d11cb 100644 --- a/net/batman-adv/routing.c +++ b/net/batman-adv/routing.c @@ -163,18 +163,18 @@ out: * doesn't change otherwise. * * Return: - * 0 if the packet is to be accepted. - * 1 if the packet is to be ignored. + * false if the packet is to be accepted. + * true if the packet is to be ignored. 
*/ -int batadv_window_protected(struct batadv_priv *bat_priv, s32 seq_num_diff, - s32 seq_old_max_diff, unsigned long *last_reset, - bool *protection_started) +bool batadv_window_protected(struct batadv_priv *bat_priv, s32 seq_num_diff, + s32 seq_old_max_diff, unsigned long *last_reset, + bool *protection_started) { if (seq_num_diff <= -seq_old_max_diff || seq_num_diff >= BATADV_EXPECTED_SEQNO_RANGE) { if (!batadv_has_timed_out(*last_reset, BATADV_RESET_PROTECTION_MS)) - return 1; + return true; *last_reset = jiffies; if (protection_started) @@ -183,7 +183,7 @@ int batadv_window_protected(struct batadv_priv *bat_priv, s32 seq_num_diff, "old packet received, start protection\n"); } - return 0; + return false; } bool batadv_check_management_packet(struct sk_buff *skb, @@ -718,8 +718,9 @@ out: return ret; } -static int batadv_check_unicast_ttvn(struct batadv_priv *bat_priv, - struct sk_buff *skb, int hdr_len) { +static bool batadv_check_unicast_ttvn(struct batadv_priv *bat_priv, + struct sk_buff *skb, int hdr_len) +{ struct batadv_unicast_packet *unicast_packet; struct batadv_hard_iface *primary_if; struct batadv_orig_node *orig_node; @@ -730,11 +731,11 @@ static int batadv_check_unicast_ttvn(struct batadv_priv *bat_priv, /* check if there is enough data before accessing it */ if (!pskb_may_pull(skb, hdr_len + ETH_HLEN)) - return 0; + return false; /* create a copy of the skb (in case of for re-routing) to modify it. */ if (skb_cow(skb, sizeof(*unicast_packet)) < 0) - return 0; + return false; unicast_packet = (struct batadv_unicast_packet *)skb->data; vid = batadv_get_vid(skb, hdr_len); @@ -758,7 +759,7 @@ static int batadv_check_unicast_ttvn(struct batadv_priv *bat_priv, * table. If not, let the packet go untouched anyway because * there is nothing the node can do */ - return 1; + return true; } /* retrieve the TTVN known by this node for the packet destination. This @@ -774,7 +775,7 @@ static int batadv_check_unicast_ttvn(struct batadv_priv *bat_priv, * not be possible to deliver it */ if (!orig_node) - return 0; + return false; curr_ttvn = (u8)atomic_read(&orig_node->last_ttvn); batadv_orig_node_put(orig_node); @@ -785,7 +786,7 @@ static int batadv_check_unicast_ttvn(struct batadv_priv *bat_priv, */ is_old_ttvn = batadv_seq_before(unicast_packet->ttvn, curr_ttvn); if (!is_old_ttvn) - return 1; + return true; old_ttvn = unicast_packet->ttvn; /* the packet was forged based on outdated network information. 
Its @@ -798,7 +799,7 @@ static int batadv_check_unicast_ttvn(struct batadv_priv *bat_priv, "Rerouting unicast packet to %pM (dst=%pM): TTVN mismatch old_ttvn=%u new_ttvn=%u\n", unicast_packet->dest, ethhdr->h_dest, old_ttvn, curr_ttvn); - return 1; + return true; } /* the packet has not been re-routed: either the destination is @@ -806,14 +807,14 @@ static int batadv_check_unicast_ttvn(struct batadv_priv *bat_priv, * it is possible to drop the packet */ if (!batadv_is_my_client(bat_priv, ethhdr->h_dest, vid)) - return 0; + return false; /* update the header in order to let the packet be delivered to this * node's soft interface */ primary_if = batadv_primary_if_get_selected(bat_priv); if (!primary_if) - return 0; + return false; ether_addr_copy(unicast_packet->dest, primary_if->net_dev->dev_addr); @@ -821,7 +822,7 @@ static int batadv_check_unicast_ttvn(struct batadv_priv *bat_priv, unicast_packet->ttvn = curr_ttvn; - return 1; + return true; } /** diff --git a/net/batman-adv/routing.h b/net/batman-adv/routing.h index 02a5caa84127..05c3ff42e181 100644 --- a/net/batman-adv/routing.h +++ b/net/batman-adv/routing.h @@ -51,8 +51,8 @@ struct batadv_neigh_node * batadv_find_router(struct batadv_priv *bat_priv, struct batadv_orig_node *orig_node, struct batadv_hard_iface *recv_if); -int batadv_window_protected(struct batadv_priv *bat_priv, s32 seq_num_diff, - s32 seq_old_max_diff, unsigned long *last_reset, - bool *protection_started); +bool batadv_window_protected(struct batadv_priv *bat_priv, s32 seq_num_diff, + s32 seq_old_max_diff, unsigned long *last_reset, + bool *protection_started); #endif /* _NET_BATMAN_ADV_ROUTING_H_ */ diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c index 04866c9b860a..3a0fc3c18444 100644 --- a/net/batman-adv/soft-interface.c +++ b/net/batman-adv/soft-interface.c @@ -1045,12 +1045,12 @@ static void batadv_softif_destroy_netlink(struct net_device *soft_iface, unregister_netdevice_queue(soft_iface, head); } -int batadv_softif_is_valid(const struct net_device *net_dev) +bool batadv_softif_is_valid(const struct net_device *net_dev) { if (net_dev->netdev_ops->ndo_start_xmit == batadv_interface_tx) - return 1; + return true; - return 0; + return false; } struct rtnl_link_ops batadv_link_ops __read_mostly = { diff --git a/net/batman-adv/soft-interface.h b/net/batman-adv/soft-interface.h index b0966342a986..ec303ddbf647 100644 --- a/net/batman-adv/soft-interface.h +++ b/net/batman-adv/soft-interface.h @@ -20,6 +20,7 @@ #include "main.h" +#include #include struct net_device; @@ -32,7 +33,7 @@ void batadv_interface_rx(struct net_device *soft_iface, struct batadv_orig_node *orig_node); struct net_device *batadv_softif_create(struct net *net, const char *name); void batadv_softif_destroy_sysfs(struct net_device *soft_iface); -int batadv_softif_is_valid(const struct net_device *net_dev); +bool batadv_softif_is_valid(const struct net_device *net_dev); extern struct rtnl_link_ops batadv_link_ops; int batadv_softif_create_vlan(struct batadv_priv *bat_priv, unsigned short vid); void batadv_softif_vlan_put(struct batadv_softif_vlan *softif_vlan); diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c index 6ea6e9bf9a8c..feaf492b01ca 100644 --- a/net/batman-adv/translation-table.c +++ b/net/batman-adv/translation-table.c @@ -75,9 +75,9 @@ static void batadv_tt_global_del(struct batadv_priv *bat_priv, * * Compare the MAC address and the VLAN ID of the two TT entries and check if * they are the same TT client. 
- * Return: 1 if the two TT clients are the same, 0 otherwise + * Return: true if the two TT clients are the same, false otherwise */ -static int batadv_compare_tt(const struct hlist_node *node, const void *data2) +static bool batadv_compare_tt(const struct hlist_node *node, const void *data2) { const void *data1 = container_of(node, struct batadv_tt_common_entry, hash_entry); @@ -2361,19 +2361,19 @@ unlock: * @entry_ptr: to be checked local tt entry * @data_ptr: not used but definition required to satisfy the callback prototype * - * Return: 1 if the entry is a valid, 0 otherwise. + * Return: true if the entry is a valid, false otherwise. */ -static int batadv_tt_local_valid(const void *entry_ptr, const void *data_ptr) +static bool batadv_tt_local_valid(const void *entry_ptr, const void *data_ptr) { const struct batadv_tt_common_entry *tt_common_entry = entry_ptr; if (tt_common_entry->flags & BATADV_TT_CLIENT_NEW) - return 0; - return 1; + return false; + return true; } -static int batadv_tt_global_valid(const void *entry_ptr, - const void *data_ptr) +static bool batadv_tt_global_valid(const void *entry_ptr, + const void *data_ptr) { const struct batadv_tt_common_entry *tt_common_entry = entry_ptr; const struct batadv_tt_global_entry *tt_global_entry; @@ -2381,7 +2381,7 @@ static int batadv_tt_global_valid(const void *entry_ptr, if (tt_common_entry->flags & BATADV_TT_CLIENT_ROAM || tt_common_entry->flags & BATADV_TT_CLIENT_TEMP) - return 0; + return false; tt_global_entry = container_of(tt_common_entry, struct batadv_tt_global_entry, @@ -2403,7 +2403,8 @@ static int batadv_tt_global_valid(const void *entry_ptr, static void batadv_tt_tvlv_generate(struct batadv_priv *bat_priv, struct batadv_hashtable *hash, void *tvlv_buff, u16 tt_len, - int (*valid_cb)(const void *, const void *), + bool (*valid_cb)(const void *, + const void *), void *cb_data) { struct batadv_tt_common_entry *tt_common_entry; @@ -2552,11 +2553,11 @@ static void batadv_tt_global_update_crc(struct batadv_priv *bat_priv, * * Return: true if the TT Request was sent, false otherwise */ -static int batadv_send_tt_request(struct batadv_priv *bat_priv, - struct batadv_orig_node *dst_orig_node, - u8 ttvn, - struct batadv_tvlv_tt_vlan_data *tt_vlan, - u16 num_vlan, bool full_table) +static bool batadv_send_tt_request(struct batadv_priv *bat_priv, + struct batadv_orig_node *dst_orig_node, + u8 ttvn, + struct batadv_tvlv_tt_vlan_data *tt_vlan, + u16 num_vlan, bool full_table) { struct batadv_tvlv_tt_data *tvlv_tt_data = NULL; struct batadv_tt_req_node *tt_req_node = NULL; From 9d1601ef4347b27dfa627d61ccfa9a724cc6e303 Mon Sep 17 00:00:00 2001 From: Marek Lindner Date: Sun, 20 Mar 2016 18:39:56 +0800 Subject: [PATCH 1475/1649] batman-adv: replace ethertype variable with ETH_P_BATMAN for readability Signed-off-by: Marek Lindner Reviewed-by: Sven Eckelmann Signed-off-by: Antonio Quartulli --- net/batman-adv/soft-interface.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c index 3a0fc3c18444..343d2c904399 100644 --- a/net/batman-adv/soft-interface.c +++ b/net/batman-adv/soft-interface.c @@ -186,7 +186,6 @@ static int batadv_interface_tx(struct sk_buff *skb, struct batadv_priv *bat_priv = netdev_priv(soft_iface); struct batadv_hard_iface *primary_if = NULL; struct batadv_bcast_packet *bcast_packet; - __be16 ethertype = htons(ETH_P_BATMAN); static const u8 stp_addr[ETH_ALEN] = {0x01, 0x80, 0xC2, 0x00, 0x00, 0x00}; static const u8 ectp_addr[ETH_ALEN] 
= {0xCF, 0x00, 0x00, 0x00, @@ -216,7 +215,8 @@ static int batadv_interface_tx(struct sk_buff *skb, case ETH_P_8021Q: vhdr = vlan_eth_hdr(skb); - if (vhdr->h_vlan_encapsulated_proto != ethertype) { + /* drop batman-in-batman packets to prevent loops */ + if (vhdr->h_vlan_encapsulated_proto != htons(ETH_P_BATMAN)) { network_offset += VLAN_HLEN; break; } @@ -404,7 +404,6 @@ void batadv_interface_rx(struct net_device *soft_iface, { struct batadv_bcast_packet *batadv_bcast_packet; struct batadv_priv *bat_priv = netdev_priv(soft_iface); - __be16 ethertype = htons(ETH_P_BATMAN); struct vlan_ethhdr *vhdr; struct ethhdr *ethhdr; unsigned short vid; @@ -434,7 +433,8 @@ void batadv_interface_rx(struct net_device *soft_iface, vhdr = (struct vlan_ethhdr *)skb->data; - if (vhdr->h_vlan_encapsulated_proto != ethertype) + /* drop batman-in-batman packets to prevent loops */ + if (vhdr->h_vlan_encapsulated_proto != htons(ETH_P_BATMAN)) break; /* fall through */ From 676970e55b1033af7f0a03d4037b4d9b76327ded Mon Sep 17 00:00:00 2001 From: Antonio Quartulli Date: Fri, 11 Mar 2016 14:01:10 +0100 Subject: [PATCH 1476/1649] batman-adv: use batadv_compare_eth when possible When comparing Ethernet address it is better to use the more generic batadv_compare_eth. The latter is also optimised for architectures having a fast unaligned access. Signed-off-by: Antonio Quartulli [sven@narfation.org: fix conflicts with current version] Signed-off-by: Sven Eckelmann Signed-off-by: Marek Lindner --- net/batman-adv/network-coding.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/net/batman-adv/network-coding.c b/net/batman-adv/network-coding.c index df5ae9c7e507..678f06865312 100644 --- a/net/batman-adv/network-coding.c +++ b/net/batman-adv/network-coding.c @@ -521,12 +521,10 @@ static bool batadv_nc_hash_compare(const struct hlist_node *node, nc_path2 = data2; /* Return 1 if the two keys are identical */ - if (memcmp(nc_path1->prev_hop, nc_path2->prev_hop, - sizeof(nc_path1->prev_hop)) != 0) + if (!batadv_compare_eth(nc_path1->prev_hop, nc_path2->prev_hop)) return false; - if (memcmp(nc_path1->next_hop, nc_path2->next_hop, - sizeof(nc_path1->next_hop)) != 0) + if (!batadv_compare_eth(nc_path1->next_hop, nc_path2->next_hop)) return false; return true; From 459aa660eb1d8ce67080da1983bb81d716aa5a69 Mon Sep 17 00:00:00 2001 From: Pablo Neira Date: Mon, 9 May 2016 00:55:48 +0200 Subject: [PATCH 1477/1649] gtp: add initial driver for datapath of GPRS Tunneling Protocol (GTP-U) This is an initial implementation of a netdev driver for GTP datapath (GTP-U) v0 and v1, according to the GSM TS 09.60 and 3GPP TS 29.060 standards. This tunneling protocol is used to prevent subscribers from accessing mobile carrier core network infrastructure. This implementation requires a GGSN userspace daemon that implements the signaling protocol (GTP-C), such as OpenGGSN [1]. This userspace daemon updates the PDP context database that represents active subscriber sessions through a genetlink interface. For more context on this tunneling protocol, you can check the slides that were presented during the NetDev 1.1 [2]. Only IPv4 is supported at this time. [1] http://git.osmocom.org/openggsn/ [2] http://www.netdevconf.org/1.1/proceedings/slides/schultz-welte-osmocom-gtp.pdf Signed-off-by: Pablo Neira Ayuso Signed-off-by: David S. 
Miller --- drivers/net/Kconfig | 17 + drivers/net/Makefile | 1 + drivers/net/gtp.c | 1364 ++++++++++++++++++++++++++++++++++ include/net/gtp.h | 34 + include/uapi/linux/Kbuild | 1 + include/uapi/linux/gtp.h | 33 + include/uapi/linux/if_link.h | 10 + include/uapi/linux/udp.h | 3 +- 8 files changed, 1462 insertions(+), 1 deletion(-) create mode 100644 drivers/net/gtp.c create mode 100644 include/net/gtp.h create mode 100644 include/uapi/linux/gtp.h diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index befd67df08e1..0c5415b05ea9 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -192,6 +192,23 @@ config GENEVE To compile this driver as a module, choose M here: the module will be called geneve. +config GTP + tristate "GPRS Tunneling Protocol datapath (GTP-U)" + depends on INET && NET_UDP_TUNNEL + select NET_IP_TUNNEL + ---help--- + This allows one to create gtp virtual interfaces that provide + the GPRS Tunneling Protocol datapath (GTP-U). This tunneling protocol + is used to prevent subscribers from accessing mobile carrier core + network infrastructure. This driver requires a userspace software that + implements the signaling protocol (GTP-C) to update its PDP context + base, such as OpenGGSN + * + * Author: Harald Welte + * Pablo Neira Ayuso + * Andreas Schultz + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* An active session for the subscriber. */ +struct pdp_ctx { + struct hlist_node hlist_tid; + struct hlist_node hlist_addr; + + union { + u64 tid; + struct { + u64 tid; + u16 flow; + } v0; + struct { + u32 i_tei; + u32 o_tei; + } v1; + } u; + u8 gtp_version; + u16 af; + + struct in_addr ms_addr_ip4; + struct in_addr sgsn_addr_ip4; + + atomic_t tx_seq; + struct rcu_head rcu_head; +}; + +/* One instance of the GTP device. */ +struct gtp_dev { + struct list_head list; + + struct socket *sock0; + struct socket *sock1u; + + struct net *net; + struct net_device *dev; + + unsigned int hash_size; + struct hlist_head *tid_hash; + struct hlist_head *addr_hash; +}; + +static int gtp_net_id __read_mostly; + +struct gtp_net { + struct list_head gtp_dev_list; +}; + +static u32 gtp_h_initval; + +static inline u32 gtp0_hashfn(u64 tid) +{ + u32 *tid32 = (u32 *) &tid; + return jhash_2words(tid32[0], tid32[1], gtp_h_initval); +} + +static inline u32 gtp1u_hashfn(u32 tid) +{ + return jhash_1word(tid, gtp_h_initval); +} + +static inline u32 ipv4_hashfn(__be32 ip) +{ + return jhash_1word((__force u32)ip, gtp_h_initval); +} + +/* Resolve a PDP context structure based on the 64bit TID. */ +static struct pdp_ctx *gtp0_pdp_find(struct gtp_dev *gtp, u64 tid) +{ + struct hlist_head *head; + struct pdp_ctx *pdp; + + head = >p->tid_hash[gtp0_hashfn(tid) % gtp->hash_size]; + + hlist_for_each_entry_rcu(pdp, head, hlist_tid) { + if (pdp->gtp_version == GTP_V0 && + pdp->u.v0.tid == tid) + return pdp; + } + return NULL; +} + +/* Resolve a PDP context structure based on the 32bit TEI. 
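+ * (GTPv1-U identifies a tunnel by its 32-bit tunnel endpoint identifier;
+ * the lookup below is keyed on the incoming TEI, u.v1.i_tei.)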
*/ +static struct pdp_ctx *gtp1_pdp_find(struct gtp_dev *gtp, u32 tid) +{ + struct hlist_head *head; + struct pdp_ctx *pdp; + + head = >p->tid_hash[gtp1u_hashfn(tid) % gtp->hash_size]; + + hlist_for_each_entry_rcu(pdp, head, hlist_tid) { + if (pdp->gtp_version == GTP_V1 && + pdp->u.v1.i_tei == tid) + return pdp; + } + return NULL; +} + +/* Resolve a PDP context based on IPv4 address of MS. */ +static struct pdp_ctx *ipv4_pdp_find(struct gtp_dev *gtp, __be32 ms_addr) +{ + struct hlist_head *head; + struct pdp_ctx *pdp; + + head = >p->addr_hash[ipv4_hashfn(ms_addr) % gtp->hash_size]; + + hlist_for_each_entry_rcu(pdp, head, hlist_addr) { + if (pdp->af == AF_INET && + pdp->ms_addr_ip4.s_addr == ms_addr) + return pdp; + } + + return NULL; +} + +static bool gtp_check_src_ms_ipv4(struct sk_buff *skb, struct pdp_ctx *pctx, + unsigned int hdrlen) +{ + struct iphdr *iph; + + if (!pskb_may_pull(skb, hdrlen + sizeof(struct iphdr))) + return false; + + iph = (struct iphdr *)(skb->data + hdrlen + sizeof(struct iphdr)); + + return iph->saddr != pctx->ms_addr_ip4.s_addr; +} + +/* Check if the inner IP source address in this packet is assigned to any + * existing mobile subscriber. + */ +static bool gtp_check_src_ms(struct sk_buff *skb, struct pdp_ctx *pctx, + unsigned int hdrlen) +{ + switch (ntohs(skb->protocol)) { + case ETH_P_IP: + return gtp_check_src_ms_ipv4(skb, pctx, hdrlen); + } + return false; +} + +/* 1 means pass up to the stack, -1 means drop and 0 means decapsulated. */ +static int gtp0_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb, + bool xnet) +{ + unsigned int hdrlen = sizeof(struct udphdr) + + sizeof(struct gtp0_header); + struct gtp0_header *gtp0; + struct pdp_ctx *pctx; + int ret = 0; + + if (!pskb_may_pull(skb, hdrlen)) + return -1; + + gtp0 = (struct gtp0_header *)(skb->data + sizeof(struct udphdr)); + + if ((gtp0->flags >> 5) != GTP_V0) + return 1; + + if (gtp0->type != GTP_TPDU) + return 1; + + rcu_read_lock(); + pctx = gtp0_pdp_find(gtp, be64_to_cpu(gtp0->tid)); + if (!pctx) { + netdev_dbg(gtp->dev, "No PDP ctx to decap skb=%p\n", skb); + ret = -1; + goto out_rcu; + } + + if (!gtp_check_src_ms(skb, pctx, hdrlen)) { + netdev_dbg(gtp->dev, "No PDP ctx for this MS\n"); + ret = -1; + goto out_rcu; + } + rcu_read_unlock(); + + /* Get rid of the GTP + UDP headers. */ + return iptunnel_pull_header(skb, hdrlen, skb->protocol, xnet); +out_rcu: + rcu_read_unlock(); + return ret; +} + +static int gtp1u_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb, + bool xnet) +{ + unsigned int hdrlen = sizeof(struct udphdr) + + sizeof(struct gtp1_header); + struct gtp1_header *gtp1; + struct pdp_ctx *pctx; + int ret = 0; + + if (!pskb_may_pull(skb, hdrlen)) + return -1; + + gtp1 = (struct gtp1_header *)(skb->data + sizeof(struct udphdr)); + + if ((gtp1->flags >> 5) != GTP_V1) + return 1; + + if (gtp1->type != GTP_TPDU) + return 1; + + /* From 29.060: "This field shall be present if and only if any one or + * more of the S, PN and E flags are set.". + * + * If any of the bit is set, then the remaining ones also have to be + * set. + */ + if (gtp1->flags & GTP1_F_MASK) + hdrlen += 4; + + /* Make sure the header is larger enough, including extensions. 
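+ * (hdrlen grew by 4 above when any of the S, PN or E flags is set, so a
+ * second pskb_may_pull() is needed before reading past the mandatory header.)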
*/ + if (!pskb_may_pull(skb, hdrlen)) + return -1; + + rcu_read_lock(); + pctx = gtp1_pdp_find(gtp, ntohl(gtp1->tid)); + if (!pctx) { + netdev_dbg(gtp->dev, "No PDP ctx to decap skb=%p\n", skb); + ret = -1; + goto out_rcu; + } + + if (!gtp_check_src_ms(skb, pctx, hdrlen)) { + netdev_dbg(gtp->dev, "No PDP ctx for this MS\n"); + ret = -1; + goto out_rcu; + } + rcu_read_unlock(); + + /* Get rid of the GTP + UDP headers. */ + return iptunnel_pull_header(skb, hdrlen, skb->protocol, xnet); +out_rcu: + rcu_read_unlock(); + return ret; +} + +static void gtp_encap_disable(struct gtp_dev *gtp) +{ + if (gtp->sock0 && gtp->sock0->sk) { + udp_sk(gtp->sock0->sk)->encap_type = 0; + rcu_assign_sk_user_data(gtp->sock0->sk, NULL); + } + if (gtp->sock1u && gtp->sock1u->sk) { + udp_sk(gtp->sock1u->sk)->encap_type = 0; + rcu_assign_sk_user_data(gtp->sock1u->sk, NULL); + } + + gtp->sock0 = NULL; + gtp->sock1u = NULL; +} + +static void gtp_encap_destroy(struct sock *sk) +{ + struct gtp_dev *gtp; + + gtp = rcu_dereference_sk_user_data(sk); + if (gtp) + gtp_encap_disable(gtp); +} + +/* UDP encapsulation receive handler. See net/ipv4/udp.c. + * Return codes: 0: success, <0: error, >0: pass up to userspace UDP socket. + */ +static int gtp_encap_recv(struct sock *sk, struct sk_buff *skb) +{ + struct pcpu_sw_netstats *stats; + struct gtp_dev *gtp; + bool xnet; + int ret; + + gtp = rcu_dereference_sk_user_data(sk); + if (!gtp) + return 1; + + netdev_dbg(gtp->dev, "encap_recv sk=%p\n", sk); + + xnet = !net_eq(gtp->net, dev_net(gtp->dev)); + + switch (udp_sk(sk)->encap_type) { + case UDP_ENCAP_GTP0: + netdev_dbg(gtp->dev, "received GTP0 packet\n"); + ret = gtp0_udp_encap_recv(gtp, skb, xnet); + break; + case UDP_ENCAP_GTP1U: + netdev_dbg(gtp->dev, "received GTP1U packet\n"); + ret = gtp1u_udp_encap_recv(gtp, skb, xnet); + break; + default: + ret = -1; /* Shouldn't happen. */ + } + + switch (ret) { + case 1: + netdev_dbg(gtp->dev, "pass up to the process\n"); + return 1; + case 0: + netdev_dbg(gtp->dev, "forwarding packet from GGSN to uplink\n"); + break; + case -1: + netdev_dbg(gtp->dev, "GTP packet has been dropped\n"); + kfree_skb(skb); + return 0; + } + + /* Now that the UDP and the GTP header have been removed, set up the + * new network header. This is required by the upper layer to + * calculate the transport header. 
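+ * (iptunnel_pull_header() above stripped the outer UDP/GTP headers, so
+ * skb->data now points at the inner IP packet that netif_rx() below feeds
+ * back into the stack via the gtp device.)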
+ */ + skb_reset_network_header(skb); + + skb->dev = gtp->dev; + + stats = this_cpu_ptr(gtp->dev->tstats); + u64_stats_update_begin(&stats->syncp); + stats->rx_packets++; + stats->rx_bytes += skb->len; + u64_stats_update_end(&stats->syncp); + + netif_rx(skb); + + return 0; +} + +static int gtp_dev_init(struct net_device *dev) +{ + struct gtp_dev *gtp = netdev_priv(dev); + + gtp->dev = dev; + + dev->tstats = alloc_percpu(struct pcpu_sw_netstats); + if (!dev->tstats) + return -ENOMEM; + + return 0; +} + +static void gtp_dev_uninit(struct net_device *dev) +{ + struct gtp_dev *gtp = netdev_priv(dev); + + gtp_encap_disable(gtp); + free_percpu(dev->tstats); +} + +static struct rtable *ip4_route_output_gtp(struct net *net, struct flowi4 *fl4, + const struct sock *sk, __be32 daddr) +{ + memset(fl4, 0, sizeof(*fl4)); + fl4->flowi4_oif = sk->sk_bound_dev_if; + fl4->daddr = daddr; + fl4->saddr = inet_sk(sk)->inet_saddr; + fl4->flowi4_tos = RT_CONN_FLAGS(sk); + fl4->flowi4_proto = sk->sk_protocol; + + return ip_route_output_key(net, fl4); +} + +static inline void gtp0_push_header(struct sk_buff *skb, struct pdp_ctx *pctx) +{ + int payload_len = skb->len; + struct gtp0_header *gtp0; + + gtp0 = (struct gtp0_header *) skb_push(skb, sizeof(*gtp0)); + + gtp0->flags = 0x1e; /* v0, GTP-non-prime. */ + gtp0->type = GTP_TPDU; + gtp0->length = htons(payload_len); + gtp0->seq = htons((atomic_inc_return(&pctx->tx_seq) - 1) % 0xffff); + gtp0->flow = htons(pctx->u.v0.flow); + gtp0->number = 0xff; + gtp0->spare[0] = gtp0->spare[1] = gtp0->spare[2] = 0xff; + gtp0->tid = cpu_to_be64(pctx->u.v0.tid); +} + +static inline void gtp1_push_header(struct sk_buff *skb, struct pdp_ctx *pctx) +{ + int payload_len = skb->len; + struct gtp1_header *gtp1; + + gtp1 = (struct gtp1_header *) skb_push(skb, sizeof(*gtp1)); + + /* Bits 8 7 6 5 4 3 2 1 + * +--+--+--+--+--+--+--+--+ + * |version |PT| 1| E| S|PN| + * +--+--+--+--+--+--+--+--+ + * 0 0 1 1 1 0 0 0 + */ + gtp1->flags = 0x38; /* v1, GTP-non-prime. */ + gtp1->type = GTP_TPDU; + gtp1->length = htons(payload_len); + gtp1->tid = htonl(pctx->u.v1.o_tei); + + /* TODO: Suppport for extension header, sequence number and N-PDU. + * Update the length field if any of them is available. + */ +} + +struct gtp_pktinfo { + struct sock *sk; + struct iphdr *iph; + struct flowi4 fl4; + struct rtable *rt; + struct pdp_ctx *pctx; + struct net_device *dev; + __be16 gtph_port; +}; + +static void gtp_push_header(struct sk_buff *skb, struct gtp_pktinfo *pktinfo) +{ + switch (pktinfo->pctx->gtp_version) { + case GTP_V0: + pktinfo->gtph_port = htons(GTP0_PORT); + gtp0_push_header(skb, pktinfo->pctx); + break; + case GTP_V1: + pktinfo->gtph_port = htons(GTP1U_PORT); + gtp1_push_header(skb, pktinfo->pctx); + break; + } +} + +static inline void gtp_set_pktinfo_ipv4(struct gtp_pktinfo *pktinfo, + struct sock *sk, struct iphdr *iph, + struct pdp_ctx *pctx, struct rtable *rt, + struct flowi4 *fl4, + struct net_device *dev) +{ + pktinfo->sk = sk; + pktinfo->iph = iph; + pktinfo->pctx = pctx; + pktinfo->rt = rt; + pktinfo->fl4 = *fl4; + pktinfo->dev = dev; +} + +static int gtp_build_skb_ip4(struct sk_buff *skb, struct net_device *dev, + struct gtp_pktinfo *pktinfo) +{ + struct gtp_dev *gtp = netdev_priv(dev); + struct pdp_ctx *pctx; + struct rtable *rt; + struct flowi4 fl4; + struct iphdr *iph; + struct sock *sk; + __be16 df; + int mtu; + + /* Read the IP destination address and resolve the PDP context. + * Prepend PDP header with TEI/TID from PDP ctx. 
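+ * (The lookup is keyed on the inner IPv4 destination address, i.e. the
+ * mobile subscriber this frame is addressed to; see ipv4_pdp_find() above.)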
+ */ + iph = ip_hdr(skb); + pctx = ipv4_pdp_find(gtp, iph->daddr); + if (!pctx) { + netdev_dbg(dev, "no PDP ctx found for %pI4, skip\n", + &iph->daddr); + return -ENOENT; + } + netdev_dbg(dev, "found PDP context %p\n", pctx); + + switch (pctx->gtp_version) { + case GTP_V0: + if (gtp->sock0) + sk = gtp->sock0->sk; + else + sk = NULL; + break; + case GTP_V1: + if (gtp->sock1u) + sk = gtp->sock1u->sk; + else + sk = NULL; + break; + default: + return -ENOENT; + } + + if (!sk) { + netdev_dbg(dev, "no userspace socket is available, skip\n"); + return -ENOENT; + } + + rt = ip4_route_output_gtp(sock_net(sk), &fl4, gtp->sock0->sk, + pctx->sgsn_addr_ip4.s_addr); + if (IS_ERR(rt)) { + netdev_dbg(dev, "no route to SSGN %pI4\n", + &pctx->sgsn_addr_ip4.s_addr); + dev->stats.tx_carrier_errors++; + goto err; + } + + if (rt->dst.dev == dev) { + netdev_dbg(dev, "circular route to SSGN %pI4\n", + &pctx->sgsn_addr_ip4.s_addr); + dev->stats.collisions++; + goto err_rt; + } + + skb_dst_drop(skb); + + /* This is similar to tnl_update_pmtu(). */ + df = iph->frag_off; + if (df) { + mtu = dst_mtu(&rt->dst) - dev->hard_header_len - + sizeof(struct iphdr) - sizeof(struct udphdr); + switch (pctx->gtp_version) { + case GTP_V0: + mtu -= sizeof(struct gtp0_header); + break; + case GTP_V1: + mtu -= sizeof(struct gtp1_header); + break; + } + } else { + mtu = dst_mtu(&rt->dst); + } + + rt->dst.ops->update_pmtu(&rt->dst, NULL, skb, mtu); + + if (!skb_is_gso(skb) && (iph->frag_off & htons(IP_DF)) && + mtu < ntohs(iph->tot_len)) { + netdev_dbg(dev, "packet too big, fragmentation needed\n"); + memset(IPCB(skb), 0, sizeof(*IPCB(skb))); + icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, + htonl(mtu)); + goto err_rt; + } + + gtp_set_pktinfo_ipv4(pktinfo, sk, iph, pctx, rt, &fl4, dev); + gtp_push_header(skb, pktinfo); + + return 0; +err_rt: + ip_rt_put(rt); +err: + return -EBADMSG; +} + +static netdev_tx_t gtp_dev_xmit(struct sk_buff *skb, struct net_device *dev) +{ + unsigned int proto = ntohs(skb->protocol); + struct gtp_pktinfo pktinfo; + int err; + + /* Ensure there is sufficient headroom. */ + if (skb_cow_head(skb, dev->needed_headroom)) + goto tx_err; + + skb_reset_inner_headers(skb); + + /* PDP context lookups in gtp_build_skb_*() need rcu read-side lock. */ + rcu_read_lock(); + switch (proto) { + case ETH_P_IP: + err = gtp_build_skb_ip4(skb, dev, &pktinfo); + break; + default: + err = -EOPNOTSUPP; + break; + } + rcu_read_unlock(); + + if (err < 0) + goto tx_err; + + switch (proto) { + case ETH_P_IP: + netdev_dbg(pktinfo.dev, "gtp -> IP src: %pI4 dst: %pI4\n", + &pktinfo.iph->saddr, &pktinfo.iph->daddr); + udp_tunnel_xmit_skb(pktinfo.rt, pktinfo.sk, skb, + pktinfo.fl4.saddr, pktinfo.fl4.daddr, + pktinfo.iph->tos, + ip4_dst_hoplimit(&pktinfo.rt->dst), + htons(IP_DF), + pktinfo.gtph_port, pktinfo.gtph_port, + true, false); + break; + } + + return NETDEV_TX_OK; +tx_err: + dev->stats.tx_errors++; + dev_kfree_skb(skb); + return NETDEV_TX_OK; +} + +static const struct net_device_ops gtp_netdev_ops = { + .ndo_init = gtp_dev_init, + .ndo_uninit = gtp_dev_uninit, + .ndo_start_xmit = gtp_dev_xmit, + .ndo_get_stats64 = ip_tunnel_get_stats64, +}; + +static void gtp_link_setup(struct net_device *dev) +{ + dev->netdev_ops = >p_netdev_ops; + dev->destructor = free_netdev; + + dev->hard_header_len = 0; + dev->addr_len = 0; + + /* Zero header length. 
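+ * (hard_header_len and addr_len are zero because a gtp device is a
+ * point-to-point layer-3 tunnel: there is no link-layer header or address
+ * to build.)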
*/ + dev->type = ARPHRD_NONE; + dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST; + + dev->priv_flags |= IFF_NO_QUEUE; + dev->features |= NETIF_F_LLTX; + netif_keep_dst(dev); + + /* Assume largest header, ie. GTPv0. */ + dev->needed_headroom = LL_MAX_HEADER + + sizeof(struct iphdr) + + sizeof(struct udphdr) + + sizeof(struct gtp0_header); +} + +static int gtp_hashtable_new(struct gtp_dev *gtp, int hsize); +static void gtp_hashtable_free(struct gtp_dev *gtp); +static int gtp_encap_enable(struct net_device *dev, struct gtp_dev *gtp, + int fd_gtp0, int fd_gtp1, struct net *src_net); + +static int gtp_newlink(struct net *src_net, struct net_device *dev, + struct nlattr *tb[], struct nlattr *data[]) +{ + int hashsize, err, fd0, fd1; + struct gtp_dev *gtp; + struct gtp_net *gn; + + if (!data[IFLA_GTP_FD0] || !data[IFLA_GTP_FD1]) + return -EINVAL; + + gtp = netdev_priv(dev); + + fd0 = nla_get_u32(data[IFLA_GTP_FD0]); + fd1 = nla_get_u32(data[IFLA_GTP_FD1]); + + err = gtp_encap_enable(dev, gtp, fd0, fd1, src_net); + if (err < 0) + goto out_err; + + if (!data[IFLA_GTP_PDP_HASHSIZE]) + hashsize = 1024; + else + hashsize = nla_get_u32(data[IFLA_GTP_PDP_HASHSIZE]); + + err = gtp_hashtable_new(gtp, hashsize); + if (err < 0) + goto out_encap; + + err = register_netdevice(dev); + if (err < 0) { + netdev_dbg(dev, "failed to register new netdev %d\n", err); + goto out_hashtable; + } + + gn = net_generic(dev_net(dev), gtp_net_id); + list_add_rcu(>p->list, &gn->gtp_dev_list); + + netdev_dbg(dev, "registered new GTP interface\n"); + + return 0; + +out_hashtable: + gtp_hashtable_free(gtp); +out_encap: + gtp_encap_disable(gtp); +out_err: + return err; +} + +static void gtp_dellink(struct net_device *dev, struct list_head *head) +{ + struct gtp_dev *gtp = netdev_priv(dev); + + gtp_encap_disable(gtp); + gtp_hashtable_free(gtp); + list_del_rcu(>p->list); + unregister_netdevice_queue(dev, head); +} + +static const struct nla_policy gtp_policy[IFLA_GTP_MAX + 1] = { + [IFLA_GTP_FD0] = { .type = NLA_U32 }, + [IFLA_GTP_FD1] = { .type = NLA_U32 }, + [IFLA_GTP_PDP_HASHSIZE] = { .type = NLA_U32 }, +}; + +static int gtp_validate(struct nlattr *tb[], struct nlattr *data[]) +{ + if (!data) + return -EINVAL; + + return 0; +} + +static size_t gtp_get_size(const struct net_device *dev) +{ + return nla_total_size(sizeof(__u32)); /* IFLA_GTP_PDP_HASHSIZE */ +} + +static int gtp_fill_info(struct sk_buff *skb, const struct net_device *dev) +{ + struct gtp_dev *gtp = netdev_priv(dev); + + if (nla_put_u32(skb, IFLA_GTP_PDP_HASHSIZE, gtp->hash_size)) + goto nla_put_failure; + + return 0; + +nla_put_failure: + return -EMSGSIZE; +} + +static struct rtnl_link_ops gtp_link_ops __read_mostly = { + .kind = "gtp", + .maxtype = IFLA_GTP_MAX, + .policy = gtp_policy, + .priv_size = sizeof(struct gtp_dev), + .setup = gtp_link_setup, + .validate = gtp_validate, + .newlink = gtp_newlink, + .dellink = gtp_dellink, + .get_size = gtp_get_size, + .fill_info = gtp_fill_info, +}; + +static struct net *gtp_genl_get_net(struct net *src_net, struct nlattr *tb[]) +{ + struct net *net; + + /* Examine the link attributes and figure out which network namespace + * we are talking about. 
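+ * (GTPA_NET_NS_FD carries a file descriptor referring to a network
+ * namespace; when it is absent the socket's own namespace is used, and a
+ * reference is taken in either case.)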
+ */ + if (tb[GTPA_NET_NS_FD]) + net = get_net_ns_by_fd(nla_get_u32(tb[GTPA_NET_NS_FD])); + else + net = get_net(src_net); + + return net; +} + +static int gtp_hashtable_new(struct gtp_dev *gtp, int hsize) +{ + int i; + + gtp->addr_hash = kmalloc(sizeof(struct hlist_head) * hsize, GFP_KERNEL); + if (gtp->addr_hash == NULL) + return -ENOMEM; + + gtp->tid_hash = kmalloc(sizeof(struct hlist_head) * hsize, GFP_KERNEL); + if (gtp->tid_hash == NULL) + goto err1; + + gtp->hash_size = hsize; + + for (i = 0; i < hsize; i++) { + INIT_HLIST_HEAD(>p->addr_hash[i]); + INIT_HLIST_HEAD(>p->tid_hash[i]); + } + return 0; +err1: + kfree(gtp->addr_hash); + return -ENOMEM; +} + +static void gtp_hashtable_free(struct gtp_dev *gtp) +{ + struct pdp_ctx *pctx; + int i; + + for (i = 0; i < gtp->hash_size; i++) { + hlist_for_each_entry_rcu(pctx, >p->tid_hash[i], hlist_tid) { + hlist_del_rcu(&pctx->hlist_tid); + hlist_del_rcu(&pctx->hlist_addr); + kfree_rcu(pctx, rcu_head); + } + } + synchronize_rcu(); + kfree(gtp->addr_hash); + kfree(gtp->tid_hash); +} + +static int gtp_encap_enable(struct net_device *dev, struct gtp_dev *gtp, + int fd_gtp0, int fd_gtp1, struct net *src_net) +{ + struct udp_tunnel_sock_cfg tuncfg = {NULL}; + struct socket *sock0, *sock1u; + int err; + + netdev_dbg(dev, "enable gtp on %d, %d\n", fd_gtp0, fd_gtp1); + + sock0 = sockfd_lookup(fd_gtp0, &err); + if (sock0 == NULL) { + netdev_dbg(dev, "socket fd=%d not found (gtp0)\n", fd_gtp0); + return -ENOENT; + } + + if (sock0->sk->sk_protocol != IPPROTO_UDP) { + netdev_dbg(dev, "socket fd=%d not UDP\n", fd_gtp0); + err = -EINVAL; + goto err1; + } + + sock1u = sockfd_lookup(fd_gtp1, &err); + if (sock1u == NULL) { + netdev_dbg(dev, "socket fd=%d not found (gtp1u)\n", fd_gtp1); + err = -ENOENT; + goto err1; + } + + if (sock1u->sk->sk_protocol != IPPROTO_UDP) { + netdev_dbg(dev, "socket fd=%d not UDP\n", fd_gtp1); + err = -EINVAL; + goto err2; + } + + netdev_dbg(dev, "enable gtp on %p, %p\n", sock0, sock1u); + + gtp->sock0 = sock0; + gtp->sock1u = sock1u; + gtp->net = src_net; + + tuncfg.sk_user_data = gtp; + tuncfg.encap_rcv = gtp_encap_recv; + tuncfg.encap_destroy = gtp_encap_destroy; + + tuncfg.encap_type = UDP_ENCAP_GTP0; + setup_udp_tunnel_sock(sock_net(gtp->sock0->sk), gtp->sock0, &tuncfg); + + tuncfg.encap_type = UDP_ENCAP_GTP1U; + setup_udp_tunnel_sock(sock_net(gtp->sock1u->sk), gtp->sock1u, &tuncfg); + + err = 0; +err2: + sockfd_put(sock1u); +err1: + sockfd_put(sock0); + return err; +} + +static struct net_device *gtp_find_dev(struct net *net, int ifindex) +{ + struct gtp_net *gn = net_generic(net, gtp_net_id); + struct gtp_dev *gtp; + + list_for_each_entry_rcu(gtp, &gn->gtp_dev_list, list) { + if (ifindex == gtp->dev->ifindex) + return gtp->dev; + } + return NULL; +} + +static void ipv4_pdp_fill(struct pdp_ctx *pctx, struct genl_info *info) +{ + pctx->gtp_version = nla_get_u32(info->attrs[GTPA_VERSION]); + pctx->af = AF_INET; + pctx->sgsn_addr_ip4.s_addr = + nla_get_be32(info->attrs[GTPA_SGSN_ADDRESS]); + pctx->ms_addr_ip4.s_addr = + nla_get_be32(info->attrs[GTPA_MS_ADDRESS]); + + switch (pctx->gtp_version) { + case GTP_V0: + /* According to TS 09.60, sections 7.5.1 and 7.5.2, the flow + * label needs to be the same for uplink and downlink packets, + * so let's annotate this. 
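+ * (Hence only a single flow value is stored per context; it is written
+ * back unchanged by gtp0_push_header() on transmit.)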
+ */ + pctx->u.v0.tid = nla_get_u64(info->attrs[GTPA_TID]); + pctx->u.v0.flow = nla_get_u16(info->attrs[GTPA_FLOW]); + break; + case GTP_V1: + pctx->u.v1.i_tei = nla_get_u32(info->attrs[GTPA_I_TEI]); + pctx->u.v1.o_tei = nla_get_u32(info->attrs[GTPA_O_TEI]); + break; + default: + break; + } +} + +static int ipv4_pdp_add(struct net_device *dev, struct genl_info *info) +{ + struct gtp_dev *gtp = netdev_priv(dev); + u32 hash_ms, hash_tid = 0; + struct pdp_ctx *pctx; + bool found = false; + __be32 ms_addr; + + ms_addr = nla_get_be32(info->attrs[GTPA_MS_ADDRESS]); + hash_ms = ipv4_hashfn(ms_addr) % gtp->hash_size; + + hlist_for_each_entry_rcu(pctx, >p->addr_hash[hash_ms], hlist_addr) { + if (pctx->ms_addr_ip4.s_addr == ms_addr) { + found = true; + break; + } + } + + if (found) { + if (info->nlhdr->nlmsg_flags & NLM_F_EXCL) + return -EEXIST; + if (info->nlhdr->nlmsg_flags & NLM_F_REPLACE) + return -EOPNOTSUPP; + + ipv4_pdp_fill(pctx, info); + + if (pctx->gtp_version == GTP_V0) + netdev_dbg(dev, "GTPv0-U: update tunnel id = %llx (pdp %p)\n", + pctx->u.v0.tid, pctx); + else if (pctx->gtp_version == GTP_V1) + netdev_dbg(dev, "GTPv1-U: update tunnel id = %x/%x (pdp %p)\n", + pctx->u.v1.i_tei, pctx->u.v1.o_tei, pctx); + + return 0; + + } + + pctx = kmalloc(sizeof(struct pdp_ctx), GFP_KERNEL); + if (pctx == NULL) + return -ENOMEM; + + ipv4_pdp_fill(pctx, info); + atomic_set(&pctx->tx_seq, 0); + + switch (pctx->gtp_version) { + case GTP_V0: + /* TS 09.60: "The flow label identifies unambiguously a GTP + * flow.". We use the tid for this instead, I cannot find a + * situation in which this doesn't unambiguosly identify the + * PDP context. + */ + hash_tid = gtp0_hashfn(pctx->u.v0.tid) % gtp->hash_size; + break; + case GTP_V1: + hash_tid = gtp1u_hashfn(pctx->u.v1.i_tei) % gtp->hash_size; + break; + } + + hlist_add_head_rcu(&pctx->hlist_addr, >p->addr_hash[hash_ms]); + hlist_add_head_rcu(&pctx->hlist_tid, >p->tid_hash[hash_tid]); + + switch (pctx->gtp_version) { + case GTP_V0: + netdev_dbg(dev, "GTPv0-U: new PDP ctx id=%llx ssgn=%pI4 ms=%pI4 (pdp=%p)\n", + pctx->u.v0.tid, &pctx->sgsn_addr_ip4, + &pctx->ms_addr_ip4, pctx); + break; + case GTP_V1: + netdev_dbg(dev, "GTPv1-U: new PDP ctx id=%x/%x ssgn=%pI4 ms=%pI4 (pdp=%p)\n", + pctx->u.v1.i_tei, pctx->u.v1.o_tei, + &pctx->sgsn_addr_ip4, &pctx->ms_addr_ip4, pctx); + break; + } + + return 0; +} + +static int gtp_genl_new_pdp(struct sk_buff *skb, struct genl_info *info) +{ + struct net_device *dev; + struct net *net; + + if (!info->attrs[GTPA_VERSION] || + !info->attrs[GTPA_LINK] || + !info->attrs[GTPA_SGSN_ADDRESS] || + !info->attrs[GTPA_MS_ADDRESS]) + return -EINVAL; + + switch (nla_get_u32(info->attrs[GTPA_VERSION])) { + case GTP_V0: + if (!info->attrs[GTPA_TID] || + !info->attrs[GTPA_FLOW]) + return -EINVAL; + break; + case GTP_V1: + if (!info->attrs[GTPA_I_TEI] || + !info->attrs[GTPA_O_TEI]) + return -EINVAL; + break; + + default: + return -EINVAL; + } + + net = gtp_genl_get_net(sock_net(skb->sk), info->attrs); + if (IS_ERR(net)) + return PTR_ERR(net); + + /* Check if there's an existing gtpX device to configure */ + dev = gtp_find_dev(net, nla_get_u32(info->attrs[GTPA_LINK])); + if (dev == NULL) + return -ENODEV; + + return ipv4_pdp_add(dev, info); +} + +static int gtp_genl_del_pdp(struct sk_buff *skb, struct genl_info *info) +{ + struct net_device *dev; + struct pdp_ctx *pctx; + struct gtp_dev *gtp; + struct net *net; + + if (!info->attrs[GTPA_VERSION] || + !info->attrs[GTPA_LINK]) + return -EINVAL; + + net = gtp_genl_get_net(sock_net(skb->sk), 
info->attrs); + if (IS_ERR(net)) + return PTR_ERR(net); + + /* Check if there's an existing gtpX device to configure */ + dev = gtp_find_dev(net, nla_get_u32(info->attrs[GTPA_LINK])); + if (dev == NULL) + return -ENODEV; + + gtp = netdev_priv(dev); + + switch (nla_get_u32(info->attrs[GTPA_VERSION])) { + case GTP_V0: + if (!info->attrs[GTPA_TID]) + return -EINVAL; + pctx = gtp0_pdp_find(gtp, nla_get_u64(info->attrs[GTPA_TID])); + break; + case GTP_V1: + if (!info->attrs[GTPA_I_TEI]) + return -EINVAL; + pctx = gtp1_pdp_find(gtp, nla_get_u64(info->attrs[GTPA_I_TEI])); + break; + + default: + return -EINVAL; + } + + if (pctx == NULL) + return -ENOENT; + + if (pctx->gtp_version == GTP_V0) + netdev_dbg(dev, "GTPv0-U: deleting tunnel id = %llx (pdp %p)\n", + pctx->u.v0.tid, pctx); + else if (pctx->gtp_version == GTP_V1) + netdev_dbg(dev, "GTPv1-U: deleting tunnel id = %x/%x (pdp %p)\n", + pctx->u.v1.i_tei, pctx->u.v1.o_tei, pctx); + + hlist_del_rcu(&pctx->hlist_tid); + hlist_del_rcu(&pctx->hlist_addr); + kfree_rcu(pctx, rcu_head); + + return 0; +} + +static struct genl_family gtp_genl_family = { + .id = GENL_ID_GENERATE, + .name = "gtp", + .version = 0, + .hdrsize = 0, + .maxattr = GTPA_MAX, + .netnsok = true, +}; + +static int gtp_genl_fill_info(struct sk_buff *skb, u32 snd_portid, u32 snd_seq, + u32 type, struct pdp_ctx *pctx) +{ + void *genlh; + + genlh = genlmsg_put(skb, snd_portid, snd_seq, >p_genl_family, 0, + type); + if (genlh == NULL) + goto nlmsg_failure; + + if (nla_put_u32(skb, GTPA_VERSION, pctx->gtp_version) || + nla_put_be32(skb, GTPA_SGSN_ADDRESS, pctx->sgsn_addr_ip4.s_addr) || + nla_put_be32(skb, GTPA_MS_ADDRESS, pctx->ms_addr_ip4.s_addr)) + goto nla_put_failure; + + switch (pctx->gtp_version) { + case GTP_V0: + if (nla_put_u64_64bit(skb, GTPA_TID, pctx->u.v0.tid, GTPA_PAD) || + nla_put_u16(skb, GTPA_FLOW, pctx->u.v0.flow)) + goto nla_put_failure; + break; + case GTP_V1: + if (nla_put_u32(skb, GTPA_I_TEI, pctx->u.v1.i_tei) || + nla_put_u32(skb, GTPA_O_TEI, pctx->u.v1.o_tei)) + goto nla_put_failure; + break; + } + genlmsg_end(skb, genlh); + return 0; + +nlmsg_failure: +nla_put_failure: + genlmsg_cancel(skb, genlh); + return -EMSGSIZE; +} + +static int gtp_genl_get_pdp(struct sk_buff *skb, struct genl_info *info) +{ + struct pdp_ctx *pctx = NULL; + struct net_device *dev; + struct sk_buff *skb2; + struct gtp_dev *gtp; + u32 gtp_version; + struct net *net; + int err; + + if (!info->attrs[GTPA_VERSION] || + !info->attrs[GTPA_LINK]) + return -EINVAL; + + gtp_version = nla_get_u32(info->attrs[GTPA_VERSION]); + switch (gtp_version) { + case GTP_V0: + case GTP_V1: + break; + default: + return -EINVAL; + } + + net = gtp_genl_get_net(sock_net(skb->sk), info->attrs); + if (IS_ERR(net)) + return PTR_ERR(net); + + /* Check if there's an existing gtpX device to configure */ + dev = gtp_find_dev(net, nla_get_u32(info->attrs[GTPA_LINK])); + if (dev == NULL) + return -ENODEV; + + gtp = netdev_priv(dev); + + rcu_read_lock(); + if (gtp_version == GTP_V0 && + info->attrs[GTPA_TID]) { + u64 tid = nla_get_u64(info->attrs[GTPA_TID]); + + pctx = gtp0_pdp_find(gtp, tid); + } else if (gtp_version == GTP_V1 && + info->attrs[GTPA_I_TEI]) { + u32 tid = nla_get_u32(info->attrs[GTPA_I_TEI]); + + pctx = gtp1_pdp_find(gtp, tid); + } else if (info->attrs[GTPA_MS_ADDRESS]) { + __be32 ip = nla_get_be32(info->attrs[GTPA_MS_ADDRESS]); + + pctx = ipv4_pdp_find(gtp, ip); + } + + if (pctx == NULL) { + err = -ENOENT; + goto err_unlock; + } + + skb2 = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC); + if (skb2 == NULL) { + err = 
-ENOMEM; + goto err_unlock; + } + + err = gtp_genl_fill_info(skb2, NETLINK_CB(skb).portid, + info->snd_seq, info->nlhdr->nlmsg_type, pctx); + if (err < 0) + goto err_unlock_free; + + rcu_read_unlock(); + return genlmsg_unicast(genl_info_net(info), skb2, info->snd_portid); + +err_unlock_free: + kfree_skb(skb2); +err_unlock: + rcu_read_unlock(); + return err; +} + +static int gtp_genl_dump_pdp(struct sk_buff *skb, + struct netlink_callback *cb) +{ + struct gtp_dev *last_gtp = (struct gtp_dev *)cb->args[2], *gtp; + struct net *net = sock_net(skb->sk); + struct gtp_net *gn = net_generic(net, gtp_net_id); + unsigned long tid = cb->args[1]; + int i, k = cb->args[0], ret; + struct pdp_ctx *pctx; + + if (cb->args[4]) + return 0; + + list_for_each_entry_rcu(gtp, &gn->gtp_dev_list, list) { + if (last_gtp && last_gtp != gtp) + continue; + else + last_gtp = NULL; + + for (i = k; i < gtp->hash_size; i++) { + hlist_for_each_entry_rcu(pctx, >p->tid_hash[i], hlist_tid) { + if (tid && tid != pctx->u.tid) + continue; + else + tid = 0; + + ret = gtp_genl_fill_info(skb, + NETLINK_CB(cb->skb).portid, + cb->nlh->nlmsg_seq, + cb->nlh->nlmsg_type, pctx); + if (ret < 0) { + cb->args[0] = i; + cb->args[1] = pctx->u.tid; + cb->args[2] = (unsigned long)gtp; + goto out; + } + } + } + } + cb->args[4] = 1; +out: + return skb->len; +} + +static struct nla_policy gtp_genl_policy[GTPA_MAX + 1] = { + [GTPA_LINK] = { .type = NLA_U32, }, + [GTPA_VERSION] = { .type = NLA_U32, }, + [GTPA_TID] = { .type = NLA_U64, }, + [GTPA_SGSN_ADDRESS] = { .type = NLA_U32, }, + [GTPA_MS_ADDRESS] = { .type = NLA_U32, }, + [GTPA_FLOW] = { .type = NLA_U16, }, + [GTPA_NET_NS_FD] = { .type = NLA_U32, }, + [GTPA_I_TEI] = { .type = NLA_U32, }, + [GTPA_O_TEI] = { .type = NLA_U32, }, +}; + +static const struct genl_ops gtp_genl_ops[] = { + { + .cmd = GTP_CMD_NEWPDP, + .doit = gtp_genl_new_pdp, + .policy = gtp_genl_policy, + .flags = GENL_ADMIN_PERM, + }, + { + .cmd = GTP_CMD_DELPDP, + .doit = gtp_genl_del_pdp, + .policy = gtp_genl_policy, + .flags = GENL_ADMIN_PERM, + }, + { + .cmd = GTP_CMD_GETPDP, + .doit = gtp_genl_get_pdp, + .dumpit = gtp_genl_dump_pdp, + .policy = gtp_genl_policy, + .flags = GENL_ADMIN_PERM, + }, +}; + +static int __net_init gtp_net_init(struct net *net) +{ + struct gtp_net *gn = net_generic(net, gtp_net_id); + + INIT_LIST_HEAD(&gn->gtp_dev_list); + return 0; +} + +static void __net_exit gtp_net_exit(struct net *net) +{ + struct gtp_net *gn = net_generic(net, gtp_net_id); + struct gtp_dev *gtp; + LIST_HEAD(list); + + rtnl_lock(); + list_for_each_entry(gtp, &gn->gtp_dev_list, list) + gtp_dellink(gtp->dev, &list); + + unregister_netdevice_many(&list); + rtnl_unlock(); +} + +static struct pernet_operations gtp_net_ops = { + .init = gtp_net_init, + .exit = gtp_net_exit, + .id = >p_net_id, + .size = sizeof(struct gtp_net), +}; + +static int __init gtp_init(void) +{ + int err; + + get_random_bytes(>p_h_initval, sizeof(gtp_h_initval)); + + err = rtnl_link_register(>p_link_ops); + if (err < 0) + goto error_out; + + err = genl_register_family_with_ops(>p_genl_family, gtp_genl_ops); + if (err < 0) + goto unreg_rtnl_link; + + err = register_pernet_subsys(>p_net_ops); + if (err < 0) + goto unreg_genl_family; + + pr_info("GTP module loaded (pdp ctx size %Zd bytes)\n", + sizeof(struct pdp_ctx)); + return 0; + +unreg_genl_family: + genl_unregister_family(>p_genl_family); +unreg_rtnl_link: + rtnl_link_unregister(>p_link_ops); +error_out: + pr_err("error loading GTP module loaded\n"); + return err; +} +late_initcall(gtp_init); + +static void 
__exit gtp_fini(void) +{ + unregister_pernet_subsys(>p_net_ops); + genl_unregister_family(>p_genl_family); + rtnl_link_unregister(>p_link_ops); + + pr_info("GTP module unloaded\n"); +} +module_exit(gtp_fini); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Harald Welte "); +MODULE_DESCRIPTION("Interface driver for GTP encapsulated traffic"); +MODULE_ALIAS_RTNL_LINK("gtp"); diff --git a/include/net/gtp.h b/include/net/gtp.h new file mode 100644 index 000000000000..894a37b87d63 --- /dev/null +++ b/include/net/gtp.h @@ -0,0 +1,34 @@ +#ifndef _GTP_H_ +#define _GTP_H + +/* General GTP protocol related definitions. */ + +#define GTP0_PORT 3386 +#define GTP1U_PORT 2152 + +#define GTP_TPDU 255 + +struct gtp0_header { /* According to GSM TS 09.60. */ + __u8 flags; + __u8 type; + __be16 length; + __be16 seq; + __be16 flow; + __u8 number; + __u8 spare[3]; + __be64 tid; +} __attribute__ ((packed)); + +struct gtp1_header { /* According to 3GPP TS 29.060. */ + __u8 flags; + __u8 type; + __be16 length; + __be32 tid; +} __attribute__ ((packed)); + +#define GTP1_F_NPDU 0x01 +#define GTP1_F_SEQ 0x02 +#define GTP1_F_EXTHDR 0x04 +#define GTP1_F_MASK 0x07 + +#endif diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild index 813ffb2e22c9..8bdae34d1f9a 100644 --- a/include/uapi/linux/Kbuild +++ b/include/uapi/linux/Kbuild @@ -141,6 +141,7 @@ header-y += gfs2_ondisk.h header-y += gigaset_dev.h header-y += gpio.h header-y += gsmmux.h +header-y += gtp.h header-y += hdlcdrv.h header-y += hdlc.h header-y += hdreg.h diff --git a/include/uapi/linux/gtp.h b/include/uapi/linux/gtp.h new file mode 100644 index 000000000000..ca1054dd8249 --- /dev/null +++ b/include/uapi/linux/gtp.h @@ -0,0 +1,33 @@ +#ifndef _UAPI_LINUX_GTP_H_ +#define _UAPI_LINUX_GTP_H__ + +enum gtp_genl_cmds { + GTP_CMD_NEWPDP, + GTP_CMD_DELPDP, + GTP_CMD_GETPDP, + + GTP_CMD_MAX, +}; + +enum gtp_version { + GTP_V0 = 0, + GTP_V1, +}; + +enum gtp_attrs { + GTPA_UNSPEC = 0, + GTPA_LINK, + GTPA_VERSION, + GTPA_TID, /* for GTPv0 only */ + GTPA_SGSN_ADDRESS, + GTPA_MS_ADDRESS, + GTPA_FLOW, + GTPA_NET_NS_FD, + GTPA_I_TEI, /* for GTPv1 only */ + GTPA_O_TEI, /* for GTPv1 only */ + GTPA_PAD, + __GTPA_MAX, +}; +#define GTPA_MAX (__GTPA_MAX + 1) + +#endif /* _UAPI_LINUX_GTP_H_ */ diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index d2d7fd4ba5f5..bb36bd5675a7 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h @@ -529,6 +529,16 @@ enum { }; #define IFLA_PPP_MAX (__IFLA_PPP_MAX - 1) +/* GTP section */ +enum { + IFLA_GTP_UNSPEC, + IFLA_GTP_FD0, + IFLA_GTP_FD1, + IFLA_GTP_PDP_HASHSIZE, + __IFLA_GTP_MAX, +}; +#define IFLA_GTP_MAX (__IFLA_GTP_MAX - 1) + /* Bonding section */ enum { diff --git a/include/uapi/linux/udp.h b/include/uapi/linux/udp.h index 16574ea18f0c..2c8180f9156f 100644 --- a/include/uapi/linux/udp.h +++ b/include/uapi/linux/udp.h @@ -36,6 +36,7 @@ struct udphdr { #define UDP_ENCAP_ESPINUDP_NON_IKE 1 /* draft-ietf-ipsec-nat-t-ike-00/01 */ #define UDP_ENCAP_ESPINUDP 2 /* draft-ietf-ipsec-udp-encaps-06 */ #define UDP_ENCAP_L2TPINUDP 3 /* rfc2661 */ - +#define UDP_ENCAP_GTP0 4 /* GSM TS 09.60 */ +#define UDP_ENCAP_GTP1U 5 /* 3GPP TS 29.060 */ #endif /* _UAPI_LINUX_UDP_H */ From 9d9a77cee1ab53dc6419b1ab9da88c4e9342d26a Mon Sep 17 00:00:00 2001 From: Philippe Reynes Date: Tue, 10 May 2016 00:19:41 +0200 Subject: [PATCH 1478/1649] net: phy: add phy_ethtool_{get|set}_link_ksettings Ethtool callbacks {get|set}_link_ksettings are often the same, so we add two generics functions 
phy_ethtool_{get|set}_link_ksettings to avoid writing severals times the same function. Signed-off-by: Philippe Reynes Acked-By: David Decotigny Signed-off-by: David S. Miller --- drivers/net/phy/phy.c | 24 ++++++++++++++++++++++++ include/linux/phy.h | 4 ++++ 2 files changed, 28 insertions(+) diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index 6f221c8c2a7f..603e8db50162 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -1347,3 +1347,27 @@ void phy_ethtool_get_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol) phydev->drv->get_wol(phydev, wol); } EXPORT_SYMBOL(phy_ethtool_get_wol); + +int phy_ethtool_get_link_ksettings(struct net_device *ndev, + struct ethtool_link_ksettings *cmd) +{ + struct phy_device *phydev = ndev->phydev; + + if (!phydev) + return -ENODEV; + + return phy_ethtool_ksettings_get(phydev, cmd); +} +EXPORT_SYMBOL(phy_ethtool_get_link_ksettings); + +int phy_ethtool_set_link_ksettings(struct net_device *ndev, + const struct ethtool_link_ksettings *cmd) +{ + struct phy_device *phydev = ndev->phydev; + + if (!phydev) + return -ENODEV; + + return phy_ethtool_ksettings_set(phydev, cmd); +} +EXPORT_SYMBOL(phy_ethtool_set_link_ksettings); diff --git a/include/linux/phy.h b/include/linux/phy.h index be3f83bbdc0b..2d24b283aa2d 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -829,6 +829,10 @@ int phy_ethtool_get_eee(struct phy_device *phydev, struct ethtool_eee *data); int phy_ethtool_set_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol); void phy_ethtool_get_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol); +int phy_ethtool_get_link_ksettings(struct net_device *ndev, + struct ethtool_link_ksettings *cmd); +int phy_ethtool_set_link_ksettings(struct net_device *ndev, + const struct ethtool_link_ksettings *cmd); int __init mdio_bus_init(void); void mdio_bus_exit(void); From 45f5c327ce5ce9178147279b5d6b95eac216c84c Mon Sep 17 00:00:00 2001 From: Philippe Reynes Date: Tue, 10 May 2016 00:19:42 +0200 Subject: [PATCH 1479/1649] net: ethernet: fec: use phydev from struct net_device The private structure contain a pointer to phydev, but the structure net_device already contain such pointer. So we can remove the pointer phydev in the private structure, and update the driver to use the one contained in struct net_device. Signed-off-by: Philippe Reynes Signed-off-by: David S. 
Miller --- drivers/net/ethernet/freescale/fec.h | 1 - drivers/net/ethernet/freescale/fec_main.c | 49 ++++++++++------------- 2 files changed, 21 insertions(+), 29 deletions(-) diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h index 195122e11f10..f58f9ea51639 100644 --- a/drivers/net/ethernet/freescale/fec.h +++ b/drivers/net/ethernet/freescale/fec.h @@ -517,7 +517,6 @@ struct fec_enet_private { /* Phylib and MDIO interface */ struct mii_bus *mii_bus; - struct phy_device *phy_dev; int mii_timeout; uint phy_speed; phy_interface_t phy_interface; diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index c9f77c324535..9d6e35cf0905 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -967,10 +967,10 @@ fec_restart(struct net_device *ndev) rcntl &= ~(1 << 8); /* 1G, 100M or 10M */ - if (fep->phy_dev) { - if (fep->phy_dev->speed == SPEED_1000) + if (ndev->phydev) { + if (ndev->phydev->speed == SPEED_1000) ecntl |= (1 << 5); - else if (fep->phy_dev->speed == SPEED_100) + else if (ndev->phydev->speed == SPEED_100) rcntl &= ~(1 << 9); else rcntl |= (1 << 9); @@ -991,7 +991,7 @@ fec_restart(struct net_device *ndev) */ cfgr = (fep->phy_interface == PHY_INTERFACE_MODE_RMII) ? BM_MIIGSK_CFGR_RMII : BM_MIIGSK_CFGR_MII; - if (fep->phy_dev && fep->phy_dev->speed == SPEED_10) + if (ndev->phydev && ndev->phydev->speed == SPEED_10) cfgr |= BM_MIIGSK_CFGR_FRCONT_10M; writel(cfgr, fep->hwp + FEC_MIIGSK_CFGR); @@ -1005,7 +1005,7 @@ fec_restart(struct net_device *ndev) /* enable pause frame*/ if ((fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) || ((fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) && - fep->phy_dev && fep->phy_dev->pause)) { + ndev->phydev && ndev->phydev->pause)) { rcntl |= FEC_ENET_FCE; /* set FIFO threshold parameter to reduce overrun */ @@ -1685,7 +1685,7 @@ static void fec_get_mac(struct net_device *ndev) static void fec_enet_adjust_link(struct net_device *ndev) { struct fec_enet_private *fep = netdev_priv(ndev); - struct phy_device *phy_dev = fep->phy_dev; + struct phy_device *phy_dev = ndev->phydev; int status_change = 0; /* Prevent a state halted on mii error */ @@ -1885,8 +1885,6 @@ static int fec_enet_mii_probe(struct net_device *ndev) int phy_id; int dev_id = fep->dev_id; - fep->phy_dev = NULL; - if (fep->phy_node) { phy_dev = of_phy_connect(ndev, fep->phy_node, &fec_enet_adjust_link, 0, @@ -1934,7 +1932,6 @@ static int fec_enet_mii_probe(struct net_device *ndev) phy_dev->advertising = phy_dev->supported; - fep->phy_dev = phy_dev; fep->link = 0; fep->full_duplex = 0; @@ -2067,8 +2064,7 @@ static void fec_enet_mii_remove(struct fec_enet_private *fep) static int fec_enet_get_link_ksettings(struct net_device *ndev, struct ethtool_link_ksettings *cmd) { - struct fec_enet_private *fep = netdev_priv(ndev); - struct phy_device *phydev = fep->phy_dev; + struct phy_device *phydev = ndev->phydev; if (!phydev) return -ENODEV; @@ -2079,8 +2075,7 @@ static int fec_enet_get_link_ksettings(struct net_device *ndev, static int fec_enet_set_link_ksettings(struct net_device *ndev, const struct ethtool_link_ksettings *cmd) { - struct fec_enet_private *fep = netdev_priv(ndev); - struct phy_device *phydev = fep->phy_dev; + struct phy_device *phydev = ndev->phydev; if (!phydev) return -ENODEV; @@ -2220,7 +2215,7 @@ static int fec_enet_set_pauseparam(struct net_device *ndev, { struct fec_enet_private *fep = netdev_priv(ndev); - if (!fep->phy_dev) + if (!ndev->phydev) return -ENODEV; if 
(pause->tx_pause != pause->rx_pause) { @@ -2236,17 +2231,17 @@ static int fec_enet_set_pauseparam(struct net_device *ndev, fep->pause_flag |= pause->autoneg ? FEC_PAUSE_FLAG_AUTONEG : 0; if (pause->rx_pause || pause->autoneg) { - fep->phy_dev->supported |= ADVERTISED_Pause; - fep->phy_dev->advertising |= ADVERTISED_Pause; + ndev->phydev->supported |= ADVERTISED_Pause; + ndev->phydev->advertising |= ADVERTISED_Pause; } else { - fep->phy_dev->supported &= ~ADVERTISED_Pause; - fep->phy_dev->advertising &= ~ADVERTISED_Pause; + ndev->phydev->supported &= ~ADVERTISED_Pause; + ndev->phydev->advertising &= ~ADVERTISED_Pause; } if (pause->autoneg) { if (netif_running(ndev)) fec_stop(ndev); - phy_start_aneg(fep->phy_dev); + phy_start_aneg(ndev->phydev); } if (netif_running(ndev)) { napi_disable(&fep->napi); @@ -2362,8 +2357,7 @@ static int fec_enet_get_sset_count(struct net_device *dev, int sset) static int fec_enet_nway_reset(struct net_device *dev) { - struct fec_enet_private *fep = netdev_priv(dev); - struct phy_device *phydev = fep->phy_dev; + struct phy_device *phydev = dev->phydev; if (!phydev) return -ENODEV; @@ -2594,7 +2588,7 @@ static const struct ethtool_ops fec_enet_ethtool_ops = { static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd) { struct fec_enet_private *fep = netdev_priv(ndev); - struct phy_device *phydev = fep->phy_dev; + struct phy_device *phydev = ndev->phydev; if (!netif_running(ndev)) return -EINVAL; @@ -2849,7 +2843,7 @@ fec_enet_open(struct net_device *ndev) goto err_enet_mii_probe; napi_enable(&fep->napi); - phy_start(fep->phy_dev); + phy_start(ndev->phydev); netif_tx_start_all_queues(ndev); device_set_wakeup_enable(&ndev->dev, fep->wol_flag & @@ -2873,7 +2867,7 @@ fec_enet_close(struct net_device *ndev) { struct fec_enet_private *fep = netdev_priv(ndev); - phy_stop(fep->phy_dev); + phy_stop(ndev->phydev); if (netif_device_present(ndev)) { napi_disable(&fep->napi); @@ -2881,8 +2875,7 @@ fec_enet_close(struct net_device *ndev) fec_stop(ndev); } - phy_disconnect(fep->phy_dev); - fep->phy_dev = NULL; + phy_disconnect(ndev->phydev); fec_enet_clk_enable(ndev, false); pinctrl_pm_select_sleep_state(&fep->pdev->dev); @@ -3510,7 +3503,7 @@ static int __maybe_unused fec_suspend(struct device *dev) if (netif_running(ndev)) { if (fep->wol_flag & FEC_WOL_FLAG_ENABLE) fep->wol_flag |= FEC_WOL_FLAG_SLEEP_ON; - phy_stop(fep->phy_dev); + phy_stop(ndev->phydev); napi_disable(&fep->napi); netif_tx_lock_bh(ndev); netif_device_detach(ndev); @@ -3570,7 +3563,7 @@ static int __maybe_unused fec_resume(struct device *dev) netif_device_attach(ndev); netif_tx_unlock_bh(ndev); napi_enable(&fep->napi); - phy_start(fep->phy_dev); + phy_start(ndev->phydev); } rtnl_unlock(); From 9365fbf5781f2c7f182e8b5c78ff757d3ff9d722 Mon Sep 17 00:00:00 2001 From: Philippe Reynes Date: Tue, 10 May 2016 00:19:43 +0200 Subject: [PATCH 1480/1649] net: ethernet: fec: use phy_ethtool_{get|set}_link_ksettings There are two generics functions phy_ethtool_{get|set}_link_ksettings, so we can use them instead of defining the same code in the driver. Signed-off-by: Philippe Reynes Signed-off-by: David S. 
Miller --- drivers/net/ethernet/freescale/fec_main.c | 26 ++--------------------- 1 file changed, 2 insertions(+), 24 deletions(-) diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 9d6e35cf0905..ca2cccc594fd 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -2061,28 +2061,6 @@ static void fec_enet_mii_remove(struct fec_enet_private *fep) } } -static int fec_enet_get_link_ksettings(struct net_device *ndev, - struct ethtool_link_ksettings *cmd) -{ - struct phy_device *phydev = ndev->phydev; - - if (!phydev) - return -ENODEV; - - return phy_ethtool_ksettings_get(phydev, cmd); -} - -static int fec_enet_set_link_ksettings(struct net_device *ndev, - const struct ethtool_link_ksettings *cmd) -{ - struct phy_device *phydev = ndev->phydev; - - if (!phydev) - return -ENODEV; - - return phy_ethtool_ksettings_set(phydev, cmd); -} - static void fec_enet_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *info) { @@ -2581,8 +2559,8 @@ static const struct ethtool_ops fec_enet_ethtool_ops = { .set_tunable = fec_enet_set_tunable, .get_wol = fec_enet_get_wol, .set_wol = fec_enet_set_wol, - .get_link_ksettings = fec_enet_get_link_ksettings, - .set_link_ksettings = fec_enet_set_link_ksettings, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, }; static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd) From 1dee3f59a8d5e711797dc82628aaf94a64e99922 Mon Sep 17 00:00:00 2001 From: Nicolas Dichtel Date: Mon, 9 May 2016 11:40:20 +0200 Subject: [PATCH 1481/1649] block/drbd: align properly u64 in nl messages The attribute 0 is never used in drbd, so let's use it as pad attribute in netlink messages. This minimizes the patch. Note that this patch is only compile-tested. Signed-off-by: Nicolas Dichtel Signed-off-by: Lars Ellenberg Signed-off-by: David S. Miller --- drivers/block/drbd/drbd_nl.c | 28 ++++++++++++++++------------ include/linux/genl_magic_struct.h | 7 ++++++- 2 files changed, 22 insertions(+), 13 deletions(-) diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c index 1fd1dccebb6b..0bac9c8246bc 100644 --- a/drivers/block/drbd/drbd_nl.c +++ b/drivers/block/drbd/drbd_nl.c @@ -3633,14 +3633,15 @@ static int nla_put_status_info(struct sk_buff *skb, struct drbd_device *device, goto nla_put_failure; if (nla_put_u32(skb, T_sib_reason, sib ? 
sib->sib_reason : SIB_GET_STATUS_REPLY) || nla_put_u32(skb, T_current_state, device->state.i) || - nla_put_u64(skb, T_ed_uuid, device->ed_uuid) || - nla_put_u64(skb, T_capacity, drbd_get_capacity(device->this_bdev)) || - nla_put_u64(skb, T_send_cnt, device->send_cnt) || - nla_put_u64(skb, T_recv_cnt, device->recv_cnt) || - nla_put_u64(skb, T_read_cnt, device->read_cnt) || - nla_put_u64(skb, T_writ_cnt, device->writ_cnt) || - nla_put_u64(skb, T_al_writ_cnt, device->al_writ_cnt) || - nla_put_u64(skb, T_bm_writ_cnt, device->bm_writ_cnt) || + nla_put_u64_0pad(skb, T_ed_uuid, device->ed_uuid) || + nla_put_u64_0pad(skb, T_capacity, + drbd_get_capacity(device->this_bdev)) || + nla_put_u64_0pad(skb, T_send_cnt, device->send_cnt) || + nla_put_u64_0pad(skb, T_recv_cnt, device->recv_cnt) || + nla_put_u64_0pad(skb, T_read_cnt, device->read_cnt) || + nla_put_u64_0pad(skb, T_writ_cnt, device->writ_cnt) || + nla_put_u64_0pad(skb, T_al_writ_cnt, device->al_writ_cnt) || + nla_put_u64_0pad(skb, T_bm_writ_cnt, device->bm_writ_cnt) || nla_put_u32(skb, T_ap_bio_cnt, atomic_read(&device->ap_bio_cnt)) || nla_put_u32(skb, T_ap_pending_cnt, atomic_read(&device->ap_pending_cnt)) || nla_put_u32(skb, T_rs_pending_cnt, atomic_read(&device->rs_pending_cnt))) @@ -3657,13 +3658,16 @@ static int nla_put_status_info(struct sk_buff *skb, struct drbd_device *device, goto nla_put_failure; if (nla_put_u32(skb, T_disk_flags, device->ldev->md.flags) || - nla_put_u64(skb, T_bits_total, drbd_bm_bits(device)) || - nla_put_u64(skb, T_bits_oos, drbd_bm_total_weight(device))) + nla_put_u64_0pad(skb, T_bits_total, drbd_bm_bits(device)) || + nla_put_u64_0pad(skb, T_bits_oos, + drbd_bm_total_weight(device))) goto nla_put_failure; if (C_SYNC_SOURCE <= device->state.conn && C_PAUSED_SYNC_T >= device->state.conn) { - if (nla_put_u64(skb, T_bits_rs_total, device->rs_total) || - nla_put_u64(skb, T_bits_rs_failed, device->rs_failed)) + if (nla_put_u64_0pad(skb, T_bits_rs_total, + device->rs_total) || + nla_put_u64_0pad(skb, T_bits_rs_failed, + device->rs_failed)) goto nla_put_failure; } } diff --git a/include/linux/genl_magic_struct.h b/include/linux/genl_magic_struct.h index eecd19b37001..6270a56e5edc 100644 --- a/include/linux/genl_magic_struct.h +++ b/include/linux/genl_magic_struct.h @@ -62,6 +62,11 @@ extern void CONCAT_(GENL_MAGIC_FAMILY, _genl_unregister)(void); /* MAGIC helpers {{{2 */ +static inline int nla_put_u64_0pad(struct sk_buff *skb, int attrtype, u64 value) +{ + return nla_put_64bit(skb, attrtype, sizeof(u64), &value, 0); +} + /* possible field types */ #define __flg_field(attr_nr, attr_flag, name) \ __field(attr_nr, attr_flag, name, NLA_U8, char, \ @@ -80,7 +85,7 @@ extern void CONCAT_(GENL_MAGIC_FAMILY, _genl_unregister)(void); nla_get_u32, nla_put_u32, true) #define __u64_field(attr_nr, attr_flag, name) \ __field(attr_nr, attr_flag, name, NLA_U64, __u64, \ - nla_get_u64, nla_put_u64, false) + nla_get_u64, nla_put_u64_0pad, false) #define __str_field(attr_nr, attr_flag, name, maxlen) \ __array(attr_nr, attr_flag, name, NLA_NUL_STRING, char, maxlen, \ nla_strlcpy, nla_put, false) From 93edb8c7f94fb3d384790ac8a83c3fb9389f6ca5 Mon Sep 17 00:00:00 2001 From: Pablo Neira Date: Tue, 10 May 2016 21:33:38 +0200 Subject: [PATCH 1482/1649] gtp: reload GTPv1 header after pskb_may_pull() The GTPv1 header flags indicate the presence of optional extensions after this header. Refresh the pointer to the GTPv1 header as skb->head might have be reallocated via pskb_may_pull(). 
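As an illustration only (not part of the patch), the sketch below condenses gtp1u_udp_encap_recv() down to the pattern the fix restores. The helper name parse_gtp1u_example() and its return convention are invented for this note; the header layout, GTP1_F_MASK and the pskb_may_pull() calls are the ones used by the driver. The rule being applied: any pointer derived from skb->data before pskb_may_pull() may be stale afterwards, because the call can reallocate the skb header and move skb->data.

#include <linux/skbuff.h>
#include <linux/udp.h>
#include <net/gtp.h>

/* Illustrative sketch, condensed from gtp1u_udp_encap_recv(); this is not
 * the driver function itself. Returns 0 if a GTPv1-U T-PDU header was
 * parsed, -1 on malformed or truncated input.
 */
static int parse_gtp1u_example(struct sk_buff *skb)
{
        unsigned int hdrlen = sizeof(struct udphdr) + sizeof(struct gtp1_header);
        struct gtp1_header *gtp1;

        if (!pskb_may_pull(skb, hdrlen))
                return -1;

        gtp1 = (struct gtp1_header *)(skb->data + sizeof(struct udphdr));

        /* Optional sequence number/N-PDU/extension fields add 4 bytes. */
        if (gtp1->flags & GTP1_F_MASK)
                hdrlen += 4;

        /* This second pull may reallocate and move skb->data ... */
        if (!pskb_may_pull(skb, hdrlen))
                return -1;

        /* ... so the header pointer must be reloaded before further use. */
        gtp1 = (struct gtp1_header *)(skb->data + sizeof(struct udphdr));

        return gtp1->type == GTP_TPDU ? 0 : -1;
}

The fix itself is exactly that reload: the hunk below re-derives gtp1 from skb->data after the second pskb_may_pull() in gtp1u_udp_encap_recv().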
Fixes: 459aa660eb1d ("gtp: add initial driver for datapath of GPRS Tunneling Protocol (GTP-U)") Reported-by: Eric Dumazet Signed-off-by: Pablo Neira Ayuso Signed-off-by: David S. Miller --- drivers/net/gtp.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c index 8ce1104e4fdb..f7caf1e35d83 100644 --- a/drivers/net/gtp.c +++ b/drivers/net/gtp.c @@ -253,6 +253,8 @@ static int gtp1u_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb, if (!pskb_may_pull(skb, hdrlen)) return -1; + gtp1 = (struct gtp1_header *)(skb->data + sizeof(struct udphdr)); + rcu_read_lock(); pctx = gtp1_pdp_find(gtp, ntohl(gtp1->tid)); if (!pctx) { From 1ddb6b71b9b572edb2b9c09086ee3bbbc550d9cd Mon Sep 17 00:00:00 2001 From: Tom Herbert Date: Tue, 10 May 2016 11:56:32 +0200 Subject: [PATCH 1483/1649] ila: ipv6/ila: fix nlsize calculation for lwtunnel The handler 'ila_fill_encap_info' adds two attributes: ILA_ATTR_LOCATOR and ILA_ATTR_CSUM_MODE. nla_total_size_64bit() must be used for ILA_ATTR_LOCATOR. Also, do nla_put_u8 instead of nla_put_u64 for ILA_ATTR_CSUM_MODE. Fixes: f13a82d87b21 ("ipv6: use nla_put_u64_64bit()") Fixes: 90bfe662db13 ("ila: add checksum neutral ILA translations") Reported-by: Nicolas Dichtel Signed-off-by: Tom Herbert Signed-off-by: Nicolas Dichtel Acked-by: Tom Herbert Signed-off-by: David S. Miller --- net/ipv6/ila/ila_lwt.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/net/ipv6/ila/ila_lwt.c b/net/ipv6/ila/ila_lwt.c index 17038e1ede98..1dfb64166d7d 100644 --- a/net/ipv6/ila/ila_lwt.c +++ b/net/ipv6/ila/ila_lwt.c @@ -133,7 +133,7 @@ static int ila_fill_encap_info(struct sk_buff *skb, if (nla_put_u64_64bit(skb, ILA_ATTR_LOCATOR, (__force u64)p->locator.v64, ILA_ATTR_PAD)) goto nla_put_failure; - if (nla_put_u64(skb, ILA_ATTR_CSUM_MODE, (__force u8)p->csum_mode)) + if (nla_put_u8(skb, ILA_ATTR_CSUM_MODE, (__force u8)p->csum_mode)) goto nla_put_failure; return 0; @@ -144,7 +144,9 @@ nla_put_failure: static int ila_encap_nlsize(struct lwtunnel_state *lwtstate) { - return nla_total_size(sizeof(u64)); /* ILA_ATTR_LOCATOR */ + return nla_total_size_64bit(sizeof(u64)) + /* ILA_ATTR_LOCATOR */ + nla_total_size(sizeof(u8)) + /* ILA_ATTR_CSUM_MODE */ + 0; } static int ila_encap_cmp(struct lwtunnel_state *a, struct lwtunnel_state *b) From 953abb3823633385b1235add9c30c3e775dee0bc Mon Sep 17 00:00:00 2001 From: Sowmini Varadhan Date: Tue, 10 May 2016 12:38:08 -0400 Subject: [PATCH 1484/1649] skbuff: remove unused variable `doff' There are two instances of an unused variable, `doff', added by commit 6fa01ccd8830 ("skbuff: Add pskb_extract() helper function") in pskb_carve_inside_header() and pskb_carve_inside_nonlinear(). Remove these instances; they are not used. Reported-by: Daniel Borkmann Signed-off-by: Sowmini Varadhan Signed-off-by: David S. 
Miller --- net/core/skbuff.c | 6 ------ 1 file changed, 6 deletions(-) diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 5586be93632f..f2b77e549c03 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -4634,7 +4634,6 @@ static int pskb_carve_inside_header(struct sk_buff *skb, const u32 off, int size = skb_end_offset(skb); int new_hlen = headlen - off; u8 *data; - int doff = 0; size = SKB_DATA_ALIGN(size); @@ -4674,13 +4673,11 @@ static int pskb_carve_inside_header(struct sk_buff *skb, const u32 off, skb_free_head(skb); } - doff = (data - skb->head); skb->head = data; skb->data = data; skb->head_frag = 0; #ifdef NET_SKBUFF_DATA_USES_OFFSET skb->end = size; - doff = 0; #else skb->end = skb->head + size; #endif @@ -4761,7 +4758,6 @@ static int pskb_carve_inside_nonlinear(struct sk_buff *skb, const u32 off, u8 *data; const int nfrags = skb_shinfo(skb)->nr_frags; struct skb_shared_info *shinfo; - int doff = 0; size = SKB_DATA_ALIGN(size); @@ -4816,13 +4812,11 @@ static int pskb_carve_inside_nonlinear(struct sk_buff *skb, const u32 off, } skb_release_data(skb); - doff = (data - skb->head); skb->head = data; skb->head_frag = 0; skb->data = data; #ifdef NET_SKBUFF_DATA_USES_OFFSET skb->end = size; - doff = 0; #else skb->end = skb->head + size; #endif From 756ee1729b2feb3a45767da29e338f70f2086ba3 Mon Sep 17 00:00:00 2001 From: Lawrence Brakmo Date: Wed, 11 May 2016 10:02:13 -0700 Subject: [PATCH 1485/1649] tcp: replace cnt & rtt with struct in pkts_acked() Replace 2 arguments (cnt and rtt) in the congestion control modules' pkts_acked() function with a struct. This will allow adding more information without having to modify existing congestion control modules (tcp_nv in particular needs bytes in flight when packet was sent). As proposed by Neal Cardwell in his comments to the tcp_nv patch. Signed-off-by: Lawrence Brakmo Acked-by: Yuchung Cheng Signed-off-by: David S. 
Miller --- include/net/tcp.h | 7 ++++++- net/ipv4/tcp_bic.c | 6 +++--- net/ipv4/tcp_cdg.c | 14 +++++++------- net/ipv4/tcp_cubic.c | 6 +++--- net/ipv4/tcp_htcp.c | 10 +++++----- net/ipv4/tcp_illinois.c | 21 +++++++++++---------- net/ipv4/tcp_input.c | 8 ++++++-- net/ipv4/tcp_lp.c | 6 +++--- net/ipv4/tcp_vegas.c | 6 +++--- net/ipv4/tcp_vegas.h | 2 +- net/ipv4/tcp_veno.c | 7 ++++--- net/ipv4/tcp_westwood.c | 7 ++++--- net/ipv4/tcp_yeah.c | 7 ++++--- 13 files changed, 60 insertions(+), 47 deletions(-) diff --git a/include/net/tcp.h b/include/net/tcp.h index 4775a1bba7f7..c9ab561387c4 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -854,6 +854,11 @@ enum tcp_ca_ack_event_flags { union tcp_cc_info; +struct ack_sample { + u32 pkts_acked; + s32 rtt_us; +}; + struct tcp_congestion_ops { struct list_head list; u32 key; @@ -877,7 +882,7 @@ struct tcp_congestion_ops { /* new value of cwnd after loss (optional) */ u32 (*undo_cwnd)(struct sock *sk); /* hook for packet ack accounting (optional) */ - void (*pkts_acked)(struct sock *sk, u32 num_acked, s32 rtt_us); + void (*pkts_acked)(struct sock *sk, const struct ack_sample *sample); /* get info for inet_diag (optional) */ size_t (*get_info)(struct sock *sk, u32 ext, int *attr, union tcp_cc_info *info); diff --git a/net/ipv4/tcp_bic.c b/net/ipv4/tcp_bic.c index fd1405d37c14..36087bca9f48 100644 --- a/net/ipv4/tcp_bic.c +++ b/net/ipv4/tcp_bic.c @@ -197,15 +197,15 @@ static void bictcp_state(struct sock *sk, u8 new_state) /* Track delayed acknowledgment ratio using sliding window * ratio = (15*ratio + sample) / 16 */ -static void bictcp_acked(struct sock *sk, u32 cnt, s32 rtt) +static void bictcp_acked(struct sock *sk, const struct ack_sample *sample) { const struct inet_connection_sock *icsk = inet_csk(sk); if (icsk->icsk_ca_state == TCP_CA_Open) { struct bictcp *ca = inet_csk_ca(sk); - cnt -= ca->delayed_ack >> ACK_RATIO_SHIFT; - ca->delayed_ack += cnt; + ca->delayed_ack += sample->pkts_acked - + (ca->delayed_ack >> ACK_RATIO_SHIFT); } } diff --git a/net/ipv4/tcp_cdg.c b/net/ipv4/tcp_cdg.c index ccce8a55f1e1..03725b294286 100644 --- a/net/ipv4/tcp_cdg.c +++ b/net/ipv4/tcp_cdg.c @@ -294,12 +294,12 @@ static void tcp_cdg_cong_avoid(struct sock *sk, u32 ack, u32 acked) ca->shadow_wnd = max(ca->shadow_wnd, ca->shadow_wnd + incr); } -static void tcp_cdg_acked(struct sock *sk, u32 num_acked, s32 rtt_us) +static void tcp_cdg_acked(struct sock *sk, const struct ack_sample *sample) { struct cdg *ca = inet_csk_ca(sk); struct tcp_sock *tp = tcp_sk(sk); - if (rtt_us <= 0) + if (sample->rtt_us <= 0) return; /* A heuristic for filtering delayed ACKs, adapted from: @@ -307,20 +307,20 @@ static void tcp_cdg_acked(struct sock *sk, u32 num_acked, s32 rtt_us) * delay and rate based TCP mechanisms." TR 100219A. CAIA, 2010. */ if (tp->sacked_out == 0) { - if (num_acked == 1 && ca->delack) { + if (sample->pkts_acked == 1 && ca->delack) { /* A delayed ACK is only used for the minimum if it is * provenly lower than an existing non-zero minimum. 
*/ - ca->rtt.min = min(ca->rtt.min, rtt_us); + ca->rtt.min = min(ca->rtt.min, sample->rtt_us); ca->delack--; return; - } else if (num_acked > 1 && ca->delack < 5) { + } else if (sample->pkts_acked > 1 && ca->delack < 5) { ca->delack++; } } - ca->rtt.min = min_not_zero(ca->rtt.min, rtt_us); - ca->rtt.max = max(ca->rtt.max, rtt_us); + ca->rtt.min = min_not_zero(ca->rtt.min, sample->rtt_us); + ca->rtt.max = max(ca->rtt.max, sample->rtt_us); } static u32 tcp_cdg_ssthresh(struct sock *sk) diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c index 0ce946e395e1..c99230efcd52 100644 --- a/net/ipv4/tcp_cubic.c +++ b/net/ipv4/tcp_cubic.c @@ -437,21 +437,21 @@ static void hystart_update(struct sock *sk, u32 delay) /* Track delayed acknowledgment ratio using sliding window * ratio = (15*ratio + sample) / 16 */ -static void bictcp_acked(struct sock *sk, u32 cnt, s32 rtt_us) +static void bictcp_acked(struct sock *sk, const struct ack_sample *sample) { const struct tcp_sock *tp = tcp_sk(sk); struct bictcp *ca = inet_csk_ca(sk); u32 delay; /* Some calls are for duplicates without timetamps */ - if (rtt_us < 0) + if (sample->rtt_us < 0) return; /* Discard delay samples right after fast recovery */ if (ca->epoch_start && (s32)(tcp_time_stamp - ca->epoch_start) < HZ) return; - delay = (rtt_us << 3) / USEC_PER_MSEC; + delay = (sample->rtt_us << 3) / USEC_PER_MSEC; if (delay == 0) delay = 1; diff --git a/net/ipv4/tcp_htcp.c b/net/ipv4/tcp_htcp.c index 82f0d9ed60f5..4a4d8e76738f 100644 --- a/net/ipv4/tcp_htcp.c +++ b/net/ipv4/tcp_htcp.c @@ -99,7 +99,7 @@ static inline void measure_rtt(struct sock *sk, u32 srtt) } static void measure_achieved_throughput(struct sock *sk, - u32 pkts_acked, s32 rtt) + const struct ack_sample *sample) { const struct inet_connection_sock *icsk = inet_csk(sk); const struct tcp_sock *tp = tcp_sk(sk); @@ -107,10 +107,10 @@ static void measure_achieved_throughput(struct sock *sk, u32 now = tcp_time_stamp; if (icsk->icsk_ca_state == TCP_CA_Open) - ca->pkts_acked = pkts_acked; + ca->pkts_acked = sample->pkts_acked; - if (rtt > 0) - measure_rtt(sk, usecs_to_jiffies(rtt)); + if (sample->rtt_us > 0) + measure_rtt(sk, usecs_to_jiffies(sample->rtt_us)); if (!use_bandwidth_switch) return; @@ -122,7 +122,7 @@ static void measure_achieved_throughput(struct sock *sk, return; } - ca->packetcount += pkts_acked; + ca->packetcount += sample->pkts_acked; if (ca->packetcount >= tp->snd_cwnd - (ca->alpha >> 7 ? : 1) && now - ca->lasttime >= ca->minRTT && diff --git a/net/ipv4/tcp_illinois.c b/net/ipv4/tcp_illinois.c index 2ab9bbb6faff..c8e6d86be114 100644 --- a/net/ipv4/tcp_illinois.c +++ b/net/ipv4/tcp_illinois.c @@ -82,30 +82,31 @@ static void tcp_illinois_init(struct sock *sk) } /* Measure RTT for each ack. 
*/ -static void tcp_illinois_acked(struct sock *sk, u32 pkts_acked, s32 rtt) +static void tcp_illinois_acked(struct sock *sk, const struct ack_sample *sample) { struct illinois *ca = inet_csk_ca(sk); + s32 rtt_us = sample->rtt_us; - ca->acked = pkts_acked; + ca->acked = sample->pkts_acked; /* dup ack, no rtt sample */ - if (rtt < 0) + if (rtt_us < 0) return; /* ignore bogus values, this prevents wraparound in alpha math */ - if (rtt > RTT_MAX) - rtt = RTT_MAX; + if (rtt_us > RTT_MAX) + rtt_us = RTT_MAX; /* keep track of minimum RTT seen so far */ - if (ca->base_rtt > rtt) - ca->base_rtt = rtt; + if (ca->base_rtt > rtt_us) + ca->base_rtt = rtt_us; /* and max */ - if (ca->max_rtt < rtt) - ca->max_rtt = rtt; + if (ca->max_rtt < rtt_us) + ca->max_rtt = rtt_us; ++ca->cnt_rtt; - ca->sum_rtt += rtt; + ca->sum_rtt += rtt_us; } /* Maximum queuing delay */ diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index a914e0607895..d6c8f4cd0800 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -3248,8 +3248,12 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets, tcp_rearm_rto(sk); } - if (icsk->icsk_ca_ops->pkts_acked) - icsk->icsk_ca_ops->pkts_acked(sk, pkts_acked, ca_rtt_us); + if (icsk->icsk_ca_ops->pkts_acked) { + struct ack_sample sample = { .pkts_acked = pkts_acked, + .rtt_us = ca_rtt_us }; + + icsk->icsk_ca_ops->pkts_acked(sk, &sample); + } #if FASTRETRANS_DEBUG > 0 WARN_ON((int)tp->sacked_out < 0); diff --git a/net/ipv4/tcp_lp.c b/net/ipv4/tcp_lp.c index 1e70fa8fa793..c67ece1390c2 100644 --- a/net/ipv4/tcp_lp.c +++ b/net/ipv4/tcp_lp.c @@ -260,13 +260,13 @@ static void tcp_lp_rtt_sample(struct sock *sk, u32 rtt) * newReno in increase case. * We work it out by following the idea from TCP-LP's paper directly */ -static void tcp_lp_pkts_acked(struct sock *sk, u32 num_acked, s32 rtt_us) +static void tcp_lp_pkts_acked(struct sock *sk, const struct ack_sample *sample) { struct tcp_sock *tp = tcp_sk(sk); struct lp *lp = inet_csk_ca(sk); - if (rtt_us > 0) - tcp_lp_rtt_sample(sk, rtt_us); + if (sample->rtt_us > 0) + tcp_lp_rtt_sample(sk, sample->rtt_us); /* calc inference */ if (tcp_time_stamp > tp->rx_opt.rcv_tsecr) diff --git a/net/ipv4/tcp_vegas.c b/net/ipv4/tcp_vegas.c index 13951c4087d4..4c4bac1b5eab 100644 --- a/net/ipv4/tcp_vegas.c +++ b/net/ipv4/tcp_vegas.c @@ -107,16 +107,16 @@ EXPORT_SYMBOL_GPL(tcp_vegas_init); * o min-filter RTT samples from a much longer window (forever for now) * to find the propagation delay (baseRTT) */ -void tcp_vegas_pkts_acked(struct sock *sk, u32 cnt, s32 rtt_us) +void tcp_vegas_pkts_acked(struct sock *sk, const struct ack_sample *sample) { struct vegas *vegas = inet_csk_ca(sk); u32 vrtt; - if (rtt_us < 0) + if (sample->rtt_us < 0) return; /* Never allow zero rtt or baseRTT */ - vrtt = rtt_us + 1; + vrtt = sample->rtt_us + 1; /* Filter to find propagation delay: */ if (vrtt < vegas->baseRTT) diff --git a/net/ipv4/tcp_vegas.h b/net/ipv4/tcp_vegas.h index ef9da5306c68..248cfc0ff9ae 100644 --- a/net/ipv4/tcp_vegas.h +++ b/net/ipv4/tcp_vegas.h @@ -17,7 +17,7 @@ struct vegas { void tcp_vegas_init(struct sock *sk); void tcp_vegas_state(struct sock *sk, u8 ca_state); -void tcp_vegas_pkts_acked(struct sock *sk, u32 cnt, s32 rtt_us); +void tcp_vegas_pkts_acked(struct sock *sk, const struct ack_sample *sample); void tcp_vegas_cwnd_event(struct sock *sk, enum tcp_ca_event event); size_t tcp_vegas_get_info(struct sock *sk, u32 ext, int *attr, union tcp_cc_info *info); diff --git a/net/ipv4/tcp_veno.c b/net/ipv4/tcp_veno.c index 
0d094b995cd9..40171e163cff 100644 --- a/net/ipv4/tcp_veno.c +++ b/net/ipv4/tcp_veno.c @@ -69,16 +69,17 @@ static void tcp_veno_init(struct sock *sk) } /* Do rtt sampling needed for Veno. */ -static void tcp_veno_pkts_acked(struct sock *sk, u32 cnt, s32 rtt_us) +static void tcp_veno_pkts_acked(struct sock *sk, + const struct ack_sample *sample) { struct veno *veno = inet_csk_ca(sk); u32 vrtt; - if (rtt_us < 0) + if (sample->rtt_us < 0) return; /* Never allow zero rtt or baseRTT */ - vrtt = rtt_us + 1; + vrtt = sample->rtt_us + 1; /* Filter to find propagation delay: */ if (vrtt < veno->basertt) diff --git a/net/ipv4/tcp_westwood.c b/net/ipv4/tcp_westwood.c index c10732e39837..4b03a2e2a050 100644 --- a/net/ipv4/tcp_westwood.c +++ b/net/ipv4/tcp_westwood.c @@ -99,12 +99,13 @@ static void westwood_filter(struct westwood *w, u32 delta) * Called after processing group of packets. * but all westwood needs is the last sample of srtt. */ -static void tcp_westwood_pkts_acked(struct sock *sk, u32 cnt, s32 rtt) +static void tcp_westwood_pkts_acked(struct sock *sk, + const struct ack_sample *sample) { struct westwood *w = inet_csk_ca(sk); - if (rtt > 0) - w->rtt = usecs_to_jiffies(rtt); + if (sample->rtt_us > 0) + w->rtt = usecs_to_jiffies(sample->rtt_us); } /* diff --git a/net/ipv4/tcp_yeah.c b/net/ipv4/tcp_yeah.c index 3e6a472e6b88..028eb046ea40 100644 --- a/net/ipv4/tcp_yeah.c +++ b/net/ipv4/tcp_yeah.c @@ -56,15 +56,16 @@ static void tcp_yeah_init(struct sock *sk) tp->snd_cwnd_clamp = min_t(u32, tp->snd_cwnd_clamp, 0xffffffff/128); } -static void tcp_yeah_pkts_acked(struct sock *sk, u32 pkts_acked, s32 rtt_us) +static void tcp_yeah_pkts_acked(struct sock *sk, + const struct ack_sample *sample) { const struct inet_connection_sock *icsk = inet_csk(sk); struct yeah *yeah = inet_csk_ca(sk); if (icsk->icsk_ca_state == TCP_CA_Open) - yeah->pkts_acked = pkts_acked; + yeah->pkts_acked = sample->pkts_acked; - tcp_vegas_pkts_acked(sk, pkts_acked, rtt_us); + tcp_vegas_pkts_acked(sk, sample); } static void tcp_yeah_cong_avoid(struct sock *sk, u32 ack, u32 acked) From ca4aa976f04d14bc7da60dce0e2afc34c9f0f1d2 Mon Sep 17 00:00:00 2001 From: Nicolas Dichtel Date: Tue, 10 May 2016 16:08:17 +0200 Subject: [PATCH 1486/1649] ipv6: fix 4in6 tunnel receive path Protocol for 4in6 tunnel is IPPROTO_IPIP. This was wrongly changed by the last cleanup. CC: Tom Herbert Fixes: 0d3c703a9d17 ("ipv6: Cleanup IPv6 tunnel receive path") Signed-off-by: Nicolas Dichtel Signed-off-by: David S. Miller --- net/ipv6/ip6_tunnel.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index 50af7061ecdb..e79330f214bd 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c @@ -897,7 +897,7 @@ drop: static int ip4ip6_rcv(struct sk_buff *skb) { - return ipxip6_rcv(skb, IPPROTO_IP, &tpi_v4, + return ipxip6_rcv(skb, IPPROTO_IPIP, &tpi_v4, ip4ip6_dscp_ecn_decapsulate); } From 74b20582ac389ee9f18a6fcc0eef244658ce8de0 Mon Sep 17 00:00:00 2001 From: David Ahern Date: Tue, 10 May 2016 11:19:50 -0700 Subject: [PATCH 1487/1649] net: l3mdev: Add hook in ip and ipv6 Currently the VRF driver uses the rx_handler to switch the skb device to the VRF device. Switching the dev prior to the ip / ipv6 layer means the VRF driver has to duplicate IP/IPv6 processing which adds overhead and makes features such as retaining the ingress device index more complicated than necessary. This patch moves the hook to the L3 layer just after the first NF_HOOK for PRE_ROUTING. 
This location makes exposing the original ingress device trivial (next patch) and allows adding other NF_HOOKs to the VRF driver in the future. dev_queue_xmit_nit is exported so that the VRF driver can cycle the skb with the switched device through the packet taps to maintain current behavior (tcpdump can be used on either the vrf device or the enslaved devices). Signed-off-by: David Ahern Signed-off-by: David S. Miller --- drivers/net/vrf.c | 189 ++++++++++++++++++-------------------- include/linux/ipv6.h | 17 +++- include/linux/netdevice.h | 2 + include/net/l3mdev.h | 42 +++++++++ include/net/tcp.h | 4 +- net/core/dev.c | 3 +- net/ipv4/ip_input.c | 7 ++ net/ipv6/ip6_input.c | 7 ++ 8 files changed, 170 insertions(+), 101 deletions(-) diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c index c8db55aa8280..0ea29345eb2e 100644 --- a/drivers/net/vrf.c +++ b/drivers/net/vrf.c @@ -42,9 +42,6 @@ #define DRV_NAME "vrf" #define DRV_VERSION "1.0" -#define vrf_master_get_rcu(dev) \ - ((struct net_device *)rcu_dereference(dev->rx_handler_data)) - struct net_vrf { struct rtable *rth; struct rt6_info *rt6; @@ -60,90 +57,12 @@ struct pcpu_dstats { struct u64_stats_sync syncp; }; -/* neighbor handling is done with actual device; do not want - * to flip skb->dev for those ndisc packets. This really fails - * for multiple next protocols (e.g., NEXTHDR_HOP). But it is - * a start. - */ -#if IS_ENABLED(CONFIG_IPV6) -static bool check_ipv6_frame(const struct sk_buff *skb) -{ - const struct ipv6hdr *ipv6h; - struct ipv6hdr _ipv6h; - bool rc = true; - - ipv6h = skb_header_pointer(skb, 0, sizeof(_ipv6h), &_ipv6h); - if (!ipv6h) - goto out; - - if (ipv6h->nexthdr == NEXTHDR_ICMP) { - const struct icmp6hdr *icmph; - struct icmp6hdr _icmph; - - icmph = skb_header_pointer(skb, sizeof(_ipv6h), - sizeof(_icmph), &_icmph); - if (!icmph) - goto out; - - switch (icmph->icmp6_type) { - case NDISC_ROUTER_SOLICITATION: - case NDISC_ROUTER_ADVERTISEMENT: - case NDISC_NEIGHBOUR_SOLICITATION: - case NDISC_NEIGHBOUR_ADVERTISEMENT: - case NDISC_REDIRECT: - rc = false; - break; - } - } - -out: - return rc; -} -#else -static bool check_ipv6_frame(const struct sk_buff *skb) -{ - return false; -} -#endif - -static bool is_ip_rx_frame(struct sk_buff *skb) -{ - switch (skb->protocol) { - case htons(ETH_P_IP): - return true; - case htons(ETH_P_IPV6): - return check_ipv6_frame(skb); - } - return false; -} - static void vrf_tx_error(struct net_device *vrf_dev, struct sk_buff *skb) { vrf_dev->stats.tx_errors++; kfree_skb(skb); } -/* note: already called with rcu_read_lock */ -static rx_handler_result_t vrf_handle_frame(struct sk_buff **pskb) -{ - struct sk_buff *skb = *pskb; - - if (is_ip_rx_frame(skb)) { - struct net_device *dev = vrf_master_get_rcu(skb->dev); - struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats); - - u64_stats_update_begin(&dstats->syncp); - dstats->rx_pkts++; - dstats->rx_bytes += skb->len; - u64_stats_update_end(&dstats->syncp); - - skb->dev = dev; - - return RX_HANDLER_ANOTHER; - } - return RX_HANDLER_PASS; -} - static struct rtnl_link_stats64 *vrf_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) { @@ -506,28 +425,14 @@ static int do_vrf_add_slave(struct net_device *dev, struct net_device *port_dev) { int ret; - /* register the packet handler for slave ports */ - ret = netdev_rx_handler_register(port_dev, vrf_handle_frame, dev); - if (ret) { - netdev_err(port_dev, - "Device %s failed to register rx_handler\n", - port_dev->name); - goto out_fail; - } - ret = netdev_master_upper_dev_link(port_dev, 
dev, NULL, NULL); if (ret < 0) - goto out_unregister; + return ret; port_dev->priv_flags |= IFF_L3MDEV_SLAVE; cycle_netdev(port_dev); return 0; - -out_unregister: - netdev_rx_handler_unregister(port_dev); -out_fail: - return ret; } static int vrf_add_slave(struct net_device *dev, struct net_device *port_dev) @@ -544,8 +449,6 @@ static int do_vrf_del_slave(struct net_device *dev, struct net_device *port_dev) netdev_upper_dev_unlink(port_dev, dev); port_dev->priv_flags &= ~IFF_L3MDEV_SLAVE; - netdev_rx_handler_unregister(port_dev); - cycle_netdev(port_dev); return 0; @@ -669,6 +572,95 @@ static int vrf_get_saddr(struct net_device *dev, struct flowi4 *fl4) return rc; } +#if IS_ENABLED(CONFIG_IPV6) +/* neighbor handling is done with actual device; do not want + * to flip skb->dev for those ndisc packets. This really fails + * for multiple next protocols (e.g., NEXTHDR_HOP). But it is + * a start. + */ +static bool ipv6_ndisc_frame(const struct sk_buff *skb) +{ + const struct ipv6hdr *iph = ipv6_hdr(skb); + bool rc = false; + + if (iph->nexthdr == NEXTHDR_ICMP) { + const struct icmp6hdr *icmph; + struct icmp6hdr _icmph; + + icmph = skb_header_pointer(skb, sizeof(*iph), + sizeof(_icmph), &_icmph); + if (!icmph) + goto out; + + switch (icmph->icmp6_type) { + case NDISC_ROUTER_SOLICITATION: + case NDISC_ROUTER_ADVERTISEMENT: + case NDISC_NEIGHBOUR_SOLICITATION: + case NDISC_NEIGHBOUR_ADVERTISEMENT: + case NDISC_REDIRECT: + rc = true; + break; + } + } + +out: + return rc; +} + +static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev, + struct sk_buff *skb) +{ + /* if packet is NDISC keep the ingress interface */ + if (!ipv6_ndisc_frame(skb)) { + skb->dev = vrf_dev; + skb->skb_iif = vrf_dev->ifindex; + + skb_push(skb, skb->mac_len); + dev_queue_xmit_nit(skb, vrf_dev); + skb_pull(skb, skb->mac_len); + + IP6CB(skb)->flags |= IP6SKB_L3SLAVE; + } + + return skb; +} + +#else +static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev, + struct sk_buff *skb) +{ + return skb; +} +#endif + +static struct sk_buff *vrf_ip_rcv(struct net_device *vrf_dev, + struct sk_buff *skb) +{ + skb->dev = vrf_dev; + skb->skb_iif = vrf_dev->ifindex; + + skb_push(skb, skb->mac_len); + dev_queue_xmit_nit(skb, vrf_dev); + skb_pull(skb, skb->mac_len); + + return skb; +} + +/* called with rcu lock held */ +static struct sk_buff *vrf_l3_rcv(struct net_device *vrf_dev, + struct sk_buff *skb, + u16 proto) +{ + switch (proto) { + case AF_INET: + return vrf_ip_rcv(vrf_dev, skb); + case AF_INET6: + return vrf_ip6_rcv(vrf_dev, skb); + } + + return skb; +} + #if IS_ENABLED(CONFIG_IPV6) static struct dst_entry *vrf_get_rt6_dst(const struct net_device *dev, const struct flowi6 *fl6) @@ -690,6 +682,7 @@ static const struct l3mdev_ops vrf_l3mdev_ops = { .l3mdev_fib_table = vrf_fib_table, .l3mdev_get_rtable = vrf_get_rtable, .l3mdev_get_saddr = vrf_get_saddr, + .l3mdev_l3_rcv = vrf_l3_rcv, #if IS_ENABLED(CONFIG_IPV6) .l3mdev_get_rt6_dst = vrf_get_rt6_dst, #endif diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h index 58d6e158755f..5c91b0b055d4 100644 --- a/include/linux/ipv6.h +++ b/include/linux/ipv6.h @@ -118,14 +118,29 @@ struct inet6_skb_parm { #define IP6SKB_ROUTERALERT 8 #define IP6SKB_FRAGMENTED 16 #define IP6SKB_HOPBYHOP 32 +#define IP6SKB_L3SLAVE 64 }; +#if defined(CONFIG_NET_L3_MASTER_DEV) +static inline bool skb_l3mdev_slave(__u16 flags) +{ + return flags & IP6SKB_L3SLAVE; +} +#else +static inline bool skb_l3mdev_slave(__u16 flags) +{ + return false; +} +#endif + #define IP6CB(skb) ((struct 
inet6_skb_parm*)((skb)->cb)) #define IP6CBMTU(skb) ((struct ip6_mtuinfo *)((skb)->cb)) static inline int inet6_iif(const struct sk_buff *skb) { - return IP6CB(skb)->iif; + bool l3_slave = skb_l3mdev_slave(IP6CB(skb)->flags); + + return l3_slave ? skb->skb_iif : IP6CB(skb)->iif; } struct tcp6_request_sock { diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 63580e6d0df4..c2f5112f08f7 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -3258,6 +3258,8 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb); bool is_skb_forwardable(const struct net_device *dev, const struct sk_buff *skb); +void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev); + extern int netdev_budget; /* Called by rtnetlink.c:rtnl_unlock() */ diff --git a/include/net/l3mdev.h b/include/net/l3mdev.h index 78872bd1dc2c..374388dc01c8 100644 --- a/include/net/l3mdev.h +++ b/include/net/l3mdev.h @@ -25,6 +25,8 @@ struct l3mdev_ops { u32 (*l3mdev_fib_table)(const struct net_device *dev); + struct sk_buff * (*l3mdev_l3_rcv)(struct net_device *dev, + struct sk_buff *skb, u16 proto); /* IPv4 ops */ struct rtable * (*l3mdev_get_rtable)(const struct net_device *dev, @@ -134,6 +136,34 @@ int l3mdev_get_saddr(struct net *net, int ifindex, struct flowi4 *fl4); struct dst_entry *l3mdev_get_rt6_dst(struct net *net, const struct flowi6 *fl6); +static inline +struct sk_buff *l3mdev_l3_rcv(struct sk_buff *skb, u16 proto) +{ + struct net_device *master = NULL; + + if (netif_is_l3_slave(skb->dev)) + master = netdev_master_upper_dev_get_rcu(skb->dev); + else if (netif_is_l3_master(skb->dev)) + master = skb->dev; + + if (master && master->l3mdev_ops->l3mdev_l3_rcv) + skb = master->l3mdev_ops->l3mdev_l3_rcv(master, skb, proto); + + return skb; +} + +static inline +struct sk_buff *l3mdev_ip_rcv(struct sk_buff *skb) +{ + return l3mdev_l3_rcv(skb, AF_INET); +} + +static inline +struct sk_buff *l3mdev_ip6_rcv(struct sk_buff *skb) +{ + return l3mdev_l3_rcv(skb, AF_INET6); +} + #else static inline int l3mdev_master_ifindex_rcu(const struct net_device *dev) @@ -194,6 +224,18 @@ struct dst_entry *l3mdev_get_rt6_dst(struct net *net, const struct flowi6 *fl6) { return NULL; } + +static inline +struct sk_buff *l3mdev_ip_rcv(struct sk_buff *skb) +{ + return skb; +} + +static inline +struct sk_buff *l3mdev_ip6_rcv(struct sk_buff *skb) +{ + return skb; +} #endif #endif /* _NET_L3MDEV_H_ */ diff --git a/include/net/tcp.h b/include/net/tcp.h index c9ab561387c4..0bcc70f4e1fb 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -786,7 +786,9 @@ struct tcp_skb_cb { */ static inline int tcp_v6_iif(const struct sk_buff *skb) { - return TCP_SKB_CB(skb)->header.h6.iif; + bool l3_slave = skb_l3mdev_slave(TCP_SKB_CB(skb)->header.h6.flags); + + return l3_slave ? skb->skb_iif : TCP_SKB_CB(skb)->header.h6.iif; } #endif diff --git a/net/core/dev.c b/net/core/dev.c index c7490339315c..12436d1312ca 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -1850,7 +1850,7 @@ static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb) * taps currently in use. 
*/ -static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev) +void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev) { struct packet_type *ptype; struct sk_buff *skb2 = NULL; @@ -1907,6 +1907,7 @@ out_unlock: pt_prev->func(skb2, skb->dev, pt_prev, skb->dev); rcu_read_unlock(); } +EXPORT_SYMBOL_GPL(dev_queue_xmit_nit); /** * netif_setup_tc - Handle tc mappings on real_num_tx_queues change diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c index 751c0658e194..37375eedeef9 100644 --- a/net/ipv4/ip_input.c +++ b/net/ipv4/ip_input.c @@ -313,6 +313,13 @@ static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb) const struct iphdr *iph = ip_hdr(skb); struct rtable *rt; + /* if ingress device is enslaved to an L3 master device pass the + * skb to its handler for processing + */ + skb = l3mdev_ip_rcv(skb); + if (!skb) + return NET_RX_SUCCESS; + if (net->ipv4.sysctl_ip_early_demux && !skb_dst(skb) && !skb->sk && diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c index 6ed56012005d..f185cbcda114 100644 --- a/net/ipv6/ip6_input.c +++ b/net/ipv6/ip6_input.c @@ -49,6 +49,13 @@ int ip6_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb) { + /* if ingress device is enslaved to an L3 master device pass the + * skb to its handler for processing + */ + skb = l3mdev_ip6_rcv(skb); + if (!skb) + return NET_RX_SUCCESS; + if (net->ipv4.sysctl_ip_early_demux && !skb_dst(skb) && skb->sk == NULL) { const struct inet6_protocol *ipprot; From 0b922b7a829c06e3b0790c58cd9ca026de86096e Mon Sep 17 00:00:00 2001 From: David Ahern Date: Tue, 10 May 2016 11:19:51 -0700 Subject: [PATCH 1488/1649] net: original ingress device index in PKTINFO Applications such as OSPF and BFD need the original ingress device not the VRF device; the latter can be derived from the former. To that end add the skb_iif to inet_skb_parm and set it in ipv4 code after clearing the skb control buffer similar to IPv6. From there the pktinfo can just pull it from cb with the PKTINFO_SKB_CB cast. The previous patch moving the skb->dev change to L3 means nothing else is needed for IPv6; it just works. Signed-off-by: David Ahern Signed-off-by: David S. Miller --- include/net/ip.h | 1 + net/ipv4/ip_input.c | 1 + net/ipv4/ip_sockglue.c | 7 ++++++- 3 files changed, 8 insertions(+), 1 deletion(-) diff --git a/include/net/ip.h b/include/net/ip.h index 247ac82e9cf2..37165fba3741 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -36,6 +36,7 @@ struct sock; struct inet_skb_parm { + int iif; struct ip_options opt; /* Compiled IP options */ unsigned char flags; diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c index 37375eedeef9..4b351af3e67b 100644 --- a/net/ipv4/ip_input.c +++ b/net/ipv4/ip_input.c @@ -478,6 +478,7 @@ int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, /* Remove any debris in the socket control block */ memset(IPCB(skb), 0, sizeof(struct inet_skb_parm)); + IPCB(skb)->iif = skb->skb_iif; /* Must drop socket now because of tproxy. 
*/ skb_orphan(skb); diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index bdb222c0c6a2..5805762d7fc7 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c @@ -1193,7 +1193,12 @@ void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb) ipv6_sk_rxinfo(sk); if (prepare && skb_rtable(skb)) { - pktinfo->ipi_ifindex = inet_iif(skb); + /* skb->cb is overloaded: prior to this point it is IP{6}CB + * which has interface index (iif) as the first member of the + * underlying inet{6}_skb_parm struct. This code then overlays + * PKTINFO_SKB_CB and in_pktinfo also has iif as the first + * element so the iif is picked up from the prior IPCB + */ pktinfo->ipi_spec_dst.s_addr = fib_compute_spec_dst(skb); } else { pktinfo->ipi_ifindex = 0; From 15d7d7d435cfe1faa1abf62459d6269bb734cfe7 Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Tue, 10 May 2016 15:44:28 -0400 Subject: [PATCH 1489/1649] net: dsa: mv88e6xxx: abstract VTU/STU data access Both VTU and STU operations use the same routine to access their (common) data registers, with a different offset. Add VTU and STU specific read and write functions to the data registers to abstract the required offset. Signed-off-by: Vivien Didelot Reviewed-by: Andrew Lunn Signed-off-by: David S. Miller --- drivers/net/dsa/mv88e6xxx.c | 32 ++++++++++++++++++++++++++++---- 1 file changed, 28 insertions(+), 4 deletions(-) diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c index 1e5ca8e0f48e..92be27d5db3a 100644 --- a/drivers/net/dsa/mv88e6xxx.c +++ b/drivers/net/dsa/mv88e6xxx.c @@ -1498,6 +1498,18 @@ static int _mv88e6xxx_vtu_stu_data_read(struct mv88e6xxx_priv_state *ps, return 0; } +static int mv88e6xxx_vtu_data_read(struct mv88e6xxx_priv_state *ps, + struct mv88e6xxx_vtu_stu_entry *entry) +{ + return _mv88e6xxx_vtu_stu_data_read(ps, entry, 0); +} + +static int mv88e6xxx_stu_data_read(struct mv88e6xxx_priv_state *ps, + struct mv88e6xxx_vtu_stu_entry *entry) +{ + return _mv88e6xxx_vtu_stu_data_read(ps, entry, 2); +} + static int _mv88e6xxx_vtu_stu_data_write(struct mv88e6xxx_priv_state *ps, struct mv88e6xxx_vtu_stu_entry *entry, unsigned int nibble_offset) @@ -1523,6 +1535,18 @@ static int _mv88e6xxx_vtu_stu_data_write(struct mv88e6xxx_priv_state *ps, return 0; } +static int mv88e6xxx_vtu_data_write(struct mv88e6xxx_priv_state *ps, + struct mv88e6xxx_vtu_stu_entry *entry) +{ + return _mv88e6xxx_vtu_stu_data_write(ps, entry, 0); +} + +static int mv88e6xxx_stu_data_write(struct mv88e6xxx_priv_state *ps, + struct mv88e6xxx_vtu_stu_entry *entry) +{ + return _mv88e6xxx_vtu_stu_data_write(ps, entry, 2); +} + static int _mv88e6xxx_vtu_vid_write(struct mv88e6xxx_priv_state *ps, u16 vid) { return _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_VTU_VID, @@ -1551,7 +1575,7 @@ static int _mv88e6xxx_vtu_getnext(struct mv88e6xxx_priv_state *ps, next.valid = !!(ret & GLOBAL_VTU_VID_VALID); if (next.valid) { - ret = _mv88e6xxx_vtu_stu_data_read(ps, &next, 0); + ret = mv88e6xxx_vtu_data_read(ps, &next); if (ret < 0) return ret; @@ -1658,7 +1682,7 @@ static int _mv88e6xxx_vtu_loadpurge(struct mv88e6xxx_priv_state *ps, goto loadpurge; /* Write port member tags */ - ret = _mv88e6xxx_vtu_stu_data_write(ps, entry, 0); + ret = mv88e6xxx_vtu_data_write(ps, entry); if (ret < 0) return ret; @@ -1724,7 +1748,7 @@ static int _mv88e6xxx_stu_getnext(struct mv88e6xxx_priv_state *ps, u8 sid, next.valid = !!(ret & GLOBAL_VTU_VID_VALID); if (next.valid) { - ret = _mv88e6xxx_vtu_stu_data_read(ps, &next, 2); + ret = mv88e6xxx_stu_data_read(ps, &next); 
if (ret < 0) return ret; } @@ -1747,7 +1771,7 @@ static int _mv88e6xxx_stu_loadpurge(struct mv88e6xxx_priv_state *ps, goto loadpurge; /* Write port states */ - ret = _mv88e6xxx_vtu_stu_data_write(ps, entry, 2); + ret = mv88e6xxx_stu_data_write(ps, entry); if (ret < 0) return ret; From cb9b9020fca5fd34ab2e21fb36fc2c7a85329426 Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Tue, 10 May 2016 15:44:29 -0400 Subject: [PATCH 1490/1649] net: dsa: mv88e6xxx: add STU capability Some switch models have a STU (per VLAN port state database). Add a new capability flag to switches info, instead of checking their family. Also if the 6165 family has an STU, it must have a VTU, so add the MV88E6XXX_FLAG_VTU to its family flags. Signed-off-by: Vivien Didelot Reviewed-by: Andrew Lunn Signed-off-by: David S. Miller --- drivers/net/dsa/mv88e6xxx.c | 14 ++------------ drivers/net/dsa/mv88e6xxx.h | 16 ++++++++++++++-- 2 files changed, 16 insertions(+), 14 deletions(-) diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c index 92be27d5db3a..835126e90afd 100644 --- a/drivers/net/dsa/mv88e6xxx.c +++ b/drivers/net/dsa/mv88e6xxx.c @@ -453,16 +453,6 @@ static bool mv88e6xxx_has_fid_reg(struct mv88e6xxx_priv_state *ps) return false; } -static bool mv88e6xxx_has_stu(struct mv88e6xxx_priv_state *ps) -{ - /* Does the device have STU and dedicated SID registers for VTU ops? */ - if (mv88e6xxx_6097_family(ps) || mv88e6xxx_6165_family(ps) || - mv88e6xxx_6351_family(ps) || mv88e6xxx_6352_family(ps)) - return true; - - return false; -} - /* We expect the switch to perform auto negotiation if there is a real * phy. However, in the case of a fixed link phy, we force the port * settings from the fixed link settings. @@ -1599,7 +1589,7 @@ static int _mv88e6xxx_vtu_getnext(struct mv88e6xxx_priv_state *ps, next.fid |= ret & 0xf; } - if (mv88e6xxx_has_stu(ps)) { + if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_STU)) { ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_VTU_SID); if (ret < 0) @@ -1686,7 +1676,7 @@ static int _mv88e6xxx_vtu_loadpurge(struct mv88e6xxx_priv_state *ps, if (ret < 0) return ret; - if (mv88e6xxx_has_stu(ps)) { + if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_STU)) { reg = entry->sid & GLOBAL_VTU_SID_MASK; ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_VTU_SID, reg); if (ret < 0) diff --git a/drivers/net/dsa/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx.h index ca69a93a42a0..5f09a4ea3cc5 100644 --- a/drivers/net/dsa/mv88e6xxx.h +++ b/drivers/net/dsa/mv88e6xxx.h @@ -403,6 +403,12 @@ enum mv88e6xxx_cap { */ MV88E6XXX_CAP_SMI_PHY, + /* Per VLAN Spanning Tree Unit (STU). + * The Port State database, if present, is accessed through VTU + * operations and dedicated SID registers. See GLOBAL_VTU_SID. + */ + MV88E6XXX_CAP_STU, + /* Switch MAC/WoL/WoF register. 
* This requires an indirect access to set the switch MAC address * through GLOBAL2_SWITCH_MAC, otherwise GLOBAL_MAC_01, GLOBAL_MAC_23, @@ -436,6 +442,7 @@ enum mv88e6xxx_cap { #define MV88E6XXX_FLAG_PPU BIT(MV88E6XXX_CAP_PPU) #define MV88E6XXX_FLAG_PPU_ACTIVE BIT(MV88E6XXX_CAP_PPU_ACTIVE) #define MV88E6XXX_FLAG_SMI_PHY BIT(MV88E6XXX_CAP_SMI_PHY) +#define MV88E6XXX_FLAG_STU BIT(MV88E6XXX_CAP_STU) #define MV88E6XXX_FLAG_SWITCH_MAC BIT(MV88E6XXX_CAP_SWITCH_MAC_WOL_WOF) #define MV88E6XXX_FLAG_TEMP BIT(MV88E6XXX_CAP_TEMP) #define MV88E6XXX_FLAG_TEMP_LIMIT BIT(MV88E6XXX_CAP_TEMP_LIMIT) @@ -451,12 +458,15 @@ enum mv88e6xxx_cap { #define MV88E6XXX_FLAGS_FAMILY_6097 \ (MV88E6XXX_FLAG_ATU | \ MV88E6XXX_FLAG_PPU | \ + MV88E6XXX_FLAG_STU | \ MV88E6XXX_FLAG_VLANTABLE | \ MV88E6XXX_FLAG_VTU) #define MV88E6XXX_FLAGS_FAMILY_6165 \ - (MV88E6XXX_FLAG_SWITCH_MAC | \ - MV88E6XXX_FLAG_TEMP) + (MV88E6XXX_FLAG_STU | \ + MV88E6XXX_FLAG_SWITCH_MAC | \ + MV88E6XXX_FLAG_TEMP | \ + MV88E6XXX_FLAG_VTU) #define MV88E6XXX_FLAGS_FAMILY_6185 \ (MV88E6XXX_FLAG_ATU | \ @@ -482,6 +492,7 @@ enum mv88e6xxx_cap { MV88E6XXX_FLAG_PORTSTATE | \ MV88E6XXX_FLAG_PPU_ACTIVE | \ MV88E6XXX_FLAG_SMI_PHY | \ + MV88E6XXX_FLAG_STU | \ MV88E6XXX_FLAG_SWITCH_MAC | \ MV88E6XXX_FLAG_TEMP | \ MV88E6XXX_FLAG_VLANTABLE | \ @@ -494,6 +505,7 @@ enum mv88e6xxx_cap { MV88E6XXX_FLAG_PORTSTATE | \ MV88E6XXX_FLAG_PPU_ACTIVE | \ MV88E6XXX_FLAG_SMI_PHY | \ + MV88E6XXX_FLAG_STU | \ MV88E6XXX_FLAG_SWITCH_MAC | \ MV88E6XXX_FLAG_TEMP | \ MV88E6XXX_FLAG_TEMP_LIMIT | \ From b681957ad439daf1ea67111463df783d3113c2da Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Tue, 10 May 2016 23:27:19 +0200 Subject: [PATCH 1491/1649] dsa: mv88e6xxx: Initialise the mutex as soon as it is created By initialising it immediately, we don't run the danger of using it before it is initialised. Signed-off-by: Andrew Lunn Signed-off-by: David S. Miller --- drivers/net/dsa/mv88e6xxx.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c index 835126e90afd..46564f4a9615 100644 --- a/drivers/net/dsa/mv88e6xxx.c +++ b/drivers/net/dsa/mv88e6xxx.c @@ -3132,8 +3132,6 @@ static int mv88e6xxx_setup(struct dsa_switch *ds) ps->ds = ds; - mutex_init(&ps->smi_mutex); - INIT_WORK(&ps->bridge_work, mv88e6xxx_bridge_work); if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_EEPROM)) @@ -3580,6 +3578,7 @@ static const char *mv88e6xxx_probe(struct device *dsa_dev, ps->bus = bus; ps->sw_addr = sw_addr; ps->info = info; + mutex_init(&ps->smi_mutex); *priv = ps; From fcdce7d0751096bbc863d5db12726e9253abbc3c Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Tue, 10 May 2016 23:27:20 +0200 Subject: [PATCH 1492/1649] dsa: mv88e6xxx: Rename probe function to fit the normal pattern All other DSA drivers use _drv_ in their DSA probe function name, thus allowing a true Linux driver probe function to use the conventional name. Make mv88e6xxx fit this pattern. Signed-off-by: Andrew Lunn Signed-off-by: David S. 
Miller --- drivers/net/dsa/mv88e6xxx.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c index 46564f4a9615..3d260da7caf5 100644 --- a/drivers/net/dsa/mv88e6xxx.c +++ b/drivers/net/dsa/mv88e6xxx.c @@ -3543,9 +3543,9 @@ mv88e6xxx_lookup_info(unsigned int prod_num, const struct mv88e6xxx_info *table, return NULL; } -static const char *mv88e6xxx_probe(struct device *dsa_dev, - struct device *host_dev, int sw_addr, - void **priv) +static const char *mv88e6xxx_drv_probe(struct device *dsa_dev, + struct device *host_dev, int sw_addr, + void **priv) { const struct mv88e6xxx_info *info; struct mv88e6xxx_priv_state *ps; @@ -3590,7 +3590,7 @@ static const char *mv88e6xxx_probe(struct device *dsa_dev, struct dsa_switch_driver mv88e6xxx_switch_driver = { .tag_protocol = DSA_TAG_PROTO_EDSA, - .probe = mv88e6xxx_probe, + .probe = mv88e6xxx_drv_probe, .setup = mv88e6xxx_setup, .set_addr = mv88e6xxx_set_addr, .phy_read = mv88e6xxx_phy_read, From 14c7b3c3877075e6df22e071d4619cbdeac82ffd Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Tue, 10 May 2016 23:27:21 +0200 Subject: [PATCH 1493/1649] dsa: Add mdio device support to Marvell switches Allow Marvell switches to be mdio devices. Currently the driver just allocate the private structure and detects what device is on the bus. Later patches will make them register with the DSA framework. Signed-off-by: Andrew Lunn Signed-off-by: David S. Miller --- .../devicetree/bindings/net/dsa/marvell.txt | 27 ++++++ drivers/net/dsa/mv88e6xxx.c | 90 +++++++++++++++---- 2 files changed, 99 insertions(+), 18 deletions(-) create mode 100644 Documentation/devicetree/bindings/net/dsa/marvell.txt diff --git a/Documentation/devicetree/bindings/net/dsa/marvell.txt b/Documentation/devicetree/bindings/net/dsa/marvell.txt new file mode 100644 index 000000000000..cdd70cebdea7 --- /dev/null +++ b/Documentation/devicetree/bindings/net/dsa/marvell.txt @@ -0,0 +1,27 @@ +Marvell DSA Switch Device Tree Bindings +--------------------------------------- + +WARNING: This binding is currently unstable. Do not program it into a +FLASH never to be changed again. Once this binding is stable, this +warning will be removed. + +If you need a stable binding, use the old dsa.txt binding. + +Marvell Switches are MDIO devices. The following properties should be +placed as a child node of an mdio device. + +Required properties: +- compatible : Should be one of "marvell,mv88e6085", +- reg : Address on the MII bus for the switch. + +Example: + + mdio { + #address-cells = <1>; + #size-cells = <0>; + + switch0: switch@0 { + compatible = "marvell,mv88e6085"; + reg = <0>; + }; + }; diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c index 3d260da7caf5..ae1cb191a0e0 100644 --- a/drivers/net/dsa/mv88e6xxx.c +++ b/drivers/net/dsa/mv88e6xxx.c @@ -5,6 +5,8 @@ * Copyright (c) 2015 CMC Electronics, Inc. 
* Added support for VLAN Table Unit operations * + * Copyright (c) 2016 Andrew Lunn + * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or @@ -17,6 +19,7 @@ #include #include #include +#include #include #include #include @@ -3625,36 +3628,87 @@ struct dsa_switch_driver mv88e6xxx_switch_driver = { .port_fdb_dump = mv88e6xxx_port_fdb_dump, }; +int mv88e6xxx_probe(struct mdio_device *mdiodev) +{ + struct device *dev = &mdiodev->dev; + struct mv88e6xxx_priv_state *ps; + int id, prod_num, rev; + struct dsa_switch *ds; + + ds = devm_kzalloc(dev, sizeof(*ds) + sizeof(*ps), GFP_KERNEL); + if (!ds) + return -ENOMEM; + + ps = (struct mv88e6xxx_priv_state *)(ds + 1); + ds->priv = ps; + ps->dev = dev; + ps->ds = ds; + ps->bus = mdiodev->bus; + ps->sw_addr = mdiodev->addr; + mutex_init(&ps->smi_mutex); + + get_device(&ps->bus->dev); + + ds->drv = &mv88e6xxx_switch_driver; + + id = mv88e6xxx_reg_read(ps, REG_PORT(0), PORT_SWITCH_ID); + if (id < 0) + return id; + + prod_num = (id & 0xfff0) >> 4; + rev = id & 0x000f; + + ps->info = mv88e6xxx_lookup_info(prod_num, mv88e6xxx_table, + ARRAY_SIZE(mv88e6xxx_table)); + if (!ps->info) + return -ENODEV; + + dev_set_drvdata(dev, ds); + + dev_info(dev, "switch 0x%x probed: %s, revision %u\n", + prod_num, ps->info->name, rev); + + return 0; +} + +static void mv88e6xxx_remove(struct mdio_device *mdiodev) +{ + struct dsa_switch *ds = dev_get_drvdata(&mdiodev->dev); + struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); + + put_device(&ps->bus->dev); +} + +static const struct of_device_id mv88e6xxx_of_match[] = { + { .compatible = "marvell,mv88e6085" }, + { /* sentinel */ }, +}; + +MODULE_DEVICE_TABLE(of, mv88e6xxx_of_match); + +static struct mdio_driver mv88e6xxx_driver = { + .probe = mv88e6xxx_probe, + .remove = mv88e6xxx_remove, + .mdiodrv.driver = { + .name = "mv88e6085", + .of_match_table = mv88e6xxx_of_match, + }, +}; + static int __init mv88e6xxx_init(void) { register_switch_driver(&mv88e6xxx_switch_driver); - - return 0; + return mdio_driver_register(&mv88e6xxx_driver); } module_init(mv88e6xxx_init); static void __exit mv88e6xxx_cleanup(void) { + mdio_driver_unregister(&mv88e6xxx_driver); unregister_switch_driver(&mv88e6xxx_switch_driver); } module_exit(mv88e6xxx_cleanup); -MODULE_ALIAS("platform:mv88e6085"); -MODULE_ALIAS("platform:mv88e6095"); -MODULE_ALIAS("platform:mv88e6095f"); -MODULE_ALIAS("platform:mv88e6123"); -MODULE_ALIAS("platform:mv88e6131"); -MODULE_ALIAS("platform:mv88e6161"); -MODULE_ALIAS("platform:mv88e6165"); -MODULE_ALIAS("platform:mv88e6171"); -MODULE_ALIAS("platform:mv88e6172"); -MODULE_ALIAS("platform:mv88e6175"); -MODULE_ALIAS("platform:mv88e6176"); -MODULE_ALIAS("platform:mv88e6320"); -MODULE_ALIAS("platform:mv88e6321"); -MODULE_ALIAS("platform:mv88e6350"); -MODULE_ALIAS("platform:mv88e6351"); -MODULE_ALIAS("platform:mv88e6352"); MODULE_AUTHOR("Lennert Buytenhek "); MODULE_DESCRIPTION("Driver for Marvell 88E6XXX ethernet switch chips"); MODULE_LICENSE("GPL"); From 52638f71fcff9386fe64c83a18a129b122333fdf Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Tue, 10 May 2016 23:27:22 +0200 Subject: [PATCH 1494/1649] dsa: Move gpio reset into switch driver Resetting the switch is something the driver does, not the framework. So move the parsing of this property into the driver. There are no in kernel users of this property, so moving it does not break anything. 
There is however a board which will make use of this property making its way into the kernel. Signed-off-by: Andrew Lunn Signed-off-by: David S. Miller --- .../devicetree/bindings/net/dsa/dsa.txt | 2 -- .../devicetree/bindings/net/dsa/marvell.txt | 8 ++++++++ drivers/net/dsa/mv88e6xxx.c | 14 +++++++++++++- drivers/net/dsa/mv88e6xxx.h | 7 +++++++ include/net/dsa.h | 8 -------- net/dsa/dsa.c | 16 ---------------- 6 files changed, 28 insertions(+), 27 deletions(-) diff --git a/Documentation/devicetree/bindings/net/dsa/dsa.txt b/Documentation/devicetree/bindings/net/dsa/dsa.txt index 5fdbbcdf8c4b..9f4807f90c31 100644 --- a/Documentation/devicetree/bindings/net/dsa/dsa.txt +++ b/Documentation/devicetree/bindings/net/dsa/dsa.txt @@ -31,8 +31,6 @@ A switch child node has the following optional property: switch. Must be set if the switch can not detect the presence and/or size of a connected EEPROM, otherwise optional. -- reset-gpios : phandle and specifier to a gpio line connected to - reset pin of the switch chip. A switch may have multiple "port" children nodes diff --git a/Documentation/devicetree/bindings/net/dsa/marvell.txt b/Documentation/devicetree/bindings/net/dsa/marvell.txt index cdd70cebdea7..7629189398aa 100644 --- a/Documentation/devicetree/bindings/net/dsa/marvell.txt +++ b/Documentation/devicetree/bindings/net/dsa/marvell.txt @@ -10,10 +10,17 @@ If you need a stable binding, use the old dsa.txt binding. Marvell Switches are MDIO devices. The following properties should be placed as a child node of an mdio device. +The properties described here are those specific to Marvell devices. +Additional required and optional properties can be found in dsa.txt. + Required properties: - compatible : Should be one of "marvell,mv88e6085", - reg : Address on the MII bus for the switch. +Optional properties: + +- reset-gpios : Should be a gpio specifier for a reset line + Example: mdio { @@ -23,5 +30,6 @@ Example: switch0: switch@0 { compatible = "marvell,mv88e6085"; reg = <0>; + reset-gpios = <&gpio5 1 GPIO_ACTIVE_LOW>; }; }; diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c index ae1cb191a0e0..e7e07eb7091d 100644 --- a/drivers/net/dsa/mv88e6xxx.c +++ b/drivers/net/dsa/mv88e6xxx.c @@ -2582,7 +2582,7 @@ static int mv88e6xxx_switch_reset(struct mv88e6xxx_priv_state *ps) { bool ppu_active = mv88e6xxx_has(ps, MV88E6XXX_FLAG_PPU_ACTIVE); u16 is_reset = (ppu_active ? 
0x8800 : 0xc800); - struct gpio_desc *gpiod = ps->ds->pd->reset; + struct gpio_desc *gpiod = ps->reset; unsigned long timeout; int ret; int i; @@ -3634,6 +3634,7 @@ int mv88e6xxx_probe(struct mdio_device *mdiodev) struct mv88e6xxx_priv_state *ps; int id, prod_num, rev; struct dsa_switch *ds; + int err; ds = devm_kzalloc(dev, sizeof(*ds) + sizeof(*ps), GFP_KERNEL); if (!ds) @@ -3663,6 +3664,17 @@ int mv88e6xxx_probe(struct mdio_device *mdiodev) if (!ps->info) return -ENODEV; + ps->reset = devm_gpiod_get(&mdiodev->dev, "reset", GPIOD_ASIS); + if (IS_ERR(ps->reset)) { + err = PTR_ERR(ps->reset); + if (err == -ENOENT) { + /* Optional, so not an error */ + ps->reset = NULL; + } else { + return err; + } + } + dev_set_drvdata(dev, ds); dev_info(dev, "switch 0x%x probed: %s, revision %u\n", diff --git a/drivers/net/dsa/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx.h index 5f09a4ea3cc5..9ef7673f0c61 100644 --- a/drivers/net/dsa/mv88e6xxx.h +++ b/drivers/net/dsa/mv88e6xxx.h @@ -12,6 +12,7 @@ #define __MV88E6XXX_H #include +#include #ifndef UINT64_MAX #define UINT64_MAX (u64)(~((u64)0)) @@ -595,6 +596,12 @@ struct mv88e6xxx_priv_state { DECLARE_BITMAP(port_state_update_mask, DSA_MAX_PORTS); struct work_struct bridge_work; + + /* A switch may have a GPIO line tied to its reset pin. Parse + * this from the device tree, and use it before performing + * switch soft reset. + */ + struct gpio_desc *reset; }; enum stat_type { diff --git a/include/net/dsa.h b/include/net/dsa.h index 8e86af87c84f..ecb52e265cc3 100644 --- a/include/net/dsa.h +++ b/include/net/dsa.h @@ -16,7 +16,6 @@ #include #include #include -#include #include #include #include @@ -65,13 +64,6 @@ struct dsa_chip_data { * NULL if there is only one switch chip. */ s8 *rtable; - - /* - * A switch may have a GPIO line tied to its reset pin. Parse - * this from the device tree, and use it before performing - * switch soft reset. - */ - struct gpio_desc *reset; }; struct dsa_platform_data { diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c index d61ceed912be..df169811f26d 100644 --- a/net/dsa/dsa.c +++ b/net/dsa/dsa.c @@ -659,9 +659,6 @@ static int dsa_of_probe(struct device *dev) const char *port_name; int chip_index, port_index; const unsigned int *sw_addr, *port_reg; - int gpio; - enum of_gpio_flags of_flags; - unsigned long flags; u32 eeprom_len; int ret; @@ -740,19 +737,6 @@ static int dsa_of_probe(struct device *dev) put_device(cd->host_dev); cd->host_dev = &mdio_bus_switch->dev; } - gpio = of_get_named_gpio_flags(child, "reset-gpios", 0, - &of_flags); - if (gpio_is_valid(gpio)) { - flags = (of_flags == OF_GPIO_ACTIVE_LOW ? - GPIOF_ACTIVE_LOW : 0); - ret = devm_gpio_request_one(dev, gpio, flags, - "switch_reset"); - if (ret) - goto out_free_chip; - - cd->reset = gpio_to_desc(gpio); - gpiod_direction_output(cd->reset, 0); - } for_each_available_child_of_node(child, port) { port_reg = of_get_property(port, "reg", NULL); From c33063d6a0d83a553faacf32f3cb834e63d8ecd7 Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Tue, 10 May 2016 23:27:23 +0200 Subject: [PATCH 1495/1649] dsa: Remove master_dev from switch structure The switch drivers only use the master_dev member for dev_info() messages. Now that the device is passed to the old style probe, and new style drivers are probed as true linux drivers, this is no longer needed. Signed-off-by: Andrew Lunn Signed-off-by: David S. 
Miller --- drivers/net/dsa/mv88e6xxx.c | 1 + include/net/dsa.h | 7 ++----- net/dsa/dsa.c | 2 +- net/dsa/slave.c | 2 +- 4 files changed, 5 insertions(+), 7 deletions(-) diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c index e7e07eb7091d..8659cbaac9f9 100644 --- a/drivers/net/dsa/mv88e6xxx.c +++ b/drivers/net/dsa/mv88e6xxx.c @@ -3642,6 +3642,7 @@ int mv88e6xxx_probe(struct mdio_device *mdiodev) ps = (struct mv88e6xxx_priv_state *)(ds + 1); ds->priv = ps; + ds->dev = dev; ps->dev = dev; ps->ds = ds; ps->bus = mdiodev->bus; diff --git a/include/net/dsa.h b/include/net/dsa.h index ecb52e265cc3..f4c0bff8d9d6 100644 --- a/include/net/dsa.h +++ b/include/net/dsa.h @@ -120,6 +120,8 @@ struct dsa_switch_tree { }; struct dsa_switch { + struct device *dev; + /* * Parent switch tree, and switch index. */ @@ -142,11 +144,6 @@ struct dsa_switch { */ struct dsa_switch_driver *drv; - /* - * Reference to host device to use. - */ - struct device *master_dev; - #ifdef CONFIG_NET_DSA_HWMON /* * Hardware monitoring information diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c index df169811f26d..5db779c69a68 100644 --- a/net/dsa/dsa.c +++ b/net/dsa/dsa.c @@ -411,7 +411,7 @@ dsa_switch_setup(struct dsa_switch_tree *dst, int index, ds->pd = pd; ds->drv = drv; ds->priv = priv; - ds->master_dev = host_dev; + ds->dev = parent; ret = dsa_switch_setup_one(ds, parent); if (ret) diff --git a/net/dsa/slave.c b/net/dsa/slave.c index 5ea8a40c8d33..f25dcd9e814a 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c @@ -51,7 +51,7 @@ void dsa_slave_mii_bus_init(struct dsa_switch *ds) ds->slave_mii_bus->write = dsa_slave_phy_write; snprintf(ds->slave_mii_bus->id, MII_BUS_ID_SIZE, "dsa-%d:%.2x", ds->index, ds->pd->sw_addr); - ds->slave_mii_bus->parent = ds->master_dev; + ds->slave_mii_bus->parent = ds->dev; ds->slave_mii_bus->phy_mask = ~ds->phys_mii_mask; } From ff04955c2f678a2c4c3207e0184c4c389da9d1e2 Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Tue, 10 May 2016 23:27:24 +0200 Subject: [PATCH 1496/1649] dsa: Rename switch chip data to cd The dsa_switch structure contains a dsa_chip_data member called pd. However in the rest of the code, pd is used for dsa_platform_data. This is confusing. Rename it cd, which is already often used in dsa.c and slave.c for this data type. Signed-off-by: Andrew Lunn Signed-off-by: David S. 
Miller --- drivers/net/dsa/bcm_sf2.c | 4 ++-- drivers/net/dsa/mv88e6xxx.c | 4 ++-- include/net/dsa.h | 4 ++-- net/dsa/dsa.c | 18 +++++++++--------- net/dsa/slave.c | 10 +++++----- 5 files changed, 20 insertions(+), 20 deletions(-) diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index 448deb59b9a4..10ddd5a5dfb6 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -949,8 +949,8 @@ static int bcm_sf2_sw_setup(struct dsa_switch *ds) /* All the interesting properties are at the parent device_node * level */ - dn = ds->pd->of_node->parent; - bcm_sf2_identify_ports(priv, ds->pd->of_node); + dn = ds->cd->of_node->parent; + bcm_sf2_identify_ports(priv, ds->cd->of_node); priv->irq0 = irq_of_parse_and_map(dn, 0); priv->irq1 = irq_of_parse_and_map(dn, 1); diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c index 8659cbaac9f9..ee7830935a73 100644 --- a/drivers/net/dsa/mv88e6xxx.c +++ b/drivers/net/dsa/mv88e6xxx.c @@ -3023,9 +3023,9 @@ static int mv88e6xxx_setup_global(struct mv88e6xxx_priv_state *ps) for (i = 0; i < 32; i++) { int nexthop = 0x1f; - if (ps->ds->pd->rtable && + if (ps->ds->cd->rtable && i != ps->ds->index && i < ps->ds->dst->pd->nr_chips) - nexthop = ps->ds->pd->rtable[i] & 0x1f; + nexthop = ps->ds->cd->rtable[i] & 0x1f; err = _mv88e6xxx_reg_write( ps, REG_GLOBAL2, diff --git a/include/net/dsa.h b/include/net/dsa.h index f4c0bff8d9d6..17c3d37b6779 100644 --- a/include/net/dsa.h +++ b/include/net/dsa.h @@ -137,7 +137,7 @@ struct dsa_switch { /* * Configuration data for this switch. */ - struct dsa_chip_data *pd; + struct dsa_chip_data *cd; /* * The used switch driver. @@ -190,7 +190,7 @@ static inline u8 dsa_upstream_port(struct dsa_switch *ds) if (dst->cpu_switch == ds->index) return dst->cpu_port; else - return ds->pd->rtable[dst->cpu_switch]; + return ds->cd->rtable[dst->cpu_switch]; } struct switchdev_trans; diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c index 5db779c69a68..eff5dfc2e33f 100644 --- a/net/dsa/dsa.c +++ b/net/dsa/dsa.c @@ -182,7 +182,7 @@ __ATTRIBUTE_GROUPS(dsa_hwmon); /* basic switch operations **************************************************/ static int dsa_cpu_dsa_setup(struct dsa_switch *ds, struct net_device *master) { - struct dsa_chip_data *cd = ds->pd; + struct dsa_chip_data *cd = ds->cd; struct device_node *port_dn; struct phy_device *phydev; int ret, port, mode; @@ -219,7 +219,7 @@ static int dsa_switch_setup_one(struct dsa_switch *ds, struct device *parent) { struct dsa_switch_driver *drv = ds->drv; struct dsa_switch_tree *dst = ds->dst; - struct dsa_chip_data *pd = ds->pd; + struct dsa_chip_data *cd = ds->cd; bool valid_name_found = false; int index = ds->index; int i, ret; @@ -230,7 +230,7 @@ static int dsa_switch_setup_one(struct dsa_switch *ds, struct device *parent) for (i = 0; i < DSA_MAX_PORTS; i++) { char *name; - name = pd->port_names[i]; + name = cd->port_names[i]; if (name == NULL) continue; @@ -328,10 +328,10 @@ static int dsa_switch_setup_one(struct dsa_switch *ds, struct device *parent) if (!(ds->enabled_port_mask & (1 << i))) continue; - ret = dsa_slave_create(ds, parent, i, pd->port_names[i]); + ret = dsa_slave_create(ds, parent, i, cd->port_names[i]); if (ret < 0) { netdev_err(dst->master_netdev, "[%d]: can't create dsa slave device for port %d(%s): %d\n", - index, i, pd->port_names[i], ret); + index, i, cd->port_names[i], ret); ret = 0; } } @@ -379,7 +379,7 @@ static struct dsa_switch * dsa_switch_setup(struct dsa_switch_tree *dst, int index, struct device *parent, struct device 
*host_dev) { - struct dsa_chip_data *pd = dst->pd->chip + index; + struct dsa_chip_data *cd = dst->pd->chip + index; struct dsa_switch_driver *drv; struct dsa_switch *ds; int ret; @@ -389,7 +389,7 @@ dsa_switch_setup(struct dsa_switch_tree *dst, int index, /* * Probe for switch model. */ - drv = dsa_switch_probe(parent, host_dev, pd->sw_addr, &name, &priv); + drv = dsa_switch_probe(parent, host_dev, cd->sw_addr, &name, &priv); if (drv == NULL) { netdev_err(dst->master_netdev, "[%d]: could not detect attached switch\n", index); @@ -408,7 +408,7 @@ dsa_switch_setup(struct dsa_switch_tree *dst, int index, ds->dst = dst; ds->index = index; - ds->pd = pd; + ds->cd = cd; ds->drv = drv; ds->priv = priv; ds->dev = parent; @@ -424,7 +424,7 @@ static void dsa_switch_destroy(struct dsa_switch *ds) { struct device_node *port_dn; struct phy_device *phydev; - struct dsa_chip_data *cd = ds->pd; + struct dsa_chip_data *cd = ds->cd; int port; #ifdef CONFIG_NET_DSA_HWMON diff --git a/net/dsa/slave.c b/net/dsa/slave.c index f25dcd9e814a..152436cdab30 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c @@ -50,7 +50,7 @@ void dsa_slave_mii_bus_init(struct dsa_switch *ds) ds->slave_mii_bus->read = dsa_slave_phy_read; ds->slave_mii_bus->write = dsa_slave_phy_write; snprintf(ds->slave_mii_bus->id, MII_BUS_ID_SIZE, "dsa-%d:%.2x", - ds->index, ds->pd->sw_addr); + ds->index, ds->cd->sw_addr); ds->slave_mii_bus->parent = ds->dev; ds->slave_mii_bus->phy_mask = ~ds->phys_mii_mask; } @@ -615,8 +615,8 @@ static int dsa_slave_get_eeprom_len(struct net_device *dev) struct dsa_slave_priv *p = netdev_priv(dev); struct dsa_switch *ds = p->parent; - if (ds->pd->eeprom_len) - return ds->pd->eeprom_len; + if (ds->cd->eeprom_len) + return ds->cd->eeprom_len; if (ds->drv->get_eeprom_len) return ds->drv->get_eeprom_len(ds); @@ -999,7 +999,7 @@ static int dsa_slave_phy_setup(struct dsa_slave_priv *p, struct net_device *slave_dev) { struct dsa_switch *ds = p->parent; - struct dsa_chip_data *cd = ds->pd; + struct dsa_chip_data *cd = ds->cd; struct device_node *phy_dn, *port_dn; bool phy_is_fixed = false; u32 phy_flags = 0; @@ -1147,7 +1147,7 @@ int dsa_slave_create(struct dsa_switch *ds, struct device *parent, NULL); SET_NETDEV_DEV(slave_dev, parent); - slave_dev->dev.of_node = ds->pd->port_dn[port]; + slave_dev->dev.of_node = ds->cd->port_dn[port]; slave_dev->vlan_features = master->vlan_features; p = netdev_priv(slave_dev); From f8cd8753def081b92b93209265bce68a73885ed0 Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Tue, 10 May 2016 23:27:25 +0200 Subject: [PATCH 1497/1649] dsa: mv88e6xxx: Handle eeprom-length property A switch can export an attached EEPROM using the standard ethtool API. However the switch itself cannot determine the size of the EEPROM, and multiple sizes are allowed. Thus a device tree property is supported to indicate the length of the EEPROM. Parse this property during device probe, and implement a callback function to retrieve it. Signed-off-by: Andrew Lunn Signed-off-by: David S. 
Miller --- drivers/net/dsa/mv88e6xxx.c | 17 +++++++++++++++++ drivers/net/dsa/mv88e6xxx.h | 3 +++ 2 files changed, 20 insertions(+) diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c index ee7830935a73..a3f0e7ec4067 100644 --- a/drivers/net/dsa/mv88e6xxx.c +++ b/drivers/net/dsa/mv88e6xxx.c @@ -869,6 +869,16 @@ error: return ret; } +static int mv88e6xxx_get_eeprom_len(struct dsa_switch *ds) +{ + struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); + + if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_EEPROM)) + return ps->eeprom_len; + + return 0; +} + static int mv88e6xxx_get_eeprom(struct dsa_switch *ds, struct ethtool_eeprom *eeprom, u8 *data) { @@ -3610,6 +3620,7 @@ struct dsa_switch_driver mv88e6xxx_switch_driver = { .set_temp_limit = mv88e6xxx_set_temp_limit, .get_temp_alarm = mv88e6xxx_get_temp_alarm, #endif + .get_eeprom_len = mv88e6xxx_get_eeprom_len, .get_eeprom = mv88e6xxx_get_eeprom, .set_eeprom = mv88e6xxx_set_eeprom, .get_regs_len = mv88e6xxx_get_regs_len, @@ -3631,9 +3642,11 @@ struct dsa_switch_driver mv88e6xxx_switch_driver = { int mv88e6xxx_probe(struct mdio_device *mdiodev) { struct device *dev = &mdiodev->dev; + struct device_node *np = dev->of_node; struct mv88e6xxx_priv_state *ps; int id, prod_num, rev; struct dsa_switch *ds; + u32 eeprom_len; int err; ds = devm_kzalloc(dev, sizeof(*ds) + sizeof(*ps), GFP_KERNEL); @@ -3676,6 +3689,10 @@ int mv88e6xxx_probe(struct mdio_device *mdiodev) } } + if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_EEPROM) && + !of_property_read_u32(np, "eeprom-length", &eeprom_len)) + ps->eeprom_len = eeprom_len; + dev_set_drvdata(dev, ds); dev_info(dev, "switch 0x%x probed: %s, revision %u\n", diff --git a/drivers/net/dsa/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx.h index 9ef7673f0c61..40e8721ecfb1 100644 --- a/drivers/net/dsa/mv88e6xxx.h +++ b/drivers/net/dsa/mv88e6xxx.h @@ -602,6 +602,9 @@ struct mv88e6xxx_priv_state { * switch soft reset. */ struct gpio_desc *reset; + + /* set to size of eeprom if supported by the switch */ + int eeprom_len; }; enum stat_type { From 7219ab34f184b5d864be38f5ada7cdff1ab5b18a Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Wed, 11 May 2016 00:29:14 +0300 Subject: [PATCH 1498/1649] net/mlx5e: CQE compression CQE compression feature is meant to save PCIe bandwidth by compressing few CQEs into smaller amount of bytes on PCIe. CQE compression can be selectively enabled per CQ. By default is disabled for now and will be enabled later on. Signed-off-by: Tariq Toukan Signed-off-by: Eugenia Emantayev Signed-off-by: Saeed Mahameed Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 10 ++ .../ethernet/mellanox/mlx5/core/en_clock.c | 4 + .../net/ethernet/mellanox/mlx5/core/en_main.c | 6 + .../net/ethernet/mellanox/mlx5/core/en_rx.c | 151 +++++++++++++++++- .../ethernet/mellanox/mlx5/core/en_stats.h | 8 + include/linux/mlx5/device.h | 34 ++++ 6 files changed, 211 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index bfa5daaaf5aa..19f0d8db27ce 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -157,6 +157,8 @@ struct mlx5e_params { u8 log_rq_size; u16 num_channels; u8 num_tc; + bool rx_cqe_compress_admin; + bool rx_cqe_compress; u16 rx_cq_moderation_usec; u16 rx_cq_moderation_pkts; u16 tx_cq_moderation_usec; @@ -202,6 +204,13 @@ struct mlx5e_cq { struct mlx5e_channel *channel; struct mlx5e_priv *priv; + /* cqe decompression */ + struct mlx5_cqe64 title; + struct mlx5_mini_cqe8 mini_arr[MLX5_MINI_CQE_ARRAY_SIZE]; + u8 mini_arr_idx; + u16 decmprs_left; + u16 decmprs_wqe_counter; + /* control */ struct mlx5_wq_ctrl wq_ctrl; } ____cacheline_aligned_in_smp; @@ -616,6 +625,7 @@ void mlx5e_timestamp_init(struct mlx5e_priv *priv); void mlx5e_timestamp_cleanup(struct mlx5e_priv *priv); int mlx5e_hwstamp_set(struct net_device *dev, struct ifreq *ifr); int mlx5e_hwstamp_get(struct net_device *dev, struct ifreq *ifr); +void mlx5e_modify_rx_cqe_compression(struct mlx5e_priv *priv, bool val); int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto, u16 vid); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c b/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c index 2018eebe1531..847a8f3ac2b2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c @@ -93,6 +93,8 @@ int mlx5e_hwstamp_set(struct net_device *dev, struct ifreq *ifr) /* RX HW timestamp */ switch (config.rx_filter) { case HWTSTAMP_FILTER_NONE: + /* Reset CQE compression to Admin default */ + mlx5e_modify_rx_cqe_compression(priv, priv->params.rx_cqe_compress_admin); break; case HWTSTAMP_FILTER_ALL: case HWTSTAMP_FILTER_SOME: @@ -108,6 +110,8 @@ int mlx5e_hwstamp_set(struct net_device *dev, struct ifreq *ifr) case HWTSTAMP_FILTER_PTP_V2_EVENT: case HWTSTAMP_FILTER_PTP_V2_SYNC: case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: + /* Disable CQE compression */ + mlx5e_modify_rx_cqe_compression(priv, false); config.rx_filter = HWTSTAMP_FILTER_ALL; break; default: diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 1c70e518b5c5..0ea4c03a7946 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -114,6 +114,8 @@ static void mlx5e_update_sw_counters(struct mlx5e_priv *priv) s->rx_mpwqe_filler += rq_stats->mpwqe_filler; s->rx_mpwqe_frag += rq_stats->mpwqe_frag; s->rx_buff_alloc_err += rq_stats->buff_alloc_err; + s->rx_cqe_compress_blks += rq_stats->cqe_compress_blks; + s->rx_cqe_compress_pkts += rq_stats->cqe_compress_pkts; for (j = 0; j < priv->params.num_tc; j++) { sq_stats = &priv->channel[i]->sq[j].stats; @@ -1204,6 +1206,10 @@ static void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv, } MLX5_SET(cqc, cqc, log_cq_size, log_cq_size); + if (priv->params.rx_cqe_compress) { + MLX5_SET(cqc, cqc, mini_cqe_res_format, MLX5_CQE_FORMAT_CSUM); + MLX5_SET(cqc, cqc, cqe_comp_en, 1); + } 
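The hunk just above is where compression is actually switched on for an RX CQ. As a rough illustration of the PCIe saving described in the commit message, here is a stand-alone sketch (not driver code; it only assumes the layout added further down in this patch: a 64-byte title CQE followed by 64-byte slots carrying MLX5_MINI_CQE_ARRAY_SIZE = 8 mini CQEs of 8 bytes each):

    /* CQ bytes consumed by n completions without compression */
    static inline unsigned int cqe_bytes_uncompressed(unsigned int n)
    {
            return n * 64;                         /* one full 64B CQE per packet */
    }

    /* CQ bytes consumed by the same n completions in one compression block */
    static inline unsigned int cqe_bytes_compressed(unsigned int n)
    {
            unsigned int mini_slots = (n + 7) / 8; /* 8 mini CQEs per 64B slot */

            return (1 + mini_slots) * 64;          /* title slot + mini-CQE slots */
    }

For example, eight completions cost 512 bytes uncompressed but only 128 bytes compressed (one title slot plus one mini-CQE slot), which is the bandwidth saving the feature is after.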
mlx5e_build_common_cq_param(priv, param); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 23adfe2fcba9..c8b8d456268f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -42,6 +42,143 @@ static inline bool mlx5e_rx_hw_stamp(struct mlx5e_tstamp *tstamp) return tstamp->hwtstamp_config.rx_filter == HWTSTAMP_FILTER_ALL; } +static inline void mlx5e_read_cqe_slot(struct mlx5e_cq *cq, u32 cqcc, + void *data) +{ + u32 ci = cqcc & cq->wq.sz_m1; + + memcpy(data, mlx5_cqwq_get_wqe(&cq->wq, ci), sizeof(struct mlx5_cqe64)); +} + +static inline void mlx5e_read_title_slot(struct mlx5e_rq *rq, + struct mlx5e_cq *cq, u32 cqcc) +{ + mlx5e_read_cqe_slot(cq, cqcc, &cq->title); + cq->decmprs_left = be32_to_cpu(cq->title.byte_cnt); + cq->decmprs_wqe_counter = be16_to_cpu(cq->title.wqe_counter); + rq->stats.cqe_compress_blks++; +} + +static inline void mlx5e_read_mini_arr_slot(struct mlx5e_cq *cq, u32 cqcc) +{ + mlx5e_read_cqe_slot(cq, cqcc, cq->mini_arr); + cq->mini_arr_idx = 0; +} + +static inline void mlx5e_cqes_update_owner(struct mlx5e_cq *cq, u32 cqcc, int n) +{ + u8 op_own = (cqcc >> cq->wq.log_sz) & 1; + u32 wq_sz = 1 << cq->wq.log_sz; + u32 ci = cqcc & cq->wq.sz_m1; + u32 ci_top = min_t(u32, wq_sz, ci + n); + + for (; ci < ci_top; ci++, n--) { + struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, ci); + + cqe->op_own = op_own; + } + + if (unlikely(ci == wq_sz)) { + op_own = !op_own; + for (ci = 0; ci < n; ci++) { + struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, ci); + + cqe->op_own = op_own; + } + } +} + +static inline void mlx5e_decompress_cqe(struct mlx5e_rq *rq, + struct mlx5e_cq *cq, u32 cqcc) +{ + u16 wqe_cnt_step; + + cq->title.byte_cnt = cq->mini_arr[cq->mini_arr_idx].byte_cnt; + cq->title.check_sum = cq->mini_arr[cq->mini_arr_idx].checksum; + cq->title.op_own &= 0xf0; + cq->title.op_own |= 0x01 & (cqcc >> cq->wq.log_sz); + cq->title.wqe_counter = cpu_to_be16(cq->decmprs_wqe_counter); + + wqe_cnt_step = + rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ ? 
+ mpwrq_get_cqe_consumed_strides(&cq->title) : 1; + cq->decmprs_wqe_counter = + (cq->decmprs_wqe_counter + wqe_cnt_step) & rq->wq.sz_m1; +} + +static inline void mlx5e_decompress_cqe_no_hash(struct mlx5e_rq *rq, + struct mlx5e_cq *cq, u32 cqcc) +{ + mlx5e_decompress_cqe(rq, cq, cqcc); + cq->title.rss_hash_type = 0; + cq->title.rss_hash_result = 0; +} + +static inline u32 mlx5e_decompress_cqes_cont(struct mlx5e_rq *rq, + struct mlx5e_cq *cq, + int update_owner_only, + int budget_rem) +{ + u32 cqcc = cq->wq.cc + update_owner_only; + u32 cqe_count; + u32 i; + + cqe_count = min_t(u32, cq->decmprs_left, budget_rem); + + for (i = update_owner_only; i < cqe_count; + i++, cq->mini_arr_idx++, cqcc++) { + if (unlikely(cq->mini_arr_idx == MLX5_MINI_CQE_ARRAY_SIZE)) + mlx5e_read_mini_arr_slot(cq, cqcc); + + mlx5e_decompress_cqe_no_hash(rq, cq, cqcc); + rq->handle_rx_cqe(rq, &cq->title); + } + mlx5e_cqes_update_owner(cq, cq->wq.cc, cqcc - cq->wq.cc); + cq->wq.cc = cqcc; + cq->decmprs_left -= cqe_count; + rq->stats.cqe_compress_pkts += cqe_count; + + return cqe_count; +} + +static inline u32 mlx5e_decompress_cqes_start(struct mlx5e_rq *rq, + struct mlx5e_cq *cq, + int budget_rem) +{ + mlx5e_read_title_slot(rq, cq, cq->wq.cc); + mlx5e_read_mini_arr_slot(cq, cq->wq.cc + 1); + mlx5e_decompress_cqe(rq, cq, cq->wq.cc); + rq->handle_rx_cqe(rq, &cq->title); + cq->mini_arr_idx++; + + return mlx5e_decompress_cqes_cont(rq, cq, 1, budget_rem) - 1; +} + +void mlx5e_modify_rx_cqe_compression(struct mlx5e_priv *priv, bool val) +{ + bool was_opened; + + if (!MLX5_CAP_GEN(priv->mdev, cqe_compression)) + return; + + mutex_lock(&priv->state_lock); + + if (priv->params.rx_cqe_compress == val) + goto unlock; + + was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state); + if (was_opened) + mlx5e_close_locked(priv->netdev); + + priv->params.rx_cqe_compress = val; + + if (was_opened) + mlx5e_open_locked(priv->netdev); + +unlock: + mutex_unlock(&priv->state_lock); +} + int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix) { struct sk_buff *skb; @@ -738,14 +875,24 @@ mpwrq_cqe_out: int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget) { struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq); - int work_done; + int work_done = 0; - for (work_done = 0; work_done < budget; work_done++) { + if (cq->decmprs_left) + work_done += mlx5e_decompress_cqes_cont(rq, cq, 0, budget); + + for (; work_done < budget; work_done++) { struct mlx5_cqe64 *cqe = mlx5e_get_cqe(cq); if (!cqe) break; + if (mlx5_get_cqe_format(cqe) == MLX5_COMPRESSED) { + work_done += + mlx5e_decompress_cqes_start(rq, cq, + budget - work_done); + continue; + } + mlx5_cqwq_pop(&cq->wq); rq->handle_rx_cqe(rq, cqe); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h index 115752b53d85..83bc32b25849 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h @@ -72,6 +72,8 @@ struct mlx5e_sw_stats { u64 rx_mpwqe_filler; u64 rx_mpwqe_frag; u64 rx_buff_alloc_err; + u64 rx_cqe_compress_blks; + u64 rx_cqe_compress_pkts; /* Special handling counters */ u64 link_down_events; @@ -101,6 +103,8 @@ static const struct counter_desc sw_stats_desc[] = { { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_frag) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_buff_alloc_err) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_blks) }, + { 
MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_pkts) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, link_down_events) }, }; @@ -283,6 +287,8 @@ struct mlx5e_rq_stats { u64 mpwqe_filler; u64 mpwqe_frag; u64 buff_alloc_err; + u64 cqe_compress_blks; + u64 cqe_compress_pkts; }; static const struct counter_desc rq_stats_desc[] = { @@ -297,6 +303,8 @@ static const struct counter_desc rq_stats_desc[] = { { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, mpwqe_filler) }, { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, mpwqe_frag) }, { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, buff_alloc_err) }, + { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, cqe_compress_blks) }, + { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) }, }; struct mlx5e_sq_stats { diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index ee0d5a937f02..035abdf62cfe 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -685,6 +685,40 @@ struct mlx5_cqe64 { u8 op_own; }; +struct mlx5_mini_cqe8 { + union { + __be32 rx_hash_result; + struct { + __be16 checksum; + __be16 rsvd; + }; + struct { + __be16 wqe_counter; + u8 s_wqe_opcode; + u8 reserved; + } s_wqe_info; + }; + __be32 byte_cnt; +}; + +enum { + MLX5_NO_INLINE_DATA, + MLX5_INLINE_DATA32_SEG, + MLX5_INLINE_DATA64_SEG, + MLX5_COMPRESSED, +}; + +enum { + MLX5_CQE_FORMAT_CSUM = 0x1, +}; + +#define MLX5_MINI_CQE_ARRAY_SIZE 8 + +static inline int mlx5_get_cqe_format(struct mlx5_cqe64 *cqe) +{ + return (cqe->op_own >> 2) & 0x3; +} + static inline int get_cqe_lro_tcppsh(struct mlx5_cqe64 *cqe) { return (cqe->lro_tcppsh_abort_dupack >> 6) & 1; From d9d9f156f3807b07bb84c1cfb074d620383c016a Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Wed, 11 May 2016 00:29:15 +0300 Subject: [PATCH 1499/1649] net/mlx5e: Expand WQE stride when CQE compression is enabled Make the MPWQE/Striding RQ default configuration dynamic and not statically set at compile time. Now at driver load we set stride size and num strides dynamically. By default we use same values as before, but when CQE compression is enabled, we set larger stride size to benefit from CQE compression for larger packets. Signed-off-by: Tariq Toukan Signed-off-by: Saeed Mahameed Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 13 ++++--- .../net/ethernet/mellanox/mlx5/core/en_main.c | 23 +++++++++-- .../net/ethernet/mellanox/mlx5/core/en_rx.c | 39 ++++++++++--------- 3 files changed, 46 insertions(+), 29 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 19f0d8db27ce..e05abad50c7b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -64,12 +64,9 @@ #define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW 0x4 #define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW 0x6 -#define MLX5_MPWRQ_LOG_NUM_STRIDES 11 /* >= 9, HW restriction */ #define MLX5_MPWRQ_LOG_STRIDE_SIZE 6 /* >= 6, HW restriction */ -#define MLX5_MPWRQ_NUM_STRIDES BIT(MLX5_MPWRQ_LOG_NUM_STRIDES) -#define MLX5_MPWRQ_STRIDE_SIZE BIT(MLX5_MPWRQ_LOG_STRIDE_SIZE) -#define MLX5_MPWRQ_LOG_WQE_SZ (MLX5_MPWRQ_LOG_NUM_STRIDES +\ - MLX5_MPWRQ_LOG_STRIDE_SIZE) +#define MLX5_MPWRQ_LOG_STRIDE_SIZE_CQE_COMPRESS 8 /* >= 6, HW restriction */ +#define MLX5_MPWRQ_LOG_WQE_SZ 17 #define MLX5_MPWRQ_WQE_PAGE_ORDER (MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT > 0 ? 
\ MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT : 0) #define MLX5_MPWRQ_PAGES_PER_WQE BIT(MLX5_MPWRQ_WQE_PAGE_ORDER) @@ -154,6 +151,8 @@ struct mlx5e_umr_wqe { struct mlx5e_params { u8 log_sq_size; u8 rq_wq_type; + u8 mpwqe_log_stride_sz; + u8 mpwqe_log_num_strides; u8 log_rq_size; u16 num_channels; u8 num_tc; @@ -249,6 +248,8 @@ struct mlx5e_rq { /* control */ struct mlx5_wq_ctrl wq_ctrl; u8 wq_type; + u32 mpwqe_stride_sz; + u32 mpwqe_num_strides; u32 rqn; struct mlx5e_channel *channel; struct mlx5e_priv *priv; @@ -272,7 +273,7 @@ struct mlx5e_mpw_info { void (*dma_pre_sync)(struct device *pdev, struct mlx5e_mpw_info *wi, u32 wqe_offset, u32 len); - void (*add_skb_frag)(struct device *pdev, + void (*add_skb_frag)(struct mlx5e_rq *rq, struct sk_buff *skb, struct mlx5e_mpw_info *wi, u32 page_idx, u32 frag_offset, u32 len); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 0ea4c03a7946..e40e59a0ff38 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -307,7 +307,9 @@ static int mlx5e_create_rq(struct mlx5e_channel *c, rq->handle_rx_cqe = mlx5e_handle_rx_cqe_mpwrq; rq->alloc_wqe = mlx5e_alloc_rx_mpwqe; - rq->wqe_sz = MLX5_MPWRQ_NUM_STRIDES * MLX5_MPWRQ_STRIDE_SIZE; + rq->mpwqe_stride_sz = BIT(priv->params.mpwqe_log_stride_sz); + rq->mpwqe_num_strides = BIT(priv->params.mpwqe_log_num_strides); + rq->wqe_sz = rq->mpwqe_stride_sz * rq->mpwqe_num_strides; byte_count = rq->wqe_sz; break; default: /* MLX5_WQ_TYPE_LINKED_LIST */ @@ -1130,9 +1132,9 @@ static void mlx5e_build_rq_param(struct mlx5e_priv *priv, switch (priv->params.rq_wq_type) { case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: MLX5_SET(wq, wq, log_wqe_num_of_strides, - MLX5_MPWRQ_LOG_NUM_STRIDES - 9); + priv->params.mpwqe_log_num_strides - 9); MLX5_SET(wq, wq, log_wqe_stride_size, - MLX5_MPWRQ_LOG_STRIDE_SIZE - 6); + priv->params.mpwqe_log_stride_sz - 6); MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ); break; default: /* MLX5_WQ_TYPE_LINKED_LIST */ @@ -1199,7 +1201,7 @@ static void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv, switch (priv->params.rq_wq_type) { case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: log_cq_size = priv->params.log_rq_size + - MLX5_MPWRQ_LOG_NUM_STRIDES; + priv->params.mpwqe_log_num_strides; break; default: /* MLX5_WQ_TYPE_LINKED_LIST */ log_cq_size = priv->params.log_rq_size; @@ -2729,12 +2731,25 @@ static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev, switch (priv->params.rq_wq_type) { case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: priv->params.log_rq_size = MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW; + priv->params.mpwqe_log_stride_sz = + priv->params.rx_cqe_compress ? 
+ MLX5_MPWRQ_LOG_STRIDE_SIZE_CQE_COMPRESS : + MLX5_MPWRQ_LOG_STRIDE_SIZE; + priv->params.mpwqe_log_num_strides = MLX5_MPWRQ_LOG_WQE_SZ - + priv->params.mpwqe_log_stride_sz; priv->params.lro_en = true; break; default: /* MLX5_WQ_TYPE_LINKED_LIST */ priv->params.log_rq_size = MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE; } + mlx5_core_info(mdev, + "MLX5E: StrdRq(%d) RqSz(%ld) StrdSz(%ld) RxCqeCmprss(%d)\n", + priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ, + BIT(priv->params.log_rq_size), + BIT(priv->params.mpwqe_log_stride_sz), + priv->params.rx_cqe_compress_admin); + priv->params.min_rx_wqes = mlx5_min_rx_wqes(priv->params.rq_wq_type, BIT(priv->params.log_rq_size)); priv->params.rx_cq_moderation_usec = diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index c8b8d456268f..f3456798c596 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -127,7 +127,7 @@ static inline u32 mlx5e_decompress_cqes_cont(struct mlx5e_rq *rq, for (i = update_owner_only; i < cqe_count; i++, cq->mini_arr_idx++, cqcc++) { - if (unlikely(cq->mini_arr_idx == MLX5_MINI_CQE_ARRAY_SIZE)) + if (cq->mini_arr_idx == MLX5_MINI_CQE_ARRAY_SIZE) mlx5e_read_mini_arr_slot(cq, cqcc); mlx5e_decompress_cqe_no_hash(rq, cq, cqcc); @@ -212,6 +212,11 @@ err_free_skb: return -ENOMEM; } +static inline int mlx5e_mpwqe_strides_per_page(struct mlx5e_rq *rq) +{ + return rq->mpwqe_num_strides >> MLX5_MPWRQ_WQE_PAGE_ORDER; +} + static inline void mlx5e_dma_pre_sync_linear_mpwqe(struct device *pdev, struct mlx5e_mpw_info *wi, @@ -230,13 +235,13 @@ mlx5e_dma_pre_sync_fragmented_mpwqe(struct device *pdev, } static inline void -mlx5e_add_skb_frag_linear_mpwqe(struct device *pdev, +mlx5e_add_skb_frag_linear_mpwqe(struct mlx5e_rq *rq, struct sk_buff *skb, struct mlx5e_mpw_info *wi, u32 page_idx, u32 frag_offset, u32 len) { - unsigned int truesize = ALIGN(len, MLX5_MPWRQ_STRIDE_SIZE); + unsigned int truesize = ALIGN(len, rq->mpwqe_stride_sz); wi->skbs_frags[page_idx]++; skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, @@ -245,15 +250,15 @@ mlx5e_add_skb_frag_linear_mpwqe(struct device *pdev, } static inline void -mlx5e_add_skb_frag_fragmented_mpwqe(struct device *pdev, +mlx5e_add_skb_frag_fragmented_mpwqe(struct mlx5e_rq *rq, struct sk_buff *skb, struct mlx5e_mpw_info *wi, u32 page_idx, u32 frag_offset, u32 len) { - unsigned int truesize = ALIGN(len, MLX5_MPWRQ_STRIDE_SIZE); + unsigned int truesize = ALIGN(len, rq->mpwqe_stride_sz); - dma_sync_single_for_cpu(pdev, + dma_sync_single_for_cpu(rq->pdev, wi->umr.dma_info[page_idx].addr + frag_offset, len, DMA_FROM_DEVICE); wi->skbs_frags[page_idx]++; @@ -293,7 +298,6 @@ mlx5e_copy_skb_header_fragmented_mpwqe(struct device *pdev, skb_copy_to_linear_data_offset(skb, 0, page_address(dma_info->page) + offset, len); -#if (MLX5_MPWRQ_SMALL_PACKET_THRESHOLD >= MLX5_MPWRQ_STRIDE_SIZE) if (unlikely(offset + headlen > PAGE_SIZE)) { dma_info++; headlen_pg = len; @@ -304,7 +308,6 @@ mlx5e_copy_skb_header_fragmented_mpwqe(struct device *pdev, page_address(dma_info->page), len); } -#endif } static u16 mlx5e_get_wqe_mtt_offset(u16 rq_ix, u16 wqe_ix) @@ -430,7 +433,7 @@ static int mlx5e_alloc_rx_fragmented_mpwqe(struct mlx5e_rq *rq, for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++) { if (unlikely(mlx5e_alloc_and_map_page(rq, wi, i))) goto err_unmap; - atomic_add(MLX5_MPWRQ_STRIDES_PER_PAGE, + atomic_add(mlx5e_mpwqe_strides_per_page(rq), &wi->umr.dma_info[i].page->_count); wi->skbs_frags[i] = 0; } @@ 
-449,7 +452,7 @@ err_unmap: while (--i >= 0) { dma_unmap_page(rq->pdev, wi->umr.dma_info[i].addr, PAGE_SIZE, PCI_DMA_FROMDEVICE); - atomic_sub(MLX5_MPWRQ_STRIDES_PER_PAGE, + atomic_sub(mlx5e_mpwqe_strides_per_page(rq), &wi->umr.dma_info[i].page->_count); put_page(wi->umr.dma_info[i].page); } @@ -474,7 +477,7 @@ void mlx5e_free_rx_fragmented_mpwqe(struct mlx5e_rq *rq, for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++) { dma_unmap_page(rq->pdev, wi->umr.dma_info[i].addr, PAGE_SIZE, PCI_DMA_FROMDEVICE); - atomic_sub(MLX5_MPWRQ_STRIDES_PER_PAGE - wi->skbs_frags[i], + atomic_sub(mlx5e_mpwqe_strides_per_page(rq) - wi->skbs_frags[i], &wi->umr.dma_info[i].page->_count); put_page(wi->umr.dma_info[i].page); } @@ -524,7 +527,7 @@ static int mlx5e_alloc_rx_linear_mpwqe(struct mlx5e_rq *rq, */ split_page(wi->dma_info.page, MLX5_MPWRQ_WQE_PAGE_ORDER); for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++) { - atomic_add(MLX5_MPWRQ_STRIDES_PER_PAGE, + atomic_add(mlx5e_mpwqe_strides_per_page(rq), &wi->dma_info.page[i]._count); wi->skbs_frags[i] = 0; } @@ -548,7 +551,7 @@ void mlx5e_free_rx_linear_mpwqe(struct mlx5e_rq *rq, dma_unmap_page(rq->pdev, wi->dma_info.addr, rq->wqe_sz, PCI_DMA_FROMDEVICE); for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++) { - atomic_sub(MLX5_MPWRQ_STRIDES_PER_PAGE - wi->skbs_frags[i], + atomic_sub(mlx5e_mpwqe_strides_per_page(rq) - wi->skbs_frags[i], &wi->dma_info.page[i]._count); put_page(&wi->dma_info.page[i]); } @@ -793,9 +796,9 @@ static inline void mlx5e_mpwqe_fill_rx_skb(struct mlx5e_rq *rq, u32 cqe_bcnt, struct sk_buff *skb) { - u32 consumed_bytes = ALIGN(cqe_bcnt, MLX5_MPWRQ_STRIDE_SIZE); + u32 consumed_bytes = ALIGN(cqe_bcnt, rq->mpwqe_stride_sz); u16 stride_ix = mpwrq_get_cqe_stride_index(cqe); - u32 wqe_offset = stride_ix * MLX5_MPWRQ_STRIDE_SIZE; + u32 wqe_offset = stride_ix * rq->mpwqe_stride_sz; u32 head_offset = wqe_offset & (PAGE_SIZE - 1); u32 page_idx = wqe_offset >> PAGE_SHIFT; u32 head_page_idx = page_idx; @@ -803,19 +806,17 @@ static inline void mlx5e_mpwqe_fill_rx_skb(struct mlx5e_rq *rq, u32 frag_offset = head_offset + headlen; u16 byte_cnt = cqe_bcnt - headlen; -#if (MLX5_MPWRQ_SMALL_PACKET_THRESHOLD >= MLX5_MPWRQ_STRIDE_SIZE) if (unlikely(frag_offset >= PAGE_SIZE)) { page_idx++; frag_offset -= PAGE_SIZE; } -#endif wi->dma_pre_sync(rq->pdev, wi, wqe_offset, consumed_bytes); while (byte_cnt) { u32 pg_consumed_bytes = min_t(u32, PAGE_SIZE - frag_offset, byte_cnt); - wi->add_skb_frag(rq->pdev, skb, wi, page_idx, frag_offset, + wi->add_skb_frag(rq, skb, wi, page_idx, frag_offset, pg_consumed_bytes); byte_cnt -= pg_consumed_bytes; frag_offset = 0; @@ -865,7 +866,7 @@ void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb); mpwrq_cqe_out: - if (likely(wi->consumed_strides < MLX5_MPWRQ_NUM_STRIDES)) + if (likely(wi->consumed_strides < rq->mpwqe_num_strides)) return; wi->free_wqe(rq, wi); From b797a684b0ddeb6f78193c9b1bfae6bd824ec01a Mon Sep 17 00:00:00 2001 From: Saeed Mahameed Date: Wed, 11 May 2016 00:29:16 +0300 Subject: [PATCH 1500/1649] net/mlx5e: Enable CQE compression when PCI is slower than link We turn the feature ON, only for servers with PCI BW < MAX LINK BW, as it helps reducing PCI pressure on weak PCI slots, but it adds some software overhead. Signed-off-by: Saeed Mahameed Signed-off-by: Tariq Toukan Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 1 + .../ethernet/mellanox/mlx5/core/en_ethtool.c | 19 +++++++ .../net/ethernet/mellanox/mlx5/core/en_main.c | 52 +++++++++++++++++++ 3 files changed, 72 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index e05abad50c7b..e8a6c3325b39 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -645,6 +645,7 @@ int mlx5e_close_locked(struct net_device *netdev); void mlx5e_build_default_indir_rqt(struct mlx5_core_dev *mdev, u32 *indirection_rqt, int len, int num_channels); +int mlx5e_get_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed); static inline void mlx5e_tx_notify_hw(struct mlx5e_sq *sq, struct mlx5_wqe_ctrl_seg *ctrl, int bf_sz) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index 534d99e2f9c8..fc7dcc03b1de 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -613,6 +613,25 @@ static u32 ptys2ethtool_supported_port(u32 eth_proto_cap) return 0; } +int mlx5e_get_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed) +{ + u32 max_speed = 0; + u32 proto_cap; + int err; + int i; + + err = mlx5_query_port_proto_cap(mdev, &proto_cap, MLX5_PTYS_EN); + if (err) + return err; + + for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) + if (proto_cap & MLX5E_PROT_MASK(i)) + max_speed = max(max_speed, ptys2ethtool_table[i].speed); + + *speed = max_speed; + return 0; +} + static void get_speed_duplex(struct net_device *netdev, u32 eth_proto_oper, struct ethtool_cmd *cmd) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index e40e59a0ff38..08040702824d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -2716,11 +2716,49 @@ static bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev) MLX5_CAP_ETH(mdev, reg_umr_sq); } +static int mlx5e_get_pci_bw(struct mlx5_core_dev *mdev, u32 *pci_bw) +{ + enum pcie_link_width width; + enum pci_bus_speed speed; + int err = 0; + + err = pcie_get_minimum_link(mdev->pdev, &speed, &width); + if (err) + return err; + + if (speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN) + return -EINVAL; + + switch (speed) { + case PCIE_SPEED_2_5GT: + *pci_bw = 2500 * width; + break; + case PCIE_SPEED_5_0GT: + *pci_bw = 5000 * width; + break; + case PCIE_SPEED_8_0GT: + *pci_bw = 8000 * width; + break; + default: + return -EINVAL; + } + + return 0; +} + +static bool cqe_compress_heuristic(u32 link_speed, u32 pci_bw) +{ + return (link_speed && pci_bw && + (pci_bw < 40000) && (pci_bw < link_speed)); +} + static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev, struct net_device *netdev, int num_channels) { struct mlx5e_priv *priv = netdev_priv(netdev); + u32 link_speed = 0; + u32 pci_bw = 0; priv->params.log_sq_size = MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE; @@ -2728,6 +2766,20 @@ static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev, MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ : MLX5_WQ_TYPE_LINKED_LIST; + /* set CQE compression */ + priv->params.rx_cqe_compress_admin = false; + if (MLX5_CAP_GEN(mdev, cqe_compression) && + MLX5_CAP_GEN(mdev, vport_group_manager)) { + mlx5e_get_max_linkspeed(mdev, &link_speed); + mlx5e_get_pci_bw(mdev, &pci_bw); + mlx5_core_dbg(mdev, "Max link speed = %d, PCI BW = 
%d\n", + link_speed, pci_bw); + priv->params.rx_cqe_compress_admin = + cqe_compress_heuristic(link_speed, pci_bw); + } + + priv->params.rx_cqe_compress = priv->params.rx_cqe_compress_admin; + switch (priv->params.rq_wq_type) { case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: priv->params.log_rq_size = MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW; From 32a47e72c9eb17e3b1bb507184e788b10d69ad4b Mon Sep 17 00:00:00 2001 From: Yuval Mintz Date: Wed, 11 May 2016 16:36:12 +0300 Subject: [PATCH 1501/1649] qed: Add CONFIG_QED_SRIOV Add support for a new Kconfig option for qed* driver which would allow [eventually] the support in VFs. This patch adds the necessary logic in the PF to learn about the possible VFs it will have to support [Based on PCI configuration space and HW], and prepare a database with an entry per-VF as infrastructure for future interaction with said VFs. Signed-off-by: Yuval Mintz Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/Kconfig | 10 + drivers/net/ethernet/qlogic/qed/Makefile | 1 + drivers/net/ethernet/qlogic/qed/qed.h | 7 + drivers/net/ethernet/qlogic/qed/qed_dev.c | 19 + drivers/net/ethernet/qlogic/qed/qed_hw.c | 11 + drivers/net/ethernet/qlogic/qed/qed_hw.h | 10 + drivers/net/ethernet/qlogic/qed/qed_sriov.c | 366 ++++++++++++++++++++ drivers/net/ethernet/qlogic/qed/qed_sriov.h | 185 ++++++++++ drivers/net/ethernet/qlogic/qed/qed_vf.h | 41 +++ 9 files changed, 650 insertions(+) create mode 100644 drivers/net/ethernet/qlogic/qed/qed_sriov.c create mode 100644 drivers/net/ethernet/qlogic/qed/qed_sriov.h create mode 100644 drivers/net/ethernet/qlogic/qed/qed_vf.h diff --git a/drivers/net/ethernet/qlogic/Kconfig b/drivers/net/ethernet/qlogic/Kconfig index c0a11b5158e7..680d8c736d2b 100644 --- a/drivers/net/ethernet/qlogic/Kconfig +++ b/drivers/net/ethernet/qlogic/Kconfig @@ -98,6 +98,16 @@ config QED ---help--- This enables the support for ... +config QED_SRIOV + bool "QLogic QED 25/40/100Gb SR-IOV support" + depends on QED && PCI_IOV + default y + ---help--- + This configuration parameter enables Single Root Input Output + Virtualization support for QED devices. + This allows for virtual function acceleration in virtualized + environments. 
+ config QEDE tristate "QLogic QED 25/40/100Gb Ethernet NIC" depends on QED diff --git a/drivers/net/ethernet/qlogic/qed/Makefile b/drivers/net/ethernet/qlogic/qed/Makefile index aafa6692e62f..e11a809034ea 100644 --- a/drivers/net/ethernet/qlogic/qed/Makefile +++ b/drivers/net/ethernet/qlogic/qed/Makefile @@ -3,3 +3,4 @@ obj-$(CONFIG_QED) := qed.o qed-y := qed_cxt.o qed_dev.o qed_hw.o qed_init_fw_funcs.o qed_init_ops.o \ qed_int.o qed_main.o qed_mcp.o qed_sp_commands.o qed_spq.o qed_l2.o \ qed_selftest.o +qed-$(CONFIG_QED_SRIOV) += qed_sriov.o diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h index cceac3272cce..2e067c7e8f38 100644 --- a/drivers/net/ethernet/qlogic/qed/qed.h +++ b/drivers/net/ethernet/qlogic/qed/qed.h @@ -152,6 +152,7 @@ enum QED_RESOURCES { enum QED_FEATURE { QED_PF_L2_QUE, + QED_VF, QED_MAX_FEATURES, }; @@ -360,6 +361,7 @@ struct qed_hwfn { /* True if the driver requests for the link */ bool b_drv_link_init; + struct qed_pf_iov *pf_iov_info; struct qed_mcp_info *mcp_info; struct qed_hw_cid_data *p_tx_cids; @@ -484,6 +486,10 @@ struct qed_dev { u8 num_hwfns; struct qed_hwfn hwfns[MAX_HWFNS_PER_DEVICE]; + /* SRIOV */ + struct qed_hw_sriov_info *p_iov_info; +#define IS_QED_SRIOV(cdev) (!!(cdev)->p_iov_info) + unsigned long tunn_mode; u32 drv_type; @@ -514,6 +520,7 @@ struct qed_dev { const struct firmware *firmware; }; +#define NUM_OF_VFS(dev) MAX_NUM_VFS_BB #define NUM_OF_SBS(dev) MAX_SB_PER_PATH_BB #define NUM_OF_ENG_PFS(dev) MAX_NUM_PFS_BB diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index b500c86d7d06..7a359c45360f 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -30,6 +30,7 @@ #include "qed_mcp.h" #include "qed_reg_addr.h" #include "qed_sp.h" +#include "qed_sriov.h" /* API common to all protocols */ enum BAR_ID { @@ -136,6 +137,7 @@ void qed_resc_free(struct qed_dev *cdev) qed_eq_free(p_hwfn, p_hwfn->p_eq); qed_consq_free(p_hwfn, p_hwfn->p_consq); qed_int_free(p_hwfn); + qed_iov_free(p_hwfn); qed_dmae_info_free(p_hwfn); } } @@ -316,6 +318,10 @@ int qed_resc_alloc(struct qed_dev *cdev) if (rc) goto alloc_err; + rc = qed_iov_alloc(p_hwfn); + if (rc) + goto alloc_err; + /* EQ */ p_eq = qed_eq_alloc(p_hwfn, 256); if (!p_eq) { @@ -373,6 +379,8 @@ void qed_resc_setup(struct qed_dev *cdev) p_hwfn->mcp_info->mfw_mb_length); qed_int_setup(p_hwfn, p_hwfn->p_main_ptt); + + qed_iov_setup(p_hwfn, p_hwfn->p_main_ptt); } } @@ -1238,6 +1246,13 @@ qed_get_hw_info(struct qed_hwfn *p_hwfn, u32 port_mode; int rc; + /* Since all information is common, only first hwfns should do this */ + if (IS_LEAD_HWFN(p_hwfn)) { + rc = qed_iov_hw_info(p_hwfn); + if (rc) + return rc; + } + /* Read the port mode */ port_mode = qed_rd(p_hwfn, p_ptt, CNIG_REG_NW_PORT_MODE_BB_B0); @@ -1397,6 +1412,8 @@ static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn, return rc; err2: + if (IS_LEAD_HWFN(p_hwfn)) + qed_iov_free_hw_info(p_hwfn->cdev); qed_mcp_free(p_hwfn); err1: qed_hw_hwfn_free(p_hwfn); @@ -1463,6 +1480,8 @@ void qed_hw_remove(struct qed_dev *cdev) qed_hw_hwfn_free(p_hwfn); qed_mcp_free(p_hwfn); } + + qed_iov_free_hw_info(cdev); } int qed_chain_alloc(struct qed_dev *cdev, diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.c b/drivers/net/ethernet/qlogic/qed/qed_hw.c index a95a3e4b3101..a8cf96c34bef 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hw.c +++ b/drivers/net/ethernet/qlogic/qed/qed_hw.c @@ -338,6 +338,17 @@ void qed_port_unpretend(struct 
qed_hwfn *p_hwfn, *(u32 *)&p_ptt->pxp.pretend); } +u32 qed_vfid_to_concrete(struct qed_hwfn *p_hwfn, u8 vfid) +{ + u32 concrete_fid = 0; + + SET_FIELD(concrete_fid, PXP_CONCRETE_FID_PFID, p_hwfn->rel_pf_id); + SET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFID, vfid); + SET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFVALID, 1); + + return concrete_fid; +} + /* DMAE */ static void qed_dmae_opcode(struct qed_hwfn *p_hwfn, const u8 is_src_type_grc, diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.h b/drivers/net/ethernet/qlogic/qed/qed_hw.h index e56d433793be..4367363ade40 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hw.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hw.h @@ -220,6 +220,16 @@ void qed_port_pretend(struct qed_hwfn *p_hwfn, void qed_port_unpretend(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); +/** + * @brief qed_vfid_to_concrete - build a concrete FID for a + * given VF ID + * + * @param p_hwfn + * @param p_ptt + * @param vfid + */ +u32 qed_vfid_to_concrete(struct qed_hwfn *p_hwfn, u8 vfid); + /** * @brief qed_dmae_idx_to_go_cmd - map the idx to dmae cmd * this is declared here since other files will require it. diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c new file mode 100644 index 000000000000..685e3fa7bc52 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c @@ -0,0 +1,366 @@ +/* QLogic qed NIC Driver + * Copyright (c) 2015 QLogic Corporation + * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree. + */ + +#include "qed_hw.h" +#include "qed_int.h" +#include "qed_reg_addr.h" +#include "qed_sriov.h" +#include "qed_vf.h" + +bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn, + int rel_vf_id, bool b_enabled_only) +{ + if (!p_hwfn->pf_iov_info) { + DP_NOTICE(p_hwfn->cdev, "No iov info\n"); + return false; + } + + if ((rel_vf_id >= p_hwfn->cdev->p_iov_info->total_vfs) || + (rel_vf_id < 0)) + return false; + + if ((!p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_init) && + b_enabled_only) + return false; + + return true; +} + +static int qed_iov_pci_cfg_info(struct qed_dev *cdev) +{ + struct qed_hw_sriov_info *iov = cdev->p_iov_info; + int pos = iov->pos; + + DP_VERBOSE(cdev, QED_MSG_IOV, "sriov ext pos %d\n", pos); + pci_read_config_word(cdev->pdev, pos + PCI_SRIOV_CTRL, &iov->ctrl); + + pci_read_config_word(cdev->pdev, + pos + PCI_SRIOV_TOTAL_VF, &iov->total_vfs); + pci_read_config_word(cdev->pdev, + pos + PCI_SRIOV_INITIAL_VF, &iov->initial_vfs); + + pci_read_config_word(cdev->pdev, pos + PCI_SRIOV_NUM_VF, &iov->num_vfs); + if (iov->num_vfs) { + DP_VERBOSE(cdev, + QED_MSG_IOV, + "Number of VFs are already set to non-zero value. 
Ignoring PCI configuration value\n"); + iov->num_vfs = 0; + } + + pci_read_config_word(cdev->pdev, + pos + PCI_SRIOV_VF_OFFSET, &iov->offset); + + pci_read_config_word(cdev->pdev, + pos + PCI_SRIOV_VF_STRIDE, &iov->stride); + + pci_read_config_word(cdev->pdev, + pos + PCI_SRIOV_VF_DID, &iov->vf_device_id); + + pci_read_config_dword(cdev->pdev, + pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz); + + pci_read_config_dword(cdev->pdev, pos + PCI_SRIOV_CAP, &iov->cap); + + pci_read_config_byte(cdev->pdev, pos + PCI_SRIOV_FUNC_LINK, &iov->link); + + DP_VERBOSE(cdev, + QED_MSG_IOV, + "IOV info: nres %d, cap 0x%x, ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d, stride %d, page size 0x%x\n", + iov->nres, + iov->cap, + iov->ctrl, + iov->total_vfs, + iov->initial_vfs, + iov->nr_virtfn, iov->offset, iov->stride, iov->pgsz); + + /* Some sanity checks */ + if (iov->num_vfs > NUM_OF_VFS(cdev) || + iov->total_vfs > NUM_OF_VFS(cdev)) { + /* This can happen only due to a bug. In this case we set + * num_vfs to zero to avoid memory corruption in the code that + * assumes max number of vfs + */ + DP_NOTICE(cdev, + "IOV: Unexpected number of vfs set: %d setting num_vf to zero\n", + iov->num_vfs); + + iov->num_vfs = 0; + iov->total_vfs = 0; + } + + return 0; +} + +static void qed_iov_clear_vf_igu_blocks(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt) +{ + struct qed_igu_block *p_sb; + u16 sb_id; + u32 val; + + if (!p_hwfn->hw_info.p_igu_info) { + DP_ERR(p_hwfn, + "qed_iov_clear_vf_igu_blocks IGU Info not initialized\n"); + return; + } + + for (sb_id = 0; sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev); + sb_id++) { + p_sb = &p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks[sb_id]; + if ((p_sb->status & QED_IGU_STATUS_FREE) && + !(p_sb->status & QED_IGU_STATUS_PF)) { + val = qed_rd(p_hwfn, p_ptt, + IGU_REG_MAPPING_MEMORY + sb_id * 4); + SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0); + qed_wr(p_hwfn, p_ptt, + IGU_REG_MAPPING_MEMORY + 4 * sb_id, val); + } + } +} + +static void qed_iov_setup_vfdb(struct qed_hwfn *p_hwfn) +{ + struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info; + struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info; + struct qed_bulletin_content *p_bulletin_virt; + dma_addr_t req_p, rply_p, bulletin_p; + union pfvf_tlvs *p_reply_virt_addr; + union vfpf_tlvs *p_req_virt_addr; + u8 idx = 0; + + memset(p_iov_info->vfs_array, 0, sizeof(p_iov_info->vfs_array)); + + p_req_virt_addr = p_iov_info->mbx_msg_virt_addr; + req_p = p_iov_info->mbx_msg_phys_addr; + p_reply_virt_addr = p_iov_info->mbx_reply_virt_addr; + rply_p = p_iov_info->mbx_reply_phys_addr; + p_bulletin_virt = p_iov_info->p_bulletins; + bulletin_p = p_iov_info->bulletins_phys; + if (!p_req_virt_addr || !p_reply_virt_addr || !p_bulletin_virt) { + DP_ERR(p_hwfn, + "qed_iov_setup_vfdb called without allocating mem first\n"); + return; + } + + for (idx = 0; idx < p_iov->total_vfs; idx++) { + struct qed_vf_info *vf = &p_iov_info->vfs_array[idx]; + u32 concrete; + + vf->vf_mbx.req_virt = p_req_virt_addr + idx; + vf->vf_mbx.req_phys = req_p + idx * sizeof(union vfpf_tlvs); + vf->vf_mbx.reply_virt = p_reply_virt_addr + idx; + vf->vf_mbx.reply_phys = rply_p + idx * sizeof(union pfvf_tlvs); + + vf->state = VF_STOPPED; + vf->b_init = false; + + vf->bulletin.phys = idx * + sizeof(struct qed_bulletin_content) + + bulletin_p; + vf->bulletin.p_virt = p_bulletin_virt + idx; + vf->bulletin.size = sizeof(struct qed_bulletin_content); + + vf->relative_vf_id = idx; + vf->abs_vf_id = idx + p_iov->first_vf_in_pf; + concrete = qed_vfid_to_concrete(p_hwfn, 
vf->abs_vf_id); + vf->concrete_fid = concrete; + vf->opaque_fid = (p_hwfn->hw_info.opaque_fid & 0xff) | + (vf->abs_vf_id << 8); + vf->vport_id = idx + 1; + } +} + +static int qed_iov_allocate_vfdb(struct qed_hwfn *p_hwfn) +{ + struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info; + void **p_v_addr; + u16 num_vfs = 0; + + num_vfs = p_hwfn->cdev->p_iov_info->total_vfs; + + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "qed_iov_allocate_vfdb for %d VFs\n", num_vfs); + + /* Allocate PF Mailbox buffer (per-VF) */ + p_iov_info->mbx_msg_size = sizeof(union vfpf_tlvs) * num_vfs; + p_v_addr = &p_iov_info->mbx_msg_virt_addr; + *p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, + p_iov_info->mbx_msg_size, + &p_iov_info->mbx_msg_phys_addr, + GFP_KERNEL); + if (!*p_v_addr) + return -ENOMEM; + + /* Allocate PF Mailbox Reply buffer (per-VF) */ + p_iov_info->mbx_reply_size = sizeof(union pfvf_tlvs) * num_vfs; + p_v_addr = &p_iov_info->mbx_reply_virt_addr; + *p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, + p_iov_info->mbx_reply_size, + &p_iov_info->mbx_reply_phys_addr, + GFP_KERNEL); + if (!*p_v_addr) + return -ENOMEM; + + p_iov_info->bulletins_size = sizeof(struct qed_bulletin_content) * + num_vfs; + p_v_addr = &p_iov_info->p_bulletins; + *p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, + p_iov_info->bulletins_size, + &p_iov_info->bulletins_phys, + GFP_KERNEL); + if (!*p_v_addr) + return -ENOMEM; + + DP_VERBOSE(p_hwfn, + QED_MSG_IOV, + "PF's Requests mailbox [%p virt 0x%llx phys], Response mailbox [%p virt 0x%llx phys] Bulletins [%p virt 0x%llx phys]\n", + p_iov_info->mbx_msg_virt_addr, + (u64) p_iov_info->mbx_msg_phys_addr, + p_iov_info->mbx_reply_virt_addr, + (u64) p_iov_info->mbx_reply_phys_addr, + p_iov_info->p_bulletins, (u64) p_iov_info->bulletins_phys); + + return 0; +} + +static void qed_iov_free_vfdb(struct qed_hwfn *p_hwfn) +{ + struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info; + + if (p_hwfn->pf_iov_info->mbx_msg_virt_addr) + dma_free_coherent(&p_hwfn->cdev->pdev->dev, + p_iov_info->mbx_msg_size, + p_iov_info->mbx_msg_virt_addr, + p_iov_info->mbx_msg_phys_addr); + + if (p_hwfn->pf_iov_info->mbx_reply_virt_addr) + dma_free_coherent(&p_hwfn->cdev->pdev->dev, + p_iov_info->mbx_reply_size, + p_iov_info->mbx_reply_virt_addr, + p_iov_info->mbx_reply_phys_addr); + + if (p_iov_info->p_bulletins) + dma_free_coherent(&p_hwfn->cdev->pdev->dev, + p_iov_info->bulletins_size, + p_iov_info->p_bulletins, + p_iov_info->bulletins_phys); +} + +int qed_iov_alloc(struct qed_hwfn *p_hwfn) +{ + struct qed_pf_iov *p_sriov; + + if (!IS_PF_SRIOV(p_hwfn)) { + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "No SR-IOV - no need for IOV db\n"); + return 0; + } + + p_sriov = kzalloc(sizeof(*p_sriov), GFP_KERNEL); + if (!p_sriov) { + DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_sriov'\n"); + return -ENOMEM; + } + + p_hwfn->pf_iov_info = p_sriov; + + return qed_iov_allocate_vfdb(p_hwfn); +} + +void qed_iov_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + if (!IS_PF_SRIOV(p_hwfn) || !IS_PF_SRIOV_ALLOC(p_hwfn)) + return; + + qed_iov_setup_vfdb(p_hwfn); + qed_iov_clear_vf_igu_blocks(p_hwfn, p_ptt); +} + +void qed_iov_free(struct qed_hwfn *p_hwfn) +{ + if (IS_PF_SRIOV_ALLOC(p_hwfn)) { + qed_iov_free_vfdb(p_hwfn); + kfree(p_hwfn->pf_iov_info); + } +} + +void qed_iov_free_hw_info(struct qed_dev *cdev) +{ + kfree(cdev->p_iov_info); + cdev->p_iov_info = NULL; +} + +int qed_iov_hw_info(struct qed_hwfn *p_hwfn) +{ + struct qed_dev *cdev = p_hwfn->cdev; + int pos; + int rc; + + /* Learn the PCI configuration */ + pos = 
pci_find_ext_capability(p_hwfn->cdev->pdev, + PCI_EXT_CAP_ID_SRIOV); + if (!pos) { + DP_VERBOSE(p_hwfn, QED_MSG_IOV, "No PCIe IOV support\n"); + return 0; + } + + /* Allocate a new struct for IOV information */ + cdev->p_iov_info = kzalloc(sizeof(*cdev->p_iov_info), GFP_KERNEL); + if (!cdev->p_iov_info) { + DP_NOTICE(p_hwfn, "Can't support IOV due to lack of memory\n"); + return -ENOMEM; + } + cdev->p_iov_info->pos = pos; + + rc = qed_iov_pci_cfg_info(cdev); + if (rc) + return rc; + + /* We want PF IOV to be synonemous with the existance of p_iov_info; + * In case the capability is published but there are no VFs, simply + * de-allocate the struct. + */ + if (!cdev->p_iov_info->total_vfs) { + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "IOV capabilities, but no VFs are published\n"); + kfree(cdev->p_iov_info); + cdev->p_iov_info = NULL; + return 0; + } + + /* Calculate the first VF index - this is a bit tricky; Basically, + * VFs start at offset 16 relative to PF0, and 2nd engine VFs begin + * after the first engine's VFs. + */ + cdev->p_iov_info->first_vf_in_pf = p_hwfn->cdev->p_iov_info->offset + + p_hwfn->abs_pf_id - 16; + if (QED_PATH_ID(p_hwfn)) + cdev->p_iov_info->first_vf_in_pf -= MAX_NUM_VFS_BB; + + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "First VF in hwfn 0x%08x\n", + cdev->p_iov_info->first_vf_in_pf); + + return 0; +} + +u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn, u16 rel_vf_id) +{ + struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info; + u16 i; + + if (!p_iov) + goto out; + + for (i = rel_vf_id; i < p_iov->total_vfs; i++) + if (qed_iov_is_valid_vfid(p_hwfn, rel_vf_id, true)) + return i; + +out: + return MAX_NUM_VFS; +} diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.h b/drivers/net/ethernet/qlogic/qed/qed_sriov.h new file mode 100644 index 000000000000..0ac561916e2c --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.h @@ -0,0 +1,185 @@ +/* QLogic qed NIC Driver + * Copyright (c) 2015 QLogic Corporation + * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree. + */ + +#ifndef _QED_SRIOV_H +#define _QED_SRIOV_H +#include +#include "qed_vf.h" +#define QED_VF_ARRAY_LENGTH (3) + +#define IS_VF(cdev) ((cdev)->b_is_vf) +#define IS_PF(cdev) (!((cdev)->b_is_vf)) +#ifdef CONFIG_QED_SRIOV +#define IS_PF_SRIOV(p_hwfn) (!!((p_hwfn)->cdev->p_iov_info)) +#else +#define IS_PF_SRIOV(p_hwfn) (0) +#endif +#define IS_PF_SRIOV_ALLOC(p_hwfn) (!!((p_hwfn)->pf_iov_info)) + +/* This struct is part of qed_dev and contains data relevant to all hwfns; + * Initialized only if SR-IOV cpabability is exposed in PCIe config space. + */ +struct qed_hw_sriov_info { + int pos; /* capability position */ + int nres; /* number of resources */ + u32 cap; /* SR-IOV Capabilities */ + u16 ctrl; /* SR-IOV Control */ + u16 total_vfs; /* total VFs associated with the PF */ + u16 num_vfs; /* number of vfs that have been started */ + u16 initial_vfs; /* initial VFs associated with the PF */ + u16 nr_virtfn; /* number of VFs available */ + u16 offset; /* first VF Routing ID offset */ + u16 stride; /* following VF stride */ + u16 vf_device_id; /* VF device id */ + u32 pgsz; /* page size for BAR alignment */ + u8 link; /* Function Dependency Link */ + + u32 first_vf_in_pf; +}; + +/* This mailbox is maintained per VF in its PF contains all information + * required for sending / receiving a message. 
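 *
 * A rough sketch of the intended flow (the VF->PF channel itself is only
 * wired up by a later patch in this series): the VF prepares a TLV message
 * and signals the HW; the PF then copies that message into req_virt,
 * processes it, and DMAs the response it builds in reply_virt/reply_phys
 * back to the VF.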
+ */ +struct qed_iov_vf_mbx { + union vfpf_tlvs *req_virt; + dma_addr_t req_phys; + union pfvf_tlvs *reply_virt; + dma_addr_t reply_phys; +}; + +enum vf_state { + VF_STOPPED /* VF, Stopped */ +}; + +/* PFs maintain an array of this structure, per VF */ +struct qed_vf_info { + struct qed_iov_vf_mbx vf_mbx; + enum vf_state state; + bool b_init; + + struct qed_bulletin bulletin; + dma_addr_t vf_bulletin; + + u32 concrete_fid; + u16 opaque_fid; + + u8 vport_id; + u8 relative_vf_id; + u8 abs_vf_id; +#define QED_VF_ABS_ID(p_hwfn, p_vf) (QED_PATH_ID(p_hwfn) ? \ + (p_vf)->abs_vf_id + MAX_NUM_VFS_BB : \ + (p_vf)->abs_vf_id) +}; + +/* This structure is part of qed_hwfn and used only for PFs that have sriov + * capability enabled. + */ +struct qed_pf_iov { + struct qed_vf_info vfs_array[MAX_NUM_VFS]; + u64 pending_events[QED_VF_ARRAY_LENGTH]; + u64 pending_flr[QED_VF_ARRAY_LENGTH]; + + /* Allocate message address continuosuly and split to each VF */ + void *mbx_msg_virt_addr; + dma_addr_t mbx_msg_phys_addr; + u32 mbx_msg_size; + void *mbx_reply_virt_addr; + dma_addr_t mbx_reply_phys_addr; + u32 mbx_reply_size; + void *p_bulletins; + dma_addr_t bulletins_phys; + u32 bulletins_size; +}; + +#ifdef CONFIG_QED_SRIOV +/** + * @brief - Given a VF index, return index of next [including that] active VF. + * + * @param p_hwfn + * @param rel_vf_id + * + * @return MAX_NUM_VFS in case no further active VFs, otherwise index. + */ +u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn, u16 rel_vf_id); + +/** + * @brief Read sriov related information and allocated resources + * reads from configuraiton space, shmem, etc. + * + * @param p_hwfn + * + * @return int + */ +int qed_iov_hw_info(struct qed_hwfn *p_hwfn); + +/** + * @brief qed_iov_alloc - allocate sriov related resources + * + * @param p_hwfn + * + * @return int + */ +int qed_iov_alloc(struct qed_hwfn *p_hwfn); + +/** + * @brief qed_iov_setup - setup sriov related resources + * + * @param p_hwfn + * @param p_ptt + */ +void qed_iov_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); + +/** + * @brief qed_iov_free - free sriov related resources + * + * @param p_hwfn + */ +void qed_iov_free(struct qed_hwfn *p_hwfn); + +/** + * @brief free sriov related memory that was allocated during hw_prepare + * + * @param cdev + */ +void qed_iov_free_hw_info(struct qed_dev *cdev); +#else +static inline u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn, + u16 rel_vf_id) +{ + return MAX_NUM_VFS; +} + +static inline int qed_iov_hw_info(struct qed_hwfn *p_hwfn) +{ + return 0; +} + +static inline int qed_iov_alloc(struct qed_hwfn *p_hwfn) +{ + return 0; +} + +static inline void qed_iov_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ +} + +static inline void qed_iov_free(struct qed_hwfn *p_hwfn) +{ +} + +static inline void qed_iov_free_hw_info(struct qed_dev *cdev) +{ +} +#endif + +#define qed_for_each_vf(_p_hwfn, _i) \ + for (_i = qed_iov_get_next_active_vf(_p_hwfn, 0); \ + _i < MAX_NUM_VFS; \ + _i = qed_iov_get_next_active_vf(_p_hwfn, _i + 1)) + +#endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.h b/drivers/net/ethernet/qlogic/qed/qed_vf.h new file mode 100644 index 000000000000..52cfa4889d88 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_vf.h @@ -0,0 +1,41 @@ +/* QLogic qed NIC Driver + * Copyright (c) 2015 QLogic Corporation + * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree. 
+ */ + +#ifndef _QED_VF_H +#define _QED_VF_H + +#define TLV_BUFFER_SIZE 1024 +struct tlv_buffer_size { + u8 tlv_buffer[TLV_BUFFER_SIZE]; +}; + +union vfpf_tlvs { + struct tlv_buffer_size tlv_buf_size; +}; + +union pfvf_tlvs { + struct tlv_buffer_size tlv_buf_size; +}; + +struct qed_bulletin_content { + /* crc of structure to ensure is not in mid-update */ + u32 crc; + + u32 version; + + /* bitmap indicating which fields hold valid values */ + u64 valid_bitmap; +}; + +struct qed_bulletin { + dma_addr_t phys; + struct qed_bulletin_content *p_virt; + u32 size; +}; + +#endif From 37bff2b9c6addf6216c8d04e95be596678e8deff Mon Sep 17 00:00:00 2001 From: Yuval Mintz Date: Wed, 11 May 2016 16:36:13 +0300 Subject: [PATCH 1502/1649] qed: Add VF->PF channel infrastructure Communication between VF and PF is based on a dedicated HW channel; VF will prepare a messge, and by signaling the HW the PF would get a notification of that message existance. The PF would then copy the message, process it and DMA an answer back to the VF as a response. The messages themselves are TLV-based - allowing easier backward/forward compatibility. This patch adds the infrastructure of the channel on the PF side - starting with the arrival of the notification and ending with DMAing the response back to the VF. It also adds a dummy-response as reference, as it only lays the groundwork of the communication; it doesn't really add support of any actual messages. Signed-off-by: Yuval Mintz Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed.h | 6 + drivers/net/ethernet/qlogic/qed/qed_dev_api.h | 27 +- drivers/net/ethernet/qlogic/qed/qed_hsi.h | 2 +- drivers/net/ethernet/qlogic/qed/qed_hw.c | 44 +- drivers/net/ethernet/qlogic/qed/qed_main.c | 10 +- drivers/net/ethernet/qlogic/qed/qed_spq.c | 16 +- drivers/net/ethernet/qlogic/qed/qed_sriov.c | 385 ++++++++++++++++++ drivers/net/ethernet/qlogic/qed/qed_sriov.h | 53 +++ drivers/net/ethernet/qlogic/qed/qed_vf.h | 52 +++ include/linux/qed/common_hsi.h | 5 + 10 files changed, 585 insertions(+), 15 deletions(-) diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h index 2e067c7e8f38..01f9b6c880bd 100644 --- a/drivers/net/ethernet/qlogic/qed/qed.h +++ b/drivers/net/ethernet/qlogic/qed/qed.h @@ -378,6 +378,12 @@ struct qed_hwfn { struct qed_simd_fp_handler simd_proto_handler[64]; +#ifdef CONFIG_QED_SRIOV + struct workqueue_struct *iov_wq; + struct delayed_work iov_task; + unsigned long iov_task_flags; +#endif + struct z_stream_s *stream; }; diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h index 6aac3f855aa1..f567371fe304 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h +++ b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h @@ -182,11 +182,15 @@ enum qed_dmae_address_type_t { * used mostly to write a zeroed buffer to destination address * using DMA */ -#define QED_DMAE_FLAG_RW_REPL_SRC 0x00000001 -#define QED_DMAE_FLAG_COMPLETION_DST 0x00000008 +#define QED_DMAE_FLAG_RW_REPL_SRC 0x00000001 +#define QED_DMAE_FLAG_VF_SRC 0x00000002 +#define QED_DMAE_FLAG_VF_DST 0x00000004 +#define QED_DMAE_FLAG_COMPLETION_DST 0x00000008 struct qed_dmae_params { - u32 flags; /* consists of QED_DMAE_FLAG_* values */ + u32 flags; /* consists of QED_DMAE_FLAG_* values */ + u8 src_vfid; + u8 dst_vfid; }; /** @@ -208,6 +212,23 @@ qed_dmae_host2grc(struct qed_hwfn *p_hwfn, u32 size_in_dwords, u32 flags); +/** + * @brief qed_dmae_host2host - copy data from to source address + * to a destination 
adress (for SRIOV) using the given ptt + * + * @param p_hwfn + * @param p_ptt + * @param source_addr + * @param dest_addr + * @param size_in_dwords + * @param params + */ +int qed_dmae_host2host(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + dma_addr_t source_addr, + dma_addr_t dest_addr, + u32 size_in_dwords, struct qed_dmae_params *p_params); + /** * @brief qed_chain_alloc - Allocate and initialize a chain * diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h index c4fae71bed11..6ba197c107ed 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h @@ -31,7 +31,7 @@ enum common_event_opcode { COMMON_EVENT_PF_STOP, COMMON_EVENT_RESERVED, COMMON_EVENT_RESERVED2, - COMMON_EVENT_RESERVED3, + COMMON_EVENT_VF_PF_CHANNEL, COMMON_EVENT_RESERVED4, COMMON_EVENT_RESERVED5, COMMON_EVENT_RESERVED6, diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.c b/drivers/net/ethernet/qlogic/qed/qed_hw.c index a8cf96c34bef..a9be5a422d2d 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hw.c +++ b/drivers/net/ethernet/qlogic/qed/qed_hw.c @@ -355,8 +355,8 @@ static void qed_dmae_opcode(struct qed_hwfn *p_hwfn, const u8 is_dst_type_grc, struct qed_dmae_params *p_params) { + u16 opcode_b = 0; u32 opcode = 0; - u16 opcodeB = 0; /* Whether the source is the PCIe or the GRC. * 0- The source is the PCIe @@ -398,14 +398,24 @@ static void qed_dmae_opcode(struct qed_hwfn *p_hwfn, opcode |= (DMAE_CMD_DST_ADDR_RESET_MASK << DMAE_CMD_DST_ADDR_RESET_SHIFT); - opcodeB |= (DMAE_CMD_SRC_VF_ID_MASK << - DMAE_CMD_SRC_VF_ID_SHIFT); + /* SRC/DST VFID: all 1's - pf, otherwise VF id */ + if (p_params->flags & QED_DMAE_FLAG_VF_SRC) { + opcode |= 1 << DMAE_CMD_SRC_VF_ID_VALID_SHIFT; + opcode_b |= p_params->src_vfid << DMAE_CMD_SRC_VF_ID_SHIFT; + } else { + opcode_b |= DMAE_CMD_SRC_VF_ID_MASK << + DMAE_CMD_SRC_VF_ID_SHIFT; + } - opcodeB |= (DMAE_CMD_DST_VF_ID_MASK << - DMAE_CMD_DST_VF_ID_SHIFT); + if (p_params->flags & QED_DMAE_FLAG_VF_DST) { + opcode |= 1 << DMAE_CMD_DST_VF_ID_VALID_SHIFT; + opcode_b |= p_params->dst_vfid << DMAE_CMD_DST_VF_ID_SHIFT; + } else { + opcode_b |= DMAE_CMD_DST_VF_ID_MASK << DMAE_CMD_DST_VF_ID_SHIFT; + } p_hwfn->dmae_info.p_dmae_cmd->opcode = cpu_to_le32(opcode); - p_hwfn->dmae_info.p_dmae_cmd->opcode_b = cpu_to_le16(opcodeB); + p_hwfn->dmae_info.p_dmae_cmd->opcode_b = cpu_to_le16(opcode_b); } u32 qed_dmae_idx_to_go_cmd(u8 idx) @@ -753,6 +763,28 @@ int qed_dmae_host2grc(struct qed_hwfn *p_hwfn, return rc; } +int +qed_dmae_host2host(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + dma_addr_t source_addr, + dma_addr_t dest_addr, + u32 size_in_dwords, struct qed_dmae_params *p_params) +{ + int rc; + + mutex_lock(&(p_hwfn->dmae_info.mutex)); + + rc = qed_dmae_execute_command(p_hwfn, p_ptt, source_addr, + dest_addr, + QED_DMAE_ADDRESS_HOST_PHYS, + QED_DMAE_ADDRESS_HOST_PHYS, + size_in_dwords, p_params); + + mutex_unlock(&(p_hwfn->dmae_info.mutex)); + + return rc; +} + u16 qed_get_qm_pq(struct qed_hwfn *p_hwfn, enum protocol_type proto, union qed_qm_pq_params *p_params) diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index 1b758bdec587..c209ed49deae 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -24,6 +24,7 @@ #include #include "qed.h" +#include "qed_sriov.h" #include "qed_sp.h" #include "qed_dev_api.h" #include "qed_mcp.h" @@ -749,7 +750,10 @@ static int qed_slowpath_start(struct qed_dev *cdev, struct 
qed_mcp_drv_version drv_version; const u8 *data = NULL; struct qed_hwfn *hwfn; - int rc; + int rc = -EINVAL; + + if (qed_iov_wq_start(cdev)) + goto err; rc = request_firmware(&cdev->firmware, QED_FW_FILE_NAME, &cdev->pdev->dev); @@ -826,6 +830,8 @@ err1: err: release_firmware(cdev->firmware); + qed_iov_wq_stop(cdev, false); + return rc; } @@ -842,6 +848,8 @@ static int qed_slowpath_stop(struct qed_dev *cdev) qed_disable_msix(cdev); qed_nic_reset(cdev); + qed_iov_wq_stop(cdev, true); + release_firmware(cdev->firmware); return 0; diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c index 89469d5aae25..0e439e46fbe9 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_spq.c +++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c @@ -27,6 +27,7 @@ #include "qed_mcp.h" #include "qed_reg_addr.h" #include "qed_sp.h" +#include "qed_sriov.h" /*************************************************************************** * Structures & Definitions @@ -242,10 +243,17 @@ static int qed_async_event_completion(struct qed_hwfn *p_hwfn, struct event_ring_entry *p_eqe) { - DP_NOTICE(p_hwfn, - "Unknown Async completion for protocol: %d\n", - p_eqe->protocol_id); - return -EINVAL; + switch (p_eqe->protocol_id) { + case PROTOCOLID_COMMON: + return qed_sriov_eqe_event(p_hwfn, + p_eqe->opcode, + p_eqe->echo, &p_eqe->data); + default: + DP_NOTICE(p_hwfn, + "Unknown Async completion for protocol: %d\n", + p_eqe->protocol_id); + return -EINVAL; + } } /*************************************************************************** diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c index 685e3fa7bc52..4a6af4264141 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c @@ -31,6 +31,26 @@ bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn, return true; } +static struct qed_vf_info *qed_iov_get_vf_info(struct qed_hwfn *p_hwfn, + u16 relative_vf_id, + bool b_enabled_only) +{ + struct qed_vf_info *vf = NULL; + + if (!p_hwfn->pf_iov_info) { + DP_NOTICE(p_hwfn->cdev, "No iov info\n"); + return NULL; + } + + if (qed_iov_is_valid_vfid(p_hwfn, relative_vf_id, b_enabled_only)) + vf = &p_hwfn->pf_iov_info->vfs_array[relative_vf_id]; + else + DP_ERR(p_hwfn, "qed_iov_get_vf_info: VF[%d] is not enabled\n", + relative_vf_id); + + return vf; +} + static int qed_iov_pci_cfg_info(struct qed_dev *cdev) { struct qed_hw_sriov_info *iov = cdev->p_iov_info; @@ -349,6 +369,232 @@ int qed_iov_hw_info(struct qed_hwfn *p_hwfn) return 0; } +static bool qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn, int vfid) +{ + /* Check PF supports sriov */ + if (!IS_QED_SRIOV(p_hwfn->cdev) || !IS_PF_SRIOV_ALLOC(p_hwfn)) + return false; + + /* Check VF validity */ + if (!qed_iov_is_valid_vfid(p_hwfn, vfid, true)) + return false; + + return true; +} + +static bool qed_iov_tlv_supported(u16 tlvtype) +{ + return CHANNEL_TLV_NONE < tlvtype && tlvtype < CHANNEL_TLV_MAX; +} + +/* place a given tlv on the tlv buffer, continuing current tlv list */ +void *qed_add_tlv(struct qed_hwfn *p_hwfn, u8 **offset, u16 type, u16 length) +{ + struct channel_tlv *tl = (struct channel_tlv *)*offset; + + tl->type = type; + tl->length = length; + + /* Offset should keep pointing to next TLV (the end of the last) */ + *offset += length; + + /* Return a pointer to the start of the added tlv */ + return *offset - length; +} + +/* list the types and lengths of the tlvs on the buffer */ +void qed_dp_tlv_list(struct qed_hwfn *p_hwfn, void *tlvs_list) +{ + u16 
i = 1, total_length = 0; + struct channel_tlv *tlv; + + do { + tlv = (struct channel_tlv *)((u8 *)tlvs_list + total_length); + + /* output tlv */ + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "TLV number %d: type %d, length %d\n", + i, tlv->type, tlv->length); + + if (tlv->type == CHANNEL_TLV_LIST_END) + return; + + /* Validate entry - protect against malicious VFs */ + if (!tlv->length) { + DP_NOTICE(p_hwfn, "TLV of length 0 found\n"); + return; + } + + total_length += tlv->length; + + if (total_length >= sizeof(struct tlv_buffer_size)) { + DP_NOTICE(p_hwfn, "TLV ==> Buffer overflow\n"); + return; + } + + i++; + } while (1); +} + +static void qed_iov_send_response(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_vf_info *p_vf, + u16 length, u8 status) +{ + struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx; + struct qed_dmae_params params; + u8 eng_vf_id; + + mbx->reply_virt->default_resp.hdr.status = status; + + qed_dp_tlv_list(p_hwfn, mbx->reply_virt); + + eng_vf_id = p_vf->abs_vf_id; + + memset(¶ms, 0, sizeof(struct qed_dmae_params)); + params.flags = QED_DMAE_FLAG_VF_DST; + params.dst_vfid = eng_vf_id; + + qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys + sizeof(u64), + mbx->req_virt->first_tlv.reply_address + + sizeof(u64), + (sizeof(union pfvf_tlvs) - sizeof(u64)) / 4, + ¶ms); + + qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys, + mbx->req_virt->first_tlv.reply_address, + sizeof(u64) / 4, ¶ms); + + REG_WR(p_hwfn, + GTT_BAR0_MAP_REG_USDM_RAM + + USTORM_VF_PF_CHANNEL_READY_OFFSET(eng_vf_id), 1); +} + +static void qed_iov_prepare_resp(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_vf_info *vf_info, + u16 type, u16 length, u8 status) +{ + struct qed_iov_vf_mbx *mbx = &vf_info->vf_mbx; + + mbx->offset = (u8 *)mbx->reply_virt; + + qed_add_tlv(p_hwfn, &mbx->offset, type, length); + qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + + qed_iov_send_response(p_hwfn, p_ptt, vf_info, length, status); +} + +static void qed_iov_process_mbx_dummy_resp(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_vf_info *p_vf) +{ + qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf, CHANNEL_TLV_NONE, + sizeof(struct pfvf_def_resp_tlv), + PFVF_STATUS_SUCCESS); +} + +static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, int vfid) +{ + struct qed_iov_vf_mbx *mbx; + struct qed_vf_info *p_vf; + int i; + + p_vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true); + if (!p_vf) + return; + + mbx = &p_vf->vf_mbx; + + /* qed_iov_process_mbx_request */ + DP_VERBOSE(p_hwfn, + QED_MSG_IOV, + "qed_iov_process_mbx_req vfid %d\n", p_vf->abs_vf_id); + + mbx->first_tlv = mbx->req_virt->first_tlv; + + /* check if tlv type is known */ + if (qed_iov_tlv_supported(mbx->first_tlv.tl.type)) { + qed_iov_process_mbx_dummy_resp(p_hwfn, p_ptt, p_vf); + } else { + /* unknown TLV - this may belong to a VF driver from the future + * - a version written after this PF driver was written, which + * supports features unknown as of yet. Too bad since we don't + * support them. Or this may be because someone wrote a crappy + * VF driver and is sending garbage over the channel. + */ + DP_ERR(p_hwfn, + "unknown TLV. type %d length %d. 
first 20 bytes of mailbox buffer:\n", + mbx->first_tlv.tl.type, mbx->first_tlv.tl.length); + + for (i = 0; i < 20; i++) { + DP_VERBOSE(p_hwfn, + QED_MSG_IOV, + "%x ", + mbx->req_virt->tlv_buf_size.tlv_buffer[i]); + } + } +} + +void qed_iov_pf_add_pending_events(struct qed_hwfn *p_hwfn, u8 vfid) +{ + u64 add_bit = 1ULL << (vfid % 64); + + p_hwfn->pf_iov_info->pending_events[vfid / 64] |= add_bit; +} + +static void qed_iov_pf_get_and_clear_pending_events(struct qed_hwfn *p_hwfn, + u64 *events) +{ + u64 *p_pending_events = p_hwfn->pf_iov_info->pending_events; + + memcpy(events, p_pending_events, sizeof(u64) * QED_VF_ARRAY_LENGTH); + memset(p_pending_events, 0, sizeof(u64) * QED_VF_ARRAY_LENGTH); +} + +static int qed_sriov_vfpf_msg(struct qed_hwfn *p_hwfn, + u16 abs_vfid, struct regpair *vf_msg) +{ + u8 min = (u8)p_hwfn->cdev->p_iov_info->first_vf_in_pf; + struct qed_vf_info *p_vf; + + if (!qed_iov_pf_sanity_check(p_hwfn, (int)abs_vfid - min)) { + DP_VERBOSE(p_hwfn, + QED_MSG_IOV, + "Got a message from VF [abs 0x%08x] that cannot be handled by PF\n", + abs_vfid); + return 0; + } + p_vf = &p_hwfn->pf_iov_info->vfs_array[(u8)abs_vfid - min]; + + /* List the physical address of the request so that handler + * could later on copy the message from it. + */ + p_vf->vf_mbx.pending_req = (((u64)vf_msg->hi) << 32) | vf_msg->lo; + + /* Mark the event and schedule the workqueue */ + qed_iov_pf_add_pending_events(p_hwfn, p_vf->relative_vf_id); + qed_schedule_iov(p_hwfn, QED_IOV_WQ_MSG_FLAG); + + return 0; +} + +int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn, + u8 opcode, __le16 echo, union event_ring_data *data) +{ + switch (opcode) { + case COMMON_EVENT_VF_PF_CHANNEL: + return qed_sriov_vfpf_msg(p_hwfn, le16_to_cpu(echo), + &data->vf_pf_channel.msg_addr); + default: + DP_INFO(p_hwfn->cdev, "Unknown sriov eqe event 0x%02x\n", + opcode); + return -EINVAL; + } +} + u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn, u16 rel_vf_id) { struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info; @@ -364,3 +610,142 @@ u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn, u16 rel_vf_id) out: return MAX_NUM_VFS; } + +static int qed_iov_copy_vf_msg(struct qed_hwfn *p_hwfn, struct qed_ptt *ptt, + int vfid) +{ + struct qed_dmae_params params; + struct qed_vf_info *vf_info; + + vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true); + if (!vf_info) + return -EINVAL; + + memset(¶ms, 0, sizeof(struct qed_dmae_params)); + params.flags = QED_DMAE_FLAG_VF_SRC | QED_DMAE_FLAG_COMPLETION_DST; + params.src_vfid = vf_info->abs_vf_id; + + if (qed_dmae_host2host(p_hwfn, ptt, + vf_info->vf_mbx.pending_req, + vf_info->vf_mbx.req_phys, + sizeof(union vfpf_tlvs) / 4, ¶ms)) { + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "Failed to copy message from VF 0x%02x\n", vfid); + + return -EIO; + } + + return 0; +} + +/** + * qed_schedule_iov - schedules IOV task for VF and PF + * @hwfn: hardware function pointer + * @flag: IOV flag for VF/PF + */ +void qed_schedule_iov(struct qed_hwfn *hwfn, enum qed_iov_wq_flag flag) +{ + smp_mb__before_atomic(); + set_bit(flag, &hwfn->iov_task_flags); + smp_mb__after_atomic(); + DP_VERBOSE(hwfn, QED_MSG_IOV, "Scheduling iov task [Flag: %d]\n", flag); + queue_delayed_work(hwfn->iov_wq, &hwfn->iov_task, 0); +} + +static void qed_handle_vf_msg(struct qed_hwfn *hwfn) +{ + u64 events[QED_VF_ARRAY_LENGTH]; + struct qed_ptt *ptt; + int i; + + ptt = qed_ptt_acquire(hwfn); + if (!ptt) { + DP_VERBOSE(hwfn, QED_MSG_IOV, + "Can't acquire PTT; re-scheduling\n"); + qed_schedule_iov(hwfn, QED_IOV_WQ_MSG_FLAG); + return; + 
} + + qed_iov_pf_get_and_clear_pending_events(hwfn, events); + + DP_VERBOSE(hwfn, QED_MSG_IOV, + "Event mask of VF events: 0x%llx 0x%llx 0x%llx\n", + events[0], events[1], events[2]); + + qed_for_each_vf(hwfn, i) { + /* Skip VFs with no pending messages */ + if (!(events[i / 64] & (1ULL << (i % 64)))) + continue; + + DP_VERBOSE(hwfn, QED_MSG_IOV, + "Handling VF message from VF 0x%02x [Abs 0x%02x]\n", + i, hwfn->cdev->p_iov_info->first_vf_in_pf + i); + + /* Copy VF's message to PF's request buffer for that VF */ + if (qed_iov_copy_vf_msg(hwfn, ptt, i)) + continue; + + qed_iov_process_mbx_req(hwfn, ptt, i); + } + + qed_ptt_release(hwfn, ptt); +} + +void qed_iov_pf_task(struct work_struct *work) +{ + struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn, + iov_task.work); + + if (test_and_clear_bit(QED_IOV_WQ_STOP_WQ_FLAG, &hwfn->iov_task_flags)) + return; + + if (test_and_clear_bit(QED_IOV_WQ_MSG_FLAG, &hwfn->iov_task_flags)) + qed_handle_vf_msg(hwfn); +} + +void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first) +{ + int i; + + for_each_hwfn(cdev, i) { + if (!cdev->hwfns[i].iov_wq) + continue; + + if (schedule_first) { + qed_schedule_iov(&cdev->hwfns[i], + QED_IOV_WQ_STOP_WQ_FLAG); + cancel_delayed_work_sync(&cdev->hwfns[i].iov_task); + } + + flush_workqueue(cdev->hwfns[i].iov_wq); + destroy_workqueue(cdev->hwfns[i].iov_wq); + } +} + +int qed_iov_wq_start(struct qed_dev *cdev) +{ + char name[NAME_SIZE]; + int i; + + for_each_hwfn(cdev, i) { + struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; + + /* PFs needs a dedicated workqueue only if they support IOV. */ + if (!IS_PF_SRIOV(p_hwfn)) + continue; + + snprintf(name, NAME_SIZE, "iov-%02x:%02x.%02x", + cdev->pdev->bus->number, + PCI_SLOT(cdev->pdev->devfn), p_hwfn->abs_pf_id); + + p_hwfn->iov_wq = create_singlethread_workqueue(name); + if (!p_hwfn->iov_wq) { + DP_NOTICE(p_hwfn, "Cannot create iov workqueue\n"); + return -ENOMEM; + } + + INIT_DELAYED_WORK(&p_hwfn->iov_task, qed_iov_pf_task); + } + + return 0; +} diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.h b/drivers/net/ethernet/qlogic/qed/qed_sriov.h index 0ac561916e2c..112216812a12 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.h +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.h @@ -50,6 +50,14 @@ struct qed_iov_vf_mbx { dma_addr_t req_phys; union pfvf_tlvs *reply_virt; dma_addr_t reply_phys; + + /* Address in VF where a pending message is located */ + dma_addr_t pending_req; + + u8 *offset; + + /* saved VF request header */ + struct vfpf_first_tlv first_tlv; }; enum vf_state { @@ -96,6 +104,14 @@ struct qed_pf_iov { u32 bulletins_size; }; +enum qed_iov_wq_flag { + QED_IOV_WQ_MSG_FLAG, + QED_IOV_WQ_SET_UNICAST_FILTER_FLAG, + QED_IOV_WQ_BULLETIN_UPDATE_FLAG, + QED_IOV_WQ_STOP_WQ_FLAG, + QED_IOV_WQ_FLR_FLAG, +}; + #ifdef CONFIG_QED_SRIOV /** * @brief - Given a VF index, return index of next [including that] active VF. @@ -147,6 +163,22 @@ void qed_iov_free(struct qed_hwfn *p_hwfn); * @param cdev */ void qed_iov_free_hw_info(struct qed_dev *cdev); + +/** + * @brief qed_sriov_eqe_event - handle async sriov event arrived on eqe. 
+ * + * @param p_hwfn + * @param opcode + * @param echo + * @param data + */ +int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn, + u8 opcode, __le16 echo, union event_ring_data *data); + +void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first); +int qed_iov_wq_start(struct qed_dev *cdev); + +void qed_schedule_iov(struct qed_hwfn *hwfn, enum qed_iov_wq_flag flag); #else static inline u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn, u16 rel_vf_id) @@ -175,6 +207,27 @@ static inline void qed_iov_free(struct qed_hwfn *p_hwfn) static inline void qed_iov_free_hw_info(struct qed_dev *cdev) { } + +static inline int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn, + u8 opcode, + __le16 echo, union event_ring_data *data) +{ + return -EINVAL; +} + +static inline void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first) +{ +} + +static inline int qed_iov_wq_start(struct qed_dev *cdev) +{ + return 0; +} + +static inline void qed_schedule_iov(struct qed_hwfn *hwfn, + enum qed_iov_wq_flag flag) +{ +} #endif #define qed_for_each_vf(_p_hwfn, _i) \ diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.h b/drivers/net/ethernet/qlogic/qed/qed_vf.h index 52cfa4889d88..f0d8de2be581 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_vf.h +++ b/drivers/net/ethernet/qlogic/qed/qed_vf.h @@ -9,16 +9,62 @@ #ifndef _QED_VF_H #define _QED_VF_H +enum { + PFVF_STATUS_WAITING, + PFVF_STATUS_SUCCESS, + PFVF_STATUS_FAILURE, + PFVF_STATUS_NOT_SUPPORTED, + PFVF_STATUS_NO_RESOURCE, + PFVF_STATUS_FORCED, +}; + +/* vf pf channel tlvs */ +/* general tlv header (used for both vf->pf request and pf->vf response) */ +struct channel_tlv { + u16 type; + u16 length; +}; + +/* header of first vf->pf tlv carries the offset used to calculate reponse + * buffer address + */ +struct vfpf_first_tlv { + struct channel_tlv tl; + u32 padding; + u64 reply_address; +}; + +/* header of pf->vf tlvs, carries the status of handling the request */ +struct pfvf_tlv { + struct channel_tlv tl; + u8 status; + u8 padding[3]; +}; + +/* response tlv used for most tlvs */ +struct pfvf_def_resp_tlv { + struct pfvf_tlv hdr; +}; + +/* used to terminate and pad a tlv list */ +struct channel_list_end_tlv { + struct channel_tlv tl; + u8 padding[4]; +}; + #define TLV_BUFFER_SIZE 1024 struct tlv_buffer_size { u8 tlv_buffer[TLV_BUFFER_SIZE]; }; union vfpf_tlvs { + struct vfpf_first_tlv first_tlv; + struct channel_list_end_tlv list_end; struct tlv_buffer_size tlv_buf_size; }; union pfvf_tlvs { + struct pfvf_def_resp_tlv default_resp; struct tlv_buffer_size tlv_buf_size; }; @@ -38,4 +84,10 @@ struct qed_bulletin { u32 size; }; +enum { + CHANNEL_TLV_NONE, /* ends tlv sequence */ + CHANNEL_TLV_LIST_END, + CHANNEL_TLV_MAX +}; + #endif diff --git a/include/linux/qed/common_hsi.h b/include/linux/qed/common_hsi.h index 53ecb37ae563..8914d271ba73 100644 --- a/include/linux/qed/common_hsi.h +++ b/include/linux/qed/common_hsi.h @@ -327,9 +327,14 @@ struct regpair { __le32 hi; }; +struct vf_pf_channel_eqe_data { + struct regpair msg_addr; +}; + /* Event Data Union */ union event_ring_data { u8 bytes[8]; + struct vf_pf_channel_eqe_data vf_pf_channel; struct async_data async_info; }; From 1408cc1fa48c5450c0dc4b40cbd9718ecb09d1c9 Mon Sep 17 00:00:00 2001 From: Yuval Mintz Date: Wed, 11 May 2016 16:36:14 +0300 Subject: [PATCH 1503/1649] qed: Introduce VFs This adds the qed VFs for the first time - The vfs are limited functions, with a very different PCI bar structure [when compared with PFs] to better impose the related security demands associated with them. 
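For reference, the information such a restricted VF cannot read on its own is obtained over the TLV-based VF->PF channel added by the previous patch. The standalone sketch below is not kernel code: fixed-width types stand in for the kernel's u16/u32/u64, and the request type and reply address values are placeholders, since concrete request TLVs only arrive in later patches. It only illustrates how a request buffer is framed before the PF copies and parses it, mirroring the qed_add_tlv() offset handling and the vfpf_first_tlv / channel_list_end_tlv layout from this series.

    /* Standalone sketch of the VF->PF TLV framing; simplified types,
     * placeholder values -- not the kernel implementation.
     */
    #include <stdint.h>
    #include <stdio.h>

    #define SKETCH_TLV_BUFFER_SIZE 1024   /* mirrors TLV_BUFFER_SIZE */
    #define SKETCH_TLV_LIST_END    1      /* mirrors CHANNEL_TLV_LIST_END */
    #define SKETCH_TLV_EXAMPLE_REQ 0xff   /* placeholder request type */

    struct sketch_tlv {
            uint16_t type;
            uint16_t length;
    };

    struct sketch_first_tlv {
            struct sketch_tlv tl;
            uint32_t padding;
            uint64_t reply_address;       /* where the PF DMAs the reply */
    };

    struct sketch_list_end_tlv {
            struct sketch_tlv tl;
            uint8_t padding[4];
    };

    /* Place a TLV header at *offset and advance past it, like qed_add_tlv(). */
    static void *sketch_add_tlv(uint8_t **offset, uint16_t type, uint16_t length)
    {
            struct sketch_tlv *tl = (struct sketch_tlv *)*offset;

            tl->type = type;
            tl->length = length;
            *offset += length;
            return *offset - length;      /* start of the TLV just placed */
    }

    int main(void)
    {
            uint8_t req[SKETCH_TLV_BUFFER_SIZE] = { 0 };
            uint8_t *offset = req;
            struct sketch_first_tlv *first;

            /* The first TLV carries the address the PF should reply to. */
            first = sketch_add_tlv(&offset, SKETCH_TLV_EXAMPLE_REQ,
                                   sizeof(*first));
            first->reply_address = 0x1000;        /* placeholder bus address */

            /* Every request list is terminated by a list-end TLV. */
            sketch_add_tlv(&offset, SKETCH_TLV_LIST_END,
                           sizeof(struct sketch_list_end_tlv));

            printf("request uses %zu of %d bytes\n",
                   (size_t)(offset - req), SKETCH_TLV_BUFFER_SIZE);
            return 0;
    }

On the PF side, qed_dp_tlv_list() walks a buffer framed this way and stops at the list-end entry, which is why every request must be terminated with one.
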
This patch includes the logic neccesary to allow VFs to successfully probe [without actually adding the ability to enable iov]. This includes diverging all the flows that would occur as part of the pci probe of the driver, preventing VF from accessing registers/memories it can't and instead utilize the VF->PF channel to query the PF for needed information. Signed-off-by: Yuval Mintz Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/Makefile | 2 +- drivers/net/ethernet/qlogic/qed/qed.h | 5 + drivers/net/ethernet/qlogic/qed/qed_cxt.c | 184 +++++++-- drivers/net/ethernet/qlogic/qed/qed_cxt.h | 3 + drivers/net/ethernet/qlogic/qed/qed_dev.c | 182 +++++++-- drivers/net/ethernet/qlogic/qed/qed_hsi.h | 47 ++- drivers/net/ethernet/qlogic/qed/qed_hw.c | 12 +- .../net/ethernet/qlogic/qed/qed_init_ops.c | 4 + drivers/net/ethernet/qlogic/qed/qed_int.c | 99 ++++- drivers/net/ethernet/qlogic/qed/qed_int.h | 16 + drivers/net/ethernet/qlogic/qed/qed_l2.c | 57 ++- drivers/net/ethernet/qlogic/qed/qed_main.c | 192 ++++++--- drivers/net/ethernet/qlogic/qed/qed_mcp.c | 81 +++- drivers/net/ethernet/qlogic/qed/qed_mcp.h | 27 +- .../net/ethernet/qlogic/qed/qed_reg_addr.h | 6 + drivers/net/ethernet/qlogic/qed/qed_sp.h | 2 + .../net/ethernet/qlogic/qed/qed_sp_commands.c | 8 + drivers/net/ethernet/qlogic/qed/qed_spq.c | 3 + drivers/net/ethernet/qlogic/qed/qed_sriov.c | 379 +++++++++++++++++- drivers/net/ethernet/qlogic/qed/qed_sriov.h | 49 +++ drivers/net/ethernet/qlogic/qed/qed_vf.c | 357 +++++++++++++++++ drivers/net/ethernet/qlogic/qed/qed_vf.h | 229 +++++++++++ drivers/net/ethernet/qlogic/qede/qede_main.c | 13 +- include/linux/qed/common_hsi.h | 57 +++ include/linux/qed/qed_if.h | 10 +- 25 files changed, 1839 insertions(+), 185 deletions(-) create mode 100644 drivers/net/ethernet/qlogic/qed/qed_vf.c diff --git a/drivers/net/ethernet/qlogic/qed/Makefile b/drivers/net/ethernet/qlogic/qed/Makefile index e11a809034ea..a44874562cfd 100644 --- a/drivers/net/ethernet/qlogic/qed/Makefile +++ b/drivers/net/ethernet/qlogic/qed/Makefile @@ -3,4 +3,4 @@ obj-$(CONFIG_QED) := qed.o qed-y := qed_cxt.o qed_dev.o qed_hw.o qed_init_fw_funcs.o qed_init_ops.o \ qed_int.o qed_main.o qed_mcp.o qed_sp_commands.o qed_spq.o qed_l2.o \ qed_selftest.o -qed-$(CONFIG_QED_SRIOV) += qed_sriov.o +qed-$(CONFIG_QED_SRIOV) += qed_sriov.o qed_vf.o diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h index 01f9b6c880bd..f9a3576305a1 100644 --- a/drivers/net/ethernet/qlogic/qed/qed.h +++ b/drivers/net/ethernet/qlogic/qed/qed.h @@ -311,6 +311,8 @@ struct qed_hwfn { bool first_on_engine; bool hw_init_done; + u8 num_funcs_on_engine; + /* BAR access */ void __iomem *regview; void __iomem *doorbells; @@ -361,6 +363,7 @@ struct qed_hwfn { /* True if the driver requests for the link */ bool b_drv_link_init; + struct qed_vf_iov *vf_iov_info; struct qed_pf_iov *pf_iov_info; struct qed_mcp_info *mcp_info; @@ -497,6 +500,8 @@ struct qed_dev { #define IS_QED_SRIOV(cdev) (!!(cdev)->p_iov_info) unsigned long tunn_mode; + + bool b_is_vf; u32 drv_type; struct qed_eth_stats *reset_stats; diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c index fc767c07a264..ac284c58d8c2 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c +++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c @@ -24,11 +24,13 @@ #include "qed_hw.h" #include "qed_init_ops.h" #include "qed_reg_addr.h" +#include "qed_sriov.h" /* Max number of connection types in HW (DQ/CDU etc.) 
*/ #define MAX_CONN_TYPES PROTOCOLID_COMMON #define NUM_TASK_TYPES 2 #define NUM_TASK_PF_SEGMENTS 4 +#define NUM_TASK_VF_SEGMENTS 1 /* QM constants */ #define QM_PQ_ELEMENT_SIZE 4 /* in bytes */ @@ -63,10 +65,12 @@ union conn_context { struct qed_conn_type_cfg { u32 cid_count; u32 cid_start; + u32 cids_per_vf; }; /* ILT Client configuration, Per connection type (protocol) resources. */ #define ILT_CLI_PF_BLOCKS (1 + NUM_TASK_PF_SEGMENTS * 2) +#define ILT_CLI_VF_BLOCKS (1 + NUM_TASK_VF_SEGMENTS * 2) #define CDUC_BLK (0) enum ilt_clients { @@ -97,6 +101,10 @@ struct qed_ilt_client_cfg { /* ILT client blocks for PF */ struct qed_ilt_cli_blk pf_blks[ILT_CLI_PF_BLOCKS]; u32 pf_total_lines; + + /* ILT client blocks for VFs */ + struct qed_ilt_cli_blk vf_blks[ILT_CLI_VF_BLOCKS]; + u32 vf_total_lines; }; /* Per Path - @@ -123,6 +131,11 @@ struct qed_cxt_mngr { /* computed ILT structure */ struct qed_ilt_client_cfg clients[ILT_CLI_MAX]; + /* total number of VFs for this hwfn - + * ALL VFs are symmetric in terms of HW resources + */ + u32 vf_count; + /* Acquired CIDs */ struct qed_cid_acquired_map acquired[MAX_CONN_TYPES]; @@ -131,37 +144,60 @@ struct qed_cxt_mngr { u32 pf_start_line; }; -static u32 qed_cxt_cdu_iids(struct qed_cxt_mngr *p_mngr) +/* counts the iids for the CDU/CDUC ILT client configuration */ +struct qed_cdu_iids { + u32 pf_cids; + u32 per_vf_cids; +}; + +static void qed_cxt_cdu_iids(struct qed_cxt_mngr *p_mngr, + struct qed_cdu_iids *iids) { - u32 type, pf_cids = 0; + u32 type; - for (type = 0; type < MAX_CONN_TYPES; type++) - pf_cids += p_mngr->conn_cfg[type].cid_count; - - return pf_cids; + for (type = 0; type < MAX_CONN_TYPES; type++) { + iids->pf_cids += p_mngr->conn_cfg[type].cid_count; + iids->per_vf_cids += p_mngr->conn_cfg[type].cids_per_vf; + } } static void qed_cxt_qm_iids(struct qed_hwfn *p_hwfn, struct qed_qm_iids *iids) { struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; - int type; + u32 vf_cids = 0, type; - for (type = 0; type < MAX_CONN_TYPES; type++) + for (type = 0; type < MAX_CONN_TYPES; type++) { iids->cids += p_mngr->conn_cfg[type].cid_count; + vf_cids += p_mngr->conn_cfg[type].cids_per_vf; + } - DP_VERBOSE(p_hwfn, QED_MSG_ILT, "iids: CIDS %08x\n", iids->cids); + iids->vf_cids += vf_cids * p_mngr->vf_count; + DP_VERBOSE(p_hwfn, QED_MSG_ILT, + "iids: CIDS %08x vf_cids %08x\n", + iids->cids, iids->vf_cids); } /* set the iids count per protocol */ static void qed_cxt_set_proto_cid_count(struct qed_hwfn *p_hwfn, enum protocol_type type, - u32 cid_count) + u32 cid_count, u32 vf_cid_cnt) { struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr; struct qed_conn_type_cfg *p_conn = &p_mgr->conn_cfg[type]; p_conn->cid_count = roundup(cid_count, DQ_RANGE_ALIGN); + p_conn->cids_per_vf = roundup(vf_cid_cnt, DQ_RANGE_ALIGN); +} + +u32 qed_cxt_get_proto_cid_count(struct qed_hwfn *p_hwfn, + enum protocol_type type, + u32 *vf_cid) +{ + if (vf_cid) + *vf_cid = p_hwfn->p_cxt_mngr->conn_cfg[type].cids_per_vf; + + return p_hwfn->p_cxt_mngr->conn_cfg[type].cid_count; } static void qed_ilt_cli_blk_fill(struct qed_ilt_client_cfg *p_cli, @@ -210,10 +246,12 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn) struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; struct qed_ilt_client_cfg *p_cli; struct qed_ilt_cli_blk *p_blk; - u32 curr_line, total, pf_cids; + struct qed_cdu_iids cdu_iids; struct qed_qm_iids qm_iids; + u32 curr_line, total, i; memset(&qm_iids, 0, sizeof(qm_iids)); + memset(&cdu_iids, 0, sizeof(cdu_iids)); p_mngr->pf_start_line = RESC_START(p_hwfn, QED_ILT); @@ -224,14 +262,16 @@ 
int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn) /* CDUC */ p_cli = &p_mngr->clients[ILT_CLI_CDUC]; curr_line = p_mngr->pf_start_line; + + /* CDUC PF */ p_cli->pf_total_lines = 0; /* get the counters for the CDUC and QM clients */ - pf_cids = qed_cxt_cdu_iids(p_mngr); + qed_cxt_cdu_iids(p_mngr, &cdu_iids); p_blk = &p_cli->pf_blks[CDUC_BLK]; - total = pf_cids * CONN_CXT_SIZE(p_hwfn); + total = cdu_iids.pf_cids * CONN_CXT_SIZE(p_hwfn); qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line, total, CONN_CXT_SIZE(p_hwfn)); @@ -239,17 +279,36 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn) qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_CDUC); p_cli->pf_total_lines = curr_line - p_blk->start_line; + /* CDUC VF */ + p_blk = &p_cli->vf_blks[CDUC_BLK]; + total = cdu_iids.per_vf_cids * CONN_CXT_SIZE(p_hwfn); + + qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line, + total, CONN_CXT_SIZE(p_hwfn)); + + qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_CDUC); + p_cli->vf_total_lines = curr_line - p_blk->start_line; + + for (i = 1; i < p_mngr->vf_count; i++) + qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, + ILT_CLI_CDUC); + /* QM */ p_cli = &p_mngr->clients[ILT_CLI_QM]; p_blk = &p_cli->pf_blks[0]; qed_cxt_qm_iids(p_hwfn, &qm_iids); - total = qed_qm_pf_mem_size(p_hwfn->rel_pf_id, qm_iids.cids, 0, 0, - p_hwfn->qm_info.num_pqs, 0); + total = qed_qm_pf_mem_size(p_hwfn->rel_pf_id, qm_iids.cids, + qm_iids.vf_cids, 0, + p_hwfn->qm_info.num_pqs, + p_hwfn->qm_info.num_vf_pqs); - DP_VERBOSE(p_hwfn, QED_MSG_ILT, - "QM ILT Info, (cids=%d, num_pqs=%d, memory_size=%d)\n", - qm_iids.cids, p_hwfn->qm_info.num_pqs, total); + DP_VERBOSE(p_hwfn, + QED_MSG_ILT, + "QM ILT Info, (cids=%d, vf_cids=%d, num_pqs=%d, num_vf_pqs=%d, memory_size=%d)\n", + qm_iids.cids, + qm_iids.vf_cids, + p_hwfn->qm_info.num_pqs, p_hwfn->qm_info.num_vf_pqs, total); qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line, total * 0x1000, @@ -358,7 +417,7 @@ static int qed_ilt_shadow_alloc(struct qed_hwfn *p_hwfn) struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; struct qed_ilt_client_cfg *clients = p_mngr->clients; struct qed_ilt_cli_blk *p_blk; - u32 size, i, j; + u32 size, i, j, k; int rc; size = qed_cxt_ilt_shadow_size(clients); @@ -383,6 +442,16 @@ static int qed_ilt_shadow_alloc(struct qed_hwfn *p_hwfn) if (rc != 0) goto ilt_shadow_fail; } + for (k = 0; k < p_mngr->vf_count; k++) { + for (j = 0; j < ILT_CLI_VF_BLOCKS; j++) { + u32 lines = clients[i].vf_total_lines * k; + + p_blk = &clients[i].vf_blks[j]; + rc = qed_ilt_blk_alloc(p_hwfn, p_blk, i, lines); + if (rc != 0) + goto ilt_shadow_fail; + } + } } return 0; @@ -467,6 +536,9 @@ int qed_cxt_mngr_alloc(struct qed_hwfn *p_hwfn) for (i = 0; i < ILT_CLI_MAX; i++) p_mngr->clients[i].p_size.val = ILT_DEFAULT_HW_P_SIZE; + if (p_hwfn->cdev->p_iov_info) + p_mngr->vf_count = p_hwfn->cdev->p_iov_info->total_vfs; + /* Set the cxt mangr pointer priori to further allocations */ p_hwfn->p_cxt_mngr = p_mngr; @@ -579,8 +651,10 @@ void qed_qm_init_pf(struct qed_hwfn *p_hwfn) params.max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port; params.is_first_pf = p_hwfn->first_on_engine; params.num_pf_cids = iids.cids; + params.num_vf_cids = iids.vf_cids; params.start_pq = qm_info->start_pq; - params.num_pf_pqs = qm_info->num_pqs; + params.num_pf_pqs = qm_info->num_pqs - qm_info->num_vf_pqs; + params.num_vf_pqs = qm_info->num_vf_pqs; params.start_vport = qm_info->start_vport; params.num_vports = qm_info->num_vports; params.pf_wfq = qm_info->pf_wfq; @@ -610,26 +684,55 @@ static int 
qed_cm_init_pf(struct qed_hwfn *p_hwfn) static void qed_dq_init_pf(struct qed_hwfn *p_hwfn) { struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; - u32 dq_pf_max_cid = 0; + u32 dq_pf_max_cid = 0, dq_vf_max_cid = 0; dq_pf_max_cid += (p_mngr->conn_cfg[0].cid_count >> DQ_RANGE_SHIFT); STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_0_RT_OFFSET, dq_pf_max_cid); + dq_vf_max_cid += (p_mngr->conn_cfg[0].cids_per_vf >> DQ_RANGE_SHIFT); + STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_0_RT_OFFSET, dq_vf_max_cid); + dq_pf_max_cid += (p_mngr->conn_cfg[1].cid_count >> DQ_RANGE_SHIFT); STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_1_RT_OFFSET, dq_pf_max_cid); + dq_vf_max_cid += (p_mngr->conn_cfg[1].cids_per_vf >> DQ_RANGE_SHIFT); + STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_1_RT_OFFSET, dq_vf_max_cid); + dq_pf_max_cid += (p_mngr->conn_cfg[2].cid_count >> DQ_RANGE_SHIFT); STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_2_RT_OFFSET, dq_pf_max_cid); + dq_vf_max_cid += (p_mngr->conn_cfg[2].cids_per_vf >> DQ_RANGE_SHIFT); + STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_2_RT_OFFSET, dq_vf_max_cid); + dq_pf_max_cid += (p_mngr->conn_cfg[3].cid_count >> DQ_RANGE_SHIFT); STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_3_RT_OFFSET, dq_pf_max_cid); + dq_vf_max_cid += (p_mngr->conn_cfg[3].cids_per_vf >> DQ_RANGE_SHIFT); + STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_3_RT_OFFSET, dq_vf_max_cid); + dq_pf_max_cid += (p_mngr->conn_cfg[4].cid_count >> DQ_RANGE_SHIFT); STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_4_RT_OFFSET, dq_pf_max_cid); - /* 5 - PF */ + dq_vf_max_cid += (p_mngr->conn_cfg[4].cids_per_vf >> DQ_RANGE_SHIFT); + STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_4_RT_OFFSET, dq_vf_max_cid); + dq_pf_max_cid += (p_mngr->conn_cfg[5].cid_count >> DQ_RANGE_SHIFT); STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_5_RT_OFFSET, dq_pf_max_cid); + + dq_vf_max_cid += (p_mngr->conn_cfg[5].cids_per_vf >> DQ_RANGE_SHIFT); + STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_5_RT_OFFSET, dq_vf_max_cid); + + /* Connection types 6 & 7 are not in use, yet they must be configured + * as the highest possible connection. Not configuring them means the + * defaults will be used, and with a large number of cids a bug may + * occur, if the defaults will be smaller than dq_pf_max_cid / + * dq_vf_max_cid. 
+ */ + STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_6_RT_OFFSET, dq_pf_max_cid); + STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_6_RT_OFFSET, dq_vf_max_cid); + + STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_7_RT_OFFSET, dq_pf_max_cid); + STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_7_RT_OFFSET, dq_vf_max_cid); } static void qed_ilt_bounds_init(struct qed_hwfn *p_hwfn) @@ -653,6 +756,38 @@ static void qed_ilt_bounds_init(struct qed_hwfn *p_hwfn) } } +static void qed_ilt_vf_bounds_init(struct qed_hwfn *p_hwfn) +{ + struct qed_ilt_client_cfg *p_cli; + u32 blk_factor; + + /* For simplicty we set the 'block' to be an ILT page */ + if (p_hwfn->cdev->p_iov_info) { + struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info; + + STORE_RT_REG(p_hwfn, + PSWRQ2_REG_VF_BASE_RT_OFFSET, + p_iov->first_vf_in_pf); + STORE_RT_REG(p_hwfn, + PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET, + p_iov->first_vf_in_pf + p_iov->total_vfs); + } + + p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC]; + blk_factor = ilog2(ILT_PAGE_IN_BYTES(p_cli->p_size.val) >> 10); + if (p_cli->active) { + STORE_RT_REG(p_hwfn, + PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET, + blk_factor); + STORE_RT_REG(p_hwfn, + PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET, + p_cli->pf_total_lines); + STORE_RT_REG(p_hwfn, + PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET, + p_cli->vf_total_lines); + } +} + /* ILT (PSWRQ2) PF */ static void qed_ilt_init_pf(struct qed_hwfn *p_hwfn) { @@ -662,6 +797,7 @@ static void qed_ilt_init_pf(struct qed_hwfn *p_hwfn) u32 line, rt_offst, i; qed_ilt_bounds_init(p_hwfn); + qed_ilt_vf_bounds_init(p_hwfn); p_mngr = p_hwfn->p_cxt_mngr; p_shdw = p_mngr->ilt_shadow; @@ -839,10 +975,10 @@ int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn) /* Set the number of required CORE connections */ u32 core_cids = 1; /* SPQ */ - qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_CORE, core_cids); + qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_CORE, core_cids, 0); qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_ETH, - p_params->num_cons); + p_params->num_cons, 1); return 0; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.h b/drivers/net/ethernet/qlogic/qed/qed_cxt.h index c8e1f5e5c42b..078ff3fd7920 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_cxt.h +++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.h @@ -51,6 +51,9 @@ enum qed_cxt_elem_type { QED_ELEM_TASK }; +u32 qed_cxt_get_proto_cid_count(struct qed_hwfn *p_hwfn, + enum protocol_type type, u32 *vf_cid); + /** * @brief qed_cxt_set_pf_params - Set the PF params for cxt init * diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index 7a359c45360f..362e8db2b374 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -41,10 +41,14 @@ enum BAR_ID { static u32 qed_hw_bar_size(struct qed_hwfn *p_hwfn, enum BAR_ID bar_id) { - u32 bar_reg = (bar_id == BAR_ID_0 ? - PGLUE_B_REG_PF_BAR0_SIZE : PGLUE_B_REG_PF_BAR1_SIZE); - u32 val = qed_rd(p_hwfn, p_hwfn->p_main_ptt, bar_reg); + u32 bar_reg = (bar_id == BAR_ID_0 ? 
+ PGLUE_B_REG_PF_BAR0_SIZE : PGLUE_B_REG_PF_BAR1_SIZE); + u32 val; + if (IS_VF(p_hwfn->cdev)) + return 1 << 17; + + val = qed_rd(p_hwfn, p_hwfn->p_main_ptt, bar_reg); if (val) return 1 << (val + 15); @@ -114,6 +118,9 @@ void qed_resc_free(struct qed_dev *cdev) { int i; + if (IS_VF(cdev)) + return; + kfree(cdev->fw_data); cdev->fw_data = NULL; @@ -144,14 +151,19 @@ void qed_resc_free(struct qed_dev *cdev) static int qed_init_qm_info(struct qed_hwfn *p_hwfn) { + u8 num_vports, vf_offset = 0, i, vport_id, num_ports, curr_queue = 0; struct qed_qm_info *qm_info = &p_hwfn->qm_info; struct init_qm_port_params *p_qm_port; - u8 num_vports, i, vport_id, num_ports; u16 num_pqs, multi_cos_tcs = 1; + u16 num_vfs = 0; +#ifdef CONFIG_QED_SRIOV + if (p_hwfn->cdev->p_iov_info) + num_vfs = p_hwfn->cdev->p_iov_info->total_vfs; +#endif memset(qm_info, 0, sizeof(*qm_info)); - num_pqs = multi_cos_tcs + 1; /* The '1' is for pure-LB */ + num_pqs = multi_cos_tcs + num_vfs + 1; /* The '1' is for pure-LB */ num_vports = (u8)RESC_NUM(p_hwfn, QED_VPORT); /* Sanity checking that setup requires legal number of resources */ @@ -187,8 +199,9 @@ static int qed_init_qm_info(struct qed_hwfn *p_hwfn) vport_id = (u8)RESC_START(p_hwfn, QED_VPORT); /* First init per-TC PQs */ - for (i = 0; i < multi_cos_tcs; i++) { - struct init_qm_pq_params *params = &qm_info->qm_pq_params[i]; + for (i = 0; i < multi_cos_tcs; i++, curr_queue++) { + struct init_qm_pq_params *params = + &qm_info->qm_pq_params[curr_queue]; params->vport_id = vport_id; params->tc_id = p_hwfn->hw_info.non_offload_tc; @@ -196,13 +209,26 @@ static int qed_init_qm_info(struct qed_hwfn *p_hwfn) } /* Then init pure-LB PQ */ - qm_info->pure_lb_pq = i; - qm_info->qm_pq_params[i].vport_id = (u8)RESC_START(p_hwfn, QED_VPORT); - qm_info->qm_pq_params[i].tc_id = PURE_LB_TC; - qm_info->qm_pq_params[i].wrr_group = 1; - i++; + qm_info->pure_lb_pq = curr_queue; + qm_info->qm_pq_params[curr_queue].vport_id = + (u8) RESC_START(p_hwfn, QED_VPORT); + qm_info->qm_pq_params[curr_queue].tc_id = PURE_LB_TC; + qm_info->qm_pq_params[curr_queue].wrr_group = 1; + curr_queue++; qm_info->offload_pq = 0; + /* Then init per-VF PQs */ + vf_offset = curr_queue; + for (i = 0; i < num_vfs; i++) { + /* First vport is used by the PF */ + qm_info->qm_pq_params[curr_queue].vport_id = vport_id + i + 1; + qm_info->qm_pq_params[curr_queue].tc_id = + p_hwfn->hw_info.non_offload_tc; + qm_info->qm_pq_params[curr_queue].wrr_group = 1; + curr_queue++; + } + + qm_info->vf_queues_offset = vf_offset; qm_info->num_pqs = num_pqs; qm_info->num_vports = num_vports; @@ -220,7 +246,8 @@ static int qed_init_qm_info(struct qed_hwfn *p_hwfn) qm_info->start_pq = (u16)RESC_START(p_hwfn, QED_PQ); - qm_info->start_vport = (u8)RESC_START(p_hwfn, QED_VPORT); + qm_info->num_vf_pqs = num_vfs; + qm_info->start_vport = (u8) RESC_START(p_hwfn, QED_VPORT); for (i = 0; i < qm_info->num_vports; i++) qm_info->qm_vport_params[i].vport_wfq = 1; @@ -244,6 +271,9 @@ int qed_resc_alloc(struct qed_dev *cdev) struct qed_eq *p_eq; int i, rc = 0; + if (IS_VF(cdev)) + return rc; + cdev->fw_data = kzalloc(sizeof(*cdev->fw_data), GFP_KERNEL); if (!cdev->fw_data) return -ENOMEM; @@ -364,6 +394,9 @@ void qed_resc_setup(struct qed_dev *cdev) { int i; + if (IS_VF(cdev)) + return; + for_each_hwfn(cdev, i) { struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; @@ -508,7 +541,9 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn, struct qed_qm_info *qm_info = &p_hwfn->qm_info; struct qed_qm_common_rt_init_params params; struct qed_dev *cdev = p_hwfn->cdev; + u32 
concrete_fid; int rc = 0; + u8 vf_id; qed_init_cau_rt_data(cdev); @@ -558,6 +593,14 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn, qed_wr(p_hwfn, p_ptt, 0x20b4, qed_rd(p_hwfn, p_ptt, 0x20b4) & ~0x10); + for (vf_id = 0; vf_id < MAX_NUM_VFS_BB; vf_id++) { + concrete_fid = qed_vfid_to_concrete(p_hwfn, vf_id); + qed_fid_pretend(p_hwfn, p_ptt, (u16) concrete_fid); + qed_wr(p_hwfn, p_ptt, CCFC_REG_STRONG_ENABLE_VF, 0x1); + } + /* pretend to original PF */ + qed_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id); + return rc; } @@ -698,13 +741,20 @@ int qed_hw_init(struct qed_dev *cdev, u32 load_code, param; int rc, mfw_rc, i; - rc = qed_init_fw_data(cdev, bin_fw_data); - if (rc != 0) - return rc; + if (IS_PF(cdev)) { + rc = qed_init_fw_data(cdev, bin_fw_data); + if (rc != 0) + return rc; + } for_each_hwfn(cdev, i) { struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; + if (IS_VF(cdev)) { + p_hwfn->b_int_enabled = 1; + continue; + } + /* Enable DMAE in PXP */ rc = qed_change_pci_hwfn(p_hwfn, p_hwfn->p_main_ptt, true); @@ -829,6 +879,11 @@ int qed_hw_stop(struct qed_dev *cdev) DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Stopping hw/fw\n"); + if (IS_VF(cdev)) { + /* To be implemented in a later patch */ + continue; + } + /* mark the hw as uninitialized... */ p_hwfn->hw_init_done = false; @@ -860,15 +915,16 @@ int qed_hw_stop(struct qed_dev *cdev) usleep_range(1000, 2000); } - /* Disable DMAE in PXP - in CMT, this should only be done for - * first hw-function, and only after all transactions have - * stopped for all active hw-functions. - */ - t_rc = qed_change_pci_hwfn(&cdev->hwfns[0], - cdev->hwfns[0].p_main_ptt, - false); - if (t_rc != 0) - rc = t_rc; + if (IS_PF(cdev)) { + /* Disable DMAE in PXP - in CMT, this should only be done for + * first hw-function, and only after all transactions have + * stopped for all active hw-functions. + */ + t_rc = qed_change_pci_hwfn(&cdev->hwfns[0], + cdev->hwfns[0].p_main_ptt, false); + if (t_rc != 0) + rc = t_rc; + } return rc; } @@ -932,6 +988,11 @@ int qed_hw_reset(struct qed_dev *cdev) for_each_hwfn(cdev, i) { struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; + if (IS_VF(cdev)) { + /* Will be implemented in a later patch */ + continue; + } + DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Resetting hw/fw\n"); /* Check for incorrect states */ @@ -1027,11 +1088,10 @@ static void qed_hw_set_feat(struct qed_hwfn *p_hwfn) static void qed_hw_get_resc(struct qed_hwfn *p_hwfn) { u32 *resc_start = p_hwfn->hw_info.resc_start; + u8 num_funcs = p_hwfn->num_funcs_on_engine; u32 *resc_num = p_hwfn->hw_info.resc_num; struct qed_sb_cnt_info sb_cnt_info; - int num_funcs, i; - - num_funcs = MAX_NUM_PFS_BB; + int i; memset(&sb_cnt_info, 0, sizeof(sb_cnt_info)); qed_int_get_num_sbs(p_hwfn, &sb_cnt_info); @@ -1238,6 +1298,51 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, return qed_mcp_fill_shmem_func_info(p_hwfn, p_ptt); } +static void qed_get_num_funcs(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + u32 reg_function_hide, tmp, eng_mask; + u8 num_funcs; + + num_funcs = MAX_NUM_PFS_BB; + + /* Bit 0 of MISCS_REG_FUNCTION_HIDE indicates whether the bypass values + * in the other bits are selected. + * Bits 1-15 are for functions 1-15, respectively, and their value is + * '0' only for enabled functions (function 0 always exists and + * enabled). + * In case of CMT, only the "even" functions are enabled, and thus the + * number of functions for both hwfns is learnt from the same bits. 
+ */ + reg_function_hide = qed_rd(p_hwfn, p_ptt, MISCS_REG_FUNCTION_HIDE); + + if (reg_function_hide & 0x1) { + if (QED_PATH_ID(p_hwfn) && p_hwfn->cdev->num_hwfns == 1) { + num_funcs = 0; + eng_mask = 0xaaaa; + } else { + num_funcs = 1; + eng_mask = 0x5554; + } + + /* Get the number of the enabled functions on the engine */ + tmp = (reg_function_hide ^ 0xffffffff) & eng_mask; + while (tmp) { + if (tmp & 0x1) + num_funcs++; + tmp >>= 0x1; + } + } + + p_hwfn->num_funcs_on_engine = num_funcs; + + DP_VERBOSE(p_hwfn, + NETIF_MSG_PROBE, + "PF [rel_id %d, abs_id %d] within the %d enabled functions on the engine\n", + p_hwfn->rel_pf_id, + p_hwfn->abs_pf_id, + p_hwfn->num_funcs_on_engine); +} + static int qed_get_hw_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, @@ -1296,6 +1401,8 @@ qed_get_hw_info(struct qed_hwfn *p_hwfn, p_hwfn->hw_info.personality = protocol; } + qed_get_num_funcs(p_hwfn, p_ptt); + qed_hw_get_resc(p_hwfn); return rc; @@ -1361,6 +1468,9 @@ static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn, p_hwfn->regview = p_regview; p_hwfn->doorbells = p_doorbells; + if (IS_VF(p_hwfn->cdev)) + return qed_vf_hw_prepare(p_hwfn); + /* Validate that chip access is feasible */ if (REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR) == 0xffffffff) { DP_ERR(p_hwfn, @@ -1428,7 +1538,8 @@ int qed_hw_prepare(struct qed_dev *cdev, int rc; /* Store the precompiled init data ptrs */ - qed_init_iro_array(cdev); + if (IS_PF(cdev)) + qed_init_iro_array(cdev); /* Initialize the first hwfn - will learn number of hwfns */ rc = qed_hw_prepare_single(p_hwfn, @@ -1460,9 +1571,11 @@ int qed_hw_prepare(struct qed_dev *cdev, * initiliazed hwfn 0. */ if (rc) { - qed_init_free(p_hwfn); - qed_mcp_free(p_hwfn); - qed_hw_hwfn_free(p_hwfn); + if (IS_PF(cdev)) { + qed_init_free(p_hwfn); + qed_mcp_free(p_hwfn); + qed_hw_hwfn_free(p_hwfn); + } } } @@ -1476,6 +1589,11 @@ void qed_hw_remove(struct qed_dev *cdev) for_each_hwfn(cdev, i) { struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; + if (IS_VF(cdev)) { + /* Will be implemented in a later patch */ + continue; + } + qed_init_free(p_hwfn); qed_hw_hwfn_free(p_hwfn); qed_mcp_free(p_hwfn); diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h index 6ba197c107ed..c511106870d0 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h @@ -29,7 +29,7 @@ struct qed_ptt; enum common_event_opcode { COMMON_EVENT_PF_START, COMMON_EVENT_PF_STOP, - COMMON_EVENT_RESERVED, + COMMON_EVENT_VF_START, COMMON_EVENT_RESERVED2, COMMON_EVENT_VF_PF_CHANNEL, COMMON_EVENT_RESERVED4, @@ -44,7 +44,7 @@ enum common_ramrod_cmd_id { COMMON_RAMROD_UNUSED, COMMON_RAMROD_PF_START /* PF Function Start Ramrod */, COMMON_RAMROD_PF_STOP /* PF Function Stop Ramrod */, - COMMON_RAMROD_RESERVED, + COMMON_RAMROD_VF_START, COMMON_RAMROD_RESERVED2, COMMON_RAMROD_PF_UPDATE, COMMON_RAMROD_EMPTY, @@ -573,6 +573,14 @@ union event_ring_element { struct event_ring_next_addr next_addr; }; +struct mstorm_non_trigger_vf_zone { + struct eth_mstorm_per_queue_stat eth_queue_stat; +}; + +struct mstorm_vf_zone { + struct mstorm_non_trigger_vf_zone non_trigger; +}; + enum personality_type { BAD_PERSONALITY_TYP, PERSONALITY_RESERVED, @@ -671,6 +679,16 @@ enum ports_mode { MAX_PORTS_MODE }; +struct pstorm_non_trigger_vf_zone { + struct eth_pstorm_per_queue_stat eth_queue_stat; + struct regpair reserved[2]; +}; + +struct pstorm_vf_zone { + struct pstorm_non_trigger_vf_zone non_trigger; + struct regpair reserved[7]; +}; + /* Ramrod Header of SPQE */ struct 
ramrod_header { __le32 cid /* Slowpath Connection CID */; @@ -700,6 +718,29 @@ struct tstorm_per_port_stat { struct regpair preroce_irregular_pkt; }; +struct ustorm_non_trigger_vf_zone { + struct eth_ustorm_per_queue_stat eth_queue_stat; + struct regpair vf_pf_msg_addr; +}; + +struct ustorm_trigger_vf_zone { + u8 vf_pf_msg_valid; + u8 reserved[7]; +}; + +struct ustorm_vf_zone { + struct ustorm_non_trigger_vf_zone non_trigger; + struct ustorm_trigger_vf_zone trigger; +}; + +struct vf_start_ramrod_data { + u8 vf_id; + u8 enable_flr_ack; + __le16 opaque_fid; + u8 personality; + u8 reserved[3]; +}; + struct atten_status_block { __le32 atten_bits; __le32 atten_ack; @@ -1026,7 +1067,7 @@ enum init_phases { PHASE_ENGINE, PHASE_PORT, PHASE_PF, - PHASE_RESERVED, + PHASE_VF, PHASE_QM_PF, MAX_INIT_PHASES }; diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.c b/drivers/net/ethernet/qlogic/qed/qed_hw.c index a9be5a422d2d..0ada7fdb91bc 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hw.c +++ b/drivers/net/ethernet/qlogic/qed/qed_hw.c @@ -23,6 +23,7 @@ #include "qed_hsi.h" #include "qed_hw.h" #include "qed_reg_addr.h" +#include "qed_sriov.h" #define QED_BAR_ACQUIRE_TIMEOUT 1000 @@ -236,8 +237,12 @@ static void qed_memcpy_hw(struct qed_hwfn *p_hwfn, quota = min_t(size_t, n - done, PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE); - qed_ptt_set_win(p_hwfn, p_ptt, hw_addr + done); - hw_offset = qed_ptt_get_bar_addr(p_ptt); + if (IS_PF(p_hwfn->cdev)) { + qed_ptt_set_win(p_hwfn, p_ptt, hw_addr + done); + hw_offset = qed_ptt_get_bar_addr(p_ptt); + } else { + hw_offset = hw_addr + done; + } dw_count = quota / 4; host_addr = (u32 *)((u8 *)addr + done); @@ -808,6 +813,9 @@ u16 qed_get_qm_pq(struct qed_hwfn *p_hwfn, break; case PROTOCOLID_ETH: pq_id = p_params->eth.tc; + if (p_params->eth.is_vf) + pq_id += p_hwfn->qm_info.vf_queues_offset + + p_params->eth.vf_id; break; default: pq_id = 0; diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c index 3269b3610e03..d358c3bb1308 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c +++ b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c @@ -18,6 +18,7 @@ #include "qed_hw.h" #include "qed_init_ops.h" #include "qed_reg_addr.h" +#include "qed_sriov.h" #define QED_INIT_MAX_POLL_COUNT 100 #define QED_INIT_POLL_PERIOD_US 500 @@ -128,6 +129,9 @@ int qed_init_alloc(struct qed_hwfn *p_hwfn) { struct qed_rt_data *rt_data = &p_hwfn->rt_data; + if (IS_VF(p_hwfn->cdev)) + return 0; + rt_data->b_valid = kzalloc(sizeof(bool) * RUNTIME_ARRAY_SIZE, GFP_KERNEL); if (!rt_data->b_valid) diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c index 2017b0121f5f..bbecfa579364 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_int.c +++ b/drivers/net/ethernet/qlogic/qed/qed_int.c @@ -26,6 +26,8 @@ #include "qed_mcp.h" #include "qed_reg_addr.h" #include "qed_sp.h" +#include "qed_sriov.h" +#include "qed_vf.h" struct qed_pi_info { qed_int_comp_cb_t comp_cb; @@ -2513,6 +2515,9 @@ void qed_int_cau_conf_pi(struct qed_hwfn *p_hwfn, u32 sb_offset; u32 pi_offset; + if (IS_VF(p_hwfn->cdev)) + return; + sb_offset = igu_sb_id * PIS_PER_SB; memset(&pi_entry, 0, sizeof(struct cau_pi_entry)); @@ -2542,8 +2547,9 @@ void qed_int_sb_setup(struct qed_hwfn *p_hwfn, sb_info->sb_ack = 0; memset(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt)); - qed_int_cau_conf_sb(p_hwfn, p_ptt, sb_info->sb_phys, - sb_info->igu_sb_id, 0, 0); + if (IS_PF(p_hwfn->cdev)) + qed_int_cau_conf_sb(p_hwfn, p_ptt, sb_info->sb_phys, + 
sb_info->igu_sb_id, 0, 0); } /** @@ -2563,8 +2569,10 @@ static u16 qed_get_igu_sb_id(struct qed_hwfn *p_hwfn, /* Assuming continuous set of IGU SBs dedicated for given PF */ if (sb_id == QED_SP_SB_ID) igu_sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id; - else + else if (IS_PF(p_hwfn->cdev)) igu_sb_id = sb_id + p_hwfn->hw_info.p_igu_info->igu_base_sb; + else + igu_sb_id = qed_vf_get_igu_sb_id(p_hwfn, sb_id); DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "SB [%s] index is 0x%04x\n", (sb_id == QED_SP_SB_ID) ? "DSB" : "non-DSB", igu_sb_id); @@ -2594,9 +2602,16 @@ int qed_int_sb_init(struct qed_hwfn *p_hwfn, /* The igu address will hold the absolute address that needs to be * written to for a specific status block */ - sb_info->igu_addr = (u8 __iomem *)p_hwfn->regview + - GTT_BAR0_MAP_REG_IGU_CMD + - (sb_info->igu_sb_id << 3); + if (IS_PF(p_hwfn->cdev)) { + sb_info->igu_addr = (u8 __iomem *)p_hwfn->regview + + GTT_BAR0_MAP_REG_IGU_CMD + + (sb_info->igu_sb_id << 3); + } else { + sb_info->igu_addr = (u8 __iomem *)p_hwfn->regview + + PXP_VF_BAR0_START_IGU + + ((IGU_CMD_INT_ACK_BASE + + sb_info->igu_sb_id) << 3); + } sb_info->flags |= QED_SB_INFO_INIT; @@ -2783,6 +2798,9 @@ void qed_int_igu_disable_int(struct qed_hwfn *p_hwfn, { p_hwfn->b_int_enabled = 0; + if (IS_VF(p_hwfn->cdev)) + return; + qed_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, 0); } @@ -2935,9 +2953,9 @@ int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { struct qed_igu_info *p_igu_info; + u32 val, min_vf = 0, max_vf = 0; + u16 sb_id, last_iov_sb_id = 0; struct qed_igu_block *blk; - u32 val; - u16 sb_id; u16 prev_sb_id = 0xFF; p_hwfn->hw_info.p_igu_info = kzalloc(sizeof(*p_igu_info), GFP_KERNEL); @@ -2947,12 +2965,19 @@ int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn, p_igu_info = p_hwfn->hw_info.p_igu_info; - /* Initialize base sb / sb cnt for PFs */ + /* Initialize base sb / sb cnt for PFs and VFs */ p_igu_info->igu_base_sb = 0xffff; p_igu_info->igu_sb_cnt = 0; p_igu_info->igu_dsb_id = 0xffff; p_igu_info->igu_base_sb_iov = 0xffff; + if (p_hwfn->cdev->p_iov_info) { + struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info; + + min_vf = p_iov->first_vf_in_pf; + max_vf = p_iov->first_vf_in_pf + p_iov->total_vfs; + } + for (sb_id = 0; sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev); sb_id++) { blk = &p_igu_info->igu_map.igu_blocks[sb_id]; @@ -2986,14 +3011,43 @@ int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn, (p_igu_info->igu_sb_cnt)++; } } + } else { + if ((blk->function_id >= min_vf) && + (blk->function_id < max_vf)) { + /* Available for VFs of this PF */ + if (p_igu_info->igu_base_sb_iov == 0xffff) { + p_igu_info->igu_base_sb_iov = sb_id; + } else if (last_iov_sb_id != sb_id - 1) { + if (!val) { + DP_VERBOSE(p_hwfn->cdev, + NETIF_MSG_INTR, + "First uninitialized IGU CAM entry at index 0x%04x\n", + sb_id); + } else { + DP_NOTICE(p_hwfn->cdev, + "Consecutive igu vectors for HWFN %x vfs is broken [jumps from %04x to %04x]\n", + p_hwfn->rel_pf_id, + last_iov_sb_id, + sb_id); } + break; + } + blk->status |= QED_IGU_STATUS_FREE; + p_hwfn->hw_info.p_igu_info->free_blks++; + last_iov_sb_id = sb_id; + } } } + p_igu_info->igu_sb_cnt_iov = p_igu_info->free_blks; - DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, - "IGU igu_base_sb=0x%x igu_sb_cnt=%d igu_dsb_id=0x%x\n", - p_igu_info->igu_base_sb, - p_igu_info->igu_sb_cnt, - p_igu_info->igu_dsb_id); + DP_VERBOSE( + p_hwfn, + NETIF_MSG_INTR, + "IGU igu_base_sb=0x%x [IOV 0x%x] igu_sb_cnt=%d [IOV 0x%x] igu_dsb_id=0x%x\n", + p_igu_info->igu_base_sb, + p_igu_info->igu_base_sb_iov, + 
p_igu_info->igu_sb_cnt, + p_igu_info->igu_sb_cnt_iov, + p_igu_info->igu_dsb_id); if (p_igu_info->igu_base_sb == 0xffff || p_igu_info->igu_dsb_id == 0xffff || @@ -3116,6 +3170,23 @@ void qed_int_get_num_sbs(struct qed_hwfn *p_hwfn, p_sb_cnt_info->sb_free_blk = info->free_blks; } +u16 qed_int_queue_id_from_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id) +{ + struct qed_igu_info *p_info = p_hwfn->hw_info.p_igu_info; + + /* Determine origin of SB id */ + if ((sb_id >= p_info->igu_base_sb) && + (sb_id < p_info->igu_base_sb + p_info->igu_sb_cnt)) { + return sb_id - p_info->igu_base_sb; + } else if ((sb_id >= p_info->igu_base_sb_iov) && + (sb_id < p_info->igu_base_sb_iov + p_info->igu_sb_cnt_iov)) { + return sb_id - p_info->igu_base_sb_iov + p_info->igu_sb_cnt; + } else { + DP_NOTICE(p_hwfn, "SB %d not in range for function\n", sb_id); + return 0; + } +} + void qed_int_disable_post_isr_release(struct qed_dev *cdev) { int i; diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.h b/drivers/net/ethernet/qlogic/qed/qed_int.h index c57f2e680770..295df4451e31 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_int.h +++ b/drivers/net/ethernet/qlogic/qed/qed_int.h @@ -20,6 +20,12 @@ #define IGU_PF_CONF_ATTN_BIT_EN (0x1 << 3) /* attention enable */ #define IGU_PF_CONF_SINGLE_ISR_EN (0x1 << 4) /* single ISR mode enable */ #define IGU_PF_CONF_SIMD_MODE (0x1 << 5) /* simd all ones mode */ +/* Fields of IGU VF CONFIGRATION REGISTER */ +#define IGU_VF_CONF_FUNC_EN (0x1 << 0) /* function enable */ +#define IGU_VF_CONF_MSI_MSIX_EN (0x1 << 1) /* MSI/MSIX enable */ +#define IGU_VF_CONF_SINGLE_ISR_EN (0x1 << 4) /* single ISR mode enable */ +#define IGU_VF_CONF_PARENT_MASK (0xF) /* Parent PF */ +#define IGU_VF_CONF_PARENT_SHIFT 5 /* Parent PF */ /* Igu control commands */ @@ -364,6 +370,16 @@ void qed_int_free(struct qed_hwfn *p_hwfn); void qed_int_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); +/** + * @brief - Returns an Rx queue index appropriate for usage with given SB. 
+ * + * @param p_hwfn + * @param sb_id - absolute index of SB + * + * @return index of Rx queue + */ +u16 qed_int_queue_id_from_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id); + /** * @brief - Enable Interrupt & Attention for hw function * diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c index 31e1d510a991..8bcbf92b776f 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c @@ -34,6 +34,7 @@ #include "qed_mcp.h" #include "qed_reg_addr.h" #include "qed_sp.h" +#include "qed_sriov.h" struct qed_rss_params { u8 update_rss_config; @@ -1580,32 +1581,53 @@ static int qed_fill_eth_dev_info(struct qed_dev *cdev, info->num_tc = 1; - if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) { - for_each_hwfn(cdev, i) - info->num_queues += FEAT_NUM(&cdev->hwfns[i], - QED_PF_L2_QUE); - if (cdev->int_params.fp_msix_cnt) - info->num_queues = min_t(u8, info->num_queues, - cdev->int_params.fp_msix_cnt); + if (IS_PF(cdev)) { + if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) { + for_each_hwfn(cdev, i) + info->num_queues += + FEAT_NUM(&cdev->hwfns[i], QED_PF_L2_QUE); + if (cdev->int_params.fp_msix_cnt) + info->num_queues = + min_t(u8, info->num_queues, + cdev->int_params.fp_msix_cnt); + } else { + info->num_queues = cdev->num_hwfns; + } + + info->num_vlan_filters = RESC_NUM(&cdev->hwfns[0], QED_VLAN); + ether_addr_copy(info->port_mac, + cdev->hwfns[0].hw_info.hw_mac_addr); } else { - info->num_queues = cdev->num_hwfns; + qed_vf_get_num_rxqs(QED_LEADING_HWFN(cdev), &info->num_queues); + if (cdev->num_hwfns > 1) { + u8 queues = 0; + + qed_vf_get_num_rxqs(&cdev->hwfns[1], &queues); + info->num_queues += queues; + } + + qed_vf_get_num_vlan_filters(&cdev->hwfns[0], + &info->num_vlan_filters); + qed_vf_get_port_mac(&cdev->hwfns[0], info->port_mac); } - info->num_vlan_filters = RESC_NUM(&cdev->hwfns[0], QED_VLAN); - ether_addr_copy(info->port_mac, - cdev->hwfns[0].hw_info.hw_mac_addr); - qed_fill_dev_info(cdev, &info->common); + if (IS_VF(cdev)) + memset(info->common.hw_mac, 0, ETH_ALEN); + return 0; } static void qed_register_eth_ops(struct qed_dev *cdev, - struct qed_eth_cb_ops *ops, - void *cookie) + struct qed_eth_cb_ops *ops, void *cookie) { - cdev->protocol_ops.eth = ops; - cdev->ops_cookie = cookie; + cdev->protocol_ops.eth = ops; + cdev->ops_cookie = cookie; + + /* For VF, we start bulletin reading */ + if (IS_VF(cdev)) + qed_vf_start_iov_wq(cdev); } static int qed_start_vport(struct qed_dev *cdev, @@ -1890,6 +1912,9 @@ static int qed_tunn_configure(struct qed_dev *cdev, struct qed_tunn_update_params tunn_info; int i, rc; + if (IS_VF(cdev)) + return 0; + memset(&tunn_info, 0, sizeof(tunn_info)); if (tunn_params->update_vxlan_port == 1) { tunn_info.update_vxlan_udp_port = 1; diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index c209ed49deae..898347bd2db7 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -126,7 +126,7 @@ static int qed_init_pci(struct qed_dev *cdev, goto err1; } - if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) { + if (IS_PF(cdev) && !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) { DP_NOTICE(cdev, "No memory region found in bar #2\n"); rc = -EIO; goto err1; @@ -176,12 +176,14 @@ static int qed_init_pci(struct qed_dev *cdev, goto err2; } - cdev->db_phys_addr = pci_resource_start(cdev->pdev, 2); - cdev->db_size = pci_resource_len(cdev->pdev, 2); - cdev->doorbells = 
ioremap_wc(cdev->db_phys_addr, cdev->db_size); - if (!cdev->doorbells) { - DP_NOTICE(cdev, "Cannot map doorbell space\n"); - return -ENOMEM; + if (IS_PF(cdev)) { + cdev->db_phys_addr = pci_resource_start(cdev->pdev, 2); + cdev->db_size = pci_resource_len(cdev->pdev, 2); + cdev->doorbells = ioremap_wc(cdev->db_phys_addr, cdev->db_size); + if (!cdev->doorbells) { + DP_NOTICE(cdev, "Cannot map doorbell space\n"); + return -ENOMEM; + } } return 0; @@ -208,20 +210,32 @@ int qed_fill_dev_info(struct qed_dev *cdev, dev_info->is_mf_default = IS_MF_DEFAULT(&cdev->hwfns[0]); ether_addr_copy(dev_info->hw_mac, cdev->hwfns[0].hw_info.hw_mac_addr); - dev_info->fw_major = FW_MAJOR_VERSION; - dev_info->fw_minor = FW_MINOR_VERSION; - dev_info->fw_rev = FW_REVISION_VERSION; - dev_info->fw_eng = FW_ENGINEERING_VERSION; - dev_info->mf_mode = cdev->mf_mode; + if (IS_PF(cdev)) { + dev_info->fw_major = FW_MAJOR_VERSION; + dev_info->fw_minor = FW_MINOR_VERSION; + dev_info->fw_rev = FW_REVISION_VERSION; + dev_info->fw_eng = FW_ENGINEERING_VERSION; + dev_info->mf_mode = cdev->mf_mode; + } else { + qed_vf_get_fw_version(&cdev->hwfns[0], &dev_info->fw_major, + &dev_info->fw_minor, &dev_info->fw_rev, + &dev_info->fw_eng); + } - qed_mcp_get_mfw_ver(cdev, &dev_info->mfw_rev); + if (IS_PF(cdev)) { + ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev)); + if (ptt) { + qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), ptt, + &dev_info->mfw_rev, NULL); - ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev)); - if (ptt) { - qed_mcp_get_flash_size(QED_LEADING_HWFN(cdev), ptt, - &dev_info->flash_size); + qed_mcp_get_flash_size(QED_LEADING_HWFN(cdev), ptt, + &dev_info->flash_size); - qed_ptt_release(QED_LEADING_HWFN(cdev), ptt); + qed_ptt_release(QED_LEADING_HWFN(cdev), ptt); + } + } else { + qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), NULL, + &dev_info->mfw_rev, NULL); } return 0; @@ -258,9 +272,7 @@ static int qed_set_power_state(struct qed_dev *cdev, /* probing */ static struct qed_dev *qed_probe(struct pci_dev *pdev, - enum qed_protocol protocol, - u32 dp_module, - u8 dp_level) + struct qed_probe_params *params) { struct qed_dev *cdev; int rc; @@ -269,9 +281,12 @@ static struct qed_dev *qed_probe(struct pci_dev *pdev, if (!cdev) goto err0; - cdev->protocol = protocol; + cdev->protocol = params->protocol; - qed_init_dp(cdev, dp_module, dp_level); + if (params->is_vf) + cdev->b_is_vf = true; + + qed_init_dp(cdev, params->dp_module, params->dp_level); rc = qed_init_pci(cdev, pdev); if (rc) { @@ -665,6 +680,35 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev, return 0; } +static int qed_slowpath_vf_setup_int(struct qed_dev *cdev) +{ + int rc; + + memset(&cdev->int_params, 0, sizeof(struct qed_int_params)); + cdev->int_params.in.int_mode = QED_INT_MODE_MSIX; + + qed_vf_get_num_rxqs(QED_LEADING_HWFN(cdev), + &cdev->int_params.in.num_vectors); + if (cdev->num_hwfns > 1) { + u8 vectors = 0; + + qed_vf_get_num_rxqs(&cdev->hwfns[1], &vectors); + cdev->int_params.in.num_vectors += vectors; + } + + /* We want a minimum of one fastpath vector per vf hwfn */ + cdev->int_params.in.min_msix_cnt = cdev->num_hwfns; + + rc = qed_set_int_mode(cdev, true); + if (rc) + return rc; + + cdev->int_params.fp_msix_base = 0; + cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors; + + return 0; +} + u32 qed_unzip_data(struct qed_hwfn *p_hwfn, u32 input_len, u8 *input_buf, u32 max_size, u8 *unzip_buf) { @@ -755,32 +799,38 @@ static int qed_slowpath_start(struct qed_dev *cdev, if (qed_iov_wq_start(cdev)) goto err; - rc = request_firmware(&cdev->firmware, 
QED_FW_FILE_NAME, - &cdev->pdev->dev); - if (rc) { - DP_NOTICE(cdev, - "Failed to find fw file - /lib/firmware/%s\n", - QED_FW_FILE_NAME); - goto err; + if (IS_PF(cdev)) { + rc = request_firmware(&cdev->firmware, QED_FW_FILE_NAME, + &cdev->pdev->dev); + if (rc) { + DP_NOTICE(cdev, + "Failed to find fw file - /lib/firmware/%s\n", + QED_FW_FILE_NAME); + goto err; + } } rc = qed_nic_setup(cdev); if (rc) goto err; - rc = qed_slowpath_setup_int(cdev, params->int_mode); + if (IS_PF(cdev)) + rc = qed_slowpath_setup_int(cdev, params->int_mode); + else + rc = qed_slowpath_vf_setup_int(cdev); if (rc) goto err1; - /* Allocate stream for unzipping */ - rc = qed_alloc_stream_mem(cdev); - if (rc) { - DP_NOTICE(cdev, "Failed to allocate stream memory\n"); - goto err2; - } + if (IS_PF(cdev)) { + /* Allocate stream for unzipping */ + rc = qed_alloc_stream_mem(cdev); + if (rc) { + DP_NOTICE(cdev, "Failed to allocate stream memory\n"); + goto err2; + } - /* Start the slowpath */ - data = cdev->firmware->data; + data = cdev->firmware->data; + } memset(&tunn_info, 0, sizeof(tunn_info)); tunn_info.tunn_mode |= 1 << QED_MODE_VXLAN_TUNN | @@ -793,6 +843,7 @@ static int qed_slowpath_start(struct qed_dev *cdev, tunn_info.tunn_clss_l2gre = QED_TUNN_CLSS_MAC_VLAN; tunn_info.tunn_clss_ipgre = QED_TUNN_CLSS_MAC_VLAN; + /* Start the slowpath */ rc = qed_hw_init(cdev, &tunn_info, true, cdev->int_params.out.int_mode, true, data); @@ -802,18 +853,20 @@ static int qed_slowpath_start(struct qed_dev *cdev, DP_INFO(cdev, "HW initialization and function start completed successfully\n"); - hwfn = QED_LEADING_HWFN(cdev); - drv_version.version = (params->drv_major << 24) | - (params->drv_minor << 16) | - (params->drv_rev << 8) | - (params->drv_eng); - strlcpy(drv_version.name, params->name, - MCP_DRV_VER_STR_SIZE - 4); - rc = qed_mcp_send_drv_version(hwfn, hwfn->p_main_ptt, - &drv_version); - if (rc) { - DP_NOTICE(cdev, "Failed sending drv version command\n"); - return rc; + if (IS_PF(cdev)) { + hwfn = QED_LEADING_HWFN(cdev); + drv_version.version = (params->drv_major << 24) | + (params->drv_minor << 16) | + (params->drv_rev << 8) | + (params->drv_eng); + strlcpy(drv_version.name, params->name, + MCP_DRV_VER_STR_SIZE - 4); + rc = qed_mcp_send_drv_version(hwfn, hwfn->p_main_ptt, + &drv_version); + if (rc) { + DP_NOTICE(cdev, "Failed sending drv version command\n"); + return rc; + } } qed_reset_vport_stats(cdev); @@ -822,13 +875,15 @@ static int qed_slowpath_start(struct qed_dev *cdev, err2: qed_hw_timers_stop_all(cdev); - qed_slowpath_irq_free(cdev); + if (IS_PF(cdev)) + qed_slowpath_irq_free(cdev); qed_free_stream_mem(cdev); qed_disable_msix(cdev); err1: qed_resc_free(cdev); err: - release_firmware(cdev->firmware); + if (IS_PF(cdev)) + release_firmware(cdev->firmware); qed_iov_wq_stop(cdev, false); @@ -840,17 +895,20 @@ static int qed_slowpath_stop(struct qed_dev *cdev) if (!cdev) return -ENODEV; - qed_free_stream_mem(cdev); + if (IS_PF(cdev)) { + qed_free_stream_mem(cdev); - qed_nic_stop(cdev); - qed_slowpath_irq_free(cdev); + qed_nic_stop(cdev); + qed_slowpath_irq_free(cdev); + } qed_disable_msix(cdev); qed_nic_reset(cdev); qed_iov_wq_stop(cdev, true); - release_firmware(cdev->firmware); + if (IS_PF(cdev)) + release_firmware(cdev->firmware); return 0; } @@ -940,6 +998,9 @@ static int qed_set_link(struct qed_dev *cdev, if (!cdev) return -ENODEV; + if (IS_VF(cdev)) + return 0; + /* The link should be set only once per PF */ hwfn = &cdev->hwfns[0]; @@ -1051,10 +1112,16 @@ static void qed_fill_link(struct qed_hwfn *hwfn, 
memset(if_link, 0, sizeof(*if_link)); /* Prepare source inputs */ - memcpy(¶ms, qed_mcp_get_link_params(hwfn), sizeof(params)); - memcpy(&link, qed_mcp_get_link_state(hwfn), sizeof(link)); - memcpy(&link_caps, qed_mcp_get_link_capabilities(hwfn), - sizeof(link_caps)); + if (IS_PF(hwfn->cdev)) { + memcpy(¶ms, qed_mcp_get_link_params(hwfn), sizeof(params)); + memcpy(&link, qed_mcp_get_link_state(hwfn), sizeof(link)); + memcpy(&link_caps, qed_mcp_get_link_capabilities(hwfn), + sizeof(link_caps)); + } else { + memset(¶ms, 0, sizeof(params)); + memset(&link, 0, sizeof(link)); + memset(&link_caps, 0, sizeof(link_caps)); + } /* Set the link parameters to pass to protocol driver */ if (link.link_up) @@ -1177,6 +1244,9 @@ static int qed_drain(struct qed_dev *cdev) struct qed_ptt *ptt; int i, rc; + if (IS_VF(cdev)) + return 0; + for_each_hwfn(cdev, i) { hwfn = &cdev->hwfns[i]; ptt = qed_ptt_acquire(hwfn); diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index 2f8309d772c8..83175007f616 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c @@ -19,6 +19,8 @@ #include "qed_hw.h" #include "qed_mcp.h" #include "qed_reg_addr.h" +#include "qed_sriov.h" + #define CHIP_MCP_RESP_ITER_US 10 #define QED_DRV_MB_MAX_RETRIES (500 * 1000) /* Account for 5 sec */ @@ -787,26 +789,42 @@ int qed_mcp_handle_events(struct qed_hwfn *p_hwfn, return rc; } -int qed_mcp_get_mfw_ver(struct qed_dev *cdev, - u32 *p_mfw_ver) +int qed_mcp_get_mfw_ver(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *p_mfw_ver, u32 *p_running_bundle_id) { - struct qed_hwfn *p_hwfn = &cdev->hwfns[0]; - struct qed_ptt *p_ptt; u32 global_offsize; - p_ptt = qed_ptt_acquire(p_hwfn); - if (!p_ptt) - return -EBUSY; + if (IS_VF(p_hwfn->cdev)) { + if (p_hwfn->vf_iov_info) { + struct pfvf_acquire_resp_tlv *p_resp; + + p_resp = &p_hwfn->vf_iov_info->acquire_resp; + *p_mfw_ver = p_resp->pfdev_info.mfw_ver; + return 0; + } else { + DP_VERBOSE(p_hwfn, + QED_MSG_IOV, + "VF requested MFW version prior to ACQUIRE\n"); + return -EINVAL; + } + } global_offsize = qed_rd(p_hwfn, p_ptt, - SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info-> - public_base, + SECTION_OFFSIZE_ADDR(p_hwfn-> + mcp_info->public_base, PUBLIC_GLOBAL)); - *p_mfw_ver = qed_rd(p_hwfn, p_ptt, - SECTION_ADDR(global_offsize, 0) + - offsetof(struct public_global, mfw_ver)); + *p_mfw_ver = + qed_rd(p_hwfn, p_ptt, + SECTION_ADDR(global_offsize, + 0) + offsetof(struct public_global, mfw_ver)); - qed_ptt_release(p_hwfn, p_ptt); + if (p_running_bundle_id != NULL) { + *p_running_bundle_id = qed_rd(p_hwfn, p_ptt, + SECTION_ADDR(global_offsize, 0) + + offsetof(struct public_global, + running_bundle_id)); + } return 0; } @@ -817,6 +835,9 @@ int qed_mcp_get_media_type(struct qed_dev *cdev, struct qed_hwfn *p_hwfn = &cdev->hwfns[0]; struct qed_ptt *p_ptt; + if (IS_VF(cdev)) + return -EINVAL; + if (!qed_mcp_is_init(p_hwfn)) { DP_NOTICE(p_hwfn, "MFW is not initialized !\n"); return -EBUSY; @@ -951,6 +972,9 @@ int qed_mcp_get_flash_size(struct qed_hwfn *p_hwfn, { u32 flash_size; + if (IS_VF(p_hwfn->cdev)) + return -EINVAL; + flash_size = qed_rd(p_hwfn, p_ptt, MCP_REG_NVM_CFG4); flash_size = (flash_size & MCP_REG_NVM_CFG4_FLASH_SIZE) >> MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT; @@ -961,6 +985,37 @@ int qed_mcp_get_flash_size(struct qed_hwfn *p_hwfn, return 0; } +int qed_mcp_config_vf_msix(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u8 vf_id, u8 num) +{ + u32 resp = 0, param = 0, rc_param = 0; + int rc; + + /* Only Leader can 
configure MSIX, and need to take CMT into account */ + if (!IS_LEAD_HWFN(p_hwfn)) + return 0; + num *= p_hwfn->cdev->num_hwfns; + + param |= (vf_id << DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT) & + DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK; + param |= (num << DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT) & + DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK; + + rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_VF_MSIX, param, + &resp, &rc_param); + + if (resp != FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE) { + DP_NOTICE(p_hwfn, "VF[%d]: MFW failed to set MSI-X\n", vf_id); + rc = -EINVAL; + } else { + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "Requested 0x%02x MSI-x interrupts from VF 0x%02x\n", + num, vf_id); + } + + return rc; +} + int qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h index 5f218eed0541..e3d5cdfe8e8d 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h @@ -149,13 +149,16 @@ int qed_mcp_set_link(struct qed_hwfn *p_hwfn, /** * @brief Get the management firmware version value * - * @param cdev - qed dev pointer - * @param mfw_ver - mfw version value + * @param p_hwfn + * @param p_ptt + * @param p_mfw_ver - mfw version value + * @param p_running_bundle_id - image id in nvram; Optional. * - * @return int - 0 - operation was successul. + * @return int - 0 - operation was successful. */ -int qed_mcp_get_mfw_ver(struct qed_dev *cdev, - u32 *mfw_ver); +int qed_mcp_get_mfw_ver(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *p_mfw_ver, u32 *p_running_bundle_id); /** * @brief Get media type value of the port. @@ -418,6 +421,20 @@ int qed_mcp_reset(struct qed_hwfn *p_hwfn, * @return true iff MFW is running and mcp_info is initialized */ bool qed_mcp_is_init(struct qed_hwfn *p_hwfn); + +/** + * @brief request MFW to configure MSI-X for a VF + * + * @param p_hwfn + * @param p_ptt + * @param vf_id - absolute inside engine + * @param num_sbs - number of entries to request + * + * @return int + */ +int qed_mcp_config_vf_msix(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u8 vf_id, u8 num); + int qed_configure_pf_min_bandwidth(struct qed_dev *cdev, u8 min_bw); int qed_configure_pf_max_bandwidth(struct qed_dev *cdev, u8 max_bw); int __qed_configure_pf_max_bandwidth(struct qed_hwfn *p_hwfn, diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h index bf4d7ccd56bb..a508b6b7f1d4 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h +++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h @@ -39,6 +39,8 @@ 0x2aae04UL #define PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER \ 0x2aa16cUL +#define PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR \ + 0x2aa118UL #define BAR0_MAP_REG_MSDM_RAM \ 0x1d00000UL #define BAR0_MAP_REG_USDM_RAM \ @@ -111,6 +113,8 @@ 0x009778UL #define MISCS_REG_CHIP_METAL \ 0x009774UL +#define MISCS_REG_FUNCTION_HIDE \ + 0x0096f0UL #define BRB_REG_HEADER_SIZE \ 0x340804UL #define BTB_REG_HEADER_SIZE \ @@ -119,6 +123,8 @@ 0x1c0708UL #define CCFC_REG_ACTIVITY_COUNTER \ 0x2e8800UL +#define CCFC_REG_STRONG_ENABLE_VF \ + 0x2e070cUL #define CDU_REG_CID_ADDR_PARAMS \ 0x580900UL #define DBG_REG_CLIENT_ENABLE \ diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h index eec137f40895..dde69090379f 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h @@ -62,6 +62,8 @@ union ramrod_data { struct vport_stop_ramrod_data vport_stop; struct 
vport_update_ramrod_data vport_update; struct vport_filter_update_ramrod_data vport_filter_update; + + struct vf_start_ramrod_data vf_start; }; #define EQ_MAX_CREDIT 0xffffffff diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c index e1e2344b1906..ed90947c451d 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c @@ -20,6 +20,7 @@ #include "qed_int.h" #include "qed_reg_addr.h" #include "qed_sp.h" +#include "qed_sriov.h" int qed_sp_init_request(struct qed_hwfn *p_hwfn, struct qed_spq_entry **pp_ent, @@ -357,6 +358,13 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn, &p_ramrod->tunnel_config); p_hwfn->hw_info.personality = PERSONALITY_ETH; + if (p_hwfn->cdev->p_iov_info) { + struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info; + + p_ramrod->base_vf_id = (u8) p_iov->first_vf_in_pf; + p_ramrod->num_vfs = (u8) p_iov->total_vfs; + } + DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "Setting event_ring_sb [id %04x index %02x], outer_tag [%d]\n", sb, sb_index, diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c index 0e439e46fbe9..acac6626a1b2 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_spq.c +++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c @@ -387,6 +387,9 @@ static int qed_cqe_completion( struct eth_slow_path_rx_cqe *cqe, enum protocol_type protocol) { + if (IS_VF(p_hwfn->cdev)) + return 0; + /* @@@tmp - it's possible we'll eventually want to handle some * actual commands that can arrive here, but for now this is only * used to complete the ramrod using the echo value on the cqe diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c index 4a6af4264141..699d96fb87f0 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c @@ -6,12 +6,48 @@ * this source tree. 
*/ +#include "qed_cxt.h" +#include "qed_hsi.h" #include "qed_hw.h" +#include "qed_init_ops.h" #include "qed_int.h" +#include "qed_mcp.h" #include "qed_reg_addr.h" +#include "qed_sp.h" #include "qed_sriov.h" #include "qed_vf.h" +/* IOV ramrods */ +static int qed_sp_vf_start(struct qed_hwfn *p_hwfn, + u32 concrete_vfid, u16 opaque_vfid) +{ + struct vf_start_ramrod_data *p_ramrod = NULL; + struct qed_spq_entry *p_ent = NULL; + struct qed_sp_init_data init_data; + int rc = -EINVAL; + + /* Get SPQ entry */ + memset(&init_data, 0, sizeof(init_data)); + init_data.cid = qed_spq_get_cid(p_hwfn); + init_data.opaque_fid = opaque_vfid; + init_data.comp_mode = QED_SPQ_MODE_EBLOCK; + + rc = qed_sp_init_request(p_hwfn, &p_ent, + COMMON_RAMROD_VF_START, + PROTOCOLID_COMMON, &init_data); + if (rc) + return rc; + + p_ramrod = &p_ent->ramrod.vf_start; + + p_ramrod->vf_id = GET_FIELD(concrete_vfid, PXP_CONCRETE_FID_VFID); + p_ramrod->opaque_fid = cpu_to_le16(opaque_vfid); + + p_ramrod->personality = PERSONALITY_ETH; + + return qed_spq_post(p_hwfn, p_ent, NULL); +} + bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn, int rel_vf_id, bool b_enabled_only) { @@ -321,6 +357,9 @@ int qed_iov_hw_info(struct qed_hwfn *p_hwfn) int pos; int rc; + if (IS_VF(p_hwfn->cdev)) + return 0; + /* Learn the PCI configuration */ pos = pci_find_ext_capability(p_hwfn->cdev->pdev, PCI_EXT_CAP_ID_SRIOV); @@ -376,12 +415,189 @@ static bool qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn, int vfid) return false; /* Check VF validity */ - if (!qed_iov_is_valid_vfid(p_hwfn, vfid, true)) + if (IS_VF(p_hwfn->cdev) || !IS_QED_SRIOV(p_hwfn->cdev) || + !IS_PF_SRIOV_ALLOC(p_hwfn)) return false; return true; } +static void qed_iov_vf_pglue_clear_err(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u8 abs_vfid) +{ + qed_wr(p_hwfn, p_ptt, + PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR + (abs_vfid >> 5) * 4, + 1 << (abs_vfid & 0x1f)); +} + +static int qed_iov_enable_vf_access(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_vf_info *vf) +{ + u32 igu_vf_conf = IGU_VF_CONF_FUNC_EN; + int rc; + + DP_VERBOSE(p_hwfn, + QED_MSG_IOV, + "Enable internal access for vf %x [abs %x]\n", + vf->abs_vf_id, QED_VF_ABS_ID(p_hwfn, vf)); + + qed_iov_vf_pglue_clear_err(p_hwfn, p_ptt, QED_VF_ABS_ID(p_hwfn, vf)); + + rc = qed_mcp_config_vf_msix(p_hwfn, p_ptt, vf->abs_vf_id, vf->num_sbs); + if (rc) + return rc; + + qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid); + + SET_FIELD(igu_vf_conf, IGU_VF_CONF_PARENT, p_hwfn->rel_pf_id); + STORE_RT_REG(p_hwfn, IGU_REG_VF_CONFIGURATION_RT_OFFSET, igu_vf_conf); + + qed_init_run(p_hwfn, p_ptt, PHASE_VF, vf->abs_vf_id, + p_hwfn->hw_info.hw_mode); + + /* unpretend */ + qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid); + + if (vf->state != VF_STOPPED) { + DP_NOTICE(p_hwfn, "VF[%02x] is already started\n", + vf->abs_vf_id); + return -EINVAL; + } + + /* Start VF */ + rc = qed_sp_vf_start(p_hwfn, vf->concrete_fid, vf->opaque_fid); + if (rc) + DP_NOTICE(p_hwfn, "Failed to start VF[%02x]\n", vf->abs_vf_id); + + vf->state = VF_FREE; + + return rc; +} + +static u8 qed_iov_alloc_vf_igu_sbs(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_vf_info *vf, u16 num_rx_queues) +{ + struct qed_igu_block *igu_blocks; + int qid = 0, igu_id = 0; + u32 val = 0; + + igu_blocks = p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks; + + if (num_rx_queues > p_hwfn->hw_info.p_igu_info->free_blks) + num_rx_queues = p_hwfn->hw_info.p_igu_info->free_blks; + p_hwfn->hw_info.p_igu_info->free_blks -= num_rx_queues; + + 
SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER, vf->abs_vf_id); + SET_FIELD(val, IGU_MAPPING_LINE_VALID, 1); + SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, 0); + + while ((qid < num_rx_queues) && + (igu_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev))) { + if (igu_blocks[igu_id].status & QED_IGU_STATUS_FREE) { + struct cau_sb_entry sb_entry; + + vf->igu_sbs[qid] = (u16)igu_id; + igu_blocks[igu_id].status &= ~QED_IGU_STATUS_FREE; + + SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, qid); + + qed_wr(p_hwfn, p_ptt, + IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id, + val); + + /* Configure igu sb in CAU which were marked valid */ + qed_init_cau_sb_entry(p_hwfn, &sb_entry, + p_hwfn->rel_pf_id, + vf->abs_vf_id, 1); + qed_dmae_host2grc(p_hwfn, p_ptt, + (u64)(uintptr_t)&sb_entry, + CAU_REG_SB_VAR_MEMORY + + igu_id * sizeof(u64), 2, 0); + qid++; + } + igu_id++; + } + + vf->num_sbs = (u8) num_rx_queues; + + return vf->num_sbs; +} + +static int qed_iov_init_hw_for_vf(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u16 rel_vf_id, u16 num_rx_queues) +{ + u8 num_of_vf_avaiable_chains = 0; + struct qed_vf_info *vf = NULL; + int rc = 0; + u32 cids; + u8 i; + + vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false); + if (!vf) { + DP_ERR(p_hwfn, "qed_iov_init_hw_for_vf : vf is NULL\n"); + return -EINVAL; + } + + if (vf->b_init) { + DP_NOTICE(p_hwfn, "VF[%d] is already active.\n", rel_vf_id); + return -EINVAL; + } + + /* Limit number of queues according to number of CIDs */ + qed_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH, &cids); + DP_VERBOSE(p_hwfn, + QED_MSG_IOV, + "VF[%d] - requesting to initialize for 0x%04x queues [0x%04x CIDs available]\n", + vf->relative_vf_id, num_rx_queues, (u16) cids); + num_rx_queues = min_t(u16, num_rx_queues, ((u16) cids)); + + num_of_vf_avaiable_chains = qed_iov_alloc_vf_igu_sbs(p_hwfn, + p_ptt, + vf, + num_rx_queues); + if (!num_of_vf_avaiable_chains) { + DP_ERR(p_hwfn, "no available igu sbs\n"); + return -ENOMEM; + } + + /* Choose queue number and index ranges */ + vf->num_rxqs = num_of_vf_avaiable_chains; + vf->num_txqs = num_of_vf_avaiable_chains; + + for (i = 0; i < vf->num_rxqs; i++) { + u16 queue_id = qed_int_queue_id_from_sb_id(p_hwfn, + vf->igu_sbs[i]); + + if (queue_id > RESC_NUM(p_hwfn, QED_L2_QUEUE)) { + DP_NOTICE(p_hwfn, + "VF[%d] will require utilizing of out-of-bounds queues - %04x\n", + vf->relative_vf_id, queue_id); + return -EINVAL; + } + + /* CIDs are per-VF, so no problem having them 0-based. 
*/ + vf->vf_queues[i].fw_rx_qid = queue_id; + vf->vf_queues[i].fw_tx_qid = queue_id; + vf->vf_queues[i].fw_cid = i; + + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "VF[%d] - [%d] SB %04x, Tx/Rx queue %04x CID %04x\n", + vf->relative_vf_id, i, vf->igu_sbs[i], queue_id, i); + } + rc = qed_iov_enable_vf_access(p_hwfn, p_ptt, vf); + if (!rc) { + vf->b_init = true; + + if (IS_LEAD_HWFN(p_hwfn)) + p_hwfn->cdev->p_iov_info->num_vfs++; + } + + return rc; +} + static bool qed_iov_tlv_supported(u16 tlvtype) { return CHANNEL_TLV_NONE < tlvtype && tlvtype < CHANNEL_TLV_MAX; @@ -486,13 +702,147 @@ static void qed_iov_prepare_resp(struct qed_hwfn *p_hwfn, qed_iov_send_response(p_hwfn, p_ptt, vf_info, length, status); } -static void qed_iov_process_mbx_dummy_resp(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - struct qed_vf_info *p_vf) +static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_vf_info *vf) { - qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf, CHANNEL_TLV_NONE, - sizeof(struct pfvf_def_resp_tlv), - PFVF_STATUS_SUCCESS); + struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; + struct pfvf_acquire_resp_tlv *resp = &mbx->reply_virt->acquire_resp; + struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info; + struct vfpf_acquire_tlv *req = &mbx->req_virt->acquire; + u8 i, vfpf_status = PFVF_STATUS_SUCCESS; + struct pf_vf_resc *resc = &resp->resc; + + /* Validate FW compatibility */ + if (req->vfdev_info.fw_major != FW_MAJOR_VERSION || + req->vfdev_info.fw_minor != FW_MINOR_VERSION || + req->vfdev_info.fw_revision != FW_REVISION_VERSION || + req->vfdev_info.fw_engineering != FW_ENGINEERING_VERSION) { + DP_INFO(p_hwfn, + "VF[%d] is running an incompatible driver [VF needs FW %02x:%02x:%02x:%02x but Hypervisor is using %02x:%02x:%02x:%02x]\n", + vf->abs_vf_id, + req->vfdev_info.fw_major, + req->vfdev_info.fw_minor, + req->vfdev_info.fw_revision, + req->vfdev_info.fw_engineering, + FW_MAJOR_VERSION, + FW_MINOR_VERSION, + FW_REVISION_VERSION, FW_ENGINEERING_VERSION); + vfpf_status = PFVF_STATUS_NOT_SUPPORTED; + goto out; + } + + /* On 100g PFs, prevent old VFs from loading */ + if ((p_hwfn->cdev->num_hwfns > 1) && + !(req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_100G)) { + DP_INFO(p_hwfn, + "VF[%d] is running an old driver that doesn't support 100g\n", + vf->abs_vf_id); + vfpf_status = PFVF_STATUS_NOT_SUPPORTED; + goto out; + } + + memset(resp, 0, sizeof(*resp)); + + /* Fill in vf info stuff */ + vf->opaque_fid = req->vfdev_info.opaque_fid; + vf->num_mac_filters = 1; + vf->num_vlan_filters = QED_ETH_VF_NUM_VLAN_FILTERS; + + vf->vf_bulletin = req->bulletin_addr; + vf->bulletin.size = (vf->bulletin.size < req->bulletin_size) ? 
+ vf->bulletin.size : req->bulletin_size; + + /* fill in pfdev info */ + pfdev_info->chip_num = p_hwfn->cdev->chip_num; + pfdev_info->db_size = 0; + pfdev_info->indices_per_sb = PIS_PER_SB; + + pfdev_info->capabilities = PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED | + PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE; + if (p_hwfn->cdev->num_hwfns > 1) + pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_100G; + + pfdev_info->stats_info.mstats.address = + PXP_VF_BAR0_START_MSDM_ZONE_B + + offsetof(struct mstorm_vf_zone, non_trigger.eth_queue_stat); + pfdev_info->stats_info.mstats.len = + sizeof(struct eth_mstorm_per_queue_stat); + + pfdev_info->stats_info.ustats.address = + PXP_VF_BAR0_START_USDM_ZONE_B + + offsetof(struct ustorm_vf_zone, non_trigger.eth_queue_stat); + pfdev_info->stats_info.ustats.len = + sizeof(struct eth_ustorm_per_queue_stat); + + pfdev_info->stats_info.pstats.address = + PXP_VF_BAR0_START_PSDM_ZONE_B + + offsetof(struct pstorm_vf_zone, non_trigger.eth_queue_stat); + pfdev_info->stats_info.pstats.len = + sizeof(struct eth_pstorm_per_queue_stat); + + pfdev_info->stats_info.tstats.address = 0; + pfdev_info->stats_info.tstats.len = 0; + + memcpy(pfdev_info->port_mac, p_hwfn->hw_info.hw_mac_addr, ETH_ALEN); + + pfdev_info->fw_major = FW_MAJOR_VERSION; + pfdev_info->fw_minor = FW_MINOR_VERSION; + pfdev_info->fw_rev = FW_REVISION_VERSION; + pfdev_info->fw_eng = FW_ENGINEERING_VERSION; + pfdev_info->os_type = VFPF_ACQUIRE_OS_LINUX; + qed_mcp_get_mfw_ver(p_hwfn, p_ptt, &pfdev_info->mfw_ver, NULL); + + pfdev_info->dev_type = p_hwfn->cdev->type; + pfdev_info->chip_rev = p_hwfn->cdev->chip_rev; + + resc->num_rxqs = vf->num_rxqs; + resc->num_txqs = vf->num_txqs; + resc->num_sbs = vf->num_sbs; + for (i = 0; i < resc->num_sbs; i++) { + resc->hw_sbs[i].hw_sb_id = vf->igu_sbs[i]; + resc->hw_sbs[i].sb_qid = 0; + } + + for (i = 0; i < resc->num_rxqs; i++) { + qed_fw_l2_queue(p_hwfn, vf->vf_queues[i].fw_rx_qid, + (u16 *)&resc->hw_qid[i]); + resc->cid[i] = vf->vf_queues[i].fw_cid; + } + + resc->num_mac_filters = min_t(u8, vf->num_mac_filters, + req->resc_request.num_mac_filters); + resc->num_vlan_filters = min_t(u8, vf->num_vlan_filters, + req->resc_request.num_vlan_filters); + + /* This isn't really required as VF isn't limited, but some VFs might + * actually test this value, so need to provide it. 
+ */ + resc->num_mc_filters = req->resc_request.num_mc_filters; + + /* Fill agreed size of bulletin board in response */ + resp->bulletin_size = vf->bulletin.size; + + DP_VERBOSE(p_hwfn, + QED_MSG_IOV, + "VF[%d] ACQUIRE_RESPONSE: pfdev_info- chip_num=0x%x, db_size=%d, idx_per_sb=%d, pf_cap=0x%llx\n" + "resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d, n_vlans-%d\n", + vf->abs_vf_id, + resp->pfdev_info.chip_num, + resp->pfdev_info.db_size, + resp->pfdev_info.indices_per_sb, + resp->pfdev_info.capabilities, + resc->num_rxqs, + resc->num_txqs, + resc->num_sbs, + resc->num_mac_filters, + resc->num_vlan_filters); + vf->state = VF_ACQUIRED; + + /* Prepare Response */ +out: + qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_ACQUIRE, + sizeof(struct pfvf_acquire_resp_tlv), vfpf_status); } static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn, @@ -517,7 +867,11 @@ static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn, /* check if tlv type is known */ if (qed_iov_tlv_supported(mbx->first_tlv.tl.type)) { - qed_iov_process_mbx_dummy_resp(p_hwfn, p_ptt, p_vf); + switch (mbx->first_tlv.tl.type) { + case CHANNEL_TLV_ACQUIRE: + qed_iov_vf_mbx_acquire(p_hwfn, p_ptt, p_vf); + break; + } } else { /* unknown TLV - this may belong to a VF driver from the future * - a version written after this PF driver was written, which @@ -652,6 +1006,15 @@ void qed_schedule_iov(struct qed_hwfn *hwfn, enum qed_iov_wq_flag flag) queue_delayed_work(hwfn->iov_wq, &hwfn->iov_task, 0); } +void qed_vf_start_iov_wq(struct qed_dev *cdev) +{ + int i; + + for_each_hwfn(cdev, i) + queue_delayed_work(cdev->hwfns[i].iov_wq, + &cdev->hwfns[i].iov_task, 0); +} + static void qed_handle_vf_msg(struct qed_hwfn *hwfn) { u64 events[QED_VF_ARRAY_LENGTH]; diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.h b/drivers/net/ethernet/qlogic/qed/qed_sriov.h index 112216812a12..4f190d25ee14 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.h +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.h @@ -21,6 +21,9 @@ #endif #define IS_PF_SRIOV_ALLOC(p_hwfn) (!!((p_hwfn)->pf_iov_info)) +#define QED_MAX_VF_CHAINS_PER_PF 16 +#define QED_ETH_VF_NUM_VLAN_FILTERS 2 + /* This struct is part of qed_dev and contains data relevant to all hwfns; * Initialized only if SR-IOV cpabability is exposed in PCIe config space. */ @@ -60,7 +63,17 @@ struct qed_iov_vf_mbx { struct vfpf_first_tlv first_tlv; }; +struct qed_vf_q_info { + u16 fw_rx_qid; + u16 fw_tx_qid; + u8 fw_cid; + u8 rxq_active; + u8 txq_active; +}; + enum vf_state { + VF_FREE = 0, /* VF ready to be acquired holds no resc */ + VF_ACQUIRED, /* VF, acquired, but not initalized */ VF_STOPPED /* VF, Stopped */ }; @@ -82,6 +95,17 @@ struct qed_vf_info { #define QED_VF_ABS_ID(p_hwfn, p_vf) (QED_PATH_ID(p_hwfn) ? 
\ (p_vf)->abs_vf_id + MAX_NUM_VFS_BB : \ (p_vf)->abs_vf_id) + + u8 num_rxqs; + u8 num_txqs; + + u8 num_sbs; + + u8 num_mac_filters; + u8 num_vlan_filters; + struct qed_vf_q_info vf_queues[QED_MAX_VF_CHAINS_PER_PF]; + u16 igu_sbs[QED_MAX_VF_CHAINS_PER_PF]; + }; /* This structure is part of qed_hwfn and used only for PFs that have sriov @@ -133,6 +157,26 @@ u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn, u16 rel_vf_id); */ int qed_iov_hw_info(struct qed_hwfn *p_hwfn); +/** + * @brief qed_add_tlv - place a given tlv on the tlv buffer at next offset + * + * @param p_hwfn + * @param p_iov + * @param type + * @param length + * + * @return pointer to the newly placed tlv + */ +void *qed_add_tlv(struct qed_hwfn *p_hwfn, u8 **offset, u16 type, u16 length); + +/** + * @brief list the types and lengths of the tlvs on the buffer + * + * @param p_hwfn + * @param tlvs_list + */ +void qed_dp_tlv_list(struct qed_hwfn *p_hwfn, void *tlvs_list); + /** * @brief qed_iov_alloc - allocate sriov related resources * @@ -179,6 +223,7 @@ void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first); int qed_iov_wq_start(struct qed_dev *cdev); void qed_schedule_iov(struct qed_hwfn *hwfn, enum qed_iov_wq_flag flag); +void qed_vf_start_iov_wq(struct qed_dev *cdev); #else static inline u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn, u16 rel_vf_id) @@ -228,6 +273,10 @@ static inline void qed_schedule_iov(struct qed_hwfn *hwfn, enum qed_iov_wq_flag flag) { } + +static inline void qed_vf_start_iov_wq(struct qed_dev *cdev) +{ +} #endif #define qed_for_each_vf(_p_hwfn, _i) \ diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c new file mode 100644 index 000000000000..a3c8f4e1b9c1 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c @@ -0,0 +1,357 @@ +/* QLogic qed NIC Driver + * Copyright (c) 2015 QLogic Corporation + * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree. + */ + +#include "qed.h" +#include "qed_sriov.h" +#include "qed_vf.h" + +static void *qed_vf_pf_prep(struct qed_hwfn *p_hwfn, u16 type, u16 length) +{ + struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; + void *p_tlv; + + /* This lock is released when we receive PF's response + * in qed_send_msg2pf(). + * So, qed_vf_pf_prep() and qed_send_msg2pf() + * must come in sequence. 
+ */ + mutex_lock(&(p_iov->mutex)); + + DP_VERBOSE(p_hwfn, + QED_MSG_IOV, + "preparing to send 0x%04x tlv over vf pf channel\n", + type); + + /* Reset Requst offset */ + p_iov->offset = (u8 *)p_iov->vf2pf_request; + + /* Clear mailbox - both request and reply */ + memset(p_iov->vf2pf_request, 0, sizeof(union vfpf_tlvs)); + memset(p_iov->pf2vf_reply, 0, sizeof(union pfvf_tlvs)); + + /* Init type and length */ + p_tlv = qed_add_tlv(p_hwfn, &p_iov->offset, type, length); + + /* Init first tlv header */ + ((struct vfpf_first_tlv *)p_tlv)->reply_address = + (u64)p_iov->pf2vf_reply_phys; + + return p_tlv; +} + +static int qed_send_msg2pf(struct qed_hwfn *p_hwfn, u8 *done, u32 resp_size) +{ + union vfpf_tlvs *p_req = p_hwfn->vf_iov_info->vf2pf_request; + struct ustorm_trigger_vf_zone trigger; + struct ustorm_vf_zone *zone_data; + int rc = 0, time = 100; + + zone_data = (struct ustorm_vf_zone *)PXP_VF_BAR0_START_USDM_ZONE_B; + + /* output tlvs list */ + qed_dp_tlv_list(p_hwfn, p_req); + + /* need to add the END TLV to the message size */ + resp_size += sizeof(struct channel_list_end_tlv); + + /* Send TLVs over HW channel */ + memset(&trigger, 0, sizeof(struct ustorm_trigger_vf_zone)); + trigger.vf_pf_msg_valid = 1; + + DP_VERBOSE(p_hwfn, + QED_MSG_IOV, + "VF -> PF [%02x] message: [%08x, %08x] --> %p, %08x --> %p\n", + GET_FIELD(p_hwfn->hw_info.concrete_fid, + PXP_CONCRETE_FID_PFID), + upper_32_bits(p_hwfn->vf_iov_info->vf2pf_request_phys), + lower_32_bits(p_hwfn->vf_iov_info->vf2pf_request_phys), + &zone_data->non_trigger.vf_pf_msg_addr, + *((u32 *)&trigger), &zone_data->trigger); + + REG_WR(p_hwfn, + (uintptr_t)&zone_data->non_trigger.vf_pf_msg_addr.lo, + lower_32_bits(p_hwfn->vf_iov_info->vf2pf_request_phys)); + + REG_WR(p_hwfn, + (uintptr_t)&zone_data->non_trigger.vf_pf_msg_addr.hi, + upper_32_bits(p_hwfn->vf_iov_info->vf2pf_request_phys)); + + /* The message data must be written first, to prevent trigger before + * data is written. + */ + wmb(); + + REG_WR(p_hwfn, (uintptr_t)&zone_data->trigger, *((u32 *)&trigger)); + + /* When PF would be done with the response, it would write back to the + * `done' address. Poll until then. 
+ */ + while ((!*done) && time) { + msleep(25); + time--; + } + + if (!*done) { + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "VF <-- PF Timeout [Type %d]\n", + p_req->first_tlv.tl.type); + rc = -EBUSY; + goto exit; + } else { + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "PF response: %d [Type %d]\n", + *done, p_req->first_tlv.tl.type); + } + +exit: + mutex_unlock(&(p_hwfn->vf_iov_info->mutex)); + + return rc; +} + +#define VF_ACQUIRE_THRESH 3 +#define VF_ACQUIRE_MAC_FILTERS 1 + +static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn) +{ + struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; + struct pfvf_acquire_resp_tlv *resp = &p_iov->pf2vf_reply->acquire_resp; + struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info; + u8 rx_count = 1, tx_count = 1, num_sbs = 1; + u8 num_mac = VF_ACQUIRE_MAC_FILTERS; + bool resources_acquired = false; + struct vfpf_acquire_tlv *req; + int rc = 0, attempts = 0; + + /* clear mailbox and prep first tlv */ + req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_ACQUIRE, sizeof(*req)); + + /* starting filling the request */ + req->vfdev_info.opaque_fid = p_hwfn->hw_info.opaque_fid; + + req->resc_request.num_rxqs = rx_count; + req->resc_request.num_txqs = tx_count; + req->resc_request.num_sbs = num_sbs; + req->resc_request.num_mac_filters = num_mac; + req->resc_request.num_vlan_filters = QED_ETH_VF_NUM_VLAN_FILTERS; + + req->vfdev_info.os_type = VFPF_ACQUIRE_OS_LINUX; + req->vfdev_info.fw_major = FW_MAJOR_VERSION; + req->vfdev_info.fw_minor = FW_MINOR_VERSION; + req->vfdev_info.fw_revision = FW_REVISION_VERSION; + req->vfdev_info.fw_engineering = FW_ENGINEERING_VERSION; + + /* Fill capability field with any non-deprecated config we support */ + req->vfdev_info.capabilities |= VFPF_ACQUIRE_CAP_100G; + + /* pf 2 vf bulletin board address */ + req->bulletin_addr = p_iov->bulletin.phys; + req->bulletin_size = p_iov->bulletin.size; + + /* add list termination tlv */ + qed_add_tlv(p_hwfn, &p_iov->offset, + CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv)); + + while (!resources_acquired) { + DP_VERBOSE(p_hwfn, + QED_MSG_IOV, "attempting to acquire resources\n"); + + /* send acquire request */ + rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); + if (rc) + return rc; + + /* copy acquire response from buffer to p_hwfn */ + memcpy(&p_iov->acquire_resp, resp, sizeof(p_iov->acquire_resp)); + + attempts++; + + if (resp->hdr.status == PFVF_STATUS_SUCCESS) { + /* PF agrees to allocate our resources */ + if (!(resp->pfdev_info.capabilities & + PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE)) { + DP_INFO(p_hwfn, + "PF is using old incompatible driver; Either downgrade driver or request provider to update hypervisor version\n"); + return -EINVAL; + } + DP_VERBOSE(p_hwfn, QED_MSG_IOV, "resources acquired\n"); + resources_acquired = true; + } else if (resp->hdr.status == PFVF_STATUS_NO_RESOURCE && + attempts < VF_ACQUIRE_THRESH) { + DP_VERBOSE(p_hwfn, + QED_MSG_IOV, + "PF unwilling to fullfill resource request. 
Try PF recommended amount\n"); + + /* humble our request */ + req->resc_request.num_txqs = resp->resc.num_txqs; + req->resc_request.num_rxqs = resp->resc.num_rxqs; + req->resc_request.num_sbs = resp->resc.num_sbs; + req->resc_request.num_mac_filters = + resp->resc.num_mac_filters; + req->resc_request.num_vlan_filters = + resp->resc.num_vlan_filters; + + /* Clear response buffer */ + memset(p_iov->pf2vf_reply, 0, sizeof(union pfvf_tlvs)); + } else { + DP_ERR(p_hwfn, + "PF returned error %d to VF acquisition request\n", + resp->hdr.status); + return -EAGAIN; + } + } + + /* Update bulletin board size with response from PF */ + p_iov->bulletin.size = resp->bulletin_size; + + /* get HW info */ + p_hwfn->cdev->type = resp->pfdev_info.dev_type; + p_hwfn->cdev->chip_rev = resp->pfdev_info.chip_rev; + + p_hwfn->cdev->chip_num = pfdev_info->chip_num & 0xffff; + + /* Learn of the possibility of CMT */ + if (IS_LEAD_HWFN(p_hwfn)) { + if (resp->pfdev_info.capabilities & PFVF_ACQUIRE_CAP_100G) { + DP_NOTICE(p_hwfn, "100g VF\n"); + p_hwfn->cdev->num_hwfns = 2; + } + } + + return 0; +} + +int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn) +{ + struct qed_vf_iov *p_iov; + u32 reg; + + /* Set number of hwfns - might be overriden once leading hwfn learns + * actual configuration from PF. + */ + if (IS_LEAD_HWFN(p_hwfn)) + p_hwfn->cdev->num_hwfns = 1; + + /* Set the doorbell bar. Assumption: regview is set */ + p_hwfn->doorbells = (u8 __iomem *)p_hwfn->regview + + PXP_VF_BAR0_START_DQ; + + reg = PXP_VF_BAR0_ME_OPAQUE_ADDRESS; + p_hwfn->hw_info.opaque_fid = (u16)REG_RD(p_hwfn, reg); + + reg = PXP_VF_BAR0_ME_CONCRETE_ADDRESS; + p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, reg); + + /* Allocate vf sriov info */ + p_iov = kzalloc(sizeof(*p_iov), GFP_KERNEL); + if (!p_iov) { + DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_sriov'\n"); + return -ENOMEM; + } + + /* Allocate vf2pf msg */ + p_iov->vf2pf_request = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, + sizeof(union vfpf_tlvs), + &p_iov->vf2pf_request_phys, + GFP_KERNEL); + if (!p_iov->vf2pf_request) { + DP_NOTICE(p_hwfn, + "Failed to allocate `vf2pf_request' DMA memory\n"); + goto free_p_iov; + } + + p_iov->pf2vf_reply = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, + sizeof(union pfvf_tlvs), + &p_iov->pf2vf_reply_phys, + GFP_KERNEL); + if (!p_iov->pf2vf_reply) { + DP_NOTICE(p_hwfn, + "Failed to allocate `pf2vf_reply' DMA memory\n"); + goto free_vf2pf_request; + } + + DP_VERBOSE(p_hwfn, + QED_MSG_IOV, + "VF's Request mailbox [%p virt 0x%llx phys], Response mailbox [%p virt 0x%llx phys]\n", + p_iov->vf2pf_request, + (u64) p_iov->vf2pf_request_phys, + p_iov->pf2vf_reply, (u64)p_iov->pf2vf_reply_phys); + + /* Allocate Bulletin board */ + p_iov->bulletin.size = sizeof(struct qed_bulletin_content); + p_iov->bulletin.p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, + p_iov->bulletin.size, + &p_iov->bulletin.phys, + GFP_KERNEL); + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "VF's bulletin Board [%p virt 0x%llx phys 0x%08x bytes]\n", + p_iov->bulletin.p_virt, + (u64)p_iov->bulletin.phys, p_iov->bulletin.size); + + mutex_init(&p_iov->mutex); + + p_hwfn->vf_iov_info = p_iov; + + p_hwfn->hw_info.personality = QED_PCI_ETH; + + return qed_vf_pf_acquire(p_hwfn); + +free_vf2pf_request: + dma_free_coherent(&p_hwfn->cdev->pdev->dev, + sizeof(union vfpf_tlvs), + p_iov->vf2pf_request, p_iov->vf2pf_request_phys); +free_p_iov: + kfree(p_iov); + + return -ENOMEM; +} + +u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id) +{ + struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; + + if 
(!p_iov) { + DP_NOTICE(p_hwfn, "vf_sriov_info isn't initialized\n"); + return 0; + } + + return p_iov->acquire_resp.resc.hw_sbs[sb_id].hw_sb_id; +} + +void qed_vf_get_num_rxqs(struct qed_hwfn *p_hwfn, u8 *num_rxqs) +{ + *num_rxqs = p_hwfn->vf_iov_info->acquire_resp.resc.num_rxqs; +} + +void qed_vf_get_port_mac(struct qed_hwfn *p_hwfn, u8 *port_mac) +{ + memcpy(port_mac, + p_hwfn->vf_iov_info->acquire_resp.pfdev_info.port_mac, ETH_ALEN); +} + +void qed_vf_get_num_vlan_filters(struct qed_hwfn *p_hwfn, u8 *num_vlan_filters) +{ + struct qed_vf_iov *p_vf; + + p_vf = p_hwfn->vf_iov_info; + *num_vlan_filters = p_vf->acquire_resp.resc.num_vlan_filters; +} + +void qed_vf_get_fw_version(struct qed_hwfn *p_hwfn, + u16 *fw_major, u16 *fw_minor, + u16 *fw_rev, u16 *fw_eng) +{ + struct pf_vf_pfdev_info *info; + + info = &p_hwfn->vf_iov_info->acquire_resp.pfdev_info; + + *fw_major = info->fw_major; + *fw_minor = info->fw_minor; + *fw_rev = info->fw_rev; + *fw_eng = info->fw_eng; +} diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.h b/drivers/net/ethernet/qlogic/qed/qed_vf.h index f0d8de2be581..de9fe8501d21 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_vf.h +++ b/drivers/net/ethernet/qlogic/qed/qed_vf.h @@ -9,6 +9,22 @@ #ifndef _QED_VF_H #define _QED_VF_H +struct vf_pf_resc_request { + u8 num_rxqs; + u8 num_txqs; + u8 num_sbs; + u8 num_mac_filters; + u8 num_vlan_filters; + u8 num_mc_filters; + u16 padding; +}; + +struct hw_sb_info { + u16 hw_sb_id; + u8 sb_qid; + u8 padding[5]; +}; + enum { PFVF_STATUS_WAITING, PFVF_STATUS_SUCCESS, @@ -52,6 +68,107 @@ struct channel_list_end_tlv { u8 padding[4]; }; +#define VFPF_ACQUIRE_OS_LINUX (0) +#define VFPF_ACQUIRE_OS_WINDOWS (1) +#define VFPF_ACQUIRE_OS_ESX (2) +#define VFPF_ACQUIRE_OS_SOLARIS (3) +#define VFPF_ACQUIRE_OS_LINUX_USERSPACE (4) + +struct vfpf_acquire_tlv { + struct vfpf_first_tlv first_tlv; + + struct vf_pf_vfdev_info { +#define VFPF_ACQUIRE_CAP_OBSOLETE (1 << 0) +#define VFPF_ACQUIRE_CAP_100G (1 << 1) /* VF can support 100g */ + u64 capabilities; + u8 fw_major; + u8 fw_minor; + u8 fw_revision; + u8 fw_engineering; + u32 driver_version; + u16 opaque_fid; /* ME register value */ + u8 os_type; /* VFPF_ACQUIRE_OS_* value */ + u8 padding[5]; + } vfdev_info; + + struct vf_pf_resc_request resc_request; + + u64 bulletin_addr; + u32 bulletin_size; + u32 padding; +}; + +struct pfvf_storm_stats { + u32 address; + u32 len; +}; + +struct pfvf_stats_info { + struct pfvf_storm_stats mstats; + struct pfvf_storm_stats pstats; + struct pfvf_storm_stats tstats; + struct pfvf_storm_stats ustats; +}; + +struct pfvf_acquire_resp_tlv { + struct pfvf_tlv hdr; + + struct pf_vf_pfdev_info { + u32 chip_num; + u32 mfw_ver; + + u16 fw_major; + u16 fw_minor; + u16 fw_rev; + u16 fw_eng; + + u64 capabilities; +#define PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED BIT(0) +#define PFVF_ACQUIRE_CAP_100G BIT(1) /* If set, 100g PF */ +/* There are old PF versions where the PF might mistakenly override the sanity + * mechanism [version-based] and allow a VF that can't be supported to pass + * the acquisition phase. + * To overcome this, PFs now indicate that they're past that point and the new + * VFs would fail probe on the older PFs that fail to do so. 
+ */ +#define PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE BIT(2) + + u16 db_size; + u8 indices_per_sb; + u8 os_type; + + /* These should match the PF's qed_dev values */ + u16 chip_rev; + u8 dev_type; + + u8 padding; + + struct pfvf_stats_info stats_info; + + u8 port_mac[ETH_ALEN]; + u8 padding2[2]; + } pfdev_info; + + struct pf_vf_resc { +#define PFVF_MAX_QUEUES_PER_VF 16 +#define PFVF_MAX_SBS_PER_VF 16 + struct hw_sb_info hw_sbs[PFVF_MAX_SBS_PER_VF]; + u8 hw_qid[PFVF_MAX_QUEUES_PER_VF]; + u8 cid[PFVF_MAX_QUEUES_PER_VF]; + + u8 num_rxqs; + u8 num_txqs; + u8 num_sbs; + u8 num_mac_filters; + u8 num_vlan_filters; + u8 num_mc_filters; + u8 padding[2]; + } resc; + + u32 bulletin_size; + u32 padding; +}; + #define TLV_BUFFER_SIZE 1024 struct tlv_buffer_size { u8 tlv_buffer[TLV_BUFFER_SIZE]; @@ -59,12 +176,14 @@ struct tlv_buffer_size { union vfpf_tlvs { struct vfpf_first_tlv first_tlv; + struct vfpf_acquire_tlv acquire; struct channel_list_end_tlv list_end; struct tlv_buffer_size tlv_buf_size; }; union pfvf_tlvs { struct pfvf_def_resp_tlv default_resp; + struct pfvf_acquire_resp_tlv acquire_resp; struct tlv_buffer_size tlv_buf_size; }; @@ -86,8 +205,118 @@ struct qed_bulletin { enum { CHANNEL_TLV_NONE, /* ends tlv sequence */ + CHANNEL_TLV_ACQUIRE, CHANNEL_TLV_LIST_END, CHANNEL_TLV_MAX }; +/* This data is held in the qed_hwfn structure for VFs only. */ +struct qed_vf_iov { + union vfpf_tlvs *vf2pf_request; + dma_addr_t vf2pf_request_phys; + union pfvf_tlvs *pf2vf_reply; + dma_addr_t pf2vf_reply_phys; + + /* Should be taken whenever the mailbox buffers are accessed */ + struct mutex mutex; + u8 *offset; + + /* Bulletin Board */ + struct qed_bulletin bulletin; + struct qed_bulletin_content bulletin_shadow; + + /* we set aside a copy of the acquire response */ + struct pfvf_acquire_resp_tlv acquire_resp; +}; + +#ifdef CONFIG_QED_SRIOV +/** + * @brief Get number of Rx queues allocated for VF by qed + * + * @param p_hwfn + * @param num_rxqs - allocated RX queues + */ +void qed_vf_get_num_rxqs(struct qed_hwfn *p_hwfn, u8 *num_rxqs); + +/** + * @brief Get port mac address for VF + * + * @param p_hwfn + * @param port_mac - destination location for port mac + */ +void qed_vf_get_port_mac(struct qed_hwfn *p_hwfn, u8 *port_mac); + +/** + * @brief Get number of VLAN filters allocated for VF by qed + * + * @param p_hwfn + * @param num_rxqs - allocated VLAN filters + */ +void qed_vf_get_num_vlan_filters(struct qed_hwfn *p_hwfn, + u8 *num_vlan_filters); + +/** + * @brief Set firmware version information in dev_info from VFs acquire response tlv + * + * @param p_hwfn + * @param fw_major + * @param fw_minor + * @param fw_rev + * @param fw_eng + */ +void qed_vf_get_fw_version(struct qed_hwfn *p_hwfn, + u16 *fw_major, u16 *fw_minor, + u16 *fw_rev, u16 *fw_eng); + +/** + * @brief hw preparation for VF + * sends ACQUIRE message + * + * @param p_hwfn + * + * @return int + */ +int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn); + +/** + * @brief qed_vf_get_igu_sb_id - Get the IGU SB ID for a given + * sb_id. 
For VFs igu sbs don't have to be contiguous + * + * @param p_hwfn + * @param sb_id + * + * @return INLINE u16 + */ +u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id); +#else +static inline void qed_vf_get_num_rxqs(struct qed_hwfn *p_hwfn, u8 *num_rxqs) +{ +} + +static inline void qed_vf_get_port_mac(struct qed_hwfn *p_hwfn, u8 *port_mac) +{ +} + +static inline void qed_vf_get_num_vlan_filters(struct qed_hwfn *p_hwfn, + u8 *num_vlan_filters) +{ +} + +static inline void qed_vf_get_fw_version(struct qed_hwfn *p_hwfn, + u16 *fw_major, u16 *fw_minor, + u16 *fw_rev, u16 *fw_eng) +{ +} + +static inline int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn) +{ + return -EINVAL; +} + +static inline u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id) +{ + return 0; +} +#endif + #endif diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index 075faa52eb48..2d5f2735dc0a 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -2283,8 +2283,9 @@ enum qede_probe_mode { }; static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level, - enum qede_probe_mode mode) + bool is_vf, enum qede_probe_mode mode) { + struct qed_probe_params probe_params; struct qed_slowpath_params params; struct qed_dev_eth_info dev_info; struct qede_dev *edev; @@ -2294,8 +2295,12 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level, if (unlikely(dp_level & QED_LEVEL_INFO)) pr_notice("Starting qede probe\n"); - cdev = qed_ops->common->probe(pdev, QED_PROTOCOL_ETH, - dp_module, dp_level); + memset(&probe_params, 0, sizeof(probe_params)); + probe_params.protocol = QED_PROTOCOL_ETH; + probe_params.dp_module = dp_module; + probe_params.dp_level = dp_level; + probe_params.is_vf = is_vf; + cdev = qed_ops->common->probe(pdev, &probe_params); if (!cdev) { rc = -ENODEV; goto err0; @@ -2365,7 +2370,7 @@ static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id) qede_config_debug(debug, &dp_module, &dp_level); - return __qede_probe(pdev, dp_module, dp_level, + return __qede_probe(pdev, dp_module, dp_level, false, QEDE_PROBE_NORMAL); } diff --git a/include/linux/qed/common_hsi.h b/include/linux/qed/common_hsi.h index 8914d271ba73..3f14c7efe68f 100644 --- a/include/linux/qed/common_hsi.h +++ b/include/linux/qed/common_hsi.h @@ -285,6 +285,63 @@ #define PXP_ILT_PAGE_SIZE_NUM_BITS_MIN 12 #define PXP_ILT_BLOCK_FACTOR_MULTIPLIER 1024 +#define PXP_VF_BAR0_START_IGU 0 +#define PXP_VF_BAR0_IGU_LENGTH 0x3000 +#define PXP_VF_BAR0_END_IGU (PXP_VF_BAR0_START_IGU + \ + PXP_VF_BAR0_IGU_LENGTH - 1) + +#define PXP_VF_BAR0_START_DQ 0x3000 +#define PXP_VF_BAR0_DQ_LENGTH 0x200 +#define PXP_VF_BAR0_DQ_OPAQUE_OFFSET 0 +#define PXP_VF_BAR0_ME_OPAQUE_ADDRESS (PXP_VF_BAR0_START_DQ + \ + PXP_VF_BAR0_DQ_OPAQUE_OFFSET) +#define PXP_VF_BAR0_ME_CONCRETE_ADDRESS (PXP_VF_BAR0_ME_OPAQUE_ADDRESS \ + + 4) +#define PXP_VF_BAR0_END_DQ (PXP_VF_BAR0_START_DQ + \ + PXP_VF_BAR0_DQ_LENGTH - 1) + +#define PXP_VF_BAR0_START_TSDM_ZONE_B 0x3200 +#define PXP_VF_BAR0_SDM_LENGTH_ZONE_B 0x200 +#define PXP_VF_BAR0_END_TSDM_ZONE_B (PXP_VF_BAR0_START_TSDM_ZONE_B \ + + \ + PXP_VF_BAR0_SDM_LENGTH_ZONE_B \ + - 1) + +#define PXP_VF_BAR0_START_MSDM_ZONE_B 0x3400 +#define PXP_VF_BAR0_END_MSDM_ZONE_B (PXP_VF_BAR0_START_MSDM_ZONE_B \ + + \ + PXP_VF_BAR0_SDM_LENGTH_ZONE_B \ + - 1) + +#define PXP_VF_BAR0_START_USDM_ZONE_B 0x3600 +#define PXP_VF_BAR0_END_USDM_ZONE_B (PXP_VF_BAR0_START_USDM_ZONE_B \ + + \ + PXP_VF_BAR0_SDM_LENGTH_ZONE_B 
\ + - 1) + +#define PXP_VF_BAR0_START_XSDM_ZONE_B 0x3800 +#define PXP_VF_BAR0_END_XSDM_ZONE_B (PXP_VF_BAR0_START_XSDM_ZONE_B \ + + \ + PXP_VF_BAR0_SDM_LENGTH_ZONE_B \ + - 1) + +#define PXP_VF_BAR0_START_YSDM_ZONE_B 0x3a00 +#define PXP_VF_BAR0_END_YSDM_ZONE_B (PXP_VF_BAR0_START_YSDM_ZONE_B \ + + \ + PXP_VF_BAR0_SDM_LENGTH_ZONE_B \ + - 1) + +#define PXP_VF_BAR0_START_PSDM_ZONE_B 0x3c00 +#define PXP_VF_BAR0_END_PSDM_ZONE_B (PXP_VF_BAR0_START_PSDM_ZONE_B \ + + \ + PXP_VF_BAR0_SDM_LENGTH_ZONE_B \ + - 1) + +#define PXP_VF_BAR0_START_SDM_ZONE_A 0x4000 +#define PXP_VF_BAR0_END_SDM_ZONE_A 0x10000 + +#define PXP_VF_BAR0_GRC_WINDOW_LENGTH 32 + /* ILT Records */ #define PXP_NUM_ILT_RECORDS_BB 7600 #define PXP_NUM_ILT_RECORDS_K2 11000 diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index d72c832a9397..76a6f168a190 100644 --- a/include/linux/qed/qed_if.h +++ b/include/linux/qed/qed_if.h @@ -140,6 +140,13 @@ struct qed_link_output { u32 pause_config; }; +struct qed_probe_params { + enum qed_protocol protocol; + u32 dp_module; + u8 dp_level; + bool is_vf; +}; + #define QED_DRV_VER_STR_SIZE 12 struct qed_slowpath_params { u32 int_mode; @@ -207,8 +214,7 @@ struct qed_common_ops { struct qed_selftest_ops *selftest; struct qed_dev* (*probe)(struct pci_dev *dev, - enum qed_protocol protocol, - u32 dp_module, u8 dp_level); + struct qed_probe_params *params); void (*remove)(struct qed_dev *cdev); From 0b55e27d563f493665693b494735574e68c3c5b9 Mon Sep 17 00:00:00 2001 From: Yuval Mintz Date: Wed, 11 May 2016 16:36:15 +0300 Subject: [PATCH 1504/1649] qed: IOV configure and FLR While previous patches have already added the necessary logic to probe VFs as well as enabling them in the HW, this patch adds the ability to support VF FLR & SRIOV disable. It then wraps both flows together into the first IOV callback to be provided to the protocol driver - `configure'. This would later to be used to enable and disable SRIOV in the adapter. Signed-off-by: Yuval Mintz Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/qed/qed_dev.c | 17 +- drivers/net/ethernet/qlogic/qed/qed_dev_api.h | 4 +- drivers/net/ethernet/qlogic/qed/qed_hsi.h | 11 +- drivers/net/ethernet/qlogic/qed/qed_l2.c | 7 + drivers/net/ethernet/qlogic/qed/qed_main.c | 1 + drivers/net/ethernet/qlogic/qed/qed_mcp.c | 72 ++ drivers/net/ethernet/qlogic/qed/qed_mcp.h | 12 + .../net/ethernet/qlogic/qed/qed_reg_addr.h | 10 + drivers/net/ethernet/qlogic/qed/qed_sp.h | 1 + drivers/net/ethernet/qlogic/qed/qed_sriov.c | 660 ++++++++++++++++++ drivers/net/ethernet/qlogic/qed/qed_sriov.h | 33 +- drivers/net/ethernet/qlogic/qed/qed_vf.c | 97 +++ drivers/net/ethernet/qlogic/qed/qed_vf.h | 45 ++ include/linux/qed/qed_eth_if.h | 4 + include/linux/qed/qed_iov_if.h | 20 + 15 files changed, 983 insertions(+), 11 deletions(-) create mode 100644 include/linux/qed/qed_iov_if.h diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index 362e8db2b374..78e25cf6836f 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -31,6 +31,7 @@ #include "qed_reg_addr.h" #include "qed_sp.h" #include "qed_sriov.h" +#include "qed_vf.h" /* API common to all protocols */ enum BAR_ID { @@ -420,8 +421,7 @@ void qed_resc_setup(struct qed_dev *cdev) #define FINAL_CLEANUP_POLL_CNT (100) #define FINAL_CLEANUP_POLL_TIME (10) int qed_final_cleanup(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - u16 id) + struct qed_ptt *p_ptt, u16 id, bool is_vf) { u32 command = 0, addr, count = FINAL_CLEANUP_POLL_CNT; int rc = -EBUSY; @@ -429,6 +429,9 @@ int qed_final_cleanup(struct qed_hwfn *p_hwfn, addr = GTT_BAR0_MAP_REG_USDM_RAM + USTORM_FLR_FINAL_ACK_OFFSET(p_hwfn->rel_pf_id); + if (is_vf) + id += 0x10; + command |= X_FINAL_CLEANUP_AGG_INT << SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_SHIFT; command |= 1 << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_SHIFT; @@ -663,7 +666,7 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn, STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_ROCE_RT_OFFSET, 0); /* Cleanup chip from previous driver if such remains exist */ - rc = qed_final_cleanup(p_hwfn, p_ptt, rel_pf_id); + rc = qed_final_cleanup(p_hwfn, p_ptt, rel_pf_id, false); if (rc != 0) return rc; @@ -880,7 +883,7 @@ int qed_hw_stop(struct qed_dev *cdev) DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Stopping hw/fw\n"); if (IS_VF(cdev)) { - /* To be implemented in a later patch */ + qed_vf_pf_int_cleanup(p_hwfn); continue; } @@ -989,7 +992,9 @@ int qed_hw_reset(struct qed_dev *cdev) struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; if (IS_VF(cdev)) { - /* Will be implemented in a later patch */ + rc = qed_vf_pf_reset(p_hwfn); + if (rc) + return rc; continue; } @@ -1590,7 +1595,7 @@ void qed_hw_remove(struct qed_dev *cdev) struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; if (IS_VF(cdev)) { - /* Will be implemented in a later patch */ + qed_vf_pf_release(p_hwfn); continue; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h index f567371fe304..dde364d6f502 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h +++ b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h @@ -303,11 +303,11 @@ int qed_fw_rss_eng(struct qed_hwfn *p_hwfn, * @param p_hwfn * @param p_ptt * @param id - For PF, engine-relative. For VF, PF-relative. + * @param is_vf - true iff cleanup is made for a VF. 
* * @return int */ int qed_final_cleanup(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - u16 id); + struct qed_ptt *p_ptt, u16 id, bool is_vf); #endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h index c511106870d0..82b7727d090b 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h @@ -30,7 +30,7 @@ enum common_event_opcode { COMMON_EVENT_PF_START, COMMON_EVENT_PF_STOP, COMMON_EVENT_VF_START, - COMMON_EVENT_RESERVED2, + COMMON_EVENT_VF_STOP, COMMON_EVENT_VF_PF_CHANNEL, COMMON_EVENT_RESERVED4, COMMON_EVENT_RESERVED5, @@ -45,7 +45,7 @@ enum common_ramrod_cmd_id { COMMON_RAMROD_PF_START /* PF Function Start Ramrod */, COMMON_RAMROD_PF_STOP /* PF Function Stop Ramrod */, COMMON_RAMROD_VF_START, - COMMON_RAMROD_RESERVED2, + COMMON_RAMROD_VF_STOP, COMMON_RAMROD_PF_UPDATE, COMMON_RAMROD_EMPTY, MAX_COMMON_RAMROD_CMD_ID @@ -741,6 +741,13 @@ struct vf_start_ramrod_data { u8 reserved[3]; }; +struct vf_stop_ramrod_data { + u8 vf_id; + u8 reserved0; + __le16 reserved1; + __le32 reserved2; +}; + struct atten_status_block { __le32 atten_bits; __le32 atten_ack; diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c index 8bcbf92b776f..5978bb57f883 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c @@ -2066,8 +2066,15 @@ static int qed_fp_cqe_completion(struct qed_dev *dev, cqe); } +#ifdef CONFIG_QED_SRIOV +extern const struct qed_iov_hv_ops qed_iov_ops_pass; +#endif + static const struct qed_eth_ops qed_eth_ops_pass = { .common = &qed_common_ops_pass, +#ifdef CONFIG_QED_SRIOV + .iov = &qed_iov_ops_pass, +#endif .fill_dev_info = &qed_fill_eth_dev_info, .register_ops = &qed_register_eth_ops, .vport_start = &qed_start_vport, diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index 898347bd2db7..e98610e5bf70 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -897,6 +897,7 @@ static int qed_slowpath_stop(struct qed_dev *cdev) if (IS_PF(cdev)) { qed_free_stream_mem(cdev); + qed_sriov_disable(cdev, true); qed_nic_stop(cdev); qed_slowpath_irq_free(cdev); diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index 83175007f616..2be943b91916 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c @@ -442,6 +442,75 @@ int qed_mcp_load_req(struct qed_hwfn *p_hwfn, return 0; } +static void qed_mcp_handle_vf_flr(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt) +{ + u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base, + PUBLIC_PATH); + u32 mfw_path_offsize = qed_rd(p_hwfn, p_ptt, addr); + u32 path_addr = SECTION_ADDR(mfw_path_offsize, + QED_PATH_ID(p_hwfn)); + u32 disabled_vfs[VF_MAX_STATIC / 32]; + int i; + + DP_VERBOSE(p_hwfn, + QED_MSG_SP, + "Reading Disabled VF information from [offset %08x], path_addr %08x\n", + mfw_path_offsize, path_addr); + + for (i = 0; i < (VF_MAX_STATIC / 32); i++) { + disabled_vfs[i] = qed_rd(p_hwfn, p_ptt, + path_addr + + offsetof(struct public_path, + mcp_vf_disabled) + + sizeof(u32) * i); + DP_VERBOSE(p_hwfn, (QED_MSG_SP | QED_MSG_IOV), + "FLR-ed VFs [%08x,...,%08x] - %08x\n", + i * 32, (i + 1) * 32 - 1, disabled_vfs[i]); + } + + if (qed_iov_mark_vf_flr(p_hwfn, disabled_vfs)) + qed_schedule_iov(p_hwfn, QED_IOV_WQ_FLR_FLAG); +} + +int qed_mcp_ack_vf_flr(struct qed_hwfn *p_hwfn, + struct 
qed_ptt *p_ptt, u32 *vfs_to_ack) +{ + u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base, + PUBLIC_FUNC); + u32 mfw_func_offsize = qed_rd(p_hwfn, p_ptt, addr); + u32 func_addr = SECTION_ADDR(mfw_func_offsize, + MCP_PF_ID(p_hwfn)); + struct qed_mcp_mb_params mb_params; + union drv_union_data union_data; + int rc; + int i; + + for (i = 0; i < (VF_MAX_STATIC / 32); i++) + DP_VERBOSE(p_hwfn, (QED_MSG_SP | QED_MSG_IOV), + "Acking VFs [%08x,...,%08x] - %08x\n", + i * 32, (i + 1) * 32 - 1, vfs_to_ack[i]); + + memset(&mb_params, 0, sizeof(mb_params)); + mb_params.cmd = DRV_MSG_CODE_VF_DISABLED_DONE; + memcpy(&union_data.ack_vf_disabled, vfs_to_ack, VF_MAX_STATIC / 8); + mb_params.p_data_src = &union_data; + rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); + if (rc) { + DP_NOTICE(p_hwfn, "Failed to pass ACK for VF flr to MFW\n"); + return -EBUSY; + } + + /* Clear the ACK bits */ + for (i = 0; i < (VF_MAX_STATIC / 32); i++) + qed_wr(p_hwfn, p_ptt, + func_addr + + offsetof(struct public_func, drv_ack_vf_disabled) + + i * sizeof(u32), 0); + + return rc; +} + static void qed_mcp_handle_transceiver_change(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { @@ -753,6 +822,9 @@ int qed_mcp_handle_events(struct qed_hwfn *p_hwfn, case MFW_DRV_MSG_LINK_CHANGE: qed_mcp_handle_link_change(p_hwfn, p_ptt, false); break; + case MFW_DRV_MSG_VF_DISABLED: + qed_mcp_handle_vf_flr(p_hwfn, p_ptt); + break; case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE: qed_mcp_handle_transceiver_change(p_hwfn, p_ptt); break; diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h index e3d5cdfe8e8d..6dd59eb7f4c6 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h @@ -392,6 +392,18 @@ int qed_mcp_load_req(struct qed_hwfn *p_hwfn, void qed_mcp_read_mb(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); +/** + * @brief Ack to mfw that driver finished FLR process for VFs + * + * @param p_hwfn + * @param p_ptt + * @param vfs_to_ack - bit mask of all engine VFs for which the PF acks. + * + * @param return int - 0 upon success. + */ +int qed_mcp_ack_vf_flr(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u32 *vfs_to_ack); + /** * @brief - calls during init to read shmem of all function-related info. 
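For reference, the disabled-VFs information handled above is a plain bitmap: qed_mcp_handle_vf_flr() reads VF_MAX_STATIC / 32 32-bit words from the path section of shmem, where bit (vfid % 32) of word (vfid / 32) marks VF vfid as FLR-ed, and qed_mcp_ack_vf_flr() later writes the same layout back as the ACK. A minimal standalone sketch of that decoding, outside the driver (the VF_MAX_STATIC value below is assumed for illustration only):

#include <stdio.h>

#define VF_MAX_STATIC           192     /* assumed value, for illustration only */
#define VF_BITMAP_DWORDS        (VF_MAX_STATIC / 32)

/* Walk the per-path "disabled VFs" bitmap using the same word/bit
 * indexing as qed_mcp_handle_vf_flr() and print every FLR-ed VF id.
 */
static void dump_flred_vfs(const unsigned int bitmap[VF_BITMAP_DWORDS])
{
        int vfid;

        for (vfid = 0; vfid < VF_MAX_STATIC; vfid++)
                if (bitmap[vfid / 32] & (1u << (vfid % 32)))
                        printf("VF %d was FLR-ed\n", vfid);
}

int main(void)
{
        unsigned int bitmap[VF_BITMAP_DWORDS] = { 0 };

        bitmap[0] |= 1u << 5;   /* VF 5  */
        bitmap[1] |= 1u << 2;   /* VF 34 */
        dump_flred_vfs(bitmap);
        return 0;
}
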
* diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h index a508b6b7f1d4..80a621754f13 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h +++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h @@ -41,6 +41,8 @@ 0x2aa16cUL #define PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR \ 0x2aa118UL +#define PSWHST_REG_ZONE_PERMISSION_TABLE \ + 0x2a0800UL #define BAR0_MAP_REG_MSDM_RAM \ 0x1d00000UL #define BAR0_MAP_REG_USDM_RAM \ @@ -79,6 +81,8 @@ 0x2f2eb0UL #define DORQ_REG_PF_DB_ENABLE \ 0x100508UL +#define DORQ_REG_VF_USAGE_CNT \ + 0x1009c4UL #define QM_REG_PF_EN \ 0x2f2ea4UL #define TCFC_REG_STRONG_ENABLE_PF \ @@ -167,6 +171,10 @@ 0x040200UL #define PBF_REG_INIT \ 0xd80000UL +#define PBF_REG_NUM_BLOCKS_ALLOCATED_PROD_VOQ0 \ + 0xd806c8UL +#define PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 \ + 0xd806ccUL #define PTU_REG_ATC_INIT_ARRAY \ 0x560000UL #define PCM_REG_INIT \ @@ -391,6 +399,8 @@ 0x1d0000UL #define IGU_REG_PF_CONFIGURATION \ 0x180800UL +#define IGU_REG_VF_CONFIGURATION \ + 0x180804UL #define MISC_REG_AEU_ENABLE1_IGU_OUT_0 \ 0x00849cUL #define MISC_REG_AEU_AFTER_INVERT_1_IGU \ diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h index dde69090379f..c2999cb5d1e2 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h @@ -64,6 +64,7 @@ union ramrod_data { struct vport_filter_update_ramrod_data vport_filter_update; struct vf_start_ramrod_data vf_start; + struct vf_stop_ramrod_data vf_stop; }; #define EQ_MAX_CREDIT 0xffffffff diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c index 699d96fb87f0..750166db57cd 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c @@ -6,6 +6,7 @@ * this source tree. 
*/ +#include #include "qed_cxt.h" #include "qed_hsi.h" #include "qed_hw.h" @@ -48,6 +49,33 @@ static int qed_sp_vf_start(struct qed_hwfn *p_hwfn, return qed_spq_post(p_hwfn, p_ent, NULL); } +static int qed_sp_vf_stop(struct qed_hwfn *p_hwfn, + u32 concrete_vfid, u16 opaque_vfid) +{ + struct vf_stop_ramrod_data *p_ramrod = NULL; + struct qed_spq_entry *p_ent = NULL; + struct qed_sp_init_data init_data; + int rc = -EINVAL; + + /* Get SPQ entry */ + memset(&init_data, 0, sizeof(init_data)); + init_data.cid = qed_spq_get_cid(p_hwfn); + init_data.opaque_fid = opaque_vfid; + init_data.comp_mode = QED_SPQ_MODE_EBLOCK; + + rc = qed_sp_init_request(p_hwfn, &p_ent, + COMMON_RAMROD_VF_STOP, + PROTOCOLID_COMMON, &init_data); + if (rc) + return rc; + + p_ramrod = &p_ent->ramrod.vf_stop; + + p_ramrod->vf_id = GET_FIELD(concrete_vfid, PXP_CONCRETE_FID_VFID); + + return qed_spq_post(p_hwfn, p_ent, NULL); +} + bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn, int rel_vf_id, bool b_enabled_only) { @@ -422,6 +450,34 @@ static bool qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn, int vfid) return true; } +static void qed_iov_set_vf_to_disable(struct qed_dev *cdev, + u16 rel_vf_id, u8 to_disable) +{ + struct qed_vf_info *vf; + int i; + + for_each_hwfn(cdev, i) { + struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; + + vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false); + if (!vf) + continue; + + vf->to_disable = to_disable; + } +} + +void qed_iov_set_vfs_to_disable(struct qed_dev *cdev, u8 to_disable) +{ + u16 i; + + if (!IS_QED_SRIOV(cdev)) + return; + + for (i = 0; i < cdev->p_iov_info->total_vfs; i++) + qed_iov_set_vf_to_disable(cdev, i, to_disable); +} + static void qed_iov_vf_pglue_clear_err(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u8 abs_vfid) { @@ -430,6 +486,27 @@ static void qed_iov_vf_pglue_clear_err(struct qed_hwfn *p_hwfn, 1 << (abs_vfid & 0x1f)); } +static void qed_iov_vf_igu_set_int(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_vf_info *vf, bool enable) +{ + u32 igu_vf_conf; + + qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid); + + igu_vf_conf = qed_rd(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION); + + if (enable) + igu_vf_conf |= IGU_VF_CONF_MSI_MSIX_EN; + else + igu_vf_conf &= ~IGU_VF_CONF_MSI_MSIX_EN; + + qed_wr(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION, igu_vf_conf); + + /* unpretend */ + qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid); +} + static int qed_iov_enable_vf_access(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_vf_info *vf) @@ -437,6 +514,9 @@ static int qed_iov_enable_vf_access(struct qed_hwfn *p_hwfn, u32 igu_vf_conf = IGU_VF_CONF_FUNC_EN; int rc; + if (vf->to_disable) + return 0; + DP_VERBOSE(p_hwfn, QED_MSG_IOV, "Enable internal access for vf %x [abs %x]\n", @@ -475,6 +555,36 @@ static int qed_iov_enable_vf_access(struct qed_hwfn *p_hwfn, return rc; } +/** + * @brief qed_iov_config_perm_table - configure the permission + * zone table. + * In E4, queue zone permission table size is 320x9. 
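The permission-table comment continues below; its entries boil down to a 9-bit value, with bit 8 as the valid bit and bits 7:0 carrying the absolute VF id, which qed_iov_config_perm_table() writes as abs_vf_id | (1 << 8). A tiny standalone illustration of composing and decoding such an entry (plain C, not driver code):

#include <stdio.h>

/* Queue-zone permission entry as written by qed_iov_config_perm_table():
 * bit 8 is the valid bit, bits 7:0 carry the absolute VF id.
 */
#define PERM_ENTRY_VALID        (1u << 8)

static unsigned int perm_entry(unsigned int abs_vf_id, int enable)
{
        return enable ? (abs_vf_id | PERM_ENTRY_VALID) : 0;
}

int main(void)
{
        unsigned int entry = perm_entry(0x2a, 1);

        printf("valid=%u vf=%u\n", !!(entry & PERM_ENTRY_VALID), entry & 0xff);
        return 0;
}
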
There + * are 320 VF queues for single engine device (256 for dual + * engine device), and each entry has the following format: + * {Valid, VF[7:0]} + * @param p_hwfn + * @param p_ptt + * @param vf + * @param enable + */ +static void qed_iov_config_perm_table(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_vf_info *vf, u8 enable) +{ + u32 reg_addr, val; + u16 qzone_id = 0; + int qid; + + for (qid = 0; qid < vf->num_rxqs; qid++) { + qed_fw_l2_queue(p_hwfn, vf->vf_queues[qid].fw_rx_qid, + &qzone_id); + + reg_addr = PSWHST_REG_ZONE_PERMISSION_TABLE + qzone_id * 4; + val = enable ? (vf->abs_vf_id | (1 << 8)) : 0; + qed_wr(p_hwfn, p_ptt, reg_addr, val); + } +} + static u8 qed_iov_alloc_vf_igu_sbs(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_vf_info *vf, u16 num_rx_queues) @@ -525,6 +635,32 @@ static u8 qed_iov_alloc_vf_igu_sbs(struct qed_hwfn *p_hwfn, return vf->num_sbs; } +static void qed_iov_free_vf_igu_sbs(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_vf_info *vf) +{ + struct qed_igu_info *p_info = p_hwfn->hw_info.p_igu_info; + int idx, igu_id; + u32 addr, val; + + /* Invalidate igu CAM lines and mark them as free */ + for (idx = 0; idx < vf->num_sbs; idx++) { + igu_id = vf->igu_sbs[idx]; + addr = IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id; + + val = qed_rd(p_hwfn, p_ptt, addr); + SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0); + qed_wr(p_hwfn, p_ptt, addr, val); + + p_info->igu_map.igu_blocks[igu_id].status |= + QED_IGU_STATUS_FREE; + + p_hwfn->hw_info.p_igu_info->free_blks++; + } + + vf->num_sbs = 0; +} + static int qed_iov_init_hw_for_vf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 rel_vf_id, u16 num_rx_queues) @@ -598,6 +734,54 @@ static int qed_iov_init_hw_for_vf(struct qed_hwfn *p_hwfn, return rc; } +static int qed_iov_release_hw_for_vf(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u16 rel_vf_id) +{ + struct qed_vf_info *vf = NULL; + int rc = 0; + + vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true); + if (!vf) { + DP_ERR(p_hwfn, "qed_iov_release_hw_for_vf : vf is NULL\n"); + return -EINVAL; + } + + if (vf->state != VF_STOPPED) { + /* Stopping the VF */ + rc = qed_sp_vf_stop(p_hwfn, vf->concrete_fid, vf->opaque_fid); + + if (rc != 0) { + DP_ERR(p_hwfn, "qed_sp_vf_stop returned error %d\n", + rc); + return rc; + } + + vf->state = VF_STOPPED; + } + + /* disablng interrupts and resetting permission table was done during + * vf-close, however, we could get here without going through vf_close + */ + /* Disable Interrupts for VF */ + qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0); + + /* Reset Permission table */ + qed_iov_config_perm_table(p_hwfn, p_ptt, vf, 0); + + vf->num_rxqs = 0; + vf->num_txqs = 0; + qed_iov_free_vf_igu_sbs(p_hwfn, p_ptt, vf); + + if (vf->b_init) { + vf->b_init = false; + + if (IS_LEAD_HWFN(p_hwfn)) + p_hwfn->cdev->p_iov_info->num_vfs--; + } + + return 0; +} + static bool qed_iov_tlv_supported(u16 tlvtype) { return CHANNEL_TLV_NONE < tlvtype && tlvtype < CHANNEL_TLV_MAX; @@ -702,6 +886,51 @@ static void qed_iov_prepare_resp(struct qed_hwfn *p_hwfn, qed_iov_send_response(p_hwfn, p_ptt, vf_info, length, status); } +struct qed_public_vf_info *qed_iov_get_public_vf_info(struct qed_hwfn *p_hwfn, + u16 relative_vf_id, + bool b_enabled_only) +{ + struct qed_vf_info *vf = NULL; + + vf = qed_iov_get_vf_info(p_hwfn, relative_vf_id, b_enabled_only); + if (!vf) + return NULL; + + return &vf->p_vf_info; +} + +void qed_iov_clean_vf(struct qed_hwfn *p_hwfn, u8 vfid) +{ + struct qed_public_vf_info *vf_info; + + vf_info = 
qed_iov_get_public_vf_info(p_hwfn, vfid, false); + + if (!vf_info) + return; + + /* Clear the VF mac */ + memset(vf_info->mac, 0, ETH_ALEN); +} + +static void qed_iov_vf_cleanup(struct qed_hwfn *p_hwfn, + struct qed_vf_info *p_vf) +{ + u32 i; + + p_vf->vf_bulletin = 0; + p_vf->num_mac_filters = 0; + p_vf->num_vlan_filters = 0; + + /* If VF previously requested less resources, go back to default */ + p_vf->num_rxqs = p_vf->num_sbs; + p_vf->num_txqs = p_vf->num_sbs; + + for (i = 0; i < QED_MAX_VF_CHAINS_PER_PF; i++) + p_vf->vf_queues[i].rxq_active = 0; + + qed_iov_clean_vf(p_hwfn, p_vf->relative_vf_id); +} + static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_vf_info *vf) @@ -845,6 +1074,271 @@ out: sizeof(struct pfvf_acquire_resp_tlv), vfpf_status); } +static void qed_iov_vf_mbx_int_cleanup(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_vf_info *vf) +{ + int i; + + /* Reset the SBs */ + for (i = 0; i < vf->num_sbs; i++) + qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt, + vf->igu_sbs[i], + vf->opaque_fid, false); + + qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_INT_CLEANUP, + sizeof(struct pfvf_def_resp_tlv), + PFVF_STATUS_SUCCESS); +} + +static void qed_iov_vf_mbx_close(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, struct qed_vf_info *vf) +{ + u16 length = sizeof(struct pfvf_def_resp_tlv); + u8 status = PFVF_STATUS_SUCCESS; + + /* Disable Interrupts for VF */ + qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0); + + /* Reset Permission table */ + qed_iov_config_perm_table(p_hwfn, p_ptt, vf, 0); + + qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_CLOSE, + length, status); +} + +static void qed_iov_vf_mbx_release(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_vf_info *p_vf) +{ + u16 length = sizeof(struct pfvf_def_resp_tlv); + + qed_iov_vf_cleanup(p_hwfn, p_vf); + + qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf, CHANNEL_TLV_RELEASE, + length, PFVF_STATUS_SUCCESS); +} + +static int +qed_iov_vf_flr_poll_dorq(struct qed_hwfn *p_hwfn, + struct qed_vf_info *p_vf, struct qed_ptt *p_ptt) +{ + int cnt; + u32 val; + + qed_fid_pretend(p_hwfn, p_ptt, (u16) p_vf->concrete_fid); + + for (cnt = 0; cnt < 50; cnt++) { + val = qed_rd(p_hwfn, p_ptt, DORQ_REG_VF_USAGE_CNT); + if (!val) + break; + msleep(20); + } + qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid); + + if (cnt == 50) { + DP_ERR(p_hwfn, + "VF[%d] - dorq failed to cleanup [usage 0x%08x]\n", + p_vf->abs_vf_id, val); + return -EBUSY; + } + + return 0; +} + +static int +qed_iov_vf_flr_poll_pbf(struct qed_hwfn *p_hwfn, + struct qed_vf_info *p_vf, struct qed_ptt *p_ptt) +{ + u32 cons[MAX_NUM_VOQS], distance[MAX_NUM_VOQS]; + int i, cnt; + + /* Read initial consumers & producers */ + for (i = 0; i < MAX_NUM_VOQS; i++) { + u32 prod; + + cons[i] = qed_rd(p_hwfn, p_ptt, + PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 + + i * 0x40); + prod = qed_rd(p_hwfn, p_ptt, + PBF_REG_NUM_BLOCKS_ALLOCATED_PROD_VOQ0 + + i * 0x40); + distance[i] = prod - cons[i]; + } + + /* Wait for consumers to pass the producers */ + i = 0; + for (cnt = 0; cnt < 50; cnt++) { + for (; i < MAX_NUM_VOQS; i++) { + u32 tmp; + + tmp = qed_rd(p_hwfn, p_ptt, + PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 + + i * 0x40); + if (distance[i] > tmp - cons[i]) + break; + } + + if (i == MAX_NUM_VOQS) + break; + + msleep(20); + } + + if (cnt == 50) { + DP_ERR(p_hwfn, "VF[%d] - pbf polling failed on VOQ %d\n", + p_vf->abs_vf_id, i); + return -EBUSY; + } + + return 0; +} + +static int qed_iov_vf_flr_poll(struct qed_hwfn 
*p_hwfn, + struct qed_vf_info *p_vf, struct qed_ptt *p_ptt) +{ + int rc; + + rc = qed_iov_vf_flr_poll_dorq(p_hwfn, p_vf, p_ptt); + if (rc) + return rc; + + rc = qed_iov_vf_flr_poll_pbf(p_hwfn, p_vf, p_ptt); + if (rc) + return rc; + + return 0; +} + +static int +qed_iov_execute_vf_flr_cleanup(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u16 rel_vf_id, u32 *ack_vfs) +{ + struct qed_vf_info *p_vf; + int rc = 0; + + p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false); + if (!p_vf) + return 0; + + if (p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] & + (1ULL << (rel_vf_id % 64))) { + u16 vfid = p_vf->abs_vf_id; + + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "VF[%d] - Handling FLR\n", vfid); + + qed_iov_vf_cleanup(p_hwfn, p_vf); + + /* If VF isn't active, no need for anything but SW */ + if (!p_vf->b_init) + goto cleanup; + + rc = qed_iov_vf_flr_poll(p_hwfn, p_vf, p_ptt); + if (rc) + goto cleanup; + + rc = qed_final_cleanup(p_hwfn, p_ptt, vfid, true); + if (rc) { + DP_ERR(p_hwfn, "Failed handle FLR of VF[%d]\n", vfid); + return rc; + } + + /* VF_STOPPED has to be set only after final cleanup + * but prior to re-enabling the VF. + */ + p_vf->state = VF_STOPPED; + + rc = qed_iov_enable_vf_access(p_hwfn, p_ptt, p_vf); + if (rc) { + DP_ERR(p_hwfn, "Failed to re-enable VF[%d] acces\n", + vfid); + return rc; + } +cleanup: + /* Mark VF for ack and clean pending state */ + if (p_vf->state == VF_RESET) + p_vf->state = VF_STOPPED; + ack_vfs[vfid / 32] |= (1 << (vfid % 32)); + p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &= + ~(1ULL << (rel_vf_id % 64)); + p_hwfn->pf_iov_info->pending_events[rel_vf_id / 64] &= + ~(1ULL << (rel_vf_id % 64)); + } + + return rc; +} + +int qed_iov_vf_flr_cleanup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + u32 ack_vfs[VF_MAX_STATIC / 32]; + int rc = 0; + u16 i; + + memset(ack_vfs, 0, sizeof(u32) * (VF_MAX_STATIC / 32)); + + /* Since BRB <-> PRS interface can't be tested as part of the flr + * polling due to HW limitations, simply sleep a bit. And since + * there's no need to wait per-vf, do it before looping. + */ + msleep(100); + + for (i = 0; i < p_hwfn->cdev->p_iov_info->total_vfs; i++) + qed_iov_execute_vf_flr_cleanup(p_hwfn, p_ptt, i, ack_vfs); + + rc = qed_mcp_ack_vf_flr(p_hwfn, p_ptt, ack_vfs); + return rc; +} + +int qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *p_disabled_vfs) +{ + u16 i, found = 0; + + DP_VERBOSE(p_hwfn, QED_MSG_IOV, "Marking FLR-ed VFs\n"); + for (i = 0; i < (VF_MAX_STATIC / 32); i++) + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "[%08x,...,%08x]: %08x\n", + i * 32, (i + 1) * 32 - 1, p_disabled_vfs[i]); + + if (!p_hwfn->cdev->p_iov_info) { + DP_NOTICE(p_hwfn, "VF flr but no IOV\n"); + return 0; + } + + /* Mark VFs */ + for (i = 0; i < p_hwfn->cdev->p_iov_info->total_vfs; i++) { + struct qed_vf_info *p_vf; + u8 vfid; + + p_vf = qed_iov_get_vf_info(p_hwfn, i, false); + if (!p_vf) + continue; + + vfid = p_vf->abs_vf_id; + if ((1 << (vfid % 32)) & p_disabled_vfs[vfid / 32]) { + u64 *p_flr = p_hwfn->pf_iov_info->pending_flr; + u16 rel_vf_id = p_vf->relative_vf_id; + + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "VF[%d] [rel %d] got FLR-ed\n", + vfid, rel_vf_id); + + p_vf->state = VF_RESET; + + /* No need to lock here, since pending_flr should + * only change here and before ACKing MFw. Since + * MFW will not trigger an additional attention for + * VF flr until ACKs, we're safe. 
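Two different bitmap granularities are in play in this FLR path: pending_flr is an array of u64 words indexed by the relative VF id (word rel_vf_id / 64, bit rel_vf_id % 64), while the ACK mask handed to the MFW via qed_mcp_ack_vf_flr() is an array of u32 words indexed by the absolute VF id. A standalone sketch of the two indexing schemes (the helper names are made up for illustration):

#include <stdint.h>
#include <stdbool.h>

/* Relative VF ids are tracked in 64-bit words (pending_flr-style). */
static inline void pending_flr_set(uint64_t *p_flr, unsigned int rel_vf_id)
{
        p_flr[rel_vf_id / 64] |= 1ULL << (rel_vf_id % 64);
}

static inline void pending_flr_clear(uint64_t *p_flr, unsigned int rel_vf_id)
{
        p_flr[rel_vf_id / 64] &= ~(1ULL << (rel_vf_id % 64));
}

static inline bool pending_flr_test(const uint64_t *p_flr, unsigned int rel_vf_id)
{
        return p_flr[rel_vf_id / 64] & (1ULL << (rel_vf_id % 64));
}

/* Absolute VF ids are acked toward the MFW in 32-bit words (ack_vfs-style). */
static inline void ack_vf(uint32_t *ack_vfs, unsigned int abs_vf_id)
{
        ack_vfs[abs_vf_id / 32] |= 1u << (abs_vf_id % 32);
}
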
+ */ + p_flr[rel_vf_id / 64] |= 1ULL << (rel_vf_id % 64); + found = 1; + } + } + + return found; +} + static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, int vfid) { @@ -871,6 +1365,15 @@ static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn, case CHANNEL_TLV_ACQUIRE: qed_iov_vf_mbx_acquire(p_hwfn, p_ptt, p_vf); break; + case CHANNEL_TLV_CLOSE: + qed_iov_vf_mbx_close(p_hwfn, p_ptt, p_vf); + break; + case CHANNEL_TLV_INT_CLEANUP: + qed_iov_vf_mbx_int_cleanup(p_hwfn, p_ptt, p_vf); + break; + case CHANNEL_TLV_RELEASE: + qed_iov_vf_mbx_release(p_hwfn, p_ptt, p_vf); + break; } } else { /* unknown TLV - this may belong to a VF driver from the future @@ -992,6 +1495,17 @@ static int qed_iov_copy_vf_msg(struct qed_hwfn *p_hwfn, struct qed_ptt *ptt, return 0; } +bool qed_iov_is_vf_stopped(struct qed_hwfn *p_hwfn, int vfid) +{ + struct qed_vf_info *p_vf_info; + + p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true); + if (!p_vf_info) + return true; + + return p_vf_info->state == VF_STOPPED; +} + /** * qed_schedule_iov - schedules IOV task for VF and PF * @hwfn: hardware function pointer @@ -1015,6 +1529,132 @@ void qed_vf_start_iov_wq(struct qed_dev *cdev) &cdev->hwfns[i].iov_task, 0); } +int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled) +{ + int i, j; + + for_each_hwfn(cdev, i) + if (cdev->hwfns[i].iov_wq) + flush_workqueue(cdev->hwfns[i].iov_wq); + + /* Mark VFs for disablement */ + qed_iov_set_vfs_to_disable(cdev, true); + + if (cdev->p_iov_info && cdev->p_iov_info->num_vfs && pci_enabled) + pci_disable_sriov(cdev->pdev); + + for_each_hwfn(cdev, i) { + struct qed_hwfn *hwfn = &cdev->hwfns[i]; + struct qed_ptt *ptt = qed_ptt_acquire(hwfn); + + /* Failure to acquire the ptt in 100g creates an odd error + * where the first engine has already relased IOV. 
+ */ + if (!ptt) { + DP_ERR(hwfn, "Failed to acquire ptt\n"); + return -EBUSY; + } + + qed_for_each_vf(hwfn, j) { + int k; + + if (!qed_iov_is_valid_vfid(hwfn, j, true)) + continue; + + /* Wait until VF is disabled before releasing */ + for (k = 0; k < 100; k++) { + if (!qed_iov_is_vf_stopped(hwfn, j)) + msleep(20); + else + break; + } + + if (k < 100) + qed_iov_release_hw_for_vf(&cdev->hwfns[i], + ptt, j); + else + DP_ERR(hwfn, + "Timeout waiting for VF's FLR to end\n"); + } + + qed_ptt_release(hwfn, ptt); + } + + qed_iov_set_vfs_to_disable(cdev, false); + + return 0; +} + +static int qed_sriov_enable(struct qed_dev *cdev, int num) +{ + struct qed_sb_cnt_info sb_cnt_info; + int i, j, rc; + + if (num >= RESC_NUM(&cdev->hwfns[0], QED_VPORT)) { + DP_NOTICE(cdev, "Can start at most %d VFs\n", + RESC_NUM(&cdev->hwfns[0], QED_VPORT) - 1); + return -EINVAL; + } + + /* Initialize HW for VF access */ + for_each_hwfn(cdev, j) { + struct qed_hwfn *hwfn = &cdev->hwfns[j]; + struct qed_ptt *ptt = qed_ptt_acquire(hwfn); + int num_sbs = 0, limit = 16; + + if (!ptt) { + DP_ERR(hwfn, "Failed to acquire ptt\n"); + rc = -EBUSY; + goto err; + } + + memset(&sb_cnt_info, 0, sizeof(sb_cnt_info)); + qed_int_get_num_sbs(hwfn, &sb_cnt_info); + num_sbs = min_t(int, sb_cnt_info.sb_free_blk, limit); + + for (i = 0; i < num; i++) { + if (!qed_iov_is_valid_vfid(hwfn, i, false)) + continue; + + rc = qed_iov_init_hw_for_vf(hwfn, + ptt, i, num_sbs / num); + if (rc) { + DP_ERR(cdev, "Failed to enable VF[%d]\n", i); + qed_ptt_release(hwfn, ptt); + goto err; + } + } + + qed_ptt_release(hwfn, ptt); + } + + /* Enable SRIOV PCIe functions */ + rc = pci_enable_sriov(cdev->pdev, num); + if (rc) { + DP_ERR(cdev, "Failed to enable sriov [%d]\n", rc); + goto err; + } + + return num; + +err: + qed_sriov_disable(cdev, false); + return rc; +} + +static int qed_sriov_configure(struct qed_dev *cdev, int num_vfs_param) +{ + if (!IS_QED_SRIOV(cdev)) { + DP_VERBOSE(cdev, QED_MSG_IOV, "SR-IOV is not supported\n"); + return -EOPNOTSUPP; + } + + if (num_vfs_param) + return qed_sriov_enable(cdev, num_vfs_param); + else + return qed_sriov_disable(cdev, true); +} + static void qed_handle_vf_msg(struct qed_hwfn *hwfn) { u64 events[QED_VF_ARRAY_LENGTH]; @@ -1058,10 +1698,26 @@ void qed_iov_pf_task(struct work_struct *work) { struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn, iov_task.work); + int rc; if (test_and_clear_bit(QED_IOV_WQ_STOP_WQ_FLAG, &hwfn->iov_task_flags)) return; + if (test_and_clear_bit(QED_IOV_WQ_FLR_FLAG, &hwfn->iov_task_flags)) { + struct qed_ptt *ptt = qed_ptt_acquire(hwfn); + + if (!ptt) { + qed_schedule_iov(hwfn, QED_IOV_WQ_FLR_FLAG); + return; + } + + rc = qed_iov_vf_flr_cleanup(hwfn, ptt); + if (rc) + qed_schedule_iov(hwfn, QED_IOV_WQ_FLR_FLAG); + + qed_ptt_release(hwfn, ptt); + } + if (test_and_clear_bit(QED_IOV_WQ_MSG_FLAG, &hwfn->iov_task_flags)) qed_handle_vf_msg(hwfn); } @@ -1112,3 +1768,7 @@ int qed_iov_wq_start(struct qed_dev *cdev) return 0; } + +const struct qed_iov_hv_ops qed_iov_ops_pass = { + .configure = &qed_sriov_configure, +}; diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.h b/drivers/net/ethernet/qlogic/qed/qed_sriov.h index 4f190d25ee14..10794b08fd21 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.h +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.h @@ -24,6 +24,13 @@ #define QED_MAX_VF_CHAINS_PER_PF 16 #define QED_ETH_VF_NUM_VLAN_FILTERS 2 +struct qed_public_vf_info { + /* These copies will later be reflected in the bulletin board, + * but this copy should be newer. 
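The .configure hook exported above through qed_iov_ops_pass is meant to be driven from the Ethernet driver's PCI sriov_configure callback (qede wires this up separately; the sketch below is hypothetical glue, not code from this series, and assumes CONFIG_QED_SRIOV so that the iov member exists):

#include <linux/pci.h>
#include <linux/qed/qed_eth_if.h>

/* Hypothetical per-device private data; the real qede structure differs. */
struct example_edev {
        struct qed_dev *cdev;
        const struct qed_eth_ops *ops;
};

/* PCI .sriov_configure hook: "echo N > .../sriov_numvfs" lands here and is
 * forwarded to qed_sriov_configure() through the new iov ops.
 */
static int example_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
        struct example_edev *edev = pci_get_drvdata(pdev);

        if (!edev->ops->iov)
                return -EOPNOTSUPP;

        return edev->ops->iov->configure(edev->cdev, num_vfs);
}

With num_vfs == 0 this path ends up in qed_sriov_disable(cdev, true), mirroring the unload path added to qed_slowpath_stop() earlier in the patch.
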
+ */ + u8 mac[ETH_ALEN]; +}; + /* This struct is part of qed_dev and contains data relevant to all hwfns; * Initialized only if SR-IOV cpabability is exposed in PCIe config space. */ @@ -74,6 +81,7 @@ struct qed_vf_q_info { enum vf_state { VF_FREE = 0, /* VF ready to be acquired holds no resc */ VF_ACQUIRED, /* VF, acquired, but not initalized */ + VF_RESET, /* VF, FLR'd, pending cleanup */ VF_STOPPED /* VF, Stopped */ }; @@ -82,6 +90,7 @@ struct qed_vf_info { struct qed_iov_vf_mbx vf_mbx; enum vf_state state; bool b_init; + u8 to_disable; struct qed_bulletin bulletin; dma_addr_t vf_bulletin; @@ -105,7 +114,7 @@ struct qed_vf_info { u8 num_vlan_filters; struct qed_vf_q_info vf_queues[QED_MAX_VF_CHAINS_PER_PF]; u16 igu_sbs[QED_MAX_VF_CHAINS_PER_PF]; - + struct qed_public_vf_info p_vf_info; }; /* This structure is part of qed_hwfn and used only for PFs that have sriov @@ -219,11 +228,22 @@ void qed_iov_free_hw_info(struct qed_dev *cdev); int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn, u8 opcode, __le16 echo, union event_ring_data *data); +/** + * @brief Mark structs of vfs that have been FLR-ed. + * + * @param p_hwfn + * @param disabled_vfs - bitmask of all VFs on path that were FLRed + * + * @return 1 iff one of the PF's vfs got FLRed. 0 otherwise. + */ +int qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *disabled_vfs); + void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first); int qed_iov_wq_start(struct qed_dev *cdev); void qed_schedule_iov(struct qed_hwfn *hwfn, enum qed_iov_wq_flag flag); void qed_vf_start_iov_wq(struct qed_dev *cdev); +int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled); #else static inline u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn, u16 rel_vf_id) @@ -260,6 +280,12 @@ static inline int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn, return -EINVAL; } +static inline int qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, + u32 *disabled_vfs) +{ + return 0; +} + static inline void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first) { } @@ -277,6 +303,11 @@ static inline void qed_schedule_iov(struct qed_hwfn *hwfn, static inline void qed_vf_start_iov_wq(struct qed_dev *cdev) { } + +static inline int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled) +{ + return 0; +} #endif #define qed_for_each_vf(_p_hwfn, _i) \ diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c index a3c8f4e1b9c1..2460e39724f1 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_vf.c +++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c @@ -311,6 +311,103 @@ free_p_iov: return -ENOMEM; } +int qed_vf_pf_reset(struct qed_hwfn *p_hwfn) +{ + struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; + struct pfvf_def_resp_tlv *resp; + struct vfpf_first_tlv *req; + int rc; + + /* clear mailbox and prep first tlv */ + req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_CLOSE, sizeof(*req)); + + /* add list termination tlv */ + qed_add_tlv(p_hwfn, &p_iov->offset, + CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv)); + + resp = &p_iov->pf2vf_reply->default_resp; + rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); + if (rc) + return rc; + + if (resp->hdr.status != PFVF_STATUS_SUCCESS) + return -EAGAIN; + + p_hwfn->b_int_enabled = 0; + + return 0; +} + +int qed_vf_pf_release(struct qed_hwfn *p_hwfn) +{ + struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; + struct pfvf_def_resp_tlv *resp; + struct vfpf_first_tlv *req; + u32 size; + int rc; + + /* clear mailbox and prep first tlv */ + req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_RELEASE, 
sizeof(*req)); + + /* add list termination tlv */ + qed_add_tlv(p_hwfn, &p_iov->offset, + CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv)); + + resp = &p_iov->pf2vf_reply->default_resp; + rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); + + if (!rc && resp->hdr.status != PFVF_STATUS_SUCCESS) + rc = -EAGAIN; + + p_hwfn->b_int_enabled = 0; + + if (p_iov->vf2pf_request) + dma_free_coherent(&p_hwfn->cdev->pdev->dev, + sizeof(union vfpf_tlvs), + p_iov->vf2pf_request, + p_iov->vf2pf_request_phys); + if (p_iov->pf2vf_reply) + dma_free_coherent(&p_hwfn->cdev->pdev->dev, + sizeof(union pfvf_tlvs), + p_iov->pf2vf_reply, p_iov->pf2vf_reply_phys); + + if (p_iov->bulletin.p_virt) { + size = sizeof(struct qed_bulletin_content); + dma_free_coherent(&p_hwfn->cdev->pdev->dev, + size, + p_iov->bulletin.p_virt, p_iov->bulletin.phys); + } + + kfree(p_hwfn->vf_iov_info); + p_hwfn->vf_iov_info = NULL; + + return rc; +} + +int qed_vf_pf_int_cleanup(struct qed_hwfn *p_hwfn) +{ + struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; + struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp; + int rc; + + /* clear mailbox and prep first tlv */ + qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_INT_CLEANUP, + sizeof(struct vfpf_first_tlv)); + + /* add list termination tlv */ + qed_add_tlv(p_hwfn, &p_iov->offset, + CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv)); + + rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); + if (rc) + return rc; + + if (resp->hdr.status != PFVF_STATUS_SUCCESS) + return -EINVAL; + + return 0; +} + u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id) { struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.h b/drivers/net/ethernet/qlogic/qed/qed_vf.h index de9fe8501d21..c872e5e2985e 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_vf.h +++ b/drivers/net/ethernet/qlogic/qed/qed_vf.h @@ -206,6 +206,9 @@ struct qed_bulletin { enum { CHANNEL_TLV_NONE, /* ends tlv sequence */ CHANNEL_TLV_ACQUIRE, + CHANNEL_TLV_INT_CLEANUP, + CHANNEL_TLV_CLOSE, + CHANNEL_TLV_RELEASE, CHANNEL_TLV_LIST_END, CHANNEL_TLV_MAX }; @@ -278,6 +281,24 @@ void qed_vf_get_fw_version(struct qed_hwfn *p_hwfn, */ int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn); +/** + * + * @brief VF - send a close message to PF + * + * @param p_hwfn + * + * @return enum _qed_status + */ +int qed_vf_pf_reset(struct qed_hwfn *p_hwfn); + +/** + * @brief VF - free vf`s memories + * + * @param p_hwfn + * + * @return enum _qed_status + */ +int qed_vf_pf_release(struct qed_hwfn *p_hwfn); /** * @brief qed_vf_get_igu_sb_id - Get the IGU SB ID for a given * sb_id. 
For VFs igu sbs don't have to be contiguous @@ -288,6 +309,15 @@ int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn); * @return INLINE u16 */ u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id); + +/** + * @brief qed_vf_pf_int_cleanup - clean the SB of the VF + * + * @param p_hwfn + * + * @return enum _qed_status + */ +int qed_vf_pf_int_cleanup(struct qed_hwfn *p_hwfn); #else static inline void qed_vf_get_num_rxqs(struct qed_hwfn *p_hwfn, u8 *num_rxqs) { @@ -313,10 +343,25 @@ static inline int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn) return -EINVAL; } +static inline int qed_vf_pf_reset(struct qed_hwfn *p_hwfn) +{ + return -EINVAL; +} + +static inline int qed_vf_pf_release(struct qed_hwfn *p_hwfn) +{ + return -EINVAL; +} + static inline u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id) { return 0; } + +static inline int qed_vf_pf_int_cleanup(struct qed_hwfn *p_hwfn) +{ + return -EINVAL; +} #endif #endif diff --git a/include/linux/qed/qed_eth_if.h b/include/linux/qed/qed_eth_if.h index 3a4c806be156..acfafca43aa5 100644 --- a/include/linux/qed/qed_eth_if.h +++ b/include/linux/qed/qed_eth_if.h @@ -13,6 +13,7 @@ #include #include #include +#include struct qed_dev_eth_info { struct qed_dev_info common; @@ -125,6 +126,9 @@ struct qed_eth_cb_ops { struct qed_eth_ops { const struct qed_common_ops *common; +#ifdef CONFIG_QED_SRIOV + const struct qed_iov_hv_ops *iov; +#endif int (*fill_dev_info)(struct qed_dev *cdev, struct qed_dev_eth_info *info); diff --git a/include/linux/qed/qed_iov_if.h b/include/linux/qed/qed_iov_if.h new file mode 100644 index 000000000000..c53bfa6374c5 --- /dev/null +++ b/include/linux/qed/qed_iov_if.h @@ -0,0 +1,20 @@ +/* QLogic qed NIC Driver + * Copyright (c) 2015 QLogic Corporation + * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree. + */ + +#ifndef _QED_IOV_IF_H +#define _QED_IOV_IF_H + +#include + +/* Structs used by PF to control and manipulate child VFs */ +struct qed_iov_hv_ops { + int (*configure)(struct qed_dev *cdev, int num_vfs_param); + +}; + +#endif From dacd88d6f6851510735e8db7a5981d4abcda6cb2 Mon Sep 17 00:00:00 2001 From: Yuval Mintz Date: Wed, 11 May 2016 16:36:16 +0300 Subject: [PATCH 1505/1649] qed: IOV l2 functionality This adds sufficient changes to allow VFs l2-configuration flows to work. While the fastpath of the VF and the PF are meant to be exactly the same, the configuration of the VF is done by the PF. This diverges all VF-related configuration flows that originate from a VF, making them pass through the VF->PF channel and adding sufficient logic on the PF side to support them. Signed-off-by: Yuval Mintz Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/qed/qed.h | 1 + drivers/net/ethernet/qlogic/qed/qed_dev.c | 10 +- drivers/net/ethernet/qlogic/qed/qed_l2.c | 450 ++++++----- drivers/net/ethernet/qlogic/qed/qed_l2.h | 178 +++++ .../net/ethernet/qlogic/qed/qed_reg_addr.h | 2 + drivers/net/ethernet/qlogic/qed/qed_sriov.c | 704 ++++++++++++++++++ drivers/net/ethernet/qlogic/qed/qed_sriov.h | 24 + drivers/net/ethernet/qlogic/qed/qed_vf.c | 445 +++++++++++ drivers/net/ethernet/qlogic/qed/qed_vf.h | 383 +++++++++- 9 files changed, 1954 insertions(+), 243 deletions(-) create mode 100644 drivers/net/ethernet/qlogic/qed/qed_l2.h diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h index f9a3576305a1..d7da64556e4b 100644 --- a/drivers/net/ethernet/qlogic/qed/qed.h +++ b/drivers/net/ethernet/qlogic/qed/qed.h @@ -532,6 +532,7 @@ struct qed_dev { }; #define NUM_OF_VFS(dev) MAX_NUM_VFS_BB +#define NUM_OF_L2_QUEUES(dev) MAX_NUM_L2_QUEUES_BB #define NUM_OF_SBS(dev) MAX_SB_PER_PATH_BB #define NUM_OF_ENG_PFS(dev) MAX_NUM_PFS_BB diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index 78e25cf6836f..9d01a16bfb1a 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -938,7 +938,12 @@ void qed_hw_stop_fastpath(struct qed_dev *cdev) for_each_hwfn(cdev, j) { struct qed_hwfn *p_hwfn = &cdev->hwfns[j]; - struct qed_ptt *p_ptt = p_hwfn->p_main_ptt; + struct qed_ptt *p_ptt = p_hwfn->p_main_ptt; + + if (IS_VF(cdev)) { + qed_vf_pf_int_cleanup(p_hwfn); + continue; + } DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, @@ -962,6 +967,9 @@ void qed_hw_stop_fastpath(struct qed_dev *cdev) void qed_hw_start_fastpath(struct qed_hwfn *p_hwfn) { + if (IS_VF(p_hwfn->cdev)) + return; + /* Re-open incoming traffic */ qed_wr(p_hwfn, p_hwfn->p_main_ptt, NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x0); diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c index 5978bb57f883..9f88f2feb5ec 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c @@ -31,125 +31,25 @@ #include "qed_hsi.h" #include "qed_hw.h" #include "qed_int.h" +#include "qed_l2.h" #include "qed_mcp.h" #include "qed_reg_addr.h" #include "qed_sp.h" #include "qed_sriov.h" -struct qed_rss_params { - u8 update_rss_config; - u8 rss_enable; - u8 rss_eng_id; - u8 update_rss_capabilities; - u8 update_rss_ind_table; - u8 update_rss_key; - u8 rss_caps; - u8 rss_table_size_log; - u16 rss_ind_table[QED_RSS_IND_TABLE_SIZE]; - u32 rss_key[QED_RSS_KEY_SIZE]; -}; - -enum qed_filter_opcode { - QED_FILTER_ADD, - QED_FILTER_REMOVE, - QED_FILTER_MOVE, - QED_FILTER_REPLACE, /* Delete all MACs and add new one instead */ - QED_FILTER_FLUSH, /* Removes all filters */ -}; - -enum qed_filter_ucast_type { - QED_FILTER_MAC, - QED_FILTER_VLAN, - QED_FILTER_MAC_VLAN, - QED_FILTER_INNER_MAC, - QED_FILTER_INNER_VLAN, - QED_FILTER_INNER_PAIR, - QED_FILTER_INNER_MAC_VNI_PAIR, - QED_FILTER_MAC_VNI_PAIR, - QED_FILTER_VNI, -}; - -struct qed_filter_ucast { - enum qed_filter_opcode opcode; - enum qed_filter_ucast_type type; - u8 is_rx_filter; - u8 is_tx_filter; - u8 vport_to_add_to; - u8 vport_to_remove_from; - unsigned char mac[ETH_ALEN]; - u8 assert_on_error; - u16 vlan; - u32 vni; -}; - -struct qed_filter_mcast { - /* MOVE is not supported for multicast */ - enum qed_filter_opcode opcode; - u8 vport_to_add_to; - u8 vport_to_remove_from; - u8 num_mc_addrs; -#define QED_MAX_MC_ADDRS 64 - unsigned char 
mac[QED_MAX_MC_ADDRS][ETH_ALEN]; -}; - -struct qed_filter_accept_flags { - u8 update_rx_mode_config; - u8 update_tx_mode_config; - u8 rx_accept_filter; - u8 tx_accept_filter; -#define QED_ACCEPT_NONE 0x01 -#define QED_ACCEPT_UCAST_MATCHED 0x02 -#define QED_ACCEPT_UCAST_UNMATCHED 0x04 -#define QED_ACCEPT_MCAST_MATCHED 0x08 -#define QED_ACCEPT_MCAST_UNMATCHED 0x10 -#define QED_ACCEPT_BCAST 0x20 -}; - -struct qed_sp_vport_update_params { - u16 opaque_fid; - u8 vport_id; - u8 update_vport_active_rx_flg; - u8 vport_active_rx_flg; - u8 update_vport_active_tx_flg; - u8 vport_active_tx_flg; - u8 update_approx_mcast_flg; - u8 update_accept_any_vlan_flg; - u8 accept_any_vlan; - unsigned long bins[8]; - struct qed_rss_params *rss_params; - struct qed_filter_accept_flags accept_flags; -}; - -enum qed_tpa_mode { - QED_TPA_MODE_NONE, - QED_TPA_MODE_UNUSED, - QED_TPA_MODE_GRO, - QED_TPA_MODE_MAX -}; - -struct qed_sp_vport_start_params { - enum qed_tpa_mode tpa_mode; - bool remove_inner_vlan; - bool drop_ttl0; - u8 max_buffers_per_cqe; - u32 concrete_fid; - u16 opaque_fid; - u8 vport_id; - u16 mtu; -}; #define QED_MAX_SGES_NUM 16 #define CRC32_POLY 0x1edc6f41 -static int qed_sp_vport_start(struct qed_hwfn *p_hwfn, - struct qed_sp_vport_start_params *p_params) +int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn, + struct qed_sp_vport_start_params *p_params) { struct vport_start_ramrod_data *p_ramrod = NULL; struct qed_spq_entry *p_ent = NULL; struct qed_sp_init_data init_data; + u8 abs_vport_id = 0; int rc = -EINVAL; u16 rx_mode = 0; - u8 abs_vport_id = 0; rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id); if (rc != 0) @@ -206,6 +106,20 @@ static int qed_sp_vport_start(struct qed_hwfn *p_hwfn, return qed_spq_post(p_hwfn, p_ent, NULL); } +int qed_sp_vport_start(struct qed_hwfn *p_hwfn, + struct qed_sp_vport_start_params *p_params) +{ + if (IS_VF(p_hwfn->cdev)) { + return qed_vf_pf_vport_start(p_hwfn, p_params->vport_id, + p_params->mtu, + p_params->remove_inner_vlan, + p_params->tpa_mode, + p_params->max_buffers_per_cqe); + } + + return qed_sp_eth_vport_start(p_hwfn, p_params); +} + static int qed_sp_vport_update_rss(struct qed_hwfn *p_hwfn, struct vport_update_ramrod_data *p_ramrod, @@ -371,11 +285,10 @@ qed_sp_update_mcast_bin(struct qed_hwfn *p_hwfn, } } -static int -qed_sp_vport_update(struct qed_hwfn *p_hwfn, - struct qed_sp_vport_update_params *p_params, - enum spq_mode comp_mode, - struct qed_spq_comp_cb *p_comp_data) +int qed_sp_vport_update(struct qed_hwfn *p_hwfn, + struct qed_sp_vport_update_params *p_params, + enum spq_mode comp_mode, + struct qed_spq_comp_cb *p_comp_data) { struct qed_rss_params *p_rss_params = p_params->rss_params; struct vport_update_ramrod_data_cmn *p_cmn; @@ -385,6 +298,11 @@ qed_sp_vport_update(struct qed_hwfn *p_hwfn, u8 abs_vport_id = 0; int rc = -EINVAL; + if (IS_VF(p_hwfn->cdev)) { + rc = qed_vf_pf_vport_update(p_hwfn, p_params); + return rc; + } + rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id); if (rc != 0) return rc; @@ -427,9 +345,7 @@ qed_sp_vport_update(struct qed_hwfn *p_hwfn, return qed_spq_post(p_hwfn, p_ent, NULL); } -static int qed_sp_vport_stop(struct qed_hwfn *p_hwfn, - u16 opaque_fid, - u8 vport_id) +int qed_sp_vport_stop(struct qed_hwfn *p_hwfn, u16 opaque_fid, u8 vport_id) { struct vport_stop_ramrod_data *p_ramrod; struct qed_sp_init_data init_data; @@ -437,6 +353,9 @@ static int qed_sp_vport_stop(struct qed_hwfn *p_hwfn, u8 abs_vport_id = 0; int rc; + if (IS_VF(p_hwfn->cdev)) + return qed_vf_pf_vport_stop(p_hwfn); + rc = 
qed_fw_vport(p_hwfn, vport_id, &abs_vport_id); if (rc != 0) return rc; @@ -458,13 +377,26 @@ static int qed_sp_vport_stop(struct qed_hwfn *p_hwfn, return qed_spq_post(p_hwfn, p_ent, NULL); } +static int +qed_vf_pf_accept_flags(struct qed_hwfn *p_hwfn, + struct qed_filter_accept_flags *p_accept_flags) +{ + struct qed_sp_vport_update_params s_params; + + memset(&s_params, 0, sizeof(s_params)); + memcpy(&s_params.accept_flags, p_accept_flags, + sizeof(struct qed_filter_accept_flags)); + + return qed_vf_pf_vport_update(p_hwfn, &s_params); +} + static int qed_filter_accept_cmd(struct qed_dev *cdev, u8 vport, struct qed_filter_accept_flags accept_flags, u8 update_accept_any_vlan, u8 accept_any_vlan, - enum spq_mode comp_mode, - struct qed_spq_comp_cb *p_comp_data) + enum spq_mode comp_mode, + struct qed_spq_comp_cb *p_comp_data) { struct qed_sp_vport_update_params vport_update_params; int i, rc; @@ -481,6 +413,13 @@ static int qed_filter_accept_cmd(struct qed_dev *cdev, vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid; + if (IS_VF(cdev)) { + rc = qed_vf_pf_accept_flags(p_hwfn, &accept_flags); + if (rc) + return rc; + continue; + } + rc = qed_sp_vport_update(p_hwfn, &vport_update_params, comp_mode, p_comp_data); if (rc != 0) { @@ -515,16 +454,14 @@ static int qed_sp_release_queue_cid( return 0; } -static int -qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn, - u16 opaque_fid, - u32 cid, - struct qed_queue_start_common_params *params, - u8 stats_id, - u16 bd_max_bytes, - dma_addr_t bd_chain_phys_addr, - dma_addr_t cqe_pbl_addr, - u16 cqe_pbl_size) +int qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn, + u16 opaque_fid, + u32 cid, + struct qed_queue_start_common_params *params, + u8 stats_id, + u16 bd_max_bytes, + dma_addr_t bd_chain_phys_addr, + dma_addr_t cqe_pbl_addr, u16 cqe_pbl_size) { struct rx_queue_start_ramrod_data *p_ramrod = NULL; struct qed_spq_entry *p_ent = NULL; @@ -593,8 +530,7 @@ qed_sp_eth_rx_queue_start(struct qed_hwfn *p_hwfn, u16 bd_max_bytes, dma_addr_t bd_chain_phys_addr, dma_addr_t cqe_pbl_addr, - u16 cqe_pbl_size, - void __iomem **pp_prod) + u16 cqe_pbl_size, void __iomem **pp_prod) { struct qed_hw_cid_data *p_rx_cid; u64 init_prod_val = 0; @@ -602,6 +538,16 @@ qed_sp_eth_rx_queue_start(struct qed_hwfn *p_hwfn, u8 abs_stats_id = 0; int rc; + if (IS_VF(p_hwfn->cdev)) { + return qed_vf_pf_rxq_start(p_hwfn, + params->queue_id, + params->sb, + params->sb_idx, + bd_max_bytes, + bd_chain_phys_addr, + cqe_pbl_addr, cqe_pbl_size, pp_prod); + } + rc = qed_fw_l2_queue(p_hwfn, params->queue_id, &abs_l2_queue); if (rc != 0) return rc; @@ -644,10 +590,9 @@ qed_sp_eth_rx_queue_start(struct qed_hwfn *p_hwfn, return rc; } -static int qed_sp_eth_rx_queue_stop(struct qed_hwfn *p_hwfn, - u16 rx_queue_id, - bool eq_completion_only, - bool cqe_completion) +int qed_sp_eth_rx_queue_stop(struct qed_hwfn *p_hwfn, + u16 rx_queue_id, + bool eq_completion_only, bool cqe_completion) { struct qed_hw_cid_data *p_rx_cid = &p_hwfn->p_rx_cids[rx_queue_id]; struct rx_queue_stop_ramrod_data *p_ramrod = NULL; @@ -656,6 +601,9 @@ static int qed_sp_eth_rx_queue_stop(struct qed_hwfn *p_hwfn, u16 abs_rx_q_id = 0; int rc = -EINVAL; + if (IS_VF(p_hwfn->cdev)) + return qed_vf_pf_rxq_stop(p_hwfn, rx_queue_id, cqe_completion); + /* Get SPQ entry */ memset(&init_data, 0, sizeof(init_data)); init_data.cid = p_rx_cid->cid; @@ -691,15 +639,14 @@ static int qed_sp_eth_rx_queue_stop(struct qed_hwfn *p_hwfn, return qed_sp_release_queue_cid(p_hwfn, p_rx_cid); } -static int -qed_sp_eth_txq_start_ramrod(struct 
qed_hwfn *p_hwfn, - u16 opaque_fid, - u32 cid, - struct qed_queue_start_common_params *p_params, - u8 stats_id, - dma_addr_t pbl_addr, - u16 pbl_size, - union qed_qm_pq_params *p_pq_params) +int qed_sp_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn, + u16 opaque_fid, + u32 cid, + struct qed_queue_start_common_params *p_params, + u8 stats_id, + dma_addr_t pbl_addr, + u16 pbl_size, + union qed_qm_pq_params *p_pq_params) { struct tx_queue_start_ramrod_data *p_ramrod = NULL; struct qed_spq_entry *p_ent = NULL; @@ -753,14 +700,21 @@ qed_sp_eth_tx_queue_start(struct qed_hwfn *p_hwfn, u16 opaque_fid, struct qed_queue_start_common_params *p_params, dma_addr_t pbl_addr, - u16 pbl_size, - void __iomem **pp_doorbell) + u16 pbl_size, void __iomem **pp_doorbell) { struct qed_hw_cid_data *p_tx_cid; union qed_qm_pq_params pq_params; u8 abs_stats_id = 0; int rc; + if (IS_VF(p_hwfn->cdev)) { + return qed_vf_pf_txq_start(p_hwfn, + p_params->queue_id, + p_params->sb, + p_params->sb_idx, + pbl_addr, pbl_size, pp_doorbell); + } + rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_stats_id); if (rc) return rc; @@ -801,14 +755,16 @@ qed_sp_eth_tx_queue_start(struct qed_hwfn *p_hwfn, return rc; } -static int qed_sp_eth_tx_queue_stop(struct qed_hwfn *p_hwfn, - u16 tx_queue_id) +int qed_sp_eth_tx_queue_stop(struct qed_hwfn *p_hwfn, u16 tx_queue_id) { struct qed_hw_cid_data *p_tx_cid = &p_hwfn->p_tx_cids[tx_queue_id]; struct qed_spq_entry *p_ent = NULL; struct qed_sp_init_data init_data; int rc = -EINVAL; + if (IS_VF(p_hwfn->cdev)) + return qed_vf_pf_txq_stop(p_hwfn, tx_queue_id); + /* Get SPQ entry */ memset(&init_data, 0, sizeof(init_data)); init_data.cid = p_tx_cid->cid; @@ -1004,11 +960,11 @@ qed_filter_ucast_common(struct qed_hwfn *p_hwfn, return 0; } -static int qed_sp_eth_filter_ucast(struct qed_hwfn *p_hwfn, - u16 opaque_fid, - struct qed_filter_ucast *p_filter_cmd, - enum spq_mode comp_mode, - struct qed_spq_comp_cb *p_comp_data) +int qed_sp_eth_filter_ucast(struct qed_hwfn *p_hwfn, + u16 opaque_fid, + struct qed_filter_ucast *p_filter_cmd, + enum spq_mode comp_mode, + struct qed_spq_comp_cb *p_comp_data) { struct vport_filter_update_ramrod_data *p_ramrod = NULL; struct qed_spq_entry *p_ent = NULL; @@ -1106,7 +1062,7 @@ static inline u32 qed_crc32c_le(u32 seed, return qed_calc_crc32c((u8 *)packet_buf, 8, seed, 0); } -static u8 qed_mcast_bin_from_mac(u8 *mac) +u8 qed_mcast_bin_from_mac(u8 *mac) { u32 crc = qed_crc32c_le(ETH_MULTICAST_BIN_FROM_MAC_SEED, mac, ETH_ALEN); @@ -1189,11 +1145,10 @@ qed_sp_eth_filter_mcast(struct qed_hwfn *p_hwfn, return qed_spq_post(p_hwfn, p_ent, NULL); } -static int -qed_filter_mcast_cmd(struct qed_dev *cdev, - struct qed_filter_mcast *p_filter_cmd, - enum spq_mode comp_mode, - struct qed_spq_comp_cb *p_comp_data) +static int qed_filter_mcast_cmd(struct qed_dev *cdev, + struct qed_filter_mcast *p_filter_cmd, + enum spq_mode comp_mode, + struct qed_spq_comp_cb *p_comp_data) { int rc = 0; int i; @@ -1209,8 +1164,10 @@ qed_filter_mcast_cmd(struct qed_dev *cdev, u16 opaque_fid; - if (rc != 0) - break; + if (IS_VF(cdev)) { + qed_vf_pf_filter_mcast(p_hwfn, p_filter_cmd); + continue; + } opaque_fid = p_hwfn->hw_info.opaque_fid; @@ -1235,8 +1192,10 @@ static int qed_filter_ucast_cmd(struct qed_dev *cdev, struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; u16 opaque_fid; - if (rc != 0) - break; + if (IS_VF(cdev)) { + rc = qed_vf_pf_filter_ucast(p_hwfn, p_filter_cmd); + continue; + } opaque_fid = p_hwfn->hw_info.opaque_fid; @@ -1245,6 +1204,8 @@ static int qed_filter_ucast_cmd(struct qed_dev *cdev, 
p_filter_cmd, comp_mode, p_comp_data); + if (rc != 0) + break; } return rc; @@ -1253,12 +1214,19 @@ static int qed_filter_ucast_cmd(struct qed_dev *cdev, /* Statistics related code */ static void __qed_get_vport_pstats_addrlen(struct qed_hwfn *p_hwfn, u32 *p_addr, - u32 *p_len, - u16 statistics_bin) + u32 *p_len, u16 statistics_bin) { - *p_addr = BAR0_MAP_REG_PSDM_RAM + - PSTORM_QUEUE_STAT_OFFSET(statistics_bin); - *p_len = sizeof(struct eth_pstorm_per_queue_stat); + if (IS_PF(p_hwfn->cdev)) { + *p_addr = BAR0_MAP_REG_PSDM_RAM + + PSTORM_QUEUE_STAT_OFFSET(statistics_bin); + *p_len = sizeof(struct eth_pstorm_per_queue_stat); + } else { + struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; + struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp; + + *p_addr = p_resp->pfdev_info.stats_info.pstats.address; + *p_len = p_resp->pfdev_info.stats_info.pstats.len; + } } static void __qed_get_vport_pstats(struct qed_hwfn *p_hwfn, @@ -1273,32 +1241,15 @@ static void __qed_get_vport_pstats(struct qed_hwfn *p_hwfn, statistics_bin); memset(&pstats, 0, sizeof(pstats)); - qed_memcpy_from(p_hwfn, p_ptt, &pstats, - pstats_addr, pstats_len); + qed_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, pstats_len); - p_stats->tx_ucast_bytes += - HILO_64_REGPAIR(pstats.sent_ucast_bytes); - p_stats->tx_mcast_bytes += - HILO_64_REGPAIR(pstats.sent_mcast_bytes); - p_stats->tx_bcast_bytes += - HILO_64_REGPAIR(pstats.sent_bcast_bytes); - p_stats->tx_ucast_pkts += - HILO_64_REGPAIR(pstats.sent_ucast_pkts); - p_stats->tx_mcast_pkts += - HILO_64_REGPAIR(pstats.sent_mcast_pkts); - p_stats->tx_bcast_pkts += - HILO_64_REGPAIR(pstats.sent_bcast_pkts); - p_stats->tx_err_drop_pkts += - HILO_64_REGPAIR(pstats.error_drop_pkts); -} - -static void __qed_get_vport_tstats_addrlen(struct qed_hwfn *p_hwfn, - u32 *p_addr, - u32 *p_len) -{ - *p_addr = BAR0_MAP_REG_TSDM_RAM + - TSTORM_PORT_STAT_OFFSET(MFW_PORT(p_hwfn)); - *p_len = sizeof(struct tstorm_per_port_stat); + p_stats->tx_ucast_bytes += HILO_64_REGPAIR(pstats.sent_ucast_bytes); + p_stats->tx_mcast_bytes += HILO_64_REGPAIR(pstats.sent_mcast_bytes); + p_stats->tx_bcast_bytes += HILO_64_REGPAIR(pstats.sent_bcast_bytes); + p_stats->tx_ucast_pkts += HILO_64_REGPAIR(pstats.sent_ucast_pkts); + p_stats->tx_mcast_pkts += HILO_64_REGPAIR(pstats.sent_mcast_pkts); + p_stats->tx_bcast_pkts += HILO_64_REGPAIR(pstats.sent_bcast_pkts); + p_stats->tx_err_drop_pkts += HILO_64_REGPAIR(pstats.error_drop_pkts); } static void __qed_get_vport_tstats(struct qed_hwfn *p_hwfn, @@ -1306,14 +1257,23 @@ static void __qed_get_vport_tstats(struct qed_hwfn *p_hwfn, struct qed_eth_stats *p_stats, u16 statistics_bin) { - u32 tstats_addr = 0, tstats_len = 0; struct tstorm_per_port_stat tstats; + u32 tstats_addr, tstats_len; - __qed_get_vport_tstats_addrlen(p_hwfn, &tstats_addr, &tstats_len); + if (IS_PF(p_hwfn->cdev)) { + tstats_addr = BAR0_MAP_REG_TSDM_RAM + + TSTORM_PORT_STAT_OFFSET(MFW_PORT(p_hwfn)); + tstats_len = sizeof(struct tstorm_per_port_stat); + } else { + struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; + struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp; + + tstats_addr = p_resp->pfdev_info.stats_info.tstats.address; + tstats_len = p_resp->pfdev_info.stats_info.tstats.len; + } memset(&tstats, 0, sizeof(tstats)); - qed_memcpy_from(p_hwfn, p_ptt, &tstats, - tstats_addr, tstats_len); + qed_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, tstats_len); p_stats->mftag_filter_discards += HILO_64_REGPAIR(tstats.mftag_filter_discard); @@ -1323,12 +1283,19 @@ static void __qed_get_vport_tstats(struct 
qed_hwfn *p_hwfn, static void __qed_get_vport_ustats_addrlen(struct qed_hwfn *p_hwfn, u32 *p_addr, - u32 *p_len, - u16 statistics_bin) + u32 *p_len, u16 statistics_bin) { - *p_addr = BAR0_MAP_REG_USDM_RAM + - USTORM_QUEUE_STAT_OFFSET(statistics_bin); - *p_len = sizeof(struct eth_ustorm_per_queue_stat); + if (IS_PF(p_hwfn->cdev)) { + *p_addr = BAR0_MAP_REG_USDM_RAM + + USTORM_QUEUE_STAT_OFFSET(statistics_bin); + *p_len = sizeof(struct eth_ustorm_per_queue_stat); + } else { + struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; + struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp; + + *p_addr = p_resp->pfdev_info.stats_info.ustats.address; + *p_len = p_resp->pfdev_info.stats_info.ustats.len; + } } static void __qed_get_vport_ustats(struct qed_hwfn *p_hwfn, @@ -1343,31 +1310,31 @@ static void __qed_get_vport_ustats(struct qed_hwfn *p_hwfn, statistics_bin); memset(&ustats, 0, sizeof(ustats)); - qed_memcpy_from(p_hwfn, p_ptt, &ustats, - ustats_addr, ustats_len); + qed_memcpy_from(p_hwfn, p_ptt, &ustats, ustats_addr, ustats_len); - p_stats->rx_ucast_bytes += - HILO_64_REGPAIR(ustats.rcv_ucast_bytes); - p_stats->rx_mcast_bytes += - HILO_64_REGPAIR(ustats.rcv_mcast_bytes); - p_stats->rx_bcast_bytes += - HILO_64_REGPAIR(ustats.rcv_bcast_bytes); - p_stats->rx_ucast_pkts += - HILO_64_REGPAIR(ustats.rcv_ucast_pkts); - p_stats->rx_mcast_pkts += - HILO_64_REGPAIR(ustats.rcv_mcast_pkts); - p_stats->rx_bcast_pkts += - HILO_64_REGPAIR(ustats.rcv_bcast_pkts); + p_stats->rx_ucast_bytes += HILO_64_REGPAIR(ustats.rcv_ucast_bytes); + p_stats->rx_mcast_bytes += HILO_64_REGPAIR(ustats.rcv_mcast_bytes); + p_stats->rx_bcast_bytes += HILO_64_REGPAIR(ustats.rcv_bcast_bytes); + p_stats->rx_ucast_pkts += HILO_64_REGPAIR(ustats.rcv_ucast_pkts); + p_stats->rx_mcast_pkts += HILO_64_REGPAIR(ustats.rcv_mcast_pkts); + p_stats->rx_bcast_pkts += HILO_64_REGPAIR(ustats.rcv_bcast_pkts); } static void __qed_get_vport_mstats_addrlen(struct qed_hwfn *p_hwfn, u32 *p_addr, - u32 *p_len, - u16 statistics_bin) + u32 *p_len, u16 statistics_bin) { - *p_addr = BAR0_MAP_REG_MSDM_RAM + - MSTORM_QUEUE_STAT_OFFSET(statistics_bin); - *p_len = sizeof(struct eth_mstorm_per_queue_stat); + if (IS_PF(p_hwfn->cdev)) { + *p_addr = BAR0_MAP_REG_MSDM_RAM + + MSTORM_QUEUE_STAT_OFFSET(statistics_bin); + *p_len = sizeof(struct eth_mstorm_per_queue_stat); + } else { + struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; + struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp; + + *p_addr = p_resp->pfdev_info.stats_info.mstats.address; + *p_len = p_resp->pfdev_info.stats_info.mstats.len; + } } static void __qed_get_vport_mstats(struct qed_hwfn *p_hwfn, @@ -1382,21 +1349,17 @@ static void __qed_get_vport_mstats(struct qed_hwfn *p_hwfn, statistics_bin); memset(&mstats, 0, sizeof(mstats)); - qed_memcpy_from(p_hwfn, p_ptt, &mstats, - mstats_addr, mstats_len); + qed_memcpy_from(p_hwfn, p_ptt, &mstats, mstats_addr, mstats_len); - p_stats->no_buff_discards += - HILO_64_REGPAIR(mstats.no_buff_discard); + p_stats->no_buff_discards += HILO_64_REGPAIR(mstats.no_buff_discard); p_stats->packet_too_big_discard += HILO_64_REGPAIR(mstats.packet_too_big_discard); - p_stats->ttl0_discard += - HILO_64_REGPAIR(mstats.ttl0_discard); + p_stats->ttl0_discard += HILO_64_REGPAIR(mstats.ttl0_discard); p_stats->tpa_coalesced_pkts += HILO_64_REGPAIR(mstats.tpa_coalesced_pkts); p_stats->tpa_coalesced_events += HILO_64_REGPAIR(mstats.tpa_coalesced_events); - p_stats->tpa_aborts_num += - HILO_64_REGPAIR(mstats.tpa_aborts_num); + p_stats->tpa_aborts_num += 
HILO_64_REGPAIR(mstats.tpa_aborts_num); p_stats->tpa_coalesced_bytes += HILO_64_REGPAIR(mstats.tpa_coalesced_bytes); } @@ -1469,44 +1432,49 @@ static void __qed_get_vport_port_stats(struct qed_hwfn *p_hwfn, static void __qed_get_vport_stats(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_eth_stats *stats, - u16 statistics_bin) + u16 statistics_bin, bool b_get_port_stats) { __qed_get_vport_mstats(p_hwfn, p_ptt, stats, statistics_bin); __qed_get_vport_ustats(p_hwfn, p_ptt, stats, statistics_bin); __qed_get_vport_tstats(p_hwfn, p_ptt, stats, statistics_bin); __qed_get_vport_pstats(p_hwfn, p_ptt, stats, statistics_bin); - if (p_hwfn->mcp_info) + if (b_get_port_stats && p_hwfn->mcp_info) __qed_get_vport_port_stats(p_hwfn, p_ptt, stats); } static void _qed_get_vport_stats(struct qed_dev *cdev, struct qed_eth_stats *stats) { - u8 fw_vport = 0; - int i; + u8 fw_vport = 0; + int i; memset(stats, 0, sizeof(*stats)); for_each_hwfn(cdev, i) { struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; - struct qed_ptt *p_ptt; + struct qed_ptt *p_ptt = IS_PF(cdev) ? qed_ptt_acquire(p_hwfn) + : NULL; - /* The main vport index is relative first */ - if (qed_fw_vport(p_hwfn, 0, &fw_vport)) { - DP_ERR(p_hwfn, "No vport available!\n"); - continue; + if (IS_PF(cdev)) { + /* The main vport index is relative first */ + if (qed_fw_vport(p_hwfn, 0, &fw_vport)) { + DP_ERR(p_hwfn, "No vport available!\n"); + goto out; + } } - p_ptt = qed_ptt_acquire(p_hwfn); - if (!p_ptt) { + if (IS_PF(cdev) && !p_ptt) { DP_ERR(p_hwfn, "Failed to acquire ptt\n"); continue; } - __qed_get_vport_stats(p_hwfn, p_ptt, stats, fw_vport); + __qed_get_vport_stats(p_hwfn, p_ptt, stats, fw_vport, + IS_PF(cdev) ? true : false); - qed_ptt_release(p_hwfn, p_ptt); +out: + if (IS_PF(cdev) && p_ptt) + qed_ptt_release(p_hwfn, p_ptt); } } @@ -1540,10 +1508,11 @@ void qed_reset_vport_stats(struct qed_dev *cdev) struct eth_mstorm_per_queue_stat mstats; struct eth_ustorm_per_queue_stat ustats; struct eth_pstorm_per_queue_stat pstats; - struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn); + struct qed_ptt *p_ptt = IS_PF(cdev) ? qed_ptt_acquire(p_hwfn) + : NULL; u32 addr = 0, len = 0; - if (!p_ptt) { + if (IS_PF(cdev) && !p_ptt) { DP_ERR(p_hwfn, "Failed to acquire ptt\n"); continue; } @@ -1560,7 +1529,8 @@ void qed_reset_vport_stats(struct qed_dev *cdev) __qed_get_vport_pstats_addrlen(p_hwfn, &addr, &len, 0); qed_memcpy_to(p_hwfn, p_ptt, addr, &pstats, len); - qed_ptt_release(p_hwfn, p_ptt); + if (IS_PF(cdev)) + qed_ptt_release(p_hwfn, p_ptt); } /* PORT statistics are not necessarily reset, so we need to diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.h b/drivers/net/ethernet/qlogic/qed/qed_l2.h new file mode 100644 index 000000000000..3b65a45c1ec2 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.h @@ -0,0 +1,178 @@ +/* QLogic qed NIC Driver + * Copyright (c) 2015 QLogic Corporation + * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree. 
+ */ +#ifndef _QED_L2_H +#define _QED_L2_H +#include +#include +#include +#include +#include +#include "qed.h" +#include "qed_hw.h" +#include "qed_sp.h" + +enum qed_filter_opcode { + QED_FILTER_ADD, + QED_FILTER_REMOVE, + QED_FILTER_MOVE, + QED_FILTER_REPLACE, /* Delete all MACs and add new one instead */ + QED_FILTER_FLUSH, /* Removes all filters */ +}; + +enum qed_filter_ucast_type { + QED_FILTER_MAC, + QED_FILTER_VLAN, + QED_FILTER_MAC_VLAN, + QED_FILTER_INNER_MAC, + QED_FILTER_INNER_VLAN, + QED_FILTER_INNER_PAIR, + QED_FILTER_INNER_MAC_VNI_PAIR, + QED_FILTER_MAC_VNI_PAIR, + QED_FILTER_VNI, +}; + +struct qed_filter_ucast { + enum qed_filter_opcode opcode; + enum qed_filter_ucast_type type; + u8 is_rx_filter; + u8 is_tx_filter; + u8 vport_to_add_to; + u8 vport_to_remove_from; + unsigned char mac[ETH_ALEN]; + u8 assert_on_error; + u16 vlan; + u32 vni; +}; + +struct qed_filter_mcast { + /* MOVE is not supported for multicast */ + enum qed_filter_opcode opcode; + u8 vport_to_add_to; + u8 vport_to_remove_from; + u8 num_mc_addrs; +#define QED_MAX_MC_ADDRS 64 + unsigned char mac[QED_MAX_MC_ADDRS][ETH_ALEN]; +}; + +int qed_sp_eth_rx_queue_stop(struct qed_hwfn *p_hwfn, + u16 rx_queue_id, + bool eq_completion_only, bool cqe_completion); + +int qed_sp_eth_tx_queue_stop(struct qed_hwfn *p_hwfn, u16 tx_queue_id); + +enum qed_tpa_mode { + QED_TPA_MODE_NONE, + QED_TPA_MODE_UNUSED, + QED_TPA_MODE_GRO, + QED_TPA_MODE_MAX +}; + +struct qed_sp_vport_start_params { + enum qed_tpa_mode tpa_mode; + bool remove_inner_vlan; + bool drop_ttl0; + u8 max_buffers_per_cqe; + u32 concrete_fid; + u16 opaque_fid; + u8 vport_id; + u16 mtu; +}; + +int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn, + struct qed_sp_vport_start_params *p_params); + +struct qed_rss_params { + u8 update_rss_config; + u8 rss_enable; + u8 rss_eng_id; + u8 update_rss_capabilities; + u8 update_rss_ind_table; + u8 update_rss_key; + u8 rss_caps; + u8 rss_table_size_log; + u16 rss_ind_table[QED_RSS_IND_TABLE_SIZE]; + u32 rss_key[QED_RSS_KEY_SIZE]; +}; + +struct qed_filter_accept_flags { + u8 update_rx_mode_config; + u8 update_tx_mode_config; + u8 rx_accept_filter; + u8 tx_accept_filter; +#define QED_ACCEPT_NONE 0x01 +#define QED_ACCEPT_UCAST_MATCHED 0x02 +#define QED_ACCEPT_UCAST_UNMATCHED 0x04 +#define QED_ACCEPT_MCAST_MATCHED 0x08 +#define QED_ACCEPT_MCAST_UNMATCHED 0x10 +#define QED_ACCEPT_BCAST 0x20 +}; + +struct qed_sp_vport_update_params { + u16 opaque_fid; + u8 vport_id; + u8 update_vport_active_rx_flg; + u8 vport_active_rx_flg; + u8 update_vport_active_tx_flg; + u8 vport_active_tx_flg; + u8 update_approx_mcast_flg; + u8 update_accept_any_vlan_flg; + u8 accept_any_vlan; + unsigned long bins[8]; + struct qed_rss_params *rss_params; + struct qed_filter_accept_flags accept_flags; +}; + +int qed_sp_vport_update(struct qed_hwfn *p_hwfn, + struct qed_sp_vport_update_params *p_params, + enum spq_mode comp_mode, + struct qed_spq_comp_cb *p_comp_data); + +/** + * @brief qed_sp_vport_stop - + * + * This ramrod closes a VPort after all its RX and TX queues are terminated. + * An Assert is generated if any queues are left open. 
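As a usage illustration for the filter types now exposed by qed_l2.h (a hedged sketch, not code from the patch; the helper name is made up), a caller describes "add this unicast MAC on a vport" by filling struct qed_filter_ucast before handing it to qed_sp_eth_filter_ucast():

#include <linux/string.h>
#include <linux/etherdevice.h>
#include "qed_l2.h"

/* Illustration only: request adding one MAC on vport 0, for both RX and TX.
 * Real callers also choose the completion mode and completion data when
 * passing this descriptor to qed_sp_eth_filter_ucast().
 */
static void example_fill_mac_add(struct qed_filter_ucast *ucast, const u8 *mac)
{
        memset(ucast, 0, sizeof(*ucast));
        ucast->opcode = QED_FILTER_ADD;
        ucast->type = QED_FILTER_MAC;
        ucast->is_rx_filter = 1;
        ucast->is_tx_filter = 1;
        ucast->vport_to_add_to = 0;
        ether_addr_copy(ucast->mac, mac);
}
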
+ * + * @param p_hwfn + * @param opaque_fid + * @param vport_id VPort ID + * + * @return int + */ +int qed_sp_vport_stop(struct qed_hwfn *p_hwfn, u16 opaque_fid, u8 vport_id); + +int qed_sp_eth_filter_ucast(struct qed_hwfn *p_hwfn, + u16 opaque_fid, + struct qed_filter_ucast *p_filter_cmd, + enum spq_mode comp_mode, + struct qed_spq_comp_cb *p_comp_data); + +int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn, + struct qed_sp_vport_start_params *p_params); + +int qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn, + u16 opaque_fid, + u32 cid, + struct qed_queue_start_common_params *params, + u8 stats_id, + u16 bd_max_bytes, + dma_addr_t bd_chain_phys_addr, + dma_addr_t cqe_pbl_addr, u16 cqe_pbl_size); + +int qed_sp_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn, + u16 opaque_fid, + u32 cid, + struct qed_queue_start_common_params *p_params, + u8 stats_id, + dma_addr_t pbl_addr, + u16 pbl_size, + union qed_qm_pq_params *p_pq_params); + +u8 qed_mcast_bin_from_mac(u8 *mac); + +#endif /* _QED_L2_H */ diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h index 80a621754f13..bb7dcf12b7c2 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h +++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h @@ -427,6 +427,8 @@ 0x1 << 0) #define IGU_REG_MAPPING_MEMORY \ 0x184000UL +#define IGU_REG_STATISTIC_NUM_VF_MSG_SENT \ + 0x180408UL #define MISCS_REG_GENERIC_POR_0 \ 0x0096d4UL #define MCP_REG_NVM_CFG4 \ diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c index 750166db57cd..82f1eda38962 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c @@ -6,6 +6,7 @@ * this source tree. */ +#include #include #include "qed_cxt.h" #include "qed_hsi.h" @@ -486,6 +487,36 @@ static void qed_iov_vf_pglue_clear_err(struct qed_hwfn *p_hwfn, 1 << (abs_vfid & 0x1f)); } +static void qed_iov_vf_igu_reset(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, struct qed_vf_info *vf) +{ + u16 igu_sb_id; + int i; + + /* Set VF masks and configuration - pretend */ + qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid); + + qed_wr(p_hwfn, p_ptt, IGU_REG_STATISTIC_NUM_VF_MSG_SENT, 0); + + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "value in VF_CONFIGURATION of vf %d after write %x\n", + vf->abs_vf_id, + qed_rd(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION)); + + /* unpretend */ + qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid); + + /* iterate over all queues, clear sb consumer */ + for (i = 0; i < vf->num_sbs; i++) { + igu_sb_id = vf->igu_sbs[i]; + /* Set then clear... 
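+ * the SB cleanup flag for each of the VF's status blocks, so their consumers are reset as noted above.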
*/ + qed_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 1, + vf->opaque_fid); + qed_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 0, + vf->opaque_fid); + } +} + static void qed_iov_vf_igu_set_int(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_vf_info *vf, bool enable) @@ -585,6 +616,19 @@ static void qed_iov_config_perm_table(struct qed_hwfn *p_hwfn, } } +static void qed_iov_enable_vf_traffic(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_vf_info *vf) +{ + /* Reset vf in IGU - interrupts are still disabled */ + qed_iov_vf_igu_reset(p_hwfn, p_ptt, vf); + + qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 1); + + /* Permission Table */ + qed_iov_config_perm_table(p_hwfn, p_ptt, vf, true); +} + static u8 qed_iov_alloc_vf_igu_sbs(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_vf_info *vf, u16 num_rx_queues) @@ -870,6 +914,67 @@ static void qed_iov_send_response(struct qed_hwfn *p_hwfn, USTORM_VF_PF_CHANNEL_READY_OFFSET(eng_vf_id), 1); } +static u16 qed_iov_vport_to_tlv(struct qed_hwfn *p_hwfn, + enum qed_iov_vport_update_flag flag) +{ + switch (flag) { + case QED_IOV_VP_UPDATE_ACTIVATE: + return CHANNEL_TLV_VPORT_UPDATE_ACTIVATE; + case QED_IOV_VP_UPDATE_MCAST: + return CHANNEL_TLV_VPORT_UPDATE_MCAST; + case QED_IOV_VP_UPDATE_ACCEPT_PARAM: + return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM; + case QED_IOV_VP_UPDATE_RSS: + return CHANNEL_TLV_VPORT_UPDATE_RSS; + default: + return 0; + } +} + +static u16 qed_iov_prep_vp_update_resp_tlvs(struct qed_hwfn *p_hwfn, + struct qed_vf_info *p_vf, + struct qed_iov_vf_mbx *p_mbx, + u8 status, + u16 tlvs_mask, u16 tlvs_accepted) +{ + struct pfvf_def_resp_tlv *resp; + u16 size, total_len, i; + + memset(p_mbx->reply_virt, 0, sizeof(union pfvf_tlvs)); + p_mbx->offset = (u8 *)p_mbx->reply_virt; + size = sizeof(struct pfvf_def_resp_tlv); + total_len = size; + + qed_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_VPORT_UPDATE, size); + + /* Prepare response for all extended tlvs if they are found by PF */ + for (i = 0; i < QED_IOV_VP_UPDATE_MAX; i++) { + if (!(tlvs_mask & (1 << i))) + continue; + + resp = qed_add_tlv(p_hwfn, &p_mbx->offset, + qed_iov_vport_to_tlv(p_hwfn, i), size); + + if (tlvs_accepted & (1 << i)) + resp->hdr.status = status; + else + resp->hdr.status = PFVF_STATUS_NOT_SUPPORTED; + + DP_VERBOSE(p_hwfn, + QED_MSG_IOV, + "VF[%d] - vport_update response: TLV %d, status %02x\n", + p_vf->relative_vf_id, + qed_iov_vport_to_tlv(p_hwfn, i), resp->hdr.status); + + total_len += size; + } + + qed_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + + return total_len; +} + static void qed_iov_prepare_resp(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_vf_info *vf_info, @@ -918,6 +1023,7 @@ static void qed_iov_vf_cleanup(struct qed_hwfn *p_hwfn, u32 i; p_vf->vf_bulletin = 0; + p_vf->vport_instance = 0; p_vf->num_mac_filters = 0; p_vf->num_vlan_filters = 0; @@ -925,6 +1031,8 @@ static void qed_iov_vf_cleanup(struct qed_hwfn *p_hwfn, p_vf->num_rxqs = p_vf->num_sbs; p_vf->num_txqs = p_vf->num_sbs; + p_vf->num_active_rxqs = 0; + for (i = 0; i < QED_MAX_VF_CHAINS_PER_PF; i++) p_vf->vf_queues[i].rxq_active = 0; @@ -1074,6 +1182,578 @@ out: sizeof(struct pfvf_acquire_resp_tlv), vfpf_status); } +static void qed_iov_vf_mbx_start_vport(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_vf_info *vf) +{ + struct qed_sp_vport_start_params params = { 0 }; + struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; + struct vfpf_vport_start_tlv *start; + u8 status = PFVF_STATUS_SUCCESS; + struct 
qed_vf_info *vf_info; + int sb_id; + int rc; + + vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vf->relative_vf_id, true); + if (!vf_info) { + DP_NOTICE(p_hwfn->cdev, + "Failed to get VF info, invalid vfid [%d]\n", + vf->relative_vf_id); + return; + } + + vf->state = VF_ENABLED; + start = &mbx->req_virt->start_vport; + + /* Initialize Status block in CAU */ + for (sb_id = 0; sb_id < vf->num_sbs; sb_id++) { + if (!start->sb_addr[sb_id]) { + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "VF[%d] did not fill the address of SB %d\n", + vf->relative_vf_id, sb_id); + break; + } + + qed_int_cau_conf_sb(p_hwfn, p_ptt, + start->sb_addr[sb_id], + vf->igu_sbs[sb_id], + vf->abs_vf_id, 1); + } + qed_iov_enable_vf_traffic(p_hwfn, p_ptt, vf); + + vf->mtu = start->mtu; + + params.tpa_mode = start->tpa_mode; + params.remove_inner_vlan = start->inner_vlan_removal; + + params.drop_ttl0 = false; + params.concrete_fid = vf->concrete_fid; + params.opaque_fid = vf->opaque_fid; + params.vport_id = vf->vport_id; + params.max_buffers_per_cqe = start->max_buffers_per_cqe; + params.mtu = vf->mtu; + + rc = qed_sp_eth_vport_start(p_hwfn, ¶ms); + if (rc != 0) { + DP_ERR(p_hwfn, + "qed_iov_vf_mbx_start_vport returned error %d\n", rc); + status = PFVF_STATUS_FAILURE; + } else { + vf->vport_instance++; + } + qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_START, + sizeof(struct pfvf_def_resp_tlv), status); +} + +static void qed_iov_vf_mbx_stop_vport(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_vf_info *vf) +{ + u8 status = PFVF_STATUS_SUCCESS; + int rc; + + vf->vport_instance--; + + rc = qed_sp_vport_stop(p_hwfn, vf->opaque_fid, vf->vport_id); + if (rc != 0) { + DP_ERR(p_hwfn, "qed_iov_vf_mbx_stop_vport returned error %d\n", + rc); + status = PFVF_STATUS_FAILURE; + } + + qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_TEARDOWN, + sizeof(struct pfvf_def_resp_tlv), status); +} + +#define TSTORM_QZONE_START PXP_VF_BAR0_START_SDM_ZONE_A +#define MSTORM_QZONE_START(dev) (TSTORM_QZONE_START + \ + (TSTORM_QZONE_SIZE * NUM_OF_L2_QUEUES(dev))) + +static void qed_iov_vf_mbx_start_rxq_resp(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_vf_info *vf, u8 status) +{ + struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; + struct pfvf_start_queue_resp_tlv *p_tlv; + struct vfpf_start_rxq_tlv *req; + + mbx->offset = (u8 *)mbx->reply_virt; + + p_tlv = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_RXQ, + sizeof(*p_tlv)); + qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + + /* Update the TLV with the response */ + if (status == PFVF_STATUS_SUCCESS) { + u16 hw_qid = 0; + + req = &mbx->req_virt->start_rxq; + qed_fw_l2_queue(p_hwfn, vf->vf_queues[req->rx_qid].fw_rx_qid, + &hw_qid); + + p_tlv->offset = MSTORM_QZONE_START(p_hwfn->cdev) + + hw_qid * MSTORM_QZONE_SIZE + + offsetof(struct mstorm_eth_queue_zone, + rx_producers); + } + + qed_iov_send_response(p_hwfn, p_ptt, vf, sizeof(*p_tlv), status); +} + +static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_vf_info *vf) +{ + struct qed_queue_start_common_params params; + struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; + u8 status = PFVF_STATUS_SUCCESS; + struct vfpf_start_rxq_tlv *req; + int rc; + + memset(¶ms, 0, sizeof(params)); + req = &mbx->req_virt->start_rxq; + params.queue_id = vf->vf_queues[req->rx_qid].fw_rx_qid; + params.vport_id = vf->vport_id; + params.sb = req->hw_sb; + params.sb_idx = req->sb_index; + + rc = qed_sp_eth_rxq_start_ramrod(p_hwfn, vf->opaque_fid, + 
vf->vf_queues[req->rx_qid].fw_cid, + ¶ms, + vf->abs_vf_id + 0x10, + req->bd_max_bytes, + req->rxq_addr, + req->cqe_pbl_addr, req->cqe_pbl_size); + + if (rc) { + status = PFVF_STATUS_FAILURE; + } else { + vf->vf_queues[req->rx_qid].rxq_active = true; + vf->num_active_rxqs++; + } + + qed_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf, status); +} + +static void qed_iov_vf_mbx_start_txq(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_vf_info *vf) +{ + u16 length = sizeof(struct pfvf_def_resp_tlv); + struct qed_queue_start_common_params params; + struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; + union qed_qm_pq_params pq_params; + u8 status = PFVF_STATUS_SUCCESS; + struct vfpf_start_txq_tlv *req; + int rc; + + /* Prepare the parameters which would choose the right PQ */ + memset(&pq_params, 0, sizeof(pq_params)); + pq_params.eth.is_vf = 1; + pq_params.eth.vf_id = vf->relative_vf_id; + + memset(¶ms, 0, sizeof(params)); + req = &mbx->req_virt->start_txq; + params.queue_id = vf->vf_queues[req->tx_qid].fw_tx_qid; + params.vport_id = vf->vport_id; + params.sb = req->hw_sb; + params.sb_idx = req->sb_index; + + rc = qed_sp_eth_txq_start_ramrod(p_hwfn, + vf->opaque_fid, + vf->vf_queues[req->tx_qid].fw_cid, + ¶ms, + vf->abs_vf_id + 0x10, + req->pbl_addr, + req->pbl_size, &pq_params); + + if (rc) + status = PFVF_STATUS_FAILURE; + else + vf->vf_queues[req->tx_qid].txq_active = true; + + qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_START_TXQ, + length, status); +} + +static int qed_iov_vf_stop_rxqs(struct qed_hwfn *p_hwfn, + struct qed_vf_info *vf, + u16 rxq_id, u8 num_rxqs, bool cqe_completion) +{ + int rc = 0; + int qid; + + if (rxq_id + num_rxqs > ARRAY_SIZE(vf->vf_queues)) + return -EINVAL; + + for (qid = rxq_id; qid < rxq_id + num_rxqs; qid++) { + if (vf->vf_queues[qid].rxq_active) { + rc = qed_sp_eth_rx_queue_stop(p_hwfn, + vf->vf_queues[qid]. + fw_rx_qid, false, + cqe_completion); + + if (rc) + return rc; + } + vf->vf_queues[qid].rxq_active = false; + vf->num_active_rxqs--; + } + + return rc; +} + +static int qed_iov_vf_stop_txqs(struct qed_hwfn *p_hwfn, + struct qed_vf_info *vf, u16 txq_id, u8 num_txqs) +{ + int rc = 0; + int qid; + + if (txq_id + num_txqs > ARRAY_SIZE(vf->vf_queues)) + return -EINVAL; + + for (qid = txq_id; qid < txq_id + num_txqs; qid++) { + if (vf->vf_queues[qid].txq_active) { + rc = qed_sp_eth_tx_queue_stop(p_hwfn, + vf->vf_queues[qid]. + fw_tx_qid); + + if (rc) + return rc; + } + vf->vf_queues[qid].txq_active = false; + } + return rc; +} + +static void qed_iov_vf_mbx_stop_rxqs(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_vf_info *vf) +{ + u16 length = sizeof(struct pfvf_def_resp_tlv); + struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; + u8 status = PFVF_STATUS_SUCCESS; + struct vfpf_stop_rxqs_tlv *req; + int rc; + + /* We give the option of starting from qid != 0, in this case we + * need to make sure that qid + num_qs doesn't exceed the actual + * amount of queues that exist. 
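+ * qed_iov_vf_stop_rxqs() above rejects such a request with -EINVAL via its ARRAY_SIZE(vf->vf_queues) check.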
+ */ + req = &mbx->req_virt->stop_rxqs; + rc = qed_iov_vf_stop_rxqs(p_hwfn, vf, req->rx_qid, + req->num_rxqs, req->cqe_completion); + if (rc) + status = PFVF_STATUS_FAILURE; + + qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_RXQS, + length, status); +} + +static void qed_iov_vf_mbx_stop_txqs(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_vf_info *vf) +{ + u16 length = sizeof(struct pfvf_def_resp_tlv); + struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; + u8 status = PFVF_STATUS_SUCCESS; + struct vfpf_stop_txqs_tlv *req; + int rc; + + /* We give the option of starting from qid != 0, in this case we + * need to make sure that qid + num_qs doesn't exceed the actual + * amount of queues that exist. + */ + req = &mbx->req_virt->stop_txqs; + rc = qed_iov_vf_stop_txqs(p_hwfn, vf, req->tx_qid, req->num_txqs); + if (rc) + status = PFVF_STATUS_FAILURE; + + qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_TXQS, + length, status); +} + +void *qed_iov_search_list_tlvs(struct qed_hwfn *p_hwfn, + void *p_tlvs_list, u16 req_type) +{ + struct channel_tlv *p_tlv = (struct channel_tlv *)p_tlvs_list; + int len = 0; + + do { + if (!p_tlv->length) { + DP_NOTICE(p_hwfn, "Zero length TLV found\n"); + return NULL; + } + + if (p_tlv->type == req_type) { + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "Extended tlv type %d, length %d found\n", + p_tlv->type, p_tlv->length); + return p_tlv; + } + + len += p_tlv->length; + p_tlv = (struct channel_tlv *)((u8 *)p_tlv + p_tlv->length); + + if ((len + p_tlv->length) > TLV_BUFFER_SIZE) { + DP_NOTICE(p_hwfn, "TLVs has overrun the buffer size\n"); + return NULL; + } + } while (p_tlv->type != CHANNEL_TLV_LIST_END); + + return NULL; +} + +static void +qed_iov_vp_update_act_param(struct qed_hwfn *p_hwfn, + struct qed_sp_vport_update_params *p_data, + struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask) +{ + struct vfpf_vport_update_activate_tlv *p_act_tlv; + u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE; + + p_act_tlv = (struct vfpf_vport_update_activate_tlv *) + qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv); + if (!p_act_tlv) + return; + + p_data->update_vport_active_rx_flg = p_act_tlv->update_rx; + p_data->vport_active_rx_flg = p_act_tlv->active_rx; + p_data->update_vport_active_tx_flg = p_act_tlv->update_tx; + p_data->vport_active_tx_flg = p_act_tlv->active_tx; + *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACTIVATE; +} + +static void +qed_iov_vp_update_mcast_bin_param(struct qed_hwfn *p_hwfn, + struct qed_sp_vport_update_params *p_data, + struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask) +{ + struct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv; + u16 tlv = CHANNEL_TLV_VPORT_UPDATE_MCAST; + + p_mcast_tlv = (struct vfpf_vport_update_mcast_bin_tlv *) + qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv); + if (!p_mcast_tlv) + return; + + p_data->update_approx_mcast_flg = 1; + memcpy(p_data->bins, p_mcast_tlv->bins, + sizeof(unsigned long) * ETH_MULTICAST_MAC_BINS_IN_REGS); + *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_MCAST; +} + +static void +qed_iov_vp_update_accept_flag(struct qed_hwfn *p_hwfn, + struct qed_sp_vport_update_params *p_data, + struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask) +{ + struct qed_filter_accept_flags *p_flags = &p_data->accept_flags; + struct vfpf_vport_update_accept_param_tlv *p_accept_tlv; + u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM; + + p_accept_tlv = (struct vfpf_vport_update_accept_param_tlv *) + qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv); + if (!p_accept_tlv) + return; + + p_flags->update_rx_mode_config = 
p_accept_tlv->update_rx_mode; + p_flags->rx_accept_filter = p_accept_tlv->rx_accept_filter; + p_flags->update_tx_mode_config = p_accept_tlv->update_tx_mode; + p_flags->tx_accept_filter = p_accept_tlv->tx_accept_filter; + *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACCEPT_PARAM; +} + +static void +qed_iov_vp_update_rss_param(struct qed_hwfn *p_hwfn, + struct qed_vf_info *vf, + struct qed_sp_vport_update_params *p_data, + struct qed_rss_params *p_rss, + struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask) +{ + struct vfpf_vport_update_rss_tlv *p_rss_tlv; + u16 tlv = CHANNEL_TLV_VPORT_UPDATE_RSS; + u16 i, q_idx, max_q_idx; + u16 table_size; + + p_rss_tlv = (struct vfpf_vport_update_rss_tlv *) + qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv); + if (!p_rss_tlv) { + p_data->rss_params = NULL; + return; + } + + memset(p_rss, 0, sizeof(struct qed_rss_params)); + + p_rss->update_rss_config = !!(p_rss_tlv->update_rss_flags & + VFPF_UPDATE_RSS_CONFIG_FLAG); + p_rss->update_rss_capabilities = !!(p_rss_tlv->update_rss_flags & + VFPF_UPDATE_RSS_CAPS_FLAG); + p_rss->update_rss_ind_table = !!(p_rss_tlv->update_rss_flags & + VFPF_UPDATE_RSS_IND_TABLE_FLAG); + p_rss->update_rss_key = !!(p_rss_tlv->update_rss_flags & + VFPF_UPDATE_RSS_KEY_FLAG); + + p_rss->rss_enable = p_rss_tlv->rss_enable; + p_rss->rss_eng_id = vf->relative_vf_id + 1; + p_rss->rss_caps = p_rss_tlv->rss_caps; + p_rss->rss_table_size_log = p_rss_tlv->rss_table_size_log; + memcpy(p_rss->rss_ind_table, p_rss_tlv->rss_ind_table, + sizeof(p_rss->rss_ind_table)); + memcpy(p_rss->rss_key, p_rss_tlv->rss_key, sizeof(p_rss->rss_key)); + + table_size = min_t(u16, ARRAY_SIZE(p_rss->rss_ind_table), + (1 << p_rss_tlv->rss_table_size_log)); + + max_q_idx = ARRAY_SIZE(vf->vf_queues); + + for (i = 0; i < table_size; i++) { + u16 index = vf->vf_queues[0].fw_rx_qid; + + q_idx = p_rss->rss_ind_table[i]; + if (q_idx >= max_q_idx) + DP_NOTICE(p_hwfn, + "rss_ind_table[%d] = %d, rxq is out of range\n", + i, q_idx); + else if (!vf->vf_queues[q_idx].rxq_active) + DP_NOTICE(p_hwfn, + "rss_ind_table[%d] = %d, rxq is not active\n", + i, q_idx); + else + index = vf->vf_queues[q_idx].fw_rx_qid; + p_rss->rss_ind_table[i] = index; + } + + p_data->rss_params = p_rss; + *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_RSS; +} + +static void qed_iov_vf_mbx_vport_update(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_vf_info *vf) +{ + struct qed_sp_vport_update_params params; + struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; + struct qed_rss_params rss_params; + u8 status = PFVF_STATUS_SUCCESS; + u16 tlvs_mask = 0; + u16 length; + int rc; + + memset(¶ms, 0, sizeof(params)); + params.opaque_fid = vf->opaque_fid; + params.vport_id = vf->vport_id; + params.rss_params = NULL; + + /* Search for extended tlvs list and update values + * from VF in struct qed_sp_vport_update_params. + */ + qed_iov_vp_update_act_param(p_hwfn, ¶ms, mbx, &tlvs_mask); + qed_iov_vp_update_mcast_bin_param(p_hwfn, ¶ms, mbx, &tlvs_mask); + qed_iov_vp_update_accept_flag(p_hwfn, ¶ms, mbx, &tlvs_mask); + qed_iov_vp_update_rss_param(p_hwfn, vf, ¶ms, &rss_params, + mbx, &tlvs_mask); + + /* Just log a message if there is no single extended tlv in buffer. + * When all features of vport update ramrod would be requested by VF + * as extended TLVs in buffer then an error can be returned in response + * if there is no extended TLV present in buffer. 
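+ * For now the VF simply receives a PFVF_STATUS_NOT_SUPPORTED response in that case.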
+ */ + if (!tlvs_mask) { + DP_NOTICE(p_hwfn, + "No feature tlvs found for vport update\n"); + status = PFVF_STATUS_NOT_SUPPORTED; + goto out; + } + + rc = qed_sp_vport_update(p_hwfn, ¶ms, QED_SPQ_MODE_EBLOCK, NULL); + + if (rc) + status = PFVF_STATUS_FAILURE; + +out: + length = qed_iov_prep_vp_update_resp_tlvs(p_hwfn, vf, mbx, status, + tlvs_mask, tlvs_mask); + qed_iov_send_response(p_hwfn, p_ptt, vf, length, status); +} + +int qed_iov_chk_ucast(struct qed_hwfn *hwfn, + int vfid, struct qed_filter_ucast *params) +{ + struct qed_public_vf_info *vf; + + vf = qed_iov_get_public_vf_info(hwfn, vfid, true); + if (!vf) + return -EINVAL; + + /* No real decision to make; Store the configured MAC */ + if (params->type == QED_FILTER_MAC || + params->type == QED_FILTER_MAC_VLAN) + ether_addr_copy(vf->mac, params->mac); + + return 0; +} + +static void qed_iov_vf_mbx_ucast_filter(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_vf_info *vf) +{ + struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; + struct vfpf_ucast_filter_tlv *req; + u8 status = PFVF_STATUS_SUCCESS; + struct qed_filter_ucast params; + int rc; + + /* Prepare the unicast filter params */ + memset(¶ms, 0, sizeof(struct qed_filter_ucast)); + req = &mbx->req_virt->ucast_filter; + params.opcode = (enum qed_filter_opcode)req->opcode; + params.type = (enum qed_filter_ucast_type)req->type; + + params.is_rx_filter = 1; + params.is_tx_filter = 1; + params.vport_to_remove_from = vf->vport_id; + params.vport_to_add_to = vf->vport_id; + memcpy(params.mac, req->mac, ETH_ALEN); + params.vlan = req->vlan; + + DP_VERBOSE(p_hwfn, + QED_MSG_IOV, + "VF[%d]: opcode 0x%02x type 0x%02x [%s %s] [vport 0x%02x] MAC %02x:%02x:%02x:%02x:%02x:%02x, vlan 0x%04x\n", + vf->abs_vf_id, params.opcode, params.type, + params.is_rx_filter ? "RX" : "", + params.is_tx_filter ? 
"TX" : "", + params.vport_to_add_to, + params.mac[0], params.mac[1], + params.mac[2], params.mac[3], + params.mac[4], params.mac[5], params.vlan); + + if (!vf->vport_instance) { + DP_VERBOSE(p_hwfn, + QED_MSG_IOV, + "No VPORT instance available for VF[%d], failing ucast MAC configuration\n", + vf->abs_vf_id); + status = PFVF_STATUS_FAILURE; + goto out; + } + + rc = qed_iov_chk_ucast(p_hwfn, vf->relative_vf_id, ¶ms); + if (rc) { + status = PFVF_STATUS_FAILURE; + goto out; + } + + rc = qed_sp_eth_filter_ucast(p_hwfn, vf->opaque_fid, ¶ms, + QED_SPQ_MODE_CB, NULL); + if (rc) + status = PFVF_STATUS_FAILURE; + +out: + qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UCAST_FILTER, + sizeof(struct pfvf_def_resp_tlv), status); +} + static void qed_iov_vf_mbx_int_cleanup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_vf_info *vf) @@ -1365,6 +2045,30 @@ static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn, case CHANNEL_TLV_ACQUIRE: qed_iov_vf_mbx_acquire(p_hwfn, p_ptt, p_vf); break; + case CHANNEL_TLV_VPORT_START: + qed_iov_vf_mbx_start_vport(p_hwfn, p_ptt, p_vf); + break; + case CHANNEL_TLV_VPORT_TEARDOWN: + qed_iov_vf_mbx_stop_vport(p_hwfn, p_ptt, p_vf); + break; + case CHANNEL_TLV_START_RXQ: + qed_iov_vf_mbx_start_rxq(p_hwfn, p_ptt, p_vf); + break; + case CHANNEL_TLV_START_TXQ: + qed_iov_vf_mbx_start_txq(p_hwfn, p_ptt, p_vf); + break; + case CHANNEL_TLV_STOP_RXQS: + qed_iov_vf_mbx_stop_rxqs(p_hwfn, p_ptt, p_vf); + break; + case CHANNEL_TLV_STOP_TXQS: + qed_iov_vf_mbx_stop_txqs(p_hwfn, p_ptt, p_vf); + break; + case CHANNEL_TLV_VPORT_UPDATE: + qed_iov_vf_mbx_vport_update(p_hwfn, p_ptt, p_vf); + break; + case CHANNEL_TLV_UCAST_FILTER: + qed_iov_vf_mbx_ucast_filter(p_hwfn, p_ptt, p_vf); + break; case CHANNEL_TLV_CLOSE: qed_iov_vf_mbx_close(p_hwfn, p_ptt, p_vf); break; diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.h b/drivers/net/ethernet/qlogic/qed/qed_sriov.h index 10794b08fd21..63bce9c53f06 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.h +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.h @@ -24,6 +24,14 @@ #define QED_MAX_VF_CHAINS_PER_PF 16 #define QED_ETH_VF_NUM_VLAN_FILTERS 2 +enum qed_iov_vport_update_flag { + QED_IOV_VP_UPDATE_ACTIVATE, + QED_IOV_VP_UPDATE_MCAST, + QED_IOV_VP_UPDATE_ACCEPT_PARAM, + QED_IOV_VP_UPDATE_RSS, + QED_IOV_VP_UPDATE_MAX, +}; + struct qed_public_vf_info { /* These copies will later be reflected in the bulletin board, * but this copy should be newer. @@ -81,6 +89,7 @@ struct qed_vf_q_info { enum vf_state { VF_FREE = 0, /* VF ready to be acquired holds no resc */ VF_ACQUIRED, /* VF, acquired, but not initalized */ + VF_ENABLED, /* VF, Enabled */ VF_RESET, /* VF, FLR'd, pending cleanup */ VF_STOPPED /* VF, Stopped */ }; @@ -97,6 +106,7 @@ struct qed_vf_info { u32 concrete_fid; u16 opaque_fid; + u16 mtu; u8 vport_id; u8 relative_vf_id; @@ -105,6 +115,7 @@ struct qed_vf_info { (p_vf)->abs_vf_id + MAX_NUM_VFS_BB : \ (p_vf)->abs_vf_id) + u8 vport_instance; u8 num_rxqs; u8 num_txqs; @@ -114,6 +125,7 @@ struct qed_vf_info { u8 num_vlan_filters; struct qed_vf_q_info vf_queues[QED_MAX_VF_CHAINS_PER_PF]; u16 igu_sbs[QED_MAX_VF_CHAINS_PER_PF]; + u8 num_active_rxqs; struct qed_public_vf_info p_vf_info; }; @@ -238,6 +250,18 @@ int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn, */ int qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *disabled_vfs); +/** + * @brief Search extended TLVs in request/reply buffer. 
+ * + * @param p_hwfn + * @param p_tlvs_list - Pointer to tlvs list + * @param req_type - Type of TLV + * + * @return pointer to tlv type if found, otherwise returns NULL. + */ +void *qed_iov_search_list_tlvs(struct qed_hwfn *p_hwfn, + void *p_tlvs_list, u16 req_type); + void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first); int qed_iov_wq_start(struct qed_dev *cdev); diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c index 2460e39724f1..961de771392c 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_vf.c +++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c @@ -311,6 +311,400 @@ free_p_iov: return -ENOMEM; } +int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn, + u8 rx_qid, + u16 sb, + u8 sb_index, + u16 bd_max_bytes, + dma_addr_t bd_chain_phys_addr, + dma_addr_t cqe_pbl_addr, + u16 cqe_pbl_size, void __iomem **pp_prod) +{ + struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; + struct pfvf_start_queue_resp_tlv *resp; + struct vfpf_start_rxq_tlv *req; + int rc; + + /* clear mailbox and prep first tlv */ + req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_START_RXQ, sizeof(*req)); + + req->rx_qid = rx_qid; + req->cqe_pbl_addr = cqe_pbl_addr; + req->cqe_pbl_size = cqe_pbl_size; + req->rxq_addr = bd_chain_phys_addr; + req->hw_sb = sb; + req->sb_index = sb_index; + req->bd_max_bytes = bd_max_bytes; + req->stat_id = -1; + + /* add list termination tlv */ + qed_add_tlv(p_hwfn, &p_iov->offset, + CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv)); + + resp = &p_iov->pf2vf_reply->queue_start; + rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); + if (rc) + return rc; + + if (resp->hdr.status != PFVF_STATUS_SUCCESS) + return -EINVAL; + + /* Learn the address of the producer from the response */ + if (pp_prod) { + u64 init_prod_val = 0; + + *pp_prod = (u8 __iomem *)p_hwfn->regview + resp->offset; + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "Rxq[0x%02x]: producer at %p [offset 0x%08x]\n", + rx_qid, *pp_prod, resp->offset); + + /* Init the rcq, rx bd and rx sge (if valid) producers to 0 */ + __internal_ram_wr(p_hwfn, *pp_prod, sizeof(u64), + (u32 *)&init_prod_val); + } + + return rc; +} + +int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn, u16 rx_qid, bool cqe_completion) +{ + struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; + struct vfpf_stop_rxqs_tlv *req; + struct pfvf_def_resp_tlv *resp; + int rc; + + /* clear mailbox and prep first tlv */ + req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_STOP_RXQS, sizeof(*req)); + + req->rx_qid = rx_qid; + req->num_rxqs = 1; + req->cqe_completion = cqe_completion; + + /* add list termination tlv */ + qed_add_tlv(p_hwfn, &p_iov->offset, + CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv)); + + resp = &p_iov->pf2vf_reply->default_resp; + rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); + if (rc) + return rc; + + if (resp->hdr.status != PFVF_STATUS_SUCCESS) + return -EINVAL; + + return rc; +} + +int qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn, + u16 tx_queue_id, + u16 sb, + u8 sb_index, + dma_addr_t pbl_addr, + u16 pbl_size, void __iomem **pp_doorbell) +{ + struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; + struct vfpf_start_txq_tlv *req; + struct pfvf_def_resp_tlv *resp; + int rc; + + /* clear mailbox and prep first tlv */ + req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_START_TXQ, sizeof(*req)); + + req->tx_qid = tx_queue_id; + + /* Tx */ + req->pbl_addr = pbl_addr; + req->pbl_size = pbl_size; + req->hw_sb = sb; + req->sb_index = sb_index; + + /* add list termination tlv */ + qed_add_tlv(p_hwfn, &p_iov->offset, + 
CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv)); + + resp = &p_iov->pf2vf_reply->default_resp; + rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); + if (rc) + return rc; + + if (resp->hdr.status != PFVF_STATUS_SUCCESS) + return -EINVAL; + + if (pp_doorbell) { + u8 cid = p_iov->acquire_resp.resc.cid[tx_queue_id]; + + *pp_doorbell = (u8 __iomem *)p_hwfn->doorbells + + qed_db_addr(cid, DQ_DEMS_LEGACY); + } + + return rc; +} + +int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, u16 tx_qid) +{ + struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; + struct vfpf_stop_txqs_tlv *req; + struct pfvf_def_resp_tlv *resp; + int rc; + + /* clear mailbox and prep first tlv */ + req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_STOP_TXQS, sizeof(*req)); + + req->tx_qid = tx_qid; + req->num_txqs = 1; + + /* add list termination tlv */ + qed_add_tlv(p_hwfn, &p_iov->offset, + CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv)); + + resp = &p_iov->pf2vf_reply->default_resp; + rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); + if (rc) + return rc; + + if (resp->hdr.status != PFVF_STATUS_SUCCESS) + return -EINVAL; + + return rc; +} + +int qed_vf_pf_vport_start(struct qed_hwfn *p_hwfn, + u8 vport_id, + u16 mtu, + u8 inner_vlan_removal, + enum qed_tpa_mode tpa_mode, + u8 max_buffers_per_cqe) +{ + struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; + struct vfpf_vport_start_tlv *req; + struct pfvf_def_resp_tlv *resp; + int rc, i; + + /* clear mailbox and prep first tlv */ + req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_START, sizeof(*req)); + + req->mtu = mtu; + req->vport_id = vport_id; + req->inner_vlan_removal = inner_vlan_removal; + req->tpa_mode = tpa_mode; + req->max_buffers_per_cqe = max_buffers_per_cqe; + + /* status blocks */ + for (i = 0; i < p_hwfn->vf_iov_info->acquire_resp.resc.num_sbs; i++) + if (p_hwfn->sbs_info[i]) + req->sb_addr[i] = p_hwfn->sbs_info[i]->sb_phys; + + /* add list termination tlv */ + qed_add_tlv(p_hwfn, &p_iov->offset, + CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv)); + + resp = &p_iov->pf2vf_reply->default_resp; + rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); + if (rc) + return rc; + + if (resp->hdr.status != PFVF_STATUS_SUCCESS) + return -EINVAL; + + return rc; +} + +int qed_vf_pf_vport_stop(struct qed_hwfn *p_hwfn) +{ + struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; + struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp; + int rc; + + /* clear mailbox and prep first tlv */ + qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_TEARDOWN, + sizeof(struct vfpf_first_tlv)); + + /* add list termination tlv */ + qed_add_tlv(p_hwfn, &p_iov->offset, + CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv)); + + rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); + if (rc) + return rc; + + if (resp->hdr.status != PFVF_STATUS_SUCCESS) + return -EINVAL; + + return rc; +} + +static bool +qed_vf_handle_vp_update_is_needed(struct qed_hwfn *p_hwfn, + struct qed_sp_vport_update_params *p_data, + u16 tlv) +{ + switch (tlv) { + case CHANNEL_TLV_VPORT_UPDATE_ACTIVATE: + return !!(p_data->update_vport_active_rx_flg || + p_data->update_vport_active_tx_flg); + case CHANNEL_TLV_VPORT_UPDATE_MCAST: + return !!p_data->update_approx_mcast_flg; + case CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM: + return !!(p_data->accept_flags.update_rx_mode_config || + p_data->accept_flags.update_tx_mode_config); + case CHANNEL_TLV_VPORT_UPDATE_RSS: + return !!p_data->rss_params; + default: + DP_INFO(p_hwfn, "Unexpected vport-update TLV[%d]\n", + tlv); 
+ return false; + } +} + +static void +qed_vf_handle_vp_update_tlvs_resp(struct qed_hwfn *p_hwfn, + struct qed_sp_vport_update_params *p_data) +{ + struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; + struct pfvf_def_resp_tlv *p_resp; + u16 tlv; + + for (tlv = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE; + tlv < CHANNEL_TLV_VPORT_UPDATE_MAX; tlv++) { + if (!qed_vf_handle_vp_update_is_needed(p_hwfn, p_data, tlv)) + continue; + + p_resp = (struct pfvf_def_resp_tlv *) + qed_iov_search_list_tlvs(p_hwfn, p_iov->pf2vf_reply, + tlv); + if (p_resp && p_resp->hdr.status) + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "TLV[%d] Configuration %s\n", + tlv, + (p_resp && p_resp->hdr.status) ? "succeeded" + : "failed"); + } +} + +int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn, + struct qed_sp_vport_update_params *p_params) +{ + struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; + struct vfpf_vport_update_tlv *req; + struct pfvf_def_resp_tlv *resp; + u8 update_rx, update_tx; + u32 resp_size = 0; + u16 size, tlv; + int rc; + + resp = &p_iov->pf2vf_reply->default_resp; + resp_size = sizeof(*resp); + + update_rx = p_params->update_vport_active_rx_flg; + update_tx = p_params->update_vport_active_tx_flg; + + /* clear mailbox and prep header tlv */ + qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_UPDATE, sizeof(*req)); + + /* Prepare extended tlvs */ + if (update_rx || update_tx) { + struct vfpf_vport_update_activate_tlv *p_act_tlv; + + size = sizeof(struct vfpf_vport_update_activate_tlv); + p_act_tlv = qed_add_tlv(p_hwfn, &p_iov->offset, + CHANNEL_TLV_VPORT_UPDATE_ACTIVATE, + size); + resp_size += sizeof(struct pfvf_def_resp_tlv); + + if (update_rx) { + p_act_tlv->update_rx = update_rx; + p_act_tlv->active_rx = p_params->vport_active_rx_flg; + } + + if (update_tx) { + p_act_tlv->update_tx = update_tx; + p_act_tlv->active_tx = p_params->vport_active_tx_flg; + } + } + + if (p_params->update_approx_mcast_flg) { + struct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv; + + size = sizeof(struct vfpf_vport_update_mcast_bin_tlv); + p_mcast_tlv = qed_add_tlv(p_hwfn, &p_iov->offset, + CHANNEL_TLV_VPORT_UPDATE_MCAST, size); + resp_size += sizeof(struct pfvf_def_resp_tlv); + + memcpy(p_mcast_tlv->bins, p_params->bins, + sizeof(unsigned long) * ETH_MULTICAST_MAC_BINS_IN_REGS); + } + + update_rx = p_params->accept_flags.update_rx_mode_config; + update_tx = p_params->accept_flags.update_tx_mode_config; + + if (update_rx || update_tx) { + struct vfpf_vport_update_accept_param_tlv *p_accept_tlv; + + tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM; + size = sizeof(struct vfpf_vport_update_accept_param_tlv); + p_accept_tlv = qed_add_tlv(p_hwfn, &p_iov->offset, tlv, size); + resp_size += sizeof(struct pfvf_def_resp_tlv); + + if (update_rx) { + p_accept_tlv->update_rx_mode = update_rx; + p_accept_tlv->rx_accept_filter = + p_params->accept_flags.rx_accept_filter; + } + + if (update_tx) { + p_accept_tlv->update_tx_mode = update_tx; + p_accept_tlv->tx_accept_filter = + p_params->accept_flags.tx_accept_filter; + } + } + + if (p_params->rss_params) { + struct qed_rss_params *rss_params = p_params->rss_params; + struct vfpf_vport_update_rss_tlv *p_rss_tlv; + + size = sizeof(struct vfpf_vport_update_rss_tlv); + p_rss_tlv = qed_add_tlv(p_hwfn, + &p_iov->offset, + CHANNEL_TLV_VPORT_UPDATE_RSS, size); + resp_size += sizeof(struct pfvf_def_resp_tlv); + + if (rss_params->update_rss_config) + p_rss_tlv->update_rss_flags |= + VFPF_UPDATE_RSS_CONFIG_FLAG; + if (rss_params->update_rss_capabilities) + p_rss_tlv->update_rss_flags |= + VFPF_UPDATE_RSS_CAPS_FLAG; + if 
(rss_params->update_rss_ind_table) + p_rss_tlv->update_rss_flags |= + VFPF_UPDATE_RSS_IND_TABLE_FLAG; + if (rss_params->update_rss_key) + p_rss_tlv->update_rss_flags |= VFPF_UPDATE_RSS_KEY_FLAG; + + p_rss_tlv->rss_enable = rss_params->rss_enable; + p_rss_tlv->rss_caps = rss_params->rss_caps; + p_rss_tlv->rss_table_size_log = rss_params->rss_table_size_log; + memcpy(p_rss_tlv->rss_ind_table, rss_params->rss_ind_table, + sizeof(rss_params->rss_ind_table)); + memcpy(p_rss_tlv->rss_key, rss_params->rss_key, + sizeof(rss_params->rss_key)); + } + + /* add list termination tlv */ + qed_add_tlv(p_hwfn, &p_iov->offset, + CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv)); + + rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, resp_size); + if (rc) + return rc; + + if (resp->hdr.status != PFVF_STATUS_SUCCESS) + return -EINVAL; + + qed_vf_handle_vp_update_tlvs_resp(p_hwfn, p_params); + + return rc; +} + int qed_vf_pf_reset(struct qed_hwfn *p_hwfn) { struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; @@ -384,6 +778,57 @@ int qed_vf_pf_release(struct qed_hwfn *p_hwfn) return rc; } +void qed_vf_pf_filter_mcast(struct qed_hwfn *p_hwfn, + struct qed_filter_mcast *p_filter_cmd) +{ + struct qed_sp_vport_update_params sp_params; + int i; + + memset(&sp_params, 0, sizeof(sp_params)); + sp_params.update_approx_mcast_flg = 1; + + if (p_filter_cmd->opcode == QED_FILTER_ADD) { + for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) { + u32 bit; + + bit = qed_mcast_bin_from_mac(p_filter_cmd->mac[i]); + __set_bit(bit, sp_params.bins); + } + } + + qed_vf_pf_vport_update(p_hwfn, &sp_params); +} + +int qed_vf_pf_filter_ucast(struct qed_hwfn *p_hwfn, + struct qed_filter_ucast *p_ucast) +{ + struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; + struct vfpf_ucast_filter_tlv *req; + struct pfvf_def_resp_tlv *resp; + int rc; + + /* clear mailbox and prep first tlv */ + req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_UCAST_FILTER, sizeof(*req)); + req->opcode = (u8) p_ucast->opcode; + req->type = (u8) p_ucast->type; + memcpy(req->mac, p_ucast->mac, ETH_ALEN); + req->vlan = p_ucast->vlan; + + /* add list termination tlv */ + qed_add_tlv(p_hwfn, &p_iov->offset, + CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv)); + + resp = &p_iov->pf2vf_reply->default_resp; + rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); + if (rc) + return rc; + + if (resp->hdr.status != PFVF_STATUS_SUCCESS) + return -EAGAIN; + + return 0; +} + int qed_vf_pf_int_cleanup(struct qed_hwfn *p_hwfn) { struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.h b/drivers/net/ethernet/qlogic/qed/qed_vf.h index c872e5e2985e..35337b186aa5 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_vf.h +++ b/drivers/net/ethernet/qlogic/qed/qed_vf.h @@ -9,6 +9,11 @@ #ifndef _QED_VF_H #define _QED_VF_H +#include "qed_l2.h" + +#define T_ETH_INDIRECTION_TABLE_SIZE 128 +#define T_ETH_RSS_KEY_SIZE 10 + struct vf_pf_resc_request { u8 num_rxqs; u8 num_txqs; @@ -25,6 +30,8 @@ struct hw_sb_info { u8 padding[5]; }; +#define TLV_BUFFER_SIZE 1024 + enum { PFVF_STATUS_WAITING, PFVF_STATUS_SUCCESS, @@ -98,6 +105,23 @@ struct vfpf_acquire_tlv { u32 padding; }; +/* receive side scaling tlv */ +struct vfpf_vport_update_rss_tlv { + struct channel_tlv tl; + + u8 update_rss_flags; +#define VFPF_UPDATE_RSS_CONFIG_FLAG BIT(0) +#define VFPF_UPDATE_RSS_CAPS_FLAG BIT(1) +#define VFPF_UPDATE_RSS_IND_TABLE_FLAG BIT(2) +#define VFPF_UPDATE_RSS_KEY_FLAG BIT(3) + + u8 rss_enable; + u8 rss_caps; + u8 rss_table_size_log; /* The table size is 2 ^ 
rss_table_size_log */ + u16 rss_ind_table[T_ETH_INDIRECTION_TABLE_SIZE]; + u32 rss_key[T_ETH_RSS_KEY_SIZE]; +}; + struct pfvf_storm_stats { u32 address; u32 len; @@ -169,7 +193,157 @@ struct pfvf_acquire_resp_tlv { u32 padding; }; -#define TLV_BUFFER_SIZE 1024 +struct pfvf_start_queue_resp_tlv { + struct pfvf_tlv hdr; + u32 offset; /* offset to consumer/producer of queue */ + u8 padding[4]; +}; + +/* Setup Queue */ +struct vfpf_start_rxq_tlv { + struct vfpf_first_tlv first_tlv; + + /* physical addresses */ + u64 rxq_addr; + u64 deprecated_sge_addr; + u64 cqe_pbl_addr; + + u16 cqe_pbl_size; + u16 hw_sb; + u16 rx_qid; + u16 hc_rate; /* desired interrupts per sec. */ + + u16 bd_max_bytes; + u16 stat_id; + u8 sb_index; + u8 padding[3]; +}; + +struct vfpf_start_txq_tlv { + struct vfpf_first_tlv first_tlv; + + /* physical addresses */ + u64 pbl_addr; + u16 pbl_size; + u16 stat_id; + u16 tx_qid; + u16 hw_sb; + + u32 flags; /* VFPF_QUEUE_FLG_X flags */ + u16 hc_rate; /* desired interrupts per sec. */ + u8 sb_index; + u8 padding[3]; +}; + +/* Stop RX Queue */ +struct vfpf_stop_rxqs_tlv { + struct vfpf_first_tlv first_tlv; + + u16 rx_qid; + u8 num_rxqs; + u8 cqe_completion; + u8 padding[4]; +}; + +/* Stop TX Queues */ +struct vfpf_stop_txqs_tlv { + struct vfpf_first_tlv first_tlv; + + u16 tx_qid; + u8 num_txqs; + u8 padding[5]; +}; + +struct vfpf_update_rxq_tlv { + struct vfpf_first_tlv first_tlv; + + u64 deprecated_sge_addr[PFVF_MAX_QUEUES_PER_VF]; + + u16 rx_qid; + u8 num_rxqs; + u8 flags; +#define VFPF_RXQ_UPD_INIT_SGE_DEPRECATE_FLAG BIT(0) +#define VFPF_RXQ_UPD_COMPLETE_CQE_FLAG BIT(1) +#define VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG BIT(2) + + u8 padding[4]; +}; + +/* Set Queue Filters */ +struct vfpf_q_mac_vlan_filter { + u32 flags; +#define VFPF_Q_FILTER_DEST_MAC_VALID 0x01 +#define VFPF_Q_FILTER_VLAN_TAG_VALID 0x02 +#define VFPF_Q_FILTER_SET_MAC 0x100 /* set/clear */ + + u8 mac[ETH_ALEN]; + u16 vlan_tag; + + u8 padding[4]; +}; + +/* Start a vport */ +struct vfpf_vport_start_tlv { + struct vfpf_first_tlv first_tlv; + + u64 sb_addr[PFVF_MAX_SBS_PER_VF]; + + u32 tpa_mode; + u16 dep1; + u16 mtu; + + u8 vport_id; + u8 inner_vlan_removal; + + u8 only_untagged; + u8 max_buffers_per_cqe; + + u8 padding[4]; +}; + +/* Extended tlvs - need to add rss, mcast, accept mode tlvs */ +struct vfpf_vport_update_activate_tlv { + struct channel_tlv tl; + u8 update_rx; + u8 update_tx; + u8 active_rx; + u8 active_tx; +}; + +struct vfpf_vport_update_mcast_bin_tlv { + struct channel_tlv tl; + u8 padding[4]; + + u64 bins[8]; +}; + +struct vfpf_vport_update_accept_param_tlv { + struct channel_tlv tl; + u8 update_rx_mode; + u8 update_tx_mode; + u8 rx_accept_filter; + u8 tx_accept_filter; +}; + +/* Primary tlv as a header for various extended tlvs for + * various functionalities in vport update ramrod. 
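+ * The extended activate/mcast/accept-param/rss TLVs follow this header within the same request buffer.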
+ */ +struct vfpf_vport_update_tlv { + struct vfpf_first_tlv first_tlv; +}; + +struct vfpf_ucast_filter_tlv { + struct vfpf_first_tlv first_tlv; + + u8 opcode; + u8 type; + + u8 mac[ETH_ALEN]; + + u16 vlan; + u16 padding[3]; +}; + struct tlv_buffer_size { u8 tlv_buffer[TLV_BUFFER_SIZE]; }; @@ -177,6 +351,13 @@ struct tlv_buffer_size { union vfpf_tlvs { struct vfpf_first_tlv first_tlv; struct vfpf_acquire_tlv acquire; + struct vfpf_start_rxq_tlv start_rxq; + struct vfpf_start_txq_tlv start_txq; + struct vfpf_stop_rxqs_tlv stop_rxqs; + struct vfpf_stop_txqs_tlv stop_txqs; + struct vfpf_vport_start_tlv start_vport; + struct vfpf_vport_update_tlv vport_update; + struct vfpf_ucast_filter_tlv ucast_filter; struct channel_list_end_tlv list_end; struct tlv_buffer_size tlv_buf_size; }; @@ -185,6 +366,7 @@ union pfvf_tlvs { struct pfvf_def_resp_tlv default_resp; struct pfvf_acquire_resp_tlv acquire_resp; struct tlv_buffer_size tlv_buf_size; + struct pfvf_start_queue_resp_tlv queue_start; }; struct qed_bulletin_content { @@ -206,11 +388,28 @@ struct qed_bulletin { enum { CHANNEL_TLV_NONE, /* ends tlv sequence */ CHANNEL_TLV_ACQUIRE, + CHANNEL_TLV_VPORT_START, + CHANNEL_TLV_VPORT_UPDATE, + CHANNEL_TLV_VPORT_TEARDOWN, + CHANNEL_TLV_START_RXQ, + CHANNEL_TLV_START_TXQ, + CHANNEL_TLV_STOP_RXQS, + CHANNEL_TLV_STOP_TXQS, CHANNEL_TLV_INT_CLEANUP, CHANNEL_TLV_CLOSE, CHANNEL_TLV_RELEASE, CHANNEL_TLV_LIST_END, - CHANNEL_TLV_MAX + CHANNEL_TLV_UCAST_FILTER, + CHANNEL_TLV_VPORT_UPDATE_ACTIVATE, + CHANNEL_TLV_VPORT_UPDATE_MCAST, + CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM, + CHANNEL_TLV_VPORT_UPDATE_RSS, + CHANNEL_TLV_MAX, + + /* Required for iterating over vport-update tlvs. + * Will break in case non-sequential vport-update tlvs. + */ + CHANNEL_TLV_VPORT_UPDATE_MAX = CHANNEL_TLV_VPORT_UPDATE_RSS + 1, }; /* This data is held in the qed_hwfn structure for VFs only. */ @@ -281,6 +480,85 @@ void qed_vf_get_fw_version(struct qed_hwfn *p_hwfn, */ int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn); +/** + * @brief VF - start the RX Queue by sending a message to the PF + * @param p_hwfn + * @param cid - zero based within the VF + * @param rx_queue_id - zero based within the VF + * @param sb - VF status block for this queue + * @param sb_index - Index within the status block + * @param bd_max_bytes - maximum number of bytes per bd + * @param bd_chain_phys_addr - physical address of bd chain + * @param cqe_pbl_addr - physical address of pbl + * @param cqe_pbl_size - pbl size + * @param pp_prod - pointer to the producer to be + * used in fastpath + * + * @return int + */ +int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn, + u8 rx_queue_id, + u16 sb, + u8 sb_index, + u16 bd_max_bytes, + dma_addr_t bd_chain_phys_addr, + dma_addr_t cqe_pbl_addr, + u16 cqe_pbl_size, void __iomem **pp_prod); + +/** + * @brief VF - start the TX queue by sending a message to the + * PF. + * + * @param p_hwfn + * @param tx_queue_id - zero based within the VF + * @param sb - status block for this queue + * @param sb_index - index within the status block + * @param bd_chain_phys_addr - physical address of tx chain + * @param pp_doorbell - pointer to address to which to + * write the doorbell too.. 
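+ * (i.e. the address inside the VF doorbell bar used to ring this Tx queue).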
+ * + * @return int + */ +int qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn, + u16 tx_queue_id, + u16 sb, + u8 sb_index, + dma_addr_t pbl_addr, + u16 pbl_size, void __iomem **pp_doorbell); + +/** + * @brief VF - stop the RX queue by sending a message to the PF + * + * @param p_hwfn + * @param rx_qid + * @param cqe_completion + * + * @return int + */ +int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn, + u16 rx_qid, bool cqe_completion); + +/** + * @brief VF - stop the TX queue by sending a message to the PF + * + * @param p_hwfn + * @param tx_qid + * + * @return int + */ +int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, u16 tx_qid); + +/** + * @brief VF - send a vport update command + * + * @param p_hwfn + * @param params + * + * @return int + */ +int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn, + struct qed_sp_vport_update_params *p_params); + /** * * @brief VF - send a close message to PF @@ -310,6 +588,41 @@ int qed_vf_pf_release(struct qed_hwfn *p_hwfn); */ u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id); +/** + * @brief qed_vf_pf_vport_start - perform vport start for VF. + * + * @param p_hwfn + * @param vport_id + * @param mtu + * @param inner_vlan_removal + * @param tpa_mode + * @param max_buffers_per_cqe, + * @param only_untagged - default behavior regarding vlan acceptance + * + * @return enum _qed_status + */ +int qed_vf_pf_vport_start(struct qed_hwfn *p_hwfn, + u8 vport_id, + u16 mtu, + u8 inner_vlan_removal, + enum qed_tpa_mode tpa_mode, + u8 max_buffers_per_cqe); + +/** + * @brief qed_vf_pf_vport_stop - stop the VF's vport + * + * @param p_hwfn + * + * @return enum _qed_status + */ +int qed_vf_pf_vport_stop(struct qed_hwfn *p_hwfn); + +int qed_vf_pf_filter_ucast(struct qed_hwfn *p_hwfn, + struct qed_filter_ucast *p_param); + +void qed_vf_pf_filter_mcast(struct qed_hwfn *p_hwfn, + struct qed_filter_mcast *p_filter_cmd); + /** * @brief qed_vf_pf_int_cleanup - clean the SB of the VF * @@ -343,6 +656,46 @@ static inline int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn) return -EINVAL; } +static inline int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn, + u8 rx_queue_id, + u16 sb, + u8 sb_index, + u16 bd_max_bytes, + dma_addr_t bd_chain_phys_adr, + dma_addr_t cqe_pbl_addr, + u16 cqe_pbl_size, void __iomem **pp_prod) +{ + return -EINVAL; +} + +static inline int qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn, + u16 tx_queue_id, + u16 sb, + u8 sb_index, + dma_addr_t pbl_addr, + u16 pbl_size, void __iomem **pp_doorbell) +{ + return -EINVAL; +} + +static inline int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn, + u16 rx_qid, bool cqe_completion) +{ + return -EINVAL; +} + +static inline int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, u16 tx_qid) +{ + return -EINVAL; +} + +static inline int +qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn, + struct qed_sp_vport_update_params *p_params) +{ + return -EINVAL; +} + static inline int qed_vf_pf_reset(struct qed_hwfn *p_hwfn) { return -EINVAL; @@ -358,6 +711,32 @@ static inline u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id) return 0; } +static inline int qed_vf_pf_vport_start(struct qed_hwfn *p_hwfn, + u8 vport_id, + u16 mtu, + u8 inner_vlan_removal, + enum qed_tpa_mode tpa_mode, + u8 max_buffers_per_cqe) +{ + return -EINVAL; +} + +static inline int qed_vf_pf_vport_stop(struct qed_hwfn *p_hwfn) +{ + return -EINVAL; +} + +static inline int qed_vf_pf_filter_ucast(struct qed_hwfn *p_hwfn, + struct qed_filter_ucast *p_param) +{ + return -EINVAL; +} + +static inline void qed_vf_pf_filter_mcast(struct qed_hwfn *p_hwfn, + struct 
qed_filter_mcast *p_filter_cmd) +{ +} + static inline int qed_vf_pf_int_cleanup(struct qed_hwfn *p_hwfn) { return -EINVAL; From 36558c3d77032feb2a49ff7818338256378a774f Mon Sep 17 00:00:00 2001 From: Yuval Mintz Date: Wed, 11 May 2016 16:36:17 +0300 Subject: [PATCH 1506/1649] qed: Bulletin and Link Up to this point, VF and PF communication always originates from VF. As a result, VF cannot be notified of any async changes, and specifically cannot be informed of the current link state. This introduces the bulletin board, the mechanism through which the PF is going to communicate async notifications back to the VF. basically, it's a well-defined structure agreed by both PF and VF which the VF would continuously poll and into which the PF would DMA messages when needed. [Bulletin board is actually allocated and communicated in previous patches but never before used] Based on the bulletin infrastructure, the VF can query its link status and receive said async carrier changes. Signed-off-by: Yuval Mintz Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed_main.c | 12 +- drivers/net/ethernet/qlogic/qed/qed_sriov.c | 136 ++++++++++++++++- drivers/net/ethernet/qlogic/qed/qed_sriov.h | 5 + drivers/net/ethernet/qlogic/qed/qed_vf.c | 122 +++++++++++++++ drivers/net/ethernet/qlogic/qed/qed_vf.h | 156 ++++++++++++++++++++ 5 files changed, 425 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index e98610e5bf70..dcb782c14e5c 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -1119,9 +1119,9 @@ static void qed_fill_link(struct qed_hwfn *hwfn, memcpy(&link_caps, qed_mcp_get_link_capabilities(hwfn), sizeof(link_caps)); } else { - memset(¶ms, 0, sizeof(params)); - memset(&link, 0, sizeof(link)); - memset(&link_caps, 0, sizeof(link_caps)); + qed_vf_get_link_params(hwfn, ¶ms); + qed_vf_get_link_state(hwfn, &link); + qed_vf_get_link_caps(hwfn, &link_caps); } /* Set the link parameters to pass to protocol driver */ @@ -1224,7 +1224,12 @@ static void qed_fill_link(struct qed_hwfn *hwfn, static void qed_get_current_link(struct qed_dev *cdev, struct qed_link_output *if_link) { + int i; + qed_fill_link(&cdev->hwfns[0], if_link); + + for_each_hwfn(cdev, i) + qed_inform_vf_link_state(&cdev->hwfns[i]); } void qed_link_update(struct qed_hwfn *hwfn) @@ -1234,6 +1239,7 @@ void qed_link_update(struct qed_hwfn *hwfn) struct qed_link_output if_link; qed_fill_link(hwfn, &if_link); + qed_inform_vf_link_state(hwfn); if (IS_LEAD_HWFN(hwfn) && cookie) op->link_update(cookie, &if_link); diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c index 82f1eda38962..f6540c0ae595 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c @@ -7,6 +7,7 @@ */ #include +#include #include #include "qed_cxt.h" #include "qed_hsi.h" @@ -116,6 +117,41 @@ static struct qed_vf_info *qed_iov_get_vf_info(struct qed_hwfn *p_hwfn, return vf; } +int qed_iov_post_vf_bulletin(struct qed_hwfn *p_hwfn, + int vfid, struct qed_ptt *p_ptt) +{ + struct qed_bulletin_content *p_bulletin; + int crc_size = sizeof(p_bulletin->crc); + struct qed_dmae_params params; + struct qed_vf_info *p_vf; + + p_vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true); + if (!p_vf) + return -EINVAL; + + if (!p_vf->vf_bulletin) + return -EINVAL; + + p_bulletin = p_vf->bulletin.p_virt; + + /* Increment bulletin board version and compute crc 
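+ * over everything past the crc field itself; the VF recomputes and verifies this crc before accepting a new bulletin snapshot.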
*/ + p_bulletin->version++; + p_bulletin->crc = crc32(0, (u8 *)p_bulletin + crc_size, + p_vf->bulletin.size - crc_size); + + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "Posting Bulletin 0x%08x to VF[%d] (CRC 0x%08x)\n", + p_bulletin->version, p_vf->relative_vf_id, p_bulletin->crc); + + /* propagate bulletin board via dmae to vm memory */ + memset(¶ms, 0, sizeof(params)); + params.flags = QED_DMAE_FLAG_VF_DST; + params.dst_vfid = p_vf->abs_vf_id; + return qed_dmae_host2host(p_hwfn, p_ptt, p_vf->bulletin.phys, + p_vf->vf_bulletin, p_vf->bulletin.size / 4, + ¶ms); +} + static int qed_iov_pci_cfg_info(struct qed_dev *cdev) { struct qed_hw_sriov_info *iov = cdev->p_iov_info; @@ -790,6 +826,11 @@ static int qed_iov_release_hw_for_vf(struct qed_hwfn *p_hwfn, return -EINVAL; } + if (vf->bulletin.p_virt) + memset(vf->bulletin.p_virt, 0, sizeof(*vf->bulletin.p_virt)); + + memset(&vf->p_vf_info, 0, sizeof(vf->p_vf_info)); + if (vf->state != VF_STOPPED) { /* Stopping the VF */ rc = qed_sp_vf_stop(p_hwfn, vf->concrete_fid, vf->opaque_fid); @@ -1159,6 +1200,7 @@ static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn, /* Fill agreed size of bulletin board in response */ resp->bulletin_size = vf->bulletin.size; + qed_iov_post_vf_bulletin(p_hwfn, vf->relative_vf_id, p_ptt); DP_VERBOSE(p_hwfn, QED_MSG_IOV, @@ -2019,6 +2061,45 @@ int qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *p_disabled_vfs) return found; } +void qed_iov_set_link(struct qed_hwfn *p_hwfn, + u16 vfid, + struct qed_mcp_link_params *params, + struct qed_mcp_link_state *link, + struct qed_mcp_link_capabilities *p_caps) +{ + struct qed_vf_info *p_vf = qed_iov_get_vf_info(p_hwfn, + vfid, + false); + struct qed_bulletin_content *p_bulletin; + + if (!p_vf) + return; + + p_bulletin = p_vf->bulletin.p_virt; + p_bulletin->req_autoneg = params->speed.autoneg; + p_bulletin->req_adv_speed = params->speed.advertised_speeds; + p_bulletin->req_forced_speed = params->speed.forced_speed; + p_bulletin->req_autoneg_pause = params->pause.autoneg; + p_bulletin->req_forced_rx = params->pause.forced_rx; + p_bulletin->req_forced_tx = params->pause.forced_tx; + p_bulletin->req_loopback = params->loopback_mode; + + p_bulletin->link_up = link->link_up; + p_bulletin->speed = link->speed; + p_bulletin->full_duplex = link->full_duplex; + p_bulletin->autoneg = link->an; + p_bulletin->autoneg_complete = link->an_complete; + p_bulletin->parallel_detection = link->parallel_detection; + p_bulletin->pfc_enabled = link->pfc_enabled; + p_bulletin->partner_adv_speed = link->partner_adv_speed; + p_bulletin->partner_tx_flow_ctrl_en = link->partner_tx_flow_ctrl_en; + p_bulletin->partner_rx_flow_ctrl_en = link->partner_rx_flow_ctrl_en; + p_bulletin->partner_adv_pause = link->partner_adv_pause; + p_bulletin->sfp_tx_fault = link->sfp_tx_fault; + + p_bulletin->capability_speed = p_caps->speed_capabilities; +} + static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, int vfid) { @@ -2359,6 +2440,29 @@ static int qed_sriov_configure(struct qed_dev *cdev, int num_vfs_param) return qed_sriov_disable(cdev, true); } +void qed_inform_vf_link_state(struct qed_hwfn *hwfn) +{ + struct qed_mcp_link_capabilities caps; + struct qed_mcp_link_params params; + struct qed_mcp_link_state link; + int i; + + if (!hwfn->pf_iov_info) + return; + + /* Update bulletin of all future possible VFs with link configuration */ + for (i = 0; i < hwfn->cdev->p_iov_info->total_vfs; i++) { + memcpy(¶ms, qed_mcp_get_link_params(hwfn), sizeof(params)); + memcpy(&link, 
qed_mcp_get_link_state(hwfn), sizeof(link)); + memcpy(&caps, qed_mcp_get_link_capabilities(hwfn), + sizeof(caps)); + + qed_iov_set_link(hwfn, i, ¶ms, &link, &caps); + } + + qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG); +} + static void qed_handle_vf_msg(struct qed_hwfn *hwfn) { u64 events[QED_VF_ARRAY_LENGTH]; @@ -2398,6 +2502,24 @@ static void qed_handle_vf_msg(struct qed_hwfn *hwfn) qed_ptt_release(hwfn, ptt); } +static void qed_handle_bulletin_post(struct qed_hwfn *hwfn) +{ + struct qed_ptt *ptt; + int i; + + ptt = qed_ptt_acquire(hwfn); + if (!ptt) { + DP_NOTICE(hwfn, "Failed allocating a ptt entry\n"); + qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG); + return; + } + + qed_for_each_vf(hwfn, i) + qed_iov_post_vf_bulletin(hwfn, i, ptt); + + qed_ptt_release(hwfn, ptt); +} + void qed_iov_pf_task(struct work_struct *work) { struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn, @@ -2424,6 +2546,9 @@ void qed_iov_pf_task(struct work_struct *work) if (test_and_clear_bit(QED_IOV_WQ_MSG_FLAG, &hwfn->iov_task_flags)) qed_handle_vf_msg(hwfn); + if (test_and_clear_bit(QED_IOV_WQ_BULLETIN_UPDATE_FLAG, + &hwfn->iov_task_flags)) + qed_handle_bulletin_post(hwfn); } void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first) @@ -2453,8 +2578,10 @@ int qed_iov_wq_start(struct qed_dev *cdev) for_each_hwfn(cdev, i) { struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; - /* PFs needs a dedicated workqueue only if they support IOV. */ - if (!IS_PF_SRIOV(p_hwfn)) + /* PFs needs a dedicated workqueue only if they support IOV. + * VFs always require one. + */ + if (IS_PF(p_hwfn->cdev) && !IS_PF_SRIOV(p_hwfn)) continue; snprintf(name, NAME_SIZE, "iov-%02x:%02x.%02x", @@ -2467,7 +2594,10 @@ int qed_iov_wq_start(struct qed_dev *cdev) return -ENOMEM; } - INIT_DELAYED_WORK(&p_hwfn->iov_task, qed_iov_pf_task); + if (IS_PF(cdev)) + INIT_DELAYED_WORK(&p_hwfn->iov_task, qed_iov_pf_task); + else + INIT_DELAYED_WORK(&p_hwfn->iov_task, qed_iov_vf_task); } return 0; diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.h b/drivers/net/ethernet/qlogic/qed/qed_sriov.h index 63bce9c53f06..0f5689b1b45e 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.h +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.h @@ -268,6 +268,7 @@ int qed_iov_wq_start(struct qed_dev *cdev); void qed_schedule_iov(struct qed_hwfn *hwfn, enum qed_iov_wq_flag flag); void qed_vf_start_iov_wq(struct qed_dev *cdev); int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled); +void qed_inform_vf_link_state(struct qed_hwfn *hwfn); #else static inline u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn, u16 rel_vf_id) @@ -332,6 +333,10 @@ static inline int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled) { return 0; } + +static inline void qed_inform_vf_link_state(struct qed_hwfn *hwfn) +{ +} #endif #define qed_for_each_vf(_p_hwfn, _i) \ diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c index 961de771392c..05b3ccadbcea 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_vf.c +++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c @@ -6,6 +6,7 @@ * this source tree. 
*/ +#include #include "qed.h" #include "qed_sriov.h" #include "qed_vf.h" @@ -865,6 +866,103 @@ u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id) return p_iov->acquire_resp.resc.hw_sbs[sb_id].hw_sb_id; } +int qed_vf_read_bulletin(struct qed_hwfn *p_hwfn, u8 *p_change) +{ + struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; + struct qed_bulletin_content shadow; + u32 crc, crc_size; + + crc_size = sizeof(p_iov->bulletin.p_virt->crc); + *p_change = 0; + + /* Need to guarantee PF is not in the middle of writing it */ + memcpy(&shadow, p_iov->bulletin.p_virt, p_iov->bulletin.size); + + /* If version did not update, no need to do anything */ + if (shadow.version == p_iov->bulletin_shadow.version) + return 0; + + /* Verify the bulletin we see is valid */ + crc = crc32(0, (u8 *)&shadow + crc_size, + p_iov->bulletin.size - crc_size); + if (crc != shadow.crc) + return -EAGAIN; + + /* Set the shadow bulletin and process it */ + memcpy(&p_iov->bulletin_shadow, &shadow, p_iov->bulletin.size); + + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "Read a bulletin update %08x\n", shadow.version); + + *p_change = 1; + + return 0; +} + +void __qed_vf_get_link_params(struct qed_hwfn *p_hwfn, + struct qed_mcp_link_params *p_params, + struct qed_bulletin_content *p_bulletin) +{ + memset(p_params, 0, sizeof(*p_params)); + + p_params->speed.autoneg = p_bulletin->req_autoneg; + p_params->speed.advertised_speeds = p_bulletin->req_adv_speed; + p_params->speed.forced_speed = p_bulletin->req_forced_speed; + p_params->pause.autoneg = p_bulletin->req_autoneg_pause; + p_params->pause.forced_rx = p_bulletin->req_forced_rx; + p_params->pause.forced_tx = p_bulletin->req_forced_tx; + p_params->loopback_mode = p_bulletin->req_loopback; +} + +void qed_vf_get_link_params(struct qed_hwfn *p_hwfn, + struct qed_mcp_link_params *params) +{ + __qed_vf_get_link_params(p_hwfn, params, + &(p_hwfn->vf_iov_info->bulletin_shadow)); +} + +void __qed_vf_get_link_state(struct qed_hwfn *p_hwfn, + struct qed_mcp_link_state *p_link, + struct qed_bulletin_content *p_bulletin) +{ + memset(p_link, 0, sizeof(*p_link)); + + p_link->link_up = p_bulletin->link_up; + p_link->speed = p_bulletin->speed; + p_link->full_duplex = p_bulletin->full_duplex; + p_link->an = p_bulletin->autoneg; + p_link->an_complete = p_bulletin->autoneg_complete; + p_link->parallel_detection = p_bulletin->parallel_detection; + p_link->pfc_enabled = p_bulletin->pfc_enabled; + p_link->partner_adv_speed = p_bulletin->partner_adv_speed; + p_link->partner_tx_flow_ctrl_en = p_bulletin->partner_tx_flow_ctrl_en; + p_link->partner_rx_flow_ctrl_en = p_bulletin->partner_rx_flow_ctrl_en; + p_link->partner_adv_pause = p_bulletin->partner_adv_pause; + p_link->sfp_tx_fault = p_bulletin->sfp_tx_fault; +} + +void qed_vf_get_link_state(struct qed_hwfn *p_hwfn, + struct qed_mcp_link_state *link) +{ + __qed_vf_get_link_state(p_hwfn, link, + &(p_hwfn->vf_iov_info->bulletin_shadow)); +} + +void __qed_vf_get_link_caps(struct qed_hwfn *p_hwfn, + struct qed_mcp_link_capabilities *p_link_caps, + struct qed_bulletin_content *p_bulletin) +{ + memset(p_link_caps, 0, sizeof(*p_link_caps)); + p_link_caps->speed_capabilities = p_bulletin->capability_speed; +} + +void qed_vf_get_link_caps(struct qed_hwfn *p_hwfn, + struct qed_mcp_link_capabilities *p_link_caps) +{ + __qed_vf_get_link_caps(p_hwfn, p_link_caps, + &(p_hwfn->vf_iov_info->bulletin_shadow)); +} + void qed_vf_get_num_rxqs(struct qed_hwfn *p_hwfn, u8 *num_rxqs) { *num_rxqs = p_hwfn->vf_iov_info->acquire_resp.resc.num_rxqs; @@ -897,3 +995,27 @@ void 
qed_vf_get_fw_version(struct qed_hwfn *p_hwfn, *fw_rev = info->fw_rev; *fw_eng = info->fw_eng; } + +static void qed_handle_bulletin_change(struct qed_hwfn *hwfn) +{ + /* Always update link configuration according to bulletin */ + qed_link_update(hwfn); +} + +void qed_iov_vf_task(struct work_struct *work) +{ + struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn, + iov_task.work); + u8 change = 0; + + if (test_and_clear_bit(QED_IOV_WQ_STOP_WQ_FLAG, &hwfn->iov_task_flags)) + return; + + /* Handle bulletin board changes */ + qed_vf_read_bulletin(hwfn, &change); + if (change) + qed_handle_bulletin_change(hwfn); + + /* As VF is polling bulletin board, need to constantly re-schedule */ + queue_delayed_work(hwfn->iov_wq, &hwfn->iov_task, HZ); +} diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.h b/drivers/net/ethernet/qlogic/qed/qed_vf.h index 35337b186aa5..eb99c2569779 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_vf.h +++ b/drivers/net/ethernet/qlogic/qed/qed_vf.h @@ -10,6 +10,7 @@ #define _QED_VF_H #include "qed_l2.h" +#include "qed_mcp.h" #define T_ETH_INDIRECTION_TABLE_SIZE 128 #define T_ETH_RSS_KEY_SIZE 10 @@ -377,6 +378,46 @@ struct qed_bulletin_content { /* bitmap indicating which fields hold valid values */ u64 valid_bitmap; + + /* used for MAC_ADDR or MAC_ADDR_FORCED */ + u8 mac[ETH_ALEN]; + + /* If valid, 1 => only untagged Rx if no vlan is configured */ + u8 default_only_untagged; + u8 padding; + + /* The following is a 'copy' of qed_mcp_link_state, + * qed_mcp_link_params and qed_mcp_link_capabilities. Since it's + * possible the structs will increase further along the road we cannot + * have it here; Instead we need to have all of its fields. + */ + u8 req_autoneg; + u8 req_autoneg_pause; + u8 req_forced_rx; + u8 req_forced_tx; + u8 padding2[4]; + + u32 req_adv_speed; + u32 req_forced_speed; + u32 req_loopback; + u32 padding3; + + u8 link_up; + u8 full_duplex; + u8 autoneg; + u8 autoneg_complete; + u8 parallel_detection; + u8 pfc_enabled; + u8 partner_tx_flow_ctrl_en; + u8 partner_rx_flow_ctrl_en; + u8 partner_adv_pause; + u8 sfp_tx_fault; + u8 padding4[6]; + + u32 speed; + u32 partner_adv_speed; + + u32 capability_speed; }; struct qed_bulletin { @@ -432,6 +473,43 @@ struct qed_vf_iov { }; #ifdef CONFIG_QED_SRIOV +/** + * @brief Read the VF bulletin and act on it if needed + * + * @param p_hwfn + * @param p_change - qed fills 1 iff bulletin board has changed, 0 otherwise. 
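The bulletin exchange added in this patch has no doorbell: the PF bumps version, recomputes crc over everything that follows the crc field and DMAs the buffer into VF memory, while the VF polls once a second (qed_iov_vf_task), snapshots the buffer, and only accepts it when the version moved and the CRC verifies. A stand-alone sketch of that handshake is below; the struct layout, the toy_* names and the local CRC-32 helper are illustrative assumptions, not the driver's API, and the memcpy merely stands in for the qed_dmae_host2host() copy.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Toy stand-in for the bulletin: the crc field comes first, payload after. */
struct toy_bulletin {
	uint32_t crc;
	uint32_t version;
	uint8_t  link_up;
	uint8_t  pad[3];
	uint32_t speed;
};

/* Plain reflected CRC-32; both ends only need to agree, bit-compatibility
 * with the kernel's crc32() is not claimed here.
 */
static uint32_t toy_crc32(uint32_t crc, const uint8_t *p, size_t len)
{
	crc = ~crc;
	while (len--) {
		crc ^= *p++;
		for (int i = 0; i < 8; i++)
			crc = (crc >> 1) ^ (0xedb88320u & (0u - (crc & 1u)));
	}
	return ~crc;
}

/* PF side: bump the version, CRC everything after the crc field, then copy
 * the buffer out (the driver does this copy with the DMA engine).
 */
static void toy_post_bulletin(struct toy_bulletin *pf, struct toy_bulletin *vf_view)
{
	size_t crc_size = sizeof(pf->crc);

	pf->version++;
	pf->crc = toy_crc32(0, (const uint8_t *)pf + crc_size,
			    sizeof(*pf) - crc_size);
	memcpy(vf_view, pf, sizeof(*pf));
}

/* VF side: snapshot the shared buffer, ignore it if the version did not move,
 * reject it if the CRC does not verify (a torn update), otherwise accept it
 * into the local shadow copy.
 */
static int toy_read_bulletin(const struct toy_bulletin *vf_view,
			     struct toy_bulletin *shadow, int *p_change)
{
	struct toy_bulletin snap = *vf_view;
	size_t crc_size = sizeof(snap.crc);

	*p_change = 0;
	if (snap.version == shadow->version)
		return 0;
	if (snap.crc != toy_crc32(0, (const uint8_t *)&snap + crc_size,
				  sizeof(snap) - crc_size))
		return -1;	/* torn update, try again on the next poll */

	*shadow = snap;
	*p_change = 1;
	return 0;
}

int main(void)
{
	struct toy_bulletin pf, vf, shadow;
	int change;

	memset(&pf, 0, sizeof(pf));
	memset(&vf, 0, sizeof(vf));
	memset(&shadow, 0, sizeof(shadow));

	pf.link_up = 1;
	pf.speed = 25000;
	toy_post_bulletin(&pf, &vf);
	toy_read_bulletin(&vf, &shadow, &change);
	printf("change=%d version=%u link_up=%u speed=%u\n", change,
	       (unsigned)shadow.version, (unsigned)shadow.link_up,
	       (unsigned)shadow.speed);
	return 0;
}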
+ * + * @return enum _qed_status + */ +int qed_vf_read_bulletin(struct qed_hwfn *p_hwfn, u8 *p_change); + +/** + * @brief Get link paramters for VF from qed + * + * @param p_hwfn + * @param params - the link params structure to be filled for the VF + */ +void qed_vf_get_link_params(struct qed_hwfn *p_hwfn, + struct qed_mcp_link_params *params); + +/** + * @brief Get link state for VF from qed + * + * @param p_hwfn + * @param link - the link state structure to be filled for the VF + */ +void qed_vf_get_link_state(struct qed_hwfn *p_hwfn, + struct qed_mcp_link_state *link); + +/** + * @brief Get link capabilities for VF from qed + * + * @param p_hwfn + * @param p_link_caps - the link capabilities structure to be filled for the VF + */ +void qed_vf_get_link_caps(struct qed_hwfn *p_hwfn, + struct qed_mcp_link_capabilities *p_link_caps); + /** * @brief Get number of Rx queues allocated for VF by qed * @@ -577,6 +655,7 @@ int qed_vf_pf_reset(struct qed_hwfn *p_hwfn); * @return enum _qed_status */ int qed_vf_pf_release(struct qed_hwfn *p_hwfn); + /** * @brief qed_vf_get_igu_sb_id - Get the IGU SB ID for a given * sb_id. For VFs igu sbs don't have to be contiguous @@ -631,7 +710,58 @@ void qed_vf_pf_filter_mcast(struct qed_hwfn *p_hwfn, * @return enum _qed_status */ int qed_vf_pf_int_cleanup(struct qed_hwfn *p_hwfn); + +/** + * @brief - return the link params in a given bulletin board + * + * @param p_hwfn + * @param p_params - pointer to a struct to fill with link params + * @param p_bulletin + */ +void __qed_vf_get_link_params(struct qed_hwfn *p_hwfn, + struct qed_mcp_link_params *p_params, + struct qed_bulletin_content *p_bulletin); + +/** + * @brief - return the link state in a given bulletin board + * + * @param p_hwfn + * @param p_link - pointer to a struct to fill with link state + * @param p_bulletin + */ +void __qed_vf_get_link_state(struct qed_hwfn *p_hwfn, + struct qed_mcp_link_state *p_link, + struct qed_bulletin_content *p_bulletin); + +/** + * @brief - return the link capabilities in a given bulletin board + * + * @param p_hwfn + * @param p_link - pointer to a struct to fill with link capabilities + * @param p_bulletin + */ +void __qed_vf_get_link_caps(struct qed_hwfn *p_hwfn, + struct qed_mcp_link_capabilities *p_link_caps, + struct qed_bulletin_content *p_bulletin); + +void qed_iov_vf_task(struct work_struct *work); #else +static inline void qed_vf_get_link_params(struct qed_hwfn *p_hwfn, + struct qed_mcp_link_params *params) +{ +} + +static inline void qed_vf_get_link_state(struct qed_hwfn *p_hwfn, + struct qed_mcp_link_state *link) +{ +} + +static inline void +qed_vf_get_link_caps(struct qed_hwfn *p_hwfn, + struct qed_mcp_link_capabilities *p_link_caps) +{ +} + static inline void qed_vf_get_num_rxqs(struct qed_hwfn *p_hwfn, u8 *num_rxqs) { } @@ -741,6 +871,32 @@ static inline int qed_vf_pf_int_cleanup(struct qed_hwfn *p_hwfn) { return -EINVAL; } + +static inline void __qed_vf_get_link_params(struct qed_hwfn *p_hwfn, + struct qed_mcp_link_params + *p_params, + struct qed_bulletin_content + *p_bulletin) +{ +} + +static inline void __qed_vf_get_link_state(struct qed_hwfn *p_hwfn, + struct qed_mcp_link_state *p_link, + struct qed_bulletin_content + *p_bulletin) +{ +} + +static inline void +__qed_vf_get_link_caps(struct qed_hwfn *p_hwfn, + struct qed_mcp_link_capabilities *p_link_caps, + struct qed_bulletin_content *p_bulletin) +{ +} + +static inline void qed_iov_vf_task(struct work_struct *work) +{ +} #endif #endif From 17b235c1456e4ab203c39050c5535e28fe7d0de9 Mon Sep 17 00:00:00 
2001 From: Yuval Mintz Date: Wed, 11 May 2016 16:36:18 +0300 Subject: [PATCH 1507/1649] qed: Align TLVs As the VF infrastructure is supposed to offer backward/forward compatibility, the various types associated with VF<->PF communication should be aligned across all various platforms that support IOV on our family of adapters. This adds a couple of currently missing values, specifically aligning the enum for the various TLVs possible in the communication between them. It then adds the PF implementation for some of those missing VF requests. This support isn't really necessary for the Linux VF as those VFs aren't requiring it [at least today], but are required by VFs running on other OSes. LRO is an example of one such configuration. Signed-off-by: Yuval Mintz Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed_l2.c | 92 ++++++++++- drivers/net/ethernet/qlogic/qed/qed_l2.h | 52 ++++++ drivers/net/ethernet/qlogic/qed/qed_sriov.c | 169 ++++++++++++++++++++ drivers/net/ethernet/qlogic/qed/qed_sriov.h | 4 + drivers/net/ethernet/qlogic/qed/qed_vf.c | 6 + drivers/net/ethernet/qlogic/qed/qed_vf.h | 54 ++++++- 6 files changed, 375 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c index 9f88f2feb5ec..80f0b853a142 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c @@ -264,6 +264,38 @@ qed_sp_update_accept_mode(struct qed_hwfn *p_hwfn, } } +static void +qed_sp_vport_update_sge_tpa(struct qed_hwfn *p_hwfn, + struct vport_update_ramrod_data *p_ramrod, + struct qed_sge_tpa_params *p_params) +{ + struct eth_vport_tpa_param *p_tpa; + + if (!p_params) { + p_ramrod->common.update_tpa_param_flg = 0; + p_ramrod->common.update_tpa_en_flg = 0; + p_ramrod->common.update_tpa_param_flg = 0; + return; + } + + p_ramrod->common.update_tpa_en_flg = p_params->update_tpa_en_flg; + p_tpa = &p_ramrod->tpa_param; + p_tpa->tpa_ipv4_en_flg = p_params->tpa_ipv4_en_flg; + p_tpa->tpa_ipv6_en_flg = p_params->tpa_ipv6_en_flg; + p_tpa->tpa_ipv4_tunn_en_flg = p_params->tpa_ipv4_tunn_en_flg; + p_tpa->tpa_ipv6_tunn_en_flg = p_params->tpa_ipv6_tunn_en_flg; + + p_ramrod->common.update_tpa_param_flg = p_params->update_tpa_param_flg; + p_tpa->max_buff_num = p_params->max_buffers_per_cqe; + p_tpa->tpa_pkt_split_flg = p_params->tpa_pkt_split_flg; + p_tpa->tpa_hdr_data_split_flg = p_params->tpa_hdr_data_split_flg; + p_tpa->tpa_gro_consistent_flg = p_params->tpa_gro_consistent_flg; + p_tpa->tpa_max_aggs_num = p_params->tpa_max_aggs_num; + p_tpa->tpa_max_size = p_params->tpa_max_size; + p_tpa->tpa_min_size_to_start = p_params->tpa_min_size_to_start; + p_tpa->tpa_min_size_to_cont = p_params->tpa_min_size_to_cont; +} + static void qed_sp_update_mcast_bin(struct qed_hwfn *p_hwfn, struct vport_update_ramrod_data *p_ramrod, @@ -295,7 +327,7 @@ int qed_sp_vport_update(struct qed_hwfn *p_hwfn, struct qed_sp_init_data init_data; struct vport_update_ramrod_data *p_ramrod = NULL; struct qed_spq_entry *p_ent = NULL; - u8 abs_vport_id = 0; + u8 abs_vport_id = 0, val; int rc = -EINVAL; if (IS_VF(p_hwfn->cdev)) { @@ -331,6 +363,13 @@ int qed_sp_vport_update(struct qed_hwfn *p_hwfn, p_cmn->accept_any_vlan = p_params->accept_any_vlan; p_cmn->update_accept_any_vlan_flg = p_params->update_accept_any_vlan_flg; + + p_cmn->inner_vlan_removal_en = p_params->inner_vlan_removal_flg; + val = p_params->update_inner_vlan_removal_flg; + p_cmn->update_inner_vlan_removal_en_flg = val; + p_ramrod->common.tx_switching_en = 
p_params->tx_switching_flg; + p_cmn->update_tx_switching_en_flg = p_params->update_tx_switching_flg; + rc = qed_sp_vport_update_rss(p_hwfn, p_ramrod, p_rss_params); if (rc) { /* Return spq entry which is taken in qed_sp_init_request()*/ @@ -342,6 +381,7 @@ int qed_sp_vport_update(struct qed_hwfn *p_hwfn, qed_sp_update_mcast_bin(p_hwfn, p_ramrod, p_params); qed_sp_update_accept_mode(p_hwfn, p_ramrod, p_params->accept_flags); + qed_sp_vport_update_sge_tpa(p_hwfn, p_ramrod, p_params->sge_tpa_params); return qed_spq_post(p_hwfn, p_ent, NULL); } @@ -590,6 +630,56 @@ qed_sp_eth_rx_queue_start(struct qed_hwfn *p_hwfn, return rc; } +int qed_sp_eth_rx_queues_update(struct qed_hwfn *p_hwfn, + u16 rx_queue_id, + u8 num_rxqs, + u8 complete_cqe_flg, + u8 complete_event_flg, + enum spq_mode comp_mode, + struct qed_spq_comp_cb *p_comp_data) +{ + struct rx_queue_update_ramrod_data *p_ramrod = NULL; + struct qed_spq_entry *p_ent = NULL; + struct qed_sp_init_data init_data; + struct qed_hw_cid_data *p_rx_cid; + u16 qid, abs_rx_q_id = 0; + int rc = -EINVAL; + u8 i; + + memset(&init_data, 0, sizeof(init_data)); + init_data.comp_mode = comp_mode; + init_data.p_comp_data = p_comp_data; + + for (i = 0; i < num_rxqs; i++) { + qid = rx_queue_id + i; + p_rx_cid = &p_hwfn->p_rx_cids[qid]; + + /* Get SPQ entry */ + init_data.cid = p_rx_cid->cid; + init_data.opaque_fid = p_rx_cid->opaque_fid; + + rc = qed_sp_init_request(p_hwfn, &p_ent, + ETH_RAMROD_RX_QUEUE_UPDATE, + PROTOCOLID_ETH, &init_data); + if (rc) + return rc; + + p_ramrod = &p_ent->ramrod.rx_queue_update; + + qed_fw_vport(p_hwfn, p_rx_cid->vport_id, &p_ramrod->vport_id); + qed_fw_l2_queue(p_hwfn, qid, &abs_rx_q_id); + p_ramrod->rx_queue_id = cpu_to_le16(abs_rx_q_id); + p_ramrod->complete_cqe_flg = complete_cqe_flg; + p_ramrod->complete_event_flg = complete_event_flg; + + rc = qed_spq_post(p_hwfn, p_ent, NULL); + if (rc) + return rc; + } + + return rc; +} + int qed_sp_eth_rx_queue_stop(struct qed_hwfn *p_hwfn, u16 rx_queue_id, bool eq_completion_only, bool cqe_completion) diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.h b/drivers/net/ethernet/qlogic/qed/qed_l2.h index 3b65a45c1ec2..f9e677a29751 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.h +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.h @@ -16,6 +16,25 @@ #include "qed_hw.h" #include "qed_sp.h" +struct qed_sge_tpa_params { + u8 max_buffers_per_cqe; + + u8 update_tpa_en_flg; + u8 tpa_ipv4_en_flg; + u8 tpa_ipv6_en_flg; + u8 tpa_ipv4_tunn_en_flg; + u8 tpa_ipv6_tunn_en_flg; + + u8 update_tpa_param_flg; + u8 tpa_pkt_split_flg; + u8 tpa_hdr_data_split_flg; + u8 tpa_gro_consistent_flg; + u8 tpa_max_aggs_num; + u16 tpa_max_size; + u16 tpa_min_size_to_start; + u16 tpa_min_size_to_cont; +}; + enum qed_filter_opcode { QED_FILTER_ADD, QED_FILTER_REMOVE, @@ -119,12 +138,17 @@ struct qed_sp_vport_update_params { u8 vport_active_rx_flg; u8 update_vport_active_tx_flg; u8 vport_active_tx_flg; + u8 update_inner_vlan_removal_flg; + u8 inner_vlan_removal_flg; + u8 update_tx_switching_flg; + u8 tx_switching_flg; u8 update_approx_mcast_flg; u8 update_accept_any_vlan_flg; u8 accept_any_vlan; unsigned long bins[8]; struct qed_rss_params *rss_params; struct qed_filter_accept_flags accept_flags; + struct qed_sge_tpa_params *sge_tpa_params; }; int qed_sp_vport_update(struct qed_hwfn *p_hwfn, @@ -152,6 +176,34 @@ int qed_sp_eth_filter_ucast(struct qed_hwfn *p_hwfn, enum spq_mode comp_mode, struct qed_spq_comp_cb *p_comp_data); +/** + * @brief qed_sp_rx_eth_queues_update - + * + * This ramrod updates an RX queue. 
It is used for setting the active state + * of the queue and updating the TPA and SGE parameters. + * + * @note At the moment - only used by non-linux VFs. + * + * @param p_hwfn + * @param rx_queue_id RX Queue ID + * @param num_rxqs Allow to update multiple rx + * queues, from rx_queue_id to + * (rx_queue_id + num_rxqs) + * @param complete_cqe_flg Post completion to the CQE Ring if set + * @param complete_event_flg Post completion to the Event Ring if set + * + * @return int + */ + +int +qed_sp_eth_rx_queues_update(struct qed_hwfn *p_hwfn, + u16 rx_queue_id, + u8 num_rxqs, + u8 complete_cqe_flg, + u8 complete_event_flg, + enum spq_mode comp_mode, + struct qed_spq_comp_cb *p_comp_data); + int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn, struct qed_sp_vport_start_params *p_params); diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c index f6540c0ae595..29a53dd0d9fd 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c @@ -961,12 +961,20 @@ static u16 qed_iov_vport_to_tlv(struct qed_hwfn *p_hwfn, switch (flag) { case QED_IOV_VP_UPDATE_ACTIVATE: return CHANNEL_TLV_VPORT_UPDATE_ACTIVATE; + case QED_IOV_VP_UPDATE_VLAN_STRIP: + return CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP; + case QED_IOV_VP_UPDATE_TX_SWITCH: + return CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH; case QED_IOV_VP_UPDATE_MCAST: return CHANNEL_TLV_VPORT_UPDATE_MCAST; case QED_IOV_VP_UPDATE_ACCEPT_PARAM: return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM; case QED_IOV_VP_UPDATE_RSS: return CHANNEL_TLV_VPORT_UPDATE_RSS; + case QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN: + return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN; + case QED_IOV_VP_UPDATE_SGE_TPA: + return CHANNEL_TLV_VPORT_UPDATE_SGE_TPA; default: return 0; } @@ -1516,6 +1524,51 @@ static void qed_iov_vf_mbx_stop_txqs(struct qed_hwfn *p_hwfn, length, status); } +static void qed_iov_vf_mbx_update_rxqs(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_vf_info *vf) +{ + u16 length = sizeof(struct pfvf_def_resp_tlv); + struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; + struct vfpf_update_rxq_tlv *req; + u8 status = PFVF_STATUS_SUCCESS; + u8 complete_event_flg; + u8 complete_cqe_flg; + u16 qid; + int rc; + u8 i; + + req = &mbx->req_virt->update_rxq; + complete_cqe_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_CQE_FLAG); + complete_event_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG); + + for (i = 0; i < req->num_rxqs; i++) { + qid = req->rx_qid + i; + + if (!vf->vf_queues[qid].rxq_active) { + DP_NOTICE(p_hwfn, "VF rx_qid = %d isn`t active!\n", + qid); + status = PFVF_STATUS_FAILURE; + break; + } + + rc = qed_sp_eth_rx_queues_update(p_hwfn, + vf->vf_queues[qid].fw_rx_qid, + 1, + complete_cqe_flg, + complete_event_flg, + QED_SPQ_MODE_EBLOCK, NULL); + + if (rc) { + status = PFVF_STATUS_FAILURE; + break; + } + } + + qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UPDATE_RXQ, + length, status); +} + void *qed_iov_search_list_tlvs(struct qed_hwfn *p_hwfn, void *p_tlvs_list, u16 req_type) { @@ -1567,6 +1620,45 @@ qed_iov_vp_update_act_param(struct qed_hwfn *p_hwfn, *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACTIVATE; } +static void +qed_iov_vp_update_vlan_param(struct qed_hwfn *p_hwfn, + struct qed_sp_vport_update_params *p_data, + struct qed_vf_info *p_vf, + struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask) +{ + struct vfpf_vport_update_vlan_strip_tlv *p_vlan_tlv; + u16 tlv = CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP; + + p_vlan_tlv = (struct vfpf_vport_update_vlan_strip_tlv *) + 
qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv); + if (!p_vlan_tlv) + return; + + p_data->update_inner_vlan_removal_flg = 1; + p_data->inner_vlan_removal_flg = p_vlan_tlv->remove_vlan; + + *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_VLAN_STRIP; +} + +static void +qed_iov_vp_update_tx_switch(struct qed_hwfn *p_hwfn, + struct qed_sp_vport_update_params *p_data, + struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask) +{ + struct vfpf_vport_update_tx_switch_tlv *p_tx_switch_tlv; + u16 tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH; + + p_tx_switch_tlv = (struct vfpf_vport_update_tx_switch_tlv *) + qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, + tlv); + if (!p_tx_switch_tlv) + return; + + p_data->update_tx_switching_flg = 1; + p_data->tx_switching_flg = p_tx_switch_tlv->tx_switching; + *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_TX_SWITCH; +} + static void qed_iov_vp_update_mcast_bin_param(struct qed_hwfn *p_hwfn, struct qed_sp_vport_update_params *p_data, @@ -1607,6 +1699,26 @@ qed_iov_vp_update_accept_flag(struct qed_hwfn *p_hwfn, *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACCEPT_PARAM; } +static void +qed_iov_vp_update_accept_any_vlan(struct qed_hwfn *p_hwfn, + struct qed_sp_vport_update_params *p_data, + struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask) +{ + struct vfpf_vport_update_accept_any_vlan_tlv *p_accept_any_vlan; + u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN; + + p_accept_any_vlan = (struct vfpf_vport_update_accept_any_vlan_tlv *) + qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, + tlv); + if (!p_accept_any_vlan) + return; + + p_data->accept_any_vlan = p_accept_any_vlan->accept_any_vlan; + p_data->update_accept_any_vlan_flg = + p_accept_any_vlan->update_accept_any_vlan_flg; + *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN; +} + static void qed_iov_vp_update_rss_param(struct qed_hwfn *p_hwfn, struct qed_vf_info *vf, @@ -1671,12 +1783,61 @@ qed_iov_vp_update_rss_param(struct qed_hwfn *p_hwfn, *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_RSS; } +static void +qed_iov_vp_update_sge_tpa_param(struct qed_hwfn *p_hwfn, + struct qed_vf_info *vf, + struct qed_sp_vport_update_params *p_data, + struct qed_sge_tpa_params *p_sge_tpa, + struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask) +{ + struct vfpf_vport_update_sge_tpa_tlv *p_sge_tpa_tlv; + u16 tlv = CHANNEL_TLV_VPORT_UPDATE_SGE_TPA; + + p_sge_tpa_tlv = (struct vfpf_vport_update_sge_tpa_tlv *) + qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv); + + if (!p_sge_tpa_tlv) { + p_data->sge_tpa_params = NULL; + return; + } + + memset(p_sge_tpa, 0, sizeof(struct qed_sge_tpa_params)); + + p_sge_tpa->update_tpa_en_flg = + !!(p_sge_tpa_tlv->update_sge_tpa_flags & VFPF_UPDATE_TPA_EN_FLAG); + p_sge_tpa->update_tpa_param_flg = + !!(p_sge_tpa_tlv->update_sge_tpa_flags & + VFPF_UPDATE_TPA_PARAM_FLAG); + + p_sge_tpa->tpa_ipv4_en_flg = + !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV4_EN_FLAG); + p_sge_tpa->tpa_ipv6_en_flg = + !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV6_EN_FLAG); + p_sge_tpa->tpa_pkt_split_flg = + !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_PKT_SPLIT_FLAG); + p_sge_tpa->tpa_hdr_data_split_flg = + !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_HDR_DATA_SPLIT_FLAG); + p_sge_tpa->tpa_gro_consistent_flg = + !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_GRO_CONSIST_FLAG); + + p_sge_tpa->tpa_max_aggs_num = p_sge_tpa_tlv->tpa_max_aggs_num; + p_sge_tpa->tpa_max_size = p_sge_tpa_tlv->tpa_max_size; + p_sge_tpa->tpa_min_size_to_start = p_sge_tpa_tlv->tpa_min_size_to_start; + p_sge_tpa->tpa_min_size_to_cont = p_sge_tpa_tlv->tpa_min_size_to_cont; + 
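Each handler above fishes its extended TLV out of the request with qed_iov_search_list_tlvs(), i.e. it walks the mailbox buffer as a packed list of length-prefixed TLVs until it hits the wanted type or the list terminator. A stand-alone sketch of that walk follows; the ex_* names, the terminator value and the buffer layout are invented for illustration and do not match the real channel_tlv definitions.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Illustrative TLV header; real requests use struct channel_tlv. */
struct ex_tlv {
	uint16_t type;
	uint16_t length;	/* total TLV length, header included */
};

#define EX_TLV_LIST_END 0	/* invented terminator value */

/* Return the first TLV of the wanted type, or NULL. */
static void *ex_find_tlv(void *list, size_t list_size, uint16_t wanted)
{
	uint8_t *p = list, *end = p + list_size;

	while (p + sizeof(struct ex_tlv) <= end) {
		struct ex_tlv *tlv = (struct ex_tlv *)p;

		if (tlv->type == EX_TLV_LIST_END)
			break;
		if (tlv->length < sizeof(*tlv) || p + tlv->length > end)
			break;			/* malformed list, stop */
		if (tlv->type == wanted)
			return tlv;
		p += tlv->length;		/* hop to the next TLV */
	}
	return NULL;
}

int main(void)
{
	union { uint8_t b[32]; struct ex_tlv align; } buf;
	struct ex_tlv *t;

	memset(&buf, 0, sizeof(buf));
	t = (struct ex_tlv *)buf.b;        t->type = 5; t->length = 8;
	t = (struct ex_tlv *)(buf.b + 8);  t->type = 9; t->length = 8;
	t = (struct ex_tlv *)(buf.b + 16); t->type = EX_TLV_LIST_END; t->length = 4;

	t = ex_find_tlv(buf.b, sizeof(buf.b), 9);
	printf("type 9 found at offset %d\n",
	       t ? (int)((uint8_t *)t - buf.b) : -1);
	return 0;
}

Skipping unrecognized types by length is also what keeps the channel tolerant of TLVs one side does not implement, which is the backward/forward compatibility the commit message above is after.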
p_sge_tpa->max_buffers_per_cqe = p_sge_tpa_tlv->max_buffers_per_cqe; + + p_data->sge_tpa_params = p_sge_tpa; + + *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_SGE_TPA; +} + static void qed_iov_vf_mbx_vport_update(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_vf_info *vf) { struct qed_sp_vport_update_params params; struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; + struct qed_sge_tpa_params sge_tpa_params; struct qed_rss_params rss_params; u8 status = PFVF_STATUS_SUCCESS; u16 tlvs_mask = 0; @@ -1692,10 +1853,15 @@ static void qed_iov_vf_mbx_vport_update(struct qed_hwfn *p_hwfn, * from VF in struct qed_sp_vport_update_params. */ qed_iov_vp_update_act_param(p_hwfn, &params, mbx, &tlvs_mask); + qed_iov_vp_update_vlan_param(p_hwfn, &params, vf, mbx, &tlvs_mask); + qed_iov_vp_update_tx_switch(p_hwfn, &params, mbx, &tlvs_mask); qed_iov_vp_update_mcast_bin_param(p_hwfn, &params, mbx, &tlvs_mask); qed_iov_vp_update_accept_flag(p_hwfn, &params, mbx, &tlvs_mask); qed_iov_vp_update_rss_param(p_hwfn, vf, &params, &rss_params, mbx, &tlvs_mask); + qed_iov_vp_update_accept_any_vlan(p_hwfn, &params, mbx, &tlvs_mask); + qed_iov_vp_update_sge_tpa_param(p_hwfn, vf, &params, + &sge_tpa_params, mbx, &tlvs_mask); /* Just log a message if there is no single extended tlv in buffer. * When all features of vport update ramrod would be requested by VF @@ -2144,6 +2310,9 @@ static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn, case CHANNEL_TLV_STOP_TXQS: qed_iov_vf_mbx_stop_txqs(p_hwfn, p_ptt, p_vf); break; + case CHANNEL_TLV_UPDATE_RXQ: + qed_iov_vf_mbx_update_rxqs(p_hwfn, p_ptt, p_vf); + break; case CHANNEL_TLV_VPORT_UPDATE: qed_iov_vf_mbx_vport_update(p_hwfn, p_ptt, p_vf); break; diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.h b/drivers/net/ethernet/qlogic/qed/qed_sriov.h index 0f5689b1b45e..2c94b445d07f 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.h +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.h @@ -26,9 +26,13 @@ enum qed_iov_vport_update_flag { QED_IOV_VP_UPDATE_ACTIVATE, + QED_IOV_VP_UPDATE_VLAN_STRIP, + QED_IOV_VP_UPDATE_TX_SWITCH, QED_IOV_VP_UPDATE_MCAST, QED_IOV_VP_UPDATE_ACCEPT_PARAM, QED_IOV_VP_UPDATE_RSS, + QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN, + QED_IOV_VP_UPDATE_SGE_TPA, QED_IOV_VP_UPDATE_MAX, }; diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c index 05b3ccadbcea..e788954568d4 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_vf.c +++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c @@ -543,6 +543,10 @@ qed_vf_handle_vp_update_is_needed(struct qed_hwfn *p_hwfn, case CHANNEL_TLV_VPORT_UPDATE_ACTIVATE: return !!(p_data->update_vport_active_rx_flg || p_data->update_vport_active_tx_flg); + case CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH: + return !!p_data->update_tx_switching_flg; + case CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP: + return !!p_data->update_inner_vlan_removal_flg; case CHANNEL_TLV_VPORT_UPDATE_MCAST: return !!p_data->update_approx_mcast_flg; case CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM: @@ -550,6 +554,8 @@ qed_vf_handle_vp_update_is_needed(struct qed_hwfn *p_hwfn, p_data->accept_flags.update_tx_mode_config); case CHANNEL_TLV_VPORT_UPDATE_RSS: return !!p_data->rss_params; + case CHANNEL_TLV_VPORT_UPDATE_SGE_TPA: + return !!p_data->sge_tpa_params; default: DP_INFO(p_hwfn, "Unexpected vport-update TLV[%d]\n", tlv); diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.h b/drivers/net/ethernet/qlogic/qed/qed_vf.h index eb99c2569779..d9a8aa684ad7 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_vf.h +++ b/drivers/net/ethernet/qlogic/qed/qed_vf.h @@ -311,6 +311,18 @@ struct
vfpf_vport_update_activate_tlv { u8 active_tx; }; +struct vfpf_vport_update_tx_switch_tlv { + struct channel_tlv tl; + u8 tx_switching; + u8 padding[3]; +}; + +struct vfpf_vport_update_vlan_strip_tlv { + struct channel_tlv tl; + u8 remove_vlan; + u8 padding[3]; +}; + struct vfpf_vport_update_mcast_bin_tlv { struct channel_tlv tl; u8 padding[4]; @@ -326,6 +338,40 @@ struct vfpf_vport_update_accept_param_tlv { u8 tx_accept_filter; }; +struct vfpf_vport_update_accept_any_vlan_tlv { + struct channel_tlv tl; + u8 update_accept_any_vlan_flg; + u8 accept_any_vlan; + + u8 padding[2]; +}; + +struct vfpf_vport_update_sge_tpa_tlv { + struct channel_tlv tl; + + u16 sge_tpa_flags; +#define VFPF_TPA_IPV4_EN_FLAG BIT(0) +#define VFPF_TPA_IPV6_EN_FLAG BIT(1) +#define VFPF_TPA_PKT_SPLIT_FLAG BIT(2) +#define VFPF_TPA_HDR_DATA_SPLIT_FLAG BIT(3) +#define VFPF_TPA_GRO_CONSIST_FLAG BIT(4) + + u8 update_sge_tpa_flags; +#define VFPF_UPDATE_SGE_DEPRECATED_FLAG BIT(0) +#define VFPF_UPDATE_TPA_EN_FLAG BIT(1) +#define VFPF_UPDATE_TPA_PARAM_FLAG BIT(2) + + u8 max_buffers_per_cqe; + + u16 deprecated_sge_buff_size; + u16 tpa_max_size; + u16 tpa_min_size_to_start; + u16 tpa_min_size_to_cont; + + u8 tpa_max_aggs_num; + u8 padding[7]; +}; + /* Primary tlv as a header for various extended tlvs for * various functionalities in vport update ramrod. */ @@ -356,6 +402,7 @@ union vfpf_tlvs { struct vfpf_start_txq_tlv start_txq; struct vfpf_stop_rxqs_tlv stop_rxqs; struct vfpf_stop_txqs_tlv stop_txqs; + struct vfpf_update_rxq_tlv update_rxq; struct vfpf_vport_start_tlv start_vport; struct vfpf_vport_update_tlv vport_update; struct vfpf_ucast_filter_tlv ucast_filter; @@ -436,21 +483,26 @@ enum { CHANNEL_TLV_START_TXQ, CHANNEL_TLV_STOP_RXQS, CHANNEL_TLV_STOP_TXQS, + CHANNEL_TLV_UPDATE_RXQ, CHANNEL_TLV_INT_CLEANUP, CHANNEL_TLV_CLOSE, CHANNEL_TLV_RELEASE, CHANNEL_TLV_LIST_END, CHANNEL_TLV_UCAST_FILTER, CHANNEL_TLV_VPORT_UPDATE_ACTIVATE, + CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH, + CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP, CHANNEL_TLV_VPORT_UPDATE_MCAST, CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM, CHANNEL_TLV_VPORT_UPDATE_RSS, + CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN, + CHANNEL_TLV_VPORT_UPDATE_SGE_TPA, CHANNEL_TLV_MAX, /* Required for iterating over vport-update tlvs. * Will break in case non-sequential vport-update tlvs. */ - CHANNEL_TLV_VPORT_UPDATE_MAX = CHANNEL_TLV_VPORT_UPDATE_RSS + 1, + CHANNEL_TLV_VPORT_UPDATE_MAX = CHANNEL_TLV_VPORT_UPDATE_SGE_TPA + 1, }; /* This data is held in the qed_hwfn structure for VFs only. */ From fefb0202cc5c12172abba78a8404e69c6d82d680 Mon Sep 17 00:00:00 2001 From: Yuval Mintz Date: Wed, 11 May 2016 16:36:19 +0300 Subject: [PATCH 1508/1649] qede: Add VF support Adding a PCI callback for `sriov_configure' and a new PCI device id for the VF [+ Some minor changes to accomodate differences between PF and VF at the qede]. Following this, VF creation should be possible and the entire subset of existing PF functionality that's allow to VFs should be supported. Signed-off-by: Yuval Mintz Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/qede/qede.h | 4 ++ .../net/ethernet/qlogic/qede/qede_ethtool.c | 43 ++++++++++++++- drivers/net/ethernet/qlogic/qede/qede_main.c | 52 ++++++++++++++++--- 3 files changed, 90 insertions(+), 9 deletions(-) diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h index ff3ac0caad5b..47d6b22252f6 100644 --- a/drivers/net/ethernet/qlogic/qede/qede.h +++ b/drivers/net/ethernet/qlogic/qede/qede.h @@ -112,6 +112,10 @@ struct qede_dev { u32 dp_module; u8 dp_level; + u32 flags; +#define QEDE_FLAG_IS_VF BIT(0) +#define IS_VF(edev) (!!((edev)->flags & QEDE_FLAG_IS_VF)) + const struct qed_eth_ops *ops; struct qed_dev_eth_info dev_info; diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c index 0d04f163ae45..1bc75358cbc4 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c @@ -151,6 +151,8 @@ static void qede_get_strings_stats(struct qede_dev *edev, u8 *buf) int i, j, k; for (i = 0, j = 0; i < QEDE_NUM_STATS; i++) { + if (IS_VF(edev) && qede_stats_arr[i].pf_only) + continue; strcpy(buf + j * ETH_GSTRING_LEN, qede_stats_arr[i].string); j++; @@ -194,8 +196,11 @@ static void qede_get_ethtool_stats(struct net_device *dev, mutex_lock(&edev->qede_lock); - for (sidx = 0; sidx < QEDE_NUM_STATS; sidx++) + for (sidx = 0; sidx < QEDE_NUM_STATS; sidx++) { + if (IS_VF(edev) && qede_stats_arr[sidx].pf_only) + continue; buf[cnt++] = QEDE_STATS_DATA(edev, sidx); + } for (sidx = 0; sidx < QEDE_NUM_RQSTATS; sidx++) { buf[cnt] = 0; @@ -214,6 +219,13 @@ static int qede_get_sset_count(struct net_device *dev, int stringset) switch (stringset) { case ETH_SS_STATS: + if (IS_VF(edev)) { + int i; + + for (i = 0; i < QEDE_NUM_STATS; i++) + if (qede_stats_arr[i].pf_only) + num_stats--; + } return num_stats + QEDE_NUM_RQSTATS; case ETH_SS_PRIV_FLAGS: return QEDE_PRI_FLAG_LEN; @@ -1142,7 +1154,34 @@ static const struct ethtool_ops qede_ethtool_ops = { .self_test = qede_self_test, }; +static const struct ethtool_ops qede_vf_ethtool_ops = { + .get_settings = qede_get_settings, + .get_drvinfo = qede_get_drvinfo, + .get_msglevel = qede_get_msglevel, + .set_msglevel = qede_set_msglevel, + .get_link = qede_get_link, + .get_ringparam = qede_get_ringparam, + .set_ringparam = qede_set_ringparam, + .get_strings = qede_get_strings, + .get_ethtool_stats = qede_get_ethtool_stats, + .get_priv_flags = qede_get_priv_flags, + .get_sset_count = qede_get_sset_count, + .get_rxnfc = qede_get_rxnfc, + .set_rxnfc = qede_set_rxnfc, + .get_rxfh_indir_size = qede_get_rxfh_indir_size, + .get_rxfh_key_size = qede_get_rxfh_key_size, + .get_rxfh = qede_get_rxfh, + .set_rxfh = qede_set_rxfh, + .get_channels = qede_get_channels, + .set_channels = qede_set_channels, +}; + void qede_set_ethtool_ops(struct net_device *dev) { - dev->ethtool_ops = &qede_ethtool_ops; + struct qede_dev *edev = netdev_priv(dev); + + if (IS_VF(edev)) + dev->ethtool_ops = &qede_vf_ethtool_ops; + else + dev->ethtool_ops = &qede_ethtool_ops; } diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index 2d5f2735dc0a..bf54cfcd75c0 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -63,6 +63,7 @@ static const struct qed_eth_ops *qed_ops; #define CHIP_NUM_57980S_100 0x1644 #define CHIP_NUM_57980S_50 0x1654 #define CHIP_NUM_57980S_25 0x1656 +#define CHIP_NUM_57980S_IOV 0x1664 #ifndef 
PCI_DEVICE_ID_NX2_57980E #define PCI_DEVICE_ID_57980S_40 CHIP_NUM_57980S_40 @@ -71,15 +72,22 @@ static const struct qed_eth_ops *qed_ops; #define PCI_DEVICE_ID_57980S_100 CHIP_NUM_57980S_100 #define PCI_DEVICE_ID_57980S_50 CHIP_NUM_57980S_50 #define PCI_DEVICE_ID_57980S_25 CHIP_NUM_57980S_25 +#define PCI_DEVICE_ID_57980S_IOV CHIP_NUM_57980S_IOV #endif +enum qede_pci_private { + QEDE_PRIVATE_PF, + QEDE_PRIVATE_VF +}; + static const struct pci_device_id qede_pci_tbl[] = { - { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_40), 0 }, - { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_10), 0 }, - { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_MF), 0 }, - { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_100), 0 }, - { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_50), 0 }, - { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_25), 0 }, + {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_40), QEDE_PRIVATE_PF}, + {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_10), QEDE_PRIVATE_PF}, + {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_MF), QEDE_PRIVATE_PF}, + {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_100), QEDE_PRIVATE_PF}, + {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_50), QEDE_PRIVATE_PF}, + {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_25), QEDE_PRIVATE_PF}, + {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_IOV), QEDE_PRIVATE_VF}, { 0 } }; @@ -94,11 +102,25 @@ static int qede_alloc_rx_buffer(struct qede_dev *edev, struct qede_rx_queue *rxq); static void qede_link_update(void *dev, struct qed_link_output *link); +#ifdef CONFIG_QED_SRIOV +static int qede_sriov_configure(struct pci_dev *pdev, int num_vfs_param) +{ + struct qede_dev *edev = netdev_priv(pci_get_drvdata(pdev)); + + DP_VERBOSE(edev, QED_MSG_IOV, "Requested %d VFs\n", num_vfs_param); + + return edev->ops->iov->configure(edev->cdev, num_vfs_param); +} +#endif + static struct pci_driver qede_pci_driver = { .name = "qede", .id_table = qede_pci_tbl, .probe = qede_probe, .remove = qede_remove, +#ifdef CONFIG_QED_SRIOV + .sriov_configure = qede_sriov_configure, +#endif }; static struct qed_eth_cb_ops qede_ll_ops = { @@ -2334,6 +2356,9 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level, goto err2; } + if (is_vf) + edev->flags |= QEDE_FLAG_IS_VF; + qede_init_ndev(edev); rc = register_netdev(edev->ndev); @@ -2365,12 +2390,24 @@ err0: static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id) { + bool is_vf = false; u32 dp_module = 0; u8 dp_level = 0; + switch ((enum qede_pci_private)id->driver_data) { + case QEDE_PRIVATE_VF: + if (debug & QED_LOG_VERBOSE_MASK) + dev_err(&pdev->dev, "Probing a VF\n"); + is_vf = true; + break; + default: + if (debug & QED_LOG_VERBOSE_MASK) + dev_err(&pdev->dev, "Probing a PF\n"); + } + qede_config_debug(debug, &dp_module, &dp_level); - return __qede_probe(pdev, dp_module, dp_level, false, + return __qede_probe(pdev, dp_module, dp_level, is_vf, QEDE_PROBE_NORMAL); } @@ -3067,6 +3104,7 @@ static int qede_start_queues(struct qede_dev *edev) struct qed_dev *cdev = edev->cdev; struct qed_update_vport_params vport_update_params; struct qed_queue_start_common_params q_params; + struct qed_dev_info *qed_info = &edev->dev_info.common; struct qed_start_vport_params start = {0}; bool reset_rss_indir = false; From 08feecd7fc709077ce92d21a979f522a5f57170a Mon Sep 17 00:00:00 2001 From: Yuval Mintz Date: Wed, 11 May 2016 16:36:20 +0300 Subject: [PATCH 1509/1649] qed*: Support PVID configuration This adds support for PF control over the VF vlan configuration. I.e., `ip link ... vf vlan ' should now be supported. 1. 
!= 0 => VF receives [unknowingly] only traffic tagged by and tags all outgoing traffic sent by VF with . 2. == 0 ==> Remove the pvid configuration, reverting to previous. Signed-off-by: Yuval Mintz Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed_dev.c | 9 +- drivers/net/ethernet/qlogic/qed/qed_l2.c | 14 +- drivers/net/ethernet/qlogic/qed/qed_l2.h | 6 + drivers/net/ethernet/qlogic/qed/qed_sriov.c | 364 ++++++++++++++++++- drivers/net/ethernet/qlogic/qed/qed_sriov.h | 26 ++ drivers/net/ethernet/qlogic/qed/qed_vf.c | 18 +- drivers/net/ethernet/qlogic/qed/qed_vf.h | 19 +- drivers/net/ethernet/qlogic/qede/qede_main.c | 18 + include/linux/qed/qed_iov_if.h | 1 + 9 files changed, 468 insertions(+), 7 deletions(-) diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index 9d01a16bfb1a..e75e73a77b27 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -1104,9 +1104,16 @@ static void qed_hw_get_resc(struct qed_hwfn *p_hwfn) u8 num_funcs = p_hwfn->num_funcs_on_engine; u32 *resc_num = p_hwfn->hw_info.resc_num; struct qed_sb_cnt_info sb_cnt_info; - int i; + int i, max_vf_vlan_filters; memset(&sb_cnt_info, 0, sizeof(sb_cnt_info)); + +#ifdef CONFIG_QED_SRIOV + max_vf_vlan_filters = QED_ETH_MAX_VF_NUM_VLAN_FILTERS; +#else + max_vf_vlan_filters = 0; +#endif + qed_int_get_num_sbs(p_hwfn, &sb_cnt_info); resc_num[QED_SB] = min_t(u32, diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c index 80f0b853a142..7fb6b82f1a97 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c @@ -114,7 +114,8 @@ int qed_sp_vport_start(struct qed_hwfn *p_hwfn, p_params->mtu, p_params->remove_inner_vlan, p_params->tpa_mode, - p_params->max_buffers_per_cqe); + p_params->max_buffers_per_cqe, + p_params->only_untagged); } return qed_sp_eth_vport_start(p_hwfn, p_params); @@ -367,6 +368,16 @@ int qed_sp_vport_update(struct qed_hwfn *p_hwfn, p_cmn->inner_vlan_removal_en = p_params->inner_vlan_removal_flg; val = p_params->update_inner_vlan_removal_flg; p_cmn->update_inner_vlan_removal_en_flg = val; + + p_cmn->default_vlan_en = p_params->default_vlan_enable_flg; + val = p_params->update_default_vlan_enable_flg; + p_cmn->update_default_vlan_en_flg = val; + + p_cmn->default_vlan = cpu_to_le16(p_params->default_vlan); + p_cmn->update_default_vlan_flg = p_params->update_default_vlan_flg; + + p_cmn->silent_vlan_removal_en = p_params->silent_vlan_removal_flg; + p_ramrod->common.tx_switching_en = p_params->tx_switching_flg; p_cmn->update_tx_switching_en_flg = p_params->update_tx_switching_flg; @@ -1702,6 +1713,7 @@ static int qed_start_vport(struct qed_dev *cdev, start.tpa_mode = params->gro_enable ? 
QED_TPA_MODE_GRO : QED_TPA_MODE_NONE; start.remove_inner_vlan = params->remove_inner_vlan; + start.only_untagged = true; /* untagged only */ start.drop_ttl0 = params->drop_ttl0; start.opaque_fid = p_hwfn->hw_info.opaque_fid; start.concrete_fid = p_hwfn->hw_info.concrete_fid; diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.h b/drivers/net/ethernet/qlogic/qed/qed_l2.h index f9e677a29751..fad30ae12f63 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.h +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.h @@ -94,6 +94,7 @@ enum qed_tpa_mode { struct qed_sp_vport_start_params { enum qed_tpa_mode tpa_mode; bool remove_inner_vlan; + bool only_untagged; bool drop_ttl0; u8 max_buffers_per_cqe; u32 concrete_fid; @@ -140,6 +141,11 @@ struct qed_sp_vport_update_params { u8 vport_active_tx_flg; u8 update_inner_vlan_removal_flg; u8 inner_vlan_removal_flg; + u8 silent_vlan_removal_flg; + u8 update_default_vlan_enable_flg; + u8 default_vlan_enable_flg; + u8 update_default_vlan_flg; + u16 default_vlan; u8 update_tx_switching_flg; u8 tx_switching_flg; u8 update_approx_mcast_flg; diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c index 29a53dd0d9fd..77d44baa5df3 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c @@ -1075,6 +1075,7 @@ static void qed_iov_vf_cleanup(struct qed_hwfn *p_hwfn, p_vf->vport_instance = 0; p_vf->num_mac_filters = 0; p_vf->num_vlan_filters = 0; + p_vf->configured_features = 0; /* If VF previously requested less resources, go back to default */ p_vf->num_rxqs = p_vf->num_sbs; @@ -1085,6 +1086,7 @@ static void qed_iov_vf_cleanup(struct qed_hwfn *p_hwfn, for (i = 0; i < QED_MAX_VF_CHAINS_PER_PF; i++) p_vf->vf_queues[i].rxq_active = 0; + memset(&p_vf->shadow_config, 0, sizeof(p_vf->shadow_config)); qed_iov_clean_vf(p_hwfn, p_vf->relative_vf_id); } @@ -1232,6 +1234,149 @@ out: sizeof(struct pfvf_acquire_resp_tlv), vfpf_status); } +static int qed_iov_reconfigure_unicast_vlan(struct qed_hwfn *p_hwfn, + struct qed_vf_info *p_vf) +{ + struct qed_filter_ucast filter; + int rc = 0; + int i; + + memset(&filter, 0, sizeof(filter)); + filter.is_rx_filter = 1; + filter.is_tx_filter = 1; + filter.vport_to_add_to = p_vf->vport_id; + filter.opcode = QED_FILTER_ADD; + + /* Reconfigure vlans */ + for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++) { + if (!p_vf->shadow_config.vlans[i].used) + continue; + + filter.type = QED_FILTER_VLAN; + filter.vlan = p_vf->shadow_config.vlans[i].vid; + DP_VERBOSE(p_hwfn, + QED_MSG_IOV, + "Reconfiguring VLAN [0x%04x] for VF [%04x]\n", + filter.vlan, p_vf->relative_vf_id); + rc = qed_sp_eth_filter_ucast(p_hwfn, + p_vf->opaque_fid, + &filter, + QED_SPQ_MODE_CB, NULL); + if (rc) { + DP_NOTICE(p_hwfn, + "Failed to configure VLAN [%04x] to VF [%04x]\n", + filter.vlan, p_vf->relative_vf_id); + break; + } + } + + return rc; +} + +static int +qed_iov_reconfigure_unicast_shadow(struct qed_hwfn *p_hwfn, + struct qed_vf_info *p_vf, u64 events) +{ + int rc = 0; + + if ((events & (1 << VLAN_ADDR_FORCED)) && + !(p_vf->configured_features & (1 << VLAN_ADDR_FORCED))) + rc = qed_iov_reconfigure_unicast_vlan(p_hwfn, p_vf); + + return rc; +} + +static int qed_iov_configure_vport_forced(struct qed_hwfn *p_hwfn, + struct qed_vf_info *p_vf, u64 events) +{ + int rc = 0; + struct qed_filter_ucast filter; + + if (!p_vf->vport_instance) + return -EINVAL; + + if (events & (1 << VLAN_ADDR_FORCED)) { + struct qed_sp_vport_update_params vport_update; + u8 removal; + int i; + + 
memset(&filter, 0, sizeof(filter)); + filter.type = QED_FILTER_VLAN; + filter.is_rx_filter = 1; + filter.is_tx_filter = 1; + filter.vport_to_add_to = p_vf->vport_id; + filter.vlan = p_vf->bulletin.p_virt->pvid; + filter.opcode = filter.vlan ? QED_FILTER_REPLACE : + QED_FILTER_FLUSH; + + /* Send the ramrod */ + rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid, + &filter, QED_SPQ_MODE_CB, NULL); + if (rc) { + DP_NOTICE(p_hwfn, + "PF failed to configure VLAN for VF\n"); + return rc; + } + + /* Update the default-vlan & silent vlan stripping */ + memset(&vport_update, 0, sizeof(vport_update)); + vport_update.opaque_fid = p_vf->opaque_fid; + vport_update.vport_id = p_vf->vport_id; + vport_update.update_default_vlan_enable_flg = 1; + vport_update.default_vlan_enable_flg = filter.vlan ? 1 : 0; + vport_update.update_default_vlan_flg = 1; + vport_update.default_vlan = filter.vlan; + + vport_update.update_inner_vlan_removal_flg = 1; + removal = filter.vlan ? 1 + : p_vf->shadow_config.inner_vlan_removal; + vport_update.inner_vlan_removal_flg = removal; + vport_update.silent_vlan_removal_flg = filter.vlan ? 1 : 0; + rc = qed_sp_vport_update(p_hwfn, + &vport_update, + QED_SPQ_MODE_EBLOCK, NULL); + if (rc) { + DP_NOTICE(p_hwfn, + "PF failed to configure VF vport for vlan\n"); + return rc; + } + + /* Update all the Rx queues */ + for (i = 0; i < QED_MAX_VF_CHAINS_PER_PF; i++) { + u16 qid; + + if (!p_vf->vf_queues[i].rxq_active) + continue; + + qid = p_vf->vf_queues[i].fw_rx_qid; + + rc = qed_sp_eth_rx_queues_update(p_hwfn, qid, + 1, 0, 1, + QED_SPQ_MODE_EBLOCK, + NULL); + if (rc) { + DP_NOTICE(p_hwfn, + "Failed to send Rx update fo queue[0x%04x]\n", + qid); + return rc; + } + } + + if (filter.vlan) + p_vf->configured_features |= 1 << VLAN_ADDR_FORCED; + else + p_vf->configured_features &= ~(1 << VLAN_ADDR_FORCED); + } + + /* If forced features are terminated, we need to configure the shadow + * configuration back again. + */ + if (events) + qed_iov_reconfigure_unicast_shadow(p_hwfn, p_vf, events); + + return rc; +} + static void qed_iov_vf_mbx_start_vport(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_vf_info *vf) @@ -1241,6 +1386,7 @@ static void qed_iov_vf_mbx_start_vport(struct qed_hwfn *p_hwfn, struct vfpf_vport_start_tlv *start; u8 status = PFVF_STATUS_SUCCESS; struct qed_vf_info *vf_info; + u64 *p_bitmap; int sb_id; int rc; @@ -1272,10 +1418,24 @@ static void qed_iov_vf_mbx_start_vport(struct qed_hwfn *p_hwfn, qed_iov_enable_vf_traffic(p_hwfn, p_ptt, vf); vf->mtu = start->mtu; + vf->shadow_config.inner_vlan_removal = start->inner_vlan_removal; + + /* Take into consideration configuration forced by hypervisor; + * If none is configured, use the supplied VF values [for old + * vfs that would still be fine, since they passed '0' as padding]. 
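The forced-PVID branch above reduces to a small mapping: a non-zero pvid enables the default VLAN, forces inner-VLAN stripping regardless of what the VF asked for and turns on silent removal, while pvid 0 restores the VF's own stripping request and clears the forced bit in the bulletin's valid bitmap. A stand-alone sketch of that mapping is below; the ex_* names are invented, and the real code additionally replaces the VF's VLAN filter and updates every active Rx queue.

#include <stdint.h>
#include <stdio.h>

#define EX_VLAN_FORCED_BIT (1ull << 2)	/* stand-in for VLAN_ADDR_FORCED */

struct ex_vport_cfg {
	uint8_t  default_vlan_enable;
	uint16_t default_vlan;
	uint8_t  inner_vlan_removal;
	uint8_t  silent_vlan_removal;
};

/* Derive the vport knobs for a forced PVID (pvid == 0 clears the force). */
static struct ex_vport_cfg ex_apply_pvid(uint16_t pvid,
					 uint8_t vf_requested_removal,
					 uint64_t *bulletin_valid_bitmap)
{
	struct ex_vport_cfg cfg = {
		.default_vlan_enable = pvid ? 1 : 0,
		.default_vlan        = pvid,
		/* while a PVID is forced, strip no matter what the VF asked */
		.inner_vlan_removal  = pvid ? 1 : vf_requested_removal,
		.silent_vlan_removal = pvid ? 1 : 0,
	};

	if (pvid)
		*bulletin_valid_bitmap |= EX_VLAN_FORCED_BIT;
	else
		*bulletin_valid_bitmap &= ~EX_VLAN_FORCED_BIT;

	return cfg;
}

int main(void)
{
	uint64_t bitmap = 0;
	struct ex_vport_cfg cfg = ex_apply_pvid(100, 0, &bitmap);

	printf("pvid=100: def_en=%d def=%d strip=%d silent=%d forced=%d\n",
	       cfg.default_vlan_enable, cfg.default_vlan,
	       cfg.inner_vlan_removal, cfg.silent_vlan_removal,
	       !!(bitmap & EX_VLAN_FORCED_BIT));

	cfg = ex_apply_pvid(0, 1, &bitmap);
	printf("pvid=0:   def_en=%d strip=%d silent=%d forced=%d\n",
	       cfg.default_vlan_enable, cfg.inner_vlan_removal,
	       cfg.silent_vlan_removal, !!(bitmap & EX_VLAN_FORCED_BIT));
	return 0;
}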
+ */ + p_bitmap = &vf_info->bulletin.p_virt->valid_bitmap; + if (!(*p_bitmap & (1 << VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED))) { + u8 vf_req = start->only_untagged; + + vf_info->bulletin.p_virt->default_only_untagged = vf_req; + *p_bitmap |= 1 << VFPF_BULLETIN_UNTAGGED_DEFAULT; + } params.tpa_mode = start->tpa_mode; params.remove_inner_vlan = start->inner_vlan_removal; + params.only_untagged = vf_info->bulletin.p_virt->default_only_untagged; params.drop_ttl0 = false; params.concrete_fid = vf->concrete_fid; params.opaque_fid = vf->opaque_fid; @@ -1290,6 +1450,9 @@ static void qed_iov_vf_mbx_start_vport(struct qed_hwfn *p_hwfn, status = PFVF_STATUS_FAILURE; } else { vf->vport_instance++; + + /* Force configuration if needed on the newly opened vport */ + qed_iov_configure_vport_forced(p_hwfn, vf, *p_bitmap); } qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_START, sizeof(struct pfvf_def_resp_tlv), status); @@ -1311,6 +1474,10 @@ static void qed_iov_vf_mbx_stop_vport(struct qed_hwfn *p_hwfn, status = PFVF_STATUS_FAILURE; } + /* Forget the configuration on the vport */ + vf->configured_features = 0; + memset(&vf->shadow_config, 0, sizeof(vf->shadow_config)); + qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_TEARDOWN, sizeof(struct pfvf_def_resp_tlv), status); } @@ -1634,8 +1801,13 @@ qed_iov_vp_update_vlan_param(struct qed_hwfn *p_hwfn, if (!p_vlan_tlv) return; - p_data->update_inner_vlan_removal_flg = 1; - p_data->inner_vlan_removal_flg = p_vlan_tlv->remove_vlan; + p_vf->shadow_config.inner_vlan_removal = p_vlan_tlv->remove_vlan; + + /* Ignore the VF request if we're forcing a vlan */ + if (!(p_vf->configured_features & (1 << VLAN_ADDR_FORCED))) { + p_data->update_inner_vlan_removal_flg = 1; + p_data->inner_vlan_removal_flg = p_vlan_tlv->remove_vlan; + } *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_VLAN_STRIP; } @@ -1886,6 +2058,67 @@ out: qed_iov_send_response(p_hwfn, p_ptt, vf, length, status); } +static int qed_iov_vf_update_unicast_shadow(struct qed_hwfn *p_hwfn, + struct qed_vf_info *p_vf, + struct qed_filter_ucast *p_params) +{ + int i; + + if (p_params->type == QED_FILTER_MAC) + return 0; + + /* First remove entries and then add new ones */ + if (p_params->opcode == QED_FILTER_REMOVE) { + for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++) + if (p_vf->shadow_config.vlans[i].used && + p_vf->shadow_config.vlans[i].vid == + p_params->vlan) { + p_vf->shadow_config.vlans[i].used = false; + break; + } + if (i == QED_ETH_VF_NUM_VLAN_FILTERS + 1) { + DP_VERBOSE(p_hwfn, + QED_MSG_IOV, + "VF [%d] - Tries to remove a non-existing vlan\n", + p_vf->relative_vf_id); + return -EINVAL; + } + } else if (p_params->opcode == QED_FILTER_REPLACE || + p_params->opcode == QED_FILTER_FLUSH) { + for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++) + p_vf->shadow_config.vlans[i].used = false; + } + + /* In forced mode, we're willing to remove entries - but we don't add + * new ones. 
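The shadow-config bookkeeping above boils down to a fixed-size table of (used, vid) slots: REMOVE clears a matching slot, REPLACE and FLUSH clear them all, and ADD only claims a free slot while no PVID is forced. A minimal stand-alone model follows, with invented names and a smaller capacity than the driver's.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EX_NUM_VLAN_FILTERS 3	/* illustrative capacity only */

struct ex_vlan_shadow {
	bool     used;
	uint16_t vid;
};

struct ex_vf_shadow {
	struct ex_vlan_shadow vlans[EX_NUM_VLAN_FILTERS];
	bool vlan_forced;	/* PF has forced a PVID on this VF */
};

static int ex_shadow_remove(struct ex_vf_shadow *s, uint16_t vid)
{
	for (int i = 0; i < EX_NUM_VLAN_FILTERS; i++)
		if (s->vlans[i].used && s->vlans[i].vid == vid) {
			s->vlans[i].used = false;
			return 0;
		}
	return -1;		/* tried to remove a non-existing vlan */
}

static void ex_shadow_flush(struct ex_vf_shadow *s)
{
	for (int i = 0; i < EX_NUM_VLAN_FILTERS; i++)
		s->vlans[i].used = false;
}

static int ex_shadow_add(struct ex_vf_shadow *s, uint16_t vid)
{
	/* While a PVID is forced, removals are honoured but adds are not. */
	if (s->vlan_forced)
		return 0;

	for (int i = 0; i < EX_NUM_VLAN_FILTERS; i++)
		if (!s->vlans[i].used) {
			s->vlans[i].used = true;
			s->vlans[i].vid = vid;
			return 0;
		}
	return -1;		/* more filters than the VF was given */
}

int main(void)
{
	struct ex_vf_shadow s = { 0 };

	ex_shadow_add(&s, 10);
	ex_shadow_add(&s, 20);
	printf("remove 20 -> %d, remove 30 -> %d\n",
	       ex_shadow_remove(&s, 20), ex_shadow_remove(&s, 30));

	ex_shadow_flush(&s);	/* REPLACE/FLUSH drops every shadow entry */
	printf("after flush, remove 10 -> %d\n", ex_shadow_remove(&s, 10));
	return 0;
}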
+ */ + if (p_vf->bulletin.p_virt->valid_bitmap & (1 << VLAN_ADDR_FORCED)) + return 0; + + if (p_params->opcode == QED_FILTER_ADD || + p_params->opcode == QED_FILTER_REPLACE) { + for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++) { + if (p_vf->shadow_config.vlans[i].used) + continue; + + p_vf->shadow_config.vlans[i].used = true; + p_vf->shadow_config.vlans[i].vid = p_params->vlan; + break; + } + + if (i == QED_ETH_VF_NUM_VLAN_FILTERS + 1) { + DP_VERBOSE(p_hwfn, + QED_MSG_IOV, + "VF [%d] - Tries to configure more than %d vlan filters\n", + p_vf->relative_vf_id, + QED_ETH_VF_NUM_VLAN_FILTERS + 1); + return -EINVAL; + } + } + + return 0; +} + int qed_iov_chk_ucast(struct qed_hwfn *hwfn, int vfid, struct qed_filter_ucast *params) { @@ -1907,6 +2140,7 @@ static void qed_iov_vf_mbx_ucast_filter(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_vf_info *vf) { + struct qed_bulletin_content *p_bulletin = vf->bulletin.p_virt; struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; struct vfpf_ucast_filter_tlv *req; u8 status = PFVF_STATUS_SUCCESS; @@ -1946,6 +2180,25 @@ static void qed_iov_vf_mbx_ucast_filter(struct qed_hwfn *p_hwfn, goto out; } + /* Update shadow copy of the VF configuration */ + if (qed_iov_vf_update_unicast_shadow(p_hwfn, vf, ¶ms)) { + status = PFVF_STATUS_FAILURE; + goto out; + } + + /* Determine if the unicast filtering is acceptible by PF */ + if ((p_bulletin->valid_bitmap & (1 << VLAN_ADDR_FORCED)) && + (params.type == QED_FILTER_VLAN || + params.type == QED_FILTER_MAC_VLAN)) { + /* Once VLAN is forced or PVID is set, do not allow + * to add/replace any further VLANs. + */ + if (params.opcode == QED_FILTER_ADD || + params.opcode == QED_FILTER_REPLACE) + status = PFVF_STATUS_FORCED; + goto out; + } + rc = qed_iov_chk_ucast(p_hwfn, vf->relative_vf_id, ¶ms); if (rc) { status = PFVF_STATUS_FAILURE; @@ -2449,6 +2702,29 @@ static int qed_iov_copy_vf_msg(struct qed_hwfn *p_hwfn, struct qed_ptt *ptt, return 0; } +void qed_iov_bulletin_set_forced_vlan(struct qed_hwfn *p_hwfn, + u16 pvid, int vfid) +{ + struct qed_vf_info *vf_info; + u64 feature; + + vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true); + if (!vf_info) { + DP_NOTICE(p_hwfn->cdev, + "Can not set forced MAC, invalid vfid [%d]\n", vfid); + return; + } + + feature = 1 << VLAN_ADDR_FORCED; + vf_info->bulletin.p_virt->pvid = pvid; + if (pvid) + vf_info->bulletin.p_virt->valid_bitmap |= feature; + else + vf_info->bulletin.p_virt->valid_bitmap &= ~feature; + + qed_iov_configure_vport_forced(p_hwfn, vf_info, feature); +} + bool qed_iov_is_vf_stopped(struct qed_hwfn *p_hwfn, int vfid) { struct qed_vf_info *p_vf_info; @@ -2460,6 +2736,20 @@ bool qed_iov_is_vf_stopped(struct qed_hwfn *p_hwfn, int vfid) return p_vf_info->state == VF_STOPPED; } +u16 qed_iov_bulletin_get_forced_vlan(struct qed_hwfn *p_hwfn, u16 rel_vf_id) +{ + struct qed_vf_info *p_vf; + + p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true); + if (!p_vf || !p_vf->bulletin.p_virt) + return 0; + + if (!(p_vf->bulletin.p_virt->valid_bitmap & (1 << VLAN_ADDR_FORCED))) + return 0; + + return p_vf->bulletin.p_virt->pvid; +} + /** * qed_schedule_iov - schedules IOV task for VF and PF * @hwfn: hardware function pointer @@ -2609,6 +2899,38 @@ static int qed_sriov_configure(struct qed_dev *cdev, int num_vfs_param) return qed_sriov_disable(cdev, true); } +static int qed_sriov_pf_set_vlan(struct qed_dev *cdev, u16 vid, int vfid) +{ + int i; + + if (!IS_QED_SRIOV(cdev) || !IS_PF_SRIOV_ALLOC(&cdev->hwfns[0])) { + DP_VERBOSE(cdev, QED_MSG_IOV, + "Cannot set a VF MAC; Sriov is not 
enabled\n"); + return -EINVAL; + } + + if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vfid, true)) { + DP_VERBOSE(cdev, QED_MSG_IOV, + "Cannot set VF[%d] MAC (VF is not active)\n", vfid); + return -EINVAL; + } + + for_each_hwfn(cdev, i) { + struct qed_hwfn *hwfn = &cdev->hwfns[i]; + struct qed_public_vf_info *vf_info; + + vf_info = qed_iov_get_public_vf_info(hwfn, vfid, true); + if (!vf_info) + continue; + + /* Set the forced vlan, and schedule the IOV task */ + vf_info->forced_vlan = vid; + qed_schedule_iov(hwfn, QED_IOV_WQ_SET_UNICAST_FILTER_FLAG); + } + + return 0; +} + void qed_inform_vf_link_state(struct qed_hwfn *hwfn) { struct qed_mcp_link_capabilities caps; @@ -2671,6 +2993,38 @@ static void qed_handle_vf_msg(struct qed_hwfn *hwfn) qed_ptt_release(hwfn, ptt); } +static void qed_handle_pf_set_vf_unicast(struct qed_hwfn *hwfn) +{ + int i; + + qed_for_each_vf(hwfn, i) { + struct qed_public_vf_info *info; + bool update = false; + + info = qed_iov_get_public_vf_info(hwfn, i, true); + if (!info) + continue; + + /* Update data on bulletin board */ + + if (qed_iov_bulletin_get_forced_vlan(hwfn, i) ^ + info->forced_vlan) { + DP_VERBOSE(hwfn, + QED_MSG_IOV, + "Handling PF setting of pvid [0x%04x] to VF 0x%02x [Abs 0x%02x]\n", + info->forced_vlan, + i, + hwfn->cdev->p_iov_info->first_vf_in_pf + i); + qed_iov_bulletin_set_forced_vlan(hwfn, + info->forced_vlan, i); + update = true; + } + + if (update) + qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG); + } +} + static void qed_handle_bulletin_post(struct qed_hwfn *hwfn) { struct qed_ptt *ptt; @@ -2715,6 +3069,11 @@ void qed_iov_pf_task(struct work_struct *work) if (test_and_clear_bit(QED_IOV_WQ_MSG_FLAG, &hwfn->iov_task_flags)) qed_handle_vf_msg(hwfn); + + if (test_and_clear_bit(QED_IOV_WQ_SET_UNICAST_FILTER_FLAG, + &hwfn->iov_task_flags)) + qed_handle_pf_set_vf_unicast(hwfn); + if (test_and_clear_bit(QED_IOV_WQ_BULLETIN_UPDATE_FLAG, &hwfn->iov_task_flags)) qed_handle_bulletin_post(hwfn); @@ -2774,4 +3133,5 @@ int qed_iov_wq_start(struct qed_dev *cdev) const struct qed_iov_hv_ops qed_iov_ops_pass = { .configure = &qed_sriov_configure, + .set_vlan = &qed_sriov_pf_set_vlan, }; diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.h b/drivers/net/ethernet/qlogic/qed/qed_sriov.h index 2c94b445d07f..e65f403349c2 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.h +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.h @@ -24,6 +24,9 @@ #define QED_MAX_VF_CHAINS_PER_PF 16 #define QED_ETH_VF_NUM_VLAN_FILTERS 2 +#define QED_ETH_MAX_VF_NUM_VLAN_FILTERS \ + (MAX_NUM_VFS * QED_ETH_VF_NUM_VLAN_FILTERS) + enum qed_iov_vport_update_flag { QED_IOV_VP_UPDATE_ACTIVATE, QED_IOV_VP_UPDATE_VLAN_STRIP, @@ -40,6 +43,7 @@ struct qed_public_vf_info { /* These copies will later be reflected in the bulletin board, * but this copy should be newer. 
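The forced_vlan field added to qed_public_vf_info is only the PF's desired copy; the IOV worker (qed_handle_pf_set_vf_unicast above) later compares it with what the VF's bulletin already publishes and schedules a bulletin post only when the two differ. A stand-alone sketch of that reconcile-then-post pattern is below, with invented names and the bulletin write reduced to a plain assignment.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EX_NUM_VFS 4

struct ex_vf_state {
	uint16_t wanted_pvid;		/* what the admin asked for (newer copy) */
	uint16_t published_pvid;	/* what the VF currently sees in its bulletin */
};

/* One pass of the worker: push out differences and report whether a bulletin
 * post needs to be scheduled afterwards.
 */
static bool ex_reconcile(struct ex_vf_state *vfs, int num)
{
	bool post_needed = false;

	for (int i = 0; i < num; i++) {
		if (vfs[i].wanted_pvid == vfs[i].published_pvid)
			continue;
		printf("VF %d: pvid %d -> %d\n", i,
		       vfs[i].published_pvid, vfs[i].wanted_pvid);
		vfs[i].published_pvid = vfs[i].wanted_pvid;
		post_needed = true;
	}
	return post_needed;
}

int main(void)
{
	struct ex_vf_state vfs[EX_NUM_VFS] = { 0 };

	vfs[1].wanted_pvid = 100;	/* admin forced a pvid on VF 1 */
	printf("post bulletin: %s\n", ex_reconcile(vfs, EX_NUM_VFS) ? "yes" : "no");
	printf("post bulletin: %s\n", ex_reconcile(vfs, EX_NUM_VFS) ? "yes" : "no");
	return 0;
}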
*/ + u16 forced_vlan; u8 mac[ETH_ALEN]; }; @@ -98,6 +102,18 @@ enum vf_state { VF_STOPPED /* VF, Stopped */ }; +struct qed_vf_vlan_shadow { + bool used; + u16 vid; +}; + +struct qed_vf_shadow_config { + /* Shadow copy of all guest vlans */ + struct qed_vf_vlan_shadow vlans[QED_ETH_VF_NUM_VLAN_FILTERS + 1]; + + u8 inner_vlan_removal; +}; + /* PFs maintain an array of this structure, per VF */ struct qed_vf_info { struct qed_iov_vf_mbx vf_mbx; @@ -131,6 +147,16 @@ struct qed_vf_info { u16 igu_sbs[QED_MAX_VF_CHAINS_PER_PF]; u8 num_active_rxqs; struct qed_public_vf_info p_vf_info; + + /* Stores the configuration requested by VF */ + struct qed_vf_shadow_config shadow_config; + + /* A bitfield using bulletin's valid-map bits, used to indicate + * which of the bulletin board features have been configured. + */ + u64 configured_features; +#define QED_IOV_CONFIGURED_FEATURES_MASK ((1 << MAC_ADDR_FORCED) | \ + (1 << VLAN_ADDR_FORCED)) }; /* This structure is part of qed_hwfn and used only for PFs that have sriov diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c index e788954568d4..3c8911de3ed4 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_vf.c +++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c @@ -474,7 +474,7 @@ int qed_vf_pf_vport_start(struct qed_hwfn *p_hwfn, u16 mtu, u8 inner_vlan_removal, enum qed_tpa_mode tpa_mode, - u8 max_buffers_per_cqe) + u8 max_buffers_per_cqe, u8 only_untagged) { struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; struct vfpf_vport_start_tlv *req; @@ -489,6 +489,7 @@ int qed_vf_pf_vport_start(struct qed_hwfn *p_hwfn, req->inner_vlan_removal = inner_vlan_removal; req->tpa_mode = tpa_mode; req->max_buffers_per_cqe = max_buffers_per_cqe; + req->only_untagged = only_untagged; /* status blocks */ for (i = 0; i < p_hwfn->vf_iov_info->acquire_resp.resc.num_sbs; i++) @@ -547,6 +548,8 @@ qed_vf_handle_vp_update_is_needed(struct qed_hwfn *p_hwfn, return !!p_data->update_tx_switching_flg; case CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP: return !!p_data->update_inner_vlan_removal_flg; + case CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN: + return !!p_data->update_accept_any_vlan_flg; case CHANNEL_TLV_VPORT_UPDATE_MCAST: return !!p_data->update_approx_mcast_flg; case CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM: @@ -696,6 +699,19 @@ int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn, sizeof(rss_params->rss_key)); } + if (p_params->update_accept_any_vlan_flg) { + struct vfpf_vport_update_accept_any_vlan_tlv *p_any_vlan_tlv; + + size = sizeof(struct vfpf_vport_update_accept_any_vlan_tlv); + tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN; + p_any_vlan_tlv = qed_add_tlv(p_hwfn, &p_iov->offset, tlv, size); + + resp_size += sizeof(struct pfvf_def_resp_tlv); + p_any_vlan_tlv->accept_any_vlan = p_params->accept_any_vlan; + p_any_vlan_tlv->update_accept_any_vlan_flg = + p_params->update_accept_any_vlan_flg; + } + /* add list termination tlv */ qed_add_tlv(p_hwfn, &p_iov->offset, CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv)); diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.h b/drivers/net/ethernet/qlogic/qed/qed_vf.h index d9a8aa684ad7..35eced3691ba 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_vf.h +++ b/drivers/net/ethernet/qlogic/qed/qed_vf.h @@ -417,6 +417,16 @@ union pfvf_tlvs { struct pfvf_start_queue_resp_tlv queue_start; }; +enum qed_bulletin_bit { + /* Alert the VF that a forced VLAN was set by the PF */ + VLAN_ADDR_FORCED = 2, + + /* Indicate that `default_only_untagged' contains actual data */ + VFPF_BULLETIN_UNTAGGED_DEFAULT = 3, + 
VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED = 4, + +}; + struct qed_bulletin_content { /* crc of structure to ensure is not in mid-update */ u32 crc; @@ -465,6 +475,10 @@ struct qed_bulletin_content { u32 partner_adv_speed; u32 capability_speed; + + /* Forced vlan */ + u16 pvid; + u16 padding5; }; struct qed_bulletin { @@ -737,7 +751,7 @@ int qed_vf_pf_vport_start(struct qed_hwfn *p_hwfn, u16 mtu, u8 inner_vlan_removal, enum qed_tpa_mode tpa_mode, - u8 max_buffers_per_cqe); + u8 max_buffers_per_cqe, u8 only_untagged); /** * @brief qed_vf_pf_vport_stop - stop the VF's vport @@ -898,7 +912,8 @@ static inline int qed_vf_pf_vport_start(struct qed_hwfn *p_hwfn, u16 mtu, u8 inner_vlan_removal, enum qed_tpa_mode tpa_mode, - u8 max_buffers_per_cqe) + u8 max_buffers_per_cqe, + u8 only_untagged) { return -EINVAL; } diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index bf54cfcd75c0..4d59d7e00e42 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -103,6 +103,21 @@ static int qede_alloc_rx_buffer(struct qede_dev *edev, static void qede_link_update(void *dev, struct qed_link_output *link); #ifdef CONFIG_QED_SRIOV +static int qede_set_vf_vlan(struct net_device *ndev, int vf, u16 vlan, u8 qos) +{ + struct qede_dev *edev = netdev_priv(ndev); + + if (vlan > 4095) { + DP_NOTICE(edev, "Illegal vlan value %d\n", vlan); + return -EINVAL; + } + + DP_VERBOSE(edev, QED_MSG_IOV, "Setting Vlan 0x%04x to VF [%d]\n", + vlan, vf); + + return edev->ops->iov->set_vlan(edev->cdev, vlan, vf); +} + static int qede_sriov_configure(struct pci_dev *pdev, int num_vfs_param) { struct qede_dev *edev = netdev_priv(pci_get_drvdata(pdev)); @@ -2071,6 +2086,9 @@ static const struct net_device_ops qede_netdev_ops = { .ndo_set_mac_address = qede_set_mac_addr, .ndo_validate_addr = eth_validate_addr, .ndo_change_mtu = qede_change_mtu, +#ifdef CONFIG_QED_SRIOV + .ndo_set_vf_vlan = qede_set_vf_vlan, +#endif .ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid, .ndo_get_stats64 = qede_get_stats64, diff --git a/include/linux/qed/qed_iov_if.h b/include/linux/qed/qed_iov_if.h index c53bfa6374c5..825c007d50f1 100644 --- a/include/linux/qed/qed_iov_if.h +++ b/include/linux/qed/qed_iov_if.h @@ -15,6 +15,7 @@ struct qed_iov_hv_ops { int (*configure)(struct qed_dev *cdev, int num_vfs_param); + int (*set_vlan) (struct qed_dev *cdev, u16 vid, int vfid); }; #endif From eff169608c250193e72089dc4ab15cb79e0bd68c Mon Sep 17 00:00:00 2001 From: Yuval Mintz Date: Wed, 11 May 2016 16:36:21 +0300 Subject: [PATCH 1510/1649] qed*: Support forced MAC Allows the PF to enforce the VF's mac. i.e., by using `ip link ... vf mac '. While a MAC is forced, PF would prevent the VF from configuring any other MAC. Signed-off-by: Yuval Mintz Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/qed/qed_l2.c | 9 ++ drivers/net/ethernet/qlogic/qed/qed_sriov.c | 120 +++++++++++++++++++ drivers/net/ethernet/qlogic/qed/qed_sriov.h | 1 + drivers/net/ethernet/qlogic/qed/qed_vf.c | 47 ++++++++ drivers/net/ethernet/qlogic/qed/qed_vf.h | 21 ++++ drivers/net/ethernet/qlogic/qede/qede_main.c | 31 +++++ include/linux/qed/qed_eth_if.h | 3 + include/linux/qed/qed_iov_if.h | 2 + 8 files changed, 234 insertions(+) diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c index 7fb6b82f1a97..8d83250aa5ba 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c @@ -1701,6 +1701,14 @@ static void qed_register_eth_ops(struct qed_dev *cdev, qed_vf_start_iov_wq(cdev); } +static bool qed_check_mac(struct qed_dev *cdev, u8 *mac) +{ + if (IS_PF(cdev)) + return true; + + return qed_vf_check_mac(&cdev->hwfns[0], mac); +} + static int qed_start_vport(struct qed_dev *cdev, struct qed_start_vport_params *params) { @@ -2149,6 +2157,7 @@ static const struct qed_eth_ops qed_eth_ops_pass = { #endif .fill_dev_info = &qed_fill_eth_dev_info, .register_ops = &qed_register_eth_ops, + .check_mac = &qed_check_mac, .vport_start = &qed_start_vport, .vport_stop = &qed_stop_vport, .vport_update = &qed_update_vport, diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c index 77d44baa5df3..c1b79190ce4d 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c @@ -1295,6 +1295,29 @@ static int qed_iov_configure_vport_forced(struct qed_hwfn *p_hwfn, if (!p_vf->vport_instance) return -EINVAL; + if (events & (1 << MAC_ADDR_FORCED)) { + /* Since there's no way [currently] of removing the MAC, + * we can always assume this means we need to force it. 
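+		 * The bulletin MAC is pushed to the VF's vport as a REPLACE
+		 * unicast filter on both Rx and Tx, and MAC_ADDR_FORCED is
+		 * then latched in configured_features.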
+ */ + memset(&filter, 0, sizeof(filter)); + filter.type = QED_FILTER_MAC; + filter.opcode = QED_FILTER_REPLACE; + filter.is_rx_filter = 1; + filter.is_tx_filter = 1; + filter.vport_to_add_to = p_vf->vport_id; + ether_addr_copy(filter.mac, p_vf->bulletin.p_virt->mac); + + rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid, + &filter, QED_SPQ_MODE_CB, NULL); + if (rc) { + DP_NOTICE(p_hwfn, + "PF failed to configure MAC for VF\n"); + return rc; + } + + p_vf->configured_features |= 1 << MAC_ADDR_FORCED; + } + if (events & (1 << VLAN_ADDR_FORCED)) { struct qed_sp_vport_update_params vport_update; u8 removal; @@ -2199,6 +2222,16 @@ static void qed_iov_vf_mbx_ucast_filter(struct qed_hwfn *p_hwfn, goto out; } + if ((p_bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)) && + (params.type == QED_FILTER_MAC || + params.type == QED_FILTER_MAC_VLAN)) { + if (!ether_addr_equal(p_bulletin->mac, params.mac) || + (params.opcode != QED_FILTER_ADD && + params.opcode != QED_FILTER_REPLACE)) + status = PFVF_STATUS_FORCED; + goto out; + } + rc = qed_iov_chk_ucast(p_hwfn, vf->relative_vf_id, ¶ms); if (rc) { status = PFVF_STATUS_FAILURE; @@ -2702,6 +2735,30 @@ static int qed_iov_copy_vf_msg(struct qed_hwfn *p_hwfn, struct qed_ptt *ptt, return 0; } +static void qed_iov_bulletin_set_forced_mac(struct qed_hwfn *p_hwfn, + u8 *mac, int vfid) +{ + struct qed_vf_info *vf_info; + u64 feature; + + vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true); + if (!vf_info) { + DP_NOTICE(p_hwfn->cdev, + "Can not set forced MAC, invalid vfid [%d]\n", vfid); + return; + } + + feature = 1 << MAC_ADDR_FORCED; + memcpy(vf_info->bulletin.p_virt->mac, mac, ETH_ALEN); + + vf_info->bulletin.p_virt->valid_bitmap |= feature; + /* Forced MAC will disable MAC_ADDR */ + vf_info->bulletin.p_virt->valid_bitmap &= + ~(1 << VFPF_BULLETIN_MAC_ADDR); + + qed_iov_configure_vport_forced(p_hwfn, vf_info, feature); +} + void qed_iov_bulletin_set_forced_vlan(struct qed_hwfn *p_hwfn, u16 pvid, int vfid) { @@ -2736,6 +2793,21 @@ bool qed_iov_is_vf_stopped(struct qed_hwfn *p_hwfn, int vfid) return p_vf_info->state == VF_STOPPED; } +static u8 *qed_iov_bulletin_get_forced_mac(struct qed_hwfn *p_hwfn, + u16 rel_vf_id) +{ + struct qed_vf_info *p_vf; + + p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true); + if (!p_vf || !p_vf->bulletin.p_virt) + return NULL; + + if (!(p_vf->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED))) + return NULL; + + return p_vf->bulletin.p_virt->mac; +} + u16 qed_iov_bulletin_get_forced_vlan(struct qed_hwfn *p_hwfn, u16 rel_vf_id) { struct qed_vf_info *p_vf; @@ -2899,6 +2971,38 @@ static int qed_sriov_configure(struct qed_dev *cdev, int num_vfs_param) return qed_sriov_disable(cdev, true); } +static int qed_sriov_pf_set_mac(struct qed_dev *cdev, u8 *mac, int vfid) +{ + int i; + + if (!IS_QED_SRIOV(cdev) || !IS_PF_SRIOV_ALLOC(&cdev->hwfns[0])) { + DP_VERBOSE(cdev, QED_MSG_IOV, + "Cannot set a VF MAC; Sriov is not enabled\n"); + return -EINVAL; + } + + if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vfid, true)) { + DP_VERBOSE(cdev, QED_MSG_IOV, + "Cannot set VF[%d] MAC (VF is not active)\n", vfid); + return -EINVAL; + } + + for_each_hwfn(cdev, i) { + struct qed_hwfn *hwfn = &cdev->hwfns[i]; + struct qed_public_vf_info *vf_info; + + vf_info = qed_iov_get_public_vf_info(hwfn, vfid, true); + if (!vf_info) + continue; + + /* Set the forced MAC, and schedule the IOV task */ + ether_addr_copy(vf_info->forced_mac, mac); + qed_schedule_iov(hwfn, QED_IOV_WQ_SET_UNICAST_FILTER_FLAG); + } + + return 0; +} + static int 
qed_sriov_pf_set_vlan(struct qed_dev *cdev, u16 vid, int vfid) { int i; @@ -3000,12 +3104,27 @@ static void qed_handle_pf_set_vf_unicast(struct qed_hwfn *hwfn) qed_for_each_vf(hwfn, i) { struct qed_public_vf_info *info; bool update = false; + u8 *mac; info = qed_iov_get_public_vf_info(hwfn, i, true); if (!info) continue; /* Update data on bulletin board */ + mac = qed_iov_bulletin_get_forced_mac(hwfn, i); + if (is_valid_ether_addr(info->forced_mac) && + (!mac || !ether_addr_equal(mac, info->forced_mac))) { + DP_VERBOSE(hwfn, + QED_MSG_IOV, + "Handling PF setting of VF MAC to VF 0x%02x [Abs 0x%02x]\n", + i, + hwfn->cdev->p_iov_info->first_vf_in_pf + i); + + /* Update bulletin board with forced MAC */ + qed_iov_bulletin_set_forced_mac(hwfn, + info->forced_mac, i); + update = true; + } if (qed_iov_bulletin_get_forced_vlan(hwfn, i) ^ info->forced_vlan) { @@ -3133,5 +3252,6 @@ int qed_iov_wq_start(struct qed_dev *cdev) const struct qed_iov_hv_ops qed_iov_ops_pass = { .configure = &qed_sriov_configure, + .set_mac = &qed_sriov_pf_set_mac, .set_vlan = &qed_sriov_pf_set_vlan, }; diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.h b/drivers/net/ethernet/qlogic/qed/qed_sriov.h index e65f403349c2..e38ea985abe1 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.h +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.h @@ -43,6 +43,7 @@ struct qed_public_vf_info { /* These copies will later be reflected in the bulletin board, * but this copy should be newer. */ + u8 forced_mac[ETH_ALEN]; u16 forced_vlan; u8 mac[ETH_ALEN]; }; diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c index 3c8911de3ed4..db14e230c9a4 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_vf.c +++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c @@ -7,6 +7,7 @@ */ #include +#include #include "qed.h" #include "qed_sriov.h" #include "qed_vf.h" @@ -1004,6 +1005,43 @@ void qed_vf_get_num_vlan_filters(struct qed_hwfn *p_hwfn, u8 *num_vlan_filters) *num_vlan_filters = p_vf->acquire_resp.resc.num_vlan_filters; } +bool qed_vf_check_mac(struct qed_hwfn *p_hwfn, u8 *mac) +{ + struct qed_bulletin_content *bulletin; + + bulletin = &p_hwfn->vf_iov_info->bulletin_shadow; + if (!(bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED))) + return true; + + /* Forbid VF from changing a MAC enforced by PF */ + if (ether_addr_equal(bulletin->mac, mac)) + return false; + + return false; +} + +bool qed_vf_bulletin_get_forced_mac(struct qed_hwfn *hwfn, + u8 *dst_mac, u8 *p_is_forced) +{ + struct qed_bulletin_content *bulletin; + + bulletin = &hwfn->vf_iov_info->bulletin_shadow; + + if (bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)) { + if (p_is_forced) + *p_is_forced = 1; + } else if (bulletin->valid_bitmap & (1 << VFPF_BULLETIN_MAC_ADDR)) { + if (p_is_forced) + *p_is_forced = 0; + } else { + return false; + } + + ether_addr_copy(dst_mac, bulletin->mac); + + return true; +} + void qed_vf_get_fw_version(struct qed_hwfn *p_hwfn, u16 *fw_major, u16 *fw_minor, u16 *fw_rev, u16 *fw_eng) @@ -1020,6 +1058,15 @@ void qed_vf_get_fw_version(struct qed_hwfn *p_hwfn, static void qed_handle_bulletin_change(struct qed_hwfn *hwfn) { + struct qed_eth_cb_ops *ops = hwfn->cdev->protocol_ops.eth; + u8 mac[ETH_ALEN], is_mac_exist, is_mac_forced; + void *cookie = hwfn->cdev->ops_cookie; + + is_mac_exist = qed_vf_bulletin_get_forced_mac(hwfn, mac, + &is_mac_forced); + if (is_mac_exist && is_mac_forced && cookie) + ops->force_mac(cookie, mac); + /* Always update link configuration according to bulletin */ qed_link_update(hwfn); } diff --git 
a/drivers/net/ethernet/qlogic/qed/qed_vf.h b/drivers/net/ethernet/qlogic/qed/qed_vf.h index 35eced3691ba..b82fda964bbd 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_vf.h +++ b/drivers/net/ethernet/qlogic/qed/qed_vf.h @@ -418,6 +418,8 @@ union pfvf_tlvs { }; enum qed_bulletin_bit { + /* Alert the VF that a forced MAC was set by the PF */ + MAC_ADDR_FORCED = 0, /* Alert the VF that a forced VLAN was set by the PF */ VLAN_ADDR_FORCED = 2, @@ -425,6 +427,10 @@ enum qed_bulletin_bit { VFPF_BULLETIN_UNTAGGED_DEFAULT = 3, VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED = 4, + /* Alert the VF that suggested mac was sent by the PF. + * MAC_ADDR will be disabled in case MAC_ADDR_FORCED is set. + */ + VFPF_BULLETIN_MAC_ADDR = 5 }; struct qed_bulletin_content { @@ -601,6 +607,16 @@ void qed_vf_get_port_mac(struct qed_hwfn *p_hwfn, u8 *port_mac); void qed_vf_get_num_vlan_filters(struct qed_hwfn *p_hwfn, u8 *num_vlan_filters); +/** + * @brief Check if VF can set a MAC address + * + * @param p_hwfn + * @param mac + * + * @return bool + */ +bool qed_vf_check_mac(struct qed_hwfn *p_hwfn, u8 *mac); + /** * @brief Set firmware version information in dev_info from VFs acquire response tlv * @@ -841,6 +857,11 @@ static inline void qed_vf_get_num_vlan_filters(struct qed_hwfn *p_hwfn, { } +static inline bool qed_vf_check_mac(struct qed_hwfn *p_hwfn, u8 *mac) +{ + return false; +} + static inline void qed_vf_get_fw_version(struct qed_hwfn *p_hwfn, u16 *fw_major, u16 *fw_minor, u16 *fw_rev, u16 *fw_eng) diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index 4d59d7e00e42..b326b15d5196 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -118,6 +118,22 @@ static int qede_set_vf_vlan(struct net_device *ndev, int vf, u16 vlan, u8 qos) return edev->ops->iov->set_vlan(edev->cdev, vlan, vf); } +static int qede_set_vf_mac(struct net_device *ndev, int vfidx, u8 *mac) +{ + struct qede_dev *edev = netdev_priv(ndev); + + DP_VERBOSE(edev, QED_MSG_IOV, + "Setting MAC %02x:%02x:%02x:%02x:%02x:%02x to VF [%d]\n", + mac[0], mac[1], mac[2], mac[3], mac[4], mac[5], vfidx); + + if (!is_valid_ether_addr(mac)) { + DP_VERBOSE(edev, QED_MSG_IOV, "MAC address isn't valid\n"); + return -EINVAL; + } + + return edev->ops->iov->set_mac(edev->cdev, mac, vfidx); +} + static int qede_sriov_configure(struct pci_dev *pdev, int num_vfs_param) { struct qede_dev *edev = netdev_priv(pci_get_drvdata(pdev)); @@ -138,10 +154,19 @@ static struct pci_driver qede_pci_driver = { #endif }; +static void qede_force_mac(void *dev, u8 *mac) +{ + struct qede_dev *edev = dev; + + ether_addr_copy(edev->ndev->dev_addr, mac); + ether_addr_copy(edev->primary_mac, mac); +} + static struct qed_eth_cb_ops qede_ll_ops = { { .link_update = qede_link_update, }, + .force_mac = qede_force_mac, }; static int qede_netdev_event(struct notifier_block *this, unsigned long event, @@ -2087,6 +2112,7 @@ static const struct net_device_ops qede_netdev_ops = { .ndo_validate_addr = eth_validate_addr, .ndo_change_mtu = qede_change_mtu, #ifdef CONFIG_QED_SRIOV + .ndo_set_vf_mac = qede_set_vf_mac, .ndo_set_vf_vlan = qede_set_vf_vlan, #endif .ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid, @@ -3512,6 +3538,11 @@ static int qede_set_mac_addr(struct net_device *ndev, void *p) return -EFAULT; } + if (!edev->ops->check_mac(edev->cdev, addr->sa_data)) { + DP_NOTICE(edev, "qed prevents setting MAC\n"); + return -EINVAL; + } + ether_addr_copy(ndev->dev_addr, addr->sa_data); if 
(!netif_running(ndev)) { diff --git a/include/linux/qed/qed_eth_if.h b/include/linux/qed/qed_eth_if.h index acfafca43aa5..e0f6e6482031 100644 --- a/include/linux/qed/qed_eth_if.h +++ b/include/linux/qed/qed_eth_if.h @@ -122,6 +122,7 @@ struct qed_tunn_params { struct qed_eth_cb_ops { struct qed_common_cb_ops common; + void (*force_mac) (void *dev, u8 *mac); }; struct qed_eth_ops { @@ -137,6 +138,8 @@ struct qed_eth_ops { struct qed_eth_cb_ops *ops, void *cookie); + bool(*check_mac) (struct qed_dev *cdev, u8 *mac); + int (*vport_start)(struct qed_dev *cdev, struct qed_start_vport_params *params); diff --git a/include/linux/qed/qed_iov_if.h b/include/linux/qed/qed_iov_if.h index 825c007d50f1..7a67fbf4336a 100644 --- a/include/linux/qed/qed_iov_if.h +++ b/include/linux/qed/qed_iov_if.h @@ -15,6 +15,8 @@ struct qed_iov_hv_ops { int (*configure)(struct qed_dev *cdev, int num_vfs_param); + int (*set_mac) (struct qed_dev *cdev, u8 *mac, int vfid); + int (*set_vlan) (struct qed_dev *cdev, u16 vid, int vfid); }; From 733def6a04bf3d2810dd675e1240f8df94d633c3 Mon Sep 17 00:00:00 2001 From: Yuval Mintz Date: Wed, 11 May 2016 16:36:22 +0300 Subject: [PATCH 1511/1649] qed*: IOV link control This adds support in 2 ndo that allow PF to tweak the VF's view of the link - `ndo_set_vf_link_state' to allow it a view independent of the PF's, and `ndo_set_vf_rate' which would allow the PF to limit the VF speed. Signed-off-by: Yuval Mintz Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed.h | 2 + drivers/net/ethernet/qlogic/qed/qed_dev.c | 76 +++++++++ drivers/net/ethernet/qlogic/qed/qed_sriov.c | 164 +++++++++++++++++++ drivers/net/ethernet/qlogic/qed/qed_sriov.h | 6 + drivers/net/ethernet/qlogic/qede/qede_main.c | 26 +++ include/linux/qed/qed_iov_if.h | 6 + 6 files changed, 280 insertions(+) diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h index d7da64556e4b..77323fc70927 100644 --- a/drivers/net/ethernet/qlogic/qed/qed.h +++ b/drivers/net/ethernet/qlogic/qed/qed.h @@ -554,8 +554,10 @@ static inline u8 qed_concrete_to_sw_fid(struct qed_dev *cdev, #define PURE_LB_TC 8 +int qed_configure_vport_wfq(struct qed_dev *cdev, u16 vp_id, u32 rate); void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev, u32 min_pf_rate); +void qed_clean_wfq_db(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); #define QED_LEADING_HWFN(dev) (&dev->hwfns[0]) /* Other Linux specific common definitions */ diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index e75e73a77b27..acaa2866dae3 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -1889,6 +1889,32 @@ static int qed_init_wfq_param(struct qed_hwfn *p_hwfn, return 0; } +static int __qed_configure_vport_wfq(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u16 vp_id, u32 rate) +{ + struct qed_mcp_link_state *p_link; + int rc = 0; + + p_link = &p_hwfn->cdev->hwfns[0].mcp_info->link_output; + + if (!p_link->min_pf_rate) { + p_hwfn->qm_info.wfq_data[vp_id].min_speed = rate; + p_hwfn->qm_info.wfq_data[vp_id].configured = true; + return rc; + } + + rc = qed_init_wfq_param(p_hwfn, vp_id, rate, p_link->min_pf_rate); + + if (rc == 0) + qed_configure_wfq_for_all_vports(p_hwfn, p_ptt, + p_link->min_pf_rate); + else + DP_NOTICE(p_hwfn, + "Validation failed while configuring min rate\n"); + + return rc; +} + static int __qed_configure_vp_wfq_on_link_change(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 min_pf_rate) @@ -1923,6 
+1949,42 @@ static int __qed_configure_vp_wfq_on_link_change(struct qed_hwfn *p_hwfn, return rc; } +/* Main API for qed clients to configure vport min rate. + * vp_id - vport id in PF Range[0 - (total_num_vports_per_pf - 1)] + * rate - Speed in Mbps needs to be assigned to a given vport. + */ +int qed_configure_vport_wfq(struct qed_dev *cdev, u16 vp_id, u32 rate) +{ + int i, rc = -EINVAL; + + /* Currently not supported; Might change in future */ + if (cdev->num_hwfns > 1) { + DP_NOTICE(cdev, + "WFQ configuration is not supported for this device\n"); + return rc; + } + + for_each_hwfn(cdev, i) { + struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; + struct qed_ptt *p_ptt; + + p_ptt = qed_ptt_acquire(p_hwfn); + if (!p_ptt) + return -EBUSY; + + rc = __qed_configure_vport_wfq(p_hwfn, p_ptt, vp_id, rate); + + if (!rc) { + qed_ptt_release(p_hwfn, p_ptt); + return rc; + } + + qed_ptt_release(p_hwfn, p_ptt); + } + + return rc; +} + /* API to configure WFQ from mcp link change */ void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev, u32 min_pf_rate) { @@ -2069,3 +2131,17 @@ int qed_configure_pf_min_bandwidth(struct qed_dev *cdev, u8 min_bw) return rc; } + +void qed_clean_wfq_db(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + struct qed_mcp_link_state *p_link; + + p_link = &p_hwfn->mcp_info->link_output; + + if (p_link->min_pf_rate) + qed_disable_wfq_for_all_vports(p_hwfn, p_ptt, + p_link->min_pf_rate); + + memset(p_hwfn->qm_info.wfq_data, 0, + sizeof(*p_hwfn->qm_info.wfq_data) * p_hwfn->qm_info.num_vports); +} diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c index c1b79190ce4d..c9a3bb63cc87 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c @@ -2822,6 +2822,46 @@ u16 qed_iov_bulletin_get_forced_vlan(struct qed_hwfn *p_hwfn, u16 rel_vf_id) return p_vf->bulletin.p_virt->pvid; } +static int qed_iov_configure_tx_rate(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, int vfid, int val) +{ + struct qed_vf_info *vf; + u8 abs_vp_id = 0; + int rc; + + vf = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true); + if (!vf) + return -EINVAL; + + rc = qed_fw_vport(p_hwfn, vf->vport_id, &abs_vp_id); + if (rc) + return rc; + + return qed_init_vport_rl(p_hwfn, p_ptt, abs_vp_id, (u32)val); +} + +int qed_iov_configure_min_tx_rate(struct qed_dev *cdev, int vfid, u32 rate) +{ + struct qed_vf_info *vf; + u8 vport_id; + int i; + + for_each_hwfn(cdev, i) { + struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; + + if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) { + DP_NOTICE(p_hwfn, + "SR-IOV sanity check failed, can't set min rate\n"); + return -EINVAL; + } + } + + vf = qed_iov_get_vf_info(QED_LEADING_HWFN(cdev), (u16)vfid, true); + vport_id = vf->vport_id; + + return qed_configure_vport_wfq(cdev, vport_id, rate); +} + /** * qed_schedule_iov - schedules IOV task for VF and PF * @hwfn: hardware function pointer @@ -2871,6 +2911,9 @@ int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled) return -EBUSY; } + /* Clean WFQ db and configure equal weight for all vports */ + qed_clean_wfq_db(hwfn, ptt); + qed_for_each_vf(hwfn, j) { int k; @@ -3047,17 +3090,136 @@ void qed_inform_vf_link_state(struct qed_hwfn *hwfn) /* Update bulletin of all future possible VFs with link configuration */ for (i = 0; i < hwfn->cdev->p_iov_info->total_vfs; i++) { + struct qed_public_vf_info *vf_info; + + vf_info = qed_iov_get_public_vf_info(hwfn, i, false); + if (!vf_info) + continue; + memcpy(¶ms, qed_mcp_get_link_params(hwfn), sizeof(params)); 
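+		/* The PF's own link params/state/caps serve as the baseline
+		 * here; the code below then overrides link_up and speed
+		 * according to the VF's configured link state and Tx rate
+		 * before qed_iov_set_link() writes the result into the VF's
+		 * bulletin.
+		 */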
memcpy(&link, qed_mcp_get_link_state(hwfn), sizeof(link)); memcpy(&caps, qed_mcp_get_link_capabilities(hwfn), sizeof(caps)); + /* Modify link according to the VF's configured link state */ + switch (vf_info->link_state) { + case IFLA_VF_LINK_STATE_DISABLE: + link.link_up = false; + break; + case IFLA_VF_LINK_STATE_ENABLE: + link.link_up = true; + /* Set speed according to maximum supported by HW. + * that is 40G for regular devices and 100G for CMT + * mode devices. + */ + link.speed = (hwfn->cdev->num_hwfns > 1) ? + 100000 : 40000; + default: + /* In auto mode pass PF link image to VF */ + break; + } + + if (link.link_up && vf_info->tx_rate) { + struct qed_ptt *ptt; + int rate; + + rate = min_t(int, vf_info->tx_rate, link.speed); + + ptt = qed_ptt_acquire(hwfn); + if (!ptt) { + DP_NOTICE(hwfn, "Failed to acquire PTT\n"); + return; + } + + if (!qed_iov_configure_tx_rate(hwfn, ptt, i, rate)) { + vf_info->tx_rate = rate; + link.speed = rate; + } + + qed_ptt_release(hwfn, ptt); + } + qed_iov_set_link(hwfn, i, ¶ms, &link, &caps); } qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG); } +static int qed_set_vf_link_state(struct qed_dev *cdev, + int vf_id, int link_state) +{ + int i; + + /* Sanitize request */ + if (IS_VF(cdev)) + return -EINVAL; + + if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vf_id, true)) { + DP_VERBOSE(cdev, QED_MSG_IOV, + "VF index [%d] isn't active\n", vf_id); + return -EINVAL; + } + + /* Handle configuration of link state */ + for_each_hwfn(cdev, i) { + struct qed_hwfn *hwfn = &cdev->hwfns[i]; + struct qed_public_vf_info *vf; + + vf = qed_iov_get_public_vf_info(hwfn, vf_id, true); + if (!vf) + continue; + + if (vf->link_state == link_state) + continue; + + vf->link_state = link_state; + qed_inform_vf_link_state(&cdev->hwfns[i]); + } + + return 0; +} + +static int qed_configure_max_vf_rate(struct qed_dev *cdev, int vfid, int rate) +{ + int i; + + for_each_hwfn(cdev, i) { + struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; + struct qed_public_vf_info *vf; + + if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) { + DP_NOTICE(p_hwfn, + "SR-IOV sanity check failed, can't set tx rate\n"); + return -EINVAL; + } + + vf = qed_iov_get_public_vf_info(p_hwfn, vfid, true); + + vf->tx_rate = rate; + + qed_inform_vf_link_state(p_hwfn); + } + + return 0; +} + +static int qed_set_vf_rate(struct qed_dev *cdev, + int vfid, u32 min_rate, u32 max_rate) +{ + int rc_min = 0, rc_max = 0; + + if (max_rate) + rc_max = qed_configure_max_vf_rate(cdev, vfid, max_rate); + + if (min_rate) + rc_min = qed_iov_configure_min_tx_rate(cdev, vfid, min_rate); + + if (rc_max | rc_min) + return -EINVAL; + + return 0; +} + static void qed_handle_vf_msg(struct qed_hwfn *hwfn) { u64 events[QED_VF_ARRAY_LENGTH]; @@ -3254,4 +3416,6 @@ const struct qed_iov_hv_ops qed_iov_ops_pass = { .configure = &qed_sriov_configure, .set_mac = &qed_sriov_pf_set_mac, .set_vlan = &qed_sriov_pf_set_vlan, + .set_link_state = &qed_set_vf_link_state, + .set_rate = &qed_set_vf_rate, }; diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.h b/drivers/net/ethernet/qlogic/qed/qed_sriov.h index e38ea985abe1..ab3d291cf7f0 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.h +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.h @@ -46,6 +46,12 @@ struct qed_public_vf_info { u8 forced_mac[ETH_ALEN]; u16 forced_vlan; u8 mac[ETH_ALEN]; + + /* IFLA_VF_LINK_STATE_ */ + int link_state; + + /* Currently configured Tx rate in MB/sec. 
0 if unconfigured */ + int tx_rate; }; /* This struct is part of qed_dev and contains data relevant to all hwfns; diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index b326b15d5196..3d0f98f81122 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -1792,6 +1792,28 @@ static struct rtnl_link_stats64 *qede_get_stats64( return stats; } +#ifdef CONFIG_QED_SRIOV +static int qede_set_vf_rate(struct net_device *dev, int vfidx, + int min_tx_rate, int max_tx_rate) +{ + struct qede_dev *edev = netdev_priv(dev); + + return edev->ops->iov->set_rate(edev->cdev, vfidx, max_tx_rate, + max_tx_rate); +} + +static int qede_set_vf_link_state(struct net_device *dev, int vfidx, + int link_state) +{ + struct qede_dev *edev = netdev_priv(dev); + + if (!edev->ops) + return -EINVAL; + + return edev->ops->iov->set_link_state(edev->cdev, vfidx, link_state); +} +#endif + static void qede_config_accept_any_vlan(struct qede_dev *edev, bool action) { struct qed_update_vport_params params; @@ -2118,6 +2140,10 @@ static const struct net_device_ops qede_netdev_ops = { .ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid, .ndo_get_stats64 = qede_get_stats64, +#ifdef CONFIG_QED_SRIOV + .ndo_set_vf_link_state = qede_set_vf_link_state, + .ndo_set_vf_rate = qede_set_vf_rate, +#endif #ifdef CONFIG_QEDE_VXLAN .ndo_add_vxlan_port = qede_add_vxlan_port, .ndo_del_vxlan_port = qede_del_vxlan_port, diff --git a/include/linux/qed/qed_iov_if.h b/include/linux/qed/qed_iov_if.h index 7a67fbf4336a..f364f2bd7a4d 100644 --- a/include/linux/qed/qed_iov_if.h +++ b/include/linux/qed/qed_iov_if.h @@ -18,6 +18,12 @@ struct qed_iov_hv_ops { int (*set_mac) (struct qed_dev *cdev, u8 *mac, int vfid); int (*set_vlan) (struct qed_dev *cdev, u16 vid, int vfid); + + int (*set_link_state) (struct qed_dev *cdev, int vf_id, + int link_state); + + int (*set_rate) (struct qed_dev *cdev, int vfid, + u32 min_rate, u32 max_rate); }; #endif From 6ddc7608258d57d61e16d55461400bb6eff18d72 Mon Sep 17 00:00:00 2001 From: Yuval Mintz Date: Wed, 11 May 2016 16:36:23 +0300 Subject: [PATCH 1512/1649] qed*: IOV support spoof-checking Add support in `ndo_set_vf_spoofchk' for allowing PF control over its VF spoof-checking configuration. Signed-off-by: Yuval Mintz Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/qed/qed_l2.c | 4 + drivers/net/ethernet/qlogic/qed/qed_l2.h | 2 + drivers/net/ethernet/qlogic/qed/qed_sriov.c | 91 ++++++++++++++++++++ drivers/net/ethernet/qlogic/qed/qed_sriov.h | 2 + drivers/net/ethernet/qlogic/qede/qede_main.c | 11 +++ include/linux/qed/qed_iov_if.h | 2 + 6 files changed, 112 insertions(+) diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c index 8d83250aa5ba..e0275a78b121 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c @@ -381,6 +381,10 @@ int qed_sp_vport_update(struct qed_hwfn *p_hwfn, p_ramrod->common.tx_switching_en = p_params->tx_switching_flg; p_cmn->update_tx_switching_en_flg = p_params->update_tx_switching_flg; + p_cmn->anti_spoofing_en = p_params->anti_spoofing_en; + val = p_params->update_anti_spoofing_en_flg; + p_ramrod->common.update_anti_spoofing_en_flg = val; + rc = qed_sp_vport_update_rss(p_hwfn, p_ramrod, p_rss_params); if (rc) { /* Return spq entry which is taken in qed_sp_init_request()*/ diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.h b/drivers/net/ethernet/qlogic/qed/qed_l2.h index fad30ae12f63..a04fb7f061ea 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.h +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.h @@ -149,6 +149,8 @@ struct qed_sp_vport_update_params { u8 update_tx_switching_flg; u8 tx_switching_flg; u8 update_approx_mcast_flg; + u8 update_anti_spoofing_en_flg; + u8 anti_spoofing_en; u8 update_accept_any_vlan_flg; u8 accept_any_vlan; unsigned long bins[8]; diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c index c9a3bb63cc87..804102c257e6 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c @@ -1234,6 +1234,39 @@ out: sizeof(struct pfvf_acquire_resp_tlv), vfpf_status); } +static int __qed_iov_spoofchk_set(struct qed_hwfn *p_hwfn, + struct qed_vf_info *p_vf, bool val) +{ + struct qed_sp_vport_update_params params; + int rc; + + if (val == p_vf->spoof_chk) { + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "Spoofchk value[%d] is already configured\n", val); + return 0; + } + + memset(¶ms, 0, sizeof(struct qed_sp_vport_update_params)); + params.opaque_fid = p_vf->opaque_fid; + params.vport_id = p_vf->vport_id; + params.update_anti_spoofing_en_flg = 1; + params.anti_spoofing_en = val; + + rc = qed_sp_vport_update(p_hwfn, ¶ms, QED_SPQ_MODE_EBLOCK, NULL); + if (rc) { + p_vf->spoof_chk = val; + p_vf->req_spoofchk_val = p_vf->spoof_chk; + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "Spoofchk val[%d] configured\n", val); + } else { + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "Spoofchk configuration[val:%d] failed for VF[%d]\n", + val, p_vf->relative_vf_id); + } + + return rc; +} + static int qed_iov_reconfigure_unicast_vlan(struct qed_hwfn *p_hwfn, struct qed_vf_info *p_vf) { @@ -1476,6 +1509,8 @@ static void qed_iov_vf_mbx_start_vport(struct qed_hwfn *p_hwfn, /* Force configuration if needed on the newly opened vport */ qed_iov_configure_vport_forced(p_hwfn, vf, *p_bitmap); + + __qed_iov_spoofchk_set(p_hwfn, vf, vf->req_spoofchk_val); } qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_START, sizeof(struct pfvf_def_resp_tlv), status); @@ -1489,6 +1524,7 @@ static void qed_iov_vf_mbx_stop_vport(struct qed_hwfn *p_hwfn, int rc; vf->vport_instance--; + vf->spoof_chk = false; rc = qed_sp_vport_stop(p_hwfn, vf->opaque_fid, vf->vport_id); if (rc != 0) { @@ -2782,6 +2818,17 @@ void qed_iov_bulletin_set_forced_vlan(struct qed_hwfn 
*p_hwfn, qed_iov_configure_vport_forced(p_hwfn, vf_info, feature); } +static bool qed_iov_vf_has_vport_instance(struct qed_hwfn *p_hwfn, int vfid) +{ + struct qed_vf_info *p_vf_info; + + p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true); + if (!p_vf_info) + return false; + + return !!p_vf_info->vport_instance; +} + bool qed_iov_is_vf_stopped(struct qed_hwfn *p_hwfn, int vfid) { struct qed_vf_info *p_vf_info; @@ -2793,6 +2840,34 @@ bool qed_iov_is_vf_stopped(struct qed_hwfn *p_hwfn, int vfid) return p_vf_info->state == VF_STOPPED; } +int qed_iov_spoofchk_set(struct qed_hwfn *p_hwfn, int vfid, bool val) +{ + struct qed_vf_info *vf; + int rc = -EINVAL; + + if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) { + DP_NOTICE(p_hwfn, + "SR-IOV sanity check failed, can't set spoofchk\n"); + goto out; + } + + vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true); + if (!vf) + goto out; + + if (!qed_iov_vf_has_vport_instance(p_hwfn, vfid)) { + /* After VF VPORT start PF will configure spoof check */ + vf->req_spoofchk_val = val; + rc = 0; + goto out; + } + + rc = __qed_iov_spoofchk_set(p_hwfn, vf, val); + +out: + return rc; +} + static u8 *qed_iov_bulletin_get_forced_mac(struct qed_hwfn *p_hwfn, u16 rel_vf_id) { @@ -3179,6 +3254,21 @@ static int qed_set_vf_link_state(struct qed_dev *cdev, return 0; } +static int qed_spoof_configure(struct qed_dev *cdev, int vfid, bool val) +{ + int i, rc = -EINVAL; + + for_each_hwfn(cdev, i) { + struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; + + rc = qed_iov_spoofchk_set(p_hwfn, vfid, val); + if (rc) + break; + } + + return rc; +} + static int qed_configure_max_vf_rate(struct qed_dev *cdev, int vfid, int rate) { int i; @@ -3417,5 +3507,6 @@ const struct qed_iov_hv_ops qed_iov_ops_pass = { .set_mac = &qed_sriov_pf_set_mac, .set_vlan = &qed_sriov_pf_set_vlan, .set_link_state = &qed_set_vf_link_state, + .set_spoof = &qed_spoof_configure, .set_rate = &qed_set_vf_rate, }; diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.h b/drivers/net/ethernet/qlogic/qed/qed_sriov.h index ab3d291cf7f0..c8667c65e685 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.h +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.h @@ -154,6 +154,8 @@ struct qed_vf_info { u16 igu_sbs[QED_MAX_VF_CHAINS_PER_PF]; u8 num_active_rxqs; struct qed_public_vf_info p_vf_info; + bool spoof_chk; + bool req_spoofchk_val; /* Stores the configuration requested by VF */ struct qed_vf_shadow_config shadow_config; diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index 3d0f98f81122..a908bd69d252 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -1802,6 +1802,16 @@ static int qede_set_vf_rate(struct net_device *dev, int vfidx, max_tx_rate); } +static int qede_set_vf_spoofchk(struct net_device *dev, int vfidx, bool val) +{ + struct qede_dev *edev = netdev_priv(dev); + + if (!edev->ops) + return -EINVAL; + + return edev->ops->iov->set_spoof(edev->cdev, vfidx, val); +} + static int qede_set_vf_link_state(struct net_device *dev, int vfidx, int link_state) { @@ -2142,6 +2152,7 @@ static const struct net_device_ops qede_netdev_ops = { .ndo_get_stats64 = qede_get_stats64, #ifdef CONFIG_QED_SRIOV .ndo_set_vf_link_state = qede_set_vf_link_state, + .ndo_set_vf_spoofchk = qede_set_vf_spoofchk, .ndo_set_vf_rate = qede_set_vf_rate, #endif #ifdef CONFIG_QEDE_VXLAN diff --git a/include/linux/qed/qed_iov_if.h b/include/linux/qed/qed_iov_if.h index f364f2bd7a4d..2596d30d9e63 100644 --- a/include/linux/qed/qed_iov_if.h 
+++ b/include/linux/qed/qed_iov_if.h @@ -22,6 +22,8 @@ struct qed_iov_hv_ops { int (*set_link_state) (struct qed_dev *cdev, int vf_id, int link_state); + int (*set_spoof) (struct qed_dev *cdev, int vfid, bool val); + int (*set_rate) (struct qed_dev *cdev, int vfid, u32 min_rate, u32 max_rate); }; From 73390ac9d82bf9f0c849ff57b06a03145fbf05d6 Mon Sep 17 00:00:00 2001 From: Yuval Mintz Date: Wed, 11 May 2016 16:36:24 +0300 Subject: [PATCH 1513/1649] qed*: support ndo_get_vf_config Allows the user to view the VF configuration by observing the PF's device. Signed-off-by: Yuval Mintz Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed_sriov.c | 93 ++++++++++++++++++++ drivers/net/ethernet/qlogic/qede/qede_main.c | 12 +++ include/linux/qed/qed_iov_if.h | 3 + 3 files changed, 108 insertions(+) diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c index 804102c257e6..6af8fd9fd560 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c @@ -2588,6 +2588,30 @@ void qed_iov_set_link(struct qed_hwfn *p_hwfn, p_bulletin->capability_speed = p_caps->speed_capabilities; } +static void qed_iov_get_link(struct qed_hwfn *p_hwfn, + u16 vfid, + struct qed_mcp_link_params *p_params, + struct qed_mcp_link_state *p_link, + struct qed_mcp_link_capabilities *p_caps) +{ + struct qed_vf_info *p_vf = qed_iov_get_vf_info(p_hwfn, + vfid, + false); + struct qed_bulletin_content *p_bulletin; + + if (!p_vf) + return; + + p_bulletin = p_vf->bulletin.p_virt; + + if (p_params) + __qed_vf_get_link_params(p_hwfn, p_params, p_bulletin); + if (p_link) + __qed_vf_get_link_state(p_hwfn, p_link, p_bulletin); + if (p_caps) + __qed_vf_get_link_caps(p_hwfn, p_caps, p_bulletin); +} + static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, int vfid) { @@ -2840,6 +2864,17 @@ bool qed_iov_is_vf_stopped(struct qed_hwfn *p_hwfn, int vfid) return p_vf_info->state == VF_STOPPED; } +static bool qed_iov_spoofchk_get(struct qed_hwfn *p_hwfn, int vfid) +{ + struct qed_vf_info *vf_info; + + vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true); + if (!vf_info) + return false; + + return vf_info->spoof_chk; +} + int qed_iov_spoofchk_set(struct qed_hwfn *p_hwfn, int vfid, bool val) { struct qed_vf_info *vf; @@ -2937,6 +2972,23 @@ int qed_iov_configure_min_tx_rate(struct qed_dev *cdev, int vfid, u32 rate) return qed_configure_vport_wfq(cdev, vport_id, rate); } +static int qed_iov_get_vf_min_rate(struct qed_hwfn *p_hwfn, int vfid) +{ + struct qed_wfq_data *vf_vp_wfq; + struct qed_vf_info *vf_info; + + vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true); + if (!vf_info) + return 0; + + vf_vp_wfq = &p_hwfn->qm_info.wfq_data[vf_info->vport_id]; + + if (vf_vp_wfq->configured) + return vf_vp_wfq->min_speed; + else + return 0; +} + /** * qed_schedule_iov - schedules IOV task for VF and PF * @hwfn: hardware function pointer @@ -3153,6 +3205,46 @@ static int qed_sriov_pf_set_vlan(struct qed_dev *cdev, u16 vid, int vfid) return 0; } +static int qed_get_vf_config(struct qed_dev *cdev, + int vf_id, struct ifla_vf_info *ivi) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + struct qed_public_vf_info *vf_info; + struct qed_mcp_link_state link; + u32 tx_rate; + + /* Sanitize request */ + if (IS_VF(cdev)) + return -EINVAL; + + if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vf_id, true)) { + DP_VERBOSE(cdev, QED_MSG_IOV, + "VF index [%d] isn't active\n", vf_id); + return -EINVAL; + } + + vf_info = 
qed_iov_get_public_vf_info(hwfn, vf_id, true); + + qed_iov_get_link(hwfn, vf_id, NULL, &link, NULL); + + /* Fill information about VF */ + ivi->vf = vf_id; + + if (is_valid_ether_addr(vf_info->forced_mac)) + ether_addr_copy(ivi->mac, vf_info->forced_mac); + else + ether_addr_copy(ivi->mac, vf_info->mac); + + ivi->vlan = vf_info->forced_vlan; + ivi->spoofchk = qed_iov_spoofchk_get(hwfn, vf_id); + ivi->linkstate = vf_info->link_state; + tx_rate = vf_info->tx_rate; + ivi->max_tx_rate = tx_rate ? tx_rate : link.speed; + ivi->min_tx_rate = qed_iov_get_vf_min_rate(hwfn, vf_id); + + return 0; +} + void qed_inform_vf_link_state(struct qed_hwfn *hwfn) { struct qed_mcp_link_capabilities caps; @@ -3506,6 +3598,7 @@ const struct qed_iov_hv_ops qed_iov_ops_pass = { .configure = &qed_sriov_configure, .set_mac = &qed_sriov_pf_set_mac, .set_vlan = &qed_sriov_pf_set_vlan, + .get_config = &qed_get_vf_config, .set_link_state = &qed_set_vf_link_state, .set_spoof = &qed_spoof_configure, .set_rate = &qed_set_vf_rate, diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index a908bd69d252..7130ee7f87da 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -1793,6 +1793,17 @@ static struct rtnl_link_stats64 *qede_get_stats64( } #ifdef CONFIG_QED_SRIOV +static int qede_get_vf_config(struct net_device *dev, int vfidx, + struct ifla_vf_info *ivi) +{ + struct qede_dev *edev = netdev_priv(dev); + + if (!edev->ops) + return -EINVAL; + + return edev->ops->iov->get_config(edev->cdev, vfidx, ivi); +} + static int qede_set_vf_rate(struct net_device *dev, int vfidx, int min_tx_rate, int max_tx_rate) { @@ -2153,6 +2164,7 @@ static const struct net_device_ops qede_netdev_ops = { #ifdef CONFIG_QED_SRIOV .ndo_set_vf_link_state = qede_set_vf_link_state, .ndo_set_vf_spoofchk = qede_set_vf_spoofchk, + .ndo_get_vf_config = qede_get_vf_config, .ndo_set_vf_rate = qede_set_vf_rate, #endif #ifdef CONFIG_QEDE_VXLAN diff --git a/include/linux/qed/qed_iov_if.h b/include/linux/qed/qed_iov_if.h index 2596d30d9e63..5a4f8d0899e9 100644 --- a/include/linux/qed/qed_iov_if.h +++ b/include/linux/qed/qed_iov_if.h @@ -19,6 +19,9 @@ struct qed_iov_hv_ops { int (*set_vlan) (struct qed_dev *cdev, u16 vid, int vfid); + int (*get_config) (struct qed_dev *cdev, int vf_id, + struct ifla_vf_info *ivi); + int (*set_link_state) (struct qed_dev *cdev, int vf_id, int link_state); From 831bfb0e88b54726d6e027a1d547066ffeb8b27e Mon Sep 17 00:00:00 2001 From: Yuval Mintz Date: Wed, 11 May 2016 16:36:25 +0300 Subject: [PATCH 1514/1649] qed*: Tx-switching configuration Device should be configured by default to VEB once VFs are active. This changes the configuration of both PFs' and VFs' vports into enabling tx-switching once sriov is enabled. Signed-off-by: Yuval Mintz Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/qed/qed_dev.c | 3 ++- drivers/net/ethernet/qlogic/qed/qed_l2.c | 4 ++++ drivers/net/ethernet/qlogic/qed/qed_l2.h | 1 + drivers/net/ethernet/qlogic/qed/qed_main.c | 1 + drivers/net/ethernet/qlogic/qed/qed_sp.h | 3 ++- .../net/ethernet/qlogic/qed/qed_sp_commands.c | 5 +++- drivers/net/ethernet/qlogic/qed/qed_sriov.c | 1 + drivers/net/ethernet/qlogic/qed/qed_vf.c | 12 ++++++++++ drivers/net/ethernet/qlogic/qede/qede_main.c | 24 ++++++++++++++++++- include/linux/qed/qed_eth_if.h | 2 ++ include/linux/qed/qed_if.h | 1 + 11 files changed, 53 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index acaa2866dae3..6fb6016409c6 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -688,7 +688,8 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn, qed_int_igu_enable(p_hwfn, p_ptt, int_mode); /* send function start command */ - rc = qed_sp_pf_start(p_hwfn, p_tunn, p_hwfn->cdev->mf_mode); + rc = qed_sp_pf_start(p_hwfn, p_tunn, p_hwfn->cdev->mf_mode, + allow_npar_tx_switch); if (rc) DP_NOTICE(p_hwfn, "Function start ramrod failed\n"); } diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c index e0275a78b121..8fba87dd48af 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c @@ -99,6 +99,8 @@ int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn, break; } + p_ramrod->tx_switching_en = p_params->tx_switching; + /* Software Function ID in hwfn (PFs are 0 - 15, VFs are 16 - 135) */ p_ramrod->sw_fid = qed_concrete_to_sw_fid(p_hwfn->cdev, p_params->concrete_fid); @@ -1792,6 +1794,8 @@ static int qed_update_vport(struct qed_dev *cdev, params->update_vport_active_flg; sp_params.vport_active_rx_flg = params->vport_active_flg; sp_params.vport_active_tx_flg = params->vport_active_flg; + sp_params.update_tx_switching_flg = params->update_tx_switching_flg; + sp_params.tx_switching_flg = params->tx_switching_flg; sp_params.accept_any_vlan = params->accept_any_vlan; sp_params.update_accept_any_vlan_flg = params->update_accept_any_vlan_flg; diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.h b/drivers/net/ethernet/qlogic/qed/qed_l2.h index a04fb7f061ea..002114543451 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.h +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.h @@ -94,6 +94,7 @@ enum qed_tpa_mode { struct qed_sp_vport_start_params { enum qed_tpa_mode tpa_mode; bool remove_inner_vlan; + bool tx_switching; bool only_untagged; bool drop_ttl0; u8 max_buffers_per_cqe; diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index dcb782c14e5c..6ffc21da1415 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -216,6 +216,7 @@ int qed_fill_dev_info(struct qed_dev *cdev, dev_info->fw_rev = FW_REVISION_VERSION; dev_info->fw_eng = FW_ENGINEERING_VERSION; dev_info->mf_mode = cdev->mf_mode; + dev_info->tx_switching = true; } else { qed_vf_get_fw_version(&cdev->hwfns[0], &dev_info->fw_major, &dev_info->fw_minor, &dev_info->fw_rev, diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h index c2999cb5d1e2..ab5549f4e5ea 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h @@ -344,13 +344,14 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn, * @param p_hwfn * @param p_tunn * @param mode + * 
@param allow_npar_tx_switch * * @return int */ int qed_sp_pf_start(struct qed_hwfn *p_hwfn, struct qed_tunn_start_params *p_tunn, - enum qed_mf_mode mode); + enum qed_mf_mode mode, bool allow_npar_tx_switch); /** * @brief qed_sp_pf_stop - PF Function Stop Ramrod diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c index ed90947c451d..8c555ed1f949 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c @@ -299,7 +299,7 @@ qed_tunn_set_pf_start_params(struct qed_hwfn *p_hwfn, int qed_sp_pf_start(struct qed_hwfn *p_hwfn, struct qed_tunn_start_params *p_tunn, - enum qed_mf_mode mode) + enum qed_mf_mode mode, bool allow_npar_tx_switch) { struct pf_start_ramrod_data *p_ramrod = NULL; u16 sb = qed_int_get_sp_sb_id(p_hwfn); @@ -358,6 +358,9 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn, &p_ramrod->tunnel_config); p_hwfn->hw_info.personality = PERSONALITY_ETH; + if (IS_MF_SI(p_hwfn)) + p_ramrod->allow_npar_tx_switching = allow_npar_tx_switch; + if (p_hwfn->cdev->p_iov_info) { struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info; diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c index 6af8fd9fd560..d4df406ac0a4 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c @@ -1490,6 +1490,7 @@ static void qed_iov_vf_mbx_start_vport(struct qed_hwfn *p_hwfn, params.tpa_mode = start->tpa_mode; params.remove_inner_vlan = start->inner_vlan_removal; + params.tx_switching = true; params.only_untagged = vf_info->bulletin.p_virt->default_only_untagged; params.drop_ttl0 = false; diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c index db14e230c9a4..72e69c0ec10d 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_vf.c +++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c @@ -633,6 +633,18 @@ int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn, } } + if (p_params->update_tx_switching_flg) { + struct vfpf_vport_update_tx_switch_tlv *p_tx_switch_tlv; + + size = sizeof(struct vfpf_vport_update_tx_switch_tlv); + tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH; + p_tx_switch_tlv = qed_add_tlv(p_hwfn, &p_iov->offset, + tlv, size); + resp_size += sizeof(struct pfvf_def_resp_tlv); + + p_tx_switch_tlv->tx_switching = p_params->tx_switching_flg; + } + if (p_params->update_approx_mcast_flg) { struct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv; diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index 7130ee7f87da..8114541f327c 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -137,10 +137,26 @@ static int qede_set_vf_mac(struct net_device *ndev, int vfidx, u8 *mac) static int qede_sriov_configure(struct pci_dev *pdev, int num_vfs_param) { struct qede_dev *edev = netdev_priv(pci_get_drvdata(pdev)); + struct qed_dev_info *qed_info = &edev->dev_info.common; + int rc; DP_VERBOSE(edev, QED_MSG_IOV, "Requested %d VFs\n", num_vfs_param); - return edev->ops->iov->configure(edev->cdev, num_vfs_param); + rc = edev->ops->iov->configure(edev->cdev, num_vfs_param); + + /* Enable/Disable Tx switching for PF */ + if ((rc == num_vfs_param) && netif_running(edev->ndev) && + qed_info->mf_mode != QED_MF_NPAR && qed_info->tx_switching) { + struct qed_update_vport_params params; + + memset(¶ms, 0, sizeof(params)); + params.vport_id = 0; + params.update_tx_switching_flg = 1; + 
params.tx_switching_flg = num_vfs_param ? 1 : 0; + edev->ops->vport_update(edev->cdev, ¶ms); + } + + return rc; } #endif @@ -3291,6 +3307,12 @@ static int qede_start_queues(struct qede_dev *edev) vport_update_params.update_vport_active_flg = 1; vport_update_params.vport_active_flg = 1; + if ((qed_info->mf_mode == QED_MF_NPAR || pci_num_vf(edev->pdev)) && + qed_info->tx_switching) { + vport_update_params.update_tx_switching_flg = 1; + vport_update_params.tx_switching_flg = 1; + } + /* Fill struct with RSS params */ if (QEDE_RSS_CNT(edev) > 1) { vport_update_params.update_rss_flg = 1; diff --git a/include/linux/qed/qed_eth_if.h b/include/linux/qed/qed_eth_if.h index e0f6e6482031..6ae8cb4a61d3 100644 --- a/include/linux/qed/qed_eth_if.h +++ b/include/linux/qed/qed_eth_if.h @@ -35,6 +35,8 @@ struct qed_update_vport_params { u8 vport_id; u8 update_vport_active_flg; u8 vport_active_flg; + u8 update_tx_switching_flg; + u8 tx_switching_flg; u8 update_accept_any_vlan_flg; u8 accept_any_vlan; u8 update_rss_flg; diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index 76a6f168a190..0fd8f247e65f 100644 --- a/include/linux/qed/qed_if.h +++ b/include/linux/qed/qed_if.h @@ -93,6 +93,7 @@ struct qed_dev_info { u32 flash_size; u8 mf_mode; + bool tx_switching; }; enum qed_sb_type { From 20eb7ea93f7ffaaa24b19b8bfc411b1f7605759f Mon Sep 17 00:00:00 2001 From: David Spinadel Date: Tue, 3 May 2016 16:05:02 +0300 Subject: [PATCH 1515/1649] mac80211: remove disconnected APs from BSS table In some cases, after a sudden AP disappearing and reconnection to another AP in the same ESS, user space gets the old AP in scan results (cached). User space may decide to roam to that old AP which will cause a disconnection and longer recovery. Remove APs that are probably out of range from BSS table. Signed-off-by: David Spinadel Signed-off-by: Luca Coelho Signed-off-by: Johannes Berg --- net/mac80211/mlme.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 885f4ca0888d..8d426f637f58 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -2399,6 +2399,11 @@ static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata) return; } + /* AP is probably out of range (or not reachable for another reason) so + * remove the bss struct for that AP. + */ + cfg80211_unlink_bss(local->hw.wiphy, ifmgd->associated); + ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH, WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY, true, frame_buf); From f631a77ba920f7153a1094d09cd8f2ebbffd0328 Mon Sep 17 00:00:00 2001 From: Sara Sharon Date: Tue, 3 May 2016 15:59:44 +0300 Subject: [PATCH 1516/1649] mac80211: allow same PN for AMSDU sub-frames Some hardware (iwlwifi an example) de-aggregate AMSDUs and copy the IV as is to the generated MPDUs, so the same PN appears in multiple packets without being a replay attack. Allow driver to explicitly indicate that a frame is allowed to have the same PN as the previous frame. 
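As a rough sketch (not part of this patch), a driver whose hardware performs
the de-aggregation could tag every subframe after the first so that mac80211's
CCMP/GCMP replay check tolerates the repeated PN. The helper name and its
arguments below are illustrative only:

	#include <net/mac80211.h>

	/* Hypothetical driver RX path: the hardware de-aggregated an A-MSDU
	 * and copied the original IV into each generated MPDU, so every
	 * subframe after the first legitimately repeats the preceding PN.
	 */
	static void drv_mark_amsdu_subframe(struct ieee80211_rx_status *status,
					    bool first_subframe)
	{
		if (!first_subframe)
			status->flag |= RX_FLAG_ALLOW_SAME_PN;
	}

mac80211 then accepts a PN equal to (but still never lower than) the previous
frame's PN for such frames, as implemented in the wpa.c hunks below.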
Signed-off-by: Sara Sharon Signed-off-by: Luca Coelho Signed-off-by: Johannes Berg --- include/net/mac80211.h | 6 +++++- net/mac80211/wpa.c | 16 ++++++++++++---- 2 files changed, 17 insertions(+), 5 deletions(-) diff --git a/include/net/mac80211.h b/include/net/mac80211.h index 07ef9378df2b..ce2f6e3be3cf 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -1068,6 +1068,9 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info) * @RX_FLAG_RADIOTAP_VENDOR_DATA: This frame contains vendor-specific * radiotap data in the skb->data (before the frame) as described by * the &struct ieee80211_vendor_radiotap. + * @RX_FLAG_ALLOW_SAME_PN: Allow the same PN as same packet before. + * This is used for AMSDU subframes which can have the same PN as + * the first subframe. */ enum mac80211_rx_flags { RX_FLAG_MMIC_ERROR = BIT(0), @@ -1101,7 +1104,8 @@ enum mac80211_rx_flags { RX_FLAG_5MHZ = BIT(29), RX_FLAG_AMSDU_MORE = BIT(30), RX_FLAG_RADIOTAP_VENDOR_DATA = BIT(31), - RX_FLAG_MIC_STRIPPED = BIT_ULL(32), + RX_FLAG_MIC_STRIPPED = BIT_ULL(32), + RX_FLAG_ALLOW_SAME_PN = BIT_ULL(33), }; #define RX_FLAG_STBC_SHIFT 26 diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c index 7e4f2652bca7..b48c1e13e281 100644 --- a/net/mac80211/wpa.c +++ b/net/mac80211/wpa.c @@ -519,12 +519,16 @@ ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx, return RX_DROP_UNUSABLE; if (!(status->flag & RX_FLAG_PN_VALIDATED)) { + int res; + ccmp_hdr2pn(pn, skb->data + hdrlen); queue = rx->security_idx; - if (memcmp(pn, key->u.ccmp.rx_pn[queue], - IEEE80211_CCMP_PN_LEN) <= 0) { + res = memcmp(pn, key->u.ccmp.rx_pn[queue], + IEEE80211_CCMP_PN_LEN); + if (res < 0 || + (!res && !(status->flag & RX_FLAG_ALLOW_SAME_PN))) { key->u.ccmp.replays++; return RX_DROP_UNUSABLE; } @@ -745,12 +749,16 @@ ieee80211_crypto_gcmp_decrypt(struct ieee80211_rx_data *rx) return RX_DROP_UNUSABLE; if (!(status->flag & RX_FLAG_PN_VALIDATED)) { + int res; + gcmp_hdr2pn(pn, skb->data + hdrlen); queue = rx->security_idx; - if (memcmp(pn, key->u.gcmp.rx_pn[queue], - IEEE80211_GCMP_PN_LEN) <= 0) { + res = memcmp(pn, key->u.gcmp.rx_pn[queue], + IEEE80211_GCMP_PN_LEN); + if (res < 0 || + (!res && !(status->flag & RX_FLAG_ALLOW_SAME_PN))) { key->u.gcmp.replays++; return RX_DROP_UNUSABLE; } From 9e9ea43905597d9ba79b421b87c7851b8350717c Mon Sep 17 00:00:00 2001 From: Emmanuel Grumbach Date: Tue, 3 May 2016 16:08:07 +0300 Subject: [PATCH 1517/1649] cfg80211: allow finding vendor with OUI without specifying the OUI type This allows finding vendor IE from a specific vendor. Signed-off-by: Emmanuel Grumbach Signed-off-by: Luca Coelho Signed-off-by: Johannes Berg --- include/net/cfg80211.h | 4 ++-- net/wireless/scan.c | 8 ++++++-- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 1e008cddd41d..5f6e98ad21a2 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -3893,7 +3893,7 @@ const u8 *cfg80211_find_ie(u8 eid, const u8 *ies, int len); * cfg80211_find_vendor_ie - find vendor specific information element in data * * @oui: vendor OUI - * @oui_type: vendor-specific OUI type + * @oui_type: vendor-specific OUI type (must be < 0xff), negative means any * @ies: data consisting of IEs * @len: length of data * @@ -3905,7 +3905,7 @@ const u8 *cfg80211_find_ie(u8 eid, const u8 *ies, int len); * Note: There are no checks on the element length other than having to fit into * the given data. 
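+ *
+ * For example, cfg80211_find_vendor_ie(0x0050f2, -1, ies, len) returns the
+ * first Microsoft/WFA vendor-specific element in @ies regardless of its OUI
+ * type octet.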
*/ -const u8 *cfg80211_find_vendor_ie(unsigned int oui, u8 oui_type, +const u8 *cfg80211_find_vendor_ie(unsigned int oui, int oui_type, const u8 *ies, int len); /** diff --git a/net/wireless/scan.c b/net/wireless/scan.c index abdf651a70d9..ef2955c89a00 100644 --- a/net/wireless/scan.c +++ b/net/wireless/scan.c @@ -364,13 +364,16 @@ const u8 *cfg80211_find_ie(u8 eid, const u8 *ies, int len) } EXPORT_SYMBOL(cfg80211_find_ie); -const u8 *cfg80211_find_vendor_ie(unsigned int oui, u8 oui_type, +const u8 *cfg80211_find_vendor_ie(unsigned int oui, int oui_type, const u8 *ies, int len) { struct ieee80211_vendor_ie *ie; const u8 *pos = ies, *end = ies + len; int ie_oui; + if (WARN_ON(oui_type > 0xff)) + return NULL; + while (pos < end) { pos = cfg80211_find_ie(WLAN_EID_VENDOR_SPECIFIC, pos, end - pos); @@ -386,7 +389,8 @@ const u8 *cfg80211_find_vendor_ie(unsigned int oui, u8 oui_type, goto cont; ie_oui = ie->oui[0] << 16 | ie->oui[1] << 8 | ie->oui[2]; - if (ie_oui == oui && ie->oui_type == oui_type) + if (ie_oui == oui && + (oui_type < 0 || ie->oui_type == oui_type)) return pos; cont: pos += 2 + ie->len; From 8b9b2f06998f33bdd1774a9860ec60e945977384 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Tue, 3 May 2016 16:26:40 +0300 Subject: [PATCH 1518/1649] cfg80211: remove erroneous comment The devlist_mtx mutex was removed about two years ago, in favour of just using RTNL/RCU protection. Remove the comment still referencing it. Signed-off-by: Johannes Berg --- net/wireless/core.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/wireless/core.h b/net/wireless/core.h index ac44e77ac2f2..f75d7605bc38 100644 --- a/net/wireless/core.h +++ b/net/wireless/core.h @@ -53,7 +53,7 @@ struct cfg80211_registered_device { /* associated wireless interfaces, protected by rtnl or RCU */ struct list_head wdev_list; int devlist_generation, wdev_id; - int opencount; /* also protected by devlist_mtx */ + int opencount; wait_queue_head_t dev_wait; struct list_head beacon_registrations; From 53873f134d285191ef6435882d55837093a36c53 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Tue, 3 May 2016 16:52:04 +0300 Subject: [PATCH 1519/1649] cfg80211: make wdev_list accessible to drivers There's no harm in having drivers read the list, since they can use RCU protection or RTNL locking; allow this to not require each and every driver to also implement its own bookkeeping. Signed-off-by: Johannes Berg --- include/net/cfg80211.h | 5 +++++ net/wireless/chan.c | 2 +- net/wireless/core.c | 17 ++++++++++------- net/wireless/core.h | 3 +-- net/wireless/nl80211.c | 16 ++++++++-------- net/wireless/reg.c | 2 +- net/wireless/sme.c | 4 ++-- net/wireless/sysfs.c | 2 +- net/wireless/util.c | 4 ++-- 9 files changed, 31 insertions(+), 24 deletions(-) diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 5f6e98ad21a2..63921672bed0 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -3189,6 +3189,9 @@ struct wiphy_vendor_command { * @vht_capa_mod_mask: Specify what VHT capabilities can be over-ridden. * If null, then none can be over-ridden. * + * @wdev_list: the list of associated (virtual) interfaces; this list must + * not be modified by the driver, but can be read with RTNL/RCU protection. + * * @max_acl_mac_addrs: Maximum number of MAC addresses that the device * supports for ACL. 
* @@ -3328,6 +3331,8 @@ struct wiphy { const struct ieee80211_ht_cap *ht_capa_mod_mask; const struct ieee80211_vht_cap *vht_capa_mod_mask; + struct list_head wdev_list; + /* the network namespace this phy lives in currently */ possible_net_t _net; diff --git a/net/wireless/chan.c b/net/wireless/chan.c index a6631fb319c1..da49c0b1fd32 100644 --- a/net/wireless/chan.c +++ b/net/wireless/chan.c @@ -739,7 +739,7 @@ static bool cfg80211_ir_permissive_chan(struct wiphy *wiphy, * and thus fail the GO instantiation, consider only the interfaces of * the current registered device. */ - list_for_each_entry(wdev, &rdev->wdev_list, list) { + list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) { struct ieee80211_channel *other_chan = NULL; int r1, r2; diff --git a/net/wireless/core.c b/net/wireless/core.c index 7f7b9409bf4c..d25c82bc1bbe 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c @@ -3,6 +3,7 @@ * * Copyright 2006-2010 Johannes Berg * Copyright 2013-2014 Intel Mobile Communications GmbH + * Copyright 2015 Intel Deutschland GmbH */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt @@ -157,7 +158,7 @@ int cfg80211_switch_netns(struct cfg80211_registered_device *rdev, if (!(rdev->wiphy.flags & WIPHY_FLAG_NETNS_OK)) return -EOPNOTSUPP; - list_for_each_entry(wdev, &rdev->wdev_list, list) { + list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) { if (!wdev->netdev) continue; wdev->netdev->features &= ~NETIF_F_NETNS_LOCAL; @@ -171,7 +172,8 @@ int cfg80211_switch_netns(struct cfg80211_registered_device *rdev, /* failed -- clean up to old netns */ net = wiphy_net(&rdev->wiphy); - list_for_each_entry_continue_reverse(wdev, &rdev->wdev_list, + list_for_each_entry_continue_reverse(wdev, + &rdev->wiphy.wdev_list, list) { if (!wdev->netdev) continue; @@ -230,7 +232,7 @@ void cfg80211_shutdown_all_interfaces(struct wiphy *wiphy) ASSERT_RTNL(); - list_for_each_entry(wdev, &rdev->wdev_list, list) { + list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) { if (wdev->netdev) { dev_close(wdev->netdev); continue; @@ -298,7 +300,8 @@ void cfg80211_destroy_ifaces(struct cfg80211_registered_device *rdev) kfree(item); spin_unlock_irq(&rdev->destroy_list_lock); - list_for_each_entry_safe(wdev, tmp, &rdev->wdev_list, list) { + list_for_each_entry_safe(wdev, tmp, + &rdev->wiphy.wdev_list, list) { if (nlportid == wdev->owner_nlportid) rdev_del_virtual_intf(rdev, wdev); } @@ -410,7 +413,7 @@ use_default_name: dev_set_name(&rdev->wiphy.dev, PHY_NAME "%d", rdev->wiphy_idx); } - INIT_LIST_HEAD(&rdev->wdev_list); + INIT_LIST_HEAD(&rdev->wiphy.wdev_list); INIT_LIST_HEAD(&rdev->beacon_registrations); spin_lock_init(&rdev->beacon_registrations_lock); spin_lock_init(&rdev->bss_lock); @@ -799,7 +802,7 @@ void wiphy_unregister(struct wiphy *wiphy) nl80211_notify_wiphy(rdev, NL80211_CMD_DEL_WIPHY); rdev->wiphy.registered = false; - WARN_ON(!list_empty(&rdev->wdev_list)); + WARN_ON(!list_empty(&rdev->wiphy.wdev_list)); /* * First remove the hardware from everywhere, this makes @@ -1021,7 +1024,7 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb, spin_lock_init(&wdev->mgmt_registrations_lock); wdev->identifier = ++rdev->wdev_id; - list_add_rcu(&wdev->list, &rdev->wdev_list); + list_add_rcu(&wdev->list, &rdev->wiphy.wdev_list); rdev->devlist_generation++; /* can only change netns with wiphy */ dev->features |= NETIF_F_NETNS_LOCAL; diff --git a/net/wireless/core.h b/net/wireless/core.h index f75d7605bc38..025b7a5d508b 100644 --- a/net/wireless/core.h +++ b/net/wireless/core.h @@ -50,8 +50,7 @@ struct 
cfg80211_registered_device { /* wiphy index, internal only */ int wiphy_idx; - /* associated wireless interfaces, protected by rtnl or RCU */ - struct list_head wdev_list; + /* protected by RTNL */ int devlist_generation, wdev_id; int opencount; wait_queue_head_t dev_wait; diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 9bc84a2ddd34..d7599014055d 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -103,7 +103,7 @@ __cfg80211_wdev_from_attrs(struct net *netns, struct nlattr **attrs) if (have_wdev_id && rdev->wiphy_idx != wiphy_idx) continue; - list_for_each_entry(wdev, &rdev->wdev_list, list) { + list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) { if (have_ifidx && wdev->netdev && wdev->netdev->ifindex == ifidx) { result = wdev; @@ -149,7 +149,7 @@ __cfg80211_rdev_from_attrs(struct net *netns, struct nlattr **attrs) tmp = cfg80211_rdev_by_wiphy_idx(wdev_id >> 32); if (tmp) { /* make sure wdev exists */ - list_for_each_entry(wdev, &tmp->wdev_list, list) { + list_for_each_entry(wdev, &tmp->wiphy.wdev_list, list) { if (wdev->identifier != (u32)wdev_id) continue; found = true; @@ -535,7 +535,7 @@ static int nl80211_prepare_wdev_dump(struct sk_buff *skb, *rdev = wiphy_to_rdev(wiphy); *wdev = NULL; - list_for_each_entry(tmp, &(*rdev)->wdev_list, list) { + list_for_each_entry(tmp, &(*rdev)->wiphy.wdev_list, list) { if (tmp->identifier == cb->args[1]) { *wdev = tmp; break; @@ -2490,7 +2490,7 @@ static int nl80211_dump_interface(struct sk_buff *skb, struct netlink_callback * } if_idx = 0; - list_for_each_entry(wdev, &rdev->wdev_list, list) { + list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) { if (if_idx < if_start) { if_idx++; continue; @@ -2762,7 +2762,7 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info) spin_lock_init(&wdev->mgmt_registrations_lock); wdev->identifier = ++rdev->wdev_id; - list_add_rcu(&wdev->list, &rdev->wdev_list); + list_add_rcu(&wdev->list, &rdev->wiphy.wdev_list); rdev->devlist_generation++; break; default: @@ -3298,7 +3298,7 @@ static bool nl80211_get_ap_channel(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev; bool ret = false; - list_for_each_entry(wdev, &rdev->wdev_list, list) { + list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) { if (wdev->iftype != NL80211_IFTYPE_AP && wdev->iftype != NL80211_IFTYPE_P2P_GO) continue; @@ -10392,7 +10392,7 @@ static int nl80211_prepare_vendor_dump(struct sk_buff *skb, *wdev = NULL; if (cb->args[1]) { - list_for_each_entry(tmp, &(*rdev)->wdev_list, list) { + list_for_each_entry(tmp, &wiphy->wdev_list, list) { if (tmp->identifier == cb->args[1] - 1) { *wdev = tmp; break; @@ -13413,7 +13413,7 @@ static int nl80211_netlink_notify(struct notifier_block * nb, sched_scan_req->owner_nlportid == notify->portid) schedule_scan_stop = true; - list_for_each_entry_rcu(wdev, &rdev->wdev_list, list) { + list_for_each_entry_rcu(wdev, &rdev->wiphy.wdev_list, list) { cfg80211_mlme_unregister_socket(wdev, notify->portid); if (wdev->owner_nlportid == notify->portid) diff --git a/net/wireless/reg.c b/net/wireless/reg.c index e271dea6bc02..5dbac3749738 100644 --- a/net/wireless/reg.c +++ b/net/wireless/reg.c @@ -1639,7 +1639,7 @@ static void reg_leave_invalid_chans(struct wiphy *wiphy) ASSERT_RTNL(); - list_for_each_entry(wdev, &rdev->wdev_list, list) + list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) if (!reg_wdev_chan_valid(wiphy, wdev)) cfg80211_leave(rdev, wdev); } diff --git a/net/wireless/sme.c b/net/wireless/sme.c index d814279fb556..584fdc347221 
100644 --- a/net/wireless/sme.c +++ b/net/wireless/sme.c @@ -223,7 +223,7 @@ void cfg80211_conn_work(struct work_struct *work) rtnl_lock(); - list_for_each_entry(wdev, &rdev->wdev_list, list) { + list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) { if (!wdev->netdev) continue; @@ -617,7 +617,7 @@ static bool cfg80211_is_all_idle(void) * count as new regulatory hints. */ list_for_each_entry(rdev, &cfg80211_rdev_list, list) { - list_for_each_entry(wdev, &rdev->wdev_list, list) { + list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) { wdev_lock(wdev); if (wdev->conn || wdev->current_bss) is_all_idle = false; diff --git a/net/wireless/sysfs.c b/net/wireless/sysfs.c index 9cee0220665d..e46469bc130f 100644 --- a/net/wireless/sysfs.c +++ b/net/wireless/sysfs.c @@ -91,7 +91,7 @@ static void cfg80211_leave_all(struct cfg80211_registered_device *rdev) { struct wireless_dev *wdev; - list_for_each_entry(wdev, &rdev->wdev_list, list) + list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) cfg80211_leave(rdev, wdev); } diff --git a/net/wireless/util.c b/net/wireless/util.c index 7cfabd6e83c6..219bd197039e 100644 --- a/net/wireless/util.c +++ b/net/wireless/util.c @@ -986,7 +986,7 @@ void cfg80211_process_rdev_events(struct cfg80211_registered_device *rdev) ASSERT_RTNL(); - list_for_each_entry(wdev, &rdev->wdev_list, list) + list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) cfg80211_process_wdev_events(wdev); } @@ -1560,7 +1560,7 @@ int cfg80211_validate_beacon_int(struct cfg80211_registered_device *rdev, if (!beacon_int) return -EINVAL; - list_for_each_entry(wdev, &rdev->wdev_list, list) { + list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) { if (!wdev->beacon_interval) continue; if (wdev->beacon_interval != beacon_int) { From 46fa38e84b656f80edf83d21144221b0cad18d61 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Tue, 3 May 2016 16:58:00 +0300 Subject: [PATCH 1520/1649] mac80211: allow software PS-Poll/U-APSD with AP_LINK_PS When using RSS, frames might not be processed in the correct order, and thus AP_LINK_PS must be used; most likely with firmware keeping track of the powersave state, this is the case in iwlwifi now. In this case, the driver can use ieee80211_sta_ps_transition() to still have mac80211 manage powersave buffering. However, for U-APSD and PS-Poll this isn't sufficient. If the device can't manage that entirely on its own, mac80211's code should be used. To allow this, export two functions: ieee80211_sta_uapsd_trigger() and ieee80211_sta_pspoll(). Signed-off-by: Johannes Berg --- include/net/mac80211.h | 27 ++++++++++++++++ net/mac80211/rx.c | 70 ++++++++++++++++++++++++++---------------- 2 files changed, 71 insertions(+), 26 deletions(-) diff --git a/include/net/mac80211.h b/include/net/mac80211.h index ce2f6e3be3cf..be30b0549b88 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -3996,6 +3996,33 @@ static inline int ieee80211_sta_ps_transition_ni(struct ieee80211_sta *sta, return ret; } +/** + * ieee80211_sta_pspoll - PS-Poll frame received + * @sta: currently connected station + * + * When operating in AP mode with the %IEEE80211_HW_AP_LINK_PS flag set, + * use this function to inform mac80211 that a PS-Poll frame from a + * connected station was received. + * This must be used in conjunction with ieee80211_sta_ps_transition() + * and possibly ieee80211_sta_uapsd_trigger(); calls to all three must + * be serialized. 
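A minimal sketch of the intended driver-side use (the helper and its arguments are hypothetical; a real AP_LINK_PS driver would also report PM-bit changes via ieee80211_sta_ps_transition() and keep all three calls serialized, as required above):

#include <net/mac80211.h>

static void example_rx_ps_hooks(struct ieee80211_sta *sta,
				struct ieee80211_hdr *hdr)
{
	if (ieee80211_is_pspoll(hdr->frame_control))
		ieee80211_sta_pspoll(sta);
	else if (ieee80211_is_data_qos(hdr->frame_control))
		ieee80211_sta_uapsd_trigger(sta,
					    *ieee80211_get_qos_ctl(hdr) &
					    IEEE80211_QOS_CTL_TID_MASK);
}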
+ */ +void ieee80211_sta_pspoll(struct ieee80211_sta *sta); + +/** + * ieee80211_sta_uapsd_trigger - (potential) U-APSD trigger frame received + * @sta: currently connected station + * @tid: TID of the received (potential) trigger frame + * + * When operating in AP mode with the %IEEE80211_HW_AP_LINK_PS flag set, + * use this function to inform mac80211 that a (potential) trigger frame + * from a connected station was received. + * This must be used in conjunction with ieee80211_sta_ps_transition() + * and possibly ieee80211_sta_pspoll(); calls to all three must be + * serialized. + */ +void ieee80211_sta_uapsd_trigger(struct ieee80211_sta *sta, u8 tid); + /* * The TX headroom reserved by mac80211 for its own tx_status functions. * This is enough for the radiotap header. diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index c5678703921e..5e65e838992a 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -1319,13 +1319,52 @@ int ieee80211_sta_ps_transition(struct ieee80211_sta *pubsta, bool start) } EXPORT_SYMBOL(ieee80211_sta_ps_transition); +void ieee80211_sta_pspoll(struct ieee80211_sta *pubsta) +{ + struct sta_info *sta = container_of(pubsta, struct sta_info, sta); + + if (test_sta_flag(sta, WLAN_STA_SP)) + return; + + if (!test_sta_flag(sta, WLAN_STA_PS_DRIVER)) + ieee80211_sta_ps_deliver_poll_response(sta); + else + set_sta_flag(sta, WLAN_STA_PSPOLL); +} +EXPORT_SYMBOL(ieee80211_sta_pspoll); + +void ieee80211_sta_uapsd_trigger(struct ieee80211_sta *pubsta, u8 tid) +{ + struct sta_info *sta = container_of(pubsta, struct sta_info, sta); + u8 ac = ieee802_1d_to_ac[tid & 7]; + + /* + * If this AC is not trigger-enabled do nothing. + * + * NB: This could/should check a separate bitmap of trigger- + * enabled queues, but for now we only implement uAPSD w/o + * TSPEC changes to the ACs, so they're always the same. + */ + if (!(sta->sta.uapsd_queues & BIT(ac))) + return; + + /* if we are in a service period, do nothing */ + if (test_sta_flag(sta, WLAN_STA_SP)) + return; + + if (!test_sta_flag(sta, WLAN_STA_PS_DRIVER)) + ieee80211_sta_ps_deliver_uapsd(sta); + else + set_sta_flag(sta, WLAN_STA_UAPSD); +} +EXPORT_SYMBOL(ieee80211_sta_uapsd_trigger); + static ieee80211_rx_result debug_noinline ieee80211_rx_h_uapsd_and_pspoll(struct ieee80211_rx_data *rx) { struct ieee80211_sub_if_data *sdata = rx->sdata; struct ieee80211_hdr *hdr = (void *)rx->skb->data; struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); - int tid, ac; if (!rx->sta) return RX_CONTINUE; @@ -1351,12 +1390,7 @@ ieee80211_rx_h_uapsd_and_pspoll(struct ieee80211_rx_data *rx) return RX_CONTINUE; if (unlikely(ieee80211_is_pspoll(hdr->frame_control))) { - if (!test_sta_flag(rx->sta, WLAN_STA_SP)) { - if (!test_sta_flag(rx->sta, WLAN_STA_PS_DRIVER)) - ieee80211_sta_ps_deliver_poll_response(rx->sta); - else - set_sta_flag(rx->sta, WLAN_STA_PSPOLL); - } + ieee80211_sta_pspoll(&rx->sta->sta); /* Free PS Poll skb here instead of returning RX_DROP that would * count as an dropped frame. */ @@ -1368,27 +1402,11 @@ ieee80211_rx_h_uapsd_and_pspoll(struct ieee80211_rx_data *rx) ieee80211_has_pm(hdr->frame_control) && (ieee80211_is_data_qos(hdr->frame_control) || ieee80211_is_qos_nullfunc(hdr->frame_control))) { + u8 tid; + tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK; - ac = ieee802_1d_to_ac[tid & 7]; - /* - * If this AC is not trigger-enabled do nothing. 
- * - * NB: This could/should check a separate bitmap of trigger- - * enabled queues, but for now we only implement uAPSD w/o - * TSPEC changes to the ACs, so they're always the same. - */ - if (!(rx->sta->sta.uapsd_queues & BIT(ac))) - return RX_CONTINUE; - - /* if we are in a service period, do nothing */ - if (test_sta_flag(rx->sta, WLAN_STA_SP)) - return RX_CONTINUE; - - if (!test_sta_flag(rx->sta, WLAN_STA_PS_DRIVER)) - ieee80211_sta_ps_deliver_uapsd(rx->sta); - else - set_sta_flag(rx->sta, WLAN_STA_UAPSD); + ieee80211_sta_uapsd_trigger(&rx->sta->sta, tid); } return RX_CONTINUE; From 27ee441a43392ebe5b027fe5d78640e839673d21 Mon Sep 17 00:00:00 2001 From: Pablo Neira Date: Thu, 12 May 2016 17:16:31 +0200 Subject: [PATCH 1521/1649] gtp: put back reference to netns when not required anymore This patch fixes a netns leak. Fixes: 93edb8c7f94f ("gtp: reload GTPv1 header after pskb_may_pull()") Reported-by: Cong Wang Signed-off-by: Pablo Neira Ayuso Signed-off-by: David S. Miller --- drivers/net/gtp.c | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c index f7caf1e35d83..4e976a0d5a76 100644 --- a/drivers/net/gtp.c +++ b/drivers/net/gtp.c @@ -1028,8 +1028,11 @@ static int gtp_genl_new_pdp(struct sk_buff *skb, struct genl_info *info) /* Check if there's an existing gtpX device to configure */ dev = gtp_find_dev(net, nla_get_u32(info->attrs[GTPA_LINK])); - if (dev == NULL) + if (dev == NULL) { + put_net(net); return -ENODEV; + } + put_net(net); return ipv4_pdp_add(dev, info); } @@ -1051,8 +1054,11 @@ static int gtp_genl_del_pdp(struct sk_buff *skb, struct genl_info *info) /* Check if there's an existing gtpX device to configure */ dev = gtp_find_dev(net, nla_get_u32(info->attrs[GTPA_LINK])); - if (dev == NULL) + if (dev == NULL) { + put_net(net); return -ENODEV; + } + put_net(net); gtp = netdev_priv(dev); @@ -1163,8 +1169,11 @@ static int gtp_genl_get_pdp(struct sk_buff *skb, struct genl_info *info) /* Check if there's an existing gtpX device to configure */ dev = gtp_find_dev(net, nla_get_u32(info->attrs[GTPA_LINK])); - if (dev == NULL) + if (dev == NULL) { + put_net(net); return -ENODEV; + } + put_net(net); gtp = netdev_priv(dev); From 23f72215bc030d1eea437713abfef91c635e2e1f Mon Sep 17 00:00:00 2001 From: Haishuang Yan Date: Wed, 11 May 2016 18:48:31 +0800 Subject: [PATCH 1522/1649] ip6_gre: Fix get_size calculation for gre6 tunnel Do not include attribute IFLA_GRE_TOS. Signed-off-by: Haishuang Yan Signed-off-by: David S. 
Miller --- net/ipv6/ip6_gre.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index ee62ec469ab3..3c25fe67d3da 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c @@ -1394,8 +1394,6 @@ static size_t ip6gre_get_size(const struct net_device *dev) nla_total_size(sizeof(struct in6_addr)) + /* IFLA_GRE_TTL */ nla_total_size(1) + - /* IFLA_GRE_TOS */ - nla_total_size(1) + /* IFLA_GRE_ENCAP_LIMIT */ nla_total_size(1) + /* IFLA_GRE_FLOWINFO */ @@ -1420,7 +1418,6 @@ static int ip6gre_fill_info(struct sk_buff *skb, const struct net_device *dev) nla_put_in6_addr(skb, IFLA_GRE_LOCAL, &p->laddr) || nla_put_in6_addr(skb, IFLA_GRE_REMOTE, &p->raddr) || nla_put_u8(skb, IFLA_GRE_TTL, p->hop_limit) || - /*nla_put_u8(skb, IFLA_GRE_TOS, t->priority) ||*/ nla_put_u8(skb, IFLA_GRE_ENCAP_LIMIT, p->encap_limit) || nla_put_be32(skb, IFLA_GRE_FLOWINFO, p->flowinfo) || nla_put_u32(skb, IFLA_GRE_FLAGS, p->flags)) From da73b4e9538b9be96498241ab3f13eab94181e96 Mon Sep 17 00:00:00 2001 From: Haishuang Yan Date: Wed, 11 May 2016 18:48:32 +0800 Subject: [PATCH 1523/1649] gre: Fix wrong tpi->proto in WCCP When dealing with WCCP in gre6 tunnel, it sets the wrong tpi->protocol, that is, ETH_P_IP instead of ETH_P_IPV6 for the encapuslated traffic. Signed-off-by: Haishuang Yan Signed-off-by: David S. Miller --- include/net/gre.h | 2 +- net/ipv4/gre_demux.c | 6 +++--- net/ipv4/ip_gre.c | 4 ++-- net/ipv6/ip6_gre.c | 2 +- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/include/net/gre.h b/include/net/gre.h index a14093c70eab..5dce30a6abe3 100644 --- a/include/net/gre.h +++ b/include/net/gre.h @@ -26,7 +26,7 @@ int gre_del_protocol(const struct gre_protocol *proto, u8 version); struct net_device *gretap_fb_dev_create(struct net *net, const char *name, u8 name_assign_type); int gre_parse_header(struct sk_buff *skb, struct tnl_ptk_info *tpi, - bool *csum_err); + bool *csum_err, __be16 proto); static inline int gre_calc_hlen(__be16 o_flags) { diff --git a/net/ipv4/gre_demux.c b/net/ipv4/gre_demux.c index d78e2eefc0f7..4c39f4fd332a 100644 --- a/net/ipv4/gre_demux.c +++ b/net/ipv4/gre_demux.c @@ -62,7 +62,7 @@ EXPORT_SYMBOL_GPL(gre_del_protocol); /* Fills in tpi and returns header length to be pulled. */ int gre_parse_header(struct sk_buff *skb, struct tnl_ptk_info *tpi, - bool *csum_err) + bool *csum_err, __be16 proto) { const struct gre_base_hdr *greh; __be32 *options; @@ -109,11 +109,11 @@ int gre_parse_header(struct sk_buff *skb, struct tnl_ptk_info *tpi, tpi->seq = 0; } /* WCCP version 1 and 2 protocol decoding. - * - Change protocol to IP + * - Change protocol to IPv4/IPv6 * - When dealing with WCCPv2, Skip extra 4 bytes in GRE header */ if (greh->flags == 0 && tpi->proto == htons(ETH_P_WCCP)) { - tpi->proto = htons(ETH_P_IP); + tpi->proto = proto; if ((*(u8 *)options & 0xF0) != 0x40) hdr_len += 4; } diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index 2b267e71ebf5..aaeb478b54cd 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c @@ -222,7 +222,7 @@ static void gre_err(struct sk_buff *skb, u32 info) struct tnl_ptk_info tpi; bool csum_err = false; - if (gre_parse_header(skb, &tpi, &csum_err) < 0) { + if (gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IP)) < 0) { if (!csum_err) /* ignore csum errors. 
*/ return; } @@ -335,7 +335,7 @@ static int gre_rcv(struct sk_buff *skb) } #endif - hdr_len = gre_parse_header(skb, &tpi, &csum_err); + hdr_len = gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IP)); if (hdr_len < 0) goto drop; diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index 3c25fe67d3da..4541fa54035e 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c @@ -468,7 +468,7 @@ static int gre_rcv(struct sk_buff *skb) bool csum_err = false; int hdr_len; - hdr_len = gre_parse_header(skb, &tpi, &csum_err); + hdr_len = gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IPV6)); if (hdr_len < 0) goto drop; From f893a99e7e2125ed7874d69cb309ca40a0bea371 Mon Sep 17 00:00:00 2001 From: Fabio Estevam Date: Wed, 11 May 2016 17:02:05 -0300 Subject: [PATCH 1524/1649] phy: micrel: Use MICREL_PHY_ID_MASK definition Replace the hardcoded mask 0x00fffff0 with MICREL_PHY_ID_MASK for better readability. Suggested-by: Andrew Lunn Signed-off-by: Fabio Estevam Reviewed-by: Andrew Lunn Acked-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/phy/micrel.c | 34 +++++++++++++++++----------------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c index 4516c8a4fd82..5a8fefc25157 100644 --- a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c @@ -726,7 +726,7 @@ static int kszphy_probe(struct phy_device *phydev) static struct phy_driver ksphy_driver[] = { { .phy_id = PHY_ID_KS8737, - .phy_id_mask = 0x00fffff0, + .phy_id_mask = MICREL_PHY_ID_MASK, .name = "Micrel KS8737", .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause), .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, @@ -781,7 +781,7 @@ static struct phy_driver ksphy_driver[] = { .resume = genphy_resume, }, { .phy_id = PHY_ID_KSZ8041, - .phy_id_mask = 0x00fffff0, + .phy_id_mask = MICREL_PHY_ID_MASK, .name = "Micrel KSZ8041", .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause | SUPPORTED_Asym_Pause), @@ -800,7 +800,7 @@ static struct phy_driver ksphy_driver[] = { .resume = genphy_resume, }, { .phy_id = PHY_ID_KSZ8041RNLI, - .phy_id_mask = 0x00fffff0, + .phy_id_mask = MICREL_PHY_ID_MASK, .name = "Micrel KSZ8041RNLI", .features = PHY_BASIC_FEATURES | SUPPORTED_Pause | SUPPORTED_Asym_Pause, @@ -819,7 +819,7 @@ static struct phy_driver ksphy_driver[] = { .resume = genphy_resume, }, { .phy_id = PHY_ID_KSZ8051, - .phy_id_mask = 0x00fffff0, + .phy_id_mask = MICREL_PHY_ID_MASK, .name = "Micrel KSZ8051", .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause | SUPPORTED_Asym_Pause), @@ -857,7 +857,7 @@ static struct phy_driver ksphy_driver[] = { }, { .phy_id = PHY_ID_KSZ8081, .name = "Micrel KSZ8081 or KSZ8091", - .phy_id_mask = 0x00fffff0, + .phy_id_mask = MICREL_PHY_ID_MASK, .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause), .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, .driver_data = &ksz8081_type, @@ -875,7 +875,7 @@ static struct phy_driver ksphy_driver[] = { }, { .phy_id = PHY_ID_KSZ8061, .name = "Micrel KSZ8061", - .phy_id_mask = 0x00fffff0, + .phy_id_mask = MICREL_PHY_ID_MASK, .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause), .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, .config_init = kszphy_config_init, @@ -909,7 +909,7 @@ static struct phy_driver ksphy_driver[] = { .write_mmd_indirect = ksz9021_wr_mmd_phyreg, }, { .phy_id = PHY_ID_KSZ9031, - .phy_id_mask = 0x00fffff0, + .phy_id_mask = MICREL_PHY_ID_MASK, .name = "Micrel KSZ9031 Gigabit PHY", .features = (PHY_GBIT_FEATURES | SUPPORTED_Pause), .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, @@ -926,7 +926,7 @@ static 
struct phy_driver ksphy_driver[] = { .resume = genphy_resume, }, { .phy_id = PHY_ID_KSZ8873MLL, - .phy_id_mask = 0x00fffff0, + .phy_id_mask = MICREL_PHY_ID_MASK, .name = "Micrel KSZ8873MLL Switch", .features = (SUPPORTED_Pause | SUPPORTED_Asym_Pause), .flags = PHY_HAS_MAGICANEG, @@ -940,7 +940,7 @@ static struct phy_driver ksphy_driver[] = { .resume = genphy_resume, }, { .phy_id = PHY_ID_KSZ886X, - .phy_id_mask = 0x00fffff0, + .phy_id_mask = MICREL_PHY_ID_MASK, .name = "Micrel KSZ886X Switch", .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause), .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, @@ -962,17 +962,17 @@ MODULE_LICENSE("GPL"); static struct mdio_device_id __maybe_unused micrel_tbl[] = { { PHY_ID_KSZ9021, 0x000ffffe }, - { PHY_ID_KSZ9031, 0x00fffff0 }, + { PHY_ID_KSZ9031, MICREL_PHY_ID_MASK }, { PHY_ID_KSZ8001, 0x00ffffff }, - { PHY_ID_KS8737, 0x00fffff0 }, + { PHY_ID_KS8737, MICREL_PHY_ID_MASK }, { PHY_ID_KSZ8021, 0x00ffffff }, { PHY_ID_KSZ8031, 0x00ffffff }, - { PHY_ID_KSZ8041, 0x00fffff0 }, - { PHY_ID_KSZ8051, 0x00fffff0 }, - { PHY_ID_KSZ8061, 0x00fffff0 }, - { PHY_ID_KSZ8081, 0x00fffff0 }, - { PHY_ID_KSZ8873MLL, 0x00fffff0 }, - { PHY_ID_KSZ886X, 0x00fffff0 }, + { PHY_ID_KSZ8041, MICREL_PHY_ID_MASK }, + { PHY_ID_KSZ8051, MICREL_PHY_ID_MASK }, + { PHY_ID_KSZ8061, MICREL_PHY_ID_MASK }, + { PHY_ID_KSZ8081, MICREL_PHY_ID_MASK }, + { PHY_ID_KSZ8873MLL, MICREL_PHY_ID_MASK }, + { PHY_ID_KSZ886X, MICREL_PHY_ID_MASK }, { } }; From e7142c341c9ce3678f3533a2cfbf8477a09a95ad Mon Sep 17 00:00:00 2001 From: Jon Paul Maloy Date: Wed, 11 May 2016 19:15:45 -0400 Subject: [PATCH 1525/1649] tipc: eliminate risk of double link_up events When an ACTIVATE or data packet is received in a link in state ESTABLISHING, the link does not immediately change state to ESTABLISHED, but does instead return a LINK_UP event to the caller, which will execute the state change in a different lock context. This non-atomic approach incurs a low risk that we may have two LINK_UP events pending simultaneously for the same link, resulting in the final part of the setup procedure being executed twice. The only potential harm caused by this it that we may see two LINK_UP events issued to subsribers of the topology server, something that may cause confusion. This commit eliminates this risk by checking if the link is already up before proceeding with the second half of the setup. Signed-off-by: Jon Maloy Signed-off-by: David S. Miller --- net/tipc/node.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/tipc/node.c b/net/tipc/node.c index d903f560e2fd..e01e2c71b5a1 100644 --- a/net/tipc/node.c +++ b/net/tipc/node.c @@ -542,7 +542,7 @@ static void __tipc_node_link_up(struct tipc_node *n, int bearer_id, struct tipc_link *ol = node_active_link(n, 0); struct tipc_link *nl = n->links[bearer_id].link; - if (!nl) + if (!nl || tipc_link_is_up(nl)) return; tipc_link_fsm_evt(nl, LINK_ESTABLISH_EVT); From b4411457d5c9062f07f0762f1ddb513d90dd1379 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Thu, 12 May 2016 21:41:39 -0700 Subject: [PATCH 1526/1649] sunrpc: set SOCK_FASYNC sunrpc is using SOCKWQ_ASYNC_NOSPACE without setting SOCK_FASYNC, so the recent optimizations done in sk_set_bit() and sk_clear_bit() broke it. There is still the risk that a subsequent sock_fasync() call would clear SOCK_FASYNC, but sunrpc does not use this yet. 
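A condensed sketch of the shape of the fix (the helper is hypothetical; the real change adds one sock_set_flag() call to each of the three xprtsock connect paths in the diff below):

#include <net/sock.h>

static void example_install_sk_callbacks(struct sock *sk, void *xprt)
{
	sk->sk_user_data = xprt;
	/* without SOCK_FASYNC, the optimized sk_set_bit()/sk_clear_bit()
	 * skip the SOCKWQ_ASYNC_NOSPACE handling that sunrpc relies on */
	sock_set_flag(sk, SOCK_FASYNC);
	sk->sk_allocation = GFP_NOIO;
}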
Fixes: 9317bb69824e ("net: SOCKWQ_ASYNC_NOSPACE optimizations") Signed-off-by: Eric Dumazet Reported-by: Jiri Pirko Reported-by: Huang, Ying Tested-by: Jiri Pirko Tested-by: Huang, Ying Signed-off-by: David S. Miller --- net/sunrpc/xprtsock.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index a6c68dc086af..b90c5397b5e1 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -1950,6 +1950,7 @@ static int xs_local_finish_connecting(struct rpc_xprt *xprt, sk->sk_user_data = xprt; sk->sk_data_ready = xs_data_ready; sk->sk_write_space = xs_udp_write_space; + sock_set_flag(sk, SOCK_FASYNC); sk->sk_error_report = xs_error_report; sk->sk_allocation = GFP_NOIO; @@ -2136,6 +2137,7 @@ static void xs_udp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock) sk->sk_user_data = xprt; sk->sk_data_ready = xs_data_ready; sk->sk_write_space = xs_udp_write_space; + sock_set_flag(sk, SOCK_FASYNC); sk->sk_allocation = GFP_NOIO; xprt_set_connected(xprt); @@ -2237,6 +2239,7 @@ static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock) sk->sk_data_ready = xs_tcp_data_ready; sk->sk_state_change = xs_tcp_state_change; sk->sk_write_space = xs_tcp_write_space; + sock_set_flag(sk, SOCK_FASYNC); sk->sk_error_report = xs_error_report; sk->sk_allocation = GFP_NOIO; From ed7cbbce544856b20e5811de373cf92e92499771 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Thu, 12 May 2016 16:23:44 -0700 Subject: [PATCH 1527/1649] udp: Resolve NULL pointer dereference over flow-based vxlan device While testing an OpenStack configuration using VXLANs I saw the following call trace: RIP: 0010:[] udp4_lib_lookup_skb+0x49/0x80 RSP: 0018:ffff88103867bc50 EFLAGS: 00010286 RAX: ffff88103269bf00 RBX: ffff88103269bf00 RCX: 00000000ffffffff RDX: 0000000000004300 RSI: 0000000000000000 RDI: ffff880f2932e780 RBP: ffff88103867bc60 R08: 0000000000000000 R09: 000000009001a8c0 R10: 0000000000004400 R11: ffffffff81333a58 R12: ffff880f2932e794 R13: 0000000000000014 R14: 0000000000000014 R15: ffffe8efbfd89ca0 FS: 0000000000000000(0000) GS:ffff88103fd80000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 0000000000000488 CR3: 0000000001c06000 CR4: 00000000001426e0 Stack: ffffffff81576515 ffffffff815733c0 ffff88103867bc98 ffffffff815fcc17 ffff88103269bf00 ffffe8efbfd89ca0 0000000000000014 0000000000000080 ffffe8efbfd89ca0 ffff88103867bcc8 ffffffff815fcf8b ffff880f2932e794 Call Trace: [] ? skb_checksum+0x35/0x50 [] ? skb_push+0x40/0x40 [] udp_gro_receive+0x57/0x130 [] udp4_gro_receive+0x10b/0x2c0 [] inet_gro_receive+0x1d3/0x270 [] dev_gro_receive+0x269/0x3b0 [] napi_gro_receive+0x38/0x120 [] gro_cell_poll+0x57/0x80 [vxlan] [] net_rx_action+0x160/0x380 [] __do_softirq+0xd7/0x2c5 [] run_ksoftirqd+0x29/0x50 [] smpboot_thread_fn+0x10f/0x160 [] ? sort_range+0x30/0x30 [] kthread+0xd8/0xf0 [] ret_from_fork+0x22/0x40 [] ? kthread_park+0x60/0x60 The following trace is seen when receiving a DHCP request over a flow-based VXLAN tunnel. I believe this is caused by the metadata dst having a NULL dev value and as a result dev_net(dev) is causing a NULL pointer dereference. To resolve this I am replacing the check for skb_dst(skb)->dev with just skb->dev. This makes sense as the callers of this function are usually in the receive path and as such skb->dev should always be populated. In addition other functions in the area where these are called are already using dev_net(skb->dev) to determine the namespace the UDP packet belongs in. 
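A one-line sketch of the rule the patch below standardizes on (the helper is hypothetical): on the receive path skb->dev is always populated, whereas skb_dst(skb) may be a metadata dst whose dev is NULL, as in the flow-based VXLAN trace above.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static struct net *example_rx_netns(const struct sk_buff *skb)
{
	/* safe even for tunnel metadata dsts with a NULL dst->dev */
	return dev_net(skb->dev);
}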
Fixes: 63058308cd55 ("udp: Add udp6_lib_lookup_skb and udp4_lib_lookup_skb") Signed-off-by: Alexander Duyck Acked-by: Eric Dumazet Signed-off-by: David S. Miller --- net/ipv4/udp.c | 10 ++-------- net/ipv6/udp.c | 8 +++----- 2 files changed, 5 insertions(+), 13 deletions(-) diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index f67f52ba4809..2e3ebfe5549e 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -604,7 +604,7 @@ static inline struct sock *__udp4_lib_lookup_skb(struct sk_buff *skb, { const struct iphdr *iph = ip_hdr(skb); - return __udp4_lib_lookup(dev_net(skb_dst(skb)->dev), iph->saddr, sport, + return __udp4_lib_lookup(dev_net(skb->dev), iph->saddr, sport, iph->daddr, dport, inet_iif(skb), udptable, skb); } @@ -612,13 +612,7 @@ static inline struct sock *__udp4_lib_lookup_skb(struct sk_buff *skb, struct sock *udp4_lib_lookup_skb(struct sk_buff *skb, __be16 sport, __be16 dport) { - const struct iphdr *iph = ip_hdr(skb); - const struct net_device *dev = - skb_dst(skb) ? skb_dst(skb)->dev : skb->dev; - - return __udp4_lib_lookup(dev_net(dev), iph->saddr, sport, - iph->daddr, dport, inet_iif(skb), - &udp_table, skb); + return __udp4_lib_lookup_skb(skb, sport, dport, &udp_table); } EXPORT_SYMBOL_GPL(udp4_lib_lookup_skb); diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index aca06094110f..2ba6a77a8815 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -315,13 +315,13 @@ static struct sock *__udp6_lib_lookup_skb(struct sk_buff *skb, __be16 sport, __be16 dport, struct udp_table *udptable) { - struct sock *sk; const struct ipv6hdr *iph = ipv6_hdr(skb); + struct sock *sk; sk = skb_steal_sock(skb); if (unlikely(sk)) return sk; - return __udp6_lib_lookup(dev_net(skb_dst(skb)->dev), &iph->saddr, sport, + return __udp6_lib_lookup(dev_net(skb->dev), &iph->saddr, sport, &iph->daddr, dport, inet6_iif(skb), udptable, skb); } @@ -330,10 +330,8 @@ struct sock *udp6_lib_lookup_skb(struct sk_buff *skb, __be16 sport, __be16 dport) { const struct ipv6hdr *iph = ipv6_hdr(skb); - const struct net_device *dev = - skb_dst(skb) ? skb_dst(skb)->dev : skb->dev; - return __udp6_lib_lookup(dev_net(dev), &iph->saddr, sport, + return __udp6_lib_lookup(dev_net(skb->dev), &iph->saddr, sport, &iph->daddr, dport, inet6_iif(skb), &udp_table, skb); } From bf389cabb3b8079c23f9762e62b05f291e2d5e99 Mon Sep 17 00:00:00 2001 From: Jiri Slaby Date: Fri, 13 May 2016 10:38:49 +0200 Subject: [PATCH 1528/1649] Bluetooth: fix power_on vs close race With all the latest fixes applied, I am still able to reproduce this (and other) warning(s): WARNING: CPU: 1 PID: 19684 at ../kernel/workqueue.c:4092 destroy_workqueue+0x70a/0x770() ... Call Trace: [] ? dump_stack+0xb3/0x112 [] ? warn_slowpath_common+0xde/0x140 [] ? destroy_workqueue+0x70a/0x770 [] ? warn_slowpath_null+0x2e/0x40 [] ? destroy_workqueue+0x70a/0x770 [] ? hci_unregister_dev+0x2a9/0x720 [bluetooth] [] ? vhci_release+0x7b/0xf0 [hci_vhci] [] ? vhci_flush+0x50/0x50 [hci_vhci] [] ? do_exit+0x863/0x2b90 This is due to race present in the hci_unregister_dev path. hdev->power_on work races with hci_dev_do_close. One tries to open, the other tries to close, leading to warning like the above. (Another example is a warning in kobject_get or kobject_put depending on who wins the race.) Fix this by switching those two racers to ensure hdev->power_on never triggers while hci_dev_do_close is in progress. 
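The ordering pattern the patch below applies to hci_unregister_dev(), reduced to a sketch (the helper is hypothetical): cancel and wait for the power-on work first, so it cannot re-open the device while the close path runs.

#include <linux/workqueue.h>

static void example_teardown(struct work_struct *power_on_work)
{
	cancel_work_sync(power_on_work);	/* waits for any in-flight open */
	/* only now run the close path, i.e. hci_dev_do_close() */
}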
Signed-off-by: Jiri Slaby Signed-off-by: Marcel Holtmann --- net/bluetooth/hci_core.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index 2713fc86e85a..45a9fc68c677 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -3139,10 +3139,10 @@ void hci_unregister_dev(struct hci_dev *hdev) list_del(&hdev->list); write_unlock(&hci_dev_list_lock); - hci_dev_do_close(hdev); - cancel_work_sync(&hdev->power_on); + hci_dev_do_close(hdev); + if (!test_bit(HCI_INIT, &hdev->flags) && !hci_dev_test_flag(hdev, HCI_SETUP) && !hci_dev_test_flag(hdev, HCI_CONFIG)) { From 72f9f8b58bc743e6b6abdc68f60db98486c3ffcf Mon Sep 17 00:00:00 2001 From: Lauro Costa Date: Mon, 9 May 2016 17:36:11 -0300 Subject: [PATCH 1529/1649] Bluetooth: Add USB ID 13D3:3487 to ath3k Add hw id to ath3k usb device list and btusb blacklist T: Bus=01 Lev=01 Prnt=01 Port=08 Cnt=02 Dev#= 4 Spd=12 MxCh= 0 D: Ver= 1.10 Cls=e0(wlcon) Sub=01 Prot=01 MxPS=64 #Cfgs= 1 P: Vendor=13d3 ProdID=3487 Rev=00.02 C: #Ifs= 2 Cfg#= 1 Atr=e0 MxPwr=100mA I: If#= 0 Alt= 0 #EPs= 3 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb I: If#= 1 Alt= 0 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb Requires these firmwares: ar3k/AthrBT_0x11020100.dfu and ar3k/ramps_0x11020100_40.dfu Firmwares are available in linux-firmware. Device found in a laptop ASUS model N552VW. It's an Atheros AR9462 chip. Signed-off-by: Lauro Costa Signed-off-by: Marcel Holtmann --- drivers/bluetooth/ath3k.c | 2 ++ drivers/bluetooth/btusb.c | 1 + 2 files changed, 3 insertions(+) diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c index 641c2d19fc57..25894687c168 100644 --- a/drivers/bluetooth/ath3k.c +++ b/drivers/bluetooth/ath3k.c @@ -122,6 +122,7 @@ static const struct usb_device_id ath3k_table[] = { { USB_DEVICE(0x13d3, 0x3432) }, { USB_DEVICE(0x13d3, 0x3472) }, { USB_DEVICE(0x13d3, 0x3474) }, + { USB_DEVICE(0x13d3, 0x3487) }, /* Atheros AR5BBU12 with sflash firmware */ { USB_DEVICE(0x0489, 0xE02C) }, @@ -188,6 +189,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = { { USB_DEVICE(0x13d3, 0x3432), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x13d3, 0x3472), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x13d3, 0x3474), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x13d3, 0x3487), .driver_info = BTUSB_ATH3012 }, /* Atheros AR5BBU22 with sflash firmware */ { USB_DEVICE(0x0489, 0xE036), .driver_info = BTUSB_ATH3012 }, diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index 6aae9590511a..a3be65e6231a 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -236,6 +236,7 @@ static const struct usb_device_id blacklist_table[] = { { USB_DEVICE(0x13d3, 0x3432), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x13d3, 0x3472), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x13d3, 0x3474), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x13d3, 0x3487), .driver_info = BTUSB_ATH3012 }, /* Atheros AR5BBU12 with sflash firmware */ { USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE }, From e11f303e3d0731a7379252192e7d02a1ae319238 Mon Sep 17 00:00:00 2001 From: Steve Shih Date: Tue, 5 Apr 2016 11:30:03 -0700 Subject: [PATCH 1530/1649] e1000e: fix ethtool autoneg off for non-copper This patch fixes the issues for disabling auto-negotiation and forcing speed and duplex settings for the non-copper media. 
For non-copper media, e1000_get_settings should return ETH_TP_MDI_INVALID for eth_tp_mdix_ctrl instead of ETH_TP_MDI_AUTO so subsequent e1000_set_settings call would not fail with -EOPNOTSUPP. e1000_set_spd_dplx should not automatically turn autoneg back on for forced 1000 Mbps full duplex settings for non-copper media. Cc: xe-kernel@external.cisco.com Cc: Daniel Walker Signed-off-by: Steve Shih Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/e1000e/ethtool.c | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c index 1e3973aa707c..83a815b501ed 100644 --- a/drivers/net/ethernet/intel/e1000e/ethtool.c +++ b/drivers/net/ethernet/intel/e1000e/ethtool.c @@ -201,6 +201,9 @@ static int e1000_get_settings(struct net_device *netdev, else ecmd->eth_tp_mdix_ctrl = hw->phy.mdix; + if (hw->phy.media_type != e1000_media_type_copper) + ecmd->eth_tp_mdix_ctrl = ETH_TP_MDI_INVALID; + return 0; } @@ -236,8 +239,13 @@ static int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx) mac->forced_speed_duplex = ADVERTISE_100_FULL; break; case SPEED_1000 + DUPLEX_FULL: - mac->autoneg = 1; - adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL; + if (adapter->hw.phy.media_type == e1000_media_type_copper) { + mac->autoneg = 1; + adapter->hw.phy.autoneg_advertised = + ADVERTISE_1000_FULL; + } else { + mac->forced_speed_duplex = ADVERTISE_1000_FULL; + } break; case SPEED_1000 + DUPLEX_HALF: /* not supported */ default: From 847042a6a51e6dbb789c259750609b78aa3f27a3 Mon Sep 17 00:00:00 2001 From: Brian Walsh Date: Tue, 12 Apr 2016 23:22:30 -0400 Subject: [PATCH 1531/1649] e1000e: Cleanup consistency in ret_val variable usage Fixed the file to use a consistent ret_val for return value checking. 
Signed-off-by: Brian Walsh Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/e1000e/netdev.c | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 269087cb7b96..671256d32525 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -3368,12 +3368,12 @@ static int e1000e_write_uc_addr_list(struct net_device *netdev) * combining */ netdev_for_each_uc_addr(ha, netdev) { - int rval; + int ret_val; if (!rar_entries) break; - rval = hw->mac.ops.rar_set(hw, ha->addr, rar_entries--); - if (rval < 0) + ret_val = hw->mac.ops.rar_set(hw, ha->addr, rar_entries--); + if (ret_val < 0) return -ENOMEM; count++; } @@ -6965,7 +6965,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) int bars, i, err, pci_using_dac; u16 eeprom_data = 0; u16 eeprom_apme_mask = E1000_EEPROM_APME; - s32 rval = 0; + s32 ret_val = 0; if (ei->flags2 & FLAG2_DISABLE_ASPM_L0S) aspm_disable_flag = PCIE_LINK_STATE_L0S; @@ -7200,18 +7200,18 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) } else if (adapter->flags & FLAG_APME_IN_CTRL3) { if (adapter->flags & FLAG_APME_CHECK_PORT_B && (adapter->hw.bus.func == 1)) - rval = e1000_read_nvm(&adapter->hw, + ret_val = e1000_read_nvm(&adapter->hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data); else - rval = e1000_read_nvm(&adapter->hw, + ret_val = e1000_read_nvm(&adapter->hw, NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data); } /* fetch WoL from EEPROM */ - if (rval) - e_dbg("NVM read error getting WoL initial values: %d\n", rval); + if (ret_val) + e_dbg("NVM read error getting WoL initial values: %d\n", ret_val); else if (eeprom_data & eeprom_apme_mask) adapter->eeprom_wol |= E1000_WUFC_MAG; @@ -7231,10 +7231,10 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) device_wakeup_enable(&pdev->dev); /* save off EEPROM version number */ - rval = e1000_read_nvm(&adapter->hw, 5, 1, &adapter->eeprom_vers); + ret_val = e1000_read_nvm(&adapter->hw, 5, 1, &adapter->eeprom_vers); - if (rval) { - e_dbg("NVM read error getting EEPROM version: %d\n", rval); + if (ret_val) { + e_dbg("NVM read error getting EEPROM version: %d\n", ret_val); adapter->eeprom_vers = 0; } From a51d8c217b15b97fede844dd6860f7b3c6ffcfef Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Wed, 13 Apr 2016 16:08:28 -0700 Subject: [PATCH 1532/1649] igb: use BIT() macro or unsigned prefix For bitshifts, we should make use of the BIT macro when possible, and ensure that other bitshifts are marked as unsigned. This helps prevent signed bitshift errors, and ensures similar style. Make use of GENMASK and the unsigned postfix where BIT() isn't appropriate. 
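A small standalone illustration of the signedness hazard this conversion avoids (plain userspace C, not driver code; BIT() is redefined locally to mirror the kernel's 1UL-based macro):

#include <stdio.h>

#define BIT(nr) (1UL << (nr))

int main(void)
{
	unsigned long long signed_shift = 1 << 31;	/* shifts into the sign bit, sign-extends on widening */
	unsigned long long unsigned_shift = 1u << 31;
	unsigned long long bit_macro = BIT(31);

	printf("1 << 31  -> %llx\n", signed_shift);	/* ffffffff80000000 on LP64 */
	printf("1u << 31 -> %llx\n", unsigned_shift);	/* 80000000 */
	printf("BIT(31)  -> %llx\n", bit_macro);	/* 80000000 */
	return 0;
}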
Signed-off-by: Jacob Keller Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/igb/e1000_82575.c | 8 +- drivers/net/ethernet/intel/igb/e1000_82575.h | 30 ++--- .../net/ethernet/intel/igb/e1000_defines.h | 108 +++++++++--------- drivers/net/ethernet/intel/igb/e1000_mac.c | 10 +- drivers/net/ethernet/intel/igb/e1000_mbx.c | 4 +- drivers/net/ethernet/intel/igb/e1000_nvm.c | 2 +- drivers/net/ethernet/intel/igb/e1000_phy.h | 6 +- drivers/net/ethernet/intel/igb/igb.h | 32 +++--- drivers/net/ethernet/intel/igb/igb_ethtool.c | 18 +-- drivers/net/ethernet/intel/igb/igb_main.c | 48 ++++---- drivers/net/ethernet/intel/igb/igb_ptp.c | 6 +- 11 files changed, 136 insertions(+), 136 deletions(-) diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c index a23aa6704394..a61447fd778e 100644 --- a/drivers/net/ethernet/intel/igb/e1000_82575.c +++ b/drivers/net/ethernet/intel/igb/e1000_82575.c @@ -361,7 +361,7 @@ static s32 igb_init_nvm_params_82575(struct e1000_hw *hw) if (size > 15) size = 15; - nvm->word_size = 1 << size; + nvm->word_size = BIT(size); nvm->opcode_bits = 8; nvm->delay_usec = 1; @@ -380,7 +380,7 @@ static s32 igb_init_nvm_params_82575(struct e1000_hw *hw) 16 : 8; break; } - if (nvm->word_size == (1 << 15)) + if (nvm->word_size == BIT(15)) nvm->page_size = 128; nvm->type = e1000_nvm_eeprom_spi; @@ -391,7 +391,7 @@ static s32 igb_init_nvm_params_82575(struct e1000_hw *hw) nvm->ops.write = igb_write_nvm_spi; nvm->ops.validate = igb_validate_nvm_checksum; nvm->ops.update = igb_update_nvm_checksum; - if (nvm->word_size < (1 << 15)) + if (nvm->word_size < BIT(15)) nvm->ops.read = igb_read_nvm_eerd; else nvm->ops.read = igb_read_nvm_spi; @@ -2107,7 +2107,7 @@ void igb_vmdq_set_anti_spoofing_pf(struct e1000_hw *hw, bool enable, int pf) /* The PF can spoof - it has to in order to * support emulation mode NICs */ - reg_val ^= (1 << pf | 1 << (pf + MAX_NUM_VFS)); + reg_val ^= (BIT(pf) | BIT(pf + MAX_NUM_VFS)); } else { reg_val &= ~(E1000_DTXSWC_MAC_SPOOF_MASK | E1000_DTXSWC_VLAN_SPOOF_MASK); diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.h b/drivers/net/ethernet/intel/igb/e1000_82575.h index de8805a2a2fe..199ff98209cf 100644 --- a/drivers/net/ethernet/intel/igb/e1000_82575.h +++ b/drivers/net/ethernet/intel/igb/e1000_82575.h @@ -168,16 +168,16 @@ struct e1000_adv_tx_context_desc { #define E1000_DCA_CTRL_DCA_MODE_CB2 0x02 /* DCA Mode CB2 */ #define E1000_DCA_RXCTRL_CPUID_MASK 0x0000001F /* Rx CPUID Mask */ -#define E1000_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* DCA Rx Desc enable */ -#define E1000_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* DCA Rx Desc header enable */ -#define E1000_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* DCA Rx Desc payload enable */ -#define E1000_DCA_RXCTRL_DESC_RRO_EN (1 << 9) /* DCA Rx rd Desc Relax Order */ +#define E1000_DCA_RXCTRL_DESC_DCA_EN BIT(5) /* DCA Rx Desc enable */ +#define E1000_DCA_RXCTRL_HEAD_DCA_EN BIT(6) /* DCA Rx Desc header enable */ +#define E1000_DCA_RXCTRL_DATA_DCA_EN BIT(7) /* DCA Rx Desc payload enable */ +#define E1000_DCA_RXCTRL_DESC_RRO_EN BIT(9) /* DCA Rx rd Desc Relax Order */ #define E1000_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */ -#define E1000_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */ -#define E1000_DCA_TXCTRL_DESC_RRO_EN (1 << 9) /* Tx rd Desc Relax Order */ -#define E1000_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */ -#define E1000_DCA_TXCTRL_DATA_RRO_EN (1 << 13) /* Tx rd data Relax Order */ +#define E1000_DCA_TXCTRL_DESC_DCA_EN BIT(5) /* DCA Tx Desc enable 
*/ +#define E1000_DCA_TXCTRL_DESC_RRO_EN BIT(9) /* Tx rd Desc Relax Order */ +#define E1000_DCA_TXCTRL_TX_WB_RO_EN BIT(11) /* Tx Desc writeback RO bit */ +#define E1000_DCA_TXCTRL_DATA_RRO_EN BIT(13) /* Tx rd data Relax Order */ /* Additional DCA related definitions, note change in position of CPUID */ #define E1000_DCA_TXCTRL_CPUID_MASK_82576 0xFF000000 /* Tx CPUID Mask */ @@ -186,8 +186,8 @@ struct e1000_adv_tx_context_desc { #define E1000_DCA_RXCTRL_CPUID_SHIFT 24 /* Rx CPUID now in the last byte */ /* ETQF register bit definitions */ -#define E1000_ETQF_FILTER_ENABLE (1 << 26) -#define E1000_ETQF_1588 (1 << 30) +#define E1000_ETQF_FILTER_ENABLE BIT(26) +#define E1000_ETQF_1588 BIT(30) /* FTQF register bit definitions */ #define E1000_FTQF_VF_BP 0x00008000 @@ -203,16 +203,16 @@ struct e1000_adv_tx_context_desc { #define E1000_DTXSWC_VLAN_SPOOF_MASK 0x0000FF00 /* Per VF VLAN spoof control */ #define E1000_DTXSWC_LLE_MASK 0x00FF0000 /* Per VF Local LB enables */ #define E1000_DTXSWC_VLAN_SPOOF_SHIFT 8 -#define E1000_DTXSWC_VMDQ_LOOPBACK_EN (1 << 31) /* global VF LB enable */ +#define E1000_DTXSWC_VMDQ_LOOPBACK_EN BIT(31) /* global VF LB enable */ /* Easy defines for setting default pool, would normally be left a zero */ #define E1000_VT_CTL_DEFAULT_POOL_SHIFT 7 #define E1000_VT_CTL_DEFAULT_POOL_MASK (0x7 << E1000_VT_CTL_DEFAULT_POOL_SHIFT) /* Other useful VMD_CTL register defines */ -#define E1000_VT_CTL_IGNORE_MAC (1 << 28) -#define E1000_VT_CTL_DISABLE_DEF_POOL (1 << 29) -#define E1000_VT_CTL_VM_REPL_EN (1 << 30) +#define E1000_VT_CTL_IGNORE_MAC BIT(28) +#define E1000_VT_CTL_DISABLE_DEF_POOL BIT(29) +#define E1000_VT_CTL_VM_REPL_EN BIT(30) /* Per VM Offload register setup */ #define E1000_VMOLR_RLPML_MASK 0x00003FFF /* Long Packet Maximum Length mask */ @@ -252,7 +252,7 @@ struct e1000_adv_tx_context_desc { #define E1000_DTXCTL_MDP_EN 0x0020 #define E1000_DTXCTL_SPOOF_INT 0x0040 -#define E1000_EEPROM_PCS_AUTONEG_DISABLE_BIT (1 << 14) +#define E1000_EEPROM_PCS_AUTONEG_DISABLE_BIT BIT(14) #define ALL_QUEUES 0xFFFF diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h index e9f23ee8f15e..2997c443c5dc 100644 --- a/drivers/net/ethernet/intel/igb/e1000_defines.h +++ b/drivers/net/ethernet/intel/igb/e1000_defines.h @@ -530,65 +530,65 @@ /* Time Sync Interrupt Cause/Mask Register Bits */ -#define TSINTR_SYS_WRAP (1 << 0) /* SYSTIM Wrap around. */ -#define TSINTR_TXTS (1 << 1) /* Transmit Timestamp. */ -#define TSINTR_RXTS (1 << 2) /* Receive Timestamp. */ -#define TSINTR_TT0 (1 << 3) /* Target Time 0 Trigger. */ -#define TSINTR_TT1 (1 << 4) /* Target Time 1 Trigger. */ -#define TSINTR_AUTT0 (1 << 5) /* Auxiliary Timestamp 0 Taken. */ -#define TSINTR_AUTT1 (1 << 6) /* Auxiliary Timestamp 1 Taken. */ -#define TSINTR_TADJ (1 << 7) /* Time Adjust Done. */ +#define TSINTR_SYS_WRAP BIT(0) /* SYSTIM Wrap around. */ +#define TSINTR_TXTS BIT(1) /* Transmit Timestamp. */ +#define TSINTR_RXTS BIT(2) /* Receive Timestamp. */ +#define TSINTR_TT0 BIT(3) /* Target Time 0 Trigger. */ +#define TSINTR_TT1 BIT(4) /* Target Time 1 Trigger. */ +#define TSINTR_AUTT0 BIT(5) /* Auxiliary Timestamp 0 Taken. */ +#define TSINTR_AUTT1 BIT(6) /* Auxiliary Timestamp 1 Taken. */ +#define TSINTR_TADJ BIT(7) /* Time Adjust Done. */ #define TSYNC_INTERRUPTS TSINTR_TXTS #define E1000_TSICR_TXTS TSINTR_TXTS /* TSAUXC Configuration Bits */ -#define TSAUXC_EN_TT0 (1 << 0) /* Enable target time 0. */ -#define TSAUXC_EN_TT1 (1 << 1) /* Enable target time 1. 
*/ -#define TSAUXC_EN_CLK0 (1 << 2) /* Enable Configurable Frequency Clock 0. */ -#define TSAUXC_SAMP_AUT0 (1 << 3) /* Latch SYSTIML/H into AUXSTMPL/0. */ -#define TSAUXC_ST0 (1 << 4) /* Start Clock 0 Toggle on Target Time 0. */ -#define TSAUXC_EN_CLK1 (1 << 5) /* Enable Configurable Frequency Clock 1. */ -#define TSAUXC_SAMP_AUT1 (1 << 6) /* Latch SYSTIML/H into AUXSTMPL/1. */ -#define TSAUXC_ST1 (1 << 7) /* Start Clock 1 Toggle on Target Time 1. */ -#define TSAUXC_EN_TS0 (1 << 8) /* Enable hardware timestamp 0. */ -#define TSAUXC_AUTT0 (1 << 9) /* Auxiliary Timestamp Taken. */ -#define TSAUXC_EN_TS1 (1 << 10) /* Enable hardware timestamp 0. */ -#define TSAUXC_AUTT1 (1 << 11) /* Auxiliary Timestamp Taken. */ -#define TSAUXC_PLSG (1 << 17) /* Generate a pulse. */ -#define TSAUXC_DISABLE (1 << 31) /* Disable SYSTIM Count Operation. */ +#define TSAUXC_EN_TT0 BIT(0) /* Enable target time 0. */ +#define TSAUXC_EN_TT1 BIT(1) /* Enable target time 1. */ +#define TSAUXC_EN_CLK0 BIT(2) /* Enable Configurable Frequency Clock 0. */ +#define TSAUXC_SAMP_AUT0 BIT(3) /* Latch SYSTIML/H into AUXSTMPL/0. */ +#define TSAUXC_ST0 BIT(4) /* Start Clock 0 Toggle on Target Time 0. */ +#define TSAUXC_EN_CLK1 BIT(5) /* Enable Configurable Frequency Clock 1. */ +#define TSAUXC_SAMP_AUT1 BIT(6) /* Latch SYSTIML/H into AUXSTMPL/1. */ +#define TSAUXC_ST1 BIT(7) /* Start Clock 1 Toggle on Target Time 1. */ +#define TSAUXC_EN_TS0 BIT(8) /* Enable hardware timestamp 0. */ +#define TSAUXC_AUTT0 BIT(9) /* Auxiliary Timestamp Taken. */ +#define TSAUXC_EN_TS1 BIT(10) /* Enable hardware timestamp 0. */ +#define TSAUXC_AUTT1 BIT(11) /* Auxiliary Timestamp Taken. */ +#define TSAUXC_PLSG BIT(17) /* Generate a pulse. */ +#define TSAUXC_DISABLE BIT(31) /* Disable SYSTIM Count Operation. */ /* SDP Configuration Bits */ -#define AUX0_SEL_SDP0 (0 << 0) /* Assign SDP0 to auxiliary time stamp 0. */ -#define AUX0_SEL_SDP1 (1 << 0) /* Assign SDP1 to auxiliary time stamp 0. */ -#define AUX0_SEL_SDP2 (2 << 0) /* Assign SDP2 to auxiliary time stamp 0. */ -#define AUX0_SEL_SDP3 (3 << 0) /* Assign SDP3 to auxiliary time stamp 0. */ -#define AUX0_TS_SDP_EN (1 << 2) /* Enable auxiliary time stamp trigger 0. */ -#define AUX1_SEL_SDP0 (0 << 3) /* Assign SDP0 to auxiliary time stamp 1. */ -#define AUX1_SEL_SDP1 (1 << 3) /* Assign SDP1 to auxiliary time stamp 1. */ -#define AUX1_SEL_SDP2 (2 << 3) /* Assign SDP2 to auxiliary time stamp 1. */ -#define AUX1_SEL_SDP3 (3 << 3) /* Assign SDP3 to auxiliary time stamp 1. */ -#define AUX1_TS_SDP_EN (1 << 5) /* Enable auxiliary time stamp trigger 1. */ -#define TS_SDP0_SEL_TT0 (0 << 6) /* Target time 0 is output on SDP0. */ -#define TS_SDP0_SEL_TT1 (1 << 6) /* Target time 1 is output on SDP0. */ -#define TS_SDP0_SEL_FC0 (2 << 6) /* Freq clock 0 is output on SDP0. */ -#define TS_SDP0_SEL_FC1 (3 << 6) /* Freq clock 1 is output on SDP0. */ -#define TS_SDP0_EN (1 << 8) /* SDP0 is assigned to Tsync. */ -#define TS_SDP1_SEL_TT0 (0 << 9) /* Target time 0 is output on SDP1. */ -#define TS_SDP1_SEL_TT1 (1 << 9) /* Target time 1 is output on SDP1. */ -#define TS_SDP1_SEL_FC0 (2 << 9) /* Freq clock 0 is output on SDP1. */ -#define TS_SDP1_SEL_FC1 (3 << 9) /* Freq clock 1 is output on SDP1. */ -#define TS_SDP1_EN (1 << 11) /* SDP1 is assigned to Tsync. */ -#define TS_SDP2_SEL_TT0 (0 << 12) /* Target time 0 is output on SDP2. */ -#define TS_SDP2_SEL_TT1 (1 << 12) /* Target time 1 is output on SDP2. */ -#define TS_SDP2_SEL_FC0 (2 << 12) /* Freq clock 0 is output on SDP2. 
*/ -#define TS_SDP2_SEL_FC1 (3 << 12) /* Freq clock 1 is output on SDP2. */ -#define TS_SDP2_EN (1 << 14) /* SDP2 is assigned to Tsync. */ -#define TS_SDP3_SEL_TT0 (0 << 15) /* Target time 0 is output on SDP3. */ -#define TS_SDP3_SEL_TT1 (1 << 15) /* Target time 1 is output on SDP3. */ -#define TS_SDP3_SEL_FC0 (2 << 15) /* Freq clock 0 is output on SDP3. */ -#define TS_SDP3_SEL_FC1 (3 << 15) /* Freq clock 1 is output on SDP3. */ -#define TS_SDP3_EN (1 << 17) /* SDP3 is assigned to Tsync. */ +#define AUX0_SEL_SDP0 (0u << 0) /* Assign SDP0 to auxiliary time stamp 0. */ +#define AUX0_SEL_SDP1 (1u << 0) /* Assign SDP1 to auxiliary time stamp 0. */ +#define AUX0_SEL_SDP2 (2u << 0) /* Assign SDP2 to auxiliary time stamp 0. */ +#define AUX0_SEL_SDP3 (3u << 0) /* Assign SDP3 to auxiliary time stamp 0. */ +#define AUX0_TS_SDP_EN (1u << 2) /* Enable auxiliary time stamp trigger 0. */ +#define AUX1_SEL_SDP0 (0u << 3) /* Assign SDP0 to auxiliary time stamp 1. */ +#define AUX1_SEL_SDP1 (1u << 3) /* Assign SDP1 to auxiliary time stamp 1. */ +#define AUX1_SEL_SDP2 (2u << 3) /* Assign SDP2 to auxiliary time stamp 1. */ +#define AUX1_SEL_SDP3 (3u << 3) /* Assign SDP3 to auxiliary time stamp 1. */ +#define AUX1_TS_SDP_EN (1u << 5) /* Enable auxiliary time stamp trigger 1. */ +#define TS_SDP0_SEL_TT0 (0u << 6) /* Target time 0 is output on SDP0. */ +#define TS_SDP0_SEL_TT1 (1u << 6) /* Target time 1 is output on SDP0. */ +#define TS_SDP0_SEL_FC0 (2u << 6) /* Freq clock 0 is output on SDP0. */ +#define TS_SDP0_SEL_FC1 (3u << 6) /* Freq clock 1 is output on SDP0. */ +#define TS_SDP0_EN (1u << 8) /* SDP0 is assigned to Tsync. */ +#define TS_SDP1_SEL_TT0 (0u << 9) /* Target time 0 is output on SDP1. */ +#define TS_SDP1_SEL_TT1 (1u << 9) /* Target time 1 is output on SDP1. */ +#define TS_SDP1_SEL_FC0 (2u << 9) /* Freq clock 0 is output on SDP1. */ +#define TS_SDP1_SEL_FC1 (3u << 9) /* Freq clock 1 is output on SDP1. */ +#define TS_SDP1_EN (1u << 11) /* SDP1 is assigned to Tsync. */ +#define TS_SDP2_SEL_TT0 (0u << 12) /* Target time 0 is output on SDP2. */ +#define TS_SDP2_SEL_TT1 (1u << 12) /* Target time 1 is output on SDP2. */ +#define TS_SDP2_SEL_FC0 (2u << 12) /* Freq clock 0 is output on SDP2. */ +#define TS_SDP2_SEL_FC1 (3u << 12) /* Freq clock 1 is output on SDP2. */ +#define TS_SDP2_EN (1u << 14) /* SDP2 is assigned to Tsync. */ +#define TS_SDP3_SEL_TT0 (0u << 15) /* Target time 0 is output on SDP3. */ +#define TS_SDP3_SEL_TT1 (1u << 15) /* Target time 1 is output on SDP3. */ +#define TS_SDP3_SEL_FC0 (2u << 15) /* Freq clock 0 is output on SDP3. */ +#define TS_SDP3_SEL_FC1 (3u << 15) /* Freq clock 1 is output on SDP3. */ +#define TS_SDP3_EN (1u << 17) /* SDP3 is assigned to Tsync. 
*/ #define E1000_MDICNFG_EXT_MDIO 0x80000000 /* MDI ext/int destination */ #define E1000_MDICNFG_COM_MDIO 0x40000000 /* MDI shared w/ lan 0 */ @@ -997,8 +997,8 @@ #define E1000_M88E1543_FIBER_CTRL 0x0 #define E1000_EEE_ADV_DEV_I354 7 #define E1000_EEE_ADV_ADDR_I354 60 -#define E1000_EEE_ADV_100_SUPPORTED (1 << 1) /* 100BaseTx EEE Supported */ -#define E1000_EEE_ADV_1000_SUPPORTED (1 << 2) /* 1000BaseT EEE Supported */ +#define E1000_EEE_ADV_100_SUPPORTED BIT(1) /* 100BaseTx EEE Supported */ +#define E1000_EEE_ADV_1000_SUPPORTED BIT(2) /* 1000BaseT EEE Supported */ #define E1000_PCS_STATUS_DEV_I354 3 #define E1000_PCS_STATUS_ADDR_I354 1 #define E1000_PCS_STATUS_TX_LPI_IND 0x0200 /* Tx in LPI state */ diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.c b/drivers/net/ethernet/intel/igb/e1000_mac.c index 07cf4fe58338..5010e2232c50 100644 --- a/drivers/net/ethernet/intel/igb/e1000_mac.c +++ b/drivers/net/ethernet/intel/igb/e1000_mac.c @@ -212,7 +212,7 @@ s32 igb_vfta_set(struct e1000_hw *hw, u32 vlan, u32 vind, * bits[4-0]: which bit in the register */ regidx = vlan / 32; - vfta_delta = 1 << (vlan % 32); + vfta_delta = BIT(vlan % 32); vfta = adapter->shadow_vfta[regidx]; /* vfta_delta represents the difference between the current value @@ -243,12 +243,12 @@ s32 igb_vfta_set(struct e1000_hw *hw, u32 vlan, u32 vind, bits = rd32(E1000_VLVF(vlvf_index)); /* set the pool bit */ - bits |= 1 << (E1000_VLVF_POOLSEL_SHIFT + vind); + bits |= BIT(E1000_VLVF_POOLSEL_SHIFT + vind); if (vlan_on) goto vlvf_update; /* clear the pool bit */ - bits ^= 1 << (E1000_VLVF_POOLSEL_SHIFT + vind); + bits ^= BIT(E1000_VLVF_POOLSEL_SHIFT + vind); if (!(bits & E1000_VLVF_POOLSEL_MASK)) { /* Clear VFTA first, then disable VLVF. Otherwise @@ -427,7 +427,7 @@ void igb_mta_set(struct e1000_hw *hw, u32 hash_value) mta = array_rd32(E1000_MTA, hash_reg); - mta |= (1 << hash_bit); + mta |= BIT(hash_bit); array_wr32(E1000_MTA, hash_reg, mta); wrfl(); @@ -527,7 +527,7 @@ void igb_update_mc_addr_list(struct e1000_hw *hw, hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1); hash_bit = hash_value & 0x1F; - hw->mac.mta_shadow[hash_reg] |= (1 << hash_bit); + hw->mac.mta_shadow[hash_reg] |= BIT(hash_bit); mc_addr_list += (ETH_ALEN); } diff --git a/drivers/net/ethernet/intel/igb/e1000_mbx.c b/drivers/net/ethernet/intel/igb/e1000_mbx.c index 10f5c9e016a9..00e263f0c030 100644 --- a/drivers/net/ethernet/intel/igb/e1000_mbx.c +++ b/drivers/net/ethernet/intel/igb/e1000_mbx.c @@ -302,9 +302,9 @@ static s32 igb_check_for_rst_pf(struct e1000_hw *hw, u16 vf_number) u32 vflre = rd32(E1000_VFLRE); s32 ret_val = -E1000_ERR_MBX; - if (vflre & (1 << vf_number)) { + if (vflre & BIT(vf_number)) { ret_val = 0; - wr32(E1000_VFLRE, (1 << vf_number)); + wr32(E1000_VFLRE, BIT(vf_number)); hw->mbx.stats.rsts++; } diff --git a/drivers/net/ethernet/intel/igb/e1000_nvm.c b/drivers/net/ethernet/intel/igb/e1000_nvm.c index e8280d0d7f02..3582c5cf8843 100644 --- a/drivers/net/ethernet/intel/igb/e1000_nvm.c +++ b/drivers/net/ethernet/intel/igb/e1000_nvm.c @@ -72,7 +72,7 @@ static void igb_shift_out_eec_bits(struct e1000_hw *hw, u16 data, u16 count) u32 eecd = rd32(E1000_EECD); u32 mask; - mask = 0x01 << (count - 1); + mask = 1u << (count - 1); if (nvm->type == e1000_nvm_eeprom_spi) eecd |= E1000_EECD_DO; diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.h b/drivers/net/ethernet/intel/igb/e1000_phy.h index 969a6ddafa3b..9b622b33bb5a 100644 --- a/drivers/net/ethernet/intel/igb/e1000_phy.h +++ b/drivers/net/ethernet/intel/igb/e1000_phy.h @@ -91,10 +91,10 
@@ s32 igb_check_polarity_m88(struct e1000_hw *hw); #define I82580_ADDR_REG 16 #define I82580_CFG_REG 22 -#define I82580_CFG_ASSERT_CRS_ON_TX (1 << 15) -#define I82580_CFG_ENABLE_DOWNSHIFT (3 << 10) /* auto downshift 100/10 */ +#define I82580_CFG_ASSERT_CRS_ON_TX BIT(15) +#define I82580_CFG_ENABLE_DOWNSHIFT (3u << 10) /* auto downshift 100/10 */ #define I82580_CTRL_REG 23 -#define I82580_CTRL_DOWNSHIFT_MASK (7 << 10) +#define I82580_CTRL_DOWNSHIFT_MASK (7u << 10) /* 82580 specific PHY registers */ #define I82580_PHY_CTRL_2 18 diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h index 9413fa61392f..7a7bc31493a5 100644 --- a/drivers/net/ethernet/intel/igb/igb.h +++ b/drivers/net/ethernet/intel/igb/igb.h @@ -169,7 +169,7 @@ enum igb_tx_flags { * maintain a power of two alignment we have to limit ourselves to 32K. */ #define IGB_MAX_TXD_PWR 15 -#define IGB_MAX_DATA_PER_TXD (1 << IGB_MAX_TXD_PWR) +#define IGB_MAX_DATA_PER_TXD (1u << IGB_MAX_TXD_PWR) /* Tx Descriptors needed, worst case */ #define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IGB_MAX_DATA_PER_TXD) @@ -466,21 +466,21 @@ struct igb_adapter { u16 eee_advert; }; -#define IGB_FLAG_HAS_MSI (1 << 0) -#define IGB_FLAG_DCA_ENABLED (1 << 1) -#define IGB_FLAG_QUAD_PORT_A (1 << 2) -#define IGB_FLAG_QUEUE_PAIRS (1 << 3) -#define IGB_FLAG_DMAC (1 << 4) -#define IGB_FLAG_PTP (1 << 5) -#define IGB_FLAG_RSS_FIELD_IPV4_UDP (1 << 6) -#define IGB_FLAG_RSS_FIELD_IPV6_UDP (1 << 7) -#define IGB_FLAG_WOL_SUPPORTED (1 << 8) -#define IGB_FLAG_NEED_LINK_UPDATE (1 << 9) -#define IGB_FLAG_MEDIA_RESET (1 << 10) -#define IGB_FLAG_MAS_CAPABLE (1 << 11) -#define IGB_FLAG_MAS_ENABLE (1 << 12) -#define IGB_FLAG_HAS_MSIX (1 << 13) -#define IGB_FLAG_EEE (1 << 14) +#define IGB_FLAG_HAS_MSI BIT(0) +#define IGB_FLAG_DCA_ENABLED BIT(1) +#define IGB_FLAG_QUAD_PORT_A BIT(2) +#define IGB_FLAG_QUEUE_PAIRS BIT(3) +#define IGB_FLAG_DMAC BIT(4) +#define IGB_FLAG_PTP BIT(5) +#define IGB_FLAG_RSS_FIELD_IPV4_UDP BIT(6) +#define IGB_FLAG_RSS_FIELD_IPV6_UDP BIT(7) +#define IGB_FLAG_WOL_SUPPORTED BIT(8) +#define IGB_FLAG_NEED_LINK_UPDATE BIT(9) +#define IGB_FLAG_MEDIA_RESET BIT(10) +#define IGB_FLAG_MAS_CAPABLE BIT(11) +#define IGB_FLAG_MAS_ENABLE BIT(12) +#define IGB_FLAG_HAS_MSIX BIT(13) +#define IGB_FLAG_EEE BIT(14) #define IGB_FLAG_VLAN_PROMISC BIT(15) /* Media Auto Sense */ diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c index bb4d6cdcd0b8..64e91c575a39 100644 --- a/drivers/net/ethernet/intel/igb/igb_ethtool.c +++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c @@ -466,7 +466,7 @@ static void igb_get_regs(struct net_device *netdev, memset(p, 0, IGB_REGS_LEN * sizeof(u32)); - regs->version = (1 << 24) | (hw->revision_id << 16) | hw->device_id; + regs->version = (1u << 24) | (hw->revision_id << 16) | hw->device_id; /* General Registers */ regs_buff[0] = rd32(E1000_CTRL); @@ -1448,7 +1448,7 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data) /* Test each interrupt */ for (; i < 31; i++) { /* Interrupt to test */ - mask = 1 << i; + mask = BIT(i); if (!(mask & ics_mask)) continue; @@ -2411,19 +2411,19 @@ static int igb_get_ts_info(struct net_device *dev, SOF_TIMESTAMPING_RAW_HARDWARE; info->tx_types = - (1 << HWTSTAMP_TX_OFF) | - (1 << HWTSTAMP_TX_ON); + BIT(HWTSTAMP_TX_OFF) | + BIT(HWTSTAMP_TX_ON); - info->rx_filters = 1 << HWTSTAMP_FILTER_NONE; + info->rx_filters = BIT(HWTSTAMP_FILTER_NONE); /* 82576 does not support timestamping all packets. 
*/ if (adapter->hw.mac.type >= e1000_82580) - info->rx_filters |= 1 << HWTSTAMP_FILTER_ALL; + info->rx_filters |= BIT(HWTSTAMP_FILTER_ALL); else info->rx_filters |= - (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) | - (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) | - (1 << HWTSTAMP_FILTER_PTP_V2_EVENT); + BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) | + BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) | + BIT(HWTSTAMP_FILTER_PTP_V2_EVENT); return 0; default: diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 7460bdbe2e49..0191c5f9103a 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -836,7 +836,7 @@ static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector) igb_write_ivar(hw, msix_vector, tx_queue & 0x7, ((tx_queue & 0x8) << 1) + 8); - q_vector->eims_value = 1 << msix_vector; + q_vector->eims_value = BIT(msix_vector); break; case e1000_82580: case e1000_i350: @@ -857,7 +857,7 @@ static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector) igb_write_ivar(hw, msix_vector, tx_queue >> 1, ((tx_queue & 0x1) << 4) + 8); - q_vector->eims_value = 1 << msix_vector; + q_vector->eims_value = BIT(msix_vector); break; default: BUG(); @@ -919,7 +919,7 @@ static void igb_configure_msix(struct igb_adapter *adapter) E1000_GPIE_NSICR); /* enable msix_other interrupt */ - adapter->eims_other = 1 << vector; + adapter->eims_other = BIT(vector); tmp = (vector++ | E1000_IVAR_VALID) << 8; wr32(E1000_IVAR_MISC, tmp); @@ -4064,7 +4064,7 @@ static int igb_vlan_promisc_enable(struct igb_adapter *adapter) for (i = E1000_VLVF_ARRAY_SIZE; --i;) { u32 vlvf = rd32(E1000_VLVF(i)); - vlvf |= 1 << pf_id; + vlvf |= BIT(pf_id); wr32(E1000_VLVF(i), vlvf); } @@ -4091,7 +4091,7 @@ static void igb_scrub_vfta(struct igb_adapter *adapter, u32 vfta_offset) /* guarantee that we don't scrub out management VLAN */ vid = adapter->mng_vlan_id; if (vid >= vid_start && vid < vid_end) - vfta[(vid - vid_start) / 32] |= 1 << (vid % 32); + vfta[(vid - vid_start) / 32] |= BIT(vid % 32); if (!adapter->vfs_allocated_count) goto set_vfta; @@ -4110,7 +4110,7 @@ static void igb_scrub_vfta(struct igb_adapter *adapter, u32 vfta_offset) if (vlvf & E1000_VLVF_VLANID_ENABLE) { /* record VLAN ID in VFTA */ - vfta[(vid - vid_start) / 32] |= 1 << (vid % 32); + vfta[(vid - vid_start) / 32] |= BIT(vid % 32); /* if PF is part of this then continue */ if (test_bit(vid, adapter->active_vlans)) @@ -4118,7 +4118,7 @@ static void igb_scrub_vfta(struct igb_adapter *adapter, u32 vfta_offset) } /* remove PF from the pool */ - bits = ~(1 << pf_id); + bits = ~BIT(pf_id); bits &= rd32(E1000_VLVF(i)); wr32(E1000_VLVF(i), bits); } @@ -4276,13 +4276,13 @@ static void igb_spoof_check(struct igb_adapter *adapter) return; for (j = 0; j < adapter->vfs_allocated_count; j++) { - if (adapter->wvbr & (1 << j) || - adapter->wvbr & (1 << (j + IGB_STAGGERED_QUEUE_OFFSET))) { + if (adapter->wvbr & BIT(j) || + adapter->wvbr & BIT(j + IGB_STAGGERED_QUEUE_OFFSET)) { dev_warn(&adapter->pdev->dev, "Spoof event(s) detected on VF %d\n", j); adapter->wvbr &= - ~((1 << j) | - (1 << (j + IGB_STAGGERED_QUEUE_OFFSET))); + ~(BIT(j) | + BIT(j + IGB_STAGGERED_QUEUE_OFFSET)); } } } @@ -5963,11 +5963,11 @@ static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf) /* create mask for VF and other pools */ pool_mask = E1000_VLVF_POOLSEL_MASK; - vlvf_mask = 1 << (E1000_VLVF_POOLSEL_SHIFT + vf); + vlvf_mask = BIT(E1000_VLVF_POOLSEL_SHIFT + vf); /* drop PF from pool bits */ - 
pool_mask &= ~(1 << (E1000_VLVF_POOLSEL_SHIFT + - adapter->vfs_allocated_count)); + pool_mask &= ~BIT(E1000_VLVF_POOLSEL_SHIFT + + adapter->vfs_allocated_count); /* Find the vlan filter for this id */ for (i = E1000_VLVF_ARRAY_SIZE; i--;) { @@ -5990,7 +5990,7 @@ static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf) goto update_vlvf; vid = vlvf & E1000_VLVF_VLANID_MASK; - vfta_mask = 1 << (vid % 32); + vfta_mask = BIT(vid % 32); /* clear bit from VFTA */ vfta = adapter->shadow_vfta[vid / 32]; @@ -6041,13 +6041,13 @@ void igb_update_pf_vlvf(struct igb_adapter *adapter, u32 vid) * entry other than the PF. */ pf_id = adapter->vfs_allocated_count + E1000_VLVF_POOLSEL_SHIFT; - bits = ~(1 << pf_id) & E1000_VLVF_POOLSEL_MASK; + bits = ~BIT(pf_id) & E1000_VLVF_POOLSEL_MASK; bits &= rd32(E1000_VLVF(idx)); /* Disable the filter so this falls into the default pool. */ if (!bits) { if (adapter->flags & IGB_FLAG_VLAN_PROMISC) - wr32(E1000_VLVF(idx), 1 << pf_id); + wr32(E1000_VLVF(idx), BIT(pf_id)); else wr32(E1000_VLVF(idx), 0); } @@ -6231,9 +6231,9 @@ static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf) /* enable transmit and receive for vf */ reg = rd32(E1000_VFTE); - wr32(E1000_VFTE, reg | (1 << vf)); + wr32(E1000_VFTE, reg | BIT(vf)); reg = rd32(E1000_VFRE); - wr32(E1000_VFRE, reg | (1 << vf)); + wr32(E1000_VFRE, reg | BIT(vf)); adapter->vf_data[vf].flags |= IGB_VF_FLAG_CTS; @@ -7927,7 +7927,7 @@ static void igb_set_vf_rate_limit(struct e1000_hw *hw, int vf, int tx_rate, /* Calculate the rate factor values to set */ rf_int = link_speed / tx_rate; rf_dec = (link_speed - (rf_int * tx_rate)); - rf_dec = (rf_dec * (1 << E1000_RTTBCNRC_RF_INT_SHIFT)) / + rf_dec = (rf_dec * BIT(E1000_RTTBCNRC_RF_INT_SHIFT)) / tx_rate; bcnrc_val = E1000_RTTBCNRC_RS_ENA; @@ -8017,11 +8017,11 @@ static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, reg_offset = (hw->mac.type == e1000_82576) ? 
E1000_DTXSWC : E1000_TXSWC; reg_val = rd32(reg_offset); if (setting) - reg_val |= ((1 << vf) | - (1 << (vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT))); + reg_val |= (BIT(vf) | + BIT(vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT)); else - reg_val &= ~((1 << vf) | - (1 << (vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT))); + reg_val &= ~(BIT(vf) | + BIT(vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT)); wr32(reg_offset, reg_val); adapter->vf_data[vf].spoofchk_enabled = setting; diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c index 22a8a29895b4..fdb6dfd3ab77 100644 --- a/drivers/net/ethernet/intel/igb/igb_ptp.c +++ b/drivers/net/ethernet/intel/igb/igb_ptp.c @@ -69,9 +69,9 @@ #define IGB_SYSTIM_OVERFLOW_PERIOD (HZ * 60 * 9) #define IGB_PTP_TX_TIMEOUT (HZ * 15) -#define INCPERIOD_82576 (1 << E1000_TIMINCA_16NS_SHIFT) -#define INCVALUE_82576_MASK ((1 << E1000_TIMINCA_16NS_SHIFT) - 1) -#define INCVALUE_82576 (16 << IGB_82576_TSYNC_SHIFT) +#define INCPERIOD_82576 BIT(E1000_TIMINCA_16NS_SHIFT) +#define INCVALUE_82576_MASK GENMASK(E1000_TIMINCA_16NS_SHIFT - 1, 0) +#define INCVALUE_82576 (16u << IGB_82576_TSYNC_SHIFT) #define IGB_NBITS_82580 40 static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter); From 8008f68cb805c0099723ba1b58d26257a52ce890 Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Wed, 13 Apr 2016 16:08:29 -0700 Subject: [PATCH 1533/1649] igb: make igb_update_pf_vlvf static Signed-off-by: Jacob Keller Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/igb/igb_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 0191c5f9103a..cab306934462 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -6027,7 +6027,7 @@ static int igb_find_vlvf_entry(struct e1000_hw *hw, u32 vlan) return idx; } -void igb_update_pf_vlvf(struct igb_adapter *adapter, u32 vid) +static void igb_update_pf_vlvf(struct igb_adapter *adapter, u32 vid) { struct e1000_hw *hw = &adapter->hw; u32 bits, pf_id; From fb5277f2c2e4db4a29740ff071072a688892d2df Mon Sep 17 00:00:00 2001 From: Denys Vlasenko Date: Wed, 20 Apr 2016 17:45:54 +0200 Subject: [PATCH 1534/1649] e1000e: e1000e_cyclecounter_read(): incvalue is 32 bits, not 64 "incvalue" variable holds a result of "er32(TIMINCA) & E1000_TIMINCA_INCVALUE_MASK" and used in "do_div(temp, incvalue)" as a divisor. Thus, "u64 incvalue" declaration is probably a mistake. Even though it seems to be a harmless one, let's fix it. 
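For context on why the divisor width matters: the kernel's do_div() helper divides a 64-bit dividend by a 32-bit divisor, overwriting the dividend with the quotient and returning the remainder, so a u64 incvalue only ever contributed its low 32 bits anyway. A minimal sketch of the call pattern, assuming the driver's er32() accessor and TIMINCA mask (an illustration, not the full e1000e_cyclecounter_read() body):

	u32 incvalue = er32(TIMINCA) & E1000_TIMINCA_INCVALUE_MASK;
	u64 temp = time_delta;		/* 64-bit dividend, becomes the quotient */
	u32 rem;

	rem = do_div(temp, incvalue);	/* remainder of time_delta / incvalue */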
Signed-off-by: Denys Vlasenko Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/e1000e/netdev.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 671256d32525..4969f647db88 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -4300,7 +4300,8 @@ static cycle_t e1000e_cyclecounter_read(const struct cyclecounter *cc) } if ((hw->mac.type == e1000_82574) || (hw->mac.type == e1000_82583)) { - u64 incvalue, time_delta, rem, temp; + u64 time_delta, rem, temp; + u32 incvalue; int i; /* errata for 82574/82583 possible bad bits read from SYSTIMH/L From a07fd74d5ea9c45a5c6e41f7cb4b997cf40d50f3 Mon Sep 17 00:00:00 2001 From: Denys Vlasenko Date: Wed, 20 Apr 2016 17:45:55 +0200 Subject: [PATCH 1535/1649] e1000e: e1000e_cyclecounter_read(): fix er32(SYSTIML) overflow check If two consecutive reads of the counter are the same, it is also not an overflow. "systimel_1 < systimel_2" should be "systimel_1 <= systimel_2". Before the patch, we could perform an *erroneous* correction: Let's say that systimel_1 == systimel_2 == 0xffffffff. "systimel_1 < systimel_2" is false, we think it's an overflow, we read "systimeh = er32(SYSTIMH)" which meanwhile had incremented, and use "(systimeh << 32) + systimel_2" value which is 2^32 too large. Signed-off-by: Denys Vlasenko CC: intel-wired-lan@lists.osuosl.org Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/e1000e/netdev.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 4969f647db88..02c64bcda71d 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -4287,7 +4287,7 @@ static cycle_t e1000e_cyclecounter_read(const struct cyclecounter *cc) systimeh = er32(SYSTIMH); systimel_2 = er32(SYSTIML); /* Check for overflow. If there was no overflow, use the values */ - if (systimel_1 < systimel_2) { + if (systimel_1 <= systimel_2) { systim = (cycle_t)systimel_1; systim |= (cycle_t)systimeh << 32; } else { From ab507c9a54ce3580e6a3829c9f4c24a13c32cbac Mon Sep 17 00:00:00 2001 From: Denys Vlasenko Date: Wed, 20 Apr 2016 17:45:56 +0200 Subject: [PATCH 1536/1649] e1000e: e1000e_cyclecounter_read(): do overflow check only if needed SYSTIMH:SYSTIML registers are incremented by 24-bit value TIMINCA[23..0] er32(SYSTIML) are probably moderately expensive (they are pci bus reads). Can we avoid one of them? Yes, we can. If the SYSTIML value we see is smaller than 0xff000000, the overflow into SYSTIMH would require at least two increments. We do two reads, er32(SYSTIML) and er32(SYSTIMH), in this order. Even if one increment happens between them, the overflow into SYSTIMH is impossible, and we can avoid doing another er32(SYSTIML) read and overflow check. 
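Put differently, the low half can only carry into the high half between the two reads if the first SYSTIML value was already within one maximum increment of wrapping. A condensed sketch of the resulting read sequence (read_reg(), REG_LO, REG_HI and MAX_INCREMENT are hypothetical stand-ins for the er32() accessors and the TIMINCA increment mask):

	u32 lo, hi;
	u64 systim;

	lo = read_reg(REG_LO);
	hi = read_reg(REG_HI);

	/* Only a nearly-wrapped low half can roll over into the high half
	 * between the two reads above, so the extra read is conditional.
	 */
	if (lo >= (u32)0xffffffff - MAX_INCREMENT) {
		u32 lo2 = read_reg(REG_LO);

		if (lo > lo2) {		/* wrap happened: the high half is stale */
			hi = read_reg(REG_HI);
			lo = lo2;
		}
	}
	systim = ((u64)hi << 32) | lo;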
Signed-off-by: Denys Vlasenko Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/e1000e/netdev.c | 28 +++++++++++----------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 02c64bcda71d..0d3c00deb160 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -4275,7 +4275,7 @@ static cycle_t e1000e_cyclecounter_read(const struct cyclecounter *cc) struct e1000_adapter *adapter = container_of(cc, struct e1000_adapter, cc); struct e1000_hw *hw = &adapter->hw; - u32 systimel_1, systimel_2, systimeh; + u32 systimel, systimeh; cycle_t systim, systim_next; /* SYSTIMH latching upon SYSTIML read does not work well. * This means that if SYSTIML overflows after we read it but before @@ -4283,21 +4283,21 @@ static cycle_t e1000e_cyclecounter_read(const struct cyclecounter *cc) * will experience a huge non linear increment in the systime value * to fix that we test for overflow and if true, we re-read systime. */ - systimel_1 = er32(SYSTIML); + systimel = er32(SYSTIML); systimeh = er32(SYSTIMH); - systimel_2 = er32(SYSTIML); - /* Check for overflow. If there was no overflow, use the values */ - if (systimel_1 <= systimel_2) { - systim = (cycle_t)systimel_1; - systim |= (cycle_t)systimeh << 32; - } else { - /* There was an overflow, read again SYSTIMH, and use - * systimel_2 - */ - systimeh = er32(SYSTIMH); - systim = (cycle_t)systimel_2; - systim |= (cycle_t)systimeh << 32; + /* Is systimel is so large that overflow is possible? */ + if (systimel >= (u32)0xffffffff - E1000_TIMINCA_INCVALUE_MASK) { + u32 systimel_2 = er32(SYSTIML); + if (systimel > systimel_2) { + /* There was an overflow, read again SYSTIMH, and use + * systimel_2 + */ + systimeh = er32(SYSTIMH); + systimel = systimel_2; + } } + systim = (cycle_t)systimel; + systim |= (cycle_t)systimeh << 32; if ((hw->mac.type == e1000_82574) || (hw->mac.type == e1000_82583)) { u64 time_delta, rem, temp; From 3f544d2a4d5c2d817cfee9e6302fc2909aaef155 Mon Sep 17 00:00:00 2001 From: Nathan Sullivan Date: Tue, 3 May 2016 18:10:56 -0500 Subject: [PATCH 1537/1649] igb: adjust PTP timestamps for Tx/Rx latency Table 7-62 on page 338 of the i210 datasheet lists TX and RX latencies for the various speeds the chip supports. To give better PTP timestamp accuracy, adjust the timestamps by the amounts Intel gives based on current link speed. 
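The per-speed constants from that table are added as IGB_I210_TX_LATENCY_* and IGB_I210_RX_LATENCY_* in igb.h below. A hypothetical helper illustrating the lookup the patch performs in both timestamp paths (the function name and the is_tx flag are illustrative only; the constants are the patch's own):

	static int igb_i210_ptp_latency(int link_speed, bool is_tx)
	{
		switch (link_speed) {
		case SPEED_10:
			return is_tx ? IGB_I210_TX_LATENCY_10 : IGB_I210_RX_LATENCY_10;
		case SPEED_100:
			return is_tx ? IGB_I210_TX_LATENCY_100 : IGB_I210_RX_LATENCY_100;
		case SPEED_1000:
			return is_tx ? IGB_I210_TX_LATENCY_1000 : IGB_I210_RX_LATENCY_1000;
		default:
			return 0;	/* unknown speed: leave the timestamp unadjusted */
		}
	}

The returned value, in nanoseconds, is then applied to the latched hardware timestamp with ktime_sub_ns()/ktime_add_ns() as in the hunks below.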
Signed-off-by: Nathan Sullivan Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/igb/igb.h | 8 ++++++ drivers/net/ethernet/intel/igb/igb_ptp.c | 36 ++++++++++++++++++++++++ 2 files changed, 44 insertions(+) diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h index 7a7bc31493a5..b9609afa5ca3 100644 --- a/drivers/net/ethernet/intel/igb/igb.h +++ b/drivers/net/ethernet/intel/igb/igb.h @@ -91,6 +91,14 @@ struct igb_adapter; #define NVM_COMB_VER_OFF 0x0083 #define NVM_COMB_VER_PTR 0x003d +/* Transmit and receive latency (for PTP timestamps) */ +#define IGB_I210_TX_LATENCY_10 9542 +#define IGB_I210_TX_LATENCY_100 1024 +#define IGB_I210_TX_LATENCY_1000 178 +#define IGB_I210_RX_LATENCY_10 20662 +#define IGB_I210_RX_LATENCY_100 2213 +#define IGB_I210_RX_LATENCY_1000 448 + struct vf_data_storage { unsigned char vf_mac_addresses[ETH_ALEN]; u16 vf_mc_hashes[IGB_MAX_VF_MC_ENTRIES]; diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c index fdb6dfd3ab77..f097c5a8ab93 100644 --- a/drivers/net/ethernet/intel/igb/igb_ptp.c +++ b/drivers/net/ethernet/intel/igb/igb_ptp.c @@ -722,11 +722,29 @@ static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter) struct e1000_hw *hw = &adapter->hw; struct skb_shared_hwtstamps shhwtstamps; u64 regval; + int adjust = 0; regval = rd32(E1000_TXSTMPL); regval |= (u64)rd32(E1000_TXSTMPH) << 32; igb_ptp_systim_to_hwtstamp(adapter, &shhwtstamps, regval); + /* adjust timestamp for the TX latency based on link speed */ + if (adapter->hw.mac.type == e1000_i210) { + switch (adapter->link_speed) { + case SPEED_10: + adjust = IGB_I210_TX_LATENCY_10; + break; + case SPEED_100: + adjust = IGB_I210_TX_LATENCY_100; + break; + case SPEED_1000: + adjust = IGB_I210_TX_LATENCY_1000; + break; + } + } + + shhwtstamps.hwtstamp = ktime_sub_ns(shhwtstamps.hwtstamp, adjust); + skb_tstamp_tx(adapter->ptp_tx_skb, &shhwtstamps); dev_kfree_skb_any(adapter->ptp_tx_skb); adapter->ptp_tx_skb = NULL; @@ -771,6 +789,7 @@ void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, struct igb_adapter *adapter = q_vector->adapter; struct e1000_hw *hw = &adapter->hw; u64 regval; + int adjust = 0; /* If this bit is set, then the RX registers contain the time stamp. No * other packet will be time stamped until we read these registers, so @@ -790,6 +809,23 @@ void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, igb_ptp_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval); + /* adjust timestamp for the RX latency based on link speed */ + if (adapter->hw.mac.type == e1000_i210) { + switch (adapter->link_speed) { + case SPEED_10: + adjust = IGB_I210_RX_LATENCY_10; + break; + case SPEED_100: + adjust = IGB_I210_RX_LATENCY_100; + break; + case SPEED_1000: + adjust = IGB_I210_RX_LATENCY_1000; + break; + } + } + skb_hwtstamps(skb)->hwtstamp = + ktime_add_ns(skb_hwtstamps(skb)->hwtstamp, adjust); + /* Update the last_rx_timestamp timer in order to enable watchdog check * for error case of latched timestamp on a dropped packet. */ From 12b28b41084aa61970fecb417c66c88dcce6afed Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Wed, 13 Apr 2016 16:08:30 -0700 Subject: [PATCH 1538/1649] igbvf: remove unused variable and dead code The variable rdlen is set but never used, and thus setting it is dead code. Remove it. 
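For completeness, this is exactly the pattern that GCC's -Wunused-but-set-variable warning reports; a trivial illustration unrelated to the driver:

	static void example(void)
	{
		unsigned int rdlen;

		rdlen = 16 * sizeof(int);	/* assigned here ... */
	}					/* ... but never read: the store is dead */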
Signed-off-by: Jacob Keller Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/igbvf/netdev.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c index c12442252adb..78af4c7716d3 100644 --- a/drivers/net/ethernet/intel/igbvf/netdev.c +++ b/drivers/net/ethernet/intel/igbvf/netdev.c @@ -1367,7 +1367,7 @@ static void igbvf_configure_rx(struct igbvf_adapter *adapter) struct e1000_hw *hw = &adapter->hw; struct igbvf_ring *rx_ring = adapter->rx_ring; u64 rdba; - u32 rdlen, rxdctl; + u32 rxdctl; /* disable receives */ rxdctl = er32(RXDCTL(0)); @@ -1375,8 +1375,6 @@ static void igbvf_configure_rx(struct igbvf_adapter *adapter) e1e_flush(); msleep(10); - rdlen = rx_ring->count * sizeof(union e1000_adv_rx_desc); - /* Setup the HW Rx Head and Tail Descriptor Pointers and * the Base and Length of the Rx Descriptor Ring */ From 0ed2dbf4f469e2e286d903ebc091edfd9be4d063 Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Wed, 13 Apr 2016 16:08:31 -0700 Subject: [PATCH 1539/1649] igbvf: use BIT() macro instead of shifts To prevent signed bitshift issues, and improve code readability, use the BIT() macro. Also make use of GENMASK or the unsigned postfix where this is more appropriate than BIT() Signed-off-by: Jacob Keller Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/igbvf/defines.h | 2 +- drivers/net/ethernet/intel/igbvf/ethtool.c | 3 ++- drivers/net/ethernet/intel/igbvf/igbvf.h | 4 ++-- drivers/net/ethernet/intel/igbvf/netdev.c | 10 +++++----- drivers/net/ethernet/intel/igbvf/vf.c | 2 +- 5 files changed, 11 insertions(+), 10 deletions(-) diff --git a/drivers/net/ethernet/intel/igbvf/defines.h b/drivers/net/ethernet/intel/igbvf/defines.h index ae3f28332fa0..ee1ef08d7fc4 100644 --- a/drivers/net/ethernet/intel/igbvf/defines.h +++ b/drivers/net/ethernet/intel/igbvf/defines.h @@ -113,7 +113,7 @@ #define E1000_RXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Rx Que */ /* Direct Cache Access (DCA) definitions */ -#define E1000_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */ +#define E1000_DCA_TXCTRL_TX_WB_RO_EN BIT(11) /* Tx Desc writeback RO bit */ #define E1000_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */ diff --git a/drivers/net/ethernet/intel/igbvf/ethtool.c b/drivers/net/ethernet/intel/igbvf/ethtool.c index b74ce53d7b52..8dea1b1367ef 100644 --- a/drivers/net/ethernet/intel/igbvf/ethtool.c +++ b/drivers/net/ethernet/intel/igbvf/ethtool.c @@ -154,7 +154,8 @@ static void igbvf_get_regs(struct net_device *netdev, memset(p, 0, IGBVF_REGS_LEN * sizeof(u32)); - regs->version = (1 << 24) | (adapter->pdev->revision << 16) | + regs->version = (1u << 24) | + (adapter->pdev->revision << 16) | adapter->pdev->device; regs_buff[0] = er32(CTRL); diff --git a/drivers/net/ethernet/intel/igbvf/igbvf.h b/drivers/net/ethernet/intel/igbvf/igbvf.h index f166baab8d7e..6f4290d6dc9f 100644 --- a/drivers/net/ethernet/intel/igbvf/igbvf.h +++ b/drivers/net/ethernet/intel/igbvf/igbvf.h @@ -287,8 +287,8 @@ struct igbvf_info { }; /* hardware capability, feature, and workaround flags */ -#define IGBVF_FLAG_RX_CSUM_DISABLED (1 << 0) -#define IGBVF_FLAG_RX_LB_VLAN_BSWAP (1 << 1) +#define IGBVF_FLAG_RX_CSUM_DISABLED BIT(0) +#define IGBVF_FLAG_RX_LB_VLAN_BSWAP BIT(1) #define IGBVF_RX_DESC_ADV(R, i) \ (&((((R).desc))[i].rx_desc)) #define IGBVF_TX_DESC_ADV(R, i) \ diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c index 78af4c7716d3..57894a80c469 
100644 --- a/drivers/net/ethernet/intel/igbvf/netdev.c +++ b/drivers/net/ethernet/intel/igbvf/netdev.c @@ -964,7 +964,7 @@ static void igbvf_assign_vector(struct igbvf_adapter *adapter, int rx_queue, ivar = ivar & 0xFFFFFF00; ivar |= msix_vector | E1000_IVAR_VALID; } - adapter->rx_ring[rx_queue].eims_value = 1 << msix_vector; + adapter->rx_ring[rx_queue].eims_value = BIT(msix_vector); array_ew32(IVAR0, index, ivar); } if (tx_queue > IGBVF_NO_QUEUE) { @@ -979,7 +979,7 @@ static void igbvf_assign_vector(struct igbvf_adapter *adapter, int rx_queue, ivar = ivar & 0xFFFF00FF; ivar |= (msix_vector | E1000_IVAR_VALID) << 8; } - adapter->tx_ring[tx_queue].eims_value = 1 << msix_vector; + adapter->tx_ring[tx_queue].eims_value = BIT(msix_vector); array_ew32(IVAR0, index, ivar); } } @@ -1014,8 +1014,8 @@ static void igbvf_configure_msix(struct igbvf_adapter *adapter) ew32(IVAR_MISC, tmp); - adapter->eims_enable_mask = (1 << (vector)) - 1; - adapter->eims_other = 1 << (vector - 1); + adapter->eims_enable_mask = GENMASK(vector - 1, 0); + adapter->eims_other = BIT(vector - 1); e1e_flush(); } @@ -2089,7 +2089,7 @@ static int igbvf_maybe_stop_tx(struct net_device *netdev, int size) } #define IGBVF_MAX_TXD_PWR 16 -#define IGBVF_MAX_DATA_PER_TXD (1 << IGBVF_MAX_TXD_PWR) +#define IGBVF_MAX_DATA_PER_TXD (1u << IGBVF_MAX_TXD_PWR) static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter, struct igbvf_ring *tx_ring, diff --git a/drivers/net/ethernet/intel/igbvf/vf.c b/drivers/net/ethernet/intel/igbvf/vf.c index a13baa90ae20..335ba6642145 100644 --- a/drivers/net/ethernet/intel/igbvf/vf.c +++ b/drivers/net/ethernet/intel/igbvf/vf.c @@ -266,7 +266,7 @@ static s32 e1000_set_vfta_vf(struct e1000_hw *hw, u16 vid, bool set) msgbuf[1] = vid; /* Setting the 8 bit field MSG INFO to true indicates "add" */ if (set) - msgbuf[0] |= 1 << E1000_VT_MSGINFO_SHIFT; + msgbuf[0] |= BIT(E1000_VT_MSGINFO_SHIFT); mbx->ops.write_posted(hw, msgbuf, 2); From 18dd23920703891c39c7965873f8ae369bd3a237 Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Wed, 13 Apr 2016 16:08:32 -0700 Subject: [PATCH 1540/1649] e1000e: use BIT() macro for bit defines This prevents signed bitshift issues when the shift would overwrite the signed bit, and prevents making this mistake in the future when copying and modifying code. Use GENMASK or the unsigned postfix for cases which aren't suitable for BIT() macro. 
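In kernel terms, BIT(nr) expands to an unsigned-long shift and GENMASK(h, l) builds a contiguous mask covering bits h down to l, which is why they sidestep the undefined behaviour of shifting a plain int into its sign bit (e.g. 1 << 31). A small self-contained approximation of the two helpers for readers without the kernel headers at hand (the real definitions live in the kernel's bit-manipulation headers):

	#include <stdio.h>

	#define BIT(nr)		(1UL << (nr))
	#define GENMASK(h, l)	(((~0UL) << (l)) & (~0UL >> (8 * sizeof(long) - 1 - (h))))

	int main(void)
	{
		/* 1 << 31 would shift into the sign bit of a signed int;
		 * BIT(31) stays in unsigned arithmetic.
		 */
		printf("BIT(31)        = 0x%lx\n", BIT(31));

		/* Bits 23..0 set: the GENMASK() form of a ((1 << 24) - 1) mask. */
		printf("GENMASK(23, 0) = 0x%lx\n", GENMASK(23, 0));
		return 0;
	}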
Signed-off-by: Jacob Keller Signed-off-by: Jeff Kirsher --- .../net/ethernet/intel/e1000e/80003es2lan.c | 12 +- drivers/net/ethernet/intel/e1000e/82571.c | 30 ++--- drivers/net/ethernet/intel/e1000e/e1000.h | 106 +++++++++--------- drivers/net/ethernet/intel/e1000e/ethtool.c | 43 +++---- drivers/net/ethernet/intel/e1000e/ich8lan.c | 44 ++++---- drivers/net/ethernet/intel/e1000e/mac.c | 2 +- drivers/net/ethernet/intel/e1000e/netdev.c | 28 ++--- drivers/net/ethernet/intel/e1000e/nvm.c | 2 +- drivers/net/ethernet/intel/e1000e/phy.c | 4 +- drivers/net/ethernet/intel/e1000e/phy.h | 10 +- 10 files changed, 141 insertions(+), 140 deletions(-) diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/drivers/net/ethernet/intel/e1000e/80003es2lan.c index 2af603f3e418..cd391376036c 100644 --- a/drivers/net/ethernet/intel/e1000e/80003es2lan.c +++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.c @@ -121,7 +121,7 @@ static s32 e1000_init_nvm_params_80003es2lan(struct e1000_hw *hw) /* EEPROM access above 16k is unsupported */ if (size > 14) size = 14; - nvm->word_size = 1 << size; + nvm->word_size = BIT(size); return 0; } @@ -845,27 +845,27 @@ static void e1000_initialize_hw_bits_80003es2lan(struct e1000_hw *hw) /* Transmit Descriptor Control 0 */ reg = er32(TXDCTL(0)); - reg |= (1 << 22); + reg |= BIT(22); ew32(TXDCTL(0), reg); /* Transmit Descriptor Control 1 */ reg = er32(TXDCTL(1)); - reg |= (1 << 22); + reg |= BIT(22); ew32(TXDCTL(1), reg); /* Transmit Arbitration Control 0 */ reg = er32(TARC(0)); reg &= ~(0xF << 27); /* 30:27 */ if (hw->phy.media_type != e1000_media_type_copper) - reg &= ~(1 << 20); + reg &= ~BIT(20); ew32(TARC(0), reg); /* Transmit Arbitration Control 1 */ reg = er32(TARC(1)); if (er32(TCTL) & E1000_TCTL_MULR) - reg &= ~(1 << 28); + reg &= ~BIT(28); else - reg |= (1 << 28); + reg |= BIT(28); ew32(TARC(1), reg); /* Disable IPv6 extension header parsing because some malformed diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c index 5f7016442ec4..7fd4d54599e4 100644 --- a/drivers/net/ethernet/intel/e1000e/82571.c +++ b/drivers/net/ethernet/intel/e1000e/82571.c @@ -185,7 +185,7 @@ static s32 e1000_init_nvm_params_82571(struct e1000_hw *hw) /* EEPROM access above 16k is unsupported */ if (size > 14) size = 14; - nvm->word_size = 1 << size; + nvm->word_size = BIT(size); break; } @@ -1163,12 +1163,12 @@ static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw) /* Transmit Descriptor Control 0 */ reg = er32(TXDCTL(0)); - reg |= (1 << 22); + reg |= BIT(22); ew32(TXDCTL(0), reg); /* Transmit Descriptor Control 1 */ reg = er32(TXDCTL(1)); - reg |= (1 << 22); + reg |= BIT(22); ew32(TXDCTL(1), reg); /* Transmit Arbitration Control 0 */ @@ -1177,11 +1177,11 @@ static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw) switch (hw->mac.type) { case e1000_82571: case e1000_82572: - reg |= (1 << 23) | (1 << 24) | (1 << 25) | (1 << 26); + reg |= BIT(23) | BIT(24) | BIT(25) | BIT(26); break; case e1000_82574: case e1000_82583: - reg |= (1 << 26); + reg |= BIT(26); break; default: break; @@ -1193,12 +1193,12 @@ static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw) switch (hw->mac.type) { case e1000_82571: case e1000_82572: - reg &= ~((1 << 29) | (1 << 30)); - reg |= (1 << 22) | (1 << 24) | (1 << 25) | (1 << 26); + reg &= ~(BIT(29) | BIT(30)); + reg |= BIT(22) | BIT(24) | BIT(25) | BIT(26); if (er32(TCTL) & E1000_TCTL_MULR) - reg &= ~(1 << 28); + reg &= ~BIT(28); else - reg |= (1 << 28); + reg |= BIT(28); ew32(TARC(1), reg); 
break; default: @@ -1211,7 +1211,7 @@ static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw) case e1000_82574: case e1000_82583: reg = er32(CTRL); - reg &= ~(1 << 29); + reg &= ~BIT(29); ew32(CTRL, reg); break; default: @@ -1224,8 +1224,8 @@ static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw) case e1000_82574: case e1000_82583: reg = er32(CTRL_EXT); - reg &= ~(1 << 23); - reg |= (1 << 22); + reg &= ~BIT(23); + reg |= BIT(22); ew32(CTRL_EXT, reg); break; default: @@ -1261,7 +1261,7 @@ static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw) case e1000_82574: case e1000_82583: reg = er32(GCR); - reg |= (1 << 22); + reg |= BIT(22); ew32(GCR, reg); /* Workaround for hardware errata. @@ -1308,8 +1308,8 @@ static void e1000_clear_vfta_82571(struct e1000_hw *hw) E1000_VFTA_ENTRY_SHIFT) & E1000_VFTA_ENTRY_MASK; vfta_bit_in_reg = - 1 << (hw->mng_cookie.vlan_id & - E1000_VFTA_ENTRY_BIT_SHIFT_MASK); + BIT(hw->mng_cookie.vlan_id & + E1000_VFTA_ENTRY_BIT_SHIFT_MASK); } break; default: diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h index 52eb641fc9dc..010e6d61c855 100644 --- a/drivers/net/ethernet/intel/e1000e/e1000.h +++ b/drivers/net/ethernet/intel/e1000e/e1000.h @@ -109,18 +109,18 @@ struct e1000_info; #define E1000_TXDCTL_DMA_BURST_ENABLE \ (E1000_TXDCTL_GRAN | /* set descriptor granularity */ \ E1000_TXDCTL_COUNT_DESC | \ - (1 << 16) | /* wthresh must be +1 more than desired */\ - (1 << 8) | /* hthresh */ \ - 0x1f) /* pthresh */ + (1u << 16) | /* wthresh must be +1 more than desired */\ + (1u << 8) | /* hthresh */ \ + 0x1f) /* pthresh */ #define E1000_RXDCTL_DMA_BURST_ENABLE \ (0x01000000 | /* set descriptor granularity */ \ - (4 << 16) | /* set writeback threshold */ \ - (4 << 8) | /* set prefetch threshold */ \ + (4u << 16) | /* set writeback threshold */ \ + (4u << 8) | /* set prefetch threshold */ \ 0x20) /* set hthresh */ -#define E1000_TIDV_FPD (1 << 31) -#define E1000_RDTR_FPD (1 << 31) +#define E1000_TIDV_FPD BIT(31) +#define E1000_RDTR_FPD BIT(31) enum e1000_boards { board_82571, @@ -404,53 +404,53 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca); #define E1000_82574_SYSTIM_EPSILON (1ULL << 35ULL) /* hardware capability, feature, and workaround flags */ -#define FLAG_HAS_AMT (1 << 0) -#define FLAG_HAS_FLASH (1 << 1) -#define FLAG_HAS_HW_VLAN_FILTER (1 << 2) -#define FLAG_HAS_WOL (1 << 3) -/* reserved bit4 */ -#define FLAG_HAS_CTRLEXT_ON_LOAD (1 << 5) -#define FLAG_HAS_SWSM_ON_LOAD (1 << 6) -#define FLAG_HAS_JUMBO_FRAMES (1 << 7) -#define FLAG_READ_ONLY_NVM (1 << 8) -#define FLAG_IS_ICH (1 << 9) -#define FLAG_HAS_MSIX (1 << 10) -#define FLAG_HAS_SMART_POWER_DOWN (1 << 11) -#define FLAG_IS_QUAD_PORT_A (1 << 12) -#define FLAG_IS_QUAD_PORT (1 << 13) -#define FLAG_HAS_HW_TIMESTAMP (1 << 14) -#define FLAG_APME_IN_WUC (1 << 15) -#define FLAG_APME_IN_CTRL3 (1 << 16) -#define FLAG_APME_CHECK_PORT_B (1 << 17) -#define FLAG_DISABLE_FC_PAUSE_TIME (1 << 18) -#define FLAG_NO_WAKE_UCAST (1 << 19) -#define FLAG_MNG_PT_ENABLED (1 << 20) -#define FLAG_RESET_OVERWRITES_LAA (1 << 21) -#define FLAG_TARC_SPEED_MODE_BIT (1 << 22) -#define FLAG_TARC_SET_BIT_ZERO (1 << 23) -#define FLAG_RX_NEEDS_RESTART (1 << 24) -#define FLAG_LSC_GIG_SPEED_DROP (1 << 25) -#define FLAG_SMART_POWER_DOWN (1 << 26) -#define FLAG_MSI_ENABLED (1 << 27) -/* reserved (1 << 28) */ -#define FLAG_TSO_FORCE (1 << 29) -#define FLAG_RESTART_NOW (1 << 30) -#define FLAG_MSI_TEST_FAILED (1 << 31) +#define FLAG_HAS_AMT BIT(0) +#define 
FLAG_HAS_FLASH BIT(1) +#define FLAG_HAS_HW_VLAN_FILTER BIT(2) +#define FLAG_HAS_WOL BIT(3) +/* reserved BIT(4) */ +#define FLAG_HAS_CTRLEXT_ON_LOAD BIT(5) +#define FLAG_HAS_SWSM_ON_LOAD BIT(6) +#define FLAG_HAS_JUMBO_FRAMES BIT(7) +#define FLAG_READ_ONLY_NVM BIT(8) +#define FLAG_IS_ICH BIT(9) +#define FLAG_HAS_MSIX BIT(10) +#define FLAG_HAS_SMART_POWER_DOWN BIT(11) +#define FLAG_IS_QUAD_PORT_A BIT(12) +#define FLAG_IS_QUAD_PORT BIT(13) +#define FLAG_HAS_HW_TIMESTAMP BIT(14) +#define FLAG_APME_IN_WUC BIT(15) +#define FLAG_APME_IN_CTRL3 BIT(16) +#define FLAG_APME_CHECK_PORT_B BIT(17) +#define FLAG_DISABLE_FC_PAUSE_TIME BIT(18) +#define FLAG_NO_WAKE_UCAST BIT(19) +#define FLAG_MNG_PT_ENABLED BIT(20) +#define FLAG_RESET_OVERWRITES_LAA BIT(21) +#define FLAG_TARC_SPEED_MODE_BIT BIT(22) +#define FLAG_TARC_SET_BIT_ZERO BIT(23) +#define FLAG_RX_NEEDS_RESTART BIT(24) +#define FLAG_LSC_GIG_SPEED_DROP BIT(25) +#define FLAG_SMART_POWER_DOWN BIT(26) +#define FLAG_MSI_ENABLED BIT(27) +/* reserved BIT(28) */ +#define FLAG_TSO_FORCE BIT(29) +#define FLAG_RESTART_NOW BIT(30) +#define FLAG_MSI_TEST_FAILED BIT(31) -#define FLAG2_CRC_STRIPPING (1 << 0) -#define FLAG2_HAS_PHY_WAKEUP (1 << 1) -#define FLAG2_IS_DISCARDING (1 << 2) -#define FLAG2_DISABLE_ASPM_L1 (1 << 3) -#define FLAG2_HAS_PHY_STATS (1 << 4) -#define FLAG2_HAS_EEE (1 << 5) -#define FLAG2_DMA_BURST (1 << 6) -#define FLAG2_DISABLE_ASPM_L0S (1 << 7) -#define FLAG2_DISABLE_AIM (1 << 8) -#define FLAG2_CHECK_PHY_HANG (1 << 9) -#define FLAG2_NO_DISABLE_RX (1 << 10) -#define FLAG2_PCIM2PCI_ARBITER_WA (1 << 11) -#define FLAG2_DFLT_CRC_STRIPPING (1 << 12) -#define FLAG2_CHECK_RX_HWTSTAMP (1 << 13) +#define FLAG2_CRC_STRIPPING BIT(0) +#define FLAG2_HAS_PHY_WAKEUP BIT(1) +#define FLAG2_IS_DISCARDING BIT(2) +#define FLAG2_DISABLE_ASPM_L1 BIT(3) +#define FLAG2_HAS_PHY_STATS BIT(4) +#define FLAG2_HAS_EEE BIT(5) +#define FLAG2_DMA_BURST BIT(6) +#define FLAG2_DISABLE_ASPM_L0S BIT(7) +#define FLAG2_DISABLE_AIM BIT(8) +#define FLAG2_CHECK_PHY_HANG BIT(9) +#define FLAG2_NO_DISABLE_RX BIT(10) +#define FLAG2_PCIM2PCI_ARBITER_WA BIT(11) +#define FLAG2_DFLT_CRC_STRIPPING BIT(12) +#define FLAG2_CHECK_RX_HWTSTAMP BIT(13) #define E1000_RX_DESC_PS(R, i) \ (&(((union e1000_rx_desc_packet_split *)((R).desc))[i])) diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c index 83a815b501ed..7aff68a4a4df 100644 --- a/drivers/net/ethernet/intel/e1000e/ethtool.c +++ b/drivers/net/ethernet/intel/e1000e/ethtool.c @@ -447,8 +447,9 @@ static void e1000_get_regs(struct net_device *netdev, memset(p, 0, E1000_REGS_LEN * sizeof(u32)); - regs->version = (1 << 24) | (adapter->pdev->revision << 16) | - adapter->pdev->device; + regs->version = (1u << 24) | + (adapter->pdev->revision << 16) | + adapter->pdev->device; regs_buff[0] = er32(CTRL); regs_buff[1] = er32(STATUS); @@ -903,7 +904,7 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data) case e1000_pch2lan: case e1000_pch_lpt: case e1000_pch_spt: - mask |= (1 << 18); + mask |= BIT(18); break; default: break; @@ -922,9 +923,9 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data) /* SHRAH[9] different than the others */ if (i == 10) - mask |= (1 << 30); + mask |= BIT(30); else - mask &= ~(1 << 30); + mask &= ~BIT(30); } if (mac->type == e1000_pch2lan) { /* SHRAH[0,1,2] different than previous */ @@ -932,7 +933,7 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data) mask &= 0xFFF4FFFF; /* SHRAH[3] different than SHRAH[0,1,2] */ if (i == 4) - mask 
|= (1 << 30); + mask |= BIT(30); /* RAR[1-6] owned by management engine - skipping */ if (i > 0) i += 6; @@ -1027,7 +1028,7 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data) /* Test each interrupt */ for (i = 0; i < 10; i++) { /* Interrupt to test */ - mask = 1 << i; + mask = BIT(i); if (adapter->flags & FLAG_IS_ICH) { switch (mask) { @@ -1395,7 +1396,7 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter) case e1000_phy_82579: /* Disable PHY energy detect power down */ e1e_rphy(hw, PHY_REG(0, 21), &phy_reg); - e1e_wphy(hw, PHY_REG(0, 21), phy_reg & ~(1 << 3)); + e1e_wphy(hw, PHY_REG(0, 21), phy_reg & ~BIT(3)); /* Disable full chip energy detect */ e1e_rphy(hw, PHY_REG(776, 18), &phy_reg); e1e_wphy(hw, PHY_REG(776, 18), phy_reg | 1); @@ -1461,7 +1462,7 @@ static int e1000_set_82571_fiber_loopback(struct e1000_adapter *adapter) /* disable autoneg */ ctrl = er32(TXCW); - ctrl &= ~(1 << 31); + ctrl &= ~BIT(31); ew32(TXCW, ctrl); link = (er32(STATUS) & E1000_STATUS_LU); @@ -2291,19 +2292,19 @@ static int e1000e_get_ts_info(struct net_device *netdev, SOF_TIMESTAMPING_RX_HARDWARE | SOF_TIMESTAMPING_RAW_HARDWARE); - info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON); + info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON); - info->rx_filters = ((1 << HWTSTAMP_FILTER_NONE) | - (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) | - (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) | - (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) | - (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) | - (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) | - (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) | - (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) | - (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) | - (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) | - (1 << HWTSTAMP_FILTER_ALL)); + info->rx_filters = (BIT(HWTSTAMP_FILTER_NONE) | + BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) | + BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) | + BIT(HWTSTAMP_FILTER_PTP_V2_L4_SYNC) | + BIT(HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) | + BIT(HWTSTAMP_FILTER_PTP_V2_L2_SYNC) | + BIT(HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) | + BIT(HWTSTAMP_FILTER_PTP_V2_EVENT) | + BIT(HWTSTAMP_FILTER_PTP_V2_SYNC) | + BIT(HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) | + BIT(HWTSTAMP_FILTER_ALL)); if (adapter->ptp_clock) info->phc_index = ptp_clock_index(adapter->ptp_clock); diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c index c0f4887ea44d..3e11322d8d58 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.c +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c @@ -1048,7 +1048,7 @@ static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link) while (value > PCI_LTR_VALUE_MASK) { scale++; - value = DIV_ROUND_UP(value, (1 << 5)); + value = DIV_ROUND_UP(value, BIT(5)); } if (scale > E1000_LTRV_SCALE_MAX) { e_dbg("Invalid LTR latency scale %d\n", scale); @@ -1573,7 +1573,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) phy_reg &= ~HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK; if ((er32(STATUS) & E1000_STATUS_FD) != E1000_STATUS_FD) - phy_reg |= (1 << HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT); + phy_reg |= BIT(HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT); e1e_wphy(hw, HV_KMRN_FIFO_CTRLSTA, phy_reg); break; @@ -2044,9 +2044,9 @@ static s32 e1000_write_smbus_addr(struct e1000_hw *hw) /* Restore SMBus frequency */ if (freq--) { phy_data &= ~HV_SMB_ADDR_FREQ_MASK; - phy_data |= (freq & (1 << 0)) << + phy_data |= (freq & BIT(0)) << HV_SMB_ADDR_FREQ_LOW_SHIFT; - phy_data |= (freq & (1 << 1)) << + phy_data |= (freq & BIT(1)) << 
(HV_SMB_ADDR_FREQ_HIGH_SHIFT - 1); } else { e_dbg("Unsupported SMB frequency in PHY\n"); @@ -2530,7 +2530,7 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable) /* disable Rx path while enabling/disabling workaround */ e1e_rphy(hw, PHY_REG(769, 20), &phy_reg); - ret_val = e1e_wphy(hw, PHY_REG(769, 20), phy_reg | (1 << 14)); + ret_val = e1e_wphy(hw, PHY_REG(769, 20), phy_reg | BIT(14)); if (ret_val) return ret_val; @@ -2561,7 +2561,7 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable) /* Enable jumbo frame workaround in the MAC */ mac_reg = er32(FFLT_DBG); - mac_reg &= ~(1 << 14); + mac_reg &= ~BIT(14); mac_reg |= (7 << 15); ew32(FFLT_DBG, mac_reg); @@ -2576,7 +2576,7 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable) return ret_val; ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_CTRL_OFFSET, - data | (1 << 0)); + data | BIT(0)); if (ret_val) return ret_val; ret_val = e1000e_read_kmrn_reg(hw, @@ -2600,7 +2600,7 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable) if (ret_val) return ret_val; e1e_rphy(hw, PHY_REG(769, 16), &data); - data &= ~(1 << 13); + data &= ~BIT(13); ret_val = e1e_wphy(hw, PHY_REG(769, 16), data); if (ret_val) return ret_val; @@ -2614,7 +2614,7 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable) if (ret_val) return ret_val; e1e_rphy(hw, HV_PM_CTRL, &data); - ret_val = e1e_wphy(hw, HV_PM_CTRL, data | (1 << 10)); + ret_val = e1e_wphy(hw, HV_PM_CTRL, data | BIT(10)); if (ret_val) return ret_val; } else { @@ -2634,7 +2634,7 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable) return ret_val; ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_CTRL_OFFSET, - data & ~(1 << 0)); + data & ~BIT(0)); if (ret_val) return ret_val; ret_val = e1000e_read_kmrn_reg(hw, @@ -2657,7 +2657,7 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable) if (ret_val) return ret_val; e1e_rphy(hw, PHY_REG(769, 16), &data); - data |= (1 << 13); + data |= BIT(13); ret_val = e1e_wphy(hw, PHY_REG(769, 16), data); if (ret_val) return ret_val; @@ -2671,13 +2671,13 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable) if (ret_val) return ret_val; e1e_rphy(hw, HV_PM_CTRL, &data); - ret_val = e1e_wphy(hw, HV_PM_CTRL, data & ~(1 << 10)); + ret_val = e1e_wphy(hw, HV_PM_CTRL, data & ~BIT(10)); if (ret_val) return ret_val; } /* re-enable Rx path after enabling/disabling workaround */ - return e1e_wphy(hw, PHY_REG(769, 20), phy_reg & ~(1 << 14)); + return e1e_wphy(hw, PHY_REG(769, 20), phy_reg & ~BIT(14)); } /** @@ -4841,7 +4841,7 @@ static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw) /* Extended Device Control */ reg = er32(CTRL_EXT); - reg |= (1 << 22); + reg |= BIT(22); /* Enable PHY low-power state when MAC is at D3 w/o WoL */ if (hw->mac.type >= e1000_pchlan) reg |= E1000_CTRL_EXT_PHYPDEN; @@ -4849,34 +4849,34 @@ static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw) /* Transmit Descriptor Control 0 */ reg = er32(TXDCTL(0)); - reg |= (1 << 22); + reg |= BIT(22); ew32(TXDCTL(0), reg); /* Transmit Descriptor Control 1 */ reg = er32(TXDCTL(1)); - reg |= (1 << 22); + reg |= BIT(22); ew32(TXDCTL(1), reg); /* Transmit Arbitration Control 0 */ reg = er32(TARC(0)); if (hw->mac.type == e1000_ich8lan) - reg |= (1 << 28) | (1 << 29); - reg |= (1 << 23) | (1 << 24) | (1 << 26) | (1 << 27); + reg |= BIT(28) | BIT(29); + reg |= BIT(23) | BIT(24) | BIT(26) | BIT(27); ew32(TARC(0), reg); /* Transmit Arbitration 
Control 1 */ reg = er32(TARC(1)); if (er32(TCTL) & E1000_TCTL_MULR) - reg &= ~(1 << 28); + reg &= ~BIT(28); else - reg |= (1 << 28); - reg |= (1 << 24) | (1 << 26) | (1 << 30); + reg |= BIT(28); + reg |= BIT(24) | BIT(26) | BIT(30); ew32(TARC(1), reg); /* Device Status */ if (hw->mac.type == e1000_ich8lan) { reg = er32(STATUS); - reg &= ~(1 << 31); + reg &= ~BIT(31); ew32(STATUS, reg); } diff --git a/drivers/net/ethernet/intel/e1000e/mac.c b/drivers/net/ethernet/intel/e1000e/mac.c index e59d7c283cd4..b322011ec282 100644 --- a/drivers/net/ethernet/intel/e1000e/mac.c +++ b/drivers/net/ethernet/intel/e1000e/mac.c @@ -346,7 +346,7 @@ void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw, hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1); hash_bit = hash_value & 0x1F; - hw->mac.mta_shadow[hash_reg] |= (1 << hash_bit); + hw->mac.mta_shadow[hash_reg] |= BIT(hash_bit); mc_addr_list += (ETH_ALEN); } diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 0d3c00deb160..c597398f2922 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -317,8 +317,8 @@ static void e1000e_dump(struct e1000_adapter *adapter) else next_desc = ""; pr_info("T%c[0x%03X] %016llX %016llX %016llX %04X %3X %016llX %p%s\n", - (!(le64_to_cpu(u0->b) & (1 << 29)) ? 'l' : - ((le64_to_cpu(u0->b) & (1 << 20)) ? 'd' : 'c')), + (!(le64_to_cpu(u0->b) & BIT(29)) ? 'l' : + ((le64_to_cpu(u0->b) & BIT(20)) ? 'd' : 'c')), i, (unsigned long long)le64_to_cpu(u0->a), (unsigned long long)le64_to_cpu(u0->b), @@ -2018,7 +2018,7 @@ static void e1000_configure_msix(struct e1000_adapter *adapter) adapter->eiac_mask |= E1000_IMS_OTHER; /* Cause Tx interrupts on every write back */ - ivar |= (1 << 31); + ivar |= BIT(31); ew32(IVAR, ivar); @@ -2709,7 +2709,7 @@ static int e1000_vlan_rx_add_vid(struct net_device *netdev, if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) { index = (vid >> 5) & 0x7F; vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index); - vfta |= (1 << (vid & 0x1F)); + vfta |= BIT((vid & 0x1F)); hw->mac.ops.write_vfta(hw, index, vfta); } @@ -2737,7 +2737,7 @@ static int e1000_vlan_rx_kill_vid(struct net_device *netdev, if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) { index = (vid >> 5) & 0x7F; vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index); - vfta &= ~(1 << (vid & 0x1F)); + vfta &= ~BIT((vid & 0x1F)); hw->mac.ops.write_vfta(hw, index, vfta); } @@ -2878,7 +2878,7 @@ static void e1000_init_manageability_pt(struct e1000_adapter *adapter) /* Enable this decision filter in MANC2H */ if (mdef) - manc2h |= (1 << i); + manc2h |= BIT(i); j |= mdef; } @@ -2891,7 +2891,7 @@ static void e1000_init_manageability_pt(struct e1000_adapter *adapter) if (er32(MDEF(i)) == 0) { ew32(MDEF(i), (E1000_MDEF_PORT_623 | E1000_MDEF_PORT_664)); - manc2h |= (1 << 1); + manc2h |= BIT(1); j++; break; } @@ -2971,7 +2971,7 @@ static void e1000_configure_tx(struct e1000_adapter *adapter) /* set the speed mode bit, we'll clear it if we're not at * gigabit link later */ -#define SPEED_MODE_BIT (1 << 21) +#define SPEED_MODE_BIT BIT(21) tarc |= SPEED_MODE_BIT; ew32(TARC(0), tarc); } @@ -3071,12 +3071,12 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter) e1e_rphy(hw, PHY_REG(770, 26), &phy_data); phy_data &= 0xfff8; - phy_data |= (1 << 2); + phy_data |= BIT(2); e1e_wphy(hw, PHY_REG(770, 26), phy_data); e1e_rphy(hw, 22, &phy_data); phy_data &= 0x0fff; - phy_data |= (1 << 14); + phy_data |= BIT(14); e1e_wphy(hw, 0x10, 0x2823); e1e_wphy(hw, 0x11, 
0x0003); e1e_wphy(hw, 22, phy_data); @@ -3503,8 +3503,8 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca) !(er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_ENABLED)) { u32 fextnvm7 = er32(FEXTNVM7); - if (!(fextnvm7 & (1 << 0))) { - ew32(FEXTNVM7, fextnvm7 | (1 << 0)); + if (!(fextnvm7 & BIT(0))) { + ew32(FEXTNVM7, fextnvm7 | BIT(0)); e1e_flush(); } } @@ -3839,7 +3839,7 @@ static void e1000_flush_rx_ring(struct e1000_adapter *adapter) /* update thresholds: prefetch threshold to 31, host threshold to 1 * and make sure the granularity is "descriptors" and not "cache lines" */ - rxdctl |= (0x1F | (1 << 8) | E1000_RXDCTL_THRESH_UNIT_DESC); + rxdctl |= (0x1F | BIT(8) | E1000_RXDCTL_THRESH_UNIT_DESC); ew32(RXDCTL(0), rxdctl); /* momentarily enable the RX ring for the changes to take effect */ @@ -6862,7 +6862,7 @@ static void e1000_eeprom_checks(struct e1000_adapter *adapter) ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &buf); le16_to_cpus(&buf); - if (!ret_val && (!(buf & (1 << 0)))) { + if (!ret_val && (!(buf & BIT(0)))) { /* Deep Smart Power Down (DSPD) */ dev_warn(&adapter->pdev->dev, "Warning: detected DSPD enabled in EEPROM\n"); diff --git a/drivers/net/ethernet/intel/e1000e/nvm.c b/drivers/net/ethernet/intel/e1000e/nvm.c index 49f205c023bf..2efd80dfd88e 100644 --- a/drivers/net/ethernet/intel/e1000e/nvm.c +++ b/drivers/net/ethernet/intel/e1000e/nvm.c @@ -67,7 +67,7 @@ static void e1000_shift_out_eec_bits(struct e1000_hw *hw, u16 data, u16 count) u32 eecd = er32(EECD); u32 mask; - mask = 0x01 << (count - 1); + mask = BIT(count - 1); if (nvm->type == e1000_nvm_eeprom_spi) eecd |= E1000_EECD_DO; diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c index de13aeacae97..d78d47b41a71 100644 --- a/drivers/net/ethernet/intel/e1000e/phy.c +++ b/drivers/net/ethernet/intel/e1000e/phy.c @@ -2894,11 +2894,11 @@ static s32 __e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data, if ((hw->phy.type == e1000_phy_82578) && (hw->phy.revision >= 1) && (hw->phy.addr == 2) && - !(MAX_PHY_REG_ADDRESS & reg) && (data & (1 << 11))) { + !(MAX_PHY_REG_ADDRESS & reg) && (data & BIT(11))) { u16 data2 = 0x7EFF; ret_val = e1000_access_phy_debug_regs_hv(hw, - (1 << 6) | 0x3, + BIT(6) | 0x3, &data2, false); if (ret_val) goto out; diff --git a/drivers/net/ethernet/intel/e1000e/phy.h b/drivers/net/ethernet/intel/e1000e/phy.h index 55bfe473514d..3027f63ee793 100644 --- a/drivers/net/ethernet/intel/e1000e/phy.h +++ b/drivers/net/ethernet/intel/e1000e/phy.h @@ -104,9 +104,9 @@ s32 e1000_get_cable_length_82577(struct e1000_hw *hw); #define BM_WUC_DATA_OPCODE 0x12 #define BM_WUC_ENABLE_PAGE BM_PORT_CTRL_PAGE #define BM_WUC_ENABLE_REG 17 -#define BM_WUC_ENABLE_BIT (1 << 2) -#define BM_WUC_HOST_WU_BIT (1 << 4) -#define BM_WUC_ME_WU_BIT (1 << 5) +#define BM_WUC_ENABLE_BIT BIT(2) +#define BM_WUC_HOST_WU_BIT BIT(4) +#define BM_WUC_ME_WU_BIT BIT(5) #define PHY_UPPER_SHIFT 21 #define BM_PHY_REG(page, reg) \ @@ -124,8 +124,8 @@ s32 e1000_get_cable_length_82577(struct e1000_hw *hw); #define I82578_ADDR_REG 29 #define I82577_ADDR_REG 16 #define I82577_CFG_REG 22 -#define I82577_CFG_ASSERT_CRS_ON_TX (1 << 15) -#define I82577_CFG_ENABLE_DOWNSHIFT (3 << 10) /* auto downshift */ +#define I82577_CFG_ASSERT_CRS_ON_TX BIT(15) +#define I82577_CFG_ENABLE_DOWNSHIFT (3u << 10) /* auto downshift */ #define I82577_CTRL_REG 23 /* 82577 specific PHY registers */ From 942c711206d1e0cd3dffc591829cbcbb9bcc0b1b Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Wed, 13 Apr 2016 
16:08:33 -0700 Subject: [PATCH 1541/1649] e1000e: mark shifted values as unsigned The E1000_ICH_NVM_SIG_MASK value is shifted, out to the 31st bit, which is the signed bit for signed constants. Mark these values as unsigned to prevent compiler warnings and issues on platforms which have a different signed bit implementation. Signed-off-by: Jacob Keller Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/e1000e/ich8lan.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h index 2311f6003f58..67163ca898ba 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.h +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h @@ -73,10 +73,10 @@ (ID_LED_OFF1_ON2 << 4) | \ (ID_LED_DEF1_DEF2)) -#define E1000_ICH_NVM_SIG_WORD 0x13 -#define E1000_ICH_NVM_SIG_MASK 0xC000 -#define E1000_ICH_NVM_VALID_SIG_MASK 0xC0 -#define E1000_ICH_NVM_SIG_VALUE 0x80 +#define E1000_ICH_NVM_SIG_WORD 0x13u +#define E1000_ICH_NVM_SIG_MASK 0xC000u +#define E1000_ICH_NVM_VALID_SIG_MASK 0xC0u +#define E1000_ICH_NVM_SIG_VALUE 0x80u #define E1000_ICH8_LAN_INIT_TIMEOUT 1500 From e10715d3e9618901c5ef820a92e6a8e6548b43d3 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Thu, 14 Apr 2016 17:19:38 -0400 Subject: [PATCH 1542/1649] igb/igbvf: Add support for GSO partial This patch adds support for partial GSO segmentation in the case of tunnels. Specifically with this change the driver can perform segmentation as long as the frame either has IPv6 inner headers, or we are allowed to mangle the IP IDs on the inner header. This is needed because we will not be modifying any fields from the start of the outer transport header to the start of the inner transport header as we are treating them like they are just a block of IP options. Signed-off-by: Alexander Duyck Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/igb/igb_main.c | 137 +++++++++++----- drivers/net/ethernet/intel/igbvf/netdev.c | 182 +++++++++++++--------- 2 files changed, 203 insertions(+), 116 deletions(-) diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index cab306934462..21727692bef6 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -2087,6 +2087,40 @@ static int igb_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], return ndo_dflt_fdb_add(ndm, tb, dev, addr, vid, flags); } +#define IGB_MAX_MAC_HDR_LEN 127 +#define IGB_MAX_NETWORK_HDR_LEN 511 + +static netdev_features_t +igb_features_check(struct sk_buff *skb, struct net_device *dev, + netdev_features_t features) +{ + unsigned int network_hdr_len, mac_hdr_len; + + /* Make certain the headers can be described by a context descriptor */ + mac_hdr_len = skb_network_header(skb) - skb->data; + if (unlikely(mac_hdr_len > IGB_MAX_MAC_HDR_LEN)) + return features & ~(NETIF_F_HW_CSUM | + NETIF_F_SCTP_CRC | + NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_TSO | + NETIF_F_TSO6); + + network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb); + if (unlikely(network_hdr_len > IGB_MAX_NETWORK_HDR_LEN)) + return features & ~(NETIF_F_HW_CSUM | + NETIF_F_SCTP_CRC | + NETIF_F_TSO | + NETIF_F_TSO6); + + /* We can only support IPV4 TSO in tunnels if we can mangle the + * inner IP ID field, so strip TSO if MANGLEID is not supported.
+ */ + if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID)) + features &= ~NETIF_F_TSO; + + return features; +} + static const struct net_device_ops igb_netdev_ops = { .ndo_open = igb_open, .ndo_stop = igb_close, @@ -2111,7 +2145,7 @@ static const struct net_device_ops igb_netdev_ops = { .ndo_fix_features = igb_fix_features, .ndo_set_features = igb_set_features, .ndo_fdb_add = igb_ndo_fdb_add, - .ndo_features_check = passthru_features_check, + .ndo_features_check = igb_features_check, }; /** @@ -2377,39 +2411,44 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) NETIF_F_TSO6 | NETIF_F_RXHASH | NETIF_F_RXCSUM | - NETIF_F_HW_CSUM | - NETIF_F_HW_VLAN_CTAG_RX | - NETIF_F_HW_VLAN_CTAG_TX; + NETIF_F_HW_CSUM; if (hw->mac.type >= e1000_82576) netdev->features |= NETIF_F_SCTP_CRC; +#define IGB_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \ + NETIF_F_GSO_GRE_CSUM | \ + NETIF_F_GSO_IPIP | \ + NETIF_F_GSO_SIT | \ + NETIF_F_GSO_UDP_TUNNEL | \ + NETIF_F_GSO_UDP_TUNNEL_CSUM) + + netdev->gso_partial_features = IGB_GSO_PARTIAL_FEATURES; + netdev->features |= NETIF_F_GSO_PARTIAL | IGB_GSO_PARTIAL_FEATURES; + /* copy netdev features into list of user selectable features */ - netdev->hw_features |= netdev->features; - netdev->hw_features |= NETIF_F_RXALL; + netdev->hw_features |= netdev->features | + NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_RXALL; if (hw->mac.type >= e1000_i350) netdev->hw_features |= NETIF_F_NTUPLE; - /* set this bit last since it cannot be part of hw_features */ - netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; - - netdev->vlan_features |= NETIF_F_SG | - NETIF_F_TSO | - NETIF_F_TSO6 | - NETIF_F_HW_CSUM | - NETIF_F_SCTP_CRC; + if (pci_using_dac) + netdev->features |= NETIF_F_HIGHDMA; + netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID; netdev->mpls_features |= NETIF_F_HW_CSUM; - netdev->hw_enc_features |= NETIF_F_HW_CSUM; + netdev->hw_enc_features |= netdev->vlan_features; + + /* set this bit last since it cannot be part of vlan_features */ + netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | + NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_TX; netdev->priv_flags |= IFF_SUPP_NOFCS; - if (pci_using_dac) { - netdev->features |= NETIF_F_HIGHDMA; - netdev->vlan_features |= NETIF_F_HIGHDMA; - } - netdev->priv_flags |= IFF_UNICAST_FLT; adapter->en_mng_pt = igb_enable_mng_pass_thru(hw); @@ -4842,9 +4881,18 @@ static int igb_tso(struct igb_ring *tx_ring, struct igb_tx_buffer *first, u8 *hdr_len) { + u32 vlan_macip_lens, type_tucmd, mss_l4len_idx; struct sk_buff *skb = first->skb; - u32 vlan_macip_lens, type_tucmd; - u32 mss_l4len_idx, l4len; + union { + struct iphdr *v4; + struct ipv6hdr *v6; + unsigned char *hdr; + } ip; + union { + struct tcphdr *tcp; + unsigned char *hdr; + } l4; + u32 paylen, l4_offset; int err; if (skb->ip_summed != CHECKSUM_PARTIAL) @@ -4857,45 +4905,52 @@ static int igb_tso(struct igb_ring *tx_ring, if (err < 0) return err; + ip.hdr = skb_network_header(skb); + l4.hdr = skb_checksum_start(skb); + /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP; - if (first->protocol == htons(ETH_P_IP)) { - struct iphdr *iph = ip_hdr(skb); - iph->tot_len = 0; - iph->check = 0; - tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, - iph->daddr, 0, - IPPROTO_TCP, - 0); + /* initialize outer IP header fields */ + if (ip.v4->version == 4) { + /* IP header will have to cancel out any data that + * is not a part of the outer IP header + */ + ip.v4->check = csum_fold(csum_add(lco_csum(skb), + 
csum_unfold(l4.tcp->check))); type_tucmd |= E1000_ADVTXD_TUCMD_IPV4; + + ip.v4->tot_len = 0; first->tx_flags |= IGB_TX_FLAGS_TSO | IGB_TX_FLAGS_CSUM | IGB_TX_FLAGS_IPV4; - } else if (skb_is_gso_v6(skb)) { - ipv6_hdr(skb)->payload_len = 0; - tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, - &ipv6_hdr(skb)->daddr, - 0, IPPROTO_TCP, 0); + } else { + ip.v6->payload_len = 0; first->tx_flags |= IGB_TX_FLAGS_TSO | IGB_TX_FLAGS_CSUM; } - /* compute header lengths */ - l4len = tcp_hdrlen(skb); - *hdr_len = skb_transport_offset(skb) + l4len; + /* determine offset of inner transport header */ + l4_offset = l4.hdr - skb->data; + + /* compute length of segmentation header */ + *hdr_len = (l4.tcp->doff * 4) + l4_offset; + + /* remove payload length from inner checksum */ + paylen = skb->len - l4_offset; + csum_replace_by_diff(&l4.tcp->check, htonl(paylen)); /* update gso size and bytecount with header size */ first->gso_segs = skb_shinfo(skb)->gso_segs; first->bytecount += (first->gso_segs - 1) * *hdr_len; /* MSS L4LEN IDX */ - mss_l4len_idx = l4len << E1000_ADVTXD_L4LEN_SHIFT; + mss_l4len_idx = (*hdr_len - l4_offset) << E1000_ADVTXD_L4LEN_SHIFT; mss_l4len_idx |= skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT; /* VLAN MACLEN IPLEN */ - vlan_macip_lens = skb_network_header_len(skb); - vlan_macip_lens |= skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT; + vlan_macip_lens = l4.hdr - ip.hdr; + vlan_macip_lens |= (ip.hdr - skb->data) << E1000_ADVTXD_MACLEN_SHIFT; vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK; igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx); diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c index 57894a80c469..322a2d7828a5 100644 --- a/drivers/net/ethernet/intel/igbvf/netdev.c +++ b/drivers/net/ethernet/intel/igbvf/netdev.c @@ -1931,83 +1931,74 @@ static void igbvf_tx_ctxtdesc(struct igbvf_ring *tx_ring, u32 vlan_macip_lens, buffer_info->dma = 0; } -static int igbvf_tso(struct igbvf_adapter *adapter, - struct igbvf_ring *tx_ring, - struct sk_buff *skb, u32 tx_flags, u8 *hdr_len, - __be16 protocol) +static int igbvf_tso(struct igbvf_ring *tx_ring, + struct sk_buff *skb, u32 tx_flags, u8 *hdr_len) { - struct e1000_adv_tx_context_desc *context_desc; - struct igbvf_buffer *buffer_info; - u32 info = 0, tu_cmd = 0; - u32 mss_l4len_idx, l4len; - unsigned int i; + u32 vlan_macip_lens, type_tucmd, mss_l4len_idx; + union { + struct iphdr *v4; + struct ipv6hdr *v6; + unsigned char *hdr; + } ip; + union { + struct tcphdr *tcp; + unsigned char *hdr; + } l4; + u32 paylen, l4_offset; int err; - *hdr_len = 0; + if (skb->ip_summed != CHECKSUM_PARTIAL) + return 0; + + if (!skb_is_gso(skb)) + return 0; err = skb_cow_head(skb, 0); - if (err < 0) { - dev_err(&adapter->pdev->dev, "igbvf_tso returning an error\n"); + if (err < 0) return err; - } - l4len = tcp_hdrlen(skb); - *hdr_len += l4len; - - if (protocol == htons(ETH_P_IP)) { - struct iphdr *iph = ip_hdr(skb); - - iph->tot_len = 0; - iph->check = 0; - tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, - iph->daddr, 0, - IPPROTO_TCP, - 0); - } else if (skb_is_gso_v6(skb)) { - ipv6_hdr(skb)->payload_len = 0; - tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, - &ipv6_hdr(skb)->daddr, - 0, IPPROTO_TCP, 0); - } - - i = tx_ring->next_to_use; - - buffer_info = &tx_ring->buffer_info[i]; - context_desc = IGBVF_TX_CTXTDESC_ADV(*tx_ring, i); - /* VLAN MACLEN IPLEN */ - if (tx_flags & IGBVF_TX_FLAGS_VLAN) - info |= (tx_flags & 
IGBVF_TX_FLAGS_VLAN_MASK); - info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT); - *hdr_len += skb_network_offset(skb); - info |= (skb_transport_header(skb) - skb_network_header(skb)); - *hdr_len += (skb_transport_header(skb) - skb_network_header(skb)); - context_desc->vlan_macip_lens = cpu_to_le32(info); + ip.hdr = skb_network_header(skb); + l4.hdr = skb_checksum_start(skb); /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ - tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT); + type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP; - if (protocol == htons(ETH_P_IP)) - tu_cmd |= E1000_ADVTXD_TUCMD_IPV4; - tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP; + /* initialize outer IP header fields */ + if (ip.v4->version == 4) { + /* IP header will have to cancel out any data that + * is not a part of the outer IP header + */ + ip.v4->check = csum_fold(csum_add(lco_csum(skb), + csum_unfold(l4.tcp->check))); + type_tucmd |= E1000_ADVTXD_TUCMD_IPV4; - context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd); + ip.v4->tot_len = 0; + } else { + ip.v6->payload_len = 0; + } + + /* determine offset of inner transport header */ + l4_offset = l4.hdr - skb->data; + + /* compute length of segmentation header */ + *hdr_len = (l4.tcp->doff * 4) + l4_offset; + + /* remove payload length from inner checksum */ + paylen = skb->len - l4_offset; + csum_replace_by_diff(&l4.tcp->check, htonl(paylen)); /* MSS L4LEN IDX */ - mss_l4len_idx = (skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT); - mss_l4len_idx |= (l4len << E1000_ADVTXD_L4LEN_SHIFT); + mss_l4len_idx = (*hdr_len - l4_offset) << E1000_ADVTXD_L4LEN_SHIFT; + mss_l4len_idx |= skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT; - context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); - context_desc->seqnum_seed = 0; + /* VLAN MACLEN IPLEN */ + vlan_macip_lens = l4.hdr - ip.hdr; + vlan_macip_lens |= (ip.hdr - skb->data) << E1000_ADVTXD_MACLEN_SHIFT; + vlan_macip_lens |= tx_flags & IGBVF_TX_FLAGS_VLAN_MASK; - buffer_info->time_stamp = jiffies; - buffer_info->dma = 0; - i++; - if (i == tx_ring->count) - i = 0; + igbvf_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx); - tx_ring->next_to_use = i; - - return true; + return 1; } static inline bool igbvf_ipv6_csum_is_sctp(struct sk_buff *skb) @@ -2269,8 +2260,7 @@ static netdev_tx_t igbvf_xmit_frame_ring_adv(struct sk_buff *skb, first = tx_ring->next_to_use; - tso = skb_is_gso(skb) ? 
- igbvf_tso(adapter, tx_ring, skb, tx_flags, &hdr_len, protocol) : 0; + tso = igbvf_tso(tx_ring, skb, tx_flags, &hdr_len); if (unlikely(tso < 0)) { dev_kfree_skb_any(skb); return NETDEV_TX_OK; @@ -2613,6 +2603,40 @@ static int igbvf_set_features(struct net_device *netdev, return 0; } +#define IGBVF_MAX_MAC_HDR_LEN 127 +#define IGBVF_MAX_NETWORK_HDR_LEN 511 + +static netdev_features_t +igbvf_features_check(struct sk_buff *skb, struct net_device *dev, + netdev_features_t features) +{ + unsigned int network_hdr_len, mac_hdr_len; + + /* Make certain the headers can be described by a context descriptor */ + mac_hdr_len = skb_network_header(skb) - skb->data; + if (unlikely(mac_hdr_len > IGBVF_MAX_MAC_HDR_LEN)) + return features & ~(NETIF_F_HW_CSUM | + NETIF_F_SCTP_CRC | + NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_TSO | + NETIF_F_TSO6); + + network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb); + if (unlikely(network_hdr_len > IGBVF_MAX_NETWORK_HDR_LEN)) + return features & ~(NETIF_F_HW_CSUM | + NETIF_F_SCTP_CRC | + NETIF_F_TSO | + NETIF_F_TSO6); + + /* We can only support IPV4 TSO in tunnels if we can mangle the + * inner IP ID field, so strip TSO if MANGLEID is not supported. + */ + if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID)) + features &= ~NETIF_F_TSO; + + return features; +} + static const struct net_device_ops igbvf_netdev_ops = { .ndo_open = igbvf_open, .ndo_stop = igbvf_close, @@ -2629,7 +2653,7 @@ static const struct net_device_ops igbvf_netdev_ops = { .ndo_poll_controller = igbvf_netpoll, #endif .ndo_set_features = igbvf_set_features, - .ndo_features_check = passthru_features_check, + .ndo_features_check = igbvf_features_check, }; /** @@ -2737,22 +2761,30 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) NETIF_F_HW_CSUM | NETIF_F_SCTP_CRC; - netdev->features = netdev->hw_features | - NETIF_F_HW_VLAN_CTAG_TX | - NETIF_F_HW_VLAN_CTAG_RX | - NETIF_F_HW_VLAN_CTAG_FILTER; +#define IGBVF_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \ + NETIF_F_GSO_GRE_CSUM | \ + NETIF_F_GSO_IPIP | \ + NETIF_F_GSO_SIT | \ + NETIF_F_GSO_UDP_TUNNEL | \ + NETIF_F_GSO_UDP_TUNNEL_CSUM) + + netdev->gso_partial_features = IGBVF_GSO_PARTIAL_FEATURES; + netdev->hw_features |= NETIF_F_GSO_PARTIAL | + IGBVF_GSO_PARTIAL_FEATURES; + + netdev->features = netdev->hw_features; if (pci_using_dac) netdev->features |= NETIF_F_HIGHDMA; - netdev->vlan_features |= NETIF_F_SG | - NETIF_F_TSO | - NETIF_F_TSO6 | - NETIF_F_HW_CSUM | - NETIF_F_SCTP_CRC; - + netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID; netdev->mpls_features |= NETIF_F_HW_CSUM; - netdev->hw_enc_features |= NETIF_F_HW_CSUM; + netdev->hw_enc_features |= netdev->vlan_features; + + /* set this bit last since it cannot be part of vlan_features */ + netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | + NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_TX; /*reset the controller to put the device in a known good state */ err = hw->mac.ops.reset_hw(hw); From aa524b66c5efd1d3220b74168d803e8b2ee1d212 Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Wed, 20 Apr 2016 11:36:42 -0700 Subject: [PATCH 1543/1649] e1000e: don't modify SYSTIM registers during SIOCSHWTSTAMP ioctl The e1000e_config_hwtstamp function was incorrectly resetting the SYSTIM registers every time the ioctl was being run. If you happened to be running ptp4l and lost the PTP connect (removing cable, or blocking the UDP traffic for example), then ptp4l will eventually perform a restart which involves re-requesting timestamp settings. 
In e1000e this has the unfortunate and incorrect result of resetting SYSTIME to the kernel time. Since kernel time is usually in UTC, and PTP time is in TAI, this results in the leap second being re-applied. Fix this by extracting the SYSTIME reset out into its own function, e1000e_ptp_reset, which we call during reset to restore the hardware registers. This function will (a) restart the timecounter based on the new system time, (b) restore the previous PPB setting, and (c) restore the previous hwtstamp settings. In order to perform (b), I had to modify the adjfreq ptp function pointer to store the old delta each time it is called. This also has the side effect of restoring the correct base timinca register correctly. The driver does not need to explicitly zero the ptp_delta variable since the entire adapter structure comes zero-initialized. Reported-by: Brian Walsh Signed-off-by: Jacob Keller Tested-by: Brian Walsh Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/e1000e/e1000.h | 1 + drivers/net/ethernet/intel/e1000e/netdev.c | 68 +++++++++++++++++----- drivers/net/ethernet/intel/e1000e/ptp.c | 2 + 3 files changed, 55 insertions(+), 16 deletions(-) diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h index 010e6d61c855..ef96cd11d6d2 100644 --- a/drivers/net/ethernet/intel/e1000e/e1000.h +++ b/drivers/net/ethernet/intel/e1000e/e1000.h @@ -347,6 +347,7 @@ struct e1000_adapter { struct ptp_clock *ptp_clock; struct ptp_clock_info ptp_clock_info; struct pm_qos_request pm_qos_req; + s32 ptp_delta; u16 eee_advert; }; diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index c597398f2922..75e60897b7e7 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -3580,7 +3580,6 @@ static int e1000e_config_hwtstamp(struct e1000_adapter *adapter, bool is_l4 = false; bool is_l2 = false; u32 regval; - s32 ret_val; if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP)) return -EINVAL; @@ -3719,16 +3718,6 @@ static int e1000e_config_hwtstamp(struct e1000_adapter *adapter, er32(RXSTMPH); er32(TXSTMPH); - /* Get and set the System Time Register SYSTIM base frequency */ - ret_val = e1000e_get_base_timinca(adapter, ®val); - if (ret_val) - return ret_val; - ew32(TIMINCA, regval); - - /* reset the ns time counter */ - timecounter_init(&adapter->tc, &adapter->cc, - ktime_to_ns(ktime_get_real())); - return 0; } @@ -3884,6 +3873,53 @@ static void e1000_flush_desc_rings(struct e1000_adapter *adapter) e1000_flush_rx_ring(adapter); } +/** + * e1000e_systim_reset - reset the timesync registers after a hardware reset + * @adapter: board private structure + * + * When the MAC is reset, all hardware bits for timesync will be reset to the + * default values. This function will restore the settings last in place. + * Since the clock SYSTIME registers are reset, we will simply restore the + * cyclecounter to the kernel real clock time. 
+ **/ +static void e1000e_systim_reset(struct e1000_adapter *adapter) +{ + struct ptp_clock_info *info = &adapter->ptp_clock_info; + struct e1000_hw *hw = &adapter->hw; + unsigned long flags; + u32 timinca; + s32 ret_val; + + if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP)) + return; + + if (info->adjfreq) { + /* restore the previous ptp frequency delta */ + ret_val = info->adjfreq(info, adapter->ptp_delta); + } else { + /* set the default base frequency if no adjustment possible */ + ret_val = e1000e_get_base_timinca(adapter, &timinca); + if (!ret_val) + ew32(TIMINCA, timinca); + } + + if (ret_val) { + dev_warn(&adapter->pdev->dev, + "Failed to restore TIMINCA clock rate delta: %d\n", + ret_val); + return; + } + + /* reset the systim ns time counter */ + spin_lock_irqsave(&adapter->systim_lock, flags); + timecounter_init(&adapter->tc, &adapter->cc, + ktime_to_ns(ktime_get_real())); + spin_unlock_irqrestore(&adapter->systim_lock, flags); + + /* restore the previous hwtstamp configuration settings */ + e1000e_config_hwtstamp(adapter, &adapter->hwtstamp_config); +} + /** * e1000e_reset - bring the hardware into a known good state * @@ -4063,8 +4099,8 @@ void e1000e_reset(struct e1000_adapter *adapter) e1000e_reset_adaptive(hw); - /* initialize systim and reset the ns time counter */ - e1000e_config_hwtstamp(adapter, &adapter->hwtstamp_config); + /* restore systim and hwtstamp settings */ + e1000e_systim_reset(adapter); /* Set EEE advertisement as appropriate */ if (adapter->flags2 & FLAG2_HAS_EEE) { @@ -7239,6 +7275,9 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) adapter->eeprom_vers = 0; } + /* init PTP hardware clock */ + e1000e_ptp_init(adapter); + /* reset the hardware with the new settings */ e1000e_reset(adapter); @@ -7257,9 +7296,6 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) /* carrier off reporting is important to ethtool even BEFORE open */ netif_carrier_off(netdev); - /* init PTP hardware clock */ - e1000e_ptp_init(adapter); - e1000_print_device_info(adapter); if (pci_dev_run_wake(pdev)) diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c index e2ff3ef75d5d..2e1b17ad52a3 100644 --- a/drivers/net/ethernet/intel/e1000e/ptp.c +++ b/drivers/net/ethernet/intel/e1000e/ptp.c @@ -79,6 +79,8 @@ static int e1000e_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta) ew32(TIMINCA, timinca); + adapter->ptp_delta = delta; + spin_unlock_irqrestore(&adapter->systim_lock, flags); return 0; From 06c0e39bbefd04b19ca50f29be698eeed21630b5 Mon Sep 17 00:00:00 2001 From: Kevin Scott Date: Tue, 3 May 2016 15:13:09 -0700 Subject: [PATCH 1544/1649] i40e: Add support for disabling all link and change bits needed for PHY interactions Add flag to tell firmware to disable link on all ports. This patch changes the bits set for telling firmware the PHY needs to be modified by driver. Without this patch, the setting will only set that mode for the current port on the device. Because the MDIO interface is common for the copper device. The command needs to set the mode for all ports. 
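To make the flag change concrete, here is a minimal, self-contained sketch; the two flag values are the ones added to i40e_adminq_cmd.h below, while everything else (the main() wrapper, the variable name, the omitted admin-queue call) is a stand-in for illustration and not the driver code itself:

  #include <stdio.h>

  /* single-port knob, unchanged */
  #define I40E_AQ_PHY_DEBUG_DISABLE_LINK_FW      0x10
  /* new: disable link manageability on every port behind the shared MDIO */
  #define I40E_AQ_PHY_DEBUG_DISABLE_ALL_LINK_FW  0x20

  /* the driver now sends both bits instead of the old single-port
   * I40E_PHY_DEBUG_PORT (BIT(4)) whenever it needs to drive the PHY itself
   */
  #define I40E_PHY_DEBUG_ALL \
          (I40E_AQ_PHY_DEBUG_DISABLE_LINK_FW | \
           I40E_AQ_PHY_DEBUG_DISABLE_ALL_LINK_FW)

  int main(void)
  {
          unsigned int cmd_flags = I40E_PHY_DEBUG_ALL;

          /* prints 0x30: both the per-port and the all-ports bits are set */
          printf("set_phy_debug flags = 0x%02x\n", cmd_flags);
          return 0;
  }

i40e_set_phys_id() then passes I40E_PHY_DEBUG_ALL to i40e_aq_set_phy_debug(), as the i40e_ethtool.c hunk below shows.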
Change-ID: I8baa7da91d384291ac95b41ae1a516604f8eb67f Signed-off-by: Kevin Scott Signed-off-by: Carolyn Wyborny Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e.h | 4 +++- drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h | 3 +++ drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 2 +- 3 files changed, 7 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index 2a6a5d3dd874..01cc732195c0 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -111,7 +111,9 @@ #define I40E_OEM_VER_PATCH_MASK 0xff #define I40E_OEM_VER_BUILD_SHIFT 8 #define I40E_OEM_VER_SHIFT 24 -#define I40E_PHY_DEBUG_PORT BIT(4) +#define I40E_PHY_DEBUG_ALL \ + (I40E_AQ_PHY_DEBUG_DISABLE_LINK_FW | \ + I40E_AQ_PHY_DEBUG_DISABLE_ALL_LINK_FW) /* The values in here are decimal coded as hex as is the case in the NVM map*/ #define I40E_CURRENT_NVM_VERSION_HI 0x2 diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h index eacbe7430b48..11cf1a5ebccf 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h @@ -1833,7 +1833,10 @@ struct i40e_aqc_set_phy_debug { #define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_NONE 0x00 #define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_HARD 0x01 #define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SOFT 0x02 +/* Disable link manageability on a single port */ #define I40E_AQ_PHY_DEBUG_DISABLE_LINK_FW 0x10 +/* Disable link manageability on all ports */ +#define I40E_AQ_PHY_DEBUG_DISABLE_ALL_LINK_FW 0x20 u8 reserved[15]; }; diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 51a994d85870..6fa05c448268 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -1880,7 +1880,7 @@ static int i40e_set_phys_id(struct net_device *netdev, if (!(pf->flags & I40E_FLAG_HAVE_10GBASET_PHY)) { pf->led_status = i40e_led_get(hw); } else { - i40e_aq_set_phy_debug(hw, I40E_PHY_DEBUG_PORT, NULL); + i40e_aq_set_phy_debug(hw, I40E_PHY_DEBUG_ALL, NULL); ret = i40e_led_get_phy(hw, &temp_status, &pf->phy_led_val); pf->led_status = temp_status; From f42a5c74da9996d602093d7760f6916444009a9d Mon Sep 17 00:00:00 2001 From: Anjali Singhai Jain Date: Tue, 3 May 2016 15:13:10 -0700 Subject: [PATCH 1545/1649] i40e: Add allmulti support for the VF This patch enables a feature to enable/disable all multicast for a trusted VF. 
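A rough, self-contained sketch of the bookkeeping this adds is shown below; the I40EVF_FLAG_* bit values are copied from the i40evf.h hunk that follows, while the helper name, the plain-integer "adapter state" and the main() driver are simplifications for illustration only (the real code also has to send the admin-queue message from the watchdog task):

  #include <stdio.h>

  #define IFF_ALLMULTI                     0x0200  /* as in linux/if.h */
  #define I40EVF_FLAG_ALLMULTI_ON          (1u << 16)
  #define I40EVF_FLAG_AQ_REQUEST_ALLMULTI  (1u << 17)
  #define I40EVF_FLAG_AQ_RELEASE_ALLMULTI  (1u << 18)

  /* edge-triggered: only queue admin-queue work when the netdev state and
   * the adapter's recorded state disagree
   */
  static unsigned int allmulti_aq_bits(unsigned int netdev_flags,
                                       unsigned int adapter_flags)
  {
          if ((netdev_flags & IFF_ALLMULTI) &&
              !(adapter_flags & I40EVF_FLAG_ALLMULTI_ON))
                  return I40EVF_FLAG_AQ_REQUEST_ALLMULTI;

          if (!(netdev_flags & IFF_ALLMULTI) &&
              (adapter_flags & I40EVF_FLAG_ALLMULTI_ON))
                  return I40EVF_FLAG_AQ_RELEASE_ALLMULTI;

          return 0;
  }

  int main(void)
  {
          printf("enter allmulti: aq_required |= 0x%x\n",
                 allmulti_aq_bits(IFF_ALLMULTI, 0));
          printf("leave allmulti: aq_required |= 0x%x\n",
                 allmulti_aq_bits(0, I40EVF_FLAG_ALLMULTI_ON));
          printf("steady state:   aq_required |= 0x%x\n",
                 allmulti_aq_bits(IFF_ALLMULTI, I40EVF_FLAG_ALLMULTI_ON));
          return 0;
  }

The watchdog task then turns I40EVF_FLAG_AQ_REQUEST_ALLMULTI into an i40evf_set_promiscuous() call with I40E_FLAG_VF_MULTICAST_PROMISC, as the i40evf_main.c hunk below shows.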
Change-Id: I926eba7f8850c8d40f8ad7e08bbe4056bbd3985f Signed-off-by: Anjali Singhai Jain Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40evf/i40evf.h | 3 +++ drivers/net/ethernet/intel/i40evf/i40evf_main.c | 15 ++++++++++++++- .../net/ethernet/intel/i40evf/i40evf_virtchnl.c | 15 +++++++++++++-- 3 files changed, 30 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h index fa044a904208..76ed97db28e2 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf.h +++ b/drivers/net/ethernet/intel/i40evf/i40evf.h @@ -215,6 +215,7 @@ struct i40evf_adapter { #define I40EVF_FLAG_OUTER_UDP_CSUM_CAPABLE BIT(12) #define I40EVF_FLAG_ADDR_SET_BY_PF BIT(13) #define I40EVF_FLAG_PROMISC_ON BIT(15) +#define I40EVF_FLAG_ALLMULTI_ON BIT(16) /* duplicates for common code */ #define I40E_FLAG_FDIR_ATR_ENABLED 0 #define I40E_FLAG_DCB_ENABLED 0 @@ -241,6 +242,8 @@ struct i40evf_adapter { #define I40EVF_FLAG_AQ_SET_RSS_LUT BIT(14) #define I40EVF_FLAG_AQ_REQUEST_PROMISC BIT(15) #define I40EVF_FLAG_AQ_RELEASE_PROMISC BIT(16) +#define I40EVF_FLAG_AQ_REQUEST_ALLMULTI BIT(17) +#define I40EVF_FLAG_AQ_RELEASE_ALLMULTI BIT(18) /* OS defined structs */ struct net_device *netdev; diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c index b548dbe78cd3..642bb45ed906 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c @@ -934,6 +934,13 @@ bottom_of_search_loop: adapter->flags & I40EVF_FLAG_PROMISC_ON) adapter->aq_required |= I40EVF_FLAG_AQ_RELEASE_PROMISC; + if (netdev->flags & IFF_ALLMULTI && + !(adapter->flags & I40EVF_FLAG_ALLMULTI_ON)) + adapter->aq_required |= I40EVF_FLAG_AQ_REQUEST_ALLMULTI; + else if (!(netdev->flags & IFF_ALLMULTI) && + adapter->flags & I40EVF_FLAG_ALLMULTI_ON) + adapter->aq_required |= I40EVF_FLAG_AQ_RELEASE_ALLMULTI; + clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section); } @@ -1612,7 +1619,13 @@ static void i40evf_watchdog_task(struct work_struct *work) goto watchdog_done; } - if (adapter->aq_required & I40EVF_FLAG_AQ_RELEASE_PROMISC) { + if (adapter->aq_required & I40EVF_FLAG_AQ_REQUEST_ALLMULTI) { + i40evf_set_promiscuous(adapter, I40E_FLAG_VF_MULTICAST_PROMISC); + goto watchdog_done; + } + + if ((adapter->aq_required & I40EVF_FLAG_AQ_RELEASE_PROMISC) && + (adapter->aq_required & I40EVF_FLAG_AQ_RELEASE_ALLMULTI)) { i40evf_set_promiscuous(adapter, 0); goto watchdog_done; } diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c index c5d33a2cea87..f13445691507 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c @@ -641,6 +641,7 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter) void i40evf_set_promiscuous(struct i40evf_adapter *adapter, int flags) { struct i40e_virtchnl_promisc_info vpi; + int promisc_all; if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */ @@ -649,11 +650,21 @@ void i40evf_set_promiscuous(struct i40evf_adapter *adapter, int flags) return; } - if (flags) { + promisc_all = I40E_FLAG_VF_UNICAST_PROMISC | + I40E_FLAG_VF_MULTICAST_PROMISC; + if ((flags & promisc_all) == promisc_all) { adapter->flags |= I40EVF_FLAG_PROMISC_ON; adapter->aq_required &= ~I40EVF_FLAG_AQ_REQUEST_PROMISC; dev_info(&adapter->pdev->dev, "Entering promiscuous mode\n"); - } else { + } + 
+ if (flags & I40E_FLAG_VF_MULTICAST_PROMISC) { + adapter->flags |= I40EVF_FLAG_ALLMULTI_ON; + adapter->aq_required &= ~I40EVF_FLAG_AQ_REQUEST_ALLMULTI; + dev_info(&adapter->pdev->dev, "Entering multicast promiscuous mode\n"); + } + + if (!flags) { adapter->flags &= ~I40EVF_FLAG_PROMISC_ON; adapter->aq_required &= ~I40EVF_FLAG_AQ_RELEASE_PROMISC; dev_info(&adapter->pdev->dev, "Leaving promiscuous mode\n"); From f3d5849756f0da14edabb1835e35aea8b6bc7440 Mon Sep 17 00:00:00 2001 From: Shannon Nelson Date: Tue, 3 May 2016 15:13:11 -0700 Subject: [PATCH 1546/1649] i40e: Implement the API function for aq_set_switch_config Add the support code for calling the AdminQ API call aq_set_switch_config Signed-off-by: Shannon Nelson Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_common.c | 29 +++++++++++++++++++ .../net/ethernet/intel/i40e/i40e_prototype.h | 4 +++ 2 files changed, 33 insertions(+) diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c index 4a934e14574d..4739a9ca60d2 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c @@ -2282,6 +2282,35 @@ i40e_status i40e_aq_get_switch_config(struct i40e_hw *hw, return status; } +/** + * i40e_aq_set_switch_config + * @hw: pointer to the hardware structure + * @flags: bit flag values to set + * @valid_flags: which bit flags to set + * @cmd_details: pointer to command details structure or NULL + * + * Set switch configuration bits + **/ +enum i40e_status_code i40e_aq_set_switch_config(struct i40e_hw *hw, + u16 flags, + u16 valid_flags, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_set_switch_config *scfg = + (struct i40e_aqc_set_switch_config *)&desc.params.raw; + enum i40e_status_code status; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_set_switch_config); + scfg->flags = cpu_to_le16(flags); + scfg->valid_flags = cpu_to_le16(valid_flags); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + /** * i40e_aq_get_firmware_version * @hw: pointer to the hw struct diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h index 4c8977c805df..b76b1587743c 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h +++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h @@ -182,6 +182,10 @@ i40e_status i40e_aq_get_switch_config(struct i40e_hw *hw, struct i40e_aqc_get_switch_config_resp *buf, u16 buf_size, u16 *start_seid, struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_set_switch_config(struct i40e_hw *hw, + u16 flags, + u16 valid_flags, + struct i40e_asq_cmd_details *cmd_details); i40e_status i40e_aq_request_resource(struct i40e_hw *hw, enum i40e_aq_resources_ids resource, enum i40e_aq_resource_access_type access, From b5569892309e2e62641076f43aef6919286343b4 Mon Sep 17 00:00:00 2001 From: Anjali Singhai Jain Date: Tue, 3 May 2016 15:13:12 -0700 Subject: [PATCH 1547/1649] i40e: Add vf-true-promisc-support priv flag This patch adds priv-flag knob to configure global true promisc support. With this patch the user can decide the flavor of promiscuous that the VFs will see when promiscuous mode is enabled on the interface. Since this a global setting for the whole device, the priv-flag is exposed only on the first PF of the device. 
The default is true promisc support is off, which means the promisc mode for the VF will be limited/defport mode. For the PF, we still will be in limited promisc unless in MFP mode irrespective of the flavor picked through this knob. Usage: On PF0 ethtool --show-priv-flags p261p1 Private flags for p261p1: MFP : off LinkPolling : off flow-director-atr : on veb-stats : off hw-atr-eviction : off vf-true-promisc-support: off to enable setting true promisc ethtool --set-priv-flags p261p1 vf-true-promisc-support on At this point if the VF is set to trust and promisc is enabled on the VF through ip link set ... promisc on The VF/VFs will be able to see ALL ingress traffic Change-Id: I8fac4b6eb1af9ca77b5376b79c50bdce5055bd94 Signed-off-by: Anjali Singhai Jain Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e.h | 12 ++-- drivers/net/ethernet/intel/i40e/i40e_common.c | 9 ++- .../net/ethernet/intel/i40e/i40e_ethtool.c | 72 +++++++++++++++++-- drivers/net/ethernet/intel/i40e/i40e_main.c | 30 +++++++- .../net/ethernet/intel/i40e/i40e_prototype.h | 3 +- .../ethernet/intel/i40e/i40e_virtchnl_pf.c | 3 +- 6 files changed, 111 insertions(+), 18 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index 01cc732195c0..9c44739da5e2 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -97,11 +97,12 @@ #define I40E_INT_NAME_STR_LEN (IFNAMSIZ + 16) /* Ethtool Private Flags */ -#define I40E_PRIV_FLAGS_NPAR_FLAG BIT(0) -#define I40E_PRIV_FLAGS_LINKPOLL_FLAG BIT(1) -#define I40E_PRIV_FLAGS_FD_ATR BIT(2) -#define I40E_PRIV_FLAGS_VEB_STATS BIT(3) -#define I40E_PRIV_FLAGS_HW_ATR_EVICT BIT(5) +#define I40E_PRIV_FLAGS_MFP_FLAG BIT(0) +#define I40E_PRIV_FLAGS_LINKPOLL_FLAG BIT(1) +#define I40E_PRIV_FLAGS_FD_ATR BIT(2) +#define I40E_PRIV_FLAGS_VEB_STATS BIT(3) +#define I40E_PRIV_FLAGS_HW_ATR_EVICT BIT(4) +#define I40E_PRIV_FLAGS_TRUE_PROMISC_SUPPORT BIT(5) #define I40E_NVM_VERSION_LO_SHIFT 0 #define I40E_NVM_VERSION_LO_MASK (0xff << I40E_NVM_VERSION_LO_SHIFT) @@ -358,6 +359,7 @@ struct i40e_pf { #define I40E_FLAG_STOP_FW_LLDP BIT_ULL(47) #define I40E_FLAG_HAVE_10GBASET_PHY BIT_ULL(48) #define I40E_FLAG_PF_MAC BIT_ULL(50) +#define I40E_FLAG_TRUE_PROMISC_SUPPORT BIT_ULL(51) /* tracks features that get auto disabled by errors */ u64 auto_disable_flags; diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c index 4739a9ca60d2..27c6f9ddf684 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c @@ -1972,10 +1972,12 @@ aq_add_vsi_exit: * @seid: vsi number * @set: set unicast promiscuous enable/disable * @cmd_details: pointer to command details structure or NULL + * @rx_only_promisc: flag to decide if egress traffic gets mirrored in promisc **/ i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw, u16 seid, bool set, - struct i40e_asq_cmd_details *cmd_details) + struct i40e_asq_cmd_details *cmd_details, + bool rx_only_promisc) { struct i40e_aq_desc desc; struct i40e_aqc_set_vsi_promiscuous_modes *cmd = @@ -1988,8 +1990,9 @@ i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw, if (set) { flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST; - if (((hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver >= 5)) || - (hw->aq.api_maj_ver > 1)) + if (rx_only_promisc && + (((hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver >= 5)) || + (hw->aq.api_maj_ver > 1))) flags |= 
I40E_AQC_SET_VSI_PROMISC_TX; } diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 6fa05c448268..52b58e37d863 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -230,6 +230,17 @@ static const char i40e_gstrings_test[][ETH_GSTRING_LEN] = { #define I40E_TEST_LEN (sizeof(i40e_gstrings_test) / ETH_GSTRING_LEN) +static const char i40e_priv_flags_strings_gl[][ETH_GSTRING_LEN] = { + "MFP", + "LinkPolling", + "flow-director-atr", + "veb-stats", + "hw-atr-eviction", + "vf-true-promisc-support", +}; + +#define I40E_PRIV_FLAGS_GL_STR_LEN ARRAY_SIZE(i40e_priv_flags_strings_gl) + static const char i40e_priv_flags_strings[][ETH_GSTRING_LEN] = { "NPAR", "LinkPolling", @@ -1158,6 +1169,10 @@ static void i40e_get_drvinfo(struct net_device *netdev, sizeof(drvinfo->fw_version)); strlcpy(drvinfo->bus_info, pci_name(pf->pdev), sizeof(drvinfo->bus_info)); + if (pf->hw.pf_id == 0) + drvinfo->n_priv_flags = I40E_PRIV_FLAGS_GL_STR_LEN; + else + drvinfo->n_priv_flags = I40E_PRIV_FLAGS_STR_LEN; } static void i40e_get_ringparam(struct net_device *netdev, @@ -1385,7 +1400,10 @@ static int i40e_get_sset_count(struct net_device *netdev, int sset) return I40E_VSI_STATS_LEN(netdev); } case ETH_SS_PRIV_FLAGS: - return I40E_PRIV_FLAGS_STR_LEN; + if (pf->hw.pf_id == 0) + return I40E_PRIV_FLAGS_GL_STR_LEN; + else + return I40E_PRIV_FLAGS_STR_LEN; default: return -EOPNOTSUPP; } @@ -1583,10 +1601,18 @@ static void i40e_get_strings(struct net_device *netdev, u32 stringset, /* BUG_ON(p - data != I40E_STATS_LEN * ETH_GSTRING_LEN); */ break; case ETH_SS_PRIV_FLAGS: - for (i = 0; i < I40E_PRIV_FLAGS_STR_LEN; i++) { - memcpy(data, i40e_priv_flags_strings[i], - ETH_GSTRING_LEN); - data += ETH_GSTRING_LEN; + if (pf->hw.pf_id == 0) { + for (i = 0; i < I40E_PRIV_FLAGS_GL_STR_LEN; i++) { + memcpy(data, i40e_priv_flags_strings_gl[i], + ETH_GSTRING_LEN); + data += ETH_GSTRING_LEN; + } + } else { + for (i = 0; i < I40E_PRIV_FLAGS_STR_LEN; i++) { + memcpy(data, i40e_priv_flags_strings[i], + ETH_GSTRING_LEN); + data += ETH_GSTRING_LEN; + } } break; default: @@ -2848,8 +2874,6 @@ static u32 i40e_get_priv_flags(struct net_device *dev) struct i40e_pf *pf = vsi->back; u32 ret_flags = 0; - ret_flags |= pf->hw.func_caps.npar_enable ? - I40E_PRIV_FLAGS_NPAR_FLAG : 0; ret_flags |= pf->flags & I40E_FLAG_LINK_POLLING_ENABLED ? I40E_PRIV_FLAGS_LINKPOLL_FLAG : 0; ret_flags |= pf->flags & I40E_FLAG_FD_ATR_ENABLED ? @@ -2858,6 +2882,10 @@ static u32 i40e_get_priv_flags(struct net_device *dev) I40E_PRIV_FLAGS_VEB_STATS : 0; ret_flags |= pf->auto_disable_flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE ? 0 : I40E_PRIV_FLAGS_HW_ATR_EVICT; + if (pf->hw.pf_id == 0) { + ret_flags |= pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT ? 
+ I40E_PRIV_FLAGS_TRUE_PROMISC_SUPPORT : 0; + } return ret_flags; } @@ -2872,7 +2900,10 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags) struct i40e_netdev_priv *np = netdev_priv(dev); struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; + u16 sw_flags = 0, valid_flags = 0; bool reset_required = false; + bool promisc_change = false; + int ret; /* NOTE: MFP is not settable */ @@ -2902,6 +2933,33 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags) reset_required = true; } + if (pf->hw.pf_id == 0) { + if ((flags & I40E_PRIV_FLAGS_TRUE_PROMISC_SUPPORT) && + !(pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT)) { + pf->flags |= I40E_FLAG_TRUE_PROMISC_SUPPORT; + promisc_change = true; + } else if (!(flags & I40E_PRIV_FLAGS_TRUE_PROMISC_SUPPORT) && + (pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT)) { + pf->flags &= ~I40E_FLAG_TRUE_PROMISC_SUPPORT; + promisc_change = true; + } + } + if (promisc_change) { + if (!(pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT)) + sw_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC; + valid_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC; + ret = i40e_aq_set_switch_config(&pf->hw, sw_flags, valid_flags, + NULL); + if (ret && pf->hw.aq.asq_last_status != I40E_AQ_RC_ESRCH) { + dev_info(&pf->pdev->dev, + "couldn't set switch config bits, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, + pf->hw.aq.asq_last_status)); + /* not a fatal problem, just keep going */ + } + } + if ((flags & I40E_PRIV_FLAGS_HW_ATR_EVICT) && (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE)) pf->auto_disable_flags &= ~I40E_FLAG_HW_ATR_EVICT_CAPABLE; diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 46a3a674c635..f8038d09c1c2 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -2128,7 +2128,8 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) aq_ret = i40e_aq_set_vsi_unicast_promiscuous( &vsi->back->hw, vsi->seid, - cur_promisc, NULL); + cur_promisc, NULL, + true); if (aq_ret) { retval = i40e_aq_rc_to_posix(aq_ret, @@ -10407,6 +10408,7 @@ int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig) **/ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit) { + u16 flags = 0; int ret; /* find out what's out there already */ @@ -10420,6 +10422,32 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit) } i40e_pf_reset_stats(pf); + /* set the switch config bit for the whole device to + * support limited promisc or true promisc + * when user requests promisc. The default is limited + * promisc. 
+ */ + + if ((pf->hw.pf_id == 0) && + !(pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT)) + flags = I40E_AQ_SET_SWITCH_CFG_PROMISC; + + if (pf->hw.pf_id == 0) { + u16 valid_flags; + + valid_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC; + ret = i40e_aq_set_switch_config(&pf->hw, flags, valid_flags, + NULL); + if (ret && pf->hw.aq.asq_last_status != I40E_AQ_RC_ESRCH) { + dev_info(&pf->pdev->dev, + "couldn't set switch config bits, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, + pf->hw.aq.asq_last_status)); + /* not a fatal problem, just keep going */ + } + } + /* first time setup */ if (pf->lan_vsi == I40E_NO_VSI || reinit) { struct i40e_vsi *vsi = NULL; diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h index b76b1587743c..80403c6ee7f0 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h +++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h @@ -130,7 +130,8 @@ i40e_status i40e_aq_set_vsi_broadcast(struct i40e_hw *hw, u16 vsi_id, bool set_filter, struct i40e_asq_cmd_details *cmd_details); i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw, - u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details); + u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details, + bool rx_only_promisc); i40e_status i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw, u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw, diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index a9b04e72df82..6430933f99b3 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -1562,7 +1562,8 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, } } else { aq_ret = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid, - allmulti, NULL); + allmulti, NULL, + true); aq_err = pf->hw.aq.asq_last_status; if (aq_ret) dev_err(&pf->pdev->dev, From eee4172abcdcc610e40eb9513d19ff16c7820270 Mon Sep 17 00:00:00 2001 From: Mitch Williams Date: Tue, 3 May 2016 15:13:13 -0700 Subject: [PATCH 1548/1649] i40e: lie to the VF If an untrusted VF attempts to configure promiscuous mode, log a message pointing out its naughty behavior. But then, instead of returning an error to the offender, just lie to it and say everything's OK. It will continue on its way, thinking it's in promiscuous mode, but receiving no packets except its own. 
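In other words, the privilege check now short-circuits with a fake success instead of an error. A stripped-down, stand-alone sketch of the new control flow follows; the function name, arguments and return codes here are simplified stand-ins, and the real logic lives in the i40e_vc_config_promiscuous_mode_msg() hunk below:

  #include <stdbool.h>
  #include <stdio.h>

  static int vc_config_promiscuous_mode(int vf_id, bool vsi_valid, bool trusted)
  {
          if (!vsi_valid)
                  return -1;      /* still a real error (I40E_ERR_PARAM) */

          if (!trusted) {
                  fprintf(stderr,
                          "Unprivileged VF %d is attempting to configure promiscuous mode\n",
                          vf_id);
                  return 0;       /* lie: report success, change nothing */
          }

          /* trusted path: actually program unicast/multicast promiscuous here */
          return 0;
  }

  int main(void)
  {
          /* untrusted VF: gets one log line and a "success", but no promisc */
          return vc_config_promiscuous_mode(3, true, false);
  }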
Change-ID: I63369215b1720f3c531eedfc06af86ff8c0e3dc8 Signed-off-by: Mitch Williams Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 6430933f99b3..94734290907c 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -1474,14 +1474,18 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, vsi = i40e_find_vsi_from_id(pf, info->vsi_id); if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) || - !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) || !i40e_vc_isvalid_vsi_id(vf, info->vsi_id)) { - dev_err(&pf->pdev->dev, - "VF %d doesn't meet requirements to enter promiscuous mode\n", - vf->vf_id); aq_ret = I40E_ERR_PARAM; goto error_param; } + if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) { + dev_err(&pf->pdev->dev, + "Unprivileged VF %d is attempting to configure promiscuous mode\n", + vf->vf_id); + /* Lie to the VF on purpose. */ + aq_ret = 0; + goto error_param; + } /* Multicast promiscuous handling*/ if (info->flags & I40E_FLAG_VF_MULTICAST_PROMISC) allmulti = true; From 06566e5dd4e53f57fc3daa12fb8b5252772d70de Mon Sep 17 00:00:00 2001 From: Catherine Sullivan Date: Tue, 3 May 2016 15:13:14 -0700 Subject: [PATCH 1549/1649] i40e: Refactor ethtool get_settings Previously we were only looking at the FW supported PHY types if link was down, because we want to be more specific when link is up. This refactor changes this. When link is down, we still rely on the FW supported PHY types, but when link is up, we select the possible supported link modes from what we know about the current PHY type, and AND that with the FW supported PHY types. Change-ID: Ice5dad83f2a17932b0b8b59f07439696ad6aa013 Signed-off-by: Catherine Sullivan Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- .../net/ethernet/intel/i40e/i40e_ethtool.c | 258 +++++++++--------- 1 file changed, 135 insertions(+), 123 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 52b58e37d863..5e8d84ff7d5f 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -261,6 +261,110 @@ static void i40e_partition_setting_complaint(struct i40e_pf *pf) "The link settings are allowed to be changed only from the first partition of a given port. 
Please switch to the first partition in order to change the setting.\n"); } +/** + * i40e_phy_type_to_ethtool - convert the phy_types to ethtool link modes + * @phy_types: PHY types to convert + * @supported: pointer to the ethtool supported variable to fill in + * @advertising: pointer to the ethtool advertising variable to fill in + * + **/ +static void i40e_phy_type_to_ethtool(struct i40e_pf *pf, u32 *supported, + u32 *advertising) +{ + enum i40e_aq_capabilities_phy_type phy_types = pf->hw.phy.phy_types; + + *supported = 0x0; + *advertising = 0x0; + + if (phy_types & I40E_CAP_PHY_TYPE_SGMII) { + *supported |= SUPPORTED_Autoneg | + SUPPORTED_1000baseT_Full; + *advertising |= ADVERTISED_Autoneg | + ADVERTISED_1000baseT_Full; + if (pf->flags & I40E_FLAG_100M_SGMII_CAPABLE) { + *supported |= SUPPORTED_100baseT_Full; + *advertising |= ADVERTISED_100baseT_Full; + } + } + if (phy_types & I40E_CAP_PHY_TYPE_XAUI || + phy_types & I40E_CAP_PHY_TYPE_XFI || + phy_types & I40E_CAP_PHY_TYPE_SFI || + phy_types & I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU || + phy_types & I40E_CAP_PHY_TYPE_10GBASE_AOC) + *supported |= SUPPORTED_10000baseT_Full; + if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_CR1_CU || + phy_types & I40E_CAP_PHY_TYPE_10GBASE_CR1 || + phy_types & I40E_CAP_PHY_TYPE_10GBASE_T || + phy_types & I40E_CAP_PHY_TYPE_10GBASE_SR || + phy_types & I40E_CAP_PHY_TYPE_10GBASE_LR) { + *supported |= SUPPORTED_Autoneg | + SUPPORTED_10000baseT_Full; + *advertising |= ADVERTISED_Autoneg | + ADVERTISED_10000baseT_Full; + } + if (phy_types & I40E_CAP_PHY_TYPE_XLAUI || + phy_types & I40E_CAP_PHY_TYPE_XLPPI || + phy_types & I40E_CAP_PHY_TYPE_40GBASE_AOC) + *supported |= SUPPORTED_40000baseCR4_Full; + if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_CR4_CU || + phy_types & I40E_CAP_PHY_TYPE_40GBASE_CR4) { + *supported |= SUPPORTED_Autoneg | + SUPPORTED_40000baseCR4_Full; + *advertising |= ADVERTISED_Autoneg | + ADVERTISED_40000baseCR4_Full; + } + if ((phy_types & I40E_CAP_PHY_TYPE_100BASE_TX) && + !(phy_types & I40E_CAP_PHY_TYPE_1000BASE_T)) { + *supported |= SUPPORTED_Autoneg | + SUPPORTED_100baseT_Full; + *advertising |= ADVERTISED_Autoneg | + ADVERTISED_100baseT_Full; + } + if (phy_types & I40E_CAP_PHY_TYPE_1000BASE_T || + phy_types & I40E_CAP_PHY_TYPE_1000BASE_SX || + phy_types & I40E_CAP_PHY_TYPE_1000BASE_LX || + phy_types & I40E_CAP_PHY_TYPE_1000BASE_T_OPTICAL) { + *supported |= SUPPORTED_Autoneg | + SUPPORTED_1000baseT_Full; + *advertising |= ADVERTISED_Autoneg | + ADVERTISED_1000baseT_Full; + } + if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_SR4) + *supported |= SUPPORTED_40000baseSR4_Full; + if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_LR4) + *supported |= SUPPORTED_40000baseLR4_Full; + if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_KR4) { + *supported |= SUPPORTED_40000baseKR4_Full | + SUPPORTED_Autoneg; + *advertising |= ADVERTISED_40000baseKR4_Full | + ADVERTISED_Autoneg; + } + if (phy_types & I40E_CAP_PHY_TYPE_20GBASE_KR2) { + *supported |= SUPPORTED_20000baseKR2_Full | + SUPPORTED_Autoneg; + *advertising |= ADVERTISED_20000baseKR2_Full | + ADVERTISED_Autoneg; + } + if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_KR) { + *supported |= SUPPORTED_10000baseKR_Full | + SUPPORTED_Autoneg; + *advertising |= ADVERTISED_10000baseKR_Full | + ADVERTISED_Autoneg; + } + if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_KX4) { + *supported |= SUPPORTED_10000baseKX4_Full | + SUPPORTED_Autoneg; + *advertising |= ADVERTISED_10000baseKX4_Full | + ADVERTISED_Autoneg; + } + if (phy_types & I40E_CAP_PHY_TYPE_1000BASE_KX) { + *supported |= SUPPORTED_1000baseKX_Full 
| + SUPPORTED_Autoneg; + *advertising |= ADVERTISED_1000baseKX_Full | + ADVERTISED_Autoneg; + } +} + /** * i40e_get_settings_link_up - Get the Link settings for when link is up * @hw: hw structure @@ -275,6 +379,8 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw, { struct i40e_link_status *hw_link_info = &hw->phy.link_info; u32 link_speed = hw_link_info->link_speed; + u32 e_advertising = 0x0; + u32 e_supported = 0x0; /* Initialize supported and advertised settings based on phy settings */ switch (hw_link_info->phy_type) { @@ -315,21 +421,18 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw, break; case I40E_PHY_TYPE_10GBASE_T: case I40E_PHY_TYPE_1000BASE_T: + case I40E_PHY_TYPE_100BASE_TX: ecmd->supported = SUPPORTED_Autoneg | SUPPORTED_10000baseT_Full | - SUPPORTED_1000baseT_Full; + SUPPORTED_1000baseT_Full | + SUPPORTED_100baseT_Full; ecmd->advertising = ADVERTISED_Autoneg; if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB) ecmd->advertising |= ADVERTISED_10000baseT_Full; if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB) ecmd->advertising |= ADVERTISED_1000baseT_Full; - /* adding 100baseT support for 10GBASET_PHY */ - if (pf->flags & I40E_FLAG_HAVE_10GBASET_PHY) { - ecmd->supported |= SUPPORTED_100baseT_Full; - ecmd->advertising |= ADVERTISED_100baseT_Full | - ADVERTISED_1000baseT_Full | - ADVERTISED_10000baseT_Full; - } + if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB) + ecmd->advertising |= ADVERTISED_100baseT_Full; break; case I40E_PHY_TYPE_1000BASE_T_OPTICAL: ecmd->supported = SUPPORTED_Autoneg | @@ -337,21 +440,6 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw, ecmd->advertising = ADVERTISED_Autoneg | ADVERTISED_1000baseT_Full; break; - case I40E_PHY_TYPE_100BASE_TX: - ecmd->supported = SUPPORTED_Autoneg | - SUPPORTED_100baseT_Full; - if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB) - ecmd->advertising |= ADVERTISED_100baseT_Full; - /* firmware detects 10G phy as 100M phy at 100M speed */ - if (pf->flags & I40E_FLAG_HAVE_10GBASET_PHY) { - ecmd->supported |= SUPPORTED_10000baseT_Full | - SUPPORTED_1000baseT_Full; - ecmd->advertising |= ADVERTISED_Autoneg | - ADVERTISED_100baseT_Full | - ADVERTISED_1000baseT_Full | - ADVERTISED_10000baseT_Full; - } - break; case I40E_PHY_TYPE_10GBASE_CR1_CU: case I40E_PHY_TYPE_10GBASE_CR1: ecmd->supported = SUPPORTED_Autoneg | @@ -378,14 +466,23 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw, ecmd->advertising |= ADVERTISED_100baseT_Full; } break; - /* Backplane is set based on supported phy types in get_settings - * so don't set anything here but don't warn either - */ case I40E_PHY_TYPE_40GBASE_KR4: case I40E_PHY_TYPE_20GBASE_KR2: case I40E_PHY_TYPE_10GBASE_KR: case I40E_PHY_TYPE_10GBASE_KX4: case I40E_PHY_TYPE_1000BASE_KX: + ecmd->supported |= SUPPORTED_40000baseKR4_Full | + SUPPORTED_20000baseKR2_Full | + SUPPORTED_10000baseKR_Full | + SUPPORTED_10000baseKX4_Full | + SUPPORTED_1000baseKX_Full | + SUPPORTED_Autoneg; + ecmd->advertising |= ADVERTISED_40000baseKR4_Full | + ADVERTISED_20000baseKR2_Full | + ADVERTISED_10000baseKR_Full | + ADVERTISED_10000baseKX4_Full | + ADVERTISED_1000baseKX_Full | + ADVERTISED_Autoneg; break; default: /* if we got here and link is up something bad is afoot */ @@ -393,6 +490,16 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw, hw_link_info->phy_type); } + /* Now that we've worked out everything that could be supported by the + * current PHY type, get what is supported by the NVM and them to + * get what is truly supported + 
*/ + i40e_phy_type_to_ethtool(pf, &e_supported, + &e_advertising); + + ecmd->supported = ecmd->supported & e_supported; + ecmd->advertising = ecmd->advertising & e_advertising; + /* Set speed and duplex */ switch (link_speed) { case I40E_LINK_SPEED_40GB: @@ -427,74 +534,11 @@ static void i40e_get_settings_link_down(struct i40e_hw *hw, struct ethtool_cmd *ecmd, struct i40e_pf *pf) { - enum i40e_aq_capabilities_phy_type phy_types = hw->phy.phy_types; - /* link is down and the driver needs to fall back on * supported phy types to figure out what info to display */ - ecmd->supported = 0x0; - ecmd->advertising = 0x0; - if (phy_types & I40E_CAP_PHY_TYPE_SGMII) { - ecmd->supported |= SUPPORTED_Autoneg | - SUPPORTED_1000baseT_Full; - ecmd->advertising |= ADVERTISED_Autoneg | - ADVERTISED_1000baseT_Full; - if (pf->hw.mac.type == I40E_MAC_X722) { - ecmd->supported |= SUPPORTED_100baseT_Full; - ecmd->advertising |= ADVERTISED_100baseT_Full; - if (pf->flags & I40E_FLAG_100M_SGMII_CAPABLE) { - ecmd->supported |= SUPPORTED_100baseT_Full; - ecmd->advertising |= ADVERTISED_100baseT_Full; - } - } - } - if (phy_types & I40E_CAP_PHY_TYPE_XAUI || - phy_types & I40E_CAP_PHY_TYPE_XFI || - phy_types & I40E_CAP_PHY_TYPE_SFI || - phy_types & I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU || - phy_types & I40E_CAP_PHY_TYPE_10GBASE_AOC) - ecmd->supported |= SUPPORTED_10000baseT_Full; - if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_CR1_CU || - phy_types & I40E_CAP_PHY_TYPE_10GBASE_CR1 || - phy_types & I40E_CAP_PHY_TYPE_10GBASE_T || - phy_types & I40E_CAP_PHY_TYPE_10GBASE_SR || - phy_types & I40E_CAP_PHY_TYPE_10GBASE_LR) { - ecmd->supported |= SUPPORTED_Autoneg | - SUPPORTED_10000baseT_Full; - ecmd->advertising |= ADVERTISED_Autoneg | - ADVERTISED_10000baseT_Full; - } - if (phy_types & I40E_CAP_PHY_TYPE_XLAUI || - phy_types & I40E_CAP_PHY_TYPE_XLPPI || - phy_types & I40E_CAP_PHY_TYPE_40GBASE_AOC) - ecmd->supported |= SUPPORTED_40000baseCR4_Full; - if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_CR4_CU || - phy_types & I40E_CAP_PHY_TYPE_40GBASE_CR4) { - ecmd->supported |= SUPPORTED_Autoneg | - SUPPORTED_40000baseCR4_Full; - ecmd->advertising |= ADVERTISED_Autoneg | - ADVERTISED_40000baseCR4_Full; - } - if ((phy_types & I40E_CAP_PHY_TYPE_100BASE_TX) && - !(phy_types & I40E_CAP_PHY_TYPE_1000BASE_T)) { - ecmd->supported |= SUPPORTED_Autoneg | - SUPPORTED_100baseT_Full; - ecmd->advertising |= ADVERTISED_Autoneg | - ADVERTISED_100baseT_Full; - } - if (phy_types & I40E_CAP_PHY_TYPE_1000BASE_T || - phy_types & I40E_CAP_PHY_TYPE_1000BASE_SX || - phy_types & I40E_CAP_PHY_TYPE_1000BASE_LX || - phy_types & I40E_CAP_PHY_TYPE_1000BASE_T_OPTICAL) { - ecmd->supported |= SUPPORTED_Autoneg | - SUPPORTED_1000baseT_Full; - ecmd->advertising |= ADVERTISED_Autoneg | - ADVERTISED_1000baseT_Full; - } - if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_SR4) - ecmd->supported |= SUPPORTED_40000baseSR4_Full; - if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_LR4) - ecmd->supported |= SUPPORTED_40000baseLR4_Full; + i40e_phy_type_to_ethtool(pf, &ecmd->supported, + &ecmd->advertising); /* With no link speed and duplex are unknown */ ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN); @@ -523,38 +567,6 @@ static int i40e_get_settings(struct net_device *netdev, i40e_get_settings_link_down(hw, ecmd, pf); /* Now set the settings that don't rely on link being up/down */ - - /* For backplane, supported and advertised are only reliant on the - * phy types the NVM specifies are supported. 
- */ - if (hw->device_id == I40E_DEV_ID_KX_B || - hw->device_id == I40E_DEV_ID_KX_C || - hw->device_id == I40E_DEV_ID_20G_KR2 || - hw->device_id == I40E_DEV_ID_20G_KR2_A) { - ecmd->supported = SUPPORTED_Autoneg; - ecmd->advertising = ADVERTISED_Autoneg; - if (hw->phy.phy_types & I40E_CAP_PHY_TYPE_40GBASE_KR4) { - ecmd->supported |= SUPPORTED_40000baseKR4_Full; - ecmd->advertising |= ADVERTISED_40000baseKR4_Full; - } - if (hw->phy.phy_types & I40E_CAP_PHY_TYPE_20GBASE_KR2) { - ecmd->supported |= SUPPORTED_20000baseKR2_Full; - ecmd->advertising |= ADVERTISED_20000baseKR2_Full; - } - if (hw->phy.phy_types & I40E_CAP_PHY_TYPE_10GBASE_KR) { - ecmd->supported |= SUPPORTED_10000baseKR_Full; - ecmd->advertising |= ADVERTISED_10000baseKR_Full; - } - if (hw->phy.phy_types & I40E_CAP_PHY_TYPE_10GBASE_KX4) { - ecmd->supported |= SUPPORTED_10000baseKX4_Full; - ecmd->advertising |= ADVERTISED_10000baseKX4_Full; - } - if (hw->phy.phy_types & I40E_CAP_PHY_TYPE_1000BASE_KX) { - ecmd->supported |= SUPPORTED_1000baseKX_Full; - ecmd->advertising |= ADVERTISED_1000baseKX_Full; - } - } - /* Set autoneg settings */ ecmd->autoneg = ((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ? AUTONEG_ENABLE : AUTONEG_DISABLE); From c420815d12b744ad8cf9312af13af794bad48216 Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Tue, 3 May 2016 15:13:15 -0700 Subject: [PATCH 1550/1649] i40e: change Rx hang message into a WARN_ONCE Use WARN_ONCE in order to highlight the issue, but don't display a warning every time. The user should be able to see the ethtool counter we created if necessary to see how often it is occurring. Change-ID: I40c4ea159819b64a7d33b7f5716749089791533a Signed-off-by: Jacob Keller Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_ptp.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c index a1b878abd5b0..ed39cbad24bd 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c @@ -289,9 +289,7 @@ void i40e_ptp_rx_hang(struct i40e_vsi *vsi) rd32(hw, I40E_PRTTSYN_RXTIME_H(3)); pf->last_rx_ptp_check = jiffies; pf->rx_hwtstamp_cleared++; - dev_warn(&vsi->back->pdev->dev, - "%s: clearing Rx timestamp hang\n", - __func__); + WARN_ONCE(1, "Detected Rx timestamp register hang\n"); } } From 73df8c9e3e3d1d5d38d48ba4bfb0f709f68c8e13 Mon Sep 17 00:00:00 2001 From: Akeem G Abodunrin Date: Tue, 3 May 2016 15:13:16 -0700 Subject: [PATCH 1551/1649] i40e: Correct UDP packet header for non_tunnel-ipv6 This patch corrects Rx ptype payload layer for non_tunneled ipv6. It should be layer 4 for UDP, instead of layer 3. 
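The payload-layer field of a decoded ptype tells later code how deep the hardware classification went, for example whether a reported RSS hash can be treated as an L4 hash (covering ports) or only an L3 hash (addresses). Tagging non-tunneled IPv6/UDP as PAY3 therefore understated what the hardware actually hashed. The sketch below is purely illustrative: the enum and helper are simplified stand-ins for the driver's i40e_rx_ptype_payload_layer values, not the real lookup table.

  #include <stdio.h>

  enum payload_layer { PAY_NONE = 0, PAY2 = 1, PAY3 = 2, PAY4 = 3 };

  /* illustrative mapping: PAY4 lets the stack trust the hash as an L4 hash,
   * PAY3 limits it to L3
   */
  static const char *hash_scope(enum payload_layer layer)
  {
          switch (layer) {
          case PAY4: return "L4 hash (addresses + ports)";
          case PAY3: return "L3 hash (addresses only)";
          default:   return "no payload hash";
          }
  }

  int main(void)
  {
          printf("ptype 90 (IPv6/UDP) before: %s\n", hash_scope(PAY3));
          printf("ptype 90 (IPv6/UDP) after:  %s\n", hash_scope(PAY4));
          return 0;
  }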
Change-ID: I9382e4458ab3c4e58f6d2e9f195d5d4ee513805e Signed-off-by: Akeem G Abodunrin Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_common.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c index 27c6f9ddf684..422b41d61c9a 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c @@ -696,7 +696,7 @@ struct i40e_rx_ptype_decoded i40e_ptype_lookup[] = { /* Non Tunneled IPv6 */ I40E_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3), I40E_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3), - I40E_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY3), + I40E_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY4), I40E_PTT_UNUSED_ENTRY(91), I40E_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4), I40E_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4), From 4b28cdba4859c2d1eb77144fe2383afb8daea1ca Mon Sep 17 00:00:00 2001 From: Ashish Shah Date: Tue, 3 May 2016 15:13:17 -0700 Subject: [PATCH 1552/1649] i40e: set context to use VSI RSS LUT for SR-IOV For the SR-IOV VSIs, when the queue filtering section is valid, the RSS LUT needs to be set to use the VSI specific lookup table (otherwise it will use the PF RSS LUT table). Change-ID: Ia9377cc818078238a75c3bdeade1b593a91b3480 Signed-off-by: Ashish Shah Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_main.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index f8038d09c1c2..a9812466c57d 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -9362,7 +9362,8 @@ static int i40e_add_vsi(struct i40e_vsi *vsi) ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID); ctxt.info.queueing_opt_flags |= - I40E_AQ_VSI_QUE_OPT_TCP_ENA; + (I40E_AQ_VSI_QUE_OPT_TCP_ENA | + I40E_AQ_VSI_QUE_OPT_RSS_LUT_VSI); } ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID); From a876c3ba59a69a1d4ba240ee7cb50acf31116647 Mon Sep 17 00:00:00 2001 From: Mitch Williams Date: Tue, 3 May 2016 15:13:18 -0700 Subject: [PATCH 1553/1649] i40e/i40evf: properly report Rx packet hash This logic is inverted. If the RXHASH flag is set, then we should go ahead and call skb_set_hash. 
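A tiny stand-alone illustration of the inverted guard is shown below; NETIF_F_RXHASH is reduced to a single stand-in bit and only the early-return condition is modelled, the descriptor parsing and skb_set_hash() call are left out:

  #include <stdbool.h>
  #include <stdio.h>

  #define NETIF_F_RXHASH  (1u << 0)       /* stand-in bit for this sketch */

  static bool rx_hash_would_be_set(unsigned int features, bool rss_hash_valid)
  {
          /* fixed guard: bail out when the feature is OFF; the old code
           * returned here when it was ON, so hashes were never reported
           */
          if (!(features & NETIF_F_RXHASH))
                  return false;

          return rss_hash_valid;
  }

  int main(void)
  {
          printf("RXHASH on,  valid hash -> %d\n",
                 rx_hash_would_be_set(NETIF_F_RXHASH, true));
          printf("RXHASH off, valid hash -> %d\n",
                 rx_hash_would_be_set(0, true));
          return 0;
  }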
Change-ID: Ib2e30356dced1d3e939c8061ab6ad5bd94197e7c Signed-off-by: Mitch Williams Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_txrx.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index b0edffe88492..99a524db5560 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -1394,7 +1394,7 @@ static inline void i40e_rx_hash(struct i40e_ring *ring, cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH << I40E_RX_DESC_STATUS_FLTSTAT_SHIFT); - if (ring->netdev->features & NETIF_F_RXHASH) + if (!(ring->netdev->features & NETIF_F_RXHASH)) return; if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) { From d96a83def2f70ea7b26268efdd44eb9f1e400171 Mon Sep 17 00:00:00 2001 From: Mitch Williams Date: Tue, 3 May 2016 15:13:19 -0700 Subject: [PATCH 1554/1649] i40e: don't add broadcast filter for VFs Now that all VSIs are configured to receive broadcasts as default, we don't need to add a filter. This eliminates an annoying but harmless error message each time VFs are created or reset. Change-ID: I4cd6339684df45b0d2722133eeb84c14fa93ea19 Signed-off-by: Mitch Williams Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 8 -------- 1 file changed, 8 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 94734290907c..1fcafcfa8f14 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -665,8 +665,6 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type) goto error_alloc_vsi_res; } if (type == I40E_VSI_SRIOV) { - u8 brdcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; - vf->lan_vsi_idx = vsi->idx; vf->lan_vsi_id = vsi->id; /* If the port VLAN has been configured and then the @@ -688,12 +686,6 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type) "Could not add MAC filter %pM for VF %d\n", vf->default_lan_addr.addr, vf->vf_id); } - f = i40e_add_filter(vsi, brdcast, - vf->port_vlan_id ? vf->port_vlan_id : -1, - true, false); - if (!f) - dev_info(&pf->pdev->dev, - "Could not allocate VF broadcast filter\n"); spin_unlock_bh(&vsi->mac_filter_list_lock); } From c74dff1aaaf2ee51368a0cbf2f903cc3d8690abc Mon Sep 17 00:00:00 2001 From: Bimmy Pujari Date: Tue, 3 May 2016 15:13:20 -0700 Subject: [PATCH 1555/1649] i40e: Bump version from 1.5.10 to 1.5.16 Signed-off-by: Bimmy Pujari Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index a9812466c57d..1cd0ebf7520a 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -46,7 +46,7 @@ static const char i40e_driver_string[] = #define DRV_VERSION_MAJOR 1 #define DRV_VERSION_MINOR 5 -#define DRV_VERSION_BUILD 10 +#define DRV_VERSION_BUILD 16 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ __stringify(DRV_VERSION_MINOR) "." 
\ __stringify(DRV_VERSION_BUILD) DRV_KERN From 1c306f7f62a38ee5f05f0ee994dfe82d654cf47c Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Thu, 5 May 2016 16:18:02 +0300 Subject: [PATCH 1556/1649] i40e: fix an uninitialized variable bug We removed this initialization but it is required. Let's put it back. Fixes: 895106a577c4 ('i40e: trivial fixes') Signed-off-by: Dan Carpenter Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_hmc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_hmc.c index 5ebe12d56ebf..a7c7b1d9b7c8 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_hmc.c +++ b/drivers/net/ethernet/intel/i40e/i40e_hmc.c @@ -49,7 +49,7 @@ i40e_status i40e_add_sd_table_entry(struct i40e_hw *hw, struct i40e_hmc_sd_entry *sd_entry; bool dma_mem_alloc_done = false; struct i40e_dma_mem mem; - i40e_status ret_code; + i40e_status ret_code = I40E_SUCCESS; u64 alloc_len; if (NULL == hmc_info->sd_table.sd_entry) { From 8fbb89c6fbfd1cda9ae413cd1e042837a8edbdeb Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Sat, 14 May 2016 12:49:54 -0700 Subject: [PATCH 1557/1649] net: switchdev: Drop EXPERIMENTAL from description Switchdev has been around for quite a while now, putting "EXPERIMENTAL" in the description is no longer accurate, drop it. Signed-off-by: Florian Fainelli Acked-by: Jiri Pirko Signed-off-by: David S. Miller --- net/switchdev/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/switchdev/Kconfig b/net/switchdev/Kconfig index 86a47e17cfaf..651fa201a570 100644 --- a/net/switchdev/Kconfig +++ b/net/switchdev/Kconfig @@ -3,7 +3,7 @@ # config NET_SWITCHDEV - bool "Switch (and switch-ish) device support (EXPERIMENTAL)" + bool "Switch (and switch-ish) device support" depends on INET ---help--- This module provides glue between core networking code and device From 18d6e4e2d800cbd44a7d7d215a49f99c6508e4a5 Mon Sep 17 00:00:00 2001 From: Satish Baddipadige Date: Sun, 15 May 2016 03:04:43 -0400 Subject: [PATCH 1558/1649] bnxt_en: Fix invalid max channel parameter in ethtool -l. When there is only 1 MSI-X vector or in INTA mode, tx and rx pre-set max channel parameters are shown incorrectly in ethtool -l. With only 1 vector, bnxt_get_max_rings() will return -ENOMEM. bnxt_get_channels should check this return value, and set max_rx/max_tx to 0 if it is non-zero. Signed-off-by: Satish Baddipadige Signed-off-by: Michael Chan Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index d6e41f237f2c..28171f96ebbe 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -327,7 +327,11 @@ static void bnxt_get_channels(struct net_device *dev, bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, true); channel->max_combined = max_rx_rings; - bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, false); + if (bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, false)) { + max_rx_rings = 0; + max_tx_rings = 0; + } + tcs = netdev_get_num_tc(dev); if (tcs > 1) max_tx_rings /= tcs; From 42ee18fe4ca2a12b8370bb1c53fa6b9f9300c70c Mon Sep 17 00:00:00 2001 From: Ajit Khaparde Date: Sun, 15 May 2016 03:04:44 -0400 Subject: [PATCH 1559/1649] bnxt_en: Add Support for ETHTOOL_GMODULEINFO and ETHTOOL_GMODULEEEPRO Add support to fetch the SFP EEPROM settings from the firmware and display it via the ethtool -m command. We support SFP+ and QSFP modules. v2: Fixed a bug in bnxt_get_module_eeprom() found by Ben Hutchings. Signed-off-by: Ajit Khaparde Signed-off-by: Michael Chan Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 1 + drivers/net/ethernet/broadcom/bnxt/bnxt.h | 11 ++ .../net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 121 ++++++++++++++++++ drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h | 34 +++++ 4 files changed, 167 insertions(+) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index db84498ddbd7..448ab296ddb2 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -4734,6 +4734,7 @@ static int bnxt_update_link(struct bnxt *bp, bool chng_link_state) link_info->transceiver = resp->xcvr_pkg_type; link_info->phy_addr = resp->eee_config_phy_addr & PORT_PHY_QCFG_RESP_PHY_ADDR_MASK; + link_info->module_status = resp->module_status; if (bp->flags & BNXT_FLAG_EEE_CAP) { struct ethtool_eee *eee = &bp->eee; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 79ea558eaf64..fe81e64b390b 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -831,6 +831,7 @@ struct bnxt_link_info { u16 lp_auto_link_speeds; u16 force_link_speed; u32 preemphasis; + u8 module_status; /* copy of requested setting from ethtool cmd */ u8 autoneg; @@ -1123,6 +1124,16 @@ static inline void bnxt_disable_poll(struct bnxt_napi *bnapi) #endif +#define I2C_DEV_ADDR_A0 0xa0 +#define I2C_DEV_ADDR_A2 0xa2 +#define SFP_EEPROM_SFF_8472_COMP_ADDR 0x5e +#define SFP_EEPROM_SFF_8472_COMP_SIZE 1 +#define SFF_MODULE_ID_SFP 0x3 +#define SFF_MODULE_ID_QSFP 0xc +#define SFF_MODULE_ID_QSFP_PLUS 0xd +#define SFF_MODULE_ID_QSFP28 0x11 +#define BNXT_MAX_PHY_I2C_RESP_SIZE 64 + void bnxt_set_ring_params(struct bnxt *); void bnxt_hwrm_cmd_hdr_init(struct bnxt *, void *, u16, u16, u16); int _hwrm_send_message(struct bnxt *, void *, u32, int); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index 28171f96ebbe..a38cb047b540 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -1498,6 +1498,125 @@ static int bnxt_get_eee(struct net_device *dev, struct ethtool_eee *edata) return 0; } +static 
int bnxt_read_sfp_module_eeprom_info(struct bnxt *bp, u16 i2c_addr, + u16 page_number, u16 start_addr, + u16 data_length, u8 *buf) +{ + struct hwrm_port_phy_i2c_read_input req = {0}; + struct hwrm_port_phy_i2c_read_output *output = bp->hwrm_cmd_resp_addr; + int rc, byte_offset = 0; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_I2C_READ, -1, -1); + req.i2c_slave_addr = i2c_addr; + req.page_number = cpu_to_le16(page_number); + req.port_id = cpu_to_le16(bp->pf.port_id); + do { + u16 xfer_size; + + xfer_size = min_t(u16, data_length, BNXT_MAX_PHY_I2C_RESP_SIZE); + data_length -= xfer_size; + req.page_offset = cpu_to_le16(start_addr + byte_offset); + req.data_length = xfer_size; + req.enables = cpu_to_le32(start_addr + byte_offset ? + PORT_PHY_I2C_READ_REQ_ENABLES_PAGE_OFFSET : 0); + mutex_lock(&bp->hwrm_cmd_lock); + rc = _hwrm_send_message(bp, &req, sizeof(req), + HWRM_CMD_TIMEOUT); + if (!rc) + memcpy(buf + byte_offset, output->data, xfer_size); + mutex_unlock(&bp->hwrm_cmd_lock); + byte_offset += xfer_size; + } while (!rc && data_length > 0); + + return rc; +} + +static int bnxt_get_module_info(struct net_device *dev, + struct ethtool_modinfo *modinfo) +{ + struct bnxt *bp = netdev_priv(dev); + struct hwrm_port_phy_i2c_read_input req = {0}; + struct hwrm_port_phy_i2c_read_output *output = bp->hwrm_cmd_resp_addr; + int rc; + + /* No point in going further if phy status indicates + * module is not inserted or if it is powered down or + * if it is of type 10GBase-T + */ + if (bp->link_info.module_status > + PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG) + return -EOPNOTSUPP; + + /* This feature is not supported in older firmware versions */ + if (bp->hwrm_spec_code < 0x10202) + return -EOPNOTSUPP; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_I2C_READ, -1, -1); + req.i2c_slave_addr = I2C_DEV_ADDR_A0; + req.page_number = 0; + req.page_offset = cpu_to_le16(SFP_EEPROM_SFF_8472_COMP_ADDR); + req.data_length = SFP_EEPROM_SFF_8472_COMP_SIZE; + req.port_id = cpu_to_le16(bp->pf.port_id); + mutex_lock(&bp->hwrm_cmd_lock); + rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (!rc) { + u32 module_id = le32_to_cpu(output->data[0]); + + switch (module_id) { + case SFF_MODULE_ID_SFP: + modinfo->type = ETH_MODULE_SFF_8472; + modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; + break; + case SFF_MODULE_ID_QSFP: + case SFF_MODULE_ID_QSFP_PLUS: + modinfo->type = ETH_MODULE_SFF_8436; + modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN; + break; + case SFF_MODULE_ID_QSFP28: + modinfo->type = ETH_MODULE_SFF_8636; + modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN; + break; + default: + rc = -EOPNOTSUPP; + break; + } + } + mutex_unlock(&bp->hwrm_cmd_lock); + return rc; +} + +static int bnxt_get_module_eeprom(struct net_device *dev, + struct ethtool_eeprom *eeprom, + u8 *data) +{ + struct bnxt *bp = netdev_priv(dev); + u16 start = eeprom->offset, length = eeprom->len; + int rc; + + memset(data, 0, eeprom->len); + + /* Read A0 portion of the EEPROM */ + if (start < ETH_MODULE_SFF_8436_LEN) { + if (start + eeprom->len > ETH_MODULE_SFF_8436_LEN) + length = ETH_MODULE_SFF_8436_LEN - start; + rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A0, 0, + start, length, data); + if (rc) + return rc; + start += length; + data += length; + length = eeprom->len - length; + } + + /* Read A2 portion of the EEPROM */ + if (length) { + start -= ETH_MODULE_SFF_8436_LEN; + bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A2, 1, start, + length, data); + } + return rc; +} + const struct ethtool_ops 
bnxt_ethtool_ops = { .get_settings = bnxt_get_settings, .set_settings = bnxt_set_settings, @@ -1528,4 +1647,6 @@ const struct ethtool_ops bnxt_ethtool_ops = { .get_link = bnxt_get_link, .get_eee = bnxt_get_eee, .set_eee = bnxt_set_eee, + .get_module_info = bnxt_get_module_info, + .get_module_eeprom = bnxt_get_module_eeprom, }; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h index 80f95560086d..05e3c49a7677 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h @@ -2093,6 +2093,40 @@ struct hwrm_port_phy_qcaps_output { #define PORT_PHY_QCAPS_RESP_VALID_SFT 24 }; +/* hwrm_port_phy_i2c_read */ +/* Input (40 bytes) */ +struct hwrm_port_phy_i2c_read_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + __le32 enables; + #define PORT_PHY_I2C_READ_REQ_ENABLES_PAGE_OFFSET 0x1UL + __le16 port_id; + u8 i2c_slave_addr; + u8 unused_0; + __le16 page_number; + __le16 page_offset; + u8 data_length; + u8 unused_1[7]; +}; + +/* Output (80 bytes) */ +struct hwrm_port_phy_i2c_read_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 data[16]; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + /* Input (24 bytes) */ struct hwrm_queue_qportcfg_input { __le16 req_type; From 90c4f788f6c08aaa52edbb47a817403376523375 Mon Sep 17 00:00:00 2001 From: Ajit Khaparde Date: Sun, 15 May 2016 03:04:45 -0400 Subject: [PATCH 1560/1649] bnxt_en: Report PCIe link speed and width during driver load Add code to log a message during driver load indicating PCIe link speed and width. The log message will look like this: bnxt_en 0000:86:00.0 eth0: PCIe: Speed 8.0GT/s Width x8 Signed-off-by: Ajit Khaparde Signed-off-by: Michael Chan Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 448ab296ddb2..1171ad14d323 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -6261,6 +6261,22 @@ static int bnxt_set_dflt_rings(struct bnxt *bp) return rc; } +static void bnxt_parse_log_pcie_link(struct bnxt *bp) +{ + enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN; + enum pci_bus_speed speed = PCI_SPEED_UNKNOWN; + + if (pcie_get_minimum_link(bp->pdev, &speed, &width) || + speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN) + netdev_info(bp->dev, "Failed to determine PCIe Link Info\n"); + else + netdev_info(bp->dev, "PCIe: Speed %s Width x%d\n", + speed == PCIE_SPEED_2_5GT ? "2.5GT/s" : + speed == PCIE_SPEED_5_0GT ? "5.0GT/s" : + speed == PCIE_SPEED_8_0GT ? "8.0GT/s" : + "Unknown", width); +} + static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { static int version_printed; @@ -6381,6 +6397,8 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) board_info[ent->driver_data].name, (long)pci_resource_start(pdev, 0), dev->dev_addr); + bnxt_parse_log_pcie_link(bp); + return 0; init_err: From d0a42d6fc8eaf1b64f62b0bbc3b829b756eacf57 Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Sun, 15 May 2016 03:04:46 -0400 Subject: [PATCH 1561/1649] bnxt_en: Reduce maximum ring pages if page size is 64K. The chip supports 4K/8K/64K page sizes for the rings and we try to match it to the CPU PAGE_SIZE. 
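As a rough illustration of why the page limits shrink on 64K pages, the following back-of-the-envelope sketch (not driver code) shows how many pages a fixed-size ring needs at each page size. The 16-byte descriptor size and 2048-entry ring are assumptions made only for the arithmetic; the real driver derives its limits from BNXT_PAGE_SIZE.

#include <stdio.h>

int main(void)
{
	const unsigned int desc_size = 16;	/* assumed bytes per descriptor */
	const unsigned int ring_entries = 2048;	/* example ring size */
	const unsigned int page_sizes[] = { 4096, 8192, 65536 };

	for (unsigned int i = 0; i < 3; i++) {
		unsigned int per_page = page_sizes[i] / desc_size;
		unsigned int pages = (ring_entries + per_page - 1) / per_page;

		printf("page %6u B -> %4u descriptors/page -> %u page(s)\n",
		       page_sizes[i], per_page, pages);
	}
	return 0;
}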
The current page size limits for the rings are based on 4K/8K page size. If the page size is 64K, these limits are too large. Reduce them appropriately. Signed-off-by: Michael Chan Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.h | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index fe81e64b390b..2e4ba7216548 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -425,10 +425,17 @@ struct rx_tpa_end_cmp_ext { #define MAX_TPA 64 +#if (BNXT_PAGE_SHIFT == 16) +#define MAX_RX_PAGES 1 +#define MAX_RX_AGG_PAGES 4 +#define MAX_TX_PAGES 1 +#define MAX_CP_PAGES 8 +#else #define MAX_RX_PAGES 8 #define MAX_RX_AGG_PAGES 32 #define MAX_TX_PAGES 8 #define MAX_CP_PAGES 64 +#endif #define RX_DESC_CNT (BNXT_PAGE_SIZE / sizeof(struct rx_bd)) #define TX_DESC_CNT (BNXT_PAGE_SIZE / sizeof(struct tx_bd)) From a11fa2be6d1564375dc57530680268ad569c2632 Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Sun, 15 May 2016 03:04:47 -0400 Subject: [PATCH 1562/1649] bnxt_en: Improve the delay logic for firmware response. The current code has 2 problems: 1. The maximum wait time is not long enough. It is about 60% of the duration specified by the firmware. It is calling usleep_range(600, 800) for every 1 msec we are supposed to wait. 2. The granularity of the delay is too coarse. Many simple firmware commands finish in 25 usec or less. We fix these 2 issues by multiplying the original 1 msec loop counter by 40 and calling usleep_range(25, 40) for each iteration. There is also a second delay loop to wait for the last DMA word to complete. This delay loop should be a very short 5 usec wait. This change results in much faster bring-up/down time: Before the patch: time ip link set p4p1 up real 0m0.120s user 0m0.001s sys 0m0.009s After the patch: time ip link set p4p1 up real 0m0.030s user 0m0.000s sys 0m0.010s Signed-off-by: Michael Chan Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 1171ad14d323..e20c258775ed 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -2780,7 +2780,7 @@ void bnxt_hwrm_cmd_hdr_init(struct bnxt *bp, void *request, u16 req_type, static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len, int timeout, bool silent) { - int i, intr_process, rc; + int i, intr_process, rc, tmo_count; struct input *req = msg; u32 *data = msg; __le32 *resp_len, *valid; @@ -2809,11 +2809,12 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len, timeout = DFLT_HWRM_CMD_TIMEOUT; i = 0; + tmo_count = timeout * 40; if (intr_process) { /* Wait until hwrm response cmpl interrupt is processed */ while (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID && - i++ < timeout) { - usleep_range(600, 800); + i++ < tmo_count) { + usleep_range(25, 40); } if (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID) { @@ -2824,15 +2825,15 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len, } else { /* Check if response len is updated */ resp_len = bp->hwrm_cmd_resp_addr + HWRM_RESP_LEN_OFFSET; - for (i = 0; i < timeout; i++) { + for (i = 0; i < tmo_count; i++) { len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >> HWRM_RESP_LEN_SFT; if (len) break; - usleep_range(600, 800); + usleep_range(25, 40); } - if (i >= timeout) { + if (i >= tmo_count) { netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d\n", timeout, le16_to_cpu(req->req_type), le16_to_cpu(req->seq_id), *resp_len); @@ -2841,13 +2842,13 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len, /* Last word of resp contains valid bit */ valid = bp->hwrm_cmd_resp_addr + len - 4; - for (i = 0; i < timeout; i++) { + for (i = 0; i < 5; i++) { if (le32_to_cpu(*valid) & HWRM_RESP_VALID_MASK) break; - usleep_range(600, 800); + udelay(1); } - if (i >= timeout) { + if (i >= 5) { netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d v:%d\n", timeout, le16_to_cpu(req->req_type), le16_to_cpu(req->seq_id), len, *valid); From 8578d6c19a308dea3daf3d03acdf18724ec05590 Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Sun, 15 May 2016 03:04:48 -0400 Subject: [PATCH 1563/1649] bnxt_en: Fix length value in dmesg log firmware error message. The len value in the hwrm error message is wrong. Use the properly adjusted value in the variable len. Signed-off-by: Michael Chan Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index e20c258775ed..09d663762475 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -2836,7 +2836,7 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len, if (i >= tmo_count) { netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d\n", timeout, le16_to_cpu(req->req_type), - le16_to_cpu(req->seq_id), *resp_len); + le16_to_cpu(req->seq_id), len); return -1; } From 10289bec0072b13f629a654d94faf1dadd44f335 Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Sun, 15 May 2016 03:04:49 -0400 Subject: [PATCH 1564/1649] bnxt_en: Simplify and improve unsupported SFP+ module reporting. 
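Before the details below, a minimal user-space sketch of the status-driven reporting this patch consolidates into bnxt_get_port_module_status(). The enum values here are placeholders, not the real PORT_PHY_QCFG_RESP_MODULE_STATUS_* codes.

#include <stdio.h>

/* Placeholder codes; the real values come from the firmware interface. */
enum module_status {
	MOD_NONE = 0,
	MOD_DISABLETX,
	MOD_WARNINGMSG,
	MOD_PWRDOWN,
};

static void report_module_status(enum module_status status)
{
	switch (status) {
	case MOD_DISABLETX:
	case MOD_WARNINGMSG:
	case MOD_PWRDOWN:
		printf("Unqualified SFP+ module detected\n");
		if (status == MOD_DISABLETX)
			printf("TX is disabled\n");
		if (status == MOD_PWRDOWN)
			printf("SFP+ module is shutdown\n");
		break;
	default:
		break;	/* nothing to report */
	}
}

int main(void)
{
	report_module_status(MOD_NONE);		/* silent */
	report_module_status(MOD_DISABLETX);	/* warns and notes TX disabled */
	return 0;
}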
The current code is more complicated than necessary and can only report unsupported SFP+ module if it is plugged in after the device is up. Rename bnxt_port_module_event() to bnxt_get_port_module_status(). We already have the current module_status in the link_info structure, so just check that and report any unsupported SFP+ module status. Delete the unnecessary last_port_module_event. Call this function at the end of bnxt_open to report unsupported module already plugged in. Signed-off-by: Michael Chan Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 66 +++++++++++------------ drivers/net/ethernet/broadcom/bnxt/bnxt.h | 1 - 2 files changed, 30 insertions(+), 37 deletions(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 09d663762475..a3be41a2af26 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -1324,15 +1324,6 @@ next_rx_no_prod: ((data) & \ HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK) -#define BNXT_EVENT_POLICY_MASK \ - HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_MASK - -#define BNXT_EVENT_POLICY_SFT \ - HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_SFT - -#define BNXT_GET_EVENT_POLICY(data) \ - (((data) & BNXT_EVENT_POLICY_MASK) >> BNXT_EVENT_POLICY_SFT) - static int bnxt_async_event_process(struct bnxt *bp, struct hwrm_async_event_cmpl *cmpl) { @@ -1371,9 +1362,6 @@ static int bnxt_async_event_process(struct bnxt *bp, if (bp->pf.port_id != port_id) break; - bp->link_info.last_port_module_event = - BNXT_GET_EVENT_POLICY(data1); - set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event); break; } @@ -4788,6 +4776,33 @@ static int bnxt_update_link(struct bnxt *bp, bool chng_link_state) return 0; } +static void bnxt_get_port_module_status(struct bnxt *bp) +{ + struct bnxt_link_info *link_info = &bp->link_info; + struct hwrm_port_phy_qcfg_output *resp = &link_info->phy_qcfg_resp; + u8 module_status; + + if (bnxt_update_link(bp, true)) + return; + + module_status = link_info->module_status; + switch (module_status) { + case PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX: + case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN: + case PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG: + netdev_warn(bp->dev, "Unqualified SFP+ module detected on port %d\n", + bp->pf.port_id); + if (bp->hwrm_spec_code >= 0x10201) { + netdev_warn(bp->dev, "Module part number %s\n", + resp->phy_vendor_partnumber); + } + if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX) + netdev_warn(bp->dev, "TX is disabled\n"); + if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN) + netdev_warn(bp->dev, "SFP+ module is shutdown\n"); + } +} + static void bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req) { @@ -5080,7 +5095,8 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) /* Enable TX queues */ bnxt_tx_enable(bp); mod_timer(&bp->timer, jiffies + bp->current_interval); - bnxt_update_link(bp, true); + /* Poll link status and check for SFP+ module status */ + bnxt_get_port_module_status(bp); return 0; @@ -5615,28 +5631,6 @@ bnxt_restart_timer: mod_timer(&bp->timer, jiffies + bp->current_interval); } -static void bnxt_port_module_event(struct bnxt *bp) -{ - struct bnxt_link_info *link_info = &bp->link_info; - struct hwrm_port_phy_qcfg_output *resp = &link_info->phy_qcfg_resp; - - if (bnxt_update_link(bp, true)) - return; - - if 
(link_info->last_port_module_event != 0) { - netdev_warn(bp->dev, "Unqualified SFP+ module detected on port %d\n", - bp->pf.port_id); - if (bp->hwrm_spec_code >= 0x10201) { - netdev_warn(bp->dev, "Module part number %s\n", - resp->phy_vendor_partnumber); - } - } - if (link_info->last_port_module_event == 1) - netdev_warn(bp->dev, "TX is disabled\n"); - if (link_info->last_port_module_event == 3) - netdev_warn(bp->dev, "Shutdown SFP+ module\n"); -} - static void bnxt_cfg_ntp_filters(struct bnxt *); static void bnxt_sp_task(struct work_struct *work) @@ -5685,7 +5679,7 @@ static void bnxt_sp_task(struct work_struct *work) } if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) - bnxt_port_module_event(bp); + bnxt_get_port_module_status(bp); if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) bnxt_hwrm_port_qstats(bp); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 2e4ba7216548..2824d65b2e35 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -850,7 +850,6 @@ struct bnxt_link_info { u32 advertising; bool force_link_chng; - u8 last_port_module_event; /* a copy of phy_qcfg output used to report link * info to VF */ From 5049e33b559a44e9f216d86c58c7c7fce6f5df2f Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Sun, 15 May 2016 03:04:50 -0400 Subject: [PATCH 1565/1649] bnxt_en: Add BCM57314 device ID. Signed-off-by: Michael Chan Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index a3be41a2af26..9de1594be702 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -78,6 +78,7 @@ enum board_idx { BCM57402, BCM57404, BCM57406, + BCM57314, BCM57304_VF, BCM57404_VF, }; @@ -92,6 +93,7 @@ static const struct { { "Broadcom BCM57402 NetXtreme-E Dual-port 10Gb Ethernet" }, { "Broadcom BCM57404 NetXtreme-E Dual-port 10Gb/25Gb Ethernet" }, { "Broadcom BCM57406 NetXtreme-E Dual-port 10GBase-T Ethernet" }, + { "Broadcom BCM57314 NetXtreme-C Dual-port 10Gb/25Gb/40Gb/50Gb Ethernet" }, { "Broadcom BCM57304 NetXtreme-C Ethernet Virtual Function" }, { "Broadcom BCM57404 NetXtreme-E Ethernet Virtual Function" }, }; @@ -103,6 +105,7 @@ static const struct pci_device_id bnxt_pci_tbl[] = { { PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 }, { PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 }, { PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 }, + { PCI_VDEVICE(BROADCOM, 0x16df), .driver_data = BCM57314 }, #ifdef CONFIG_BNXT_SRIOV { PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = BCM57304_VF }, { PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = BCM57404_VF }, From b67daab033293b3882ba4dc926ffb084d70044e0 Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Sun, 15 May 2016 03:04:51 -0400 Subject: [PATCH 1566/1649] bnxt_en: Use dma_rmb() instead of rmb(). Use the weaker but more appropriate dma_rmb() to order the reading of the completion ring. Suggested-by: Ajit Khaparde Signed-off-by: Michael Chan Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 9de1594be702..5a0dca3e6ef6 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -1494,7 +1494,7 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget) /* The valid test of the entry must be done first before * reading any further. */ - rmb(); + dma_rmb(); if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) { tx_pkts++; /* return full budget so NAPI will complete. */ From da47b4572056487fd7941c26f73b3e8815ff712a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Uwe=20Kleine-K=C3=B6nig?= Date: Thu, 12 May 2016 12:00:33 +0200 Subject: [PATCH 1567/1649] phy: add support for a reset-gpio specification MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The framework only asserts (for now) that the reset gpio is not active. Signed-off-by: Uwe Kleine-König Reviewed-by: Roger Quadros Signed-off-by: David S. Miller --- Documentation/devicetree/bindings/net/phy.txt | 3 +++ drivers/net/phy/phy_device.c | 8 ++++++++ 2 files changed, 11 insertions(+) diff --git a/Documentation/devicetree/bindings/net/phy.txt b/Documentation/devicetree/bindings/net/phy.txt index bc1c3c8bf8fa..c00a9a894547 100644 --- a/Documentation/devicetree/bindings/net/phy.txt +++ b/Documentation/devicetree/bindings/net/phy.txt @@ -35,6 +35,8 @@ Optional Properties: - broken-turn-around: If set, indicates the PHY device does not correctly release the turn around line low at the end of a MDIO transaction. +- reset-gpios: Reference to a GPIO used to reset the phy. + Example: ethernet-phy@0 { @@ -42,4 +44,5 @@ ethernet-phy@0 { interrupt-parent = <40000>; interrupts = <35 1>; reg = <0>; + reset-gpios = <&gpio1 17 GPIO_ACTIVE_LOW>; }; diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index e977ba931878..307f72a0f2e2 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -34,6 +34,7 @@ #include #include #include +#include #include @@ -1570,9 +1571,16 @@ static int phy_probe(struct device *dev) struct device_driver *drv = phydev->mdio.dev.driver; struct phy_driver *phydrv = to_phy_driver(drv); int err = 0; + struct gpio_descs *reset_gpios; phydev->drv = phydrv; + /* take phy out of reset */ + reset_gpios = devm_gpiod_get_array_optional(dev, "reset", + GPIOD_OUT_LOW); + if (IS_ERR(reset_gpios)) + return PTR_ERR(reset_gpios); + /* Disable the interrupt if the PHY doesn't support it * but the interrupt is still a valid one */ From f580aec4bfd7babe51f086e599400027def08ed8 Mon Sep 17 00:00:00 2001 From: Vitaly Kuznetsov Date: Fri, 13 May 2016 13:55:20 +0200 Subject: [PATCH 1568/1649] hv_netvsc: move start_remove flag to net_device_context struct netvsc_device is destroyed on mtu change so keeping the protection flag there is not a good idea. Move it to struct net_device_context which is preserved. Signed-off-by: Vitaly Kuznetsov Signed-off-by: David S. 
Miller --- drivers/net/hyperv/hyperv_net.h | 4 +++- drivers/net/hyperv/netvsc.c | 3 +-- drivers/net/hyperv/netvsc_drv.c | 12 +++++++++--- 3 files changed, 13 insertions(+), 6 deletions(-) diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index 6700a4dca7c8..18e9cc8ea47b 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h @@ -672,6 +672,9 @@ struct net_device_context { /* Ethtool settings */ u8 duplex; u32 speed; + + /* the device is going away */ + bool start_remove; }; /* Per netvsc device */ @@ -682,7 +685,6 @@ struct netvsc_device { atomic_t num_outstanding_sends; wait_queue_head_t wait_drain; - bool start_remove; bool destroy; /* Receive buffer allocated by us but manages by NetVSP */ diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index eddce3cdafa8..5e2017bb93f1 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c @@ -74,7 +74,6 @@ static struct netvsc_device *alloc_net_device(struct hv_device *device) } init_waitqueue_head(&net_device->wait_drain); - net_device->start_remove = false; net_device->destroy = false; atomic_set(&net_device->open_cnt, 0); atomic_set(&net_device->vf_use_cnt, 0); @@ -691,7 +690,7 @@ static void netvsc_send_completion(struct netvsc_device *net_device, wake_up(&net_device->wait_drain); if (netif_tx_queue_stopped(netdev_get_tx_queue(ndev, q_idx)) && - !net_device->start_remove && + !net_device->nd_ctx->start_remove && (hv_ringbuf_avail_percent(&channel->outbound) > RING_AVAIL_PERCENT_HIWATER || queue_sends < 1)) netif_tx_wake_queue(netdev_get_tx_queue( diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index ba3f3f3d48ef..b3fa2cdcb3f6 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -793,7 +793,7 @@ static int netvsc_set_channels(struct net_device *net, goto out; do_set: - nvdev->start_remove = true; + net_device_ctx->start_remove = true; rndis_filter_device_remove(dev); nvdev->num_chn = channels->combined_count; @@ -837,6 +837,7 @@ static int netvsc_set_channels(struct net_device *net, out: netvsc_open(net); + net_device_ctx->start_remove = false; return ret; @@ -927,7 +928,7 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu) num_chn = nvdev->num_chn; - nvdev->start_remove = true; + ndevctx->start_remove = true; rndis_filter_device_remove(hdev); ndev->mtu = mtu; @@ -943,6 +944,7 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu) out: netvsc_open(ndev); + ndevctx->start_remove = false; return ret; } @@ -1358,6 +1360,9 @@ static int netvsc_probe(struct hv_device *dev, } hv_set_drvdata(dev, net); + + net_device_ctx->start_remove = false; + INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change); INIT_WORK(&net_device_ctx->work, do_set_multicast); INIT_WORK(&net_device_ctx->gwrk.dwrk, netvsc_notify_peers); @@ -1419,9 +1424,10 @@ static int netvsc_remove(struct hv_device *dev) return 0; } - net_device->start_remove = true; ndev_ctx = netdev_priv(net); + ndev_ctx->start_remove = true; + cancel_delayed_work_sync(&ndev_ctx->dwork); cancel_work_sync(&ndev_ctx->work); From 1bdcec8a5f05445752a0639edd603ac09ae6c553 Mon Sep 17 00:00:00 2001 From: Vitaly Kuznetsov Date: Fri, 13 May 2016 13:55:21 +0200 Subject: [PATCH 1569/1649] hv_netvsc: use start_remove flag to protect netvsc_link_change() netvsc_link_change() can race with netvsc_change_mtu() or netvsc_set_channels() as these functions destroy struct netvsc_device and rndis filter. 
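A minimal user-space sketch of the synchronization pattern this patch applies, with a pthread mutex standing in for the rtnl lock; all names are invented for illustration and this is not the driver code.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t cfg_lock = PTHREAD_MUTEX_INITIALIZER;	/* stand-in for rtnl */
static bool start_remove;
static int *device_state;	/* stands in for netvsc_device/rndis state */

/* Worker: bail out early if a reconfigure is tearing the state down. */
static void link_change_worker(void)
{
	pthread_mutex_lock(&cfg_lock);
	if (start_remove || !device_state) {
		pthread_mutex_unlock(&cfg_lock);
		return;
	}
	printf("handling link change, state=%d\n", *device_state);
	pthread_mutex_unlock(&cfg_lock);
}

/* Reconfigure path: set the flag before destroying and re-creating state. */
static void change_mtu(int *new_state)
{
	pthread_mutex_lock(&cfg_lock);
	start_remove = true;
	device_state = NULL;		/* destroy old state */
	device_state = new_state;	/* re-create */
	start_remove = false;
	pthread_mutex_unlock(&cfg_lock);
}

int main(void)
{
	int state = 1500;

	change_mtu(&state);
	link_change_worker();
	return 0;
}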
Use start_remove flag for syncronization. As netvsc_change_mtu()/netvsc_set_channels() are called with rtnl lock held we need to take it before checking start_remove value in netvsc_link_change(). Reported-by: Haiyang Zhang Signed-off-by: Vitaly Kuznetsov Signed-off-by: David S. Miller --- drivers/net/hyperv/netvsc_drv.c | 21 +++++++++++++++++---- 1 file changed, 17 insertions(+), 4 deletions(-) diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index b3fa2cdcb3f6..01de2dcef170 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -838,6 +838,8 @@ static int netvsc_set_channels(struct net_device *net, out: netvsc_open(net); net_device_ctx->start_remove = false; + /* We may have missed link change notifications */ + schedule_delayed_work(&net_device_ctx->dwork, 0); return ret; @@ -946,6 +948,9 @@ out: netvsc_open(ndev); ndevctx->start_remove = false; + /* We may have missed link change notifications */ + schedule_delayed_work(&ndevctx->dwork, 0); + return ret; } @@ -1066,6 +1071,11 @@ static void netvsc_link_change(struct work_struct *w) unsigned long flags, next_reconfig, delay; ndev_ctx = container_of(w, struct net_device_context, dwork.work); + + rtnl_lock(); + if (ndev_ctx->start_remove) + goto out_unlock; + net_device = hv_get_drvdata(ndev_ctx->device_ctx); rdev = net_device->extension; net = net_device->ndev; @@ -1079,7 +1089,7 @@ static void netvsc_link_change(struct work_struct *w) delay = next_reconfig - jiffies; delay = delay < LINKCHANGE_INT ? delay : LINKCHANGE_INT; schedule_delayed_work(&ndev_ctx->dwork, delay); - return; + goto out_unlock; } ndev_ctx->last_reconfig = jiffies; @@ -1093,9 +1103,7 @@ static void netvsc_link_change(struct work_struct *w) spin_unlock_irqrestore(&ndev_ctx->lock, flags); if (!event) - return; - - rtnl_lock(); + goto out_unlock; switch (event->event) { /* Only the following events are possible due to the check in @@ -1144,6 +1152,11 @@ static void netvsc_link_change(struct work_struct *w) */ if (reschedule) schedule_delayed_work(&ndev_ctx->dwork, LINKCHANGE_INT); + + return; + +out_unlock: + rtnl_unlock(); } static void netvsc_free_netdev(struct net_device *netdev) From 3d541ac5a92af708d0085925d136f875f3a58d57 Mon Sep 17 00:00:00 2001 From: Vitaly Kuznetsov Date: Fri, 13 May 2016 13:55:22 +0200 Subject: [PATCH 1570/1649] hv_netvsc: untangle the pointer mess We have the following structures keeping netvsc adapter state: - struct net_device - struct net_device_context - struct netvsc_device - struct rndis_device - struct hv_device and there are pointers/dependencies between them: - struct net_device_context is contained in struct net_device - struct hv_device has driver_data pointer which points to 'struct net_device' OR 'struct netvsc_device' depending on driver's state (!). - struct net_device_context has a pointer to 'struct hv_device'. - struct netvsc_device has pointers to 'struct hv_device' and 'struct net_device_context'. - struct rndis_device has a pointer to 'struct netvsc_device'. Different functions get different structures as parameters and use these pointers for traveling. The problem is (in addition to keeping in mind this complex graph) that some of these structures (struct netvsc_device and struct rndis_device) are being removed and re-created on mtu change (as we implement it as re-creation of hyper-v device) so our travel using these pointers is dangerous. 
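For a rough picture of what is being untangled, the following skeletal sketch shows the old pointer graph described above. Fields are abbreviated, everything else is omitted, and it is purely illustrative; it compiles as a standalone translation unit but is not the driver code.

/* Skeletal view of the old layout before this patch. */
struct hv_device {
	void *driver_data;	/* net_device OR netvsc_device, depending on state */
};

struct net_device_context {
	struct hv_device *device_ctx;
};

struct net_device {
	struct net_device_context ctx;	/* private area embedded in net_device */
};

struct netvsc_device {			/* destroyed and re-created on MTU change */
	struct hv_device *dev;
	struct net_device_context *nd_ctx;
};

struct rndis_device {			/* destroyed and re-created on MTU change */
	struct netvsc_device *net_dev;
};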
Simplify this to a the following: - add struct netvsc_device pointer to struct net_device_context (which is a part of struct net_device and thus never disappears) - remove struct hv_device and struct net_device_context pointers from struct netvsc_device - replace pointer to 'struct netvsc_device' with pointer to 'struct net_device'. - always keep 'struct net_device' in hv_device driver_data. We'll end up with the following 'circular' structure: net_device: [net_device_context] -> netvsc_device -> rndis_device -> net_device -> hv_device -> net_device On MTU change we'll be removing the 'netvsc_device -> rndis_device' branch and re-creating it making the synchronization easier. There is one additional redundant pointer left, it is struct net_device link in struct netvsc_device, it is going to be removed in a separate commit. Signed-off-by: Vitaly Kuznetsov Signed-off-by: David S. Miller --- drivers/net/hyperv/hyperv_net.h | 10 ++-- drivers/net/hyperv/netvsc.c | 82 +++++++++++++------------------ drivers/net/hyperv/netvsc_drv.c | 51 ++++++++----------- drivers/net/hyperv/rndis_filter.c | 78 +++++++++++++++-------------- 4 files changed, 99 insertions(+), 122 deletions(-) diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index 18e9cc8ea47b..0f3874379869 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h @@ -158,7 +158,7 @@ enum rndis_device_state { }; struct rndis_device { - struct netvsc_device *net_dev; + struct net_device *ndev; enum rndis_device_state state; bool link_state; @@ -173,6 +173,7 @@ struct rndis_device { /* Interface */ struct rndis_message; +struct netvsc_device; int netvsc_device_add(struct hv_device *device, void *additional_info); int netvsc_device_remove(struct hv_device *device); int netvsc_send(struct hv_device *device, @@ -653,6 +654,8 @@ struct garp_wrk { struct net_device_context { /* point back to our device context */ struct hv_device *device_ctx; + /* netvsc_device */ + struct netvsc_device *nvdev; /* reconfigure work */ struct delayed_work dwork; /* last reconfig time */ @@ -679,8 +682,6 @@ struct net_device_context { /* Per netvsc device */ struct netvsc_device { - struct hv_device *dev; - u32 nvsp_version; atomic_t num_outstanding_sends; @@ -734,9 +735,6 @@ struct netvsc_device { u32 max_pkt; /* max number of pkt in one send, e.g. 8 */ u32 pkt_align; /* alignment bytes, e.g. 8 */ - /* The net device context */ - struct net_device_context *nd_ctx; - /* 1: allocated, serial number is valid. 
0: not allocated */ u32 vf_alloc; /* Serial number of the VF to team with */ diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index 5e2017bb93f1..1cd01ad2194f 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c @@ -40,7 +40,9 @@ void netvsc_switch_datapath(struct netvsc_device *nv_dev, bool vf) { struct nvsp_message *init_pkt = &nv_dev->channel_init_pkt; - struct hv_device *dev = nv_dev->dev; + struct net_device *ndev = nv_dev->ndev; + struct net_device_context *net_device_ctx = netdev_priv(ndev); + struct hv_device *dev = net_device_ctx->device_ctx; memset(init_pkt, 0, sizeof(struct nvsp_message)); init_pkt->hdr.msg_type = NVSP_MSG4_TYPE_SWITCH_DATA_PATH; @@ -62,6 +64,7 @@ static struct netvsc_device *alloc_net_device(struct hv_device *device) { struct netvsc_device *net_device; struct net_device *ndev = hv_get_drvdata(device); + struct net_device_context *net_device_ctx = netdev_priv(ndev); net_device = kzalloc(sizeof(struct netvsc_device), GFP_KERNEL); if (!net_device) @@ -77,7 +80,6 @@ static struct netvsc_device *alloc_net_device(struct hv_device *device) net_device->destroy = false; atomic_set(&net_device->open_cnt, 0); atomic_set(&net_device->vf_use_cnt, 0); - net_device->dev = device; net_device->ndev = ndev; net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT; net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT; @@ -85,7 +87,8 @@ static struct netvsc_device *alloc_net_device(struct hv_device *device) net_device->vf_netdev = NULL; net_device->vf_inject = false; - hv_set_drvdata(device, net_device); + net_device_ctx->nvdev = net_device; + return net_device; } @@ -97,9 +100,10 @@ static void free_netvsc_device(struct netvsc_device *nvdev) static struct netvsc_device *get_outbound_net_device(struct hv_device *device) { - struct netvsc_device *net_device; + struct net_device *ndev = hv_get_drvdata(device); + struct net_device_context *net_device_ctx = netdev_priv(ndev); + struct netvsc_device *net_device = net_device_ctx->nvdev; - net_device = hv_get_drvdata(device); if (net_device && net_device->destroy) net_device = NULL; @@ -108,9 +112,9 @@ static struct netvsc_device *get_outbound_net_device(struct hv_device *device) static struct netvsc_device *get_inbound_net_device(struct hv_device *device) { - struct netvsc_device *net_device; - - net_device = hv_get_drvdata(device); + struct net_device *ndev = hv_get_drvdata(device); + struct net_device_context *net_device_ctx = netdev_priv(ndev); + struct netvsc_device *net_device = net_device_ctx->nvdev; if (!net_device) goto get_in_err; @@ -124,11 +128,13 @@ get_in_err: } -static int netvsc_destroy_buf(struct netvsc_device *net_device) +static int netvsc_destroy_buf(struct hv_device *device) { struct nvsp_message *revoke_packet; int ret = 0; - struct net_device *ndev = net_device->ndev; + struct net_device *ndev = hv_get_drvdata(device); + struct net_device_context *net_device_ctx = netdev_priv(ndev); + struct netvsc_device *net_device = net_device_ctx->nvdev; /* * If we got a section count, it means we received a @@ -146,7 +152,7 @@ static int netvsc_destroy_buf(struct netvsc_device *net_device) revoke_packet->msg.v1_msg. 
revoke_recv_buf.id = NETVSC_RECEIVE_BUFFER_ID; - ret = vmbus_sendpacket(net_device->dev->channel, + ret = vmbus_sendpacket(device->channel, revoke_packet, sizeof(struct nvsp_message), (unsigned long)revoke_packet, @@ -164,8 +170,8 @@ static int netvsc_destroy_buf(struct netvsc_device *net_device) /* Teardown the gpadl on the vsp end */ if (net_device->recv_buf_gpadl_handle) { - ret = vmbus_teardown_gpadl(net_device->dev->channel, - net_device->recv_buf_gpadl_handle); + ret = vmbus_teardown_gpadl(device->channel, + net_device->recv_buf_gpadl_handle); /* If we failed here, we might as well return and have a leak * rather than continue and a bugchk @@ -206,7 +212,7 @@ static int netvsc_destroy_buf(struct netvsc_device *net_device) revoke_packet->msg.v1_msg.revoke_send_buf.id = NETVSC_SEND_BUFFER_ID; - ret = vmbus_sendpacket(net_device->dev->channel, + ret = vmbus_sendpacket(device->channel, revoke_packet, sizeof(struct nvsp_message), (unsigned long)revoke_packet, @@ -222,7 +228,7 @@ static int netvsc_destroy_buf(struct netvsc_device *net_device) } /* Teardown the gpadl on the vsp end */ if (net_device->send_buf_gpadl_handle) { - ret = vmbus_teardown_gpadl(net_device->dev->channel, + ret = vmbus_teardown_gpadl(device->channel, net_device->send_buf_gpadl_handle); /* If we failed here, we might as well return and have a leak @@ -434,7 +440,7 @@ static int netvsc_init_buf(struct hv_device *device) goto exit; cleanup: - netvsc_destroy_buf(net_device); + netvsc_destroy_buf(device); exit: return ret; @@ -565,9 +571,9 @@ cleanup: return ret; } -static void netvsc_disconnect_vsp(struct netvsc_device *net_device) +static void netvsc_disconnect_vsp(struct hv_device *device) { - netvsc_destroy_buf(net_device); + netvsc_destroy_buf(device); } /* @@ -575,24 +581,13 @@ static void netvsc_disconnect_vsp(struct netvsc_device *net_device) */ int netvsc_device_remove(struct hv_device *device) { - struct netvsc_device *net_device; - unsigned long flags; + struct net_device *ndev = hv_get_drvdata(device); + struct net_device_context *net_device_ctx = netdev_priv(ndev); + struct netvsc_device *net_device = net_device_ctx->nvdev; - net_device = hv_get_drvdata(device); + netvsc_disconnect_vsp(device); - netvsc_disconnect_vsp(net_device); - - /* - * Since we have already drained, we don't need to busy wait - * as was done in final_release_stor_device() - * Note that we cannot set the ext pointer to NULL until - * we have drained - to drain the outgoing packets, we need to - * allow incoming packets. 
- */ - - spin_lock_irqsave(&device->channel->inbound_lock, flags); - hv_set_drvdata(device, NULL); - spin_unlock_irqrestore(&device->channel->inbound_lock, flags); + net_device_ctx->nvdev = NULL; /* * At this point, no one should be accessing net_device @@ -640,12 +635,11 @@ static void netvsc_send_completion(struct netvsc_device *net_device, { struct nvsp_message *nvsp_packet; struct hv_netvsc_packet *nvsc_packet; - struct net_device *ndev; + struct net_device *ndev = hv_get_drvdata(device); + struct net_device_context *net_device_ctx = netdev_priv(ndev); u32 send_index; struct sk_buff *skb; - ndev = net_device->ndev; - nvsp_packet = (struct nvsp_message *)((unsigned long)packet + (packet->offset8 << 3)); @@ -690,7 +684,7 @@ static void netvsc_send_completion(struct netvsc_device *net_device, wake_up(&net_device->wait_drain); if (netif_tx_queue_stopped(netdev_get_tx_queue(ndev, q_idx)) && - !net_device->nd_ctx->start_remove && + !net_device_ctx->start_remove && (hv_ringbuf_avail_percent(&channel->outbound) > RING_AVAIL_PERCENT_HIWATER || queue_sends < 1)) netif_tx_wake_queue(netdev_get_tx_queue( @@ -1264,17 +1258,7 @@ int netvsc_device_add(struct hv_device *device, void *additional_info) net_device->ring_size = ring_size; - /* - * Coming into this function, struct net_device * is - * registered as the driver private data. - * In alloc_net_device(), we register struct netvsc_device * - * as the driver private data and stash away struct net_device * - * in struct netvsc_device *. - */ - ndev = net_device->ndev; - - /* Add netvsc_device context to netvsc_device */ - net_device->nd_ctx = netdev_priv(ndev); + ndev = hv_get_drvdata(device); /* Initialize the NetVSC channel extension */ init_completion(&net_device->channel_init_wait); diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 01de2dcef170..a33a1c92d489 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -70,7 +70,7 @@ static void do_set_multicast(struct work_struct *w) struct netvsc_device *nvdev; struct rndis_device *rdev; - nvdev = hv_get_drvdata(ndevctx->device_ctx); + nvdev = ndevctx->nvdev; if (nvdev == NULL || nvdev->ndev == NULL) return; @@ -99,7 +99,7 @@ static int netvsc_open(struct net_device *net) { struct net_device_context *net_device_ctx = netdev_priv(net); struct hv_device *device_obj = net_device_ctx->device_ctx; - struct netvsc_device *nvdev; + struct netvsc_device *nvdev = net_device_ctx->nvdev; struct rndis_device *rdev; int ret = 0; @@ -114,7 +114,6 @@ static int netvsc_open(struct net_device *net) netif_tx_wake_all_queues(net); - nvdev = hv_get_drvdata(device_obj); rdev = nvdev->extension; if (!rdev->link_state) netif_carrier_on(net); @@ -126,7 +125,7 @@ static int netvsc_close(struct net_device *net) { struct net_device_context *net_device_ctx = netdev_priv(net); struct hv_device *device_obj = net_device_ctx->device_ctx; - struct netvsc_device *nvdev = hv_get_drvdata(device_obj); + struct netvsc_device *nvdev = net_device_ctx->nvdev; int ret; u32 aread, awrite, i, msec = 10, retry = 0, retry_max = 20; struct vmbus_channel *chn; @@ -205,8 +204,7 @@ static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb, void *accel_priv, select_queue_fallback_t fallback) { struct net_device_context *net_device_ctx = netdev_priv(ndev); - struct hv_device *hdev = net_device_ctx->device_ctx; - struct netvsc_device *nvsc_dev = hv_get_drvdata(hdev); + struct netvsc_device *nvsc_dev = net_device_ctx->nvdev; u32 hash; u16 q_idx = 0; @@ -580,7 +578,6 @@ 
void netvsc_linkstatus_callback(struct hv_device *device_obj, struct rndis_indicate_status *indicate = &resp->msg.indicate_status; struct net_device *net; struct net_device_context *ndev_ctx; - struct netvsc_device *net_device; struct netvsc_reconfig *event; unsigned long flags; @@ -590,8 +587,7 @@ void netvsc_linkstatus_callback(struct hv_device *device_obj, indicate->status != RNDIS_STATUS_MEDIA_DISCONNECT) return; - net_device = hv_get_drvdata(device_obj); - net = net_device->ndev; + net = hv_get_drvdata(device_obj); if (!net || net->reg_state != NETREG_REGISTERED) return; @@ -659,16 +655,15 @@ int netvsc_recv_callback(struct hv_device *device_obj, struct vmbus_channel *channel, u16 vlan_tci) { - struct net_device *net; - struct net_device_context *net_device_ctx; + struct net_device *net = hv_get_drvdata(device_obj); + struct net_device_context *net_device_ctx = netdev_priv(net); struct sk_buff *skb; struct sk_buff *vf_skb; struct netvsc_stats *rx_stats; - struct netvsc_device *netvsc_dev = hv_get_drvdata(device_obj); + struct netvsc_device *netvsc_dev = net_device_ctx->nvdev; u32 bytes_recvd = packet->total_data_buflen; int ret = 0; - net = netvsc_dev->ndev; if (!net || net->reg_state != NETREG_REGISTERED) return NVSP_STAT_FAIL; @@ -743,8 +738,7 @@ static void netvsc_get_channels(struct net_device *net, struct ethtool_channels *channel) { struct net_device_context *net_device_ctx = netdev_priv(net); - struct hv_device *dev = net_device_ctx->device_ctx; - struct netvsc_device *nvdev = hv_get_drvdata(dev); + struct netvsc_device *nvdev = net_device_ctx->nvdev; if (nvdev) { channel->max_combined = nvdev->max_chn; @@ -757,7 +751,7 @@ static int netvsc_set_channels(struct net_device *net, { struct net_device_context *net_device_ctx = netdev_priv(net); struct hv_device *dev = net_device_ctx->device_ctx; - struct netvsc_device *nvdev = hv_get_drvdata(dev); + struct netvsc_device *nvdev = net_device_ctx->nvdev; struct netvsc_device_info device_info; u32 num_chn; u32 max_chn; @@ -798,9 +792,6 @@ static int netvsc_set_channels(struct net_device *net, nvdev->num_chn = channels->combined_count; - net_device_ctx->device_ctx = dev; - hv_set_drvdata(dev, net); - memset(&device_info, 0, sizeof(device_info)); device_info.num_chn = nvdev->num_chn; /* passed to RNDIS */ device_info.ring_size = ring_size; @@ -815,7 +806,7 @@ static int netvsc_set_channels(struct net_device *net, goto recover; } - nvdev = hv_get_drvdata(dev); + nvdev = net_device_ctx->nvdev; ret = netif_set_real_num_tx_queues(net, nvdev->num_chn); if (ret) { @@ -908,8 +899,8 @@ static int netvsc_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) static int netvsc_change_mtu(struct net_device *ndev, int mtu) { struct net_device_context *ndevctx = netdev_priv(ndev); - struct hv_device *hdev = ndevctx->device_ctx; - struct netvsc_device *nvdev = hv_get_drvdata(hdev); + struct netvsc_device *nvdev = ndevctx->nvdev; + struct hv_device *hdev = ndevctx->device_ctx; struct netvsc_device_info device_info; int limit = ETH_DATA_LEN; u32 num_chn; @@ -935,9 +926,6 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu) ndev->mtu = mtu; - ndevctx->device_ctx = hdev; - hv_set_drvdata(hdev, ndev); - memset(&device_info, 0, sizeof(device_info)); device_info.ring_size = ring_size; device_info.num_chn = num_chn; @@ -1076,7 +1064,7 @@ static void netvsc_link_change(struct work_struct *w) if (ndev_ctx->start_remove) goto out_unlock; - net_device = hv_get_drvdata(ndev_ctx->device_ctx); + net_device = ndev_ctx->nvdev; rdev = 
net_device->extension; net = net_device->ndev; @@ -1201,7 +1189,7 @@ static struct netvsc_device *get_netvsc_device(char *mac) if (netvsc_ctx == NULL) return NULL; - return hv_get_drvdata(netvsc_ctx->device_ctx); + return netvsc_ctx->nvdev; } static int netvsc_register_vf(struct net_device *vf_netdev) @@ -1407,7 +1395,7 @@ static int netvsc_probe(struct hv_device *dev, } memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN); - nvdev = hv_get_drvdata(dev); + nvdev = net_device_ctx->nvdev; netif_set_real_num_tx_queues(net, nvdev->num_chn); netif_set_real_num_rx_queues(net, nvdev->num_chn); @@ -1429,8 +1417,7 @@ static int netvsc_remove(struct hv_device *dev) struct net_device_context *ndev_ctx; struct netvsc_device *net_device; - net_device = hv_get_drvdata(dev); - net = net_device->ndev; + net = hv_get_drvdata(dev); if (net == NULL) { dev_err(&dev->device, "No net device to remove\n"); @@ -1439,6 +1426,8 @@ static int netvsc_remove(struct hv_device *dev) ndev_ctx = netdev_priv(net); + net_device = ndev_ctx->nvdev; + ndev_ctx->start_remove = true; cancel_delayed_work_sync(&ndev_ctx->dwork); @@ -1455,6 +1444,8 @@ static int netvsc_remove(struct hv_device *dev) */ rndis_filter_device_remove(dev); + hv_set_drvdata(dev, NULL); + netvsc_free_netdev(net); return 0; } diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c index a59cdebc9b4b..6caba5166ebb 100644 --- a/drivers/net/hyperv/rndis_filter.c +++ b/drivers/net/hyperv/rndis_filter.c @@ -126,11 +126,7 @@ static void put_rndis_request(struct rndis_device *dev, static void dump_rndis_message(struct hv_device *hv_dev, struct rndis_message *rndis_msg) { - struct net_device *netdev; - struct netvsc_device *net_device; - - net_device = hv_get_drvdata(hv_dev); - netdev = net_device->ndev; + struct net_device *netdev = hv_get_drvdata(hv_dev); switch (rndis_msg->ndis_msg_type) { case RNDIS_MSG_PACKET: @@ -211,6 +207,7 @@ static int rndis_filter_send_request(struct rndis_device *dev, struct hv_netvsc_packet *packet; struct hv_page_buffer page_buf[2]; struct hv_page_buffer *pb = page_buf; + struct net_device_context *net_device_ctx = netdev_priv(dev->ndev); /* Setup the packet to send it */ packet = &req->pkt; @@ -236,7 +233,7 @@ static int rndis_filter_send_request(struct rndis_device *dev, pb[0].len; } - ret = netvsc_send(dev->net_dev->dev, packet, NULL, &pb, NULL); + ret = netvsc_send(net_device_ctx->device_ctx, packet, NULL, &pb, NULL); return ret; } @@ -262,9 +259,7 @@ static void rndis_filter_receive_response(struct rndis_device *dev, struct rndis_request *request = NULL; bool found = false; unsigned long flags; - struct net_device *ndev; - - ndev = dev->net_dev->ndev; + struct net_device *ndev = dev->ndev; spin_lock_irqsave(&dev->request_lock, flags); list_for_each_entry(request, &dev->req_list, list_ent) { @@ -355,6 +350,7 @@ static int rndis_filter_receive_data(struct rndis_device *dev, struct ndis_pkt_8021q_info *vlan; struct ndis_tcp_ip_checksum_info *csum_info; u16 vlan_tci = 0; + struct net_device_context *net_device_ctx = netdev_priv(dev->ndev); rndis_pkt = &msg->msg.pkt; @@ -368,7 +364,7 @@ static int rndis_filter_receive_data(struct rndis_device *dev, * should be the data packet size plus the trailer padding size */ if (pkt->total_data_buflen < rndis_pkt->data_len) { - netdev_err(dev->net_dev->ndev, "rndis message buffer " + netdev_err(dev->ndev, "rndis message buffer " "overflow detected (got %u, min %u)" "...dropping this message!\n", pkt->total_data_buflen, rndis_pkt->data_len); @@ -390,7 +386,7 @@ static 
int rndis_filter_receive_data(struct rndis_device *dev, } csum_info = rndis_get_ppi(rndis_pkt, TCPIP_CHKSUM_PKTINFO); - return netvsc_recv_callback(dev->net_dev->dev, pkt, data, + return netvsc_recv_callback(net_device_ctx->device_ctx, pkt, data, csum_info, channel, vlan_tci); } @@ -399,10 +395,11 @@ int rndis_filter_receive(struct hv_device *dev, void **data, struct vmbus_channel *channel) { - struct netvsc_device *net_dev = hv_get_drvdata(dev); + struct net_device *ndev = hv_get_drvdata(dev); + struct net_device_context *net_device_ctx = netdev_priv(ndev); + struct netvsc_device *net_dev = net_device_ctx->nvdev; struct rndis_device *rndis_dev; struct rndis_message *rndis_msg; - struct net_device *ndev; int ret = 0; if (!net_dev) { @@ -410,8 +407,6 @@ int rndis_filter_receive(struct hv_device *dev, goto exit; } - ndev = net_dev->ndev; - /* Make sure the rndis device state is initialized */ if (!net_dev->extension) { netdev_err(ndev, "got rndis message but no rndis device - " @@ -430,7 +425,7 @@ int rndis_filter_receive(struct hv_device *dev, rndis_msg = *data; - if (netif_msg_rx_err(net_dev->nd_ctx)) + if (netif_msg_rx_err(net_device_ctx)) dump_rndis_message(dev, rndis_msg); switch (rndis_msg->ndis_msg_type) { @@ -550,9 +545,10 @@ static int rndis_filter_query_device_mac(struct rndis_device *dev) int rndis_filter_set_device_mac(struct hv_device *hdev, char *mac) { - struct netvsc_device *nvdev = hv_get_drvdata(hdev); + struct net_device *ndev = hv_get_drvdata(hdev); + struct net_device_context *net_device_ctx = netdev_priv(ndev); + struct netvsc_device *nvdev = net_device_ctx->nvdev; struct rndis_device *rdev = nvdev->extension; - struct net_device *ndev = nvdev->ndev; struct rndis_request *request; struct rndis_set_request *set; struct rndis_config_parameter_info *cpi; @@ -629,9 +625,10 @@ static int rndis_filter_set_offload_params(struct hv_device *hdev, struct ndis_offload_params *req_offloads) { - struct netvsc_device *nvdev = hv_get_drvdata(hdev); + struct net_device *ndev = hv_get_drvdata(hdev); + struct net_device_context *net_device_ctx = netdev_priv(ndev); + struct netvsc_device *nvdev = net_device_ctx->nvdev; struct rndis_device *rdev = nvdev->extension; - struct net_device *ndev = nvdev->ndev; struct rndis_request *request; struct rndis_set_request *set; struct ndis_offload_params *offload_params; @@ -703,7 +700,7 @@ u8 netvsc_hash_key[HASH_KEYLEN] = { static int rndis_filter_set_rss_param(struct rndis_device *rdev, int num_queue) { - struct net_device *ndev = rdev->net_dev->ndev; + struct net_device *ndev = rdev->ndev; struct rndis_request *request; struct rndis_set_request *set; struct rndis_set_complete *set_complete; @@ -799,9 +796,7 @@ int rndis_filter_set_packet_filter(struct rndis_device *dev, u32 new_filter) u32 status; int ret; unsigned long t; - struct net_device *ndev; - - ndev = dev->net_dev->ndev; + struct net_device *ndev = dev->ndev; request = get_rndis_request(dev, RNDIS_MSG_SET, RNDIS_MESSAGE_SIZE(struct rndis_set_request) + @@ -856,7 +851,8 @@ static int rndis_filter_init_device(struct rndis_device *dev) u32 status; int ret; unsigned long t; - struct netvsc_device *nvdev = dev->net_dev; + struct net_device_context *net_device_ctx = netdev_priv(dev->ndev); + struct netvsc_device *nvdev = net_device_ctx->nvdev; request = get_rndis_request(dev, RNDIS_MSG_INIT, RNDIS_MESSAGE_SIZE(struct rndis_initialize_request)); @@ -879,7 +875,6 @@ static int rndis_filter_init_device(struct rndis_device *dev) goto cleanup; } - t = 
wait_for_completion_timeout(&request->wait_event, 5*HZ); if (t == 0) { @@ -910,8 +905,9 @@ static void rndis_filter_halt_device(struct rndis_device *dev) { struct rndis_request *request; struct rndis_halt_request *halt; - struct netvsc_device *nvdev = dev->net_dev; - struct hv_device *hdev = nvdev->dev; + struct net_device_context *net_device_ctx = netdev_priv(dev->ndev); + struct netvsc_device *nvdev = net_device_ctx->nvdev; + struct hv_device *hdev = net_device_ctx->device_ctx; ulong flags; /* Attempt to do a rndis device halt */ @@ -979,13 +975,14 @@ static int rndis_filter_close_device(struct rndis_device *dev) static void netvsc_sc_open(struct vmbus_channel *new_sc) { - struct netvsc_device *nvscdev; + struct net_device *ndev = + hv_get_drvdata(new_sc->primary_channel->device_obj); + struct net_device_context *net_device_ctx = netdev_priv(ndev); + struct netvsc_device *nvscdev = net_device_ctx->nvdev; u16 chn_index = new_sc->offermsg.offer.sub_channel_index; int ret; unsigned long flags; - nvscdev = hv_get_drvdata(new_sc->primary_channel->device_obj); - if (chn_index >= nvscdev->num_chn) return; @@ -1010,6 +1007,8 @@ int rndis_filter_device_add(struct hv_device *dev, void *additional_info) { int ret; + struct net_device *net = hv_get_drvdata(dev); + struct net_device_context *net_device_ctx = netdev_priv(net); struct netvsc_device *net_device; struct rndis_device *rndis_device; struct netvsc_device_info *device_info = additional_info; @@ -1040,16 +1039,15 @@ int rndis_filter_device_add(struct hv_device *dev, return ret; } - /* Initialize the rndis device */ - net_device = hv_get_drvdata(dev); + net_device = net_device_ctx->nvdev; net_device->max_chn = 1; net_device->num_chn = 1; spin_lock_init(&net_device->sc_lock); net_device->extension = rndis_device; - rndis_device->net_dev = net_device; + rndis_device->ndev = net; /* Send the rndis initialization message */ ret = rndis_filter_init_device(rndis_device); @@ -1198,7 +1196,9 @@ err_dev_remv: void rndis_filter_device_remove(struct hv_device *dev) { - struct netvsc_device *net_dev = hv_get_drvdata(dev); + struct net_device *ndev = hv_get_drvdata(dev); + struct net_device_context *net_device_ctx = netdev_priv(ndev); + struct netvsc_device *net_dev = net_device_ctx->nvdev; struct rndis_device *rndis_dev = net_dev->extension; unsigned long t; @@ -1224,7 +1224,9 @@ void rndis_filter_device_remove(struct hv_device *dev) int rndis_filter_open(struct hv_device *dev) { - struct netvsc_device *net_device = hv_get_drvdata(dev); + struct net_device *ndev = hv_get_drvdata(dev); + struct net_device_context *net_device_ctx = netdev_priv(ndev); + struct netvsc_device *net_device = net_device_ctx->nvdev; if (!net_device) return -EINVAL; @@ -1237,7 +1239,9 @@ int rndis_filter_open(struct hv_device *dev) int rndis_filter_close(struct hv_device *dev) { - struct netvsc_device *nvdev = hv_get_drvdata(dev); + struct net_device *ndev = hv_get_drvdata(dev); + struct net_device_context *net_device_ctx = netdev_priv(ndev); + struct netvsc_device *nvdev = net_device_ctx->nvdev; if (!nvdev) return -EINVAL; From 0a1275ca5128b84ffffc149960969ed351ae00eb Mon Sep 17 00:00:00 2001 From: Vitaly Kuznetsov Date: Fri, 13 May 2016 13:55:23 +0200 Subject: [PATCH 1571/1649] hv_netvsc: get rid of struct net_device pointer in struct netvsc_device Simplify netvsvc pointer graph by getting rid of the redundant ndev pointer. We can always get a pointer to struct net_device from somewhere else. Signed-off-by: Vitaly Kuznetsov Signed-off-by: David S. 
Miller --- drivers/net/hyperv/hyperv_net.h | 5 +- drivers/net/hyperv/netvsc.c | 36 +++++------- drivers/net/hyperv/netvsc_drv.c | 91 ++++++++++++++++++------------- drivers/net/hyperv/rndis_filter.c | 4 +- 4 files changed, 72 insertions(+), 64 deletions(-) diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index 0f3874379869..c270c5a54f3a 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h @@ -173,7 +173,6 @@ struct rndis_device { /* Interface */ struct rndis_message; -struct netvsc_device; int netvsc_device_add(struct hv_device *device, void *additional_info); int netvsc_device_remove(struct hv_device *device); int netvsc_send(struct hv_device *device, @@ -203,7 +202,7 @@ int rndis_filter_receive(struct hv_device *dev, int rndis_filter_set_packet_filter(struct rndis_device *dev, u32 new_filter); int rndis_filter_set_device_mac(struct hv_device *hdev, char *mac); -void netvsc_switch_datapath(struct netvsc_device *nv_dev, bool vf); +void netvsc_switch_datapath(struct net_device *nv_dev, bool vf); #define NVSP_INVALID_PROTOCOL_VERSION ((u32)0xFFFFFFFF) @@ -711,8 +710,6 @@ struct netvsc_device { struct nvsp_message revoke_packet; /* unsigned char HwMacAddr[HW_MACADDR_LEN]; */ - struct net_device *ndev; - struct vmbus_channel *chn_table[VRSS_CHANNEL_MAX]; u32 send_table[VRSS_SEND_TAB_SIZE]; u32 max_chn; diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index 1cd01ad2194f..96b3c32a7deb 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c @@ -37,12 +37,12 @@ * Switch the data path from the synthetic interface to the VF * interface. */ -void netvsc_switch_datapath(struct netvsc_device *nv_dev, bool vf) +void netvsc_switch_datapath(struct net_device *ndev, bool vf) { - struct nvsp_message *init_pkt = &nv_dev->channel_init_pkt; - struct net_device *ndev = nv_dev->ndev; struct net_device_context *net_device_ctx = netdev_priv(ndev); struct hv_device *dev = net_device_ctx->device_ctx; + struct netvsc_device *nv_dev = net_device_ctx->nvdev; + struct nvsp_message *init_pkt = &nv_dev->channel_init_pkt; memset(init_pkt, 0, sizeof(struct nvsp_message)); init_pkt->hdr.msg_type = NVSP_MSG4_TYPE_SWITCH_DATA_PATH; @@ -80,7 +80,6 @@ static struct netvsc_device *alloc_net_device(struct hv_device *device) net_device->destroy = false; atomic_set(&net_device->open_cnt, 0); atomic_set(&net_device->vf_use_cnt, 0); - net_device->ndev = ndev; net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT; net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT; @@ -263,7 +262,7 @@ static int netvsc_init_buf(struct hv_device *device) net_device = get_outbound_net_device(device); if (!net_device) return -ENODEV; - ndev = net_device->ndev; + ndev = hv_get_drvdata(device); node = cpu_to_node(device->channel->target_cpu); net_device->recv_buf = vzalloc_node(net_device->recv_buf_size, node); @@ -453,6 +452,7 @@ static int negotiate_nvsp_ver(struct hv_device *device, struct nvsp_message *init_packet, u32 nvsp_ver) { + struct net_device *ndev = hv_get_drvdata(device); int ret; unsigned long t; @@ -486,8 +486,7 @@ static int negotiate_nvsp_ver(struct hv_device *device, /* NVSPv2 or later: Send NDIS config */ memset(init_packet, 0, sizeof(struct nvsp_message)); init_packet->hdr.msg_type = NVSP_MSG2_TYPE_SEND_NDIS_CONFIG; - init_packet->msg.v2_msg.send_ndis_config.mtu = net_device->ndev->mtu + - ETH_HLEN; + init_packet->msg.v2_msg.send_ndis_config.mtu = ndev->mtu + ETH_HLEN; init_packet->msg.v2_msg.send_ndis_config.capability.ieee8021q = 1; if (nvsp_ver >= 
NVSP_PROTOCOL_VERSION_5) @@ -507,7 +506,6 @@ static int netvsc_connect_vsp(struct hv_device *device) struct netvsc_device *net_device; struct nvsp_message *init_packet; int ndis_version; - struct net_device *ndev; u32 ver_list[] = { NVSP_PROTOCOL_VERSION_1, NVSP_PROTOCOL_VERSION_2, NVSP_PROTOCOL_VERSION_4, NVSP_PROTOCOL_VERSION_5 }; int i, num_ver = 4; /* number of different NVSP versions */ @@ -515,7 +513,6 @@ static int netvsc_connect_vsp(struct hv_device *device) net_device = get_outbound_net_device(device); if (!net_device) return -ENODEV; - ndev = net_device->ndev; init_packet = &net_device->channel_init_pkt; @@ -768,6 +765,7 @@ static u32 netvsc_copy_to_send_buf(struct netvsc_device *net_device, } static inline int netvsc_send_pkt( + struct hv_device *device, struct hv_netvsc_packet *packet, struct netvsc_device *net_device, struct hv_page_buffer **pb, @@ -776,7 +774,7 @@ static inline int netvsc_send_pkt( struct nvsp_message nvmsg; u16 q_idx = packet->q_idx; struct vmbus_channel *out_channel = net_device->chn_table[q_idx]; - struct net_device *ndev = net_device->ndev; + struct net_device *ndev = hv_get_drvdata(device); u64 req_id; int ret; struct hv_page_buffer *pgbuf; @@ -971,7 +969,8 @@ int netvsc_send(struct hv_device *device, } if (msd_send) { - m_ret = netvsc_send_pkt(msd_send, net_device, NULL, msd_skb); + m_ret = netvsc_send_pkt(device, msd_send, net_device, + NULL, msd_skb); if (m_ret != 0) { netvsc_free_send_slot(net_device, @@ -982,7 +981,7 @@ int netvsc_send(struct hv_device *device, send_now: if (cur_send) - ret = netvsc_send_pkt(cur_send, net_device, pb, skb); + ret = netvsc_send_pkt(device, cur_send, net_device, pb, skb); if (ret != 0 && section_index != NETVSC_INVALID_INDEX) netvsc_free_send_slot(net_device, section_index); @@ -998,9 +997,7 @@ static void netvsc_send_recv_completion(struct hv_device *device, struct nvsp_message recvcompMessage; int retries = 0; int ret; - struct net_device *ndev; - - ndev = net_device->ndev; + struct net_device *ndev = hv_get_drvdata(device); recvcompMessage.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE; @@ -1047,11 +1044,9 @@ static void netvsc_receive(struct netvsc_device *net_device, u32 status = NVSP_STAT_SUCCESS; int i; int count = 0; - struct net_device *ndev; + struct net_device *ndev = hv_get_drvdata(device); void *data; - ndev = net_device->ndev; - /* * All inbound packets other than send completion should be xfer page * packet @@ -1107,14 +1102,13 @@ static void netvsc_send_table(struct hv_device *hdev, struct nvsp_message *nvmsg) { struct netvsc_device *nvscdev; - struct net_device *ndev; + struct net_device *ndev = hv_get_drvdata(hdev); int i; u32 count, *tab; nvscdev = get_outbound_net_device(hdev); if (!nvscdev) return; - ndev = nvscdev->ndev; count = nvmsg->msg.v5_msg.send_table.count; if (count != VRSS_SEND_TAB_SIZE) { @@ -1173,7 +1167,7 @@ void netvsc_channel_cb(void *context) net_device = get_inbound_net_device(device); if (!net_device) return; - ndev = net_device->ndev; + ndev = hv_get_drvdata(device); buffer = get_per_channel_state(channel); do { diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index a33a1c92d489..7325d693fc4a 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -67,18 +67,19 @@ static void do_set_multicast(struct work_struct *w) { struct net_device_context *ndevctx = container_of(w, struct net_device_context, work); - struct netvsc_device *nvdev; + struct hv_device *device_obj = ndevctx->device_ctx; + struct net_device *ndev = 
hv_get_drvdata(device_obj); + struct netvsc_device *nvdev = ndevctx->nvdev; struct rndis_device *rdev; - nvdev = ndevctx->nvdev; - if (nvdev == NULL || nvdev->ndev == NULL) + if (!nvdev) return; rdev = nvdev->extension; if (rdev == NULL) return; - if (nvdev->ndev->flags & IFF_PROMISC) + if (ndev->flags & IFF_PROMISC) rndis_filter_set_packet_filter(rdev, NDIS_PACKET_TYPE_PROMISCUOUS); else @@ -1050,23 +1051,22 @@ static const struct net_device_ops device_ops = { */ static void netvsc_link_change(struct work_struct *w) { - struct net_device_context *ndev_ctx; - struct net_device *net; + struct net_device_context *ndev_ctx = + container_of(w, struct net_device_context, dwork.work); + struct hv_device *device_obj = ndev_ctx->device_ctx; + struct net_device *net = hv_get_drvdata(device_obj); struct netvsc_device *net_device; struct rndis_device *rdev; struct netvsc_reconfig *event = NULL; bool notify = false, reschedule = false; unsigned long flags, next_reconfig, delay; - ndev_ctx = container_of(w, struct net_device_context, dwork.work); - rtnl_lock(); if (ndev_ctx->start_remove) goto out_unlock; net_device = ndev_ctx->nvdev; rdev = net_device->extension; - net = net_device->ndev; next_reconfig = ndev_ctx->last_reconfig + LINKCHANGE_INT; if (time_is_after_jiffies(next_reconfig)) { @@ -1167,10 +1167,9 @@ static void netvsc_notify_peers(struct work_struct *wrk) atomic_dec(&gwrk->netvsc_dev->vf_use_cnt); } -static struct netvsc_device *get_netvsc_device(char *mac) +static struct net_device *get_netvsc_net_device(char *mac) { - struct net_device *dev; - struct net_device_context *netvsc_ctx = NULL; + struct net_device *dev, *found = NULL; int rtnl_locked; rtnl_locked = rtnl_trylock(); @@ -1179,21 +1178,20 @@ static struct netvsc_device *get_netvsc_device(char *mac) if (memcmp(dev->dev_addr, mac, ETH_ALEN) == 0) { if (dev->netdev_ops != &device_ops) continue; - netvsc_ctx = netdev_priv(dev); + found = dev; break; } } if (rtnl_locked) rtnl_unlock(); - if (netvsc_ctx == NULL) - return NULL; - - return netvsc_ctx->nvdev; + return found; } static int netvsc_register_vf(struct net_device *vf_netdev) { + struct net_device *ndev; + struct net_device_context *net_device_ctx; struct netvsc_device *netvsc_dev; const struct ethtool_ops *eth_ops = vf_netdev->ethtool_ops; @@ -1205,11 +1203,16 @@ static int netvsc_register_vf(struct net_device *vf_netdev) * associate with the VF interface. If we don't find a matching * synthetic interface, move on. */ - netvsc_dev = get_netvsc_device(vf_netdev->dev_addr); + ndev = get_netvsc_net_device(vf_netdev->dev_addr); + if (!ndev) + return NOTIFY_DONE; + + net_device_ctx = netdev_priv(ndev); + netvsc_dev = net_device_ctx->nvdev; if (netvsc_dev == NULL) return NOTIFY_DONE; - netdev_info(netvsc_dev->ndev, "VF registering: %s\n", vf_netdev->name); + netdev_info(ndev, "VF registering: %s\n", vf_netdev->name); /* * Take a reference on the module. 
*/ @@ -1221,6 +1224,7 @@ static int netvsc_register_vf(struct net_device *vf_netdev) static int netvsc_vf_up(struct net_device *vf_netdev) { + struct net_device *ndev; struct netvsc_device *netvsc_dev; const struct ethtool_ops *eth_ops = vf_netdev->ethtool_ops; struct net_device_context *net_device_ctx; @@ -1228,13 +1232,17 @@ static int netvsc_vf_up(struct net_device *vf_netdev) if (eth_ops == &ethtool_ops) return NOTIFY_DONE; - netvsc_dev = get_netvsc_device(vf_netdev->dev_addr); + ndev = get_netvsc_net_device(vf_netdev->dev_addr); + if (!ndev) + return NOTIFY_DONE; + + net_device_ctx = netdev_priv(ndev); + netvsc_dev = net_device_ctx->nvdev; if ((netvsc_dev == NULL) || (netvsc_dev->vf_netdev == NULL)) return NOTIFY_DONE; - netdev_info(netvsc_dev->ndev, "VF up: %s\n", vf_netdev->name); - net_device_ctx = netdev_priv(netvsc_dev->ndev); + netdev_info(ndev, "VF up: %s\n", vf_netdev->name); netvsc_dev->vf_inject = true; /* @@ -1245,11 +1253,10 @@ static int netvsc_vf_up(struct net_device *vf_netdev) /* * notify the host to switch the data path. */ - netvsc_switch_datapath(netvsc_dev, true); - netdev_info(netvsc_dev->ndev, "Data path switched to VF: %s\n", - vf_netdev->name); + netvsc_switch_datapath(ndev, true); + netdev_info(ndev, "Data path switched to VF: %s\n", vf_netdev->name); - netif_carrier_off(netvsc_dev->ndev); + netif_carrier_off(ndev); /* * Now notify peers. We are scheduling work to @@ -1267,6 +1274,7 @@ static int netvsc_vf_up(struct net_device *vf_netdev) static int netvsc_vf_down(struct net_device *vf_netdev) { + struct net_device *ndev; struct netvsc_device *netvsc_dev; struct net_device_context *net_device_ctx; const struct ethtool_ops *eth_ops = vf_netdev->ethtool_ops; @@ -1274,13 +1282,17 @@ static int netvsc_vf_down(struct net_device *vf_netdev) if (eth_ops == &ethtool_ops) return NOTIFY_DONE; - netvsc_dev = get_netvsc_device(vf_netdev->dev_addr); + ndev = get_netvsc_net_device(vf_netdev->dev_addr); + if (!ndev) + return NOTIFY_DONE; + + net_device_ctx = netdev_priv(ndev); + netvsc_dev = net_device_ctx->nvdev; if ((netvsc_dev == NULL) || (netvsc_dev->vf_netdev == NULL)) return NOTIFY_DONE; - netdev_info(netvsc_dev->ndev, "VF down: %s\n", vf_netdev->name); - net_device_ctx = netdev_priv(netvsc_dev->ndev); + netdev_info(ndev, "VF down: %s\n", vf_netdev->name); netvsc_dev->vf_inject = false; /* * Wait for currently active users to @@ -1289,16 +1301,15 @@ static int netvsc_vf_down(struct net_device *vf_netdev) while (atomic_read(&netvsc_dev->vf_use_cnt) != 0) udelay(50); - netvsc_switch_datapath(netvsc_dev, false); - netdev_info(netvsc_dev->ndev, "Data path switched from VF: %s\n", - vf_netdev->name); + netvsc_switch_datapath(ndev, false); + netdev_info(ndev, "Data path switched from VF: %s\n", vf_netdev->name); rndis_filter_close(net_device_ctx->device_ctx); - netif_carrier_on(netvsc_dev->ndev); + netif_carrier_on(ndev); /* * Notify peers.
*/ atomic_inc(&netvsc_dev->vf_use_cnt); - net_device_ctx->gwrk.netdev = netvsc_dev->ndev; + net_device_ctx->gwrk.netdev = ndev; net_device_ctx->gwrk.netvsc_dev = netvsc_dev; schedule_work(&net_device_ctx->gwrk.dwrk); @@ -1308,17 +1319,23 @@ static int netvsc_vf_down(struct net_device *vf_netdev) static int netvsc_unregister_vf(struct net_device *vf_netdev) { + struct net_device *ndev; struct netvsc_device *netvsc_dev; const struct ethtool_ops *eth_ops = vf_netdev->ethtool_ops; + struct net_device_context *net_device_ctx; if (eth_ops == &ethtool_ops) return NOTIFY_DONE; - netvsc_dev = get_netvsc_device(vf_netdev->dev_addr); + ndev = get_netvsc_net_device(vf_netdev->dev_addr); + if (!ndev) + return NOTIFY_DONE; + + net_device_ctx = netdev_priv(ndev); + netvsc_dev = net_device_ctx->nvdev; if (netvsc_dev == NULL) return NOTIFY_DONE; - netdev_info(netvsc_dev->ndev, "VF unregistering: %s\n", - vf_netdev->name); + netdev_info(ndev, "VF unregistering: %s\n", vf_netdev->name); netvsc_dev->vf_netdev = NULL; module_put(THIS_MODULE); diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c index 6caba5166ebb..97c292b7dbea 100644 --- a/drivers/net/hyperv/rndis_filter.c +++ b/drivers/net/hyperv/rndis_filter.c @@ -1061,8 +1061,8 @@ int rndis_filter_device_add(struct hv_device *dev, ret = rndis_filter_query_device(rndis_device, RNDIS_OID_GEN_MAXIMUM_FRAME_SIZE, &mtu, &size); - if (ret == 0 && size == sizeof(u32) && mtu < net_device->ndev->mtu) - net_device->ndev->mtu = mtu; + if (ret == 0 && size == sizeof(u32) && mtu < net->mtu) + net->mtu = mtu; /* Get the mac address */ ret = rndis_filter_query_device_mac(rndis_device); From 6da7225f5a95ba68e3c6225c4051182bef30eed4 Mon Sep 17 00:00:00 2001 From: Vitaly Kuznetsov Date: Fri, 13 May 2016 13:55:24 +0200 Subject: [PATCH 1572/1649] hv_netvsc: synchronize netvsc_change_mtu()/netvsc_set_channels() with netvsc_remove() When a netvsc device is removed during an MTU change or channel setup, we get into trouble because both paths try to remove the device. Synchronize them with the start_remove flag and the rtnl lock. Signed-off-by: Vitaly Kuznetsov Signed-off-by: David S. Miller --- drivers/net/hyperv/netvsc_drv.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 7325d693fc4a..6a69b5cc9fe2 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -759,7 +759,7 @@ static int netvsc_set_channels(struct net_device *net, int ret = 0; bool recovering = false; - if (!nvdev || nvdev->destroy) + if (net_device_ctx->start_remove || !nvdev || nvdev->destroy) return -ENODEV; num_chn = nvdev->num_chn; @@ -907,7 +907,7 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu) u32 num_chn; int ret = 0; - if (nvdev == NULL || nvdev->destroy) + if (ndevctx->start_remove || !nvdev || nvdev->destroy) return -ENODEV; if (nvdev->nvsp_version >= NVSP_PROTOCOL_VERSION_2) @@ -1445,7 +1445,12 @@ static int netvsc_remove(struct hv_device *dev) ndev_ctx = netdev_priv(net); net_device = ndev_ctx->nvdev; + /* Avoid racing with netvsc_change_mtu()/netvsc_set_channels() + * removing the device.
+ */ + rtnl_lock(); ndev_ctx->start_remove = true; + rtnl_unlock(); cancel_delayed_work_sync(&ndev_ctx->dwork); cancel_work_sync(&ndev_ctx->work); From 88098834827025cc04c15f1b4b0d9bbef3cf55af Mon Sep 17 00:00:00 2001 From: Vitaly Kuznetsov Date: Fri, 13 May 2016 13:55:25 +0200 Subject: [PATCH 1573/1649] hv_netvsc: set nvdev link after populating chn_table Crash in netvsc_send() is observed when netvsc device is re-created on mtu change/set channels. The crash is caused by dereferencing of NULL channel pointer which comes from chn_table. The root cause is a mixture of two facts: - we set nvdev pointer in net_device_context in alloc_net_device() before we populate chn_table. - we populate chn_table[0] only. The issue could be papered over by checking channel != NULL in netvsc_send() but populating the whole chn_table and writing the nvdev pointer afterwards seems more appropriate. Signed-off-by: Vitaly Kuznetsov Signed-off-by: David S. Miller --- drivers/net/hyperv/netvsc.c | 29 ++++++++++++++++++----------- 1 file changed, 18 insertions(+), 11 deletions(-) diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index 96b3c32a7deb..719cb3578e55 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c @@ -60,11 +60,9 @@ void netvsc_switch_datapath(struct net_device *ndev, bool vf) } -static struct netvsc_device *alloc_net_device(struct hv_device *device) +static struct netvsc_device *alloc_net_device(void) { struct netvsc_device *net_device; - struct net_device *ndev = hv_get_drvdata(device); - struct net_device_context *net_device_ctx = netdev_priv(ndev); net_device = kzalloc(sizeof(struct netvsc_device), GFP_KERNEL); if (!net_device) @@ -86,8 +84,6 @@ static struct netvsc_device *alloc_net_device(struct hv_device *device) net_device->vf_netdev = NULL; net_device->vf_inject = false; - net_device_ctx->nvdev = net_device; - return net_device; } @@ -1240,20 +1236,19 @@ void netvsc_channel_cb(void *context) */ int netvsc_device_add(struct hv_device *device, void *additional_info) { - int ret = 0; + int i, ret = 0; int ring_size = ((struct netvsc_device_info *)additional_info)->ring_size; struct netvsc_device *net_device; - struct net_device *ndev; + struct net_device *ndev = hv_get_drvdata(device); + struct net_device_context *net_device_ctx = netdev_priv(ndev); - net_device = alloc_net_device(device); + net_device = alloc_net_device(); if (!net_device) return -ENOMEM; net_device->ring_size = ring_size; - ndev = hv_get_drvdata(device); - /* Initialize the NetVSC channel extension */ init_completion(&net_device->channel_init_wait); @@ -1272,7 +1267,19 @@ int netvsc_device_add(struct hv_device *device, void *additional_info) /* Channel is opened */ pr_info("hv_netvsc channel opened successfully\n"); - net_device->chn_table[0] = device->channel; + /* If we're reopening the device we may have multiple queues, fill the + * chn_table with the default channel to use it before subchannels are + * opened. + */ + for (i = 0; i < VRSS_CHANNEL_MAX; i++) + net_device->chn_table[i] = device->channel; + + /* Writing nvdev pointer unlocks netvsc_send(), make sure chn_table is + * populated. + */ + wmb(); + + net_device_ctx->nvdev = net_device; /* Connect with the NetVsp */ ret = netvsc_connect_vsp(device); From 760edee8b59ebf05bb268d0a6b568f76bb1bb599 Mon Sep 17 00:00:00 2001 From: "Samudrala, Sridhar" Date: Thu, 12 May 2016 17:08:22 -0700 Subject: [PATCH 1574/1649] net: sched: Move TCA_CLS_FLAGS_SKIP_HW to uapi header file. 
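Moving the definition into the uapi header makes the flag visible to userspace, so tools such as tc and the in-kernel classifiers can share the same flag values. As a minimal sketch only (not part of this patch; the helper name is hypothetical), a classifier consumes the flag roughly as follows, using nla_get_u32() and the tc_should_offload() helper from pkt_cls.h:

#include <net/netlink.h>
#include <net/pkt_cls.h>

/* Hypothetical example: decide whether to program a filter into
 * hardware based on the flags attribute supplied by userspace.
 */
static int example_cls_maybe_offload(struct net_device *dev,
				     struct nlattr *flags_attr)
{
	u32 flags = flags_attr ? nla_get_u32(flags_attr) : 0;

	/* tc_should_offload() returns false when TCA_CLS_FLAGS_SKIP_HW
	 * is set or the device lacks NETIF_F_HW_TC.
	 */
	if (!tc_should_offload(dev, flags))
		return 0;	/* software-only filter */

	/* ... call ndo_setup_tc() here to install the hardware filter ... */
	return 0;
}
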
Signed-off-by: Sridhar Samudrala Acked-by: John Fastabend Signed-off-by: David S. Miller --- include/net/pkt_cls.h | 3 --- include/uapi/linux/pkt_cls.h | 3 +++ 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h index caa5e18636df..339ef08e35ae 100644 --- a/include/net/pkt_cls.h +++ b/include/net/pkt_cls.h @@ -392,9 +392,6 @@ struct tc_cls_u32_offload { }; }; -/* tca flags definitions */ -#define TCA_CLS_FLAGS_SKIP_HW 1 - static inline bool tc_should_offload(struct net_device *dev, u32 flags) { if (!(dev->features & NETIF_F_HW_TC)) diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h index 84660905fedf..3e8b65fb6664 100644 --- a/include/uapi/linux/pkt_cls.h +++ b/include/uapi/linux/pkt_cls.h @@ -151,6 +151,9 @@ enum { #define TCA_POLICE_MAX (__TCA_POLICE_MAX - 1) +/* tca flags definitions */ +#define TCA_CLS_FLAGS_SKIP_HW (1 << 0) + /* U32 filters */ #define TC_U32_HTID(h) ((h)&0xFFF00000) From d34e3e181395192d6d1f50dd97bd7854e04e33a4 Mon Sep 17 00:00:00 2001 From: "Samudrala, Sridhar" Date: Thu, 12 May 2016 17:08:23 -0700 Subject: [PATCH 1575/1649] net: cls_u32: Add support for skip-sw flag to tc u32 classifier. On devices that support TC U32 offloads, this flag enables a filter to be added only to HW. skip-sw and skip-hw are mutually exclusive flags. By default without any flags, the filter is added to both HW and SW, but no error checks are done in case of failure to add to HW. With skip-sw, failure to add to HW is treated as an error. Here is a sample script that adds 2 filters, one with skip-sw and the other with skip-hw flag. # add ingress qdisc tc qdisc add dev p4p1 ingress # enable hw tc offload. ethtool -K p4p1 hw-tc-offload on # add u32 filter with skip-sw flag. tc filter add dev p4p1 parent ffff: protocol ip prio 99 \ handle 800:0:1 u32 ht 800: flowid 800:1 \ skip-sw \ match ip src 192.168.1.0/24 \ action drop # add u32 filter with skip-hw flag. tc filter add dev p4p1 parent ffff: protocol ip prio 99 \ handle 800:0:2 u32 ht 800: flowid 800:2 \ skip-hw \ match ip src 192.168.2.0/24 \ action drop Signed-off-by: Sridhar Samudrala Acked-by: John Fastabend Signed-off-by: David S. Miller --- include/net/pkt_cls.h | 17 ++++++++++++++ include/uapi/linux/pkt_cls.h | 1 + net/sched/cls_u32.c | 45 ++++++++++++++++++++++++++++-------- 3 files changed, 54 insertions(+), 9 deletions(-) diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h index 339ef08e35ae..8b4893878cf4 100644 --- a/include/net/pkt_cls.h +++ b/include/net/pkt_cls.h @@ -406,6 +406,23 @@ static inline bool tc_should_offload(struct net_device *dev, u32 flags) return true; } +static inline bool tc_skip_sw(u32 flags) +{ + return (flags & TCA_CLS_FLAGS_SKIP_SW) ? true : false; +} + +/* SKIP_HW and SKIP_SW are mutually exclusive flags. 
*/ +static inline bool tc_flags_valid(u32 flags) +{ + if (flags & ~(TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW)) + return false; + + if (!(flags ^ (TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW))) + return false; + + return true; +} + enum tc_fl_command { TC_CLSFLOWER_REPLACE, TC_CLSFLOWER_DESTROY, diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h index 3e8b65fb6664..eba5914ba5d1 100644 --- a/include/uapi/linux/pkt_cls.h +++ b/include/uapi/linux/pkt_cls.h @@ -153,6 +153,7 @@ enum { /* tca flags definitions */ #define TCA_CLS_FLAGS_SKIP_HW (1 << 0) +#define TCA_CLS_FLAGS_SKIP_SW (1 << 1) /* U32 filters */ diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c index e64877a3c084..079b43b3c5d2 100644 --- a/net/sched/cls_u32.c +++ b/net/sched/cls_u32.c @@ -134,6 +134,11 @@ next_knode: j = 0; #endif + if (tc_skip_sw(n->flags)) { + n = rcu_dereference_bh(n->next); + goto next_knode; + } + #ifdef CONFIG_CLS_U32_MARK if ((skb->mark & n->mask) != n->val) { n = rcu_dereference_bh(n->next); @@ -443,13 +448,14 @@ static void u32_remove_hw_knode(struct tcf_proto *tp, u32 handle) } } -static void u32_replace_hw_hnode(struct tcf_proto *tp, +static int u32_replace_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h, u32 flags) { struct net_device *dev = tp->q->dev_queue->dev; struct tc_cls_u32_offload u32_offload = {0}; struct tc_to_netdev offload; + int err; offload.type = TC_SETUP_CLSU32; offload.cls_u32 = &u32_offload; @@ -460,9 +466,13 @@ static void u32_replace_hw_hnode(struct tcf_proto *tp, offload.cls_u32->hnode.handle = h->handle; offload.cls_u32->hnode.prio = h->prio; - dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, - tp->protocol, &offload); + err = dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, + tp->protocol, &offload); + if (tc_skip_sw(flags)) + return err; } + + return 0; } static void u32_clear_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h) @@ -485,13 +495,14 @@ static void u32_clear_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h) } } -static void u32_replace_hw_knode(struct tcf_proto *tp, +static int u32_replace_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n, u32 flags) { struct net_device *dev = tp->q->dev_queue->dev; struct tc_cls_u32_offload u32_offload = {0}; struct tc_to_netdev offload; + int err; offload.type = TC_SETUP_CLSU32; offload.cls_u32 = &u32_offload; @@ -512,9 +523,13 @@ static void u32_replace_hw_knode(struct tcf_proto *tp, if (n->ht_down) offload.cls_u32->knode.link_handle = n->ht_down->handle; - dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, - tp->protocol, &offload); + err = dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, + tp->protocol, &offload); + if (tc_skip_sw(flags)) + return err; } + + return 0; } static void u32_clear_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht) @@ -845,8 +860,11 @@ static int u32_change(struct net *net, struct sk_buff *in_skb, if (err < 0) return err; - if (tb[TCA_U32_FLAGS]) + if (tb[TCA_U32_FLAGS]) { flags = nla_get_u32(tb[TCA_U32_FLAGS]); + if (!tc_flags_valid(flags)) + return err; + } n = (struct tc_u_knode *)*arg; if (n) { @@ -871,10 +889,15 @@ static int u32_change(struct net *net, struct sk_buff *in_skb, return err; } + err = u32_replace_hw_knode(tp, new, flags); + if (err) { + u32_destroy_key(tp, new, false); + return err; + } + u32_replace_knode(tp, tp_c, new); tcf_unbind_filter(tp, &n->res); call_rcu(&n->rcu, u32_delete_key_rcu); - u32_replace_hw_knode(tp, new, flags); return 0; } @@ -978,6 +1001,10 @@ static int u32_change(struct net *net, struct sk_buff *in_skb, struct 
tc_u_knode __rcu **ins; struct tc_u_knode *pins; + err = u32_replace_hw_knode(tp, n, flags); + if (err) + goto errhw; + ins = &ht->ht[TC_U32_HASH(handle)]; for (pins = rtnl_dereference(*ins); pins; ins = &pins->next, pins = rtnl_dereference(*ins)) @@ -986,11 +1013,11 @@ static int u32_change(struct net *net, struct sk_buff *in_skb, RCU_INIT_POINTER(n->next, pins); rcu_assign_pointer(*ins, n); - u32_replace_hw_knode(tp, n, flags); *arg = (unsigned long)n; return 0; } +errhw: #ifdef CONFIG_CLS_U32_MARK free_percpu(n->pcpu_success); errout: From 4e15ee2cb46fed730fe6f0195a86d44e5aeef129 Mon Sep 17 00:00:00 2001 From: Paul Durrant Date: Fri, 13 May 2016 09:37:26 +0100 Subject: [PATCH 1576/1649] xen-netback: add control ring boilerplate My recent patch to include/xen/interface/io/netif.h defines a new shared ring (in addition to the rx and tx rings) for passing control messages from a VM frontend driver to a backend driver. This patch adds the necessary code to xen-netback to map this new shared ring, should it be created by a frontend, but does not add implementations for any of the defined protocol messages. These are added in a subsequent patch for clarity. Signed-off-by: Paul Durrant Acked-by: Wei Liu Signed-off-by: David S. Miller --- drivers/net/xen-netback/common.h | 28 +++++--- drivers/net/xen-netback/interface.c | 101 ++++++++++++++++++++++++++-- drivers/net/xen-netback/netback.c | 99 +++++++++++++++++++++++++-- drivers/net/xen-netback/xenbus.c | 79 +++++++++++++++++++--- 4 files changed, 277 insertions(+), 30 deletions(-) diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h index f44b38846420..093a12abf71f 100644 --- a/drivers/net/xen-netback/common.h +++ b/drivers/net/xen-netback/common.h @@ -260,6 +260,11 @@ struct xenvif { struct dentry *xenvif_dbg_root; #endif + struct xen_netif_ctrl_back_ring ctrl; + struct task_struct *ctrl_task; + wait_queue_head_t ctrl_wq; + unsigned int ctrl_irq; + /* Miscellaneous private stuff. */ struct net_device *dev; }; @@ -285,10 +290,15 @@ struct xenvif *xenvif_alloc(struct device *parent, int xenvif_init_queue(struct xenvif_queue *queue); void xenvif_deinit_queue(struct xenvif_queue *queue); -int xenvif_connect(struct xenvif_queue *queue, unsigned long tx_ring_ref, - unsigned long rx_ring_ref, unsigned int tx_evtchn, - unsigned int rx_evtchn); -void xenvif_disconnect(struct xenvif *vif); +int xenvif_connect_data(struct xenvif_queue *queue, + unsigned long tx_ring_ref, + unsigned long rx_ring_ref, + unsigned int tx_evtchn, + unsigned int rx_evtchn); +void xenvif_disconnect_data(struct xenvif *vif); +int xenvif_connect_ctrl(struct xenvif *vif, grant_ref_t ring_ref, + unsigned int evtchn); +void xenvif_disconnect_ctrl(struct xenvif *vif); void xenvif_free(struct xenvif *vif); int xenvif_xenbus_init(void); @@ -300,10 +310,10 @@ int xenvif_queue_stopped(struct xenvif_queue *queue); void xenvif_wake_queue(struct xenvif_queue *queue); /* (Un)Map communication rings. 
*/ -void xenvif_unmap_frontend_rings(struct xenvif_queue *queue); -int xenvif_map_frontend_rings(struct xenvif_queue *queue, - grant_ref_t tx_ring_ref, - grant_ref_t rx_ring_ref); +void xenvif_unmap_frontend_data_rings(struct xenvif_queue *queue); +int xenvif_map_frontend_data_rings(struct xenvif_queue *queue, + grant_ref_t tx_ring_ref, + grant_ref_t rx_ring_ref); /* Check for SKBs from frontend and schedule backend processing */ void xenvif_napi_schedule_or_enable_events(struct xenvif_queue *queue); @@ -318,6 +328,8 @@ void xenvif_kick_thread(struct xenvif_queue *queue); int xenvif_dealloc_kthread(void *data); +int xenvif_ctrl_kthread(void *data); + void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb); void xenvif_carrier_on(struct xenvif *vif); diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c index f5231a2dd2ac..78a10d2af101 100644 --- a/drivers/net/xen-netback/interface.c +++ b/drivers/net/xen-netback/interface.c @@ -128,6 +128,15 @@ irqreturn_t xenvif_interrupt(int irq, void *dev_id) return IRQ_HANDLED; } +irqreturn_t xenvif_ctrl_interrupt(int irq, void *dev_id) +{ + struct xenvif *vif = dev_id; + + wake_up(&vif->ctrl_wq); + + return IRQ_HANDLED; +} + int xenvif_queue_stopped(struct xenvif_queue *queue) { struct net_device *dev = queue->vif->dev; @@ -527,9 +536,66 @@ void xenvif_carrier_on(struct xenvif *vif) rtnl_unlock(); } -int xenvif_connect(struct xenvif_queue *queue, unsigned long tx_ring_ref, - unsigned long rx_ring_ref, unsigned int tx_evtchn, - unsigned int rx_evtchn) +int xenvif_connect_ctrl(struct xenvif *vif, grant_ref_t ring_ref, + unsigned int evtchn) +{ + struct net_device *dev = vif->dev; + void *addr; + struct xen_netif_ctrl_sring *shared; + struct task_struct *task; + int err = -ENOMEM; + + err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif), + &ring_ref, 1, &addr); + if (err) + goto err; + + shared = (struct xen_netif_ctrl_sring *)addr; + BACK_RING_INIT(&vif->ctrl, shared, XEN_PAGE_SIZE); + + init_waitqueue_head(&vif->ctrl_wq); + + err = bind_interdomain_evtchn_to_irqhandler(vif->domid, evtchn, + xenvif_ctrl_interrupt, + 0, dev->name, vif); + if (err < 0) + goto err_unmap; + + vif->ctrl_irq = err; + + task = kthread_create(xenvif_ctrl_kthread, (void *)vif, + "%s-control", dev->name); + if (IS_ERR(task)) { + pr_warn("Could not allocate kthread for %s\n", dev->name); + err = PTR_ERR(task); + goto err_deinit; + } + + get_task_struct(task); + vif->ctrl_task = task; + + wake_up_process(vif->ctrl_task); + + return 0; + +err_deinit: + unbind_from_irqhandler(vif->ctrl_irq, vif); + vif->ctrl_irq = 0; + +err_unmap: + xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif), + vif->ctrl.sring); + vif->ctrl.sring = NULL; + +err: + return err; +} + +int xenvif_connect_data(struct xenvif_queue *queue, + unsigned long tx_ring_ref, + unsigned long rx_ring_ref, + unsigned int tx_evtchn, + unsigned int rx_evtchn) { struct task_struct *task; int err = -ENOMEM; @@ -538,7 +604,8 @@ int xenvif_connect(struct xenvif_queue *queue, unsigned long tx_ring_ref, BUG_ON(queue->task); BUG_ON(queue->dealloc_task); - err = xenvif_map_frontend_rings(queue, tx_ring_ref, rx_ring_ref); + err = xenvif_map_frontend_data_rings(queue, tx_ring_ref, + rx_ring_ref); if (err < 0) goto err; @@ -614,7 +681,7 @@ err_tx_unbind: unbind_from_irqhandler(queue->tx_irq, queue); queue->tx_irq = 0; err_unmap: - xenvif_unmap_frontend_rings(queue); + xenvif_unmap_frontend_data_rings(queue); netif_napi_del(&queue->napi); err: module_put(THIS_MODULE); @@ -634,7 
+701,7 @@ void xenvif_carrier_off(struct xenvif *vif) rtnl_unlock(); } -void xenvif_disconnect(struct xenvif *vif) +void xenvif_disconnect_data(struct xenvif *vif) { struct xenvif_queue *queue = NULL; unsigned int num_queues = vif->num_queues; @@ -668,12 +735,32 @@ void xenvif_disconnect(struct xenvif *vif) queue->tx_irq = 0; } - xenvif_unmap_frontend_rings(queue); + xenvif_unmap_frontend_data_rings(queue); } xenvif_mcast_addr_list_free(vif); } +void xenvif_disconnect_ctrl(struct xenvif *vif) +{ + if (vif->ctrl_task) { + kthread_stop(vif->ctrl_task); + put_task_struct(vif->ctrl_task); + vif->ctrl_task = NULL; + } + + if (vif->ctrl_irq) { + unbind_from_irqhandler(vif->ctrl_irq, vif); + vif->ctrl_irq = 0; + } + + if (vif->ctrl.sring) { + xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif), + vif->ctrl.sring); + vif->ctrl.sring = NULL; + } +} + /* Reverse the relevant parts of xenvif_init_queue(). * Used for queue teardown from xenvif_free(), and on the * error handling paths in xenbus.c:connect(). diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c index 4412a57ec862..ff22b6daa077 100644 --- a/drivers/net/xen-netback/netback.c +++ b/drivers/net/xen-netback/netback.c @@ -1926,7 +1926,7 @@ static inline bool tx_dealloc_work_todo(struct xenvif_queue *queue) return queue->dealloc_cons != queue->dealloc_prod; } -void xenvif_unmap_frontend_rings(struct xenvif_queue *queue) +void xenvif_unmap_frontend_data_rings(struct xenvif_queue *queue) { if (queue->tx.sring) xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(queue->vif), @@ -1936,9 +1936,9 @@ void xenvif_unmap_frontend_rings(struct xenvif_queue *queue) queue->rx.sring); } -int xenvif_map_frontend_rings(struct xenvif_queue *queue, - grant_ref_t tx_ring_ref, - grant_ref_t rx_ring_ref) +int xenvif_map_frontend_data_rings(struct xenvif_queue *queue, + grant_ref_t tx_ring_ref, + grant_ref_t rx_ring_ref) { void *addr; struct xen_netif_tx_sring *txs; @@ -1965,7 +1965,7 @@ int xenvif_map_frontend_rings(struct xenvif_queue *queue, return 0; err: - xenvif_unmap_frontend_rings(queue); + xenvif_unmap_frontend_data_rings(queue); return err; } @@ -2164,6 +2164,95 @@ int xenvif_dealloc_kthread(void *data) return 0; } +static void make_ctrl_response(struct xenvif *vif, + const struct xen_netif_ctrl_request *req, + u32 status, u32 data) +{ + RING_IDX idx = vif->ctrl.rsp_prod_pvt; + struct xen_netif_ctrl_response rsp = { + .id = req->id, + .type = req->type, + .status = status, + .data = data, + }; + + *RING_GET_RESPONSE(&vif->ctrl, idx) = rsp; + vif->ctrl.rsp_prod_pvt = ++idx; +} + +static void push_ctrl_response(struct xenvif *vif) +{ + int notify; + + RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->ctrl, notify); + if (notify) + notify_remote_via_irq(vif->ctrl_irq); +} + +static void process_ctrl_request(struct xenvif *vif, + const struct xen_netif_ctrl_request *req) +{ + make_ctrl_response(vif, req, XEN_NETIF_CTRL_STATUS_NOT_SUPPORTED, + 0); + push_ctrl_response(vif); +} + +static void xenvif_ctrl_action(struct xenvif *vif) +{ + for (;;) { + RING_IDX req_prod, req_cons; + + req_prod = vif->ctrl.sring->req_prod; + req_cons = vif->ctrl.req_cons; + + /* Make sure we can see requests before we process them. 
*/ + rmb(); + + if (req_cons == req_prod) + break; + + while (req_cons != req_prod) { + struct xen_netif_ctrl_request req; + + RING_COPY_REQUEST(&vif->ctrl, req_cons, &req); + req_cons++; + + process_ctrl_request(vif, &req); + } + + vif->ctrl.req_cons = req_cons; + vif->ctrl.sring->req_event = req_cons + 1; + } +} + +static bool xenvif_ctrl_work_todo(struct xenvif *vif) +{ + if (likely(RING_HAS_UNCONSUMED_REQUESTS(&vif->ctrl))) + return 1; + + return 0; +} + +int xenvif_ctrl_kthread(void *data) +{ + struct xenvif *vif = data; + + for (;;) { + wait_event_interruptible(vif->ctrl_wq, + xenvif_ctrl_work_todo(vif) || + kthread_should_stop()); + if (kthread_should_stop()) + break; + + while (xenvif_ctrl_work_todo(vif)) + xenvif_ctrl_action(vif); + + cond_resched(); + } + + return 0; +} + static int __init netback_init(void) { int rc = 0; diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c index bd182cd55dda..6a31f2610c23 100644 --- a/drivers/net/xen-netback/xenbus.c +++ b/drivers/net/xen-netback/xenbus.c @@ -38,7 +38,8 @@ struct backend_info { const char *hotplug_script; }; -static int connect_rings(struct backend_info *be, struct xenvif_queue *queue); +static int connect_data_rings(struct backend_info *be, + struct xenvif_queue *queue); static void connect(struct backend_info *be); static int read_xenbus_vif_flags(struct backend_info *be); static int backend_create_xenvif(struct backend_info *be); @@ -367,6 +368,12 @@ static int netback_probe(struct xenbus_device *dev, if (err) pr_debug("Error writing multi-queue-max-queues\n"); + err = xenbus_printf(XBT_NIL, dev->nodename, + "feature-ctrl-ring", + "%u", true); + if (err) + pr_debug("Error writing feature-ctrl-ring\n"); + script = xenbus_read(XBT_NIL, dev->nodename, "script", NULL); if (IS_ERR(script)) { err = PTR_ERR(script); @@ -457,7 +464,8 @@ static void backend_disconnect(struct backend_info *be) #ifdef CONFIG_DEBUG_FS xenvif_debugfs_delif(be->vif); #endif /* CONFIG_DEBUG_FS */ - xenvif_disconnect(be->vif); + xenvif_disconnect_data(be->vif); + xenvif_disconnect_ctrl(be->vif); } } @@ -825,6 +833,48 @@ static void hotplug_status_changed(struct xenbus_watch *watch, kfree(str); } +static int connect_ctrl_ring(struct backend_info *be) +{ + struct xenbus_device *dev = be->dev; + struct xenvif *vif = be->vif; + unsigned int val; + grant_ref_t ring_ref; + unsigned int evtchn; + int err; + + err = xenbus_gather(XBT_NIL, dev->otherend, + "ctrl-ring-ref", "%u", &val, NULL); + if (err) + goto done; /* The frontend does not have a control ring */ + + ring_ref = val; + + err = xenbus_gather(XBT_NIL, dev->otherend, + "event-channel-ctrl", "%u", &val, NULL); + if (err) { + xenbus_dev_fatal(dev, err, + "reading %s/event-channel-ctrl", + dev->otherend); + goto fail; + } + + evtchn = val; + + err = xenvif_connect_ctrl(vif, ring_ref, evtchn); + if (err) { + xenbus_dev_fatal(dev, err, + "mapping shared-frame %u port %u", + ring_ref, evtchn); + goto fail; + } + +done: + return 0; + +fail: + return err; +} + static void connect(struct backend_info *be) { int err; @@ -861,6 +911,12 @@ static void connect(struct backend_info *be) xen_register_watchers(dev, be->vif); read_xenbus_vif_flags(be); + err = connect_ctrl_ring(be); + if (err) { + xenbus_dev_fatal(dev, err, "connecting control ring"); + return; + } + /* Use the number of queues requested by the frontend */ be->vif->queues = vzalloc(requested_num_queues * sizeof(struct xenvif_queue)); @@ -896,11 +952,12 @@ static void connect(struct backend_info *be) queue->remaining_credit = 
credit_bytes; queue->credit_usec = credit_usec; - err = connect_rings(be, queue); + err = connect_data_rings(be, queue); if (err) { - /* connect_rings() cleans up after itself on failure, - * but we need to clean up after xenvif_init_queue() here, - * and also clean up any previously initialised queues. + /* connect_data_rings() cleans up after itself on + * failure, but we need to clean up after + * xenvif_init_queue() here, and also clean up any + * previously initialised queues. */ xenvif_deinit_queue(queue); be->vif->num_queues = queue_index; @@ -935,15 +992,17 @@ static void connect(struct backend_info *be) err: if (be->vif->num_queues > 0) - xenvif_disconnect(be->vif); /* Clean up existing queues */ + xenvif_disconnect_data(be->vif); /* Clean up existing queues */ vfree(be->vif->queues); be->vif->queues = NULL; be->vif->num_queues = 0; + xenvif_disconnect_ctrl(be->vif); return; } -static int connect_rings(struct backend_info *be, struct xenvif_queue *queue) +static int connect_data_rings(struct backend_info *be, + struct xenvif_queue *queue) { struct xenbus_device *dev = be->dev; unsigned int num_queues = queue->vif->num_queues; @@ -1007,8 +1066,8 @@ static int connect_rings(struct backend_info *be, struct xenvif_queue *queue) } /* Map the shared frame, irq etc. */ - err = xenvif_connect(queue, tx_ring_ref, rx_ring_ref, - tx_evtchn, rx_evtchn); + err = xenvif_connect_data(queue, tx_ring_ref, rx_ring_ref, + tx_evtchn, rx_evtchn); if (err) { xenbus_dev_fatal(dev, err, "mapping shared-frames %lu/%lu port tx %u rx %u", From 40d8abdee806d496a60ee607a6d01b1cd7fabaf0 Mon Sep 17 00:00:00 2001 From: Paul Durrant Date: Fri, 13 May 2016 09:37:27 +0100 Subject: [PATCH 1577/1649] xen-netback: add control protocol implementation My recent patch to include/xen/interface/io/netif.h defines a new shared ring (in addition to the rx and tx rings) for passing control messages from a VM frontend driver to a backend driver. A previous patch added the necessary boilerplate for mapping the control ring from the frontend, should it be created. This patch adds implementations for each of the defined protocol messages. Signed-off-by: Paul Durrant Cc: Wei Liu Acked-by: Wei Liu Signed-off-by: David S. 
Miller --- drivers/net/xen-netback/Makefile | 2 +- drivers/net/xen-netback/common.h | 46 ++++ drivers/net/xen-netback/hash.c | 384 ++++++++++++++++++++++++++++ drivers/net/xen-netback/interface.c | 24 ++ drivers/net/xen-netback/netback.c | 49 +++- 5 files changed, 502 insertions(+), 3 deletions(-) create mode 100644 drivers/net/xen-netback/hash.c diff --git a/drivers/net/xen-netback/Makefile b/drivers/net/xen-netback/Makefile index e346e8125ef5..11e02be9db1a 100644 --- a/drivers/net/xen-netback/Makefile +++ b/drivers/net/xen-netback/Makefile @@ -1,3 +1,3 @@ obj-$(CONFIG_XEN_NETDEV_BACKEND) := xen-netback.o -xen-netback-y := netback.o xenbus.o interface.o +xen-netback-y := netback.o xenbus.o interface.o hash.o diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h index 093a12abf71f..84d6cbdd11b2 100644 --- a/drivers/net/xen-netback/common.h +++ b/drivers/net/xen-netback/common.h @@ -220,6 +220,35 @@ struct xenvif_mcast_addr { #define XEN_NETBK_MCAST_MAX 64 +#define XEN_NETBK_MAX_HASH_KEY_SIZE 40 +#define XEN_NETBK_MAX_HASH_MAPPING_SIZE 128 +#define XEN_NETBK_HASH_TAG_SIZE 40 + +struct xenvif_hash_cache_entry { + struct list_head link; + struct rcu_head rcu; + u8 tag[XEN_NETBK_HASH_TAG_SIZE]; + unsigned int len; + u32 val; + int seq; +}; + +struct xenvif_hash_cache { + spinlock_t lock; + struct list_head list; + unsigned int count; + atomic_t seq; +}; + +struct xenvif_hash { + unsigned int alg; + u32 flags; + u8 key[XEN_NETBK_MAX_HASH_KEY_SIZE]; + u32 mapping[XEN_NETBK_MAX_HASH_MAPPING_SIZE]; + unsigned int size; + struct xenvif_hash_cache cache; +}; + struct xenvif { /* Unique identifier for this interface. */ domid_t domid; @@ -251,6 +280,8 @@ struct xenvif { unsigned int num_queues; /* active queues, resource allocated */ unsigned int stalled_queues; + struct xenvif_hash hash; + struct xenbus_watch credit_watch; struct xenbus_watch mcast_ctrl_watch; @@ -353,6 +384,7 @@ extern bool separate_tx_rx_irq; extern unsigned int rx_drain_timeout_msecs; extern unsigned int rx_stall_timeout_msecs; extern unsigned int xenvif_max_queues; +extern unsigned int xenvif_hash_cache_size; #ifdef CONFIG_DEBUG_FS extern struct dentry *xen_netback_dbg_root; @@ -366,4 +398,18 @@ void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue); bool xenvif_mcast_match(struct xenvif *vif, const u8 *addr); void xenvif_mcast_addr_list_free(struct xenvif *vif); +/* Hash */ +void xenvif_init_hash(struct xenvif *vif); +void xenvif_deinit_hash(struct xenvif *vif); + +u32 xenvif_set_hash_alg(struct xenvif *vif, u32 alg); +u32 xenvif_get_hash_flags(struct xenvif *vif, u32 *flags); +u32 xenvif_set_hash_flags(struct xenvif *vif, u32 flags); +u32 xenvif_set_hash_key(struct xenvif *vif, u32 gref, u32 len); +u32 xenvif_set_hash_mapping_size(struct xenvif *vif, u32 size); +u32 xenvif_set_hash_mapping(struct xenvif *vif, u32 gref, u32 len, + u32 off); + +void xenvif_set_skb_hash(struct xenvif *vif, struct sk_buff *skb); + #endif /* __XEN_NETBACK__COMMON_H__ */ diff --git a/drivers/net/xen-netback/hash.c b/drivers/net/xen-netback/hash.c new file mode 100644 index 000000000000..392e3929ae84 --- /dev/null +++ b/drivers/net/xen-netback/hash.c @@ -0,0 +1,384 @@ +/* + * Copyright (c) 2016 Citrix Systems Inc. 
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version 2 + * as published by the Free Softare Foundation; or, when distributed + * separately from the Linux kernel or incorporated into other + * software packages, subject to the following license: + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this source file (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, modify, + * merge, publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#define XEN_NETIF_DEFINE_TOEPLITZ + +#include "common.h" +#include +#include + +static void xenvif_del_hash(struct rcu_head *rcu) +{ + struct xenvif_hash_cache_entry *entry; + + entry = container_of(rcu, struct xenvif_hash_cache_entry, rcu); + + kfree(entry); +} + +static void xenvif_add_hash(struct xenvif *vif, const u8 *tag, + unsigned int len, u32 val) +{ + struct xenvif_hash_cache_entry *new, *entry, *oldest; + unsigned long flags; + bool found; + + new = kmalloc(sizeof(*entry), GFP_KERNEL); + if (!new) + return; + + memcpy(new->tag, tag, len); + new->len = len; + new->val = val; + + spin_lock_irqsave(&vif->hash.cache.lock, flags); + + found = false; + oldest = NULL; + list_for_each_entry_rcu(entry, &vif->hash.cache.list, link) { + /* Make sure we don't add duplicate entries */ + if (entry->len == len && + memcmp(entry->tag, tag, len) == 0) + found = true; + if (!oldest || entry->seq < oldest->seq) + oldest = entry; + } + + if (!found) { + new->seq = atomic_inc_return(&vif->hash.cache.seq); + list_add_rcu(&new->link, &vif->hash.cache.list); + + if (++vif->hash.cache.count > xenvif_hash_cache_size) { + list_del_rcu(&oldest->link); + vif->hash.cache.count--; + call_rcu(&oldest->rcu, xenvif_del_hash); + } + } + + spin_unlock_irqrestore(&vif->hash.cache.lock, flags); + + if (found) + kfree(new); +} + +static u32 xenvif_new_hash(struct xenvif *vif, const u8 *data, + unsigned int len) +{ + u32 val; + + val = xen_netif_toeplitz_hash(vif->hash.key, + sizeof(vif->hash.key), + data, len); + + if (xenvif_hash_cache_size != 0) + xenvif_add_hash(vif, data, len, val); + + return val; +} + +static void xenvif_flush_hash(struct xenvif *vif) +{ + struct xenvif_hash_cache_entry *entry; + unsigned long flags; + + if (xenvif_hash_cache_size == 0) + return; + + spin_lock_irqsave(&vif->hash.cache.lock, flags); + + list_for_each_entry_rcu(entry, &vif->hash.cache.list, link) { + list_del_rcu(&entry->link); + vif->hash.cache.count--; + call_rcu(&entry->rcu, xenvif_del_hash); + } + + spin_unlock_irqrestore(&vif->hash.cache.lock, flags); +} + +static u32 xenvif_find_hash(struct xenvif *vif, const u8 *data, + unsigned int len) +{ + 
struct xenvif_hash_cache_entry *entry; + u32 val; + bool found; + + if (len >= XEN_NETBK_HASH_TAG_SIZE) + return 0; + + if (xenvif_hash_cache_size == 0) + return xenvif_new_hash(vif, data, len); + + rcu_read_lock(); + + found = false; + + list_for_each_entry_rcu(entry, &vif->hash.cache.list, link) { + if (entry->len == len && + memcmp(entry->tag, data, len) == 0) { + val = entry->val; + entry->seq = atomic_inc_return(&vif->hash.cache.seq); + found = true; + break; + } + } + + rcu_read_unlock(); + + if (!found) + val = xenvif_new_hash(vif, data, len); + + return val; +} + +void xenvif_set_skb_hash(struct xenvif *vif, struct sk_buff *skb) +{ + struct flow_keys flow; + u32 hash = 0; + enum pkt_hash_types type = PKT_HASH_TYPE_NONE; + u32 flags = vif->hash.flags; + bool has_tcp_hdr; + + /* Quick rejection test: If the network protocol doesn't + * correspond to any enabled hash type then there's no point + * in parsing the packet header. + */ + switch (skb->protocol) { + case htons(ETH_P_IP): + if (flags & (XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP | + XEN_NETIF_CTRL_HASH_TYPE_IPV4)) + break; + + goto done; + + case htons(ETH_P_IPV6): + if (flags & (XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP | + XEN_NETIF_CTRL_HASH_TYPE_IPV6)) + break; + + goto done; + + default: + goto done; + } + + memset(&flow, 0, sizeof(flow)); + if (!skb_flow_dissect_flow_keys(skb, &flow, 0)) + goto done; + + has_tcp_hdr = (flow.basic.ip_proto == IPPROTO_TCP) && + !(flow.control.flags & FLOW_DIS_IS_FRAGMENT); + + switch (skb->protocol) { + case htons(ETH_P_IP): + if (has_tcp_hdr && + (flags & XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP)) { + u8 data[12]; + + memcpy(&data[0], &flow.addrs.v4addrs.src, 4); + memcpy(&data[4], &flow.addrs.v4addrs.dst, 4); + memcpy(&data[8], &flow.ports.src, 2); + memcpy(&data[10], &flow.ports.dst, 2); + + hash = xenvif_find_hash(vif, data, sizeof(data)); + type = PKT_HASH_TYPE_L4; + } else if (flags & XEN_NETIF_CTRL_HASH_TYPE_IPV4) { + u8 data[8]; + + memcpy(&data[0], &flow.addrs.v4addrs.src, 4); + memcpy(&data[4], &flow.addrs.v4addrs.dst, 4); + + hash = xenvif_find_hash(vif, data, sizeof(data)); + type = PKT_HASH_TYPE_L3; + } + + break; + + case htons(ETH_P_IPV6): + if (has_tcp_hdr && + (flags & XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP)) { + u8 data[36]; + + memcpy(&data[0], &flow.addrs.v6addrs.src, 16); + memcpy(&data[16], &flow.addrs.v6addrs.dst, 16); + memcpy(&data[32], &flow.ports.src, 2); + memcpy(&data[34], &flow.ports.dst, 2); + + hash = xenvif_find_hash(vif, data, sizeof(data)); + type = PKT_HASH_TYPE_L4; + } else if (flags & XEN_NETIF_CTRL_HASH_TYPE_IPV6) { + u8 data[32]; + + memcpy(&data[0], &flow.addrs.v6addrs.src, 16); + memcpy(&data[16], &flow.addrs.v6addrs.dst, 16); + + hash = xenvif_find_hash(vif, data, sizeof(data)); + type = PKT_HASH_TYPE_L3; + } + + break; + } + +done: + if (type == PKT_HASH_TYPE_NONE) + skb_clear_hash(skb); + else + __skb_set_sw_hash(skb, hash, type == PKT_HASH_TYPE_L4); +} + +u32 xenvif_set_hash_alg(struct xenvif *vif, u32 alg) +{ + switch (alg) { + case XEN_NETIF_CTRL_HASH_ALGORITHM_NONE: + case XEN_NETIF_CTRL_HASH_ALGORITHM_TOEPLITZ: + break; + + default: + return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER; + } + + vif->hash.alg = alg; + + return XEN_NETIF_CTRL_STATUS_SUCCESS; +} + +u32 xenvif_get_hash_flags(struct xenvif *vif, u32 *flags) +{ + if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE) + return XEN_NETIF_CTRL_STATUS_NOT_SUPPORTED; + + *flags = XEN_NETIF_CTRL_HASH_TYPE_IPV4 | + XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP | + XEN_NETIF_CTRL_HASH_TYPE_IPV6 | + 
XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP; + + return XEN_NETIF_CTRL_STATUS_SUCCESS; +} + +u32 xenvif_set_hash_flags(struct xenvif *vif, u32 flags) +{ + if (flags & ~(XEN_NETIF_CTRL_HASH_TYPE_IPV4 | + XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP | + XEN_NETIF_CTRL_HASH_TYPE_IPV6 | + XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP)) + return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER; + + if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE) + return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER; + + vif->hash.flags = flags; + + return XEN_NETIF_CTRL_STATUS_SUCCESS; +} + +u32 xenvif_set_hash_key(struct xenvif *vif, u32 gref, u32 len) +{ + u8 *key = vif->hash.key; + struct gnttab_copy copy_op = { + .source.u.ref = gref, + .source.domid = vif->domid, + .dest.u.gmfn = virt_to_gfn(key), + .dest.domid = DOMID_SELF, + .dest.offset = xen_offset_in_page(key), + .len = len, + .flags = GNTCOPY_source_gref + }; + + if (len > XEN_NETBK_MAX_HASH_KEY_SIZE) + return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER; + + if (len != 0) { + gnttab_batch_copy(&copy_op, 1); + + if (copy_op.status != GNTST_okay) + return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER; + } + + /* Clear any remaining key octets */ + if (len < XEN_NETBK_MAX_HASH_KEY_SIZE) + memset(key + len, 0, XEN_NETBK_MAX_HASH_KEY_SIZE - len); + + xenvif_flush_hash(vif); + + return XEN_NETIF_CTRL_STATUS_SUCCESS; +} + +u32 xenvif_set_hash_mapping_size(struct xenvif *vif, u32 size) +{ + if (size > XEN_NETBK_MAX_HASH_MAPPING_SIZE) + return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER; + + vif->hash.size = size; + memset(vif->hash.mapping, 0, sizeof(u32) * size); + + return XEN_NETIF_CTRL_STATUS_SUCCESS; +} + +u32 xenvif_set_hash_mapping(struct xenvif *vif, u32 gref, u32 len, + u32 off) +{ + u32 *mapping = &vif->hash.mapping[off]; + struct gnttab_copy copy_op = { + .source.u.ref = gref, + .source.domid = vif->domid, + .dest.u.gmfn = virt_to_gfn(mapping), + .dest.domid = DOMID_SELF, + .dest.offset = xen_offset_in_page(mapping), + .len = len * sizeof(u32), + .flags = GNTCOPY_source_gref + }; + + if ((off + len > vif->hash.size) || copy_op.len > XEN_PAGE_SIZE) + return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER; + + while (len-- != 0) + if (mapping[off++] >= vif->num_queues) + return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER; + + if (len != 0) { + gnttab_batch_copy(&copy_op, 1); + + if (copy_op.status != GNTST_okay) + return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER; + } + + return XEN_NETIF_CTRL_STATUS_SUCCESS; +} + +void xenvif_init_hash(struct xenvif *vif) +{ + if (xenvif_hash_cache_size == 0) + return; + + spin_lock_init(&vif->hash.cache.lock); + INIT_LIST_HEAD(&vif->hash.cache.list); +} + +void xenvif_deinit_hash(struct xenvif *vif) +{ + xenvif_flush_hash(vif); +} diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c index 78a10d2af101..5a39cdbc217c 100644 --- a/drivers/net/xen-netback/interface.c +++ b/drivers/net/xen-netback/interface.c @@ -151,6 +151,24 @@ void xenvif_wake_queue(struct xenvif_queue *queue) netif_tx_wake_queue(netdev_get_tx_queue(dev, id)); } +static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb, + void *accel_priv, + select_queue_fallback_t fallback) +{ + struct xenvif *vif = netdev_priv(dev); + unsigned int size = vif->hash.size; + + if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE) + return fallback(dev, skb) % dev->real_num_tx_queues; + + xenvif_set_skb_hash(vif, skb); + + if (size == 0) + return skb_get_hash_raw(skb) % dev->real_num_tx_queues; + + return vif->hash.mapping[skb_get_hash_raw(skb) % size]; +} + static int
xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct xenvif *vif = netdev_priv(dev); @@ -395,6 +413,7 @@ static const struct ethtool_ops xenvif_ethtool_ops = { }; static const struct net_device_ops xenvif_netdev_ops = { + .ndo_select_queue = xenvif_select_queue, .ndo_start_xmit = xenvif_start_xmit, .ndo_get_stats = xenvif_get_stats, .ndo_open = xenvif_open, @@ -563,6 +582,8 @@ int xenvif_connect_ctrl(struct xenvif *vif, grant_ref_t ring_ref, vif->ctrl_irq = err; + xenvif_init_hash(vif); + task = kthread_create(xenvif_ctrl_kthread, (void *)vif, "%s-control", dev->name); if (IS_ERR(task)) { @@ -579,6 +600,7 @@ int xenvif_connect_ctrl(struct xenvif *vif, grant_ref_t ring_ref, return 0; err_deinit: + xenvif_deinit_hash(vif); unbind_from_irqhandler(vif->ctrl_irq, vif); vif->ctrl_irq = 0; @@ -749,6 +771,8 @@ void xenvif_disconnect_ctrl(struct xenvif *vif) vif->ctrl_task = NULL; } + xenvif_deinit_hash(vif); + if (vif->ctrl_irq) { unbind_from_irqhandler(vif->ctrl_irq, vif); vif->ctrl_irq = 0; diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c index ff22b6daa077..1916ab332d60 100644 --- a/drivers/net/xen-netback/netback.c +++ b/drivers/net/xen-netback/netback.c @@ -89,6 +89,11 @@ module_param(fatal_skb_slots, uint, 0444); */ #define XEN_NETBACK_TX_COPY_LEN 128 +/* This is the maximum number of flows in the hash cache. */ +#define XENVIF_HASH_CACHE_SIZE_DEFAULT 64 +unsigned int xenvif_hash_cache_size = XENVIF_HASH_CACHE_SIZE_DEFAULT; +module_param_named(hash_cache_size, xenvif_hash_cache_size, uint, 0644); +MODULE_PARM_DESC(hash_cache_size, "Number of flows in the hash cache"); static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx, u8 status); @@ -2192,8 +2197,48 @@ static void push_ctrl_response(struct xenvif *vif) static void process_ctrl_request(struct xenvif *vif, const struct xen_netif_ctrl_request *req) { - make_ctrl_response(vif, req, XEN_NETIF_CTRL_STATUS_NOT_SUPPORTED, - 0); + u32 status = XEN_NETIF_CTRL_STATUS_NOT_SUPPORTED; + u32 data = 0; + + switch (req->type) { + case XEN_NETIF_CTRL_TYPE_SET_HASH_ALGORITHM: + status = xenvif_set_hash_alg(vif, req->data[0]); + break; + + case XEN_NETIF_CTRL_TYPE_GET_HASH_FLAGS: + status = xenvif_get_hash_flags(vif, &data); + break; + + case XEN_NETIF_CTRL_TYPE_SET_HASH_FLAGS: + status = xenvif_set_hash_flags(vif, req->data[0]); + break; + + case XEN_NETIF_CTRL_TYPE_SET_HASH_KEY: + status = xenvif_set_hash_key(vif, req->data[0], + req->data[1]); + break; + + case XEN_NETIF_CTRL_TYPE_GET_HASH_MAPPING_SIZE: + status = XEN_NETIF_CTRL_STATUS_SUCCESS; + data = XEN_NETBK_MAX_HASH_MAPPING_SIZE; + break; + + case XEN_NETIF_CTRL_TYPE_SET_HASH_MAPPING_SIZE: + status = xenvif_set_hash_mapping_size(vif, + req->data[0]); + break; + + case XEN_NETIF_CTRL_TYPE_SET_HASH_MAPPING: + status = xenvif_set_hash_mapping(vif, req->data[0], + req->data[1], + req->data[2]); + break; + + default: + break; + } + + make_ctrl_response(vif, req, status, data); push_ctrl_response(vif); } From f07f989338587bc2b202f6e3c8e8468c450bd6a2 Mon Sep 17 00:00:00 2001 From: Paul Durrant Date: Fri, 13 May 2016 09:37:28 +0100 Subject: [PATCH 1578/1649] xen-netback: pass hash value to the frontend My recent patch to include/xen/interface/io/netif.h defines a new extra info type that can be used to pass hash values between backend and guest frontend. This patch adds code to xen-netback to pass hash values calculated for guest receive-side packets (i.e. netback transmit side) to the frontend. 
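For illustration only (this snippet is not part of the patch): the value passed to the frontend below corresponds to the Toeplitz hash that netback computes over the tuple packed by xenvif_set_skb_hash(), e.g. the 12-byte IPv4 source/destination address plus TCP port pair, using the key loaded via XEN_NETIF_CTRL_TYPE_SET_HASH_KEY. A minimal standalone C sketch of that calculation (the function name toeplitz_hash and its signature are made up for this example) could look like:

	#include <stdint.h>
	#include <stddef.h>

	/* Illustrative only: compute a 32-bit Toeplitz hash of data[0..len-1]
	 * with the given key; key bytes beyond keylen are treated as zero.
	 */
	static uint32_t toeplitz_hash(const uint8_t *key, size_t keylen,
				      const uint8_t *data, size_t len)
	{
		uint64_t window = 0;	/* 64-bit sliding window over the key */
		uint32_t hash = 0;
		size_t i;
		int bit;

		/* Preload the first eight key bytes. */
		for (i = 0; i < 8; i++)
			window = (window << 8) | (i < keylen ? key[i] : 0);

		for (i = 0; i < len; i++) {
			for (bit = 7; bit >= 0; bit--) {
				/* For every set input bit, XOR in the 32 key
				 * bits aligned with it, then slide the window
				 * left by one bit.
				 */
				if (data[i] & (1u << bit))
					hash ^= (uint32_t)(window >> 32);
				window <<= 1;
			}
			/* Refill the low byte of the window with the next
			 * key byte, now that eight bits have been consumed.
			 */
			window |= (i + 8 < keylen) ? key[i + 8] : 0;
		}

		return hash;
	}

Given the backend's key (up to XEN_NETBK_MAX_HASH_KEY_SIZE octets, with unset octets cleared to zero) and the same tuple layout, a frontend could use a routine like this to reproduce or sanity-check the value it receives in the XEN_NETIF_EXTRA_TYPE_HASH extra info segment.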
Signed-off-by: Paul Durrant Acked-by: Wei Liu Signed-off-by: David S. Miller --- drivers/net/xen-netback/interface.c | 13 ++++- drivers/net/xen-netback/netback.c | 78 ++++++++++++++++++++++++----- 2 files changed, 77 insertions(+), 14 deletions(-) diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c index 5a39cdbc217c..1c7f49b5acc1 100644 --- a/drivers/net/xen-netback/interface.c +++ b/drivers/net/xen-netback/interface.c @@ -158,8 +158,17 @@ static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb, struct xenvif *vif = netdev_priv(dev); unsigned int size = vif->hash.size; - if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE) - return fallback(dev, skb) % dev->real_num_tx_queues; + if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE) { + u16 index = fallback(dev, skb) % dev->real_num_tx_queues; + + /* Make sure there is no hash information in the socket + * buffer otherwise it would be incorrectly forwarded + * to the frontend. + */ + skb_clear_hash(skb); + + return index; + } xenvif_set_skb_hash(vif, skb); diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c index 1916ab332d60..9d6bd862f3c1 100644 --- a/drivers/net/xen-netback/netback.c +++ b/drivers/net/xen-netback/netback.c @@ -168,6 +168,8 @@ static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue) needed = DIV_ROUND_UP(skb->len, XEN_PAGE_SIZE); if (skb_is_gso(skb)) needed++; + if (skb->sw_hash) + needed++; do { prod = queue->rx.sring->req_prod; @@ -285,6 +287,8 @@ struct gop_frag_copy { struct xenvif_rx_meta *meta; int head; int gso_type; + int protocol; + int hash_present; struct page *page; }; @@ -331,8 +335,15 @@ static void xenvif_setup_copy_gop(unsigned long gfn, npo->copy_off += *len; info->meta->size += *len; + if (!info->head) + return; + /* Leave a gap for the GSO descriptor. */ - if (info->head && ((1 << info->gso_type) & queue->vif->gso_mask)) + if ((1 << info->gso_type) & queue->vif->gso_mask) + queue->rx.req_cons++; + + /* Leave a gap for the hash extra segment. */ + if (info->hash_present) queue->rx.req_cons++; info->head = 0; /* There must be something in this buffer now */ @@ -367,6 +378,11 @@ static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb .npo = npo, .head = *head, .gso_type = XEN_NETIF_GSO_TYPE_NONE, + /* xenvif_set_skb_hash() will have either set a s/w + * hash or cleared the hash depending on + * whether the the frontend wants a hash for this skb. 
+ */ + .hash_present = skb->sw_hash, }; unsigned long bytes; @@ -555,6 +571,7 @@ void xenvif_kick_thread(struct xenvif_queue *queue) static void xenvif_rx_action(struct xenvif_queue *queue) { + struct xenvif *vif = queue->vif; s8 status; u16 flags; struct xen_netif_rx_response *resp; @@ -590,9 +607,10 @@ static void xenvif_rx_action(struct xenvif_queue *queue) gnttab_batch_copy(queue->grant_copy_op, npo.copy_prod); while ((skb = __skb_dequeue(&rxq)) != NULL) { + struct xen_netif_extra_info *extra = NULL; if ((1 << queue->meta[npo.meta_cons].gso_type) & - queue->vif->gso_prefix_mask) { + vif->gso_prefix_mask) { resp = RING_GET_RESPONSE(&queue->rx, queue->rx.rsp_prod_pvt++); @@ -610,7 +628,7 @@ static void xenvif_rx_action(struct xenvif_queue *queue) queue->stats.tx_bytes += skb->len; queue->stats.tx_packets++; - status = xenvif_check_gop(queue->vif, + status = xenvif_check_gop(vif, XENVIF_RX_CB(skb)->meta_slots_used, &npo); @@ -632,21 +650,57 @@ static void xenvif_rx_action(struct xenvif_queue *queue) flags); if ((1 << queue->meta[npo.meta_cons].gso_type) & - queue->vif->gso_mask) { - struct xen_netif_extra_info *gso = - (struct xen_netif_extra_info *) + vif->gso_mask) { + extra = (struct xen_netif_extra_info *) RING_GET_RESPONSE(&queue->rx, queue->rx.rsp_prod_pvt++); resp->flags |= XEN_NETRXF_extra_info; - gso->u.gso.type = queue->meta[npo.meta_cons].gso_type; - gso->u.gso.size = queue->meta[npo.meta_cons].gso_size; - gso->u.gso.pad = 0; - gso->u.gso.features = 0; + extra->u.gso.type = queue->meta[npo.meta_cons].gso_type; + extra->u.gso.size = queue->meta[npo.meta_cons].gso_size; + extra->u.gso.pad = 0; + extra->u.gso.features = 0; - gso->type = XEN_NETIF_EXTRA_TYPE_GSO; - gso->flags = 0; + extra->type = XEN_NETIF_EXTRA_TYPE_GSO; + extra->flags = 0; + } + + if (skb->sw_hash) { + /* Since the skb got here via xenvif_select_queue() + * we know that the hash has been re-calculated + * according to a configuration set by the frontend + * and therefore we know that it is legitimate to + * pass it to the frontend. + */ + if (resp->flags & XEN_NETRXF_extra_info) + extra->flags |= XEN_NETIF_EXTRA_FLAG_MORE; + else + resp->flags |= XEN_NETRXF_extra_info; + + extra = (struct xen_netif_extra_info *) + RING_GET_RESPONSE(&queue->rx, + queue->rx.rsp_prod_pvt++); + + extra->u.hash.algorithm = + XEN_NETIF_CTRL_HASH_ALGORITHM_TOEPLITZ; + + if (skb->l4_hash) + extra->u.hash.type = + skb->protocol == htons(ETH_P_IP) ? + _XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP : + _XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP; + else + extra->u.hash.type = + skb->protocol == htons(ETH_P_IP) ? + _XEN_NETIF_CTRL_HASH_TYPE_IPV4 : + _XEN_NETIF_CTRL_HASH_TYPE_IPV6; + + *(uint32_t *)extra->u.hash.value = + skb_get_hash_raw(skb); + + extra->type = XEN_NETIF_EXTRA_TYPE_HASH; + extra->flags = 0; } xenvif_add_frag_responses(queue, status, From c2d09fde7299f68f29e84fe5a415d5c1a7abae75 Mon Sep 17 00:00:00 2001 From: Paul Durrant Date: Fri, 13 May 2016 09:37:29 +0100 Subject: [PATCH 1579/1649] xen-netback: use hash value from the frontend My recent patch to include/xen/interface/io/netif.h defines a new extra info type that can be used to pass hash values between backend and guest frontend. This patch adds code to xen-netback to use the value in a hash extra info fragment passed from the guest frontend in a transmit-side (i.e. netback receive side) packet to set the skb hash accordingly. Signed-off-by: Paul Durrant Acked-by: Wei Liu Signed-off-by: David S. 
Miller --- drivers/net/xen-netback/netback.c | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c index 9d6bd862f3c1..edbae0b1e8f0 100644 --- a/drivers/net/xen-netback/netback.c +++ b/drivers/net/xen-netback/netback.c @@ -1510,6 +1510,33 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue, } } + if (extras[XEN_NETIF_EXTRA_TYPE_HASH - 1].type) { + struct xen_netif_extra_info *extra; + enum pkt_hash_types type = PKT_HASH_TYPE_NONE; + + extra = &extras[XEN_NETIF_EXTRA_TYPE_HASH - 1]; + + switch (extra->u.hash.type) { + case _XEN_NETIF_CTRL_HASH_TYPE_IPV4: + case _XEN_NETIF_CTRL_HASH_TYPE_IPV6: + type = PKT_HASH_TYPE_L3; + break; + + case _XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP: + case _XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP: + type = PKT_HASH_TYPE_L4; + break; + + default: + break; + } + + if (type != PKT_HASH_TYPE_NONE) + skb_set_hash(skb, + *(u32 *)extra->u.hash.value, + type); + } + XENVIF_TX_CB(skb)->pending_idx = pending_idx; __skb_put(skb, data_len); From 8be0cfa4d352167df508acd571eb19afd8a2ce93 Mon Sep 17 00:00:00 2001 From: Jiri Benc Date: Fri, 13 May 2016 10:48:42 +0200 Subject: [PATCH 1580/1649] vxlan: set mac_header correctly in GPE mode For VXLAN-GPE, the interface is ARPHRD_NONE, thus we need to reset mac_header after pulling the outer header. v2: Put the code to the existing conditional block as suggested by Shmulik Ladkani. Fixes: e1e5314de08b ("vxlan: implement GPE") Signed-off-by: Jiri Benc Reviewed-by: Shmulik Ladkani Signed-off-by: David S. Miller --- drivers/net/vxlan.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 2f29d20aa08f..25ab6bf013c4 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -1381,6 +1381,7 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb) if (!vxlan_set_mac(vxlan, vs, skb)) goto drop; } else { + skb_reset_mac_header(skb); skb->dev = vxlan->dev; skb->pkt_type = PACKET_HOST; } From 3ed687823c298b2349ab1f3d38ac88d967b8eaea Mon Sep 17 00:00:00 2001 From: Jisheng Zhang Date: Fri, 13 May 2016 19:57:29 +0800 Subject: [PATCH 1581/1649] net: pxa168_eth: use {readl|writel}_relaxed instead of readl/writel Since appropriate memory barriers are already there, use the relaxed version to improve performance a bit. Signed-off-by: Jisheng Zhang Signed-off-by: David S. 
Miller --- drivers/net/ethernet/marvell/pxa168_eth.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c index 89d0d835352e..ab9d0e838a40 100644 --- a/drivers/net/ethernet/marvell/pxa168_eth.c +++ b/drivers/net/ethernet/marvell/pxa168_eth.c @@ -286,12 +286,12 @@ static int pxa168_eth_stop(struct net_device *dev); static inline u32 rdl(struct pxa168_eth_private *pep, int offset) { - return readl(pep->base + offset); + return readl_relaxed(pep->base + offset); } static inline void wrl(struct pxa168_eth_private *pep, int offset, u32 data) { - writel(data, pep->base + offset); + writel_relaxed(data, pep->base + offset); } static void abort_dma(struct pxa168_eth_private *pep) From b17d15592df057437ad356b431bb876889940620 Mon Sep 17 00:00:00 2001 From: Jisheng Zhang Date: Fri, 13 May 2016 19:57:30 +0800 Subject: [PATCH 1582/1649] net: pxa168_eth: Use dma_wmb/rmb where appropriate Update the pxa168_eth driver to use the dma_rmb/wmb calls instead of the full barriers in order to improve performance: reduced 97ns/39ns on average in tx/rx path on Marvell BG4CT platform. Signed-off-by: Jisheng Zhang Signed-off-by: David S. Miller --- drivers/net/ethernet/marvell/pxa168_eth.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c index ab9d0e838a40..54d5154ac0f8 100644 --- a/drivers/net/ethernet/marvell/pxa168_eth.c +++ b/drivers/net/ethernet/marvell/pxa168_eth.c @@ -342,9 +342,9 @@ static void rxq_refill(struct net_device *dev) pep->rx_skb[used_rx_desc] = skb; /* Return the descriptor to DMA ownership */ - wmb(); + dma_wmb(); p_used_rx_desc->cmd_sts = BUF_OWNED_BY_DMA | RX_EN_INT; - wmb(); + dma_wmb(); /* Move the used descriptor pointer to the next descriptor */ pep->rx_used_desc_q = (used_rx_desc + 1) % pep->rx_ring_size; @@ -794,7 +794,7 @@ static int rxq_process(struct net_device *dev, int budget) rx_used_desc = pep->rx_used_desc_q; rx_desc = &pep->p_rx_desc_area[rx_curr_desc]; cmd_sts = rx_desc->cmd_sts; - rmb(); + dma_rmb(); if (cmd_sts & (BUF_OWNED_BY_DMA)) break; skb = pep->rx_skb[rx_curr_desc]; @@ -1287,7 +1287,7 @@ static int pxa168_eth_start_xmit(struct sk_buff *skb, struct net_device *dev) skb_tx_timestamp(skb); - wmb(); + dma_wmb(); desc->cmd_sts = BUF_OWNED_BY_DMA | TX_GEN_CRC | TX_FIRST_DESC | TX_ZERO_PADDING | TX_LAST_DESC | TX_EN_INT; wmb(); From 3804070235264ea883c3fdccd9ed16fef20b5ccb Mon Sep 17 00:00:00 2001 From: Amir Vadai Date: Fri, 13 May 2016 12:55:35 +0000 Subject: [PATCH 1583/1649] net/sched: Enable netdev drivers to update statistics of offloaded actions Introduce stats_update callback. netdev driver could call it for offloaded actions to update the basic statistics (packets, bytes and last use). Since bstats_update() and bstats_cpu_update() use skb as an argument to get the counters, _bstats_update() and _bstats_cpu_update(), that get bytes and packets as arguments, were added. Signed-off-by: Amir Vadai Signed-off-by: David S. 
Miller --- include/net/act_api.h | 12 ++++++++++++ include/net/sch_generic.h | 20 ++++++++++++++++++-- 2 files changed, 30 insertions(+), 2 deletions(-) diff --git a/include/net/act_api.h b/include/net/act_api.h index 03e322b30218..2cd9e9bb059a 100644 --- a/include/net/act_api.h +++ b/include/net/act_api.h @@ -106,6 +106,7 @@ struct tc_action_ops { int bind); int (*walk)(struct net *, struct sk_buff *, struct netlink_callback *, int, struct tc_action *); + void (*stats_update)(struct tc_action *, u64, u32, u64); }; struct tc_action_net { @@ -178,10 +179,21 @@ int tcf_action_copy_stats(struct sk_buff *, struct tc_action *, int); #define tc_for_each_action(_a, _exts) \ list_for_each_entry(a, &(_exts)->actions, list) + +static inline void tcf_action_stats_update(struct tc_action *a, u64 bytes, + u64 packets, u64 lastuse) +{ + if (!a->ops->stats_update) + return; + + a->ops->stats_update(a, bytes, packets, lastuse); +} + #else /* CONFIG_NET_CLS_ACT */ #define tc_no_actions(_exts) true #define tc_for_each_action(_a, _exts) while (0) +#define tcf_action_stats_update(a, bytes, packets, lastuse) #endif /* CONFIG_NET_CLS_ACT */ #endif diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index 46e55f0202a6..a1fd76c22a59 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -527,11 +527,27 @@ static inline bool qdisc_is_percpu_stats(const struct Qdisc *q) return q->flags & TCQ_F_CPUSTATS; } +static inline void _bstats_update(struct gnet_stats_basic_packed *bstats, + __u64 bytes, __u32 packets) +{ + bstats->bytes += bytes; + bstats->packets += packets; +} + static inline void bstats_update(struct gnet_stats_basic_packed *bstats, const struct sk_buff *skb) { - bstats->bytes += qdisc_pkt_len(skb); - bstats->packets += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1; + _bstats_update(bstats, + qdisc_pkt_len(skb), + skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1); +} + +static inline void _bstats_cpu_update(struct gnet_stats_basic_cpu *bstats, + __u64 bytes, __u32 packets) +{ + u64_stats_update_begin(&bstats->syncp); + _bstats_update(&bstats->bstats, bytes, packets); + u64_stats_update_end(&bstats->syncp); } static inline void bstats_cpu_update(struct gnet_stats_basic_cpu *bstats, From 9fea47d93bcc98946a6eca0f019ced337564a344 Mon Sep 17 00:00:00 2001 From: Amir Vadai Date: Fri, 13 May 2016 12:55:36 +0000 Subject: [PATCH 1584/1649] net/sched: act_gact: Update statistics when offloaded to hardware Implement the stats_update callback that will be called by NIC drivers for hardware offloaded filters. Signed-off-by: Amir Vadai Signed-off-by: David S. 
Miller --- net/sched/act_gact.c | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c index 1a6e09fbb2a5..ec5cc8435238 100644 --- a/net/sched/act_gact.c +++ b/net/sched/act_gact.c @@ -148,6 +148,20 @@ static int tcf_gact(struct sk_buff *skb, const struct tc_action *a, return action; } +static void tcf_gact_stats_update(struct tc_action *a, u64 bytes, u32 packets, + u64 lastuse) +{ + struct tcf_gact *gact = a->priv; + int action = READ_ONCE(gact->tcf_action); + struct tcf_t *tm = &gact->tcf_tm; + + _bstats_cpu_update(this_cpu_ptr(gact->common.cpu_bstats), bytes, packets); + if (action == TC_ACT_SHOT) + this_cpu_ptr(gact->common.cpu_qstats)->drops += packets; + + tm->lastuse = lastuse; +} + static int tcf_gact_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref) { unsigned char *b = skb_tail_pointer(skb); @@ -207,6 +221,7 @@ static struct tc_action_ops act_gact_ops = { .type = TCA_ACT_GACT, .owner = THIS_MODULE, .act = tcf_gact, + .stats_update = tcf_gact_stats_update, .dump = tcf_gact_dump, .init = tcf_gact_init, .walk = tcf_gact_walker, From 10cbc6843446165ee250e1ee80dc19ee325f1e6d Mon Sep 17 00:00:00 2001 From: Amir Vadai Date: Fri, 13 May 2016 12:55:37 +0000 Subject: [PATCH 1585/1649] net/sched: cls_flower: Hardware offloaded filters statistics support Introduce a new command in ndo_setup_tc() for hardware offloaded filters, to call the NIC driver, and make it update the statistics. This will be done before dumping the filter and its statistics. Signed-off-by: Amir Vadai Signed-off-by: David S. Miller --- include/net/pkt_cls.h | 1 + net/sched/cls_flower.c | 21 +++++++++++++++++++++ 2 files changed, 22 insertions(+) diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h index 8b4893878cf4..0f7efa88f210 100644 --- a/include/net/pkt_cls.h +++ b/include/net/pkt_cls.h @@ -426,6 +426,7 @@ static inline bool tc_flags_valid(u32 flags) enum tc_fl_command { TC_CLSFLOWER_REPLACE, TC_CLSFLOWER_DESTROY, + TC_CLSFLOWER_STATS, }; struct tc_cls_flower_offload { diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c index 2181ffc76638..730aacafc22d 100644 --- a/net/sched/cls_flower.c +++ b/net/sched/cls_flower.c @@ -210,6 +210,25 @@ static void fl_hw_replace_filter(struct tcf_proto *tp, dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol, &tc); } +static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f) +{ + struct net_device *dev = tp->q->dev_queue->dev; + struct tc_cls_flower_offload offload = {0}; + struct tc_to_netdev tc; + + if (!tc_should_offload(dev, 0)) + return; + + offload.command = TC_CLSFLOWER_STATS; + offload.cookie = (unsigned long)f; + offload.exts = &f->exts; + + tc.type = TC_SETUP_CLSFLOWER; + tc.cls_flower = &offload; + + dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol, &tc); +} + static bool fl_destroy(struct tcf_proto *tp, bool force) { struct cls_fl_head *head = rtnl_dereference(tp->root); @@ -662,6 +681,8 @@ static int fl_dump(struct net *net, struct tcf_proto *tp, unsigned long fh, goto nla_put_failure; } + fl_hw_update_stats(tp, f); + if (fl_dump_key_val(skb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST, mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK, sizeof(key->eth.dst)) || From 42ca502e179d0654ef441333a9d0f35c948734f3 Mon Sep 17 00:00:00 2001 From: Amir Vadai Date: Fri, 13 May 2016 12:55:38 +0000 Subject: [PATCH 1586/1649] net/mlx5_core: Use a macro in mlx5_command_str() Use a macro instead of copying the OP name. Signed-off-by: Amir Vadai Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 303 ++++++++---------- 1 file changed, 132 insertions(+), 171 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index eb926e1ee71c..63cac841cfa9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -406,178 +406,139 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op, const char *mlx5_command_str(int command) { +#define MLX5_COMMAND_STR_CASE(__cmd) case MLX5_CMD_OP_ ## __cmd: return #__cmd + switch (command) { - case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT: - return "QUERY_HCA_VPORT_CONTEXT"; - - case MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT: - return "MODIFY_HCA_VPORT_CONTEXT"; - - case MLX5_CMD_OP_QUERY_HCA_CAP: - return "QUERY_HCA_CAP"; - - case MLX5_CMD_OP_SET_HCA_CAP: - return "SET_HCA_CAP"; - - case MLX5_CMD_OP_QUERY_ADAPTER: - return "QUERY_ADAPTER"; - - case MLX5_CMD_OP_INIT_HCA: - return "INIT_HCA"; - - case MLX5_CMD_OP_TEARDOWN_HCA: - return "TEARDOWN_HCA"; - - case MLX5_CMD_OP_ENABLE_HCA: - return "MLX5_CMD_OP_ENABLE_HCA"; - - case MLX5_CMD_OP_DISABLE_HCA: - return "MLX5_CMD_OP_DISABLE_HCA"; - - case MLX5_CMD_OP_QUERY_PAGES: - return "QUERY_PAGES"; - - case MLX5_CMD_OP_MANAGE_PAGES: - return "MANAGE_PAGES"; - - case MLX5_CMD_OP_CREATE_MKEY: - return "CREATE_MKEY"; - - case MLX5_CMD_OP_QUERY_MKEY: - return "QUERY_MKEY"; - - case MLX5_CMD_OP_DESTROY_MKEY: - return "DESTROY_MKEY"; - - case MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS: - return "QUERY_SPECIAL_CONTEXTS"; - - case MLX5_CMD_OP_CREATE_EQ: - return "CREATE_EQ"; - - case MLX5_CMD_OP_DESTROY_EQ: - return "DESTROY_EQ"; - - case MLX5_CMD_OP_QUERY_EQ: - return "QUERY_EQ"; - - case MLX5_CMD_OP_CREATE_CQ: - return "CREATE_CQ"; - - case MLX5_CMD_OP_DESTROY_CQ: - return "DESTROY_CQ"; - - case MLX5_CMD_OP_QUERY_CQ: - return "QUERY_CQ"; - - case MLX5_CMD_OP_MODIFY_CQ: - return "MODIFY_CQ"; - - case MLX5_CMD_OP_CREATE_QP: - return "CREATE_QP"; - - case MLX5_CMD_OP_DESTROY_QP: - return "DESTROY_QP"; - - case MLX5_CMD_OP_RST2INIT_QP: - return "RST2INIT_QP"; - - case MLX5_CMD_OP_INIT2RTR_QP: - return "INIT2RTR_QP"; - - case MLX5_CMD_OP_RTR2RTS_QP: - return "RTR2RTS_QP"; - - case MLX5_CMD_OP_RTS2RTS_QP: - return "RTS2RTS_QP"; - - case MLX5_CMD_OP_SQERR2RTS_QP: - return "SQERR2RTS_QP"; - - case MLX5_CMD_OP_2ERR_QP: - return "2ERR_QP"; - - case MLX5_CMD_OP_2RST_QP: - return "2RST_QP"; - - case MLX5_CMD_OP_QUERY_QP: - return "QUERY_QP"; - - case MLX5_CMD_OP_MAD_IFC: - return "MAD_IFC"; - - case MLX5_CMD_OP_INIT2INIT_QP: - return "INIT2INIT_QP"; - - case MLX5_CMD_OP_CREATE_PSV: - return "CREATE_PSV"; - - case MLX5_CMD_OP_DESTROY_PSV: - return "DESTROY_PSV"; - - case MLX5_CMD_OP_CREATE_SRQ: - return "CREATE_SRQ"; - - case MLX5_CMD_OP_DESTROY_SRQ: - return "DESTROY_SRQ"; - - case MLX5_CMD_OP_QUERY_SRQ: - return "QUERY_SRQ"; - - case MLX5_CMD_OP_ARM_RQ: - return "ARM_RQ"; - - case MLX5_CMD_OP_CREATE_XRC_SRQ: - return "CREATE_XRC_SRQ"; - - case MLX5_CMD_OP_DESTROY_XRC_SRQ: - return "DESTROY_XRC_SRQ"; - - case MLX5_CMD_OP_QUERY_XRC_SRQ: - return "QUERY_XRC_SRQ"; - - case MLX5_CMD_OP_ARM_XRC_SRQ: - return "ARM_XRC_SRQ"; - - case MLX5_CMD_OP_ALLOC_PD: - return "ALLOC_PD"; - - case MLX5_CMD_OP_DEALLOC_PD: - return "DEALLOC_PD"; - - case MLX5_CMD_OP_ALLOC_UAR: - return "ALLOC_UAR"; - - case MLX5_CMD_OP_DEALLOC_UAR: - return "DEALLOC_UAR"; - - case MLX5_CMD_OP_ATTACH_TO_MCG: - return "ATTACH_TO_MCG"; - - case MLX5_CMD_OP_DETTACH_FROM_MCG: - return 
"DETTACH_FROM_MCG"; - - case MLX5_CMD_OP_ALLOC_XRCD: - return "ALLOC_XRCD"; - - case MLX5_CMD_OP_DEALLOC_XRCD: - return "DEALLOC_XRCD"; - - case MLX5_CMD_OP_ACCESS_REG: - return "MLX5_CMD_OP_ACCESS_REG"; - - case MLX5_CMD_OP_SET_WOL_ROL: - return "SET_WOL_ROL"; - - case MLX5_CMD_OP_QUERY_WOL_ROL: - return "QUERY_WOL_ROL"; - - case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT: - return "ADD_VXLAN_UDP_DPORT"; - - case MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT: - return "DELETE_VXLAN_UDP_DPORT"; - + MLX5_COMMAND_STR_CASE(QUERY_HCA_CAP); + MLX5_COMMAND_STR_CASE(QUERY_ADAPTER); + MLX5_COMMAND_STR_CASE(INIT_HCA); + MLX5_COMMAND_STR_CASE(TEARDOWN_HCA); + MLX5_COMMAND_STR_CASE(ENABLE_HCA); + MLX5_COMMAND_STR_CASE(DISABLE_HCA); + MLX5_COMMAND_STR_CASE(QUERY_PAGES); + MLX5_COMMAND_STR_CASE(MANAGE_PAGES); + MLX5_COMMAND_STR_CASE(SET_HCA_CAP); + MLX5_COMMAND_STR_CASE(QUERY_ISSI); + MLX5_COMMAND_STR_CASE(SET_ISSI); + MLX5_COMMAND_STR_CASE(CREATE_MKEY); + MLX5_COMMAND_STR_CASE(QUERY_MKEY); + MLX5_COMMAND_STR_CASE(DESTROY_MKEY); + MLX5_COMMAND_STR_CASE(QUERY_SPECIAL_CONTEXTS); + MLX5_COMMAND_STR_CASE(PAGE_FAULT_RESUME); + MLX5_COMMAND_STR_CASE(CREATE_EQ); + MLX5_COMMAND_STR_CASE(DESTROY_EQ); + MLX5_COMMAND_STR_CASE(QUERY_EQ); + MLX5_COMMAND_STR_CASE(GEN_EQE); + MLX5_COMMAND_STR_CASE(CREATE_CQ); + MLX5_COMMAND_STR_CASE(DESTROY_CQ); + MLX5_COMMAND_STR_CASE(QUERY_CQ); + MLX5_COMMAND_STR_CASE(MODIFY_CQ); + MLX5_COMMAND_STR_CASE(CREATE_QP); + MLX5_COMMAND_STR_CASE(DESTROY_QP); + MLX5_COMMAND_STR_CASE(RST2INIT_QP); + MLX5_COMMAND_STR_CASE(INIT2RTR_QP); + MLX5_COMMAND_STR_CASE(RTR2RTS_QP); + MLX5_COMMAND_STR_CASE(RTS2RTS_QP); + MLX5_COMMAND_STR_CASE(SQERR2RTS_QP); + MLX5_COMMAND_STR_CASE(2ERR_QP); + MLX5_COMMAND_STR_CASE(2RST_QP); + MLX5_COMMAND_STR_CASE(QUERY_QP); + MLX5_COMMAND_STR_CASE(SQD_RTS_QP); + MLX5_COMMAND_STR_CASE(INIT2INIT_QP); + MLX5_COMMAND_STR_CASE(CREATE_PSV); + MLX5_COMMAND_STR_CASE(DESTROY_PSV); + MLX5_COMMAND_STR_CASE(CREATE_SRQ); + MLX5_COMMAND_STR_CASE(DESTROY_SRQ); + MLX5_COMMAND_STR_CASE(QUERY_SRQ); + MLX5_COMMAND_STR_CASE(ARM_RQ); + MLX5_COMMAND_STR_CASE(CREATE_XRC_SRQ); + MLX5_COMMAND_STR_CASE(DESTROY_XRC_SRQ); + MLX5_COMMAND_STR_CASE(QUERY_XRC_SRQ); + MLX5_COMMAND_STR_CASE(ARM_XRC_SRQ); + MLX5_COMMAND_STR_CASE(CREATE_DCT); + MLX5_COMMAND_STR_CASE(DESTROY_DCT); + MLX5_COMMAND_STR_CASE(DRAIN_DCT); + MLX5_COMMAND_STR_CASE(QUERY_DCT); + MLX5_COMMAND_STR_CASE(ARM_DCT_FOR_KEY_VIOLATION); + MLX5_COMMAND_STR_CASE(QUERY_VPORT_STATE); + MLX5_COMMAND_STR_CASE(MODIFY_VPORT_STATE); + MLX5_COMMAND_STR_CASE(QUERY_ESW_VPORT_CONTEXT); + MLX5_COMMAND_STR_CASE(MODIFY_ESW_VPORT_CONTEXT); + MLX5_COMMAND_STR_CASE(QUERY_NIC_VPORT_CONTEXT); + MLX5_COMMAND_STR_CASE(MODIFY_NIC_VPORT_CONTEXT); + MLX5_COMMAND_STR_CASE(QUERY_ROCE_ADDRESS); + MLX5_COMMAND_STR_CASE(SET_ROCE_ADDRESS); + MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_CONTEXT); + MLX5_COMMAND_STR_CASE(MODIFY_HCA_VPORT_CONTEXT); + MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_GID); + MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_PKEY); + MLX5_COMMAND_STR_CASE(QUERY_VPORT_COUNTER); + MLX5_COMMAND_STR_CASE(ALLOC_Q_COUNTER); + MLX5_COMMAND_STR_CASE(DEALLOC_Q_COUNTER); + MLX5_COMMAND_STR_CASE(QUERY_Q_COUNTER); + MLX5_COMMAND_STR_CASE(ALLOC_PD); + MLX5_COMMAND_STR_CASE(DEALLOC_PD); + MLX5_COMMAND_STR_CASE(ALLOC_UAR); + MLX5_COMMAND_STR_CASE(DEALLOC_UAR); + MLX5_COMMAND_STR_CASE(CONFIG_INT_MODERATION); + MLX5_COMMAND_STR_CASE(ACCESS_REG); + MLX5_COMMAND_STR_CASE(ATTACH_TO_MCG); + MLX5_COMMAND_STR_CASE(DETTACH_FROM_MCG); + MLX5_COMMAND_STR_CASE(GET_DROPPED_PACKET_LOG); + MLX5_COMMAND_STR_CASE(MAD_IFC); + 
MLX5_COMMAND_STR_CASE(QUERY_MAD_DEMUX); + MLX5_COMMAND_STR_CASE(SET_MAD_DEMUX); + MLX5_COMMAND_STR_CASE(NOP); + MLX5_COMMAND_STR_CASE(ALLOC_XRCD); + MLX5_COMMAND_STR_CASE(DEALLOC_XRCD); + MLX5_COMMAND_STR_CASE(ALLOC_TRANSPORT_DOMAIN); + MLX5_COMMAND_STR_CASE(DEALLOC_TRANSPORT_DOMAIN); + MLX5_COMMAND_STR_CASE(QUERY_CONG_STATUS); + MLX5_COMMAND_STR_CASE(MODIFY_CONG_STATUS); + MLX5_COMMAND_STR_CASE(QUERY_CONG_PARAMS); + MLX5_COMMAND_STR_CASE(MODIFY_CONG_PARAMS); + MLX5_COMMAND_STR_CASE(QUERY_CONG_STATISTICS); + MLX5_COMMAND_STR_CASE(ADD_VXLAN_UDP_DPORT); + MLX5_COMMAND_STR_CASE(DELETE_VXLAN_UDP_DPORT); + MLX5_COMMAND_STR_CASE(SET_L2_TABLE_ENTRY); + MLX5_COMMAND_STR_CASE(QUERY_L2_TABLE_ENTRY); + MLX5_COMMAND_STR_CASE(DELETE_L2_TABLE_ENTRY); + MLX5_COMMAND_STR_CASE(SET_WOL_ROL); + MLX5_COMMAND_STR_CASE(QUERY_WOL_ROL); + MLX5_COMMAND_STR_CASE(CREATE_TIR); + MLX5_COMMAND_STR_CASE(MODIFY_TIR); + MLX5_COMMAND_STR_CASE(DESTROY_TIR); + MLX5_COMMAND_STR_CASE(QUERY_TIR); + MLX5_COMMAND_STR_CASE(CREATE_SQ); + MLX5_COMMAND_STR_CASE(MODIFY_SQ); + MLX5_COMMAND_STR_CASE(DESTROY_SQ); + MLX5_COMMAND_STR_CASE(QUERY_SQ); + MLX5_COMMAND_STR_CASE(CREATE_RQ); + MLX5_COMMAND_STR_CASE(MODIFY_RQ); + MLX5_COMMAND_STR_CASE(DESTROY_RQ); + MLX5_COMMAND_STR_CASE(QUERY_RQ); + MLX5_COMMAND_STR_CASE(CREATE_RMP); + MLX5_COMMAND_STR_CASE(MODIFY_RMP); + MLX5_COMMAND_STR_CASE(DESTROY_RMP); + MLX5_COMMAND_STR_CASE(QUERY_RMP); + MLX5_COMMAND_STR_CASE(CREATE_TIS); + MLX5_COMMAND_STR_CASE(MODIFY_TIS); + MLX5_COMMAND_STR_CASE(DESTROY_TIS); + MLX5_COMMAND_STR_CASE(QUERY_TIS); + MLX5_COMMAND_STR_CASE(CREATE_RQT); + MLX5_COMMAND_STR_CASE(MODIFY_RQT); + MLX5_COMMAND_STR_CASE(DESTROY_RQT); + MLX5_COMMAND_STR_CASE(QUERY_RQT); + MLX5_COMMAND_STR_CASE(SET_FLOW_TABLE_ROOT); + MLX5_COMMAND_STR_CASE(CREATE_FLOW_TABLE); + MLX5_COMMAND_STR_CASE(DESTROY_FLOW_TABLE); + MLX5_COMMAND_STR_CASE(QUERY_FLOW_TABLE); + MLX5_COMMAND_STR_CASE(CREATE_FLOW_GROUP); + MLX5_COMMAND_STR_CASE(DESTROY_FLOW_GROUP); + MLX5_COMMAND_STR_CASE(QUERY_FLOW_GROUP); + MLX5_COMMAND_STR_CASE(SET_FLOW_TABLE_ENTRY); + MLX5_COMMAND_STR_CASE(QUERY_FLOW_TABLE_ENTRY); + MLX5_COMMAND_STR_CASE(DELETE_FLOW_TABLE_ENTRY); default: return "unknown command opcode"; } } From 9dc0b289c4c09bc1a92bdcc055cb37af9b72eb28 Mon Sep 17 00:00:00 2001 From: Amir Vadai Date: Fri, 13 May 2016 12:55:39 +0000 Subject: [PATCH 1587/1649] net/mlx5_core: Firmware commands to support flow counters Getting packet/byte statistics on flows is done through flow counters. Implement the firmware commands to alloc, free and query flow counters. Signed-off-by: Amir Vadai Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 6 ++ .../net/ethernet/mellanox/mlx5/core/fs_cmd.c | 66 +++++++++++++ .../net/ethernet/mellanox/mlx5/core/fs_cmd.h | 5 + include/linux/mlx5/mlx5_ifc.h | 99 ++++++++++++++++++- 4 files changed, 173 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index 63cac841cfa9..dcd2df6518de 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -294,6 +294,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op, case MLX5_CMD_OP_DESTROY_FLOW_TABLE: case MLX5_CMD_OP_DESTROY_FLOW_GROUP: case MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY: + case MLX5_CMD_OP_DEALLOC_FLOW_COUNTER: return MLX5_CMD_STAT_OK; case MLX5_CMD_OP_QUERY_HCA_CAP: @@ -395,6 +396,8 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op, case MLX5_CMD_OP_QUERY_FLOW_GROUP: case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY: case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY: + case MLX5_CMD_OP_ALLOC_FLOW_COUNTER: + case MLX5_CMD_OP_QUERY_FLOW_COUNTER: *status = MLX5_DRIVER_STATUS_ABORTED; *synd = MLX5_DRIVER_SYND; return -EIO; @@ -539,6 +542,9 @@ const char *mlx5_command_str(int command) MLX5_COMMAND_STR_CASE(SET_FLOW_TABLE_ENTRY); MLX5_COMMAND_STR_CASE(QUERY_FLOW_TABLE_ENTRY); MLX5_COMMAND_STR_CASE(DELETE_FLOW_TABLE_ENTRY); + MLX5_COMMAND_STR_CASE(ALLOC_FLOW_COUNTER); + MLX5_COMMAND_STR_CASE(DEALLOC_FLOW_COUNTER); + MLX5_COMMAND_STR_CASE(QUERY_FLOW_COUNTER); default: return "unknown command opcode"; } } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c index 9797768891ee..ccb63a0bb54a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c @@ -323,3 +323,69 @@ int mlx5_cmd_delete_fte(struct mlx5_core_dev *dev, return err; } + +int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u16 *id) +{ + u32 in[MLX5_ST_SZ_DW(alloc_flow_counter_in)]; + u32 out[MLX5_ST_SZ_DW(alloc_flow_counter_out)]; + int err; + + memset(in, 0, sizeof(in)); + memset(out, 0, sizeof(out)); + + MLX5_SET(alloc_flow_counter_in, in, opcode, + MLX5_CMD_OP_ALLOC_FLOW_COUNTER); + + err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, + sizeof(out)); + if (err) + return err; + + *id = MLX5_GET(alloc_flow_counter_out, out, flow_counter_id); + + return 0; +} + +int mlx5_cmd_fc_free(struct mlx5_core_dev *dev, u16 id) +{ + u32 in[MLX5_ST_SZ_DW(dealloc_flow_counter_in)]; + u32 out[MLX5_ST_SZ_DW(dealloc_flow_counter_out)]; + + memset(in, 0, sizeof(in)); + memset(out, 0, sizeof(out)); + + MLX5_SET(dealloc_flow_counter_in, in, opcode, + MLX5_CMD_OP_DEALLOC_FLOW_COUNTER); + MLX5_SET(dealloc_flow_counter_in, in, flow_counter_id, id); + + return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, + sizeof(out)); +} + +int mlx5_cmd_fc_query(struct mlx5_core_dev *dev, u16 id, + u64 *packets, u64 *bytes) +{ + u32 out[MLX5_ST_SZ_BYTES(query_flow_counter_out) + + MLX5_ST_SZ_BYTES(traffic_counter)]; + u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)]; + void *stats; + int err = 0; + + memset(in, 0, sizeof(in)); + memset(out, 0, sizeof(out)); + + MLX5_SET(query_flow_counter_in, in, opcode, + MLX5_CMD_OP_QUERY_FLOW_COUNTER); + MLX5_SET(query_flow_counter_in, in, op_mod, 0); + MLX5_SET(query_flow_counter_in, in, flow_counter_id, id); + + err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out)); + if (err) + return err; + + stats = 
MLX5_ADDR_OF(query_flow_counter_out, out, flow_statistics); + *packets = MLX5_GET64(traffic_counter, stats, packets); + *bytes = MLX5_GET64(traffic_counter, stats, octets); + + return 0; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h index c97b4a03eeed..18c111a4691f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h @@ -70,4 +70,9 @@ int mlx5_cmd_delete_fte(struct mlx5_core_dev *dev, int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft); + +int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u16 *id); +int mlx5_cmd_fc_free(struct mlx5_core_dev *dev, u16 id); +int mlx5_cmd_fc_query(struct mlx5_core_dev *dev, u16 id, + u64 *packets, u64 *bytes); #endif diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 4ce4ea422a10..614c795eadea 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -202,6 +202,9 @@ enum { MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY = 0x936, MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY = 0x937, MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY = 0x938, + MLX5_CMD_OP_ALLOC_FLOW_COUNTER = 0x939, + MLX5_CMD_OP_DEALLOC_FLOW_COUNTER = 0x93a, + MLX5_CMD_OP_QUERY_FLOW_COUNTER = 0x93b, MLX5_CMD_OP_MODIFY_FLOW_TABLE = 0x93c }; @@ -265,7 +268,8 @@ struct mlx5_ifc_flow_table_fields_supported_bits { struct mlx5_ifc_flow_table_prop_layout_bits { u8 ft_support[0x1]; - u8 reserved_at_1[0x2]; + u8 reserved_at_1[0x1]; + u8 flow_counter[0x1]; u8 flow_modify_en[0x1]; u8 modify_root[0x1]; u8 identified_miss_table_mode[0x1]; @@ -941,6 +945,19 @@ struct mlx5_ifc_dest_format_struct_bits { u8 reserved_at_20[0x20]; }; +struct mlx5_ifc_flow_counter_list_bits { + u8 reserved_at_0[0x10]; + u8 flow_counter_id[0x10]; + + u8 reserved_at_20[0x20]; +}; + +union mlx5_ifc_dest_format_struct_flow_counter_list_auto_bits { + struct mlx5_ifc_dest_format_struct_bits dest_format_struct; + struct mlx5_ifc_flow_counter_list_bits flow_counter_list; + u8 reserved_at_0[0x40]; +}; + struct mlx5_ifc_fte_match_param_bits { struct mlx5_ifc_fte_match_set_lyr_2_4_bits outer_headers; @@ -2006,6 +2023,7 @@ enum { MLX5_FLOW_CONTEXT_ACTION_ALLOW = 0x1, MLX5_FLOW_CONTEXT_ACTION_DROP = 0x2, MLX5_FLOW_CONTEXT_ACTION_FWD_DEST = 0x4, + MLX5_FLOW_CONTEXT_ACTION_COUNT = 0x8, }; struct mlx5_ifc_flow_context_bits { @@ -2022,13 +2040,16 @@ struct mlx5_ifc_flow_context_bits { u8 reserved_at_80[0x8]; u8 destination_list_size[0x18]; - u8 reserved_at_a0[0x160]; + u8 reserved_at_a0[0x8]; + u8 flow_counter_list_size[0x18]; + + u8 reserved_at_c0[0x140]; struct mlx5_ifc_fte_match_param_bits match_value; u8 reserved_at_1200[0x600]; - struct mlx5_ifc_dest_format_struct_bits destination[0]; + union mlx5_ifc_dest_format_struct_flow_counter_list_auto_bits destination[0]; }; enum { @@ -3937,6 +3958,34 @@ struct mlx5_ifc_query_flow_group_in_bits { u8 reserved_at_e0[0x120]; }; +struct mlx5_ifc_query_flow_counter_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; + + struct mlx5_ifc_traffic_counter_bits flow_statistics[0]; +}; + +struct mlx5_ifc_query_flow_counter_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x80]; + + u8 clear[0x1]; + u8 reserved_at_c1[0xf]; + u8 num_of_counters[0x10]; + + u8 reserved_at_e0[0x10]; + u8 flow_counter_id[0x10]; +}; + struct mlx5_ifc_query_esw_vport_context_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; @@ 
-5510,6 +5559,28 @@ struct mlx5_ifc_dealloc_pd_in_bits { u8 reserved_at_60[0x20]; }; +struct mlx5_ifc_dealloc_flow_counter_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_dealloc_flow_counter_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x10]; + u8 flow_counter_id[0x10]; + + u8 reserved_at_60[0x20]; +}; + struct mlx5_ifc_create_xrc_srq_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; @@ -6237,6 +6308,28 @@ struct mlx5_ifc_alloc_pd_in_bits { u8 reserved_at_40[0x40]; }; +struct mlx5_ifc_alloc_flow_counter_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x10]; + u8 flow_counter_id[0x10]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_alloc_flow_counter_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x40]; +}; + struct mlx5_ifc_add_vxlan_udp_dport_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; From bd5251dbf156b6bc0661a9409d46e47160df61dd Mon Sep 17 00:00:00 2001 From: Amir Vadai Date: Fri, 13 May 2016 12:55:40 +0000 Subject: [PATCH 1588/1649] net/mlx5_core: Introduce flow steering destination of type counter When adding a flow steering rule with a counter, need to supply a destination of type MLX5_FLOW_DESTINATION_TYPE_COUNTER, with a pointer to a struct mlx5_fc. Also, MLX5_FLOW_CONTEXT_ACTION_COUNT bit should be set in the action. Signed-off-by: Amir Vadai Signed-off-by: David S. Miller --- .../net/ethernet/mellanox/mlx5/core/fs_cmd.c | 36 ++++++++++--- .../net/ethernet/mellanox/mlx5/core/fs_cmd.h | 1 + .../net/ethernet/mellanox/mlx5/core/fs_core.c | 52 +++++++++++++++++-- .../net/ethernet/mellanox/mlx5/core/fs_core.h | 23 ++++++++ include/linux/mlx5/fs.h | 2 + include/linux/mlx5/mlx5_ifc.h | 2 + 6 files changed, 106 insertions(+), 10 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c index ccb63a0bb54a..a5bb6b695242 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c @@ -241,17 +241,20 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev, MLX5_SET(flow_context, in_flow_context, group_id, group_id); MLX5_SET(flow_context, in_flow_context, flow_tag, fte->flow_tag); MLX5_SET(flow_context, in_flow_context, action, fte->action); - MLX5_SET(flow_context, in_flow_context, destination_list_size, - fte->dests_size); in_match_value = MLX5_ADDR_OF(flow_context, in_flow_context, match_value); memcpy(in_match_value, &fte->val, MLX5_ST_SZ_BYTES(fte_match_param)); + in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination); if (fte->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) { - in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination); + int list_size = 0; + list_for_each_entry(dst, &fte->node.children, node.list) { unsigned int id; + if (dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER) + continue; + MLX5_SET(dest_format_struct, in_dests, destination_type, dst->dest_attr.type); if (dst->dest_attr.type == @@ -262,8 +265,31 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev, } MLX5_SET(dest_format_struct, in_dests, destination_id, id); in_dests += MLX5_ST_SZ_BYTES(dest_format_struct); + list_size++; } + + MLX5_SET(flow_context, in_flow_context, destination_list_size, + list_size); } + + if (fte->action & 
MLX5_FLOW_CONTEXT_ACTION_COUNT) { + int list_size = 0; + + list_for_each_entry(dst, &fte->node.children, node.list) { + if (dst->dest_attr.type != + MLX5_FLOW_DESTINATION_TYPE_COUNTER) + continue; + + MLX5_SET(flow_counter_list, in_dests, flow_counter_id, + dst->dest_attr.counter->id); + in_dests += MLX5_ST_SZ_BYTES(dest_format_struct); + list_size++; + } + + MLX5_SET(flow_context, in_flow_context, flow_counter_list_size, + list_size); + } + memset(out, 0, sizeof(out)); err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out)); @@ -283,18 +309,16 @@ int mlx5_cmd_create_fte(struct mlx5_core_dev *dev, int mlx5_cmd_update_fte(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft, unsigned group_id, + int modify_mask, struct fs_fte *fte) { int opmod; - int modify_mask; int atomic_mod_cap = MLX5_CAP_FLOWTABLE(dev, flow_table_properties_nic_receive. flow_modify_en); if (!atomic_mod_cap) return -ENOTSUPP; opmod = 1; - modify_mask = 1 << - MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST; return mlx5_cmd_set_fte(dev, opmod, modify_mask, ft, group_id, fte); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h index 18c111a4691f..fc4f7b83fe0a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h @@ -62,6 +62,7 @@ int mlx5_cmd_create_fte(struct mlx5_core_dev *dev, int mlx5_cmd_update_fte(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft, unsigned group_id, + int modify_mask, struct fs_fte *fte); int mlx5_cmd_delete_fte(struct mlx5_core_dev *dev, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 659a6980cda2..9420def3a2fe 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -344,6 +344,7 @@ static void del_rule(struct fs_node *node) struct mlx5_flow_group *fg; struct fs_fte *fte; u32 *match_value; + int modify_mask; struct mlx5_core_dev *dev = get_dev(node); int match_len = MLX5_ST_SZ_BYTES(fte_match_param); int err; @@ -367,8 +368,11 @@ static void del_rule(struct fs_node *node) } if ((fte->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) && --fte->dests_size) { + modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST), err = mlx5_cmd_update_fte(dev, ft, - fg->id, fte); + fg->id, + modify_mask, + fte); if (err) pr_warn("%s can't del rule fg id=%d fte_index=%d\n", __func__, fg->id, fte->index); @@ -615,6 +619,7 @@ int mlx5_modify_rule_destination(struct mlx5_flow_rule *rule, struct mlx5_flow_table *ft; struct mlx5_flow_group *fg; struct fs_fte *fte; + int modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST); int err = 0; fs_get_obj(fte, rule->node.parent); @@ -626,7 +631,9 @@ int mlx5_modify_rule_destination(struct mlx5_flow_rule *rule, memcpy(&rule->dest_attr, dest, sizeof(*dest)); err = mlx5_cmd_update_fte(get_dev(&ft->node), - ft, fg->id, fte); + ft, fg->id, + modify_mask, + fte); unlock_ref_node(&fte->node); return err; @@ -877,6 +884,7 @@ static struct mlx5_flow_rule *add_rule_fte(struct fs_fte *fte, { struct mlx5_flow_table *ft; struct mlx5_flow_rule *rule; + int modify_mask = 0; int err; rule = alloc_rule(dest); @@ -892,14 +900,20 @@ static struct mlx5_flow_rule *add_rule_fte(struct fs_fte *fte, list_add(&rule->node.list, &fte->node.children); else list_add_tail(&rule->node.list, &fte->node.children); - if (dest) + if (dest) { fte->dests_size++; + + modify_mask |= dest->type == 
MLX5_FLOW_DESTINATION_TYPE_COUNTER ? + BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS) : + BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST); + } + if (fte->dests_size == 1 || !dest) err = mlx5_cmd_create_fte(get_dev(&ft->node), ft, fg->id, fte); else err = mlx5_cmd_update_fte(get_dev(&ft->node), - ft, fg->id, fte); + ft, fg->id, modify_mask, fte); if (err) goto free_rule; @@ -1092,10 +1106,40 @@ unlock_fg: return rule; } +struct mlx5_fc *mlx5_flow_rule_counter(struct mlx5_flow_rule *rule) +{ + struct mlx5_flow_rule *dst; + struct fs_fte *fte; + + fs_get_obj(fte, rule->node.parent); + + fs_for_each_dst(dst, fte) { + if (dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER) + return dst->dest_attr.counter; + } + + return NULL; +} + +static bool counter_is_valid(struct mlx5_fc *counter, u32 action) +{ + if (!(action & MLX5_FLOW_CONTEXT_ACTION_COUNT)) + return !counter; + + if (!counter) + return false; + + /* Hardware support counter for a drop action only */ + return action == (MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT); +} + static bool dest_is_valid(struct mlx5_flow_destination *dest, u32 action, struct mlx5_flow_table *ft) { + if (dest && (dest->type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)) + return counter_is_valid(dest->counter, action); + if (!(action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST)) return true; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h index 8e76cc505f5a..1989048ebdfd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h @@ -96,6 +96,28 @@ struct mlx5_flow_table { struct list_head fwd_rules; }; +struct mlx5_fc_cache { + u64 packets; + u64 bytes; + u64 lastuse; +}; + +struct mlx5_fc { + struct list_head list; + + /* last{packets,bytes} members are used when calculating the delta since + * last reading + */ + u64 lastpackets; + u64 lastbytes; + + u16 id; + bool deleted; + bool aging; + + struct mlx5_fc_cache cache ____cacheline_aligned_in_smp; +}; + /* Type of children is mlx5_flow_rule */ struct fs_fte { struct fs_node node; @@ -105,6 +127,7 @@ struct fs_fte { u32 index; u32 action; enum fs_fte_status status; + struct mlx5_fc *counter; }; /* Type of children is mlx5_flow_table/namespace */ diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h index 6467569ad76e..c8b9ede1c20a 100644 --- a/include/linux/mlx5/fs.h +++ b/include/linux/mlx5/fs.h @@ -73,6 +73,7 @@ struct mlx5_flow_destination { u32 tir_num; struct mlx5_flow_table *ft; u32 vport_num; + struct mlx5_fc *counter; }; }; @@ -125,4 +126,5 @@ void mlx5_del_flow_rule(struct mlx5_flow_rule *fr); int mlx5_modify_rule_destination(struct mlx5_flow_rule *rule, struct mlx5_flow_destination *dest); +struct mlx5_fc *mlx5_flow_rule_counter(struct mlx5_flow_rule *rule); #endif diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 614c795eadea..9a05cd7e5890 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -936,6 +936,8 @@ enum mlx5_flow_destination_type { MLX5_FLOW_DESTINATION_TYPE_VPORT = 0x0, MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE = 0x1, MLX5_FLOW_DESTINATION_TYPE_TIR = 0x2, + + MLX5_FLOW_DESTINATION_TYPE_COUNTER = 0x100, }; struct mlx5_ifc_dest_format_struct_bits { From 43a335e055bb7ebdc8a68ce7362ef26ef5bda92b Mon Sep 17 00:00:00 2001 From: Amir Vadai Date: Fri, 13 May 2016 12:55:41 +0000 Subject: [PATCH 1589/1649] net/mlx5_core: Flow counters infrastructure If a counter has the aging flag set when 
created, it is added to a list of counters that will be queried periodically from a workqueue. query result and last use timestamp are cached. add/del counter must be very efficient since thousands of such operations might be issued in a second. There is only a single reference to counters without aging, therefore no need for locks. But, counters with aging enabled are stored in a list. In order to make code as lockless as possible, all the list manipulation and access to hardware is done from a single context - the periodic counters query thread. The hardware supports multiple counters per FTE, however currently we are using one counter for each FTE. Signed-off-by: Amir Vadai Signed-off-by: David S. Miller --- .../net/ethernet/mellanox/mlx5/core/Makefile | 2 +- .../net/ethernet/mellanox/mlx5/core/fs_core.c | 7 +- .../net/ethernet/mellanox/mlx5/core/fs_core.h | 3 + .../ethernet/mellanox/mlx5/core/fs_counters.c | 226 ++++++++++++++++++ include/linux/mlx5/driver.h | 14 ++ include/linux/mlx5/fs.h | 5 + 6 files changed, 255 insertions(+), 2 deletions(-) create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile index b531d4f3c00b..9ea7b583096a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile +++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile @@ -2,7 +2,7 @@ obj-$(CONFIG_MLX5_CORE) += mlx5_core.o mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \ health.o mcg.o cq.o srq.o alloc.o qp.o port.o mr.o pd.o \ - mad.o transobj.o vport.o sriov.o fs_cmd.o fs_core.o + mad.o transobj.o vport.o sriov.o fs_cmd.o fs_core.o fs_counters.o mlx5_core-$(CONFIG_MLX5_CORE_EN) += wq.o eswitch.o \ en_main.o en_fs.o en_ethtool.o en_tx.o en_rx.o \ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 9420def3a2fe..8b5f0b2c0d5c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -1771,6 +1771,7 @@ void mlx5_cleanup_fs(struct mlx5_core_dev *dev) cleanup_single_prio_root_ns(dev, dev->priv.fdb_root_ns); cleanup_single_prio_root_ns(dev, dev->priv.esw_egress_root_ns); cleanup_single_prio_root_ns(dev, dev->priv.esw_ingress_root_ns); + mlx5_cleanup_fc_stats(dev); } static int init_fdb_root_ns(struct mlx5_core_dev *dev) @@ -1827,10 +1828,14 @@ int mlx5_init_fs(struct mlx5_core_dev *dev) { int err = 0; + err = mlx5_init_fc_stats(dev); + if (err) + return err; + if (MLX5_CAP_GEN(dev, nic_flow_table)) { err = init_root_ns(dev); if (err) - return err; + goto err; } if (MLX5_CAP_GEN(dev, eswitch_flow_table)) { err = init_fdb_root_ns(dev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h index 1989048ebdfd..aa41a7314691 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h @@ -169,6 +169,9 @@ struct mlx5_flow_root_namespace { struct mutex chain_lock; }; +int mlx5_init_fc_stats(struct mlx5_core_dev *dev); +void mlx5_cleanup_fc_stats(struct mlx5_core_dev *dev); + int mlx5_init_fs(struct mlx5_core_dev *dev); void mlx5_cleanup_fs(struct mlx5_core_dev *dev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c new file mode 100644 index 000000000000..164dc37fda72 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c @@ -0,0 
+1,226 @@ +/* + * Copyright (c) 2016, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include +#include +#include "mlx5_core.h" +#include "fs_core.h" +#include "fs_cmd.h" + +#define MLX5_FC_STATS_PERIOD msecs_to_jiffies(1000) + +/* locking scheme: + * + * It is the responsibility of the user to prevent concurrent calls or bad + * ordering to mlx5_fc_create(), mlx5_fc_destroy() and accessing a reference + * to struct mlx5_fc. + * e.g en_tc.c is protected by RTNL lock of its caller, and will never call a + * dump (access to struct mlx5_fc) after a counter is destroyed. + * + * access to counter list: + * - create (user context) + * - mlx5_fc_create() only adds to an addlist to be used by + * mlx5_fc_stats_query_work(). addlist is protected by a spinlock. + * - spawn thread to do the actual destroy + * + * - destroy (user context) + * - mark a counter as deleted + * - spawn thread to do the actual del + * + * - dump (user context) + * user should not call dump after destroy + * + * - query (single thread workqueue context) + * destroy/dump - no conflict (see destroy) + * query/dump - packets and bytes might be inconsistent (since update is not + * atomic) + * query/create - no conflict (see create) + * since every create/destroy spawn the work, only after necessary time has + * elapsed, the thread will actually query the hardware. 
+ */ + +static void mlx5_fc_stats_work(struct work_struct *work) +{ + struct mlx5_core_dev *dev = container_of(work, struct mlx5_core_dev, + priv.fc_stats.work.work); + struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats; + unsigned long now = jiffies; + struct mlx5_fc *counter; + struct mlx5_fc *tmp; + int err = 0; + + spin_lock(&fc_stats->addlist_lock); + + list_splice_tail_init(&fc_stats->addlist, &fc_stats->list); + + if (!list_empty(&fc_stats->list)) + queue_delayed_work(fc_stats->wq, &fc_stats->work, MLX5_FC_STATS_PERIOD); + + spin_unlock(&fc_stats->addlist_lock); + + list_for_each_entry_safe(counter, tmp, &fc_stats->list, list) { + struct mlx5_fc_cache *c = &counter->cache; + u64 packets; + u64 bytes; + + if (counter->deleted) { + list_del(&counter->list); + + mlx5_cmd_fc_free(dev, counter->id); + + kfree(counter); + continue; + } + + if (time_before(now, fc_stats->next_query)) + continue; + + err = mlx5_cmd_fc_query(dev, counter->id, &packets, &bytes); + if (err) { + pr_err("Error querying stats for counter id %d\n", + counter->id); + continue; + } + + if (packets == c->packets) + continue; + + c->lastuse = jiffies; + c->packets = packets; + c->bytes = bytes; + } + + if (time_after_eq(now, fc_stats->next_query)) + fc_stats->next_query = now + MLX5_FC_STATS_PERIOD; +} + +struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging) +{ + struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats; + struct mlx5_fc *counter; + int err; + + counter = kzalloc(sizeof(*counter), GFP_KERNEL); + if (!counter) + return ERR_PTR(-ENOMEM); + + err = mlx5_cmd_fc_alloc(dev, &counter->id); + if (err) + goto err_out; + + if (aging) { + counter->aging = true; + + spin_lock(&fc_stats->addlist_lock); + list_add(&counter->list, &fc_stats->addlist); + spin_unlock(&fc_stats->addlist_lock); + + mod_delayed_work(fc_stats->wq, &fc_stats->work, 0); + } + + return counter; + +err_out: + kfree(counter); + + return ERR_PTR(err); +} + +void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter) +{ + struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats; + + if (!counter) + return; + + if (counter->aging) { + counter->deleted = true; + mod_delayed_work(fc_stats->wq, &fc_stats->work, 0); + return; + } + + mlx5_cmd_fc_free(dev, counter->id); + kfree(counter); +} + +int mlx5_init_fc_stats(struct mlx5_core_dev *dev) +{ + struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats; + + INIT_LIST_HEAD(&fc_stats->list); + INIT_LIST_HEAD(&fc_stats->addlist); + spin_lock_init(&fc_stats->addlist_lock); + + fc_stats->wq = create_singlethread_workqueue("mlx5_fc"); + if (!fc_stats->wq) + return -ENOMEM; + + INIT_DELAYED_WORK(&fc_stats->work, mlx5_fc_stats_work); + + return 0; +} + +void mlx5_cleanup_fc_stats(struct mlx5_core_dev *dev) +{ + struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats; + struct mlx5_fc *counter; + struct mlx5_fc *tmp; + + cancel_delayed_work_sync(&dev->priv.fc_stats.work); + destroy_workqueue(dev->priv.fc_stats.wq); + dev->priv.fc_stats.wq = NULL; + + list_splice_tail_init(&fc_stats->addlist, &fc_stats->list); + + list_for_each_entry_safe(counter, tmp, &fc_stats->list, list) { + list_del(&counter->list); + + mlx5_cmd_fc_free(dev, counter->id); + + kfree(counter); + } +} + +void mlx5_fc_query_cached(struct mlx5_fc *counter, + u64 *bytes, u64 *packets, u64 *lastuse) +{ + struct mlx5_fc_cache c; + + c = counter->cache; + + *bytes = c.bytes - counter->lastbytes; + *packets = c.packets - counter->lastpackets; + *lastuse = c.lastuse; + + counter->lastbytes = c.bytes; + counter->lastpackets = 
c.packets; +} diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 9613143f0561..07b504f7eb84 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -41,6 +41,7 @@ #include #include #include +#include #include #include @@ -457,6 +458,17 @@ struct mlx5_irq_info { char name[MLX5_MAX_IRQ_NAME]; }; +struct mlx5_fc_stats { + struct list_head list; + struct list_head addlist; + /* protect addlist add/splice operations */ + spinlock_t addlist_lock; + + struct workqueue_struct *wq; + struct delayed_work work; + unsigned long next_query; +}; + struct mlx5_eswitch; struct mlx5_priv { @@ -520,6 +532,8 @@ struct mlx5_priv { struct mlx5_flow_root_namespace *fdb_root_ns; struct mlx5_flow_root_namespace *esw_egress_root_ns; struct mlx5_flow_root_namespace *esw_ingress_root_ns; + + struct mlx5_fc_stats fc_stats; }; enum mlx5_device_state { diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h index c8b9ede1c20a..4b7a107d9c19 100644 --- a/include/linux/mlx5/fs.h +++ b/include/linux/mlx5/fs.h @@ -127,4 +127,9 @@ int mlx5_modify_rule_destination(struct mlx5_flow_rule *rule, struct mlx5_flow_destination *dest); struct mlx5_fc *mlx5_flow_rule_counter(struct mlx5_flow_rule *rule); +struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging); +void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter); +void mlx5_fc_query_cached(struct mlx5_fc *counter, + u64 *bytes, u64 *packets, u64 *lastuse); + #endif From aad7e08d39bda94aedc594a82576980941306fc9 Mon Sep 17 00:00:00 2001 From: Amir Vadai Date: Fri, 13 May 2016 12:55:42 +0000 Subject: [PATCH 1590/1649] net/mlx5e: Hardware offloaded flower filter statistics support Introduce support in updating statistics of offloaded TC flower classifiers. Currently only the DROP action is supported. Signed-off-by: Amir Vadai Signed-off-by: David S. 
Miller --- .../net/ethernet/mellanox/mlx5/core/en_main.c | 2 + .../net/ethernet/mellanox/mlx5/core/en_tc.c | 71 +++++++++++++++++-- .../net/ethernet/mellanox/mlx5/core/en_tc.h | 3 + 3 files changed, 69 insertions(+), 7 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 08040702824d..fd4392999eee 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -2154,6 +2154,8 @@ static int mlx5e_ndo_setup_tc(struct net_device *dev, u32 handle, return mlx5e_configure_flower(priv, proto, tc->cls_flower); case TC_CLSFLOWER_DESTROY: return mlx5e_delete_flower(priv, tc->cls_flower); + case TC_CLSFLOWER_STATS: + return mlx5e_stats_flower(priv, tc->cls_flower); } default: return -EOPNOTSUPP; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index ef017c0decdc..704c3d30493e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -53,13 +53,24 @@ static struct mlx5_flow_rule *mlx5e_tc_add_flow(struct mlx5e_priv *priv, u32 *match_c, u32 *match_v, u32 action, u32 flow_tag) { - struct mlx5_flow_destination dest = { - .type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE, - {.ft = priv->fs.vlan.ft.t}, - }; + struct mlx5_core_dev *dev = priv->mdev; + struct mlx5_flow_destination dest = { 0 }; + struct mlx5_fc *counter = NULL; struct mlx5_flow_rule *rule; bool table_created = false; + if (action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) { + dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; + dest.ft = priv->fs.vlan.ft.t; + } else { + counter = mlx5_fc_create(dev, true); + if (IS_ERR(counter)) + return ERR_CAST(counter); + + dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; + dest.counter = counter; + } + if (IS_ERR_OR_NULL(priv->fs.tc.t)) { priv->fs.tc.t = mlx5_create_auto_grouped_flow_table(priv->fs.ns, @@ -70,7 +81,8 @@ static struct mlx5_flow_rule *mlx5e_tc_add_flow(struct mlx5e_priv *priv, if (IS_ERR(priv->fs.tc.t)) { netdev_err(priv->netdev, "Failed to create tc offload table\n"); - return ERR_CAST(priv->fs.tc.t); + rule = ERR_CAST(priv->fs.tc.t); + goto err_create_ft; } table_created = true; @@ -79,12 +91,20 @@ static struct mlx5_flow_rule *mlx5e_tc_add_flow(struct mlx5e_priv *priv, rule = mlx5_add_flow_rule(priv->fs.tc.t, MLX5_MATCH_OUTER_HEADERS, match_c, match_v, action, flow_tag, - action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST ? 
&dest : NULL); + &dest); - if (IS_ERR(rule) && table_created) { + if (IS_ERR(rule)) + goto err_add_rule; + + return rule; + +err_add_rule: + if (table_created) { mlx5_destroy_flow_table(priv->fs.tc.t); priv->fs.tc.t = NULL; } +err_create_ft: + mlx5_fc_destroy(dev, counter); return rule; } @@ -92,8 +112,14 @@ static struct mlx5_flow_rule *mlx5e_tc_add_flow(struct mlx5e_priv *priv, static void mlx5e_tc_del_flow(struct mlx5e_priv *priv, struct mlx5_flow_rule *rule) { + struct mlx5_fc *counter = NULL; + + counter = mlx5_flow_rule_counter(rule); + mlx5_del_flow_rule(rule); + mlx5_fc_destroy(priv->mdev, counter); + if (!mlx5e_tc_num_filters(priv)) { mlx5_destroy_flow_table(priv->fs.tc.t); priv->fs.tc.t = NULL; @@ -286,6 +312,9 @@ static int parse_tc_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, if (is_tcf_gact_shot(a)) { *action |= MLX5_FLOW_CONTEXT_ACTION_DROP; + if (MLX5_CAP_FLOWTABLE(priv->mdev, + flow_table_properties_nic_receive.flow_counter)) + *action |= MLX5_FLOW_CONTEXT_ACTION_COUNT; continue; } @@ -394,6 +423,34 @@ int mlx5e_delete_flower(struct mlx5e_priv *priv, return 0; } +int mlx5e_stats_flower(struct mlx5e_priv *priv, + struct tc_cls_flower_offload *f) +{ + struct mlx5e_tc_table *tc = &priv->fs.tc; + struct mlx5e_tc_flow *flow; + struct tc_action *a; + struct mlx5_fc *counter; + u64 bytes; + u64 packets; + u64 lastuse; + + flow = rhashtable_lookup_fast(&tc->ht, &f->cookie, + tc->ht_params); + if (!flow) + return -EINVAL; + + counter = mlx5_flow_rule_counter(flow->rule); + if (!counter) + return 0; + + mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse); + + tc_for_each_action(a, f->exts) + tcf_action_stats_update(a, bytes, packets, lastuse); + + return 0; +} + static const struct rhashtable_params mlx5e_tc_flow_ht_params = { .head_offset = offsetof(struct mlx5e_tc_flow, node), .key_offset = offsetof(struct mlx5e_tc_flow, cookie), diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h index a4f17b974d62..34bf903fc886 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h @@ -43,6 +43,9 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol, int mlx5e_delete_flower(struct mlx5e_priv *priv, struct tc_cls_flower_offload *f); +int mlx5e_stats_flower(struct mlx5e_priv *priv, + struct tc_cls_flower_offload *f); + static inline int mlx5e_tc_num_filters(struct mlx5e_priv *priv) { return atomic_read(&priv->fs.tc.ht.nelems); From a986a05de95b730fd6a00b2b2f82a06d917fd6a2 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Fri, 13 May 2016 15:09:58 +0200 Subject: [PATCH 1591/1649] net: qrtr: fix build problems Having multiple loadable modules with the same name cannot work with modprobe, and having both net/qrtr/smd.ko and drivers/soc/qcom/smd.ko results in a (somewhat cryptic) build error: ERROR: "qcom_smd_driver_unregister" [net/qrtr/smd.ko] undefined! ERROR: "qcom_smd_driver_register" [net/qrtr/smd.ko] undefined! ERROR: "qcom_smd_set_drvdata" [net/qrtr/smd.ko] undefined! ERROR: "qcom_smd_send" [net/qrtr/smd.ko] undefined! ERROR: "qcom_smd_get_drvdata" [net/qrtr/smd.ko] undefined! ERROR: "qcom_smd_driver_unregister" [drivers/soc/qcom/wcnss_ctrl.ko] undefined! ERROR: "qcom_smd_driver_register" [drivers/soc/qcom/wcnss_ctrl.ko] undefined! ERROR: "qcom_smd_set_drvdata" [drivers/soc/qcom/wcnss_ctrl.ko] undefined! ERROR: "qcom_smd_send" [drivers/soc/qcom/wcnss_ctrl.ko] undefined! ERROR: "qcom_smd_get_drvdata" [drivers/soc/qcom/wcnss_ctrl.ko] undefined! 
Also, the qrtr driver uses the SMD interface and has a Kconfig dependency, but also allows for compile-testing when SMD is disabled. However, with QCOM_SMD=m and COMPILE_TEST=y we can end up with QRTR_SMD=y, and that fails with a related link error. This changes the dependency so we can still compile-test the driver but not have it built-in if SMD is a module, to avoid running in the broken configuration, and changes the Makefile to provide the driver under a different module name. Signed-off-by: Arnd Bergmann Fixes: bdabad3e363d ("net: Add Qualcomm IPC router") Signed-off-by: David S. Miller --- net/qrtr/Kconfig | 2 +- net/qrtr/Makefile | 4 +++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/net/qrtr/Kconfig b/net/qrtr/Kconfig index 673fd1f86ebe..b83c6807a5ae 100644 --- a/net/qrtr/Kconfig +++ b/net/qrtr/Kconfig @@ -16,7 +16,7 @@ if QRTR config QRTR_SMD tristate "SMD IPC Router channels" - depends on QCOM_SMD || COMPILE_TEST + depends on QCOM_SMD || (COMPILE_TEST && QCOM_SMD=n) ---help--- Say Y here to support SMD based ipcrouter channels. SMD is the most common transport for IPC Router. diff --git a/net/qrtr/Makefile b/net/qrtr/Makefile index 6c00dc623b7e..ab09e40f7c74 100644 --- a/net/qrtr/Makefile +++ b/net/qrtr/Makefile @@ -1,2 +1,4 @@ obj-$(CONFIG_QRTR) := qrtr.o -obj-$(CONFIG_QRTR_SMD) += smd.o + +obj-$(CONFIG_QRTR_SMD) += qrtr-smd.o +qrtr-smd-y := smd.o From 2632616bc484de9281bc2a1f5b033325783f8a10 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 13 May 2016 06:14:37 -0700 Subject: [PATCH 1592/1649] sock: propagate __sock_cmsg_send() error __sock_cmsg_send() might return different error codes, not only -EINVAL. Fixes: 24025c465f77 ("ipv4: process socket-level control messages in IPv4") Fixes: ad1e46a83716 ("ipv6: process socket-level control messages in IPv6") Signed-off-by: Eric Dumazet Cc: Soheil Hassas Yeganeh Acked-by: Soheil Hassas Yeganeh Signed-off-by: David S. Miller --- net/ipv4/ip_sockglue.c | 5 +++-- net/ipv6/datagram.c | 5 +++-- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index 5805762d7fc7..71a52f4d4cff 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c @@ -247,8 +247,9 @@ int ip_cmsg_send(struct sock *sk, struct msghdr *msg, struct ipcm_cookie *ipc, } #endif if (cmsg->cmsg_level == SOL_SOCKET) { - if (__sock_cmsg_send(sk, msg, cmsg, &ipc->sockc)) - return -EINVAL; + err = __sock_cmsg_send(sk, msg, cmsg, &ipc->sockc); + if (err) + return err; continue; } diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c index 00d0c2903173..37874e2f30ed 100644 --- a/net/ipv6/datagram.c +++ b/net/ipv6/datagram.c @@ -746,8 +746,9 @@ int ip6_datagram_send_ctl(struct net *net, struct sock *sk, } if (cmsg->cmsg_level == SOL_SOCKET) { - if (__sock_cmsg_send(sk, msg, cmsg, sockc)) - return -EINVAL; + err = __sock_cmsg_send(sk, msg, cmsg, sockc); + if (err) + return err; continue; } From 5022524308c64f2954ac206a8781b64a98cddf00 Mon Sep 17 00:00:00 2001 From: Nicolas Dichtel Date: Fri, 13 May 2016 15:25:40 +0200 Subject: [PATCH 1593/1649] netlink: kill nla_put_u64() This function is not used anymore. nla_put_u64_64bit() should be used instead. Signed-off-by: Nicolas Dichtel Signed-off-by: David S.
Miller --- include/net/netlink.h | 14 ++------------ 1 file changed, 2 insertions(+), 12 deletions(-) diff --git a/include/net/netlink.h b/include/net/netlink.h index e589cb3dccee..254a0fc01800 100644 --- a/include/net/netlink.h +++ b/include/net/netlink.h @@ -98,7 +98,8 @@ * nla_put_u8(skb, type, value) add u8 attribute to skb * nla_put_u16(skb, type, value) add u16 attribute to skb * nla_put_u32(skb, type, value) add u32 attribute to skb - * nla_put_u64(skb, type, value) add u64 attribute to skb + * nla_put_u64_64bits(skb, type, + * value, padattr) add u64 attribute to skb * nla_put_s8(skb, type, value) add s8 attribute to skb * nla_put_s16(skb, type, value) add s16 attribute to skb * nla_put_s32(skb, type, value) add s32 attribute to skb @@ -846,17 +847,6 @@ static inline int nla_put_le32(struct sk_buff *skb, int attrtype, __le32 value) return nla_put(skb, attrtype, sizeof(__le32), &value); } -/** - * nla_put_u64 - Add a u64 netlink attribute to a socket buffer - * @skb: socket buffer to add attribute to - * @attrtype: attribute type - * @value: numeric value - */ -static inline int nla_put_u64(struct sk_buff *skb, int attrtype, u64 value) -{ - return nla_put(skb, attrtype, sizeof(u64), &value); -} - /** * nla_put_u64_64bit - Add a u64 netlink attribute to a skb and align it * @skb: socket buffer to add attribute to From ea1627c20c3462168a087ccecc69084b55b9c0b2 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 13 May 2016 09:16:40 -0700 Subject: [PATCH 1594/1649] tcp: minor optimizations around tcp_hdr() usage tcp_hdr() is slightly more expensive than using skb->data in contexts where we know they point to the same byte. In receive path, tcp_v4_rcv() and tcp_v6_rcv() are in this situation, as tcp header has not been pulled yet. In output path, the same can be said when we just pushed the tcp header in the skb, in tcp_transmit_skb() and tcp_make_synack() Also factorize the two checks for tcb->tcp_flags & TCPHDR_SYN in tcp_transmit_skb() and pass tcp header pointer to tcp_ecn_send(), so that compiler can further optimize and avoid a reload. Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- net/ipv4/tcp_ipv4.c | 6 +++--- net/ipv4/tcp_output.c | 30 ++++++++++++++---------------- net/ipv6/tcp_ipv6.c | 6 +++--- 3 files changed, 20 insertions(+), 22 deletions(-) diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 8219d0d8dc83..3708de2a6683 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -1556,9 +1556,9 @@ int tcp_v4_rcv(struct sk_buff *skb) if (!pskb_may_pull(skb, sizeof(struct tcphdr))) goto discard_it; - th = tcp_hdr(skb); + th = (const struct tcphdr *)skb->data; - if (th->doff < sizeof(struct tcphdr) / 4) + if (unlikely(th->doff < sizeof(struct tcphdr) / 4)) goto bad_packet; if (!pskb_may_pull(skb, th->doff * 4)) goto discard_it; @@ -1571,7 +1571,7 @@ int tcp_v4_rcv(struct sk_buff *skb) if (skb_checksum_init(skb, IPPROTO_TCP, inet_compute_pseudo)) goto csum_error; - th = tcp_hdr(skb); + th = (const struct tcphdr *)skb->data; iph = ip_hdr(skb); /* This is tricky : We move IPCB at its correct location into TCP_SKB_CB() * barrier() makes sure compiler wont play fool^Waliasing games. diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index b69d84e7a97d..8bd9911fdd16 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -364,7 +364,7 @@ tcp_ecn_make_synack(const struct request_sock *req, struct tcphdr *th) * be sent. 
*/ static void tcp_ecn_send(struct sock *sk, struct sk_buff *skb, - int tcp_header_len) + struct tcphdr *th, int tcp_header_len) { struct tcp_sock *tp = tcp_sk(sk); @@ -375,7 +375,7 @@ static void tcp_ecn_send(struct sock *sk, struct sk_buff *skb, INET_ECN_xmit(sk); if (tp->ecn_flags & TCP_ECN_QUEUE_CWR) { tp->ecn_flags &= ~TCP_ECN_QUEUE_CWR; - tcp_hdr(skb)->cwr = 1; + th->cwr = 1; skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN; } } else if (!tcp_ca_needs_ecn(sk)) { @@ -383,7 +383,7 @@ static void tcp_ecn_send(struct sock *sk, struct sk_buff *skb, INET_ECN_dontxmit(sk); } if (tp->ecn_flags & TCP_ECN_DEMAND_CWR) - tcp_hdr(skb)->ece = 1; + th->ece = 1; } } @@ -954,7 +954,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, atomic_add(skb->truesize, &sk->sk_wmem_alloc); /* Build TCP header and checksum it. */ - th = tcp_hdr(skb); + th = (struct tcphdr *)skb->data; th->source = inet->inet_sport; th->dest = inet->inet_dport; th->seq = htonl(tcb->seq); @@ -962,14 +962,6 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, *(((__be16 *)th) + 6) = htons(((tcp_header_size >> 2) << 12) | tcb->tcp_flags); - if (unlikely(tcb->tcp_flags & TCPHDR_SYN)) { - /* RFC1323: The window in SYN & SYN/ACK segments - * is never scaled. - */ - th->window = htons(min(tp->rcv_wnd, 65535U)); - } else { - th->window = htons(tcp_select_window(sk)); - } th->check = 0; th->urg_ptr = 0; @@ -986,9 +978,15 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, tcp_options_write((__be32 *)(th + 1), tp, &opts); skb_shinfo(skb)->gso_type = sk->sk_gso_type; - if (likely((tcb->tcp_flags & TCPHDR_SYN) == 0)) - tcp_ecn_send(sk, skb, tcp_header_size); - + if (likely(!(tcb->tcp_flags & TCPHDR_SYN))) { + th->window = htons(tcp_select_window(sk)); + tcp_ecn_send(sk, skb, th, tcp_header_size); + } else { + /* RFC1323: The window in SYN & SYN/ACK segments + * is never scaled. + */ + th->window = htons(min(tp->rcv_wnd, 65535U)); + } #ifdef CONFIG_TCP_MD5SIG /* Calculate the MD5 hash, as we have all we need now */ if (md5) { @@ -3040,7 +3038,7 @@ struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst, skb_push(skb, tcp_header_size); skb_reset_transport_header(skb); - th = tcp_hdr(skb); + th = (struct tcphdr *)skb->data; memset(th, 0, sizeof(struct tcphdr)); th->syn = 1; th->ack = 1; diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index c4efaa97280c..79e33e02f11a 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -1369,9 +1369,9 @@ static int tcp_v6_rcv(struct sk_buff *skb) if (!pskb_may_pull(skb, sizeof(struct tcphdr))) goto discard_it; - th = tcp_hdr(skb); + th = (const struct tcphdr *)skb->data; - if (th->doff < sizeof(struct tcphdr)/4) + if (unlikely(th->doff < sizeof(struct tcphdr)/4)) goto bad_packet; if (!pskb_may_pull(skb, th->doff*4)) goto discard_it; @@ -1379,7 +1379,7 @@ static int tcp_v6_rcv(struct sk_buff *skb) if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo)) goto csum_error; - th = tcp_hdr(skb); + th = (const struct tcphdr *)skb->data; hdr = ipv6_hdr(skb); lookup: From b0e95ccdd77591f108c938bbc702b57554a1665d Mon Sep 17 00:00:00 2001 From: David Ahern Date: Fri, 13 May 2016 12:23:45 -0700 Subject: [PATCH 1595/1649] net: vrf: protect changes to private data with rcu One cpu can be processing packets which includes using the cached route entries in the vrf device's private data and on another cpu the device gets deleted which releases the routes and sets the pointers in net_vrf to NULL. 
This results in datapath dereferencing a NULL pointer. Fix by protecting access to dst's with rcu. Fixes: 193125dbd8eb ("net: Introduce VRF device driver") Fixes: 35402e313663 ("net: Add IPv6 support to VRF device") Signed-off-by: David Ahern Signed-off-by: David S. Miller --- drivers/net/vrf.c | 70 +++++++++++++++++++++++++++++++---------------- 1 file changed, 47 insertions(+), 23 deletions(-) diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c index 0ea29345eb2e..dff08842f26d 100644 --- a/drivers/net/vrf.c +++ b/drivers/net/vrf.c @@ -43,8 +43,8 @@ #define DRV_VERSION "1.0" struct net_vrf { - struct rtable *rth; - struct rt6_info *rt6; + struct rtable __rcu *rth; + struct rt6_info __rcu *rt6; u32 tb_id; }; @@ -273,10 +273,15 @@ static int vrf_output6(struct net *net, struct sock *sk, struct sk_buff *skb) !(IP6CB(skb)->flags & IP6SKB_REROUTED)); } +/* holding rtnl */ static void vrf_rt6_release(struct net_vrf *vrf) { - dst_release(&vrf->rt6->dst); - vrf->rt6 = NULL; + struct rt6_info *rt6 = rtnl_dereference(vrf->rt6); + + rcu_assign_pointer(vrf->rt6, NULL); + + if (rt6) + dst_release(&rt6->dst); } static int vrf_rt6_create(struct net_device *dev) @@ -300,7 +305,8 @@ static int vrf_rt6_create(struct net_device *dev) rt6->rt6i_table = rt6i_table; rt6->dst.output = vrf_output6; - vrf->rt6 = rt6; + rcu_assign_pointer(vrf->rt6, rt6); + rc = 0; out: return rc; @@ -374,29 +380,35 @@ static int vrf_output(struct net *net, struct sock *sk, struct sk_buff *skb) !(IPCB(skb)->flags & IPSKB_REROUTED)); } +/* holding rtnl */ static void vrf_rtable_release(struct net_vrf *vrf) { - struct dst_entry *dst = (struct dst_entry *)vrf->rth; + struct rtable *rth = rtnl_dereference(vrf->rth); - dst_release(dst); - vrf->rth = NULL; + rcu_assign_pointer(vrf->rth, NULL); + + if (rth) + dst_release(&rth->dst); } -static struct rtable *vrf_rtable_create(struct net_device *dev) +static int vrf_rtable_create(struct net_device *dev) { struct net_vrf *vrf = netdev_priv(dev); struct rtable *rth; if (!fib_new_table(dev_net(dev), vrf->tb_id)) - return NULL; + return -ENOMEM; rth = rt_dst_alloc(dev, 0, RTN_UNICAST, 1, 1, 0); - if (rth) { - rth->dst.output = vrf_output; - rth->rt_table_id = vrf->tb_id; - } + if (!rth) + return -ENOMEM; - return rth; + rth->dst.output = vrf_output; + rth->rt_table_id = vrf->tb_id; + + rcu_assign_pointer(vrf->rth, rth); + + return 0; } /**************************** device handling ********************/ @@ -484,8 +496,7 @@ static int vrf_dev_init(struct net_device *dev) goto out_nomem; /* create the default dst which points back to us */ - vrf->rth = vrf_rtable_create(dev); - if (!vrf->rth) + if (vrf_rtable_create(dev) != 0) goto out_stats; if (vrf_rt6_create(dev) != 0) @@ -528,8 +539,13 @@ static struct rtable *vrf_get_rtable(const struct net_device *dev, if (!(fl4->flowi4_flags & FLOWI_FLAG_L3MDEV_SRC)) { struct net_vrf *vrf = netdev_priv(dev); - rth = vrf->rth; - dst_hold(&rth->dst); + rcu_read_lock(); + + rth = rcu_dereference(vrf->rth); + if (likely(rth)) + dst_hold(&rth->dst); + + rcu_read_unlock(); } return rth; @@ -665,16 +681,24 @@ static struct sk_buff *vrf_l3_rcv(struct net_device *vrf_dev, static struct dst_entry *vrf_get_rt6_dst(const struct net_device *dev, const struct flowi6 *fl6) { - struct rt6_info *rt = NULL; + struct dst_entry *dst = NULL; if (!(fl6->flowi6_flags & FLOWI_FLAG_L3MDEV_SRC)) { struct net_vrf *vrf = netdev_priv(dev); + struct rt6_info *rt; - rt = vrf->rt6; - dst_hold(&rt->dst); + rcu_read_lock(); + + rt = rcu_dereference(vrf->rt6); + if (likely(rt)) { + dst = 
&rt->dst; + dst_hold(dst); + } + + rcu_read_unlock(); } - return (struct dst_entry *)rt; + return dst; } #endif From 553eb544444e28749e2d752dee11e2ae4a3ecfb6 Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Fri, 13 May 2016 20:38:23 -0400 Subject: [PATCH 1596/1649] net: dsa: mv88e6xxx: remove bridge work Now that the bridge code defers the switchdev port state setting, there is no need to defer the port STP state change within the mv88e6xxx code. Thus get rid of the driver's bridge work code. This also fixes a race condition where the DSA layer assumes that the bridge code already set the unbridged port's STP state to Disabled before restoring the Forwarding state. As a consequence, this also fixes the FDB flush for the unbridged port which now correctly occurs during the Forwarding to Disabled transition. Fixes: 0bc05d585d38 ("switchdev: allow caller to explicitly request attr_set as deferred") Reported-by: Andrew Lunn Signed-off-by: Vivien Didelot Signed-off-by: David S. Miller --- drivers/net/dsa/mv88e6xxx.c | 37 ++++++++----------------------------- drivers/net/dsa/mv88e6xxx.h | 5 ----- 2 files changed, 8 insertions(+), 34 deletions(-) diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c index a3f0e7ec4067..ba9dfc9421ef 100644 --- a/drivers/net/dsa/mv88e6xxx.c +++ b/drivers/net/dsa/mv88e6xxx.c @@ -1373,6 +1373,7 @@ static void mv88e6xxx_port_stp_state_set(struct dsa_switch *ds, int port, { struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); int stp_state; + int err; if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_PORTSTATE)) return; @@ -1394,12 +1395,13 @@ static void mv88e6xxx_port_stp_state_set(struct dsa_switch *ds, int port, break; } - /* mv88e6xxx_port_stp_state_set may be called with softirqs disabled, - * so we can not update the port state directly but need to schedule it. 
- */ - ps->ports[port].state = stp_state; - set_bit(port, ps->port_state_update_mask); - schedule_work(&ps->bridge_work); + mutex_lock(&ps->smi_mutex); + err = _mv88e6xxx_port_state(ps, port, stp_state); + mutex_unlock(&ps->smi_mutex); + + if (err) + netdev_err(ds->ports[port], "failed to update state to %s\n", + mv88e6xxx_port_state_names[stp_state]); } static int _mv88e6xxx_port_pvid(struct mv88e6xxx_priv_state *ps, int port, @@ -2535,27 +2537,6 @@ static void mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port) mutex_unlock(&ps->smi_mutex); } -static void mv88e6xxx_bridge_work(struct work_struct *work) -{ - struct mv88e6xxx_priv_state *ps; - struct dsa_switch *ds; - int port; - - ps = container_of(work, struct mv88e6xxx_priv_state, bridge_work); - ds = ps->ds; - - mutex_lock(&ps->smi_mutex); - - for (port = 0; port < ps->info->num_ports; ++port) - if (test_and_clear_bit(port, ps->port_state_update_mask) && - _mv88e6xxx_port_state(ps, port, ps->ports[port].state)) - netdev_warn(ds->ports[port], - "failed to update state to %s\n", - mv88e6xxx_port_state_names[ps->ports[port].state]); - - mutex_unlock(&ps->smi_mutex); -} - static int _mv88e6xxx_phy_page_write(struct mv88e6xxx_priv_state *ps, int port, int page, int reg, int val) { @@ -3145,8 +3126,6 @@ static int mv88e6xxx_setup(struct dsa_switch *ds) ps->ds = ds; - INIT_WORK(&ps->bridge_work, mv88e6xxx_bridge_work); - if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_EEPROM)) mutex_init(&ps->eeprom_mutex); diff --git a/drivers/net/dsa/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx.h index 40e8721ecfb1..36d0e1504de1 100644 --- a/drivers/net/dsa/mv88e6xxx.h +++ b/drivers/net/dsa/mv88e6xxx.h @@ -543,7 +543,6 @@ struct mv88e6xxx_vtu_stu_entry { struct mv88e6xxx_priv_port { struct net_device *bridge_dev; - u8 state; }; struct mv88e6xxx_priv_state { @@ -593,10 +592,6 @@ struct mv88e6xxx_priv_state { struct mv88e6xxx_priv_port ports[DSA_MAX_PORTS]; - DECLARE_BITMAP(port_state_update_mask, DSA_MAX_PORTS); - - struct work_struct bridge_work; - /* A switch may have a GPIO line tied to its reset pin. Parse * this from the device tree, and use it before performing * switch soft reset. From 4936e3528e3e272c567fe4ff0abb7ce3e1500575 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Fri, 13 May 2016 19:08:26 +0200 Subject: [PATCH 1597/1649] bpf: minor cleanups in ebpf code Besides others, remove redundant comments where the code is self documenting enough, and properly indent various bpf_verifier_ops and bpf_prog_type_list declarations. Moreover, remove two exports that actually have no module user. Signed-off-by: Daniel Borkmann Acked-by: Alexei Starovoitov Signed-off-by: David S. 
Miller --- kernel/bpf/core.c | 2 -- net/core/filter.c | 34 +++++++++++++++------------------- 2 files changed, 15 insertions(+), 21 deletions(-) diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index d781b077431f..5313d09d4b62 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -129,14 +129,12 @@ struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size, return fp; } -EXPORT_SYMBOL_GPL(bpf_prog_realloc); void __bpf_prog_free(struct bpf_prog *fp) { kfree(fp->aux); vfree(fp); } -EXPORT_SYMBOL_GPL(__bpf_prog_free); #ifdef CONFIG_BPF_JIT struct bpf_binary_header * diff --git a/net/core/filter.c b/net/core/filter.c index 71c2a1f473ad..ea51b479cf02 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -2069,16 +2069,12 @@ tc_cls_act_func_proto(enum bpf_func_id func_id) static bool __is_valid_access(int off, int size, enum bpf_access_type type) { - /* check bounds */ if (off < 0 || off >= sizeof(struct __sk_buff)) return false; - - /* disallow misaligned access */ + /* The verifier guarantees that size > 0. */ if (off % size != 0) return false; - - /* all __sk_buff fields are __u32 */ - if (size != 4) + if (size != sizeof(__u32)) return false; return true; @@ -2097,7 +2093,7 @@ static bool sk_filter_is_valid_access(int off, int size, if (type == BPF_WRITE) { switch (off) { case offsetof(struct __sk_buff, cb[0]) ... - offsetof(struct __sk_buff, cb[4]): + offsetof(struct __sk_buff, cb[4]): break; default: return false; @@ -2278,30 +2274,30 @@ static u32 bpf_net_convert_ctx_access(enum bpf_access_type type, int dst_reg, } static const struct bpf_verifier_ops sk_filter_ops = { - .get_func_proto = sk_filter_func_proto, - .is_valid_access = sk_filter_is_valid_access, - .convert_ctx_access = bpf_net_convert_ctx_access, + .get_func_proto = sk_filter_func_proto, + .is_valid_access = sk_filter_is_valid_access, + .convert_ctx_access = bpf_net_convert_ctx_access, }; static const struct bpf_verifier_ops tc_cls_act_ops = { - .get_func_proto = tc_cls_act_func_proto, - .is_valid_access = tc_cls_act_is_valid_access, - .convert_ctx_access = bpf_net_convert_ctx_access, + .get_func_proto = tc_cls_act_func_proto, + .is_valid_access = tc_cls_act_is_valid_access, + .convert_ctx_access = bpf_net_convert_ctx_access, }; static struct bpf_prog_type_list sk_filter_type __read_mostly = { - .ops = &sk_filter_ops, - .type = BPF_PROG_TYPE_SOCKET_FILTER, + .ops = &sk_filter_ops, + .type = BPF_PROG_TYPE_SOCKET_FILTER, }; static struct bpf_prog_type_list sched_cls_type __read_mostly = { - .ops = &tc_cls_act_ops, - .type = BPF_PROG_TYPE_SCHED_CLS, + .ops = &tc_cls_act_ops, + .type = BPF_PROG_TYPE_SCHED_CLS, }; static struct bpf_prog_type_list sched_act_type __read_mostly = { - .ops = &tc_cls_act_ops, - .type = BPF_PROG_TYPE_SCHED_ACT, + .ops = &tc_cls_act_ops, + .type = BPF_PROG_TYPE_SCHED_ACT, }; static int __init register_sk_filter_ops(void) From c94987e40ebbae3b7b6c3ece37b6f8338830f6b1 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Fri, 13 May 2016 19:08:27 +0200 Subject: [PATCH 1598/1649] bpf: move bpf_jit_enable declaration Move the bpf_jit_enable declaration to the filter.h file where most other core code is declared, also since we're going to add a second knob there. Signed-off-by: Daniel Borkmann Acked-by: Alexei Starovoitov Signed-off-by: David S. 
Miller --- include/linux/filter.h | 2 ++ include/linux/netdevice.h | 1 - 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/include/linux/filter.h b/include/linux/filter.h index ec1411c89105..4ff0e647598f 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -496,6 +496,8 @@ void bpf_int_jit_compile(struct bpf_prog *fp); bool bpf_helper_changes_skb_data(void *func); #ifdef CONFIG_BPF_JIT +extern int bpf_jit_enable; + typedef void (*bpf_jit_fill_hole_t)(void *area, unsigned int size); struct bpf_binary_header * diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index c2f5112f08f7..c148edfe4965 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -3759,7 +3759,6 @@ void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64, extern int netdev_max_backlog; extern int netdev_tstamp_prequeue; extern int weight_p; -extern int bpf_jit_enable; bool netdev_has_upper_dev(struct net_device *dev, struct net_device *upper_dev); struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev, From 6077776b5908e0493a3946f7d3bc63871b201e87 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Fri, 13 May 2016 19:08:28 +0200 Subject: [PATCH 1599/1649] bpf: split HAVE_BPF_JIT into cBPF and eBPF variant Split the HAVE_BPF_JIT into two for distinguishing cBPF and eBPF JITs. Current cBPF ones: # git grep -n HAVE_CBPF_JIT arch/ arch/arm/Kconfig:44: select HAVE_CBPF_JIT arch/mips/Kconfig:18: select HAVE_CBPF_JIT if !CPU_MICROMIPS arch/powerpc/Kconfig:129: select HAVE_CBPF_JIT arch/sparc/Kconfig:35: select HAVE_CBPF_JIT Current eBPF ones: # git grep -n HAVE_EBPF_JIT arch/ arch/arm64/Kconfig:61: select HAVE_EBPF_JIT arch/s390/Kconfig:126: select HAVE_EBPF_JIT if PACK_STACK && HAVE_MARCH_Z196_FEATURES arch/x86/Kconfig:94: select HAVE_EBPF_JIT if X86_64 Later code also needs this facility to check for eBPF JITs. Signed-off-by: Daniel Borkmann Acked-by: Alexei Starovoitov Signed-off-by: David S. 
Miller --- arch/arm/Kconfig | 2 +- arch/arm64/Kconfig | 2 +- arch/mips/Kconfig | 2 +- arch/powerpc/Kconfig | 2 +- arch/s390/Kconfig | 2 +- arch/sparc/Kconfig | 2 +- arch/x86/Kconfig | 2 +- net/Kconfig | 14 +++++++++++--- 8 files changed, 18 insertions(+), 10 deletions(-) diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index cdfa6c2b7626..2315b0d1b4f4 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -41,7 +41,7 @@ config ARM select HAVE_ARCH_SECCOMP_FILTER if (AEABI && !OABI_COMPAT) select HAVE_ARCH_TRACEHOOK select HAVE_ARM_SMCCC if CPU_V7 - select HAVE_BPF_JIT + select HAVE_CBPF_JIT select HAVE_CC_STACKPROTECTOR select HAVE_CONTEXT_TRACKING select HAVE_C_RECORDMCOUNT diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 4f436220384f..e6761ea2feec 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -58,7 +58,7 @@ config ARM64 select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT select HAVE_ARCH_SECCOMP_FILTER select HAVE_ARCH_TRACEHOOK - select HAVE_BPF_JIT + select HAVE_EBPF_JIT select HAVE_C_RECORDMCOUNT select HAVE_CC_STACKPROTECTOR select HAVE_CMPXCHG_DOUBLE diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index 2018c2b0e078..3ee1ea61b2dc 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -15,7 +15,7 @@ config MIPS select HAVE_ARCH_KGDB select HAVE_ARCH_SECCOMP_FILTER select HAVE_ARCH_TRACEHOOK - select HAVE_BPF_JIT if !CPU_MICROMIPS + select HAVE_CBPF_JIT if !CPU_MICROMIPS select HAVE_FUNCTION_TRACER select HAVE_DYNAMIC_FTRACE select HAVE_FTRACE_MCOUNT_RECORD diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 7cd32c038286..2fdb73d9198a 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -126,7 +126,7 @@ config PPC select IRQ_FORCED_THREADING select HAVE_RCU_TABLE_FREE if SMP select HAVE_SYSCALL_TRACEPOINTS - select HAVE_BPF_JIT + select HAVE_CBPF_JIT select HAVE_ARCH_JUMP_LABEL select ARCH_HAVE_NMI_SAFE_CMPXCHG select ARCH_HAS_GCOV_PROFILE_ALL diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index bf24ab188921..a883981c0174 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -126,7 +126,7 @@ config S390 select HAVE_ARCH_SOFT_DIRTY select HAVE_ARCH_TRACEHOOK select HAVE_ARCH_TRANSPARENT_HUGEPAGE - select HAVE_BPF_JIT if PACK_STACK && HAVE_MARCH_Z196_FEATURES + select HAVE_EBPF_JIT if PACK_STACK && HAVE_MARCH_Z196_FEATURES select HAVE_CMPXCHG_DOUBLE select HAVE_CMPXCHG_LOCAL select HAVE_DEBUG_KMEMLEAK diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index 57ffaf285c2f..d5003812c748 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig @@ -32,7 +32,7 @@ config SPARC select ARCH_WANT_IPC_PARSE_VERSION select GENERIC_PCI_IOMAP select HAVE_NMI_WATCHDOG if SPARC64 - select HAVE_BPF_JIT + select HAVE_CBPF_JIT select HAVE_DEBUG_BUGVERBOSE select GENERIC_SMP_IDLE_THREAD select GENERIC_CLOCKEVENTS diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 2dc18605831f..ae83046d51a8 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -91,7 +91,7 @@ config X86 select HAVE_ARCH_SOFT_DIRTY if X86_64 select HAVE_ARCH_TRACEHOOK select HAVE_ARCH_TRANSPARENT_HUGEPAGE - select HAVE_BPF_JIT if X86_64 + select HAVE_EBPF_JIT if X86_64 select HAVE_CC_STACKPROTECTOR select HAVE_CMPXCHG_DOUBLE select HAVE_CMPXCHG_LOCAL diff --git a/net/Kconfig b/net/Kconfig index b841c42e5c9b..f7148f24f114 100644 --- a/net/Kconfig +++ b/net/Kconfig @@ -289,7 +289,7 @@ config BQL config BPF_JIT bool "enable BPF Just In Time compiler" - depends on HAVE_BPF_JIT + depends on HAVE_CBPF_JIT || HAVE_EBPF_JIT depends on MODULES ---help--- Berkeley Packet Filter filtering 
capabilities are normally handled @@ -419,6 +419,14 @@ config MAY_USE_DEVLINK endif # if NET -# Used by archs to tell that they support BPF_JIT -config HAVE_BPF_JIT +# Used by archs to tell that they support BPF JIT compiler plus which flavour. +# Only one of the two can be selected for a specific arch since eBPF JIT supersedes +# the cBPF JIT. + +# Classic BPF JIT (cBPF) +config HAVE_CBPF_JIT + bool + +# Extended BPF JIT (eBPF) +config HAVE_EBPF_JIT bool From 93a73d442d370e20ed1009cd79cb29c4d7c0ee86 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Fri, 13 May 2016 19:08:29 +0200 Subject: [PATCH 1600/1649] bpf, x86/arm64: remove useless checks on prog There is never such a situation, where bpf_int_jit_compile() is called with either prog as NULL or len as 0, so the tests are unnecessary and confusing as people would just copy them. s390 doesn't have them, so no change is needed there. Signed-off-by: Daniel Borkmann Acked-by: Alexei Starovoitov Signed-off-by: David S. Miller --- arch/arm64/net/bpf_jit_comp.c | 3 --- arch/x86/net/bpf_jit_comp.c | 3 --- 2 files changed, 6 deletions(-) diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c index b405bbb54431..ef35e866caf7 100644 --- a/arch/arm64/net/bpf_jit_comp.c +++ b/arch/arm64/net/bpf_jit_comp.c @@ -772,9 +772,6 @@ void bpf_int_jit_compile(struct bpf_prog *prog) if (!bpf_jit_enable) return; - if (!prog || !prog->len) - return; - memset(&ctx, 0, sizeof(ctx)); ctx.prog = prog; diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c index 4286f3618bd0..f5bfd4fd28dd 100644 --- a/arch/x86/net/bpf_jit_comp.c +++ b/arch/x86/net/bpf_jit_comp.c @@ -1086,9 +1086,6 @@ void bpf_int_jit_compile(struct bpf_prog *prog) if (!bpf_jit_enable) return; - if (!prog || !prog->len) - return; - addrs = kmalloc(prog->len * sizeof(*addrs), GFP_KERNEL); if (!addrs) return; From c237ee5eb33bf19fe0591c04ff8db19da7323a83 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Fri, 13 May 2016 19:08:30 +0200 Subject: [PATCH 1601/1649] bpf: add bpf_patch_insn_single helper Move the functionality to patch instructions out of the verifier code and into the core as the new bpf_patch_insn_single() helper will be needed later on for blinding as well. No changes in functionality. Signed-off-by: Daniel Borkmann Acked-by: Alexei Starovoitov Signed-off-by: David S. Miller --- include/linux/filter.h | 3 ++ kernel/bpf/core.c | 71 ++++++++++++++++++++++++++++++++++++++++++ kernel/bpf/verifier.c | 53 ++++++------------------------- 3 files changed, 83 insertions(+), 44 deletions(-) diff --git a/include/linux/filter.h b/include/linux/filter.h index 4ff0e647598f..c4aae496f376 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -495,6 +495,9 @@ u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); void bpf_int_jit_compile(struct bpf_prog *fp); bool bpf_helper_changes_skb_data(void *func); +struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off, + const struct bpf_insn *patch, u32 len); + #ifdef CONFIG_BPF_JIT extern int bpf_jit_enable; diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index 5313d09d4b62..49b5538a5301 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -136,6 +136,77 @@ void __bpf_prog_free(struct bpf_prog *fp) vfree(fp); } +static bool bpf_is_jmp_and_has_target(const struct bpf_insn *insn) +{ + return BPF_CLASS(insn->code) == BPF_JMP && + /* Call and Exit are both special jumps with no + * target inside the BPF instruction image. 
+ */ + BPF_OP(insn->code) != BPF_CALL && + BPF_OP(insn->code) != BPF_EXIT; +} + +static void bpf_adj_branches(struct bpf_prog *prog, u32 pos, u32 delta) +{ + struct bpf_insn *insn = prog->insnsi; + u32 i, insn_cnt = prog->len; + + for (i = 0; i < insn_cnt; i++, insn++) { + if (!bpf_is_jmp_and_has_target(insn)) + continue; + + /* Adjust offset of jmps if we cross boundaries. */ + if (i < pos && i + insn->off + 1 > pos) + insn->off += delta; + else if (i > pos + delta && i + insn->off + 1 <= pos + delta) + insn->off -= delta; + } +} + +struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off, + const struct bpf_insn *patch, u32 len) +{ + u32 insn_adj_cnt, insn_rest, insn_delta = len - 1; + struct bpf_prog *prog_adj; + + /* Since our patchlet doesn't expand the image, we're done. */ + if (insn_delta == 0) { + memcpy(prog->insnsi + off, patch, sizeof(*patch)); + return prog; + } + + insn_adj_cnt = prog->len + insn_delta; + + /* Several new instructions need to be inserted. Make room + * for them. Likely, there's no need for a new allocation as + * last page could have large enough tailroom. + */ + prog_adj = bpf_prog_realloc(prog, bpf_prog_size(insn_adj_cnt), + GFP_USER); + if (!prog_adj) + return NULL; + + prog_adj->len = insn_adj_cnt; + + /* Patching happens in 3 steps: + * + * 1) Move over tail of insnsi from next instruction onwards, + * so we can patch the single target insn with one or more + * new ones (patching is always from 1 to n insns, n > 0). + * 2) Inject new instructions at the target location. + * 3) Adjust branch offsets if necessary. + */ + insn_rest = insn_adj_cnt - off - len; + + memmove(prog_adj->insnsi + off + len, prog_adj->insnsi + off + 1, + sizeof(*patch) * insn_rest); + memcpy(prog_adj->insnsi + off, patch, sizeof(*patch) * len); + + bpf_adj_branches(prog_adj, off, insn_delta); + + return prog_adj; +} + #ifdef CONFIG_BPF_JIT struct bpf_binary_header * bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr, diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 84bff68cf80e..a08d66215245 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -2587,26 +2587,6 @@ static void convert_pseudo_ld_imm64(struct verifier_env *env) insn->src_reg = 0; } -static void adjust_branches(struct bpf_prog *prog, int pos, int delta) -{ - struct bpf_insn *insn = prog->insnsi; - int insn_cnt = prog->len; - int i; - - for (i = 0; i < insn_cnt; i++, insn++) { - if (BPF_CLASS(insn->code) != BPF_JMP || - BPF_OP(insn->code) == BPF_CALL || - BPF_OP(insn->code) == BPF_EXIT) - continue; - - /* adjust offset of jmps if necessary */ - if (i < pos && i + insn->off + 1 > pos) - insn->off += delta; - else if (i > pos + delta && i + insn->off + 1 <= pos + delta) - insn->off -= delta; - } -} - /* convert load instructions that access fields of 'struct __sk_buff' * into sequence of instructions that access fields of 'struct sk_buff' */ @@ -2616,14 +2596,15 @@ static int convert_ctx_accesses(struct verifier_env *env) int insn_cnt = env->prog->len; struct bpf_insn insn_buf[16]; struct bpf_prog *new_prog; - u32 cnt; - int i; enum bpf_access_type type; + int i; if (!env->prog->aux->ops->convert_ctx_access) return 0; for (i = 0; i < insn_cnt; i++, insn++) { + u32 insn_delta, cnt; + if (insn->code == (BPF_LDX | BPF_MEM | BPF_W)) type = BPF_READ; else if (insn->code == (BPF_STX | BPF_MEM | BPF_W)) @@ -2645,34 +2626,18 @@ static int convert_ctx_accesses(struct verifier_env *env) return -EINVAL; } - if (cnt == 1) { - memcpy(insn, insn_buf, sizeof(*insn)); - continue; - } - - /* 
several new insns need to be inserted. Make room for them */ - insn_cnt += cnt - 1; - new_prog = bpf_prog_realloc(env->prog, - bpf_prog_size(insn_cnt), - GFP_USER); + new_prog = bpf_patch_insn_single(env->prog, i, insn_buf, cnt); if (!new_prog) return -ENOMEM; - new_prog->len = insn_cnt; - - memmove(new_prog->insnsi + i + cnt, new_prog->insns + i + 1, - sizeof(*insn) * (insn_cnt - i - cnt)); - - /* copy substitute insns in place of load instruction */ - memcpy(new_prog->insnsi + i, insn_buf, sizeof(*insn) * cnt); - - /* adjust branches in the whole program */ - adjust_branches(new_prog, i, cnt - 1); + insn_delta = cnt - 1; /* keep walking new program and skip insns we just inserted */ env->prog = new_prog; - insn = new_prog->insnsi + i + cnt - 1; - i += cnt - 1; + insn = new_prog->insnsi + i + insn_delta; + + insn_cnt += insn_delta; + i += insn_delta; } return 0; From d1c55ab5e41fcd72cb0a8bef86d3f652ad9ad9f5 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Fri, 13 May 2016 19:08:31 +0200 Subject: [PATCH 1602/1649] bpf: prepare bpf_int_jit_compile/bpf_prog_select_runtime apis Since the blinding is strictly only called from inside eBPF JITs, we need to change signatures for bpf_int_jit_compile() and bpf_prog_select_runtime() first in order to prepare that the eBPF program we're dealing with can change underneath. Hence, for call sites, we need to return the latest prog. No functional change in this patch. Signed-off-by: Daniel Borkmann Acked-by: Alexei Starovoitov Signed-off-by: David S. Miller --- arch/arm64/net/bpf_jit_comp.c | 7 ++++--- arch/s390/net/bpf_jit_comp.c | 8 +++++--- arch/x86/net/bpf_jit_comp.c | 7 ++++--- include/linux/filter.h | 5 +++-- kernel/bpf/core.c | 18 ++++++++++++++---- kernel/bpf/syscall.c | 2 +- lib/test_bpf.c | 5 ++++- net/core/filter.c | 6 +++++- 8 files changed, 40 insertions(+), 18 deletions(-) diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c index ef35e866caf7..dd428807cb30 100644 --- a/arch/arm64/net/bpf_jit_comp.c +++ b/arch/arm64/net/bpf_jit_comp.c @@ -762,7 +762,7 @@ void bpf_jit_compile(struct bpf_prog *prog) /* Nothing to do here. We support Internal BPF. */ } -void bpf_int_jit_compile(struct bpf_prog *prog) +struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) { struct bpf_binary_header *header; struct jit_ctx ctx; @@ -770,14 +770,14 @@ void bpf_int_jit_compile(struct bpf_prog *prog) u8 *image_ptr; if (!bpf_jit_enable) - return; + return prog; memset(&ctx, 0, sizeof(ctx)); ctx.prog = prog; ctx.offset = kcalloc(prog->len, sizeof(int), GFP_KERNEL); if (ctx.offset == NULL) - return; + return prog; /* 1. Initial fake pass to compute ctx->idx. 
*/ @@ -828,6 +828,7 @@ void bpf_int_jit_compile(struct bpf_prog *prog) prog->jited = 1; out: kfree(ctx.offset); + return prog; } void bpf_jit_free(struct bpf_prog *prog) diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c index 3c0bfc1f2694..fcf301a889e7 100644 --- a/arch/s390/net/bpf_jit_comp.c +++ b/arch/s390/net/bpf_jit_comp.c @@ -1262,18 +1262,19 @@ void bpf_jit_compile(struct bpf_prog *fp) /* * Compile eBPF program "fp" */ -void bpf_int_jit_compile(struct bpf_prog *fp) +struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp) { struct bpf_binary_header *header; struct bpf_jit jit; int pass; if (!bpf_jit_enable) - return; + return fp; + memset(&jit, 0, sizeof(jit)); jit.addrs = kcalloc(fp->len + 1, sizeof(*jit.addrs), GFP_KERNEL); if (jit.addrs == NULL) - return; + return fp; /* * Three initial passes: * - 1/2: Determine clobbered registers @@ -1305,6 +1306,7 @@ void bpf_int_jit_compile(struct bpf_prog *fp) } free_addrs: kfree(jit.addrs); + return fp; } /* diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c index f5bfd4fd28dd..6b2d23ea3590 100644 --- a/arch/x86/net/bpf_jit_comp.c +++ b/arch/x86/net/bpf_jit_comp.c @@ -1073,7 +1073,7 @@ void bpf_jit_compile(struct bpf_prog *prog) { } -void bpf_int_jit_compile(struct bpf_prog *prog) +struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) { struct bpf_binary_header *header = NULL; int proglen, oldproglen = 0; @@ -1084,11 +1084,11 @@ void bpf_int_jit_compile(struct bpf_prog *prog) int i; if (!bpf_jit_enable) - return; + return prog; addrs = kmalloc(prog->len * sizeof(*addrs), GFP_KERNEL); if (!addrs) - return; + return prog; /* Before first pass, make a rough estimation of addrs[] * each bpf instruction is translated to less than 64 bytes @@ -1140,6 +1140,7 @@ void bpf_int_jit_compile(struct bpf_prog *prog) } out: kfree(addrs); + return prog; } void bpf_jit_free(struct bpf_prog *fp) diff --git a/include/linux/filter.h b/include/linux/filter.h index c4aae496f376..891852cf7716 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -458,7 +458,7 @@ static inline void bpf_prog_unlock_ro(struct bpf_prog *fp) int sk_filter(struct sock *sk, struct sk_buff *skb); -int bpf_prog_select_runtime(struct bpf_prog *fp); +struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err); void bpf_prog_free(struct bpf_prog *fp); struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags); @@ -492,7 +492,8 @@ bool sk_filter_charge(struct sock *sk, struct sk_filter *fp); void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp); u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); -void bpf_int_jit_compile(struct bpf_prog *fp); + +struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog); bool bpf_helper_changes_skb_data(void *func); struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off, diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index 49b5538a5301..70f0821aca47 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -761,15 +761,22 @@ static int bpf_check_tail_call(const struct bpf_prog *fp) /** * bpf_prog_select_runtime - select exec runtime for BPF program * @fp: bpf_prog populated with internal BPF program + * @err: pointer to error variable * * Try to JIT eBPF program, if JIT is not available, use interpreter. * The BPF program will be executed via BPF_PROG_RUN() macro. 
*/ -int bpf_prog_select_runtime(struct bpf_prog *fp) +struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err) { fp->bpf_func = (void *) __bpf_prog_run; - bpf_int_jit_compile(fp); + /* eBPF JITs can rewrite the program in case constant + * blinding is active. However, in case of error during + * blinding, bpf_int_jit_compile() must always return a + * valid program, which in this case would simply not + * be JITed, but falls back to the interpreter. + */ + fp = bpf_int_jit_compile(fp); bpf_prog_lock_ro(fp); /* The tail call compatibility check can only be done at @@ -777,7 +784,9 @@ int bpf_prog_select_runtime(struct bpf_prog *fp) * with JITed or non JITed program concatenations and not * all eBPF JITs might immediately support all features. */ - return bpf_check_tail_call(fp); + *err = bpf_check_tail_call(fp); + + return fp; } EXPORT_SYMBOL_GPL(bpf_prog_select_runtime); @@ -859,8 +868,9 @@ const struct bpf_func_proto bpf_tail_call_proto = { }; /* For classic BPF JITs that don't implement bpf_int_jit_compile(). */ -void __weak bpf_int_jit_compile(struct bpf_prog *prog) +struct bpf_prog * __weak bpf_int_jit_compile(struct bpf_prog *prog) { + return prog; } bool __weak bpf_helper_changes_skb_data(void *func) diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index cf5e9f7ad13a..46ecce4b79ed 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -762,7 +762,7 @@ static int bpf_prog_load(union bpf_attr *attr) fixup_bpf_calls(prog); /* eBPF program is ready to be JITed */ - err = bpf_prog_select_runtime(prog); + prog = bpf_prog_select_runtime(prog, &err); if (err < 0) goto free_used_maps; diff --git a/lib/test_bpf.c b/lib/test_bpf.c index 8f22fbedc3a6..93f45011a59d 100644 --- a/lib/test_bpf.c +++ b/lib/test_bpf.c @@ -5621,7 +5621,10 @@ static struct bpf_prog *generate_filter(int which, int *err) fp->type = BPF_PROG_TYPE_SOCKET_FILTER; memcpy(fp->insnsi, fptr, fp->len * sizeof(struct bpf_insn)); - bpf_prog_select_runtime(fp); + /* We cannot error here as we don't need type compatibility + * checks. + */ + fp = bpf_prog_select_runtime(fp, err); break; } diff --git a/net/core/filter.c b/net/core/filter.c index ea51b479cf02..68adb5f52110 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -994,7 +994,11 @@ static struct bpf_prog *bpf_migrate_filter(struct bpf_prog *fp) */ goto out_err_free; - bpf_prog_select_runtime(fp); + /* We are guaranteed to never error here with cBPF to eBPF + * transitions, since there's no issue with type compatibility + * checks on program arrays. + */ + fp = bpf_prog_select_runtime(fp, &err); kfree(old_prog); return fp; From 4f3446bb809f20ad56cadf712e6006815ae7a8f9 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Fri, 13 May 2016 19:08:32 +0200 Subject: [PATCH 1603/1649] bpf: add generic constant blinding for use in jits This work adds a generic facility for use from eBPF JIT compilers that allows for further hardening of JIT generated images through blinding constants. In response to the original work on BPF JIT spraying published by Keegan McAllister [1], most BPF JITs were changed to make images read-only and start at a randomized offset in the page, where the rest was filled with trap instructions. We have this nowadays in x86, arm, arm64 and s390 JIT compilers. Additionally, later work also made eBPF interpreter images read only for kernels supporting DEBUG_SET_MODULE_RONX, that is, x86, arm, arm64 and s390 archs as well currently. This is done by default for mentioned JITs when JITing is enabled. 
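As a rough illustration of that existing mitigation (a simplified sketch only, assuming a flat image buffer; the helper name is made up and this is not the actual bpf_jit_binary_alloc() code, which additionally aligns the chosen start offset), the randomized start plus trap-instruction fill boils down to:

#include <linux/types.h>
#include <linux/random.h>
#include <linux/string.h>

/* Fill the whole JIT image with trap instructions and return a random
 * start address for the emitted code, so that a stray jump into the
 * unused part of the image traps instead of executing attacker-chosen
 * bytes.
 */
static u8 *jit_image_random_start(u8 *image, unsigned int image_size,
				  unsigned int prog_size)
{
	unsigned int hole = image_size - prog_size;
	unsigned int start = prandom_u32() % (hole + 1);

	memset(image, 0xcc, image_size);	/* 0xcc is the x86 int3 trap */

	return image + start;
}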
Furthermore, we had a generic and configurable constant blinding facility on our todo for quite some time now to further make spraying harder, with a first implementation from around netconf 2016. We found that for systems where untrusted users can load cBPF/eBPF code and the JIT is enabled, start offset randomization helps a bit to make jumps into crafted payload harder, but when larger programs that cross a page boundary are injected, we again have some part of the program opcodes at a page start offset. With improved guessing and more reliable payload injection, the chances of jumping into such a payload increase. Elena Reshetova recently wrote a test case for it [2, 3]. Moreover, eBPF comes with 64 bit constants, which can leave some more room for payloads. Note that for all this, additional bugs in the kernel are still required to make the jump (and of course to guess right, to not jump into a trap) and naturally the JIT must be enabled, which is disabled by default. To help with mitigation, the general idea is to provide an option bpf_jit_harden that admins can tweak along with bpf_jit_enable, so that for cases where JIT should be enabled for performance reasons, the generated image can be further hardened with blinding constants for unprivileged users (bpf_jit_harden == 1), trading off performance for these users, but not for privileged ones. We also added the option of blinding for all users (bpf_jit_harden == 2), which is quite helpful for testing f.e. with test_bpf.ko. No further hardening levels of the bpf_jit_harden switch are intended; the rationale is to keep it dead simple to use as on/off. Since this functionality would need to be duplicated over and over for JIT compilers to use, which are already complex enough, we provide a generic eBPF byte-code level based blinding implementation, which is then just transparently JITed. JIT compilers need to make only a few changes to integrate this facility and can be migrated one by one. This option is for eBPF JITs and will be used in x86, arm64, s390 without too much effort, and soon ppc64 JITs, so that native eBPF can be blinded as well as cBPF to eBPF migrations, and both can be covered with a single implementation. The rule for JITs is that bpf_jit_blind_constants() must be called from bpf_int_jit_compile(), and in case blinding is disabled, we follow normally with JITing the passed program. In case blinding is enabled and we fail during the process of blinding itself, we must return with the interpreter. Similarly, in case the JITing process after the blinding failed, we return normally to the interpreter with the non-blinded code. Meaning, the interpreter doesn't change in any way and operates on eBPF code as usual. For doing this pre-JIT blinding step, we need to make use of a helper/auxiliary register, here BPF_REG_AX. This is strictly internal to the JIT and not in any way part of the eBPF architecture. Just as JITs internally make use of some helper registers when emitting code, the helper register here is one abstraction level higher, in eBPF bytecode, but nevertheless used in the JIT phase. That helper register is needed since f.e. a manually written program can issue loads to all registers of the eBPF architecture. The core concept with the additional register is: blind out all 32 and 64 bit constants by converting BPF_K based instructions into a small sequence from K_VAL into ((RND ^ K_VAL) ^ RND).
Therefore, this is transformed into: BPF_REG_AX := (RND ^ K_VAL), BPF_REG_AX ^= RND, and REG BPF_REG_AX, so actual operation on the target register is translated from BPF_K into BPF_X one that is operating on BPF_REG_AX's content. During rewriting phase when blinding, RND is newly generated via prandom_u32() for each processed instruction. 64 bit loads are split into two 32 bit loads to make translation and patching not too complex. Only basic thing required by JITs is to call the helper bpf_jit_blind_constants()/bpf_jit_prog_release_other() pair, and to map BPF_REG_AX into an unused register. Small bpf_jit_disasm extract from [2] when applied to x86 JIT: echo 0 > /proc/sys/net/core/bpf_jit_harden ffffffffa034f5e9 + : [...] 39: mov $0xa8909090,%eax 3e: mov $0xa8909090,%eax 43: mov $0xa8ff3148,%eax 48: mov $0xa89081b4,%eax 4d: mov $0xa8900bb0,%eax 52: mov $0xa810e0c1,%eax 57: mov $0xa8908eb4,%eax 5c: mov $0xa89020b0,%eax [...] echo 1 > /proc/sys/net/core/bpf_jit_harden ffffffffa034f1e5 + : [...] 39: mov $0xe1192563,%r10d 3f: xor $0x4989b5f3,%r10d 46: mov %r10d,%eax 49: mov $0xb8296d93,%r10d 4f: xor $0x10b9fd03,%r10d 56: mov %r10d,%eax 59: mov $0x8c381146,%r10d 5f: xor $0x24c7200e,%r10d 66: mov %r10d,%eax 69: mov $0xeb2a830e,%r10d 6f: xor $0x43ba02ba,%r10d 76: mov %r10d,%eax 79: mov $0xd9730af,%r10d 7f: xor $0xa5073b1f,%r10d 86: mov %r10d,%eax 89: mov $0x9a45662b,%r10d 8f: xor $0x325586ea,%r10d 96: mov %r10d,%eax [...] As can be seen, original constants that carry payload are hidden when enabled, actual operations are transformed from constant-based to register-based ones, making jumps into constants ineffective. Above extract/example uses single BPF load instruction over and over, but of course all instructions with constants are blinded. Performance wise, JIT with blinding performs a bit slower than just JIT and faster than interpreter case. This is expected, since we still get all the performance benefits from JITing and in normal use-cases not every single instruction needs to be blinded. Summing up all 296 test cases averaged over multiple runs from test_bpf.ko suite, interpreter was 55% slower than JIT only and JIT with blinding was 8% slower than JIT only. Since there are also some extremes in the test suite, I expect for ordinary workloads that the performance for the JIT with blinding case is even closer to JIT only case, f.e. nmap test case from suite has averaged timings in ns 29 (JIT), 35 (+ blinding), and 151 (interpreter). BPF test suite, seccomp test suite, eBPF sample code and various bigger networking eBPF programs have been tested with this and were running fine. For testing purposes, I also adapted interpreter and redirected blinded eBPF image to interpreter and also here all tests pass. [1] http://mainisusuallyafunction.blogspot.com/2012/11/attacking-hardened-linux-systems-with.html [2] https://github.com/01org/jit-spray-poc-for-ksp/ [3] http://www.openwall.com/lists/kernel-hardening/2016/05/03/5 Signed-off-by: Daniel Borkmann Reviewed-by: Elena Reshetova Acked-by: Alexei Starovoitov Signed-off-by: David S. 
Miller --- Documentation/sysctl/net.txt | 11 ++ include/linux/filter.h | 42 ++++++++ kernel/bpf/core.c | 203 +++++++++++++++++++++++++++++++++++ net/Kconfig | 7 +- net/core/sysctl_net_core.c | 9 ++ 5 files changed, 270 insertions(+), 2 deletions(-) diff --git a/Documentation/sysctl/net.txt b/Documentation/sysctl/net.txt index 809ab6efcc74..f0480f7ea740 100644 --- a/Documentation/sysctl/net.txt +++ b/Documentation/sysctl/net.txt @@ -43,6 +43,17 @@ Values : 1 - enable the JIT 2 - enable the JIT and ask the compiler to emit traces on kernel log. +bpf_jit_harden +-------------- + +This enables hardening for the Berkeley Packet Filter Just in Time compiler. +Supported are eBPF JIT backends. Enabling hardening trades off performance, +but can mitigate JIT spraying. +Values : + 0 - disable JIT hardening (default value) + 1 - enable JIT hardening for unprivileged users only + 2 - enable JIT hardening for all users + dev_weight -------------- diff --git a/include/linux/filter.h b/include/linux/filter.h index 891852cf7716..6fc31ef1da2d 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -13,6 +13,8 @@ #include #include #include +#include + #include #include @@ -42,6 +44,15 @@ struct bpf_prog_aux; #define BPF_REG_X BPF_REG_7 #define BPF_REG_TMP BPF_REG_8 +/* Kernel hidden auxiliary/helper register for hardening step. + * Only used by eBPF JITs. It's nothing more than a temporary + * register that JITs use internally, only that here it's part + * of eBPF instructions that have been rewritten for blinding + * constants. See JIT pre-step in bpf_jit_blind_constants(). + */ +#define BPF_REG_AX MAX_BPF_REG +#define MAX_BPF_JIT_REG (MAX_BPF_REG + 1) + /* BPF program can access up to 512 bytes of stack space. */ #define MAX_BPF_STACK 512 @@ -501,6 +512,7 @@ struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off, #ifdef CONFIG_BPF_JIT extern int bpf_jit_enable; +extern int bpf_jit_harden; typedef void (*bpf_jit_fill_hole_t)(void *area, unsigned int size); @@ -513,6 +525,9 @@ void bpf_jit_binary_free(struct bpf_binary_header *hdr); void bpf_jit_compile(struct bpf_prog *fp); void bpf_jit_free(struct bpf_prog *fp); +struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *fp); +void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other); + static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen, u32 pass, void *image) { @@ -523,6 +538,33 @@ static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen, print_hex_dump(KERN_ERR, "JIT code: ", DUMP_PREFIX_OFFSET, 16, 1, image, proglen, false); } + +static inline bool bpf_jit_is_ebpf(void) +{ +# ifdef CONFIG_HAVE_EBPF_JIT + return true; +# else + return false; +# endif +} + +static inline bool bpf_jit_blinding_enabled(void) +{ + /* These are the prerequisites, should someone ever have the + * idea to call blinding outside of them, we make sure to + * bail out. 
+ */ + if (!bpf_jit_is_ebpf()) + return false; + if (!bpf_jit_enable) + return false; + if (!bpf_jit_harden) + return false; + if (bpf_jit_harden == 1 && capable(CAP_SYS_ADMIN)) + return false; + + return true; +} #else static inline void bpf_jit_compile(struct bpf_prog *fp) { diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index 70f0821aca47..f1e8a0def99b 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -243,6 +243,209 @@ void bpf_jit_binary_free(struct bpf_binary_header *hdr) { module_memfree(hdr); } + +int bpf_jit_harden __read_mostly; + +static int bpf_jit_blind_insn(const struct bpf_insn *from, + const struct bpf_insn *aux, + struct bpf_insn *to_buff) +{ + struct bpf_insn *to = to_buff; + u32 imm_rnd = prandom_u32(); + s16 off; + + BUILD_BUG_ON(BPF_REG_AX + 1 != MAX_BPF_JIT_REG); + BUILD_BUG_ON(MAX_BPF_REG + 1 != MAX_BPF_JIT_REG); + + if (from->imm == 0 && + (from->code == (BPF_ALU | BPF_MOV | BPF_K) || + from->code == (BPF_ALU64 | BPF_MOV | BPF_K))) { + *to++ = BPF_ALU64_REG(BPF_XOR, from->dst_reg, from->dst_reg); + goto out; + } + + switch (from->code) { + case BPF_ALU | BPF_ADD | BPF_K: + case BPF_ALU | BPF_SUB | BPF_K: + case BPF_ALU | BPF_AND | BPF_K: + case BPF_ALU | BPF_OR | BPF_K: + case BPF_ALU | BPF_XOR | BPF_K: + case BPF_ALU | BPF_MUL | BPF_K: + case BPF_ALU | BPF_MOV | BPF_K: + case BPF_ALU | BPF_DIV | BPF_K: + case BPF_ALU | BPF_MOD | BPF_K: + *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm); + *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd); + *to++ = BPF_ALU32_REG(from->code, from->dst_reg, BPF_REG_AX); + break; + + case BPF_ALU64 | BPF_ADD | BPF_K: + case BPF_ALU64 | BPF_SUB | BPF_K: + case BPF_ALU64 | BPF_AND | BPF_K: + case BPF_ALU64 | BPF_OR | BPF_K: + case BPF_ALU64 | BPF_XOR | BPF_K: + case BPF_ALU64 | BPF_MUL | BPF_K: + case BPF_ALU64 | BPF_MOV | BPF_K: + case BPF_ALU64 | BPF_DIV | BPF_K: + case BPF_ALU64 | BPF_MOD | BPF_K: + *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm); + *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd); + *to++ = BPF_ALU64_REG(from->code, from->dst_reg, BPF_REG_AX); + break; + + case BPF_JMP | BPF_JEQ | BPF_K: + case BPF_JMP | BPF_JNE | BPF_K: + case BPF_JMP | BPF_JGT | BPF_K: + case BPF_JMP | BPF_JGE | BPF_K: + case BPF_JMP | BPF_JSGT | BPF_K: + case BPF_JMP | BPF_JSGE | BPF_K: + case BPF_JMP | BPF_JSET | BPF_K: + /* Accommodate for extra offset in case of a backjump. 
*/ + off = from->off; + if (off < 0) + off -= 2; + *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm); + *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd); + *to++ = BPF_JMP_REG(from->code, from->dst_reg, BPF_REG_AX, off); + break; + + case BPF_LD | BPF_ABS | BPF_W: + case BPF_LD | BPF_ABS | BPF_H: + case BPF_LD | BPF_ABS | BPF_B: + *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm); + *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd); + *to++ = BPF_LD_IND(from->code, BPF_REG_AX, 0); + break; + + case BPF_LD | BPF_IND | BPF_W: + case BPF_LD | BPF_IND | BPF_H: + case BPF_LD | BPF_IND | BPF_B: + *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm); + *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd); + *to++ = BPF_ALU32_REG(BPF_ADD, BPF_REG_AX, from->src_reg); + *to++ = BPF_LD_IND(from->code, BPF_REG_AX, 0); + break; + + case BPF_LD | BPF_IMM | BPF_DW: + *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[1].imm); + *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd); + *to++ = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32); + *to++ = BPF_ALU64_REG(BPF_MOV, aux[0].dst_reg, BPF_REG_AX); + break; + case 0: /* Part 2 of BPF_LD | BPF_IMM | BPF_DW. */ + *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[0].imm); + *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd); + *to++ = BPF_ALU64_REG(BPF_OR, aux[0].dst_reg, BPF_REG_AX); + break; + + case BPF_ST | BPF_MEM | BPF_DW: + case BPF_ST | BPF_MEM | BPF_W: + case BPF_ST | BPF_MEM | BPF_H: + case BPF_ST | BPF_MEM | BPF_B: + *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm); + *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd); + *to++ = BPF_STX_MEM(from->code, from->dst_reg, BPF_REG_AX, from->off); + break; + } +out: + return to - to_buff; +} + +static struct bpf_prog *bpf_prog_clone_create(struct bpf_prog *fp_other, + gfp_t gfp_extra_flags) +{ + gfp_t gfp_flags = GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO | + gfp_extra_flags; + struct bpf_prog *fp; + + fp = __vmalloc(fp_other->pages * PAGE_SIZE, gfp_flags, PAGE_KERNEL); + if (fp != NULL) { + kmemcheck_annotate_bitfield(fp, meta); + + /* aux->prog still points to the fp_other one, so + * when promoting the clone to the real program, + * this still needs to be adapted. + */ + memcpy(fp, fp_other, fp_other->pages * PAGE_SIZE); + } + + return fp; +} + +static void bpf_prog_clone_free(struct bpf_prog *fp) +{ + /* aux was stolen by the other clone, so we cannot free + * it from this path! It will be freed eventually by the + * other program on release. + * + * At this point, we don't need a deferred release since + * clone is guaranteed to not be locked. + */ + fp->aux = NULL; + __bpf_prog_free(fp); +} + +void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other) +{ + /* We have to repoint aux->prog to self, as we don't + * know whether fp here is the clone or the original. + */ + fp->aux->prog = fp; + bpf_prog_clone_free(fp_other); +} + +struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *prog) +{ + struct bpf_insn insn_buff[16], aux[2]; + struct bpf_prog *clone, *tmp; + int insn_delta, insn_cnt; + struct bpf_insn *insn; + int i, rewritten; + + if (!bpf_jit_blinding_enabled()) + return prog; + + clone = bpf_prog_clone_create(prog, GFP_USER); + if (!clone) + return ERR_PTR(-ENOMEM); + + insn_cnt = clone->len; + insn = clone->insnsi; + + for (i = 0; i < insn_cnt; i++, insn++) { + /* We temporarily need to hold the original ld64 insn + * so that we can still access the first part in the + * second blinding run. 
+ */ + if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW) && + insn[1].code == 0) + memcpy(aux, insn, sizeof(aux)); + + rewritten = bpf_jit_blind_insn(insn, aux, insn_buff); + if (!rewritten) + continue; + + tmp = bpf_patch_insn_single(clone, i, insn_buff, rewritten); + if (!tmp) { + /* Patching may have repointed aux->prog during + * realloc from the original one, so we need to + * fix it up here on error. + */ + bpf_jit_prog_release_other(prog, clone); + return ERR_PTR(-ENOMEM); + } + + clone = tmp; + insn_delta = rewritten - 1; + + /* Walk new program and skip insns we just inserted. */ + insn = clone->insnsi + i + insn_delta; + insn_cnt += insn_delta; + i += insn_delta; + } + + return clone; +} #endif /* CONFIG_BPF_JIT */ /* Base function for offset calculation. Needs to go into .text section, diff --git a/net/Kconfig b/net/Kconfig index f7148f24f114..ff40562a782c 100644 --- a/net/Kconfig +++ b/net/Kconfig @@ -295,8 +295,11 @@ config BPF_JIT Berkeley Packet Filter filtering capabilities are normally handled by an interpreter. This option allows kernel to generate a native code when filter is loaded in memory. This should speedup - packet sniffing (libpcap/tcpdump). Note : Admin should enable - this feature changing /proc/sys/net/core/bpf_jit_enable + packet sniffing (libpcap/tcpdump). + + Note, admin should enable this feature changing: + /proc/sys/net/core/bpf_jit_enable + /proc/sys/net/core/bpf_jit_harden (optional) config NET_FLOW_LIMIT bool diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c index a6beb7b6ae55..0df2aa652530 100644 --- a/net/core/sysctl_net_core.c +++ b/net/core/sysctl_net_core.c @@ -294,6 +294,15 @@ static struct ctl_table net_core_table[] = { .mode = 0644, .proc_handler = proc_dointvec }, +# ifdef CONFIG_HAVE_EBPF_JIT + { + .procname = "bpf_jit_harden", + .data = &bpf_jit_harden, + .maxlen = sizeof(int), + .mode = 0600, + .proc_handler = proc_dointvec, + }, +# endif #endif { .procname = "netdev_tstamp_prequeue", From 959a7579160349d222cc5da30db3b138139b6fbc Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Fri, 13 May 2016 19:08:33 +0200 Subject: [PATCH 1604/1649] bpf, x86: add support for constant blinding This patch adds recently added constant blinding helpers into the x86 eBPF JIT. In the bpf_int_jit_compile() path, requirements are to utilize bpf_jit_blind_constants()/bpf_jit_prog_release_other() pair for rewriting the program into a blinded one, and to map the BPF_REG_AX register to a CPU register. The mapping of BPF_REG_AX is at non-callee saved register r10, and thus shared with cached skb->data used for ld_abs/ind and not in every program type needed. When blinding is not used, there's zero additional overhead in the generated image. Signed-off-by: Daniel Borkmann Acked-by: Alexei Starovoitov Signed-off-by: David S. Miller --- arch/x86/net/bpf_jit_comp.c | 66 +++++++++++++++++++++++++++++-------- 1 file changed, 53 insertions(+), 13 deletions(-) diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c index 6b2d23ea3590..fe04a04dab8e 100644 --- a/arch/x86/net/bpf_jit_comp.c +++ b/arch/x86/net/bpf_jit_comp.c @@ -110,11 +110,16 @@ static void bpf_flush_icache(void *start, void *end) ((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset) /* pick a register outside of BPF range for JIT internal work */ -#define AUX_REG (MAX_BPF_REG + 1) +#define AUX_REG (MAX_BPF_JIT_REG + 1) -/* the following table maps BPF registers to x64 registers. 
- * x64 register r12 is unused, since if used as base address register - * in load/store instructions, it always needs an extra byte of encoding +/* The following table maps BPF registers to x64 registers. + * + * x64 register r12 is unused, since if used as base address + * register in load/store instructions, it always needs an + * extra byte of encoding and is callee saved. + * + * r9 caches skb->len - skb->data_len + * r10 caches skb->data, and used for blinding (if enabled) */ static const int reg2hex[] = { [BPF_REG_0] = 0, /* rax */ @@ -128,6 +133,7 @@ static const int reg2hex[] = { [BPF_REG_8] = 6, /* r14 callee saved */ [BPF_REG_9] = 7, /* r15 callee saved */ [BPF_REG_FP] = 5, /* rbp readonly */ + [BPF_REG_AX] = 2, /* r10 temp register */ [AUX_REG] = 3, /* r11 temp register */ }; @@ -141,7 +147,8 @@ static bool is_ereg(u32 reg) BIT(AUX_REG) | BIT(BPF_REG_7) | BIT(BPF_REG_8) | - BIT(BPF_REG_9)); + BIT(BPF_REG_9) | + BIT(BPF_REG_AX)); } /* add modifiers if 'reg' maps to x64 registers r8..r15 */ @@ -182,6 +189,7 @@ static void jit_fill_hole(void *area, unsigned int size) struct jit_context { int cleanup_addr; /* epilogue code offset */ bool seen_ld_abs; + bool seen_ax_reg; }; /* maximum number of bytes emitted while JITing one eBPF insn */ @@ -345,6 +353,7 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, struct bpf_insn *insn = bpf_prog->insnsi; int insn_cnt = bpf_prog->len; bool seen_ld_abs = ctx->seen_ld_abs | (oldproglen == 0); + bool seen_ax_reg = ctx->seen_ax_reg | (oldproglen == 0); bool seen_exit = false; u8 temp[BPF_MAX_INSN_SIZE + BPF_INSN_SAFETY]; int i, cnt = 0; @@ -367,6 +376,9 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, int ilen; u8 *func; + if (dst_reg == BPF_REG_AX || src_reg == BPF_REG_AX) + ctx->seen_ax_reg = seen_ax_reg = true; + switch (insn->code) { /* ALU */ case BPF_ALU | BPF_ADD | BPF_X: @@ -1002,6 +1014,10 @@ common_load: * sk_load_* helpers also use %r10 and %r9d. * See bpf_jit.S */ + if (seen_ax_reg) + /* r10 = skb->data, mov %r10, off32(%rbx) */ + EMIT3_off32(0x4c, 0x8b, 0x93, + offsetof(struct sk_buff, data)); EMIT1_off32(0xE8, jmp_offset); /* call */ break; @@ -1076,19 +1092,34 @@ void bpf_jit_compile(struct bpf_prog *prog) struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) { struct bpf_binary_header *header = NULL; + struct bpf_prog *tmp, *orig_prog = prog; int proglen, oldproglen = 0; struct jit_context ctx = {}; + bool tmp_blinded = false; u8 *image = NULL; int *addrs; int pass; int i; if (!bpf_jit_enable) - return prog; + return orig_prog; + + tmp = bpf_jit_blind_constants(prog); + /* If blinding was requested and we failed during blinding, + * we must fall back to the interpreter. 
+ */ + if (IS_ERR(tmp)) + return orig_prog; + if (tmp != prog) { + tmp_blinded = true; + prog = tmp; + } addrs = kmalloc(prog->len * sizeof(*addrs), GFP_KERNEL); - if (!addrs) - return prog; + if (!addrs) { + prog = orig_prog; + goto out; + } /* Before first pass, make a rough estimation of addrs[] * each bpf instruction is translated to less than 64 bytes @@ -1110,21 +1141,25 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) image = NULL; if (header) bpf_jit_binary_free(header); - goto out; + prog = orig_prog; + goto out_addrs; } if (image) { if (proglen != oldproglen) { pr_err("bpf_jit: proglen=%d != oldproglen=%d\n", proglen, oldproglen); - goto out; + prog = orig_prog; + goto out_addrs; } break; } if (proglen == oldproglen) { header = bpf_jit_binary_alloc(proglen, &image, 1, jit_fill_hole); - if (!header) - goto out; + if (!header) { + prog = orig_prog; + goto out_addrs; + } } oldproglen = proglen; } @@ -1138,8 +1173,13 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) prog->bpf_func = (void *)image; prog->jited = 1; } -out: + +out_addrs: kfree(addrs); +out: + if (tmp_blinded) + bpf_jit_prog_release_other(prog, prog == orig_prog ? + tmp : orig_prog); return prog; } From 26eb042ee4c7845aa395c41c4e125c240b82b984 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Fri, 13 May 2016 19:08:34 +0200 Subject: [PATCH 1605/1649] bpf, arm64: add support for constant blinding This patch adds recently added constant blinding helpers into the arm64 eBPF JIT. In the bpf_int_jit_compile() path, requirements are to utilize bpf_jit_blind_constants()/bpf_jit_prog_release_other() pair for rewriting the program into a blinded one, and to map the BPF_REG_AX register to a CPU register. The mapping is on x9. Signed-off-by: Daniel Borkmann Acked-by: Zi Shen Lim Acked-by: Yang Shi Tested-by: Yang Shi Signed-off-by: David S. Miller --- arch/arm64/net/bpf_jit_comp.c | 52 +++++++++++++++++++++++++++-------- 1 file changed, 40 insertions(+), 12 deletions(-) diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c index dd428807cb30..d0d51903c7e0 100644 --- a/arch/arm64/net/bpf_jit_comp.c +++ b/arch/arm64/net/bpf_jit_comp.c @@ -31,8 +31,8 @@ int bpf_jit_enable __read_mostly; -#define TMP_REG_1 (MAX_BPF_REG + 0) -#define TMP_REG_2 (MAX_BPF_REG + 1) +#define TMP_REG_1 (MAX_BPF_JIT_REG + 0) +#define TMP_REG_2 (MAX_BPF_JIT_REG + 1) /* Map BPF registers to A64 registers */ static const int bpf2a64[] = { @@ -54,6 +54,8 @@ static const int bpf2a64[] = { /* temporary register for internal BPF JIT */ [TMP_REG_1] = A64_R(23), [TMP_REG_2] = A64_R(24), + /* temporary register for blinding constants */ + [BPF_REG_AX] = A64_R(9), }; struct jit_ctx { @@ -764,26 +766,43 @@ void bpf_jit_compile(struct bpf_prog *prog) struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) { + struct bpf_prog *tmp, *orig_prog = prog; struct bpf_binary_header *header; + bool tmp_blinded = false; struct jit_ctx ctx; int image_size; u8 *image_ptr; if (!bpf_jit_enable) - return prog; + return orig_prog; + + tmp = bpf_jit_blind_constants(prog); + /* If blinding was requested and we failed during blinding, + * we must fall back to the interpreter. + */ + if (IS_ERR(tmp)) + return orig_prog; + if (tmp != prog) { + tmp_blinded = true; + prog = tmp; + } memset(&ctx, 0, sizeof(ctx)); ctx.prog = prog; ctx.offset = kcalloc(prog->len, sizeof(int), GFP_KERNEL); - if (ctx.offset == NULL) - return prog; + if (ctx.offset == NULL) { + prog = orig_prog; + goto out; + } /* 1. Initial fake pass to compute ctx->idx. 
*/ /* Fake pass to fill in ctx->offset and ctx->tmp_used. */ - if (build_body(&ctx)) - goto out; + if (build_body(&ctx)) { + prog = orig_prog; + goto out_off; + } build_prologue(&ctx); @@ -794,8 +813,10 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) image_size = sizeof(u32) * ctx.idx; header = bpf_jit_binary_alloc(image_size, &image_ptr, sizeof(u32), jit_fill_hole); - if (header == NULL) - goto out; + if (header == NULL) { + prog = orig_prog; + goto out_off; + } /* 2. Now, the actual pass. */ @@ -806,7 +827,8 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) if (build_body(&ctx)) { bpf_jit_binary_free(header); - goto out; + prog = orig_prog; + goto out_off; } build_epilogue(&ctx); @@ -814,7 +836,8 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) /* 3. Extra pass to validate JITed code. */ if (validate_code(&ctx)) { bpf_jit_binary_free(header); - goto out; + prog = orig_prog; + goto out_off; } /* And we're done. */ @@ -826,8 +849,13 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) set_memory_ro((unsigned long)header, header->pages); prog->bpf_func = (void *)ctx.image; prog->jited = 1; -out: + +out_off: kfree(ctx.offset); +out: + if (tmp_blinded) + bpf_jit_prog_release_other(prog, prog == orig_prog ? + tmp : orig_prog); return prog; } From d93a47f735f3455a896e46b18d0ac26fa19639e6 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Fri, 13 May 2016 19:08:35 +0200 Subject: [PATCH 1606/1649] bpf, s390: add support for constant blinding This patch adds recently added constant blinding helpers into the s390 eBPF JIT. In the bpf_int_jit_compile() path, requirements are to utilize bpf_jit_blind_constants()/bpf_jit_prog_release_other() pair for rewriting the program into a blinded one, and to map the BPF_REG_AX register to a CPU register. The mapping of BPF_REG_AX is at r12 and similarly like in x86 case performs reloading when ld_abs/ind is used. When blinding is not used, there's no additional overhead in the generated image. When BPF_REG_AX is used, we don't need to emit skb->data reload when helper function changed skb->data, as this will be reloaded later on anyway from stack on ld_abs/ind, where skb->data is needed. s390 allows for this w/o much additional complexity unlike f.e. x86. Signed-off-by: Daniel Borkmann Signed-off-by: Michael Holzheu Signed-off-by: David S. 
Miller --- arch/s390/net/bpf_jit_comp.c | 73 +++++++++++++++++++++++++++--------- 1 file changed, 56 insertions(+), 17 deletions(-) diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c index fcf301a889e7..9133b0ec000b 100644 --- a/arch/s390/net/bpf_jit_comp.c +++ b/arch/s390/net/bpf_jit_comp.c @@ -54,16 +54,17 @@ struct bpf_jit { #define SEEN_FUNC 16 /* calls C functions */ #define SEEN_TAIL_CALL 32 /* code uses tail calls */ #define SEEN_SKB_CHANGE 64 /* code changes skb data */ +#define SEEN_REG_AX 128 /* code uses constant blinding */ #define SEEN_STACK (SEEN_FUNC | SEEN_MEM | SEEN_SKB) /* * s390 registers */ -#define REG_W0 (__MAX_BPF_REG+0) /* Work register 1 (even) */ -#define REG_W1 (__MAX_BPF_REG+1) /* Work register 2 (odd) */ -#define REG_SKB_DATA (__MAX_BPF_REG+2) /* SKB data register */ -#define REG_L (__MAX_BPF_REG+3) /* Literal pool register */ -#define REG_15 (__MAX_BPF_REG+4) /* Register 15 */ +#define REG_W0 (MAX_BPF_JIT_REG + 0) /* Work register 1 (even) */ +#define REG_W1 (MAX_BPF_JIT_REG + 1) /* Work register 2 (odd) */ +#define REG_SKB_DATA (MAX_BPF_JIT_REG + 2) /* SKB data register */ +#define REG_L (MAX_BPF_JIT_REG + 3) /* Literal pool register */ +#define REG_15 (MAX_BPF_JIT_REG + 4) /* Register 15 */ #define REG_0 REG_W0 /* Register 0 */ #define REG_1 REG_W1 /* Register 1 */ #define REG_2 BPF_REG_1 /* Register 2 */ @@ -88,6 +89,8 @@ static const int reg2hex[] = { [BPF_REG_9] = 10, /* BPF stack pointer */ [BPF_REG_FP] = 13, + /* Register for blinding (shared with REG_SKB_DATA) */ + [BPF_REG_AX] = 12, /* SKB data pointer */ [REG_SKB_DATA] = 12, /* Work registers for s390x backend */ @@ -385,7 +388,7 @@ static void save_restore_regs(struct bpf_jit *jit, int op) /* * For SKB access %b1 contains the SKB pointer. For "bpf_jit.S" * we store the SKB header length on the stack and the SKB data - * pointer in REG_SKB_DATA. + * pointer in REG_SKB_DATA if BPF_REG_AX is not used. 
*/ static void emit_load_skb_data_hlen(struct bpf_jit *jit) { @@ -397,9 +400,10 @@ static void emit_load_skb_data_hlen(struct bpf_jit *jit) offsetof(struct sk_buff, data_len)); /* stg %w1,ST_OFF_HLEN(%r0,%r15) */ EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W1, REG_0, REG_15, STK_OFF_HLEN); - /* lg %skb_data,data_off(%b1) */ - EMIT6_DISP_LH(0xe3000000, 0x0004, REG_SKB_DATA, REG_0, - BPF_REG_1, offsetof(struct sk_buff, data)); + if (!(jit->seen & SEEN_REG_AX)) + /* lg %skb_data,data_off(%b1) */ + EMIT6_DISP_LH(0xe3000000, 0x0004, REG_SKB_DATA, REG_0, + BPF_REG_1, offsetof(struct sk_buff, data)); } /* @@ -487,6 +491,8 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i s32 imm = insn->imm; s16 off = insn->off; + if (dst_reg == BPF_REG_AX || src_reg == BPF_REG_AX) + jit->seen |= SEEN_REG_AX; switch (insn->code) { /* * BPF_MOV @@ -1188,7 +1194,7 @@ call_fn: /* * Implicit input: * BPF_REG_6 (R7) : skb pointer - * REG_SKB_DATA (R12): skb data pointer + * REG_SKB_DATA (R12): skb data pointer (if no BPF_REG_AX) * * Calculated input: * BPF_REG_2 (R3) : offset of byte(s) to fetch in skb @@ -1209,6 +1215,11 @@ call_fn: /* agfr %b2,%src (%src is s32 here) */ EMIT4(0xb9180000, BPF_REG_2, src_reg); + /* Reload REG_SKB_DATA if BPF_REG_AX is used */ + if (jit->seen & SEEN_REG_AX) + /* lg %skb_data,data_off(%b6) */ + EMIT6_DISP_LH(0xe3000000, 0x0004, REG_SKB_DATA, REG_0, + BPF_REG_6, offsetof(struct sk_buff, data)); /* basr %b5,%w1 (%b5 is call saved) */ EMIT2(0x0d00, BPF_REG_5, REG_W1); @@ -1264,36 +1275,60 @@ void bpf_jit_compile(struct bpf_prog *fp) */ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp) { + struct bpf_prog *tmp, *orig_fp = fp; struct bpf_binary_header *header; + bool tmp_blinded = false; struct bpf_jit jit; int pass; if (!bpf_jit_enable) - return fp; + return orig_fp; + + tmp = bpf_jit_blind_constants(fp); + /* + * If blinding was requested and we failed during blinding, + * we must fall back to the interpreter. + */ + if (IS_ERR(tmp)) + return orig_fp; + if (tmp != fp) { + tmp_blinded = true; + fp = tmp; + } memset(&jit, 0, sizeof(jit)); jit.addrs = kcalloc(fp->len + 1, sizeof(*jit.addrs), GFP_KERNEL); - if (jit.addrs == NULL) - return fp; + if (jit.addrs == NULL) { + fp = orig_fp; + goto out; + } /* * Three initial passes: * - 1/2: Determine clobbered registers * - 3: Calculate program size and addrs arrray */ for (pass = 1; pass <= 3; pass++) { - if (bpf_jit_prog(&jit, fp)) + if (bpf_jit_prog(&jit, fp)) { + fp = orig_fp; goto free_addrs; + } } /* * Final pass: Allocate and generate program */ - if (jit.size >= BPF_SIZE_MAX) + if (jit.size >= BPF_SIZE_MAX) { + fp = orig_fp; goto free_addrs; + } header = bpf_jit_binary_alloc(jit.size, &jit.prg_buf, 2, jit_fill_hole); - if (!header) + if (!header) { + fp = orig_fp; goto free_addrs; - if (bpf_jit_prog(&jit, fp)) + } + if (bpf_jit_prog(&jit, fp)) { + fp = orig_fp; goto free_addrs; + } if (bpf_jit_enable > 1) { bpf_jit_dump(fp->len, jit.size, pass, jit.prg_buf); if (jit.prg_buf) @@ -1306,6 +1341,10 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp) } free_addrs: kfree(jit.addrs); +out: + if (tmp_blinded) + bpf_jit_prog_release_other(fp, fp == orig_fp ? + tmp : orig_fp); return fp; } From 04e6233d57a9e68e97c248f97d1aa2cb354f3947 Mon Sep 17 00:00:00 2001 From: Sergei Shtylyov Date: Fri, 13 May 2016 16:08:02 -0700 Subject: [PATCH 1607/1649] lxt: simplify lxt97[01]_config_intr() Both these functions declare the 'err' local variables for no good reason, get rid of them. 
Signed-off-by: Sergei Shtylyov Signed-off-by: David S. Miller --- drivers/net/phy/lxt.c | 16 ++++------------ 1 file changed, 4 insertions(+), 12 deletions(-) diff --git a/drivers/net/phy/lxt.c b/drivers/net/phy/lxt.c index f6078376ef50..89b2c0fd69b9 100644 --- a/drivers/net/phy/lxt.c +++ b/drivers/net/phy/lxt.c @@ -80,14 +80,10 @@ static int lxt970_ack_interrupt(struct phy_device *phydev) static int lxt970_config_intr(struct phy_device *phydev) { - int err; - if (phydev->interrupts == PHY_INTERRUPT_ENABLED) - err = phy_write(phydev, MII_LXT970_IER, MII_LXT970_IER_IEN); + return phy_write(phydev, MII_LXT970_IER, MII_LXT970_IER_IEN); else - err = phy_write(phydev, MII_LXT970_IER, 0); - - return err; + return phy_write(phydev, MII_LXT970_IER, 0); } static int lxt970_config_init(struct phy_device *phydev) @@ -112,14 +108,10 @@ static int lxt971_ack_interrupt(struct phy_device *phydev) static int lxt971_config_intr(struct phy_device *phydev) { - int err; - if (phydev->interrupts == PHY_INTERRUPT_ENABLED) - err = phy_write(phydev, MII_LXT971_IER, MII_LXT971_IER_IEN); + return phy_write(phydev, MII_LXT971_IER, MII_LXT971_IER_IEN); else - err = phy_write(phydev, MII_LXT971_IER, 0); - - return err; + return phy_write(phydev, MII_LXT971_IER, 0); } /* From c8396d84c70b47b31b5a9e34732e88bcc7dae9e9 Mon Sep 17 00:00:00 2001 From: Sergei Shtylyov Date: Sat, 14 May 2016 02:09:07 +0300 Subject: [PATCH 1608/1649] lxt: simplify lxt970_config_init() This function declares the 'err' local variable for no good reason, get rid of it. Signed-off-by: Sergei Shtylyov Signed-off-by: David S. Miller --- drivers/net/phy/lxt.c | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/drivers/net/phy/lxt.c b/drivers/net/phy/lxt.c index 89b2c0fd69b9..b9fde1bcf0f0 100644 --- a/drivers/net/phy/lxt.c +++ b/drivers/net/phy/lxt.c @@ -88,11 +88,7 @@ static int lxt970_config_intr(struct phy_device *phydev) static int lxt970_config_init(struct phy_device *phydev) { - int err; - - err = phy_write(phydev, MII_LXT970_CONFIG, 0); - - return err; + return phy_write(phydev, MII_LXT970_CONFIG, 0); } From e9f0cd94c1697ad6c98422a1953105c8ffc515f3 Mon Sep 17 00:00:00 2001 From: Akinobu Mita Date: Sat, 14 May 2016 14:55:47 +0900 Subject: [PATCH 1609/1649] net: w5100: remove unused is_w5200() The is_w5200() function is not used anymore by the commit which adds the W5500 support. Signed-off-by: Akinobu Mita Cc: Mike Sinkovsky Cc: David S. Miller Signed-off-by: David S. Miller --- drivers/net/ethernet/wiznet/w5100.c | 5 ----- 1 file changed, 5 deletions(-) diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c index ec1889ce38a3..df0ba2b2b93a 100644 --- a/drivers/net/ethernet/wiznet/w5100.c +++ b/drivers/net/ethernet/wiznet/w5100.c @@ -173,11 +173,6 @@ struct w5100_priv { struct work_struct restart_work; }; -static inline bool is_w5200(struct w5100_priv *priv) -{ - return priv->ops->chip_id == W5200; -} - /************************************************************************ * * Lowlevel I/O functions From d41cd5f7e2fce8d3c5b1345a7cf9ed3f0d2d99c1 Mon Sep 17 00:00:00 2001 From: Akinobu Mita Date: Sat, 14 May 2016 14:55:48 +0900 Subject: [PATCH 1610/1649] net: w5100: fix MAC filtering for W5500 W5500 has different bit position for MAC filter in Socket n mode register from W5100 and W5200. Signed-off-by: Akinobu Mita Cc: Mike Sinkovsky Cc: David S. Miller Signed-off-by: David S. 
Miller --- drivers/net/ethernet/wiznet/w5100.c | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c index df0ba2b2b93a..7c6d5081242e 100644 --- a/drivers/net/ethernet/wiznet/w5100.c +++ b/drivers/net/ethernet/wiznet/w5100.c @@ -63,8 +63,9 @@ MODULE_LICENSE("GPL"); #define S0_REGS(priv) ((priv)->s0_regs) #define W5100_S0_MR(priv) (S0_REGS(priv) + W5100_Sn_MR) -#define S0_MR_MACRAW 0x04 /* MAC RAW mode (promiscuous) */ -#define S0_MR_MACRAW_MF 0x44 /* MAC RAW mode (filtered) */ +#define S0_MR_MACRAW 0x04 /* MAC RAW mode */ +#define S0_MR_MF 0x40 /* MAC Filter for W5100 and W5200 */ +#define W5500_S0_MR_MF 0x80 /* MAC Filter for W5500 */ #define W5100_S0_CR(priv) (S0_REGS(priv) + W5100_Sn_CR) #define S0_CR_OPEN 0x01 /* OPEN command */ #define S0_CR_CLOSE 0x10 /* CLOSE command */ @@ -702,8 +703,16 @@ static int w5100_hw_reset(struct w5100_priv *priv) static void w5100_hw_start(struct w5100_priv *priv) { - w5100_write(priv, W5100_S0_MR(priv), priv->promisc ? - S0_MR_MACRAW : S0_MR_MACRAW_MF); + u8 mode = S0_MR_MACRAW; + + if (!priv->promisc) { + if (priv->ops->chip_id == W5500) + mode |= W5500_S0_MR_MF; + else + mode |= S0_MR_MF; + } + + w5100_write(priv, W5100_S0_MR(priv), mode); w5100_command(priv, S0_CR_OPEN); w5100_enable_intr(priv); } From 7d6da453efce17fae35707fa7e5757e7ade8b3cc Mon Sep 17 00:00:00 2001 From: Akinobu Mita Date: Sat, 14 May 2016 14:55:49 +0900 Subject: [PATCH 1611/1649] net: w5100: increase TX timeout period This increases TX timeout period from one second to 5 seconds which is the default value if the driver doesn't explicitly set net_device->watchdog_timeo. The one second timeout is too short for W5100 with SPI interface mode which doesn't support burst READ/WRITE processing in the SPI transfer. If the packet is transmitted while RX packets are being received at a very high rate, the TX transmittion work in the workqueue is delayed and the watchdog timer is expired. Signed-off-by: Akinobu Mita Cc: Mike Sinkovsky Cc: David S. Miller Signed-off-by: David S. Miller --- drivers/net/ethernet/wiznet/w5100.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c index 7c6d5081242e..21bef2ca2ac0 100644 --- a/drivers/net/ethernet/wiznet/w5100.c +++ b/drivers/net/ethernet/wiznet/w5100.c @@ -1142,7 +1142,6 @@ int w5100_probe(struct device *dev, const struct w5100_ops *ops, ndev->netdev_ops = &w5100_netdev_ops; ndev->ethtool_ops = &w5100_ethtool_ops; - ndev->watchdog_timeo = HZ; netif_napi_add(ndev, &priv->napi, w5100_napi_poll, 16); /* This chip doesn't support VLAN packets with normal MTU, From c3875ca7d9f9ad135debc78e211ea062ac48323c Mon Sep 17 00:00:00 2001 From: Akinobu Mita Date: Sat, 14 May 2016 14:55:50 +0900 Subject: [PATCH 1612/1649] net: w5100-spi: add support to specify MAC address by device tree This adds support to specify the MAC address by 'mac-address' or 'local-mac-address' properties in the device tree. These are common properties for the Ethernet controller. Signed-off-by: Akinobu Mita Cc: Mike Sinkovsky Cc: David S. Miller Signed-off-by: David S. 
Miller --- drivers/net/ethernet/wiznet/w5100-spi.c | 4 +++- drivers/net/ethernet/wiznet/w5100.c | 5 +++-- drivers/net/ethernet/wiznet/w5100.h | 3 ++- 3 files changed, 8 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/wiznet/w5100-spi.c b/drivers/net/ethernet/wiznet/w5100-spi.c index b868e458d0b5..93a2d3c07303 100644 --- a/drivers/net/ethernet/wiznet/w5100-spi.c +++ b/drivers/net/ethernet/wiznet/w5100-spi.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include "w5100.h" @@ -414,6 +415,7 @@ static int w5100_spi_probe(struct spi_device *spi) const struct spi_device_id *id = spi_get_device_id(spi); const struct w5100_ops *ops; int priv_size; + const void *mac = of_get_mac_address(spi->dev.of_node); switch (id->driver_data) { case W5100: @@ -432,7 +434,7 @@ static int w5100_spi_probe(struct spi_device *spi) return -EINVAL; } - return w5100_probe(&spi->dev, ops, priv_size, NULL, spi->irq, -EINVAL); + return w5100_probe(&spi->dev, ops, priv_size, mac, spi->irq, -EINVAL); } static int w5100_spi_remove(struct spi_device *spi) diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c index 21bef2ca2ac0..4f6255cf62ce 100644 --- a/drivers/net/ethernet/wiznet/w5100.c +++ b/drivers/net/ethernet/wiznet/w5100.c @@ -1052,7 +1052,7 @@ static const struct net_device_ops w5100_netdev_ops = { static int w5100_mmio_probe(struct platform_device *pdev) { struct wiznet_platform_data *data = dev_get_platdata(&pdev->dev); - u8 *mac_addr = NULL; + const void *mac_addr = NULL; struct resource *mem; const struct w5100_ops *ops; int irq; @@ -1087,7 +1087,8 @@ void *w5100_ops_priv(const struct net_device *ndev) EXPORT_SYMBOL_GPL(w5100_ops_priv); int w5100_probe(struct device *dev, const struct w5100_ops *ops, - int sizeof_ops_priv, u8 *mac_addr, int irq, int link_gpio) + int sizeof_ops_priv, const void *mac_addr, int irq, + int link_gpio) { struct w5100_priv *priv; struct net_device *ndev; diff --git a/drivers/net/ethernet/wiznet/w5100.h b/drivers/net/ethernet/wiznet/w5100.h index f8a16fad807b..17983a3b8d6c 100644 --- a/drivers/net/ethernet/wiznet/w5100.h +++ b/drivers/net/ethernet/wiznet/w5100.h @@ -30,7 +30,8 @@ struct w5100_ops { void *w5100_ops_priv(const struct net_device *ndev); int w5100_probe(struct device *dev, const struct w5100_ops *ops, - int sizeof_ops_priv, u8 *mac_addr, int irq, int link_gpio); + int sizeof_ops_priv, const void *mac_addr, int irq, + int link_gpio); int w5100_remove(struct device *dev); extern const struct dev_pm_ops w5100_pm_ops; From 2bb07e155bb3e0c722c806723f737cf8020961ef Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Sun, 15 May 2016 10:21:26 +0300 Subject: [PATCH 1613/1649] net/mlx4_core: Fix access to uninitialized index Prevent using uninitialized or negative index when handling steering entries. Fixes: b12d93d63c32 ('mlx4: Add support for promiscuous mode in the new steering model.') Signed-off-by: Tariq Toukan Reported-by: Dan Carpenter Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlx4/mcg.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c index 6aa73972d478..f2d0920018a5 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mcg.c +++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c @@ -1102,7 +1102,7 @@ int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], struct mlx4_cmd_mailbox *mailbox; struct mlx4_mgm *mgm; u32 members_count; - int index, prev; + int index = -1, prev; int link = 0; int i; int err; @@ -1181,7 +1181,7 @@ int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], goto out; out: - if (prot == MLX4_PROT_ETH) { + if (prot == MLX4_PROT_ETH && index != -1) { /* manage the steering entry for promisc mode */ if (new_entry) err = new_steering_entry(dev, port, steer, From b0409fa0940b59dc0c313eb424cfbc4730778ecc Mon Sep 17 00:00:00 2001 From: Yuval Mintz Date: Sun, 15 May 2016 14:48:05 +0300 Subject: [PATCH 1614/1649] qed: Correct PF-sanity check Seems like something broke in commit 1408cc1fa48c ("qed: Introduce VFs") and the function no longer verifies that the vf is indeed a valid one. Signed-off-by: Yuval Mintz Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed_sriov.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c index d4df406ac0a4..2c4f9b038db2 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c @@ -476,12 +476,12 @@ int qed_iov_hw_info(struct qed_hwfn *p_hwfn) static bool qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn, int vfid) { /* Check PF supports sriov */ - if (!IS_QED_SRIOV(p_hwfn->cdev) || !IS_PF_SRIOV_ALLOC(p_hwfn)) + if (IS_VF(p_hwfn->cdev) || !IS_QED_SRIOV(p_hwfn->cdev) || + !IS_PF_SRIOV_ALLOC(p_hwfn)) return false; /* Check VF validity */ - if (IS_VF(p_hwfn->cdev) || !IS_QED_SRIOV(p_hwfn->cdev) || - !IS_PF_SRIOV_ALLOC(p_hwfn)) + if (!qed_iov_is_valid_vfid(p_hwfn, vfid, true)) return false; return true; From b2b897eba66636b7fd8e56cc4f7464819623609e Mon Sep 17 00:00:00 2001 From: Yuval Mintz Date: Sun, 15 May 2016 14:48:06 +0300 Subject: [PATCH 1615/1649] qed: Improve VF interrupt reset During FLR flow, need to make sure HW is no longer capable of writing to host memory as part of its interrupt mechanisms. While we're at it, unify the logic cleaning the driver's status-blocks into using a single API function for both PFs and VFs. Signed-off-by: Yuval Mintz Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/qed/qed_int.c | 59 ++++++++++--------- drivers/net/ethernet/qlogic/qed/qed_int.h | 20 +------ .../net/ethernet/qlogic/qed/qed_reg_addr.h | 2 + drivers/net/ethernet/qlogic/qed/qed_sriov.c | 20 ++----- 4 files changed, 41 insertions(+), 60 deletions(-) diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c index bbecfa579364..09a6ad3d22dd 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_int.c +++ b/drivers/net/ethernet/qlogic/qed/qed_int.c @@ -2805,20 +2805,13 @@ void qed_int_igu_disable_int(struct qed_hwfn *p_hwfn, } #define IGU_CLEANUP_SLEEP_LENGTH (1000) -void qed_int_igu_cleanup_sb(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - u32 sb_id, - bool cleanup_set, - u16 opaque_fid - ) +static void qed_int_igu_cleanup_sb(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 sb_id, bool cleanup_set, u16 opaque_fid) { + u32 cmd_ctrl = 0, val = 0, sb_bit = 0, sb_bit_addr = 0, data = 0; u32 pxp_addr = IGU_CMD_INT_ACK_BASE + sb_id; u32 sleep_cnt = IGU_CLEANUP_SLEEP_LENGTH; - u32 data = 0; - u32 cmd_ctrl = 0; - u32 val = 0; - u32 sb_bit = 0; - u32 sb_bit_addr = 0; /* Set the data field */ SET_FIELD(data, IGU_CLEANUP_CLEANUP_SET, cleanup_set ? 1 : 0); @@ -2863,11 +2856,9 @@ void qed_int_igu_cleanup_sb(struct qed_hwfn *p_hwfn, void qed_int_igu_init_pure_rt_single(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, - u32 sb_id, - u16 opaque, - bool b_set) + u32 sb_id, u16 opaque, bool b_set) { - int pi; + int pi, i; /* Set */ if (b_set) @@ -2876,6 +2867,22 @@ void qed_int_igu_init_pure_rt_single(struct qed_hwfn *p_hwfn, /* Clear */ qed_int_igu_cleanup_sb(p_hwfn, p_ptt, sb_id, 0, opaque); + /* Wait for the IGU SB to cleanup */ + for (i = 0; i < IGU_CLEANUP_SLEEP_LENGTH; i++) { + u32 val; + + val = qed_rd(p_hwfn, p_ptt, + IGU_REG_WRITE_DONE_PENDING + ((sb_id / 32) * 4)); + if (val & (1 << (sb_id % 32))) + usleep_range(10, 20); + else + break; + } + if (i == IGU_CLEANUP_SLEEP_LENGTH) + DP_NOTICE(p_hwfn, + "Failed SB[0x%08x] still appearing in WRITE_DONE_PENDING\n", + sb_id); + /* Clear the CAU for the SB */ for (pi = 0; pi < 12; pi++) qed_wr(p_hwfn, p_ptt, @@ -2884,13 +2891,11 @@ void qed_int_igu_init_pure_rt_single(struct qed_hwfn *p_hwfn, void qed_int_igu_init_pure_rt(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, - bool b_set, - bool b_slowpath) + bool b_set, bool b_slowpath) { u32 igu_base_sb = p_hwfn->hw_info.p_igu_info->igu_base_sb; u32 igu_sb_cnt = p_hwfn->hw_info.p_igu_info->igu_sb_cnt; - u32 sb_id = 0; - u32 val = 0; + u32 sb_id = 0, val = 0; val = qed_rd(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION); val |= IGU_REG_BLOCK_CONFIGURATION_VF_CLEANUP_EN; @@ -2906,14 +2911,14 @@ void qed_int_igu_init_pure_rt(struct qed_hwfn *p_hwfn, p_hwfn->hw_info.opaque_fid, b_set); - if (b_slowpath) { - sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id; - DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, - "IGU cleaning slowpath SB [%d]\n", sb_id); - qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt, sb_id, - p_hwfn->hw_info.opaque_fid, - b_set); - } + if (!b_slowpath) + return; + + sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id; + DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, + "IGU cleaning slowpath SB [%d]\n", sb_id); + qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt, sb_id, + p_hwfn->hw_info.opaque_fid, b_set); } static u32 qed_int_igu_read_cam_block(struct qed_hwfn *p_hwfn, diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.h b/drivers/net/ethernet/qlogic/qed/qed_int.h index 295df4451e31..20b468637504 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_int.h +++ 
b/drivers/net/ethernet/qlogic/qed/qed_int.h @@ -291,24 +291,6 @@ int qed_int_unregister_cb(struct qed_hwfn *p_hwfn, */ u16 qed_int_get_sp_sb_id(struct qed_hwfn *p_hwfn); -/** - * @brief Status block cleanup. Should be called for each status - * block that will be used -> both PF / VF - * - * @param p_hwfn - * @param p_ptt - * @param sb_id - igu status block id - * @param cleanup_set - set(1) / clear(0) - * @param opaque_fid - the function for which to perform - * cleanup, for example a PF on behalf of - * its VFs. - */ -void qed_int_igu_cleanup_sb(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - u32 sb_id, - bool cleanup_set, - u16 opaque_fid); - /** * @brief Status block cleanup. Should be called for each status * block that will be used -> both PF / VF @@ -317,7 +299,7 @@ void qed_int_igu_cleanup_sb(struct qed_hwfn *p_hwfn, * @param p_ptt * @param sb_id - igu status block id * @param opaque - opaque fid of the sb owner. - * @param cleanup_set - set(1) / clear(0) + * @param b_set - set(1) / clear(0) */ void qed_int_igu_init_pure_rt_single(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h index bb7dcf12b7c2..3a6c506f0d71 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h +++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h @@ -429,6 +429,8 @@ 0x184000UL #define IGU_REG_STATISTIC_NUM_VF_MSG_SENT \ 0x180408UL +#define IGU_REG_WRITE_DONE_PENDING \ + 0x180900UL #define MISCS_REG_GENERIC_POR_0 \ 0x0096d4UL #define MCP_REG_NVM_CFG4 \ diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c index 2c4f9b038db2..7b6b4a0f5d1d 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c @@ -526,7 +526,6 @@ static void qed_iov_vf_pglue_clear_err(struct qed_hwfn *p_hwfn, static void qed_iov_vf_igu_reset(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_vf_info *vf) { - u16 igu_sb_id; int i; /* Set VF masks and configuration - pretend */ @@ -534,23 +533,14 @@ static void qed_iov_vf_igu_reset(struct qed_hwfn *p_hwfn, qed_wr(p_hwfn, p_ptt, IGU_REG_STATISTIC_NUM_VF_MSG_SENT, 0); - DP_VERBOSE(p_hwfn, QED_MSG_IOV, - "value in VF_CONFIGURATION of vf %d after write %x\n", - vf->abs_vf_id, - qed_rd(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION)); - /* unpretend */ qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid); /* iterate over all queues, clear sb consumer */ - for (i = 0; i < vf->num_sbs; i++) { - igu_sb_id = vf->igu_sbs[i]; - /* Set then clear... 
*/ - qed_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 1, - vf->opaque_fid); - qed_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 0, - vf->opaque_fid); - } + for (i = 0; i < vf->num_sbs; i++) + qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt, + vf->igu_sbs[i], + vf->opaque_fid, true); } static void qed_iov_vf_igu_set_int(struct qed_hwfn *p_hwfn, @@ -591,6 +581,8 @@ static int qed_iov_enable_vf_access(struct qed_hwfn *p_hwfn, qed_iov_vf_pglue_clear_err(p_hwfn, p_ptt, QED_VF_ABS_ID(p_hwfn, vf)); + qed_iov_vf_igu_reset(p_hwfn, p_ptt, vf); + rc = qed_mcp_config_vf_msix(p_hwfn, p_ptt, vf->abs_vf_id, vf->num_sbs); if (rc) return rc; From 079d20a6739e0b4a06d73f37b8435d443897cc0c Mon Sep 17 00:00:00 2001 From: Manish Chopra Date: Sun, 15 May 2016 14:48:07 +0300 Subject: [PATCH 1616/1649] qed: Reset link on IOV disable PF updates its VFs' bulletin boards with link configurations whenever the physical carrier changes or whenever hyper-user explicitly requires some setting of the VFs link via the hypervisor's PF. Since the bulletin board is getting cleaned as part of the IOV disable flow on the PF side, re-enabling sriov would lead to a VF that sees the carrier as 'down', until an event causing the PF to re-fill the bulletin with the link configuration would occur. To fix this we simply refelect the link state during the flows, giving the later VFs a default reflecting the PFs link state. Signed-off-by: Manish Chopra Signed-off-by: Yuval Mintz Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed_sriov.c | 90 ++++++++++++--------- 1 file changed, 51 insertions(+), 39 deletions(-) diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c index 7b6b4a0f5d1d..a977f39f534c 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c @@ -806,9 +806,51 @@ static int qed_iov_init_hw_for_vf(struct qed_hwfn *p_hwfn, return rc; } +static void qed_iov_set_link(struct qed_hwfn *p_hwfn, + u16 vfid, + struct qed_mcp_link_params *params, + struct qed_mcp_link_state *link, + struct qed_mcp_link_capabilities *p_caps) +{ + struct qed_vf_info *p_vf = qed_iov_get_vf_info(p_hwfn, + vfid, + false); + struct qed_bulletin_content *p_bulletin; + + if (!p_vf) + return; + + p_bulletin = p_vf->bulletin.p_virt; + p_bulletin->req_autoneg = params->speed.autoneg; + p_bulletin->req_adv_speed = params->speed.advertised_speeds; + p_bulletin->req_forced_speed = params->speed.forced_speed; + p_bulletin->req_autoneg_pause = params->pause.autoneg; + p_bulletin->req_forced_rx = params->pause.forced_rx; + p_bulletin->req_forced_tx = params->pause.forced_tx; + p_bulletin->req_loopback = params->loopback_mode; + + p_bulletin->link_up = link->link_up; + p_bulletin->speed = link->speed; + p_bulletin->full_duplex = link->full_duplex; + p_bulletin->autoneg = link->an; + p_bulletin->autoneg_complete = link->an_complete; + p_bulletin->parallel_detection = link->parallel_detection; + p_bulletin->pfc_enabled = link->pfc_enabled; + p_bulletin->partner_adv_speed = link->partner_adv_speed; + p_bulletin->partner_tx_flow_ctrl_en = link->partner_tx_flow_ctrl_en; + p_bulletin->partner_rx_flow_ctrl_en = link->partner_rx_flow_ctrl_en; + p_bulletin->partner_adv_pause = link->partner_adv_pause; + p_bulletin->sfp_tx_fault = link->sfp_tx_fault; + + p_bulletin->capability_speed = p_caps->speed_capabilities; +} + static int qed_iov_release_hw_for_vf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 rel_vf_id) { + struct qed_mcp_link_capabilities caps; + struct 
qed_mcp_link_params params; + struct qed_mcp_link_state link; struct qed_vf_info *vf = NULL; int rc = 0; @@ -823,6 +865,15 @@ static int qed_iov_release_hw_for_vf(struct qed_hwfn *p_hwfn, memset(&vf->p_vf_info, 0, sizeof(vf->p_vf_info)); + /* Get the link configuration back in bulletin so + * that when VFs are re-enabled they get the actual + * link configuration. + */ + memcpy(¶ms, qed_mcp_get_link_params(p_hwfn), sizeof(params)); + memcpy(&link, qed_mcp_get_link_state(p_hwfn), sizeof(link)); + memcpy(&caps, qed_mcp_get_link_capabilities(p_hwfn), sizeof(caps)); + qed_iov_set_link(p_hwfn, rel_vf_id, ¶ms, &link, &caps); + if (vf->state != VF_STOPPED) { /* Stopping the VF */ rc = qed_sp_vf_stop(p_hwfn, vf->concrete_fid, vf->opaque_fid); @@ -2542,45 +2593,6 @@ int qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *p_disabled_vfs) return found; } -void qed_iov_set_link(struct qed_hwfn *p_hwfn, - u16 vfid, - struct qed_mcp_link_params *params, - struct qed_mcp_link_state *link, - struct qed_mcp_link_capabilities *p_caps) -{ - struct qed_vf_info *p_vf = qed_iov_get_vf_info(p_hwfn, - vfid, - false); - struct qed_bulletin_content *p_bulletin; - - if (!p_vf) - return; - - p_bulletin = p_vf->bulletin.p_virt; - p_bulletin->req_autoneg = params->speed.autoneg; - p_bulletin->req_adv_speed = params->speed.advertised_speeds; - p_bulletin->req_forced_speed = params->speed.forced_speed; - p_bulletin->req_autoneg_pause = params->pause.autoneg; - p_bulletin->req_forced_rx = params->pause.forced_rx; - p_bulletin->req_forced_tx = params->pause.forced_tx; - p_bulletin->req_loopback = params->loopback_mode; - - p_bulletin->link_up = link->link_up; - p_bulletin->speed = link->speed; - p_bulletin->full_duplex = link->full_duplex; - p_bulletin->autoneg = link->an; - p_bulletin->autoneg_complete = link->an_complete; - p_bulletin->parallel_detection = link->parallel_detection; - p_bulletin->pfc_enabled = link->pfc_enabled; - p_bulletin->partner_adv_speed = link->partner_adv_speed; - p_bulletin->partner_tx_flow_ctrl_en = link->partner_tx_flow_ctrl_en; - p_bulletin->partner_rx_flow_ctrl_en = link->partner_rx_flow_ctrl_en; - p_bulletin->partner_adv_pause = link->partner_adv_pause; - p_bulletin->sfp_tx_fault = link->sfp_tx_fault; - - p_bulletin->capability_speed = p_caps->speed_capabilities; -} - static void qed_iov_get_link(struct qed_hwfn *p_hwfn, u16 vfid, struct qed_mcp_link_params *p_params, From 83f34bd436fefc70376efe6f6dd7ddcb5265d88b Mon Sep 17 00:00:00 2001 From: Yuval Mintz Date: Sun, 15 May 2016 14:48:08 +0300 Subject: [PATCH 1617/1649] qed: Allow more than 16 VFs In multi-function modes, PFs are currently limited to using 16 VFs - But that limitation would also currently apply in case there's a single PCI function exposed, where no such restriction should have existed. This lifts the restriction for the default mode; User should be able to start the maximum number of VFs as appear in the PCI config space. Signed-off-by: Yuval Mintz Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/qed/qed_sriov.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c index a977f39f534c..c325ee857ecd 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c @@ -3099,6 +3099,9 @@ static int qed_sriov_enable(struct qed_dev *cdev, int num) goto err; } + if (IS_MF_DEFAULT(hwfn)) + limit = MAX_NUM_VFS_BB / hwfn->num_funcs_on_engine; + memset(&sb_cnt_info, 0, sizeof(sb_cnt_info)); qed_int_get_num_sbs(hwfn, &sb_cnt_info); num_sbs = min_t(int, sb_cnt_info.sb_free_blk, limit); From 416cdf0635b2173e96b7a66b303e768e0749b778 Mon Sep 17 00:00:00 2001 From: Yuval Mintz Date: Sun, 15 May 2016 14:48:09 +0300 Subject: [PATCH 1618/1649] qed: VFs gracefully accept lack of PM VF's probe might log that it has no PM capability in its PCI configuration space. As this is a valid configuration, silence such prints. Signed-off-by: Yuval Mintz Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index 6ffc21da1415..56f6bc19cc3e 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -158,7 +158,7 @@ static int qed_init_pci(struct qed_dev *cdev, } cdev->pci_params.pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM); - if (cdev->pci_params.pm_cap == 0) + if (IS_PF(cdev) && !cdev->pci_params.pm_cap) DP_NOTICE(cdev, "Cannot find power management capability\n"); rc = qed_set_coherency_mask(cdev); From 15db6e0dc73ebdbf42c35f2f64907a5f4e154be1 Mon Sep 17 00:00:00 2001 From: Muhammad Falak R Wani Date: Sun, 15 May 2016 19:37:44 +0530 Subject: [PATCH 1619/1649] net/hsr: Use setup_timer and mod_timer. The function setup_timer combines the initialization of a timer with the initialization of the timer's function and data fields. The mulitiline code for timer initialization is now replaced with function setup_timer. Also, quoting the mod_timer() function comment: -> mod_timer() is a more efficient way to update the expire field of an active timer (if the timer is inactive it will be activated). Use setup_timer() and mod_timer() to setup and arm a timer, making the code compact and aid readablity. Signed-off-by: Muhammad Falak R Wani Signed-off-by: David S. 
Miller --- net/hsr/hsr_device.c | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c index 386cbce7bc51..16737cd8dae8 100644 --- a/net/hsr/hsr_device.c +++ b/net/hsr/hsr_device.c @@ -461,13 +461,9 @@ int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2], hsr->sequence_nr = HSR_SEQNR_START; hsr->sup_sequence_nr = HSR_SUP_SEQNR_START; - init_timer(&hsr->announce_timer); - hsr->announce_timer.function = hsr_announce; - hsr->announce_timer.data = (unsigned long) hsr; + setup_timer(&hsr->announce_timer, hsr_announce, (unsigned long)hsr); - init_timer(&hsr->prune_timer); - hsr->prune_timer.function = hsr_prune_nodes; - hsr->prune_timer.data = (unsigned long) hsr; + setup_timer(&hsr->prune_timer, hsr_prune_nodes, (unsigned long)hsr); ether_addr_copy(hsr->sup_multicast_addr, def_multicast_addr); hsr->sup_multicast_addr[ETH_ALEN - 1] = multicast_spec; @@ -502,8 +498,7 @@ int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2], if (res) goto fail; - hsr->prune_timer.expires = jiffies + msecs_to_jiffies(PRUNE_PERIOD); - add_timer(&hsr->prune_timer); + mod_timer(&hsr->prune_timer, jiffies + msecs_to_jiffies(PRUNE_PERIOD)); return 0; From 7e2c3aea4398d079745b9faa2c17b6cbd010f221 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Sun, 15 May 2016 23:28:29 +0200 Subject: [PATCH 1620/1649] net: also make sch_handle_egress() drop monitor ready Follow-up for 8a3a4c6e7b34 ("net: make sch_handle_ingress() drop monitor ready") to also make the egress side drop monitor ready. Also here only TC_ACT_SHOT is a clear indication that something went wrong. Hence don't provide false positives to drop monitors such as 'perf record -e skb:kfree_skb ...'. Signed-off-by: Daniel Borkmann Acked-by: Alexei Starovoitov Signed-off-by: David S. Miller --- net/core/dev.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/net/core/dev.c b/net/core/dev.c index 12436d1312ca..904ff431d570 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -3186,12 +3186,12 @@ sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev) case TC_ACT_SHOT: qdisc_qstats_cpu_drop(cl->q); *ret = NET_XMIT_DROP; - goto drop; + kfree_skb(skb); + return NULL; case TC_ACT_STOLEN: case TC_ACT_QUEUED: *ret = NET_XMIT_SUCCESS; -drop: - kfree_skb(skb); + consume_skb(skb); return NULL; case TC_ACT_REDIRECT: /* No need to push/pop skb's mac_header here on egress! */ From 11f2b494bc07f3d054687159ad6b1f3ec12a9040 Mon Sep 17 00:00:00 2001 From: Emil Tantilov Date: Wed, 4 May 2016 15:01:27 -0700 Subject: [PATCH 1621/1649] ixgbe: use correct mask when enabling sriov Swap the parameters in GENMASK in order to generate the correct mask. This change fixes Tx hangs when enabling SRIOV. Signed-off-by: Emil Tantilov Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index d08fbcfb9417..7bbf9b12bf38 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -3767,9 +3767,9 @@ static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter) reg_offset = (VMDQ_P(0) >= 32) ? 
1 : 0; /* Enable only the PF's pool for Tx/Rx */ - IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), GENMASK(vf_shift, 31)); + IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), GENMASK(31, vf_shift)); IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset ^ 1), reg_offset - 1); - IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), GENMASK(vf_shift, 31)); + IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), GENMASK(31, vf_shift)); IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset ^ 1), reg_offset - 1); if (adapter->bridge_mode == BRIDGE_MODE_VEB) IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN); From 5eee87cd51df8492d7e61c2d8b5154a15a4888b2 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Wed, 11 May 2016 13:23:34 -0700 Subject: [PATCH 1622/1649] ixgbe: Fix VLAN features error It looks like at some point I somehow transposed the location of setting the VLAN features in netdev->features and the configuration of the vlan_features. As a result the driver is now generating a warning about vlan_features being setup incorrectly. This patch corrects that by placing the update of netdev->features to include the VLAN features so that it is after the point where we write netdev->features into netdev->vlan_features. Fixes: b83e30104bd9 ("ixgbe/ixgbevf: Add support for GSO partial") Signed-off-by: Alexander Duyck Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 7bbf9b12bf38..9f3677c7e96f 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -9508,15 +9508,15 @@ skip_sriov: if (pci_using_dac) netdev->features |= NETIF_F_HIGHDMA; + netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID; + netdev->hw_enc_features |= netdev->vlan_features; + netdev->mpls_features |= NETIF_F_HW_CSUM; + /* set this bit last since it cannot be part of vlan_features */ netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX; - netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID; - netdev->hw_enc_features |= netdev->vlan_features; - netdev->mpls_features |= NETIF_F_HW_CSUM; - netdev->priv_flags |= IFF_UNICAST_FLT; netdev->priv_flags |= IFF_SUPP_NOFCS; From 4c4a6b0e8fa24a628cbca3931745d482c66d303e Mon Sep 17 00:00:00 2001 From: Philippe Reynes Date: Mon, 16 May 2016 01:30:08 +0200 Subject: [PATCH 1623/1649] net: ethernet: gianfar: use phydev from struct net_device The private structure contain a pointer to phydev, but the structure net_device already contain such pointer. So we can remove the pointer phydev in the private structure, and update the driver to use the one contained in struct net_device. Signed-off-by: Philippe Reynes Acked-by: Claudiu Manoil Signed-off-by: David S. 
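
The ixgbe fix a few hunks up rests on GENMASK()'s argument order: GENMASK(h, l) builds a contiguous mask with bits h down to l set, so the high bit comes first. An illustrative sketch with made-up values rather than the driver's actual vf_shift:

	/* GENMASK lived in <linux/bitops.h> at the time of this series;
	 * u32 comes from <linux/types.h>.
	 */
	u32 ok  = GENMASK(31, 8);	/* 0xffffff00: bits 8..31, as intended */
	u32 bad = GENMASK(8, 31);	/* swapped arguments: the shift math no
					 * longer describes bits 8..31, so the
					 * PF pool bits never get set - hence
					 * the reported Tx hang */
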
Miller --- drivers/net/ethernet/freescale/gianfar.c | 42 ++++++++++--------- drivers/net/ethernet/freescale/gianfar.h | 1 - .../net/ethernet/freescale/gianfar_ethtool.c | 24 ++++++----- 3 files changed, 35 insertions(+), 32 deletions(-) diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index a5800413f917..7615e0668acb 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -999,7 +999,7 @@ static int gfar_hwtstamp_get(struct net_device *netdev, struct ifreq *ifr) static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { - struct gfar_private *priv = netdev_priv(dev); + struct phy_device *phydev = dev->phydev; if (!netif_running(dev)) return -EINVAL; @@ -1009,10 +1009,10 @@ static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) if (cmd == SIOCGHWTSTAMP) return gfar_hwtstamp_get(dev, rq); - if (!priv->phydev) + if (!phydev) return -ENODEV; - return phy_mii_ioctl(priv->phydev, rq, cmd); + return phy_mii_ioctl(phydev, rq, cmd); } static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar, @@ -1635,7 +1635,7 @@ static int gfar_suspend(struct device *dev) gfar_start_wol_filer(priv); } else { - phy_stop(priv->phydev); + phy_stop(ndev->phydev); } return 0; @@ -1664,7 +1664,7 @@ static int gfar_resume(struct device *dev) gfar_filer_restore_table(priv); } else { - phy_start(priv->phydev); + phy_start(ndev->phydev); } gfar_start(priv); @@ -1698,8 +1698,8 @@ static int gfar_restore(struct device *dev) priv->oldspeed = 0; priv->oldduplex = -1; - if (priv->phydev) - phy_start(priv->phydev); + if (ndev->phydev) + phy_start(ndev->phydev); netif_device_attach(ndev); enable_napi(priv); @@ -1778,6 +1778,7 @@ static int init_phy(struct net_device *dev) priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ? 
GFAR_SUPPORTED_GBIT : 0; phy_interface_t interface; + struct phy_device *phydev; priv->oldlink = 0; priv->oldspeed = 0; @@ -1785,9 +1786,9 @@ static int init_phy(struct net_device *dev) interface = gfar_get_interface(dev); - priv->phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0, - interface); - if (!priv->phydev) { + phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0, + interface); + if (!phydev) { dev_err(&dev->dev, "could not attach to PHY\n"); return -ENODEV; } @@ -1796,11 +1797,11 @@ static int init_phy(struct net_device *dev) gfar_configure_serdes(dev); /* Remove any features not supported by the controller */ - priv->phydev->supported &= (GFAR_SUPPORTED | gigabit_support); - priv->phydev->advertising = priv->phydev->supported; + phydev->supported &= (GFAR_SUPPORTED | gigabit_support); + phydev->advertising = phydev->supported; /* Add support for flow control, but don't advertise it by default */ - priv->phydev->supported |= (SUPPORTED_Pause | SUPPORTED_Asym_Pause); + phydev->supported |= (SUPPORTED_Pause | SUPPORTED_Asym_Pause); return 0; } @@ -1944,7 +1945,7 @@ void stop_gfar(struct net_device *dev) /* disable ints and gracefully shut down Rx/Tx DMA */ gfar_halt(priv); - phy_stop(priv->phydev); + phy_stop(dev->phydev); free_skb_resources(priv); } @@ -2204,7 +2205,7 @@ int startup_gfar(struct net_device *ndev) priv->oldspeed = 0; priv->oldduplex = -1; - phy_start(priv->phydev); + phy_start(ndev->phydev); enable_napi(priv); @@ -2572,8 +2573,7 @@ static int gfar_close(struct net_device *dev) stop_gfar(dev); /* Disconnect from the PHY */ - phy_disconnect(priv->phydev); - priv->phydev = NULL; + phy_disconnect(dev->phydev); gfar_free_irq(priv); @@ -3379,7 +3379,7 @@ static irqreturn_t gfar_interrupt(int irq, void *grp_id) static void adjust_link(struct net_device *dev) { struct gfar_private *priv = netdev_priv(dev); - struct phy_device *phydev = priv->phydev; + struct phy_device *phydev = dev->phydev; if (unlikely(phydev->link != priv->oldlink || (phydev->link && (phydev->duplex != priv->oldduplex || @@ -3620,7 +3620,8 @@ static irqreturn_t gfar_error(int irq, void *grp_id) static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv) { - struct phy_device *phydev = priv->phydev; + struct net_device *ndev = priv->ndev; + struct phy_device *phydev = ndev->phydev; u32 val = 0; if (!phydev->duplex) @@ -3660,7 +3661,8 @@ static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv) static noinline void gfar_update_link_state(struct gfar_private *priv) { struct gfar __iomem *regs = priv->gfargrp[0].regs; - struct phy_device *phydev = priv->phydev; + struct net_device *ndev = priv->ndev; + struct phy_device *phydev = ndev->phydev; struct gfar_priv_rx_q *rx_queue = NULL; int i; diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h index cb77667971a7..373fd094f2f3 100644 --- a/drivers/net/ethernet/freescale/gianfar.h +++ b/drivers/net/ethernet/freescale/gianfar.h @@ -1153,7 +1153,6 @@ struct gfar_private { phy_interface_t interface; struct device_node *phy_node; struct device_node *tbi_node; - struct phy_device *phydev; struct mii_bus *mii_bus; int oldspeed; int oldduplex; diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c index 2c45c80d9b03..94a8dc5935e7 100644 --- a/drivers/net/ethernet/freescale/gianfar_ethtool.c +++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c @@ -188,10 +188,9 @@ static void gfar_gdrvinfo(struct net_device *dev, static int 
gfar_set_ksettings(struct net_device *dev, const struct ethtool_link_ksettings *cmd) { - struct gfar_private *priv = netdev_priv(dev); - struct phy_device *phydev = priv->phydev; + struct phy_device *phydev = dev->phydev; - if (NULL == phydev) + if (!phydev) return -ENODEV; return phy_ethtool_ksettings_set(phydev, cmd); @@ -200,10 +199,9 @@ static int gfar_set_ksettings(struct net_device *dev, static int gfar_get_ksettings(struct net_device *dev, struct ethtool_link_ksettings *cmd) { - struct gfar_private *priv = netdev_priv(dev); - struct phy_device *phydev = priv->phydev; + struct phy_device *phydev = dev->phydev; - if (NULL == phydev) + if (!phydev) return -ENODEV; return phy_ethtool_ksettings_get(phydev, cmd); @@ -233,10 +231,12 @@ static void gfar_get_regs(struct net_device *dev, struct ethtool_regs *regs, static unsigned int gfar_usecs2ticks(struct gfar_private *priv, unsigned int usecs) { + struct net_device *ndev = priv->ndev; + struct phy_device *phydev = ndev->phydev; unsigned int count; /* The timer is different, depending on the interface speed */ - switch (priv->phydev->speed) { + switch (phydev->speed) { case SPEED_1000: count = GFAR_GBIT_TIME; break; @@ -258,10 +258,12 @@ static unsigned int gfar_usecs2ticks(struct gfar_private *priv, static unsigned int gfar_ticks2usecs(struct gfar_private *priv, unsigned int ticks) { + struct net_device *ndev = priv->ndev; + struct phy_device *phydev = ndev->phydev; unsigned int count; /* The timer is different, depending on the interface speed */ - switch (priv->phydev->speed) { + switch (phydev->speed) { case SPEED_1000: count = GFAR_GBIT_TIME; break; @@ -295,7 +297,7 @@ static int gfar_gcoalesce(struct net_device *dev, if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_COALESCE)) return -EOPNOTSUPP; - if (NULL == priv->phydev) + if (!dev->phydev) return -ENODEV; rx_queue = priv->rx_queue[0]; @@ -356,7 +358,7 @@ static int gfar_scoalesce(struct net_device *dev, if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_COALESCE)) return -EOPNOTSUPP; - if (NULL == priv->phydev) + if (!dev->phydev) return -ENODEV; /* Check the bounds of the values */ @@ -520,7 +522,7 @@ static int gfar_spauseparam(struct net_device *dev, struct ethtool_pauseparam *epause) { struct gfar_private *priv = netdev_priv(dev); - struct phy_device *phydev = priv->phydev; + struct phy_device *phydev = dev->phydev; struct gfar __iomem *regs = priv->gfargrp[0].regs; u32 oldadv, newadv; From cd5f9bb4be950079e911a64972dd7311f0bf8534 Mon Sep 17 00:00:00 2001 From: Philippe Reynes Date: Mon, 16 May 2016 01:30:09 +0200 Subject: [PATCH 1624/1649] net: ethernet: gianfar: use phy_ethtool_{get|set}_link_ksettings There are two generics functions phy_ethtool_{get|set}_link_ksettings, so we can use them instead of defining the same code in the driver. Signed-off-by: Philippe Reynes Signed-off-by: David S. 
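
The link_ksettings conversions in this and the later ftgmac100, fs-enet and fec-mpc52xx patches all end the same way: once the PHY is reachable via dev->phydev, the driver's get/set wrappers merely duplicate the generic helpers, so the ops table can reference those helpers directly. A sketch for a hypothetical foo driver, not one of the drivers in this series:

	#include <linux/ethtool.h>
	#include <linux/phy.h>

	/* phy_ethtool_{get,set}_link_ksettings() operate on dev->phydev
	 * themselves and return -ENODEV when no PHY is attached, which is
	 * exactly what the removed per-driver wrappers did.
	 */
	static const struct ethtool_ops foo_ethtool_ops = {
		.get_link		= ethtool_op_get_link,
		.get_link_ksettings	= phy_ethtool_get_link_ksettings,
		.set_link_ksettings	= phy_ethtool_set_link_ksettings,
	};
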
Miller --- .../net/ethernet/freescale/gianfar_ethtool.c | 27 ++----------------- 1 file changed, 2 insertions(+), 25 deletions(-) diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c index 94a8dc5935e7..56588f2e1d91 100644 --- a/drivers/net/ethernet/freescale/gianfar_ethtool.c +++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c @@ -184,29 +184,6 @@ static void gfar_gdrvinfo(struct net_device *dev, strlcpy(drvinfo->bus_info, "N/A", sizeof(drvinfo->bus_info)); } - -static int gfar_set_ksettings(struct net_device *dev, - const struct ethtool_link_ksettings *cmd) -{ - struct phy_device *phydev = dev->phydev; - - if (!phydev) - return -ENODEV; - - return phy_ethtool_ksettings_set(phydev, cmd); -} - -static int gfar_get_ksettings(struct net_device *dev, - struct ethtool_link_ksettings *cmd) -{ - struct phy_device *phydev = dev->phydev; - - if (!phydev) - return -ENODEV; - - return phy_ethtool_ksettings_get(phydev, cmd); -} - /* Return the length of the register structure */ static int gfar_reglen(struct net_device *dev) { @@ -1580,6 +1557,6 @@ const struct ethtool_ops gfar_ethtool_ops = { .set_rxnfc = gfar_set_nfc, .get_rxnfc = gfar_get_nfc, .get_ts_info = gfar_get_ts_info, - .get_link_ksettings = gfar_get_ksettings, - .set_link_ksettings = gfar_set_ksettings, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, }; From b3c40adcc9891a709d30b620f0cabd4679a355b5 Mon Sep 17 00:00:00 2001 From: Philippe Reynes Date: Mon, 16 May 2016 01:35:13 +0200 Subject: [PATCH 1625/1649] net: ethernet: ftgmac100: use phydev from struct net_device The private structure contain a pointer to phydev, but the structure net_device already contain such pointer. So we can remove the pointer phydev in the private structure, and update the driver to use the one contained in struct net_device. Signed-off-by: Philippe Reynes Signed-off-by: David S. 
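
As in the gianfar patch above and the fs-enet/fec-mpc52xx ones below, the phydev clean-up is mechanical: drop the cached pointer from the private structure and read net_device->phydev, which phy_connect()/of_phy_connect() already populate. The shape of the change, sketched for a hypothetical foo driver:

	struct foo_priv {
		struct napi_struct napi;
		struct mii_bus *mii_bus;
		/* struct phy_device *phydev;  -- removed, duplicates ndev->phydev */
	};

	static int foo_do_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
	{
		struct phy_device *phydev = ndev->phydev;	/* set at connect time */

		if (!phydev)
			return -ENODEV;

		return phy_mii_ioctl(phydev, ifr, cmd);
	}
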
Miller --- drivers/net/ethernet/faraday/ftgmac100.c | 24 ++++++++---------------- 1 file changed, 8 insertions(+), 16 deletions(-) diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c index 84384e1585a5..9cc23c3fcbf1 100644 --- a/drivers/net/ethernet/faraday/ftgmac100.c +++ b/drivers/net/ethernet/faraday/ftgmac100.c @@ -71,7 +71,6 @@ struct ftgmac100 { struct napi_struct napi; struct mii_bus *mii_bus; - struct phy_device *phydev; int old_speed; }; @@ -807,7 +806,7 @@ err: static void ftgmac100_adjust_link(struct net_device *netdev) { struct ftgmac100 *priv = netdev_priv(netdev); - struct phy_device *phydev = priv->phydev; + struct phy_device *phydev = netdev->phydev; int ier; if (phydev->speed == priv->old_speed) @@ -850,7 +849,6 @@ static int ftgmac100_mii_probe(struct ftgmac100 *priv) return PTR_ERR(phydev); } - priv->phydev = phydev; return 0; } @@ -942,17 +940,13 @@ static void ftgmac100_get_drvinfo(struct net_device *netdev, static int ftgmac100_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd) { - struct ftgmac100 *priv = netdev_priv(netdev); - - return phy_ethtool_gset(priv->phydev, cmd); + return phy_ethtool_gset(netdev->phydev, cmd); } static int ftgmac100_set_settings(struct net_device *netdev, struct ethtool_cmd *cmd) { - struct ftgmac100 *priv = netdev_priv(netdev); - - return phy_ethtool_sset(priv->phydev, cmd); + return phy_ethtool_sset(netdev->phydev, cmd); } static const struct ethtool_ops ftgmac100_ethtool_ops = { @@ -1085,7 +1079,7 @@ static int ftgmac100_open(struct net_device *netdev) ftgmac100_init_hw(priv); ftgmac100_start_hw(priv, 10); - phy_start(priv->phydev); + phy_start(netdev->phydev); napi_enable(&priv->napi); netif_start_queue(netdev); @@ -1111,7 +1105,7 @@ static int ftgmac100_stop(struct net_device *netdev) netif_stop_queue(netdev); napi_disable(&priv->napi); - phy_stop(priv->phydev); + phy_stop(netdev->phydev); ftgmac100_stop_hw(priv); free_irq(priv->irq, netdev); @@ -1152,9 +1146,7 @@ static int ftgmac100_hard_start_xmit(struct sk_buff *skb, /* optional */ static int ftgmac100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) { - struct ftgmac100 *priv = netdev_priv(netdev); - - return phy_mii_ioctl(priv->phydev, ifr, cmd); + return phy_mii_ioctl(netdev->phydev, ifr, cmd); } static const struct net_device_ops ftgmac100_netdev_ops = { @@ -1275,7 +1267,7 @@ static int ftgmac100_probe(struct platform_device *pdev) return 0; err_register_netdev: - phy_disconnect(priv->phydev); + phy_disconnect(netdev->phydev); err_mii_probe: mdiobus_unregister(priv->mii_bus); err_register_mdiobus: @@ -1301,7 +1293,7 @@ static int __exit ftgmac100_remove(struct platform_device *pdev) unregister_netdev(netdev); - phy_disconnect(priv->phydev); + phy_disconnect(netdev->phydev); mdiobus_unregister(priv->mii_bus); mdiobus_free(priv->mii_bus); From fd24d72ca9b52558b1511edc814951cdfe5f21cf Mon Sep 17 00:00:00 2001 From: Philippe Reynes Date: Mon, 16 May 2016 01:35:14 +0200 Subject: [PATCH 1626/1649] net: ethernet: ftgmac100: use phy_ethtool_{get|set}_link_ksettings There are two generics functions phy_ethtool_{get|set}_link_ksettings, so we can use them instead of defining the same code in the driver. Signed-off-by: Philippe Reynes Signed-off-by: David S. 
Miller --- drivers/net/ethernet/faraday/ftgmac100.c | 16 ++-------------- 1 file changed, 2 insertions(+), 14 deletions(-) diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c index 9cc23c3fcbf1..e7cf313e359b 100644 --- a/drivers/net/ethernet/faraday/ftgmac100.c +++ b/drivers/net/ethernet/faraday/ftgmac100.c @@ -937,23 +937,11 @@ static void ftgmac100_get_drvinfo(struct net_device *netdev, strlcpy(info->bus_info, dev_name(&netdev->dev), sizeof(info->bus_info)); } -static int ftgmac100_get_settings(struct net_device *netdev, - struct ethtool_cmd *cmd) -{ - return phy_ethtool_gset(netdev->phydev, cmd); -} - -static int ftgmac100_set_settings(struct net_device *netdev, - struct ethtool_cmd *cmd) -{ - return phy_ethtool_sset(netdev->phydev, cmd); -} - static const struct ethtool_ops ftgmac100_ethtool_ops = { - .set_settings = ftgmac100_set_settings, - .get_settings = ftgmac100_get_settings, .get_drvinfo = ftgmac100_get_drvinfo, .get_link = ethtool_op_get_link, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, }; /****************************************************************************** From 77f57761466e9aef2b57acf8ce07df11bf360e34 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Sun, 15 May 2016 18:16:38 -0700 Subject: [PATCH 1627/1649] fq_codel: fix memory limitation drift memory_usage must be decreased in dequeue_func(), not in fq_codel_dequeue(), otherwise packets dropped by Codel algo are missing this decrease. Also we need to clear memory_usage in fq_codel_reset() Fixes: 95b58430abe7 ("fq_codel: add memory limitation per queue") Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- net/sched/sch_fq_codel.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c index bb8bd9314629..6883a8971562 100644 --- a/net/sched/sch_fq_codel.c +++ b/net/sched/sch_fq_codel.c @@ -262,6 +262,7 @@ static struct sk_buff *dequeue_func(struct codel_vars *vars, void *ctx) if (flow->head) { skb = dequeue_head(flow); q->backlogs[flow - q->flows] -= qdisc_pkt_len(skb); + q->memory_usage -= skb->truesize; sch->q.qlen--; sch->qstats.backlog -= qdisc_pkt_len(skb); } @@ -318,7 +319,6 @@ begin: list_del_init(&flow->flowchain); goto begin; } - q->memory_usage -= skb->truesize; qdisc_bstats_update(sch, skb); flow->deficit -= qdisc_pkt_len(skb); /* We cant call qdisc_tree_reduce_backlog() if our qlen is 0, @@ -355,6 +355,7 @@ static void fq_codel_reset(struct Qdisc *sch) } memset(q->backlogs, 0, q->flows_cnt * sizeof(u32)); sch->q.qlen = 0; + q->memory_usage = 0; } static const struct nla_policy fq_codel_policy[TCA_FQ_CODEL_MAX + 1] = { From 45e093ae2830cd1264677d47ff9a95a71f5d9f9c Mon Sep 17 00:00:00 2001 From: Richard Alpe Date: Mon, 16 May 2016 11:14:54 +0200 Subject: [PATCH 1628/1649] tipc: check nl sock before parsing nested attributes Make sure the socket for which the user is listing publication exists before parsing the socket netlink attributes. Prior to this patch a call without any socket caused a NULL pointer dereference in tipc_nl_publ_dump(). Tested-and-reported-by: Baozeng Ding Signed-off-by: Richard Alpe Acked-by: Jon Maloy Signed-off-by: David S. 
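
The tipc fix above is the standard defensive step for netlink handlers: confirm that the top-level container attribute is actually present before handing it to nla_parse_nested(), since a request may legitimately (or maliciously) omit it. A generic sketch using hypothetical attribute names and the nla_parse_nested() signature of this era (no extack argument yet):

	struct nlattr *props[MY_ATTR_MAX + 1];	/* hypothetical attribute space */
	int err;

	if (!attrs[MY_NEST])		/* absent attribute: refuse early    */
		return -EINVAL;		/* instead of parsing a NULL nlattr  */

	err = nla_parse_nested(props, MY_ATTR_MAX, attrs[MY_NEST], my_policy);
	if (err)
		return err;
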
Miller --- net/tipc/socket.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/net/tipc/socket.c b/net/tipc/socket.c index 12628890c219..3b7a79991d55 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c @@ -2853,6 +2853,9 @@ int tipc_nl_publ_dump(struct sk_buff *skb, struct netlink_callback *cb) if (err) return err; + if (!attrs[TIPC_NLA_SOCK]) + return -EINVAL; + err = nla_parse_nested(sock, TIPC_NLA_SOCK_MAX, attrs[TIPC_NLA_SOCK], tipc_nl_sock_policy); From 92964c79b357efd980812c4de5c1fd2ec8bb5520 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Mon, 16 May 2016 17:28:16 +0800 Subject: [PATCH 1629/1649] netlink: Fix dump skb leak/double free When we free cb->skb after a dump, we do it after releasing the lock. This means that a new dump could have started in the time being and we'll end up freeing their skb instead of ours. This patch saves the skb and module before we unlock so we free the right memory. Fixes: 16b304f3404f ("netlink: Eliminate kmalloc in netlink dump operation.") Reported-by: Baozeng Ding Signed-off-by: Herbert Xu Acked-by: Cong Wang Signed-off-by: David S. Miller --- net/netlink/af_netlink.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index aeefe127691a..627f898c05b9 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -2059,6 +2059,7 @@ static int netlink_dump(struct sock *sk) struct netlink_callback *cb; struct sk_buff *skb = NULL; struct nlmsghdr *nlh; + struct module *module; int len, err = -ENOBUFS; int alloc_min_size; int alloc_size; @@ -2134,9 +2135,11 @@ static int netlink_dump(struct sock *sk) cb->done(cb); nlk->cb_running = false; + module = cb->module; + skb = cb->skb; mutex_unlock(nlk->cb_mutex); - module_put(cb->module); - consume_skb(cb->skb); + module_put(module); + consume_skb(skb); return 0; errout_skb: From c1c511a275c9c0a14836c2071d37946df3862c25 Mon Sep 17 00:00:00 2001 From: Philippe Reynes Date: Mon, 16 May 2016 16:52:36 +0200 Subject: [PATCH 1630/1649] net: ethernet: fs-enet: use phydev from struct net_device The private structure contain a pointer to phydev, but the structure net_device already contain such pointer. So we can remove the pointer phydev in the private structure, and update the driver to use the one contained in struct net_device. Signed-off-by: Philippe Reynes Signed-off-by: David S. 
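
The netlink_dump() race fixed above is an ordering problem: anything the callback owns that will be released after cb_mutex is dropped has to be copied into locals while the mutex is still held, because a new dump may reuse cb the moment the lock goes away. The corrected tail of the function, annotated (field names as in the patch, error paths omitted):

	cb->done(cb);
	nlk->cb_running = false;
	module = cb->module;	/* snapshot under cb_mutex ...              */
	skb = cb->skb;
	mutex_unlock(nlk->cb_mutex);

	module_put(module);	/* ... and release via the local copies, so */
	consume_skb(skb);	/* a freshly started dump keeps its own skb */
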
Miller --- .../ethernet/freescale/fs_enet/fs_enet-main.c | 31 +++++++------------ .../net/ethernet/freescale/fs_enet/fs_enet.h | 1 - .../net/ethernet/freescale/fs_enet/mac-fcc.c | 4 +-- .../net/ethernet/freescale/fs_enet/mac-fec.c | 6 ++-- .../net/ethernet/freescale/fs_enet/mac-scc.c | 2 +- 5 files changed, 18 insertions(+), 26 deletions(-) diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c index da90b5ad6e36..dc3700ca48d2 100644 --- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c +++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c @@ -652,13 +652,13 @@ static void fs_timeout(struct net_device *dev) spin_lock_irqsave(&fep->lock, flags); if (dev->flags & IFF_UP) { - phy_stop(fep->phydev); + phy_stop(dev->phydev); (*fep->ops->stop)(dev); (*fep->ops->restart)(dev); - phy_start(fep->phydev); + phy_start(dev->phydev); } - phy_start(fep->phydev); + phy_start(dev->phydev); wake = fep->tx_free && !(CBDR_SC(fep->cur_tx) & BD_ENET_TX_READY); spin_unlock_irqrestore(&fep->lock, flags); @@ -672,7 +672,7 @@ static void fs_timeout(struct net_device *dev) static void generic_adjust_link(struct net_device *dev) { struct fs_enet_private *fep = netdev_priv(dev); - struct phy_device *phydev = fep->phydev; + struct phy_device *phydev = dev->phydev; int new_state = 0; if (phydev->link) { @@ -741,8 +741,6 @@ static int fs_init_phy(struct net_device *dev) return -ENODEV; } - fep->phydev = phydev; - return 0; } @@ -776,7 +774,7 @@ static int fs_enet_open(struct net_device *dev) napi_disable(&fep->napi_tx); return err; } - phy_start(fep->phydev); + phy_start(dev->phydev); netif_start_queue(dev); @@ -792,7 +790,7 @@ static int fs_enet_close(struct net_device *dev) netif_carrier_off(dev); napi_disable(&fep->napi); napi_disable(&fep->napi_tx); - phy_stop(fep->phydev); + phy_stop(dev->phydev); spin_lock_irqsave(&fep->lock, flags); spin_lock(&fep->tx_lock); @@ -801,8 +799,7 @@ static int fs_enet_close(struct net_device *dev) spin_unlock_irqrestore(&fep->lock, flags); /* release any irqs */ - phy_disconnect(fep->phydev); - fep->phydev = NULL; + phy_disconnect(dev->phydev); free_irq(fep->interrupt, dev); return 0; @@ -850,10 +847,9 @@ static void fs_get_regs(struct net_device *dev, struct ethtool_regs *regs, static int fs_get_ksettings(struct net_device *dev, struct ethtool_link_ksettings *cmd) { - struct fs_enet_private *fep = netdev_priv(dev); - struct phy_device *phydev = fep->phydev; + struct phy_device *phydev = dev->phydev; - if (!fep->phydev) + if (!phydev) return -ENODEV; return phy_ethtool_ksettings_get(phydev, cmd); @@ -862,10 +858,9 @@ static int fs_get_ksettings(struct net_device *dev, static int fs_set_ksettings(struct net_device *dev, const struct ethtool_link_ksettings *cmd) { - struct fs_enet_private *fep = netdev_priv(dev); - struct phy_device *phydev = fep->phydev; + struct phy_device *phydev = dev->phydev; - if (!fep->phydev) + if (!phydev) return -ENODEV; return phy_ethtool_ksettings_set(phydev, cmd); @@ -903,12 +898,10 @@ static const struct ethtool_ops fs_ethtool_ops = { static int fs_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { - struct fs_enet_private *fep = netdev_priv(dev); - if (!netif_running(dev)) return -EINVAL; - return phy_mii_ioctl(fep->phydev, rq, cmd); + return phy_mii_ioctl(dev->phydev, rq, cmd); } extern int fs_mii_connect(struct net_device *dev); diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet.h b/drivers/net/ethernet/freescale/fs_enet/fs_enet.h index 
f184d8f952e2..e29f54a35210 100644 --- a/drivers/net/ethernet/freescale/fs_enet/fs_enet.h +++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet.h @@ -149,7 +149,6 @@ struct fs_enet_private { unsigned int last_mii_status; int interrupt; - struct phy_device *phydev; int oldduplex, oldspeed, oldlink; /* current settings */ /* event masks */ diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c b/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c index 1ba359f17ec6..d71761a34022 100644 --- a/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c +++ b/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c @@ -370,7 +370,7 @@ static void restart(struct net_device *dev) /* adjust to speed (for RMII mode) */ if (fpi->use_rmii) { - if (fep->phydev->speed == 100) + if (dev->phydev->speed == 100) C8(fcccp, fcc_gfemr, 0x20); else S8(fcccp, fcc_gfemr, 0x20); @@ -396,7 +396,7 @@ static void restart(struct net_device *dev) S32(fccp, fcc_fpsmr, FCC_PSMR_RMII); /* adjust to duplex mode */ - if (fep->phydev->duplex) + if (dev->phydev->duplex) S32(fccp, fcc_fpsmr, FCC_PSMR_FDE | FCC_PSMR_LPB); else C32(fccp, fcc_fpsmr, FCC_PSMR_FDE | FCC_PSMR_LPB); diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c index bade2f8f9b5c..35a318ed3a62 100644 --- a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c +++ b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c @@ -254,7 +254,7 @@ static void restart(struct net_device *dev) int r; u32 addrhi, addrlo; - struct mii_bus *mii = fep->phydev->mdio.bus; + struct mii_bus *mii = dev->phydev->mdio.bus; struct fec_info* fec_inf = mii->priv; r = whack_reset(fep->fec.fecp); @@ -333,7 +333,7 @@ static void restart(struct net_device *dev) /* * adjust to duplex mode */ - if (fep->phydev->duplex) { + if (dev->phydev->duplex) { FC(fecp, r_cntrl, FEC_RCNTRL_DRT); FS(fecp, x_cntrl, FEC_TCNTRL_FDEN); /* FD enable */ } else { @@ -363,7 +363,7 @@ static void stop(struct net_device *dev) const struct fs_platform_info *fpi = fep->fpi; struct fec __iomem *fecp = fep->fec.fecp; - struct fec_info *feci = fep->phydev->mdio.bus->priv; + struct fec_info *feci = dev->phydev->mdio.bus->priv; int i; diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-scc.c b/drivers/net/ethernet/freescale/fs_enet/mac-scc.c index 7a184e8816a4..e8b9c33d35b4 100644 --- a/drivers/net/ethernet/freescale/fs_enet/mac-scc.c +++ b/drivers/net/ethernet/freescale/fs_enet/mac-scc.c @@ -352,7 +352,7 @@ static void restart(struct net_device *dev) W16(sccp, scc_psmr, SCC_PSMR_ENCRC | SCC_PSMR_NIB22); /* Set full duplex mode if needed */ - if (fep->phydev->duplex) + if (dev->phydev->duplex) S16(sccp, scc_psmr, SCC_PSMR_LPB | SCC_PSMR_FDE); /* Restore multicast and promiscuous settings */ From 73d9011c1f50ad3f82886d2187799acba1feb236 Mon Sep 17 00:00:00 2001 From: Philippe Reynes Date: Mon, 16 May 2016 16:52:37 +0200 Subject: [PATCH 1631/1649] net: ethernet: fs-enet: use phy_ethtool_{get|set}_link_ksettings There are two generics functions phy_ethtool_{get|set}_link_ksettings, so we can use them instead of defining the same code in the driver. Signed-off-by: Philippe Reynes Signed-off-by: David S. 
Miller --- .../ethernet/freescale/fs_enet/fs_enet-main.c | 26 ++----------------- 1 file changed, 2 insertions(+), 24 deletions(-) diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c index dc3700ca48d2..61fd486c50bb 100644 --- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c +++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c @@ -844,28 +844,6 @@ static void fs_get_regs(struct net_device *dev, struct ethtool_regs *regs, regs->version = 0; } -static int fs_get_ksettings(struct net_device *dev, - struct ethtool_link_ksettings *cmd) -{ - struct phy_device *phydev = dev->phydev; - - if (!phydev) - return -ENODEV; - - return phy_ethtool_ksettings_get(phydev, cmd); -} - -static int fs_set_ksettings(struct net_device *dev, - const struct ethtool_link_ksettings *cmd) -{ - struct phy_device *phydev = dev->phydev; - - if (!phydev) - return -ENODEV; - - return phy_ethtool_ksettings_set(phydev, cmd); -} - static int fs_nway_reset(struct net_device *dev) { return 0; @@ -892,8 +870,8 @@ static const struct ethtool_ops fs_ethtool_ops = { .set_msglevel = fs_set_msglevel, .get_regs = fs_get_regs, .get_ts_info = ethtool_op_get_ts_info, - .get_link_ksettings = fs_get_ksettings, - .set_link_ksettings = fs_set_ksettings, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, }; static int fs_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) From a8df35d45800c2af2b9bac04a8f9d4e426862e4d Mon Sep 17 00:00:00 2001 From: Ezequiel Garcia Date: Mon, 16 May 2016 12:41:07 -0300 Subject: [PATCH 1632/1649] stmmac: hardware TX COE doesn't work when force_thresh_dma_mode is set Commit f748be531d70 ("stmmac: support new GMAC4") reverted a previous fix by mistake. This commit re-applies said fix: commit dec2165ff38a99f937fe61875d102c6c8596c815 Author: Sonic Zhang Date: Thu Jan 22 14:55:57 2015 +0800 stmmac: hardware TX COE doesn't work when force_thresh_dma_mode is set Clear the TX COE bit when force_thresh_dma_mode is set even hardware dma capability says support. Tested on BF609. Signed-off-by: Sonic Zhang Acked-by: Giuseppe Cavallaro Signed-off-by: David S. Miller Tested on LPC4350 Hitex board. Fixes: f748be531d70 ("stmmac: support new GMAC4") Signed-off-by: Ezequiel Garcia Tested-by: Joachim Eastwood Signed-off-by: David S. Miller --- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index fd5ab7bfdb76..eac45d0c75e2 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -3131,7 +3131,12 @@ static int stmmac_hw_init(struct stmmac_priv *priv) priv->plat->enh_desc = priv->dma_cap.enh_desc; priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up; - priv->plat->tx_coe = priv->dma_cap.tx_coe; + /* TXCOE doesn't work in thresh DMA mode */ + if (priv->plat->force_thresh_dma_mode) + priv->plat->tx_coe = 0; + else + priv->plat->tx_coe = priv->dma_cap.tx_coe; + /* In case of GMAC4 rx_coe is from HW cap register. 
*/ priv->plat->rx_coe = priv->dma_cap.rx_coe; From 9295c034726e025395e6eff3013fa9e3753bcb39 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Mon, 16 May 2016 23:06:53 +0200 Subject: [PATCH 1633/1649] bpf, doc: fix typo on bpf_asm descriptions Fix description of some of the bpf_asm tool related jump instructions and generally move them to format A k. Reported-by: Sebastian Amend Signed-off-by: Daniel Borkmann Acked-by: Alexei Starovoitov Signed-off-by: David S. Miller --- Documentation/networking/filter.txt | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/Documentation/networking/filter.txt b/Documentation/networking/filter.txt index 6aef0b5f3bc7..b9a4edf21ade 100644 --- a/Documentation/networking/filter.txt +++ b/Documentation/networking/filter.txt @@ -216,14 +216,14 @@ opcodes as defined in linux/filter.h stand for: jmp 6 Jump to label ja 6 Jump to label - jeq 7, 8 Jump on k == A - jneq 8 Jump on k != A - jne 8 Jump on k != A - jlt 8 Jump on k < A - jle 8 Jump on k <= A - jgt 7, 8 Jump on k > A - jge 7, 8 Jump on k >= A - jset 7, 8 Jump on k & A + jeq 7, 8 Jump on A == k + jneq 8 Jump on A != k + jne 8 Jump on A != k + jlt 8 Jump on A < k + jle 8 Jump on A <= k + jgt 7, 8 Jump on A > k + jge 7, 8 Jump on A >= k + jset 7, 8 Jump on A & k add 0, 4 A + sub 0, 4 A - From a54d20f85ab12fab4873d2947b2be965430bf609 Mon Sep 17 00:00:00 2001 From: Philippe Reynes Date: Tue, 17 May 2016 00:32:33 +0200 Subject: [PATCH 1634/1649] net: ethernet: fec-mpc52xx: use phydev from struct net_device The private structure contain a pointer to phydev, but the structure net_device already contain such pointer. So we can remove the pointer phydev in the private structure, and update the driver to use the one contained in struct net_device. Signed-off-by: Philippe Reynes Signed-off-by: David S. 
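
The filter.txt correction above is only about the direction of the comparison: classic BPF conditional jumps test the accumulator A against the immediate k. The same semantics seen from C, using the classic-BPF macros from <linux/filter.h> (illustrative filter that accepts only IPv4 frames):

	#include <linux/filter.h>	/* struct sock_filter, BPF_STMT, BPF_JUMP */
	#include <linux/if_ether.h>	/* ETH_P_IP */

	static struct sock_filter ipv4_only[] = {
		BPF_STMT(BPF_LD  | BPF_H   | BPF_ABS, 12),		/* A = EtherType */
		BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, ETH_P_IP, 0, 1),	/* A == k ?      */
		BPF_STMT(BPF_RET | BPF_K, 0xffff),			/* yes: accept   */
		BPF_STMT(BPF_RET | BPF_K, 0),				/* no: drop      */
	};
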
Miller --- drivers/net/ethernet/freescale/fec_mpc52xx.c | 43 +++++++++----------- 1 file changed, 20 insertions(+), 23 deletions(-) diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c index f44471485d00..bcf0600d7cd8 100644 --- a/drivers/net/ethernet/freescale/fec_mpc52xx.c +++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c @@ -66,7 +66,6 @@ struct mpc52xx_fec_priv { /* MDIO link details */ unsigned int mdio_speed; struct device_node *phy_node; - struct phy_device *phydev; enum phy_state link; int seven_wire_mode; }; @@ -165,7 +164,7 @@ static int mpc52xx_fec_alloc_rx_buffers(struct net_device *dev, struct bcom_task static void mpc52xx_fec_adjust_link(struct net_device *dev) { struct mpc52xx_fec_priv *priv = netdev_priv(dev); - struct phy_device *phydev = priv->phydev; + struct phy_device *phydev = dev->phydev; int new_state = 0; if (phydev->link != PHY_DOWN) { @@ -215,16 +214,17 @@ static void mpc52xx_fec_adjust_link(struct net_device *dev) static int mpc52xx_fec_open(struct net_device *dev) { struct mpc52xx_fec_priv *priv = netdev_priv(dev); + struct phy_device *phydev = NULL; int err = -EBUSY; if (priv->phy_node) { - priv->phydev = of_phy_connect(priv->ndev, priv->phy_node, - mpc52xx_fec_adjust_link, 0, 0); - if (!priv->phydev) { + phydev = of_phy_connect(priv->ndev, priv->phy_node, + mpc52xx_fec_adjust_link, 0, 0); + if (!phydev) { dev_err(&dev->dev, "of_phy_connect failed\n"); return -ENODEV; } - phy_start(priv->phydev); + phy_start(phydev); } if (request_irq(dev->irq, mpc52xx_fec_interrupt, IRQF_SHARED, @@ -268,10 +268,9 @@ static int mpc52xx_fec_open(struct net_device *dev) free_ctrl_irq: free_irq(dev->irq, dev); free_phy: - if (priv->phydev) { - phy_stop(priv->phydev); - phy_disconnect(priv->phydev); - priv->phydev = NULL; + if (phydev) { + phy_stop(phydev); + phy_disconnect(phydev); } return err; @@ -280,6 +279,7 @@ static int mpc52xx_fec_open(struct net_device *dev) static int mpc52xx_fec_close(struct net_device *dev) { struct mpc52xx_fec_priv *priv = netdev_priv(dev); + struct phy_device *phydev = dev->phydev; netif_stop_queue(dev); @@ -291,11 +291,10 @@ static int mpc52xx_fec_close(struct net_device *dev) free_irq(priv->r_irq, dev); free_irq(priv->t_irq, dev); - if (priv->phydev) { + if (phydev) { /* power down phy */ - phy_stop(priv->phydev); - phy_disconnect(priv->phydev); - priv->phydev = NULL; + phy_stop(phydev); + phy_disconnect(phydev); } return 0; @@ -766,10 +765,9 @@ static void mpc52xx_fec_reset(struct net_device *dev) static int mpc52xx_fec_get_ksettings(struct net_device *dev, struct ethtool_link_ksettings *cmd) { - struct mpc52xx_fec_priv *priv = netdev_priv(dev); - struct phy_device *phydev = priv->phydev; + struct phy_device *phydev = dev->phydev; - if (!priv->phydev) + if (!phydev) return -ENODEV; return phy_ethtool_ksettings_get(phydev, cmd); @@ -778,10 +776,9 @@ static int mpc52xx_fec_get_ksettings(struct net_device *dev, static int mpc52xx_fec_set_ksettings(struct net_device *dev, const struct ethtool_link_ksettings *cmd) { - struct mpc52xx_fec_priv *priv = netdev_priv(dev); - struct phy_device *phydev = priv->phydev; + struct phy_device *phydev = dev->phydev; - if (!priv->phydev) + if (!phydev) return -ENODEV; return phy_ethtool_ksettings_set(phydev, cmd); @@ -811,12 +808,12 @@ static const struct ethtool_ops mpc52xx_fec_ethtool_ops = { static int mpc52xx_fec_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { - struct mpc52xx_fec_priv *priv = netdev_priv(dev); + struct phy_device *phydev = 
dev->phydev; - if (!priv->phydev) + if (!phydev) return -ENOTSUPP; - return phy_mii_ioctl(priv->phydev, rq, cmd); + return phy_mii_ioctl(phydev, rq, cmd); } static const struct net_device_ops mpc52xx_fec_netdev_ops = { From b1725423732dd7b58856adcc03cbf1145fcea491 Mon Sep 17 00:00:00 2001 From: Philippe Reynes Date: Tue, 17 May 2016 00:32:34 +0200 Subject: [PATCH 1635/1649] net: ethernet: fec-mpc52xx: use phy_ethtool_{get|set}_link_ksettings There are two generics functions phy_ethtool_{get|set}_link_ksettings, so we can use them instead of defining the same code in the driver. Signed-off-by: Philippe Reynes Signed-off-by: David S. Miller --- drivers/net/ethernet/freescale/fec_mpc52xx.c | 26 ++------------------ 1 file changed, 2 insertions(+), 24 deletions(-) diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c index bcf0600d7cd8..446ae9d60c71 100644 --- a/drivers/net/ethernet/freescale/fec_mpc52xx.c +++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c @@ -762,28 +762,6 @@ static void mpc52xx_fec_reset(struct net_device *dev) /* ethtool interface */ -static int mpc52xx_fec_get_ksettings(struct net_device *dev, - struct ethtool_link_ksettings *cmd) -{ - struct phy_device *phydev = dev->phydev; - - if (!phydev) - return -ENODEV; - - return phy_ethtool_ksettings_get(phydev, cmd); -} - -static int mpc52xx_fec_set_ksettings(struct net_device *dev, - const struct ethtool_link_ksettings *cmd) -{ - struct phy_device *phydev = dev->phydev; - - if (!phydev) - return -ENODEV; - - return phy_ethtool_ksettings_set(phydev, cmd); -} - static u32 mpc52xx_fec_get_msglevel(struct net_device *dev) { struct mpc52xx_fec_priv *priv = netdev_priv(dev); @@ -801,8 +779,8 @@ static const struct ethtool_ops mpc52xx_fec_ethtool_ops = { .get_msglevel = mpc52xx_fec_get_msglevel, .set_msglevel = mpc52xx_fec_set_msglevel, .get_ts_info = ethtool_op_get_ts_info, - .get_link_ksettings = mpc52xx_fec_get_ksettings, - .set_link_ksettings = mpc52xx_fec_set_ksettings, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, }; From f82731b454a953b6ff2aa1f601f65dec52551600 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Tue, 17 May 2016 11:09:20 +0300 Subject: [PATCH 1636/1649] qed: Remove a stray tab This line was indented more than it should be. Signed-off-by: Dan Carpenter Acked-by: Yuval Mintz Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index 56f6bc19cc3e..8b22f87033ce 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -177,7 +177,7 @@ static int qed_init_pci(struct qed_dev *cdev, } if (IS_PF(cdev)) { - cdev->db_phys_addr = pci_resource_start(cdev->pdev, 2); + cdev->db_phys_addr = pci_resource_start(cdev->pdev, 2); cdev->db_size = pci_resource_len(cdev->pdev, 2); cdev->doorbells = ioremap_wc(cdev->db_phys_addr, cdev->db_size); if (!cdev->doorbells) { From ccf928249c92b04ec9527e97a7c6b2cd8cd9dc10 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Tue, 17 May 2016 11:05:34 +0200 Subject: [PATCH 1637/1649] ravb: Add missing free_irq() calls to ravb_close() When reopening the network device on ra7795/salvator-x, e.g. after a DHCP timeout: IP-Config: Reopening network devices... genirq: Flags mismatch irq 139. 00000000 (eth0:ch0:rx_be) vs. 
00000000 (ravb e6800000.ethernet eth0: cannot request IRQ eth0:ch0:rx_be IP-Config: Failed to open eth0 IP-Config: No network devices available The "mismatch" is due to requesting an IRQ that is already in use, while IRQF_PROBE_SHARED wasn't set. However, the real cause is that ravb_close() doesn't release any of the R-Car Gen3-specific secondary IRQs. Add the missing free_irq() calls to fix this. Fixes: f51bdc236b6c5835 ("ravb: Add dma queue interrupt support") Signed-off-by: Geert Uytterhoeven Acked-by: Sergei Shtylyov Signed-off-by: David S. Miller --- drivers/net/ethernet/renesas/ravb_main.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index bcebafd78023..867caf6e7a5a 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -1667,8 +1667,13 @@ static int ravb_close(struct net_device *ndev) priv->phydev = NULL; } - if (priv->chip_id == RCAR_GEN3) + if (priv->chip_id != RCAR_GEN2) { + free_irq(priv->tx_irqs[RAVB_NC], ndev); + free_irq(priv->rx_irqs[RAVB_NC], ndev); + free_irq(priv->tx_irqs[RAVB_BE], ndev); + free_irq(priv->rx_irqs[RAVB_BE], ndev); free_irq(priv->emac_irq, ndev); + } free_irq(ndev->irq, ndev); napi_disable(&priv->napi[RAVB_NC]); From 39651abd28146fff2bfac63d68a7a56250a4aead Mon Sep 17 00:00:00 2001 From: Sudarsana Reddy Kalluru Date: Tue, 17 May 2016 06:44:26 -0400 Subject: [PATCH 1638/1649] qed: add support for dcbx. This patch adds the necessary driver support for Management Firmware to configure the device/firmware with the dcbx results. Management Firmware is responsible for communicating the DCBX and driving the negotiation, but the driver has responsibility of receiving async notification and configuring the results in hw/fw. This patch also adds the dcbx support for future protocols (e.g., FCoE) as preparation to their imminent submission. Signed-off-by: Sudarsana Reddy Kalluru Signed-off-by: Yuval Mintz Signed-off-by: David S. 
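
The division of labour described above surfaces as a small hook on the driver side: when the Management FW signals a DCBX change, the driver re-reads the operational MIB and, on success, reconfigures the QM and sends the PF update ramrod, all of which happens inside qed_dcbx_mib_update_event() in the diff below. A hedged sketch of how a notification path might call into it; the handler name and context are illustrative, since the actual qed_mcp.c hunk is not shown here:

	/* Illustrative only: a DCBX notification arrived from the MFW,
	 * so refresh the operational MIB and apply the results.
	 */
	static void my_handle_dcbx_notification(struct qed_hwfn *p_hwfn,
						struct qed_ptt *p_ptt)
	{
		int rc;

		rc = qed_dcbx_mib_update_event(p_hwfn, p_ptt,
					       QED_DCBX_OPERATIONAL_MIB);
		if (rc)
			DP_NOTICE(p_hwfn, "Failed to process DCBX notification\n");
	}
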
Miller --- drivers/net/ethernet/qlogic/qed/Makefile | 2 +- drivers/net/ethernet/qlogic/qed/qed.h | 2 + drivers/net/ethernet/qlogic/qed/qed_cxt.h | 10 + drivers/net/ethernet/qlogic/qed/qed_dcbx.c | 562 ++++++++++++++++++ drivers/net/ethernet/qlogic/qed/qed_dcbx.h | 80 +++ drivers/net/ethernet/qlogic/qed/qed_dev.c | 105 +++- drivers/net/ethernet/qlogic/qed/qed_hsi.h | 21 +- drivers/net/ethernet/qlogic/qed/qed_mcp.c | 13 + drivers/net/ethernet/qlogic/qed/qed_sp.h | 13 + .../net/ethernet/qlogic/qed/qed_sp_commands.c | 25 + include/linux/qed/qed_if.h | 9 + 11 files changed, 834 insertions(+), 8 deletions(-) create mode 100644 drivers/net/ethernet/qlogic/qed/qed_dcbx.c create mode 100644 drivers/net/ethernet/qlogic/qed/qed_dcbx.h diff --git a/drivers/net/ethernet/qlogic/qed/Makefile b/drivers/net/ethernet/qlogic/qed/Makefile index a44874562cfd..d1f157e439cf 100644 --- a/drivers/net/ethernet/qlogic/qed/Makefile +++ b/drivers/net/ethernet/qlogic/qed/Makefile @@ -2,5 +2,5 @@ obj-$(CONFIG_QED) := qed.o qed-y := qed_cxt.o qed_dev.o qed_hw.o qed_init_fw_funcs.o qed_init_ops.o \ qed_int.o qed_main.o qed_mcp.o qed_sp_commands.o qed_spq.o qed_l2.o \ - qed_selftest.o + qed_selftest.o qed_dcbx.o qed-$(CONFIG_QED_SRIOV) += qed_sriov.o qed_vf.o diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h index 77323fc70927..1042f2af854a 100644 --- a/drivers/net/ethernet/qlogic/qed/qed.h +++ b/drivers/net/ethernet/qlogic/qed/qed.h @@ -367,6 +367,8 @@ struct qed_hwfn { struct qed_pf_iov *pf_iov_info; struct qed_mcp_info *mcp_info; + struct qed_dcbx_info *p_dcbx_info; + struct qed_hw_cid_data *p_tx_cids; struct qed_hw_cid_data *p_rx_cids; diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.h b/drivers/net/ethernet/qlogic/qed/qed_cxt.h index 078ff3fd7920..234c0fa8db2a 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_cxt.h +++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.h @@ -130,6 +130,16 @@ void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn); void qed_qm_init_pf(struct qed_hwfn *p_hwfn); +/** + * @brief Reconfigures QM pf on the fly + * + * @param p_hwfn + * @param p_ptt + * + * @return int + */ +int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); + /** * @brief qed_cxt_release - Release a cid * diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c new file mode 100644 index 000000000000..cbf58e1f9333 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c @@ -0,0 +1,562 @@ +/* QLogic qed NIC Driver + * Copyright (c) 2015 QLogic Corporation + * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree. + */ + +#include +#include +#include +#include +#include +#include +#include +#include "qed.h" +#include "qed_cxt.h" +#include "qed_dcbx.h" +#include "qed_hsi.h" +#include "qed_sp.h" + +#define QED_DCBX_MAX_MIB_READ_TRY (100) +#define QED_ETH_TYPE_DEFAULT (0) +#define QED_ETH_TYPE_ROCE (0x8915) +#define QED_UDP_PORT_TYPE_ROCE_V2 (0x12B7) +#define QED_ETH_TYPE_FCOE (0x8906) +#define QED_TCP_PORT_ISCSI (0xCBC) + +#define QED_DCBX_INVALID_PRIORITY 0xFF + +/* Get Traffic Class from priority traffic class table, 4 bits represent + * the traffic class corresponding to the priority. 
+ */ +#define QED_DCBX_PRIO2TC(prio_tc_tbl, prio) \ + ((u32)(prio_tc_tbl >> ((7 - prio) * 4)) & 0x7) + +static const struct qed_dcbx_app_metadata qed_dcbx_app_update[] = { + {DCBX_PROTOCOL_ISCSI, "ISCSI", QED_PCI_DEFAULT}, + {DCBX_PROTOCOL_FCOE, "FCOE", QED_PCI_DEFAULT}, + {DCBX_PROTOCOL_ROCE, "ROCE", QED_PCI_DEFAULT}, + {DCBX_PROTOCOL_ROCE_V2, "ROCE_V2", QED_PCI_DEFAULT}, + {DCBX_PROTOCOL_ETH, "ETH", QED_PCI_ETH} +}; + +static bool qed_dcbx_app_ethtype(u32 app_info_bitmap) +{ + return !!(QED_MFW_GET_FIELD(app_info_bitmap, DCBX_APP_SF) == + DCBX_APP_SF_ETHTYPE); +} + +static bool qed_dcbx_app_port(u32 app_info_bitmap) +{ + return !!(QED_MFW_GET_FIELD(app_info_bitmap, DCBX_APP_SF) == + DCBX_APP_SF_PORT); +} + +static bool qed_dcbx_default_tlv(u32 app_info_bitmap, u16 proto_id) +{ + return !!(qed_dcbx_app_ethtype(app_info_bitmap) && + proto_id == QED_ETH_TYPE_DEFAULT); +} + +static bool qed_dcbx_iscsi_tlv(u32 app_info_bitmap, u16 proto_id) +{ + return !!(qed_dcbx_app_port(app_info_bitmap) && + proto_id == QED_TCP_PORT_ISCSI); +} + +static bool qed_dcbx_fcoe_tlv(u32 app_info_bitmap, u16 proto_id) +{ + return !!(qed_dcbx_app_ethtype(app_info_bitmap) && + proto_id == QED_ETH_TYPE_FCOE); +} + +static bool qed_dcbx_roce_tlv(u32 app_info_bitmap, u16 proto_id) +{ + return !!(qed_dcbx_app_ethtype(app_info_bitmap) && + proto_id == QED_ETH_TYPE_ROCE); +} + +static bool qed_dcbx_roce_v2_tlv(u32 app_info_bitmap, u16 proto_id) +{ + return !!(qed_dcbx_app_port(app_info_bitmap) && + proto_id == QED_UDP_PORT_TYPE_ROCE_V2); +} + +static void +qed_dcbx_dp_protocol(struct qed_hwfn *p_hwfn, struct qed_dcbx_results *p_data) +{ + enum dcbx_protocol_type id; + int i; + + DP_VERBOSE(p_hwfn, QED_MSG_DCB, "DCBX negotiated: %d\n", + p_data->dcbx_enabled); + + for (i = 0; i < ARRAY_SIZE(qed_dcbx_app_update); i++) { + id = qed_dcbx_app_update[i].id; + + DP_VERBOSE(p_hwfn, QED_MSG_DCB, + "%s info: update %d, enable %d, prio %d, tc %d, num_tc %d\n", + qed_dcbx_app_update[i].name, p_data->arr[id].update, + p_data->arr[id].enable, p_data->arr[id].priority, + p_data->arr[id].tc, p_hwfn->hw_info.num_tc); + } +} + +static void +qed_dcbx_set_params(struct qed_dcbx_results *p_data, + struct qed_hw_info *p_info, + bool enable, + bool update, + u8 prio, + u8 tc, + enum dcbx_protocol_type type, + enum qed_pci_personality personality) +{ + /* PF update ramrod data */ + p_data->arr[type].update = update; + p_data->arr[type].enable = enable; + p_data->arr[type].priority = prio; + p_data->arr[type].tc = tc; + + /* QM reconf data */ + if (p_info->personality == personality) { + if (personality == QED_PCI_ETH) + p_info->non_offload_tc = tc; + else + p_info->offload_tc = tc; + } +} + +/* Update app protocol data and hw_info fields with the TLV info */ +static void +qed_dcbx_update_app_info(struct qed_dcbx_results *p_data, + struct qed_hwfn *p_hwfn, + bool enable, + bool update, + u8 prio, u8 tc, enum dcbx_protocol_type type) +{ + struct qed_hw_info *p_info = &p_hwfn->hw_info; + enum qed_pci_personality personality; + enum dcbx_protocol_type id; + char *name; + int i; + + for (i = 0; i < ARRAY_SIZE(qed_dcbx_app_update); i++) { + id = qed_dcbx_app_update[i].id; + + if (type != id) + continue; + + personality = qed_dcbx_app_update[i].personality; + name = qed_dcbx_app_update[i].name; + + qed_dcbx_set_params(p_data, p_info, enable, update, + prio, tc, type, personality); + } +} + +static bool +qed_dcbx_get_app_protocol_type(struct qed_hwfn *p_hwfn, + u32 app_prio_bitmap, + u16 id, enum dcbx_protocol_type *type) +{ + if 
(qed_dcbx_fcoe_tlv(app_prio_bitmap, id)) { + *type = DCBX_PROTOCOL_FCOE; + } else if (qed_dcbx_roce_tlv(app_prio_bitmap, id)) { + *type = DCBX_PROTOCOL_ROCE; + } else if (qed_dcbx_iscsi_tlv(app_prio_bitmap, id)) { + *type = DCBX_PROTOCOL_ISCSI; + } else if (qed_dcbx_default_tlv(app_prio_bitmap, id)) { + *type = DCBX_PROTOCOL_ETH; + } else if (qed_dcbx_roce_v2_tlv(app_prio_bitmap, id)) { + *type = DCBX_PROTOCOL_ROCE_V2; + } else { + *type = DCBX_MAX_PROTOCOL_TYPE; + DP_ERR(p_hwfn, + "No action required, App TLV id = 0x%x app_prio_bitmap = 0x%x\n", + id, app_prio_bitmap); + return false; + } + + return true; +} + +/* Parse app TLV's to update TC information in hw_info structure for + * reconfiguring QM. Get protocol specific data for PF update ramrod command. + */ +static int +qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn, + struct qed_dcbx_results *p_data, + struct dcbx_app_priority_entry *p_tbl, + u32 pri_tc_tbl, int count, bool dcbx_enabled) +{ + u8 tc, priority, priority_map; + enum dcbx_protocol_type type; + u16 protocol_id; + bool enable; + int i; + + DP_VERBOSE(p_hwfn, QED_MSG_DCB, "Num APP entries = %d\n", count); + + /* Parse APP TLV */ + for (i = 0; i < count; i++) { + protocol_id = QED_MFW_GET_FIELD(p_tbl[i].entry, + DCBX_APP_PROTOCOL_ID); + priority_map = QED_MFW_GET_FIELD(p_tbl[i].entry, + DCBX_APP_PRI_MAP); + priority = ffs(priority_map) - 1; + if (priority < 0) { + DP_ERR(p_hwfn, "Invalid priority\n"); + return -EINVAL; + } + + tc = QED_DCBX_PRIO2TC(pri_tc_tbl, priority); + if (qed_dcbx_get_app_protocol_type(p_hwfn, p_tbl[i].entry, + protocol_id, &type)) { + /* ETH always have the enable bit reset, as it gets + * vlan information per packet. For other protocols, + * should be set according to the dcbx_enabled + * indication, but we only got here if there was an + * app tlv for the protocol, so dcbx must be enabled. + */ + enable = !!(type == DCBX_PROTOCOL_ETH); + + qed_dcbx_update_app_info(p_data, p_hwfn, enable, true, + priority, tc, type); + } + } + + /* If RoCE-V2 TLV is not detected, driver need to use RoCE app + * data for RoCE-v2 not the default app data. + */ + if (!p_data->arr[DCBX_PROTOCOL_ROCE_V2].update && + p_data->arr[DCBX_PROTOCOL_ROCE].update) { + tc = p_data->arr[DCBX_PROTOCOL_ROCE].tc; + priority = p_data->arr[DCBX_PROTOCOL_ROCE].priority; + qed_dcbx_update_app_info(p_data, p_hwfn, true, true, + priority, tc, DCBX_PROTOCOL_ROCE_V2); + } + + /* Update ramrod protocol data and hw_info fields + * with default info when corresponding APP TLV's are not detected. + * The enabled field has a different logic for ethernet as only for + * ethernet dcb should disabled by default, as the information arrives + * from the OS (unless an explicit app tlv was present). + */ + tc = p_data->arr[DCBX_PROTOCOL_ETH].tc; + priority = p_data->arr[DCBX_PROTOCOL_ETH].priority; + for (type = 0; type < DCBX_MAX_PROTOCOL_TYPE; type++) { + if (p_data->arr[type].update) + continue; + + enable = (type == DCBX_PROTOCOL_ETH) ? false : dcbx_enabled; + qed_dcbx_update_app_info(p_data, p_hwfn, enable, true, + priority, tc, type); + } + + return 0; +} + +/* Parse app TLV's to update TC information in hw_info structure for + * reconfiguring QM. Get protocol specific data for PF update ramrod command. 
+ */ +static int qed_dcbx_process_mib_info(struct qed_hwfn *p_hwfn) +{ + struct dcbx_app_priority_feature *p_app; + struct dcbx_app_priority_entry *p_tbl; + struct qed_dcbx_results data = { 0 }; + struct dcbx_ets_feature *p_ets; + struct qed_hw_info *p_info; + u32 pri_tc_tbl, flags; + bool dcbx_enabled; + int num_entries; + int rc = 0; + + /* If DCBx version is non zero, then negotiation was + * successfuly performed + */ + flags = p_hwfn->p_dcbx_info->operational.flags; + dcbx_enabled = !!QED_MFW_GET_FIELD(flags, DCBX_CONFIG_VERSION); + + p_app = &p_hwfn->p_dcbx_info->operational.features.app; + p_tbl = p_app->app_pri_tbl; + + p_ets = &p_hwfn->p_dcbx_info->operational.features.ets; + pri_tc_tbl = p_ets->pri_tc_tbl[0]; + + p_info = &p_hwfn->hw_info; + num_entries = QED_MFW_GET_FIELD(p_app->flags, DCBX_APP_NUM_ENTRIES); + + rc = qed_dcbx_process_tlv(p_hwfn, &data, p_tbl, pri_tc_tbl, + num_entries, dcbx_enabled); + if (rc) + return rc; + + p_info->num_tc = QED_MFW_GET_FIELD(p_ets->flags, DCBX_ETS_MAX_TCS); + data.pf_id = p_hwfn->rel_pf_id; + data.dcbx_enabled = dcbx_enabled; + + qed_dcbx_dp_protocol(p_hwfn, &data); + + memcpy(&p_hwfn->p_dcbx_info->results, &data, + sizeof(struct qed_dcbx_results)); + + return 0; +} + +static int +qed_dcbx_copy_mib(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_dcbx_mib_meta_data *p_data, + enum qed_mib_read_type type) +{ + u32 prefix_seq_num, suffix_seq_num; + int read_count = 0; + int rc = 0; + + /* The data is considered to be valid only if both sequence numbers are + * the same. + */ + do { + if (type == QED_DCBX_REMOTE_LLDP_MIB) { + qed_memcpy_from(p_hwfn, p_ptt, p_data->lldp_remote, + p_data->addr, p_data->size); + prefix_seq_num = p_data->lldp_remote->prefix_seq_num; + suffix_seq_num = p_data->lldp_remote->suffix_seq_num; + } else { + qed_memcpy_from(p_hwfn, p_ptt, p_data->mib, + p_data->addr, p_data->size); + prefix_seq_num = p_data->mib->prefix_seq_num; + suffix_seq_num = p_data->mib->suffix_seq_num; + } + read_count++; + + DP_VERBOSE(p_hwfn, + QED_MSG_DCB, + "mib type = %d, try count = %d prefix seq num = %d suffix seq num = %d\n", + type, read_count, prefix_seq_num, suffix_seq_num); + } while ((prefix_seq_num != suffix_seq_num) && + (read_count < QED_DCBX_MAX_MIB_READ_TRY)); + + if (read_count >= QED_DCBX_MAX_MIB_READ_TRY) { + DP_ERR(p_hwfn, + "MIB read err, mib type = %d, try count = %d prefix seq num = %d suffix seq num = %d\n", + type, read_count, prefix_seq_num, suffix_seq_num); + rc = -EIO; + } + + return rc; +} + +static int +qed_dcbx_read_local_lldp_mib(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + struct qed_dcbx_mib_meta_data data; + int rc = 0; + + memset(&data, 0, sizeof(data)); + data.addr = p_hwfn->mcp_info->port_addr + offsetof(struct public_port, + lldp_config_params); + data.lldp_local = p_hwfn->p_dcbx_info->lldp_local; + data.size = sizeof(struct lldp_config_params_s); + qed_memcpy_from(p_hwfn, p_ptt, data.lldp_local, data.addr, data.size); + + return rc; +} + +static int +qed_dcbx_read_remote_lldp_mib(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + enum qed_mib_read_type type) +{ + struct qed_dcbx_mib_meta_data data; + int rc = 0; + + memset(&data, 0, sizeof(data)); + data.addr = p_hwfn->mcp_info->port_addr + offsetof(struct public_port, + lldp_status_params); + data.lldp_remote = p_hwfn->p_dcbx_info->lldp_remote; + data.size = sizeof(struct lldp_status_params_s); + rc = qed_dcbx_copy_mib(p_hwfn, p_ptt, &data, type); + + return rc; +} + +static int +qed_dcbx_read_operational_mib(struct qed_hwfn *p_hwfn, 
+ struct qed_ptt *p_ptt, + enum qed_mib_read_type type) +{ + struct qed_dcbx_mib_meta_data data; + int rc = 0; + + memset(&data, 0, sizeof(data)); + data.addr = p_hwfn->mcp_info->port_addr + + offsetof(struct public_port, operational_dcbx_mib); + data.mib = &p_hwfn->p_dcbx_info->operational; + data.size = sizeof(struct dcbx_mib); + rc = qed_dcbx_copy_mib(p_hwfn, p_ptt, &data, type); + + return rc; +} + +static int +qed_dcbx_read_remote_mib(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, enum qed_mib_read_type type) +{ + struct qed_dcbx_mib_meta_data data; + int rc = 0; + + memset(&data, 0, sizeof(data)); + data.addr = p_hwfn->mcp_info->port_addr + + offsetof(struct public_port, remote_dcbx_mib); + data.mib = &p_hwfn->p_dcbx_info->remote; + data.size = sizeof(struct dcbx_mib); + rc = qed_dcbx_copy_mib(p_hwfn, p_ptt, &data, type); + + return rc; +} + +static int +qed_dcbx_read_local_mib(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + struct qed_dcbx_mib_meta_data data; + int rc = 0; + + memset(&data, 0, sizeof(data)); + data.addr = p_hwfn->mcp_info->port_addr + + offsetof(struct public_port, local_admin_dcbx_mib); + data.local_admin = &p_hwfn->p_dcbx_info->local_admin; + data.size = sizeof(struct dcbx_local_params); + qed_memcpy_from(p_hwfn, p_ptt, data.local_admin, data.addr, data.size); + + return rc; +} + +static int qed_dcbx_read_mib(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, enum qed_mib_read_type type) +{ + int rc = -EINVAL; + + switch (type) { + case QED_DCBX_OPERATIONAL_MIB: + rc = qed_dcbx_read_operational_mib(p_hwfn, p_ptt, type); + break; + case QED_DCBX_REMOTE_MIB: + rc = qed_dcbx_read_remote_mib(p_hwfn, p_ptt, type); + break; + case QED_DCBX_LOCAL_MIB: + rc = qed_dcbx_read_local_mib(p_hwfn, p_ptt); + break; + case QED_DCBX_REMOTE_LLDP_MIB: + rc = qed_dcbx_read_remote_lldp_mib(p_hwfn, p_ptt, type); + break; + case QED_DCBX_LOCAL_LLDP_MIB: + rc = qed_dcbx_read_local_lldp_mib(p_hwfn, p_ptt); + break; + default: + DP_ERR(p_hwfn, "MIB read err, unknown mib type %d\n", type); + } + + return rc; +} + +/* Read updated MIB. + * Reconfigure QM and invoke PF update ramrod command if operational MIB + * change is detected. 
+ */ +int +qed_dcbx_mib_update_event(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, enum qed_mib_read_type type) +{ + int rc = 0; + + rc = qed_dcbx_read_mib(p_hwfn, p_ptt, type); + if (rc) + return rc; + + if (type == QED_DCBX_OPERATIONAL_MIB) { + rc = qed_dcbx_process_mib_info(p_hwfn); + if (!rc) { + /* reconfigure tcs of QM queues according + * to negotiation results + */ + qed_qm_reconf(p_hwfn, p_ptt); + + /* update storm FW with negotiation results */ + qed_sp_pf_update(p_hwfn); + } + } + + return rc; +} + +int qed_dcbx_info_alloc(struct qed_hwfn *p_hwfn) +{ + int rc = 0; + + p_hwfn->p_dcbx_info = kzalloc(sizeof(*p_hwfn->p_dcbx_info), GFP_KERNEL); + if (!p_hwfn->p_dcbx_info) { + DP_NOTICE(p_hwfn, + "Failed to allocate 'struct qed_dcbx_info'\n"); + rc = -ENOMEM; + } + + return rc; +} + +void qed_dcbx_info_free(struct qed_hwfn *p_hwfn, + struct qed_dcbx_info *p_dcbx_info) +{ + kfree(p_hwfn->p_dcbx_info); +} + +static void qed_dcbx_update_protocol_data(struct protocol_dcb_data *p_data, + struct qed_dcbx_results *p_src, + enum dcbx_protocol_type type) +{ + p_data->dcb_enable_flag = p_src->arr[type].enable; + p_data->dcb_priority = p_src->arr[type].priority; + p_data->dcb_tc = p_src->arr[type].tc; +} + +/* Set pf update ramrod command params */ +void qed_dcbx_set_pf_update_params(struct qed_dcbx_results *p_src, + struct pf_update_ramrod_data *p_dest) +{ + struct protocol_dcb_data *p_dcb_data; + bool update_flag = false; + + p_dest->pf_id = p_src->pf_id; + + update_flag = p_src->arr[DCBX_PROTOCOL_FCOE].update; + p_dest->update_fcoe_dcb_data_flag = update_flag; + + update_flag = p_src->arr[DCBX_PROTOCOL_ROCE].update; + p_dest->update_roce_dcb_data_flag = update_flag; + update_flag = p_src->arr[DCBX_PROTOCOL_ROCE_V2].update; + p_dest->update_roce_dcb_data_flag = update_flag; + + update_flag = p_src->arr[DCBX_PROTOCOL_ISCSI].update; + p_dest->update_iscsi_dcb_data_flag = update_flag; + update_flag = p_src->arr[DCBX_PROTOCOL_ETH].update; + p_dest->update_eth_dcb_data_flag = update_flag; + + p_dcb_data = &p_dest->fcoe_dcb_data; + qed_dcbx_update_protocol_data(p_dcb_data, p_src, DCBX_PROTOCOL_FCOE); + p_dcb_data = &p_dest->roce_dcb_data; + + if (p_src->arr[DCBX_PROTOCOL_ROCE].update) + qed_dcbx_update_protocol_data(p_dcb_data, p_src, + DCBX_PROTOCOL_ROCE); + if (p_src->arr[DCBX_PROTOCOL_ROCE_V2].update) + qed_dcbx_update_protocol_data(p_dcb_data, p_src, + DCBX_PROTOCOL_ROCE_V2); + + p_dcb_data = &p_dest->iscsi_dcb_data; + qed_dcbx_update_protocol_data(p_dcb_data, p_src, DCBX_PROTOCOL_ISCSI); + p_dcb_data = &p_dest->eth_dcb_data; + qed_dcbx_update_protocol_data(p_dcb_data, p_src, DCBX_PROTOCOL_ETH); +} diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.h b/drivers/net/ethernet/qlogic/qed/qed_dcbx.h new file mode 100644 index 000000000000..e7f834dbda2d --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.h @@ -0,0 +1,80 @@ +/* QLogic qed NIC Driver + * Copyright (c) 2015 QLogic Corporation + * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree. 
+ */ + +#ifndef _QED_DCBX_H +#define _QED_DCBX_H +#include +#include +#include "qed.h" +#include "qed_hsi.h" +#include "qed_hw.h" +#include "qed_mcp.h" +#include "qed_reg_addr.h" + +#define DCBX_CONFIG_MAX_APP_PROTOCOL 4 + +enum qed_mib_read_type { + QED_DCBX_OPERATIONAL_MIB, + QED_DCBX_REMOTE_MIB, + QED_DCBX_LOCAL_MIB, + QED_DCBX_REMOTE_LLDP_MIB, + QED_DCBX_LOCAL_LLDP_MIB +}; + +struct qed_dcbx_app_data { + bool enable; /* DCB enabled */ + bool update; /* Update indication */ + u8 priority; /* Priority */ + u8 tc; /* Traffic Class */ +}; + +struct qed_dcbx_results { + bool dcbx_enabled; + u8 pf_id; + struct qed_dcbx_app_data arr[DCBX_MAX_PROTOCOL_TYPE]; +}; + +struct qed_dcbx_app_metadata { + enum dcbx_protocol_type id; + char *name; + enum qed_pci_personality personality; +}; + +#define QED_MFW_GET_FIELD(name, field) \ + (((name) & (field ## _MASK)) >> (field ## _SHIFT)) + +struct qed_dcbx_info { + struct lldp_status_params_s lldp_remote[LLDP_MAX_LLDP_AGENTS]; + struct lldp_config_params_s lldp_local[LLDP_MAX_LLDP_AGENTS]; + struct dcbx_local_params local_admin; + struct qed_dcbx_results results; + struct dcbx_mib operational; + struct dcbx_mib remote; + u8 dcbx_cap; +}; + +struct qed_dcbx_mib_meta_data { + struct lldp_config_params_s *lldp_local; + struct lldp_status_params_s *lldp_remote; + struct dcbx_local_params *local_admin; + struct dcbx_mib *mib; + size_t size; + u32 addr; +}; + +/* QED local interface routines */ +int +qed_dcbx_mib_update_event(struct qed_hwfn *, + struct qed_ptt *, enum qed_mib_read_type); + +int qed_dcbx_info_alloc(struct qed_hwfn *p_hwfn); +void qed_dcbx_info_free(struct qed_hwfn *, struct qed_dcbx_info *); +void qed_dcbx_set_pf_update_params(struct qed_dcbx_results *p_src, + struct pf_update_ramrod_data *p_dest); + +#endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index 6fb6016409c6..089016f46f26 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -22,6 +22,7 @@ #include #include "qed.h" #include "qed_cxt.h" +#include "qed_dcbx.h" #include "qed_dev_api.h" #include "qed_hsi.h" #include "qed_hw.h" @@ -33,6 +34,9 @@ #include "qed_sriov.h" #include "qed_vf.h" +static spinlock_t qm_lock; +static bool qm_lock_init = false; + /* API common to all protocols */ enum BAR_ID { BAR_ID_0, /* used for GRC */ @@ -147,6 +151,7 @@ void qed_resc_free(struct qed_dev *cdev) qed_int_free(p_hwfn); qed_iov_free(p_hwfn); qed_dmae_info_free(p_hwfn); + qed_dcbx_info_free(p_hwfn, p_hwfn->p_dcbx_info); } } @@ -200,13 +205,19 @@ static int qed_init_qm_info(struct qed_hwfn *p_hwfn) vport_id = (u8)RESC_START(p_hwfn, QED_VPORT); /* First init per-TC PQs */ - for (i = 0; i < multi_cos_tcs; i++, curr_queue++) { + for (i = 0; i < multi_cos_tcs; i++) { struct init_qm_pq_params *params = - &qm_info->qm_pq_params[curr_queue]; + &qm_info->qm_pq_params[curr_queue++]; - params->vport_id = vport_id; - params->tc_id = p_hwfn->hw_info.non_offload_tc; - params->wrr_group = 1; + if (p_hwfn->hw_info.personality == QED_PCI_ETH) { + params->vport_id = vport_id; + params->tc_id = p_hwfn->hw_info.non_offload_tc; + params->wrr_group = 1; + } else { + params->vport_id = vport_id; + params->tc_id = p_hwfn->hw_info.offload_tc; + params->wrr_group = 1; + } } /* Then init pure-LB PQ */ @@ -266,6 +277,63 @@ alloc_err: return -ENOMEM; } +/* This function reconfigures the QM pf on the fly. + * For this purpose we: + * 1. reconfigure the QM database + * 2. set new values to runtime arrat + * 3. 
send an sdm_qm_cmd through the rbc interface to stop the QM + * 4. activate init tool in QM_PF stage + * 5. send an sdm_qm_cmd through rbc interface to release the QM + */ +int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + struct qed_qm_info *qm_info = &p_hwfn->qm_info; + bool b_rc; + int rc; + + /* qm_info is allocated in qed_init_qm_info() which is already called + * from qed_resc_alloc() or previous call of qed_qm_reconf(). + * The allocated size may change each init, so we free it before next + * allocation. + */ + qed_qm_info_free(p_hwfn); + + /* initialize qed's qm data structure */ + rc = qed_init_qm_info(p_hwfn); + if (rc) + return rc; + + /* stop PF's qm queues */ + spin_lock_bh(&qm_lock); + b_rc = qed_send_qm_stop_cmd(p_hwfn, p_ptt, false, true, + qm_info->start_pq, qm_info->num_pqs); + spin_unlock_bh(&qm_lock); + if (!b_rc) + return -EINVAL; + + /* clear the QM_PF runtime phase leftovers from previous init */ + qed_init_clear_rt_data(p_hwfn); + + /* prepare QM portion of runtime array */ + qed_qm_init_pf(p_hwfn); + + /* activate init tool on runtime array */ + rc = qed_init_run(p_hwfn, p_ptt, PHASE_QM_PF, p_hwfn->rel_pf_id, + p_hwfn->hw_info.hw_mode); + if (rc) + return rc; + + /* start PF's qm queues */ + spin_lock_bh(&qm_lock); + b_rc = qed_send_qm_stop_cmd(p_hwfn, p_ptt, true, true, + qm_info->start_pq, qm_info->num_pqs); + spin_unlock_bh(&qm_lock); + if (!b_rc) + return -EINVAL; + + return 0; +} + int qed_resc_alloc(struct qed_dev *cdev) { struct qed_consq *p_consq; @@ -375,6 +443,14 @@ int qed_resc_alloc(struct qed_dev *cdev) "Failed to allocate memory for dmae_info structure\n"); goto alloc_err; } + + /* DCBX initialization */ + rc = qed_dcbx_info_alloc(p_hwfn); + if (rc) { + DP_NOTICE(p_hwfn, + "Failed to allocate memory for dcbx structure\n"); + goto alloc_err; + } } cdev->reset_stats = kzalloc(sizeof(*cdev->reset_stats), GFP_KERNEL); @@ -780,6 +856,11 @@ int qed_hw_init(struct qed_dev *cdev, p_hwfn->first_on_engine = (load_code == FW_MSG_CODE_DRV_LOAD_ENGINE); + if (!qm_lock_init) { + spin_lock_init(&qm_lock); + qm_lock_init = true; + } + switch (load_code) { case FW_MSG_CODE_DRV_LOAD_ENGINE: rc = qed_hw_init_common(p_hwfn, p_hwfn->p_main_ptt, @@ -821,6 +902,20 @@ int qed_hw_init(struct qed_dev *cdev, return mfw_rc; } + /* send DCBX attention request command */ + DP_VERBOSE(p_hwfn, + QED_MSG_DCB, + "sending phony dcbx set command to trigger DCBx attention handling\n"); + mfw_rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt, + DRV_MSG_CODE_SET_DCBX, + 1 << DRV_MB_PARAM_DCBX_NOTIFY_SHIFT, + &load_code, ¶m); + if (mfw_rc) { + DP_NOTICE(p_hwfn, + "Failed to send DCBX attention request\n"); + return mfw_rc; + } + p_hwfn->hw_init_done = true; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h index 82b7727d090b..9afc15fdbb02 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h @@ -634,6 +634,14 @@ struct pf_start_ramrod_data { u8 reserved0[4]; }; +/* Data for port update ramrod */ +struct protocol_dcb_data { + u8 dcb_enable_flag; + u8 dcb_priority; + u8 dcb_tc; + u8 reserved; +}; + /* tunnel configuration */ struct pf_update_tunnel_config { u8 update_rx_pf_clss; @@ -656,8 +664,17 @@ struct pf_update_tunnel_config { }; struct pf_update_ramrod_data { - u32 reserved[2]; - u32 reserved_1[6]; + u8 pf_id; + u8 update_eth_dcb_data_flag; + u8 update_fcoe_dcb_data_flag; + u8 update_iscsi_dcb_data_flag; + u8 update_roce_dcb_data_flag; + u8 update_mf_vlan_flag; + __le16 
mf_vlan; + struct protocol_dcb_data eth_dcb_data; + struct protocol_dcb_data fcoe_dcb_data; + struct protocol_dcb_data iscsi_dcb_data; + struct protocol_dcb_data roce_dcb_data; struct pf_update_tunnel_config tunnel_config; }; diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index 2be943b91916..1182361798b5 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c @@ -15,6 +15,7 @@ #include #include #include "qed.h" +#include "qed_dcbx.h" #include "qed_hsi.h" #include "qed_hw.h" #include "qed_mcp.h" @@ -825,6 +826,18 @@ int qed_mcp_handle_events(struct qed_hwfn *p_hwfn, case MFW_DRV_MSG_VF_DISABLED: qed_mcp_handle_vf_flr(p_hwfn, p_ptt); break; + case MFW_DRV_MSG_LLDP_DATA_UPDATED: + qed_dcbx_mib_update_event(p_hwfn, p_ptt, + QED_DCBX_REMOTE_LLDP_MIB); + break; + case MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED: + qed_dcbx_mib_update_event(p_hwfn, p_ptt, + QED_DCBX_REMOTE_MIB); + break; + case MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED: + qed_dcbx_mib_update_event(p_hwfn, p_ptt, + QED_DCBX_OPERATIONAL_MIB); + break; case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE: qed_mcp_handle_transceiver_change(p_hwfn, p_ptt); break; diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h index ab5549f4e5ea..ea4e9ce53e0a 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h @@ -353,6 +353,19 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn, struct qed_tunn_start_params *p_tunn, enum qed_mf_mode mode, bool allow_npar_tx_switch); +/** + * @brief qed_sp_pf_update - PF Function Update Ramrod + * + * This ramrod updates function-related parameters. Every parameter can be + * updated independently, according to configuration flags. 
+ * + * @param p_hwfn + * + * @return int + */ + +int qed_sp_pf_update(struct qed_hwfn *p_hwfn); + /** * @brief qed_sp_pf_stop - PF Function Stop Ramrod * diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c index 8c555ed1f949..67f6ce3c84c8 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c @@ -15,6 +15,7 @@ #include "qed.h" #include #include "qed_cxt.h" +#include "qed_dcbx.h" #include "qed_hsi.h" #include "qed_hw.h" #include "qed_int.h" @@ -384,6 +385,30 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn, return rc; } +int qed_sp_pf_update(struct qed_hwfn *p_hwfn) +{ + struct qed_spq_entry *p_ent = NULL; + struct qed_sp_init_data init_data; + int rc = -EINVAL; + + /* Get SPQ entry */ + memset(&init_data, 0, sizeof(init_data)); + init_data.cid = qed_spq_get_cid(p_hwfn); + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.comp_mode = QED_SPQ_MODE_CB; + + rc = qed_sp_init_request(p_hwfn, &p_ent, + COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON, + &init_data); + if (rc) + return rc; + + qed_dcbx_set_pf_update_params(&p_hwfn->p_dcbx_info->results, + &p_ent->ramrod.pf_update); + + return qed_spq_post(p_hwfn, p_ent, NULL); +} + /* Set pf update ramrod command params */ int qed_sp_pf_update_tunn_cfg(struct qed_hwfn *p_hwfn, struct qed_tunn_update_params *p_tunn, diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index 0fd8f247e65f..4c29439f54bf 100644 --- a/include/linux/qed/qed_if.h +++ b/include/linux/qed/qed_if.h @@ -25,6 +25,15 @@ #include #include +enum dcbx_protocol_type { + DCBX_PROTOCOL_ISCSI, + DCBX_PROTOCOL_FCOE, + DCBX_PROTOCOL_ROCE, + DCBX_PROTOCOL_ROCE_V2, + DCBX_PROTOCOL_ETH, + DCBX_MAX_PROTOCOL_TYPE +}; + enum qed_led_mode { QED_LED_MODE_OFF, QED_LED_MODE_ON, From 3274940bd3d87043c74aa444ff3b0dff56b3e9ea Mon Sep 17 00:00:00 2001 From: Harvey Hunt Date: Tue, 17 May 2016 14:33:27 +0100 Subject: [PATCH 1639/1649] drivers: net: Don't print unpopulated net_device name For ethernet devices, net_device.name will be eth%d before register_netdev() is called. Don't print the net_device name until the format string is replaced. Signed-off-by: Harvey Hunt Cc: Marcel Ziswiler Cc: Robert Jarzmik Cc: Barry Song Cc: netdev@vger.kernel.org Cc: linux-kernel@vger.kernel.org Signed-off-by: David S. Miller --- drivers/net/ethernet/davicom/dm9000.c | 11 +++++++---- drivers/net/ethernet/micrel/ks8695net.c | 7 +++++-- drivers/net/ethernet/netx-eth.c | 12 ++++++++---- 3 files changed, 20 insertions(+), 10 deletions(-) diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c index 9e061307975f..1471e16ba719 100644 --- a/drivers/net/ethernet/davicom/dm9000.c +++ b/drivers/net/ethernet/davicom/dm9000.c @@ -1432,6 +1432,7 @@ dm9000_probe(struct platform_device *pdev) int reset_gpios; enum of_gpio_flags flags; struct regulator *power; + bool inv_mac_addr = false; power = devm_regulator_get(dev, "vcc"); if (IS_ERR(power)) { @@ -1686,9 +1687,7 @@ dm9000_probe(struct platform_device *pdev) } if (!is_valid_ether_addr(ndev->dev_addr)) { - dev_warn(db->dev, "%s: Invalid ethernet MAC address. 
Please " - "set using ifconfig\n", ndev->name); - + inv_mac_addr = true; eth_hw_addr_random(ndev); mac_src = "random"; } @@ -1697,11 +1696,15 @@ dm9000_probe(struct platform_device *pdev) platform_set_drvdata(pdev, ndev); ret = register_netdev(ndev); - if (ret == 0) + if (ret == 0) { + if (inv_mac_addr) + dev_warn(db->dev, "%s: Invalid ethernet MAC address. Please set using ip\n", + ndev->name); printk(KERN_INFO "%s: dm9000%c at %p,%p IRQ %d MAC: %pM (%s)\n", ndev->name, dm9000_type_to_char(db->type), db->io_addr, db->io_data, ndev->irq, ndev->dev_addr, mac_src); + } return 0; out: diff --git a/drivers/net/ethernet/micrel/ks8695net.c b/drivers/net/ethernet/micrel/ks8695net.c index a8522d8af95d..20cb85bc0c5f 100644 --- a/drivers/net/ethernet/micrel/ks8695net.c +++ b/drivers/net/ethernet/micrel/ks8695net.c @@ -1354,6 +1354,7 @@ ks8695_probe(struct platform_device *pdev) struct resource *rxirq_res, *txirq_res, *linkirq_res; int ret = 0; int buff_n; + bool inv_mac_addr = false; u32 machigh, maclow; /* Initialise a net_device */ @@ -1456,8 +1457,7 @@ ks8695_probe(struct platform_device *pdev) ndev->dev_addr[5] = maclow & 0xFF; if (!is_valid_ether_addr(ndev->dev_addr)) - dev_warn(ksp->dev, "%s: Invalid ethernet MAC address. Please " - "set using ifconfig\n", ndev->name); + inv_mac_addr = true; /* In order to be efficient memory-wise, we allocate both * rings in one go. @@ -1520,6 +1520,9 @@ ks8695_probe(struct platform_device *pdev) ret = register_netdev(ndev); if (ret == 0) { + if (inv_mac_addr) + dev_warn(ksp->dev, "%s: Invalid ethernet MAC address. Please set using ip\n", + ndev->name); dev_info(ksp->dev, "ks8695 ethernet (%s) MAC: %pM\n", ks8695_port_type(ksp), ndev->dev_addr); } else { diff --git a/drivers/net/ethernet/netx-eth.c b/drivers/net/ethernet/netx-eth.c index 9fbc30264237..adbc47f2d132 100644 --- a/drivers/net/ethernet/netx-eth.c +++ b/drivers/net/ethernet/netx-eth.c @@ -313,7 +313,8 @@ static int netx_eth_enable(struct net_device *ndev) { struct netx_eth_priv *priv = netdev_priv(ndev); unsigned int mac4321, mac65; - int running, i; + int running, i, ret; + bool inv_mac_addr = false; ndev->netdev_ops = &netx_eth_netdev_ops; ndev->watchdog_timeo = msecs_to_jiffies(5000); @@ -358,15 +359,18 @@ static int netx_eth_enable(struct net_device *ndev) xc_start(priv->xc); if (!is_valid_ether_addr(ndev->dev_addr)) - printk("%s: Invalid ethernet MAC address. Please " - "set using ifconfig\n", ndev->name); + inv_mac_addr = true; for (i=2; i<=18; i++) pfifo_push(EMPTY_PTR_FIFO(priv->id), FIFO_PTR_FRAMENO(i) | FIFO_PTR_SEGMENT(priv->id)); - return register_netdev(ndev); + ret = register_netdev(ndev); + if (inv_mac_addr) + printk("%s: Invalid ethernet MAC address. Please set using ip\n", + ndev->name); + return ret; } static int netx_eth_drv_probe(struct platform_device *pdev) From 03aaaa9b941e136757b55c4cf775aab6068dfd94 Mon Sep 17 00:00:00 2001 From: Richard Alpe Date: Tue, 17 May 2016 16:57:37 +0200 Subject: [PATCH 1640/1649] tipc: fix nametable publication field in nl compat The publication field of the old netlink API should contain the publication key and not the publication reference. Fixes: 44a8ae94fd55 (tipc: convert legacy nl name table dump to nl compat) Signed-off-by: Richard Alpe Acked-by: Jon Maloy Signed-off-by: David S. 
Miller --- net/tipc/netlink_compat.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c index d7d050f44fc1..4dfc5c14f8c3 100644 --- a/net/tipc/netlink_compat.c +++ b/net/tipc/netlink_compat.c @@ -802,7 +802,7 @@ static int tipc_nl_compat_name_table_dump(struct tipc_nl_compat_msg *msg, goto out; tipc_tlv_sprintf(msg->rep, "%-10u %s", - nla_get_u32(publ[TIPC_NLA_PUBL_REF]), + nla_get_u32(publ[TIPC_NLA_PUBL_KEY]), scope_str[nla_get_u32(publ[TIPC_NLA_PUBL_SCOPE])]); out: tipc_tlv_sprintf(msg->rep, "\n"); From dc327f8931cb9d66191f489eb9a852fc04530546 Mon Sep 17 00:00:00 2001 From: WANG Cong Date: Mon, 16 May 2016 15:11:18 -0700 Subject: [PATCH 1641/1649] net_sched: close another race condition in tcf_mirred_release() We saw the following extra refcount release on veth device: kernel: [7957821.463992] unregister_netdevice: waiting for mesos50284 to become free. Usage count = -1 Since we heavily use mirred action to redirect packets to veth, I think this is caused by the following race condition: CPU0: tcf_mirred_release(): (in RCU callback) struct net_device *dev = rcu_dereference_protected(m->tcfm_dev, 1); CPU1: mirred_device_event(): spin_lock_bh(&mirred_list_lock); list_for_each_entry(m, &mirred_list, tcfm_list) { if (rcu_access_pointer(m->tcfm_dev) == dev) { dev_put(dev); /* Note : no rcu grace period necessary, as * net_device are already rcu protected. */ RCU_INIT_POINTER(m->tcfm_dev, NULL); } } spin_unlock_bh(&mirred_list_lock); CPU0: tcf_mirred_release(): spin_lock_bh(&mirred_list_lock); list_del(&m->tcfm_list); spin_unlock_bh(&mirred_list_lock); if (dev) // <======== Stil refers to the old m->tcfm_dev dev_put(dev); // <======== dev_put() is called on it again The action init code path is good because it is impossible to modify an action that is being removed. So, fix this by moving everything under the spinlock. Fixes: 2ee22a90c7af ("net_sched: act_mirred: remove spinlock in fast path") Fixes: 6bd00b850635 ("act_mirred: fix a race condition on mirred_list") Cc: Jamal Hadi Salim Signed-off-by: Cong Wang Acked-by: Jamal Hadi Salim Signed-off-by: David S. Miller --- net/sched/act_mirred.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c index 7737cdb7d574..128942bc9e42 100644 --- a/net/sched/act_mirred.c +++ b/net/sched/act_mirred.c @@ -36,14 +36,15 @@ static DEFINE_SPINLOCK(mirred_list_lock); static void tcf_mirred_release(struct tc_action *a, int bind) { struct tcf_mirred *m = to_mirred(a); - struct net_device *dev = rcu_dereference_protected(m->tcfm_dev, 1); + struct net_device *dev; /* We could be called either in a RCU callback or with RTNL lock held. */ spin_lock_bh(&mirred_list_lock); list_del(&m->tcfm_list); - spin_unlock_bh(&mirred_list_lock); + dev = rcu_dereference_protected(m->tcfm_dev, 1); if (dev) dev_put(dev); + spin_unlock_bh(&mirred_list_lock); } static const struct nla_policy mirred_policy[TCA_MIRRED_MAX + 1] = { From da4ed55165d41b1073f9a476f1c18493e9bf8c8e Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Tue, 17 May 2016 18:58:08 +0200 Subject: [PATCH 1642/1649] switchdev: pass pointer to fib_info instead of copy The problem is that fib_info->nh is [0] so the struct fib_info allocation size depends on number of nexthops. If we just copy fib_info, we do not copy the nexthops info and driver accesses memory which is not ours. 
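To make the memory problem concrete, here is a small standalone sketch in plain userspace C. The names are invented for illustration and are not the kernel's fib_info/fib_nh definitions; it only shows why copying sizeof(struct ...) of an object whose last member is a zero-length array silently drops the trailing elements.

/* Hypothetical illustration only: "route_info"/"hop" stand in for a
 * structure that, like fib_info, ends in a zero-length array whose
 * real size is only known at allocation time.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct hop {
	int ifindex;
};

struct route_info {
	int num_hops;
	struct hop nh[0];	/* storage is allocated past the struct */
};

int main(void)
{
	struct route_info *orig;
	struct route_info copy;

	/* The real object is sizeof(*orig) plus room for the hops. */
	orig = malloc(sizeof(*orig) + 2 * sizeof(struct hop));
	orig->num_hops = 2;
	orig->nh[0].ifindex = 3;
	orig->nh[1].ifindex = 4;

	/* A plain struct copy only covers sizeof(copy): num_hops is
	 * copied, but copy.nh[] has no storage behind it, so any later
	 * access through the copy reads memory that is not ours.
	 */
	memcpy(&copy, orig, sizeof(copy));
	printf("copied num_hops = %d\n", copy.num_hops);

	free(orig);
	return 0;
}

Passing the original pointer down, as this patch does, keeps the nexthop array reachable through the fully sized object.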
Given the fact that fib4 does not defer operations and therefore it does not need copy, just pass the pointer down to drivers as it was done before. Fixes: 850d0cbc91 ("switchdev: remove pointers from switchdev objects") Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/net/ethernet/rocker/rocker_ofdpa.c | 4 ++-- include/net/switchdev.h | 2 +- net/switchdev/switchdev.c | 6 ++---- 3 files changed, 5 insertions(+), 7 deletions(-) diff --git a/drivers/net/ethernet/rocker/rocker_ofdpa.c b/drivers/net/ethernet/rocker/rocker_ofdpa.c index 0e758bcb26b0..1ca796316173 100644 --- a/drivers/net/ethernet/rocker/rocker_ofdpa.c +++ b/drivers/net/ethernet/rocker/rocker_ofdpa.c @@ -2727,7 +2727,7 @@ static int ofdpa_port_obj_fib4_add(struct rocker_port *rocker_port, return ofdpa_port_fib_ipv4(ofdpa_port, trans, htonl(fib4->dst), fib4->dst_len, - &fib4->fi, fib4->tb_id, 0); + fib4->fi, fib4->tb_id, 0); } static int ofdpa_port_obj_fib4_del(struct rocker_port *rocker_port, @@ -2737,7 +2737,7 @@ static int ofdpa_port_obj_fib4_del(struct rocker_port *rocker_port, return ofdpa_port_fib_ipv4(ofdpa_port, NULL, htonl(fib4->dst), fib4->dst_len, - &fib4->fi, fib4->tb_id, + fib4->fi, fib4->tb_id, OFDPA_OP_FLAG_REMOVE); } diff --git a/include/net/switchdev.h b/include/net/switchdev.h index 51d77b2ce2b2..985619a59323 100644 --- a/include/net/switchdev.h +++ b/include/net/switchdev.h @@ -97,7 +97,7 @@ struct switchdev_obj_ipv4_fib { struct switchdev_obj obj; u32 dst; int dst_len; - struct fib_info fi; + struct fib_info *fi; u8 tos; u8 type; u32 nlflags; diff --git a/net/switchdev/switchdev.c b/net/switchdev/switchdev.c index b7e01d88bdc5..59658b2e9cdf 100644 --- a/net/switchdev/switchdev.c +++ b/net/switchdev/switchdev.c @@ -1188,6 +1188,7 @@ int switchdev_fib_ipv4_add(u32 dst, int dst_len, struct fib_info *fi, .obj.id = SWITCHDEV_OBJ_ID_IPV4_FIB, .dst = dst, .dst_len = dst_len, + .fi = fi, .tos = tos, .type = type, .nlflags = nlflags, @@ -1196,8 +1197,6 @@ int switchdev_fib_ipv4_add(u32 dst, int dst_len, struct fib_info *fi, struct net_device *dev; int err = 0; - memcpy(&ipv4_fib.fi, fi, sizeof(ipv4_fib.fi)); - /* Don't offload route if using custom ip rules or if * IPv4 FIB offloading has been disabled completely. 
*/ @@ -1242,6 +1241,7 @@ int switchdev_fib_ipv4_del(u32 dst, int dst_len, struct fib_info *fi, .obj.id = SWITCHDEV_OBJ_ID_IPV4_FIB, .dst = dst, .dst_len = dst_len, + .fi = fi, .tos = tos, .type = type, .nlflags = 0, @@ -1250,8 +1250,6 @@ int switchdev_fib_ipv4_del(u32 dst, int dst_len, struct fib_info *fi, struct net_device *dev; int err = 0; - memcpy(&ipv4_fib.fi, fi, sizeof(ipv4_fib.fi)); - if (!(fi->fib_flags & RTNH_F_OFFLOAD)) return 0; From cd9e2e5d3ff148be9ea210f622ce3e8e8292fcd6 Mon Sep 17 00:00:00 2001 From: John Stultz Date: Mon, 16 May 2016 20:36:15 -0700 Subject: [PATCH 1643/1649] asix: Fix offset calculation in asix_rx_fixup() causing slow transmissions In testing with HiKey, we found that since commit 3f30b158eba5 ("asix: On RX avoid creating bad Ethernet frames"), we're seeing lots of noise during network transfers: [ 239.027993] asix 1-1.1:1.0 eth0: asix_rx_fixup() Data Header synchronisation was lost, remaining 988 [ 239.037310] asix 1-1.1:1.0 eth0: asix_rx_fixup() Bad Header Length 0x54ebb5ec, offset 4 [ 239.045519] asix 1-1.1:1.0 eth0: asix_rx_fixup() Bad Header Length 0xcdffe7a2, offset 4 [ 239.275044] asix 1-1.1:1.0 eth0: asix_rx_fixup() Data Header synchronisation was lost, remaining 988 [ 239.284355] asix 1-1.1:1.0 eth0: asix_rx_fixup() Bad Header Length 0x1d36f59d, offset 4 [ 239.292541] asix 1-1.1:1.0 eth0: asix_rx_fixup() Bad Header Length 0xaef3c1e9, offset 4 [ 239.518996] asix 1-1.1:1.0 eth0: asix_rx_fixup() Data Header synchronisation was lost, remaining 988 [ 239.528300] asix 1-1.1:1.0 eth0: asix_rx_fixup() Bad Header Length 0x2881912, offset 4 [ 239.536413] asix 1-1.1:1.0 eth0: asix_rx_fixup() Bad Header Length 0x5638f7e2, offset 4 And network throughput ends up being pretty bursty and slow with a overall throughput of at best ~30kB/s (where as previously we got 1.1MB/s with the slower USB1.1 "full speed" host). We found the issue also was reproducible on a x86_64 system, using a "high-speed" USB2.0 port but the throughput did not measurably drop (possibly due to the scp transfer being cpu bound on my slow test hardware). After lots of debugging, I found the check added in the problematic commit seems to be calculating the offset incorrectly. In the normal case, in the main loop of the function, we do: (where offset is zero, or set to "offset += (copy_length + 1) & 0xfffe" in the previous loop) rx->header = get_unaligned_le32(skb->data + offset); offset += sizeof(u32); But the problematic patch calculates: offset = ((rx->remaining + 1) & 0xfffe) + sizeof(u32); rx->header = get_unaligned_le32(skb->data + offset); Adding some debug logic to check those offset calculation used to find rx->header, the one in problematic code is always too large by sizeof(u32). Thus, this patch removes the incorrect " + sizeof(u32)" addition in the problematic calculation, and resolves the issue. Cc: Dean Jenkins Cc: "David B. Robins" Cc: Mark Craske Cc: Emil Goode Cc: "David S. Miller" Cc: YongQin Liu Cc: Guodong Xu Cc: Ivan Vecera Cc: linux-usb@vger.kernel.org Cc: netdev@vger.kernel.org Cc: stable #4.4+ Reported-by: Yongqin Liu Signed-off-by: John Stultz Signed-off-by: David S. Miller --- drivers/net/usb/asix_common.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c index 0c5c22b84da8..7de5ab589e4e 100644 --- a/drivers/net/usb/asix_common.c +++ b/drivers/net/usb/asix_common.c @@ -66,7 +66,7 @@ int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb, * buffer. 
*/ if (rx->remaining && (rx->remaining + sizeof(u32) <= skb->len)) { - offset = ((rx->remaining + 1) & 0xfffe) + sizeof(u32); + offset = ((rx->remaining + 1) & 0xfffe); rx->header = get_unaligned_le32(skb->data + offset); offset = 0; From 4c1cd4fdfd14ecd417962f8c2166506132697f7c Mon Sep 17 00:00:00 2001 From: Yang Shi Date: Mon, 16 May 2016 16:36:26 -0700 Subject: [PATCH 1644/1649] bpf: arm64: remove callee-save registers use for tmp registers In the current implementation of ARM64 eBPF JIT, R23 and R24 are used for tmp registers, which are callee-saved registers. This leads to variable size of JIT prologue and epilogue. The latest blinding constant change prefers to constant size of prologue and epilogue. AAPCS reserves R9 ~ R15 for temp registers which not need to be saved/restored during function call. So, replace R23 and R24 to R10 and R11, and remove tmp_used flag to save 2 instructions for some jited BPF program. CC: Daniel Borkmann Acked-by: Zi Shen Lim Signed-off-by: Yang Shi Acked-by: Catalin Marinas Acked-by: Daniel Borkmann Signed-off-by: David S. Miller --- arch/arm64/net/bpf_jit_comp.c | 34 +++++----------------------------- 1 file changed, 5 insertions(+), 29 deletions(-) diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c index d0d51903c7e0..49ba37e4bfc0 100644 --- a/arch/arm64/net/bpf_jit_comp.c +++ b/arch/arm64/net/bpf_jit_comp.c @@ -51,9 +51,9 @@ static const int bpf2a64[] = { [BPF_REG_9] = A64_R(22), /* read-only frame pointer to access stack */ [BPF_REG_FP] = A64_R(25), - /* temporary register for internal BPF JIT */ - [TMP_REG_1] = A64_R(23), - [TMP_REG_2] = A64_R(24), + /* temporary registers for internal BPF JIT */ + [TMP_REG_1] = A64_R(10), + [TMP_REG_2] = A64_R(11), /* temporary register for blinding constants */ [BPF_REG_AX] = A64_R(9), }; @@ -61,7 +61,6 @@ static const int bpf2a64[] = { struct jit_ctx { const struct bpf_prog *prog; int idx; - int tmp_used; int epilogue_offset; int *offset; u32 *image; @@ -154,8 +153,6 @@ static void build_prologue(struct jit_ctx *ctx) const u8 r8 = bpf2a64[BPF_REG_8]; const u8 r9 = bpf2a64[BPF_REG_9]; const u8 fp = bpf2a64[BPF_REG_FP]; - const u8 tmp1 = bpf2a64[TMP_REG_1]; - const u8 tmp2 = bpf2a64[TMP_REG_2]; /* * BPF prog stack layout @@ -167,7 +164,7 @@ static void build_prologue(struct jit_ctx *ctx) * | ... | callee saved registers * +-----+ * | | x25/x26 - * BPF fp register => -80:+-----+ <= (BPF_FP) + * BPF fp register => -64:+-----+ <= (BPF_FP) * | | * | ... | BPF prog stack * | | @@ -189,8 +186,6 @@ static void build_prologue(struct jit_ctx *ctx) /* Save callee-saved register */ emit(A64_PUSH(r6, r7, A64_SP), ctx); emit(A64_PUSH(r8, r9, A64_SP), ctx); - if (ctx->tmp_used) - emit(A64_PUSH(tmp1, tmp2, A64_SP), ctx); /* Save fp (x25) and x26. 
SP requires 16 bytes alignment */ emit(A64_PUSH(fp, A64_R(26), A64_SP), ctx); @@ -210,8 +205,6 @@ static void build_epilogue(struct jit_ctx *ctx) const u8 r8 = bpf2a64[BPF_REG_8]; const u8 r9 = bpf2a64[BPF_REG_9]; const u8 fp = bpf2a64[BPF_REG_FP]; - const u8 tmp1 = bpf2a64[TMP_REG_1]; - const u8 tmp2 = bpf2a64[TMP_REG_2]; /* We're done with BPF stack */ emit(A64_ADD_I(1, A64_SP, A64_SP, STACK_SIZE), ctx); @@ -220,8 +213,6 @@ static void build_epilogue(struct jit_ctx *ctx) emit(A64_POP(fp, A64_R(26), A64_SP), ctx); /* Restore callee-saved register */ - if (ctx->tmp_used) - emit(A64_POP(tmp1, tmp2, A64_SP), ctx); emit(A64_POP(r8, r9, A64_SP), ctx); emit(A64_POP(r6, r7, A64_SP), ctx); @@ -317,7 +308,6 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx) emit(A64_UDIV(is64, dst, dst, src), ctx); break; case BPF_MOD: - ctx->tmp_used = 1; emit(A64_UDIV(is64, tmp, dst, src), ctx); emit(A64_MUL(is64, tmp, tmp, src), ctx); emit(A64_SUB(is64, dst, dst, tmp), ctx); @@ -390,49 +380,41 @@ emit_bswap_uxt: /* dst = dst OP imm */ case BPF_ALU | BPF_ADD | BPF_K: case BPF_ALU64 | BPF_ADD | BPF_K: - ctx->tmp_used = 1; emit_a64_mov_i(is64, tmp, imm, ctx); emit(A64_ADD(is64, dst, dst, tmp), ctx); break; case BPF_ALU | BPF_SUB | BPF_K: case BPF_ALU64 | BPF_SUB | BPF_K: - ctx->tmp_used = 1; emit_a64_mov_i(is64, tmp, imm, ctx); emit(A64_SUB(is64, dst, dst, tmp), ctx); break; case BPF_ALU | BPF_AND | BPF_K: case BPF_ALU64 | BPF_AND | BPF_K: - ctx->tmp_used = 1; emit_a64_mov_i(is64, tmp, imm, ctx); emit(A64_AND(is64, dst, dst, tmp), ctx); break; case BPF_ALU | BPF_OR | BPF_K: case BPF_ALU64 | BPF_OR | BPF_K: - ctx->tmp_used = 1; emit_a64_mov_i(is64, tmp, imm, ctx); emit(A64_ORR(is64, dst, dst, tmp), ctx); break; case BPF_ALU | BPF_XOR | BPF_K: case BPF_ALU64 | BPF_XOR | BPF_K: - ctx->tmp_used = 1; emit_a64_mov_i(is64, tmp, imm, ctx); emit(A64_EOR(is64, dst, dst, tmp), ctx); break; case BPF_ALU | BPF_MUL | BPF_K: case BPF_ALU64 | BPF_MUL | BPF_K: - ctx->tmp_used = 1; emit_a64_mov_i(is64, tmp, imm, ctx); emit(A64_MUL(is64, dst, dst, tmp), ctx); break; case BPF_ALU | BPF_DIV | BPF_K: case BPF_ALU64 | BPF_DIV | BPF_K: - ctx->tmp_used = 1; emit_a64_mov_i(is64, tmp, imm, ctx); emit(A64_UDIV(is64, dst, dst, tmp), ctx); break; case BPF_ALU | BPF_MOD | BPF_K: case BPF_ALU64 | BPF_MOD | BPF_K: - ctx->tmp_used = 1; emit_a64_mov_i(is64, tmp2, imm, ctx); emit(A64_UDIV(is64, tmp, dst, tmp2), ctx); emit(A64_MUL(is64, tmp, tmp, tmp2), ctx); @@ -503,12 +485,10 @@ emit_cond_jmp: case BPF_JMP | BPF_JNE | BPF_K: case BPF_JMP | BPF_JSGT | BPF_K: case BPF_JMP | BPF_JSGE | BPF_K: - ctx->tmp_used = 1; emit_a64_mov_i(1, tmp, imm, ctx); emit(A64_CMP(1, dst, tmp), ctx); goto emit_cond_jmp; case BPF_JMP | BPF_JSET | BPF_K: - ctx->tmp_used = 1; emit_a64_mov_i(1, tmp, imm, ctx); emit(A64_TST(1, dst, tmp), ctx); goto emit_cond_jmp; @@ -518,7 +498,6 @@ emit_cond_jmp: const u8 r0 = bpf2a64[BPF_REG_0]; const u64 func = (u64)__bpf_call_base + imm; - ctx->tmp_used = 1; emit_a64_mov_i64(tmp, func, ctx); emit(A64_PUSH(A64_FP, A64_LR, A64_SP), ctx); emit(A64_MOV(1, A64_FP, A64_SP), ctx); @@ -564,7 +543,6 @@ emit_cond_jmp: case BPF_LDX | BPF_MEM | BPF_H: case BPF_LDX | BPF_MEM | BPF_B: case BPF_LDX | BPF_MEM | BPF_DW: - ctx->tmp_used = 1; emit_a64_mov_i(1, tmp, off, ctx); switch (BPF_SIZE(code)) { case BPF_W: @@ -588,7 +566,6 @@ emit_cond_jmp: case BPF_ST | BPF_MEM | BPF_B: case BPF_ST | BPF_MEM | BPF_DW: /* Load imm to a register then store it */ - ctx->tmp_used = 1; emit_a64_mov_i(1, tmp2, off, ctx); emit_a64_mov_i(1, tmp, imm, 
ctx); switch (BPF_SIZE(code)) { @@ -612,7 +589,6 @@ emit_cond_jmp: case BPF_STX | BPF_MEM | BPF_H: case BPF_STX | BPF_MEM | BPF_B: case BPF_STX | BPF_MEM | BPF_DW: - ctx->tmp_used = 1; emit_a64_mov_i(1, tmp, off, ctx); switch (BPF_SIZE(code)) { case BPF_W: @@ -798,7 +774,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) /* 1. Initial fake pass to compute ctx->idx. */ - /* Fake pass to fill in ctx->offset and ctx->tmp_used. */ + /* Fake pass to fill in ctx->offset. */ if (build_body(&ctx)) { prog = orig_prog; goto out_off; From 7f32541c2fdaa84af418c3e1431bbd066ab44d09 Mon Sep 17 00:00:00 2001 From: Alexander Graf Date: Mon, 16 May 2016 20:52:42 +0200 Subject: [PATCH 1645/1649] phy dp83867: Fix compilation with CONFIG_OF_MDIO=m When CONFIG_OF_MDIO is configured as module, the #define for it really is CONFIG_OF_MDIO_MODULE, not CONFIG_OF_MDIO. So if we are compiling it as module, the dp83867 doesn't see that OF_MDIO was selected and doesn't read the dt rgmii parameters. The fix is simple: Use IS_ENABLED(). It checks for both - module as well as compiled in code. Signed-off-by: Alexander Graf Signed-off-by: David S. Miller --- drivers/net/phy/dp83867.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c index 2afa61b51d41..94cc278b3136 100644 --- a/drivers/net/phy/dp83867.c +++ b/drivers/net/phy/dp83867.c @@ -99,7 +99,7 @@ static int dp83867_config_intr(struct phy_device *phydev) return phy_write(phydev, MII_DP83867_MICR, micr_status); } -#ifdef CONFIG_OF_MDIO +#if IS_ENABLED(CONFIG_OF_MDIO) static int dp83867_of_init(struct phy_device *phydev) { struct dp83867_private *dp83867 = phydev->priv; From 81003bc924bac0a99bfdc2869f5dff5a87aa4a3d Mon Sep 17 00:00:00 2001 From: Alexander Graf Date: Mon, 16 May 2016 20:52:43 +0200 Subject: [PATCH 1646/1649] phy dp83867: Make rgmii parameters optional If you compile without OF_MDIO support in an RGMII configuration, we fail to configure the dp83867 phy today by writing garbage into its configuration registers. On the other hand if you do compile with OF_MDIO and the phy gets loaded via device tree, you have to have the properties set in the device tree, otherwise we fail to load the driver and don't even attach the generic phy driver to the interface anymore. To make things slightly more consistent, make the rgmii configuration properties optional and allow a user to omit them in their device tree. Signed-off-by: Alexander Graf Signed-off-by: David S. 
Miller --- drivers/net/phy/dp83867.c | 31 ++++++++++++++++++++++++++++--- 1 file changed, 28 insertions(+), 3 deletions(-) diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c index 94cc278b3136..1b01680987c4 100644 --- a/drivers/net/phy/dp83867.c +++ b/drivers/net/phy/dp83867.c @@ -65,6 +65,7 @@ struct dp83867_private { int rx_id_delay; int tx_id_delay; int fifo_depth; + int values_are_sane; }; static int dp83867_ack_interrupt(struct phy_device *phydev) @@ -113,15 +114,30 @@ static int dp83867_of_init(struct phy_device *phydev) ret = of_property_read_u32(of_node, "ti,rx-internal-delay", &dp83867->rx_id_delay); if (ret) - return ret; + goto invalid_dt; ret = of_property_read_u32(of_node, "ti,tx-internal-delay", &dp83867->tx_id_delay); if (ret) - return ret; + goto invalid_dt; - return of_property_read_u32(of_node, "ti,fifo-depth", + ret = of_property_read_u32(of_node, "ti,fifo-depth", &dp83867->fifo_depth); + if (ret) + goto invalid_dt; + + dp83867->values_are_sane = 1; + + return 0; + +invalid_dt: + phydev_err(phydev, "missing properties in device tree"); + + /* + * We can still run with a broken dt by not using any of the optional + * parameters, so just don't set dp83867->values_are_sane. + */ + return 0; } #else static int dp83867_of_init(struct phy_device *phydev) @@ -150,6 +166,15 @@ static int dp83867_config_init(struct phy_device *phydev) dp83867 = (struct dp83867_private *)phydev->priv; } + /* + * With no or broken device tree, we don't have the values that we would + * want to configure the phy with. In that case, cross our fingers and + * assume that firmware did everything correctly for us or that we don't + * need them. + */ + if (!dp83867->values_are_sane) + return 0; + if (phy_interface_is_rgmii(phydev)) { ret = phy_write(phydev, MII_DP83867_PHYCTRL, (dp83867->fifo_depth << DP83867_PHYCR_FIFO_DEPTH_SHIFT)); From 27896c83fe94e2190f121a2fdbffffbf1d83d573 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Sat, 14 May 2016 22:40:15 +0200 Subject: [PATCH 1647/1649] r8169: default to 64-bit DMA on recent PCIe chips The current logic around the 'use_dac' module parameter prevents the r81969 driver from being loadable on 64-bit systems without any RAM below 4 GB when the parameter is left at its default value. So introduce a new default value -1 which indicates that 64-bit DMA should be enabled on sufficiently recent PCIe chips, i.e., versions RTL_GIGA_MAC_VER_18 or later. Explicit param values of 0 or 1 retain the existing behavior of unconditionally enabling/disabling 64-bit DMA on 64-bit architectures (i.e., regardless of the type and version of the chip) Since PCIe chips do not need to CPlusCmd Dual Address Cycle to be set, make that conditional on the device type as well. Cc: Realtek linux nic maintainers Signed-off-by: Ard Biesheuvel Signed-off-by: David S. 
Miller --- drivers/net/ethernet/realtek/r8169.c | 44 ++++++++++++++++------------ 1 file changed, 25 insertions(+), 19 deletions(-) diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 94f08f1e841c..0e62d74b09b3 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -345,7 +345,7 @@ static const struct pci_device_id rtl8169_pci_tbl[] = { MODULE_DEVICE_TABLE(pci, rtl8169_pci_tbl); static int rx_buf_sz = 16383; -static int use_dac; +static int use_dac = -1; static struct { u32 msg_enable; } debug = { -1 }; @@ -8224,20 +8224,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_out_mwi_2; } - tp->cp_cmd = 0; - - if ((sizeof(dma_addr_t) > 4) && - !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && use_dac) { - tp->cp_cmd |= PCIDAC; - dev->features |= NETIF_F_HIGHDMA; - } else { - rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); - if (rc < 0) { - netif_err(tp, probe, dev, "DMA configuration failed\n"); - goto err_out_free_res_3; - } - } - /* ioremap MMIO region */ ioaddr = ioremap(pci_resource_start(pdev, region), R8169_REGS_SIZE); if (!ioaddr) { @@ -8253,6 +8239,25 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) /* Identify chip attached to board */ rtl8169_get_mac_version(tp, dev, cfg->default_ver); + tp->cp_cmd = 0; + + if ((sizeof(dma_addr_t) > 4) && + (use_dac == 1 || (use_dac == -1 && pci_is_pcie(pdev) && + tp->mac_version >= RTL_GIGA_MAC_VER_18)) && + !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { + + /* CPlusCmd Dual Access Cycle is only needed for non-PCIe */ + if (!pci_is_pcie(pdev)) + tp->cp_cmd |= PCIDAC; + dev->features |= NETIF_F_HIGHDMA; + } else { + rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (rc < 0) { + netif_err(tp, probe, dev, "DMA configuration failed\n"); + goto err_out_unmap_4; + } + } + rtl_init_rxcfg(tp); rtl_irq_disable(tp); @@ -8412,12 +8417,12 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) &tp->counters_phys_addr, GFP_KERNEL); if (!tp->counters) { rc = -ENOMEM; - goto err_out_msi_4; + goto err_out_msi_5; } rc = register_netdev(dev); if (rc < 0) - goto err_out_cnt_5; + goto err_out_cnt_6; pci_set_drvdata(pdev, dev); @@ -8451,12 +8456,13 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) out: return rc; -err_out_cnt_5: +err_out_cnt_6: dma_free_coherent(&pdev->dev, sizeof(*tp->counters), tp->counters, tp->counters_phys_addr); -err_out_msi_4: +err_out_msi_5: netif_napi_del(&tp->napi); rtl_disable_msi(pdev, tp); +err_out_unmap_4: iounmap(ioaddr); err_out_free_res_3: pci_release_regions(pdev); From c606b4999b8b11600baabab6a18fb02d296569a3 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Tue, 17 May 2016 14:49:24 -0400 Subject: [PATCH 1648/1649] Revert "phy dp83867: Make rgmii parameters optional" This reverts commit 81003bc924bac0a99bfdc2869f5dff5a87aa4a3d. Developers have asked me to revert this for now. Signed-off-by: David S. 
Miller --- drivers/net/phy/dp83867.c | 31 +++---------------------------- 1 file changed, 3 insertions(+), 28 deletions(-) diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c index 1b01680987c4..94cc278b3136 100644 --- a/drivers/net/phy/dp83867.c +++ b/drivers/net/phy/dp83867.c @@ -65,7 +65,6 @@ struct dp83867_private { int rx_id_delay; int tx_id_delay; int fifo_depth; - int values_are_sane; }; static int dp83867_ack_interrupt(struct phy_device *phydev) @@ -114,30 +113,15 @@ static int dp83867_of_init(struct phy_device *phydev) ret = of_property_read_u32(of_node, "ti,rx-internal-delay", &dp83867->rx_id_delay); if (ret) - goto invalid_dt; + return ret; ret = of_property_read_u32(of_node, "ti,tx-internal-delay", &dp83867->tx_id_delay); if (ret) - goto invalid_dt; + return ret; - ret = of_property_read_u32(of_node, "ti,fifo-depth", + return of_property_read_u32(of_node, "ti,fifo-depth", &dp83867->fifo_depth); - if (ret) - goto invalid_dt; - - dp83867->values_are_sane = 1; - - return 0; - -invalid_dt: - phydev_err(phydev, "missing properties in device tree"); - - /* - * We can still run with a broken dt by not using any of the optional - * parameters, so just don't set dp83867->values_are_sane. - */ - return 0; } #else static int dp83867_of_init(struct phy_device *phydev) @@ -166,15 +150,6 @@ static int dp83867_config_init(struct phy_device *phydev) dp83867 = (struct dp83867_private *)phydev->priv; } - /* - * With no or broken device tree, we don't have the values that we would - * want to configure the phy with. In that case, cross our fingers and - * assume that firmware did everything correctly for us or that we don't - * need them. - */ - if (!dp83867->values_are_sane) - return 0; - if (phy_interface_is_rgmii(phydev)) { ret = phy_write(phydev, MII_DP83867_PHYCTRL, (dp83867->fifo_depth << DP83867_PHYCR_FIFO_DEPTH_SHIFT)); From 917fa5353da05e8a0045b8acacba8d50400d5b12 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Tue, 17 May 2016 14:49:55 -0400 Subject: [PATCH 1649/1649] Revert "phy dp83867: Fix compilation with CONFIG_OF_MDIO=m" This reverts commit 7f32541c2fdaa84af418c3e1431bbd066ab44d09. This needs reverting too, as per requests. Signed-off-by: David S. Miller --- drivers/net/phy/dp83867.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c index 94cc278b3136..2afa61b51d41 100644 --- a/drivers/net/phy/dp83867.c +++ b/drivers/net/phy/dp83867.c @@ -99,7 +99,7 @@ static int dp83867_config_intr(struct phy_device *phydev) return phy_write(phydev, MII_DP83867_MICR, micr_status); } -#if IS_ENABLED(CONFIG_OF_MDIO) +#ifdef CONFIG_OF_MDIO static int dp83867_of_init(struct phy_device *phydev) { struct dp83867_private *dp83867 = phydev->priv;